diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..fe4295e3f53 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,5 @@ +# Migrated code style to Black +eabf877cbb86b281fdd37a3fa3cc0edf9b8eb874 +321463922724b225988e517da54a18bad90bc316 +927a9f23a11bc2c83e4fc1a0d004efd98f7cb812 +6751504adf095d5d034e6406fbb0e914924aecff diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE index 46b5c5472ed..b6af6eaf962 100644 --- a/.github/PULL_REQUEST_TEMPLATE +++ b/.github/PULL_REQUEST_TEMPLATE @@ -4,15 +4,13 @@ Lines should be wrapped at about 72 characters. Please also update the CIME documentation, if necessary, in doc/source/rst and indicate below if you need to have the gh-pages html regenerated.] -Test suite: -Test baseline: -Test namelist changes: +Test suite: +Test baseline: +Test namelist changes: Test status: [bit for bit, roundoff, climate changing] Fixes [CIME Github issue #] -User interface changes?: +User interface changes?: Update gh-pages html (Y/N)?: - -Code review: diff --git a/.github/scripts/ghcr-prune.py b/.github/scripts/ghcr-prune.py new file mode 100644 index 00000000000..37c538bd621 --- /dev/null +++ b/.github/scripts/ghcr-prune.py @@ -0,0 +1,251 @@ +import argparse +import logging +import requests +import re +import json +from datetime import datetime +from datetime import timedelta + + +class GHCRPruneError(Exception): + pass + + +description = """ +This script can be used to prune container images hosted on ghcr.io.\n + +Our testing workflow will build and push container images to ghcr.io +that are only used for testing. This script is used to cleanup these +temporary images. + +You can filter containers by any combination of name, age, and untagged. +""" + +parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter +) + +parser.add_argument("--token", required=True, help='GitHub token with "repo" scope') +parser.add_argument("--org", required=True, help="Organization name") +parser.add_argument("--name", required=True, help="Package name") +parser.add_argument( + "--age", + type=int, + help="Filter versions by age, removing anything older than", + default=7, +) +parser.add_argument( + "--filter", help="Filter which versions are consider for pruning", default=".*" +) +parser.add_argument( + "--filter-pr", + action="store_true", + help="Filter pull requests, will skip removal if pull request is still open.", +) +parser.add_argument("--pr-prefix", default="pr-", help="Prefix for a pull request tag") +parser.add_argument("--untagged", action="store_true", help="Prune untagged versions") +parser.add_argument( + "--dry-run", action="store_true", help="Does not actually delete anything" +) + +logging_group = parser.add_argument_group("logging") +logging_group.add_argument( + "--log-level", choices=("DEBUG", "INFO", "WARNING", "ERROR"), default="INFO" +) + +kwargs = vars(parser.parse_args()) + +logging.basicConfig(level=kwargs["log_level"]) + +logger = logging.getLogger("ghcr-prune") + +logger.debug(f"Running with arguments:\n{kwargs}") + + +class GitHubPaginate: + """Iterator for GitHub API. + + Provides small wrapper for GitHub API to utilize paging in API calls. 
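+    Iterating over an instance walks the version listing page by page by
+    following the `Link` response header and yields the URL of each
+    package version that the filters select for pruning.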
+ + https://docs.github.com/en/rest/using-the-rest-api/using-pagination-in-the-rest-api?apiVersion=2022-11-28 + """ + + def __init__( + self, token, org, name, age, filter, untagged, filter_pr, pr_prefix, **_ + ): + self.token = token + self.session = None + self.url = ( + f"https://api.github.com/orgs/{org}/packages/container/{name}/versions" + ) + self.pr_url = f"https://api.github.com/repos/{org}/{name}/pulls" + self.expired = datetime.now() - timedelta(days=age) + self.filter_pr = filter_pr + self.pr_prefix = pr_prefix + self.filter = re.compile(filter) + self.page = None + self.untagged = untagged + + def create_session(self): + self.session = requests.Session() + self.session.headers.update( + { + "Accept": "application/vnd.github+json", + "Authorization": f"Bearer {self.token}", + "X-GitHub-Api-Version": "2022-11-28", + } + ) + + def is_pr_open(self, pr_number): + logger.info(f"Checking if PR {pr_number} is still open") + + pr_url = f"{self.pr_url}/{pr_number}" + + response = self.session.get(pr_url) + + response.raise_for_status() + + data = response.json() + + state = data["state"] + + return state == "open" + + def grab_page(self): + if self.session is None: + raise GHCRPruneError("Must create session first") + + if self.url is None: + raise GHCRPruneError("No more pages") + + response = self.session.get(self.url) + + response.raise_for_status() + + remaining = int(response.headers["X-RateLimit-Remaining"]) + + logger.debug(f"Remaining api limit {remaining}") + + if remaining <= 0: + reset = response.headers["X-RateLimit-Reset"] + + raise GHCRPruneError(f"Hit ratelimit will reset at {reset}") + + try: + self.url = self.get_next_url(response.headers["Link"]) + except Exception as e: + logger.debug(f"No Link header found {e}") + + self.url = None + + return self.filter_results(response.json()) + + def get_next_url(self, link): + match = re.match("<([^>]*)>.*", link) + + if match is None: + raise Exception("Could not determine next link") + + return match.group(1) + + def filter_results(self, data): + results = [] + + logger.info(f"Processing {len(data)} containers") + + logger.info(f"Expiration date set to {self.expired}") + + for x in data: + url = x["url"] + updated_at = datetime.strptime(x["updated_at"], "%Y-%m-%dT%H:%M:%SZ") + + logger.debug(f"Processing\n{json.dumps(x, indent=2)}") + + tags = x["metadata"]["container"]["tags"] + + if len(tags) == 0: + logger.info(f'Found untagged version {x["id"]}') + + if self.untagged: + logger.info(f'Pruning version {x["id"]}') + + results.append(url) + + continue + + # Any tag that is still valid will cause a pacakge version to not be removed + remove_package_version = True + + for tag in tags: + if self.filter_pr and tag.startswith(self.pr_prefix): + pr_number = tag[len(self.pr_prefix) :] + + if self.is_pr_open(pr_number): + logger.info( + f"Skipping package version {x['id']}, PR {pr_number} is still open" + ) + + remove_package_version = False + + break + elif self.filter.match(tag) and updated_at > self.expired: + logger.info( + f"Skipping package version {x['id']}, tag {tag!r} matched but was updated at {updated_at}" + ) + + remove_package_version = False + + break + else: + logger.info(f"Skipping package version {x['id']}, tag {tag!r}") + + remove_package_version = False + + break + + if remove_package_version: + logger.info(f"Pruning package version {x['id']}") + + results.append(url) + + return results + + def __iter__(self): + self.create_session() + + return self + + def __next__(self): + if self.page is None or len(self.page) == 
0: + try: + self.page = self.grab_page() + except GHCRPruneError as e: + logger.debug(f"StopIteration condition {e!r}") + + raise StopIteration from None + + try: + item = self.page.pop(0) + except IndexError: + raise StopIteration from None + + return item + + def remove_container(self, url): + if self.session is None: + raise Exception("Must create session first") + + response = self.session.delete(url) + + response.raise_for_status() + + logger.debug(f"{response.headers}") + + +pager = GitHubPaginate(**kwargs) + +for url in pager: + logger.info(f"Pruning {url}") + + if not kwargs["dry_run"]: + pager.remove_container(url) diff --git a/.github/workflows/bumpversion.yml b/.github/workflows/bumpversion.yml new file mode 100644 index 00000000000..de41a6c5ddb --- /dev/null +++ b/.github/workflows/bumpversion.yml @@ -0,0 +1,24 @@ +name: Bump version +on: + push: + branches: + - master +permissions: {} +jobs: + build: + permissions: + contents: write # to create a tag (mathieudutour/github-tag-action) + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Bump version and push tag + id: tag_version + uses: mathieudutour/github-tag-action@v6.0 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + create_annotated_tag: true + default_bump: patch + dry_run: false + tag_prefix: cime + fetch_all_tags: true diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000000..13b64e7cd92 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,132 @@ +name: docs + +on: + push: + branches: + - master + paths: + - 'doc/**' + + pull_request: + branches: + - master + paths: + - 'doc/**' + + workflow_dispatch: + +permissions: + contents: read +jobs: + cleanup: + permissions: + contents: write # for git push + name: Cleanup branch previews + runs-on: ubuntu-latest + if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' + steps: + - uses: actions/checkout@v4 + with: + ref: 'gh-pages' + fetch-depth: 0 + lfs: true + path: gh-pages + - name: Remove branch previews + run: | + pushd $GITHUB_WORKSPACE/gh-pages + + if [[ -e "$GITHUB_WORKSPACE/gh-pages/branch" ]]; then + ls -la "$GITHUB_WORKSPACE/gh-pages/branch" + + for name in `ls branch/` + do + if [[ -z "$(git show-ref --quiet ${name})" ]] + then + git rm -rf branch/${name} + + echo "Removed $GITHUB_WORKSPACE/gh-pages/branch/${name}" + fi + done + + git config user.name github-actions[bot] + git config user.email github-actions[bot]@users.noreply.github.com + git commit -m "Clean up branch previews" + git push + fi + + echo "Done cleaning branches" + build-and-deploy: + permissions: + contents: write # for peaceiris/actions-gh-pages to push + pull-requests: write # to comment on pull requests + needs: cleanup + if: ${{ always() }} + name: Build and deploy documentation + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + lfs: true + - name: Install python 3.x + uses: actions/setup-python@v5 + with: + python-version: '3.x' + # https://github.com/actions/cache/blob/main/examples.md#python---pip + - name: Cache pip + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('doc/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + - name: Install dependencies + run: | + pip install -r doc/requirements.txt + # Build documentation under ${PWD}/_build + - name: Build Sphinx docs + run: | + make BUILDDIR=${PWD}/_build -C doc/ html + - name: Push PR preview + if: | + github.event_name == 'pull_request' 
&& + github.event.pull_request.head.repo.full_name == github.repository + uses: peaceiris/actions-gh-pages@v4 + with: + github_token: ${{secrets.GITHUB_TOKEN}} + publish_dir: './_build/html' + destination_dir: './branch/${{ github.event.pull_request.head.ref }}/html' + user_name: 'github-actions[bot]' + user_email: 'github-actions[bot]@users.noreply.github.com' + - name: Comment about previewing documentation + if: | + github.event_name == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.repository + uses: actions/github-script@v6 + with: + script: | + const comments = await github.paginate(github.rest.issues.listComments, { + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number + }); + + const havePosted = comments.map(x => x.user.login).some(x => x === "github-actions[bot]"); + + if (!havePosted) { + await github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: 'You can preview documentation at https://esmci.github.io/cime/branch/${{ github.event.pull_request.head.ref }}/html/index.html' + }) + } + - name: Push new docs + if: ${{ github.event_name == 'push' }} + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{secrets.GITHUB_TOKEN}} + publish_dir: './_build/html' + destination_dir: './versions/master/html' + user_name: 'github-actions[bot]' + user_email: 'github-actions[bot]@users.noreply.github.com' diff --git a/.github/workflows/ghcr-prune.yml b/.github/workflows/ghcr-prune.yml new file mode 100644 index 00000000000..ca4649170b0 --- /dev/null +++ b/.github/workflows/ghcr-prune.yml @@ -0,0 +1,20 @@ +name: Prune ghcr.io container images +on: + workflow_dispatch: + +permissions: {} + +jobs: + prune: + permissions: + packages: write + pull-requests: read + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - run: | + pip install requests + + # remove containers older than 14 days and only generated by testing workflow + python .github/scripts/ghcr-prune.py --token ${{ secrets.GITHUB_TOKEN }} --org esmci --name cime --age 14 --filter sha- --filter-pr --untagged diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..ce0933e7515 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,24 @@ +name: 'Close stale issues and PRs' +on: + schedule: + # Run every day at 1:30AM + - cron: '30 1 * * *' +jobs: + stale: + permissions: + issues: write + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v8 + with: + stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.' + close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.' + days-before-stale: 90 + days-before-close: 5 + days-before-pr-close: -1 + # Issues with this label are exempt from being checked if they are stale... 
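+      # Note: days-before-pr-close is -1 above, which is intended to keep
+      # pull requests from ever being closed automatically by this action.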
+ exempt-issue-labels: Low Priority + # Below are currently defaults, but given in case we decide to change + operations-per-run: 30 + stale-issue-label: Stale + close-issue-reason: not_planned diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml new file mode 100644 index 00000000000..495bd88573e --- /dev/null +++ b/.github/workflows/testing.yml @@ -0,0 +1,206 @@ +name: cime testing + +on: + push: + branches: + - master + paths: + - 'CIME/**' + - 'scripts/**' + - 'tools/**' + - 'utils/**' + - 'docker/**' + + pull_request: + branches: + - master + paths: + - 'CIME/**' + - 'scripts/**' + - 'tools/**' + - 'utils/**' + - 'docker/**' + + workflow_dispatch: + +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + packages: read + +jobs: + build-containers: + runs-on: ubuntu-latest + if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }} + permissions: + packages: write + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/ESMCI/cime + tags: | + type=raw,value=latest,enable=${{ github.event_name == 'push' }} + type=ref,event=pr,enable=${{ github.event_name == 'pull_request' }} + type=sha,format=long + - name: Build and push + uses: docker/build-push-action@v6 + with: + target: base + context: docker/ + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + + pre-commit: + runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && ! cancelled() }} + timeout-minutes: 2 + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + - name: Runs pre-commit + run: | + pip install pre-commit + pre-commit run -a + + # Runs unit testing under different python versions. + unit-testing: + runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && always() && ! 
cancelled() }} + needs: build-containers + container: + image: ghcr.io/esmci/cime:${{ github.event.pull_request.head.repo.full_name == github.repository && format('sha-{0}', github.sha) || 'latest' }} + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + fail-fast: false + matrix: + python-version: ["3.8", "3.10", "3.12"] + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Run tests + shell: bash + env: + CIME_MODEL: "cesm" + CIME_DRIVER: "nuopc" + CIME_TEST_PLATFORM: ubuntu-latest + run: | + export CIME_REMOTE=https://github.com/${{ github.event.pull_request.head.repo.full_name || github.repository }} + export CIME_BRANCH=${GITHUB_HEAD_REF:-${GITHUB_REF##*/}} + + source /entrypoint.sh + + # from 'entrypoint.sh', create and activate new environment + create_environment ${{ matrix.python-version }} + + pip install -r test-requirements.txt + + # GitHub runner home is different than container + cp -rf /root/.cime /github/home/ + + pytest -vvv --cov=CIME --machine docker --no-fortran-run CIME/tests/test_unit* + + # Run system tests + system-testing: + runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && always() && ! cancelled() }} + needs: build-containers + container: + image: ghcr.io/esmci/cime:${{ github.event.pull_request.head.repo.full_name == github.repository && format('sha-{0}', github.sha) || 'latest' }} + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --hostname docker + strategy: + # allow all jobs to finish + fail-fast: false + matrix: + model: ["e3sm", "cesm"] + driver: ["mct", "nuopc"] + exclude: + # exclude nuopc driver when running e3sm tests + - model: "e3sm" + driver: "nuopc" + # exclude mct driver when running cesm tests + - model: "cesm" + driver: "mct" + steps: + - name: Checkout code + uses: actions/checkout@v3 + - name: Cache inputdata + uses: actions/cache@v3 + with: + path: /storage/inputdata + key: inputdata-2 + - name: Run tests + shell: bash + env: + CIME_MODEL: ${{ matrix.model }} + CIME_DRIVER: ${{ matrix.driver }} + CIME_TEST_PLATFORM: ubuntu-latest + run: | + pip install -r test-requirements.txt + + export CIME_REMOTE=https://github.com/${{ github.event.pull_request.head.repo.full_name || github.repository }} + export CIME_BRANCH=${GITHUB_HEAD_REF:-${GITHUB_REF##*/}} + + source /entrypoint.sh + + # GitHub runner home is different than container + cp -rf /root/.cime /github/home/ + + source /opt/conda/etc/profile.d/conda.sh + + conda activate base + + # container libnetcdf is 4.9.2 as cesm requires esmf >8.6.1 + # e3sm scorpio incompatible with 4.9.2, downgrade to 4.9.1 + # only reference found about scorpio incompatibility with 4.9.2 (https://github.com/E3SM-Project/scorpio/issues/554#issuecomment-1877361470) + # TODO open scorpio issue, possible solutions; 1. support two conda environments in container 2. maybe move from conda to spack? 
build all libraries in image + if [[ "${CIME_MODEL}" == "e3sm" ]]; then + mamba install -y 'libnetcdf=4.9.1' + fi + + pytest -vvv --cov=CIME --machine docker --no-fortran-run --no-teardown CIME/tests/test_sys* + - uses: mxschmitt/action-tmate@v3 + if: ${{ !always() }} + with: + limit-access-to-actor: true + - name: Create testing log archive + if: ${{ failure() }} + shell: bash + run: tar -czvf /testing-logs-${GITHUB_RUN_NUMBER}-${{ matrix.model }}-${{ matrix.driver }}.tar.gz /storage/cases/ + # How to download artifacts: + # https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts + - name: Upload testing logs + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: testing-logs-${{ github.run_number }}-${{ matrix.model }}-${{ matrix.driver }} + path: /testing-logs-${{ github.run_number}}-${{ matrix.model }}-${{ matrix.driver }}.tar.gz + retention-days: 4 diff --git a/.gitignore b/.gitignore index ec68c4c64ac..a1c5112c573 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,11 @@ *.pyc buildnmlc buildlib.*c +CIME.egg-info/ +build/ +_build/ +dist/ +.coverage # Ignore emacs backup files *~ @@ -18,5 +23,9 @@ scripts/Tools/JENKINS* # Ignore anything that are produced under scripts "cases" directory /scripts/cases/ -#Ignore nuopc driver available seperatly -src/drivers/nuopc \ No newline at end of file +#Ignore Externals +components +libraries +share +test_coverage/** +*.bak diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000000..13f9ecb952f --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "CIME/non_py/cprnc"] + path = CIME/non_py/cprnc + url = git@github.com:ESMCI/cprnc diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..c478a540731 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,27 @@ +exclude: ^utils/.*$ + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-xml + files: config/ + - id: end-of-file-fixer + exclude: doc/ + - id: trailing-whitespace + exclude: doc/ + - id: debug-statements + exclude: doc/|CIME/utils.py + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + files: CIME + - repo: https://github.com/PyCQA/pylint + rev: v2.11.1 + hooks: + - id: pylint + args: + - --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement,logging-format-interpolation,no-name-in-module,arguments-renamed,unspecified-encoding,protected-access,import-error,no-member,logging-fstring-interpolation + files: CIME + exclude: CIME/(tests|Tools|code_checker.py) diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index f414e711d2b..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: python - -before_install: - - sudo apt-get install -y libxml2-utils - -install: - - pip install pylint - -python: - - '2.7' - - '3.6' - -env: - - CIME_MODEL=cesm - -script: cd scripts/tests; ./scripts_regression_tests.py --machine centos7-linux A_RunUnitTests B_CheckCode G_TestMacrosBasic H_TestMakeMacros I_TestCMakeMacros - -# In addition to building PRs, also run the build on the following branches whenever -# it's pushed -branches: - only: - - master - - maint-5.6 - - nuopc-cmeps diff --git a/scripts/Tools/__init__.py b/CIME/BuildTools/__init__.py similarity index 100% rename from scripts/Tools/__init__.py rename to CIME/BuildTools/__init__.py diff --git a/CIME/BuildTools/configure.py 
b/CIME/BuildTools/configure.py new file mode 100755 index 00000000000..6e77a1a15c5 --- /dev/null +++ b/CIME/BuildTools/configure.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 + +"""This script writes CIME build information to a directory. + +The pieces of information that will be written include: + +1. Machine-specific build settings (i.e. the "Macros" file). +2. File-specific build settings (i.e. "Depends" files). +3. Environment variable loads (i.e. the env_mach_specific files). + +The .env_mach_specific.sh and .env_mach_specific.csh files are specific to a +given compiler, MPI library, and DEBUG setting. By default, these will be the +machine's default compiler, the machine's default MPI library, and FALSE, +respectively. These can be changed by setting the environment variables +COMPILER, MPILIB, and DEBUG, respectively. +""" + +from CIME.XML.standard_module_setup import * +from CIME.utils import ( + expect, + safe_copy, + get_model, + get_src_root, + stringify_bool, + copy_local_macros_to_dir, +) +from CIME.XML.env_mach_specific import EnvMachSpecific +from CIME.XML.files import Files +from CIME.build import CmakeTmpBuildDir + +import shutil, glob + +logger = logging.getLogger(__name__) + + +def configure( + machobj, + output_dir, + macros_format, + compiler, + mpilib, + debug, + comp_interface, + sysos, + unit_testing=False, + noenv=False, + threaded=False, + extra_machines_dir=None, +): + """Add Macros, Depends, and env_mach_specific files to a directory. + + Arguments: + machobj - Machines argument for this machine. + output_dir - Directory in which to place output. + macros_format - Container containing the string 'Makefile' to produce + Makefile Macros output, and/or 'CMake' for CMake output. + compiler - String containing the compiler vendor to configure for. + mpilib - String containing the MPI implementation to configure for. + debug - Boolean specifying whether debugging options are enabled. + unit_testing - Boolean specifying whether we're running unit tests (as + opposed to a system run) + extra_machines_dir - String giving path to an additional directory that will be + searched for cmake_macros. 
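+
+    A minimal illustrative call (the compiler, MPI library, interface, and
+    OS values below are hypothetical examples, not machine defaults) might
+    look like:
+
+        configure(machobj, caseroot, ["Makefile", "CMake"], "gnu",
+                  "openmpi", False, "nuopc", "LINUX")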
+ """ + new_cmake_macros_dir = Files(comp_interface=comp_interface).get_value( + "CMAKE_MACROS_DIR" + ) + for form in macros_format: + + if not os.path.isfile(os.path.join(output_dir, "Macros.cmake")): + safe_copy(os.path.join(new_cmake_macros_dir, "Macros.cmake"), output_dir) + output_cmake_macros_dir = os.path.join(output_dir, "cmake_macros") + if not os.path.exists(output_cmake_macros_dir): + shutil.copytree(new_cmake_macros_dir, output_cmake_macros_dir) + ccs_mach_dir = os.path.join( + new_cmake_macros_dir, "..", machobj.get_machine_name() + ) + for f in glob.iglob(os.path.join(ccs_mach_dir, "*.cmake")): + print(f"copying {f} to {output_cmake_macros_dir}") + safe_copy(f, output_cmake_macros_dir) + + copy_local_macros_to_dir( + output_cmake_macros_dir, extra_machdir=extra_machines_dir + ) + + if form == "Makefile": + # Use the cmake macros to generate the make macros + cmake_args = " -DOS={} -DMACH={} -DCOMPILER={} -DDEBUG={} -DMPILIB={} -Dcompile_threaded={} -DCASEROOT={}".format( + sysos, + machobj.get_machine_name(), + compiler, + stringify_bool(debug), + mpilib, + stringify_bool(threaded), + output_dir, + ) + + with CmakeTmpBuildDir(macroloc=output_dir) as cmaketmp: + output = cmaketmp.get_makefile_vars(cmake_args=cmake_args) + + with open(os.path.join(output_dir, "Macros.make"), "w") as fd: + fd.write(output) + + copy_depends_files( + machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler + ) + generate_env_mach_specific( + output_dir, + machobj, + compiler, + mpilib, + debug, + comp_interface, + sysos, + unit_testing, + threaded, + noenv=noenv, + ) + + +def copy_depends_files(machine_name, machines_dir, output_dir, compiler): + """ + Copy any system or compiler Depends files if they do not exist in the output directory + If there is a match for Depends.machine_name.compiler copy that and ignore the others + """ + # Note, the cmake build system does not stop if Depends.mach.compiler.cmake is found + makefiles_done = False + both = "{}.{}".format(machine_name, compiler) + for suffix in [both, machine_name, compiler]: + for extra_suffix in ["", ".cmake"]: + if extra_suffix == "" and makefiles_done: + continue + + basename = "Depends.{}{}".format(suffix, extra_suffix) + dfile = os.path.join(machines_dir, basename) + outputdfile = os.path.join(output_dir, basename) + if os.path.isfile(dfile): + if suffix == both and extra_suffix == "": + makefiles_done = True + if not os.path.exists(outputdfile): + safe_copy(dfile, outputdfile) + + +class FakeCase(object): + def __init__( + self, compiler, mpilib, debug, comp_interface, threading=False, gpu_type="none" + ): + # PIO_VERSION is needed to parse config_machines.xml but isn't otherwise used + # by FakeCase + self._vals = { + "COMPILER": compiler, + "MPILIB": mpilib, + "DEBUG": debug, + "COMP_INTERFACE": comp_interface, + "PIO_VERSION": 2, + "GPU_TYPE": gpu_type, + "BUILD_THREADED": threading, + "MODEL": get_model(), + "SRCROOT": get_src_root(), + } + + def get_build_threaded(self): + return self.get_value("BUILD_THREADED") + + def get_case_root(self): + """Returns the root directory for this case.""" + return self.get_value("CASEROOT") + + def get_value(self, attrib): + expect( + attrib in self._vals, + "FakeCase does not support getting value of '%s'" % attrib, + ) + return self._vals[attrib] + + def set_value(self, attrib, value): + """Sets a given variable value for the case""" + self._vals[attrib] = value + + +def generate_env_mach_specific( + output_dir, + machobj, + compiler, + mpilib, + debug, + comp_interface, + 
sysos, + unit_testing, + threaded, + noenv=False, +): + """ + env_mach_specific generation. + """ + ems_path = os.path.join(output_dir, "env_mach_specific.xml") + if os.path.exists(ems_path): + logger.warning("{} already exists, delete to replace".format(ems_path)) + return + + ems_file = EnvMachSpecific( + output_dir, unit_testing=unit_testing, standalone_configure=True + ) + ems_file.populate( + machobj, + attributes={"mpilib": mpilib, "compiler": compiler, "threaded": threaded}, + ) + ems_file.write() + + if noenv: + return + + fake_case = FakeCase(compiler, mpilib, debug, comp_interface) + ems_file.load_env(fake_case) + for shell in ("sh", "csh"): + ems_file.make_env_mach_specific_file(shell, fake_case, output_dir=output_dir) + shell_path = os.path.join(output_dir, ".env_mach_specific." + shell) + with open(shell_path, "a") as shell_file: + if shell == "sh": + shell_file.write("\nexport COMPILER={}\n".format(compiler)) + shell_file.write("export MPILIB={}\n".format(mpilib)) + shell_file.write("export DEBUG={}\n".format(repr(debug).upper())) + shell_file.write("export OS={}\n".format(sysos)) + else: + shell_file.write("\nsetenv COMPILER {}\n".format(compiler)) + shell_file.write("setenv MPILIB {}\n".format(mpilib)) + shell_file.write("setenv DEBUG {}\n".format(repr(debug).upper())) + shell_file.write("setenv OS {}\n".format(sysos)) diff --git a/CIME/ParamGen/README.ipynb b/CIME/ParamGen/README.ipynb new file mode 100644 index 00000000000..874f55bed8f --- /dev/null +++ b/CIME/ParamGen/README.ipynb @@ -0,0 +1,1100 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ParamGen Quickstart Guide\n", + "Alper Altuntas, NCAR\\\n", + "Boulder, CO - 2021" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Introduction\n", + "\n", + "ParamGen is a lightweight, generic Python module for generating runtime parameters for earth system modeling applications. The module supports arbitrary Python expressions for the specification of parameter values. This provides a high level of flexibility and genericity.\n", + "\n", + "ParamGen infers the values of model parameters from inclusive sets of *default parameters databases* (DPD) to be put together and maintained by the model developers. These databases are typically stored in a file written in a markup language such as xml, yaml or json. ParamGen is generic, i.e., it is agnostic of any details of a particular modeling framework, model component, or input/output format. By default, the base ParamGen class supports xml, yaml and json as DPD (input) format. The only out-of-the-box output format, on the other hand, is the Fortran namelist format. New input and output formats can easily be introduced by application developers via class inheritance as will be discussed in this document. \n", + "\n", + "The primary property of a ParamGen instance is its `.data` member, which is of type Python dictionary, i.e., a collection of key-value pairs. When a ParamGen instance gets created, a dictionary must be provided to the ParamGen constructor to be accepted as its initial `.data`. This initial dictionary corresponds to the DPD, which may be read from xml, yaml, json, etc.\n", + "\n", + "In the simplest case, the keys correspond to parameter names and the values correspond to parameter values. In a more involved case, the `.data` member may be formed as a nested dictionary for grouping model parameters into seperate namelist modules. 
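(A hypothetical grouped layout might look like `{\"grid_nml\": {\"NIGLOBAL\": 320}, \"time_nml\": {\"DT_THERM\": 3600.0}}`, with each outer key naming a namelist group.) 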
Moreover, the keys of the `.data` member may consist of logical expressions, i.e., *guards*. The notion of guards is one of the most important concepts in ParamGen. A *guard* is a proposition of a parameter value (similar to how guards are propositions of commands in Dijkstra's Guarded Command Language). Take the following data, for instance:\n", + "\n", + "```\n", + "NIGLOBAL:\n", + " $OCN_GRID == \"gx1v6\":\n", + " 320\n", + " $OCN_GRID == \"tx0.66v1\": \n", + " 540\n", + "...\n", + "```\n", + "\n", + "In the above nested dictionary, `NIGLOBAL` is interpreted as one of the parameter names. Within the inner dictionary, however, we have two keys, both of which are logical expressions. These logical expressions, or guards, are regarded as propositions of the values following them. After the instantiation, the `.reduce()` method may be called to evaluate the guards and determine the values of model parameters. In the above example, assuming the expandable variable `OCN_GRID` is `\"tx0.66v1\"`, calling the reduce method results in: \n", + "\n", + "```\n", + "NIGLOBAL:\n", + " 540\n", + "...\n", + "```\n", + "\n", + "Finally, the `.write()` method may be called to write the set of parameters in a desired format.\n", + "\n", + "*Note: not sure about the \"default parameters database\" (DPD) term. Any alternative suggestions welcome.* -aa" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. ParamGen in Action\n", + "\n", + "#### Obtaining ParamGen\n", + "\n", + "Although ParamGen is model-agnostic, it is currently distributed within an experimental CESM fork. To obtain this CESM version, run the following commands:\n", + "\n", + "```\n", + "git clone https://github.com/alperaltuntas/CESM.git -b paramGenBeta\n", + "(cd CESM; ./manage_externals/checkout_externals -o)\n", + "```\n", + "\n", + "In the above CESM sandbox, ParamGen is located in `CESM/cime/scripts/lib/CIME/ParamGen`\n", + "\n", + "#### Importing ParamGen class\n", + "\n", + "The first step of working with ParamGen is to import the module. \n", + "To import this experimental version of ParamGen module:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from paramgen import ParamGen" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: In the case of a CESM model, ParamGen would be imported from a buildnml script. To do so, one would first append the ParamGen directory to the PATH. See `CESM/components/mom/cime_config/buildnml` as an example." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Instantiating a ParamGen object:\n", + "ParamGen constructor expects a `data` argument, that is a Python dictionary which may be nested or not. This dictionary corresponds to the default parameters database (DPD) that is the collection of parameter name-value pairs for all possible configurations. 
Let's first define a simple Python dictionary containing three variables `X`, `Y`, and `Z`:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "DPD_dict = {\"X\" : 1.0,\n", + " \"Y\" : True,\n", + " \"Z\" : \"foo\" }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, create a ParamGen instance with this DPD dictionary:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "pg = ParamGen(DPD_dict)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'X': 1.0, 'Y': True, 'Z': 'foo'}" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now call the reduce method to generate the final version of `.data`:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "pg.reduce()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'X': 1.0, 'Y': True, 'Z': 'foo'}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As expected, the reduced data is not any different from the initial data we passed to ParamGen constructor. The `.reduce()` method makes a difference only when the initial data contains conditionals, variable expansion, or Python expressions. Before describing these mechanisms, let's generate the same ParamGen instance via yaml and json formats:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Instantiating a ParamGen object via yaml or json:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "ParamGen can be instantiated via yaml or json files using the following methods:\n", + "\n", + "- `.from_yaml()`\n", + "- `.from_json()`\n", + "\n", + "\n", + "Under the hood, these methods simply create a Python dictionary from files with these formats and then call the ParamGen constructor with the generated dictionary." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### yaml" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing DPD.yaml\n" + ] + } + ], + "source": [ + "%%writefile DPD.yaml\n", + "X: 1.0\n", + "Y: True\n", + "Z: foo" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'X': 1.0, 'Y': True, 'Z': 'foo'}" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# (2) Create a ParamGen instance:\n", + "pg = ParamGen.from_yaml(\"DPD.yaml\")\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### json" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing DPD.json\n" + ] + } + ], + "source": [ + "%%writefile DPD.json\n", + "{\n", + " \"X\": 1.0,\n", + " \"Y\": true,\n", + " \"Z\": \"foo\"\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'X': 1.0, 'Y': True, 'Z': 'foo'}" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# (2) Create a ParamGen instance:\n", + "pg = ParamGen.from_json(\"DPD.json\")\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Instantiating a ParamGen object via XML:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In a similar fashion, a ParamGen object may be created from an XML file. However, when working with XML, a specific schema must be satisfied. See the XML_NML.ipynb document for more information on how to work with XML within the ParamGen framework.\n", + "\n", + "Out of the three commonly used markup languages, yaml has the most readible and concise syntax, especially when working with large number of parameters and nested entries. The disadvantage of yaml is that it is not distributed with the standard Python, unlike xml and json. So a third party yaml parser, e.g., PyYAML, is required.\n", + "\n", + "Instead of using these file formats, we will continue to create ParamGen instances using Python dictionaries explicitly in the remainder of this documentation. Recall that ParamGen converts these formats to a dictionary before creating an instance so the instructions below apply to all ParamGen instances regardless of which DPD format is used." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ParamGen Mechanisms\n", + "- Variable Expansion\n", + "- Guards\n", + "- Formulas\n", + "- Appending" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Variable expansion\n", + "\n", + "Similar to shell parameter expansion mechanism in Linux, The `$` character may be used to introduce expandable variables in DPDs. These variables are expanded, i.e., replaced with their values, when the `.reduce()` method is called. Variable expansion may be employed in both keys and values of DPD dictionaries. 
To illustrate this mechanism, we define a new ParamGen instance:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "pg = ParamGen({\n", + " \"${alpha}\": 1.0,\n", + " \"Y\": \"$beta\",\n", + " \"$gamma\": \"foo\"\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'${alpha}': 1.0, 'Y': '$beta', '$gamma': 'foo'}" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the above ParamGen instantiation, we specify three expandable variables in keys and values: `alpha`, `beta`, `gamma`. \n", + "When expandable variables are included in the initial data, an `expand_func` must be provided. This function is required to take a string as an argument and return a scalar, i.e., a string, integer, float, or a boolean variable. The passed string corresponds to the variable name, while the return value corresponds the value of the expandable variable. A rather simple `expand_func` is defined below for demonstration purposes." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def expand_func(varname):\n", + " if varname == \"alpha\":\n", + " return \"X\"\n", + " elif varname == \"beta\":\n", + " return True\n", + " elif varname == \"gamma\":\n", + " return \"Z\"\n", + " else:\n", + " raise RuntimeError(\"Unknown variable\")" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "pg.reduce(expand_func)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'X': 1.0, 'Y': 'True', '\"Z\"': 'foo'}" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As seen above, all the expandable variables are expanded, i.e., replaced with their respective values. Notice that variable `beta` is converted from bool to string during variable expansion. The same behavior applies to numeric variables as well. However, this behavior is not restrictive because (1) all values are converted to strings before they are written out to text files anyways and (2) all logical expressions and formulas are to be strings to be evaluated. \n", + "\n", + "**Warning:** There is a behavioral difference between specifying string variables with curly braces vs. without curly braces. When a variable of type string gets specified ***without*** curly braces, it's value is automatically enclosed by quotes when the `reduce()` method is called. However, string variables specified ***with*** curly braces are not automatically enclosed by quotes. This behavior can be observed with the variable `gamma` which expands to `'\"Z\"'`. Had we specified gamma with curly braces, i.e., `${gamma}`, the value would rather be `'Z'`, and not `'\"Z\"'`. This can be confirmed with the variable `alpha` above, which expands to `'X'`.\n", + "\n", + "This behavior is introduced in ParamGen as a means of keeping conditional expressions more concise. Compare the following two logical ParamGen expressions, which are equivalent, but the first one has expandable variables defined with curly braces. 
In the first version, not only do we have to explicitly enclose expandable variables with quotes (`\"${...}\"`), but also the entire expression (`'...'`) so as to make sure that YAML parser treats the entire logical formula as a single expression. In the second version, neither of the quotes is necessary, except, of course, for the literal strings `\"gx1v7\"` and `\"datm\"`.\n", + "\n", + "`' \"${OCN_GRID}\" == \"gx1v7\" and \"${COMP_ATM}\" == \"datm\" ':`\n", + "\n", + "`$OCN_GRID == \"gx1v7\" and $COMP_ATM == \"datm\":`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### CESM/CIME XML variables as expandable variables" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Within the CESM framework, CIME XML variables may easily be specified in DBDs as expandable variables. Typically, `ParamGen` is utilized in `buildnml` scripts of components. The first argument of all `buildnml` methods is the `case` variable which is an instance of `CIME.case.Case`. This CIME case object has a `.get_value()` method that returns the value of a given XML variable. This method may simply be passed to the `reduce()` method of ParamGen as an expand function:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```\n", + "def expand_func(varname):\n", + " case.get_value(varname)\n", + " \n", + "pg.reduce(expand_func)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Or, more concisely:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```\n", + "pg.reduce(lambda varname: case.get_value(varname))\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Examples of this usage can be found in MOM6 implementation of ParamGen. Check out the following derived ParamGen classes of MOM6:\n", + "\n", + " - CESM/components/mom/cime_config/MOM_RPS/FType_input_nml.py\n", + " - CESM/components/mom/cime_config/MOM_RPS/FType_MOM_params.py\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Guards" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Recall that the keys of the `.data` dictionary specify the parameter names while the values correspond to the respective parameter values. Depending on the context, the keys may also be interpreted as guards, i.e., propositions of parameter values. The keys are interpreted as guards if all the keys at a certain level are logical expressions that evaluate to True or False. A data dictionary *without* any guards:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "dict1 = {\n", + " \"var1\": 1,\n", + " \"var2\": 2\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A data dictionary with some guards:" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "dict2 = {\n", + " \"var1\": {\n", + " True: 1,\n", + " False: 0\n", + " },\n", + " \"var2\": 2\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the above dictionary `dict2`, the variable `var1` has two options, `1` and `0`. Which value gets picked for \"var1\" depends on the guards, i.e., the propositions `True` and `False`. 
Now let's create a ParamGen instance with the dictionary `dict2`:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'var1': {True: 1, False: 0}, 'var2': 2}" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg = ParamGen(dict2)\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Observe the effect of calling the reduce method below:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'var1': 1, 'var2': 2}" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg.reduce()\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Logical Python expressions as guards\n", + "\n", + "The guards above are trivially specified to be `True` and `False`. In practice, however, guards are arbitrary Python expressions that evaluate to `True` or `False`. These expressions may have expandable variables, standard Python operators, method calls, etc. For an expression to be regarded as a guard, then, the expression must evaluate to `True` or `False`.\n", + "\n", + "Note: In YAML, the quotes enclosing the expressions are not necessary, since the YAML parser automatically interprets those logical expressions as strings.\n", + " \n", + "The following is an example with arbitrary Python expressions as guards:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'var1': 1, 'var2': 2}" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def expand_func(varname):\n", + " if varname == \"one\":\n", + " return 1.0\n", + " elif varname == \"two\":\n", + " return 2.0\n", + " else:\n", + " raise RuntimeError(\"Unknown variable\")\n", + " \n", + " \n", + "dict3 = {\n", + " \"var1\": {\n", + " '$one < $two' : 1,\n", + " '$one > $two' : 0\n", + " },\n", + " \"var2\": 2\n", + "}\n", + "\n", + "pg = ParamGen(dict3)\n", + "pg.reduce(expand_func)\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Guard behavior\n", + "\n", + "- If multiple guards evaluate to True, the last option gets picked. If it is desired to pick the first valid option, however, the default behavior may be changed by setting the optional `match` argument of ParamGen to `first`. For example: `pg = ParamGen(dict2, match='first')`\n", + "- If no guards evaluate to True, the parameter value gets set to `None`. In a model-specific write method, the parameters with the value `None` may, for example, be chosen to be omitted by the application developer. \n", + "- the `else` keyword evaluates to True only if all other guards evaluate to False.\n", + "- When an expandable variable is attempted to be expanded, and if the value is undefined, ParamGen throws an error. In some cases, certain expandable variables may be defined only for certain configurations. \n", + "For instance, in the below example, the variable `INIT_LAYERS_FROM_Z_FILE` is defined only if the `OCN_GRID` is one of `[\"gx1v6\", \"tx0.66v1\", \"tx0.25v1\"]`. 
Therefore, to avoid undefined expandable variable error, we place the `INIT_LAYERS_FROM_Z_FILE` check below the `OCN_GRID` check, as follows:\n", + "\n", + "```\n", + " tempsalt:\n", + " $OCN_GRID in [\"gx1v6\", \"tx0.66v1\", \"tx0.25v1\"]:\n", + " $INIT_LAYERS_FROM_Z_FILE == \"True\":\n", + " \"${INPUTDIR}/${TEMP_SALT_Z_INIT_FILE}\"\n", + "```\n", + "\n", + " This ensures that ParamGen attempts to expand `INIT_LAYERS_FROM_Z_FILE` variable only when `$OCN_GRID in [\"gx1v6\", \"tx0.66v1\", \"tx0.25v1\"]` evaluates to True." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Formulas" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In ParamGen, a variable value may be specified as a formula to be evaluated. This is done by setting the first character of a value to a space delimited `=` character. See the below example:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'var1': 5}" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg = ParamGen({\n", + " 'var1' : '= 2+3'\n", + "})\n", + "pg.reduce()\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that formulas may also include expandable variables:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'var1': 2.5}" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg = ParamGen({\n", + " 'var1' : '= (2+3) / $two'\n", + "})\n", + "pg.reduce(expand_func)\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Appending" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `append()` method of ParamGen adds the data of a given ParamGen instance to the existing data. If a data entry with the same name already exists, it's value gets overriden with the new value. Otherwise, the new data entry is simply appended to the existing data." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'a': 1, 'b': 3, 'c': 4}" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg1 = ParamGen({'a':1, 'b':2})\n", + "pg2 = ParamGen({'b':3, 'c':4})\n", + "pg1.append(pg2)\n", + "pg1.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Special Use Cases\n", + "\n", + "### Referencing across multiple ParamGen instances\n", + "\n", + "The genericity that comes with the custom expand functions allows us to reference the data of a ParamGen instance in another ParamGen instance. To illustrate this use case, we define two ParamGen instances:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "pg1 = ParamGen({\n", + " 'var1' : 'foo',\n", + " 'var2' : 'bar'\n", + "})\n", + "\n", + "pg2 = ParamGen({\n", + " 'var3': '${var1}${var2}' \n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice above that the second ParamGen instance, pg2, data includes references to variables defined in pg1 data. 
Now let's reduce the data of pg2 and pass a lambda function that returns the values of pg1 variables:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "pg2.reduce(lambda varname: pg1.data[varname])" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'var3': 'foobar'}" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg2.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Note:** Cross-referencing, i.e., references to variables within the same instance, is not supported. (May be added later on if need be. -aa)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Custom value inference\n", + "\n", + "More involved expand functions may allow higher customizations. In the below example, for instance, we read in an xarray dataset `ds` and set the value of an expandable variable `my_fields_list` to the list of all variables in `ds`, that is `\"lat air lon time\"`." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "pg1 = ParamGen({\n", + " 'var1' : 'foo',\n", + " 'var2' : 'bar'\n", + "})\n", + "\n", + "pg2 = ParamGen({\n", + " 'param1': '${var1}${var2}',\n", + " 'param2': '$my_fields_list'\n", + "})\n", + "\n", + "def expand_function(varname):\n", + " if varname in pg1.data:\n", + " return pg1.data[varname]\n", + " elif varname == \"my_fields_list\":\n", + " try:\n", + " import xarray as xr\n", + " ds = xr.tutorial.load_dataset(\"air_temperature\")\n", + " return ' '.join([var for var in ds.variables])\n", + " except:\n", + " print(\"Cannot load xarray module. Skipping...\")\n", + " return None" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "pg2.reduce(expand_function)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'param1': 'foobar', 'param2': '\"lat air lon time\"'}" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg2.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Regular expression searches as guards\n", + "\n", + "In some cases, it may be desirable to do regex searches as opposed to simpler string comparisons. One such example use case is provided below:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "pg = ParamGen({\n", + " 'USE_MARBL_TRACERS': {\n", + " 'bool(re.search(\"MOM6%[^_]*MARBL\", $COMPSET ))': True,\n", + " 'else': False\n", + " }\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'USE_MARBL_TRACERS': True}" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pg.reduce(lambda varname:'1850_DATM%NYF_SLND_DICE%SSMI_MOM6%MARBL_DROF%NYF_SGLC_SWAV')\n", + "pg.data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice how the `re.search()` method is used above in the first value guard. 
The guard evaluates to true since the `re.search()` method is able to find `\"MOM6%[^_]*MARBL\"` regex pattern in the specified COMPSET: \n", + "`1850_DATM%NYF_SLND_DICE%SSMI_MOM6%MARBL_DROF%NYF_SGLC_SWAV`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Notes on ParamGen for MOM6 in CESM\n", + "\n", + "Here, we briefly describe how ParamGen is used within CESM to generate MOM6 runtime parameters. Called from the `buildnml` script of MOM6+CESM, the ParamGen module is used to generate the four main MOM6 runtime input files:\n", + "\n", + " 1. **MOM_input:** Default MOM6 runtime parameters. The file syntax is based on the simple `key = value` pair. Example parameter entries from a typical MOM6 experiment:\n", + " \n", + " ```\n", + " DIABATIC_FIRST = True ! If true, apply diabatic and thermodynamic processes...\n", + " DT_THERM = 3600.0 ! The thermodynamic and tracer advection time step.\n", + " MIN_SALINITY = 0.0. ! The minimum value of salinity when BOUND_SALINITY=True.\n", + " ```\n", + " \n", + " 2. **MOM_override:** An auxiliary file to override parameter values set in MOM_input\n", + " 3. **input.nml:** A file to set some general MOM6 and FMS variables. The file is in classical Fortran namelist format.\n", + " 4. **diag_table:** An input file to configure the model diagnostics. It has a relatively complex syntax. See https://mom6.readthedocs.io/en/latest/api/generated/pages/Diagnostics.html\n", + " \n", + " In addition to these files, ParamGen is used to generate the MOM6 version of the `input_data_list` file needed by CIME.\n", + " \n", + " ### Default Parameters Databases \n", + " \n", + " For each of the input files mentioned above, we have a DBD that includes all of the default parameter values for any possible model configuration. In the case of `MOM_input` for instance, we have a DBD called `MOM_input.yaml` located in `components/mom/param_templates`. An example entry from this file:\n", + " \n", + " ```\n", + " DT_THERM:\n", + " description: |\n", + " \"[s] default = 3600.0\n", + " The thermodynamic and tracer advection time step.\n", + " value:\n", + " $OCN_GRID == \"MISOMIP\": 1800.0\n", + " else: >\n", + " = ( ( $NCPL_BASE_PERIOD ==\"decade\") * 86400.0 * 3650 +\n", + " ( $NCPL_BASE_PERIOD ==\"year\") * 86400.0 * 365 +\n", + " ( $NCPL_BASE_PERIOD ==\"day\") * 86400.0 +\n", + " ( $NCPL_BASE_PERIOD ==\"hour\") * 3600.0 ) / $OCN_NCPL\n", + "```\n", + "\n", + "In the above entry, the default value of the runtime parameter `DT_THERM` is specified, which depends on a few CIME variables such as `OCN_GRID`, and `OCN_NCPL`. Notice the usage of expandable variables, guards, and a formula.\n", + "\n", + "Assuming `$OCN_GRID != \"MISOMIP\"`, `$NCPL_BASE_PERIOD == \"day\"`, and `$OCN_NCPL==24`, the runtime parameter `DT_THERM` gets reduced to 3600.0 when `.reduce()` method is called. \n", + "\n", + "### Utilizing ParamGen class as a base\n", + "\n", + "For each of the file category, we have developed individual classes derived from the `ParamGen` class. These classes are located in `CESM/components/mom/cime_config/MOM_RPS/` and are utilized in the `buildnml` file to generate the corresponding input files. \n", + "\n", + " - FType_MOM_params.py\n", + " - FType_diag_table.py\n", + " - FType_input_data_list.py\n", + " - FType_input_nml.py\n", + "\n", + "Since the Fortran namelist syntax is already available as an out-of the box format, the most straightforward one is `FType_input_nml.py` which produces the `input.nml` file. 
The whole module consists of 15 lines of code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os, sys\n", + "\n", + "CIMEROOT = os.environ.get(\"CIMEROOT\")\n", + "if CIMEROOT is None:\n", + " raise SystemExit(\"ERROR: must set CIMEROOT environment variable\")\n", + "sys.path.append(os.path.join(CIMEROOT, \"scripts\", \"lib\", \"CIME\", \"ParamGen\"))\n", + "from paramgen import ParamGen\n", + "\n", + "class FType_input_nml(ParamGen):\n", + " \"\"\"Encapsulates data and read/write methods for MOM6 (FMS) input.nml file\"\"\"\n", + "\n", + " def write(self, output_path, case):\n", + " self.reduce(lambda varname: case.get_value(varname))\n", + " self.write_nml(output_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Within the `buildnml` script, the above class is instantiated and utilized as follows:\n", + "\n", + "```\n", + "...\n", + "input_nml = FType_input_nml.from_json(input_nml_src)\n", + "input_nml.write(input_nml_dest, case)\n", + "```\n", + "\n", + "#### (todo) more descriptions and notes to come." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "base" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/CIME/ParamGen/XML_NML.ipynb b/CIME/ParamGen/XML_NML.ipynb new file mode 100644 index 00000000000..bf87f0d735e --- /dev/null +++ b/CIME/ParamGen/XML_NML.ipynb @@ -0,0 +1,338 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# XML Namelist Format in ParamGen\n", + "Alper Altuntas, NCAR\\\n", + "Boulder, CO - 2021" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Introduction\n", + "\n", + "Here, we briefly describe a special use case of ParamGen: XML-based namelist format. This document is complementary to the README.ipynb file, which describes ParamGen in a broader context and with more detail. It is advised to read the README.ipynb file first." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. XML Namelist Template" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The YAML and JSON frontends of ParamGen are quite flexible in terms of the data layout, or schema. In the case of XML, however, we work with a predefined schema that resembles CESM's `entry_id_namelist.xsd`. The new ParamGen schema, called `entry_id_pg.xsd`, is located in `cime/scripts/lib/CIME/ParamGen/xml_schema/`\n", + "\n", + "We first write an example xml file, named `my_tamplate.xml` below that conforms to this new schema." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing my_template.xml\n" + ] + } + ], + "source": [ + "%%writefile my_template.xml\n", + "\n", + "\n", + "\n", + "\n", + " \n", + " real\n", + " setup_nml\n", + " Days per year\n", + " \n", + " 365\n", + " \n", + " \n", + "\n", + "\n", + " \n", + " logical\n", + " icefields_nml\n", + " f_anglet\n", + " \n", + " .true.\n", + " .false.\n", + " \n", + " \n", + "\n", + " \n", + " char\n", + " setup_nml\n", + " Method of ice cover initialization.\n", + " \n", + " UNSET\n", + " ${DIN_LOC_ROOT}/ice/cice/b40.t31x3.20th.cice.r.2006-01-01-00000.nc\n", + " ${DIN_LOC_ROOT}/ice/cice/b.e15.B1850G.f09_g16.pi_control.25.cice.r.0041-01-01-00000.nc\n", + " ${DIN_LOC_ROOT}/ice/cice/g.e11.G.T62_t12.002.cice.r.0016-01-01-00000.nc\n", + " ${DIN_LOC_ROOT}/ice/cice/cice5ic/r26RBRCICE5g0.cice.r.1990-09-01-00000.nc\n", + " \n", + " \n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The above xml file includes three namelist variable definitions taken from CICE `namelist_definition_cice.xml`. Notice how the format is very similar to the original namelist definition format. Some of the differences between the original `entry_id_namelist.xsd` vs the new `entry_id_pg.xsd:`\n", + "\n", + "- To easily distinguish these schemas, the root element in the new schema is called `entry_id_pg`, and not `entry_id`.\n", + "- Currently, only a subset of descriptive child elements are supported for `entry_id_pg` entries. These are `type`, `group`, and `desc`. More elements may be added as needed.\n", + "- In the traditional format, value propositions would be specified with arbitrary ``, attributes, e.g., `hgrid=\"gx3v7`\". The new format also supports this specification type. And within a value entry, multiple key=value attributes may be specified, in which case they are joined with logical AND. In ParamGen implementation of XML specification, however, there is an alternative method of specifying guards, which brings about greater flexibility. \n", + "- The more flexible method is based on specifying guards via the `guard=` attribute. A `guard=` attribute can be any arbitrary Python expression that evalutes to True or False. These expressions can involve any variables (see `expand_func` description in README.ipynb) and any standard Python operators, methods, list comprehensions, etc. Notice how the `.startswith()` method is used to abbreviate the `ice_ic` value list compared to the the traditional proposition specification which would require multiple value entries for each grid starting with \"gx1v\", \"tx0.1v\", and \"ar9v3\"." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before showcasing the ParamGen module, we define an `expand_func`. Recall that ParamGen makes use of custom `expand_func` to infer the values of expandable variables. In the above xml file, we have three such variables: `cice_mode`, `ICE_GRID`, and `DIN_LOC_ROOT`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def expand_func_demo(varname):\n", + " return {\n", + " 'ICE_GRID': 'gx1v6',\n", + " 'DIN_LOC_ROOT': '/glade/p/cesmdata/cseg/inputdata',\n", + " 'cice_mode': 'thermo_only',\n", + " }[varname]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "While the above `expand_func_demo` is a trivial one for demonstration purposes, the below `expand_func` is a real-world example taken from MOM6 within CESM:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def expand_func(varname):\n", + " return case.get_value(varname)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "where `case` is a CIME case object whose `get_value()` method returns the values of XML variables like `ICE_GRID`, `DIN_LOC_ROOT`, etc." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. ParamGen XML namelist format in action\n", + "\n", + "We first import the ParamGen class as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from paramgen import ParamGen" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now instantiate a ParamGen object by passing the `my_template.xml` file path to the `from_xml_nml()` method of ParamGen" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "pg = ParamGen.from_xml_nml(\"./my_template.xml\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After having instantiated the ParamGen object, we can call its `reduce` method to evaluate guards and infer final namelist variable values. Notice that we pass in the `expand_func_demo` method so ParamGen can infer the values of expandable variables such as `ICE_GRID`." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "pg.reduce(expand_func_demo)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we can write out a Fortran namelist file as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "pg.write_nml(\"./my_nml_file.nml\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The resulting namelist file is as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "&setup_nml\n", + " days_per_year = 365\n", + " ice_ic = /glade/p/cesmdata/cseg/inputdata/ice/cice/b.e15.B1850G.f09_g16.pi_control.25.cice.r.0041-01-01-00000.nc\n", + "/\n", + "\n", + "&icefields_nml\n", + " f_anglet = .true.\n", + "/\n", + "\n" + ] + } + ], + "source": [ + "!cat ./my_nml_file.nml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition to writing out the namelist file, we can access the data directly, both before and after the `reduce()` method is called. 
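With the XML namelist format, `pg.data` is a nested dictionary keyed first by namelist group and then by variable name, with the child elements (`values`, `type`, `desc`, ...) stored per variable; this layout follows from how `from_xml_nml()` groups the parameters. A small sketch of walking the reduced data (assuming the `pg` object from above):

```python
# Iterate over the reduced data: group -> variable -> attributes.
for group, variables in pg.data.items():
    for varname, attrs in variables.items():
        print("{}/{} = {}".format(group, varname, attrs["values"]))
```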
Some examples:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ".true.\n" + ] + } + ], + "source": [ + "# print out the final value of `f_anglet`:\n", + "print(pg.data['icefields_nml']['f_anglet']['values'])" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Days per year\n" + ] + } + ], + "source": [ + "# print out the description of `days_per_year`:\n", + "print(pg.data['setup_nml']['days_per_year']['desc'])" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "char\n" + ] + } + ], + "source": [ + "# print out the type of `ice_ic`:\n", + "print(pg.data['setup_nml']['ice_ic']['type'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "CMIP6 2019.10", + "language": "python", + "name": "cmip6-201910" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/CIME/ParamGen/paramgen.py b/CIME/ParamGen/paramgen.py new file mode 100644 index 00000000000..16c735577e1 --- /dev/null +++ b/CIME/ParamGen/paramgen.py @@ -0,0 +1,517 @@ +import os +import sys +import re +from copy import deepcopy +import logging +import subprocess +import shutil + +try: + from paramgen_utils import is_logical_expr, is_formula, has_unexpanded_var + from paramgen_utils import eval_formula +except ModuleNotFoundError: + from CIME.ParamGen.paramgen_utils import ( + is_logical_expr, + is_formula, + has_unexpanded_var, + ) + from CIME.ParamGen.paramgen_utils import eval_formula + +assert ( + sys.version_info.major == 3 and sys.version_info.minor >= 6 +), "ParamGen requires Python 3.6 or later." + +logger = logging.getLogger(__name__) + + +class ParamGen: + """ + ParamGen is a versatile, generic, lightweight base class to be used when developing namelist + and parameter generator tools for scientific modeling applications. + + Attributes + ---------- + data : dict + The data attribute to operate over. See Methods for the list of operations. + match: str + "first" or "last". + + Methods + ------- + from_json(input_path) + Reads in a given json input file and initializes a ParamGen object. + from_yaml(input_path) + Reads in a given yaml input file and initializes a ParamGen object. + """ + + def __init__(self, data_dict, match="last"): + assert isinstance( + data_dict, dict + ), "ParamGen class requires a dict as the initial data." 
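        # `match` must be "first" or "last": it selects which entry wins in
        # _impose_guards() when more than one guard evaluates to True.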
+ # self._validate_schema(data_dict) + self._original_data = deepcopy(data_dict) + self._data = deepcopy(data_dict) + self._reduced = False + self._match = match + + @property + def data(self): + """Returns the data attribute of the ParamGen instance.""" + return self._data + + @property + def reduced(self): + """Returns True if reduce() method is called for this instance of ParamGen.""" + return self._reduced + + @property + def is_empty(self): + """Returns True if the data property is empty.""" + return len(self._data) == 0 + + @classmethod + def from_json(cls, input_path, match="last"): + """ + Reads in a given json input file and initializes a ParamGen object. + + Parameters + ---------- + input_path: str + Path to json input file containing the defaults. + match: str + "first" or "last" + Returns + ------- + ParamGen + A ParamGen object with the data read from input_path. + """ + + import json + + with open(input_path) as json_file: + _data = json.load(json_file) + return cls(_data, match) + + @classmethod + def from_yaml(cls, input_path, match="last"): + """ + Reads in a given yaml input file and initializes a ParamGen object. + + Parameters + ---------- + input_path: str + Path to yaml input file containing the defaults. + match: str + "first" or "last" + Returns + ------- + ParamGen + A ParamGen object with the data read from input_path. + """ + + import yaml + + with open(input_path) as yaml_file: + _data = yaml.safe_load(yaml_file) + return cls(_data, match) + + @classmethod + def from_xml_nml(cls, input_path, match="last", no_duplicates=False): + """ + Reads in a given xml input file and initializes a ParamGen object. The XML file must conform to the + entry_id_pg.xsd schema that's defined within ParamGen. + + Parameters + ---------- + input_path: str + Path to xml input file containing the defaults. + match: str + "first" or "last" + no_duplicates: bool + If True, then any duplicate entry ids in the XML file will result in an error. + + Returns + ------- + ParamGen + A ParamGen object with the data read from input_path. + """ + + # First check whether the given xml file conforms to the entry_id_pg.xsd schema + xmllint = shutil.which("xmllint") + if xmllint is None: + logger.warning("Couldn't find xmllint. 
Skipping schema check") + else: + schema_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "xml_schema", + "entry_id_pg.xsd", + ) + xmllint_cmd = "{} --xinclude --noout --schema {} {}".format( + xmllint, schema_path, input_path + ) + stat = subprocess.run( + xmllint_cmd, shell=True, capture_output=True, text=True, check=True + ) + assert ( + stat.returncode == 0 + ), "While checking file {} against nml schema, received following errmsg: {}".format( + input_path, stat.stderr + ) + + import xml.etree.ElementTree as ET + + xml_tree = ET.parse(input_path) + root = xml_tree.getroot() + data = {} + + # Loop over all entries (namelist variables) + for entry in list(root): + # where each entry corresponds to a namelist variable, i.e., parameter + param_name = entry.attrib["id"] + if no_duplicates and param_name in data: + # No duplicate namelist entries allowed, so raise error: + emsg = "Entry id '{}' listed twice in file:\n'{}'" + raise ValueError(emsg.format(param_name, input_path)) + else: + data[param_name] = {} + + # loop over child entries and attributes of the parameter + for child in list(entry): + if child.tag == "values": + data[param_name]["values"] = {} + values = list(child) + + # check if the values have logical guards as propositions + guards = {} + for value in values: + if "guard" in value.attrib: + assert len(value.attrib) == 1, ( + "If an explicit guard attribute is provided for a value," + + "no other attribute may be provided. Check parameter {}".format( + param_name + ) + ) + guards[value] = value.attrib["guard"] + elif len(value.attrib) > 0: + guards[value] = " and ".join( + [ + '"${{{}}}" == "{}"'.format( + str(guard_var), str(guard_val) + ) + for guard_var, guard_val in value.attrib.items() + ] + ) + else: + assert "else" not in guards.values(), ( + "Multiple values with no guards (proposition)" + + "detected in variable {}".format(param_name) + ) + guards[value] = "else" + + if len(values) == 1 and guards[values[0]] == "else": + data[param_name]["values"] = list(values)[0].text.strip() + else: + for value, guard in guards.items(): + data[param_name]["values"][guard] = value.text.strip() + + else: + # a child element other than the element (.e.g, type, desc, group, etc.) + data[param_name][child.tag] = child.text.strip() + + # now group the parameters according to their group_id's: + _data = {} # grouped data + for param_name in data: + param_group = data[param_name]["group"] + if param_group not in _data: + _data[param_group] = {} + _data[param_group][param_name] = data[param_name] + + return cls(_data, match) + + @staticmethod + def _expand_vars(expr, expand_func): + """Replaces the expandable variables with their values in a given expression (expr) of type str. + + Parameters + ---------- + expr: str + expression with expandable variables, e.g., "$OCN_GRID == "tx0.66v1" + expand_func: + a callable objects that can return the value of any expandable variable specified in expr" + Returns + ------- + an expresion of type string where the expandable variables are replaced with their values. + + Example + ------- + >>> ParamGen._expand_vars("$x + 2 == $z", (lambda var: 1 if var=='x' else 3) ) + '1 + 2 == 3' + """ + + if expand_func is None: + return expr # No expansion function is provided, so return. + + assert isinstance( + expr, str + ), "Expression passed to _expand_vars must be string." 
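        # Both the $var and ${var} forms are matched below. When the expanded value
        # is a string and the $var form (no curly braces) was used, it is wrapped in
        # double quotes so the resulting expression stays valid Python, e.g.
        #   '$OCN_GRID == "gx1v6"'  ->  '"tx0.66v1" == "gx1v6"'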
+ expandable_vars = re.findall(r"(\$\w+|\${\w+\})", expr) + for word in expandable_vars: + word_stripped = ( + word.strip().replace("$", "").replace("{", "").replace("}", "") + ) + word_expanded = expand_func(word_stripped) + assert ( + word_expanded is not None + ), "Cannot determine the value of the variable: {}.".format(word) + + # enclose with quotes if expanded var is a string and is expression sans curly braces + if isinstance(word_expanded, str) and word[1] != "{": + word_expanded = '"' + word_expanded + '"' + else: + word_expanded = str(word_expanded) + + expr = re.sub( + r"(\$\b" + word_stripped + r"\b|\$\{" + word_stripped + r"\})", + word_expanded, + expr, + ) + + return expr + + @staticmethod + def is_guarded_dict(data_dict): + """Returns true if all the keys of a dictionary are logical expressions, i.e., guards. + + Parameters + ---------- + data_dict: dict + A dictionary where the keys may be logical expressions (of type string) + + Example + ------- + >>> ParamGen.is_guarded_dict({True: 'x', 'False': 'y'}) + True + >>> ParamGen.is_guarded_dict({ "'tx0.66v1' == 'tx0.66v1'": 'x', False: 'y'}) + True + >>> ParamGen.is_guarded_dict({'i':'x', 'j':'y'}) + False + """ + if not isinstance(data_dict, dict): + return False + + keys_logical = [is_logical_expr(str(key)) for key in data_dict] + if all(keys_logical): + return True + if any(keys_logical): + raise RuntimeError( + "Only subset of the following are guarded entries, i.e., logical " + + "expressions as keys:\n\t" + + str(data_dict) + ) + return False + + def _impose_guards(self, data_dict): + + """Given a data_dict with guarded entries, evaluates the guards and returns the entry whose guard (key) + evaluates to True. If multiple guards evaluate to true, the first or the last entry with the True guard is + returned, depending on the "match" arg passed to ParamGen initializer. This method is intended to be called + from _reduce_recursive only. + + Parameters + ---------- + data_dict: dict + A dictionary whose keys are all logical expressions, i.e., guards. + + Example + ------- + >>> obj = ParamGen({1>2: 'a', 3>2: 'b'}) + >>> new_data = obj._impose_guards(obj.data) + >>> new_data + 'b' + """ + + def _eval_guard(guard): + """returns true if a guard evaluates to true.""" + assert isinstance( + guard, str + ), "Expression passed to _eval_guard must be string." + + if has_unexpanded_var(guard): + raise RuntimeError( + "The guard " + + guard + + " has an expandable variable ($var) " + + "that's not expanded yet. All variables must already be expanded before " + + "guards can be evaluated." + ) + + guard_evaluated = eval_formula(guard) + assert isinstance(guard_evaluated, bool), "Guard is not boolean: {}".format( + guard + ) + return guard_evaluated + + if not ParamGen.is_guarded_dict(data_dict): + return data_dict + + guards_eval_true = [] # list of guards that evaluate to true. + for guard in data_dict: + if guard == "else" or _eval_guard(str(guard)) is True: + guards_eval_true.append(guard) + + if len(guards_eval_true) > 1 and "else" in guards_eval_true: + guards_eval_true.remove("else") + elif len(guards_eval_true) == 0: + return None + + if self._match == "first": + return data_dict[guards_eval_true[0]] + if self._match == "last": + return data_dict[guards_eval_true[-1]] + raise RuntimeError("Unknown match option.") + + def _reduce_recursive(self, data, expand_func=None): + """A recursive method to reduce a given data_dict. This method is intended to be called by the reduce method + only. 
Check the docstring of the reduce method for more information.""" + + if isinstance(data, dict): + + # (1) Expand vars in keys + if expand_func is not None: + data = { + ParamGen._expand_vars(key, expand_func): data[key] for key in data + } + + # (2) Evaulate guards (if applicable) + if ParamGen.is_guarded_dict(data): + data = self._reduce_recursive(self._impose_guards(data), expand_func) + + # (3) Call _reduce_recursive for all branches of dict + else: + for key in data: + data[key] = self._reduce_recursive(data[key], expand_func) + + else: # data is not a dict, and so is a value. + + # (4) Finally, process values by expanding vars and applying formulas + if isinstance(data, str): + data = ParamGen._expand_vars(data, expand_func) + if is_formula(data): + data = eval_formula(data.strip()[1:]) + + return data + + def reduce(self, expand_func=None): + """ + Reduces the data of a ParamGen instances by recursively expanding its variables, + imposing conditional guards, and evaluating the formulas in values to determine + the final values of parameters. + + Parameters + ---------- + expand_func: (optional) + a callable objects that can return the value of any expandable variable specified." + + Example + ------- + >>> obj = ParamGen({1>2: 'a', 3>2: 'b'}) + >>> obj.reduce() + >>> obj.data + 'b' + """ + + assert ( + callable(expand_func) or expand_func is None + ), "expand_func argument must be a function" + assert not self.reduced, "ParamGen data already reduced." + assert not self.is_empty, "Empty ParamGen data." + + self._data = self._reduce_recursive(self._data, expand_func) + self._reduced = True + + def append(self, pg_obj): + """Adds the data of a given ParamGen instance to the self data. If a data entry already exists in self, + the value is overriden. Otherwise, the new data entry is simply added to self. + + Parameters + ---------- + pg_obj: ParamGen object + A ParamGen instance whose data is to be appended to the self data + + Example + ------- + >>> obj1 = ParamGen({'a':1, 'b':2}) + >>> obj2 = ParamGen({'b':3, 'c':4}) + >>> obj1.append(obj2) + >>> obj1.data + {'a': 1, 'b': 3, 'c': 4} + """ + + assert isinstance(pg_obj, ParamGen), "can only append ParamGen to Paramgen" + assert ( + self.reduced == pg_obj.reduced + ), "Cannot append reduced ParamGen instance to unreduced, and vice versa." + + def _append_recursive(old_dict, new_dict): + for key, val in new_dict.items(): + if key in old_dict: + old_val = old_dict[key] + if isinstance(val, dict) and isinstance(old_val, dict): + _append_recursive(old_dict[key], new_dict[key]) + else: + old_dict[key] = new_dict[key] + else: + old_dict[key] = new_dict[key] + + _append_recursive(self._data, pg_obj._data) + + def reset(self): + """Resets the ParamGen object to its initial state, i.e., undoes the reduce method. + + Example + ------- + >>> obj = ParamGen({True:1, False:0}) + >>> obj.reduce() + >>> obj.data + 1 + >>> obj.reset() + >>> obj.data + {True: 1, False: 0} + """ + self._data = deepcopy(self._original_data) + self._reduced = False + + def write_nml(self, output_path): + """Writes the reduced data in Fortran namelist format if the data conforms to the format. + + Parameters + ---------- + output_path: str object + Path to the namelist file to be created. + """ + + assert ( + self._reduced + ), "The data may be written only after the reduce method is called." 
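        # Expected layout at this point is one dict per namelist group, e.g.
        #   {"setup_nml": {"days_per_year": {"values": "365", ...}, ...}, ...}
        # which gets written out as
        #   &setup_nml
        #     days_per_year = 365
        #   /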
+ + # check *schema* after reduction + for grp, var in self.data.items(): + # grp is the namelist module name, while var is a dictionary corresponding to the vars in namelist + assert isinstance(grp, str), "Invalid data format" + assert isinstance(var, dict), "Invalid data format" + for vls in var.values(): + # vnm is the var name, and vls is its values element + assert isinstance(vls, dict), "Invalid data format" + for val in vls: + # val is a value in the values dict + assert isinstance(val, str), "Invalid data format" + + # write the namelist file + with open(output_path, "w") as nml: + for module in self._data: + nml.write("&{}\n".format(module)) + for var in self._data[module]: + val = str(self._data[module][var]["values"]).strip() + if val is not None: + nml.write(" {} = {}\n".format(var, val)) + nml.write("/\n\n") diff --git a/CIME/ParamGen/paramgen_utils.py b/CIME/ParamGen/paramgen_utils.py new file mode 100644 index 00000000000..19e44fd5bbc --- /dev/null +++ b/CIME/ParamGen/paramgen_utils.py @@ -0,0 +1,233 @@ +"""Auxiliary functions to be used in ParamGen and derived classes""" + +import re + + +def is_number(var): + """ + Returns True if the passed var (of type string) is a number. Returns + False if var is not a string or if it is not a number. + This function is an alternative to isnumeric(), which can't handle + scientific notation. + + Parameters + ---------- + var: str + variable to check whether number or not + Returns + ------- + True or False + + Example + ------- + >>> "1e-6".isnumeric() + False + >>> is_number("1e-6") and is_number(1) and is_number(3.14) + True + >>> is_number([1,2]) or is_number("hello") + False + """ + try: + float(var) + except ValueError: + return False + except TypeError: + return False + return True + + +def is_logical_expr(expr): + """ + Returns True if a string is a logical expression. + + Please note that fortran array syntax allows for + the use of parantheses and colons in namelist + variable names, which "eval" counts as a syntax error. + + Parameters + ---------- + expr: str + expression to check whether logical or not + Returns + ------- + True or False + + Example + ------- + >>> is_logical_expr("0 > 1000") + True + >>> is_logical_expr("3+4") + False + """ + + assert isinstance( + expr, str + ), "Expression passed to is_logical_expr function must be a string." + + # special case: + if expr.strip() == "else": + return True + + try: + return isinstance(eval(expr), bool) + except (NameError, SyntaxError): + return False + + +def is_formula(expr): + """ + Returns True if expr is a ParamGen formula to evaluate. This is determined by + checking whether expr is a string with a length of 1 or greater and if the + first character of expr is '='. + + Parameters + ---------- + expr: str + expression to check whether formula or not + Returns + ------- + True or False + + Example + ------- + >>> is_formula("3*5") + False + >>> is_formula("= 3*5") + True + """ + + return isinstance(expr, str) and len(expr) > 0 and expr.strip()[0] == "=" + + +def has_unexpanded_var(expr): + """ + Checks if a given expression has an expandable variable, e.g., $OCN_GRID, + that's not expanded yet. + + Parameters + ---------- + expr: str + expression to check + Returns + ------- + True or False + + Example + ------- + >>> has_unexpanded_var("${OCN_GRID} == tx0.66v1") + True + """ + + return isinstance(expr, str) and bool(re.search(r"(\$\w+|\${\w+\})", expr)) + + +def get_expandable_vars(expr): + """ + Returns the set of expandable vars from an expression. 
+ + Parameters + ---------- + expr: str + expression to look for + Returns + ------- + a set of strings containing the expandable var names. + + Example + ------- + >>> get_expandable_vars("var1 $var2") + {'var2'} + >>> get_expandable_vars("var3 ${var4}") + {'var4'} + """ + expandable_vars = re.findall(r"(\$\w+|\${\w+\})", expr) + expandable_vars_stripped = set() + for var in expandable_vars: + var_stripped = var.strip().replace("$", "").replace("{", "").replace("}", "") + expandable_vars_stripped.add(var_stripped) + return expandable_vars_stripped + + +def _check_comparison_types(formula): + """ + A check to detect the comparison of different data types. This function + replaces equality comparisons with order comparisons to check whether + any variables of different types are compared. From Python 3.6 documentation: + A default order comparison (<, >, <=, and >=) is not provided; an attempt + raises TypeError. A motivation for this default behavior is the lack of a + similar invariant as for equality. + + Parameters + ---------- + formula: str + formula to check if it includes comparisons of different data types + Returns + ------- + True (or raises TypeError) + + Example + ------- + >>> _check_comparison_types("3.1 > 3") + True + >>> _check_comparison_types("'3.1' == 3.1") + Traceback (most recent call last): + ... + TypeError: The following formula may be comparing different types of variables: '3.1' == 3.1 + """ + guard_test = formula.replace("==", ">").replace("!=", ">").replace("<>", ">") + try: + eval(guard_test) + except TypeError as type_error: + raise TypeError( + "The following formula may be comparing different types of variables: {}".format( + formula + ) + ) from type_error + return True + + +def eval_formula(formula): + """ + This function evaluates a given formula and returns the result. It also + carries out several sanity checks before evaluation. + + Parameters + ---------- + formula: str + formula to evaluate + Returns + ------- + eval(formula) + + Example + ------- + >>> eval_formula("3*5") + 15 + >>> eval_formula("'tx0.66v1' != 'gx1v6'") + True + >>> eval_formula('$OCN_GRID != "gx1v6"') + Traceback (most recent call last): + ... + AssertionError + """ + + # make sure no expandable var exists in the formula. (They must already + # be expanded before this function is called.) 
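    # Typical inputs here are fully expanded guard or formula strings, e.g.
    #   '"gx1v6" in ["gx1v6", "tx0.66v1", "tx0.25v1"]'  ->  True
    #   '(2+3) / 2'                                      ->  2.5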
+ assert not has_unexpanded_var(formula) + + # Check whether any different data types are being compared + _check_comparison_types(formula) + + # now try to evaluate the formula: + try: + result = eval(formula) + except (TypeError, NameError, SyntaxError) as error: + raise RuntimeError("Cannot evaluate formula: " + formula) from error + + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/CIME/ParamGen/xml_schema/entry_id_pg.xsd b/CIME/ParamGen/xml_schema/entry_id_pg.xsd new file mode 100644 index 00000000000..b9ca87e94be --- /dev/null +++ b/CIME/ParamGen/xml_schema/entry_id_pg.xsd @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/CIME/Servers/__init__.py b/CIME/Servers/__init__.py new file mode 100644 index 00000000000..fb8307ba59d --- /dev/null +++ b/CIME/Servers/__init__.py @@ -0,0 +1,19 @@ +# pylint: disable=import-error +from shutil import which + +has_gftp = which("globus-url-copy") +has_svn = which("svn") +has_wget = which("wget") +has_ftp = True +try: + from ftplib import FTP +except ImportError: + has_ftp = False +if has_ftp: + from CIME.Servers.ftp import FTP +if has_svn: + from CIME.Servers.svn import SVN +if has_wget: + from CIME.Servers.wget import WGET +if has_gftp: + from CIME.Servers.gftp import GridFTP diff --git a/CIME/Servers/ftp.py b/CIME/Servers/ftp.py new file mode 100644 index 00000000000..209525a2bac --- /dev/null +++ b/CIME/Servers/ftp.py @@ -0,0 +1,114 @@ +""" +FTP Server class. Interact with a server using FTP protocol +""" +# pylint: disable=super-init-not-called +from CIME.XML.standard_module_setup import * +from CIME.Servers.generic_server import GenericServer +from CIME.utils import Timeout +from ftplib import FTP as FTPpy +from ftplib import all_errors as all_ftp_errors +import socket + +logger = logging.getLogger(__name__) +# I think that multiple inheritence would be useful here, but I couldnt make it work +# in a py2/3 compatible way. +class FTP(GenericServer): + def __init__(self, address, user="", passwd="", server=None): + if not user: + user = "" + if not passwd: + passwd = "" + expect(server, " Must call via ftp_login function") + root_address = address.split("/", 1)[1] + self.ftp = server + self._ftp_server = address + stat = self.ftp.login(user, passwd) + logger.debug("login stat {}".format(stat)) + if "Login successful" not in stat: + logging.warning( + "FAIL: Could not login to ftp server {}\n error {}".format( + address, stat + ) + ) + return None + try: + stat = self.ftp.cwd(root_address) + except all_ftp_errors as err: + logging.warning("ftplib returned error {}".format(err)) + return None + + logger.debug("cwd {} stat {}".format(root_address, stat)) + if "Directory successfully changed" not in stat: + logging.warning( + "FAIL: Could not cd to server root directory {}\n error {}".format( + root_address, stat + ) + ) + return None + + @classmethod + def ftp_login(cls, address, user="", passwd=""): + ftp_server, root_address = address.split("/", 1) + logger.info("server address {} root path {}".format(ftp_server, root_address)) + try: + with Timeout(60): + ftp = FTPpy(ftp_server) + + except socket.error as e: + logger.warning("ftp login timeout! 
{} ".format(e)) + return None + except RuntimeError: + logger.warning("ftp login timeout!") + return None + result = None + try: + result = cls(address, user=user, passwd=passwd, server=ftp) + except all_ftp_errors as e: + logger.warning("ftp error: {}".format(e)) + + return result + + def fileexists(self, rel_path): + try: + stat = self.ftp.nlst(rel_path) + except all_ftp_errors: + logger.warning("ERROR from ftp server, trying next server") + return False + + if rel_path not in stat: + if not stat or not stat[0].startswith(rel_path): + logging.warning( + "FAIL: File {} not found.\nerror {}".format(rel_path, stat) + ) + return False + return True + + def getfile(self, rel_path, full_path): + try: + stat = self.ftp.retrbinary( + "RETR {}".format(rel_path), open(full_path, "wb").write + ) + except all_ftp_errors: + if os.path.isfile(full_path): + os.remove(full_path) + logger.warning("ERROR from ftp server, trying next server") + return False + + if stat != "226 Transfer complete.": + logging.warning( + "FAIL: Failed to retreve file '{}' from FTP repo '{}' stat={}\n".format( + rel_path, self._ftp_server, stat + ) + ) + return False + return True + + def getdirectory(self, rel_path, full_path): + try: + stat = self.ftp.nlst(rel_path) + except all_ftp_errors: + logger.warning("ERROR from ftp server, trying next server") + return False + + for _file in stat: + self.getfile(_file, full_path + os.sep + os.path.basename(_file)) diff --git a/CIME/Servers/generic_server.py b/CIME/Servers/generic_server.py new file mode 100644 index 00000000000..537df181324 --- /dev/null +++ b/CIME/Servers/generic_server.py @@ -0,0 +1,27 @@ +""" +Generic Server class. There should be little or no functionality in this class, it serves only +to make sure that specific server classes maintain a consistant argument list and functionality +so that they are interchangable objects +""" +# pylint: disable=unused-argument + +from CIME.XML.standard_module_setup import * +from socket import _GLOBAL_DEFAULT_TIMEOUT + +logger = logging.getLogger(__name__) + + +class GenericServer(object): + def __init__( + self, host=" ", user=" ", passwd=" ", acct=" ", timeout=_GLOBAL_DEFAULT_TIMEOUT + ): + raise NotImplementedError + + def fileexists(self, rel_path): + """Returns True if rel_path exists on server""" + raise NotImplementedError + + def getfile(self, rel_path, full_path): + """Get file from rel_path on server and place in location full_path on client + fail if full_path already exists on client, return True if successful""" + raise NotImplementedError diff --git a/CIME/Servers/gftp.py b/CIME/Servers/gftp.py new file mode 100644 index 00000000000..f23943b583a --- /dev/null +++ b/CIME/Servers/gftp.py @@ -0,0 +1,59 @@ +""" +GridFTP Server class. 
Interact with a server using GridFTP protocol +""" +# pylint: disable=super-init-not-called +from CIME.XML.standard_module_setup import * +from CIME.Servers.generic_server import GenericServer +from CIME.utils import run_cmd + +logger = logging.getLogger(__name__) + + +class GridFTP(GenericServer): + def __init__(self, address, user="", passwd=""): + self._root_address = address + + def fileexists(self, rel_path): + stat, out, err = run_cmd( + "globus-url-copy -list {}".format( + os.path.join(self._root_address, os.path.dirname(rel_path)) + os.sep + ) + ) + if stat or os.path.basename(rel_path) not in out: + logging.warning( + "FAIL: File {} not found.\nstat={} error={}".format(rel_path, stat, err) + ) + return False + return True + + def getfile(self, rel_path, full_path): + stat, _, err = run_cmd( + "globus-url-copy -v {} file://{}".format( + os.path.join(self._root_address, rel_path), full_path + ) + ) + + if stat != 0: + logging.warning( + "FAIL: GridFTP repo '{}' does not have file '{}' error={}\n".format( + self._root_address, rel_path, err + ) + ) + return False + return True + + def getdirectory(self, rel_path, full_path): + stat, _, err = run_cmd( + "globus-url-copy -v -r {}{} file://{}{}".format( + os.path.join(self._root_address, rel_path), os.sep, full_path, os.sep + ) + ) + + if stat != 0: + logging.warning( + "FAIL: GridFTP repo '{}' does not have directory '{}' error={}\n".format( + self._root_address, rel_path, err + ) + ) + return False + return True diff --git a/CIME/Servers/svn.py b/CIME/Servers/svn.py new file mode 100644 index 00000000000..7e06ab310d4 --- /dev/null +++ b/CIME/Servers/svn.py @@ -0,0 +1,89 @@ +""" +SVN Server class. Interact with a server using SVN protocol +""" +# pylint: disable=super-init-not-called +from CIME.XML.standard_module_setup import * +from CIME.Servers.generic_server import GenericServer + +logger = logging.getLogger(__name__) + + +class SVN(GenericServer): + def __init__(self, address, user="", passwd=""): + self._args = "" + if user: + self._args += "--username {}".format(user) + if passwd: + self._args += "--password {}".format(passwd) + + self._svn_loc = address + + err = run_cmd( + "svn --non-interactive --trust-server-cert {} ls {}".format( + self._args, address + ) + )[0] + if err != 0: + logging.warning( + """ +Could not connect to svn repo '{0}' +This is most likely either a credential, proxy, or network issue . 
+To check connection and store your credential run 'svn ls {0}' and permanently store your password""".format( + address + ) + ) + return None + + def fileexists(self, rel_path): + full_url = os.path.join(self._svn_loc, rel_path) + stat, out, err = run_cmd( + "svn --non-interactive --trust-server-cert {} ls {}".format( + self._args, full_url + ) + ) + if stat != 0: + logging.warning( + "FAIL: SVN repo '{}' does not have file '{}'\nReason:{}\n{}\n".format( + self._svn_loc, full_url, out, err + ) + ) + return False + return True + + def getfile(self, rel_path, full_path): + if not rel_path: + return False + full_url = os.path.join(self._svn_loc, rel_path) + stat, output, errput = run_cmd( + "svn --non-interactive --trust-server-cert {} export {} {}".format( + self._args, full_url, full_path + ) + ) + if stat != 0: + logging.warning( + "svn export failed with output: {} and errput {}\n".format( + output, errput + ) + ) + return False + else: + logging.info("SUCCESS\n") + return True + + def getdirectory(self, rel_path, full_path): + full_url = os.path.join(self._svn_loc, rel_path) + stat, output, errput = run_cmd( + "svn --non-interactive --trust-server-cert {} export --force {} {}".format( + self._args, full_url, full_path + ) + ) + if stat != 0: + logging.warning( + "svn export failed with output: {} and errput {}\n".format( + output, errput + ) + ) + return False + else: + logging.info("SUCCESS\n") + return True diff --git a/CIME/Servers/wget.py b/CIME/Servers/wget.py new file mode 100644 index 00000000000..56517cffa7b --- /dev/null +++ b/CIME/Servers/wget.py @@ -0,0 +1,102 @@ +""" +WGET Server class. Interact with a server using WGET protocol +""" +# pylint: disable=super-init-not-called +from CIME.XML.standard_module_setup import * +from CIME.Servers.generic_server import GenericServer + +logger = logging.getLogger(__name__) + + +class WGET(GenericServer): + def __init__(self, address, user="", passwd=""): + self._args = "--no-check-certificate " + if user: + self._args += "--user {} ".format(user) + if passwd: + self._args += "--password {} ".format(passwd) + self._server_loc = address + + @classmethod + def wget_login(cls, address, user="", passwd=""): + args = "--no-check-certificate " + if user: + args += "--user {} ".format(user) + if passwd: + args += "--password {} ".format(passwd) + + try: + err = run_cmd("wget {} --spider {}".format(args, address), timeout=60)[0] + except: + logger.warning( + "Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .(location 1)".format( + address + ) + ) + return None + + if err and not "storage.neonscience.org" in address: + logger.warning( + "Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .(location 2)".format( + address + ) + ) + return None + + return cls(address, user=user, passwd=passwd) + + def fileexists(self, rel_path): + full_url = os.path.join(self._server_loc, rel_path) + stat, out, err = run_cmd("wget {} --spider {}".format(self._args, full_url)) + + if stat != 0: + logging.warning( + "FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format( + self._server_loc, full_url, out, err + ) + ) + return False + return True + + def getfile(self, rel_path, full_path): + full_url = os.path.join(self._server_loc, rel_path) + stat, output, errput = run_cmd( + "wget {} {} -nc --output-document {}".format( + self._args, full_url, full_path + ) + ) + if stat != 0: + logging.warning( + "wget failed with output: {} and errput {}\n".format(output, errput) + ) + # wget puts 
an empty file if it fails. + try: + os.remove(full_path) + except OSError: + pass + return False + else: + logging.info("SUCCESS\n") + return True + + def getdirectory(self, rel_path, full_path): + full_url = os.path.join(self._server_loc, rel_path) + stat, output, errput = run_cmd( + "wget {} {} -r -N --no-directories ".format(self._args, full_url + os.sep), + from_dir=full_path, + ) + logger.debug(output) + logger.debug(errput) + if stat != 0: + logging.warning( + "wget failed with output: {} and errput {}\n".format(output, errput) + ) + # wget puts an empty file if it fails. + try: + os.remove(full_path) + except OSError: + pass + return False + else: + logging.info("SUCCESS\n") + return True diff --git a/CIME/SystemTests/README b/CIME/SystemTests/README new file mode 100644 index 00000000000..61d0eec7f40 --- /dev/null +++ b/CIME/SystemTests/README @@ -0,0 +1,153 @@ +The following are the test functionality categories: + 1) smoke tests + 2) basic reproducibility tests + 3) restart tests + 4) threading/pe-count modification tests + 5) sequencing (layout) modification tests + 6) multi-instance tests + 7) performance tests + 8) spinup tests (TODO) + 9) other component-specific tests + +Some tests not yet implemented in python. They can be found in +cime/scripts/Testing/Testcases + + +NOTES: +- IOP is currently not functional + +====================================================================== + Smoke Tests +====================================================================== + +SMS smoke startup test (default length) + do a 5 day initial test (suffix: base) + if $IOP_ON is set then suffix is base_iop + success for non-iop is just a successful coupler + +====================================================================== + Basic reproducibility Tests +====================================================================== + +REP reproducibility: do two identical runs give the same results? + +====================================================================== + Restart Tests +====================================================================== + +ERS exact restart from startup (default 6 days + 5 days) + do an 11 day initial test - write a restart at day 6 (suffix: base) + if $IOP_ON is set then suffix is base_iop + do a 5 day restart test starting from restart at day 6 (suffix: rest) + if $IOP_ON is set then suffix is rest_iop + compare component history files ".base" and ".rest" at day 11 + +ERP pes counts hybrid (open-MP/MPI) restart bfb test from startup, default 6 days + 5 days (previousy PER) + initial pes set up out of the box + do an 11 day initial test - write a restart at day 6 (suffix base) + half the number of tasks and threads for each component + do a 5 day restart test starting from restart at day 6 (suffix rest) + this is just like an ERS test but the pe-counts/threading count are modified on restart + +ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) + (1) ref1case + do an initial for ${STOP_N}/6 writing restarts at ${STOP_N}/6 + ref1 case is a clone of the main case (by default this will be 4 days) + short term archiving is on + (2) ref2case + do a hybrid for ${STOP_N}-${STOP_N}/6 running with ref1 restarts from ${STOP_N}/6 + and writing restarts at ( ${STOP_N} - ${STOP_N}/6 )/2 +1 + (by default will run for 18 days and write a restart after 10 days) + ref2 case is a clone of the main case + short term archiving is on + (3) case + do a branch run starting from restart written in ref2 case + and run for ??? 
days + (4) case do a restart run from the branch case + +ERT Similar to ERS but longer. 2 months + 1 month + + +====================================================================== + Restart and Archive Tests +====================================================================== +ERR does an ERS test except that after the initial run the short term archive tool is run + which moves model output out of the run directory into the short-term archive directory + then the restart run is staged from the short term archive directory. In batch mode there are + four submitted jobs for this test (mira excepted) these are run1, sta1, run2 and sta2 + run1 and sta1 are submitted together with RESUBMIT=1. sta1 has a batch system dependancy + on successful completion of run1, when sta1 is completed it uses the cime resubmit capabilty + to submit run2. + + +====================================================================== + Threading/PE-Counts/Pe-Sequencing Tests +====================================================================== + +PET modified threading openmp bfb test (seq tests) + do an initial run where all components are threaded by default (suffix: base) + do another initial run with nthrds=1 for all components (suffix: single_thread) + compare base and single_thread + +PEM modified pe counts mpi bfb test (seq tests) + do an initial run with default pe layout (suffix: base) + do another initial run with modified pes (NTASKS_XXX => NTASKS_XXX/2) (suffix: modpes) + compare base and single_thread + +PEA single pe bfb test + do an initial run on 1 pe with mpi (suffix: base) + do the same run on 1 pe with mpiserial (suffix: mpiserial) + +====================================================================== + Sequencing (layout) Tests (smoke) +====================================================================== + +SEQ different sequencing bfb test + do an initial run test with out-of-box PE-layout (suffix: base) + do a second run where all root pes are at pe-0 (suffix: seq) + compare base and seq + +====================================================================== + Multi-Instance Tests (smoke) +====================================================================== + +NCK multi-instance validation vs single instance - sequential PE for instances (default length) + do an initial run test with NINST 1 (suffix: base) + do an initial run test with NINST 2 (suffix: multiinst for both _0001 and _0002) + compare base and _0001 and _0002 + +NCR multi-instance validation vs single instance - concurrent PE for instances (default length) + do an initial run test with NINST 1 (suffix: base) + do an initial run test with NINST 2 (suffix: multiinst for both _0001 and _0002) + compare base and _0001 and _0002 + (***note that NCR_script and NCK_script are the same - but NCR_build.csh and NCK_build.csh are different***) + +NOC multi-instance validation for single instance ocean (default length) + do an initial run test with NINST 2 (other than ocn), with mod to instance 1 (suffix: inst1_base, inst2_mod) + do an initial run test with NINST 2 (other than ocn), with mod to instance 2 (suffix: inst1_base, inst2_mod) + compare inst1_base with inst2_base + compare inst1_mod with inst2_mod + + +====================================================================== + Performance Tests +====================================================================== + +PFS system performance test. 
Do 20 day run, no restarts +ICP cice performance test + +====================================================================== + SPINUP tests +====================================================================== + +SSP smoke CLM spinup test (only valid for CLM compsets with CN or BGC) (TODO - change to SPL) + do an initial spin test (setting CLM_BLDNML_OTPS to -bgc_spinup_on) + write restarts at the end of the run + short term archiving is on + do a hybrid non-spinup run run from the restart files generated in the first phase + +====================================================================== + Other component-specific tests +====================================================================== + +LII CLM initial condition interpolation test diff --git a/scripts/lib/CIME/BuildTools/__init__.py b/CIME/SystemTests/__init__.py similarity index 100% rename from scripts/lib/CIME/BuildTools/__init__.py rename to CIME/SystemTests/__init__.py diff --git a/CIME/SystemTests/dae.py b/CIME/SystemTests/dae.py new file mode 100644 index 00000000000..175254d2d1b --- /dev/null +++ b/CIME/SystemTests/dae.py @@ -0,0 +1,214 @@ +""" +Implementation of the CIME data assimilation test: +Compares standard run with run broken into two data assimilation cycles. +Runs a simple DA script on each cycle which performs checks but does not +change any model state (restart files). Compares answers of two runs. + +""" + +import os.path +import logging +import glob +import gzip + +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.utils import expect + +############################################################################### +class DAE(SystemTestsCompareTwo): + ############################################################################### + """ + Implementation of the CIME data assimilation test: + Compares standard run with a run broken into two data assimilation cycles. + Runs a simple DA script on each cycle which performs checks but does not + change any model state (restart files). Compares answers of two runs. 
+ Refers to a faux data assimilation script in the + cime/scripts/data_assimilation directory + """ + + ########################################################################### + def __init__(self, case, **kwargs): + ########################################################################### + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=False, + run_two_suffix="da", + run_one_description="no data assimilation", + run_two_description="data assimilation", + **kwargs, + ) + + ########################################################################### + def _case_one_setup(self): + ########################################################################### + # Even though there may be test mods turning on data assimilation, + # case1 is the control so turn it off + self._case.set_value("DATA_ASSIMILATION_SCRIPT", "") + self._case.set_value("DATA_ASSIMILATION_CYCLES", 1) + + ########################################################################### + def _case_two_setup(self): + ########################################################################### + # Allow testmods to set an assimilation script + if len(self._case.get_value("DATA_ASSIMILATION_SCRIPT")) == 0: + # We need to find the scripts/data_assimilation directory + # LIB_DIR should be our parent dir + da_dir = os.path.join( + self._case.get_value("CIMEROOT"), "scripts/data_assimilation" + ) + expect( + os.path.isdir(da_dir), + "ERROR: da_dir, '{}', does not exist".format(da_dir), + ) + da_file = os.path.join(da_dir, "da_no_data_mod.sh") + expect( + os.path.isfile(da_file), + "ERROR: da_file, '{}', does not exist".format(da_file), + ) + # Set up two data assimilation cycles each half of the full run + self._case.set_value("DATA_ASSIMILATION_SCRIPT", da_file) + + # We need at least 2 DA cycles + da_cycles = self._case.get_value("DATA_ASSIMILATION_CYCLES") + if da_cycles < 2: + da_cycles = 2 + self._case.set_value("DATA_ASSIMILATION_CYCLES", da_cycles) + stopn = self._case.get_value("STOP_N") + expect( + (stopn % da_cycles) == 0, + "ERROR: DAE test with {0} cycles requires that STOP_N be divisible by {0}".format( + da_cycles + ), + ) + stopn = int(stopn / da_cycles) + self._case.set_value("STOP_N", stopn) + + self._case.flush() + + ########################################################################### + def run_phase(self): # pylint: disable=arguments-differ + ########################################################################### + # Clean up any da.log files in case this is a re-run. + self._activate_case2() + case_root = self._get_caseroot2() + rundir2 = self._case.get_value("RUNDIR") + da_files = glob.glob(os.path.join(rundir2, "da.log.*")) + for file_ in da_files: + os.remove(file_) + # End for + + # CONTINUE_RUN ends up TRUE, set it back in case this is a re-run. 
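        # The `with self._case:` block below presumably ensures that the value
        # changes made here are recorded and flushed back to the case XML on exit.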
+ with self._case: + self._case.set_value("CONTINUE_RUN", False) + # Turn off post DA in case this is a re-run + for comp in self._case.get_values("COMP_CLASSES"): + if comp == "ESP": + continue + else: + self._case.set_value("DATA_ASSIMILATION_{}".format(comp), False) + + # Start normal run here + self._activate_case1() + SystemTestsCompareTwo.run_phase(self) + + # Do some checks on the data assimilation 'output' from case2 + self._activate_case2() + da_files = glob.glob(os.path.join(rundir2, "da.log.*")) + if da_files is None: + logger = logging.getLogger(__name__) + path = os.path.join(case_root, "da.log.*") + logger.warning("No DA files in {}".format(path)) + + da_cycles = self._case.get_value("DATA_ASSIMILATION_CYCLES") + expect( + (da_files is not None) and (len(da_files) == da_cycles), + "ERROR: There were {:d} DA cycles in run but {:d} DA files were found".format( + da_cycles, len(da_files) if da_files is not None else 0 + ), + ) + da_files.sort() + cycle_num = 0 + compset = self._case.get_value("COMPSET") + # Special case for DWAV so we can make sure other variables are set + is_dwav = "_DWAV" in compset + for fname in da_files: + found_caseroot = False + found_cycle = False + found_signal = 0 + found_init = 0 + if is_dwav: + expected_init = self._case.get_value("NINST_WAV") + else: + # Expect a signal from every instance of every DA component + expected_init = 0 + for comp in self._case.get_values("COMP_CLASSES"): + if comp == "ESP": + continue + elif self._case.get_value("DATA_ASSIMILATION_{}".format(comp)): + expected_init = expected_init + self._case.get_value( + "NINST_{}".format(comp) + ) + + # Adjust expected initial run and post-DA numbers + if cycle_num == 0: + expected_signal = 0 + else: + expected_signal = expected_init + expected_init = 0 + + with gzip.open(fname, "r") as dfile: + for bline in dfile: + line = bline.decode("utf-8") + expect( + not "ERROR" in line, + "ERROR, error line {} found in {}".format(line, fname), + ) + if "caseroot" in line[0:8]: + found_caseroot = True + elif "cycle" in line[0:5]: + found_cycle = True + expect( + int(line[7:]) == cycle_num, + "ERROR: Wrong cycle ({:d}) found in {} (expected {:d})".format( + int(line[7:]), fname, cycle_num + ), + ) + elif "resume signal" in line: + found_signal = found_signal + 1 + expect( + "Post-DA resume signal found" in line[0:27], + "ERROR: bad post-DA message found in {}".format(fname), + ) + elif "Initial run" in line: + found_init = found_init + 1 + expect( + "Initial run signal found" in line[0:24], + "ERROR: bad Initial run message found in {}".format(fname), + ) + else: + expect( + False, + "ERROR: Unrecognized line ('{}') found in {}".format( + line, fname + ), + ) + + # End for + expect(found_caseroot, "ERROR: No caseroot found in {}".format(fname)) + expect(found_cycle, "ERROR: No cycle found in {}".format(fname)) + expect( + found_signal == expected_signal, + "ERROR: Expected {} post-DA resume signal message(s), {} found in {}".format( + expected_signal, found_signal, fname + ), + ) + expect( + found_init == expected_init, + "ERROR: Expected {} Initial run message(s), {} found in {}".format( + expected_init, found_init, fname + ), + ) + # End with + cycle_num = cycle_num + 1 + # End for diff --git a/CIME/SystemTests/eri.py b/CIME/SystemTests/eri.py new file mode 100644 index 00000000000..6d80a8fd808 --- /dev/null +++ b/CIME/SystemTests/eri.py @@ -0,0 +1,302 @@ +""" +CIME ERI test This class inherits from SystemTestsCommon +""" + +from CIME.XML.standard_module_setup import * +from CIME.utils import 
safe_copy +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from stat import S_ISDIR, ST_CTIME, ST_MODE +import shutil, glob, os + +logger = logging.getLogger(__name__) + + +def _get_rest_date(archive_root): + restdir = os.path.join(archive_root, "rest") + # get all entries in the directory w/ stats + entries = (os.path.join(restdir, fn) for fn in os.listdir(restdir)) + entries = ((os.stat(path), path) for path in entries) + entries = sorted( + (stat[ST_CTIME], path) for stat, path in entries if S_ISDIR(stat[ST_MODE]) + ) + last_dir = os.path.basename(entries[-1][1]) + ref_sec = last_dir[-5:] + ref_date = last_dir[:10] + return ref_date, ref_sec + + +def _helper(dout_sr, refdate, refsec, rundir): + rest_path = os.path.join(dout_sr, "rest", "{}-{}".format(refdate, refsec)) + if not os.path.exists(rundir): + os.makedirs(rundir) + for item in glob.glob("{}/*{}*".format(rest_path, refdate)): + dst = os.path.join(rundir, os.path.basename(item)) + if not os.path.exists(rundir): + os.mkdir(rundir) + elif os.path.exists(dst): + os.remove(dst) + if not "rpointer" in item: + os.symlink(item, dst) + + for item in glob.glob("{}/*rpointer*".format(rest_path)): + safe_copy(item, rundir) + + +class ERI(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the ERI system test + """ + SystemTestsCommon.__init__(self, case, **kwargs) + self._testname = "ERI" + + def run_phase(self): + caseroot = self._case.get_value("CASEROOT") + clone1_path = "{}.ref1".format(caseroot) + clone2_path = "{}.ref2".format(caseroot) + # self._case.set_value("CHECK_TIMING", False) + + # + # clone the main case to create ref1 and ref2 cases + # + for clone_path in [clone1_path, clone2_path]: + if os.path.exists(clone_path): + shutil.rmtree(clone_path) + + clone1, clone2 = [ + self._case.create_clone(clone_path, keepexe=True) + for clone_path in [clone1_path, clone2_path] + ] + orig_case = self._case + orig_casevar = orig_case.get_value("CASE") + # + # determine run lengths needed below + # + stop_n = self._case.get_value("STOP_N") + stop_option = self._case.get_value("STOP_OPTION") + run_startdate = self._case.get_value("RUN_STARTDATE") + start_tod = self._case.get_value("START_TOD") + if start_tod == 0: + start_tod = "00000" + + stop_n1 = int(stop_n / 6) + rest_n1 = stop_n1 + start_1 = run_startdate + + stop_n2 = stop_n - stop_n1 + + hist_n = stop_n2 + + start_1_year, start_1_month, start_1_day = [ + int(item) for item in start_1.split("-") + ] + start_2_year = start_1_year + 2 + start_2 = "{:04d}-{:02d}-{:02d}".format( + start_2_year, start_1_month, start_1_day + ) + rest_n2 = self._set_restart_interval( + stop_n=stop_n2, + stop_option=stop_option, + startdate=start_2, + starttime=start_tod, + ) + + stop_n3 = stop_n2 - rest_n2 + + ninst = self._case.get_value("NINST") + drvrest = "rpointer.cpl" + if ninst is not None and ninst > 1: + drvrest += "_0001" + drvrest += self._rest_time + self._set_drv_restart_pointer(drvrest) + + rest_n3 = self._set_restart_interval( + stop_n=stop_n3, + stop_option=stop_option, + startdate=start_2, + starttime=start_tod, + ) + stop_n4 = stop_n3 - rest_n3 + + expect(stop_n4 >= 1 and stop_n1 >= 1, "Run length too short") + + # + # (1) Test run: + # do an initial ref1 case run + # cloned the case and running there + # (NOTE: short term archiving is on) + # + + os.chdir(clone1_path) + self._set_active_case(clone1) + + logger.info( + "ref1 startup: doing a {} {} startup run from {} and {} seconds".format( + stop_n1, stop_option, start_1, 
start_tod + ) + ) + logger.info(" writing restarts at {} {}".format(rest_n1, stop_option)) + logger.info(" short term archiving is on ") + + with clone1: + clone1.set_value("CONTINUE_RUN", False) + clone1.set_value("RUN_STARTDATE", start_1) + clone1.set_value("STOP_N", stop_n1) + clone1.set_value("REST_OPTION", stop_option) + clone1.set_value("REST_N", rest_n1) + clone1.set_value("HIST_OPTION", "never") + + dout_sr1 = clone1.get_value("DOUT_S_ROOT") + + # force cam/eam namelist to write out initial file at end of run + for model in ["cam", "eam"]: + user_nl = "user_nl_{}".format(model) + if os.path.exists(user_nl): + if "inithist" not in open(user_nl, "r").read(): + with open(user_nl, "a") as fd: + fd.write("inithist = 'ENDOFRUN'\n") + + with clone1: + clone1.case_setup(test_mode=True, reset=True) + # if the initial case is hybrid this will put the reference data in the correct location + clone1.check_all_input_data() + + self._skip_pnl = False + self.run_indv(st_archive=True, suffix=None) + + # + # (2) Test run: + # do a hybrid ref2 case run + # cloned the main case and running with ref1 restarts + # (NOTE: short term archiving is on) + # + + os.chdir(clone2_path) + self._set_active_case(clone2) + + # Set startdate to start2, set ref date based on ref1 restart + refdate_2, refsec_2 = _get_rest_date(dout_sr1) + + logger.info( + "ref2 hybrid: doing a {} {} startup hybrid run".format(stop_n2, stop_option) + ) + logger.info( + " starting from {} and using ref1 {} and {} seconds".format( + start_2, refdate_2, refsec_2 + ) + ) + logger.info(" writing restarts at {} {}".format(rest_n2, stop_option)) + logger.info(" short term archiving is on ") + + # setup ref2 case + with clone2: + clone2.set_value("RUN_TYPE", "hybrid") + clone2.set_value("RUN_STARTDATE", start_2) + clone2.set_value("RUN_REFCASE", "{}.ref1".format(orig_casevar)) + clone2.set_value("RUN_REFDATE", refdate_2) + clone2.set_value("RUN_REFTOD", refsec_2) + clone2.set_value("GET_REFCASE", False) + clone2.set_value("CONTINUE_RUN", False) + clone2.set_value("STOP_N", stop_n2) + clone2.set_value("REST_OPTION", stop_option) + clone2.set_value("REST_N", rest_n2) + clone2.set_value("HIST_OPTION", stop_option) + clone2.set_value("HIST_N", hist_n) + + rundir2 = clone2.get_value("RUNDIR") + dout_sr2 = clone2.get_value("DOUT_S_ROOT") + + _helper(dout_sr1, refdate_2, refsec_2, rundir2) + + # run ref2 case (all component history files will go to short term archiving) + with clone2: + clone2.case_setup(test_mode=True, reset=True) + + self._skip_pnl = False + self.run_indv(suffix="hybrid", st_archive=True) + + # + # (3a) Test run: + # do a branch run from ref2 restart (short term archiving is off) + # + + os.chdir(caseroot) + self._set_active_case(orig_case) + refdate_3, refsec_3 = _get_rest_date(dout_sr2) + + logger.info("branch: doing a {} {} branch".format(stop_n3, stop_option)) + logger.info( + " starting from ref2 {} and {} seconds restarts".format( + refdate_3, refsec_3 + ) + ) + logger.info(" writing restarts at {} {}".format(rest_n3, stop_option)) + logger.info(" short term archiving is off") + + self._case.set_value("RUN_TYPE", "branch") + self._case.set_value( + "RUN_REFCASE", "{}.ref2".format(self._case.get_value("CASE")) + ) + self._case.set_value("RUN_REFDATE", refdate_3) + self._case.set_value("RUN_REFTOD", refsec_3) + self._case.set_value("GET_REFCASE", False) + self._case.set_value("CONTINUE_RUN", False) + self._case.set_value("STOP_N", stop_n3) + self._set_restart_interval( + stop_n=stop_n3, startdate=refdate_3, starttime=refsec_3 
+ ) + + self._case.set_value("HIST_OPTION", stop_option) + self._case.set_value("HIST_N", stop_n2) + self._case.set_value("DOUT_S", False) + self._case.flush() + + rundir = self._case.get_value("RUNDIR") + if not os.path.exists(rundir): + os.makedirs(rundir) + + _helper(dout_sr2, refdate_3, refsec_3, rundir) + + # link the hybrid history files from ref2 to the run dir for comparison + for item in glob.iglob("%s/*.hybrid" % rundir2): + newfile = "{}".format(item.replace(".ref2", "")) + newfile = os.path.basename(newfile) + dst = os.path.join(rundir, newfile) + if os.path.exists(dst): + os.remove(dst) + os.symlink(item, dst) + + self._skip_pnl = False + # run branch case (short term archiving is off) + self.run_indv() + + # + # (3b) Test run: + # do a restart continue from (3a) (short term archiving off) + # + + logger.info( + "branch restart: doing a {} {} continue restart test".format( + stop_n4, stop_option + ) + ) + + self._case.set_value("CONTINUE_RUN", True) + self._case.set_value("STOP_N", stop_n4) + self._case.set_value("REST_OPTION", "never") + self._case.set_value("DOUT_S", False) + self._case.set_value("HIST_OPTION", stop_option) + self._case.set_value("HIST_N", hist_n) + drvrest = "rpointer.cpl" + if ninst is not None and ninst > 1: + drvrest += "_0001" + drvrest += self._rest_time + + self._set_drv_restart_pointer(drvrest) + self._case.flush() + + # do the restart run (short term archiving is off) + self.run_indv(suffix="rest") + + self._component_compare_test("base", "hybrid") + self._component_compare_test("base", "rest") diff --git a/CIME/SystemTests/erio.py b/CIME/SystemTests/erio.py new file mode 100644 index 00000000000..a1e7b041cc6 --- /dev/null +++ b/CIME/SystemTests/erio.py @@ -0,0 +1,82 @@ +""" +ERIO tests restart with different PIO methods + +This class inherits from SystemTestsCommon +""" +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon + +logger = logging.getLogger(__name__) + + +class ERIO(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to file env_test.xml in the case directory + """ + SystemTestsCommon.__init__(self, case, expected=["TEST"], **kwargs) + + self._pio_types = self._case.get_env("run").get_valid_values("PIO_TYPENAME") + self._stop_n = self._case.get_value("STOP_N") + + def _full_run(self, pio_type): + stop_option = self._case.get_value("STOP_OPTION") + expect(self._stop_n > 0, "Bad STOP_N: {:d}".format(self._stop_n)) + + # Move to config_tests.xml once that's ready + rest_n = int(self._stop_n / 2) + 1 + self._case.set_value("REST_N", rest_n) + self._case.set_value("REST_OPTION", stop_option) + self._case.set_value("HIST_N", self._stop_n) + self._case.set_value("HIST_OPTION", stop_option) + self._case.set_value("CONTINUE_RUN", False) + self._case.flush() + + expect( + self._stop_n > 2, "ERROR: stop_n value {:d} too short".format(self._stop_n) + ) + logger.info( + "doing an {0} {1} initial test with restart file at {2} {1} with pio type {3}".format( + str(self._stop_n), stop_option, str(rest_n), pio_type + ) + ) + self.run_indv(suffix=pio_type) + + def _restart_run(self, pio_type, other_pio_type): + stop_option = self._case.get_value("STOP_OPTION") + + rest_n = int(self._stop_n / 2) + 1 + stop_new = self._stop_n - rest_n + expect( + stop_new > 0, + "ERROR: stop_n value {:d} too short {:d} {:d}".format( + stop_new, self._stop_n, rest_n + ), + ) + + self._case.set_value("STOP_N", stop_new) + self._case.set_value("CONTINUE_RUN", True) + 
self._case.set_value("REST_OPTION", "never") + self._case.flush() + logger.info( + "doing an {} {} restart test with {} against {}".format( + str(stop_new), stop_option, pio_type, other_pio_type + ) + ) + + suffix = "{}.{}".format(other_pio_type, pio_type) + self.run_indv(suffix=suffix) + + # Compare restart file + self._component_compare_test(other_pio_type, suffix) + + def run_phase(self): + + for idx, pio_type1 in enumerate(self._pio_types): + if pio_type1 != "default" and pio_type1 != "nothing": + self._case.set_value("PIO_TYPENAME", pio_type1) + self._full_run(pio_type1) + for pio_type2 in self._pio_types[idx + 1 :]: + if pio_type2 != "default" and pio_type2 != "nothing": + self._case.set_value("PIO_TYPENAME", pio_type2) + self._restart_run(pio_type2, pio_type1) diff --git a/CIME/SystemTests/erp.py b/CIME/SystemTests/erp.py new file mode 100644 index 00000000000..6d58248c138 --- /dev/null +++ b/CIME/SystemTests/erp.py @@ -0,0 +1,47 @@ +""" +CIME ERP test. This class inherits from RestartTest + +This is a pes counts hybrid (open-MP/MPI) restart bfb test from +startup. This is just like an ERS test but the pe-counts/threading +count are modified on restart. +(1) Do an initial run with pes set up out of the box (suffix base) +(2) Do a restart test with half the number of tasks and threads (suffix rest) +""" + +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.restart_tests import RestartTest + +logger = logging.getLogger(__name__) + + +class ERP(RestartTest): + def __init__(self, case, **kwargs): + """ + initialize a test object + """ + RestartTest.__init__( + self, + case, + separate_builds=True, + run_two_suffix="rest", + run_one_description="initial", + run_two_description="restart", + **kwargs + ) + + def _case_two_setup(self): + # halve the number of tasks and threads + for comp in self._case.get_values("COMP_CLASSES"): + ntasks = self._case1.get_value("NTASKS_{}".format(comp)) + nthreads = self._case1.get_value("NTHRDS_{}".format(comp)) + rootpe = self._case1.get_value("ROOTPE_{}".format(comp)) + if nthreads > 1: + self._case.set_value("NTHRDS_{}".format(comp), int(nthreads / 2)) + if ntasks > 1: + self._case.set_value("NTASKS_{}".format(comp), int(ntasks / 2)) + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe / 2)) + + RestartTest._case_two_setup(self) + + def _case_one_custom_postrun_action(self): + self.copy_case1_restarts_to_case2() diff --git a/CIME/SystemTests/err.py b/CIME/SystemTests/err.py new file mode 100644 index 00000000000..6d44a2169fa --- /dev/null +++ b/CIME/SystemTests/err.py @@ -0,0 +1,66 @@ +""" +CIME ERR test This class inherits from ERS +ERR tests short term archiving and restart capabilities +""" +import glob, os +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.restart_tests import RestartTest +from CIME.utils import safe_copy, ls_sorted_by_mtime + +logger = logging.getLogger(__name__) + + +class ERR(RestartTest): + def __init__(self, case, **kwargs): # pylint: disable=super-init-not-called + """ + initialize an object interface to the ERR system test + """ + super(ERR, self).__init__( + case, + separate_builds=False, + run_two_suffix="rest", + run_one_description="initial", + run_two_description="restart", + multisubmit=True, + **kwargs + ) + + def _case_one_setup(self): + super(ERR, self)._case_one_setup() + self._case.set_value("DOUT_S", True) + + def _case_two_setup(self): + super(ERR, self)._case_two_setup() + self._case.set_value("DOUT_S", False) + + def _case_two_custom_prerun_action(self): + dout_s_root = 
self._case1.get_value("DOUT_S_ROOT") + self._drv_restart_pointer = self._case2.get_value("DRV_RESTART_POINTER") + if self._drv_restart_pointer is None: + rest_root = os.path.abspath(os.path.join(dout_s_root, "rest")) + restart_list = ls_sorted_by_mtime(rest_root) + expect( + len(restart_list) >= 1, "No restart files found in {}".format(rest_root) + ) + self._case.restore_from_archive( + rest_dir=os.path.join(rest_root, restart_list[0]) + ) + else: + resttime = self._drv_restart_pointer[-16:] + rest_root = os.path.abspath(os.path.join(dout_s_root, "rest", resttime)) + expect(os.path.isdir(rest_root), "None such directory {}".format(rest_root)) + self._case.restore_from_archive(rest_dir=rest_root) + + def _case_two_custom_postrun_action(self): + # Link back to original case1 name + # This is needed so that the necessary files are present for + # baseline comparison and generation, + # since some of them may have been moved to the archive directory + for case_file in glob.iglob( + os.path.join( + self._case1.get_value("RUNDIR"), "*.nc.{}".format(self._run_one_suffix) + ) + ): + orig_file = case_file[: -(1 + len(self._run_one_suffix))] + if not os.path.isfile(orig_file): + safe_copy(case_file, orig_file) diff --git a/scripts/lib/CIME/SystemTests/erri.py b/CIME/SystemTests/erri.py similarity index 76% rename from scripts/lib/CIME/SystemTests/erri.py rename to CIME/SystemTests/erri.py index 6fd79b3e1fa..7851bd4bb66 100644 --- a/scripts/lib/CIME/SystemTests/erri.py +++ b/CIME/SystemTests/erri.py @@ -10,20 +10,20 @@ logger = logging.getLogger(__name__) -class ERRI(ERR): - def __init__(self, case): +class ERRI(ERR): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERU system test """ - ERR.__init__(self, case) + ERR.__init__(self, case, **kwargs) def _case_two_custom_postrun_action(self): rundir = self._case.get_value("RUNDIR") - for logname_gz in glob.glob(os.path.join(rundir, '*.log*.gz')): + for logname_gz in glob.glob(os.path.join(rundir, "*.log*.gz")): # gzipped logfile names are of the form $LOGNAME.gz # Removing the last three characters restores the original name logname = logname_gz[:-3] - with gzip.open(logname_gz, 'rb') as f_in, open(logname, 'w') as f_out: + with gzip.open(logname_gz, "rb") as f_in, open(logname, "w") as f_out: shutil.copyfileobj(f_in, f_out) os.remove(logname_gz) diff --git a/CIME/SystemTests/ers.py b/CIME/SystemTests/ers.py new file mode 100644 index 00000000000..0e93d8afa1c --- /dev/null +++ b/CIME/SystemTests/ers.py @@ -0,0 +1,63 @@ +""" +CIME restart test This class inherits from SystemTestsCommon +""" +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon +import glob + +logger = logging.getLogger(__name__) + + +class ERS(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the ERS system test + """ + SystemTestsCommon.__init__(self, case, **kwargs) + + def _ers_first_phase(self): + self._rest_n = self._set_restart_interval() + # set_restart_interval can change case settings that buildnmls may depend on + # so ensure buildnmls will not be skipped during case_run + self._skip_pnl = False + self.run_indv() + + def _ers_second_phase(self): + stop_n = self._case.get_value("STOP_N") + stop_option = self._case.get_value("STOP_OPTION") + + stop_new = stop_n - self._rest_n + expect( + stop_new > 0, + "ERROR: stop_n value {:d} too short {:d} {:d}".format( + stop_new, stop_n, self._rest_n + ), + ) + rundir = self._case.get_value("RUNDIR") 
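+        # Prefix the first run's PET* (ESMF) log files with 'run1.' so the
+        # restart run does not overwrite them.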
+ for pfile in glob.iglob(os.path.join(rundir, "PET*")): + os.rename( + pfile, + os.path.join(os.path.dirname(pfile), "run1." + os.path.basename(pfile)), + ) + ninst = self._case.get_value("NINST") + drvrest = "rpointer.cpl" + if ninst is not None and ninst > 1: + drvrest += "_0001" + drvrest += self._rest_time + + self._set_drv_restart_pointer(drvrest) + self._case.set_value("HIST_N", stop_n) + self._case.set_value("STOP_N", stop_new) + self._case.set_value("CONTINUE_RUN", True) + self._case.set_value("REST_OPTION", "never") + self._case.flush() + logger.info("doing an {} {} restart test".format(str(stop_new), stop_option)) + self._skip_pnl = False + self.run_indv(suffix="rest") + + # Compare restart file + self._component_compare_test("base", "rest") + + def run_phase(self): + self._ers_first_phase() + self._ers_second_phase() diff --git a/CIME/SystemTests/ers2.py b/CIME/SystemTests/ers2.py new file mode 100644 index 00000000000..63a10399b49 --- /dev/null +++ b/CIME/SystemTests/ers2.py @@ -0,0 +1,65 @@ +""" +CIME restart test 2 This class inherits from SystemTestsCommon +""" +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon + +logger = logging.getLogger(__name__) + + +class ERS2(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the ERS2 system test + """ + SystemTestsCommon.__init__(self, case, **kwargs) + + def _ers2_first_phase(self): + stop_n = self._case.get_value("STOP_N") + stop_option = self._case.get_value("STOP_OPTION") + rest_n = self._case.get_value("REST_N") + + # Don't need restarts for first run + self._case.set_value("REST_OPTION", "never") + + expect(stop_n > 0, "Bad STOP_N: {:d}".format(stop_n)) + expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n)) + + logger.info( + "doing an {0} {1} initial test with restart file at {2} {1}".format( + str(stop_n), stop_option, str(rest_n) + ) + ) + self.run_indv() + + def _ers2_second_phase(self): + stop_n = self._case.get_value("STOP_N") + stop_option = self._case.get_value("STOP_OPTION") + + rest_n = int(stop_n / 2 + 1) + stop_new = rest_n + + self._case.set_value("REST_OPTION", stop_option) + self._case.set_value("STOP_N", stop_new) + self._case.flush() + logger.info( + "doing first part {} {} restart test".format(str(stop_new), stop_option) + ) + self.run_indv(suffix="intermediate") + + stop_new = int(stop_n - rest_n) + self._case.set_value("STOP_N", stop_new) + self._case.set_value("CONTINUE_RUN", True) + self._case.set_value("REST_OPTION", "never") + + logger.info( + "doing second part {} {} restart test".format(str(stop_new), stop_option) + ) + self.run_indv(suffix="rest") + + # Compare restart file + self._component_compare_test("base", "rest") + + def run_phase(self): + self._ers2_first_phase() + self._ers2_second_phase() diff --git a/scripts/lib/CIME/SystemTests/ert.py b/CIME/SystemTests/ert.py similarity index 90% rename from scripts/lib/CIME/SystemTests/ert.py rename to CIME/SystemTests/ert.py index 18664c58cc7..b912f7248b7 100644 --- a/scripts/lib/CIME/SystemTests/ert.py +++ b/CIME/SystemTests/ert.py @@ -8,13 +8,13 @@ logger = logging.getLogger(__name__) -class ERT(SystemTestsCommon): - def __init__(self, case): +class ERT(SystemTestsCommon): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERT system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) def _ert_first_phase(self): @@ -36,7 +36,7 @@ def 
_ert_second_phase(self): self._case.set_value("STOP_N", 1) self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") + self._case.set_value("REST_OPTION", "never") self._case.flush() logger.info("doing an 1 month restart test with no restart files") diff --git a/CIME/SystemTests/funit.py b/CIME/SystemTests/funit.py new file mode 100644 index 00000000000..a7c21a06944 --- /dev/null +++ b/CIME/SystemTests/funit.py @@ -0,0 +1,75 @@ +""" +CIME FUNIT test. This class inherits from SystemTestsCommon. It runs +the fortran unit tests; grid and compset are ignored. +""" +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.build import post_build +from CIME.status import append_testlog +from CIME.utils import get_cime_root +from CIME.test_status import * + +logger = logging.getLogger(__name__) + + +class FUNIT(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the FUNIT system test + """ + SystemTestsCommon.__init__(self, case, **kwargs) + case.load_env() + + def build_phase(self, sharedlib_only=False, model_only=False): + if not sharedlib_only: + exeroot = self._case.get_value("EXEROOT") + logfile = os.path.join(exeroot, "funit.bldlog") + with open(logfile, "w") as fd: + fd.write("No-op\n") + + post_build(self._case, [logfile], build_complete=True) + + def get_test_spec_dir(self): + """ + Override this to change what gets tested. + """ + return get_cime_root() + + def run_phase(self): + + rundir = self._case.get_value("RUNDIR") + exeroot = self._case.get_value("EXEROOT") + mach = self._case.get_value("MACH") + + log = os.path.join(rundir, "funit.log") + if os.path.exists(log): + os.remove(log) + + test_spec_dir = self.get_test_spec_dir() + unit_test_tool = os.path.abspath( + os.path.join( + get_cime_root(), "scripts", "fortran_unit_testing", "run_tests.py" + ) + ) + args = "--build-dir {} --test-spec-dir {} --machine {}".format( + exeroot, test_spec_dir, mach + ) + + stat = run_cmd( + "{} {} >& funit.log".format(unit_test_tool, args), from_dir=rundir + )[0] + + append_testlog(open(os.path.join(rundir, "funit.log"), "r").read()) + + expect(stat == 0, "RUN FAIL for FUNIT") + + # Funit is a bit of an oddball test since it's not really running the E3SM model + # We need to override some methods to make the core infrastructure work. + + def _generate_baseline(self): + with self._test_status: + self._test_status.set_status(GENERATE_PHASE, TEST_PASS_STATUS) + + def _compare_baseline(self): + with self._test_status: + self._test_status.set_status(BASELINE_PHASE, TEST_PASS_STATUS) diff --git a/CIME/SystemTests/homme.py b/CIME/SystemTests/homme.py new file mode 100644 index 00000000000..597be0b9a09 --- /dev/null +++ b/CIME/SystemTests/homme.py @@ -0,0 +1,7 @@ +from CIME.SystemTests.hommebaseclass import HommeBase + + +class HOMME(HommeBase): + def __init__(self, case, **kwargs): + HommeBase.__init__(self, case, **kwargs) + self.cmakesuffix = "" diff --git a/CIME/SystemTests/hommebaseclass.py b/CIME/SystemTests/hommebaseclass.py new file mode 100644 index 00000000000..8ea4a03c6b9 --- /dev/null +++ b/CIME/SystemTests/hommebaseclass.py @@ -0,0 +1,144 @@ +""" +CIME HOMME test. 
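+Builds the standalone HOMME dynamical core with CMake and runs its test-execs,
+baseline, and check targets.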
This class inherits from SystemTestsCommon +""" +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.build import post_build +from CIME.status import append_testlog +from CIME.utils import SharedArea +from CIME.test_status import * + +import shutil + +logger = logging.getLogger(__name__) + + +class HommeBase(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the SMS system test + """ + SystemTestsCommon.__init__(self, case, **kwargs) + case.load_env() + self.csnd = "not defined" + self.cmakesuffix = self.csnd + + def build_phase(self, sharedlib_only=False, model_only=False): + if not sharedlib_only: + # Build HOMME + srcroot = self._case.get_value("SRCROOT") + mach = self._case.get_value("MACH") + procs = self._case.get_value("TOTALPES") + exeroot = self._case.get_value("EXEROOT") + baseline = self._case.get_value("BASELINE_ROOT") + basecmp = self._case.get_value("BASECMP_CASE") + compare = self._case.get_value("COMPARE_BASELINE") + gmake = self._case.get_value("GMAKE") + gmake_j = self._case.get_value("GMAKE_J") + cprnc = self._case.get_value("CCSM_CPRNC") + + if compare: + basename = basecmp + baselinedir = baseline + else: + basename = "" + baselinedir = exeroot + + expect( + self.cmakesuffix != self.csnd, + "ERROR in hommebaseclass: Must have cmakesuffix set up", + ) + + cmake_cmd = "cmake -C {0}/components/homme/cmake/machineFiles/{1}{6}.cmake -DUSE_NUM_PROCS={2} {0}/components/homme -DHOMME_BASELINE_DIR={3}/{4} -DCPRNC_DIR={5}/..".format( + srcroot, mach, procs, baselinedir, basename, cprnc, self.cmakesuffix + ) + + run_cmd_no_fail( + cmake_cmd, + arg_stdout="homme.bldlog", + combine_output=True, + from_dir=exeroot, + ) + run_cmd_no_fail( + "{} -j{} VERBOSE=1 test-execs".format(gmake, gmake_j), + arg_stdout="homme.bldlog", + combine_output=True, + from_dir=exeroot, + ) + + post_build( + self._case, [os.path.join(exeroot, "homme.bldlog")], build_complete=True + ) + + def run_phase(self): + + rundir = self._case.get_value("RUNDIR") + exeroot = self._case.get_value("EXEROOT") + baseline = self._case.get_value("BASELINE_ROOT") + compare = self._case.get_value("COMPARE_BASELINE") + generate = self._case.get_value("GENERATE_BASELINE") + basegen = self._case.get_value("BASEGEN_CASE") + gmake = self._case.get_value("GMAKE") + + log = os.path.join(rundir, "homme.log") + if os.path.exists(log): + os.remove(log) + + if generate: + full_baseline_dir = os.path.join(baseline, basegen, "tests", "baseline") + stat = run_cmd( + "{} -j 4 baseline".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] + if stat == 0: + if os.path.isdir(full_baseline_dir): + shutil.rmtree(full_baseline_dir) + + with SharedArea(): + shutil.copytree( + os.path.join(exeroot, "tests", "baseline"), + full_baseline_dir, + ) + + elif compare: + stat = run_cmd( + "{} -j 4 check".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] + + else: + stat = run_cmd( + "{} -j 4 baseline".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] + if stat == 0: + stat = run_cmd( + "{} -j 4 check".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] + + # Add homme.log output to TestStatus.log so that it can + # appear on the dashboard. Otherwise, the TestStatus.log + # is pretty useless for this test. 
+ append_testlog(open(log, "r").read()) + + expect(stat == 0, "RUN FAIL for HOMME") + + # Homme is a bit of an oddball test since it's not really running the E3SM model + # We need to override some methods to make the core infrastructure work. + + def _generate_baseline(self): + with self._test_status: + self._test_status.set_status(GENERATE_PHASE, TEST_PASS_STATUS) + + def _compare_baseline(self): + with self._test_status: + self._test_status.set_status(BASELINE_PHASE, TEST_PASS_STATUS) diff --git a/CIME/SystemTests/hommebfb.py b/CIME/SystemTests/hommebfb.py new file mode 100644 index 00000000000..87e566bf918 --- /dev/null +++ b/CIME/SystemTests/hommebfb.py @@ -0,0 +1,7 @@ +from CIME.SystemTests.hommebaseclass import HommeBase + + +class HOMMEBFB(HommeBase): + def __init__(self, case, **kwargs): + HommeBase.__init__(self, case, **kwargs) + self.cmakesuffix = "-bfb" diff --git a/CIME/SystemTests/icp.py b/CIME/SystemTests/icp.py new file mode 100644 index 00000000000..8d8c5e0ea59 --- /dev/null +++ b/CIME/SystemTests/icp.py @@ -0,0 +1,25 @@ +""" +CIME ICP test This class inherits from SystemTestsCommon +""" +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon + + +class ICP(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to file env_test.xml in the case directory + """ + SystemTestsCommon.__init__(self, case, **kwargs) + + def build_phase(self, sharedlib_only=False, model_only=False): + self._case.set_value("CICE_AUTO_DECOMP", "false") + + def run_phase(self): + self._case.set_value("CONTINUE_RUN", False) + self._case.set_value("REST_OPTION", "none") + self._case.set_value("HIST_OPTION", "$STOP_OPTION") + self._case.set_value("HIST_N", "$STOP_N") + self._case.flush() + + self.run_indv(self) diff --git a/CIME/SystemTests/irt.py b/CIME/SystemTests/irt.py new file mode 100644 index 00000000000..1f3637eb5a0 --- /dev/null +++ b/CIME/SystemTests/irt.py @@ -0,0 +1,45 @@ +""" +Implementation of the CIME IRT. (Interim Restart Test) +This test the model's restart capability as well as the short term archiver's interim restart capability + +(1) Do a Run of length N with restart at N/2 and DOUT_S_SAVE_INTERIM_RESTART set to TRUE +(2) Archive Run using ST archive tools +(3) Recover first interim restart to the case2 run directory +(4) Start case2 from restart and run to the end of case1 +(5) compare results. +(6) this test does not save or compare history files in baselines. + +""" + +from CIME.SystemTests.restart_tests import RestartTest +from CIME.XML.standard_module_setup import * +from CIME.utils import ls_sorted_by_mtime + +logger = logging.getLogger(__name__) + + +class IRT(RestartTest): + def __init__(self, case, **kwargs): + RestartTest.__init__( + self, + case, + separate_builds=False, + run_two_suffix="restart", + run_one_description="initial", + run_two_description="restart", + multisubmit=False, + **kwargs + ) + self._skip_pnl = False + + def _case_one_custom_postrun_action(self): + self._case.case_st_archive() + # Since preview namelist is run before _case_two_prerun_action, we need to do this here. 
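+        # The archive was written by case1, so DOUT_S_ROOT is read from self._case1;
+        # case2 is then restored from the oldest (interim) restart directory.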
+ dout_s_root = self._case1.get_value("DOUT_S_ROOT") + restart_list = ls_sorted_by_mtime(os.path.join(dout_s_root, "rest")) + logger.info("Restart directory list is {}".format(restart_list)) + expect(len(restart_list) >= 2, "Expected at least two restart directories") + # Get the older of the two restart directories + self._case2.restore_from_archive( + rest_dir=os.path.abspath(os.path.join(dout_s_root, "rest", restart_list[0])) + ) diff --git a/CIME/SystemTests/ldsta.py b/CIME/SystemTests/ldsta.py new file mode 100644 index 00000000000..a5f7c9196d5 --- /dev/null +++ b/CIME/SystemTests/ldsta.py @@ -0,0 +1,79 @@ +""" +CIME last date short term archiver test. This class inherits from SystemTestsCommon +It does a run without restarting, then runs the archiver with various last-date parameters +The test verifies the archive directory contains the expected files +""" + +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.utils import expect +from CIME.date import get_file_date + +import datetime +import glob +import os +import random +import shutil + +logger = logging.getLogger(__name__) + +# datetime objects can't be used anywhere else +def _date_to_datetime(date_obj): + return datetime.datetime( + year=date_obj.year(), + month=date_obj.month(), + day=date_obj.day(), + hour=date_obj.hour(), + minute=date_obj.minute(), + second=date_obj.second(), + ) + + +class LDSTA(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the SMS system test + """ + SystemTestsCommon.__init__(self, case, **kwargs) + + def run_phase(self): + archive_dir = self._case.get_value("DOUT_S_ROOT") + if os.path.isdir(archive_dir): + shutil.rmtree(archive_dir) + self.run_indv() + # finished running, so all archive files should exist + start_date = _date_to_datetime( + get_file_date(self._case.get_value("RUN_STARTDATE")) + ) + rest_dir = os.path.join(archive_dir, "rest") + delta_day = datetime.timedelta(1) + current_date = start_date + delta_day + next_datecheck = current_date + days_left = self._case.get_value("STOP_N") + final_date = start_date + delta_day * days_left + while current_date < final_date: + logger.info("Testing archiving with last date: {}".format(current_date)) + current_date_str = "{:04}-{:02}-{:02}".format( + current_date.year, current_date.month, current_date.day + ) + self._case.case_st_archive(last_date_str=current_date_str, copy_only=False) + archive_dates = [ + _date_to_datetime(get_file_date(fname)) + for fname in glob.glob(os.path.join(rest_dir, "*")) + ] + while next_datecheck <= current_date: + expect( + next_datecheck in archive_dates, + "Not all dates generated and/or archived: " + + "{} is missing".format(next_datecheck), + ) + next_datecheck += delta_day + for date in archive_dates: + expect( + date <= current_date, + "Archived date greater than specified by last-date: " + + "{}".format(date), + ) + num_days = random.randint(1, min(3, days_left)) + days_left -= num_days + current_date += num_days * delta_day diff --git a/CIME/SystemTests/mcc.py b/CIME/SystemTests/mcc.py new file mode 100644 index 00000000000..a4b839cf1e9 --- /dev/null +++ b/CIME/SystemTests/mcc.py @@ -0,0 +1,34 @@ +""" +Implemetation of CIME MCC test: Compares ensemble methods + +This does two runs: In the first we run a three member ensemble using the + MULTI_DRIVER capability, then we run a second single instance case and compare +""" +from CIME.XML.standard_module_setup import * +from 
CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo + +logger = logging.getLogger(__name__) + + +class MCC(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): + self._comp_classes = [] + self._test_instances = 3 + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="single_instance", + run_two_description="single instance", + run_one_description="multi driver", + **kwargs + ) + + def _case_one_setup(self): + # The multicoupler case will increase the number of tasks by the + # number of requested couplers. + self._case.set_value("MULTI_DRIVER", True) + self._case.set_value("NINST", self._test_instances) + + def _case_two_setup(self): + self._case.set_value("NINST", 1) diff --git a/CIME/SystemTests/mvk.py b/CIME/SystemTests/mvk.py new file mode 100644 index 00000000000..60f863a2670 --- /dev/null +++ b/CIME/SystemTests/mvk.py @@ -0,0 +1,336 @@ +""" +Multivariate test for climate reproducibility using the Kolmogrov-Smirnov (K-S) +test and based on The CESM/E3SM model's multi-instance capability is used to +conduct an ensemble of simulations starting from different initial conditions. + +This class inherits from SystemTestsCommon. +""" + +import os +import json +import logging +from shutil import copytree + +from CIME import test_status +from CIME import utils +from CIME.status import append_testlog +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.case.case_setup import case_setup +from CIME.XML.machines import Machines +from CIME.config import ConfigBase +from CIME.SystemTests import test_mods +from CIME.namelist import Namelist + +import evv4esm # pylint: disable=import-error +from evv4esm.__main__ import main as evv # pylint: disable=import-error + +version = evv4esm.__version_info__ + +assert version >= (0, 5, 0), "Please install evv4esm greater or equal to 0.5.0" + +EVV_LIB_DIR = os.path.abspath(os.path.dirname(evv4esm.__file__)) + +logger = logging.getLogger(__name__) + + +class MVKConfig(ConfigBase): + def __init__(self): + super().__init__() + + if self.loaded: + return + + self._set_attribute("component", "", "The main component.") + self._set_attribute( + "components", [], "Components that require namelist customization." + ) + self._set_attribute("ninst", 30, "The number of instances.") + self._set_attribute( + "var_set", "default", "Name of the variable set to analyze." + ) + self._set_attribute("ref_case", "Baseline", "Name of the reference case.") + self._set_attribute("test_case", "Test", "Name of the test case.") + + def generate_namelist( + self, case, component, i, filename + ): # pylint: disable=unused-argument + """Generate per instance namelist. + + This method is called for each instance to generate the desired + modifications. + + Args: + case (CIME.case.case.Case): The case instance. + component (str): Component the namelist belongs to. + i (int): Instance unique number. + filename (str): Name of the namelist that needs to be created. + """ + namelist = Namelist() + + with namelist(filename) as nml: + nml.set_variable_value("", "new_random", True) + nml.set_variable_value("", "pertlim", "1.0e-10") + nml.set_variable_value("", "seed_custom", f"{i}") + nml.set_variable_value("", "seed_clock", True) + + def evv_test_config(self, case, config): # pylint: disable=unused-argument + """Customize the evv4esm configuration. + + This method is used to customize the default evv4esm configuration + or generate a completely new one. 
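+        Returning `config` unchanged keeps the default K-S test configuration.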
+ + The return configuration will be written to `$RUNDIR/$CASE.json`. + + Args: + case (CIME.case.case.Case): The case instance. + config (dict): Default evv4esm configuration. + + Returns: + dict: Dictionary with test configuration. + """ + return config + + def _default_evv_test_config(self, run_dir, base_dir, evv_lib_dir): + config = { + "module": os.path.join(evv_lib_dir, "extensions", "ks.py"), + "test-case": self.test_case, + "test-dir": run_dir, + "ref-case": self.ref_case, + "ref-dir": base_dir, + "var-set": self.var_set, + "ninst": self.ninst, + "component": self.component, + } + + return config + + +class MVK(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the MVK test + """ + self._config = None + + SystemTestsCommon.__init__(self, case, **kwargs) + + *_, case_test_mods = utils.parse_test_name(self._casebaseid) + + test_mods_paths = test_mods.find_test_mods( + case.get_value("COMP_INTERFACE"), case_test_mods + ) + + for test_mods_path in test_mods_paths: + self._config = MVKConfig.load(os.path.join(test_mods_path, "params.py")) + + if self._config is None: + self._config = MVKConfig() + + # Use old behavior for component + if self._config.component == "": + # TODO remove model specific + if self._case.get_value("MODEL") == "e3sm": + self._config.component = "eam" + else: + self._config.component = "cam" + + if len(self._config.components) == 0: + self._config.components = [self._config.component] + elif ( + self._config.component != "" + and self._config.component not in self._config.components + ): + self._config.components.extend([self._config.component]) + + if ( + self._case.get_value("RESUBMIT") == 0 + and self._case.get_value("GENERATE_BASELINE") is False + ): + self._case.set_value("COMPARE_BASELINE", True) + else: + self._case.set_value("COMPARE_BASELINE", False) + + def build_phase(self, sharedlib_only=False, model_only=False): + # Only want this to happen once. It will impact the sharedlib build + # so it has to happen there. 
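+        # not model_only corresponds to the sharedlib pass, which runs first, so
+        # the multi-instance PE layout below is applied exactly once.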
+ if not model_only: + logging.warning("Starting to build multi-instance exe") + + for comp in self._case.get_values("COMP_CLASSES"): + self._case.set_value("NTHRDS_{}".format(comp), 1) + + ntasks = self._case.get_value("NTASKS_{}".format(comp)) + + self._case.set_value( + "NTASKS_{}".format(comp), ntasks * self._config.ninst + ) + + if comp != "CPL": + self._case.set_value("NINST_{}".format(comp), self._config.ninst) + + self._case.flush() + + case_setup(self._case, test_mode=False, reset=True) + + for i in range(1, self._config.ninst + 1): + for component in self._config.components: + filename = "user_nl_{}_{:04d}".format(component, i) + + self._config.generate_namelist(self._case, component, i, filename) + + self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) + + def _generate_baseline(self): + """ + generate a new baseline case based on the current test + """ + super(MVK, self)._generate_baseline() + + with utils.SharedArea(): + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) + + rundir = self._case.get_value("RUNDIR") + ref_case = self._case.get_value("RUN_REFCASE") + + env_archive = self._case.get_env("archive") + hists = env_archive.get_all_hist_files( + self._case.get_value("CASE"), + self._config.component, + rundir, + ref_case=ref_case, + ) + logger.debug("MVK additional baseline files: {}".format(hists)) + hists = [os.path.join(rundir, hist) for hist in hists] + for hist in hists: + basename = hist[hist.rfind(self._config.component) :] + baseline = os.path.join(basegen_dir, basename) + if os.path.exists(baseline): + os.remove(baseline) + + utils.safe_copy(hist, baseline, preserve_meta=False) + + def _compare_baseline(self): + with self._test_status: + if int(self._case.get_value("RESUBMIT")) > 0: + # This is here because the comparison is run for each submission + # and we only want to compare once the whole run is finished. We + # need to return a pass here to continue the submission process. 
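+                # RESUBMIT > 0 means further segments are still queued, so the
+                # real K-S comparison is deferred until the final submission.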
+ self._test_status.set_status( + test_status.BASELINE_PHASE, test_status.TEST_PASS_STATUS + ) + return + + self._test_status.set_status( + test_status.BASELINE_PHASE, test_status.TEST_FAIL_STATUS + ) + + run_dir = self._case.get_value("RUNDIR") + case_name = self._case.get_value("CASE") + base_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASECMP_CASE"), + ) + + test_name = "{}".format(case_name.split(".")[-1]) + + default_config = self._config._default_evv_test_config( + run_dir, + base_dir, + EVV_LIB_DIR, + ) + + test_config = self._config.evv_test_config( + self._case, + default_config, + ) + + evv_config = {test_name: test_config} + + json_file = os.path.join(run_dir, f"{case_name}.json") + with open(json_file, "w") as config_file: + json.dump(evv_config, config_file, indent=4) + + evv_out_dir = os.path.join(run_dir, f"{case_name}.evv") + evv(["-e", json_file, "-o", evv_out_dir]) + + self.update_testlog(test_name, case_name, evv_out_dir) + + def update_testlog(self, test_name, case_name, evv_out_dir): + comments = self.process_evv_output(evv_out_dir) + + status = self._test_status.get_status(test_status.BASELINE_PHASE) + + mach_name = self._case.get_value("MACH") + + mach_obj = Machines(machine=mach_name) + + htmlroot = utils.get_htmlroot(mach_obj) + + if htmlroot is not None: + urlroot = utils.get_urlroot(mach_obj) + + with utils.SharedArea(): + copytree( + evv_out_dir, + os.path.join(htmlroot, "evv", case_name), + ) + + if urlroot is None: + urlroot = "[{}_URL]".format(mach_name.capitalize()) + + viewing = "{}/evv/{}/index.html".format(urlroot, case_name) + else: + viewing = ( + "{}\n" + " EVV viewing instructions can be found at: " + " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" + "climate_reproducibility/README.md#test-passfail-and-extended-output" + "".format(evv_out_dir) + ) + + comments = ( + "{} {} for test '{}'.\n" + " {}\n" + " EVV results can be viewed at:\n" + " {}".format( + test_status.BASELINE_PHASE, + status, + test_name, + comments, + viewing, + ) + ) + + append_testlog(comments, self._orig_caseroot) + + def process_evv_output(self, evv_out_dir): + with open(os.path.join(evv_out_dir, "index.json")) as evv_f: + evv_status = json.load(evv_f) + + comments = "" + + for evv_ele in evv_status["Page"]["elements"]: + if "Table" in evv_ele: + comments = "; ".join( + "{}: {}".format(key, val[0]) + for key, val in evv_ele["Table"]["data"].items() + ) + + if evv_ele["Table"]["data"]["Test status"][0].lower() == "pass": + with self._test_status: + self._test_status.set_status( + test_status.BASELINE_PHASE, + test_status.TEST_PASS_STATUS, + ) + + break + + return comments + + +if __name__ == "__main__": + _config = MVKConfig() + _config.print_rst_table() diff --git a/scripts/lib/CIME/SystemTests/nck.py b/CIME/SystemTests/nck.py similarity index 77% rename from scripts/lib/CIME/SystemTests/nck.py rename to CIME/SystemTests/nck.py index 4f6358b8311..f75a2914215 100644 --- a/scripts/lib/CIME/SystemTests/nck.py +++ b/CIME/SystemTests/nck.py @@ -13,15 +13,19 @@ logger = logging.getLogger(__name__) -class NCK(SystemTestsCompareTwo): - def __init__(self, case): +class NCK(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): self._comp_classes = [] - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'multiinst', - run_one_description = 'one instance', - run_two_description = 'two instances') + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + 
run_two_suffix="multiinst", + run_one_description="one instance", + run_two_description="two instances", + **kwargs, + ) def _common_setup(self): # We start by halving the number of tasks for both cases. This ensures @@ -34,28 +38,26 @@ def _common_setup(self): self._comp_classes.remove("CPL") for comp in self._comp_classes: ntasks = self._case.get_value("NTASKS_{}".format(comp)) - if ( ntasks > 1 ): - self._case.set_value("NTASKS_{}".format(comp), int(ntasks/2)) + if ntasks > 1: + self._case.set_value("NTASKS_{}".format(comp), int(ntasks / 2)) # the following assures that both cases use the same number of total tasks rootpe = self._case.get_value("ROOTPE_{}".format(comp)) - if ( rootpe > 1 ): - self._case.set_value("ROOTPE_{}".format(comp), int(rootpe+ntasks/2)) + if rootpe > 1: + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe + ntasks / 2)) def _case_one_setup(self): for comp in self._comp_classes: self._case.set_value("NINST_{}".format(comp), 1) - def _case_two_setup(self): for comp in self._comp_classes: - if (comp == "ESP"): + if comp == "ESP": self._case.set_value("NINST_{}".format(comp), 1) else: self._case.set_value("NINST_{}".format(comp), 2) ntasks = self._case.get_value("NTASKS_{}".format(comp)) rootpe = self._case.get_value("ROOTPE_{}".format(comp)) - if ( rootpe > 1 ): - self._case.set_value("ROOTPE_{}".format(comp), int(rootpe-ntasks)) - self._case.set_value("NTASKS_{}".format(comp), ntasks*2) - self._case.case_setup(test_mode=True, reset=True) + if rootpe > 1: + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe - ntasks)) + self._case.set_value("NTASKS_{}".format(comp), ntasks * 2) diff --git a/scripts/lib/CIME/SystemTests/ncr.py b/CIME/SystemTests/ncr.py similarity index 85% rename from scripts/lib/CIME/SystemTests/ncr.py rename to CIME/SystemTests/ncr.py index 5dc34e02a22..f0de168ac13 100644 --- a/scripts/lib/CIME/SystemTests/ncr.py +++ b/CIME/SystemTests/ncr.py @@ -13,17 +13,21 @@ logger = logging.getLogger(__name__) -class NCR(SystemTestsCompareTwo): - def __init__(self, case): +class NCR(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): """ initialize an NCR test """ - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = "singleinst", - run_one_description = "two instances, each with the same number of tasks", - run_two_description = "default build") + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="singleinst", + run_one_description="two instances, each with the same number of tasks", + run_two_description="default build", + **kwargs + ) def _comp_classes(self): # Return the components which we need to set things for diff --git a/scripts/lib/CIME/SystemTests/nodefail.py b/CIME/SystemTests/nodefail.py similarity index 88% rename from scripts/lib/CIME/SystemTests/nodefail.py rename to CIME/SystemTests/nodefail.py index 5706ffd2e39..d975cfc5bfd 100644 --- a/scripts/lib/CIME/SystemTests/nodefail.py +++ b/CIME/SystemTests/nodefail.py @@ -7,16 +7,16 @@ logger = logging.getLogger(__name__) -class NODEFAIL(ERS): - def __init__(self, case): +class NODEFAIL(ERS): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERS system test """ - ERS.__init__(self, case) + ERS.__init__(self, case, **kwargs) self._fail_sentinel = os.path.join(case.get_value("RUNDIR"), "FAIL_SENTINEL") - self._fail_str = case.get_value("NODE_FAIL_REGEX") + self._fail_str = case.get_value("NODE_FAIL_REGEX") def _restart_fake_phase(self): # Swap out model.exe for one that 
emits node failures @@ -27,8 +27,7 @@ def _restart_fake_phase(self): logname = "med" else: logname = "cpl" - fake_exe = \ -"""#!/bin/bash + fake_exe = """#!/bin/bash fail_sentinel={0} cpl_log={1}/{4}.log.$LID @@ -48,7 +47,9 @@ def _restart_fake_phase(self): echo Insta pass echo SUCCESSFUL TERMINATION > $cpl_log fi -""".format(self._fail_sentinel, rundir, get_model(), self._fail_str, logname) +""".format( + self._fail_sentinel, rundir, get_model(), self._fail_str, logname + ) fake_exe_file = os.path.join(exeroot, "fake.sh") with open(fake_exe_file, "w") as fd: diff --git a/CIME/SystemTests/pea.py b/CIME/SystemTests/pea.py new file mode 100644 index 00000000000..4fb3a4569ca --- /dev/null +++ b/CIME/SystemTests/pea.py @@ -0,0 +1,50 @@ +""" +Implementation of the CIME PEA test. + +Builds runs and compares a single processor mpi model to a model built using mpi-serial +(1) do a run with default mpi library (suffix base) +(2) do a run with mpi-serial (suffix mpi-serial) +""" + +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.XML.standard_module_setup import * +from CIME.XML.machines import Machines + +logger = logging.getLogger(__name__) + + +class PEA(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="mpi-serial", + run_one_description="default mpi library", + run_two_description="mpi-serial", + **kwargs, + ) + + def _common_setup(self): + for comp in self._case.get_values("COMP_CLASSES"): + self._case.set_value("NTASKS_{}".format(comp), 1) + self._case.set_value("NTHRDS_{}".format(comp), 1) + self._case.set_value("ROOTPE_{}".format(comp), 0) + + def _case_one_setup(self): + pass + + def _case_two_setup(self): + mach_name = self._case.get_value("MACH") + mach_obj = Machines(machine=mach_name) + if mach_obj.is_valid_MPIlib("mpi-serial"): + self._case.set_value("MPILIB", "mpi-serial") + else: + logger.warning( + "mpi-serial is not supported on machine '{}', " + "so we have to fall back to default MPI and " + "therefore very little is being tested".format(mach_name) + ) + + if os.path.isfile("Macros"): + os.remove("Macros") diff --git a/CIME/SystemTests/pem.py b/CIME/SystemTests/pem.py new file mode 100644 index 00000000000..d98f9e4a2c7 --- /dev/null +++ b/CIME/SystemTests/pem.py @@ -0,0 +1,44 @@ +""" +Implementation of the CIME PEM test: Tests bfb with different MPI +processor counts + +This is just like running a smoke test twice - but the pe-counts +are modified the second time. 
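+Because only the PE layout changes, the two runs must produce bit-for-bit
+identical histories.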
+(1) Run with pes set up out of the box (suffix base) +(2) Run with half the number of tasks (suffix modpes) +""" + +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo + +logger = logging.getLogger(__name__) + + +class PEM(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): + build_separately = False + # cice, pop require separate builds + comps = case.get_compset_components() + if "cice" in comps or "pop" in comps: + build_separately = True + + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=build_separately, + run_two_suffix="modpes", + run_one_description="default pe counts", + run_two_description="halved pe counts", + **kwargs + ) + + def _case_one_setup(self): + pass + + def _case_two_setup(self): + for comp in self._case.get_values("COMP_CLASSES"): + ntasks = self._case.get_value("NTASKS_{}".format(comp)) + rootpe = self._case1.get_value("ROOTPE_{}".format(comp)) + if ntasks > 1: + self._case.set_value("NTASKS_{}".format(comp), int(ntasks / 2)) + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe / 2)) diff --git a/CIME/SystemTests/pet.py b/CIME/SystemTests/pet.py new file mode 100644 index 00000000000..432d7c99303 --- /dev/null +++ b/CIME/SystemTests/pet.py @@ -0,0 +1,40 @@ +""" +Implementation of the CIME PET test. This class inherits from SystemTestsCommon + +This is an openmp test to determine that changing thread counts does not change answers. +(1) do an initial run where all components are threaded by default (suffix: base) +(2) do another initial run with nthrds=1 for all components (suffix: single_thread) +""" + +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo + +logger = logging.getLogger(__name__) + + +class PET(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): + """ + initialize a test object + """ + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=False, + multisubmit=True, + run_two_suffix="single_thread", + run_one_description="default threading", + run_two_description="threads set to 1", + **kwargs + ) + + def _case_one_setup(self): + # first make sure that all components have threaded settings + for comp in self._case.get_values("COMP_CLASSES"): + if self._case.get_value("NTHRDS_{}".format(comp)) <= 1: + self._case.set_value("NTHRDS_{}".format(comp), 2) + + def _case_two_setup(self): + # Do a run with all threads set to 1 + for comp in self._case.get_values("COMP_CLASSES"): + self._case.set_value("NTHRDS_{}".format(comp), 1) diff --git a/scripts/lib/CIME/SystemTests/pfs.py b/CIME/SystemTests/pfs.py similarity index 84% rename from scripts/lib/CIME/SystemTests/pfs.py rename to CIME/SystemTests/pfs.py index 37f090e6a90..ed61d204e8a 100644 --- a/scripts/lib/CIME/SystemTests/pfs.py +++ b/CIME/SystemTests/pfs.py @@ -9,13 +9,13 @@ logger = logging.getLogger(__name__) -class PFS(SystemTestsCommon): - def __init__(self, case): +class PFS(SystemTestsCommon): + def __init__(self, case, **kwargs): """ initialize an object interface to the PFS system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) def run_phase(self): logger.info("doing an 20 day initial test, no restarts written") diff --git a/CIME/SystemTests/pgn.py b/CIME/SystemTests/pgn.py new file mode 100644 index 00000000000..07ac1f4cb08 --- /dev/null +++ b/CIME/SystemTests/pgn.py @@ -0,0 +1,353 @@ +""" +Perturbation Growth New (PGN) - The CESM/ACME model's 
+multi-instance capability is used to conduct an ensemble +of simulations starting from different initial conditions. + +This class inherits from SystemTestsCommon. + +""" + +from __future__ import division + +import os +import re +import json +import shutil +import logging + +from collections import OrderedDict +from shutil import copytree + +import pandas as pd +import numpy as np + + +import CIME.test_status +import CIME.utils +from CIME.status import append_testlog +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.case.case_setup import case_setup +from CIME.XML.machines import Machines + +import evv4esm # pylint: disable=import-error +from evv4esm.extensions import pg # pylint: disable=import-error +from evv4esm.__main__ import main as evv # pylint: disable=import-error + +evv_lib_dir = os.path.abspath(os.path.dirname(evv4esm.__file__)) + +logger = logging.getLogger(__name__) + +NUMBER_INITIAL_CONDITIONS = 6 +PERTURBATIONS = OrderedDict( + [ + ("woprt", 0.0), + ("posprt", 1.0e-14), + ("negprt", -1.0e-14), + ] +) +FCLD_NC = "cam.h0.cloud.nc" +INIT_COND_FILE_TEMPLATE = ( + "20240305.v3p0p0.F2010.ne4pg2_oQU480.chrysalis.{}.{}.0002-{:02d}-01-00000.nc" +) +INSTANCE_FILE_TEMPLATE = "{}{}_{:04d}.h0.0001-01-01-00000{}.nc" + + +class PGN(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the PGN test + """ + super(PGN, self).__init__(case, **kwargs) + if self._case.get_value("MODEL") == "e3sm": + self.atmmod = "eam" + self.lndmod = "elm" + self.atmmodIC = "eam" + self.lndmodIC = "elm" + else: + self.atmmod = "cam" + self.lndmod = "clm" + self.atmmodIC = "cam" + self.lndmodIC = "clm2" + + def build_phase(self, sharedlib_only=False, model_only=False): + ninst = NUMBER_INITIAL_CONDITIONS * len(PERTURBATIONS) + logger.debug("PGN_INFO: number of instance: " + str(ninst)) + + default_ninst = self._case.get_value("NINST_ATM") + + if default_ninst == 1: # if multi-instance is not already set + # Only want this to happen once. It will impact the sharedlib build + # so it has to happen here. 
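            # A worked example of the layout above, using the constants
            # defined at the top of this file: NUMBER_INITIAL_CONDITIONS = 6
            # and len(PERTURBATIONS) = 3 (woprt, posprt, negprt), so
            # ninst = 6 * 3 = 18.  A component that was originally laid out
            # on 32 tasks is therefore given 32 * 18 = 576 tasks, which lets
            # all 18 ensemble instances run concurrently in one executable.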
+ if not model_only: + # Lay all of the components out concurrently + logger.debug( + "PGN_INFO: Updating NINST for multi-instance in env_mach_pes.xml" + ) + for comp in ["ATM", "OCN", "WAV", "GLC", "ICE", "ROF", "LND"]: + ntasks = self._case.get_value("NTASKS_{}".format(comp)) + self._case.set_value("ROOTPE_{}".format(comp), 0) + self._case.set_value("NINST_{}".format(comp), ninst) + self._case.set_value("NTASKS_{}".format(comp), ntasks * ninst) + + self._case.set_value("ROOTPE_CPL", 0) + self._case.set_value("NTASKS_CPL", ntasks * ninst) + self._case.flush() + + case_setup(self._case, test_mode=False, reset=True) + + logger.debug("PGN_INFO: Updating user_nl_* files") + + csmdata_root = self._case.get_value("DIN_LOC_ROOT") + csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4pg2_v3_init") + csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4pg2_v3_init") + + iinst = 1 + for icond in range(1, NUMBER_INITIAL_CONDITIONS + 1): + fatm_in = os.path.join( + csmdata_atm, INIT_COND_FILE_TEMPLATE.format(self.atmmodIC, "i", icond) + ) + flnd_in = os.path.join( + csmdata_lnd, INIT_COND_FILE_TEMPLATE.format(self.lndmodIC, "r", icond) + ) + for iprt in PERTURBATIONS.values(): + with open( + "user_nl_{}_{:04d}".format(self.atmmod, iinst), "w" + ) as atmnlfile, open( + "user_nl_{}_{:04d}".format(self.lndmod, iinst), "w" + ) as lndnlfile: + + atmnlfile.write("ncdata = '{}' \n".format(fatm_in)) + lndnlfile.write("finidat = '{}' \n".format(flnd_in)) + + atmnlfile.write("avgflag_pertape = 'I' \n") + atmnlfile.write("nhtfrq = 1 \n") + atmnlfile.write("mfilt = 2 \n") + atmnlfile.write("ndens = 1 \n") + atmnlfile.write("pergro_mods = .true. \n") + atmnlfile.write("pergro_test_active = .true. \n") + + if iprt != 0.0: + atmnlfile.write("pertlim = {} \n".format(iprt)) + + iinst += 1 + + self._case.set_value("STOP_N", "1") + self._case.set_value("STOP_OPTION", "nsteps") + self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) + + def get_var_list(self): + """ + Get variable list for pergro specific output vars + """ + rundir = self._case.get_value("RUNDIR") + prg_fname = "pergro_ptend_names.txt" + var_file = os.path.join(rundir, prg_fname) + CIME.utils.expect( + os.path.isfile(var_file), + "File {} does not exist in: {}".format(prg_fname, rundir), + ) + + with open(var_file, "r") as fvar: + var_list = fvar.readlines() + + return list(map(str.strip, var_list)) + + def _compare_baseline(self): + """ + Compare baselines in the pergro test sense. 
That is, + compare PGE from the test simulation with the baseline + cloud + """ + with self._test_status: + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS + ) + + logger.debug("PGN_INFO:BASELINE COMPARISON STARTS") + + run_dir = self._case.get_value("RUNDIR") + case_name = self._case.get_value("CASE") + base_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASECMP_CASE"), + ) + + var_list = self.get_var_list() + + test_name = "{}".format(case_name.split(".")[-1]) + evv_config = { + test_name: { + "module": os.path.join(evv_lib_dir, "extensions", "pg.py"), + "test-case": case_name, + "test-name": "Test", + "test-dir": run_dir, + "ref-name": "Baseline", + "ref-dir": base_dir, + "variables": var_list, + "perturbations": PERTURBATIONS, + "pge-cld": FCLD_NC, + "ninit": NUMBER_INITIAL_CONDITIONS, + "init-file-template": INIT_COND_FILE_TEMPLATE, + "instance-file-template": INSTANCE_FILE_TEMPLATE, + "init-model": "cam", + "component": self.atmmod, + } + } + + json_file = os.path.join(run_dir, ".".join([case_name, "json"])) + with open(json_file, "w") as config_file: + json.dump(evv_config, config_file, indent=4) + + evv_out_dir = os.path.join(run_dir, ".".join([case_name, "evv"])) + evv(["-e", json_file, "-o", evv_out_dir]) + + with open(os.path.join(evv_out_dir, "index.json"), "r") as evv_f: + evv_status = json.load(evv_f) + + comments = "" + for evv_ele in evv_status["Page"]["elements"]: + if "Table" in evv_ele: + comments = "; ".join( + "{}: {}".format(key, val[0]) + for key, val in evv_ele["Table"]["data"].items() + ) + if evv_ele["Table"]["data"]["Test status"][0].lower() == "pass": + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, + CIME.test_status.TEST_PASS_STATUS, + ) + break + + status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE) + mach_name = self._case.get_value("MACH") + mach_obj = Machines(machine=mach_name) + htmlroot = CIME.utils.get_htmlroot(mach_obj) + urlroot = CIME.utils.get_urlroot(mach_obj) + if htmlroot is not None: + with CIME.utils.SharedArea(): + copytree( + evv_out_dir, + os.path.join(htmlroot, "evv", case_name), + ) + if urlroot is None: + urlroot = "[{}_URL]".format(mach_name.capitalize()) + viewing = "{}/evv/{}/index.html".format(urlroot, case_name) + else: + viewing = ( + "{}\n" + " EVV viewing instructions can be found at: " + " https://github.com/ESMCI/CIME/blob/master/scripts/" + "climate_reproducibility/README.md#test-passfail-and-extended-output" + "".format(evv_out_dir) + ) + comments = ( + "{} {} for test '{}'.\n" + " {}\n" + " EVV results can be viewed at:\n" + " {}".format( + CIME.test_status.BASELINE_PHASE, + status, + test_name, + comments, + viewing, + ) + ) + + append_testlog(comments, self._orig_caseroot) + + def run_phase(self): + logger.debug("PGN_INFO: RUN PHASE") + + self.run_indv() + + # Here were are in case directory, we need to go to the run directory + # and rename files + rundir = self._case.get_value("RUNDIR") + casename = self._case.get_value("CASE") + logger.debug("PGN_INFO: Case name is:{}".format(casename)) + + for icond in range(NUMBER_INITIAL_CONDITIONS): + for iprt, ( + prt_name, + prt_value, # pylint: disable=unused-variable + ) in enumerate(PERTURBATIONS.items()): + iinst = pg._sub2instance(icond, iprt, len(PERTURBATIONS)) + fname = os.path.join( + rundir, + INSTANCE_FILE_TEMPLATE.format( + casename + ".", self.atmmod, iinst, "" + ), + ) + renamed_fname = re.sub(r"\.nc$", "_{}.nc".format(prt_name), fname) + + 
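                # For example, with an illustrative instance number: the
                # history file for atmosphere instance 0007 with perturbation
                # "negprt",
                #     <case>.eam_0007.h0.0001-01-01-00000.nc
                # is renamed to
                #     <case>.eam_0007.h0.0001-01-01-00000_negprt.nc
                # so that evv4esm can identify each ensemble member by its
                # perturbation suffix.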
logger.debug("PGN_INFO: fname to rename:{}".format(fname)) + logger.debug("PGN_INFO: Renamed file:{}".format(renamed_fname)) + try: + shutil.move(fname, renamed_fname) + except IOError: + CIME.utils.expect( + os.path.isfile(renamed_fname), + "ERROR: File {} does not exist".format(renamed_fname), + ) + logger.debug( + "PGN_INFO: Renamed file already exists:" + "{}".format(renamed_fname) + ) + + logger.debug("PGN_INFO: RUN PHASE ENDS") + + def _generate_baseline(self): + super(PGN, self)._generate_baseline() + + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), self._case.get_value("BASEGEN_CASE") + ) + + rundir = self._case.get_value("RUNDIR") + casename = self._case.get_value("CASE") + + var_list = self.get_var_list() + nvar = len(var_list) + nprt = len(PERTURBATIONS) + rmse_prototype = {} + for icond in range(NUMBER_INITIAL_CONDITIONS): + prt_rmse = {} + for iprt, prt_name in enumerate(PERTURBATIONS): + if prt_name == "woprt": + continue + iinst_ctrl = pg._sub2instance(icond, 0, nprt) + ifile_ctrl = os.path.join( + rundir, + INSTANCE_FILE_TEMPLATE.format( + casename + ".", self.atmmod, iinst_ctrl, "_woprt" + ), + ) + + iinst_test = pg._sub2instance(icond, iprt, nprt) + ifile_test = os.path.join( + rundir, + INSTANCE_FILE_TEMPLATE.format( + casename + ".", self.atmmod, iinst_test, "_" + prt_name + ), + ) + + prt_rmse[prt_name] = pg.variables_rmse( + ifile_test, ifile_ctrl, var_list, "t_" + ) + rmse_prototype[icond] = pd.concat(prt_rmse) + rmse = pd.concat(rmse_prototype) + cld_rmse = np.reshape( + rmse.RMSE.values, (NUMBER_INITIAL_CONDITIONS, nprt - 1, nvar) + ) + + pg.rmse_writer( + os.path.join(rundir, FCLD_NC), + cld_rmse, + list(PERTURBATIONS.keys()), + var_list, + INIT_COND_FILE_TEMPLATE, + "cam", + ) + + logger.debug("PGN_INFO:copy:{} to {}".format(FCLD_NC, basegen_dir)) + shutil.copy(os.path.join(rundir, FCLD_NC), basegen_dir) diff --git a/CIME/SystemTests/pre.py b/CIME/SystemTests/pre.py new file mode 100644 index 00000000000..23547d46430 --- /dev/null +++ b/CIME/SystemTests/pre.py @@ -0,0 +1,146 @@ +""" +Implementation of the CIME pause/resume test: Tests having driver +'pause' (write cpl restart file) and 'resume' (read cpl restart file) +possibly changing the restart file. Compared to non-pause/resume run. +Test can also be run with other component combinations. +Test requires DESP component to function correctly. +""" + +import os.path +import logging +import glob + +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.utils import expect +from CIME.hist_utils import cprnc + +############################################################################### +class PRE(SystemTestsCompareTwo): + ############################################################################### + """ + Implementation of the CIME pause/resume test: Tests having driver + 'pause' (write cpl and/or other restart file(s)) and 'resume' + (read cpl and/or other restart file(s)) possibly changing restart + file. Compare to non-pause/resume run. 
+ """ + + ########################################################################### + def __init__(self, case, **kwargs): + ########################################################################### + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=False, + run_two_suffix="pr", + run_one_description="no pause/resume", + run_two_description="pause/resume", + **kwargs + ) + self._stopopt = "" + self._stopn = 0 + self._cprnc_exe = None + + ########################################################################### + def _case_one_setup(self): + ########################################################################### + pass + + ########################################################################### + def _case_two_setup(self): + ########################################################################### + # Set up a pause/resume run + stopopt = self._case1.get_value("STOP_OPTION") + stopn = self._case1.get_value("STOP_N") + self._case.set_value("STOP_OPTION", stopopt) + self._case.set_value("STOP_N", stopn) + self._case.set_value("ESP_RUN_ON_PAUSE", "TRUE") + if stopn > 3: + pausen = 2 + else: + pausen = 1 + # End if + + self._case.set_value("PAUSE_OPTION", stopopt) + self._case.set_value("PAUSE_N", pausen) + comps = self._case.get_values("COMP_CLASSES") + pause_active = [] + for comp in comps: + pause_active.append(self._case.get_value("PAUSE_ACTIVE_{}".format(comp))) + + expect(any(pause_active), "No pause_active flag is set") + + self._case.flush() + + ########################################################################### + def run_phase(self): # pylint: disable=arguments-differ + ########################################################################### + self._activate_case2() + should_match = self._case.get_value("DESP_MODE") == "NOCHANGE" + SystemTestsCompareTwo.run_phase(self, success_change=not should_match) + # Look for expected coupler restart files + logger = logging.getLogger(__name__) + self._activate_case1() + rundir1 = self._case.get_value("RUNDIR") + self._cprnc_exe = self._case.get_value("CCSM_CPRNC") + self._activate_case2() + rundir2 = self._case.get_value("RUNDIR") + compare_ok = True + multi_driver = self._case.get_value("MULTI_DRIVER") + comps = self._case.get_values("COMP_CLASSES") + for comp in comps: + if not self._case.get_value("PAUSE_ACTIVE_{}".format(comp)): + continue + if comp == "CPL": + if multi_driver: + ninst = self._case.get_value("NINST_MAX") + else: + ninst = 1 + else: + ninst = self._case.get_value("NINST_{}".format(comp)) + + comp_name = self._case.get_value("COMP_{}".format(comp)) + for index in range(1, ninst + 1): + if ninst == 1: + rname = "*.{}.r.*".format(comp_name) + else: + rname = "*.{}_{:04d}.r.*".format(comp_name, index) + + restart_files_1 = glob.glob(os.path.join(rundir1, rname)) + expect( + (len(restart_files_1) > 0), + "No case1 restart files for {}".format(comp), + ) + restart_files_2 = glob.glob(os.path.join(rundir2, rname)) + expect( + (len(restart_files_2) > len(restart_files_1)), + "No pause (restart) files found in case2 for {}".format(comp), + ) + # Do cprnc of restart files. 
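                # For example, with an illustrative file name: if the newest
                # case1 restart is  mycase.cpl.r.0001-01-03-00000.nc, its
                # last four dot-separated fields give the glob
                #     *.cpl.r.0001-01-03-00000.nc
                # which picks out the case2 restart written at the same
                # model time; cprnc then compares the two files field by
                # field.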
+ rfile1 = restart_files_1[len(restart_files_1) - 1] + # rfile2 has to match rfile1 (same time string) + parts = os.path.basename(rfile1).split(".") + glob_str = "*.{}".format(".".join(parts[len(parts) - 4 :])) + restart_files_2 = glob.glob(os.path.join(rundir2, glob_str)) + expect( + (len(restart_files_2) == 1), + "Missing case2 restart file, {}", + glob_str, + ) + rfile2 = restart_files_2[0] + ok = cprnc( + comp, rfile1, rfile2, self._case, rundir2, cprnc_exe=self._cprnc_exe + )[0] + logger.warning( + "CPRNC result for {}: {}".format( + os.path.basename(rfile1), + "PASS" if (ok == should_match) else "FAIL", + ) + ) + compare_ok = compare_ok and (should_match == ok) + + expect( + compare_ok, + "Not all restart files {}".format( + "matched" if should_match else "failed to match" + ), + ) diff --git a/CIME/SystemTests/rep.py b/CIME/SystemTests/rep.py new file mode 100644 index 00000000000..367409ac3fa --- /dev/null +++ b/CIME/SystemTests/rep.py @@ -0,0 +1,20 @@ +""" +Implementation of the CIME REP test + +This test verifies that two identical runs give bit-for-bit results +""" + +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo + + +class REP(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): + SystemTestsCompareTwo.__init__( + self, case, separate_builds=False, run_two_suffix="rep2", **kwargs + ) + + def _case_one_setup(self): + pass + + def _case_two_setup(self): + pass diff --git a/CIME/SystemTests/restart_tests.py b/CIME/SystemTests/restart_tests.py new file mode 100644 index 00000000000..ffc50270ce2 --- /dev/null +++ b/CIME/SystemTests/restart_tests.py @@ -0,0 +1,58 @@ +""" +Abstract class for restart tests +""" + +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.XML.standard_module_setup import * + +logger = logging.getLogger(__name__) + + +class RestartTest(SystemTestsCompareTwo): + def __init__( + self, + case, + separate_builds, + run_two_suffix="restart", + run_one_description="initial", + run_two_description="restart", + multisubmit=False, + **kwargs + ): + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds, + run_two_suffix=run_two_suffix, + run_one_description=run_one_description, + run_two_description=run_two_description, + multisubmit=multisubmit, + **kwargs + ) + + def _case_one_setup(self): + stop_n = self._case1.get_value("STOP_N") + expect(stop_n >= 3, "STOP_N must be at least 3, STOP_N = {}".format(stop_n)) + self._set_restart_interval() + + def _case_two_setup(self): + rest_n = self._case1.get_value("REST_N") + stop_n = self._case1.get_value("STOP_N") + stop_new = stop_n - rest_n + expect( + stop_new > 0, + "ERROR: stop_n value {:d} too short {:d} {:d}".format( + stop_new, stop_n, rest_n + ), + ) + # hist_n is set to the stop_n value of case1 + self._case.set_value("HIST_N", stop_n) + self._case.set_value("STOP_N", stop_new) + self._case.set_value("CONTINUE_RUN", True) + self._case.set_value("REST_OPTION", "never") + ninst = self._case.get_value("NINST") + drvrest = "rpointer.cpl" + if ninst is not None and ninst > 1: + drvrest += "_0001" + drvrest += self._rest_time + self._set_drv_restart_pointer(drvrest) diff --git a/CIME/SystemTests/reuseinitfiles.py b/CIME/SystemTests/reuseinitfiles.py new file mode 100644 index 00000000000..76d8bb0522e --- /dev/null +++ b/CIME/SystemTests/reuseinitfiles.py @@ -0,0 +1,61 @@ +""" +Implementation of the CIME REUSEINITFILES test + +This test does two runs: + +(1) A standard initial run + +(2) A run that reuses the init-generated files from run 
(1). + +This verifies that it works to reuse these init-generated files, and that you can get +bit-for-bit results by doing so. This is important because these files are typically +reused whenever a user reruns an initial case. +""" + +import os +import shutil +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.SystemTests.system_tests_common import INIT_GENERATED_FILES_DIRNAME + + +class REUSEINITFILES(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=False, + run_two_suffix="reuseinit", + run_one_description="standard initial run", + run_two_description="reuse init-generated files from run 1", + # The following line is a key part of this test: we will copy the + # init_generated_files from case1 and then need to make sure they are NOT + # deleted like is normally done for tests: + case_two_keep_init_generated_files=True, + **kwargs + ) + + def _case_one_setup(self): + pass + + def _case_two_setup(self): + pass + + def _case_two_custom_prerun_action(self): + case1_igf_dir = os.path.join( + self._case1.get_value("RUNDIR"), INIT_GENERATED_FILES_DIRNAME + ) + case2_igf_dir = os.path.join( + self._case2.get_value("RUNDIR"), INIT_GENERATED_FILES_DIRNAME + ) + + expect( + os.path.isdir(case1_igf_dir), + "ERROR: Expected a directory named {} in case1's rundir".format( + INIT_GENERATED_FILES_DIRNAME + ), + ) + if os.path.isdir(case2_igf_dir): + shutil.rmtree(case2_igf_dir) + + shutil.copytree(case1_igf_dir, case2_igf_dir) diff --git a/CIME/SystemTests/seq.py b/CIME/SystemTests/seq.py new file mode 100644 index 00000000000..7413f900899 --- /dev/null +++ b/CIME/SystemTests/seq.py @@ -0,0 +1,51 @@ +""" +sequencing bfb test (10 day seq,conc tests) +""" +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo + +logger = logging.getLogger(__name__) + + +class SEQ(SystemTestsCompareTwo): + def __init__(self, case, **kwargs): + """ + initialize an object interface to file env_test.xml in the case directory + """ + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="seq", + run_one_description="base", + run_two_description="sequence", + **kwargs + ) + + def _case_one_setup(self): + pass + + def _case_two_setup(self): + comp_classes = self._case.get_values("COMP_CLASSES") + any_changes = False + for comp in comp_classes: + any_changes |= self._case.get_value("ROOTPE_{}".format(comp)) != 0 + if any_changes: + for comp in comp_classes: + self._case.set_value("ROOTPE_{}".format(comp), 0) + else: + totalpes = self._case.get_value("TOTALPES") + newntasks = max(1, totalpes // len(comp_classes)) + rootpe = newntasks + + for comp in comp_classes: + # here we set the cpl to have the first 2 tasks + # and each component to have a different ROOTPE + if comp == "CPL": + self._case.set_value("NTASKS_CPL", newntasks) + else: + self._case.set_value("NTASKS_{}".format(comp), newntasks) + self._case.set_value("ROOTPE_{}".format(comp), rootpe) + rootpe += newntasks + + self._case.flush() diff --git a/scripts/lib/CIME/SystemTests/sms.py b/CIME/SystemTests/sms.py similarity index 81% rename from scripts/lib/CIME/SystemTests/sms.py rename to CIME/SystemTests/sms.py index 5c5aaafd44f..17672b47052 100644 --- a/scripts/lib/CIME/SystemTests/sms.py +++ b/CIME/SystemTests/sms.py @@ -8,10 +8,10 @@ logger = logging.getLogger(__name__) -class SMS(SystemTestsCommon): - def 
__init__(self, case): +class SMS(SystemTestsCommon): + def __init__(self, case, **kwargs): """ initialize an object interface to the SMS system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) diff --git a/CIME/SystemTests/system_tests_common.py b/CIME/SystemTests/system_tests_common.py new file mode 100644 index 00000000000..2ca10d937e4 --- /dev/null +++ b/CIME/SystemTests/system_tests_common.py @@ -0,0 +1,1330 @@ +""" +Base class for CIME system tests +""" + +from CIME.XML.standard_module_setup import * +from CIME.XML.env_run import EnvRun +from CIME.XML.env_test import EnvTest +from CIME.status import append_testlog +from CIME.utils import ( + get_model, + safe_copy, + get_timestamp, + CIMEError, + expect, + get_current_commit, + SharedArea, + is_comp_standalone, +) +from CIME.test_status import * +from CIME.hist_utils import ( + copy_histfiles, + compare_test, + generate_teststatus, + compare_baseline, + get_ts_synopsis, + generate_baseline, +) +from CIME.config import Config +from CIME.provenance import save_test_time, get_test_success +from CIME.locked_files import LOCKED_DIR, lock_file, is_locked +from CIME.baselines.performance import ( + get_latest_cpl_logs, + perf_get_memory_list, + perf_compare_memory_baseline, + perf_compare_throughput_baseline, + perf_write_baseline, + load_coupler_customization, +) +import CIME.build as build +from datetime import datetime, timedelta +import glob, gzip, time, traceback, os, math, calendar + +from contextlib import ExitStack + +logger = logging.getLogger(__name__) + +# Name of directory under the run directory in which init-generated files are placed +INIT_GENERATED_FILES_DIRNAME = "init_generated_files" + + +def fix_single_exe_case(case): + """Fixes cases created with --single-exe. + + When tests are created using --single-exe, the test_scheduler will set + `BUILD_COMPLETE` to True, but some tests require calls to `case.case_setup` + which can resets `BUILD_COMPLETE` to false. This function will check if a + case was created with `--single-exe` and ensure `BUILD_COMPLETE` is True. + + Returns: + True when case required modification otherwise False. + """ + if is_single_exe_case(case): + with ExitStack() as stack: + # enter context if case is still read-only, entering the context + # multiple times can cause side effects for later calls to + # `set_value` when it's assumed the cause is writeable. + if case._read_only_mode: + stack.enter_context(case) + + case.set_value("BUILD_COMPLETE", True) + + return True + + return False + + +def is_single_exe_case(case): + """Determines if the case was created with the --single-exe option. + + If `CASEROOT` is not part of `EXEROOT` and the `TEST` variable is True, + then its safe to assume the case was created with `./create_test` + and the `--single-exe` option. + + Returns: + True when the case was created with `--single-exe` otherwise false. + """ + caseroot = case.get_value("CASEROOT") + + exeroot = case.get_value("EXEROOT") + + test = case.get_value("TEST") + + return caseroot not in exeroot and test + + +class SystemTestsCommon(object): + def __init__( + self, case, expected=None, **kwargs + ): # pylint: disable=unused-argument + """ + initialize a CIME system test object, if the locked env_run.orig.xml + does not exist copy the current env_run.xml file. If it does exist restore values + changed in a previous run of the test. 
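        A sketch of typical use by the test machinery (the caseroot path is
        illustrative):

            from CIME.case import Case

            with Case("/path/to/testcase", read_only=False) as case:
                test = SystemTestsCommon(case)
                success = test.build()
                success = success and test.run()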
+ """ + self._case = case + caseroot = case.get_value("CASEROOT") + self._caseroot = caseroot + self._orig_caseroot = caseroot + self._runstatus = None + self._casebaseid = self._case.get_value("CASEBASEID") + self._test_status = TestStatus(test_dir=caseroot, test_name=self._casebaseid) + self._init_environment(caseroot) + self._init_locked_files(caseroot, expected) + self._skip_pnl = False + self._rest_time = None + self._cpllog = ( + "med" if self._case.get_value("COMP_INTERFACE") == "nuopc" else "cpl" + ) + self._ninja = False + self._dry_run = False + self._user_separate_builds = False + self._expected_num_cmp = None + self._rest_n = None + # Does the model support this variable? + self._drv_restart_pointer = self._case.get_value("DRV_RESTART_POINTER") + + def _set_drv_restart_pointer(self, value): + if self._drv_restart_pointer: + logger.info("setting DRV_RESTART_POINTER={}".format(value)) + self._case.set_value("DRV_RESTART_POINTER", value) + + def _set_restart_interval( + self, stop_n=None, stop_option=None, startdate=None, starttime=None + ): + if not stop_n: + stop_n = self._case.get_value("STOP_N") + if not stop_option: + stop_option = self._case.get_value("STOP_OPTION") + + self._case.set_value("REST_OPTION", stop_option) + + # We need to make sure the run is long enough and to set REST_N to a + # value that makes sense for all components + maxncpl = 10000 + minncpl = 0 + for comp in self._case.get_values("COMP_CLASSES"): + if comp == "CPL": + continue + compname = self._case.get_value("COMP_{}".format(comp)) + + # ignore stub components in this test. + if compname == "s{}".format(comp.lower()): + ncpl = None + else: + ncpl = self._case.get_value("{}_NCPL".format(comp)) + + if ncpl and maxncpl > ncpl: + maxncpl = ncpl + if ncpl and minncpl < ncpl: + minncpl = ncpl + + comp_interface = self._case.get_value("COMP_INTERFACE") + # mct doesn't care about maxncpl so set it to minncpl + if comp_interface == "mct": + maxncpl = minncpl + + ncpl_base_period = self._case.get_value("NCPL_BASE_PERIOD") + if ncpl_base_period == "hour": + coupling_secs = 3600 / maxncpl + timestep = 3600 / minncpl + elif ncpl_base_period == "day": + coupling_secs = 86400 / maxncpl + timestep = 86400 / minncpl + elif ncpl_base_period == "year": + coupling_secs = 31536000 / maxncpl + timestep = 31536000 / minncpl + elif ncpl_base_period == "decade": + coupling_secs = 315360000 / maxncpl + timestep = 315360000 / minncpl + + # Convert stop_n to units of coupling intervals + factor = 1 + if stop_option == "nsteps": + factor = timestep + elif stop_option == "nminutes": + factor = 60 + elif stop_option == "nhours": + factor = 3600 + elif stop_option == "ndays": + factor = 86400 + elif stop_option == "nyears": + factor = 315360000 + else: + expect(False, f"stop_option {stop_option} not available for this test") + stop_n = int(stop_n * factor // coupling_secs) + if self._case.get_value("TESTCASE") == "IRT": + rest_n = math.ceil((stop_n // 3) * coupling_secs / factor) + else: + rest_n = math.ceil((stop_n // 2 + 1) * coupling_secs / factor) + expect(stop_n > 0, "Bad STOP_N: {:d}".format(stop_n)) + expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n)) + cal = self._case.get_value("CALENDAR") + if not starttime: + starttime = self._case.get_value("START_TOD") + if not startdate: + startdate = self._case.get_value("RUN_STARTDATE") + + if "-" in startdate: + syr, smon, sday = startdate.split("-") + syr = int(syr) + smon = int(smon) + sday = int(sday) + else: + startdate = int(startdate) + syr = int(startdate 
/ 10000) + smon = int((startdate - syr * 10000) / 100) + sday = startdate - syr * 10000 - smon * 100 + + addyr = syr // 10000 + syr = syr % 10000 + + startdatetime = datetime.strptime( + f"{syr:04d}{smon:02d}{sday:02d}", "%Y%m%d" + ) + timedelta(seconds=int(starttime)) + + if stop_option == "nsteps": + rtd = timedelta(seconds=rest_n * factor) + elif stop_option == "nminutes": + rtd = timedelta(minutes=rest_n) + elif stop_option == "nhours": + rtd = timedelta(hours=rest_n) + elif stop_option == "ndays": + rtd = timedelta(days=rest_n) + elif stop_option == "nyears": + rtd = timedelta(days=rest_n * 365) + else: + expect(False, f"stop_option {stop_option} not available for this test") + + restdatetime = startdatetime + rtd + # We are working with python datatime and the model uses a NO_LEAP 365 day calendar + # so we need to correct for leap years + if cal == "NO_LEAP": + restdatetime = restdatetime + self._leap_year_correction( + startdatetime, restdatetime + ) + ryr = int(restdatetime.year) + ryr += 10000 * addyr + self._rest_time = f".{ryr:04d}-{restdatetime.month:02d}-{restdatetime.day:02d}-" + h = restdatetime.hour + m = restdatetime.minute + s = restdatetime.second + self._rest_time += f"{(h*3600+m*60+s):05d}" + + logger.info( + "doing an {0} {1} initial test with restart file at {2} {1}".format( + str(stop_n), stop_option, str(rest_n) + ) + ) + self._case.set_value("REST_N", rest_n) + + return rest_n + + @staticmethod + def _leap_year_correction(startdatetime, restdatetime): + """ + Compute correction needed for restdate time if model is using NO_LEAP calendar + + >>> SystemTestsCommon._leap_year_correction(datetime.strptime("00031231","%Y%m%d"), datetime.strptime("00040101","%Y%m%d")) + datetime.timedelta(0) + >>> SystemTestsCommon._leap_year_correction(datetime.strptime("20000225","%Y%m%d"), datetime.strptime("20000301","%Y%m%d")) + datetime.timedelta(days=1) + >>> SystemTestsCommon._leap_year_correction(datetime.strptime("20010225","%Y%m%d"), datetime.strptime("20010301","%Y%m%d")) + datetime.timedelta(0) + >>> SystemTestsCommon._leap_year_correction(datetime.strptime("20010225","%Y%m%d"), datetime.strptime("20050301","%Y%m%d")) + datetime.timedelta(days=1) + >>> SystemTestsCommon._leap_year_correction(datetime.strptime("18500101","%Y%m%d"), datetime.strptime("20201231","%Y%m%d")) + datetime.timedelta(days=42) + """ + dayscorrected = 0 + syr = startdatetime.year + smon = startdatetime.month + ryr = syr + rmon = restdatetime.month + while ryr < restdatetime.year: + if calendar.isleap(ryr): + dayscorrected += 1 + ryr = ryr + 1 + if rmon > 2 and (smon <= 2 or restdatetime.year > syr): + if calendar.isleap(ryr): + dayscorrected += 1 + logger.info("correcting calendar for no leap {}".format(dayscorrected)) + return timedelta(days=dayscorrected) + + def _init_environment(self, caseroot): + """ + Do initializations of environment variables that are needed in __init__ + """ + # Needed for sh scripts + os.environ["CASEROOT"] = caseroot + + def _init_locked_files(self, caseroot, expected): + """ + If the locked env_run.orig.xml does not exist, copy the current + env_run.xml file. If it does exist, restore values changed in a previous + run of the test. + """ + if is_locked("env_run.orig.xml", caseroot): + self.compare_env_run(expected=expected) + elif os.path.isfile(os.path.join(caseroot, "env_run.xml")): + lock_file("env_run.xml", caseroot, newname="env_run.orig.xml") + + def _resetup_case(self, phase, reset=False): + """ + Re-setup this case. 
This is necessary if user is re-running an already-run + phase. + """ + # We never want to re-setup if we're doing the resubmitted run + phase_status = self._test_status.get_status(phase) + phase_comment = self._test_status.get_comment(phase) + rerunning = ( + phase_status != TEST_PEND_STATUS or phase_comment == TEST_RERUN_COMMENT + ) + if reset or (self._case.get_value("IS_FIRST_RUN") and rerunning): + + logging.warning( + "Resetting case due to detected re-run of phase {}".format(phase) + ) + self._case.set_initial_test_values() + self._case.case_setup(reset=True, test_mode=True) + fix_single_exe_case(self._case) + + def build( + self, + sharedlib_only=False, + model_only=False, + ninja=False, + dry_run=False, + separate_builds=False, + skip_submit=False, + ): + """ + Do NOT override this method, this method is the framework that + controls the build phase. build_phase is the extension point + that subclasses should use. + """ + success = True + self._ninja = ninja + self._dry_run = dry_run + self._user_separate_builds = separate_builds + + was_run_pend = self._test_status.current_is(RUN_PHASE, TEST_PEND_STATUS) + + for phase_name, phase_bool in [ + (SHAREDLIB_BUILD_PHASE, not model_only), + (MODEL_BUILD_PHASE, not sharedlib_only), + ]: + if phase_bool: + self._resetup_case(phase_name) + with self._test_status: + self._test_status.set_status(phase_name, TEST_PEND_STATUS) + + start_time = time.time() + try: + self.build_phase( + sharedlib_only=(phase_name == SHAREDLIB_BUILD_PHASE), + model_only=(phase_name == MODEL_BUILD_PHASE), + ) + except ( + BaseException + ) as e: # We want KeyboardInterrupts to generate FAIL status + success = False + if isinstance(e, CIMEError): + # Don't want to print stacktrace for a build failure since that + # is not a CIME/infrastructure problem. + excmsg = str(e) + else: + excmsg = "Exception during build:\n{}\n{}".format( + str(e), traceback.format_exc() + ) + + append_testlog(excmsg, self._orig_caseroot) + raise + + finally: + time_taken = time.time() - start_time + with self._test_status: + self._test_status.set_status( + phase_name, + TEST_PASS_STATUS if success else TEST_FAIL_STATUS, + comments=("time={:d}".format(int(time_taken))), + ) + + # Building model while job is queued and awaiting run + if ( + skip_submit + and was_run_pend + and self._test_status.current_is(SUBMIT_PHASE, TEST_PEND_STATUS) + ): + with self._test_status: + self._test_status.set_status(SUBMIT_PHASE, TEST_PASS_STATUS) + + return success + + def build_phase(self, sharedlib_only=False, model_only=False): + """ + This is the default build phase implementation, it just does an individual build. + This is the subclass' extension point if they need to define a custom build + phase. 
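        A subclass that needs extra case setup before compiling might
        override it along these lines (a sketch; the xml variable shown is
        illustrative):

            def build_phase(self, sharedlib_only=False, model_only=False):
                if not sharedlib_only:
                    self._case.set_value("HIST_OPTION", "nsteps")
                self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only)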
+ + PLEASE THROW EXCEPTION ON FAIL + """ + self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) + + def build_indv(self, sharedlib_only=False, model_only=False): + """ + Perform an individual build + """ + model = self._case.get_value("MODEL") + build.case_build( + self._caseroot, + case=self._case, + sharedlib_only=sharedlib_only, + model_only=model_only, + save_build_provenance=not model == "cesm", + ninja=self._ninja, + dry_run=self._dry_run, + separate_builds=self._user_separate_builds, + ) + logger.info("build_indv complete") + + def clean_build(self, comps=None): + if comps is None: + comps = [x.lower() for x in self._case.get_values("COMP_CLASSES")] + build.clean(self._case, cleanlist=comps) + + def run(self, skip_pnl=False): + """ + Do NOT override this method, this method is the framework that controls + the run phase. run_phase is the extension point that subclasses should use. + """ + success = True + start_time = time.time() + wav_comp = self._case.get_value("COMP_WAV") + # WW3 requires pnl to be run again after the build phase. + if wav_comp and wav_comp == "ww3": + self._skip_pnl = False + else: + self._skip_pnl = skip_pnl + + try: + self._resetup_case(RUN_PHASE) + do_baseline_ops = True + with self._test_status: + self._test_status.set_status(RUN_PHASE, TEST_PEND_STATUS) + + # We do not want to do multiple repetitions of baseline operations for + # multi-submit tests. We just want to do them upon the final submission. + # Other submissions will need to mark those phases as PEND to ensure wait_for_tests + # waits for them. + if self._case.get_value("BATCH_SYSTEM") != "none": + do_baseline_ops = self._case.get_value("RESUBMIT") == 0 + + self.run_phase() + if self._case.get_value("GENERATE_BASELINE"): + if do_baseline_ops: + self._phase_modifying_call(GENERATE_PHASE, self._generate_baseline) + else: + with self._test_status: + self._test_status.set_status(GENERATE_PHASE, TEST_PEND_STATUS) + + if self._case.get_value("COMPARE_BASELINE"): + if do_baseline_ops: + self._phase_modifying_call(BASELINE_PHASE, self._compare_baseline) + comp_standalone, _ = is_comp_standalone(self._case) + if not comp_standalone: + self._phase_modifying_call(MEMCOMP_PHASE, self._compare_memory) + self._phase_modifying_call( + THROUGHPUT_PHASE, self._compare_throughput + ) + else: + with self._test_status: + self._test_status.set_status(BASELINE_PHASE, TEST_PEND_STATUS) + self._test_status.set_status(MEMCOMP_PHASE, TEST_PEND_STATUS) + self._test_status.set_status(THROUGHPUT_PHASE, TEST_PEND_STATUS) + + self._phase_modifying_call(MEMLEAK_PHASE, self._check_for_memleak) + self._phase_modifying_call(STARCHIVE_PHASE, self._st_archive_case_test) + + except BaseException as e: # We want KeyboardInterrupts to generate FAIL status + success = False + if isinstance(e, CIMEError): + # Don't want to print stacktrace for a model failure since that + # is not a CIME/infrastructure problem. 
+ excmsg = str(e) + else: + excmsg = "Exception during run:\n{}\n{}".format( + str(e), traceback.format_exc() + ) + + append_testlog(excmsg, self._orig_caseroot) + raise + + finally: + # Writing the run status should be the very last thing due to wait_for_tests + time_taken = time.time() - start_time + status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS + with self._test_status: + self._test_status.set_status( + RUN_PHASE, status, comments=("time={:d}".format(int(time_taken))) + ) + + config = Config.instance() + + if config.verbose_run_phase: + # If run phase worked, remember the time it took in order to improve later walltime ests + baseline_root = self._case.get_value("BASELINE_ROOT") + if success: + srcroot = self._case.get_value("SRCROOT") + save_test_time( + baseline_root, + self._casebaseid, + time_taken, + get_current_commit(repo=srcroot), + ) + + # If overall things did not pass, offer the user some insight into what might have broken things + overall_status = self._test_status.get_overall_test_status( + ignore_namelists=True + )[0] + if overall_status != TEST_PASS_STATUS: + srcroot = self._case.get_value("SRCROOT") + worked_before, last_pass, last_fail_transition = get_test_success( + baseline_root, srcroot, self._casebaseid + ) + + if worked_before: + if last_pass is not None: + # commits between last_pass and now broke things + stat, out, err = run_cmd( + "git rev-list --first-parent {}..{}".format( + last_pass, "HEAD" + ), + from_dir=srcroot, + ) + if stat == 0: + append_testlog( + "NEW FAIL: Potentially broken merges:\n{}".format( + out + ), + self._orig_caseroot, + ) + else: + logger.warning( + "Unable to list potentially broken merges: {}\n{}".format( + out, err + ) + ) + else: + if last_pass is not None and last_fail_transition is not None: + # commits between last_pass and last_fail_transition broke things + stat, out, err = run_cmd( + "git rev-list --first-parent {}..{}".format( + last_pass, last_fail_transition + ), + from_dir=srcroot, + ) + if stat == 0: + append_testlog( + "OLD FAIL: Potentially broken merges:\n{}".format( + out + ), + self._orig_caseroot, + ) + else: + logger.warning( + "Unable to list potentially broken merges: {}\n{}".format( + out, err + ) + ) + + if config.baseline_store_teststatus and self._case.get_value( + "GENERATE_BASELINE" + ): + baseline_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) + generate_teststatus(self._caseroot, baseline_dir) + + # We return success if the run phase worked; memleaks, diffs will NOT be taken into account + # with this return value. + return success + + def run_phase(self): + """ + This is the default run phase implementation, it just does an individual run. + This is the subclass' extension point if they need to define a custom run phase. + + PLEASE THROW AN EXCEPTION ON FAIL + """ + self.run_indv() + + def _get_caseroot(self): + """ + Returns the current CASEROOT value + """ + return self._caseroot + + def _set_active_case(self, case): + """ + Use for tests that have multiple cases + """ + self._case = case + self._case.load_env(reset=True) + self._caseroot = case.get_value("CASEROOT") + + def run_indv( + self, + suffix="base", + st_archive=False, + submit_resubmits=None, + keep_init_generated_files=False, + ): + """ + Perform an individual run. Raises an EXCEPTION on fail. + + keep_init_generated_files: If False (the default), we remove the + init_generated_files subdirectory of the run directory before running the case. 
+ This is usually what we want for tests, but some specific tests may want to leave + this directory in place, so can set this variable to True to do so. + """ + stop_n = self._case.get_value("STOP_N") + stop_option = self._case.get_value("STOP_OPTION") + run_type = self._case.get_value("RUN_TYPE") + rundir = self._case.get_value("RUNDIR") + try: + self._case.check_all_input_data() + except CIMEError: + caseroot = self._case.get_value("CASEROOT") + raise CIMEError( + "Could not find all inputdata on any server, try " + "manually running `./check_input_data --download " + f"--verbose` from {caseroot!r}." + ) from None + if submit_resubmits is None: + do_resub = self._case.get_value("BATCH_SYSTEM") != "none" + else: + do_resub = submit_resubmits + + # remove any cprnc output leftover from previous runs + for compout in glob.iglob(os.path.join(rundir, "*.cprnc.out")): + os.remove(compout) + + if not keep_init_generated_files: + # remove all files in init_generated_files directory if it exists + init_generated_files_dir = os.path.join( + rundir, INIT_GENERATED_FILES_DIRNAME + ) + if os.path.isdir(init_generated_files_dir): + for init_file in glob.iglob( + os.path.join(init_generated_files_dir, "*") + ): + os.remove(init_file) + + infostr = "doing an {:d} {} {} test".format(stop_n, stop_option, run_type) + + rest_option = self._case.get_value("REST_OPTION") + if rest_option == "none" or rest_option == "never": + infostr += ", no restarts written" + else: + rest_n = self._case.get_value("REST_N") + infostr += ", with restarts every {:d} {}".format(rest_n, rest_option) + + logger.info(infostr) + + self._case.case_run(skip_pnl=self._skip_pnl, submit_resubmits=do_resub) + + if not self._coupler_log_indicates_run_complete(): + expect(False, "Coupler did not indicate run passed") + + if suffix is not None: + self._component_compare_copy(suffix) + + if st_archive: + self._case.case_st_archive(resubmit=True) + + def _coupler_log_indicates_run_complete(self): + newestcpllogfiles = get_latest_cpl_logs(self._case) + logger.debug("Latest Coupler log file(s) {}".format(newestcpllogfiles)) + # Exception is raised if the file is not compressed + allgood = len(newestcpllogfiles) + for cpllog in newestcpllogfiles: + try: + if b"SUCCESSFUL TERMINATION" in gzip.open(cpllog, "rb").read(): + allgood = allgood - 1 + except Exception as e: # Probably want to be more specific here + msg = e.__str__() + + logger.info( + "{} is not compressed, assuming run failed {}".format(cpllog, msg) + ) + + return allgood == 0 + + def _component_compare_copy(self, suffix): + # Only match .nc files + comments, num_copied = copy_histfiles(self._case, suffix, match_suffix="nc") + self._expected_num_cmp = num_copied + + append_testlog(comments, self._orig_caseroot) + + def _log_cprnc_output_tail(self, filename_pattern, prepend=None): + rundir = self._case.get_value("RUNDIR") + + glob_pattern = "{}/{}".format(rundir, filename_pattern) + + cprnc_logs = glob.glob(glob_pattern) + + for output in cprnc_logs: + with open(output) as fin: + cprnc_log_tail = fin.readlines()[-20:] + + cprnc_log_tail.insert(0, "tail -n20 {}\n\n".format(output)) + + if prepend is not None: + cprnc_log_tail.insert(0, "{}\n\n".format(prepend)) + + append_testlog("".join(cprnc_log_tail), self._orig_caseroot) + + def _component_compare_test( + self, suffix1, suffix2, success_change=False, ignore_fieldlist_diffs=False + ): + """ + Return value is not generally checked, but is provided in case a custom + run case needs indirection based on success. 
+ If success_change is True, success requires some files to be different. + If ignore_fieldlist_diffs is True, then: If the two cases differ only in their + field lists (i.e., all shared fields are bit-for-bit, but one case has some + diagnostic fields that are missing from the other case), treat the two cases + as identical. + """ + success, comments, num_compared = self._do_compare_test( + suffix1, suffix2, ignore_fieldlist_diffs=ignore_fieldlist_diffs + ) + if success_change: + success = not success + + if ( + self._expected_num_cmp is not None + and num_compared is not None + and self._expected_num_cmp != num_compared + ): + comments = comments.replace("PASS", "") + comments += """\nWARNING +Expected to compare {} hist files, but only compared {}. It's possible +that the hist_file_extension entry in config_archive.xml is not correct +for some of your components. +""".format( + self._expected_num_cmp, num_compared + ) + + append_testlog(comments, self._orig_caseroot) + + pattern = "*.nc.{}.cprnc.out".format(suffix1) + message = "compared suffixes suffix1 {!r} suffix2 {!r}".format(suffix1, suffix2) + + self._log_cprnc_output_tail(pattern, message) + + status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS + with self._test_status: + self._test_status.set_status( + "{}_{}_{}".format(COMPARE_PHASE, suffix1, suffix2), status + ) + return success + + def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): + """ + Wraps the call to compare_test to facilitate replacement in unit + tests + """ + return compare_test( + self._case, suffix1, suffix2, ignore_fieldlist_diffs=ignore_fieldlist_diffs + ) + + def _st_archive_case_test(self): + result = self._case.test_env_archive() + with self._test_status: + if result: + self._test_status.set_status(STARCHIVE_PHASE, TEST_PASS_STATUS) + else: + self._test_status.set_status(STARCHIVE_PHASE, TEST_FAIL_STATUS) + + def _phase_modifying_call(self, phase, function): + """ + Ensures that unexpected exceptions from phases will result in a FAIL result + in the TestStatus file for that phase. + """ + try: + function() + except Exception as e: # Do NOT want to catch KeyboardInterrupt + msg = e.__str__() + excmsg = "Exception during {}:\n{}\n{}".format( + phase, msg, traceback.format_exc() + ) + + logger.warning(excmsg) + append_testlog(excmsg, self._orig_caseroot) + + with self._test_status: + self._test_status.set_status( + phase, TEST_FAIL_STATUS, comments="exception" + ) + + def _check_for_memleak(self): + """ + Examine memory usage as recorded in the cpl log file and look for unexpected + increases. 
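        A worked example of the check performed by
        perf_check_for_memory_leak below: with the default tolerance of 0.1,
        a coupler log whose second sampled day reports 1000 MB and whose
        last day reports 1150 MB gives (1150 - 1000) / 1000 = 0.15 > 0.1,
        so the MEMLEAK phase is marked FAIL; growth of 5% would pass.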
+ """ + config = load_coupler_customization(self._case) + + # default to 0.1 + tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") or 0.1 + + expect(tolerance > 0.0, "Bad value for memleak tolerance in test") + + with self._test_status: + try: + memleak, comment = config.perf_check_for_memory_leak( + self._case, tolerance + ) + except AttributeError: + memleak, comment = perf_check_for_memory_leak(self._case, tolerance) + + if memleak: + append_testlog(comment, self._orig_caseroot) + + status = TEST_FAIL_STATUS + else: + status = TEST_PASS_STATUS + + self._test_status.set_status(MEMLEAK_PHASE, status, comments=comment) + + def compare_env_run(self, expected=None): + """ + Compare env_run file to original and warn about differences + """ + components = self._case.get_values("COMP_CLASSES") + f1obj = self._case.get_env("run") + f2obj = EnvRun( + self._caseroot, + os.path.join(LOCKED_DIR, "env_run.orig.xml"), + components=components, + ) + diffs = f1obj.compare_xml(f2obj) + for key in diffs.keys(): + if expected is not None and key in expected: + logging.warning(" Resetting {} for test".format(key)) + f1obj.set_value(key, f2obj.get_value(key, resolved=False)) + else: + print( + "WARNING: Found difference in test {}: case: {} original value {}".format( + key, diffs[key][0], diffs[key][1] + ) + ) + return False + return True + + def _compare_memory(self): + """ + Compares current test memory usage to baseline. + """ + with self._test_status: + try: + below_tolerance, comment = perf_compare_memory_baseline(self._case) + except Exception as e: + logger.info("Failed to compare memory usage baseline: {!s}".format(e)) + + self._test_status.set_status( + MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=str(e) + ) + else: + if below_tolerance is not None: + append_testlog(comment, self._orig_caseroot) + + if ( + below_tolerance + and self._test_status.get_status(MEMCOMP_PHASE) is None + ): + self._test_status.set_status(MEMCOMP_PHASE, TEST_PASS_STATUS) + elif ( + self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS + ): + self._test_status.set_status( + MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment + ) + + def _compare_throughput(self): + """ + Compares current test throughput to baseline. 
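        A minimal standalone sketch of the same comparison, assuming an
        already-open Case object:

            from CIME.baselines.performance import perf_compare_throughput_baseline

            below_tolerance, comment = perf_compare_throughput_baseline(case)
            if below_tolerance is None:
                print("no throughput comparison could be made")
            elif below_tolerance:
                print("PASS:", comment)
            else:
                print("FAIL:", comment)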
+ """ + with self._test_status: + try: + below_tolerance, comment = perf_compare_throughput_baseline(self._case) + except Exception as e: + logger.info("Failed to compare throughput baseline: {!s}".format(e)) + + self._test_status.set_status( + THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=str(e) + ) + else: + if below_tolerance is not None: + append_testlog(comment, self._orig_caseroot) + + if ( + below_tolerance + and self._test_status.get_status(THROUGHPUT_PHASE) is None + ): + self._test_status.set_status(THROUGHPUT_PHASE, TEST_PASS_STATUS) + elif ( + self._test_status.get_status(THROUGHPUT_PHASE) + != TEST_FAIL_STATUS + ): + self._test_status.set_status( + THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment + ) + + def _compare_baseline(self): + """ + compare the current test output to a baseline result + """ + with self._test_status: + # compare baseline + success, comments = compare_baseline(self._case) + + append_testlog(comments, self._orig_caseroot) + + pattern = "*.nc.cprnc.out" + + self._log_cprnc_output_tail(pattern) + + status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS + baseline_name = self._case.get_value("BASECMP_CASE") + ts_comments = ( + os.path.dirname(baseline_name) + ": " + get_ts_synopsis(comments) + ) + self._test_status.set_status(BASELINE_PHASE, status, comments=ts_comments) + + def _generate_baseline(self): + """ + generate a new baseline case based on the current test + """ + with self._test_status: + # generate baseline + success, comments = generate_baseline(self._case) + append_testlog(comments, self._orig_caseroot) + status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS + baseline_name = self._case.get_value("BASEGEN_CASE") + self._test_status.set_status( + GENERATE_PHASE, status, comments=os.path.dirname(baseline_name) + ) + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) + # copy latest cpl log to baseline + # drop the date so that the name is generic + newestcpllogfiles = get_latest_cpl_logs(self._case) + with SharedArea(): + # TODO ever actually more than one cpl log? + for cpllog in newestcpllogfiles: + m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) + + if m is not None: + baselog = os.path.join(basegen_dir, m.group(1)) + ".gz" + + safe_copy( + cpllog, + os.path.join(basegen_dir, baselog), + preserve_meta=False, + ) + + perf_write_baseline(self._case, basegen_dir, cpllog) + + +def perf_check_for_memory_leak(case, tolerance): + leak = False + comment = "" + + latestcpllogs = get_latest_cpl_logs(case) + + for cpllog in latestcpllogs: + try: + memlist = perf_get_memory_list(case, cpllog) + except RuntimeError: + return False, "insufficient data for memleak test" + + # last day - second day, skip first day, can be too low while initializing + elapsed_days = int(memlist[-1][0]) - int(memlist[1][0]) + + finalmem, originalmem = float(memlist[-1][1]), float(memlist[1][1]) + + memdiff = -1 if originalmem <= 0 else (finalmem - originalmem) / originalmem + + if memdiff < 0: + leak = False + comment = "data for memleak test is insufficient" + elif memdiff < tolerance: + leak = False + comment = "" + else: + leak = True + comment = ( + "memleak detected, memory went from {:f} to {:f} in {:d} days".format( + originalmem, finalmem, elapsed_days + ) + ) + + return leak, comment + + +class FakeTest(SystemTestsCommon): + """ + Inheriters of the FakeTest Class are intended to test the code. 
+ + All members of the FakeTest Class must + have names beginning with "TEST" this is so that the find_system_test + in utils.py will work with these classes. + """ + + def __init__(self, case, expected=None, **kwargs): + super(FakeTest, self).__init__(case, expected=expected, **kwargs) + self._script = None + self._requires_exe = False + self._case._non_local = True + self._original_exe = self._case.get_value("run_exe") + + def _set_script(self, script, requires_exe=False): + self._script = script + self._requires_exe = requires_exe + + def _resetup_case(self, phase, reset=False): + run_exe = self._case.get_value("run_exe") + super(FakeTest, self)._resetup_case(phase, reset=reset) + self._case.set_value("run_exe", run_exe) + + def build_phase(self, sharedlib_only=False, model_only=False): + if self._requires_exe: + super(FakeTest, self).build_phase( + sharedlib_only=sharedlib_only, model_only=model_only + ) + + if not sharedlib_only: + exeroot = self._case.get_value("EXEROOT") + modelexe = os.path.join(exeroot, "fake.exe") + self._case.set_value("run_exe", modelexe) + + with open(modelexe, "w") as f: + f.write("#!/bin/bash\n") + f.write(self._script) + + os.chmod(modelexe, 0o755) + + if not self._requires_exe: + build.post_build(self._case, [], build_complete=True) + else: + expect( + os.path.exists(modelexe), + "Could not find expected file {}".format(modelexe), + ) + logger.info( + "FakeTest build_phase complete {} {}".format( + modelexe, self._requires_exe + ) + ) + + def run_indv( + self, + suffix="base", + st_archive=False, + submit_resubmits=None, + keep_init_generated_files=False, + ): + mpilib = self._case.get_value("MPILIB") + # This flag is needed by mpt to run a script under mpiexec + if mpilib == "mpt": + os.environ["MPI_SHEPHERD"] = "true" + super(FakeTest, self).run_indv( + suffix, st_archive=st_archive, submit_resubmits=submit_resubmits + ) + + +class TESTRUNPASS(FakeTest): + def build_phase(self, sharedlib_only=False, model_only=False): + rundir = self._case.get_value("RUNDIR") + cimeroot = self._case.get_value("CIMEROOT") + case = self._case.get_value("CASE") + script = """ +echo Insta pass +echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID +cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) + self._set_script(script) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + + +class TESTRUNDIFF(FakeTest): + """ + You can generate a diff with this test as follows: + 1) Run the test and generate a baseline + 2) set TESTRUNDIFF_ALTERNATE environment variable to TRUE + 3) Re-run the same test from step 1 but do a baseline comparison instead of generation + 3.a) This should give you a DIFF + """ + + def build_phase(self, sharedlib_only=False, model_only=False): + rundir = self._case.get_value("RUNDIR") + cimeroot = self._case.get_value("CIMEROOT") + case = self._case.get_value("CASE") + script = """ +echo Insta pass +echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID +if [ -z "$TESTRUNDIFF_ALTERNATE" ]; then + cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc +else + cp {root}/scripts/tests/cpl.hi2.nc.test {rundir}/{case}.cpl.hi.0.nc +fi +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) + self._set_script(script) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + + +class TESTRUNDIFFRESUBMIT(TESTRUNDIFF): + pass + + +class TESTTESTDIFF(FakeTest): + def build_phase(self, 
sharedlib_only=False, model_only=False): + rundir = self._case.get_value("RUNDIR") + cimeroot = self._case.get_value("CIMEROOT") + case = self._case.get_value("CASE") + script = """ +echo Insta pass +echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID +cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc +cp {root}/scripts/tests/cpl.hi2.nc.test {rundir}/{case}.cpl.hi.0.nc.rest +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) + self._set_script(script) + super(TESTTESTDIFF, self).build_phase( + sharedlib_only=sharedlib_only, model_only=model_only + ) + + def run_phase(self): + super(TESTTESTDIFF, self).run_phase() + self._component_compare_test("base", "rest") + + +class TESTRUNFAIL(FakeTest): + def build_phase(self, sharedlib_only=False, model_only=False): + rundir = self._case.get_value("RUNDIR") + cimeroot = self._case.get_value("CIMEROOT") + case = self._case.get_value("CASE") + script = """ +if [ -z "$TESTRUNFAIL_PASS" ]; then + echo Insta fail + echo model failed > {rundir}/{log}.log.$LID + exit -1 +else + echo Insta pass + echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID + cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc +fi +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) + self._set_script(script) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + + +class TESTRUNFAILRESET(TESTRUNFAIL): + """This fake test can fail for two reasons: + 1. As in the TESTRUNFAIL test: If the environment variable TESTRUNFAIL_PASS is *not* set + 2. Even if that environment variable *is* set, it will fail if STOP_N differs from the + original value + + The purpose of (2) is to ensure that test's values get properly reset if the test is + rerun after an initial failure. + """ + + def run_indv( + self, + suffix="base", + st_archive=False, + submit_resubmits=None, + keep_init_generated_files=False, + ): + # Make sure STOP_N matches the original value for the case. This tests that STOP_N + # has been reset properly if we are rerunning the test after a failure. 
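        # A sketch of how this is typically exercised: submit once without
        # TESTRUNFAIL_PASS in the environment so the fake model fails, then
        # set TESTRUNFAIL_PASS=1 and rerun; the expect() below succeeds only
        # if _resetup_case() restored STOP_N from env_test.xml after the
        # deliberately bumped value written further down in this method.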
+ env_test = EnvTest(self._get_caseroot()) + stop_n = self._case.get_value("STOP_N") + stop_n_test = int(env_test.get_test_parameter("STOP_N")) + expect( + stop_n == stop_n_test, + "Expect STOP_N to match original ({} != {})".format(stop_n, stop_n_test), + ) + + # Now modify STOP_N so that an error will be generated if it isn't reset properly + # upon a rerun + self._case.set_value("STOP_N", stop_n + 1) + + super(TESTRUNFAILRESET, self).run_indv( + suffix=suffix, st_archive=st_archive, submit_resubmits=submit_resubmits + ) + + +class TESTRUNFAILEXC(TESTRUNPASS): + def run_phase(self): + raise RuntimeError("Exception from run_phase") + + +class TESTRUNSTARCFAIL(TESTRUNPASS): + def _st_archive_case_test(self): + raise RuntimeError("Exception from st archive") + + +class TESTBUILDFAIL(TESTRUNPASS): + def build_phase(self, sharedlib_only=False, model_only=False): + if "TESTBUILDFAIL_PASS" in os.environ: + TESTRUNPASS.build_phase(self, sharedlib_only, model_only) + else: + if not sharedlib_only: + blddir = self._case.get_value("EXEROOT") + bldlog = os.path.join( + blddir, + "{}.bldlog.{}".format(get_model(), get_timestamp("%y%m%d-%H%M%S")), + ) + with open(bldlog, "w") as fd: + fd.write("BUILD FAIL: Intentional fail for testing infrastructure") + + expect(False, "BUILD FAIL: Intentional fail for testing infrastructure") + + +class TESTBUILDFAILEXC(FakeTest): + def __init__(self, case, **kwargs): + FakeTest.__init__(self, case, **kwargs) + raise RuntimeError("Exception from init") + + +class TESTRUNUSERXMLCHANGE(FakeTest): + def build_phase(self, sharedlib_only=False, model_only=False): + caseroot = self._case.get_value("CASEROOT") + modelexe = self._case.get_value("run_exe") + new_stop_n = self._case.get_value("STOP_N") * 2 + + script = """ +cd {caseroot} +./xmlchange --file env_test.xml STOP_N={stopn} +./xmlchange RESUBMIT=1,STOP_N={stopn},CONTINUE_RUN=FALSE,RESUBMIT_SETS_CONTINUE_RUN=FALSE +cd - +{originalexe} "$@" +cd {caseroot} +./xmlchange run_exe={modelexe} +sleep 5 +""".format( + originalexe=self._original_exe, + caseroot=caseroot, + modelexe=modelexe, + stopn=str(new_stop_n), + ) + self._set_script(script, requires_exe=True) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + + def run_phase(self): + self.run_indv(submit_resubmits=True) + + +class TESTRUNSLOWPASS(FakeTest): + def build_phase(self, sharedlib_only=False, model_only=False): + rundir = self._case.get_value("RUNDIR") + cimeroot = self._case.get_value("CIMEROOT") + case = self._case.get_value("CASE") + script = """ +sleep 300 +echo Slow pass +echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID +cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) + self._set_script(script) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + + +class TESTMEMLEAKFAIL(FakeTest): + def build_phase(self, sharedlib_only=False, model_only=False): + rundir = self._case.get_value("RUNDIR") + cimeroot = self._case.get_value("CIMEROOT") + case = self._case.get_value("CASE") + testfile = os.path.join(cimeroot, "scripts", "tests", "cpl.log.failmemleak.gz") + script = """ +echo Insta pass +gunzip -c {testfile} > {rundir}/{log}.log.$LID +cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc +""".format( + testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) + self._set_script(script) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, 
model_only=model_only) + + +class TESTMEMLEAKPASS(FakeTest): + def build_phase(self, sharedlib_only=False, model_only=False): + rundir = self._case.get_value("RUNDIR") + cimeroot = self._case.get_value("CIMEROOT") + case = self._case.get_value("CASE") + testfile = os.path.join(cimeroot, "scripts", "tests", "cpl.log.passmemleak.gz") + script = """ +echo Insta pass +gunzip -c {testfile} > {rundir}/{log}.log.$LID +cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc +""".format( + testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) + self._set_script(script) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) diff --git a/CIME/SystemTests/system_tests_compare_n.py b/CIME/SystemTests/system_tests_compare_n.py new file mode 100644 index 00000000000..5d7dc405304 --- /dev/null +++ b/CIME/SystemTests/system_tests_compare_n.py @@ -0,0 +1,568 @@ +""" +Base class for CIME system tests that involve doing multiple runs and comparing the base run (index=0) +with the subsequent runs (indices=1..N-1). + +NOTE: Below is the flow of a multisubmit test. +Non-batch: +case_submit -> case_run # PHASE 1 + -> case_run # PHASE 2 + ... + -> case_run # PHASE N + +batch: +case_submit -> case_run # PHASE 1 +case_run -> case_submit +case_submit -> case_run # PHASE 2 +... +case_submit -> case_run # PHASE N + +In the __init__ method for your test, you MUST call + SystemTestsCompareN.__init__ +See the documentation of that method for details. + +Classes that inherit from this are REQUIRED to implement the following method: + +(1) _case_setup + This method will be called to set up case i, where i==0 corresponds to the base case + and i=={1,..N-1} corresponds to subsequent runs to be compared with the base. + +In addition, they MAY require the following methods: + +(1) _common_setup + This method will be called to set up all cases. It should contain any setup + that's needed in all cases. This is called before _case_setup_config + +(2) _case_custom_prerun_action(self, i): + Use this to do arbitrary actions immediately before running case i + +(3) _case_custom_postrun_action(self, i): + Use this to do arbitrary actions immediately after running case one +""" + +from CIME.XML.standard_module_setup import * +from CIME.SystemTests.system_tests_common import SystemTestsCommon, fix_single_exe_case +from CIME.case import Case +from CIME.config import Config +from CIME.test_status import * + +import shutil, os, glob + +logger = logging.getLogger(__name__) + + +class SystemTestsCompareN(SystemTestsCommon): + def __init__( + self, + case, + N=2, + separate_builds=False, + run_suffixes=None, + run_descriptions=None, + multisubmit=False, + ignore_fieldlist_diffs=False, + dry_run=False, + **kwargs + ): + """ + Initialize a SystemTestsCompareN object. Individual test cases that + inherit from SystemTestsCompareN MUST call this __init__ method. + + Args: + case: case object passsed to __init__ method of individual + test. This is the main case associated with the test. + N (int): number of test cases including the base case. + separate_builds (bool): Whether separate builds are needed for the + cases. If False, case[i:1..N-1] uses the case[0] executable. + run_suffixes (list of str, optional): List of suffixes appended to the case names. + Defaults to ["base", "subsq_1", "subsq_2", .. "subsq_N-1"]. Each + suffix must be unique. + run_descriptions (list of str, optional): Descriptions printed to log file + of each case when starting the runs. Defaults to ['']*N. 
+ multisubmit (bool): Do base and subsequent runs as different submissions. + Designed for tests with RESUBMIT=1 + ignore_fieldlist_diffs (bool): If True, then: If the cases differ only in + their field lists (i.e., all shared fields are bit-for-bit, but one case + has some diagnostic fields that are missing from the base case), treat + the cases as identical. (This is needed for tests where one case + exercises an option that produces extra diagnostic fields.) + """ + SystemTestsCommon.__init__(self, case, **kwargs) + + self._separate_builds = separate_builds + self._ignore_fieldlist_diffs = ignore_fieldlist_diffs + + expect(N > 1, "Number of cases must be greater than 1.") + self._cases = [None] * N + self.N = N + + if run_suffixes: + expect( + isinstance(run_suffixes, list) + and all([isinstance(sfx, str) for sfx in run_suffixes]), + "run_suffixes must be a list of strings", + ) + expect( + len(run_suffixes) == self.N, + "run_suffixes list must include {} strings".format(self.N), + ) + expect( + len(set(run_suffixes)) == len(run_suffixes), + "each suffix in run_suffixes must be unique", + ) + self._run_suffixes = [sfx.rstrip() for sfx in run_suffixes] + else: + self._run_suffixes = ["base"] + ["subsq_{}".format(i) for i in range(1, N)] + + if run_descriptions: + expect( + isinstance(run_descriptions, list) + and all([isinstance(dsc, str) for dsc in run_descriptions]), + "run_descriptions must be a list of strings", + ) + expect( + len(run_descriptions) == self.N, + "run_descriptions list must include {} strings".format(self.N), + ) + self._run_descriptions = run_descriptions + else: + self._run_descriptions = [""] * self.N + + # Set the base case for referencing purposes + self._cases[0] = self._case + self._caseroots = self._get_caseroots() + + if not dry_run: + self._setup_cases_if_not_yet_done() + + self._multisubmit = ( + multisubmit and self._cases[0].get_value("BATCH_SYSTEM") != "none" + ) + + # ======================================================================== + # Methods that MUST be implemented by specific tests that inherit from this + # base class + # ======================================================================== + + def _case_setup(self, i): + """ + This method will be called to set up case[i], where case[0] is the base case. + + This should be written to refer to self._case: this object will point to + case[i] at the point that this is called. + """ + raise NotImplementedError + + # ======================================================================== + # Methods that MAY be implemented by specific tests that inherit from this + # base class, if they have any work to do in these methods + # ======================================================================== + + def _common_setup(self): + """ + This method will be called to set up all cases. It should contain any setup + that's needed in both cases. + + This should be written to refer to self._case: It will be called once with + self._case pointing to case1, and once with self._case pointing to case2. 
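+
+        (For SystemTestsCompareN specifically, this is called once per case, i.e.
+        with self._case pointing to each of case 0 .. N-1 in turn.)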
+ """ + + def _case_custom_prerun_action(self, i): + """ + Use to do arbitrary actions immediately before running case i:0..N + """ + + def _case_custom_postrun_action(self, i): + """ + Use to do arbitrary actions immediately after running case i:0..N + """ + + # ======================================================================== + # Main public methods + # ======================================================================== + + def build_phase(self, sharedlib_only=False, model_only=False): + # Subtle issue: base case is already in a writeable state since it tends to be opened + # with a with statement in all the API entrances in CIME. subsequent cases were + # created via clone, not a with statement, so it's not in a writeable state, + # so we need to use a with statement here to put it in a writeable state. + config = Config.instance() + + for i in range(1, self.N): + with self._cases[i]: + if self._separate_builds: + self._activate_case(0) + self.build_indv( + sharedlib_only=sharedlib_only, model_only=model_only + ) + self._activate_case(i) + # Although we're doing separate builds, it still makes sense + # to share the sharedlibroot area with case1 so we can reuse + # pieces of the build from there. + if config.common_sharedlibroot: + # We need to turn off this change for E3SM because it breaks + # the MPAS build system + ## TODO: ^this logic mimics what's done in SystemTestsCompareTwo + # Confirm this is needed in SystemTestsCompareN as well. + self._cases[i].set_value( + "SHAREDLIBROOT", self._cases[0].get_value("SHAREDLIBROOT") + ) + + self.build_indv( + sharedlib_only=sharedlib_only, model_only=model_only + ) + else: + self._activate_case(0) + self.build_indv( + sharedlib_only=sharedlib_only, model_only=model_only + ) + # pio_typename may be changed during the build if the default is not a + # valid value for this build, update case i to reflect this change + for comp in self._cases[i].get_values("COMP_CLASSES"): + comp_pio_typename = "{}_PIO_TYPENAME".format(comp) + self._cases[i].set_value( + comp_pio_typename, + self._cases[0].get_value(comp_pio_typename), + ) + + # The following is needed when _case_two_setup has a case_setup call + # despite sharing the build (e.g., to change NTHRDS) + self._cases[i].set_value("BUILD_COMPLETE", True) + + def run_phase(self, success_change=False): # pylint: disable=arguments-differ + """ + Runs all phases of the N-phase test and compares base results with subsequent ones + If success_change is True, success requires some files to be different + """ + is_first_run = self._cases[0].get_value("IS_FIRST_RUN") + + # On a batch system with a multisubmit test "RESUBMIT" is used to track + # which phase is being ran. By the end of the test it equals 0. If the + # the test fails in a way where the RUN_PHASE is PEND then "RESUBMIT" + # does not get reset to 1 on a rerun and the first phase is skipped + # causing the COMPARE_PHASE to fail. This ensures that "RESUBMIT" will + # get reset if the test state is not correct for a rerun. 
+ # NOTE: "IS_FIRST_RUN" is reset in "case_submit.py" + ### todo: confirm below code block + if ( + is_first_run + and self._multisubmit + and self._cases[0].get_value("RESUBMIT") == 0 + ): + self._resetup_case(RUN_PHASE, reset=True) + + base_phase = ( + self._cases[0].get_value("RESUBMIT") == 1 + ) # Only relevant for multi-submit tests + run_type = self._cases[0].get_value("RUN_TYPE") + + logger.info( + "_multisubmit {} first phase {}".format(self._multisubmit, base_phase) + ) + + # First run + if not self._multisubmit or base_phase: + logger.info("Doing first run: " + self._run_descriptions[0]) + + # Add a PENDing compare phase so that we'll notice if the second part of compare two + # doesn't run. + compare_phase_name = "{}_{}_{}".format( + COMPARE_PHASE, self._run_suffixes[1], self._run_suffixes[0] + ) + with self._test_status: + self._test_status.set_status(compare_phase_name, TEST_PEND_STATUS) + + self._activate_case(0) + self._case_custom_prerun_action(0) + self.run_indv(suffix=self._run_suffixes[0]) + self._case_custom_postrun_action(0) + + # Subsequent runs + if not self._multisubmit or not base_phase: + # Subtle issue: case1 is already in a writeable state since it tends to be opened + # with a with statement in all the API entrances in CIME. subsq cases were created + # via clone, not a with statement, so it's not in a writeable state, so we need to + # use a with statement here to put it in a writeable state. + for i in range(1, self.N): + with self._cases[i]: + logger.info("Doing run {}: ".format(i) + self._run_descriptions[i]) + self._activate_case(i) + # This assures that case i namelists are populated + self._skip_pnl = False + # we need to make sure run i is properly staged. + if run_type != "startup": + self._cases[i].check_case() + + self._case_custom_prerun_action(i) + self.run_indv(suffix=self._run_suffixes[i]) + self._case_custom_postrun_action(i) + # Compare results + self._activate_case(0) + self._link_to_subsq_case_output(i) + self._component_compare_test( + self._run_suffixes[i], + self._run_suffixes[0], + success_change=success_change, + ignore_fieldlist_diffs=self._ignore_fieldlist_diffs, + ) + + # ======================================================================== + # Private methods + # ======================================================================== + + def _get_caseroots(self): + """ + Determines and returns caseroot for each cases and returns a list + """ + casename_base = self._cases[0].get_value("CASE") + caseroot_base = self._get_caseroot() + + return [caseroot_base] + [ + os.path.join(caseroot_base, "case{}".format(i), casename_base) + for i in range(1, self.N) + ] + + def _get_subsq_output_root(self, i): + """ + Determines and returns cime_output_root for case i where i!=0 + + Assumes that self._case1 is already set to point to the case1 object + """ + # Since subsequent cases have the same name as base, their CIME_OUTPUT_ROOT + # must also be different, so that anything put in + # $CIME_OUTPUT_ROOT/$CASE/ is not accidentally shared between + # cases. (Currently nothing is placed here, but this + # helps prevent future problems.) + + expect(i != 0, "ERROR: cannot call _get_subsq_output_root for the base class") + + output_root_i = os.path.join( + self._cases[0].get_value("CIME_OUTPUT_ROOT"), + self._cases[0].get_value("CASE"), + "case{}_output_root".format(i), + ) + return output_root_i + + def _get_subsq_case_exeroot(self, i): + """ + Gets exeroot for case i. + + Returns None if we should use the default value of exeroot. 
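+
+        For example, with separate builds and a hypothetical base EXEROOT of
+        /scratch/bld, case 1's executable would be built under
+        /scratch/bld/case1bld (see the path construction below).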
+ """ + + expect(i != 0, "ERROR: cannot call _get_subsq_case_exeroot for the base class") + + if self._separate_builds: + # subsequent case's EXEROOT needs to be somewhere that (1) is unique + # to this case (considering that all cases have the + # same case name), and (2) does not have too long of a path + # name (because too-long paths can make some compilers + # fail). + base_exeroot = self._cases[0].get_value("EXEROOT") + case_i_exeroot = os.path.join(base_exeroot, "case{}bld".format(i)) + else: + # Use default exeroot + case_i_exeroot = None + return case_i_exeroot + + def _get_subsq_case_rundir(self, i): + """ + Gets rundir for case i. + """ + + expect(i != 0, "ERROR: cannot call _get_subsq_case_rundir for the base class") + + # subsequent case's RUNDIR needs to be somewhere that is unique to this + # case (considering that all cases have the same case + # name). Note that the location below is symmetrical to the + # location of case's EXEROOT set in _get_subsq_case_exeroot. + base_rundir = self._cases[0].get_value("RUNDIR") + case_i_rundir = os.path.join(base_rundir, "case{}run".format(i)) + return case_i_rundir + + def _setup_cases_if_not_yet_done(self): + """ + Determines if subsequent cases already exist on disk. If they do, this method + creates the self.cases entries pointing to the case directories. If they + don't exist, then this method creates cases[i:1..N-1] as a clone of cases[0], and + sets the self.cases objects appropriately. + + This also does the setup for all cases including the base case. + + Assumes that the following variables are already set in self: + _caseroots + _cases[0] + + Sets self.cases[i:1..N-1] + """ + + # Use the existence of the cases[N-1] directory to signal whether we have + # done the necessary test setup for all cases: When we initially create + # the last case directory, we set up all cases; then, if we find that + # the last case directory already exists, we assume that the setup has + # already been done for all cases. (In some cases it could be problematic + # to redo the test setup when it's not needed - e.g., by appending things + # to user_nl files multiple times. This is why we want to make sure to just + # do the test setup once.) + if os.path.exists(self._caseroots[-1]): + for i in range(1, self.N): + caseroot_i = self._caseroots[i] + self._cases[i] = self._case_from_existing_caseroot(caseroot_i) + else: + # Create the subsequent cases by cloning the base case. + for i in range(1, self.N): + self._cases[i] = self._cases[0].create_clone( + self._caseroots[i], + keepexe=not self._separate_builds, + cime_output_root=self._get_subsq_output_root(i), + exeroot=self._get_subsq_case_exeroot(i), + rundir=self._get_subsq_case_rundir(i), + ) + self._write_info_to_subsq_case_output_root(i) + + # Set up all cases, including the base case. + for i in range(0, self.N): + caseroot_i = self._caseroots[i] + try: + self._setup_case(i) + except BaseException: + # If a problem occurred in setting up the test case i, it's + # important to remove the case i directory: If it's kept around, + # that would signal that test setup was done successfully, and + # thus doesn't need to be redone - which is not the case. Of + # course, we'll likely be left in an inconsistent state in this + # case, but if we didn't remove the case i directory, the next + # re-build of the test would think, "okay, setup is done, I can + # move on to the build", which would be wrong. 
+ if os.path.isdir(caseroot_i): + shutil.rmtree(caseroot_i) + self._activate_case(0) + logger.warning( + "WARNING: Test case setup failed. Case {} has been removed, " + "but the main case may be in an inconsistent state. " + "If you want to rerun this test, you should create " + "a new test rather than trying to rerun this one.".format(i) + ) + raise + + def _case_from_existing_caseroot(self, caseroot): + """ + Returns a Case object from an existing caseroot directory + + Args: + caseroot (str): path to existing caseroot + """ + return Case(case_root=caseroot, read_only=False) + + def _activate_case(self, i): + """ + Make case i active for upcoming calls + """ + os.chdir(self._caseroots[i]) + self._set_active_case(self._cases[i]) + + def _write_info_to_subsq_case_output_root(self, i): + """ + Writes a file with some helpful information to case[i]'s + output_root. + + The motivation here is two-fold: + + (1) Currently, case i's output_root directory is empty. + This could be confusing. + + (2) For users who don't know where to look, it could be hard to + find case i's bld and run directories. It is somewhat easier + to stumble upon case i output_root, so we put a file there + pointing them to the right place. + """ + + readme_path = os.path.join(self._get_subsq_output_root(i), "README") + try: + with open(readme_path, "w") as fd: + fd.write("This directory is typically empty.\n\n") + fd.write( + "case's run dir is here: {}\n\n".format( + self._cases[i].get_value("RUNDIR") + ) + ) + fd.write( + "case's bld dir is here: {}\n".format( + self._cases[i].get_value("EXEROOT") + ) + ) + except IOError: + # It's not a big deal if we can't write the README file + # (e.g., because the directory doesn't exist or isn't + # writeable; note that the former may be the case in unit + # tests). So just continue merrily on our way if there was a + # problem. + pass + + def _setup_case(self, i): + """ + Does all test-specific set up for the test case i. + """ + + # Set up case 1 + self._activate_case(i) + self._common_setup() + self._case_setup(i) + fix_single_exe_case(self._cases[i]) + if i == 0: + # Flush the case so that, if errors occur later, then at least base case is + # in a correct, post-setup state. This is important because the mere + # existence of a cases[-1] directory signals that setup is done. So if the + # build fails and the user rebuilds, setup won't be redone - so it's + # important to ensure that the results of setup are flushed to disk. + # + # Note that base case will be in its post-setup state even if case[i!=0] setup fails. + self._case.flush() + # This assures that case one namelists are populated + # and creates the case.test script + self._case.case_setup(test_mode=False, reset=True) + fix_single_exe_case(self._case) + else: + # Go back to base case to ensure that's where we are for any following code + self._activate_case(0) + + def _link_to_subsq_case_output(self, i): + """ + Looks for all files in rundir-i matching the pattern casename-i*.nc.run-i-suffix + + For each file found, makes a link in base rundir pointing to this file; the + link is renamed so that the original occurrence of casename-i is replaced + with base casename. + + For example: + + /glade/scratch/sacks/somecase/run/somecase.clm2.h0.nc.run2 -> + /glade/scratch/sacks/somecase.run2/run/somecase.run2.clm2.h0.nc.run2 + + If the destination link already exists and points to the correct + location, it is maintained as is. 
However, an exception will be raised + if the destination link is not exactly as it should be: we avoid + overwriting some existing file or link. + """ + + expect( + i != 0, "ERROR: cannot call _link_to_subsq_case_output for the base class" + ) + + base_casename = self._cases[0].get_value("CASE") + subsq_casename = self._cases[i].get_value("CASE") + base_rundir = self._cases[0].get_value("RUNDIR") + subsq_rundir = self._cases[i].get_value("RUNDIR") + + pattern = "{}*.nc.{}".format(subsq_casename, self._run_suffixes[i]) + subsq_case_files = glob.glob(os.path.join(subsq_rundir, pattern)) + for one_file in subsq_case_files: + file_basename = os.path.basename(one_file) + modified_basename = file_basename.replace(subsq_casename, base_casename, 1) + one_link = os.path.join(base_rundir, modified_basename) + if os.path.islink(one_link) and os.readlink(one_link) == one_file: + # Link is already set up correctly: do nothing + # (os.symlink raises an exception if you try to replace an + # existing file) + pass + else: + os.symlink(one_file, one_link) diff --git a/scripts/lib/CIME/SystemTests/system_tests_compare_two.py b/CIME/SystemTests/system_tests_compare_two.py similarity index 77% rename from scripts/lib/CIME/SystemTests/system_tests_compare_two.py rename to CIME/SystemTests/system_tests_compare_two.py index 6958cf81824..5eaac4948e1 100644 --- a/scripts/lib/CIME/SystemTests/system_tests_compare_two.py +++ b/CIME/SystemTests/system_tests_compare_two.py @@ -2,6 +2,16 @@ Base class for CIME system tests that involve doing two runs and comparing their output. +NOTE: Below is the flow of a multisubmit test. +Non-batch: +case_submit -> case_run # PHASE 1 + -> case_run # PHASE 2 + +batch: +case_submit -> case_run # PHASE 1 +case_run -> case_submit +case_submit -> case_run # PHASE 2 + In the __init__ method for your test, you MUST call SystemTestsCompareTwo.__init__ See the documentation of that method for details. @@ -14,6 +24,9 @@ (2) _case_two_setup This method will be called to set up case 2, the "test" case +Note that the base class will always call case_setup(reset=True) on +both case1 and case2 during setup. + In addition, they MAY require the following methods: (1) _common_setup @@ -35,25 +48,30 @@ """ from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.SystemTests.system_tests_common import SystemTestsCommon, fix_single_exe_case from CIME.case import Case -from CIME.utils import get_model +from CIME.config import Config from CIME.test_status import * import shutil, os, glob logger = logging.getLogger(__name__) -class SystemTestsCompareTwo(SystemTestsCommon): - def __init__(self, - case, - separate_builds = False, - run_two_suffix = 'test', - run_one_description = '', - run_two_description = '', - multisubmit = False, - ignore_fieldlist_diffs = False): +class SystemTestsCompareTwo(SystemTestsCommon): + def __init__( + self, + case, + separate_builds=False, + run_two_suffix="test", + run_one_description="", + run_two_description="", + multisubmit=False, + ignore_fieldlist_diffs=False, + case_two_keep_init_generated_files=False, + dry_run=False, + **kwargs + ): """ Initialize a SystemTestsCompareTwo object. Individual test cases that inherit from SystemTestsCompareTwo MUST call this __init__ method. @@ -77,11 +95,19 @@ def __init__(self, has some diagnostic fields that are missing from the other case), treat the two cases as identical. 
(This is needed for tests where one case exercises an option that produces extra diagnostic fields.) + case_two_keep_init_generated_files (bool): If True, then do NOT remove the + init_generated_files subdirectory of the case2 run directory before + running case2. This should typically be kept at its default (False) so + that rerunning a test gives the same behavior as in the initial run rather + than reusing init_generated_files in the second run. However, this option + is provided for the sake of specific tests, e.g., a test of the behavior + of running with init_generated_files in place. """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) self._separate_builds = separate_builds self._ignore_fieldlist_diffs = ignore_fieldlist_diffs + self._case_two_keep_init_generated_files = case_two_keep_init_generated_files # run_one_suffix is just used as the suffix for the netcdf files # produced by the first case; we may eventually remove this, but for now @@ -95,10 +121,12 @@ def __init__(self, # be set in the call to the constructor just like run_two_suffix # currently is. Or, if these tools are rewritten to work without any # suffix, then run_one_suffix can be removed entirely. - self._run_one_suffix = 'base' + self._run_one_suffix = "base" self._run_two_suffix = run_two_suffix.rstrip() - expect(self._run_two_suffix != self._run_one_suffix, - "ERROR: Must have different suffixes for run one and run two") + expect( + self._run_two_suffix != self._run_one_suffix, + "ERROR: Must have different suffixes for run one and run two", + ) self._run_one_description = run_one_description self._run_two_description = run_two_description @@ -113,9 +141,13 @@ def __init__(self, # _setup_cases_if_not_yet_done self._case2 = None - self._setup_cases_if_not_yet_done() + # Prevent additional setup_case calls when detecting support for `--single-exe` + if not dry_run: + self._setup_cases_if_not_yet_done() - self._multisubmit = multisubmit and self._case1.get_value("BATCH_SYSTEM") != "none" + self._multisubmit = ( + multisubmit and self._case1.get_value("BATCH_SYSTEM") != "none" + ) # ======================================================================== # Methods that MUST be implemented by specific tests that inherit from this @@ -192,11 +224,12 @@ def build_phase(self, sharedlib_only=False, model_only=False): # Although we're doing separate builds, it still makes sense # to share the sharedlibroot area with case1 so we can reuse # pieces of the build from there. 
- if get_model() != "e3sm": + if Config.instance().common_sharedlibroot: # We need to turn off this change for E3SM because it breaks # the MPAS build system - self._case2.set_value("SHAREDLIBROOT", - self._case1.get_value("SHAREDLIBROOT")) + self._case2.set_value( + "SHAREDLIBROOT", self._case1.get_value("SHAREDLIBROOT") + ) self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) else: @@ -206,43 +239,70 @@ def build_phase(self, sharedlib_only=False, model_only=False): # valid value for this build, update case2 to reflect this change for comp in self._case1.get_values("COMP_CLASSES"): comp_pio_typename = "{}_PIO_TYPENAME".format(comp) - self._case2.set_value(comp_pio_typename, self._case1.get_value(comp_pio_typename)) + self._case2.set_value( + comp_pio_typename, self._case1.get_value(comp_pio_typename) + ) # The following is needed when _case_two_setup has a case_setup call # despite sharing the build (e.g., to change NTHRDS) - self._case2.set_value("BUILD_COMPLETE",True) + self._case2.set_value("BUILD_COMPLETE", True) def run_phase(self, success_change=False): # pylint: disable=arguments-differ """ Runs both phases of the two-phase test and compares their results If success_change is True, success requires some files to be different """ - first_phase = self._case1.get_value("RESUBMIT") == 1 # Only relevant for multi-submit tests + is_first_run = self._case1.get_value("IS_FIRST_RUN") + + compare_phase_name = "{}_{}_{}".format( + COMPARE_PHASE, self._run_one_suffix, self._run_two_suffix + ) + + # On a batch system with a multisubmit test "RESUBMIT" is used to track + # which phase is being ran. By the end of the test it equals 0. If the + # the test fails in a way where the RUN_PHASE is PEND then "RESUBMIT" + # does not get reset to 1 on a rerun and the first phase is skiped + # causing the COMPARE_PHASE to fail. This ensures that "RESUBMIT" will + # get reset if the test state is not correct for a rerun. + # NOTE: "IS_FIRST_RUN" is reset in "case_submit.py" + if ( + is_first_run + and self._multisubmit + and self._case1.get_value("RESUBMIT") == 0 + ): + self._resetup_case(RUN_PHASE, reset=True) + + first_phase = ( + self._case1.get_value("RESUBMIT") == 1 + ) # Only relevant for multi-submit tests run_type = self._case1.get_value("RUN_TYPE") + logger.info( + "_multisubmit {} first phase {}".format(self._multisubmit, first_phase) + ) + # First run if not self._multisubmit or first_phase: - logger.info('Doing first run: ' + self._run_one_description) + logger.info("Doing first run: " + self._run_one_description) # Add a PENDing compare phase so that we'll notice if the second part of compare two # doesn't run. with self._test_status: - self._test_status.set_status("{}_{}_{}".format(COMPARE_PHASE, self._run_one_suffix, self._run_two_suffix), TEST_PEND_STATUS) + self._test_status.set_status(compare_phase_name, TEST_PEND_STATUS) self._activate_case1() self._case_one_custom_prerun_action() - self.run_indv(suffix = self._run_one_suffix) + self.run_indv(suffix=self._run_one_suffix) self._case_one_custom_postrun_action() # Second run - logger.info("_multisubmit {} first phase {}".format(self._multisubmit, first_phase)) if not self._multisubmit or not first_phase: # Subtle issue: case1 is already in a writeable state since it tends to be opened # with a with statement in all the API entrances in CIME. case2 was created via clone, # not a with statement, so it's not in a writeable state, so we need to use a with # statement here to put it in a writeable state. 
with self._case2: - logger.info('Doing second run: ' + self._run_two_description) + logger.info("Doing second run: " + self._run_two_description) self._activate_case2() # This assures that case two namelists are populated self._skip_pnl = False @@ -251,15 +311,21 @@ def run_phase(self, success_change=False): # pylint: disable=arguments-differ self._case2.check_case() self._case_two_custom_prerun_action() - self.run_indv(suffix = self._run_two_suffix) + self.run_indv( + suffix=self._run_two_suffix, + keep_init_generated_files=self._case_two_keep_init_generated_files, + ) self._case_two_custom_postrun_action() # Compare results # Case1 is the "main" case, and we need to do the comparisons from there self._activate_case1() self._link_to_case2_output() - self._component_compare_test(self._run_one_suffix, self._run_two_suffix, - success_change=success_change, - ignore_fieldlist_diffs=self._ignore_fieldlist_diffs) + self._component_compare_test( + self._run_one_suffix, + self._run_two_suffix, + success_change=success_change, + ignore_fieldlist_diffs=self._ignore_fieldlist_diffs, + ) def copy_case1_restarts_to_case2(self): """ @@ -271,9 +337,11 @@ def copy_case1_restarts_to_case2(self): files. """ rundir2 = self._case2.get_value("RUNDIR") - self._case1.archive_last_restarts(archive_restdir = rundir2, - rundir=self._case1.get_value("RUNDIR"), - link_to_restart_files = True) + self._case1.archive_last_restarts( + archive_restdir=rundir2, + rundir=self._case1.get_value("RUNDIR"), + link_to_restart_files=True, + ) # ======================================================================== # Private methods @@ -304,8 +372,11 @@ def _get_output_root2(self): # $CIME_OUTPUT_ROOT/$CASE/ is not accidentally shared between # case1 and case2. (Currently nothing is placed here, but this # helps prevent future problems.) - output_root2 = os.path.join(self._case1.get_value("CIME_OUTPUT_ROOT"), - self._case1.get_value("CASE"), "case2_output_root") + output_root2 = os.path.join( + self._case1.get_value("CIME_OUTPUT_ROOT"), + self._case1.get_value("CASE"), + "case2_output_root", + ) return output_root2 def _get_case2_exeroot(self): @@ -370,10 +441,11 @@ def _setup_cases_if_not_yet_done(self): try: self._case2 = self._case1.create_clone( self._caseroot2, - keepexe = not self._separate_builds, - cime_output_root = self._get_output_root2(), - exeroot = self._get_case2_exeroot(), - rundir = self._get_case2_rundir()) + keepexe=not self._separate_builds, + cime_output_root=self._get_output_root2(), + exeroot=self._get_case2_exeroot(), + rundir=self._get_case2_rundir(), + ) self._write_info_to_case2_output_root() self._setup_cases() except BaseException: @@ -388,10 +460,12 @@ def _setup_cases_if_not_yet_done(self): if os.path.isdir(self._caseroot2): shutil.rmtree(self._caseroot2) self._activate_case1() - logger.warning("WARNING: Test case setup failed. Case2 has been removed, " - "but the main case may be in an inconsistent state. " - "If you want to rerun this test, you should create " - "a new test rather than trying to rerun this one.") + logger.warning( + "WARNING: Test case setup failed. Case2 has been removed, " + "but the main case may be in an inconsistent state. " + "If you want to rerun this test, you should create " + "a new test rather than trying to rerun this one." 
+ ) raise def _case_from_existing_caseroot(self, caseroot): @@ -437,10 +511,16 @@ def _write_info_to_case2_output_root(self): try: with open(readme_path, "w") as fd: fd.write("This directory is typically empty.\n\n") - fd.write("case2's run dir is here: {}\n\n".format( - self._case2.get_value("RUNDIR"))) - fd.write("case2's bld dir is here: {}\n".format( - self._case2.get_value("EXEROOT"))) + fd.write( + "case2's run dir is here: {}\n\n".format( + self._case2.get_value("RUNDIR") + ) + ) + fd.write( + "case2's bld dir is here: {}\n".format( + self._case2.get_value("EXEROOT") + ) + ) except IOError: # It's not a big deal if we can't write the README file # (e.g., because the directory doesn't exist or isn't @@ -475,12 +555,16 @@ def _setup_cases(self): # This assures that case one namelists are populated # and creates the case.test script self._case.case_setup(test_mode=False, reset=True) + fix_single_exe_case(self._case) # Set up case 2 with self._case2: self._activate_case2() self._common_setup() self._case_two_setup() + self._case2.case_setup(test_mode=True, reset=True) + + fix_single_exe_case(self._case2) # Go back to case 1 to ensure that's where we are for any following code self._activate_case1() @@ -510,14 +594,13 @@ def _link_to_case2_output(self): rundir2 = self._case2.get_value("RUNDIR") run2suffix = self._run_two_suffix - pattern = '{}*.nc.{}'.format(casename2, run2suffix) + pattern = "{}*.nc.{}".format(casename2, run2suffix) case2_files = glob.glob(os.path.join(rundir2, pattern)) for one_file in case2_files: file_basename = os.path.basename(one_file) modified_basename = file_basename.replace(casename2, casename1, 1) one_link = os.path.join(rundir1, modified_basename) - if (os.path.islink(one_link) and - os.readlink(one_link) == one_file): + if os.path.islink(one_link) and os.readlink(one_link) == one_file: # Link is already set up correctly: do nothing # (os.symlink raises an exception if you try to replace an # existing file) diff --git a/CIME/SystemTests/test_mods.py b/CIME/SystemTests/test_mods.py new file mode 100644 index 00000000000..2db22c5833a --- /dev/null +++ b/CIME/SystemTests/test_mods.py @@ -0,0 +1,81 @@ +import logging +import os + +from CIME.utils import CIMEError +from CIME.XML.files import Files + +logger = logging.getLogger(__name__) + +MODS_DIR_VARS = ("TESTS_MODS_DIR", "USER_MODS_DIR") + + +def find_test_mods(comp_interface, test_mods): + """Finds paths from names of testmods. + + Testmod format is `${component}-${testmod}`. Each testmod is search for + it it's component respective `TESTS_MODS_DIR` and `USER_MODS_DIR`. + + Args: + comp_interface (str): Name of the component interface. + test_mods (list): List of testmods names. + + Returns: + List of paths for each testmod. + + Raises: + CIMEError: If a testmod is not in correct format. + CIMEError: If testmod could not be found. 
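+
+    Example (illustrative only; the testmod name is hypothetical and the returned
+    path depends on where TESTS_MODS_DIR / USER_MODS_DIR point for the component):
+
+        find_test_mods("nuopc", ["clm/default"])
+        # -> ["<TESTS_MODS_DIR>/clm/default"]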
+ """ + if test_mods is None: + return [] + + files = Files(comp_interface=comp_interface) + + test_mods_paths = [] + + logger.debug("Checking for testmods {}".format(test_mods)) + + for test_mod in test_mods: + if test_mod.find("/") != -1: + component, mod_path = test_mod.split("/", 1) + else: + raise CIMEError( + f"Invalid testmod, format should be `${{component}}-${{testmod}}`, got {test_mod!r}" + ) + + logger.debug( + "Searching for testmod {!r} for component {!r}".format(mod_path, component) + ) + + test_mod_path = None + + for var in MODS_DIR_VARS: + mods_dir = files.get_value(var, {"component": component}) + + try: + candidate_path = os.path.join(mods_dir, component, mod_path) + except TypeError: + # mods_dir is None + continue + + logger.debug( + "Checking for testmod {!r} in {!r}".format(test_mod, candidate_path) + ) + + if os.path.exists(candidate_path): + test_mod_path = candidate_path + + logger.debug( + "Found testmod {!r} for component {!r} in {!r}".format( + mod_path, component, test_mod_path + ) + ) + + break + + if test_mod_path is None: + raise CIMEError(f"Could not locate testmod {mod_path!r}") + + test_mods_paths.append(test_mod_path) + + return test_mods_paths diff --git a/scripts/lib/CIME/SystemTests/__init__.py b/CIME/SystemTests/test_utils/__init__.py similarity index 100% rename from scripts/lib/CIME/SystemTests/__init__.py rename to CIME/SystemTests/test_utils/__init__.py diff --git a/CIME/SystemTests/test_utils/user_nl_utils.py b/CIME/SystemTests/test_utils/user_nl_utils.py new file mode 100644 index 00000000000..930d683666b --- /dev/null +++ b/CIME/SystemTests/test_utils/user_nl_utils.py @@ -0,0 +1,59 @@ +""" +This module contains functions for working with user_nl files in system tests. +""" + +import os +import glob + + +def append_to_user_nl_files(caseroot, component, contents): + """ + Append the string(s) given by 'contents' to the end of each user_nl file for + the given component (there may be multiple such user_nl files in the case of + a multi-instance test). + + Also puts new lines before and after the appended text - so 'contents' + does not need to contain a trailing new line (but it's also okay if it + does). + + Args: + caseroot (str): Full path to the case directory + + component (str): Name of component (e.g., 'clm'). This is used to + determine which user_nl files are appended to. For example, for + component='clm', this function will operate on all user_nl files + matching the pattern 'user_nl_clm*'. (We do a wildcard match to + handle multi-instance tests.) + + contents (str or list-like): Contents to append to the end of each user_nl + file. If list-like, each item will be appended on its own line. + """ + + if isinstance(contents, str): + contents = [contents] + + files = _get_list_of_user_nl_files(caseroot, component) + + if len(files) == 0: + raise RuntimeError("No user_nl files found for component " + component) + + for one_file in files: + with open(one_file, "a") as user_nl_file: + user_nl_file.write("\n") + for c in contents: + user_nl_file.write(c + "\n") + + +def _get_list_of_user_nl_files(path, component): + """Get a list of all user_nl files in the current path for the component + of interest. For a component 'foo', we match all files of the form + user_nl_foo* - with a wildcard match at the end in order to match files + in a multi-instance case. + + The list of returned files gives their full path. 
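+
+    For example, for component 'clm' in a two-instance case this would typically
+    return something like (hypothetical caseroot path):
+        ['/path/to/case/user_nl_clm_0001', '/path/to/case/user_nl_clm_0002']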
+ """ + + file_pattern = "user_nl_" + component + "*" + file_list = glob.glob(os.path.join(path, file_pattern)) + + return file_list diff --git a/CIME/SystemTests/tsc.py b/CIME/SystemTests/tsc.py new file mode 100644 index 00000000000..be695b7f482 --- /dev/null +++ b/CIME/SystemTests/tsc.py @@ -0,0 +1,276 @@ +""" +Solution reproducibility test based on time-step convergence +The CESM/ACME model's +multi-instance capability is used to conduct an ensemble +of simulations starting from different initial conditions. + +This class inherits from SystemTestsCommon. +""" + +import os +import json +import logging + +from shutil import copytree + +import CIME.test_status +import CIME.utils +from CIME.status import append_testlog +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.case.case_setup import case_setup +from CIME.hist_utils import rename_all_hist_files +from CIME.XML.machines import Machines + +import evv4esm # pylint: disable=import-error +from evv4esm.__main__ import main as evv # pylint: disable=import-error + +evv_lib_dir = os.path.abspath(os.path.dirname(evv4esm.__file__)) + +logger = logging.getLogger(__name__) + + +NINST = 12 +SIM_LENGTH = 600 # seconds +OUT_FREQ = 10 # seconds +INSPECT_AT = [300, 450, 600] # seconds +INIT_COND_FILE_TEMPLATE = ( + "20240305.v3p0p0.F2010.ne4pg2_oQU480.chrysalis.{}.{}.0002-{:02d}-01-00000.nc" +) +VAR_LIST = [ + "T", + "Q", + "V", + "CLDLIQ", + "CLDICE", + "NUMLIQ", + "NUMICE", + "num_a1", + "num_a2", + "num_a3", +] +P_THRESHOLD = 0.005 + + +class TSC(SystemTestsCommon): + def __init__(self, case, **kwargs): + """ + initialize an object interface to the TSC test + """ + super(TSC, self).__init__(case, **kwargs) + if self._case.get_value("MODEL") == "e3sm": + self.atmmod = "eam" + self.lndmod = "elm" + self.atmmodIC = "eam" + self.lndmodIC = "elm" + else: + self.atmmod = "cam" + self.lndmod = "clm" + self.atmmodIC = "cam" + self.lndmodIC = "clm2" + + def build_phase(self, sharedlib_only=False, model_only=False): + # Only want this to happen once. It will impact the sharedlib build + # so it has to happen there. + if not model_only: + logging.warning("Starting to build multi-instance exe") + for comp in ["ATM", "OCN", "WAV", "GLC", "ICE", "ROF", "LND"]: + ntasks = self._case.get_value("NTASKS_{}".format(comp)) + self._case.set_value("ROOTPE_{}".format(comp), 0) + self._case.set_value("NINST_{}".format(comp), NINST) + self._case.set_value("NTASKS_{}".format(comp), ntasks * NINST) + + self._case.set_value("ROOTPE_CPL", 0) + self._case.set_value("NTASKS_CPL", ntasks * NINST) + self._case.flush() + + case_setup(self._case, test_mode=False, reset=True) + + self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) + + def _run_with_specified_dtime(self, dtime=2): + """ + Conduct one multi-instance run with a specified time step size. 
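+
+        For instance, dtime=2 implies ATM_NCPL = 86400/2 = 43200 couplings per
+        day, STOP_N = 600/2 = 300 steps for the 600 s simulation, and history
+        output every 10/2 = 5 steps (see the corresponding settings below).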
+ + :param dtime (int): Specified time step size in seconds + """ + coupling_frequency = 86400 // dtime + + self._case.set_value("ATM_NCPL", str(coupling_frequency)) + se_tstep = dtime / 12 + + nsteps = SIM_LENGTH // dtime + self._case.set_value("STOP_N", str(nsteps)) + self._case.set_value("STOP_OPTION", "nsteps") + + csmdata_root = self._case.get_value("DIN_LOC_ROOT") + csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4pg2_v3_init") + csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4pg2_v3_init") + + nstep_output = OUT_FREQ // dtime + for iinst in range(1, NINST + 1): + fatm_in = os.path.join( + csmdata_atm, + INIT_COND_FILE_TEMPLATE.format(self.atmmodIC, "i", iinst), + ) + flnd_in = os.path.join( + csmdata_lnd, + INIT_COND_FILE_TEMPLATE.format(self.lndmodIC, "r", iinst), + ) + + with open(f"user_nl_{self.atmmod}_{iinst:04d}", "w+") as atmnlfile: + + atmnlfile.write("ncdata = '{}' \n".format(fatm_in)) + + atmnlfile.write("dtime = {} \n".format(dtime)) + atmnlfile.write("se_tstep = {} \n".format(se_tstep)) + atmnlfile.write("iradsw = 2 \n") + atmnlfile.write("iradlw = 2 \n") + + atmnlfile.write("avgflag_pertape = 'I' \n") + atmnlfile.write("nhtfrq = {} \n".format(nstep_output)) + atmnlfile.write("mfilt = 1 \n") + atmnlfile.write("ndens = 1 \n") + atmnlfile.write("empty_htapes = .true. \n") + atmnlfile.write( + "fincl1 = 'PS','U','LANDFRAC',{} \n".format( + "".join(["'{}',".format(s) for s in VAR_LIST])[:-1] + ) + ) + + with open(f"user_nl_{self.lndmod}_{iinst:04d}", "w+") as lndnlfile: + lndnlfile.write("finidat = '{}' \n".format(flnd_in)) + lndnlfile.write("dtime = {} \n".format(dtime)) + + # Force rebuild namelists + self._skip_pnl = False + + self.run_indv() + + rename_all_hist_files(self._case, suffix="DT{:04d}".format(dtime)) + + def run_phase(self): + self._run_with_specified_dtime(dtime=2) + + if self._case.get_value("GENERATE_BASELINE"): + self._run_with_specified_dtime(dtime=1) + + def _compare_baseline(self): + with self._test_status as ts: + ts.set_status( + CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS + ) + + run_dir = self._case.get_value("RUNDIR") + case_name = self._case.get_value("CASE") + base_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASECMP_CASE"), + ) + + test_name = "{}".format(case_name.split(".")[-1]) + evv_config = { + test_name: { + "module": os.path.join(evv_lib_dir, "extensions", "tsc.py"), + "test-case": case_name, + "test-dir": run_dir, + "ref-case": "Baseline", + "ref-dir": base_dir, + "time-slice": [OUT_FREQ, SIM_LENGTH], + "inspect-times": INSPECT_AT, + "variables": VAR_LIST, + "p-threshold": P_THRESHOLD, + "component": self.atmmod, + } + } + + json_file = os.path.join(run_dir, ".".join([case_name, "json"])) + with open(json_file, "w") as config_file: + json.dump(evv_config, config_file, indent=4) + + evv_out_dir = os.path.join(run_dir, ".".join([case_name, "evv"])) + evv(["-e", json_file, "-o", evv_out_dir]) + + with open(os.path.join(evv_out_dir, "index.json"), "r") as evv_f: + evv_status = json.load(evv_f) + + comments = "" + for evv_ele in evv_status["Page"]["elements"]: + if "Table" in evv_ele: + comments = "; ".join( + "{}: {}".format(key, val[0]) + for key, val in evv_ele["Table"]["data"].items() + ) + if evv_ele["Table"]["data"]["Test status"][0].lower() == "pass": + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, + CIME.test_status.TEST_PASS_STATUS, + ) + break + + status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE) + 
mach_name = self._case.get_value("MACH") + mach_obj = Machines(machine=mach_name) + htmlroot = CIME.utils.get_htmlroot(mach_obj) + urlroot = CIME.utils.get_urlroot(mach_obj) + if htmlroot is not None: + with CIME.utils.SharedArea(): + copytree( + evv_out_dir, + os.path.join(htmlroot, "evv", case_name), + ) + if urlroot is None: + urlroot = "[{}_URL]".format(mach_name.capitalize()) + viewing = "{}/evv/{}/index.html".format(urlroot, case_name) + else: + viewing = ( + "{}\n" + " EVV viewing instructions can be found at: " + " https://github.com/ESMCI/CIME/blob/master/scripts/" + "climate_reproducibility/README.md#test-passfail-and-extended-output" + "".format(evv_out_dir) + ) + + comments = ( + "{} {} for test '{}'.\n" + " {}\n" + " EVV results can be viewed at:\n" + " {}".format( + CIME.test_status.BASELINE_PHASE, + status, + test_name, + comments, + viewing, + ) + ) + + append_testlog(comments, self._orig_caseroot) + + def _generate_baseline(self): + super(TSC, self)._generate_baseline() + + with CIME.utils.SharedArea(): + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) + + rundir = self._case.get_value("RUNDIR") + ref_case = self._case.get_value("RUN_REFCASE") + + env_archive = self._case.get_env("archive") + hists = env_archive.get_all_hist_files( + self._case.get_value("CASE"), + self.atmmod, + rundir, + r"DT\d*", + ref_case=ref_case, + ) + hists = [os.path.join(rundir, hist) for hist in hists] + logger.debug("TSC additional baseline files: {}".format(hists)) + for hist in hists: + basename = hist[hist.rfind(self.atmmod) :] + baseline = os.path.join(basegen_dir, basename) + if os.path.exists(baseline): + os.remove(baseline) + + CIME.utils.safe_copy(hist, baseline, preserve_meta=False) diff --git a/CIME/Tools/Makefile b/CIME/Tools/Makefile new file mode 100644 index 00000000000..f9ddeb77c95 --- /dev/null +++ b/CIME/Tools/Makefile @@ -0,0 +1,1071 @@ +#=============================================================================== +# +# Common Makefile: a framework for building all CIME components and more +# +#=============================================================================== +ifdef MODEL + ifndef COMP_NAME + $(warning "Variable MODEL is deprecated, please use COMP_NAME instead") + COMP_NAME:=$(MODEL) + else + ifneq ($(MODEL), $(COMP_NAME)) + $(warning "MODEL is inconsistent with COMP_NAME $(MODEL) $(COMP_NAME)") + COMP_NAME:=$(MODEL) + endif + endif +endif + +# Set up special characters +null := +comma := , + +# Load dependency search path. +dirs := . +dirs += $(shell cat Filepath) + +cpp_dirs := $(dirs) +# Add INCROOT to path for Depends and Include +MINCROOT := +ifdef INCROOT + cpp_dirs += $(INCROOT) + MINCROOT := $(INCROOT) +endif + +# Expand any tildes in directory names. Change spaces to colons. 
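+# (Illustratively: a Filepath listing "~/mycomp/src" and "./shr" would end up in
+#  VPATH as something like "/home/user/mycomp/src:./shr"; note that $(wildcard)
+#  also drops any listed directory that does not exist on disk.)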
+VPATH := $(foreach dir,$(cpp_dirs),$(wildcard $(dir))) +VPATH := $(subst $(space),:,$(VPATH)) + +RM := rm +CP := cp + +exec_se: $(EXEC_SE) Depends +complib: $(COMPLIB) Depends + +# Determine whether to compile threaded or not +# Set the THREADDIR for the shared build +# based on the threaded build status +ifeq ($(strip $(SMP)),TRUE) + THREADDIR = threads + compile_threaded = TRUE +else + ifeq ($(strip $(BUILD_THREADED)),TRUE) + THREADDIR = threads + compile_threaded = TRUE + else + THREADDIR = nothreads + compile_threaded = FALSE + endif +endif + +# set the debug directory based on the debug status +ifeq ($(strip $(DEBUG)),TRUE) + DEBUGDIR = debug +else + DEBUGDIR = nodebug +endif + +SLIBS ?= $(USER_SLIBS) + +ifndef MOD_SUFFIX + MOD_SUFFIX := mod +endif + +#=============================================================================== +# set CPP options (must use this before any flags or cflags settings) +#=============================================================================== + +CPPDEFS := $(USER_CPPDEFS) -D$(OS) + +-include $(CASEROOT)/Macros.make + +# Unless DEBUG mode is enabled, use NDEBUG to turn off assert statements. +ifeq ($(strip $(DEBUG)),TRUE) + # e3sm still has components that cannot build with -DDEBUG + ifneq ($(CIME_MODEL),e3sm) + CPPDEFS += -DDEBUG + endif +else + CPPDEFS += -DNDEBUG +endif + +# USE_ESMF_LIB is currently only defined in env_build.xml +ifeq ($(USE_ESMF_LIB), TRUE) + CPPDEFS += -DUSE_ESMF_LIB +endif + +ifeq ($(COMPARE_TO_NUOPC), TRUE) + CPPDEFS += -DCOMPARE_TO_NUOPC +endif + +ifeq ($(strip $(MPILIB)),mpi-serial) + CPPDEFS += -DNO_MPI2 +else + CPPDEFS += -DHAVE_MPI +endif + +ifeq (,$(SHAREDPATH)) + SHAREDPATH = $(COMPILER)/$(MPILIB)/$(DEBUGDIR)/$(THREADDIR) + INSTALL_SHAREDPATH = $(EXEROOT)/$(SHAREDPATH) +endif + +#=============================================================================== +# User-specified INCLDIR +#=============================================================================== +INCLDIR := -I. +ifdef USER_INCLDIR + INCLDIR += $(USER_INCLDIR) +endif + +FTORCH_DIR = $(SHAREDLIBROOT)/$(SHAREDPATH)/FTorch/ +ifneq "$(wildcard $(FTORCH_DIR) )" "" + INCLDIR += -I$(FTORCH_DIR)/modules -I$(FTORCH_DIR) + SLIBS += -Wl,-rpath,$(FTORCH_DIR) -L$(FTORCH_DIR) -lftorch_wrapper +endif +ifeq ($(strip $(USE_FTORCH)), TRUE) + CPPDEFS += -DUSE_FTORCH + SLIBS += -Wl,-rpath,$(TORCH_DIR)/lib -L$(TORCH_DIR)/lib -ltorch_cpu -lc10 -lstdc++ -Wl,-rpath,$(FTORCH_DIR)/src -L$(FTORCH_DIR)/src -lftorch +endif + +ifdef FSTDLIB_PKGCONFIG + STDLIB_INC := `pkg-config --cflags fortran_stdlib` + STDLIB_LIBS := `pkg-config --libs fortran_stdlib` + INCLDIR += $(STDLIB_INC) + SLIBS += $(STDLIB_LIBS) +endif + +ifeq ($(strip $(USE_FMS)), TRUE) + SLIBS += -lfms + INCLDIR += -I$(SHAREDLIBROOT)/$(SHAREDPATH)/FMS/ +endif + + +CPPDEFS += -DNUOPC_INTERFACE +INCLDIR += -I$(SHAREDLIBROOT)/$(SHAREDPATH)/CDEPS/fox/include -I$(SHAREDLIBROOT)/$(SHAREDPATH)/CDEPS/dshr +# FoX libraries are provided and built by CDEPS +FoX_LIBS := -L$(SHAREDLIBROOT)/$(SHAREDPATH)/CDEPS/fox/lib -lFoX_dom -lFoX_sax -lFoX_utils -lFoX_fsys -lFoX_wxml -lFoX_common -lFoX_fsys +ULIBS += -L$(SHAREDLIBROOT)/$(SHAREDPATH)/CDEPS/dshr -ldshr -L$(SHAREDLIBROOT)/$(SHAREDPATH)/CDEPS/streams -lstreams +SLIBS += $(FoX_LIBS) + +CPPDEFS += -DPIO2 + +# Not clear how to escape commas for libraries with their own configure +# script, and they don't need this defined anyway, so leave this out of +# FPPDEFS. 
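+# (Presumably USE_CONTIGUOUS appears in Fortran declarations such as
+#  "real, USE_CONTIGUOUS pointer :: p(:)", which preprocesses to
+#  "real, contiguous, pointer :: p(:)" when HAS_F2008_CONTIGUOUS is TRUE and to
+#  "real, pointer :: p(:)" otherwise.)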
+ifeq ($(HAS_F2008_CONTIGUOUS),TRUE) + CONTIGUOUS_FLAG := -DUSE_CONTIGUOUS=contiguous, +else + CONTIGUOUS_FLAG := -DUSE_CONTIGUOUS= +endif + +ifdef CPRE + CONTIGUOUS_FLAG := $(subst $(comma),\\$(comma),$(CONTIGUOUS_FLAG)) + CONTIGUOUS_FLAG := $(patsubst -D%,$(CPRE)%,$(CONTIGUOUS_FLAG)) +endif + +AR ?= ar +ARFLAGS ?= -r + +ifdef NETCDF_C_PATH + ifndef NETCDF_FORTRAN_PATH + $(error "NETCDF_C_PATH specified without NETCDF_FORTRAN_PATH") + endif + NETCDF_SEPARATE:=TRUE + ifndef INC_NETCDF_C + INC_NETCDF_C:=$(NETCDF_C_PATH)/include + endif + ifndef INC_NETCDF_FORTRAN + INC_NETCDF_FORTRAN:=$(NETCDF_FORTRAN_PATH)/include + endif + ifndef LIB_NETCDF_C + LIB_NETCDF_C:=$(NETCDF_C_PATH)/lib + endif + ifndef LIB_NETCDF_FORTRAN + LIB_NETCDF_FORTRAN:=$(NETCDF_FORTRAN_PATH)/lib + endif +else ifdef NETCDF_FORTRAN_PATH + $(error "NETCDF_FORTRAN_PATH specified without NETCDF_C_PATH") +else ifdef NETCDF_PATH + NETCDF_SEPARATE:=FALSE + ifndef INC_NETCDF + INC_NETCDF:=$(NETCDF_PATH)/include + endif + ifndef LIB_NETCDF + LIB_NETCDF:=$(NETCDF_PATH)/lib + endif +else + # No Netcdf is an error unless target is clean or DEP + ifneq ($(MAKECMDGOALS), db_files) + ifneq ($(MAKECMDGOALS), db_flags) + ifeq (,$(findstring clean,$(MAKECMDGOALS))) + $(error NETCDF not found: Define NETCDF_PATH or NETCDF_C_PATH and NETCDF_FORTRAN_PATH in config_machines.xml or cmake_macros) + endif + endif + endif +endif + +ifeq ($(MPILIB),mpi-serial) + ifdef PNETCDF_PATH + undefine PNETCDF_PATH + endif +else + ifdef PNETCDF_PATH + ifndef $(INC_PNETCDF) + INC_PNETCDF:=$(PNETCDF_PATH)/include + endif + ifndef LIB_PNETCDF + LIB_PNETCDF:=$(PNETCDF_PATH)/lib + endif + endif +endif + +# Set PETSc info if it is being used +ifeq ($(strip $(USE_PETSC)), TRUE) + ifdef PETSC_PATH + ifndef INC_PETSC + INC_PETSC:=$(PETSC_PATH)/include + endif + ifndef LIB_PETSC + LIB_PETSC:=$(PETSC_PATH)/lib + endif + else + $(error PETSC_PATH must be defined when USE_PETSC is TRUE) + endif + + # Get the "PETSC_LIB" list an env var + include $(PETSC_PATH)/lib/petsc/conf/variables +endif + +# Set MOAB info if it is being used +ifeq ($(COMP_INTERFACE), moab) + ifdef MOAB_PATH + CPPDEFS += -DHAVE_MOAB + ifndef INC_MOAB + INC_MOAB:=$(MOAB_PATH)/include + endif + ifndef LIB_MOAB + LIB_MOAB:=$(MOAB_PATH)/lib + endif + else + $(error MOAB_PATH must be defined when using moab driver) + endif +endif + +# Set HAVE_SLASHPROC on LINUX systems which are not bluegene or Darwin (OSx) + +ifeq ($(findstring -DLINUX,$(CPPDEFS)),-DLINUX) + ifneq ($(findstring DBG,$(CPPDEFS)),DBG) + ifneq ($(findstring Darwin,$(CPPDEFS)),Darwin) + CPPDEFS += -DHAVE_SLASHPROC + endif + endif +endif + +ifdef LIB_PNETCDF + CPPDEFS += -D_PNETCDF + SLIBS += -L$(LIB_PNETCDF) -lpnetcdf +endif + +# Set esmf.mk location with ESMF_LIBDIR having precedence over ESMFMKFILE +CIME_ESMFMKFILE := undefined_ESMFMKFILE +ifdef ESMFMKFILE + CIME_ESMFMKFILE := $(ESMFMKFILE) +endif +ifdef ESMF_LIBDIR + CIME_ESMFMKFILE := $(ESMF_LIBDIR)/esmf.mk +endif +# For compiling and linking with external ESMF. 
+# If linking to external ESMF library then include esmf.mk
+# ESMF_F90COMPILEPATHS
+# ESMF_F90ESMFLINKPATHS
+# ESMF_F90LINKRPATHS
+# ESMF_F90ESMFLINKLIBS
+# ESMF_F90LINKPATHS
+ifeq ($(USE_ESMF_LIB), TRUE)
+  -include $(CIME_ESMFMKFILE)
+  CPPDEFS += -DESMF_VERSION_MAJOR=$(ESMF_VERSION_MAJOR) -DESMF_VERSION_MINOR=$(ESMF_VERSION_MINOR)
+  FFLAGS += $(ESMF_F90COMPILEPATHS)
+  SLIBS += $(ESMF_F90ESMFLINKPATHS) $(ESMF_F90ESMFLINKRPATHS) $(ESMF_F90ESMFLINKLIBS) $(ESMF_F90LINKPATHS)
+endif
+
+# Stub libraries do not need to be built for nuopc driver
+# so it will override these settings on the command line
+ATM_PRESENT ?= TRUE
+ICE_PRESENT ?= TRUE
+LND_PRESENT ?= TRUE
+OCN_PRESENT ?= TRUE
+ROF_PRESENT ?= TRUE
+GLC_PRESENT ?= TRUE
+WAV_PRESENT ?= TRUE
+ESP_PRESENT ?= TRUE
+IAC_PRESENT ?= TRUE
+MED_PRESENT ?= TRUE
+ifeq ($(ULIBDEP),$(null))
+  ifneq ($(LIBROOT),$(null))
+    ifeq ($(ATM_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/libatm.a
+      CPPDEFS += -DATM_PRESENT
+    endif
+    ifeq ($(ICE_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/libice.a
+      CPPDEFS += -DICE_PRESENT
+    endif
+    ifeq ($(LND_PRESENT),TRUE)
+      ULIBDEP += $(LNDLIBDIR)/$(LNDLIB)
+      CPPDEFS += -DLND_PRESENT
+    endif
+    ifeq ($(OCN_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/libocn.a
+      CPPDEFS += -DOCN_PRESENT
+    endif
+    ifeq ($(ROF_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/librof.a
+      CPPDEFS += -DROF_PRESENT
+    endif
+    ifeq ($(GLC_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/libglc.a
+      CPPDEFS += -DGLC_PRESENT
+    endif
+    ifeq ($(WAV_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/libwav.a
+      CPPDEFS += -DWAV_PRESENT
+    endif
+    ifeq ($(ESP_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/libesp.a
+      CPPDEFS += -DESP_PRESENT
+    endif
+    ifeq ($(IAC_PRESENT),TRUE)
+      ULIBDEP += $(LIBROOT)/libiac.a
+    endif
+    ifeq ($(MED_PRESENT),TRUE)
+      CPPDEFS += -DMED_PRESENT
+    endif
+  endif
+endif
+
+CPPDEFS += -DPIO$(PIO_VERSION)
+
+ifdef CPRE
+  FPPDEFS := $(subst $(comma),\\$(comma),$(CPPDEFS))
+  FPPDEFS := $(patsubst -D%,$(CPRE)%,$(FPPDEFS))
+  EXTRA_PIO_FPPDEFS := $(subst $(comma),\\$(comma),$(EXTRA_PIO_CPPDEFS))
+  EXTRA_PIO_FPPDEFS := $(patsubst -D%,$(CPRE)%,$(EXTRA_PIO_FPPDEFS))
+else
+  FPPDEFS := $(CPPDEFS)
+  EXTRA_PIO_FPPDEFS := $(EXTRA_PIO_CPPDEFS)
+endif
+
+#===============================================================================
+# Set config args for pio to blank and then enable serial
+#===============================================================================
+ifndef CONFIG_ARGS
+  CONFIG_ARGS :=
+endif
+ifeq ($(findstring pio,$(COMP_NAME)),pio)
+  CONFIG_ARGS+= --enable-timing
+  ifeq ($(DEBUG),TRUE)
+    CONFIG_ARGS+= --enable-debug
+  endif
+endif
+
+#===============================================================================
+# MPI-serial library
+#===============================================================================
+
+ifeq ($(strip $(MPILIB)), mpi-serial)
+  CC := $(SCC)
+  FC := $(SFC)
+  CXX := $(SCXX)
+  MPIFC := $(SFC)
+  MPICC := $(SCC)
+  MPICXX := $(SCXX)
+  ifndef MPI_SERIAL_PATH
+    CONFIG_ARGS += MCT_PATH=$(SHAREDLIBROOT)/$(SHAREDPATH)/mpi-serial
+  else
+    CONFIG_ARGS += MCT_PATH=$(MPI_SERIAL_PATH)
+    INC_MPI := $(MPI_SERIAL_PATH)/include
+    LIB_MPI := $(MPI_SERIAL_PATH)/lib
+  endif
+else
+  CC := $(MPICC)
+  FC := $(MPIFC)
+  CXX := $(MPICXX)
+  ifdef MPI_PATH
+    INC_MPI := $(MPI_PATH)/include
+    LIB_MPI := $(MPI_PATH)/lib
+  endif
+endif
+LD := $(MPIFC)
+
+CSM_SHR_INCLUDE:=$(INSTALL_SHAREDPATH)/include
+# This is needed so that dependencies are found
+VPATH+=$(CSM_SHR_INCLUDE)
+
+#===============================================================================
+# Set include paths (needed after override for any model specific builds below)
+#===============================================================================
+INCLDIR += -I$(INSTALL_SHAREDPATH)/include -I$(INSTALL_SHAREDPATH)/finclude
+
+ifeq ($(NETCDF_SEPARATE), FALSE)
+  INCLDIR += -I$(INC_NETCDF)
+else ifeq ($(NETCDF_SEPARATE), TRUE)
+  INCLDIR += -I$(INC_NETCDF_C) -I$(INC_NETCDF_FORTRAN)
+endif
+ifdef MOD_NETCDF
+  INCLDIR += -I$(MOD_NETCDF)
+endif
+ifdef INC_MPI
+  INCLDIR += -I$(INC_MPI)
+endif
+ifdef INC_PNETCDF
+  INCLDIR += -I$(INC_PNETCDF)
+endif
+ifdef INC_PETSC
+  INCLDIR += -I$(INC_PETSC)
+endif
+ifdef INC_MOAB
+  INCLDIR += -I$(INC_MOAB)
+endif
+
+ifeq ($(COMP_NAME),driver)
+  INCLDIR += -I$(EXEROOT)/atm/obj -I$(EXEROOT)/ice/obj -I$(EXEROOT)/ocn/obj -I$(EXEROOT)/glc/obj -I$(EXEROOT)/rof/obj -I$(EXEROOT)/wav/obj -I$(EXEROOT)/esp/obj -I$(EXEROOT)/iac/obj
+# nagfor and gcc have incompatible LDFLAGS.
+# nagfor requires the weird "-Wl,-Wl,," syntax.
+  ifeq ($(strip $(COMPILER)),nag)
+    ifeq ($(NETCDF_SEPARATE), FALSE)
+      SLIBS += -Wl,-Wl,,-rpath=$(LIB_NETCDF)
+    else ifeq ($(NETCDF_SEPARATE), TRUE)
+      SLIBS += -Wl,-Wl,,-rpath=$(LIB_NETCDF_C)
+      SLIBS += -Wl,-Wl,,-rpath=$(LIB_NETCDF_FORTRAN)
+    endif
+  endif
+else
+  ifeq ($(strip $(COMPILER)),nag)
+    ifeq ($(DEBUG), TRUE)
+      ifneq (,$(filter $(strip $(MACH)),hobart izumi))
+        # GCC needs to be able to link to
+        # nagfor runtime to get autoconf
+        # tests to work.
+        CFLAGS += -Wl,--as-needed,--allow-shlib-undefined
+        SLIBS += -L$(COMPILER_PATH)/lib/NAG_Fortran -lf62rts
+      endif
+    endif
+  endif
+endif
+
+ifdef PIO_LIBDIR
+  ifeq ($(PIO_VERSION),$(PIO_VERSION_MAJOR))
+    INCLDIR += -I$(PIO_INCDIR)
+    SLIBS += -L$(PIO_LIBDIR)
+  else
+    # If PIO_VERSION_MAJOR doesn't match, build from source
+    unexport PIO_LIBDIR
+  endif
+endif
+PIO_LIBDIR ?= $(INSTALL_SHAREDPATH)/lib
+
+ifndef GPTL_LIBDIR
+  GPTL_LIBDIR=$(INSTALL_SHAREDPATH)/lib
+endif
+
+ifndef GLC_DIR
+  GLC_DIR=$(EXEROOT)/glc
+endif
+ifndef CISM_LIBDIR
+  CISM_LIBDIR=$(GLC_DIR)/lib
+endif
+ifndef GLCROOT
+  # Backwards compatibility
+  GLCROOT=$(CIMEROOT)/../components/cism
+endif
+
+INCLDIR += -I$(INSTALL_SHAREDPATH)/include
+
+CFLAGS+=$(CPPDEFS)
+CXXFLAGS+=$(CPPDEFS)
+CONFIG_ARGS += CC="$(CC)" FC="$(FC)" MPICC="$(MPICC)" \
+               MPIFC="$(MPIFC)" FCFLAGS="$(FFLAGS) $(FREEFLAGS) $(INCLDIR)" \
+               CPPDEFS="$(CPPDEFS)" CFLAGS="$(CFLAGS) -I.. $(INCLDIR)" \
+               LDFLAGS="$(LDFLAGS)"
+
+ifeq ($(NETCDF_SEPARATE), FALSE)
+  CONFIG_ARGS += NETCDF_PATH=$(NETCDF_PATH)
+else ifeq ($(NETCDF_SEPARATE), TRUE)
+  CONFIG_ARGS += NETCDF_PATH=$(NETCDF_C_PATH)
+endif
+
+FFLAGS += $(FPPDEFS)
+FFLAGS_NOOPT += $(FPPDEFS)
+
+ifeq ($(findstring -cosp,$(CAM_CONFIG_OPTS)),-cosp)
+  # The following is for the COSP simulator code:
+  COSP_LIBDIR:=$(abspath $(EXEROOT)/atm/obj/cosp)
+  ifeq ($(COMP_NAME),driver)
+    INCLDIR+=-I$(COSP_LIBDIR)
+  endif
+endif
+
+COMP_ATM ?= $(shell $(CASEROOT)/xmlquery --caseroot $(CASEROOT) COMP_ATM --value)
+ifeq ($(strip $(COMP_ATM)),cam)
+  CAM_DYCORE ?= $(shell $(CASEROOT)/xmlquery --caseroot $(CASEROOT) CAM_DYCORE --value)
+  ifeq ($(CAM_DYCORE),fv3)
+    FV3CORE_LIBDIR:=$(abspath $(EXEROOT)/atm/obj/atmos_cubed_sphere)
+    INCLDIR+=-I$(FV3CORE_LIBDIR) -I$(FV3CORE_LIBDIR)/../ -I../$(INSTALL_SHAREDPATH)/include -I../$(CSM_SHR_INCLUDE) -I$(abspath $(EXEROOT)/FMS) -I$(CIMEROOT)/../libraries/FMS/src/include
+  endif
+
+  ifeq ($(CAM_DYCORE),mpas)
+    # For building CAM with the MPAS dycore
+    MPAS_LIBDIR:=$(abspath $(EXEROOT)/atm/obj/mpas)
+    ifeq ($(COMP_NAME),driver)
+      # This is needed by some compilers when building the driver.
+ INCLDIR+=-I$(MPAS_LIBDIR) + endif + endif +endif + +ifeq ($(COMP_NAME),cam) + # These RRTMG files take an extraordinarily long time to compile with optimization. + # Until mods are made to read the data from files, just remove optimization from + # their compilation. +rrtmg_lw_k_g.o: rrtmg_lw_k_g.f90 + $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $< + +rrtmg_sw_k_g.o: rrtmg_sw_k_g.f90 + $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $< + +ifdef COSP_LIBDIR +INCLDIR+=-I$(COSP_LIBDIR) -I$(COSP_LIBDIR)/../ -I../$(INSTALL_SHAREDPATH)/include -I../$(CSM_SHR_INCLUDE) +$(COSP_LIBDIR)/libcosp.a: cam_abortutils.o + $(MAKE) -C $(COSP_LIBDIR) F90='$(FC)' F90FLAGS='$(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FC_AUTO_R8)' \ + F90FLAGS_noauto='$(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS)' \ + F90FLAGS_fixed='$(INCLDIR) $(INCS) $(FIXEDFLAGS) $(FFLAGS) $(FC_AUTO_R8)' + +cospsimulator_intr.o: $(COSP_LIBDIR)/libcosp.a +endif + +ifdef FV3CORE_LIBDIR +$(FV3CORE_LIBDIR)/libfv3core.a: $(LIBROOT)/libfms.a + $(MAKE) -C $(FV3CORE_LIBDIR) complib COMPLIB='$(FV3CORE_LIBDIR)/libfv3core.a' F90='$(FC)' CC='$(CC)' FFLAGS='$(FFLAGS) $(FC_AUTO_R8)' CFLAGS='$(CFLAGS)' INCLDIR='$(INCLDIR)' FC_TYPE='$(COMPILER)' + +dyn_grid.o: $(FV3CORE_LIBDIR)/libfv3core.a +endif + +ifdef MPAS_LIBDIR + ABS_INSTALL_SHAREDPATH = $(abspath $(INSTALL_SHAREDPATH)) + ABS_ESMF_PATH = $(abspath $(INSTALL_SHAREDPATH)) + ifeq ($(PIO_VERSION),2) + PIODEF := -DUSE_PIO2 + endif + INCLDIR+=-I$(MPAS_LIBDIR) + INCLDIR+=-I$(abspath $(EXEROOT)/atm/obj) + +# To ensure that the MPAS-A dycore library is always updated whenever dycore source +# files are changed, the libmpas target here is declared as PHONY; under ideal circumstances +# this isn't necessary, since libmpas should never be an actual file (the library that is created +# is named libmpas.a), but adding the PHONY declaration provides an extra bit of safety +.PHONY: libmpas +# The CASEROOT, COMPILER and MACH are added so that the Depends file could be visible to +# the MPAS dycore. +# The GPUFLAGS is added so that the GPU flags defined in ccs_config_cesm could also be +# used to build the MPAS dycore if needed. +libmpas: cam_abortutils.o physconst.o + $(MAKE) -C $(MPAS_LIBDIR) CC="$(CC)" FC="$(FC)" PIODEF="$(PIODEF)" \ + FFLAGS='$(FREEFLAGS) $(FFLAGS)' GPUFLAGS='$(GPUFLAGS)' \ + CASEROOT='$(CASEROOT)' COMPILER='$(COMPILER)' MACH='$(MACH)' \ + FCINCLUDES='$(INCLDIR) $(INCS) -I$(ABS_INSTALL_SHAREDPATH)/include -I$(ABS_ESMF_PATH)/include' + +dyn_comp.o: libmpas +dyn_grid.o: libmpas +endif + +endif + +# System libraries (netcdf, mpi, pnetcdf, esmf, trilinos, etc.) 
+ifeq ($(NETCDF_SEPARATE), FALSE)
+  ifeq (,$(findstring $(LIB_NETCDF),$(SLIBS)))
+    SLIBS += -L$(LIB_NETCDF) -lnetcdff -lnetcdf
+  endif
+else ifeq ($(NETCDF_SEPARATE), TRUE)
+  ifeq (,$(findstring $(LIB_NETCDF_C), $(SLIBS)))
+    SLIBS += -L$(LIB_NETCDF_FORTRAN) -L$(LIB_NETCDF_C) -lnetcdff -lnetcdf
+  else ifeq (,$(findstring $(LIB_NETCDF_FORTRAN), $(SLIBS)))
+    SLIBS += -L$(LIB_NETCDF_FORTRAN) -L$(LIB_NETCDF_C) -lnetcdff -lnetcdf
+  endif
+endif
+
+ifdef LAPACK_LIBDIR
+  SLIBS += -L$(LAPACK_LIBDIR) -llapack -lblas
+endif
+ifdef LIB_MPI
+  ifndef MPI_SERIAL_PATH
+    ifndef MPI_LIB_NAME
+      SLIBS += -L$(LIB_MPI) -lmpi
+    else
+      SLIBS += -L$(LIB_MPI) -l$(MPI_LIB_NAME)
+    endif
+  endif
+endif
+
+# Add xios libraries for NEMO
+ifdef XIOS_PATH
+  SLIBS += -L$(XIOS_PATH)/lib -lxios -lstdc++
+endif
+
+# Add PETSc libraries
+ifeq ($(strip $(USE_PETSC)), TRUE)
+  SLIBS += ${PETSC_LIB}
+endif
+
+# Add MOAB libraries. These are defined in the MOAB_LINK_LIBS env var that was included above
+ifeq ($(strip $(USE_MOAB)), TRUE)
+  SLIBS += $(IMESH_LIBS)
+endif
+
+# Remove arch flag if it exists
+F90_LDFLAGS := $(filter-out -arch%,$(LDFLAGS))
+ifdef GPUFLAGS
+  F90_LDFLAGS += $(GPUFLAGS)
+endif
+
+# Machine stuff to appear last on the link step
+ifndef MLIBS
+  MLIBS :=
+endif
+
+#------------------------------------------------------------------------------
+# Drive configure scripts for support libraries
+#------------------------------------------------------------------------------
+
+ifneq ("$(wildcard $(SRCROOT)/libraries)","")
+  EXTERN_PATH = $(SRCROOT)/libraries
+else
+  EXTERN_PATH = $(SRCROOT)/externals
+endif
+
+$(SHAREDLIBROOT)/$(SHAREDPATH)/mpi-serial/Makefile.conf:
+	@echo "SHAREDLIBROOT |$(SHAREDLIBROOT)| SHAREDPATH |$(SHAREDPATH)|"; \
+	$(CONFIG_SHELL) $(EXTERN_PATH)/mpi-serial/configure $(CONFIG_ARGS) --srcdir $(EXTERN_PATH)/mpi-serial
+
+ifndef IO_LIB_SRCROOT
+  ifndef PIO_SRCROOT
+    PIO_SRCROOT = $(EXTERN_PATH)
+  endif
+
+  PIO_SRC_DIR = $(PIO_SRCROOT)/parallelio
+else
+  PIO_SRC_DIR = $(IO_LIB_SRCROOT)/$(IO_LIB_v$(PIO_VERSION)_SRCDIR)
+endif
+
+# This is a pio2 library
+PIOLIB = $(PIO_LIBDIR)/libpiof.a $(PIO_LIBDIR)/libpioc.a
+PIOLIBNAME = -lpiof -lpioc
+
+GPTLLIB = $(GPTL_LIBDIR)/libgptl.a
+
+ULIBS += -L$(INSTALL_SHAREDPATH)/lib
+
+ULIBS += -lcsm_share -L$(INSTALL_SHAREDPATH)/lib $(PIOLIBNAME) -lgptl
+
+#------------------------------------------------------------------------------
+# Drive cmake script for cism and pio
+#------------------------------------------------------------------------------
+
+ifndef CMAKE_OPTS
+  CMAKE_OPTS :=
+endif
+# note that the fortran flags include neither the FREEFLAGS nor the
+# FIXEDFLAGS, so that both free & fixed code can be built (cmake
+# doesn't seem to be able to differentiate between free & fixed
+# fortran flags)
+CMAKE_OPTS += -Wno-dev -D CMAKE_Fortran_FLAGS:STRING="$(FFLAGS) $(EXTRA_PIO_FPPDEFS) $(INCLDIR)" \
+              -D CMAKE_C_FLAGS:STRING="$(CFLAGS) $(EXTRA_PIO_CPPDEFS) $(INCLDIR)" \
+              -D CMAKE_CXX_FLAGS:STRING="$(CXXFLAGS) $(EXTRA_PIO_CPPDEFS) $(INCLDIR)" \
+              -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \
+              -D GPTL_PATH:STRING=$(INSTALL_SHAREDPATH) \
+              -D PIO_ENABLE_TESTS:BOOL=OFF \
+              -D PIO_USE_MALLOC:BOOL=ON \
+              -D USER_CMAKE_MODULE_PATH:LIST="$(CIMEROOT)/CIME/non_py/src/CMake;$(EXTERN_PATH)/parallelio/cmake"
+
+ifdef ADIOS2_DIR
+  CMAKE_OPTS += -D WITH_ADIOS2:BOOL=ON
+endif
+ifeq ($(DEBUG), TRUE)
+  CMAKE_OPTS += -D PIO_ENABLE_LOGGING=ON
+endif
+
+# Allow for separate installations of the NetCDF C and Fortran libraries
+ifeq ($(NETCDF_SEPARATE), FALSE)
+  CMAKE_OPTS += -D
NetCDF_PATH:PATH=$(NETCDF_PATH) +else ifeq ($(NETCDF_SEPARATE), TRUE) + # NETCDF_Fortran_DIR points to the separate + # installation of Fortran NetCDF for PIO + CMAKE_OPTS += -D NetCDF_C_PATH:PATH=$(NETCDF_C_PATH) \ + -D NetCDF_Fortran_PATH:PATH=$(NETCDF_FORTRAN_PATH) +endif + +ifdef ZLIB_PATH + CMAKE_OPTS += -D LIBZ_PATH:STRING="$(ZLIB_PATH)" +endif + +ifdef SZIP_PATH + CMAKE_OPTS += -D SZIP_PATH:STRING="$(SZIP_PATH)" +endif + +ifdef HDF5_PATH + CMAKE_OPTS += -D HDF5_PATH:STRING="$(HDF5_PATH)" +endif + +ifdef PNETCDF_PATH + CMAKE_OPTS += -D PnetCDF_PATH:STRING="$(PNETCDF_PATH)" +else + CMAKE_OPTS += -D WITH_PNETCDF:LOGICAL=FALSE -D PIO_USE_MPIIO:LOGICAL=FALSE +endif +ifdef PIO_FILESYSTEM_HINTS + CMAKE_OPTS += -D PIO_FILESYSTEM_HINTS:STRING="$(PIO_FILESYSTEM_HINTS)" +endif +ifeq ($(MPILIB),mpi-serial) + CMAKE_OPTS += -D PIO_USE_MPISERIAL=TRUE -D MPISERIAL_PATH=$(INSTALL_SHAREDPATH) +endif + +# This captures the many cism-specific options to cmake +CMAKE_OPTS += $(USER_CMAKE_OPTS) + +# CMake doesn't seem to like it when you define compilers via -D +# CMAKE_C_COMPILER, etc., when you rerun cmake with an existing +# cache. So doing this via environment variables instead. +ifndef CMAKE_ENV_VARS + CMAKE_ENV_VARS := +endif +CMAKE_ENV_VARS += CC=$(CC) \ + CXX=$(CXX) \ + FC=$(FC) \ + LDFLAGS="$(LDFLAGS)" + +F90_LDFLAGS += $(FFLAGS) + +# We declare GLCMakefile to be a phony target so that cmake is +# always rerun whenever invoking 'make GLCMakefile'; this is +# desirable to pick up any new source files that may have been added +.PHONY: GLCMakefile +GLCMakefile: + cd $(GLC_DIR); \ + $(CMAKE_ENV_VARS) cmake $(CMAKE_OPTS) $(GLCROOT)/source_cism + +$(PIO_LIBDIR)/Makefile: + cd $(PIO_LIBDIR); \ + $(CMAKE_ENV_VARS) cmake $(CMAKE_OPTS) $(PIO_SRC_DIR) + +FTMakefile: + cd $(FTORCH_DIR) + $(CMAKE_ENV_VARS) cmake $(CMAKE_OPTS) $(FTORCH_SRC_DIR) + +#------------------------------------------------------------------------------- +# Build & include dependency files +#------------------------------------------------------------------------------- + +touch_filepath: + touch Filepath + +# Get list of files and build dependency file for all .o files +# using perl scripts mkSrcfiles and mkDepends +# if a source is of form .F90.in strip the .in before creating the list of objects +SOURCES := $(shell cat Srcfiles) +BASENAMES := $(basename $(basename $(SOURCES))) +OBJS := $(addsuffix .o, $(BASENAMES)) +INCS := $(foreach dir,$(cpp_dirs),-I$(dir)) + +CURDIR := $(shell pwd) + +Depends: Srcfiles Deppath + $(CASETOOLS)/mkDepends $(USER_MKDEPENDS_OPTS) Deppath Srcfiles > $@ + +Deppath: Filepath + $(CP) -f Filepath $@ + @echo "$(MINCROOT)" >> $@ + +Srcfiles: Filepath + $(CASETOOLS)/mkSrcfiles + +Filepath: + @echo "$(VPATH)" > $@ + +#------------------------------------------------------------------------------- +# echo file names, paths, compile flags, etc. 
used during build +#------------------------------------------------------------------------------- + +db_files: + @echo " " + @echo "* VPATH := $(VPATH)" + @echo "* INCS := $(INCS)" + @echo "* OBJS := $(OBJS)" +db_flags: + @echo " " + @echo "* cc := $(CC) $(CFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F.o := $(FC) $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR)" + @echo "* .F90.o := $(FC) $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR)" + +#------------------------------------------------------------------------------- +# Rules used for the tests run by "configure -test" +#------------------------------------------------------------------------------- + +test_fc: test_fc.o + $(LD) -o $@ test_fc.o $(F90_LDFLAGS) +ifeq ($(NETCDF_SEPARATE), FALSE) +test_nc: test_nc.o + $(LD) -o $@ test_nc.o -L$(LIB_NETCDF) -lnetcdff -lnetcdf $(F90_LDFLAGS) +else ifeq ($(NETCDF_SEPARATE), TRUE) +test_nc: test_nc.o + $(LD) -o $@ test_nc.o -L$(LIB_NETCDF_FORTRAN) -L$(LIB_NETCDF_C) -lnetcdff -lnetcdf $(F90_LDFLAGS) +endif +test_mpi: test_mpi.o + $(LD) -o $@ test_mpi.o $(F90_LDFLAGS) +test_esmf: test_esmf.o + $(LD) -o $@ test_esmf.o $(F90_LDFLAGS) + +#------------------------------------------------------------------------------- +# create list of component libraries - hard-wired for current ccsm components +#------------------------------------------------------------------------------- +ifeq ($(COMP_LND),clm) + USE_SHARED_CLM=TRUE +else + USE_SHARED_CLM=FALSE +endif + +ifeq ($(USE_SHARED_CLM),FALSE) + LNDOBJDIR = $(EXEROOT)/lnd/obj + LNDLIBDIR=$(LIBROOT) + ifeq ($(COMP_LND),clm) + LNDLIB := libclm.a + else + LNDLIB := liblnd.a + endif + INCLDIR += -I$(LNDOBJDIR) +else + LNDLIB := libclm.a + LNDOBJDIR = $(SHAREDLIBROOT)/$(SHAREDPATH)/clm/obj + LNDLIBDIR = $(EXEROOT)/$(SHAREDPATH)/lib + INCLDIR += -I$(INSTALL_SHAREDPATH)/include + ifeq ($(COMP_NAME),clm) + INCLUDE_DIR = $(INSTALL_SHAREDPATH)/include + endif +endif +ifeq ($(LND_PRESENT),TRUE) + INCLDIR += -I$(LNDOBJDIR) +endif +ifeq ($(COMP_GLC), cism) + ULIBDEP += $(CISM_LIBDIR)/libglimmercismfortran.a +endif +ifeq ($(OCN_SUBMODEL),moby) + ULIBDEP += $(LIBROOT)/libmoby.a +endif + +ifdef COSP_LIBDIR + ifeq ($(CIME_MODEL),cesm) + ULIBDEP += $(COSP_LIBDIR)/libcosp.a + endif +endif + +ifdef FV3CORE_LIBDIR + ULIBDEP += $(FV3CORE_LIBDIR)/libfv3core.a + ULIBDEP += $(LIBROOT)/libfms.a +endif + +ifdef MPAS_LIBDIR + ULIBDEP += $(MPAS_LIBDIR)/libmpas.a +endif + +ifndef CLIBS + ifdef ULIBDEP + # For each occurrence of something like /path/to/foo/libbar.a in ULIBDEP, + # CLIBS will contain -L/path/to/foo -lbar + CLIBS := $(foreach LIBDEP,$(strip $(ULIBDEP)), -L$(dir $(LIBDEP)) $(patsubst lib%.a,-l%,$(notdir $(LIBDEP)))) + endif +endif + +# libcsm_share.a is in ULIBDEP, but -lcsm_share is in ULIBS rather than CLIBS, +# so this needs to be added after creating CLIBS above + +CSMSHARELIB = $(INSTALL_SHAREDPATH)/lib/libcsm_share.a +ULIBDEP += $(CSMSHARELIB) + +GENF90 ?= $(CIMEROOT)/CIME/non_py/externals/genf90/genf90.pl + +#------------------------------------------------------------------------------- +# build rules: +#------------------------------------------------------------------------------- + +.SUFFIXES: +.SUFFIXES: .F90 .F .f90 .f .c .cpp .o .in + +ifeq ($(MPILIB),mpi-serial) + ifdef MPI_SERIAL_PATH + MPISERIAL = $(MPI_SERIAL_PATH)/lib/libmpi-serial.a + MLIBS += -L$(MPI_SERIAL_PATH)/lib -lmpi-serial + CMAKE_OPTS += -DMPI_C_INCLUDE_PATH=$(MPI_SERIAL_PATH)/include \ + -DMPI_Fortran_INCLUDE_PATH=$(MPI_SERIAL_PATH)/include \ + -DMPI_C_LIBRARIES=$(MPI_SERIAL_PATH)/lib/libmpi-serial.a \ + 
-DMPI_Fortran_LIBRARIES=$(MPI_SERIAL_PATH)/lib/libmpi-serial.a + else + MPISERIAL = $(INSTALL_SHAREDPATH)/lib/libmpi-serial.a + MLIBS += -L$(INSTALL_SHAREDPATH)/lib -lmpi-serial + CMAKE_OPTS += -DMPI_C_INCLUDE_PATH=$(INSTALL_SHAREDPATH)/include \ + -DMPI_Fortran_INCLUDE_PATH=$(INSTALL_SHAREDPATH)/include \ + -DMPI_C_LIBRARIES=$(INSTALL_SHAREDPATH)/lib/libmpi-serial.a \ + -DMPI_Fortran_LIBRARIES=$(INSTALL_SHAREDPATH)/lib/libmpi-serial.a + endif +endif + +$(PIOLIB) : $(MPISERIAL) $(GPTLLIB) + +$(CSMSHARELIB): $(PIOLIB) $(GPTLLIB) + +ifneq ($(findstring csm_share,$(COMP_NAME)),csm_share) + $(OBJS): $(CSMSHARELIB) +else + complib: install_lib +endif + +install_lib: $(COMPLIB) + $(CP) -p $(COMPLIB) $(CSMSHARELIB) + $(CP) -p *.$(MOD_SUFFIX) *.h $(INCLUDE_DIR) + +# This rule writes the include flags and the link flags used in the $(EXEC_SE) rule below +# It expects the variable OUTPUT_FILE to be defined +# Set COMP_NAME=driver to get the same flags as are used when building the driver +.PHONY: write_include_and_link_flags +write_include_and_link_flags: + @$(RM) -f $(OUTPUT_FILE) + @echo CIME_CSM_SHR_INCLUDE = $(CSM_SHR_INCLUDE) >> $(OUTPUT_FILE) + @echo CIME_ESMF_F90COMPILEPATHS = $(ESMF_F90COMPILEPATHS) >> $(OUTPUT_FILE) + @echo CIME_INCLDIR = $(INCLDIR) >> $(OUTPUT_FILE) + @echo CIME_INCS = $(INCS) >> $(OUTPUT_FILE) + @echo CIME_CLIBS = $(CLIBS) >> $(OUTPUT_FILE) + @echo CIME_ULIBS = $(ULIBS) >> $(OUTPUT_FILE) + @echo CIME_SLIBS = $(SLIBS) >> $(OUTPUT_FILE) + @echo CIME_MLIBS = $(MLIBS) >> $(OUTPUT_FILE) + @echo CIME_F90_LDFLAGS = $(F90_LDFLAGS) >> $(OUTPUT_FILE) + +# If variables are added to this rule, similar changes should be made in the write_link_flags rule above +$(EXEC_SE): $(OBJS) $(ULIBDEP) $(CSMSHARELIB) $(PIOLIB) $(GPTLLIB) + $(LD) -o $(EXEC_SE) $(OBJS) $(CLIBS) $(ULIBS) $(SLIBS) $(MLIBS) $(F90_LDFLAGS) + +$(COMPLIB): $(OBJS) + $(AR) $(ARFLAGS) $(COMPLIB) $(OBJS) + +.c.o: + $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) $< +.F.o: + $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FIXEDFLAGS) $< +.f.o: + $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FIXEDFLAGS) $< +.f90.o: + $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $< +.F90.o: + $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $(CONTIGUOUS_FLAG) $< +.cpp.o: + $(CXX) -c $(INCLDIR) $(INCS) $(CXXFLAGS) $< + +%.F90: %.F90.in + $(GENF90) $< > $@ + +clean_dependsatm: + $(RM) -f $(EXEROOT)/atm/obj/Srcfiles + +clean_dependscpl: + $(RM) -f $(EXEROOT)/cpl/obj/Srcfiles + +clean_dependsocn: + $(RM) -f $(EXEROOT)/ocn/obj/Srcfiles + +clean_dependswav: + $(RM) -f $(EXEROOT)/wav/obj/Srcfiles + +clean_dependsiac: + $(RM) -f $(EXEROOT)/iac/obj/Srcfiles + +clean_dependsglc: + $(RM) -f $(EXEROOT)/glc/obj/Srcfiles + +clean_dependsice: + $(RM) -f $(EXEROOT)/ice/obj/Srcfiles + +clean_dependsrof: + $(RM) -f $(EXEROOT)/rof/obj/Srcfiles + +clean_dependsesp: + $(RM) -f $(EXEROOT)/esp/obj/Srcfiles + +clean_dependslnd: + $(RM) -f $(LNDOBJDIR)/Srcfiles + +clean_dependscsmshare: + $(RM) -f $(SHAREDLIBROOT)/$(SHAREDPATH)/csm_share/Srcfiles + +clean_depends: clean_dependsatm clean_dependscpl clean_dependswav clean_dependsglc clean_dependsice clean_dependsrof clean_dependslnd clean_dependscsmshare clean_dependsesp clean_dependsiac + + +cleanatm: + $(RM) -f $(LIBROOT)/libatm.a + $(RM) -fr $(EXEROOT)/atm/obj + +cleancpl: + $(RM) -fr $(EXEROOT)/cpl/obj + +cleanocn: + $(RM) -f $(LIBROOT)/libocn.a + $(RM) -fr $(EXEROOT)/ocn/obj + +cleanwav: + $(RM) -f $(LIBROOT)/libwav.a + $(RM) -fr $(EXEROOT)/wav/obj + +cleaniac: + $(RM) -f $(LIBROOT)/libiac.a + $(RM) -fr $(EXEROOT)/iac/obj + 
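+# Like the component clean targets above, each target below removes that
+# component's library (when one is built) from LIBROOT and its object directory
+# under EXEROOT; 'clean' chains the component targets together and 'realclean'
+# additionally removes the shared csm_share, pio and gptl builds.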
+cleanesp: + $(RM) -f $(LIBROOT)/libesp.a + $(RM) -fr $(EXEROOT)/esp/obj + +cleanglc: + $(RM) -f $(LIBROOT)/libglc.a + $(RM) -fr $(EXEROOT)/glc + +cleanice: + $(RM) -f $(LIBROOT)/libice.a + $(RM) -fr $(EXEROOT)/ice/obj + +cleanrof: + $(RM) -f $(LIBROOT)/librof.a + $(RM) -fr $(EXEROOT)/rof/obj + +cleanlnd: + $(RM) -f $(LIBROOT)/$(LNDLIB) + $(RM) -fr $(LNDOBJDIR) + +cleancsmshare: + $(RM) -f $(CSMSHARELIB) + $(RM) -fr $(SHAREDLIBROOT)/$(SHAREDPATH)/csm_share + +cleanpio: + $(RM) -f $(PIO_LIBDIR)/libpio* + $(RM) -fr $(SHAREDLIBROOT)/$(SHAREDPATH)/pio + +cleangptl: + $(RM) -f $(GPTLLIB) + $(RM) -fr $(SHAREDLIBROOT)/$(SHAREDPATH)/gptl + +clean: cleanatm cleanocn cleanwav cleanglc cleanice cleanrof cleanlnd cleanesp cleaniac + +realclean: clean cleancsmshare cleanpio cleangptl + +# the if-tests prevent DEPS files from being created when they're not needed +ifneq ($(MAKECMDGOALS), db_files) +ifneq ($(MAKECMDGOALS), db_flags) +ifeq (,$(findstring clean,$(MAKECMDGOALS))) + -include Depends $(CASEROOT)/Depends.$(COMPILER) $(CASEROOT)/Depends.$(MACH) $(CASEROOT)/Depends.$(MACH).$(COMPILER) +endif +endif +endif +ifeq ($(COMP_NAME),csm_share) + shr_assert_mod.mod: shr_assert_mod.o +endif diff --git a/scripts/lib/CIME/SystemTests/test_utils/__init__.py b/CIME/Tools/__init__.py similarity index 100% rename from scripts/lib/CIME/SystemTests/test_utils/__init__.py rename to CIME/Tools/__init__.py diff --git a/scripts/Tools/advanced-py-prof b/CIME/Tools/advanced-py-prof similarity index 100% rename from scripts/Tools/advanced-py-prof rename to CIME/Tools/advanced-py-prof diff --git a/CIME/Tools/archive_metadata b/CIME/Tools/archive_metadata new file mode 100755 index 00000000000..191050254f9 --- /dev/null +++ b/CIME/Tools/archive_metadata @@ -0,0 +1,1968 @@ +#!/usr/bin/env python3 +""" +Gather all the case metadata and send it to the experiments databases +via a web post and SVN check-in + +Author: CSEG +""" +import argparse +import datetime +import filecmp +import getpass +import glob +import gzip +import json +import io +import os +from os.path import expanduser +import re +import shutil +import ssl +import subprocess +import sys +from string import Template +import configparser +import urllib + +from standard_script_setup import * +from CIME.case import Case +from CIME.utils import is_last_process_complete + +# define global constants +logger = logging.getLogger(__name__) +_svn_expdb_url = "https://svn-cesm2-expdb.cgd.ucar.edu" +_exp_types = ["CMIP6", "production", "tuning", "lens", "C1", "C2", "C3", "C4", "C5"] +_xml_vars = [ + "CASE", + "COMPILER", + "COMPSET", + "CONTINUE_RUN", + "DOUT_S", + "DOUT_S_ROOT", + "GRID", + "MACH", + "MPILIB", + "MODEL", + "MODEL_VERSION", + "REST_N", + "REST_OPTION", + "RUNDIR", + "RUN_REFCASE", + "RUN_REFDATE", + "RUN_STARTDATE", + "RUN_TYPE", + "STOP_N", + "STOP_OPTION", + "USER", +] +_run_vars = ["JOB_QUEUE", "JOB_WALLCLOCK_TIME", "PROJECT"] +_archive_list = [ + "Buildconf", + "CaseDocs", + "CaseStatus", + "LockedFiles", + "Macros.make", + "README.case", + "SourceMods", + "software_environment.txt", +] +_call_template = Template( + 'in "$function" - Ignoring SVN repo update\n' + 'SVN error executing command "$cmd". \n' + "$error: $strerror" +) +_copy_template = Template( + 'in "$function" - Unable to copy "$source" to "$dest"' "$error: $strerror" +) +_svn_error_template = Template( + 'in "$function" - SVN client unavailable\n' + 'SVN error executing command "$cmd". 
\n' + "$error: $strerror" +) +_ignore_patterns = ["*.pyc", "^.git", "tmp", ".svn", "*~"] +_pp_xml_vars = { + "atm": "ATMDIAG_test_path_climo", + "glc": "", + "lnd": "LNDDIAG_PTMPDIR_1", + "ice": "ICEDIAG_PATH_CLIMO_CONT", + "ocn": "OCNDIAG_TAVGDIR", + "rof": "", + "timeseries": "TIMESERIES_OUTPUT_ROOTDIR", + "xconform": "CONFORM_OUTPUT_DIR", +} +_pp_diag_vars = { + "atm": ["ATMDIAG_test_first_yr", "ATMDIAG_test_nyrs"], + "ice": ["ICEDIAG_BEGYR_CONT", "ICEDIAG_ENDYR_CONT", "ICEDIAG_YRS_TO_AVG"], + "lnd": [ + "LNDDIAG_clim_first_yr_1", + "LNDDIAG_clim_num_yrs_1", + "LNDDIAG_trends_first_yr_1", + "LNDDIAG_trends_num_yrs_1", + ], + "ocn": [ + "OCNDIAG_YEAR0", + "OCNDIAG_YEAR1", + "OCNDIAG_TSERIES_YEAR0", + "OCNDIAG_TSERIES_YEAR1", + ], +} +_pp_tseries_comps = ["atm", "glc", "ice", "lnd", "ocn", "rof"] + +# setting the ssl context to avoid issues with CGD certificates +_context = ssl._create_unverified_context() # pylint:disable=protected-access + +# ------------------------------------------------------------------------------- +class PasswordPromptAction(argparse.Action): + # ------------------------------------------------------------------------------- + """SVN developer's password class handler""" + # pylint: disable=redefined-builtin + def __init__( + self, + option_strings=None, + dest=None, + default=None, + required=False, + nargs=0, + help=None, + ): + super(PasswordPromptAction, self).__init__( + option_strings=option_strings, + dest=dest, + default=default, + required=required, + nargs=nargs, + help=help, + ) + + def __call__(self, parser, args, values, option_string=None): + # check if ~/.subversion/cmip6.conf exists + home = expanduser("~") + conf_path = os.path.join(home, ".subversion/cmip6.conf") + if os.path.exists(conf_path): + # read the .cmip6.conf file + config = configparser.SafeConfigParser() + config.read(conf_path) + password = config.get("svn", "password") + else: + password = getpass.getpass() + setattr(args, self.dest, password) + + +# --------------------------------------------------------------------- +def basic_authorization(user, password): + # --------------------------------------------------------------------- + """Basic authentication encoding""" + sauth = user + ":" + password + return "Basic " + sauth.encode("base64").rstrip() + + +# --------------------------------------------------------------------- +class SVNException(Exception): + # --------------------------------------------------------------------- + """SVN command exception handler""" + + def __init__(self, value): + super(SVNException, self).__init__(value) + self.value = value + + def __str__(self): + return repr(self.value) + + +# ------------------------------------------------------------------------------- +def commandline_options(args): + # ------------------------------------------------------------------------------- + """Process the command line arguments.""" + parser = argparse.ArgumentParser( + description="Query and parse the caseroot files to gather metadata information" + " that can be posted to the CESM experiments database." + " " + " CMIP6 experiment case names must be reserved already in the" + " experiment database. Please see:" + " https://csesgweb.cgd.ucar.edu/expdb2.0 for details." 
+    )
+
+    CIME.utils.setup_standard_logging_options(parser)
+
+    parser.add_argument(
+        "--user",
+        dest="user",
+        type=str,
+        default=None,
+        required=True,
+        help="User name for SVN CESM developer access (required)",
+    )
+
+    parser.add_argument(
+        "--password",
+        dest="password",
+        action=PasswordPromptAction,
+        default="",
+        required=True,
+        help="Password for SVN CESM developer access (required)",
+    )
+
+    parser.add_argument(
+        "--caseroot",
+        nargs=1,
+        required=False,
+        help="Fully qualified path to case root directory (optional). "
+        "Defaults to current working directory.",
+    )
+
+    parser.add_argument(
+        "--workdir",
+        nargs=1,
+        required=False,
+        help="Fully qualified path to directory for storing intermediate "
+        "case files. A sub-directory called "
+        "archive_temp_dir is created, populated "
+        "with case files, and posted to the CESM experiments database and "
+        'SVN repository at URL "{0}". '
+        "This argument can be used to archive a caseroot when the user "
+        "does not have write permission in the caseroot (optional). "
+        "Defaults to current working directory.".format(_svn_expdb_url),
+    )
+
+    parser.add_argument(
+        "--expType",
+        dest="expType",
+        nargs=1,
+        required=True,
+        choices=_exp_types,
+        help="Experiment type. For CMIP6 experiments, the case must already "
+        "exist in the experiments database at URL "
+        ' "http://csegweb.cgd.ucar.edu/expdb2.0" (required). '
+        'Must be one of "{0}"'.format(_exp_types),
+    )
+
+    parser.add_argument(
+        "--title",
+        nargs=1,
+        required=False,
+        default=None,
+        help="Title of experiment (optional).",
+    )
+
+    parser.add_argument(
+        "--ignore-logs",
+        dest="ignore_logs",
+        action="store_true",
+        help="Ignore updating the SVN repository with the caseroot/logs files. "
+        "The experiments database will be updated (optional).",
+    )
+
+    parser.add_argument(
+        "--ignore-timing",
+        dest="ignore_timing",
+        action="store_true",
+        help="Ignore updating the SVN repository with caseroot/timing files. "
+        "The experiments database will be updated (optional).",
+    )
+
+    parser.add_argument(
+        "--ignore-repo-update",
+        dest="ignore_repo_update",
+        action="store_true",
+        help="Ignore updating the SVN repository with all the caseroot files. "
+        "The experiments database will be updated (optional).",
+    )
+
+    parser.add_argument(
+        "--add-files",
+        dest="user_add_files",
+        required=False,
+        help="Comma-separated list with no spaces of files or directories to be "
+        "added to the SVN repository. These are in addition to the default added "
+        "caseroot files and directories: "
+        '"{0}, *.xml, user_nl_*" (optional).'.format(_archive_list),
+    )
+
+    parser.add_argument(
+        "--dryrun",
+        action="store_true",
+        help="Parse settings and print what actions will be taken but "
+        "do not execute the action (optional).",
+    )
+
+    parser.add_argument(
+        "--query_cmip6",
+        nargs=2,
+        required=False,
+        help="Query the experiments database global attributes "
+        "for specified CMIP6 casename as argument 1. "
+        "Writes a json formatted output file, specified by argument 2, "
+        "to subdir archive_files (optional).",
+    )
+
+    parser.add_argument(
+        "--test-post",
+        dest="test_post",
+        action="store_true",
+        help="Post metadata to the test expdb2.0 web application server "
+        'at URL "http://csegwebdev.cgd.ucar.edu/expdb2.0". 
' + "No --test-post argument defaults to posting metadata to the " + "production expdb2.0 web application server " + 'at URL "http://csegweb.cgd.ucar.edu/expdb2.0" (optional).', + ) + + opts = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return opts + + +# --------------------------------------------------------------------- +def get_case_vars(case_dict, case): + # --------------------------------------------------------------------- + """get_case_vars + loop through the global list of XML vars and get the values + from the case object into a case dictionary + + Arguments: + case_dict (dict) - case dictionary to store XML variables + case (object) - case object + """ + logger.debug("get_case_vars") + + for xml_id in _xml_vars: + case_dict[xml_id] = case.get_value(xml_id, resolved=True, subgroup=None) + + for xml_id in _run_vars: + case_dict[xml_id] = case.get_value(xml_id, resolved=True, subgroup="case.run") + + return case_dict + + +# --------------------------------------------------------------------- +def get_disk_usage(path): + # --------------------------------------------------------------------- + """get_disk_usage + return the total disk usage in bytes for a given path. + + Arguments: + path - path to start + """ + logger.debug("get_disk_usage") + total_size = 0 + cwd = os.getcwd() + if os.path.exists(path): + os.chdir(path) + cmd = ["du", "--summarize", "--block-size=1"] + try: + total_size = subprocess.check_output(cmd) + total_size = total_size.replace("\t.\n", "") + except subprocess.CalledProcessError: + msg = "Error executing command = '{0}'".format(cmd) + logger.warning(msg) + os.chdir(cwd) + return int(total_size) + + +# --------------------------------------------------------------------- +def get_ocn_disk_usage(path): + # --------------------------------------------------------------------- + """get_ocn_disk_usage + return the total disk usage in bytes for a given path. + + Arguments: + path - path to start + """ + logger.debug("get_ocn_disk_usage") + total_size = 0 + paths = glob.glob(path) + for path in paths: + total_size += get_disk_usage(path) + return int(total_size) + + +# --------------------------------------------------------------------- +def get_pp_path(pp_dir, process): + # --------------------------------------------------------------------- + """get_pp_path + return the XML path for process + + Arguments: + pp_dir - path to postprocess directory + process - process name + """ + logger.debug("get_pp_path") + + cwd = os.getcwd() + os.chdir(pp_dir) + + pp_path_var = "" + if process == "timeseries": + pp_path_var = _pp_xml_vars["timeseries"] + elif process == "xconform": + pp_path_var = _pp_xml_vars["xconform"] + + cmd = ["./pp_config", "--get", pp_path_var, "--value"] + try: + pp_path = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + msg = "Error executing command = '{0}'".format(cmd) + logger.warning(msg) + + if len(pp_path) > 2: + pp_path = pp_path.rstrip() + else: + pp_path = "" + + os.chdir(cwd) + return pp_path + + +# --------------------------------------------------------------------- +def get_diag_dates(comp, pp_dir): + # --------------------------------------------------------------------- + """get_diag_dates + + Query the postprocessing env_diags_[comp].xml file to get the model diag + dates for the given component. 
+ """ + logger.debug("get_diag_dates") + + cwd = os.getcwd() + os.chdir(pp_dir) + + model_dates = "" + pp_vars = _pp_diag_vars.get(comp) + for pp_var in pp_vars: + cmd = ["./pp_config", "--get", pp_var, "--value"] + try: + pp_value = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + msg = "Error executing command = '{0}'".format(cmd) + logger.warning(msg) + tmp_dates = "{0} = {1}".format(pp_var, pp_value) + model_dates = model_dates + tmp_dates + + os.chdir(cwd) + return model_dates + + +# --------------------------------------------------------------------- +def get_pp_status(case_dict): + # --------------------------------------------------------------------- + """get_pp_status + Parse the postprocessing log files + looking for status information + + Arguments: + case_dict (dict) - case dictionary to store XML variables + """ + logger.debug("get_pp_status") + + # initialize status variables + msg_avg = dict() + msg_diags = dict() + diag_comps = ["atm", "ice", "lnd", "ocn"] + + pp_dir = os.path.join(case_dict["CASEROOT"], "postprocess") + pp_log_dir = os.path.join(case_dict["CASEROOT"], "postprocess", "logs") + + msg_avg["atm"] = "COMPLETED SUCCESSFULLY" + msg_diags["atm"] = "Successfully completed generating atmosphere diagnostics" + case_dict["atm_avg_dates"] = case_dict["atm_diag_dates"] = get_diag_dates( + "atm", pp_dir + ) + + msg_avg["ice"] = "Successfully completed generating ice climatology averages" + msg_diags["ice"] = "Successfully completed generating ice diagnostics" + case_dict["ice_avg_dates"] = case_dict["ice_diag_dates"] = get_diag_dates( + "ice", pp_dir + ) + + msg_avg["lnd"] = "COMPLETED SUCCESSFULLY" + msg_diags["lnd"] = "Successfully completed generating land diagnostics" + case_dict["lnd_avg_dates"] = case_dict["lnd_diag_dates"] = get_diag_dates( + "lnd", pp_dir + ) + + msg_avg["ocn"] = "Successfully completed generating ocean climatology averages" + msg_diags["ocn"] = "Successfully completed generating ocean diagnostics" + case_dict["ocn_avg_dates"] = case_dict["ocn_diag_dates"] = get_diag_dates( + "ocn", pp_dir + ) + + for comp in diag_comps: + case_dict[comp + "_avg_status"] = "Unknown" + case_dict[comp + "_diag_status"] = "Unknown" + + if comp != "ocn": + case_dict[comp + "_avg_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/climo" + ) + case_dict[comp + "_avg_size"] = get_disk_usage( + case_dict[comp + "_avg_path"] + ) + case_dict[comp + "_diag_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/diag" + ) + case_dict[comp + "_diag_size"] = get_disk_usage( + case_dict[comp + "_diag_path"] + ) + else: + case_dict[comp + "_avg_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/climo*" + ) + case_dict[comp + "_avg_size"] = get_ocn_disk_usage( + case_dict[comp + "_avg_path"] + ) + case_dict[comp + "_diag_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/diag*" + ) + case_dict[comp + "_diag_size"] = get_ocn_disk_usage( + case_dict[comp + "_diag_path"] + ) + + avg_logs = list() + avg_file_pattern = "{0}/{1}_averages.log.*".format(pp_log_dir, comp) + avg_logs = glob.glob(avg_file_pattern) + + if avg_logs: + log_file = max(avg_logs, key=os.path.getctime) + if is_last_process_complete( + log_file, msg_avg[comp], "Average list complies with standards." 
+ ): + case_dict[comp + "_avg_status"] = "Succeeded" + else: + case_dict[comp + "_avg_status"] = "Started" + + diag_logs = list() + diag_file_pattern = "{0}/{1}_diagnostics.log.*".format(pp_log_dir, comp) + diag_logs = glob.glob(diag_file_pattern) + + if diag_logs: + log_file = max(diag_logs, key=os.path.getctime) + if is_last_process_complete(log_file, msg_diags[comp], "ncks version"): + case_dict[comp + "_diag_status"] = "Succeeded" + else: + case_dict[comp + "_diag_status"] = "Started" + + # get overall timeseries status + case_dict["timeseries_status"] = "Unknown" + case_dict["timeseries_path"] = get_pp_path(pp_dir, "timeseries") + case_dict["timeseries_size"] = 0 + case_dict["timeseries_dates"] = "{0}-{1}".format( + case_dict["RUN_STARTDATE"].replace("-", ""), + case_dict["RUN_STARTDATE"].replace("-", ""), + ) + case_dict["timeseries_total_time"] = 0 + tseries_logs = list() + tseries_file_pattern = "{0}/timeseries.log.*".format(pp_log_dir) + tseries_logs = glob.glob(tseries_file_pattern) + if tseries_logs: + log_file = max(tseries_logs, key=os.path.getctime) + if is_last_process_complete( + filepath=log_file, expect_text="Successfully completed", fail_text="opening" + ): + case_dict["timeseries_status"] = "Succeeded" + with open(log_file, "r") as fname: + log_content = fname.readlines() + total_time = [line for line in log_content if "Total Time:" in line] + case_dict["timeseries_total_time"] = " ".join(total_time[0].split()) + else: + case_dict["timeseries_status"] = "Started" + sta_dates = case_dict["sta_last_date"].split("-") + case_dict["timeseries_dates"] = "{0}-{1}".format( + case_dict["RUN_STARTDATE"].replace("-", ""), "".join(sta_dates[:-1]) + ) + for comp in _pp_tseries_comps: + tseries_path = "{0}/{1}/proc/tseries".format( + case_dict["timeseries_path"], comp + ) + case_dict["timeseries_size"] += get_disk_usage(tseries_path) + + # get iconform status = this initializes files in the POSTPROCESS_PATH + case_dict["iconform_status"] = "Unknown" + case_dict["iconform_path"] = "" + case_dict["iconform_size"] = 0 + case_dict["iconform_dates"] = case_dict["timeseries_dates"] + + iconform_logs = list() + iconform_file_pattern = "{0}/iconform.log.*".format(pp_log_dir) + iconform_logs = glob.glob(iconform_file_pattern) + if iconform_logs: + log_file = max(iconform_logs, key=os.path.getctime) + if is_last_process_complete( + log_file, + "Successfully created the conform tool", + "Running createOutputSpecs", + ): + case_dict["iconform_status"] = "Succeeded" + else: + case_dict["iconform_status"] = "Started" + + # get xconform status + case_dict["xconform_path"] = "" + case_dict["xconform_path"] = get_pp_path(pp_dir, "xconform") + case_dict["xconform_status"] = "Unknown" + case_dict["xconform_size"] = get_disk_usage(case_dict["xconform_path"]) + case_dict["xconform_dates"] = case_dict["timeseries_dates"] + case_dict["xconform_total_time"] = 0 + + xconform_logs = list() + xconform_file_pattern = "{0}/xconform.log.*".format(pp_log_dir) + xconform_logs = glob.glob(xconform_file_pattern) + if xconform_logs: + log_file = max(xconform_logs, key=os.path.getctime) + if is_last_process_complete( + log_file, + "Successfully completed converting all files", + "cesm_conform_generator INFO", + ): + case_dict["xconform_status"] = "Succeeded" + case_dict["xconform_size"] = get_disk_usage(case_dict["xconform_path"]) + with open(log_file, "r") as fname: + log_content = fname.readlines() + total_time = [line for line in log_content if "Total Time:" in line] + if total_time: + 
case_dict["xconform_total_time"] = " ".join(total_time[0].split()) + else: + case_dict["xconform_status"] = "Started" + + return case_dict + + +# --------------------------------------------------------------------- +def get_run_last_date(casename, run_path): + # --------------------------------------------------------------------- + """get_run_last_date + parse the last cpl.r file in the run_path to retrieve that last date. + + Arguments: + casename + run_path - path to run directory + """ + logger.debug("get_run_last_date") + + pattern = "{0}.cpl.r.*.nc".format(casename) + cpl_files = sorted(glob.glob(os.path.join(run_path, pattern))) + + if cpl_files: + _, cpl_file = os.path.split(cpl_files[-1]) + fparts = cpl_file.split(".") + return fparts[-2] + + return "0000-00-00" + + +# --------------------------------------------------------------------- +def get_sta_last_date(sta_path): + # --------------------------------------------------------------------- + """get_sta_last_date + parse the last rest directory in the sta_path to retrieve that last date. + + Arguments: + sta_path - path to run directory + """ + logger.debug("get_sta_last_date") + + rest_dirs = sorted(glob.glob(os.path.join(sta_path, "rest/*"))) + + if rest_dirs: + _, rest_dir = os.path.split(rest_dirs[-1]) + return rest_dir + + return "0000-00-00" + + +# --------------------------------------------------------------------- +def get_case_status(case_dict): + # --------------------------------------------------------------------- + """get_case_status + Parse the CaseStatus and postprocessing log files + looking for status information + + Arguments: + case_dict (dict) - case dictionary to store XML variables + """ + logger.debug("get_case_status") + + # initialize status variables + case_dict["run_status"] = "Unknown" + case_dict["run_path"] = case_dict["RUNDIR"] + case_dict["run_size"] = 0 + case_dict["run_last_date"] = case_dict["RUN_STARTDATE"] + + case_dict["sta_status"] = "Unknown" + case_dict["sta_path"] = case_dict["DOUT_S_ROOT"] + case_dict["sta_size"] = 0 + case_dict["sta_last_date"] = case_dict["RUN_STARTDATE"] + + cstatus = case_dict["CASEROOT"] + "/CaseStatus" + if os.path.exists(cstatus): + # get the run status + run_status_1 = is_last_process_complete( + cstatus, "case.run success", "case.run starting" + ) + run_status_2 = is_last_process_complete( + cstatus, "model execution success", "model execution starting" + ) + if run_status_1 is True or run_status_2 is True: + case_dict["run_status"] = "Succeeded" + case_dict["run_size"] = get_disk_usage(case_dict["run_path"]) + case_dict["run_last_date"] = get_run_last_date( + case_dict["CASE"], case_dict["run_path"] + ) + + # get the STA status + if case_dict["DOUT_S"]: + # get only the history, rest and logs dir - ignoring the proc subdirs + sta_status = is_last_process_complete( + cstatus, "st_archive success", "st_archive starting" + ) + case_dict["sta_last_date"] = get_sta_last_date(case_dict["DOUT_S_ROOT"]) + if sta_status is True: + case_dict["sta_status"] = "Succeeded" + # exclude the proc directories in the sta size estimates + for subdir in [ + "atm/hist", + "cpl/hist", + "esp/hist", + "ice/hist", + "glc/hist", + "lnd/hist", + "logs", + "ocn/hist", + "rest", + "rof/hist", + "wav/hist", + "iac/hist", + ]: + path = os.path.join(case_dict["sta_path"], subdir) + if os.path.isdir(path): + case_dict["sta_size"] += get_disk_usage(path) + + # check if the postprocess dir exists in the caseroot + case_dict["postprocess"] = False + if os.path.exists(case_dict["CASEROOT"] + 
"/postprocess"): + case_dict["postprocess"] = True + case_dict = get_pp_status(case_dict) + + return case_dict + + +# --------------------------------------------------------------------- +def check_expdb_case(case_dict, username, password): + # --------------------------------------------------------------------- + """check_exp_case + Cross check the casename with the database for a CMIP6 experiment + + Arguments: + case_dict (dict) - case dictionary to store XML variables + username (string) - SVN developer's username + password (string) - SVN developer's password + + Return case_id value; 0 if does not exist or > 0 for exists. + + """ + logger.debug("check_expdb_case") + data_dict = { + "casename": case_dict["CASE"], + "queryType": "checkCaseExists", + "expType": case_dict["expType"], + } + data = json.dumps(data_dict) + params = urllib.parse.urlencode( + dict(username=username, password=password, data=data) + ) + try: + response = urllib.request.urlopen( + case_dict["query_expdb_url"], params, context=_context + ) + output = json.loads(response.read().decode()) + except urllib.error.HTTPError as http_e: + logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) + sys.exit(1) + except urllib.error.URLError as url_e: + logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) + sys.exit(1) + + return int(output["case_id"]) + + +# --------------------------------------------------------------------- +def query_expdb_cmip6(case_dict, username, password): + # --------------------------------------------------------------------- + """query_exp_case + Query the expdb for CMIP6 casename = case_dict['q_casename'] metadata. + Write out a json file to case_dict['q_outfile']. + + Arguments: + case_dict (dict) - case dictionary to store XML variables + username (string) - SVN developer's username + password (string) - SVN developer's password + + """ + logger.debug("query_expdb_cmip6") + exists = False + data_dict = { + "casename": case_dict["q_casename"], + "queryType": "CMIP6GlobalAtts", + "expType": "CMIP6", + } + data = json.dumps(data_dict) + params = urllib.parse.urlencode( + dict(username=username, password=password, data=data) + ) + try: + response = urllib.request.urlopen( + case_dict["query_expdb_url"], params, context=_context + ) + output = json.load(response) + except urllib.error.HTTPError as http_e: + logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) + except urllib.error.URLError as url_e: + logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) + + if output: + if not os.path.exists("{0}/archive_files".format(case_dict["workdir"])): + os.makedirs("{0}/archive_files".format(case_dict["workdir"])) + + filename = "{0}/archive_files/{1}".format( + case_dict["workdir"], case_dict["q_outfile"] + ) + with io.open(filename, "w+", encoding="utf-8") as fname: + fname.write(json.dumps(output, ensure_ascii=False)) + fname.close() + exists = True + + return exists + + +# --------------------------------------------------------------------- +def create_json(case_dict): + # --------------------------------------------------------------------- + """create_json + Create a JSON file in the caseroot/archive_files dir. 
+ + Arguments: + case_dict (dict) - case dictionary to store XML variables + """ + logger.debug("create_json") + + if not os.path.exists("{0}/archive_files".format(case_dict["workdir"])): + os.makedirs("{0}/archive_files".format(case_dict["workdir"])) + + filename = "{0}/archive_files/json.{1}".format( + case_dict["workdir"], datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ) + with io.open(filename, "wb") as fname: + jstr = str(json.dumps(case_dict, indent=4, sort_keys=True, ensure_ascii=False)) + if isinstance(jstr, str): + jstr = jstr.decode("utf-8") + fname.write(jstr) + fname.close() + + +# --------------------------------------------------------------------- +def post_json(case_dict, username, password): + # --------------------------------------------------------------------- + """post_json + Post a JSON file in the caseroot/archive_files to the + remote expdb URL. + + Arguments: + case_dict (dict) - case dictionary to store XML variables + username (string) - SVN developers username + password (string) - SVN developers password + """ + logger.debug("post_json") + + case_dict["COMPSET"] = urllib.parse.quote(case_dict["COMPSET"]) + case_dict["GRID"] = urllib.parse.quote(case_dict["GRID"]) + data = json.dumps(case_dict) + params = urllib.parse.urlencode( + dict(username=username, password=password, data=data) + ) + try: + urllib.request.urlopen(case_dict["json_expdb_url"], params, context=_context) + except urllib.error.HTTPError as http_e: + logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) + except urllib.error.URLError as url_e: + logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) + + +# --------------------------------------------------------------------- +def check_svn(): + # --------------------------------------------------------------------- + """check_svn + + make sure svn client is installed and accessible + """ + logger.debug("check_svn") + + cmd = ["svn", "--version"] + svn_exists = True + result = "" + try: + result = subprocess.check_output(cmd) + except subprocess.CalledProcessError as error: + msg = _svn_error_template.substitute( + function="check_svn", cmd=cmd, error=error.returncode, strerror=error.output + ) + svn_exists = False + logger.info(msg) + raise SVNException(msg) + + if "version" not in result: + msg = "SVN is not available. Ignoring SVN update" + svn_exists = False + raise SVNException(msg) + + return svn_exists + + +# --------------------------------------------------------------------- +def create_temp_archive(case_dict): + # --------------------------------------------------------------------- + """create_temp_archive + + Create a temporary SVN sandbox directory in the current caseroot + """ + archive_temp_dir = "{0}/archive_temp_dir".format(case_dict["workdir"]) + logger.debug("create_temp_archive %s", archive_temp_dir) + + if not os.path.exists(archive_temp_dir): + os.makedirs(archive_temp_dir) + else: + logger.info( + "ERROR archive_metadata archive_temp_dir already exists. exiting..." 
+ ) + sys.exit(1) + + return archive_temp_dir + + +# --------------------------------------------------------------------- +def check_svn_repo(case_dict, username, password): + # --------------------------------------------------------------------- + """check_svn_repo + + check if a SVN repo exists for this case + """ + logger.debug("check_svn_repo") + + repo_exists = False + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) + cmd = ["svn", "list", svn_repo, "--username", username, "--password", password] + result = "" + try: + result = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + msg = "SVN repo does not exist for this case. A new one will be created." + logger.warning(msg) + + if re.search("README.archive", result): + repo_exists = True + + return repo_exists + + +# --------------------------------------------------------------------- +def get_trunk_tag(case_dict, username, password): + # --------------------------------------------------------------------- + """get_trunk_tag + + return the most recent trunk tag as an integer + """ + logger.debug("get_trunk_tag") + + tag = 0 + svn_repo = "{0}/trunk_tags".format(case_dict["svn_repo_url"]) + cmd = ["svn", "list", svn_repo, "--username", username, "--password", password] + result = "" + try: + result = subprocess.check_output(cmd) + except subprocess.CalledProcessError as error: + cmd_nopasswd = [ + "svn", + "list", + svn_repo, + "--username", + username, + "--password", + "******", + ] + msg = _call_template.substitute( + function="get_trunk_tag", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + + if result: + last_tag = [i for i in result.split("\n") if i][-1] + last_tag = last_tag[:-1].split("_")[-1] + tag = int(last_tag.lstrip("0")) + + return tag + + +# --------------------------------------------------------------------- +def checkout_repo(case_dict, username, password): + # --------------------------------------------------------------------- + """checkout_repo + + checkout the repo into the archive_temp_dir + """ + logger.debug("checkout_repo") + + os.chdir(case_dict["archive_temp_dir"]) + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) + cmd = ["svn", "co", "--username", username, "--password", password, svn_repo, "."] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + cmd_nopasswd = [ + "svn", + "co", + "--username", + username, + "--password", + "******", + svn_repo, + ".", + ] + msg = _call_template.substitute( + function="checkout_repo", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + + os.chdir(case_dict["CASEROOT"]) + + +# --------------------------------------------------------------------- +def create_readme(case_dict): + # --------------------------------------------------------------------- + """create_readme + + Create a generic README.archive file + """ + logger.debug("create_readme") + os.chdir(case_dict["archive_temp_dir"]) + + fname = open("README.archive", "w") + fname.write("Archived metadata is available for this case at URL:\n") + fname.write(case_dict["base_expdb_url"]) + fname.close() + + +# --------------------------------------------------------------------- +def update_repo_add_file(filename, dir1, dir2): + # --------------------------------------------------------------------- + """update_repo_add_file + + Add a file to the SVN repository + """ + src = os.path.join(dir1, filename) 
+    dest = os.path.join(dir2, filename)
+    logger.debug("left_only: " + src + " -> " + dest)
+    if not os.path.exists(dest):
+        shutil.copy2(src, dest)
+        cmd = ["svn", "add", "--parents", dest]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError as error:
+            msg = _call_template.substitute(
+                function="update_repo_add_file",
+                cmd=cmd,
+                error=error.returncode,
+                strerror=error.output,
+            )
+            logger.warning(msg)
+            raise SVNException(msg)
+
+
+# ---------------------------------------------------------------------
+def update_repo_rm_file(filename, dir1, dir2):
+    # ---------------------------------------------------------------------
+    """update_repo_rm_file
+
+    Remove a file from the SVN repository
+    """
+    src = os.path.join(dir2, filename)
+    dest = os.path.join(dir1, filename)
+    logger.debug("right_only: " + src + " -> " + dest)
+    if os.path.exists(dest):
+        cmd = ["svn", "rm", dest]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError as error:
+            msg = _call_template.substitute(
+                function="update_repo_rm_file",
+                cmd=cmd,
+                error=error.returncode,
+                strerror=error.output,
+            )
+            logger.warning(msg)
+            raise SVNException(msg)
+
+
+# ---------------------------------------------------------------------
+def update_repo_copy_file(filename, dir1, dir2):
+    # ---------------------------------------------------------------------
+    """update_repo_copy_file
+
+    Copy a file into the SVN local repo
+    """
+    src = os.path.join(dir1, filename)
+    dest = os.path.join(dir2, filename)
+    shutil.copy2(src, dest)
+
+
+# ---------------------------------------------------------------------
+def compare_dir_trees(dir1, dir2, archive_list):
+    # ---------------------------------------------------------------------
+    """compare_dir_trees
+
+    Compare two directories recursively. Files in each directory are
+    assumed to be equal if their names and contents are equal.
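+
+    Only caseroot *.xml files, user_nl_* files and entries named in archive_list
+    are considered: files new to the caseroot are svn-added to the sandbox, files
+    that have disappeared from the caseroot are svn-removed, and files that differ
+    (or cannot be compared) are copied over before recursing into common
+    subdirectories that appear in archive_list.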
+ """ + xml_files = glob.glob(os.path.join(dir1, "*.xml")) + user_nl_files = glob.glob(os.path.join(dir1, "user_nl_*")) + dirs_cmp = filecmp.dircmp(dir1, dir2, _ignore_patterns) + + left_only = [ + fn + for fn in dirs_cmp.left_only + if not os.path.islink(fn) + and (fn in xml_files or fn in user_nl_files or fn in archive_list) + ] + right_only = [ + fn + for fn in dirs_cmp.right_only + if not os.path.islink(fn) + and (fn in xml_files or fn in user_nl_files or fn in archive_list) + ] + funny_files = [ + fn + for fn in dirs_cmp.funny_files + if not os.path.islink(fn) + and (fn in xml_files or fn in user_nl_files or fn in archive_list) + ] + + # files and directories need to be added to svn repo from the caseroot + if left_only: + for filename in left_only: + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": + update_repo_add_file(filename, dir1, dir2) + else: + new_dir1 = os.path.join(dir1, filename) + new_dir2 = os.path.join(dir2, filename) + os.makedirs(new_dir2) + cmd = ["svn", "add", new_dir2] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + + # recurse through this new subdir + new_archive_list = [filename] + compare_dir_trees(new_dir1, new_dir2, new_archive_list) + + # files need to be removed from svn repo that are no longer in the caseroot + if right_only: + for filename in right_only: + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": + update_repo_rm_file(filename, dir1, dir2) + + # files are the same but could not be compared so copy the caseroot version + if funny_files: + for filename in funny_files: + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": + update_repo_copy_file(filename, dir1, dir2) + + # common files have changed in the caseroot and need to be copied to the svn repo + (_, mismatch, errors) = filecmp.cmpfiles( + dir1, dir2, dirs_cmp.common_files, shallow=False + ) + if mismatch: + for filename in mismatch: + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": + update_repo_copy_file(filename, dir1, dir2) + + # error in file comparison so copy the caseroot file to the svn repo + if errors: + for filename in errors: + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": + update_repo_copy_file(filename, dir1, dir2) + + # recurse through the subdirs + common_dirs = dirs_cmp.common_dirs + if common_dirs: + for common_dir in common_dirs: + if common_dir in archive_list: + new_dir1 = os.path.join(dir1, common_dir) + new_dir2 = os.path.join(dir2, common_dir) + compare_dir_trees(new_dir1, new_dir2, archive_list) + else: + return + + +# --------------------------------------------------------------------- +def update_local_repo(case_dict, ignore_logs, ignore_timing): + # --------------------------------------------------------------------- + """update_local_repo + + Compare and update local SVN sandbox + """ + logger.debug("update_local_repo") + from_dir = case_dict["CASEROOT"] + to_dir = case_dict["archive_temp_dir"] + + compare_dir_trees(from_dir, to_dir, case_dict["archive_list"]) + + # check if ignore_logs is specified + if ignore_logs: + os.chdir(to_dir) + if os.path.isdir("./logs"): + try: + shutil.rmtree("./logs") + except OSError: + logger.warning( + 'in "update_local_repo" - Unable to remove "logs" in archive dir.' 
+ ) + + cmd = ["svn", "delete", "./logs"] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + + if os.path.isdir("./postprocess/logs"): + os.chdir("./postprocess") + try: + shutil.rmtree("./logs") + except OSError: + logger.warning( + 'in "update_local_repo" - ' + 'Unable to remove "postprocess/logs" in archive dir.' + ) + + cmd = ["svn", "delete", "./logs"] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + else: + # add log files + if os.path.exists("{0}/logs".format(from_dir)): + if not os.path.exists("{0}/logs".format(to_dir)): + os.makedirs("{0}/logs".format(to_dir)) + os.chdir(os.path.join(from_dir, "logs")) + for filename in glob.glob("*.*"): + update_repo_add_file( + filename, + os.path.join(from_dir, "logs"), + os.path.join(to_dir, "logs"), + ) + + if os.path.exists("{0}/postprocess/logs".format(from_dir)): + if not os.path.exists("{0}/postprocess/logs".format(to_dir)): + os.makedirs("{0}/postprocess/logs".format(to_dir)) + os.chdir(os.path.join(from_dir, "postprocess/logs")) + for filename in glob.glob("*.*"): + update_repo_add_file( + filename, + os.path.join(from_dir, "postprocess", "logs"), + os.path.join(to_dir, "postprocess", "logs"), + ) + + # check if ignore_timing is specified + if ignore_timing: + os.chdir(case_dict["archive_temp_dir"]) + if os.path.isdir("./timing"): + try: + shutil.rmtree("./timing") + except OSError: + logger.warning( + 'in "update_local_repo" - Unable to remove "timing" in archive dir.' 
+ ) + + cmd = ["svn", "delete", "./timing"] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + else: + # add timing files + if os.path.exists("{0}/timing".format(from_dir)): + if not os.path.exists("{0}/timing".format(to_dir)): + os.makedirs("{0}/timing".format(to_dir)) + os.chdir(os.path.join(from_dir, "timing")) + for filename in glob.glob("*.*"): + update_repo_add_file( + filename, + os.path.join(from_dir, "timing"), + os.path.join(to_dir, "timing"), + ) + + +# --------------------------------------------------------------------- +def populate_local_repo(case_dict, ignore_logs, ignore_timing): + # --------------------------------------------------------------------- + """populate_local_repo + + Populate local SVN sandbox + """ + logger.debug("populate_local_repo") + os.chdir(case_dict["CASEROOT"]) + + # loop through the archive_list and copy to the temp archive dir + for archive in case_dict["archive_list"]: + if os.path.exists(archive): + if os.path.isdir(archive): + try: + target = case_dict["archive_temp_dir"] + "/" + archive + shutil.copytree( + archive, + target, + symlinks=False, + ignore=shutil.ignore_patterns(*_ignore_patterns), + ) + except OSError as error: + msg = _copy_template.substitute( + function="populate_local_repo", + source=archive, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) + logger.warning(msg) + else: + try: + shutil.copy2(archive, case_dict["archive_temp_dir"]) + except OSError as error: + msg = _copy_template.substitute( + function="populate_local_repo", + source=archive, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) + logger.warning(msg) + + # add files with .xml as the suffix + xml_files = glob.glob("*.xml") + for xml_file in xml_files: + if os.path.isfile(xml_file): + try: + shutil.copy2(xml_file, case_dict["archive_temp_dir"]) + except OSError as error: + msg = _copy_template.substitute( + function="populate_local_repo", + source=xml_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) + logger.warning(msg) + + # add files with .xml as the suffix from the postprocess directory + if os.path.isdir("./postprocess"): + pp_path = "{0}/{1}".format(case_dict["archive_temp_dir"], "postprocess") + if not os.path.exists(pp_path): + os.mkdir(pp_path) + xml_files = glob.glob("./postprocess/*.xml") + for xml_file in xml_files: + if os.path.isfile(xml_file): + try: + shutil.copy2(xml_file, pp_path) + except OSError as error: + msg = _copy_template.substitute( + function="populate_local_repo", + source=xml_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) + logger.warning(msg) + + # add files with user_nl_ as the prefix + user_files = glob.glob("user_nl_*") + for user_file in user_files: + if os.path.isfile(user_file): + try: + shutil.copy2(user_file, case_dict["archive_temp_dir"]) + except OSError as error: + msg = _copy_template.substitute( + function="populate_local_repo", + source=user_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) + logger.warning(msg) + + # add files with Depends as the prefix + conf_files = glob.glob("Depends.*") + for conf_file in conf_files: + if os.path.isfile(conf_file): + try: + shutil.copy2(conf_file, 
case_dict["archive_temp_dir"]) + except OSError as error: + msg = _copy_template.substitute( + function="populate_local_repo", + source=conf_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) + logger.warning(msg) + + # check if ignore_logs is specified + if ignore_logs: + os.chdir(case_dict["archive_temp_dir"]) + if os.path.isdir("./logs"): + try: + shutil.rmtree("./logs") + except OSError: + logger.warning( + 'in "populate_local_repo" - Unable to remove "logs" in archive_temp_dir.' + ) + if os.path.isdir("./postprocess/logs"): + os.chdir("./postprocess") + try: + shutil.rmtree("./logs") + except OSError: + logger.warning( + 'in "populate_local_repo" - ' + 'Unable to remove "postprocess/logs" in archive_temp_dir.' + ) + os.chdir(case_dict["CASEROOT"]) + + # check if ignore_timing is specified + if ignore_timing: + os.chdir(case_dict["archive_temp_dir"]) + if os.path.isdir("./timing"): + try: + shutil.rmtree("./timing") + except OSError: + logger.warning( + 'in "populate_local_repo" - Unable to remove "timing" in archive_temp_dir.' + ) + os.chdir(case_dict["CASEROOT"]) + + +# --------------------------------------------------------------------- +def checkin_trunk(case_dict, svn_cmd, message, username, password): + # --------------------------------------------------------------------- + """checkin_trunk + + Check in the local SVN sandbox to the remote trunk + """ + logger.debug("checkin_trunk") + + os.chdir(case_dict["archive_temp_dir"]) + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) + msg = '"{0}"'.format(message) + cmd = [ + "svn", + svn_cmd, + "--username", + username, + "--password", + password, + ".", + "--message", + msg, + ] + + if svn_cmd in ["import"]: + # create the trunk dir + msg = '"create trunk"' + cmd = [ + "svn", + "mkdir", + "--parents", + svn_repo, + "--username", + username, + "--password", + password, + "--message", + msg, + ] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + cmd_nopasswd = [ + "svn", + "mkdir", + "--parents", + svn_repo, + "--username", + username, + "--password", + "******", + "--message", + msg, + ] + msg = _call_template.substitute( + function="checkin_trunk", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + + # create the trunk_tags dir + tags = "{0}/trunk_tags".format(case_dict["svn_repo_url"]) + msg = '"create trunk_tags"' + cmd = [ + "svn", + "mkdir", + tags, + "--username", + username, + "--password", + password, + "--message", + msg, + ] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + cmd_nopasswd = [ + "svn", + "mkdir", + tags, + "--username", + username, + "--password", + "******", + "--message", + msg, + ] + msg = _call_template.substitute( + function="checkin_trunk", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + + msg = '"{0}"'.format(message) + cmd = [ + "svn", + svn_cmd, + "--username", + username, + "--password", + password, + ".", + svn_repo, + "--message", + msg, + ] + + # check-in the trunk to svn + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + cmd_nopasswd = [ + "svn", + svn_cmd, + "--username", + username, + "--password", + "******", + ".", + "--message", + msg, + ] + msg = _call_template.substitute( + function="checkin_trunk", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) + 
logger.warning(msg) + raise SVNException(msg) + + +# --------------------------------------------------------------------- +def create_tag(case_dict, new_tag, username, password): + # --------------------------------------------------------------------- + """create_tag + + create a new trunk tag in the remote repo + """ + logger.debug("create_tag") + + # create a new trunk tag + os.chdir(case_dict["archive_temp_dir"]) + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) + svn_repo_tag = "{0}/trunk_tags/{1}".format(case_dict["svn_repo_url"], new_tag) + msg = '"create new trunk tag"' + cmd = [ + "svn", + "copy", + "--username", + username, + "--password", + password, + svn_repo, + svn_repo_tag, + "--message", + msg, + ] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError as error: + cmd_nopasswd = [ + "svn", + "copy", + "--username", + username, + "--password", + "******", + svn_repo, + svn_repo_tag, + "--message", + msg, + ] + msg = _call_template.substitute( + function="checkin_trunk", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) + logger.warning(msg) + raise SVNException(msg) + + +# ------------------------------------------------------------------------- +def update_repo(ignore_logs, ignore_timing, case_dict, username, password): + # ------------------------------------------------------------------------- + """update_repo + + Update SVN repo + """ + logger.debug("update_repo") + + try: + # check if svn client is installed + svn_exists = check_svn() + + if svn_exists: + # check if the case repo exists + case_dict["svn_repo_url"] = "{0}/{1}".format( + _svn_expdb_url, case_dict["CASE"] + ) + repo_exists = check_svn_repo(case_dict, username, password) + case_dict["archive_temp_dir"] = create_temp_archive(case_dict) + case_dict["archive_list"] = _archive_list + case_dict["user_add_files"] + + if repo_exists: + # update trunk and make a new tag + last_tag = get_trunk_tag(case_dict, username, password) + new_tag = "{0}_{1}".format( + case_dict["CASE"], str(last_tag + 1).zfill(4) + ) + checkout_repo(case_dict, username, password) + update_local_repo(case_dict, ignore_logs, ignore_timing) + msg = "update case metadata for {0} by {1}".format( + case_dict["CASE"], username + ) + checkin_trunk(case_dict, "ci", msg, username, password) + create_tag(case_dict, new_tag, username, password) + logger.info( + 'SVN repository trunk updated at URL "%s"', + case_dict["svn_repo_url"], + ) + logger.info(' and a new trunk tag created "%s"', new_tag) + else: + # create a new case repo + new_tag = "{0}_0001".format(case_dict["CASE"]) + create_readme(case_dict) + populate_local_repo(case_dict, ignore_logs, ignore_timing) + msg = "initial import of case metadata for {0} by {1}".format( + case_dict["CASE"], username + ) + checkin_trunk(case_dict, "import", msg, username, password) + create_tag(case_dict, new_tag, username, password) + logger.info( + 'SVN repository imported to trunk URL "%s"', + case_dict["svn_repo_url"], + ) + logger.info(' and a new trunk tag created for "%s"', new_tag) + + except SVNException: + pass + + return case_dict + + +# --------------------------------------------------------------------- +def get_timing_data(case_dict): + # --------------------------------------------------------------------- + """get_timing_data + parse the timing data file and add information to the case_dict + + Arguments: + case_dict (dict) - case dictionary to store XML variables + """ + logger.debug("get_timing_data") + + # initialize the timing values in 
the dictionary + case_dict["model_cost"] = "undefined" + case_dict["model_throughput"] = "undefined" + + timing_dir = case_dict["CASEROOT"] + "/timing" + last_time = "" + if os.path.exists(timing_dir): + # check if timing files exists + timing_file_pattern = "cesm_timing." + case_dict["CASE"] + last_time = max( + glob.glob(timing_dir + "/" + timing_file_pattern + ".*"), + key=os.path.getctime, + ) + if last_time: + if "gz" in last_time: + # gunzip file first + with gzip.open(last_time, "rb") as fname: + file_content = fname.readlines() + else: + with open(last_time, "r") as fname: + file_content = fname.readlines() + + # search the file content for matching lines + model_cost = [line for line in file_content if "Model Cost:" in line] + model_throughput = [ + line for line in file_content if "Model Throughput:" in line + ] + + case_dict["model_cost"] = " ".join(model_cost[0].split()) + case_dict["model_throughput"] = " ".join(model_throughput[0].split()) + + return case_dict + + +# --------------------------------------------------------------------- +def initialize_main(options): + # --------------------------------------------------------------------- + """initialize_main + + Initialize the case dictionary data structure with command line options + """ + logger.debug("intialize_main") + + case_dict = dict() + + case_dict["CASEROOT"] = os.getcwd() + if options.caseroot: + case_dict["CASEROOT"] = options.caseroot[0] + + case_dict["workdir"] = case_dict["CASEROOT"] + if options.workdir: + case_dict["workdir"] = options.workdir[0] + + username = None + if options.user: + username = options.user + case_dict["svnlogin"] = username + + password = None + if options.password: + password = options.password + + if options.expType: + case_dict["expType"] = options.expType[0] + + case_dict["title"] = None + if options.title: + case_dict["title"] = options.title[0] + + case_dict["dryrun"] = False + if options.dryrun: + case_dict["dryrun"] = True + + case_dict["archive_temp_dir"] = "" + + case_dict["user_add_files"] = list() + if options.user_add_files: + case_dict["user_add_files"] = options.user_add_files.split(",") + + case_dict["q_casename"] = "" + case_dict["q_outfile"] = "" + if options.query_cmip6: + case_dict["q_casename"] = options.query_cmip6[0] + case_dict["q_outfile"] = options.query_cmip6[1] + + case_dict["base_expdb_url"] = "https://csegweb.cgd.ucar.edu/expdb2.0" + if options.test_post: + case_dict["base_expdb_url"] = "https://csegwebdev.cgd.ucar.edu/expdb2.0" + case_dict["json_expdb_url"] = ( + case_dict["base_expdb_url"] + "/cgi-bin/processJSON.cgi" + ) + case_dict["query_expdb_url"] = case_dict["base_expdb_url"] + "/cgi-bin/query.cgi" + + return case_dict, username, password + + +# --------------------------------------------------------------------- +def main_func(options): + # --------------------------------------------------------------------- + """main function + + Arguments: + options (list) - input options from command line + """ + logger.debug("main_func") + + (case_dict, username, password) = initialize_main(options) + + # check if query_cmip6 argument is specified + if options.query_cmip6: + if case_dict["dryrun"]: + logger.info("Dryrun - calling query_expdb_cmip6 for case metadata") + else: + if query_expdb_cmip6(case_dict, username, password): + logger.info( + 'Casename "%s" CMIP6 global attribute ' + 'metadata written to "%s/archive_files/%s" ' + 'from "%s"', + case_dict["workdir"], + case_dict["q_casename"], + case_dict["q_outfile"], + case_dict["query_expdb_url"], + ) + 
logger.info("Successful completion of archive_metadata") + sys.exit(0) + else: + logger.info( + 'ERROR archive_metadata failed to find "%s" ' + 'in experiments database at "%s".', + case_dict["q_casename"], + case_dict["query_expdb_url"], + ) + sys.exit(1) + + # loop through the _xml_vars gathering values + with Case(case_dict["CASEROOT"], read_only=True) as case: + if case_dict["dryrun"]: + logger.info("Dryrun - calling get_case_vars") + else: + case_dict = get_case_vars(case_dict, case) + + # check reserved casename expdb for CMIP6 experiments + if case_dict["expType"].lower() == "cmip6": + if case_dict["dryrun"]: + logger.info( + "Dryrun - calling check_expdb_case for CMIP6 experiment reservation" + ) + else: + case_dict["case_id"] = check_expdb_case(case_dict, username, password) + if case_dict["case_id"] < 1: + logger.info( + "Unable to archive CMIP6 metadata. " + '"%s" casename does not exist in database. ' + "All CMIP6 experiments casenames must be " + "reserved in the experiments database at URL: " + "https://csegweb.cgd.ucar.edu/expdb2.0 " + "prior to running archive_metadata.", + case_dict["CASE"], + ) + sys.exit(1) + + # get the case status into the case_dict + if case_dict["dryrun"]: + logger.info("Dryrun - calling get_case_status") + else: + case_dict = get_case_status(case_dict) + + # create / update the cesm expdb repo with the caseroot files + if not options.ignore_repo_update: + if case_dict["dryrun"]: + logger.info("Dryrun - calling update_repo") + else: + case_dict = update_repo( + options.ignore_logs, + options.ignore_timing, + case_dict, + username, + password, + ) + + # parse the timing data into the case_dict + if not options.ignore_timing: + if case_dict["dryrun"]: + logger.info("Dryrun - calling get_timing_data") + else: + case_dict = get_timing_data(case_dict) + + # Create a JSON file containing the case_dict with the date appended to the filename + if case_dict["dryrun"]: + logger.info("Dryrun - calling create_json") + else: + create_json(case_dict) + + # post the JSON to the remote DB + if case_dict["dryrun"]: + logger.info("Dryrun - calling post_json") + else: + post_json(case_dict, username, password) + + # clean-up the temporary archive files dir + if case_dict["dryrun"]: + logger.info('Dryrun - deleting "./archive_temp_dir"') + else: + if not options.ignore_repo_update and os.path.exists( + case_dict["archive_temp_dir"] + ): + shutil.rmtree(case_dict["archive_temp_dir"]) + + logger.info("Successful completion of archive_metadata") + + return 0 + + +# =================================== +if __name__ == "__main__": + + try: + __status__ = main_func(commandline_options(sys.argv)) + sys.exit(__status__) + except Exception as error: + print("{}".format(str(error))) + sys.exit(1) diff --git a/CIME/Tools/bld_diff b/CIME/Tools/bld_diff new file mode 100755 index 00000000000..27193d61560 --- /dev/null +++ b/CIME/Tools/bld_diff @@ -0,0 +1,218 @@ +#! 
/usr/bin/env python3 + +""" +Try to calculate and succinctly present the differences between two bld logs +for the same component +""" + +from standard_script_setup import * +from CIME.utils import run_cmd_no_fail + +import argparse, sys, os, gzip + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} log1 log2 +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + > {0} case1 case2 +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("log1", help="First log.") + + parser.add_argument("log2", help="Second log.") + + parser.add_argument( + "-I", + "--ignore-includes", + action="store_true", + help="Ignore differences in include flags", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.log1, args.log2, args.ignore_includes + + +############################################################################### +def is_compile_line(line): + ############################################################################### + return ( + line.count("-I") > 0 + and not line.startswith("gmake ") + and not line.startswith("make ") + ) + + +############################################################################### +def get_compile_lines_from_log(logfile_text): + ############################################################################### + result = [] + for line in logfile_text.splitlines(): + if is_compile_line(line): + result.append(line) + + return result + + +_SRCFILE_ENDINGS = (".F", ".f", ".c", ".F90", ".f90", ".cpp") +############################################################################### +def parse_log(logfile_text): + ############################################################################### + compile_lines = get_compile_lines_from_log(logfile_text) + result = {} + for compile_line in compile_lines: + items = compile_line.split() + compiled_file = None + for item in items: + for ending in _SRCFILE_ENDINGS: + if item.endswith(ending): + # expect(compiled_file is None, "Found multiple things that look like files in '{}'".format(compile_line)) + compiled_file = os.path.basename(item) + break + + if compiled_file: + break + + if compiled_file is None: + print( + "WARNING: Found nothing that looks like a file in '{}'".format( + compile_line + ) + ) + else: + if compiled_file in result: + print( + "WARNING: Found multiple compilations of {}".format(compiled_file) + ) + result[compiled_file] = items + + # TODO - Need to capture link lines too + + return result + + +############################################################################### +def get_case_from_log(logpath): + ############################################################################### + return os.path.abspath(os.path.join(os.path.dirname(logpath), "..")) + + +############################################################################### +def read_maybe_gzip(filepath): + ############################################################################### + opener = ( + lambda: gzip.open(filepath, "rt") + if filepath.endswith(".gz") + else open(filepath, "r") + ) + with opener() as fd: + return fd.read() + + +############################################################################### +def log_diff(log1, log2, 
repls, ignore_includes):
+    ###############################################################################
+    """
+    Search for build/link commands and compare them
+    """
+    are_same = True
+
+    # Read files
+    log1_contents = read_maybe_gzip(log1)
+    log2_contents = read_maybe_gzip(log2)
+
+    # Normalize log2
+    for replace_item, replace_with in repls.items():
+        log2_contents = log2_contents.replace(replace_item, replace_with)
+
+    # Transform log contents to a map of filename -> compile_args
+    compile_dict1 = parse_log(log1_contents)
+    compile_dict2 = parse_log(log2_contents)
+
+    file_set1 = set(compile_dict1.keys())
+    file_set2 = set(compile_dict2.keys())
+
+    for item in file_set1 - file_set2:
+        print("{} is missing compilation of {}".format(log2, item))
+        are_same = False
+
+    for item in file_set2 - file_set1:
+        print("{} has unexpected compilation of {}".format(log2, item))
+        are_same = False
+
+    for item in file_set1 & file_set2:
+        print("Checking compilation of {}".format(item))
+        flags1 = compile_dict1[item]
+        flags2 = compile_dict2[item]
+
+        missing = set(flags1) - set(flags2)
+        extra = set(flags2) - set(flags1)
+
+        # Let's not worry about order yet even though some flags are order-sensitive
+        for flag in missing:
+            if not (ignore_includes and flag.startswith("-I")) and item not in flag:
+                print(" Missing flag {}".format(flag))
+                are_same = False
+
+        for flag in extra:
+            if (
+                flag != "-o"
+                and not flag.startswith("CMakeFiles")
+                and not (ignore_includes and flag.startswith("-I"))
+                and item not in flag
+            ):
+                print(" Extra flag {}".format(flag))
+                are_same = False
+
+    return are_same
+
+
+###############################################################################
+def _main_func(description):
+    ###############################################################################
+    log1, log2, ignore_includes = parse_command_line(sys.argv, description)
+
+    xml_normalize_fields = ["TEST_TESTID", "SRCROOT"]
+    repls = {}
+    for xml_normalize_field in xml_normalize_fields:
+        try:
+            case1 = get_case_from_log(log1)
+            case2 = get_case_from_log(log2)
+            val1 = run_cmd_no_fail(
+                "./xmlquery --value {}".format(xml_normalize_field), from_dir=case1
+            )
+            val2 = run_cmd_no_fail(
+                "./xmlquery --value {}".format(xml_normalize_field), from_dir=case2
+            )
+            if os.sep in val1:
+                repls[os.path.normpath(val2)] = os.path.normpath(val1)
+            else:
+                repls[val2] = val1
+        except Exception as e:
+            logging.warning(
+                "Warning, failed to normalize on {}: {}".format(
+                    xml_normalize_field, str(e)
+                )
+            )
+            repls = {}
+
+    same = log_diff(log1, log2, repls, ignore_includes)
+    sys.exit(0 if same else 1)
+
+
+###############################################################################
+
+if __name__ == "__main__":
+    _main_func(__doc__)
diff --git a/CIME/Tools/bless_test_results b/CIME/Tools/bless_test_results
new file mode 100755
index 00000000000..093ae0d79b8
--- /dev/null
+++ b/CIME/Tools/bless_test_results
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+
+"""
+Analyze results from a test root area, finding namelist and non-BFB
+changes, and updating baselines. The purpose is, instead of re-running
+tests in generate mode, which is very slow, to allow for very fast
+analysis and blessing of diffs.
+
+You may need to load modules for cprnc to work.
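+
+Specific tests can be selected by passing one or more regular expressions as
+positional arguments (the bless_tests argument below); with no arguments,
+all tests found under the test root are considered for blessing.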
+""" +from standard_script_setup import * + +from CIME.utils import expect +from CIME.XML.machines import Machines +from CIME.bless_test_results import bless_test_results + +import argparse +import sys +import os +import logging + +_MACHINE = Machines() + + +def parse_command_line(args, description): + parser = argparse.ArgumentParser( + usage="""\n{0} [-n] [-r ] [-b ] [-c ] [ ...] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# From most recent run, bless any namelist changes \033[0m + > {0} -n + \033[1;32m# From most recent run, bless all changes \033[0m + > {0} + \033[1;32m# From most recent run, bless changes to test foo and bar only \033[0m + > {0} foo bar + \033[1;32m# From most recent run, bless only namelist changes to test foo and bar only \033[0m + > {0} -n foo bar + \033[1;32m# From most recent run of jenkins, bless history changes for next \033[0m + > {0} -r /home/jenkins/acme/scratch/jenkins -b next --hist-only +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + create_bless_options(parser) + + create_baseline_options(parser) + + create_test_options(parser) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "-c", + "--compiler", + default=_MACHINE.get_default_compiler(), + help="Compiler of run you want to bless", + ) + + parser.add_argument( + "-p", + "--no-skip-pass", + action="store_true", + help="Normally, if namelist or baseline phase exists and shows PASS, we assume no bless is needed. " + "This option forces the bless to happen regardless.", + ) + + parser.add_argument( + "-l", + "--lock-baselines", + action="store_true", + help="Turn off group write access for baselines", + ) + + mutual_execution = parser.add_mutually_exclusive_group() + + mutual_execution.add_argument( + "--report-only", + action="store_true", + help="Only report what files will be overwritten and why. Caution is a good thing when updating baselines", + ) + + mutual_execution.add_argument( + "-f", + "--force", + action="store_true", + help="Update every diff without asking. VERY DANGEROUS. Should only be used within testing scripts.", + ) + + parser.add_argument( + "--pes-file", + help="Full pathname of an optional pes specification file. The file" + "\ncan follow either the config_pes.xml or the env_mach_pes.xml format.", + ) + + parser.add_argument("--exclude", nargs="*", help="Exclude tests") + + parser.add_argument( + "bless_tests", + nargs="*", + help="When blessing, limit the bless to tests matching these regex", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return vars(args) + + +def create_bless_options(parser): + bless_group = parser.add_argument_group("Bless options") + + mutual_bless_group = bless_group.add_mutually_exclusive_group() + + mutual_bless_group.add_argument( + "-n", "--namelists-only", action="store_true", help="Only analyze namelists." + ) + + mutual_bless_group.add_argument( + "--hist-only", action="store_true", help="Only analyze history files." 
+ ) + + mutual_perf_group = bless_group.add_mutually_exclusive_group() + + mutual_perf_group.add_argument( + "--bless-tput", + action="store_true", + help="Bless throughput, use `--bless-perf` to bless throughput and memory", + ) + + mutual_perf_group.add_argument( + "--bless-mem", + action="store_true", + help="Bless memory, use `--bless-perf` to bless throughput and memory", + ) + + bless_group.add_argument( + "--bless-perf", action="store_true", help="Bless both throughput and memory" + ) + + +def create_baseline_options(parser): + baseline_group = parser.add_argument_group("Baseline options") + + baseline_group.add_argument( + "-b", + "--baseline-name", + help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.", + ) + + baseline_group.add_argument( + "--baseline-root", + help="Root of baselines. Default will use the BASELINE_ROOT from the case.", + ) + + +def create_test_options(parser): + default_testroot = _MACHINE.get_value("CIME_OUTPUT_ROOT") + + test_group = parser.add_argument_group("Test options") + + test_group.add_argument( + "-r", + "--test-root", + default=default_testroot, + help="Path to test results that are being blessed", + ) + + test_group.add_argument( + "--new-test-root", + help="If bless_test_results needs to create cases (for blessing namelists), use this root area", + ) + + test_group.add_argument( + "--new-test-id", + help="If bless_test_results needs to create cases (for blessing namelists), use this test id", + ) + + test_group.add_argument( + "-t", + "--test-id", + help="Limit processes to case dirs matching this test-id. Can be useful if mutiple runs dumped into the same dir.", + ) + + +def _main_func(description): + kwargs = parse_command_line(sys.argv, description) + + success = bless_test_results(**kwargs) + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/case.build b/CIME/Tools/case.build new file mode 100755 index 00000000000..4edf177198e --- /dev/null +++ b/CIME/Tools/case.build @@ -0,0 +1,267 @@ +#!/usr/bin/env python3 + +""" +Builds the case. + +case.setup must be run before this. In addition, any changes to env_build.xml +must be made before running this. + +This must be run before running case.submit. + +There are two usage modes; both modes accept the --caseroot option, but +other options are specific to one mode or the other: + +1) To build the model: + + Typical usage is simply: + ./case.build + + This can be used for the initial build as well as for incrementally + rebuilding after changing some source files. + + Optionally, you can specify one of the following options, although this is + not common: + --sharedlib-only + --model-only + --build ... + + In addition, if you'd like to skip saving build provenance (typically because + there was some error in doing so), you can add: + --skip-provenance-check + +2) To clean part or all of the build: + + To clean the whole build; this should be done after modifying either + env_build.xml or Macros.make: + ./case.build --clean-all + + To clean select portions of the build, for example, after adding new source + files for one component: + ./case.build --clean ... + or: + ./case.build --clean-depends ... 
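+
+   For example, to clean only the atmosphere and ocean objects before a
+   rebuild:
+   ./case.build --clean atm ocn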
+""" + +from standard_script_setup import * + +import CIME.build as build +from CIME.case import Case +from CIME.utils import find_system_test, get_model +from CIME.test_status import * + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to build.\n" "Default is current directory.", + ) + + if get_model() == "e3sm": + parser.add_argument( + "--ninja", + action="store_true", + help="Use ninja backed for CMake (instead of gmake). " + "The ninja backend is better at scanning fortran dependencies but " + "seems to be less reliable across different platforms and compilers.", + ) + + parser.add_argument( + "--separate-builds", + action="store_true", + help="Build each component one at a time, separately, with output going to separate logs", + ) + + parser.add_argument( + "--skip-submit", + action="store_true", + help="Sets the current test phase to RUN, skipping the SUBMIT phase. This " + "may be useful if rebuilding the model while this test is in the batch queue. " + "ONLY USE IF A TEST CASE, OTHERWISE IGNORED.", + ) + + parser.add_argument( + "--dry-run", + action="store_true", + help="Just print the cmake and ninja commands.", + ) + + mutex_group = parser.add_mutually_exclusive_group() + + # TODO mvertens: the following is hard-wired - otherwise it does not work with nuopc + # files = Files() + # config_file = files.get_value("CONFIG_CPL_FILE") + # component = Component(config_file, "CPL") + # comps = [x.lower() for x in component.get_valid_model_components()] + comps = ["cpl", "atm", "lnd", "ice", "ocn", "rof", "glc", "wav", "esp", "iac"] + libs = ["csmshare", "mct", "pio", "gptl"] + allobjs = comps + libs + + mutex_group.add_argument( + "--sharedlib-only", action="store_true", help="Only build shared libraries." 
+ ) + + mutex_group.add_argument( + "-m", + "--model-only", + action="store_true", + help="Assume shared libraries are already built.", + ) + + mutex_group.add_argument( + "-b", + "--build", + nargs="+", + choices=allobjs, + help="Libraries to build.\n" "Will cause namelist generation to be skipped.", + ) + + mutex_group.add_argument( + "--skip-provenance-check", + action="store_true", + help="Do not check and save build provenance", + ) + + mutex_group.add_argument( + "--clean-all", + action="store_true", + help="Clean all objects (including sharedlib objects that may be\n" + "used by other builds).", + ) + + mutex_group.add_argument( + "--clean", + nargs="*", + choices=allobjs, + help="Clean objects associated with specific libraries.\n" + "With no arguments, clean all objects other than sharedlib objects.", + ) + + mutex_group.add_argument( + "--clean-depends", + nargs="*", + choices=comps + ["csmshare"], + help="Clean Depends and Srcfiles only.\n" + "This allows you to rebuild after adding new\n" + "files in the source tree or in SourceMods.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + clean_depends = ( + args.clean_depends + if args.clean_depends is None or len(args.clean_depends) + else comps + ) + + cleanlist = args.clean if args.clean is None or len(args.clean) else comps + buildlist = None if args.build is None or len(args.build) == 0 else args.build + + if get_model() != "e3sm": + args.separate_builds = False + args.ninja = False + + return ( + args.caseroot, + args.sharedlib_only, + args.model_only, + cleanlist, + args.clean_all, + buildlist, + clean_depends, + not args.skip_provenance_check, + args.separate_builds, + args.ninja, + args.dry_run, + args.skip_submit, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + caseroot, + sharedlib_only, + model_only, + cleanlist, + clean_all, + buildlist, + clean_depends, + save_build_provenance, + separate_builds, + ninja, + dry_run, + skip_submit, + ) = parse_command_line(sys.argv, description) + + success = True + with Case(caseroot, read_only=False, record=True) as case: + testname = case.get_value("TESTCASE") + + if cleanlist is not None or clean_all or clean_depends is not None: + build.clean( + case, + cleanlist=cleanlist, + clean_all=clean_all, + clean_depends=clean_depends, + ) + elif testname is not None: + logging.warning( + "Building test for {} in directory {}".format(testname, caseroot) + ) + try: + # The following line can throw exceptions if the testname is + # not found or the test constructor throws. We need to be + # sure to leave TestStatus in the appropriate state if that + # happens. 
+ test = find_system_test(testname, case)(case) + except BaseException: + phase_to_fail = ( + MODEL_BUILD_PHASE if model_only else SHAREDLIB_BUILD_PHASE + ) + with TestStatus(test_dir=caseroot) as ts: + ts.set_status( + phase_to_fail, TEST_FAIL_STATUS, comments="failed to initialize" + ) + raise + + expect( + buildlist is None, + "Build lists don't work with tests, use create_newcase (not create_test) to use this feature", + ) + success = test.build( + sharedlib_only=sharedlib_only, + model_only=model_only, + ninja=ninja, + dry_run=dry_run, + separate_builds=separate_builds, + skip_submit=skip_submit, + ) + + else: + success = build.case_build( + caseroot, + case=case, + sharedlib_only=sharedlib_only, + model_only=model_only, + buildlist=buildlist, + save_build_provenance=save_build_provenance, + separate_builds=separate_builds, + ninja=ninja, + dry_run=dry_run, + ) + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/case.cmpgen_namelists b/CIME/Tools/case.cmpgen_namelists new file mode 100755 index 00000000000..c9930e71795 --- /dev/null +++ b/CIME/Tools/case.cmpgen_namelists @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 + +""" +case.cmpgen_namelists - perform namelist baseline operations (compare, +generate, or both) for this case. +""" + +from standard_script_setup import * + +from CIME.case import Case +from argparse import RawTextHelpFormatter + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory for which namelists are compared/generated. " + "\nDefault is current directory.", + ) + + parser.add_argument( + "-c", + "--compare", + action="store_true", + help="Force a namelist comparison against baselines. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "-g", + "--generate", + action="store_true", + help="Force a generation of namelist baselines. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "--compare-name", + help="Force comparison to use baselines with this name. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "--generate-name", + help="Force generation to use baselines with this name. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "--baseline-root", + help="Root of baselines. 
" "\nDefault is the case's BASELINE_ROOT.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return ( + args.caseroot, + args.compare, + args.generate, + args.compare_name, + args.generate_name, + args.baseline_root, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + caseroot, + compare, + generate, + compare_name, + generate_name, + baseline_root, + ) = parse_command_line(sys.argv, description) + with Case(caseroot, read_only=False) as case: + success = case.case_cmpgen_namelists( + compare, generate, compare_name, generate_name, baseline_root + ) + + sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/case.qstatus b/CIME/Tools/case.qstatus new file mode 100755 index 00000000000..4902dc511d5 --- /dev/null +++ b/CIME/Tools/case.qstatus @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +""" +Shows the batch status of all jobs associated with this case. + +Typical usage is simply: + ./case.qstatus +""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.test_status import * + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to query.\n" "Default is current directory.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.caseroot + + +############################################################################### +def _main_func(description): + ############################################################################### + caseroot = parse_command_line(sys.argv, description) + + with Case(caseroot, read_only=False) as case: + case.report_job_status() + + sys.exit(0) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/case.setup b/CIME/Tools/case.setup new file mode 100755 index 00000000000..37901fd8c61 --- /dev/null +++ b/CIME/Tools/case.setup @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 + +""" +Creates various files and directories needed in order to build the case, +create namelists and run the case. + +Any changes to env_mach_pes.xml and env_mach_specific.xml must be made +before running this. + +This must be run before running case.build. 
+ +To run this initially for the case, simply run: + ./case.setup + +To rerun after making changes to env_mach_pes.xml or env_mach_specific.xml, run: + ./case.setup --reset +""" + +from standard_script_setup import * +from CIME.case import Case + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to setup.\n" "Default is current directory.", + ) + + parser.add_argument( + "-c", + "--clean", + action="store_true", + help="Removes the batch run script for target machine.\n" + "If the testmode argument is present then keep the test\n" + "script if it is present - otherwise remove it.\n" + "The user_nl_xxx and Macros files are never removed by case.setup -\n" + "you must remove them manually.", + ) + + parser.add_argument( + "-t", + "--test-mode", + action="store_true", + help="Keeps the test script when the --clean argument is used.", + ) + + parser.add_argument( + "-r", + "--reset", + action="store_true", + help="Does a clean followed by setup.\n" + "This flag should be used when rerunning case.setup after it\n" + "has already been run for this case.", + ) + + parser.add_argument( + "-k", + "--keep", + action="append", + default=[], + help="When cleaning/resetting a case, do not remove/refresh files in this list. " + "Choices are batch script, env_mach_specific.xml, Macros.make, Macros.cmake. " + "Use should use this if you have local modifications to these files that you want to keep.", + ) + + parser.add_argument( + "--disable-git", + action="store_true", + help="Disable the interface to git, this will result in reduced provenance for your case.", + ) + + parser.add_argument( + "-N", + "--non-local", + action="store_true", + help="Use when you've requested a machine that you aren't on. " + "Will reduce errors for missing directories etc.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return ( + args.caseroot, + args.clean, + args.test_mode, + args.reset, + args.keep, + args.disable_git, + args.non_local, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + caseroot, + clean, + test_mode, + reset, + keep, + disable_git, + non_local, + ) = parse_command_line(sys.argv, description) + with Case(caseroot, read_only=False, record=True, non_local=non_local) as case: + case.case_setup( + clean=clean, + test_mode=test_mode, + reset=reset, + keep=keep, + disable_git=disable_git, + ) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/case.submit b/CIME/Tools/case.submit new file mode 100755 index 00000000000..5de69368100 --- /dev/null +++ b/CIME/Tools/case.submit @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 + +""" +Submits the case to the queuing system, or runs it if there is no queueing system. + +Also submits any other jobs (such as the short-term archiver) associated with this case. + +Running case.submit is the only way you should start a job. 
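+
+Before handing the run to the batch system, case.submit performs the same
+checks as the check_case tool: it verifies that the env xml files match the
+locked files, creates the namelists, and confirms that the build is complete.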
+ +Typical usage is simply: + ./case.submit + +Other examples: + ./case.submit -m begin,end + Submits the case, requesting mail at job beginning and end +""" + +import configparser +from standard_script_setup import * +from CIME.case import Case +from CIME.utils import expect + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to submit.\n" "Default is current directory.", + ) + + parser.add_argument( + "--job", + "-j", + help="Name of the job to be submitted;\n" + "can be any of the jobs listed in env_batch.xml.\n" + "This will be the first job of any defined workflow. " + "Default is case.run.", + ) + + parser.add_argument( + "--only-job", + help="Name of the job to be submitted;\n" + "can be any of the jobs listed in env_batch.xml.\n" + "Only this job will be run, workflow and RESUBMIT are ignored. " + "Default is case.run.", + ) + + parser.add_argument( + "--no-batch", + action="store_true", + help="Do not submit jobs to batch system, run locally.", + ) + + parser.add_argument( + "--prereq", + help="Specify a prerequisite job id, this job will not start until the\n" + "job with this id is completed (batch mode only). This feature overrides the\n" + "CONTINUE_RUN=TRUE check for the existance of restart files and assumes that the\n" + "files will be present when the case starts.", + ) + + parser.add_argument( + "--prereq-allow-failure", + action="store_true", + help="Allows starting the run even if the prerequisite fails.\n" + "This also allows resubmits to run if the original failed and the\n" + "resubmit was submitted to the queue with the orginal as a dependency,\n" + "as in the case of --resubmit-immediate.", + ) + + parser.add_argument( + "--resubmit", + action="store_true", + help="Used with tests only, to continue rather than restart a test.", + ) + + parser.add_argument( + "--resubmit-immediate", + action="store_true", + help="This queues all of the resubmissions immediately after\n" + "the first job is queued. These rely on the queue system to\n" + "handle dependencies.", + ) + + parser.add_argument( + "--skip-preview-namelist", + action="store_true", + help="Skip calling preview-namelist during case.run.", + ) + + CIME.utils.add_mail_type_args(parser) + + parser.add_argument( + "-a", + "--batch-args", + help="Used to pass additional arguments to batch system.\n" + "Do not use this feature to specify job prerequisites, use the --prereq feature instead.", + ) + + parser.add_argument( + "--chksum", action="store_true", help="Verifies input data checksums." 
+ ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + CIME.utils.resolve_mail_type_args(args) + + expect( + args.job is None or args.only_job is None, + "Cannot specify both --job and --only-job", + ) + job = None + workflow = True + if args.job: + job = args.job + elif args.only_job: + job = args.only_job + workflow = False + + return ( + args.caseroot, + job, + args.no_batch, + args.prereq, + args.prereq_allow_failure, + args.resubmit, + args.resubmit_immediate, + args.skip_preview_namelist, + args.mail_user, + args.mail_type, + args.batch_args, + workflow, + args.chksum, + ) + + +############################################################################### +def _main_func(description, test_args=False): + ############################################################################### + ( + caseroot, + job, + no_batch, + prereq, + allow_fail, + resubmit, + resubmit_immediate, + skip_pnl, + mail_user, + mail_type, + batch_args, + workflow, + chksum, + ) = parse_command_line(sys.argv, description) + + # save these options to a hidden file for use during resubmit + config_file = os.path.join(caseroot, ".submit_options") + if skip_pnl or mail_user or mail_type or batch_args: + config = configparser.RawConfigParser() + config.add_section("SubmitOptions") + if skip_pnl: + config.set("SubmitOptions", "skip_pnl", "True") + if mail_user: + config.set("SubmitOptions", "mail_user", mail_user) + if mail_type: + config.set("SubmitOptions", "mail_type", ",".join(mail_type)) + if batch_args: + config.set("SubmitOptions", "batch_args", batch_args) + with open(config_file, "w") as fd: + config.write(fd) + elif os.path.exists(config_file): + os.remove(config_file) + + if not test_args: + with Case(caseroot, read_only=False, record=True) as case: + case.submit( + job=job, + no_batch=no_batch, + prereq=prereq, + allow_fail=allow_fail, + resubmit=resubmit, + resubmit_immediate=resubmit_immediate, + skip_pnl=skip_pnl, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + workflow=workflow, + chksum=chksum, + ) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/case_diff b/CIME/Tools/case_diff new file mode 100755 index 00000000000..e642dd8e7ac --- /dev/null +++ b/CIME/Tools/case_diff @@ -0,0 +1,170 @@ +#! /usr/bin/env python3 + +""" +Try to calculate and succinctly present the differences between two large +directory trees. +""" + +from standard_script_setup import * +from CIME.utils import run_cmd, run_cmd_no_fail + +import argparse, sys, os + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} case1 case2 [skip-files] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + > {0} case1 case2 +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("case1", help="First case.") + + parser.add_argument("case2", help="Second case.") + + parser.add_argument( + "skip_list", + nargs="*", + help="skip these files. 
You'll probably want to skip the bld directory if it's inside the case", + ) + + parser.add_argument( + "-b", "--show-binary", action="store_true", help="Show binary diffs" + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.case1, args.case2, args.show_binary, args.skip_list + + +############################################################################### +def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): + ############################################################################### + """ + Starting at dir1, dir2 respectively, compare their contents + """ + # The assertions below hurt performance + # assert os.path.isdir(dir1), dir1 + " not a directory" + # assert os.path.isdir(dir2), dir2 + " not a directory" + + # Get contents of both directories + dir1_contents = set(os.listdir(dir1)) + dir2_contents = set(os.listdir(dir2)) + + # Use set operations to figure out what they have in common + dir1_only = dir1_contents - dir2_contents + dir2_only = dir2_contents - dir1_contents + both = dir1_contents & dir2_contents + + num_differing_files = 0 + + # Print the unique items + for dirname, set_obj in [(dir1, dir1_only), (dir2, dir2_only)]: + for item in sorted(set_obj): + if item not in skip_list: + print( + "===============================================================================" + ) + print(os.path.join(dirname, item), "is unique") + num_differing_files += 1 + + # Handling of the common items is trickier + for item in sorted(both): + if item in skip_list: + continue + path1 = os.path.join(dir1, item) + path2 = os.path.join(dir2, item) + path1isdir = os.path.isdir(path1) + + # If the directory status of the files differs, report diff + if path1isdir != os.path.isdir(path2): + print( + "===============================================================================" + ) + print(path1 + " DIFFERS (directory status)") + num_differing_files += 1 + continue + + # If we've made it this far, the files' status is the same. 
If the + # files are directories, recursively check them, otherwise check + # that the file contents match + if path1isdir: + num_differing_files += recursive_diff( + path1, path2, repls, show_binary, skip_list + ) + else: + # # As a (huge) performance enhancement, if the files have the same + # # size, we assume the contents match + # if (os.path.getsize(path1) != os.path.getsize(path2)): + # print path1 + " DIFFERS (contents)" + + stat, out, err = run_cmd("file {}".format(path1)) + if stat != 0: + logging.warning( + "Failed to probe file '{}', out: '{}', err: '{}'".format( + path1, out, err + ) + ) + continue + + is_text_file = "text" in out + if not (not show_binary and not is_text_file): + the_text = open(path2, "r").read() + for replace_item, replace_with in repls.items(): + the_text = the_text.replace(replace_item, replace_with) + + stat, out, _ = run_cmd("diff -w {} -".format(path1), input_str=the_text) + if stat != 0: + print( + "===============================================================================" + ) + print(path1 + " DIFFERS (contents)") + num_differing_files += 1 + print(" " + out) + + return num_differing_files + + +############################################################################### +def _main_func(description): + ############################################################################### + case1, case2, show_binary, skip_list = parse_command_line(sys.argv, description) + + xml_normalize_fields = ["TEST_TESTID", "SRCROOT"] + repls = {} + for xml_normalize_field in xml_normalize_fields: + try: + val1 = run_cmd_no_fail( + "./xmlquery --value {}".format(xml_normalize_field), from_dir=case1 + ) + val2 = run_cmd_no_fail( + "./xmlquery --value {}".format(xml_normalize_field), from_dir=case2 + ) + if os.sep in val1: + repls[os.path.normpath(val2)] = os.path.normpath(val1) + else: + repls[val2] = val1 + except Exception: + logging.warning("Warning, failed to normalize on " + xml_normalize_field) + repls = {} + + num_differing_files = recursive_diff(case1, case2, repls, show_binary, skip_list) + logging.info(num_differing_files, "files are different") + sys.exit(0 if num_differing_files == 0 else 1) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/check_case b/CIME/Tools/check_case new file mode 100755 index 00000000000..9b061046643 --- /dev/null +++ b/CIME/Tools/check_case @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 + +""" +Script to verify that the case is ready for submission. + +Typical usage is simply: + ./check_case + +You can run this before running case.submit to: + - Ensure that all of the env xml files are in sync with the locked files + - Create namelists (thus verifying that there will be no problems with + namelist generation) + - Ensure that the build is complete + +Running this is completely optional: these checks will be done +automatically when running case.submit. However, you can run this if you +want to perform these checks without actually submitting the case. 
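These checks can also be driven straight from the CIME Python API; a minimal sketch, assuming a valid case directory (the path is a placeholder):

    from CIME.case import Case
    from CIME.locked_files import check_lockedfiles

    with Case("/path/to/caseroot", read_only=False) as case:
        check_lockedfiles(case)      # env_*.xml in sync with the locked copies
        case.create_namelists()      # fails early on namelist-generation problems
        if not case.get_value("BUILD_COMPLETE"):
            raise RuntimeError("run ./case.build first")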
+""" + +from standard_script_setup import * + +from CIME.utils import expect +from CIME.case import Case +from CIME.locked_files import check_lockedfiles + +import argparse + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + +############################################################################### +def _main_func(description): + ############################################################################### + parse_command_line(sys.argv, description) + + with Case(read_only=False, record=True) as case: + check_lockedfiles(case) + + case.create_namelists() + + build_complete = case.get_value("BUILD_COMPLETE") + + if not build_complete: + expect(False, "Please rebuild the model interactively by calling case.build") + + logger.info("check_case OK ") + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/check_input_data b/CIME/Tools/check_input_data new file mode 100755 index 00000000000..179af50c4fd --- /dev/null +++ b/CIME/Tools/check_input_data @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 + +""" +This script determines if the required data files for your case exist on local disk in the appropriate subdirectory of +$DIN_LOC_ROOT. It automatically downloads missing data required for your simulation. + +It is recommended that users on a given system share a common $DIN_LOC_ROOT directory to avoid duplication on +disk of large amounts of input data. You may need to talk to your system administrator in order to set this up. + +This script should be run from $CASEROOT. + +To verify the presence of required data use: + ./check_input_data + +To obtain missing datasets from the input data server(s) use: + ./check_input_data --download + +This script is automatically called by the case control system, when the case is built and submitted. +So manual usage of this script is optional. +""" +from standard_script_setup import * +from CIME.case import Case + +import argparse + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "--protocol", default=None, help="The input data protocol to download data." 
+ ) + + parser.add_argument( + "--server", + default=None, + help="The input data repository from which to download data.", + ) + + parser.add_argument( + "-i", + "--input-data-root", + default=None, + help="The root directory where input data goes,\n" + "use xmlquery DIN_LOC_ROOT to see default value.", + ) + + parser.add_argument( + "--data-list-dir", default="Buildconf", help="Where to find list of input files" + ) + + parser.add_argument( + "--download", + action="store_true", + help="Attempt to download missing input files", + ) + + parser.add_argument( + "--chksum", + action="store_true", + help="chksum inputfiles against inputdata_chksum.dat (if available)", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return ( + args.protocol, + args.server, + args.input_data_root, + args.data_list_dir, + args.download, + args.chksum, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + protocol, + address, + input_data_root, + data_list_dir, + download, + chksum, + ) = parse_command_line(sys.argv, description) + + with Case() as case: + sys.exit( + 0 + if case.check_all_input_data( + protocol=protocol, + address=address, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + download=download, + chksum=chksum, + ) + else 1 + ) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/check_lockedfiles b/CIME/Tools/check_lockedfiles new file mode 100755 index 00000000000..a959b953570 --- /dev/null +++ b/CIME/Tools/check_lockedfiles @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +""" +This script compares xml files +""" + +from standard_script_setup import * +from CIME.case import Case +from CIME.locked_files import check_lockedfiles + + +def parse_command_line(args, description): + parser = argparse.ArgumentParser( + usage="""\n{0} [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# check_lockedfiles SMS\033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "--caseroot", default=os.getcwd(), help="Case directory to build" + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.caseroot + + +def _main_func(description): + caseroot = parse_command_line(sys.argv, description) + + with Case(case_root=caseroot, read_only=True) as case: + check_lockedfiles(case) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/cime_bisect b/CIME/Tools/cime_bisect new file mode 100755 index 00000000000..e427679f692 --- /dev/null +++ b/CIME/Tools/cime_bisect @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 + +""" +A script to help track down the commit that caused tests to fail. This script +can do bisections for both cime and the model that houses it, just be sure +you run this script from the root of the repo you want to bisect. + +NOTE: this tool will only work for models that use git and, for bisecting CIME, +bring in CIME via submodule or clone. 
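Under the hood this is ordinary git bisection; the flow the script automates looks roughly like the following sketch (the SHA and test name are placeholders):

    from CIME.utils import run_cmd_no_fail

    run_cmd_no_fail("git bisect start")
    run_cmd_no_fail("git bisect good GOOD_SHA")
    run_cmd_no_fail("git bisect bad HEAD")
    # each bisection step updates submodules and re-runs the failing test
    run_cmd_no_fail(
        "git bisect run sh -c "
        "'git submodule update --recursive && ./scripts/create_test SMS.f45_g37.B1850C5 --wait'"
    )
    run_cmd_no_fail("git bisect reset")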
+""" + +from standard_script_setup import * +from CIME.utils import expect, run_cmd_no_fail, run_cmd + +import argparse, sys, os, re + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--bad=] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Bisect ERS.f45_g37.B1850C5 which got broken in the last 4 CIME commits \033[0m + > cd + > {0} HEAD~4 ERS.f45_g37.B1850C5 + + \033[1;32m# Bisect ERS.f45_g37.B1850C5 which got broken in the last 4 MODEL commits \033[0m + > cd + > {0} HEAD~4 ERS.f45_g37.B1850C5 + + \033[1;32m# Bisect ERS.f45_g37.B1850C5 which started to DIFF in the last 4 commits \033[0m + > cd + > {0} HEAD~4 'ERS.f45_g37.B1850C5 -c -b master' + + \033[1;32m# Bisect a build error for ERS.f45_g37.B1850C5 which got broken in the last 4 commits \033[0m + > cd + > {0} HEAD~4 'ERS.f45_g37.B1850C5 --no-run' + + \033[1;32m# Bisect two different failing tests which got broken in the last 4 commits \033[0m + > cd + > {0} HEAD~4 'ERS.f45_g37.B1850C5 --no-run' 'SMS.f45_g37.F' + + +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("good", help="Name of most recent known good commit.") + + parser.add_argument( + "-B", + "--bad", + default="HEAD", + help="Name of bad commit, default is current HEAD.", + ) + + parser.add_argument( + "-a", + "--all-commits", + action="store_true", + help="Test all commits, not just merges", + ) + + parser.add_argument("-S", "--script", help="Use your own custom script instead") + + parser.add_argument( + "testargs", + nargs="*", + help="String to pass to create_test. Combine with single quotes if it includes multiple args.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + expect( + os.path.exists(".git"), + "Please run the root of a repo. 
Either your CIME repo or model depending on which one you want to bisect on", + ) + + return args.testargs, args.good, args.bad, args.all_commits, args.script + + +############################################################################### +def cime_bisect(testargs, good, bad, commits_to_skip, custom_script): + ############################################################################### + logger.info("####################################################") + logger.info("TESTING WITH ARGS '{}'".format(testargs)) + logger.info("####################################################") + + if os.path.exists("scripts/create_test"): + create_test = os.path.join(os.getcwd(), "scripts", "create_test") + else: + create_test = os.path.join(os.getcwd(), "cime", "scripts", "create_test") + + expect(os.path.exists(create_test), "Please run the root of a CIME repo") + + # Basic setup + run_cmd_no_fail("git bisect start") + run_cmd_no_fail("git bisect good {}".format(good), verbose=True) + run_cmd_no_fail("git bisect bad {}".format(bad), verbose=True) + if commits_to_skip: + run_cmd_no_fail("git bisect skip {}".format(" ".join(commits_to_skip))) + + # Formulate the create_test command, let create_test make the test-id, it will use + # a timestamp that will allow us to avoid collisions + thing_to_run = ( + custom_script if custom_script else "{} {}".format(create_test, testargs) + ) + bisect_cmd = "git submodule update --recursive && {}".format(thing_to_run) + + if not custom_script: + is_batch = False + try: + from CIME.XML.machines import Machines + + machine = Machines() + is_batch = machine.has_batch_system() + except: + pass + + if ( + is_batch + and "--no-run" not in testargs + and "--no-build" not in testargs + and "--no-setup" not in testargs + ): + expect( + "--wait" in testargs, + "Your create_test command likely needs --wait to work correctly with bisect", + ) + + try: + cmd = "git bisect run sh -c '{}'".format(bisect_cmd) + + output = run_cmd(cmd, verbose=True)[1] + + # Get list of potentially bad commits from output + lines = output.splitlines() + regex = re.compile(r"^([a-f0-9]{40}).*$") + bad_commits = set( + [regex.match(line).groups()[0] for line in lines if regex.match(line)] + ) + + bad_commits_filtered = bad_commits - commits_to_skip + + expect(len(bad_commits_filtered) == 1, bad_commits_filtered) + + logger.info("####################################################") + logger.info("BAD MERGE FOR ARGS '{}' IS:".format(testargs)) + logger.info("####################################################") + logger.warning( + run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop())) + ) + + finally: + run_cmd_no_fail("git bisect reset && git submodule update --recursive") + + +############################################################################### +def _main_func(description): + ############################################################################### + testargs, good, bad, all_commits, custom_script = parse_command_line( + sys.argv, description + ) + + # Important: we only want to test merges + if not all_commits: + commits_we_want_to_test = run_cmd_no_fail( + "git rev-list {}..{} --merges --first-parent".format(good, bad) + ).splitlines() + all_commits_ = run_cmd_no_fail( + "git rev-list {}..{}".format(good, bad) + ).splitlines() + commits_to_skip = set(all_commits_) - set(commits_we_want_to_test) + logger.info("Skipping {} non-merge commits".format(len(commits_to_skip))) + for item in commits_to_skip: + logger.debug(item) + else: + commits_to_skip = set() + + if 
custom_script: + cime_bisect(custom_script, good, bad, commits_to_skip, custom_script) + else: + for set_of_test_args in testargs: + cime_bisect(set_of_test_args, good, bad, commits_to_skip, custom_script) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/code_checker b/CIME/Tools/code_checker new file mode 100755 index 00000000000..6f7510337ab --- /dev/null +++ b/CIME/Tools/code_checker @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 + +""" +Ensure that all CIME python files are free of errors +and follow the PEP8 standard. +""" + +from standard_script_setup import * + +from CIME.code_checker import check_code, expect + +import argparse, sys, os + +# pylint: disable=import-error +from shutil import which + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Check code \033[0m + > {0} + + \033[1;32m# Check code single file case.py \033[0m + \033[1;32m# Note, you do NOT have to provide the path to this file, the tool will find it \033[0m + > {0} case.py +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "-j", + "--num-procs", + type=int, + default=10, + help="The number of files to check in parallel", + ) + + parser.add_argument( + "files", + nargs="*", + help="Restrict checking to specific files. Relative name is fine.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.num_procs, args.files + + +############################################################################### +def _main_func(description): + ############################################################################### + pylint = which("pylint") + expect(pylint is not None, "pylint not found") + + num_procs, files = parse_command_line(sys.argv, description) + + results = check_code(files, num_procs=num_procs, interactive=True) + for result in results.values(): + if result != "": + sys.exit(1) + + sys.exit(0) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/compare_namelists b/CIME/Tools/compare_namelists new file mode 100755 index 00000000000..5aba49edbc0 --- /dev/null +++ b/CIME/Tools/compare_namelists @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 + +""" +Compare namelists. Should be called by an ACME test. Designed +to not be sensitive to order or whitespace. 
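The comparison itself lives in CIME.compare_namelists and can be called directly; a minimal sketch (file paths and case id are placeholders):

    import CIME.compare_namelists

    ok, comments = CIME.compare_namelists.compare_namelist_files(
        "baseline_dir/test/namelistfile",         # gold file
        "mytestarea/namelistfile",                # file to compare against gold
        "SMS.f09_g16.X.machine_compiler.testid",  # case id, used to normalize values
    )
    print("MATCH" if ok else comments)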
+""" + +from standard_script_setup import * +import CIME.compare_namelists +from CIME.utils import expect + +import argparse, sys, os + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [-c ] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Compare namelist files\033[0m + > {0} baseline_dir/test/namelistfile mytestarea/namelistfile -c +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("gold_file", help="Path to gold file") + + parser.add_argument("new_file", help="Path to file to compare against gold") + + parser.add_argument( + "-c", + "--case", + action="store", + dest="case", + default=None, + help="The case base id (..). Helps us normalize data.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + # Normalize case + if args.case is not None: + args.case = CIME.utils.normalize_case_id(args.case) + + return args.gold_file, args.new_file, args.case + + +############################################################################### +def _main_func(description): + ############################################################################### + gold_file, compare_file, case = parse_command_line(sys.argv, description) + + if case is None: + logging.warning( + "No case id data available, will not be able to normalize values as effectively" + ) + else: + logging.info("Using case: '{}'".format(case)) + + success, comments = CIME.compare_namelists.compare_namelist_files( + gold_file, compare_file, case + ) + expect( + success, + "Namelist diff between files {} and {}\n{}".format( + gold_file, compare_file, comments + ), + ) + + print("Files {} and {} MATCH".format(gold_file, compare_file)) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/compare_test_results b/CIME/Tools/compare_test_results new file mode 100755 index 00000000000..3b59ccd007f --- /dev/null +++ b/CIME/Tools/compare_test_results @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 + +"""Analyze results from a test root area, comparing non-BFB changes. + +Purpose is, instead of re-running tests in compare mode, which is very slow, +allow for very fast analysis of diffs. + +Outputs results for each test to stdout (one line per test); possible status +codes are: PASS, FAIL, SKIP. (A SKIP denotes a test that did not make it to the +run phase or a test for which the run phase did not pass: we skip baseline +comparisons in this case.) + +In addition, creates files named compare.log.BASELINE_NAME.TIMESTAMP in each +test directory, which contain more detailed output. Also creates +*.cprnc.out.BASELINE_NAME.TIMESTAMP files in each run directory. + +Returns a 0 exit status if all tests are bit-for-bit, and a non-zero exit status +(TESTS_FAILED_ERR_CODE) if any tests differed from the baseline. + +You may need to load modules for cprnc to work. 
+ +""" + +from standard_script_setup import * + +from CIME.XML.machines import Machines +from CIME.compare_test_results import compare_test_results + +import argparse, sys, os + +_MACHINE = Machines() + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [-r ] [-b -c ] [-t ] [ ...] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# From most recent run, compare all changes \033[0m + > {0} + \033[1;32m# From most recent run, compare only changes for test foo and bar only \033[0m + > {0} foo bar + \033[1;32m# For an old run where you know test-id, compare only changes for test foo and bar only \033[0m + > {0} foo bar -t mytestid + \033[1;32m# From most recent run of jenkins, compare history changes for next \033[0m + > {0} -r /home/jenkins/acme/scratch/jenkins -b next + \033[1;32m# For typical CESM workflow, where baselines are named with tags \033[0m + > {0} -t TESTID -b BASELINE_TAG +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + default_compiler = _MACHINE.get_default_compiler() + scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") + default_testroot = os.path.join(scratch_root) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "-n", "--namelists-only", action="store_true", help="Only analyze namelists." + ) + + parser.add_argument( + "--hist-only", action="store_true", help="Only analyze history files." + ) + + parser.add_argument( + "-b", + "--baseline-name", + help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.", + ) + + parser.add_argument( + "--baseline-root", + help="Root of baselines. Default will use BASELINE_ROOT from the case.", + ) + + parser.add_argument( + "-c", + "--compiler", + default=default_compiler, + help="Compiler of run you want to compare", + ) + + parser.add_argument( + "-r", + "--test-root", + default=default_testroot, + help="Path to test results that are being compared", + ) + + parser.add_argument( + "-t", + "--test-id", + help="Limit processes to case dirs matching this test-id. 
Can be useful if mutiple runs dumped into the same dir.", + ) + + parser.add_argument( + "compare_tests", + nargs="*", + help="When comparing, limit the comparison to tests matching these regex", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return ( + args.baseline_name, + args.baseline_root, + args.test_root, + args.compiler, + args.test_id, + args.compare_tests, + args.namelists_only, + args.hist_only, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + baseline_name, + baseline_root, + test_root, + compiler, + test_id, + compare_tests, + namelists_only, + hist_only, + ) = parse_command_line(sys.argv, description) + + success = compare_test_results( + baseline_name, + baseline_root, + test_root, + compiler, + test_id, + compare_tests, + namelists_only, + hist_only, + ) + sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/component_compare_baseline b/CIME/Tools/component_compare_baseline new file mode 100755 index 00000000000..2adad3b94ad --- /dev/null +++ b/CIME/Tools/component_compare_baseline @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 + +""" +Compares current component history files against baselines +""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.hist_utils import compare_baseline + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Compare baselines \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) + + parser.add_argument("-b", "--baseline-dir", help="Use custom baseline dir") + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.caseroot, args.baseline_dir + + +############################################################################### +def _main_func(description): + ############################################################################### + caseroot, baseline_dir = parse_command_line(sys.argv, description) + with Case(caseroot) as case: + success, comments = compare_baseline(case, baseline_dir) + print(comments) + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/component_compare_copy b/CIME/Tools/component_compare_copy new file mode 100755 index 00000000000..28005cd6674 --- /dev/null +++ b/CIME/Tools/component_compare_copy @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 + +""" +Copy the most recent batch of hist files in a case, adding the given suffix. +This allows us to save these results if we want to run the case again. 
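A minimal sketch of the equivalent API call (caseroot and suffix are placeholders):

    from CIME.case import Case
    from CIME.hist_utils import copy_histfiles

    with Case("/path/to/caseroot") as case:
        # tags copies of the newest hist files, e.g. foo.nc -> foo.nc.base
        copy_histfiles(case, "base")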
+""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.hist_utils import copy_histfiles + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} suffix [] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Setup case \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("--suffix", help="Suffix to append to hist files") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.suffix, args.caseroot + + +############################################################################### +def _main_func(description): + ############################################################################### + suffix, caseroot = parse_command_line(sys.argv, description) + with Case(caseroot) as case: + copy_histfiles(case, suffix) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/component_compare_test b/CIME/Tools/component_compare_test new file mode 100755 index 00000000000..ece602e1af4 --- /dev/null +++ b/CIME/Tools/component_compare_test @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 + +""" +Compares two component history files in the testcase directory +""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.hist_utils import compare_test + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} suffix1 suffix2 [] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Setup case \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("suffix1", help="The suffix of the first set of files") + + parser.add_argument("suffix2", help="The suffix of the second set of files") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.suffix1, args.suffix2, args.caseroot + + +############################################################################### +def _main_func(description): + ############################################################################### + suffix1, suffix2, caseroot = parse_command_line(sys.argv, description) + with Case(caseroot) as case: + success, comments = compare_test(case, suffix1, suffix2) + print(comments) + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/component_generate_baseline b/CIME/Tools/component_generate_baseline new file mode 100755 index 00000000000..ff8f39170f0 --- /dev/null +++ b/CIME/Tools/component_generate_baseline @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 + +""" +Copies current component history files into baselines +""" + +from standard_script_setup import * + +from 
CIME.case import Case +from CIME.hist_utils import generate_baseline + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Generate baselines \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) + + parser.add_argument("-b", "--baseline-dir", help="Use custom baseline dir") + + parser.add_argument( + "-o", + "--allow-baseline-overwrite", + action="store_true", + help="By default an attempt to overwrite an existing baseline directory " + "will raise an error. Specifying this option allows " + "existing baseline directories to be silently overwritten.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.caseroot, args.baseline_dir, args.allow_baseline_overwrite + + +############################################################################### +def _main_func(description): + ############################################################################### + caseroot, baseline_dir, allow_baseline_overwrite = parse_command_line( + sys.argv, description + ) + with Case(caseroot) as case: + success, comments = generate_baseline( + case, baseline_dir, allow_baseline_overwrite + ) + print(comments) + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/scripts/Tools/concat_daily_hist.csh b/CIME/Tools/concat_daily_hist.csh similarity index 100% rename from scripts/Tools/concat_daily_hist.csh rename to CIME/Tools/concat_daily_hist.csh diff --git a/CIME/Tools/cs.status b/CIME/Tools/cs.status new file mode 100755 index 00000000000..4ab7b7e8ecd --- /dev/null +++ b/CIME/Tools/cs.status @@ -0,0 +1,197 @@ +#!/usr/bin/env python3 +""" +List test results based on TestStatus files. + +Typical usage: + ./cs.status /path/to/testroot/*.testid/TestStatus + +Returns True if no errors occured (not based on test statuses). 
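The same report can be produced programmatically via CIME.cs_status; a minimal sketch (the test root and test id are placeholders):

    import glob
    from CIME.cs_status import cs_status

    paths = glob.glob("/scratch/testroot/*.mytestid/TestStatus")
    cs_status(test_paths=paths, summary=True)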
+""" + +from standard_script_setup import * +import argparse, sys, os, logging, glob +from CIME.utils import expect +from CIME.cs_status import cs_status +from CIME import test_status + +_PERFORMANCE_PHASES = [test_status.THROUGHPUT_PHASE, test_status.MEMCOMP_PHASE] + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + parser.add_argument("paths", nargs="*", help="Paths to TestStatus files.") + + options_group = parser.add_mutually_exclusive_group() + + options_group.add_argument( + "-s", "--summary", action="store_true", help="Only show summary" + ) + + options_group.add_argument( + "-f", + "--fails-only", + action="store_true", + help="Only show non-PASSes (this includes PENDs as well as FAILs)", + ) + + parser.add_argument( + "-c", + "--count-fails", + action="append", + default=[], + metavar="PHASE", + help="For this phase, do not give line-by-line output; instead, just report\n" + "the total number of tests that have not PASSed this phase\n" + "(this includes PENDs as well as FAILs).\n" + "This is typically used with the --fails-only option,\n" + "but it can also be used without that option.\n" + "(However, it cannot be used with the --summary option.)\n" + "(Can be specified multiple times.)", + ) + + performance_fails_equivalent = " ".join( + ["--count-fails {}".format(phase) for phase in _PERFORMANCE_PHASES] + ) + parser.add_argument( + "-p", + "--count-performance-fails", + action="store_true", + help="For phases that involve performance comparisons with baseline:\n" + "Do not give line-by-line output; instead, just report the total number\n" + "of tests that have not PASSed this phase.\n" + "(This can be useful because these performance comparisons can be\n" + "subject to machine variability.)\n" + "This is equivalent to specifying:\n" + "{}".format(performance_fails_equivalent), + ) + + parser.add_argument( + "--check-throughput", + action="store_true", + help="Fail if throughput check fails (fail if tests slow down)", + ) + + parser.add_argument( + "--check-memory", + action="store_true", + help="Fail if memory check fails (fail if tests footprint grows)", + ) + + parser.add_argument( + "-x", + "--expected-fails-file", + help="Path to XML file listing expected failures for this test suite", + ) + + parser.add_argument( + "-t", + "--test-id", + action="append", + default=[], + help="Include all tests with this test id.\n" + "(Can be specified multiple times.)", + ) + + parser.add_argument( + "-r", + "--test-root", + default=os.getcwd(), + help="Test root used when --test-id is given", + ) + + parser.add_argument( + "--force-rebuild", + action="store_true", + help="When used with 'test-id', the" + "tests will have their 'BUILD_SHAREDLIB' phase reset to 'PEND'.", + ) + + args = parser.parse_args(args[1:]) + + _validate_args(args) + + if args.count_performance_fails: + args.count_fails.extend(_PERFORMANCE_PHASES) + + return ( + args.paths, + args.summary, + args.fails_only, + args.count_fails, + args.check_throughput, + args.check_memory, + args.expected_fails_file, + args.test_id, + args.test_root, + args.force_rebuild, + ) + + +def _validate_args(args): + if args.force_rebuild: + expect( + args.test_id != [], + "Cannot force a rebuild without 'test-id'", + ) + + expect( + not (args.summary and args.count_fails), 
+ "--count-fails cannot be specified with --summary", + ) + expect( + not (args.summary and args.count_performance_fails), + "--count-performance-fails cannot be specified with --summary", + ) + _validate_phases(args.count_fails, "--count-fails") + + +def _validate_phases(list_of_phases, arg_name): + for phase in list_of_phases: + expect( + phase in test_status.ALL_PHASES, + "Phase {} specified with {} argument is not a valid TestStatus phase".format( + phase, arg_name + ), + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + test_paths, + summary, + fails_only, + count_fails, + check_throughput, + check_memory, + expected_fails_file, + test_ids, + test_root, + force_rebuild, + ) = parse_command_line(sys.argv, description) + for test_id in test_ids: + test_paths.extend( + glob.glob(os.path.join(test_root, "*%s/TestStatus" % test_id)) + ) + + cs_status( + test_paths=test_paths, + summary=summary, + fails_only=fails_only, + count_fails_phase_list=count_fails, + check_throughput=check_throughput, + check_memory=check_memory, + expected_fails_filepath=expected_fails_file, + force_rebuild=force_rebuild, + ) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/e3sm_check_env b/CIME/Tools/e3sm_check_env new file mode 100755 index 00000000000..b1756c9cfd3 --- /dev/null +++ b/CIME/Tools/e3sm_check_env @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 + +""" +A script to verify that the environment is compliant with E3SM's software requirements. + +Be sure to source your env_mach_specific file before running this check. +""" + +from standard_script_setup import * +from CIME.utils import run_cmd + +import sys, os, argparse + +# Here's where we keep the various reports and instructions. +LOG = [] + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--verbose] +OR +{0} --help +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + +############################################################################### +def check_sh(): + ############################################################################### + stat = run_cmd("sh --version")[0] + if stat != 0: + LOG.append("* sh appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") + + +############################################################################### +def check_csh(): # Can't believe I'm actually checking for csh. 
-JNJ + ############################################################################### + stat = run_cmd("csh --version")[0] + if stat != 0: # Also tolerates tcsh + LOG.append("* csh appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") + + +############################################################################### +def check_perl_module(module_name): + ############################################################################### + stat = run_cmd('perl -e "require {};"'.format(module_name)[0]) + if stat != 0: + LOG.append( + "* E3SM requires the Perl module {}, but it is not available.".format( + module_name + ) + ) + LOG.append(" Please make sure that it exists in your @INC.") + + +############################################################################### +def check_perl(): + ############################################################################### + # First, make sure we have the right version of Perl. + e3sm_perl_major_version = 5 + e3sm_perl_minor_version = 16 + + stat, output, _ = run_cmd("perl -e 'print $^V;'") + if stat != 0: + LOG.append("* Perl appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") + return + + output = output[1:] # get rid of leading 'v' character + major_version, minor_version, _ = [int(item) for item in output.split(".")] + if ( + major_version != e3sm_perl_major_version + or minor_version < e3sm_perl_minor_version + ): + LOG.append( + "* E3SM requires Perl version {:d}.{:d}+. You appear to be using {:d}.{:d}.".format( + e3sm_perl_major_version, + e3sm_perl_minor_version, + major_version, + minor_version, + ) + ) + LOG.append( + " Please check to see whether an appropriate version exists on this machine," + ) + LOG.append(" possibly via a loadable module.") + + # Okay, our version is good. What about all those pesky modules? + check_perl_module("XML::LibXML") + check_perl_module("XML::SAX") + check_perl_module("XML::SAX::Exception") + check_perl_module("Switch") + + +############################################################################### +def check_git(): + ############################################################################### + e3sm_git_major_version = 2 + e3sm_git_minor_version = 0 + + stat, output, _ = run_cmd("git --version") + if stat != 0: + LOG.append("* Git appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") + return + + version = output.split()[-1] + num_dots = version.count(".") + if num_dots == 1: + major_version, minor_version = [int(s) for s in version.split(".")] + elif num_dots == 2: + major_version, minor_version, _ = [int(s) for s in version.split(".")] + else: + LOG.append('* Unparseable git version string: "{}"'.format(output)) + return + + if ( + major_version != e3sm_git_major_version + or minor_version < e3sm_git_minor_version + ): + LOG.append( + "* E3SM requires Git version {:d}.{:d}+. 
You appear to be using version {:d}.{:d}.".format( + e3sm_git_major_version, + e3sm_git_minor_version, + major_version, + minor_version, + ) + ) + + +############################################################################### +def check_svn(): + ############################################################################### + e3sm_svn_major_version = 1 + e3sm_svn_minor_version = 4 + e3sm_svn_patch_version = 2 + + stat, output, _ = run_cmd("svn --version --quiet") + if stat != 0: + LOG.append("* Subversion appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") + return + + major_version, minor_version, patch_version = [int(s) for s in output.split(".")] + if ( + major_version < e3sm_svn_major_version + or minor_version < e3sm_svn_minor_version + or patch_version < e3sm_svn_patch_version + ): + LOG.append( + "* E3SM requires Subversion version {:d}.{:d}.{:d}+. You appear to be using version {:d}.{:d}.{:d}.".format( + e3sm_svn_major_version, + e3sm_svn_minor_version, + e3sm_svn_patch_version, + major_version, + minor_version, + patch_version, + ) + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + parse_command_line(sys.argv, description) + + check_sh() + check_csh() + check_perl() + check_git() + check_svn() + + if len(LOG) > 0: + print("e3sm_check_env found problems with your E3SM development environment:\n") + for line in LOG: + print(line) + + sys.exit(1) + else: + print( + "e3sm_check_env found no problems with your E3SM development environment." + ) + sys.exit(0) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/generate_cylc_workflow.py b/CIME/Tools/generate_cylc_workflow.py new file mode 100755 index 00000000000..cac503b342b --- /dev/null +++ b/CIME/Tools/generate_cylc_workflow.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 + +""" +Generates a cylc workflow file for the case. 
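For ensemble runs the case path in the generated workflow is parameterised over the cylc member index; for example, cylc_get_case_path_string (defined below) turns a four-member ensemble rooted at /scratch/case.01 into roughly

    /scratch/case.$(printf "%02d" ${CYLC_TASK_PARAM_member});

which cylc expands to case.01 through case.04.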
See https://cylc.github.io for details about cylc +""" +import os +import sys + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) + +from CIME.Tools.standard_script_setup import * + +from CIME.case import Case +from CIME.utils import expect, transform_vars + +import argparse, re + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory for which namelists are generated.\n" + "Default is current directory.", + ) + + parser.add_argument( + "--cycles", default=1, help="The number of cycles to run, default is RESUBMIT" + ) + + parser.add_argument( + "--ensemble", + default=1, + help="generate suite.rc for an ensemble of cases, the case name argument must end in an integer.\n" + "for example: ./generate_cylc_workflow.py --ensemble 4 \n" + "will generate a workflow file in the current case, if that case is named case.01," + "the workflow will include case.01, case.02, case.03 and case.04", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.caseroot, args.cycles, int(args.ensemble) + + +def cylc_get_ensemble_first_and_last(case, ensemble): + if ensemble == 1: + return 1, None + casename = case.get_value("CASE") + m = re.search(r"(.*[^\d])(\d+)$", casename) + minval = int(m.group(2)) + maxval = minval + ensemble - 1 + return minval, maxval + + +def cylc_get_case_path_string(case, ensemble): + caseroot = case.get_value("CASEROOT") + casename = case.get_value("CASE") + if ensemble == 1: + return "{};".format(caseroot) + basepath = os.path.abspath(caseroot + "/..") + m = re.search(r"(.*[^\d])(\d+)$", casename) + + expect(m, "casename {} must end in an integer for ensemble method".format(casename)) + + return ( + '{basepath}/{basename}$(printf "%0{intlen}d"'.format( + basepath=basepath, basename=m.group(1), intlen=len(m.group(2)) + ) + + " ${CYLC_TASK_PARAM_member});" + ) + + +def cylc_batch_job_template(job, jobname, case, ensemble): + + env_batch = case.get_env("batch") + batch_system_type = env_batch.get_batch_system_type() + batchsubmit = env_batch.get_value("batch_submit") + submit_args = env_batch.get_submit_args(case, job) + case_path_string = cylc_get_case_path_string(case, ensemble) + + return ( + """ + [[{jobname}]] + script = cd {case_path_string} ./case.submit --job {job} + [[[job]]] + batch system = {batch_system_type} + batch submit command template = {batchsubmit} {submit_args} '%(job)s' + [[[directives]]] +""".format( + jobname=jobname, + job=job, + case_path_string=case_path_string, + batch_system_type=batch_system_type, + batchsubmit=batchsubmit, + submit_args=submit_args, + ) + + "{{ batchdirectives }}\n" + ) + + +def cylc_script_job_template(job, case, ensemble): + case_path_string = cylc_get_case_path_string(case, ensemble) + return """ + [[{job}]] + script = cd {case_path_string} ./case.submit --job {job} +""".format( + job=job, case_path_string=case_path_string + ) + + +############################################################################### +def _main_func(description): + 
############################################################################### + caseroot, cycles, ensemble = parse_command_line(sys.argv, description) + + expect( + os.path.isfile(os.path.join(caseroot, "CaseStatus")), + "case.setup must be run prior to running {}".format(__file__), + ) + with Case(caseroot, read_only=True) as case: + if cycles == 1: + cycles = max(1, case.get_value("RESUBMIT")) + env_batch = case.get_env("batch") + env_workflow = case.get_env("workflow") + jobs = env_workflow.get_jobs() + casename = case.get_value("CASE") + input_template = os.path.join( + case.get_value("MACHDIR"), "cylc_suite.rc.template" + ) + + overrides = {"cycles": cycles, "casename": casename} + input_text = open(input_template).read() + + first, last = cylc_get_ensemble_first_and_last(case, ensemble) + if ensemble == 1: + overrides.update({"members": "{}".format(first)}) + overrides.update( + {"workflow_description": "case {}".format(case.get_value("CASE"))} + ) + else: + overrides.update({"members": "{}..{}".format(first, last)}) + firstcase = case.get_value("CASE") + intlen = len(str(last)) + lastcase = firstcase[:-intlen] + str(last) + overrides.update( + { + "workflow_description": "ensemble from {} to {}".format( + firstcase, lastcase + ) + } + ) + overrides.update( + {"case_path_string": cylc_get_case_path_string(case, ensemble)} + ) + + for job in jobs: + jobname = job + if job == "case.st_archive": + continue + if job == "case.run": + jobname = "run" + overrides.update(env_batch.get_job_overrides(job, case)) + overrides.update({"job_id": "run." + casename}) + input_text = input_text + cylc_batch_job_template( + job, jobname, case, ensemble + ) + else: + depends_on = env_workflow.get_value("dependency", subgroup=job) + if depends_on.startswith("case."): + depends_on = depends_on[5:] + input_text = input_text.replace( + " => " + depends_on, " => " + depends_on + " => " + job + ) + + overrides.update(env_batch.get_job_overrides(job, case)) + overrides.update({"job_id": job + "." 
+ casename}) + if "total_tasks" in overrides and overrides["total_tasks"] > 1: + input_text = input_text + cylc_batch_job_template( + job, jobname, case, ensemble + ) + else: + input_text = input_text + cylc_script_job_template( + jobname, case, ensemble + ) + + overrides.update( + { + "batchdirectives": env_batch.get_batch_directives( + case, job, overrides=overrides, output_format="cylc" + ) + } + ) + # we need to re-transform for each job to get job size correctly + input_text = transform_vars( + input_text, case=case, subgroup=job, overrides=overrides + ) + + with open("suite.rc", "w") as f: + f.write(case.get_resolved_value(input_text)) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/getTiming b/CIME/Tools/getTiming new file mode 100755 index 00000000000..3f16847385a --- /dev/null +++ b/CIME/Tools/getTiming @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +""" +Get timing information from run +""" + +from standard_script_setup import * +import argparse, sys, os +from CIME.case import Case +from CIME.get_timing import get_timing + + +def parse_command_line(args, description): + parser = argparse.ArgumentParser( + usage="\n%s [-lid|--lid] [-h|--help]" % os.path.basename(args[0]), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "-lid", + "--lid", + help="print using yymmdd-hhmmss format", + default="999999-999999", + ) + parser.add_argument( + "--caseroot", default=os.getcwd(), help="Case directory to get timing for" + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + return args.caseroot, args.lid + + +def __main_func(description): + """ + Reads timing information from $CASEROOT/timing/$MODEL_timing_stats.$lid and + outputs to $CASEROOT/timing/$MODEL_timing.$CASE.$lid + """ + caseroot, lid = parse_command_line(sys.argv, description) + with Case(caseroot, read_only=True) as case: + get_timing(case, lid) + + +if __name__ == "__main__": + __main_func(__doc__) diff --git a/CIME/Tools/get_case_env b/CIME/Tools/get_case_env new file mode 100755 index 00000000000..7e7db4b393a --- /dev/null +++ b/CIME/Tools/get_case_env @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +""" +Dump what the CIME environment would be for a case. + +Only supports E3SM for now. 
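The script prints a single "&&"-joined string of shell commands; besides eval'ing it in the shell (see the examples below), it can be consumed from Python, e.g. (relative paths are illustrative):

    import subprocess

    env_cmds = subprocess.check_output(["./get_case_env"], text=True).strip()
    # run a command inside that environment
    subprocess.run("{} && ./create_test SMS.f09_g16.X".format(env_cmds),
                   shell=True, check=True)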
+""" +from standard_script_setup import * +from CIME.XML.machines import Machines +from CIME.test_scheduler import TestScheduler +from CIME.utils import parse_test_name, expect, get_src_root +from CIME.config import Config +import CIME.get_tests + +import argparse, tempfile, shutil + +############################################################################### +def parse_command_line(raw_args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""{0} [-c ] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Get the default CIME env \033[0m + > ./{0} + \033[1;32m# Get the default CIME env and load it into your current shell env \033[0m + > eval $(./{0}) + \033[1;32m# Get the CIME env for a different machine or compiler \033[0m + > ./{0} -c SMS.f09_g16.X.mach_compiler + \033[1;32m# Get the CIME env for a different mpi (serial in this case) \033[0m + > ./{0} -c SMS_Mmpi-serial.f09_g16.X + \033[1;32m# Same as above but also load it into current shell env \033[0m + > eval $(./{0} -c SMS_Mmpi-serial.f09_g16.X) + +""".format( + os.path.basename(raw_args[0]) + ), + description=description, + formatter_class=argparse.RawTextHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "-c", + "--case", + default="SMS.f09_g16.X", + help="The case for which you want the env. Default=%(default)s", + ) + + raw_args.append("--silent") + args = CIME.utils.parse_args_and_handle_standard_logging_options(raw_args, parser) + + return args.case + + +############################################################################### +def _main_func(description): + ############################################################################### + casename = parse_command_line(sys.argv, description) + + customize_path = os.path.join(get_src_root(), "cime_config", "customize") + config = Config.load(customize_path) + + machine, compiler = CIME.get_tests.infer_arch_from_tests([casename]) + expect(len(compiler) <= 1, "How did we get multiple compilers from one test case?") + compiler = compiler[0] if len(compiler) == 1 else None + mach_obj = Machines(machine=machine) + compiler = mach_obj.get_default_compiler() if compiler is None else compiler + full_test_name = CIME.get_tests.get_full_test_names( + [casename], mach_obj.get_machine_name(), compiler + )[0] + + output_root = tempfile.mkdtemp() + shell_env = None + + try: + impl = TestScheduler( + [full_test_name], + no_build=True, + machine_name=machine, + compiler=compiler, + output_root=output_root, + ) + success = impl.run_tests() + test_dir = impl._get_test_dir( + full_test_name + ) # pylint: disable=protected-access + shell_exe = os.path.split(os.environ["SHELL"])[-1] + suffix = ".sh" if shell_exe in ["bash", "sh"] else ".csh" + file_to_read = os.path.join(test_dir, ".env_mach_specific{}".format(suffix)) + expect(success, "Failed to create case {}".format(full_test_name)) + + shell_envs = [] + with open(file_to_read, "r") as fd: + for line in fd.readlines(): + if not line.startswith("#") and line.strip() != "": + shell_envs.append(line.strip()) + + shell_env = " && ".join(shell_envs) + + except BaseException: + success = False + errs = str(sys.exc_info()[1]) + finally: + shutil.rmtree(output_root) + + if success: + expect(shell_env is not None, "Bad shell_env state") + print(shell_env) + else: + print(errs) # , file=sys.stderr) + sys.exit(1) + + +############################################################################### + +if 
__name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/get_standard_makefile_args b/CIME/Tools/get_standard_makefile_args new file mode 100755 index 00000000000..5357c09cc1a --- /dev/null +++ b/CIME/Tools/get_standard_makefile_args @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 + +""" +Output the list of standard makefile args to the command line. This script +should only be used when the components buildlib is not written in python +""" + +from standard_script_setup import * + +from CIME.build import get_standard_makefile_args +from CIME.case import Case +from CIME.test_status import * + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to build.\n" "Default is current directory.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.caseroot + + +############################################################################### +def _main_func(description): + ############################################################################### + caseroot = parse_command_line(sys.argv, description) + + success = True + with Case(caseroot) as case: + print(get_standard_makefile_args(case)) + + sys.exit(0 if success else 1) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/jenkins_generic_job b/CIME/Tools/jenkins_generic_job new file mode 100755 index 00000000000..b02a5b69199 --- /dev/null +++ b/CIME/Tools/jenkins_generic_job @@ -0,0 +1,358 @@ +#!/usr/bin/env python3 + +""" +Jenkins runs this script to perform a test of an e3sm +test suite. Essentially, a wrapper around create_test and +wait_for_tests that handles cleanup of old test results and +ensures that the batch system is left in a clean state. 
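In a CI pipeline the script is usually pointed at its own scratch area so that concurrent jobs do not collide; an illustrative invocation (baseline name, paths, and ids are placeholders):

    ./jenkins_generic_job -b next --baseline-compare \
        --scratch-root /scratch/jenkins/build_${BUILD_NUMBER} \
        --jenkins-id ${BUILD_NUMBER}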
+""" + +from standard_script_setup import * + +import CIME.wait_for_tests +from CIME.utils import expect +from CIME.XML.machines import Machines +from CIME.jenkins_generic_job import jenkins_generic_job + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [-g] [-d] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run the tests and compare baselines \033[0m + > {0} + \033[1;32m# Run the tests, compare baselines, and update dashboard \033[0m + > {0} -d + \033[1;32m# Run the tests, generating a full set of baselines (useful for first run on a machine) \033[0m + > {0} -g +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + default_baseline = CIME.utils.get_current_branch(repo=CIME.utils.get_src_root()) + if default_baseline is not None: + default_baseline = default_baseline.replace(".", "_").replace( + "/", "_" + ) # Dots or slashes will mess things up + + parser.add_argument( + "-g", "--generate-baselines", action="store_true", help="Generate baselines" + ) + + parser.add_argument( + "--baseline-compare", + action="store_true", + help="Do baseline comparisons. Off by default.", + ) + + parser.add_argument( + "--submit-to-cdash", action="store_true", help="Send results to CDash" + ) + + parser.add_argument( + "-n", + "--no-submit", + action="store_true", + help="Force us to not send results to CDash, overrides --submit-to-cdash. Useful for CI", + ) + + parser.add_argument( + "--update-success", + action="store_true", + help="Record test success in baselines. Only the nightly process should use this in general.", + ) + + parser.add_argument( + "--no-update-success", + action="store_true", + help="For us to not record test success in baselines, overrides --update-success. Useful for CI.", + ) + + parser.add_argument( + "--no-batch", + action="store_true", + help="Do not use batch system even if on batch machine", + ) + + parser.add_argument( + "-c", + "--cdash-build-name", + help="Build name to use for CDash submission. Default will be __", + ) + + parser.add_argument( + "-p", + "--cdash-project", + default=CIME.wait_for_tests.E3SM_MAIN_CDASH, + help="The name of the CDash project where results should be uploaded", + ) + + parser.add_argument( + "-b", + "--baseline-name", + default=default_baseline, + help="Baseline name for baselines to use. Also impacts dashboard job name. Useful for testing a branch other than next or master", + ) + + parser.add_argument( + "-B", + "--baseline-root", + help="Baseline area for baselines to use. Default will be config_machine value for machine", + ) + + parser.add_argument( + "-O", + "--override-baseline-name", + help="Force comparison with these baseines without impacting dashboard or test-id.", + ) + + parser.add_argument( + "-t", "--test-suite", help="Override default e3sm test suite that will be run" + ) + + parser.add_argument( + "-r", + "--scratch-root", + help="Override default e3sm scratch root. 
Use this to avoid conflicting with other jenkins jobs", + ) + + parser.add_argument( + "--cdash-build-group", + default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, + help="The build group to be used to display results on the CDash dashboard.", + ) + + parser.add_argument( + "-j", + "--parallel-jobs", + type=int, + default=None, + help="Number of tasks create_test should perform simultaneously. Default " + "will be min(num_cores, num_tests).", + ) + + parser.add_argument("--walltime", help="Force a specific walltime for all tests.") + + parser.add_argument( + "-m", + "--machine", + help="The machine for which to build tests, this machine must be defined" + " in the config_machines.xml file for the given model. " + "Default is to match the name of the machine in the test name or " + "the name of the machine this script is run on to the " + "NODENAME_REGEX field in config_machines.xml. This option is highly " + "unsafe and should only be used if you know what you're doing.", + ) + + parser.add_argument( + "--compiler", + help="Compiler to use to build cime. Default will be the default defined for the machine.", + ) + + parser.add_argument( + "-q", "--queue", help="Force create_test to use a specific queue." + ) + + parser.add_argument( + "--check-throughput", + action="store_true", + help="Fail if throughput check fails (fail if tests slow down)", + ) + + parser.add_argument( + "--check-memory", + action="store_true", + help="Fail if memory check fails (fail if tests footprint grows)", + ) + + parser.add_argument( + "--ignore-memleak", + action="store_true", + help="Do not fail if there are memleaks", + ) + + parser.add_argument( + "--ignore-namelists", + action="store_true", + help="Do not fail if there are namelist diffs", + ) + + parser.add_argument( + "--ignore-diffs", + action="store_true", + help="Do not fail if there are history diffs", + ) + + parser.add_argument( + "--save-timing", + action="store_true", + help="Tell create_test to save timings of tests", + ) + + parser.add_argument( + "--pes-file", + help="Full pathname of an optional pes specification file. The file" + "\ncan follow either the config_pes.xml or the env_mach_pes.xml format.", + ) + + parser.add_argument( + "--jenkins-id", + help="Specify an 'id' for the Jenkins jobs.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + if args.no_submit: + args.submit_to_cdash = False + + if args.no_update_success: + args.update_success = False + + expect( + not (args.submit_to_cdash and args.generate_baselines), + "Does not make sense to use --generate-baselines and --submit-to-cdash together", + ) + expect( + not (args.cdash_build_name is not None and not args.submit_to_cdash), + "Does not make sense to use --cdash-build-name without --submit-to-cdash", + ) + expect( + not ( + args.cdash_project is not CIME.wait_for_tests.E3SM_MAIN_CDASH + and not args.submit_to_cdash + ), + "Does not make sense to use --cdash-project without --submit-to-cdash", + ) + + machine = Machines(machine=args.machine) + + args.machine = machine + args.test_suite = ( + machine.get_value("TESTS") if args.test_suite is None else args.test_suite + ) + args.scratch_root = ( + machine.get_value("CIME_OUTPUT_ROOT") + if args.scratch_root is None + else args.scratch_root + ) + args.compiler = ( + machine.get_default_compiler() if args.compiler is None else args.compiler + ) + + expect( + args.baseline_name is not None, + "Failed to probe baseline_name from git branch, please provide one. 
It is essential for formulating the test-id even if baseline comparisons are not being done", + ) + + if args.override_baseline_name is None: + args.override_baseline_name = args.baseline_name + + return ( + args.generate_baselines, + args.submit_to_cdash, + args.no_batch, + args.baseline_name, + args.cdash_build_name, + args.cdash_project, + args.test_suite, + args.cdash_build_group, + args.baseline_compare, + args.scratch_root, + args.parallel_jobs, + args.walltime, + args.machine, + args.compiler, + args.override_baseline_name, + args.baseline_root, + args.update_success, + args.check_throughput, + args.check_memory, + args.ignore_memleak, + args.ignore_namelists, + args.ignore_diffs, + args.save_timing, + args.pes_file, + args.jenkins_id, + args.queue, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + generate_baselines, + submit_to_cdash, + no_batch, + baseline_name, + cdash_build_name, + cdash_project, + test_suite, + cdash_build_group, + baseline_compare, + scratch_root, + parallel_jobs, + walltime, + machine, + compiler, + real_baseline_name, + baseline_root, + update_success, + check_throughput, + check_memory, + ignore_memleak, + ignore_namelists, + ignore_diffs, + save_timing, + pes_file, + jenkins_id, + queue, + ) = parse_command_line(sys.argv, description) + + sys.exit( + 0 + if jenkins_generic_job( + generate_baselines, + submit_to_cdash, + no_batch, + baseline_name, + cdash_build_name, + cdash_project, + test_suite, + cdash_build_group, + baseline_compare, + scratch_root, + parallel_jobs, + walltime, + machine, + compiler, + real_baseline_name, + baseline_root, + update_success, + check_throughput, + check_memory, + ignore_memleak, + ignore_namelists, + ignore_diffs, + save_timing, + pes_file, + jenkins_id, + queue, + ) + else CIME.utils.TESTS_FAILED_ERR_CODE + ) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/list_e3sm_tests b/CIME/Tools/list_e3sm_tests new file mode 100755 index 00000000000..58bb436da76 --- /dev/null +++ b/CIME/Tools/list_e3sm_tests @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 + +""" +List e3sm test suites. Can be used to show what's being tested. Can just +list tested grids, compsets, etc. +""" + +from standard_script_setup import * +from CIME import utils +from CIME import get_tests +from CIME.XML.files import Files +from CIME.XML.compsets import Compsets + +import argparse +import logging + + +logger = logging.getLogger(__name__) + + +def parse_command_line(): + description = """This tool will print all test suite names. + +If any test suite names are provided, then all `term` values for the tests in the suites will be listed. 
+ +Examples +-------- +>>> %(prog)s +e3sm_developer +cime_tiny + +>>> %(prog)s e3sm_developer +ERS.f19_g16_rx1.A.docker_gnu +NCK.f19_g16_rx1.A.docker_gnu + +>>> %(prog)s -t compsets e3sm_developer +A +F2010 +I1850ELM + +>>> %(prog)s -t grids e3sm_developer +f19_g16_rx1""" + + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter + ) + + parser.add_argument( + "suites", + nargs="*", + help="The test suites to list.", + ) + + parser.add_argument( + "-t", + "--term", + choices=("compsets", "grids", "testcases", "tests"), + default="tests", + help="Term of the test name to print.", + ) + + compsets_group = parser.add_argument_group("Compset options") + + compsets_group.add_argument( + "-l", "--long", action="store_true", help="Prints the longname of compsets." + ) + + utils.setup_standard_logging_options(parser) + + kwargs = vars(parser.parse_args()) + + utils.configure_logging(**kwargs) + + return kwargs + + +def list_tests(term, suites, long, **_): + values = [x for s in suites for x in get_tests.get_test_suite(s)] + + if term != "tests": + terms = [utils.parse_test_name(x) for x in values] + + index_map = {"compsets": 3, "grids": 2, "testcases": 0} + + index = index_map[term] + + values = set(x[index] for x in terms) + + if long and term == "compsets": + compset_longnames = get_compset_longnames() + + values = set(compset_longnames[x] for x in values) + + print("\n".join(sorted(values))) + + logger.info(f"Found {len(values)!r} {term}") + + +def get_compset_longnames(): + files = Files() + + names = files.get_components("COMPSETS_SPEC_FILE") + + values = {} + + for n in names: + comp_file = files.get_value("COMPSETS_SPEC_FILE", attribute={"component": n}) + + values.update({x for x in Compsets(comp_file)}) + + return values + + +def _main_func(): + args = parse_command_line() + + if len(args["suites"]) == 0: + test_suites = sorted(get_tests.get_test_suites()) + + for suite in test_suites: + print(suite) + + logger.info(f"Found {len(test_suites)!r} test suites") + else: + list_tests(**args) + + +if __name__ == "__main__": + _main_func() diff --git a/scripts/Tools/mkDepends b/CIME/Tools/mkDepends similarity index 99% rename from scripts/Tools/mkDepends rename to CIME/Tools/mkDepends index cd7542732b0..c8bb73bf280 100755 --- a/scripts/Tools/mkDepends +++ b/CIME/Tools/mkDepends @@ -346,7 +346,7 @@ foreach $f (keys %file_includes) { } # Expand - for ($i = 0; $i <= $#expand_incs; ++$i) { + foreach $i (0..$#expand_incs) { push @expand_incs, @{ $include_depends{$expand_incs[$i]} }; } diff --git a/CIME/Tools/mkSrcfiles b/CIME/Tools/mkSrcfiles new file mode 100755 index 00000000000..7f061bf6b5b --- /dev/null +++ b/CIME/Tools/mkSrcfiles @@ -0,0 +1,131 @@ +#!/usr/bin/env perl + +# Make list of files containing source code. The source list contains all +# .F90, .f90, .F, .f, .c and .cpp files in a specified list of directories. +# The directories are specified one per line in a file called Filepath which +# this script tries to open in the current directory. The current +# directory is prepended to the specified list of directories. If Filepath +# doesn't exist then only the source files in the current directory are +# listed. If directories contain file .exclude then source files listed +# in that file are omitted from the source list. +# The list of source files is written to the file Srcfiles. 
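+# Note (descriptive comment, inferred from the code below): if the environment
+# variable mkSrcfiles_skip_prefix is set, any source file whose name begins
+# with that prefix is skipped as well.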
+ +# Check usage: +@ARGV == 0 or usage(); + +if ( open(FILEPATH,"< Filepath") ) { + @paths = ; + close( FILEPATH ); +} else { + @paths = (); +} +chomp @paths; +unshift(@paths, '.'); + +my $foundExclude = 0; +foreach $dir (@paths) { # (could check that directories exist here) + + if ( (-e "$dir/.exclude") && (open(EXCLUDE,"$dir/.exclude") ) ) { + # Flag .exclude file as found and add content to excludes array + $foundExclude = 1; + print "Found .exclude file in $dir\n"; + foreach $exclude () { + push(@excludes, ("$dir/$exclude")); + } + close( EXCLUDE ); + } + + $dir =~ s!/?\s*$!!; # remove / and any whitespace at end of directory name + ($dir) = glob $dir; # Expand tildes in path names. +} + +if ($foundExclude) { + print "List of files in .exclude files:\n @excludes\n\n"; +} + +# Loop through the directories and add each filename as a hash key. This +# automatically eliminates redundancies. Ignore files found in excludes array. +%src = (); +my $skip_prefix = $ENV{mkSrcfiles_skip_prefix}; + +foreach $dir (@paths) { + @filenames = (glob("$dir/*.[Ffc]"), glob("$dir/*.[Ff]90"), glob("$dir/*.cpp")); + foreach $filename (@filenames) { + if ($foundExclude) { + if ( grep { /$filename/ } @excludes ) { + print "WARNING: Skipping file $filename (Source files in .exclude are ignored)\n"; + next; + } + } + $filename =~ s!.*/!!; # remove part before last slash + if (defined $skip_prefix){ + if ($filename =~ /^${skip_prefix}/){ + print "WARNING: Skipping file $dir/$filename (Source files beginning in $skip_prefix are ignored\n)"; + next; + } + } + $src{$filename} = 1; + } + + # No exclusion func for templates + @templates = glob("$dir/*.F90.in"); + foreach $filename (@templates) { + $filename =~ s!.*/!!; # remove part before last slash + my $dfile = $filename; + $dfile =~ s/\.in//; + delete $src{$dfile} if(defined $src{$dfile}); + $src{$filename} = 1; + } +} +@excludes = (); + +my @srcfiles; +my $foundcnt=0; +my $writenew=1; +if(-e "Srcfiles"){ # file already exists, do not update if no changes are required + open(SRC,"Srcfiles"); + @srcfiles = ; + close(SRC); + $writenew=0; + foreach $file (@srcfiles){ + chomp $file; + if($src{$file}){ + $src{$file}=0; + }else{ + $writenew=1; # A srcfile was removed + last; + } + + } + foreach $file (keys %src){ + if($src{$file} == 1){ + $writenew=1; # A srcfile was added + last; + } + } +} + +if($writenew==1){ + open(SRC,"> Srcfiles") or die "Can't open Srcfiles\n"; + + foreach $file ( sort keys %src ) { + print SRC "$file\n"; + } + + close( SRC ); +} +#-------------------------------------------------------------------------------------- + +sub usage { + ($ProgName = $0) =~ s!.*/!!; # name of program + die < {0} case1 case2 +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("case1", help="First case. This one will be changed") + + parser.add_argument("case2", help="Second case. 
This one will not be changed") + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.case1, args.case2 + + +############################################################################### +def normalize_cases(case1, case2): + ############################################################################### + # gunzip all logs + for case_dir in [case1, case2]: + for log_dir in ["bld", "run"]: + gzips = glob.glob(os.path.join(case_dir, log_dir, "*.gz")) + if gzips: + run_cmd_no_fail("gunzip -f {}".format(" ".join(gzips))) + + # Change case1 to be as if it had same test-id as case2 + test_id1 = run_cmd_no_fail("./xmlquery --value TEST_TESTID", from_dir=case1) + test_id2 = run_cmd_no_fail("./xmlquery --value TEST_TESTID", from_dir=case2) + run_cmd_no_fail( + "for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format( + test_id1, test_id2 + ), + from_dir=case1, + ) + + # Change case1 to look as if it is was built/run at exact same time as case2 + for log_dir in ["bld", "run"]: + case1_lids = set() + for logfile in glob.glob("{}/{}/*.bldlog.*".format(case1, log_dir)): + case1_lids.add(logfile.split(".")[-1]) + + case2_lids = set() + for logfile in glob.glob("{}/{}/*.bldlog.*".format(case2, log_dir)): + case2_lids.add(logfile.split(".")[-1]) + + case1_lids = list(sorted(case1_lids)) + case2_lids = list(sorted(case2_lids)) + + for case1_lid, case2_lid in zip(case1_lids, case2_lids): + run_cmd_no_fail( + "for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format( + case1_lid, case2_lid + ), + from_dir=case1, + ) + + for case1_lid, case2_lid in zip(case1_lids, case2_lids): + files_needing_rename = run_cmd_no_fail( + 'find -depth -name "*.{}"'.format(case1_lid), from_dir=case1 + ).splitlines() + for file_needing_rename in files_needing_rename: + expect(file_needing_rename.endswith(case1_lid), "broken") + new_name = file_needing_rename.rstrip(case1_lid) + case2_lid + os.rename( + os.path.join(case1, file_needing_rename), + os.path.join(case1, new_name), + ) + + # Normalize CIMEROOT + case1_root = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=case1) + case2_root = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=case2) + if case1_root != case2_root: + run_cmd_no_fail( + "for item in $(find -type f); do sed -i 's:{}:{}:g' $item; done".format( + case1_root, case2_root + ), + from_dir=case1, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + case1, case2 = parse_command_line(sys.argv, description) + + normalize_cases(case1, case2) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/pelayout b/CIME/Tools/pelayout new file mode 100755 index 00000000000..48a13a4cc91 --- /dev/null +++ b/CIME/Tools/pelayout @@ -0,0 +1,286 @@ +#!/usr/bin/env python3 + +""" +This utility allows the CIME user to view and modify a case's PE layout. +With this script, a user can: + +1) View the PE layout of a case + ./pelayout + ./pelayout --format "%C: %06T/%+H" --header "Comp: Tasks /Th" +2) Attempt to scale the number of tasks used by a case + ./pelayout --set-ntasks 144 +3) Set the number of threads used by a case + ./pelayout --set-nthrds 2 + +The --set-ntasks option attempts to scale all components so that the +job will run in the provided number of tasks. 
For a component using the +maximum number of tasks, this will merely set that component to the new +number. However, for components running in parallel using a portion of +the maximum tasks, --set-ntasks will attempt to scale the tasks +proportionally, changing the value of ROOTPE to maintain the same level +of parallel behavior. If the --set-ntasks algorithm is unable to +automatically find a new layout, it will print an error message +indicating the component(s) it was unable to reset and no changes will +be made to the case. + +Interpreted FORMAT sequences are: +%% a literal % +%C the component name +%T the task count for the component +%H the thread count for the component +%R the PE root for the component + +Standard format extensions, such as a field length and padding are supported. +Python dictionary-format strings are also supported. For instance, +--format "{C:4}", will print the component name padded to 4 spaces. + +If you encounter problems with this tool or find it is missing any +feature that you need, please open an issue on https://github.com/ESMCI/cime +""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.utils import expect, convert_to_string +import sys +import re + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + # Start with usage description + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter + ) + CIME.utils.setup_standard_logging_options(parser) + + # Set command line options + parser.add_argument( + "--set-ntasks", default=None, help="Total number of tasks to set for the case" + ) + + parser.add_argument( + "--set-nthrds", + "--set-nthreads", + default=None, + help="Number of threads to set for all components", + ) + + parser.add_argument( + "--format", + default="%4C: %6T/%6H; %6R %6P", + help="Format the PE layout items for each component (see below)", + ) + + parser.add_argument( + "--header", + default="Comp NTASKS NTHRDS ROOTPE PSTRIDE", + help="Custom header for PE layout display", + ) + + parser.add_argument( + "--no-header", + default=False, + action="store_true", + help="Do not print any PE layout header", + ) + + parser.add_argument( + "--caseroot", default=os.getcwd(), help="Case directory to reference" + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + if args.no_header: + args.header = None + # End if + + return args.format, args.set_ntasks, args.set_nthrds, args.header, args.caseroot + + +# End def parse_command_line + + +############################################################################### +def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None): + ############################################################################### + thistype = case.get_type_info(var) + value = case.get_value( + var, attribute=attribute, resolved=resolved, subgroup=subgroup + ) + if value is not None and thistype: + value = convert_to_string(value, thistype, var) + return value + + +############################################################################### +def format_pelayout(comp, ntasks, nthreads, rootpe, pstride, arg_format): + ############################################################################### + """ + Format the PE layout information for each component, using a default format, + or 
using the arg_format input, if it exists. + """ + subs = {"C": comp, "T": ntasks, "H": nthreads, "R": rootpe, "P": pstride} + layout_str = re.sub(r"%([0-9]*)C", r"{C:\1}", arg_format) + layout_str = re.sub(r"%([-+0-9]*)T", r"{T:\1}", layout_str) + layout_str = re.sub(r"%([-+0-9]*)H", r"{H:\1}", layout_str) + layout_str = re.sub(r"%([-+0-9]*)R", r"{R:\1}", layout_str) + layout_str = re.sub(r"%([-+0-9]*)P", r"{P:\1}", layout_str) + layout_str = layout_str.format(**subs) + return layout_str + + +# End def format_pelayout + +############################################################################### +def print_pelayout(case, ntasks, nthreads, rootpes, pstrid, arg_format, header): + ############################################################################### + """ + Print the PE layout information for each component, using the format, + if it exists. + """ + comp_classes = case.get_values("COMP_CLASSES") + + if header is not None: + print(header) + # End if + maxthrds = -1 + for comp in comp_classes: + print( + format_pelayout( + comp, + ntasks[comp], + nthreads[comp], + rootpes[comp], + pstrid[comp], + arg_format, + ) + ) + if nthreads[comp] > maxthrds: + maxthrds = nthreads[comp] + # End for + if case.get_value("COMP_INTERFACE") == "nuopc": + eat = case.get_value("ESMF_AWARE_THREADING") + if not eat: + eat = False + print("ESMF_AWARE_THREADING is {}".format(eat)) + tasks = case.get_value("MAX_MPITASKS_PER_NODE") + if not eat: + tasks = tasks / maxthrds + + print("ROOTPE is with respect to {} tasks per node".format(tasks)) + + +# End def print_pelayout + +############################################################################### +def gather_pelayout(case): + ############################################################################### + """ + Gather the PE layout information for each component + """ + ntasks = {} + nthreads = {} + rootpes = {} + pstride = {} + comp_classes = case.get_values("COMP_CLASSES") + + for comp in comp_classes: + ntasks[comp] = int(case.get_value("NTASKS_" + comp)) + nthreads[comp] = int(case.get_value("NTHRDS_" + comp)) + rootpes[comp] = int(case.get_value("ROOTPE_" + comp)) + pstride[comp] = int(case.get_value("PSTRID_" + comp)) + # End for + return ntasks, nthreads, rootpes, pstride + + +# End def gather_pelayout + +############################################################################### +def set_nthreads(case, nthreads): + ############################################################################### + comp_classes = case.get_values("COMP_CLASSES") + + for comp in comp_classes: + case.set_value("NTHRDS", nthreads, comp) + # End for + + +# End def set_nthreads + +############################################################################### +def modify_ntasks(case, new_tot_tasks): + ############################################################################### + comp_classes = case.get_values("COMP_CLASSES") + new_tasks = {} + new_roots = {} + curr_tot_tasks = 0 + + # First, gather current task and root pe info + curr_tasks, _, curr_roots, _ = gather_pelayout(case) + + # How many tasks are currently being used? 
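+    # (Descriptive comment) The current total is the largest ROOTPE + NTASKS
+    # over all components, i.e. the highest-numbered task any component uses.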
+ for comp in comp_classes: + if (curr_tasks[comp] + curr_roots[comp]) > curr_tot_tasks: + curr_tot_tasks = curr_tasks[comp] + curr_roots[comp] + # End if + # End for + + if new_tot_tasks != curr_tot_tasks: + # Compute new task counts and root pes + for comp in comp_classes: + new_tasks[comp] = curr_tasks[comp] * new_tot_tasks / curr_tot_tasks + new_roots[comp] = curr_roots[comp] * new_tot_tasks / curr_tot_tasks + # End for + + # Check for valid recomputation + mod_ok = True + for comp in comp_classes: + if (new_tasks[comp] * curr_tot_tasks / new_tot_tasks) != curr_tasks[comp]: + logger.error("Task change invalid for {}".format(comp)) + mod_ok = False + + if (new_roots[comp] * curr_tot_tasks / new_tot_tasks) != curr_roots[comp]: + logger.error("Root PE change invalid for {}".format(comp)) + mod_ok = False + # End for + expect(mod_ok, "pelayout unable to set ntasks to {}".format(new_tot_tasks)) + + # We got this far? Go ahead and change PE layout + for comp in comp_classes: + case.set_value("NTASKS_" + comp, new_tasks[comp], comp) + case.set_value("ROOTPE_" + comp, new_roots[comp], comp) + # End for + # End if (#tasks changed) + + +# End def modify_ntasks + +############################################################################### +def _main_func(description): + ############################################################################### + # Initialize command line parser and get command line options + arg_format, set_ntasks, set_nthrds, header, caseroot = parse_command_line( + sys.argv, description + ) + + # Initialize case ; read in all xml files from caseroot + with Case(caseroot, record=True) as case: + if set_nthrds is not None: + set_nthreads(case, set_nthrds) + # End if + if set_ntasks is not None: + modify_ntasks(case, int(set_ntasks)) + # End if + ntasks, nthreads, rootpes, pstrid = gather_pelayout(case) + print_pelayout(case, ntasks, nthreads, rootpes, pstrid, arg_format, header) + # End with + + +# End def _main_func + +if __name__ == "__main__": + _main_func(__doc__) +# End if diff --git a/CIME/Tools/preview_namelists b/CIME/Tools/preview_namelists new file mode 100755 index 00000000000..69b89eb5663 --- /dev/null +++ b/CIME/Tools/preview_namelists @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 + +""" +Creates namelist and other model input files for each component (by running each +component's buildnml script). Then copies the generated files to the CaseDocs +subdirectory for inspection. + +It is not required to run this manually: namelists will be generated +automatically when the run starts. However, this can be useful in order to +review the namelists before submitting the case. + +case.setup must be run before this. 
+ +Typical usage is simply: + ./preview_namelists +""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.utils import expect + +import argparse + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory for which namelists are generated.\n" + "Default is current directory.", + ) + + parser.add_argument( + "--component", + help="Specify component's namelist to build.\n" + "If not specified, generates namelists for all components.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args + + +############################################################################### +def _main_func(description): + ############################################################################### + args = parse_command_line(sys.argv, description) + + expect( + os.path.isfile(os.path.join(args.caseroot, "CaseStatus")), + "case.setup must be run prior to running preview_namelists", + ) + with Case(args.caseroot, read_only=False, record=True) as case: + case.create_namelists(component=args.component) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/preview_run b/CIME/Tools/preview_run new file mode 100755 index 00000000000..06946b6abb9 --- /dev/null +++ b/CIME/Tools/preview_run @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 + +""" +Queries key CIME shell commands (mpirun and batch submission). 
+ +To force a certain mpirun command, use: + ./xmlchange MPI_RUN_COMMAND=$your_cmd + +Example: + ./xmlchange MPI_RUN_COMMAND='mpiexec -np 16 --some-flag' + +To force a certain qsub command, use: + ./xmlchange --subgroup=case.run BATCH_COMMAND_FLAGS=$your_flags + +Example: + ./xmlchange --subgroup=case.run BATCH_COMMAND_FLAGS='--some-flag --other-flag' +""" + +from standard_script_setup import * + +from CIME.case import Case + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to query.\n" "Default is current directory.", + ) + + parser.add_argument( + "-j", + "--job", + default=None, + help="The job you want to print.\n" + "Default is case.run (or case.test if this is a test).", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.caseroot, args.job + + +############################################################################### +def _main_func(description): + ############################################################################### + caseroot, job = parse_command_line(sys.argv, description) + logging.disable(logging.INFO) + + with Case(caseroot, read_only=False) as case: + case.preview_run(print, job) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/save_provenance b/CIME/Tools/save_provenance new file mode 100755 index 00000000000..7e3506ff50c --- /dev/null +++ b/CIME/Tools/save_provenance @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 + +""" +This tool provide command-line access to provenance-saving functionality +""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.config import Config +from CIME.provenance import * +from CIME.utils import get_lids +from CIME.get_timing import get_timing + +import logging + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Save run (timing) provenance for current case \033[0m + > {0} postrun +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "mode", + choices=("build", "prerun", "postrun"), + help="Phase for which to save provenance. 
" + "prerun is mostly for infrastructure testing; " + "it does not make sense to store this information manually otherwise", + ) + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) + + parser.add_argument( + "-l", "--lid", help="Force system to save provenance with this LID" + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.mode, args.caseroot, args.lid + + +############################################################################### +def _main_func(description): + ############################################################################### + mode, caseroot, lid = parse_command_line(sys.argv, description) + with Case(caseroot, read_only=False) as case: + srcroot = case.get_value("SRCROOT") + + customize_path = os.path.join(srcroot, "cime_config", "customize") + + config = Config.load(customize_path) + + if mode == "build": + expect( + False, + "Saving build provenance manually is not currently supported " + "but it should already always be happening automatically", + ) + + try: + config.save_build_provenance(case, lid=lid) + except AttributeError: + logger.debug("Could not save build provenance, no handler found") + elif mode == "prerun": + expect(lid is not None, "You must provide LID for prerun mode") + + try: + config.save_prerun_provenance(case, lid=lid) + except AttributeError: + logger.debug("Could not save prerun provenance, no handler found") + elif mode == "postrun": + expect(lid is None, "Please allow me to autodetect LID") + + model = case.get_value("MODEL") + caseid = case.get_value("CASE") + case.set_value("SAVE_TIMING", True) + lids = get_lids(case) + + for lid in lids: + # call get_timing if needed + expected_timing_file = os.path.join( + caseroot, "timing", "{}_timing.{}.{}.gz".format(model, caseid, lid) + ) + + if not os.path.exists(expected_timing_file): + get_timing(case, lid) + + try: + config.save_prerun_provenance(case, lid=lid) + except AttributeError: + logger.debug("Could not save prerun provenance, no handler found") + + try: + config.save_postrun_provenance(case, lid=lid) + except AttributeError: + logger.debug("Could not save postrun provenance, no handler found") + else: + expect(False, "Unhandled mode '{}'".format(mode)) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/scripts/Tools/simple-py-prof b/CIME/Tools/simple-py-prof similarity index 100% rename from scripts/Tools/simple-py-prof rename to CIME/Tools/simple-py-prof diff --git a/CIME/Tools/simple_compare b/CIME/Tools/simple_compare new file mode 100755 index 00000000000..59bc328341c --- /dev/null +++ b/CIME/Tools/simple_compare @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 + +""" +Compare files in a normalized way. Used by create_test for +diffing non-namelist files. 
+""" + +from standard_script_setup import * +import CIME.simple_compare +from CIME.utils import expect + +import argparse, sys, os + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [-c ] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Compare files\033[0m + > {0} baseline_dir/test/file mytestarea/file -c +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("gold_file", help="Path to gold file") + + parser.add_argument("new_file", help="Path to file to compare against gold") + + parser.add_argument( + "-c", + "--case", + action="store", + dest="case", + default=None, + help="The case base id (..). Helps us normalize data.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + # Normalize case + if args.case is not None: + args.case = CIME.utils.normalize_case_id(args.case) + + return args.gold_file, args.new_file, args.case + + +############################################################################### +def _main_func(description): + ############################################################################### + gold_file, compare_file, case = parse_command_line(sys.argv, description) + + if case is None: + logging.warning( + "No case id data available, will not be able to normalize values as effectively" + ) + else: + logging.info("Using case: '{}'".format(case)) + + if gold_file.endswith("runconfig"): + success, comments = CIME.simple_compare.compare_runconfigfiles( + gold_file, compare_file, case + ) + else: + success, comments = CIME.simple_compare.compare_files( + gold_file, compare_file, case + ) + expect( + success, + "Diff between files {} and {}:\n{}".format(gold_file, compare_file, comments), + ) + + print("Files {} and {} MATCH".format(gold_file, compare_file)) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/standard_script_setup.py b/CIME/Tools/standard_script_setup.py new file mode 100644 index 00000000000..89fe12868cc --- /dev/null +++ b/CIME/Tools/standard_script_setup.py @@ -0,0 +1,54 @@ +""" +Encapsulate the importing of python utils and logging setup, things +that every script should do. +""" +# pylint: disable=unused-import + +import sys, os +import __main__ as main + + +def check_minimum_python_version(major, minor, warn_only=False): + """ + Check your python version. + + >>> check_minimum_python_version(3, 5) + >>> + """ + check = sys.version_info[0] > major or ( + sys.version_info[0] == major and sys.version_info[1] >= minor + ) + if check: + return + msg = ( + "Python " + + str(major) + + "." + + str(minor) + + " is required to run CIME. You have " + + str(sys.version_info[0]) + + "." 
+ + str(sys.version_info[1]) + ) + if warn_only: + print(msg.replace("required", "recommended") + ".") + return + raise RuntimeError(msg + " - please use a newer version of Python.") + + +# Require users to be using >=3.6 +check_minimum_python_version(3, 6) +# Warn users if they are using <3.8 +check_minimum_python_version(3, 8, warn_only=True) + +real_file_dir = os.path.dirname(os.path.realpath(__file__)) +cimeroot = os.path.abspath(os.path.join(real_file_dir, "..", "..")) +sys.path.insert(0, cimeroot) + +# Important: Allows external tools to link up with CIME +os.environ["CIMEROOT"] = cimeroot + +import CIME.utils + +CIME.utils.stop_buffering_output() +import logging, argparse diff --git a/CIME/Tools/testreporter.py b/CIME/Tools/testreporter.py new file mode 100755 index 00000000000..a76c113ac09 --- /dev/null +++ b/CIME/Tools/testreporter.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python3 + +""" +Simple script to populate CESM test database with test results. +""" +import os +import sys + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) + +from CIME.Tools.standard_script_setup import * + +from CIME.XML.env_build import EnvBuild +from CIME.XML.env_case import EnvCase +from CIME.XML.env_test import EnvTest +from CIME.XML.test_reporter import TestReporter +from CIME.utils import expect +from CIME.XML.generic_xml import GenericXML + +import glob + +############################################################################### +def parse_command_line(args): + ############################################################################### + parser = argparse.ArgumentParser() + + CIME.utils.setup_standard_logging_options(parser) + + # Parse command line options + + # parser = argparse.ArgumentParser(description='Arguements for testreporter') + parser.add_argument("--tagname", help="Name of the tag being tested.") + parser.add_argument("--testid", help="Test id, ie c2_0_a6g_ing,c2_0_b6g_gnu.") + parser.add_argument( + "--testroot", help="Root directory for tests to populate the database." + ) + parser.add_argument("--testtype", help="Type of test, prealpha or prebeta.") + parser.add_argument( + "--dryrun", + action="store_true", + help="Do a dry run, database will not be populated.", + ) + parser.add_argument( + "--dumpxml", action="store_true", help="Dump XML test results to sceen." + ) + args = parser.parse_args() + CIME.utils.parse_args_and_handle_standard_logging_options(args) + + return ( + args.testroot, + args.testid, + args.tagname, + args.testtype, + args.dryrun, + args.dumpxml, + ) + + +############################################################################### +def get_testreporter_xml(testroot, testid, tagname, testtype): + ############################################################################### + os.chdir(testroot) + + # + # Retrieve compiler name and mpi library + # + xml_file = glob.glob("*" + testid + "/env_build.xml") + expect( + len(xml_file) > 0, + "Tests not found. 
It's possible your testid, {} is wrong.".format(testid), + ) + envxml = EnvBuild(".", infile=xml_file[0]) + compiler = envxml.get_value("COMPILER") + mpilib = envxml.get_value("MPILIB") + + # + # Retrieve machine name + # + xml_file = glob.glob("*" + testid + "/env_case.xml") + envxml = EnvCase(".", infile=xml_file[0]) + machine = envxml.get_value("MACH") + + # + # Retrieve baseline tag to compare to + # + xml_file = glob.glob("*" + testid + "/env_test.xml") + envxml = EnvTest(".", infile=xml_file[0]) + baseline = envxml.get_value("BASELINE_NAME_CMP") + + # + # Create XML header + # + + testxml = TestReporter() + testxml.setup_header( + tagname, machine, compiler, mpilib, testroot, testtype, baseline + ) + + # + # Create lists on tests based on the testid in the testroot directory. + # + test_names = glob.glob("*" + testid) + # + # Loop over all tests and parse the test results + # + test_status = {} + for test_name in test_names: + if not os.path.isfile(test_name + "/TestStatus"): + continue + test_status["COMMENT"] = "" + test_status["BASELINE"] = "----" + test_status["MEMCOMP"] = "----" + test_status["MEMLEAK"] = "----" + test_status["NLCOMP"] = "----" + test_status["STATUS"] = "----" + test_status["TPUTCOMP"] = "----" + # + # Check to see if TestStatus is present, if not then continue + # I might want to set the status to fail + # + try: + lines = [line.rstrip("\n") for line in open(test_name + "/TestStatus")] + except (IOError, OSError): + test_status["STATUS"] = "FAIL" + test_status["COMMENT"] = "TestStatus missing. " + continue + # + # Loop over each line of TestStatus, and check for different types of failures. + # + for line in lines: + if "NLCOMP" in line: + test_status["NLCOMP"] = line[0:4] + if "MEMLEAK" in line: + test_status["MEMLEAK"] = line[0:4] + if "MEMCOMP" in line: + test_status["MEMCOMP"] = line[0:4] + if "BASELINE" in line: + test_status["BASELINE"] = line[0:4] + if "TPUTCOMP" in line: + test_status["TPUTCOMP"] = line[0:4] + if "FAIL PFS" in line: + test_status["STATUS"] = "FAIL" + if "INIT" in line: + test_status["INIT"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "INIT fail! " + break + if "CREATE_NEWCASE" in line: + test_status["CREATE_NEWCASE"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "CREATE_NEWCASE fail! " + break + if "XML" in line: + test_status["XML"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "XML fail! " + break + if "SETUP" in line: + test_status["SETUP"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "SETUP fail! " + break + if "SHAREDLIB_BUILD" in line: + test_status["SHAREDLIB_BUILD"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "CFAIL" + test_status["COMMENT"] += "SHAREDLIB_BUILD fail! " + break + if "MODEL_BUILD" in line: + test_status["MODEL_BUILD"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "CFAIL" + test_status["COMMENT"] += "MODEL_BUILD fail! " + break + if "SUBMIT" in line: + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "SUBMIT fail! " + break + if "RUN" in line: + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "RUN fail! 
" + break + if "COMPARE_base_rest" in line: + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Restart fail! " + break + if "COMPARE_base_hybrid" in line: + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Hybrid fail! " + break + if "COMPARE_base_multiinst" in line: + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Multi instance fail! " + break + if "COMPARE_base_test" in line: + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Base test fail! " + break + if "COMPARE_base_single_thread" in line: + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Thread test fail! " + break + + # + # Do not include time comments. Just a preference to have cleaner comments in the test database + # + try: + if "time=" not in line and "GENERATE" not in line: + if "BASELINE" not in line: + test_status["COMMENT"] += line.split(" ", 3)[3] + " " + else: + test_status["COMMENT"] += line.split(" ", 4)[4] + " " + except Exception: # Probably want to be more specific here + pass + + # + # Fill in the xml with the test results + # + testxml.add_result(test_name, test_status) + + return testxml + + +############################################################################## +def _main_func(): + ############################################################################### + + testroot, testid, tagname, testtype, dryrun, dumpxml = parse_command_line(sys.argv) + + testxml = get_testreporter_xml(testroot, testid, tagname, testtype) + + # + # Dump xml to a file. + # + if dumpxml: + GenericXML.write(testxml, outfile="TestRecord.xml") + + # + # Prompt for username and password, then post the XML string to the test database website + # + if not dryrun: + testxml.push2testdb() + + +############################################################################### + +if __name__ == "__main__": + _main_func() diff --git a/CIME/Tools/wait_for_tests b/CIME/Tools/wait_for_tests new file mode 100755 index 00000000000..c166061c99b --- /dev/null +++ b/CIME/Tools/wait_for_tests @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 + +""" +Wait for a queued set of E3SM tests to finish by watching the +TestStatus files. If all tests pass, 0 is returned, otherwise a +non-zero error code is returned. Note that this program waits +for the RUN phase specifically and will not terminate if the +RUN phase didn't happen. +""" + +from standard_script_setup import * + +import CIME.wait_for_tests + +import argparse, sys, os + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [ ...] [--verbose] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Wait for test in current dir\033[0m + > {0} + \033[1;32m# Wait for test in user specified tests\033[0m + > {0} path/to/testdir + \033[1;32m# Wait for all tests in a test area\033[0m + > {0} path/to/testarea/*/TestStatus +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "paths", + default=".", + nargs="*", + help="Paths to test directories or status file. 
Pwd default.", + ) + + parser.add_argument( + "-n", "--no-wait", action="store_true", help="Do not wait for tests to finish" + ) + + parser.add_argument( + "--no-run", action="store_true", help="Do not expect run phase to be completed" + ) + + parser.add_argument( + "-t", + "--check-throughput", + action="store_true", + help="Fail if throughput check fails (fail if tests slow down)", + ) + + parser.add_argument( + "-m", + "--check-memory", + action="store_true", + help="Fail if memory check fails (fail if tests footprint grows)", + ) + + parser.add_argument( + "-i", + "--ignore-namelist-diffs", + action="store_true", + help="Do not fail a test if the only problem is diffing namelists", + ) + + parser.add_argument( + "--ignore-diffs", + action="store_true", + help="Do not fail a test if the only problem is diffing history files", + ) + + parser.add_argument( + "--ignore-memleak", + action="store_true", + help="Do not fail a test if the only problem is a memleak", + ) + + parser.add_argument( + "--force-log-upload", + action="store_true", + help="Always upload logs to cdash, even if test passed", + ) + + parser.add_argument( + "-b", + "--cdash-build-name", + help="Build name, implies you want results send to Cdash", + ) + + parser.add_argument( + "-p", + "--cdash-project", + default=CIME.wait_for_tests.E3SM_MAIN_CDASH, + help="The name of the CDash project where results should be uploaded", + ) + + parser.add_argument( + "-g", + "--cdash-build-group", + default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, + help="The build group to be used to display results on the CDash dashboard.", + ) + + parser.add_argument("--timeout", type=int, help="Timeout wait in seconds.") + + parser.add_argument( + "--update-success", + action="store_true", + help="Record test success in baselines. 
Only the nightly process should use this in general.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return ( + args.paths, + args.no_wait, + args.check_throughput, + args.check_memory, + args.ignore_namelist_diffs, + args.ignore_diffs, + args.ignore_memleak, + args.cdash_build_name, + args.cdash_project, + args.cdash_build_group, + args.timeout, + args.force_log_upload, + args.no_run, + args.update_success, + ) + + +############################################################################### +def _main_func(description): + ############################################################################### + ( + test_paths, + no_wait, + check_throughput, + check_memory, + ignore_namelist_diffs, + ignore_diffs, + ignore_memleak, + cdash_build_name, + cdash_project, + cdash_build_group, + timeout, + force_log_upload, + no_run, + update_success, + ) = parse_command_line(sys.argv, description) + + sys.exit( + 0 + if CIME.wait_for_tests.wait_for_tests( + test_paths, + no_wait=no_wait, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelist_diffs, + ignore_diffs=ignore_diffs, + ignore_memleak=ignore_memleak, + cdash_build_name=cdash_build_name, + cdash_project=cdash_project, + cdash_build_group=cdash_build_group, + timeout=timeout, + force_log_upload=force_log_upload, + no_run=no_run, + update_success=update_success, + expect_test_complete=not no_wait, + ) + else CIME.utils.TESTS_FAILED_ERR_CODE + ) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/xmlchange b/CIME/Tools/xmlchange new file mode 100755 index 00000000000..4101f9b5dba --- /dev/null +++ b/CIME/Tools/xmlchange @@ -0,0 +1,399 @@ +#!/usr/bin/env python3 + +""" +Allows changing variables in env_*xml files via a command-line interface. + +This provides two main benefits over editing the xml files by hand: + - Settings are checked immediately for validity + - Settings are echoed to the CaseStatus file, providing a "paper trail" of + changes made by the user. + +Examples: + + To set a single variable: + ./xmlchange REST_N=4 + + To set multiple variables at once: + ./xmlchange REST_OPTION=ndays,REST_N=4 + + Alternative syntax (no longer recommended, but supported for backwards + compatibility; only works for a single variable at a time): + ./xmlchange --id REST_N --val 4 + + To set a variable in which the value has commas, you'll need to use + the alternative syntax: + ./xmlchange --id VARNAME --val "one,two" + + Several xml variables that have settings for each component have somewhat special treatment. + The variables that this currently applies to are: + NTASKS, NTHRDS, ROOTPE, PIO_TYPENAME, PIO_STRIDE, PIO_NUMTASKS, PIO_ASYNC_INTERFACE + For example, to set the number of tasks for all components to 16, use: + ./xmlchange NTASKS=16 + To set just the number of tasks for the atm component, use: + ./xmlchange NTASKS_ATM=16 + + The CIME case xml variables are grouped together in xml elements . + This is done to associate together xml variables with common features. + Most variables are only associated with one group. However, in env_batch.xml, + there are also xml variables that are associated with each potential batch job. + For these variables, the '--subgroup' option may be used to specify a particular + group for which the variable's value will be adjusted. 
+ + As an example, in env_batch.xml, the xml variables JOB_QUEUE and JOB_WALLCLOCK_TIME + appear in each of the batch job groups (defined in config_batch.xml): + + + + To set the variable JOB_WALLCLOCK_TIME only for case.run: + ./xmlchange JOB_WALLCLOCK_TIME=0:30 --subgroup case.run + To set the variable JOB_WALLCLOCK_TIME for all jobs: + ./xmlchange JOB_WALLCLOCK_TIME=0:30 +""" + +from standard_script_setup import * + +from CIME.utils import ( + expect, + convert_to_type, + Timeout, +) +from CIME.status import append_case_status +from CIME.case import Case +from CIME.locked_files import check_lockedfiles + +import re + +# Set logger +logger = logging.getLogger("xmlchange") + + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "listofsettings", + nargs="?", + default="", + help="Comma-separated list of settings in the form: var1=value,var2=value,...", + ) + + parser.add_argument( + "--caseroot", + default=os.getcwd(), + help="Case directory to change.\n" "Default is current directory.", + ) + + # Need to support older single dash version of arguments for compatibility with components + + parser.add_argument( + "--append", + "-append", + action="store_true", + help="Append to the existing value rather than overwriting it.", + ) + + parser.add_argument("--subgroup", "-subgroup", help="Apply to this subgroup only.") + + parser.add_argument( + "--id", + "-id", + help="The variable to set.\n" + "(Used in the alternative --id var --val value form, rather than\n" + "the recommended var=value form.)", + ) + + parser.add_argument( + "--val", + "-val", + help="The value to set.\n" + "(Used in the alternative --id var --val value form, rather than\n" + "the recommended var=value form.)", + ) + + parser.add_argument( + "--file", + "-file", + help="XML file to edit.\n" + "Generally not needed, but can be specified to ensure that only the\n" + "expected file is being changed. (If a variable is not found in this file,\n" + "an error will be generated.)", + ) + + parser.add_argument( + "--delimiter", + "-delimiter", + type=str, + default=",", + help="Delimiter string in listofvalues.\n" "Default is ','.", + ) + + parser.add_argument( + "--dryrun", + "-dryrun", + action="store_true", + help="Parse settings and print key-value pairs, but don't actually change anything.", + ) + + parser.add_argument( + "--noecho", + "-noecho", + action="store_true", + help="Do not update CaseStatus with this change.\n" + "This option is mainly meant to be used by cime scripts: the 'paper trail' in\n" + "CaseStatus is meant to show changes made by the user, so we generally don't\n" + "want this to be contaminated by changes made automatically by cime scripts.", + ) + + parser.add_argument( + "-f", + "--force", + action="store_true", + help="Ignore typing checks and store value.", + ) + + parser.add_argument( + "-N", + "--non-local", + action="store_true", + help="Use when you've requested a machine that you aren't on. 
" + "Will reduce errors for missing directories etc.", + ) + + parser.add_argument("-loglevel", help="Ignored, only for backwards compatibility.") + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + listofsettings = [] + if len(args.listofsettings): + expect(args.id is None, "Cannot specify both listofsettings and --id") + expect(args.val is None, "Cannot specify both listofsettings and --val") + delimiter = re.escape(args.delimiter) + listofsettings = re.split(r"(?= 0: + expect( + False, + " Could not set {} to {}, remote branch {} already exists, Rename case and try again.".format( + xmlid, xmlval, branch + ), + ) + with Timeout(30): + case._gitinterface.git_operation("remote", "add", "origin", xmlval) + case._gitinterface.git_operation("push", "--set-upstream", "origin", branch) + + +def xmlchange( + caseroot, + listofsettings, + xmlfile, + xmlid, + xmlval, + subgroup, + append, + noecho, + force, + dryrun, + non_local, +): + with Case(caseroot, read_only=False, record=True, non_local=non_local) as case: + comp_classes = case.get_values("COMP_CLASSES") + env_test = None + if ( + case.get_value("TEST") + and os.path.exists(os.path.join(caseroot, "env_test.xml")) + ) and not (xmlfile or xmlfile == "env_test.xml"): + env_test = case.get_env("test") + + if xmlfile: + case.set_file(xmlfile) + + case.set_comp_classes(comp_classes) + + if len(listofsettings): + logger.debug("List of attributes to change: %s", listofsettings) + + # Change values + for setting in listofsettings: + pair = setting.split("=", 1) + expect( + len(pair) == 2, + "Expecting a key value pair in the form of key=value. Got %s" + % (pair), + ) + (xmlid, xmlval) = pair + xmlchange_single_value( + case, + xmlid, + xmlval, + subgroup, + append, + force, + dryrun, + env_test, + caseroot, + xmlfile, + ) + else: + xmlchange_single_value( + case, + xmlid, + xmlval, + subgroup, + append, + force, + dryrun, + env_test, + caseroot, + xmlfile, + ) + + check_lockedfiles( + case, skip=["env_case"], caseroot=caseroot, whitelist=xmlfile, quiet=True + ) + + if not noecho: + argstr = "" + for arg in sys.argv: + argstr += "%s " % arg + msg = " %s " % (argstr) + append_case_status( + "xmlchange", + "success", + msg=msg, + caseroot=caseroot, + gitinterface=case._gitinterface, + ) + + +def _main_func(description): + # pylint: disable=unused-variable + ( + caseroot, + listofsettings, + xmlfile, + xmlid, + xmlval, + subgroup, + append, + noecho, + force, + dry, + non_local, + ) = parse_command_line(sys.argv, description) + + xmlchange( + caseroot, + listofsettings, + xmlfile, + xmlid, + xmlval, + subgroup, + append, + noecho, + force, + dry, + non_local, + ) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/xmlconvertors/config_pes_converter.py b/CIME/Tools/xmlconvertors/config_pes_converter.py new file mode 100755 index 00000000000..cf57c21fb9b --- /dev/null +++ b/CIME/Tools/xmlconvertors/config_pes_converter.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python3 +""" +config_pes_converter.py -- convert (or verify) config_pes elements from CIME2 +format to CIME5. This tool will compare the two versions and suggest updates to +the CIME5 file. 
+ +The location of these files are needed by the script: + CIME2: cime/machines-acme/config_pes.xml + CIME5: config/acme/allactive/config_pesall.xml +""" +import os, sys + +sys.path.insert( + 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) +) + +from CIME import utils +from CIME.Tools.standard_script_setup import * +from CIME.utils import run_cmd +from shutil import which +import xml.etree.ElementTree as ET +import grid_xml_converter + +LOGGER = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args): + ############################################################################### + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + + utils.setup_standard_logging_options(parser) + + # Set command line options + parser.add_argument( + "-cime2file", + "--cime2file", + help="location of config_grid.xml file in CIME2 format", + required=True, + ) + parser.add_argument( + "-cime5file", + "--cime5file", + help="location of config_grids.xml file in CIME5 format", + required=True, + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + if args.cime2file is None or args.cime5file is None: + parser.print_help() + exit() + + return args.cime2file, args.cime5file + + +class PesNode(grid_xml_converter.DataNode): + def __init__(self, root): + self.ignore = False + super(PesNode, self).__init__(root) + + def __str__(self): + return ET.tostring(self.xmlnode) + + def setattrib(self, node, tag, key=None): + if key is None: + key = tag + if key in self.data: + node.set(tag, self.data[key]) + else: + node.set(tag, "any") + + def keyvalue(self): + return "{}:{}:{}:{}".format( + self.data["gridname"], + self.data["machname"], + self.data["pesize"], + self.data["compset"], + ) + + def to_cime5(self): + gridnode = ET.Element("grid") + self.setattrib(gridnode, "name", "gridname") + machnode = ET.SubElement(gridnode, "mach") + self.setattrib(machnode, "name", "machname") + pesnode = ET.SubElement(machnode, "pes") + self.setattrib(pesnode, "compset") + self.setattrib(pesnode, "pesize") + commentnode = ET.SubElement(pesnode, "comment") + commentnode.text = "none" + for d in ["ntasks", "nthrds", "rootpe"]: + newnode = ET.SubElement(pesnode, d) + for comp in ["atm", "lnd", "rof", "ice", "ocn", "glc", "wav", "cpl", "iac"]: + tag = d + "_" + comp + if tag in self.data[d]: + ET.SubElement(newnode, tag).text = str(self.data[d][tag]) + + return gridnode + + def __eq__(self, other): + for k in ["gridname", "machname", "pesize", "compset"]: + if k not in self.data and k not in other.data: + continue + if k not in self.data or k not in other.data: + return False + if self.data[k] != other.data[k]: + return False + for d in ["ntasks", "nthrds", "rootpe"]: + for k in self.data[d]: + if k not in self.data[d] and k not in other.data[d]: + continue + if k not in self.data[d] or k not in other.data[d]: + return False + if self.data[d][k] != other.data[d][k]: + return False + return True + + +class Cime5PesNode(PesNode): + def set_data(self, xmlnode): + for d in ["ntasks", "nthrds", "rootpe"]: + self.data[d] = {} + self.xmlnode = xmlnode + self.data["gridname"] = xmlnode.get("name") + machnode = xmlnode.find("mach") + self.data["machname"] = machnode.get("name") + pesnode = machnode.find("pes") + self.data["pesize"] = pesnode.get("pesize") + self.data["compset"] = pesnode.get("compset") + commentnode = pesnode.find("comment") 
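+        # the <comment> element is optional in CIME5 entries; keep its text when present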
+ if commentnode is not None: + self.data["comment"] = commentnode.text + for tag in ["ntasks", "nthrds", "rootpe"]: + node = pesnode.find(tag) + for child in node.getchildren(): + self.data[tag][child.tag] = child.text.strip() + + +class Cime2PesNode(PesNode): + ISDEFAULT = "-999999" + DEFAULTS = {"ntasks": "16", "nthrds": "1", "rootpe": "0"} + + def set_data(self, xmlnode): + # Set Defaults + for d in ["ntasks", "nthrds", "rootpe"]: + self.data[d] = {} + for comp in ["atm", "lnd", "ice", "ocn", "glc", "rof", "wav", "cpl", "iac"]: + self.data["ntasks"]["ntasks_" + comp] = self.ISDEFAULT + self.data["nthrds"]["nthrds_" + comp] = self.ISDEFAULT + self.data["rootpe"]["rootpe_" + comp] = self.ISDEFAULT + + # Read in node + self.xmlnode = xmlnode + for checktag in ["OS", "TEST"]: + check = xmlnode.get(checktag) + if check is not None: + self.ignore = True + return + self.data["machname"] = xmlnode.get("MACH", default="any") + self.data["gridname"] = xmlnode.get("GRID", default="any") + self.data["pesize"] = xmlnode.get("PECOUNT", default="any") + self.data["compset"] = xmlnode.get("CCSM_LCOMPSET", default="any") + for d in ["ntasks", "nthrds", "rootpe"]: + for comp in ["atm", "lnd", "ice", "ocn", "glc", "rof", "wav", "cpl", "iac"]: + tag = d + "_" + comp + node = xmlnode.find(tag.upper()) + if node is not None: + val = node.text.strip() + if val[0] == "$": + resolvetag = val[1:] + if resolvetag == "MAX_TASKS_PER_NODE": + val = "-1" + elif resolvetag == "MAX_GPUS_PER_NODE": + val = "-1" + else: + refnode = xmlnode.find(resolvetag) + if refnode is None: + # use default value + val = self.data[resolvetag.lower()[0:6]][ + resolvetag.lower() + ] + else: + val = xmlnode.find(resolvetag).text.strip() + + self.data[d][tag] = val + # Set to defaults. CIME2 had unresolved defaults that referred + # back to the ATM value, so setting just the ATM value would in effect + # set all values + for d in ["ntasks", "nthrds", "rootpe"]: + atmtag = d + "_atm" + if self.data[d][atmtag] == self.ISDEFAULT: + self.data[d][atmtag] = self.DEFAULTS[d] + for comp in ["lnd", "rof", "ice", "ocn", "glc", "wav", "cpl", "iac"]: + tag = d + "_" + comp + if self.data[d][tag] == self.ISDEFAULT: + self.data[d][tag] = self.data[d][atmtag] + + +class PesTree(grid_xml_converter.DataTree): + def __init__(self, xmlfilename): + # original xml file has bad comments + import re, StringIO + + if os.access(xmlfilename, os.R_OK): + with open(xmlfilename, "r") as xmlfile: + t1 = xmlfile.read() + t2 = re.sub( + r"(?<=)", lambda x: x.group(0).replace("-", " "), t2 + ) + tempxml = StringIO.StringIO(t3) + super(PesTree, self).__init__(tempxml) + tempxml.close() + + else: + super(PesTree, self).__init__(xmlfilename) + + def populate(self): + if self.root is None: + return + xmlnodes = self.root.findall("grid") + nodeclass = Cime5PesNode + + if len(xmlnodes) == 0: + xmlnodes = self.root.findall("pes") + nodeclass = Cime2PesNode + for xmlnode in xmlnodes: + datanode = nodeclass(self.root) + datanode.set_data(xmlnode) + if not datanode.ignore: + self.nodes.append(datanode) + + def writexml(self, addlist, newfilename): + root = ET.Element("config_pes") + for a, b in addlist: + if b is not None: + root.append(ET.Element("REPLACE")) + root.append(b.to_cime5()) + root.append(ET.Element("WITH")) + if a is not None: + root.append(a.to_cime5()) + xmllint = which("xmllint") + if xmllint is not None: + run_cmd( + "{} --format --output {} -".format(xmllint, newfilename), + input_str=ET.tostring(root), + ) + + +def diff_tree(atree, btree): + afound = [] + 
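+    # afound/bfound record nodes that have already been matched by key value;
+    # oklist collects matched pairs that are identical, fixlist matched pairs
+    # that differ, addlist CIME2 nodes with no CIME5 counterpart, and duplist
+    # duplicate key values found in the CIME5 tree.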
bfound = [] + oklist = [] + fixlist = [] + addlist = [] + duplist = [] + bkeys = [] + for bnode in btree.nodes: + if bnode.keyvalue() in bkeys: + duplist.append(bnode.keyvalue()) + else: + bkeys.append(bnode.keyvalue()) + + for anode in atree.nodes: + for bnode in btree.nodes: + if bnode in bfound: + continue + if anode.keyvalue() == bnode.keyvalue(): + afound.append(anode) + bfound.append(bnode) + + if anode == bnode: + oklist.append([anode, bnode]) + else: + fixlist.append([anode, bnode]) + break + + if anode in afound: + continue + + addlist.append([anode, None]) + + LOGGER.info("Number of ok nodes: {:d}".format(len(oklist))) + LOGGER.info("Number of wrong nodes: {:d}".format(len(fixlist))) + LOGGER.info("Number of missing nodes: {:d}".format(len(addlist))) + for miss in addlist: + LOGGER.debug(miss[0].keyvalue()) + LOGGER.info("Number of duplicate nodes: {:d}".format(len(duplist))) + for dup in duplist: + LOGGER.info(dup) + return [oklist, fixlist, addlist] + + +def pes_compare(): + cime2file, cime5file = parse_command_line(sys.argv) + + cime2pestree = PesTree(cime2file) + cime5pestree = PesTree(cime5file) + + LOGGER.info("Comparing config_pes files...") + oklist, fixlist, addlist = diff_tree(cime2pestree, cime5pestree) + cime5pestree.postprocess(fixlist, addlist, "tempgrid.xml", cime5file, "badgrid.xml") + + +if __name__ == "__main__": + pes_compare() diff --git a/CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 b/CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 new file mode 100755 index 00000000000..53b22d0e4a6 --- /dev/null +++ b/CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 @@ -0,0 +1,257 @@ +#! /usr/bin/env python3 + +""" +Convert a grid file from v1 to v2. +""" + +import argparse, sys, os + +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + +from standard_script_setup import * +from CIME.utils import expect +from CIME.XML.generic_xml import GenericXML +import xml.etree.ElementTree as ET + +from collections import OrderedDict + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} +OR +{0} --help +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument("v1file", help="v1 file path") + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.v1file + + +############################################################################### +def convert_gridmaps(v1file_obj, v2file_obj): + ############################################################################### + gridmap_data = [] # (attribs, {name->file}) + + v1gridmaps = v1file_obj.get_child(name="gridmaps") + v1gridmap = v1file_obj.get_children(name="gridmap", root=v1gridmaps) + for gridmap_block in v1gridmap: + attribs = v1file_obj.attrib(gridmap_block) + children = [] + for child in v1file_obj.get_children(root=gridmap_block): + children.append((v1file_obj.name(child), v1file_obj.text(child))) + + gridmap_data.append((attribs, children)) + + v2gridmaps = v2file_obj.make_child("gridmaps") + + for attribs, children in gridmap_data: + gridmap = v2file_obj.make_child("gridmap", attributes=attribs, root=v2gridmaps) + for name, text in children: + v2file_obj.make_child( + "map", attributes={"name": name}, root=gridmap, text=text 
+ ) + + +############################################################################### +def convert_domains(v1file_obj, v2file_obj): + ############################################################################### + domain_data = ( + [] + ) # (name, nx, ny, {filemask->mask->file}, {pathmask->mask->path}, desc) + + v1domains = v1file_obj.get_child(name="domains") + v1domain = v1file_obj.get_children(name="domain", root=v1domains) + for domain_block in v1domain: + attrib = v1file_obj.attrib(domain_block) + expect(attrib.keys() == ["name"], "Unexpected attribs: {}".format(attrib)) + + name = attrib["name"] + + desc = v1file_obj.get_element_text("desc", root=domain_block) + sup = v1file_obj.get_element_text("support", root=domain_block) + nx = v1file_obj.get_element_text("nx", root=domain_block) + ny = v1file_obj.get_element_text("ny", root=domain_block) + + if sup and not desc: + desc = sup + + file_masks, path_masks = OrderedDict(), OrderedDict() + + for child_name, masks in [("file", file_masks), ("path", path_masks)]: + children = v1file_obj.get_children(name=child_name, root=domain_block) + for child in children: + attrib = v1file_obj.attrib(child) + expect(len(attrib) == 1, "Bad {} attrib: {}".format(child_name, attrib)) + mask_key, mask_value = attrib.items()[0] + + component, _ = mask_key.split("_") + masks.setdefault(component, OrderedDict())[ + mask_value + ] = v1file_obj.text(child) + + for child in v1file_obj.get_children(root=domain_block): + expect( + v1file_obj.name(child) + in ["nx", "ny", "file", "path", "desc", "support"], + "Unhandled child of grid '{}'".format(v1file_obj.name(child)), + ) + + domain_data.append((name, nx, ny, file_masks, path_masks, desc)) + + v2domains = v2file_obj.make_child("domains") + + for name, nx, ny, file_masks, path_masks, desc in domain_data: + attribs = {"name": name} if name else {} + domain_block = v2file_obj.make_child( + "domain", attributes=attribs, root=v2domains + ) + + v2file_obj.make_child("nx", root=domain_block, text=nx) + v2file_obj.make_child("ny", root=domain_block, text=ny) + + file_to_attrib = OrderedDict() + for component, mask_values in file_masks.iteritems(): + for mask_value, filename in mask_values.iteritems(): + if filename is None: + continue + + try: + path = path_masks[component][mask_value] + except KeyError: + path = "$DIN_LOC_ROOT/share/domains" + + fullfile = os.path.join(path, filename) + mask_value = mask_value if mask_value not in ["reg", name] else "" + file_to_attrib.setdefault(fullfile, OrderedDict()).setdefault( + mask_value, [] + ).append(component) + + for filename, masks in file_to_attrib.iteritems(): + attrib = {} + expect(len(masks) == 1, "Bad mask") + for mask, components in masks.iteritems(): + attrib["grid"] = "|".join(components) + + if mask: + attrib["mask"] = mask + + v2file_obj.make_child( + "file", attributes=attrib, root=domain_block, text=filename + ) + + if desc: + v2file_obj.make_child("desc", root=domain_block, text=desc) + + +############################################################################### +def convert_grids(v1file_obj, v2file_obj): + ############################################################################### + grid_data = [] # (compset, lname, sname, alias, support) + + v1grids = v1file_obj.get_child(name="grids") + v1grid = v1file_obj.get_children(name="grid", root=v1grids) + for grid_block in v1grid: + attrib = v1file_obj.attrib(grid_block) + + compset = attrib["compset"] if "compset" in attrib else None + expect( + attrib.keys() in [["compset"], []], "Unexpected 
attribs: {}".format(attrib) + ) + + lname = v1file_obj.get_element_text("lname", root=grid_block) + sname = v1file_obj.get_element_text("sname", root=grid_block) + alias = v1file_obj.get_element_text("alias", root=grid_block) + support = v1file_obj.get_element_text("support", root=grid_block) + + for child in v1file_obj.get_children(root=grid_block): + expect( + v1file_obj.name(child) in ["lname", "sname", "alias", "support"], + "Unhandled child of grid '{}'".format(v1file_obj.name(child)), + ) + + grid_data.append((compset, lname, sname, alias, support)) + + v2grids = v2file_obj.make_child("grids") + + # TODO: How to leverage model_grid_defaults + + for compset, lname, sname, alias, support in grid_data: + v2_alias = alias if alias else sname + attribs = {"alias": v2_alias} if v2_alias else {} + attribs.update({"compset": compset} if compset else {}) + v2grid = v2file_obj.make_child("model_grid", attributes=attribs, root=v2grids) + + pieces_raw = lname.split("_") + pieces = [] + for raw_piece in pieces_raw: + if "%" in raw_piece: + pieces.append(raw_piece) + else: + pieces[-1] += "_" + raw_piece + + ctype_map = { + "a": "atm", + "l": "lnd", + "oi": "ocnice", + "r": "rof", + "m": "mask", + "g": "glc", + "w": "wav", + } + mask = None + for piece in pieces: + ctype, data = piece.split("%") + cname = ctype_map[ctype.strip()] + if cname == "mask": + expect(mask is None, "Multiple masks") + mask = data + else: + v2file_obj.make_child( + "grid", attributes={"name": cname}, text=data, root=v2grid + ) + + if mask is not None: + v2file_obj.make_child("mask", text=mask, root=v2grid) + + +############################################################################### +def convert_to_v2(v1file): + ############################################################################### + v1file_obj = GenericXML(infile=v1file, read_only=True) + v2file_obj = GenericXML( + infile="out.xml", + read_only=False, + root_name_override="grid_data", + root_attrib_override={"version": "2.0"}, + ) + + convert_grids(v1file_obj, v2file_obj) + + convert_domains(v1file_obj, v2file_obj) + + convert_gridmaps(v1file_obj, v2file_obj) + + v2file_obj.write(outfile=sys.stdout) + + +############################################################################### +def _main_func(description): + ############################################################################### + v1file = parse_command_line(sys.argv, description) + + convert_to_v2(v1file) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/Tools/xmlconvertors/grid_xml_converter.py b/CIME/Tools/xmlconvertors/grid_xml_converter.py new file mode 100755 index 00000000000..01de9ddcf9f --- /dev/null +++ b/CIME/Tools/xmlconvertors/grid_xml_converter.py @@ -0,0 +1,502 @@ +#!/usr/bin/env python3 +""" +grid_xml_converter.py -- convert (or verify) grid elements from CIME2 format +to CIME5. This tool will compare the two versions and suggest updates +to the CIME5 file. 
+ +The location of these files are needed by the script: + CIME2: cime/scripts/Tools/config_grid.xml + CIME5: config/acme/config_grids.xml +""" + +# make sure cime2, cime roots are defined +# use categories +# GRID CONFIGURATIONS grid list domain grid maps +# CIME2: cime/scripts/Tools/config_grid.xml +# CIME5: config/acme/config_grids.xml +# + +import os, sys + +sys.path.insert( + 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) +) + +from CIME import utils +from CIME.Tools.standard_script_setup import * +from CIME.utils import run_cmd_no_fail +from shutil import which +import xml.etree.ElementTree as ET +import operator + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args): + ############################################################################### + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + + utils.setup_standard_logging_options(parser) + + # Set command line options + parser.add_argument( + "-cime2file", + "--cime2file", + help="location of config_grid.xml file in CIME2 format", + required=True, + ) + parser.add_argument( + "-cime5file", + "--cime5file", + help="location of config_grids.xml file in CIME5 format", + required=True, + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.cime2file, args.cime5file + + +class DataNode(object): + """ + non-demoninational dictionary of node data: + """ + + def __init__(self, xmlroot): + self.xmlroot = xmlroot # in case additional information needed + self.data = {} + self.name = None + self.xmlnode = None + + def keyvalue(self): + return self.data[self.key] + + +class GridNode(DataNode): + key = "lname" + + def __str__(self): + return ET.tostring(self.xmlnode) + + def to_cime5(self): + node = ET.Element("grid") + if "compset" in self.data and self.data["compset"] is not None: + node.set("compset", self.data["compset"]) + + for k in ["sname", "lname", "alias", "support"]: + if k in self.data and self.data[k] is not None: + ET.SubElement(node, k).text = self.data[k] + + return node + + def __eq__(self, other): + for k in ["lname", "sname", "compset", "alias"]: + if k not in self.data and k not in other.data: + continue + if k not in self.data or k not in other.data: + return False + if self.data[k] != other.data[k]: + return False + return True + + +class Cime2GridNode(GridNode): + def set_data(self, xmlnode): + self.xmlnode = xmlnode + if xmlnode.text is not None: + self.data["lname"] = xmlnode.text + for k in ["lname", "sname", "alias", "compset"]: + tmpval = xmlnode.get(k) + if tmpval is not None: + self.data[k] = tmpval.strip() + tmpval = xmlnode.get("support_level") + if tmpval is not None: + self.data["support"] = tmpval.strip() + + +class Cime5GridNode(GridNode): + def set_data(self, xmlnode): + self.xmlnode = xmlnode + for k in ["sname", "lname", "support", "alias"]: + if xmlnode.find(k) is not None: + self.data[k] = xmlnode.find(k).text.strip() + if xmlnode.get("compset") is not None: + self.data["compset"] = xmlnode.get("compset").strip() + + +class GridmapNode(DataNode): + def set_data(self, xmlnode): + self.keys = [] + self.data["maps"] = {} + self.xmlnode = xmlnode + for k in [ + "atm_grid", + "lnd_grid", + "ocn_grid", + "rof_grid", + "glc_grid", + "wav_grid", + "ice_grid", + "iac_grid", + ]: + att = xmlnode.get(k) + if att is not None: + self.data[k] = att.strip() + self.keys.append(k) 
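+        # normalize the attribute order so keyvalue() does not depend on the
+        # order in which the attributes appear in the XML file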
+ self.sort() + for child in xmlnode.getchildren(): + self.data["maps"][child.tag] = child.text.strip() + + def sort(self): + newlist = sorted(self.keys, key=operator.itemgetter(0)) + self.keys = newlist + + def to_cime5(self): + node = ET.Element("gridmap") + for k in ["atm_grid", "lnd_grid", "ocn_grid", "rof_grid", "glc_grid"]: + if k in self.data: + node.set(k, self.data[k]) + for key, value in self.data["maps"].items(): + ET.SubElement(node, key).text = value + return node + + def __str__(self): + return str(self.keyvalue()) + str(self.data) + + def __eq__(self, other): + if self.keyvalue() != other.keyvalue(): + return False + if len(self.data["maps"]) != len(other.data["maps"]): + return False + for key, value in self.data["maps"].items(): + if key not in other.data["maps"] or value != other.data["maps"][key]: + return False + return True + + def keyvalue(self): + return "{}:{}:{}:{}".format( + self.keys[0], self.data[self.keys[0]], self.keys[1], self.data[self.keys[1]] + ) + + +class DomainNode(DataNode): + """ + non-demoninational dictionary of domain node information: + """ + + key = "name" + + def to_cime5(self): + node = ET.Element("domain") + node.set("name", self.data["name"]) + for tag in ["nx", "ny", "desc", "support"]: + if tag in self.data: + ET.SubElement(node, tag).text = self.data[tag] + for fop in ["file", "path"]: + if fop in self.data: + for comp, mask, filename in self.data[fop]: + attribs = {"{}{}_mask".format(comp, mask)} + ET.SubElement(node, fop, attribs).text = filename + return node + + def sort(self): + for fop in ["file", "path"]: + newlist = sorted(self.data[fop], key=operator.itemgetter(0)) + self.data[fop] = newlist + + def __eq__(self, other): + # Check for different name, nx, or ny values + for k in ["name", "nx", "ny"]: + if k not in self.data and k not in other.data: + continue + if k not in self.data or k not in other.data: + return False + if self.data[k] != other.data[k]: + return False + # Compare (sorted) file, path lists for equality + for fop in ["file", "path"]: + if fop not in self.data and fop not in other.data: + contine + if fop not in self.data or fop not in other.data: + return False + if len(self.data[fop]) != len(other.data[fop]): + return False + + for i in range(0, len(self.data[fop])): + for j in range(0, 2): + if self.data[fop][i][j] != other.data[fop][i][j]: + return False + + return True + + def __str__(self): + return str(self.data) + + +class Cime2DomainNode(DomainNode): + """ + Read in a domain node from Cime2 xml format + """ + + def set_data(self, xmlnode): + self.xmlnode = xmlnode + self.data["name"] = xmlnode.get("name").strip() + self.data["file"] = [] + self.data["path"] = [] + for tag in ["nx", "ny", "desc"]: + child = xmlnode.find(tag) + if child is not None: + self.data[tag] = child.text + + # Find any griddom nodes that match this name + griddoms = self.xmlroot.findall( + '.griddom[@grid="{}"]'.format(self.data["name"]) + ) + for gd in griddoms: + mask = gd.get("mask") + for comp in ["ATM", "LND", "OCN", "ICE"]: + for fop in ["FILE", "PATH"]: + tag = "{}_DOMAIN_{}".format(comp, fop) + n = gd.find(tag) + if n is not None: + self.data[fop.lower()].append([comp.lower(), mask, n.text]) + # sort the file and path entries + self.sort() + + +class Cime5DomainNode(DomainNode): + """ + Read in a domain node from Cime5 xml format + """ + + def set_data(self, xmlnode): + self.xmlnode = xmlnode + self.data["name"] = xmlnode.get("name") + self.data["file"] = [] + self.data["path"] = [] + for tag in ["nx", "ny", "desc", 
"support"]: + child = xmlnode.find(tag) + if child is not None: + self.data[tag] = child.text + for comp in ["lnd", "atm", "ocn", "ice"]: + masktag = "{}_mask".format(comp) + for fop in ["file", "path"]: + fopnodes = xmlnode.findall("{}[@{}]".format(fop, masktag)) + for n in fopnodes: + mask = n.get(masktag) + filename = n.text.strip() + self.data[fop].append([comp, mask, filename]) + + # sort the file and path entries + self.sort() + + +class DataTree(object): + def __init__(self, xmlfilename): + self.xmlfilename = xmlfilename + + if hasattr(xmlfilename, "read") or os.access(xmlfilename, os.R_OK): + self.doc = ET.parse(xmlfilename) + else: + self.doc = ET.ElementTree() + + self.root = self.doc.getroot() + self.index = 0 + self.n = 0 + self.nodes = [] + self.populate() + self._xmllint = which(xmllint) + + def next(self): + if self.index >= len(self.nodes): + self.index = 0 + raise StopIteration + if self.index < len(self.nodes): + self.index += 1 + return self.nodes[self.index - 1] + + def __iter__(self): + return self + + def postprocess(self, fixlist, addlist, newxmlfile, currentxmlfile, badxmlfile): + if len(addlist) > 0: + logger.info("\n\nWriting suggested nodes to {}".format(newxmlfile)) + logger.info("Copy 'grid' nodes into corresponding location in") + logger.info(currentxmlfile) + self.writexml(addlist, newxmlfile) + self.writexml(fixlist, badxmlfile) + if len(fixlist) > 0: + logger.info("Some nodes should be removed from") + logger.info("config/acme/config_grids.xml. These nodes") + logger.info("have been written to {}".format(badxmlfile)) + + +class GridTree(DataTree): + def populate(self): + if self.root is None: + return + xmlnodes = self.root.findall("GRID") + nodeclass = Cime2GridNode + if len(xmlnodes) == 0: + xmlnodes = self.root.findall("./grids/grid") + nodeclass = Cime5GridNode + + for xmlnode in xmlnodes: + datanode = nodeclass(self.root) + datanode.set_data(xmlnode) + self.nodes.append(datanode) + + def writexml(self, addlist, newfilename): + root = ET.Element("grid_data") + grids = ET.SubElement(root, "grids") + for a, b in addlist: + if b is not None: + grids.append(ET.Element("REPLACE")) + grids.append(b.to_cime5()) + grids.append(ET.Element("WITH")) + + if a is not None: + grids.append(a.to_cime5()) + if self._xmllint is not None: + run_cmd_no_fail( + "{} --format --output {} -".format(self._xmllint, newfilename), + input_str=ET.tostring(root), + ) + + +class DomainTree(DataTree): + def populate(self): + if self.root is None: + return + + xmlnodes = self.root.findall("gridhorz") + nodeclass = Cime2DomainNode + if len(xmlnodes) == 0: + xmlnodes = self.root.findall("./domains/domain") + nodeclass = Cime5DomainNode + + for node in xmlnodes: + datanode = nodeclass(self.root) + datanode.set_data(node) + self.nodes.append(datanode) + + def writexml(self, addlist, newfilename): + root = ET.Element("grid_data") + domains = ET.SubElement(root, "domains") + for a, b in addlist: + if b is not None: + domains.append(ET.Element("REPLACE")) + domains.append(b.to_cime5()) + domains.append(ET.Element("WITH")) + if a is not None: + domains.append(a.to_cime5()) + + if self._xmllint is not None: + run_cmd_no_fail( + "{} --format --output {} -".format(self._xmllint, newfilename), + input_str=ET.tostring(root), + ) + + +class GridmapTree(DataTree): + def populate(self): + if self.root is None: + return + xmlnodes = self.root.findall("gridmap") + if len(xmlnodes) == 0: + xmlnodes = self.root.findall("./gridmaps/gridmap") + for xmlnode in xmlnodes: + datanode = GridmapNode(self.root) + 
datanode.set_data(xmlnode) + self.nodes.append(datanode) + + def writexml(self, addlist, newfilename): + root = ET.Element("gridmaps") + gridmaps = ET.SubElement(root, "gridmap") + for a, b in addlist: + if b is not None: + gridmaps.append(ET.Element("REPLACE")) + gridmaps.append(b.to_cime5()) + gridmaps.append(ET.Element("WITH")) + if a is not None: + gridmaps.append(a.to_cime5()) + if self._xmllint is not None: + run_cmd_no_fail( + "{} --format --output {} -".format(self._xmllint, newfilename), + input_str=ET.tostring(root), + ) + + +def diff_tree(atree, btree): + afound = [] + bfound = [] + oklist = [] + fixlist = [] + addlist = [] + duplist = [] + bkeys = [] + for bnode in btree.nodes: + if bnode.keyvalue() in bkeys: + duplist.append(bnode.keyvalue()) + else: + bkeys.append(bnode.keyvalue()) + + for anode in atree.nodes: + for bnode in btree.nodes: + if bnode in bfound: + continue + if anode.keyvalue() == bnode.keyvalue(): + afound.append(anode) + bfound.append(bnode) + + if anode == bnode: + oklist.append([anode, bnode]) + else: + fixlist.append([anode, bnode]) + break + + if anode in afound: + continue + + addlist.append([anode, None]) + + logger.info("Number of ok nodes: {:d}".format(len(oklist))) + logger.info("Number of wrong nodes: {:d}".format(len(fixlist))) + logger.info("Number of missing nodes: {:d}".format(len(addlist))) + logger.info("Number of duplicate nodes: {:d}".format(len(duplist))) + for dup in duplist: + logger.info(dup) + return [oklist, fixlist, addlist] + + +def grid_compare(): + cime2file, cime5file = parse_command_line(sys.argv) + + cime2gridtree = GridTree(cime2file) + cime5gridtree = GridTree(cime5file) + cime2domaintree = DomainTree(cime2file) + cime5domaintree = DomainTree(cime5file) + cime2gridmaptree = GridmapTree(cime2file) + cime5gridmaptree = GridmapTree(cime5file) + + logger.info("Comparing grid nodes...") + oklist, fixlist, addlist = diff_tree(cime2gridtree, cime5gridtree) + cime5gridtree.postprocess( + fixlist, addlist, "tempgrid.xml", cime5file, "badgrid.xml" + ) + + oklist, fixlist, addlist = diff_tree(cime2domaintree, cime5domaintree) + cime5domaintree.postprocess( + fixlist, addlist, "tempdomain.xml", cime5file, "baddomain.xml" + ) + + oklist, fixlist, addlist = diff_tree(cime2gridmaptree, cime5gridmaptree) + cime5gridmaptree.postprocess( + fixlist, addlist, "tempgridmap.xml", cime5file, "badgridmap.xml" + ) + + +if __name__ == "__main__": + grid_compare() diff --git a/CIME/Tools/xmlquery b/CIME/Tools/xmlquery new file mode 100755 index 00000000000..25ca0a2ea2f --- /dev/null +++ b/CIME/Tools/xmlquery @@ -0,0 +1,538 @@ +#!/usr/bin/env python3 + +""" +Allows querying variables from env_*xml files and listing all available variables. + +There are two usage modes: + +1) Querying variables: + + - You can query a variable, or a list of variables via + ./xmlquery var1 + + or, for multiple variables (either comma or space separated) + ./xmlquery var1,var2,var3 .... + ./xmlquery var1 var2 var3 .... + where var1, var2 and var3 are variables that appear in a CIME case xml file + + Several xml variables that have settings for each component have somewhat special treatment + The variables that this currently applies to are + NTASKS, NTHRDS, ROOTPE, PIO_TYPENAME, PIO_STRIDE, PIO_NUMTASKS + As examples: + - to show the number of tasks for each component, issue + ./xmlquery NTASKS + - to show the number of tasks just for the atm component, issue + ./xmlquery NTASKS_ATM + + - The CIME case xml variables are grouped together in xml elements . 
+ This is done to associate together xml variables with common features. + Most variables are only associated with one group. However, in env_batch.xml, + there are also xml variables that are associated with each potential batch job. + For these variables, the '--subgroup' option may be used to query the variable's + value for a particular group. + + As an example, in env_batch.xml, the xml variable JOB_QUEUE appears in each of + the batch job groups (defined in config_batch.xml): + + + + + To query the variable JOB_QUEUE only for one group in case.run, you need + to specify a sub-group argument to xmlquery. + ./xmlquery JOB_QUEUE --subgroup case.run + JOB_QUEUE: regular + ./xmlquery JOB_QUEUE + Results in group case.run + JOB_QUEUE: regular + Results in group case.st_archive + JOB_QUEUE: caldera + Results in group case.test + JOB_QUEUE: regular + + - You can tailor the query by adding ONE of the following possible qualifier arguments: + [--full --fileonly --value --raw --description --get-group --type --valid-values ] + as examples: + ./xmlquery var1,var2 --full + ./xmlquery var1,var2 --fileonly + + - You can query variables via a partial-match, using --partial-match or -p + as examples: + ./xmlquery STOP --partial-match + Results in group run_begin_stop_restart + STOP_DATE: -999 + STOP_N: 5 + STOP_OPTION: ndays + ./xmlquery STOP_N + STOP_N: 5 + + - By default variable values are resolved prior to output. If you want to see the unresolved + value(s), use the --no-resolve qualifier + as examples: + ./xmlquery RUNDIR + RUNDIR: /glade/scratch/mvertens/atest/run + ./xmlquery RUNDIR --no-resolve + RUNDIR: $CIME_OUTPUT_ROOT/$CASE/run + +2) Listing all groups and variables in those groups + + ./xmlquery --listall + + - You can list a subset of variables by adding one of the following qualifier arguments: + [--subgroup GROUP --file FILE] + + As examples: + + If you want to see the all of the variables in group 'case.run' issue + ./xmlquery --listall --subgroup case.run + + If you want to see all of the variables in 'env_run.xml' issue + ./xmlquery --listall --file env_run.xml + + If you want to see all of the variables in LockedFiles/env_build.xml issue + ./xmlquery --listall --file LockedFiles/env_build.xml + + - You can tailor the query by adding ONE of the following possible qualifier arguments: + [--full --fileonly --raw --description --get-group --type --valid-values] + + - The env_mach_specific.xml and env_archive.xml files are not supported by this tool. 
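+
+  - For use in shell scripts, the --value qualifier prints only the value
+    itself, with no variable name and no trailing newline. For example
+    (reusing the RUNDIR value shown above):
+      ./xmlquery RUNDIR --value
+      /glade/scratch/mvertens/atest/run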
+""" + +from standard_script_setup import * + +from CIME.case import Case +from CIME.utils import expect, convert_to_string + +import textwrap, sys, re + +logger = logging.getLogger("xmlquery") +unsupported_files = ["env_mach_specific.xml", "env_archive.xml"] +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + # Set command line options + parser.add_argument( + "variables", + nargs="*", + help="Variable name(s) to query from env_*.xml file(s)\n" + "( 'variable_name' from value ).\n" + "Multiple variables can be given, separated by commas or spaces.\n", + ) + + parser.add_argument( + "--caseroot", + "-caseroot", + default=os.getcwd(), + help="Case directory to reference.\n" "Default is current directory.", + ) + + parser.add_argument( + "--listall", + "-listall", + default=False, + action="store_true", + help="List all variables and their values.", + ) + + parser.add_argument( + "--file", + "-file", + help="The file you want to query. If not given, queries all files.\n" + "Typically used with the --listall option.", + ) + + parser.add_argument("--subgroup", "-subgroup", help="Apply to this subgroup only.") + + parser.add_argument( + "-p", + "--partial-match", + action="store_true", + help="Allow partial matches of variable names, treats args as regex.", + ) + + parser.add_argument( + "--no-resolve", + "-no-resolve", + action="store_true", + help="Do not resolve variable values.", + ) + + parser.add_argument( + "-N", + "--non-local", + action="store_true", + help="Use when you've requested a machine that you aren't on. 
" + "Will reduce errors for missing directories etc.", + ) + + group = parser.add_mutually_exclusive_group() + + group.add_argument( + "--full", + default=False, + action="store_true", + help="Print a full listing for each variable, including value, type,\n" + "valid values, description and file.", + ) + + group.add_argument( + "--fileonly", + "-fileonly", + default=False, + action="store_true", + help="Only print the filename that each variable is defined in.", + ) + + group.add_argument( + "--value", + "-value", + default=False, + action="store_true", + help="Only print one value without newline character.\n" + "If more than one has been found print first value in list.", + ) + + group.add_argument( + "--raw", + default=False, + action="store_true", + help="Print the complete raw record associated with each variable.", + ) + + group.add_argument( + "--description", + default=False, + action="store_true", + help="Print the description associated with each variable.", + ) + + group.add_argument( + "--get-group", + default=False, + action="store_true", + help="Print the group associated with each variable.", + ) + + group.add_argument( + "--type", + default=False, + action="store_true", + help="Print the data type associated with each variable.", + ) + + group.add_argument( + "--valid-values", + default=False, + action="store_true", + help="Print the valid values associated with each variable, if defined.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + if len(sys.argv) == 1: + parser.print_help() + exit() + + if len(args.variables) == 1: + variables = args.variables[0].split(",") + else: + variables = args.variables + + return ( + variables, + args.subgroup, + args.caseroot, + args.listall, + args.fileonly, + args.value, + args.no_resolve, + args.raw, + args.description, + args.get_group, + args.full, + args.type, + args.valid_values, + args.partial_match, + args.file, + args.non_local, + ) + + +def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None): + if var in [ + "THREAD_COUNT", + "TOTAL_TASKS", + "TASKS_PER_NODE", + "NUM_NODES", + "SPARE_NODES", + "TASKS_PER_NUMA", + "CORES_PER_TASK", + "NGPUS_PER_NODE", + ]: + value = str(getattr(case, var.lower())) + else: + thistype = case.get_type_info(var) + value = case.get_value( + var, attribute=attribute, resolved=resolved, subgroup=subgroup + ) + if value is not None and thistype: + value = convert_to_string(value, thistype, var) + + return value + + +def xmlquery_sub( + case, + variables, + subgroup=None, + fileonly=False, + resolved=True, + raw=False, + description=False, + get_group=False, + full=False, + dtype=False, + valid_values=False, + xmlfile=None, +): + """ + Return list of attributes and their values, print formatted + + """ + results = {} + comp_classes = case.get_values("COMP_CLASSES") + if xmlfile: + case.set_file(xmlfile) + + # Loop over variables + for var in variables: + if subgroup is not None: + groups = [subgroup] + else: + groups = case.get_record_fields(var, "group") + if not groups: + groups = ["none"] + + if xmlfile: + expect( + xmlfile not in unsupported_files, + "XML file {} is unsupported by this tool.".format(xmlfile), + ) + + if not groups: + value = case.get_value(var, resolved=resolved) + results["none"] = {} + results["none"][var] = {} + results["none"][var]["value"] = value + elif not groups: + results["none"] = {} + results["none"][var] = {} + + for group in groups: + if not group in results: + results[group] = {} + if not var in 
results[group]: + results[group][var] = {} + + expect(group, "No group found for var {}".format(var)) + if get_group: + results[group][var]["get_group"] = group + + value = get_value_as_string(case, var, resolved=resolved, subgroup=group) + if value is None: + var, comp, iscompvar = case.check_if_comp_var(var) + if iscompvar: + value = [] + for comp in comp_classes: + try: + nextval = get_value_as_string( + case, + var, + attribute={"compclass": comp}, + resolved=resolved, + subgroup=group, + ) + except Exception: # probably want to be more specific + nextval = get_value_as_string( + case, + var, + attribute={"compclass": comp}, + resolved=False, + subgroup=group, + ) + + if nextval is not None: + value.append(comp + ":" + "{}".format(nextval)) + else: + value = get_value_as_string( + case, var, resolved=resolved, subgroup=group + ) + + if value is None: + if xmlfile: + expect( + False, + " No results found for variable {} in file {}".format( + var, xmlfile + ), + ) + else: + expect(False, " No results found for variable {}".format(var)) + + results[group][var]["value"] = value + + if raw: + results[group][var]["raw"] = case.get_record_fields(var, "raw") + if description or full: + results[group][var]["desc"] = case.get_record_fields(var, "desc") + if fileonly or full: + results[group][var]["file"] = case.get_record_fields(var, "file") + if dtype or full: + results[group][var]["type"] = case.get_type_info(var) + if valid_values or full: + results[group][var]["valid_values"] = case.get_record_fields( + var, "valid_values" + ) # *** this is the problem *** + + return results + + +def _main_func(description): + # Initialize command line parser and get command line options + ( + variables, + subgroup, + caseroot, + listall, + fileonly, + value, + no_resolve, + raw, + description, + get_group, + full, + dtype, + valid_values, + partial_match, + xmlfile, + non_local, + ) = parse_command_line(sys.argv, description) + + expect( + xmlfile not in unsupported_files, + "XML file {} is unsupported by this tool.".format(xmlfile), + ) + + # Initialize case ; read in all xml files from caseroot + with Case(caseroot, non_local=non_local) as case: + if listall or partial_match: + if xmlfile: + case.set_file(xmlfile) + all_variables = sorted(case.get_record_fields(None, "varid")) + logger.debug("all_variables: {}".format(all_variables)) + if partial_match: + all_matching_vars = [] + for variable in variables: + regex = re.compile(variable) + for all_variable in all_variables: + if regex.search(all_variable): + if subgroup is not None: + vargroups = case.get_record_fields( + all_variable, "group" + ) + if subgroup not in vargroups: + continue + + all_matching_vars.append(all_variable) + + variables = all_matching_vars + else: + if subgroup is not None: + all_matching_vars = [] + for all_variable in all_variables: + vargroups = case.get_record_fields(all_variable, "group") + if subgroup not in vargroups: + continue + else: + all_matching_vars.append(all_variable) + + variables = all_matching_vars + else: + variables = all_variables + expect(variables, "No variables found") + results = xmlquery_sub( + case, + variables, + subgroup, + fileonly, + resolved=not no_resolve, + raw=raw, + description=description, + get_group=get_group, + full=full, + dtype=dtype, + valid_values=valid_values, + xmlfile=xmlfile, + ) + + if full or description: + wrapper = textwrap.TextWrapper() + wrapper.subsequent_indent = "\t\t\t" + wrapper.fix_sentence_endings = True + + cnt = 0 + for group in sorted(iter(results)): + if ( + 
(len(variables) > 1 or len(results) > 1 or full) + and not get_group + and not value + ): + print("\nResults in group {}".format(group)) + for var in variables: + if var in results[group]: + if raw: + print(results[group][var]["raw"]) + elif get_group: + print("\t{}: {}".format(var, results[group][var]["get_group"])) + elif value: + if cnt > 0: + sys.stdout.write(",") + sys.stdout.write("{}".format(results[group][var]["value"])) + cnt += 1 + elif description: + if results[group][var]["desc"][0] is not None: + desc_text = " ".join(results[group][var]["desc"][0].split()) + print("\t{}: {}".format(var, wrapper.fill(desc_text))) + elif fileonly: + print("\t{}: {}".format(var, results[group][var]["file"])) + elif dtype: + print("\t{}: {}".format(var, results[group][var]["type"])) + elif valid_values: + if "valid_values" in results[group][var]: + print( + "\t{}: {}".format(var, results[group][var]["valid_values"]) + ) + elif full: + if results[group][var]["desc"][0] is not None: + desc_text = " ".join(results[group][var]["desc"][0].split()) + print("\t{}: value={}".format(var, results[group][var]["value"])) + print("\t\ttype: {}".format(results[group][var]["type"][0])) + if "valid_values" in results[group][var]: + print( + "\t\tvalid_values: {}".format( + results[group][var]["valid_values"] + ) + ) + print("\t\tdescription: {}".format(wrapper.fill(desc_text))) + print("\t\tfile: {}".format(results[group][var]["file"][0])) + else: + print("\t{}: {}".format(var, results[group][var]["value"])) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/scripts/Tools/xmltestentry b/CIME/Tools/xmltestentry similarity index 99% rename from scripts/Tools/xmltestentry rename to CIME/Tools/xmltestentry index d4dcf414818..025f13cf491 100755 --- a/scripts/Tools/xmltestentry +++ b/CIME/Tools/xmltestentry @@ -89,4 +89,3 @@ if ($check_num > 0) { print FILE " \n"; close (FILE); } - diff --git a/scripts/lib/CIME/XML/__init__.py b/CIME/XML/__init__.py similarity index 100% rename from scripts/lib/CIME/XML/__init__.py rename to CIME/XML/__init__.py diff --git a/CIME/XML/archive.py b/CIME/XML/archive.py new file mode 100644 index 00000000000..e9d13eae686 --- /dev/null +++ b/CIME/XML/archive.py @@ -0,0 +1,92 @@ +""" +Interface to the archive.xml file. 
This class inherits from GenericXML.py +""" + +from CIME.XML.standard_module_setup import * +from CIME.config import Config +from CIME.XML.archive_base import ArchiveBase +from CIME.XML.files import Files +from copy import deepcopy + +logger = logging.getLogger(__name__) + + +class Archive(ArchiveBase): + def __init__(self, infile=None, files=None): + """ + initialize an object + """ + if files is None: + files = Files() + schema = files.get_schema("ARCHIVE_SPEC_FILE") + super(Archive, self).__init__(infile, schema) + + def setup(self, env_archive, components, files=None): + if files is None: + files = Files() + + components_node = env_archive.make_child( + "components", attributes={"version": "2.0"} + ) + + arch_components = deepcopy(components) + + config = Config.instance() + + for comp in config.additional_archive_components: + if comp not in arch_components: + arch_components.append(comp) + + for comp in arch_components: + infile = files.get_value("ARCHIVE_SPEC_FILE", {"component": comp}) + + if infile is not None and os.path.isfile(infile): + arch = Archive(infile=infile, files=files) + specs = arch.get_optional_child( + name="comp_archive_spec", attributes={"compname": comp} + ) + else: + if infile is None: + logger.debug( + "No archive file defined for component {}".format(comp) + ) + else: + logger.debug( + "Archive file {} for component {} not found".format( + infile, comp + ) + ) + + specs = self.get_optional_child( + name="comp_archive_spec", attributes={"compname": comp} + ) + + if specs is None: + logger.debug("No archive specs found for component {}".format(comp)) + else: + logger.debug("adding archive spec for {}".format(comp)) + env_archive.add_child(specs, root=components_node) + + def get_all_config_archive_files(self, files): + """ + Returns the list of ARCHIVE_SPEC_FILES that exist on disk as defined in config_files.xml + """ + archive_spec_node = files.get_child("entry", {"id": "ARCHIVE_SPEC_FILE"}) + component_nodes = files.get_children( + "value", root=files.get_child("values", root=archive_spec_node) + ) + config_archive_files = [] + for comp in component_nodes: + attr = self.get(comp, "component") + if attr: + compval = files.get_value( + "ARCHIVE_SPEC_FILE", attribute={"component": attr} + ) + else: + compval = self.get_resolved_value(self.text(comp)) + + if os.path.isfile(compval): + config_archive_files.append(compval) + + config_archive_files = list(set(config_archive_files)) + return config_archive_files diff --git a/CIME/XML/archive_base.py b/CIME/XML/archive_base.py new file mode 100644 index 00000000000..ff8c096ce9f --- /dev/null +++ b/CIME/XML/archive_base.py @@ -0,0 +1,265 @@ +""" +Base class for archive files. This class inherits from generic_xml.py +""" +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.utils import convert_to_type + +logger = logging.getLogger(__name__) + + +class ArchiveBase(GenericXML): + def exclude_testing(self, compname): + """ + Checks if component should be excluded from testing. 
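+
+        True is returned only when the component's comp_archive_spec entry
+        carries an exclude_testing attribute that converts to a true logical
+        value. An illustrative entry ("dart" is only an example component name):
+
+            <comp_archive_spec compname="dart" exclude_testing="TRUE">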
+ """ + value = self._get_attribute(compname, "exclude_testing") + + if value is None: + return False + + return convert_to_type(value, "logical") + + def _get_attribute(self, compname, attr_name): + attrib = self.get_entry_attributes(compname) + + if attrib is None: + return None + + return attrib.get(attr_name, None) + + def get_entry_attributes(self, compname): + entry = self.get_entry(compname) + + if entry is None: + return None + + return self.attrib(entry) + + def get_entry(self, compname): + """ + Returns an xml node corresponding to compname in comp_archive_spec + """ + return self.scan_optional_child( + "comp_archive_spec", attributes={"compname": compname} + ) + + def _get_file_node_text(self, attnames, archive_entry): + """ + get the xml text associated with each of the attnames + based at root archive_entry + returns a list of text entries or + an empty list if no entries are found + """ + nodes = [] + textvals = [] + for attname in attnames: + nodes.extend(self.get_children(attname, root=archive_entry)) + for node in nodes: + textvals.append(self.text(node)) + return textvals + + def get_rest_file_extensions(self, archive_entry): + """ + get the xml text associated with each of the rest_file_extensions + based at root archive_entry (root is based on component name) + returns a list of text entries or + an empty list if no entries are found + """ + return self._get_file_node_text(["rest_file_extension"], archive_entry) + + def get_hist_file_extensions(self, archive_entry): + """ + get the xml text associated with each of the hist_file_extensions + based at root archive_entry (root is based on component name) + returns a list of text entries or + an empty list if no entries are found + """ + return self._get_file_node_text(["hist_file_extension"], archive_entry) + + def get_hist_file_ext_regexes(self, archive_entry): + """ + get the xml text associated with each of the hist_file_ext_regex entries + based at root archive_entry (root is based on component name) + returns a list of text entries or + an empty list if no entries are found + """ + return self._get_file_node_text(["hist_file_ext_regex"], archive_entry) + + def get_entry_value(self, name, archive_entry): + """ + get the xml text associated with name under root archive_entry + returns None if no entry is found, expects only one entry + """ + node = self.get_optional_child(name, root=archive_entry) + if node is not None: + return self.text(node) + return None + + def get_latest_hist_files( + self, casename, model, from_dir, suffix="", ref_case=None + ): + """ + get the most recent history files in directory from_dir with suffix if provided + """ + test_hists = self.get_all_hist_files( + casename, model, from_dir, suffix=suffix, ref_case=ref_case + ) + ext_regexes = self.get_hist_file_ext_regexes( + self.get_entry(self._get_compname(model)) + ) + latest_files = {} + histlist = [] + for hist in test_hists: + ext = _get_extension(model, hist, ext_regexes) + latest_files[ext] = hist + + for key in latest_files.keys(): + histlist.append(latest_files[key]) + return histlist + + def get_all_hist_files(self, casename, model, from_dir, suffix="", ref_case=None): + """ + gets all history files in directory from_dir with suffix (if provided) + ignores files with ref_case in the name if ref_case is provided + """ + dmodel = self._get_compname(model) + # remove when component name is changed + if model == "fv3gfs": + model = "fv3" + + hist_files = [] + extensions = self.get_hist_file_extensions(self.get_entry(dmodel)) + if suffix and 
len(suffix) > 0: + has_suffix = True + else: + has_suffix = False + + # Strip any trailing $ if suffix is present and add it back after the suffix + for ext in extensions: + if ext.endswith("$") and has_suffix: + ext = ext[:-1] + string = model + r"\d?_?(\d{4})?(_d\d{2})?\." + ext + if has_suffix: + if not suffix in string: + string += r"\." + suffix + "$" + + if not string.endswith("$"): + string += "$" + + logger.debug("Regex is {}".format(string)) + pfile = re.compile(string) + hist_files.extend( + [ + f + for f in os.listdir(from_dir) + if pfile.search(f) + and ( + (f.startswith(casename) or f.startswith(model)) + and not f.endswith("cprnc.out") + ) + ] + ) + + if ref_case: + expect( + ref_case not in casename, + "ERROR: ref_case name {} conflicts with casename {}".format( + ref_case, casename + ), + ) + hist_files = [ + h for h in hist_files if not (ref_case in os.path.basename(h)) + ] + + hist_files = list(set(hist_files)) + hist_files.sort() + logger.debug( + "get_all_hist_files returns {} for model {}".format(hist_files, model) + ) + + return hist_files + + @staticmethod + def _get_compname(model): + """ + Given a model name, return a possibly-modified name for use as the compname argument + to get_entry + """ + if model == "cpl": + return "drv" + return model + + +def _get_extension(model, filepath, ext_regexes): + r""" + For a hist file for the given model, return what we call the "extension" + + model - The component model + filepath - The path of the hist file + ext_regexes - A list of model-specific regexes that are matched before falling back on + the general-purpose regex, r'\w+'. In many cases this will be an empty list, + signifying that we should just use the general-purpose regex. + + >>> _get_extension("cpl", "cpl.hi.nc", []) + 'hi' + >>> _get_extension("cpl", "cpl.h.nc", []) + 'h' + >>> _get_extension("cpl", "cpl.h1.nc.base", []) + 'h1' + >>> _get_extension("cpl", "TESTRUNDIFF.cpl.hi.0.nc.base", []) + 'hi' + >>> _get_extension("cpl", "TESTRUNDIFF_Mmpi-serial.f19_g16_rx1.A.melvin_gnu.C.fake_testing_only_20160816_164150-20160816_164240.cpl.h.nc", []) + 'h' + >>> _get_extension("clm","clm2_0002.h0.1850-01-06-00000.nc", []) + '0002.h0' + >>> _get_extension("pop","PFS.f09_g16.B1850.cheyenne_intel.allactive-default.GC.c2_0_b1f2_int.pop.h.ecosys.nday1.0001-01-02.nc", []) + 'h' + >>> _get_extension("mom", "ga0xnw.mom6.frc._0001_001.nc", []) + 'frc' + >>> _get_extension("mom", "ga0xnw.mom6.sfc.day._0001_001.nc", []) + 'sfc.day' + >>> _get_extension("mom", "bixmc5.mom6.prog._0001_01_05_84600.nc", []) + 'prog' + >>> _get_extension("mom", "bixmc5.mom6.hm._0001_01_03_42300.nc", []) + 'hm' + >>> _get_extension("mom", "bixmc5.mom6.hmz._0001_01_03_42300.nc", []) + 'hmz' + >>> _get_extension("pop", "casename.pop.dd.0001-01-02-00000", []) + 'dd' + >>> _get_extension("cism", "casename.cism.gris.h.0002-01-01-0000.nc", [r"\w+\.\w+"]) + 'gris.h' + """ + # Remove with component namechange + if model == "fv3gfs": + model = "fv3" + + basename = os.path.basename(filepath) + m = None + if ext_regexes is None: + ext_regexes = [] + + # First add any model-specific extension regexes; these will be checked before the + # general regex + if model == "mom": + # Need to check 'sfc.day' specially: the embedded '.' 
messes up the + # general-purpose regex + ext_regexes.append(r"sfc\.day") + + # Now add the general-purpose extension regex + ext_regexes.append(r"\w+") + + for ext_regex in ext_regexes: + full_regex_str = model + r"\d?_?(\d{4})?\.(" + ext_regex + r")[-\w\.]*" + full_regex = re.compile(full_regex_str) + m = full_regex.search(basename) + if m is not None: + if m.group(1) is not None: + result = m.group(1) + "." + m.group(2) + else: + result = m.group(2) + return result + + expect(m, "Failed to get extension for file '{}'".format(filepath)) + + return result diff --git a/CIME/XML/batch.py b/CIME/XML/batch.py new file mode 100644 index 00000000000..75d9b1cfb94 --- /dev/null +++ b/CIME/XML/batch.py @@ -0,0 +1,166 @@ +""" +Interface to the config_batch.xml file. This class inherits from GenericXML.py + +The batch_system type="foo" blocks define most things. Machine-specific overrides +can be defined by providing a batch_system MACH="mach" block. +""" +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files +from CIME.utils import expect + +logger = logging.getLogger(__name__) + + +class Batch(GenericXML): + def __init__( + self, + batch_system=None, + machine=None, + infile=None, + files=None, + extra_machines_dir=None, + ): + """ + initialize an object + + If extra_machines_dir is provided, it should be a string giving a path to an + additional directory that will be searched for a config_batch.xml file; if + found, the contents of this file will be appended to the standard + config_batch.xml. An empty string is treated the same as None. + """ + if files is None: + files = Files() + if infile is None: + infile = files.get_value("BATCH_SPEC_FILE") + + config_dir = os.path.dirname(infile) + + schema = files.get_schema("BATCH_SPEC_FILE") + + GenericXML.__init__(self, infile, schema=schema) + + self.batch_system_node = None + self.machine_node = None + self.batch_system = batch_system + self.machine = machine + + # Append the contents of $HOME/.cime/config_batch.xml if it exists. + # + # Also append the contents of a config_batch.xml file in the directory given by + # extra_machines_dir, if present. + # + # This could cause problems if node matches are repeated when only one is expected. 
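+        # A sketch of the resulting lookup order (an illustration of the reads below,
+        # not part of the schema):
+        #   1. $HOME/.cime/config_batch.xml              (appended if it exists)
+        #   2. <extra_machines_dir>/config_batch.xml     (appended if provided and present)
+        #   3. <config_dir>/<machine>/config_batch.xml   (read only when neither of the above was found)
+        # Each read appends nodes to the same tree, so a batch_system block duplicated
+        # across these files can produce the repeated node matches noted above.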
+ infile = os.path.join(os.environ.get("HOME"), ".cime", "config_batch.xml") + usehome = False + if os.path.exists(infile): + GenericXML.read(self, infile) + usehome = True + useextra = False + if extra_machines_dir: + infile = os.path.join(extra_machines_dir, "config_batch.xml") + if os.path.exists(infile): + GenericXML.read(self, infile) + useextra = True + if not usehome and not useextra: + batchfile = os.path.join(config_dir, self.machine, "config_batch.xml") + if os.path.exists(batchfile): + GenericXML.read(self, batchfile) + + if self.batch_system is not None: + self.set_batch_system(self.batch_system, machine=machine) + + def get_batch_system(self): + """ + Return the name of the batch system + """ + return self.batch_system + + def get_optional_batch_node(self, nodename, attributes=None): + """ + Return data on a node for a batch system + """ + expect( + self.batch_system_node is not None, + "Batch system not set, use parent get_node?", + ) + + if self.machine_node is not None: + result = self.get_optional_child( + nodename, attributes, root=self.machine_node + ) + if result is None: + return self.get_optional_child( + nodename, attributes, root=self.batch_system_node + ) + else: + return result + else: + return self.get_optional_child( + nodename, attributes, root=self.batch_system_node + ) + + def set_batch_system(self, batch_system, machine=None): + """ + Sets the batch system block in the Batch object + """ + machine = machine if machine is not None else self.machine + if self.batch_system != batch_system or self.batch_system_node is None: + nodes = self.get_children("batch_system", {"type": batch_system}) + for node in nodes: + mach = self.get(node, "MACH") + if mach is None: + self.batch_system_node = node + elif mach == machine: + self.machine = machine + self.machine_node = node + + expect( + self.batch_system_node is not None, + "No batch system '{}' found".format(batch_system), + ) + + return batch_system + + # pylint: disable=arguments-differ + def get_value(self, name, attribute=None, resolved=True, subgroup=None): + """ + Get Value of fields in the config_batch.xml file + """ + expect( + self.batch_system_node is not None, + "Batch object has no batch system defined", + ) + expect(subgroup is None, "This class does not support subgroups") + value = None + + node = self.get_optional_batch_node(name) + if node is not None: + value = self.text(node) + + if resolved: + if value is not None: + value = self.get_resolved_value(value) + elif name in os.environ: + value = os.environ[name] + + return value + + def get_batch_jobs(self): + """ + Return a list of jobs with the first element the name of the case script + and the second a dict of qualifiers for the job + """ + jobs = [] + bnode = self.get_optional_child("batch_jobs") + if bnode: + for jnode in self.get_children(root=bnode): + if self.name(jnode) == "job": + name = self.get(jnode, "name") + jdict = {} + for child in self.get_children(root=jnode): + jdict[self.name(child)] = self.text(child) + + jobs.append((name, jdict)) + + return jobs diff --git a/CIME/XML/component.py b/CIME/XML/component.py new file mode 100644 index 00000000000..abd09c86fd0 --- /dev/null +++ b/CIME/XML/component.py @@ -0,0 +1,366 @@ +""" +Interface to the config_component.xml files. 
This class inherits from EntryID.py +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.entry_id import EntryID +from CIME.XML.files import Files +from CIME.utils import get_cime_root + +logger = logging.getLogger(__name__) + + +class Component(EntryID): + def __init__(self, infile, comp_class): + """ + initialize a Component obect from the component xml file in infile + associate the component class with comp_class if provided. + """ + self._comp_class = comp_class + if infile == "testingonly": + self.filename = infile + return + files = Files() + schema = None + EntryID.__init__(self, infile) + schema = files.get_schema( + "CONFIG_{}_FILE".format(comp_class), + attributes={"version": "{}".format(self.get_version())}, + ) + + if schema is not None: + self.validate_xml_file(infile, schema) + + # pylint: disable=arguments-differ + def get_value(self, name, attribute=None, resolved=False, subgroup=None): + expect(subgroup is None, "This class does not support subgroups") + return EntryID.get_value(self, name, attribute, resolved) + + def get_valid_model_components(self): + """ + return a list of all possible valid generic (e.g. atm, clm, ...) model components + from the entries in the model CONFIG_CPL_FILE + """ + components = [] + comps_node = self.get_child("entry", {"id": "COMP_CLASSES"}) + comps = self.get_default_value(comps_node) + components = comps.split(",") + return components + + def _get_value_match(self, node, attributes=None, exact_match=False): + """ + return the best match for the node entries + Note that a component object uses a different matching algorithm than an entryid object + For a component object the _get_value_match used is below and is not the one in entry_id.py + """ + match_value = None + match_max = 0 + match_count = 0 + match_values = [] + expect(not exact_match, " exact_match not implemented in this method") + expect(node is not None, " Empty node in _get_value_match") + values = self.get_optional_child("values", root=node) + if values is None: + return + + # determine match_type if there is a tie + # ASSUME a default of "last" if "match" attribute is not there + match_type = self.get(values, "match", default="last") + + # use the default_value if present + val_node = self.get_optional_child("default_value", root=node) + if val_node is None: + logger.debug("No default_value for {}".format(self.get(node, "id"))) + return val_node + value = self.text(val_node) + if value is not None and len(value) > 0 and value != "UNSET": + match_values.append(value) + + for valnode in self.get_children("value", root=values): + # loop through all the keys in valnode (value nodes) attributes + for key, value in self.attrib(valnode).items(): + # determine if key is in attributes dictionary + match_count = 0 + if attributes is not None and key in attributes: + if re.search(value, attributes[key]): + logger.debug( + "Value {} and key {} match with value {}".format( + value, key, attributes[key] + ) + ) + match_count += 1 + else: + match_count = 0 + break + + # a match is found + if match_count > 0: + # append the current result + if self.get(values, "modifier") == "additive": + match_values.append(self.text(valnode)) + + # replace the current result if it already contains the new value + # otherwise append the current result + elif self.get(values, "modifier") == "merge": + if self.text(valnode) in match_values: + del match_values[:] + match_values.append(self.text(valnode)) + + else: + if match_type == "last": + # take the *last* best match + if match_count >= 
match_max: + del match_values[:] + match_max = match_count + match_value = self.text(valnode) + elif match_type == "first": + # take the *first* best match + if match_count > match_max: + del match_values[:] + match_max = match_count + match_value = self.text(valnode) + else: + expect( + False, + "match attribute can only have a value of 'last' or 'first'", + ) + + if len(match_values) > 0: + match_value = " ".join(match_values) + + return match_value + + # pylint: disable=arguments-differ + def get_description(self, compsetname): + if self.get_version() == 3.0: + return self._get_description_v3(compsetname, self._comp_class) + else: + return self._get_description_v2(compsetname) + + def get_forcing_description(self, compsetname): + if self.get_version() == 3.0: + return self._get_description_v3(compsetname, "forcing") + else: + return "" + + def _get_description_v3(self, compsetname, comp_class): + """ + version 3 of the config_component.xml file has the description section at the top of the file + the description field has one attribute 'modifier_mode' which has allowed values + '*' 0 or more modifiers (default) + '1' exactly 1 modifier + '?' 0 or 1 modifiers + '+' 1 or more modifiers + + modifiers are fields in the component section of the compsetname following the % symbol. + + The desc field can have an attribute which is the component class ('cpl', 'atm', 'lnd' etc) + or it can have an attribute 'option' which provides descriptions of each optional modifier + or (in the config_component_{model}.xml in the driver only) it can have the attribute 'forcing' + + component descriptions are matched to the compsetname using a set method + """ + expect( + comp_class is not None, "comp_class argument required for version3 files" + ) + comp_class = comp_class.lower() + rootnode = self.get_child("description") + desc = "" + desc_nodes = self.get_children("desc", root=rootnode) + + modifier_mode = self.get(rootnode, "modifier_mode") + if modifier_mode is None: + modifier_mode = "*" + expect( + modifier_mode in ("*", "1", "?", "+"), + "Invalid modifier_mode {} in file {}".format(modifier_mode, self.filename), + ) + optiondesc = {} + if comp_class == "forcing": + for node in desc_nodes: + forcing = self.get(node, "forcing") + if forcing is not None and compsetname.startswith(forcing + "_"): + expect( + len(desc) == 0, + "Too many matches on forcing field {} in file {}".format( + forcing, self.filename + ), + ) + desc = self.text(node) + if desc is None: + desc = compsetname.split("_")[0] + return desc + + # first pass just make a hash of the option descriptions + for node in desc_nodes: + option = self.get(node, "option") + if option is not None: + optiondesc[option] = self.text(node) + + # second pass find a comp_class match + desc = "" + for node in desc_nodes: + compdesc = self.get(node, comp_class) + + if compdesc is not None: + opt_parts = [x.rstrip("]") for x in compdesc.split("[%")] + parts = opt_parts.pop(0).split("%") + reqset = set(parts) + fullset = set(parts + opt_parts) + + match, complist = self._get_description_match( + compsetname, reqset, fullset, modifier_mode + ) + if match: + desc = self.text(node) + for opt in complist: + if opt in optiondesc: + desc += optiondesc[opt] + + # cpl and esp components may not have a description + if comp_class not in ["cpl", "esp"]: + expect( + len(desc) > 0, + "No description found for comp_class {} matching compsetname {} in file {}, expected match in {} % {}".format( + comp_class, + compsetname, + self.filename, + list(reqset), + 
list(opt_parts), + ), + ) + return desc + + def _get_description_match(self, compsetname, reqset, fullset, modifier_mode): + """ + + >>> obj = Component('testingonly', 'ATM') + >>> obj._get_description_match("1850_DATM%CRU_FRED",set(["DATM"]), set(["DATM","CRU","HSI"]), "*") + (True, ['DATM', 'CRU']) + >>> obj._get_description_match("1850_DATM%FRED_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "*") + (False, None) + >>> obj._get_description_match("1850_DATM_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "?") + (True, ['DATM']) + >>> obj._get_description_match("1850_DATM_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "1") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: Expected exactly one modifer found 0 in ['DATM'] + >>> obj._get_description_match("1850_DATM%CRU%HSI_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "1") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: Expected exactly one modifer found 2 in ['DATM', 'CRU', 'HSI'] + >>> obj._get_description_match("1850_CAM50%WCCM%RCO2_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "*") + (True, ['CAM50', 'WCCM', 'RCO2']) + + # The following is not allowed because the required WCCM field is missing + >>> obj._get_description_match("1850_CAM50%RCO2_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "*") + (False, None) + >>> obj._get_description_match("1850_CAM50_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+") + (False, None) + >>> obj._get_description_match("1850_CAM50%WCCM_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+") + (True, ['CAM50', 'WCCM']) + >>> obj._get_description_match("scn:1850_atm:CAM50%WCCM_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+") + (True, ['CAM50', 'WCCM']) + """ + match = False + comparts = compsetname.split("_") + matchcomplist = None + for comp in comparts: + if ":" in comp: + comp = comp.split(":")[1] + complist = comp.split("%") + cset = set(complist) + + if cset == reqset or (cset > reqset and cset <= fullset): + if modifier_mode == "1": + expect( + len(complist) == 2, + "Expected exactly one modifer found {} in {}".format( + len(complist) - 1, complist + ), + ) + elif modifier_mode == "+": + expect( + len(complist) >= 2, + "Expected one or more modifers found {} in {}".format( + len(complist) - 1, list(reqset) + ), + ) + elif modifier_mode == "?": + expect( + len(complist) <= 2, + "Expected 0 or one modifers found {} in {}".format( + len(complist) - 1, complist + ), + ) + expect( + not match, + "Found multiple matches in file {} for {}".format( + self.filename, comp + ), + ) + match = True + matchcomplist = complist + # found a match + + return match, matchcomplist + + def _get_description_v2(self, compsetname): + rootnode = self.get_child("description") + desc = "" + desc_nodes = self.get_children("desc", root=rootnode) + for node in desc_nodes: + compsetmatch = self.get(node, "compset") + if compsetmatch is not None and re.search(compsetmatch, compsetname): + desc += self.text(node) + + return desc + + def print_values(self): + """ + print values for help and description in target config_component.xml file + """ + helpnode = self.get_child("help") + helptext = self.text(helpnode) + logger.info(" {}".format(helptext)) + entries = self.get_children("entry") + for entry in entries: + name = self.get(entry, "id") + text = self.text(self.get_child("desc", root=entry)) + logger.info(" {:20s} : {}".format(name, text)) + + def 
return_values(self): + """ + return a list of hashes from target config_component.xml file + This routine is used by external tools in https://github.com/NCAR/CESM_xml2html + """ + entry_dict = dict() + items = list() + helpnode = self.get_optional_child("help") + if helpnode: + helptext = self.text(helpnode) + else: + helptext = "" + entries = self.get_children("entry") + for entry in entries: + item = dict() + name = self.get(entry, "id") + datatype = self.text(self.get_child("type", root=entry)) + valid_values = self.get_valid_values(name) + default_value = self.get_default_value(node=entry) + group = self.text(self.get_child("group", root=entry)) + filename = self.text(self.get_child("file", root=entry)) + text = self.text(self.get_child("desc", root=entry)) + item = { + "name": name, + "datatype": datatype, + "valid_values": valid_values, + "value": default_value, + "group": group, + "filename": filename, + "desc": text.encode("utf-8"), + } + items.append(item) + entry_dict = {"items": items} + + return helptext, entry_dict diff --git a/CIME/XML/compsets.py b/CIME/XML/compsets.py new file mode 100644 index 00000000000..23eca6c825c --- /dev/null +++ b/CIME/XML/compsets.py @@ -0,0 +1,133 @@ +""" +Common interface to XML files which follow the compsets format, +""" + +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.XML.entry_id import EntryID +from CIME.XML.files import Files + +logger = logging.getLogger(__name__) + + +class Compsets(GenericXML): + def __init__(self, infile=None, files=None): + if files is None: + files = Files() + schema = files.get_schema("COMPSETS_SPEC_FILE") + GenericXML.__init__(self, infile, schema=schema) + self._index = 0 + self._compsets = None + + def get_compset_match(self, name): + """ + science support is used in cesm to determine if this compset and grid + is scientifically supported. science_support is returned as an array of grids for this compset + """ + nodes = self.get_children("compset") + alias = None + lname = None + + science_support = [] + + for node in nodes: + alias = self.get_element_text("alias", root=node) + lname = self.get_element_text("lname", root=node) + # Users may include case for clarity, but comparisons are case insensitive. 
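+            # Hypothetical example: a query for "b1850" (or the corresponding long name
+            # in any mix of case) matches a compset node whose alias is "B1850", since
+            # both sides are upper-cased before the comparison below.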
+ if alias.upper() == name.upper() or lname.upper() == name.upper(): + science_support_nodes = self.get_children("science_support", root=node) + for snode in science_support_nodes: + science_support.append(self.get(snode, "grid")) + logger.debug( + "Found node match with alias: {} and lname: {}".format(alias, lname) + ) + return (lname, alias, science_support) + return (None, None, [False]) + + def get_compset_var_settings(self, compset, grid): + """ + Variables can be set in config_compsets.xml in entry id settings with compset and grid attributes + find and return id value pairs here + """ + entries = self.get_optional_child("entries") + result = [] + if entries is not None: + nodes = self.get_children("entry", root=entries) + # Get an empty entryid obj to use + entryidobj = EntryID() + for node in nodes: + value = entryidobj.get_default_value( + node, {"grid": grid, "compset": compset} + ) + if value is not None: + result.append((self.get(node, "id"), value)) + + return result + + # pylint: disable=arguments-differ + def get_value(self, name, attribute=None, resolved=False, subgroup=None): + expect(subgroup is None, "This class does not support subgroups") + if name == "help": + rootnode = self.get_child("help") + helptext = self.text(rootnode) + return helptext + else: + compsets = {} + nodes = self.get_children("compset") + for node in nodes: + for child in node: + logger.debug( + "Here child is {} with value {}".format( + self.name(child), self.text(child) + ) + ) + if self.name(child) == "alias": + alias = self.text(child) + if self.name(child) == "lname": + lname = self.text(child) + compsets[alias] = lname + return compsets + + def print_values(self, arg_help=True): + help_text = self.get_value(name="help") + compsets = self.get_children("compset") + if arg_help: + logger.info(" {} ".format(help_text)) + + logger.info(" --------------------------------------") + logger.info(" Compset Alias: Compset Long Name ") + logger.info(" --------------------------------------") + for compset in compsets: + logger.info( + " {:20} : {}".format( + self.text(self.get_child("alias", root=compset)), + self.text(self.get_child("lname", root=compset)), + ) + ) + + def get_compset_longnames(self): + compset_nodes = self.get_children("compset") + longnames = [] + for comp in compset_nodes: + longnames.append(self.text(self.get_child("lname", root=comp))) + return longnames + + def __iter__(self): + self._index = 0 + self._compsets = self.get_children("compset") + + return self + + def __next__(self): + if self._index >= len(self._compsets): + raise StopIteration() + + value = self._compsets[self._index] + + alias = self.text(self.get_child("alias", root=value)) + + lname = self.text(self.get_child("lname", root=value)) + + self._index += 1 + + return alias, lname diff --git a/CIME/XML/entry_id.py b/CIME/XML/entry_id.py new file mode 100644 index 00000000000..c090a3633db --- /dev/null +++ b/CIME/XML/entry_id.py @@ -0,0 +1,555 @@ +""" +Common interface to XML files which follow the entry id format, +this is an abstract class and is expected to +be used by other XML interface modules and not directly. 
+""" +from CIME.XML.standard_module_setup import * +from CIME.utils import expect, convert_to_string, convert_to_type +from CIME.XML.generic_xml import GenericXML + +logger = logging.getLogger(__name__) + + +class EntryID(GenericXML): + def __init__(self, infile=None, schema=None, read_only=True): + GenericXML.__init__(self, infile, schema, read_only=read_only) + self.groups = {} + + def get_default_value(self, node, attributes=None): + """ + Set the value of an entry to the default value for that entry + """ + value = self._get_value_match(node, attributes) + if value is None: + # Fall back to default value + value = self.get_element_text("default_value", root=node) + else: + logger.debug("node is {} value is {}".format(self.get(node, "id"), value)) + + if value is None: + logger.debug("For vid {} value is none".format(self.get(node, "id"))) + value = "" + + return value + + def set_default_value(self, vid, val): + node = self.get_optional_child("entry", {"id": vid}) + if node is not None: + val = self.set_element_text("default_value", val, root=node) + if val is None: + logger.warning( + "Called set_default_value on a node without default_value field" + ) + + return val + + def get_value_match( + self, + vid, + attributes=None, + exact_match=False, + entry_node=None, + replacement_for_none=None, + ): + """Handle this case: + + + X + Y + Z + + + + If replacement_for_none is provided, then: if the found text value would give a + None value, instead replace it with the value given by the replacement_for_none + argument. (However, still return None if no match is found.) This may or may not + be needed, but is in place to maintain some old logic. + + """ + + if entry_node is not None: + value = self._get_value_match( + entry_node, + attributes, + exact_match, + replacement_for_none=replacement_for_none, + ) + else: + node = self.get_optional_child("entry", {"id": vid}) + value = None + if node is not None: + value = self._get_value_match( + node, + attributes, + exact_match, + replacement_for_none=replacement_for_none, + ) + logger.debug("(get_value_match) vid {} value {}".format(vid, value)) + return value + + def _get_value_match( + self, node, attributes=None, exact_match=False, replacement_for_none=None + ): + """ + Note that the component class has a specific version of this function + + If replacement_for_none is provided, then: if the found text value would give a + None value, instead replace it with the value given by the replacement_for_none + argument. (However, still return None if no match is found.) This may or may not + be needed, but is in place to maintain some old logic. + """ + # if there is a element - check to see if there is a match attribute + # if there is NOT a match attribute, then set the default to "first" + # this is different than the component class _get_value_match where the default is "last" + values_node = self.get_optional_child("values", root=node) + if values_node is not None: + match_type = self.get(values_node, "match", default="first") + node = values_node + else: + match_type = "first" + + # Store nodes that match the attributes and their scores. + matches = [] + nodes = self.get_children("value", root=node) + for vnode in nodes: + # For each node in the list start a score. + score = 0 + if attributes: + for attribute in self.attrib(vnode).keys(): + # For each attribute, add to the score. + score += 1 + # If some attribute is specified that we don't know about, + # or the values don't match, it's not a match we want. 
+ if exact_match: + if attribute not in attributes or attributes[ + attribute + ] != self.get(vnode, attribute): + score = -1 + break + else: + if attribute not in attributes or not re.search( + self.get(vnode, attribute), attributes[attribute] + ): + score = -1 + break + + # Add valid matches to the list. + if score >= 0: + matches.append((score, vnode)) + + if not matches: + return None + + # Get maximum score using either a "last" or "first" match in case of a tie + max_score = -1 + mnode = None + for score, node in matches: + if match_type == "last": + # take the *last* best match + if score >= max_score: + max_score = score + mnode = node + elif match_type == "first": + # take the *first* best match + if score > max_score: + max_score = score + mnode = node + else: + expect( + False, + "match attribute can only have a value of 'last' or 'first', value is %s" + % match_type, + ) + + text = self.text(mnode) + if text is None: + # NOTE(wjs, 2021-06-03) I'm not sure when (if ever) this can happen, but I'm + # putting this logic here to maintain some old logic, to be safe. + text = replacement_for_none + return text + + def get_node_element_info(self, vid, element_name): + node = self.get_optional_child("entry", {"id": vid}) + if node is None: + return None + else: + return self._get_node_element_info(node, element_name) + + def _get_node_element_info(self, node, element_name): + return self.get_element_text(element_name, root=node) + + def _get_type_info(self, node): + if node is None: + return None + val = self._get_node_element_info(node, "type") + if val is None: + return "char" + return val + + def get_type_info(self, vid): + vid, _, _ = self.check_if_comp_var(vid) + node = self.scan_optional_child("entry", {"id": vid}) + return self._get_type_info(node) + + # pylint: disable=unused-argument + def check_if_comp_var(self, vid, attribute=None, node=None): + # handled in classes + return vid, None, False + + def _get_default(self, node): + return self._get_node_element_info(node, "default_value") + + # Get description , expect child with tag "description" for parent node + def get_description(self, node): + return self._get_node_element_info(node, "desc") + + # Get group , expect node with tag "group" + # entry id nodes are children of group nodes + def get_groups(self, node): + groups = self.get_children("group") + result = [] + nodes = [] + vid = self.get(node, "id") + for group in groups: + nodes = self.get_children("entry", attributes={"id": vid}, root=group) + if nodes: + result.append(self.get(group, "id")) + + return result + + def get_valid_values(self, vid): + node = self.scan_optional_child("entry", {"id": vid}) + if node is None: + return None + return self._get_valid_values(node) + + def _get_valid_values(self, node): + valid_values = self.get_element_text("valid_values", root=node) + valid_values_list = [] + if valid_values: + valid_values_list = [item.lstrip() for item in valid_values.split(",")] + return valid_values_list + + def set_valid_values(self, vid, new_valid_values): + node = self.scan_optional_child("entry", {"id": vid}) + if node is None: + return None + return self._set_valid_values(node, new_valid_values) + + def get_nodes_by_id(self, vid): + return self.scan_children("entry", {"id": vid}) + + def _set_valid_values(self, node, new_valid_values): + old_vv = self._get_valid_values(node) + if old_vv is None: + self.make_child("valid_values", text=new_valid_values) + logger.debug( + "Adding valid_values {} for {}".format( + new_valid_values, self.get(node, "id") + 
) + ) + else: + vv_text = self.set_element_text("valid_values", new_valid_values, root=node) + logger.debug( + "Replacing valid_values {} with {} for {}".format( + old_vv, vv_text, self.get(node, "id") + ) + ) + + current_value = self.get(node, "value") + valid_values_list = self._get_valid_values(node) + if current_value is not None and current_value not in valid_values_list: + logger.warning( + 'WARNING: Current setting for {} not in new valid values. Updating setting to "{}"'.format( + self.get(node, "id"), valid_values_list[0] + ) + ) + self._set_value(node, valid_values_list[0]) + return new_valid_values + + def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False): + """ + Set the value of an entry-id field to value + Returns the value or None if not found + subgroup is ignored in the general routine and applied in specific methods + """ + expect(subgroup is None, "Subgroup not supported") + str_value = self.get_valid_value_string(node, value, vid, ignore_type) + self.set(node, "value", str_value) + return value + + def get_valid_value_string(self, node, value, vid=None, ignore_type=False): + valid_values = self._get_valid_values(node) + if ignore_type: + expect( + isinstance(value, str), + "Value must be type string if ignore_type is true", + ) + str_value = value + return str_value + type_str = self._get_type_info(node) + str_value = convert_to_string(value, type_str, vid) + + if valid_values and not str_value.startswith("$"): + expect( + str_value in valid_values, + "Did not find {} in valid values for {}: {}".format( + value, vid, valid_values + ), + ) + return str_value + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + """ + Set the value of an entry-id field to value + Returns the value or None if not found + subgroup is ignored in the general routine and applied in specific methods + """ + val = None + root = ( + self.root + if subgroup is None + else self.get_optional_child("group", {"id": subgroup}) + ) + node = self.get_optional_child("entry", {"id": vid}, root=root) + if node is not None: + val = self._set_value(node, value, vid, subgroup, ignore_type) + return val + + def get_values(self, vid, attribute=None, resolved=True, subgroup=None): + """ + Same functionality as get_value but it returns a list, if the + value in xml contains commas the list have multiple elements split on + commas + """ + results = [] + node = self.scan_optional_child("entry", {"id": vid}) + if node is None: + return results + str_result = self._get_value( + node, attribute=attribute, resolved=resolved, subgroup=subgroup + ) + str_results = str_result.split(",") + for result in str_results: + # Return value as right type if we were able to fully resolve + # otherwise, we have to leave as string. + if "$" in result: + results.append(result) + else: + type_str = self._get_type_info(node) + results.append(convert_to_type(result, type_str, vid)) + return results + + # pylint: disable=arguments-differ + def get_value(self, vid, attribute=None, resolved=True, subgroup=None): + """ + Get a value for entry with id attribute vid. 
+ or from the values field if the attribute argument is provided + and matches + """ + root = ( + self.root + if subgroup is None + else self.get_optional_child("group", {"id": subgroup}) + ) + node = self.scan_optional_child("entry", {"id": vid}, root=root) + if node is None: + return + + val = self._get_value( + node, attribute=attribute, resolved=resolved, subgroup=subgroup + ) + # Return value as right type if we were able to fully resolve + # otherwise, we have to leave as string. + if val is None: + return val + elif "$" in val: + return val + else: + type_str = self._get_type_info(node) + return convert_to_type(val, type_str, vid) + + def _get_value(self, node, attribute=None, resolved=True, subgroup=None): + """ + internal get_value, does not convert to type + """ + logger.debug("(_get_value) ({}, {}, {})".format(attribute, resolved, subgroup)) + val = None + if node is None: + logger.debug("No node") + return val + + logger.debug( + "Found node {} with attributes {}".format( + self.name(node), self.attrib(node) + ) + ) + if attribute: + vals = self.get_optional_child("values", root=node) + node = vals if vals is not None else node + val = self.get_element_text("value", attributes=attribute, root=node) + elif self.get(node, "value") is not None: + val = self.get(node, "value") + else: + val = self.get_default_value(node) + + if resolved: + val = self.get_resolved_value(val) + + return val + + def get_child_content(self, vid, childname): + val = None + node = self.get_optional_child("entry", {"id": vid}) + if node is not None: + val = self.get_element_text(childname, root=node) + return val + + def get_elements_from_child_content(self, childname, childcontent): + nodes = self.get_children("entry") + elements = [] + for node in nodes: + content = self.get_element_text(childname, root=node) + expect( + content is not None, + "No childname {} for id {}".format(childname, self.get(node, "id")), + ) + if content == childcontent: + elements.append(node) + + return elements + + def add_elements_by_group(self, srcobj, attributes=None, infile=None): + """ + Add elements from srcobj to self under the appropriate + group element, entries to be added must have a child element + with value "infile" + """ + if infile is None: + infile = os.path.basename(self.filename) + + # First get the list of entries in srcobj with matching file children + nodelist = srcobj.get_elements_from_child_content("file", infile) + + # For matchs found: Remove {, , } + # children from each entry and set the default value for the + # new entries in self - putting the entries as children of + # group elements in file $file + for src_node in nodelist: + node = self.copy(src_node) + gname = srcobj.get_element_text("group", root=src_node) + if gname is None: + gname = "group_not_set" + + # If group with id=$gname does not exist in self.groups + # then create the group node and add it to infile file + if gname not in self.groups.keys(): + # initialize an empty list + newgroup = self.make_child(name="group", attributes={"id": gname}) + self.groups[gname] = newgroup + + # Remove {, , } from the entry element + self.cleanupnode(node) + + # Add the entry element to the group + self.add_child(node, root=self.groups[gname]) + + # Set the default value, it may be determined by a regular + # expression match to a dictionary value in attributes matching a + # value attribute in node + value = srcobj.get_default_value(src_node, attributes) + if value is not None and len(value): + self._set_value(node, value) + + logger.debug("Adding 
to group " + gname) + + return nodelist + + def cleanupnode(self, node): + """ + in env_base.py, not expected to get here + """ + expect(False, " Not expected to be here {}".format(self.get(node, "id"))) + + def compare_xml(self, other, root=None, otherroot=None): + xmldiffs = {} + if root is not None: + expect(otherroot is not None, " inconsistant request") + f1nodes = self.scan_children("entry", root=root) + for node in f1nodes: + vid = self.get(node, "id") + logger.debug("Compare vid {}".format(vid)) + f2match = other.scan_optional_child( + "entry", attributes={"id": vid}, root=otherroot + ) + expect(f2match is not None, "Could not find {} in Locked file".format(vid)) + if node != f2match: + f1val = self.get_value(vid, resolved=False) + if f1val is not None: + f2val = other.get_value(vid, resolved=False) + if f1val != f2val: + xmldiffs[vid] = [f1val, f2val] + elif hasattr(self, "_components"): + # pylint: disable=no-member + for comp in self._components: + f1val = self.get_value( + "{}_{}".format(vid, comp), resolved=False + ) + if f1val is not None: + f2val = other.get_value( + "{}_{}".format(vid, comp), resolved=False + ) + if f1val != f2val: + xmldiffs[f"{vid}_{comp}"] = [f1val, f2val] + else: + if node != f2match: + f1value_nodes = self.get_children("value", root=node) + for valnode in f1value_nodes: + f2valnodes = other.get_children( + "value", + root=f2match, + attributes=self.attrib(valnode), + ) + for f2valnode in f2valnodes: + if ( + self.attrib(valnode) is None + and self.attrib(f2valnode) is None + or self.attrib(f2valnode) + == self.attrib(valnode) + ): + if other.get_resolved_value( + self.text(f2valnode) + ) != self.get_resolved_value( + self.text(valnode) + ): + xmldiffs[ + "{}:{}".format( + vid, self.attrib(valnode) + ) + ] = [ + self.text(valnode), + self.text(f2valnode), + ] + return xmldiffs + + def overwrite_existing_entries(self): + # if there exist two nodes with the same id delete the first one. + for node in self.get_children("entry"): + vid = self.get(node, "id") + samenodes = self.get_nodes_by_id(vid) + if len(samenodes) > 1: + expect( + len(samenodes) == 2, + "Too many matchs for id {} in file {}".format(vid, self.filename), + ) + logger.debug("Overwriting node {}".format(vid)) + read_only = self.read_only + if read_only: + self.read_only = False + self.remove_child(samenodes[0]) + self.read_only = read_only + + def __iter__(self): + for node in self.scan_children("entry"): + vid = self.get(node, "id") + yield vid, self.get_value(vid) diff --git a/CIME/XML/env_archive.py b/CIME/XML/env_archive.py new file mode 100644 index 00000000000..3642cda40a8 --- /dev/null +++ b/CIME/XML/env_archive.py @@ -0,0 +1,37 @@ +""" +Interface to the env_archive.xml file. 
This class inherits from EnvBase +""" +from CIME.XML.standard_module_setup import * +from CIME import utils +from CIME.XML.archive_base import ArchiveBase +from CIME.XML.env_base import EnvBase + +logger = logging.getLogger(__name__) +# pylint: disable=super-init-not-called +class EnvArchive(ArchiveBase, EnvBase): + def __init__(self, case_root=None, infile="env_archive.xml", read_only=False): + """ + initialize an object interface to file env_archive.xml in the case directory + """ + schema = os.path.join(utils.get_schema_path(), "env_archive.xsd") + EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) + + def get_entries(self): + return self.get_children("comp_archive_spec") + + def get_entry_info(self, archive_entry): + compname = self.get(archive_entry, "compname") + compclass = self.get(archive_entry, "compclass") + return compname, compclass + + def get_rpointer_contents(self, archive_entry): + rpointer_items = [] + rpointer_nodes = self.get_children("rpointer", root=archive_entry) + for rpointer_node in rpointer_nodes: + file_node = self.get_child("rpointer_file", root=rpointer_node) + content_node = self.get_child("rpointer_content", root=rpointer_node) + rpointer_items.append([self.text(file_node), self.text(content_node)]) + return rpointer_items + + def get_type_info(self, vid): + return "char" diff --git a/CIME/XML/env_base.py b/CIME/XML/env_base.py new file mode 100644 index 00000000000..b30d33da988 --- /dev/null +++ b/CIME/XML/env_base.py @@ -0,0 +1,279 @@ +""" +Base class for env files. This class inherits from EntryID.py +""" +from CIME.XML.standard_module_setup import * +from CIME.XML.entry_id import EntryID +from CIME.XML.headers import Headers +from CIME.utils import convert_to_type + +logger = logging.getLogger(__name__) + + +class EnvBase(EntryID): + def __init__(self, case_root, infile, schema=None, read_only=False): + if case_root is None: + case_root = os.getcwd() + self._caseroot = case_root + if os.path.isabs(infile): + fullpath = infile + else: + fullpath = os.path.join(case_root, infile) + + EntryID.__init__(self, fullpath, schema=schema, read_only=read_only) + + self._id_map = None + self._group_map = None + + if not os.path.isfile(fullpath): + headerobj = Headers() + headernode = headerobj.get_header_node(os.path.basename(fullpath)) + self.add_child(headernode) + else: + self._setup_cache() + + def _setup_cache(self): + self._id_map = {} # map id directly to nodes + self._group_map = {} # map group name to entry id dict + + group_elems = self.get_children("group") + for group_elem in group_elems: + group_name = self.get(group_elem, "id") + expect( + group_name not in self._group_map, + "Repeat group '{}'".format(group_name), + ) + group_map = {} + self._group_map[group_name] = group_map + entry_elems = self.get_children("entry", root=group_elem) + for entry_elem in entry_elems: + entry_id = self.get(entry_elem, "id") + expect( + entry_id not in group_map, + "Repeat entry '{}' in group '{}'".format(entry_id, group_name), + ) + group_map[entry_id] = entry_elem + if entry_id in self._id_map: + self._id_map[entry_id].append(entry_elem) + else: + self._id_map[entry_id] = [entry_elem] + + self.lock() + + def change_file(self, newfile, copy=False): + self.unlock() + EntryID.change_file(self, newfile, copy=copy) + self._setup_cache() + + def get_children(self, name=None, attributes=None, root=None): + if ( + self.locked + and name == "entry" + and attributes is not None + and attributes.keys() == ["id"] + ): + entry_id = attributes["id"] + if 
root is None or self.name(root) == "file": + if entry_id in self._id_map: + return self._id_map[entry_id] + else: + return [] + else: + expect( + self.name(root) == "group", + "Unexpected elem '{}' for {}, attrs {}".format( + self.name(root), self.filename, self.attrib(root) + ), + ) + group_id = self.get(root, "id") + if ( + group_id in self._group_map + and entry_id in self._group_map[group_id] + ): + return [self._group_map[group_id][entry_id]] + else: + return [] + + else: + # Non-compliant look up + return EntryID.get_children( + self, name=name, attributes=attributes, root=root + ) + + def scan_children(self, nodename, attributes=None, root=None): + if ( + self.locked + and nodename == "entry" + and attributes is not None + and attributes.keys() == ["id"] + ): + return EnvBase.get_children( + self, name=nodename, attributes=attributes, root=root + ) + else: + return EntryID.scan_children( + self, nodename, attributes=attributes, root=root + ) + + def set_components(self, components): + if hasattr(self, "_components"): + # pylint: disable=attribute-defined-outside-init + self._components = components + + def check_if_comp_var(self, vid, attribute=None, node=None): + comp = None + if node is None: + nodes = self.scan_children("entry", {"id": vid}) + if len(nodes): + node = nodes[0] + + if node: + valnodes = self.scan_children( + "value", attributes={"compclass": None}, root=node + ) + if len(valnodes) == 0: + logger.debug("vid {} is not a compvar".format(vid)) + return vid, None, False + else: + logger.debug("vid {} is a compvar".format(vid)) + if attribute is not None: + comp = attribute["compclass"] + return vid, comp, True + else: + if hasattr(self, "_components") and self._components: + new_vid = None + for comp in self._components: + if vid.endswith("_" + comp): + new_vid = vid.replace("_" + comp, "", 1) + elif vid.startswith(comp + "_"): + new_vid = vid.replace(comp + "_", "", 1) + elif "_" + comp + "_" in vid: + new_vid = vid.replace(comp + "_", "", 1) + if new_vid is not None: + break + if new_vid is not None: + logger.debug("vid {} is a compvar with comp {}".format(vid, comp)) + return new_vid, comp, True + + return vid, None, False + + def get_value(self, vid, attribute=None, resolved=True, subgroup=None): + """ + Get a value for entry with id attribute vid. 
+ or from the values field if the attribute argument is provided + and matches + """ + value = None + vid, comp, iscompvar = self.check_if_comp_var(vid, attribute) + logger.debug("vid {} comp {} iscompvar {}".format(vid, comp, iscompvar)) + if iscompvar: + if comp is None: + if subgroup is not None: + comp = subgroup + else: + logger.debug("Not enough info to get value for {}".format(vid)) + return value + if attribute is None: + attribute = {"compclass": comp} + else: + attribute["compclass"] = comp + node = self.scan_optional_child("entry", {"id": vid}) + if node is not None: + type_str = self._get_type_info(node) + values = self.get_optional_child("values", root=node) + node = values if values is not None else node + val = self.get_element_text("value", attribute, root=node) + if val is not None: + if val.startswith("$"): + value = val + else: + value = convert_to_type(val, type_str, vid) + return value + + return EntryID.get_value( + self, vid, attribute=attribute, resolved=resolved, subgroup=subgroup + ) + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + """ + Set the value of an entry-id field to value + Returns the value or None if not found + subgroup is ignored in the general routine and applied in specific methods + """ + vid, comp, iscompvar = self.check_if_comp_var(vid, None) + val = None + root = ( + self.root + if subgroup is None + else self.get_optional_child("group", {"id": subgroup}) + ) + node = self.scan_optional_child("entry", {"id": vid}, root=root) + if node is not None: + if iscompvar and comp is None: + # pylint: disable=no-member + for comp in self._components: + val = self._set_value( + node, value, vid, subgroup, ignore_type, compclass=comp + ) + else: + val = self._set_value( + node, value, vid, subgroup, ignore_type, compclass=comp + ) + return val + + # pylint: disable=arguments-differ + def _set_value( + self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None + ): + if vid is None: + vid = self.get(node, "id") + vid, _, iscompvar = self.check_if_comp_var(vid, node=node) + + if iscompvar: + expect(compclass is not None, "compclass must be specified if is comp var") + attribute = {"compclass": compclass} + str_value = self.get_valid_value_string(node, value, vid, ignore_type) + values = self.get_optional_child("values", root=node) + node = values if values is not None else node + val = self.set_element_text("value", str_value, attribute, root=node) + else: + val = EntryID._set_value(self, node, value, vid, subgroup, ignore_type) + return val + + def get_nodes_by_id(self, varid): + varid, _, _ = self.check_if_comp_var(varid, None) + return EntryID.get_nodes_by_id(self, varid) + + def cleanupnode(self, node): + """ + Remove the , , and childnodes from node + """ + fnode = self.get_child("file", root=node) + self.remove_child(fnode, node) + gnode = self.get_child("group", root=node) + self.remove_child(gnode, node) + dnode = self.get_optional_child("default_value", root=node) + if dnode is not None: + self.remove_child(dnode, node) + + vnode = self.get_optional_child("values", root=node) + if vnode is not None: + componentatt = self.get_children( + "value", attributes={"component": "ATM"}, root=vnode + ) + # backward compatibility (compclasses and component were mixed + # now we seperated into component and compclass) + if len(componentatt) > 0: + for ccnode in self.get_children( + "value", attributes={"component": None}, root=vnode + ): + val = self.get(ccnode, "component") + self.pop(ccnode, "component") + 
self.set(ccnode, "compclass", val) + + compclassatt = self.get_children( + "value", attributes={"compclass": None}, root=vnode + ) + if len(compclassatt) == 0: + self.remove_child(vnode, root=node) + + return node diff --git a/CIME/XML/env_batch.py b/CIME/XML/env_batch.py new file mode 100644 index 00000000000..9bc4034a41a --- /dev/null +++ b/CIME/XML/env_batch.py @@ -0,0 +1,1609 @@ +""" +Interface to the env_batch.xml file. This class inherits from EnvBase +""" + +import os +from CIME.XML.standard_module_setup import * +from CIME.XML.env_base import EnvBase +from CIME import utils +from CIME.utils import ( + transform_vars, + get_cime_root, + convert_to_seconds, + convert_to_babylonian_time, + get_cime_config, + get_batch_script_for_job, + get_logging_options, + format_time, + add_flag_to_cmd, +) +from collections import OrderedDict +import stat, re, math +import pathlib +from itertools import zip_longest + +logger = logging.getLogger(__name__) + +# pragma pylint: disable=attribute-defined-outside-init + + +class EnvBatch(EnvBase): + def __init__(self, case_root=None, infile="env_batch.xml", read_only=False): + """ + initialize an object interface to file env_batch.xml in the case directory + """ + self._batchtype = None + # This arbitrary setting should always be overwritten + self._default_walltime = "00:20:00" + schema = os.path.join(utils.get_schema_path(), "env_batch.xsd") + super(EnvBatch, self).__init__( + case_root, infile, schema=schema, read_only=read_only + ) + self._batchtype = self.get_batch_system_type() + self._env_workflow = None + + # pylint: disable=arguments-differ + def set_value(self, item, value, subgroup=None, ignore_type=False): + """ + Override the entry_id set_value function with some special cases for this class + """ + val = None + + if item == "JOB_QUEUE": + expect( + value in self._get_all_queue_names() or ignore_type, + "Unknown Job Queue specified use --force to set", + ) + + # allow the user to set item for all jobs if subgroup is not provided + if subgroup is None: + gnodes = self.get_children("group") + for gnode in gnodes: + node = self.get_optional_child("entry", {"id": item}, root=gnode) + if node is not None: + self._set_value(node, value, vid=item, ignore_type=ignore_type) + val = value + else: + group = self.get_optional_child("group", {"id": subgroup}) + if group is not None: + node = self.get_optional_child("entry", {"id": item}, root=group) + if node is not None: + val = self._set_value( + node, value, vid=item, ignore_type=ignore_type + ) + + return val + + # pylint: disable=arguments-differ + def get_value(self, item, attribute=None, resolved=True, subgroup=None): + """ + Must default subgroup to something in order to provide single return value + """ + value = None + node = self.get_optional_child(item, attribute) + if item in ("BATCH_SYSTEM", "PROJECT_REQUIRED"): + return super(EnvBatch, self).get_value(item, attribute, resolved) + + if not node: + # this will take the last instance of item listed in all batch_system elements + bs_nodes = self.get_children("batch_system") + for bsnode in bs_nodes: + cnode = self.get_optional_child(item, attribute, root=bsnode) + if cnode: + node = cnode + if node: + value = self.text(node) + if resolved: + value = self.get_resolved_value(value) + + return value + + def get_type_info(self, vid): + gnodes = self.get_children("group") + for gnode in gnodes: + nodes = self.get_children("entry", {"id": vid}, root=gnode) + type_info = None + for node in nodes: + new_type_info = self._get_type_info(node) + if 
type_info is None: + type_info = new_type_info + else: + expect( + type_info == new_type_info, + "Inconsistent type_info for entry id={} {} {}".format( + vid, new_type_info, type_info + ), + ) + return type_info + + def get_jobs(self): + groups = self.get_children("group") + results = [] + for group in groups: + if self.get(group, "id") not in ["job_submission", "config_batch"]: + results.append(self.get(group, "id")) + + return results + + def create_job_groups(self, batch_jobs, is_test): + # Subtle: in order to support dynamic batch jobs, we need to remove the + # job_submission group and replace with job-based groups + + orig_group = self.get_child( + "group", + {"id": "job_submission"}, + err_msg="Looks like job groups have already been created", + ) + orig_group_children = super(EnvBatch, self).get_children(root=orig_group) + + childnodes = [] + for child in reversed(orig_group_children): + childnodes.append(child) + + self.remove_child(orig_group) + + for name, jdict in batch_jobs: + if name == "case.run" and is_test: + pass # skip + elif name == "case.test" and not is_test: + pass # skip + elif name == "case.run.sh": + pass # skip + else: + new_job_group = self.make_child("group", {"id": name}) + for field in jdict.keys(): + val = jdict[field] + node = self.make_child( + "entry", {"id": field, "value": val}, root=new_job_group + ) + self.make_child("type", root=node, text="char") + + for child in childnodes: + self.add_child(self.copy(child), root=new_job_group) + + def cleanupnode(self, node): + if self.get(node, "id") == "batch_system": + fnode = self.get_child(name="file", root=node) + self.remove_child(fnode, root=node) + gnode = self.get_child(name="group", root=node) + self.remove_child(gnode, root=node) + vnode = self.get_optional_child(name="values", root=node) + if vnode is not None: + self.remove_child(vnode, root=node) + else: + node = super(EnvBatch, self).cleanupnode(node) + return node + + def set_batch_system(self, batchobj, batch_system_type=None): + if batch_system_type is not None: + self.set_batch_system_type(batch_system_type) + + if batchobj.batch_system_node is not None and batchobj.machine_node is not None: + for node in batchobj.get_children("", root=batchobj.machine_node): + name = self.name(node) + if name != "directives": + oldnode = batchobj.get_optional_child( + name, root=batchobj.batch_system_node + ) + if oldnode is not None: + logger.debug("Replacing {}".format(self.name(oldnode))) + batchobj.remove_child(oldnode, root=batchobj.batch_system_node) + + if batchobj.batch_system_node is not None: + self.add_child(self.copy(batchobj.batch_system_node)) + + if batchobj.machine_node is not None: + self.add_child(self.copy(batchobj.machine_node)) + + from CIME.locked_files import lock_file, unlock_file + + if os.path.exists(os.path.join(self._caseroot, "LockedFiles", "env_batch.xml")): + unlock_file(os.path.basename(batchobj.filename), self._caseroot) + + self.set_value("BATCH_SYSTEM", batch_system_type) + + if os.path.exists(os.path.join(self._caseroot, "LockedFiles")): + lock_file(os.path.basename(batchobj.filename), self._caseroot) + + def get_job_overrides(self, job, case): + if not self._env_workflow: + self._env_workflow = case.get_env("workflow") + ( + total_tasks, + num_nodes, + tasks_per_node, + thread_count, + ngpus_per_node, + ) = self._env_workflow.get_job_specs(case, job) + + overrides = {} + + if total_tasks: + overrides["total_tasks"] = int(total_tasks) + overrides["num_nodes"] = num_nodes + overrides["tasks_per_node"] = tasks_per_node + if 
thread_count: + overrides["thread_count"] = thread_count + total_tasks = int(total_tasks) * int(thread_count) + else: + total_tasks = int(total_tasks) * case.thread_count + else: + # Total PES accounts for threads as well as mpi tasks + total_tasks = case.get_value("TOTALPES") + thread_count = case.thread_count + if int(total_tasks) < case.get_value("MAX_TASKS_PER_NODE"): + overrides["max_tasks_per_node"] = total_tasks + + # when developed this variable was only needed on derecho, but I have tried to + # make it general enough that it can be used on other systems by defining MEM_PER_TASK and MAX_MEM_PER_NODE in config_machines.xml + # and adding {{ mem_per_node }} in config_batch.xml + mem_per_task = case.get_value("MEM_PER_TASK") + max_tasks_per_node = case.get_value("MAX_TASKS_PER_NODE") + expect( + max_tasks_per_node > 0, + "Error MAX_TASKS_PER_NODE not set or set incorrectly", + ) + max_mem_per_node = case.get_value("MAX_MEM_PER_NODE") + if mem_per_task and total_tasks <= max_tasks_per_node: + # Use memory per task until about a 10th of the node and then use the fraction of total memory + mem_per_node = total_tasks * mem_per_task + mem_per_node = min(mem_per_node, max_mem_per_node) + if total_tasks > max_tasks_per_node / 10: + mem_per_node = int( + float(total_tasks) / float(max_tasks_per_node) * max_mem_per_node + ) + overrides["mem_per_node"] = mem_per_node + elif max_mem_per_node: + overrides["mem_per_node"] = max_mem_per_node + + overrides["ngpus_per_node"] = ngpus_per_node + overrides["mpirun"] = case.get_mpirun_cmd(job=job, overrides=overrides) + return overrides + + def make_batch_script(self, input_template, job, case, outfile=None): + expect( + os.path.exists(input_template), + "input file '{}' does not exist".format(input_template), + ) + overrides = self.get_job_overrides(job, case) + ext = os.path.splitext(job)[-1] + if len(ext) == 0: + ext = job + if ext.startswith("."): + ext = ext[1:] + + # A job name or job array name can be at most 230 characters. It must consist only of alphabetic, numeric, plus + # sign ("+"), dash or minus or hyphen ("-"), underscore ("_"), and dot or period (".") characters + # most of these are checked in utils:check_name, but % is not one of them. + + overrides["job_id"] = ext + "." 
+ case.get_value("CASE").replace("%", "") + + overrides["batchdirectives"] = self.get_batch_directives( + case, job, overrides=overrides + ) + output_text = transform_vars( + open(input_template, "r").read(), + case=case, + subgroup=job, + overrides=overrides, + ) + if not self._env_workflow: + self._env_workflow = case.get_env("workflow") + + output_name = ( + get_batch_script_for_job( + job, hidden=self._env_workflow.hidden_job(case, job) + ) + if outfile is None + else outfile + ) + logger.info("Creating file {}".format(output_name)) + with open(output_name, "w") as fd: + fd.write(output_text) + + # make sure batch script is exectuble + if not os.access(output_name, os.X_OK): + os.chmod( + output_name, + os.stat(output_name).st_mode + | stat.S_IXUSR + | stat.S_IXGRP + | stat.S_IXOTH, + ) + + def set_job_defaults(self, batch_jobs, case): + if self._batchtype is None: + self._batchtype = self.get_batch_system_type() + + if self._batchtype == "none": + return + + if not self._env_workflow: + self._env_workflow = case.get_env("workflow") + known_jobs = self._env_workflow.get_jobs() + + for job, jsect in batch_jobs: + if job not in known_jobs: + continue + + walltime = ( + case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) + if case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) + else None + ) + force_queue = ( + case.get_value("USER_REQUESTED_QUEUE", subgroup=job) + if case.get_value("USER_REQUESTED_QUEUE", subgroup=job) + else None + ) + walltime_format = ( + case.get_value("walltime_format", subgroup=job) + if case.get_value("walltime_format", subgroup=job) + else None + ) + logger.info( + "job is {} USER_REQUESTED_WALLTIME {} USER_REQUESTED_QUEUE {} WALLTIME_FORMAT {}".format( + job, walltime, force_queue, walltime_format + ) + ) + task_count = ( + int(jsect["task_count"]) if "task_count" in jsect else case.total_tasks + ) + + if "walltime" in jsect and walltime is None: + walltime = jsect["walltime"] + + logger.debug( + "Using walltime {!r} from batch job " "spec".format(walltime) + ) + + if "task_count" in jsect: + # job is using custom task_count, need to compute a node_count based on this + node_count = int( + math.ceil(float(task_count) / float(case.tasks_per_node)) + ) + else: + node_count = case.num_nodes + + queue = self.select_best_queue( + node_count, task_count, name=force_queue, walltime=walltime, job=job + ) + if queue is None and walltime is not None: + # Try to see if walltime was the holdup + queue = self.select_best_queue( + node_count, task_count, name=force_queue, walltime=None, job=job + ) + if queue is not None: + # It was, override the walltime if a test, otherwise just warn the user + new_walltime = self.get_queue_specs(queue)[5] + expect(new_walltime is not None, "Should never make it here") + logger.warning( + "WARNING: Requested walltime '{}' could not be matched by any {} queue".format( + walltime, force_queue + ) + ) + if case.get_value("TEST"): + logger.warning( + " Using walltime '{}' instead".format(new_walltime) + ) + walltime = new_walltime + else: + logger.warning( + " Continuing with suspect walltime, batch submission may fail" + ) + + if queue is None: + logger.warning( + "WARNING: No queue on this system met the requirements for this job. 
Falling back to defaults" + ) + queue = self.get_default_queue() + walltime = self.get_queue_specs(queue)[5] + + ( + _, + _, + _, + walltimedef, + walltimemin, + walltimemax, + _, + _, + _, + ) = self.get_queue_specs(queue) + + if walltime is None: + # Use default walltime if available for queue + if walltimedef is not None: + walltime = walltimedef + else: + # Last chance to figure out a walltime + # No default for queue, take max if available + if walltime is None and walltimemax is not None: + walltime = walltimemax + + # Still no walltime, try max from the default queue + if walltime is None: + # Queue is unknown, use specs from default queue + walltime = self.get(self.get_default_queue(), "walltimemax") + + logger.debug( + "Using walltimemax {!r} from default " + "queue {!r}".format(walltime, self.text(queue)) + ) + + # Still no walltime, use the hardcoded default + if walltime is None: + walltime = self._default_walltime + + logger.debug( + "Last resort using default walltime " + "{!r}".format(walltime) + ) + + # only enforce when not running a test + if not case.get_value("TEST"): + walltime_seconds = convert_to_seconds(walltime) + + # walltime must not be less than walltimemin + if walltimemin is not None: + walltimemin_seconds = convert_to_seconds(walltimemin) + + if walltime_seconds < walltimemin_seconds: + logger.warning( + "WARNING: Job {!r} walltime " + "{!r} is less than queue " + "{!r} minimum walltime " + "{!r}, job might fail".format( + job, walltime, self.text(queue), walltimemin + ) + ) + + # walltime must not be more than walltimemax + if walltimemax is not None: + walltimemax_seconds = convert_to_seconds(walltimemax) + + if walltime_seconds > walltimemax_seconds: + logger.warning( + "WARNING: Job {!r} walltime " + "{!r} is more than queue " + "{!r} maximum walltime " + "{!r}, job might fail".format( + job, walltime, self.text(queue), walltimemax + ) + ) + + walltime_format = self.get_value("walltime_format") + if walltime_format: + seconds = convert_to_seconds(walltime) + full_bab_time = convert_to_babylonian_time(seconds) + walltime = format_time(walltime_format, "%H:%M:%S", full_bab_time) + if not self._env_workflow: + self._env_workflow = case.get_env("workflow") + + self._env_workflow.set_value( + "JOB_QUEUE", self.text(queue), subgroup=job, ignore_type=False + ) + self._env_workflow.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job) + logger.debug( + "Job {} queue {} walltime {}".format(job, self.text(queue), walltime) + ) + + def _match_attribs(self, attribs, case, queue): + # check for matches with case-vars + for attrib in attribs: + if attrib in ["default", "prefix"]: + # These are not used for matching + continue + + elif attrib == "queue": + if not self._match(queue, attribs["queue"]): + return False + + else: + val = case.get_value(attrib.upper()) + expect( + val is not None, + "Cannot match attrib '%s', case has no value for it" + % attrib.upper(), + ) + if not self._match(val, attribs[attrib]): + return False + + return True + + def _match(self, my_value, xml_value): + if xml_value.startswith("!"): + result = re.match(xml_value[1:], str(my_value)) is None + elif isinstance(my_value, bool): + if my_value: + result = xml_value == "TRUE" + else: + result = xml_value == "FALSE" + else: + result = re.match(xml_value + "$", str(my_value)) is not None + + logger.debug( + "(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result) + ) + return result + + def get_batch_directives(self, case, job, overrides=None, output_format="default"): + """ 
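+ Build the batch directive block for one job. + Directives come from entries in config_batch.xml whose attributes match the case and the job's queue; + each directive is resolved and prefixed with the batch system's directive prefix (or a per-directive prefix override). + Illustrative return value for a Slurm-style machine (the actual content depends entirely on config_batch.xml): + #SBATCH --job-name=case.run + #SBATCH --time=01:30:00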
""" + result = [] + directive_prefix = None + + roots = self.get_children("batch_system") + queue = case.get_value("JOB_QUEUE", subgroup=job) + if self._batchtype != "none" and not queue in self._get_all_queue_names(): + unknown_queue = True + qnode = self.get_default_queue() + default_queue = self.text(qnode) + else: + unknown_queue = False + + for root in roots: + if root is not None: + if directive_prefix is None: + if output_format == "default": + directive_prefix = self.get_element_text( + "batch_directive", root=root + ) + elif output_format == "cylc": + directive_prefix = " " + if unknown_queue: + unknown_queue_directives = self.get_element_text( + "unknown_queue_directives", root=root + ) + if unknown_queue_directives is None: + queue = default_queue + else: + queue = unknown_queue_directives + + dnodes = self.get_children("directives", root=root) + for dnode in dnodes: + nodes = self.get_children("directive", root=dnode) + if self._match_attribs(self.attrib(dnode), case, queue): + for node in nodes: + directive = self.get_resolved_value( + "" if self.text(node) is None else self.text(node) + ) + if output_format == "cylc": + if self._batchtype == "pbs": + # cylc includes the -N itself, no need to add + if directive.startswith("-N"): + directive = "" + continue + m = re.match(r"\s*(-[\w])", directive) + if m: + directive = re.sub( + r"(-[\w]) ", + "{} = ".format(m.group(1)), + directive, + ) + + default = self.get(node, "default") + if default is None: + directive = transform_vars( + directive, + case=case, + subgroup=job, + default=default, + overrides=overrides, + ) + else: + directive = transform_vars(directive, default=default) + + custom_prefix = self.get(node, "prefix") + prefix = ( + directive_prefix + if custom_prefix is None + else custom_prefix + ) + + result.append( + "{}{}".format( + "" if not prefix else (prefix + " "), directive + ) + ) + + return "\n".join(result) + + def get_submit_args(self, case, job, resolve=True): + """ + return a list of touples (flag, name) + """ + bs_nodes = self.get_children("batch_system") + + submit_arg_nodes = self._get_arg_nodes(case, bs_nodes) + + submitargs = self._process_args(case, submit_arg_nodes, job, resolve=resolve) + + return submitargs + + def _get_arg_nodes(self, case, bs_nodes): + submit_arg_nodes = [] + + for node in bs_nodes: + sanode = self.get_optional_child("submit_args", root=node) + if sanode is not None: + arg_nodes = self.get_children("arg", root=sanode) + + if len(arg_nodes) > 0: + check_paths = [case.get_value("BATCH_SPEC_FILE")] + + user_config_path = os.path.join( + pathlib.Path().home(), ".cime", "config_batch.xml" + ) + + if os.path.exists(user_config_path): + check_paths.append(user_config_path) + + logger.warning( + 'Deprecated "arg" node detected in {}, check files {}'.format( + self.filename, ", ".join(check_paths) + ) + ) + + submit_arg_nodes += arg_nodes + + submit_arg_nodes += self.get_children("argument", root=sanode) + + return submit_arg_nodes + + def _process_args(self, case, submit_arg_nodes, job, resolve=True): + submitargs = " " + + for arg in submit_arg_nodes: + name = None + flag = None + try: + flag, name = self._get_argument(case, arg) + except ValueError: + continue + + if self._batchtype == "cobalt" and job == "case.st_archive": + if flag == "-n": + name = "task_count" + + if flag == "--mode": + continue + + if name is None: + if " " in flag: + flag, name = flag.split() + if name: + if resolve and "$" in name: + rflag = self._resolve_argument(case, flag, name, job) + # This is to prevent 
-gpu_type=none in qsub args + if rflag.endswith("=none"): + continue + if len(rflag) > len(flag): + submitargs += " {}".format(rflag) + else: + submitargs += " " + add_flag_to_cmd(flag, name) + else: + submitargs += " {}".format(flag) + else: + if resolve: + try: + submitargs += self._resolve_argument(case, flag, name, job) + except ValueError: + continue + else: + submitargs += " " + add_flag_to_cmd(flag, name) + + return submitargs + + def _get_argument(self, case, arg): + flag = self.get(arg, "flag") + + name = self.get(arg, "name") + + # if flag is None then we are dealing with the new `argument` + if flag is None: + flag = self.text(arg) + job_queue_restriction = self.get(arg, "job_queue") + + if ( + job_queue_restriction is not None + and job_queue_restriction != case.get_value("JOB_QUEUE") + ): + raise ValueError() + + return flag, name + + def _resolve_argument(self, case, flag, name, job): + submitargs = "" + logger.debug("name is {}".format(name)) + # if name.startswith("$"): + # name = name[1:] + + if "$" in name: + parts = name.split("$") + logger.debug("parts are {}".format(parts)) + val = "" + for part in parts: + if part != "": + logger.debug("part is {}".format(part)) + resolved = case.get_value(part, subgroup=job) + if resolved: + val += resolved + else: + val += part + logger.debug("val is {}".format(name)) + val = case.get_resolved_value(val) + else: + val = case.get_value(name, subgroup=job) + + if val is not None and len(str(val)) > 0 and val != "None": + # Try to evaluate val if it contains any whitespace + if " " in val: + try: + rval = eval(val) + except Exception: + rval = val + else: + rval = val + + # We don't want floating-point data (ignore anything else) + if str(rval).replace(".", "", 1).isdigit(): + rval = int(round(float(rval))) + + # need a correction for tasks per node + if flag == "-n" and rval <= 0: + rval = 1 + + if flag == "-q" and rval == "batch" and case.get_value("MACH") == "blues": + # Special case.
Do not provide '-q batch' for blues + raise ValueError() + + submitargs = " " + add_flag_to_cmd(flag, rval) + + return submitargs + + def submit_jobs( + self, + case, + no_batch=False, + job=None, + user_prereq=None, + skip_pnl=False, + allow_fail=False, + resubmit_immediate=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ): + """ + no_batch indicates that the jobs should be run directly rather than submitted to a queueing system + job is the first job in the workflow sequence to start + user_prereq is a batch system prerequisite as requested by the user + skip_pnl indicates that the preview_namelist should not be run by this job + allow_fail indicates that the prereq job need only complete, not necessarily successfully, to start the next job + resubmit_immediate indicates that all jobs indicated by the RESUBMIT option should be submitted at the same time instead of + waiting to resubmit at the end of the first sequence + workflow is a logical indicating whether only "job" is submitted or the workflow sequence starting with "job" is submitted + """ + + external_workflow = case.get_value("EXTERNAL_WORKFLOW") + if not self._env_workflow: + self._env_workflow = case.get_env("workflow") + alljobs = self._env_workflow.get_jobs() + alljobs = [ + j + for j in alljobs + if os.path.isfile( + os.path.join( + self._caseroot, + get_batch_script_for_job( + j, hidden=self._env_workflow.hidden_job(case, j) + ), + ) + ) + ] + + startindex = 0 + jobs = [] + firstjob = job + if job is not None: + expect(job in alljobs, "Do not know about batch job {}".format(job)) + startindex = alljobs.index(job) + for index, job in enumerate(alljobs): + logger.debug( + "Index {:d} job {} startindex {:d}".format(index, job, startindex) + ) + if index < startindex: + continue + try: + prereq = self._env_workflow.get_value( + "prereq", subgroup=job, resolved=False + ) + if ( + external_workflow + or prereq is None + or job == firstjob + or (dry_run and prereq == "$BUILD_COMPLETE") + ): + prereq = True + else: + prereq = case.get_resolved_value(prereq) + prereq = eval(prereq) + except Exception: + expect( + False, + "Unable to evaluate prereq expression '{}' for job '{}'".format( + self.get_value("prereq", subgroup=job), job + ), + ) + if prereq: + jobs.append( + (job, self._env_workflow.get_value("dependency", subgroup=job)) + ) + + if self._batchtype == "cobalt": + break + + depid = OrderedDict() + jobcmds = [] + + if workflow and resubmit_immediate: + num_submit = case.get_value("RESUBMIT") + 1 + case.set_value("RESUBMIT", 0) + if num_submit <= 0: + num_submit = 1 + else: + num_submit = 1 + + prev_job = None + batch_job_id = None + for _ in range(num_submit): + for job, dependency in jobs: + dep_jobs = get_job_deps(dependency, depid, prev_job, user_prereq) + + logger.debug("job {} depends on {}".format(job, dep_jobs)) + + result = self._submit_single_job( + case, + job, + skip_pnl=skip_pnl, + resubmit_immediate=resubmit_immediate, + dep_jobs=dep_jobs, + allow_fail=allow_fail, + no_batch=no_batch, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + dry_run=dry_run, + workflow=workflow, + ) + + batch_job_id = str(alljobs.index(job)) if dry_run else result + depid[job] = batch_job_id + jobcmds.append((job, result)) + + if self._batchtype == "cobalt" or external_workflow or not workflow: + break + + if not external_workflow and not no_batch: + expect(batch_job_id, "No result from jobs {}".format(jobs)) + prev_job = batch_job_id + + if dry_run: + return
jobcmds + else: + return depid + + @staticmethod + def _get_supported_args(job, no_batch): + """ + Returns a map of the supported parameters and their arguments to the given script + TODO: Maybe let each script define this somewhere? + + >>> EnvBatch._get_supported_args("", False) + {} + >>> EnvBatch._get_supported_args("case.test", False) + {'skip_pnl': '--skip-preview-namelist'} + >>> EnvBatch._get_supported_args("case.st_archive", True) + {'resubmit': '--resubmit'} + """ + supported = {} + if job in ["case.run", "case.test"]: + supported["skip_pnl"] = "--skip-preview-namelist" + if job == "case.run": + supported["set_continue_run"] = "--completion-sets-continue-run" + if job in ["case.st_archive", "case.run"]: + if job == "case.st_archive" and no_batch: + supported["resubmit"] = "--resubmit" + else: + supported["submit_resubmits"] = "--resubmit" + return supported + + @staticmethod + def _build_run_args(job, no_batch, **run_args): + """ + Returns a map of the filtered parameters for the given script, + as well as the values passed and the equivalent arguments for calling the script + + >>> EnvBatch._build_run_args("case.run", False, skip_pnl=True, cthulu="f'taghn") + {'skip_pnl': (True, '--skip-preview-namelist')} + >>> EnvBatch._build_run_args("case.run", False, skip_pnl=False, cthulu="f'taghn") + {} + """ + supported_args = EnvBatch._get_supported_args(job, no_batch) + args = {} + for arg_name, arg_value in run_args.items(): + if arg_value and (arg_name in supported_args.keys()): + args[arg_name] = (arg_value, supported_args[arg_name]) + return args + + def _build_run_args_str(self, job, no_batch, **run_args): + """ + Returns a string of the filtered arguments for the given script, + based on the arguments passed + """ + args = self._build_run_args(job, no_batch, **run_args) + run_args_str = " ".join(param for _, param in args.values()) + logging_options = get_logging_options() + if logging_options: + run_args_str += " {}".format(logging_options) + + batch_env_flag = self.get_value("batch_env", subgroup=None) + if not batch_env_flag: + return run_args_str + elif len(run_args_str) > 0: + batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) + logger.debug("batch_system: {}: ".format(batch_system)) + if batch_system == "lsf": + return '{} "all, ARGS_FOR_SCRIPT={}"'.format( + batch_env_flag, run_args_str + ) + else: + return "{} ARGS_FOR_SCRIPT='{}'".format(batch_env_flag, run_args_str) + else: + return "" + + def _submit_single_job( + self, + case, + job, + dep_jobs=None, + allow_fail=False, + no_batch=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + resubmit_immediate=False, + workflow=True, + ): + if not dry_run: + logger.warning("Submit job {}".format(job)) + batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) + if batch_system is None or batch_system == "none" or no_batch: + logger.info("Starting job script {}".format(job)) + function_name = job.replace(".", "_") + job_name = "." 
+ job + args = self._build_run_args( + job, + True, + skip_pnl=skip_pnl, + set_continue_run=resubmit_immediate, + submit_resubmits=workflow and not resubmit_immediate, + ) + + try: + if hasattr(case, function_name): + if dry_run: + return + + getattr(case, function_name)(**{k: v for k, (v, _) in args.items()}) + else: + expect( + os.path.isfile(job_name), + "Could not find file {}".format(job_name), + ) + if dry_run: + return os.path.join(self._caseroot, job_name) + else: + run_cmd_no_fail( + os.path.join(self._caseroot, job_name), + combine_output=True, + verbose=True, + from_dir=self._caseroot, + ) + except Exception as e: + # We don't want exception from the run phases getting into submit phase + logger.warning("Exception from {}: {}".format(function_name, str(e))) + + return + + submitargs = case.get_value("BATCH_COMMAND_FLAGS", subgroup=job, resolved=False) + + project = case.get_value("PROJECT", subgroup=job) + + if not project: + # If there is no project then we need to remove the project flag + if ( + batch_system == "pbs" or batch_system == "cobalt" + ) and " -A " in submitargs: + submitargs = submitargs.replace("-A", "") + elif batch_system == "lsf" and " -P " in submitargs: + submitargs = submitargs.replace("-P", "") + elif batch_system == "slurm" and " --account " in submitargs: + submitargs = submitargs.replace("--account", "") + + if dep_jobs is not None and len(dep_jobs) > 0: + logger.debug("dependencies: {}".format(dep_jobs)) + if allow_fail: + dep_string = self.get_value("depend_allow_string", subgroup=None) + if dep_string is None: + logger.warning( + "'depend_allow_string' is not defined for this batch system, " + + "falling back to the 'depend_string'" + ) + dep_string = self.get_value("depend_string", subgroup=None) + else: + dep_string = self.get_value("depend_string", subgroup=None) + expect( + dep_string is not None, + "'depend_string' is not defined for this batch system", + ) + + separator_string = self.get_value("depend_separator", subgroup=None) + expect(separator_string is not None, "depend_separator string not defined") + + expect( + "jobid" in dep_string, + "depend_string is missing jobid for prerequisite jobs", + ) + dep_ids_str = str(dep_jobs[0]) + for dep_id in dep_jobs[1:]: + dep_ids_str += separator_string + str(dep_id) + dep_string = dep_string.replace( + "jobid", dep_ids_str.strip() + ) # pylint: disable=maybe-no-member + submitargs += " " + dep_string + + if batch_args is not None: + submitargs += " " + batch_args + + cime_config = get_cime_config() + + if mail_user is None and cime_config.has_option("main", "MAIL_USER"): + mail_user = cime_config.get("main", "MAIL_USER") + + if mail_user is not None: + mail_user_flag = self.get_value("batch_mail_flag", subgroup=None) + if mail_user_flag is not None: + submitargs += " " + mail_user_flag + " " + mail_user + + if mail_type is None: + if job == "case.test" and cime_config.has_option( + "create_test", "MAIL_TYPE" + ): + mail_type = cime_config.get("create_test", "MAIL_TYPE") + elif cime_config.has_option("main", "MAIL_TYPE"): + mail_type = cime_config.get("main", "MAIL_TYPE") + else: + mail_type = self.get_value("batch_mail_default") + + if mail_type: + mail_type = mail_type.split(",") # pylint: disable=no-member + + if mail_type: + mail_type_flag = self.get_value("batch_mail_type_flag", subgroup=None) + if mail_type_flag is not None: + mail_type_args = [] + for indv_type in mail_type: + mail_type_arg = self.get_batch_mail_type(indv_type) + mail_type_args.append(mail_type_arg) + + if mail_type_flag == 
"-m": + # hacky, PBS-type systems pass multiple mail-types differently + submitargs += " {} {}".format( + mail_type_flag, "".join(mail_type_args) + ) + else: + submitargs += " {} {}".format( + mail_type_flag, + " {} ".format(mail_type_flag).join(mail_type_args), + ) + batchsubmit = self.get_value("batch_submit", subgroup=None) + expect( + batchsubmit is not None, + "Unable to determine the correct command for batch submission.", + ) + batchredirect = self.get_value("batch_redirect", subgroup=None) + batch_env_flag = self.get_value("batch_env", subgroup=None) + run_args = self._build_run_args_str( + job, + False, + skip_pnl=skip_pnl, + set_continue_run=resubmit_immediate, + submit_resubmits=workflow and not resubmit_immediate, + ) + + if batch_system == "lsf" and not batch_env_flag: + sequence = ( + run_args, + batchsubmit, + submitargs, + batchredirect, + get_batch_script_for_job( + job, + hidden=self._env_workflow.hidden_job(case, job), + ), + ) + elif batch_env_flag: + sequence = ( + batchsubmit, + submitargs, + run_args, + batchredirect, + os.path.join( + self._caseroot, + get_batch_script_for_job( + job, + hidden=self._env_workflow.hidden_job(case, job), + ), + ), + ) + else: + sequence = ( + batchsubmit, + submitargs, + batchredirect, + os.path.join( + self._caseroot, + get_batch_script_for_job( + job, + hidden=self._env_workflow.hidden_job(case, job), + ), + ), + run_args, + ) + + submitcmd = " ".join(s.strip() for s in sequence if s is not None) + if submitcmd.startswith("ssh") and "$CASEROOT" in submitcmd: + # add ` before cd $CASEROOT and at end of command + submitcmd = submitcmd.replace("cd $CASEROOT", "'cd $CASEROOT") + "'" + + submitcmd = case.get_resolved_value(submitcmd, subgroup=job) + if dry_run: + return submitcmd + else: + logger.info("Submitting job script {}".format(submitcmd)) + output = run_cmd_no_fail(submitcmd, combine_output=True) + jobid = self.get_job_id(output) + logger.info("Submitted job id is {}".format(jobid)) + return jobid + + def get_batch_mail_type(self, mail_type): + raw = self.get_value("batch_mail_type", subgroup=None) + mail_types = [ + item.strip() for item in raw.split(",") + ] # pylint: disable=no-member + idx = ["never", "all", "begin", "end", "fail"].index(mail_type) + + return mail_types[idx] if idx < len(mail_types) else None + + def get_batch_system_type(self): + nodes = self.get_children("batch_system") + for node in nodes: + type_ = self.get(node, "type") + if type_ is not None: + self._batchtype = type_ + return self._batchtype + + def set_batch_system_type(self, batchtype): + self._batchtype = batchtype + + def get_job_id(self, output): + jobid_pattern = self.get_value("jobid_pattern", subgroup=None) + if self._batchtype and self._batchtype != "none": + expect( + jobid_pattern is not None, + "Could not find jobid_pattern in env_batch.xml", + ) + + # If no output was provided, skip the search. This could + # be because --no-batch was provided. 
+ if not output: + return output + else: + return output + + search_match = re.search(jobid_pattern, output) + expect( + search_match is not None, + "Couldn't match jobid_pattern '{}' within submit output:\n '{}'".format( + jobid_pattern, output + ), + ) + jobid = search_match.group(1) + return jobid + + def queue_meets_spec(self, queue, num_nodes, num_tasks, walltime=None, job=None): + specs = self.get_queue_specs(queue) + + nodemin, nodemax, jobname, _, _, walltimemax, jobmin, jobmax, strict = specs + + # A job name match automatically meets spec + if job is not None and jobname is not None: + return jobname == job + + if ( + nodemin is not None + and num_nodes < nodemin + or nodemax is not None + and num_nodes > nodemax + or jobmin is not None + and num_tasks < jobmin + or jobmax is not None + and num_tasks > jobmax + ): + return False + + if walltime is not None and walltimemax is not None and strict: + walltime_s = convert_to_seconds(walltime) + walltimemax_s = convert_to_seconds(walltimemax) + if walltime_s > walltimemax_s: + return False + + return True + + def _get_all_queue_names(self): + all_queues = [] + all_queues = self.get_all_queues() + + queue_names = [] + for queue in all_queues: + queue_names.append(self.text(queue)) + + return queue_names + + def select_best_queue( + self, num_nodes, num_tasks, name=None, walltime=None, job=None + ): + logger.debug( + "Selecting best queue with criteria nodes={!r}, " + "tasks={!r}, name={!r}, walltime={!r}, job={!r}".format( + num_nodes, num_tasks, name, walltime, job + ) + ) + + # Make sure to check default queue first. + qnodes = self.get_all_queues(name=name) + for qnode in qnodes: + if self.queue_meets_spec( + qnode, num_nodes, num_tasks, walltime=walltime, job=job + ): + logger.debug("Selected queue {!r}".format(self.text(qnode))) + + return qnode + + return None + + def get_queue_specs(self, qnode): + """ + Get queue specifications from node. 
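+ + A queue is described by a <queue> element (typically in config_batch.xml) whose text is the queue name + and whose optional attributes carry the specs parsed below, e.g. (illustrative values): + <queue nodemin="1" nodemax="4032" walltimemax="12:00:00" default="true">regular</queue>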
+ + Returns (nodemin, nodemax, jobname, walltimedef, walltimemin, walltimemax, jobmin, jobmax, strict) + """ + nodemin = self.get(qnode, "nodemin") + nodemin = None if nodemin is None else int(nodemin) + nodemax = self.get(qnode, "nodemax") + nodemax = None if nodemax is None else int(nodemax) + + jobmin = self.get(qnode, "jobmin") + jobmin = None if jobmin is None else int(jobmin) + jobmax = self.get(qnode, "jobmax") + jobmax = None if jobmax is None else int(jobmax) + + expect( + nodemin is None or jobmin is None, + "Cannot specify both nodemin and jobmin for a queue", + ) + expect( + nodemax is None or jobmax is None, + "Cannot specify both nodemax and jobmax for a queue", + ) + + jobname = self.get(qnode, "jobname") + walltimedef = self.get(qnode, "walltimedef") + walltimemin = self.get(qnode, "walltimemin") + walltimemax = self.get(qnode, "walltimemax") + strict = self.get(qnode, "strict") == "true" + + return ( + nodemin, + nodemax, + jobname, + walltimedef, + walltimemin, + walltimemax, + jobmin, + jobmax, + strict, + ) + + def get_default_queue(self): + bs_nodes = self.get_children("batch_system") + node = None + for bsnode in bs_nodes: + qnodes = self.get_children("queues", root=bsnode) + for qnode in qnodes: + node = self.get_optional_child( + "queue", attributes={"default": "true"}, root=qnode + ) + if node is None: + node = self.get_optional_child("queue", root=qnode) + + expect(node is not None, "No queues found") + return node + + def get_all_queues(self, name=None): + bs_nodes = self.get_children("batch_system") + nodes = [] + default_idx = None + for bsnode in bs_nodes: + qsnode = self.get_optional_child("queues", root=bsnode) + if qsnode is not None: + qnodes = self.get_children("queue", root=qsnode) + for qnode in qnodes: + if name is None or self.text(qnode) == name: + nodes.append(qnode) + if self.get(qnode, "default", default="false") == "true": + default_idx = len(nodes) - 1 + + # Queues are selected by first match, so we want the queue marked + # as default to come first.
+ if default_idx is not None: + def_node = nodes.pop(default_idx) + nodes.insert(0, def_node) + + return nodes + + def get_children(self, name=None, attributes=None, root=None): + if name == "PROJECT_REQUIRED": + nodes = super(EnvBatch, self).get_children( + "entry", attributes={"id": name}, root=root + ) + else: + nodes = super(EnvBatch, self).get_children( + name, attributes=attributes, root=root + ) + + return nodes + + def get_status(self, jobid): + batch_query = self.get_optional_child("batch_query") + if batch_query is None: + logger.warning("Batch queries not supported on this platform") + else: + cmd = self.text(batch_query) + " " + if self.has(batch_query, "per_job_arg"): + cmd += self.get(batch_query, "per_job_arg") + " " + + cmd += jobid + + status, out, err = run_cmd(cmd) + if status != 0: + logger.warning( + "Batch query command '{}' failed with error '{}'".format(cmd, err) + ) + else: + return out.strip() + + def cancel_job(self, jobid): + batch_cancel = self.get_optional_child("batch_cancel") + if batch_cancel is None: + logger.warning("Batch cancellation not supported on this platform") + return False + else: + cmd = self.text(batch_cancel) + " " + str(jobid) + + status, out, err = run_cmd(cmd) + if status != 0: + logger.warning( + "Batch cancel command '{}' failed with error '{}'".format( + cmd, out + "\n" + err + ) + ) + else: + return True + + def zip(self, other, name): + for self_pnode in self.get_children(name): + try: + other_pnode = other.get_children(name, attributes=self_pnode.attrib)[0] + except (TypeError, IndexError): + other_pnode = None + + for node1 in self.get_children(root=self_pnode): + other_children = other.scan_children( + node1.name, attributes=node1.attrib, root=other_pnode + ) + real_other_children = [] + if not node1.attrib: + # Only keep elements that had no attributes. If node1 has no attributes + # scan_children will return ALL elements with matching name. 
+ for other_child in other_children: + if node1.attrib == other_child.attrib: + real_other_children.append(other_child) + else: + real_other_children = other_children + + expect( + len(real_other_children) == 1, + "Multiple matches in zip for single node", + ) + yield node1, real_other_children[0] + + def _compare_arg(self, index, arg1, arg2): + try: + flag1 = arg1.attrib["flag"] + name1 = arg1.attrib.get("name", "") + except AttributeError: + flag2, name2 = arg2.attrib["flag"], arg2.attrib["name"] + + return {f"arg{index}": ["", f"{flag2} {name2}"]} + + try: + flag2 = arg2.attrib["flag"] + name2 = arg2.attrib.get("name", "") + except AttributeError: + return {f"arg{index}": [f"{flag1} {name1}", ""]} + + if flag1 != flag2 or name1 != name2: + return {f"arg{index}": [f"{flag1} {name1}", f"{flag2} {name2}"]} + + return {} + + def _compare_argument(self, index, arg1, arg2): + if arg1.text != arg2.text: + return {f"argument{index}": [arg1.text, arg2.text]} + + return {} + + def compare_xml(self, other): + xmldiffs = {} + + for node1, node2 in self.zip(other, "batch_system"): + if node1.name == "submit_args": + self_nodes = self.get_children(root=node1) + other_nodes = other.get_children(root=node2) + for i, (x, y) in enumerate( + zip_longest(self_nodes, other_nodes, fillvalue=None) + ): + if (x is not None and x.name == "arg") or ( + y is not None and y.name == "arg" + ): + xmldiffs.update(self._compare_arg(i, x, y)) + elif (x is not None and x.name == "argument") or ( + y is not None and y.name == "argument" + ): + xmldiffs.update(self._compare_node(x, y, i)) + elif node1.name == "directives": + self_nodes = self.get_children(root=node1) + other_nodes = other.get_children(root=node2) + for i, (x, y) in enumerate( + zip_longest(self_nodes, other_nodes, fillvalue=None) + ): + xmldiffs.update(self._compare_node(x, y, i)) + elif node1.name == "queues": + self_nodes = self.get_children(root=node1) + other_nodes = other.get_children(root=node2) + for i, (x, y) in enumerate( + zip_longest(self_nodes, other_nodes, fillvalue=None) + ): + xmldiffs.update(self._compare_node(x, y, i)) + else: + xmldiffs.update(self._compare_node(node1, node2)) + + for node in self.get_children("group"): + group = self.get(node, "id") + f2group = other.get_child("group", attributes={"id": group}) + xmldiffs.update( + super(EnvBatch, self).compare_xml(other, root=node, otherroot=f2group) + ) + return xmldiffs + + def _compare_node(self, x, y, index=None): + """Compares two XML nodes and returns diff. + + Compares the attributes and text of two XML nodes. Handles the case when either node is `None`. + + The `index` argument can be used to append the nodes tag. This can be useful when comparing a list + of XML nodes that all have the same tag to differentiate which nodes are different. + + Args: + x (:obj:`CIME.XML.generic_xml._Element`): First node. + y (:obj:`CIME.XML.generic_xml._Element`): Second node. + index (int, optional): Index of the nodes. + + Returns: + dict: Key is the tag and value is the difference. 
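+ + Example: + Comparing two ``queue`` nodes whose text differs, with ``index=0``, yields + ``{"queue0": ["regular", "debug"]}`` (illustrative node name and values).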
+ """ + diff = {} + + if index is None: + index = "" + + if x is None: + diff[f"{y.name}{index}"] = ["", y.text] + elif y is None: + diff[f"{x.name}{index}"] = [x.text, ""] + elif x.text != y.text or x.attrib != y.attrib: + diff[f"{x.name}{index}"] = [x.text, y.text] + + return diff + + def make_all_batch_files(self, case): + machdir = case.get_value("MACHDIR") + logger.info("Creating batch scripts") + if not self._env_workflow: + self._env_workflow = case.get_env("workflow") + jobs = self._env_workflow.get_jobs() + for job in jobs: + template = case.get_resolved_value( + self._env_workflow.get_value("template", subgroup=job) + ) + if os.path.isabs(template): + input_batch_script = template + else: + input_batch_script = os.path.join(machdir, template) + if os.path.isfile(input_batch_script): + logger.info( + "Writing {} script from input template {}".format( + job, input_batch_script + ) + ) + self.make_batch_script(input_batch_script, job, case) + else: + logger.warning( + "Input template file {} for job {} does not exist or cannot be read.".format( + input_batch_script, job + ) + ) + + +def get_job_deps(dependency, depid, prev_job=None, user_prereq=None): + """ + Gather list of job batch ids that a job depends on. + + Parameters + ---------- + dependency : str + List of dependent job names. + depid : dict + Lookup where keys are job names and values are the batch id. + user_prereq : str + User requested dependency. + + Returns + ------- + list + List of batch ids that job depends on. + """ + deps = [] + dep_jobs = [] + + if user_prereq is not None: + dep_jobs.append(user_prereq) + + if dependency is not None: + # Match all words, excluding "and" and "or" + deps = re.findall(r"\b(?!and\b|or\b)\w+(?:\.\w+)?\b", dependency) + + for dep in deps: + if dep in depid and depid[dep] is not None: + dep_jobs.append(str(depid[dep])) + + if prev_job is not None: + dep_jobs.append(prev_job) + + return dep_jobs diff --git a/CIME/XML/env_build.py b/CIME/XML/env_build.py new file mode 100644 index 00000000000..fe863e414ef --- /dev/null +++ b/CIME/XML/env_build.py @@ -0,0 +1,36 @@ +""" +Interface to the env_build.xml file. This class inherits from EnvBase +""" +from CIME.XML.standard_module_setup import * + +from CIME import utils +from CIME.XML.env_base import EnvBase + +logger = logging.getLogger(__name__) + + +class EnvBuild(EnvBase): + # pylint: disable=unused-argument + def __init__( + self, case_root=None, infile="env_build.xml", components=None, read_only=False + ): + """ + initialize an object interface to file env_build.xml in the case directory + """ + schema = os.path.join(utils.get_schema_path(), "env_entry_id.xsd") + self._caseroot = case_root + EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + """ + Set the value of an entry-id field to value + Returns the value or None if not found + subgroup is ignored in the general routine and applied in specific methods + """ + # Do not allow any of these to be the same as CASEROOT + if vid in ("EXEROOT", "OBJDIR", "LIBROOT"): + utils.expect(value != self._caseroot, f"Cannot set {vid} to CASEROOT") + + return super(EnvBuild, self).set_value( + vid, value, subgroup=subgroup, ignore_type=ignore_type + ) diff --git a/CIME/XML/env_case.py b/CIME/XML/env_case.py new file mode 100644 index 00000000000..1b4c85d6f88 --- /dev/null +++ b/CIME/XML/env_case.py @@ -0,0 +1,21 @@ +""" +Interface to the env_case.xml file. 
This class inherits from EnvBase +""" +from CIME.XML.standard_module_setup import * + +from CIME import utils +from CIME.XML.env_base import EnvBase + +logger = logging.getLogger(__name__) + + +class EnvCase(EnvBase): + # pylint: disable=unused-argument + def __init__( + self, case_root=None, infile="env_case.xml", components=None, read_only=False + ): + """ + initialize an object interface to file env_case.xml in the case directory + """ + schema = os.path.join(utils.get_schema_path(), "env_entry_id.xsd") + EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) diff --git a/CIME/XML/env_mach_pes.py b/CIME/XML/env_mach_pes.py new file mode 100644 index 00000000000..3dba2c95ecb --- /dev/null +++ b/CIME/XML/env_mach_pes.py @@ -0,0 +1,236 @@ +""" +Interface to the env_mach_pes.xml file. This class inherits from EntryID +""" +from CIME.XML.standard_module_setup import * +from CIME import utils +from CIME.XML.env_base import EnvBase +import math + +logger = logging.getLogger(__name__) + + +class EnvMachPes(EnvBase): + def __init__( + self, + case_root=None, + infile="env_mach_pes.xml", + components=None, + read_only=False, + comp_interface="mct", + ): + """ + initialize an object interface to file env_mach_pes.xml in the case directory + """ + self._components = components + self._comp_interface = comp_interface + + schema = os.path.join(utils.get_schema_path(), "env_mach_pes.xsd") + EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) + + def add_comment(self, comment): + if comment is not None: + node = self.make_child("comment", text=comment) + # make_child adds to the end of the file but we want it to follow the header + # so we need to remove it and add it in the correct position + self.remove_child(node) + self.add_child(node, position=1) + + def get_value( + self, + vid, + attribute=None, + resolved=True, + subgroup=None, + ): # pylint: disable=arguments-differ + # Special variable NINST_MAX is used to determine the number of + # drivers in multi-driver mode. + if vid == "NINST_MAX": + # in the nuopc driver there is only a single NINST value + value = 1 + for comp in self._components: + if comp != "CPL": + value = max(value, self.get_value("NINST_{}".format(comp))) + return value + + value = EnvBase.get_value(self, vid, attribute, resolved, subgroup) + + if "NTASKS" in vid or "ROOTPE" in vid: + max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") + max_cputasks_per_gpu_node = self.get_value("MAX_CPUTASKS_PER_GPU_NODE") + ngpus_per_node = self.get_value("NGPUS_PER_NODE") + if (ngpus_per_node and value) and value < 0: + value = -1 * value * max_cputasks_per_gpu_node + elif value and value < 0: + value = -1 * value * max_mpitasks_per_node + # in the nuopc driver there is only one NINST value + # so that NINST_{comp} = NINST + if "NINST_" in vid and value is None: + value = self.get_value("NINST") + return value + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + """ + Set the value of an entry-id field to value + Returns the value or None if not found + subgroup is ignored in the general routine and applied in specific methods + """ + if vid == "MULTI_DRIVER" and value: + ninst_max = self.get_value("NINST_MAX") + for comp in self._components: + if comp == "CPL": + continue + ninst = self.get_value("NINST_{}".format(comp)) + expect( + ninst == ninst_max, + "All components must have the same NINST value in multi_driver mode. 
NINST_{}={} should be {}".format( + comp, ninst, ninst_max + ), + ) + + if ("NTASKS" in vid or "NTHRDS" in vid) and vid != "PIO_ASYNCIO_NTASKS": + expect(value != 0, f"Cannot set NTASKS or NTHRDS to 0 {vid}") + + return EnvBase.set_value( + self, vid, value, subgroup=subgroup, ignore_type=ignore_type + ) + + def get_max_thread_count(self, comp_classes): + """Find the maximum number of openmp threads for any component in the case""" + max_threads = 1 + for comp in comp_classes: + threads = self.get_value("NTHRDS", attribute={"compclass": comp}) + expect( + threads is not None, + "Error no thread count found for component class {}".format(comp), + ) + if threads > max_threads: + max_threads = threads + return max_threads + + def get_total_tasks(self, comp_classes, async_interface=False): + total_tasks = 0 + maxinst = self.get_value("NINST") + asyncio_ntasks = 0 + asyncio_rootpe = 0 + asyncio_stride = 0 + asyncio_tasks = [] + if maxinst: + comp_interface = "nuopc" + if async_interface: + asyncio_ntasks = self.get_value("PIO_ASYNCIO_NTASKS") + asyncio_rootpe = self.get_value("PIO_ASYNCIO_ROOTPE") + asyncio_stride = self.get_value("PIO_ASYNCIO_STRIDE") + logger.debug( + "asyncio ntasks {} rootpe {} stride {}".format( + asyncio_ntasks, asyncio_rootpe, asyncio_stride + ) + ) + if asyncio_ntasks and asyncio_stride: + for i in range( + asyncio_rootpe, + asyncio_rootpe + (asyncio_ntasks * asyncio_stride), + asyncio_stride, + ): + asyncio_tasks.append(i) + else: + comp_interface = "unknown" + maxinst = 1 + tt = 0 + maxrootpe = 0 + for comp in comp_classes: + ntasks = self.get_value("NTASKS", attribute={"compclass": comp}) + rootpe = self.get_value("ROOTPE", attribute={"compclass": comp}) + pstrid = self.get_value("PSTRID", attribute={"compclass": comp}) + + esmf_aware_threading = self.get_value("ESMF_AWARE_THREADING") + # mct is unaware of threads and they should not be counted here + # if esmf is thread aware they are included + if comp_interface == "nuopc" and esmf_aware_threading: + nthrds = self.get_value("NTHRDS", attribute={"compclass": comp}) + else: + nthrds = 1 + + if comp != "CPL" and comp_interface != "nuopc": + ninst = self.get_value("NINST", attribute={"compclass": comp}) + maxinst = max(maxinst, ninst) + tt = rootpe + nthrds * ((ntasks - 1) * pstrid + 1) + maxrootpe = max(maxrootpe, rootpe) + total_tasks = max(tt, total_tasks) + + if asyncio_tasks: + total_tasks = total_tasks + len(asyncio_tasks) + if self.get_value("MULTI_DRIVER"): + total_tasks *= maxinst + logger.debug("asyncio_tasks {}".format(asyncio_tasks)) + return total_tasks + + def get_tasks_per_node(self, total_tasks, max_thread_count): + expect( + total_tasks > 0, + "totaltasks > 0 expected, totaltasks = {}".format(total_tasks), + ) + if self._comp_interface == "nuopc" and self.get_value("ESMF_AWARE_THREADING"): + ngpus_per_node = self.get_value("NGPUS_PER_NODE") + if ngpus_per_node and ngpus_per_node > 0: + if self.get_value("OVERSUBSCRIBE_GPU"): + tasks_per_node = self.get_value("MAX_CPUTASKS_PER_GPU_NODE") + else: + tasks_per_node = self.get_value("NGPUS_PER_NODE") + else: + tasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") + else: + ngpus_per_node = self.get_value("NGPUS_PER_NODE") + if ngpus_per_node and ngpus_per_node > 0: + if self.get_value("OVERSUBSCRIBE_GPU"): + tasks_per_node = min( + self.get_value("MAX_TASKS_PER_NODE") // max_thread_count, + self.get_value("MAX_CPUTASKS_PER_GPU_NODE"), + total_tasks, + ) + else: + tasks_per_node = min( + self.get_value("MAX_TASKS_PER_NODE") // max_thread_count, +
self.get_value("NGPUS_PER_NODE"), + total_tasks, + ) + else: + tasks_per_node = min( + self.get_value("MAX_TASKS_PER_NODE") // max_thread_count, + self.get_value("MAX_MPITASKS_PER_NODE"), + total_tasks, + ) + return tasks_per_node if tasks_per_node > 0 else 1 + + def get_total_nodes(self, total_tasks, max_thread_count): + """ + Return (num_active_nodes, num_spare_nodes) + """ + # threads have already been included in nuopc interface + if self._comp_interface == "nuopc" and self.get_value("ESMF_AWARE_THREADING"): + max_thread_count = 1 + tasks_per_node = self.get_tasks_per_node(total_tasks, max_thread_count) + if self.get_value("OVERSUBSCRIBE_GPU"): + num_nodes = int(math.ceil(float(total_tasks) / tasks_per_node)) + else: + ngpus_per_node = self.get_value("NGPUS_PER_NODE") + if ngpus_per_node and ngpus_per_node > 0: + num_nodes = int(math.ceil(float(total_tasks) / ngpus_per_node)) + else: + num_nodes = int(math.ceil(float(total_tasks) / tasks_per_node)) + return num_nodes, self.get_spare_nodes(num_nodes) + + def get_spare_nodes(self, num_nodes): + force_spare_nodes = self.get_value("FORCE_SPARE_NODES") + if force_spare_nodes != -999: + return force_spare_nodes + + if self.get_value("ALLOCATE_SPARE_NODES"): + ten_pct = int(math.ceil(float(num_nodes) * 0.1)) + if ten_pct < 1: + return 1 # Always provide at lease one spare node + elif ten_pct > 10: + return 10 # Never provide more than 10 spare nodes + else: + return ten_pct + else: + return 0 diff --git a/CIME/XML/env_mach_specific.py b/CIME/XML/env_mach_specific.py new file mode 100644 index 00000000000..e94d367e66c --- /dev/null +++ b/CIME/XML/env_mach_specific.py @@ -0,0 +1,759 @@ +""" +Interface to the env_mach_specific.xml file. This class inherits from EnvBase +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.env_base import EnvBase +from CIME import utils +from CIME.utils import transform_vars, get_cime_root +import string, resource, platform +from collections import OrderedDict + +logger = logging.getLogger(__name__) + +# Is not of type EntryID but can use functions from EntryID (e.g +# get_type) otherwise need to implement own functions and make GenericXML parent class +class EnvMachSpecific(EnvBase): + # pylint: disable=unused-argument + def __init__( + self, + caseroot=None, + infile="env_mach_specific.xml", + components=None, + unit_testing=False, + read_only=False, + standalone_configure=False, + comp_interface=None, + ): + """ + initialize an object interface to file env_mach_specific.xml in the case directory + + Notes on some arguments: + standalone_configure: logical - whether this is being called from the standalone + configure utility, outside of a case + """ + schema = os.path.join(utils.get_schema_path(), "env_mach_specific.xsd") + EnvBase.__init__(self, caseroot, infile, schema=schema, read_only=read_only) + self._allowed_mpi_attributes = ( + "compiler", + "mpilib", + "threaded", + "unit_testing", + "queue", + "comp_interface", + ) + self._comp_interface = comp_interface + self._unit_testing = unit_testing + self._standalone_configure = standalone_configure + + def populate(self, machobj, attributes=None): + """Add entries to the file using information from a Machines object. 
+ mpilib must match attributes if set + """ + items = ("module_system", "environment_variables", "resource_limits", "mpirun") + default_run_suffix = machobj.get_child("default_run_suffix", root=machobj.root) + + group_node = self.make_child("group", {"id": "compliant_values"}) + settings = {"run_exe": None, "run_misc_suffix": None} + + for item in items: + nodes = machobj.get_first_child_nodes(item) + if item == "environment_variables": + if len(nodes) == 0: + example_text = """This section is for the user to specify any additional machine-specific env var, or to overwrite existing ones.\n \n ARGUMENT\n \n """ + self.make_child_comment(text=example_text) + + if item == "mpirun": + for node in nodes: + mpirunnode = machobj.copy(node) + match = True + # We pull the run_exe and run_misc_suffix from the mpirun node and use them if the attributes match; + # otherwise we use the default. + if attributes: + for attrib in attributes: + val = self.get(mpirunnode, attrib) + if val and attributes[attrib] != val: + match = False + + for subnode in machobj.get_children(root=mpirunnode): + subname = machobj.name(subnode) + if subname == "run_exe" or subname == "run_misc_suffix": + if match: + settings[subname] = self.text(subnode) + self.remove_child(subnode, root=mpirunnode) + + self.add_child(mpirunnode) + else: + for node in nodes: + self.add_child(node) + + for item in ("run_exe", "run_misc_suffix"): + if settings[item]: + value = settings[item] + else: + value = self.text( + machobj.get_child("default_" + item, root=default_run_suffix) + ) + + entity_node = self.make_child( + "entry", {"id": item, "value": value}, root=group_node + ) + self.make_child("type", root=entity_node, text="char") + self.make_child( + "desc", + root=entity_node, + text=( + "executable name" + if item == "run_exe" + else "redirect for job output" + ), + ) + + def _get_modules_for_case(self, case, job=None): + module_nodes = self.get_children( + "modules", root=self.get_child("module_system") + ) + modules_to_load = None + if module_nodes is not None: + modules_to_load = self._compute_module_actions(module_nodes, case, job=job) + + return modules_to_load + + def _get_envs_for_case(self, case, job=None): + env_nodes = self.get_children("environment_variables") + + envs_to_set = None + if env_nodes is not None: + envs_to_set = self._compute_env_actions(env_nodes, case, job=job) + + return envs_to_set + + def load_env(self, case, force_method=None, job=None, verbose=False): + """ + Should only be called by case.load_env + """ + # Do the modules so we can refer to env vars set by the modules + # in the environment_variables block + modules_to_load = self._get_modules_for_case(case) + if modules_to_load is not None: + self._load_modules( + modules_to_load, force_method=force_method, verbose=verbose + ) + + envs_to_set = self._get_envs_for_case(case, job=job) + if envs_to_set is not None: + self._load_envs(envs_to_set, verbose=verbose) + + self._get_resources_for_case(case) + + return [] if envs_to_set is None else envs_to_set + + def _get_resources_for_case(self, case): + resource_nodes = self.get_children("resource_limits") + if resource_nodes is not None: + expect( + platform.system() != "Darwin", + "Mac OS does not support setting resource limits", + ) + nodes = self._compute_resource_actions(resource_nodes, case) + for name, val in nodes: + attr = getattr(resource, name) + limits = resource.getrlimit(attr) + logger.info( + "Setting resource.{} to {} from {}".format(name, val, limits) + ) + limits = (int(val), limits[1]) +
resource.setrlimit(attr, limits) + + def _load_modules(self, modules_to_load, force_method=None, verbose=False): + module_system = ( + self.get_module_system_type() if force_method is None else force_method + ) + if module_system == "module": + self._load_module_modules(modules_to_load, verbose=verbose) + elif module_system == "soft": + self._load_modules_generic(modules_to_load, verbose=verbose) + elif module_system == "generic": + self._load_modules_generic(modules_to_load, verbose=verbose) + elif module_system == "none": + self._load_none_modules(modules_to_load) + else: + expect(False, "Unhandled module system '{}'".format(module_system)) + + def list_modules(self): + module_system = self.get_module_system_type() + + # If the user's login shell is not sh, it's possible that modules + # won't be configured so we need to be sure to source the module + # setup script if it exists. + init_path = self.get_module_system_init_path("sh") + if init_path: + source_cmd = ". {} && ".format(init_path) + else: + source_cmd = "" + + if module_system in ["module"]: + return run_cmd_no_fail( + "{}module list".format(source_cmd), combine_output=True + ) + elif module_system == "soft": + # Does soft really not provide this capability? + return "" + elif module_system == "generic": + return run_cmd_no_fail("{}use -lv".format(source_cmd)) + elif module_system == "none": + return "" + else: + expect(False, "Unhandled module system '{}'".format(module_system)) + + def save_all_env_info(self, filename): + """ + Get a string representation of all current environment info and + save it to file. + """ + with open(filename, "w") as f: + f.write(self.list_modules()) + run_cmd_no_fail("echo -e '\n' && env", arg_stdout=filename) + + def get_overrides_nodes(self, case): + overrides = {} + overrides["num_nodes"] = case.num_nodes + fnm = "env_mach_specific.xml" + output_text = transform_vars( + open(fnm, "r").read(), case=case, subgroup=None, overrides=overrides + ) + logger.info("Updating file {}".format(fnm)) + with open(fnm, "w") as fd: + fd.write(output_text) + return overrides + + def make_env_mach_specific_file(self, shell, case, output_dir=""): + """Writes .env_mach_specific.sh or .env_mach_specific.csh + + Args: + shell: string - 'sh' or 'csh' + case: case object + output_dir: string - path to output directory (if empty string, uses current directory) + """ + source_cmd = "." 
if shell == "sh" else "source" + module_system = self.get_module_system_type() + sh_init_cmd = self.get_module_system_init_path(shell) + sh_mod_cmd = self.get_module_system_cmd_path(shell) + lines = [ + "# This file is for user convenience only and is not used by the model" + ] + + lines.append("# Changes to this file will be ignored and overwritten") + lines.append( + "# Changes to the environment should be made in env_mach_specific.xml" + ) + lines.append("# Run ./case.setup --reset to regenerate this file") + if sh_init_cmd: + lines.append("{} {}".format(source_cmd, sh_init_cmd)) + + if "SOFTENV_ALIASES" in os.environ: + lines.append("{} $SOFTENV_ALIASES".format(source_cmd)) + if "SOFTENV_LOAD" in os.environ: + lines.append("{} $SOFTENV_LOAD".format(source_cmd)) + + if self._unit_testing or self._standalone_configure: + job = None + else: + job = case.get_primary_job() + modules_to_load = self._get_modules_for_case(case, job=job) + envs_to_set = self._get_envs_for_case(case, job=job) + filename = ".env_mach_specific.{}".format(shell) + if modules_to_load is not None: + if module_system == "module": + lines.extend(self._get_module_commands(modules_to_load, shell)) + else: + for action, argument in modules_to_load: + lines.append( + "{} {} {}".format( + sh_mod_cmd, action, "" if argument is None else argument + ) + ) + + if envs_to_set is not None: + for env_name, env_value in envs_to_set: + if shell == "sh": + if env_name == "source": + if env_value.startswith("sh"): + lines.append("{}".format(env_name)) + else: + if env_value is None: + lines.append("unset {}".format(env_name)) + else: + lines.append("export {}={}".format(env_name, env_value)) + + elif shell == "csh": + if env_name == "source": + if env_value.startswith("csh"): + lines.append("{}".format(env_name)) + else: + if env_value is None: + lines.append("unsetenv {}".format(env_name)) + else: + lines.append("setenv {} {}".format(env_name, env_value)) + else: + expect(False, "Unknown shell type: '{}'".format(shell)) + + with open(os.path.join(output_dir, filename), "w") as fd: + fd.write("\n".join(lines) + "\n") + + # Private API + + def _load_envs(self, envs_to_set, verbose=False): + for env_name, env_value in envs_to_set: + logger_func = logger.warning if verbose else logger.debug + if env_value is None and env_name in os.environ: + del os.environ[env_name] + logger_func("Unsetting Environment {}".format(env_name)) + elif env_value is not None: + if env_name == "source": + shell, cmd = env_value.split(" ", 1) + self._source_shell_file("source " + cmd, shell, verbose=verbose) + else: + if verbose: + print("Setting Environment {}={}".format(env_name, env_value)) + logger_func("Setting Environment {}={}".format(env_name, env_value)) + os.environ[env_name] = env_value + + def _compute_module_actions(self, module_nodes, case, job=None): + return self._compute_actions(module_nodes, "command", case, job=job) + + def _compute_env_actions(self, env_nodes, case, job=None): + return self._compute_actions(env_nodes, "env", case, job=job) + + def _compute_resource_actions(self, resource_nodes, case, job=None): + return self._compute_actions(resource_nodes, "resource", case, job=job) + + def _compute_actions(self, nodes, child_tag, case, job=None): + result = [] # list of tuples ("name", "argument") + compiler = case.get_value("COMPILER") + mpilib = case.get_value("MPILIB") + + for node in nodes: + if self._match_attribs(self.attrib(node), case, job=job): + for child in self.get_children(root=node): + expect( + self.name(child) == child_tag, 
+ "Expected {} element".format(child_tag), + ) + if self._match_attribs(self.attrib(child), case, job=job): + val = self.text(child) + if val is not None: + # We allow a couple special substitutions for these fields + for repl_this, repl_with in [ + ("$COMPILER", compiler), + ("$MPILIB", mpilib), + ]: + val = val.replace(repl_this, repl_with) + + val = self.get_resolved_value(val) + expect( + "$" not in val, + "Not safe to leave unresolved items in env var value: '{}'".format( + val + ), + ) + + # intentional unindent, result is appended even if val is None + name = self.get(child, "name") + if name: + result.append((name, val)) + else: + result.append( + ("source", self.get(child, "source") + " " + val) + ) + + return result + + def _match_attribs(self, attribs, case, job=None): + # check for matches with case-vars + for attrib in attribs: + if attrib == "unit_testing": # special case + if not self._match(self._unit_testing, attribs["unit_testing"].upper()): + return False + elif attrib == "queue": + if job is not None: + val = case.get_value("JOB_QUEUE", subgroup=job) + expect( + val is not None, + "Cannot match attrib '%s', case has no value for it" + % attrib.upper(), + ) + if not self._match(val, attribs[attrib]): + return False + elif attrib == "name": + pass + elif attrib == "source": + pass + else: + val = case.get_value(attrib.upper()) + expect( + val is not None, + "Cannot match attrib '%s', case has no value for it" + % attrib.upper(), + ) + if not self._match(val, attribs[attrib]): + return False + + return True + + def _match(self, my_value, xml_value): + if xml_value.startswith("!"): + result = re.match(xml_value[1:] + "$", str(my_value)) is None + elif isinstance(my_value, bool): + if my_value: + result = xml_value == "TRUE" + else: + result = xml_value == "FALSE" + else: + result = re.match(xml_value + "$", str(my_value)) is not None + + logger.debug( + "(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result) + ) + return result + + def _get_module_commands(self, modules_to_load, shell): + # Note this is independent of module system type + mod_cmd = self.get_module_system_cmd_path(shell) + cmds = [] + last_action = None + last_cmd = None + + # Normally, we will try to combine or batch module commands together... + # + # module load X + # module load Y + # module load Z + # + # is the same as ... + # + # module load X Y Z + # + # ... except the latter is significatly faster due to performing 1/3 as + # many forks. + # + # Not all module commands support batching though and we enurmerate those + # here. 
+ actions_that_cannot_be_batched = ["swap", "switch"] + + for action, argument in modules_to_load: + if argument is None: + argument = "" + + if action == last_action and action not in actions_that_cannot_be_batched: + last_cmd = "{} {}".format(last_cmd, argument) + else: + if last_cmd is not None: + cmds.append(last_cmd) + + last_cmd = "{} {} {}".format( + mod_cmd, action, "" if argument is None else argument + ) + last_action = action + + if last_cmd: + cmds.append(last_cmd) + + return cmds + + def _load_module_modules(self, modules_to_load, verbose=False): + logger_func = logger.warning if verbose else logger.debug + for cmd in self._get_module_commands(modules_to_load, "python"): + logger_func("module command is {}".format(cmd)) + stat, py_module_code, errout = run_cmd(cmd) + expect( + stat == 0 and (len(errout) == 0 or self.allow_error()), + "module command {} failed with message:\n{}".format(cmd, errout), + ) + exec(py_module_code) + + def _load_modules_generic(self, modules_to_load, verbose=False): + sh_init_cmd = self.get_module_system_init_path("sh") + sh_mod_cmd = self.get_module_system_cmd_path("sh") + + # Purpose is for environment management system that does not have + # a python interface and therefore can only determine what they + # do by running shell command and looking at the changes + # in the environment. + + cmd = ". {}".format(sh_init_cmd) + + if "SOFTENV_ALIASES" in os.environ: + cmd += " && . $SOFTENV_ALIASES" + if "SOFTENV_LOAD" in os.environ: + cmd += " && . $SOFTENV_LOAD" + + for action, argument in modules_to_load: + cmd += " && {} {} {}".format( + sh_mod_cmd, action, "" if argument is None else argument + ) + + self._source_shell_file(cmd, verbose=verbose) + + def _source_shell_file(self, cmd, shell="sh", verbose=False): + # Use null terminated lines to give us something more definitive to split on. + # Env vars can contain newlines, so splitting on newlines can be ambiguous + logger_func = logger.warning if verbose else logger.debug + cmd += " && env -0" + logger_func("cmd: {}".format(cmd)) + output = run_cmd_no_fail(cmd, executable=shell, verbose=verbose) + + ################################################### + # Parse the output to set the os.environ dictionary + ################################################### + newenv = OrderedDict() + for line in output.split("\0"): + if "=" in line: + key, val = line.split("=", 1) + newenv[key] = val + + # resolve variables + for key, val in newenv.items(): + newenv[key] = string.Template(val).safe_substitute(newenv) + + # Set environment with new or updated values + for key in newenv: + if key in os.environ and os.environ[key] == newenv[key]: + pass + else: + os.environ[key] = newenv[key] + + for oldkey in list(os.environ.keys()): + if oldkey not in newenv: + del os.environ[oldkey] + + def _load_none_modules(self, modules_to_load): + """ + No Action required + """ + expect( + not modules_to_load, + "Module system was specified as 'none' yet there are modules that need to be loaded?", + ) + + def _mach_specific_header(self, shell): + """ + write a shell module file for this case. + """ + header = """ +#!/usr/bin/env {} +#=============================================================================== +# Automatically generated module settings for $self->{{machine}} +# DO NOT EDIT THIS FILE DIRECTLY! Please edit env_mach_specific.xml +# in your CASEROOT. This file is overwritten every time modules are loaded! 
+#=============================================================================== +""".format( + shell + ) + source_cmd = "." if shell == "sh" else "source" + header += "{} {}".format(source_cmd, self.get_module_system_init_path(shell)) + return header + + def get_module_system_type(self): + """ + Return the module system used on this machine + """ + module_system = self.get_child("module_system") + return self.get(module_system, "type") + + def allow_error(self): + """ + Return True if stderr output from module commands should be assumed + to be an error. Default False. This is necessary since implementations + of environment modules are highlty variable and some systems produce + stderr output even when things are working fine. + """ + module_system = self.get_child("module_system") + value = self.get(module_system, "allow_error") + return value.upper() == "TRUE" if value is not None else False + + def get_module_system_init_path(self, lang): + init_nodes = self.get_optional_child( + "init_path", attributes={"lang": lang}, root=self.get_child("module_system") + ) + return ( + self.get_resolved_value(self.text(init_nodes)) + if init_nodes is not None + else None + ) + + def get_module_system_cmd_path(self, lang): + cmd_nodes = self.get_optional_child( + "cmd_path", attributes={"lang": lang}, root=self.get_child("module_system") + ) + return ( + self.get_resolved_value(self.text(cmd_nodes)) + if cmd_nodes is not None + else None + ) + + def _find_best_mpirun_match(self, attribs): + mpirun_nodes = self.get_children("mpirun") + best_match = None + best_num_matched = -1 + default_match = None + best_num_matched_default = -1 + for mpirun_node in mpirun_nodes: + xml_attribs = self.attrib(mpirun_node) + all_match = True + matches = 0 + is_default = False + + for key, value in attribs.items(): + expect( + key in self._allowed_mpi_attributes, + "Unexpected key {} in mpirun attributes".format(key), + ) + if key in xml_attribs: + if xml_attribs[key].lower() == "false": + xml_attrib = False + elif xml_attribs[key].lower() == "true": + xml_attrib = True + else: + xml_attrib = xml_attribs[key] + + if xml_attrib == value: + matches += 1 + elif ( + key == "mpilib" + and value != "mpi-serial" + and xml_attrib == "default" + ): + is_default = True + else: + all_match = False + break + + if all_match: + if is_default: + if matches > best_num_matched_default: + default_match = mpirun_node + best_num_matched_default = matches + else: + if matches > best_num_matched: + best_match = mpirun_node + best_num_matched = matches + + # if there are no special arguments required for mpi-serial it need not have an entry in config_machines.xml + if ( + "mpilib" in attribs + and attribs["mpilib"] == "mpi-serial" + and best_match is None + ): + raise ValueError() + + expect( + best_match is not None or default_match is not None, + "Could not find a matching MPI for attributes: {}".format(attribs), + ) + + return best_match if best_match is not None else default_match + + def get_aprun_mode(self, attribs): + default_mode = "default" + valid_modes = ("ignore", "default", "override") + + try: + the_match = self._find_best_mpirun_match(attribs) + except ValueError: + return default_mode + + mode_node = self.get_children("aprun_mode", root=the_match) + + if len(mode_node) == 0: + return default_mode + + expect(len(mode_node) == 1, 'Found multiple "aprun_mode" elements.') + + # should have only one element to select from + mode = self.text(mode_node[0]) + + expect( + mode in valid_modes, + f"Value {mode!r} for \"aprun_mode\" is 
not valid, options are {', '.join(valid_modes)!r}", + ) + + return mode + + def get_aprun_args(self, case, attribs, job, overrides=None): + args = {} + + try: + the_match = self._find_best_mpirun_match(attribs) + except ValueError: + return None + + arg_node = self.get_optional_child("arguments", root=the_match) + + if arg_node: + arg_nodes = self.get_children("arg", root=arg_node) + + for arg_node in arg_nodes: + position = self.get(arg_node, "position") + + if position is None: + position = "per" + + arg_value = transform_vars( + self.text(arg_node), + case=case, + subgroup=job, + overrides=overrides, + default=self.get(arg_node, "default"), + ) + + args[arg_value] = dict(position=position) + + return args + + def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None): + """ + Find best match, return (executable, {arg_name : text}) + """ + args = [] + + try: + the_match = self._find_best_mpirun_match(attribs) + except ValueError: + return "", [], None, None + + # Now that we know the best match, compute the arguments + if not exe_only: + arg_node = self.get_optional_child("arguments", root=the_match) + if arg_node: + arg_nodes = self.get_children("arg", root=arg_node) + for arg_node in arg_nodes: + arg_value = transform_vars( + self.text(arg_node), + case=case, + subgroup=job, + overrides=overrides, + default=self.get(arg_node, "default"), + ) + args.append(arg_value) + + exec_node = self.get_child("executable", root=the_match) + expect(exec_node is not None, "No executable found") + executable = self.text(exec_node) + run_exe = None + run_misc_suffix = None + + run_exe_node = self.get_optional_child("run_exe", root=the_match) + if run_exe_node: + run_exe = self.text(run_exe_node) + + run_misc_suffix_node = self.get_optional_child( + "run_misc_suffix", root=the_match + ) + if run_misc_suffix_node: + run_misc_suffix = self.text(run_misc_suffix_node) + + return executable, args, run_exe, run_misc_suffix + + def get_type_info(self, vid): + return "char" diff --git a/CIME/XML/env_postprocessing.py b/CIME/XML/env_postprocessing.py new file mode 100644 index 00000000000..90f56f24d64 --- /dev/null +++ b/CIME/XML/env_postprocessing.py @@ -0,0 +1,22 @@ +""" +Interface to the env_postprocessing.xml file. This class inherits from EnvBase +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.env_base import EnvBase + +from CIME import utils + +logger = logging.getLogger(__name__) + + +class EnvPostprocessing(EnvBase): + def __init__( + self, case_root=None, infile="env_postprocessing.xml", read_only=False + ): + """ + initialize an object interface to file env_postprocessing.xml in the case directory + """ + schema = os.path.join(utils.get_schema_path(), "env_entry_id.xsd") + + EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) diff --git a/CIME/XML/env_run.py b/CIME/XML/env_run.py new file mode 100644 index 00000000000..2e34c86c8f7 --- /dev/null +++ b/CIME/XML/env_run.py @@ -0,0 +1,70 @@ +""" +Interface to the env_run.xml file. 
This class inherits from EnvBase +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.env_base import EnvBase + +from CIME import utils +from CIME.utils import convert_to_type + +logger = logging.getLogger(__name__) + + +class EnvRun(EnvBase): + def __init__( + self, case_root=None, infile="env_run.xml", components=None, read_only=False + ): + """ + initialize an object interface to file env_run.xml in the case directory + """ + self._components = components + self._pio_async_interface = {} + + if components: + for comp in components: + self._pio_async_interface[comp] = False + + schema = os.path.join(utils.get_schema_path(), "env_entry_id.xsd") + + EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) + + def get_value(self, vid, attribute=None, resolved=True, subgroup=None): + """ + Get a value for entry with id attribute vid. + or from the values field if the attribute argument is provided + and matches. Special case for pio variables when PIO_ASYNC_INTERFACE is True. + """ + if any(self._pio_async_interface.values()): + vid, comp, iscompvar = self.check_if_comp_var(vid, attribute) + if vid.startswith("PIO") and iscompvar: + if comp and comp != "CPL": + logger.warning("Only CPL settings are used for PIO in async mode") + subgroup = "CPL" + + return EnvBase.get_value(self, vid, attribute, resolved, subgroup) + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + """ + Set the value of an entry-id field to value + Returns the value or None if not found + subgroup is ignored in the general routine and applied in specific methods + """ + comp = None + if any(self._pio_async_interface.values()): + vid, comp, iscompvar = self.check_if_comp_var(vid, None) + if vid.startswith("PIO") and iscompvar: + if comp and comp != "CPL": + logger.warning("Only CPL settings are used for PIO in async mode") + subgroup = "CPL" + + if vid == "PIO_ASYNC_INTERFACE": + if comp: + if type(value) == type(True): + self._pio_async_interface[comp] = value + else: + self._pio_async_interface[comp] = convert_to_type( + value, "logical", vid + ) + + return EnvBase.set_value(self, vid, value, subgroup, ignore_type) diff --git a/scripts/lib/CIME/XML/env_test.py b/CIME/XML/env_test.py similarity index 75% rename from scripts/lib/CIME/XML/env_test.py rename to CIME/XML/env_test.py index c74098efdbf..9dbe3615eff 100644 --- a/scripts/lib/CIME/XML/env_test.py +++ b/CIME/XML/env_test.py @@ -8,15 +8,18 @@ logger = logging.getLogger(__name__) + class EnvTest(EnvBase): # pylint: disable=unused-argument - def __init__(self, case_root=None, infile="env_test.xml", components=None, read_only=False): + def __init__( + self, case_root=None, infile="env_test.xml", components=None, read_only=False + ): """ initialize an object interface to file env_test.xml in the case directory """ EnvBase.__init__(self, case_root, infile, read_only=read_only) - def add_test(self,testnode): + def add_test(self, testnode): self.add_child(testnode) self.write() @@ -30,13 +33,20 @@ def set_initial_values(self, case): tnode = self.get_child("test") for child in self.get_children(root=tnode): if self.text(child) is not None: - logger.debug("Setting {} to {} for test".format(self.name(child), self.text(child))) + logger.debug( + "Setting {} to {} for test".format( + self.name(child), self.text(child) + ) + ) if "$" in self.text(child): - case.set_value(self.name(child),self.text(child),ignore_type=True) + case.set_value(self.name(child), self.text(child), ignore_type=True) else: item_type = 
case.get_type_info(self.name(child)) - value = convert_to_type(self.text(child),item_type,self.name(child)) - case.set_value(self.name(child),value) + if item_type: + value = convert_to_type( + self.text(child), item_type, self.name(child) + ) + case.set_value(self.name(child), value) case.flush() return @@ -46,7 +56,7 @@ def set_test_parameter(self, name, value): otherwise create a node and initialize it to value """ case = self.get_value("TESTCASE") - tnode = self.get_child("test",{"NAME":case}) + tnode = self.get_child("test", {"NAME": case}) idnode = self.get_optional_child(name, root=tnode) if idnode is None: @@ -56,14 +66,14 @@ def set_test_parameter(self, name, value): def get_test_parameter(self, name): case = self.get_value("TESTCASE") - tnode = self.get_child("test",{"NAME":case}) + tnode = self.get_child("test", {"NAME": case}) value = None idnode = self.get_optional_child(name, root=tnode) if idnode is not None: value = self.text(idnode) return value - def get_step_phase_cnt(self,step): + def get_step_phase_cnt(self, step): bldnodes = self.get_children(step) cnt = 0 for node in bldnodes: @@ -71,25 +81,29 @@ def get_step_phase_cnt(self,step): return cnt def get_settings_for_phase(self, name, cnt): - node = self.get_optional_child(name,attributes={"phase":cnt}) + node = self.get_optional_child(name, attributes={"phase": cnt}) settings = [] if node is not None: for child in node: - logger.debug ("Here child is {} with value {}".format(self.name(child), self.text(child))) + logger.debug( + "Here child is {} with value {}".format( + self.name(child), self.text(child) + ) + ) settings.append((self.name(child), self.text(child))) return settings def run_phase_get_clone_name(self, phase): - node = self.get_child("RUN",attributes={"phase":str(phase)}) + node = self.get_child("RUN", attributes={"phase": str(phase)}) if self.has(node, "clone"): return self.get(node, "clone") return None def cleanupnode(self, node): - ''' + """ keep the values component set - ''' + """ fnode = self.get_child(name="file", root=node) self.remove_child(fnode, root=node) gnode = self.get_child(name="group", root=node) diff --git a/CIME/XML/env_workflow.py b/CIME/XML/env_workflow.py new file mode 100644 index 00000000000..8eaa7171ce8 --- /dev/null +++ b/CIME/XML/env_workflow.py @@ -0,0 +1,194 @@ +""" +Interface to the env_workflow.xml file. 
This class inherits from EnvBase +""" + +from CIME.XML.standard_module_setup import * +from CIME.XML.env_base import EnvBase +from CIME.utils import get_cime_root + +import re, math + +logger = logging.getLogger(__name__) + +# pragma pylint: disable=attribute-defined-outside-init + + +class EnvWorkflow(EnvBase): + def __init__(self, case_root=None, infile="env_workflow.xml", read_only=False): + """ + initialize an object interface to file env_workflow.xml in the case directory + """ + # This arbitrary setting should always be overwritten + # schema = os.path.join(get_cime_root(), "CIME", "config", "xml_schemas", "env_workflow.xsd") + # TODO: define schema for this file + schema = None + self._hidden = {} + super(EnvWorkflow, self).__init__( + case_root, infile, schema=schema, read_only=read_only + ) + + def create_job_groups(self, batch_jobs, is_test): + # Subtle: in order to support dynamic batch jobs, we need to remove the + # job_submission group and replace with job-based groups + orig_group = self.get_optional_child( + "group", + {"id": "job_submission"}, + err_msg="Looks like job groups have already been created", + ) + expect(orig_group, "No workflow groups found") + orig_group_children = super(EnvWorkflow, self).get_children(root=orig_group) + + childnodes = [] + for child in reversed(orig_group_children): + childnodes.append(child) + + self.remove_child(orig_group) + + for name, jdict in batch_jobs: + if name == "case.run" and is_test: + pass # skip + elif name == "case.test" and not is_test: + pass # skip + elif name == "case.run.sh": + pass # skip + else: + new_job_group = self.make_child("group", {"id": name}) + for field in jdict.keys(): + if field == "runtime_parameters": + continue + val = jdict[field] + node = self.make_child( + "entry", {"id": field, "value": val}, root=new_job_group + ) + self.make_child("type", root=node, text="char") + + for child in childnodes: + self.add_child(self.copy(child), root=new_job_group) + + def get_jobs(self): + groups = self.get_children("group") + results = [] + for group in groups: + results.append(self.get(group, "id")) + return results + + def get_type_info(self, vid): + gnodes = self.get_children("group") + type_info = None + for gnode in gnodes: + nodes = self.get_children("entry", {"id": vid}, root=gnode) + type_info = None + for node in nodes: + new_type_info = self._get_type_info(node) + if type_info is None: + type_info = new_type_info + else: + expect( + type_info == new_type_info, + "Inconsistent type_info for entry id={} {} {}".format( + vid, new_type_info, type_info + ), + ) + return type_info + + def hidden_job(self, case, job): + if job not in self._hidden: + self.get_job_specs(case, job) + return self._hidden[job] + + def get_job_specs(self, case, job): + hidden = self.get_value("hidden", subgroup=job) + self._hidden[job] = (hidden is None and job != "case.st_archive") or ( + hidden is not None and hidden.lower() == "true" + ) + + task_count = case.get_resolved_value(self.get_value("task_count", subgroup=job)) + tasks_per_node = case.get_resolved_value( + self.get_value("tasks_per_node", subgroup=job) + ) + thread_count = case.get_resolved_value( + self.get_value("thread_count", subgroup=job) + ) + max_gpus_per_node = case.get_value("MAX_GPUS_PER_NODE") + ngpus_per_node = case.get_value("NGPUS_PER_NODE") + num_nodes = None + if not ngpus_per_node: + max_gpus_per_node = 0 + ngpus_per_node = 0 + if task_count is not None and tasks_per_node is not None: + task_count = int(task_count) + num_nodes = 
int(math.ceil(float(task_count) / float(tasks_per_node))) + tasks_per_node = task_count // num_nodes + if not thread_count: + thread_count = 1 + if ngpus_per_node > max_gpus_per_node: + ngpus_per_node = max_gpus_per_node + + return ( + task_count, + num_nodes, + tasks_per_node, + thread_count, + ngpus_per_node, + ) + + # pylint: disable=arguments-differ + def get_value(self, item, attribute=None, resolved=True, subgroup="PRIMARY"): + """ + Must default subgroup to something in order to provide single return value + """ + value = None + if subgroup == "PRIMARY": + subgroup = "case.test" if "case.test" in self.get_jobs() else "case.run" + + # pylint: disable=assignment-from-none + if value is None: + value = super(EnvWorkflow, self).get_value( + item, attribute=attribute, resolved=resolved, subgroup=subgroup + ) + + return value + + # pylint: disable=arguments-differ + def set_value(self, item, value, subgroup=None, ignore_type=False): + """ + Override the entry_id set_value function with some special cases for this class + """ + val = None + + # allow the user to set item for all jobs if subgroup is not provided + if subgroup is None: + gnodes = self.get_children("group") + for gnode in gnodes: + node = self.get_optional_child("entry", {"id": item}, root=gnode) + if node is not None: + self._set_value(node, value, vid=item, ignore_type=ignore_type) + val = value + else: + group = self.get_optional_child("group", {"id": subgroup}) + if group is not None: + node = self.get_optional_child("entry", {"id": item}, root=group) + if node is not None: + val = self._set_value( + node, value, vid=item, ignore_type=ignore_type + ) + + return val + + def get_children(self, name=None, attributes=None, root=None): + if name in ( + "JOB_WALLCLOCK_TIME", + "PROJECT", + "CHARGE_ACCOUNT", + "JOB_QUEUE", + "BATCH_COMMAND_FLAGS", + ): + nodes = super(EnvWorkflow, self).get_children( + "entry", attributes={"id": name}, root=root + ) + else: + nodes = super(EnvWorkflow, self).get_children( + name, attributes=attributes, root=root + ) + + return nodes diff --git a/scripts/lib/CIME/XML/expected_fails_file.py b/CIME/XML/expected_fails_file.py similarity index 95% rename from scripts/lib/CIME/XML/expected_fails_file.py rename to CIME/XML/expected_fails_file.py index b1887795c08..82e6ef0a298 100644 --- a/scripts/lib/CIME/XML/expected_fails_file.py +++ b/CIME/XML/expected_fails_file.py @@ -44,15 +44,16 @@ from CIME.XML.standard_module_setup import * +from CIME import utils from CIME.XML.generic_xml import GenericXML from CIME.expected_fails import ExpectedFails logger = logging.getLogger(__name__) -class ExpectedFailsFile(GenericXML): +class ExpectedFailsFile(GenericXML): def __init__(self, infile): - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "expected_fails_file.xsd") + schema = os.path.join(utils.get_schema_path(), "expected_fails_file.xsd") GenericXML.__init__(self, infile, schema=schema) def get_expected_fails(self): diff --git a/CIME/XML/files.py b/CIME/XML/files.py new file mode 100644 index 00000000000..b2612cc8937 --- /dev/null +++ b/CIME/XML/files.py @@ -0,0 +1,172 @@ +""" +Interface to the config_files.xml file. 
This class inherits from EntryID.py +""" + +import re +import os +from CIME.XML.standard_module_setup import * + +from CIME.XML.entry_id import EntryID +from CIME.utils import ( + expect, + get_cime_root, + get_config_path, + get_schema_path, + get_model, + get_cime_default_driver, +) + +logger = logging.getLogger(__name__) + + +class Files(EntryID): + def __init__(self, comp_interface=None): + """ + initialize an object + + >>> files = Files() + >>> files.get_value('CASEFILE_HEADERS',resolved=False) + '$CIMEROOT/CIME/data/config/config_headers.xml' + """ + if comp_interface is None: + comp_interface = get_cime_default_driver() + cimeroot = get_cime_root() + cimeroot_parent = os.path.dirname(cimeroot) + config_path = get_config_path() + schema_path = get_schema_path() + + infile = os.path.join(config_path, get_model(), "config_files.xml") + expect(os.path.isfile(infile), "Could not find or open file {}".format(infile)) + + schema = os.path.join(schema_path, "entry_id.xsd") + + EntryID.__init__(self, infile, schema=schema) + + config_files_override = os.path.join(cimeroot_parent, ".config_files.xml") + # variables COMP_ROOT_DIR_{} are mutable, all other variables are read only + self.COMP_ROOT_DIR = {} + self._comp_interface = comp_interface + self._cpl_comp = {} + # .config_file.xml at the top level may overwrite COMP_ROOT_DIR_ nodes in config_files + + if os.path.isfile(config_files_override): + self.read(config_files_override) + self.overwrite_existing_entries() + elif self.get_version() >= 3.0: + model_config_files = self.get_value("MODEL_CONFIG_FILES") + self.read(model_config_files) + self.overwrite_existing_entries() + + # pylint: disable=arguments-differ + def get_value( + self, + vid, + attribute=None, + resolved=True, + subgroup=None, + attribute_required=False, + ): + if vid == "COMP_ROOT_DIR_CPL": + if self._cpl_comp: + attribute = self._cpl_comp + elif attribute: + self._cpl_comp = attribute + else: + self._cpl_comp["component"] = "cpl" + if "COMP_ROOT_DIR" in vid: + if vid in self.COMP_ROOT_DIR: + if attribute is not None: + if vid + attribute["component"] in self.COMP_ROOT_DIR: + return self.COMP_ROOT_DIR[vid + attribute["component"]] + else: + return self.COMP_ROOT_DIR[vid] + + newatt = {"comp_interface": self._comp_interface} + if attribute: + newatt.update(attribute) + value = super(Files, self).get_value( + vid, attribute=newatt, resolved=False, subgroup=subgroup + ) + if value is None and attribute is not None: + value = super(Files, self).get_value( + vid, attribute=attribute, resolved=False, subgroup=subgroup + ) + if not value: + if attribute_required: + return value + if value is None: + value = super(Files, self).get_value( + vid, attribute=None, resolved=False, subgroup=subgroup + ) + + if ( + "COMP_ROOT_DIR" not in vid + and value is not None + and "COMP_ROOT_DIR" in value + ): + m = re.search("(COMP_ROOT_DIR_[^/]+)/", value) + comp_root_dir_var_name = m.group(1) + newatt = {"comp_interface": self._comp_interface} + if attribute: + newatt.update(attribute) + + crd_node = self.scan_optional_child( + comp_root_dir_var_name, attributes=newatt + ) + if crd_node: + comp_root_dir = self.get_value( + comp_root_dir_var_name, + attribute=newatt, + resolved=False, + subgroup=subgroup, + ) + else: + comp_root_dir = self.get_value( + comp_root_dir_var_name, + attribute=attribute, + resolved=False, + subgroup=subgroup, + ) + self.set_value(comp_root_dir_var_name, comp_root_dir, subgroup=attribute) + if resolved: + value = value.replace("$" + comp_root_dir_var_name, 
comp_root_dir) + + if resolved and value is not None: + value = value.replace("$COMP_INTERFACE", self._comp_interface) + value = self.get_resolved_value(value) + return value + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + if "COMP_ROOT_DIR" in vid: + if subgroup is not None: + self.COMP_ROOT_DIR[vid + subgroup["component"]] = value + else: + self.COMP_ROOT_DIR[vid] = value + + else: + expect(False, "Attempt to set a nonmutable variable {}".format(vid)) + return value + + def get_schema(self, nodename, attributes=None): + node = self.get_optional_child("entry", {"id": nodename}) + + schemanode = self.get_optional_child("schema", root=node, attributes=attributes) + + if schemanode is not None: + logger.debug("Found schema for {}".format(nodename)) + return self.get_resolved_value(self.text(schemanode)) + return None + + def get_components(self, nodename): + node = self.get_optional_child("entry", {"id": nodename}) + if node is not None: + valnodes = self.get_children( + "value", root=self.get_child("values", root=node) + ) + values = [] + for valnode in valnodes: + value = self.get(valnode, "component") + values.append(value) + return values + + return None diff --git a/CIME/XML/generic_xml.py b/CIME/XML/generic_xml.py new file mode 100644 index 00000000000..d35815d7829 --- /dev/null +++ b/CIME/XML/generic_xml.py @@ -0,0 +1,756 @@ +""" +Common interface to XML files, this is an abstract class and is expected to +be used by other XML interface modules and not directly. +""" +from CIME.XML.standard_module_setup import * +from CIME.utils import safe_copy, get_src_root + +import xml.etree.ElementTree as ET + +# pylint: disable=import-error +from shutil import which +import getpass +from copy import deepcopy +from collections import namedtuple + +logger = logging.getLogger(__name__) + + +class _Element( + object +): # private class, don't want users constructing directly or calling methods on it + def __init__(self, xml_element): + self.xml_element = xml_element + + def __eq__(self, rhs): + expect(isinstance(rhs, _Element), "Wrong type") + return self.xml_element == rhs.xml_element # pylint: disable=protected-access + + def __ne__(self, rhs): + expect(isinstance(rhs, _Element), "Wrong type") + return self.xml_element != rhs.xml_element # pylint: disable=protected-access + + def __hash__(self): + return hash(self.xml_element) + + def __deepcopy__(self, _): + return _Element(deepcopy(self.xml_element)) + + def __str__(self): + return str(self.xml_element) + + def __repr__(self): + return repr(self.xml_element) + + @property + def name(self): + return self.xml_element.tag + + @property + def text(self): + return self.xml_element.text + + @property + def attrib(self): + return dict(self.xml_element.attrib) + + +class GenericXML(object): + + _FILEMAP = {} + DISABLE_CACHING = False + CacheEntry = namedtuple("CacheEntry", ["tree", "root", "modtime"]) + + @classmethod + def invalidate(cls, filename): + if filename in cls._FILEMAP: + del cls._FILEMAP[filename] + + def __init__( + self, + infile=None, + schema=None, + root_name_override=None, + root_attrib_override=None, + read_only=True, + ): + """ + Initialize an object + """ + logger.debug("Initializing {}".format(infile)) + self.tree = None + self.root = None + self.locked = False + self.read_only = read_only + self.filename = infile + self.needsrewrite = False + if infile is None: + return + + if ( + os.path.isfile(infile) + and os.access(infile, os.R_OK) + and os.stat(infile).st_size > 0 + ): + # If file is defined and 
exists, read it + self.read(infile, schema) + else: + # if file does not exist create a root xml element + # and set it's id to file + expect( + not self.read_only, + "Makes no sense to have empty read-only file: {}".format(infile), + ) + logger.debug("File {} does not exist.".format(infile)) + expect("$" not in infile, "File path not fully resolved: {}".format(infile)) + + root = _Element(ET.Element("xml")) + + if root_name_override: + self.root = self.make_child( + root_name_override, root=root, attributes=root_attrib_override + ) + else: + self.root = self.make_child( + "file", + root=root, + attributes={"id": os.path.basename(infile), "version": "2.0"}, + ) + + self.tree = ET.ElementTree(root) + + self._FILEMAP[infile] = self.CacheEntry(self.tree, self.root, 0.0) + + def read(self, infile, schema=None): + """ + Read and parse an xml file into the object. The schema variable can either be a path to an xsd schema file or + a dictionary of paths to files by version. + """ + cached_read = False + if not self.DISABLE_CACHING and infile in self._FILEMAP: + timestamp_cache = self._FILEMAP[infile].modtime + timestamp_file = os.path.getmtime(infile) + if timestamp_file == timestamp_cache: + logger.debug("read (cached): {}".format(infile)) + expect( + self.read_only or not self.filename or not self.needsrewrite, + "Reading into object marked for rewrite, file {}".format( + self.filename + ), + ) + self.tree, self.root, _ = self._FILEMAP[infile] + cached_read = True + + if not cached_read: + logger.debug("read: {}".format(infile)) + with open(infile, "r", encoding="utf-8") as fd: + self.read_fd(fd) + version = str(self.get_version()) + if type(schema) is dict: + self.validate_xml_file(infile, schema[version]) + elif schema is not None and self.get_version() > 1.0: + self.validate_xml_file(infile, schema) + + logger.debug("File version is {}".format(str(self.get_version()))) + + self._FILEMAP[infile] = self.CacheEntry( + self.tree, self.root, os.path.getmtime(infile) + ) + + def read_fd(self, fd): + expect( + self.read_only or not self.filename or not self.needsrewrite, + "Reading into object marked for rewrite, file {}".format(self.filename), + ) + read_only = self.read_only + if self.tree: + addroot = _Element(ET.parse(fd).getroot()) + # we need to override the read_only mechanism here to append the xml object + self.read_only = False + if addroot.xml_element.tag == self.name(self.root): + for child in self.get_children(root=addroot): + self.add_child(child) + else: + self.add_child(addroot) + self.read_only = read_only + else: + self.tree = ET.parse(fd) + self.root = _Element(self.tree.getroot()) + include_elems = self.scan_children("xi:include") + # First remove all includes found from the list + for elem in include_elems: + self.read_only = False + self.remove_child(elem) + self.read_only = read_only + # Then recursively add the included files. + for elem in include_elems: + path = os.path.abspath( + os.path.join( + os.getcwd(), os.path.dirname(self.filename), self.get(elem, "href") + ) + ) + logger.debug("Include file {}".format(path)) + self.read(path) + + def lock(self): + """ + A subclass is doing caching, we need to lock the tree structure + in order to avoid invalidating cache. 
+ """ + self.locked = True + + def unlock(self): + self.locked = False + + def change_file(self, newfile, copy=False): + if copy: + new_case = os.path.dirname(newfile) + if not os.path.exists(new_case): + os.makedirs(new_case) + safe_copy(self.filename, newfile) + + self.tree = None + self.filename = newfile + self.read(newfile) + + # + # API for individual node operations + # + + def get(self, node, attrib_name, default=None): + return node.xml_element.get(attrib_name, default=default) + + def has(self, node, attrib_name): + return attrib_name in node.xml_element.attrib + + def set(self, node, attrib_name, value): + if self.get(node, attrib_name) != value: + expect( + not self.read_only, + "read_only: cannot set attrib[{}]={} for node {} in file {}".format( + attrib_name, value, self.name(node), self.filename + ), + ) + if attrib_name == "id": + expect( + not self.locked, + "locked: cannot set attrib[{}]={} for node {} in file {}".format( + attrib_name, value, self.name(node), self.filename + ), + ) + self.needsrewrite = True + return node.xml_element.set(attrib_name, value) + + def pop(self, node, attrib_name): + expect( + not self.read_only, + "read_only: cannot pop attrib[{}] for node {} in file {}".format( + attrib_name, self.name(node), self.filename + ), + ) + if attrib_name == "id": + expect( + not self.locked, + "locked: cannot pop attrib[{}] for node {} in file {}".format( + attrib_name, self.name(node), self.filename + ), + ) + self.needsrewrite = True + return node.xml_element.attrib.pop(attrib_name) + + def attrib(self, node): + # Return a COPY. We do not want clients making changes directly + return ( + None if node.xml_element.attrib is None else dict(node.xml_element.attrib) + ) + + def set_name(self, node, name): + expect( + not self.read_only, + "read_only: set node name {} in file {}".format(name, self.filename), + ) + if node.xml_element.tag != name: + self.needsrewrite = True + node.xml_element.tag = name + + def set_text(self, node, text): + expect( + not self.read_only, + "read_only: set node text {} for node {} in file {}".format( + text, self.name(node), self.filename + ), + ) + if node.xml_element.text != text: + node.xml_element.text = text + self.needsrewrite = True + + def name(self, node): + return node.xml_element.tag + + def text(self, node): + return node.xml_element.text + + def add_child(self, node, root=None, position=None): + """ + Add element node to self at root + """ + expect( + not self.locked and not self.read_only, + "{}: cannot add child {} in file {}".format( + "read_only" if self.read_only else "locked", + self.name(node), + self.filename, + ), + ) + self.needsrewrite = True + root = root if root is not None else self.root + if position is not None: + root.xml_element.insert(position, node.xml_element) + else: + root.xml_element.append(node.xml_element) + + def copy(self, node): + return deepcopy(node) + + def remove_child(self, node, root=None): + expect( + not self.locked and not self.read_only, + "{}: cannot remove child {} in file {}".format( + "read_only" if self.read_only else "locked", + self.name(node), + self.filename, + ), + ) + self.needsrewrite = True + root = root if root is not None else self.root + root.xml_element.remove(node.xml_element) + + def make_child(self, name, attributes=None, root=None, text=None): + expect( + not self.locked and not self.read_only, + "{}: cannot make child {} in file {}".format( + "read_only" if self.read_only else "locked", name, self.filename + ), + ) + root = root if root is not None else self.root 
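+ # Usage sketch (hypothetical entry, illustrative only):
+ #   self.make_child("entry", {"id": "DEBUG", "value": "TRUE"})
+ # appends <entry id="DEBUG" value="TRUE"/> under root (self.root when no root
+ # is given) and marks the file as needing a rewrite.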
+ self.needsrewrite = True + if attributes is None: + node = _Element(ET.SubElement(root.xml_element, name)) + else: + node = _Element(ET.SubElement(root.xml_element, name, attrib=attributes)) + + if text: + self.set_text(node, text) + + return node + + def make_child_comment(self, root=None, text=None): + expect( + not self.locked and not self.read_only, + "{}: cannot make child {} in file {}".format( + "read_only" if self.read_only else "locked", text, self.filename + ), + ) + root = root if root is not None else self.root + self.needsrewrite = True + et_comment = ET.Comment(text) + node = _Element(et_comment) + root.xml_element.append(node.xml_element) + return node + + def get_children(self, name=None, attributes=None, root=None): + """ + This is the critical function, its interface and performance are crucial. + + You can specify attributes={key:None} if you want to select children + with the key attribute but you don't care what its value is. + """ + root = root if root is not None else self.root + children = [] + for child in root.xml_element: + if name is not None: + if child.tag != name: + continue + + if attributes is not None: + if child.attrib is None: + continue + else: + match = True + for key, value in attributes.items(): + if key not in child.attrib: + match = False + break + elif value is not None: + if child.attrib[key] != value: + match = False + break + + if not match: + continue + + children.append(_Element(child)) + + return children + + def get_child(self, name=None, attributes=None, root=None, err_msg=None): + child = self.get_optional_child( + root=root, name=name, attributes=attributes, err_msg=err_msg + ) + expect( + child, + err_msg + if err_msg + else "Expected one child, found None with name '{}' and attribs '{}' in file {}".format( + name, attributes, self.filename + ), + ) + return child + + def get_optional_child(self, name=None, attributes=None, root=None, err_msg=None): + children = self.get_children(root=root, name=name, attributes=attributes) + if len(children) > 1: + # see if we can reduce to 1 based on attribute counts + if not attributes: + children = [c for c in children if not c.xml_element.attrib] + else: + attlen = len(attributes) + children = [c for c in children if len(c.xml_element.attrib) == attlen] + + expect( + len(children) <= 1, + err_msg + if err_msg + else "Multiple matches for name '{}' and attribs '{}' in file {}".format( + name, attributes, self.filename + ), + ) + return children[0] if children else None + + def get_element_text(self, element_name, attributes=None, root=None): + element_node = self.get_optional_child( + name=element_name, attributes=attributes, root=root + ) + if element_node is not None: + return self.text(element_node) + return None + + def set_element_text(self, element_name, new_text, attributes=None, root=None): + element_node = self.get_optional_child( + name=element_name, attributes=attributes, root=root + ) + if element_node is not None: + self.set_text(element_node, new_text) + return new_text + return None + + def to_string(self, node, method="xml", encoding="us-ascii"): + return ET.tostring(node.xml_element, method=method, encoding=encoding) + + # + # API for operations over the entire file + # + + def get_version(self): + version = self.get(self.root, "version") + version = 1.0 if version is None else float(version) + return version + + def check_timestamp(self): + """ + Returns True if timestamp matches what is expected + """ + timestamp_cache = self._FILEMAP[self.filename].modtime + if timestamp_cache 
!= 0.0: + timestamp_file = os.path.getmtime(self.filename) + return timestamp_file == timestamp_cache + else: + return True + + def validate_timestamp(self): + timestamp_ok = self.check_timestamp() + expect( + timestamp_ok, + "File {} appears to have changed without a corresponding invalidation.".format( + self.filename + ), + ) + + def write(self, outfile=None, force_write=False): + """ + Write an xml file from data in self + """ + if not (self.needsrewrite or force_write): + return + + self.validate_timestamp() + + if outfile is None: + outfile = self.filename + + logger.debug("write: " + outfile) + + xmlstr = self.get_raw_record() + + # xmllint provides a better format option for the output file + xmllint = which("xmllint") + + if xmllint: + if isinstance(outfile, str): + run_cmd_no_fail( + "{} --format --output {} -".format(xmllint, outfile), + input_str=xmlstr, + ) + else: + outfile.write( + run_cmd_no_fail("{} --format -".format(xmllint), input_str=xmlstr) + ) + + else: + with open(outfile, "w") as xmlout: + xmlout.write(xmlstr) + + self._FILEMAP[self.filename] = self.CacheEntry( + self.tree, self.root, os.path.getmtime(self.filename) + ) + + self.needsrewrite = False + + def scan_child(self, nodename, attributes=None, root=None): + """ + Get an xml element matching nodename with optional attributes. + + Error unless exactly one match. + """ + + nodes = self.scan_children(nodename, attributes=attributes, root=root) + + expect( + len(nodes) == 1, + "Incorrect number of matches, {:d}, for nodename '{}' and attrs '{}' in file '{}'".format( + len(nodes), nodename, attributes, self.filename + ), + ) + return nodes[0] + + def scan_optional_child(self, nodename, attributes=None, root=None): + """ + Get an xml element matching nodename with optional attributes. + + Return None if no match. 
+ """ + nodes = self.scan_children(nodename, attributes=attributes, root=root) + + expect( + len(nodes) <= 1, + "Multiple matches for nodename '{}' and attrs '{}' in file '{}', found {} matches".format( + nodename, attributes, self.filename, len(nodes) + ), + ) + return nodes[0] if nodes else None + + def scan_children(self, nodename, attributes=None, root=None): + + logger.debug( + "(get_nodes) Input values: {}, {}, {}, {}".format( + self.__class__.__name__, nodename, attributes, root + ) + ) + + if root is None: + root = self.root + nodes = [] + + namespace = {"xi": "http://www.w3.org/2001/XInclude"} + + xpath = ".//" + (nodename if nodename else "") + + if attributes: + # xml.etree has limited support for xpath and does not allow more than + # one attribute in an xpath query so we query seperately for each attribute + # and create a result with the intersection of those lists + + for key, value in attributes.items(): + if value is None: + xpath = ".//{}[@{}]".format(nodename, key) + else: + xpath = ".//{}[@{}='{}']".format(nodename, key, value) + + logger.debug("xpath is {}".format(xpath)) + + try: + newnodes = root.xml_element.findall(xpath, namespace) + except Exception as e: + expect( + False, "Bad xpath search term '{}', error: {}".format(xpath, e) + ) + + if not nodes: + nodes = newnodes + else: + for node in nodes[:]: + if node not in newnodes: + nodes.remove(node) + if not nodes: + return [] + + else: + logger.debug("xpath: {}".format(xpath)) + nodes = root.xml_element.findall(xpath, namespace) + + logger.debug("Returning {} nodes ({})".format(len(nodes), nodes)) + + return [_Element(node) for node in nodes] + + def get_value( + self, item, attribute=None, resolved=True, subgroup=None + ): # pylint: disable=unused-argument + """ + get_value is expected to be defined by the derived classes, if you get here + the value was not found in the class. + """ + logger.debug("Get Value for " + item) + return None + + def get_values( + self, vid, attribute=None, resolved=True, subgroup=None + ): # pylint: disable=unused-argument + logger.debug("Get Values for " + vid) + return [] + + def set_value( + self, vid, value, subgroup=None, ignore_type=True + ): # pylint: disable=unused-argument + """ + ignore_type is not used in this flavor + """ + valnodes = self.get_children(vid) + for node in valnodes: + self.set_text(node, value) + + return value if valnodes else None + + def get_resolved_value( + self, raw_value, allow_unresolved_envvars=False, subgroup=None + ): + """ + A value in the xml file may contain references to other xml + variables or to environment variables. These are refered to in + the perl style with $name and $ENV{name}. 
+ + >>> obj = GenericXML() + >>> os.environ["FOO"] = "BAR" + >>> os.environ["BAZ"] = "BARF" + >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three") + 'one BAR two BARF three' + >>> obj.get_resolved_value("2 + 3 - 1") + '4' + >>> obj.get_resolved_value("0001-01-01") + '0001-01-01' + >>> obj.get_resolved_value("$SHELL{echo hi}") == 'hi' + True + """ + logger.debug("raw_value {}".format(raw_value)) + reference_re = re.compile(r"\${?(\w+)}?") + env_ref_re = re.compile(r"\$ENV\{(\w+)\}") + shell_ref_re = re.compile(r"\$SHELL\{([^}]+)\}") + math_re = re.compile(r"\s[+-/*]\s") + item_data = raw_value + + if item_data is None: + return None + + if not isinstance(item_data, str): + return item_data + + for m in env_ref_re.finditer(item_data): + logger.debug("look for {} in env".format(item_data)) + env_var = m.groups()[0] + env_var_exists = env_var in os.environ + if not allow_unresolved_envvars: + expect(env_var_exists, "Undefined env var '{}'".format(env_var)) + if env_var_exists: + item_data = item_data.replace(m.group(), os.environ[env_var]) + + for s in shell_ref_re.finditer(item_data): + logger.debug("execute {} in shell".format(item_data)) + shell_cmd = s.groups()[0] + item_data = item_data.replace(s.group(), run_cmd_no_fail(shell_cmd)) + + for m in reference_re.finditer(item_data): + var = m.groups()[0] + logger.debug("find: {}".format(var)) + # The overridden versions of this method do not simply return None + # so the pylint should not be flagging this + # pylint: disable=assignment-from-none + ref = self.get_value(var, subgroup=subgroup) + + if ref is not None: + logger.debug("resolve: " + str(ref)) + item_data = item_data.replace( + m.group(), self.get_resolved_value(str(ref)) + ) + elif var == "CIMEROOT": + cimeroot = get_cime_root() + item_data = item_data.replace(m.group(), cimeroot) + elif var == "SRCROOT": + srcroot = get_src_root() + item_data = item_data.replace(m.group(), srcroot) + elif var == "USER": + item_data = item_data.replace(m.group(), getpass.getuser()) + + if math_re.search(item_data): + try: + tmp = eval(item_data) + except Exception: + tmp = item_data + item_data = str(tmp) + + return item_data + + def validate_xml_file(self, filename, schema): + """ + validate an XML file against a provided schema file using pylint + """ + expect( + filename and os.path.isfile(filename), + "xml file not found {}".format(filename), + ) + expect( + schema and os.path.isfile(schema), "schema file not found {}".format(schema) + ) + xmllint = which("xmllint") + + expect( + xmllint and os.path.isfile(xmllint), + " xmllint not found in PATH, xmllint is required for cime. 
PATH={}".format( + os.environ["PATH"] + ), + ) + + logger.debug("Checking file {} against schema {}".format(filename, schema)) + run_cmd_no_fail( + "{} --xinclude --noout --schema {} {}".format(xmllint, schema, filename) + ) + + def get_raw_record(self, root=None): + logger.debug("writing file {}".format(self.filename)) + if root is None: + root = self.root + try: + xmlstr = ET.tostring(root.xml_element) + except ET.ParseError as e: + ET.dump(root.xml_element) + expect( + False, + "Could not write file {}, xml formatting error '{}'".format( + self.filename, e + ), + ) + return xmlstr + + def get_id(self): + xmlid = self.get(self.root, "id") + if xmlid is not None: + return xmlid + return self.name(self.root) diff --git a/CIME/XML/grids.py b/CIME/XML/grids.py new file mode 100644 index 00000000000..60bc1ff3d02 --- /dev/null +++ b/CIME/XML/grids.py @@ -0,0 +1,894 @@ +""" +Common interface to XML files which follow the grids format, +This is not an abstract class - but inherits from the abstact class GenericXML +""" + +from collections import OrderedDict +from CIME.XML.standard_module_setup import * +from CIME.XML.files import Files +from CIME.XML.generic_xml import GenericXML + +logger = logging.getLogger(__name__) + +# Separator character for multiple grids within a single component (currently just used +# for GLC when there are multiple ice sheet grids). It is important that this character +# NOT appear in any file names - or anywhere in the path of directories holding input +# data. +GRID_SEP = ":" + +# elements of a valid grid long name +grid_prefix = { + "atm": "a%", + "lnd": "l%", + "ocnice": "oi%", + "rof": "r%", + "wav": "w%", + "glc": "g%", + "mask": "m%", + "iac": "z%", +} + + +class Grids(GenericXML): + def __init__(self, infile=None, files=None, comp_interface=None): + if files is None: + files = Files(comp_interface=comp_interface) + if infile is None: + infile = files.get_value("GRIDS_SPEC_FILE") + logger.debug(" Grid specification file is {}".format(infile)) + schema = files.get_schema("GRIDS_SPEC_FILE") + expect( + os.path.isfile(infile) and os.access(infile, os.R_OK), + f" grid file not found {infile}", + ) + try: + GenericXML.__init__(self, infile, schema) + except: + # Getting false failures on izumi, change this to a warning + logger.warning("Schema validity test fails for {}".format(infile)) + + self._version = self.get_version() + self._comp_gridnames = self._get_grid_names() + + def _get_grid_names(self): + grids = self.get_child("grids") + model_grid_defaults = self.get_child("model_grid_defaults", root=grids) + nodes = self.get_children("grid", root=model_grid_defaults) + gridnames = [] + for node in nodes: + gn = self.get(node, "name") + if gn not in gridnames: + gridnames.append(gn) + if "mask" not in gridnames: + gridnames.append("mask") + + return gridnames + + def get_grid_info(self, name, compset, driver): + """ + Find the matching grid node + + Returns a dictionary containing relevant grid variables: domains, gridmaps, etc. 
+ """ + gridinfo = {} + atmnlev = None + lndnlev = None + + # mechanism to specify atm levels + atmlevregex = re.compile(r"([^_]+)z(\d+)(.*)$") + levmatch = re.match(atmlevregex, name) + if levmatch: + atmnlev = levmatch.group(2) + name = levmatch.group(1) + levmatch.group(3) + + # mechanism to specify lnd levels + lndlevregex = re.compile(r"(.*_)([^_]+)z(\d+)(_[^m].*)$") + levmatch = re.match(lndlevregex, name) + if levmatch: + lndnlev = levmatch.group(3) + name = levmatch.group(1) + levmatch.group(2) + levmatch.group(4) + + # determine component_grids dictionary and grid longname + if self._valid_lname(name): + lname = name + else: + lname = self._read_config_grids(name, compset, atmnlev, lndnlev) + + gridinfo["GRID"] = lname + component_grids = _ComponentGrids(lname) + + # determine domains given component_grids + domains = self._get_domains(component_grids, atmlevregex, lndlevregex, driver) + + gridinfo.update(domains) + + # determine gridmaps given component_grids + gridmaps = self._get_gridmaps(component_grids, driver, compset) + gridinfo.update(gridmaps) + + component_grids.check_num_elements(gridinfo) + + return gridinfo + + def _valid_lname(self, name): + """ + check if the grid long name is valid + """ + valid = True + for comp in self._comp_gridnames: + if not (grid_prefix[comp] in name): + valid = False + break + return valid + + def _read_config_grids(self, name, compset, atmnlev=None, lndnlev=None): + """ + read config_grids.xml with version 2.0 schema + + Returns a grid long name given the alias ('name' argument) + """ + grids_node = self.get_child("grids") + + # (2)loop over all of the "model grid" nodes and determine is there an alias match with the + # input grid name - if there is an alias match determine if the "compset" and "not_compset" + # regular expression attributes match the match the input compset + + model_gridnodes = self.get_children("model_grid", root=grids_node) + model_gridnode = None + foundalias = False + for node in model_gridnodes: + alias = self.get(node, "alias") + if alias == name: + foundalias = True + foundcompset = False + compset_attrib = self.get(node, "compset") + not_compset_attrib = self.get(node, "not_compset") + if compset_attrib and not_compset_attrib: + compset_match = re.search(compset_attrib, compset) + not_compset_match = re.search(not_compset_attrib, compset) + if compset_match is not None and not_compset_match is None: + foundcompset = True + model_gridnode = node + logger.debug( + "Found match for {} with compset_match {} and not_compset_match {}".format( + alias, compset_attrib, not_compset_attrib + ) + ) + break + elif compset_attrib: + compset_match = re.search(compset_attrib, compset) + if compset_match is not None: + foundcompset = True + model_gridnode = node + logger.debug( + "Found match for {} with compset_match {}".format( + alias, compset_attrib + ) + ) + break + elif not_compset_attrib: + not_compset_match = re.search(not_compset_attrib, compset) + if not_compset_match is None: + foundcompset = True + model_gridnode = node + logger.debug( + "Found match for {} with not_compset_match {}".format( + alias, not_compset_attrib + ) + ) + break + else: + foundcompset = True + model_gridnode = node + logger.debug("Found match for {}".format(alias)) + break + expect(foundalias, "no alias {} defined".format(name)) + # if no match is found in config_grids.xml - exit + expect( + foundcompset, "grid alias {} not valid for compset {}".format(name, compset) + ) + + return self.get_grid_longname( + grids_node, model_gridnode, compset, 
atmnlev, lndnlev + ) + + def get_grid_longname( + self, grids_node, model_gridnode, compset=None, atmnlev=None, lndnlev=None + ): + model_grid = {} + + for comp_gridname in self._comp_gridnames: + model_grid[comp_gridname] = None + + if compset is not None: + grid_defaults_node = self.get_child("model_grid_defaults", root=grids_node) + + for grid_node in self.get_children("grid", root=grid_defaults_node): + name_attrib = self.get(grid_node, "name") + compset_attrib = self.get(grid_node, "compset") + compset_match = re.search(compset_attrib, compset) + + if compset_match is not None: + model_grid[name_attrib] = self.text(grid_node) + + grid_nodes = self.get_children("grid", root=model_gridnode) + + for grid_node in grid_nodes: + name = self.get(grid_node, "name") + value = self.text(grid_node) + + if model_grid[name] != "null": + model_grid[name] = value + + mask_node = self.get_optional_child("mask", root=model_gridnode) + + if mask_node is not None: + model_grid["mask"] = self.text(mask_node) + else: + model_grid["mask"] = model_grid["ocnice"] + + lname = "" + + for component_gridname in self._comp_gridnames: + if lname: + lname = lname + "_" + grid_prefix[component_gridname] + else: + lname = grid_prefix[component_gridname] + + if model_grid[component_gridname] is not None: + lname += model_grid[component_gridname] + + if component_gridname == "atm" and atmnlev is not None: + if not ("a{:n}ull" in lname): + lname += "z" + atmnlev + elif component_gridname == "lnd" and lndnlev is not None: + if not ("l{:n}ull" in lname): + lname += "z" + lndnlev + else: + lname += "null" + + return lname + + def _get_domains(self, component_grids, atmlevregex, lndlevregex, driver): + """determine domains dictionary for config_grids.xml v2 schema""" + domains = {} + mask_name = component_grids.get_comp_gridname("mask") + + for comp_name in component_grids.get_compnames(include_mask=True): + for grid_name in component_grids.get_comp_gridlist(comp_name): + # Determine grid name with no nlev suffix if there is one + grid_name_nonlev = grid_name + levmatch = re.match(atmlevregex, grid_name) + if levmatch: + grid_name_nonlev = levmatch.group(1) + levmatch.group(3) + levmatch = re.match(lndlevregex, grid_name) + if levmatch: + grid_name_nonlev = ( + levmatch.group(1) + levmatch.group(2) + levmatch.group(4) + ) + self._get_domains_for_one_grid( + domains=domains, + comp_name=comp_name.upper(), + grid_name=grid_name, + grid_name_nonlev=grid_name_nonlev, + mask_name=mask_name, + driver=driver, + ) + + if driver == "nuopc": + # Obtain the root node for the domain entry that sets the mask + if domains["MASK_GRID"] != "null": + mask_domain_node = self.get_optional_child( + "domain", + attributes={"name": domains["MASK_GRID"]}, + root=self.get_child("domains"), + ) + # Now obtain the mesh for the mask for the domain node for that component grid + mesh_node = self.get_child("mesh", root=mask_domain_node) + domains["MASK_MESH"] = self.text(mesh_node) + + return domains + + def _get_domains_for_one_grid( + self, domains, comp_name, grid_name, grid_name_nonlev, mask_name, driver + ): + """Get domain information for the given grid, adding elements to the domains dictionary + + Args: + - domains: dictionary of values, modified in place + - comp_name: uppercase abbreviated name of component (e.g., "ATM") + - grid_name: name of this grid + - grid_name_nonlev: same as grid_name but with any level information stripped out + - mask_name: the mask being used in this case + - driver: the name of the driver being used in this case + 
""" + domain_node = self.get_optional_child( + "domain", + attributes={"name": grid_name_nonlev}, + root=self.get_child("domains"), + ) + if not domain_node: + domain_root = self.get_optional_child("domains", {"driver": driver}) + if domain_root: + domain_node = self.get_optional_child( + "domain", attributes={"name": grid_name_nonlev}, root=domain_root + ) + if domain_node: + # determine xml variable name + if not "PTS_LAT" in domains: + domains["PTS_LAT"] = "-999.99" + if not "PTS_LON" in domains: + domains["PTS_LON"] = "-999.99" + if not comp_name == "MASK": + if self.get_element_text("nx", root=domain_node): + # If there are multiple grids for this component, then the component + # _NX and _NY values won't end up being used, so we simply set them to 1 + _add_grid_info( + domains, + comp_name + "_NX", + int(self.get_element_text("nx", root=domain_node)), + value_for_multiple=1, + ) + _add_grid_info( + domains, + comp_name + "_NY", + int(self.get_element_text("ny", root=domain_node)), + value_for_multiple=1, + ) + elif self.get_element_text("lon", root=domain_node): + # No need to call _add_grid_info here because, for multiple grids, the + # end result will be the same as the hard-coded 1 used here + domains[comp_name + "_NX"] = 1 + domains[comp_name + "_NY"] = 1 + domains["PTS_LAT"] = self.get_element_text("lat", root=domain_node) + domains["PTS_LON"] = self.get_element_text("lon", root=domain_node) + else: + # No need to call _add_grid_info here because, for multiple grids, the + # end result will be the same as the hard-coded 1 used here + domains[comp_name + "_NX"] = 1 + domains[comp_name + "_NY"] = 1 + + if driver == "mct" or driver == "moab": + # mct + file_nodes = self.get_children("file", root=domain_node) + domain_file = "" + for file_node in file_nodes: + grid_attrib = self.get(file_node, "grid") + mask_attrib = self.get(file_node, "mask") + if grid_attrib is not None and mask_attrib is not None: + grid_match = re.search(comp_name.lower(), grid_attrib) + mask_match = False + if mask_name is not None: + mask_match = mask_name == mask_attrib + if grid_match is not None and mask_match: + domain_file = self.text(file_node) + elif grid_attrib is not None: + grid_match = re.search(comp_name.lower(), grid_attrib) + if grid_match is not None: + domain_file = self.text(file_node) + elif mask_attrib is not None: + mask_match = mask_name == mask_attrib + if mask_match: + domain_file = self.text(file_node) + if domain_file: + _add_grid_info( + domains, + comp_name + "_DOMAIN_FILE", + os.path.basename(domain_file), + ) + path = os.path.dirname(domain_file) + if len(path) > 0: + _add_grid_info(domains, comp_name + "_DOMAIN_PATH", path) + + if driver == "nuopc": + if not comp_name == "MASK": + mesh_nodes = self.get_children("mesh", root=domain_node) + mesh_file = "" + for mesh_node in mesh_nodes: + mesh_file = self.text(mesh_node) + if mesh_file: + _add_grid_info(domains, comp_name + "_DOMAIN_MESH", mesh_file) + if comp_name == "LND" or comp_name == "ATM": + # Note: ONLY want to define PTS_DOMAINFILE for land and ATM + file_node = self.get_optional_child("file", root=domain_node) + if file_node is not None and self.text(file_node) != "unset": + domains["PTS_DOMAINFILE"] = self.text(file_node) + # set up dictionary of domain files for every component + _add_grid_info(domains, comp_name + "_GRID", grid_name) + + def _get_gridmaps(self, component_grids, driver, compset): + """Set all mapping files for config_grids.xml v2 schema + + If a component (e.g., GLC) has multiple grids, then each mapping 
file variable for + that component will be a colon-delimited list with the appropriate number of + elements. + + If a given gridmap is required but not given explicitly, then its value will be + either "unset" or "idmap". Even in the case of a component with multiple grids + (e.g., GLC), there will only be a single "unset" or "idmap" value. (We do not + currently handle the possibility that some grids will have an "idmap" value while + others have an explicit mapping file. So it is currently an error for "idmap" to + appear in a mapping file variable for a component with multiple grids; this will + be checked elsewhere.) + + """ + gridmaps = {} + + # (1) determine values of gridmaps for target grid + # + # Exclude the ice component from the list of compnames because it is assumed to be + # on the same grid as ocn, so doesn't have any gridmaps of its own + compnames = component_grids.get_compnames( + include_mask=False, exclude_comps=["ice"] + ) + for idx, compname in enumerate(compnames): + for other_compname in compnames[idx + 1 :]: + for gridvalue in component_grids.get_comp_gridlist(compname): + for other_gridvalue in component_grids.get_comp_gridlist( + other_compname + ): + self._get_gridmaps_for_one_grid_pair( + gridmaps=gridmaps, + driver=driver, + compname=compname, + other_compname=other_compname, + gridvalue=gridvalue, + other_gridvalue=other_gridvalue, + ) + + # (2) set all possibly required gridmaps to 'idmap' for mct and 'unset/idmap' for + # nuopc, if they aren't already set + required_gridmaps_node = self.get_child("required_gridmaps") + tmp_gridmap_nodes = self.get_children( + "required_gridmap", root=required_gridmaps_node + ) + required_gridmap_nodes = [] + for node in tmp_gridmap_nodes: + compset_att = self.get(node, "compset") + not_compset_att = self.get(node, "not_compset") + if ( + compset_att + and not compset_att in compset + or not_compset_att + and not_compset_att in compset + ): + continue + required_gridmap_nodes.append(node) + mapname = self.text(node) + if mapname not in gridmaps: + gridmaps[mapname] = _get_unset_gridmap_value( + mapname, component_grids, driver + ) + + # (3) check that all necessary maps are not set to idmap + # + # NOTE(wjs, 2021-05-18) This could probably be combined with the above loop, but + # I'm avoiding making that change now due to fear of breaking this complex logic + # that isn't covered by unit tests. 
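+        # Illustrative sketch of the check below, using hypothetical grid and
+        # map values (not taken from any real config_grids.xml): if atm is on
+        # "0.9x1.25" and ocn on "gx1v7", a required map such as
+        # ATM2OCN_FMAPNAME that is still "idmap" gets caught here -- reset to
+        # "unset" for the nuopc driver, or reported with a warning for mct.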
+ atm_gridvalue = component_grids.get_comp_gridname("atm") + for node in required_gridmap_nodes: + comp1_name = _strip_grid_from_name(self.get(node, "grid1")) + comp2_name = _strip_grid_from_name(self.get(node, "grid2")) + grid1_value = component_grids.get_comp_gridname(comp1_name) + grid2_value = component_grids.get_comp_gridname(comp2_name) + if grid1_value is not None and grid2_value is not None: + if ( + grid1_value != grid2_value + and grid1_value != "null" + and grid2_value != "null" + ): + map_ = gridmaps[self.text(node)] + if map_ == "idmap": + if comp1_name == "ocn" and grid1_value == atm_gridvalue: + logger.debug( + "ocn_grid == atm_grid so this is not an idmap error" + ) + else: + if driver == "nuopc": + gridmaps[self.text(node)] = "unset" + else: + logger.warning( + "Warning: missing non-idmap {} for {}, {} and {} {} ".format( + self.text(node), + comp1_name, + grid1_value, + comp2_name, + grid2_value, + ) + ) + + return gridmaps + + def _get_gridmaps_for_one_grid_pair( + self, gridmaps, driver, compname, other_compname, gridvalue, other_gridvalue + ): + """Get gridmap information for one pair of grids, adding elements to the gridmaps dictionary + + Args: + - gridmaps: dictionary of values, modified in place + - driver: the name of the driver being used in this case + - compname: abbreviated name of component (e.g., "atm") + - other_compname: abbreviated name of other component (e.g., "ocn") + - gridvalue: name of grid for compname + - other_gridvalue: name of grid for other_compname + """ + gridmaps_roots = self.get_children("gridmaps") + gridmap_nodes = [] + for root in gridmaps_roots: + gmdriver = self.get(root, "driver") + if gmdriver is None or gmdriver == driver: + gridname = compname + "_grid" + other_gridname = other_compname + "_grid" + gridmap_nodes.extend( + self.get_children( + "gridmap", + root=root, + attributes={ + gridname: gridvalue, + other_gridname: other_gridvalue, + }, + ) + ) + + # We first create a dictionary of gridmaps just for this pair of grids, then later + # add these grids to the main gridmaps dict using _add_grid_info. The reason for + # doing this in two steps, using the intermediate these_gridmaps variable, is: If + # there are multiple definitions of a given gridmap for a given grid pair, we just + # want to use one of them, rather than adding them all to the final gridmaps dict. + # (This may not occur in practice, but the logic allowed for this possibility + # before extending it to handle multiple grids for a given component, so we are + # leaving this possibility in place.) 
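+        # Rough illustration with a hypothetical map name/file: if this grid
+        # pair yields {"ATM2OCN_FMAPNAME": "map_atm_to_ocn.nc"} in
+        # these_gridmaps, then _add_grid_info below either sets that key in
+        # gridmaps or, if the key is already present (multiple grids for a
+        # component), appends the new value behind a GRID_SEP separator.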
+ these_gridmaps = {} + for gridmap_node in gridmap_nodes: + expect( + len(self.attrib(gridmap_node)) == 2, + " Bad attribute count in gridmap node %s" % self.attrib(gridmap_node), + ) + map_nodes = self.get_children("map", root=gridmap_node) + for map_node in map_nodes: + name = self.get(map_node, "name") + value = self.text(map_node) + if name is not None and value is not None: + these_gridmaps[name] = value + logger.debug(" gridmap name,value are {}: {}".format(name, value)) + + for name, value in these_gridmaps.items(): + _add_grid_info(gridmaps, name, value) + + def print_values(self, long=False): + # write out help message + helptext = self.get_element_text("help") + logger.info("{} ".format(helptext)) + + logger.info( + "{:5s}-------------------------------------------------------------".format( + "" + ) + ) + logger.info("{:10s} default component grids:\n".format("")) + logger.info(" component compset value ") + logger.info( + "{:5s}-------------------------------------------------------------".format( + "" + ) + ) + default_nodes = self.get_children( + "model_grid_defaults", root=self.get_child("grids") + ) + for default_node in default_nodes: + grid_nodes = self.get_children("grid", root=default_node) + for grid_node in grid_nodes: + name = self.get(grid_node, "name") + compset = self.get(grid_node, "compset") + value = self.text(grid_node) + logger.info(" {:6s} {:15s} {:10s}".format(name, compset, value)) + logger.info( + "{:5s}-------------------------------------------------------------".format( + "" + ) + ) + + domains = {} + if long: + domain_nodes = self.get_children("domain", root=self.get_child("domains")) + for domain_node in domain_nodes: + name = self.get(domain_node, "name") + if name == "null": + continue + desc = self.text(self.get_child("desc", root=domain_node)) + files = "" + file_nodes = self.get_children("file", root=domain_node) + for file_node in file_nodes: + filename = self.text(file_node) + mask_attrib = self.get(file_node, "mask") + grid_attrib = self.get(file_node, "grid") + files += "\n " + filename + if mask_attrib or grid_attrib: + files += " (only for" + if mask_attrib: + files += " mask: " + mask_attrib + if grid_attrib: + files += " grid match: " + grid_attrib + if mask_attrib or grid_attrib: + files += ")" + domains[name] = "\n {} with domain file(s): {} ".format( + desc, files + ) + + grids_node = self.get_child("grids") + model_grid_nodes = self.get_children("model_grid", root=grids_node) + for model_grid_node in model_grid_nodes: + alias = self.get(model_grid_node, "alias") + compset = self.get(model_grid_node, "compset") + not_compset = self.get(model_grid_node, "not_compset") + restriction = "" + if compset: + restriction += "only for compsets that are {} ".format(compset) + if not_compset: + restriction += "only for compsets that are not {} ".format(not_compset) + if restriction: + logger.info("\n alias: {} ({})".format(alias, restriction)) + else: + logger.info("\n alias: {}".format(alias)) + grid_nodes = self.get_children("grid", root=model_grid_node) + grids = "" + gridnames = [] + lname = self.get_grid_longname(grids_node, model_grid_node) + logger.info("\n{:<7}longname: {}".format(" ", lname)) + for grid_node in grid_nodes: + gridnames.append(self.text(grid_node)) + grids += self.get(grid_node, "name") + ":" + self.text(grid_node) + " " + logger.info(" non-default grids are: {}".format(grids)) + mask_nodes = self.get_children("mask", root=model_grid_node) + for mask_node in mask_nodes: + logger.info(" mask is: 
{}".format(self.text(mask_node))) + if long: + gridnames = set(gridnames) + for gridname in gridnames: + if gridname != "null": + try: + logger.info(" {}".format(domains[gridname])) + except KeyError: + logger.info( + " Could not provide domains for gridname {!r}".format( + gridname + ) + ) + + +# ------------------------------------------------------------------------ +# Helper class: _ComponentGrids +# ------------------------------------------------------------------------ + + +class _ComponentGrids(object): + """This class stores the grid names for each component and allows retrieval in a variety + of formats + + """ + + # Mappings from component names to the single characters used in the grid long name. + # Ordering is potentially important here, because it will determine the order in the + # list returned by get_compnames, which will in turn impact ordering of components in + # iterations. + # + # TODO: this should be in XML, not here + _COMP_NAMES = OrderedDict( + [ + ("atm", "a"), + ("lnd", "l"), + ("ocn", "o"), + ("ice", "i"), + ("rof", "r"), + ("glc", "g"), + ("wav", "w"), + ("iac", "z"), + ("mask", "m"), + ] + ) + + def __init__(self, grid_longname): + self._comp_gridnames = self._get_component_grids_from_longname(grid_longname) + + def _get_component_grids_from_longname(self, name): + """Return a dictionary mapping each compname to its gridname""" + grid_re = re.compile(r"[_]{0,1}[a-z]{1,2}%") + grids = grid_re.split(name)[1:] + prefixes = re.findall("[a-z]+%", name) + component_grids = {} + i = 0 + while i < len(grids): + # In the following, [:-1] strips the trailing '%' + prefix = prefixes[i][:-1] + grid = grids[i] + component_grids[prefix] = grid + i += 1 + component_grids["i"] = component_grids["oi"] + component_grids["o"] = component_grids["oi"] + del component_grids["oi"] + + result = {} + for compname, prefix in self._COMP_NAMES.items(): + result[compname] = component_grids[prefix] + return result + + def get_compnames(self, include_mask=True, exclude_comps=None): + """Return a list of all component names (lower case) + + This can be used for iterating through the grid names + + If include_mask is True (the default), then 'mask' is included in the list of + returned component names. + + If exclude_comps is given, then it should be a list of component names to exclude + from the returned list. For example, if it is ['ice', 'rof'], then 'ice' and 'rof' + are NOT included in the returned list. + + """ + if exclude_comps is None: + all_exclude_comps = [] + else: + all_exclude_comps = exclude_comps + if not include_mask: + all_exclude_comps.append("mask") + result = [k for k in self._COMP_NAMES if k not in all_exclude_comps] + return result + + def get_comp_gridname(self, compname): + """Return the grid name for the given component name""" + return self._comp_gridnames[compname] + + def get_comp_gridlist(self, compname): + """Return a list of individual grids for the given component name + + Usually this list has only a single grid (so the return value will be a + single-element list like ["0.9x1.25"]). However, the glc component (glc) can have + multiple grids, separated by GRID_SEP. In this situation, the return value for + GLC will have multiple elements. + + """ + gridname = self.get_comp_gridname(compname) + return gridname.split(GRID_SEP) + + def get_comp_numgrids(self, compname): + """Return the number of grids for the given component name + + Usually this is one, but the glc component can have multiple grids. 
+ """ + return len(self.get_comp_gridlist(compname)) + + def get_gridmap_total_nmaps(self, gridmap_name): + """Given a gridmap_name like ATM2OCN_FMAPNAME, return the total number of maps needed between the two components + + In most cases, this will be 1, but if either or both components has multiple grids, + then this will be the product of the number of grids for each component. + + """ + comp1_name, comp2_name = _get_compnames_from_mapname(gridmap_name) + comp1_ngrids = self.get_comp_numgrids(comp1_name) + comp2_ngrids = self.get_comp_numgrids(comp2_name) + total_nmaps = comp1_ngrids * comp2_ngrids + return total_nmaps + + def check_num_elements(self, gridinfo): + """Check each member of gridinfo to make sure that it has the correct number of elements + + gridinfo is a dict mapping variable names to their values + + """ + for compname in self.get_compnames(include_mask=False): + for name, value in gridinfo.items(): + if not isinstance(value, str): + # Non-string values only hold a single element, regardless of how many + # grids there are for a component. This is enforced in _add_grid_info + # by requiring value_for_multiple to be provided for non-string + # values. For now, it is *only* those non-string values that only + # carry a single element regardless of the number of grids. If, in the + # future, other variables are added with this property, then this + # logic would need to be extended to skip those variables as well. + # (This could be done by hard-coding some suffixes to skip here. A + # better alternative could be to do away with the value_for_multiple + # argument in _add_grid_info, instead setting a module-level + # dictionary mapping suffixes to their value_for_multiple, and + # referencing that dictionary in both _add_grid_info and here. For + # example: _VALUE_FOR_MULTIPLE = {'_NX': 1, '_NY': 1, '_FOO': 'bar'}.) + continue + name_lower = name.lower() + if name_lower.startswith(compname): + if name_lower.startswith(compname + "_"): + expected_num_elements = self.get_comp_numgrids(compname) + elif name_lower.startswith(compname + "2"): + expected_num_elements = self.get_gridmap_total_nmaps(name) + else: + # We don't know what to expect if the character after compname is + # neither "_" nor "2" + continue + if value.lower() == "unset": + # It's okay for there to be a single "unset" value even for a + # component with multiple grids + continue + num_elements = len(value.split(GRID_SEP)) + expect( + num_elements == expected_num_elements, + "Unexpected number of colon-delimited elements in {}: {} (expected {} elements)".format( + name, value, expected_num_elements + ), + ) + + +# ------------------------------------------------------------------------ +# Some helper functions +# ------------------------------------------------------------------------ + + +def _get_compnames_from_mapname(mapname): + """Given a mapname like ATM2OCN_FMAPNAME, return the two component names + + The returned component names are lowercase. 
So, for example, if mapname is + ATM2OCN_FMAPNAME, then this function returns a tuple ('atm', 'ocn') + + """ + comp1_name = mapname[0:3].lower() + comp2_name = mapname[4:7].lower() + return comp1_name, comp2_name + + +def _strip_grid_from_name(name): + """Given some string 'name', strip trailing '_grid' from name and return result + + Raises an exception if 'name' doesn't end with '_grid' + """ + expect(name.endswith("_grid"), "{} does not end with _grid".format(name)) + return name[: -len("_grid")] + + +def _add_grid_info(info_dict, key, value, value_for_multiple=None): + """Add a value to info_dict, handling the possibility of multiple grids for a component + + In the basic case, where key is not yet present in info_dict, this is equivalent to + setting: + info_dict[key] = value + + However, if the given key is already present, then instead of overriding the old + value, we instead concatenate, separated by GRID_SEP. This is used in case there are + multiple grids for a given component. An exception to this behavior is: If + value_for_multiple is specified (not None) then, if we find an existing value, then we + instead replace the value with the value given by value_for_multiple. + + value_for_multiple must be specified if value is not a string + + """ + if not isinstance(value, str): + expect( + value_for_multiple is not None, + "_add_grid_info: value_for_multiple must be specified if value is not a string", + ) + if key in info_dict: + if value_for_multiple is not None: + info_dict[key] = value_for_multiple + else: + info_dict[key] += GRID_SEP + value + else: + info_dict[key] = value + + +def _get_unset_gridmap_value(mapname, component_grids, driver): + """Return the appropriate setting for a given gridmap that has not been explicitly set + + This will be 'unset' or 'idmap' depending on various parameters. + """ + if driver == "nuopc": + comp1_name, comp2_name = _get_compnames_from_mapname(mapname) + grid1 = component_grids.get_comp_gridname(comp1_name) + grid2 = component_grids.get_comp_gridname(comp2_name) + if grid1 == grid2: + if grid1 != "null" and grid2 != "null": + gridmap = "idmap" + else: + gridmap = "unset" + else: + gridmap = "unset" + else: + gridmap = "idmap" + + return gridmap diff --git a/CIME/XML/headers.py b/CIME/XML/headers.py new file mode 100644 index 00000000000..5937a1d03cb --- /dev/null +++ b/CIME/XML/headers.py @@ -0,0 +1,29 @@ +""" +Interface to the config_headers.xml file. This class inherits from EntryID.py +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files + +logger = logging.getLogger(__name__) + + +class Headers(GenericXML): + def __init__(self, infile=None): + """ + initialize an object + + >>> files = Files() + >>> files.get_value('CASEFILE_HEADERS',resolved=False) + '$CIMEROOT/CIME/data/config/config_headers.xml' + """ + if infile is None: + files = Files() + infile = files.get_value("CASEFILE_HEADERS", resolved=True) + super(Headers, self).__init__(infile) + + def get_header_node(self, fname): + fnode = self.get_child("file", attributes={"name": fname}) + headernode = self.get_child("header", root=fnode) + return headernode diff --git a/CIME/XML/inputdata.py b/CIME/XML/inputdata.py new file mode 100644 index 00000000000..18b71dca4dd --- /dev/null +++ b/CIME/XML/inputdata.py @@ -0,0 +1,75 @@ +""" +Interface to the config_inputdata.xml file. 
This class inherits from GenericXML.py +""" +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files +from CIME.utils import expect + +logger = logging.getLogger(__name__) + + +class Inputdata(GenericXML): + def __init__(self, infile=None, files=None): + """ + initialize a files object given input pes specification file + """ + if files is None: + files = Files() + if infile is None: + infile = files.get_value("INPUTDATA_SPEC_FILE") + schema = files.get_schema("INPUTDATA_SPEC_FILE") + logger.debug("DEBUG: infile is {}".format(infile)) + GenericXML.__init__(self, infile, schema=schema) + + self._servernode = None + + def get_next_server(self, attributes=None): + protocol = None + address = None + user = "" + passwd = "" + chksum_file = None + ic_filepath = None + servernodes = self.get_children("server", attributes=attributes) + + # inventory is a CSV list of available data files and the valid date for each + # expected format is pathtofile,YYYY-MM-DD HH:MM:SS + # currently only used for NEON tower data + inventory = None + if not attributes: + servernodes = [x for x in servernodes if not self.attrib(x)] + + if servernodes: + if self._servernode is None: + self._servernode = servernodes[0] + else: + prevserver = self._servernode + for i, node in enumerate(servernodes): + if self._servernode == node and len(servernodes) > i + 1: + self._servernode = servernodes[i + 1] + break + if prevserver is not None and self._servernode == prevserver: + self._servernode = None + + if self._servernode: + protocol = self.text(self.get_child("protocol", root=self._servernode)) + address = self.text(self.get_child("address", root=self._servernode)) + unode = self.get_optional_child("user", root=self._servernode) + if unode: + user = self.text(unode) + invnode = self.get_optional_child("inventory", root=self._servernode) + if invnode: + inventory = self.text(invnode) + + pnode = self.get_optional_child("password", root=self._servernode) + if pnode: + passwd = self.text(pnode) + csnode = self.get_optional_child("checksum", root=self._servernode) + if csnode: + chksum_file = self.text(csnode) + icnode = self.get_optional_child("ic_filepath", root=self._servernode) + if icnode: + ic_filepath = self.text(icnode) + + return protocol, address, user, passwd, chksum_file, ic_filepath, inventory diff --git a/CIME/XML/machines.py b/CIME/XML/machines.py new file mode 100644 index 00000000000..c40e73ba3b6 --- /dev/null +++ b/CIME/XML/machines.py @@ -0,0 +1,715 @@ +""" +Interface to the config_machines.xml file. This class inherits from GenericXML.py +""" + +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files +from CIME.utils import CIMEError, expect, convert_to_unknown_type, get_cime_config + +import re +import logging +import socket +from functools import partial +from pathlib import Path + +logger = logging.getLogger(__name__) + + +def match_value_by_attribute_regex(element, attribute_name, value): + """Checks element contains attribute whose pattern matches a value. + + If the element does not have the attribute it's considered a match. + + Args: + element (CIME.XML.generic_xml._Element): XML element to check attributes. + attribute_name (str): Name of attribute with regex value. + value (str): Value that is matched against attributes regex value. + + Returns: + bool: True if attribute regex matches the target value otherwise False. 
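+
+    Example (illustrative values): an element whose "compiler" attribute is
+    "gnu.*" matches the value "gnu10", an element without the attribute
+    matches any value, and a value of None always counts as a match.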
+ """ + attribute_value = element.attrib.get(attribute_name, None) + + return ( + True + if value is None + or attribute_value is None + or re.match(attribute_value, value) is not None + else False + ) + + +class Machines(GenericXML): + def __init__( + self, + infile=None, + files=None, + machine=None, + extra_machines_dir=None, + read_only=True, + ): + """ + initialize an object + if a filename is provided it will be used, + otherwise if a files object is provided it will be used + otherwise create a files object from default values + + If extra_machines_dir is provided, it should be a string giving a path to an + additional directory that will be searched for a config_machines.xml file; if + found, the contents of this file will be appended to the standard + config_machines.xml. An empty string is treated the same as None. + + The schema variable can be passed as a path to an xsd schema file or a dictionary of paths + with version number as keys. + """ + + self.machine_node = None + self.machine = None + self.machines_dir = None + self.custom_settings = {} + self.extra_machines_dir = extra_machines_dir + + schema = None + checked_files = [] + if files is None: + files = Files() + if infile is None: + infile = files.get_value("MACHINES_SPEC_FILE") + + self.machines_dir = os.path.dirname(infile) + if os.path.exists(infile): + checked_files.append(infile) + else: + expect(False, f"file not found {infile}") + + schema = { + "3.0": files.get_schema( + "MACHINES_SPEC_FILE", attributes={"version": "3.0"} + ), + "2.0": files.get_schema( + "MACHINES_SPEC_FILE", attributes={"version": "2.0"} + ), + } + # Before v3 there was but one choice + if not schema["3.0"]: + schema = files.get_schema("MACHINES_SPEC_FILE") + + logger.debug("Verifying using schema {}".format(schema)) + + GenericXML.__init__(self, infile, schema, read_only=read_only) + + # Append the contents of $HOME/.cime/config_machines.xml if it exists. + # + # Also append the contents of a config_machines.xml file in the directory given by + # extra_machines_dir, if present. + # + # This could cause problems if node matches are repeated when only one is expected. + local_infile = os.path.join( + os.environ.get("HOME"), ".cime", "config_machines.xml" + ) + logger.debug("Infile: {}".format(local_infile)) + + if os.path.exists(local_infile): + GenericXML.read(self, local_infile, schema) + checked_files.append(local_infile) + + if extra_machines_dir: + local_infile = os.path.join(extra_machines_dir, "config_machines.xml") + logger.debug("Infile: {}".format(local_infile)) + if os.path.exists(local_infile): + GenericXML.read(self, local_infile, schema) + checked_files.append(local_infile) + + if machine is None: + if "CIME_MACHINE" in os.environ: + machine = os.environ["CIME_MACHINE"] + else: + cime_config = get_cime_config() + if cime_config.has_option("main", "machine"): + machine = cime_config.get("main", "machine") + if machine is None: + machine = self.probe_machine_name() + + expect( + machine is not None, + f"Could not initialize machine object from {', '.join(checked_files)}. 
This machine is not available for the target CIME_MODEL.", + ) + self.set_machine(machine, schema=schema) + + def get_child(self, name=None, attributes=None, root=None, err_msg=None): + if root is None: + root = self.machine_node + return super(Machines, self).get_child(name, attributes, root, err_msg) + + def get_machines_dir(self): + """ + Return the directory of the machines file + """ + return self.machines_dir + + def get_extra_machines_dir(self): + return self.extra_machines_dir + + def get_machine_name(self): + """ + Return the name of the machine + """ + return self.machine + + def get_node_names(self): + """ + Return the names of all the child nodes for the target machine + """ + nodes = self.get_children(root=self.machine_node) + node_names = [] + for node in nodes: + node_names.append(self.name(node)) + return node_names + + def get_first_child_nodes(self, nodename): + """ + Return the names of all the child nodes for the target machine + """ + nodes = self.get_children(nodename, root=self.machine_node) + return nodes + + def list_available_machines(self): + """ + Return a list of machines defined for a given CIME_MODEL + """ + machines = [] + nodes = self.get_children("machine") + for node in nodes: + mach = self.get(node, "MACH") + machines.append(mach) + if self.get_version() == 3.0: + machdirs = [ + os.path.basename(f.path) + for f in os.scandir(self.machines_dir) + if f.is_dir() + ] + machdirs.remove("cmake_macros") + machdirs.remove("userdefined_laptop_template") + for mach in machdirs: + if mach not in machines: + machines.append(mach) + + machines.sort() + return machines + + def probe_machine_name(self, warn=True): + """ + Find a matching regular expression for hostname + in the NODENAME_REGEX field in the file. First match wins. + """ + + names_not_found = [] + + nametomatch = socket.getfqdn() + + machine = self._probe_machine_name_one_guess(nametomatch) + + if machine is None: + names_not_found.append(nametomatch) + + nametomatch = socket.gethostname() + machine = self._probe_machine_name_one_guess(nametomatch) + + if machine is None: + names_not_found.append(nametomatch) + + names_not_found_quoted = ["'" + name + "'" for name in names_not_found] + names_not_found_str = " or ".join(names_not_found_quoted) + if warn: + logger.debug( + "Could not find machine match for {}".format( + names_not_found_str + ) + ) + + return machine + + def _probe_machine_name_one_guess(self, nametomatch): + """ + Find a matching regular expression for nametomatch in the NODENAME_REGEX + field in the file. First match wins. Returns None if no match is found. 
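+
+        For example (hypothetical entry), a machine whose NODENAME_REGEX is
+        "login.*\.hpc\.example\.edu" would be selected when nametomatch is
+        "login03.hpc.example.edu".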
+ """ + if self.get_version() < 3: + return self._probe_machine_name_one_guess_v2(nametomatch) + else: + return self._probe_machine_name_one_guess_v3(nametomatch) + + def _probe_machine_name_one_guess_v2(self, nametomatch): + + nodes = self.get_children("machine") + machine = None + for node in nodes: + machtocheck = self.get(node, "MACH") + logger.debug("machine is " + machtocheck) + regex_str_node = self.get_optional_child("NODENAME_REGEX", root=node) + regex_str = ( + machtocheck if regex_str_node is None else self.text(regex_str_node) + ) + + if regex_str is not None: + logger.debug("machine regex string is " + regex_str) + # an environment variable can be used + if regex_str.startswith("$ENV"): + machine_value = self.get_resolved_value( + regex_str, allow_unresolved_envvars=True + ) + if not machine_value.startswith("$ENV"): + try: + match, this_machine = machine_value.split(":") + except ValueError: + expect( + False, + "Bad formation of NODENAME_REGEX. Expected envvar:value, found {}".format( + regex_str + ), + ) + if match == this_machine: + machine = machtocheck + break + else: + regex = re.compile(regex_str) + if regex.match(nametomatch): + logger.debug( + "Found machine: {} matches {}".format( + machtocheck, nametomatch + ) + ) + machine = machtocheck + break + + return machine + + def _probe_machine_name_one_guess_v3(self, nametomatch): + + nodes = self.get_children("NODENAME_REGEX", root=self.root) + + children = [y for x in nodes for y in self.get_children(root=x)] + + machine = None + for child in children: + machtocheck = self.get(child, "MACH") + regex_str = self.text(child) + logger.debug( + "machine is {} regex {}, nametomatch {}".format( + machtocheck, regex_str, nametomatch + ) + ) + + if regex_str is not None: + # an environment variable can be used + if regex_str.startswith("$ENV"): + machine_value = self.get_resolved_value( + regex_str, allow_unresolved_envvars=True + ) + logger.debug("machine_value is {}".format(machine_value)) + if not machine_value.startswith("$ENV"): + try: + match, this_machine = machine_value.split(":") + except ValueError: + expect( + False, + "Bad formation of NODENAME_REGEX. Expected envvar:value, found {}".format( + regex_str + ), + ) + if match == this_machine: + machine = machtocheck + break + else: + regex = re.compile(regex_str) + if regex.match(nametomatch): + logger.debug( + "Found machine: {} matches {}".format( + machtocheck, nametomatch + ) + ) + machine = machtocheck + break + + return machine + + def set_machine(self, machine, schema=None): + """ + Sets the machine block in the Machines object + + >>> machobj = Machines(machine="melvin") + >>> machobj.get_machine_name() + 'melvin' + >>> machobj.set_machine("trump") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... 
+ CIMEError: ERROR: No machine trump found + """ + if machine == "Query": + return machine + elif self.get_version() == 3: + machines_file = Path.home() / ".cime" / machine / "config_machines.xml" + + if machines_file.exists(): + GenericXML.read( + self, + machines_file, + schema=schema, + ) + else: + machines_file = ( + Path(self.machines_dir) / machine / "config_machines.xml" + ) + + if machines_file.exists(): + GenericXML.read( + self, + machines_file, + schema=schema, + ) + self.machine_node = super(Machines, self).get_child( + "machine", + {"MACH": machine}, + err_msg="No machine {} found".format(machine), + ) + + self.machine = machine + return machine + + # pylint: disable=arguments-differ + def get_value(self, name, attributes=None, resolved=True, subgroup=None): + """ + Get Value of fields in the config_machines.xml file + """ + if self.machine_node is None: + logger.debug("Machine object has no machine defined") + return None + + expect(subgroup is None, "This class does not support subgroups") + value = None + + if name in self.custom_settings: + return self.custom_settings[name] + + # COMPILER and MPILIB are special, if called without arguments they get the default value from the + # COMPILERS and MPILIBS lists in the file. + if name == "COMPILER": + value = self.get_default_compiler() + elif name == "MPILIB": + value = self.get_default_MPIlib(attributes) + else: + node = self.get_optional_child( + name, root=self.machine_node, attributes=attributes + ) + if node is not None: + value = self.text(node) + + if resolved: + if value is not None: + value = self.get_resolved_value(value) + elif name in os.environ: + value = os.environ[name] + + value = convert_to_unknown_type(value) + + return value + + def get_field_from_list(self, listname, reqval=None, attributes=None): + """ + Some of the fields have lists of valid values in the xml, parse these + lists and return the first value if reqval is not provided and reqval + if it is a valid setting for the machine + """ + expect(self.machine_node is not None, "Machine object has no machine defined") + supported_values = self.get_value(listname, attributes=attributes) + logger.debug( + "supported values for {} on {} is {}".format( + listname, self.machine, supported_values + ) + ) + # if no match with attributes, try without + if supported_values is None: + supported_values = self.get_value(listname, attributes=None) + + expect( + supported_values is not None, + "No list found for " + listname + " on machine " + self.machine, + ) + supported_values = supported_values.split(",") # pylint: disable=no-member + + if reqval is None or reqval == "UNSET": + return supported_values[0] + + for val in supported_values: + if val == reqval: + return reqval + return None + + def get_default_compiler(self): + """ + Get the compiler to use from the list of COMPILERS + """ + cime_config = get_cime_config() + if cime_config.has_option("main", "COMPILER"): + value = cime_config.get("main", "COMPILER") + expect( + self.is_valid_compiler(value), + "User-selected compiler {} is not supported on machine {}".format( + value, self.machine + ), + ) + else: + value = self.get_field_from_list("COMPILERS") + return value + + def get_default_MPIlib(self, attributes=None): + """ + Get the MPILIB to use from the list of MPILIBS + """ + return self.get_field_from_list("MPILIBS", attributes=attributes) + + def is_valid_compiler(self, compiler): + """ + Check the compiler is valid for the current machine + """ + return self.get_field_from_list("COMPILERS", 
reqval=compiler) is not None + + def is_valid_MPIlib(self, mpilib, attributes=None): + """ + Check the MPILIB is valid for the current machine + """ + return ( + mpilib == "mpi-serial" + or self.get_field_from_list("MPILIBS", reqval=mpilib, attributes=attributes) + is not None + ) + + def has_batch_system(self): + """ + Return if this machine has a batch system + """ + result = False + batch_system = self.get_optional_child("BATCH_SYSTEM", root=self.machine_node) + if batch_system is not None: + result = ( + self.text(batch_system) is not None + and self.text(batch_system) != "none" + ) + logger.debug("Machine {} has batch: {}".format(self.machine, result)) + return result + + def get_suffix(self, suffix_type): + node = self.get_optional_child("default_run_suffix") + if node is not None: + suffix_node = self.get_optional_child(suffix_type, root=node) + if suffix_node is not None: + return self.text(suffix_node) + + return None + + def set_value(self, vid, value, subgroup=None, ignore_type=True): + # A temporary cache only + self.custom_settings[vid] = value + + def print_values(self, compiler=None): + """Prints machine values. + + Args: + compiler (str, optional): Name of the compiler to print extra details for. Defaults to None. + """ + current = self.probe_machine_name(False) + + if self.machine_node is None: + for machine in self.get_children("machine"): + self._print_machine_values(machine, current) + else: + self._print_machine_values(self.machine_node, current, compiler) + + def _print_machine_values(self, machine, current=None, compiler=None): + """Prints a machines details. + + Args: + machine (CIME.XML.machines.Machine): Machine object. + current (str, optional): Name of the current machine. Defaults to None. + compiler (str, optional): If not None, then modules and environment variables matching compiler are printed. Defaults to None. + + Raises: + CIMEError: If `compiler` is not valid. 
+ """ + name = self.get(machine, "MACH") + if current is not None and current == name: + name = f"{name} (current)" + desc = self.text(self.get_child("DESC", root=machine)) + os_ = self.text(self.get_child("OS", root=machine)) + + compilers = self.text(self.get_child("COMPILERS", root=machine)) + if compiler is not None and compiler not in compilers.split(","): + raise CIMEError( + f"Compiler {compiler!r} is not a valid choice from ({compilers})" + ) + + mpilibs_nodes = self._get_children_filter_attribute_regex( + "MPILIBS", "compiler", compiler, root=machine + ) + mpilibs = set([y for x in mpilibs_nodes for y in self.text(x).split(",")]) + + max_tasks_per_node = self.text( + self.get_child("MAX_TASKS_PER_NODE", root=machine) + ) + max_mpitasks_per_node = self.text( + self.get_child("MAX_MPITASKS_PER_NODE", root=machine) + ) + max_gpus_per_node = self.get_optional_child("MAX_GPUS_PER_NODE", root=machine) + max_gpus_per_node_text = ( + self.text(max_gpus_per_node) if max_gpus_per_node else 0 + ) + + if compiler is not None: + name = f"{name} ({compiler})" + + print(" {} : {} ".format(name, desc)) + print(" os ", os_) + print(" compilers ", compilers) + print(" mpilibs ", ",".join(mpilibs)) + print(" pes/node ", max_mpitasks_per_node) + print(" max_tasks/node ", max_tasks_per_node) + print(" max_gpus/node ", max_gpus_per_node_text) + print("") + + if compiler is not None: + module_system_node = self.get_child("module_system", root=machine) + + def command_formatter(node): + if node.text is None: + return f"{node.attrib['name']}" + else: + return f"{node.attrib['name']} {node.text}" + + print(" Module commands:") + for requirements, commands in self._filter_children_by_compiler( + "modules", "command", compiler, command_formatter, module_system_node + ): + indent = "" if requirements == "" else " " + if requirements != "": + print(f" (with {requirements})") + for x in commands: + print(f" {indent}{x}") + print("") + + def env_formatter(node, machines=None): + return f"{node.attrib['name']}: {machines._get_resolved_environment_variable(node.text)}" + + print(" Environment variables:") + for requirements, variables in self._filter_children_by_compiler( + "environment_variables", + "env", + compiler, + partial(env_formatter, machines=self), + machine, + ): + indent = "" if requirements == "" else " " + if requirements != "": + print(f" (with {requirements})") + for x in variables: + print(f" {indent}{x}") + + def _filter_children_by_compiler(self, parent, child, compiler, formatter, root): + """Filters parent nodes and returns requirements and children of filtered nodes. + + Example of a yielded values: + + "mpilib=openmpi DEBUG=true", ["HOME: /home/dev", "NETCDF_C_PATH: ../netcdf"] + + Args: + parent (str): Name of the nodes to filter. + child (str): Name of the children nodes from filtered parent nodes. + compiler (str): Name of the compiler that will be matched against the regex. + formatter (function): Function to format the child nodes from the parents that match. + root (CIME.XML.generic_xml._Element): Root node to filter parent nodes from. + + Yields: + str, list: Requirements for parent node and list of formated child nodes. 
+ """ + nodes = self._get_children_filter_attribute_regex( + parent, "compiler", compiler, root=root + ) + + for x in nodes: + attrib = {**x.attrib} + attrib.pop("compiler", None) + + requirements = " ".join([f"{y}={z!r}" for y, z in attrib.items()]) + values = [formatter(y) for y in self.get_children(child, root=x)] + + yield requirements, values + + def _get_children_filter_attribute_regex(self, name, attribute_name, value, root): + """Filter children nodes using regex. + + Uses regex from attribute of children nodes to match a value. + + Args: + name (str): Name of the children nodes. + attribute_name (str): Name of the attribute on the child nodes to build regex from. + value (str): Value that is matched using regex from attribute. + root (CIME.XML.generic_xml._Element): Root node to query children nodes from. + + Returns: + list: List of children whose regex attribute matches the value. + """ + return [ + x + for x in self.get_children(name, root=root) + if match_value_by_attribute_regex(x, attribute_name, value) + ] + + def _get_resolved_environment_variable(self, text): + """Attempts to resolve machines environment variable. + + Args: + text (str): Environment variable value. + + Returns: + str: Resolved value or error message. + """ + if text is None: + return "" + + try: + value = self.get_resolved_value(text, allow_unresolved_envvars=True) + except Exception as e: + return f"Failed to resolve {text!r} with: {e!s}" + + if value == text and "$" in text: + value = f"Failed to resolve {text!r}" + + return value + + def return_values(self): + """return a dictionary of machine info + This routine is used by external tools in https://github.com/NCAR/CESM_xml2html + """ + machines = self.get_children("machine") + mach_dict = dict() + logger.debug("Machines return values") + for machine in machines: + name = self.get(machine, "MACH") + desc = self.get_child("DESC", root=machine) + mach_dict[(name, "description")] = self.text(desc) + os_ = self.get_child("OS", root=machine) + mach_dict[(name, "os")] = self.text(os_) + compilers = self.get_child("COMPILERS", root=machine) + mach_dict[(name, "compilers")] = self.text(compilers) + max_tasks_per_node = self.get_child("MAX_TASKS_PER_NODE", root=machine) + mach_dict[(name, "max_tasks_per_node")] = self.text(max_tasks_per_node) + max_mpitasks_per_node = self.get_child( + "MAX_MPITASKS_PER_NODE", root=machine + ) + mach_dict[(name, "max_mpitasks_per_node")] = self.text( + max_mpitasks_per_node + ) + max_gpus_per_node = self.get_child("MAX_GPUS_PER_NODE", root=machine) + mach_dict[(name, "max_gpus_per_node")] = self.text(max_gpus_per_node) + + return mach_dict diff --git a/CIME/XML/namelist_definition.py b/CIME/XML/namelist_definition.py new file mode 100644 index 00000000000..7ca7c477009 --- /dev/null +++ b/CIME/XML/namelist_definition.py @@ -0,0 +1,520 @@ +"""Interface to `namelist_definition.xml`. + +This module contains only one class, `NamelistDefinition`, inheriting from +`EntryID`. +""" + +# Warnings we typically ignore. 
+# pylint:disable=invalid-name + +# Disable warnings due to using `standard_module_setup` +# pylint:disable=wildcard-import,unused-wildcard-import + +import re +import collections + +from CIME.namelist import ( + fortran_namelist_base_value, + is_valid_fortran_namelist_literal, + character_literal_to_string, + expand_literal_list, + Namelist, + get_fortran_name_only, +) + +from CIME.XML.standard_module_setup import * +from CIME.XML.entry_id import EntryID +from CIME.XML.files import Files + +logger = logging.getLogger(__name__) + +_array_size_re = re.compile(r"^(?P[^(]+)\((?P[^)]+)\)$") + + +class CaseInsensitiveDict(dict): + + """Basic case insensitive dict with strings only keys. + From https://stackoverflow.com/a/27890005""" + + proxy = {} + + def __init__(self, data): + dict.__init__(self) + self.proxy = dict((k.lower(), k) for k in data) + for k in data: + self[k] = data[k] + + def __contains__(self, k): + return k.lower() in self.proxy + + def __delitem__(self, k): + key = self.proxy[k.lower()] + super(CaseInsensitiveDict, self).__delitem__(key) + del self.proxy[k.lower()] + + def __getitem__(self, k): + key = self.proxy[k.lower()] + return super(CaseInsensitiveDict, self).__getitem__(key) + + def get(self, k, default=None): + return self[k] if k in self else default + + def __setitem__(self, k, v): + super(CaseInsensitiveDict, self).__setitem__(k, v) + self.proxy[k.lower()] = k + + +class NamelistDefinition(EntryID): + + """Class representing variable definitions for a namelist. + This class inherits from `EntryID`, and supports most inherited methods; + however, `set_value` is unsupported. + + Additional public methods: + - dict_to_namelist. + - is_valid_value + - validate + """ + + def __init__(self, infile, files=None): + """Construct a `NamelistDefinition` from an XML file.""" + + # if the file is invalid we may not be able to check the version + # but we need to do it this way until we remove the version 1 files + schema = None + if files is None: + files = Files() + schema = files.get_schema("NAMELIST_DEFINITION_FILE") + expect(os.path.isfile(infile), "File {} does not exist".format(infile)) + super(NamelistDefinition, self).__init__(infile, schema=schema) + + self._attributes = {} + self._entry_nodes = [] + self._entry_ids = [] + self._valid_values = {} + self._entry_types = {} + self._group_names = CaseInsensitiveDict({}) + self._nodes = {} + + def set_node_values(self, name, node): + self._entry_nodes.append(node) + self._entry_ids.append(name) + self._nodes[name] = node + self._entry_types[name] = self._get_type(node) + self._valid_values[name] = self._get_valid_values(node) + self._group_names[name] = self.get_group_name(node) + + def set_nodes(self, skip_groups=None): + """ + populates the object data types for all nodes that are not part of the skip_groups array + returns nodes that do not have attributes of `skip_default_entry` or `per_stream_entry` + """ + default_nodes = [] + for node in self.get_children("entry"): + name = self.get(node, "id") + skip_default_entry = self.get(node, "skip_default_entry") == "true" + per_stream_entry = self.get(node, "per_stream_entry") == "true" + + if skip_groups: + group_name = self.get_group_name(node) + + if not group_name in skip_groups: + self.set_node_values(name, node) + + if not skip_default_entry and not per_stream_entry: + default_nodes.append(node) + else: + self.set_node_values(name, node) + + if not skip_default_entry and not per_stream_entry: + default_nodes.append(node) + + return default_nodes + + def 
get_group_name(self, node=None): + if self.get_version() == 1.0: + group = self.get(node, "group") + elif self.get_version() >= 2.0: + group = self.get_element_text("group", root=node) + return group + + def _get_type(self, node): + if self.get_version() == 1.0: + type_info = self.get(node, "type") + elif self.get_version() >= 2.0: + type_info = self._get_type_info(node) + return type_info + + def _get_valid_values(self, node): + # The "valid_values" attribute is not required, and an empty string has + # the same effect as not specifying it. + # Returns a list from a comma seperated string in xml + valid_values = "" + if self.get_version() == 1.0: + valid_values = self.get(node, "valid_values") + elif self.get_version() >= 2.0: + valid_values = self._get_node_element_info(node, "valid_values") + if valid_values == "": + valid_values = None + if valid_values is not None: + valid_values = valid_values.split(",") + return valid_values + + def get_group(self, name): + return self._group_names[name] + + def rename_group(self, oldgroup, newgroup): + for var in self._group_names: + if self._group_names[var] == oldgroup: + self._group_names[var] = newgroup + + def add_attributes(self, attributes): + self._attributes = attributes + + def get_attributes(self): + """Return this object's attributes dictionary""" + return self._attributes + + def get_entry_nodes(self): + return self._entry_nodes + + def get_per_stream_entries(self): + entries = [] + nodes = self.get_children("entry") + for node in nodes: + per_stream_entry = self.get(node, "per_stream_entry") == "true" + if per_stream_entry: + entries.append(self.get(node, "id")) + return entries + + # Currently we don't use this object to construct new files, and it's no + # good for that purpose anyway, so stop this function from being called. + def set_value(self, vid, value, subgroup=None, ignore_type=True): + """This function is not implemented.""" + raise TypeError("NamelistDefinition does not support `set_value`.") + + # In contrast to the entry_id version of this method, this version doesn't support the + # replacement_for_none argument, because it is hard-coded to ''. + # pylint: disable=arguments-differ + def get_value_match(self, vid, attributes=None, exact_match=True, entry_node=None): + """Return the default value for the variable named `vid`. + + The return value is a list of strings corresponding to the + comma-separated list of entries for the value (length 1 for scalars). If + there is no default value in the file, this returns `None`. + """ + # Merge internal attributes with those passed in. + all_attributes = {} + if self._attributes is not None: + all_attributes.update(self._attributes) + if attributes is not None: + all_attributes.update(attributes) + + if entry_node is None: + entry_node = self._nodes[vid] + # NOTE(wjs, 2021-06-04) In the following call, replacement_for_none='' may not + # actually be needed, but I'm setting it to maintain some old logic, to be safe. + value = super(NamelistDefinition, self).get_value_match( + vid.lower(), + attributes=all_attributes, + exact_match=exact_match, + entry_node=entry_node, + replacement_for_none="", + ) + if value is not None: + value = self._split_defaults_text(value) + + return value + + @staticmethod + def _split_defaults_text(string): + """Take a comma-separated list in a string, and split it into a list.""" + # Some trickiness here; we want to split items on commas, but not inside + # quote-delimited strings. Stripping whitespace is also useful. 
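+        # For instance (illustrative input), "1.5, 'a, b', 3" splits into
+        # ["1.5", "'a, b'", "3"]: the comma inside the quoted literal is kept,
+        # while the separating commas are dropped.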
+ value = [] + if len(string): + pos = 0 + delim = None + for i, char in enumerate(string): + if delim is None: + # If not inside a string... + if char in ('"', "'"): + # if we have a quote character, start a string. + delim = char + elif char == ",": + # if we have a comma, this is a new value. + value.append(string[pos:i].strip()) + pos = i + 1 + else: + # If inside a string, the only thing that can happen is the end + # of the string. + if char == delim: + delim = None + value.append(string[pos:].strip()) + return value + + def split_type_string(self, name): + """Split a 'type' attribute string into its component parts. + + The `name` argument is the variable name. + This is used for error reporting purposes. + + The return value is a tuple consisting of the type itself, a length + (which is an integer for character variables, otherwise `None`), and the + size of the array (which is 1 for scalar variables). + """ + type_string = self._entry_types[name] + + # 'char' is frequently used as an abbreviation of 'character'. + type_string = type_string.replace("char", "character") + + # Separate into a size and the rest of the type. + size_match = _array_size_re.search(type_string) + if size_match: + type_string = size_match.group("type") + size_string = size_match.group("size") + try: + size = int(size_string) + except ValueError: + expect( + False, + "In namelist definition, variable {} had the non-integer string {!r} specified as an array size.".format( + name, size_string + ), + ) + else: + size = 1 + + # Separate into a type and an optional length. + type_, star, length = type_string.partition("*") + if star == "*": + # Length allowed only for character variables. + expect( + type_ == "character", + "In namelist definition, length specified for non-character " + "variable {}.".format(name), + ) + # Check that the length is actually an integer, to make the error + # message a bit cleaner if the xml input is bad. + try: + max_len = int(length) + except ValueError: + expect( + False, + "In namelist definition, character variable {} had the non-integer string {!r} specified as a length.".format( + name, length + ), + ) + else: + max_len = None + return type_, max_len, size + + @staticmethod + def _canonicalize_value(type_, value): + """Create 'canonical' version of a value for comparison purposes.""" + canonical_value = [fortran_namelist_base_value(scalar) for scalar in value] + canonical_value = [scalar for scalar in canonical_value if scalar != ""] + if type_ == "character": + canonical_value = [ + character_literal_to_string(scalar) for scalar in canonical_value + ] + elif type_ == "integer": + canonical_value = [int(scalar) for scalar in canonical_value] + return canonical_value + + def is_valid_value(self, name, value): + """Determine whether a value is valid for the named variable. + + The `value` argument must be a list of strings formatted as they would + appear in the namelist (even for scalar variables, in which case the + length of the list is always 1). + """ + # Separate into a type, optional length, and optional size. + type_, max_len, size = self.split_type_string(name) + invalid = [] + + # Check value against type. + for scalar in value: + if not is_valid_fortran_namelist_literal(type_, scalar): + invalid.append(scalar) + if len(invalid) > 0: + logger.warning("Invalid values {}".format(invalid)) + return False + + # Now that we know that the strings as input are valid Fortran, do some + # canonicalization for further checks. 
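+        # e.g. (illustrative): for a "character" variable ["'gx1v7'"] becomes
+        # ["gx1v7"], and for an "integer" variable ["1", "2"] becomes [1, 2].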
+ canonical_value = self._canonicalize_value(type_, value) + + # Check maximum length (if applicable). + if max_len is not None: + for scalar in canonical_value: + if len(scalar) > max_len: + return False + + # Check valid value constraints (if applicable). + valid_values = self._valid_values[name] + if valid_values is not None: + expect( + type_ in ("integer", "character"), + "Found valid_values attribute for variable {} with type {}, but valid_values only allowed for character and integer variables.".format( + name, type_ + ), + ) + if type_ == "integer": + compare_list = [int(vv) for vv in valid_values] + else: + compare_list = valid_values + for scalar in canonical_value: + if scalar not in compare_list: + invalid.append(scalar) + if len(invalid) > 0: + logger.warning("Invalid values {}".format(invalid)) + return False + + # Check size of input array. + if len(expand_literal_list(value)) > size: + expect( + False, + "Value index exceeds variable size for variable {}, allowed array length is {} value array size is {}".format( + name, size, len(expand_literal_list(value)) + ), + ) + return True + + def _expect_variable_in_definition(self, name, variable_template): + """Used to get a better error message for an unexpected variable. + case insensitve match""" + + expect( + name in self._entry_ids, + (variable_template + " is not in the namelist definition.").format( + str(name) + ), + ) + + def _user_modifiable_in_variable_definition(self, name): + # Is name user modifiable? + node = self.get_optional_child("entry", attributes={"id": name}) + user_modifiable_only_by_xml = self.get(node, "modify_via_xml") + if user_modifiable_only_by_xml is not None: + expect( + False, + "Cannot change {} in user_nl file: set via xml variable {}".format( + name, user_modifiable_only_by_xml + ), + ) + user_cannot_modify = self.get(node, "cannot_modify_by_user_nl") + if user_cannot_modify is not None: + expect( + False, + "Cannot change {} in user_nl file: {}".format(name, user_cannot_modify), + ) + + def _generate_variable_template(self, filename): + # Improve error reporting when a file name is provided. + if filename is None: + variable_template = "Variable {!r}" + else: + # for the next step we want the name of the original user_nl file not the internal one + # We do this by extracting the component name from the filepath string + if "Buildconf" in filename and "namelist_infile" in filename: + msgfn = "user_nl_" + (filename.split(os.sep)[-2])[:-4] + else: + msgfn = filename + variable_template = "Variable {!r} from file " + repr(str(msgfn)) + return variable_template + + def validate(self, namelist, filename=None): + """Validate a namelist object against this definition. + + The optional `filename` argument can be used to assist in error + reporting when the namelist comes from a specific, known file. + """ + variable_template = self._generate_variable_template(filename) + + # Iterate through variables. + for group_name in namelist.get_group_names(): + for variable_name in namelist.get_variable_names(group_name): + # Check that the variable is defined... + qualified_variable_name = get_fortran_name_only(variable_name) + self._expect_variable_in_definition( + qualified_variable_name, variable_template + ) + + # Check if can actually change this variable via filename change + if filename is not None: + self._user_modifiable_in_variable_definition( + qualified_variable_name + ) + + # and has the right group name... 
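+                # For instance (illustrative names), a variable whose
+                # definition places it in group "clm_inparm" but which appears
+                # under group "ctl_nl" in the namelist fails the check below.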
+ var_group = self.get_group(qualified_variable_name) + expect( + var_group == group_name, + ( + variable_template + + " is in a group named {!r}, but should be in {!r}." + ).format(str(variable_name), str(group_name), str(var_group)), + ) + + # and has a valid value. + value = namelist.get_variable_value(group_name, variable_name) + expect( + self.is_valid_value(qualified_variable_name, value), + (variable_template + " has invalid value {!r}.").format( + str(variable_name), [str(scalar) for scalar in value] + ), + ) + + def dict_to_namelist(self, dict_, filename=None): + """Converts a dictionary of name-value pairs to a `Namelist`. + + The input is assumed to be similar to the output of `parse` when + `groupless=True` is set. This function uses the namelist definition file + to look up the namelist group associated with each variable, and uses + this information to create a true `Namelist` object. + + The optional `filename` argument can be used to assist in error + reporting when the namelist comes from a specific, known file. + """ + # Improve error reporting when a file name is provided. + variable_template = self._generate_variable_template(filename) + groups = {} + for variable_name in dict_: + variable_lc = variable_name.lower() + qualified_varname = get_fortran_name_only(variable_lc) + self._expect_variable_in_definition(qualified_varname, variable_template) + group_name = self.get_group(qualified_varname) + expect( + group_name is not None, "No group found for var {}".format(variable_lc) + ) + if group_name not in groups: + groups[group_name] = collections.OrderedDict() + groups[group_name][variable_lc] = dict_[variable_name] + return Namelist(groups) + + def get_input_pathname(self, name): + node = self._nodes[name] + if self.get_version() == 1.0: + input_pathname = self.get(node, "input_pathname") + elif self.get_version() >= 2.0: + input_pathname = self._get_node_element_info(node, "input_pathname") + return input_pathname + + # pylint: disable=arguments-differ + def get_default_value(self, item, attribute=None): + """Return the default value for the variable named `item`. + + The return value is a list of strings corresponding to the + comma-separated list of entries for the value (length 1 for scalars). If + there is no default value in the file, this returns `None`. + """ + # Merge internal attributes with those passed in. + all_attributes = {} + if self._attributes is not None: + all_attributes.update(self._attributes) + if attribute is not None: + all_attributes.update(attribute) + + value = self.get_value_match(item.lower(), all_attributes, True) + return value diff --git a/CIME/XML/pes.py b/CIME/XML/pes.py new file mode 100644 index 00000000000..3254751c794 --- /dev/null +++ b/CIME/XML/pes.py @@ -0,0 +1,256 @@ +""" +Interface to the config_pes.xml file. 
This class inherits from GenericXML.py +""" +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files +from CIME.utils import expect + +logger = logging.getLogger(__name__) + + +class Pes(GenericXML): + def __init__(self, infile, files=None): + """ + initialize a files object given input pes specification file + """ + if files is None: + files = Files() + schema = files.get_schema("PES_SPEC_FILE") + logger.debug("DEBUG: infile is {}".format(infile)) + GenericXML.__init__(self, infile, schema=schema) + + def find_pes_layout(self, grid, compset, machine, pesize_opts="M", mpilib=None): + opes_ntasks = {} + opes_nthrds = {} + opes_rootpe = {} + opes_pstrid = {} + oother_settings = {} + other_settings = {} + append = {} + o_grid_nodes = [] + comments = None + # Get any override nodes + overrides = self.get_optional_child("overrides") + ocomments = None + + if overrides is not None: + o_grid_nodes = self.get_children("grid", root=overrides) + ( + opes_ntasks, + opes_nthrds, + opes_rootpe, + opes_pstrid, + oother_settings, + append, + ocomments, + ) = self._find_matches( + o_grid_nodes, grid, compset, machine, pesize_opts, True + ) + # Get all the nodes + grid_nodes = self.get_children("grid") + if o_grid_nodes: + gn_set = set(grid_nodes) + ogn_set = set(o_grid_nodes) + gn_set.difference_update(ogn_set) + grid_nodes = list(gn_set) + + ( + pes_ntasks, + pes_nthrds, + pes_rootpe, + pes_pstrid, + other_settings, + os_append, + comments, + ) = self._find_matches(grid_nodes, grid, compset, machine, pesize_opts, False) + pes_ntasks.update(opes_ntasks) + pes_nthrds.update(opes_nthrds) + pes_rootpe.update(opes_rootpe) + pes_pstrid.update(opes_pstrid) + other_settings.update(oother_settings) + os_append.update(append) + if ocomments is not None: + comments = ocomments + + if mpilib == "mpi-serial": + for i in iter(pes_ntasks): + pes_ntasks[i] = 1 + for i in iter(pes_rootpe): + pes_rootpe[i] = 0 + for i in iter(pes_pstrid): + pes_pstrid[i] = 0 + + logger.info("Pes setting: grid is {} ".format(grid)) + logger.info("Pes setting: compset is {} ".format(compset)) + logger.info("Pes setting: tasks is {} ".format(pes_ntasks)) + logger.info("Pes setting: threads is {} ".format(pes_nthrds)) + logger.info("Pes setting: rootpe is {} ".format(pes_rootpe)) + logger.info("Pes setting: pstrid is {} ".format(pes_pstrid)) + logger.info("Pes other settings: {}".format(other_settings)) + logger.info("Pes other settings append: {}".format(os_append)) + if comments is not None: + logger.info("Pes comments: {}".format(comments)) + + return ( + pes_ntasks, + pes_nthrds, + pes_rootpe, + pes_pstrid, + other_settings, + os_append, + comments, + ) + + def _find_matches( + self, grid_nodes, grid, compset, machine, pesize_opts, override=False + ): + grid_choice = None + mach_choice = None + compset_choice = None + pesize_choice = None + max_points = -1 + pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings, append = ( + {}, + {}, + {}, + {}, + {}, + {}, + ) + pe_select = None + comment = None + for grid_node in grid_nodes: + grid_match = self.get(grid_node, "name") + if grid_match == "any" or re.search(grid_match, grid): + mach_nodes = self.get_children("mach", root=grid_node) + for mach_node in mach_nodes: + mach_match = self.get(mach_node, "name") + if mach_match == "any" or re.search(mach_match, machine): + pes_nodes = self.get_children("pes", root=mach_node) + for pes_node in pes_nodes: + pesize_match = self.get(pes_node, "pesize") + compset_match = 
self.get(pes_node, "compset") + if ( + pesize_match == "any" + or ( + pesize_opts is not None + and pesize_match == pesize_opts + ) + ) and ( + compset_match == "any" + or re.search(compset_match, compset) + ): + points = ( + int(grid_match != "any") * 3 + + int(mach_match != "any") * 7 + + int(compset_match != "any") * 2 + + int(pesize_match != "any") + ) + if override and points > 0: + for node in self.get_children(root=pes_node): + vid = self.name(node) + logger.info("vid is {}".format(vid)) + if "comment" in vid: + comment = self.text(node) + elif "ntasks" in vid: + for child in self.get_children(root=node): + pes_ntasks[ + self.name(child).upper() + ] = int(self.text(child)) + elif "nthrds" in vid: + for child in self.get_children(root=node): + pes_nthrds[ + self.name(child).upper() + ] = int(self.text(child)) + elif "rootpe" in vid: + for child in self.get_children(root=node): + pes_rootpe[ + self.name(child).upper() + ] = int(self.text(child)) + elif "pstrid" in vid: + for child in self.get_children(root=node): + pes_pstrid[ + self.name(child).upper() + ] = int(self.text(child)) + # if the value is already upper case its something else we are trying to set + else: + other_settings[vid] = self.text(node) + append[vid] = self.get( + node, "append", default="false" + ) + else: + if points > max_points: + pe_select = pes_node + max_points = points + mach_choice = mach_match + grid_choice = grid_match + compset_choice = compset_match + pesize_choice = pesize_match + elif points == max_points: + logger.warning( + "mach_choice {} mach_match {}".format( + mach_choice, mach_match + ) + ) + logger.warning( + "grid_choice {} grid_match {}".format( + grid_choice, grid_match + ) + ) + logger.warning( + "compset_choice {} compset_match {}".format( + compset_choice, compset_match + ) + ) + logger.warning( + "pesize_choice {} pesize_match {}".format( + pesize_choice, pesize_match + ) + ) + logger.warning("points = {:d}".format(points)) + expect( + False, + "More than one PE layout matches given PE specs", + ) + if not override: + for node in self.get_children(root=pe_select): + vid = self.name(node) + logger.debug("vid is {}".format(vid)) + if "comment" in vid: + comment = self.text(node) + elif "ntasks" in vid: + for child in self.get_children(root=node): + pes_ntasks[self.name(child).upper()] = int(self.text(child)) + elif "nthrds" in vid: + for child in self.get_children(root=node): + pes_nthrds[self.name(child).upper()] = int(self.text(child)) + elif "rootpe" in vid: + for child in self.get_children(root=node): + pes_rootpe[self.name(child).upper()] = int(self.text(child)) + elif "pstrid" in vid: + for child in self.get_children(root=node): + pes_pstrid[self.name(child).upper()] = int(self.text(child)) + # if the value is already upper case its something else we are trying to set + elif vid == self.name(node): + text = self.text(node).strip() + if len(text): + other_settings[vid] = self.text(node) + if grid_choice != "any" or logger.isEnabledFor(logging.DEBUG): + logger.info("Pes setting: grid match is {} ".format(grid_choice)) + if mach_choice != "any" or logger.isEnabledFor(logging.DEBUG): + logger.info("Pes setting: machine match is {} ".format(mach_choice)) + if compset_choice != "any" or logger.isEnabledFor(logging.DEBUG): + logger.info("Pes setting: compset_match is {} ".format(compset_choice)) + if pesize_choice != "any" or logger.isEnabledFor(logging.DEBUG): + logger.info("Pes setting: pesize match is {} ".format(pesize_choice)) + + return ( + pes_ntasks, + pes_nthrds, + pes_rootpe, + 
pes_pstrid, + other_settings, + append, + comment, + ) diff --git a/CIME/XML/pio.py b/CIME/XML/pio.py new file mode 100644 index 00000000000..54af6112bf0 --- /dev/null +++ b/CIME/XML/pio.py @@ -0,0 +1,68 @@ +""" +Class for config_pio files . This class inherits from EntryID.py +""" +from CIME.XML.standard_module_setup import * +from CIME.XML.entry_id import EntryID +from CIME.XML.files import Files + +from collections import OrderedDict + +logger = logging.getLogger(__name__) + + +class PIO(EntryID): + def __init__(self, comp_classes, infile=None, files=None): + if infile is None: + if files is None: + files = Files() + infile = files.get_value("PIO_SPEC_FILE") + + EntryID.__init__(self, infile) + + self._components = list(comp_classes) + + def check_if_comp_var(self, vid, attribute=None, node=None): + comp = None + new_vid = None + for comp in self._components: + if vid.endswith("_" + comp): + new_vid = vid.replace("_" + comp, "", 1) + elif vid.startswith(comp + "_"): + new_vid = vid.replace(comp + "_", "", 1) + elif "_" + comp + "_" in vid: + new_vid = vid.replace(comp + "_", "", 1) + + if new_vid is not None: + return new_vid, comp, True + + return vid, None, False + + def get_defaults( + self, grid=None, compset=None, mach=None, compiler=None, mpilib=None + ): # pylint: disable=unused-argument + # should we have a env_pio file + defaults = OrderedDict() + save_for_last = [] + + # Load args into attribute dict + attributes = {} + for attrib in ["grid", "compset", "mach", "compiler", "mpilib"]: + if locals()[attrib] is not None: + attributes[attrib] = locals()[attrib] + + # Find defauts + for node in self.get_children("entry"): + value = self.get_default_value(node, attributes) + if value: + myid = self.get(node, "id") + iscompvar = self.check_if_comp_var(myid)[-1] + if iscompvar: + save_for_last.append((myid, value)) + else: + defaults[myid] = value + + # comp-specific vars must come last so they take precedence over general settings + for k, v in save_for_last: + defaults[k] = v + + return defaults diff --git a/CIME/XML/postprocessing.py b/CIME/XML/postprocessing.py new file mode 100644 index 00000000000..3287d145142 --- /dev/null +++ b/CIME/XML/postprocessing.py @@ -0,0 +1,40 @@ +""" +Interface to the config_postprocessing.xml file. 
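Looking back at Pes._find_matches above, candidate pes blocks are ranked by how specific their match is: a machine match is weighted 7, grid 3, compset 2 and pesize 1, and two candidates that tie on a score are treated as an error. A toy recomputation of that ranking, with made-up match attributes:

def match_points(grid_match, mach_match, compset_match, pesize_match):
    # Same weights as the scoring expression in _find_matches.
    return (
        int(grid_match != "any") * 3
        + int(mach_match != "any") * 7
        + int(compset_match != "any") * 2
        + int(pesize_match != "any")
    )

# A machine-specific entry (7) outranks one that pins grid and compset (3 + 2).
print(match_points("any", "somemachine", "any", "any"))   # 7
print(match_points("a%T62", "any", "DATM.+CLM", "any"))    # 5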
This class inherits from EntryID +""" + +from CIME.XML.standard_module_setup import * +from CIME.XML.entry_id import EntryID +from CIME.XML.files import Files +from CIME.utils import expect + +logger = logging.getLogger(__name__) + + +class Postprocessing(EntryID): + def __init__(self, infile=None, files=None): + """ + initialize an object + """ + if files is None: + files = Files() + if infile is None: + infile = files.get_value("POSTPROCESSING_SPEC_FILE") + if infile is not None: + self.file_exists = os.path.isfile(infile) + else: + self.file_exists = False + if not self.file_exists: + return + expect(infile, "No postprocessing file defined in {}".format(files.filename)) + + schema = files.get_schema("POSTPROCESSING_SPEC_FILE") + + EntryID.__init__(self, infile, schema=schema) + + # Append the contents of $HOME/.cime/config_postprocessing.xml if it exists + # This could cause problems if node matchs are repeated when only one is expected + infile = os.path.join( + os.environ.get("HOME"), ".cime", "config_postprocessing.xml" + ) + if os.path.exists(infile): + EntryID.read(self, infile) diff --git a/scripts/lib/CIME/XML/standard_module_setup.py b/CIME/XML/standard_module_setup.py similarity index 99% rename from scripts/lib/CIME/XML/standard_module_setup.py rename to CIME/XML/standard_module_setup.py index d22ba32c4ac..1c934407da5 100644 --- a/scripts/lib/CIME/XML/standard_module_setup.py +++ b/CIME/XML/standard_module_setup.py @@ -1,4 +1,3 @@ - # pragma pylint: disable=unused-import import logging, os, sys, re diff --git a/CIME/XML/stream.py b/CIME/XML/stream.py new file mode 100644 index 00000000000..95dfa30ef7d --- /dev/null +++ b/CIME/XML/stream.py @@ -0,0 +1,49 @@ +""" +Interface to the streams.xml style files. This class inherits from GenericXML.py + +stream files predate cime and so do not conform to entry id format +""" +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files +from CIME.utils import expect + +logger = logging.getLogger(__name__) + + +class Stream(GenericXML): + def __init__(self, infile=None, files=None): + """ + initialize an object + """ + if files is None: + files = Files() + schema = None + GenericXML.__init__(self, infile, schema=schema) + + def get_value(self, item, attribute=None, resolved=True, subgroup=None): + """ + Get Value of fields in a stream.xml file + """ + expect(subgroup is None, "This class does not support subgroups") + value = None + node = None + names = item.split("/") + node = None + for name in names: + node = self.scan_child(name, root=node) + if node is not None: + value = self.text(node).strip() + + if value is None: + # if all else fails + # pylint: disable=assignment-from-none + value = GenericXML.get_value(self, item, attribute, resolved, subgroup) + + if resolved: + if value is not None: + value = self.get_resolved_value(value) + elif item in os.environ: + value = os.environ[item] + + return value diff --git a/CIME/XML/test_reporter.py b/CIME/XML/test_reporter.py new file mode 100644 index 00000000000..93f117a8a46 --- /dev/null +++ b/CIME/XML/test_reporter.py @@ -0,0 +1,84 @@ +""" +Interface to the testreporter xml. 
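For context, Stream.get_value above resolves a '/'-separated item by walking down one child element per name and returning the text of the last node it finds. The standalone sketch below reproduces that walk with xml.etree; the element and field names are only representative of what a stream file might contain:

import xml.etree.ElementTree as ET

def stream_lookup(root, item):
    # Descend '/'-separated element names, keeping the last node's text.
    node, value = root, None
    for name in item.split("/"):
        node = node.find(name)   # rough analogue of scan_child
        if node is None:
            return None
        value = (node.text or "").strip()
    return value

doc = ET.fromstring(
    "<file><domainInfo><filePath>/some/inputdata/path</filePath></domainInfo></file>"
)
print(stream_lookup(doc, "domainInfo/filePath"))   # /some/inputdata/path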
This class inherits from GenericXML.py + +""" +# pylint: disable=import-error +import urllib.parse +import urllib.request +from CIME.XML.standard_module_setup import * +from CIME.XML.generic_xml import GenericXML +import ssl + +# pylint: disable=protected-access +ssl._create_default_https_context = ssl._create_unverified_context + + +class TestReporter(GenericXML): + def __init__(self): + """ + initialize an object + """ + self.root = None + + GenericXML.__init__( + self, + root_name_override="testrecord", + read_only=False, + infile="TestRecord.xml", + ) + + def setup_header( + self, tagname, machine, compiler, mpilib, testroot, testtype, baseline + ): + # + # Create the XML header that the testdb is expecting to recieve + # + for name, text, attribs in [ + ("tag_name", tagname, None), + ("mach", machine, None), + ("compiler", compiler, {"version": ""}), + ("mpilib", mpilib, {"version": ""}), + ("testroot", testroot, None), + ("testtype", testtype, None), + ("baselinetag", baseline, None), + ]: + self.make_child(name, attributes=attribs, text=text) + + def add_result(self, test_name, test_status): + # + # Add a test result to the XML structure. + # + tlelem = self.make_child("tests", {"testname": test_name}) + + for attrib_name, text in [ + ("casestatus", None), + ("comment", test_status["COMMENT"]), + ("compare", test_status["BASELINE"]), + ("memcomp", test_status["MEMCOMP"]), + ("memleak", test_status["MEMLEAK"]), + ("nlcomp", test_status["NLCOMP"]), + ("status", test_status["STATUS"]), + ("tputcomp", test_status["TPUTCOMP"]), + ]: + + self.make_child( + "category", attributes={"name": attrib_name}, text=text, root=tlelem + ) + + def push2testdb(self): + # + # Post test result XML to CESM test database + # + xmlstr = self.get_raw_record() + username = input("Username:") + os.system("stty -echo") + password = input("Password:") + os.system("stty echo") + print() + params = {"username": username, "password": password, "testXML": xmlstr} + url = "https://cseg.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" + data = urllib.parse.urlencode(params) + data = data.encode("ascii") + req = urllib.request.Request(url, data) + result = urllib.request.urlopen(req) + print(result.read()) diff --git a/CIME/XML/testlist.py b/CIME/XML/testlist.py new file mode 100644 index 00000000000..6dbe79b8f98 --- /dev/null +++ b/CIME/XML/testlist.py @@ -0,0 +1,142 @@ +""" +Interface to the config_files.xml file. This class inherits from generic_xml.py +It supports version 2.0 of the testlist.xml file + +In version 2 of the file options can be specified to further refine a test or +set of tests. They can be specified either at the top level, in which case they +apply to all machines/compilers for this test: + + + + + + ... 
+ + +or at the level of a particular machine/compiler: + + + + + + + + + + + +Currently supported options are: + +- walltime: sets the wallclock limit in the queuing system + +- memleak_tolerance: specifies the relative memory growth expected for this test + +- comment: has no effect, but is written out when printing the test list + +- workflow: adds a workflow to the test +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files + +logger = logging.getLogger(__name__) + + +class Testlist(GenericXML): + def __init__(self, infile, files=None): + """ + initialize an object + """ + schema = None + if files is None: + files = Files() + schema = files.get_schema("TESTS_SPEC_FILE") + GenericXML.__init__(self, infile, schema=schema) + expect( + self.get_version() >= 2.0, + "{} is an unsupported version of the testfile format and will be ignored".format( + infile + ), + ) + + def get_tests( + self, + machine=None, + category=None, + compiler=None, + compset=None, + grid=None, + supported_only=False, + ): + tests = [] + attributes = {} + if compset is not None: + attributes["compset"] = compset + if grid is not None: + attributes["grid"] = grid + + testnodes = self.get_children("test", attributes=attributes) + + machatts = {} + if machine is not None: + machatts["name"] = machine + if category is not None: + machatts["category"] = category + if compiler is not None: + machatts["compiler"] = compiler + + for tnode in testnodes: + if ( + supported_only + and self.has(tnode, "supported") + and self.get(tnode, "supported") == "false" + ): + continue + + machnode = self.get_optional_child("machines", root=tnode) + machnodes = ( + None + if machnode is None + else self.get_children("machine", machatts, root=machnode) + ) + if machnodes: + this_test_node = {} + for key, value in self.attrib(tnode).items(): + if key == "name": + this_test_node["testname"] = value + else: + this_test_node[key] = value + + # Get options that apply to all machines/compilers for this test + options = self.get_children("options", root=tnode) + if len(options) > 0: + optionnodes = self.get_children("option", root=options[0]) + else: + optionnodes = [] + for mach in machnodes: + # this_test_node can include multiple tests + this_test = dict(this_test_node) + for key, value in self.attrib(mach).items(): + if key == "name": + this_test["machine"] = value + else: + this_test[key] = value + this_test["options"] = {} + + for onode in optionnodes: + this_test["options"][self.get(onode, "name")] = self.text(onode) + + # Now get options specific to this machine/compiler + options = self.get_optional_child("options", root=mach) + optionnodes = ( + [] + if options is None + else self.get_children("option", root=options) + ) + for onode in optionnodes: + this_test["options"][self.get(onode, "name")] = self.text(onode) + + tests.append(this_test) + + return tests diff --git a/CIME/XML/tests.py b/CIME/XML/tests.py new file mode 100644 index 00000000000..4a9eefc0fc4 --- /dev/null +++ b/CIME/XML/tests.py @@ -0,0 +1,88 @@ +""" +Interface to the config_tests.xml file. 
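For orientation, each element of the list returned by get_tests above is a plain dictionary that merges the test attributes, the matched machine attributes, and an "options" sub-dictionary in which machine-level options override test-level ones of the same name. With invented values, one entry looks roughly like this:

example_entry = {
    "testname": "ERS_D",                 # from the test 'name' attribute
    "grid": "f19_g16",                   # remaining test attributes copied through
    "compset": "I2000Clm50BgcCrop",
    "machine": "derecho",                # from the matched machine node
    "compiler": "intel",
    "category": "aux_clm",
    "options": {                         # test-level options, then per-machine overrides
        "wallclock": "00:40:00",
        "comment": "illustration only",
    },
}
print(example_entry["testname"], example_entry["options"]["wallclock"])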
This class inherits from GenericEntry +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.generic_xml import GenericXML +from CIME.XML.files import Files +from CIME.utils import find_system_test +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.SystemTests.system_tests_compare_n import SystemTestsCompareN + +logger = logging.getLogger(__name__) + + +class Tests(GenericXML): + def __init__(self, infile=None, files=None): + """ + initialize an object interface to file config_tests.xml + """ + if infile is None: + if files is None: + files = Files() + infile = files.get_value("CONFIG_TESTS_FILE") + GenericXML.__init__(self, infile) + # append any component specific config_tests.xml files + for comp in files.get_components("CONFIG_TESTS_FILE"): + if comp is None: + continue + infile = files.get_value("CONFIG_TESTS_FILE", attribute={"component": comp}) + if os.path.isfile(infile): + self.read(infile) + + def support_single_exe(self, case): + """Checks if case supports --single-exe. + + Raises: + Exception: If system test cannot be found. + Exception: If `case` does not support --single-exe. + """ + testname = case.get_value("TESTCASE") + + try: + test = find_system_test(testname, case)(case, dry_run=True) + except Exception as e: + raise e + else: + # valid if subclass is SystemTestsCommon or _separate_builds is false + valid = ( + not issubclass(type(test), SystemTestsCompareTwo) + and not issubclass(type(test), SystemTestsCompareN) + ) or not test._separate_builds + + if not valid: + case_base_id = case.get_value("CASEBASEID") + + raise Exception( + f"{case_base_id} does not support the '--single-exe' option as it requires separate builds" + ) + + def get_test_node(self, testname): + logger.debug("Get settings for {}".format(testname)) + node = self.get_child("test", {"NAME": testname}) + logger.debug("Found {}".format(self.text(node))) + return node + + def print_values(self, skip_infrastructure_tests=True): + """ + Print each test type and its description. + + If skip_infrastructure_tests is True, then this does not write + information for tests with the attribute + INFRASTRUCTURE_TEST="TRUE". + """ + all_tests = [] + root = self.get_optional_child("testlist") + if root is not None: + all_tests = self.get_children("test", root=root) + for one_test in all_tests: + if skip_infrastructure_tests: + infrastructure_test = self.get(one_test, "INFRASTRUCTURE_TEST") + if ( + infrastructure_test is not None + and infrastructure_test.upper() == "TRUE" + ): + continue + name = self.get(one_test, "NAME") + desc = self.get_element_text("DESC", root=one_test) + logger.info("{}: {}".format(name, desc)) diff --git a/CIME/XML/testspec.py b/CIME/XML/testspec.py new file mode 100644 index 00000000000..9b4e7c37724 --- /dev/null +++ b/CIME/XML/testspec.py @@ -0,0 +1,66 @@ +""" +Interface to the testspec.xml file. 
This class inherits from generic_xml.py +""" +from CIME.XML.standard_module_setup import * + +from CIME.XML.generic_xml import GenericXML + +logger = logging.getLogger(__name__) + + +class TestSpec(GenericXML): + def __init__(self, infile): + """ + initialize an object + """ + GenericXML.__init__(self, infile) + self._testnodes = {} + self._testlist_node = None + if os.path.isfile(infile): + testnodes = self.get_children("test") + for node in testnodes: + self._testnodes[self.get(node, "name")] = node + + def set_header( + self, testroot, machine, testid, baselinetag=None, baselineroot=None + ): + tlelem = self.make_child("testlist") + + for name, text in [ + ("testroot", testroot), + ("machine", machine), + ("testid", testid), + ("baselinetag", baselinetag), + ("baselineroot", baselineroot), + ]: + if text is not None: + self.make_child(name, root=tlelem, text=text) + + self._testlist_node = tlelem + + def add_test(self, compiler, mpilib, testname): + expect( + testname not in self._testnodes, + "Test {} already in testlist".format(testname), + ) + + telem = self.make_child( + "test", attributes={"name": testname}, root=self._testlist_node + ) + + for name, text in [("compiler", compiler), ("mpilib", mpilib)]: + self.make_child(name, root=telem, text=text) + + self._testnodes[testname] = telem + + def update_test_status(self, testname, phase, status): + expect( + testname in self._testnodes, + "Test {} not defined in testlist".format(testname), + ) + root = self._testnodes[testname] + pnode = self.get_optional_child("section", {"name": phase}, root=root) + if pnode is not None: + self.set(pnode, "status", status) + else: + self.make_child("section", {"name": phase, "status": status}, root=root) diff --git a/scripts/lib/CIME/XML/workflow.py b/CIME/XML/workflow.py similarity index 76% rename from scripts/lib/CIME/XML/workflow.py rename to CIME/XML/workflow.py index 91767d277eb..4824e03b666 100644 --- a/scripts/lib/CIME/XML/workflow.py +++ b/CIME/XML/workflow.py @@ -9,8 +9,8 @@ logger = logging.getLogger(__name__) -class Workflow(GenericXML): +class Workflow(GenericXML): def __init__(self, infile=None, files=None): """ initialize an object @@ -25,15 +25,15 @@ def __init__(self, infile=None, files=None): GenericXML.__init__(self, infile, schema=schema) - #Append the contents of $HOME/.cime/config_workflow.xml if it exists - #This could cause problems if node matchs are repeated when only one is expected - infile = os.path.join(os.environ.get("HOME"),".cime","config_workflow.xml") + # Append the contents of $HOME/.cime/config_workflow.xml if it exists + # This could cause problems if node matchs are repeated when only one is expected + infile = os.path.join(os.environ.get("HOME"), ".cime", "config_workflow.xml") if os.path.exists(infile): GenericXML.read(self, infile) - def get_workflow_jobs(self, machine, workflow_case="default"): + def get_workflow_jobs(self, machine, workflowid="default"): """ - Return a list of jobs with the first element the name of the case script + Return a list of jobs with the first element the name of the script and the second a dict of qualifiers for the job """ jobs = [] @@ -41,8 +41,13 @@ def get_workflow_jobs(self, machine, workflow_case="default"): findmore = True prepend = False while findmore: - bnode = self.get_optional_child("workflow_jobs", attributes={"case":workflow_case}) - expect(bnode,"No workflow_case {} found in file {}".format(workflow_case, self.filename)) + bnode = self.get_optional_child( + "workflow_jobs", attributes={"id": workflowid} + ) + 
expect( + bnode, + "No workflow {} found in file {}".format(workflowid, self.filename), + ) if prepend: bnodes = [bnode] + bnodes else: @@ -50,10 +55,10 @@ def get_workflow_jobs(self, machine, workflow_case="default"): prepend = False workflow_attribs = self.attrib(bnode) if "prepend" in workflow_attribs: - workflow_case = workflow_attribs["prepend"] + workflowid = workflow_attribs["prepend"] prepend = True elif "append" in workflow_attribs: - workflow_case = workflow_attribs["append"] + workflowid = workflow_attribs["append"] else: findmore = False for bnode in bnodes: @@ -64,7 +69,7 @@ def get_workflow_jobs(self, machine, workflow_case="default"): for child in self.get_children(root=jnode): if self.name(child) == "runtime_parameters": attrib = self.attrib(child) - if attrib and attrib == {'MACH' : machine}: + if attrib and attrib == {"MACH": machine}: for rtchild in self.get_children(root=child): jdict[self.name(rtchild)] = self.text(rtchild) elif not attrib: diff --git a/scripts/lib/CIME/__init__.py b/CIME/__init__.py similarity index 100% rename from scripts/lib/CIME/__init__.py rename to CIME/__init__.py diff --git a/CIME/aprun.py b/CIME/aprun.py new file mode 100755 index 00000000000..ca767c88db8 --- /dev/null +++ b/CIME/aprun.py @@ -0,0 +1,194 @@ +""" +Aprun is far too complex to handle purely through XML. We need python +code to compute and assemble aprun commands. +""" + +from CIME.XML.standard_module_setup import * + +import math + +logger = logging.getLogger(__name__) + +############################################################################### +def _get_aprun_cmd_for_case_impl( + ntasks, + nthreads, + rootpes, + pstrids, + max_tasks_per_node, + max_mpitasks_per_node, + pio_numtasks, + pio_async_interface, + compiler, + machine, + run_exe, + extra_args, +): + ############################################################################### + """ + No one really understands this code, but we can at least test it. 
+ + >>> ntasks = [512, 675, 168, 512, 128, 168, 168, 512, 1] + >>> nthreads = [2, 2, 2, 2, 4, 2, 2, 2, 1] + >>> rootpes = [0, 0, 512, 0, 680, 512, 512, 0, 0] + >>> pstrids = [1, 1, 1, 1, 1, 1, 1, 1, 1] + >>> max_tasks_per_node = 16 + >>> max_mpitasks_per_node = 16 + >>> pio_numtasks = -1 + >>> pio_async_interface = False + >>> compiler = "pgi" + >>> machine = "titan" + >>> run_exe = "e3sm.exe" + >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, max_mpitasks_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe, None) + (' -S 4 -n 680 -N 8 -d 2 e3sm.exe : -S 2 -n 128 -N 4 -d 4 e3sm.exe ', 117, 808, 4, 4) + >>> compiler = "intel" + >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, max_mpitasks_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe, None) + (' -S 4 -cc numa_node -n 680 -N 8 -d 2 e3sm.exe : -S 2 -cc numa_node -n 128 -N 4 -d 4 e3sm.exe ', 117, 808, 4, 4) + + >>> ntasks = [64, 64, 64, 64, 64, 64, 64, 64, 1] + >>> nthreads = [1, 1, 1, 1, 1, 1, 1, 1, 1] + >>> rootpes = [0, 0, 0, 0, 0, 0, 0, 0, 0] + >>> pstrids = [1, 1, 1, 1, 1, 1, 1, 1, 1] + >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, max_mpitasks_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe, None) + (' -S 8 -cc numa_node -n 64 -N 16 -d 1 e3sm.exe ', 4, 64, 16, 1) + """ + if extra_args is None: + extra_args = {} + + max_tasks_per_node = 1 if max_tasks_per_node < 1 else max_tasks_per_node + + total_tasks = 0 + for ntask, rootpe, pstrid in zip(ntasks, rootpes, pstrids): + tt = rootpe + (ntask - 1) * pstrid + 1 + total_tasks = max(tt, total_tasks) + + # Check if we need to add pio's tasks to the total task count + if pio_async_interface: + total_tasks += pio_numtasks if pio_numtasks > 0 else max_mpitasks_per_node + + # Compute max threads for each mpi task + maxt = [0] * total_tasks + for ntask, nthrd, rootpe, pstrid in zip(ntasks, nthreads, rootpes, pstrids): + c2 = 0 + while c2 < ntask: + s = rootpe + c2 * pstrid + if nthrd > maxt[s]: + maxt[s] = nthrd + + c2 += 1 + + # make sure all maxt values at least 1 + for c1 in range(0, total_tasks): + if maxt[c1] < 1: + maxt[c1] = 1 + + global_flags = " ".join( + [x for x, y in extra_args.items() if y["position"] == "global"] + ) + + per_flags = " ".join([x for x, y in extra_args.items() if y["position"] == "per"]) + + # Compute task and thread settings for batch commands + ( + tasks_per_node, + min_tasks_per_node, + task_count, + thread_count, + max_thread_count, + total_node_count, + total_task_count, + aprun_args, + ) = (0, max_mpitasks_per_node, 1, maxt[0], maxt[0], 0, 0, f" {global_flags}") + c1list = list(range(1, total_tasks)) + c1list.append(None) + for c1 in c1list: + if c1 is None or maxt[c1] != thread_count: + tasks_per_node = min( + max_mpitasks_per_node, int(max_tasks_per_node / thread_count) + ) + + tasks_per_node = min(task_count, tasks_per_node) + + # Compute for every subset + task_per_numa = int(math.ceil(tasks_per_node / 2.0)) + # Option for Titan + if machine == "titan" and tasks_per_node > 1: + aprun_args += " -S {:d}".format(task_per_numa) + if compiler == "intel": + aprun_args += " -cc numa_node" + + aprun_args += " -n {:d} -N {:d} -d {:d} {} {} {}".format( + task_count, + tasks_per_node, + thread_count, + per_flags, + run_exe, + "" if c1 is None else ":", + ) + + node_count = int(math.ceil(float(task_count) / tasks_per_node)) + total_node_count += node_count + total_task_count += 
task_count + + if tasks_per_node < min_tasks_per_node: + min_tasks_per_node = tasks_per_node + + if c1 is not None: + thread_count = maxt[c1] + max_thread_count = max(max_thread_count, maxt[c1]) + task_count = 1 + + else: + task_count += 1 + + return ( + aprun_args, + total_node_count, + total_task_count, + min_tasks_per_node, + max_thread_count, + ) + + +############################################################################### +def get_aprun_cmd_for_case(case, run_exe, overrides=None, extra_args=None): + ############################################################################### + """ + Given a case, construct and return the aprun command and optimized node count + """ + models = case.get_values("COMP_CLASSES") + ntasks, nthreads, rootpes, pstrids = [], [], [], [] + for model in models: + model = "CPL" if model == "DRV" else model + for the_list, item_name in zip( + [ntasks, nthreads, rootpes, pstrids], + ["NTASKS", "NTHRDS", "ROOTPE", "PSTRID"], + ): + the_list.append(case.get_value("_".join([item_name, model]))) + max_tasks_per_node = case.get_value("MAX_TASKS_PER_NODE") + if overrides: + overrides = { + x: y if isinstance(y, int) or y is None else int(y) + for x, y in overrides.items() + } + if "max_tasks_per_node" in overrides: + max_tasks_per_node = overrides["max_tasks_per_node"] + if "total_tasks" in overrides: + ntasks = [overrides["total_tasks"] if x > 1 else x for x in ntasks] + if "thread_count" in overrides: + nthreads = [overrides["thread_count"] if x > 1 else x for x in nthreads] + + return _get_aprun_cmd_for_case_impl( + ntasks, + nthreads, + rootpes, + pstrids, + max_tasks_per_node, + case.get_value("MAX_MPITASKS_PER_NODE"), + case.get_value("PIO_NUMTASKS"), + case.get_value("PIO_ASYNC_INTERFACE"), + case.get_value("COMPILER"), + case.get_value("MACH"), + run_exe, + extra_args, + ) diff --git a/scripts/lib/CIME/tests/SystemTests/__init__.py b/CIME/baselines/__init__.py similarity index 100% rename from scripts/lib/CIME/tests/SystemTests/__init__.py rename to CIME/baselines/__init__.py diff --git a/CIME/baselines/performance.py b/CIME/baselines/performance.py new file mode 100644 index 00000000000..67c19dbd43f --- /dev/null +++ b/CIME/baselines/performance.py @@ -0,0 +1,612 @@ +import os +import glob +import re +import gzip +import logging +from CIME.config import Config +from CIME.utils import expect, get_src_root, get_current_commit, get_timestamp + +logger = logging.getLogger(__name__) + + +def perf_compare_throughput_baseline(case, baseline_dir=None): + """ + Compares model throughput. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline_dir : str + Overrides the baseline directory. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + Provides explanation from comparison. 
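In practice the default comparison defined further below, _perf_compare_throughput_baseline, treats throughput as simulated years per day, where larger is better, and flags a failure once (baseline - current) / baseline reaches the tolerance. A quick numeric illustration with made-up numbers:

def throughput_ok(baseline_sypd, current_sypd, tolerance=0.1):
    # True while the slowdown relative to the baseline stays within tolerance.
    diff = (baseline_sypd - current_sypd) / baseline_sypd
    return diff < tolerance

print(throughput_ok(20.0, 19.0))   # 5% slower  -> True (passes)
print(throughput_ok(20.0, 17.0))   # 15% slower -> False (fails)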
+ """ + if baseline_dir is None: + baseline_dir = case.get_baseline_dir() + + config = load_coupler_customization(case) + + baseline_file = os.path.join(baseline_dir, "cpl-tput.log") + + baseline = read_baseline_file(baseline_file) + + tolerance = case.get_value("TEST_TPUT_TOLERANCE") + + if tolerance is None: + tolerance = 0.1 + + expect( + tolerance > 0.0, + "Bad value for throughput tolerance in test", + ) + + try: + below_tolerance, comment = config.perf_compare_throughput_baseline( + case, baseline, tolerance + ) + except AttributeError: + below_tolerance, comment = _perf_compare_throughput_baseline( + case, baseline, tolerance + ) + + return below_tolerance, comment + + +def perf_compare_memory_baseline(case, baseline_dir=None): + """ + Compares model highwater memory usage. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline_dir : str + Overrides the baseline directory. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + Provides explanation from comparison. + """ + if baseline_dir is None: + baseline_dir = case.get_baseline_dir() + + config = load_coupler_customization(case) + + baseline_file = os.path.join(baseline_dir, "cpl-mem.log") + + baseline = read_baseline_file(baseline_file) + + tolerance = case.get_value("TEST_MEMLEAK_TOLERANCE") + + if tolerance is None: + tolerance = 0.1 + + try: + below_tolerance, comments = config.perf_compare_memory_baseline( + case, baseline, tolerance + ) + except AttributeError: + below_tolerance, comments = _perf_compare_memory_baseline( + case, baseline, tolerance + ) + + return below_tolerance, comments + + +def perf_write_baseline(case, basegen_dir, throughput=True, memory=True): + """ + Writes the baseline performance files. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + basegen_dir : str + Path to baseline directory. + throughput : bool + If true, write throughput baseline. + memory : bool + If true, write memory baseline. + """ + config = load_coupler_customization(case) + + if throughput: + try: + tput, mode = perf_get_throughput(case, config) + except RuntimeError as e: + logger.debug("Could not get throughput: {0!s}".format(e)) + else: + baseline_file = os.path.join(basegen_dir, "cpl-tput.log") + + write_baseline_file(baseline_file, tput, mode) + + logger.info("Updated throughput baseline to {!s}".format(tput)) + + if memory: + try: + mem, mode = perf_get_memory(case, config) + except RuntimeError as e: + logger.info("Could not get memory usage: {0!s}".format(e)) + else: + baseline_file = os.path.join(basegen_dir, "cpl-mem.log") + + write_baseline_file(baseline_file, mem, mode) + + logger.info("Updated memory usage baseline to {!s}".format(mem)) + + +def load_coupler_customization(case): + """ + Loads customizations from the coupler `cime_config` directory. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + CIME.config.Config + Runtime configuration. + """ + comp_root_dir_cpl = case.get_value("COMP_ROOT_DIR_CPL") + + cpl_customize = os.path.join(comp_root_dir_cpl, "cime_config", "customize") + + return Config.load(cpl_customize) + + +def perf_get_throughput(case, config): + """ + Gets the model throughput. + + First attempts to use a coupler define method to retrieve the + models throughput. If this is not defined then the default + method of parsing the coupler log is used. 
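This try/except AttributeError pattern recurs throughout the module: if the coupler's customize directory defines a hook with the matching name it is used, otherwise the underscore-prefixed default is called. A stripped-down sketch of that dispatch, with a dummy object standing in for the loaded Config and a placeholder value standing in for the default parser:

class NoHooks:
    # Stands in for a coupler customization that defines no perf hooks.
    pass

def throughput_with_fallback(config, case):
    try:
        return config.perf_get_throughput(case)   # coupler-provided hook, if any
    except AttributeError:
        return "12.345", "a"                      # placeholder for _perf_get_throughput(case)

print(throughput_with_fallback(NoHooks(), case=None))   # falls back to the default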
+ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model throughput. + """ + try: + tput, mode = config.perf_get_throughput(case) + except AttributeError: + tput, mode = _perf_get_throughput(case) + + return tput, mode + + +def perf_get_memory(case, config): + """ + Gets the model memory usage. + + First attempts to use a coupler defined method to retrieve the + models memory usage. If this is not defined then the default + method of parsing the coupler log is used. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model memory usage. + """ + try: + mem, mode = config.perf_get_memory(case) + except AttributeError: + mem, mode = _perf_get_memory(case) + + return mem, mode + + +def write_baseline_file(baseline_file, value, mode="a"): + """ + Writes value to `baseline_file`. + + Parameters + ---------- + baseline_file : str + Path to the baseline file. + value : str + Value to write. + mode : str + Mode to open file with. + """ + with open(baseline_file, mode) as fd: + fd.write(value) + + +def _perf_get_memory(case, cpllog=None): + """ + Default function to retrieve memory usage from the coupler log. + + If the usage is not available from the log then `None` is returned. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + cpllog : str + Overrides the default coupler log. + + Returns + ------- + str or None + Model memory usage or `None`. + + Raises + ------ + RuntimeError + If not enough sample were found. + """ + memlist = perf_get_memory_list(case, cpllog) + + if memlist is None: + raise RuntimeError("Could not get default memory usage") from None + + value = _format_baseline(memlist[-1][1]) + + return value, "a" + + +def perf_get_memory_list(case, cpllog): + if cpllog is None: + cpllog = get_latest_cpl_logs(case) + else: + cpllog = [ + cpllog, + ] + + try: + memlist = get_cpl_mem_usage(cpllog[0]) + except (FileNotFoundError, IndexError): + memlist = None + + logger.debug("Could not parse memory usage from coupler log") + else: + if len(memlist) <= 3: + raise RuntimeError( + f"Found {len(memlist)} memory usage samples, need atleast 4" + ) + + return memlist + + +def _perf_get_throughput(case): + """ + Default function to retrieve throughput from the coupler log. + + If the throughput is not available from the log then `None` is returned. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model throughput or `None`. 
+ """ + cpllog = get_latest_cpl_logs(case) + + try: + tput = get_cpl_throughput(cpllog[0]) + except (FileNotFoundError, IndexError): + tput = None + + logger.debug("Could not parse throughput from coupler log") + + if tput is None: + raise RuntimeError("Could not get default throughput") from None + + value = _format_baseline(tput) + + return value, "a" + + +def get_latest_cpl_logs(case): + """ + find and return the latest cpl log file in the run directory + """ + coupler_log_path = case.get_value("RUNDIR") + + cpllog_name = "med" if case.get_value("COMP_INTERFACE") == "nuopc" else "cpl" + + cpllogs = glob.glob(os.path.join(coupler_log_path, "{}*.log.*".format(cpllog_name))) + + lastcpllogs = [] + + if cpllogs: + lastcpllogs.append(max(cpllogs, key=os.path.getctime)) + + basename = os.path.basename(lastcpllogs[0]) + + suffix = basename.split(".", 1)[1] + + for log in cpllogs: + if log in lastcpllogs: + continue + + if log.endswith(suffix): + lastcpllogs.append(log) + + return lastcpllogs + + +def get_cpl_mem_usage(cpllog): + """ + Read memory usage from coupler log. + + Parameters + ---------- + cpllog : str + Path to the coupler log. + + Returns + ------- + list + Memory usage (data, highwater) as recorded by the coupler or empty list. + """ + memlist = [] + + meminfo = re.compile(r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater") + + if cpllog is not None and os.path.isfile(cpllog): + if ".gz" == cpllog[-3:]: + fopen = gzip.open + else: + fopen = open + + with fopen(cpllog, "rb") as f: + for line in f: + m = meminfo.match(line.decode("utf-8")) + + if m: + memlist.append((float(m.group(1)), float(m.group(2)))) + + # Remove the last mem record, it's sometimes artificially high + if len(memlist) > 0: + memlist.pop() + + return memlist + + +def get_cpl_throughput(cpllog): + """ + Reads throuhgput from coupler log. + + Parameters + ---------- + cpllog : str + Path to the coupler log. + + Returns + ------- + int or None + Throughput as recorded by the coupler or None + """ + if cpllog is not None and os.path.isfile(cpllog): + with gzip.open(cpllog, "rb") as f: + cpltext = f.read().decode("utf-8") + + m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s", cpltext) + + if m: + return float(m.group(1)) + return None + + +def read_baseline_file(baseline_file): + """ + Reads value from `baseline_file`. + + Strips comments and returns the raw content to be decoded. + + Parameters + ---------- + baseline_file : str + Path to the baseline file. + + Returns + ------- + str + Value stored in baseline file without comments. + """ + if not os.path.exists(baseline_file): + return "\nNO file {} found".format(baseline_file) + with open(baseline_file) as fd: + lines = [x.strip() for x in fd.readlines() if not x.startswith("#") and x != ""] + + return "\n".join(lines) + + +def _perf_compare_throughput_baseline(case, baseline, tolerance): + """ + Default throughput baseline comparison. + + Compares the throughput from the coupler to the baseline value. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : list + Lines contained in the baseline file. + tolerance : float + Allowed tolerance for comparison. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + provides explanation from comparison. 
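The two log parsers above scrape free-form coupler output with regular expressions; the exact wording comes from the coupler itself, so the sample lines below are only approximations of what such logs contain, but they do match the patterns used in get_cpl_mem_usage and get_cpl_throughput:

import re

mem_re = re.compile(r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater")
tput_re = re.compile(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s")

mem_line = "memory_write: model date = 00010102 memory = 1234.56 MB (highwater)"
tput_line = "# simulated years / cmp-day =      25.10 "

m = mem_re.match(mem_line)
print(m.group(1), m.group(2))                 # 00010102 1234.56
print(tput_re.search(tput_line).group(1))     # 25.10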
+ """ + current, _ = _perf_get_throughput(case) + + try: + current = float(_parse_baseline(current)) + except (ValueError, TypeError): + comment = "Could not compare throughput to baseline, as baseline had no value." + + return None, comment + + try: + # default baseline is stored as single float + baseline = float(_parse_baseline(baseline)) + except (ValueError, TypeError): + comment = "Could not compare throughput to baseline, as baseline had no value." + + return None, comment + + # comparing ypd so bigger is better + diff = (baseline - current) / baseline + + below_tolerance = None + + if diff is not None: + below_tolerance = diff < tolerance + + info = "Throughput changed by {:.2f}%: baseline={:.3f} sypd, tolerance={:d}%, current={:.3f} sypd".format( + diff * 100, baseline, int(tolerance * 100), current + ) + if below_tolerance: + comment = "TPUTCOMP: " + info + else: + comment = "Error: TPUTCOMP: " + info + + return below_tolerance, comment + + +def _perf_compare_memory_baseline(case, baseline, tolerance): + """ + Default memory usage baseline comparison. + + Compares the highwater memory usage from the coupler to the baseline value. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : list + Lines contained in the baseline file. + tolerance : float + Allowed tolerance for comparison. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + provides explanation from comparison. + """ + try: + current, _ = _perf_get_memory(case) + except RuntimeError as e: + return None, str(e) + + try: + current = float(_parse_baseline(current)) + except (ValueError, TypeError): + comment = "Could not compare throughput to baseline, as baseline had no value." + + return None, comment + + try: + # default baseline is stored as single float + baseline = float(_parse_baseline(baseline)) + except (ValueError, TypeError): + baseline = 0.0 + + try: + diff = (current - baseline) / baseline + except ZeroDivisionError: + diff = 0.0 + + # Should we check if tolerance is above 0 + below_tolerance = None + comment = "" + + if diff is not None: + below_tolerance = diff < tolerance + + info = "Memory usage highwater changed by {:.2f}%: baseline={:.3f} MB, tolerance={:d}%, current={:.3f} MB".format( + diff * 100, baseline, int(tolerance * 100), current + ) + if below_tolerance: + comment = "MEMCOMP: " + info + else: + comment = "Error: MEMCOMP: " + info + + return below_tolerance, comment + + +def _format_baseline(value): + """ + Encodes value with default baseline format. + + Default format: + sha: date: + + Parameters + ---------- + value : str + Baseline value to encode. + + Returns + ------- + value : str + Baseline entry. + """ + commit_hash = get_current_commit(repo=get_src_root()) + + timestamp = get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S") + + return f"sha:{commit_hash} date:{timestamp} {value}\n" + + +def _parse_baseline(data): + """ + Parses default baseline format. + + Default format: + sha: date: + + Parameters + ---------- + data : str + Containing contents of baseline file. + + Returns + ------- + value : str + Value of the latest blessed baseline. 
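_format_baseline above and _parse_baseline agree on a simple line format, sha:<hash> date:<timestamp> <value>, where the value is recovered as the last whitespace-separated token of the most recent non-empty line. A round trip with a fabricated entry shows the convention:

def format_entry(commit_hash, timestamp, value):
    # Same layout as written by _format_baseline.
    return f"sha:{commit_hash} date:{timestamp} {value}\n"

def parse_latest(text):
    # Last token of the last non-empty line, as in _parse_baseline.
    lines = [x for x in text.split("\n") if x != ""]
    return lines[-1].strip().split(" ")[-1] if lines else None

entry = format_entry("0123abcd", "2024-01-01_12:00:00", "21.300")   # fabricated values
print(parse_latest(entry))   # 21.300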
+ """ + lines = data.split("\n") + lines = [x for x in lines if x != ""] + + try: + value = lines[-1].strip().split(" ")[-1] + except IndexError: + value = None + + return value diff --git a/CIME/bless_test_results.py b/CIME/bless_test_results.py new file mode 100644 index 00000000000..e7e601b682e --- /dev/null +++ b/CIME/bless_test_results.py @@ -0,0 +1,545 @@ +import CIME.compare_namelists, CIME.simple_compare +from CIME.test_scheduler import NAMELIST_PHASE +from CIME.utils import ( + run_cmd, + get_scripts_root, + EnvironmentContext, + parse_test_name, + match_any, +) +from CIME.config import Config +from CIME.test_status import * +from CIME.hist_utils import generate_baseline, compare_baseline, NO_ORIGINAL +from CIME.case import Case +from CIME.test_utils import get_test_status_files +from CIME.baselines.performance import ( + perf_compare_throughput_baseline, + perf_compare_memory_baseline, + perf_write_baseline, +) +import os, time + +logger = logging.getLogger(__name__) + + +def _bless_throughput( + case, + test_name, + baseline_root, + baseline_name, + report_only, + force, +): + success = True + reason = None + below_threshold = False + + baseline_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + try: + below_threshold, comment = perf_compare_throughput_baseline( + case, baseline_dir=baseline_dir + ) + except FileNotFoundError as e: + comment = f"Could not read throughput file: {e!s}" + except Exception as e: + comment = f"Error comparing throughput baseline: {e!s}" + + if below_threshold: + logger.info("Throughput diff appears to have been already resolved.") + else: + logger.info(comment) + + if not report_only and ( + force or input("Update this diff (y/n)? ").upper() in ["Y", "YES"] + ): + try: + perf_write_baseline(case, baseline_dir, memory=False) + except Exception as e: + success = False + + reason = f"Failed to write baseline throughput for {test_name!r}: {e!s}" + + return success, reason + + +def _bless_memory( + case, + test_name, + baseline_root, + baseline_name, + report_only, + force, +): + success = True + reason = None + below_threshold = False + + baseline_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + try: + below_threshold, comment = perf_compare_memory_baseline( + case, baseline_dir=baseline_dir + ) + except FileNotFoundError as e: + comment = f"Could not read memory usage file: {e!s}" + except Exception as e: + comment = f"Error comparing memory baseline: {e!s}" + + if below_threshold: + logger.info("Memory usage diff appears to have been already resolved.") + else: + logger.info(comment) + + if not report_only and ( + force or input("Update this diff (y/n)? ").upper() in ["Y", "YES"] + ): + try: + perf_write_baseline(case, baseline_dir, throughput=False) + except Exception as e: + success = False + + reason = f"Failed to write baseline memory usage for test {test_name!r}: {e!s}" + + return success, reason + + +############################################################################### +def bless_namelists( + test_name, + report_only, + force, + pes_file, + baseline_name, + baseline_root, + new_test_root=None, + new_test_id=None, +): + ############################################################################### + # Be aware that restart test will overwrite the original namelist files + # with versions of the files that should not be blessed. This forces us to + # re-run create_test. 
+ + # Update namelist files + logger.info("Test '{}' had namelist diff".format(test_name)) + if not report_only and ( + force or input("Update namelists (y/n)? ").upper() in ["Y", "YES"] + ): + config = Config.instance() + + create_test_gen_args = ( + " -g {} ".format(baseline_name) + if config.create_test_flag_mode == "cesm" + else " -g -b {} ".format(baseline_name) + ) + + if new_test_root is not None: + create_test_gen_args += " --test-root={0} --output-root={0} ".format( + new_test_root + ) + if new_test_id is not None: + create_test_gen_args += " -t {}".format(new_test_id) + + if pes_file is not None: + create_test_gen_args += " --pesfile {}".format(pes_file) + + stat, out, _ = run_cmd( + "{}/create_test {} --namelists-only {} --baseline-root {} -o".format( + get_scripts_root(), test_name, create_test_gen_args, baseline_root + ), + combine_output=True, + ) + if stat != 0: + return False, "Namelist regen failed: '{}'".format(out) + else: + return True, None + else: + return True, None + + +def bless_history(test_name, case, baseline_name, baseline_root, report_only, force): + real_user = case.get_value("REALUSER") + with EnvironmentContext(USER=real_user): + + baseline_full_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + cmp_result, cmp_comments = compare_baseline( + case, baseline_dir=baseline_full_dir, outfile_suffix=None + ) + if cmp_result: + logger.info("Diff appears to have been already resolved.") + return True, None + else: + logger.info(cmp_comments) + if not report_only and ( + force or input("Update this diff (y/n)? ").upper() in ["Y", "YES"] + ): + # Sometimes, in order to get things passing, files have to be removed + # from the baseline area. + for line in cmp_comments.splitlines(): + if NO_ORIGINAL in line: + file_to_remove = ( + line.split(NO_ORIGINAL)[0].split()[-1].strip("'") + ) + logger.info( + "Removing stale baseline file {}".format(file_to_remove) + ) + os.remove(os.path.join(baseline_full_dir, file_to_remove)) + + gen_result, gen_comments = generate_baseline( + case, baseline_dir=baseline_full_dir + ) + if not gen_result: + logger.warning( + "Hist file bless FAILED for test {}".format(test_name) + ) + return False, "Generate baseline failed: {}".format(gen_comments) + else: + logger.info(gen_comments) + return True, None + else: + return True, None + + +def bless_test_results( + baseline_name, + baseline_root, + test_root, + compiler, + test_id=None, + namelists_only=False, + hist_only=False, + report_only=False, + force=False, + pes_file=None, + bless_tests=None, + no_skip_pass=False, + lock_baselines=False, + new_test_root=None, + new_test_id=None, + exclude=None, + bless_tput=False, + bless_mem=False, + bless_perf=False, + **_, # Capture all for extra +): + if bless_perf: + bless_mem = True + bless_tput = True + + bless_all_non_perf = not (namelists_only | hist_only | bless_tput | bless_mem) + is_perf_bless = bless_mem or bless_tput + + expect( + not (is_perf_bless and hist_only) and not (is_perf_bless and namelists_only), + "Do not mix performance and non-performance blesses", + ) + + test_status_files = get_test_status_files(test_root, compiler, test_id=test_id) + + # auto-adjust test-id if multiple rounds of tests were matched + timestamps = set() + for test_status_file in test_status_files: + timestamp = os.path.basename(os.path.dirname(test_status_file)).split(".")[-1] + timestamps.add(timestamp) + + if len(timestamps) > 1: + logger.warning( + "Multiple sets of tests were matched! Selected only most recent tests." 
+ ) + + most_recent = sorted(timestamps)[-1] + logger.info("Matched test batch is {}".format(most_recent)) + + bless_tests_counts = [] + if bless_tests: + bless_tests_counts = dict([(bless_test, 0) for bless_test in bless_tests]) + + # compile excludes into single regex + if exclude is not None: + exclude = re.compile("|".join([f"({x})" for x in exclude])) + + broken_blesses = [] + for test_status_file in test_status_files: + if not most_recent in test_status_file: + logger.info("Skipping {}".format(test_status_file)) + continue + + test_dir = os.path.dirname(test_status_file) + ts = TestStatus(test_dir=test_dir) + test_name = ts.get_name() + testopts = parse_test_name(test_name)[1] + testopts = [] if testopts is None else testopts + build_only = "B" in testopts + # TODO test_name will never be None otherwise `parse_test_name` would raise an error + if test_name is None: + case_dir = os.path.basename(test_dir) + test_name = CIME.utils.normalize_case_id(case_dir) + if not bless_tests or match_any(test_name, bless_tests_counts): + broken_blesses.append( + ( + "unknown", + "test had invalid TestStatus file: '{}'".format( + test_status_file + ), + ) + ) + continue + else: + continue + + # Must pass tests to continue + has_no_tests = bless_tests in [[], None] + match_test_name = match_any(test_name, bless_tests_counts) + excluded = exclude.match(test_name) if exclude else False + + if (not has_no_tests and not match_test_name) or excluded: + logger.debug("Skipping {!r}".format(test_name)) + + continue + + overall_result, phase = ts.get_overall_test_status( + ignore_namelists=True, + ignore_memleak=True, + ignore_diffs=is_perf_bless, + check_throughput=bless_tput, + check_memory=bless_mem, + ) + + # See if we need to bless namelist + if namelists_only or bless_all_non_perf: + if no_skip_pass: + nl_bless = True + else: + nl_bless = ts.get_status(NAMELIST_PHASE) != TEST_PASS_STATUS + else: + nl_bless = False + + hist_bless, tput_bless, mem_bless = [False] * 3 + + # Skip if test is build only i.e. 
testopts contains "B" + if not build_only: + hist_bless = is_hist_bless_needed( + test_name, ts, broken_blesses, overall_result, no_skip_pass, phase + ) and (hist_only or bless_all_non_perf) + tput_bless = ( + bless_tput and ts.get_status(THROUGHPUT_PHASE) != TEST_PASS_STATUS + ) + mem_bless = bless_mem and ts.get_status(MEMCOMP_PHASE) != TEST_PASS_STATUS + + expect( + not ((nl_bless or hist_bless) and (tput_bless or mem_bless)), + "Do not mix performance and non-performance blessing", + ) + + # Now, do the bless + if not nl_bless and not hist_bless and not tput_bless and not mem_bless: + logger.info( + "Nothing to bless for test: {}, overall status: {}".format( + test_name, overall_result + ) + ) + else: + logger.debug("Determined blesses for {!r}".format(test_name)) + logger.debug("nl_bless = {}".format(nl_bless)) + logger.debug("hist_bless = {}".format(hist_bless)) + logger.debug("tput_bless = {}".format(tput_bless)) + logger.debug("mem_bless = {}".format(mem_bless)) + + logger.info( + "###############################################################################" + ) + logger.info( + "Blessing results for test: {}, most recent result: {}".format( + test_name, overall_result + ) + ) + logger.info("Case dir: {}".format(test_dir)) + logger.info( + "###############################################################################" + ) + if not force: + time.sleep(2) + + with Case(test_dir) as case: + # Resolve baseline_name and baseline_root + if baseline_name is None: + baseline_name_resolved = case.get_value("BASELINE_NAME_CMP") + if not baseline_name_resolved: + cime_root = CIME.utils.get_cime_root() + baseline_name_resolved = CIME.utils.get_current_branch( + repo=cime_root + ) + else: + baseline_name_resolved = baseline_name + + if baseline_root is None: + baseline_root_resolved = case.get_value("BASELINE_ROOT") + else: + baseline_root_resolved = baseline_root + + if baseline_name_resolved is None: + broken_blesses.append( + (test_name, "Could not determine baseline name") + ) + continue + + if baseline_root_resolved is None: + broken_blesses.append( + (test_name, "Could not determine baseline root") + ) + continue + + # Bless namelists + if nl_bless: + success, reason = bless_namelists( + test_name, + report_only, + force, + pes_file, + baseline_name_resolved, + baseline_root_resolved, + new_test_root=new_test_root, + new_test_id=new_test_id, + ) + if not success: + broken_blesses.append((test_name, reason)) + + # Bless hist files + if hist_bless: + if "HOMME" in test_name: + success = False + reason = "HOMME tests cannot be blessed with bless_for_tests" + else: + success, reason = bless_history( + test_name, + case, + baseline_name_resolved, + baseline_root_resolved, + report_only, + force, + ) + + if not success: + broken_blesses.append((test_name, reason)) + + if tput_bless: + success, reason = _bless_throughput( + case, + test_name, + baseline_root_resolved, + baseline_name_resolved, + report_only, + force, + ) + + if not success: + broken_blesses.append((test_name, reason)) + + if mem_bless: + success, reason = _bless_memory( + case, + test_name, + baseline_root_resolved, + baseline_name_resolved, + report_only, + force, + ) + + if not success: + broken_blesses.append((test_name, reason)) + + if lock_baselines: + baseline_full_dir = os.path.join( + baseline_root_resolved, + baseline_name_resolved, + case.get_value("CASEBASEID"), + ) + stat, out, _ = run_cmd( + f"chmod -R g-w {baseline_full_dir}", combine_output=True + ) + if stat != 0: + msg = ( + f"Failed to lock baselines for 
{baseline_full_dir}:\n{out}" + ) + logger.warning(msg) + + # Emit a warning if items in bless_tests did not match anything + if bless_tests: + for bless_test, bless_count in bless_tests_counts.items(): + if bless_count == 0: + logger.warning( + """ +bless test arg '{}' did not match any tests in test_root {} with +compiler {} and test_id {}. It's possible that one of these arguments +had a mistake (likely compiler or testid).""".format( + bless_test, test_root, compiler, test_id + ) + ) + + # Make sure user knows that some tests were not blessed + success = True + for broken_bless, reason in broken_blesses: + logger.warning( + "FAILED TO BLESS TEST: {}, reason {}".format(broken_bless, reason) + ) + success = False + + return success + + +def is_hist_bless_needed( + test_name, ts, broken_blesses, overall_result, no_skip_pass, phase +): + needed = False + + run_result = ts.get_status(RUN_PHASE) + + if run_result is None: + broken_blesses.append((test_name, "no run phase")) + logger.warning("Test '{}' did not make it to run phase".format(test_name)) + needed = False + elif run_result != TEST_PASS_STATUS: + broken_blesses.append((test_name, "run phase did not pass")) + logger.warning( + "Test '{}' run phase did not pass, not safe to bless, test status = {}".format( + test_name, ts.phase_statuses_dump() + ) + ) + needed = False + elif overall_result == TEST_FAIL_STATUS: + # Sometimes a test might fail only during the generate phase; e.g., if the user doesn't have + # write permissions in the baseline directory. We still want to bless those tests. + only_failed_generate = False + if ts.get_status(GENERATE_PHASE) == TEST_FAIL_STATUS: + only_failed_generate = True + for p in ALL_PHASES: + if p == GENERATE_PHASE: + continue + phase_result = ts.get_status(p) + if phase_result is TEST_FAIL_STATUS: + only_failed_generate = False + break + if only_failed_generate: + needed = True + else: + broken_blesses.append((test_name, "test did not pass")) + logger.warning( + "Test '{}' did not pass due to phase {}, not safe to bless, test status = {}".format( + test_name, phase, ts.phase_statuses_dump() + ) + ) + needed = False + + elif no_skip_pass: + needed = True + else: + needed = ts.get_status(BASELINE_PHASE) != TEST_PASS_STATUS + + return needed diff --git a/CIME/build.py b/CIME/build.py new file mode 100644 index 00000000000..7c9f5b205b4 --- /dev/null +++ b/CIME/build.py @@ -0,0 +1,1334 @@ +""" +functions for building CIME models +""" + +import glob, shutil, time, threading, subprocess +from pathlib import Path +from CIME.XML.standard_module_setup import * +from CIME.status import run_and_log_case_status +from CIME.utils import ( + get_model, + analyze_build_log, + stringify_bool, + get_timestamp, + run_sub_or_cmd, + run_cmd, + get_batch_script_for_job, + gzip_existing_file, + safe_copy, + is_python_executable, + get_logging_options, + import_from_file, +) +from CIME.config import Config +from CIME.locked_files import lock_file, unlock_file, check_lockedfiles +from CIME.XML.files import Files + +logger = logging.getLogger(__name__) + +config = Config.instance() + +_CMD_ARGS_FOR_BUILD = ( + "CASEROOT", + "CASETOOLS", + "CIMEROOT", + "SRCROOT", + "COMP_INTERFACE", + "COMPILER", + "DEBUG", + "EXEROOT", + "RUNDIR", + "INCROOT", + "LIBROOT", + "MACH", + "MPILIB", + "NINST_VALUE", + "OS", + "PIO_VERSION", + "SHAREDLIBROOT", + "BUILD_THREADED", + "USE_ESMF_LIB", + "USE_MOAB", + "CAM_CONFIG_OPTS", + "COMP_ATM", + "COMP_ICE", + "COMP_GLC", + "COMP_LND", + "COMP_OCN", + "COMP_ROF", + "COMP_WAV", + 
"COMPARE_TO_NUOPC", + "HOMME_TARGET", + "OCN_SUBMODEL", + "CISM_USE_TRILINOS", + "USE_TRILINOS", + "USE_ALBANY", + "USE_PETSC", + "USE_FTORCH", + "TORCH_DIR", +) + + +class CmakeTmpBuildDir(object): + """ + Use to create a temporary cmake build dir for the purposes of querying + Macros. + """ + + def __init__(self, macroloc=None, rootdir=None, tmpdir=None): + """ + macroloc: The dir containing the cmake macros, default is pwd. This can be a case or CMAKE_MACROS_DIR + rootdir: The dir containing the tmpdir, default is macroloc + tmpdir: The name of the tempdir, default is "cmaketmp" + """ + self._macroloc = os.getcwd() if macroloc is None else macroloc + self._rootdir = self._macroloc if rootdir is None else rootdir + self._tmpdir = "cmaketmp" if tmpdir is None else tmpdir + + self._entered = False + + def get_full_tmpdir(self): + return os.path.join(self._rootdir, self._tmpdir) + + def __enter__(self): + cmake_macros_dir = os.path.join(self._macroloc, "cmake_macros") + expect( + os.path.isdir(cmake_macros_dir), + "Cannot create cmake temp build dir, no {} macros found".format( + cmake_macros_dir + ), + ) + cmake_lists = os.path.join(cmake_macros_dir, "CMakeLists.txt") + full_tmp_dir = self.get_full_tmpdir() + Path(full_tmp_dir).mkdir(parents=False, exist_ok=True) + safe_copy(cmake_lists, full_tmp_dir) + + self._entered = True + + return self + + def __exit__(self, *args): + shutil.rmtree(self.get_full_tmpdir()) + self._entered = False + + def get_makefile_vars(self, case=None, comp=None, cmake_args=None): + """ + Run cmake and process output to a list of variable settings + + case can be None if caller is providing their own cmake args + """ + expect( + self._entered, "Should only call get_makefile_vars within a with statement" + ) + if case is None: + expect( + cmake_args is not None, + "Need either a case or hardcoded cmake_args to generate makefile vars", + ) + + cmake_args = ( + get_standard_cmake_args(case, "DO_NOT_USE") + if cmake_args is None + else cmake_args + ) + dcomp = "-DCOMP_NAME={}".format(comp) if comp else "" + output = run_cmd_no_fail( + "cmake -DCONVERT_TO_MAKE=ON {dcomp} {cmake_args} .".format( + dcomp=dcomp, cmake_args=cmake_args + ), + combine_output=True, + from_dir=self.get_full_tmpdir(), + ) + + lines_to_keep = [] + for line in output.splitlines(): + if "CIME_SET_MAKEFILE_VAR" in line and "BUILD_INTERNAL_IGNORE" not in line: + lines_to_keep.append(line) + + output_to_keep = "\n".join(lines_to_keep) + "\n" + output_to_keep = ( + output_to_keep.replace("CIME_SET_MAKEFILE_VAR ", "") + .replace("CPPDEFS := ", "CPPDEFS := $(CPPDEFS) ") + .replace("SLIBS := ", "SLIBS := $(SLIBS) ") + + "\n" + ) + + return output_to_keep + + +def generate_makefile_macro(case, caseroot): + """ + Generates a flat Makefile macro file based on the CMake cache system. + This macro is only used by certain sharedlibs since components use CMake. + Since indirection based on comp_name is allowed for sharedlibs, each sharedlib must generate + their own macro. 
+ """ + with CmakeTmpBuildDir(macroloc=caseroot) as cmake_tmp: + # Append CMakeLists.txt with compset specific stuff + comps = _get_compset_comps(case) + comps.extend( + [ + "mct", + "pio{}".format(case.get_value("PIO_VERSION")), + "gptl", + "csm_share", + "csm_share_cpl7", + "mpi-serial", + ] + ) + cmake_macro = os.path.join(caseroot, "Macros.cmake") + expect( + os.path.exists(cmake_macro), + "Cannot generate Makefile macro without {}".format(cmake_macro), + ) + + # run once with no COMP_NAME + no_comp_output = cmake_tmp.get_makefile_vars(case=case) + all_output = no_comp_output + no_comp_lines = no_comp_output.splitlines() + + for comp in comps: + comp_output = cmake_tmp.get_makefile_vars(case=case, comp=comp) + # The Tools/Makefile may have already adding things to CPPDEFS and SLIBS + comp_lines = comp_output.splitlines() + first = True + for comp_line in comp_lines: + if comp_line not in no_comp_lines: + if first: + all_output += 'ifeq "$(COMP_NAME)" "{}"\n'.format(comp) + first = False + + all_output += " " + comp_line + "\n" + + if not first: + all_output += "endif\n" + + with open(os.path.join(caseroot, "Macros.make"), "w") as fd: + fd.write( + """ +# This file is auto-generated, do not edit. If you want to change +# sharedlib flags, you can edit the cmake_macros in this case. You +# can change flags for specific sharedlibs only by checking COMP_NAME. + +""" + ) + fd.write(all_output) + + +# pylint:disable=unused-argument +def get_standard_makefile_args(case, shared_lib=False): + make_args = "CIME_MODEL={} ".format(case.get_value("MODEL")) + make_args += " SMP={} ".format(stringify_bool(case.get_build_threaded())) + for var in _CMD_ARGS_FOR_BUILD: + make_args += xml_to_make_variable(case, var) + + return make_args + + +def _get_compset_comps(case): + comps = [] + driver = case.get_value("COMP_INTERFACE") + for comp_class in case.get_values("COMP_CLASSES"): + comp = case.get_value("COMP_{}".format(comp_class)) + if comp == "cpl": + comp = "driver" + if comp == "s{}".format(comp_class.lower()) and driver == "nuopc": + comp = "" + else: + comps.append(comp) + return comps + + +def get_standard_cmake_args(case, sharedpath): + cmake_args = "-DCIME_MODEL={} ".format(case.get_value("MODEL")) + cmake_args += "-DSRC_ROOT={} ".format(case.get_value("SRCROOT")) + cmake_args += " -Dcompile_threaded={} ".format( + stringify_bool(case.get_build_threaded()) + ) + # check settings for GPU + gpu_type = case.get_value("GPU_TYPE") + openacc_gpu_offload = case.get_value("OPENACC_GPU_OFFLOAD") + openmp_gpu_offload = case.get_value("OPENMP_GPU_OFFLOAD") + kokkos_gpu_offload = case.get_value("KOKKOS_GPU_OFFLOAD") + cmake_args += f" -DGPU_TYPE={gpu_type} -DOPENACC_GPU_OFFLOAD={openacc_gpu_offload} -DOPENMP_GPU_OFFLOAD={openmp_gpu_offload} -DKOKKOS_GPU_OFFLOAD={kokkos_gpu_offload} " + + ocn_model = case.get_value("COMP_OCN") + atm_dycore = case.get_value("CAM_DYCORE") + if ocn_model == "mom" or (atm_dycore and atm_dycore == "fv3"): + cmake_args += " -DUSE_FMS=TRUE " + + cmake_args += " -DINSTALL_SHAREDPATH={} ".format( + os.path.join(case.get_value("EXEROOT"), sharedpath) + ) + + # if sharedlibs are common to entire suite, they cannot be customized + # per case/compset + if not config.common_sharedlibroot: + cmake_args += " -DUSE_KOKKOS={} ".format(stringify_bool(uses_kokkos(case))) + comps = _get_compset_comps(case) + cmake_args += " -DCOMP_NAMES='{}' ".format(";".join(comps)) + + for var in _CMD_ARGS_FOR_BUILD: + cmake_args += xml_to_make_variable(case, var, cmake=True) + + atm_model = 
case.get_value("COMP_ATM") + if atm_model == "scream": + cmake_args += xml_to_make_variable(case, "HOMME_TARGET", cmake=True) + + # Disable compiler checks + cmake_args += " -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER_WORKS=1 -DCMAKE_Fortran_COMPILER_WORKS=1" + + return cmake_args + + +def xml_to_make_variable(case, varname, cmake=False): + varvalue = case.get_value(varname) + if varvalue is None: + return "" + if isinstance(varvalue, bool): + varvalue = stringify_bool(varvalue) + elif isinstance(varvalue, str): + # assure that paths passed to make do not end in / or contain // + varvalue = varvalue.replace("//", "/") + if varvalue.endswith("/"): + varvalue = varvalue[:-1] + if cmake or isinstance(varvalue, str): + return '{}{}="{}" '.format("-D" if cmake else "", varname, varvalue) + else: + return "{}={} ".format(varname, varvalue) + + +############################################################################### +def uses_kokkos(case): + ############################################################################### + cam_target = case.get_value("CAM_TARGET") + # atm_comp = case.get_value("COMP_ATM") # scream does not use the shared kokkoslib for now + + return config.use_kokkos and cam_target in ( + "preqx_kokkos", + "theta-l", + "theta-l_kokkos", + ) + + +############################################################################### +def _build_model( + build_threaded, + exeroot, + incroot, + complist, + lid, + caseroot, + cimeroot, + compiler, + buildlist, + comp_interface, +): + ############################################################################### + logs = [] + thread_bad_results = [] + libroot = os.path.join(exeroot, "lib") + bldroot = None + bld_threads = [] + for model, comp, nthrds, _, config_dir in complist: + if buildlist is not None and model.lower() not in buildlist: + continue + + # aquap has a dependency on atm so we will build it after the threaded loop + if comp == "aquap": + logger.debug("Skip aquap ocn build here") + continue + + # coupler handled seperately + if model == "cpl": + continue + + # special case for clm + # clm 4_5 and newer is a shared (as in sharedlibs, shared by all tests) library + # (but not in E3SM) and should be built in build_libraries + if config.shared_clm_component and comp == "clm": + continue + else: + logger.info(" - Building {} Library ".format(model)) + + smp = nthrds > 1 or build_threaded + + file_build = os.path.join(exeroot, "{}.bldlog.{}".format(model, lid)) + bldroot = os.path.join(exeroot, model, "obj") + logger.debug("bldroot is {}".format(bldroot)) + logger.debug("libroot is {}".format(libroot)) + + # make sure bldroot and libroot exist + for build_dir in [bldroot, libroot]: + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # build the component library + # thread_bad_results captures error output from thread (expected to be empty) + # logs is a list of log files to be compressed and added to the case logs/bld directory + t = threading.Thread( + target=_build_model_thread, + args=( + config_dir, + model, + comp, + caseroot, + libroot, + bldroot, + incroot, + file_build, + thread_bad_results, + smp, + compiler, + ), + ) + t.start() + bld_threads.append(t) + + logs.append(file_build) + + # Wait for threads to finish + for bld_thread in bld_threads: + bld_thread.join() + + expect(not thread_bad_results, "\n".join(thread_bad_results)) + + # + # Now build the executable + # + + if not buildlist: + cime_model = get_model() + file_build = os.path.join(exeroot, "{}.bldlog.{}".format(cime_model, lid)) + + 
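The per-component builds above run in parallel threads that never raise directly; each worker records failures in a shared list, and the caller only inspects that list after joining every thread. Below is a minimal, self-contained sketch of that error-collection pattern; the component names and the no-op "commands" table are hypothetical, not part of CIME.

```python
import threading

def build_one(component, commands, bad_results):
    # Stand-in for _build_model_thread: run the component's build step and,
    # on failure, append a message to the shared list instead of raising
    # inside the thread.
    try:
        commands[component]()  # the "compile" step (a no-op in this toy)
    except Exception as exc:
        bad_results.append("BUILD FAIL: {}: {}".format(component, exc))

commands = {name: (lambda: None) for name in ("atm", "lnd", "ocn")}  # all succeed
bad_results, threads = [], []
for component in commands:
    t = threading.Thread(target=build_one, args=(component, commands, bad_results))
    t.start()
    threads.append(t)

for t in threads:
    t.join()  # wait for every build before checking for failures

# Same idea as expect(not thread_bad_results, ...): fail loudly only after all
# threads finish, so every error message is collected in one place.
assert not bad_results, "\n".join(bad_results)
```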
ufs_driver = os.environ.get("UFS_DRIVER") + if config.ufs_alternative_config and ufs_driver == "nems": + config_dir = os.path.join( + cimeroot, os.pardir, "src", "model", "NEMS", "cime", "cime_config" + ) + else: + files = Files(comp_interface=comp_interface) + if comp_interface == "nuopc": + config_dir = os.path.join( + os.path.dirname(files.get_value("BUILD_LIB_FILE", {"lib": "CMEPS"})) + ) + else: + config_dir = os.path.join( + files.get_value("COMP_ROOT_DIR_CPL"), "cime_config" + ) + + expect( + os.path.exists(config_dir), + "Config directory not found {}".format(config_dir), + ) + if "cpl" in complist: + bldroot = os.path.join(exeroot, "cpl", "obj") + if not os.path.isdir(bldroot): + os.makedirs(bldroot) + logger.info( + "Building {} from {}/buildexe with output to {} ".format( + cime_model, config_dir, file_build + ) + ) + with open(file_build, "w") as fd: + stat = run_cmd( + "{}/buildexe {} {} {} ".format(config_dir, caseroot, libroot, bldroot), + from_dir=bldroot, + arg_stdout=fd, + arg_stderr=subprocess.STDOUT, + )[0] + + analyze_build_log("{} exe".format(cime_model), file_build, compiler) + expect(stat == 0, "BUILD FAIL: buildexe failed, cat {}".format(file_build)) + + # Copy the just-built ${MODEL}.exe to ${MODEL}.exe.$LID + safe_copy( + "{}/{}.exe".format(exeroot, cime_model), + "{}/{}.exe.{}".format(exeroot, cime_model, lid), + ) + + logs.append(file_build) + + return logs + + +############################################################################### +def _build_model_cmake( + exeroot, + complist, + lid, + buildlist, + comp_interface, + sharedpath, + separate_builds, + ninja, + dry_run, + case, +): + ############################################################################### + cime_model = get_model() + bldroot = os.path.join(exeroot, "cmake-bld") + libroot = os.path.join(exeroot, "lib") + bldlog = os.path.join(exeroot, "{}.bldlog.{}".format(cime_model, lid)) + srcroot = case.get_value("SRCROOT") + gmake_j = case.get_value("GMAKE_J") + gmake = case.get_value("GMAKE") + + # make sure bldroot and libroot exist + for build_dir in [bldroot, libroot]: + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # Components-specific cmake args. Cmake requires all component inputs to be available + # regardless of requested build list. We do not want to re-invoke cmake + # if it has already been called. + do_timing = "/usr/bin/time -p " if os.path.exists("/usr/bin/time") else "" + if not os.path.exists(os.path.join(bldroot, "CMakeCache.txt")): + cmp_cmake_args = "" + all_models = [] + files = Files(comp_interface=comp_interface) + for model, _, _, _, config_dir in complist: + # Create the Filepath and CIME_cppdefs files + if model == "cpl": + config_dir = os.path.join( + files.get_value("COMP_ROOT_DIR_CPL"), "cime_config" + ) + + cmp_cmake_args += _create_build_metadata_for_component( + config_dir, libroot, bldroot, case + ) + all_models.append(model) + + # Call CMake + cmake_args = get_standard_cmake_args(case, sharedpath) + cmake_env = "" + ninja_path = os.path.join(srcroot, "externals/ninja/bin") + if ninja: + cmake_args += " -GNinja " + cmake_env += "PATH={}:$PATH ".format(ninja_path) + + # Glue all pieces together: + # - cmake environment + # - common (i.e. 
project-wide) cmake args + # - component-specific cmake args + # - path to src folder + cmake_cmd = "{} {}cmake {} {} {}/components".format( + cmake_env, do_timing, cmake_args, cmp_cmake_args, srcroot + ) + stat = 0 + if dry_run: + logger.info("CMake cmd:\ncd {} && {}\n\n".format(bldroot, cmake_cmd)) + else: + logger.info( + "Configuring full {} model with output to file {}".format( + cime_model, bldlog + ) + ) + logger.info( + " Calling cmake directly, see top of log file for specific call" + ) + with open(bldlog, "w") as fd: + fd.write("Configuring with cmake cmd:\n{}\n\n".format(cmake_cmd)) + + # Add logging before running + cmake_cmd = "({}) >> {} 2>&1".format(cmake_cmd, bldlog) + stat = run_cmd(cmake_cmd, from_dir=bldroot)[0] + expect( + stat == 0, + "BUILD FAIL: cmake config {} failed, cat {}".format(cime_model, bldlog), + ) + + # Set up buildlist + if not buildlist: + if separate_builds: + buildlist = all_models + else: + buildlist = ["cpl"] + + if "cpl" in buildlist: + buildlist.remove("cpl") + buildlist.append("cpl") # must come at end + + # Call Make + logs = [] + for model in buildlist: + t1 = time.time() + + make_cmd = "{}{} -j {}".format( + do_timing, + gmake if not ninja else "{} -v".format(os.path.join(ninja_path, "ninja")), + gmake_j, + ) + if model != "cpl": + make_cmd += " {}".format(model) + curr_log = os.path.join(exeroot, "{}.bldlog.{}".format(model, lid)) + model_name = model + else: + curr_log = bldlog + model_name = cime_model if buildlist == ["cpl"] else model + + if dry_run: + logger.info("Build cmd:\ncd {} && {}\n\n".format(bldroot, make_cmd)) + else: + logger.info( + "Building {} model with output to file {}".format(model_name, curr_log) + ) + logger.info(" Calling make, see top of log file for specific call") + with open(curr_log, "a") as fd: + fd.write("\n\nBuilding with cmd:\n{}\n\n".format(make_cmd)) + + # Add logging before running + make_cmd = "({}) >> {} 2>&1".format(make_cmd, curr_log) + stat = run_cmd(make_cmd, from_dir=bldroot)[0] + expect( + stat == 0, + "BUILD FAIL: build {} failed, cat {}".format(model_name, curr_log), + ) + + t2 = time.time() + if separate_builds: + logger.info(" {} built in {:f} seconds".format(model_name, (t2 - t1))) + + logs.append(curr_log) + + expect(not dry_run, "User requested dry-run only, terminating build") + + # Copy the just-built ${MODEL}.exe to ${MODEL}.exe.$LID + if "cpl" in buildlist: + safe_copy( + "{}/{}.exe".format(exeroot, cime_model), + "{}/{}.exe.{}".format(exeroot, cime_model, lid), + ) + + return logs + + +############################################################################### +def _build_checks( + case, + build_threaded, + comp_interface, + debug, + compiler, + mpilib, + complist, + ninst_build, + smp_value, + model_only, + buildlist, +): + ############################################################################### + """ + check if a build needs to be done and warn if a clean is warrented first + returns the relative sharedpath directory for sharedlibraries + """ + smp_build = case.get_value("SMP_BUILD") + build_status = case.get_value("BUILD_STATUS") + expect( + comp_interface in ("mct", "moab", "nuopc"), + "Only supporting mct nuopc, or moab comp_interfaces at this time, found {}".format( + comp_interface + ), + ) + smpstr = "" + ninst_value = "" + for model, _, nthrds, ninst, _ in complist: + if nthrds > 1: + build_threaded = True + if build_threaded: + smpstr += "{}1".format(model[0]) + else: + smpstr += "{}0".format(model[0]) + ninst_value += "{}{:d}".format((model[0]), ninst) + + 
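The loop just above condenses the component list into two signature strings, one for threading and one for instance counts. A small illustrative sketch of how those strings are derived (the sample complist tuples are hypothetical, not read from a real case):

```python
# Illustrative only: how _build_checks derives the SMP and NINST signature
# strings from complist entries of (model, comp, nthrds, ninst, config_dir).
sample_complist = [("atm", "cam", 2, 1, None), ("ocn", "pop", 1, 1, None)]

build_threaded = False
smpstr, ninst_value = "", ""
for model, _, nthrds, ninst, _ in sample_complist:
    if nthrds > 1:
        build_threaded = True  # one threaded component flips the whole build
    smpstr += "{}{}".format(model[0], 1 if build_threaded else 0)
    ninst_value += "{}{:d}".format(model[0], ninst)

print(smpstr, ninst_value)  # -> a1o1 a1o1
# A mismatch between these strings and the stored SMP_BUILD / NINST_BUILD
# values is what triggers the "manual clean recommended" errors below.
```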
case.set_value("SMP_VALUE", smpstr) + case.set_value("NINST_VALUE", ninst_value) + + debugdir = "debug" if debug else "nodebug" + threaddir = "threads" if build_threaded else "nothreads" + sharedpath = os.path.join(compiler, mpilib, debugdir, threaddir) + + logger.debug( + "compiler={} mpilib={} debugdir={} threaddir={}".format( + compiler, mpilib, debugdir, threaddir + ) + ) + + expect( + ninst_build == ninst_value or ninst_build == "0", + """ +ERROR, NINST VALUES HAVE CHANGED + NINST_BUILD = {} + NINST_VALUE = {} + A manual clean of your obj directories is strongly recommended + You should execute the following: + ./case.build --clean + Then rerun the build script interactively + ---- OR ---- + You can override this error message at your own risk by executing: + ./xmlchange -file env_build.xml -id NINST_BUILD -val 0 + Then rerun the build script interactively +""".format( + ninst_build, ninst_value + ), + ) + + expect( + smp_build == smpstr or smp_build == "0", + """ +ERROR, SMP VALUES HAVE CHANGED + SMP_BUILD = {} + SMP_VALUE = {} + smpstr = {} + A manual clean of your obj directories is strongly recommended + You should execute the following: + ./case.build --clean + Then rerun the build script interactively + ---- OR ---- + You can override this error message at your own risk by executing: + ./xmlchange -file env_build.xml -id SMP_BUILD -val 0 + Then rerun the build script interactively +""".format( + smp_build, smp_value, smpstr + ), + ) + + expect( + build_status == 0, + """ +ERROR env_build HAS CHANGED + A manual clean of your obj directories is required + You should execute the following: + ./case.build --clean-all +""", + ) + + case.set_value("BUILD_COMPLETE", False) + + # User may have rm -rf their build directory + case.create_dirs() + + case.flush() + if not model_only and not buildlist: + logger.info("Generating component namelists as part of build") + case.create_namelists() + + return sharedpath + + +############################################################################### +def _build_libraries( + case, + exeroot, + sharedpath, + caseroot, + cimeroot, + libroot, + lid, + compiler, + buildlist, + comp_interface, + complist, +): + ############################################################################### + + shared_lib = os.path.join(exeroot, sharedpath, "lib") + shared_inc = os.path.join(exeroot, sharedpath, "include") + for shared_item in [shared_lib, shared_inc]: + if not os.path.exists(shared_item): + os.makedirs(shared_item) + + mpilib = case.get_value("MPILIB") + ufs_driver = os.environ.get("UFS_DRIVER") + cpl_in_complist = False + for l in complist: + if "cpl" in l: + cpl_in_complist = True + if ufs_driver: + logger.info("UFS_DRIVER is set to {}".format(ufs_driver)) + if ufs_driver and ufs_driver == "nems" and not cpl_in_complist: + libs = [] + elif case.get_value("MODEL") == "cesm": + libs = ["gptl", "pio", "csm_share"] + elif case.get_value("MODEL") == "e3sm": + libs = ["gptl", "mct", "spio", "csm_share"] + else: + libs = ["gptl", "mct", "pio", "csm_share"] + + libs.append("FTorch") + + if mpilib == "mpi-serial": + libs.insert(0, mpilib) + + if uses_kokkos(case) and comp_interface != "nuopc": + libs.append("kokkos") + + # Build shared code of CDEPS nuopc data models + build_script = {} + if comp_interface == "nuopc" and (not ufs_driver or ufs_driver != "nems"): + libs.append("CDEPS") + + ocn_model = case.get_value("COMP_OCN") + + atm_dycore = case.get_value("CAM_DYCORE") + if ocn_model == "mom" or (atm_dycore and atm_dycore == "fv3"): + libs.append("FMS") 
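The branching above reduces to a per-model list of shared libraries, with mpi-serial (and, for test cases, cprnc) prepended so they are built first. The helper below is a hypothetical condensation of that selection logic for the common non-UFS cases; the library names follow the code above, but the function itself is illustrative and not part of CIME.

```python
def select_sharedlibs(model, mpilib, needs_kokkos=False, nuopc=False):
    # Mirrors the branching above for the common (non-UFS) cases.
    if model == "cesm":
        libs = ["gptl", "pio", "csm_share"]
    elif model == "e3sm":
        libs = ["gptl", "mct", "spio", "csm_share"]
    else:
        libs = ["gptl", "mct", "pio", "csm_share"]
    libs.append("FTorch")
    if mpilib == "mpi-serial":
        libs.insert(0, mpilib)  # must be available before anything that uses MPI
    if needs_kokkos and not nuopc:
        libs.append("kokkos")
    if nuopc:
        libs.append("CDEPS")  # shared code of the CDEPS data models
    return libs

print(select_sharedlibs("e3sm", "mpi-serial"))
# ['mpi-serial', 'gptl', 'mct', 'spio', 'csm_share', 'FTorch']
```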
+ + files = Files(comp_interface=comp_interface) + for lib in libs: + build_script[lib] = files.get_value( + "BUILD_LIB_FILE", {"lib": lib}, attribute_required=True + ) + + sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) + # Check if we need to build our own cprnc + if case.get_value("TEST"): + cprnc_loc = case.get_value("CCSM_CPRNC") + full_lib_path = os.path.join(sharedlibroot, compiler, "cprnc") + if not cprnc_loc or not os.path.exists(cprnc_loc): + case.set_value("CCSM_CPRNC", os.path.join(full_lib_path, "cprnc")) + if not os.path.isdir(full_lib_path): + os.makedirs(full_lib_path) + libs.insert(0, "cprnc") + + logs = [] + + # generate Makefile macro + generate_makefile_macro(case, caseroot) + + for lib in libs: + if buildlist is not None and lib not in buildlist: + continue + + if lib == "csm_share" or lib == "csm_share_cpl7": + # csm_share adds its own dir name + full_lib_path = os.path.join(sharedlibroot, sharedpath) + elif lib == "mpi-serial": + full_lib_path = os.path.join(sharedlibroot, sharedpath, lib) + elif lib == "cprnc": + full_lib_path = os.path.join(sharedlibroot, compiler, "cprnc") + else: + full_lib_path = os.path.join(sharedlibroot, sharedpath, lib) + + if lib in build_script.keys(): + my_file = build_script[lib] + else: + my_file = os.path.join( + cimeroot, "CIME", "build_scripts", "buildlib.{}".format(lib) + ) + if not my_file: + continue + if not os.path.exists(my_file): + logger.warning( + "Build script {} for component {} not found.".format(my_file, lib) + ) + continue + + file_build = os.path.join(exeroot, "{}.bldlog.{}".format(lib, lid)) + logger.info("Building {} with output to file {}".format(lib, file_build)) + # pio build creates its own directory + if lib != "pio" and not os.path.isdir(full_lib_path): + os.makedirs(full_lib_path) + + run_sub_or_cmd( + my_file, + [full_lib_path, os.path.join(exeroot, sharedpath), caseroot], + "buildlib", + [full_lib_path, os.path.join(exeroot, sharedpath), case], + logfile=file_build, + ) + + analyze_build_log(lib, file_build, compiler) + logs.append(file_build) + if lib == "pio": + bldlog = open(file_build, "r") + for line in bldlog: + if re.search("Current setting for", line): + logger.warning(line) + + # clm not a shared lib for E3SM + if config.shared_clm_component and (buildlist is None or "lnd" in buildlist): + comp_lnd = case.get_value("COMP_LND") + if comp_lnd == "clm": + logging.info(" - Building clm library ") + bldroot = os.path.join(sharedlibroot, sharedpath, "clm", "obj") + libroot = os.path.join(exeroot, sharedpath, "lib") + incroot = os.path.join(exeroot, sharedpath, "include") + file_build = os.path.join(exeroot, "lnd.bldlog.{}".format(lid)) + config_lnd_dir = os.path.dirname(case.get_value("CONFIG_LND_FILE")) + + for ndir in [bldroot, libroot, incroot]: + if not os.path.isdir(ndir): + os.makedirs(ndir) + + smp = "SMP" in os.environ and os.environ["SMP"] == "TRUE" + # thread_bad_results captures error output from thread (expected to be empty) + # logs is a list of log files to be compressed and added to the case logs/bld directory + thread_bad_results = [] + _build_model_thread( + config_lnd_dir, + "lnd", + comp_lnd, + caseroot, + libroot, + bldroot, + incroot, + file_build, + thread_bad_results, + smp, + compiler, + ) + logs.append(file_build) + expect(not thread_bad_results, "\n".join(thread_bad_results)) + + case.flush() # python sharedlib subs may have made XML modifications + return logs + + +############################################################################### +def 
_build_model_thread( + config_dir, + compclass, + compname, + caseroot, + libroot, + bldroot, + incroot, + file_build, + thread_bad_results, + smp, + compiler, +): + ############################################################################### + logger.info("Building {} with output to {}".format(compclass, file_build)) + t1 = time.time() + cmd = os.path.join(caseroot, "SourceMods", "src." + compname, "buildlib") + if os.path.isfile(cmd): + logger.warning("WARNING: using local buildlib script for {}".format(compname)) + else: + cmd = os.path.join(config_dir, "buildlib") + expect(os.path.isfile(cmd), "Could not find buildlib for {}".format(compname)) + + compile_cmd = "COMP_CLASS={compclass} COMP_NAME={compname} {cmd} {caseroot} {libroot} {bldroot} ".format( + compclass=compclass, + compname=compname, + cmd=cmd, + caseroot=caseroot, + libroot=libroot, + bldroot=bldroot, + ) + if config.enable_smp: + compile_cmd = "SMP={} {}".format(stringify_bool(smp), compile_cmd) + + if is_python_executable(cmd): + logging_options = get_logging_options() + if logging_options != "": + compile_cmd = compile_cmd + logging_options + + with open(file_build, "w") as fd: + stat = run_cmd( + compile_cmd, from_dir=bldroot, arg_stdout=fd, arg_stderr=subprocess.STDOUT + )[0] + + if stat != 0: + thread_bad_results.append( + "BUILD FAIL: {}.buildlib failed, cat {}".format(compname, file_build) + ) + + analyze_build_log(compclass, file_build, compiler) + + for mod_file in glob.glob(os.path.join(bldroot, "*_[Cc][Oo][Mm][Pp]_*.mod")): + safe_copy(mod_file, incroot) + + t2 = time.time() + logger.info("{} built in {:f} seconds".format(compname, (t2 - t1))) + + +############################################################################### +def _create_build_metadata_for_component(config_dir, libroot, bldroot, case): + ############################################################################### + """ + Ensure that crucial Filepath and CIME_CPPDEFS files exist for this component. + In many cases, the bld/configure script will have already created these. 
+ """ + bc_path = os.path.join(config_dir, "buildlib_cmake") + expect(os.path.exists(bc_path), "Missing: {}".format(bc_path)) + buildlib = import_from_file( + "buildlib_cmake", os.path.join(config_dir, "buildlib_cmake") + ) + cmake_args = buildlib.buildlib(bldroot, libroot, case) + return "" if cmake_args is None else cmake_args + + +############################################################################### +def _clean_impl(case, cleanlist, clean_all, clean_depends): + ############################################################################### + exeroot = os.path.abspath(case.get_value("EXEROOT")) + case.load_env() + if clean_all: + # If cleanlist is empty just remove the bld directory + expect(exeroot is not None, "No EXEROOT defined in case") + if os.path.isdir(exeroot): + logging.info("cleaning directory {}".format(exeroot)) + shutil.rmtree(exeroot) + + # if clean_all is True also remove the sharedlibpath + sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) + expect(sharedlibroot is not None, "No SHAREDLIBROOT defined in case") + if sharedlibroot != exeroot and os.path.isdir(sharedlibroot): + logging.warning("cleaning directory {}".format(sharedlibroot)) + shutil.rmtree(sharedlibroot) + + else: + expect( + (cleanlist is not None and len(cleanlist) > 0) + or (clean_depends is not None and len(clean_depends)), + "Empty cleanlist not expected", + ) + gmake = case.get_value("GMAKE") + + cleanlist = [] if cleanlist is None else cleanlist + clean_depends = [] if clean_depends is None else clean_depends + things_to_clean = cleanlist + clean_depends + + cmake_comp_root = os.path.join(exeroot, "cmake-bld", "cmake") + casetools = case.get_value("CASETOOLS") + classic_cmd = "{} -f {} {}".format( + gmake, + os.path.join(casetools, "Makefile"), + get_standard_makefile_args(case, shared_lib=True), + ) + + for clean_item in things_to_clean: + logging.info("Cleaning {}".format(clean_item)) + cmake_path = os.path.join(cmake_comp_root, clean_item) + if os.path.exists(cmake_path): + # Item was created by cmake build system + clean_cmd = "cd {} && {} clean".format(cmake_path, gmake) + else: + # Item was created by classic build system + # do I need this? 
generate_makefile_macro(case, caseroot, clean_item) + + clean_cmd = "{} {}{}".format( + classic_cmd, + "clean" if clean_item in cleanlist else "clean_depends", + clean_item, + ) + + logger.info("calling {}".format(clean_cmd)) + run_cmd_no_fail(clean_cmd) + + # unlink Locked files directory + unlock_file("env_build.xml", case.get_value("CASEROOT")) + + # reset following values in xml files + case.set_value("SMP_BUILD", str(0)) + case.set_value("NINST_BUILD", str(0)) + case.set_value("BUILD_STATUS", str(0)) + case.set_value("BUILD_COMPLETE", "FALSE") + case.flush() + + +############################################################################### +def _case_build_impl( + caseroot, + case, + sharedlib_only, + model_only, + buildlist, + save_build_provenance, + separate_builds, + ninja, + dry_run, +): + ############################################################################### + + t1 = time.time() + + expect( + not (sharedlib_only and model_only), + "Contradiction: both sharedlib_only and model_only", + ) + expect( + not (dry_run and not model_only), + "Dry-run is only for model builds, please build sharedlibs first", + ) + logger.info("Building case in directory {}".format(caseroot)) + logger.info("sharedlib_only is {}".format(sharedlib_only)) + logger.info("model_only is {}".format(model_only)) + + expect(os.path.isdir(caseroot), "'{}' is not a valid directory".format(caseroot)) + os.chdir(caseroot) + + expect( + os.path.exists(get_batch_script_for_job(case.get_primary_job())), + "ERROR: must invoke case.setup script before calling build script ", + ) + + cimeroot = case.get_value("CIMEROOT") + + comp_classes = case.get_values("COMP_CLASSES") + + check_lockedfiles(case, skip="env_batch") + + # Retrieve relevant case data + # This environment variable gets set for cesm Make and + # needs to be unset before building again. 
+ if "MODEL" in os.environ: + del os.environ["MODEL"] + build_threaded = case.get_build_threaded() + exeroot = os.path.abspath(case.get_value("EXEROOT")) + incroot = os.path.abspath(case.get_value("INCROOT")) + libroot = os.path.abspath(case.get_value("LIBROOT")) + multi_driver = case.get_value("MULTI_DRIVER") + complist = [] + ninst = 1 + comp_interface = case.get_value("COMP_INTERFACE") + for comp_class in comp_classes: + if comp_class == "CPL": + config_dir = None + if multi_driver: + ninst = case.get_value("NINST_MAX") + else: + config_dir = os.path.dirname( + case.get_value("CONFIG_{}_FILE".format(comp_class)) + ) + if multi_driver: + ninst = 1 + else: + ninst = case.get_value("NINST_{}".format(comp_class)) + + comp = case.get_value("COMP_{}".format(comp_class)) + if comp_interface == "nuopc" and comp in ( + "satm", + "slnd", + "sesp", + "sglc", + "srof", + "sice", + "socn", + "swav", + "siac", + ): + continue + thrds = case.get_value("NTHRDS_{}".format(comp_class)) + expect( + ninst is not None, + "Failed to get ninst for comp_class {}".format(comp_class), + ) + complist.append((comp_class.lower(), comp, thrds, ninst, config_dir)) + os.environ["COMP_{}".format(comp_class)] = comp + + compiler = case.get_value("COMPILER") + mpilib = case.get_value("MPILIB") + debug = case.get_value("DEBUG") + ninst_build = case.get_value("NINST_BUILD") + smp_value = case.get_value("SMP_VALUE") + clm_use_petsc = case.get_value("CLM_USE_PETSC") + mpaso_use_petsc = case.get_value("MPASO_USE_PETSC") + cism_use_trilinos = case.get_value("CISM_USE_TRILINOS") + mali_use_albany = case.get_value("MALI_USE_ALBANY") + mach = case.get_value("MACH") + + # Load some params into env + os.environ["BUILD_THREADED"] = stringify_bool(build_threaded) + cime_model = get_model() + + # TODO need some other method than a flag. + if cime_model == "e3sm" and mach == "titan" and compiler == "pgiacc": + case.set_value("CAM_TARGET", "preqx_acc") + + # This is a timestamp for the build , not the same as the testid, + # and this case may not be a test anyway. For a production + # experiment there may be many builds of the same case. + lid = get_timestamp("%y%m%d-%H%M%S") + os.environ["LID"] = lid + + # Set the overall USE_PETSC variable to TRUE if any of the + # *_USE_PETSC variables are TRUE. + # For now, there is just the one CLM_USE_PETSC variable, but in + # the future there may be others -- so USE_PETSC will be true if + # ANY of those are true. + + use_petsc = bool(clm_use_petsc) or bool(mpaso_use_petsc) + case.set_value("USE_PETSC", use_petsc) + + # Set the overall USE_TRILINOS variable to TRUE if any of the + # *_USE_TRILINOS variables are TRUE. + # For now, there is just the one CISM_USE_TRILINOS variable, but in + # the future there may be others -- so USE_TRILINOS will be true if + # ANY of those are true. + + use_trilinos = False if cism_use_trilinos is None else cism_use_trilinos + case.set_value("USE_TRILINOS", use_trilinos) + + # Set the overall USE_ALBANY variable to TRUE if any of the + # *_USE_ALBANY variables are TRUE. + # For now, there is just the one MALI_USE_ALBANY variable, but in + # the future there may be others -- so USE_ALBANY will be true if + # ANY of those are true. 
+ + use_albany = stringify_bool(mali_use_albany) + case.set_value("USE_ALBANY", use_albany) + + # Load modules + case.load_env() + + sharedpath = _build_checks( + case, + build_threaded, + comp_interface, + debug, + compiler, + mpilib, + complist, + ninst_build, + smp_value, + model_only, + buildlist, + ) + + logs = [] + + if not model_only: + logs = _build_libraries( + case, + exeroot, + sharedpath, + caseroot, + cimeroot, + libroot, + lid, + compiler, + buildlist, + comp_interface, + complist, + ) + + if not sharedlib_only: + if config.build_model_use_cmake: + logs.extend( + _build_model_cmake( + exeroot, + complist, + lid, + buildlist, + comp_interface, + sharedpath, + separate_builds, + ninja, + dry_run, + case, + ) + ) + else: + os.environ["INSTALL_SHAREDPATH"] = os.path.join( + exeroot, sharedpath + ) # for MPAS makefile generators + logs.extend( + _build_model( + build_threaded, + exeroot, + incroot, + complist, + lid, + caseroot, + cimeroot, + compiler, + buildlist, + comp_interface, + ) + ) + + if not buildlist: + # in case component build scripts updated the xml files, update the case object + case.read_xml() + # Note, doing buildlists will never result in the system thinking the build is complete + + post_build( + case, + logs, + build_complete=not (buildlist or sharedlib_only), + save_build_provenance=save_build_provenance, + ) + + t2 = time.time() + + if not sharedlib_only: + logger.info("Total build time: {:f} seconds".format(t2 - t1)) + logger.info("MODEL BUILD HAS FINISHED SUCCESSFULLY") + + return True + + +############################################################################### +def post_build(case, logs, build_complete=False, save_build_provenance=True): + ############################################################################### + for log in logs: + gzip_existing_file(log) + + if build_complete: + # must ensure there's an lid + lid = ( + os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S") + ) + if save_build_provenance: + try: + Config.instance().save_build_provenance(case, lid=lid) + except AttributeError: + logger.debug("No handler for save_build_provenance was found") + # Set XML to indicate build complete + case.set_value("BUILD_COMPLETE", True) + case.set_value("BUILD_STATUS", 0) + if "SMP_VALUE" in os.environ: + case.set_value("SMP_BUILD", os.environ["SMP_VALUE"]) + + case.flush() + + lock_file("env_build.xml", case.get_value("CASEROOT")) + + +############################################################################### +def case_build( + caseroot, + case, + sharedlib_only=False, + model_only=False, + buildlist=None, + save_build_provenance=True, + separate_builds=False, + ninja=False, + dry_run=False, +): + ############################################################################### + functor = lambda: _case_build_impl( + caseroot, + case, + sharedlib_only, + model_only, + buildlist, + save_build_provenance, + separate_builds, + ninja, + dry_run, + ) + cb = "case.build" + if sharedlib_only == True: + cb = cb + " (SHAREDLIB_BUILD)" + if model_only == True: + cb = cb + " (MODEL_BUILD)" + return run_and_log_case_status( + functor, cb, caseroot=caseroot, gitinterface=case._gitinterface + ) + + +############################################################################### +def clean(case, cleanlist=None, clean_all=False, clean_depends=None): + ############################################################################### + functor = lambda: _clean_impl(case, cleanlist, clean_all, clean_depends) + return 
run_and_log_case_status( + functor, + "build.clean", + caseroot=case.get_value("CASEROOT"), + gitinterface=case._gitinterface, + ) diff --git a/scripts/lib/CIME/tests/SystemTests/test_utils/__init__.py b/CIME/build_scripts/__init__.py similarity index 100% rename from scripts/lib/CIME/tests/SystemTests/test_utils/__init__.py rename to CIME/build_scripts/__init__.py diff --git a/CIME/build_scripts/buildlib.cprnc b/CIME/build_scripts/buildlib.cprnc new file mode 100755 index 00000000000..51426de27c6 --- /dev/null +++ b/CIME/build_scripts/buildlib.cprnc @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +import sys, os, logging, argparse + +_CIMEROOT = os.getenv("CIMEROOT") +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME import utils +from CIME.utils import run_bld_cmd_ensure_logging, CIMEError +from CIME.case import Case +from CIME.build import get_standard_cmake_args + +logger = logging.getLogger(__name__) + + +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--debug] +OR +{0} --verbose +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + utils.setup_standard_logging_options(parser) + + parser.add_argument("buildroot", help="build path root") + + parser.add_argument("installpath", help="install path ") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.buildroot, args.installpath, args.caseroot + + +############################################################################### +def buildlib(bldroot, installpath, case): + ############################################################################### + cimeroot = case.get_value("CIMEROOT") + + # Generate macros and environment + compiler = case.get_value("COMPILER") + run_bld_cmd_ensure_logging( + "{}/CIME/scripts/configure --mpilib=mpi-serial --macros-format=CMake --machine={} --compiler={}".format( + cimeroot, case.get_value("MACH"), compiler + ), + logger, + from_dir=bldroot, + ) + + cmake_args = get_standard_cmake_args(case, "ignore_sharedpath") + + os.environ["CIMEROOT"] = cimeroot + + srcroot = case.get_value("SRCROOT") + + cprnc_src_root = None + candidate_paths = ( + os.path.join(cimeroot, "CIME/non_py/cprnc"), + os.path.join(srcroot, "externals/cprnc"), + ) + + for candidate in candidate_paths: + if os.path.exists(candidate): + cprnc_src_root = candidate + + break + else: + logger.debug("{!r} is not a valid cprnc source path") + + if cprnc_src_root is None: + raise CIMEError("Could not find a valid cprnc source directory") + + cmake_cmd = ". ./.env_mach_specific.sh && NETCDF=$(dirname $(dirname $(which nf-config))) cmake {cmake_args} -DMPILIB=mpi-serial -DDEBUG=FALSE -C Macros.cmake {cprnc_src_root} -DCMAKE_PREFIX_PATH={dest_path} -DBLDROOT={bldroot}".format( + cprnc_src_root=cprnc_src_root, + dest_path=installpath, + cmake_args=cmake_args, + bldroot=bldroot, + ) + + run_bld_cmd_ensure_logging(cmake_cmd, logger, from_dir=bldroot) + + gmake_cmd = case.get_value("GMAKE") + gmake_j = case.get_value("GMAKE_J") + + run_bld_cmd_ensure_logging( + ". 
./.env_mach_specific.sh && {} VERBOSE=1 -j {}".format(gmake_cmd, gmake_j), + logger, + from_dir=bldroot, + ) + + +def _main(argv, documentation): + bldroot, installpath, caseroot = parse_command_line(argv, documentation) + with Case(caseroot, read_only=False) as case: + buildlib(bldroot, installpath, case) + + +if __name__ == "__main__": + _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.gptl b/CIME/build_scripts/buildlib.gptl new file mode 100755 index 00000000000..774e5659396 --- /dev/null +++ b/CIME/build_scripts/buildlib.gptl @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +import sys, os, logging, argparse + +cimeroot = os.getenv("CIMEROOT") +sys.path.append(os.path.join(cimeroot, "CIME", "Tools")) + +from standard_script_setup import * +from CIME import utils +from CIME.utils import run_bld_cmd_ensure_logging +from CIME.case import Case +from CIME.build import get_standard_makefile_args + +logger = logging.getLogger(__name__) + + +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--debug] +OR +{0} --verbose +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + utils.setup_standard_logging_options(parser) + + parser.add_argument("buildroot", help="build path root") + + parser.add_argument("installpath", help="install path ") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.buildroot, args.installpath, args.caseroot + + +def buildlib(bldroot, installpath, case): + ############################################################################### + caseroot = case.get_value("CASEROOT") + comp_interface = case.get_value("COMP_INTERFACE") + + gptl_dir = os.path.join( + case.get_value("CIMEROOT"), "CIME", "non_py", "src", "timing" + ) + gmake_opts = ( + "-f {gptl}/Makefile install -C {bldroot} MACFILE={macfile} COMP_NAME=gptl GPTL_DIR={gptl} GPTL_LIBDIR={bldroot}" + " SHAREDPATH={install} COMP_INTERFACE={comp_interface} {stdargs} ".format( + gptl=gptl_dir, + bldroot=bldroot, + macfile=os.path.join(caseroot, "Macros.make"), + install=installpath, + comp_interface=comp_interface, + stdargs=get_standard_makefile_args(case, shared_lib=True), + ) + ) + + gmake_cmd = case.get_value("GMAKE") + + # This runs the gptl make command + cmd = "{} {}".format(gmake_cmd, gmake_opts) + run_bld_cmd_ensure_logging(cmd, logger) + + +def _main(argv, documentation): + bldroot, installpath, caseroot = parse_command_line(argv, documentation) + with Case(caseroot) as case: + buildlib(bldroot, installpath, case) + + +if __name__ == "__main__": + _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.internal_components b/CIME/build_scripts/buildlib.internal_components new file mode 100755 index 00000000000..f7f597d776e --- /dev/null +++ b/CIME/build_scripts/buildlib.internal_components @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 + +""" +build cime component model library. This buildlib script is used by all cime internal +components. 
+""" + +import sys, os + +_CIMEROOT = os.environ.get("CIMEROOT") +if _CIMEROOT == None: + raise ValueError("ERROR: CIMEROOT not defined in buildlib.internal_components.") +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildlib import build_cime_component_lib, parse_input +from CIME.case import Case + + +def buildlib(bldroot, libroot, case, compname=None): + if compname is None: + thisdir = os.path.dirname(os.path.abspath(__file__)) + path, dir1 = os.path.split(thisdir) + _, dir2 = os.path.split(path) + if dir1 == "cime_config": + compname = dir2 + else: + compname = dir1.split(".")[1] + build_cime_component_lib(case, compname, libroot, bldroot) + + +def _main_func(args): + caseroot, libroot, bldroot = parse_input(args) + with Case(caseroot) as case: + buildlib(bldroot, libroot, case) + + +if __name__ == "__main__": + _main_func(sys.argv) diff --git a/CIME/build_scripts/buildlib.kokkos b/CIME/build_scripts/buildlib.kokkos new file mode 100755 index 00000000000..6ecceb6ae48 --- /dev/null +++ b/CIME/build_scripts/buildlib.kokkos @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 + +import os, sys, argparse, logging + +from standard_script_setup import * +from CIME import utils +from CIME.utils import expect, run_bld_cmd_ensure_logging, run_cmd_no_fail, run_cmd +from CIME.case import Case +from CIME.build import get_standard_makefile_args + +logger = logging.getLogger(__name__) + + +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--debug] +OR +{0} --verbose +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + utils.setup_standard_logging_options(parser) + + parser.add_argument("buildroot", help="build path root") + + parser.add_argument("installpath", help="install path ") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.buildroot, args.installpath, args.caseroot + + +############################################################################### +def buildlib(bldroot, installpath, case): + ############################################################################### + srcroot = case.get_value("SRCROOT") + kokkos_dir = os.path.join(srcroot, "externals", "kokkos") + expect(os.path.isdir(kokkos_dir), "Missing kokkos submodule") + + # We want to get the compiler and kokkos_options from Macros.make + # (generated from cmake_macros), but we want to otherwise + # let kokkos control flags + make_args = get_standard_makefile_args(case, shared_lib=True) + stat, output, _ = run_cmd( + "make -f Macros.make {} -p | grep KOKKOS_OPTIONS".format(make_args) + ) + if stat == 0: + kokkos_options = output.split(":=")[-1].strip() + else: + # This is the default setup. 
+ kokkos_options = "--with-serial" + build_threaded = case.get_build_threaded() + if build_threaded: + kokkos_options += " --with-openmp" + logger.warning( + "Failed to find custom kokkos options, using default: {:s}.".format( + kokkos_options + ) + ) + + if "--with-cuda" in kokkos_options: + cxx = os.path.join(kokkos_dir, "bin/nvcc_wrapper") + else: + cxx = ( + run_cmd_no_fail("make -f Macros.make {} -p | grep SCXX".format(make_args)) + .split(":=")[-1] + .strip() + ) + + gmake_cmd = case.get_value("GMAKE") + gmake_j = case.get_value("GMAKE_J") + + gen_makefile_cmd = "{kokkos_dir}/generate_makefile.bash {kokkos_options} --disable-tests --compiler={cxx} --prefix={installpath}".format( + kokkos_dir=kokkos_dir, + kokkos_options=kokkos_options, + cxx=cxx, + installpath=installpath, + ) + + run_bld_cmd_ensure_logging(gen_makefile_cmd, logger, from_dir=bldroot) + run_bld_cmd_ensure_logging( + "{} -j {}".format(gmake_cmd, gmake_j), logger, from_dir=bldroot + ) + run_bld_cmd_ensure_logging("{} install".format(gmake_cmd), logger, from_dir=bldroot) + + +def _main(argv, documentation): + bldroot, installpath, caseroot = parse_command_line(argv, documentation) + with Case(caseroot, read_only=False) as case: + buildlib(bldroot, installpath, case) + + +if __name__ == "__main__": + _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.mct b/CIME/build_scripts/buildlib.mct new file mode 100755 index 00000000000..446020839e4 --- /dev/null +++ b/CIME/build_scripts/buildlib.mct @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +import sys, os, logging, argparse + +_CIMEROOT = os.getenv("CIMEROOT") +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.config import Config +from CIME import utils +from CIME.utils import copyifnewer, run_bld_cmd_ensure_logging, expect +from CIME.case import Case +from CIME.build import get_standard_makefile_args +import glob + +logger = logging.getLogger(__name__) + + +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--debug] +OR +{0} --verbose +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + utils.setup_standard_logging_options(parser) + + parser.add_argument("buildroot", help="build path root") + + parser.add_argument("installpath", help="install path ") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.buildroot, args.installpath, args.caseroot + + +def buildlib(bldroot, installpath, case): + ############################################################################### + caseroot = case.get_value("CASEROOT") + cimeroot = case.get_value("CIMEROOT") + expect( + os.path.abspath(os.path.realpath(cimeroot)) + == os.path.abspath(os.path.realpath(_CIMEROOT)), + "CIMEROOT mismatch {} vs {}".format(_CIMEROOT, cimeroot), + ) + srcroot = case.get_value("SRCROOT") + + customize_path = os.path.join(srcroot, "cime_config", "customize") + + config = Config.load(customize_path) + + mct_path = config.mct_path.format(srcroot=srcroot) + + for _dir in ("mct", "mpeu"): + if not os.path.isdir(os.path.join(bldroot, _dir)): + os.makedirs(os.path.join(bldroot, _dir)) + copyifnewer( + 
os.path.join(mct_path, _dir, "Makefile"), + os.path.join(bldroot, _dir, "Makefile"), + ) + + gmake_opts = "-f {} ".format(os.path.join(caseroot, "Tools", "Makefile")) + gmake_opts += " -C {} ".format(bldroot) + gmake_opts += get_standard_makefile_args(case, shared_lib=True) + gmake_opts += "COMP_NAME=mct {}".format(os.path.join(bldroot, "Makefile.conf")) + + gmake_cmd = case.get_value("GMAKE") + + # This runs the mpi-serial configure command + cmd = "{} {}".format(gmake_cmd, gmake_opts) + run_bld_cmd_ensure_logging(cmd, logger) + + # Now we run the mct make command + gmake_opts = "-f {} ".format(os.path.join(mct_path, "Makefile")) + gmake_opts += " -C {} ".format(bldroot) + gmake_opts += " -j {} ".format(case.get_value("GMAKE_J")) + gmake_opts += " SRCDIR={} ".format(os.path.join(mct_path)) + + cmd = "{} {}".format(gmake_cmd, gmake_opts) + run_bld_cmd_ensure_logging(cmd, logger) + + for _dir in ("mct", "mpeu"): + for _file in glob.iglob(os.path.join(bldroot, _dir, "*.a")): + logger.info("Installing {} to {}".format(_file, installpath)) + copyifnewer( + _file, os.path.join(installpath, "lib", os.path.basename(_file)) + ) + for _file in glob.iglob(os.path.join(bldroot, _dir, "*.mod")): + logger.info("Installing {} to {}".format(_file, installpath)) + copyifnewer( + _file, os.path.join(installpath, "include", os.path.basename(_file)) + ) + + +def _main(argv, documentation): + bldroot, installpath, caseroot = parse_command_line(argv, documentation) + with Case(caseroot) as case: + buildlib(bldroot, installpath, case) + + +if __name__ == "__main__": + _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.mpi-serial b/CIME/build_scripts/buildlib.mpi-serial new file mode 100755 index 00000000000..f13d949ae45 --- /dev/null +++ b/CIME/build_scripts/buildlib.mpi-serial @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +import os, sys, logging, argparse + +from standard_script_setup import * +from CIME import utils +from CIME.config import Config +from CIME.utils import copyifnewer, run_bld_cmd_ensure_logging +from CIME.case import Case +from CIME.build import get_standard_makefile_args +import glob + +logger = logging.getLogger(__name__) + + +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--debug] +OR +{0} --verbose +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + utils.setup_standard_logging_options(parser) + + parser.add_argument("buildroot", help="build path root") + + parser.add_argument("installpath", help="install path ") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.buildroot, args.installpath, args.caseroot + + +def buildlib(bldroot, installpath, case): + ############################################################################### + caseroot = case.get_value("CASEROOT") + srcroot = case.get_value("SRCROOT") + # check to see if MPI_SERIAL is installed + with open(os.path.join(caseroot, "Macros.make"), "r") as f: + for line in f: + if "MPI_SERIAL_PATH" in line: + return + + customize_path = os.path.join(srcroot, "cime_config", "customize") + + config = Config.load(customize_path) + + mpi_serial_path = 
config.mpi_serial_path.format(srcroot=srcroot) + print(f"mpi_serial_path is {mpi_serial_path}") + + for _file in glob.iglob(os.path.join(mpi_serial_path, "*.h")): + copyifnewer(_file, os.path.join(bldroot, os.path.basename(_file))) + + gmake_opts = "-f {} ".format(os.path.join(caseroot, "Tools", "Makefile")) + gmake_opts += " -C {} ".format(bldroot) + gmake_opts += " {} ".format(get_standard_makefile_args(case, shared_lib=True)) + gmake_opts += "COMP_NAME=mpi-serial {}".format( + os.path.join(bldroot, "Makefile.conf") + ) + + gmake_cmd = case.get_value("GMAKE") + + # This runs the mpi-serial configure command + cmd = "{} {}".format(gmake_cmd, gmake_opts) + run_bld_cmd_ensure_logging(cmd, logger) + + # Now we run the mpi-serial make command + gmake_opts = "-f {} ".format(os.path.join(mpi_serial_path, "Makefile")) + gmake_opts += " -C {} ".format(bldroot) + gmake_opts += " -j {} ".format(case.get_value("GMAKE_J")) + gmake_opts += " SRCDIR={} ".format(mpi_serial_path) + gmake_opts += " VPATH={}".format(mpi_serial_path) + + cmd = "{} {}".format(gmake_cmd, gmake_opts) + run_bld_cmd_ensure_logging(cmd, logger) + + copyifnewer( + os.path.join(bldroot, "libmpi-serial.a"), + os.path.join(installpath, "lib", "libmpi-serial.a"), + ) + for _file in ("mpi.h", "mpif.h", "mpi.mod", "MPI.mod"): + if os.path.isfile(os.path.join(bldroot, _file)): + copyifnewer( + os.path.join(bldroot, _file), + os.path.join(installpath, "include", _file), + ) + + +def _main(argv, documentation): + bldroot, installpath, caseroot = parse_command_line(argv, documentation) + with Case(caseroot) as case: + buildlib(bldroot, installpath, case) + + +if __name__ == "__main__": + _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.pio b/CIME/build_scripts/buildlib.pio new file mode 100755 index 00000000000..b04367b7b19 --- /dev/null +++ b/CIME/build_scripts/buildlib.pio @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 +import sys, os, logging, argparse + +cimeroot = os.getenv("CIMEROOT") +sys.path.append(os.path.join(cimeroot, "CIME", "Tools")) + +import glob, re +from standard_script_setup import * +from CIME import utils +from CIME.utils import expect, run_bld_cmd_ensure_logging, safe_copy +from CIME.build import get_standard_makefile_args +from CIME.case import Case + +logger = logging.getLogger(__name__) + + +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + usage="""\n{0} [--debug] +OR +{0} --verbose +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run \033[0m + > {0} +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + utils.setup_standard_logging_options(parser) + + parser.add_argument("buildroot", help="build path root") + + parser.add_argument("installpath", help="install path ") + + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) + + args = utils.parse_args_and_handle_standard_logging_options(args, parser) + + return args.buildroot, args.installpath, args.caseroot + + +############################################################################### +def buildlib(bldroot, installpath, case): + ############################################################################### + cime_model = case.get_value("MODEL") + caseroot = case.get_value("CASEROOT") + pio_version = case.get_value("PIO_VERSION") + srcroot = case.get_value("SRCROOT") + scorpio_src_root_dir = None 
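+ # E3SM builds prefer the Scorpio sources bundled under externals/ when they are present;
+ # if those directories are missing, scorpio_src_root_dir stays None and the standard PIO build below is used.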
+ if cime_model == "e3sm": + scorpio_src_root_dir = os.path.join(srcroot, "externals") + # Scorpio classic is derived from PIO1 + scorpio_classic_dir = "scorpio_classic" + # Scorpio is derived from PIO2 + scorpio_dir = "scorpio" + scorpio_classic_src_dir = os.path.join( + scorpio_src_root_dir, scorpio_classic_dir + ) + scorpio_src_dir = os.path.join(scorpio_src_root_dir, scorpio_dir) + if ( + not os.path.isdir(scorpio_src_root_dir) + or not os.path.isdir(scorpio_classic_src_dir) + or not os.path.isdir(scorpio_src_dir) + ): + scorpio_src_root_dir = None + + # If variable PIO_VERSION_MAJOR is defined in the environment then + # we assume that PIO is installed on the system + # and expect to find + # PIO_LIBDIR, PIO_INCDIR, PIO_TYPENAME_VALID_VALUES + # also defined in the environment. In this case we + # will use the installed pio and not build it here. + installed_pio_version = os.environ.get("PIO_VERSION_MAJOR") + logger.info( + "pio_version_major = {} pio_version = {}".format( + installed_pio_version, pio_version + ) + ) + if installed_pio_version is not None and int(installed_pio_version) == pio_version: + logger.info("Using installed PIO library") + _set_pio_valid_values(case, os.environ.get("PIO_TYPENAME_VALID_VALUES")) + return + + pio_model = "pio{}".format(pio_version) + pio_dir = os.path.join(bldroot, pio_model) + if not os.path.isdir(pio_dir): + os.makedirs(pio_dir) + casetools = case.get_value("CASETOOLS") + if scorpio_src_root_dir: + # Use old genf90 until "short" type is supported + cmake_opts = ( + '"-D GENF90_PATH=' + + os.path.join(scorpio_src_root_dir, scorpio_dir, "src/genf90") + + '" ' + ) + elif pio_version == 1: + cmake_opts = '"-D GENF90_PATH=$CIMEROOT/CIME/non_py/externals/genf90 "' + else: + cmake_opts = '"-D GENF90_PATH=' + srcroot + '/libraries/parallelio/scripts/ "' + + stdargs = get_standard_makefile_args(case, shared_lib=True) + + gmake_vars = ( + "CASEROOT={caseroot} COMP_NAME={pio_model} " + "USER_CMAKE_OPTS={cmake_opts} " + "PIO_LIBDIR={pio_dir} CASETOOLS={casetools} " + "USER_CPPDEFS=-DTIMING".format( + caseroot=caseroot, + pio_model=pio_model, + cmake_opts=cmake_opts, + pio_dir=pio_dir, + casetools=casetools, + ) + ) + + if scorpio_src_root_dir is not None: + gmake_vars += ( + " IO_LIB_SRCROOT={scorpio_src_root_dir} " + " IO_LIB_v1_SRCDIR={scorpio_classic_dir} " + " IO_LIB_v2_SRCDIR={scorpio_dir} ".format( + scorpio_src_root_dir=scorpio_src_root_dir, + scorpio_classic_dir=scorpio_classic_dir, + scorpio_dir=scorpio_dir, + ) + ) + + gmake_opts = ( + "{pio_dir}/Makefile -C {pio_dir} " + " {gmake_vars} {stdargs} -f {casetools}/Makefile".format( + pio_dir=pio_dir, gmake_vars=gmake_vars, casetools=casetools, stdargs=stdargs + ) + ) + + gmake_cmd = case.get_value("GMAKE") + + # This runs the pio cmake command from the cime case Makefile + cmd = "{} {}".format(gmake_cmd, gmake_opts) + run_bld_cmd_ensure_logging(cmd, logger, from_dir=pio_dir) + + # This runs the pio make command from the cmake generated Makefile + run_bld_cmd_ensure_logging( + "{} -j {}".format(gmake_cmd, case.get_value("GMAKE_J")), + logger, + from_dir=pio_dir, + ) + + if pio_version == 1: + installed_lib = os.path.join(installpath, "lib", "libpio.a") + installed_lib_time = 0 + if os.path.isfile(installed_lib): + installed_lib_time = os.path.getmtime(installed_lib) + newlib = os.path.join(pio_dir, "pio", "libpio.a") + newlib_time = os.path.getmtime(newlib) + if newlib_time > installed_lib_time: + logger.info("Installing pio version 1") + safe_copy(newlib, installed_lib) + for glob_to_copy in 
("*.h", "*.mod"): + for item in glob.glob(os.path.join(pio_dir, "pio", glob_to_copy)): + safe_copy(item, "{}/include".format(installpath)) + expect_string = "D_NETCDF;" + pnetcdf_string = "D_PNETCDF" + netcdf4_string = "D_NETCDF4" + else: + globs_to_copy = ( + os.path.join("src", "clib", "libpioc.*"), + os.path.join("src", "flib", "libpiof.*"), + os.path.join("src", "clib", "*.h"), + os.path.join("src", "flib", "*.mod"), + ) + for glob_to_copy in globs_to_copy: + installed_file_time = 0 + for item in glob.glob(os.path.join(pio_dir, glob_to_copy)): + if item.endswith(".a") or item.endswith(".so"): + installdir = "lib" + else: + installdir = "include" + installed_file = os.path.join( + installpath, installdir, os.path.basename(item) + ) + item_time = os.path.getmtime(item) + if os.path.isfile(installed_file): + installed_file_time = os.path.getmtime(installed_file) + if item_time > installed_file_time: + safe_copy(item, installed_file) + expect_string = "NetCDF_C_LIBRARY-ADVANCED" + pnetcdf_string = "WITH_PNETCDF:BOOL=ON" + netcdf4_string = "NetCDF_C_HAS_PARALLEL:BOOL=TRUE" + + # make sure case pio_typename valid_values is set correctly + expect_string_found = False + pnetcdf_found = False + netcdf4_parallel_found = False + + cache_file = open(os.path.join(pio_dir, "CMakeCache.txt"), "r") + for line in cache_file: + if re.search(expect_string, line): + expect_string_found = True + if re.search(pnetcdf_string, line): + pnetcdf_found = True + if re.search(netcdf4_string, line): + netcdf4_parallel_found = True + + expect(expect_string_found, "CIME models require NETCDF in PIO build") + valid_values = "netcdf" + if pnetcdf_found: + valid_values += ",pnetcdf" + if netcdf4_parallel_found: + valid_values += ",netcdf4p,netcdf4c" + + _set_pio_valid_values(case, valid_values) + + +def _set_pio_valid_values(case, valid_values): + # nothing means use the general default + valid_values += ",nothing" + logger.warning("Updating valid_values for PIO_TYPENAME: {}".format(valid_values)) + env_run = case.get_env("run") + env_run.set_valid_values("PIO_TYPENAME", valid_values) + + for comp in case.get_values("COMP_CLASSES"): + comp_pio_typename = "{}_PIO_TYPENAME".format(comp) + current_value = case.get_value(comp_pio_typename) + if current_value not in valid_values: + logger.warning( + "Resetting PIO_TYPENAME=netcdf for component {}".format(comp) + ) + env_run.set_value(comp_pio_typename, "netcdf") + + +def _main(argv, documentation): + bldroot, installpath, caseroot = parse_command_line(argv, documentation) + with Case(caseroot, read_only=False) as case: + buildlib(bldroot, installpath, case) + + +if __name__ == "__main__": + _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib_cmake.internal_components b/CIME/build_scripts/buildlib_cmake.internal_components new file mode 100755 index 00000000000..4c939041e62 --- /dev/null +++ b/CIME/build_scripts/buildlib_cmake.internal_components @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +""" +build cime component model library. This buildlib script is used by all cime internal +components. +""" + +import sys, os + +_CIMEROOT = os.environ.get("CIMEROOT") +if _CIMEROOT == None: + raise ValueError( + "ERROR: CIMEROOT not defined in buildlib_cmake.internal_components." 
+ ) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildlib import build_cime_component_lib, parse_input +from CIME.case import Case + + +def buildlib(bldroot, libroot, case, compname=None): + if compname is None: + thisdir = os.path.dirname(os.path.abspath(__file__)) + path, dir1 = os.path.split(thisdir) + _, dir2 = os.path.split(path) + if dir1 == "cime_config": + compname = dir2 + else: + compname = dir1.split(".")[1] + build_cime_component_lib(case, compname, libroot, bldroot) + + +def _main_func(args): + caseroot, libroot, bldroot = parse_input(args) + with Case(caseroot) as case: + buildlib(bldroot, libroot, case) + + +if __name__ == "__main__": + _main_func(sys.argv) diff --git a/CIME/buildlib.py b/CIME/buildlib.py new file mode 100644 index 00000000000..b1152f8923a --- /dev/null +++ b/CIME/buildlib.py @@ -0,0 +1,127 @@ +""" +common utilities for buildlib +""" + +from CIME.XML.standard_module_setup import * +from CIME.case import Case +from CIME.utils import ( + parse_args_and_handle_standard_logging_options, + setup_standard_logging_options, + safe_copy, +) +from CIME.config import Config +from CIME.build import get_standard_makefile_args +from CIME.XML.files import Files + +import sys, os, argparse + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_input(argv): + ############################################################################### + + parser = argparse.ArgumentParser() + + setup_standard_logging_options(parser) + + parser.add_argument("caseroot", default=os.getcwd(), help="Case directory") + + parser.add_argument("libroot", help="root for creating the library") + + parser.add_argument("bldroot", help="root for building library") + + args = parse_args_and_handle_standard_logging_options(argv, parser) + + # Some compilers have trouble with long include paths, setting + # EXEROOT to the relative path from bldroot solves the problem + # doing it in the environment means we don't need to change all of + # the component buildlib scripts + with Case(args.caseroot) as case: + os.environ["EXEROOT"] = os.path.relpath(case.get_value("EXEROOT"), args.bldroot) + + return args.caseroot, args.libroot, args.bldroot + + +############################################################################### +def build_cime_component_lib(case, compname, libroot, bldroot): + ############################################################################### + + casebuild = case.get_value("CASEBUILD") + compclass = compname[1:] # This very hacky + comp_interface = case.get_value("COMP_INTERFACE") + confdir = os.path.join(casebuild, "{}conf".format(compname)) + + if not os.path.exists(confdir): + os.mkdir(confdir) + + with open(os.path.join(confdir, "Filepath"), "w") as out: + out.write( + os.path.join( + case.get_value("CASEROOT"), "SourceMods", "src.{}\n".format(compname) + ) + + "\n" + ) + files = Files(comp_interface=comp_interface) + compdir = files.get_value( + "COMP_ROOT_DIR_" + compclass.upper(), {"component": compname} + ) + if compname.startswith("d"): + out.write(os.path.join(compdir, "src") + "\n") + out.write(os.path.join(compdir) + "\n") + elif compname.startswith("x"): + out.write(os.path.join(compdir, "..", "xshare") + "\n") + out.write(os.path.join(compdir, "src") + "\n") + elif compname.startswith("s"): + out.write(os.path.join(compdir, "src") + "\n") + + with open(os.path.join(confdir, "CIME_cppdefs"), "w") as out: + out.write("") + + config 
= Config.instance() + + # Build the component + if config.build_cime_component_lib: + safe_copy(os.path.join(confdir, "Filepath"), bldroot) + if os.path.exists(os.path.join(confdir, "CIME_cppdefs")): + safe_copy(os.path.join(confdir, "CIME_cppdefs"), bldroot) + elif os.path.exists(os.path.join(confdir, "CCSM_cppdefs")): + safe_copy(os.path.join(confdir, "CCSM_cppdefs"), bldroot) + run_gmake(case, compclass, compname, libroot, bldroot) + + +############################################################################### +def run_gmake(case, compclass, compname, libroot, bldroot, libname="", user_cppdefs=""): + ############################################################################### + gmake_args = get_standard_makefile_args(case) + + gmake_j = case.get_value("GMAKE_J") + gmake = case.get_value("GMAKE") + + complib = "" + if libname: + complib = os.path.join(libroot, "lib{}.a".format(libname)) + else: + complib = os.path.join(libroot, "lib{}.a".format(compclass)) + + makefile = os.path.join(case.get_value("CASETOOLS"), "Makefile") + + cmd = "{gmake} complib -j {gmake_j:d} COMP_CLASS={compclass} COMP_NAME={compname} COMPLIB={complib} {gmake_args} -f {makefile} -C {bldroot} ".format( + gmake=gmake, + gmake_j=gmake_j, + compclass=compclass, + compname=compname, + complib=complib, + gmake_args=gmake_args, + makefile=makefile, + bldroot=bldroot, + ) + if user_cppdefs: + cmd = cmd + "USER_CPPDEFS='{}'".format(user_cppdefs) + + stat, out, err = run_cmd(cmd, combine_output=True) + print(out) + if stat: + logger.info("buildlib stat={} err={}".format(stat, err)) + os.unlink(complib) + return stat diff --git a/CIME/buildnml.py b/CIME/buildnml.py new file mode 100644 index 00000000000..69cbc5f7dca --- /dev/null +++ b/CIME/buildnml.py @@ -0,0 +1,158 @@ +""" +common implementation for building namelist commands + +These are used by components///cime_config/buildnml +""" + +from CIME.XML.standard_module_setup import * +from CIME.utils import ( + expect, + parse_args_and_handle_standard_logging_options, + setup_standard_logging_options, +) +from CIME.utils import safe_copy +import sys, os, argparse, glob + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_input(argv): + ############################################################################### + + parser = argparse.ArgumentParser() + + setup_standard_logging_options(parser) + + parser.add_argument("caseroot", default=os.getcwd(), help="Case directory") + + args = parse_args_and_handle_standard_logging_options(argv, parser) + + return args.caseroot + + +############################################################################### +# pylint: disable=unused-argument +def build_xcpl_nml(case, caseroot, compname): + ############################################################################### + compclasses = case.get_values("COMP_CLASSES") + compclass = None + for compclass in compclasses: + if case.get_value("COMP_{}".format(compclass)) == compname: + break + expect( + compclass is not None, + "Could not identify compclass for compname {}".format(compname), + ) + rundir = case.get_value("RUNDIR") + comp_interface = case.get_value("COMP_INTERFACE") + + if comp_interface != "nuopc": + ninst = case.get_value("NINST_{}".format(compclass.upper())) + else: + ninst = case.get_value("NINST") + if not ninst: + ninst = 1 + + nx = case.get_value("{}_NX".format(compclass.upper())) + ny = case.get_value("{}_NY".format(compclass.upper())) + + if comp_interface != "nuopc": + if 
compname == "xrof": + flood_mode = case.get_value("XROF_FLOOD_MODE") + extras = [] + dtype = 1 + npes = 0 + length = 0 + if compname == "xatm": + if ny == 1: + dtype = 2 + extras = [ + ["24", "ncpl number of communications w/coupler per dat"], + ["0.0", "simul time proxy (secs): time between cpl comms"], + ] + elif compname == "xglc" or compname == "xice": + dtype = 2 + elif compname == "xlnd": + dtype = 11 + elif compname == "xocn": + dtype = 4 + elif compname == "xrof": + dtype = 11 + if flood_mode == "ACTIVE": + extras = [[".true.", "flood flag"]] + else: + extras = [[".false.", "flood flag"]] + + for i in range(1, ninst + 1): + # If only 1 file, name is 'compclass_in' + # otherwise files are 'compclass_in0001', 'compclass_in0002', etc + if ninst == 1: + filename = os.path.join(rundir, "{}_in".format(compname)) + else: + filename = os.path.join(rundir, "{}_in_{:04d}".format(compname, i)) + + with open(filename, "w") as infile: + infile.write("{:<20d} ! i-direction global dimension\n".format(nx)) + infile.write("{:<20d} ! j-direction global dimension\n".format(ny)) + if comp_interface != "nuopc": + infile.write( + "{:<20d} ! decomp_type 1=1d-by-lat, 2=1d-by-lon, 3=2d, 4=2d evensquare, 11=segmented\n".format( + dtype + ) + ) + infile.write("{:<20d} ! num of pes for i (type 3 only)\n".format(npes)) + infile.write( + "{:<20d} ! length of segments (type 4 only)\n".format(length) + ) + for extra in extras: + infile.write("{:<20s} ! {}\n".format(extra[0], extra[1])) + + +############################################################################### +def create_namelist_infile(case, user_nl_file, namelist_infile, infile_text=""): + ############################################################################### + lines_input = [] + if os.path.isfile(user_nl_file): + with open(user_nl_file, "r") as file_usernl: + lines_input = file_usernl.readlines() + else: + logger.warning( + "WARNING: No file {} found in case directory".format(user_nl_file) + ) + + lines_output = [] + lines_output.append("&comp_inparm \n") + if infile_text: + lines_output.append(infile_text) + logger.debug("file_infile {} ".format(infile_text)) + + for line in lines_input: + match1 = re.search(r"^[\&\/\!]", line) + match2 = re.search(r"\$([\w\_])+", line) + if match1 is None and match2 is not None: + line = case.get_resolved_value(line) + if match1 is None: + lines_output.append(line) + + lines_output.append("/ \n") + with open(namelist_infile, "w") as file_infile: + file_infile.write("\n".join(lines_output)) + + +def copy_inputs_to_rundir(caseroot, compname, confdir, rundir, inst_string): + + if os.path.isdir(rundir): + filename = compname + "_in" + file_src = os.path.join(confdir, filename) + file_dest = os.path.join(rundir, filename) + if inst_string: + file_dest += inst_string + safe_copy(file_src, file_dest) + + for xmlfile in glob.glob(os.path.join(confdir, "*streams*.xml")): + casexml = os.path.join(caseroot, os.path.basename(xmlfile)) + if os.path.exists(casexml): + logger.info("Using {} for {} streams".format(casexml, compname)) + safe_copy(casexml, rundir) + else: + safe_copy(xmlfile, rundir) diff --git a/CIME/case/README b/CIME/case/README new file mode 100644 index 00000000000..529c7f8cdd5 --- /dev/null +++ b/CIME/case/README @@ -0,0 +1 @@ +Files in this directory are members of the class Case defined in file case.py and should not be directly imported. 
diff --git a/scripts/lib/CIME/case/__init__.py b/CIME/case/__init__.py similarity index 100% rename from scripts/lib/CIME/case/__init__.py rename to CIME/case/__init__.py diff --git a/CIME/case/case.py b/CIME/case/case.py new file mode 100644 index 00000000000..6449b60ac9d --- /dev/null +++ b/CIME/case/case.py @@ -0,0 +1,2580 @@ +# -*- coding: utf-8 -*- +""" +Wrapper around all env XML for a case. + +All interaction with and between the module files in XML/ takes place +through the Case module. +""" +from copy import deepcopy +import sys +import glob, os, shutil, math, time, hashlib, socket, getpass +from CIME.XML.standard_module_setup import * + +# pylint: disable=import-error,redefined-builtin +from CIME import utils +from CIME.config import Config +from CIME.status import append_status +from CIME.utils import expect, get_cime_root +from CIME.utils import convert_to_type, get_model, set_model +from CIME.utils import get_project, get_charge_account, check_name +from CIME.utils import get_current_commit, safe_copy, get_cime_default_driver +from CIME.gitinterface import GitInterface +from CIME.locked_files import LOCKED_DIR, lock_file +from CIME.XML.machines import Machines +from CIME.XML.pes import Pes +from CIME.XML.files import Files +from CIME.XML.testlist import Testlist +from CIME.XML.component import Component +from CIME.XML.compsets import Compsets +from CIME.XML.grids import Grids +from CIME.XML.batch import Batch +from CIME.XML.workflow import Workflow +from CIME.XML.postprocessing import Postprocessing +from CIME.XML.pio import PIO +from CIME.XML.archive import Archive +from CIME.XML.env_test import EnvTest +from CIME.XML.env_mach_specific import EnvMachSpecific +from CIME.XML.env_case import EnvCase +from CIME.XML.env_mach_pes import EnvMachPes +from CIME.XML.env_build import EnvBuild +from CIME.XML.env_run import EnvRun +from CIME.XML.env_archive import EnvArchive +from CIME.XML.env_batch import EnvBatch +from CIME.XML.env_workflow import EnvWorkflow +from CIME.XML.env_postprocessing import EnvPostprocessing +from CIME.XML.generic_xml import GenericXML +from CIME.user_mod_support import apply_user_mods +from CIME.aprun import get_aprun_cmd_for_case + +logger = logging.getLogger(__name__) + +config = Config.instance() + + +class Case(object): + """ + https://github.com/ESMCI/cime/wiki/Developers-Introduction + The Case class is the heart of the CIME Case Control system. All + interactions with a Case take part through this class. All of the + variables used to create and manipulate a case are defined in xml + files and for every xml file there is a python class to interact + with that file. + + XML files which are part of the CIME distribution and are meant to + be readonly with respect to a case are typically named + config_something.xml and the corresponding python Class is + Something and can be found in file CIME.XML.something.py. I'll + refer to these as the CIME config classes. + + XML files which are part of a case and thus are read/write to a + case are typically named env_whatever.xml and the cooresponding + python modules are CIME.XML.env_whatever.py and classes are + EnvWhatever. I'll refer to these as the Case env classes. + + The Case Class includes an array of the Case env classes, in the + configure function and it's supporting functions defined below + the case object creates and manipulates the Case env classes + by reading and interpreting the CIME config classes. 
+ + This class extends across multiple files, class members external to this file + are listed in the following imports + + """ + + from CIME.case.case_setup import case_setup, _create_case_repo + from CIME.case.case_clone import create_clone, _copy_user_modified_to_clone + from CIME.case.case_test import case_test + from CIME.case.case_submit import check_DA_settings, check_case, submit + from CIME.case.case_st_archive import ( + case_st_archive, + restore_from_archive, + archive_last_restarts, + test_st_archive, + test_env_archive, + ) + from CIME.case.case_run import case_run + from CIME.case.case_cmpgen_namelists import case_cmpgen_namelists + from CIME.case.preview_namelists import create_dirs, create_namelists + from CIME.case.check_input_data import ( + check_all_input_data, + stage_refcase, + check_input_data, + ) + + def __init__(self, case_root=None, read_only=True, record=False, non_local=False): + if case_root is None: + case_root = os.getcwd() + expect( + not os.path.isdir(case_root) + or os.path.isfile(os.path.join(case_root, "env_case.xml")), + "Directory {} does not appear to be a valid case directory".format( + case_root + ), + ) + self._existing_case = os.path.isdir(case_root) + + self._caseroot = case_root + logger.debug("Initializing Case.") + self._read_only_mode = True + self._force_read_only = read_only + self._primary_component = None + + self._env_entryid_files = [] + self._env_generic_files = [] + self._files = [] + self._comp_interface = None + self.gpu_enabled = False + self._non_local = non_local + self.read_xml() + + srcroot = self.get_value("SRCROOT") + + # Propagate `srcroot` to `GenericXML` to resolve $SRCROOT + if srcroot is not None: + utils.GLOBAL["SRCROOT"] = srcroot + + # srcroot may not be known yet, in the instance of creating + # a new case + customize_path = os.path.join(srcroot, "cime_config", "customize") + + config.load(customize_path) + + if record: + self.record_cmd() + + cimeroot = get_cime_root() + + # Insert tools path to support external code trying to import + # standard_script_setup + tools_path = os.path.join(cimeroot, "CIME", "Tools") + if tools_path not in sys.path: + sys.path.insert(0, tools_path) + + # Hold arbitary values. In create_newcase we may set values + # for xml files that haven't been created yet. We need a place + # to store them until we are ready to create the file. At file + # creation we get the values for those fields from this lookup + # table and then remove the entry. 
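+ # For example, CIMEROOT and MODEL are seeded via set_lookup_value just below,
+ # before the corresponding env_*.xml files have been written.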
+ self.lookups = {} + self.set_lookup_value("CIMEROOT", cimeroot) + self._cime_model = get_model() + self.set_lookup_value("MODEL", self._cime_model) + self._compsetname = None + self._gridname = None + self._pesfile = None + self._gridfile = None + self._components = [] + self._component_classes = [] + self._component_description = {} + self._is_env_loaded = False + self._loaded_envs = None + self._gitinterface = None + # these are user_mods as defined in the compset + # Command Line user_mods are handled seperately + + # Derived attributes + self.thread_count = None + self.total_tasks = None + self.tasks_per_node = None + self.ngpus_per_node = 0 + self.num_nodes = None + self.spare_nodes = None + self.tasks_per_numa = None + self.cores_per_task = None + self.srun_binding = None + self.async_io = False + self.iotasks = 0 + + # check if case has been configured and if so initialize derived + if self.get_value("CASEROOT") is not None: + if not self._non_local: + mach = self.get_value("MACH") + extra_machdir = self.get_value("EXTRA_MACHDIR") + if extra_machdir: + machobj = Machines(machine=mach, extra_machines_dir=extra_machdir) + else: + machobj = Machines(machine=mach) + + # This check should only be done on systems with a common filesystem but separate login nodes (ncar) + if "NCAR_HOST" in os.environ: + probed_machine = machobj.probe_machine_name() + if probed_machine: + expect( + mach == probed_machine, + f"Current machine {probed_machine} does not match case machine {mach}.", + ) + if os.path.exists(os.path.join(self.get_value("CASEROOT"), ".git")): + self._gitinterface = GitInterface( + self.get_value("CASEROOT"), logger + ) + + self.initialize_derived_attributes() + + def get_baseline_dir(self): + baseline_root = self.get_value("BASELINE_ROOT") + + baseline_name = self.get_value("BASECMP_CASE") + + return os.path.join(baseline_root, baseline_name) + + def check_if_comp_var(self, vid): + for env_file in self._env_entryid_files: + new_vid, new_comp, iscompvar = env_file.check_if_comp_var(vid) + if iscompvar: + return new_vid, new_comp, iscompvar + + return vid, None, False + + def initialize_derived_attributes(self): + """ + These are derived variables which can be used in the config_* files + for variable substitution using the {{ var }} syntax + """ + set_model(self.get_value("MODEL")) + env_mach_pes = self.get_env("mach_pes") + env_mach_spec = self.get_env("mach_specific") + comp_classes = self.get_values("COMP_CLASSES") + max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") + self.async_io = {} + asyncio = False + for comp in comp_classes: + self.async_io[comp] = self.get_value("PIO_ASYNC_INTERFACE", subgroup=comp) + if self.async_io[comp]: + asyncio = True + + self.iotasks = ( + self.get_value("PIO_ASYNCIO_NTASKS") + if self.get_value("PIO_ASYNCIO_NTASKS") + else 0 + ) + + self.thread_count = env_mach_pes.get_max_thread_count(comp_classes) + + mpi_attribs = { + "compiler": self.get_value("COMPILER"), + "mpilib": self.get_value("MPILIB"), + "threaded": self.get_build_threaded(), + } + + job = self.get_primary_job() + executable = env_mach_spec.get_mpirun(self, mpi_attribs, job, exe_only=True)[0] + if executable is not None and "aprun" in executable: + ( + _, + self.num_nodes, + self.total_tasks, + self.tasks_per_node, + self.thread_count, + ) = get_aprun_cmd_for_case(self, "e3sm.exe") + self.spare_nodes = env_mach_pes.get_spare_nodes(self.num_nodes) + self.num_nodes += self.spare_nodes + else: + self.total_tasks = env_mach_pes.get_total_tasks(comp_classes, asyncio) + 
self.tasks_per_node = env_mach_pes.get_tasks_per_node( + self.total_tasks, self.thread_count + ) + self.num_nodes, self.spare_nodes = env_mach_pes.get_total_nodes( + self.total_tasks, self.thread_count + ) + + self.num_nodes += self.spare_nodes + + logger.debug( + "total_tasks {} thread_count {}".format(self.total_tasks, self.thread_count) + ) + + max_gpus_per_node = self.get_value("MAX_GPUS_PER_NODE") + + if max_gpus_per_node: + self.ngpus_per_node = self.get_value("NGPUS_PER_NODE") + # update the maximum MPI tasks for a GPU node (could differ from a pure-CPU node) + if self.ngpus_per_node > 0: + max_mpitasks_per_node = self.get_value("MAX_CPUTASKS_PER_GPU_NODE") + + self.tasks_per_numa = int(math.ceil(self.tasks_per_node / 2.0)) + smt_factor = max( + 1, int(self.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node) + ) + + threads_per_node = self.tasks_per_node * self.thread_count + threads_per_core = ( + 1 if (threads_per_node <= max_mpitasks_per_node) else smt_factor + ) + self.cores_per_task = self.thread_count / threads_per_core + + os.environ["OMP_NUM_THREADS"] = str(self.thread_count) + + self.srun_binding = math.floor( + smt_factor * max_mpitasks_per_node / self.tasks_per_node + ) + self.srun_binding = max(1, int(self.srun_binding)) + + # Define __enter__ and __exit__ so that we can use this as a context manager + # and force a flush on exit. + def __enter__(self): + if not self._force_read_only: + self._read_only_mode = False + return self + + def __exit__(self, *_): + self.flush() + self._read_only_mode = True + return False + + def read_xml(self): + for env_file in self._files: + expect( + not env_file.needsrewrite, + "Potential loss of unflushed changes in {}".format(env_file.filename), + ) + + self._env_entryid_files = [] + self._env_entryid_files.append( + EnvCase(self._caseroot, components=None, read_only=self._force_read_only) + ) + components = self._env_entryid_files[0].get_values("COMP_CLASSES") + self._env_entryid_files.append( + EnvRun( + self._caseroot, components=components, read_only=self._force_read_only + ) + ) + self._env_entryid_files.append( + EnvBuild( + self._caseroot, components=components, read_only=self._force_read_only + ) + ) + self._comp_interface = self._env_entryid_files[-1].get_value("COMP_INTERFACE") + + self._env_entryid_files.append( + EnvMachPes( + self._caseroot, + components=components, + read_only=self._force_read_only, + comp_interface=self._comp_interface, + ) + ) + self._env_entryid_files.append( + EnvBatch(self._caseroot, read_only=self._force_read_only) + ) + self._env_entryid_files.append( + EnvWorkflow(self._caseroot, read_only=self._force_read_only) + ) + if not self._existing_case or os.path.isfile("env_postprocessing.xml"): + self._env_entryid_files.append( + EnvPostprocessing(self._caseroot, read_only=self._force_read_only) + ) + + if os.path.isfile(os.path.join(self._caseroot, "env_test.xml")): + self._env_entryid_files.append( + EnvTest( + self._caseroot, + components=components, + read_only=self._force_read_only, + ) + ) + self._env_generic_files = [] + self._env_generic_files.append( + EnvMachSpecific( + self._caseroot, + read_only=self._force_read_only, + comp_interface=self._comp_interface, + ) + ) + self._env_generic_files.append( + EnvArchive(self._caseroot, read_only=self._force_read_only) + ) + self._files = self._env_entryid_files + self._env_generic_files + + def get_case_root(self): + """Returns the root directory for this case.""" + return self._caseroot + + def get_env(self, short_name, allow_missing=False): + 
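+ """Return the wrapper object for env_<short_name>.xml, e.g. get_env("run") -> EnvRun."""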
full_name = "env_{}.xml".format(short_name) + for env_file in self._files: + if os.path.basename(env_file.filename) == full_name: + return env_file + if allow_missing: + return None + expect(False, "Could not find object for {} in case".format(full_name)) + + def check_timestamps(self, short_name=None): + if short_name is not None: + env_file = self.get_env(short_name) + env_file.check_timestamp() + else: + for env_file in self._files: + env_file.check_timestamp() + + def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None): + newcase = deepcopy(self) + for env_file in newcase._files: # pylint: disable=protected-access + basename = os.path.basename(env_file.filename) + newfile = os.path.join(newcaseroot, basename) + env_file.change_file(newfile, copy=True) + + if newcimeroot is not None: + newcase.set_value("CIMEROOT", newcimeroot) + + if newsrcroot is not None: + newcase.set_value("SRCROOT", newsrcroot) + + newcase.set_value("CASE", newcasename) + newcase.set_value("CASEROOT", newcaseroot) + newcase.set_value("CONTINUE_RUN", "FALSE") + newcase.set_value("RESUBMIT", 0) + newcase.set_value("CASE_HASH", newcase.new_hash()) + + # Important, and subtle: Writability should NOT be copied because + # this allows the copy to be modified without needing a "with" statement + # which opens the door to tricky errors such as unflushed writes. + newcase._read_only_mode = True # pylint: disable=protected-access + + return newcase + + def flush(self, flushall=False): + if not os.path.isdir(self._caseroot): + # do not flush if caseroot wasnt created + return + + _postprocessing_spec_file = self.get_value("POSTPROCESSING_SPEC_FILE") + if _postprocessing_spec_file is not None: + have_postprocessing = os.path.isfile(_postprocessing_spec_file) + else: + have_postprocessing = False + if not have_postprocessing: + # Remove env_postprocessing.xml from self._files + self._files = [ + file + for file in self._files + if file.get_id() != "env_postprocessing.xml" + ] + + for env_file in self._files: + env_file.write(force_write=flushall) + + def get_values(self, item, attribute=None, resolved=True, subgroup=None): + for env_file in self._files: + # Wait and resolve in self rather than in env_file + results = env_file.get_values( + item, attribute, resolved=False, subgroup=subgroup + ) + if len(results) > 0: + new_results = [] + if resolved: + for result in results: + if isinstance(result, str): + result = self.get_resolved_value(result) + vtype = env_file.get_type_info(item) + if vtype is not None or vtype != "char": + result = convert_to_type(result, vtype, item) + + new_results.append(result) + + else: + new_results.append(result) + + else: + new_results = results + + return new_results + + # Return empty result + return [] + + def get_value(self, item, attribute=None, resolved=True, subgroup=None): + if item == "GPU_ENABLED": + if not self.gpu_enabled: + if ( + self.get_value("GPU_TYPE") != "none" + and self.get_value("NGPUS_PER_NODE") > 0 + ): + self.gpu_enabled = True + return "true" if self.gpu_enabled else "false" + + result = None + for env_file in self._files: + # Wait and resolve in self rather than in env_file + result = env_file.get_value( + item, attribute, resolved=False, subgroup=subgroup + ) + + if result is not None: + if resolved and isinstance(result, str): + result = self.get_resolved_value(result, subgroup=subgroup) + vtype = env_file.get_type_info(item) + if vtype is not None and vtype != "char": + result = convert_to_type(result, vtype, item) + + return result + + # Return 
empty result + return result + + def get_record_fields(self, variable, field): + """get_record_fields gets individual requested field from an entry_id file + this routine is used only by xmlquery""" + # Empty result + result = [] + + for env_file in self._env_entryid_files: + # Wait and resolve in self rather than in env_file + logger.debug( + "(get_record_field) Searching in {}".format(env_file.__class__.__name__) + ) + if field == "varid": + roots = env_file.scan_children("entry") + else: + roots = env_file.get_nodes_by_id(variable) + + for root in roots: + if root is not None: + if field == "raw": + result.append(env_file.get_raw_record(root)) + elif field == "desc": + result.append(env_file.get_description(root)) + elif field == "varid": + result.append(env_file.get(root, "id")) + elif field == "group": + result.extend(env_file.get_groups(root)) + elif field == "valid_values": + # pylint: disable=protected-access + vv = env_file._get_valid_values(root) + if vv: + result.extend(vv) + elif field == "file": + result.append(env_file.filename) + + if not result: + for env_file in self._env_generic_files: + roots = env_file.scan_children(variable) + for root in roots: + if root is not None: + if field == "raw": + result.append(env_file.get_raw_record(root)) + elif field == "group": + result.extend(env_file.get_groups(root)) + elif field == "file": + result.append(env_file.filename) + + return list(set(result)) + + def get_type_info(self, item): + result = None + for env_file in self._env_entryid_files: + result = env_file.get_type_info(item) + if result is not None: + return result + + return result + + def get_resolved_value( + self, item, recurse=0, allow_unresolved_envvars=False, subgroup=None + ): + num_unresolved = item.count("$") if item else 0 + recurse_limit = 10 + if num_unresolved > 0 and recurse < recurse_limit: + for env_file in self._env_entryid_files: + item = env_file.get_resolved_value( + item, + allow_unresolved_envvars=allow_unresolved_envvars, + subgroup=subgroup, + ) + if "$" not in item: + return item + else: + item = self.get_resolved_value( + item, + recurse=recurse + 1, + allow_unresolved_envvars=allow_unresolved_envvars, + subgroup=subgroup, + ) + + return item + + def set_value( + self, + item, + value, + subgroup=None, + ignore_type=False, + allow_undefined=False, + return_file=False, + ): + """ + If a file has been defined, and the variable is in the file, + then that value will be set in the file object and the resovled value + is returned unless return_file is True, in which case (resolved_value, filename) + is returned where filename is the name of the modified file. + """ + expect( + not self._read_only_mode, + "Cannot modify case, read_only. 
" + "Case must be opened with read_only=False and can only be modified within a context manager", + ) + + if item == "CASEROOT": + self._caseroot = value + result = None + + for env_file in self._files: + result = env_file.set_value(item, value, subgroup, ignore_type) + if result is not None: + logger.debug("Will rewrite file {} {}".format(env_file.filename, item)) + return (result, env_file.filename) if return_file else result + + if len(self._files) == 1: + expect( + allow_undefined or result is not None, + "No variable {} found in file {}".format(item, self._files[0].filename), + ) + else: + expect( + allow_undefined or result is not None, + "No variable {} found in case".format(item), + ) + + def set_valid_values(self, item, valid_values): + """ + Update or create a valid_values entry for item and populate it + """ + expect( + not self._read_only_mode, + "Cannot modify case, read_only. " + "Case must be opened with read_only=False and can only be modified within a context manager", + ) + + result = None + for env_file in self._env_entryid_files: + result = env_file.set_valid_values(item, valid_values) + if result is not None: + logger.debug("Will rewrite file {} {}".format(env_file.filename, item)) + return result + + def set_lookup_value(self, item, value): + if item in self.lookups and self.lookups[item] is not None: + logger.warning( + "Item {} already in lookups with value {}".format( + item, self.lookups[item] + ) + ) + else: + logger.debug("Setting in lookups: item {}, value {}".format(item, value)) + self.lookups[item] = value + + def clean_up_lookups(self, allow_undefined=False): + # put anything in the lookups table into existing env objects + for key, value in list(self.lookups.items()): + logger.debug("lookup key {} value {}".format(key, value)) + result = self.set_value(key, value, allow_undefined=allow_undefined) + if result is not None: + del self.lookups[key] + + def _set_compset(self, compset_name, files): + """ + Loop through all the compset files and find the compset + specifation file that matches either the input 'compset_name'. + Note that the input compset name (i.e. compset_name) can be + either a longname or an alias. This will set various compset-related + info. + + Returns a tuple: (compset_alias, science_support, component_defining_compset) + (For a user-defined compset - i.e., a compset without an alias - these + return values will be None, [], None.) 
+ """ + science_support = [] + compset_alias = None + components = files.get_components("COMPSETS_SPEC_FILE") + logger.debug( + " Possible components for COMPSETS_SPEC_FILE are {}".format(components) + ) + + self.set_lookup_value("COMP_INTERFACE", self._comp_interface) + if config.set_comp_root_dir_cpl: + if config.use_nems_comp_root_dir: + ufs_driver = os.environ.get("UFS_DRIVER") + attribute = None + if ufs_driver: + attribute = {"component": "nems"} + comp_root_dir_cpl = files.get_value( + "COMP_ROOT_DIR_CPL", attribute=attribute + ) + else: + comp_root_dir_cpl = files.get_value("COMP_ROOT_DIR_CPL") + + self.set_lookup_value("COMP_ROOT_DIR_CPL", comp_root_dir_cpl) + + # Loop through all of the files listed in COMPSETS_SPEC_FILE and find the file + # that has a match for either the alias or the longname in that order + for component in components: + # Determine the compsets file for this component + compsets_filename = files.get_value( + "COMPSETS_SPEC_FILE", {"component": component} + ) + + # If the file exists, read it and see if there is a match for the compset alias or longname + if os.path.isfile(compsets_filename): + compsets = Compsets(compsets_filename) + match, compset_alias, science_support = compsets.get_compset_match( + name=compset_name + ) + if match is not None: + self._compsetname = match + logger.info("Compset longname is {}".format(match)) + logger.info( + "Compset specification file is {}".format(compsets_filename) + ) + break + + if compset_alias is None: + logger.info( + "Did not find an alias or longname compset match for {} ".format( + compset_name + ) + ) + self._compsetname = compset_name + + # Fill in compset name + self._compsetname, self._components = self.valid_compset( + self._compsetname, compset_alias, files + ) + + # if this is a valiid compset longname there will be at least 7 components. 
+ components = self.get_compset_components() + expect( + len(components) > 6, + "No compset alias {} found and this does not appear to be a compset longname.".format( + compset_name + ), + ) + + return compset_alias, science_support + + def get_primary_component(self): + if self._primary_component is None: + self._primary_component = self._find_primary_component() + return self._primary_component + + def _find_primary_component(self): + """ + try to glean the primary component based on compset name + """ + progcomps = {} + spec = {} + primary_component = None + for comp in self._component_classes: + if comp == "CPL": + continue + spec[comp] = self.get_value("COMP_{}".format(comp)) + notprogcomps = ("D{}".format(comp), "X{}".format(comp), "S{}".format(comp)) + if spec[comp].upper() in notprogcomps: + progcomps[comp] = False + else: + progcomps[comp] = True + expect( + "ATM" in progcomps + and "LND" in progcomps + and "OCN" in progcomps + and "ICE" in progcomps, + " Not finding expected components in {}".format(self._component_classes), + ) + if ( + progcomps["ATM"] + and progcomps["LND"] + and progcomps["OCN"] + and progcomps["ICE"] + ): + primary_component = "allactive" + elif progcomps["LND"] and progcomps["OCN"] and progcomps["ICE"]: + # this is a "J" compset + primary_component = "allactive" + elif progcomps["ATM"] and progcomps["OCN"] and progcomps["ICE"]: + # this is a ufs s2s compset + primary_component = "allactive" + elif progcomps["ATM"]: + if "DOCN%SOM" in self._compsetname and progcomps["LND"]: + # This is an "E" compset + primary_component = "allactive" + else: + # This is an "F" or "Q" compset + primary_component = spec["ATM"] + elif progcomps["LND"]: + # This is an "I" compset + primary_component = spec["LND"] + elif progcomps["OCN"]: + # This is a "C" or "G" compset + primary_component = spec["OCN"] + elif progcomps["ICE"]: + # This is a "D" compset + primary_component = spec["ICE"] + elif "GLC" in progcomps and progcomps["GLC"]: + # This is a "TG" compset + primary_component = spec["GLC"] + elif progcomps["ROF"]: + # This is a "R" compset + primary_component = spec["ROF"] + elif progcomps["WAV"]: + # This is a "V" compset + primary_component = spec["WAV"] + else: + # This is "A", "X" or "S" + primary_component = "drv" + + return primary_component + + def _valid_compset_impl(self, compset_name, compset_alias, comp_classes, comp_hash): + """Add stub models missing in , return full compset name. + is a list of all supported component classes. + is a dictionary where each key is a supported component + (e.g., datm) and the associated value is the index in of + that component's class (e.g., 1 for atm). 
+ >>> import os, shutil, tempfile + >>> workdir = tempfile.mkdtemp() + >>> caseroot = os.path.join(workdir, 'caseroot') # use non-existent caseroot to avoid error about not being a valid case directory in Case __init__ method + >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) + ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) + ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('atm:DATM%NYF_rof:DROF%NYF_scn:2000_ice:DICE%SSMI_ocn:DOCN%DOM', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) + ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DATM%NYF_DICE%SSMI_DOCN%DOM_DROF%NYF', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) + ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DICE%SSMI_DOCN%DOM_DATM%NYF_DROF%NYF', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) + ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DICE%SSMI_DOCN%DOM_DATM%NYF_DROF%NYF_TEST', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) + ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP_TEST', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_BGC%BDRD', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) + ('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_SESP_BGC%BDRD', 
['1850', 'CAM60', 'CLM50%BGC-CROP', 'CICE', 'POP2%ECO%ABIO-DIC', 'MOSART', 'CISM2%NOEVOLVE', 'WW3', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_BGC%BDRD_TEST', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'IAC', 'ESP'], {'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) + ('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_SIAC_SESP_BGC%BDRD_TEST', ['1850', 'CAM60', 'CLM50%BGC-CROP', 'CICE', 'POP2%ECO%ABIO-DIC', 'MOSART', 'CISM2%NOEVOLVE', 'WW3', 'SIAC', 'SESP']) + >>> Case(caseroot, read_only=False)._valid_compset_impl('1850_SATM_SLND_SICE_SOCN_SGLC_SWAV', 'S', ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'IAC', 'ESP'], {'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) + ('1850_SATM_SLND_SICE_SOCN_SROF_SGLC_SWAV_SIAC_SESP', ['1850', 'SATM', 'SLND', 'SICE', 'SOCN', 'SROF', 'SGLC', 'SWAV', 'SIAC', 'SESP']) + + >>> Case(caseroot, read_only=False)._valid_compset_impl('1850_SATM_SLND_SICE_SOCN_SGLC_SWAV', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'IAC', 'ESP'], {'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + CIMEError: ERROR: Invalid compset name, 1850_SATM_SLND_SICE_SOCN_SGLC_SWAV, all stub components generated + >>> shutil.rmtree(workdir, ignore_errors=True) + """ + # Find the models declared in the compset + model_set = [None] * len(comp_classes) + components = compset_name.split("_") + noncomps = [] + allstubs = True + colonformat = ":" in compset_name + if colonformat: + # make sure that scn: is component[0] as expected + for i in range(1, len(components)): + if components[i].startswith("scn:"): + tmp = components[0] + components[0] = components[i] + components[i] = tmp + break + + model_set[0] = components[0][4:] + else: + model_set[0] = components[0] + + for model in components[1:]: + match = Case.__mod_match_re__.match(model.lower()) + expect(match is not None, "No model match for {}".format(model)) + mod_match = match.group(1) + # Check for noncomponent appends (BGC & TEST) + if mod_match in ("bgc", "test"): + noncomps.append(model) + elif ":" in mod_match: + comp_ind = comp_hash[mod_match[4:]] + model_set[comp_ind] = model + else: + expect(mod_match in comp_hash, "Unknown model type, {}".format(model)) + comp_ind = comp_hash[mod_match] + model_set[comp_ind] = model + + # Fill in missing components with stubs + for comp_ind in range(1, len(model_set)): + if model_set[comp_ind] is None: + comp_class = comp_classes[comp_ind] + stub = "S" + comp_class + logger.info("Automatically adding {} to compset".format(stub)) + model_set[comp_ind] = stub + elif ":" in model_set[comp_ind]: + model_set[comp_ind] = model_set[comp_ind][4:] + + if model_set[comp_ind][0] != "S": + allstubs = False + + expect( + (compset_alias is not None) or (not allstubs), + "Invalid compset name, {}, all stub components generated".format( + compset_name + ), + ) + # Return the completed compset + compsetname = "_".join(model_set) + for noncomp in noncomps: + 
compsetname = compsetname + "_" + noncomp + return compsetname, model_set + + # RE to match component type name without optional piece (stuff after %). + # Drop any trailing digits (e.g., the 60 in CAM60) to ensure match + # Note, this will also drop trailing digits such as in ww3 but since it + # is handled consistenly, this should not affect functionality. + # Note: interstitial digits are included (e.g., in FV3GFS). + __mod_match_re__ = re.compile(r"([^%]*[^0-9%]+)") + + def valid_compset(self, compset_name, compset_alias, files): + """Add stub models missing in , return full compset name. + is used to collect set of all supported components. + """ + # First, create hash of model names + # A note about indexing. Relevant component classes start at 1 + # because we ignore CPL for finding model components. + # Model components would normally start at zero but since we are + # dealing with a compset, 0 is reserved for the time field + drv_config_file = files.get_value("CONFIG_CPL_FILE") + drv_comp = Component(drv_config_file, "CPL") + comp_classes = drv_comp.get_valid_model_components() + comp_hash = {} # Hash model name to component class index + for comp_ind in range(1, len(comp_classes)): + comp = comp_classes[comp_ind] + # Find list of models for component class + # List can be in different locations, check CONFIG_XXX_FILE + node_name = "CONFIG_{}_FILE".format(comp) + models = files.get_components(node_name) + if (models is None) or (None in models): + # Backup, check COMP_ROOT_DIR_XXX + node_name = "COMP_ROOT_DIR_" + comp + models = files.get_components(node_name) + + expect( + (models is not None) and (None not in models), + "Unable to find list of supported components", + ) + + for model in models: + mod_match = Case.__mod_match_re__.match(model.lower()).group(1) + comp_hash[mod_match] = comp_ind + + return self._valid_compset_impl( + compset_name, compset_alias, comp_classes, comp_hash + ) + + def _set_info_from_primary_component(self, files, pesfile=None): + """ + Sets file and directory paths that depend on the primary component of + this compset. + + Assumes that self._primary_component has already been set. 
+ """ + component = self.get_primary_component() + + compset_spec_file = files.get_value( + "COMPSETS_SPEC_FILE", {"component": component}, resolved=False + ) + + self.set_lookup_value("COMPSETS_SPEC_FILE", compset_spec_file) + if pesfile is None: + self._pesfile = files.get_value("PES_SPEC_FILE", {"component": component}) + pesfile_unresolved = files.get_value( + "PES_SPEC_FILE", {"component": component}, resolved=False + ) + logger.info("Pes specification file is {}".format(self._pesfile)) + else: + self._pesfile = pesfile + pesfile_unresolved = pesfile + expect( + self._pesfile is not None, + "No pesfile found for component {}".format(component), + ) + + self.set_lookup_value("PES_SPEC_FILE", pesfile_unresolved) + + tests_filename = files.get_value( + "TESTS_SPEC_FILE", {"component": component}, resolved=False + ) + tests_mods_dir = files.get_value( + "TESTS_MODS_DIR", {"component": component}, resolved=False + ) + user_mods_dir = files.get_value( + "USER_MODS_DIR", {"component": component}, resolved=False + ) + self.set_lookup_value("TESTS_SPEC_FILE", tests_filename) + self.set_lookup_value("TESTS_MODS_DIR", tests_mods_dir) + self.set_lookup_value("USER_MODS_DIR", user_mods_dir) + + def get_compset_components(self): + # If are doing a create_clone then, self._compsetname is not set yet + components = [] + compset = self.get_value("COMPSET") + if compset is None: + compset = self._compsetname + expect(compset is not None, "compset is not set") + # the first element is always the date operator - skip it + elements = compset.split("_")[1:] # pylint: disable=maybe-no-member + for element in elements: + if ":" in element: + element = element[4:] + # ignore the possible BGC or TEST modifier + if element.upper().startswith("BGC%") or element.upper().startswith("TEST"): + continue + else: + element_component = element.split("%")[0].lower() + if ( + "ww" not in element_component + and "fv3" not in element_component + and "cice" not in element_component + ): + element_component = re.sub(r"[0-9]*", "", element_component) + components.append(element_component) + return components + + def __iter__(self): + for entryid_file in self._env_entryid_files: + for key, val in entryid_file: + if isinstance(val, str) and "$" in val: + yield key, self.get_resolved_value(val) + else: + yield key, val + + def set_comp_classes(self, comp_classes): + self._component_classes = comp_classes + for env_file in self._env_entryid_files: + env_file.set_components(comp_classes) + + def _get_component_config_data(self, files): + # attributes used for multi valued defaults + # attlist is a dictionary used to determine the value element that has the most matches + attlist = { + "compset": self._compsetname, + "grid": self._gridname, + "cime_model": self._cime_model, + } + + # Determine list of component classes that this coupler/driver knows how + # to deal with. This list follows the same order as compset longnames follow. 
+ + # Add the group and elements for the config_files.xml + for env_file in self._env_entryid_files: + env_file.add_elements_by_group(files, attlist) + + drv_config_file = files.get_value("CONFIG_CPL_FILE") + drv_comp = Component(drv_config_file, "CPL") + for env_file in self._env_entryid_files: + env_file.add_elements_by_group(drv_comp, attributes=attlist) + + drv_config_file_model_specific = files.get_value( + "CONFIG_CPL_FILE_MODEL_SPECIFIC" + ) + expect( + os.path.isfile(drv_config_file_model_specific), + "No {} specific file found for driver {}".format( + get_model(), self._comp_interface + ), + ) + drv_comp_model_specific = Component(drv_config_file_model_specific, "CPL") + + self._component_description[ + "forcing" + ] = drv_comp_model_specific.get_forcing_description(self._compsetname) + logger.info( + "Compset forcing is {}".format(self._component_description["forcing"]) + ) + self._component_description["CPL"] = drv_comp_model_specific.get_description( + self._compsetname + ) + if len(self._component_description["CPL"]) > 0: + logger.info("Com forcing is {}".format(self._component_description["CPL"])) + for env_file in self._env_entryid_files: + env_file.add_elements_by_group(drv_comp_model_specific, attributes=attlist) + + self.clean_up_lookups(allow_undefined=True) + + # loop over all elements of both component_classes and components - and get config_component_file for + # for each component + self.set_comp_classes(drv_comp.get_valid_model_components()) + + # will need a change here for new cpl components + root_dir_node_name = "COMP_ROOT_DIR_CPL" + comp_root_dir = files.get_value( + root_dir_node_name, {"component": self._comp_interface}, resolved=False + ) + + if comp_root_dir is not None: + self.set_value(root_dir_node_name, comp_root_dir) + + for i in range(1, len(self._component_classes)): + comp_class = self._component_classes[i] + comp_name = self._components[i - 1] + if ":" in comp_name: + comp_name = comp_name[4:] + root_dir_node_name = "COMP_ROOT_DIR_" + comp_class + node_name = "CONFIG_" + comp_class + "_FILE" + compatt = {"component": comp_name} + comp_root_dir = files.get_value(root_dir_node_name, compatt, resolved=False) + if comp_root_dir is not None: + self.set_value(root_dir_node_name, comp_root_dir) + + # Add the group and elements for the config_files.xml + + comp_config_file = files.get_value(node_name, compatt, resolved=False) + expect( + comp_config_file is not None, + "No component {} found for class {}".format(comp_name, comp_class), + ) + self.set_value(node_name, comp_config_file) + comp_config_file = files.get_value(node_name, compatt) + + expect( + comp_config_file is not None and os.path.isfile(comp_config_file), + "Config file {} for component {} not found.".format( + comp_config_file, comp_name + ), + ) + compobj = Component(comp_config_file, comp_class) + # For files following version 3 schema this also checks the compsetname validity + + self._component_description[comp_class] = compobj.get_description( + self._compsetname + ) + expect( + self._component_description[comp_class] is not None, + "No description found in file {} for component {} in comp_class {}".format( + comp_config_file, comp_name, comp_class + ), + ) + logger.info( + "{} component is {}".format( + comp_class, self._component_description[comp_class] + ) + ) + for env_file in self._env_entryid_files: + env_file.add_elements_by_group(compobj, attributes=attlist) + self.clean_up_lookups(allow_undefined=self._comp_interface == "nuopc") + + def _setup_mach_pes(self, pecount, 
multi_driver, ninst, machine_name, mpilib): + # -------------------------------------------- + # pe layout + # -------------------------------------------- + mach_pes_obj = None + # self._pesfile may already be env_mach_pes.xml if so we can just return + gfile = GenericXML(infile=self._pesfile) + ftype = gfile.get_id() + expect( + ftype == "env_mach_pes.xml" or ftype == "config_pes", + " Do not recognize {} as a valid CIME pes file {}".format( + self._pesfile, ftype + ), + ) + if ftype == "env_mach_pes.xml": + new_mach_pes_obj = EnvMachPes( + infile=self._pesfile, + components=self._component_classes, + comp_interface=self._comp_interface, + ) + self.update_env(new_mach_pes_obj, "mach_pes", blow_away=True) + return new_mach_pes_obj.get_value("TOTALPES") + + pesobj = Pes(self._pesfile) + + match1 = re.match("(.+)x([0-9]+)", "" if pecount is None else pecount) + match2 = re.match("([0-9]+)", "" if pecount is None else pecount) + + pes_ntasks = {} + pes_nthrds = {} + pes_rootpe = {} + pes_pstrid = {} + other = {} + append = {} + comment = None + force_tasks = None + force_thrds = None + if match1: + opti_tasks = match1.group(1) + if opti_tasks.isdigit(): + force_tasks = int(opti_tasks) + else: + pes_ntasks = pesobj.find_pes_layout( + self._gridname, + self._compsetname, + machine_name, + pesize_opts=opti_tasks, + mpilib=mpilib, + )[0] + force_thrds = int(match1.group(2)) + elif match2: + force_tasks = int(match2.group(1)) + pes_nthrds = pesobj.find_pes_layout( + self._gridname, self._compsetname, machine_name, mpilib=mpilib + )[1] + else: + ( + pes_ntasks, + pes_nthrds, + pes_rootpe, + pes_pstrid, + other, + append, + comment, + ) = pesobj.find_pes_layout( + self._gridname, + self._compsetname, + machine_name, + pesize_opts=pecount, + mpilib=mpilib, + ) + + if match1 or match2: + for component_class in self._component_classes: + if force_tasks is not None: + string_ = "NTASKS_" + component_class + pes_ntasks[string_] = force_tasks + + if force_thrds is not None: + string_ = "NTHRDS_" + component_class + pes_nthrds[string_] = force_thrds + + # Always default to zero rootpe if user forced procs and or threads + string_ = "ROOTPE_" + component_class + pes_rootpe[string_] = 0 + + mach_pes_obj = self.get_env("mach_pes") + mach_pes_obj.add_comment(comment) + + if other is not None: + logger.info( + "setting additional fields from config_pes: {}, append {}".format( + other, append + ) + ) + for key, value in list(other.items()): + ovalue = "" + if ( + value.startswith('"') + and value.endswith('"') + or value.startswith("'") + and value.endswith("'") + ): + value = value[1:-1] + if key in append and append[key]: + ovalue = self.get_value(key) + + self.set_value(key, value + " " + ovalue) + + totaltasks = [] + for comp_class in self._component_classes: + ntasks_str = "NTASKS_{}".format(comp_class) + nthrds_str = "NTHRDS_{}".format(comp_class) + rootpe_str = "ROOTPE_{}".format(comp_class) + pstrid_str = "PSTRID_{}".format(comp_class) + + ntasks = pes_ntasks[ntasks_str] if ntasks_str in pes_ntasks else 1 + nthrds = pes_nthrds[nthrds_str] if nthrds_str in pes_nthrds else 1 + rootpe = pes_rootpe[rootpe_str] if rootpe_str in pes_rootpe else 0 + pstrid = pes_pstrid[pstrid_str] if pstrid_str in pes_pstrid else 1 + + totaltasks.append((ntasks + rootpe) * nthrds) + mach_pes_obj.set_value(ntasks_str, ntasks) + mach_pes_obj.set_value(nthrds_str, nthrds) + mach_pes_obj.set_value(rootpe_str, rootpe) + mach_pes_obj.set_value(pstrid_str, pstrid) + + # Make sure that every component has been accounted for + # set, 
nthrds and ntasks to 1 otherwise. Also set the ninst values here. + for compclass in self._component_classes: + key = "NINST_{}".format(compclass) + if compclass == "CPL": + continue + mach_pes_obj.set_value(key, ninst) + + key = "NTASKS_{}".format(compclass) + if key not in pes_ntasks: + mach_pes_obj.set_value(key, 1) + + key = "NTHRDS_{}".format(compclass) + if key not in pes_nthrds: + mach_pes_obj.set_value(key, 1) + + if multi_driver: + mach_pes_obj.set_value("MULTI_DRIVER", True) + + def configure( + self, + compset_name, + grid_name, + machine_name=None, + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ): + expect( + check_name(compset_name, additional_chars="."), + "Invalid compset name {}".format(compset_name), + ) + + self._comp_interface = driver + # -------------------------------------------- + # compset, pesfile, and compset components + # -------------------------------------------- + files = Files(comp_interface=self._comp_interface) + + # -------------------------------------------- + # find and/or fill out compset name + # -------------------------------------------- + + compset_alias, science_support = self._set_compset(compset_name, files) + + self._components = self.get_compset_components() + + # -------------------------------------------- + # grid + # -------------------------------------------- + grids = Grids(gridfile, comp_interface=driver) + + gridinfo = grids.get_grid_info( + name=grid_name, compset=self._compsetname, driver=self._comp_interface + ) + self._gridname = gridinfo["GRID"] + for key, value in list(gridinfo.items()): + logger.debug("Set grid {} {}".format(key, value)) + self.set_lookup_value(key, value) + + # -------------------------------------------- + # component config data + # -------------------------------------------- + + self._get_component_config_data(files) + + # This needs to be called after self.set_comp_classes, which is called + # from self._get_component_config_data + self._primary_component = self.get_primary_component() + + self._set_info_from_primary_component(files, pesfile=pesfile) + + self.clean_up_lookups(allow_undefined=True) + + self.get_compset_var_settings(files) + + self.clean_up_lookups(allow_undefined=True) + + # -------------------------------------------- + # machine + # -------------------------------------------- + # set machine values in env_xxx files + if extra_machines_dir: + self.set_value("EXTRA_MACHDIR", extra_machines_dir) + machobj = Machines(machine=machine_name, extra_machines_dir=extra_machines_dir) + probed_machine = machobj.probe_machine_name() + machine_name = machobj.get_machine_name() + self.set_value("MACH", machine_name) + if probed_machine != machine_name and probed_machine is not None: + logger.warning( + "WARNING: User-selected machine '{}' does not match probed machine '{}'".format( + machine_name, probed_machine + ) + ) + else: + logger.info("Machine is {}".format(machine_name)) + + nodenames = machobj.get_node_names() + nodenames = [ + x + for x in nodenames + if "_system" not in x + and "_variables" not in x + and "mpirun" not in x + and "COMPILER" not in x + and "MPILIB" not in x + and "MAX_MPITASKS_PER_NODE" not in x + and "MAX_TASKS_PER_NODE" not in x + and "MAX_CPUTASKS_PER_GPU_NODE" 
not in x + and "MAX_GPUS_PER_NODE" not in x + ] + + for nodename in nodenames: + value = machobj.get_value(nodename, resolved=False) + if value: + type_str = self.get_type_info(nodename) + if type_str is not None: + logger.debug("machine nodename {} value {}".format(nodename, value)) + self.set_value(nodename, convert_to_type(value, type_str, nodename)) + + if compiler is None: + compiler = machobj.get_default_compiler() + else: + expect( + machobj.is_valid_compiler(compiler), + "compiler {} is not supported on machine {}".format( + compiler, machine_name + ), + ) + + self.set_value("COMPILER", compiler) + + if mpilib is None: + mpilib = machobj.get_default_MPIlib({"compiler": compiler}) + else: + expect( + machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}), + "MPIlib {} is not supported on machine {}".format(mpilib, machine_name), + ) + self.set_value("MPILIB", mpilib) + for name in ( + "MAX_TASKS_PER_NODE", + "MAX_MPITASKS_PER_NODE", + "MAX_CPUTASKS_PER_GPU_NODE", + "MAX_GPUS_PER_NODE", + ): + dmax = machobj.get_value(name, {"compiler": compiler}) + if not dmax: + dmax = machobj.get_value(name) + if dmax: + self.set_value(name, dmax) + elif name == "MAX_CPUTASKS_PER_GPU_NODE": + logger.debug( + "Variable {} not defined for machine {} and compiler {}".format( + name, machine_name, compiler + ) + ) + elif name == "MAX_GPUS_PER_NODE": + logger.debug( + "Variable {} not defined for machine {} and compiler {}".format( + name, machine_name, compiler + ) + ) + else: + logger.warning( + "Variable {} not defined for machine {} and compiler {}".format( + name, machine_name, compiler + ) + ) + + machdir = machobj.get_machines_dir() + self.set_value("MACHDIR", machdir) + + # Create env_mach_specific settings from machine info. + env_mach_specific_obj = self.get_env("mach_specific") + env_mach_specific_obj.populate( + machobj, + attributes={ + "mpilib": mpilib, + "compiler": compiler, + "threaded": self.get_build_threaded(), + }, + ) + + self._setup_mach_pes(pecount, multi_driver, ninst, machine_name, mpilib) + + if multi_driver and int(ninst) > 1: + logger.info(" Driver/Coupler has %s instances" % ninst) + + # -------------------------------------------- + # archiving system + # -------------------------------------------- + env_archive = self.get_env("archive") + infile_node = files.get_child("entry", {"id": "ARCHIVE_SPEC_FILE"}) + infile = files.get_default_value(infile_node) + infile = self.get_resolved_value(infile) + logger.debug("archive defaults located in {}".format(infile)) + archive = Archive(infile=infile, files=files) + archive.setup(env_archive, self._components, files=files) + + self.set_value("COMPSET", self._compsetname) + + self._set_pio_xml() + logger.info(" Compset is: {} ".format(self._compsetname)) + logger.info(" Grid is: {} ".format(self._gridname)) + logger.info(" Components in compset are: {} ".format(self._components)) + + if not test and not run_unsupported and self._cime_model == "cesm": + if grid_name in science_support: + logger.info( + "\nThis is a CESM scientifically supported compset at this resolution.\n" + ) + else: + self._check_testlists(compset_alias, grid_name, files) + + self.set_value("REALUSER", os.environ["USER"]) + + # Set project id + if project is None: + project = get_project(machobj) + if project is not None: + self.set_value("PROJECT", project) + elif machobj.get_value("PROJECT_REQUIRED"): + expect(project is not None, "PROJECT_REQUIRED is true but no project found") + # Get charge_account id if it exists + charge_account = 
get_charge_account(machobj, project) + if charge_account is not None: + self.set_value("CHARGE_ACCOUNT", charge_account) + + # Resolve the CIME_OUTPUT_ROOT variable, other than this + # we don't want to resolve variables until we need them + if output_root is None: + output_root = self.get_value("CIME_OUTPUT_ROOT") + output_root = os.path.abspath(output_root) + self.set_value("CIME_OUTPUT_ROOT", output_root) + if non_local: + self.set_value( + "EXEROOT", os.path.join(output_root, self.get_value("CASE"), "bld") + ) + self.set_value( + "RUNDIR", os.path.join(output_root, self.get_value("CASE"), "run") + ) + self.set_value("NONLOCAL", True) + + # Overwriting an existing exeroot or rundir can cause problems + exeroot = self.get_value("EXEROOT") + rundir = self.get_value("RUNDIR") + for wdir in (exeroot, rundir): + logging.debug("wdir is {}".format(wdir)) + if os.path.exists(wdir): + expect( + not test, "Directory {} already exists, aborting test".format(wdir) + ) + if answer is None: + response = input( + "\nDirectory {} already exists, (r)eplace, (a)bort, or (u)se existing?".format( + wdir + ) + ) + else: + response = answer + + if response.startswith("r"): + shutil.rmtree(wdir) + else: + expect(response.startswith("u"), "Aborting by user request") + + # miscellaneous settings + if self.get_value("RUN_TYPE") == "hybrid": + self.set_value("GET_REFCASE", True) + + if case_group: + self.set_value("CASE_GROUP", case_group) + + # Turn on short term archiving as cesm default setting + model = get_model() + self.set_model_version(model) + if config.default_short_term_archiving and not test: + self.set_value("DOUT_S", True) + self.set_value("TIMER_LEVEL", 4) + + if test: + self.set_value("TEST", True) + + self.initialize_derived_attributes() + + # -------------------------------------------- + # batch system (must come after initialize_derived_attributes) + # -------------------------------------------- + env_batch = self.get_env("batch") + + batch_system_type = machobj.get_value("BATCH_SYSTEM") + + logger.info("Batch_system_type is {}".format(batch_system_type)) + batch = Batch( + batch_system=batch_system_type, + machine=machine_name, + files=files, + extra_machines_dir=extra_machines_dir, + ) + + workflow = Workflow(files=files) + + postprocessing = Postprocessing(files=files) + if postprocessing.file_exists: + env_postprocessing = self.get_env("postprocessing") + env_postprocessing.add_elements_by_group(srcobj=postprocessing) + + env_batch.set_batch_system(batch, batch_system_type=batch_system_type) + + bjobs = workflow.get_workflow_jobs(machine=machine_name, workflowid=workflowid) + env_workflow = self.get_env("workflow") + env_workflow.create_job_groups(bjobs, test) + + if walltime: + self.set_value( + "USER_REQUESTED_WALLTIME", walltime, subgroup=self.get_primary_job() + ) + if queue: + self.set_value( + "USER_REQUESTED_QUEUE", queue, subgroup=self.get_primary_job() + ) + + env_batch.set_job_defaults(bjobs, self) + # Set BATCH_COMMAND_FLAGS to the default values + + for job in bjobs: + if test and job[0] == "case.run" or not test and job[0] == "case.test": + continue + submitargs = env_batch.get_submit_args(self, job[0], resolve=False) + self.set_value("BATCH_COMMAND_FLAGS", submitargs, subgroup=job[0]) + + # Make sure that parallel IO is not specified if total_tasks==1 + if self.total_tasks == 1: + for compclass in self._component_classes: + key = "PIO_TYPENAME_{}".format(compclass) + pio_typename = self.get_value(key) + if pio_typename in ("pnetcdf", "netcdf4p"): + self.set_value(key, 
"netcdf") + + if input_dir is not None: + self.set_value("DIN_LOC_ROOT", os.path.abspath(input_dir)) + + def get_compset_var_settings(self, files): + infile = files.get_value( + "COMPSETS_SPEC_FILE", attribute={"component": self._primary_component} + ) + compset_obj = Compsets(infile=infile, files=files) + matches = compset_obj.get_compset_var_settings( + self._compsetname, self._gridname + ) + for name, value in matches: + if len(value) > 0: + logger.info( + "Compset specific settings: name is {} and value is {}".format( + name, value + ) + ) + self.set_lookup_value(name, value) + + def set_initial_test_values(self): + testobj = self.get_env("test") + testobj.set_initial_values(self) + + def get_batch_jobs(self): + batchobj = self.get_env("batch") + return batchobj.get_jobs() + + def _set_pio_xml(self): + pioobj = PIO(self._component_classes) + grid = self.get_value("GRID") + compiler = self.get_value("COMPILER") + mach = self.get_value("MACH") + compset = self.get_value("COMPSET") + mpilib = self.get_value("MPILIB") + + defaults = pioobj.get_defaults( + grid=grid, compset=compset, mach=mach, compiler=compiler, mpilib=mpilib + ) + + for vid, value in list(defaults.items()): + self.set_value(vid, value) + + def _create_caseroot_tools(self): + machines_dir = os.path.abspath(self.get_value("MACHDIR")) + machine = self.get_value("MACH") + toolsdir = os.path.join(self.get_value("CIMEROOT"), "CIME", "Tools") + casetools = os.path.join(self._caseroot, "Tools") + # setup executable files in caseroot/ + exefiles = ( + os.path.join(toolsdir, "case.setup"), + os.path.join(toolsdir, "case.build"), + os.path.join(toolsdir, "case.submit"), + os.path.join(toolsdir, "case.qstatus"), + os.path.join(toolsdir, "case.cmpgen_namelists"), + os.path.join(toolsdir, "preview_namelists"), + os.path.join(toolsdir, "preview_run"), + os.path.join(toolsdir, "check_input_data"), + os.path.join(toolsdir, "check_case"), + os.path.join(toolsdir, "xmlchange"), + os.path.join(toolsdir, "xmlquery"), + os.path.join(toolsdir, "pelayout"), + ) + try: + for exefile in exefiles: + destfile = os.path.join(self._caseroot, os.path.basename(exefile)) + os.symlink(exefile, destfile) + except Exception as e: + logger.warning("FAILED to set up exefiles: {}".format(str(e))) + + toolfiles = [ + os.path.join(toolsdir, "check_lockedfiles"), + os.path.join(toolsdir, "get_standard_makefile_args"), + os.path.join(toolsdir, "getTiming"), + os.path.join(toolsdir, "save_provenance"), + os.path.join(toolsdir, "Makefile"), + os.path.join(toolsdir, "mkSrcfiles"), + os.path.join(toolsdir, "mkDepends"), + ] + + # used on Titan + if os.path.isfile(os.path.join(toolsdir, "mdiag_reduce.csh")): + toolfiles.append(os.path.join(toolsdir, "mdiag_reduce.csh")) + toolfiles.append(os.path.join(toolsdir, "mdiag_reduce.pl")) + + for toolfile in toolfiles: + destfile = os.path.join(casetools, os.path.basename(toolfile)) + expect(os.path.isfile(toolfile), " File {} does not exist".format(toolfile)) + try: + os.symlink(toolfile, destfile) + except Exception as e: + logger.warning( + "FAILED to set up toolfiles: {} {} {}".format( + str(e), toolfile, destfile + ) + ) + + if config.copy_e3sm_tools: + if os.path.exists(os.path.join(machines_dir, "syslog.{}".format(machine))): + safe_copy( + os.path.join(machines_dir, "syslog.{}".format(machine)), + os.path.join(casetools, "mach_syslog"), + ) + else: + safe_copy( + os.path.join(machines_dir, "syslog.noop"), + os.path.join(casetools, "mach_syslog"), + ) + + srcroot = self.get_value("SRCROOT") + customize_path = 
os.path.join(srcroot, "cime_config", "customize") + safe_copy(os.path.join(customize_path, "e3sm_compile_wrap.py"), casetools) + + # add archive_metadata to the CASEROOT but only for CESM + if config.copy_cesm_tools: + try: + exefile = os.path.join(toolsdir, "archive_metadata") + destfile = os.path.join(self._caseroot, os.path.basename(exefile)) + os.symlink(exefile, destfile) + except Exception as e: + logger.warning("FAILED to set up exefiles: {}".format(str(e))) + + def _create_caseroot_sourcemods(self): + components = self.get_compset_components() + components.extend(["share", "drv"]) + if self._comp_interface == "nuopc": + components.extend(["cdeps"]) + + readme_message_start = ( + "Put source mods for the {component} library in this directory." + ) + readme_message_end = """ + +WARNING: SourceMods are not kept under version control, and can easily +become out of date if changes are made to the source code on which they +are based. We only recommend using SourceMods for small, short-term +changes that just apply to one or two cases. For larger or longer-term +changes, including gradual, incremental changes towards a final +solution, we highly recommend making changes in the main source tree, +leveraging version control (git or svn). +""" + + for component in components: + directory = os.path.join( + self._caseroot, "SourceMods", "src.{}".format(component) + ) + # don't make SourceMods for stub components + if not os.path.exists(directory) and component not in ( + "satm", + "slnd", + "sice", + "socn", + "sesp", + "sglc", + "swav", + ): + os.makedirs(directory) + # Besides giving some information on SourceMods, this + # README file serves one other important purpose: By + # putting a file inside each SourceMods subdirectory, we + # prevent aggressive scrubbers from scrubbing these + # directories due to being empty (which can cause builds + # to fail). + readme_file = os.path.join(directory, "README") + with open(readme_file, "w") as fd: + fd.write(readme_message_start.format(component=component)) + + if component == "cdeps": + readme_message_extra = """ + +Note that this subdirectory should only contain files from CDEPS's +dshr and streams source code directories. +Files related to specific data models should go in SourceMods subdirectories +for those data models (e.g., src.datm).""" + fd.write(readme_message_extra) + + fd.write(readme_message_end) + + if config.copy_cism_source_mods: + # Note: this is CESM specific, given that we are referencing cism explitly + if "cism" in components: + directory = os.path.join( + self._caseroot, "SourceMods", "src.cism", "source_cism" + ) + if not os.path.exists(directory): + os.makedirs(directory) + readme_file = os.path.join(directory, "README") + str_to_write = """Put source mods for the source_cism library in this subdirectory. +This includes any files from $COMP_ROOT_DIR_GLC/source_cism. 
Anything +else (e.g., mods to source_glc or drivers) goes in the src.cism +directory, NOT in this subdirectory.""" + + with open(readme_file, "w") as fd: + fd.write(str_to_write) + + def create_caseroot(self, clone=False): + if not os.path.exists(self._caseroot): + # Make the case directory + logger.info(" Creating Case directory {}".format(self._caseroot)) + os.makedirs(self._caseroot) + os.chdir(self._caseroot) + + # Create relevant directories in $self._caseroot + if clone: + newdirs = (LOCKED_DIR, "Tools") + else: + newdirs = ("SourceMods", LOCKED_DIR, "Buildconf", "Tools") + for newdir in newdirs: + os.makedirs(newdir) + + # Open a new README.case file in $self._caseroot + append_status(" ".join(sys.argv), "README.case", caseroot=self._caseroot) + compset_info = "Compset longname is {}".format(self.get_value("COMPSET")) + append_status(compset_info, "README.case", caseroot=self._caseroot) + append_status( + "Compset specification file is {}".format( + self.get_value("COMPSETS_SPEC_FILE") + ), + "README.case", + caseroot=self._caseroot, + ) + append_status( + "Pes specification file is {}".format(self.get_value("PES_SPEC_FILE")), + "README.case", + caseroot=self._caseroot, + ) + if "forcing" in self._component_description: + append_status( + "Forcing is {}".format(self._component_description["forcing"]), + "README.case", + caseroot=self._caseroot, + ) + for component_class in self._component_classes: + if ( + component_class in self._component_description + and len(self._component_description[component_class]) > 0 + ): + if "Stub" not in self._component_description[component_class]: + append_status( + "Component {} is {}".format( + component_class, + self._component_description[component_class], + ), + "README.case", + caseroot=self._caseroot, + ) + if component_class == "CPL": + append_status( + "Using %s coupler instances" % (self.get_value("NINST_CPL")), + "README.case", + caseroot=self._caseroot, + ) + continue + comp_grid = "{}_GRID".format(component_class) + grid_val = self.get_value(comp_grid) + if grid_val != "null": + append_status( + "{} is {}".format(comp_grid, self.get_value(comp_grid)), + "README.case", + caseroot=self._caseroot, + ) + comp = str(self.get_value("COMP_{}".format(component_class))) + user_mods = self._get_comp_user_mods(comp) + if user_mods is not None: + note = "This component includes user_mods {}".format(user_mods) + append_status(note, "README.case", caseroot=self._caseroot) + logger.info(note) + if not clone: + self._create_caseroot_sourcemods() + self._create_caseroot_tools() + + def apply_user_mods(self, user_mods_dirs=None): + """ + User mods can be specified on the create_newcase command line (usually when called from create test) + or they can be in the compset definition, or both. + + If user_mods_dirs is specified, it should be a list of paths giving the user mods + specified on the create_newcase command line. 
+ """ + all_user_mods = [] + for comp in self._component_classes: + component = str(self.get_value("COMP_{}".format(comp))) + if component == self._primary_component: + continue + comp_user_mods = self._get_comp_user_mods(component) + if comp_user_mods is not None: + all_user_mods.append(comp_user_mods) + # get the primary last so that it takes precidence over other components + comp_user_mods = self._get_comp_user_mods(self._primary_component) + if comp_user_mods is not None: + all_user_mods.append(comp_user_mods) + if user_mods_dirs is not None: + all_user_mods.extend(user_mods_dirs) + + # This looping order will lead to the specified user_mods_dirs taking + # precedence over self._user_mods, if there are any conflicts. + for user_mods in all_user_mods: + if os.path.isabs(user_mods): + user_mods_path = user_mods + else: + user_mods_path = self.get_value("USER_MODS_DIR") + user_mods_path = os.path.join(user_mods_path, user_mods) + apply_user_mods(self._caseroot, user_mods_path) + + # User mods may have modified underlying XML files + if all_user_mods: + self.read_xml() + + def _get_comp_user_mods(self, component): + """ + For a component 'foo', gets the value of FOO_USER_MODS. + + Returns None if no value was found, or if the value is an empty string. + """ + comp_user_mods = self.get_value("{}_USER_MODS".format(component.upper())) + # pylint: disable=no-member + if comp_user_mods is None or comp_user_mods == "" or comp_user_mods.isspace(): + return None + else: + return comp_user_mods + + def submit_jobs( + self, + no_batch=False, + job=None, + skip_pnl=None, + prereq=None, + allow_fail=False, + resubmit_immediate=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ): + env_batch = self.get_env("batch") + result = env_batch.submit_jobs( + self, + no_batch=no_batch, + skip_pnl=skip_pnl, + job=job, + user_prereq=prereq, + allow_fail=allow_fail, + resubmit_immediate=resubmit_immediate, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + dry_run=dry_run, + workflow=workflow, + ) + return result + + def get_job_info(self): + """ + Get information on batch jobs associated with this case + """ + xml_job_ids = self.get_value("JOB_IDS") + if not xml_job_ids: + return {} + else: + result = {} + job_infos = xml_job_ids.split(", ") # pylint: disable=no-member + for job_info in job_infos: + jobname, jobid = job_info.split(":") + result[jobname] = jobid + + return result + + def get_job_id(self, output): + env_batch = self.get_env("batch") + return env_batch.get_job_id(output) + + def report_job_status(self): + jobmap = self.get_job_info() + if not jobmap: + logger.info( + "No job ids associated with this case. Either case.submit was not run or was run with no-batch" + ) + else: + for jobname, jobid in list(jobmap.items()): + status = self.get_env("batch").get_status(jobid) + if status: + logger.info("{}: {}".format(jobname, status)) + else: + logger.info( + "{}: Unable to get status. 
Job may be complete already.".format( + jobname + ) + ) + + def cancel_batch_jobs(self, jobids): + env_batch = self.get_env("batch") + for jobid in jobids: + success = env_batch.cancel_job(jobid) + if not success: + logger.warning("Failed to kill {}".format(jobid)) + + def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True, overrides=None): + if job is None: + job = self.get_primary_job() + + env_mach_specific = self.get_env("mach_specific") + run_exe = env_mach_specific.get_value("run_exe") + run_misc_suffix = env_mach_specific.get_value("run_misc_suffix") + run_misc_suffix = "" if run_misc_suffix is None else run_misc_suffix + + mpirun_cmd_override = self.get_value("MPI_RUN_COMMAND") + if mpirun_cmd_override not in ["", None, "UNSET"]: + return self.get_resolved_value( + mpirun_cmd_override + " " + run_exe + " " + run_misc_suffix + ) + queue = self.get_value("JOB_QUEUE", subgroup=job) + + # Things that will have to be matched against mpirun element attributes + mpi_attribs = { + "compiler": self.get_value("COMPILER"), + "mpilib": self.get_value("MPILIB"), + "threaded": self.get_build_threaded(), + "queue": queue, + "unit_testing": False, + "comp_interface": self._comp_interface, + } + + ( + executable, + mpi_arg_list, + custom_run_exe, + custom_run_misc_suffix, + ) = env_mach_specific.get_mpirun(self, mpi_attribs, job) + if custom_run_exe: + logger.info("Using a custom run_exe {}".format(custom_run_exe)) + run_exe = custom_run_exe + if custom_run_misc_suffix: + logger.info( + "Using a custom run_misc_suffix {}".format(custom_run_misc_suffix) + ) + run_misc_suffix = custom_run_misc_suffix + + aprun_mode = env_mach_specific.get_aprun_mode(mpi_attribs) + + # special case for aprun + if ( + executable is not None + and "aprun" in executable + and aprun_mode != "ignore" + # and not "theta" in self.get_value("MACH") + ): + extra_args = env_mach_specific.get_aprun_args( + self, mpi_attribs, job, overrides=overrides + ) + + aprun_args, num_nodes, _, _, _ = get_aprun_cmd_for_case( + self, + run_exe, + overrides=overrides, + extra_args=extra_args, + ) + if job in ("case.run", "case.test"): + expect( + (num_nodes + self.spare_nodes) == self.num_nodes, + "Not using optimized num nodes", + ) + return self.get_resolved_value( + executable + aprun_args + " " + run_misc_suffix, + allow_unresolved_envvars=allow_unresolved_envvars, + ) + + else: + mpi_arg_string = " ".join(mpi_arg_list) + + if self.get_value("BATCH_SYSTEM") == "cobalt": + mpi_arg_string += " : " + + ngpus_per_node = self.get_value("NGPUS_PER_NODE") + if ngpus_per_node and ngpus_per_node > 0: + mpi_gpu_run_script = self.get_value("MPI_GPU_WRAPPER_SCRIPT") + if mpi_gpu_run_script: + mpi_arg_string = mpi_arg_string + " " + mpi_gpu_run_script + + return self.get_resolved_value( + "{} {} {} {}".format( + executable if executable is not None else "", + mpi_arg_string, + run_exe, + run_misc_suffix, + ), + allow_unresolved_envvars=allow_unresolved_envvars, + ) + + def set_model_version(self, model): + version = "unknown" + srcroot = self.get_value("SRCROOT") + version = get_current_commit(True, srcroot, tag=(model == "cesm")) + + self.set_value("MODEL_VERSION", version) + + if version != "unknown": + logger.info("{} model version found: {}".format(model, version)) + else: + logger.warning("WARNING: No {} Model version found.".format(model)) + + def load_env(self, reset=False, job=None, verbose=False): + if not self._is_env_loaded or reset: + if job is None: + job = self.get_primary_job() + os.environ["OMP_NUM_THREADS"] = 
str(self.thread_count) + env_module = self.get_env("mach_specific") + self._loaded_envs = env_module.load_env(self, job=job, verbose=verbose) + self._loaded_envs.append(("OMP_NUM_THREADS", os.environ["OMP_NUM_THREADS"])) + self._is_env_loaded = True + + return self._loaded_envs + + def get_build_threaded(self): + """ + Returns True if current settings require a threaded build/run. + """ + force_threaded = self.get_value("FORCE_BUILD_SMP") + if not self.thread_count: + return False + smp_present = force_threaded or self.thread_count > 1 + return smp_present + + def _check_testlists(self, compset_alias, grid_name, files): + """ + CESM only: check the testlist file for tests of this compset grid combination + + compset_alias should be None for a user-defined compset (i.e., a compset + without an alias) + """ + if "TESTS_SPEC_FILE" in self.lookups: + tests_spec_file = self.get_resolved_value(self.lookups["TESTS_SPEC_FILE"]) + else: + tests_spec_file = self.get_value("TESTS_SPEC_FILE") + + testcnt = 0 + if os.path.isfile(tests_spec_file) and compset_alias is not None: + # It's important that we not try to find matching tests if + # compset_alias is None, since compset=None tells get_tests to find + # tests of all compsets! + # Only collect supported tests as this _check_testlists is only + # called if run_unsupported is False. + tests = Testlist(tests_spec_file, files) + testlist = tests.get_tests( + compset=compset_alias, grid=grid_name, supported_only=True + ) + test_categories = ["prealpha", "prebeta"] + for test in testlist: + if ( + test["category"] in test_categories + or "aux_" in test["category"] + or get_cime_default_driver() in test["category"] + ): + testcnt += 1 + if testcnt > 0: + logger.warning( + "\n*********************************************************************************************************************************" + ) + logger.warning( + "This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format( + testcnt + ) + ) + logger.warning( + "*********************************************************************************************************************************\n" + ) + else: + expect( + False, + "\nThis compset and grid combination is untested in CESM. 
" + "Override this warning with the --run-unsupported option to create_newcase.", + error_prefix="STOP: ", + ) + + def set_file(self, xmlfile): + """ + force the case object to consider only xmlfile + """ + expect(os.path.isfile(xmlfile), "Could not find file {}".format(xmlfile)) + + if not self._read_only_mode: + self.flush(flushall=True) + + gfile = GenericXML(infile=xmlfile) + ftype = gfile.get_id() + + logger.warning("setting case file to {}".format(xmlfile)) + components = self.get_value("COMP_CLASSES") + new_env_file = None + for env_file in self._files: + if os.path.basename(env_file.filename) == ftype: + if ftype == "env_run.xml": + new_env_file = EnvRun(infile=xmlfile, components=components) + elif ftype == "env_build.xml": + new_env_file = EnvBuild(infile=xmlfile, components=components) + elif ftype == "env_case.xml": + new_env_file = EnvCase(infile=xmlfile, components=components) + elif ftype == "env_mach_pes.xml": + new_env_file = EnvMachPes( + infile=xmlfile, + components=components, + comp_interface=self._comp_interface, + ) + elif ftype == "env_batch.xml": + new_env_file = EnvBatch(infile=xmlfile) + elif ftype == "env_workflow.xml": + new_env_file = EnvWorkflow(infile=xmlfile) + elif ftype == "env_postprocessing.xml": + new_env_file = EnvPostprocessing(infile=xmlfile) + elif ftype == "env_test.xml": + new_env_file = EnvTest(infile=xmlfile) + elif ftype == "env_archive.xml": + new_env_file = EnvArchive(infile=xmlfile) + elif ftype == "env_mach_specific.xml": + new_env_file = EnvMachSpecific( + infile=xmlfile, comp_interface=self._comp_interface + ) + else: + expect(False, "No match found for file type {}".format(ftype)) + + if new_env_file is not None: + self._env_entryid_files = [] + self._env_generic_files = [] + if ftype in ["env_archive.xml", "env_mach_specific.xml"]: + self._env_generic_files = [new_env_file] + else: + self._env_entryid_files = [new_env_file] + + break + + expect( + new_env_file is not None, "No match found for file type {}".format(ftype) + ) + self._files = [new_env_file] + + def update_env(self, new_object, env_file, blow_away=False): + """ + Replace a case env object file + """ + old_object = self.get_env(env_file) + if not blow_away: + expect( + not old_object.needsrewrite, + "Potential loss of unflushed changes in {}".format(env_file), + ) + + new_object.filename = old_object.filename + if old_object in self._env_entryid_files: + self._env_entryid_files.remove(old_object) + self._env_entryid_files.append(new_object) + elif old_object in self._env_generic_files: + self._env_generic_files.remove(old_object) + self._env_generic_files.append(new_object) + self._files.remove(old_object) + self._files.append(new_object) + + def get_latest_cpl_log(self, coupler_log_path=None, cplname="cpl"): + """ + find and return the latest cpl log file in the + coupler_log_path directory + """ + if coupler_log_path is None: + coupler_log_path = self.get_value("RUNDIR") + cpllog = None + cpllogs = glob.glob(os.path.join(coupler_log_path, "{}.log.*".format(cplname))) + if cpllogs: + cpllog = max(cpllogs, key=os.path.getctime) + return cpllog + else: + return None + + def record_cmd(self, cmd=None, init=False): + lines = [] + caseroot = self.get_value("CASEROOT") + cimeroot = self.get_value("CIMEROOT") + + if cmd is None: + cmd = self.fix_sys_argv_quotes(list(sys.argv)) + + if init: + ctime = time.strftime("%Y-%m-%d %H:%M:%S") + + lines.append("#!/bin/bash\n\n") + # stop script on error, prevents `create_newcase` from failing + # and continuing to execute commands + 
lines.append("set -e\n\n") + lines.append("# Created {}\n\n".format(ctime)) + lines.append('CASEDIR="{}"\n\n'.format(caseroot)) + lines.append('cd "${CASEDIR}"\n\n') + + # Ensure program path is absolute + cmd[0] = re.sub("^./", "{}/scripts/".format(cimeroot), cmd[0]) + else: + expect( + caseroot + and os.path.isdir(caseroot) + and os.path.isfile(os.path.join(caseroot, "env_case.xml")), + "Directory {} does not appear to be a valid case directory".format( + caseroot + ), + ) + + cmd = " ".join(cmd) + + # Replace instances of caseroot with variable + cmd = re.sub(caseroot, '"${CASEDIR}"', cmd) + + lines_len = len(lines) + lines.insert(lines_len - 1 if init else lines_len, "{}\n\n".format(cmd)) + + try: + with open(os.path.join(caseroot, "replay.sh"), "a") as fd: + fd.writelines(lines) + except PermissionError: + logger.warning("Could not write to 'replay.sh' script") + + def fix_sys_argv_quotes(self, cmd): + """Fixes removed quotes from argument list. + + Restores quotes to `--val` and `KEY=VALUE` from sys.argv. + """ + # handle fixing quotes + # case 1: "--val", " -nlev 276 " + # case 2: "-val" , " -nlev 276 " + # case 3: CAM_CONFIG_OPTS=" -nlev 276 " + for i, item in enumerate(cmd): + if re.match("[-]{1,2}val", item) is not None: + if i + 1 >= len(cmd): + continue + + # only quote if value contains spaces + if " " in cmd[i + 1]: + cmd[i + 1] = f'"{cmd[i + 1]}"' + else: + m = re.search("([^=]*)=(.*)", item) + + if m is None: + continue + + g = m.groups() + + # only quote if value contains spaces + if " " in g[1]: + cmd[i] = f'{g[0]}="{g[1]}"' + + return cmd + + def create( + self, + casename, + srcroot, + compset_name, + grid_name, + user_mods_dirs=None, + machine_name=None, + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ): + try: + # Set values for env_case.xml + self.set_lookup_value("CASE", os.path.basename(casename)) + self.set_lookup_value("CASEROOT", self._caseroot) + self.set_lookup_value("SRCROOT", srcroot) + self.set_lookup_value("CASE_HASH", self.new_hash()) + + # Propagate to `GenericXML` to resolve $SRCROOT + utils.GLOBAL["SRCROOT"] = srcroot + + customize_path = os.path.join(srcroot, "cime_config", "customize") + + config.load(customize_path) + + # If any of the top level user_mods_dirs contain a config_grids.xml file and + # gridfile was not set on the command line, use it. However, if there are + # multiple user_mods_dirs, it is an error for more than one of them to contain + # a config_grids.xml file, because it would be ambiguous which one we should + # use. + if user_mods_dirs: + found_um_config_grids = False + for this_user_mods_dir in user_mods_dirs: + um_config_grids = os.path.join( + this_user_mods_dir, "config_grids.xml" + ) + if os.path.exists(um_config_grids): + if gridfile: + # Either a gridfile was found in an earlier user_mods + # directory or a gridfile was given on the command line. The + # first case (which would set found_um_config_grids to True) + # is an error; the second case just generates a warning. 
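                            # Concretely (hypothetical scenario): two user_mods_dirs that
                            # each ship a config_grids.xml is ambiguous and aborts, while a
                            # gridfile given on the command line plus a single user-mods
                            # config_grids.xml only warns, and the command-line file takes
                            # precedence.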
+ expect( + not found_um_config_grids, + "Cannot handle multiple usermods directories with config_grids.xml files: {} and {}".format( + gridfile, um_config_grids + ), + ) + logger.warning( + "A config_grids file was found in {} but also provided on the command line {}, command line takes precedence".format( + um_config_grids, gridfile + ) + ) + else: + gridfile = um_config_grids + found_um_config_grids = True + + # Configure the Case + self.configure( + compset_name, + grid_name, + machine_name=machine_name, + project=project, + pecount=pecount, + compiler=compiler, + mpilib=mpilib, + pesfile=pesfile, + gridfile=gridfile, + multi_driver=multi_driver, + ninst=ninst, + test=test, + walltime=walltime, + queue=queue, + output_root=output_root, + run_unsupported=run_unsupported, + answer=answer, + input_dir=input_dir, + driver=driver, + workflowid=workflowid, + non_local=non_local, + extra_machines_dir=extra_machines_dir, + case_group=case_group, + ) + + self.create_caseroot() + + # Write out the case files + self.flush(flushall=True) + self.apply_user_mods(user_mods_dirs) + + # Lock env_case.xml + lock_file("env_case.xml", self._caseroot) + except Exception: + if os.path.exists(self._caseroot): + if not logger.isEnabledFor(logging.DEBUG) and not test: + logger.warning( + "Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format( + self._caseroot + ) + ) + shutil.rmtree(self._caseroot) + else: + logger.warning("Leaving broken case dir {}".format(self._caseroot)) + + raise + + def new_hash(self): + """Creates a hash""" + args = "".join(sys.argv) + ctime = time.strftime("%Y-%m-%d %H:%M:%S") + hostname = socket.getfqdn() + user = getpass.getuser() + + data = "{}{}{}{}".format(args, ctime, hostname, user) + + return hashlib.sha256(data.encode()).hexdigest() + + def is_save_timing_dir_project(self, project): + """ + Check whether the project is permitted to archive performance data in the location + specified for the current machine + """ + save_timing_dir_projects = self.get_value("SAVE_TIMING_DIR_PROJECTS") + if not save_timing_dir_projects: + return False + else: + save_timing_dir_projects = save_timing_dir_projects.split( + "," + ) # pylint: disable=no-member + for save_timing_dir_project in save_timing_dir_projects: + regex = re.compile(save_timing_dir_project) + if regex.match(project): + return True + + return False + + def get_primary_job(self): + return "case.test" if self.get_value("TEST") else "case.run" + + def get_first_job(self): + env_workflow = self.get_env("workflow") + jobs = env_workflow.get_jobs() + return jobs[0] + + def preview_run(self, write, job): + write("CASE INFO:") + write(" nodes: {}".format(self.num_nodes)) + write(" total tasks: {}".format(self.total_tasks)) + write(" tasks per node: {}".format(self.tasks_per_node)) + write(" thread count: {}".format(self.thread_count)) + write(" ngpus per node: {}".format(self.ngpus_per_node)) + write("") + + write("BATCH INFO:") + if not job: + job = self.get_first_job() + + job_id_to_cmd = self.submit_jobs(dry_run=True, job=job) + + env_batch = self.get_env("batch") + for job_id, cmd in job_id_to_cmd: + write(" FOR JOB: {}".format(job_id)) + write(" ENV:") + loaded_envs = self.load_env(job=job_id, reset=True, verbose=False) + + for name, value in iter(sorted(loaded_envs, key=lambda x: x[0])): + write(" Setting Environment {}={}".format(name, value)) + + write("") + write(" SUBMIT CMD:") + write(" {}".format(self.get_resolved_value(cmd))) + write("") + if job_id in ("case.run", "case.test"): + # 
get_job_overrides must come after the case.load_env since the cmd may use + # env vars. + overrides = env_batch.get_job_overrides(job_id, self) + write(" MPIRUN (job={}):".format(job_id)) + write(" {}".format(self.get_resolved_value(overrides["mpirun"]))) + write("") diff --git a/CIME/case/case_clone.py b/CIME/case/case_clone.py new file mode 100644 index 00000000000..c9f4ccb7090 --- /dev/null +++ b/CIME/case/case_clone.py @@ -0,0 +1,245 @@ +""" +create_clone is a member of the Case class from file case.py +""" +import os, glob, shutil +from CIME.XML.standard_module_setup import * +from CIME.utils import expect, check_name, safe_copy, get_model +from CIME.simple_compare import compare_files +from CIME.locked_files import lock_file +from CIME.user_mod_support import apply_user_mods + +logger = logging.getLogger(__name__) + + +def create_clone( + self, + newcaseroot, + keepexe=False, + mach_dir=None, + project=None, + cime_output_root=None, + exeroot=None, + rundir=None, + user_mods_dirs=None, +): + """ + Create a case clone + + If exeroot or rundir are provided (not None), sets these directories + to the given paths; if not provided, uses default values for these + directories. It is an error to provide exeroot if keepexe is True. + """ + if cime_output_root is None: + cime_output_root = self.get_value("CIME_OUTPUT_ROOT") + + newcaseroot = os.path.abspath(newcaseroot) + expect( + not os.path.isdir(newcaseroot), + "New caseroot directory {} already exists".format(newcaseroot), + ) + newcasename = os.path.basename(newcaseroot) + expect(check_name(newcasename), "New case name invalid {} ".format(newcasename)) + newcase_cimeroot = os.path.abspath(get_cime_root()) + + # create clone from case to case + clone_cimeroot = self.get_value("CIMEROOT") + if newcase_cimeroot != clone_cimeroot: + logger.warning(" case CIMEROOT is {} ".format(newcase_cimeroot)) + logger.warning(" clone CIMEROOT is {} ".format(clone_cimeroot)) + logger.warning( + " It is NOT recommended to clone cases from different versions of CIME." + ) + + # *** create case object as deepcopy of clone object *** + if os.path.isdir(os.path.join(newcase_cimeroot, "share")) and get_model() == "cesm": + srcroot = newcase_cimeroot + else: + srcroot = os.path.join(newcase_cimeroot, "..") + if not os.path.isdir(srcroot): + srcroot = self.get_value("SRCROOT") + + newcase = self.copy(newcasename, newcaseroot, newsrcroot=srcroot) + with newcase: + newcase.set_value("CIMEROOT", newcase_cimeroot) + + # if we are cloning to a different user modify the output directory + olduser = self.get_value("USER") + newuser = os.environ.get("USER") + if olduser != newuser: + cime_output_root = cime_output_root.replace(olduser, newuser) + newcase.set_value("USER", newuser) + newcase.set_value("CIME_OUTPUT_ROOT", cime_output_root) + + # try to make the new output directory and raise an exception + # on any error other than directory already exists. + if os.path.isdir(cime_output_root): + expect( + os.access(cime_output_root, os.W_OK), + "Directory {} is not writable " + "by this user. 
Use the --cime-output-root flag to provide a writable " + "scratch directory".format(cime_output_root), + ) + else: + if not os.path.isdir(cime_output_root): + os.makedirs(cime_output_root) + + # determine if will use clone executable or not + if keepexe: + orig_exeroot = self.get_value("EXEROOT") + newcase.set_value("EXEROOT", orig_exeroot) + newcase.set_value("BUILD_COMPLETE", "TRUE") + orig_bld_complete = self.get_value("BUILD_COMPLETE") + if not orig_bld_complete: + logger.warning( + "\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone" + ) + logger.warning( + "Avoid this message by building case one before you clone.\n" + ) + else: + newcase.set_value("BUILD_COMPLETE", "FALSE") + + # set machdir + if mach_dir is not None: + newcase.set_value("MACHDIR", mach_dir) + + # set exeroot and rundir if requested + if exeroot is not None: + expect( + not keepexe, + "create_case_clone: if keepexe is True, then exeroot cannot be set", + ) + newcase.set_value("EXEROOT", exeroot) + if rundir is not None: + newcase.set_value("RUNDIR", rundir) + + # Set project id + # Note: we do not just copy this from the clone because it seems likely that + # users will want to change this sometimes, especially when cloning another + # user's case. However, note that, if a project is not given, the fallback will + # be to copy it from the clone, just like other xml variables are copied. + if project is None: + project = self.get_value("PROJECT", subgroup=self.get_primary_job()) + if project is not None: + newcase.set_value("PROJECT", project) + + # create caseroot + newcase.create_caseroot(clone=True) + + # Many files in the case will be links back to the source tree + # but users may have broken links to modify files locally. In this case + # copy the locally modified file. We only want to do this for files that + # already exist in the clone. 
+ # pylint: disable=protected-access + self._copy_user_modified_to_clone( + self.get_value("CASEROOT"), newcase.get_value("CASEROOT") + ) + self._copy_user_modified_to_clone( + self.get_value("CASETOOLS"), newcase.get_value("CASETOOLS") + ) + + newcase.flush(flushall=True) + + # copy user_ files + cloneroot = self.get_case_root() + files = glob.glob(cloneroot + "/user_*") + + for item in files: + safe_copy(item, newcaseroot) + + # copy SourceMod and Buildconf files + # if symlinks exist, copy rather than follow links + for casesub in ("SourceMods", "Buildconf"): + shutil.copytree( + os.path.join(cloneroot, casesub), + os.path.join(newcaseroot, casesub), + symlinks=True, + ) + + # copy the postprocessing directory if it exists + if os.path.isdir(os.path.join(cloneroot, "postprocess")): + shutil.copytree( + os.path.join(cloneroot, "postprocess"), + os.path.join(newcaseroot, "postprocess"), + symlinks=True, + ) + + # lock env_case.xml in new case + lock_file("env_case.xml", newcaseroot) + + # if any other xml files exist in case directory copy them + files = glob.glob(os.path.join(cloneroot, "*.xml")) + for item in files: + if not os.path.exists(os.path.join(newcaseroot, os.path.basename(item))): + safe_copy(item, newcaseroot) + + # apply user_mods if appropriate + newcase_root = newcase.get_value("CASEROOT") + if user_mods_dirs is not None: + if keepexe: + # If keepexe CANNOT change any env_build.xml variables - so make a temporary copy of + # env_build.xml and verify that it has not been modified + safe_copy( + os.path.join(newcaseroot, "env_build.xml"), + os.path.join(newcaseroot, "LockedFiles", "env_build.xml"), + ) + + # Now apply contents of all specified user_mods directories + for one_user_mods_dir in user_mods_dirs: + apply_user_mods(newcase_root, one_user_mods_dir, keepexe=keepexe) + + # Determine if env_build.xml has changed + if keepexe: + success, comment = compare_files( + os.path.join(newcaseroot, "env_build.xml"), + os.path.join(newcaseroot, "LockedFiles", "env_build.xml"), + ) + if not success: + logger.warning(comment) + shutil.rmtree(newcase_root) + expect( + False, + "env_build.xml cannot be changed via usermods if keepexe is an option: \n " + "Failed to clone case, removed {}\n".format(newcase_root), + ) + + # if keep executable, then remove the new case SourceMods directory and link SourceMods to + # the clone directory + if keepexe: + shutil.rmtree(os.path.join(newcase_root, "SourceMods")) + os.symlink( + os.path.join(cloneroot, "SourceMods"), + os.path.join(newcase_root, "SourceMods"), + ) + + # Update README.case + fclone = open(cloneroot + "/README.case", "r") + fnewcase = open(newcaseroot + "/README.case", "a") + fnewcase.write("\n *** original clone README follows ****") + fnewcase.write("\n " + fclone.read()) + + clonename = self.get_value("CASE") + logger.info( + " Successfully created new case {} from clone case {} ".format( + newcasename, clonename + ) + ) + + return newcase + + +# pylint: disable=unused-argument +def _copy_user_modified_to_clone(self, origpath, newpath): + """ + If file_ exists and is a link in newpath, and exists but is not a + link in origpath, copy origpath file to newpath + """ + for file_ in os.listdir(newpath): + if ( + os.path.islink(os.path.join(newpath, file_)) + and os.path.isfile(os.path.join(origpath, file_)) + and not os.path.islink(os.path.join(origpath, file_)) + ): + logger.info("Copying user modified file {} to clone".format(file_)) + os.unlink(os.path.join(newpath, file_)) + safe_copy(os.path.join(origpath, file_), newpath) 
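A minimal usage sketch of the clone entry point above, assuming an existing, built case at a placeholder path; create_clone is reached through a Case object opened read-write, and the paths and keyword choices are illustrative only, not part of this change:

    from CIME.case import Case

    # Placeholder paths; keepexe=True reuses the original build, so exeroot
    # must not be overridden in that mode (see the expect() check above).
    with Case("/scratch/cases/original_case", read_only=False) as case:
        clone = case.create_clone(
            "/scratch/cases/cloned_case",
            keepexe=True,
            user_mods_dirs=["/home/user/my_mods"],
        )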
diff --git a/CIME/case/case_cmpgen_namelists.py b/CIME/case/case_cmpgen_namelists.py new file mode 100644 index 00000000000..10ded368147 --- /dev/null +++ b/CIME/case/case_cmpgen_namelists.py @@ -0,0 +1,197 @@ +""" +Library for case.cmpgen_namelists. +case_cmpgen_namelists is a member of Class case from file case.py +""" + +from CIME.XML.standard_module_setup import * + +from CIME.compare_namelists import is_namelist_file, compare_namelist_files +from CIME.simple_compare import compare_files, compare_runconfigfiles +from CIME.utils import safe_copy, SharedArea +from CIME.status import append_status +from CIME.test_status import * + +import os, shutil, traceback, stat, glob + +logger = logging.getLogger(__name__) + + +def _do_full_nl_comp(case, test, compare_name, baseline_root=None): + test_dir = case.get_value("CASEROOT") + casedoc_dir = os.path.join(test_dir, "CaseDocs") + baseline_root = ( + case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root + ) + + all_match = True + baseline_dir = os.path.join(baseline_root, compare_name, test) + baseline_casedocs = os.path.join(baseline_dir, "CaseDocs") + + # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!) + # TODO: Namelist files should have consistent suffix + all_items_to_compare = [ + item + for item in glob.glob("{}/*".format(casedoc_dir)) + if "README" not in os.path.basename(item) + and not item.endswith("doc") + and not item.endswith("prescribed") + and not os.path.basename(item).startswith(".") + ] + + comments = "NLCOMP\n" + for item in all_items_to_compare: + baseline_counterpart = os.path.join( + baseline_casedocs + if os.path.dirname(item).endswith("CaseDocs") + else baseline_dir, + os.path.basename(item), + ) + if not os.path.exists(baseline_counterpart): + comments += "Missing baseline namelist '{}'\n".format(baseline_counterpart) + all_match = False + else: + if item.endswith("runconfig") or item.endswith("runseq"): + success, current_comments = compare_runconfigfiles( + baseline_counterpart, item, test + ) + elif is_namelist_file(item): + success, current_comments = compare_namelist_files( + baseline_counterpart, item, test + ) + else: + success, current_comments = compare_files( + baseline_counterpart, item, test + ) + + all_match &= success + if not success: + comments += "Comparison failed between '{}' with '{}'\n".format( + item, baseline_counterpart + ) + + comments += current_comments + + logging.info(comments) + return all_match, comments + + +def _do_full_nl_gen_impl(case, test, generate_name, baseline_root=None): + test_dir = case.get_value("CASEROOT") + casedoc_dir = os.path.join(test_dir, "CaseDocs") + baseline_root = ( + case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root + ) + + baseline_dir = os.path.join(baseline_root, generate_name, test) + baseline_casedocs = os.path.join(baseline_dir, "CaseDocs") + + if not os.path.isdir(baseline_dir): + os.makedirs( + baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH + ) + + if os.path.isdir(baseline_casedocs): + shutil.rmtree(baseline_casedocs) + + shutil.copytree(casedoc_dir, baseline_casedocs) + + for item in glob.glob(os.path.join(test_dir, "user_nl*")): + preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item)) + if os.path.exists(preexisting_baseline): + os.remove(preexisting_baseline) + + safe_copy(item, baseline_dir, preserve_meta=False) + + +def _do_full_nl_gen(case, test, generate_name, baseline_root=None): + with SharedArea(): + 
_do_full_nl_gen_impl(case, test, generate_name, baseline_root=baseline_root) + + +def case_cmpgen_namelists( + self, + compare=False, + generate=False, + compare_name=None, + generate_name=None, + baseline_root=None, + logfile_name="TestStatus.log", +): + expect(self.get_value("TEST"), "Only makes sense to run this for a test case") + + caseroot, casebaseid = self.get_value("CASEROOT"), self.get_value("CASEBASEID") + + if not compare: + compare = self.get_value("COMPARE_BASELINE") + if not generate: + generate = self.get_value("GENERATE_BASELINE") + + if not compare and not generate: + logging.debug("No namelists compares requested") + return True + + # create namelists for case if they haven't been already + casedocs = os.path.join(caseroot, "CaseDocs") + if not os.path.exists(os.path.join(casedocs, "drv_in")): + self.create_namelists() + + test_name = casebaseid if casebaseid is not None else self.get_value("CASE") + with TestStatus(test_dir=caseroot, test_name=test_name) as ts: + try: + # Inside this try are where we catch non-fatal errors, IE errors involving + # baseline operations which may not directly impact the functioning of the viability of this case + if compare and not compare_name: + compare_name = self.get_value("BASELINE_NAME_CMP") + expect( + compare_name, + "Was asked to do baseline compare but unable to determine baseline name", + ) + logging.info( + "Comparing namelists with baselines '{}'".format(compare_name) + ) + if generate and not generate_name: + generate_name = self.get_value("BASELINE_NAME_GEN") + expect( + generate_name, + "Was asked to do baseline generation but unable to determine baseline name", + ) + logging.info( + "Generating namelists to baselines '{}'".format(generate_name) + ) + + success = True + output = "" + if compare: + success, output = _do_full_nl_comp( + self, test_name, compare_name, baseline_root + ) + if not success and ts.get_status(RUN_PHASE) is not None: + run_warn = """NOTE: It is not necessarily safe to compare namelists after RUN +phase has completed. Running a case can pollute namelists. 
The namelists +kept in the baselines are pre-RUN namelists.""" + output += run_warn + logging.info(run_warn) + if generate: + _do_full_nl_gen(self, test_name, generate_name, baseline_root) + except Exception: + success = False + ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS) + warn = "Exception during namelist operations:\n{}\n{}".format( + sys.exc_info()[1], traceback.format_exc() + ) + output += warn + logging.warning(warn) + finally: + ts.set_status( + NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS + ) + try: + append_status( + output, + logfile_name, + caseroot=caseroot, + ) + except IOError: + pass + + return success diff --git a/CIME/case/case_run.py b/CIME/case/case_run.py new file mode 100644 index 00000000000..3e7c8baa37d --- /dev/null +++ b/CIME/case/case_run.py @@ -0,0 +1,612 @@ +""" +case_run is a member of Class Case +'""" +from CIME.XML.standard_module_setup import * +from CIME.config import Config +from CIME.utils import gzip_existing_file, new_lid +from CIME.utils import run_sub_or_cmd, safe_copy, model_log, CIMEError +from CIME.utils import batch_jobid, is_comp_standalone +from CIME.status import append_status, run_and_log_case_status +from CIME.get_timing import get_timing +from CIME.locked_files import check_lockedfiles + +import shutil, time, sys, os, glob + +TERMINATION_TEXT = ("HAS ENDED", "END OF MODEL RUN", "SUCCESSFUL TERMINATION") + +logger = logging.getLogger(__name__) + + +############################################################################### +def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0): + ############################################################################### + + # Pre run initialization code.. + if da_cycle > 0: + case.create_namelists(component="cpl") + return + + caseroot = case.get_value("CASEROOT") + din_loc_root = case.get_value("DIN_LOC_ROOT") + rundir = case.get_value("RUNDIR") + + if case.get_value("TESTCASE") == "PFS": + for filename in ("env_mach_pes.xml", "software_environment.txt"): + fullpath = os.path.join(caseroot, filename) + safe_copy(fullpath, "{}.{}".format(filename, lid)) + + # check for locked files, may impact BUILD_COMPLETE + skip = None + + if case.get_value("EXTERNAL_WORKFLOW"): + skip = "env_batch" + + check_lockedfiles(case, skip=skip) + + logger.debug("check_lockedfiles OK") + + build_complete = case.get_value("BUILD_COMPLETE") + + # check that build is done + expect( + build_complete, + "BUILD_COMPLETE is not true\nPlease rebuild the model interactively", + ) + logger.debug("build complete is {} ".format(build_complete)) + + # load the module environment... + case.load_env(reset=True) + + # create the timing directories, optionally cleaning them if needed. 
+ if os.path.isdir(os.path.join(rundir, "timing")): + shutil.rmtree(os.path.join(rundir, "timing")) + + os.makedirs(os.path.join(rundir, "timing", "checkpoints")) + + # This needs to be done everytime the LID changes in order for log files to be set up correctly + # The following also needs to be called in case a user changes a user_nl_xxx file OR an env_run.xml + # variable while the job is in the queue + model_log( + "e3sm", + logger, + "{} NAMELIST CREATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + if skip_pnl: + case.create_namelists(component="cpl") + else: + logger.info("Generating namelists for {}".format(caseroot)) + case.create_namelists() + + model_log( + "e3sm", + logger, + "{} NAMELIST CREATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + + logger.info( + "-------------------------------------------------------------------------" + ) + logger.info(" - Prestage required restarts into {}".format(rundir)) + logger.info( + " - Case input data directory (DIN_LOC_ROOT) is {} ".format(din_loc_root) + ) + logger.info(" - Checking for required input datasets in DIN_LOC_ROOT") + logger.info( + "-------------------------------------------------------------------------" + ) + + +############################################################################### +def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0): + ############################################################################### + + model_log( + "e3sm", + logger, + "{} PRE_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + _pre_run_check(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) + model_log( + "e3sm", + logger, + "{} PRE_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + + model = case.get_value("MODEL") + + # Set OMP_NUM_THREADS + os.environ["OMP_NUM_THREADS"] = str(case.thread_count) + + # Run the model + cmd = case.get_mpirun_cmd(allow_unresolved_envvars=False) + logger.info("run command is {} ".format(cmd)) + + rundir = case.get_value("RUNDIR") + + # MPIRUN_RETRY_REGEX allows the mpi command to be reattempted if the + # failure described by that regular expression is matched in the model log + # case.spare_nodes is overloaded and may also represent the number of + # retries to attempt if ALLOCATE_SPARE_NODES is False + retry_run_re = case.get_value("MPIRUN_RETRY_REGEX") + node_fail_re = case.get_value("NODE_FAIL_REGEX") + retry_count = 0 + if retry_run_re: + retry_run_regex = re.compile(re.escape(retry_run_re)) + retry_count = case.get_value("MPIRUN_RETRY_COUNT") + if node_fail_re: + node_fail_regex = re.compile(re.escape(node_fail_re)) + + is_batch = case.get_value("BATCH_SYSTEM") is not None + msg_func = None + + if is_batch: + jobid = batch_jobid() + msg_func = lambda *args: jobid if jobid else "" + + loop = True + while loop: + loop = False + + model_log( + "e3sm", + logger, + "{} SAVE_PRERUN_PROVENANCE BEGINS HERE".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + try: + Config.instance().save_prerun_provenance(case) + except AttributeError: + logger.debug("No hook for saving prerun provenance was executed") + model_log( + "e3sm", + logger, + "{} SAVE_PRERUN_PROVENANCE HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + + model_log( + "e3sm", + logger, + "{} MODEL EXECUTION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + run_func = lambda: run_cmd_no_fail(cmd, from_dir=rundir) + case.flush() + + try: + run_and_log_case_status( + run_func, + "model execution", + 
custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=case.get_value("CASEROOT"), + is_batch=is_batch, + gitinterface=case._gitinterface, + ) + cmd_success = True + except CIMEError: + cmd_success = False + + # The run will potentially take a very long time. We need to + # allow the user to xmlchange things in their case. + # + # WARNING: All case variables are reloaded after this call to get the + # new values of any variables that may have been changed by + # the user during model execution. Thus, any local variables + # set from case variables before this point may be + # inconsistent with their latest values in the xml files, so + # should generally be reloaded (via case.get_value(XXX)) if they are still needed. + case.read_xml() + + model_log( + "e3sm", + logger, + "{} MODEL EXECUTION HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + + model_logfile = os.path.join(rundir, model + ".log." + lid) + # Determine if failure was due to a failed node, if so, try to restart + if retry_run_re or node_fail_re: + model_logfile = os.path.join(rundir, model + ".log." + lid) + if os.path.exists(model_logfile): + num_node_fails = 0 + num_retry_fails = 0 + if node_fail_re: + num_node_fails = len( + node_fail_regex.findall(open(model_logfile, "r").read()) + ) + if retry_run_re: + num_retry_fails = len( + retry_run_regex.findall(open(model_logfile, "r").read()) + ) + logger.debug( + "RETRY: num_retry_fails {} spare_nodes {} retry_count {}".format( + num_retry_fails, case.spare_nodes, retry_count + ) + ) + if num_node_fails > 0 and case.spare_nodes >= num_node_fails: + # We failed due to node failure! + logger.warning( + "Detected model run failed due to node failure, restarting" + ) + case.spare_nodes -= num_node_fails + loop = True + case.set_value( + "CONTINUE_RUN", case.get_value("RESUBMIT_SETS_CONTINUE_RUN") + ) + elif num_retry_fails > 0 and retry_count >= num_retry_fails: + logger.warning("Detected model run failed, restarting") + retry_count -= 1 + loop = True + + if loop: + # Archive the last consistent set of restart files and restore them + if case.get_value("DOUT_S"): + case.case_st_archive(resubmit=False) + case.restore_from_archive() + + lid = new_lid(case=case) + case.create_namelists() + + if not cmd_success and not loop: + # We failed and we're not restarting + expect( + False, + "RUN FAIL: Command '{}' failed\nSee log file for details: {}".format( + cmd, model_logfile + ), + ) + + model_log( + "e3sm", + logger, + "{} POST_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + _post_run_check(case, lid) + model_log( + "e3sm", + logger, + "{} POST_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + + return lid + + +############################################################################### +def _run_model(case, lid, skip_pnl=False, da_cycle=0): + ############################################################################### + functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) + + is_batch = case.get_value("BATCH_SYSTEM") is not None + msg_func = None + + if is_batch: + jobid = batch_jobid() + msg_func = lambda *args: jobid if jobid is not None else "" + + return run_and_log_case_status( + functor, + "case.run", + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=case.get_value("CASEROOT"), + is_batch=is_batch, + gitinterface=case._gitinterface, + ) + + 
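The functor-plus-status-logging pattern used by _run_model above, and again by case_setup further below, reduces to the following standalone sketch; run_and_log here is an editor's stand-in for illustration only, not the actual CIME run_and_log_case_status.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("sketch")


def run_and_log(functor, phase, msg_func=None):
    # Record start, success, or failure around an arbitrary piece of work.
    extra = msg_func() if msg_func else ""
    log.info("%s starting %s", phase, extra)
    try:
        result = functor()
    except Exception:
        log.exception("%s error %s", phase, extra)
        raise
    log.info("%s success %s", phase, extra)
    return result


# Hypothetical usage: the real work goes in a lambda so the wrapper stays generic.
lid = run_and_log(lambda: "240101-120000", "model execution", msg_func=lambda: "job 12345")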
+############################################################################### +def _post_run_check(case, lid): + ############################################################################### + + rundir = case.get_value("RUNDIR") + driver = case.get_value("COMP_INTERFACE") + + comp_standalone, model = is_comp_standalone(case) + + if driver == "nuopc": + if comp_standalone: + file_prefix = model + else: + file_prefix = "med" + else: + file_prefix = "cpl" + + cpl_ninst = 1 + if case.get_value("MULTI_DRIVER"): + cpl_ninst = case.get_value("NINST_MAX") + cpl_logs = [] + + if cpl_ninst > 1: + for inst in range(cpl_ninst): + cpl_logs.append( + os.path.join(rundir, file_prefix + "_%04d.log." % (inst + 1) + lid) + ) + if driver == "nuopc" and comp_standalone: + cpl_logs.append( + os.path.join(rundir, "med_%04d.log." % (inst + 1) + lid) + ) + else: + cpl_logs = [os.path.join(rundir, file_prefix + ".log." + lid)] + if driver == "nuopc" and comp_standalone: + cpl_logs.append(os.path.join(rundir, "med.log." + lid)) + cpl_logfile = cpl_logs[0] + # find the last model.log and cpl.log + model_logfile = os.path.join(rundir, model + ".log." + lid) + if not os.path.isfile(model_logfile): + expect(False, "Model did not complete, no {} log file ".format(model_logfile)) + elif os.stat(model_logfile).st_size == 0: + expect(False, "Run FAILED") + else: + count_ok = 0 + for cpl_logfile in cpl_logs: + if not os.path.isfile(cpl_logfile): + break + with open(cpl_logfile, "r") as fd: + logfile = fd.read() + if any([x in logfile for x in TERMINATION_TEXT]): + count_ok += 1 + if count_ok < cpl_ninst: + expect(False, "Model did not complete - see {} \n ".format(cpl_logfile)) + + +############################################################################### +def _save_logs(case, lid): + ############################################################################### + rundir = case.get_value("RUNDIR") + logfiles = glob.glob(os.path.join(rundir, "*.log.{}".format(lid))) + for logfile in logfiles: + if os.path.isfile(logfile): + gzip_existing_file(logfile) + + +###################################################################################### +def _resubmit_check(case): + ############################################################################### + """ + check to see if we need to do resubmission from this particular job, + Note that Mira requires special logic + """ + dout_s = case.get_value("DOUT_S") + logger.warning("dout_s {} ".format(dout_s)) + mach = case.get_value("MACH") + logger.warning("mach {} ".format(mach)) + resubmit_num = case.get_value("RESUBMIT") + logger.warning("resubmit_num {}".format(resubmit_num)) + # If dout_s is True than short-term archiving handles the resubmit + # If dout_s is True and machine is mira submit the st_archive script + resubmit = False + if not dout_s and resubmit_num > 0: + resubmit = True + elif dout_s and mach == "mira": + caseroot = case.get_value("CASEROOT") + cimeroot = case.get_value("CIMEROOT") + cmd = "ssh cooleylogin1 'cd {case}; CIMEROOT={root} ./case.submit {case} --job case.st_archive'".format( + case=caseroot, root=cimeroot + ) + run_cmd(cmd, verbose=True) + + if resubmit: + job = case.get_primary_job() + + case.submit(job=job, resubmit=True) + + logger.debug("resubmit after check is {}".format(resubmit)) + + +############################################################################### +def _do_external(script_name, caseroot, rundir, lid, prefix): + ############################################################################### + expect( + 
os.path.isfile(script_name), "External script {} not found".format(script_name) + ) + filename = "{}.external.log.{}".format(prefix, lid) + outfile = os.path.join(rundir, filename) + append_status("Starting script {}".format(script_name), "CaseStatus") + run_sub_or_cmd( + script_name, + [caseroot], + (os.path.basename(script_name).split(".", 1))[0], + [caseroot], + logfile=outfile, + ) # For sub, use case? + append_status("Completed script {}".format(script_name), "CaseStatus") + + +############################################################################### +def _do_data_assimilation(da_script, caseroot, cycle, lid, rundir): + ############################################################################### + expect( + os.path.isfile(da_script), + "Data Assimilation script {} not found".format(da_script), + ) + filename = "da.log.{}".format(lid) + outfile = os.path.join(rundir, filename) + run_sub_or_cmd( + da_script, + [caseroot, cycle], + os.path.basename(da_script), + [caseroot, cycle], + logfile=outfile, + ) # For sub, use case? + + +############################################################################### +def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=False): + ############################################################################### + model_log( + "e3sm", + logger, + "{} CASE.RUN BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + # Set up the run, run the model, do the postrun steps + + # set up the LID + lid = new_lid(case=self) + + prerun_script = self.get_value("PRERUN_SCRIPT") + if prerun_script: + model_log( + "e3sm", + logger, + "{} PRERUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + self.flush() + _do_external( + prerun_script, + self.get_value("CASEROOT"), + self.get_value("RUNDIR"), + lid, + prefix="prerun", + ) + self.read_xml() + model_log( + "e3sm", + logger, + "{} PRERUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + + # We might need to tweak these if we want to allow the user to change them + data_assimilation_cycles = self.get_value("DATA_ASSIMILATION_CYCLES") + data_assimilation_script = self.get_value("DATA_ASSIMILATION_SCRIPT") + data_assimilation = ( + data_assimilation_cycles > 0 + and len(data_assimilation_script) > 0 + and os.path.isfile(data_assimilation_script) + ) + drv_restart_pointer = self.get_value("DRV_RESTART_POINTER") + for cycle in range(data_assimilation_cycles): + # After the first DA cycle, runs are restart runs + if cycle > 0: + lid = new_lid() + self.set_value("CONTINUE_RUN", self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) + + # WARNING: All case variables are reloaded during run_model to get + # new values of any variables that may have been changed by + # the user during model execution. Thus, any local variables + # set from case variables before this point may be + # inconsistent with their latest values in the xml files, so + # should generally be reloaded (via case.get_value(XXX)) if they are still needed. 
+ model_log( + "e3sm", + logger, + "{} RUN_MODEL BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + lid = _run_model(self, lid, skip_pnl, da_cycle=cycle) + + # get the most recent cpl restart pointer file + rundir = self.get_value("RUNDIR") + if drv_restart_pointer and not self._read_only_mode: + pattern = os.path.join(rundir, "rpointer.cpl*") + files = sorted(glob.glob(pattern), key=os.path.getmtime) + if files: + drv_ptr = os.path.basename(files[-1]) + self.set_value("DRV_RESTART_POINTER", drv_ptr) + model_log( + "e3sm", + logger, + "{} RUN_MODEL HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + + if self.get_value("CHECK_TIMING") or self.get_value("SAVE_TIMING"): + model_log( + "e3sm", + logger, + "{} GET_TIMING BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + get_timing(self, lid) # Run the getTiming script + model_log( + "e3sm", + logger, + "{} GET_TIMING HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + + if data_assimilation: + model_log( + "e3sm", + logger, + "{} DO_DATA_ASSIMILATION BEGINS HERE".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + self.flush() + _do_data_assimilation( + data_assimilation_script, + self.get_value("CASEROOT"), + cycle, + lid, + self.get_value("RUNDIR"), + ) + self.read_xml() + model_log( + "e3sm", + logger, + "{} DO_DATA_ASSIMILATION HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + + _save_logs(self, lid) # Copy log files back to caseroot + + model_log( + "e3sm", + logger, + "{} SAVE_POSTRUN_PROVENANCE BEGINS HERE".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + try: + Config.instance().save_postrun_provenance(self, lid) + except AttributeError: + logger.debug("No hook for saving postrun provenance was executed") + model_log( + "e3sm", + logger, + "{} SAVE_POSTRUN_PROVENANCE HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + + postrun_script = self.get_value("POSTRUN_SCRIPT") + if postrun_script: + model_log( + "e3sm", + logger, + "{} POSTRUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + self.flush() + _do_external( + postrun_script, + self.get_value("CASEROOT"), + self.get_value("RUNDIR"), + lid, + prefix="postrun", + ) + self.read_xml() + _save_logs(self, lid) + model_log( + "e3sm", + logger, + "{} POSTRUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + + if set_continue_run: + self.set_value("CONTINUE_RUN", self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) + + external_workflow = self.get_value("EXTERNAL_WORKFLOW") + if not external_workflow: + logger.warning("check for resubmit") + + logger.debug("submit_resubmits is {}".format(submit_resubmits)) + if submit_resubmits: + _resubmit_check(self) + + model_log( + "e3sm", + logger, + "{} CASE.RUN HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + return True diff --git a/CIME/case/case_setup.py b/CIME/case/case_setup.py new file mode 100644 index 00000000000..98671ba24d2 --- /dev/null +++ b/CIME/case/case_setup.py @@ -0,0 +1,597 @@ +""" +Library for case.setup. 
+case_setup is a member of class Case from file case.py +""" + +import os + +from CIME.XML.standard_module_setup import * +from CIME.config import Config +from CIME.XML.machines import Machines +from CIME.BuildTools.configure import ( + generate_env_mach_specific, + copy_depends_files, +) +from CIME.utils import ( + get_batch_script_for_job, + safe_copy, + file_contains_python_function, + import_from_file, + copy_local_macros_to_dir, + batch_jobid, + run_cmd_no_fail, +) +from CIME.status import run_and_log_case_status, append_case_status +from CIME.test_status import * +from CIME.locked_files import unlock_file, lock_file, check_lockedfiles +from CIME.gitinterface import GitInterface + +import errno, shutil + +logger = logging.getLogger(__name__) + + +############################################################################### +def _build_usernl_files(case, model, comp): + ############################################################################### + """ + Create user_nl_xxx files, expects cwd is caseroot + """ + model = model.upper() + if model == "DRV": + model_file = case.get_value("CONFIG_CPL_FILE") + else: + model_file = case.get_value("CONFIG_{}_FILE".format(model)) + expect( + model_file is not None, + "Could not locate CONFIG_{}_FILE in config_files.xml".format(model), + ) + model_dir = os.path.dirname(model_file) + + expect( + os.path.isdir(model_dir), + "cannot find cime_config directory {} for component {}".format(model_dir, comp), + ) + comp_interface = case.get_value("COMP_INTERFACE") + multi_driver = case.get_value("MULTI_DRIVER") + ninst = 1 + + if multi_driver: + ninst_max = case.get_value("NINST_MAX") + if comp_interface != "nuopc" and model not in ("DRV", "CPL", "ESP"): + ninst_model = case.get_value("NINST_{}".format(model)) + expect( + ninst_model == ninst_max, + "MULTI_DRIVER mode, all components must have same NINST value. 
NINST_{} != {}".format( + model, ninst_max + ), + ) + if comp == "cpl": + if not os.path.exists("user_nl_cpl"): + safe_copy(os.path.join(model_dir, "user_nl_cpl"), ".") + else: + if comp_interface == "nuopc": + ninst = case.get_value("NINST") + elif ninst == 1: + ninst = case.get_value("NINST_{}".format(model)) + default_nlfile = "user_nl_{}".format(comp) + model_nl = os.path.join(model_dir, default_nlfile) + user_nl_list = _get_user_nl_list(case, default_nlfile, model_dir) + + # Note that, even if there are multiple elements of user_nl_list (i.e., we are + # creating multiple user_nl files for this component with different names), all of + # them will start out as copies of the single user_nl_comp file in the model's + # source tree - unless the file has _stream in its name + for nlfile in user_nl_list: + if ninst > 1: + for inst_counter in range(1, ninst + 1): + inst_nlfile = "{}_{:04d}".format(nlfile, inst_counter) + if not os.path.exists(inst_nlfile): + # If there is a user_nl_foo in the case directory, copy it + # to user_nl_foo_INST; otherwise, copy the original + # user_nl_foo from model_dir + if os.path.exists(nlfile): + safe_copy(nlfile, inst_nlfile) + elif "_stream" in nlfile: + safe_copy(os.path.join(model_dir, nlfile), inst_nlfile) + elif os.path.exists(model_nl): + safe_copy(model_nl, inst_nlfile) + else: + # ninst = 1 + if not os.path.exists(nlfile): + if "_stream" in nlfile: + safe_copy(os.path.join(model_dir, nlfile), nlfile) + elif os.path.exists(model_nl): + safe_copy(model_nl, nlfile) + + +############################################################################### +def _get_user_nl_list(case, default_nlfile, model_dir): + """Get a list of user_nl files needed by this component + + Typically, each component has a single user_nl file: user_nl_comp. However, some + components use multiple user_nl files. These components can define a function in + cime_config/buildnml named get_user_nl_list, which returns a list of user_nl files + that need to be staged in the case directory. For example, in a run where CISM is + modeling both Antarctica and Greenland, its get_user_nl_list function will return + ['user_nl_cism', 'user_nl_cism_ais', 'user_nl_cism_gris']. + + If that function is NOT defined in the component's buildnml, then we return the given + default_nlfile. + + """ + # Check if buildnml is present in the expected location, and if so, whether it + # contains the function "get_user_nl_list"; if so, we'll import the module and call + # that function; if not, we'll fall back on the default value. 
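+    # Illustrative example (hypothetical component, not part of this change): a
+    # component's cime_config/buildnml can provide, e.g.,
+    #
+    #     def get_user_nl_list(case):
+    #         return ["user_nl_cism", "user_nl_cism_ais", "user_nl_cism_gris"]
+    #
+    # in which case every name returned is staged in the case directory.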
+    buildnml_path = os.path.join(model_dir, "buildnml")
+    has_function = False
+    if os.path.isfile(buildnml_path) and file_contains_python_function(
+        buildnml_path, "get_user_nl_list"
+    ):
+        has_function = True
+
+    if has_function:
+        comp_buildnml = import_from_file("comp_buildnml", buildnml_path)
+        return comp_buildnml.get_user_nl_list(case)
+    else:
+        return [default_nlfile]
+
+
+###############################################################################
+def _create_macros_cmake(
+    caseroot, cmake_macros_dir, mach_obj, compiler, case_cmake_path
+):
+    ###############################################################################
+    if not os.path.isfile(os.path.join(caseroot, "Macros.cmake")):
+        safe_copy(os.path.join(cmake_macros_dir, "Macros.cmake"), caseroot)
+
+    if not os.path.exists(case_cmake_path):
+        os.mkdir(case_cmake_path)
+
+    # This impl is coupled to contents of Macros.cmake
+    os_ = mach_obj.get_value("OS")
+    mach = mach_obj.get_machine_name()
+    macros = [
+        "universal.cmake",
+        os_ + ".cmake",
+        compiler + ".cmake",
+        "{}_{}.cmake".format(compiler, os_),
+        mach + ".cmake",
+        "{}_{}.cmake".format(compiler, mach),
+        "CMakeLists.txt",
+    ]
+    for macro in macros:
+        repo_macro = os.path.join(cmake_macros_dir, macro)
+        mach_repo_macro = os.path.join(cmake_macros_dir, "..", mach, macro)
+        case_macro = os.path.join(case_cmake_path, macro)
+        if not os.path.exists(case_macro):
+            if os.path.exists(mach_repo_macro):
+                safe_copy(mach_repo_macro, case_cmake_path)
+            elif os.path.exists(repo_macro):
+                safe_copy(repo_macro, case_cmake_path)
+
+    copy_depends_files(mach, mach_obj.machines_dir, caseroot, compiler)
+
+
+###############################################################################
+def _create_macros(
+    case, mach_obj, caseroot, compiler, mpilib, debug, comp_interface, sysos
+):
+    ###############################################################################
+    """
+    creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler
+    and env_mach_specific.xml if they don't already exist.
+ """ + reread = not os.path.isfile("env_mach_specific.xml") + new_cmake_macros_dir = case.get_value("CMAKE_MACROS_DIR") + + if reread: + case.flush() + generate_env_mach_specific( + caseroot, + mach_obj, + compiler, + mpilib, + debug, + comp_interface, + sysos, + False, + threaded=case.get_build_threaded(), + noenv=True, + ) + case.read_xml() + + case_cmake_path = os.path.join(caseroot, "cmake_macros") + + _create_macros_cmake( + caseroot, new_cmake_macros_dir, mach_obj, compiler, case_cmake_path + ) + copy_local_macros_to_dir( + case_cmake_path, extra_machdir=case.get_value("EXTRA_MACHDIR") + ) + + +############################################################################### +def _case_setup_impl( + case, caseroot, clean=False, test_mode=False, reset=False, keep=None +): + ############################################################################### + os.chdir(caseroot) + + non_local = case.get_value("NONLOCAL") + + models = case.get_values("COMP_CLASSES") + mach = case.get_value("MACH") + compiler = case.get_value("COMPILER") + debug = case.get_value("DEBUG") + mpilib = case.get_value("MPILIB") + sysos = case.get_value("OS") + comp_interface = case.get_value("COMP_INTERFACE") + extra_machines_dir = case.get_value("EXTRA_MACHDIR") + + expect(mach is not None, "xml variable MACH is not set") + + mach_obj = Machines(machine=mach, extra_machines_dir=extra_machines_dir) + + # Check that $DIN_LOC_ROOT exists or can be created: + if not non_local: + din_loc_root = case.get_value("DIN_LOC_ROOT") + testcase = case.get_value("TESTCASE") + + if not os.path.isdir(din_loc_root): + try: + os.makedirs(din_loc_root) + except OSError as e: + if e.errno == errno.EACCES: + logger.info("Invalid permissions to create {}".format(din_loc_root)) + + expect( + not (not os.path.isdir(din_loc_root) and testcase != "SBN"), + "inputdata root is not a directory or is not readable: {}".format( + din_loc_root + ), + ) + + # Remove batch scripts + if reset or clean: + # clean setup-generated files + batch_script = get_batch_script_for_job(case.get_primary_job()) + files_to_clean = [ + batch_script, + "env_mach_specific.xml", + "Macros.make", + "Macros.cmake", + "cmake_macros", + ] + for file_to_clean in files_to_clean: + if os.path.exists(file_to_clean) and not (keep and file_to_clean in keep): + if os.path.isdir(file_to_clean): + shutil.rmtree(file_to_clean) + else: + os.remove(file_to_clean) + logger.info("Successfully cleaned {}".format(file_to_clean)) + + if not test_mode: + # rebuild the models (even on restart) + case.set_value("BUILD_COMPLETE", False) + + # Cannot leave case in bad state (missing env_mach_specific.xml) + if clean and not os.path.isfile("env_mach_specific.xml"): + case.flush() + generate_env_mach_specific( + caseroot, + mach_obj, + compiler, + mpilib, + debug, + comp_interface, + sysos, + False, + threaded=case.get_build_threaded(), + noenv=True, + ) + case.read_xml() + + if not clean: + if not non_local: + case.load_env() + + _create_macros( + case, mach_obj, caseroot, compiler, mpilib, debug, comp_interface, sysos + ) + + # Set tasks to 1 if mpi-serial library + if mpilib == "mpi-serial": + case.set_value("NTASKS", 1) + + # Check ninst. + # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component. 
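+        # Worked example (hypothetical numbers): with NTASKS_ATM=128 and
+        # NINST_ATM=4, NTASKS_PER_INST_ATM is set to 128 / 4 = 32 below; if
+        # NINST exceeds NTASKS and NTASKS is 1, NTASKS is raised to match NINST.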
+ comp_interface = case.get_value("COMP_INTERFACE") + if comp_interface == "nuopc": + ninst = case.get_value("NINST") + + multi_driver = case.get_value("MULTI_DRIVER") + + for comp in models: + ntasks = case.get_value("NTASKS_{}".format(comp)) + if comp == "CPL": + continue + if comp_interface != "nuopc": + ninst = case.get_value("NINST_{}".format(comp)) + if multi_driver: + if comp_interface != "nuopc": + expect( + case.get_value("NINST_LAYOUT_{}".format(comp)) == "concurrent", + "If multi_driver is TRUE, NINST_LAYOUT_{} must be concurrent".format( + comp + ), + ) + case.set_value("NTASKS_PER_INST_{}".format(comp), ntasks) + else: + if ninst > ntasks: + if ntasks == 1: + case.set_value("NTASKS_{}".format(comp), ninst) + ntasks = ninst + else: + expect( + False, + "NINST_{comp} value {ninst} greater than NTASKS_{comp} {ntasks}".format( + comp=comp, ninst=ninst, ntasks=ntasks + ), + ) + + case.set_value( + "NTASKS_PER_INST_{}".format(comp), max(1, int(ntasks / ninst)) + ) + + if os.path.exists(get_batch_script_for_job(case.get_primary_job())): + logger.info( + "Machine/Decomp/Pes configuration has already been done ...skipping" + ) + + case.initialize_derived_attributes() + + case.set_value("BUILD_THREADED", case.get_build_threaded()) + + else: + caseroot = case.get_value("CASEROOT") + + unlock_file("env_build.xml", caseroot) + + unlock_file("env_batch.xml", caseroot) + + case.flush() + + check_lockedfiles(case, skip=["env_build", "env_mach_pes"]) + + case.initialize_derived_attributes() + + cost_per_node = case.get_value("COSTPES_PER_NODE") + case.set_value("COST_PES", case.num_nodes * cost_per_node) + threaded = case.get_build_threaded() + case.set_value("BUILD_THREADED", threaded) + if threaded and case.total_tasks * case.thread_count > cost_per_node: + smt_factor = max( + 1.0, int(case.get_value("MAX_TASKS_PER_NODE") / cost_per_node) + ) + case.set_value( + "TOTALPES", + case.iotasks + + int( + (case.total_tasks - case.iotasks) + * max(1.0, float(case.thread_count) / smt_factor) + ), + ) + else: + case.set_value( + "TOTALPES", + (case.total_tasks - case.iotasks) * case.thread_count + + case.iotasks, + ) + + # ---------------------------------------------------------------------------------------------------------- + # Sanity check for a GPU run: + # 1. GPU_TYPE and GPU_OFFLOAD must both be defined to use GPUs + # 2. If the NGPUS_PER_NODE XML variable in the env_mach_pes.xml file is larger than + # the value of MAX_GPUS_PER_NODE, set it to MAX_GPUS_PER_NODE automatically. + # 3. If the NGPUS_PER_NODE XML variable is equal to 0, it will be updated to 1 automatically. 
+ # ---------------------------------------------------------------------------------------------------------- + max_gpus_per_node = case.get_value("MAX_GPUS_PER_NODE") + gpu_type = case.get_value("GPU_TYPE") + openacc_gpu_offload = case.get_value("OPENACC_GPU_OFFLOAD") + openmp_gpu_offload = case.get_value("OPENMP_GPU_OFFLOAD") + kokkos_gpu_offload = case.get_value("KOKKOS_GPU_OFFLOAD") + gpu_offload = ( + openacc_gpu_offload or openmp_gpu_offload or kokkos_gpu_offload + ) + ngpus_per_node = case.get_value("NGPUS_PER_NODE") + if gpu_type and str(gpu_type).lower() != "none": + if max_gpus_per_node <= 0: + raise RuntimeError( + f"MAX_GPUS_PER_NODE must be larger than 0 for machine={mach} and compiler={compiler} in order to configure a GPU run" + ) + if not gpu_offload: + raise RuntimeError( + "GPU_TYPE is defined but none of the GPU OFFLOAD options are enabled" + ) + case.gpu_enabled = True + if ngpus_per_node >= 0: + case.set_value( + "NGPUS_PER_NODE", + max(1, ngpus_per_node) + if ngpus_per_node <= max_gpus_per_node + else max_gpus_per_node, + ) + elif gpu_offload: + raise RuntimeError( + "GPU_TYPE is not defined but at least one GPU OFFLOAD option is enabled" + ) + elif ngpus_per_node and ngpus_per_node != 0: + raise RuntimeError( + f"ngpus_per_node is expected to be 0 for a pure CPU run ; {ngpus_per_node} is provided instead ;" + ) + + # May need to select new batch settings if pelayout changed (e.g. problem is now too big for prev-selected queue) + env_batch = case.get_env("batch") + env_batch.set_job_defaults([(case.get_primary_job(), {})], case) + + # create batch files + env_batch.make_all_batch_files(case) + + if Config.instance().make_case_run_batch_script and not case.get_value( + "TEST" + ): + input_batch_script = os.path.join( + case.get_value("MACHDIR"), "template.case.run.sh" + ) + env_batch.make_batch_script( + input_batch_script, + "case.run", + case, + outfile=get_batch_script_for_job("case.run.sh"), + ) + + # Make a copy of env_mach_pes.xml in order to be able + # to check that it does not change once case.setup is invoked + case.flush() + + logger.debug("at copy TOTALPES = {}".format(case.get_value("TOTALPES"))) + + caseroot = case.get_value("CASEROOT") + + lock_file("env_mach_pes.xml", caseroot) + + lock_file("env_batch.xml", caseroot) + + # Create user_nl files for the required number of instances + if not os.path.exists("user_nl_cpl"): + logger.info("Creating user_nl_xxx files for components and cpl") + + # loop over models + for model in models: + comp = case.get_value("COMP_{}".format(model)) + logger.debug("Building {} usernl files".format(model)) + _build_usernl_files(case, model, comp) + if comp == "cism": + glcroot = case.get_value("COMP_ROOT_DIR_GLC") + run_cmd_no_fail( + "{}/cime_config/cism.template {}".format(glcroot, caseroot) + ) + if comp == "cam": + camroot = case.get_value("COMP_ROOT_DIR_ATM") + if os.path.exists( + os.path.join(camroot, "cime_config/cam.case_setup.py") + ): + logger.info("Running cam.case_setup.py") + run_cmd_no_fail( + "python {cam}/cime_config/cam.case_setup.py {cam} {case}".format( + cam=camroot, case=caseroot + ) + ) + + _build_usernl_files(case, "drv", "cpl") + + # Create needed directories for case + case.create_dirs() + + logger.info( + "If an old case build already exists, might want to run 'case.build --clean' before building" + ) + + # Some tests need namelists created here (ERP) - so do this if we are in test mode + if ( + test_mode or Config.instance().case_setup_generate_namelist + ) and not non_local: + 
logger.info("Generating component namelists as part of setup") + case.create_namelists() + + # Record env information + env_module = case.get_env("mach_specific") + if mach == "zeus": + overrides = env_module.get_overrides_nodes(case) + logger.debug("Updating Zeus nodes {}".format(overrides)) + env_module.make_env_mach_specific_file("sh", case) + env_module.make_env_mach_specific_file("csh", case) + if not non_local: + env_module.save_all_env_info("software_environment.txt") + + logger.info( + "You can now run './preview_run' to get more info on how your case will be run" + ) + + +############################################################################### +def case_setup( + self, clean=False, test_mode=False, reset=False, keep=None, disable_git=False +): + ############################################################################### + caseroot, casebaseid = self.get_value("CASEROOT"), self.get_value("CASEBASEID") + phase = "setup.clean" if clean else "case.setup" + functor = lambda: _case_setup_impl( + self, caseroot, clean=clean, test_mode=test_mode, reset=reset, keep=keep + ) + + is_batch = self.get_value("BATCH_SYSTEM") is not None + msg_func = None + + if is_batch: + jobid = batch_jobid() + msg_func = lambda *args: jobid if jobid is not None else "" + + if self.get_value("TEST") and not test_mode: + test_name = casebaseid if casebaseid is not None else self.get_value("CASE") + with TestStatus(test_dir=caseroot, test_name=test_name) as ts: + try: + run_and_log_case_status( + functor, + phase, + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=caseroot, + is_batch=is_batch, + ) + except BaseException: # Want to catch KeyboardInterrupt too + ts.set_status(SETUP_PHASE, TEST_FAIL_STATUS) + raise + else: + if clean: + ts.set_status(SETUP_PHASE, TEST_PEND_STATUS) + else: + ts.set_status(SETUP_PHASE, TEST_PASS_STATUS) + else: + run_and_log_case_status( + functor, + phase, + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=caseroot, + is_batch=is_batch, + ) + if not disable_git and not reset: + self._create_case_repo(caseroot) + + +def _create_case_repo(self, caseroot): + self._gitinterface = GitInterface(caseroot, logger, branch=self.get_value("CASE")) + if self._gitinterface and not os.path.exists(os.path.join(caseroot, ".gitignore")): + safe_copy( + os.path.join( + self.get_value("CIMEROOT"), + "CIME", + "data", + "templates", + "gitignore.template", + ), + os.path.join(caseroot, ".gitignore"), + ) + append_case_status( + "", "", "local git repository created", gitinterface=self._gitinterface + ) + # add all files in caseroot to local repository + self._gitinterface._git_command("add", "*") + elif not self._gitinterface: + append_case_status( + "", + "", + "Local git version too old for cime git interface, version 2.28 or newer required.", + ) diff --git a/CIME/case/case_st_archive.py b/CIME/case/case_st_archive.py new file mode 100644 index 00000000000..cdb925ec960 --- /dev/null +++ b/CIME/case/case_st_archive.py @@ -0,0 +1,1340 @@ +""" +short term archiving +case_st_archive, restore_from_archive, archive_last_restarts +are members of class Case from file case.py +""" + +import shutil, glob, re, os + +from CIME.XML.standard_module_setup import * +from CIME.utils import ( + ls_sorted_by_mtime, + symlink_force, + safe_copy, + find_files, + batch_jobid, +) +from CIME.status import run_and_log_case_status +from CIME.date import get_file_date +from CIME.XML.archive import Archive +from CIME.XML.files import Files 
+from os.path import isdir, join + +logger = logging.getLogger(__name__) + + +############################################################################### +def _get_archive_fn_desc(archive_fn): + ############################################################################### + return "moving" if archive_fn is shutil.move else "copying" + + +############################################################################### +def _get_archive_file_fn(copy_only): + ############################################################################### + """ + Returns the function to use for archiving some files + """ + return safe_copy if copy_only else shutil.move + + +############################################################################### +def _get_datenames(casename, rundir): + ############################################################################### + """ + Returns the date objects specifying the times of each file + Note we are assuming that the coupler restart files exist and are consistent with other component datenames + Not doc-testable due to filesystem dependence + """ + expect(isdir(rundir), "Cannot open directory {} ".format(rundir)) + + files = sorted(glob.glob(os.path.join(rundir, casename + ".cpl.r.*.nc"))) + if not files: + files = sorted(glob.glob(os.path.join(rundir, casename + ".cpl_0001.r.*.nc"))) + + logger.debug(" cpl files : {} ".format(files)) + + if not files: + logger.warning( + "Cannot find a {}.cpl*.r.*.nc file in directory {} ".format( + casename, rundir + ) + ) + + datenames = [] + for filename in files: + file_date = get_file_date(filename) + datenames.append(file_date) + return datenames + + +def _datetime_str(_date): + """ + Returns the standard format associated with filenames. + + >>> from CIME.date import date + >>> _datetime_str(date(5, 8, 22)) + '0005-08-22-00000' + >>> _datetime_str(get_file_date("0011-12-09-00435")) + '0011-12-09-00435' + """ + + format_string = "{year:04d}-{month:02d}-{day:02d}-{seconds:05d}" + return format_string.format( + year=_date.year(), + month=_date.month(), + day=_date.day(), + seconds=_date.second_of_day(), + ) + + +def _datetime_str_mpas(_date): + """ + Returns the mpas format associated with filenames. 
+ + >>> from CIME.date import date + >>> _datetime_str_mpas(date(5, 8, 22)) + '0005-08-22_00:00:00' + >>> _datetime_str_mpas(get_file_date("0011-12-09-00435")) + '0011-12-09_00:07:15' + """ + + format_string = ( + "{year:04d}-{month:02d}-{day:02d}_{hours:02d}:{minutes:02d}:{seconds:02d}" + ) + return format_string.format( + year=_date.year(), + month=_date.month(), + day=_date.day(), + hours=_date.hour(), + minutes=_date.minute(), + seconds=_date.second(), + ) + + +############################################################################### +def _get_ninst_info(case, compclass): + ############################################################################### + """ + Returns the number of instances used by a component and suffix strings for filenames + Not doc-testable due to case dependence + """ + + ninst = case.get_value("NINST_" + compclass.upper()) + ninst_strings = [] + if ninst is None: + ninst = 1 + for i in range(1, ninst + 1): + if ninst > 1: + ninst_strings.append("_" + "{:04d}".format(i)) + + logger.debug( + "ninst and ninst_strings are: {} and {} for {}".format( + ninst, ninst_strings, compclass + ) + ) + return ninst, ninst_strings + + +############################################################################### +def _get_component_archive_entries(components, archive): + ############################################################################### + """ + Each time this generator function is called, it yields a tuple + (archive_entry, compname, compclass) for one component in this + case's compset components. + """ + for compname in components: + logger.debug("compname is {} ".format(compname)) + archive_entry = archive.get_entry(compname) + if archive_entry is None: + logger.debug("No entry found for {}".format(compname)) + compclass = None + else: + compclass = archive.get(archive_entry, "compclass") + yield (archive_entry, compname, compclass) + + +############################################################################### +def _archive_rpointer_files( + casename, + ninst_strings, + rundir, + save_interim_restart_files, + archive, + archive_entry, + archive_restdir, + datename, + datename_is_last, +): + ############################################################################### + + # parse env_archive.xml to determine the rpointer files + # and contents for the given archive_entry tag + # loop through the possible rpointer files and contents + rpointer_nodes = archive.get_children("rpointer", root=archive_entry) + for rpointer in rpointer_nodes: + file_node = archive.get_child("rpointer_file", root=rpointer) + temp_rpointer_file = archive.text(file_node) + content_node = archive.get_child("rpointer_content", root=rpointer) + temp_rpointer_content = archive.text(content_node) + rpointer_file = temp_rpointer_file.replace("$NINST_STRING", "*") + if rpointer_file == "unset": + continue + if "$DATENAME" in rpointer_file: + rpointer_file = rpointer_file.replace("$DATENAME", _datetime_str(datename)) + + expect( + not "$" in rpointer_file, + "Unrecognized expression in name {}".format(rpointer_file), + ) + rpointers = glob.glob(rundir + "/" + rpointer_file) + if datename_is_last: + for rpfile in rpointers: + safe_copy( + rpfile, os.path.join(archive_restdir, os.path.basename(rpfile)) + ) + else: + # Generate rpointer file(s) for interim restarts for the one datename and each + # possible value of ninst_strings + if save_interim_restart_files: + # If timestamped rpointers exist use them + if rpointers: + for rpfile in rpointers: + logger.info("moving interim 
rpointer_file {}".format(rpfile)) + shutil.move( + rpfile, + os.path.join(archive_restdir, os.path.basename(rpfile)), + ) + else: + + # put in a temporary setting for ninst_strings if they are empty + # in order to have just one loop over ninst_strings below + if ninst_strings: + rpointer_content = temp_rpointer_content.replace( + "$NINST_STRING", ninst_strings[0] + ) + else: + rpointer_content = temp_rpointer_content.replace( + "$NINST_STRING", "" + ) + rpointer_content = rpointer_content.replace( + "$DATENAME", _datetime_str(datename) + ) + if rpointer_content != "unset": + if not ninst_strings: + ninst_strings = ["empty"] + + for ninst_string in ninst_strings: + rpointer_file = temp_rpointer_file + rpointer_content = temp_rpointer_content + if ninst_string == "empty": + ninst_string = "" + for key, value in [ + ("$CASE", casename), + ("$DATENAME", _datetime_str(datename)), + ("$MPAS_DATENAME", _datetime_str_mpas(datename)), + ("$NINST_STRING", ninst_string), + ]: + rpointer_file = rpointer_file.replace(key, value) + rpointer_content = rpointer_content.replace(key, value) + + # write out the respective files with the correct contents + rpointer_file = os.path.join( + archive_restdir, rpointer_file + ) + logger.info( + "writing rpointer_file {}".format(rpointer_file) + ) + f = open(rpointer_file, "w") + for output in rpointer_content.split(","): + f.write("{} \n".format(output)) + f.close() + else: + logger.info( + "rpointer_content unset, not creating rpointer file {}".format( + rpointer_file + ) + ) + + +############################################################################### +def _archive_log_files(dout_s_root, rundir, archive_incomplete, archive_file_fn): + ############################################################################### + """ + Find all completed log files, or all log files if archive_incomplete is True, and archive them. + Each log file is required to have ".log." 
in its name, and completed ones will end with ".gz" + Not doc-testable due to file system dependence + """ + archive_logdir = os.path.join(dout_s_root, "logs") + if not os.path.exists(archive_logdir): + os.makedirs(archive_logdir) + logger.debug("created directory {} ".format(archive_logdir)) + + if archive_incomplete == False: + log_search = "*.log.*.gz" + else: + log_search = "*.log.*" + + logfiles = glob.glob(os.path.join(rundir, log_search)) + for logfile in logfiles: + srcfile = join(rundir, os.path.basename(logfile)) + destfile = join(archive_logdir, os.path.basename(logfile)) + logger.info( + "{} {} to {}".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) + archive_file_fn(srcfile, destfile) + + +############################################################################### +def _archive_history_files( + archive, + compclass, + compname, + histfiles_savein_rundir, + last_date, + archive_file_fn, + dout_s_root, + casename, + rundir, +): + ############################################################################### + """ + perform short term archiving on history files in rundir + + Not doc-testable due to case and file system dependence + """ + + # determine history archive directory (create if it does not exist) + + archive_histdir = os.path.join(dout_s_root, compclass, "hist") + if not os.path.exists(archive_histdir): + os.makedirs(archive_histdir) + logger.debug("created directory {}".format(archive_histdir)) + # the compname is drv but the files are named cpl + if compname == "drv": + compname = "cpl" + + if compname == "nemo": + archive_rblddir = os.path.join(dout_s_root, compclass, "rebuild") + if not os.path.exists(archive_rblddir): + os.makedirs(archive_rblddir) + logger.debug("created directory {}".format(archive_rblddir)) + + sfxrbld = r"mesh_mask_" + r"[0-9]*" + pfile = re.compile(sfxrbld) + rbldfiles = [f for f in os.listdir(rundir) if pfile.search(f)] + logger.debug("rbldfiles = {} ".format(rbldfiles)) + + if rbldfiles: + for rbldfile in rbldfiles: + srcfile = join(rundir, rbldfile) + destfile = join(archive_rblddir, rbldfile) + logger.info( + "{} {} to {} ".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) + archive_file_fn(srcfile, destfile) + + sfxhst = casename + r"_[0-9][mdy]_" + r"[0-9]*" + pfile = re.compile(sfxhst) + hstfiles = [f for f in os.listdir(rundir) if pfile.search(f)] + logger.debug("hstfiles = {} ".format(hstfiles)) + + if hstfiles: + for hstfile in hstfiles: + srcfile = join(rundir, hstfile) + destfile = join(archive_histdir, hstfile) + logger.info( + "{} {} to {} ".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) + archive_file_fn(srcfile, destfile) + + # determine ninst and ninst_string + + # archive history files - the only history files that kept in the + # run directory are those that are needed for restarts + histfiles = archive.get_all_hist_files(casename, compname, rundir) + + if histfiles: + for histfile in histfiles: + file_date = get_file_date(os.path.basename(histfile)) + if last_date is None or file_date is None or file_date <= last_date: + srcfile = join(rundir, histfile) + expect( + os.path.isfile(srcfile), + "history file {} does not exist ".format(srcfile), + ) + destfile = join(archive_histdir, histfile) + if histfile in histfiles_savein_rundir: + logger.info("copying {} to {} ".format(srcfile, destfile)) + safe_copy(srcfile, destfile) + else: + logger.info( + "{} {} to {} ".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) + 
archive_file_fn(srcfile, destfile) + + +############################################################################### +def get_histfiles_for_restarts( + rundir, archive, archive_entry, restfile, testonly=False +): + ############################################################################### + """ + query restart files to determine history files that are needed for restarts + + Not doc-testable due to filesystem dependence + """ + + # Make certain histfiles is a set so we don't repeat + histfiles = set() + rest_hist_varname = archive.get_entry_value("rest_history_varname", archive_entry) + if rest_hist_varname != "unset": + ncdump = shutil.which("ncdump") + expect(ncdump, "ncdump not found in path") + cmd = "{} -v {} {} ".format( + ncdump, rest_hist_varname, os.path.join(rundir, restfile) + ) + if testonly: + out = "{} =".format(rest_hist_varname) + else: + rc, out, error = run_cmd(cmd) + if rc != 0: + logger.info( + " WARNING: {} failed rc={:d}\n out={}\n err={}".format( + cmd, rc, out, error + ) + ) + logger.debug(" get_histfiles_for_restarts: \n out={}".format(out)) + + searchname = "{} =".format(rest_hist_varname) + if searchname in out: + offset = out.index(searchname) + items = out[offset:].split(",") + for item in items: + # the following match has an option of having any number of '.'s and '/'s + # at the beginning of the history filename + matchobj = re.search(r"\"\S+\s*\"", item) + if matchobj: + histfile = matchobj.group(0).strip('" ') + histfile = os.path.basename(histfile) + # append histfile to the list ONLY if it exists in rundir before the archiving + if histfile in histfiles: + logger.warning( + "WARNING, tried to add a duplicate file to histfiles" + ) + if os.path.isfile(os.path.join(rundir, histfile)): + histfiles.add(histfile) + else: + logger.debug( + " get_histfiles_for_restarts: histfile {} does not exist ".format( + histfile + ) + ) + return histfiles + + +############################################################################### +def _archive_restarts_date( + case, + casename, + rundir, + archive, + datename, + datename_is_last, + last_date, + archive_restdir, + archive_file_fn, + components=None, + link_to_last_restart_files=False, + testonly=False, +): + ############################################################################### + """ + Archive restart files for a single date + + Returns a dictionary of histfiles that need saving in the run + directory, indexed by compname + """ + logger.info("-------------------------------------------") + logger.info("Archiving restarts for date {}".format(datename)) + logger.debug("last date {}".format(last_date)) + logger.debug("date is last? 
{}".format(datename_is_last)) + logger.debug("components are {}".format(components)) + logger.info("-------------------------------------------") + logger.debug("last date: {}".format(last_date)) + + if components is None: + components = case.get_compset_components() + components.append("drv") + components.append("dart") + + histfiles_savein_rundir_by_compname = {} + + for archive_entry, compname, compclass in _get_component_archive_entries( + components, archive + ): + if compclass: + logger.info("Archiving restarts for {} ({})".format(compname, compclass)) + + # archive restarts + histfiles_savein_rundir = _archive_restarts_date_comp( + case, + casename, + rundir, + archive, + archive_entry, + compclass, + compname, + datename, + datename_is_last, + last_date, + archive_restdir, + archive_file_fn, + link_to_last_restart_files=link_to_last_restart_files, + testonly=testonly, + ) + histfiles_savein_rundir_by_compname[compname] = histfiles_savein_rundir + + return histfiles_savein_rundir_by_compname + + +############################################################################### +def _archive_restarts_date_comp( + case, + casename, + rundir, + archive, + archive_entry, + compclass, + compname, + datename, + datename_is_last, + last_date, + archive_restdir, + archive_file_fn, + link_to_last_restart_files=False, + testonly=False, +): + ############################################################################### + """ + Archive restart files for a single date and single component + + If link_to_last_restart_files is True, then make a symlink to the + last set of restart files (i.e., the set with datename_is_last + True); if False (the default), copy them. (This has no effect on the + history files that are associated with these restart files.) + """ + datename_str = _datetime_str(datename) + + if ( + datename_is_last or case.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES") + ) and not os.path.isdir(archive_restdir): + os.makedirs(archive_restdir) + + # archive the rpointer file(s) for this datename and all possible ninst_strings + _archive_rpointer_files( + casename, + _get_ninst_info(case, compclass)[1], + rundir, + case.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES"), + archive, + archive_entry, + archive_restdir, + datename, + datename_is_last, + ) + + # move all but latest restart files into the archive restart directory + # copy latest restart files to archive restart directory + histfiles_savein_rundir = [] + + # determine function to use for last set of restart files + if link_to_last_restart_files: + last_restart_file_fn = symlink_force + last_restart_file_fn_msg = "linking" + else: + last_restart_file_fn = safe_copy + last_restart_file_fn_msg = "copying" + + # the compname is drv but the files are named cpl + if compname == "drv": + compname = "cpl" + + # get file_extension suffixes + for suffix in archive.get_rest_file_extensions(archive_entry): + # logger.debug("suffix is {} ninst {}".format(suffix, ninst)) + restfiles = "" + if compname.find("mpas") == 0 or compname == "mali": + pattern = ( + casename + + r"\." + + compname + + r"\." + + suffix + + r"\." 
+ + "_".join(datename_str.rsplit("-", 1)) + ) + pfile = re.compile(pattern) + restfiles = [f for f in os.listdir(rundir) if pfile.search(f)] + elif compname == "nemo": + pattern = r"_*_" + suffix + r"[0-9]*" + pfile = re.compile(pattern) + restfiles = [f for f in os.listdir(rundir) if pfile.search(f)] + else: + pattern = r"^{}\.{}[\d_]*\.".format(casename, compname) + pfile = re.compile(pattern) + files = [f for f in os.listdir(rundir) if pfile.search(f)] + pattern = ( + r"_?" + + r"\d*" + + r"\." + + suffix + + r"\." + + r"[^\.]*" + + r"\.?" + + datename_str + ) + pfile = re.compile(pattern) + restfiles = [f for f in files if pfile.search(f)] + logger.debug("pattern is {} restfiles {}".format(pattern, restfiles)) + for rfile in restfiles: + rfile = os.path.basename(rfile) + + file_date = get_file_date(rfile) + if last_date is not None and file_date > last_date: + # Skip this file + continue + + if not os.path.exists(archive_restdir): + os.makedirs(archive_restdir) + + # obtain array of history files for restarts + # need to do this before archiving restart files + histfiles_for_restart = get_histfiles_for_restarts( + rundir, archive, archive_entry, rfile, testonly=testonly + ) + + if datename_is_last and histfiles_for_restart: + for histfile in histfiles_for_restart: + if histfile not in histfiles_savein_rundir: + histfiles_savein_rundir.append(histfile) + + # archive restart files and all history files that are needed for restart + # Note that the latest file should be copied and not moved + if datename_is_last: + srcfile = os.path.join(rundir, rfile) + destfile = os.path.join(archive_restdir, rfile) + last_restart_file_fn(srcfile, destfile) + logger.info( + "{} file {} to {}".format( + last_restart_file_fn_msg, srcfile, destfile + ) + ) + for histfile in histfiles_for_restart: + srcfile = os.path.join(rundir, histfile) + destfile = os.path.join(archive_restdir, histfile) + expect( + os.path.isfile(srcfile), + "history restart file {} for last date does not exist ".format( + srcfile + ), + ) + logger.info("Copying {} to {}".format(srcfile, destfile)) + safe_copy(srcfile, destfile) + logger.debug( + "datename_is_last + histfiles_for_restart copying \n {} to \n {}".format( + srcfile, destfile + ) + ) + else: + # Only archive intermediate restarts if requested - otherwise remove them + if case.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES"): + srcfile = os.path.join(rundir, rfile) + destfile = os.path.join(archive_restdir, rfile) + expect( + os.path.isfile(srcfile), + "restart file {} does not exist ".format(srcfile), + ) + logger.info( + "{} file {} to {}".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) + archive_file_fn(srcfile, destfile) + + # need to copy the history files needed for interim restarts - since + # have not archived all of the history files yet + for histfile in histfiles_for_restart: + srcfile = os.path.join(rundir, histfile) + destfile = os.path.join(archive_restdir, histfile) + expect( + os.path.isfile(srcfile), + "hist file {} does not exist ".format(srcfile), + ) + logger.info("copying {} to {}".format(srcfile, destfile)) + safe_copy(srcfile, destfile) + else: + if compname == "nemo": + flist = glob.glob(rundir + "/" + casename + "_*_restart*.nc") + logger.debug("nemo restart file {}".format(flist)) + if len(flist) > 2: + flist0 = glob.glob( + rundir + "/" + casename + "_*_restart_0000.nc" + ) + if len(flist0) > 1: + rstfl01 = flist0[0] + rstfl01spl = rstfl01.split("/") + logger.debug("splitted name {}".format(rstfl01spl)) + rstfl01nm = 
rstfl01spl[-1] + rstfl01nmspl = rstfl01nm.split("_") + logger.debug( + "splitted name step2 {}".format(rstfl01nmspl) + ) + rsttm01 = rstfl01nmspl[-3] + + rstfl02 = flist0[1] + rstfl02spl = rstfl02.split("/") + logger.debug("splitted name {}".format(rstfl02spl)) + rstfl02nm = rstfl02spl[-1] + rstfl02nmspl = rstfl02nm.split("_") + logger.debug( + "splitted name step2 {}".format(rstfl02nmspl) + ) + rsttm02 = rstfl02nmspl[-3] + + if int(rsttm01) > int(rsttm02): + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm02 + + "_restart_*.nc" + ) + else: + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm01 + + "_restart_*.nc" + ) + logger.debug("nemo restart list {}".format(restlist)) + if restlist: + for _restfile in restlist: + srcfile = os.path.join(rundir, _restfile) + logger.info( + "removing interim restart file {}".format( + srcfile + ) + ) + if os.path.isfile(srcfile): + try: + os.remove(srcfile) + except OSError: + logger.warning( + "unable to remove interim restart file {}".format( + srcfile + ) + ) + else: + logger.warning( + "interim restart file {} does not exist".format( + srcfile + ) + ) + elif len(flist) == 2: + flist0 = glob.glob( + rundir + "/" + casename + "_*_restart.nc" + ) + if len(flist0) > 1: + rstfl01 = flist0[0] + rstfl01spl = rstfl01.split("/") + logger.debug("splitted name {}".format(rstfl01spl)) + rstfl01nm = rstfl01spl[-1] + rstfl01nmspl = rstfl01nm.split("_") + logger.debug( + "splitted name step2 {}".format(rstfl01nmspl) + ) + rsttm01 = rstfl01nmspl[-2] + + rstfl02 = flist0[1] + rstfl02spl = rstfl02.split("/") + logger.debug("splitted name {}".format(rstfl02spl)) + rstfl02nm = rstfl02spl[-1] + rstfl02nmspl = rstfl02nm.split("_") + logger.debug( + "splitted name step2 {}".format(rstfl02nmspl) + ) + rsttm02 = rstfl02nmspl[-2] + + if int(rsttm01) > int(rsttm02): + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm02 + + "_restart_*.nc" + ) + else: + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm01 + + "_restart_*.nc" + ) + logger.debug("nemo restart list {}".format(restlist)) + if restlist: + for _rfile in restlist: + srcfile = os.path.join(rundir, _rfile) + logger.info( + "removing interim restart file {}".format( + srcfile + ) + ) + if os.path.isfile(srcfile): + try: + os.remove(srcfile) + except OSError: + logger.warning( + "unable to remove interim restart file {}".format( + srcfile + ) + ) + else: + logger.warning( + "interim restart file {} does not exist".format( + srcfile + ) + ) + else: + logger.warning( + "unable to find NEMO restart file in {}".format(rundir) + ) + + else: + srcfile = os.path.join(rundir, rfile) + logger.info("removing interim restart file {}".format(srcfile)) + if os.path.isfile(srcfile): + try: + os.remove(srcfile) + except OSError: + logger.warning( + "unable to remove interim restart file {}".format( + srcfile + ) + ) + else: + logger.warning( + "interim restart file {} does not exist".format(srcfile) + ) + + return histfiles_savein_rundir + + +############################################################################### +def _archive_process( + case, + archive, + last_date, + archive_incomplete_logs, + copy_only, + components=None, + dout_s_root=None, + casename=None, + rundir=None, + testonly=False, +): + ############################################################################### + """ + Parse config_archive.xml and perform short term archiving + """ + + logger.debug("In archive_process...") + + if dout_s_root is None: + dout_s_root = 
case.get_value("DOUT_S_ROOT") + if rundir is None: + rundir = case.get_value("RUNDIR") + if casename is None: + casename = case.get_value("CASE") + if components is None: + components = case.get_compset_components() + components.append("drv") + components.append("dart") + + archive_file_fn = _get_archive_file_fn(copy_only) + + # archive log files + _archive_log_files(dout_s_root, rundir, archive_incomplete_logs, archive_file_fn) + + # archive restarts and all necessary associated files (e.g. rpointer files) + datenames = _get_datenames(casename, rundir) + logger.debug("datenames {} ".format(datenames)) + histfiles_savein_rundir_by_compname = {} + for datename in datenames: + datename_is_last = False + if datename == datenames[-1]: + datename_is_last = True + + logger.debug("datename {} last_date {}".format(datename, last_date)) + if last_date is None or datename <= last_date: + archive_restdir = join(dout_s_root, "rest", _datetime_str(datename)) + + histfiles_savein_rundir_by_compname_this_date = _archive_restarts_date( + case, + casename, + rundir, + archive, + datename, + datename_is_last, + last_date, + archive_restdir, + archive_file_fn, + components, + testonly=testonly, + ) + if datename_is_last: + histfiles_savein_rundir_by_compname = ( + histfiles_savein_rundir_by_compname_this_date + ) + + # archive history files + + for _, compname, compclass in _get_component_archive_entries(components, archive): + if compclass: + logger.info( + "Archiving history files for {} ({})".format(compname, compclass) + ) + histfiles_savein_rundir = histfiles_savein_rundir_by_compname.get( + compname, [] + ) + logger.debug( + "_archive_process: histfiles_savein_rundir {} ".format( + histfiles_savein_rundir + ) + ) + _archive_history_files( + archive, + compclass, + compname, + histfiles_savein_rundir, + last_date, + archive_file_fn, + dout_s_root, + casename, + rundir, + ) + + +############################################################################### +def restore_from_archive( + self, rest_dir=None, dout_s_root=None, rundir=None, test=False +): + ############################################################################### + """ + Take archived restart files and load them into current case. Use rest_dir if provided otherwise use most recent + restore_from_archive is a member of Class Case + """ + if dout_s_root is None: + dout_s_root = self.get_value("DOUT_S_ROOT") + if rundir is None: + rundir = self.get_value("RUNDIR") + if rest_dir: + if not os.path.isabs(rest_dir): + rest_dir = os.path.join(dout_s_root, "rest", rest_dir) + else: + rest_root = os.path.join(dout_s_root, "rest") + + if os.path.exists(rest_root): + rest_dir = os.path.join( + rest_root, ls_sorted_by_mtime(os.path.join(dout_s_root, "rest"))[-1] + ) + + if rest_dir is None and test: + logger.warning( + "No rest_dir found for test - is this expected? 
DOUT_S_ROOT={}".format( + dout_s_root + ) + ) + return + expect(os.path.exists(rest_dir), "ERROR: No directory {} found".format(rest_dir)) + logger.info("Restoring restart from {}".format(rest_dir)) + + for item in glob.glob("{}/*".format(rest_dir)): + base = os.path.basename(item) + dst = os.path.join(rundir, base) + if os.path.exists(dst): + os.remove(dst) + logger.info("Restoring {} from {} to {}".format(item, rest_dir, rundir)) + + safe_copy(item, rundir) + + +############################################################################### +def archive_last_restarts( + self, archive_restdir, rundir, last_date=None, link_to_restart_files=False +): + ############################################################################### + """ + Convenience function for archiving just the last set of restart + files to a given directory. This also saves files attached to the + restart set, such as rpointer files and necessary history + files. However, it does not save other files that are typically + archived (e.g., history files, log files). + + Files are copied to the directory given by archive_restdir. + + If link_to_restart_files is True, then symlinks rather than copies + are done for the restart files. (This has no effect on the history + files that are associated with these restart files.) + """ + archive = self.get_env("archive") + casename = self.get_value("CASE") + datenames = _get_datenames(casename, rundir) + expect(len(datenames) >= 1, "No restart dates found") + last_datename = datenames[-1] + + # Not currently used for anything if we're only archiving the last + # set of restart files, but needed to satisfy the following interface + archive_file_fn = _get_archive_file_fn(copy_only=False) + + _ = _archive_restarts_date( + case=self, + casename=casename, + rundir=rundir, + archive=archive, + datename=last_datename, + datename_is_last=True, + last_date=last_date, + archive_restdir=archive_restdir, + archive_file_fn=archive_file_fn, + link_to_last_restart_files=link_to_restart_files, + ) + + +############################################################################### +def case_st_archive( + self, + last_date_str=None, + archive_incomplete_logs=True, + copy_only=False, + resubmit=True, +): + ############################################################################### + """ + Create archive object and perform short term archiving + """ + logger.debug("resubmit {}".format(resubmit)) + caseroot = self.get_value("CASEROOT") + self.load_env(job="case.st_archive") + if last_date_str is not None: + try: + last_date = get_file_date(last_date_str) + except ValueError: + expect(False, "Could not parse the last date to archive") + else: + last_date = None + + dout_s_root = self.get_value("DOUT_S_ROOT") + if dout_s_root is None or dout_s_root == "UNSET": + expect(False, "XML variable DOUT_S_ROOT is required for short-term achiver") + if not isdir(dout_s_root): + os.makedirs(dout_s_root) + + dout_s_save_interim = self.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES") + if dout_s_save_interim == "FALSE" or dout_s_save_interim == "UNSET": + rest_n = self.get_value("REST_N") + stop_n = self.get_value("STOP_N") + if rest_n < stop_n: + logger.warning( + "Restart files from end of run will be saved" + "interim restart files will be deleted" + ) + + logger.info("st_archive starting") + + is_batch = self.get_value("BATCH_SYSTEM") + msg_func = None + + if is_batch: + jobid = batch_jobid() + msg_func = lambda *args: jobid if jobid is not None else "" + + archive = self.get_env("archive") + functor = 
lambda: _archive_process( + self, archive, last_date, archive_incomplete_logs, copy_only + ) + run_and_log_case_status( + functor, + "st_archive", + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=caseroot, + is_batch=is_batch, + gitinterface=self._gitinterface, + ) + + logger.info("st_archive completed") + + # resubmit case if appropriate + if not self.get_value("EXTERNAL_WORKFLOW") and resubmit: + resubmit_cnt = self.get_value("RESUBMIT") + logger.debug("resubmit_cnt {} resubmit {}".format(resubmit_cnt, resubmit)) + if resubmit_cnt > 0: + logger.info( + "resubmitting from st_archive, resubmit={:d}".format(resubmit_cnt) + ) + if self.get_value("MACH") == "mira": + expect( + os.path.isfile(".original_host"), "ERROR alcf host file not found" + ) + with open(".original_host", "r") as fd: + sshhost = fd.read() + run_cmd( + "ssh cooleylogin1 ssh {} '{case}/case.submit {case} --resubmit' ".format( + sshhost, case=caseroot + ), + verbose=True, + ) + else: + self.submit(resubmit=True) + + return True + + +def test_st_archive(self, testdir="st_archive_test"): + files = Files() + archive = Archive(files=files) + components = [] + # expect(not self.get_value("MULTI_DRIVER"),"Test not configured for multi-driver cases") + + config_archive_files = archive.get_all_config_archive_files(files) + # create the run directory testdir and populate it with rest_file and hist_file from + # config_archive.xml test_file_names + if os.path.exists(testdir): + logger.info("Removing existing test directory {}".format(testdir)) + shutil.rmtree(testdir) + dout_s_root = os.path.join(testdir, "archive") + archive = Archive() + schema = files.get_schema("ARCHIVE_SPEC_FILE") + for config_archive_file in config_archive_files: + archive.read(config_archive_file, schema) + comp_archive_specs = archive.get_children("comp_archive_spec") + for comp_archive_spec in comp_archive_specs: + components.append(archive.get(comp_archive_spec, "compname")) + test_file_names = archive.get_optional_child( + "test_file_names", root=comp_archive_spec + ) + if test_file_names is not None: + if not os.path.exists(testdir): + os.makedirs(os.path.join(testdir, "archive")) + + for file_node in archive.get_children("tfile", root=test_file_names): + fname = os.path.join(testdir, archive.text(file_node)) + disposition = archive.get(file_node, "disposition") + logger.info( + "Create file {} with disposition {}".format(fname, disposition) + ) + with open(fname, "w") as fd: + fd.write(disposition + "\n") + + logger.info("testing components: {} ".format(list(set(components)))) + _archive_process( + self, + archive, + None, + False, + False, + components=list(set(components)), + dout_s_root=dout_s_root, + casename="casename", + rundir=testdir, + testonly=True, + ) + + _check_disposition(testdir) + + # Now test the restore capability + testdir2 = os.path.join(testdir, "run2") + os.makedirs(testdir2) + + restore_from_archive(self, rundir=testdir2, dout_s_root=dout_s_root, test=True) + + restfiles = [ + f + for f in os.listdir( + os.path.join(testdir, "archive", "rest", "1976-01-01-00000") + ) + ] + for _file in restfiles: + expect( + os.path.isfile(os.path.join(testdir2, _file)), + "Expected file {} to be restored from rest dir".format(_file), + ) + + return True + + +def test_env_archive(self, testdir="env_archive_test"): + components = self.get_values("COMP_CLASSES") + comps_in_case = [] + # create the run directory testdir and populate it with rest_file and hist_file from + # config_archive.xml test_file_names + if 
os.path.exists(testdir): + logger.info("Removing existing test directory {}".format(testdir)) + shutil.rmtree(testdir) + dout_s_root = os.path.join(testdir, "archive") + archive = self.get_env("archive") + comp_archive_specs = archive.scan_children("comp_archive_spec") + + # ignore stub and dead components + for comp in list(components): + compname = self.get_value("COMP_{}".format(comp)) + if ( + compname == "s" + comp.lower() or compname == "x" + comp.lower() + ) and comp != "ESP": + logger.info("Not testing component {}".format(comp)) + components.remove(comp) + elif comp == "ESP" and self.get_value("MODEL") == "e3sm": + components.remove(comp) + else: + if compname == "cpl": + compname = "drv" + comps_in_case.append(compname) + + for comp_archive_spec in comp_archive_specs: + comp_expected = archive.get(comp_archive_spec, "compname") + # Rename ww3 component when case and archive names don't match, + # specific to CESM. + if comp_expected == "ww3" and "ww" in comps_in_case: + comp_expected = "ww" + comp_class = archive.get(comp_archive_spec, "compclass").upper() + if comp_class in components: + components.remove(comp_class) + else: + expect( + False, "Error finding comp_class {} in components".format(comp_class) + ) + if comp_expected == "cpl": + comp_expected = "drv" + if comp_expected != "dart": + expect( + comp_expected in comps_in_case, + "env_archive defines component {} not defined in case".format( + comp_expected + ), + ) + + test_file_names = archive.get_optional_child( + "test_file_names", root=comp_archive_spec + ) + if test_file_names is not None: + if not os.path.exists(testdir): + os.makedirs(os.path.join(testdir, "archive")) + + for file_node in archive.get_children("tfile", root=test_file_names): + fname = os.path.join(testdir, archive.text(file_node)) + disposition = archive.get(file_node, "disposition") + logger.info( + "Create file {} with disposition {}".format(fname, disposition) + ) + with open(fname, "w") as fd: + fd.write(disposition + "\n") + + expect( + not components, "No archive entry found for components: {}".format(components) + ) + if "dart" not in comps_in_case: + comps_in_case.append("dart") + logger.info("testing components: {} ".format(comps_in_case)) + _archive_process( + self, + archive, + None, + False, + False, + components=comps_in_case, + dout_s_root=dout_s_root, + casename="casename", + rundir=testdir, + testonly=True, + ) + + _check_disposition(testdir) + + # Now test the restore capability + testdir2 = os.path.join(testdir, "run2") + os.makedirs(testdir2) + restfiles = [] + restore_from_archive(self, rundir=testdir2, dout_s_root=dout_s_root, test=True) + if os.path.exists(os.path.join(testdir, "archive", "rest")): + restfiles = [ + f + for f in os.listdir( + os.path.join(testdir, "archive", "rest", "1976-01-01-00000") + ) + ] + for _file in restfiles: + expect( + os.path.isfile(os.path.join(testdir2, _file)), + "Expected file {} to be restored from rest dir".format(_file), + ) + + return True + + +def _check_disposition(testdir): + copyfilelist = [] + for root, _, files in os.walk(testdir): + for _file in files: + with open(os.path.join(root, _file), "r") as fd: + disposition = fd.readline() + logger.info( + "Checking testfile {} with disposition {}".format(_file, disposition) + ) + if root == testdir: + if "move" in disposition: + if find_files(os.path.join(testdir, "archive"), _file): + expect( + False, + "Copied file {} to archive with disposition move".format( + _file + ), + ) + else: + expect(False, "Failed to move file {} to 
archive".format(_file)) + if "copy" in disposition: + copyfilelist.append(_file) + elif "ignore" in disposition: + expect( + False, + "Moved file {} with dispostion ignore to directory {}".format( + _file, root + ), + ) + elif "copy" in disposition: + expect( + _file in copyfilelist, + "File {} with disposition copy was moved to directory {}".format( + _file, root + ), + ) + for _file in copyfilelist: + expect( + find_files(os.path.join(testdir, "archive"), _file) != [], + "File {} was not copied to archive.".format(_file), + ) diff --git a/CIME/case/case_submit.py b/CIME/case/case_submit.py new file mode 100644 index 00000000000..21e03c05d02 --- /dev/null +++ b/CIME/case/case_submit.py @@ -0,0 +1,387 @@ +#!/usr/bin/env python3 + +""" +case.submit - Submit a cesm workflow to the queueing system or run it +if there is no queueing system. A cesm workflow may include multiple +jobs. +submit, check_case and check_da_settings are members of class Case in file case.py +""" +import configparser +from CIME.XML.standard_module_setup import * +from CIME.utils import expect, CIMEError, get_time_in_seconds +from CIME.status import run_and_log_case_status +from CIME.locked_files import ( + unlock_file, + lock_file, + check_lockedfile, + check_lockedfiles, +) +from CIME.test_status import * + +logger = logging.getLogger(__name__) + + +def _build_prereq_str(case, prev_job_ids): + delimiter = case.get_value("depend_separator") + prereq_str = "" + for job_id in prev_job_ids.values(): + prereq_str += str(job_id) + delimiter + return prereq_str[:-1] + + +def _submit( + case, + job=None, + no_batch=False, + prereq=None, + allow_fail=False, + resubmit=False, + resubmit_immediate=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + workflow=True, + chksum=False, + dryrun=False, +): + if job is None: + job = case.get_first_job() + caseroot = case.get_value("CASEROOT") + # Check mediator + hasMediator = True + comp_classes = case.get_values("COMP_CLASSES") + if "CPL" not in comp_classes: + hasMediator = False + + # Check if CONTINUE_RUN value makes sense + # if submitted with a prereq don't do this check + if case.get_value("CONTINUE_RUN") and hasMediator and not prereq: + rundir = case.get_value("RUNDIR") + expect( + os.path.isdir(rundir), + "CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir), + ) + # only checks for the first instance in a multidriver case + if case.get_value("COMP_INTERFACE") == "nuopc": + rpointer = case.get_value("DRV_RESTART_POINTER") + if not rpointer: + rpointer = "rpointer.cpl" + if case.get_value("NINST") > 1: + rpointer = rpointer + "_0001" + else: + rpointer = "rpointer.drv" + if case.get_value("MULTI_DRIVER"): + rpointer = rpointer + "_0001" + expect( + os.path.exists(os.path.join(rundir, rpointer)), + "CONTINUE_RUN is true but this case does not appear to have restart files staged in {} {}".format( + rundir, rpointer + ), + ) + # Finally we open the rpointer file and check that it's correct + casename = case.get_value("CASE") + with open(os.path.join(rundir, rpointer), "r") as fd: + ncfile = fd.readline().strip() + expect( + ncfile.startswith(casename) + and os.path.exists(os.path.join(rundir, ncfile)), + "File {ncfile} not present or does not match case {casename}".format( + ncfile=os.path.join(rundir, ncfile), casename=casename + ), + ) + + # if case.submit is called with the no_batch flag then we assume that this + # flag will stay in effect for the duration of the RESUBMITs + env_batch = case.get_env("batch") + external_workflow = 
case.get_value("EXTERNAL_WORKFLOW") + if env_batch.get_batch_system_type() == "none" or resubmit and external_workflow: + no_batch = True + + if no_batch: + batch_system = "none" + else: + batch_system = env_batch.get_batch_system_type() + + if batch_system != case.get_value("BATCH_SYSTEM"): + unlock_file(os.path.basename(env_batch.filename), caseroot) + + case.set_value("BATCH_SYSTEM", batch_system) + + env_batch_has_changed = False + if not external_workflow: + try: + check_lockedfile(case, os.path.basename(env_batch.filename)) + except: + env_batch_has_changed = True + + if batch_system != "none" and env_batch_has_changed and not external_workflow: + # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc) + logger.warning( + """ +env_batch.xml appears to have changed, regenerating batch scripts +manual edits to these file will be lost! +""" + ) + env_batch.make_all_batch_files(case) + + case.flush() + + lock_file(os.path.basename(env_batch.filename), caseroot) + + if resubmit: + # This is a resubmission, do not reinitialize test values + if job == "case.test": + case.set_value("IS_FIRST_RUN", False) + + resub = case.get_value("RESUBMIT") + logger.info("Submitting job '{}', resubmit={:d}".format(job, resub)) + case.set_value("RESUBMIT", resub - 1) + if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"): + case.set_value("CONTINUE_RUN", True) + + else: + if job == "case.test": + case.set_value("IS_FIRST_RUN", True) + + if no_batch: + batch_system = "none" + else: + batch_system = env_batch.get_batch_system_type() + + case.set_value("BATCH_SYSTEM", batch_system) + + env_batch_has_changed = False + try: + check_lockedfile(case, os.path.basename(env_batch.filename)) + except CIMEError: + env_batch_has_changed = True + + if env_batch.get_batch_system_type() != "none" and env_batch_has_changed: + # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc) + logger.warning( + """ +env_batch.xml appears to have changed, regenerating batch scripts +manual edits to these file will be lost! 
+""" + ) + env_batch.make_all_batch_files(case) + + unlock_file(os.path.basename(env_batch.filename), caseroot) + + lock_file(os.path.basename(env_batch.filename), caseroot) + + case.check_case(skip_pnl=skip_pnl, chksum=chksum) + + if job == case.get_primary_job(): + case.check_DA_settings() + + # Load Modules + case.load_env() + + case.flush() + + logger.warning("submit_jobs {}".format(job)) + job_ids = case.submit_jobs( + no_batch=no_batch, + job=job, + prereq=prereq, + skip_pnl=skip_pnl, + resubmit_immediate=resubmit_immediate, + allow_fail=allow_fail, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + workflow=workflow, + dry_run=dryrun, + ) + xml_jobids = [] + if dryrun: + for job in job_ids: + xml_jobids.append("{}:{}".format(job[0], job[1])) + else: + for jobname, jobid in job_ids.items(): + logger.info("Submitted job {} with id {}".format(jobname, jobid)) + if jobid: + xml_jobids.append("{}:{}".format(jobname, jobid)) + + xml_jobid_text = ", ".join(xml_jobids) + if xml_jobid_text and not dryrun: + case.set_value("JOB_IDS", xml_jobid_text) + + return xml_jobid_text + + +def submit( + self, + job=None, + no_batch=False, + prereq=None, + allow_fail=False, + resubmit=False, + resubmit_immediate=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + workflow=True, + chksum=False, + dryrun=False, +): + if resubmit_immediate and self.get_value("MACH") in ["mira", "cetus"]: + logger.warning( + "resubmit_immediate does not work on Mira/Cetus, submitting normally" + ) + resubmit_immediate = False + + caseroot = self.get_value("CASEROOT") + if self.get_value("TEST"): + casebaseid = self.get_value("CASEBASEID") + if os.path.exists(os.path.join(caseroot, "env_test.xml")): + self.set_initial_test_values() + # This should take care of the race condition where the submitted job + # begins immediately and tries to set RUN phase. We proactively assume + # a passed SUBMIT phase. If this state is already PASS, don't set it again + # because then we'll lose RUN phase info if it's there. This info is important + # for system_tests_common to know if it needs to reinitialize the test or not. 
+ with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts: + phase_status = ts.get_status(SUBMIT_PHASE) + if phase_status != TEST_PASS_STATUS: + ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS) + + # If this is a resubmit check the hidden file .submit_options for + # any submit options used on the original submit and use them again + submit_options = os.path.join(caseroot, ".submit_options") + if resubmit and os.path.exists(submit_options): + config = configparser.RawConfigParser() + config.read(submit_options) + if not skip_pnl and config.has_option("SubmitOptions", "skip_pnl"): + skip_pnl = config.getboolean("SubmitOptions", "skip_pnl") + if mail_user is None and config.has_option("SubmitOptions", "mail_user"): + mail_user = config.get("SubmitOptions", "mail_user") + if mail_type is None and config.has_option("SubmitOptions", "mail_type"): + mail_type = str(config.get("SubmitOptions", "mail_type")).split(",") + if batch_args is None and config.has_option("SubmitOptions", "batch_args"): + batch_args = config.get("SubmitOptions", "batch_args") + + is_batch = self.get_value("BATCH_SYSTEM") is not None + + try: + functor = lambda: _submit( + self, + job=job, + no_batch=no_batch, + prereq=prereq, + allow_fail=allow_fail, + resubmit=resubmit, + resubmit_immediate=resubmit_immediate, + skip_pnl=skip_pnl, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + workflow=workflow, + chksum=chksum, + dryrun=dryrun, + ) + run_and_log_case_status( + functor, + "case.submit", + caseroot=caseroot, + custom_success_msg_functor=lambda x: x, + is_batch=is_batch, + gitinterface=self._gitinterface, + ) + except BaseException: # Want to catch KeyboardInterrupt too + # If something failed in the batch system, make sure to mark + # the test as failed if we are running a test. + if self.get_value("TEST"): + with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts: + ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS) + + raise + + +def check_case(self, skip_pnl=False, chksum=False): + check_lockedfiles(self) + + if not skip_pnl: + self.create_namelists() # Must be called before check_all_input_data + + logger.info("Checking that inputdata is available as part of case submission") + self.check_all_input_data(chksum=chksum) + + if self.get_value("COMP_WAV") == "ww": + # the ww3 buildnml has dependencies on inputdata so we must run it again + self.create_namelists(component="WAV") + + if self.get_value("COMP_INTERFACE") == "nuopc": + # + # Check that run length is a multiple of the longest component + # coupling interval. The longest interval is smallest NCPL value. + # models using the nuopc interface will fail at initialization unless + # ncpl follows these rules, other models will only fail later and so + # this test is skipped so that short tests can be run without adjusting NCPL + # + maxncpl = 10000 + minncpl = 0 + maxcomp = None + for comp in self.get_values("COMP_CLASSES"): + if comp == "CPL": + continue + compname = self.get_value("COMP_{}".format(comp)) + + # ignore stub components in this test. 
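[Aside, not part of the patch] The resubmit branch above restores the options used on the original submit from a hidden `.submit_options` file in the case root via `configparser`. A hedged sketch of writing and re-reading such a file; the section and option names are taken from the code above, while the values are invented:

```python
# Illustrative only: round-trip a hypothetical ".submit_options" file using
# the same section/option names the resubmit path above looks for.
import configparser

config = configparser.RawConfigParser()
config["SubmitOptions"] = {
    "skip_pnl": "True",                  # invented values
    "mail_user": "user@example.edu",
    "mail_type": "fail,end",
    "batch_args": "--account=PROJ123",
}
with open(".submit_options", "w") as fd:
    config.write(fd)

config = configparser.RawConfigParser()
config.read(".submit_options")
if config.has_option("SubmitOptions", "skip_pnl"):
    skip_pnl = config.getboolean("SubmitOptions", "skip_pnl")
if config.has_option("SubmitOptions", "mail_type"):
    mail_type = str(config.get("SubmitOptions", "mail_type")).split(",")
print(skip_pnl, mail_type)  # True ['fail', 'end']
```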
+ if compname == "s{}".format(comp.lower()): + ncpl = None + else: + ncpl = self.get_value("{}_NCPL".format(comp)) + + if ncpl and maxncpl > ncpl: + maxncpl = ncpl + maxcomp = comp + if ncpl and minncpl < ncpl: + minncpl = ncpl + + ncpl_base_period = self.get_value("NCPL_BASE_PERIOD") + if ncpl_base_period == "hour": + coupling_secs = 3600 / maxncpl + timestep = 3600 / minncpl + elif ncpl_base_period == "day": + coupling_secs = 86400 / maxncpl + timestep = 86400 / minncpl + elif ncpl_base_period == "year": + coupling_secs = 31536000 / maxncpl + timestep = 31536000 / minncpl + elif ncpl_base_period == "decade": + coupling_secs = 315360000 / maxncpl + timestep = 315360000 / minncpl + stop_option = self.get_value("STOP_OPTION") + stop_n = self.get_value("STOP_N") + if stop_option == "nsteps": + stop_option = "seconds" + stop_n = stop_n * timestep + + runtime = get_time_in_seconds(stop_n, stop_option) + expect( + runtime >= coupling_secs and runtime % coupling_secs == 0, + " Runtime ({0} s) must be a multiple of the longest coupling interval {1}_NCPL ({2}s). Adjust runtime or {1}_NCPL".format( + runtime, maxcomp, coupling_secs + ), + ) + + expect( + self.get_value("BUILD_COMPLETE"), + "Build complete is not True please rebuild the model by calling case.build", + ) + logger.info("Check case OK") + + +def check_DA_settings(self): + script = self.get_value("DATA_ASSIMILATION_SCRIPT") + cycles = self.get_value("DATA_ASSIMILATION_CYCLES") + if len(script) > 0 and os.path.isfile(script) and cycles > 0: + logger.info( + "Data Assimilation enabled using script {} with {:d} cycles".format( + script, cycles + ) + ) diff --git a/scripts/lib/CIME/case/case_test.py b/CIME/case/case_test.py similarity index 89% rename from scripts/lib/CIME/case/case_test.py rename to CIME/case/case_test.py index 6ed144437f8..c607dd93786 100644 --- a/scripts/lib/CIME/case/case_test.py +++ b/CIME/case/case_test.py @@ -4,15 +4,22 @@ """ from CIME.XML.standard_module_setup import * -from CIME.utils import expect, find_system_test, append_testlog, find_proc_id +from CIME.utils import expect, find_system_test, find_proc_id from CIME.SystemTests.system_tests_common import * +from CIME.status import append_testlog import sys, signal + def _iter_signal_names(): - for signame in [item for item in dir(signal) if item.startswith("SIG") and not item.startswith("SIG_")]: + for signame in [ + item + for item in dir(signal) + if item.startswith("SIG") and not item.startswith("SIG_") + ]: yield signame + def _signal_handler(signum, _): name = "Unknown" for signame in _iter_signal_names(): @@ -32,6 +39,7 @@ def _signal_handler(signum, _): # Throw an exception so SystemTest infrastructure can handle this error expect(False, "Job killed due to receiving signal {:d} ({})".format(signum, name)) + def _set_up_signal_handlers(): """ Add handles for all signals that might be used to abort a test @@ -43,9 +51,10 @@ def _set_up_signal_handlers(): signum = getattr(signal, signame) signal.signal(signum, _signal_handler) + def case_test(self, testname=None, reset=False, skip_pnl=False): if testname is None: - testname = self.get_value('TESTCASE') + testname = self.get_value("TESTCASE") expect(testname is not None, "testname argument not resolved") logging.warning("Running test for {}".format(testname)) diff --git a/CIME/case/check_input_data.py b/CIME/case/check_input_data.py new file mode 100644 index 00000000000..0aa0a3acdf0 --- /dev/null +++ b/CIME/case/check_input_data.py @@ -0,0 +1,683 @@ +""" +API for checking input for testcase +""" +from 
CIME.XML.standard_module_setup import * +from CIME.utils import SharedArea, find_files, safe_copy, expect +from CIME.XML.inputdata import Inputdata +import CIME.Servers + +import glob, hashlib, shutil + +logger = logging.getLogger(__name__) +# The inputdata_checksum.dat file will be read into this hash if it's available +chksum_hash = dict() +local_chksum_file = "inputdata_checksum.dat" + + +def _download_checksum_file(rundir): + """ + Download the checksum files from each server and merge them into rundir. + """ + inputdata = Inputdata() + protocol = "svn" + chksum_found = False + # download and merge all available chksum files. + while protocol is not None: + protocol, address, user, passwd, chksum_file, _, _ = inputdata.get_next_server() + if protocol not in vars(CIME.Servers): + logger.info("Client protocol {} not enabled".format(protocol)) + continue + logger.info( + "Using protocol {} with user {} and passwd {}".format( + protocol, user, passwd + ) + ) + if protocol == "svn": + server = CIME.Servers.SVN(address, user, passwd) + elif protocol == "gftp": + server = CIME.Servers.GridFTP(address, user, passwd) + elif protocol == "ftp": + server = CIME.Servers.FTP.ftp_login(address, user, passwd) + elif protocol == "wget": + server = CIME.Servers.WGET.wget_login(address, user, passwd) + else: + expect(False, "Unsupported inputdata protocol: {}".format(protocol)) + if not server: + continue + + if chksum_file: + chksum_found = True + else: + continue + + success = False + rel_path = chksum_file + full_path = os.path.join(rundir, local_chksum_file) + new_file = full_path + ".raw" + protocol = type(server).__name__ + logger.info( + "Trying to download file: '{}' to path '{}' using {} protocol.".format( + rel_path, new_file, protocol + ) + ) + tmpfile = None + if os.path.isfile(full_path): + tmpfile = full_path + ".tmp" + os.rename(full_path, tmpfile) + # Use umask to make sure files are group read/writable. As long as parent directories + # have +s, then everything should work. + success = server.getfile(rel_path, new_file) + if success: + _reformat_chksum_file(full_path, new_file) + if tmpfile: + _merge_chksum_files(full_path, tmpfile) + chksum_hash.clear() + else: + if tmpfile and os.path.isfile(tmpfile): + os.rename(tmpfile, full_path) + logger.warning( + "Could not automatically download file " + + full_path + + " Restoring existing version." + ) + else: + logger.warning( + "Could not automatically download file {}".format(full_path) + ) + return chksum_found + + +def _reformat_chksum_file(chksum_file, server_file): + """ + The checksum file on the server has 8 space seperated columns, I need the first and last ones. 
+ This function gets the first and last column of server_file and saves it to chksum_file + """ + with open(server_file) as fd, open(chksum_file, "w") as fout: + lines = fd.readlines() + for line in lines: + lsplit = line.split() + if len(lsplit) < 8 or " DIR " in line: + continue + + # remove the first directory ('inputdata/') from the filename + chksum = lsplit[0] + fname = (lsplit[7]).split("/", 1)[1] + fout.write(" ".join((chksum, fname)) + "\n") + os.remove(server_file) + + +def _merge_chksum_files(new_file, old_file): + """ + If more than one server checksum file is available, this merges the files and removes + any duplicate lines + """ + with open(old_file) as fin: + lines = fin.readlines() + with open(new_file) as fin: + lines += fin.readlines() + lines = set(lines) + with open(new_file, "w") as fout: + fout.write("".join(lines)) + os.remove(old_file) + + +def _download_if_in_repo( + server, input_data_root, rel_path, isdirectory=False, ic_filepath=None +): + """ + Return True if successfully downloaded + server is an object handle of type CIME.Servers + input_data_root is the local path to inputdata (DIN_LOC_ROOT) + rel_path is the path to the file or directory relative to input_data_root + user is the user name of the person running the script + isdirectory indicates that this is a directory download rather than a single file + """ + if not (rel_path or server.fileexists(rel_path)): + return False + full_path = os.path.join(input_data_root, rel_path) + if ic_filepath: + full_path = full_path.replace(ic_filepath, "/") + logger.info( + "Trying to download file: '{}' to path '{}' using {} protocol.".format( + rel_path, full_path, type(server).__name__ + ) + ) + # Make sure local path exists, create if it does not + if isdirectory or full_path.endswith(os.sep): + if not os.path.exists(full_path): + logger.info("Creating directory {}".format(full_path)) + os.makedirs(full_path + ".tmp") + isdirectory = True + elif not os.path.exists(os.path.dirname(full_path)): + os.makedirs(os.path.dirname(full_path), exist_ok=True) + + # Use umask to make sure files are group read/writable. As long as parent directories + # have +s, then everything should work. 
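[Aside, not part of the patch] To make the checksum handling above concrete: `_reformat_chksum_file` keeps only the first and eighth whitespace-separated columns of each server listing line and drops the leading `inputdata/` component from the path. A small sketch with an invented listing line (the middle columns stand in for whatever metadata the server reports):

```python
# Made-up server listing line: checksum first, path in the eighth column.
line = (
    "0123456789abcdef0123456789abcdef x x x x x x "
    "inputdata/atm/cam/inic/homme/cami_0000-01-01.nc"
)

lsplit = line.split()
if len(lsplit) >= 8 and " DIR " not in line:
    chksum = lsplit[0]
    # strip the leading "inputdata/" directory from the filename
    fname = lsplit[7].split("/", 1)[1]
    print(chksum, fname)
    # 0123456789abcdef0123456789abcdef atm/cam/inic/homme/cami_0000-01-01.nc
```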
+ if isdirectory: + success = server.getdirectory(rel_path, full_path + ".tmp") + # this is intended to prevent a race condition in which + # one case attempts to use a refdir before another one has + # completed the download + if success: + os.rename(full_path + ".tmp", full_path) + else: + shutil.rmtree(full_path + ".tmp") + else: + success = server.getfile(rel_path, full_path) + + return success + + +def _check_all_input_data_impl( + self, + protocol, + address, + input_data_root, + data_list_dir, + download, + chksum, +): + success = False + if protocol is not None and address is not None: + success = self.check_input_data( + protocol=protocol, + address=address, + download=download, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + chksum=chksum, + ) + else: + if chksum: + chksum_found = _download_checksum_file(self.get_value("RUNDIR")) + + clm_usrdat_name = self.get_value("CLM_USRDAT_NAME") + if clm_usrdat_name and clm_usrdat_name == "UNSET": + clm_usrdat_name = None + + if download and clm_usrdat_name: + success = _downloadfromserver( + self, + input_data_root, + data_list_dir, + attributes={"CLM_USRDAT_NAME": clm_usrdat_name}, + ) + if not success: + success = self.check_input_data( + protocol=protocol, + address=address, + download=False, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + chksum=chksum and chksum_found, + ) + if download and not success: + if chksum: + chksum_found = _download_checksum_file(self.get_value("RUNDIR")) + success = _downloadfromserver(self, input_data_root, data_list_dir) + + expect( + not download or (download and success), + "Could not find all inputdata on any server", + ) + self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir) + return success + + +def check_all_input_data( + self, + protocol=None, + address=None, + input_data_root=None, + data_list_dir="Buildconf", + download=True, + chksum=False, +): + """ + Read through all files of the form *.input_data_list in the data_list_dir directory. These files + contain a list of input and boundary files needed by each model component. For each file in the + list confirm that it is available in input_data_root and if not (optionally download it from a + server at address using protocol. Perform a chksum of the downloaded file. + """ + # Run the entire impl in a SharedArea to help avoid permission problems + with SharedArea(): + return _check_all_input_data_impl( + self, protocol, address, input_data_root, data_list_dir, download, chksum + ) + + +def _downloadfromserver(case, input_data_root, data_list_dir, attributes=None): + """ + Download files + """ + success = False + protocol = "svn" + inputdata = Inputdata() + if not input_data_root: + input_data_root = case.get_value("DIN_LOC_ROOT") + + while not success and protocol is not None: + protocol, address, user, passwd, _, ic_filepath, _ = inputdata.get_next_server( + attributes=attributes + ) + logger.info("Checking server {} with protocol {}".format(address, protocol)) + success = case.check_input_data( + protocol=protocol, + address=address, + download=True, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + user=user, + passwd=passwd, + ic_filepath=ic_filepath, + ) + return success + + +def stage_refcase(self, input_data_root=None, data_list_dir=None): + """ + Get a REFCASE for a hybrid or branch run + This is the only case in which we are downloading an entire directory instead of + a single file at a time. 
+ """ + get_refcase = self.get_value("GET_REFCASE") + run_type = self.get_value("RUN_TYPE") + continue_run = self.get_value("CONTINUE_RUN") + + # We do not fully populate the inputdata directory on every + # machine and do not expect every user to download the 3TB+ of + # data in our inputdata repository. This code checks for the + # existence of inputdata in the local inputdata directory and + # attempts to download data from the server if it's needed and + # missing. + if get_refcase and run_type != "startup" and not continue_run: + din_loc_root = self.get_value("DIN_LOC_ROOT") + run_refdate = self.get_value("RUN_REFDATE") + run_refcase = self.get_value("RUN_REFCASE") + run_refdir = self.get_value("RUN_REFDIR") + rundir = self.get_value("RUNDIR") + + if os.path.isabs(run_refdir): + refdir = run_refdir + expect( + os.path.isdir(refdir), + "Reference case directory {} does not exist or is not readable".format( + refdir + ), + ) + + else: + refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate) + if not os.path.isdir(refdir): + logger.warning( + "Refcase not found in {}, will attempt to download from inputdata".format( + refdir + ) + ) + with open( + os.path.join("Buildconf", "refcase.input_data_list"), "w" + ) as fd: + fd.write("refdir = {}{}".format(refdir, os.sep)) + if input_data_root is None: + input_data_root = din_loc_root + if data_list_dir is None: + data_list_dir = "Buildconf" + success = _downloadfromserver( + self, input_data_root=input_data_root, data_list_dir=data_list_dir + ) + expect(success, "Could not download refcase from any server") + + logger.info(" - Prestaging REFCASE ({}) to {}".format(refdir, rundir)) + + # prestage the reference case's files. + + if not os.path.exists(rundir): + logger.debug("Creating run directory: {}".format(rundir)) + os.makedirs(rundir) + rpointerfile = None + # copy the refcases' rpointer files to the run directory + for rpointerfile in glob.iglob(os.path.join("{}", "*rpointer*").format(refdir)): + logger.info("Copy rpointer {}".format(rpointerfile)) + safe_copy(rpointerfile, rundir) + os.chmod(os.path.join(rundir, os.path.basename(rpointerfile)), 0o644) + expect( + rpointerfile, + "Reference case directory {} does not contain any rpointer files".format( + refdir + ), + ) + # link everything else + + for rcfile in glob.iglob(os.path.join(refdir, "*")): + rcbaseline = os.path.basename(rcfile) + skipfiles = ( + "timing" in rcbaseline + or "spio_stats" in rcbaseline + or "memory." in rcbaseline + ) + if not os.path.exists("{}/{}".format(rundir, rcbaseline)) and not skipfiles: + logger.info("Staging file {}".format(rcfile)) + os.symlink(rcfile, "{}/{}".format(rundir, rcbaseline)) + # Backward compatibility, some old refcases have cam2 in the name + # link to local cam file. + for cam2file in glob.iglob(os.path.join("{}", "*.cam2.*").format(rundir)): + camfile = cam2file.replace("cam2", "cam") + os.symlink(cam2file, camfile) + elif not get_refcase and run_type != "startup": + logger.info( + "GET_REFCASE is false, the user is expected to stage the refcase to the run directory." 
+ ) + if os.path.exists(os.path.join("Buildconf", "refcase.input_data_list")): + os.remove(os.path.join("Buildconf", "refcase.input_data_list")) + return True + + +def _check_input_data_impl( + case, + protocol, + address, + input_data_root, + data_list_dir, + download, + user, + passwd, + chksum, + ic_filepath, +): + case.load_env(reset=True) + rundir = case.get_value("RUNDIR") + # Fill in defaults as needed + input_data_root = ( + case.get_value("DIN_LOC_ROOT") if input_data_root is None else input_data_root + ) + input_ic_root = case.get_value("DIN_LOC_IC", resolved=True) + expect( + os.path.isdir(data_list_dir), + "Invalid data_list_dir directory: '{}'".format(data_list_dir), + ) + + data_list_files = find_files(data_list_dir, "*.input_data_list") + if not data_list_files: + logger.warning( + "WARNING: No .input_data_list files found in dir '{}'".format(data_list_dir) + ) + + no_files_missing = True + if download: + if protocol not in vars(CIME.Servers): + logger.info("Client protocol {} not enabled".format(protocol)) + return False + logger.info( + "Using protocol {} with user {} and passwd {}".format( + protocol, user, passwd + ) + ) + if protocol == "svn": + server = CIME.Servers.SVN(address, user, passwd) + elif protocol == "gftp": + server = CIME.Servers.GridFTP(address, user, passwd) + elif protocol == "ftp": + server = CIME.Servers.FTP.ftp_login(address, user, passwd) + elif protocol == "wget": + server = CIME.Servers.WGET.wget_login(address, user, passwd) + else: + expect(False, "Unsupported inputdata protocol: {}".format(protocol)) + if not server: + return None + + for data_list_file in data_list_files: + logger.info("Loading input file list: '{}'".format(data_list_file)) + with open(data_list_file, "r") as fd: + lines = fd.readlines() + + for line in lines: + line = line.strip() + use_ic_path = False + if line and not line.startswith("#"): + tokens = line.split("=") + description, full_path = tokens[0].strip(), tokens[1].strip() + if ( + description.endswith("datapath") + or description.endswith("data_path") + or full_path.endswith("/dev/null") + ): + continue + if description.endswith("file") or description.endswith("filename"): + # There are required input data with key, or 'description' entries + # that specify in their names whether they are files or filenames + # rather than 'datapath's or 'data_path's so we check to make sure + # the input data list has correct non-path values for input files. + # This check happens whether or not a file already exists locally. + expect( + (not full_path.endswith(os.sep)), + "Unsupported directory path in input_data_list named {}. 
Line entry is '{} = {}'.".format( + data_list_file, description, full_path + ), + ) + if full_path: + # expand xml variables + full_path = case.get_resolved_value(full_path) + rel_path = full_path + if input_ic_root and input_ic_root in full_path and ic_filepath: + rel_path = full_path.replace(input_ic_root, ic_filepath) + use_ic_path = True + elif input_data_root in full_path: + rel_path = full_path.replace(input_data_root, "") + elif input_ic_root and ( + input_ic_root not in input_data_root + and input_ic_root in full_path + ): + if ic_filepath: + rel_path = full_path.replace(input_ic_root, ic_filepath) + use_ic_path = True + model = os.path.basename(data_list_file).split(".")[0] + isdirectory = rel_path.endswith(os.sep) + + if ( + "/" in rel_path + and rel_path == full_path + and not full_path.startswith("unknown") + ): + # User pointing to a file outside of input_data_root, we cannot determine + # rel_path, and so cannot download the file. If it already exists, we can + # proceed + if not os.path.exists(full_path): + msg = "Model {} missing file {} = '{}'".format( + model, description, full_path + ) + # Data download path must be DIN_LOC_ROOT, DIN_LOC_IC or RUNDIR + + rundir = case.get_value("RUNDIR") + if download: + if full_path.startswith(rundir): + print(msg) + filepath = os.path.dirname(full_path) + if not os.path.exists(filepath): + logger.info( + "Creating directory {}".format(filepath) + ) + os.makedirs(filepath) + tmppath = full_path[len(rundir) + 1 :] + success = _download_if_in_repo( + server, + os.path.join(rundir, "inputdata"), + tmppath[10:], + isdirectory=isdirectory, + ic_filepath="/", + ) + no_files_missing = success + else: + # Ensure that msg and warning text are together in TestStatus.log + logger.warning( + msg + + "\n Cannot download file since it lives outside of the input_data_root '{}'".format( + input_data_root + ) + ) + else: + print(msg) + no_files_missing = False + else: + logger.debug(" Found input file: '{}'".format(full_path)) + else: + # There are some special values of rel_path that + # we need to ignore - some of the component models + # set things like 'NULL' or 'same_as_TS' - + # basically if rel_path does not contain '/' (a + # directory tree) you can assume it's a special + # value and ignore it (perhaps with a warning) + + if ( + "/" in rel_path + and not os.path.exists(full_path) + and not full_path.startswith("unknown") + ): + print( + "Model {} missing file {} = '{}'".format( + model, description, full_path + ) + ) + if download: + if use_ic_path: + success = _download_if_in_repo( + server, + input_ic_root, + rel_path.strip(os.sep), + isdirectory=isdirectory, + ic_filepath=ic_filepath, + ) + else: + success = _download_if_in_repo( + server, + input_data_root, + rel_path.strip(os.sep), + isdirectory=isdirectory, + ic_filepath=ic_filepath, + ) + if not success: + no_files_missing = False + if success and chksum: + verify_chksum( + input_data_root, + rundir, + rel_path.strip(os.sep), + isdirectory, + ) + else: + no_files_missing = False + else: + if chksum: + verify_chksum( + input_data_root, + rundir, + rel_path.strip(os.sep), + isdirectory, + ) + logger.info( + "Chksum passed for file {}".format( + os.path.join(input_data_root, rel_path) + ) + ) + logger.debug( + " Already had input file: '{}'".format(full_path) + ) + else: + model = os.path.basename(data_list_file).split(".")[0] + logger.warning( + "Model {} no file specified for {}".format(model, description) + ) + + return no_files_missing + + +def check_input_data( + case, + 
protocol="svn", + address=None, + input_data_root=None, + data_list_dir="Buildconf", + download=False, + user=None, + passwd=None, + chksum=False, + ic_filepath=None, +): + """ + For a given case check for the relevant input data as specified in data_list_dir/*.input_data_list + in the directory input_data_root, if not found optionally download it using the servers specified + in config_inputdata.xml. If a chksum file is available compute the chksum and compare it to that + in the file. + Return True if no files missing + """ + # Run the entire impl in a SharedArea to help avoid permission problems + with SharedArea(): + return _check_input_data_impl( + case, + protocol, + address, + input_data_root, + data_list_dir, + download, + user, + passwd, + chksum, + ic_filepath, + ) + + +def verify_chksum(input_data_root, rundir, filename, isdirectory): + """ + For file in filename perform a chksum and compare the result to that stored in + the local checksumfile, if isdirectory chksum all files in the directory of form *.* + """ + hashfile = os.path.join(rundir, local_chksum_file) + if not chksum_hash: + if not os.path.isfile(hashfile): + logger.warning("Failed to find or download file {}".format(hashfile)) + return + + with open(hashfile) as fd: + lines = fd.readlines() + for line in lines: + fchksum, fname = line.split() + if fname in chksum_hash: + expect( + chksum_hash[fname] == fchksum, + " Inconsistent hashes in chksum for file {}".format(fname), + ) + else: + chksum_hash[fname] = fchksum + + if isdirectory: + filenames = glob.glob(os.path.join(filename, "*.*")) + else: + filenames = [filename] + for fname in filenames: + if not os.sep in fname: + continue + chksum = md5(os.path.join(input_data_root, fname)) + if chksum_hash: + if not fname in chksum_hash: + logger.warning( + "Did not find hash for file {} in chksum file {}".format( + filename, hashfile + ) + ) + else: + expect( + chksum == chksum_hash[fname], + "chksum mismatch for file {} expected {} found {}".format( + os.path.join(input_data_root, fname), chksum, chksum_hash[fname] + ), + ) + + +def md5(fname): + """ + performs an md5 sum one chunk at a time to avoid memory issues with large files. 
+ """ + hash_md5 = hashlib.md5() + with open(fname, "rb") as f: + for chunk in iter(lambda: f.read(4096), b""): + hash_md5.update(chunk) + return hash_md5.hexdigest() diff --git a/CIME/case/preview_namelists.py b/CIME/case/preview_namelists.py new file mode 100644 index 00000000000..707d59a53c0 --- /dev/null +++ b/CIME/case/preview_namelists.py @@ -0,0 +1,137 @@ +""" +API for preview namelist +create_dirs and create_namelists are members of Class case from file case.py +""" + +from CIME.XML.standard_module_setup import * +from CIME.utils import import_and_run_sub_or_cmd, safe_copy +import time, glob + +logger = logging.getLogger(__name__) + + +def create_dirs(self): + """ + Make necessary directories for case + """ + # Get data from XML + exeroot = self.get_value("EXEROOT") + libroot = self.get_value("LIBROOT") + incroot = self.get_value("INCROOT") + rundir = self.get_value("RUNDIR") + caseroot = self.get_value("CASEROOT") + docdir = os.path.join(caseroot, "CaseDocs") + dirs_to_make = [] + models = self.get_values("COMP_CLASSES") + for model in models: + dirname = model.lower() + dirs_to_make.append(os.path.join(exeroot, dirname, "obj")) + + dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir]) + + for dir_to_make in dirs_to_make: + if not os.path.isdir(dir_to_make) and not os.path.islink(dir_to_make): + try: + logger.debug("Making dir '{}'".format(dir_to_make)) + os.makedirs(dir_to_make) + except OSError as e: + # In a multithreaded situation, we may have lost a race to create this dir. + # We do not want to crash if that's the case. + if not os.path.isdir(dir_to_make): + expect( + False, + "Could not make directory '{}', error: {}".format( + dir_to_make, e + ), + ) + + # As a convenience write the location of the case directory in the bld and run directories + for dir_ in (exeroot, rundir): + with open(os.path.join(dir_, "CASEROOT"), "w+") as fd: + fd.write(caseroot + "\n") + + +def create_namelists(self, component=None): + """ + Create component namelists + """ + self.flush() + + create_dirs(self) + + casebuild = self.get_value("CASEBUILD") + caseroot = self.get_value("CASEROOT") + rundir = self.get_value("RUNDIR") + + docdir = os.path.join(caseroot, "CaseDocs") + + # Load modules + self.load_env() + + self.stage_refcase() + + # Create namelists - must have cpl last in the list below + # Note - cpl must be last in the loop below so that in generating its namelist, + # it can use xml vars potentially set by other component's buildnml scripts + models = self.get_values("COMP_CLASSES") + models += [models.pop(0)] + for model in models: + model_str = model.lower() + logger.info(" {} {} ".format(time.strftime("%Y-%m-%d %H:%M:%S"), model_str)) + config_file = self.get_value("CONFIG_{}_FILE".format(model_str.upper())) + config_dir = os.path.dirname(config_file) + if model_str == "cpl": + compname = "drv" + else: + compname = self.get_value("COMP_{}".format(model_str.upper())) + if component is None or component == model_str or compname == "ufsatm": + cmd = os.path.join(config_dir, "buildnml") + logger.info("Create namelist for component {}".format(compname)) + import_and_run_sub_or_cmd( + cmd, + (caseroot), + "buildnml", + (self, caseroot, compname), + config_dir, + compname, + case=self, + ) + + logger.debug( + "Finished creating component namelists, component {} models = {}".format( + component, models + ) + ) + + # Save namelists to docdir + if not os.path.isdir(docdir): + os.makedirs(docdir) + try: + with open(os.path.join(docdir, "README"), "w") as fd: + fd.write( + " CESM 
Resolved Namelist Files\n For documentation only DO NOT MODIFY\n" + ) + except (OSError, IOError) as e: + expect(False, "Failed to write {}/README: {}".format(docdir, e)) + + for cpglob in [ + "*_in_[0-9]*", + "*modelio*", + "*_in", + "nuopc.runconfig", + "*streams*txt*", + "*streams.xml", + "*stxt", + "*maps.rc", + "*cism*.config*", + "nuopc.runseq", + ]: + for file_to_copy in glob.glob(os.path.join(rundir, cpglob)): + logger.debug("Copy file from '{}' to '{}'".format(file_to_copy, docdir)) + safe_copy(file_to_copy, docdir) + + # Copy over chemistry mechanism docs if they exist + atmconf = self.get_value("COMP_ATM") + "conf" + if os.path.isdir(os.path.join(casebuild, atmconf)): + for file_to_copy in glob.glob(os.path.join(casebuild, atmconf, "*chem_mech*")): + safe_copy(file_to_copy, docdir) diff --git a/CIME/code_checker.py b/CIME/code_checker.py new file mode 100644 index 00000000000..08a5a021b0f --- /dev/null +++ b/CIME/code_checker.py @@ -0,0 +1,200 @@ +""" +Libraries for checking python code with pylint +""" + +import os +import json +from shutil import which + +from CIME.XML.standard_module_setup import * + +from CIME.utils import ( + run_cmd, + run_cmd_no_fail, + expect, + get_cime_root, + get_src_root, + is_python_executable, + get_cime_default_driver, +) + +from multiprocessing.dummy import Pool as ThreadPool + +logger = logging.getLogger(__name__) + + +############################################################################### +def _run_pylint(all_files, interactive): + ############################################################################### + pylint = which("pylint") + + cmd_options = ( + " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" + ) + cmd_options += ( + ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" + ) + cmd_options += ",logging-format-interpolation,no-name-in-module,arguments-renamed" + cmd_options += " -j 0 -f json" + cimeroot = get_cime_root() + srcroot = get_src_root() + + # if "scripts/Tools" in on_file: + # cmd_options +=",relative-import" + + # add init-hook option + cmd_options += ( + ' --init-hook=\'import sys; sys.path.extend(("%s","%s","%s","%s"))\'' + % ( + os.path.join(cimeroot, "CIME"), + os.path.join(cimeroot, "CIME", "Tools"), + os.path.join(cimeroot, "scripts", "fortran_unit_testing", "python"), + os.path.join(srcroot, "components", "cmeps", "cime_config", "runseq"), + ) + ) + + files = " ".join(all_files) + cmd = "%s %s %s" % (pylint, cmd_options, files) + logger.debug("pylint command is %s" % cmd) + stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot) + + data = json.loads(out) + + result = {} + + for item in data: + if item["type"] != "error": + continue + + path = item["path"] + message = item["message"] + line = item["line"] + + if path in result: + result[path].append(f"{message}:{line}") + else: + result[path] = [ + message, + ] + + for k in result.keys(): + result[k] = "\n".join(set(result[k])) + + return result + + +############################################################################### +def _matches(file_path, file_ends): + ############################################################################### + for file_end in file_ends: + if file_path.endswith(file_end): + return True + + return False + + +############################################################################### +def _should_pylint_skip(filepath): + ############################################################################### + # TODO - get rid of this + list_of_directories_to_ignore = ( + 
"xmlconvertors", + "pointclm", + "point_clm", + "tools", + "machines", + "apidocs", + "doc", + ) + for dir_to_skip in list_of_directories_to_ignore: + if dir_to_skip + "/" in filepath: + return True + # intended to be temporary, file needs update + if filepath.endswith("archive_metadata") or filepath.endswith("pgn.py"): + return True + + return False + + +############################################################################### +def get_all_checkable_files(): + ############################################################################### + cimeroot = get_cime_root() + all_git_files = run_cmd_no_fail( + "git ls-files", from_dir=cimeroot, verbose=False + ).splitlines() + if get_cime_default_driver() == "nuopc": + srcroot = get_src_root() + nuopc_git_files = [] + try: + nuopc_git_files = run_cmd_no_fail( + "git ls-files", + from_dir=os.path.join(srcroot, "components", "cmeps"), + verbose=False, + ).splitlines() + except: + logger.warning("No nuopc driver found in source") + all_git_files.extend( + [ + os.path.join(srcroot, "components", "cmeps", _file) + for _file in nuopc_git_files + ] + ) + files_to_test = [ + item + for item in all_git_files + if ( + (item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) + and not _should_pylint_skip(item) + ) + ] + + return files_to_test + + +############################################################################### +def check_code(files, num_procs=10, interactive=False): + ############################################################################### + """ + Check all python files in the given directory + + Returns True if all files had no problems + """ + # Get list of files to check, we look to see if user-provided file argument + # is a valid file, if not, we search the repo for a file with similar name. 
+ files_to_check = [] + if files: + repo_files = get_all_checkable_files() + for filearg in files: + if os.path.exists(filearg): + files_to_check.append(os.path.abspath(filearg)) + else: + found = False + for repo_file in repo_files: + if repo_file.endswith(filearg): + found = True + files_to_check.append(repo_file) # could have multiple matches + + if not found: + logger.warning( + "Could not find file matching argument '%s'" % filearg + ) + else: + # Check every python file + files_to_check = get_all_checkable_files() + + expect(len(files_to_check) > 0, "No matching files found") + + # No point in using more threads than files + # if len(files_to_check) < num_procs: + # num_procs = len(files_to_check) + + results = _run_pylint(files_to_check, interactive) + + return results + + # pool = ThreadPool(num_procs) + # results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check) + # pool.close() + # pool.join() + # return dict(results) diff --git a/CIME/compare_namelists.py b/CIME/compare_namelists.py new file mode 100644 index 00000000000..4e1555984f1 --- /dev/null +++ b/CIME/compare_namelists.py @@ -0,0 +1,703 @@ +import os, re, logging +from CIME.utils import expect, CIMEError + +logger = logging.getLogger(__name__) + +# pragma pylint: disable=unsubscriptable-object + +############################################################################### +def _normalize_lists(value_str): + ############################################################################### + """ + >>> _normalize_lists("'one two' 'three four'") + "'one two','three four'" + >>> _normalize_lists("'one two' 'three four'") + "'one two','three four'" + >>> _normalize_lists("'one two' , 'three four'") + "'one two','three four'" + >>> _normalize_lists("'one two'") + "'one two'" + >>> _normalize_lists("1 2 3, 4 , 5") + '1,2,3,4,5' + >>> _normalize_lists("2, 2*13") + '2,2*13' + >>> _normalize_lists("'DMS -> 1.0 * value.nc'") + "'DMS -> 1.0 * value.nc'" + >>> _normalize_lists("1.0* value.nc") + '1.0*value.nc' + >>> _normalize_lists("1.0*value.nc") + '1.0*value.nc' + """ + # Handle special case "value * value" which should not be treated as list + parsed = re.match(r"^([^*=->\s]*)\s*(\*)\s*(.*)$", value_str) + if parsed is not None: + value_str = "".join(parsed.groups()) + result = "" + inside_quotes = False + idx = 0 + while idx < len(value_str): + value_c = value_str[idx] + if value_c == "'": + inside_quotes = not inside_quotes + result += value_c + idx += 1 + elif value_c.isspace() or value_c == ",": + if inside_quotes: + result += value_c + idx += 1 + else: + result += "," + idx += 1 + while idx < len(value_str): + value_c = value_str[idx] + if not value_c.isspace() and value_c != ",": + break + idx += 1 + else: + result += value_c + idx += 1 + + return result + + +############################################################################### +def _interpret_value(value_str, filename): + ############################################################################### + """ + >>> _interpret_value("one", "foo") + 'one' + >>> _interpret_value("one, two", "foo") + ['one', 'two'] + >>> _interpret_value("3*1.0", "foo") + ['1.0', '1.0', '1.0'] + >>> _interpret_value("'DMS -> value.nc'", "foo") + {'DMS': 'value.nc'} + >>> _interpret_value("'DMS -> 1.0 * value.nc'", "foo") + {'DMS': '1.0*value.nc'} + >>> _interpret_value("'DMS -> 1.0* value.nc'", "foo") + {'DMS': '1.0*value.nc'} + """ + comma_re = re.compile(r"\s*,\s*") + dict_re = re.compile(r"^'(\S+)\s*->\s*(\S+|(?:\S+\s*\*\s*\S+))\s*'") + + value_str = 
_normalize_lists(value_str) + + tokens = [item.strip() for item in comma_re.split(value_str) if item.strip() != ""] + if "->" in value_str: + # dict + rv = {} + for token in tokens: + m = dict_re.match(token) + expect( + m is not None, + "In file '{}', Dict entry '{}' does not match expected format".format( + filename, token + ), + ) + k, v = m.groups() + rv[k] = _interpret_value(v, filename) + + return rv + else: + new_tokens = [] + for token in tokens: + if "*" in token: + try: + # the following ensure that the following to namelist settings trigger a match + # nmlvalue = 1,1,1 versus nmlvalue = 3*1 + sub_tokens = [item.strip() for item in token.split("*")] + expect( + len(sub_tokens) == 2, + "Incorrect usage of multiplication in token '{}'".format(token), + ) + new_tokens.extend([sub_tokens[1]] * int(sub_tokens[0])) + except Exception: + # User probably did not intend to use the * operator as a namelist multiplier + new_tokens.append(token) + else: + new_tokens.append(token) + + if "," in value_str or len(new_tokens) > 1: + return new_tokens + else: + return new_tokens[0] + + +############################################################################### +def _parse_namelists(namelist_lines, filename): + ############################################################################### + """ + Return data in form: {namelist -> {key -> value} }. + value can be an int, string, list, or dict + + >>> teststr = '''&nml + ... val = 'foo' + ... aval = 'one','two', 'three' + ... maval = 'one', 'two', + ... 'three', 'four' + ... dval = 'one->two', 'three -> four' + ... mdval = 'one -> two', + ... 'three -> four', + ... 'five -> six' + ... nval = 1850 + ... / + ... + ... # Hello + ... + ... &nml2 + ... val2 = .false. + ... / + ... ''' + >>> _parse_namelists(teststr.splitlines(), 'foo') + {'nml': {'val': "'foo'", 'aval': ["'one'", "'two'", "'three'"], 'maval': ["'one'", "'two'", "'three'", "'four'"], 'dval': {'one': 'two', 'three': 'four'}, 'mdval': {'one': 'two', 'three': 'four', 'five': 'six'}, 'nval': '1850'}, 'nml2': {'val2': '.false.'}} + + >>> teststr = '''&fire_emis_nl + ... fire_emis_factors_file = 'fire_emis_factors_c140116.nc' + ... fire_emis_specifier = 'bc_a1 = BC', 'pom_a1 = 1.4*OC', 'pom_a2 = A*B*C', 'SO2 = SO2' + ... / + ... ''' + >>> _parse_namelists(teststr.splitlines(), 'foo') + {'fire_emis_nl': {'fire_emis_factors_file': "'fire_emis_factors_c140116.nc'", 'fire_emis_specifier': ["'bc_a1 = BC'", "'pom_a1 = 1.4*OC'", "'pom_a2 = A*B*C'", "'SO2 = SO2'"]}} + + >>> _parse_namelists('blah', 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: File 'foo' does not appear to be a namelist file, skipping + + >>> teststr = '''&nml + ... val = 'one', 'two', + ... val2 = 'three' + ... /''' + >>> _parse_namelists(teststr.splitlines(), 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: In file 'foo', Incomplete multiline variable: 'val' + + >>> teststr = '''&nml + ... val = 'one', 'two', + ... /''' + >>> _parse_namelists(teststr.splitlines(), 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: In file 'foo', Incomplete multiline variable: 'val' + + >>> teststr = '''&nml + ... val = 'one', 'two', + ... 'three -> four' + ... /''' + >>> _parse_namelists(teststr.splitlines(), 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... 
+ CIMEError: ERROR: In file 'foo', multiline list variable 'val' had dict entries + + >>> teststr = '''&nml + ... val = 2, 2*13 + ... /''' + >>> _parse_namelists(teststr.splitlines(), 'foo') + {'nml': {'val': ['2', '13', '13']}} + + >>> teststr = '''&nml + ... val = 2 2 3 + ... /''' + >>> _parse_namelists(teststr.splitlines(), 'foo') + {'nml': {'val': ['2', '2', '3']}} + + >>> teststr = '''&nml + ... val = 'a brown cow' 'a red hen' + ... /''' + >>> _parse_namelists(teststr.splitlines(), 'foo') + {'nml': {'val': ["'a brown cow'", "'a red hen'"]}} + """ + + comment_re = re.compile(r"^[#!]") + namelist_re = re.compile(r"^&(\S+)$") + name_re = re.compile(r"^([^\s=']+)\s*=\s*(.+)$") + rcline_re = re.compile(r"^([^&\s':]+)\s*:\s*(.+)$") + + rv = {} + current_namelist = None + multiline_variable = None # (name, value) + for line in namelist_lines: + + line = line.strip() + line = line.replace('"', "'") + + logger.debug("Parsing line: '{}'".format(line)) + + if line == "" or comment_re.match(line) is not None: + logger.debug(" Line was whitespace or comment, skipping.") + continue + + rcline = rcline_re.match(line) + if rcline is not None: + # Defining a variable (AKA name) + name, value = rcline.groups() + + logger.debug(" Parsing variable '{}' with data '{}'".format(name, value)) + + if "seq_maps.rc" not in rv: + rv["seq_maps.rc"] = {} + + expect( + name not in rv["seq_maps.rc"], + "In file '{}', Duplicate name: '{}'".format(filename, name), + ) + rv["seq_maps.rc"][name] = value + + elif current_namelist is None: + # Must start a namelist + expect( + multiline_variable is None, + "In file '{}', Incomplete multiline variable: '{}'".format( + filename, + multiline_variable[0] if multiline_variable is not None else "", + ), + ) + + # Unfortunately, other tools were using the old compare_namelists.pl script + # to compare files that are not namelist files. 
We need a special error + # to signify this event + if namelist_re.match(line) is None: + expect( + rv != {}, + "File '{}' does not appear to be a namelist file, skipping".format( + filename + ), + ) + expect( + False, + "In file '{}', Line '{}' did not begin a namelist as expected".format( + filename, line + ), + ) + + current_namelist = namelist_re.match(line).groups()[0] + expect( + current_namelist not in rv, + "In file '{}', Duplicate namelist '{}'".format( + filename, current_namelist + ), + ) + + rv[current_namelist] = {} + + logger.debug(" Starting namelist '{}'".format(current_namelist)) + + elif line == "/": + # Ends a namelist + logger.debug(" Ending namelist '{}'".format(current_namelist)) + + expect( + multiline_variable is None, + "In file '{}', Incomplete multiline variable: '{}'".format( + filename, + multiline_variable[0] if multiline_variable is not None else "", + ), + ) + + current_namelist = None + + elif name_re.match(line): + # Defining a variable (AKA name) + name, value_str = name_re.match(line).groups() + + logger.debug( + " Parsing variable '{}' with data '{}'".format(name, value_str) + ) + + expect( + multiline_variable is None, + "In file '{}', Incomplete multiline variable: '{}'".format( + filename, + multiline_variable[0] if multiline_variable is not None else "", + ), + ) + expect( + name not in rv[current_namelist], + "In file '{}', Duplicate name: '{}'".format(filename, name), + ) + + real_value = _interpret_value(value_str, filename) + + rv[current_namelist][name] = real_value + logger.debug(" Adding value: {}".format(real_value)) + + if line.endswith(","): + # Value will continue on in subsequent lines + multiline_variable = (name, real_value) + + logger.debug(" Var is multiline...") + + elif multiline_variable is not None: + # Continuation of list or dict variable + current_value = multiline_variable[1] + logger.debug( + " Continuing multiline variable '{}' with data '{}'".format( + multiline_variable[0], line + ) + ) + + real_value = _interpret_value(line, filename) + if type(current_value) is list: + expect( + type(real_value) is not dict, + "In file '{}', multiline list variable '{}' had dict entries".format( + filename, multiline_variable[0] + ), + ) + real_value = real_value if type(real_value) is list else [real_value] + current_value.extend(real_value) + + elif type(current_value) is dict: + expect( + type(real_value) is dict, + "In file '{}', multiline dict variable '{}' had non-dict entries".format( + filename, multiline_variable[0] + ), + ) + current_value.update(real_value) + + else: + expect( + False, + "In file '{}', Continuation should have been for list or dict, instead it was: '{}'".format( + filename, type(current_value) + ), + ) + + logger.debug(" Adding value: {}".format(real_value)) + + if not line.endswith(","): + # Completed + multiline_variable = None + + logger.debug(" Terminating multiline variable") + + else: + expect( + False, "In file '{}', Unrecognized line: '{}'".format(filename, line) + ) + + return rv + + +############################################################################### +def _normalize_string_value(name, value, case): + ############################################################################### + """ + Some of the string in namelists will contain data that's inherently prone + to diffs, like file paths, etc. This function attempts to normalize that + data so that it will not cause diffs. 
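+
+ Illustrative behaviour (hypothetical values, not doctests):
+
+ _normalize_string_value("diri", "/glade/scratch/user/topo.nc", None) # -> 'topo.nc'
+ _normalize_string_value("logfile", "cpl.log.150514-001533", None) # -> 'LOGFILE'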
+ """ + # Any occurance of case must be normalized because test-ids might not match + if case is not None: + case_re = re.compile(r"{}[.]([GC]+)[.]([^./\s]+)".format(case)) + value = case_re.sub("{}.ACTION.TESTID".format(case), value) + + if name in ["runid", "model_version", "username", "logfile"]: + # Don't even attempt to diff these, we don't care + return name.upper() + elif ":" in value: + items = value.split(":") + items = [_normalize_string_value(name, item, case) for item in items] + return ":".join(items) + elif "/" in value: + # Handle special format scale*path, normalize the path and reconstruct + parsed = re.match(r"^([^*]+\*)(/[^/]+)*", value) + if parsed is not None and len(parsed.groups()) == 2: + items = list(parsed.groups()) + items[1] = os.path.basename(items[1]) + return "".join(items) + + # File path, just return the basename unless its a seq_maps.rc mapping + # mapname or maptype + if "mapname" not in name and "maptype" not in name: + return os.path.basename(value) + else: + return value + else: + return value + + +############################################################################### +def _compare_values(name, gold_value, comp_value, case): + ############################################################################### + """ + Compare values for a specific variable in a namelist. + + Returns comments + + Note there will only be comments if values did not match + """ + comments = "" + if type(gold_value) != type(comp_value): + comments += " variable '{}' did not have expected type '{}', instead is type '{}'\n".format( + name, type(gold_value), type(comp_value) + ) + return comments + + if type(gold_value) is list: + # Note, list values remain order sensitive + for idx, gold_value_list_item in enumerate(gold_value): + if idx < len(comp_value): + comments += _compare_values( + "{} list item {:d}".format(name, idx), + gold_value_list_item, + comp_value[idx], + case, + ) + else: + comments += " list variable '{}' missing value {}\n".format( + name, gold_value_list_item + ) + + if len(comp_value) > len(gold_value): + for comp_value_list_item in comp_value[len(gold_value) :]: + comments += " list variable '{}' has extra value {}\n".format( + name, comp_value_list_item + ) + + elif type(gold_value) is dict: + for key, gold_value_dict_item in gold_value.items(): + if key in comp_value: + comments += _compare_values( + "{} dict item {}".format(name, key), + gold_value_dict_item, + comp_value[key], + case, + ) + else: + comments += ( + " dict variable '{}' missing key {} with value {}\n".format( + name, key, gold_value_dict_item + ) + ) + + for key in comp_value: + if key not in gold_value: + comments += ( + " dict variable '{}' has extra key {} with value {}\n".format( + name, key, comp_value[key] + ) + ) + + else: + expect( + isinstance(gold_value, str), + "Unexpected type found: '{}'".format(type(gold_value)), + ) + norm_gold_value = _normalize_string_value(name, gold_value, case) + norm_comp_value = _normalize_string_value(name, comp_value, case) + + if norm_gold_value != norm_comp_value: + comments += " BASE: {} = {}\n".format(name, norm_gold_value) + comments += " COMP: {} = {}\n".format(name, norm_comp_value) + + return comments + + +############################################################################### +def _compare_namelists(gold_namelists, comp_namelists, case): + ############################################################################### + """ + Compare two namelists. Print diff information if any. 
+ Returns comments + Note there will only be comments if the namelists were not an exact match + + Expect args in form: {namelist -> {key -> value} }. + value can be an int, string, list, or dict + + >>> teststr = '''&nml + ... val = 'foo' + ... aval = 'one','two', 'three' + ... maval = 'one', 'two', 'three', 'four' + ... dval = 'one -> two', 'three -> four' + ... mdval = 'one -> two', 'three -> four', 'five -> six' + ... nval = 1850 + ... / + ... &nml2 + ... val2 = .false. + ... / + ... ''' + >>> _compare_namelists(_parse_namelists(teststr.splitlines(), 'foo'), _parse_namelists(teststr.splitlines(), 'bar'), None) + '' + >>> teststr1 = '''&nml1 + ... val11 = 'foo' + ... / + ... &nml2 + ... val21 = 'foo' + ... val22 = 'foo', 'bar', 'baz' + ... val23 = 'baz' + ... val24 = '1 -> 2', '2 -> 3', '3 -> 4' + ... / + ... &nml3 + ... val3 = .false. + ... /''' + >>> teststr2 = '''&nml01 + ... val11 = 'foo' + ... / + ... &nml2 + ... val21 = 'foo0' + ... val22 = 'foo', 'bar0', 'baz' + ... val230 = 'baz' + ... val24 = '1 -> 20', '2 -> 3', '30 -> 4' + ... / + ... &nml3 + ... val3 = .false. + ... /''' + >>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), None) + >>> print(comments) + Missing namelist: nml1 + Differences in namelist 'nml2': + BASE: val21 = 'foo' + COMP: val21 = 'foo0' + BASE: val22 list item 1 = 'bar' + COMP: val22 list item 1 = 'bar0' + missing variable: 'val23' + BASE: val24 dict item 1 = 2 + COMP: val24 dict item 1 = 20 + dict variable 'val24' missing key 3 with value 4 + dict variable 'val24' has extra key 30 with value 4 + found extra variable: 'val230' + Found extra namelist: nml01 + + + >>> teststr1 = '''&rad_cnst_nl + ... icecldoptics = 'mitchell' + ... logfile = 'cpl.log.150514-001533' + ... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-230221' + ... runid = 'FOO' + ... model_version = 'cam5_3_36' + ... username = 'jgfouca' + ... iceopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/iceoptics_c080917.nc' + ... liqcldoptics = 'gammadist' + ... liqopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc' + ... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+', + ... 'A:so4_a1:N:so4_c1:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+', + ... 'A:soa_a1:N:soa_c1:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/projects/ccsm/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+', + ... 'A:dst_a1:N:dst_c1:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', + ... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+', + ... 'A:so4_a2:N:so4_c2:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', + ... 'A:ncl_a2:N:ncl_c2:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=', + ... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', + ... 
'A:ncl_a3:N:ncl_c3:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc' + ... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2', + ... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4', + ... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc', + ... 'M:mam3_mode2:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc' + ... /''' + >>> teststr2 = '''&rad_cnst_nl + ... icecldoptics = 'mitchell' + ... logfile = 'cpl.log.150514-2398745' + ... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-1274213' + ... runid = 'BAR' + ... model_version = 'cam5_3_36' + ... username = 'hudson' + ... iceopticsfile = '/something/else/inputdata/atm/cam/physprops/iceoptics_c080917.nc' + ... liqcldoptics = 'gammadist' + ... liqopticsfile = '/something/else/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc' + ... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+', + ... 'A:so4_a1:N:so4_c1:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/something/else/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+', + ... 'A:soa_a1:N:soa_c1:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/something/else/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+', + ... 'A:dst_a1:N:dst_c1:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', + ... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+', + ... 'A:so4_a2:N:so4_c2:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', + ... 'A:ncl_a2:N:ncl_c2:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=', + ... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', + ... 'A:ncl_a3:N:ncl_c3:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc' + ... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2', + ... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4', + ... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/something/else/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc', + ... 'M:mam3_mode2:/something/else/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/something/else/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc' + ... /''' + >>> _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), 'ERB.f19_g16.B1850C5.sandiatoss3_intel') + '' + >>> teststr1 = '''&nml + ... csw_specifier = 'DMS -> 1.0 * value.nc' + ... /''' + >>> _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'),\ + _parse_namelists(teststr1.splitlines(), 'foo'), "case") + '' + >>> teststr2 = '''&nml + ... csw_specifier = 'DMS -> 2.0 * value.nc' + ... 
/''' + >>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'),\ + _parse_namelists(teststr2.splitlines(), 'foo'), "case") + >>> print(comments) + BASE: csw_specifier dict item DMS = 1.0*value.nc + COMP: csw_specifier dict item DMS = 2.0*value.nc + + >>> teststr2 = '''&nml + ... csw_specifier = 'DMS -> 1.0 * other.nc' + ... /''' + >>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'),\ + _parse_namelists(teststr2.splitlines(), 'foo'), "case") + >>> print(comments) + BASE: csw_specifier dict item DMS = 1.0*value.nc + COMP: csw_specifier dict item DMS = 1.0*other.nc + + """ + different_namelists = {} + for namelist, gold_names in gold_namelists.items(): + if namelist not in comp_namelists: + different_namelists[namelist] = ["Missing namelist: {}\n".format(namelist)] + else: + comp_names = comp_namelists[namelist] + for name, gold_value in gold_names.items(): + if name not in comp_names: + different_namelists.setdefault(namelist, []).append( + " missing variable: '{}'\n".format(name) + ) + else: + comp_value = comp_names[name] + comments = _compare_values(name, gold_value, comp_value, case) + if comments != "": + different_namelists.setdefault(namelist, []).append(comments) + + for name in comp_names: + if name not in gold_names: + different_namelists.setdefault(namelist, []).append( + " found extra variable: '{}'\n".format(name) + ) + + for namelist in comp_namelists: + if namelist not in gold_namelists: + different_namelists[namelist] = [ + "Found extra namelist: {}\n".format(namelist) + ] + + comments = "" + for namelist, nlcomment in different_namelists.items(): + if len(nlcomment) == 1: + comments += nlcomment[0] + else: + comments += "Differences in namelist '{}':\n".format(namelist) + comments += "".join(nlcomment) + + return comments + + +############################################################################### +def compare_namelist_files(gold_file, compare_file, case=None): + ############################################################################### + """ + Returns (is_match, comments) + """ + expect(os.path.exists(gold_file), "File not found: {}".format(gold_file)) + expect(os.path.exists(compare_file), "File not found: {}".format(compare_file)) + + gold_namelists = _parse_namelists(open(gold_file, "r").readlines(), gold_file) + comp_namelists = _parse_namelists(open(compare_file, "r").readlines(), compare_file) + comments = _compare_namelists(gold_namelists, comp_namelists, case) + return comments == "", comments + + +############################################################################### +def is_namelist_file(file_path): + ############################################################################### + try: + compare_namelist_files(file_path, file_path) + except CIMEError as e: + assert "does not appear to be a namelist file" in str(e), str(e) + return False + return True diff --git a/CIME/compare_test_results.py b/CIME/compare_test_results.py new file mode 100644 index 00000000000..a46dbe50fe7 --- /dev/null +++ b/CIME/compare_test_results.py @@ -0,0 +1,224 @@ +import CIME.compare_namelists, CIME.simple_compare +from CIME.status import append_status +from CIME.utils import EnvironmentContext, parse_test_name +from CIME.test_status import * +from CIME.hist_utils import compare_baseline, get_ts_synopsis +from CIME.case import Case +from CIME.test_utils import get_test_status_files + +import os, logging + +############################################################################### +def 
append_status_cprnc_log(msg, logfile_name, test_dir): + ############################################################################### + try: + append_status(msg, logfile_name, caseroot=test_dir) + except IOError: + pass + + +############################################################################### +def compare_namelists(case, baseline_name, baseline_root, logfile_name): + ############################################################################### + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + success = case.case_cmpgen_namelists( + compare=True, + compare_name=baseline_name, + baseline_root=baseline_root, + logfile_name=logfile_name, + ) + logging.getLogger().setLevel(log_lvl) + return success + + +############################################################################### +def compare_history(case, baseline_name, baseline_root, log_id): + ############################################################################### + real_user = case.get_value("REALUSER") + with EnvironmentContext(USER=real_user): + baseline_full_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + outfile_suffix = "{}.{}".format(baseline_name, log_id) + try: + result, comments = compare_baseline( + case, baseline_dir=baseline_full_dir, outfile_suffix=outfile_suffix + ) + except IOError: + result, comments = compare_baseline( + case, baseline_dir=baseline_full_dir, outfile_suffix=None + ) + + return result, comments + + +############################################################################### +def compare_test_results( + baseline_name, + baseline_root, + test_root, + compiler, + test_id=None, + compare_tests=None, + namelists_only=False, + hist_only=False, +): + ############################################################################### + """ + Compares with baselines for all matching tests + + Outputs results for each test to stdout (one line per test); possible status + codes are: PASS, FAIL, SKIP. (A SKIP denotes a test that did not make it to + the run phase or a test for which the run phase did not pass: we skip + baseline comparisons in this case.) + + In addition, creates files named compare.log.BASELINE_NAME.TIMESTAMP in each + test directory, which contain more detailed output. Also creates + *.cprnc.out.BASELINE_NAME.TIMESTAMP files in each run directory. + + Returns True if all tests generated either PASS or SKIP results, False if + there was at least one FAIL result. + """ + test_status_files = get_test_status_files(test_root, compiler, test_id=test_id) + + # ID to use in the log file names, to avoid file name collisions with + # earlier files that may exist. 
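+ # A minimal call sketch (the names and paths below are hypothetical):
+ #
+ # compare_test_results("cesm2_3_beta17", "/glade/baselines",
+ # "/scratch/tests", "intel", test_id="20240101_120000")
+ #
+ # would leave a compare.log.cesm2_3_beta17.<log_id> in each test directory.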
+ log_id = CIME.utils.get_timestamp() + + all_pass_or_skip = True + + compare_tests_counts = None + if compare_tests: + compare_tests_counts = dict( + [(compare_test, 0) for compare_test in compare_tests] + ) + + for test_status_file in test_status_files: + test_dir = os.path.dirname(test_status_file) + ts = TestStatus(test_dir=test_dir) + test_name = ts.get_name() + testopts = parse_test_name(test_name)[1] + testopts = [] if testopts is None else testopts + build_only = "B" in testopts + + if not compare_tests or CIME.utils.match_any(test_name, compare_tests_counts): + + if not hist_only: + nl_compare_result = None + nl_compare_comment = "" + nl_result = ts.get_status(SETUP_PHASE) + if nl_result is None: + nl_compare_result = "SKIP" + nl_compare_comment = "Test did not make it to setup phase" + nl_do_compare = False + else: + nl_do_compare = True + else: + nl_do_compare = False + + detailed_comments = "" + if not namelists_only and not build_only: + compare_result = None + compare_comment = "" + run_result = ts.get_status(RUN_PHASE) + if run_result is None: + compare_result = "SKIP" + compare_comment = "Test did not make it to run phase" + do_compare = False + elif run_result != TEST_PASS_STATUS: + compare_result = "SKIP" + compare_comment = "Run phase did not pass" + do_compare = False + else: + do_compare = True + else: + do_compare = False + + with Case(test_dir) as case: + if baseline_name is None: + baseline_name = case.get_value("BASELINE_NAME_CMP") + if not baseline_name: + baseline_name = CIME.utils.get_current_branch( + repo=CIME.utils.get_cime_root() + ) + + if baseline_root is None: + baseline_root = case.get_value("BASELINE_ROOT") + + logfile_name = "compare.log.{}.{}".format( + baseline_name.replace("/", "_"), log_id + ) + + append_status_cprnc_log( + "Comparing against baseline with compare_test_results:\n" + "Baseline: {}\n In baseline_root: {}".format( + baseline_name, baseline_root + ), + logfile_name, + test_dir, + ) + + if nl_do_compare or do_compare: + if nl_do_compare: + nl_success = compare_namelists( + case, baseline_name, baseline_root, logfile_name + ) + if nl_success: + nl_compare_result = TEST_PASS_STATUS + nl_compare_comment = "" + else: + nl_compare_result = TEST_FAIL_STATUS + nl_compare_comment = "See {}/{}".format(test_dir, logfile_name) + all_pass_or_skip = False + + if do_compare: + success, detailed_comments = compare_history( + case, baseline_name, baseline_root, log_id + ) + if success: + compare_result = TEST_PASS_STATUS + else: + compare_result = TEST_FAIL_STATUS + all_pass_or_skip = False + + compare_comment = get_ts_synopsis(detailed_comments) + + brief_result = "" + if not hist_only: + brief_result += "{} {} {} {}\n".format( + nl_compare_result, test_name, NAMELIST_PHASE, nl_compare_comment + ) + + if not namelists_only: + brief_result += "{} {} {}".format( + compare_result, test_name, BASELINE_PHASE + ) + if compare_comment: + brief_result += " {}".format(compare_comment) + brief_result += "\n" + + print(brief_result) + + append_status_cprnc_log(brief_result, logfile_name, test_dir) + + if detailed_comments: + append_status_cprnc_log( + "Detailed comments:\n" + detailed_comments, logfile_name, test_dir + ) + + # Emit a warning if items in compare_tests did not match anything + if compare_tests: + for compare_test, compare_count in compare_tests_counts.items(): + if compare_count == 0: + logging.warning( + """ +compare test arg '{}' did not match any tests in test_root {} with +compiler {} and test_id {}. 
It's possible that one of these arguments +had a mistake (likely compiler or testid).""".format( + compare_test, test_root, compiler, test_id + ) + ) + + return all_pass_or_skip diff --git a/CIME/config.py b/CIME/config.py new file mode 100644 index 00000000000..f63a4bea78d --- /dev/null +++ b/CIME/config.py @@ -0,0 +1,387 @@ +import os +import re +import sys +import logging +import importlib.machinery +import importlib.util +import inspect +from pathlib import Path + +from CIME import utils + +logger = logging.getLogger(__name__) + +DEFAULT_CUSTOMIZE_PATH = os.path.join(utils.get_src_root(), "cime_config", "customize") + + +def print_rst_header(header, anchor=None, separator='"'): + n = len(header) + if anchor is not None: + print(f".. _{anchor}\n") + print(separator * n) + print(header) + print(separator * n) + + +def print_rst_table(headers, *rows): + column_widths = [] + + columns = [[rows[y][x] for y in range(len(rows))] for x in range(len(rows[0]))] + + for header, column in zip(headers, columns): + column_widths.append( + max( + [ + len(x) + for x in [ + header, + ] + + column + ] + ) + ) + + divider = " ".join([f"{'=' * x}" for x in column_widths]) + + print(divider) + print(" ".join(f"{y}{' ' * (x - len(y))}" for x, y in zip(column_widths, headers))) + print(divider) + + for row in rows: + print(" ".join([f"{y}{' ' * (x-len(y))}" for x, y in zip(column_widths, row)])) + + print(divider) + + +class ConfigBase: + def __new__(cls): + if not hasattr(cls, "_instance"): + cls._instance = super(ConfigBase, cls).__new__(cls) + + return cls._instance + + def __init__(self): + self._attribute_config = {} + + @property + def loaded(self): + return getattr(self, "_loaded", False) + + @classmethod + def instance(cls): + """Access singleton. + + Explicit way to access singleton, same as calling constructor. 
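+
+ A minimal sketch of the singleton behaviour:
+
+ first = Config.instance()
+ second = Config()
+ assert first is second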
+ """ + return cls() + + @classmethod + def load(cls, customize_path): + obj = cls() + + logger.debug("Searching %r for files to load", customize_path) + + customize_path = Path(customize_path) + + if customize_path.is_file(): + customize_files = [f"{customize_path}"] + else: + ignore_pattern = re.compile(f"{customize_path}/(?:tests|conftest|test_)") + + # filter out any tests + customize_files = [ + f"{x}" + for x in customize_path.glob("**/*.py") + if ignore_pattern.search(f"{x}") is None + ] + + customize_module_spec = importlib.machinery.ModuleSpec("cime_customize", None) + + customize_module = importlib.util.module_from_spec(customize_module_spec) + + sys.modules["CIME.customize"] = customize_module + + for x in sorted(customize_files): + obj._load_file(x, customize_module) + + setattr(obj, "_loaded", True) + + return obj + + def _load_file(self, file_path, customize_module): + logger.debug("Loading file %r", file_path) + + raw_config = utils.import_from_file("raw_config", file_path) + + # filter user define variables and functions + user_defined = [x for x in dir(raw_config) if not x.endswith("__")] + + # set values on this object, will overwrite existing + for x in user_defined: + try: + value = getattr(raw_config, x) + except AttributeError: + # should never hit this + logger.fatal("Attribute %r missing on obejct", x) + + sys.exit(1) + else: + setattr(customize_module, x, value) + + self._set_attribute(x, value) + + def _set_attribute(self, name, value, desc=None): + if hasattr(self, name): + logger.debug("Overwriting %r attribute", name) + + logger.debug("Setting attribute %r with value %r", name, value) + + setattr(self, name, value) + + self._attribute_config[name] = { + "desc": desc, + "default": value, + } + + def print_rst_table(self): + self.print_variable_rst() + + print("") + + self.print_method_rst() + + def print_variable_rst(self): + print_rst_header("Variables", anchor=f"{self.__class__.__name__} Variables:") + + headers = ("Variable", "Default", "Type", "Description") + + rows = ( + (x, str(y["default"]), type(y["default"]).__name__, y["desc"]) + for x, y in self._attribute_config.items() + ) + + print_rst_table(headers, *rows) + + def print_method_rst(self): + print_rst_header("Methods", anchor=f"{self.__class__.__name__} Methods:") + + methods = inspect.getmembers(self, inspect.ismethod) + + ignore = ( + "__init__", + "loaded", + "load", + "instance", + "_load_file", + "_set_attribute", + "print_rst_table", + "print_method_rst", + "print_variable_rst", + ) + + child_methods = [ + (x[0], inspect.signature(x[1]), inspect.getdoc(x[1])) + for x in methods + if x[1].__class__ != Config and x[0] not in ignore + ] + + for (name, sig, doc) in child_methods: + if doc is None: + continue + print(".. 
code-block::\n") + print(f" def {name}{sig!s}:") + print(' """') + for line in doc.split("\n"): + print(f" {line}") + print(' """') + + +class Config(ConfigBase): + @classmethod + def load_defaults(cls): + return cls.load(DEFAULT_CUSTOMIZE_PATH) + + def __init__(self): + super().__init__() + + if self.loaded: + return + + self._set_attribute( + "additional_archive_components", + ("drv", "dart"), + desc="Additional components to archive.", + ) + self._set_attribute( + "verbose_run_phase", + False, + desc="If set to `True` then after a SystemTests successful run phase the elapsed time is recorded to BASELINE_ROOT, on a failure the test is checked against the previous run and potential breaking merges are listed in the testlog.", + ) + self._set_attribute( + "baseline_store_teststatus", + True, + desc="If set to `True` and GENERATE_BASELINE is set then a teststatus.log is created in the case's baseline.", + ) + self._set_attribute( + "common_sharedlibroot", + True, + desc="If set to `True` then SHAREDLIBROOT is set for the case and SystemTests will only build the shared libs once.", + ) + self._set_attribute( + "create_test_flag_mode", + "cesm", + desc="Sets the flag mode for the `create_test` script. When set to `cesm`, the `-c` flag will compare baselines against a give directory.", + ) + self._set_attribute( + "use_kokkos", + False, + desc="If set to `True` and CAM_TARGET is `preqx_kokkos`, `theta-l` or `theta-l_kokkos` then kokkos is built with the shared libs.", + ) + self._set_attribute( + "shared_clm_component", + True, + desc="If set to `True` and then the `clm` land component is built as a shared lib.", + ) + self._set_attribute( + "ufs_alternative_config", + False, + desc="If set to `True` and UFS_DRIVER is set to `nems` then model config dir is set to `$CIMEROOT/../src/model/NEMS/cime/cime_config`.", + ) + self._set_attribute( + "enable_smp", + True, + desc="If set to `True` then `SMP=` is added to model compile command.", + ) + self._set_attribute( + "build_model_use_cmake", + False, + desc="If set to `True` the model is built using using CMake otherwise Make is used.", + ) + self._set_attribute( + "build_cime_component_lib", + True, + desc="If set to `True` then `Filepath`, `CIME_cppdefs` and `CCSM_cppdefs` directories are copied from CASEBUILD directory to BUILDROOT in order to build CIME's internal components.", + ) + self._set_attribute( + "default_short_term_archiving", + True, + desc="If set to `True` and the case is not a test then DOUT_S is set to True and TIMER_LEVEL is set to 4.", + ) + # TODO combine copy_e3sm_tools and copy_cesm_tools into a single variable + self._set_attribute( + "copy_e3sm_tools", + False, + desc="If set to `True` then E3SM specific tools are copied into the case directory.", + ) + self._set_attribute( + "copy_cesm_tools", + True, + desc="If set to `True` then CESM specific tools are copied into the case directory.", + ) + self._set_attribute( + "copy_cism_source_mods", + True, + desc="If set to `True` then `$CASEROOT/SourceMods/src.cism/source_cism` is created and a README is written to directory.", + ) + self._set_attribute( + "make_case_run_batch_script", + False, + desc="If set to `True` and case is not a test then `case.run.sh` is created in case directory from `$MACHDIR/template.case.run.sh`.", + ) + self._set_attribute( + "case_setup_generate_namelist", + False, + desc="If set to `True` and case is a test then namelists are created during `case.setup`.", + ) + self._set_attribute( + "create_bless_log", + False, + desc="If set to `True` and 
comparing test to baselines the most recent bless is added to comments.", + ) + self._set_attribute( + "allow_unsupported", + True, + desc="If set to `True` then unsupported compsets and resolutions are allowed.", + ) + # set for ufs + self._set_attribute( + "check_machine_name_from_test_name", + True, + desc="If set to `True` then the TestScheduler will use testlists to parse for a list of tests.", + ) + self._set_attribute( + "sort_tests", + False, + desc="If set to `True` then the TestScheduler will sort tests by runtime.", + ) + self._set_attribute( + "calculate_mode_build_cost", + False, + desc="If set to `True` then the TestScheduler will set the number of processors for building the model to min(16, (($GMAKE_J * 2) / 3) + 1) otherwise it's set to 4.", + ) + self._set_attribute( + "share_exes", + False, + desc="If set to `True` then the TestScheduler will share exes between tests.", + ) + + self._set_attribute( + "serialize_sharedlib_builds", + True, + desc="If set to `True` then the TestScheduler will use `proc_pool + 1` processors to build shared libraries otherwise a single processor is used.", + ) + + self._set_attribute( + "use_testreporter_template", + True, + desc="If set to `True` then the TestScheduler will create `testreporter` in $CIME_OUTPUT_ROOT.", + ) + + self._set_attribute( + "check_invalid_args", + True, + desc="If set to `True` then script arguments are checked for being valid.", + ) + self._set_attribute( + "test_mode", + "cesm", + desc="Sets the testing mode, this changes various configuration for CIME's unit and system tests.", + ) + self._set_attribute( + "xml_component_key", + "COMP_ROOT_DIR_{}", + desc="The string template used as the key to query the XML system to find a components root directory e.g. the template `COMP_ROOT_DIR_{}` and component `LND` becomes `COMP_ROOT_DIR_LND`.", + ) + self._set_attribute( + "set_comp_root_dir_cpl", + True, + desc="If set to `True` then COMP_ROOT_DIR_CPL is set for the case.", + ) + self._set_attribute( + "use_nems_comp_root_dir", + False, + desc="If set to `True` then COMP_ROOT_DIR_CPL is set using UFS_DRIVER if defined.", + ) + self._set_attribute( + "test_custom_project_machine", + "melvin", + desc="Sets the machine name to use when testing a machine with no PROJECT.", + ) + self._set_attribute( + "driver_default", "nuopc", desc="Sets the default driver for the model." 
+ ) + self._set_attribute( + "driver_choices", + ("nuopc",), + desc="Sets the available driver choices for the model.", + ) + self._set_attribute( + "mct_path", + "{srcroot}/libraries/mct", + desc="Sets the path to the mct library.", + ) + self._set_attribute( + "mpi_serial_path", + "{srcroot}/libraries/mpi-serial", + desc="Sets the path to the mpi-serial library.", + ) diff --git a/CIME/cs_status.py b/CIME/cs_status.py new file mode 100644 index 00000000000..6a65ca4da71 --- /dev/null +++ b/CIME/cs_status.py @@ -0,0 +1,136 @@ +""" +Implementation of the cs.status script, which prints the status of all +of the tests in one or more test suites +""" + +from __future__ import print_function +from CIME.XML.standard_module_setup import * +from CIME.XML.expected_fails_file import ExpectedFailsFile +from CIME.test_status import TestStatus, SHAREDLIB_BUILD_PHASE, TEST_PEND_STATUS +import os +import sys +from collections import defaultdict + + +def cs_status( + test_paths, + summary=False, + fails_only=False, + count_fails_phase_list=None, + check_throughput=False, + check_memory=False, + expected_fails_filepath=None, + force_rebuild=False, + out=sys.stdout, +): + """Print the test statuses of all tests in test_paths. The default + is to print to stdout, but this can be overridden with the 'out' + argument. + + If summary is True, then only the overall status of each test is printed + + If fails_only is True, then only test failures are printed (this + includes PENDs as well as FAILs). + + If count_fails_phase_list is provided, it should be a list of phases + (from the phases given by test_status.ALL_PHASES). For each phase in + this list: do not give line-by-line output; instead, just report the + total number of tests that have not PASSed this phase (this includes + PENDs and FAILs). (This is typically used with the fails_only + option, but it can also be used without that option.) + + If expected_fails_filepath is provided, it should be a string giving + the full path to a file listing expected failures for this test + suite. Expected failures are then labeled as such in the output. 
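+
+ A minimal usage sketch (the test-root path and test id are hypothetical):
+
+ import glob
+ cs_status(glob.glob("/scratch/tests/*.20240101_120000/TestStatus"),
+ fails_only=True)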
+ """ + expect(not (summary and fails_only), "Cannot have both summary and fails_only") + expect( + not (summary and count_fails_phase_list), + "Cannot have both summary and count_fails_phase_list", + ) + if count_fails_phase_list is None: + count_fails_phase_list = [] + non_pass_counts = dict.fromkeys(count_fails_phase_list, 0) + xfails = _get_xfails(expected_fails_filepath) + test_id_output = defaultdict(str) + test_id_counts = defaultdict(int) + for test_path in test_paths: + test_dir = os.path.dirname(test_path) + ts = TestStatus(test_dir=test_dir) + + if force_rebuild: + with ts: + ts.set_status(SHAREDLIB_BUILD_PHASE, TEST_PEND_STATUS) + + test_id = os.path.basename(test_dir).split(".")[-1] + if summary: + output = _overall_output( + ts, " {status} {test_name}\n", check_throughput, check_memory + ) + else: + if fails_only: + output = "" + else: + output = _overall_output( + ts, + " {test_name} (Overall: {status}) details:\n", + check_throughput, + check_memory, + ) + output += ts.phase_statuses_dump( + prefix=" ", + skip_passes=fails_only, + skip_phase_list=count_fails_phase_list, + xfails=xfails.get(ts.get_name()), + ) + if count_fails_phase_list: + ts.increment_non_pass_counts(non_pass_counts) + + test_id_output[test_id] += output + test_id_counts[test_id] += 1 + + for test_id in sorted(test_id_output): + count = test_id_counts[test_id] + print( + "{}: {} test{}".format(test_id, count, "s" if count > 1 else ""), file=out + ) + print(test_id_output[test_id], file=out) + print(" ", file=out) + + if count_fails_phase_list: + print(72 * "=", file=out) + print("Non-PASS results for select phases:", file=out) + for phase in count_fails_phase_list: + print("{} non-passes: {}".format(phase, non_pass_counts[phase]), file=out) + + +def _get_xfails(expected_fails_filepath): + """Returns a dictionary of ExpectedFails objects, where the keys are test names + + expected_fails_filepath should be either a string giving the path to + the file containing expected failures, or None. If None, then this + returns an empty dictionary (as if expected_fails_filepath were + pointing to a file with no expected failures listed). + """ + if expected_fails_filepath is not None: + expected_fails_file = ExpectedFailsFile(expected_fails_filepath) + xfails = expected_fails_file.get_expected_fails() + else: + xfails = {} + return xfails + + +def _overall_output(ts, format_str, check_throughput, check_memory): + """Returns a string giving the overall test status + + Args: + ts: TestStatus object + format_str (string): string giving the format of the output; must + contain place-holders for status and test_name + """ + test_name = ts.get_name() + status = ts.get_overall_test_status( + check_throughput=check_throughput, + check_memory=check_memory, + )[0] + return format_str.format(status=status, test_name=test_name) diff --git a/CIME/cs_status_creator.py b/CIME/cs_status_creator.py new file mode 100644 index 00000000000..787ff5d32a3 --- /dev/null +++ b/CIME/cs_status_creator.py @@ -0,0 +1,48 @@ +""" +Creates a test suite-specific cs.status file from a template +""" + +from CIME.XML.standard_module_setup import * +import CIME.utils +import os +import stat + + +def create_cs_status(test_root, test_id, extra_args="", filename=None): + """Create a test suite-specific cs.status file from the template + + Arguments: + test_root (string): path to test root; the file will be put here. If + this directory doesn't exist, it is created. + test_id (string): test id for this test suite. 
This can contain + shell wildcards if you want this one cs.status file to work + across multiple test suites. However, be careful not to make + this too general: for example, ending this with '*' will pick up + the *.ref1 directories for ERI and other tests, which is NOT + what you want. + extra_args (string): extra arguments to the cs.status command + (If there are multiple arguments, these should be in a space-delimited string.) + filename (string): name of the generated cs.status file. If not + given, this will be built from the test_id. + """ + cime_root = CIME.utils.get_cime_root() + tools_path = os.path.join(cime_root, "CIME", "Tools") + template_path = CIME.utils.get_template_path() + template_file = os.path.join(template_path, "cs.status.template") + template = open(template_file, "r").read() + template = ( + template.replace("", tools_path) + .replace("", extra_args) + .replace("", test_id) + .replace("", test_root) + ) + if not os.path.exists(test_root): + os.makedirs(test_root) + if filename is None: + filename = "cs.status.{}".format(test_id) + cs_status_file = os.path.join(test_root, filename) + with open(cs_status_file, "w") as fd: + fd.write(template) + os.chmod( + cs_status_file, os.stat(cs_status_file).st_mode | stat.S_IXUSR | stat.S_IXGRP + ) diff --git a/scripts/lib/CIME/tests/XML/__init__.py b/CIME/data/__init__.py similarity index 100% rename from scripts/lib/CIME/tests/XML/__init__.py rename to CIME/data/__init__.py diff --git a/scripts/lib/CIME/tests/__init__.py b/CIME/data/config/__init__.py similarity index 100% rename from scripts/lib/CIME/tests/__init__.py rename to CIME/data/config/__init__.py diff --git a/CIME/data/config/cesm/config_files.xml b/CIME/data/config/cesm/config_files.xml new file mode 100644 index 00000000000..6113148a72a --- /dev/null +++ b/CIME/data/config/cesm/config_files.xml @@ -0,0 +1,666 @@ + + + + + + + + char + cesm + case_der + env_case.xml + model system name + + + + + + + + char + $CIMEROOT/CIME/data/config/config_headers.xml + case_der + env_case.xml + contains both header and group information for all the case env_*.xml files + + + + char + $SRCROOT/ccs_config/config_grids.xml + + $SRCROOT/ccs_config/config_grids_nuopc.xml + + case_last + env_case.xml + file containing specification of all supported model grids, domains and mapping files (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_grids_v2.2.xsd + + + + char + $SRCROOT/ccs_config/machines/config_machines.xml + case_last + env_case.xml + file containing machine specifications for target model primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_machines.xsd + $CIMEROOT/CIME/data/config/xml_schemas/config_machines_version3.xsd + + + + char + $SRCROOT/ccs_config/machines/config_batch.xml + case_last + env_case.xml + file containing batch system details for target system (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_batch.xsd + + + + char + $SRCROOT/tools/CUPiD/cime_config/config_tool.xml + case_last + env_case.xml + file containing postprocessing XML configuration (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + + + + char + $SRCROOT/ccs_config/machines/config_workflow.xml + case_last + env_case.xml + file containing workflow (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_workflow.xsd + + + + char + $SRCROOT/ccs_config/config_inputdata.xml + case_last + env_case.xml + 
file containing inputdata server descriptions (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_inputdata.xsd + + + + char + $SRCROOT/ccs_config/machines/config_compilers.xml + case_last + env_case.xml + file containing compiler specifications for target model primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_compilers_v2.xsd + + + + char + $SRCROOT/ccs_config/machines/cmake_macros + case_last + env_case.xml + Directory containing cmake macros (for documentation only - DO NOT EDIT) + + + + char + $SRCROOT/ccs_config/machines/config_pio.xml + case_last + env_case.xml + file containing specification of pio settings for target model possible machine, compiler, mpilib, compset and/or grid attributes (for documentation only - DO NOT EDIT) + + + + char + + $CIMEROOT/CIME/data/config/config_tests.xml + + $COMP_ROOT_DIR_LND/cime_config/config_tests.xml + $COMP_ROOT_DIR_LND/cime_config/config_tests.xml + $COMP_ROOT_DIR_ATM/cime_config/config_tests.xml + $COMP_ROOT_DIR_GLC/cime_config/config_tests.xml + $COMP_ROOT_DIR_OCN/cime_config/config_tests.xml + + test + env_test.xml + file containing system test descriptions + + + + + + + + + char + + $SRCROOT/components/cdeps/datm + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/satm + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xatm + + $SRCROOT/components/cam/ + $SRCROOT/components/fv3/ + + case_comps + env_case.xml + Root directory of the case atmospheric component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + + $SRCROOT/components/cmeps + $SRCROOT/components/cmeps + $SRCROOT/components/cmeps + + case_comps + env_case.xml + Root directory of the case driver/coupler component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/pop/ + $SRCROOT/components/mom/ + $SRCROOT/components/nemo/ + $SRCROOT/components/blom/ + $SRCROOT/components/cdeps/docn + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/socn + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xocn + + case_comps + env_case.xml + Root directory of the case ocean component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/ww3/ + $SRCROOT/components/cdeps/dwav + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/swav + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xwav + + case_comps + env_case.xml + Root directory of the case wave model component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/cism/ + $SRCROOT/components/cdeps/dglc + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/sglc + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xglc + + case_comps + env_case.xml + Root directory of the case land ice component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/cice5/ + $SRCROOT/components/cice/ + $SRCROOT/components/cdeps/dice + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/sice + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xice + + case_comps + env_case.xml + Root directory of the case sea ice component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/rtm/ + $SRCROOT/components/mosart/ + $SRCROOT/components/mizuRoute/ + 
$SRCROOT/components/cdeps/drof + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/srof + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xrof + + case_comps + env_case.xml + Root directory of the case river runoff model component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/clm/ + $SRCROOT/components/slim/ + $SRCROOT/components/cdeps/dlnd + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/slnd + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xlnd + + case_comps + env_case.xml + Root directory of the case land model component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/siac + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xiac + + case_comps + env_case.xml + Root directory of the case integrated assessment component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $CIMEROOT/CIME/non_py/src/components/data_comps_$COMP_INTERFACE/desp + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/sesp + + case_comps + env_case.xml + Root directory of the case external system processing (esp) component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/cime_config/config_compsets.xml + $COMP_ROOT_DIR_CPL/cime_config/config_compsets.xml + $COMP_ROOT_DIR_ATM/cime_config/config_compsets.xml + $COMP_ROOT_DIR_ATM/cime_config/config_compsets.xml + $COMP_ROOT_DIR_GLC/cime_config/config_compsets.xml + $COMP_ROOT_DIR_LND/cime_config/config_compsets.xml + $COMP_ROOT_DIR_ICE/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml + $COMP_ROOT_DIR_WAV/cime_config/config_compsets.xml + + case_last + env_case.xml + file containing specification of all compsets for primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/cime_config/config_pes.xml + $COMP_ROOT_DIR_CPL/cime_config/config_pes.xml + $COMP_ROOT_DIR_ATM/cime_config/config_pes.xml + $COMP_ROOT_DIR_ATM/cime_config/config_pes.xml + $COMP_ROOT_DIR_GLC/cime_config/config_pes.xml + $COMP_ROOT_DIR_LND/cime_config/config_pes.xml + $COMP_ROOT_DIR_ICE/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml + $COMP_ROOT_DIR_WAV/cime_config/config_pes.xml + + case_last + env_case.xml + file containing specification of all pe-layouts for primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_pes.xsd + + + + char + + $SRCROOT/ccs_config/config_archive.xml + $COMP_ROOT_DIR_CPL/cime_config/config_archive.xml + + $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml + $COMP_ROOT_DIR_ATM/cime_config/config_archive.xml + $COMP_ROOT_DIR_ICE/cime_config/config_archive.xml + $COMP_ROOT_DIR_LND/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_GLC/cime_config/config_archive.xml + $COMP_ROOT_DIR_WAV/cime_config/config_archive.xml + + $COMP_ROOT_DIR_ATM/cime_config/config_archive.xml + 
$COMP_ROOT_DIR_GLC/cime_config/config_archive.xml + $COMP_ROOT_DIR_LND/cime_config/config_archive.xml + $COMP_ROOT_DIR_ICE/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml + $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml + $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml + $COMP_ROOT_DIR_WAV/cime_config/config_archive.xml + + case_last + env_case.xml + file containing specification of archive files for each component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_archive.xsd + + + + char + + $CIMEROOT/CIME/SystemTests + $COMP_ROOT_DIR_LND/cime_config/SystemTests + $COMP_ROOT_DIR_ATM/cime_config/SystemTests + $COMP_ROOT_DIR_OCN/cime_config/SystemTests + $COMP_ROOT_DIR_OCN/cime_config/SystemTests + $COMP_ROOT_DIR_OCN/cime_config/SystemTests + $COMP_ROOT_DIR_OCN/cime_config/SystemTests + $COMP_ROOT_DIR_ICE/cime_config/SystemTests + $COMP_ROOT_DIR_GLC/cime_config/SystemTests + $COMP_ROOT_DIR_ROF/cime_config/SystemTests + $COMP_ROOT_DIR_ROF/cime_config/SystemTests + $COMP_ROOT_DIR_ROF/cime_config/SystemTests + $COMP_ROOT_DIR_WAV/cime_config/SystemTests + + test + env_test.xml + directories containing cime compatible system test modules + + + + char + unset + + $SRCROOT/cime_config/testlist_allactive.xml + $COMP_ROOT_DIR_CPL/cime_config/testdefs/testlist_drv.xml + $COMP_ROOT_DIR_ATM/cime_config/testdefs/testlist_cam.xml + $COMP_ROOT_DIR_GLC/cime_config/testdefs/testlist_cism.xml + $COMP_ROOT_DIR_LND/cime_config/testdefs/testlist_clm.xml + $COMP_ROOT_DIR_LND/cime_config/testdefs/testlist_slim.xml + $COMP_ROOT_DIR_ICE/cime_config/testdefs/testlist_cice.xml + $COMP_ROOT_DIR_ICE/cime_config/testdefs/testlist_cice.xml + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_pop.xml + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_mom.xml + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_nemo.xml + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_blom.xml + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_rtm.xml + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_mosart.xml + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_mizuRoute.xml + $COMP_ROOT_DIR_WAV/cime_config/testdefs/testlist_ww3.xml + $SRCROOT/components/cdeps/datm/cime_config/testdefs/testlist_datm.xml + $SRCROOT/components/cdeps/dice/cime_config/testdefs/testlist_dice.xml + $SRCROOT/components/cdeps/dlnd/cime_config/testdefs/testlist_dlnd.xml + $SRCROOT/components/cdeps/docn/cime_config/testdefs/testlist_docn.xml + $SRCROOT/components/cdeps/drof/cime_config/testdefs/testlist_drof.xml + $SRCROOT/components/cdeps/dglc/cime_config/testdefs/testlist_dglc.xml + $SRCROOT/components/cdeps/dwav/cime_config/testdefs/testlist_dwav.xml + + case_last + env_case.xml + file containing specification of all system tests for primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/testlist.xsd + + + + char + unset + + $SRCROOT/cime_config/testmods_dirs + $COMP_ROOT_DIR_CPL/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ATM/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_GLC/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_LND/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_LND/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ICE/cime_config/testdefs/testmods_dirs + 
$COMP_ROOT_DIR_ICE/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs + $SRCROOT/components/cdeps/datm/cime_config/testdefs/testmods_dirs + $SRCROOT/components/cdeps/dice/cime_config/testdefs/testmods_dirs + $SRCROOT/components/cdeps/dlnd/cime_config/testdefs/testmods_dirs + $SRCROOT/components/cdeps/docn/cime_config/testdefs/testmods_dirs + $SRCROOT/components/cdeps/drof/cime_config/testdefs/testmods_dirs + $SRCROOT/components/cdeps/dglc/cime_config/testdefs/testmods_dirs + $SRCROOT/components/cdeps/dwav/cime_config/testdefs/testmods_dirs + + case_last + env_case.xml + directory containing test modifications for primary component tests (for documentation only - DO NOT EDIT) + + + + char + unset + + $SRCROOT/cime_config/usermods_dirs + $COMP_ROOT_DIR_CPL/cime_config/usermods_dirs + $COMP_ROOT_DIR_ATM/cime_config/usermods_dirs + $COMP_ROOT_DIR_GLC/cime_config/usermods_dirs + $COMP_ROOT_DIR_LND/cime_config/usermods_dirs + $COMP_ROOT_DIR_LND/cime_config/usermods_dirs + $COMP_ROOT_DIR_ICE/cime_config/usermods_dirs + $COMP_ROOT_DIR_ICE/cime_config/usermods_dirs + $COMP_ROOT_DIR_ROF/cime_config/usermods_dirs + $COMP_ROOT_DIR_ROF/cime_config/usermods_dirs + $COMP_ROOT_DIR_ROF/cime_config/usermods_dirs + $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs + $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs + $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs + $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs + + case_last + env_case.xml + directory containing user modifications for primary components (for documentation only - DO NOT EDIT) + + + + char + unset + + $COMP_ROOT_DIR_CPL/cime_config/namelist_definition_drv.xml + + $COMP_ROOT_DIR_ROF/cime_config/namelist_definition_drof.xml + $COMP_ROOT_DIR_ATM/cime_config/namelist_definition_datm.xml + $COMP_ROOT_DIR_ICE/cime_config/namelist_definition_dice.xml + $COMP_ROOT_DIR_LND/cime_config/namelist_definition_dlnd.xml + $COMP_ROOT_DIR_OCN/cime_config/namelist_definition_docn.xml + $COMP_ROOT_DIR_GLC/cime_config/namelist_definition_dglc.xml + $COMP_ROOT_DIR_WAV/cime_config/namelist_definition_dwav.xml + + + + $COMP_ROOT_DIR_LND/bld/namelist_files/namelist_definition_slim.xml + + case_last + env_case.xml + file containing namelist_definitions for all components + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_namelist.xsd + + + + + + + + char + + $COMP_ROOT_DIR_CPL/cime_config/config_component.xml + + case_last + env_case.xml + file containing all non-component specific case configuration variables (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $SRCROOT/components/cmeps/cime_config/config_component_$MODEL.xml --> + + case_last + env_case.xml + file containing all component specific driver configuration variables (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + unset + + $COMP_ROOT_DIR_ATM/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT 
EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_LND/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_ROF/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_ICE/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_OCN/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_GLC/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + unset + + $COMP_ROOT_DIR_IAC/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_WAV/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_ESP/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $SRCROOT/libraries/FTorch/buildlib + $SRCROOT/libraries/FMS/buildlib + $SRCROOT/components/cdeps/cime_config/buildlib + $SRCROOT/components/cmeps/cime_config/buildlib + $CIMEROOT/CIME/build_scripts/buildlib.gptl + $CIMEROOT/CIME/build_scripts/buildlib.pio + $CIMEROOT/CIME/build_scripts/buildlib.mct + $SRCROOT/share/buildlib.csm_share + $CIMEROOT/CIME/build_scripts/buildlib.mpi-serial + $CIMEROOT/CIME/build_scripts/buildlib.cprnc + + case_last + env_case.xml + path to buildlib script for the given library + + + diff --git a/config/config_headers.xml b/CIME/data/config/config_headers.xml similarity index 95% rename from config/config_headers.xml rename to CIME/data/config/config_headers.xml index c0d939cf3a4..10f59f2c770 100644 --- 
a/config/config_headers.xml +++ b/CIME/data/config/config_headers.xml @@ -17,6 +17,13 @@ + +
+ These variables may be changed at any time during a run; they + control jobs that are dependent on case.run. +
+
+
These variables CANNOT BE CHANGED once a case has been created. diff --git a/config/config_tests.xml b/CIME/data/config/config_tests.xml similarity index 90% rename from config/config_tests.xml rename to CIME/data/config/config_tests.xml index 099bc90a91e..5e029ad8641 100644 --- a/config/config_tests.xml +++ b/CIME/data/config/config_tests.xml @@ -41,6 +41,8 @@ SBN smoke build-namelist test (just run preview_namelist and check_input_data REP reproducibility: do two identical runs give the same results? +REUSEINITFILES do we get identical results when reusing init-generated files? + ====================================================================== Restart Tests ====================================================================== @@ -64,7 +66,7 @@ ERP pes counts hybrid (open-MP/MPI) restart bfb test from startup, default 6 do an 11 day initial test - write a restart at day 6 (suffix base) half the number of tasks and threads for each component do a 5 day restart test starting from restart at day 6 (suffix rest) - this is just like an ERS test but the pe-counts/threading count are modified on retart + this is just like an ERS test but the pe-counts/threading count are modified on restart ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) (1) ref1case @@ -74,7 +76,7 @@ ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) (2) ref2case do a hybrid for ${STOP_N}-${STOP_N}/6 running with ref1 restarts from ${STOP_N}/6 and writing restarts at ( ${STOP_N} - ${STOP_N}/6 )/2 +1 - (by default will run for 18 days and write a restart after 10 days) + (by default will run for 18 days and write a restart after 10 days) ref2 case is a clone of the main case short term archiving is on (3) case @@ -203,12 +205,19 @@ TESTRUNDIFF Produces a canned hist file. Env var TESTRUNDIFF_ALTERNATE can be used to cause a DIFF. Used to check that baseline diffs are detected and reported correctly. +TESTRUNDIFF Produces a canned hist file. Env var TESTRUNDIFF_ALTERNATE can +\RESUBMIT be used to cause a DIFF. Used to check that baseline diffs are + detected and reported correctly. Sets Resubmit equal to one. + + TESTTESTDIFF Simulates internal test diff (non baseline). Used to check that internal comparison failures are detected and reported correctly. TESTRUNSLOWPASS After 5 minutes of sleep, pass run step. Used to test timeouts and kills. +TESTRUNUSERXMLCHANGE Test concurrent user modifications via xmlchange while case is running + NODEFAIL Tests restart upon detected node failure. Generates fake failures, the number of which is controlled by NODEFAIL_NUM_FAILS. @@ -252,7 +261,6 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu 11 FALSE TRUE - $STOP_N / 2 + 1 $STOP_OPTION $STOP_OPTION $STOP_N @@ -263,7 +271,6 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu 1 ndays 11 - $STOP_N / 2 + 1 $STOP_OPTION $STOP_N $STOP_OPTION @@ -289,7 +296,6 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu 1 ndays 11 - $STOP_N / 2 - 1 $STOP_OPTION $STOP_N $STOP_OPTION @@ -311,12 +317,12 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu 1 ndays 7 - $STOP_N / 2 + 1 $STOP_OPTION $STOP_N $STOP_OPTION 1 FALSE + TRUE @@ -324,7 +330,6 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu 1 ndays 7 - $STOP_N / 2 + 1 $STOP_OPTION $STOP_N $STOP_OPTION @@ -352,6 +357,15 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu FALSE + + Run homme tests. 
Only works with the ACME version of the atmosphere component. + 1 + ndays + 11 + FALSE + FALSE + + Run fortran unit tests. Grid and compset (and most case settings) are ignored. 1 @@ -411,6 +425,15 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu FALSE + + For testing infra only. Insta-fail run step, also testing that settings get reset upon rerun. + 1 + ndays + 11 + FALSE + FALSE + + For testing infra only. Insta-fail st archive test with exception. 1 @@ -465,6 +488,17 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu FALSE + + For testing infra only. Produces a canned hist file with a resubmit. + 1 + ndays + TRUE + 11 + FALSE + FALSE + 1 + + For testing infra only. Simulates internal test diff (non baseline) 1 @@ -483,13 +517,25 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu FALSE + + For testing infra only. Test simultaneous xmlchanges while first run is running. + 1 + nsteps + 3 + 1 + FALSE + none + $STOP_OPTION + $STOP_N + FALSE + + For testing infra only. Tests restart upon detected node failure 1 nsteps $ATM_NCPL 11 - $STOP_N / 2 + 1 $STOP_OPTION $STOP_N $STOP_OPTION @@ -551,6 +597,7 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu none FALSE FALSE + none @@ -570,12 +617,12 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu FALSE FALSE nmonths - 2 + 14 $STOP_OPTION $STOP_N $STOP_OPTION $STOP_N - 6 + 0 @@ -620,6 +667,16 @@ NODEFAIL Tests restart upon detected node failure. Generates fake failu $STOP_N + + do we get identical results when reusing init-generated files? + 1 + FALSE + FALSE + never + $STOP_OPTION + $STOP_N + + smoke build-namelist test (just run preview_namelist and check_input_data) 1 diff --git a/CIME/data/config/e3sm/config_files.xml b/CIME/data/config/e3sm/config_files.xml new file mode 100644 index 00000000000..8873b6b0058 --- /dev/null +++ b/CIME/data/config/e3sm/config_files.xml @@ -0,0 +1,15 @@ + + + + + + + + char + $SRCROOT/cime_config/config_files.xml + case_last + env_case.xml + file containing paths + + + diff --git a/CIME/data/config/ufs/config_files.xml b/CIME/data/config/ufs/config_files.xml new file mode 100644 index 00000000000..1f7137821fa --- /dev/null +++ b/CIME/data/config/ufs/config_files.xml @@ -0,0 +1,593 @@ + + + + + + + + char + ufs + case_der + env_case.xml + model system name + + + + + + + + char + $CIMEROOT/CIME/data/config/config_headers.xml + case_der + env_case.xml + contains both header and group information for all the case env_*.xml files + + + + char + $SRCROOT/ccs_config_ufs/config_grids.xml + case_last + env_case.xml + file containing specification of all supported model grids, domains and mapping files (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_grids_v2.1.xsd + + + + char + $SRCROOT/ccs_config_ufs/machines/config_machines.xml + case_last + env_case.xml + file containing machine specifications for target model primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_machines.xsd + + + + char + $SRCROOT/ccs_config_ufs/machines/config_batch.xml + case_last + env_case.xml + file containing batch system details for target system (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_batch.xsd + + + + char + $SRCROOT/ccs_config_ufs/machines/config_workflow.xml + case_last + env_case.xml + file containing workflow (for documentation only - DO NOT EDIT) + 
$CIMEROOT/CIME/data/config/xml_schemas/config_workflow.xsd + + + + char + $SRCROOT/ccs_config_ufs/config_inputdata.xml + case_last + env_case.xml + file containing inputdata server descriptions (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_inputdata.xsd + + + + char + $SRCROOT/ccs_config_ufs/machines/config_compilers.xml + case_last + env_case.xml + file containing compiler specifications for target model primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_compilers_v2.xsd + + + + char + $SRCROOT/ccs_config_ufs/machines/config_pio.xml + case_last + env_case.xml + file containing specification of pio settings for target model possible machine, compiler, mpilib, compset and/or grid attributes (for documentation only - DO NOT EDIT) + + + + char + + $CIMEROOT/CIME/data/config/config_tests.xml + + $COMP_ROOT_DIR_LND/cime_config/config_tests.xml + $COMP_ROOT_DIR_ATM/cime_config/config_tests.xml + + test + env_test.xml + file containing system test descriptions + + + + + + + + + char + + $SRCROOT/src/model/CDEPS/datm + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/satm + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xatm + $SRCROOT/components/cam/ + $SRCROOT/src/model/FV3 + + case_comps + env_case.xml + Root directory of the case atmospheric component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + + $SRCROOT/src/model/CMEPS + $SRCROOT/src/model/NEMS/cime + + case_comps + env_case.xml + Root directory of the case driver/coupler component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/pop/ + $SRCROOT/src/model/MOM6/ + $SRCROOT/components/nemo/ + $SRCROOT/src/model/HYCOM/ + $SRCROOT/src/model/CDEPS/docn + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/socn + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xocn + + case_comps + env_case.xml + Root directory of the case ocean component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/ww3/ + $SRCROOT/src/model/CDEPS/dwav + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/swav + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xwav + + case_comps + env_case.xml + Root directory of the case wave model component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/cism/ + $SRCROOT/src/model/CDEPS/dglc + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/sglc + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xglc + + case_comps + env_case.xml + Root directory of the case land ice component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/src/model/CICE/ + $SRCROOT/src/model/CDEPS/dice + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/sice + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xice + + case_comps + env_case.xml + Root directory of the case sea ice component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/rtm/ + $SRCROOT/components/mosart/ + $SRCROOT/src/model/CDEPS/drof + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/srof + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xrof + + case_comps + env_case.xml + Root directory of the case river runoff model component + 
$CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/components/clm/ + $SRCROOT/src/model/CDEPS/dlnd + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/slnd + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xlnd + + case_comps + env_case.xml + Root directory of the case land model component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/siac + $CIMEROOT/CIME/non_py/src/components/xcpl_comps_$COMP_INTERFACE/xiac + + case_comps + env_case.xml + Root directory of the case integrated assessment component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/src/model/CDEPS/desp + $CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/sesp + + case_comps + env_case.xml + Root directory of the case external system processing (esp) component + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/cime_config/config_compsets.xml + $COMP_ROOT_DIR_CPL/cime_config/config_compsets.xml + $COMP_ROOT_DIR_ATM/cime_config/config_compsets.xml + $COMP_ROOT_DIR_ATM/cime/cime_config/config_compsets.xml + $COMP_ROOT_DIR_GLC/cime_config/config_compsets.xml + $COMP_ROOT_DIR_LND/cime_config/config_compsets.xml + $COMP_ROOT_DIR_ICE/cime/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml + $COMP_ROOT_DIR_OCN/cime/cime_config/config_compsets.xml + + case_last + env_case.xml + file containing specification of all compsets for primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_compsets.xsd + + + + char + unset + + $SRCROOT/cime_config/config_pes.xml + $COMP_ROOT_DIR_CPL/cime_config/config_pes.xml + $COMP_ROOT_DIR_ATM/cime_config/config_pes.xml + $COMP_ROOT_DIR_ATM/cime/cime_config/config_pes.xml + $COMP_ROOT_DIR_GLC/cime_config/config_pes.xml + $COMP_ROOT_DIR_LND/cime_config/config_pes.xml + $COMP_ROOT_DIR_ICE/cime/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml + $COMP_ROOT_DIR_OCN/cime/cime_config/config_pes.xml + + case_last + env_case.xml + file containing specification of all pe-layouts for primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_pes.xsd + + + + char + + $SRCROOT/ccs_config_ufs/config_archive.xml + $COMP_ROOT_DIR_CPL/cime_config/config_archive.xml + + $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml + $COMP_ROOT_DIR_ATM/cime_config/config_archive.xml + $COMP_ROOT_DIR_ICE/cime_config/config_archive.xml + $COMP_ROOT_DIR_LND/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_WAV/cime_config/config_archive.xml + + $COMP_ROOT_DIR_ATM/cime_config/config_archive.xml + $COMP_ROOT_DIR_GLC/cime_config/config_archive.xml + $COMP_ROOT_DIR_LND/cime_config/config_archive.xml + $COMP_ROOT_DIR_ICE/cime/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml + $COMP_ROOT_DIR_OCN/cime/cime_config/config_archive.xml + $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml + $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml + 
$COMP_ROOT_DIR_ATM/cime/cime_config/config_archive.xml + + case_last + env_case.xml + file containing specification of archive files for each component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/config_archive.xsd + + + + char + + $CIMEROOT/CIME/SystemTests + $COMP_ROOT_DIR_LND/cime_config/SystemTests + $COMP_ROOT_DIR_ATM/cime_config/SystemTests + $COMP_ROOT_DIR_OCN/cime_config/SystemTests + $COMP_ROOT_DIR_OCN/cime_config/SystemTests + $COMP_ROOT_DIR_OCN/cime_config/SystemTests + $COMP_ROOT_DIR_ICE/cime/cime_config/SystemTests + $COMP_ROOT_DIR_GLC/cime_config/SystemTests + $COMP_ROOT_DIR_ROF/cime_config/SystemTests + $COMP_ROOT_DIR_ROF/cime_config/SystemTests + + test + env_test.xml + directories containing cime compatible system test modules + + + + char + unset + + $COMP_ROOT_DIR_ATM/cime/cime_config/testlist.xml + $COMP_ROOT_DIR_OCN/cime/cime_config/testlist.xml + + case_last + env_case.xml + file containing specification of all system tests for primary component (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/testlist.xsd + + + + char + unset + + $SRCROOT/cime_config/testmods_dirs + $COMP_ROOT_DIR_CPL/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ATM/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_GLC/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_LND/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ICE/cime/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs + $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs + + case_last + env_case.xml + directory containing test modifications for primary component tests (for documentation only - DO NOT EDIT) + + + + char + unset + + $SRCROOT/cime_config/usermods_dirs + $COMP_ROOT_DIR_CPL/cime_config/usermods_dirs + $COMP_ROOT_DIR_ATM/cime_config/usermods_dirs + $COMP_ROOT_DIR_GLC/cime_config/usermods_dirs + $COMP_ROOT_DIR_LND/cime_config/usermods_dirs + $COMP_ROOT_DIR_ICE/cime/cime_config/usermods_dirs + $COMP_ROOT_DIR_ROF/cime_config/usermods_dirs + $COMP_ROOT_DIR_ROF/cime_config/usermods_dirs + $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs + $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs + $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs + + case_last + env_case.xml + directory containing user modifications for primary components (for documentation only - DO NOT EDIT) + + + + + char + unset + + $COMP_ROOT_DIR_CPL/cime_config/namelist_definition_drv.xml + + $COMP_ROOT_DIR_ROF/cime_config/namelist_definition_drof.xml + $COMP_ROOT_DIR_ATM/cime_config/namelist_definition_datm.xml + $COMP_ROOT_DIR_ICE/cime/cime_config/namelist_definition_dice.xml + $COMP_ROOT_DIR_LND/cime_config/namelist_definition_dlnd.xml + $COMP_ROOT_DIR_OCN/cime_config/namelist_definition_docn.xml + $COMP_ROOT_DIR_WAV/cime_config/namelist_definition_dwav.xml + + + + case_last + env_case.xml + file containing namelist_definitions for all components + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_namelist.xsd + + + + + + + + char + + $COMP_ROOT_DIR_CPL/cime_config/config_component.xml + + case_last + env_case.xml + file containing all non-component specific case configuration variables (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + 
$COMP_ROOT_DIR_CPL/cime_config/config_component_$MODEL.xml + + case_last + env_case.xml + file containing all component specific driver configuration variables (for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + unset + + $COMP_ROOT_DIR_ATM/cime_config/config_component.xml + $COMP_ROOT_DIR_ATM/cime/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_LND/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_ROF/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_ICE/cime_config/config_component.xml + $COMP_ROOT_DIR_ICE/cime/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_OCN/cime_config/config_component.xml + $COMP_ROOT_DIR_OCN/cime/cime_config/config_component.xml + $COMP_ROOT_DIR_OCN/cime/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_GLC/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + unset + + $COMP_ROOT_DIR_IAC/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_WAV/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + $COMP_ROOT_DIR_ESP/cime_config/config_component.xml + + case_last + env_case.xml + file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) + $CIMEROOT/CIME/data/config/xml_schemas/entry_id.xsd + $CIMEROOT/CIME/data/config/xml_schemas/entry_id_version3.xsd + + + + char + + 
$SRCROOT/src/model/FMS/cime/cime_config/buildlib + $SRCROOT/src/model/CDEPS/cime_config/buildlib + $SRCROOT/src/model/CMEPS/cime_config/buildlib + $CIMEROOT/CIME/build_scripts/buildlib.gptl + $CIMEROOT/CIME/build_scripts/buildlib.pio + $CIMEROOT/CIME/build_scripts/buildlib.mct + $CIMEROOT/CIME/build_scripts/buildlib.csm_share + $CIMEROOT/CIME/build_scripts/buildlib.mpi-serial + $CIMEROOT/CIME/build_scripts/buildlib.cprnc + + case_last + env_case.xml + path to buildlib script for the given library + + + diff --git a/config/xml_schemas/cimeteststatus.xsd b/CIME/data/config/xml_schemas/cimeteststatus.xsd similarity index 100% rename from config/xml_schemas/cimeteststatus.xsd rename to CIME/data/config/xml_schemas/cimeteststatus.xsd diff --git a/CIME/data/config/xml_schemas/config_archive.xsd b/CIME/data/config/xml_schemas/config_archive.xsd new file mode 100644 index 00000000000..cc7fe137ab8 --- /dev/null +++ b/CIME/data/config/xml_schemas/config_archive.xsd @@ -0,0 +1,85 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/config/xml_schemas/config_batch.xsd b/CIME/data/config/xml_schemas/config_batch.xsd similarity index 93% rename from config/xml_schemas/config_batch.xsd rename to CIME/data/config/xml_schemas/config_batch.xsd index 141bcf8e57f..10b8368538d 100644 --- a/config/xml_schemas/config_batch.xsd +++ b/CIME/data/config/xml_schemas/config_batch.xsd @@ -127,17 +127,19 @@ - - - - - - - - - - - + + + + + + + + + + + + + @@ -146,10 +148,7 @@ - - - - + @@ -181,6 +180,7 @@ + diff --git a/config/xml_schemas/config_compsets.xsd b/CIME/data/config/xml_schemas/config_compsets.xsd similarity index 100% rename from config/xml_schemas/config_compsets.xsd rename to CIME/data/config/xml_schemas/config_compsets.xsd diff --git a/CIME/data/config/xml_schemas/config_grids_v2.2.xsd b/CIME/data/config/xml_schemas/config_grids_v2.2.xsd new file mode 100644 index 00000000000..bf9c071536d --- /dev/null +++ b/CIME/data/config/xml_schemas/config_grids_v2.2.xsd @@ -0,0 +1,182 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/config/xml_schemas/config_grids_v2.xsd b/CIME/data/config/xml_schemas/config_grids_v2.xsd similarity index 97% rename from config/xml_schemas/config_grids_v2.xsd rename to CIME/data/config/xml_schemas/config_grids_v2.xsd index dc61054d2a7..0d2ddd34a7d 100644 --- a/config/xml_schemas/config_grids_v2.xsd +++ b/CIME/data/config/xml_schemas/config_grids_v2.xsd @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ - + @@ -125,6 +125,7 @@ + diff --git a/CIME/data/config/xml_schemas/config_inputdata.xsd b/CIME/data/config/xml_schemas/config_inputdata.xsd new file mode 100644 index 00000000000..fee8e973c10 --- /dev/null +++ b/CIME/data/config/xml_schemas/config_inputdata.xsd @@ -0,0 +1,28 @@ + + + + + + + server precidence is order in this file. 
Highest preference at top + + + + + + + + + + + + + + + + + + + + + diff --git a/config/xml_schemas/config_machines.xsd b/CIME/data/config/xml_schemas/config_machines.xsd similarity index 84% rename from config/xml_schemas/config_machines.xsd rename to CIME/data/config/xml_schemas/config_machines.xsd index e8af235035c..d6cbe51f1ec 100644 --- a/config/xml_schemas/config_machines.xsd +++ b/CIME/data/config/xml_schemas/config_machines.xsd @@ -6,6 +6,7 @@ + @@ -15,6 +16,7 @@ + @@ -37,6 +39,8 @@ + + @@ -50,8 +54,13 @@ - - + + + + + + + @@ -62,6 +71,7 @@ + @@ -123,6 +133,10 @@ + + + + @@ -151,10 +165,25 @@ - + + + + + + + + + + - + + + + + @@ -194,6 +225,7 @@ + @@ -235,11 +267,12 @@ + - + @@ -264,7 +297,8 @@ - + + diff --git a/config/xml_schemas/config_machines_template.xml b/CIME/data/config/xml_schemas/config_machines_template.xml similarity index 94% rename from config/xml_schemas/config_machines_template.xml rename to CIME/data/config/xml_schemas/config_machines_template.xml index 97e688712bb..de361c16265 100644 --- a/config/xml_schemas/config_machines_template.xml +++ b/CIME/data/config/xml_schemas/config_machines_template.xml @@ -1,17 +1,12 @@ - + SITE VENDOR platform, os is ---, xx pes/node, batch system is --- - - .*.cheyenne.ucar.edu - LINUX @@ -37,7 +32,7 @@ the batch script (ex. #PBS -A charge_account). Will default to PROJECT if not set. can be overridden in environment or $HOME/.cime/config --> - + couldbethis @@ -72,14 +67,14 @@ $ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc.cheyenne - + gmake 8 - pbs + none diff --git a/CIME/data/config/xml_schemas/config_machines_version3.xsd b/CIME/data/config/xml_schemas/config_machines_version3.xsd new file mode 100644 index 00000000000..99f823ffe46 --- /dev/null +++ b/CIME/data/config/xml_schemas/config_machines_version3.xsd @@ -0,0 +1,340 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/config/xml_schemas/config_pes.xsd b/CIME/data/config/xml_schemas/config_pes.xsd similarity index 100% rename from config/xml_schemas/config_pes.xsd rename to CIME/data/config/xml_schemas/config_pes.xsd diff --git a/config/xml_schemas/config_workflow.xsd b/CIME/data/config/xml_schemas/config_workflow.xsd similarity index 86% rename from config/xml_schemas/config_workflow.xsd rename to CIME/data/config/xml_schemas/config_workflow.xsd index 805a70cf7bf..b39b8526a90 100644 --- a/config/xml_schemas/config_workflow.xsd +++ b/CIME/data/config/xml_schemas/config_workflow.xsd @@ -6,15 +6,16 @@ - + - - + + + @@ -35,7 +36,7 @@ - + @@ -57,6 +58,7 @@ + diff --git a/config/xml_schemas/entry_id.xsd b/CIME/data/config/xml_schemas/entry_id.xsd similarity index 100% rename from config/xml_schemas/entry_id.xsd rename to CIME/data/config/xml_schemas/entry_id.xsd diff --git a/config/xml_schemas/entry_id_base.xsd b/CIME/data/config/xml_schemas/entry_id_base.xsd similarity index 88% rename from config/xml_schemas/entry_id_base.xsd rename to 
CIME/data/config/xml_schemas/entry_id_base.xsd index 9c0578c2581..2aae23762f6 100644 --- a/config/xml_schemas/entry_id_base.xsd +++ b/CIME/data/config/xml_schemas/entry_id_base.xsd @@ -13,7 +13,6 @@ - @@ -28,6 +27,16 @@ + + + + + + + + + + @@ -49,7 +58,7 @@ - + diff --git a/config/xml_schemas/entry_id_base_version3.xsd b/CIME/data/config/xml_schemas/entry_id_base_version3.xsd similarity index 98% rename from config/xml_schemas/entry_id_base_version3.xsd rename to CIME/data/config/xml_schemas/entry_id_base_version3.xsd index f4599927e9e..fc03b6a90d2 100644 --- a/config/xml_schemas/entry_id_base_version3.xsd +++ b/CIME/data/config/xml_schemas/entry_id_base_version3.xsd @@ -57,7 +57,7 @@ - + diff --git a/config/xml_schemas/entry_id_namelist.xsd b/CIME/data/config/xml_schemas/entry_id_namelist.xsd similarity index 100% rename from config/xml_schemas/entry_id_namelist.xsd rename to CIME/data/config/xml_schemas/entry_id_namelist.xsd diff --git a/config/xml_schemas/entry_id_version3.xsd b/CIME/data/config/xml_schemas/entry_id_version3.xsd similarity index 100% rename from config/xml_schemas/entry_id_version3.xsd rename to CIME/data/config/xml_schemas/entry_id_version3.xsd diff --git a/config/xml_schemas/env_archive.xsd b/CIME/data/config/xml_schemas/env_archive.xsd similarity index 100% rename from config/xml_schemas/env_archive.xsd rename to CIME/data/config/xml_schemas/env_archive.xsd diff --git a/config/xml_schemas/env_batch.xsd b/CIME/data/config/xml_schemas/env_batch.xsd similarity index 100% rename from config/xml_schemas/env_batch.xsd rename to CIME/data/config/xml_schemas/env_batch.xsd diff --git a/config/xml_schemas/env_entry_id.xsd b/CIME/data/config/xml_schemas/env_entry_id.xsd similarity index 100% rename from config/xml_schemas/env_entry_id.xsd rename to CIME/data/config/xml_schemas/env_entry_id.xsd diff --git a/config/xml_schemas/env_mach_pes.xsd b/CIME/data/config/xml_schemas/env_mach_pes.xsd similarity index 100% rename from config/xml_schemas/env_mach_pes.xsd rename to CIME/data/config/xml_schemas/env_mach_pes.xsd diff --git a/config/xml_schemas/env_mach_specific.xsd b/CIME/data/config/xml_schemas/env_mach_specific.xsd similarity index 91% rename from config/xml_schemas/env_mach_specific.xsd rename to CIME/data/config/xml_schemas/env_mach_specific.xsd index fabd35c9314..77020e8e0f1 100644 --- a/config/xml_schemas/env_mach_specific.xsd +++ b/CIME/data/config/xml_schemas/env_mach_specific.xsd @@ -9,7 +9,8 @@ - + + @@ -102,6 +103,7 @@ + @@ -120,7 +122,8 @@ - + + @@ -133,7 +136,7 @@ - + @@ -150,6 +153,7 @@ + @@ -160,13 +164,14 @@ + - + diff --git a/config/xml_schemas/expected_fails_file.xsd b/CIME/data/config/xml_schemas/expected_fails_file.xsd similarity index 100% rename from config/xml_schemas/expected_fails_file.xsd rename to CIME/data/config/xml_schemas/expected_fails_file.xsd diff --git a/config/xml_schemas/testlist.xsd b/CIME/data/config/xml_schemas/testlist.xsd similarity index 95% rename from config/xml_schemas/testlist.xsd rename to CIME/data/config/xml_schemas/testlist.xsd index 07752f4eefc..af17a3abeaf 100644 --- a/config/xml_schemas/testlist.xsd +++ b/CIME/data/config/xml_schemas/testlist.xsd @@ -16,7 +16,7 @@ - + @@ -59,6 +59,7 @@ + diff --git a/tools/load_balancing_tool/tests/__init__.py b/CIME/data/templates/__init__.py similarity index 100% rename from tools/load_balancing_tool/tests/__init__.py rename to CIME/data/templates/__init__.py diff --git a/scripts/lib/cs.status.template b/CIME/data/templates/cs.status.template similarity index 100% rename from 
scripts/lib/cs.status.template rename to CIME/data/templates/cs.status.template diff --git a/scripts/lib/cs.submit.template b/CIME/data/templates/cs.submit.template similarity index 100% rename from scripts/lib/cs.submit.template rename to CIME/data/templates/cs.submit.template diff --git a/CIME/data/templates/gitignore.template b/CIME/data/templates/gitignore.template new file mode 100644 index 00000000000..ce731b6d0c3 --- /dev/null +++ b/CIME/data/templates/gitignore.template @@ -0,0 +1,81 @@ +# cime build and run directories +bld +run +# logger output files +*.log + +# queueing system output files +run.*.o\d+ +st_archive.*.o\d+ + +#python files +__pycache__/ +*.so + +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data diff --git a/scripts/lib/testreporter.template b/CIME/data/templates/testreporter.template similarity index 100% rename from scripts/lib/testreporter.template rename to CIME/data/templates/testreporter.template diff --git a/scripts/lib/CIME/date.py b/CIME/date.py similarity index 79% rename from scripts/lib/CIME/date.py rename to CIME/date.py index c2adea8e90f..0d12b2ceede 100644 --- a/scripts/lib/CIME/date.py +++ b/CIME/date.py @@ -1,9 +1,10 @@ import re from CIME.XML.standard_module_setup import * + logger = logging.getLogger(__name__) ############################################################################### def get_file_date(filename): -############################################################################### + ############################################################################### """ Returns the date associated with the filename as a date object representing the correct date Formats supported: @@ -34,10 +35,11 @@ def get_file_date(filename): # TODO: Add these to config_archive.xml, instead of here # Note these must be in order of most specific to least # so that lesser specificities aren't used to parse greater ones - re_formats = [r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}_[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}", # [yy...]yyyy-mm-dd_hh.MM.ss - r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}[\-_][0-9]{1,5}", # [yy...]yyyy-mm-dd_sssss - r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}", # [yy...]yyyy-mm-dd - r"[0-9]*[0-9]{4}[\-\.][0-9]{1,2}", # [yy...]yyyy-mm + re_formats = [ + r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}_[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}", # [yy...]yyyy-mm-dd_hh.MM.ss + r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}[\-_][0-9]{1,5}", # [yy...]yyyy-mm-dd_sssss + r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}", # [yy...]yyyy-mm-dd + r"[0-9]*[0-9]{4}[\-\.][0-9]{1,2}", # [yy...]yyyy-mm ] for re_str in re_formats: @@ -57,15 +59,16 @@ def get_file_date(filename): elif len(date_tuple) == 6: # Create a date object with arbitrary year, month, day, but the correct time of day # Then use _get_day_second to get the time of day 
in seconds - second = date.hms_to_second(hour = date_tuple[3], - minute = date_tuple[4], - second = date_tuple[5]) + second = date.hms_to_second( + hour=date_tuple[3], minute=date_tuple[4], second=date_tuple[5] + ) return date(year, month, day, 0, 0, second) # Not a valid filename date format logger.debug("{} is a filename without a supported date!".format(filename)) return None + class date: """ Simple struct for holding dates and the time of day and performing comparisons @@ -178,20 +181,21 @@ class date: >>> date(3, 5, 6, 8) > date(4, 5, 6, 8) False """ + @staticmethod def hms_to_second(hour, minute, second): _SECONDS_PER_HOUR = 3600 _SECONDS_PER_MINUTE = 60 - return (hour * _SECONDS_PER_HOUR + minute * _SECONDS_PER_MINUTE - + second) + return hour * _SECONDS_PER_HOUR + minute * _SECONDS_PER_MINUTE + second @staticmethod def second_to_hms(second): _SECONDS_PER_HOUR = 3600 _SECONDS_PER_MINUTE = 60 - return { 'hour': second // _SECONDS_PER_HOUR, - 'minute': (second % _SECONDS_PER_HOUR) // _SECONDS_PER_MINUTE, - 'second': second % _SECONDS_PER_MINUTE + return { + "hour": second // _SECONDS_PER_HOUR, + "minute": (second % _SECONDS_PER_HOUR) // _SECONDS_PER_MINUTE, + "second": second % _SECONDS_PER_MINUTE, } def __init__(self, year=1, month=1, day=1, hour=0, minute=0, second=0): @@ -206,12 +210,14 @@ def __str__(self): 'date(4, 5, 7, 0, 1, 4)' """ fmt_str = "date({year:d}, {month:d}, {day:d}, {hour:d}, {minute:d}, {second:d})" - return fmt_str.format(year = self.year(), - month = self.month(), - day = self.day(), - hour = self.hour(), - minute = self.minute(), - second = self.second()) + return fmt_str.format( + year=self.year(), + month=self.month(), + day=self.day(), + hour=self.hour(), + minute=self.minute(), + second=self.second(), + ) def year(self): return self._year @@ -223,13 +229,13 @@ def day(self): return self._day def hour(self): - return self.second_to_hms(self._second)['hour'] + return self.second_to_hms(self._second)["hour"] def minute(self): - return self.second_to_hms(self._second)['minute'] + return self.second_to_hms(self._second)["minute"] def second(self): - return self.second_to_hms(self._second)['second'] + return self.second_to_hms(self._second)["second"] def second_of_day(self): return self._second @@ -238,9 +244,12 @@ def __repr__(self): return str(self) def __eq__(self, other): - return ((self.year() == other.year()) and (self.month() == other.month()) - and (self.day() == other.day()) - and (self.second_of_day() == other.second_of_day())) + return ( + (self.year() == other.year()) + and (self.month() == other.month()) + and (self.day() == other.day()) + and (self.second_of_day() == other.second_of_day()) + ) def __ne__(self, other): return not (self == other) @@ -268,7 +277,7 @@ def __lt__(self, other): return False def __le__(self, other): - return ((self < other) or (self == other)) + return (self < other) or (self == other) def __ge__(self, other): return not (self < other) diff --git a/CIME/expected_fails.py b/CIME/expected_fails.py new file mode 100644 index 00000000000..f8e5339702b --- /dev/null +++ b/CIME/expected_fails.py @@ -0,0 +1,44 @@ +""" +Contains the definition of a class to hold information on expected failures for a single test +""" + +from CIME.XML.standard_module_setup import * + +EXPECTED_FAILURE_COMMENT = "(EXPECTED FAILURE)" +UNEXPECTED_FAILURE_COMMENT_START = "(UNEXPECTED" # There will be some additional text after this, before the end parentheses + + +class ExpectedFails(object): + def __init__(self): + """Initialize an empty ExpectedFails 
object""" + self._fails = {} + + def __eq__(self, rhs): + expect(isinstance(rhs, ExpectedFails), "Wrong type") + return self._fails == rhs._fails # pylint: disable=protected-access + + def __ne__(self, rhs): + result = self.__eq__(rhs) + return not result + + def __repr__(self): + return repr(self._fails) + + def add_failure(self, phase, expected_status): + """Add an expected failure to the list""" + expect( + phase not in self._fails, "Phase {} already present in list".format(phase) + ) + self._fails[phase] = expected_status + + def expected_fails_comment(self, phase, status): + """Returns a string giving the expected fails comment for this phase and status""" + if phase not in self._fails: + return "" + + if self._fails[phase] == status: + return EXPECTED_FAILURE_COMMENT + else: + return "{}: expected {})".format( + UNEXPECTED_FAILURE_COMMENT_START, self._fails[phase] + ) diff --git a/CIME/get_tests.py b/CIME/get_tests.py new file mode 100644 index 00000000000..a4d4abb3410 --- /dev/null +++ b/CIME/get_tests.py @@ -0,0 +1,495 @@ +import CIME.utils +from CIME.utils import expect, convert_to_seconds, parse_test_name, get_cime_root +from CIME.XML.machines import Machines +import sys, os + +# Expect that, if a model wants to use python-based test lists, they will have a file +# $model/cime_config/tests.py , containing a test dictionary called _TESTS. Currently, +# only E3SM is using this feature. + +sys.path.insert(0, os.path.join(get_cime_root(), "../cime_config")) +_ALL_TESTS = {} +try: + from tests import _TESTS # pylint: disable=import-error + + _ALL_TESTS.update(_TESTS) +except ImportError: + pass + +# Here are the tests belonging to cime suites. Format for individual tests is +# ..[.] +# +# suite_name : { +# "inherit" : (suite1, suite2, ...), # Optional. Suites to inherit tests from. Default is None. Tuple, list, or str. +# "time" : "HH:MM:SS", # Optional. Recommended upper-limit on test time. +# "share" : True|False, # Optional. If True, all tests in this suite share a build. Default is False. +# "perf" : True|False, # Optional. If True, all tests in this suite will do performance tracking. Default is False. +# "tests" : (test1, test2, ...) # Optional. The list of tests for this suite. See above for format. Tuple, list, or str. This is the ONLY inheritable attribute. 
+# } + +_CIME_TESTS = { + "cime_tiny": { + "time": "0:10:00", + "tests": ( + "ERS.f19_g16.A", + "NCK.f19_g16.A", + ), + }, + "cime_test_only_pass": { + "time": "0:10:00", + "tests": ( + "TESTRUNPASS_P1.f19_g16.A", + "TESTRUNPASS_P1.ne30_g16.A", + "TESTRUNPASS_P1.f45_g37.A", + ), + }, + "cime_test_only_slow_pass": { + "time": "0:10:00", + "tests": ( + "TESTRUNSLOWPASS_P1.f19_g16.A", + "TESTRUNSLOWPASS_P1.ne30_g16.A", + "TESTRUNSLOWPASS_P1.f45_g37.A", + ), + }, + "cime_test_only": { + "time": "0:10:00", + "tests": ( + "TESTBUILDFAIL_P1.f19_g16.A", + "TESTBUILDFAILEXC_P1.f19_g16.A", + "TESTRUNFAIL_P1.f19_g16.A", + "TESTRUNSTARCFAIL_P1.f19_g16.A", + "TESTRUNFAILEXC_P1.f19_g16.A", + "TESTRUNPASS_P1.f19_g16.A", + "TESTTESTDIFF_P1.f19_g16.A", + "TESTMEMLEAKFAIL_P1.f09_g16.X", + "TESTMEMLEAKPASS_P1.f09_g16.X", + ), + }, + "cime_test_all": { + "inherit": "cime_test_only", + "time": "0:10:00", + "tests": "TESTRUNDIFF_P1.f19_g16.A", + }, + "cime_test_share": { + "time": "0:10:00", + "share": True, + "tests": ( + "SMS_P2.f19_g16.A", + "SMS_P4.f19_g16.A", + "SMS_P8.f19_g16.A", + "SMS_P16.f19_g16.A", + ), + }, + "cime_test_share2": { + "time": "0:10:00", + "share": True, + "tests": ( + "SMS_P2.f19_g16.X", + "SMS_P4.f19_g16.X", + "SMS_P8.f19_g16.X", + "SMS_P16.f19_g16.X", + ), + }, + "cime_test_perf": { + "time": "0:10:00", + "perf": True, + "tests": ( + "SMS_P2.T42_T42.S", + "SMS_P4.T42_T42.S", + "SMS_P8.T42_T42.S", + "SMS_P16.T42_T42.S", + ), + }, + "cime_test_timing": { + "time": "0:10:00", + "tests": ("SMS_P1.T42_T42.S",), + }, + "cime_test_repeat": { + "tests": ( + "TESTRUNPASS_P1.f19_g16.A", + "TESTRUNPASS_P2.ne30_g16.A", + "TESTRUNPASS_P4.f45_g37.A", + ) + }, + "cime_test_time": { + "time": "0:13:00", + "tests": ("TESTRUNPASS_P69.f19_g16.A.testmod",), + }, + "cime_test_multi_inherit": { + "inherit": ("cime_test_repeat", "cime_test_only_pass", "cime_test_all") + }, + "cime_developer": { + "time": "0:15:00", + "tests": ( + "NCK_Ld3.f45_g37.A", + "ERI_Ln9.f09_g16.X", + "ERIO_Ln11.f09_g16.X", + "SEQ_Ln9.f19_g16.A", + "ERS.ne30_g16.A", + "IRT_N2_Vmct_Ln9.f19_g16.A", + "ERR_Ln9.f45_g37.A", + "ERP_Ln9.f45_g37.A", + "SMS_D_Ln9_Mmpi-serial.f19_g16.A", + "PET_Ln9_P4.f19_f19.A", + "PEM_Ln9_P4.f19_f19.A", + "SMS_Ln3.T42_T42.S", + "PRE.f19_f19.ADESP", + "PRE.f19_f19.ADESP_TEST", + "MCC_P1.f19_g16.A", + "LDSTA.f45_g37.A", + ), + }, +} + +_ALL_TESTS.update(_CIME_TESTS) + +############################################################################### +def _get_key_data(raw_dict, key, the_type): + ############################################################################### + if key not in raw_dict: + if the_type is tuple: + return () + elif the_type is str: + return None + elif the_type is bool: + return False + else: + expect(False, "Unsupported type {}".format(the_type)) + else: + val = raw_dict[key] + if the_type is tuple and isinstance(val, str): + val = (val,) + + expect( + isinstance(val, the_type), + "Wrong type for {}, {} is a {} but expected {}".format( + key, val, type(val), the_type + ), + ) + + return val + + +############################################################################### +def get_test_data(suite): + ############################################################################### + """ + For a given suite, returns (inherit, time, share, perf, tests) + """ + raw_dict = _ALL_TESTS[suite] + for key in raw_dict.keys(): + expect( + key in ["inherit", "time", "share", "perf", "tests"], + "Unexpected test key '{}'".format(key), + ) + + return ( + _get_key_data(raw_dict, "inherit", tuple), 
+ _get_key_data(raw_dict, "time", str), + _get_key_data(raw_dict, "share", bool), + _get_key_data(raw_dict, "perf", bool), + _get_key_data(raw_dict, "tests", tuple), + ) + + +############################################################################### +def get_test_suites(): + ############################################################################### + return list(_ALL_TESTS.keys()) + + +############################################################################### +def get_test_suite( + suite, machine=None, compiler=None, skip_inherit=False, skip_tests=None +): + ############################################################################### + """ + Return a list of FULL test names for a suite. + """ + expect(suite in get_test_suites(), "Unknown test suite: '{}'".format(suite)) + machobj = Machines(machine=machine) + machine = machobj.get_machine_name() + + if compiler is None: + compiler = machobj.get_default_compiler() + expect( + machobj.is_valid_compiler(compiler), + "Compiler {} not valid for machine {}".format(compiler, machine), + ) + + inherits_from, _, _, _, tests_raw = get_test_data(suite) + tests = [] + for item in tests_raw: + expect( + isinstance(item, str), + "Bad type of test {}, expected string".format(item), + ) + + test_mods = None + test_components = item.split(".") + expect(len(test_components) in [3, 4], "Bad test name {}".format(item)) + + if len(test_components) == 4: + test_name = ".".join(test_components[:-1]) + test_mods = test_components[-1] + else: + test_name = item + if not skip_tests or not test_name in skip_tests: + tests.append( + CIME.utils.get_full_test_name( + test_name, + machine=machine, + compiler=compiler, + testmods_string=test_mods, + ) + ) + + if not skip_inherit: + for inherits in inherits_from: + inherited_tests = get_test_suite(inherits, machine, compiler) + + for inherited_test in inherited_tests: + if inherited_test not in tests: + tests.append(inherited_test) + + return tests + + +############################################################################### +def suite_has_test(suite, test_full_name, skip_inherit=False): + ############################################################################### + _, _, _, _, machine, compiler, _ = CIME.utils.parse_test_name(test_full_name) + expect(machine is not None, "{} is not a full test name".format(test_full_name)) + + tests = get_test_suite( + suite, machine=machine, compiler=compiler, skip_inherit=skip_inherit + ) + return test_full_name in tests + + +############################################################################### +def get_build_groups(tests): + ############################################################################### + """ + Given a list of tests, return a list of lists, with each list representing + a group of tests that can share executables. 
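+    Two tests can share an executable when they both belong to a suite that sets
+    "share" to True; the grouping below assumes build compatibility is transitive.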
+ + >>> tests = ["SMS_P2.f19_g16.A.melvin_gnu", "SMS_P4.f19_g16.A.melvin_gnu", "SMS_P2.f19_g16.X.melvin_gnu", "SMS_P4.f19_g16.X.melvin_gnu", "TESTRUNSLOWPASS_P1.f19_g16.A.melvin_gnu", "TESTRUNSLOWPASS_P1.ne30_g16.A.melvin_gnu"] + >>> get_build_groups(tests) + [('SMS_P2.f19_g16.A.melvin_gnu', 'SMS_P4.f19_g16.A.melvin_gnu'), ('SMS_P2.f19_g16.X.melvin_gnu', 'SMS_P4.f19_g16.X.melvin_gnu'), ('TESTRUNSLOWPASS_P1.f19_g16.A.melvin_gnu',), ('TESTRUNSLOWPASS_P1.ne30_g16.A.melvin_gnu',)] + """ + build_groups = [] # list of tuples ([tests], set(suites)) + + # Get a list of suites that share exes + suites = get_test_suites() + share_suites = [] + for suite in suites: + share = get_test_data(suite)[2] + if share: + share_suites.append(suite) + + # Divide tests up into build groups. Assumes that build-compatibility is transitive + for test in tests: + matched = False + + my_share_suites = set() + for suite in share_suites: + if suite_has_test(suite, test, skip_inherit=True): + my_share_suites.add(suite) + + # Try to match this test with an existing build group + if my_share_suites: + for build_group_tests, build_group_suites in build_groups: + overlap = build_group_suites & my_share_suites + if overlap: + matched = True + build_group_tests.append(test) + build_group_suites.update(my_share_suites) + break + + # Nothing matched, this test is in a build group of its own + if not matched: + build_groups.append(([test], my_share_suites)) + + return [tuple(item[0]) for item in build_groups] + + +############################################################################### +def is_perf_test(test): + ############################################################################### + """ + Is the provided test in a suite with perf=True? + + >>> is_perf_test("SMS_P2.T42_T42.S.melvin_gnu") + True + >>> is_perf_test("SMS_P2.f19_g16.X.melvin_gnu") + False + >>> is_perf_test("PFS_P2.f19_g16.X.melvin_gnu") + True + """ + # Get a list of performance suites + if test.startswith("PFS"): + return True + else: + suites = get_test_suites() + for suite in suites: + perf = get_test_data(suite)[3] + if perf and suite_has_test(suite, test, skip_inherit=True): + return True + + return False + + +############################################################################### +def infer_arch_from_tests(testargs): + ############################################################################### + """ + Return a tuple (machine, [compilers]) that can be inferred from the test args + + >>> infer_arch_from_tests(["NCK.f19_g16.A.melvin_gnu"]) + ('melvin', ['gnu']) + >>> infer_arch_from_tests(["NCK.f19_g16.A"]) + (None, []) + >>> infer_arch_from_tests(["NCK.f19_g16.A", "NCK.f19_g16.A.melvin_gnu"]) + ('melvin', ['gnu']) + >>> infer_arch_from_tests(["NCK.f19_g16.A.melvin_gnu", "NCK.f19_g16.A.melvin_gnu"]) + ('melvin', ['gnu']) + >>> infer_arch_from_tests(["NCK.f19_g16.A.melvin_gnu9", "NCK.f19_g16.A.melvin_gnu"]) + ('melvin', ['gnu9', 'gnu']) + >>> infer_arch_from_tests(["NCK.f19_g16.A.melvin_gnu", "NCK.f19_g16.A.mappy_gnu"]) + Traceback (most recent call last): + ... 
+ CIME.utils.CIMEError: ERROR: Must have consistent machine 'melvin' != 'mappy' + """ + e3sm_test_suites = get_test_suites() + + machine = None + compilers = [] + for testarg in testargs: + testarg = testarg.strip() + if testarg.startswith("^"): + testarg = testarg[1:] + + if testarg not in e3sm_test_suites: + machine_for_this_test, compiler_for_this_test = parse_test_name(testarg)[ + 4:6 + ] + if machine_for_this_test is not None: + if machine is None: + machine = machine_for_this_test + else: + expect( + machine == machine_for_this_test, + "Must have consistent machine '%s' != '%s'" + % (machine, machine_for_this_test), + ) + + if ( + compiler_for_this_test is not None + and compiler_for_this_test not in compilers + ): + compilers.append(compiler_for_this_test) + + return machine, compilers + + +############################################################################### +def get_full_test_names(testargs, machine, compiler): + ############################################################################### + """ + Return full test names in the form: + TESTCASE.GRID.COMPSET.MACHINE_COMPILER.TESTMODS + Testmods are optional + + Testargs can be categories or test names and support the NOT symbol '^' + + >>> get_full_test_names(["cime_tiny"], "melvin", "gnu") + ['ERS.f19_g16.A.melvin_gnu', 'NCK.f19_g16.A.melvin_gnu'] + + >>> get_full_test_names(["cime_tiny", "PEA_P1_M.f45_g37.A"], "melvin", "gnu") + ['ERS.f19_g16.A.melvin_gnu', 'NCK.f19_g16.A.melvin_gnu', 'PEA_P1_M.f45_g37.A.melvin_gnu'] + + >>> get_full_test_names(['ERS.f19_g16.A', 'NCK.f19_g16.A', 'PEA_P1_M.f45_g37.A'], "melvin", "gnu") + ['ERS.f19_g16.A.melvin_gnu', 'NCK.f19_g16.A.melvin_gnu', 'PEA_P1_M.f45_g37.A.melvin_gnu'] + + >>> get_full_test_names(["cime_tiny", "^NCK.f19_g16.A"], "melvin", "gnu") + ['ERS.f19_g16.A.melvin_gnu'] + + >>> get_full_test_names(["cime_test_multi_inherit"], "melvin", "gnu") + ['TESTBUILDFAILEXC_P1.f19_g16.A.melvin_gnu', 'TESTBUILDFAIL_P1.f19_g16.A.melvin_gnu', 'TESTMEMLEAKFAIL_P1.f09_g16.X.melvin_gnu', 'TESTMEMLEAKPASS_P1.f09_g16.X.melvin_gnu', 'TESTRUNDIFF_P1.f19_g16.A.melvin_gnu', 'TESTRUNFAILEXC_P1.f19_g16.A.melvin_gnu', 'TESTRUNFAIL_P1.f19_g16.A.melvin_gnu', 'TESTRUNPASS_P1.f19_g16.A.melvin_gnu', 'TESTRUNPASS_P1.f45_g37.A.melvin_gnu', 'TESTRUNPASS_P1.ne30_g16.A.melvin_gnu', 'TESTRUNPASS_P2.ne30_g16.A.melvin_gnu', 'TESTRUNPASS_P4.f45_g37.A.melvin_gnu', 'TESTRUNSTARCFAIL_P1.f19_g16.A.melvin_gnu', 'TESTTESTDIFF_P1.f19_g16.A.melvin_gnu'] + """ + expect(machine is not None, "Must define a machine") + expect(compiler is not None, "Must define a compiler") + e3sm_test_suites = get_test_suites() + + tests_to_run = set() + negations = set() + + for testarg in testargs: + # remove any whitespace in name + testarg = testarg.strip() + if testarg.startswith("^"): + negations.add(testarg[1:]) + elif testarg in e3sm_test_suites: + tests_to_run.update(get_test_suite(testarg, machine, compiler)) + else: + try: + tests_to_run.add( + CIME.utils.get_full_test_name( + testarg, machine=machine, compiler=compiler + ) + ) + except Exception: + if "." 
not in testarg: + expect(False, "Unrecognized test suite '{}'".format(testarg)) + else: + raise + + for negation in negations: + if negation in e3sm_test_suites: + tests_to_run -= set(get_test_suite(negation, machine, compiler)) + else: + fullname = CIME.utils.get_full_test_name( + negation, machine=machine, compiler=compiler + ) + if fullname in tests_to_run: + tests_to_run.remove(fullname) + + return list(sorted(tests_to_run)) + + +############################################################################### +def get_recommended_test_time(test_full_name): + ############################################################################### + """ + >>> get_recommended_test_time("ERS.f19_g16.A.melvin_gnu") + '0:10:00' + + >>> get_recommended_test_time("TESTRUNPASS_P69.f19_g16.A.melvin_gnu.testmod") + '0:13:00' + + >>> get_recommended_test_time("PET_Ln20.ne30_ne30.FC5.sandiatoss3_intel.cam-outfrq9s") + >>> + """ + best_time = None + suites = get_test_suites() + for suite in suites: + rec_time = get_test_data(suite)[1] + if ( + suite_has_test(suite, test_full_name, skip_inherit=True) + and rec_time is not None + and ( + best_time is None + or convert_to_seconds(rec_time) < convert_to_seconds(best_time) + ) + ): + best_time = rec_time + + return best_time + + +############################################################################### +def key_test_time(test_full_name): + ############################################################################### + result = get_recommended_test_time(test_full_name) + return 99999999 if result is None else convert_to_seconds(result) diff --git a/CIME/get_timing.py b/CIME/get_timing.py new file mode 100644 index 00000000000..a5526ccec8b --- /dev/null +++ b/CIME/get_timing.py @@ -0,0 +1,939 @@ +#!/usr/bin/env python3 + +""" +Library for implementing getTiming tool which gets timing +information from a run. 
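+
+The main entry point is get_timing(case, lid), which parses the driver timing
+summary for the run (model_timing stats for the CPL7 driver, ESMF_Profile.summary
+for CMEPS) and writes a timing report into the case's timing/ directory.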
+""" + +from CIME.XML.standard_module_setup import * +from CIME.utils import safe_copy +from CIME.status import append_case_status + +import datetime, re + +logger = logging.getLogger(__name__) + + +class _GetTimingInfo: + def __init__(self, name): + self.name = name + self.tmin = 0 + self.tmax = 0 + self.adays = 0 + + +class _TimingParser: + def __init__(self, case, lid="999999-999999"): + self.case = case + self.caseroot = case.get_value("CASEROOT") + self.lid = lid + self.finlines = None + self.fout = None + self.adays = 0 + self._driver = case.get_value("COMP_INTERFACE") + self.models = {} + self.ncount = 0 + self.nprocs = 0 + self.version = -1 + + def write(self, text): + self.fout.write(text) + + def prttime(self, label, offset=None, div=None, coff=-999): + if offset is None: + offset = self.models["CPL"].offset + if div is None: + div = self.adays + datalen = 20 + cstr = "<---->" + clen = len(cstr) + + minval, maxval, found = self.gettime(label) + if div >= 1.0: + mind = minval / div + maxd = maxval / div + else: + mind = minval + maxd = maxval + + pstrlen = 25 + if mind >= 0 and maxd >= 0 and found: + if coff >= 0: + zoff = pstrlen + coff + int((datalen - clen) / 2) + csp = offset - coff - int((datalen - clen) / 2) + self.write( + " {label:<{width1}}{cstr:<{width2}} {minv:8.3f}:{maxv:8.3f} \n".format( + label=label, + width1=zoff, + cstr=cstr, + width2=csp, + minv=mind, + maxv=maxd, + ) + ) + else: + zoff = pstrlen + offset + self.write( + " {label:<{width1}} {minv:8.3f}:{maxv:8.3f} \n".format( + label=label, width1=zoff, minv=mind, maxv=maxd + ) + ) + + def gettime2(self, heading_padded): + if self._driver == "mct" or self._driver == "moab": + return self._gettime2_mct(heading_padded) + elif self._driver == "nuopc": + if self.version < 0: + self._get_esmf_profile_version() + return self._gettime2_nuopc() + + def _gettime2_mct(self, heading_padded): + nprocs = 0 + ncount = 0 + + heading = '"' + heading_padded.strip() + '"' + for line in self.finlines: + m = re.match(r"\s*{}\s+\S\s+(\d+)\s*\d+\s*(\S+)".format(heading), line) + if m: + nprocs = int(float(m.groups()[0])) + ncount = int(float(m.groups()[1])) + return (nprocs, ncount) + else: + m = re.match(r"\s*{}\s+\S\s+(\d+)\s".format(heading), line) + if m: + nprocs = 1 + ncount = int(float(m.groups()[0])) + return (nprocs, ncount) + return (0, 0) + + def _gettime2_nuopc(self): + self.nprocs = 0 + self.ncount = 0 + if self.version < 0: + self._get_esmf_profile_version() + if self.version == 0: + expression = re.compile(r"\s*\[ATM]\s*RunPhase1\s+(\d+)\s+(\d+)") + else: + expression = re.compile(r"\s*\[ATM]\s*RunPhase1\s+\d+\s+(\d+)\s+(\d+)") + + for line in self.finlines: + match = expression.match(line) + if match: + self.nprocs = int(match.group(1)) + self.ncount = int(match.group(2)) + return (self.nprocs, self.ncount) + + return (0, 0) + + def gettime(self, heading_padded): + if self._driver == "mct" or self._driver == "moab": + return self._gettime_mct(heading_padded) + elif self._driver == "nuopc": + if self.version < 0: + self._get_esmf_profile_version() + return self._gettime_nuopc(heading_padded) + + def _gettime_mct(self, heading_padded): + found = False + heading = '"' + heading_padded.strip() + '"' + minval = 0 + maxval = 0 + for line in self.finlines: + m = re.match( + r"\s*{}\s+\S\s+\d+\s*\d+\s*\S+\s*\S+\s*(\d*\.\d+)\s*\(.*\)\s*(\d*\.\d+)\s*\(.*\)".format( + heading + ), + line, + ) + if m: + maxval = float(m.groups()[0]) + minval = float(m.groups()[1]) + found = True + return (minval, maxval, found) + return (0, 0, 
False) + + def _get_esmf_profile_version(self): + """ + Prior to ESMF8_3_0_beta_snapshot_04 the PEs column was not in ESMF_Profile.summary + this routine looks for that in the header field to determine if this file was produced + by a newer (version 1) or older (version 0) ESMF library. + """ + expect(self.finlines, " No ESMF_Profile.summary file found") + for line in self.finlines: + if line.startswith("Region"): + if "PEs" in line: + self.version = 1 + else: + self.version = 0 + + def _gettime_nuopc(self, heading, instance="0001"): + if instance == "": + instance = "0001" + minval = 0 + maxval = 0 + m = None + timeline = [] + # PETs Count Mean (s) Min (s) Min PET Max (s) Max PET + timeline.append( + re.compile( + r"\s*{}\s+\d+\s+\d+\s+(\d*\.\d+)\s+(\d*\.\d+)\s+\d+\s+(\d*\.\d+)\s+\d+".format( + re.escape(heading) + ) + ) + ) + # PETs PEs Count Mean (s) Min (s) Min PET Max (s) Max PET + timeline.append( + re.compile( + r"\s*{}\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+(\d*\.\d+)\s+\d+\s+(\d*\.\d+)\s+\d+".format( + re.escape(heading) + ) + ) + ) + phase = None + for line in self.finlines: + phase = self._get_nuopc_phase(line, instance, phase) + if phase != "run" and not "[ensemble]" in heading: + continue + if heading in line: + m = timeline[self.version].match(line) + if m: + minval = float(m.group(2)) + maxval = float(m.group(3)) + return (minval, maxval, True) + else: + expect(False, "Parsing error in ESMF_Profile.summary file") + + return (0, 0, False) + + @staticmethod + def _get_nuopc_phase(line, instance, phase): + if "[ensemble] Init 1" in line: + phase = "init" + elif "[ESM" + instance + "] RunPhase1" in line: + phase = "run" + elif "[ESM" + instance + "] Finalize" in line: + phase = "finalize" + elif "[ESM" in line and "RunPhase1" in line: + phase = "other" + return phase + + def getMEDtime(self, instance): + if instance == "": + instance = "0001" + + med_phase_line = [] + med_connector_line = [] + med_fraction_line = [] + med_phase_line.append( + re.compile(r"\s*(\[MED\] med_phases\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_connector_line.append( + re.compile(r"\s*(\[MED\] med_connectors\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_fraction_line.append( + re.compile(r"\s*(\[MED\] med_fraction\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_phase_line.append( + re.compile(r"\s*(\[MED\] med_phases\S+)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_connector_line.append( + re.compile( + r"\s*(\[MED\] med_connectors\S+)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+" + ) + ) + med_fraction_line.append( + re.compile( + r"\s*(\[MED\] med_fraction\S+)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+" + ) + ) + + m = None + minval = 0 + maxval = 0 + phase = None + for line in self.finlines: + phase = self._get_nuopc_phase(line, instance, phase) + if phase != "run": + continue + m = med_phase_line[self.version].match(line) + if not m: + m = med_connector_line[self.version].match(line) + if not m: + m = med_fraction_line[self.version].match(line) + if m: + minval += float(m.group(2)) + maxval += float(m.group(2)) + + return (minval, maxval) + + def getCOMMtime(self, instance): + if instance == "": + instance = "0001" + comm_line = [] + comm_line.append( + re.compile(r"\s*(\[\S+-TO-\S+\] RunPhase1)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + comm_line.append( + re.compile( + r"\s*(\[\S+-TO-\S+\] RunPhase1)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+" + ) + ) + m = None + maxval = 0 + phase = None + for line in self.finlines: + phase = self._get_nuopc_phase(line, instance, phase) + if phase != "run": + continue + m = 
comm_line[self.version].match(line) + if m: + heading = m.group(1) + maxv = float(m.group(2)) + maxval += maxv + logger.debug("{} time={} sum={}".format(heading, maxv, maxval)) + return maxval + + def getTiming(self): + ninst = 1 + multi_driver = self.case.get_value("MULTI_DRIVER") + if multi_driver: + ninst = self.case.get_value("NINST_MAX") + + if ninst > 1: + for inst in range(ninst): + self._getTiming(inst + 1) + else: + self._getTiming() + + def _getTiming(self, inst=0): + components = self.case.get_values("COMP_CLASSES") + for s in components: + self.models[s] = _GetTimingInfo(s) + atm = None + lnd = None + rof = None + ice = None + ocn = None + glc = None + cpl = None + if "ATM" in self.models: + atm = self.models["ATM"] + if "LND" in self.models: + lnd = self.models["LND"] + if "ROF" in self.models: + rof = self.models["ROF"] + if "ICE" in self.models: + ice = self.models["ICE"] + if "OCN" in self.models: + ocn = self.models["OCN"] + if "GLC" in self.models: + glc = self.models["GLC"] + if "CPL" in self.models: + cpl = self.models["CPL"] + + cime_model = self.case.get_value("MODEL") + caseid = self.case.get_value("CASE") + mach = self.case.get_value("MACH") + user = self.case.get_value("USER") + continue_run = self.case.get_value("CONTINUE_RUN") + rundir = self.case.get_value("RUNDIR") + run_type = self.case.get_value("RUN_TYPE") + ncpl_base_period = self.case.get_value("NCPL_BASE_PERIOD") + ncpl = 0 + ocn_ncpl = None + for compclass in self.case.get_values("COMP_CLASSES"): + comp_ncpl = self.case.get_value("{}_NCPL".format(compclass)) + if compclass == "OCN": + ocn_ncpl = comp_ncpl + if comp_ncpl is not None: + ncpl = max(ncpl, comp_ncpl) + + compset = self.case.get_value("COMPSET") + if compset is None: + compset = "" + grid = self.case.get_value("GRID") + run_type = self.case.get_value("RUN_TYPE") + stop_option = self.case.get_value("STOP_OPTION") + stop_n = self.case.get_value("STOP_N") + + cost_pes = self.case.get_value("COST_PES") + costpes_per_node = self.case.get_value("COSTPES_PER_NODE") + + totalpes = self.case.get_value("TOTALPES") + max_mpitasks_per_node = self.case.get_value("MAX_MPITASKS_PER_NODE") + smt_factor = max( + 1, int(self.case.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node) + ) + + if cost_pes > 0: + pecost = cost_pes + elif costpes_per_node: + pecost = self.case.num_nodes * costpes_per_node + else: + pecost = totalpes + + for m in self.models.values(): + for key in ["NTASKS", "ROOTPE", "PSTRID", "NTHRDS", "NINST"]: + if key == "NINST" and m.name == "CPL": + m.ninst = 1 + else: + setattr( + m, + key.lower(), + int(self.case.get_value("{}_{}".format(key, m.name))), + ) + + m.comp = self.case.get_value("COMP_{}".format(m.name)) + m.pemax = m.rootpe + m.ntasks * m.pstrid - 1 + + now = datetime.datetime.ctime(datetime.datetime.now()) + inittype = "FALSE" + if (run_type == "startup" or run_type == "hybrid") and not continue_run: + inittype = "TRUE" + + if inst > 0: + inst_label = "_{:04d}".format(inst) + else: + inst_label = "" + if self._driver == "mct" or self._driver == "moab": + binfilename = os.path.join( + rundir, "timing", "model_timing{}_stats".format(inst_label) + ) + finfilename = os.path.join( + self.caseroot, + "timing", + "{}_timing{}_stats.{}".format(cime_model, inst_label, self.lid), + ) + elif self._driver == "nuopc": + binfilename = os.path.join(rundir, "ESMF_Profile.summary") + finfilename = os.path.join( + self.caseroot, + "timing", + "{}.ESMF_Profile.summary.{}".format(cime_model, self.lid), + ) + + foutfilename = os.path.join( + 
self.caseroot, + "timing", + "{}_timing{}.{}.{}".format(cime_model, inst_label, caseid, self.lid), + ) + + timingDir = os.path.join(self.caseroot, "timing") + if not os.path.isfile(binfilename): + logger.warning("No timing file found in run directory") + return + + if not os.path.isdir(timingDir): + os.makedirs(timingDir) + + safe_copy(binfilename, finfilename) + + os.chdir(self.caseroot) + try: + fin = open(finfilename, "r") + self.finlines = fin.readlines() + fin.close() + except Exception as e: + logger.critical("Unable to open file {}".format(finfilename)) + raise e + + tlen = 1.0 + if ncpl_base_period == "decade": + tlen = 3650.0 + elif ncpl_base_period == "year": + tlen = 365.0 + elif ncpl_base_period == "day": + tlen = 1.0 + elif ncpl_base_period == "hour": + tlen = 1.0 / 24.0 + else: + logger.warning("Unknown NCPL_BASE_PERIOD={}".format(ncpl_base_period)) + + # at this point the routine becomes driver specific + if self._driver == "mct" or self._driver == "moab": + nprocs, ncount = self.gettime2("CPL:CLOCK_ADVANCE ") + nsteps = ncount / nprocs + elif self._driver == "nuopc": + nprocs, nsteps = self.gettime2("") + adays = nsteps * tlen / ncpl + odays = nsteps * tlen / ncpl + if ocn_ncpl and inittype == "TRUE": + odays = odays - (tlen / ocn_ncpl) + + peminmax = max([m.rootpe for m in self.models.values()]) + 1 + if ncpl_base_period in ["decade", "year", "day"] and int(adays) > 0: + adays = int(adays) + if tlen % ocn_ncpl == 0: + odays = int(odays) + self.adays = adays + maxoffset = 40 + extraoff = 20 + for m in self.models.values(): + m.offset = int((maxoffset * m.rootpe) / peminmax) + extraoff + if cpl: + cpl.offset = 0 + try: + self.fout = open(foutfilename, "w") + except Exception as e: + logger.critical("Could not open file for writing: {}".format(foutfilename)) + raise e + + self.write("---------------- TIMING PROFILE ---------------------\n") + + self.write(" Case : {}\n".format(caseid)) + self.write(" LID : {}\n".format(self.lid)) + self.write(" Machine : {}\n".format(mach)) + self.write(" Caseroot : {}\n".format(self.caseroot)) + self.write(" Timeroot : {}/Tools\n".format(self.caseroot)) + self.write(" User : {}\n".format(user)) + self.write(" Curr Date : {}\n".format(now)) + if self._driver == "nuopc": + self.write(" Driver : CMEPS\n") + elif self._driver == "mct" or self._driver == "moab": + self.write(" Driver : CPL7\n") + + self.write(" grid : {}\n".format(grid)) + self.write(" compset : {}\n".format(compset)) + self.write( + " run type : {}, continue_run = {} (inittype = {})\n".format( + run_type, str(continue_run).upper(), inittype + ) + ) + self.write(" stop option : {}, stop_n = {}\n".format(stop_option, stop_n)) + self.write(" run length : {} days ({} for ocean)\n\n".format(adays, odays)) + + self.write( + " component comp_pes root_pe tasks " + "x threads" + " instances (stride) \n" + ) + self.write( + " --------- ------ ------- ------ " + "------ --------- ------ \n" + ) + maxthrds = 0 + xmax = 0 + for k in self.case.get_values("COMP_CLASSES"): + m = self.models[k] + if m.comp == "cpl": + comp_label = m.comp + inst_label + else: + comp_label = m.comp + self.write( + " {} = {:<8s} {:<6d} {:<6d} {:<6d} x {:<6d} {:<6d} ({:<6d}) \n".format( + m.name.lower(), + comp_label, + (m.ntasks * m.nthrds), + m.rootpe, + m.ntasks, + m.nthrds, + m.ninst, + m.pstrid, + ) + ) + if m.nthrds > maxthrds: + maxthrds = m.nthrds + if self._driver == "nuopc": + for k in components: + m = self.models[k] + if k != "CPL": + m.tmin, m.tmax, _ = self._gettime_nuopc( + " [{}] RunPhase1 
".format(m.name), inst_label[1:] + ) + else: + m.tmin, m.tmax = self.getMEDtime(inst_label[1:]) + nmax = self.gettime("[ensemble] Init 1")[1] + tmax = self.gettime("[ensemble] RunPhase1")[1] + fmax = self.gettime("[ensemble] FinalizePhase1")[1] + xmax = self.getCOMMtime(inst_label[1:]) + + if self._driver == "mct" or self._driver == "moab": + for k in components: + if k != "CPL": + m = self.models[k] + m.tmin, m.tmax, _ = self.gettime(" CPL:{}_RUN ".format(m.name)) + nmax = self.gettime(" CPL:INIT ")[1] + tmax = self.gettime(" CPL:RUN_LOOP ")[1] + wtmin = self.gettime(" CPL:TPROF_WRITE ")[0] + fmax = self.gettime(" CPL:FINAL ")[1] + otmin, otmax, _ = self.gettime(" CPL:OCNT_RUN ") + + # pick OCNT_RUN for tight coupling + if otmax > ocn.tmax: + ocn.tmin = otmin + ocn.tmax = otmax + + cpl.tmin, cpl.tmax, _ = self.gettime(" CPL:RUN ") + xmax = self.gettime(" CPL:COMM ")[1] + ocnwaittime = self.gettime(" CPL:C2O_INITWAIT")[0] + + if odays != 0: + ocnrunitime = ocn.tmax * (adays / odays - 1.0) + else: + ocnrunitime = 0.0 + + correction = max(0, ocnrunitime - ocnwaittime) + + tmax = tmax + wtmin + correction + ocn.tmax += ocnrunitime + + for m in self.models.values(): + m.tmaxr = 0 + if m.tmax > 0: + m.tmaxr = adays * 86400.0 / (m.tmax * 365.0) + xmaxr = 0 + if xmax > 0: + xmaxr = adays * 86400.0 / (xmax * 365.0) + tmaxr = 0 + if tmax > 0: + tmaxr = adays * 86400.0 / (tmax * 365.0) + + self.write("\n") + self.write(" total pes active : {} \n".format(totalpes * smt_factor)) + self.write(" mpi tasks per node : {} \n".format(max_mpitasks_per_node)) + self.write(" pe count for cost estimate : {} \n".format(pecost)) + self.write("\n") + + self.write(" Overall Metrics: \n") + if adays > 0: + self.write( + " Model Cost: {:10.2f} pe-hrs/simulated_year \n".format( + (tmax * 365.0 * pecost) / (3600.0 * adays) + ) + ) + if tmax > 0: + self.write( + " Model Throughput: {:10.2f} simulated_years/day \n".format( + (86400.0 * adays) / (tmax * 365.0) + ) + ) + + self.write("\n") + + self.write(" Init Time : {:10.3f} seconds \n".format(nmax)) + if adays > 0: + self.write( + " Run Time : {:10.3f} seconds {:10.3f} seconds/day \n".format( + tmax, tmax / adays + ) + ) + self.write(" Final Time : {:10.3f} seconds \n".format(fmax)) + + self.write("\n") + if self._driver == "mct" or self._driver == "moab": + self.write( + " Actual Ocn Init Wait Time : {:10.3f} seconds \n".format( + ocnwaittime + ) + ) + self.write( + " Estimated Ocn Init Run Time : {:10.3f} seconds \n".format( + ocnrunitime + ) + ) + self.write( + " Estimated Run Time Correction : {:10.3f} seconds \n".format( + correction + ) + ) + self.write( + " (This correction has been applied to the ocean and" + " total run times) \n" + ) + + self.write("\n") + self.write( + "Runs Time in total seconds, seconds/model-day, and" + " model-years/wall-day \n" + ) + self.write( + "CPL Run Time represents time in CPL pes alone, " + "not including time associated with data exchange " + "with other components \n" + ) + self.write("\n") + + if adays > 0: + self.write( + " TOT Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format( + tmax, tmax / adays, tmaxr + ) + ) + for k in self.case.get_values("COMP_CLASSES"): + m = self.models[k] + self.write( + " {} Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format( + k, m.tmax, m.tmax / adays, m.tmaxr + ) + ) + self.write( + " CPL COMM Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format( + xmax, xmax / adays, xmaxr + ) + ) + + pstrlen = 25 + hoffset = 1 
+ self.write(" NOTE: min:max driver timers (seconds/day): \n") + + for k in self.case.get_values("COMP_CLASSES"): + m = self.models[k] + xspace = (pstrlen + hoffset + m.offset) * " " + self.write( + " {} {} (pes {:d} to {:d}) \n".format(xspace, k, m.rootpe, m.pemax) + ) + self.write("\n") + + self.prttime(" CPL:CLOCK_ADVANCE ") + self.prttime(" CPL:OCNPRE1_BARRIER ") + self.prttime(" CPL:OCNPRE1 ") + self.prttime(" CPL:ATMOCN1_BARRIER ") + self.prttime(" CPL:ATMOCN1 ") + self.prttime(" CPL:OCNPREP_BARRIER ") + self.prttime(" CPL:OCNPREP ") + self.prttime( + " CPL:C2O_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:C2O ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:LNDPREP_BARRIER ") + self.prttime(" CPL:LNDPREP ") + self.prttime(" CPL:C2L_BARRIER ", offset=lnd.offset, coff=cpl.offset) + self.prttime(" CPL:C2L ", offset=lnd.offset, coff=cpl.offset) + self.prttime(" CPL:ICEPREP_BARRIER ") + self.prttime(" CPL:ICEPREP ") + self.prttime(" CPL:C2I_BARRIER ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:C2I ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:WAVPREP_BARRIER ") + self.prttime(" CPL:WAVPREP ") + self.prttime(" CPL:C2W_BARRIER ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:C2W ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:ROFPREP_BARRIER ") + self.prttime(" CPL:ROFPREP ") + self.prttime(" CPL:C2R_BARRIER ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:C2R ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:ICE_RUN_BARRIER ", offset=ice.offset) + self.prttime(" CPL:ICE_RUN ", offset=ice.offset) + self.prttime(" CPL:LND_RUN_BARRIER ", offset=lnd.offset) + self.prttime(" CPL:LND_RUN ", offset=lnd.offset) + self.prttime(" CPL:ROF_RUN_BARRIER ", offset=rof.offset) + self.prttime(" CPL:ROF_RUN ", offset=rof.offset) + self.prttime(" CPL:WAV_RUN_BARRIER ", offset=rof.offset) + self.prttime(" CPL:WAV_RUN ", offset=rof.offset) + self.prttime(" CPL:OCNT_RUN_BARRIER ", offset=ocn.offset, div=odays) + self.prttime(" CPL:OCNT_RUN ", offset=ocn.offset, div=odays) + self.prttime( + " CPL:O2CT_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:O2CT ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:OCNPOSTT_BARRIER ") + self.prttime(" CPL:OCNPOSTT ") + self.prttime(" CPL:ATMOCNP_BARRIER ") + self.prttime(" CPL:ATMOCNP ") + self.prttime(" CPL:L2C_BARRIER ", offset=lnd.offset, coff=cpl.offset) + self.prttime(" CPL:L2C ", offset=lnd.offset, div=cpl.offset) + self.prttime(" CPL:LNDPOST_BARRIER ") + self.prttime(" CPL:LNDPOST ") + self.prttime(" CPL:GLCPREP_BARRIER ") + self.prttime(" CPL:GLCPREP ") + self.prttime(" CPL:C2G_BARRIER ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:C2G ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:R2C_BARRIER ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:R2C ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:ROFPOST_BARRIER ") + self.prttime(" CPL:ROFPOST ") + self.prttime(" CPL:BUDGET1_BARRIER ") + self.prttime(" CPL:BUDGET1 ") + self.prttime(" CPL:I2C_BARRIER ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:I2C ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:ICEPOST_BARRIER ") + self.prttime(" CPL:ICEPOST ") + self.prttime(" CPL:FRACSET_BARRIER ") + self.prttime(" CPL:FRACSET ") + self.prttime(" CPL:ATMOCN2_BARRIER ") + self.prttime(" CPL:ATMOCN2 ") + self.prttime(" CPL:OCNPRE2_BARRIER ") + self.prttime(" CPL:OCNPRE2 ") + 
self.prttime( + " CPL:C2O2_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:C2O2 ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:ATMOCNQ_BARRIER") + self.prttime(" CPL:ATMOCNQ ") + self.prttime(" CPL:ATMPREP_BARRIER ") + self.prttime(" CPL:ATMPREP ") + self.prttime(" CPL:C2A_BARRIER ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:C2A ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:OCN_RUN_BARRIER ", offset=ocn.offset, div=odays) + self.prttime(" CPL:OCN_RUN ", offset=ocn.offset, div=odays) + self.prttime(" CPL:ATM_RUN_BARRIER ", offset=atm.offset) + self.prttime(" CPL:ATM_RUN ", offset=atm.offset) + self.prttime(" CPL:GLC_RUN_BARRIER ", offset=glc.offset) + self.prttime(" CPL:GLC_RUN ", offset=glc.offset) + self.prttime(" CPL:W2C_BARRIER ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:W2C ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:WAVPOST_BARRIER ") + self.prttime(" CPL:WAVPOST ", cpl.offset) + self.prttime(" CPL:G2C_BARRIER ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:G2C ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:GLCPOST_BARRIER ") + self.prttime(" CPL:GLCPOST ") + self.prttime(" CPL:A2C_BARRIER ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:A2C ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:ATMPOST_BARRIER ") + self.prttime(" CPL:ATMPOST ") + self.prttime(" CPL:BUDGET2_BARRIER ") + self.prttime(" CPL:BUDGET2 ") + self.prttime(" CPL:BUDGET3_BARRIER ") + self.prttime(" CPL:BUDGET3 ") + self.prttime(" CPL:BUDGETF_BARRIER ") + self.prttime(" CPL:BUDGETF ") + self.prttime( + " CPL:O2C_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:O2C ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:OCNPOST_BARRIER ") + self.prttime(" CPL:OCNPOST ") + self.prttime(" CPL:RESTART_BARRIER ") + self.prttime(" CPL:RESTART") + self.prttime(" CPL:HISTORY_BARRIER ") + self.prttime(" CPL:HISTORY ") + self.prttime(" CPL:TSTAMP_WRITE ") + self.prttime(" CPL:TPROF_WRITE ") + self.prttime(" CPL:RUN_LOOP_BSTOP ") + + self.write("\n\n") + self.write("More info on coupler timing:\n") + + self.write("\n") + self.prttime(" CPL:OCNPRE1 ") + self.prttime(" CPL:ocnpre1_atm2ocn ") + + self.write("\n") + self.prttime(" CPL:OCNPREP ") + self.prttime(" CPL:OCNPRE2 ") + self.prttime(" CPL:ocnprep_avg ") + self.prttime(" CPL:ocnprep_diagav ") + + self.write("\n") + self.prttime(" CPL:LNDPREP ") + self.prttime(" CPL:lndprep_atm2lnd ") + self.prttime(" CPL:lndprep_mrgx2l ") + self.prttime(" CPL:lndprep_diagav ") + + self.write("\n") + self.prttime(" CPL:ICEPREP ") + self.prttime(" CPL:iceprep_ocn2ice ") + self.prttime(" CPL:iceprep_atm2ice ") + self.prttime(" CPL:iceprep_mrgx2i ") + self.prttime(" CPL:iceprep_diagav ") + + self.write("\n") + self.prttime(" CPL:WAVPREP ") + self.prttime(" CPL:wavprep_atm2wav ") + self.prttime(" CPL:wavprep_ocn2wav ") + self.prttime(" CPL:wavprep_ice2wav ") + self.prttime(" CPL:wavprep_mrgx2w ") + self.prttime(" CPL:wavprep_diagav ") + + self.write("\n") + self.prttime(" CPL:ROFPREP ") + self.prttime(" CPL:rofprep_l2xavg ") + self.prttime(" CPL:rofprep_lnd2rof ") + self.prttime(" CPL:rofprep_mrgx2r ") + self.prttime(" CPL:rofprep_diagav ") + + self.write("\n") + self.prttime(" CPL:GLCPREP ") + self.prttime(" CPL:glcprep_avg ") + self.prttime(" CPL:glcprep_lnd2glc ") + self.prttime(" CPL:glcprep_mrgx2g ") + self.prttime(" CPL:glcprep_diagav ") + + self.write("\n") + self.prttime(" 
CPL:ATMPREP ") + self.prttime(" CPL:atmprep_xao2atm ") + self.prttime(" CPL:atmprep_ocn2atm ") + self.prttime(" CPL:atmprep_alb2atm ") + self.prttime(" CPL:atmprep_ice2atm ") + self.prttime(" CPL:atmprep_lnd2atm ") + self.prttime(" CPL:atmprep_mrgx2a ") + self.prttime(" CPL:atmprep_diagav ") + + self.write("\n") + self.prttime(" CPL:ATMOCNP ") + self.prttime(" CPL:ATMOCN1 ") + self.prttime(" CPL:ATMOCN2 ") + self.prttime(" CPL:atmocnp_ice2ocn ") + self.prttime(" CPL:atmocnp_wav2ocn ") + self.prttime(" CPL:atmocnp_fluxo ") + self.prttime(" CPL:atmocnp_fluxe ") + self.prttime(" CPL:atmocnp_mrgx2o ") + self.prttime(" CPL:atmocnp_accum ") + self.prttime(" CPL:atmocnp_ocnalb ") + + self.write("\n") + self.prttime(" CPL:ATMOCNQ ") + self.prttime(" CPL:atmocnq_ocn2atm ") + self.prttime(" CPL:atmocnq_fluxa ") + self.prttime(" CPL:atmocnq_atm2ocnf ") + + self.write("\n") + self.prttime(" CPL:OCNPOSTT ") + self.prttime(" CPL:OCNPOST ") + self.prttime(" CPL:ocnpost_diagav ") + + self.write("\n") + self.prttime(" CPL:LNDPOST ") + self.prttime(" CPL:lndpost_diagav ") + self.prttime(" CPL:lndpost_acc2lr ") + self.prttime(" CPL:lndpost_acc2lg ") + + self.write("\n") + self.prttime(" CPL:ROFOST ") + self.prttime(" CPL:rofpost_diagav ") + self.prttime(" CPL:rofpost_histaux ") + self.prttime(" CPL:rofpost_rof2lnd ") + self.prttime(" CPL:rofpost_rof2ice ") + self.prttime(" CPL:rofpost_rof2ocn ") + + self.write("\n") + self.prttime(" CPL:ICEPOST ") + self.prttime(" CPL:icepost_diagav ") + + self.write("\n") + self.prttime(" CPL:WAVPOST ") + self.prttime(" CPL:wavpost_diagav ") + + self.write("\n") + self.prttime(" CPL:GLCPOST ") + self.prttime(" CPL:glcpost_diagav ") + self.prttime(" CPL:glcpost_glc2lnd ") + self.prttime(" CPL:glcpost_glc2ice ") + self.prttime(" CPL:glcpost_glc2ocn ") + + self.write("\n") + self.prttime(" CPL:ATMPOST ") + self.prttime(" CPL:atmpost_diagav ") + + self.write("\n") + self.prttime(" CPL:BUDGET ") + self.prttime(" CPL:BUDGET1 ") + self.prttime(" CPL:BUDGET2 ") + self.prttime(" CPL:BUDGET3 ") + self.prttime(" CPL:BUDGETF ") + self.write("\n\n") + + self.fout.close() + + +def get_timing(case, lid): + parser = _TimingParser(case, lid) + parser.getTiming() + if case._gitinterface: + case._gitinterface._git_command("add", "*." 
+ lid) + append_case_status( + "", + "", + msg="Timing files created for run {}".format(lid), + gitinterface=case._gitinterface, + ) diff --git a/CIME/gitinterface.py b/CIME/gitinterface.py new file mode 100644 index 00000000000..790f1fc70f2 --- /dev/null +++ b/CIME/gitinterface.py @@ -0,0 +1,127 @@ +import sys, shutil, re +from CIME.utils import run_cmd_no_fail +from pathlib import Path + + +class GitInterface: + def __init__(self, repo_path, logger, branch=None): + major = 0 + minor = 0 + self.logger = logger + self._defined = False + if shutil.which("git"): + version = run_cmd_no_fail("git --version") + result = re.findall(r"([0-9]+)\.([0-9]+)\.?[0-9]*", version) + major = int(result[0][0]) + minor = int(result[0][1]) + if major < 2 or (major == 2 and minor < 28): + logger.warning( + "Git not found or git version too old for cesm git interface {} {}".format( + major, minor + ) + ) + return + + logger.debug("Initialize GitInterface for {}".format(repo_path)) + self._defined = True + if isinstance(repo_path, str): + self.repo_path = Path(repo_path).resolve() + elif isinstance(repo_path, Path): + self.repo_path = repo_path.resolve() + else: + raise TypeError("repo_path must be a str or Path object") + try: + import git + + self._use_module = True + try: + self.repo = git.Repo(str(self.repo_path)) # Initialize GitPython repo + except git.exc.InvalidGitRepositoryError: + self.git = git + self._init_git_repo(branch=branch) + msg = "Using GitPython interface to git" + except ImportError: + self._use_module = False + if not (self.repo_path / ".git").exists(): + self._init_git_repo(branch=branch) + msg = "Using shell interface to git" + logger.debug(msg) + + def _git_command(self, operation, *args): + if not self._defined: + return + self.logger.debug(operation) + if self._use_module and operation != "submodule": + try: + return getattr(self.repo.git, operation)(*args) + except Exception as e: + sys.exit(e) + else: + return ["git", "-C", str(self.repo_path), operation] + list(args) + + def _init_git_repo(self, branch=None): + if not self._defined: + return + if self._use_module: + self.repo = self.git.Repo.init(str(self.repo_path)) + if branch: + self.git_operation("checkout", "-b", branch) + else: + command = ["git", "-C", str(self.repo_path), "init"] + if branch: + command.extend(["-b", branch]) + run_cmd_no_fail(" ".join(command)) + + # pylint: disable=unused-argument + def git_operation(self, operation, *args, **kwargs): + if not self._defined: + return + command = self._git_command(operation, *args) + if isinstance(command, list): + try: + return run_cmd_no_fail(" ".join(command)) + except Exception as e: + sys.exit(e) + else: + return command + + def config_get_value(self, section, name): + if not self._defined: + return + if self._use_module: + config = self.repo.config_reader() + try: + val = config.get_value(section, name) + except: + val = None + return val + else: + cmd = ( + "git", + "-C", + str(self.repo_path), + "config", + "--get", + f"{section}.{name}", + ) + output = run_cmd_no_fail(cmd) + return output.strip() + + def config_set_value(self, section, name, value): + if not self._defined: + return + if self._use_module: + with self.repo.config_writer() as writer: + writer.set_value(section, name, value) + writer.release() # Ensure changes are saved + else: + cmd = ( + "git", + "-C", + str(self.repo_path), + "config", + f"{section}.{name}", + value, + ) + self.logger.debug(cmd) + run_cmd_no_fail(cmd) diff --git a/CIME/hist_utils.py b/CIME/hist_utils.py new file mode 100644 index 
00000000000..30ff1920a66 --- /dev/null +++ b/CIME/hist_utils.py @@ -0,0 +1,810 @@ +""" +Functions for actions pertaining to history files. +""" +import logging +import os +import re +import filecmp + +from CIME.XML.standard_module_setup import * +from CIME.config import Config +from CIME.test_status import TEST_NO_BASELINES_COMMENT, TEST_STATUS_FILENAME +from CIME.utils import ( + get_current_commit, + get_timestamp, + safe_copy, + SharedArea, + parse_test_name, +) +from CIME.utils import CIMEError + +logger = logging.getLogger(__name__) + +BLESS_LOG_NAME = "bless_log" + +# ------------------------------------------------------------------------ +# Strings used in the comments generated by cprnc +# ------------------------------------------------------------------------ + +CPRNC_FIELDLISTS_DIFFER = "files differ only in their field lists" + +# ------------------------------------------------------------------------ +# Strings used in the comments generated by _compare_hists +# ------------------------------------------------------------------------ + +NO_COMPARE = "had no compare counterpart" +NO_ORIGINAL = "had no original counterpart" +FIELDLISTS_DIFFER = "had a different field list from" +DIFF_COMMENT = "did NOT match" +FAILED_OPEN = "Failed to open file" +IDENTICAL = "the two files seem to be IDENTICAL" +# COMPARISON_COMMENT_OPTIONS should include all of the above: these are any of the special +# comment strings that describe the reason for a comparison failure +COMPARISON_COMMENT_OPTIONS = set( + [NO_COMPARE, NO_ORIGINAL, FIELDLISTS_DIFFER, DIFF_COMMENT] +) +# Comments that indicate a true baseline comparison failure +COMPARISON_FAILURE_COMMENT_OPTIONS = COMPARISON_COMMENT_OPTIONS - set( + [NO_COMPARE, FIELDLISTS_DIFFER] +) + +NO_HIST_TESTS = ["IRT", "PFS", "TSC"] +ALL_HIST_TESTS = ["MVK", "MVKO", "PGN", "TSC"] + + +def _iter_model_file_substrs(case): + models = case.get_compset_components() + models.append("cpl") + for model in models: + yield model + + +def copy_histfiles(case, suffix, match_suffix=None): + """Copy the most recent batch of hist files in a case, adding the given suffix. + + This can allow you to temporarily "save" these files so they won't be blown + away if you re-run the case. + + case - The case containing the files you want to save + suffix - The string suffix you want to add to saved files, this can be used to find them later. 
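+    match_suffix - Optional; if given, it is passed through to
+                   get_latest_hist_files so that only existing hist files
+                   already carrying this suffix are copied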
+ + returns (comments, num_copied) + """ + rundir = case.get_value("RUNDIR") + ref_case = case.get_value("RUN_REFCASE") + casename = case.get_value("CASE") + # Loop over models + archive = case.get_env("archive") + comments = "Copying hist files to suffix '{}'\n".format(suffix) + num_copied = 0 + for model in _iter_model_file_substrs(case): + if case.get_value("TEST") and archive.exclude_testing(model): + logger.info( + "Case is a test and component %r is excluded from comparison", model + ) + + continue + test_hists = archive.get_latest_hist_files( + casename, model, rundir, suffix=match_suffix, ref_case=ref_case + ) + num_copied += len(test_hists) + for test_hist in test_hists: + test_hist = os.path.join(rundir, test_hist) + if not test_hist.endswith(".nc") or "once" in os.path.basename(test_hist): + logger.info("Will not compare non-netcdf file {}".format(test_hist)) + continue + if model == "mom": + if "ocean_geometry" in test_hist: + comments += " skipping '{}'\n".format(test_hist) + continue + if "mom6.ic" in test_hist: + comments += " skipping '{}'\n".format(test_hist) + continue + comments += " Copying hist files for model '{}'\n".format(model) + new_file = "{}.{}".format(test_hist, suffix) + if os.path.exists(new_file): + os.remove(new_file) + + comments += " Copying '{}' to '{}'\n".format(test_hist, new_file) + + # Need to copy rather than move in case there are some history files + # that will need to continue to be filled on the next phase; this + # can be the case for a restart run. + # + # (If it weren't for that possibility, a move/rename would be more + # robust here: The problem with a copy is that there can be + # confusion after the second run as to which files were created by + # the first run and which by the second. For example, if the second + # run fails to output any history files, the test will still pass, + # because the test system will think that run1's files were output + # by run2. But we live with that downside for the sake of the reason + # noted above.) + safe_copy(test_hist, new_file) + + expect( + num_copied > 0, + "copy_histfiles failed: no hist files found in rundir '{}'".format(rundir), + ) + + return comments, num_copied + + +def rename_all_hist_files(case, suffix): + """Renaming all hist files in a case, adding the given suffix. + + case - The case containing the files you want to save + suffix - The string suffix you want to add to saved files, this can be used to find them later. 
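+
+    returns comments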
+ """ + rundir = case.get_value("RUNDIR") + ref_case = case.get_value("RUN_REFCASE") + # Loop over models + archive = case.get_env("archive") + comments = "Renaming hist files by adding suffix '{}'\n".format(suffix) + num_renamed = 0 + for model in _iter_model_file_substrs(case): + comments += " Renaming hist files for model '{}'\n".format(model) + + if model == "cpl": + mname = "drv" + else: + mname = model + test_hists = archive.get_all_hist_files( + case.get_value("CASE"), mname, rundir, ref_case=ref_case + ) + num_renamed += len(test_hists) + for test_hist in test_hists: + test_hist = os.path.join(rundir, test_hist) + new_file = "{}.{}".format(test_hist, suffix) + if os.path.exists(new_file): + os.remove(new_file) + + comments += " Renaming '{}' to '{}'\n".format(test_hist, new_file) + + os.rename(test_hist, new_file) + + expect( + num_renamed > 0, + "renaming failed: no hist files found in rundir '{}'".format(rundir), + ) + + return comments + + +def _hists_match(model, hists1, hists2, suffix1="", suffix2=""): + """ + return (num in set 1 but not 2 , num in set 2 but not 1, matchups) + + >>> hists1 = ['FOO.G.cpl.h1.nc', 'FOO.G.cpl.h2.nc', 'FOO.G.cpl.h3.nc'] + >>> hists2 = ['cpl.h2.nc', 'cpl.h3.nc', 'cpl.h4.nc'] + >>> _hists_match('cpl', hists1, hists2) + (['FOO.G.cpl.h1.nc'], ['cpl.h4.nc'], [('FOO.G.cpl.h2.nc', 'cpl.h2.nc'), ('FOO.G.cpl.h3.nc', 'cpl.h3.nc')]) + >>> hists1 = ['FOO.G.cpl.h1.nc.SUF1', 'FOO.G.cpl.h2.nc.SUF1', 'FOO.G.cpl.h3.nc.SUF1'] + >>> hists2 = ['cpl.h2.nc.SUF2', 'cpl.h3.nc.SUF2', 'cpl.h4.nc.SUF2'] + >>> _hists_match('cpl', hists1, hists2, 'SUF1', 'SUF2') + (['FOO.G.cpl.h1.nc.SUF1'], ['cpl.h4.nc.SUF2'], [('FOO.G.cpl.h2.nc.SUF1', 'cpl.h2.nc.SUF2'), ('FOO.G.cpl.h3.nc.SUF1', 'cpl.h3.nc.SUF2')]) + >>> hists1 = ['cam.h0.1850-01-08-00000.nc'] + >>> hists2 = ['cam_0001.h0.1850-01-08-00000.nc','cam_0002.h0.1850-01-08-00000.nc'] + >>> _hists_match('cam', hists1, hists2, '', '') + ([], [], [('cam.h0.1850-01-08-00000.nc', 'cam_0001.h0.1850-01-08-00000.nc'), ('cam.h0.1850-01-08-00000.nc', 'cam_0002.h0.1850-01-08-00000.nc')]) + >>> hists1 = ['cam_0001.h0.1850-01-08-00000.nc.base','cam_0002.h0.1850-01-08-00000.nc.base'] + >>> hists2 = ['cam_0001.h0.1850-01-08-00000.nc.rest','cam_0002.h0.1850-01-08-00000.nc.rest'] + >>> _hists_match('cam', hists1, hists2, 'base', 'rest') + ([], [], [('cam_0001.h0.1850-01-08-00000.nc.base', 'cam_0001.h0.1850-01-08-00000.nc.rest'), ('cam_0002.h0.1850-01-08-00000.nc.base', 'cam_0002.h0.1850-01-08-00000.nc.rest')]) + """ + normalized1, normalized2 = [], [] + multi_normalized1, multi_normalized2 = [], [] + multiinst = False + + for hists, suffix, normalized, multi_normalized in [ + (hists1, suffix1, normalized1, multi_normalized1), + (hists2, suffix2, normalized2, multi_normalized2), + ]: + for hist in hists: + hist_basename = os.path.basename(hist) + offset = hist_basename.rfind(model) + expect( + offset >= 0, + "ERROR: cant find model name {} in {}".format(model, hist_basename), + ) + normalized_name = os.path.basename(hist_basename[offset:]) + if suffix != "": + expect( + normalized_name.endswith(suffix), + "How did '{}' not have suffix '{}'".format(hist, suffix), + ) + normalized_name = normalized_name[ + : len(normalized_name) - len(suffix) - 1 + ] + + m = re.search("(.+)_[0-9]{4}(.*.nc)", normalized_name) + if m is not None: + multiinst = True + if m.group(1).endswith(".") and m.group(2).startswith("."): + multi_normalized.append(m.group(1) + m.group(2)[1:]) + else: + multi_normalized.append(m.group(1) + m.group(2)) + + 
normalized.append(normalized_name) + + set_of_1_not_2 = set(normalized1) - set(normalized2) + set_of_2_not_1 = set(normalized2) - set(normalized1) + + one_not_two = sorted([hists1[normalized1.index(item)] for item in set_of_1_not_2]) + two_not_one = sorted([hists2[normalized2.index(item)] for item in set_of_2_not_1]) + + both = set(normalized1) & set(normalized2) + + match_ups = sorted( + [ + (hists1[normalized1.index(item)], hists2[normalized2.index(item)]) + for item in both + ] + ) + + # Special case - comparing multiinstance to single instance files + + if multi_normalized1 != multi_normalized2: + # in this case hists1 contains multiinstance hists2 does not + if set(multi_normalized1) == set(normalized2): + for idx, norm_hist1 in enumerate(multi_normalized1): + for idx1, hist2 in enumerate(hists2): + norm_hist2 = normalized2[idx1] + if norm_hist1 == norm_hist2: + match_ups.append((hists1[idx], hist2)) + if hist2 in two_not_one: + two_not_one.remove(hist2) + if hists1[idx] in one_not_two: + one_not_two.remove(hists1[idx]) + # in this case hists2 contains multiinstance hists1 does not + if set(multi_normalized2) == set(normalized1): + for idx, norm_hist2 in enumerate(multi_normalized2): + for idx1, hist1 in enumerate(hists1): + norm_hist1 = normalized1[idx1] + if norm_hist2 == norm_hist1: + match_ups.append((hist1, hists2[idx])) + if hist1 in one_not_two: + one_not_two.remove(hist1) + if hists2[idx] in two_not_one: + two_not_one.remove(hists2[idx]) + + if not multiinst: + expect( + len(match_ups) + len(set_of_1_not_2) == len(hists1), "Programming error1" + ) + expect( + len(match_ups) + len(set_of_2_not_1) == len(hists2), "Programming error2" + ) + + return one_not_two, two_not_one, match_ups + + +def _compare_hists( + case, + from_dir1, + from_dir2, + suffix1="", + suffix2="", + outfile_suffix="", + ignore_fieldlist_diffs=False, +): + """ + Compares two sets of history files + + Returns (success (True if all matched), comments, num_compared) + """ + if from_dir1 == from_dir2: + expect(suffix1 != suffix2, "Comparing files to themselves?") + + casename = case.get_value("CASE") + testcase = case.get_value("TESTCASE") + casedir = case.get_value("CASEROOT") + all_success = True + num_compared = 0 + comments = "Comparing hists for case '{}' dir1='{}', suffix1='{}', dir2='{}' suffix2='{}'\n".format( + casename, from_dir1, suffix1, from_dir2, suffix2 + ) + multiinst_driver_compare = False + archive = case.get_env("archive") + ref_case = case.get_value("RUN_REFCASE") + for model in _iter_model_file_substrs(case): + if case.get_value("TEST") and archive.exclude_testing(model): + logger.info( + "Case is a test and component %r is excluded from comparison", model + ) + + continue + if model == "cpl" and suffix2 == "multiinst": + multiinst_driver_compare = True + comments += " comparing model '{}'\n".format(model) + hists1 = archive.get_latest_hist_files( + casename, model, from_dir1, suffix=suffix1, ref_case=ref_case + ) + hists2 = archive.get_latest_hist_files( + casename, model, from_dir2, suffix=suffix2, ref_case=ref_case + ) + + if len(hists1) == 0 and len(hists2) == 0: + comments += " no hist files found for model {}\n".format(model) + continue + + one_not_two, two_not_one, match_ups = _hists_match( + model, hists1, hists2, suffix1, suffix2 + ) + for item in one_not_two: + if "initial" in item: + continue + comments += " File '{}' {} in '{}' with suffix '{}'\n".format( + item, NO_COMPARE, from_dir2, suffix2 + ) + all_success = False + + for item in two_not_one: + if "initial" in item: + 
continue + comments += " File '{}' {} in '{}' with suffix '{}'\n".format( + item, NO_ORIGINAL, from_dir1, suffix1 + ) + all_success = False + + num_compared += len(match_ups) + + for hist1, hist2 in match_ups: + if not ".nc" in hist1: + logger.info("Ignoring non-netcdf file {}".format(hist1)) + continue + + success = False + cprnc_log_file = None + + try: + success, cprnc_log_file, cprnc_comment = cprnc( + model, + os.path.join(from_dir1, hist1), + os.path.join(from_dir2, hist2), + case, + from_dir1, + multiinst_driver_compare=multiinst_driver_compare, + outfile_suffix=outfile_suffix, + ignore_fieldlist_diffs=ignore_fieldlist_diffs, + ) + except CIMEError as e: + cprnc_comment = str(e) + except Exception as e: + cprnc_comment = f"Unknown CRPRC error: {e!s}" + + if success: + comments += " {} matched {}\n".format(hist1, hist2) + else: + if not cprnc_log_file: + comments += cprnc_comment + all_success = False + return all_success, comments, 0 + elif cprnc_comment == CPRNC_FIELDLISTS_DIFFER: + comments += " {} {} {}\n".format(hist1, FIELDLISTS_DIFFER, hist2) + else: + comments += " {} {} {}\n".format(hist1, DIFF_COMMENT, hist2) + comments += " cat " + cprnc_log_file + "\n" + expected_log_file = os.path.join( + casedir, os.path.basename(cprnc_log_file) + ) + if not ( + os.path.exists(expected_log_file) + and filecmp.cmp(cprnc_log_file, expected_log_file) + ): + try: + safe_copy(cprnc_log_file, casedir) + except (OSError, IOError) as _: + logger.warning( + "Could not copy {} to {}".format(cprnc_log_file, casedir) + ) + + all_success = False + + # Some tests don't save history files. + if num_compared == 0 and testcase not in NO_HIST_TESTS: + all_success = False + comments += "Did not compare any hist files! Missing baselines?\n" + + comments += "PASS" if all_success else "FAIL" + + return all_success, comments, num_compared + + +def compare_test(case, suffix1, suffix2, ignore_fieldlist_diffs=False): + """ + Compares two sets of component history files in the testcase directory + + case - The case containing the hist files to compare + suffix1 - The suffix that identifies the first batch of hist files + suffix1 - The suffix that identifies the second batch of hist files + ignore_fieldlist_diffs (bool): If True, then: If the two cases differ only in their + field lists (i.e., all shared fields are bit-for-bit, but one case has some + diagnostic fields that are missing from the other case), treat the two cases as + identical. + + returns (SUCCESS, comments, num_compared) + """ + rundir = case.get_value("RUNDIR") + + return _compare_hists( + case, + rundir, + rundir, + suffix1, + suffix2, + ignore_fieldlist_diffs=ignore_fieldlist_diffs, + ) + + +def cprnc( + model, + file1, + file2, + case, + rundir, + multiinst_driver_compare=False, + outfile_suffix="", + ignore_fieldlist_diffs=False, + cprnc_exe=None, +): + """ + Run cprnc to compare two individual nc files + + file1 - the full or relative path of the first file + file2 - the full or relative path of the second file + case - the case containing the files + rundir - the rundir for the case + outfile_suffix - if non-blank, then the output file name ends with this + suffix (with a '.' added before the given suffix). + Use None to avoid permissions issues in the case dir. + ignore_fieldlist_diffs (bool): If True, then: If the two cases differ only in their + field lists (i.e., all shared fields are bit-for-bit, but one case has some + diagnostic fields that are missing from the other case), treat the two cases as + identical. 
+ + returns (True if the files matched, log_name, comment) + where 'comment' is either an empty string or one of the module-level constants + beginning with CPRNC_ (e.g., CPRNC_FIELDLISTS_DIFFER) + """ + if not cprnc_exe: + cprnc_exe = case.get_value("CCSM_CPRNC") + expect( + os.path.isfile(cprnc_exe) and os.access(cprnc_exe, os.X_OK), + f"cprnc {cprnc_exe} does not exist or is not executable", + ) + + basename = os.path.basename(file1) + multiinst_regex = re.compile(r".*%s[^_]*(_[0-9]{4})[.]h.?[.][^.]+?[.]nc" % model) + mstr = "" + mstr1 = "" + mstr2 = "" + # If one is a multiinstance file but the other is not add an instance string + m1 = multiinst_regex.match(file1) + m2 = multiinst_regex.match(file2) + if m1 is not None: + mstr1 = m1.group(1) + if m2 is not None: + mstr2 = m2.group(1) + if mstr1 != mstr2: + mstr = mstr1 + mstr2 + + output_filename = os.path.join(rundir, "{}{}.cprnc.out".format(basename, mstr)) + if outfile_suffix: + output_filename += ".{}".format(outfile_suffix) + + if outfile_suffix is None: + cpr_stat, out, _ = run_cmd( + "{} -m {} {}".format(cprnc_exe, file1, file2), combine_output=True + ) + output_filename = None + else: + # Remove existing output file if it exists + if os.path.exists(output_filename): + os.remove(output_filename) + + cpr_stat = run_cmd( + "{} -m {} {}".format(cprnc_exe, file1, file2), + combine_output=True, + arg_stdout=output_filename, + )[0] + with open(output_filename, "r", encoding="utf-8") as fd: + out = fd.read() + + comment = "" + if cpr_stat == 0: + # Successful exit from cprnc + if multiinst_driver_compare: + # In a multiinstance test the cpl hist file will have a different number of + # dimensions and so cprnc will indicate that the files seem to be DIFFERENT + # in this case we only want to check that the fields we are able to compare + # have no differences. + files_match = " 0 had non-zero differences" in out + else: + if "the two files seem to be DIFFERENT" in out: + files_match = False + elif "the two files DIFFER only in their field lists" in out: + if ignore_fieldlist_diffs: + files_match = True + else: + files_match = False + comment = CPRNC_FIELDLISTS_DIFFER + elif "files seem to be IDENTICAL" in out: + files_match = True + elif "Failed to open file" in out: + raise CIMEError("Failed to open file") + else: + # TODO convert to CIMEError + expect( + False, + "Did not find an expected summary string in cprnc output:\n{}".format( + out + ), + ) + else: + # If there is an error in cprnc, we do the safe thing of saying the comparison failed + files_match = False + + return (files_match, output_filename, comment) + + +def compare_baseline(case, baseline_dir=None, outfile_suffix=""): + """ + compare the current test output to a baseline result + + case - The case containing the hist files to be compared against baselines + baseline_dir - Optionally, specify a specific baseline dir, otherwise it will be computed from case config + outfile_suffix - if non-blank, then the cprnc output file name ends with + this suffix (with a '.' added before the given suffix). if None, no output file saved. 
+
+    returns (SUCCESS, comments)
+    SUCCESS means all hist files matched their corresponding baseline
+    """
+    rundir = case.get_value("RUNDIR")
+    if baseline_dir is None:
+        baselineroot = case.get_value("BASELINE_ROOT")
+        basecmp_dir = os.path.join(baselineroot, case.get_value("BASECMP_CASE"))
+        dirs_to_check = (baselineroot, basecmp_dir)
+    else:
+        basecmp_dir = baseline_dir
+        dirs_to_check = (basecmp_dir,)
+
+    for bdir in dirs_to_check:
+        if not os.path.isdir(bdir):
+            return False, "ERROR {} baseline directory '{}' does not exist".format(
+                TEST_NO_BASELINES_COMMENT, bdir
+            )
+
+    success, comments, _ = _compare_hists(
+        case, rundir, basecmp_dir, outfile_suffix=outfile_suffix
+    )
+    if Config.instance().create_bless_log:
+        bless_log = os.path.join(basecmp_dir, BLESS_LOG_NAME)
+        if os.path.exists(bless_log):
+            lines = open(bless_log, "r", encoding="utf-8").readlines()
+            if lines:
+                last_line = lines[-1]
+                comments += "\n Most recent bless: {}".format(last_line)
+
+    return success, comments
+
+
+def generate_teststatus(testdir, baseline_dir):
+    """
+    CESM stores its TestStatus file in baselines. Do not let exceptions
+    escape from this function.
+    """
+    try:
+        with SharedArea():
+            if not os.path.isdir(baseline_dir):
+                os.makedirs(baseline_dir)
+
+            safe_copy(
+                os.path.join(testdir, TEST_STATUS_FILENAME),
+                baseline_dir,
+                preserve_meta=False,
+            )
+    except Exception as e:
+        logger.warning(
+            "Could not copy {} to baselines, {}".format(
+                os.path.join(testdir, TEST_STATUS_FILENAME), str(e)
+            )
+        )
+
+
+def _generate_baseline_impl(case, baseline_dir=None, allow_baseline_overwrite=False):
+    """
+    Copy the current test output to the baseline result
+
+    case - The case containing the hist files to be copied into baselines
+    baseline_dir - Optionally, specify a specific baseline dir, otherwise it will be computed from case config
+    allow_baseline_overwrite must be true to generate baselines to an existing directory.
+ + returns (SUCCESS, comments) + """ + rundir = case.get_value("RUNDIR") + ref_case = case.get_value("RUN_REFCASE") + if baseline_dir is None: + baselineroot = case.get_value("BASELINE_ROOT") + basegen_dir = os.path.join(baselineroot, case.get_value("BASEGEN_CASE")) + else: + basegen_dir = baseline_dir + testcase = case.get_value("CASE") + archive = case.get_env("archive") + + if not os.path.isdir(basegen_dir): + os.makedirs(basegen_dir) + + if ( + os.path.isdir(os.path.join(basegen_dir, testcase)) + and not allow_baseline_overwrite + ): + expect(False, " Cowardly refusing to overwrite existing baseline directory") + + comments = "Generating baselines into '{}'\n".format(basegen_dir) + num_gen = 0 + for model in _iter_model_file_substrs(case): + + comments += " generating for model '{}'\n".format(model) + if case.get_value("TESTCASE") in ALL_HIST_TESTS: + hists = archive.get_all_hist_files( + testcase, model, rundir, ref_case=ref_case + ) + logger.debug("all_files: {}".format(hists)) + else: + hists = archive.get_latest_hist_files( + testcase, model, rundir, ref_case=ref_case + ) + logger.debug("latest_files: {}".format(hists)) + num_gen += len(hists) + + for hist in hists: + offset = hist.rfind(model) + expect( + offset >= 0, "ERROR: cant find model name {} in {}".format(model, hist) + ) + baseline = os.path.join(basegen_dir, hist[offset:]) + if os.path.exists(baseline): + os.remove(baseline) + + safe_copy(os.path.join(rundir, hist), baseline, preserve_meta=False) + comments += " generating baseline '{}' from file {}\n".format( + baseline, hist + ) + + # copy latest cpl log to baseline + # drop the date so that the name is generic + if case.get_value("COMP_INTERFACE") == "nuopc": + cplname = "med" + else: + cplname = "cpl" + + newestcpllogfile = case.get_latest_cpl_log( + coupler_log_path=case.get_value("RUNDIR"), cplname=cplname + ) + if newestcpllogfile is None: + logger.warning( + "No {}.log file found in directory {}".format( + cplname, case.get_value("RUNDIR") + ) + ) + else: + safe_copy( + newestcpllogfile, + os.path.join(basegen_dir, "{}.log.gz".format(cplname)), + preserve_meta=False, + ) + + testname = case.get_value("TESTCASE") + testopts = parse_test_name(case.get_value("CASEBASEID"))[1] + testopts = [] if testopts is None else testopts + expect( + num_gen > 0 or (testname in NO_HIST_TESTS or "B" in testopts), + "Could not generate any hist files for case '{}', something is seriously wrong".format( + os.path.join(rundir, testcase) + ), + ) + + if Config.instance().create_bless_log: + bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME) + with open(bless_log, "a", encoding="utf-8") as fd: + fd.write( + "sha:{} date:{}\n".format( + get_current_commit(repo=case.get_value("SRCROOT")), + get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S"), + ) + ) + + return True, comments + + +def generate_baseline(case, baseline_dir=None, allow_baseline_overwrite=False): + with SharedArea(): + return _generate_baseline_impl( + case, + baseline_dir=baseline_dir, + allow_baseline_overwrite=allow_baseline_overwrite, + ) + + +def get_ts_synopsis(comments): + r""" + Reduce case diff comments down to a single line synopsis so that we can put + something in the TestStatus file. It's expected that the comments provided + to this function came from compare_baseline, not compare_tests. 
+
+    >>> get_ts_synopsis('')
+    ''
+    >>> get_ts_synopsis('\n')
+    ''
+    >>> get_ts_synopsis('big error')
+    'big error'
+    >>> get_ts_synopsis('big error\n')
+    'big error'
+    >>> get_ts_synopsis('stuff\n File foo had a different field list from bar with suffix baz\nPass\n')
+    'FIELDLIST field lists differ (otherwise bit-for-bit)'
+    >>> get_ts_synopsis('stuff\n File foo had no compare counterpart in bar with suffix baz\nPass\n')
+    'ERROR BFAIL some baseline files were missing'
+    >>> get_ts_synopsis('stuff\n File foo had a different field list from bar with suffix baz\n File foo had no compare counterpart in bar with suffix baz\nPass\n')
+    'MULTIPLE ISSUES: field lists differ and some baseline files were missing'
+    >>> get_ts_synopsis('stuff\n File foo did NOT match bar with suffix baz\nPass\n')
+    'DIFF'
+    >>> get_ts_synopsis('stuff\n File foo did NOT match bar with suffix baz\n File foo had a different field list from bar with suffix baz\nPass\n')
+    'DIFF'
+    >>> get_ts_synopsis('stuff\n File foo did NOT match bar with suffix baz\n File foo had no compare counterpart in bar with suffix baz\nPass\n')
+    'DIFF'
+    >>> get_ts_synopsis('File foo had no compare counterpart in bar with suffix baz\n File foo had no original counterpart in bar with suffix baz\n')
+    'DIFF'
+    >>> get_ts_synopsis('file1=\nfile2=\nFailed to open file\n')
+    'ERROR CPRNC failed to open files'
+    >>> get_ts_synopsis('file1=\nfile2=\nSome other error\n')
+    'ERROR Could not interpret CPRNC output'
+    >>> get_ts_synopsis('file1=\nfile2=\n diff_test: the two files seem to be IDENTICAL \n')
+    ''
+    """
+    comments = comments.strip()
+
+    if comments == "" or "\n" not in comments:
+        return comments
+
+    if comments.endswith("PASS"):
+        return ""
+
+    # Empty synopsis when files are identical
+    if re.search(IDENTICAL, comments) is not None:
+        return ""
+
+    fieldlist_differences = re.search(FIELDLISTS_DIFFER, comments) is not None
+    baseline_fail = re.search(NO_COMPARE, comments) is not None
+    real_fail = [
+        re.search(x, comments) is not None for x in COMPARISON_FAILURE_COMMENT_OPTIONS
+    ]
+    open_fail = re.search(FAILED_OPEN, comments) is not None
+
+    if any(real_fail):
+        # If there are any real differences, we just report that: we assume that the
+        # user cares much more about those real differences than fieldlist or bfail
+        # issues, and we don't want to complicate the matter by trying to report all
+        # issues in this case.
+        synopsis = "DIFF"
+    elif fieldlist_differences and baseline_fail:
+        # It's not clear which of these (if either) the user would care more
+        # about, so we report both. We deliberately avoid printing the keywords
+        # 'FIELDLIST' or TEST_NO_BASELINES_COMMENT (i.e., 'BFAIL'): if we printed
+        # those, then (e.g.) a 'grep -v FIELDLIST' (which the user might do if
+        # (s)he was expecting fieldlist differences) would also filter out this
+        # line, which we don't want.
+ synopsis = ( + "MULTIPLE ISSUES: field lists differ and some baseline files were missing" + ) + elif fieldlist_differences: + synopsis = "FIELDLIST field lists differ (otherwise bit-for-bit)" + elif baseline_fail: + synopsis = "ERROR {} some baseline files were missing".format( + TEST_NO_BASELINES_COMMENT + ) + elif open_fail: + synopsis = "ERROR CPRNC failed to open files" + else: + synopsis = "ERROR Could not interpret CPRNC output" + + return synopsis diff --git a/CIME/jenkins_generic_job.py b/CIME/jenkins_generic_job.py new file mode 100644 index 00000000000..e35a5d04e67 --- /dev/null +++ b/CIME/jenkins_generic_job.py @@ -0,0 +1,444 @@ +import CIME.wait_for_tests +from CIME.utils import expect, run_cmd_no_fail +from CIME.case import Case + +import os, shutil, glob, signal, logging, threading, sys, re, tarfile, time + +############################################################################## +def cleanup_queue(test_root, test_id): + ############################################################################### + """ + Delete all jobs left in the queue + """ + for teststatus_file in glob.iglob("{}/*{}*/TestStatus".format(test_root, test_id)): + case_dir = os.path.dirname(teststatus_file) + with Case(case_dir, read_only=True) as case: + jobmap = case.get_job_info() + jobkills = [] + for jobname, jobid in jobmap.items(): + logging.warning( + "Found leftover batch job {} ({}) that need to be deleted".format( + jobid, jobname + ) + ) + jobkills.append(jobid) + + case.cancel_batch_jobs(jobkills) + + +############################################################################### +def delete_old_test_data( + mach_comp, + test_id_root, + scratch_root, + test_root, + run_area, + build_area, + archive_area, + avoid_test_id, +): + ############################################################################### + # Remove old dirs + for clutter_area in [scratch_root, test_root, run_area, build_area, archive_area]: + for old_file in glob.glob( + "{}/*{}*{}*".format(clutter_area, mach_comp, test_id_root) + ): + if avoid_test_id not in old_file: + logging.info("TEST ARCHIVER: removing {}".format(old_file)) + if os.path.isdir(old_file): + shutil.rmtree(old_file) + else: + os.remove(old_file) + + else: + logging.info( + "TEST ARCHIVER: leaving case {} due to avoiding test id {}".format( + old_file, avoid_test_id + ) + ) + + +############################################################################### +def scan_for_test_ids(old_test_archive, mach_comp, test_id_root): + ############################################################################### + results = set([]) + test_id_re = re.compile(".+[.]([^.]+)") + for item in glob.glob( + "{}/{}/*{}*{}*".format(old_test_archive, "old_cases", mach_comp, test_id_root) + ): + filename = os.path.basename(item) + the_match = test_id_re.match(filename) + if the_match: + test_id = the_match.groups()[0] + results.add(test_id) + + return list(results) + + +############################################################################### +def archive_old_test_data( + machine, + mach_comp, + test_id_root, + test_root, + old_test_archive, + avoid_test_id, +): + ############################################################################### + + gb_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA") + gb_allowed = 500 if gb_allowed is None else gb_allowed + bytes_allowed = gb_allowed * 1000000000 + expect( + bytes_allowed > 0, + "Machine {} does not support test archiving".format(machine.get_machine_name()), + ) + + # Remove old cs.status, cs.submit. 
I don't think there's any value to leaving these around + # or archiving them + for old_cs_file in glob.glob("{}/cs.*.{}[0-9]*".format(test_root, test_id_root)): + if avoid_test_id not in old_cs_file: + logging.info("TEST ARCHIVER: Removing {}".format(old_cs_file)) + os.remove(old_cs_file) + + # Remove the old CTest XML, same reason as above + if os.path.isdir("Testing"): + logging.info( + "TEST ARCHIVER: Removing {}".format(os.path.join(os.getcwd(), "Testing")) + ) + shutil.rmtree("Testing") + + if not os.path.exists(old_test_archive): + os.mkdir(old_test_archive) + + # Archive old data by looking at old test cases + for old_case in glob.glob( + "{}/*{}*{}[0-9]*".format(test_root, mach_comp, test_id_root) + ): + if avoid_test_id not in old_case: + logging.info("TEST ARCHIVER: archiving case {}".format(old_case)) + exeroot, rundir, archdir = run_cmd_no_fail( + "./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case + ).split(",") + + for the_dir, target_area in [ + (exeroot, "old_builds"), + (rundir, "old_runs"), + (archdir, "old_archives"), + (old_case, "old_cases"), + ]: + if os.path.exists(the_dir): + start_time = time.time() + logging.info( + "TEST ARCHIVER: archiving {} to {}".format( + the_dir, os.path.join(old_test_archive, target_area) + ) + ) + if not os.path.exists(os.path.join(old_test_archive, target_area)): + os.mkdir(os.path.join(old_test_archive, target_area)) + + old_case_name = os.path.basename(old_case) + with tarfile.open( + os.path.join( + old_test_archive, + target_area, + "{}.tar.gz".format(old_case_name), + ), + "w:gz", + ) as tfd: + tfd.add(the_dir, arcname=old_case_name) + + shutil.rmtree(the_dir) + + # Remove parent dir if it's empty + parent_dir = os.path.dirname(the_dir) + if not os.listdir(parent_dir) or os.listdir(parent_dir) == [ + "case2_output_root" + ]: + shutil.rmtree(parent_dir) + + end_time = time.time() + logging.info( + "TEST ARCHIVER: archiving {} took {} seconds".format( + the_dir, int(end_time - start_time) + ) + ) + + else: + logging.info( + "TEST ARCHIVER: leaving case {} due to avoiding test id {}".format( + old_case, avoid_test_id + ) + ) + + # Check size of archive + bytes_of_old_test_data = int( + run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0] + ) + if bytes_of_old_test_data > bytes_allowed: + logging.info( + "TEST ARCHIVER: Too much test data, {}GB (actual) > {}GB (limit)".format( + bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000 + ) + ) + old_test_ids = scan_for_test_ids(old_test_archive, mach_comp, test_id_root) + for old_test_id in sorted(old_test_ids): + logging.info( + "TEST ARCHIVER: Removing old data for test {}".format(old_test_id) + ) + for item in ["old_cases", "old_builds", "old_runs", "old_archives"]: + for dir_to_rm in glob.glob( + "{}/{}/*{}*{}*".format( + old_test_archive, item, mach_comp, old_test_id + ) + ): + logging.info("TEST ARCHIVER: Removing {}".format(dir_to_rm)) + if os.path.isdir(dir_to_rm): + shutil.rmtree(dir_to_rm) + else: + os.remove(dir_to_rm) + + bytes_of_old_test_data = int( + run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0] + ) + if bytes_of_old_test_data < bytes_allowed: + break + + else: + logging.info( + "TEST ARCHIVER: Test data is within accepted bounds, {}GB (actual) < {}GB (limit)".format( + bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000 + ) + ) + + +############################################################################### +def handle_old_test_data( + machine, compiler, test_id_root, scratch_root, test_root, 
avoid_test_id +): + ############################################################################### + run_area = os.path.dirname( + os.path.dirname(machine.get_value("RUNDIR")) + ) # Assumes XXX/$CASE/run + build_area = os.path.dirname( + os.path.dirname(machine.get_value("EXEROOT")) + ) # Assumes XXX/$CASE/build + archive_area = os.path.dirname( + machine.get_value("DOUT_S_ROOT") + ) # Assumes XXX/archive/$CASE + old_test_archive = os.path.join(scratch_root, "old_test_archive") + + mach_comp = "{}_{}".format(machine.get_machine_name(), compiler) + + try: + archive_old_test_data( + machine, + mach_comp, + test_id_root, + test_root, + old_test_archive, + avoid_test_id, + ) + except Exception: + logging.warning( + "TEST ARCHIVER: Archiving of old test data FAILED: {}\nDeleting data instead".format( + sys.exc_info()[1] + ) + ) + delete_old_test_data( + mach_comp, + test_id_root, + scratch_root, + test_root, + run_area, + build_area, + archive_area, + avoid_test_id, + ) + + +############################################################################### +def jenkins_generic_job( + generate_baselines, + submit_to_cdash, + no_batch, + baseline_name, + arg_cdash_build_name, + cdash_project, + arg_test_suite, + cdash_build_group, + baseline_compare, + scratch_root, + parallel_jobs, + walltime, + machine, + compiler, + real_baseline_name, + baseline_root, + update_success, + check_throughput, + check_memory, + ignore_memleak, + ignore_namelists, + ignore_diffs, + save_timing, + pes_file, + jenkins_id, + queue, +): + ############################################################################### + """ + Return True if all tests passed + """ + use_batch = machine.has_batch_system() and not no_batch + test_suite = machine.get_value("TESTS") + proxy = machine.get_value("PROXY") + test_suite = test_suite if arg_test_suite is None else arg_test_suite + test_root = os.path.join(scratch_root, "J") + + if use_batch: + batch_system = machine.get_value("BATCH_SYSTEM") + expect( + batch_system is not None, + "Bad XML. Batch machine has no batch_system configuration.", + ) + + # + # Env changes + # + + if submit_to_cdash and proxy is not None: + os.environ["http_proxy"] = proxy + + if not os.path.isdir(scratch_root): + os.makedirs(scratch_root) + + # Important, need to set up signal handlers before we officially + # kick off tests. We don't want this process getting killed outright + # since it's critical that the cleanup in the finally block gets run + CIME.wait_for_tests.set_up_signal_handlers() + + # + # Clean up leftovers from previous run of jenkins_generic_job. This will + # break the previous run of jenkins_generic_job if it's still running. Set up + # the Jenkins jobs with timeouts to avoid this. 
+ # + + if jenkins_id is not None: + test_id_root = jenkins_id + test_id = "%s%s" % (test_id_root, CIME.utils.get_timestamp("%y%m%d_%H%M%S")) + else: + test_id_root = "J{}{}".format( + baseline_name.capitalize(), test_suite.replace("e3sm_", "").capitalize() + ) + test_id = "%s%s" % (test_id_root, CIME.utils.get_timestamp()) + archiver_thread = threading.Thread( + target=handle_old_test_data, + args=(machine, compiler, test_id_root, scratch_root, test_root, test_id), + ) + archiver_thread.start() + + # + # Set up create_test command and run it + # + + create_test_args = [ + test_suite, + "--test-root %s" % test_root, + "-t %s" % test_id, + "--machine %s" % machine.get_machine_name(), + "--compiler %s" % compiler, + ] + if generate_baselines: + create_test_args.append("-g -b " + real_baseline_name) + elif baseline_compare: + create_test_args.append("-c -b " + real_baseline_name) + + if scratch_root != machine.get_value("CIME_OUTPUT_ROOT"): + create_test_args.append("--output-root=" + scratch_root) + + if no_batch: + create_test_args.append("--no-batch") + + if parallel_jobs is not None: + create_test_args.append("-j {:d}".format(parallel_jobs)) + + if walltime is not None: + create_test_args.append("--walltime " + walltime) + + if baseline_root is not None: + create_test_args.append("--baseline-root " + baseline_root) + + if pes_file is not None: + create_test_args.append("--pesfile " + pes_file) + + if queue is not None: + create_test_args.append("--queue " + queue) + + if save_timing: + create_test_args.append("--save-timing") + + create_test_cmd = "./create_test " + " ".join(create_test_args) + + if not CIME.wait_for_tests.SIGNAL_RECEIVED: + create_test_stat = CIME.utils.run_cmd( + create_test_cmd, + from_dir=CIME.utils.get_scripts_root(), + verbose=True, + arg_stdout=None, + arg_stderr=None, + )[0] + # Create_test should have either passed, detected failing tests, or timed out + expect( + create_test_stat in [0, CIME.utils.TESTS_FAILED_ERR_CODE, -signal.SIGTERM], + "Create_test script FAILED with error code '{:d}'!".format( + create_test_stat + ), + ) + + # + # Wait for tests + # + + if submit_to_cdash: + cdash_build_name = ( + "_".join([test_suite, baseline_name, compiler]) + if arg_cdash_build_name is None + else arg_cdash_build_name + ) + else: + cdash_build_name = None + + os.environ["CIME_MACHINE"] = machine.get_machine_name() + + if submit_to_cdash: + logging.info( + "To resubmit to dashboard: wait_for_tests {}/*{}/TestStatus --no-wait -b {}".format( + test_root, test_id, cdash_build_name + ) + ) + + tests_passed = CIME.wait_for_tests.wait_for_tests( + glob.glob("{}/*{}/TestStatus".format(test_root, test_id)), + no_wait=not use_batch, # wait if using queue + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_diffs=ignore_diffs, + ignore_memleak=ignore_memleak, + cdash_build_name=cdash_build_name, + cdash_project=cdash_project, + cdash_build_group=cdash_build_group, + update_success=update_success, + ) + + if use_batch and CIME.wait_for_tests.SIGNAL_RECEIVED: + # Cleanup + cleanup_queue(test_root, test_id) + + if not CIME.wait_for_tests.SIGNAL_RECEIVED: + logging.info("TEST ARCHIVER: Waiting for archiver thread") + archiver_thread.join() + logging.info("TEST ARCHIVER: Waiting for archiver finished") + + return tests_passed diff --git a/CIME/locked_files.py b/CIME/locked_files.py new file mode 100644 index 00000000000..3be6f9c3573 --- /dev/null +++ b/CIME/locked_files.py @@ -0,0 +1,219 @@ +from pathlib import Path + +from 
CIME.utils import safe_copy +from CIME.XML.standard_module_setup import * +from CIME.XML.env_build import EnvBuild +from CIME.XML.env_mach_pes import EnvMachPes +from CIME.XML.env_case import EnvCase +from CIME.XML.env_batch import EnvBatch +from CIME.XML.generic_xml import GenericXML + +logger = logging.getLogger(__name__) + + +LOCKED_DIR = "LockedFiles" + + +def lock_file(filename, caseroot, newname=None): + expect("/" not in filename, "Please just provide basename of locked file") + + if newname is None: + newname = filename + + fulllockdir = os.path.join(caseroot, LOCKED_DIR) + + if not os.path.exists(fulllockdir): + os.mkdir(fulllockdir) + + logging.debug("Locking file {}".format(filename)) + + # JGF: It is extremely dangerous to alter our database (xml files) without + # going through the standard API. The copy below invalidates all existing + # GenericXML instances that represent this file and all caching that may + # have involved this file. We should probably seek a safer way of locking + # files. + safe_copy(os.path.join(caseroot, filename), os.path.join(fulllockdir, newname)) + + GenericXML.invalidate(os.path.join(fulllockdir, newname)) + + +def unlock_file(filename, caseroot): + expect("/" not in filename, "Please just provide basename of locked file") + + locked_path = os.path.join(caseroot, LOCKED_DIR, filename) + + if os.path.exists(locked_path): + os.remove(locked_path) + + logging.debug("Unlocking file {}".format(filename)) + + +def is_locked(filename, caseroot): + expect("/" not in filename, "Please just provide basename of locked file") + + return os.path.exists(os.path.join(caseroot, LOCKED_DIR, filename)) + + +def check_lockedfiles(case, skip=None, quiet=False, caseroot=None, whitelist=None): + """ + Check that all lockedfiles match what's in case + + If caseroot is not specified, it is set to the current working directory + """ + if skip is None: + skip = [] + elif isinstance(skip, str): + skip = [skip] + + if caseroot is None: + caseroot = case.get_value("CASEROOT") + + locked_path = Path(caseroot, LOCKED_DIR) + + lockedfiles = locked_path.glob("*.xml") + + # filter based on whitelist + if whitelist is not None: + lockedfiles = [x for x in lockedfiles if x.stem in whitelist] + + for file_path in lockedfiles: + filename = file_path.name + + # Skip files used for tests e.g. 
env_mach_pes.ERP1.xml or included in skip list + if filename.count(".") > 1 or any([filename.startswith(x) for x in skip]): + continue + + check_lockedfile(case, f"{filename}", caseroot=caseroot, quiet=quiet) + + +def check_lockedfile(case, filebase, caseroot=None, quiet=False): + if caseroot is None: + caseroot = case.get_value("CASEROOT") + + env_name, diff = diff_lockedfile(case, caseroot, filebase) + + if diff: + check_diff(case, filebase, env_name, diff, quiet=quiet) + + +def diff_lockedfile(case, caseroot, filename): + env_name = filename.split(".")[0] + + case_file = Path(caseroot, filename) + + locked_file = case_file.parent / LOCKED_DIR / filename + + if not locked_file.is_file(): + return env_name, {} + + try: + l_env, r_env = _get_case_env(case, caseroot, locked_file, env_name) + except NameError as e: + logger.warning(e) + + return env_name, {} + + return env_name, l_env.compare_xml(r_env) + + +def _get_case_env(case, caseroot, locked_file, env_name): + if env_name == "env_build": + l_env = case.get_env("build") + r_env = EnvBuild(caseroot, str(locked_file), read_only=True) + elif env_name == "env_mach_pes": + l_env = case.get_env("mach_pes") + r_env = EnvMachPes( + caseroot, + str(locked_file), + components=case.get_values("COMP_CLASSES"), + read_only=True, + ) + elif env_name == "env_case": + l_env = case.get_env("case") + r_env = EnvCase(caseroot, str(locked_file), read_only=True) + elif env_name == "env_batch": + l_env = case.get_env("batch") + r_env = EnvBatch(caseroot, str(locked_file), read_only=True) + else: + raise NameError( + "Locked XML file {!r} is not currently being handled".format( + locked_file.name + ) + ) + + return l_env, r_env + + +def check_diff(case, filename, env_name, diff, quiet=False): + logger.warning("Detected diff in locked file {!r}".format(filename)) + + # Remove BUILD_COMPLETE, invalid entry in diff + diff.pop("BUILD_COMPLETE", None) + + # Nothing to process + if not diff: + return + + # List differences + for key, value in diff.items(): + logger.warning( + "\t{!r} has changed from {!r} to {!r}".format(key, value[1], value[0]) + ) + + reset = False + rebuild = False + message = "" + clean_targets = "" + rebuild_components = [] + + if env_name == "env_case": + expect( + False, + f"Cannot change `env_case.xml`, please restore origin {filename!r}", + ) + elif env_name == "env_build" and diff: + build_status = 1 + + if "PIO_VERSION" in diff: + build_status = 2 + + logging.critical( + "Changing 'PIO_VERSION' requires running `./case.build --clean-all` to rebuild" + ) + + case.set_value("BUILD_STATUS", build_status) + + rebuild = True + + clean_targets = "--clean-all" + elif env_name in ("env_batch", "env_mach_pes"): + reset = True + + for component in case.get_values("COMP_CLASSES"): + triggers = case.get_values(f"REBUILD_TRIGGER_{component}") + + if any([y.startswith(x) for x in triggers for y in diff.keys()]): + rebuild = True + + rebuild_components.append(component) + + if reset: + message = "For your changes to take effect, run:\n./case.setup --reset\n" + + if rebuild: + case.set_value("BUILD_COMPLETE", False) + + if rebuild_components and clean_targets != "--clean-all": + clean_targets = " ".join([x.lower() for x in rebuild_components]) + + clean_targets = f"--clean {clean_targets}" + + if not reset: + message = "For your changes to take effect, run:\n" + + message = f"{message}./case.build {clean_targets}\n./case.build" + + if quiet: + logger.info(message) + else: + expect(False, message) diff --git a/scripts/lib/CIME/namelist.py 
b/CIME/namelist.py similarity index 79% rename from scripts/lib/CIME/namelist.py rename to CIME/namelist.py index 1093d8963f6..d89dee7c2c1 100644 --- a/scripts/lib/CIME/namelist.py +++ b/CIME/namelist.py @@ -101,56 +101,72 @@ # pylint: disable=line-too-long,too-many-lines,invalid-name import re -import collections +from contextlib import contextmanager # Disable these because this is our standard setup # pylint: disable=wildcard-import,unused-wildcard-import from CIME.XML.standard_module_setup import * from CIME.utils import expect, string_in_list -import six logger = logging.getLogger(__name__) # Fortran syntax regular expressions. # Variable names. -#FORTRAN_NAME_REGEX = re.compile(r"(^[a-z][a-z0-9_]{0,62})(\([+-]?\d*:?[+-]?\d*:?[+-]?\d*\))?$", re.IGNORECASE) -FORTRAN_NAME_REGEX = re.compile(r"""(^[a-z][a-z0-9_@]{0,62}) # The variable name +# FORTRAN_NAME_REGEX = re.compile(r"(^[a-z][a-z0-9_]{0,62})(\([+-]?\d*:?[+-]?\d*:?[+-]?\d*\))?$", re.IGNORECASE) +FORTRAN_NAME_REGEX = re.compile( + r"""(^[a-z][a-z0-9_@]{0,62}) # The variable name (\( # begin optional index expression (([+-]?\d+) # Single valued index | # or (([+-]?\d+)?:([+-]?\d+)?:?([+-]?\d+)?)) # colon seperated triplet - \))?\s*$""" # end optional index expression - , re.IGNORECASE | re.VERBOSE) + \))?\s*$""", # end optional index expression + re.IGNORECASE | re.VERBOSE, +) FORTRAN_LITERAL_REGEXES = {} # Integer literals. _int_re_string = r"(\+|-)?[0-9]+" -FORTRAN_LITERAL_REGEXES['integer'] = re.compile("^" + _int_re_string + "$") +FORTRAN_LITERAL_REGEXES["integer"] = re.compile("^" + _int_re_string + "$") # Real/complex literals. _ieee_exceptional_re_string = r"inf(inity)?|nan(\([^)]+\))?" -_float_re_string = r"((\+|-)?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ed]?{})?|{})".format(_int_re_string, _ieee_exceptional_re_string) -FORTRAN_LITERAL_REGEXES['real'] = re.compile("^" + _float_re_string + "$", - re.IGNORECASE) -FORTRAN_LITERAL_REGEXES['complex'] = re.compile(r"^\([ \n]*" + - _float_re_string + - r"[ \n]*,[ \n]*" + - _float_re_string + - r"[ \n]*\)$", re.IGNORECASE) +_float_re_string = r"((\+|-)?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ed]?{})?|{})".format( + _int_re_string, _ieee_exceptional_re_string +) +FORTRAN_LITERAL_REGEXES["real"] = re.compile( + "^" + _float_re_string + "$", re.IGNORECASE +) +FORTRAN_LITERAL_REGEXES["complex"] = re.compile( + r"^\([ \n]*" + + _float_re_string + + r"[ \n]*,[ \n]*" + + _float_re_string + + r"[ \n]*\)$", + re.IGNORECASE, +) # Character literals. _char_single_re_string = r"'[^']*(''[^']*)*'" _char_double_re_string = r'"[^"]*(""[^"]*)*"' -FORTRAN_LITERAL_REGEXES['character'] = re.compile("^(" + - _char_single_re_string + "|" + - _char_double_re_string + - ")$") +FORTRAN_LITERAL_REGEXES["character"] = re.compile( + "^(" + _char_single_re_string + "|" + _char_double_re_string + ")$" +) # Logical literals. -FORTRAN_LITERAL_REGEXES['logical'] = re.compile(r"^\.?[tf][^=/ \n]*$", - re.IGNORECASE) +FORTRAN_LITERAL_REGEXES["logical"] = re.compile(r"^\.?[tf][^=/ \n]*$", re.IGNORECASE) # Repeated value prefix. FORTRAN_REPEAT_PREFIX_REGEX = re.compile(r"^[0-9]*[1-9]+[0-9]*\*") +def convert_bool(value): + if isinstance(value, bool): + value = f".{str(value).lower()}." + elif isinstance(value, str): + value = f".{value.lower()}." + else: + raise ValueError("Unable to convert {}".format(value)) + + return value + + def is_valid_fortran_name(string): """Check that a variable name is allowed in Fortran. 
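# (Illustrative sketch, not part of the patch; assumes the patched CIME package
# is importable.) The new convert_bool helper above maps Python booleans and the
# strings "True"/"False" to Fortran logical literals and raises ValueError for
# anything else:
from CIME.namelist import convert_bool

assert convert_bool(True) == ".true."
assert convert_bool("False") == ".false."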
@@ -195,8 +211,9 @@ def is_valid_fortran_name(string): """ return FORTRAN_NAME_REGEX.search(string) is not None + def get_fortran_name_only(full_var): - """ remove array section if any and return only the variable name + """remove array section if any and return only the variable name >>> get_fortran_name_only('foo') 'foo' >>> get_fortran_name_only('foo(3)') @@ -215,8 +232,9 @@ def get_fortran_name_only(full_var): m = FORTRAN_NAME_REGEX.search(full_var) return m.group(1) + def get_fortran_variable_indices(varname, varlen=1, allow_any_len=False): - """ get indices from a fortran namelist variable as a triplet of minindex, maxindex and step + """get indices from a fortran namelist variable as a triplet of minindex, maxindex and step >>> get_fortran_variable_indices('foo(3)') (3, 3, 1) @@ -248,10 +266,11 @@ def get_fortran_variable_indices(varname, varlen=1, allow_any_len=False): if allow_any_len and maxindex == minindex: maxindex = -1 - expect(step != 0,"Step size 0 not allowed") + expect(step != 0, "Step size 0 not allowed") return (minindex, maxindex, step) + def fortran_namelist_base_value(string): r"""Strip off whitespace and repetition syntax from a namelist value. @@ -272,7 +291,7 @@ def fortran_namelist_base_value(string): string = string.strip(" \n") # Strip off repeated value prefix. if FORTRAN_REPEAT_PREFIX_REGEX.search(string) is not None: - string = string[string.find('*') + 1:] + string = string[string.find("*") + 1 :] return string @@ -298,7 +317,7 @@ def character_literal_to_string(literal): # Find left and right edges of the string, extract middle. left_pos = literal.find(delimiter) right_pos = literal.rfind(delimiter) - new_literal = literal[left_pos+1:right_pos] + new_literal = literal[left_pos + 1 : right_pos] # Replace escaped quote and apostrophe characters. return new_literal.replace(delimiter * 2, delimiter) @@ -318,6 +337,7 @@ def string_to_character_literal(string): string = string.replace('"', '""') return '"' + string + '"' + def is_valid_fortran_namelist_literal(type_, string): r"""Determine whether a literal is valid in a Fortran namelist. @@ -573,12 +593,14 @@ def is_valid_fortran_namelist_literal(type_, string): >>> is_valid_fortran_namelist_literal("logical", ".t2 ") True """ - expect(type_ in FORTRAN_LITERAL_REGEXES, - "Invalid Fortran type for a namelist: {!r}".format(str(type_))) + expect( + type_ in FORTRAN_LITERAL_REGEXES, + "Invalid Fortran type for a namelist: {!r}".format(str(type_)), + ) # Strip off whitespace and repetition. string = fortran_namelist_base_value(string) # Null values are always allowed. - if string == '': + if string == "": return True return FORTRAN_LITERAL_REGEXES[type_].search(string) is not None @@ -641,39 +663,50 @@ def literal_to_python_value(literal, type_=None): >>> literal_to_python_value("") >>> literal_to_python_value("-1.D+10") -10000000000.0 - >>> shouldRaise(ValueError, literal_to_python_value, "nan(1234)") + >>> literal_to_python_value("nan(1234)") + Traceback (most recent call last): + ... + ValueError: could not convert string to float: 'nan(1234)' """ - expect(FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is None, - "Cannot use repetition syntax in literal_to_python_value") + expect( + FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is None, + "Cannot use repetition syntax in literal_to_python_value", + ) # Handle null value. - if fortran_namelist_base_value(literal) == '': + if fortran_namelist_base_value(literal) == "": return None if type_ is None: # Autodetect type. 
- for test_type in ('character', 'complex', 'integer', 'logical', 'real'): + for test_type in ("character", "complex", "integer", "logical", "real"): if is_valid_fortran_namelist_literal(test_type, literal): type_ = test_type break - expect(type_ is not None, - "{!r} is not a valid literal for any Fortran type.".format(str(literal))) + expect( + type_ is not None, + "{!r} is not a valid literal for any Fortran type.".format(str(literal)), + ) else: # Check that type is valid. - expect(is_valid_fortran_namelist_literal(type_, literal), - "{!r} is not a valid literal of type {!r}.".format(str(literal), str(type_))) + expect( + is_valid_fortran_namelist_literal(type_, literal), + "{!r} is not a valid literal of type {!r}.".format( + str(literal), str(type_) + ), + ) # Conversion for each type. - if type_ == 'character': + if type_ == "character": return character_literal_to_string(literal) - elif type_ == 'complex': - literal = literal.lstrip(' \n(').rstrip(' \n)') - real_part, _, imag_part = literal.partition(',') + elif type_ == "complex": + literal = literal.lstrip(" \n(").rstrip(" \n)") + real_part, _, imag_part = literal.partition(",") return complex(float(real_part), float(imag_part)) - elif type_ == 'integer': + elif type_ == "integer": return int(literal) - elif type_ == 'logical': - literal = literal.lstrip(' \n.') - return literal[0] in 'tT' - elif type_ == 'real': - literal = literal.lower().replace('d', 'e') + elif type_ == "logical": + literal = literal.lstrip(" \n.") + return literal[0] in "tT" + elif type_ == "real": + literal = literal.lower().replace("d", "e") return float(literal) @@ -692,7 +725,7 @@ def expand_literal_list(literals): expanded = [] for literal in literals: if FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is not None: - num, _, value = literal.partition('*') + num, _, value = literal.partition("*") expanded += int(num) * [value] else: expanded.append(literal) @@ -729,27 +762,28 @@ def compress_literal_list(literals): else: # Otherwise, write out the previous literal and start tracking the # new one. - rep_str = str(num_reps) + '*' if num_reps > 1 else '' - if isinstance(old_literal, six.string_types): + rep_str = str(num_reps) + "*" if num_reps > 1 else "" + if isinstance(old_literal, str): compressed.append(rep_str + old_literal) else: compressed.append(rep_str + str(old_literal)) old_literal = literal num_reps = 1 - rep_str = str(num_reps) + '*' if num_reps > 1 else '' - if isinstance(old_literal, six.string_types): + rep_str = str(num_reps) + "*" if num_reps > 1 else "" + if isinstance(old_literal, str): compressed.append(rep_str + old_literal) else: compressed.append(rep_str + str(old_literal)) return compressed else: for literal in literals: - if isinstance(literal, six.string_types): + if isinstance(literal, str): compressed.append(literal) else: compressed.append(str(literal)) return compressed + def merge_literal_lists(default, overwrite): """Merge two lists of literal value strings. @@ -780,7 +814,7 @@ def merge_literal_lists(default, overwrite): overwrite = expand_literal_list(overwrite) for default_elem, elem in zip(default, overwrite): - if elem == '': + if elem == "": merged.append(default_elem) else: merged.append(elem) @@ -827,11 +861,15 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): of "6*2" is returned as that string; it is not converted to 6 copies of the Python integer `2`. Null values are returned as the empty string (""). 
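    A minimal usage sketch (the group and variable names here are illustrative
    only):

    >>> parse(text='&foo bar=1,2 /').get_variable_value('foo', 'bar')
    ['1', '2']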
""" - expect(in_file is not None or text is not None, - "Must specify an input file or text to the namelist parser.") - expect(in_file is None or text is None, - "Cannot specify both input file and text to the namelist parser.") - if isinstance(in_file, six.string_types): + expect( + in_file is not None or text is not None, + "Must specify an input file or text to the namelist parser.", + ) + expect( + in_file is None or text is None, + "Cannot specify both input file and text to the namelist parser.", + ) + if isinstance(in_file, str): logger.debug("Reading namelist at: {}".format(in_file)) with open(in_file) as in_file_obj: text = in_file_obj.read() @@ -839,7 +877,7 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): logger.debug("Reading namelist from file object") text = in_file.read() if convert_tab_to_space: - text = text.replace('\t', ' ') + text = text.replace("\t", " ") try: namelist_dict = _NamelistParser(text, groupless).parse_namelist() except (_NamelistEOF, _NamelistParseError) as error: @@ -851,21 +889,6 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): return Namelist(namelist_dict) -def shouldRaise(eclass, method, *args, **kw): - """ - A helper function to make doctests py3 compatible - http://python3porting.com/problems.html#running-doctests - """ - try: - method(*args, **kw) - except BaseException: - e = sys.exc_info()[1] - if not isinstance(e, eclass): - raise - return - raise Exception("Expected exception %s not raised" % - str(eclass)) - class Namelist(object): """Class representing a Fortran namelist. @@ -896,12 +919,21 @@ def __init__(self, groups=None): if groups is not None: for group_name in groups: expect(group_name is not None, " Got None in groups {}".format(groups)) - self._groups[group_name] = collections.OrderedDict() + self._groups[group_name] = {} for variable_name in groups[group_name]: - self._groups[group_name][variable_name] = groups[group_name][variable_name] + self._groups[group_name][variable_name] = groups[group_name][ + variable_name + ] + + @contextmanager + def __call__(self, filename): + try: + yield self + finally: + self.write(filename) def clean_groups(self): - self._groups = collections.OrderedDict() + self._groups = {} def get_group_names(self): """Return a list of all groups in the namelist. @@ -930,7 +962,7 @@ def get_variable_names(self, group_name): >>> sorted(x.get_variable_names('fOo')) ['bar(::)', 'bazz', 'bazz(2)', 'bazz(:2:)'] """ - gn = string_in_list(group_name,self._groups) + gn = string_in_list(group_name, self._groups) if not gn: return [] return list(self._groups[gn].keys()) @@ -949,12 +981,14 @@ def get_variable_value(self, group_name, variable_name): >>> parse(text='&foo bar=1,2 /').get_variable_value('foO', 'Bar') ['1', '2'] """ - gn = string_in_list(group_name,self._groups) + gn = string_in_list(group_name, self._groups) if gn: - vn = string_in_list(variable_name,self._groups[gn]) + vn = string_in_list(variable_name, self._groups[gn]) if vn: - return self._groups[gn][vn] - return [''] + # Make a copy of the list so that any modifications done by the caller + # don't modify the internal values. + return self._groups[gn][vn][:] + return [""] def get_value(self, variable_name): """Return the value of a uniquely-named variable. 
@@ -981,13 +1015,15 @@ def get_value(self, variable_name): if vnt: vn = vnt possible_groups.append(group_name) - expect(len(possible_groups) <= 1, - "Namelist.get_value: Variable {} is present in multiple groups: " - + str(possible_groups)) + expect( + len(possible_groups) <= 1, + "Namelist.get_value: Variable {} is present in multiple groups: " + + str(possible_groups), + ) if possible_groups: return self._groups[possible_groups[0]][vn] else: - return [''] + return [""] def set_variable_value(self, group_name, variable_name, value, var_size=1): """Set the value of the specified variable. @@ -1014,12 +1050,22 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): >>> x.get_variable_value('foo', 'red') ['', '2', '', '4', '', '6'] """ + if not isinstance(value, (set, list)): + value = [ + value, + ] + minindex, maxindex, step = get_fortran_variable_indices(variable_name, var_size) variable_name = get_fortran_name_only(variable_name) - expect(minindex > 0, "Indices < 1 not supported in CIME interface to fortran namelists... lower bound={}".format(minindex)) - gn = string_in_list(group_name,self._groups) - if not gn: + expect( + minindex > 0, + "Indices < 1 not supported in CIME interface to fortran namelists... lower bound={}".format( + minindex + ), + ) + gn = string_in_list(group_name, self._groups) + if gn is None: gn = group_name self._groups[gn] = {} @@ -1030,15 +1076,15 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): else: vn = variable_name tlen = 1 - self._groups[gn][vn] = [''] + self._groups[gn][vn] = [""] if minindex > tlen: - self._groups[gn][vn].extend(['']*(minindex-tlen-1)) + self._groups[gn][vn].extend([""] * (minindex - tlen - 1)) - for i in range(minindex, maxindex+2*step, step): + for i in range(minindex, maxindex + 2 * step, step): while len(self._groups[gn][vn]) < i: - self._groups[gn][vn].append('') - self._groups[gn][vn][i-1] = value.pop(0) + self._groups[gn][vn].append("") + self._groups[gn][vn][i - 1] = value.pop(0) if len(value) == 0: break @@ -1056,9 +1102,9 @@ def delete_variable(self, group_name, variable_name): >>> x.get_variable_names('brack') [] """ - gn = string_in_list(group_name,self._groups) + gn = string_in_list(group_name, self._groups) if gn: - vn=string_in_list(variable_name,self._groups[gn]) + vn = string_in_list(variable_name, self._groups[gn]) if vn: del self._groups[gn][vn] @@ -1120,8 +1166,9 @@ def merge_nl(self, other, overwrite=False): merged_val = merge_literal_lists(self_val, other_val) else: merged_val = merge_literal_lists(other_val, self_val) - self.set_variable_value(group_name, variable_name, merged_val, - var_size=len(merged_val)) + self.set_variable_value( + group_name, variable_name, merged_val, var_size=len(merged_val) + ) def get_group_variables(self, group_name): group_variables = {} @@ -1131,7 +1178,10 @@ def get_group_variables(self, group_name): group_variables[name] = value return group_variables - def write(self, out_file, groups=None, append=False, format_='nml', sorted_groups=True, skip_comps=None): + def write( + self, out_file, groups=None, append=False, format_="nml", sorted_groups=True + ): + """Write a the output data (normally fortran namelist) to the out_file As with `parse`, the `out_file` argument can be either a file name, or a @@ -1146,37 +1196,33 @@ def write(self, out_file, groups=None, append=False, format_='nml', sorted_group specifies the file format. Formats other than 'nml' may not support all possible output values. 
""" - expect(format_ in ('nml', 'rc', 'nmlcontents', 'nuopc'), - "Namelist.write: unexpected output format {!r}".format(str(format_))) - if isinstance(out_file, six.string_types): + expect( + format_ in ("nml", "rc", "nmlcontents"), + "Namelist.write: unexpected output format {!r}".format(str(format_)), + ) + if isinstance(out_file, str): logger.debug("Writing namelist to: {}".format(out_file)) - flag = 'a' if append else 'w' + flag = "a" if append else "w" with open(out_file, flag) as file_obj: - if format_ == 'nuopc': - self._write_nuopc(file_obj, groups, sorted_groups=sorted_groups, skip_comps=skip_comps) - else: - self._write(file_obj, groups, format_, sorted_groups=sorted_groups) + self._write(file_obj, groups, format_, sorted_groups=sorted_groups) else: logger.debug("Writing namelist to file object") - if format_ == 'nuopc': - self._write_nuopc(out_file, groups, sorted_groups=sorted_groups, skip_comps=skip_comps) - else: - self._write(out_file, groups, format_, sorted_groups=sorted_groups) + self._write(out_file, groups, format_, sorted_groups=sorted_groups) def _write(self, out_file, groups, format_, sorted_groups): """Unwrapped version of `write` assuming that a file object is input.""" if groups is None: groups = list(self._groups.keys()) - if format_ == 'nml' or format_ == 'nmlcontents': - equals = ' =' - elif format_ == 'rc': - equals = ':' - if (sorted_groups): + if format_ == "nml" or format_ == "nmlcontents": + equals = " =" + elif format_ == "rc": + equals = ":" + if sorted_groups: group_names = sorted(group for group in groups) else: group_names = groups for group_name in group_names: - if format_ == 'nml': + if group_name != "" and format_ == "nml": out_file.write("&{}\n".format(group_name)) # allow empty group if group_name in self._groups: @@ -1188,16 +1234,27 @@ def _write(self, out_file, groups, format_, sorted_groups): # in the write phase, all characters in the namelist variable name after # the @ and including the @ should be removed if "@" in name: - name = re.sub('@.+$', "", name) + name = re.sub("@.+$", "", name) # To prettify things for long lists of values, build strings # line-by-line. - if values[0] == "True" or values[0] == "False": - values[0] = values[0].replace("True",".true.").replace("False",".false.") - lines = [" {}{} {}".format(name, equals, values[0])] + if isinstance(values[0], bool) or values[0].lower() in ( + "true", + "false", + ): + values[0] = convert_bool(values[0]) + + if group_name == "": + lines = ["{}{} {}".format(name, equals, values[0])] + else: + lines = [" {}{} {}".format(name, equals, values[0])] for value in values[1:]: - if value == "True" or value == "False": - value = value.replace("True",".true.").replace("False",".false.") + if isinstance(value, bool) or value.lower() in ( + "true", + "false", + ): + value = convert_bool(value) + if len(lines[-1]) + len(value) <= 77: lines[-1] += ", " + value else: @@ -1206,59 +1263,80 @@ def _write(self, out_file, groups, format_, sorted_groups): lines[-1] += "\n" for line in lines: out_file.write(line) - if format_ == 'nml': + if group_name != "" and format_ == "nml": out_file.write("/\n") - if format_ == 'nmlcontents': + if format_ == "nmlcontents": out_file.write("\n") - def _write_nuopc(self, out_file, groups, sorted_groups, skip_comps): + def write_nuopc(self, out_file, groups=None, sorted_groups=True): + """Write a nuopc config file out_file + + As with `parse`, the `out_file` argument can be either a file name, or a + file object with a `write` method that accepts unicode. 
If specified, + the `groups` argument specifies a subset of all groups to write out. + """ + if isinstance(out_file, str): + logger.debug("Writing nuopc config file to: {}".format(out_file)) + flag = "w" + with open(out_file, flag) as file_obj: + self._write_nuopc(file_obj, groups, sorted_groups=sorted_groups) + else: + logger.debug("Writing nuopc config data to file object") + self._write_nuopc(out_file, groups, sorted_groups=sorted_groups) + + def _write_nuopc(self, out_file, groups, sorted_groups): """Unwrapped version of `write` assuming that a file object is input.""" if groups is None: groups = self._groups.keys() - if (sorted_groups): + if sorted_groups: group_names = sorted(group for group in groups) else: group_names = groups for group_name in group_names: - if "_attributes" not in group_name and "nuopc_" not in group_name and "_no_group" not in group_name: + if ( + "_modelio" not in group_name + and "_attributes" not in group_name + and "nuopc_" not in group_name + and "_no_group" not in group_name + ): continue - if "_attributes" in group_name: + if "_attributes" in group_name or "_modelio" in group_name: out_file.write("{}::\n".format(group_name)) + indent = True group = self._groups[group_name] for name in sorted(group.keys()): values = group[name] - if "component_list" in name: - for skip_comp in skip_comps: - if skip_comp in values[0]: - values[0] = values[0].replace(skip_comp,"") - # @ is used in a namelist to put the same namelist variable in multiple groups # in the write phase, all characters in the namelist variable name after # the @ and including the @ should be removed if "@" in name: - name = re.sub('@.+$', "", name) + name = re.sub("@.+$", "", name) equals = " =" if "_var" in group_name: - equals = ':' + equals = ":" # To prettify things for long lists of values, build strings # line-by-line. if values[0] == "True" or values[0] == "False": - values[0] = values[0].replace("True",".true.").replace("False",".false.") + values[0] = ( + values[0].replace("True", ".true.").replace("False", ".false.") + ) - if "_attribute" in group_name: + if indent: lines = [" {}{} {}".format(name, equals, values[0])] else: lines = ["{}{} {}".format(name, equals, values[0])] for value in values[1:]: if value == "True" or value == "False": - value = value.replace("True",".true.").replace("False",".false.") + value = value.replace("True", ".true.").replace( + "False", ".false." + ) if len(lines[-1]) + len(value) <= 77: lines[-1] += ", " + value else: @@ -1266,11 +1344,13 @@ def _write_nuopc(self, out_file, groups, sorted_groups, skip_comps): lines.append(" " + value) lines[-1] += "\n" for line in lines: - line = line.replace('"','') + line = line.replace('"', "") out_file.write(line) - if "_attribute" in group_name: + if indent: out_file.write("::\n\n") + indent = False + class _NamelistEOF(Exception): @@ -1316,7 +1396,7 @@ def __str__(self): return string -class _NamelistParser(object): # pylint:disable=too-few-public-methods +class _NamelistParser(object): # pylint:disable=too-few-public-methods """Class to validate and read from Fortran namelist input. @@ -1336,7 +1416,7 @@ def __init__(self, text, groupless=False): # Dictionary with group names as keys, and dictionaries of variable # name-value pairs as values. (Or a single flat dictionary if # `groupless=True`.) - self._settings = collections.OrderedDict() + self._settings = {} # Fortran allows setting a particular index of an array # such as foo(2)='k' # this dict is set to that value if used. 
@@ -1359,14 +1439,16 @@ def _curr(self): def _next(self): """Return the character at the next position. - >>> shouldRaise(_NamelistEOF, _NamelistParser(' ')._next) - + >>> _NamelistParser(' ')._next() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. """ # If at the end of the file, we should raise _NamelistEOF. The easiest # way to do this is to just advance. if self._pos == self._len - 1: self._advance() - return self._text[self._pos+1] + return self._text[self._pos + 1] def _advance(self, nchars=1, check_eof=False): r"""Advance the parser's current position by `nchars` characters. @@ -1399,10 +1481,14 @@ def _advance(self, nchars=1, check_eof=False): >>> x._advance(3) >>> (x._pos, x._line, x._col) (7, 3, 1) - >>> shouldRaise(_NamelistEOF, x._advance, 1) - - >>> shouldRaise(_NamelistEOF, _NamelistParser('abc\n')._advance, 4) - + >>> x._advance(1) + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. + >>> _NamelistParser('abc\n')._advance(4) + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> x = _NamelistParser('ab') >>> x._advance(check_eof=True) False @@ -1411,17 +1497,16 @@ def _advance(self, nchars=1, check_eof=False): >>> x._advance(check_eof=True) True """ - assert nchars >= 0, \ - "_NamelistParser attempted to 'advance' backwards" + assert nchars >= 0, "_NamelistParser attempted to 'advance' backwards" new_pos = min(self._pos + nchars, self._len) - consumed_text = self._text[self._pos:new_pos] + consumed_text = self._text[self._pos : new_pos] self._pos = new_pos - lines = consumed_text.count('\n') + lines = consumed_text.count("\n") self._line += lines # If we started a new line, set self._col to be relative to the start of # the current line. if lines > 0: - self._col = -(consumed_text.rfind('\n') + 1) + self._col = -(consumed_text.rfind("\n") + 1) self._col += len(consumed_text) end_of_file = new_pos == self._len if check_eof: @@ -1445,8 +1530,10 @@ def _eat_whitespace(self, allow_initial_comment=False): >>> x._eat_whitespace() False >>> x._advance() - >>> shouldRaise(_NamelistEOF, x._eat_whitespace) - + >>> x._eat_whitespace() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> x = _NamelistParser(' \n! blah\n ! blah\n a') >>> x._eat_whitespace() True @@ -1471,8 +1558,8 @@ def _eat_whitespace(self, allow_initial_comment=False): eaten = False comment_allowed = allow_initial_comment while True: - while self._curr() in (' ', '\n'): - comment_allowed |= self._curr() == '\n' + while self._curr() in (" ", "\n"): + comment_allowed |= self._curr() == "\n" eaten = True self._advance() # Note the reliance on short-circuit `and` here. @@ -1500,15 +1587,19 @@ def _eat_comment(self): >>> x._curr() 'a' >>> x._advance(2) - >>> shouldRaise(_NamelistEOF, x._eat_comment) - + >>> x._eat_comment() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> x = _NamelistParser('! foo\n') - >>> shouldRaise(_NamelistEOF, x._eat_comment) - + >>> x._eat_comment() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. 
""" - if self._curr() != '!': + if self._curr() != "!": return False - newline_pos = self._text[self._pos:].find('\n') + newline_pos = self._text[self._pos :].find("\n") if newline_pos == -1: # This is the last line. self._advance(self._len - self._pos) @@ -1528,8 +1619,10 @@ def _expect_char(self, chars): >>> x = _NamelistParser('ab') >>> x._expect_char('a') >>> x._advance() - >>> shouldRaise(_NamelistParseError, x._expect_char, 'a') - + >>> x._expect_char('a') + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected 'a' but found 'b' >>> x._expect_char('ab') """ if self._curr() not in chars: @@ -1537,25 +1630,37 @@ def _expect_char(self, chars): char_description = repr(str(chars)) else: char_description = "one of the characters in {!r}".format(str(chars)) - raise _NamelistParseError("expected {} but found {!r}".format(char_description, str(self._curr()))) + raise _NamelistParseError( + "expected {} but found {!r}".format(char_description, str(self._curr())) + ) def _parse_namelist_group_name(self): r"""Parses and returns a namelist group name at the current position. - >>> shouldRaise(_NamelistParseError, _NamelistParser('abc')._parse_namelist_group_name) - - >>> shouldRaise(_NamelistEOF, _NamelistParser('&abc')._parse_namelist_group_name) - + >>> _NamelistParser('abc')._parse_namelist_group_name() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected '&' but found 'a' + >>> _NamelistParser('&abc')._parse_namelist_group_name() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> _NamelistParser('&abc ')._parse_namelist_group_name() 'abc' >>> _NamelistParser('&abc\n')._parse_namelist_group_name() 'abc' - >>> shouldRaise(_NamelistParseError, _NamelistParser('&abc/ ')._parse_namelist_group_name) - - >>> shouldRaise(_NamelistParseError, _NamelistParser('&abc= ')._parse_namelist_group_name) - - >>> shouldRaise(_NamelistParseError, _NamelistParser('& ')._parse_namelist_group_name) - + >>> _NamelistParser('&abc/ ')._parse_namelist_group_name() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: 'abc/' is not a valid variable name + >>> _NamelistParser('&abc= ')._parse_namelist_group_name() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: 'abc=' is not a valid variable name + >>> _NamelistParser('& ')._parse_namelist_group_name() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: '' is not a valid variable name """ self._expect_char("&") self._advance() @@ -1568,8 +1673,10 @@ def _parse_variable_name(self, allow_equals=True): variable name; if it is `False`, only white space can be used for this purpose. - >>> shouldRaise(_NamelistEOF, _NamelistParser('abc')._parse_variable_name) - + >>> _NamelistParser('abc')._parse_variable_name() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. 
>>> _NamelistParser('foo(2)= ')._parse_variable_name() 'foo(2)' >>> _NamelistParser('abc ')._parse_variable_name() @@ -1605,28 +1712,30 @@ def _parse_variable_name(self, allow_equals=True): 'foo' """ old_pos = self._pos - separators = (' ', '\n', '=', '+') if allow_equals else (' ', '\n') + separators = (" ", "\n", "=", "+") if allow_equals else (" ", "\n") while self._curr() not in separators: self._advance() - text = self._text[old_pos:self._pos] - if '(' in text: - expect(')' in text,"Parsing error ") - elif ')' in text: - expect(False,"Parsing error ") + text = self._text[old_pos : self._pos] + if "(" in text: + expect(")" in text, "Parsing error ") + elif ")" in text: + expect(False, "Parsing error ") # @ is used in a namelist to put the same namelist variable in multiple groups # in the write phase, all characters in the namelist variable name after # the @ and including the @ should be removed if "%" in text: - text_check = re.sub('%.+$', "", text) + text_check = re.sub("%.+$", "", text) elif "@" in text: - text_check = re.sub('@.+$', "", text) + text_check = re.sub("@.+$", "", text) else: text_check = text if not is_valid_fortran_name(text_check): if re.search(r".*\(.*\,.*\)", text_check): - err_str = "Multiple dimensions not supported in CIME namelist variables {!r}".format(str(text)) + err_str = "Multiple dimensions not supported in CIME namelist variables {!r}".format( + str(text) + ) else: err_str = "{!r} is not a valid variable name".format(str(text)) raise _NamelistParseError(err_str) @@ -1638,14 +1747,18 @@ def _parse_character_literal(self): Position on return is the last character of the string; we avoid advancing past that in order to avoid potential EOF errors. - >>> shouldRaise(_NamelistEOF, _NamelistParser('"abc')._parse_character_literal) - + >>> _NamelistParser('"abc')._parse_character_literal() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> _NamelistParser('"abc" ')._parse_character_literal() '"abc"' >>> _NamelistParser("'abc' ")._parse_character_literal() "'abc'" - >>> shouldRaise(_NamelistParseError, _NamelistParser("*abc* ")._parse_character_literal) - + >>> _NamelistParser("*abc* ")._parse_character_literal() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: *abc* is not a valid character literal >>> _NamelistParser("'abc''def' ")._parse_character_literal() "'abc''def'" >>> _NamelistParser("'abc''' ")._parse_character_literal() @@ -1667,9 +1780,11 @@ def _parse_character_literal(self): self._advance(2) else: break - text = self._text[old_pos:self._pos+1] + text = self._text[old_pos : self._pos + 1] if not is_valid_fortran_namelist_literal("character", text): - raise _NamelistParseError("{} is not a valid character literal".format(text)) + raise _NamelistParseError( + "{} is not a valid character literal".format(text) + ) return text def _parse_complex_literal(self): @@ -1678,19 +1793,25 @@ def _parse_complex_literal(self): Position on return is the last character of the string; we avoid advancing past that in order to avoid potential EOF errors. - >>> shouldRaise(_NamelistEOF, _NamelistParser('(1.,2.')._parse_complex_literal) - + >>> _NamelistParser('(1.,2.')._parse_complex_literal() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> _NamelistParser('(1.,2.) 
')._parse_complex_literal() '(1.,2.)' - >>> shouldRaise(_NamelistParseError, _NamelistParser("(A,B) ")._parse_complex_literal) - + >>> _NamelistParser("(A,B) ")._parse_complex_literal() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: '(A,B)' is not a valid complex literal """ old_pos = self._pos - while self._curr() != ')': + while self._curr() != ")": self._advance() - text = self._text[old_pos:self._pos+1] + text = self._text[old_pos : self._pos + 1] if not is_valid_fortran_namelist_literal("complex", text): - raise _NamelistParseError("{!r} is not a valid complex literal".format(str(text))) + raise _NamelistParseError( + "{!r} is not a valid complex literal".format(str(text)) + ) return text def _look_ahead_for_equals(self, pos): @@ -1709,8 +1830,8 @@ def _look_ahead_for_equals(self, pos): False """ for test_pos in range(pos, self._len): - if self._text[test_pos] not in (' ', '\n'): - if self._text[test_pos] == '=': + if self._text[test_pos] not in (" ", "\n"): + if self._text[test_pos] == "=": return True else: break @@ -1732,8 +1853,8 @@ def _look_ahead_for_plusequals(self, pos): False """ for test_pos in range(pos, self._len): - if self._text[test_pos] not in (' ', '\n'): - if self._text[test_pos] == '+': + if self._text[test_pos] not in (" ", "\n"): + if self._text[test_pos] == "+": return self._look_ahead_for_equals(test_pos + 1) else: break @@ -1762,14 +1883,18 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): '"abc"' >>> _NamelistParser("'abc' ")._parse_literal() "'abc'" - >>> shouldRaise(_NamelistEOF, _NamelistParser('"abc"')._parse_literal) - + >>> _NamelistParser('"abc"')._parse_literal() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> _NamelistParser('"abc"')._parse_literal(allow_eof_end=True) '"abc"' >>> _NamelistParser('(1.,2.) ')._parse_literal() '(1.,2.)' - >>> shouldRaise(_NamelistEOF, _NamelistParser('(1.,2.)')._parse_literal) - + >>> _NamelistParser('(1.,2.)')._parse_literal() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> _NamelistParser('(1.,2.)')._parse_literal(allow_eof_end=True) '(1.,2.)' >>> _NamelistParser('5 ')._parse_literal() @@ -1782,8 +1907,10 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): 'nan(booga)' >>> _NamelistParser('.FLORIDA$ ')._parse_literal() '.FLORIDA$' - >>> shouldRaise(_NamelistParseError, _NamelistParser('hamburger ')._parse_literal) - + >>> _NamelistParser('hamburger ')._parse_literal() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected literal value, but got 'hamburger' >>> _NamelistParser('5,')._parse_literal() '5' >>> _NamelistParser('5\n')._parse_literal() @@ -1798,14 +1925,20 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): '6*(1., 2.)' >>> _NamelistParser('6*"a" ')._parse_literal() '6*"a"' - >>> shouldRaise(_NamelistEOF, _NamelistParser('6*')._parse_literal) - + >>> _NamelistParser('6*')._parse_literal() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. 
>>> _NamelistParser('6*')._parse_literal(allow_eof_end=True) '6*' - >>> shouldRaise(_NamelistParseError, _NamelistParser('foo= ')._parse_literal) - - >>> shouldRaise(_NamelistParseError, _NamelistParser('foo+= ')._parse_literal) - + >>> _NamelistParser('foo= ')._parse_literal() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo=' + >>> _NamelistParser('foo+= ')._parse_literal() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo+=' >>> _NamelistParser('5,')._parse_literal(allow_name=True) '5' >>> x = _NamelistParser('foo= ') @@ -1816,10 +1949,14 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): >>> x._parse_literal(allow_name=True) >>> x._curr() 'f' - >>> shouldRaise(_NamelistParseError, _NamelistParser('6*foo= ')._parse_literal, allow_name=True) - - >>> shouldRaise(_NamelistParseError, _NamelistParser('6*foo+= ')._parse_literal, allow_name=True) - + >>> _NamelistParser('6*foo= ')._parse_literal(allow_name=True) + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo=' + >>> _NamelistParser('6*foo+= ')._parse_literal(allow_name=True) + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo+=' >>> x = _NamelistParser('foo = ') >>> x._parse_literal(allow_name=True) >>> x._curr() @@ -1833,38 +1970,38 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): """ # Deal with empty input string. if allow_eof_end and self._pos == self._len: - return '' + return "" # Deal with a repeated value prefix. old_pos = self._pos - if FORTRAN_REPEAT_PREFIX_REGEX.search(self._text[self._pos:]): + if FORTRAN_REPEAT_PREFIX_REGEX.search(self._text[self._pos :]): allow_name = False - while self._curr() != '*': + while self._curr() != "*": self._advance() if self._advance(check_eof=allow_eof_end): # In case the file ends with the 'r*' form of null value. return self._text[old_pos:] - prefix = self._text[old_pos:self._pos] + prefix = self._text[old_pos : self._pos] # Deal with delimited literals. if self._curr() in ('"', "'"): literal = self._parse_character_literal() self._advance(check_eof=allow_eof_end) return prefix + literal - if self._curr() == '(': + if self._curr() == "(": literal = self._parse_complex_literal() self._advance(check_eof=allow_eof_end) return prefix + literal # Deal with non-delimited literals. 
new_pos = self._pos - separators = [' ', '\n', ',', '/'] + separators = [" ", "\n", ",", "/"] if allow_name: - separators.append('=') - separators.append('+') + separators.append("=") + separators.append("+") while new_pos != self._len and self._text[new_pos] not in separators: # allow commas if they are inside () - if self._text[new_pos] == '(': - separators.remove(',') - elif self._text[new_pos] == ')': - separators.append(',') + if self._text[new_pos] == "(": + separators.remove(",") + elif self._text[new_pos] == ")": + separators.append(",") new_pos += 1 if not allow_eof_end and new_pos == self._len: @@ -1878,10 +2015,14 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): return self._advance(new_pos - self._pos, check_eof=allow_eof_end) - text = self._text[old_pos:self._pos] - if not any(is_valid_fortran_namelist_literal(type_, text) - for type_ in ("integer", "logical", "real")): - raise _NamelistParseError("expected literal value, but got {!r}".format(str(text))) + text = self._text[old_pos : self._pos] + if not any( + is_valid_fortran_namelist_literal(type_, text) + for type_ in ("integer", "logical", "real") + ): + raise _NamelistParseError( + "expected literal value, but got {!r}".format(str(text)) + ) return text def _expect_separator(self, allow_eof=False): @@ -1921,8 +2062,10 @@ def _expect_separator(self, allow_eof=False): >>> x._curr() '/' >>> x = _NamelistParser("a") - >>> shouldRaise(_NamelistParseError, x._expect_separator) - + >>> x._expect_separator() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected one of the characters in ' \n,/' but found 'a' >>> x = _NamelistParser(" , a") >>> x._expect_separator() True @@ -1952,18 +2095,20 @@ def _expect_separator(self, allow_eof=False): >>> x._expect_separator(allow_eof=True) True >>> x = _NamelistParser(" / ") - >>> shouldRaise(_NamelistParseError, x._expect_separator, allow_eof=True) - + >>> x._expect_separator(allow_eof=True) + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: found group-terminating '/' in file without group names """ errstring = "found group-terminating '/' in file without group names" # Deal with the possibility that we are already at EOF. if allow_eof and self._pos == self._len: return False # Must actually be at a value separator. - self._expect_char(' \n,/') + self._expect_char(" \n,/") try: self._eat_whitespace() - if self._curr() == '/': + if self._curr() == "/": if allow_eof: raise _NamelistParseError(errstring) else: @@ -1974,7 +2119,7 @@ def _expect_separator(self, allow_eof=False): else: raise try: - if self._curr() == ',': + if self._curr() == ",": self._advance() self._eat_whitespace(allow_initial_comment=True) except _NamelistEOF: @@ -2002,8 +2147,10 @@ def _parse_name_and_values(self, allow_eof_end=False): ('foo', ["'bar'"], False) >>> _NamelistParser("foo=\n'bar' /")._parse_name_and_values() ('foo', ["'bar'"], False) - >>> shouldRaise(_NamelistParseError, _NamelistParser("foo 'bar' /")._parse_name_and_values) - + >>> _NamelistParser("foo 'bar' /")._parse_name_and_values() + Traceback (most recent call last): + ... 
+ namelist._NamelistParseError: Error in parsing namelist: expected '=' but found "'" >>> _NamelistParser("foo='bar','bazz' /")._parse_name_and_values() ('foo', ["'bar'", "'bazz'"], False) >>> _NamelistParser("foo=,,'bazz',6*/")._parse_name_and_values() @@ -2012,14 +2159,18 @@ def _parse_name_and_values(self, allow_eof_end=False): ('foo', ["'bar'", "'bazz'"], False) >>> _NamelistParser("foo='bar' 'bazz' foo2(2)='ban'")._parse_name_and_values() ('foo', ["'bar'", "'bazz'"], False) - >>> shouldRaise(_NamelistParseError, _NamelistParser("foo= foo2='ban' ")._parse_name_and_values) - + >>> _NamelistParser("foo= foo2='ban' ")._parse_name_and_values() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: expected literal value, but got "foo2='ban'" >>> _NamelistParser("foo=,,'bazz',6* ")._parse_name_and_values(allow_eof_end=True) ('foo', ['', '', "'bazz'", '6*'], False) >>> _NamelistParser("foo(3)='bazz'")._parse_name_and_values(allow_eof_end=True) ('foo(3)', ["'bazz'"], False) - >>> shouldRaise(_NamelistEOF, _NamelistParser("foo=")._parse_name_and_values) - + >>> _NamelistParser("foo=")._parse_name_and_values() + Traceback (most recent call last): + ... + namelist._NamelistEOF: Unexpected end of file encountered in namelist. >>> _NamelistParser("foo=")._parse_name_and_values(allow_eof_end=True) ('foo', [''], False) >>> _NamelistParser("foo= ")._parse_name_and_values(allow_eof_end=True) @@ -2042,9 +2193,9 @@ def _parse_name_and_values(self, allow_eof_end=False): self._eat_whitespace() # check to see if we have a "+=" - if self._curr() == '+': + if self._curr() == "+": self._advance() - addto=True # tell parser that we want to add to dictionary values + addto = True # tell parser that we want to add to dictionary values self._expect_char("=") try: self._advance() @@ -2052,7 +2203,7 @@ def _parse_name_and_values(self, allow_eof_end=False): except _NamelistEOF: # If we hit the end of file, return a name assigned to a null value. if allow_eof_end: - return name, [''], addto + return name, [""], addto else: raise # Expect at least one literal, even if it's a null value. @@ -2060,15 +2211,16 @@ def _parse_name_and_values(self, allow_eof_end=False): # While we haven't reached the end of the namelist group... while self._expect_separator(allow_eof=allow_eof_end): # see if we can parse a literal (we might get a variable name)... - literal = self._parse_literal(allow_name=True, - allow_eof_end=allow_eof_end) + literal = self._parse_literal(allow_name=True, allow_eof_end=allow_eof_end) if literal is None: break # and if it really is a literal, add it. 
values.append(literal) - (minindex, maxindex, step) = get_fortran_variable_indices(name,allow_any_len=True) + (minindex, maxindex, step) = get_fortran_variable_indices( + name, allow_any_len=True + ) if (minindex > 1 or maxindex > minindex or step > 1) and maxindex > 0: - arraylen =max(0,1 + ((maxindex - minindex)/step)) + arraylen = max(0, 1 + ((maxindex - minindex) / step)) expect(len(values) <= arraylen, "Too many values for array {}".format(name)) return name, values, addto @@ -2082,61 +2234,65 @@ def _parse_namelist_group(self): >>> x = _NamelistParser("&group /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('group', {})]) + {'group': {}} >>> x._curr() '/' >>> x = _NamelistParser("&group\n foo='bar','bazz'\n,, foo2=2*5\n /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('group', {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']})]) + {'group': {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']}} >>> x = _NamelistParser("&group\n foo='bar','bazz'\n,, foo2=2*5\n /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['5', '5'])]) + {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']} >>> x._curr() '/' >>> x = _NamelistParser("&group /&group /") >>> x._parse_namelist_group() >>> x._advance() - >>> shouldRaise(_NamelistParseError, x._parse_namelist_group) - + >>> x._parse_namelist_group() + Traceback (most recent call last): + ... + namelist._NamelistParseError: Error in parsing namelist: Namelist group 'group' encountered twice. >>> x = _NamelistParser("&group foo='bar', foo='bazz' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('group', {'foo': ["'bazz'"]})]) + {'group': {'foo': ["'bazz'"]}} >>> x = _NamelistParser("&group foo='bar', foo= /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('group', {'foo': ["'bar'"]})]) + {'group': {'foo': ["'bar'"]}} >>> x = _NamelistParser("&group foo='bar', foo= /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('foo', ["'bar'"])]) + {'foo': ["'bar'"]} >>> x = _NamelistParser("&group foo='bar', foo+='baz' /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('foo', ["'bar'", "'baz'"])]) + {'foo': ["'bar'", "'baz'"]} >>> x = _NamelistParser("&group foo+='bar' /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('foo', ["'bar'"])]) + {'foo': ["'bar'"]} >>> x = _NamelistParser("&group foo='bar', foo+='baz' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('group', {'foo': ["'bar'", "'baz'"]})]) + {'group': {'foo': ["'bar'", "'baz'"]}} >>> x = _NamelistParser("&group foo+='bar' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([('group', {'foo': ["'bar'"]})]) + {'group': {'foo': ["'bar'"]}} """ group_name = self._parse_namelist_group_name() if not self._groupless: # Make sure that this is the first time we've seen this group. if group_name in self._settings: - raise _NamelistParseError("Namelist group {!r} encountered twice.".format(str(group_name))) + raise _NamelistParseError( + "Namelist group {!r} encountered twice.".format(str(group_name)) + ) self._settings[group_name] = {} self._eat_whitespace() - while self._curr() != '/': + while self._curr() != "/": name, values, addto = self._parse_name_and_values() dsettings = [] if self._groupless: @@ -2164,41 +2320,41 @@ def parse_namelist(self): first by namelist group name, then by variable name. 
>>> _NamelistParser("").parse_namelist() - OrderedDict() + {} >>> _NamelistParser(" \n!Comment").parse_namelist() - OrderedDict() + {} >>> _NamelistParser(" &group /").parse_namelist() - OrderedDict([('group', {})]) + {'group': {}} >>> _NamelistParser("! Comment \n &group /! Comment\n ").parse_namelist() - OrderedDict([('group', {})]) + {'group': {}} >>> _NamelistParser("! Comment \n &group /! Comment ").parse_namelist() - OrderedDict([('group', {})]) + {'group': {}} >>> _NamelistParser("&group1\n foo='bar','bazz'\n,, foo2=2*5\n / &group2 /").parse_namelist() - OrderedDict([('group1', {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']}), ('group2', {})]) + {'group1': {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']}, 'group2': {}} >>> _NamelistParser("!blah \n foo='bar','bazz'\n,, foo2=2*5\n ", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['2*5'])]) + {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['2*5']} >>> _NamelistParser("!blah \n foo='bar','bazz'\n,, foo2=2*5,6\n ", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['2*5', '6'])]) + {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['2*5', '6']} >>> _NamelistParser("!blah \n foo='bar'", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'"])]) + {'foo': ["'bar'"]} >>> _NamelistParser("foo='bar', foo(3)='bazz'", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'"]), ('foo(3)', ["'bazz'"])]) + {'foo': ["'bar'"], 'foo(3)': ["'bazz'"]} >>> _NamelistParser("foo(2)='bar'", groupless=True).parse_namelist() - OrderedDict([('foo(2)', ["'bar'"])]) + {'foo(2)': ["'bar'"]} >>> _NamelistParser("foo(2)='bar', foo(3)='bazz'", groupless=True).parse_namelist() - OrderedDict([('foo(2)', ["'bar'"]), ('foo(3)', ["'bazz'"])]) + {'foo(2)': ["'bar'"], 'foo(3)': ["'bazz'"]} >>> _NamelistParser("foo='bar', foo='bazz'", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bazz'"])]) + {'foo': ["'bazz'"]} >>> _NamelistParser("foo='bar'\n foo+='bazz'", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'", "'bazz'"])]) + {'foo': ["'bar'", "'bazz'"]} >>> _NamelistParser("foo='bar', foo='bazz'", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bazz'"])]) + {'foo': ["'bazz'"]} >>> _NamelistParser("foo='bar', foo=", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'"])]) + {'foo': ["'bar'"]} >>> _NamelistParser("foo='bar', 'bazz'\n foo+='ban'", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'", "'bazz'", "'ban'"])]) + {'foo': ["'bar'", "'bazz'", "'ban'"]} >>> _NamelistParser("foo+='bar'", groupless=True).parse_namelist() - OrderedDict([('foo', ["'bar'"])]) + {'foo': ["'bar'"]} """ # Return empty dictionary for empty files. if self._len == 0: @@ -2210,7 +2366,7 @@ def parse_namelist(self): except _NamelistEOF: return self._settings # Handle case with no namelist groups. - if self._groupless and self._curr() != '&': + if self._groupless and self._curr() != "&": while self._pos < self._len: name, values, addto = self._parse_name_and_values(allow_eof_end=True) if name in self._settings: diff --git a/CIME/nmlgen.py b/CIME/nmlgen.py new file mode 100644 index 00000000000..7cad37dd093 --- /dev/null +++ b/CIME/nmlgen.py @@ -0,0 +1,936 @@ +"""Class for generating component namelists.""" + +# Typically ignore this. 
+# pylint: disable=invalid-name + +# Disable these because this is our standard setup +# pylint: disable=wildcard-import,unused-wildcard-import + +import datetime +import re +import hashlib + +from CIME.XML.standard_module_setup import * +from CIME.namelist import ( + Namelist, + parse, + character_literal_to_string, + string_to_character_literal, + expand_literal_list, + compress_literal_list, + merge_literal_lists, +) +from CIME.XML.namelist_definition import NamelistDefinition +from CIME.utils import expect, safe_copy +from CIME.XML.stream import Stream +from CIME.XML.grids import GRID_SEP + +logger = logging.getLogger(__name__) + +_var_ref_re = re.compile(r"\$(\{)?(?P\w+)(?(1)\})") + +_ymd_re = re.compile(r"%(?P[1-9][0-9]*)?y(?Pm(?Pd)?)?") + +_stream_mct_file_template = """ + + + GENERIC + + + + {domain_varnames} + + + {domain_filepath} + + + {domain_filenames} + + + + + {data_varnames} + + + {data_filepath} + + + {data_filenames} + + + {offset} + + + +""" + + +class NamelistGenerator(object): + + """Utility class for generating namelists for a given component.""" + + _streams_variables = [] + + # pylint:disable=too-many-arguments + def __init__(self, case, definition_files, files=None): + """Construct a namelist generator. + + Arguments: + `case` - `Case` object corresponding to the current case. + `infiles` - List of files with user namelist options. + `definition_files` - List of XML files containing namelist definitions. + `config` - A dictionary of attributes for matching defaults. + """ + # Save off important information from inputs. + self._case = case + self._din_loc_root = case.get_value("DIN_LOC_ROOT") + + # Create definition object - this will validate the xml schema in the definition file + self._definition = NamelistDefinition(definition_files[0], files=files) + + # Determine array of _stream_variables from definition object + # This is only applicable to data models + self._streams_namelists = {"streams": []} + self._streams_variables = self._definition.get_per_stream_entries() + for variable in self._streams_variables: + self._streams_namelists[variable] = [] + + # Create namelist object. + self._namelist = Namelist() + + # entries for which we should potentially call add_default (variables that do not + # set skip_default_entry) + self._default_nodes = [] + + # Define __enter__ and __exit__ so that we can use this as a context manager + def __enter__(self): + return self + + def __exit__(self, *_): + return False + + def init_defaults( + self, + infiles, + config, + skip_groups=None, + skip_entry_loop=False, + skip_default_for_groups=None, + set_group_name=None, + ): + """Return array of names of all definition nodes + + infiles should be a list of file paths, each one giving namelist settings that + take precedence over the default values. Often there will be only one file in this + list. If there are multiple files, earlier files take precedence over later files. + + If skip_default_for_groups is provided, it should be a list of namelist group + names; the add_default call will not be done for any variables in these + groups. This is often paired with later conditional calls to + add_defaults_for_group. 
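+
+        Example (file names and config values here are purely illustrative):
+
+            entries = nmlgen.init_defaults(
+                [os.path.join(caseroot, "user_nl_xxx")],
+                {"phys": "somephys"},
+                skip_default_for_groups=["xxx_override_nml"],
+            )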
+ + """ + if skip_default_for_groups is None: + skip_default_for_groups = [] + + # first clean out any settings left over from previous calls + self.new_instance() + + # Determine the array of entry nodes that will be acted upon + self._default_nodes = self._definition.set_nodes(skip_groups=skip_groups) + + # Add attributes to definition object + self._definition.add_attributes(config) + + # Parse the infile and create namelist settings for the contents of infile + # this will override all other settings in add_defaults + for file_ in infiles: + # Parse settings in "groupless" mode. + nml_dict = parse(in_file=file_, groupless=True) + + # Add groups using the namelist definition. + new_namelist = self._definition.dict_to_namelist(nml_dict, filename=file_) + + # Make sure that the input is actually valid. + self._definition.validate(new_namelist, filename=file_) + + # Merge into existing settings (earlier settings have precedence + # over later settings). + self._namelist.merge_nl(new_namelist) + + if not skip_entry_loop: + for entry in self._default_nodes: + if set_group_name: + group_name = set_group_name + else: + group_name = self._definition.get_group_name(entry) + if not group_name in skip_default_for_groups: + self.add_default(self._definition.get(entry, "id")) + + return [self._definition.get(entry, "id") for entry in self._default_nodes] + + def rename_group(self, group, newgroup): + """Pass through to namelist definition""" + return self._definition.rename_group(group, newgroup) + + def add_defaults_for_group(self, group): + """Call add_default for namelist variables in the given group + + This still skips variables that have attributes of skip_default_entry or + per_stream_entry. + + This must be called after init_defaults. It is often paired with use of + skip_default_for_groups in the init_defaults call. + """ + for entry in self._default_nodes: + group_name = self._definition.get_group_name(entry) + if group_name == group: + self.add_default(self._definition.get(entry, "id")) + + def confirm_group_is_empty(self, group_name, errmsg): + """Confirms that no values have been added to the given group + + If any values HAVE been added to this group, aborts with the given error message. + + This is often paired with use of skip_default_for_groups in the init_defaults call + and add_defaults_for_group, as in: + + if nmlgen.get_value("enable_frac_overrides") == ".true.": + nmlgen.add_defaults_for_group("glc_override_nml") + else: + nmlgen.confirm_empty("glc_override_nml", "some message") + + Args: + group_name: string - name of namelist group + errmsg: string - error message to print if group is not empty + """ + variables_in_group = self._namelist.get_variable_names(group_name) + fullmsg = "{}\nOffending variables: {}".format(errmsg, variables_in_group) + expect(len(variables_in_group) == 0, fullmsg) + + @staticmethod + def quote_string(string): + """Convert a string to a quoted Fortran literal. + + Does nothing if the string appears to be quoted already. 
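+
+        For example, a bare value such as foo is wrapped in quotes to form a
+        Fortran character literal, while a value that already begins and ends
+        with the same quote character is returned as-is.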
+ """ + if string == "" or (string[0] not in ('"', "'") or string[0] != string[-1]): + string = string_to_character_literal(string) + return string + + def _to_python_value(self, name, literals): + """Transform a literal list as needed for `get_value`.""" + ( + var_type, + _, + var_size, + ) = self._definition.split_type_string(name) + if len(literals) > 0 and literals[0] is not None: + values = expand_literal_list(literals) + else: + return "" + + for i, scalar in enumerate(values): + if scalar == "": + values[i] = None + elif var_type == "character": + values[i] = character_literal_to_string(scalar) + + if var_size == 1: + return values[0] + else: + return values + + def _to_namelist_literals(self, name, values): + """Transform a literal list as needed for `set_value`. + + This is the inverse of `_to_python_value`, except that many of the + changes have potentially already been performed. + """ + ( + var_type, + _, + var_size, + ) = self._definition.split_type_string(name) + if var_size == 1 and not isinstance(values, list): + values = [values] + + for i, scalar in enumerate(values): + if scalar is None: + values[i] = "" + elif var_type == "character": + expect(not isinstance(scalar, list), name) + values[i] = self.quote_string(scalar) + + return compress_literal_list(values) + + def get_value(self, name): + """Get the current value of a given namelist variable. + + Note that the return value of this function is always a string or a list + of strings. E.g. the scalar logical value .false. will be returned as + `".false."`, while an array of two .false. values will be returned as + `[".false.", ".false."]`. Whether or not a value is scalar is determined + by checking the array size in the namelist definition file. + + Null values are converted to `None`, and repeated values are expanded, + e.g. `['2*3']` is converted to `['3', '3', '3']`. + + For character variables, the value is converted to a Python string (e.g. + quotation marks are removed). + + All other literals are returned as the raw string values that will be + written to the namelist. + """ + return self._to_python_value(name, self._namelist.get_value(name)) + + def set_value(self, name, value): + """Set the current value of a given namelist variable. + + Usually, you should use `add_default` instead of this function. + + The `name` argument is the name of the variable to set, and the `value` + is a list of strings to use as settings. If the variable is scalar, the + list is optional; i.e. a scalar logical can be set using either + `value='.false.'` or `value=['.false.']`. If the variable is of type + character, and the input is missing quotes, quotes will be added + automatically. If `None` is provided in place of a string, this will be + translated to a null value. + + Note that this function will overwrite the current value, which may hold + a user-specified setting. Even if `value` is (or contains) a null value, + the old setting for the variable will be thrown out completely. + """ + var_group = self._definition.get_group(name) + literals = self._to_namelist_literals(name, value) + ( + _, + _, + var_size, + ) = self._definition.split_type_string(name) + if len(literals) > 0 and literals[0] is not None: + self._namelist.set_variable_value(var_group, name, literals, var_size) + + def get_default(self, name, config=None, allow_none=False): + """Get the value of a variable from the namelist definition file. 
+ + The `config` argument is passed through to the underlying + `NamelistDefaults.get_value` call as the `attribute` argument. + + The return value of this function is a list of values that were found in + the defaults file. If there is no matching default, this function + returns `None` if `allow_none=True` is passed, otherwise an error is + raised. + + Note that we perform some translation of the values, since there are a + few differences between Fortran namelist literals and values in the + defaults file: + 1) In the defaults file, whitespace is ignored except within strings, so + the output of this function strips out most whitespace. (This implies + that commas are the only way to separate array elements in the + defaults file.) + 2) In the defaults file, quotes around character literals (strings) are + optional, as long as the literal does not contain whitespace, commas, + or (single or double) quotes. If a setting for a character variable + does not seem to have quotes (and is not a null value), this function + will add them. + 3) Default values may refer to variables in a case's `env_*.xml` files. + This function replaces references of the form `$VAR` or `${VAR}` with + the value of the variable `VAR` in an env file, if that variable + exists. This behavior is suppressed within single-quoted strings + (similar to parameter expansion in shell scripts). + """ + default = self._definition.get_value_match( + name, attributes=config, exact_match=False + ) + if default is None: + expect(allow_none, "No default value found for {}.".format(name)) + return None + default = expand_literal_list(default) + + var_type, _, _ = self._definition.split_type_string(name) + + for i, scalar in enumerate(default): + # Skip single-quoted strings. + if ( + var_type == "character" + and scalar != "" + and scalar[0] == scalar[-1] == "'" + ): + continue + match = _var_ref_re.search(scalar) + while match: + env_val = self._case.get_value(match.group("name")) + if env_val is not None: + scalar = scalar.replace(match.group(0), str(env_val), 1) + match = _var_ref_re.search(scalar) + else: + scalar = None + logger.warning( + "Namelist default for variable {} refers to unknown XML variable {}.".format( + name, match.group("name") + ) + ) + match = None + default[i] = scalar + + # Deal with missing quotes. + + if var_type == "character": + for i, scalar in enumerate(default): + # Preserve null values. + if scalar != "": + default[i] = self.quote_string(scalar) + + default = self._to_python_value(name, default) + + return default + + def get_streams(self): + """Get a list of all streams used for the current data model mode.""" + return self.get_default("streamslist") + + def clean_streams(self): + for variable in self._streams_variables: + self._streams_namelists[variable] = [] + self._streams_namelists["streams"] = [] + + def new_instance(self): + """Clean the object just enough to introduce a new instance""" + self.clean_streams() + self._namelist.clean_groups() + + def _sub_fields(self, varnames): + """Substitute indicators with given values in a list of fields. + + Replace any instance of the following substring indicators with the + appropriate values: + %glc = two-digit GLC elevation class from 00 through glc_nec + + The difference between this function and `_sub_paths` is that this + function is intended to be used for variable names (especially from the + `strm_datvar` defaults), whereas `_sub_paths` is intended for use on + input data file paths. + + Returns a string. 
+ + Example: If `_sub_fields` is called with an array containing two + elements, each of which contains two strings, and glc_nec=3: + foo bar + s2x_Ss_tsrf%glc tsrf%glc + then the returned array will be: + foo bar + s2x_Ss_tsrf00 tsrf00 + s2x_Ss_tsrf01 tsrf01 + s2x_Ss_tsrf02 tsrf02 + s2x_Ss_tsrf03 tsrf03 + """ + lines = varnames.split("\n") + new_lines = [] + for line in lines: + if not line: + continue + if "%glc" in line: + if self._case.get_value("GLC_NEC") == 0: + glc_nec_indices = [] + else: + glc_nec_indices = range(self._case.get_value("GLC_NEC") + 1) + for i in glc_nec_indices: + new_lines.append(line.replace("%glc", "{:02d}".format(i))) + else: + new_lines.append(line) + return "\n".join(new_lines) + + @staticmethod + def _days_in_month(month, year=1): + """Number of days in the given month (specified as an int, 1-12). + + The `year` argument gives the year for which to request the number of + days, in a Gregorian calendar. Defaults to `1` (not a leap year). + """ + month_start = datetime.date(year, month, 1) + if month == 12: + next_year = year + 1 + next_month = 1 + else: + next_year = year + next_month = month + 1 + next_month_start = datetime.date(next_year, next_month, 1) + return (next_month_start - month_start).days + + def _sub_paths(self, filenames, year_start, year_end): + """Substitute indicators with given values in a list of filenames. + + Replace any instance of the following substring indicators with the + appropriate values: + %y = year from the range year_start to year_end + %ym = year-month from the range year_start to year_end with all 12 + months + %ymd = year-month-day from the range year_start to year_end with + all 12 months + + For the date indicators, the year may be prefixed with a number of + digits to use (the default is 4). E.g. `%2ymd` can be used to change the + number of year digits from 4 to 2. + + Note that we assume that there is no mixing and matching of date + indicators, i.e. you cannot use `%4ymd` and `%2y` in the same line. Note + also that we use a no-leap calendar, i.e. every month has the same + number of days every year. + + The difference between this function and `_sub_fields` is that this + function is intended to be used for file names (especially from the + `strm_datfil` defaults), whereas `_sub_fields` is intended for use on + variable names. + + Returns a string (filenames separated by newlines). 
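+
+        Example: with year_start=2000 and year_end=2000, the pattern
+        file.%ym.nc expands to the twelve names file.2000-01.nc through
+        file.2000-12.nc, one per month.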
+ """ + lines = [line for line in filenames.split("\n") if line] + new_lines = [] + for line in lines: + match = _ymd_re.search(filenames) + if match is None: + new_lines.append(line) + continue + if match.group("digits"): + year_format = "{:0" + match.group("digits") + "d}" + else: + year_format = "{:04d}" + for year in range(year_start, year_end + 1): + if match.group("day"): + for month in range(1, 13): + days = self._days_in_month(month) + for day in range(1, days + 1): + date_string = (year_format + "-{:02d}-{:02d}").format( + year, month, day + ) + new_line = line.replace(match.group(0), date_string) + new_lines.append(new_line) + elif match.group("month"): + for month in range(1, 13): + date_string = (year_format + "-{:02d}").format(year, month) + new_line = line.replace(match.group(0), date_string) + new_lines.append(new_line) + else: + date_string = year_format.format(year) + new_line = line.replace(match.group(0), date_string) + new_lines.append(new_line) + return "\n".join(new_lines) + + @staticmethod + def _add_xml_delimiter(list_to_deliminate, delimiter): + expect(delimiter and not " " in delimiter, "Missing or badly formed delimiter") + pred = "<{}>".format(delimiter) + postd = "".format(delimiter) + for n, _ in enumerate(list_to_deliminate): + list_to_deliminate[n] = pred + list_to_deliminate[n].strip() + postd + return "\n ".join(list_to_deliminate) + + def create_stream_file_and_update_shr_strdata_nml( + self, + config, + caseroot, # pylint:disable=too-many-locals + stream, + stream_path, + data_list_path, + ): + """Write the pseudo-XML file corresponding to a given stream. + + Arguments: + `config` - Used to look up namelist defaults. This is used *in addition* + to the `config` used to construct the namelist generator. The + main reason to supply additional configuration options here + is to specify stream-specific settings. + `stream` - Name of the stream. + `stream_path` - Path to write the stream file to. + `data_list_path` - Path of file to append input data information to. + """ + + if os.path.exists(stream_path): + os.unlink(stream_path) + user_stream_path = os.path.join( + caseroot, "user_" + os.path.basename(stream_path) + ) + + # Use the user's stream file, or create one if necessary. + config = config.copy() + config["stream"] = stream + + # Stream-specific configuration. + if os.path.exists(user_stream_path): + safe_copy(user_stream_path, stream_path) + strmobj = Stream(infile=stream_path) + domain_filepath = strmobj.get_value("domainInfo/filePath") + data_filepath = strmobj.get_value("fieldInfo/filePath") + domain_filenames = strmobj.get_value("domainInfo/fileNames") + data_filenames = strmobj.get_value("fieldInfo/fileNames") + else: + # Figure out the details of this stream. + if stream in ("prescribed", "copyall"): + # Assume only one file for prescribed mode! 
+ grid_file = self.get_default("strm_grid_file", config) + domain_filepath, domain_filenames = os.path.split(grid_file) + data_file = self.get_default("strm_data_file", config) + data_filepath, data_filenames = os.path.split(data_file) + else: + domain_filepath = self.get_default("strm_domdir", config) + domain_filenames = self.get_default("strm_domfil", config) + data_filepath = self.get_default("strm_datdir", config) + data_filenames = self.get_default("strm_datfil", config) + + domain_varnames = self._sub_fields(self.get_default("strm_domvar", config)) + data_varnames = self._sub_fields(self.get_default("strm_datvar", config)) + offset = self.get_default("strm_offset", config) + year_start = int(self.get_default("strm_year_start", config)) + year_end = int(self.get_default("strm_year_end", config)) + data_filenames = self._sub_paths(data_filenames, year_start, year_end) + domain_filenames = self._sub_paths(domain_filenames, year_start, year_end) + + # Overwrite domain_file if should be set from stream data + if domain_filenames == "null": + domain_filepath = data_filepath + domain_filenames = data_filenames.splitlines()[0] + + stream_file_text = _stream_mct_file_template.format( + domain_varnames=domain_varnames, + domain_filepath=domain_filepath, + domain_filenames=domain_filenames, + data_varnames=data_varnames, + data_filepath=data_filepath, + data_filenames=data_filenames, + offset=offset, + ) + + with open(stream_path, "w") as stream_file: + stream_file.write(stream_file_text) + + lines_hash = self._get_input_file_hash(data_list_path) + with open(data_list_path, "a") as input_data_list: + for i, filename in enumerate(domain_filenames.split("\n")): + if filename.strip() == "": + continue + filepath, filename = os.path.split(filename) + if not filepath: + filepath = os.path.join(domain_filepath, filename.strip()) + string = "domain{:d} = {}\n".format(i + 1, filepath) + hashValue = hashlib.md5(string.rstrip().encode("utf-8")).hexdigest() + if hashValue not in lines_hash: + input_data_list.write(string) + for i, filename in enumerate(data_filenames.split("\n")): + if filename.strip() == "": + continue + filepath = os.path.join(data_filepath, filename.strip()) + string = "file{:d} = {}\n".format(i + 1, filepath) + hashValue = hashlib.md5(string.rstrip().encode("utf-8")).hexdigest() + if hashValue not in lines_hash: + input_data_list.write(string) + self.update_shr_strdata_nml(config, stream, stream_path) + + def update_shr_strdata_nml(self, config, stream, stream_path): + """Updates values for the `shr_strdata_nml` namelist group. + + This should be done once per stream, and it shouldn't usually be called + directly, since `create_stream_file` calls this method itself. + """ + assert ( + config["stream"] == stream + ), "config stream is {}, but input stream is {}".format( + config["stream"], stream + ) + # Double-check the years for sanity. + year_start = int(self.get_default("strm_year_start", config)) + year_end = int(self.get_default("strm_year_end", config)) + year_align = int(self.get_default("strm_year_align", config)) + expect( + year_end >= year_start, + "Stream {} starts at year {:d}, but ends at earlier year {:d}.".format( + stream, year_start, year_end + ), + ) + # Add to streams file. 
+ stream_string = "{} {:d} {:d} {:d}".format( + os.path.basename(stream_path), year_align, year_start, year_end + ) + self._streams_namelists["streams"].append(stream_string) + for variable in self._streams_variables: + default = self.get_default(variable, config) + expect( + len(default) == 1, + "Stream {} had multiple settings for variable {}.".format( + stream, variable + ), + ) + self._streams_namelists[variable].append(default[0]) + + def set_abs_file_path(self, file_path): + """If `file_path` is relative, make it absolute using `DIN_LOC_ROOT`. + + If an absolute path is input, it is returned unchanged. + """ + if os.path.isabs(file_path): + return file_path + else: + fullpath = os.path.join(self._din_loc_root, file_path) + return fullpath + + def add_default(self, name, value=None, ignore_abs_path=None): + """Add a value for the specified variable to the namelist. + + If the specified variable is already defined in the object, the existing + value is preserved. Otherwise, the `value` argument, if provided, will + be used to set the value. If no such value is found, the defaults file + will be consulted. If null values are present in any of the above, the + result will be a merged array of values. + + If no value for the variable is found via any of the above, this method + will raise an exception. + """ + # pylint: disable=protected-access + group = self._definition.get_group(name) + + # Use this to see if we need to raise an error when nothing is found. + have_value = False + # Check for existing value. + current_literals = self._namelist.get_variable_value(group, name) + if current_literals != [""]: + have_value = True + + # Check for input argument. + if value is not None: + have_value = True + # if compression were to occur, this is where it does + literals = self._to_namelist_literals(name, value) + current_literals = merge_literal_lists(literals, current_literals) + + # Check for default value. + default = self.get_default(name, allow_none=True) + if default is not None: + have_value = True + default_literals = self._to_namelist_literals(name, default) + current_literals = merge_literal_lists(default_literals, current_literals) + expect( + have_value, + "No default value found for {} with attributes {}.".format( + name, self._definition.get_attributes() + ), + ) + + # Go through file names and prepend input data root directory for + # absolute pathnames. + var_type, _, var_size = self._definition.split_type_string(name) + if var_type == "character" and ignore_abs_path is None: + var_input_pathname = self._definition.get_input_pathname(name) + if var_input_pathname == "abs": + current_literals = expand_literal_list(current_literals) + for i, literal in enumerate(current_literals): + if literal == "": + continue + file_path = character_literal_to_string(literal) + abs_file_path = self._convert_to_abs_file_path(file_path, name) + current_literals[i] = string_to_character_literal(abs_file_path) + current_literals = compress_literal_list(current_literals) + + # Set the new value. + self._namelist.set_variable_value(group, name, current_literals, var_size) + + def _convert_to_abs_file_path(self, file_path, name): + """Convert the given file_path to an abs file path and return the result + + It's possible that file_path actually contains multiple files delimited by + GRID_SEP. (This is the case when a component has multiple grids, and so has a file + for each grid.) 
In this case, we split it on GRID_SEP and handle each separated + portion as a separate file, then return a new GRID_SEP-delimited string. + + """ + abs_file_paths = [] + # In most cases, the list created by the following split will only contain a + # single element, but this split is needed to handle grid-related files for + # components with multiple grids (e.g., GLC). + for one_file_path in file_path.split(GRID_SEP): + # NOTE - these are hard-coded here and a better way is to make these extensible + if ( + one_file_path == "UNSET" + or one_file_path == "idmap" + or one_file_path == "idmap_ignore" + or one_file_path == "unset" + ): + abs_file_paths.append(one_file_path) + elif one_file_path in ("null", "create_mesh"): + abs_file_paths.append(one_file_path) + else: + one_abs_file_path = self.set_abs_file_path(one_file_path) + if not os.path.exists(one_abs_file_path): + logger.warning( + "File not found: {} = {}, will attempt to download in check_input_data phase".format( + name, one_abs_file_path + ) + ) + abs_file_paths.append(one_abs_file_path) + + return GRID_SEP.join(abs_file_paths) + + def create_shr_strdata_nml(self): + """Set defaults for `shr_strdata_nml` variables other than the variable domainfile""" + self.add_default("datamode") + if self.get_value("datamode") != "NULL": + self.add_default("streams", value=self._streams_namelists["streams"]) + for variable in self._streams_variables: + self.add_default(variable, value=self._streams_namelists[variable]) + + def get_group_variables(self, group_name): + return self._namelist.get_group_variables(group_name) + + def _get_input_file_hash(self, data_list_path): + lines_hash = set() + if os.path.isfile(data_list_path): + with open(data_list_path, "r") as input_data_list: + for line in input_data_list: + hashValue = hashlib.md5(line.rstrip().encode("utf-8")).hexdigest() + logger.debug("Found line {} with hash {}".format(line, hashValue)) + lines_hash.add(hashValue) + return lines_hash + + def _write_input_files(self, data_list_path): + """Write input data files to list.""" + # append to input_data_list file + lines_hash = self._get_input_file_hash(data_list_path) + with open(data_list_path, "a") as input_data_list: + for group_name in self._namelist.get_group_names(): + for variable_name in self._namelist.get_variable_names(group_name): + input_pathname = self._definition.get_node_element_info( + variable_name, "input_pathname" + ) + if input_pathname is not None: + # This is where we end up for all variables that are paths + # to input data files. + literals = self._namelist.get_variable_value( + group_name, variable_name + ) + for literal in literals: + file_path = character_literal_to_string(literal) + self._add_file_to_input_data_list( + input_data_list=input_data_list, + variable_name=variable_name, + file_path=file_path, + input_pathname=input_pathname, + lines_hash=lines_hash, + ) + + def _add_file_to_input_data_list( + self, input_data_list, variable_name, file_path, input_pathname, lines_hash + ): + """Add one file to the input data list, if needed + + It's possible that file_path actually contains multiple files delimited by + GRID_SEP. (This is the case when a component has multiple grids, and so has a file + for each grid.) In this case, we split it on GRID_SEP and handle each separated + portion as a separate file. 
+ + Args: + - input_data_list: file handle + - variable_name (string): name of variable to add + - file_path (string): path to file + - input_pathname (string): whether this is an absolute or relative path + - lines_hash (set): set of hashes of lines already in the given input data list + + """ + for one_file_path in file_path.split(GRID_SEP): + # NOTE - these are hard-coded here and a better way is to make these extensible + if ( + one_file_path == "UNSET" + or one_file_path == "idmap" + or one_file_path == "idmap_ignore" + ): + continue + if input_pathname == "abs": + # No further mangling needed for absolute paths. + # At this point, there are overwrites that should be ignored + if not os.path.isabs(one_file_path): + continue + else: + pass + elif input_pathname.startswith("rel:"): + # The part past "rel" is the name of a variable that + # this variable specifies its path relative to. + root_var = input_pathname[4:] + root_dir = self.get_value(root_var) + one_file_path = os.path.join(root_dir, one_file_path) + else: + expect(False, "Bad input_pathname value: {}.".format(input_pathname)) + + # Write to the input data list. + # + # Note that the same variable name is repeated for each file. This currently + # seems okay for check_input_data, but if it becomes a problem, we could + # change this, e.g., appending an index to the end of variable_name. + string = "{} = {}".format(variable_name, one_file_path) + hashValue = hashlib.md5(string.rstrip().encode("utf-8")).hexdigest() + if hashValue not in lines_hash: + logger.debug("Adding line {} with hash {}".format(string, hashValue)) + input_data_list.write(string + "\n") + else: + logger.debug("Line already in file {}".format(string)) + + def write_output_file( + self, namelist_file, data_list_path=None, groups=None, sorted_groups=True + ): + """Write out the namelists and input data files. + + The `namelist_file` and `modelio_file` are the locations to which the + component and modelio namelists will be written, respectively. The + `data_list_path` argument is the location of the `*.input_data_list` + file, which will have the input data files added to it. 
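+
+        Typical usage from a component buildnml (paths are illustrative only):
+
+            nmlgen.write_output_file(
+                os.path.join(rundir, "xxx_in"),
+                data_list_path=os.path.join(caseroot, "Buildconf", "xxx.input_data_list"),
+            )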
+ """ + self._definition.validate(self._namelist) + if groups is None: + groups = self._namelist.get_group_names() + + # remove groups that are never in namelist file + if "modelio" in groups: + groups.remove("modelio") + if "seq_maps" in groups: + groups.remove("seq_maps") + + # write namelist file + self._namelist.write(namelist_file, groups=groups, sorted_groups=sorted_groups) + + if data_list_path is not None: + self._write_input_files(data_list_path) + + # For MCT + def add_nmlcontents( + self, filename, group, append=True, format_="nmlcontents", sorted_groups=True + ): + """Write only contents of nml group""" + self._namelist.write( + filename, + groups=[group], + append=append, + format_=format_, + sorted_groups=sorted_groups, + ) + + def write_seq_maps(self, filename): + """Write mct out seq_maps.rc""" + self._namelist.write(filename, groups=["seq_maps"], format_="rc") + + def write_modelio_file(self, filename): + """Write mct component modelio files""" + self._namelist.write(filename, groups=["modelio", "pio_inparm"], format_="nml") + + # For NUOPC + def write_nuopc_modelio_file(self, filename): + """Write nuopc component modelio files""" + self._namelist.write(filename, groups=["pio_inparm"], format_="nml") + + def write_nuopc_config_file( + self, filename, data_list_path=None, sorted_groups=False + ): + """Write the nuopc config file""" + self._definition.validate(self._namelist) + groups = self._namelist.get_group_names() + # write the config file + self._namelist.write_nuopc(filename, groups=groups, sorted_groups=sorted_groups) + # append to input_data_list file + if data_list_path is not None: + self._write_input_files(data_list_path) diff --git a/CIME/non_py/cprnc b/CIME/non_py/cprnc new file mode 160000 index 00000000000..9276b219750 --- /dev/null +++ b/CIME/non_py/cprnc @@ -0,0 +1 @@ +Subproject commit 9276b219750881633d8673c72ec80ac821f96d82 diff --git a/CIME/non_py/externals/genf90/ChangeLog b/CIME/non_py/externals/genf90/ChangeLog new file mode 100644 index 00000000000..15c6f6929cf --- /dev/null +++ b/CIME/non_py/externals/genf90/ChangeLog @@ -0,0 +1,61 @@ +================================================================================ +SVN $Id: ChangeLog 45058 2013-03-20 16:12:21Z jedwards $ +SVN $URL: https://svn-ccsm-models.cgd.ucar.edu/tools/genf90/trunk/ChangeLog $ +================================================================================ +This file describes what tags were created and why +=========================== +Originator: jedwards +Date: Jan 21, 2014 +Model: genf90 +Version: genf90_140121 +One-line summary: add nctype and ctype data type support + + +=========================== +Originator: jedwards +Date: Nov 20, 2013 +Model: genf90 +Version: genf90_131120 +One-line summary: Added a documentation header and a logical type + +=========================== +Originator: jedwards +Date: Nov 14, 2013 +Model: genf90 +Version: genf90_131114 +One-line summary: minor change to avoid undefined variable + +=========================== +Originator: jedwards +Date: Nov 13, 2013 +Model: genf90 +Version: genf90_131113 +One-line summary: handles derived types and interface blocks better (Sean +Santos) + + +=========================== +Originator: jedwards +Date: Apr 02, 2013 +Model: genf90 +Version: genf90_130402 +One-line summary: correct when dtypes.h is generated + + M genf90.pl + +=========================== +Originator: jedwards +Date: Mar 20, 2013 +Model: genf90 +Version: genf90_130320a +One-line summary: generate helper file dtypes.h + M genf90.pl + 
+=========================== +Originator: jedwards +Date: Mar 20, 2013 +Model: genf90 +Version: genf90_130320 +One-line summary: Move to new directory + +=========================== diff --git a/CIME/non_py/externals/genf90/genf90.pl b/CIME/non_py/externals/genf90/genf90.pl new file mode 100755 index 00000000000..6dba47d7687 --- /dev/null +++ b/CIME/non_py/externals/genf90/genf90.pl @@ -0,0 +1,393 @@ +#!/usr/bin/env perl +use strict; +my $outfile; +# Beginning with F90, Fortran has strict typing of variables based on "TKR" +# (type, kind, and rank). In many cases we want to write subroutines that +# provide the same functionality for different variable types and ranks. In +# order to do this without cut-and-paste duplication of code, we create a +# template file with the extension ".F90.in", which can be parsed by this script +# to generate F90 code for all of the desired specific types. +# +# Keywords are delimited by curly brackets: {} +# +# {TYPE} and {DIMS} are used to generate the specific subroutine names from the +# generic template +# {TYPE} : Variable type name; implemented types are character, 4 or 8 byte real, +# and 4 or 8 byte integer. +# allowed values: text, real, double, int, long, logical +# default values: text, real, double, int +# {VTYPE} : Used to generate variable declarations to match the specific type. +# if {TYPE}=double then {VTYPE} is "real(r8)" +# {ITYPE}, {ITYPENAME} : Used to generate CPP statements for the specific type. +# {MPITYPE} : Used to generate MPI types corresponding to the specific type. +# +# {DIMS} : Rank of arrays, "0" for scalar. +# allowed values: 0-7 +# default values : 0-5 +# {DIMSTR} : Generates the parenthesis and colons used for a variable +# declaration of {DIMS} dimensions. +# if {DIMS}=3 then {DIMSTR} is (:,:,:) +# {REPEAT} : Repeats an expression for each number from 1 to {DIMS}, with each +# iteration separated by commas. +# {REPEAT: foo(#, bar)} +# expands to this: +# foo(1, bar), foo(2, bar), foo(3, bar), ... 
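# As an illustration of the keyword expansion described above (the subroutine
# name and arguments here are hypothetical), a template fragment such as
#   subroutine copy_{TYPE}_{DIMS}d(src, dest)
#     {VTYPE}, intent(in)  :: src{DIMSTR}
#     {VTYPE}, intent(out) :: dest{DIMSTR}
# is emitted once for every requested {TYPE}/{DIMS} combination; for
# {TYPE}=double and {DIMS}=2 the generated code reads
#   subroutine copy_double_2d(src, dest)
#     real(r8), intent(in)  :: src(:,:)
#     real(r8), intent(out) :: dest(:,:)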
+ +# defaults +my @types = qw(text real double int short); +my $vtype = {'text' => 'character(len=*)', + 'real' => 'real(r4)', + 'double' => 'real(r8)', + 'int' => 'integer(i4)', + 'short' => 'integer(i2)', + 'long' => 'integer(i8)', + 'logical' => 'logical' }; +my $itype = {'text' => 100, + 'real' => 101, + 'double' => 102, + 'int' => 103, + 'long' => 104, + 'logical' => 105, + 'short' => 106}; +my $itypename = {'text' => 'TYPETEXT', + 'real' => 'TYPEREAL', + 'double' => 'TYPEDOUBLE', + 'int' => 'TYPEINT', + 'short' => 'TYPESHORT', + 'long' => 'TYPELONG', + 'logical' => 'TYPELOGICAL'}; +my $mpitype = {'text' => 'MPI_CHARACTER', + 'real' => 'MPI_REAL4', + 'short' => 'MPI_SHORT', + 'double' => 'MPI_REAL8', + 'int' => 'MPI_INTEGER'}; +# Netcdf C datatypes +my $nctype = {'text' => 'text', + 'real' => 'float', + 'short' => 'short', + 'double' => 'double', + 'int' => 'int'}; +# C interoperability types +my $ctype = {'text' => 'character(C_CHAR)', + 'real' => 'real(C_FLOAT)', + 'double' => 'real(C_DOUBLE)', + 'int' => 'integer(C_INT)', + 'short' => 'integer(C_SHORT)'}; + + + +my @dims =(0..5); + +my $write_dtypes = "no"; +# begin + +foreach(@ARGV){ + my $infile = $_; + usage() unless($infile =~ /(.*.F90).in/); + $outfile = $1; + open(F,"$infile") || die "$0 Could not open $infile to read"; + my @parsetext; + my $cnt=0; + foreach(){ + $cnt++; + if(/^\s*contains/i){ + push(@parsetext,"# $cnt \"$infile\"\n"); + } + if(/^\s*interface/i){ + push(@parsetext,"# $cnt \"$infile\"\n"); + } + if(/^[^!]*subroutine/i){ + push(@parsetext,"# $cnt \"$infile\"\n"); + } + if(/^[^!]*function/i){ + push(@parsetext,"# $cnt \"$infile\"\n"); + } + + push(@parsetext,$_); + } + + close(F); + + my $end; + my $contains=0; + my $in_type_block=0; + my @unit; + my $unitcnt=0; + my $date = localtime(); + my $preamble = +"!=================================================== +! DO NOT EDIT THIS FILE, it was generated using $0 +! Any changes you make to this file may be lost +!===================================================\n"; + my @output ; + push(@output,$preamble); + + my $line; + my $dimmodifier; + my $typemodifier; + my $itypeflag; + my $block; + my $block_type; + my $cppunit; + foreach $line (@parsetext){ +# skip parser comments + next if($line =~ /\s*!pl/); + + $itypeflag=1 if($line =~ /{ITYPE}/); + $itypeflag=1 if($line =~ /TYPETEXT/); + $itypeflag=1 if($line =~ /TYPEREAL/); + $itypeflag=1 if($line =~ /TYPEDOUBLE/); + $itypeflag=1 if($line =~ /TYPEINT/); + $itypeflag=1 if($line =~ /TYPELONG/); + + + if($contains==0){ + if($line=~/\s*!\s*DIMS\s+[\d,]+!*/){ + $dimmodifier=$line; + next; + } + if($line=~/\s*!\s*TYPE\s+[^!]+!*$/){ + $typemodifier=$line; + next; + } + if ((defined $typemodifier or defined $dimmodifier) + and not defined $block and $line=~/^\s*#[^{]*$/) { + push(@output, $line); + next; + } + # Figure out the bounds of a type statement. + # Type blocks start with "type," "type foo" or "type::" but not + # "type(". 
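      # Tracking type blocks here keeps a "contains" statement that appears
      # inside a derived-type definition from being mistaken for the
      # module-level "contains" that is tested further below.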
+ $in_type_block=1 if($line=~/^\s*type\s*[,:[:alpha:]]/i); + $in_type_block=0 if($line=~/^\s*end\s*type/i); + if(not defined $block) { + if ($line=~/^\s*type[^[:alnum:]_].*(\{TYPE\}|\{DIMS\})/i or + $line=~/^[^!]*(function|subroutine).*(\{TYPE\}|\{DIMS\})/i) { + $block=$line; + next; + } + if ($line=~/^\s*interface.*(\{TYPE\}|\{DIMS\})/i) { + $block_type="interface"; + $block=$line; + next; + } + } + if(not defined $block_type and + ($line=~/^\s*end\s+type\s+.*(\{TYPE\}|\{DIMS\})/i or + $line=~/^\s*end\s+(function|subroutine)\s+.*(\{TYPE\}|\{DIMS\})/i)){ + + $line = $block.$line; + undef $block; + } + if ($line=~/^\s*end\s*interface/i and + defined $block) { + $line = $block.$line; + undef $block; + undef $block_type; + } + if(defined $block){ + $block = $block.$line; + next; + } + if(defined $dimmodifier){ + $line = $dimmodifier.$line; + undef $dimmodifier; + } + if(defined $typemodifier){ + $line = $typemodifier.$line; + undef $typemodifier; + } + + push(@output, buildout($line)); + if(($line =~ /^\s*contains\s*!*/i && ! $in_type_block) or + ($line =~ /^\s*!\s*Not a module/i)){ + $contains=1; + next; + } + } + if($line=~/^\s*end module\s*/){ + $end = $line; + last; + } + + if($contains==1){ + # first parse into functions or subroutines + if($cppunit || !(defined($unit[$unitcnt]))){ + # Make cpp lines and blanks between routines units. + if($line =~ /^\s*\#(?!\s[[:digit:]]+)/ || $line =~/^\s*$/ || $line=~/^\s*!(?!\s*(TYPE|DIMS))/){ + push(@{$unit[$unitcnt]},$line); + $cppunit=1; + next; + } else { + $cppunit=0; + $unitcnt++; + } + } + + + push(@{$unit[$unitcnt]},$line); + if ($line=~/^\s*interface/i) { + $block_type="interface"; + $block=$line; + } + if ($line=~/^\s*end\s*interface/i) { + undef $block_type; + undef $block; + } + unless(defined $block){ + if($line =~ /\s*end function/i or $line =~ /\s*end subroutine/i){ + $unitcnt++; + } + } + } + } + my $i; + + + for($i=0;$i<$unitcnt;$i++){ + if(defined($unit[$i])){ + my $func = join('',@{$unit[$i]}); + push(@output, buildout($func)); + } + } + push(@output,@{$unit[$#unit]}) if($unitcnt==$#unit); + push(@output, $end); + if($itypeflag==1){ + my $str; + $str.="#include \"dtypes.h\"\n"; + $write_dtypes = "yes"; + print $str; + } + print @output; + writedtypes() if(!(-e "dtypes.h") && $write_dtypes == "yes"); + + +} + + +sub usage{ + die("$0 Expected input filename of the form .*.F90.in"); +} + +sub build_repeatstr{ + my($dims) = @_; + # Create regex to repeat expression DIMS times. 
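  # For example (illustrative), with $dims = 3 the quoted string assembled
  # below becomes "${1}1${2},&\n${1}2${2},&\n${1}3${2}"; when buildout()
  # applies the {REPEAT:...} substitution, ${1} and ${2} are filled with the
  # text before and after the '#', so {REPEAT: foo(#, bar)} expands to
  #   foo(1, bar),&
  #   foo(2, bar),&
  #   foo(3, bar)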
+ my $repeatstr; + for(my $i=1;$i<=$dims;$i++){ + $repeatstr .="\$\{1\}$i\$\{2\},&\n"; + } + if(defined $repeatstr){ + $repeatstr="\"$repeatstr"; + chop $repeatstr; + chop $repeatstr; + chop $repeatstr; + $repeatstr.="\""; + }else{ + $repeatstr=''; + } +} + +sub writedtypes{ + open(F,">dtypes.h"); + print F +"#define TYPETEXT 100 +#define TYPEREAL 101 +#define TYPEDOUBLE 102 +#define TYPEINT 103 +#define TYPELONG 104 +#define TYPELOGICAL 105 +"; + close(F); +} + +sub buildout{ + my ($func) = @_; + + my $outstr; + my(@ldims, @ltypes); + + if($func=~/\s*!\s*DIMS\s+([\d,]+)\s*/){ + @ldims = split(/,/,$1); + }else{ + @ldims = @dims; + } + if($func=~/\s*!\s*TYPE\s+([^!\s]+)\s*/){ + @ltypes = split(/,/,$1); +# print ">$func<>@ltypes<\n"; + }else{ + @ltypes = @types; + } + + + if(($func =~ /{TYPE}/ && $func =~ /{DIMS}/) ){ + my ($type, $dims); + foreach $type (@ltypes){ + foreach $dims (@ldims){ + my $dimstr; + for(my $i=1;$i<=$dims;$i++){ + $dimstr .=':,'; + } + if(defined $dimstr){ + $dimstr="($dimstr"; + chop $dimstr; + $dimstr.=')'; + }else{ + $dimstr=''; + } + + my $repeatstr = build_repeatstr($dims); + + my $str = $func; + $str =~ s/{TYPE}/$type/g; + $str =~ s/{VTYPE}/$vtype->{$type}/g; + $str =~ s/{ITYPE}/$itype->{$type}/g; + $str =~ s/{MPITYPE}/$mpitype->{$type}/g; + $str =~ s/{NCTYPE}/$nctype->{$type}/g; + $str =~ s/{CTYPE}/$ctype->{$type}/g; + $str =~ s/{DIMS}/$dims/g; + $str =~ s/{DIMSTR}/$dimstr/g; + $str =~ s/{REPEAT:([^#}]*)#([^#}]*)}/$repeatstr/eeg; + $outstr .= $str; + } + } + }elsif($func =~ /{DIMS}/){ + my $dims; + foreach $dims (@ldims){ + my $dimstr; + for(my $i=1;$i<=$dims;$i++){ + $dimstr .=':,'; + } + if(defined $dimstr){ + $dimstr="($dimstr"; + chop $dimstr; + $dimstr.=')'; + }else{ + $dimstr=''; + } + + my $repeatstr = build_repeatstr($dims); + + my $str = $func; + $str =~ s/{DIMS}/$dims/g; + $str =~ s/{DIMSTR}/$dimstr/g; + $str =~ s/{REPEAT:([^#}]*)#([^#}]*)}/$repeatstr/eeg; + $outstr .= $str; + } + }elsif($func =~ /{TYPE}/){ + my ($type); + foreach $type (@ltypes){ + my $str = $func; + $str =~ s/{TYPE}/$type/g; + $str =~ s/{VTYPE}/$vtype->{$type}/g; + $str =~ s/{ITYPE}/$itype->{$type}/g; + $str =~ s/{MPITYPE}/$mpitype->{$type}/g; + $str =~ s/{NCTYPE}/$nctype->{$type}/g; + $str =~ s/{CTYPE}/$ctype->{$type}/g; + $outstr.=$str; + } + }else{ + $outstr=$func; + } + + return $outstr; +} diff --git a/src/CMake/.gitignore b/CIME/non_py/src/CMake/.gitignore similarity index 100% rename from src/CMake/.gitignore rename to CIME/non_py/src/CMake/.gitignore diff --git a/src/CMake/CESM_utils.cmake b/CIME/non_py/src/CMake/CESM_utils.cmake similarity index 81% rename from src/CMake/CESM_utils.cmake rename to CIME/non_py/src/CMake/CESM_utils.cmake index 6b7a433cbad..1fa512a81f4 100644 --- a/src/CMake/CESM_utils.cmake +++ b/CIME/non_py/src/CMake/CESM_utils.cmake @@ -1,2 +1,2 @@ message("CESM_utils.cmake is deprecated, please replace references with CIME_utils.cmake") -include(CIME_utils) \ No newline at end of file +include(CIME_utils) diff --git a/src/CMake/CIME_initial_setup.cmake b/CIME/non_py/src/CMake/CIME_initial_setup.cmake similarity index 100% rename from src/CMake/CIME_initial_setup.cmake rename to CIME/non_py/src/CMake/CIME_initial_setup.cmake diff --git a/src/CMake/CIME_utils.cmake b/CIME/non_py/src/CMake/CIME_utils.cmake similarity index 91% rename from src/CMake/CIME_utils.cmake rename to CIME/non_py/src/CMake/CIME_utils.cmake index a875e209117..4a91112559b 100644 --- a/src/CMake/CIME_utils.cmake +++ b/CIME/non_py/src/CMake/CIME_utils.cmake @@ -34,7 +34,12 @@ 
set(CMAKE_COLOR_MAKEFILE "${USE_COLOR}") # Compiler info #================================================= -list(APPEND CMAKE_MODULE_PATH "../pio2/cmake") +if (EXISTS ${SRC_ROOT}/libraries/parallelio/cmake) + list(APPEND CMAKE_MODULE_PATH "${SRC_ROOT}/libraries/parallelio/cmake") +else() + list(APPEND CMAKE_MODULE_PATH "../pio2/cmake") +endif() + set(CMAKE_C_FLAGS "${CPPDEFS} ${CFLAGS}") set(CMAKE_Fortran_FLAGS "${CPPDEFS} ${FFLAGS}") set(CMAKE_EXE_LINKER_FLAGS "${LDFLAGS} ${SLIBS}") @@ -67,10 +72,7 @@ include(genf90_utils) #================================================= # pFUnit and its preprocessor -find_package(pFUnit) - -# Preprocessor and driver handling. -include(pFUnit_utils) +find_package(PFUNIT) # Need to add PFUNIT_INCLUDE_DIRS to the general list of include_directories # because we use pfunit's 'throw'. diff --git a/src/CMake/ChangeLog b/CIME/non_py/src/CMake/ChangeLog similarity index 100% rename from src/CMake/ChangeLog rename to CIME/non_py/src/CMake/ChangeLog diff --git a/src/CMake/Compilers.cmake b/CIME/non_py/src/CMake/Compilers.cmake similarity index 100% rename from src/CMake/Compilers.cmake rename to CIME/non_py/src/CMake/Compilers.cmake diff --git a/CIME/non_py/src/CMake/CorrectWindowsPaths.cmake b/CIME/non_py/src/CMake/CorrectWindowsPaths.cmake new file mode 100644 index 00000000000..5058282e74a --- /dev/null +++ b/CIME/non_py/src/CMake/CorrectWindowsPaths.cmake @@ -0,0 +1,13 @@ +# CorrectWindowsPaths - this module defines one macro +# +# CONVERT_CYGWIN_PATH( PATH ) +# This uses the command cygpath (provided by cygwin) to convert +# unix-style paths into paths useable by cmake on windows + +macro (CONVERT_CYGWIN_PATH _path) + if (WIN32) + EXECUTE_PROCESS(COMMAND cygpath.exe -m ${${_path}} + OUTPUT_VARIABLE ${_path}) + string (STRIP ${${_path}} ${_path}) + endif (WIN32) +endmacro (CONVERT_CYGWIN_PATH) diff --git a/src/CMake/FindPETSc.cmake b/CIME/non_py/src/CMake/FindPETSc.cmake similarity index 100% rename from src/CMake/FindPETSc.cmake rename to CIME/non_py/src/CMake/FindPETSc.cmake diff --git a/CIME/non_py/src/CMake/FindPackageMultipass.cmake b/CIME/non_py/src/CMake/FindPackageMultipass.cmake new file mode 100644 index 00000000000..fbf06a7f0fc --- /dev/null +++ b/CIME/non_py/src/CMake/FindPackageMultipass.cmake @@ -0,0 +1,106 @@ +# PackageMultipass - this module defines two macros +# +# FIND_PACKAGE_MULTIPASS (Name CURRENT +# STATES VAR0 VAR1 ... +# DEPENDENTS DEP0 DEP1 ...) +# +# This function creates a cache entry _CURRENT which +# the user can set to "NO" to trigger a reconfiguration of the package. +# The first time this function is called, the values of +# _VAR0, ... are saved. If _CURRENT +# is false or if any STATE has changed since the last time +# FIND_PACKAGE_MULTIPASS() was called, then CURRENT will be set to "NO", +# otherwise CURRENT will be "YES". IF not CURRENT, then +# _DEP0, ... will be FORCED to NOTFOUND. +# Example: +# find_path (FOO_DIR include/foo.h) +# FIND_PACKAGE_MULTIPASS (Foo foo_current +# STATES DIR +# DEPENDENTS INCLUDES LIBRARIES) +# if (NOT foo_current) +# # Make temporary files, run programs, etc, to determine FOO_INCLUDES and FOO_LIBRARIES +# endif (NOT foo_current) +# +# MULTIPASS_SOURCE_RUNS (Name INCLUDES LIBRARIES SOURCE RUNS LANGUAGE) +# Always runs the given test, use this when you need to re-run tests +# because parent variables have made old cache entries stale. The LANGUAGE +# variable is either C or CXX indicating which compiler the test should +# use. 
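#   A hypothetical call (the FOO_* variables are illustrative and assumed to
#   be set by the caller; the argument order follows the macro definition
#   below, which takes the lists directly rather than a leading Name):
#     multipass_source_runs ("${FOO_INCLUDES}" "${FOO_LIBRARIES}"
#       "${_test_source}" FOO_TEST_RUNS "C")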
+# MULTIPASS_C_SOURCE_RUNS (Name INCLUDES LIBRARIES SOURCE RUNS) +# DEPRECATED! This is only included for backwards compatability. Use +# the more general MULTIPASS_SOURCE_RUNS instead. +# Always runs the given test, use this when you need to re-run tests +# because parent variables have made old cache entries stale. + +macro (FIND_PACKAGE_MULTIPASS _name _current) + string (TOUPPER ${_name} _NAME) + set (_args ${ARGV}) + list (REMOVE_AT _args 0 1) + + set (_states_current "YES") + list (GET _args 0 _cmd) + if (_cmd STREQUAL "STATES") + list (REMOVE_AT _args 0) + list (GET _args 0 _state) + while (_state AND NOT _state STREQUAL "DEPENDENTS") + # The name of the stored value for the given state + set (_stored_var PACKAGE_MULTIPASS_${_NAME}_${_state}) + if (NOT "${${_stored_var}}" STREQUAL "${${_NAME}_${_state}}") + set (_states_current "NO") + endif (NOT "${${_stored_var}}" STREQUAL "${${_NAME}_${_state}}") + set (${_stored_var} "${${_NAME}_${_state}}" CACHE INTERNAL "Stored state for ${_name}." FORCE) + list (REMOVE_AT _args 0) + list (GET _args 0 _state) + endwhile (_state AND NOT _state STREQUAL "DEPENDENTS") + endif (_cmd STREQUAL "STATES") + + set (_stored ${_NAME}_CURRENT) + if (NOT ${_stored}) + set (${_stored} "YES" CACHE BOOL "Is the configuration for ${_name} current? Set to \"NO\" to reconfigure." FORCE) + set (_states_current "NO") + endif (NOT ${_stored}) + + set (${_current} ${_states_current}) + if (NOT ${_current} AND PACKAGE_MULTIPASS_${_name}_CALLED) + message (STATUS "Clearing ${_name} dependent variables") + # Clear all the dependent variables so that the module can reset them + list (GET _args 0 _cmd) + if (_cmd STREQUAL "DEPENDENTS") + list (REMOVE_AT _args 0) + foreach (dep ${_args}) + set (${_NAME}_${dep} "NOTFOUND" CACHE INTERNAL "Cleared" FORCE) + endforeach (dep) + endif (_cmd STREQUAL "DEPENDENTS") + set (${_NAME}_FOUND "NOTFOUND" CACHE INTERNAL "Cleared" FORCE) + endif () + set (PACKAGE_MULTIPASS_${name}_CALLED YES CACHE INTERNAL "Private" FORCE) +endmacro (FIND_PACKAGE_MULTIPASS) + + +macro (MULTIPASS_SOURCE_RUNS includes libraries source runs language) + include (Check${language}SourceRuns) + # This is a ridiculous hack. CHECK_${language}_SOURCE_* thinks that if the + # *name* of the return variable doesn't change, then the test does + # not need to be re-run. We keep an internal count which we + # increment to guarantee that every test name is unique. If we've + # gotten here, then the configuration has changed enough that the + # test *needs* to be rerun. + if (NOT MULTIPASS_TEST_COUNT) + set (MULTIPASS_TEST_COUNT 00) + endif (NOT MULTIPASS_TEST_COUNT) + math (EXPR _tmp "${MULTIPASS_TEST_COUNT} + 1") # Why can't I add to a cache variable? 
+ set (MULTIPASS_TEST_COUNT ${_tmp} CACHE INTERNAL "Unique test ID") + set (testname MULTIPASS_TEST_${MULTIPASS_TEST_COUNT}_${runs}) + set (CMAKE_REQUIRED_INCLUDES ${includes}) + set (CMAKE_REQUIRED_LIBRARIES ${libraries}) + if(${language} STREQUAL "C") + check_c_source_runs ("${source}" ${testname}) + elseif(${language} STREQUAL "CXX") + check_cxx_source_runs ("${source}" ${testname}) + endif() + set (${runs} "${${testname}}") +endmacro (MULTIPASS_SOURCE_RUNS) + +macro (MULTIPASS_C_SOURCE_RUNS includes libraries source runs) + multipass_source_runs("${includes}" "${libraries}" "${source}" ${runs} "C") +endmacro (MULTIPASS_C_SOURCE_RUNS) diff --git a/src/CMake/LICENSE b/CIME/non_py/src/CMake/LICENSE similarity index 100% rename from src/CMake/LICENSE rename to CIME/non_py/src/CMake/LICENSE diff --git a/CIME/non_py/src/CMake/README.md b/CIME/non_py/src/CMake/README.md new file mode 100644 index 00000000000..80b84eb2bba --- /dev/null +++ b/CIME/non_py/src/CMake/README.md @@ -0,0 +1,35 @@ +CMake_Fortran_utils +=================== + +CMake modules dealing with Fortran-specific issues and Fortran libraries + +Currently, these modules should work with CMake version 2.8.8 and later +versions. Earlier CMake versions may work but are untested. + +Below is a brief listing of modules. More detailed information on the +purpose and use of these modules can be found in comments at the top of +each file. + +Find modules for specific libraries: + +FindNETCDF + +FindPnetcdf + +Utility modules: + +genf90_utils - Generate Fortran code from genf90.pl templates. + +Sourcelist_utils - Use source file lists defined over multiple directories. + +Modules that are CESM-specific and/or incomplete: + +CIME\_initial\_setup - Handles setup that must be done before the 'project' +line. This must be included before the 'project' line in the main CMakeLists.txt +file. + +CIME_utils - Handles a few options, and includes several other modules. This +must be included after the 'project' line in the main CMakeLists.txt file, and +after the inclusion of CIME\_initial\_setup. + +Compilers - Specify compiler-specific behavior, add build types for CESM. diff --git a/CIME/non_py/src/CMake/ResolveCompilerPaths.cmake b/CIME/non_py/src/CMake/ResolveCompilerPaths.cmake new file mode 100644 index 00000000000..2c5c7d88c00 --- /dev/null +++ b/CIME/non_py/src/CMake/ResolveCompilerPaths.cmake @@ -0,0 +1,105 @@ +# ResolveCompilerPaths - this module defines two macros +# +# RESOLVE_LIBRARIES (XXX_LIBRARIES LINK_LINE) +# This macro is intended to be used by FindXXX.cmake modules. +# It parses a compiler link line and resolves all libraries +# (-lfoo) using the library path contexts (-L/path) in scope. +# The result in XXX_LIBRARIES is the list of fully resolved libs. +# Example: +# +# RESOLVE_LIBRARIES (FOO_LIBRARIES "-L/A -la -L/B -lb -lc -ld") +# +# will be resolved to +# +# FOO_LIBRARIES:STRING="/A/liba.so;/B/libb.so;/A/libc.so;/usr/lib/libd.so" +# +# if the filesystem looks like +# +# /A: liba.so libc.so +# /B: liba.so libb.so +# /usr/lib: liba.so libb.so libc.so libd.so +# +# and /usr/lib is a system directory. +# +# Note: If RESOLVE_LIBRARIES() resolves a link line differently from +# the native linker, there is a bug in this macro (please report it). +# +# RESOLVE_INCLUDES (XXX_INCLUDES INCLUDE_LINE) +# This macro is intended to be used by FindXXX.cmake modules. +# It parses a compile line and resolves all includes +# (-I/path/to/include) to a list of directories. Other flags are ignored. 
+# Example: +# +# RESOLVE_INCLUDES (FOO_INCLUDES "-I/A -DBAR='\"irrelevant -I/string here\"' -I/B") +# +# will be resolved to +# +# FOO_INCLUDES:STRING="/A;/B" +# +# assuming both directories exist. +# Note: as currently implemented, the -I/string will be picked up mistakenly (cry, cry) +include (CorrectWindowsPaths) + +macro (RESOLVE_LIBRARIES LIBS LINK_LINE) + string (REGEX MATCHALL "((-L|-l|-Wl)([^\" ]+|\"[^\"]+\")|[^\" ]+\\.(a|so|dll|lib))" _all_tokens "${LINK_LINE}") + set (_libs_found "") + set (_directory_list "") + foreach (token ${_all_tokens}) + if (token MATCHES "-L([^\" ]+|\"[^\"]+\")") + # If it's a library path, add it to the list + string (REGEX REPLACE "^-L" "" token ${token}) + string (REGEX REPLACE "//" "/" token ${token}) + convert_cygwin_path(token) + list (APPEND _directory_list ${token}) + elseif (token MATCHES "^(-l([^\" ]+|\"[^\"]+\")|[^\" ]+\\.(a|so|dll|lib))") + # It's a library, resolve the path by looking in the list and then (by default) in system directories + if (WIN32) #windows expects "libfoo", linux expects "foo" + string (REGEX REPLACE "^-l" "lib" token ${token}) + else (WIN32) + string (REGEX REPLACE "^-l" "" token ${token}) + endif (WIN32) + set (_root "") + if (token MATCHES "^/")# We have an absolute path + #separate into a path and a library name: + string (REGEX MATCH "[^/]*\\.(a|so|dll|lib)$" libname ${token}) + string (REGEX MATCH ".*[^${libname}$]" libpath ${token}) + convert_cygwin_path(libpath) + set (_directory_list ${_directory_list} ${libpath}) + set (token ${libname}) + endif (token MATCHES "^/") + set (_lib "NOTFOUND" CACHE FILEPATH "Cleared" FORCE) + find_library (_lib ${token} HINTS ${_directory_list} ${_root}) + if (_lib) + string (REPLACE "//" "/" _lib ${_lib}) + list (APPEND _libs_found ${_lib}) + else (_lib) + message (STATUS "Unable to find library ${token}") + endif (_lib) + endif (token MATCHES "-L([^\" ]+|\"[^\"]+\")") + endforeach (token) + set (_lib "NOTFOUND" CACHE INTERNAL "Scratch variable" FORCE) + # only the LAST occurence of each library is required since there should be no circular dependencies + if (_libs_found) + list (REVERSE _libs_found) + list (REMOVE_DUPLICATES _libs_found) + list (REVERSE _libs_found) + endif (_libs_found) + set (${LIBS} "${_libs_found}") +endmacro (RESOLVE_LIBRARIES) + +macro (RESOLVE_INCLUDES INCS COMPILE_LINE) + string (REGEX MATCHALL "-I([^\" ]+|\"[^\"]+\")" _all_tokens "${COMPILE_LINE}") + set (_incs_found "") + foreach (token ${_all_tokens}) + string (REGEX REPLACE "^-I" "" token ${token}) + string (REGEX REPLACE "//" "/" token ${token}) + convert_cygwin_path(token) + if (EXISTS ${token}) + list (APPEND _incs_found ${token}) + else (EXISTS ${token}) + message (STATUS "Include directory ${token} does not exist") + endif (EXISTS ${token}) + endforeach (token) + list (REMOVE_DUPLICATES _incs_found) + set (${INCS} "${_incs_found}") +endmacro (RESOLVE_INCLUDES) diff --git a/src/CMake/Sourcelist_utils.cmake b/CIME/non_py/src/CMake/Sourcelist_utils.cmake similarity index 100% rename from src/CMake/Sourcelist_utils.cmake rename to CIME/non_py/src/CMake/Sourcelist_utils.cmake diff --git a/src/CMake/TryCSizeOf.f90 b/CIME/non_py/src/CMake/TryCSizeOf.f90 similarity index 100% rename from src/CMake/TryCSizeOf.f90 rename to CIME/non_py/src/CMake/TryCSizeOf.f90 diff --git a/src/CMake/TryMPIIO.f90 b/CIME/non_py/src/CMake/TryMPIIO.f90 similarity index 100% rename from src/CMake/TryMPIIO.f90 rename to CIME/non_py/src/CMake/TryMPIIO.f90 diff --git a/src/CMake/TryMPIMod.f90 b/CIME/non_py/src/CMake/TryMPIMod.f90 
similarity index 100% rename from src/CMake/TryMPIMod.f90 rename to CIME/non_py/src/CMake/TryMPIMod.f90 diff --git a/src/CMake/TryMPISERIAL.f90 b/CIME/non_py/src/CMake/TryMPISERIAL.f90 similarity index 100% rename from src/CMake/TryMPISERIAL.f90 rename to CIME/non_py/src/CMake/TryMPISERIAL.f90 diff --git a/src/CMake/TryPnetcdf_inc.f90 b/CIME/non_py/src/CMake/TryPnetcdf_inc.f90 similarity index 100% rename from src/CMake/TryPnetcdf_inc.f90 rename to CIME/non_py/src/CMake/TryPnetcdf_inc.f90 diff --git a/src/CMake/TryPnetcdf_mod.f90 b/CIME/non_py/src/CMake/TryPnetcdf_mod.f90 similarity index 100% rename from src/CMake/TryPnetcdf_mod.f90 rename to CIME/non_py/src/CMake/TryPnetcdf_mod.f90 diff --git a/src/CMake/genf90_utils.cmake b/CIME/non_py/src/CMake/genf90_utils.cmake similarity index 100% rename from src/CMake/genf90_utils.cmake rename to CIME/non_py/src/CMake/genf90_utils.cmake diff --git a/src/CMake/mpiexec.cmake b/CIME/non_py/src/CMake/mpiexec.cmake similarity index 99% rename from src/CMake/mpiexec.cmake rename to CIME/non_py/src/CMake/mpiexec.cmake index 2ca9f736a86..7378715eccc 100644 --- a/src/CMake/mpiexec.cmake +++ b/CIME/non_py/src/CMake/mpiexec.cmake @@ -27,7 +27,3 @@ function( add_mpi_test _testName _testExe _testArgs _numProc _timeout) set_tests_properties(${_testName} PROPERTIES TIMEOUT ${_timeout}) endfunction(add_mpi_test) - - - - diff --git a/CIME/non_py/src/README b/CIME/non_py/src/README new file mode 100644 index 00000000000..e3efa6552d0 --- /dev/null +++ b/CIME/non_py/src/README @@ -0,0 +1,30 @@ +!=============================================================================== +! SVN $Id: README 19883 2009-12-14 23:19:10Z erik $ +! SVN $URL: https://svn-ccsm-models.cgd.ucar.edu/csm_share/trunk_tags/share3_150116/README $ +!=============================================================================== + + A description of csm_share + +This module exists to collect code shared between various CIME components. +Excluding this "shared code" module, CIME components are built using disjoint +sets of source code. The use of this shared code is similar to the use of +object code libraries where each subdirectory of share is equivalant to +one library. While object library routines are accessed by linking to libraries +during the load phase, these shared code routines are accessed by including the +appropriate source code directory path during the compile phase. + +Motivation for this code sharing includes: + +- facilitating consistent physics between all models. For example, uniform + solar-zenith-angle/orbital calculations and uniform physical constants. +- providing an interface/API between component models and the flux-coupler + component in the CCSM framework. +- avoiding the need for redundant implementations of commonly needed + functionality. For example netCDF file reading, basic mapping (re-gridding) + functionality, and common character string manipulations. + +Current subsets ("libraries") of shared code only include: + +util - very generic, general-purpose code that is likely to be useful to all + CIME components. CIME components may be explicitly required to use some + parts of this code, for example the physical constants module. 
diff --git a/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/satm/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/satm/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/sesp/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/sesp/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ 
b/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/sglc/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/sglc/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildnml new file mode 100755 index 00000000000..5350aa63e9a --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildnml @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" + +# DO NOTHING diff --git a/src/components/stub_comps/siac/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/siac/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib_cmake new 
file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/sice/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/sice/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/slnd/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/slnd/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git 
a/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/socn/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/socn/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/srof/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/srof/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib b/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib_cmake b/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildnml b/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildnml new file mode 100755 index 00000000000..8a21a4ae9e6 --- /dev/null +++ 
b/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildnml @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +""" +build stub model namelist +""" +# DO NOTHING +# pylint: disable=unused-argument +def buildnml(case, caseroot, compname): + pass diff --git a/src/components/stub_comps/swav/cime_config/config_component.xml b/CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/config_component.xml similarity index 100% rename from src/components/stub_comps/swav/cime_config/config_component.xml rename to CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib_cmake b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml new file mode 100755 index 00000000000..e7efcd46b01 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +""" +build data model library +""" + +import sys, os + +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." +) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildnml import build_xcpl_nml, parse_input +from CIME.case import Case + + +def buildnml(case, caseroot, compname): + if compname != "xatm": + raise AttributeError + build_xcpl_nml(case, caseroot, compname) + + +def _main_func(): + caseroot = parse_input(sys.argv) + with Case(caseroot) as case: + buildnml(case, caseroot, "xatm") + + +if __name__ == "__main__": + _main_func() diff --git a/src/components/xcpl_comps/xatm/cime_config/config_component.xml b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/config_component.xml similarity index 100% rename from src/components/xcpl_comps/xatm/cime_config/config_component.xml rename to CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 new file mode 100644 index 00000000000..64cd5b768da --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 @@ -0,0 +1,529 @@ +module atm_comp_nuopc + + !---------------------------------------------------------------------------- + ! 
This is the NUOPC cap for XATM + !---------------------------------------------------------------------------- + + use ESMF + use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize + use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise + use NUOPC_Model , only : model_routine_SS => SetServices + use NUOPC_Model , only : model_label_Advance => label_Advance + use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock + use NUOPC_Model , only : model_label_Finalize => label_Finalize + use NUOPC_Model , only : NUOPC_ModelGet, SetVM + use shr_sys_mod , only : shr_sys_abort + use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit + use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck + use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance + use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock + use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type + + implicit none + private ! except + + public :: SetServices + public :: SetVM + !-------------------------------------------------------------------------- + ! Private module data + !-------------------------------------------------------------------------- + + character(len=CL) :: flds_scalar_name = '' + integer :: flds_scalar_num = 0 + integer :: flds_scalar_index_nx = 0 + integer :: flds_scalar_index_ny = 0 + integer :: flds_scalar_index_nextsw_cday = 0 + + integer :: fldsToAtm_num = 0 + integer :: fldsFrAtm_num = 0 + type (fld_list_type) :: fldsToAtm(fldsMax) + type (fld_list_type) :: fldsFrAtm(fldsMax) + integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost + + type(ESMF_Mesh) :: mesh + integer :: nxg ! global dim i-direction + integer :: nyg ! global dim j-direction + integer :: my_task ! my task in mpi communicator mpicom + integer :: inst_index ! number of current instance (ie. 1) + character(len=5) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") + integer :: logunit ! logging unit number + logical :: mastertask + integer :: dbug = 0 + character(*),parameter :: modName = "(xatm_comp_nuopc)" + character(*),parameter :: u_FILE_u = & + __FILE__ + +!=============================================================================== +contains +!=============================================================================== + + subroutine SetServices(gcomp, rc) + + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! the NUOPC gcomp component will register the generic methods + call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! switching to IPD versions + call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & + userRoutine=ModelInitPhase, phase=0, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! 
set entry point for methods that require specific implementation + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & + userRoutine=InitializeAdvertise, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & + userRoutine=InitializeRealize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! attach specializing method(s) + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine SetServices + + !=============================================================================== + subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_VM) :: vm + character(CS) :: stdname + integer :: n + integer :: lsize ! local array size + integer :: shrlogunit ! original log unit + character(CL) :: cvalue + character(len=CL) :: logmsg + logical :: isPresent, isSet + character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_VMGet(vm, localpet=my_task, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + mastertask = (my_task==0) + + ! determine instance information + call get_component_instance(gcomp, inst_suffix, inst_index, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! set logunit and set shr logging to my log file + call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! Initialize xatm + call dead_read_inparms('atm', inst_suffix, logunit, nxg, nyg) + + ! advertise import and export fields + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + flds_scalar_name = trim(cvalue) + call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. 
isSet) then + read(cvalue, *) flds_scalar_num + write(logmsg,*) flds_scalar_num + call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nx + write(logmsg,*) flds_scalar_index_nx + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_ny + write(logmsg,*) flds_scalar_index_ny + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxNextSwCday", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nextsw_cday + write(logmsg,*) flds_scalar_index_nextsw_cday + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nextsw_cday = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxNextSwCday') + endif + + if (nxg /= 0 .and. 
nyg /= 0) then + + call fld_list_add(fldsFrAtm_num, fldsFrAtm, trim(flds_scalar_name)) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_topo' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_z' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_u' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_v' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_tbot' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_ptem' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_shum' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_pbot' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_dens' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_pslv' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_rainc' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_rainl' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_snowc' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_snowl' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_lwdn' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swndr' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swvdr' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swndf' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swvdf' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swnet' ) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_bcph' , ungridded_lbound=1, ungridded_ubound=3) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_ocph' , ungridded_lbound=1, ungridded_ubound=3) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_dstwet', ungridded_lbound=1, ungridded_ubound=4) + call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_dstdry', ungridded_lbound=1, ungridded_ubound=4) + + call fld_list_add(fldsToAtm_num, fldsToAtm, trim(flds_scalar_name)) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_anidr' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_avsdf' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_anidf' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_avsdr' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_lfrac' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Si_ifrac' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_ofrac' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_tref' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_qref' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_t' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_t' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_fv' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_ram1' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_snowh' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Si_snowh' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_ssq' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_re' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_u10' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_taux' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_tauy' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_lat' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_sen' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_lwup' ) + call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_evap' ) + + do n = 1,fldsFrAtm_num + if(mastertask) write(logunit,*)'Advertising From Xatm ',trim(fldsFrAtm(n)%stdname) + call NUOPC_Advertise(exportState, standardName=fldsFrAtm(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + end do + + do n = 1,fldsToAtm_num + if(mastertask) write(logunit,*)'Advertising To Xatm',trim(fldsToAtm(n)%stdname) + call 
NUOPC_Advertise(importState, standardName=fldsToAtm(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + end if + + ! Reset shr logging to original values + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeAdvertise + + !=============================================================================== + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! input/output arguments + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_Time) :: nextTime + real(r8) :: nextsw_cday + integer :: n + integer :: shrlogunit ! original log unit + character(ESMF_MAXSTR) :: cvalue ! config data + character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize: xatm) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + ! Reset shr logging to my log file + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) + + ! generate the mesh + call NUOPC_CompAttributeGet(gcomp, name='mesh_atm', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + mesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + ! realize the actively coupled fields, now that a mesh is established + ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState + ! by replacing the advertised fields with the newly created fields of the same name. + call fld_list_realize( & + state=exportState, & + fldList=fldsFrAtm, & + numflds=fldsFrAtm_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':xatmExport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call fld_list_realize( & + state=importState, & + fldList=fldsToAtm, & + numflds=fldsToAtm_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':xatmImport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Pack export state + call state_setexport(exportState, rc=rc) + call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Set time of next radiation computation + call ESMF_ClockGetNextTime(clock, nextTime) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_TimeGet(nextTime, dayOfYear_r8=nextsw_cday) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(nextsw_cday, flds_scalar_index_nextsw_cday, exportState, & + flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! diagnostics + if (dbug > 1) then + call State_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeRealize + + !=============================================================================== + subroutine ModelAdvance(gcomp, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + + ! local variables + type(ESMF_Clock) :: clock + type(ESMF_State) :: exportState + real(r8) :: nextsw_cday + integer :: shrlogunit ! 
original log unit
+ character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) '
+ !-------------------------------------------------------------------------------
+
+ rc = ESMF_SUCCESS
+
+ if (dbug > 1) then
+ call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc)
+ end if
+ call memcheck(subname, 3, mastertask)
+
+ call shr_log_getLogUnit (shrlogunit)
+ call shr_log_setLogUnit (logunit)
+
+ !--------------------------------
+ ! Pack export state
+ !--------------------------------
+
+ call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ call state_setexport(exportState, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ call State_SetScalar(nextsw_cday, flds_scalar_index_nextsw_cday, exportState, &
+ flds_scalar_name, flds_scalar_num, rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ !--------------------------------
+ ! diagnostics
+ !--------------------------------
+
+ if (dbug > 1) then
+ call state_diagnose(exportState,subname//':ES',rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ if (mastertask) then
+ call log_clock_advance(clock, 'XATM', logunit, rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ endif
+ endif
+
+ call shr_log_setLogUnit (shrlogunit)
+
+ end subroutine ModelAdvance
+
+ !===============================================================================
+ subroutine state_setexport(exportState, rc)
+
+ ! input/output variables
+ type(ESMF_State) , intent(inout) :: exportState
+ integer, intent(out) :: rc
+
+ ! local variables
+ integer :: n, nf, nind
+ real(r8), pointer :: lat(:)
+ real(r8), pointer :: lon(:)
+ integer :: spatialDim
+ integer :: numOwnedElements
+ real(R8), pointer :: ownedElemCoords(:)
+ !--------------------------------------------------
+
+ rc = ESMF_SUCCESS
+
+ call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+ allocate(ownedElemCoords(spatialDim*numOwnedElements))
+ call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+ allocate(lon(numownedElements))
+ allocate(lat(numownedElements))
+ do n = 1,numownedElements
+ lon(n) = ownedElemCoords(2*n-1)
+ lat(n) = ownedElemCoords(2*n)
+ end do
+
+ ! Start from index 2 in order to skip the scalar field here
+ do nf = 2,fldsFrAtm_num
+ if (fldsFrAtm(nf)%ungridded_ubound == 0) then
+ call field_setexport(exportState, trim(fldsFrAtm(nf)%stdname), lon, lat, nf=nf, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ else
+ do nind = 1,fldsFrAtm(nf)%ungridded_ubound
+ call field_setexport(exportState, trim(fldsFrAtm(nf)%stdname), lon, lat, nf=nf+nind-1, &
+ ungridded_index=nind, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ end do
+ end if
+ end do
+
+ deallocate(lon)
+ deallocate(lat)
+
+ end subroutine state_setexport
+
+ !===============================================================================
+ subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc)
+
+ use shr_const_mod , only : pi=>shr_const_pi
+
+ ! input/output variables
+ type(ESMF_State) , intent(inout) :: exportState
+ character(len=*) , intent(in) :: fldname
+ real(r8) , intent(in) :: lon(:)
+ real(r8) , intent(in) :: lat(:)
+ integer , intent(in) :: nf
+ integer, optional , intent(in) :: ungridded_index
+ integer , intent(out) :: rc
+
+ !
local variables + integer :: i, ncomp + type(ESMF_Field) :: lfield + real(r8), pointer :: data1d(:) + real(r8), pointer :: data2d(:,:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ncomp = 1 + if (present(ungridded_index)) then + call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (gridToFieldMap == 1) then + do i = 1,size(data2d, dim=1) + data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + else if (gridToFieldMap == 2) then + do i = 1,size(data2d, dim=2) + data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + else + call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + do i = 1,size(data1d) + data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + + end subroutine field_setexport + + !=============================================================================== + subroutine ModelFinalize(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (mastertask) then + write(logunit,*) + write(logunit,*) 'xatm: end of main integration loop' + write(logunit,*) + end if + end subroutine ModelFinalize + +end module atm_comp_nuopc diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib_cmake b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml new file mode 100755 index 00000000000..a90f7a189ef --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +""" +build data model library +""" + +import sys, os + +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." 
+) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildnml import build_xcpl_nml, parse_input +from CIME.case import Case + + +def buildnml(case, caseroot, compname): + if compname != "xglc": + raise AttributeError + build_xcpl_nml(case, caseroot, compname) + + +def _main_func(): + caseroot = parse_input(sys.argv) + with Case(caseroot) as case: + buildnml(case, caseroot, "xglc") + + +if __name__ == "__main__": + _main_func() diff --git a/src/components/xcpl_comps/xglc/cime_config/config_component.xml b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/config_component.xml similarity index 100% rename from src/components/xcpl_comps/xglc/cime_config/config_component.xml rename to CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 new file mode 100644 index 00000000000..4b498f8d9c6 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 @@ -0,0 +1,457 @@ +module glc_comp_nuopc + + !---------------------------------------------------------------------------- + ! This is the NUOPC cap for XGLC + !---------------------------------------------------------------------------- + + use ESMF + use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize + use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise, NUOPC_AddNestedState + use NUOPC_Model , only : model_routine_SS => SetServices + use NUOPC_Model , only : model_label_Advance => label_Advance + use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock + use NUOPC_Model , only : model_label_Finalize => label_Finalize + use NUOPC_Model , only : NUOPC_ModelGet, SetVM + use shr_sys_mod , only : shr_sys_abort + use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit + use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck + use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance + use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock + use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type + + implicit none + private ! except + + public :: SetServices + public :: SetVM + !-------------------------------------------------------------------------- + ! Private module data + !-------------------------------------------------------------------------- + + character(len=CL) :: flds_scalar_name = '' + integer :: flds_scalar_num = 0 + integer :: flds_scalar_index_nx = 0 + integer :: flds_scalar_index_ny = 0 + + integer :: fldsToGlc_num = 0 + integer :: fldsFrGlc_num = 0 + type (fld_list_type) :: fldsToGlc(fldsMax) + type (fld_list_type) :: fldsFrGlc(fldsMax) + integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost + + type(ESMF_Mesh) :: mesh + integer :: nxg ! global dim i-direction + integer :: nyg ! global dim j-direction + integer :: my_task ! my task in mpi communicator mpicom + integer :: inst_index ! number of current instance (ie. 1) + character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") + integer :: logunit ! logging unit number + integer ,parameter :: master_task=0 ! 
task number of master task + logical :: mastertask + integer :: dbug = 0 + character(*),parameter :: modName = "(xglc_comp_nuopc)" + character(*),parameter :: u_FILE_u = & + __FILE__ + + ! TODO: this must be generalized - but for now is just hard-wired + integer, parameter :: max_icesheets = 1 + integer :: num_icesheets = 1 + type(ESMF_State) :: NStateImp(max_icesheets) + type(ESMF_State) :: NStateExp(max_icesheets) + +!=============================================================================== +contains +!=============================================================================== + + subroutine SetServices(gcomp, rc) + + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! the NUOPC gcomp component will register the generic methods + call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! switching to IPD versions + call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & + userRoutine=ModelInitPhase, phase=0, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! set entry point for methods that require specific implementation + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & + userRoutine=InitializeAdvertise, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & + userRoutine=InitializeRealize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! attach specializing method(s) + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine SetServices + + !=============================================================================== + subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_VM) :: vm + character(CS) :: stdname + integer :: n, ns, nf + integer :: lsize ! local array size + integer :: shrlogunit ! original log unit + character(CL) :: cvalue + character(len=CL) :: logmsg + character(len=CS) :: cnum + logical :: isPresent, isSet + character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_VMGet(vm, localpet=my_task, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + mastertask = (my_task == master_task) + + ! 
determine instance information + call get_component_instance(gcomp, inst_suffix, inst_index, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! set logunit and set shr logging to my log file + call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Initialize xglc + call dead_read_inparms('glc', inst_suffix, logunit, nxg, nyg) + + ! advertise import and export fields + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + flds_scalar_name = trim(cvalue) + call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue, *) flds_scalar_num + write(logmsg,*) flds_scalar_num + call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nx + write(logmsg,*) flds_scalar_index_nx + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_ny + write(logmsg,*) flds_scalar_index_ny + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') + endif + + ! Create nested state for each active ice sheet + do ns = 1,num_icesheets + write(cnum,'(i0)') ns + call NUOPC_AddNestedState(importState, CplSet="GLC"//trim(cnum), nestedState=NStateImp(ns), rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call NUOPC_AddNestedState(exportState, CplSet="GLC"//trim(cnum), nestedState=NStateExp(ns), rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + end do + + if (nxg /= 0 .and. nyg /= 0) then + + call fld_list_add(fldsFrGlc_num, fldsFrGlc, trim(flds_scalar_name)) + call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_icemask' ) + call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_icemask_coupled_fluxes' ) + call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_ice_covered' ) + call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_topo' ) + call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Flgg_hflx' ) + + call fld_list_add(fldsToGlc_num, fldsToGlc, trim(flds_scalar_name)) + call fld_list_add(fldsToGlc_num, fldsToGlc, 'Sl_tsrf') + call fld_list_add(fldsToGlc_num, fldsToGlc, 'Flgl_qice') + + ! 
Now advertise import and export fields
+ do ns = 1,num_icesheets
+ if (mastertask) write(logunit,*)'Advertising To Xglc ',trim(fldsToGlc(ns)%stdname)
+ do nf = 1,fldsToGlc_num
+ call NUOPC_Advertise(NStateImp(ns), standardName=fldsToGlc(nf)%stdname, &
+ TransferOfferGeomObject='will provide', rc=rc)
+ if (chkErr(rc,__LINE__,u_FILE_u)) return
+ end do
+ if (mastertask) write(logunit,*)'Advertising From Xglc ',trim(fldsFrGlc(ns)%stdname)
+ do nf = 1,fldsFrGlc_num
+ call NUOPC_Advertise(NStateExp(ns), standardName=fldsFrGlc(nf)%stdname, &
+ TransferOfferGeomObject='will provide', rc=rc)
+ if (chkErr(rc,__LINE__,u_FILE_u)) return
+ end do
+ enddo
+
+ end if
+
+ end subroutine InitializeAdvertise
+
+ !===============================================================================
+
+ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc)
+
+ ! input/output variables
+ type(ESMF_GridComp) :: gcomp
+ type(ESMF_State) :: importState, exportState
+ type(ESMF_Clock) :: clock
+ integer, intent(out) :: rc
+
+ ! local variables
+ integer :: n, ns
+ character(ESMF_MAXSTR) :: cvalue ! config data
+ character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) '
+ !-------------------------------------------------------------------------------
+
+ rc = ESMF_SUCCESS
+
+ ! generate the mesh
+ call NUOPC_CompAttributeGet(gcomp, name='mesh_glc', value=cvalue, rc=rc)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+ mesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+ ! realize the actively coupled fields, now that a mesh is established
+ ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState
+ ! by replacing the advertised fields with the newly created fields of the same name.
+ do ns = 1,num_icesheets
+ call fld_list_realize( &
+ state=NStateExp(ns), &
+ fldList=fldsFrGlc, &
+ numflds=fldsFrGlc_num, &
+ flds_scalar_name=flds_scalar_name, &
+ flds_scalar_num=flds_scalar_num, &
+ tag=subname//':dglcExport',&
+ mesh=mesh, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ call fld_list_realize( &
+ state=NStateImp(ns), &
+ fldList=fldsToGlc, &
+ numflds=fldsToGlc_num, &
+ flds_scalar_name=flds_scalar_name, &
+ flds_scalar_num=flds_scalar_num, &
+ tag=subname//':dglcImport',&
+ mesh=mesh, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ end do
+
+ ! Pack export state and set the coupling scalars
+ call state_setexport(rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ do ns = 1,num_icesheets
+ call state_setscalar(dble(nxg),flds_scalar_index_nx, NStateExp(ns), flds_scalar_name, flds_scalar_num, rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ call state_setscalar(dble(nyg),flds_scalar_index_ny, NStateExp(ns), flds_scalar_name, flds_scalar_num, rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ end do
+
+ end subroutine InitializeRealize
+
+ !===============================================================================
+
+ subroutine ModelAdvance(gcomp, rc)
+
+ ! input/output variables
+ type(ESMF_GridComp) :: gcomp
+ integer, intent(out) :: rc
+
+ ! local variables
+ character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) '
+ !-------------------------------------------------------------------------------
+
+ rc = ESMF_SUCCESS
+ call memcheck(subname, 3, mastertask)
+
+ call state_setexport(rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ end subroutine ModelAdvance
+
+ !===============================================================================
+ subroutine state_setexport(rc)
+
+ ! input/output variables
+ integer, intent(out) :: rc
+
+ ! local variables
+ integer :: n, nf, nind, ns
+ real(r8), pointer :: lat(:)
+ real(r8), pointer :: lon(:)
+ integer :: spatialDim
+ integer :: numOwnedElements
+ real(R8), pointer :: ownedElemCoords(:)
+ character(len=*),parameter :: subname=trim(modName)//':(state_setexport) '
+ !--------------------------------------------------
+ rc = ESMF_SUCCESS
+
+ call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+ allocate(ownedElemCoords(spatialDim*numOwnedElements))
+ call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords)
+ if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+ allocate(lon(numownedElements))
+ allocate(lat(numownedElements))
+ do n = 1,numownedElements
+ lon(n) = ownedElemCoords(2*n-1)
+ lat(n) = ownedElemCoords(2*n)
+ end do
+
+ ! Start from index 2 in order to skip the scalar field
+ do ns = 1,num_icesheets
+ do nf = 2,fldsFrGlc_num
+ if (fldsFrGlc(nf)%ungridded_ubound == 0) then
+ call field_setexport(NStateExp(ns), trim(fldsFrGlc(nf)%stdname), lon, lat, nf=nf, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ else
+ do nind = 1,fldsFrGlc(nf)%ungridded_ubound
+ call field_setexport(NStateExp(ns), trim(fldsFrGlc(nf)%stdname), lon, lat, nf=nf+nind-1, &
+ ungridded_index=nind, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ end do
+ end if
+ end do
+ if (dbug > 1) then
+ call State_diagnose(NStateExp(ns), trim(subname)//':ES',rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ endif
+ end do
+
+ deallocate(lon)
+ deallocate(lat)
+
+ end subroutine state_setexport
+
+ !===============================================================================
+
+ subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc)
+
+ use shr_const_mod , only : pi=>shr_const_pi
+
+ ! input/output variables
+ type(ESMF_State) , intent(inout) :: exportState
+ character(len=*) , intent(in) :: fldname
+ real(r8) , intent(in) :: lon(:)
+ real(r8) , intent(in) :: lat(:)
+ integer , intent(in) :: nf
+ integer, optional , intent(in) :: ungridded_index
+ integer , intent(out) :: rc
+
+ !
local variables + integer :: i, ncomp + type(ESMF_Field) :: lfield + real(r8), pointer :: data1d(:) + real(r8), pointer :: data2d(:,:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ncomp = 5 + if (present(ungridded_index)) then + call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (gridToFieldMap == 1) then + do i = 1,size(data2d, dim=1) + data2d(i,ungridded_index) = (nf*100) & + * cos (pi*lat(i)/180.0_R8) * cos (pi*lat(i)/180.0_R8) & + * sin (pi*lon(i)/180.0_R8) * sin (pi*lon(i)/180.0_R8) & + + (ncomp*10.0_R8) + enddo + else if (gridToFieldMap == 2) then + do i = 1,size(data2d, dim=2) + data2d(ungridded_index,i) = (nf*100) & + * cos (pi*lat(i)/180.0_R8) * cos (pi*lat(i)/180.0_R8) & + * sin (pi*lon(i)/180.0_R8) * sin (pi*lon(i)/180.0_R8) & + + (ncomp*10.0_R8) + end do + end if + else + call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (fldname == 'Sg_icemask' .or. fldname == 'Sg_icemask_coupled_fluxes' .or. fldname == 'Sg_ice_covered') then + data1d(:) = 1._r8 + else + do i = 1,size(data1d) + data1d(i) = (nf*100) & + * cos (pi*lat(i)/180.0_R8) * cos (pi*lat(i)/180.0_R8) & + * sin (pi*lon(i)/180.0_R8) * sin (pi*lon(i)/180.0_R8) & + + (ncomp*10.0_R8) + end do + end if + end if + + end subroutine field_setexport + + !=============================================================================== + subroutine ModelFinalize(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (mastertask) then + write(logunit,*) + write(logunit,*) 'xglc: end of main integration loop' + write(logunit,*) + end if + end subroutine ModelFinalize + +end module glc_comp_nuopc diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib_cmake b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildnml b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildnml new file mode 100755 index 00000000000..7d141edd619 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildnml @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +""" +build data model library +""" + +import sys, os + +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." 
+) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildnml import build_xcpl_nml, parse_input +from CIME.case import Case + + +def buildnml(case, caseroot, compname): + if compname != "xice": + raise AttributeError + build_xcpl_nml(case, caseroot, compname) + + +def _main_func(): + caseroot = parse_input(sys.argv) + with Case(caseroot) as case: + buildnml(case, caseroot, "xice") + + +if __name__ == "__main__": + _main_func() diff --git a/src/components/xcpl_comps/xice/cime_config/config_component.xml b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/config_component.xml similarity index 100% rename from src/components/xcpl_comps/xice/cime_config/config_component.xml rename to CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 new file mode 100644 index 00000000000..9185b8e532f --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 @@ -0,0 +1,552 @@ +module ice_comp_nuopc + + !---------------------------------------------------------------------------- + ! This is the NUOPC cap for XICE + !---------------------------------------------------------------------------- + + use ESMF + use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize + use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise + use NUOPC_Model , only : model_routine_SS => SetServices + use NUOPC_Model , only : model_label_Advance => label_Advance + use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock + use NUOPC_Model , only : model_label_Finalize => label_Finalize + use NUOPC_Model , only : NUOPC_ModelGet, SetVM + use shr_sys_mod , only : shr_sys_abort + use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit + use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck + use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance + use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock + use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type + + implicit none + private ! except + + public :: SetServices + public :: SetVM + !-------------------------------------------------------------------------- + ! Private module data + !-------------------------------------------------------------------------- + + character(len=CL) :: flds_scalar_name = '' + integer :: flds_scalar_num = 0 + integer :: flds_scalar_index_nx = 0 + integer :: flds_scalar_index_ny = 0 + + integer :: fldsToIce_num = 0 + integer :: fldsFrIce_num = 0 + type (fld_list_type) :: fldsToIce(fldsMax) + type (fld_list_type) :: fldsFrIce(fldsMax) + integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost + + type(ESMF_Mesh) :: mesh + integer :: nxg ! global dim i-direction + integer :: nyg ! global dim j-direction + integer :: my_task ! my task in mpi communicator mpicom + integer :: inst_index ! number of current instance (ie. 1) + character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") + integer :: logunit ! logging unit number + integer ,parameter :: master_task=0 ! 
task number of master task + logical :: mastertask + integer :: dbug = 0 + character(*),parameter :: modName = "(xice_comp_nuopc)" + character(*),parameter :: u_FILE_u = & + __FILE__ + +!=============================================================================== +contains +!=============================================================================== + + subroutine SetServices(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! the NUOPC gcomp component will register the generic methods + call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! switching to IPD versions + call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & + userRoutine=ModelInitPhase, phase=0, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! set entry point for methods that require specific implementation + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & + userRoutine=InitializeAdvertise, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & + userRoutine=InitializeRealize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! attach specializing method(s) + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine SetServices + + !=============================================================================== + subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_VM) :: vm + character(CL) :: cvalue + character(CS) :: stdname + integer :: n + integer :: lsize ! local array size + integer :: shrlogunit ! original log unit + character(len=CL) :: logmsg + logical :: isPresent, isSet + character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + + call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_VMGet(vm, localpet=my_task, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + mastertask = my_task == master_task + + !---------------------------------------------------------------------------- + ! 
determine instance information + !---------------------------------------------------------------------------- + + call get_component_instance(gcomp, inst_suffix, inst_index, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !---------------------------------------------------------------------------- + ! set logunit and set shr logging to my log file + !---------------------------------------------------------------------------- + + call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !---------------------------------------------------------------------------- + ! Initialize xice + !---------------------------------------------------------------------------- + + call dead_read_inparms('ice', inst_suffix, logunit, nxg, nyg) + + !-------------------------------- + ! advertise import and export fields + !-------------------------------- + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + flds_scalar_name = trim(cvalue) + call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue, *) flds_scalar_num + write(logmsg,*) flds_scalar_num + call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nx + write(logmsg,*) flds_scalar_index_nx + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_ny + write(logmsg,*) flds_scalar_index_ny + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') + endif + + if (nxg /= 0 .and. 
nyg /= 0) then + + call fld_list_add(fldsFrIce_num, fldsFrIce, trim(flds_scalar_name)) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_imask' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_ifrac' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_t' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_tref' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_qref' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_snowh' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_u10' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_avsdr' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_anidr' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_avsdf' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_anidf' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_taux' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_tauy' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_lat' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_sen' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_lwup' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_evap' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_swnet' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_melth' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_swpen' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_meltw' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_salt' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_taux' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_tauy' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_bcpho' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_bcphi' ) + call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_flxdst' ) + + call fld_list_add(fldsToIce_num, fldsToIce, trim(flds_scalar_name)) + call fld_list_add(fldsToIce_num, fldsToIce, 'So_dhdx' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'So_dhdy' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'So_t' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'So_s' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'So_u' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'So_v' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Fioo_q' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_z' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_u' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_v' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_ptem' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_shum' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_dens' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_tbot' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swvdr' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swndr' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swvdf' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swndf' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_lwdn' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_rain' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_snow' ) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_bcph' , ungridded_lbound=1, ungridded_ubound=3) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_ocph' , ungridded_lbound=1, ungridded_ubound=3) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_dstwet', ungridded_lbound=1, ungridded_ubound=4) + call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_dstdry', ungridded_lbound=1, ungridded_ubound=4) + + do n = 1,fldsFrIce_num + if(mastertask) write(logunit,*)'Advertising From Xice ',trim(fldsFrIce(n)%stdname) + call NUOPC_Advertise(exportState, 
standardName=fldsFrIce(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + + do n = 1,fldsToIce_num + if(mastertask) write(logunit,*)'Advertising To Xice ',trim(fldsToIce(n)%stdname) + call NUOPC_Advertise(importState, standardName=fldsToIce(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + end do + end if + + + if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + + !---------------------------------------------------------------------------- + ! Reset shr logging to original values + !---------------------------------------------------------------------------- + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeAdvertise + + !=============================================================================== + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + integer :: shrlogunit ! original log unit + integer :: n + character(ESMF_MAXSTR) :: cvalue ! config data + character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + !---------------------------------------------------------------------------- + ! Reset shr logging to my log file + !---------------------------------------------------------------------------- + + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) + + !-------------------------------- + ! generate the mesh + !-------------------------------- + + call NUOPC_CompAttributeGet(gcomp, name='mesh_ice', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + mesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! realize the actively coupled fields, now that a mesh is established + ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState + ! by replacing the advertised fields with the newly created fields of the same name. + !-------------------------------- + + call fld_list_realize( & + state=ExportState, & + fldlist=fldsFrIce, & + numflds=fldsFrIce_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':diceExport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call fld_list_realize( & + state=importState, & + fldList=fldsToIce, & + numflds=fldsToIce_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':diceImport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! Pack export state + !-------------------------------- + + call State_SetExport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & + flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & + flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! 
diagnostics + !-------------------------------- + + if (dbug > 1) then + call State_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeRealize + + !=============================================================================== + subroutine ModelAdvance(gcomp, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + + ! local variables + type(ESMF_Clock) :: clock + type(ESMF_State) :: exportState + integer :: shrlogunit ! original log unit + character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + call memcheck(subname, 3, mastertask) + + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) + + !-------------------------------- + ! Pack export state + !-------------------------------- + + call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call state_setexport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! diagnostics + !-------------------------------- + + if (dbug > 1) then + call State_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (my_task == master_task) then + call log_clock_advance(clock, 'XICE', logunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + endif + + call shr_log_setLogUnit (shrlogunit) + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + + end subroutine ModelAdvance + + !=============================================================================== + subroutine state_setexport(exportState, rc) + + ! input/output variables + type(ESMF_State) , intent(inout) :: exportState + integer, intent(out) :: rc + + ! local variables + integer :: n, nf, nind + real(r8), pointer :: lat(:) + real(r8), pointer :: lon(:) + integer :: spatialDim + integer :: numOwnedElements + real(R8), pointer :: ownedElemCoords(:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + allocate(ownedElemCoords(spatialDim*numOwnedElements)) + call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + allocate(lon(numownedElements)) + allocate(lat(numownedElements)) + do n = 1,numownedElements + lon(n) = ownedElemCoords(2*n-1) + lat(n) = ownedElemCoords(2*n) + end do + + ! 
Start from index 2 in order to skip the scalar field
+ do nf = 2,fldsFrIce_num
+ if (fldsFrIce(nf)%ungridded_ubound == 0) then
+ call field_setexport(exportState, trim(fldsFrIce(nf)%stdname), lon, lat, nf=nf, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ else
+ do nind = 1,fldsFrIce(nf)%ungridded_ubound
+ call field_setexport(exportState, trim(fldsFrIce(nf)%stdname), lon, lat, nf=nf+nind-1, &
+ ungridded_index=nind, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ end do
+ end if
+ end do
+
+ deallocate(lon)
+ deallocate(lat)
+
+ end subroutine state_setexport
+
+ !===============================================================================
+ subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc)
+
+ use shr_const_mod , only : pi=>shr_const_pi
+
+ ! input/output variables
+ type(ESMF_State) , intent(inout) :: exportState
+ character(len=*) , intent(in) :: fldname
+ real(r8) , intent(in) :: lon(:)
+ real(r8) , intent(in) :: lat(:)
+ integer , intent(in) :: nf
+ integer, optional , intent(in) :: ungridded_index
+ integer , intent(out) :: rc
+
+ ! local variables
+ integer :: i, ncomp
+ type(ESMF_Field) :: lfield
+ real(r8), pointer :: data1d(:)
+ real(r8), pointer :: data2d(:,:)
+ !--------------------------------------------------
+
+ rc = ESMF_SUCCESS
+
+ call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ ncomp = 3
+ if (present(ungridded_index)) then
+ call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ if (gridToFieldMap == 1) then
+ do i = 1,size(data2d, dim=1)
+ data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * &
+ sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8)
+ end do
+ else if (gridToFieldMap == 2) then
+ do i = 1,size(data2d, dim=2)
+ data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * &
+ sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8)
+ end do
+ end if
+ else
+ call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+ do i = 1,size(data1d)
+ data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * &
+ sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8)
+ end do
+ !
Reset some fields + if (fldname == 'Si_ifrac') then + do i = 1,size(data1d) + data1d(i) = min(1.0_R8,max(0.0_R8,data1d(i))) + end do + else if (fldname == 'Si_imask') then + do i = 1,size(data1d) + data1d(i) = float(nint(min(1.0_R8,max(0.0_R8,data1d(i))))) + end do + end if + end if + + end subroutine field_setexport + + !=============================================================================== + subroutine ModelFinalize(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (mastertask) then + write(logunit,*) + write(logunit,*) 'xice: end of main integration loop' + write(logunit,*) + end if + end subroutine ModelFinalize + +end module ice_comp_nuopc diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib_cmake b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml new file mode 100755 index 00000000000..72e822771b4 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +""" +build data model library +""" + +import sys, os + +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." +) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildnml import build_xcpl_nml, parse_input +from CIME.case import Case + + +def buildnml(case, caseroot, compname): + if compname != "xlnd": + raise AttributeError + build_xcpl_nml(case, caseroot, compname) + + +def _main_func(): + caseroot = parse_input(sys.argv) + with Case(caseroot) as case: + buildnml(case, caseroot, "xlnd") + + +if __name__ == "__main__": + _main_func() diff --git a/src/components/xcpl_comps/xlnd/cime_config/config_component.xml b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/config_component.xml similarity index 100% rename from src/components/xcpl_comps/xlnd/cime_config/config_component.xml rename to CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 new file mode 100644 index 00000000000..a43215939ad --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 @@ -0,0 +1,564 @@ +module lnd_comp_nuopc + + !---------------------------------------------------------------------------- + ! 
This is the NUOPC cap for XLND
+ !----------------------------------------------------------------------------
+
+ use ESMF
+ use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize
+ use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise
+ use NUOPC_Model , only : model_routine_SS => SetServices
+ use NUOPC_Model , only : model_label_Advance => label_Advance
+ use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock
+ use NUOPC_Model , only : model_label_Finalize => label_Finalize
+ use NUOPC_Model , only : NUOPC_ModelGet, SetVM
+ use shr_sys_mod , only : shr_sys_abort
+ use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs
+ use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit
+ use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck
+ use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance
+ use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock
+ use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type
+
+ implicit none
+ private ! except
+
+ public :: SetServices
+ public :: SetVM
+ !--------------------------------------------------------------------------
+ ! Private module data
+ !--------------------------------------------------------------------------
+
+ character(len=CL) :: flds_scalar_name = ''
+ integer :: flds_scalar_num = 0
+ integer :: flds_scalar_index_nx = 0
+ integer :: flds_scalar_index_ny = 0
+ integer :: flds_scalar_index_nextsw_cday = 0
+
+ integer :: fldsToLnd_num = 0
+ integer :: fldsFrLnd_num = 0
+ type (fld_list_type) :: fldsToLnd(fldsMax)
+ type (fld_list_type) :: fldsFrLnd(fldsMax)
+ integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost
+ integer :: glc_nec
+
+ type(ESMF_Mesh) :: mesh
+ integer :: nxg ! global dim i-direction
+ integer :: nyg ! global dim j-direction
+ integer :: my_task ! my task in mpi communicator mpicom
+ integer :: inst_index ! number of current instance (ie. 1)
+ character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "")
+ integer :: logunit ! logging unit number
+ integer ,parameter :: master_task=0 ! task number of master task
+ logical :: mastertask
+ integer :: dbug = 1
+ character(*),parameter :: modName = "(xlnd_comp_nuopc)"
+ character(*),parameter :: u_FILE_u = &
+ __FILE__
+
+!===============================================================================
+contains
+!===============================================================================
+
+ subroutine SetServices(gcomp, rc)
+
+ type(ESMF_GridComp) :: gcomp
+ integer, intent(out) :: rc
+ character(len=*),parameter :: subname=trim(modName)//':(SetServices) '
+
+ rc = ESMF_SUCCESS
+ call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ ! the NUOPC gcomp component will register the generic methods
+ call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ ! switching to IPD versions
+ call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, &
+ userRoutine=ModelInitPhase, phase=0, rc=rc)
+ if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+ !
set entry point for methods that require specific implementation + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & + userRoutine=InitializeAdvertise, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & + userRoutine=InitializeRealize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! attach specializing method(s) + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine SetServices + + !=============================================================================== + + subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_VM) :: vm + character(CS) :: stdname + integer :: n + integer :: lsize ! local array size + integer :: shrlogunit ! original log unit + character(CL) :: cvalue + character(CL) :: logmsg + logical :: isPresent, isSet + character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + + call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_VMGet(vm, localpet=my_task, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + mastertask = (my_task == master_task) + + !---------------------------------------------------------------------------- + ! determine instance information + !---------------------------------------------------------------------------- + + call get_component_instance(gcomp, inst_suffix, inst_index, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !---------------------------------------------------------------------------- + ! set logunit and set shr logging to my log file + !---------------------------------------------------------------------------- + + call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !---------------------------------------------------------------------------- + ! Initialize xlnd + !---------------------------------------------------------------------------- + + call dead_read_inparms('lnd', inst_suffix, logunit, nxg, nyg) + + !-------------------------------- + ! advertise import and export fields + !-------------------------------- + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. 
isSet) then + flds_scalar_name = trim(cvalue) + call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue, *) flds_scalar_num + write(logmsg,*) flds_scalar_num + call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nx + write(logmsg,*) flds_scalar_index_nx + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_ny + write(logmsg,*) flds_scalar_index_ny + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') + end if + + if (nxg /= 0 .and. 
nyg /= 0) then + + call NUOPC_CompAttributeGet(gcomp, name='glc_nec', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) glc_nec + call ESMF_LogWrite('glc_nec = '// trim(cvalue), ESMF_LOGMSG_INFO) + + call fld_list_add(fldsFrLnd_num, fldsFrlnd, trim(flds_scalar_name)) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_lfrin' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_t' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_tref' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_qref' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_avsdr' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_anidr' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_avsdf' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_anidf' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_snowh' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_u10' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_fv' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_ram1' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofsur' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofgwl' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofsub' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofi' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_irrig' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_taux' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_tauy' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_lat' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_sen' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_lwup' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_evap' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_swnet' ) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_flxdst' , ungridded_lbound=1, ungridded_ubound=4) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flgl_qice_elev', ungridded_lbound=1, ungridded_ubound=glc_nec+1) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_tsrf_elev' , ungridded_lbound=1, ungridded_ubound=glc_nec+1) + call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_topo_elev' , ungridded_lbound=1, ungridded_ubound=glc_nec+1) + + call fld_list_add(fldsToLnd_num, fldsToLnd, trim(flds_scalar_name)) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_z' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_topo' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_u' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_v' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_ptem' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_pbot' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_tbot' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_shum' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Flrr_volr' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Flrr_volrmch' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_lwdn' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_rainc' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_rainl' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_snowc' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_snowl' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swndr' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swvdr' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swndf' ) + call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swvdf' ) + call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_bcph' , ungridded_lbound=1, ungridded_ubound=3) + call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_ocph' , ungridded_lbound=1, ungridded_ubound=3) 
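+       ! Fields with a non-spatial dimension pass ungridded_lbound/ungridded_ubound,
+       ! e.g. the aerosol/dust bins here or the glc_nec+1 elevation classes below.
+       ! Because gridTofieldMap = 2 the ungridded dimension is innermost, so the
+       ! realized field is addressed as data2d(ungridded_index,i) in field_setexport.
+       ! Illustrative sketch only (hypothetical field name 'Sl_example_elev'):
+       !   call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_example_elev', &
+       !        ungridded_lbound=1, ungridded_ubound=glc_nec+1)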
+ call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_dstwet' , ungridded_lbound=1, ungridded_ubound=4) + call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_dstdry' , ungridded_lbound=1, ungridded_ubound=4) + call fld_list_add(fldsToLnd_num, fldsTolnd, 'Sg_topo_elev' , ungridded_lbound=1, ungridded_ubound=glc_nec+1) + call fld_list_add(fldsToLnd_num, fldsTolnd, 'Sg_ice_covered_elev' , ungridded_lbound=1, ungridded_ubound=glc_nec+1) + call fld_list_add(fldsToLnd_num, fldsTolnd, 'Flgg_hflx_elev' , ungridded_lbound=1, ungridded_ubound=glc_nec+1) + call fld_list_add(fldsToLnd_num, fldsTolnd, 'Sg_icemask') + call fld_list_add(fldsToLnd_num, fldsTolnd, 'Sg_icemask_coupled_fluxes') + + do n = 1,fldsFrLnd_num + if (mastertask) write(logunit,*)'Advertising From Xlnd ',trim(fldsFrLnd(n)%stdname) + call NUOPC_Advertise(exportState, standardName=fldsFrLnd(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + + do n = 1,fldsToLnd_num + if(mastertask) write(logunit,*)'Advertising To Xlnd',trim(fldsToLnd(n)%stdname) + call NUOPC_Advertise(importState, standardName=fldsToLnd(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + + end if + + !---------------------------------------------------------------------------- + ! Reset shr logging to original values + !---------------------------------------------------------------------------- + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeAdvertise + + !=============================================================================== + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! intput/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + integer :: shrlogunit ! original log unit + integer :: n + character(ESMF_MAXSTR) :: cvalue ! config data + character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + + !---------------------------------------------------------------------------- + ! Reset shr logging to my log file + !---------------------------------------------------------------------------- + + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) + + !-------------------------------- + ! generate the mesh + !-------------------------------- + + call NUOPC_CompAttributeGet(gcomp, name='mesh_lnd', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + mesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! realize the actively coupled fields, now that a mesh is established + ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState + ! by replacing the advertised fields with the newly created fields of the same name. 
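+     ! A simplified per-field sketch of the realize step (assumed, abbreviated form
+     ! of what fld_list_realize does for each non-scalar entry on the mesh):
+     !   field = ESMF_FieldCreate(mesh, ESMF_TYPEKIND_R8, name=stdname, rc=rc)
+     !   call NUOPC_Realize(exportState, field=field, rc=rc)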
+ !-------------------------------- + + call fld_list_realize( & + state=ExportState, & + fldlist=fldsFrLnd, & + numflds=fldsFrLnd_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':dlndExport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call fld_list_realize( & + state=importState, & + fldList=fldsToLnd, & + numflds=fldsToLnd_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':dlndImport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! Pack export state + !-------------------------------- + + call state_setexport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & + flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & + flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! diagnostics + !-------------------------------- + + if (dbug > 1) then + call state_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + + call shr_log_setLogUnit (shrlogunit) + + if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + + end subroutine InitializeRealize + + !=============================================================================== + subroutine ModelAdvance(gcomp, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + + ! local variables + type(ESMF_Clock) :: clock + type(ESMF_State) :: exportState + integer :: shrlogunit ! original log unit + character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + call memcheck(subname, 3, mastertask) + + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) + + !-------------------------------- + ! Pack export state + !-------------------------------- + + call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call state_setexport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! diagnostics + !-------------------------------- + + if (dbug > 1) then + call state_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (mastertask) then + call log_clock_advance(clock, 'LND', logunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + endif + + call shr_log_setLogUnit (shrlogunit) + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + + end subroutine ModelAdvance + + !=============================================================================== + + subroutine state_setexport(exportState, rc) + + ! input/output variables + type(ESMF_State) , intent(inout) :: exportState + integer, intent(out) :: rc + + ! 
local variables + integer :: n, nf, nind + real(r8), pointer :: lat(:) + real(r8), pointer :: lon(:) + integer :: spatialDim + integer :: numOwnedElements + real(R8), pointer :: ownedElemCoords(:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + allocate(ownedElemCoords(spatialDim*numOwnedElements)) + call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + allocate(lon(numownedElements)) + allocate(lat(numownedElements)) + do n = 1,numownedElements + lon(n) = ownedElemCoords(2*n-1) + lat(n) = ownedElemCoords(2*n) + end do + + ! Start from index 2 in order to Skip the scalar field here + do nf = 2,fldsFrLnd_num + if (fldsFrLnd(nf)%ungridded_ubound == 0) then + call field_setexport(exportState, trim(fldsFrLnd(nf)%stdname), lon, lat, nf=nf, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + else + do nind = 1,fldsFrLnd(nf)%ungridded_ubound + call field_setexport(exportState, trim(fldsFrLnd(nf)%stdname), lon, lat, nf=nf+nind-1, & + ungridded_index=nind, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + end do + end if + end do + + deallocate(lon) + deallocate(lat) + + end subroutine state_setexport + + !=============================================================================== + + subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) + + use shr_const_mod , only : pi=>shr_const_pi + + ! intput/otuput variables + type(ESMF_State) , intent(inout) :: exportState + character(len=*) , intent(in) :: fldname + real(r8) , intent(in) :: lon(:) + real(r8) , intent(in) :: lat(:) + integer , intent(in) :: nf + integer, optional , intent(in) :: ungridded_index + integer , intent(out) :: rc + + ! 
local variables + integer :: i, ncomp + type(ESMF_Field) :: lfield + real(r8), pointer :: data1d(:) + real(r8), pointer :: data2d(:,:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ncomp = 2 + if (present(ungridded_index)) then + call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (gridToFieldMap == 1) then + do i = 1,size(data2d, dim=1) + data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + else if (gridToFieldMap == 2) then + do i = 1,size(data2d, dim=2) + data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + else + call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (fldname == 'Sl_lfrin') then + data1d(:) = 1._r8 + else + do i = 1,size(data1d) + data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + end if + + end subroutine field_setexport + + !=============================================================================== + subroutine ModelFinalize(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (mastertask) then + write(logunit,*) + write(logunit,*) 'xlnd: end of main integration loop' + write(logunit,*) + end if + end subroutine ModelFinalize + +end module lnd_comp_nuopc diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib_cmake b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml new file mode 100755 index 00000000000..7158056b462 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +""" +build data model library +""" + +import sys, os + +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." 
+) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildnml import build_xcpl_nml, parse_input +from CIME.case import Case + + +def buildnml(case, caseroot, compname): + if compname != "xocn": + raise AttributeError + build_xcpl_nml(case, caseroot, compname) + + +def _main_func(): + caseroot = parse_input(sys.argv) + with Case(caseroot) as case: + buildnml(case, caseroot, "xocn") + + +if __name__ == "__main__": + _main_func() diff --git a/src/components/xcpl_comps/xocn/cime_config/config_component.xml b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/config_component.xml similarity index 100% rename from src/components/xcpl_comps/xocn/cime_config/config_component.xml rename to CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 new file mode 100644 index 00000000000..87f8ca25102 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 @@ -0,0 +1,475 @@ +module ocn_comp_nuopc + + !---------------------------------------------------------------------------- + ! This is the NUOPC cap for XOCN + !---------------------------------------------------------------------------- + + use ESMF + use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize + use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise + use NUOPC_Model , only : model_routine_SS => SetServices + use NUOPC_Model , only : model_label_Advance => label_Advance + use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock + use NUOPC_Model , only : model_label_Finalize => label_Finalize + use NUOPC_Model , only : NUOPC_ModelGet, SetVM + use shr_sys_mod , only : shr_sys_abort + use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit + use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck + use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance + use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock + use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type + + implicit none + private ! except + + public :: SetServices + public :: SetVM + !-------------------------------------------------------------------------- + ! Private module data + !-------------------------------------------------------------------------- + + character(len=CL) :: flds_scalar_name = '' + integer :: flds_scalar_num = 0 + integer :: flds_scalar_index_nx = 0 + integer :: flds_scalar_index_ny = 0 + integer :: flds_scalar_index_nextsw_cday = 0._r8 + + integer :: fldsToOcn_num = 0 + integer :: fldsFrOcn_num = 0 + type (fld_list_type) :: fldsToOcn(fldsMax) + type (fld_list_type) :: fldsFrOcn(fldsMax) + integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost + + type(ESMF_Mesh) :: mesh + integer :: nxg ! global dim i-direction + integer :: nyg ! global dim j-direction + integer :: my_task ! my task in mpi communicator mpicom + integer :: inst_index ! number of current instance (ie. 1) + character(len=16) :: inst_name ! fullname of current instance (ie. "ocn_0001") + character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") + integer :: logunit ! 
logging unit number + integer ,parameter :: master_task=0 ! task number of master task + logical :: mastertask + integer :: dbug = 0 + character(*),parameter :: modName = "(xocn_comp_nuopc)" + character(*),parameter :: u_FILE_u = & + __FILE__ + +!=============================================================================== +contains +!=============================================================================== + + subroutine SetServices(gcomp, rc) + + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! the NUOPC gcomp component will register the generic methods + call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! switching to IPD versions + call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & + userRoutine=ModelInitPhase, phase=0, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! set entry point for methods that require specific implementation + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & + userRoutine=InitializeAdvertise, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & + userRoutine=InitializeRealize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! attach specializing method(s) + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine SetServices + + !=============================================================================== + + subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_VM) :: vm + integer :: n + integer :: lsize ! local array size + integer :: shrlogunit ! original log unit + character(CL) :: cvalue + character(len=CL) :: logmsg + logical :: isPresent, isSet + character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + + call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_VMGet(vm, localpet=my_task, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + mastertask = (my_task == master_task) + + ! determine instance information + call get_component_instance(gcomp, inst_suffix, inst_index, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! 
set logunit and set shr logging to my log file + call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Initialize xocn + call dead_read_inparms('ocn', inst_suffix, logunit, nxg, nyg) + + ! advertise import and export fields + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + flds_scalar_name = trim(cvalue) + call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue, *) flds_scalar_num + write(logmsg,*) flds_scalar_num + call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nx + write(logmsg,*) flds_scalar_index_nx + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_ny + write(logmsg,*) flds_scalar_index_ny + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') + endif + + if (nxg /= 0 .and. 
nyg /= 0) then + + call fld_list_add(fldsFrOcn_num, fldsFrOcn, trim(flds_scalar_name)) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_omask" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_t" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_s" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_u" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_v" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_dhdx" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_dhdy" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_bldepth" ) + call fld_list_add(fldsFrOcn_num, fldsFrOcn, "Fioo_q" ) + + call fld_list_add(fldsToOcn_num, fldsToOcn, trim(flds_scalar_name)) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_rain" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_snow" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_lwdn" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swndr" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swvdr" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swndf" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swvdf" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_taux" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_tauy" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_sen" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_lat" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_lwup" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_evap" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Fioi_salt" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_rofl" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_rofi" ) + call fld_list_add(fldsToOcn_num, fldsToOcn, "Sa_pslv" ) + + do n = 1,fldsFrOcn_num + if(mastertask) write(logunit,*)'Advertising From Xocn ',trim(fldsFrOcn(n)%stdname) + call NUOPC_Advertise(exportState, standardName=fldsFrOcn(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + + do n = 1,fldsToOcn_num + if(mastertask) write(logunit,*)'Advertising To Xocn',trim(fldsToOcn(n)%stdname) + call NUOPC_Advertise(importState, standardName=fldsToOcn(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + end if + + ! Reset shr logging to original values + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeAdvertise + + !=============================================================================== + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + integer :: shrlogunit ! original log unit + character(ESMF_MAXSTR) :: cvalue ! config data + character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize: xocn) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + ! Reset shr logging to my log file + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) + + ! generate the mesh + call NUOPC_CompAttributeGet(gcomp, name='mesh_ocn', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + mesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! realize the actively coupled fields, now that a mesh is established + ! 
NUOPC_Realize "realizes" a previously advertised field in the importState and exportState + ! by replacing the advertised fields with the newly created fields of the same name. + call fld_list_realize( & + state=ExportState, & + fldlist=fldsFrOcn, & + numflds=fldsFrOcn_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':docnExport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call fld_list_realize( & + state=importState, & + fldList=fldsToOcn, & + numflds=fldsToOcn_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':docnImport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Pack export state + call state_setexport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! diagnostics + if (dbug > 1) then + call state_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeRealize + + !=============================================================================== + subroutine ModelAdvance(gcomp, rc) + + ! intput/output variables + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + + ! local variables + type(ESMF_Clock) :: clock + type(ESMF_State) :: exportState + integer :: shrlogunit ! original log unit + character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + call memcheck(subname, 3, mastertask) + + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) + + ! Pack export state + call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call state_setexport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! diagnostics + if (dbug > 1) then + call state_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + + call shr_log_setLogUnit (shrlogunit) + + end subroutine ModelAdvance + + !=============================================================================== + subroutine state_setexport(exportState, rc) + + ! input/output variables + type(ESMF_State) , intent(inout) :: exportState + integer, intent(out) :: rc + + ! local variables + integer :: n, nf, nind + real(r8), pointer :: lat(:) + real(r8), pointer :: lon(:) + integer :: spatialDim + integer :: numOwnedElements + real(R8), pointer :: ownedElemCoords(:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + allocate(ownedElemCoords(spatialDim*numOwnedElements)) + call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + allocate(lon(numownedElements)) + allocate(lat(numownedElements)) + do n = 1,numownedElements + lon(n) = ownedElemCoords(2*n-1) + lat(n) = ownedElemCoords(2*n) + end do + + ! 
Start from index 2 in order to Skip the scalar field here + do nf = 2,fldsFrOcn_num + if (fldsFrOcn(nf)%ungridded_ubound == 0) then + call field_setexport(exportState, trim(fldsFrOcn(nf)%stdname), lon, lat, nf=nf, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + else + do nind = 1,fldsFrOcn(nf)%ungridded_ubound + call field_setexport(exportState, trim(fldsFrOcn(nf)%stdname), lon, lat, nf=nf, & + ungridded_index=nind, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + end do + end if + end do + + deallocate(lon) + deallocate(lat) + + end subroutine state_setexport + + !=============================================================================== + + subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) + + use shr_const_mod , only : pi=>shr_const_pi + + ! intput/otuput variables + type(ESMF_State) , intent(inout) :: exportState + character(len=*) , intent(in) :: fldname + real(r8) , intent(in) :: lon(:) + real(r8) , intent(in) :: lat(:) + integer , intent(in) :: nf + integer, optional , intent(in) :: ungridded_index + integer , intent(out) :: rc + + ! local variables + integer :: i, ncomp + type(ESMF_Field) :: lfield + real(r8), pointer :: data1d(:) + real(r8), pointer :: data2d(:,:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ncomp = 4 + if (present(ungridded_index)) then + call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (gridToFieldMap == 1) then + do i = 1,size(data2d, dim=1) + data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + else if (gridToFieldMap == 2) then + do i = 1,size(data2d, dim=2) + data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + else + call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + do i = 1,size(data1d) + data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + + if (fldname == 'So_omask') then + do i = 1,size(data1d) + !data1d(i) = float(nint(min(1.0_R8,max(0.0_R8,data1d(i))))) + data1d(i) = 0._r8 + end do + end if + + end subroutine field_setexport + + !=============================================================================== + subroutine ModelFinalize(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (mastertask) then + write(logunit,*) + write(logunit,*) 'xocn: end of main integration loop' + write(logunit,*) + end if + end subroutine ModelFinalize + +end module ocn_comp_nuopc diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib_cmake b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib_cmake new file mode 120000 
index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml new file mode 100755 index 00000000000..bf23e8913e5 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +""" +build data model library +""" + +import sys, os + +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." +) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildnml import build_xcpl_nml, parse_input +from CIME.case import Case + + +def buildnml(case, caseroot, compname): + if compname != "xrof": + raise AttributeError + build_xcpl_nml(case, caseroot, compname) + + +def _main_func(): + caseroot = parse_input(sys.argv) + with Case(caseroot) as case: + buildnml(case, caseroot, "xrof") + + +if __name__ == "__main__": + _main_func() diff --git a/src/components/xcpl_comps/xrof/cime_config/config_component.xml b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/config_component.xml similarity index 100% rename from src/components/xcpl_comps/xrof/cime_config/config_component.xml rename to CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 new file mode 100644 index 00000000000..1b5b9dd4901 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 @@ -0,0 +1,473 @@ +module rof_comp_nuopc + + !---------------------------------------------------------------------------- + ! This is the NUOPC cap for XROF + !---------------------------------------------------------------------------- + + use ESMF + use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize + use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise + use NUOPC_Model , only : model_routine_SS => SetServices + use NUOPC_Model , only : model_label_Advance => label_Advance + use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock + use NUOPC_Model , only : model_label_Finalize => label_Finalize + use NUOPC_Model , only : NUOPC_ModelGet, SetVM + use shr_sys_mod , only : shr_sys_abort + use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit + use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck + use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance + use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock + use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type + + implicit none + private ! except + + public :: SetServices + public :: SetVM + !-------------------------------------------------------------------------- + ! 
Private module data + !-------------------------------------------------------------------------- + + character(len=CL) :: flds_scalar_name = '' + integer :: flds_scalar_num = 0 + integer :: flds_scalar_index_nx = 0 + integer :: flds_scalar_index_ny = 0 + integer :: flds_scalar_index_nextsw_cday = 0 + + integer :: fldsToRof_num = 0 + integer :: fldsFrRof_num = 0 + type (fld_list_type) :: fldsToRof(fldsMax) + type (fld_list_type) :: fldsFrRof(fldsMax) + integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost + + type(ESMF_Mesh) :: mesh + integer :: nxg ! global dim i-direction + integer :: nyg ! global dim j-direction + integer :: my_task ! my task in mpi + integer :: inst_index ! number of current instance (ie. 1) + character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") + integer :: logunit ! logging unit number + integer ,parameter :: master_task=0 ! task number of master task + logical :: mastertask + integer :: dbug = 0 + character(*),parameter :: modName = "(xrof_comp_nuopc)" + character(*),parameter :: u_FILE_u = & + __FILE__ + +!=============================================================================== +contains +!=============================================================================== + + subroutine SetServices(gcomp, rc) + + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! the NUOPC gcomp component will register the generic methods + call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! switching to IPD versions + call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & + userRoutine=ModelInitPhase, phase=0, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! set entry point for methods that require specific implementation + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & + userRoutine=InitializeAdvertise, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & + userRoutine=InitializeRealize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! attach specializing method(s) + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine SetServices + + !=============================================================================== + subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_VM) :: vm + character(CS) :: stdname + integer :: n + integer :: lsize ! 
local array size + integer :: shrlogunit ! original log unit + character(CL) :: cvalue + character(len=CL) :: logmsg + logical :: isPresent, isSet + character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_VMGet(vm, localpet=my_task, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + mastertask = (my_task == master_task) + + ! determine instance information + call get_component_instance(gcomp, inst_suffix, inst_index, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! set logunit and set shr logging to my log file + call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! Initialize xrof + call dead_read_inparms('rof', inst_suffix, logunit, nxg, nyg) + + !-------------------------------- + ! advertise import and export fields + !-------------------------------- + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + flds_scalar_name = trim(cvalue) + call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue, *) flds_scalar_num + write(logmsg,*) flds_scalar_num + call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nx + write(logmsg,*) flds_scalar_index_nx + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_ny + write(logmsg,*) flds_scalar_index_ny + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') + endif + + if (nxg /= 0 .and. 
nyg /= 0) then + + call fld_list_add(fldsFrRof_num, fldsFrRof, trim(flds_scalar_name)) + call fld_list_add(fldsFrRof_num, fldsFrRof, 'Forr_rofl') + call fld_list_add(fldsFrRof_num, fldsFrRof, 'Forr_rofi') + call fld_list_add(fldsFrRof_num, fldsFrRof, 'Flrr_flood') + call fld_list_add(fldsFrRof_num, fldsFrRof, 'Flrr_volr') + call fld_list_add(fldsFrRof_num, fldsFrRof, 'Flrr_volrmch') + + call fld_list_add(fldsToRof_num, fldsToRof, trim(flds_scalar_name)) + call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofsur') + call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofgwl') + call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofsub') + call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofdto') + call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofi') + call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_irrig') + + do n = 1,fldsFrRof_num + if(mastertask) write(logunit,*)'Advertising From Xrof ',trim(fldsFrRof(n)%stdname) + call NUOPC_Advertise(exportState, standardName=fldsFrRof(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + + do n = 1,fldsToRof_num + if(mastertask) write(logunit,*)'Advertising To Xrof',trim(fldsToRof(n)%stdname) + call NUOPC_Advertise(importState, standardName=fldsToRof(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + end if + + if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + + !---------------------------------------------------------------------------- + ! Reset shr logging to original values + !---------------------------------------------------------------------------- + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeAdvertise + + !=============================================================================== + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! input/output arguments + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + integer :: shrlogunit ! original log unit + character(ESMF_MAXSTR) :: cvalue ! config data + character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + ! Reset shr logging to my log file + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) + + + ! generate the mesh + call NUOPC_CompAttributeGet(gcomp, name='mesh_rof', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + mesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! realize the actively coupled fields, now that a mesh is established + ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState + ! by replacing the advertised fields with the newly created fields of the same name. 
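+     ! After both states are realized on the rof mesh, state_setexport fills the
+     ! export fields with simple placeholder values ((nf+1)*1.0_r8 per field) and
+     ! the global grid size is published through the coupler scalar field:
+     !   call State_SetScalar(dble(nxg), flds_scalar_index_nx, exportState, &
+     !        flds_scalar_name, flds_scalar_num, rc)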
+ call fld_list_realize( & + state=ExportState, & + fldlist=fldsFrRof, & + numflds=fldsFrRof_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':drofExport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call fld_list_realize( & + state=importState, & + fldList=fldsToRof, & + numflds=fldsToRof_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':drofImport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! Pack export state + !-------------------------------- + + call state_setexport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! diagnostics + !-------------------------------- + + if (dbug > 1) then + call State_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeRealize + + !=============================================================================== + subroutine ModelAdvance(gcomp, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + + ! local variables + type(ESMF_Clock) :: clock + type(ESMF_State) :: exportState + integer :: shrlogunit ! original log unit + character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (dbug > 5) then + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + end if + call memcheck(subname, 3, mastertask) + + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) + + ! Pack export state + call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetExport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! diagnostics + if (dbug > 1) then + call State_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (mastertask) then + call log_clock_advance(clock, 'XROF', logunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + endif + + call shr_log_setLogUnit (shrlogunit) + + if (dbug > 5) then + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + end if + + end subroutine ModelAdvance + + !=============================================================================== + subroutine state_setexport(exportState, rc) + + ! input/output variables + type(ESMF_State) , intent(inout) :: exportState + integer, intent(out) :: rc + + ! 
local variables + integer :: n, nf, nind + real(r8), pointer :: lat(:) + real(r8), pointer :: lon(:) + integer :: spatialDim + integer :: numOwnedElements + real(R8), pointer :: ownedElemCoords(:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + allocate(ownedElemCoords(spatialDim*numOwnedElements)) + call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + allocate(lon(numownedElements)) + allocate(lat(numownedElements)) + do n = 1,numownedElements + lon(n) = ownedElemCoords(2*n-1) + lat(n) = ownedElemCoords(2*n) + end do + + ! Start from index 2 in order to skip the scalar field + do nf = 2,fldsFrRof_num + if (fldsFrRof(nf)%ungridded_ubound == 0) then + call field_setexport(exportState, trim(fldsFrRof(nf)%stdname), lon, lat, nf=nf, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + else + do nind = 1,fldsFrRof(nf)%ungridded_ubound + call field_setexport(exportState, trim(fldsFrRof(nf)%stdname), lon, lat, nf=nf+nind-1, & + ungridded_index=nind, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + end do + end if + end do + + deallocate(lon) + deallocate(lat) + + end subroutine state_setexport + + !=============================================================================== + subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) + + use shr_const_mod , only : pi=>shr_const_pi + + ! intput/otuput variables + type(ESMF_State) , intent(inout) :: exportState + character(len=*) , intent(in) :: fldname + real(r8) , intent(in) :: lon(:) + real(r8) , intent(in) :: lat(:) + integer , intent(in) :: nf + integer, optional , intent(in) :: ungridded_index + integer , intent(out) :: rc + + ! 
local variables + integer :: i, ncomp + type(ESMF_Field) :: lfield + real(r8), pointer :: data1d(:) + real(r8), pointer :: data2d(:,:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ncomp = 6 + if (present(ungridded_index)) then + call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (gridToFieldMap == 1) then + do i = 1,size(data2d, dim=1) + data2d(i,ungridded_index) = (nf+1) * 1.0_r8 + end do + else if (gridToFieldMap == 2) then + do i = 1,size(data2d, dim=2) + data2d(ungridded_index,i) = (nf+1) * 1.0_r8 + end do + end if + else + call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + do i = 1,size(data1d) + data1d(i) = (nf+1) * 1.0_r8 + end do + end if + + end subroutine field_setexport + + !=============================================================================== + subroutine ModelFinalize(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (mastertask) then + write(logunit,*) + write(logunit,*) 'xrof: end of main integration loop' + write(logunit,*) + end if + end subroutine ModelFinalize + +end module rof_comp_nuopc diff --git a/src/components/xcpl_comps/xshare/nuopc/dead_methods_mod.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 similarity index 97% rename from src/components/xcpl_comps/xshare/nuopc/dead_methods_mod.F90 rename to CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 index d8b55abd8c0..a9ad38e2419 100644 --- a/src/components/xcpl_comps/xshare/nuopc/dead_methods_mod.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 @@ -11,8 +11,8 @@ module dead_methods_mod use ESMF , only : ESMF_GeomType_Flag, ESMF_FieldStatus_Flag use ESMF , only : ESMF_Mesh, ESMF_MeshGet use ESMF , only : ESMF_GEOMTYPE_MESH, ESMF_GEOMTYPE_GRID, ESMF_FIELDSTATUS_COMPLETE - use ESMF , only : ESMF_Clock, ESMF_ClockCreate, ESMF_ClockGet, ESMF_ClockSet - use ESMF , only : ESMF_ClockPrint, ESMF_ClockAdvance + use ESMF , only : ESMF_Clock, ESMF_ClockCreate, ESMF_ClockGet, ESMF_ClockSet + use ESMF , only : ESMF_ClockPrint, ESMF_ClockAdvance use ESMF , only : ESMF_Alarm, ESMF_AlarmCreate, ESMF_AlarmGet, ESMF_AlarmSet use ESMF , only : ESMF_Calendar, ESMF_CALKIND_NOLEAP, ESMF_CALKIND_GREGORIAN use ESMF , only : ESMF_Time, ESMF_TimeGet, ESMF_TimeSet @@ -22,7 +22,7 @@ module dead_methods_mod use NUOPC_Model , only : NUOPC_ModelGet use shr_kind_mod , only : r8 => shr_kind_r8, cl=>shr_kind_cl, cs=>shr_kind_cs use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_setlogunit, shr_file_getLogUnit + use shr_log_mod , only : shr_log_setlogunit, shr_log_getLogUnit implicit none private @@ -34,7 +34,7 @@ module dead_methods_mod public :: state_getscalar public :: state_setscalar public :: state_diagnose - public :: alarmInit + public :: alarmInit public :: chkerr private :: timeInit @@ -61,7 +61,7 @@ module dead_methods_mod optMonthly = "monthly" , & optYearly = "yearly" , & optDate = "date" , & - optIfdays0 = "ifdays0" + optIfdays0 = "ifdays0" ! Module data integer, parameter :: SecPerDay = 86400 ! 
Seconds per day @@ -127,7 +127,8 @@ end subroutine get_component_instance !=============================================================================== subroutine set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) - + use ESMF, only : ESMF_GridCompGet, ESMF_LogWrite + use NUOPC, only: NUOPC_CompAttributeAdd, NUOPC_CompAttributeSet ! input/output variables type(ESMF_GridComp) :: gcomp logical, intent(in) :: mastertask @@ -136,8 +137,9 @@ subroutine set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) integer, intent(out) :: rc ! local variables - character(len=CL) :: diro + character(len=CL) :: diro, name character(len=CL) :: logfile + character(len=*), parameter :: subname ='('//__FILE__//': set_component_logging)' !----------------------------------------------------------------------- rc = ESMF_SUCCESS @@ -155,7 +157,18 @@ subroutine set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) logUnit = 6 endif - call shr_file_setLogUnit (logunit) + call shr_log_setLogUnit (logunit) + + call ESMF_GridCompGet(gcomp, name=name, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(trim(subname)//": setting logunit for component: "//trim(name), ESMF_LOGMSG_INFO) + + call NUOPC_CompAttributeAdd(gcomp, attrList=(/'logunit'/), rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompAttributeSet(gcomp, name='logunit',value=logunit, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + end subroutine set_component_logging @@ -422,7 +435,7 @@ subroutine field_getfldptr(field, fldptr1, fldptr2, rank, abort, rc) call ESMF_MeshGet(lmesh, numOwnedNodes=nnodes, numOwnedElements=nelements, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return if (nnodes == 0 .and. nelements == 0) lrank = 0 - else + else call ESMF_LogWrite(trim(subname)//": ERROR geomtype not supported ", & ESMF_LOGMSG_INFO, rc=rc) rc = ESMF_FAILURE @@ -783,7 +796,7 @@ end subroutine alarmInit subroutine timeInit( Time, ymd, cal, tod, rc) - ! Create the ESMF_Time object corresponding to the given input time, + ! Create the ESMF_Time object corresponding to the given input time, ! given in YMD (Year Month Day) and TOD (Time-of-day) format. ! Set the time by an integer as YYYYMMDD and integer seconds in the day diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 new file mode 100644 index 00000000000..ee3ca6b682e --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 @@ -0,0 +1,346 @@ +module dead_nuopc_mod + + use ESMF , only : ESMF_Gridcomp, ESMF_State, ESMF_StateGet + use ESMF , only : ESMF_Clock, ESMF_Time, ESMF_TimeInterval, ESMF_Alarm + use ESMF , only : ESMF_GridCompGet, ESMF_ClockGet, ESMF_ClockSet, ESMF_ClockAdvance, ESMF_AlarmSet + use ESMF , only : ESMF_SUCCESS, ESMF_LogWrite, ESMF_LOGMSG_INFO, ESMF_METHOD_INITIALIZE + use ESMF , only : ESMF_FAILURE, ESMF_LOGMSG_ERROR + use ESMF , only : ESMF_VMGetCurrent, ESMF_VM, ESMF_VMBroadcast, ESMF_VMGet + use ESMF , only : ESMF_VM, ESMF_VMGetCurrent, ESMF_VmGet + use ESMF , only : operator(/=), operator(==), operator(+) + use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs + use shr_sys_mod , only : shr_sys_abort + use dead_methods_mod , only : chkerr, alarmInit + + implicit none + private + + public :: dead_read_inparms + public :: ModelInitPhase + public :: ModelSetRunClock + public :: fld_list_add + public :: fld_list_realize + + ! 
!PUBLIC DATA MEMBERS:
+  type fld_list_type
+     character(len=128) :: stdname
+     integer :: ungridded_lbound = 0
+     integer :: ungridded_ubound = 0
+  end type fld_list_type
+  public :: fld_list_type
+
+  integer, parameter, public :: fldsMax = 100
+  integer :: dbug_flag = 0
+  character(*), parameter :: u_FILE_u = &
+       __FILE__
+
+!===============================================================================
+contains
+!===============================================================================
+
+  subroutine dead_read_inparms(model, inst_suffix, logunit, nxg, nyg)
+
+    ! input/output variables
+    character(len=*) , intent(in)  :: model
+    character(len=*) , intent(in)  :: inst_suffix ! char string associated with instance
+    integer          , intent(in)  :: logunit     ! logging unit number
+    integer          , intent(out) :: nxg         ! global dim i-direction
+    integer          , intent(out) :: nyg         ! global dim j-direction
+
+    ! local variables
+    type(ESMF_VM) :: vm
+    character(CL) :: fileName ! generic file name
+    integer       :: nunit    ! unit number
+    integer       :: unitn    ! Unit for namelist file
+    integer       :: tmp(2)   ! array for broadcast
+    integer       :: localPet ! mpi id of current task in current context
+    integer       :: rc       ! return code
+    character(*), parameter :: F00 = "('(dead_read_inparms) ',8a)"
+    character(*), parameter :: F01 = "('(dead_read_inparms) ',a,a,4i8)"
+    character(*), parameter :: F03 = "('(dead_read_inparms) ',a,a,i8,a)"
+    character(*), parameter :: subName = "(dead_read_inparms) "
+    !-------------------------------------------------------------------------------
+
+    ! read the input parms (used to configure model)
+    call ESMF_VMGetCurrent(vm, rc=rc)
+    if (chkerr(rc,__LINE__,u_FILE_u)) return
+    call ESMF_VMGet(vm, localPet=localPet, rc=rc)
+    if (chkerr(rc,__LINE__,u_FILE_u)) return
+
+    nxg = -9999
+    nyg = -9999
+
+    if (localPet==0) then
+       open(newunit=unitn, file='x'//model//'_in'//trim(inst_suffix), status='old' )
+       read(unitn,*) nxg
+       read(unitn,*) nyg
+       close (unitn)
+    endif
+
+    tmp(1) = nxg
+    tmp(2) = nyg
+    call ESMF_VMBroadcast(vm, tmp, 2, 0, rc=rc)
+    nxg = tmp(1)
+    nyg = tmp(2)
+
+    if (localPet==0) then
+       write(logunit,*)' Read in X'//model//' input from file= x'//model//'_in'
+       write(logunit,F00) model
+       write(logunit,F00) model,' Model : ',model
+       write(logunit,F01) model,' NGX : ',nxg
+       write(logunit,F01) model,' NGY : ',nyg
+       write(logunit,F00) model,' inst_suffix : ',trim(inst_suffix)
+       write(logunit,F00) model
+    end if
+
+  end subroutine dead_read_inparms
+
+  !===============================================================================
+  subroutine fld_list_add(num, fldlist, stdname, ungridded_lbound, ungridded_ubound)
+
+    ! input/output variables
+    integer             , intent(inout) :: num
+    type(fld_list_type) , intent(inout) :: fldlist(:)
+    character(len=*)    , intent(in)    :: stdname
+    integer, optional   , intent(in)    :: ungridded_lbound
+    integer, optional   , intent(in)    :: ungridded_ubound
+
+    ! local variables
+    character(len=*), parameter :: subname='(dead_nuopc_mod:fld_list_add)'
+    !-------------------------------------------------------------------------------
+
+    ! Set up a list of field information
+    num = num + 1
+    if (num > fldsMax) then
+       call ESMF_LogWrite(trim(subname)//": ERROR num > fldsMax "//trim(stdname), &
+            ESMF_LOGMSG_ERROR, line=__LINE__, file=__FILE__)
+       return
+    endif
+    fldlist(num)%stdname = trim(stdname)
+
+    if (present(ungridded_lbound) .and. 
present(ungridded_ubound)) then + fldlist(num)%ungridded_lbound = ungridded_lbound + fldlist(num)%ungridded_ubound = ungridded_ubound + end if + + end subroutine fld_list_add + + !=============================================================================== + subroutine fld_list_realize(state, fldList, numflds, flds_scalar_name, flds_scalar_num, mesh, tag, rc) + + use NUOPC , only : NUOPC_IsConnected, NUOPC_Realize + use ESMF , only : ESMF_MeshLoc_Element, ESMF_FieldCreate, ESMF_TYPEKIND_R8 + use ESMF , only : ESMF_MAXSTR, ESMF_Field, ESMF_State, ESMF_Mesh, ESMF_StateRemove + use ESMF , only : ESMF_LogFoundError, ESMF_LOGMSG_INFO, ESMF_SUCCESS + use ESMF , only : ESMF_LogWrite, ESMF_LOGMSG_ERROR, ESMF_LOGERR_PASSTHRU + + type(ESMF_State) , intent(inout) :: state + type(fld_list_type) , intent(in) :: fldList(:) + integer , intent(in) :: numflds + character(len=*) , intent(in) :: flds_scalar_name + integer , intent(in) :: flds_scalar_num + character(len=*) , intent(in) :: tag + type(ESMF_Mesh) , intent(in) :: mesh + integer , intent(inout) :: rc + + ! local variables + integer :: n + type(ESMF_Field) :: field + character(len=80) :: stdname + integer :: gridtoFieldMap=2 + character(len=*),parameter :: subname='(dead_nuopc_mod:fld_list_realize)' + ! ---------------------------------------------- + + rc = ESMF_SUCCESS + + do n = 1, numflds + stdname = fldList(n)%stdname + if (NUOPC_IsConnected(state, fieldName=stdname)) then + if (stdname == trim(flds_scalar_name)) then + call ESMF_LogWrite(trim(subname)//trim(tag)//" Field = "//trim(stdname)//" is connected on root pe", & + ESMF_LOGMSG_INFO) + ! Create the scalar field + call SetScalarField(field, flds_scalar_name, flds_scalar_num, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + else + call ESMF_LogWrite(trim(subname)//trim(tag)//" Field = "//trim(stdname)//" is connected using mesh", & + ESMF_LOGMSG_INFO) + ! Create the field + if (fldlist(n)%ungridded_lbound > 0 .and. fldlist(n)%ungridded_ubound > 0) then + field = ESMF_FieldCreate(mesh, ESMF_TYPEKIND_R8, name=stdname, meshloc=ESMF_MESHLOC_ELEMENT, & + ungriddedLbound=(/fldlist(n)%ungridded_lbound/), & + ungriddedUbound=(/fldlist(n)%ungridded_ubound/), & + gridToFieldMap=(/gridToFieldMap/), rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + else + field = ESMF_FieldCreate(mesh, ESMF_TYPEKIND_R8, name=stdname, meshloc=ESMF_MESHLOC_ELEMENT, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + end if + endif + + ! NOW call NUOPC_Realize + call NUOPC_Realize(state, field=field, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + else + if (stdname /= trim(flds_scalar_name)) then + call ESMF_LogWrite(subname // trim(tag) // " Field = "// trim(stdname) // " is not connected.", & + ESMF_LOGMSG_INFO) + call ESMF_StateRemove(state, (/stdname/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + end if + end if + end do + + contains !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + subroutine SetScalarField(field, flds_scalar_name, flds_scalar_num, rc) + ! ---------------------------------------------- + ! create a field with scalar data on the root pe + ! 
---------------------------------------------- + + use ESMF, only : ESMF_Field, ESMF_DistGrid, ESMF_Grid + use ESMF, only : ESMF_DistGridCreate, ESMF_GridCreate, ESMF_LogFoundError, ESMF_LOGERR_PASSTHRU + use ESMF, only : ESMF_FieldCreate, ESMF_GridCreate, ESMF_TYPEKIND_R8 + + type(ESMF_Field) , intent(inout) :: field + character(len=*) , intent(in) :: flds_scalar_name + integer , intent(in) :: flds_scalar_num + integer , intent(inout) :: rc + + ! local variables + type(ESMF_Distgrid) :: distgrid + type(ESMF_Grid) :: grid + character(len=*), parameter :: subname='(dead_nuopc_mod:SetScalarField)' + ! ---------------------------------------------- + + rc = ESMF_SUCCESS + + ! create a DistGrid with a single index space element, which gets mapped onto DE 0. + distgrid = ESMF_DistGridCreate(minIndex=(/1/), maxIndex=(/1/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + + grid = ESMF_GridCreate(distgrid, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + + field = ESMF_FieldCreate(name=trim(flds_scalar_name), grid=grid, typekind=ESMF_TYPEKIND_R8, & + ungriddedLBound=(/1/), ungriddedUBound=(/flds_scalar_num/), gridToFieldMap=(/2/), rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return + + end subroutine SetScalarField + + end subroutine fld_list_realize + + !=============================================================================== + subroutine ModelInitPhase(gcomp, importState, exportState, clock, rc) + + use NUOPC, only : NUOPC_CompFilterPhaseMap + + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + ! Switch to IPDv01 by filtering all other phaseMap entries + call NUOPC_CompFilterPhaseMap(gcomp, ESMF_METHOD_INITIALIZE, acceptStringList=(/"IPDv01p"/), rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine ModelInitPhase + + !=============================================================================== + subroutine ModelSetRunClock(gcomp, rc) + + use ESMF , only : ESMF_ClockGetAlarmList, ESMF_ALARMLIST_ALL + use NUOPC_Model , only : NUOPC_ModelGet + use NUOPC , only : NUOPC_CompAttributeGet + + ! input/output variables + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + + ! local variables + type(ESMF_Clock) :: mclock, dclock + type(ESMF_Time) :: mcurrtime, dcurrtime + type(ESMF_Time) :: mstoptime + type(ESMF_TimeInterval) :: mtimestep, dtimestep + character(len=256) :: cvalue + character(len=256) :: restart_option ! Restart option units + integer :: restart_n ! Number until restart interval + integer :: restart_ymd ! Restart date (YYYYMMDD) + type(ESMF_ALARM) :: restart_alarm + character(len=128) :: name + integer :: alarmcount + character(len=*),parameter :: subname='dead_nuopc_mod:(ModelSetRunClock) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + ! 
query the Component for its clocks + call NUOPC_ModelGet(gcomp, driverClock=dclock, modelClock=mclock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_ClockGet(dclock, currTime=dcurrtime, timeStep=dtimestep, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_ClockGet(mclock, currTime=mcurrtime, timeStep=mtimestep, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! force model clock currtime and timestep to match driver and set stoptime + !-------------------------------- + + mstoptime = mcurrtime + dtimestep + call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + !-------------------------------- + ! set restart alarm + !-------------------------------- + + call ESMF_ClockGetAlarmList(mclock, alarmlistflag=ESMF_ALARMLIST_ALL, alarmCount=alarmCount, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + if (alarmCount == 0) then + + call ESMF_GridCompGet(gcomp, name=name, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_LogWrite(subname//'setting alarms for' // trim(name), ESMF_LOGMSG_INFO) + + call NUOPC_CompAttributeGet(gcomp, name="restart_option", value=restart_option, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompAttributeGet(gcomp, name="restart_n", value=cvalue, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) restart_n + + call NUOPC_CompAttributeGet(gcomp, name="restart_ymd", value=cvalue, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) restart_ymd + + call alarmInit(mclock, restart_alarm, restart_option, & + opt_n = restart_n, & + opt_ymd = restart_ymd, & + RefTime = mcurrTime, & + alarmname = 'alarm_restart', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_AlarmSet(restart_alarm, clock=mclock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end if + + !-------------------------------- + ! 
Advance model clock to trigger alarms then reset model clock back to currtime + !-------------------------------- + + call ESMF_ClockAdvance(mclock,rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine ModelSetRunClock + +end module dead_nuopc_mod diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib_cmake b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib_cmake new file mode 120000 index 00000000000..7766f77f5bc --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib_cmake @@ -0,0 +1 @@ +../../../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml new file mode 100755 index 00000000000..1ea9dc3a5d8 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +""" +build data model library +""" + +import sys, os + +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." +) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) + +from standard_script_setup import * +from CIME.buildnml import build_xcpl_nml, parse_input +from CIME.case import Case + + +def buildnml(case, caseroot, compname): + if compname != "xwav": + raise AttributeError + build_xcpl_nml(case, caseroot, compname) + + +def _main_func(): + caseroot = parse_input(sys.argv) + with Case(caseroot) as case: + buildnml(case, caseroot, "xwav") + + +if __name__ == "__main__": + _main_func() diff --git a/src/components/xcpl_comps/xwav/cime_config/config_component.xml b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/config_component.xml similarity index 100% rename from src/components/xcpl_comps/xwav/cime_config/config_component.xml rename to CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/config_component.xml diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 new file mode 100644 index 00000000000..aa4d982e530 --- /dev/null +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 @@ -0,0 +1,465 @@ +module wav_comp_nuopc + + !---------------------------------------------------------------------------- + ! 
This is the NUOPC cap for XWAV + !---------------------------------------------------------------------------- + + use ESMF + use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize + use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise + use NUOPC_Model , only : model_routine_SS => SetServices + use NUOPC_Model , only : model_label_Advance => label_Advance + use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock + use NUOPC_Model , only : model_label_Finalize => label_Finalize + use NUOPC_Model , only : NUOPC_ModelGet, SetVM + use shr_sys_mod , only : shr_sys_abort + use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit + use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck + use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance + use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock + use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type + + implicit none + private ! except + + public :: SetServices + public :: SetVM + !-------------------------------------------------------------------------- + ! Private module data + !-------------------------------------------------------------------------- + + character(len=CL) :: flds_scalar_name = '' + integer :: flds_scalar_num = 0 + integer :: flds_scalar_index_nx = 0 + integer :: flds_scalar_index_ny = 0 + integer :: flds_scalar_index_nextsw_cday = 0 + + integer :: fldsToWav_num = 0 + integer :: fldsFrWav_num = 0 + type (fld_list_type) :: fldsToWav(fldsMax) + type (fld_list_type) :: fldsFrWav(fldsMax) + integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost + + type(ESMF_Mesh) :: mesh + integer :: nxg ! global dim i-direction + integer :: nyg ! global dim j-direction + integer :: my_task ! my task in mpi communicator mpicom + integer :: inst_index ! number of current instance (ie. 1) + character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") + integer :: logunit ! logging unit number + logical :: mastertask + integer :: dbug = 1 + character(*),parameter :: modName = "(xwav_comp_nuopc)" + character(*),parameter :: u_FILE_u = & + __FILE__ + +!=============================================================================== +contains +!=============================================================================== + + subroutine SetServices(gcomp, rc) + + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! the NUOPC gcomp component will register the generic methods + call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! switching to IPD versions + call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & + userRoutine=ModelInitPhase, phase=0, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! 
set entry point for methods that require specific implementation + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & + userRoutine=InitializeAdvertise, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & + userRoutine=InitializeRealize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! attach specializing method(s) + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + end subroutine SetServices + + !=============================================================================== + subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + type(ESMF_VM) :: vm + character(CS) :: stdname + integer :: n + integer :: lsize ! local array size + integer :: shrlogunit ! original log unit + character(CL) :: cvalue + character(len=CL) :: logmsg + logical :: isPresent, isSet + character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call ESMF_VMGet(vm, localpet=my_task, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + mastertask = (my_task == 0) + + ! determine instance information + call get_component_instance(gcomp, inst_suffix, inst_index, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! set logunit and set shr logging to my log file + call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Initialize xwav + call dead_read_inparms('wav', inst_suffix, logunit, nxg, nyg) + + ! advertise import and export fields + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + flds_scalar_name = trim(cvalue) + call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. 
isSet) then + read(cvalue, *) flds_scalar_num + write(logmsg,*) flds_scalar_num + call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_nx + write(logmsg,*) flds_scalar_index_nx + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') + endif + + call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_scalar_index_ny + write(logmsg,*) flds_scalar_index_ny + call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + else + call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') + endif + + if (nxg /= 0 .and. nyg /= 0) then + + call fld_list_add(fldsFrWav_num, fldsFrWav, trim(flds_scalar_name)) + call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_lamult' ) + call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_ustokes' ) + call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_vstokes' ) + call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_hstokes' ) + + call fld_list_add(fldsToWav_num, fldsToWav, trim(flds_scalar_name)) + call fld_list_add(fldsToWav_num, fldsToWav, 'Sa_u' ) + call fld_list_add(fldsToWav_num, fldsToWav, 'Sa_v' ) + call fld_list_add(fldsToWav_num, fldsToWav, 'Sa_tbot' ) + call fld_list_add(fldsToWav_num, fldsToWav, 'Si_ifrac' ) + call fld_list_add(fldsToWav_num, fldsToWav, 'So_t' ) + call fld_list_add(fldsToWav_num, fldsToWav, 'So_u' ) + call fld_list_add(fldsToWav_num, fldsToWav, 'So_v' ) + call fld_list_add(fldsToWav_num, fldsToWav, 'So_bldepth' ) + + do n = 1,fldsFrWav_num + if (mastertask) write(logunit,*)'Advertising From Xwav ',trim(fldsFrWav(n)%stdname) + call NUOPC_Advertise(exportState, standardName=fldsFrWav(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + + do n = 1,fldsToWav_num + if(mastertask) write(logunit,*)'Advertising To Xwav ',trim(fldsToWav(n)%stdname) + call NUOPC_Advertise(importState, standardName=fldsToWav(n)%stdname, & + TransferOfferGeomObject='will provide', rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + enddo + end if + + call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Reset shr logging to original values + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeAdvertise + + !=============================================================================== + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState, exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! local variables + integer :: shrlogunit ! original log unit + character(ESMF_MAXSTR) :: cvalue ! 
config data + character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + ! Reset shr logging to my log file + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) + + ! generate the mesh + call NUOPC_CompAttributeGet(gcomp, name='mesh_wav', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + mesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + ! realize the actively coupled fields, now that a mesh is established + ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState + ! by replacing the advertised fields with the newly created fields of the same name. + call fld_list_realize( & + state=ExportState, & + fldlist=fldsFrWav, & + numflds=fldsFrWav_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':dwavExport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + call fld_list_realize( & + state=importState, & + fldList=fldsToWav, & + numflds=fldsToWav_num, & + flds_scalar_name=flds_scalar_name, & + flds_scalar_num=flds_scalar_num, & + tag=subname//':dwavImport',& + mesh=mesh, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! Pack export state + call State_SetExport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, flds_scalar_name, flds_scalar_num, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! diagnostics + if (dbug > 1) then + call State_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + + call shr_log_setLogUnit (shrlogunit) + + end subroutine InitializeRealize + + !=============================================================================== + subroutine ModelAdvance(gcomp, rc) + + ! input/output variables + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + + ! local variables + type(ESMF_Clock) :: clock + type(ESMF_State) :: exportState + integer :: shrlogunit ! original log unit + character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + call memcheck(subname, 3, mastertask) + + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) + + ! Pack export state + call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call state_setexport(exportState, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ! diagnostics + if (dbug > 1) then + call State_diagnose(exportState,subname//':ES',rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if ( mastertask) then + call log_clock_advance(clock, 'XWAV', logunit, rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + endif + endif + + call shr_log_setLogUnit (shrlogunit) + + end subroutine ModelAdvance + + !=============================================================================== + subroutine state_setexport(exportState, rc) + + ! input/output variables + type(ESMF_State) , intent(inout) :: exportState + integer , intent(out) :: rc + + ! 
local variables
+    integer :: nfstart, ubound
+    integer :: n, nf, nind
+    real(r8), pointer :: lat(:)
+    real(r8), pointer :: lon(:)
+    integer :: spatialDim
+    integer :: numOwnedElements
+    real(R8), pointer :: ownedElemCoords(:)
+    !--------------------------------------------------
+
+    rc = ESMF_SUCCESS
+
+    call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc)
+    if (ChkErr(rc,__LINE__,u_FILE_u)) return
+    allocate(ownedElemCoords(spatialDim*numOwnedElements))
+    call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords, rc=rc)
+    if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+    allocate(lon(numownedElements))
+    allocate(lat(numownedElements))
+    do n = 1,numownedElements
+       lon(n) = ownedElemCoords(2*n-1)
+       lat(n) = ownedElemCoords(2*n)
+    end do
+
+    nfstart = 0 ! for fields that have ubound > 0
+    do nf = 2,fldsFrWav_num ! Start from index 2 in order to skip the scalar field
+       ubound = fldsFrWav(nf)%ungridded_ubound
+       if (ubound == 0) then
+          call field_setexport(exportState, trim(fldsFrWav(nf)%stdname), lon, lat, nf=nf, rc=rc)
+          if (chkerr(rc,__LINE__,u_FILE_u)) return
+       else
+          nfstart = nfstart + nf + ubound - 1
+          do nind = 1,ubound
+             call field_setexport(exportState, trim(fldsFrWav(nf)%stdname), lon, lat, nf=nfstart+nind-1, &
+                  ungridded_index=nind, rc=rc)
+             if (chkerr(rc,__LINE__,u_FILE_u)) return
+          end do
+       end if
+    end do
+
+    deallocate(ownedElemCoords)
+    deallocate(lon)
+    deallocate(lat)
+
+  end subroutine state_setexport
+
+  !===============================================================================
+
+  subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc)
+
+    use shr_const_mod , only : pi=>shr_const_pi
+
+    ! input/output variables
+    type(ESMF_State)  , intent(inout) :: exportState
+    character(len=*)  , intent(in)    :: fldname
+    real(r8)          , intent(in)    :: lon(:)
+    real(r8)          , intent(in)    :: lat(:)
+    integer           , intent(in)    :: nf
+    integer, optional , intent(in)    :: ungridded_index
+    integer           , intent(out)   :: rc
+
+    ! 
local variables + integer :: i, ncomp + type(ESMF_Field) :: lfield + real(r8), pointer :: data1d(:) + real(r8), pointer :: data2d(:,:) + !-------------------------------------------------- + + rc = ESMF_SUCCESS + + call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + + ncomp = 7 + if (present(ungridded_index)) then + call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (gridToFieldMap == 1) then + do i = 1,size(data2d, dim=1) + data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + else if (gridToFieldMap == 2) then + do i = 1,size(data2d, dim=2) + data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + else + call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + do i = 1,size(data1d) + data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & + sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) + end do + end if + + end subroutine field_setexport + + !=============================================================================== + subroutine ModelFinalize(gcomp, rc) + type(ESMF_GridComp) :: gcomp + integer, intent(out) :: rc + !------------------------------------------------------------------------------- + + rc = ESMF_SUCCESS + + if (mastertask) then + write(logunit,*) + write(logunit,*) 'xwav: end of main integration loop' + write(logunit,*) + end if + end subroutine ModelFinalize + +end module wav_comp_nuopc diff --git a/src/share/timing/CMakeLists.txt b/CIME/non_py/src/timing/CMakeLists.txt similarity index 100% rename from src/share/timing/CMakeLists.txt rename to CIME/non_py/src/timing/CMakeLists.txt diff --git a/src/share/timing/COPYING b/CIME/non_py/src/timing/COPYING similarity index 100% rename from src/share/timing/COPYING rename to CIME/non_py/src/timing/COPYING diff --git a/CIME/non_py/src/timing/ChangeLog b/CIME/non_py/src/timing/ChangeLog new file mode 100644 index 00000000000..619565f0224 --- /dev/null +++ b/CIME/non_py/src/timing/ChangeLog @@ -0,0 +1,238 @@ +timing_180912: Moved prefix support from perf_mod.F90 to gptl.c + and also added support for setting prefixes in + threaded regions. + [Patrick Worley] +timing_180911: Moved detail to end of timer name when specify + profile_add_detail (so that will not interfere with + planned move of the prefix logic into gptl.c) + [Patrick Worley] +timing_180910: Removed addition of double quotes to timer names in + perf_mod.F90 and added this as an output option in + gptl.c (so internally the names do not have the quotes) + [Patrick Worley] +timing_180822: Fixed perf_mod.F90 bug that prevents PAPI derived events + from being recognized. + [Patrick Worley] +timing_180731: Refactored implementation of append/write modes; + collected and output "on" events for global statistics + [Patrick Worley] +timing_180730: Added support for setting GPTLmaxthreads. Cleaned up white space. + Added SEQUENTIAL to fortran open, to avoid problems on some systems. + Added timing overhead measurement to perf_mod. Fixed errors in + f_wrappers.c in definition of gptlpr_query_append and + gptlpr_XXX_write. + [Patrick Worley (some from Jim Rosinksi)] +timing_180403: Added GPTLstartstop_val(f) to gptl.h, to provide explicit + typing and eliminate compile-time warning for some compilers. 
+ Also do not define the CPP tokens HAVE_COMM_F2C and + HAVE_GETTIMEOFDAY in private.h if they have already been + defined, also eliminating compile-time warnings. + [Patrick Worley] +timing_171028: Backported GPTLstartstop_val from a more recent version + of GPTL, added a callcount parameter, and renamed it + GPTLstartstop_vals. Also added a version for non-null + terminated timing event names (GPTLstartstop_valsf). + Added t_startstop_valsf to perf_mod. Also replaced + all strncpy calls in f_wrapper.c, just to be safe. + [Patrick Worley (but primarily Jim Rosinksi)] +timing_171027: Mitigated against nonmonotonic timing calls by + setting negative deltas to zero in gptl.c . + [Patrick Worley] +timing_160816: Added quotes to timing event names in t_startf and t_stopf + before calling GPTL routines. + [Patrick Worley and Sean Patrick Santos] +timing_161207: Replaced strncpy and snprintf where applied to + non-null-terminated strings, to avoid memory issues + when strncpy and snprintf implementations use strnlen + to check validity of passed in string length parameter. + (Why this causes problems is still a mystery.) + [Patrick Worley and Gautam Bisht] +timing_160320: Added routines t_set_prefixf and t_unset_prefixf. + Setting the prefix adds this to the beginning of all subsequent + timer event names (defined in t_startf/t_stopf). + Also doubling default hash index tablesize to 2048. + [Patrick Worley] +timing_160124: Added option to prefix timer names with detail level. This is + controlled by a new namelist variable (profile_add_detail). + The default is to not enable this option. It is meant to be + used for inspecting the profile detail definitions and + associated logic. + [Patrick Worley] +timing_150903: Changed API to be compatible with NCAR CIME version of timing + library: (a) changed handle argument from integer8 to integer + in t_startf and t_stopf, and disabled use of handles in these + routines until gptl.c can be updated; (b) added MaxThreads + optional argument to t_initf, though it does not do anything + yet. + [Patrick Worley] +timing_150518: Disabled abort when calling GPTL routines before GPTLinitialize + (so can use with Chombo library, for example); changed top + level default from nanotimer to MPI_WTIME. + [Patrick Worley] +timing_150327: Added option to more accurately measure measurement overhead + (incurring additional overhead, so not on by default). + [Patrick Worley] +timing_150217: Added support for enabling/disabling start/stop timers from + perf_mod even when calling GPTL routines directly from C/C++ + libraries; increased maximum timer name length. + [Patrick Worley] +timing_141119: Enabled cmake build of timing library. + [Jayesh Krishna] +timing_140805: Disabled GPTL autoinstrumentation, as this conflicts with the + VampirTrace tool. (We do not use the autoinstrumentation + capability.) [Patrick Worley, from B. Jamroz] +timing_140416: Changed Makefile so that .mods and the static library are copied to + LIBROOT +timing_140317: Modified Makefile to deal with shared mpi-serial builds +timing_131108: Added memory retrieval commands for BG/Q [S Mickelson] +timing_131023: Added explicit include path for gptl.h [J Edwards] +timing_130827: added routines supporting non-null terminated timer labels, for use with + with C++ std:string (and more efficient usage with Fortran); also CMake + logic fixes + [Patrick Worley] +timing_130506: Copy all modules to the include directory on install +timing_130417: Made nano time the default timer if available. 
+timing_130316: Changed declarations of functions used in qsort in gptl.c, to eliminate + error with Cray compiler (and warnings from other compilers) + [Patrick Worley] +timing_130214: NAG port: Put mpif.h include before "save", and don't use + "abort" and "flush" extensions for NAG. [Sean Patrick Santos] +timing_120921: Add code for cmake build, should not have any affect otherwise +timing_120731: Correction in Makefile for serial build [Jim Edwards] +timing_120728: Replace process subset optional parameter in t_prf with + outpe_thispe optional parameter. Change def_perf_outpe_num to 0. + [Patrick Worley] +timing_120717: Retain timestamp on cp in Makefile [Jim Edwards] +timing_120710: Correct issue in Makefile [Jim Edwards] +timing_120709: Change for BGP to measure on compute nodes rather than IO nodes only, + minor Change in Makefile so that gptl can build seperate from csm_share + in cesm [Jim Edwards] +timing_120512: Bug fix in global statistics logic for when a thread has no events + to contribute to the merge (mods to gptl.c) + [Patrick Worley] +timing_120419: Minor changes for mpi-serial compile (jedwards) +timing_120408: Make HAVE_COMM_F2C default to true. (jedwards) +timing_120110: Update to GPTL 4.1 source (mods to gptl.c and GPTLprint_memusage) + [Jim Rosinski (GPTL 4.1), Patrick Worley] +timing_120109: Bug fix (adding shr_kind_i8 to shr_kind_mod list) +timing_111205: Update to gptl 4.0 (introducing CESM customizations); + support for handles in t_startf/t_stopf; + support for restricting output to explicitly named process subsets + [Jim Rosinski (gptl 4.0), Patrick Worley] +timing_111101: Workaround for mpi_rsend issue on cray/gemini +timing_110928: Add a Makefile and build as a library usable by mct and pio +timing_101215: No changes from previous tag other than updating Changelog +timing_101210: Fix interface to cesm build system, add workaround for xlf bug +timing_101202: updated get_memusage and print_memusage from GPTL version 3.7; adds + improved support for MacOS and SLASHPROC + [Jim Rosinski, Chuck Bardeen (integrated by P. Worley)] +timing_091021: update to GPTL version 3.5; rewrite of GPTLpr_summary: much faster, merging + events from all processes and all threads (not just process 0/thread 0); + miscellaneous fixes + [Jim Rosinski (gptl 3.5), Joseph Singh, Patrick Worley] +timing_090929: added explicit support for the GPTL-native token HAVE_MPI (indicating + presence of MPI library) + [Patrick Worley] +timing_081221: restore default assumption that gettimeofday available +timing_081028: bug fix in include order in gptl_papi.c +timing_081026: change in output format to make postprocessing simpler +timing_081024: support for up to one million processes and writing timing files to + subdirectories +timing_081017: updated to gptl version 3_4_2. Changed some defaults. + [Jim Rosinski, Patrick Worley] +timing_080629: added optional parameters perf_outpe_num and perf_outpe_stride to t_prf. + These are used to override the user specified values for timing data + written out before the end of a simulation. 
+ [Patrick Worley] +timing_071213: changed default to disable inline keyword; changed global statistics + logic to avoid problems at scale; moved shr and CAM routine equivalencies + to a new module (in perf_utils.F90); added t_getLogUnit/t_setLogUnit + routines to control Log output in same way as shr_file_get/setLogUnit; + modified GPTLpr logic to support output of timing data during a run + [Patrick Worley] +timing_071023: updated to gptl version 2.16, added support for output of global + statistics; removed dependencies on shr and CAM routines; renamed + gptlutil.c to GPTLutil.c + [Patrick Worley, Jim Rosinski] +timing_071019: modified namelist logic to abort if try to set unknown namelist parameters; + changed default number of reporting processes to 1; + reversed meaning and changed names of CPP tokens to NO_C99_INLINE and NO_VPRINTF + [Patrick Worley] +timing_071010: modified gptl.c to remove the 'inline' specification unless the + CPP token C99 is defined. + [Patrick Worley] +timing_070810: added ChangeLog + updated to latest version of GPTL (from Jim Rosinski) + modified perf_mod.F90: + - added perf_outpe_num and perf_outpe_stride to perf_inparm + namelist to control which processes output timing data + - added perf_papi_enable to perf_inparm namelist to enable + PAPI counters + - added papi_inparm namelist and papi_ctr1,2,3,4 namelist + parameters to specify PAPI counters + [Patrick Worley, Jim Rosinski] +timing_070525: bug fix in gptl.c + - unitialized pointer, testing for null pter + before traversing + [Patrick Worley] +timing_070328: modified perf_mod.F90 + - deleted HIDE_MPI cpp token + [Erik Kluzek] +timing_070327: bug fixes in gptl.c + - testing for null pters before traversing + links; added missing type declaration to GPTLallocate for sum + bug fixes in perf_mod.F90 + - fixed OMP-related logic, modified settings reporting, + modified to work when namelist input is + missing; moved timer depth logic back into gptl.c + [Patrick Worley] +timing_070308: added perf_mod.F90 + - defines all t_xxx entry points - calling gptlxxx directly + and removing all external gptlxxx dependencies, + added detail option as an alternative way to disable + event timing, added runtime selection of timing_disable, + perf_timer, timer_depth_limit, timing_detail_limit, + timing_barrier, perf_single_file via namelist parameters + modified f_wrappers.c + - replaced all t_xxx entry points with gptlxxx entry points, + added new gptlxxx entry points, deleted _fcd support + modified gptl.c + - deleted DISABLE_TIMERS cpp token, modified GPTLpr call + and logic to move some of support for concatenating timing + output into a single file to perf_mod.F90 + modified gptl.h + - exposed gptlxxx entry points and to add support for choice + of GPTL timer + modified gptl.inc + - removed t_xxx entry points and expose gptlxxx entry points + [Patrick Worley] +timing_061207: modified gptl.c + - improved event output ordering + [Jim Edwards] +timing_061124: modified gptl.c + - modified GPTLpr to add option to concatenate + all timing data in a single output file, added GPTL_enable + and GPTL_disable as runtime control of event timing, + process 0-only reporting of timing options - unless DEBUG + cpp token defined + modified gptl.h + - redefined GPTLpr parameters + modified f_wrappers.c + - added t_enablef and t_disablef to call GPTL_enable and + GPTL_disable, added t_pr_onef, added string.h include + bug fix in f_wrappers.c + - changed character string size declaration from int to size_t + bug fix in gptl_papi.c + - 
modified error message - from Jim Edwards + modified private.h + - increased maximum event name length + [Patrick Worley] +timing_061028: modified f_wrappers.c + - deleted dependency on cfort.h + [Patrick Worley] +timing_060524: modified f_wrappers.c + - added support for CRAY cpp token and fixed routine + type declarations + [Patrick Worley] +timing_051212: original subversion version + - see CAM ChangeLog for earlier history diff --git a/CIME/non_py/src/timing/GPTLget_memusage.c b/CIME/non_py/src/timing/GPTLget_memusage.c new file mode 100644 index 00000000000..db527401b2d --- /dev/null +++ b/CIME/non_py/src/timing/GPTLget_memusage.c @@ -0,0 +1,196 @@ +/* +** $Id: get_memusage.c,v 1.10 2010-11-09 19:08:53 rosinski Exp $ +** +** Author: Jim Rosinski +** Credit to Chuck Bardeen for MACOS section (__APPLE__ ifdef) +** +** get_memusage: +** +** Designed to be called from Fortran, returns information about memory +** usage in each of 5 input int* args. On Linux read from the /proc +** filesystem because getrusage() returns placebos (zeros). Return -1 for +** values which are unavailable or ambiguous on a particular architecture. +** Reported numbers are in kilobytes. +** +** Return value: 0 = success +** -1 = failure +*/ + +#include +#include "gptl.h" /* additional cpp defs and function prototypes */ +#include + +/* _AIX is automatically defined when using the AIX C compilers */ +#ifdef _AIX +#include +#endif + +#ifdef IRIX64 +#include +#endif + +#ifdef HAVE_SLASHPROC + +#include +#include +#include + +#elif (defined __APPLE__) + +#include +#include +#include + +#endif + +#ifdef BGP + +#include +#include +#include +#include +#define Personality _BGP_Personality_t + +#endif + +#ifdef __bgq__ + +#include +#include + +#endif + +#define PRINT_MEMUSAGE 0 + +int GPTLget_memusage (int *size, int *rss, int *share, int *text, int *datastack) +{ +#if defined (BGP) + long long alloc, total; + int node_config; + struct mallinfo m; + Personality pers; + + /* memory available */ + Kernel_GetPersonality(&pers, sizeof(pers)); + total = BGP_Personality_DDRSizeMB(&pers); + + node_config = BGP_Personality_processConfig(&pers); + if (node_config == _BGP_PERS_PROCESSCONFIG_VNM) total /= 4; + else if (node_config == _BGP_PERS_PROCESSCONFIG_2x2) total /= 2; + total *= 1024; // in KB + + /* total memory used - heap only (not static memory)*/ + *size = total; + + m = mallinfo(); + alloc = m.hblkhd + m.uordblks; + + *rss = alloc; + *share = -1; + *text = -1; + *datastack = -1; + return 0; + +#elif (defined __bgq__) + uint64_t heap, shared, stack; + + Kernel_GetMemorySize(KERNEL_MEMSIZE_HEAP, &heap); + Kernel_GetMemorySize(KERNEL_MEMSIZE_SHARED, &shared); + Kernel_GetMemorySize(KERNEL_MEMSIZE_STACK, &stack); + + *size = heap/1024; + *rss = heap/1024; + *share = shared/1024; + *text = -1; + *datastack = stack/1024; + return 0; + +#elif (defined HAVE_SLASHPROC) + FILE *fd; /* file descriptor for fopen */ + int pid; /* process id */ + char file[19]; /* full path to file in /proc */ + int dum; /* placeholder for unused return arguments */ + int ret; /* function return value */ + static int pg_sz = -1; /* page size */ + + /* + ** The file we want to open is /proc//statm + */ + + pid = (int) getpid (); + if (pid <= 0) { + fprintf (stderr, "get_memusage: pid %d is non-positive\n", pid); + return -1; + } + + sprintf (file, "/proc/%d/statm", pid); + if ((fd = fopen (file, "r")) < 0) { + fprintf (stderr, "get_memusage: bad attempt to open %s\n", file); + return -1; + } + + /* + ** Read the desired data from the /proc filesystem 
directly into the output + ** arguments, close the file and return. + */ + + ret = fscanf (fd, "%d %d %d %d %d %d %d", + size, rss, share, text, datastack, &dum, &dum); + ret = fclose (fd); + + // read page size once + if (pg_sz == -1) { + pg_sz = sysconf(_SC_PAGESIZE) / 1024; + } + + // convert from pages to KBs + *size = *size * pg_sz; + *rss = *rss * pg_sz; + *share = *share * pg_sz; + *text = *text * pg_sz; + *datastack = *datastack * pg_sz; +#if PRINT_MEMUSAGE + fprintf (stderr, "get_memusage: size=%d KB, rss=%d KB, share=%d KB, text=%d KB, datastack=%d KB, page_size=%d KB\n", + *size, *rss, *share, *text, *datastack, pg_sz); +#endif + + return 0; + +#elif (defined __APPLE__) + + FILE *fd; + char cmd[60]; + int pid = (int) getpid (); + + // returned values are in KBs + sprintf (cmd, "ps -o vsz -o rss -o tsiz -p %d | grep -v RSS", pid); + fd = popen (cmd, "r"); + + if (fd) { + fscanf (fd, "%d %d %d", size, rss, text); + *share = -1; + *datastack = -1; + (void) pclose (fd); + } + + return 0; + +#else + + struct rusage usage; /* structure filled in by getrusage */ + + if (getrusage (RUSAGE_SELF, &usage) < 0) + return -1; + + *size = -1; + *rss = usage.ru_maxrss; // in KBs + *share = -1; + *text = -1; + *datastack = -1; +#ifdef IRIX64 + *datastack = usage.ru_idrss + usage.ru_isrss; +#endif + return 0; + +#endif +} diff --git a/CIME/non_py/src/timing/GPTLprint_memusage.c b/CIME/non_py/src/timing/GPTLprint_memusage.c new file mode 100644 index 00000000000..be5e706ff07 --- /dev/null +++ b/CIME/non_py/src/timing/GPTLprint_memusage.c @@ -0,0 +1,155 @@ +/* +** $Id: print_memusage.c,v 1.13 2010-11-09 19:08:54 rosinski Exp $ +** +** Author: Jim Rosinski +** +** print_memusage: +** +** Prints info about memory usage of this process by calling get_memusage. 
+** +** Return value: 0 = success +** -1 = failure +*/ + +#include "gptl.h" +#include +#include +#include +#include +#ifdef __bgq__ +#include +#endif + +static int nearest_powerof2 (const int); +static int convert_to_mb = 1; /* true */ + +int GPTLprint_memusage (const char *str) +{ +#ifdef __bgq__ + uint64_t shared, persist, heapavail, stackavail, stack, heap, guard, mmap; + + Kernel_GetMemorySize(KERNEL_MEMSIZE_HEAP, &heap); + Kernel_GetMemorySize(KERNEL_MEMSIZE_SHARED, &shared); + Kernel_GetMemorySize(KERNEL_MEMSIZE_STACK, &stack); + Kernel_GetMemorySize(KERNEL_MEMSIZE_PERSIST, &persist); + Kernel_GetMemorySize(KERNEL_MEMSIZE_HEAPAVAIL, &heapavail); + Kernel_GetMemorySize(KERNEL_MEMSIZE_STACKAVAIL, &stackavail); + Kernel_GetMemorySize(KERNEL_MEMSIZE_GUARD, &guard); + Kernel_GetMemorySize(KERNEL_MEMSIZE_MMAP, &mmap); + + printf("%s Memory(MB): heap-alloc: %.2f, heap-avail: %.2f," + "stack-alloc: %.2f, stack-avail: %.2f," + "shared: %.2f, persist: %.2f, guard: %.2f, mmap: %.2f\n", str, + (double)heap/(1024*1024), (double)heapavail/(1024*1024), + (double)stack/(1024*1024), (double)stackavail/(1024*1024), + (double)shared/(1024*1024), (double)persist/(1024*1024), + (double)guard/(1024*1024), (double)mmap/(1024*1024)); + return 0; + +#else + int size, size2; /* process size (returned from OS) */ + int rss, rss2; /* resident set size (returned from OS) */ + int share, share2; /* shared data segment size (returned from OS) */ + int text, text2; /* text segment size (returned from OS) */ + int datastack, datastack2; /* data/stack size (returned from OS) */ + static int kbytesperblock = -1; /* convert to Kbytes (init to invalid) */ + static const int nbytes =1024*1024*1024;/* allocate 1 GB */ + void *space; /* allocated space */ + + setbuf(stdout, NULL); // don't buffer stdout, flush + if (GPTLget_memusage (&size, &rss, &share, &text, &datastack) < 0) { + printf ("GPTLprint_memusage: GPTLget_memusage failed.\n"); + return -1; + } + +#if (defined HAVE_SLASHPROC || defined __APPLE__) + if (kbytesperblock == -1) { + kbytesperblock = sysconf(_SC_PAGESIZE) / 1024; + printf ("GPTLprint_memusage: Using Kbytesperpage=%d\n", kbytesperblock); + } + + /* + ** Determine size in bytes of memory usage info presented by the OS. Method: allocate a + ** known amount of memory and see how much bigger the process becomes. + */ + + if (convert_to_mb && kbytesperblock == -1 && (space = malloc (nbytes))) { + memset (space, 0, nbytes); /* ensure the space is really allocated */ + if (GPTLget_memusage (&size2, &rss2, &share2, &text2, &datastack2) == 0) { + if (size2 > size) { + /* + ** Estimate bytes per block, then refine to nearest power of 2. + ** The assumption is that the OS presents memory usage info in + ** units that are a power of 2. + */ + kbytesperblock = (int) ((nbytes / (double) (size2 - size)) + 0.5); + kbytesperblock = nearest_powerof2 (kbytesperblock); + printf ("GPTLprint_memusage: Using Kbytesperblock=%d\n", kbytesperblock); + } else { + printf ("GPTLprint_memusage: highwater did not increase.\n"); + } + } else { + printf ("GPTLprint_memusage: call GPTLget_memusage failed.\n"); + } + free (space); + } + + if (kbytesperblock > 0) { + printf ("%s sysmem size=%.1f MB rss=%.1f MB share=%.1f MB text=%.1f MB datastack=%.1f MB\n", + str, size/1024., rss/1024., share/1024., text/1024., datastack/1024.); + } else { + printf ("%s sysmem size=%d rss=%d share=%d text=%d datastack=%d\n", + str, size, rss, share, text, datastack); + } + +#else + + /* + ** Use max rss as returned by getrusage. 
If someone knows how to + ** get the process size under AIX please tell me. + */ + + if (convert_to_mb) + printf ("%s max rss=%.1f MB\n", str, rss*1024.); + else + printf ("%s max rss=%d\n", str, rss); +#endif + + return 0; +#endif +} + +/* +** nearest_powerof2: +** Determine nearest integer which is a power of 2. +** Note: algorithm can't use anything that requires -lm because this is a library, +** and we don't want to burden the user with having to add extra libraries to the +** link line. +** +** Input arguments: +** val: input integer +** +** Return value: nearest integer to val which is a power of 2 +*/ + +static int nearest_powerof2 (const int val) +{ + int lower; /* power of 2 which is just less than val */ + int higher; /* power of 2 which is just more than val */ + int delta1; /* difference between val and lower */ + int delta2; /* difference between val and higher */ + + if (val < 2) + return 0; + + for (higher = 1; higher < val; higher *= 2) + lower = higher; + + delta1 = val - lower; + delta2 = higher - val; + + if (delta1 < delta2) + return lower; + else + return higher; +} diff --git a/CIME/non_py/src/timing/GPTLutil.c b/CIME/non_py/src/timing/GPTLutil.c new file mode 100644 index 00000000000..d9a1a93866a --- /dev/null +++ b/CIME/non_py/src/timing/GPTLutil.c @@ -0,0 +1,81 @@ +/* +** $Id: util.c,v 1.13 2010-01-01 01:34:07 rosinski Exp $ +*/ + +#include +#include +#include + +#include "private.h" + +static bool abort_on_error = false; /* flag says to abort on any error */ +static int max_error = 500; /* max number of error print msgs */ + +/* +** GPTLerror: error return routine to print a message and return a failure +** value. +** +** Input arguments: +** fmt: format string +** variable list of additional arguments for vfprintf +** +** Return value: -1 (failure) +*/ + +int GPTLerror (const char *fmt, ...) +{ + va_list args; + + va_start (args, fmt); + static int num_error = 0; + + if (fmt != NULL && num_error < max_error) { +#ifndef NO_VPRINTF + (void) vfprintf (stderr, fmt, args); +#else + (void) fprintf (stderr, "GPTLerror: no vfprintf: fmt is %s\n", fmt); +#endif + if (num_error == max_error) + (void) fprintf (stderr, "Truncating further error print now after %d msgs", + num_error); + ++num_error; + } + + va_end (args); + + if (abort_on_error) + exit (-1); + + return (-1); +} + +/* +** GPTLset_abort_on_error: User-visible routine to set abort_on_error flag +** +** Input arguments: +** val: true (abort on error) or false (don't) +*/ + +void GPTLset_abort_on_error (bool val) +{ + abort_on_error = val; +} + +/* +** GPTLallocate: wrapper utility for malloc +** +** Input arguments: +** nbytes: size to allocate +** +** Return value: pointer to the new space (or NULL) +*/ + +void *GPTLallocate (const int nbytes) +{ + void *ptr; + + if ( nbytes <= 0 || ! 
(ptr = malloc (nbytes))) + (void) GPTLerror ("GPTLallocate: malloc failed for %d bytes\n", nbytes); + + return ptr; +} diff --git a/CIME/non_py/src/timing/Makefile b/CIME/non_py/src/timing/Makefile new file mode 100644 index 00000000000..46b08aa45f6 --- /dev/null +++ b/CIME/non_py/src/timing/Makefile @@ -0,0 +1,112 @@ +.SUFFIXES: +.SUFFIXES: .F90 .o .c .f90 +# name of macros file - but default this is generic + +VPATH := $(GPTL_DIR) + +ifndef MOD_SUFFIX + MOD_SUFFIX := mod +endif +ifeq ($(strip $(SMP)),TRUE) + CPPDEFS += -DTHREADED_OMP + compile_threaded=TRUE +endif + +ifeq ($(strip $(MACFILE)),) + MACFILE := Macros.make +endif + +# Machine specific macros file +# This must be included before any settings are overwritten +# But must be AFTER any definitions it uses are defined. +# So be careful if moving this either earlier or later in the makefile!!! +include $(MACFILE) +ifdef COMP_INTERFACE + UPVAR := $(shell echo $(COMP_INTERFACE) | tr a-z A-Z) + CPPDEFS+=-D$(UPVAR)_INTERFACE + ifeq ("$(COMP_INTERFACE)", "nuopc") + # Set esmf.mk location with ESMF_LIBDIR having precedence over ESMFMKFILE + CIME_ESMFMKFILE := undefined_ESMFMKFILE + ifdef ESMFMKFILE + CIME_ESMFMKFILE := $(ESMFMKFILE) + endif + ifdef ESMF_LIBDIR + CIME_ESMFMKFILE := $(ESMF_LIBDIR)/esmf.mk + endif + -include $(CIME_ESMFMKFILE) + FFLAGS += $(ESMF_F90COMPILEPATHS) + endif +endif +ifdef DEBUG + ifeq ("$(DEBUG)", "TRUE") + CPPDEFS+=-DDEBUG + endif +endif +ifdef GPTL_CPPDEFS + CPPDEFS+=$(GPTL_CPPDEFS) -D$(OS) -DCPR$(shell echo $(COMPILER) | tr a-z A-z) +endif +INCLDIR += -I$(GPTL_DIR) + +ifeq ($(strip $(MPILIB)), mpi-serial) + CC := $(SCC) + FC := $(SFC) + MPIFC := $(SFC) + MPICC := $(SCC) + ifdef MPI_SERIAL_PATH + INCLDIR += -I$(MPI_SERIAL_PATH)/include + else + INCLDIR += -I$(GPTL_LIBDIR)/../mpi-serial + endif +else + CC := $(MPICC) + FC := $(MPIFC) + CPPDEFS += -DHAVE_MPI +endif +ifdef CPRE + FPPDEFS := $(patsubst -D%,$(CPRE)%,$(CPPDEFS)) +else + FPPDEFS := $(CPPDEFS) +endif + + + +OBJS = gptl.o GPTLutil.o GPTLget_memusage.o GPTLprint_memusage.o \ + gptl_papi.o f_wrappers.o perf_mod.o perf_utils.o + +AR ?= ar +ARFLAGS ?= ruv + +libgptl.a: $(OBJS) + $(AR) $(ARFLAGS) $@ $(OBJS) + + + +.c.o: + $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) $(CPPDEFS) $< +.F.o: + $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FPPDEFS) $(FIXEDFLAGS) $< +.f90.o: + $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $< +.F90.o: + $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FPPDEFS) $(FREEFLAGS) $< + +mostlyclean: + $(RM) -f *.f *.f90 + +clean: + $(RM) -f *.f *.f90 *.d *.$(MOD_SUFFIX) $(OBJS) + + +install: libgptl.a + cp -p $(GPTL_DIR)/gptl.h $(SHAREDPATH)/include + cp -p *.$(MOD_SUFFIX) $(SHAREDPATH)/include + cp -p libgptl.a $(SHAREDPATH)/lib + + +perf_mod.o: perf_utils.o +f_wrappers.o: gptl.h private.h +f_wrappers_pmpi.o: gptl.h private.h +gptl.o: gptl.h private.h +util.o: gptl.h private.h +gptl_papi.o: gptl.h private.h +pmpi.o: gptl.h private.h diff --git a/src/externals/pio1/timing/README b/CIME/non_py/src/timing/README similarity index 100% rename from src/externals/pio1/timing/README rename to CIME/non_py/src/timing/README diff --git a/src/share/timing/f_wrappers.c b/CIME/non_py/src/timing/f_wrappers.c similarity index 100% rename from src/share/timing/f_wrappers.c rename to CIME/non_py/src/timing/f_wrappers.c diff --git a/CIME/non_py/src/timing/gptl.c b/CIME/non_py/src/timing/gptl.c new file mode 100644 index 00000000000..1eeacccfd25 --- /dev/null +++ b/CIME/non_py/src/timing/gptl.c @@ -0,0 +1,6026 @@ +/* +** $Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $ 
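+**
+** Usage note (an illustrative sketch of the convention used below): error
+** handling throughout this file funnels through GPTLerror() from
+** GPTLutil.c, which prints to stderr and returns -1, so call sites read
+**
+**   if (update_ptr (ptr, t) != 0)
+**     return GPTLerror ("%s: update_ptr error\n", thisfunc);
+**
+** GPTLset_abort_on_error (true) turns that return into exit(-1), and
+** GPTLallocate() reports malloc failures through the same path.
+**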
+** +** Author: Jim Rosinski +** +** Main file contains most user-accessible GPTL functions +*/ + +#include /* malloc */ +#include /* gettimeofday */ +#include /* times */ +#include /* gettimeofday, syscall */ +#include +#include /* memset, strcmp (via STRMATCH), strncmp (via STRNMATCH) */ +#include /* isdigit */ +#include /* u_int8_t, u_int16_t */ +#include /* FLT_MAX */ +#include + +#ifndef HAVE_C99_INLINE +#define inline +#endif + +#ifdef HAVE_PAPI +#include /* PAPI_get_real_usec */ +#endif + +#ifdef HAVE_LIBRT +#include +#endif + +#ifdef _AIX +#include +#endif + +#include "private.h" +#include "gptl.h" + +static Timer **timers = 0; /* linked list of timers */ +static Timer **last = 0; /* last element in list */ +static int *max_depth; /* maximum indentation level encountered */ +static int *max_name_len; /* max length of timer name */ +static volatile int nthreads = -1; /* num threads. Init to bad value */ +static volatile int maxthreads = -1; /* max threads (=nthreads for OMP). Init to bad value */ +static int depthlimit = 99999; /* max depth for timers (99999 is effectively infinite) */ +static volatile bool disabled = false; /* Timers disabled? */ +static volatile bool initialized = false; /* GPTLinitialize has been called */ +static volatile bool pr_has_been_called = false; /* GPTLpr_file has been called */ +static Entry eventlist[MAX_AUX]; /* list of PAPI-based events to be counted */ +static int nevents = 0; /* number of PAPI events (init to 0) */ +static bool dousepapi = false; /* saves a function call if stays false */ +static bool verbose = false; /* output verbosity */ +static bool percent = false; /* print wallclock also as percent of 1st timers[0] */ +static bool dopr_preamble = true; /* whether to print preamble info */ +static bool dopr_threadsort = true; /* whether to print sorted thread stats */ +static bool dopr_multparent = true; /* whether to print multiple parent info */ +static bool dopr_collision = true; /* whether to print hash collision info */ +static bool dopr_quotes = false; /* whether to surround timer names with double quotes */ + +static time_t ref_gettimeofday = -1; /* ref start point for gettimeofday */ +static time_t ref_clock_gettime = -1;/* ref start point for clock_gettime */ +#ifdef _AIX +static time_t ref_read_real_time = -1; /* ref start point for read_real_time */ +#endif +static long long ref_papitime = -1; /* ref start point for PAPI_get_real_usec */ + +#if ( defined THREADED_OMP ) + +#include +static volatile int *threadid_omp = 0; /* array of thread ids */ + +#elif ( defined THREADED_PTHREADS ) + +#include + +#define MUTEX_API +#ifdef MUTEX_API +static volatile pthread_mutex_t t_mutex; +#else +static volatile pthread_mutex_t t_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif +static volatile pthread_t *threadid = 0; /* array of thread ids */ +static int lock_mutex (void); /* lock a mutex for entry into a critical region */ +static int unlock_mutex (void); /* unlock a mutex for exit from a critical region */ + +#else + +/* Unthreaded case */ +static int threadid = -1; + +#endif + +typedef struct { + const Option option; /* wall, cpu, etc. 
*/ + const char *str; /* descriptive string for printing */ + bool enabled; /* flag */ +} Settings; + +/* For Summary stats */ + +typedef struct { + double wallmax; + double wallmin; + double walltotal; + int onflgs; + int processes; + int threads; +#ifdef HAVE_PAPI + double papimax[MAX_AUX]; + double papimin[MAX_AUX]; + double papitotal[MAX_AUX]; +#endif + unsigned long count; + int wallmax_p; /* over processes */ + int wallmax_t; /* over threads */ + int wallmin_p; + int wallmin_t; +#ifdef HAVE_PAPI + int papimax_p[MAX_AUX]; /* over processes */ + int papimax_t[MAX_AUX]; /* over threads */ + int papimin_p[MAX_AUX]; + int papimin_t[MAX_AUX]; +#endif +} Summarystats; + +/* Options, print strings, and default enable flags */ + +static Settings cpustats = {GPTLcpu, "Usr sys usr+sys ", false}; +static Settings wallstats = {GPTLwall, " Wallclock max min", true }; +static Settings overheadstats = {GPTLoverhead, " UTR Overhead " , true }; +static Settings profileovhd = {GPTLprofile_ovhd, "", false }; + +static Hashentry **hashtable; /* table of entries */ +static long ticks_per_sec; /* clock ticks per second */ +static char **timerlist; /* list of all timers */ + +typedef struct { + int val; /* depth in calling tree */ + int padding[31]; /* padding is to mitigate false cache sharing */ +} Nofalse; +static Timer ***callstack; /* call stack */ +static Nofalse *stackidx; /* index into callstack: */ + +static int prefix_len_nt; /* length of timer name prefix set outside parallel region */ +static char *prefix_nt; /* timer name prefix set outside of parallel region */ +static int *prefix_len; /* length of timer name prefix for each thread */ +static char **prefix; /* timer name prefix for each thread */ + +static Method method = GPTLmost_frequent; /* default parent/child printing mechanism */ +static PRMode print_mode = GPTLprint_write; /* default output mode */ + +/* Local function prototypes */ + +static void printstats (const Timer *, FILE *, const int, const int, const bool, double); +static void add (Timer *, const Timer *); + +static void get_threadstats (const int, const char *, Summarystats *); +static void get_summarystats (Summarystats *, const Summarystats *); +#ifdef HAVE_MPI +static int collect_data( const int, MPI_Comm, int *, Summarystats ** ); +#else +static int collect_data( const int, const int, int *, Summarystats ** ); +#endif +static int merge_thread_data(); + +static void print_multparentinfo (FILE *, Timer *); +static inline int get_cpustamp (long *, long *); +static int newchild (Timer *, Timer *); +static int get_max_depth (const Timer *, const int); +static int num_descendants (Timer *); +static int is_descendant (const Timer *, const Timer *); +static int show_descendant (const int, const Timer *, const Timer *); +static char *methodstr (Method); +static char *modestr (PRMode); + +/* Prototypes from previously separate file threadutil.c */ + +static int threadinit (void); /* initialize threading environment */ +static void threadfinalize (void); /* finalize threading environment */ +static void print_threadmapping (FILE *); /* print mapping of thread ids */ +static inline int get_thread_num (void); /* get 0-based thread number */ +static int serial_region (void); /* check whether in a serial region */ + +/* These are the (possibly) supported underlying wallclock timers */ + +static inline double utr_nanotime (void); +static inline double utr_mpiwtime (void); +static inline double utr_clock_gettime (void); +static inline double utr_papitime (void); +static inline double 
utr_read_real_time (void); +static inline double utr_gettimeofday (void); + +static int init_nanotime (void); +static int init_mpiwtime (void); +static int init_clock_gettime (void); +static int init_papitime (void); +static int init_read_real_time (void); +static int init_gettimeofday (void); + +static double utr_getoverhead (void); +static inline Timer *getentry_instr (const Hashentry *, void *, unsigned int *); +static inline Timer *getentry (const Hashentry *, const char *, unsigned int *); +static inline Timer *getentryf (const Hashentry *, const char *, const int, unsigned int *); +static void printself_andchildren (const Timer *, FILE *, const int, const int, const double); +static inline int update_parent_info (Timer *, Timer **, int); +static inline int update_stats (Timer *, const double, const long, const long, const int); +static int update_ll_hash (Timer *, const int, const unsigned int); +static inline int update_ptr (Timer *, const int); +static int construct_tree (Timer *, Method); + +static int cmp (const void *, const void *); +static int ncmp (const void *, const void *); +static int get_index ( const char *, const char *); + +static int add_prefix( char *, const char *, const int, const int); + +typedef struct { + const Funcoption option; + double (*func)(void); + int (*funcinit)(void); + const char *name; +} Funcentry; + +static Funcentry funclist[] = { + {GPTLgettimeofday, utr_gettimeofday, init_gettimeofday, "gettimeofday"}, + {GPTLnanotime, utr_nanotime, init_nanotime, "nanotime"}, + {GPTLmpiwtime, utr_mpiwtime, init_mpiwtime, "MPI_Wtime"}, + {GPTLclockgettime, utr_clock_gettime, init_clock_gettime, "clock_gettime"}, + {GPTLpapitime, utr_papitime, init_papitime, "PAPI_get_real_usec"}, + {GPTLread_real_time, utr_read_real_time, init_read_real_time,"read_real_time"} /* AIX only */ +}; +static const int nfuncentries = sizeof (funclist) / sizeof (Funcentry); + +static double (*ptr2wtimefunc)() = 0; /* init to invalid */ +static int funcidx = 0; /* default timer is gettimeofday */ + +#ifdef HAVE_NANOTIME +static float cpumhz = -1.; /* init to bad value */ +static double cyc2sec = -1; /* init to bad value */ +static unsigned inline long long nanotime (void); /* read counter (assembler) */ +static float get_clockfreq (void); /* cycles/sec */ +#endif + +#define DEFAULT_TABLE_SIZE 2048 +static int tablesize = DEFAULT_TABLE_SIZE; /* per-thread size of hash table (settable parameter) */ +static char *outdir = 0; /* dir to write output files to (currently unused) */ + +static double overhead_utr = 0.0; /* timer cost estimate */ +static double overhead_est = 0.0; /* direct measurement of overhead for thread 0 */ +static double overhead_bound = 0.0; /* direct measurement of overhead for thread 0 */ + +/* VERBOSE is a debugging ifdef local to the rest of this file */ +#undef VERBOSE + +/* +** GPTLsetoption: set option value to true or false. 
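+**
+** A minimal call sequence (illustrative; option names are the ones defined
+** in gptl.h): options and the underlying clock must be chosen before
+** GPTLinitialize, e.g.
+**
+**   (void) GPTLsetoption (GPTLoverhead, 0);
+**   (void) GPTLsetoption (GPTLprofile_ovhd, 1);
+**   (void) GPTLsetutr (GPTLnanotime);
+**   if (GPTLinitialize () != 0)
+**     return -1;
+**
+** Calling GPTLsetoption or GPTLsetutr after GPTLinitialize is an error, and
+** an unavailable clock is reverted to gettimeofday by GPTLinitialize.
+**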
+** +** Input arguments: +** option: option to be set +** val: value to which option should be set (nonzero=true, zero=false) +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLsetoption (const int option, /* option */ + const int val) /* value */ +{ + static const char *thisfunc = "GPTLsetoption"; + + if (initialized) + return GPTLerror ("%s: must be called BEFORE GPTLinitialize\n", thisfunc); + + if (option == GPTLabort_on_error) { + GPTLset_abort_on_error ((bool) val); + if (verbose) + printf ("%s: boolean abort_on_error = %d\n", thisfunc, val); + return 0; + } + + switch (option) { + case GPTLcpu: +#ifdef HAVE_TIMES + cpustats.enabled = (bool) val; + if (verbose) + printf ("%s: cpustats = %d\n", thisfunc, val); +#else + if (val) + return GPTLerror ("%s: times() not available\n", thisfunc); +#endif + return 0; + case GPTLwall: + wallstats.enabled = (bool) val; + if (verbose) + printf ("%s: boolean wallstats = %d\n", thisfunc, val); + return 0; + case GPTLoverhead: + overheadstats.enabled = (bool) val; + if (verbose) + printf ("%s: boolean overheadstats = %d\n", thisfunc, val); + return 0; + case GPTLprofile_ovhd: + profileovhd.enabled = (bool) val; + if (verbose) + printf ("%s: boolean profileovhd = %d\n", thisfunc, val); + return 0; + case GPTLdepthlimit: + depthlimit = val; + if (verbose) + printf ("%s: depthlimit = %d\n", thisfunc, val); + return 0; + case GPTLverbose: + verbose = (bool) val; +#ifdef HAVE_PAPI + (void) GPTL_PAPIsetoption (GPTLverbose, val); +#endif + if (verbose) + printf ("%s: boolean verbose = %d\n", thisfunc, val); + return 0; + case GPTLpercent: + percent = (bool) val; + if (verbose) + printf ("%s: boolean percent = %d\n", thisfunc, val); + return 0; + case GPTLdopr_preamble: + dopr_preamble = (bool) val; + if (verbose) + printf ("%s: boolean dopr_preamble = %d\n", thisfunc, val); + return 0; + case GPTLdopr_threadsort: + dopr_threadsort = (bool) val; + if (verbose) + printf ("%s: boolean dopr_threadsort = %d\n", thisfunc, val); + return 0; + case GPTLdopr_multparent: + dopr_multparent = (bool) val; + if (verbose) + printf ("%s: boolean dopr_multparent = %d\n", thisfunc, val); + return 0; + case GPTLdopr_collision: + dopr_collision = (bool) val; + if (verbose) + printf ("%s: boolean dopr_collision = %d\n", thisfunc, val); + return 0; + case GPTLdopr_quotes: + dopr_quotes = (bool) val; + if (verbose) + printf ("%s: boolean dopr_quotes = %d\n", thisfunc, val); + return 0; + case GPTLprint_mode: + print_mode = (PRMode) val; + if (verbose) + printf ("%s: print_mode = %s\n", thisfunc, modestr (print_mode)); + return 0; + case GPTLprint_method: + method = (Method) val; + if (verbose) + printf ("%s: print_method = %s\n", thisfunc, methodstr (method)); + return 0; + case GPTLtablesize: + if (val < 1) + return GPTLerror ("%s: tablesize must be positive. %d is invalid\n", thisfunc, val); + tablesize = val; + if (verbose) + printf ("%s: tablesize = %d\n", thisfunc, tablesize); + return 0; + case GPTLsync_mpi: +#ifdef ENABLE_PMPI + if (GPTLpmpi_setoption (option, val) != 0) + fprintf (stderr, "%s: GPTLpmpi_setoption failure\n", thisfunc); +#endif + if (verbose) + printf ("%s: boolean sync_mpi = %d\n", thisfunc, val); + return 0; + + case GPTLmaxthreads: + if (val < 1) + return GPTLerror ("%s: maxthreads must be positive. 
%d is invalid\n", thisfunc, val); + + maxthreads = val; + return 0; + + /* + ** Allow GPTLmultiplex to fall through because it will be handled by + ** GPTL_PAPIsetoption() + */ + + case GPTLmultiplex: + default: + break; + } + +#ifdef HAVE_PAPI + if (GPTL_PAPIsetoption (option, val) == 0) { + if (val) + dousepapi = true; + return 0; + } +#else + /* Make GPTLnarrowprint a placebo if PAPI not enabled */ + + if (option == GPTLnarrowprint) + return 0; +#endif + + return GPTLerror ("%s: faiure to enable option %d\n", thisfunc, option); +} + +/* +** GPTLsetutr: set underlying timing routine. +** +** Input arguments: +** option: index which sets function +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLsetutr (const int option) +{ + int i; /* index over number of underlying timer */ + static const char *thisfunc = "GPTLsetutr"; + + if (initialized) + return GPTLerror ("%s: must be called BEFORE GPTLinitialize\n", thisfunc); + + for (i = 0; i < nfuncentries; i++) { + if (option == (int) funclist[i].option) { + if (verbose) + printf ("%s: underlying wallclock timer = %s\n", thisfunc, funclist[i].name); + funcidx = i; + + /* + ** Return an error condition if the function is not available. + ** OK for the user code to ignore: GPTLinitialize() will reset to gettimeofday + */ + + if ((*funclist[i].funcinit)() < 0) + return GPTLerror ("%s: utr=%s not available\n", thisfunc, funclist[i].name); + else + return 0; + } + } + return GPTLerror ("%s: unknown option %d\n", thisfunc, option); +} + +/* +** GPTLinitialize (): Initialization routine must be called from single-threaded +** region before any other timing routines may be called. The need for this +** routine could be eliminated if not targetting timing library for threaded +** capability. +** +** return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLinitialize (void) +{ + int i; /* loop index */ + int t; /* thread index */ + double t1, t2; /* returned from underlying timer */ + static const char *thisfunc = "GPTLinitialize"; + + if (initialized) + return GPTLerror ("%s: has already been called\n", thisfunc); + + if (threadinit () < 0) + return GPTLerror ("%s: bad return from threadinit\n", thisfunc); + + if ((ticks_per_sec = sysconf (_SC_CLK_TCK)) == -1) + return GPTLerror ("%s: failure from sysconf (_SC_CLK_TCK)\n", thisfunc); + + /* Allocate space for global arrays */ + + callstack = (Timer ***) GPTLallocate (maxthreads * sizeof (Timer **)); + stackidx = (Nofalse *) GPTLallocate (maxthreads * sizeof (Nofalse)); + timers = (Timer **) GPTLallocate (maxthreads * sizeof (Timer *)); + last = (Timer **) GPTLallocate (maxthreads * sizeof (Timer *)); + max_depth = (int *) GPTLallocate (maxthreads * sizeof (int)); + max_name_len = (int *) GPTLallocate (maxthreads * sizeof (int)); + hashtable = (Hashentry **) GPTLallocate (maxthreads * sizeof (Hashentry *)); + prefix_len = (int *) GPTLallocate (maxthreads * sizeof (int)); + prefix = (char **) GPTLallocate (maxthreads * sizeof (char *)); + + /* Initialize array values */ + + for (t = 0; t < maxthreads; t++) { + max_depth[t] = -1; + max_name_len[t] = 0; + callstack[t] = (Timer **) GPTLallocate (MAX_STACK * sizeof (Timer *)); + hashtable[t] = (Hashentry *) GPTLallocate (tablesize * sizeof (Hashentry)); + for (i = 0; i < tablesize; i++) { + hashtable[t][i].nument = 0; + hashtable[t][i].entries = 0; + } + + /* + ** Make a timer "GPTL_ROOT" to ensure no orphans, and to simplify printing. 
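+**
+** A typical threaded sequence (a sketch only; do_work is a placeholder and
+** the GPTLpr_file signature is assumed from its use elsewhere in GPTL):
+**
+**   GPTLinitialize ();
+**   #pragma omp parallel
+**   {
+**     GPTLstart ("work");
+**     do_work ();
+**     GPTLstop ("work");
+**   }
+**   GPTLpr_file ("timing.0");
+**   GPTLfinalize ();
+**
+** Each thread gets its own hash table, call stack and "GPTL_ROOT" timer below.
+**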
+ */ + + timers[t] = (Timer *) GPTLallocate (sizeof (Timer)); + memset (timers[t], 0, sizeof (Timer)); + strcpy (timers[t]->name, "GPTL_ROOT"); + timers[t]->onflg = true; + last[t] = timers[t]; + + stackidx[t].val = 0; + callstack[t][0] = timers[t]; + for (i = 1; i < MAX_STACK; i++) + callstack[t][i] = 0; + + prefix_len[t] = 0; + prefix[t] = (char *) GPTLallocate ((MAX_CHARS+1) * sizeof (char)); + prefix[t][0] = '\0'; + } + + prefix_len_nt = 0; + prefix_nt = (char *) GPTLallocate ((MAX_CHARS+1) * sizeof (char)); + prefix_nt[0] = '\0'; + +#ifdef HAVE_PAPI + if (GPTL_PAPIinitialize (maxthreads, verbose, &nevents, eventlist) < 0) + return GPTLerror ("%s: Failure from GPTL_PAPIinitialize\n", thisfunc); +#endif + + /* + ** Call init routine for underlying timing routine. + */ + + if ((*funclist[funcidx].funcinit)() < 0) { + fprintf (stderr, "%s: Failure initializing %s. Reverting underlying timer to %s\n", + thisfunc, funclist[funcidx].name, funclist[0].name); + funcidx = 0; + } + + ptr2wtimefunc = funclist[funcidx].func; + + if (verbose) { + t1 = (*ptr2wtimefunc) (); + t2 = (*ptr2wtimefunc) (); + if (t1 > t2) + fprintf (stderr, "%s: negative delta-t=%g\n", thisfunc, t2-t1); + + printf ("Per call overhead est. t2-t1=%g should be near zero\n", t2-t1); + printf ("Underlying wallclock timing routine is %s\n", funclist[funcidx].name); + } + + /* set global timer overhead estimate */ + if (wallstats.enabled && profileovhd.enabled){ + overhead_utr = utr_getoverhead (); + } + + initialized = true; + return 0; +} + +/* +** GPTLfinalize (): Finalization routine must be called from single-threaded +** region. Free all malloc'd space +** +** return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLfinalize (void) +{ + int t; /* thread index */ + int n; /* array index */ + Timer *ptr, *ptrnext; /* ll indices */ + static const char *thisfunc = "GPTLfinalize"; + + if ( ! 
initialized) + return GPTLerror ("%s: initialization was not completed\n", thisfunc); + + for (t = 0; t < maxthreads; ++t) { + for (n = 0; n < tablesize; ++n) { + if (hashtable[t][n].nument > 0) + free (hashtable[t][n].entries); + } + free (hashtable[t]); + hashtable[t] = NULL; + free (callstack[t]); + free (prefix[t]); + for (ptr = timers[t]; ptr; ptr = ptrnext) { + ptrnext = ptr->next; + if (ptr->nparent > 0) { + free (ptr->parent); + free (ptr->parent_count); + } + if (ptr->nchildren > 0) + free (ptr->children); + free (ptr); + } + } + + free (callstack); + free (stackidx); + free (timers); + free (last); + free (max_depth); + free (max_name_len); + free (hashtable); + free (prefix_len); + free (prefix); + free (prefix_nt); + + threadfinalize (); + +#ifdef HAVE_PAPI + GPTL_PAPIfinalize (maxthreads); +#endif + + /* Reset initial values */ + + timers = 0; + last = 0; + max_depth = 0; + max_name_len = 0; + nthreads = -1; + maxthreads = -1; + depthlimit = 99999; + disabled = false; + initialized = false; + pr_has_been_called = false; + dousepapi = false; + verbose = false; + percent = false; + dopr_preamble = true; + dopr_threadsort = true; + dopr_multparent = true; + dopr_collision = true; + print_mode = GPTLprint_write; + ref_gettimeofday = -1; + ref_clock_gettime = -1; +#ifdef _AIX + ref_read_real_time = -1; +#endif + ref_papitime = -1; + funcidx = 0; +#ifdef HAVE_NANOTIME + cpumhz= 0; + cyc2sec = -1; +#endif + outdir = 0; + tablesize = DEFAULT_TABLE_SIZE; + prefix_len_nt = 0; + + return 0; +} + +/* +** GPTLprefix_set: define prefix for subsequent timer names +** +** Input arguments: +** prefixname: prefix string +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLprefix_set (const char *prefixname) /* prefix string */ +{ + int t; /* thread index (of this thread) */ + int len_prefix; /* number of characters in prefix */ + char *ptr_prefix; /* pointer to prefix string */ + static const char *thisfunc = "GPTLprefix_set"; + + if (disabled) + return 0; + + if ( ! initialized){ + return 0; + } + +#if ( defined THREADED_PTHREADS ) + /* + ** prefix logic not enabled when using PTHREADS + */ + return 0; +#endif + + len_prefix = MIN (strlen (prefixname), MAX_CHARS); + + /* + ** Note: if in a parallel region with only one active thread, e.g. + ** thread 0, this will NOT be identified as a serial regions. + ** If want GPTLprefix_set to apply to all threads, will need to + ** "fire up" the idle threads in some sort of parallel loop. + ** It is not safe to just test omp_in_parallel and + ** omp_get_thread_num == 1 unless add a thread barrier, and this + ** barrier would apply to all calls, so would be a performance bottleneck. 
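+**
+** A minimal example (illustrative; "DYN_" is an arbitrary prefix): a prefix
+** set here is prepended to every timer name started while it is active,
+**
+**   GPTLprefix_set ("DYN_");
+**   GPTLstart ("solve");      (accumulates under the name "DYN_solve")
+**   GPTLstop ("solve");
+**   GPTLprefix_unset ();
+**
+** The combined name is truncated to MAX_CHARS characters, and the prefix
+** calls are no-ops when GPTL is built with THREADED_PTHREADS.
+**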
+ */ + + if (serial_region()){ + + prefix_len_nt = len_prefix; + ptr_prefix = prefix_nt; + + } else { + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + prefix_len[t] = len_prefix; + ptr_prefix = prefix[t]; + + } + + strncpy (ptr_prefix, prefixname, len_prefix); + + return (0); +} + +/* +** GPTLprefix_setf: define prefix for subsequent timer names when +** the string may not be null terminated +** +** Input arguments: +** prefixname: prefix string +** prefixlen: number of characters in timer name +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLprefix_setf (const char *prefixname, const int prefixlen) /* prefix string and length*/ +{ + int t; /* thread index (of this thread) */ + int c; /* character index */ + int len_prefix; /* number of characters in prefix */ + char *ptr_prefix; /* pointer to prefix string */ + static const char *thisfunc = "GPTLprefix_setf"; + + if (disabled) + return 0; + + if ( ! initialized){ + return 0; + } + +#if ( defined THREADED_PTHREADS ) + /* + ** prefix logic not enabled when using PTHREADS + */ + return 0; +#endif + + len_prefix = MIN (prefixlen, MAX_CHARS); + + /* + ** Note: if in a parallel region with only one active thread, e.g. + ** thread 0, this will NOT be identified as a serial regions. + ** If want GPTLprefix_setf to apply to all threads, will need to + ** "fire up" the idle threads in some sort of parallel loop. + ** It is not safe to just test omp_in_parallel and + ** omp_get_thread_num == 1 unless add a thread barrier, and this + ** barrier would apply to all calls, so would be a performance bottleneck. + */ + + if (serial_region()){ + + prefix_len_nt = len_prefix; + ptr_prefix = prefix_nt; + + } else { + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + prefix_len[t] = len_prefix; + ptr_prefix = prefix[t]; + + } + + for (c = 0; c < len_prefix; c++) { + ptr_prefix[c] = prefixname[c]; + } + ptr_prefix[len_prefix] = '\0'; + + return (0); +} + +/* +** GPTLprefix_unset: undefine prefix for subsequent timer names +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLprefix_unset () +{ + int t; /* thread index (of this thread) */ + int c; /* character index */ + char *ptr_prefix; /* pointer to prefix string */ + static const char *thisfunc = "GPTLprefix_setf"; + + if (disabled) + return 0; + + if ( ! initialized){ + return 0; + } + +#if ( defined THREADED_PTHREADS ) + /* + ** prefix logic not enabled when using PTHREADS + */ + return 0; +#endif + + /* + ** Note: if in a parallel region with only one active thread, e.g. + ** thread 0, this will NOT be identified as a serial regions. + ** If want GPTLprefix_unset to apply to all threads, will need to + ** "fire up" the idle threads in some sort of parallel loop. + ** It is not safe to just test omp_in_parallel and + ** omp_get_thread_num == 1 unless add a thread barrier, and this + ** barrier would apply to all calls, so would be a performance bottleneck. 
+ */ + + if (serial_region()){ + + prefix_len_nt = 0; + ptr_prefix = prefix_nt; + + } else { + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + prefix_len[t] = 0; + ptr_prefix = prefix[t]; + + } + + ptr_prefix[0] = '\0'; + + return (0); +} + +/* +** GPTLstart_instr: start a timer (auto-instrumented) +** +** Input arguments: +** self: function address +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLstart_instr (void *self) +{ + Timer *ptr; /* linked list pointer */ + int t; /* thread index (of this thread) */ + unsigned int indx; /* hash table index */ + static const char *thisfunc = "GPTLstart_instr"; + + if (disabled) + return 0; + + if ( ! initialized) + return GPTLerror ("%s self=%p: GPTLinitialize has not been called\n", thisfunc, self); + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** increment and return + */ + + if (stackidx[t].val >= depthlimit) { + ++stackidx[t].val; + return 0; + } + + ptr = getentry_instr (hashtable[t], self, &indx); + + /* + ** Recursion => increment depth in recursion and return. We need to return + ** because we don't want to restart the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr && ptr->onflg) { + ++ptr->recurselvl; + return 0; + } + + /* + ** Increment stackidx[t] unconditionally. This is necessary to ensure the correct + ** behavior when GPTLstop_instr decrements stackidx[t] unconditionally. + */ + + if (++stackidx[t].val > MAX_STACK-1) + return GPTLerror ("%s: stack too big\n", thisfunc); + + if ( ! ptr) { /* Add a new entry and initialize */ + ptr = (Timer *) GPTLallocate (sizeof (Timer)); + memset (ptr, 0, sizeof (Timer)); + + /* + ** Need to save the address string for later conversion back to a real + ** name by an offline tool. + */ + + snprintf (ptr->name, MAX_CHARS+1, "%lx", (unsigned long) self); + ptr->address = self; + + if (update_ll_hash (ptr, t, indx) != 0) + return GPTLerror ("%s: update_ll_hash error\n", thisfunc); + } + + if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) + return GPTLerror ("%s: update_parent_info error\n", thisfunc); + + if (update_ptr (ptr, t) != 0) + return GPTLerror ("%s: update_ptr error\n", thisfunc); + + return (0); +} + +/* +** GPTLstart: start a timer +** +** Input arguments: +** timername: timer name +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLstart (const char *timername) /* timer name */ +{ + Timer *ptr; /* linked list pointer */ + int t; /* thread index (of this thread) */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + unsigned int indx; /* hash table index */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLstart"; + + if (disabled) + return 0; + + if ( ! 
initialized) + return 0; + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** increment and return + */ + + if (stackidx[t].val >= depthlimit) { + ++stackidx[t].val; + return 0; + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* first caliper timestamp */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + numchars = MIN (strlen (name), MAX_CHARS); + } + + /* + ** ptr will point to the requested timer in the current list, + ** or NULL if this is a new entry + */ + + ptr = getentry (hashtable[t], name, &indx); + + /* + ** Recursion => increment depth in recursion and return. We need to return + ** because we don't want to restart the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr && ptr->onflg) { + ++ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + return 0; + } + + /* + ** Increment stackidx[t] unconditionally. This is necessary to ensure the correct + ** behavior when GPTLstop decrements stackidx[t] unconditionally. + */ + + if (++stackidx[t].val > MAX_STACK-1) + return GPTLerror ("%s: stack too big\n", thisfunc); + + if ( ! 
ptr) { /* Add a new entry and initialize */ + ptr = (Timer *) GPTLallocate (sizeof (Timer)); + memset (ptr, 0, sizeof (Timer)); + + //pw numchars = MIN (strlen (name), MAX_CHARS); + strncpy (ptr->name, name, numchars); + ptr->name[numchars] = '\0'; + + if (update_ll_hash (ptr, t, indx) != 0) + return GPTLerror ("%s: update_ll_hash error\n", thisfunc); + } + + if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) + return GPTLerror ("%s: update_parent_info error\n", thisfunc); + + if (update_ptr (ptr, t) != 0) + return GPTLerror ("%s: update_ptr error\n", thisfunc); + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + + return (0); +} + +/* +** GPTLstart_handle: start a timer based on a handle +** +** Input arguments: +** name: timer name (required when on input, handle=0) +** handle: pointer to timer matching "name" +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLstart_handle (const char *name, /* timer name */ + void **handle) /* handle (output if input value is 0) */ +{ + Timer *ptr; /* linked list pointer */ + int t; /* thread index (of this thread) */ + int numchars; /* number of characters to copy */ + unsigned int indx = (unsigned int) -1; /* hash table index: init to bad value */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + static const char *thisfunc = "GPTLstart_handle"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** increment and return + */ + + if (stackidx[t].val >= depthlimit) { + ++stackidx[t].val; + return 0; + } + + /* + ** If prefix string is defined, then call GPTLstart and + ** return a handle of 0. Otherwise a change in the prefix + ** might be ignored if the handle has already been set. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + *handle = 0; + return GPTLstart (name); + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* first caliper timestamp */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If on input, handle references a non-zero value, assume it's a previously returned Timer* + ** passed in by the user. If zero, generate the hash entry and return it to the user. + */ + + if (*handle) { + ptr = (Timer *) *handle; + } else { + ptr = getentry (hashtable[t], name, &indx); + } + + /* + ** Recursion => increment depth in recursion and return. We need to return + ** because we don't want to restart the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr && ptr->onflg) { + ++ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + + return 0; + } + + /* + ** Increment stackidx[t] unconditionally. 
This is necessary to ensure the correct + ** behavior when GPTLstop decrements stackidx[t] unconditionally. + */ + + if (++stackidx[t].val > MAX_STACK-1) + return GPTLerror ("%s: stack too big\n", thisfunc); + + if ( ! ptr) { /* Add a new entry and initialize */ + ptr = (Timer *) GPTLallocate (sizeof (Timer)); + memset (ptr, 0, sizeof (Timer)); + + numchars = MIN (strlen (name), MAX_CHARS); + strncpy (ptr->name, name, numchars); + ptr->name[numchars] = '\0'; + + if (update_ll_hash (ptr, t, indx) != 0) + return GPTLerror ("%s: update_ll_hash error\n", thisfunc); + } + + if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) + return GPTLerror ("%s: update_parent_info error\n", thisfunc); + + if (update_ptr (ptr, t) != 0) + return GPTLerror ("%s: update_ptr error\n", thisfunc); + + /* + ** If on input, *handle was 0, return the pointer to the timer for future input + */ + + if ( ! *handle) + *handle = (void *) ptr; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + + return (0); +} + +/* +** GPTLstartf: start a timer when the timer name may not be null terminated +** +** Input arguments: +** timername: timer name +** namelen: number of characters in timer name +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLstartf (const char *timername, const int namelen) /* timer name and length */ +{ + Timer *ptr; /* linked list pointer */ + int t; /* thread index (of this thread) */ + int c; /* character index */ + int numchars; /* number of characters to copy */ + unsigned int indx; /* hash table index */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLstartf"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** increment and return + */ + + if (stackidx[t].val >= depthlimit) { + ++stackidx[t].val; + return 0; + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* first caliper timestamp */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + numchars = MIN (namelen, MAX_CHARS); + name = timername; + } + + /* + ** ptr will point to the requested timer in the current list, + ** or NULL if this is a new entry + */ + + ptr = getentryf (hashtable[t], name, numchars, &indx); + + /* + ** Recursion => increment depth in recursion and return. We need to return + ** because we don't want to restart the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. 
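+**
+** Usage sketch (illustrative; the length-taking variants are presumably
+** what the Fortran bindings in f_wrappers.c call): the name need not be
+** null-terminated because its length is passed explicitly,
+**
+**   const char label[6] = {'s','o','l','v','e','r'};
+**   GPTLstartf (label, 6);
+**   GPTLstopf (label, 6);
+**
+** As with GPTLstart, starting a name that is already running only bumps its
+** recursion level; the reported time covers the outermost start/stop pair.
+**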
+ */ + + if (ptr && ptr->onflg) { + ++ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + + return 0; + } + + /* + ** Increment stackidx[t] unconditionally. This is necessary to ensure the correct + ** behavior when GPTLstop decrements stackidx[t] unconditionally. + */ + + if (++stackidx[t].val > MAX_STACK-1) + return GPTLerror ("%s: stack too big\n", thisfunc); + + if ( ! ptr) { /* Add a new entry and initialize */ + ptr = (Timer *) GPTLallocate (sizeof (Timer)); + memset (ptr, 0, sizeof (Timer)); + + //pw numchars = MIN (namelen, MAX_CHARS); + //pw strncpy (ptr->name, name, numchars); + for (c = 0; c < numchars; c++) { + ptr->name[c] = name[c]; + } + ptr->name[numchars] = '\0'; + + if (update_ll_hash (ptr, t, indx) != 0) + return GPTLerror ("%s: update_ll_hash error\n", thisfunc); + } + + if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) + return GPTLerror ("%s: update_parent_info error\n", thisfunc); + + if (update_ptr (ptr, t) != 0) + return GPTLerror ("%s: update_ptr error\n", thisfunc); + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + + return (0); +} + +/* +** GPTLstartf_handle: start a timer based on a handle +** when the timer name may not be null terminated +** +** Input arguments: +** name: timer name (required when on input, handle=0) +** namelen: number of characters in timer name +** handle: pointer to timer matching "name" +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLstartf_handle (const char *name, /* timer name */ + const int namelen, /* timer name length */ + void **handle) /* handle (output if input value is 0) */ +{ + Timer *ptr; /* linked list pointer */ + int t; /* thread index (of this thread) */ + int c; /* character index */ + int numchars; /* number of characters to copy */ + unsigned int indx = (unsigned int) -1; /* hash table index: init to bad value */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + static const char *thisfunc = "GPTLstartf_handle"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** increment and return + */ + + if (stackidx[t].val >= depthlimit) { + ++stackidx[t].val; + return 0; + } + + /* + ** If prefix string is defined, then call GPTLstartf and + ** return a handle of 0. Otherwise a change in the prefix + ** might be ignored if the handle has already been set. 
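+**
+** Usage sketch (illustrative): the handle variants let a hot call site skip
+** the hash lookup after the first call by caching the returned pointer,
+**
+**   static void *h = 0;
+**   GPTLstart_handle ("fast_loop", &h);
+**   GPTLstop_handle ("fast_loop", &h);
+**
+** A zero handle is filled in on first use; when a prefix is active the
+** handle is reset to zero and the call is forwarded to the plain start/stop
+** routine so the prefixed name is honored.
+**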
+ */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + *handle = 0; + return GPTLstartf (name, namelen); + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* first caliper timestamp */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If on input, handle references a non-zero value, assume it's a previously returned Timer* + ** passed in by the user. If zero, generate the hash entry and return it to the user. + */ + + if (*handle) { + ptr = (Timer *) *handle; + } else { + numchars = MIN (namelen, MAX_CHARS); + ptr = getentryf (hashtable[t], name, numchars, &indx); + } + + /* + ** Recursion => increment depth in recursion and return. We need to return + ** because we don't want to restart the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr && ptr->onflg) { + ++ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + + return 0; + } + + /* + ** Increment stackidx[t] unconditionally. This is necessary to ensure the correct + ** behavior when GPTLstop decrements stackidx[t] unconditionally. + */ + + if (++stackidx[t].val > MAX_STACK-1) + return GPTLerror ("%s: stack too big\n", thisfunc); + + if ( ! ptr) { /* Add a new entry and initialize */ + ptr = (Timer *) GPTLallocate (sizeof (Timer)); + memset (ptr, 0, sizeof (Timer)); + + numchars = MIN (namelen, MAX_CHARS); + //pw strncpy (ptr->name, name, numchars); + for (c = 0; c < numchars; c++) { + ptr->name[c] = name[c]; + } + ptr->name[numchars] = '\0'; + + if (update_ll_hash (ptr, t, indx) != 0) + return GPTLerror ("%s: update_ll_hash error\n", thisfunc); + } + + if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) + return GPTLerror ("%s: update_parent_info error\n", thisfunc); + + if (update_ptr (ptr, t) != 0) + return GPTLerror ("%s: update_ptr error\n", thisfunc); + + /* + ** If on input, *handle was 0, return the pointer to the timer for future input + */ + + if ( ! 
*handle) + *handle = (void *) ptr; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tpa) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tpa) + 2*overhead_utr); + } + } + + return (0); +} + +/* +** add_prefix: add prefix string to timer name +** +** Input arguments: +** new_name: new name +** timername: timer name +** namelen: length of timer name +** t: thread id +** +** Return value: length of new name +*/ + +static int add_prefix (char *new_name, const char *timername, const int namelen, const int t) +{ + int numchars; /* number of characters to copy */ + int c; /* character index */ + + /* add prefix from serial region */ + numchars = MIN (prefix_len_nt, MAX_CHARS); + for (c = 0; c < numchars; c++) { + new_name[c] = prefix_nt[c]; + } + + /* add thread-specific prefix */ + numchars = MIN (prefix_len[t], MAX_CHARS-prefix_len_nt); + for (c = 0; c < numchars; c++) { + new_name[c+prefix_len_nt] = prefix[t][c]; + } + + /* add timer name */ + numchars = MIN (namelen, MAX_CHARS-prefix_len_nt-prefix_len[t]); + for (c = 0; c < numchars; c++) { + new_name[c+prefix_len_nt+prefix_len[t]] = timername[c]; + } + + /* add string terminator */ + numchars = MIN (namelen+prefix_len_nt+prefix_len[t], MAX_CHARS); + new_name[numchars] = '\0'; + + return numchars; +} + +/* +** update_ll_hash: Update linked list and hash table. +** Called by GPTLstart(f), GPTLstart_instr, +** and GPTLstart(f)_handle. +** +** Input arguments: +** ptr: pointer to timer +** t: thread index +** indx: hash index +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +static int update_ll_hash (Timer *ptr, const int t, const unsigned int indx) +{ + int nchars; /* number of chars */ + int nument; /* number of entries */ + Timer **eptr; /* for realloc */ + + nchars = strlen (ptr->name); + if (nchars > max_name_len[t]) + max_name_len[t] = nchars; + + last[t]->next = ptr; + last[t] = ptr; + ++hashtable[t][indx].nument; + nument = hashtable[t][indx].nument; + + eptr = (Timer **) realloc (hashtable[t][indx].entries, nument * sizeof (Timer *)); + if ( ! eptr) + return GPTLerror ("update_ll_hash: realloc error\n"); + + hashtable[t][indx].entries = eptr; + hashtable[t][indx].entries[nument-1] = ptr; + + return 0; +} + +/* +** update_ptr: Update timer contents. +** Called by GPTLstart(f), GPTLstart_instr, and GPTLstart(f)_handle. 
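+**
+** Summary of the caliper bookkeeping in the start/stop routines above (the
+** variable "elapsed" below stands for the difference between the two extra
+** timestamps taken on thread 0): when wallstats and profileovhd are both
+** enabled, each instrumented call accumulates
+**
+**   overhead_est   += elapsed - overhead_utr;
+**   overhead_bound += elapsed + 2*overhead_utr;
+**
+** where overhead_utr is the per-call cost of the underlying timer measured
+** by utr_getoverhead() during GPTLinitialize.
+**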
+** +** Input arguments: +** ptr: pointer to timer +** t: thread index +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +static inline int update_ptr (Timer *ptr, const int t) +{ + double tp2; /* time stamp */ + + ptr->onflg = true; + + if (cpustats.enabled && get_cpustamp (&ptr->cpu.last_utime, &ptr->cpu.last_stime) < 0) + return GPTLerror ("update_ptr: get_cpustamp error"); + + if (wallstats.enabled) { + tp2 = (*ptr2wtimefunc) (); + ptr->wall.last = tp2; + } + +#ifdef HAVE_PAPI + if (dousepapi && GPTL_PAPIstart (t, &ptr->aux) < 0) + return GPTLerror ("update_ptr: error from GPTL_PAPIstart\n"); +#endif + return 0; +} + +/* +** update_parent_info: update info about parent, and in the parent about this child +** +** Arguments: +** ptr: pointer to timer +** callstackt: callstack for this thread +** stackidxt: stack index for this thread +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +static inline int update_parent_info (Timer *ptr, + Timer **callstackt, + int stackidxt) +{ + int n; /* loop index through known parents */ + Timer *pptr; /* pointer to parent in callstack */ + Timer **pptrtmp; /* for realloc parent pointer array */ + int nparent; /* number of parents */ + int *parent_count; /* number of times parent invoked this child */ + static const char *thisfunc = "update_parent_info"; + + if ( ! ptr ) + return -1; + + if (stackidxt < 0) + return GPTLerror ("%s: called with negative stackidx\n", thisfunc); + + callstackt[stackidxt] = ptr; + + /* + ** If the region has no parent, bump its orphan count + ** (should never happen since "GPTL_ROOT" added). + */ + + if (stackidxt == 0) { + ++ptr->norphan; + return 0; + } + + pptr = callstackt[stackidxt-1]; + + /* If this parent occurred before, bump its count */ + + for (n = 0; n < ptr->nparent; ++n) { + if (ptr->parent[n] == pptr) { + ++ptr->parent_count[n]; + break; + } + } + + /* If this is a new parent, update info */ + + if (n == ptr->nparent) { + ++ptr->nparent; + nparent = ptr->nparent; + pptrtmp = (Timer **) realloc (ptr->parent, nparent * sizeof (Timer *)); + if ( ! pptrtmp) + return GPTLerror ("%s: realloc error pptrtmp nparent=%d\n", thisfunc, nparent); + + ptr->parent = pptrtmp; + ptr->parent[nparent-1] = pptr; + parent_count = (int *) realloc (ptr->parent_count, nparent * sizeof (int)); + if ( ! parent_count) + return GPTLerror ("%s: realloc error parent_count nparent=%d\n", thisfunc, nparent); + + ptr->parent_count = parent_count; + ptr->parent_count[nparent-1] = 1; + } + + return 0; +} + +/* +** GPTLstop_instr: stop a timer (auto-instrumented) +** +** Input arguments: +** self: function address +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLstop_instr (void *self) +{ + double tp1 = 0.0; /* time stamp */ + Timer *ptr; /* linked list pointer */ + int t; /* thread number for this process */ + unsigned int indx; /* index into hash table */ + long usr = 0; /* user time (returned from get_cpustamp) */ + long sys = 0; /* system time (returned from get_cpustamp) */ + static const char *thisfunc = "GPTLstop_instr"; + + if (disabled) + return 0; + + if ( ! 
initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + /* Get the timestamp */ + + if (wallstats.enabled) { + tp1 = (*ptr2wtimefunc) (); + } + + if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) + return GPTLerror ("%s: bad return from get_cpustamp\n", thisfunc); + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** decrement and return + */ + + if (stackidx[t].val > depthlimit) { + --stackidx[t].val; + return 0; + } + + ptr = getentry_instr (hashtable[t], self, &indx); + + if ( ! ptr) + return GPTLerror ("%s: timer for %p had not been started.\n", thisfunc, self); + + if ( ! ptr->onflg ) + return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); + + ++ptr->count; + + /* + ** Recursion => decrement depth in recursion and return. We need to return + ** because we don't want to stop the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr->recurselvl > 0) { + ++ptr->nrecurse; + --ptr->recurselvl; + return 0; + } + + if (update_stats (ptr, tp1, usr, sys, t) != 0) + return GPTLerror ("%s: error from update_stats\n", thisfunc); + + return 0; +} + +/* +** GPTLstop: stop a timer +** +** Input arguments: +** timername: timer name +** +** Return value: 0 (success) or -1 (failure) +*/ + +int GPTLstop (const char *timername) /* timer name */ +{ + double tp1 = 0.0; /* time stamp */ + Timer *ptr; /* linked list pointer */ + int t; /* thread number for this process */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + int len_prefix; /* number of characters in prefix */ + unsigned int indx; /* index into hash table */ + long usr = 0; /* user time (returned from get_cpustamp) */ + long sys = 0; /* system time (returned from get_cpustamp) */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + char *ptr_prefix; /* pointer to prefix string */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLstop"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + /* Get the timestamp */ + + if (wallstats.enabled) { + tp1 = (*ptr2wtimefunc) (); + } + + if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) + return GPTLerror ("%s: get_cpustamp error", thisfunc); + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** decrement and return + */ + + if (stackidx[t].val > depthlimit) { + --stackidx[t].val; + return 0; + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* dummy clock call, to capture earlier tp1 call */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + } + + if ( ! (ptr = getentry (hashtable[t], name, &indx))) + return GPTLerror ("%s thread %d: timer for %s had not been started.\n", thisfunc, t, name); + + if ( ! 
ptr->onflg ) + return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); + + ++ptr->count; + + /* + ** Recursion => decrement depth in recursion and return. We need to return + ** because we don't want to stop the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr->recurselvl > 0) { + ++ptr->nrecurse; + --ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; + } + + if (update_stats (ptr, tp1, usr, sys, t) != 0) + return GPTLerror ("%s: error from update_stats\n", thisfunc); + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; +} + +/* +** GPTLstop_handle: stop a timer based on a handle +** +** Input arguments: +** name: timer name (used only for diagnostics) +** handle: pointer to timer +** +** Return value: 0 (success) or -1 (failure) +*/ + +int GPTLstop_handle (const char *name, /* timer name */ + void **handle) /* handle (output if input value is 0) */ +{ + double tp1 = 0.0; /* time stamp */ + Timer *ptr; /* linked list pointer */ + int t; /* thread number for this process */ + unsigned int indx; /* index into hash table */ + long usr = 0; /* user time (returned from get_cpustamp) */ + long sys = 0; /* system time (returned from get_cpustamp) */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + static const char *thisfunc = "GPTLstop_handle"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If prefix string is defined, then call GPTLstop and + ** return a handle of 0. Otherwise a change in the prefix + ** might be ignored if the handle has already been set. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + *handle = 0; + return GPTLstop (name); + } + + /* Get the timestamp */ + + if (wallstats.enabled) { + tp1 = (*ptr2wtimefunc) (); + } + + if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) + return GPTLerror (0); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** decrement and return + */ + + if (stackidx[t].val > depthlimit) { + --stackidx[t].val; + return 0; + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* dummy clock call, to capture earlier tp1 call */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If on input, handle references a non-zero value, assume it's a previously returned Timer* + ** passed in by the user. If zero, generate the hash entry and return it to the user. + */ + + if (*handle) { + ptr = (Timer *) *handle; + } else { + if ( ! (ptr = getentry (hashtable[t], name, &indx))) + return GPTLerror ("%s thread %d: timer for %s had not been started.\n", thisfunc, t, name); + } + + if ( ! 
ptr->onflg ) + return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); + + ++ptr->count; + + /* + ** Recursion => decrement depth in recursion and return. We need to return + ** because we don't want to stop the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr->recurselvl > 0) { + ++ptr->nrecurse; + --ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; + } + + if (update_stats (ptr, tp1, usr, sys, t) != 0) + return GPTLerror ("%s: error from update_stats\n", thisfunc); + + /* + ** If on input, *handle was 0, return the pointer to the timer for future input + */ + + if ( ! *handle) + *handle = (void *) ptr; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; +} + +/* +** GPTLstopf: stop a timer when the timer name may not be null terminated +** +** Input arguments: +** timername: timer name +** namelen: number of characters in timer name +** +** Return value: 0 (success) or -1 (failure) +*/ + +int GPTLstopf (const char *timername, const int namelen) /* timer name and length */ +{ + double tp1 = 0.0; /* time stamp */ + Timer *ptr; /* linked list pointer */ + int t; /* thread number for this process */ + int c; /* character index */ + int numchars; /* number of characters to copy */ + unsigned int indx; /* index into hash table */ + long usr = 0; /* user time (returned from get_cpustamp) */ + long sys = 0; /* system time (returned from get_cpustamp) */ + char strname[MAX_CHARS+1]; /* null terminated version of name */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLstopf"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + /* Get the timestamp */ + + if (wallstats.enabled) { + tp1 = (*ptr2wtimefunc) (); + } + + if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) + return GPTLerror ("%s: get_cpustamp error", thisfunc); + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** decrement and return + */ + + if (stackidx[t].val > depthlimit) { + --stackidx[t].val; + return 0; + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* dummy clock call, to capture earlier tp1 call */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. 
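+  **
+  ** Illustrative example (hypothetical names, not taken from this file):
+  ** with a prefix "dyn_" in effect and timername "loop", add_prefix()
+  ** builds the lookup key "dyn_loop", truncated if needed so the combined
+  ** name still fits in new_name (at most MAX_CHARS characters).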
+ */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + numchars = MIN (namelen, MAX_CHARS); + name = timername; + } + + if ( ! (ptr = getentryf (hashtable[t], name, numchars, &indx))){ + //pw numchars = MIN (namelen, MAX_CHARS); + //pw strncpy (strname, name, numchars); + for (c = 0; c < numchars; c++) { + strname[c] = name[c]; + } + strname[numchars] = '\0'; + return GPTLerror ("%s thread %d: timer for %s had not been started.\n", thisfunc, t, strname); + } + + if ( ! ptr->onflg ) + return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); + + ++ptr->count; + + /* + ** Recursion => decrement depth in recursion and return. We need to return + ** because we don't want to stop the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr->recurselvl > 0) { + ++ptr->nrecurse; + --ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; + } + + if (update_stats (ptr, tp1, usr, sys, t) != 0) + return GPTLerror ("%s: error from update_stats\n", thisfunc); + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; +} + +/* +** GPTLstopf_handle: stop a timer based on a handle +** when the timer name may not be null terminated +** +** Input arguments: +** name: timer name (used only for diagnostics) +** namelen: number of characters in timer name +** handle: pointer to timer +** +** Return value: 0 (success) or -1 (failure) +*/ + +int GPTLstopf_handle (const char *name, /* timer name */ + const int namelen, /* timer name length */ + void **handle) /* handle (output if input value is 0) */ +{ + double tp1 = 0.0; /* time stamp */ + Timer *ptr; /* linked list pointer */ + int t; /* thread number for this process */ + int c; /* character index */ + unsigned int indx; /* index into hash table */ + long usr = 0; /* user time (returned from get_cpustamp) */ + long sys = 0; /* system time (returned from get_cpustamp) */ + int numchars; /* number of characters to copy */ + char strname[MAX_CHARS+1]; /* null terminated version of name */ + double tpa = 0.0; /* time stamp */ + double tpb = 0.0; /* time stamp */ + static const char *thisfunc = "GPTLstopf_handle"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If prefix string is defined, then call GPTLstopf and + ** return a handle of 0. Otherwise a change in the prefix + ** might be ignored if the handle has already been set. 
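+  **
+  ** Intended caller pattern (illustrative sketch; assumes the matching
+  ** start-with-handle routine is used with the same handle variable):
+  **   static void *h = 0;               /* must be zero on first use */
+  **   ...
+  **   GPTLstopf_handle ("loop", 4, &h); /* first call stores the Timer*,
+  **                                        later calls skip the hash lookup */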
+ */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + *handle = 0; + return GPTLstopf (name, namelen); + } + + /* Get the timestamp */ + + if (wallstats.enabled) { + tp1 = (*ptr2wtimefunc) (); + } + + if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) + return GPTLerror (0); + + /* + ** If current depth exceeds a user-specified limit for print, just + ** decrement and return + */ + + if (stackidx[t].val > depthlimit) { + --stackidx[t].val; + return 0; + } + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* dummy clock call, to capture earlier tp1 call */ + tpa = (*ptr2wtimefunc) (); + } + } + + /* + ** If on input, handle references a non-zero value, assume it's a previously returned Timer* + ** passed in by the user. If zero, generate the hash entry and return it to the user. + */ + + if (*handle) { + ptr = (Timer *) *handle; + } else { + if ( ! (ptr = getentryf (hashtable[t], name, namelen, &indx))){ + numchars = MIN (namelen, MAX_CHARS); + //pw strncpy (strname, name, numchars); + for (c = 0; c < numchars; c++) { + strname[c] = name[c]; + } + strname[numchars] = '\0'; + return GPTLerror ("%s thread %d: timer for %s had not been started.\n", thisfunc, t, strname); + } + } + + if ( ! ptr->onflg ) + return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); + + ++ptr->count; + + /* + ** Recursion => decrement depth in recursion and return. We need to return + ** because we don't want to stop the timer. We want the reported time for + ** the timer to reflect the outermost layer of recursion. + */ + + if (ptr->recurselvl > 0) { + ++ptr->nrecurse; + --ptr->recurselvl; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; + } + + if (update_stats (ptr, tp1, usr, sys, t) != 0) + return GPTLerror ("%s: error from update_stats\n", thisfunc); + + /* + ** If on input, *handle was 0, return the pointer to the timer for future input + */ + + if ( ! *handle) + *handle = (void *) ptr; + + if (wallstats.enabled && profileovhd.enabled){ + if (t == 0){ + /* second caliper timestamp */ + tpb = (*ptr2wtimefunc) (); + /* subtract out additional overhead from caliper timing calls */ + overhead_est += ((tpb - tp1) - overhead_utr); + /* add in additional overhead due to caliper timing calls (probaby 2X what necessary) */ + overhead_bound += ((tpb - tp1) + 2*overhead_utr); + } + } + + return 0; +} + +/* +** update_stats: update stats inside ptr. 
Called by GPTLstop(f), GPTLstop_instr, +** GPTLstop(f)_handle +** +** Input arguments: +** ptr: pointer to timer +** tp1: input time stapm +** usr: user time +** sys: system time +** t: thread index +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +static inline int update_stats (Timer *ptr, + const double tp1, + const long usr, + const long sys, + const int t) +{ + double delta; /* difference */ + static const char *thisfunc = "update_stats"; + + ptr->onflg = false; + --stackidx[t].val; + if (stackidx[t].val < -1) { + stackidx[t].val = -1; + return GPTLerror ("%s: tree depth has become negative.\n", thisfunc); + } + +#ifdef HAVE_PAPI + if (dousepapi && GPTL_PAPIstop (t, &ptr->aux) < 0) + return GPTLerror ("%s: error from GPTL_PAPIstop\n", thisfunc); +#endif + + if (wallstats.enabled) { + delta = tp1 - ptr->wall.last; + + if (delta < 0.) { + fprintf (stderr, "%s: negative delta=%g\n", thisfunc, delta); + delta = 0.0; + } + + ptr->wall.accum += delta; + ptr->wall.latest = delta; + + if (ptr->count == 1) { + ptr->wall.max = delta; + + ptr->wall.prev_min = FLT_MAX; + ptr->wall.min = delta; + ptr->wall.latest_is_min = 1; + } else { + if (delta > ptr->wall.max) + ptr->wall.max = delta; + if (delta < ptr->wall.min){ + ptr->wall.prev_min = ptr->wall.min; + ptr->wall.min = delta; + ptr->wall.latest_is_min = 1; + } else { + ptr->wall.latest_is_min = 0; + } + } + } + + if (cpustats.enabled) { + ptr->cpu.accum_utime += usr - ptr->cpu.last_utime; + ptr->cpu.accum_stime += sys - ptr->cpu.last_stime; + ptr->cpu.last_utime = usr; + ptr->cpu.last_stime = sys; + } + return 0; +} + +/* +** GPTLenable: enable timers +** +** Return value: 0 (success) +*/ + +int GPTLenable (void) +{ + disabled = false; + return (0); +} + +/* +** GPTLdisable: disable timers +** +** Return value: 0 (success) +*/ + +int GPTLdisable (void) +{ + disabled = true; + return (0); +} + +/* +** GPTLstamp: Compute timestamp of usr, sys, and wallclock time (seconds) +** +** Output arguments: +** wall: wallclock +** usr: user time +** sys: system time +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLstamp (double *wall, double *usr, double *sys) +{ + struct tms buf; /* argument to times */ + + if ( ! initialized) + return GPTLerror ("GPTLstamp: GPTLinitialize has not been called\n"); + +#ifdef HAVE_TIMES + *usr = 0; + *sys = 0; + + if (times (&buf) == -1) + return GPTLerror ("GPTLstamp: times() failed. Results bogus\n"); + + *usr = buf.tms_utime / (double) ticks_per_sec; + *sys = buf.tms_stime / (double) ticks_per_sec; +#endif + *wall = (*ptr2wtimefunc) (); + return 0; +} + +/* +** GPTLreset: reset all timers to 0 +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLreset (void) +{ + int t; /* index over threads */ + Timer *ptr; /* linked list index */ + static const char *thisfunc = "GPTLreset"; + + if ( ! 
initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + for (t = 0; t < nthreads; t++) { + for (ptr = timers[t]; ptr; ptr = ptr->next) { + ptr->onflg = false; + ptr->count = 0; + memset (&ptr->wall, 0, sizeof (ptr->wall)); + memset (&ptr->cpu, 0, sizeof (ptr->cpu)); +#ifdef HAVE_PAPI + memset (&ptr->aux, 0, sizeof (ptr->aux)); +#endif + } + } + + if (verbose) + printf ("%s: accumulators for all timers set to zero\n", thisfunc); + + return 0; +} + +/* +** GPTLprint_mode_set: set output mode to use for +** GPTLpr_file and GPTLpr_summary_file +*/ + +int GPTLprint_mode_set (int pr_mode) +{ + print_mode = (PRMode) pr_mode; + return 0; +} + +/* +** GPTLprint_mode_query: query output mode used +** for GPTLpr_file and GPTLpr_summary_file +*/ + +int GPTLprint_mode_query (void) +{ + return (int) print_mode; +} + +/* +** GPTLpr: Print values of all timers +** +** Input arguments: +** id: integer to append to string "timing." +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLpr (const int id) /* output file will be named "timing." */ +{ + char outfile[14]; /* name of output file: timing.xxxxxx */ + static const char *thisfunc = "GPTLpr"; + + if (id < 0 || id > 999999) + return GPTLerror ("%s: bad id=%d for output file. Must be >= 0 and < 1000000\n", thisfunc, id); + + sprintf (outfile, "timing.%d", id); + + if (GPTLpr_file (outfile) != 0) + return GPTLerror ("%s: Error in GPTLpr_file\n", thisfunc); + + return 0; +} + +/* +** GPTLpr_file: Print values of all timers +** +** Input arguments: +** outfile: Name of output file to write +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLpr_file (const char *outfile) /* output file to write */ +{ + FILE *fp; /* file handle to write to */ + Timer *ptr; /* walk through master thread linked list */ + Timer *tptr; /* walk through slave threads linked lists */ + Timer sumstats; /* sum of same timer stats over threads */ + int i, ii, n, t; /* indices */ + int totent; /* per-thread collision count (diagnostic) */ + int nument; /* per-index collision count (diagnostic) */ + int totlen; /* length for malloc */ + unsigned long totcount; /* total timer invocations */ + char *outpath; /* path to output file: outdir/timing.xxxxxx */ + float *sum; /* sum of overhead values (per thread) */ + float osum; /* sum of overhead over threads */ + double utr_overhead; /* overhead of calling underlying timing routine */ + double tot_overhead; /* utr_overhead + papi overhead */ + double papi_overhead = 0; /* overhead of reading papi counters */ + bool found; /* jump out of loop when name found */ + bool foundany; /* whether summation print necessary */ + bool first; /* flag 1st time entry found */ + /* + ** Diagnostics for collisions and GPTL memory usage + */ + int num_zero; /* number of buckets with 0 collisions */ + int num_one; /* number of buckets with 1 collision */ + int num_two; /* number of buckets with 2 collisions */ + int num_more; /* number of buckets with more than 2 collisions */ + int most; /* biggest collision count */ + int numtimers = 0; /* number of timers */ + float hashmem; /* hash table memory usage */ + float regionmem; /* timer memory usage */ + float papimem; /* PAPI stats memory usage */ + float pchmem; /* parent/child array memory usage */ + float gptlmem; /* total per-thread GPTL memory usage estimate */ + float totmem; /* sum of gptlmem across threads */ + + static const char *thisfunc = "GPTLpr_file"; + + if ( ! 
initialized) + return GPTLerror ("%s: GPTLinitialize() has not been called\n", thisfunc); + + /* 2 is for "/" plus null */ + if (outdir) + totlen = strlen (outdir) + strlen (outfile) + 2; + else + totlen = strlen (outfile) + 2; + + outpath = (char *) GPTLallocate (totlen); + + if (outdir) { + strcpy (outpath, outdir); + strcat (outpath, "/"); + strcat (outpath, outfile); + } else { + strcpy (outpath, outfile); + } + + if (print_mode == GPTLprint_append){ + if ( ! (fp = fopen (outpath, "a"))) + fp = stderr; + } + else{ + if ( ! (fp = fopen (outpath, "w"))) + fp = stderr; + } + + free (outpath); + + fprintf (fp, "$Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $\n"); + + /* + ** A set of nasty ifdefs to tell important aspects of how GPTL was built + */ + +#ifdef HAVE_NANOTIME + if (funclist[funcidx].option == GPTLnanotime) { + fprintf (fp, "Clock rate = %f MHz\n", cpumhz); +#ifdef BIT64 + fprintf (fp, " BIT64 was true\n"); +#else + fprintf (fp, " BIT64 was false\n"); +#endif + } +#endif + +#if ( defined THREADED_OMP ) + fprintf (fp, "GPTL was built with THREADED_OMP\n"); +#elif ( defined THREADED_PTHREADS ) + fprintf (fp, "GPTL was built with THREADED_PTHREADS\n"); +#else + fprintf (fp, "GPTL was built without threading\n"); +#endif + +#ifdef HAVE_MPI + fprintf (fp, "HAVE_MPI was true\n"); + +#ifdef HAVE_COMM_F2C + fprintf (fp, " HAVE_COMM_F2C was true\n"); +#else + fprintf (fp, " HAVE_COMM_F2C was false\n"); +#endif + +#ifdef ENABLE_PMPI + fprintf (fp, " ENABLE_PMPI was true\n"); +#else + fprintf (fp, " ENABLE_PMPI was false\n"); +#endif + +#else + fprintf (fp, "HAVE_MPI was false\n"); +#endif + +#ifdef HAVE_PAPI + fprintf (fp, "HAVE_PAPI was true\n"); + if (dousepapi) { + if (GPTL_PAPIis_multiplexed ()) + fprintf (fp, " PAPI event multiplexing was ON\n"); + else + fprintf (fp, " PAPI event multiplexing was OFF\n"); + GPTL_PAPIprintenabled (fp); + } +#else + fprintf (fp, "HAVE_PAPI was false\n"); +#endif + + /* + ** Estimate underlying timing routine overhead + */ + + utr_overhead = utr_getoverhead (); + fprintf (fp, "Underlying timing routine was %s.\n", funclist[funcidx].name); + if (wallstats.enabled && profileovhd.enabled){ + fprintf (fp, "Per-call utr overhead est (at init): %g sec.\n", overhead_utr); + fprintf (fp, "Per-call utr overhead est (at end): %g sec.\n", utr_overhead); + } else { + fprintf (fp, "Per-call utr overhead est: %g sec.\n", utr_overhead); + } +#ifdef HAVE_PAPI + if (dousepapi) { + double t1, t2; + t1 = (*ptr2wtimefunc) (); + read_counters100 (); + t2 = (*ptr2wtimefunc) (); + papi_overhead = 0.01 * (t2 - t1); + fprintf (fp, "Per-call PAPI overhead est: %g sec.\n", papi_overhead); + } +#endif + tot_overhead = utr_overhead + papi_overhead; + if (dopr_preamble) { + fprintf (fp, "If overhead stats are printed, roughly half the estimated number is\n" + "embedded in the wallclock stats for each timer.\n" + "Print method was %s.\n", methodstr (method)); +#ifdef ENABLE_PMPI + fprintf (fp, "If a AVG_MPI_BYTES field is present, it is an estimate of the per-call " + "average number of bytes handled by that process.\n" + "If timers beginning with sync_ are present, it means MPI synchronization " + "was turned on.\n"); +#endif + fprintf (fp, "If a \'%%_of\' field is present, it is w.r.t. 
the first timer for thread 0.\n" + "If a \'e6_per_sec\' field is present, it is in millions of PAPI counts per sec.\n\n" + "A '*' in column 1 below means the timer had multiple parents, though the\n" + "values printed are for all calls.\n" + "Further down the listing may be more detailed information about multiple\n" + "parents. Look for 'Multiple parent info'\n\n"); + } + + sum = (float *) GPTLallocate (nthreads * sizeof (float)); + + for (t = 0; t < nthreads; ++t) { + + /* + ** Construct tree for printing timers in parent/child form. get_max_depth() must be called + ** AFTER construct_tree() because it relies on the per-parent children arrays being complete. + */ + + if (construct_tree (timers[t], method) != 0) + printf ("GPTLpr_file: failure from construct_tree: output will be incomplete\n"); + max_depth[t] = get_max_depth (timers[t], 0); + + if (t > 0) + fprintf (fp, "\n"); + fprintf (fp, "Stats for thread %d:\n", t); + + for (n = 0; n < max_depth[t]+1; ++n) /* +1 to always indent timer name */ + fprintf (fp, " "); + if (dopr_quotes){ + for (n = 0; n < max_name_len[t]+2; ++n) /* longest timer name + quotes */ + fprintf (fp, " "); + } else { + for (n = 0; n < max_name_len[t]; ++n) /* longest timer name */ + fprintf (fp, " "); + } + + fprintf (fp, " On Called Recurse"); + + /* Print strings for enabled timer types */ + + if (cpustats.enabled) + fprintf (fp, "%s", cpustats.str); + if (wallstats.enabled) { + fprintf (fp, "%s", wallstats.str); + if (percent && timers[0]->next) + fprintf (fp, "%%_of_%5.5s ", timers[0]->next->name); + if (overheadstats.enabled) + fprintf (fp, "%s", overheadstats.str); + } + +#ifdef ENABLE_PMPI + fprintf (fp, "AVG_MPI_BYTES "); +#endif + +#ifdef HAVE_PAPI + GPTL_PAPIprstr (fp); +#endif + + fprintf (fp, "\n"); /* Done with titles, now print stats */ + + /* + ** Print call tree and stats via recursive routine. "-1" is flag to + ** avoid printing dummy outermost timer, and initialize the depth. + */ + + printself_andchildren (timers[t], fp, t, -1, tot_overhead); + + /* + ** Sum of overhead across timers is meaningful. + ** Factor of 2 is because there are 2 utr calls per start/stop pair. 
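+    ** For example, a timer invoked count times contributes roughly
+    **   count * 2 * (utr_overhead + papi_overhead)
+    ** seconds to sum[t] below.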
+ */ + + sum[t] = 0; + totcount = 0; + for (ptr = timers[t]->next; ptr; ptr = ptr->next) { + sum[t] += ptr->count * 2 * tot_overhead; + totcount += ptr->count; + } + fprintf (fp, "\n"); + if (wallstats.enabled && overheadstats.enabled){ + fprintf (fp, "Overhead sum = %9.3g wallclock seconds\n", sum[t]); + } + if (t == 0){ + if (wallstats.enabled && profileovhd.enabled){ + fprintf (fp, "Overhead estimate = %9.3g wallclock seconds\n", overhead_est); + fprintf (fp, "Overhead bound = %9.3g wallclock seconds\n", overhead_bound); + } + } + if (totcount < PRTHRESH) + fprintf (fp, "Total calls = %lu\n", totcount); + else + fprintf (fp, "Total calls = %9.3e\n", (float) totcount); + } + + /* Print per-name stats for all threads */ + + if (dopr_threadsort && nthreads > 1) { + fprintf (fp, "\nSame stats sorted by timer for threaded regions (for timers active on thread 0):\n"); + fprintf (fp, "Thd "); + + for (n = 0; n < max_name_len[0]; ++n) /* longest timer name */ + fprintf (fp, " "); + + fprintf (fp, " On Called Recurse"); + + if (cpustats.enabled) + fprintf (fp, "%s", cpustats.str); + if (wallstats.enabled) { + fprintf (fp, "%s", wallstats.str); + if (percent && timers[0]->next) + fprintf (fp, "%%_of_%5.5s ", timers[0]->next->name); + if (overheadstats.enabled) + fprintf (fp, "%s", overheadstats.str); + } + +#ifdef HAVE_PAPI + GPTL_PAPIprstr (fp); +#endif + + fprintf (fp, "\n"); + + /* Start at next to skip dummy */ + + for (ptr = timers[0]->next; ptr; ptr = ptr->next) { + + /* + ** To print sum stats, first create a new timer then copy thread 0 + ** stats into it. then sum using "add", and finally print. + */ + + foundany = false; + first = true; + sumstats = *ptr; + for (t = 1; t < nthreads; ++t) { + found = false; + for (tptr = timers[t]->next; tptr && ! found; tptr = tptr->next) { + if (STRMATCH (ptr->name, tptr->name)) { + + /* Only print thread 0 when this timer found for other threads */ + + if (first) { + first = false; + fprintf (fp, "%3.3d ", 0); + printstats (ptr, fp, 0, 0, false, tot_overhead); + } + + found = true; + foundany = true; + fprintf (fp, "%3.3d ", t); + printstats (tptr, fp, 0, 0, false, tot_overhead); + add (&sumstats, tptr); + } + } + } + + if (foundany) { + fprintf (fp, "SUM "); + printstats (&sumstats, fp, 0, 0, false, tot_overhead); + fprintf (fp, "\n"); + } + } + + /* Repeat overhead print in loop over threads */ + + if (wallstats.enabled && overheadstats.enabled) { + osum = 0.; + for (t = 0; t < nthreads; ++t) { + fprintf (fp, "OVERHEAD.%3.3d (wallclock seconds) = %9.3g\n", t, sum[t]); + osum += sum[t]; + } + fprintf (fp, "OVERHEAD.SUM (wallclock seconds) = %9.3g\n", osum); + } + } + + /* Print info about timers with multiple parents */ + + if (dopr_multparent) { + for (t = 0; t < nthreads; ++t) { + bool some_multparents = false; /* thread has entries with multiple parents? 
*/ + for (ptr = timers[t]->next; ptr; ptr = ptr->next) { + if (ptr->nparent > 1) { + some_multparents = true; + break; + } + } + + if (some_multparents) { + fprintf (fp, "\nMultiple parent info for thread %d:\n", t); + if (dopr_preamble && t == 0) { + fprintf (fp, "Columns are count and name for the listed child\n" + "Rows are each parent, with their common child being the last entry, " + "which is indented.\n" + "Count next to each parent is the number of times it called the child.\n" + "Count next to child is total number of times it was called by the " + "listed parents.\n\n"); + } + + for (ptr = timers[t]->next; ptr; ptr = ptr->next) + if (ptr->nparent > 1) + print_multparentinfo (fp, ptr); + } + } + } + + /* Print hash table stats */ + + if (dopr_collision) { + for (t = 0; t < nthreads; t++) { + first = true; + totent = 0; + num_zero = 0; + num_one = 0; + num_two = 0; + num_more = 0; + most = 0; + numtimers= 0; + + for (i = 0; i < tablesize; i++) { + nument = hashtable[t][i].nument; + if (nument > 1) { + totent += nument-1; + if (first) { + first = false; + fprintf (fp, "\nthread %d had some hash collisions:\n", t); + } + fprintf (fp, "hashtable[%d][%d] had %d entries:", t, i, nument); + for (ii = 0; ii < nument; ii++) + fprintf (fp, " %s", hashtable[t][i].entries[ii]->name); + fprintf (fp, "\n"); + } + switch (nument) { + case 0: + ++num_zero; + break; + case 1: + ++num_one; + break; + case 2: + ++num_two; + break; + default: + ++num_more; + break; + } + most = MAX (most, nument); + numtimers += nument; + } + + if (totent > 0) { + fprintf (fp, "Total collisions thread %d = %d\n", t, totent); + fprintf (fp, "Entry information:\n"); + fprintf (fp, "num_zero = %d num_one = %d num_two = %d num_more = %d\n", + num_zero, num_one, num_two, num_more); + fprintf (fp, "Most = %d\n", most); + } + } + } + + /* Stats on GPTL memory usage */ + + totmem = 0.; + for (t = 0; t < nthreads; t++) { + hashmem = (float) sizeof (Hashentry) * tablesize; + regionmem = (float) numtimers * sizeof (Timer); +#ifdef HAVE_PAPI + papimem = (float) numtimers * sizeof (Papistats); +#else + papimem = 0.; +#endif + pchmem = 0.; + for (ptr = timers[t]->next; ptr; ptr = ptr->next) + pchmem += (float) (sizeof (Timer *)) * (ptr->nchildren + ptr->nparent); + + gptlmem = hashmem + regionmem + pchmem; + totmem += gptlmem; + fprintf (fp, "\n"); + fprintf (fp, "Thread %d total memory usage = %g KB\n", t, gptlmem*.001); + fprintf (fp, " Hashmem = %g KB\n" + " Regionmem = %g KB (papimem portion = %g KB)\n" + " Parent/child arrays = %g KB\n", + hashmem*.001, regionmem*.001, papimem*.001, pchmem*.001); + } + fprintf (fp, "\n"); + fprintf (fp, "Total memory usage all threads = %g KB\n", totmem*0.001); + + print_threadmapping (fp); + free (sum); + + if (fclose (fp) != 0) + fprintf (stderr, "Attempt to close %s failed\n", outfile); + + pr_has_been_called = true; + return 0; +} + +/* +** construct_tree: Build the parent->children tree starting with knowledge of +** parent list for each child. 
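+** (Illustration with hypothetical timer names: if "sub" was invoked from both
+** "main" and "init", GPTLfirst_parent attaches "sub" under the first parent
+** recorded, GPTLlast_parent under the last, GPTLmost_frequent under whichever
+** parent invoked it most often, and GPTLfull_tree under every parent.)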
+** +** Input arguments: +** timerst: Linked list of timers +** method: method to be used to define the links +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int construct_tree (Timer *timerst, Method method) +{ + Timer *ptr; /* loop through linked list */ + Timer *pptr = 0; /* parent (init to NULL to avoid compiler warning) */ + int nparent; /* number of parents */ + int maxcount; /* max calls by a single parent */ + int n; /* loop over nparent */ + + /* + ** Walk the linked list to build the parent-child tree, using whichever + ** mechanism is in place. newchild() will prevent loops. + */ + + for (ptr = timerst; ptr; ptr = ptr->next) { + switch (method) { + case GPTLfirst_parent: + if (ptr->nparent > 0) { + pptr = ptr->parent[0]; + if (newchild (pptr, ptr) != 0); + } + break; + case GPTLlast_parent: + if (ptr->nparent > 0) { + nparent = ptr->nparent; + pptr = ptr->parent[nparent-1]; + if (newchild (pptr, ptr) != 0); + } + break; + case GPTLmost_frequent: + maxcount = 0; + for (n = 0; n < ptr->nparent; ++n) { + if (ptr->parent_count[n] > maxcount) { + pptr = ptr->parent[n]; + maxcount = ptr->parent_count[n]; + } + } + if (maxcount > 0) { /* not an orphan */ + if (newchild (pptr, ptr) != 0); + } + break; + case GPTLfull_tree: + /* + ** Careful: this one can create *lots* of output! + */ + for (n = 0; n < ptr->nparent; ++n) { + pptr = ptr->parent[n]; + if (newchild (pptr, ptr) != 0); + } + break; + default: + return GPTLerror ("construct_tree: method %d is not known\n", method); + } + } + return 0; +} + +/* +** modestr: Return a pointer to a string that represents the mode +** +** Input arguments: +** mode: print mode type (write or append) +*/ +static char *modestr (PRMode prmode) +{ + if (prmode == GPTLprint_write) + return "write"; + else if (prmode == GPTLprint_append) + return "append"; + else + return "Unknown"; +} + +/* +** methodstr: Return a pointer to a string which represents the method +** +** Input arguments: +** method: method type +*/ + +static char *methodstr (Method method) +{ + if (method == GPTLfirst_parent) + return "first_parent"; + else if (method == GPTLlast_parent) + return "last_parent"; + else if (method == GPTLmost_frequent) + return "most_frequent"; + else if (method == GPTLfull_tree) + return "full_tree"; + else + return "Unknown"; +} + +/* +** newchild: Add an entry to the children list of parent. Use function +** is_descendant() to prevent infinite loops. +** +** Input arguments: +** parent: parent node +** child: child to be added +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +static int newchild (Timer *parent, Timer *child) +{ + int nchildren; /* number of children (temporary) */ + Timer **chptr; /* array of pointers to children */ + int n; /* loop over nchildren */ + + static const char *thisfunc = "newchild"; + + if (parent == child) + return GPTLerror ("%s: child %s can't be a parent of itself\n", thisfunc, child->name); + + /* + ** To allow construct_tree to be called multiple times, check that proposed child + ** is not a known child + */ + + for (n = 0; n < parent->nchildren; ++n) { + if (parent->children[n] == child){ + n = parent->nchildren + 1; + } + } + if (n > parent->nchildren){ + return 0; + } + + /* + ** To guarantee no loops, ensure that proposed parent isn't already a descendant of + ** proposed child + */ + + if (is_descendant (child, parent)) { + show_descendant (0, child, parent); + return GPTLerror ("%s: loop detected: NOT adding %s to descendant list of %s. 
" + "Proposed parent is in child's descendant path.\n", + thisfunc, child->name, parent->name); + } + + /* Safe to add the child to the parent's list of children */ + + ++parent->nchildren; + nchildren = parent->nchildren; + chptr = (Timer **) realloc (parent->children, nchildren * sizeof (Timer *)); + if ( ! chptr) + return GPTLerror ("%s: realloc error\n", thisfunc); + parent->children = chptr; + parent->children[nchildren-1] = child; + + return 0; +} + +/* +** get_max_depth: Determine the maximum call tree depth by traversing the +** tree recursively +** +** Input arguments: +** ptr: Starting timer +** startdepth: current depth when function invoked +** +** Return value: maximum depth +*/ + +static int get_max_depth (const Timer *ptr, const int startdepth) +{ + int maxdepth = startdepth; + int depth; + int n; + + for (n = 0; n < ptr->nchildren; ++n) + if ((depth = get_max_depth (ptr->children[n], startdepth+1)) > maxdepth) + maxdepth = depth; + + return maxdepth; +} + +/* +** num_descendants: Determine the number of descendants of a timer by traversing +** the tree recursively. This function is not currently used. It could be +** useful in a pruning algorithm +** +** Input arguments: +** ptr: Starting timer +** +** Return value: number of descendants +*/ + +static int num_descendants (Timer *ptr) +{ + int n; + + ptr->num_desc = ptr->nchildren; + for (n = 0; n < ptr->nchildren; ++n) { + ptr->num_desc += num_descendants (ptr->children[n]); + } + return ptr->num_desc; +} + +/* +** is_descendant: Determine whether node2 is in the descendant list for +** node1 +** +** Input arguments: +** node1: starting node for recursive search +** node2: node to be searched for +** +** Return value: true or false +*/ + +static int is_descendant (const Timer *node1, const Timer *node2) +{ + int n; + + /* Breadth before depth for efficiency */ + + for (n = 0; n < node1->nchildren; ++n) + if (node1->children[n] == node2) + return 1; + + for (n = 0; n < node1->nchildren; ++n) + if (is_descendant (node1->children[n], node2)) + return 1; + + return 0; +} + +/* +** show_descendant: list descendants, breadth first, stopping early +** if a particular node is discovered (e.g. 
the parent) +** +** Input arguments: +** level: current level in recursion, should be 0 when first called +** node1: starting node for recursive listing +** node2: node defining the early stopping criterion +** +** Return value: true (listed all descendants) or false (stopped early) +*/ + +static int show_descendant (const int level, const Timer *node1, const Timer *node2) +{ + int n; + + /* Breadth before depth for efficiency */ + + for (n = 0; n < node1->nchildren; ++n){ + printf ("node1: %-32s level: %d child: %d label: %-32s\n", node1->name, level, n, node1->children[n]->name); + if (node1->children[n] == node2) + return 1; + } + + for (n = 0; n < node1->nchildren; ++n) + if (show_descendant (level+1, node1->children[n], node2)) + return 1; + + return 0; +} + +/* +** printstats: print a single timer +** +** Input arguments: +** timer: timer for which to print stats +** fp: file descriptor to write to +** t: thread number +** depth: depth to indent timer +** doindent: whether indenting will be done +** tot_overhead: underlying timing routine overhead +*/ + +static void printstats (const Timer *timer, + FILE *fp, + const int t, + const int depth, + const bool doindent, + const double tot_overhead) +{ + int i; /* index */ + int indent; /* index for indenting */ + int extraspace; /* for padding to length of longest name */ + float fusr; /* user time as float */ + float fsys; /* system time as float */ + float usrsys; /* usr + sys */ + float elapse; /* elapsed time */ + float wallmax; /* max wall time */ + float wallmin; /* min wall time */ + float ratio; /* percentage calc */ + + /* Flag regions having multiple parents with a "*" in column 1 */ + + if (doindent) { + if (timer->nparent > 1) + fprintf (fp, "* "); + else + fprintf (fp, " "); + + /* Indent to depth of this timer */ + + for (indent = 0; indent < depth; ++indent) + fprintf (fp, " "); + } + + if (dopr_quotes){ + fprintf (fp, "\"%s\"", timer->name); + } else { + fprintf (fp, "%s", timer->name); + } + + /* Pad to length of longest name */ + + extraspace = max_name_len[t] - strlen (timer->name); + for (i = 0; i < extraspace; ++i) + fprintf (fp, " "); + + /* Pad to max indent level */ + + if (doindent) + for (indent = depth; indent < max_depth[t]; ++indent) + fprintf (fp, " "); + + if (timer->onflg) + fprintf (fp, " y"); + else + fprintf (fp, " -"); + + if (timer->count < PRTHRESH) { + if (timer->nrecurse > 0) + fprintf (fp, "%8lu %6lu ", timer->count, timer->nrecurse); + else + fprintf (fp, "%8lu - ", timer->count); + } else { + if (timer->nrecurse > 0) + fprintf (fp, "%8.1e %6.0e ", (float) timer->count, (float) timer->nrecurse); + else + fprintf (fp, "%8.1e - ", (float) timer->count); + } + + if (cpustats.enabled) { + fusr = timer->cpu.accum_utime / (float) ticks_per_sec; + fsys = timer->cpu.accum_stime / (float) ticks_per_sec; + usrsys = fusr + fsys; + fprintf (fp, "%9.3f %9.3f %9.3f ", fusr, fsys, usrsys); + } + + if (wallstats.enabled) { + elapse = timer->wall.accum; + wallmax = timer->wall.max; + wallmin = timer->wall.min; + fprintf (fp, "%12.6f %12.6f %12.6f ", elapse, wallmax, wallmin); + + if (percent && timers[0]->next) { + ratio = 0.; + if (timers[0]->next->wall.accum > 0.) + ratio = (timer->wall.accum * 100.) / timers[0]->next->wall.accum; + fprintf (fp, " %9.2f ", ratio); + } + + /* + ** Factor of 2 is because there are 2 utr calls per start/stop pair. + */ + + if (overheadstats.enabled) { + fprintf (fp, "%16.6f ", timer->count * 2 * tot_overhead); + } + } + +#ifdef ENABLE_PMPI + if (timer->nbytes == 0.) 
+ fprintf (fp, " - "); + else + fprintf (fp, "%13.3e ", timer->nbytes / timer->count); +#endif + +#ifdef HAVE_PAPI + GPTL_PAPIpr (fp, &timer->aux, t, timer->count, timer->wall.accum); +#endif + + fprintf (fp, "\n"); +} + +/* +** print_multparentinfo: +** +** Input arguments: +** Input/output arguments: +*/ +void print_multparentinfo (FILE *fp, + Timer *ptr) +{ + int n; + + if (ptr->norphan > 0) { + if (ptr->norphan < PRTHRESH) + fprintf (fp, "%8u %-32s\n", ptr->norphan, "ORPHAN"); + else + fprintf (fp, "%8.1e %-32s\n", (float) ptr->norphan, "ORPHAN"); + } + + for (n = 0; n < ptr->nparent; ++n) { + if (ptr->parent_count[n] < PRTHRESH) + fprintf (fp, "%8d %-32s\n", ptr->parent_count[n], ptr->parent[n]->name); + else + fprintf (fp, "%8.1e %-32s\n", (float) ptr->parent_count[n], ptr->parent[n]->name); + } + + if (ptr->count < PRTHRESH) + fprintf (fp, "%8lu %-32s\n\n", ptr->count, ptr->name); + else + fprintf (fp, "%8.1e %-32s\n\n", (float) ptr->count, ptr->name); +} + +/* +** add: add the contents of tin to tout +** +** Input arguments: +** tin: input timer +** Input/output arguments: +** tout: output timer summed into +*/ + +static void add (Timer *tout, + const Timer *tin) +{ + tout->count += tin->count; + + if (wallstats.enabled) { + tout->wall.accum += tin->wall.accum; + + tout->wall.max = MAX (tout->wall.max, tin->wall.max); + tout->wall.min = MIN (tout->wall.min, tin->wall.min); + } + + if (cpustats.enabled) { + tout->cpu.accum_utime += tin->cpu.accum_utime; + tout->cpu.accum_stime += tin->cpu.accum_stime; + } +#ifdef HAVE_PAPI + GPTL_PAPIadd (&tout->aux, &tin->aux); +#endif +} + +/* +** GPTLpr_summary: Gather and print summary stats across +** threads and MPI tasks +** +** Input arguments: +** comm: commuicator (e.g. MPI_COMM_WORLD). If zero, use MPI_COMM_WORLD +*/ + +#ifdef HAVE_MPI +int GPTLpr_summary (MPI_Comm comm) +#else +int GPTLpr_summary (int comm) +#endif +{ + const char *outfile = "timing.summary"; + int ret; + + ret = GPTLpr_summary_file(comm, outfile); + return 0; +} + +#ifdef HAVE_MPI +int GPTLpr_summary_file (MPI_Comm comm, + const char *outfile) +#else +int GPTLpr_summary_file (int comm, + const char *outfile) +#endif +{ + int iam = 0; /* MPI rank: default master */ + int n; /* index */ + int extraspace; /* for padding to length of longest name */ + int totlen; /* length for malloc */ + char *outpath; /* path to output file: outdir/outfile */ + FILE *fp = 0; /* output file */ + + int count; /* number of timers */ + Summarystats *storage; /* storage for data from all timers */ + + int x; /* pointer increment */ + int k; /* counter */ + char *tempname; /* event name workspace */ + int max_name_length; + int len; + float temp; + int ret; /* return code */ + + static const char *thisfunc = "GPTLpr_summary_file"; + +#ifdef HAVE_MPI + int nproc; /* number of procs in MPI communicator */ + + char name[MAX_CHARS+1]; /* timer name requested by master */ + + if (((int) comm) == 0) + comm = MPI_COMM_WORLD; + + if ((ret = MPI_Comm_rank (comm, &iam)) != MPI_SUCCESS) + return GPTLerror ("%s: Bad return from MPI_Comm_rank=%d\n", thisfunc, ret); + + if ((ret = MPI_Comm_size (comm, &nproc)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Comm_size=%d\n", thisfunc, iam, ret); + +#endif + + if ( ! initialized) + return GPTLerror ("%s: GPTLinitialize() has not been called\n", thisfunc); + + /* + ** Each process gathers stats for its threads. + ** Binary tree used combine results. + ** Master prints results. 
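+  **
+  ** Sketch of the tree combine implemented in collect_data() below: at the
+  ** first step every odd rank sends its merged timer list to rank-1; at the
+  ** next step ranks 2, 6, 10, ... send to rank-2; the stride doubles each
+  ** step until rank 0 holds the stats for every process.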
+ */ + + if (iam == 0) { + + /* 2 is for "/" plus null */ + if (outdir) + totlen = strlen (outdir) + strlen (outfile) + 2; + else + totlen = strlen (outfile) + 2; + + outpath = (char *) GPTLallocate (totlen); + + if (outdir) { + strcpy (outpath, outdir); + strcat (outpath, "/"); + strcat (outpath, outfile); + } else { + strcpy (outpath, outfile); + } + + if (print_mode == GPTLprint_append){ + if ( ! (fp = fopen (outpath, "a"))) + fp = stderr; + } + else{ + if ( ! (fp = fopen (outpath, "w"))) + fp = stderr; + } + + free (outpath); + + fprintf (fp, "$Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $\n"); + fprintf (fp, "'count' is cumulative. All other stats are max/min\n"); + fprintf (fp, "'on' indicates whether the timer was active during output, and so stats are lower or upper bounds.\n"); +#ifndef HAVE_MPI + fprintf (fp, "NOTE: GPTL was built WITHOUT MPI: Only task 0 stats will be printed.\n"); + fprintf (fp, "This is even for MPI codes.\n"); +#endif + fprintf (fp, "\n"); + + count = merge_thread_data(); /*merges events from all threads*/ + + if( !( tempname = (char*)malloc((MAX_CHARS + 1) * sizeof(char) ) ) ) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + /* allocate storage for data for all timers */ + if( !( storage = malloc( sizeof(Summarystats) * count ) ) && count ) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + if ( (ret = collect_data( iam, comm, &count, &storage) ) != 0 ) + return GPTLerror ("%s: master collect_data failed\n", thisfunc); + + x = 0; /*finds max timer name length*/ + max_name_length = 0; + for( k = 0; k < count; k++ ) { + len = strlen( timerlist[0] + x ); + if( len > max_name_length ) + max_name_length = len; + x += MAX_CHARS + 1; + } + + /* Print heading */ + + fprintf (fp, "name"); + if (dopr_quotes){ + extraspace = (max_name_length+2) - strlen ("name"); + } else { + extraspace = max_name_length - strlen ("name"); + } + for (n = 0; n < extraspace; ++n) + fprintf (fp, " "); + fprintf (fp, " on processes threads count"); + fprintf (fp, " walltotal wallmax (proc thrd ) wallmin (proc thrd )"); + + for (n = 0; n < nevents; ++n) { + fprintf (fp, " %8.8stotal", eventlist[n].str8); + fprintf (fp, " %8.8smax (proc thrd )", eventlist[n].str8); + fprintf (fp, " %8.8smin (proc thrd )", eventlist[n].str8); + } + + fprintf (fp, "\n"); + + x = 0; + for( k = 0; k < count; k++ ) { + + /* Print the results for this timer */ + memset( tempname, 0, (MAX_CHARS + 1) * sizeof(char) ); + memcpy( tempname, timerlist[0] + x, (MAX_CHARS + 1) * sizeof(char) ); + + x += (MAX_CHARS + 1); + if (dopr_quotes){ + fprintf (fp, "\"%s\"", tempname); + } else { + fprintf (fp, "%s", tempname); + } + extraspace = max_name_length - strlen (tempname); + for (n = 0; n < extraspace; ++n) + fprintf (fp, " "); + if (storage[k].onflgs > 0) + fprintf (fp, " y "); + else + fprintf (fp, " - "); + temp = storage[k].count; + fprintf(fp, " %8d %8d %12.6e ", + storage[k].processes, storage[k].threads, temp); + fprintf (fp, " %12.6e %9.3f (%6d %6d) %9.3f (%6d %6d)", + storage[k].walltotal, + storage[k].wallmax, storage[k].wallmax_p, storage[k].wallmax_t, + storage[k].wallmin, storage[k].wallmin_p, storage[k].wallmin_t); +#ifdef HAVE_PAPI + for (n = 0; n < nevents; ++n) { + fprintf (fp, " %12.6e", storage[k].papitotal[n]); + + fprintf (fp, " %9.3e (%6d %6d)", + storage[k].papimax[n], storage[k].papimax_p[n], + storage[k].papimax_t[n]); + + fprintf (fp, " %9.3e (%6d %6d)", + storage[k].papimin[n], storage[k].papimin_p[n], + storage[k].papimin_t[n]); + } +#endif + fprintf 
(fp, "\n"); + } + + fprintf (fp, "\n"); + free(tempname); + + } + else { /* iam != 0 (slave) */ +#ifdef HAVE_MPI + /* count number of timers from linked list */ + count = merge_thread_data(); + + /*allocate storage for data for all timers */ + if( !( storage = malloc( sizeof(Summarystats) * count ) ) && count ) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + if ( (ret = collect_data( iam, comm, &count, &storage ) ) != 0 ) + return GPTLerror ("%s: slave collect_data failed\n", thisfunc); +#endif + } + + free(timerlist[0]); + free(timerlist); + free(storage); + if (iam == 0 && fclose (fp) != 0) + fprintf (stderr, "%s: Attempt to close %s failed\n", thisfunc, outfile); + return 0; +} + +/* +** merge_thread_data: returns number of events in merged list +*/ + +static int merge_thread_data() +{ + int n, k, x; /*counters*/ + int t; /*current thread*/ + int num_newtimers; + int compare; + int *count; + int max_count; /* largest number of timers among non-thread-0 threads */ + char **newtimers; + int length = MAX_CHARS + 1; + char ***sort; + int count_r; /* count to be returned, allows *count to be free()ed */ + Timer *ptr; + + static const char *thisfunc = "merge_thread_data"; + + if( nthreads == 1 ) { /* merging is not needed since only 1 thread */ + + /* count timers for thread 0 */ + count_r = 0; + for (ptr = timers[0]->next; ptr; ptr = ptr->next) count_r++; + + timerlist = (char **) GPTLallocate( sizeof (char *)); + if( !( timerlist[0] = (char *)malloc( count_r * length * sizeof (char)) ) && count_r) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + x = 0; + for (ptr = timers[0]->next; ptr; ptr = ptr->next) { + strcpy((timerlist[0] + x), ptr->name); + x += length; + } + + return count_r; + + } + + timerlist = (char **) GPTLallocate( nthreads * sizeof (char *)); + count = (int *) GPTLallocate( nthreads * sizeof (int)); + sort = (char ***) GPTLallocate( nthreads * sizeof (void *)); + + max_count = 0; + for (t = 0; t < nthreads; t++) { + + /* count timers for thread */ + count[t] = 0; + for (ptr = timers[t]->next; ptr; ptr = ptr->next) count[t]++; + + if( count[t] > max_count || max_count == 0 ) max_count = count[t]; + + if( !( sort[t] = (char **)malloc( count[t] * sizeof (char *)) ) && count[t]) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + /* allocate memory to hold list of timer names */ + if( !( timerlist[t] = (char *)malloc( length * count[t] * sizeof (char)) ) && count[t]) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + memset( timerlist[t], 0, length * count[t] * sizeof (char) ); + + x = 0; + for (ptr = timers[t]->next; ptr; ptr = ptr->next) { + strcpy((timerlist[t] + x), ptr->name); + x += length; + } + + x = 0; + for (k = 0; k < count[t]; k++) { + sort[t][k] = timerlist[t] + x; + x += length; + } + + qsort( sort[t], count[t], sizeof (char *), cmp ); + + } + + if( !( newtimers = (char **)malloc( max_count * sizeof (char *)) ) && max_count) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + for (t = 1; t < nthreads; t++) { + memset( newtimers, 0, max_count * sizeof (char *) ); + k = 0; + n = 0; + num_newtimers = 0; + while( k < count[0] && n < count[t] ) { + /* linear comparison of timers */ + compare = strcmp( sort[0][k], sort[t][n] ); + + if( compare == 0 ) { + /* both have, nothing needs to be done */ + k++; + n++; + continue; + } + + if( compare < 0 ) { + /* event that only master has, nothing needs to be done */ + k++; + continue; + } + + if( compare > 0 ) { + /* event that only slave 
thread has, need to add */ + newtimers[num_newtimers] = sort[t][n]; + n++; + num_newtimers++; + } + } + + while( n < count[t] ) { + /* adds any remaining timers, since we know that all the rest + are new since have checked all master thread timers */ + newtimers[num_newtimers] = sort[t][n]; + num_newtimers++; + n++; + } + + if( num_newtimers ) { + /* sorts by memory address to restore original order */ + qsort( newtimers, num_newtimers, sizeof(char*), ncmp ); + + /* reallocate memory to hold additional timers */ + if( !( sort[0] = realloc( sort[0], (count[0] + num_newtimers) * sizeof (char *)) ) ) + return GPTLerror ("%s: memory reallocation failed\n", thisfunc); + if( !(timerlist[0] = realloc(timerlist[0], length * (count[0] + num_newtimers) * sizeof (char)) ) ) + return GPTLerror ("%s: memory reallocation failed\n", thisfunc); + + k = count[0]; + for (n = 0; n < num_newtimers; n++) { + /* add new found timers */ + memcpy( timerlist[0] + (count[0] + n) * length, newtimers[n], length * sizeof (char) ); + } + + count[0] += num_newtimers; + + /* reassign pointers in sort since realloc will have broken them if it moved the memory. */ + x = 0; + for (k = 0; k < count[0]; k++) { + sort[0][k] = timerlist[0] + x; + x += length; + } + + qsort( sort[0], count[0], sizeof (char *), cmp ); + } + } + + free(newtimers); + free(sort[0]); + /* don't free timerlist[0], since needed for subsequent steps in gathering global statistics */ + for (t = 1; t < nthreads; t++) { + free(sort[t]); + free(timerlist[t]); + } + + free(sort); + count_r = count[0]; + free(count); + + return count_r; +} + +/* +** collect data: compute global stats using tree reduction algorithm +** returns pointer to new summarystats list +** +** Input arguments: +** iam: process id +** comm: MPI communicator +** Input/Output arguments: +** summarystats: max/min/etc stats over all processes and threads +** count: number of events +** timerlist: list of all timer names (global variable) +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +#ifdef HAVE_MPI +static int collect_data(const int iam, + MPI_Comm comm, + int *count, + Summarystats **summarystats_cumul ) +#else +static int collect_data(const int iam, + int comm, + int *count, + Summarystats **summarystats_cumul ) +#endif +{ + int step; /* spacing beween active processes */ + int mstep; /* spacing between active masters */ + int procid; /* process to communicate with */ + int ret; + int nproc; + int signal = 1; + int x, k, n; /* counters */ + char *tempname; + int s = (MAX_CHARS + 1 ); /* spacing between timer names */ + int length = MAX_CHARS + 1; + int compare; + int num_newtimers; + int count_slave; + char *timers_slave; /* slave timerlist */ + char **newtimers; + char **sort_slave; /* slave sorted list */ + char **sort_master; /* master sorted list */ + int m_index, s_index; + Summarystats *summarystats; /* stats collected on master */ + + static const char *thisfunc = "collect_data"; + +#ifdef HAVE_MPI + Summarystats *summarystats_slave; /* stats sent to master */ + const int taga = 99; + const int tagb = 100; + const int tagc = 101; + MPI_Status status; + MPI_Request rcvreq1; + MPI_Request rcvreq2; + MPI_Request rcvreq3; + + if ((ret = MPI_Comm_size (comm, &nproc)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Comm_size=%d\n", thisfunc, iam, ret); + +#endif + + summarystats = *summarystats_cumul; + + if (!( tempname = (char*)malloc((MAX_CHARS +1) * sizeof(char) ) )) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + x = 0; + 
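+
+  /*
+  ** First compute this rank's own per-timer stats: for each name in
+  ** timerlist[0], get_threadstats() takes max/min over this process's
+  ** threads, before any data is exchanged over MPI below.
+  */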
for (k = 0; k < *count; k++) { + memcpy( tempname, timerlist[0] + x, (MAX_CHARS + 1) * sizeof (char) ); + /* calculate individual stats */ + get_threadstats( iam, tempname, &summarystats[k]); + x += (MAX_CHARS + 1); + } + +#ifdef HAVE_MPI + step = 1; + mstep = 2; + while( step < nproc ) { + + if ((iam % mstep) == 0) { + /* find new masters at the current level, which are at every n*step starting with 0 */ + + procid = iam + step; + if (procid < nproc) { + /* prevent lone master wanting data from nonexistent process problem */ + + /* prepare for receive */ + if ((ret = MPI_Irecv (&count_slave, 1, MPI_INTEGER, procid, taga, comm, &rcvreq2)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Irecv=%d\n", thisfunc, iam, ret); + + /* handshake with slave */ + if ((ret = MPI_Send (&signal, 1, MPI_INTEGER, procid, taga, comm)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); + + /* wait for message from slave */ + if ((ret = MPI_Wait (&rcvreq2, MPI_STATUS_IGNORE)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Wait=%d\n", thisfunc, iam, ret); + + if (count_slave != 0) { /* if slave had no events, then nothing needs to be done*/ + + if (!(sort_master = (char **) malloc( (*count) * sizeof (char *) ) ) && (*count)) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + if (!(newtimers = (char **) malloc( count_slave * sizeof (char *) ) )) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + if (!(sort_slave = (char **) malloc( count_slave * sizeof (char *) ) )) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + if (!(summarystats_slave = (Summarystats *) malloc( count_slave * sizeof (Summarystats) ) )) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + if (!(timers_slave = (char *) malloc( count_slave * (MAX_CHARS + 1) * sizeof (char) ) )) + return GPTLerror ("%s: memory allocation failed\n", thisfunc); + + if ((ret = MPI_Irecv (timers_slave, count_slave * (MAX_CHARS + 1), MPI_CHAR, procid, tagb, comm, &rcvreq3)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Irecv=%d\n", thisfunc, iam, ret); + if ((ret = MPI_Irecv (summarystats_slave, count_slave * sizeof(Summarystats), MPI_BYTE, procid, tagc, comm, &rcvreq1)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Irecv=%d\n", thisfunc, iam, ret); + if ((ret = MPI_Send (&signal, 1, MPI_INT, procid, tagb, comm)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); + if ((ret = MPI_Wait (&rcvreq1, MPI_STATUS_IGNORE)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Wait=%d\n", thisfunc, iam, ret); + if ((ret = MPI_Wait (&rcvreq3, MPI_STATUS_IGNORE)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Wait=%d\n", thisfunc, iam, ret); + + x = 0; + for (k = 0; k < count_slave; k++) { + sort_slave[k] = timers_slave + x; + x += MAX_CHARS + 1; + } + x = 0; + for (k = 0; k < *count; k++) { + sort_master[k] = timerlist[0] + x; + x += MAX_CHARS + 1; + } + + qsort(sort_master, *count, sizeof(char*), cmp); + qsort(sort_slave, count_slave, sizeof(char*), cmp); + + num_newtimers = 0; + n = 0; + k = 0; + while (k < *count && n < count_slave) + { + compare = strcmp(sort_master[k], sort_slave[n]); + + if (compare == 0) { + /* matching timers found */ + + /* find element number of the name in original timerlist so that it can be matched with its summarystats */ + m_index = get_index( timerlist[0], 
sort_master[k] ); + + s_index = get_index( timers_slave, sort_slave[n] ); + get_summarystats (&summarystats[m_index], &summarystats_slave[s_index]); + k++; + n++; + continue; + } + + if (compare > 0) { + /* s1 >s2 . slave has event; master does not */ + newtimers[num_newtimers] = sort_slave[n]; + num_newtimers++; + n++; + continue; + } + + if (compare < 0) /* only master has event; nothing needs to be done */ + k++; + } + + while (n < count_slave) { + /* add all remaining timers which only the slave has */ + newtimers[num_newtimers] = sort_slave[n]; + num_newtimers++; + n++; + } + + /* sort by memory address to get original order */ + qsort (newtimers, num_newtimers, sizeof(char*), ncmp); + + /* reallocate to hold new timer names and summary stats from slave */ + if (!(timerlist[0] = realloc( timerlist[0], length * (*count + num_newtimers) * sizeof (char) ) )) + return GPTLerror ("%s: memory reallocation failed\n", thisfunc); + if (!(summarystats = realloc( summarystats, (*count + count_slave ) * sizeof (Summarystats) ) )) + return GPTLerror ("%s: memory reallocation failed\n", thisfunc); + + k = *count; + x = *count * (MAX_CHARS + 1); + for (n = 0; n < num_newtimers; n++) { + /* copy new timers names and new timer data */ + memcpy(timerlist[0] + x, newtimers[n], length * sizeof (char)); + s_index = get_index( timers_slave, newtimers[n] ); + memcpy(&summarystats[k], &summarystats_slave[s_index], sizeof (Summarystats)); + k++; + x += MAX_CHARS + 1; + } + *count += num_newtimers; + + free(timers_slave); + free(summarystats_slave); + free(newtimers); + free(sort_slave); + free(sort_master); + } + + } + + } + else if ( (iam % step) == 0 ) { + /* non masters send data */ + + procid = iam - step; + + /* wait for ready signal from master */ + if ((ret = MPI_Recv (&signal, 1, MPI_INTEGER, procid, taga, comm, MPI_STATUS_IGNORE)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Recv=%d\n", thisfunc, iam, ret); + + if ((ret = MPI_Send (count, 1, MPI_INTEGER, procid, taga, comm)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); + + if ( count != 0) { + if ((ret = MPI_Recv (&signal, 1, MPI_INTEGER, procid, tagb, comm, MPI_STATUS_IGNORE)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Recv=%d\n", thisfunc, iam, ret); + if ((ret = MPI_Send (timerlist[0], (*count) * (MAX_CHARS + 1), MPI_CHAR, procid, tagb, comm)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); + if ((ret = MPI_Send (summarystats, (*count) * sizeof(Summarystats), MPI_BYTE, procid, tagc, comm)) != MPI_SUCCESS) + return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); + } + free(tempname); + *summarystats_cumul = summarystats; + return 0; + + } + + step = mstep; + mstep = 2 * mstep; + + } + +#endif + + free(tempname); + *summarystats_cumul = summarystats; + return 0; +} + +/* +** get_index: calculates the index number of an element in a list +** based on the start memory address and memory address of the element +** where each element is MAX_CHARS+1 long +** +** Input arguments: +** list: start address of list +** element: start address of element +** +** Return value: index of element in list +*/ + +int get_index( const char * list, + const char * element ) +{ + return (( element - list ) / ( MAX_CHARS + 1 )); +} + + +/* +** cmp: returns value from strcmp. 
for use with qsort +*/ + +static int cmp(const void *pa, const void *pb) +{ + const char** x = (const char**)pa; + const char** y = (const char**)pb; + return strcmp(*x, *y); +} + + +/* +** ncmp: compares values of memory adresses pointed to by a pointer. for use with qsort +*/ + +static int ncmp( const void *pa, const void *pb ) +{ + static const char *thisfunc = "GPTLsetoption"; + const char** x = (const char**)pa; + const char** y = (const char**)pb; + + if( *x > *y ) + return 1; + if( *x < *y ) + return -1; + if( *x == *y ) + GPTLerror("%s: shared memory address between timers\n", thisfunc); +} + +/* +** get_threadstats: gather stats for timer "name" over all threads +** +** Input arguments: +** iam: MPI process id +** name: timer name +** Output arguments: +** summarystats: max/min stats over all threads +*/ + +void get_threadstats (const int iam, + const char *name, + Summarystats *summarystats) +{ +#ifdef HAVE_PAPI + int n; /* event index */ +#endif + int t; /* thread index */ + unsigned int indx; /* returned from getentry() */ + Timer *ptr; /* timer */ + + /* + ** This memset fortuitiously initializes the process values (_p) to master (0) + */ + + memset (summarystats, 0, sizeof (Summarystats)); + + summarystats->wallmax_p = iam; + summarystats->wallmin_p = iam; + + for (t = 0; t < nthreads; ++t) { + if ((ptr = getentry (hashtable[t], name, &indx))) { + + if (ptr->onflg) + summarystats->onflgs++; + + if (ptr->count > 0) { + summarystats->threads++; + summarystats->walltotal += ptr->wall.accum; + } + summarystats->count += ptr->count; + + if (ptr->wall.accum > summarystats->wallmax) { + summarystats->wallmax = ptr->wall.accum; + summarystats->wallmax_t = t; + } + + if (ptr->wall.accum < summarystats->wallmin || summarystats->wallmin == 0.) { + summarystats->wallmin = ptr->wall.accum; + summarystats->wallmin_t = t; + } +#ifdef HAVE_PAPI + for (n = 0; n < nevents; ++n) { + double value; + if (GPTL_PAPIget_eventvalue (eventlist[n].namestr, &ptr->aux, &value) != 0) { + fprintf (stderr, "Bad return from GPTL_PAPIget_eventvalue\n"); + return; + } + summarystats->papimax_p[n] = iam; + summarystats->papimin_p[n] = iam; + + if (value > summarystats->papimax[n]) { + summarystats->papimax[n] = value; + summarystats->papimax_t[n] = t; + } + + if (value < summarystats->papimin[n] || summarystats->papimin[n] == 0.) 
{ + summarystats->papimin[n] = value; + summarystats->papimin_t[n] = t; + } + summarystats->papitotal[n] += value; + } +#endif + } + } + if ( summarystats->count ) summarystats->processes = 1; +} + +/* +** get_summarystats: write max/min stats into mpistats based on comparison +** with summarystats_slave +** +** Input arguments: +** summarystats_slave: stats from a slave process +** Input/Output arguments: +** summarystats: stats (starts out as master stats) +*/ + +void get_summarystats (Summarystats *summarystats, + const Summarystats *summarystats_slave) +{ + if (summarystats_slave->count == 0) return; + + if (summarystats_slave->wallmax > summarystats->wallmax) { + summarystats->wallmax = summarystats_slave->wallmax; + summarystats->wallmax_p = summarystats_slave->wallmax_p; + summarystats->wallmax_t = summarystats_slave->wallmax_t; + } + + if ((summarystats_slave->wallmin < summarystats->wallmin) || + (summarystats->count == 0)){ + summarystats->wallmin = summarystats_slave->wallmin; + summarystats->wallmin_p = summarystats_slave->wallmin_p; + summarystats->wallmin_t = summarystats_slave->wallmin_t; + } + +#ifdef HAVE_PAPI + { + int n; + for (n = 0; n < nevents; ++n) { + if (summarystats_slave->papimax[n] > summarystats->papimax[n]) { + summarystats->papimax[n] = summarystats_slave->papimax[n]; + summarystats->papimax_p[n] = summarystats_slave->papimax_p[n]; + summarystats->papimax_t[n] = summarystats_slave->papimax_t[n]; + } + + if ((summarystats_slave->papimin[n] < summarystats->papimin[n]) || + (summarystats->count == 0)){ + summarystats->papimin[n] = summarystats_slave->papimin[n]; + summarystats->papimin_p[n] = summarystats_slave->papimin_p[n]; + summarystats->papimin_t[n] = summarystats_slave->papimin_t[n]; + } + summarystats->papitotal[n] += summarystats_slave->papitotal[n]; + } + } +#endif + + summarystats->onflgs += summarystats_slave->onflgs; + summarystats->count += summarystats_slave->count; + summarystats->walltotal += summarystats_slave->walltotal; + summarystats->processes += summarystats_slave->processes; + summarystats->threads += summarystats_slave->threads; +} + +/* +** GPTLbarrier: When MPI enabled, set and time an MPI barrier +** +** Input arguments: +** comm: commuicator (e.g. MPI_COMM_WORLD). If zero, use MPI_COMM_WORLD +** name: region name +** +** Return value: 0 (success) +*/ + +#ifdef HAVE_MPI +int GPTLbarrier (MPI_Comm comm, const char *name) +#else +int GPTLbarrier (int comm, const char *name) +#endif +{ + int ret; + static const char *thisfunc = "GPTLbarrier"; + + ret = GPTLstart (name); +#ifdef HAVE_MPI + if ((ret = MPI_Barrier (comm)) != MPI_SUCCESS) + return GPTLerror ("%s: Bad return from MPI_Barrier=%d", thisfunc, ret); +#endif + ret = GPTLstop (name); + return 0; +} + +/* +** get_cpustamp: Invoke the proper system timer and return stats. +** +** Output arguments: +** usr: user time +** sys: system time +** +** Return value: 0 (success) +*/ + +static inline int get_cpustamp (long *usr, long *sys) +{ +#ifdef HAVE_TIMES + struct tms buf; + + (void) times (&buf); + *usr = buf.tms_utime; + *sys = buf.tms_stime; + return 0; +#else + return GPTLerror ("get_cpustamp: times() not available\n"); +#endif +} + +/* +** GPTLquery: return current status info about a timer. If certain stats are not +** enabled, they should just have zeros in them. If PAPI is not enabled, input +** counter info is ignored. 
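+**
+** Illustrative call (hypothetical timer name; assumes the timer was created
+** earlier via GPTLstart/GPTLstop and that no PAPI counters are requested):
+**   GPTLquery ("dyn_core", -1, &count, &onflg, &wall, &usr, &sys, papi, 0);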
+** +** Input args: +** timername: timer name +** maxcounters: max number of PAPI counters to get info for +** t: thread number (if < 0, the request is for the current thread) +** +** Output args: +** count: number of times this timer was called +** onflg: whether timer is currently on +** wallclock: accumulated wallclock time +** usr: accumulated user CPU time +** sys: accumulated system CPU time +** papicounters_out: accumulated PAPI counters +*/ + +int GPTLquery (const char *timername, + int t, + int *count, + int *onflg, + double *wallclock, + double *dusr, + double *dsys, + long long *papicounters_out, + const int maxcounters) +{ + Timer *ptr; /* linked list pointer */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + unsigned int indx; /* linked list index returned from getentry (unused) */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLquery"; + + if ( ! initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + /* + ** If t is < 0, assume the request is for the current thread + */ + + if (t < 0) { + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: get_thread_num failure\n", thisfunc); + } else { + if (t >= maxthreads) + return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + } + + ptr = getentry (hashtable[t], name, &indx); + if ( !ptr) + return GPTLerror ("%s: requested timer %s does not have a name hash\n", thisfunc, name); + + *onflg = ptr->onflg; + *count = ptr->count; + *wallclock = ptr->wall.accum; + *dusr = ptr->cpu.accum_utime / (double) ticks_per_sec; + *dsys = ptr->cpu.accum_stime / (double) ticks_per_sec; +#ifdef HAVE_PAPI + GPTL_PAPIquery (&ptr->aux, papicounters_out, maxcounters); +#endif + return 0; +} + +/* +** GPTLquerycounters: return current PAPI counters for a timer. +** THIS ROUTINE ID DEPRECATED. USE GPTLget_eventvalue() instead +** +** Input args: +** timername: timer name +** t: thread number (if < 0, the request is for the current thread) +** +** Output args: +** papicounters_out: accumulated PAPI counters +*/ + +int GPTLquerycounters (const char *timername, + int t, + long long *papicounters_out) +{ + Timer *ptr; /* linked list pointer */ + unsigned int indx; /* hash index returned from getentry */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLquery_counters"; + + if ( ! 
initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + /* + ** If t is < 0, assume the request is for the current thread + */ + + if (t < 0) { + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: get_thread_num failure\n", thisfunc); + } else { + if (t >= maxthreads) + return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + } + + ptr = getentry (hashtable[t], name, &indx); + if ( !ptr) + return GPTLerror ("%s: requested timer %s does not have a name hash\n", thisfunc, name); + +#ifdef HAVE_PAPI + /* The 999 is a hack to say "give me all the counters" */ + GPTL_PAPIquery (&ptr->aux, papicounters_out, 999); +#endif + return 0; +} + +/* +** GPTLget_wallclock: return wallclock accumulation for a timer. +** +** Input args: +** timername: timer name +** t: thread number (if < 0, the request is for the current thread) +** +** Output args: +** value: current wallclock accumulation for the timer +*/ + +int GPTLget_wallclock (const char *timername, + int t, + double *value) +{ + void *self; /* timer address when hash entry generated with *_instr */ + Timer *ptr; /* linked list pointer */ + unsigned int indx; /* hash index returned from getentry (unused) */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLget_wallclock"; + + if ( ! initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + if ( ! wallstats.enabled) + return GPTLerror ("%s: wallstats not enabled\n", thisfunc); + + /* + ** If t is < 0, assume the request is for the current thread + */ + + if (t < 0) { + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + } else { + if (t >= maxthreads) + return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. 
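+ ** (Illustrative, hypothetical prefix: if the active prefix were "LND:", a query
+ ** for timer "driver" would be looked up under the name "LND:driver".)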
+ */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + } + + /* + ** Don't know whether hashtable entry for timername was generated with + ** *_instr() or not, so try both possibilities + */ + + ptr = getentry (hashtable[t], name, &indx); + if ( !ptr) { + if (sscanf (timername, "%lx", (unsigned long *) &self) < 1) + return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); + ptr = getentry_instr (hashtable[t], self, &indx); + if ( !ptr) + return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); + } + + *value = ptr->wall.accum; + return 0; +} + +/* +** GPTLstartstop_vals: create/add walltime and call count to an event timer +** +** Input arguments: +** timername: timer name +** add_time: value to add to the walltime accumulator +** add_count: value to add to the call counter +** +** Return value: 0 (success) or -1 (failure) +*/ + +int GPTLstartstop_vals (const char *timername, /* timer name */ + double add_time, /* walltime increment */ + int add_count) /* call count increment */ +{ + Timer *ptr; /* linked list pointer */ + int t; /* thread number for this process */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + unsigned int indx; /* index into hash table */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLstartstop_vals"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + if ( ! wallstats.enabled) + return GPTLerror ("%s: wallstats must be enabled to call this function\n", thisfunc); + + if (add_time < 0.) + return GPTLerror ("%s: Input add_time must not be negative\n", thisfunc); + + /* getentry requires the thread number */ + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + } + + /* Find out if the timer already exists */ + ptr = getentry (hashtable[t], name, &indx); + + if (ptr) { + /* + ** The timer already exists. If add_count is > 0, then increment the + ** count and update the time stamp. Then let control jump to the point where + ** wallclock settings are adjusted. + */ + if (add_count > 0){ + ptr->count += add_count; + ptr->wall.last = (*ptr2wtimefunc) (); + } + } else { + /* Need to call start/stop to set up linked list and hash table. */ + if (GPTLstart (timername) != 0) + return GPTLerror ("%s: Error from GPTLstart\n", thisfunc); + + if (GPTLstop (timername) != 0) + return GPTLerror ("%s: Error from GPTLstop\n", thisfunc); + + /* start/stop pair just called should guarantee ptr will be found */ + if ( ! (ptr = getentry (hashtable[t], name, &indx))) + return GPTLerror ("%s: Unexpected error from getentry\n", thisfunc); + + /* + ** If add_count >= 0, then set count to desired value. + ** Otherwise, assume add_count == 0 and set count to 0. 
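+ ** (Illustrative, hypothetical values: a first call such as
+ ** GPTLstartstop_vals ("read_restart", 0.35, 1) leaves the new timer with
+ ** count = 1 and roughly 0.35 s accumulated, less the overhead of the
+ ** internal start/stop pair subtracted just below.)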
+ */ + if (add_count >= 0){ + ptr->count = add_count; + } else { + ptr->count = 0; + } + + /* Since this is the first call, set max and min to user input. */ + ptr->wall.max = add_time; + + ptr->wall.prev_min = FLT_MAX; + ptr->wall.min = add_time; + ptr->wall.latest_is_min = 1; + + /* + ** Minor mod: Subtract the overhead of the above start/stop call, before + ** adding user input + */ + ptr->wall.accum -= ptr->wall.latest; + + /* Then set latest to zero, so that update below is correct */ + ptr->wall.latest = 0.0; + + } + + /* Update accum with user input */ + ptr->wall.accum += add_time; + + /* + ** Update latest with user input: + ** If add_count > 0 and old count > 0 (new count > add_count), + ** assume new event time is the average (add_time/add_count). + ** If add_count > 0 and old count = 0 (new count == add_count), + ** assume new event time is the augmented average + ** ((latest value + add_time)/add_count). + ** If add_count == 0, new event time is latest value + add_time. + */ + if (add_count > 0){ + if (ptr->count > add_count) + ptr->wall.latest = add_time/add_count; + else + ptr->wall.latest = (ptr->wall.latest+add_time)/add_count; + } else { + ptr->wall.latest += add_time; + } + + /* Update max with user input */ + if (ptr->wall.latest > ptr->wall.max) + ptr->wall.max = ptr->wall.latest; + + /* Update min with user input */ + if ((ptr->count <= 1) || (add_count == ptr->count)) { + /* + ** still recording walltime for first occurrence, + ** so assign latest estimate to min and prev_min + */ + ptr->wall.min = ptr->wall.latest; + ptr->wall.latest_is_min = 1; + } else { + if (add_count > 0){ + /* check whether latest is the new min */ + if (ptr->wall.latest < ptr->wall.min){ + ptr->wall.prev_min = ptr->wall.min; + ptr->wall.min = ptr->wall.latest; + ptr->wall.latest_is_min = 1; + } else { + ptr->wall.latest_is_min = 0; + } + } else { + /* + ** still recording walltime for latest occurrence, + ** so check whether updated latest is the new min. + */ + if (ptr->wall.latest_is_min == 1){ + if (ptr->wall.prev_min > ptr->wall.latest){ + ptr->wall.min = ptr->wall.latest; + } else { + ptr->wall.min = ptr->wall.prev_min; + ptr->wall.latest_is_min = 0; + } + } + } + } + + return 0; +} + +/* +** GPTLstartstop_valsf: create/add walltime and call count to an event timer. +** Version for when timer name may not be null terminated. +** +** Input arguments: +** timername: timer name +** namelen: number of characters in timer name +** add_time: value to add to the walltime accumulator +** add_count: value to add to the call counter +** +** Return value: 0 (success) or -1 (failure) +*/ + +int GPTLstartstop_valsf (const char *timername, /* timer name */ + const int namelen, /* timer name length */ + double add_time, /* walltime increment */ + int add_count) /* call count increment */ +{ + Timer *ptr; /* linked list pointer */ + int t; /* thread number for this process */ + int numchars; /* number of characters to copy */ + unsigned int indx; /* index into hash table */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLstartstop_valsf"; + + if (disabled) + return 0; + + if ( ! initialized) + return 0; + + if ( ! wallstats.enabled) + return GPTLerror ("%s: wallstats must be enabled to call this function\n", thisfunc); + + if (add_time < 0.) 
+ return GPTLerror ("%s: Input add_time must not be negative\n", thisfunc); + + /* getentry requires the thread number */ + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + numchars = MIN (namelen, MAX_CHARS); + name = timername; + } + + /* Find out if the timer already exists */ + ptr = getentryf (hashtable[t], name, numchars, &indx); + + if (ptr) { + /* + ** The timer already exists. If add_count is > 0, then increment the + ** count and update the time stamp. Then let control jump to the point where + ** wallclock settings are adjusted. + */ + if (add_count > 0){ + ptr->count += add_count; + ptr->wall.last = (*ptr2wtimefunc) (); + } + } else { + /* Need to call start/stop to set up linked list and hash table. */ + if (GPTLstartf (timername, namelen) != 0) + return GPTLerror ("%s: Error from GPTLstart\n", thisfunc); + + if (GPTLstopf (timername, namelen) != 0) + return GPTLerror ("%s: Error from GPTLstop\n", thisfunc); + + /* start/stop pair just called should guarantee ptr will be found */ + if ( ! (ptr = getentryf (hashtable[t], name, numchars, &indx))) + return GPTLerror ("%s: Unexpected error from getentry\n", thisfunc); + + /* + ** If add_count >= 0, then set count to desired value. + ** Otherwise, assume add_count == 0 and set count to 0. + */ + if (add_count >= 0){ + ptr->count = add_count; + } else { + ptr->count = 0; + } + + /* Since this is the first call, set max and min to user input. */ + ptr->wall.max = add_time; + + ptr->wall.prev_min = FLT_MAX; + ptr->wall.min = add_time; + ptr->wall.latest_is_min = 1; + + /* + ** Minor mod: Subtract the overhead of the above start/stop call, before + ** adding user input + */ + ptr->wall.accum -= ptr->wall.latest; + + /* Then set latest to zero, so that update below is correct */ + ptr->wall.latest = 0.0; + + } + + /* Update accum with user input */ + ptr->wall.accum += add_time; + + /* + ** Update latest with user input: + ** If add_count > 0 and old count > 0 (new count > add_count), + ** assume new event time is the average (add_time/add_count). + ** If add_count > 0 and old count = 0 (new count == add_count), + ** assume new event time is the augmented average + ** ((latest value + add_time)/add_count). + ** If add_count == 0, new event time is latest value + add_time. 
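+ ** (Worked example with hypothetical numbers: add_time = 2.0 and add_count = 4
+ ** on a timer whose old count was 8 gives latest = 2.0/4 = 0.5; with an old
+ ** count of 0 it gives latest = (latest + 2.0)/4 instead.)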
+ */ + if (add_count > 0){ + if (ptr->count > add_count) + ptr->wall.latest = add_time/add_count; + else + ptr->wall.latest = (ptr->wall.latest+add_time)/add_count; + } else { + ptr->wall.latest += add_time; + } + + /* Update max with user input */ + if (ptr->wall.latest > ptr->wall.max) + ptr->wall.max = ptr->wall.latest; + + /* Update min with user input */ + if ((ptr->count <= 1) || (add_count == ptr->count)) { + /* + ** still recording walltime for first occurrence, + ** so assign latest estimate to min and prev_min + */ + ptr->wall.min = ptr->wall.latest; + ptr->wall.latest_is_min = 1; + } else { + if (add_count > 0){ + /* check whether latest is the new min */ + if (ptr->wall.latest < ptr->wall.min){ + ptr->wall.prev_min = ptr->wall.min; + ptr->wall.min = ptr->wall.latest; + ptr->wall.latest_is_min = 1; + } else { + ptr->wall.latest_is_min = 0; + } + } else { + /* + ** still recording walltime for latest occurrence, + ** so check whether updated latest is the new min. + */ + if (ptr->wall.latest_is_min == 1){ + if (ptr->wall.prev_min > ptr->wall.latest){ + ptr->wall.min = ptr->wall.latest; + } else { + ptr->wall.min = ptr->wall.prev_min; + ptr->wall.latest_is_min = 0; + } + } + } + } + + return 0; +} + +/* +** GPTLget_eventvalue: return PAPI-based event value for a timer. All values will be +** returned as doubles, even if the event is not derived. +** +** Input args: +** timername: timer name +** eventname: event name (must be currently enabled) +** t: thread number (if < 0, the request is for the current thread) +** +** Output args: +** value: current value of the event for this timer +*/ + +int GPTLget_eventvalue (const char *timername, + const char *eventname, + int t, + double *value) +{ + void *self; /* timer address when hash entry generated with *_instr */ + Timer *ptr; /* linked list pointer */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + unsigned int indx; /* hash index returned from getentry (unused) */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + const char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLget_eventvalue"; + + if ( ! initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + /* + ** If t is < 0, assume the request is for the current thread + */ + + if (t < 0) { + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: get_thread_num failure\n", thisfunc); + } else { + if (t >= maxthreads) + return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. 
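+ ** (Illustrative, hypothetical call, assuming PAPI_FP_INS is among the enabled
+ ** events: GPTLget_eventvalue ("dyn_core", "PAPI_FP_INS", -1, &value) returns
+ ** the accumulated count for that event on timer "dyn_core" as a double.)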
+ */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + } + + /* + ** Don't know whether hashtable entry for timername was generated with + ** *_instr() or not, so try both possibilities + */ + + ptr = getentry (hashtable[t], name, &indx); + if ( !ptr) { + if (sscanf (timername, "%lx", (unsigned long *) &self) < 1) + return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); + ptr = getentry_instr (hashtable[t], self, &indx); + if ( !ptr) + return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); + } + +#ifdef HAVE_PAPI + return GPTL_PAPIget_eventvalue (eventname, &ptr->aux, value); +#else + return GPTLerror ("%s: PAPI not enabled\n", thisfunc); +#endif +} + +/* +** GPTLget_nregions: return number of regions (i.e. timer names) for this thread +** +** Input args: +** t: thread number (if < 0, the request is for the current thread) +** +** Output args: +** nregions: number of regions +*/ + +int GPTLget_nregions (int t, + int *nregions) +{ + Timer *ptr; /* walk through linked list */ + static const char *thisfunc = "GPTLget_nregions"; + + if ( ! initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + /* + ** If t is < 0, assume the request is for the current thread + */ + + if (t < 0) { + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: get_thread_num failure\n", thisfunc); + } else { + if (t >= maxthreads) + return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); + } + + *nregions = 0; + for (ptr = timers[t]->next; ptr; ptr = ptr->next) + ++*nregions; + + return 0; +} + +/* +** GPTLget_regionname: return region name for this thread +** +** Input args: +** t: thread number (if < 0, the request is for the current thread) +** region: region number +** nc: max number of chars to put in name +** +** Output args: +** name region name +*/ + +int GPTLget_regionname (int t, /* thread number */ + int region, /* region number (0-based) */ + char *name, /* output region name */ + int nc) /* number of chars in name (free form Fortran) */ +{ + int ncpy; /* number of characters to copy */ + int i; /* index */ + Timer *ptr; /* walk through linked list */ + static const char *thisfunc = "GPTLget_regionname"; + + if ( ! initialized) + return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); + + /* + ** If t is < 0, assume the request is for the current thread + */ + + if (t < 0) { + if ((t = get_thread_num ()) < 0) + return GPTLerror ("%s: get_thread_num failure\n", thisfunc); + } else { + if (t >= maxthreads) + return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); + } + + ptr = timers[t]->next; + for (i = 0; i < region; i++) { + if ( ! 
ptr) + return GPTLerror ("%s: timer number %d does not exist in thread %d\n", thisfunc, region, t); + ptr = ptr->next; + } + + if (ptr) { + ncpy = MIN (nc, strlen (ptr->name)); + strncpy (name, ptr->name, ncpy); + + /* + ** Adding the \0 is only important when called from C + */ + + if (ncpy < nc) + name[ncpy] = '\0'; + } else { + return GPTLerror ("%s: timer number %d does not exist in thread %d\n", thisfunc, region, t); + } + return 0; +} + +/* +** GPTLis_initialized: Return whether GPTL has been initialized +*/ + +int GPTLis_initialized (void) +{ + return (int) initialized; +} + +/* +** getentry_instr: find hash table entry and return a pointer to it +** +** Input args: +** hashtable: the hashtable (array) +** self: input address (from -finstrument-functions) +** Output args: +** indx: hashtable index +** +** Return value: pointer to the entry, or NULL if not found +*/ + +static inline Timer *getentry_instr (const Hashentry *hashtable, /* hash table */ + void *self, /* address */ + unsigned int *indx) /* hash index */ +{ + int i; + Timer *ptr = 0; /* return value when entry not found */ + + /* + ** Hash index is timer address modulo the table size + ** On most machines, right-shifting the address helps because linkers often + ** align functions on even boundaries + */ + + *indx = (((unsigned long) self) >> 4) % tablesize; + for (i = 0; i < hashtable[*indx].nument; ++i) { + if (hashtable[*indx].entries[i]->address == self) { + ptr = hashtable[*indx].entries[i]; + break; + } + } + return ptr; +} + +/* +** getentry: find the entry in the hash table and return a pointer to it. +** +** Input args: +** hashtable: the hashtable (array) +** name: string to be hashed on (specifically, summed) +** Output args: +** indx: hashtable index +** +** Return value: pointer to the entry, or NULL if not found +*/ + +static inline Timer *getentry (const Hashentry *hashtable, /* hash table */ + const char *name, /* name to hash */ + unsigned int *indx) /* hash index */ +{ + int i; /* multiplier for hashing; loop index */ + const unsigned char *c; /* pointer to elements of "name" */ + Timer *ptr = 0; /* return value when entry not found */ + + /* + ** Hash value is sum of: chars times their 1-based position index, modulo tablesize + */ + + *indx = 0; + c = (unsigned char *) name; + for (i = 1; *c && i < MAX_CHARS+1; ++c, ++i) { + *indx += (*c) * i; + } + + *indx %= tablesize; + + /* + ** If nument exceeds 1 there was a hash collision and we must search + ** linearly through an array for a match + */ + + for (i = 0; i < hashtable[*indx].nument; i++) { + if (STRMATCH (name, hashtable[*indx].entries[i]->name)) { + ptr = hashtable[*indx].entries[i]; + break; + } + } + return ptr; +} + +/* +** getentryf: find the entry in the hash table and return a pointer to it. 
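+** (same hashing scheme as getentry above; e.g., a hypothetical name "ab" hashes
+** to ('a'*1 + 'b'*2) % tablesize = (97 + 2*98) % tablesize = 293 % tablesize)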
+** (variant of getentry where string length is included because string +** may not be null terminated) +** +** Input args: +** hashtable: the hashtable (array) +** name: string to be hashed on (specifically, summed) +** namelen: number of characters in string +** Output args: +** indx: hashtable index +** +** Return value: pointer to the entry, or NULL if not found +*/ + +static inline Timer *getentryf (const Hashentry *hashtable, /* hash table */ + const char *name, /* name to hash */ + const int namelen, /* length of name */ + unsigned int *indx) /* hash index */ +{ + int i; /* multiplier for hashing; loop index */ + int numchars; /* maximum number of characters to examine */ + const unsigned char *c; /* pointer to elements of "name" */ + Timer *ptr = 0; /* return value when entry not found */ + + numchars = MIN (namelen, MAX_CHARS); + + /* + ** Hash value is sum of: chars times their 1-based position index, modulo tablesize + */ + + *indx = 0; + c = (unsigned char *) name; + for (i = 1; i < numchars+1; ++c, ++i) { + *indx += (*c) * i; + } + + *indx %= tablesize; + + /* + ** If nument exceeds 1 there was a hash collision and we must search + ** linearly through an array for a match + */ + + for (i = 0; i < hashtable[*indx].nument; i++) { + if (STRNMATCH (name, hashtable[*indx].entries[i]->name,numchars)) { + ptr = hashtable[*indx].entries[i]; + break; + } + } + return ptr; +} + +/* +** Add entry points for auto-instrumented codes +** Auto instrumentation flags for various compilers: +** +** gcc, pathcc, icc: -finstrument-functions +** pgcc: -Minstrument:functions +** xlc: -qdebug=function_trace +*/ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef AUTO_INST +#ifdef _AIX +void __func_trace_enter (const char *function_name, + const char *file_name, + int line_number, + void **const user_data) +{ + (void) GPTLstart (function_name); +} + +void __func_trace_exit (const char *function_name, + const char *file_name, + int line_number, + void **const user_data) +{ + (void) GPTLstop (function_name); +} + +#else + +void __cyg_profile_func_enter (void *this_fn, + void *call_site) +{ + (void) GPTLstart_instr (this_fn); +} + +void __cyg_profile_func_exit (void *this_fn, + void *call_site) +{ + (void) GPTLstop_instr (this_fn); +} +#endif +#endif + +#ifdef __cplusplus +}; +#endif + +#ifdef HAVE_NANOTIME +#ifdef BIT64 +/* 64-bit code copied from PAPI library */ +static inline unsigned long long nanotime (void) +{ + unsigned long long val; + do { + unsigned int a,d; + asm volatile("rdtsc" : "=a" (a), "=d" (d)); + (val) = ((unsigned long)a) | (((unsigned long)d)<<32); + } while(0); + + return (val); +} +#else +static inline unsigned long long nanotime (void) +{ + unsigned long long val; + __asm__ __volatile__("rdtsc" : "=A" (val) : ); + return (val); +} +#endif + +#define LEN 4096 + +static float get_clockfreq () +{ + FILE *fd = 0; + char buf[LEN]; + int is; + + if ( ! (fd = fopen ("/proc/cpuinfo", "r"))) { + fprintf (stderr, "get_clockfreq: can't open /proc/cpuinfo\n"); + return -1.; + } + + while (fgets (buf, LEN, fd)) { + if (strncmp (buf, "cpu MHz", 7) == 0) { + for (is = 7; buf[is] != '\0' && !isdigit (buf[is]); is++); + if (isdigit (buf[is])) + return (float) atof (&buf[is]); + } + } + + return -1.; +} +#endif + +/* +** The following are the set of underlying timing routines which may or may +** not be available. And their accompanying init routines. +** NANOTIME is currently only available on x86. 
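+**
+** (Illustrative arithmetic: if /proc/cpuinfo reported a hypothetical 2500 MHz
+** clock, init_nanotime would set cyc2sec = 1./(2500.*1.e6) = 4.e-10, so a
+** delta of 1.e9 rdtsc cycles converts to 0.4 seconds.)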
+*/ + +static int init_nanotime () +{ + static const char *thisfunc = "init_nanotime"; +#ifdef HAVE_NANOTIME + if ((cpumhz = get_clockfreq ()) < 0) + return GPTLerror ("%s: Can't get clock freq\n", thisfunc); + + if (verbose) + printf ("%s: Clock rate = %f MHz\n", thisfunc, cpumhz); + + cyc2sec = 1./(cpumhz * 1.e6); + return 0; +#else + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +static inline double utr_nanotime () +{ +#ifdef HAVE_NANOTIME + double timestamp; + timestamp = nanotime () * cyc2sec; + return timestamp; +#else + static const char *thisfunc = "utr_nanotime"; + (void) GPTLerror ("%s: not enabled\n", thisfunc); + return -1.; +#endif +} + +/* +** MPI_Wtime requires the MPI lib. +*/ + +static int init_mpiwtime () +{ +#ifdef HAVE_MPI + return 0; +#else + static const char *thisfunc = "init_mpiwtime"; + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +static inline double utr_mpiwtime () +{ +#ifdef HAVE_MPI + return MPI_Wtime (); +#else + static const char *thisfunc = "utr_mpiwtime"; + (void) GPTLerror ("%s: not enabled\n", thisfunc); + return -1.; +#endif +} + +/* +** PAPI_get_real_usec requires the PAPI lib. +*/ + +static int init_papitime () +{ + static const char *thisfunc = "init_papitime"; +#ifdef HAVE_PAPI + ref_papitime = PAPI_get_real_usec (); + if (verbose) + printf ("%s: ref_papitime=%ld\n", thisfunc, (long) ref_papitime); + return 0; +#else + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +static inline double utr_papitime () +{ +#ifdef HAVE_PAPI + return (PAPI_get_real_usec () - ref_papitime) * 1.e-6; +#else + static const char *thisfunc = "utr_papitime"; + (void) GPTLerror ("%s: not enabled\n", thisfunc); + return -1.; +#endif +} + +/* +** Probably need to link with -lrt for this one to work +*/ + +static int init_clock_gettime () +{ + static const char *thisfunc = "init_clock_gettime"; +#ifdef HAVE_LIBRT + struct timespec tp; + (void) clock_gettime (CLOCK_REALTIME, &tp); + ref_clock_gettime = tp.tv_sec; + if (verbose) + printf ("%s: ref_clock_gettime=%ld\n", thisfunc, (long) ref_clock_gettime); + return 0; +#else + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +static inline double utr_clock_gettime () +{ +#ifdef HAVE_LIBRT + struct timespec tp; + (void) clock_gettime (CLOCK_REALTIME, &tp); + return (tp.tv_sec - ref_clock_gettime) + 1.e-9*tp.tv_nsec; +#else + static const char *thisfunc = "utr_clock_gettime"; + (void) GPTLerror ("%s: not enabled\n", thisfunc); + return -1.; +#endif +} + +/* +** High-res timer on AIX: read_real_time +*/ + +static int init_read_real_time () +{ + static const char *thisfunc = "init_read_real_time"; +#ifdef _AIX + timebasestruct_t ibmtime; + (void) read_real_time (&ibmtime, TIMEBASE_SZ); + (void) time_base_to_time (&ibmtime, TIMEBASE_SZ); + ref_read_real_time = ibmtime.tb_high; + if (verbose) + printf ("%s: ref_read_real_time=%ld\n", thisfunc, (long) ref_read_real_time); + return 0; +#else + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +static inline double utr_read_real_time () +{ +#ifdef _AIX + timebasestruct_t ibmtime; + (void) read_real_time (&ibmtime, TIMEBASE_SZ); + (void) time_base_to_time (&ibmtime, TIMEBASE_SZ); + return (ibmtime.tb_high - ref_read_real_time) + 1.e-9*ibmtime.tb_low; +#else + static const char *thisfunc = "utr_read_real_time"; + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +/* +** Default available most places: gettimeofday +*/ + +static int init_gettimeofday () +{ + static const char *thisfunc = 
"init_gettimeofday"; +#ifdef HAVE_GETTIMEOFDAY + struct timeval tp; + (void) gettimeofday (&tp, 0); + ref_gettimeofday = tp.tv_sec; + if (verbose) + printf ("%s: ref_gettimeofday=%ld\n", thisfunc, (long) ref_gettimeofday); + return 0; +#else + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +static inline double utr_gettimeofday () +{ +#ifdef HAVE_GETTIMEOFDAY + struct timeval tp; + (void) gettimeofday (&tp, 0); + return (tp.tv_sec - ref_gettimeofday) + 1.e-6*tp.tv_usec; +#else + static const char *thisfunc = "utr_gettimeofday"; + return GPTLerror ("%s: not enabled\n", thisfunc); +#endif +} + +/* +** Determine underlying timing routine overhead: call it 1000 times. +*/ + +static double utr_getoverhead () +{ + double val2[1001]; + int i; + + val2[0] = (*ptr2wtimefunc)(); + for (i = 1; i < 1001; ++i) { + val2[i] = (*ptr2wtimefunc)(); + } + return 0.001 * (val2[1000] - val2[0]); +} + +/* +** printself_andchildren: Recurse through call tree, printing stats for self, then children +*/ + +static void printself_andchildren (const Timer *ptr, + FILE *fp, + const int t, + const int depth, + const double tot_overhead) +{ + int n; + + if (depth > -1) /* -1 flag is to avoid printing stats for dummy outer timer */ + printstats (ptr, fp, t, depth, true, tot_overhead); + + for (n = 0; n < ptr->nchildren; n++) + printself_andchildren (ptr->children[n], fp, t, depth+1, tot_overhead); +} + +#ifdef ENABLE_PMPI +/* +** GPTLgetentry: called ONLY from pmpi.c (i.e. not a public entry point). Returns a pointer to the +** requested timer name by calling internal function getentry() +** +** Return value: 0 (NULL) or the return value of getentry() +*/ + +Timer *GPTLgetentry (const char *timername) +{ + int t; /* thread number */ + int numchars; /* number of characters to copy */ + int namelen; /* number of characters in timer name */ + unsigned int indx; /* returned from getentry (unused) */ + char new_name[MAX_CHARS+1]; /* timer name with prefix, if there is one */ + char *name; /* pointer to timer name */ + static const char *thisfunc = "GPTLgetentry"; + + if ( ! initialized) { + (void) GPTLerror ("%s: initialization was not completed\n", thisfunc); + return 0; + } + + if ((t = get_thread_num ()) < 0) { + (void) GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); + return 0; + } + + /* + ** If prefix string is defined, prepend it to timername + ** and assign the name pointer to the new string. + ** Otherwise assign the name pointer to the original string. + */ + + if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ + namelen = strlen(timername); + numchars = add_prefix(new_name, timername, namelen, t); + name = new_name; + } else { + name = timername; + } + + return (getentry (hashtable[t], name, &indx)); +} + +/* +** GPTLpr_file_has_been_called: Called ONLY from pmpi.c (i.e. not a public entry point). Return +** whether GPTLpr_file has been called. MPI_Finalize wrapper needs +** to know whether it needs to call GPTLpr. +*/ + +int GPTLpr_has_been_called (void) +{ + return (int) pr_has_been_called; +} + +#endif + +/*************************************************************************************/ + +/* +** Contents of inserted threadutil.c starts here. +** Moved to gptl.c to enable inlining +*/ + +/* +** $Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $ +** +** Author: Jim Rosinski +** +** Utility functions handle thread-based GPTL needs. 
+*/ + +/* Max allowable number of threads (used only when THREADED_PTHREADS is true) */ +#define MAX_THREADS 128 + +/**********************************************************************************/ +/* +** 3 sets of routines: OMP threading, PTHREADS, unthreaded +*/ + +#if ( defined THREADED_OMP ) + +/* +** threadinit: Allocate and initialize threadid_omp; set max number of threads +** +** Output results: +** maxthreads: max number of threads +** +** threadid_omp[] is allocated and initialized to -1 +** +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +static int threadinit (void) +{ + int t; /* loop index */ + static const char *thisfunc = "threadinit"; + + if (omp_get_thread_num () != 0) + return GPTLerror ("OMP %s: MUST only be called by the master thread\n", thisfunc); + + /* + ** Allocate the threadid array which maps physical thread IDs to logical IDs + ** For OpenMP this will be just threadid_omp[iam] = iam; + */ + + if (threadid_omp) + return GPTLerror ("OMP %s: has already been called.\nMaybe mistakenly called by multiple threads?", + thisfunc); + + /* + ** maxthreads may have been set by the user, in which case use that. But if as + ** yet uninitialized, set to the current value of OMP_NUM_THREADS. + */ + if (maxthreads == -1) + maxthreads = MAX ((1), (omp_get_max_threads ())); + + if ( ! (threadid_omp = (int *) GPTLallocate (maxthreads * sizeof (int)))) + return GPTLerror ("OMP %s: malloc failure for %d elements of threadid_omp\n", thisfunc, maxthreads); + + /* + ** Initialize threadid array to flag values for use by get_thread_num(). + ** get_thread_num() will fill in the values on first use. + */ + + for (t = 0; t < maxthreads; ++t) + threadid_omp[t] = -1; + +#ifdef VERBOSE + printf ("OMP %s: Set maxthreads=%d\n", thisfunc, maxthreads); +#endif + + return 0; +} + +/* +** Threadfinalize: clean up +** +** Output results: +** threadid_omp array is freed and array pointer nullified +*/ + +static void threadfinalize () +{ + free ((void *) threadid_omp); + threadid_omp = 0; +} + +/* +** get_thread_num: Determine thread number of the calling thread +** Start PAPI counters if enabled and first call for this thread. +** +** Output results: +** nthreads: Number of threads (=maxthreads) +** threadid_omp: Our thread id added to list on 1st call +** +** Return value: thread number (success) or GPTLerror (failure) +*/ + +static inline int get_thread_num (void) +{ + int t; /* thread number */ + static const char *thisfunc = "get_thread_num"; + + if ((t = omp_get_thread_num ()) >= maxthreads) + return GPTLerror ("OMP %s: returned id=%d exceeds maxthreads=%d\n", thisfunc, t, maxthreads); + + /* + ** If our thread number has already been set in the list, we are done + */ + + if (t == threadid_omp[t]) + return t; + + /* + ** Thread id not found. Modify threadid_omp with our ID, then start PAPI events if required. + ** Due to the setting of threadid_omp, everything below here will only execute once per thread. + */ + + threadid_omp[t] = t; + +#ifdef VERBOSE + printf ("OMP %s: 1st call t=%d\n", thisfunc, t); +#endif + +#ifdef HAVE_PAPI + + /* + ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, + ** create and start an event set for the new thread. 
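+ ** (Illustrative: with 4 OpenMP threads, each thread reaches this point exactly
+ ** once, because threadid_omp[t] was just set to t above, and each thread gets
+ ** its own event set from GPTLcreate_and_start_events (t).)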
+ */ + + if (GPTLget_npapievents () > 0) { +#ifdef VERBOSE + printf ("OMP %s: Starting EventSet t=%d\n", thisfunc, t); +#endif + + if (GPTLcreate_and_start_events (t) < 0) + return GPTLerror ("OMP %s: error from GPTLcreate_and_start_events for thread %d\n", thisfunc, t); + } +#endif + + /* + ** nthreads = maxthreads based on setting in threadinit + */ + + nthreads = maxthreads; +#ifdef VERBOSE + printf ("OMP %s: nthreads=%d\n", thisfunc, nthreads); +#endif + + return t; +} + +static void print_threadmapping (FILE *fp) +{ + int n; + + fprintf (fp, "\n"); + fprintf (fp, "Thread mapping:\n"); + for (n = 0; n < nthreads; ++n) + fprintf (fp, "threadid_omp[%d] = %d\n", n, threadid_omp[n]); +} + +/* +** serial_region: determine whether in a serial or parallel region +** +** Return value: true (1) or false (0) +*/ + +static int serial_region () +{ + + /* + ** This test is more robust than 'omp_in_parallel', which is true + ** in a parallel region when only one thread is active, which may + ** not be thread 0. Other active thread teams also will not be + ** recognized. + */ + if ( (omp_get_num_threads()==1 ) && ( omp_get_level()==0 ) ){ + return 1; + } else { + return 0; + } + +} + +/**********************************************************************************/ +/* +** PTHREADS +*/ + +#elif ( defined THREADED_PTHREADS ) + +/* +** threadinit: Allocate threadid and initialize to -1; set max number of threads; +** Initialize the mutex for later use; Initialize nthreads to 0 +** +** Output results: +** nthreads: number of threads (init to zero here, increment later in get_thread_num) +** maxthreads: max number of threads (MAX_THREADS) +** +** threadid[] is allocated and initialized to -1 +** mutex is initialized for future use +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +static int threadinit (void) +{ + int t; /* thread number */ + int ret; /* return code */ + static const char *thisfunc = "threadinit"; + + /* + ** The following test is not rock-solid, but it's pretty close in terms of guaranteeing that + ** threadinit gets called by only 1 thread. Problem is, mutex hasn't yet been initialized + ** so we can't use it. + */ + + if (nthreads == -1) + nthreads = 0; + else + return GPTLerror ("PTHREADS %s: has already been called.\n" + "Maybe mistakenly called by multiple threads?\n", thisfunc); + + /* + ** Initialize the mutex required for critical regions. + ** Previously, t_mutex = PTHREAD_MUTEX_INITIALIZER on the static declaration line was + ** adequate to initialize the mutex. But this failed in programs that invoked + ** GPTLfinalize() followed by GPTLinitialize(). + ** "man pthread_mutex_init" indicates that passing NULL as the second argument to + ** pthread_mutex_init() should appropriately initialize the mutex, assuming it was + ** properly destroyed by a previous call to pthread_mutex_destroy(); + */ + +#ifdef MUTEX_API + if ((ret = pthread_mutex_init ((pthread_mutex_t *) &t_mutex, NULL)) != 0) + return GPTLerror ("PTHREADS %s: mutex init failure: ret=%d\n", thisfunc, ret); +#endif + + /* + ** Allocate the threadid array which maps physical thread IDs to logical IDs + */ + + if (threadid) + return GPTLerror ("PTHREADS %s: threadid not null\n", thisfunc); + else if ( ! (threadid = (pthread_t *) GPTLallocate (MAX_THREADS * sizeof (pthread_t)))) + return GPTLerror ("PTHREADS %s: malloc failure for %d elements of threadid\n", thisfunc, MAX_THREADS); + + maxthreads = MAX_THREADS; + + /* + ** Initialize threadid array to flag values for use by get_thread_num(). 
+ ** get_thread_num() will fill in the values on first use. + */ + + for (t = 0; t < maxthreads; ++t) + threadid[t] = (pthread_t) -1; + +#ifdef VERBOSE + printf ("PTHREADS %s: Set maxthreads=%d nthreads=%d\n", thisfunc, maxthreads, nthreads); +#endif + + return 0; +} + +/* +** threadfinalize: Clean up +** +** Output results: +** threadid array is freed and array pointer nullified +** mutex is destroyed +*/ + +static void threadfinalize () +{ + int ret; + +#ifdef MUTEX_API + if ((ret = pthread_mutex_destroy ((pthread_mutex_t *) &t_mutex)) != 0) + printf ("threadfinalize: failed attempt to destroy t_mutex: ret=%d\n", ret); +#endif + free ((void *) threadid); + threadid = 0; +} + +/* +** get_thread_num: Determine zero-based thread number of the calling thread. +** Update nthreads and maxthreads if necessary. +** Start PAPI counters if enabled and first call for this thread. +** +** Output results: +** nthreads: Updated number of threads +** threadid: Our thread id added to list on 1st call +** +** Return value: thread number (success) or GPTLerror (failure) +*/ + +static inline int get_thread_num (void) +{ + int t; /* logical thread number, defined by array index of found threadid */ + pthread_t mythreadid; /* thread id from pthreads library */ + int retval; /* value to return to caller */ + bool foundit = false; /* thread id found in list */ + static const char *thisfunc = "get_thread_num"; + + mythreadid = pthread_self (); + + /* + ** If our thread number has already been set in the list, we are done + ** VECTOR code should run a bit faster on vector machines. + */ +#define VECTOR +#ifdef VECTOR + for (t = 0; t < nthreads; ++t) + if (pthread_equal (mythreadid, threadid[t])) { + foundit = true; + retval = t; + } + + if (foundit) + return retval; +#else + for (t = 0; t < nthreads; ++t) + if (pthread_equal (mythreadid, threadid[t])) + return t; +#endif + + /* + ** Thread id not found. Define a critical region, then start PAPI counters if + ** necessary and modify threadid[] with our id. + */ + + if (lock_mutex () < 0) + return GPTLerror ("PTHREADS %s: mutex lock failure\n", thisfunc); + + /* + ** If our thread id is not in the known list, add to it after checking that + ** we do not have too many threads. + */ + + if (nthreads >= MAX_THREADS) { + if (unlock_mutex () < 0) + fprintf (stderr, "PTHREADS %s: mutex unlock failure\n", thisfunc); + + return GPTLerror ("PTHREADS %s: nthreads=%d is too big. Recompile " + "with larger value of MAX_THREADS\n", thisfunc, nthreads); + } + + threadid[nthreads] = mythreadid; + +#ifdef VERBOSE + printf ("PTHREADS %s: 1st call threadid=%lu maps to location %d\n", + thisfunc, (unsigned long) mythreadid, nthreads); +#endif + +#ifdef HAVE_PAPI + + /* + ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, + ** create and start an event set for the new thread. + */ + + if (GPTLget_npapievents () > 0) { +#ifdef VERBOSE + printf ("PTHREADS get_thread_num: Starting EventSet threadid=%lu location=%d\n", + (unsigned long) mythreadid, nthreads); +#endif + if (GPTLcreate_and_start_events (nthreads) < 0) { + if (unlock_mutex () < 0) + fprintf (stderr, "PTHREADS %s: mutex unlock failure\n", thisfunc); + + return GPTLerror ("PTHREADS %s: error from GPTLcreate_and_start_events for thread %d\n", + thisfunc, nthreads); + } + } +#endif + + /* + ** IMPORTANT to set return value before unlocking the mutex!!!! + ** "return nthreads-1" fails occasionally when another thread modifies + ** nthreads after it gets the mutex! 
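+ ** (Illustrative interleaving: if this thread unlocked first and another thread
+ ** then incremented nthreads, evaluating "nthreads-1" afterwards would return
+ ** the other thread's slot; copying retval while still holding the mutex
+ ** avoids that.)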
+ */ + + retval = nthreads++; + +#ifdef VERBOSE + printf ("PTHREADS get_thread_num: nthreads bumped to %d\n", nthreads); +#endif + + if (unlock_mutex () < 0) + return GPTLerror ("PTHREADS %s: mutex unlock failure\n", thisfunc); + + return retval; +} + +/* +** lock_mutex: lock a mutex for private access +*/ + +static int lock_mutex () +{ + static const char *thisfunc = "lock_mutex"; + + if (pthread_mutex_lock ((pthread_mutex_t *) &t_mutex) != 0) + return GPTLerror ("%s: failure from pthread_lock_mutex\n", thisfunc); + + return 0; +} + +/* +** unlock_mutex: unlock a mutex from private access +*/ + +static int unlock_mutex () +{ + static const char *thisfunc = "unlock_mutex"; + + if (pthread_mutex_unlock ((pthread_mutex_t *) &t_mutex) != 0) + return GPTLerror ("%s: failure from pthread_unlock_mutex\n", thisfunc); + return 0; +} + +static void print_threadmapping (FILE *fp) +{ + int t; + + fprintf (fp, "\n"); + fprintf (fp, "Thread mapping:\n"); + for (t = 0; t < nthreads; ++t) + fprintf (fp, "threadid[%d] = %lu\n", t, (unsigned long) threadid[t]); +} + +/* +** serial_region: determine whether in a serial or parallel region +** +** Not currently implemented (or even defined) when using PTHREADS/ +** It is an error if this is ever called. +** +** Return value: true (1) or false (0) +*/ + +static int serial_region () +{ + static const char *thisfunc = "serial_region"; + + return GPTLerror ("%s: not supported for THREADED_PTHREADS\n", thisfunc); + +} + +/**********************************************************************************/ +/* +** Unthreaded case +*/ + +#else + +static int threadinit (void) +{ + static const char *thisfunc = "threadinit"; + + if (nthreads != -1) + return GPTLerror ("Unthreaded %s: MUST only be called once", thisfunc); + + nthreads = 0; + maxthreads = 1; + return 0; +} + +void threadfinalize () +{ + threadid = -1; +} + +static inline int get_thread_num () +{ + static const char *thisfunc = "get_thread_num"; +#ifdef HAVE_PAPI + /* + ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, + ** create and start an event set for the new thread. 
+ */ + + if (threadid == -1 && GPTLget_npapievents () > 0) { + if (GPTLcreate_and_start_events (0) < 0) + return GPTLerror ("Unthreaded %s: error from GPTLcreate_and_start_events for thread %0\n", thisfunc); + + threadid = 0; + } +#endif + + nthreads = 1; + return 0; +} + +static void print_threadmapping (FILE *fp) +{ + fprintf (fp, "\n"); + fprintf (fp, "threadid[0] = 0\n"); +} + +/* +** serial_region: determine whether in a serial or parallel region +** +** Return value: true (1) or false (0) +*/ + +static int serial_region () +{ + return 1; +} + +#endif diff --git a/src/share/timing/gptl.h b/CIME/non_py/src/timing/gptl.h similarity index 100% rename from src/share/timing/gptl.h rename to CIME/non_py/src/timing/gptl.h diff --git a/src/share/timing/gptl.inc b/CIME/non_py/src/timing/gptl.inc similarity index 100% rename from src/share/timing/gptl.inc rename to CIME/non_py/src/timing/gptl.inc diff --git a/CIME/non_py/src/timing/gptl_papi.c b/CIME/non_py/src/timing/gptl_papi.c new file mode 100644 index 00000000000..1f701cb8976 --- /dev/null +++ b/CIME/non_py/src/timing/gptl_papi.c @@ -0,0 +1,1325 @@ +/* +** $Id: gptl_papi.c,v 1.79 2011-03-28 20:55:19 rosinski Exp $ +** +** Author: Jim Rosinski +** +** Contains routines which interface to PAPI library +*/ + +#include "private.h" +#include "gptl.h" + +#ifdef HAVE_PAPI + +#include +#include +#include +#include + +#if ( defined THREADED_OMP ) +#include +#elif ( defined THREADED_PTHREADS ) +#include +#endif + +/* Mapping of PAPI counters to short and long printed strings */ + +static const Entry papitable [] = { + {PAPI_L1_DCM, "PAPI_L1_DCM", "L1_DCM ", "L1_Dcache_miss ", "Level 1 data cache misses"}, + {PAPI_L1_ICM, "PAPI_L1_ICM", "L1_ICM ", "L1_Icache_miss ", "Level 1 instruction cache misses"}, + {PAPI_L2_DCM, "PAPI_L2_DCM", "L2_DCM ", "L2_Dcache_miss ", "Level 2 data cache misses"}, + {PAPI_L2_ICM, "PAPI_L2_ICM", "L2_ICM ", "L2_Icache_miss ", "Level 2 instruction cache misses"}, + {PAPI_L3_DCM, "PAPI_L3_DCM", "L3_DCM ", "L3_Dcache_miss ", "Level 3 data cache misses"}, + {PAPI_L3_ICM, "PAPI_L3_ICM", "L3_ICM ", "L3_Icache_miss ", "Level 3 instruction cache misses"}, + {PAPI_L1_TCM, "PAPI_L1_TCM", "L1_TCM ", "L1_cache_miss ", "Level 1 total cache misses"}, + {PAPI_L2_TCM, "PAPI_L2_TCM", "L2_TCM ", "L2_cache_miss ", "Level 2 total cache misses"}, + {PAPI_L3_TCM, "PAPI_L3_TCM", "L3_TCM ", "L3_cache_miss ", "Level 3 total cache misses"}, + {PAPI_CA_SNP, "PAPI_CA_SNP", "CA_SNP ", "Snoops ", "Snoops "}, + {PAPI_CA_SHR, "PAPI_CA_SHR", "CA_SHR ", "PAPI_CA_SHR ", "Request for shared cache line (SMP)"}, + {PAPI_CA_CLN, "PAPI_CA_CLN", "CA_CLN ", "PAPI_CA_CLN ", "Request for clean cache line (SMP)"}, + {PAPI_CA_INV, "PAPI_CA_INV", "CA_INV ", "PAPI_CA_INV ", "Request for cache line Invalidation (SMP)"}, + {PAPI_CA_ITV, "PAPI_CA_ITV", "CA_ITV ", "PAPI_CA_ITV ", "Request for cache line Intervention (SMP)"}, + {PAPI_L3_LDM, "PAPI_L3_LDM", "L3_LDM ", "L3_load_misses ", "Level 3 load misses"}, + {PAPI_L3_STM, "PAPI_L3_STM", "L3_STM ", "L3_store_misses ", "Level 3 store misses"}, + {PAPI_BRU_IDL,"PAPI_BRU_IDL","BRU_IDL ", "PAPI_BRU_IDL ", "Cycles branch units are idle"}, + {PAPI_FXU_IDL,"PAPI_FXU_IDL","FXU_IDL ", "PAPI_FXU_IDL ", "Cycles integer units are idle"}, + {PAPI_FPU_IDL,"PAPI_FPU_IDL","FPU_IDL ", "PAPI_FPU_IDL ", "Cycles floating point units are idle"}, + {PAPI_LSU_IDL,"PAPI_LSU_IDL","LSU_IDL ", "PAPI_LSU_IDL ", "Cycles load/store units are idle"}, + {PAPI_TLB_DM, "PAPI_TLB_DM" "TLB_DM ", "Data_TLB_misses ", "Data translation lookaside buffer misses"}, 
+ {PAPI_TLB_IM, "PAPI_TLB_IM", "TLB_IM ", "Inst_TLB_misses ", "Instr translation lookaside buffer misses"}, + {PAPI_TLB_TL, "PAPI_TLB_TL", "TLB_TL ", "Tot_TLB_misses ", "Total translation lookaside buffer misses"}, + {PAPI_L1_LDM, "PAPI_L1_LDM", "L1_LDM ", "L1_load_misses ", "Level 1 load misses"}, + {PAPI_L1_STM, "PAPI_L1_STM", "L1_STM ", "L1_store_misses ", "Level 1 store misses"}, + {PAPI_L2_LDM, "PAPI_L2_LDM", "L2_LDM ", "L2_load_misses ", "Level 2 load misses"}, + {PAPI_L2_STM, "PAPI_L2_STM", "L2_STM ", "L2_store_misses ", "Level 2 store misses"}, + {PAPI_BTAC_M, "PAPI_BTAC_M", "BTAC_M ", "BTAC_miss ", "BTAC miss"}, + {PAPI_PRF_DM, "PAPI_PRF_DM", "PRF_DM ", "PAPI_PRF_DM ", "Prefetch data instruction caused a miss"}, + {PAPI_L3_DCH, "PAPI_L3_DCH", "L3_DCH ", "L3_DCache_Hit ", "Level 3 Data Cache Hit"}, + {PAPI_TLB_SD, "PAPI_TLB_SD", "TLB_SD ", "PAPI_TLB_SD ", "Xlation lookaside buffer shootdowns (SMP)"}, + {PAPI_CSR_FAL,"PAPI_CSR_FAL","CSR_FAL ", "PAPI_CSR_FAL ", "Failed store conditional instructions"}, + {PAPI_CSR_SUC,"PAPI_CSR_SUC","CSR_SUC ", "PAPI_CSR_SUC ", "Successful store conditional instructions"}, + {PAPI_CSR_TOT,"PAPI_CSR_TOT","CSR_TOT ", "PAPI_CSR_TOT ", "Total store conditional instructions"}, + {PAPI_MEM_SCY,"PAPI_MEM_SCY","MEM_SCY ", "Cyc_Stalled_Mem ", "Cycles Stalled Waiting for Memory Access"}, + {PAPI_MEM_RCY,"PAPI_MEM_RCY","MEM_RCY ", "Cyc_Stalled_MemR", "Cycles Stalled Waiting for Memory Read"}, + {PAPI_MEM_WCY,"PAPI_MEM_WCY","MEM_WCY ", "Cyc_Stalled_MemW", "Cycles Stalled Waiting for Memory Write"}, + {PAPI_STL_ICY,"PAPI_STL_ICY","STL_ICY ", "Cyc_no_InstrIss ", "Cycles with No Instruction Issue"}, + {PAPI_FUL_ICY,"PAPI_FUL_ICY","FUL_ICY ", "Cyc_Max_InstrIss", "Cycles with Maximum Instruction Issue"}, + {PAPI_STL_CCY,"PAPI_STL_CCY","STL_CCY ", "Cyc_No_InstrComp", "Cycles with No Instruction Completion"}, + {PAPI_FUL_CCY,"PAPI_FUL_CCY","FUL_CCY ", "Cyc_Max_InstComp", "Cycles with Maximum Instruction Completion"}, + {PAPI_HW_INT, "PAPI_HW_INT", "HW_INT ", "HW_interrupts ", "Hardware interrupts"}, + {PAPI_BR_UCN, "PAPI_BR_UCN", "BR_UCN ", "Uncond_br_instr ", "Unconditional branch instructions executed"}, + {PAPI_BR_CN, "PAPI_BR_CN", "BR_CN ", "Cond_br_instr_ex", "Conditional branch instructions executed"}, + {PAPI_BR_TKN, "PAPI_BR_TKN", "BR_TKN ", "Cond_br_instr_tk", "Conditional branch instructions taken"}, + {PAPI_BR_NTK, "PAPI_BR_NTK", "BR_NTK ", "Cond_br_instrNtk", "Conditional branch instructions not taken"}, + {PAPI_BR_MSP, "PAPI_BR_MSP", "BR_MSP ", "Cond_br_instrMPR", "Conditional branch instructions mispred"}, + {PAPI_BR_PRC, "PAPI_BR_PRC", "BR_PRC ", "Cond_br_instrCPR", "Conditional branch instructions corr. 
pred"}, + {PAPI_FMA_INS,"PAPI_FMA_INS","FMA_INS ", "FMA_instr_comp ", "FMA instructions completed"}, + {PAPI_TOT_IIS,"PAPI_TOT_IIS","TOT_IIS ", "Total_instr_iss ", "Total instructions issued"}, + {PAPI_TOT_INS,"PAPI_TOT_INS","TOT_INS ", "Total_instr_ex ", "Total instructions executed"}, + {PAPI_INT_INS,"PAPI_INT_INS","INT_INS ", "Int_instr_ex ", "Integer instructions executed"}, + {PAPI_FP_INS, "PAPI_FP_INS", "FP_INS ", "FP_instr_ex ", "Floating point instructions executed"}, + {PAPI_LD_INS, "PAPI_LD_INS", "LD_INS ", "Load_instr_ex ", "Load instructions executed"}, + {PAPI_SR_INS, "PAPI_SR_INS", "SR_INS ", "Store_instr_ex ", "Store instructions executed"}, + {PAPI_BR_INS, "PAPI_BR_INS", "BR_INS ", "br_instr_ex ", "Total branch instructions executed"}, + {PAPI_VEC_INS,"PAPI_VEC_INS","VEC_INS ", "Vec/SIMD_instrEx", "Vector/SIMD instructions executed"}, + {PAPI_RES_STL,"PAPI_RES_STL","RES_STL ", "Cyc_proc_stalled", "Cycles processor is stalled on resource"}, + {PAPI_FP_STAL,"PAPI_FP_STAL","FP_STAL ", "Cyc_any_FP_stall", "Cycles any FP units are stalled"}, + {PAPI_TOT_CYC,"PAPI_TOT_CYC","TOT_CYC ", "Total_cycles ", "Total cycles"}, + {PAPI_LST_INS,"PAPI_LST_INS","LST_INS ", "Tot_L/S_inst_ex ", "Total load/store inst. executed"}, + {PAPI_SYC_INS,"PAPI_SYC_INS","SYC_INS ", "Sync._inst._ex ", "Sync. inst. executed"}, + {PAPI_L1_DCH, "PAPI_L1_DCH", "L1_DCH ", "L1_D_Cache_Hit ", "L1 D Cache Hit"}, + {PAPI_L2_DCH, "PAPI_L2_DCH", "L2_DCH ", "L2_D_Cache_Hit ", "L2 D Cache Hit"}, + {PAPI_L1_DCA, "PAPI_L1_DCA", "L1_DCA ", "L1_D_Cache_Acc ", "L1 D Cache Access"}, + {PAPI_L2_DCA, "PAPI_L2_DCA", "L2_DCA ", "L2_D_Cache_Acc ", "L2 D Cache Access"}, + {PAPI_L3_DCA, "PAPI_L3_DCA", "L3_DCA ", "L3_D_Cache_Acc ", "L3 D Cache Access"}, + {PAPI_L1_DCR, "PAPI_L1_DCR", "L1_DCR ", "L1_D_Cache_Read ", "L1 D Cache Read"}, + {PAPI_L2_DCR, "PAPI_L2_DCR", "L2_DCR ", "L2_D_Cache_Read ", "L2 D Cache Read"}, + {PAPI_L3_DCR, "PAPI_L3_DCR", "L3_DCR ", "L3_D_Cache_Read ", "L3 D Cache Read"}, + {PAPI_L1_DCW, "PAPI_L1_DCW", "L1_DCW ", "L1_D_Cache_Write", "L1 D Cache Write"}, + {PAPI_L2_DCW, "PAPI_L2_DCW", "L2_DCW ", "L2_D_Cache_Write", "L2 D Cache Write"}, + {PAPI_L3_DCW, "PAPI_L3_DCW", "L3_DCW ", "L3_D_Cache_Write", "L3 D Cache Write"}, + {PAPI_L1_ICH, "PAPI_L1_ICH", "L1_ICH ", "L1_I_cache_hits ", "L1 instruction cache hits"}, + {PAPI_L2_ICH, "PAPI_L2_ICH", "L2_ICH ", "L2_I_cache_hits ", "L2 instruction cache hits"}, + {PAPI_L3_ICH, "PAPI_L3_ICH", "L3_ICH ", "L3_I_cache_hits ", "L3 instruction cache hits"}, + {PAPI_L1_ICA, "PAPI_L1_ICA", "L1_ICA ", "L1_I_cache_acc ", "L1 instruction cache accesses"}, + {PAPI_L2_ICA, "PAPI_L2_ICA", "L2_ICA ", "L2_I_cache_acc ", "L2 instruction cache accesses"}, + {PAPI_L3_ICA, "PAPI_L3_ICA", "L3_ICA ", "L3_I_cache_acc ", "L3 instruction cache accesses"}, + {PAPI_L1_ICR, "PAPI_L1_ICR", "L1_ICR ", "L1_I_cache_reads", "L1 instruction cache reads"}, + {PAPI_L2_ICR, "PAPI_L2_ICR", "L2_ICR ", "L2_I_cache_reads", "L2 instruction cache reads"}, + {PAPI_L3_ICR, "PAPI_L3_ICR", "L3_ICR ", "L3_I_cache_reads", "L3 instruction cache reads"}, + {PAPI_L1_ICW, "PAPI_L1_ICW", "L1_ICW ", "L1_I_cache_write", "L1 instruction cache writes"}, + {PAPI_L2_ICW, "PAPI_L2_ICW", "L2_ICW ", "L2_I_cache_write", "L2 instruction cache writes"}, + {PAPI_L3_ICW, "PAPI_L3_ICW", "L3_ICW ", "L3_I_cache_write", "L3 instruction cache writes"}, + {PAPI_L1_TCH, "PAPI_L1_TCH", "L1_TCH ", "L1_cache_hits ", "L1 total cache hits"}, + {PAPI_L2_TCH, "PAPI_L2_TCH", "L2_TCH ", "L2_cache_hits ", "L2 total cache hits"}, + {PAPI_L3_TCH, 
"PAPI_L3_TCH", "L3_TCH ", "L3_cache_hits ", "L3 total cache hits"}, + {PAPI_L1_TCA, "PAPI_L1_TCA", "L1_TCA ", "L1_cache_access ", "L1 total cache accesses"}, + {PAPI_L2_TCA, "PAPI_L2_TCA", "L2_TCA ", "L2_cache_access ", "L2 total cache accesses"}, + {PAPI_L3_TCA, "PAPI_L3_TCA", "L3_TCA ", "L3_cache_access ", "L3 total cache accesses"}, + {PAPI_L1_TCR, "PAPI_L1_TCR", "L1_TCR ", "L1_cache_reads ", "L1 total cache reads"}, + {PAPI_L2_TCR, "PAPI_L2_TCR", "L2_TCR ", "L2_cache_reads ", "L2 total cache reads"}, + {PAPI_L3_TCR, "PAPI_L3_TCR", "L3_TCR ", "L3_cache_reads ", "L3 total cache reads"}, + {PAPI_L1_TCW, "PAPI_L1_TCW", "L1_TCW ", "L1_cache_writes ", "L1 total cache writes"}, + {PAPI_L2_TCW, "PAPI_L2_TCW", "L2_TCW ", "L2_cache_writes ", "L2 total cache writes"}, + {PAPI_L3_TCW, "PAPI_L3_TCW", "L3_TCW ", "L3_cache_writes ", "L3 total cache writes"}, + {PAPI_FML_INS,"PAPI_FML_INS","FML_INS ", "FM_ins ", "FM ins"}, + {PAPI_FAD_INS,"PAPI_FAD_INS","FAD_INS ", "FA_ins ", "FA ins"}, + {PAPI_FDV_INS,"PAPI_FDV_INS","FDV_INS ", "FD_ins ", "FD ins"}, + {PAPI_FSQ_INS,"PAPI_FSQ_INS","FSQ_INS ", "FSq_ins ", "FSq ins"}, + {PAPI_FNV_INS,"PAPI_FNV_INS","FNV_INS ", "Finv_ins ", "Finv ins"}, + {PAPI_FP_OPS, "PAPI_FP_OPS", "FP_OPS ", "FP_ops_executed ", "Floating point operations executed"} +}; + +static const int npapientries = sizeof (papitable) / sizeof (Entry); +static int papieventlist[MAX_AUX]; /* list of PAPI events to be counted */ +static Pr_event pr_event[MAX_AUX]; /* list of events (PAPI or derived) */ + +/* Derived events */ +static const Entry derivedtable [] = { + {GPTL_IPC, "GPTL_IPC", "IPC ", "Instr_per_cycle ", "Instructions per cycle"}, + {GPTL_CI, "GPTL_CI", "CI ", "Comp_Intensity ", "Computational intensity"}, + {GPTL_FPC, "GPTL_FPC", "Flop/Cyc", "FP_Ops_per_cycle", "Floating point ops per cycle"}, + {GPTL_FPI, "GPTL_FPI", "Flop/Ins", "FP_Ops_per_instr", "Floating point ops per instruction"}, + {GPTL_LSTPI, "GPTL_LSTPI", "LST_frac", "LST_fraction ", "Load-store instruction fraction"}, + {GPTL_DCMRT, "GPTL_DCMRT", "DCMISRAT", "L1_Miss_Rate ", "L1 miss rate (fraction)"}, + {GPTL_LSTPDCM,"GPTL_LSTPDCM", "LSTPDCM ", "LST_per_L1_miss ", "Load-store instructions per L1 miss"}, + {GPTL_L2MRT, "GPTL_L2MRT", "L2MISRAT", "L2_Miss_Rate ", "L2 miss rate (fraction)"}, + {GPTL_LSTPL2M,"GPTL_LSTPL2M", "LSTPL2M ", "LST_per_L2_miss ", "Load-store instructions per L2 miss"}, + {GPTL_L3MRT, "GPTL_L3MRT", "L3MISRAT", "L3_Miss_Rate ", "L3 read miss rate (fraction)"} +}; +static const int nderivedentries = sizeof (derivedtable) / sizeof (Entry); + +static int npapievents = 0; /* number of PAPI events: initialize to 0 */ +static int nevents = 0; /* number of events: initialize to 0 */ +static int *EventSet; /* list of events to be counted by PAPI */ +static long_long **papicounters; /* counters returned from PAPI */ + +static const int BADCOUNT = -999999; /* Set counters to this when they are bad */ +static bool is_multiplexed = false; /* whether multiplexed (always start false)*/ +static bool narrowprint = true; /* only use 8 digits not 16 for counter prints */ +static bool persec = true; /* print PAPI stats per second */ +static bool enable_multiplexing = false; /* whether to try multiplexing */ +static bool verbose = false; /* output verbosity */ + +/* Function prototypes */ + +static int canenable (int); +static int canenable2 (int, int); +static int papievent_is_enabled (int); +static int already_enabled (int); +static int enable (int); +static int getderivedidx (int); + +/* +** GPTL_PAPIsetoption: enable or 
disable PAPI event defined by "counter". Called +** from GPTLsetoption. Since all events are off by default, val=false degenerates +** to a no-op. Coded this way to be consistent with the rest of GPTL +** +** Input args: +** counter: PAPI counter +** val: true or false for enable or disable +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTL_PAPIsetoption (const int counter, /* PAPI counter (or option) */ + const int val) /* true or false for enable or disable */ +{ + int n; /* loop index */ + int ret; /* return code */ + int numidx; /* numerator index */ + int idx; /* derived counter index */ + char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ + + /* + ** First, check for option which is not an actual counter + */ + + switch (counter) { + case GPTLverbose: + /* don't printf here--that'd duplicate what's in gptl.c */ + verbose = (bool) val; + return 0; + case GPTLmultiplex: + enable_multiplexing = (bool) val; + if (verbose) + printf ("GPTL_PAPIsetoption: boolean enable_multiplexing = %d\n", val); + return 0; + case GPTLnarrowprint: + narrowprint = (bool) val; + if (verbose) + printf ("GPTL_PAPIsetoption: boolean narrowprint = %d\n", val); + return 0; + case GPTLpersec: + persec = (bool) val; + if (verbose) + printf ("GPTL_PAPIsetoption: boolean persec = %d\n", val); + return 0; + default: + break; + } + + /* + ** If val is false, return an error if the event has already been enabled. + ** Otherwise just warn that attempting to disable a PAPI-based event + ** that has already been enabled doesn't work--for now it's just a no-op + */ + + if (! val) { + if (already_enabled (counter)) + return GPTLerror ("GPTL_PAPIsetoption: already enabled counter %d cannot be disabled\n", + counter); + else + if (verbose) + printf ("GPTL_PAPIsetoption: 'disable' %d currently is just a no-op\n", counter); + return 0; + } + + /* If the event has already been enabled for printing, exit */ + + if (already_enabled (counter)) + return GPTLerror ("GPTL_PAPIsetoption: counter %d has already been enabled\n", + counter); + + /* + ** Initialize PAPI if it hasn't already been done. + ** From here on down we can assume the intent is to enable (not disable) an option + */ + + if (GPTL_PAPIlibraryinit () < 0) + return GPTLerror ("GPTL_PAPIsetoption: PAPI library init error\n"); + + /* Ensure max nevents won't be exceeded */ + + if (nevents+1 > MAX_AUX) + return GPTLerror ("GPTL_PAPIsetoption: %d is too many events. Can be increased in private.h\n", + nevents+1); + + /* Check derived events */ + + switch (counter) { + case GPTL_IPC: + if ( ! 
canenable2 (PAPI_TOT_INS, PAPI_TOT_CYC)) + return GPTLerror ("GPTL_PAPIsetoption: GPTL_IPC unavailable\n"); + + idx = getderivedidx (GPTL_IPC); + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_TOT_INS); + pr_event[nevents].denomidx = enable (PAPI_TOT_CYC); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_TOT_INS / PAPI_TOT_CYC\n", + pr_event[nevents].event.namestr); + ++nevents; + return 0; + case GPTL_CI: + idx = getderivedidx (GPTL_CI); + if (canenable2 (PAPI_FP_OPS, PAPI_LST_INS)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_FP_OPS); + pr_event[nevents].denomidx = enable (PAPI_LST_INS); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_LST_INS\n", + pr_event[nevents].event.namestr); + } else if (canenable2 (PAPI_FP_OPS, PAPI_L1_DCA)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_FP_OPS); + pr_event[nevents].denomidx = enable (PAPI_L1_DCA); +#ifdef DEBUG + printf ("GPTL_PAPIsetoption: pr_event %d is derived and will be PAPI event %d / %d\n", + nevents, pr_event[nevents].numidx, pr_event[nevents].denomidx); +#endif + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_L1_DCA\n", + pr_event[nevents].event.namestr); + } else { + return GPTLerror ("GPTL_PAPIsetoption: GPTL_CI unavailable\n"); + } + ++nevents; + return 0; + case GPTL_FPC: + if ( ! canenable2 (PAPI_FP_OPS, PAPI_TOT_CYC)) + return GPTLerror ("GPTL_PAPIsetoption: GPTL_FPC unavailable\n"); + + idx = getderivedidx (GPTL_FPC); + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_FP_OPS); + pr_event[nevents].denomidx = enable (PAPI_TOT_CYC); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_TOT_CYC\n", + pr_event[nevents].event.namestr); + ++nevents; + return 0; + case GPTL_FPI: + if ( ! canenable2 (PAPI_FP_OPS, PAPI_TOT_INS)) + return GPTLerror ("GPTL_PAPIsetoption: GPTL_FPI unavailable\n"); + + idx = getderivedidx (GPTL_FPI); + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_FP_OPS); + pr_event[nevents].denomidx = enable (PAPI_TOT_INS); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_TOT_INS\n", + pr_event[nevents].event.namestr); + ++nevents; + return 0; + case GPTL_LSTPI: + idx = getderivedidx (GPTL_LSTPI); + if (canenable2 (PAPI_LST_INS, PAPI_TOT_INS)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_LST_INS); + pr_event[nevents].denomidx = enable (PAPI_TOT_INS); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_LST_INS / PAPI_TOT_INS\n", + pr_event[nevents].event.namestr); + } else if (canenable2 (PAPI_L1_DCA, PAPI_TOT_INS)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_L1_DCA); + pr_event[nevents].denomidx = enable (PAPI_TOT_INS); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCA / PAPI_TOT_INS\n", + pr_event[nevents].event.namestr); + } else { + return GPTLerror ("GPTL_PAPIsetoption: GPTL_LSTPI unavailable\n"); + } + ++nevents; + return 0; + case GPTL_DCMRT: + if ( ! 
canenable2 (PAPI_L1_DCM, PAPI_L1_DCA)) + return GPTLerror ("GPTL_PAPIsetoption: GPTL_DCMRT unavailable\n"); + + idx = getderivedidx (GPTL_DCMRT); + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_L1_DCM); + pr_event[nevents].denomidx = enable (PAPI_L1_DCA); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCM / PAPI_L1_DCA\n", + pr_event[nevents].event.namestr); + ++nevents; + return 0; + case GPTL_LSTPDCM: + idx = getderivedidx (GPTL_LSTPDCM); + if (canenable2 (PAPI_LST_INS, PAPI_L1_DCM)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_LST_INS); + pr_event[nevents].denomidx = enable (PAPI_L1_DCM); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_LST_INS / PAPI_L1_DCM\n", + pr_event[nevents].event.namestr); + } else if (canenable2 (PAPI_L1_DCA, PAPI_L1_DCM)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_L1_DCA); + pr_event[nevents].denomidx = enable (PAPI_L1_DCM); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCA / PAPI_L1_DCM\n", + pr_event[nevents].event.namestr); + } else { + return GPTLerror ("GPTL_PAPIsetoption: GPTL_LSTPDCM unavailable\n"); + } + ++nevents; + return 0; + /* + ** For L2 counts, use TC* instead of DC* to avoid PAPI derived events + */ + case GPTL_L2MRT: + if ( ! canenable2 (PAPI_L2_TCM, PAPI_L2_TCA)) + return GPTLerror ("GPTL_PAPIsetoption: GPTL_L2MRT unavailable\n"); + + idx = getderivedidx (GPTL_L2MRT); + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_L2_TCM); + pr_event[nevents].denomidx = enable (PAPI_L2_TCA); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L2_TCM / PAPI_L2_TCA\n", + pr_event[nevents].event.namestr); + ++nevents; + return 0; + case GPTL_LSTPL2M: + idx = getderivedidx (GPTL_LSTPL2M); + if (canenable2 (PAPI_LST_INS, PAPI_L2_TCM)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_LST_INS); + pr_event[nevents].denomidx = enable (PAPI_L2_TCM); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_LST_INS / PAPI_L2_TCM\n", + pr_event[nevents].event.namestr); + } else if (canenable2 (PAPI_L1_DCA, PAPI_L2_TCM)) { + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_L1_DCA); + pr_event[nevents].denomidx = enable (PAPI_L2_TCM); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCA / PAPI_L2_TCM\n", + pr_event[nevents].event.namestr); + } else { + return GPTLerror ("GPTL_PAPIsetoption: GPTL_LSTPL2M unavailable\n"); + } + ++nevents; + return 0; + case GPTL_L3MRT: + if ( ! 
canenable2 (PAPI_L3_TCM, PAPI_L3_TCR)) + return GPTLerror ("GPTL_PAPIsetoption: GPTL_L3MRT unavailable\n"); + + idx = getderivedidx (GPTL_L3MRT); + pr_event[nevents].event = derivedtable[idx]; + pr_event[nevents].numidx = enable (PAPI_L3_TCM); + pr_event[nevents].denomidx = enable (PAPI_L3_TCR); + if (verbose) + printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L3_TCM / PAPI_L3_TCR\n", + pr_event[nevents].event.namestr); + ++nevents; + return 0; + default: + break; + } + + /* Check PAPI presets */ + + for (n = 0; n < npapientries; n++) { + if (counter == papitable[n].counter) { + if ((numidx = papievent_is_enabled (counter)) >= 0) { + pr_event[nevents].event = papitable[n]; + pr_event[nevents].numidx = numidx; + pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ + } else if (canenable (counter)) { + pr_event[nevents].event = papitable[n]; + pr_event[nevents].numidx = enable (counter); + pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ + } else { + return GPTLerror ("GPTL_PAPIsetoption: Can't enable event \n", + papitable[n].longstr); + } + if (verbose) + printf ("GPTL_PAPIsetoption: enabling PAPI preset event %s\n", + pr_event[nevents].event.namestr); + ++nevents; + return 0; + } + } + + /* + ** Check native events last: If PAPI_event_code_to_name fails, give up + */ + + if ((ret = PAPI_event_code_to_name (counter, eventname)) != PAPI_OK) + return GPTLerror ("GPTL_PAPIsetoption: name not found for counter %d: PAPI_strerror: %s\n", + counter, PAPI_strerror (ret)); + + /* + ** A table with predefined names of various lengths does not exist for + ** native events. Just truncate eventname. + */ + + if ((numidx = papievent_is_enabled (counter)) >= 0) { + pr_event[nevents].event.counter = counter; + + pr_event[nevents].event.namestr = (char *) GPTLallocate (12+1); + strncpy (pr_event[nevents].event.namestr, eventname, 12); + pr_event[nevents].event.namestr[12] = '\0'; + + pr_event[nevents].event.str16 = (char *) GPTLallocate (16+1); + strncpy (pr_event[nevents].event.str16, eventname, 16); + pr_event[nevents].event.str16[16] = '\0'; + + pr_event[nevents].event.longstr = (char *) GPTLallocate (PAPI_MAX_STR_LEN); + strncpy (pr_event[nevents].event.longstr, eventname, PAPI_MAX_STR_LEN); + + pr_event[nevents].numidx = numidx; + pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ + } else if (canenable (counter)) { + pr_event[nevents].event.counter = counter; + + pr_event[nevents].event.namestr = (char *) GPTLallocate (12+1); + strncpy (pr_event[nevents].event.namestr, eventname, 12); + pr_event[nevents].event.namestr[12] = '\0'; + + pr_event[nevents].event.str16 = (char *) GPTLallocate (16+1); + strncpy (pr_event[nevents].event.str16, eventname, 16); + pr_event[nevents].event.str16[16] = '\0'; + + pr_event[nevents].event.longstr = (char *) GPTLallocate (PAPI_MAX_STR_LEN); + strncpy (pr_event[nevents].event.longstr, eventname, PAPI_MAX_STR_LEN); + + pr_event[nevents].numidx = enable (counter); + pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ + } else { + return GPTLerror ("GPTL_PAPIsetoption: Can't enable event %s\n", eventname); + } + + if (verbose) + printf ("GPTL_PAPIsetoption: enabling native event %s\n", pr_event[nevents].event.longstr); + + ++nevents; + return 0; +} + +/* +** canenable: determine whether a PAPI counter can be enabled +** +** Input args: +** counter: PAPI counter +** +** Return value: 0 (success) or non-zero (failure) +*/ + +int canenable (int counter) +{ 
+ char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ + + if (npapievents+1 > MAX_AUX) + return false; + + if (PAPI_query_event (counter) != PAPI_OK) { + (void) PAPI_event_code_to_name (counter, eventname); + fprintf (stderr, "canenable: event %s not available on this arch\n", eventname); + return false; + } + + return true; +} + +/* +** canenable2: determine whether 2 PAPI counters can be enabled +** +** Input args: +** counter1: PAPI counter +** counter2: PAPI counter +** +** Return value: 0 (success) or non-zero (failure) +*/ + +int canenable2 (int counter1, int counter2) +{ + char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ + + if (npapievents+2 > MAX_AUX) + return false; + + if (PAPI_query_event (counter1) != PAPI_OK) { + (void) PAPI_event_code_to_name (counter1, eventname); + return false; + } + + if (PAPI_query_event (counter2) != PAPI_OK) { + (void) PAPI_event_code_to_name (counter2, eventname); + return false; + } + + return true; +} + +/* +** papievent_is_enabled: determine whether a PAPI counter has already been +** enabled. Used internally to keep track of PAPI counters enabled. A given +** PAPI counter may occur in the computation of multiple derived events, as +** well as output directly. E.g. PAPI_FP_OPS is used to compute +** computational intensity, and floating point ops per instruction. +** +** Input args: +** counter: PAPI counter +** +** Return value: index into papieventlist (success) or negative (not found) +*/ + +int papievent_is_enabled (int counter) +{ + int n; + + for (n = 0; n < npapievents; ++n) + if (papieventlist[n] == counter) + return n; + return -1; +} + +/* +** already_enabled: determine whether a PAPI-based event has already been +** enabled for printing. +** +** Input args: +** counter: PAPI or derived counter +** +** Return value: 1 (true) or 0 (false) +*/ + +int already_enabled (int counter) +{ + int n; + + for (n = 0; n < nevents; ++n) + if (pr_event[n].event.counter == counter) + return 1; + return 0; +} + +/* +** enable: enable a PAPI event. ASSUMES that canenable() has already determined +** that the event can be enabled. 
+** +** Input args: +** counter: PAPI counter +** +** Return value: index into papieventlist +*/ + +int enable (int counter) +{ + int n; + + /* If the event is already enabled, return its index */ + + for (n = 0; n < npapievents; ++n) { + if (papieventlist[n] == counter) { +#ifdef DEBUG + printf ("enable: PAPI event %d is %d\n", n, counter); +#endif + return n; + } + } + + /* New event */ + + papieventlist[npapievents++] = counter; + return npapievents-1; +} + +/* +** getderivedidx: find the table index of a derived counter +** +** Input args: +** counter: derived counter +** +** Return value: index into derivedtable (success) or GPTLerror (failure) +*/ + +int getderivedidx (int dcounter) +{ + int n; + + for (n = 0; n < nderivedentries; ++n) { + if (derivedtable[n].counter == dcounter) + return n; + } + return GPTLerror ("getderivedidx: failed to find derived counter %d\n", dcounter); +} + +/* +** GPTL_PAPIlibraryinit: Call PAPI_library_init if necessary +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTL_PAPIlibraryinit () +{ + int ret; + + if ((ret = PAPI_is_initialized ()) == PAPI_NOT_INITED) { + if ((ret = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) { + fprintf (stderr, "GPTL_PAPIlibraryinit: ret=%d PAPI_VER_CURRENT=%d\n", + ret, (int) PAPI_VER_CURRENT); + return GPTLerror ("GPTL_PAPIlibraryinit: PAPI_library_init failure:%s\n", + PAPI_strerror (ret)); + } + } + return 0; +} + +/* +** GPTL_PAPIinitialize(): Initialize the PAPI interface. Called from GPTLinitialize. +** PAPI_library_init must be called before any other PAPI routines. +** PAPI_thread_init is called subsequently if threading is enabled. +** Finally, allocate space for PAPI counters and start them. +** +** Input args: +** maxthreads: number of threads +** +** Return value: 0 (success) or GPTLerror or -1 (failure) +*/ + +int GPTL_PAPIinitialize (const int maxthreads, /* number of threads */ + const bool verbose_flag, /* output verbosity */ + int *nevents_out, /* nevents needed by gptl.c */ + Entry *pr_event_out) /* events needed by gptl.c */ +{ + int ret; /* return code */ + int n; /* loop index */ + int t; /* thread index */ + + verbose = verbose_flag; + + if (maxthreads < 1) + return GPTLerror ("GPTL_PAPIinitialize: maxthreads = %d\n", maxthreads); + + /* Ensure that PAPI_library_init has already been called */ + + if ((ret = GPTL_PAPIlibraryinit ()) < 0) + return GPTLerror ("GPTL_PAPIinitialize: GPTL_PAPIlibraryinit failure\n"); + + /* PAPI_thread_init needs to be called if threading enabled */ + +#if ( defined THREADED_OMP ) + if (PAPI_thread_init ((unsigned long (*)(void)) (omp_get_thread_num)) != PAPI_OK) + return GPTLerror ("GPTL_PAPIinitialize: PAPI_thread_init failure\n"); +#elif ( defined THREADED_PTHREADS ) + if (PAPI_thread_init ((unsigned long (*)(void)) (pthread_self)) != PAPI_OK) + return GPTLerror ("GPTL_PAPIinitialize: PAPI_thread_init failure\n"); +#endif + + /* allocate and initialize static local space */ + + EventSet = (int *) GPTLallocate (maxthreads * sizeof (int)); + papicounters = (long_long **) GPTLallocate (maxthreads * sizeof (long_long *)); + + for (t = 0; t < maxthreads; t++) { + EventSet[t] = PAPI_NULL; + papicounters[t] = (long_long *) GPTLallocate (MAX_AUX * sizeof (long_long)); + } + + *nevents_out = nevents; + for (n = 0; n < nevents; ++n) { + pr_event_out[n].counter = pr_event[n].event.counter; + pr_event_out[n].namestr = pr_event[n].event.namestr; + pr_event_out[n].str8 = pr_event[n].event.str8; + pr_event_out[n].str16 = pr_event[n].event.str16; + 
pr_event_out[n].longstr = pr_event[n].event.longstr; + } + return 0; +} + +/* +** GPTLcreate_and_start_events: Create and start the PAPI eventset. +** Threaded routine to create the "event set" (PAPI terminology) and start +** the counters. This is only done once, and is called from get_thread_num +** for the first time for the thread. +** +** Input args: +** t: thread number +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLcreate_and_start_events (const int t) /* thread number */ +{ + int ret; /* return code */ + int n; /* loop index over events */ + char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ + + /* Create the event set */ + + if ((ret = PAPI_create_eventset (&EventSet[t])) != PAPI_OK) + return GPTLerror ("GPTLcreate_and_start_events: thread %d failure creating eventset: %s\n", + t, PAPI_strerror (ret)); + + if (verbose) + printf ("GPTLcreate_and_start_events: successfully created eventset for thread %d\n", t); + + /* Add requested events to the event set */ + + for (n = 0; n < npapievents; n++) { + if ((ret = PAPI_add_event (EventSet[t], papieventlist[n])) != PAPI_OK) { + if (verbose) { + fprintf (stderr, "%s\n", PAPI_strerror (ret)); + ret = PAPI_event_code_to_name (papieventlist[n], eventname); + fprintf (stderr, "GPTLcreate_and_start_events: failure adding event:%s\n", + eventname); + } + + if (enable_multiplexing) { + if (verbose) + printf ("Trying multiplexing...\n"); + is_multiplexed = true; + break; + } else + return GPTLerror ("enable_multiplexing is false: giving up\n"); + } + } + + if (is_multiplexed) { + + /* Cleanup the eventset for multiplexing */ + + if ((ret = PAPI_cleanup_eventset (EventSet[t])) != PAPI_OK) + return GPTLerror ("GPTLcreate_and_start_events: %s\n", PAPI_strerror (ret)); + + if ((ret = PAPI_destroy_eventset (&EventSet[t])) != PAPI_OK) + return GPTLerror ("GPTLcreate_and_start_events: %s\n", PAPI_strerror (ret)); + + if ((ret = PAPI_create_eventset (&EventSet[t])) != PAPI_OK) + return GPTLerror ("GPTLcreate_and_start_events: failure creating eventset: %s\n", + PAPI_strerror (ret)); + + if ((ret = PAPI_multiplex_init ()) != PAPI_OK) + return GPTLerror ("GPTLcreate_and_start_events: failure from PAPI_multiplex_init%s\n", + PAPI_strerror (ret)); + + if ((ret = PAPI_set_multiplex (EventSet[t])) != PAPI_OK) + return GPTLerror ("GPTLcreate_and_start_events: failure from PAPI_set_multiplex: %s\n", + PAPI_strerror (ret)); + + for (n = 0; n < npapievents; n++) { + if ((ret = PAPI_add_event (EventSet[t], papieventlist[n])) != PAPI_OK) { + ret = PAPI_event_code_to_name (papieventlist[n], eventname); + return GPTLerror ("GPTLcreate_and_start_events: failure adding event:%s\n" + " Error was: %s\n", eventname, PAPI_strerror (ret)); + } + } + } + + /* Start the event set. It will only be read from now on--never stopped */ + + if ((ret = PAPI_start (EventSet[t])) != PAPI_OK) + return GPTLerror ("GPTLcreate_and_start_events: failed to start event set: %s\n", + PAPI_strerror (ret)); + + return 0; +} + +/* +** GPTL_PAPIstart: Start the PAPI counters (actually they are just read). +** Called from GPTLstart. 
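+** A minimal sketch of the intended pairing (the "aux" field name below is an
+** illustrative assumption; the actual Timer struct lives in gptl.c):
+**   (void) GPTL_PAPIstart (t, &timer->aux);   <-- at GPTLstart: snapshot counters
+**   ... timed user code ...
+**   (void) GPTL_PAPIstop  (t, &timer->aux);   <-- at GPTLstop: accumulate the delta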
+** +** Input args: +** t: thread number +** +** Output args: +** aux: struct containing the counters +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTL_PAPIstart (const int t, /* thread number */ + Papistats *aux) /* struct containing PAPI stats */ +{ + int ret; /* return code from PAPI lib calls */ + int n; /* loop index */ + + /* If no events are to be counted just return */ + + if (npapievents == 0) + return 0; + + /* Read the counters */ + + if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK) + return GPTLerror ("GPTL_PAPIstart: %s\n", PAPI_strerror (ret)); + + /* + ** Store the counter values. When GPTL_PAPIstop is called, the counters + ** will again be read, and differenced with the values saved here. + */ + + for (n = 0; n < npapievents; n++) + aux->last[n] = papicounters[t][n]; + + return 0; +} + +/* +** GPTL_PAPIstop: Stop the PAPI counters (actually they are just read). +** Called from GPTLstop. +** +** Input args: +** t: thread number +** +** Input/output args: +** aux: struct containing the counters +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTL_PAPIstop (const int t, /* thread number */ + Papistats *aux) /* struct containing PAPI stats */ +{ + int ret; /* return code from PAPI lib calls */ + int n; /* loop index */ + long_long delta; /* change in counters from previous read */ + + /* If no events are to be counted just return */ + + if (npapievents == 0) + return 0; + + /* Read the counters */ + + if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK) + return GPTLerror ("GPTL_PAPIstop: %s\n", PAPI_strerror (ret)); + + /* + ** Accumulate the difference since timer start in aux. + ** Negative accumulation can happen when multiplexing is enabled, so don't + ** set count to BADCOUNT in that case. + */ + + for (n = 0; n < npapievents; n++) { +#ifdef DEBUG + printf ("GPTL_PAPIstop: event %d counter value is %ld\n", n, (long) papicounters[t][n]); +#endif + delta = papicounters[t][n] - aux->last[n]; + if ( ! is_multiplexed && delta < 0) + aux->accum[n] = BADCOUNT; + else + aux->accum[n] += delta; + } + return 0; +} + +/* +** GPTL_PAPIprstr: Print the descriptive string for all enabled PAPI events. +** Called from GPTLpr. +** +** Input args: +** fp: file descriptor +*/ + +void GPTL_PAPIprstr (FILE *fp) +{ + int n; + + if (narrowprint) { + for (n = 0; n < nevents; n++) { + fprintf (fp, "%8.8s ", pr_event[n].event.str8); + + /* Test on < 0 says it's a PAPI preset */ + + if (persec && pr_event[n].event.counter < 0) + fprintf (fp, "e6_/_sec "); + } + } else { + for (n = 0; n < nevents; n++) { + fprintf (fp, "%16.16s ", pr_event[n].event.str16); + + /* Test on < 0 says it's a PAPI preset */ + + if (persec && pr_event[n].event.counter < 0) + fprintf (fp, "e6_/_sec "); + } + } +} + +/* +** GPTL_PAPIpr: Print PAPI counter values for all enabled events, including +** derived events. Called from GPTLpr. 
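+** Output convention (as implemented below): a derived event prints as the ratio
+** numerator/denominator; a raw PAPI event prints as a count, followed by a
+** millions-per-second column when the "persec" option is set.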
+** +** Input args: +** fp: file descriptor +** aux: struct containing the counters +*/ + +void GPTL_PAPIpr (FILE *fp, /* file descriptor to write to */ + const Papistats *aux, /* stats to write */ + const int t, /* thread number */ + const int count, /* number of invocations */ + const double wcsec) /* wallclock time (sec) */ +{ + const char *shortintfmt = "%8ld "; + const char *longintfmt = "%16ld "; + const char *shortfloatfmt = "%8.2e "; + const char *longfloatfmt = "%16.10e "; + const char *intfmt; /* integer format */ + const char *floatfmt; /* floating point format */ + + int n; /* loop index */ + int numidx; /* index pointer to appropriated (derived) numerator */ + int denomidx; /* index pointer to appropriated (derived) denominator */ + double val; /* value to be printed */ + + intfmt = narrowprint ? shortintfmt : longintfmt; + floatfmt = narrowprint ? shortfloatfmt : longfloatfmt; + + for (n = 0; n < nevents; n++) { + numidx = pr_event[n].numidx; + if (pr_event[n].denomidx > -1) { /* derived event */ + denomidx = pr_event[n].denomidx; + +#ifdef DEBUG + printf ("GPTL_PAPIpr: derived event: numidx=%d denomidx=%d values = %ld %ld\n", + numidx, denomidx, (long) aux->accum[numidx], (long) aux->accum[denomidx]); +#endif + /* Protect against divide by zero */ + + if (aux->accum[denomidx] > 0) + val = (double) aux->accum[numidx] / (double) aux->accum[denomidx]; + else + val = 0.; + fprintf (fp, floatfmt, val); + + } else { /* Raw PAPI event */ + +#ifdef DEBUG + printf ("GPTL_PAPIpr: raw event: numidx=%d value = %ld\n", + numidx, (long) aux->accum[numidx]); +#endif + if (aux->accum[numidx] < PRTHRESH) + fprintf (fp, intfmt, (long) aux->accum[numidx]); + else + fprintf (fp, floatfmt, (double) aux->accum[numidx]); + + if (persec) { + if (wcsec > 0.) + fprintf (fp, "%8.2f ", aux->accum[numidx] * 1.e-6 / wcsec); + else + fprintf (fp, "%8.2f ", 0.); + } + } + } +} + +/* +** GPTL_PAPIprintenabled: Print list of enabled timers +** +** Input args: +** fp: file descriptor +*/ + +void GPTL_PAPIprintenabled (FILE *fp) +{ + int n, nn; + PAPI_event_info_t info; /* returned from PAPI_get_event_info */ + char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ + + if (nevents > 0) { + fprintf (fp, "Description of printed events (PAPI and derived):\n"); + for (n = 0; n < nevents; n++) { + if (strncmp (pr_event[n].event.namestr, "GPTL", 4) == 0) { + fprintf (fp, " %s: %s\n", pr_event[n].event.namestr, pr_event[n].event.longstr); + } else { + nn = pr_event[n].event.counter; + if (PAPI_get_event_info (nn, &info) == PAPI_OK) { + fprintf (fp, " %s\n", info.short_descr); + fprintf (fp, " %s\n", info.note); + } + } + } + fprintf (fp, "\n"); + + fprintf (fp, "PAPI events enabled (including those required for derived events):\n"); + for (n = 0; n < npapievents; n++) + if (PAPI_event_code_to_name (papieventlist[n], eventname) == PAPI_OK) + fprintf (fp, " %s\n", eventname); + fprintf (fp, "\n"); + } +} + +/* +** GPTL_PAPIadd: Accumulate PAPI counters. Called from add. +** +** Input/Output args: +** auxout: auxout = auxout + auxin +** +** Input args: +** auxin: counters to be summed into auxout +*/ + +void GPTL_PAPIadd (Papistats *auxout, /* output struct */ + const Papistats *auxin) /* input struct */ +{ + int n; + + for (n = 0; n < npapievents; n++) + if (auxin->accum[n] == BADCOUNT || auxout->accum[n] == BADCOUNT) + auxout->accum[n] = BADCOUNT; + else + auxout->accum[n] += auxin->accum[n]; +} + +/* +** GPTL_PAPIfinalize: finalization routine must be called from single-threaded +** region. 
Free all malloc'd space +*/ + +void GPTL_PAPIfinalize (int maxthreads) +{ + int t; /* thread index */ + int ret; /* return code */ + + for (t = 0; t < maxthreads; t++) { + ret = PAPI_stop (EventSet[t], papicounters[t]); + free (papicounters[t]); + ret = PAPI_cleanup_eventset (EventSet[t]); + ret = PAPI_destroy_eventset (&EventSet[t]); + } + + free (EventSet); + free (papicounters); + + /* Reset initial values */ + + npapievents = 0; + nevents = 0; + is_multiplexed = false; + narrowprint = true; + persec = true; + enable_multiplexing = false; + verbose = false; +} + +/* +** GPTL_PAPIquery: return current PAPI counter info. Return into a long for best +** compatibility possibilities with Fortran. +** +** Input args: +** aux: struct containing the counters +** ncounters: max number of counters to return +** +** Output args: +** papicounters_out: current value of PAPI counters +*/ + +void GPTL_PAPIquery (const Papistats *aux, + long long *papicounters_out, + int ncounters) +{ + int n; + + if (ncounters > 0) { + for (n = 0; n < ncounters && n < npapievents; n++) { + papicounters_out[n] = (long long) aux->accum[n]; + } + } +} + +/* +** GPTL_PAPIget_eventvalue: return current value for an enabled event. +** +** Input args: +** eventname: event name to check (whether derived or raw PAPI counter) +** aux: struct containing the counter(s) for the event +** +** Output args: +** value: current value of the event +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTL_PAPIget_eventvalue (const char *eventname, + const Papistats *aux, + double *value) +{ + int n; /* loop index through enabled events */ + int numidx; /* numerator index into papicounters */ + int denomidx; /* denominator index into papicounters */ + + for (n = 0; n < nevents; ++n) { + if (STRMATCH (eventname, pr_event[n].event.namestr)) { + numidx = pr_event[n].numidx; + if (pr_event[n].denomidx > -1) { /* derived event */ + denomidx = pr_event[n].denomidx; + if (aux->accum[denomidx] > 0) /* protect against divide by zero */ + *value = (double) aux->accum[numidx] / (double) aux->accum[denomidx]; + else + *value = 0.; + } else { /* Raw PAPI event */ + *value = (double) aux->accum[numidx]; + } + break; + } + } + if (n == nevents) + return GPTLerror ("GPTL_PAPIget_eventvalue: event %s not enabled\n", eventname); + return 0; +} + +/* +** GPTL_PAPIis_multiplexed: return status of whether events are being multiplexed +*/ + +bool GPTL_PAPIis_multiplexed () +{ + return is_multiplexed; +} + +/* +** The following functions are publicly available +*/ + +void read_counters100 () +{ + int i; + int ret; + long_long counters[MAX_AUX]; + + for (i = 0; i < 10; ++i) { + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + ret = PAPI_read (EventSet[0], counters); + } + return; +} + +/* +** GPTLevent_name_to_code: convert a string to a PAPI code +** or derived event code. 
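+** Example (a minimal sketch; the chosen event name and the follow-on call to
+** GPTLsetoption are illustrative assumptions, not part of this routine):
+**   int code;
+**   if (GPTLevent_name_to_code ("PAPI_TOT_CYC", &code) == 0)
+**     (void) GPTLsetoption (code, 1);   <-- enable for printing, typically before GPTLinitialize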
+** +** Input arguments: +** arg: string to convert +** +** Output arguments: +** code: PAPI or GPTL derived code +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLevent_name_to_code (const char *name, int *code) +{ + int ret; /* return code */ + int n; /* loop over derived entries */ + + /* + ** First check derived events + */ + + for (n = 0; n < nderivedentries; ++n) { + if (STRMATCH (name, derivedtable[n].namestr)) { + *code = derivedtable[n].counter; + return 0; + } + } + + /* + ** Next check PAPI events--note that PAPI must be initialized before the + ** name_to_code function can be invoked. + */ + + if ((ret = GPTL_PAPIlibraryinit ()) < 0) + return GPTLerror ("GPTL_event_name_to_code: GPTL_PAPIlibraryinit failure\n"); + + if ((PAPI_event_name_to_code ((char *) name, code)) != PAPI_OK) + return GPTLerror ("GPTL_event_name_to_code: PAPI_event_name_to_code failure\n"); + + return 0; +} + +/* +** GPTLevent_code_to_name: convert a string to a PAPI code +** or derived event code. +** +** Input arguments: +** code: event code (PAPI or derived) +** +** Output arguments: +** name: string corresponding to code +** +** Return value: 0 (success) or GPTLerror (failure) +*/ + +int GPTLevent_code_to_name (const int code, char *name) +{ + int ret; /* return code */ + int n; /* loop over derived entries */ + + /* + ** First check derived events + */ + + for (n = 0; n < nderivedentries; ++n) { + if (code == derivedtable[n].counter) { + strcpy (name, derivedtable[n].namestr); + return 0; + } + } + + /* + ** Next check PAPI events--note that PAPI must be initialized before the + ** code_to_name function can be invoked. + */ + + if ((ret = GPTL_PAPIlibraryinit ()) < 0) + return GPTLerror ("GPTL_event_code_to_name: GPTL_PAPIlibraryinit failure\n"); + + if (PAPI_event_code_to_name (code, name) != PAPI_OK) + return GPTLerror ("GPTL_event_code_to_name: PAPI_event_code_to_name failure\n"); + + return 0; +} + +int GPTLget_npapievents (void) +{ + return npapievents; +} + +#else + +/* +** HAVE_PAPI not defined branch: "Should not be called" entry points for public routines +*/ + +int GPTL_PAPIlibraryinit () +{ + return GPTLerror ("GPTL_PAPIlibraryinit: PAPI not enabled\n"); +} + +int GPTLevent_name_to_code (const char *name, int *code) +{ + return GPTLerror ("GPTLevent_name_to_code: PAPI not enabled\n"); +} + +int GPTLevent_code_to_name (int code, char *name) +{ + return GPTLerror ("GPTLevent_code_to_name: PAPI not enabled\n"); +} + +#endif /* HAVE_PAPI */ diff --git a/CIME/non_py/src/timing/perf_mod.F90 b/CIME/non_py/src/timing/perf_mod.F90 new file mode 100644 index 00000000000..b8f9b50a2a8 --- /dev/null +++ b/CIME/non_py/src/timing/perf_mod.F90 @@ -0,0 +1,1770 @@ +module perf_mod + +!----------------------------------------------------------------------- +! +! Purpose: This module is responsible for controlling the performance +! timer logic. +! +! Author: P. Worley, January 2007 +! +! $Id$ +! 
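+! Typical call sequence (a minimal sketch; the file names and the optional
+! arguments shown are illustrative assumptions, not requirements):
+!   call t_initf ('drv_in', LogPrint=.true., mpicom=mpicom, MasterTask=master)
+!   call t_startf ('my_region')
+!   ! ... timed work ...
+!   call t_stopf ('my_region')
+!   call t_prf ('timing_file', mpicom)
+!   call t_finalizef ()
+!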
+!----------------------------------------------------------------------- + +!----------------------------------------------------------------------- +!- Uses ---------------------------------------------------------------- +!----------------------------------------------------------------------- +#ifdef NUOPC_INTERFACE +#define TIMERSTART call ESMF_TraceRegionEnter +#define TIMERSTOP call ESMF_TraceRegionExit + use ESMF, only: ESMF_TraceRegionEnter, ESMF_TraceRegionExit +#else +#define TIMERSTART ierr = GPTLstart +#define TIMERSTOP ierr = GPTLstop +#endif + +#ifndef USE_CSM_SHARE + use perf_utils +#else + use shr_sys_mod, only: shr_sys_abort + use shr_kind_mod, only: SHR_KIND_CS, SHR_KIND_CM, SHR_KIND_CX, & + SHR_KIND_R8, SHR_KIND_I8 + use shr_mpi_mod, only: shr_mpi_barrier, shr_mpi_bcast + use shr_log_mod, only: shr_log_getUnit, shr_log_freeUnit + use namelist_utils, only: find_group_name +#endif + use mpi +#if ( defined _OPENMP ) + use omp_lib, only : omp_in_parallel +#endif +!!----------------------------------------------------------------------- +!- module boilerplate -------------------------------------------------- +!----------------------------------------------------------------------- + implicit none + private ! Make the default access private + save + +!----------------------------------------------------------------------- +! Public interfaces ---------------------------------------------------- +!----------------------------------------------------------------------- + public t_initf + public t_setLogUnit + public t_getLogUnit + public t_profile_onf + public t_barrier_onf + public t_single_filef + public t_set_prefixf + public t_unset_prefixf + public t_stampf + public t_startf + public t_stopf + public t_startstop_valsf + public t_enablef + public t_disablef + public t_adj_detailf + public t_barrierf + public t_prf + public t_finalizef + +!----------------------------------------------------------------------- +! Private interfaces (local) ------------------------------------------- +!----------------------------------------------------------------------- + private perf_defaultopts + private perf_setopts + private papi_defaultopts + private papi_setopts + +!----------------------------------------------------------------------- +!- include statements -------------------------------------------------- +!----------------------------------------------------------------------- +#include "gptl.inc" + +!----------------------------------------------------------------------- +! Private data --------------------------------------------------------- +!----------------------------------------------------------------------- + + !---------------------------------------------------------------------------- + ! perf_mod options + !---------------------------------------------------------------------------- + integer, parameter :: def_p_logunit = 6 ! default + integer, private :: p_logunit = def_p_logunit + ! unit number for log output + + logical, parameter :: def_timing_initialized = .false. ! default + logical, private :: timing_initialized = def_timing_initialized + ! flag indicating whether timing library has + ! been initialized + + logical, parameter :: def_timing_disable = .false. ! default + logical, private :: timing_disable = def_timing_disable + ! flag indicating whether timers are disabled + + logical, parameter :: def_timing_barrier = .false. ! default + logical, private :: timing_barrier = def_timing_barrier + ! flag indicating whether the mpi_barrier in + ! 
t_barrierf should be called + + integer, parameter :: def_timer_depth_limit = 99999 ! default + integer, private :: timer_depth_limit = def_timer_depth_limit + ! integer indicating maximum number of levels of + ! timer nesting + + integer, parameter :: def_timing_detail_limit = 1 ! default + integer, private :: timing_detail_limit = def_timing_detail_limit + ! integer indicating maximum detail level to + ! profile + + integer, parameter :: init_timing_disable_depth = 0 ! init + integer, private :: timing_disable_depth = init_timing_disable_depth + ! integer indicating depth of t_disablef calls + + integer, parameter :: init_timing_detail = 0 ! init + integer, private :: cur_timing_detail = init_timing_detail + ! current timing detail level +#ifdef NUOPC_INTERFACE + integer, private :: cur_timing_depth = 0 +#endif + + integer, parameter :: init_num_threads = 1 ! init + integer, private :: num_threads = init_num_threads + ! current maximum number of threads per process + + logical, parameter :: def_perf_single_file = .false. ! default + logical, private :: perf_single_file = def_perf_single_file + ! flag indicating whether the performance timer + ! output should be written to a single file + ! (per component communicator) or to a + ! separate file for each process + + integer, parameter :: def_perf_outpe_num = 0 ! default + integer, private :: perf_outpe_num = def_perf_outpe_num + ! maximum number of processes writing out + ! timing data (for this component communicator) + + integer, parameter :: def_perf_outpe_stride = 1 ! default + integer, private :: perf_outpe_stride = def_perf_outpe_stride + ! separation between process ids for processes + ! that are writing out timing data + ! (for this component communicator) + + logical, parameter :: def_perf_global_stats = .true. ! default + logical, private :: perf_global_stats = def_perf_global_stats + ! collect and print out global performance statistics + ! (for this component communicator) + + logical, parameter :: def_perf_ovhd_measurement = .false. ! default + logical, private :: perf_ovhd_measurement = def_perf_ovhd_measurement + ! measure overhead of profiling directly + + real(shr_kind_r8), private :: perf_timing_ovhd = 0.0 ! start/stop overhead + + logical, parameter :: def_perf_add_detail = .false. ! default + logical, private :: perf_add_detail = def_perf_add_detail + ! flag indicating whether to add the current + ! detail level as a suffix to the timer name. + ! This requires that even t_startf/t_stopf + ! calls do not cross detail level changes +#ifdef HAVE_MPI + integer, parameter :: def_perf_timer = GPTLmpiwtime ! default +#else +#ifdef HAVE_NANOTIME + integer, parameter :: def_perf_timer = GPTLnanotime ! default +#else +#ifdef CPRIBM + integer,parameter :: def_perf_timer = GPTLread_real_time +#else + integer,parameter :: def_perf_timer = GPTLgettimeofday +#endif +#endif +#endif + + + integer, private :: perf_timer = def_perf_timer ! default + ! integer indicating which timer to use + ! (as defined in gptl.inc) + +#ifdef HAVE_PAPI + logical, parameter :: def_perf_papi_enable = .false. ! default +#else + logical, parameter :: def_perf_papi_enable = .false. ! default +#endif + logical, private :: perf_papi_enable = def_perf_papi_enable + ! flag indicating whether the PAPI namelist + ! should be read and HW performance counters + ! used in profiling + + ! PAPI counter ids + integer, parameter :: PAPI_NULL = -1 + + integer, parameter :: def_papi_ctr1 = PAPI_NULL ! 
default + integer, private :: papi_ctr1 = def_papi_ctr1 + + integer, parameter :: def_papi_ctr2 = PAPI_NULL ! default + integer, private :: papi_ctr2 = def_papi_ctr2 + + integer, parameter :: def_papi_ctr3 = PAPI_NULL ! default + integer, private :: papi_ctr3 = def_papi_ctr3 + + integer, parameter :: def_papi_ctr4 = PAPI_NULL ! default + integer, private :: papi_ctr4 = def_papi_ctr4 + +!======================================================================= +contains +!======================================================================= + +! +!======================================================================== +! + subroutine t_getLogUnit(LogUnit) +!----------------------------------------------------------------------- +! Purpose: Get log unit number. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + integer(SHR_KIND_IN), intent(OUT) :: LogUnit ! Unit number for log output +!----------------------------------------------------------------------- + + LogUnit = p_logunit + + return + end subroutine t_getLogUnit +! +!======================================================================== +! + subroutine t_setLogUnit(LogUnit) +!----------------------------------------------------------------------- +! Purpose: Set log unit number. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + integer(SHR_KIND_IN), intent(IN) :: LogUnit ! Unit number for log output +!----------------------------------------------------------------------- + + p_logunit = LogUnit +#ifndef USE_CSM_SHARE + call perfutils_setunit(p_logunit) +#endif + + return + end subroutine t_setLogUnit +! +!======================================================================== +! + subroutine perf_defaultopts(timing_disable_out, & + perf_timer_out, & + timer_depth_limit_out, & + timing_detail_limit_out, & + timing_barrier_out, & + perf_outpe_num_out, & + perf_outpe_stride_out, & + perf_single_file_out, & + perf_global_stats_out, & + perf_papi_enable_out, & + perf_ovhd_measurement_out, & + perf_add_detail_out ) +!----------------------------------------------------------------------- +! Purpose: Return default runtime options +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- + ! timers disable/enable option + logical, intent(out), optional :: timing_disable_out + ! performance timer option + integer, intent(out), optional :: perf_timer_out + ! timer depth limit option + integer, intent(out), optional :: timer_depth_limit_out + ! timer detail limit option + integer, intent(out), optional :: timing_detail_limit_out + ! timing barrier enable/disable option + logical, intent(out), optional :: timing_barrier_out + ! number of processes writing out timing data + integer, intent(out), optional :: perf_outpe_num_out + ! separation between process ids for processes that are writing out timing data + integer, intent(out), optional :: perf_outpe_stride_out + ! timing single / multple output file option + logical, intent(out), optional :: perf_single_file_out + ! collect and output global performance statistics option + logical, intent(out), optional :: perf_global_stats_out + ! 
calling PAPI to read HW performance counters option + logical, intent(out), optional :: perf_papi_enable_out + ! measure overhead of profiling directly + logical, intent(out), optional :: perf_ovhd_measurement_out + ! 'suffix' timer name with current detail level + logical, intent(out), optional :: perf_add_detail_out +!----------------------------------------------------------------------- + if ( present(timing_disable_out) ) then + timing_disable_out = def_timing_disable + endif + if ( present(perf_timer_out) ) then + perf_timer_out = def_perf_timer + endif + if ( present(timer_depth_limit_out) ) then + timer_depth_limit_out = def_timer_depth_limit + endif + if ( present(timing_detail_limit_out) ) then + timing_detail_limit_out = def_timing_detail_limit + endif + if ( present(timing_barrier_out) ) then + timing_barrier_out = def_timing_barrier + endif + if ( present(perf_outpe_num_out) ) then + perf_outpe_num_out = def_perf_outpe_num + endif + if ( present(perf_outpe_stride_out) ) then + perf_outpe_stride_out = def_perf_outpe_stride + endif + if ( present(perf_single_file_out) ) then + perf_single_file_out = def_perf_single_file + endif + if ( present(perf_global_stats_out) ) then + perf_global_stats_out = def_perf_global_stats + endif + if ( present(perf_papi_enable_out) ) then + perf_papi_enable_out = def_perf_papi_enable + endif + if ( present(perf_ovhd_measurement_out) ) then + perf_ovhd_measurement_out = def_perf_ovhd_measurement + endif + if ( present(perf_add_detail_out) ) then + perf_add_detail_out = def_perf_add_detail + endif +! + return + end subroutine perf_defaultopts +! +!======================================================================== +! + subroutine perf_setopts(mastertask, & + LogPrint, & + timing_disable_in, & + perf_timer_in, & + timer_depth_limit_in, & + timing_detail_limit_in, & + timing_barrier_in, & + perf_outpe_num_in, & + perf_outpe_stride_in, & + perf_single_file_in, & + perf_global_stats_in, & + perf_papi_enable_in, & + perf_ovhd_measurement_in, & + perf_add_detail_in ) +!----------------------------------------------------------------------- +! Purpose: Set runtime options +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments---------------------------- +! + ! master process? + logical, intent(in) :: mastertask + ! Print out to log file? + logical, intent(IN) :: LogPrint + ! timers disable/enable option + logical, intent(in), optional :: timing_disable_in + ! performance timer option + integer, intent(in), optional :: perf_timer_in + ! timer depth limit option + integer, intent(in), optional :: timer_depth_limit_in + ! timer detail limit option + integer, intent(in), optional :: timing_detail_limit_in + ! timing barrier enable/disable option + logical, intent(in), optional :: timing_barrier_in + ! number of processes writing out timing data + integer, intent(in), optional :: perf_outpe_num_in + ! separation between process ids for processes that are writing out timing data + integer, intent(in), optional :: perf_outpe_stride_in + ! timing single / multple output file option + logical, intent(in), optional :: perf_single_file_in + ! collect and output global performance statistics option + logical, intent(in), optional :: perf_global_stats_in + ! calling PAPI to read HW performance counters option + logical, intent(in), optional :: perf_papi_enable_in + ! measure overhead of profiling directly + logical, intent(in), optional :: perf_ovhd_measurement_in + ! 
'suffix' timer name with current detail level + logical, intent(in), optional :: perf_add_detail_in +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! error return +!----------------------------------------------------------------------- + if ( .not. timing_initialized ) then + + if ( present(timing_disable_in) ) then + timing_disable = timing_disable_in + if (timing_disable) then + ierr = GPTLdisable() + else + ierr = GPTLenable() + endif + endif + if ( present(perf_timer_in) ) then + if ((perf_timer_in .eq. GPTLgettimeofday) .or. & + (perf_timer_in .eq. GPTLnanotime) .or. & + (perf_timer_in .eq. GPTLread_real_time) .or. & + (perf_timer_in .eq. GPTLmpiwtime) .or. & + (perf_timer_in .eq. GPTLclockgettime) .or. & + (perf_timer_in .eq. GPTLpapitime)) then + perf_timer = perf_timer_in + else + if (mastertask) then + write(p_logunit,*) 'PERF_SETOPTS: illegal timer requested=',& + perf_timer_in, '. Request ignored.' + endif + endif + endif + if ( present(timer_depth_limit_in) ) then + timer_depth_limit = timer_depth_limit_in + endif + if ( present(timing_detail_limit_in) ) then + timing_detail_limit = timing_detail_limit_in + endif + if ( present(timing_barrier_in) ) then + timing_barrier = timing_barrier_in + endif + if ( present(perf_outpe_num_in) ) then + perf_outpe_num = perf_outpe_num_in + endif + if ( present(perf_outpe_stride_in) ) then + perf_outpe_stride = perf_outpe_stride_in + endif + if ( present(perf_single_file_in) ) then + perf_single_file = perf_single_file_in + endif + if ( present(perf_global_stats_in) ) then + perf_global_stats = perf_global_stats_in + endif + if ( present(perf_papi_enable_in) ) then +#ifdef HAVE_PAPI + perf_papi_enable = perf_papi_enable_in +#else + if (perf_papi_enable_in) then + if (mastertask) then + write(p_logunit,*) 'PERF_SETOPTS: PAPI library not linked in. ',& + 'Request to enable PAPI ignored.' + endif + endif + perf_papi_enable = .false. +#endif + endif + if ( present(perf_ovhd_measurement_in) ) then + perf_ovhd_measurement = perf_ovhd_measurement_in + endif + if ( present(perf_add_detail_in) ) then + perf_add_detail = perf_add_detail_in + endif +! + if (mastertask .and. LogPrint) then + write(p_logunit,*) '(t_initf) Using profile_disable= ', timing_disable + write(p_logunit,*) '(t_initf) profile_timer= ', perf_timer + write(p_logunit,*) '(t_initf) profile_depth_limit= ', timer_depth_limit + write(p_logunit,*) '(t_initf) profile_detail_limit= ', timing_detail_limit + write(p_logunit,*) '(t_initf) profile_barrier= ', timing_barrier + write(p_logunit,*) '(t_initf) profile_outpe_num= ', perf_outpe_num + write(p_logunit,*) '(t_initf) profile_outpe_stride= ', perf_outpe_stride + write(p_logunit,*) '(t_initf) profile_single_file= ', perf_single_file + write(p_logunit,*) '(t_initf) profile_global_stats= ', perf_global_stats + write(p_logunit,*) '(t_initf) profile_ovhd_measurement=', perf_ovhd_measurement + write(p_logunit,*) '(t_initf) profile_add_detail= ', perf_add_detail + write(p_logunit,*) '(t_initf) profile_papi_enable= ', perf_papi_enable + endif +! +#ifdef DEBUG + else + write(p_logunit,*) 'PERF_SETOPTS: timing library already initialized. Request ignored.' +#endif + endif +! + return + end subroutine perf_setopts + +! +!======================================================================== +! + subroutine papi_defaultopts(papi_ctr1_out, & + papi_ctr2_out, & + papi_ctr3_out, & + papi_ctr4_out ) +!----------------------------------------------------------------------- +! 
Purpose: Return default runtime PAPI counter options +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- + ! PAPI counter option #1 + integer, intent(out), optional :: papi_ctr1_out + ! PAPI counter option #2 + integer, intent(out), optional :: papi_ctr2_out + ! PAPI counter option #3 + integer, intent(out), optional :: papi_ctr3_out + ! PAPI counter option #4 + integer, intent(out), optional :: papi_ctr4_out +!----------------------------------------------------------------------- + if ( present(papi_ctr1_out) ) then + papi_ctr1_out = def_papi_ctr1 + endif + if ( present(papi_ctr2_out) ) then + papi_ctr2_out = def_papi_ctr2 + endif + if ( present(papi_ctr3_out) ) then + papi_ctr3_out = def_papi_ctr3 + endif + if ( present(papi_ctr4_out) ) then + papi_ctr4_out = def_papi_ctr4 + endif +! + return + end subroutine papi_defaultopts +! +!======================================================================== +! + subroutine papi_setopts(papi_ctr1_in, & + papi_ctr2_in, & + papi_ctr3_in, & + papi_ctr4_in ) +!----------------------------------------------------------------------- +! Purpose: Set runtime PAPI counter options +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments---------------------------- +! + ! performance counter option + integer, intent(in), optional :: papi_ctr1_in + ! performance counter option + integer, intent(in), optional :: papi_ctr2_in + ! performance counter option + integer, intent(in), optional :: papi_ctr3_in + ! performance counter option + integer, intent(in), optional :: papi_ctr4_in +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! error return +!----------------------------------------------------------------------- + if ( .not. timing_initialized ) then + + if ( present(papi_ctr1_in) ) then + papi_ctr1 = papi_ctr1_in + endif + if ( present(papi_ctr2_in) ) then + papi_ctr2 = papi_ctr2_in + endif + if ( present(papi_ctr3_in) ) then + papi_ctr3 = papi_ctr3_in + endif + if ( present(papi_ctr4_in) ) then + papi_ctr4 = papi_ctr4_in + endif +! +#ifdef DEBUG + else + write(p_logunit,*) 'PAPI_SETOPTS: timing library already initialized. Request ignored.' +#endif + endif +! + return + end subroutine papi_setopts +! +!======================================================================== +! + logical function t_profile_onf() +!----------------------------------------------------------------------- +! Purpose: Return flag indicating whether profiling is currently active. +! Part of workaround to implement FVbarrierclock before +! communicators exposed in Pilgrim. Does not check level of +! event nesting. +! Author: P. Worley +!----------------------------------------------------------------------- + + if ((.not. timing_initialized) .or. & + (timing_disable_depth > 0)) then + t_profile_onf = .false. + else + t_profile_onf = .true. + endif + + end function t_profile_onf +! +!======================================================================== +! + logical function t_barrier_onf() +!----------------------------------------------------------------------- +! Purpose: Return timing_barrier. Part of workaround to implement +! FVbarrierclock before communicators exposed in Pilgrim. +! Author: P. 
Worley +!----------------------------------------------------------------------- + + t_barrier_onf = timing_barrier + + end function t_barrier_onf +! +!======================================================================== +! + logical function t_single_filef() +!----------------------------------------------------------------------- +! Purpose: Return perf_single_file. Used to control output of other +! performance data, only spmdstats currently. +! Author: P. Worley +!----------------------------------------------------------------------- + + t_single_filef = perf_single_file + + end function t_single_filef +! +!======================================================================== +! + subroutine t_set_prefixf(prefix_string) +!----------------------------------------------------------------------- +! Purpose: Set prefix for subsequent time event names. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + ! performance timer event name prefix + character(len=*), intent(in) :: prefix_string +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return + integer i ! loop index +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + + ierr = GPTLprefix_set(trim(prefix_string)) + + end subroutine t_set_prefixf +! +!======================================================================== +! + subroutine t_unset_prefixf() +!----------------------------------------------------------------------- +! Purpose: Unset prefix for subsequent time event names. +! Ignored in threaded regions. +! Author: P. Worley +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + + ierr = GPTLprefix_unset() + + end subroutine t_unset_prefixf +! +!======================================================================== +! + subroutine t_stampf(wall, usr, sys) +!----------------------------------------------------------------------- +! Purpose: Record wallclock, user, and system times (seconds). +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Output arguments----------------------------- +! + real(shr_kind_r8), intent(out) :: wall ! wallclock time + real(shr_kind_r8), intent(out) :: usr ! user time + real(shr_kind_r8), intent(out) :: sys ! system time +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return +! +!----------------------------------------------------------------------- +! + if ((.not. timing_initialized) .or. & + (timing_disable_depth > 0)) then + wall = 0.0 + usr = 0.0 + sys = 0.0 + else + ierr = GPTLstamp(wall, usr, sys) + endif + + return + end subroutine t_stampf +! +!======================================================================== +! + subroutine t_startf(event, handle) +!----------------------------------------------------------------------- +! Purpose: Start an event timer +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + ! performance timer event name + character(len=*), intent(in) :: event +! 
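!
! For illustration (the event name 'phys_run' below is an assumed
! example, not a name defined by this library), a typical matched
! pair around a timed code region looks like:
!
!     call t_startf('phys_run')
!     ! ... work to be timed ...
!     call t_stopf('phys_run')
!
! When perf_add_detail is enabled, the body below appends the current
! detail level to the name, so the timer is recorded as e.g.
! 'phys_run_02'.
!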
+!---------------------------Input/Output arguments---------------------- +! + ! GPTL event handle + integer, optional :: handle +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return + integer str_length, i ! support for adding + ! detail suffix + character(len=2) cdetail ! char variable for detail + real(shr_kind_r8) ovhd_start, ovhd_stop, usr, sys + ! for overhead calculation +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + if (timing_disable_depth > 0) return +#ifdef NUOPC_INTERFACE +#if ( defined _OPENMP ) + if (omp_in_parallel()) return +#endif + cur_timing_depth = cur_timing_depth + 1 + if(cur_timing_depth > timer_depth_limit) return +#ifdef DEBUG +! print *, 'start timer ',trim(event), cur_timing_depth, timer_depth_limit +#endif +#endif + +!$OMP MASTER + if (perf_ovhd_measurement) then +#ifdef HAVE_MPI + ovhd_start = mpi_wtime() +#else + usr = 0.0 + sys = 0.0 + ierr = GPTLstamp(ovhd_start, usr, sys) +#endif + perf_timing_ovhd = perf_timing_ovhd - ovhd_start + endif +#ifndef NUOPC_INTERFACE +!$OMP END MASTER +#endif + if ((perf_add_detail) .AND. (cur_timing_detail < 100)) then + write(cdetail,'(i2.2)') cur_timing_detail + str_length = min(SHR_KIND_CM-3,len_trim(event)) + TIMERSTART(event(1:str_length)//'_'//cdetail) + else + str_length = min(SHR_KIND_CM,len_trim(event)) + TIMERSTART(event(1:str_length)) + endif +#ifndef NUOPC_INTERFACE +!$OMP MASTER +#endif + if (perf_ovhd_measurement) then +#ifdef HAVE_MPI + ovhd_stop = mpi_wtime() +#else + ierr = GPTLstamp(ovhd_stop, usr, sys) +#endif + perf_timing_ovhd = perf_timing_ovhd + ovhd_stop + endif +!$OMP END MASTER + return + end subroutine t_startf +! +!======================================================================== +! + subroutine t_stopf(event, handle) +!----------------------------------------------------------------------- +! Purpose: Stop an event timer +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + ! performance timer event name + character(len=*), intent(in) :: event +! +!---------------------------Input/Output arguments---------------------- +! + ! GPTL event handle + integer, optional :: handle +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return + integer str_length, i ! support for adding + ! detail suffix + character(len=2) cdetail ! char variable for detail + real(shr_kind_r8) ovhd_start, ovhd_stop, usr, sys + ! for overhead calculation +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + if (timing_disable_depth > 0) return +#ifdef NUOPC_INTERFACE +#if ( defined _OPENMP ) + if (omp_in_parallel()) return +#endif +#endif +!$OMP MASTER + if (perf_ovhd_measurement) then +#ifdef HAVE_MPI + ovhd_start = mpi_wtime() +#else + usr = 0.0 + sys = 0.0 + ierr = GPTLstamp(ovhd_start, usr, sys) +#endif + perf_timing_ovhd = perf_timing_ovhd - ovhd_start + endif +#ifdef NUOPC_INTERFACE + cur_timing_depth = cur_timing_depth - 1 + if(cur_timing_depth < timer_depth_limit) then +#else +!$OMP END MASTER +#endif + if ((perf_add_detail) .AND. 
(cur_timing_detail < 100)) then + write(cdetail,'(i2.2)') cur_timing_detail + str_length = min(SHR_KIND_CM-3,len_trim(event)) + TIMERSTOP(event(1:str_length)//'_'//cdetail) + else + str_length = min(SHR_KIND_CM,len_trim(event)) + TIMERSTOP(event(1:str_length)) + endif +#ifndef NUOPC_INTERFACE +!$OMP MASTER +#endif + if (perf_ovhd_measurement) then +#ifdef HAVE_MPI + ovhd_stop = mpi_wtime() +#else + ierr = GPTLstamp(ovhd_stop, usr, sys) +#endif + perf_timing_ovhd = perf_timing_ovhd + ovhd_stop + endif +#ifdef NUOPC_INTERFACE + endif +#endif +!$OMP END MASTER + return + end subroutine t_stopf +! +!======================================================================== +! + subroutine t_startstop_valsf(event, walltime, callcount, handle) +!----------------------------------------------------------------------- +! Purpose: Create/add walltime and call count to an event timer +! Author: P. Worley (based on J. Rosinski GPTL routine) +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + ! performance timer event name + character(len=*), intent(in) :: event + ! walltime (seconds) associated with this start/stop pair + ! If not set, default is 0.0 . If < 0.0, set to 0.0 . + real(shr_kind_r8), intent(in), optional :: walltime + ! call count associated with this start/stop pair + ! If not set, default is 1. If < 0, set to 0. + integer, intent(in), optional :: callcount +! +!---------------------------Input/Output arguments---------------------- +! + ! GPTL event handle + integer, optional :: handle +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return + + integer str_length, i ! support for adding + ! detail suffix + character(len=2) cdetail ! char variable for detail + integer callcnt ! call count increment + real(shr_kind_r8) wtime ! walltime increment (seconds) + real(shr_kind_r8) ovhd_start, ovhd_stop, usr, sys + ! for overhead calculation +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + if (timing_disable_depth > 0) return + +!$OMP MASTER + if (perf_ovhd_measurement) then +#ifdef HAVE_MPI + ovhd_start = mpi_wtime() +#else + usr = 0.0 + sys = 0.0 + ierr = GPTLstamp(ovhd_start, usr, sys) +#endif + perf_timing_ovhd = perf_timing_ovhd - ovhd_start + endif +!$OMP END MASTER + + wtime = 0.0_shr_kind_r8 + if ( present(walltime) ) then + if (walltime > 0.0) then + wtime = walltime + endif + endif + + callcnt = 1 + if ( present(callcount) ) then + if (callcount > 0) then + callcnt = callcount + else + callcnt = 0 + endif + endif + + if ((perf_add_detail) .AND. (cur_timing_detail < 100)) then + + write(cdetail,'(i2.2)') cur_timing_detail + str_length = min(SHR_KIND_CM-3,len_trim(event)) + ierr = GPTLstartstop_vals( & + event(1:str_length)//'_'//cdetail, wtime, callcnt) + + else + + str_length = min(SHR_KIND_CM,len_trim(event)) + ierr = GPTLstartstop_vals(trim(event), wtime, callcnt) + + endif + +!$OMP MASTER + if (perf_ovhd_measurement) then +#ifdef HAVE_MPI + ovhd_stop = mpi_wtime() +#else + ierr = GPTLstamp(ovhd_stop, usr, sys) +#endif + perf_timing_ovhd = perf_timing_ovhd + ovhd_stop + endif +!$OMP END MASTER + return + end subroutine t_startstop_valsf +! +!======================================================================== +! + subroutine t_enablef() +!----------------------------------------------------------------------- +! 
Purpose: Enable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored +! in threaded regions. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return +! +!---------------------------Externals----------------------------------- +! + +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + +#if ( defined _OPENMP ) + if (omp_in_parallel()) return +#endif + + if (timing_disable_depth > 0) then + if (timing_disable_depth .eq. 1) then + ierr = GPTLenable() + endif + timing_disable_depth = timing_disable_depth - 1 + endif + + return + end subroutine t_enablef +! +!======================================================================== +! + subroutine t_disablef() +!----------------------------------------------------------------------- +! Purpose: Disable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored +! in threaded regions. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return +! +!---------------------------Externals----------------------------------- +! +#if ( defined _OPENMP ) + logical omp_in_parallel + external omp_in_parallel +#endif +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + +#if ( defined _OPENMP ) + if (omp_in_parallel()) return +#endif + + if (timing_disable_depth .eq. 0) then + ierr = GPTLdisable() + endif + timing_disable_depth = timing_disable_depth + 1 + + return + end subroutine t_disablef +! +!======================================================================== +! + subroutine t_adj_detailf(detail_adjustment) +!----------------------------------------------------------------------- +! Purpose: Modify current detail level. Ignored in threaded regions. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + integer, intent(in) :: detail_adjustment ! user defined increase or + ! decrease in detail level +! +!---------------------------Externals----------------------------------- +! +#if ( defined _OPENMP ) + logical omp_in_parallel + external omp_in_parallel +#endif +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + +#if ( defined _OPENMP ) + if (omp_in_parallel()) return +#endif + +! using disable/enable to implement timing_detail logic so also control +! direct GPTL calls (such as occur in Trilinos library) + if ((cur_timing_detail <= timing_detail_limit) .and. & + (cur_timing_detail + detail_adjustment > timing_detail_limit)) then + call t_disablef() + elseif ((cur_timing_detail > timing_detail_limit) .and. & + (cur_timing_detail + detail_adjustment <= timing_detail_limit)) then + call t_enablef() + endif + + cur_timing_detail = cur_timing_detail + detail_adjustment + + return + end subroutine t_adj_detailf +! +!======================================================================== +! + subroutine t_barrierf(event, mpicom) +!----------------------------------------------------------------------- +! Purpose: Call (and time) mpi_barrier. Ignored inside OpenMP +! threaded regions. Note that barrier executed even if +! 
event not recorded because of level of timer event nesting. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- + ! mpi communicator id + integer, intent(in), optional :: mpicom + ! performance timer event name + character(len=*), intent(in), optional :: event +! +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return +! +!---------------------------Externals----------------------------------- +! +#if ( defined _OPENMP ) + logical omp_in_parallel + external omp_in_parallel +#endif +! +!----------------------------------------------------------------------- +! + if (timing_barrier) then + +#if ( defined _OPENMP ) + if (omp_in_parallel()) return +#endif + if (.not. timing_initialized) return + if (timing_disable_depth > 0) return + + if ( present (event) ) then + call t_startf(event) + endif + + if ( present (mpicom) ) then + call shr_mpi_barrier(mpicom, 'T_BARRIERF: bad mpi communicator') + else + call shr_mpi_barrier(MPI_COMM_WORLD, 'T_BARRIERF: bad mpi communicator') + endif + + if ( present (event) ) then + call t_stopf(event) + endif + + endif + + return + end subroutine t_barrierf +! +!======================================================================== +! + subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & + single_file, global_stats, output_thispe) +!----------------------------------------------------------------------- +! Purpose: Write out performance timer data +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + ! performance timer output file name + character(len=*), intent(in), optional :: filename + ! mpi communicator id + integer, intent(in), optional :: mpicom + ! maximum number of processes writing out timing data + integer, intent(in), optional :: num_outpe + ! separation between process ids for processes writing out data + integer, intent(in), optional :: stride_outpe + ! enable/disable the writing of data to a single file + logical, intent(in), optional :: single_file + ! enable/disable the collection of global statistics + logical, intent(in), optional :: global_stats + ! output timing data for this process + logical, intent(in), optional :: output_thispe +! +!---------------------------Local workspace----------------------------- +! + logical one_file ! flag indicting whether to write + ! all data to a single file + logical glb_stats ! flag indicting whether to compute + ! global statistics + logical pr_write ! flag indicating whether the current + ! GPTL output mode is write + logical write_data ! flag indicating whether this process + ! should output its timing data + integer i ! loop index + integer mpicom2 ! local copy of MPI communicator + integer me ! communicator local process id + integer npes ! local communicator group size + integer gme ! global process id + integer ierr ! MPI error return + integer outpe_num ! max number of processes writing out + ! timing data (excluding output_thispe) + integer outpe_stride ! separation between process ids for + ! processes writing out timing data + integer max_outpe ! max process id for processes + ! writing out timing data + integer signal ! send/recv variable for single + ! output file logic + integer str_length ! string length + integer unitn ! file unit number + integer cme_adj ! 
length of filename suffix + integer status (MPI_STATUS_SIZE) ! Status of message + character(len=7) cme ! string representation of process id + character(len=SHR_KIND_CX+14) fname ! timing output filename +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return +#ifdef NUOPC_INTERFACE + return +#endif + + call t_startf("t_prf") +!$OMP MASTER + call mpi_comm_rank(MPI_COMM_WORLD, gme, ierr) + if ( present(mpicom) ) then + mpicom2 = mpicom + call mpi_comm_size(mpicom2, npes, ierr) + if (ierr .eq. MPI_ERR_COMM) then + call shr_sys_abort('T_PRF: bad mpi communicator') + endif + call mpi_comm_rank(mpicom2, me, ierr) + else + call mpi_comm_size(MPI_COMM_WORLD, npes, ierr) + mpicom2 = MPI_COMM_WORLD + me = gme + endif + + do i=1,SHR_KIND_CX+14 + fname(i:i) = " " + enddo + + unitn = shr_log_getUnit() + + ! determine what the current output mode is (append or write) + if (GPTLprint_mode_query() == GPTLprint_write) then + pr_write = .true. + ierr = GPTLprint_mode_set(GPTLprint_append) + else + pr_write = .false. + endif + + ! Determine whether to write all data to a single fie + if (present(single_file)) then + one_file = single_file + else + one_file = perf_single_file + endif + + ! Determine whether to compute global statistics + if (present(global_stats)) then + glb_stats = global_stats + else + glb_stats = perf_global_stats + endif + + ! Determine which processes are writing out timing data + write_data = .false. + + if (present(num_outpe)) then + if (num_outpe < 0) then + outpe_num = npes + else + outpe_num = num_outpe + endif + else + if (perf_outpe_num < 0) then + outpe_num = npes + else + outpe_num = perf_outpe_num + endif + endif + + if (present(stride_outpe)) then + if (stride_outpe < 1) then + outpe_stride = 1 + else + outpe_stride = stride_outpe + endif + else + if (perf_outpe_stride < 1) then + outpe_stride = 1 + else + outpe_stride = perf_outpe_stride + endif + endif + + max_outpe = min(outpe_num*outpe_stride, npes) - 1 + + if ((mod(me, outpe_stride) .eq. 0) .and. (me .le. max_outpe)) & + write_data = .true. + + if (present(output_thispe)) then + write_data = output_thispe + endif + + ! If a single timing output file, take turns writing to it. + if (one_file) then + + if ( present(filename) ) then + str_length = min(SHR_KIND_CX,len_trim(filename)) + fname(1:str_length) = filename(1:str_length) + else + fname(1:10) = "timing_all" + endif + + signal = 0 + if (me .eq. 
0) then + + if (glb_stats) then + open( unitn, file=trim(fname), status='UNKNOWN', access='SEQUENTIAL' ) + write( unitn, 100) npes + 100 format(/,"***** GLOBAL STATISTICS (",I6," MPI TASKS) *****",/) + close( unitn ) + + ierr = GPTLpr_summary_file(mpicom2, trim(fname)) + endif + + if (write_data) then + if (glb_stats) then + open( unitn, file=trim(fname), status='OLD', access='SEQUENTIAL', position='APPEND' ) + else + open( unitn, file=trim(fname), status='UNKNOWN', access='SEQUENTIAL' ) + endif + + if (perf_ovhd_measurement) then + write( unitn, 101) me, gme + 101 format(/,"************ PROCESS ",I6," (",I6,") ************") + write( unitn, 102) perf_timing_ovhd + 102 format("** TIMING OVERHEAD ",E20.10," SECONDS *",/) + else + write( unitn, 103) me, gme + 103 format(/,"************ PROCESS ",I6," (",I6,") ************",/) + endif + + close( unitn ) + + ierr = GPTLpr_file(trim(fname)) + endif + + else + + if (glb_stats) then + ierr = GPTLpr_summary_file(mpicom2, trim(fname)) + endif + + call mpi_recv (signal, 1, mpi_integer, me-1, me-1, mpicom2, status, ierr) + if (ierr /= mpi_success) then + write(p_logunit,*) 'T_PRF: mpi_recv failed ierr=',ierr + call shr_sys_abort() + end if + + if (write_data) then + open( unitn, file=trim(fname), status='OLD', access='SEQUENTIAL', position='APPEND' ) + if (perf_ovhd_measurement) then + write( unitn, 101) me, gme + write( unitn, 102) perf_timing_ovhd + else + write( unitn, 103) me, gme + endif + close( unitn ) + + ierr = GPTLpr_file(trim(fname)) + endif + + endif + + if (me+1 < npes) & + call mpi_send (signal, 1, mpi_integer, me+1, me, mpicom2, ierr) + + else + + if (glb_stats) then + if ( present(filename) ) then + str_length = min(SHR_KIND_CX-6,len_trim(filename)) + fname(1:str_length) = filename(1:str_length) + else + str_length = 6 + fname(1:10) = "timing" + endif + fname(str_length+1:str_length+6) = '_stats' + + if (me .eq. 0) then + open( unitn, file=trim(fname), status='UNKNOWN', access='SEQUENTIAL' ) + write( unitn, 100) npes + close( unitn ) + endif + + ierr = GPTLpr_summary_file(mpicom2, trim(fname)) + fname(str_length+1:str_length+6) = ' ' + endif + + if (write_data) then + if (npes .le. 10) then + write(cme,'(i1.1)') me + cme_adj = 2 + elseif (npes .le. 100) then + write(cme,'(i2.2)') me + cme_adj = 3 + elseif (npes .le. 1000) then + write(cme,'(i3.3)') me + cme_adj = 4 + elseif (npes .le. 10000) then + write(cme,'(i4.4)') me + cme_adj = 5 + elseif (npes .le. 100000) then + write(cme,'(i5.5)') me + cme_adj = 6 + else + write(cme,'(i6.6)') me + cme_adj = 7 + endif + + if ( present(filename) ) then + str_length = min(SHR_KIND_CX-cme_adj,len_trim(filename)) + fname(1:str_length) = filename(1:str_length) + else + str_length = 6 + fname(1:10) = "timing" + endif + fname(str_length+1:str_length+1) = '.' + fname(str_length+2:str_length+cme_adj) = cme + + open( unitn, file=trim(fname), status='UNKNOWN', access='SEQUENTIAL' ) + if (perf_ovhd_measurement) then + write( unitn, 101) me, gme + write( unitn, 102) perf_timing_ovhd + else + write( unitn, 103) me, gme + endif + close( unitn ) + + ierr = GPTLpr_file(trim(fname)) + endif + + endif + + call shr_log_freeUnit( unitn ) + + ! reset GPTL output mode + if (pr_write) then + ierr = GPTLprint_mode_set(GPTLprint_write) + endif + +!$OMP END MASTER + call t_stopf("t_prf") + + return + end subroutine t_prf +! +!======================================================================== +! 
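!
! A minimal usage sketch of this module (illustrative only; the file
! name 'drv_in', the variables mpicom and iam, and the option values
! shown are assumptions, not defaults defined here). A component
! typically initializes the library once, brackets timed regions with
! t_startf/t_stopf, writes out the timing data, and finalizes:
!
!     call t_initf('drv_in', LogPrint=.true., mpicom=mpicom, &
!                  MasterTask=(iam == 0))
!     call t_startf('run_loop')
!     ! ... timestepping work ...
!     call t_stopf('run_loop')
!     call t_prf('timing_file', mpicom)
!     call t_finalizef()
!
! Runtime behavior is controlled through the prof_inparm namelist
! group read from NLFilename, for example:
!
!     &prof_inparm
!       profile_disable     = .false.
!       profile_barrier     = .true.
!       profile_depth_limit = 12
!       profile_single_file = .true.
!     /
!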
+ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask, & + MaxThreads) +!----------------------------------------------------------------------- +! Purpose: Set default values of runtime timing options +! before namelists prof_inparm and papi_inparm are read, +! read namelists (and broadcast, if SPMD), +! then initialize timing library. +! Author: P. Worley (based on shr_inputinfo_mod and runtime_opts) +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + character(len=*), intent(IN) :: NLFilename ! Name-list filename + logical, optional, intent(IN) :: LogPrint ! If print out to log file + integer, optional, intent(IN) :: LogUnit ! Unit number for log output + integer, optional, intent(IN) :: mpicom ! MPI communicator + logical, optional, intent(IN) :: MasterTask ! If MPI master task + integer, optional, intent(IN) :: MaxThreads ! maximum number of threads + ! used by components +! +!---------------------------Local workspace----------------------------- +! + character(len=*), parameter :: subname = '(T_INITF) ' + logical :: MasterTask2 ! If MPI master task + logical :: LogPrint2 ! If print to log + + integer me ! communicator local process id + integer ierr ! error return + integer unitn ! file unit number + integer papi_ctr1_id ! PAPI counter id + integer papi_ctr2_id ! PAPI counter id + integer papi_ctr3_id ! PAPI counter id + integer papi_ctr4_id ! PAPI counter id +! +!---------------------------Namelists ---------------------------------- +! + logical profile_disable + logical profile_barrier + logical profile_single_file + logical profile_global_stats + integer profile_depth_limit + integer profile_detail_limit + integer profile_outpe_num + integer profile_outpe_stride + integer profile_timer + logical profile_papi_enable + logical profile_ovhd_measurement + logical profile_add_detail + namelist /prof_inparm/ profile_disable, profile_barrier, & + profile_single_file, profile_global_stats, & + profile_depth_limit, & + profile_detail_limit, profile_outpe_num, & + profile_outpe_stride, profile_timer, & + profile_papi_enable, profile_ovhd_measurement, & + profile_add_detail + + character(len=16) papi_ctr1_str + character(len=16) papi_ctr2_str + character(len=16) papi_ctr3_str + character(len=16) papi_ctr4_str + namelist /papi_inparm/ papi_ctr1_str, papi_ctr2_str, & + papi_ctr3_str, papi_ctr4_str +! +!---------------------------Externals----------------------------------- +! +#if ( defined _OPENMP ) + integer omp_get_max_threads + external omp_get_max_threads +#endif +!----------------------------------------------------------------------- + if ( timing_initialized ) then +#ifdef DEBUG + write(p_logunit,*) 'T_INITF: timing library already initialized. Request ignored.' +#endif + return + endif + +!$OMP MASTER + if ( present(MaxThreads) ) then + num_threads = MaxThreads + else +#ifdef _OPENMP +!$omp parallel + num_threads = omp_get_max_threads() +!$omp end parallel +#else + num_threads = 1 +#endif + endif + + if ( present(LogUnit) ) then + call t_setLogUnit(LogUnit) + else + call t_setLogUnit(def_p_logunit) + endif + + if ( present(MasterTask) .and. present(mpicom) )then + call mpi_comm_rank(mpicom, me, ierr) + if (ierr .eq. MPI_ERR_COMM) then + call shr_sys_abort('T_INITF: bad mpi communicator') + endif + if (me .eq. 0) then + MasterTask2 = .true. + else + MasterTask2 = .false. + endif + else + MasterTask2 = .true. 
+ end if + + if ( present(LogPrint) ) then + LogPrint2 = LogPrint + else + LogPrint2 = .true. + endif + + ! Set PERF defaults, then override with user-specified input + call perf_defaultopts(timing_disable_out=profile_disable, & + perf_timer_out=profile_timer, & + timer_depth_limit_out=profile_depth_limit, & + timing_detail_limit_out=profile_detail_limit, & + timing_barrier_out=profile_barrier, & + perf_outpe_num_out = profile_outpe_num, & + perf_outpe_stride_out = profile_outpe_stride, & + perf_single_file_out=profile_single_file, & + perf_global_stats_out=profile_global_stats, & + perf_papi_enable_out=profile_papi_enable, & + perf_ovhd_measurement_out=profile_ovhd_measurement, & + perf_add_detail_out=profile_add_detail ) + if ( MasterTask2 ) then + + ! Read in the prof_inparm namelist from NLFilename if it exists + + write(p_logunit,*) '(t_initf) Read in prof_inparm namelist from: '//trim(NLFilename) + unitn = shr_log_getUnit() + + ierr = 1 + open( unitn, file=trim(NLFilename), status="OLD", form="FORMATTED", access="SEQUENTIAL", iostat=ierr ) + if (ierr .eq. 0) then + + ! Look for prof_inparm group name in the input file. + ! If found, leave the file positioned at that namelist group. + call find_group_name(unitn, 'prof_inparm', status=ierr) + + if (ierr == 0) then ! found prof_inparm + read(unitn, nml=prof_inparm, iostat=ierr) + if (ierr /= 0) then + call shr_sys_abort( subname//':: namelist read returns an'// & + ' error condition for prof_inparm' ) + end if + end if + + close(unitn) + + endif + call shr_log_freeUnit( unitn ) + + endif + + ! This logic assumes that there will be only one MasterTask + ! per communicator, and that this MasterTask is process 0. + if ( present(MasterTask) .and. present(mpicom) )then + call shr_mpi_bcast( profile_disable, MPICom ) + call shr_mpi_bcast( profile_barrier, MPICom ) + call shr_mpi_bcast( profile_single_file, MPICom ) + call shr_mpi_bcast( profile_global_stats, MPICom ) + call shr_mpi_bcast( profile_papi_enable, MPICom ) + call shr_mpi_bcast( profile_ovhd_measurement, MPICom ) + call shr_mpi_bcast( profile_add_detail, MPICom ) + call shr_mpi_bcast( profile_depth_limit, MPICom ) + call shr_mpi_bcast( profile_detail_limit, MPICom ) + call shr_mpi_bcast( profile_outpe_num, MPICom ) + call shr_mpi_bcast( profile_outpe_stride, MPICom ) + call shr_mpi_bcast( profile_timer, MPICom ) + end if + call perf_setopts (MasterTask2, LogPrint2, & + timing_disable_in=profile_disable, & + perf_timer_in=profile_timer, & + timer_depth_limit_in=profile_depth_limit, & + timing_detail_limit_in=profile_detail_limit, & + timing_barrier_in=profile_barrier, & + perf_outpe_num_in=profile_outpe_num, & + perf_outpe_stride_in=profile_outpe_stride, & + perf_single_file_in=profile_single_file, & + perf_global_stats_in=profile_global_stats, & + perf_papi_enable_in=profile_papi_enable, & + perf_ovhd_measurement_in=profile_ovhd_measurement, & + perf_add_detail_in=profile_add_detail ) + + ! Set PAPI defaults, then override with user-specified input + if (perf_papi_enable) then + call papi_defaultopts(papi_ctr1_out=papi_ctr1_id, & + papi_ctr2_out=papi_ctr2_id, & + papi_ctr3_out=papi_ctr3_id, & + papi_ctr4_out=papi_ctr4_id ) + + if ( MasterTask2 ) then + papi_ctr1_str = "PAPI_NO_CTR" + papi_ctr2_str = "PAPI_NO_CTR" + papi_ctr3_str = "PAPI_NO_CTR" + papi_ctr4_str = "PAPI_NO_CTR" + + + ! 
Read in the papi_inparm namelist from NLFilename if it exists + + write(p_logunit,*) '(t_initf) Read in papi_inparm namelist from: '//trim(NLFilename) + unitn = shr_log_getUnit() + + ierr = 1 + open( unitn, file=trim(NLFilename), status="OLD", form="FORMATTED", access="SEQUENTIAL", iostat=ierr ) + if (ierr .eq. 0) then + ! Look for papi_inparm group name in the input file. + ! If found, leave the file positioned at that namelist group. + call find_group_name(unitn, 'papi_inparm', status=ierr) + + if (ierr == 0) then ! found papi_inparm + read(unitn, nml=papi_inparm, iostat=ierr) + if (ierr /= 0) then + call shr_sys_abort( subname//':: namelist read returns an'// & + ' error condition for papi_inparm' ) + end if + end if + + close(unitn) + + endif + call shr_log_freeUnit( unitn ) + + ! if enabled and nothing set, use "defaults" + if ((papi_ctr1_str(1:11) .eq. "PAPI_NO_CTR") .and. & + (papi_ctr2_str(1:11) .eq. "PAPI_NO_CTR") .and. & + (papi_ctr3_str(1:11) .eq. "PAPI_NO_CTR") .and. & + (papi_ctr4_str(1:11) .eq. "PAPI_NO_CTR")) then + papi_ctr1_str = "PAPI_FP_OPS" + endif + + if (papi_ctr1_str(1:11) /= "PAPI_NO_CTR") then + ierr = gptlevent_name_to_code(trim(papi_ctr1_str), papi_ctr1_id) + endif + if (papi_ctr2_str(1:11) /= "PAPI_NO_CTR") then + ierr = gptlevent_name_to_code(trim(papi_ctr2_str), papi_ctr2_id) + endif + if (papi_ctr3_str(1:11) /= "PAPI_NO_CTR") then + ierr = gptlevent_name_to_code(trim(papi_ctr3_str), papi_ctr3_id) + endif + if (papi_ctr4_str(1:11) /= "PAPI_NO_CTR") then + ierr = gptlevent_name_to_code(trim(papi_ctr4_str), papi_ctr4_id) + endif + + endif + ! This logic assumes that there will be only one MasterTask + ! per communicator, and that this MasterTask is process 0. + if ( present(MasterTask) .and. present(mpicom) )then + call shr_mpi_bcast( papi_ctr1_id, MPICom ) + call shr_mpi_bcast( papi_ctr2_id, MPICom ) + call shr_mpi_bcast( papi_ctr3_id, MPICom ) + call shr_mpi_bcast( papi_ctr4_id, MPICom ) + end if + + call papi_setopts (papi_ctr1_in=papi_ctr1_id, & + papi_ctr2_in=papi_ctr2_id, & + papi_ctr3_in=papi_ctr3_id, & + papi_ctr4_in=papi_ctr4_id ) + endif +!$OMP END MASTER +!$OMP BARRIER + + if (timing_disable) return + +!$OMP MASTER + ! + ! Set options and initialize timing library. + ! + ! Set timer + if (gptlsetutr (perf_timer) < 0) call shr_sys_abort (subname//':: gptlsetutr') + ! + ! For logical settings, 2nd arg 0 + ! to gptlsetoption means disable, non-zero means enable + ! + ! Turn off CPU timing (expensive) + ! + if (gptlsetoption (gptlcpu, 0) < 0) call shr_sys_abort (subname//':: gptlsetoption') + ! + ! Enable addition of double quotes to the output of timer names + ! + if (gptlsetoption (gptldopr_quotes, 1) < 0) & + call shr_sys_abort (subname//':: gptlsetoption') + ! + ! Set maximum number of threads + ! + if ( present(MaxThreads) ) then + if (gptlsetoption (gptlmaxthreads, MaxThreads) < 0) & + call shr_sys_abort (subname//':: gptlsetoption') + endif + ! + ! Set max timer depth + ! + if (gptlsetoption (gptldepthlimit, timer_depth_limit) < 0) & + call shr_sys_abort (subname//':: gptlsetoption') + ! + ! Set profile ovhd measurement (default is false) + ! + if (perf_ovhd_measurement) then + if (gptlsetoption (gptlprofile_ovhd, 1) < 0) & + call shr_sys_abort (subname//':: gptlsetoption') + endif + ! + ! Next 2 calls only work if PAPI is enabled. These examples enable counting + ! of total cycles and floating point ops, respectively + ! 
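!
! For illustration, a user could request those two counters through
! the papi_inparm namelist group read above (the counter names here
! are examples only):
!
!     &papi_inparm
!       papi_ctr1_str = "PAPI_TOT_CYC"
!       papi_ctr2_str = "PAPI_FP_OPS"
!     /
!
! gptlevent_name_to_code maps each string to the corresponding
! papi_ctr1..papi_ctr4 code, which is then enabled below.
!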
+ if (perf_papi_enable) then + if (papi_ctr1 /= PAPI_NULL) then + if (gptlsetoption (papi_ctr1, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') + endif + if (papi_ctr2 /= PAPI_NULL) then + if (gptlsetoption (papi_ctr2, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') + endif + if (papi_ctr3 /= PAPI_NULL) then + if (gptlsetoption (papi_ctr3, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') + endif + if (papi_ctr4 /= PAPI_NULL) then + if (gptlsetoption (papi_ctr4, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') + endif + endif + ! + ! Initialize the timing lib. This call must occur after all gptlsetoption + ! calls and before all other timing lib calls. + ! + if (gptlinitialize () < 0) call shr_sys_abort (subname//':: gptlinitialize') + timing_initialized = .true. +!$OMP END MASTER +!$OMP BARRIER + + return + end subroutine t_initf +! +!======================================================================== +! + subroutine t_finalizef() +!----------------------------------------------------------------------- +! Purpose: shut down timing library +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Local workspace----------------------------- +! + integer ierr ! GPTL error return +! +!----------------------------------------------------------------------- +! + if (.not. timing_initialized) return + +!$OMP MASTER + ierr = GPTLfinalize() + timing_initialized = .false. +!$OMP END MASTER +!$OMP BARRIER + + return + end subroutine t_finalizef + +!=============================================================================== + +end module perf_mod diff --git a/CIME/non_py/src/timing/perf_utils.F90 b/CIME/non_py/src/timing/perf_utils.F90 new file mode 100644 index 00000000000..96d08ff9c9c --- /dev/null +++ b/CIME/non_py/src/timing/perf_utils.F90 @@ -0,0 +1,535 @@ +module perf_utils + +!----------------------------------------------------------------------- +! +! Purpose: This module supplies the csm_share and CAM utilities +! needed by perf_mod.F90 (when the csm_share and CAM utilities +! are not available). +! +! Author: P. Worley, October 2007 +! +! $Id$ +! +!----------------------------------------------------------------------- + +!----------------------------------------------------------------------- +!- module boilerplate -------------------------------------------------- +!----------------------------------------------------------------------- + implicit none + private ! Make the default access private +#include + save + +!----------------------------------------------------------------------- +! Public interfaces ---------------------------------------------------- +!----------------------------------------------------------------------- + public perfutils_setunit + public shr_sys_abort + public shr_mpi_barrier + public shr_log_getUnit + public shr_log_freeUnit + public find_group_name + public to_lower + public shr_mpi_bcast + + interface shr_mpi_bcast ; module procedure & + shr_mpi_bcastl0, & + shr_mpi_bcasti0 + end interface + +!----------------------------------------------------------------------- +! 
Private interfaces --------------------------------------------------- +!----------------------------------------------------------------------- + private shr_sys_flush + private shr_mpi_chkerr + private shr_mpi_abort + +!----------------------------------------------------------------------- +!- include statements -------------------------------------------------- +!----------------------------------------------------------------------- +#include "gptl.inc" + +!----------------------------------------------------------------------- +! Public data --------------------------------------------------------- +!----------------------------------------------------------------------- + + !---------------------------------------------------------------------------- + ! precision/kind constants (from csm_share/shr/shr_kind_mod.F90) + !---------------------------------------------------------------------------- + integer,parameter,public :: SHR_KIND_R8 = selected_real_kind(12) ! 8 byte real + integer,parameter,public :: SHR_KIND_I8 = selected_int_kind (13) ! 8 byte integer + integer,parameter,public :: SHR_KIND_IN = kind(1) ! native integer + integer,parameter,public :: SHR_KIND_CS = 80 ! short char + integer,parameter,public :: SHR_KIND_CM = 160 ! mid-sized char + integer,parameter,public :: SHR_KIND_CL = 256 ! long char + integer,parameter,public :: SHR_KIND_CX = 512 ! extra-long char + +!----------------------------------------------------------------------- +! Private data --------------------------------------------------------- +!----------------------------------------------------------------------- + + integer, parameter :: def_pu_logunit = 6 ! default + integer, private :: pu_logunit = def_pu_logunit + ! unit number for log output + +!======================================================================= +contains +!======================================================================= + +! +!======================================================================== +! + subroutine perfutils_setunit(LogUnit) +!----------------------------------------------------------------------- +! Purpose: Set log unit number. +! Author: P. Worley +!----------------------------------------------------------------------- +!---------------------------Input arguments----------------------------- +! + integer(SHR_KIND_IN), intent(IN) :: LogUnit ! Unit number for log output +!----------------------------------------------------------------------- + pu_logunit = LogUnit +! + return +! + end subroutine perfutils_setunit + +!============== Routines from csm_share/shr/shr_sys_mod.F90 ============ +!======================================================================= + +SUBROUTINE shr_sys_abort(string) + + IMPLICIT none + + character(*) ,optional :: string ! error message string + + !----- local ----- + integer(SHR_KIND_IN) :: ierr + logical :: flag + + !----- formats ----- + character(*),parameter :: subName = '(shr_sys_abort) ' + character(*),parameter :: F00 = "('(shr_sys_abort) ',4a)" + +!------------------------------------------------------------------------------- +! PURPOSE: consistent stopping mechanism +! 
(dumbed down from original shr_sys_mod.F90 version for use in perf_mod) +!------------------------------------------------------------------------------- + + call shr_sys_flush(pu_logunit) + + if ( present(string) ) then + if (len_trim(string) > 0) then + write(pu_logunit,*) trim(subName),' ERROR: ',trim(string) + else + write(pu_logunit,*) trim(subName),' ERROR ' + endif + else + write(pu_logunit,*) trim(subName),' ERROR ' + endif + + write(pu_logunit,F00) 'WARNING: calling mpi_abort() and stopping' + call shr_sys_flush(pu_logunit) + call mpi_abort(MPI_COMM_WORLD,0,ierr) + call shr_sys_flush(pu_logunit) +#ifndef CPRNAG + call abort() +#endif + + stop + +END SUBROUTINE shr_sys_abort + +!=============================================================================== +!=============================================================================== + +SUBROUTINE shr_sys_flush(unit) + + IMPLICIT none + + !----- arguments ----- + integer(SHR_KIND_IN) :: unit ! flush output buffer for this unit + + !----- formats ----- + character(*),parameter :: subName = '(shr_sys_flush) ' + character(*),parameter :: F00 = "('(shr_sys_flush) ',4a)" + +!------------------------------------------------------------------------------- +! PURPOSE: an architecture independant system call +!------------------------------------------------------------------------------- + +#if (defined IRIX64 || defined CRAY || defined OSF1 || defined SUNOS || defined LINUX || defined NEC_SX || defined UNICOSMP) +#ifdef CPRNAG + flush(unit) +#else + call flush(unit) +#endif +#endif +#if (defined AIX) + call flush_(unit) +#endif + +END SUBROUTINE shr_sys_flush + +!=============================================================================== + +!================== Routines from csm_share/shr/shr_mpi_mod.F90 =============== +!=============================================================================== + +SUBROUTINE shr_mpi_chkerr(rcode,string) + + IMPLICIT none + + !----- arguments --- + integer(SHR_KIND_IN), intent(in) :: rcode ! input MPI error code + character(*), intent(in) :: string ! message + + !----- local --- + character(*),parameter :: subName = '(shr_mpi_chkerr) ' + character(MPI_MAX_ERROR_STRING) :: lstring + integer(SHR_KIND_IN) :: len + integer(SHR_KIND_IN) :: ierr + +!------------------------------------------------------------------------------- +! PURPOSE: layer on MPI error checking +!------------------------------------------------------------------------------- + + if (rcode /= MPI_SUCCESS) then + call MPI_ERROR_STRING(rcode,lstring,len,ierr) + write(pu_logunit,*) trim(subName),":",lstring(1:len) + call shr_mpi_abort(string,rcode) + endif + +END SUBROUTINE shr_mpi_chkerr + +!=============================================================================== +!=============================================================================== + +SUBROUTINE shr_mpi_abort(string,rcode) + + IMPLICIT none + + !----- arguments --- + character(*),optional,intent(in) :: string ! message + integer,optional,intent(in) :: rcode ! optional code + + !----- local --- + character(*),parameter :: subName = '(shr_mpi_abort) ' + integer(SHR_KIND_IN) :: ierr + +!------------------------------------------------------------------------------- +! PURPOSE: MPI abort +!------------------------------------------------------------------------------- + + if ( present(string) .and. 
present(rcode) ) then + write(pu_logunit,*) trim(subName),":",trim(string),rcode + endif + call MPI_ABORT(MPI_COMM_WORLD,rcode,ierr) + +END SUBROUTINE shr_mpi_abort + +!=============================================================================== +!=============================================================================== + +SUBROUTINE shr_mpi_barrier(comm,string) + + IMPLICIT none + + !----- arguments --- + integer,intent(in) :: comm + character(*),optional,intent(in) :: string ! message + + !----- local --- + character(*),parameter :: subName = '(shr_mpi_barrier) ' + integer(SHR_KIND_IN) :: ierr + +!------------------------------------------------------------------------------- +! PURPOSE: MPI barrier +!------------------------------------------------------------------------------- + + call MPI_BARRIER(comm,ierr) + if (present(string)) then + call shr_mpi_chkerr(ierr,subName//trim(string)) + else + call shr_mpi_chkerr(ierr,subName) + endif + +END SUBROUTINE shr_mpi_barrier + +!=============================================================================== +!=============================================================================== + +SUBROUTINE shr_mpi_bcasti0(vec,comm,string) + + IMPLICIT none + + !----- arguments --- + integer(SHR_KIND_IN), intent(inout):: vec ! vector of 1 + integer(SHR_KIND_IN), intent(in) :: comm ! mpi communicator + character(*),optional,intent(in) :: string ! message + + !----- local --- + character(*),parameter :: subName = '(shr_mpi_bcasti0) ' + integer(SHR_KIND_IN) :: ierr + integer(SHR_KIND_IN) :: lsize + +!------------------------------------------------------------------------------- +! PURPOSE: Broadcast an integer +!------------------------------------------------------------------------------- + + lsize = 1 + + call MPI_BCAST(vec,lsize,MPI_INTEGER,0,comm,ierr) + if (present(string)) then + call shr_mpi_chkerr(ierr,subName//trim(string)) + else + call shr_mpi_chkerr(ierr,subName) + endif + +END SUBROUTINE shr_mpi_bcasti0 + +!=============================================================================== +!=============================================================================== + +SUBROUTINE shr_mpi_bcastl0(vec,comm,string) + + IMPLICIT none + + !----- arguments --- + logical, intent(inout):: vec ! vector of 1 + integer(SHR_KIND_IN), intent(in) :: comm ! mpi communicator + character(*),optional,intent(in) :: string ! message + + !----- local --- + character(*),parameter :: subName = '(shr_mpi_bcastl0) ' + integer(SHR_KIND_IN) :: ierr + integer(SHR_KIND_IN) :: lsize + +!------------------------------------------------------------------------------- +! PURPOSE: Broadcast a logical +!------------------------------------------------------------------------------- + + lsize = 1 + + call MPI_BCAST(vec,lsize,MPI_LOGICAL,0,comm,ierr) + if (present(string)) then + call shr_mpi_chkerr(ierr,subName//trim(string)) + else + call shr_mpi_chkerr(ierr,subName) + endif + +END SUBROUTINE shr_mpi_bcastl0 + +!=============================================================================== + +!================== Routines from csm_share/shr/shr_log_mod.F90 =============== +!=============================================================================== +!BOP =========================================================================== +! +! !IROUTINE: shr_log_getUnit -- Get a free FORTRAN unit number +! +! !DESCRIPTION: Get the next free FORTRAN unit number. +! +! !REVISION HISTORY: +! 2005-Dec-14 - E. Kluzek - creation +! 2007-Oct-21 - P. 
Worley - dumbed down for use in perf_mod +! +! !INTERFACE: ------------------------------------------------------------------ + +INTEGER FUNCTION shr_log_getUnit () + + implicit none + +!EOP + + !----- local parameters ----- + integer(SHR_KIND_IN),parameter :: shr_log_minUnit = 10 ! Min unit number to give + integer(SHR_KIND_IN),parameter :: shr_log_maxUnit = 99 ! Max unit number to give + + !----- local variables ----- + integer(SHR_KIND_IN) :: n ! loop index + logical :: opened ! If unit opened or not + + !----- formats ----- + character(*),parameter :: subName = '(shr_log_getUnit) ' + character(*),parameter :: F00 = "('(shr_log_getUnit) ',A,I4,A)" + +!------------------------------------------------------------------------------- +! Notes: +!------------------------------------------------------------------------------- + + ! --- Choose first available unit other than 0, 5, or 6 ------ + do n=shr_log_minUnit, shr_log_maxUnit + inquire( n, opened=opened ) + if (n == 5 .or. n == 6 .or. opened) then + cycle + end if + shr_log_getUnit = n + return + end do + + call shr_sys_abort( subName//': Error: no available units found' ) + +END FUNCTION shr_log_getUnit +!=============================================================================== + +!=============================================================================== +!BOP =========================================================================== +! +! !IROUTINE: shr_log_freeUnit -- Free up a FORTRAN unit number +! +! !DESCRIPTION: Free up the given unit number +! +! !REVISION HISTORY: +! 2005-Dec-14 - E. Kluzek - creation +! 2007-Oct-21 - P. Worley - dumbed down for use in perf_mod +! +! !INTERFACE: ------------------------------------------------------------------ + +SUBROUTINE shr_log_freeUnit ( unit) + + implicit none + +! !INPUT/OUTPUT PARAMETERS: + + integer(SHR_KIND_IN),intent(in) :: unit ! unit number to be freed + +!EOP + + !----- local parameters ----- + integer(SHR_KIND_IN),parameter :: shr_log_minUnit = 10 ! Min unit number to give + integer(SHR_KIND_IN),parameter :: shr_log_maxUnit = 99 ! Max unit number to give + + !----- formats ----- + character(*), parameter :: subName = '(shr_log_freeUnit) ' + character(*), parameter :: F00 = "('(shr_log_freeUnit) ',A,I4,A)" + +!------------------------------------------------------------------------------- +! Notes: +!------------------------------------------------------------------------------- + + if (unit < 0 .or. unit > shr_log_maxUnit) then +!pw if (s_loglev > 0) write(pu_logunit,F00) 'invalid unit number request:', unit + else if (unit == 0 .or. unit == 5 .or. unit == 6) then + call shr_sys_abort( subName//': Error: units 0, 5, and 6 must not be freed' ) + end if + + return + +END SUBROUTINE shr_log_freeUnit +!=============================================================================== + +!============= Routines from atm/cam/src/utils/namelist_utils.F90 ============== +!=============================================================================== + +subroutine find_group_name(unit, group, status) + +!--------------------------------------------------------------------------------------- +! Purpose: +! Search a file that contains namelist input for the specified namelist group name. +! Leave the file positioned so that the current record is the first record of the +! input for the specified group. +! +! Method: +! Read the file line by line. Each line is searched for an '&' which may only +! be preceded by blanks, immediately followed by the group name which is case +! 
insensitive. If found then backspace the file so the current record is the +! one containing the group name and return success. Otherwise return -1. +! +! Author: B. Eaton, August 2007 +!--------------------------------------------------------------------------------------- + + integer, intent(in) :: unit ! fortran unit attached to file + character(len=*), intent(in) :: group ! namelist group name + integer, intent(out) :: status ! 0 for success, -1 if group name not found + + ! Local variables + + integer :: len_grp + integer :: ios ! io status + character(len=80) :: inrec ! first 80 characters of input record + character(len=80) :: inrec2 ! left adjusted input record + character(len=len(group)) :: lc_group + + !--------------------------------------------------------------------------- + + len_grp = len_trim(group) + lc_group = to_lower(group) + + ios = 0 + do while (ios <= 0) + + read(unit, '(a)', iostat=ios, end=102) inrec + + if (ios <= 0) then ! ios < 0 indicates an end of record condition + + ! look for group name in this record + + ! remove leading blanks + inrec2 = to_lower(adjustl(inrec)) + + ! check for leading '&' + if (inrec2(1:1) == '&') then + + ! check for case insensitive group name + if (trim(lc_group) == inrec2(2:len_grp+1)) then + + ! found group name. backspace to leave file position at this record + backspace(unit) + status = 0 + return + + end if + end if + end if + + end do + + 102 continue ! end of file processing + status = -1 + +end subroutine find_group_name +!=============================================================================== + +!================ Routines from atm/cam/src/utils/string_utils.F90 ============= +!=============================================================================== + +function to_lower(str) + +!----------------------------------------------------------------------- +! Purpose: +! Convert character string to lower case. +! +! Method: +! Use achar and iachar intrinsics to ensure use of ascii collating sequence. +! +! Author: B. Eaton, July 2001 +! +! $Id$ +!----------------------------------------------------------------------- + implicit none + + character(len=*), intent(in) :: str ! String to convert to lower case + character(len=len(str)) :: to_lower + +! Local variables + + integer :: i ! Index + integer :: aseq ! ascii collating sequence + integer :: upper_to_lower ! integer to convert case + character(len=1) :: ctmp ! Character temporary +!----------------------------------------------------------------------- + upper_to_lower = iachar("a") - iachar("A") + + do i = 1, len(str) + ctmp = str(i:i) + aseq = iachar(ctmp) + if ( aseq >= iachar("A") .and. aseq <= iachar("Z") ) & + ctmp = achar(aseq + upper_to_lower) + to_lower(i:i) = ctmp + end do + +end function to_lower +!=============================================================================== + +end module perf_utils diff --git a/src/share/timing/private.h b/CIME/non_py/src/timing/private.h similarity index 100% rename from src/share/timing/private.h rename to CIME/non_py/src/timing/private.h diff --git a/CIME/provenance.py b/CIME/provenance.py new file mode 100644 index 00000000000..849bb2757d1 --- /dev/null +++ b/CIME/provenance.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 + +""" +Library for saving build/run provenance. 
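+
+Walltime history is kept under <baseline_root>/walltimes/<test>/walltimes and
+pass/fail transition history under <baseline_root>/success-history/<test>/last-transitions,
+as set by the _WALLTIME_* and _SUCCESS_* constants below.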
+""" + +from CIME.XML.standard_module_setup import * +from CIME.utils import ( + SharedArea, + convert_to_babylonian_time, + get_current_commit, + run_cmd, +) + +import sys + +logger = logging.getLogger(__name__) + + +_WALLTIME_BASELINE_NAME = "walltimes" +_WALLTIME_FILE_NAME = "walltimes" +_GLOBAL_MINUMUM_TIME = 900 +_GLOBAL_WIGGLE = 1000 +_WALLTIME_TOLERANCE = ((600, 2.0), (1800, 1.5), (9999999999, 1.25)) + + +def get_recommended_test_time_based_on_past(baseline_root, test, raw=False): + if baseline_root is not None: + try: + the_path = os.path.join( + baseline_root, _WALLTIME_BASELINE_NAME, test, _WALLTIME_FILE_NAME + ) + if os.path.exists(the_path): + last_line = int(open(the_path, "r").readlines()[-1].split()[0]) + if raw: + best_walltime = last_line + else: + best_walltime = None + for cutoff, tolerance in _WALLTIME_TOLERANCE: + if last_line <= cutoff: + best_walltime = int(float(last_line) * tolerance) + break + + if best_walltime < _GLOBAL_MINUMUM_TIME: + best_walltime = _GLOBAL_MINUMUM_TIME + + best_walltime += _GLOBAL_WIGGLE + + return convert_to_babylonian_time(best_walltime) + except Exception: + # We NEVER want a failure here to kill the run + logger.warning("Failed to read test time: {}".format(sys.exc_info()[1])) + + return None + + +def save_test_time(baseline_root, test, time_seconds, commit): + if baseline_root is not None: + try: + with SharedArea(): + the_dir = os.path.join(baseline_root, _WALLTIME_BASELINE_NAME, test) + if not os.path.exists(the_dir): + os.makedirs(the_dir) + + the_path = os.path.join(the_dir, _WALLTIME_FILE_NAME) + with open(the_path, "a") as fd: + fd.write("{} {}\n".format(int(time_seconds), commit)) + + except Exception: + # We NEVER want a failure here to kill the run + logger.warning("Failed to store test time: {}".format(sys.exc_info()[1])) + + +_SUCCESS_BASELINE_NAME = "success-history" +_SUCCESS_FILE_NAME = "last-transitions" + + +def _read_success_data(baseline_root, test): + success_path = os.path.join( + baseline_root, _SUCCESS_BASELINE_NAME, test, _SUCCESS_FILE_NAME + ) + if os.path.exists(success_path): + with open(success_path, "r") as fd: + prev_results_raw = fd.read().strip() + prev_results = prev_results_raw.split() + expect( + len(prev_results) == 2, + "Bad success data: '{}'".format(prev_results_raw), + ) + else: + prev_results = ["None", "None"] + + # Convert "None" to None + for idx, item in enumerate(prev_results): + if item == "None": + prev_results[idx] = None + + return success_path, prev_results + + +def _is_test_working(prev_results, src_root, testing=False): + # If there is no history of success, prev run could not have succeeded and vice versa for failures + if prev_results[0] is None: + return False + elif prev_results[1] is None: + return True + else: + if not testing: + stat, out, err = run_cmd( + "git merge-base --is-ancestor {}".format(" ".join(prev_results)), + from_dir=src_root, + ) + expect( + stat in [0, 1], + "Unexpected status from ancestor check:\n{}\n{}".format(out, err), + ) + else: + # Hack for testing + stat = 0 if prev_results[0] < prev_results[1] else 1 + + # stat == 0 tells us that pass is older than fail, so we must have failed, otherwise we passed + return stat != 0 + + +def get_test_success(baseline_root, src_root, test, testing=False): + """ + Returns (was prev run success, commit when test last passed, commit when test last transitioned from pass to fail) + + Unknown history is expressed as None + """ + if baseline_root is not None: + try: + prev_results = _read_success_data(baseline_root, test)[1] + 
prev_success = _is_test_working(prev_results, src_root, testing=testing) + return prev_success, prev_results[0], prev_results[1] + + except Exception: + # We NEVER want a failure here to kill the run + logger.warning("Failed to read test success: {}".format(sys.exc_info()[1])) + + return False, None, None + + +def save_test_success(baseline_root, src_root, test, succeeded, force_commit_test=None): + """ + Update success data accordingly based on succeeded flag + """ + if baseline_root is not None: + try: + with SharedArea(): + success_path, prev_results = _read_success_data(baseline_root, test) + + the_dir = os.path.dirname(success_path) + if not os.path.exists(the_dir): + os.makedirs(the_dir) + + prev_succeeded = _is_test_working( + prev_results, src_root, testing=(force_commit_test is not None) + ) + + # if no transition occurred then no update is needed + if ( + succeeded + or succeeded != prev_succeeded + or (prev_results[0] is None and succeeded) + or (prev_results[1] is None and not succeeded) + ): + + new_results = list(prev_results) + my_commit = ( + force_commit_test + if force_commit_test + else get_current_commit(repo=src_root) + ) + if succeeded: + new_results[0] = my_commit # we passed + else: + new_results[1] = my_commit # we transitioned to a failing state + + str_results = [ + "None" if item is None else item for item in new_results + ] + with open(success_path, "w") as fd: + fd.write("{}\n".format(" ".join(str_results))) + + except Exception: + # We NEVER want a failure here to kill the run + logger.warning("Failed to store test success: {}".format(sys.exc_info()[1])) diff --git a/doc/source/_static b/CIME/scripts/__init__.py similarity index 100% rename from doc/source/_static rename to CIME/scripts/__init__.py diff --git a/CIME/scripts/configure b/CIME/scripts/configure new file mode 100755 index 00000000000..f31c179518c --- /dev/null +++ b/CIME/scripts/configure @@ -0,0 +1,212 @@ +#!/usr/bin/env python3 + +"""This script writes CIME build information to a directory. + +The pieces of information that will be written include: + +1. Machine-specific build settings (i.e. the "Macros" file). +2. File-specific build settings (i.e. "Depends" files). +3. Environment variable loads (i.e. the env_mach_specific files). + +The .env_mach_specific.sh and .env_mach_specific.csh files are specific to a +given compiler, MPI library, and DEBUG setting. By default, these will be the +machine's default compiler, the machine's default MPI library, and FALSE, +respectively. These can be changed by setting the environment variables +COMPILER, MPILIB, and DEBUG, respectively. +""" + +# pylint: disable=W1505 + +import os +import sys + +real_file_dir = os.path.dirname(os.path.realpath(__file__)) +cimeroot = os.path.abspath(os.path.join(real_file_dir, "..", "..")) +sys.path.insert(0, cimeroot) + +from CIME.Tools.standard_script_setup import * +from CIME.utils import expect, get_model +from CIME.BuildTools.configure import configure +from CIME.XML.machines import Machines + +logger = logging.getLogger(__name__) + + +def parse_command_line(args): + """Command line argument parser for configure.""" + description = __doc__ + parser = argparse.ArgumentParser(description=description) + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "--machine", help="The machine to create build information for." + ) + parser.add_argument( + "--machines-dir", + help="The machines directory to take build information " + "from. 
Overrides the CIME_MODEL environment variable, " + "and must be specified if that variable is not set.", + ) + parser.add_argument( + "--macros-format", + action="append", + choices=["Makefile", "CMake"], + help="The format of Macros file to generate. If " + "'Makefile' is passed in, a file called 'Macros.make' " + "is generated. If 'CMake' is passed in, a file called " + "'Macros.cmake' is generated. This option can be " + "specified multiple times to generate multiple files. " + "If not used at all, Macros generation is skipped. " + "Note that Depends files are currently always in " + "Makefile format, regardless of this option.", + ) + parser.add_argument( + "--output-dir", + default=os.getcwd(), + help="The directory to write files to. If not " + "specified, defaults to the current working directory.", + ) + + parser.add_argument( + "--compiler", + "-compiler", + help="Specify a compiler. " + "To see list of supported compilers for each machine, use the utility query_config in this directory", + ) + + parser.add_argument( + "--mpilib", + "-mpilib", + help="Specify the mpilib. " + "To see list of supported mpilibs for each machine, use the utility query_config in this directory. " + "The default is the first listing in MPILIBS in config_machines.xml", + ) + + parser.add_argument( + "--clean", + action="store_true", + help="Remove old Macros and env files before attempting to create new ones", + ) + + parser.add_argument( + "--comp-interface", + default="mct", + help="""The cime driver/cpl interface to use.""", + ) + + argcnt = len(args) + args = parser.parse_args() + CIME.utils.parse_args_and_handle_standard_logging_options(args) + + opts = {} + if args.machines_dir is not None: + machines_file = os.path.join(args.machines_dir, "config_machines.xml") + machobj = Machines(infile=machines_file, machine=args.machine) + else: + model = get_model() + if model is not None: + machobj = Machines(machine=args.machine) + else: + expect( + False, + "Either --mach-dir or the CIME_MODEL environment " + "variable must be specified!", + ) + + opts["machobj"] = machobj + + if args.macros_format is None: + opts["macros_format"] = [] + else: + opts["macros_format"] = args.macros_format + + expect( + os.path.isdir(args.output_dir), + "Output directory '%s' does not exist." % args.output_dir, + ) + + opts["output_dir"] = args.output_dir + + # Set compiler. + if args.compiler is not None: + compiler = args.compiler + elif "COMPILER" in os.environ: + compiler = os.environ["COMPILER"] + else: + compiler = machobj.get_default_compiler() + os.environ["COMPILER"] = compiler + expect( + opts["machobj"].is_valid_compiler(compiler), + "Invalid compiler vendor given in COMPILER environment variable: %s" % compiler, + ) + opts["compiler"] = compiler + opts["os"] = machobj.get_value("OS") + opts["comp_interface"] = args.comp_interface + + if args.clean: + files = [ + "Macros.make", + "Macros.cmake", + "env_mach_specific.xml", + ".env_mach_specific.sh", + ".env_mach_specific.csh", + "Depends.%s" % compiler, + "Depends.%s" % args.machine, + "Depends.%s.%s" % (args.machine, compiler), + ] + for file_ in files: + if os.path.isfile(file_): + logger.warn("Removing file %s" % file_) + os.remove(file_) + if argcnt == 2: + opts["clean_only"] = True + return opts + + # Set MPI library. 
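+    # Resolution order mirrors COMPILER above: the --mpilib option wins, then the
+    # MPILIB environment variable, then the machine default for the chosen compiler.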
+    if args.mpilib is not None:
+        mpilib = args.mpilib
+    elif "MPILIB" in os.environ:
+        mpilib = os.environ["MPILIB"]
+    else:
+        mpilib = machobj.get_default_MPIlib(attributes={"compiler": compiler})
+        os.environ["MPILIB"] = mpilib
+
+    expect(
+        opts["machobj"].is_valid_MPIlib(mpilib, attributes={"compiler": compiler}),
+        "Invalid MPI library name given in MPILIB environment variable: %s" % mpilib,
+    )
+    opts["mpilib"] = mpilib
+
+    # Set DEBUG flag.
+    if "DEBUG" in os.environ:
+        expect(
+            os.environ["DEBUG"].lower() in ("true", "false"),
+            "Invalid DEBUG environment variable value (must be 'TRUE' or "
+            "'FALSE'): %s" % os.environ["DEBUG"],
+        )
+        debug = os.environ["DEBUG"].lower() == "true"
+    else:
+        debug = False
+        os.environ["DEBUG"] = "FALSE"
+    opts["debug"] = debug
+
+    return opts
+
+
+def _main():
+    opts = parse_command_line(sys.argv)
+    if "clean_only" not in opts or not opts["clean_only"]:
+        configure(
+            opts["machobj"],
+            opts["output_dir"],
+            opts["macros_format"],
+            opts["compiler"],
+            opts["mpilib"],
+            opts["debug"],
+            opts["comp_interface"],
+            opts["os"],
+        )
+
+
+if __name__ == "__main__":
+    _main()
diff --git a/CIME/scripts/create_clone.py b/CIME/scripts/create_clone.py
new file mode 100755
index 00000000000..2f2a95994d8
--- /dev/null
+++ b/CIME/scripts/create_clone.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python3
+
+from CIME.Tools.standard_script_setup import *
+from CIME.utils import expect
+from CIME.case import Case
+from argparse import RawTextHelpFormatter
+import re
+
+logger = logging.getLogger(__name__)
+
+###############################################################################
+def parse_command_line(args):
+    ###############################################################################
+    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
+
+    CIME.utils.setup_standard_logging_options(parser)
+
+    parser.add_argument(
+        "--case",
+        "-case",
+        required=True,
+        help="(required) Specify a new case name. If not a full pathname, "
+        "\nthe new case will be created under the current working directory.",
+    )
+
+    parser.add_argument(
+        "--clone",
+        "-clone",
+        required=True,
+        help="(required) Specify a case to be cloned. If not a full pathname, "
+        "\nthe case to be cloned is assumed to be under the current working directory.",
+    )
+
+    parser.add_argument(
+        "--ensemble",
+        default=1,
+        help="clone an ensemble of cases, the case name argument must end in an integer.\n"
+        "for example: ./create_clone --clone case.template --case case.001 --ensemble 4 \n"
+        "will create case.001, case.002, case.003, case.004 from existing case.template",
+    )
+
+    # This option supports multiple values, hence the plural ("user-mods-dirs"). However,
+    # we support the singular ("user-mods-dir") for backwards compatibility (and because
+    # the singular may be more intuitive for someone who only wants to use a single
+    # directory).
+    parser.add_argument(
+        "--user-mods-dirs",
+        "--user-mods-dir",
+        nargs="*",
+        help="Full pathname to a directory containing any combination of user_nl_* files "
+        "\nand a shell_commands script (typically containing xmlchange commands). "
+        "\nThe directory can also contain a SourceMods/ directory with the same structure "
+        "\nas would be found in a case directory."
+        "\nIt can also contain a file named 'include_user_mods' which gives the path to"
+        "\none or more other directories that should be included."
+        "\nMultiple directories can be given to the --user-mods-dirs argument,"
+        "\nin which case changes from all of them are applied."
+ "\n(If there are conflicts, later directories take precedence.)" + "\n(Care is needed if multiple directories include the same directory via 'include_user_mods':" + "\nin this case, the included directory will be applied multiple times.)" + "\nIf this argument is used in conjunction " + "\nwith the --keepexe flag, then no changes will be permitted to the env_build.xml " + "\nin the newly created case directory. ", + ) + + parser.add_argument( + "--keepexe", + "-keepexe", + action="store_true", + help="Sets EXEROOT to point to original build. It is HIGHLY recommended " + "\nthat the original case be built BEFORE cloning it if the --keepexe flag is specfied. " + "\nThis flag will make the SourceMods/ directory in the newly created case directory a " + "\nsymbolic link to the SourceMods/ directory in the original case directory. ", + ) + + parser.add_argument( + "--mach-dir", + "-mach_dir", + help="Specify the locations of the Machines directory, other than the default. " + "\nThe default is CIMEROOT/machines.", + ) + + parser.add_argument( + "--project", + "-project", + help="Specify a project id for the case (optional)." + "\nUsed for accounting and directory permissions when on a batch system." + "\nThe default is user or machine specified by PROJECT." + "\nAccounting (only) may be overridden by user or machine specified CHARGE_ACCOUNT.", + ) + + parser.add_argument( + "--cime-output-root", + help="Specify the root output directory. The default is the setting in the original" + "\ncase directory. NOTE: create_clone will fail if this directory is not writable.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + if args.case is None: + expect(False, "Must specify -case as an input argument") + + if args.clone is None: + expect(False, "Must specify -clone as an input argument") + + startval = "1" + if int(args.ensemble) > 1: + m = re.search(r"(\d+)$", args.case) + expect(m, " case name must end in an integer to use this feature") + startval = m.group(1) + + return ( + args.case, + args.clone, + args.keepexe, + args.mach_dir, + args.project, + args.cime_output_root, + args.user_mods_dirs, + int(args.ensemble), + startval, + ) + + +############################################################################## +def _main_func(): + ############################################################################### + + ( + case, + clone, + keepexe, + mach_dir, + project, + cime_output_root, + user_mods_dirs, + ensemble, + startval, + ) = parse_command_line(sys.argv) + + cloneroot = os.path.abspath(clone) + expect(os.path.isdir(cloneroot), "Missing cloneroot directory %s " % cloneroot) + + if user_mods_dirs is not None: + user_mods_dirs = [ + os.path.abspath(one_user_mods_dir) + if os.path.isdir(one_user_mods_dir) + else one_user_mods_dir + for one_user_mods_dir in user_mods_dirs + ] + nint = len(startval) + + for i in range(int(startval), int(startval) + ensemble): + if ensemble > 1: + case = case[:-nint] + "{{0:0{0:d}d}}".format(nint).format(i) + with Case(cloneroot, read_only=False) as clone: + clone.create_clone( + case, + keepexe=keepexe, + mach_dir=mach_dir, + project=project, + cime_output_root=cime_output_root, + user_mods_dirs=user_mods_dirs, + ) + + +############################################################################### + +if __name__ == "__main__": + _main_func() diff --git a/CIME/scripts/create_newcase.py b/CIME/scripts/create_newcase.py new file mode 100755 index 00000000000..879a5d167c8 --- /dev/null +++ b/CIME/scripts/create_newcase.py 
@@ -0,0 +1,451 @@ +#!/usr/bin/env python3 + +# pylint: disable=W0621, W0613 + +""" +Script to create a new CIME Case Control System (CSS) experimental case. +""" + +from CIME.Tools.standard_script_setup import * +from CIME.utils import ( + expect, + get_cime_config, + get_cime_default_driver, + get_src_root, +) +from CIME.config import Config +from CIME.case import Case +from argparse import RawTextHelpFormatter + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, cimeroot, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + customize_path = os.path.join(CIME.utils.get_src_root(), "cime_config", "customize") + + config = Config.load(customize_path) + + try: + cime_config = get_cime_config() + except Exception: + cime_config = None + + parser.add_argument( + "--case", + "-case", + required=True, + metavar="CASENAME", + help="(required) Specify the case name. " + "\nIf this is simply a name (not a path), the case directory is created in the current working directory." + "\nThis can also be a relative or absolute path specifying where the case should be created;" + "\nwith this usage, the name of the case will be the last component of the path.", + ) + + parser.add_argument( + "--compset", + "-compset", + required=True, + help="(required) Specify a compset. " + "\nTo see list of current compsets, use the utility ./query_config --compsets in this directory.\n", + ) + + parser.add_argument( + "--res", + "-res", + required=True, + metavar="GRID", + help="(required) Specify a model grid resolution. " + "\nTo see list of current model resolutions, use the utility " + "\n./query_config --grids in this directory.", + ) + + parser.add_argument( + "--machine", + "-mach", + help="Specify a machine. " + "The default value is the match to NODENAME_REGEX in config_machines.xml. To see " + "\nthe list of current machines, invoke ./query_config --machines.", + ) + + parser.add_argument( + "--compiler", + "-compiler", + help="Specify a compiler. " + "\nTo see list of supported compilers for each machine, use the utility " + "\n./query_config --machines in this directory. " + "\nThe default value will be the first one listed.", + ) + + parser.add_argument( + "--multi-driver", + action="store_true", + help="Specify that --ninst should modify the number of driver/coupler instances. " + "\nThe default is to have one driver/coupler supporting multiple component instances.", + ) + + parser.add_argument( + "--ninst", + default=1, + type=int, + help="Specify number of model ensemble instances. " + "\nThe default is multiple components and one driver/coupler. " + "\nUse --multi-driver to run multiple driver/couplers in the ensemble.", + ) + + parser.add_argument( + "--mpilib", + "-mpilib", + help="Specify the MPI library. " + "To see list of supported mpilibs for each machine, invoke ./query_config --machines." + "\nThe default is the first listing in MPILIBS in config_machines.xml.\n", + ) + + parser.add_argument( + "--project", + "-project", + help="Specify a project id for the case (optional)." + "\nUsed for accounting and directory permissions when on a batch system." + "\nThe default is user or machine specified by PROJECT." 
+ "\nAccounting (only) may be overridden by user or machine specified CHARGE_ACCOUNT.", + ) + + parser.add_argument( + "--pecount", + "-pecount", + default="M", + help="Specify a target size description for the number of cores. " + "\nThis is used to query the appropriate config_pes.xml file and find the " + "\noptimal PE-layout for your case - if it exists there. " + "\nAllowed options are ('S','M','L','X1','X2','[0-9]x[0-9]','[0-9]').\n", + ) + + # This option supports multiple values, hence the plural ("user-mods-dirs"). However, + # we support the singular ("user-mods-dir") for backwards compatibility (and because + # the singular may be more intuitive for someone who only wants to use a single + # directory). + parser.add_argument( + "--user-mods-dirs", + "--user-mods-dir", + nargs="*", + help="Full pathname to a directory containing any combination of user_nl_* files " + "\nand a shell_commands script (typically containing xmlchange commands). " + "\nThe directory can also contain an SourceMods/ directory with the same structure " + "\nas would be found in a case directory." + "\nIt can also contain a file named 'include_user_mods' which gives the path to" + "\none or more other directories that should be included." + "\nMultiple directories can be given to the --user-mods-dirs argument," + "\nin which case changes from all of them are applied." + "\n(If there are conflicts, later directories take precedence.)" + "\n(Care is needed if multiple directories include the same directory via 'include_user_mods':" + "\nin this case, the included directory will be applied multiple times.)", + ) + + parser.add_argument( + "--pesfile", + help="Full pathname of an optional pes specification file. " + "\nThe file can follow either the config_pes.xml or the env_mach_pes.xml format.", + ) + + parser.add_argument( + "--gridfile", + help="Full pathname of config grid file to use. " + "\nThis should be a copy of config/config_grids.xml with the new user grid changes added to it. \n", + ) + + if cime_config and cime_config.has_option("main", "workflow"): + workflow_default = cime_config.get("main", "workflow") + else: + workflow_default = "default" + + parser.add_argument( + "--workflow", + default=workflow_default, + help="A workflow from config_workflow.xml to apply to this case. ", + ) + + srcroot_default = get_src_root() + + parser.add_argument( + "--srcroot", + default=srcroot_default, + help="Alternative pathname for source root directory. " + f"The default is {srcroot_default}", + ) + + parser.add_argument( + "--output-root", + help="Alternative pathname for the directory where case output is written.", + ) + + # The following is a deprecated option + parser.add_argument( + "--script-root", dest="script_root", default=None, help=argparse.SUPPRESS + ) + + if config.allow_unsupported: + parser.add_argument( + "--run-unsupported", + action="store_true", + help="Force the creation of a case that is not tested or supported by CESM developers.", + ) + # hidden argument indicating called from create_test + # Indicates that create_newcase was called from create_test - do not use otherwise. + parser.add_argument("--test", "-test", action="store_true", help=argparse.SUPPRESS) + + parser.add_argument( + "--walltime", + default=os.getenv("CIME_GLOBAL_WALLTIME"), + help="Set the wallclock limit for this case in the format (the usual format is HH:MM:SS). " + "\nYou may use env var CIME_GLOBAL_WALLTIME to set this. 
" + "\nIf CIME_GLOBAL_WALLTIME is not defined in the environment, then the walltime" + "\nwill be the maximum allowed time defined for the queue in config_batch.xml.", + ) + + parser.add_argument( + "-q", + "--queue", + default=None, + help="Force batch system to use the specified queue. ", + ) + + parser.add_argument( + "--handle-preexisting-dirs", + dest="answer", + choices=("a", "r", "u"), + default=None, + help="Do not query how to handle pre-existing bld/exe dirs. " + "\nValid options are (a)bort (r)eplace or (u)se existing. " + "\nThis can be useful if you need to run create_newcase non-iteractively.", + ) + + parser.add_argument( + "-i", + "--input-dir", + help="Use a non-default location for input files. This will change the xml value of DIN_LOC_ROOT.", + ) + + drv_choices = config.driver_choices + drv_help = ( + "Override the top level driver type and use this one " + + "(changes xml variable COMP_INTERFACE) [this is an advanced option]" + ) + + parser.add_argument( + "--driver", + # use get_cime_default_driver rather than config.driver_default as it considers + # environment, user config then config.driver_default + default=get_cime_default_driver(), + choices=drv_choices, + help=drv_help, + ) + + parser.add_argument( + "-n", + "--non-local", + action="store_true", + help="Use when you've requested a machine that you aren't on. " + "Will reduce errors for missing directories etc.", + ) + + parser.add_argument( + "--extra-machines-dir", + help="Optional path to a directory containing one or more of:" + "\nconfig_machines.xml, config_batch.xml." + "\nIf provided, the contents of these files will be appended to" + "\nthe standard machine files (and any files in ~/.cime).", + ) + + parser.add_argument("--case-group", help="Add this case to a case group") + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + if args.srcroot is not None: + expect( + os.path.isdir(args.srcroot), + "Input non-default directory srcroot {} does not exist ".format( + args.srcroot + ), + ) + args.srcroot = os.path.abspath(args.srcroot) + + if args.gridfile is not None: + expect( + os.path.isfile(args.gridfile), + "Grid specification file {} does not exist ".format(args.gridfile), + ) + + if args.pesfile is not None: + expect( + os.path.isfile(args.pesfile), + "Pes specification file {} cannot be found ".format(args.pesfile), + ) + + run_unsupported = False + if config.allow_unsupported: + run_unsupported = args.run_unsupported + + expect( + CIME.utils.check_name(args.case, fullpath=True), + "Illegal case name argument provided", + ) + + if args.input_dir is not None: + args.input_dir = os.path.abspath(args.input_dir) + elif cime_config and cime_config.has_option("main", "input_dir"): + args.input_dir = os.path.abspath(cime_config.get("main", "input_dir")) + + if config.create_test_flag_mode == "cesm" and args.driver == "mct": + logger.warning( + """======================================================================== +WARNING: The MCT-based driver and data models will be removed from CESM +WARNING: on September 30, 2022. +WARNING: Please contact members of the CESM Software Engineering Group +WARNING: if you need support migrating to the ESMF/NUOPC infrastructure. 
+========================================================================""" + ) + + return ( + args.case, + args.compset, + args.res, + args.machine, + args.compiler, + args.mpilib, + args.project, + args.pecount, + args.user_mods_dirs, + args.pesfile, + args.gridfile, + args.srcroot, + args.test, + args.multi_driver, + args.ninst, + args.walltime, + args.queue, + args.output_root, + args.script_root, + run_unsupported, + args.answer, + args.input_dir, + args.driver, + args.workflow, + args.non_local, + args.extra_machines_dir, + args.case_group, + ) + + +############################################################################### +def _main_func(description=None): + ############################################################################### + cimeroot = os.path.abspath(CIME.utils.get_cime_root()) + + ( + casename, + compset, + grid, + machine, + compiler, + mpilib, + project, + pecount, + user_mods_dirs, + pesfile, + gridfile, + srcroot, + test, + multi_driver, + ninst, + walltime, + queue, + output_root, + script_root, + run_unsupported, + answer, + input_dir, + driver, + workflow, + non_local, + extra_machines_dir, + case_group, + ) = parse_command_line(sys.argv, cimeroot, description) + + if script_root is None: + caseroot = os.path.abspath(casename) + else: + caseroot = os.path.abspath(script_root) + + if user_mods_dirs is not None: + user_mods_dirs = [ + os.path.abspath(one_user_mods_dir) + if os.path.isdir(one_user_mods_dir) + else one_user_mods_dir + for one_user_mods_dir in user_mods_dirs + ] + + # create_test creates the caseroot before calling create_newcase + # otherwise throw an error if this directory exists + expect( + not (os.path.exists(caseroot) and not test), + "Case directory {} already exists".format(caseroot), + ) + + # create_newcase ... --test ... throws a CIMEError along with + # a very stern warning message to the user + # if it detects that it was invoked outside of create_test + if test: + expect( + ( + "FROM_CREATE_TEST" in os.environ + and os.environ["FROM_CREATE_TEST"] == "True" + ), + "The --test argument is intended to only be called from inside create_test. Invoking this option from the command line is not appropriate usage.", + ) + del os.environ["FROM_CREATE_TEST"] + + with Case(caseroot, read_only=False, non_local=non_local) as case: + # Configure the Case + case.create( + casename, + srcroot, + compset, + grid, + user_mods_dirs=user_mods_dirs, + machine_name=machine, + project=project, + pecount=pecount, + compiler=compiler, + mpilib=mpilib, + pesfile=pesfile, + gridfile=gridfile, + multi_driver=multi_driver, + ninst=ninst, + test=test, + walltime=walltime, + queue=queue, + output_root=output_root, + run_unsupported=run_unsupported, + answer=answer, + input_dir=input_dir, + driver=driver, + workflowid=workflow, + non_local=non_local, + extra_machines_dir=extra_machines_dir, + case_group=case_group, + ) + + # Called after create since casedir does not exist yet + case.record_cmd(init=True) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/scripts/create_test.py b/CIME/scripts/create_test.py new file mode 100755 index 00000000000..051c7430592 --- /dev/null +++ b/CIME/scripts/create_test.py @@ -0,0 +1,1183 @@ +#!/usr/bin/env python3 + +""" +Script to create, build and run CIME tests. This script can: + +1) Run a single test, or more than one test + ./create_test TESTNAME + ./create_test TESTNAME1 TESTNAME2 ... 
+2) Run a test suite from a text file with one test per line + ./create_test -f TESTFILE +3) Run an E3SM test suite: + Below, a suite name, SUITE, is defined in $CIMEROOT/scripts/lib/get_tests.py + - Run a single suite + ./create_test SUITE + - Run two suites + ./create_test SUITE1 SUITE2 + - Run all tests in a suite except for one + ./create_test SUITE ^TESTNAME + - Run all tests in a suite except for tests that are in another suite + ./create_test SUITE1 ^SUITE2 + - Run all tests in a suite with baseline comparisons against master baselines + ./create_test SUITE1 -c -b master +4) Run a CESM test suite(s): + ./create_test --xml-category XML_CATEGORY [--xml-machine XML_MACHINE] [--xml-compiler XML_COMPILER] [ --xml-testlist XML_TESTLIST] + +If this tool is missing any feature that you need, please add an issue on +https://github.com/ESMCI/cime +""" +from CIME.Tools.standard_script_setup import * +from CIME import get_tests +from CIME.test_scheduler import TestScheduler, RUN_PHASE +from CIME import utils +from CIME.utils import ( + expect, + convert_to_seconds, + compute_total_time, + convert_to_babylonian_time, + run_cmd_no_fail, + get_cime_config, +) +from CIME.config import Config +from CIME.XML.machines import Machines +from CIME.case import Case +from CIME.test_utils import get_tests_from_xml +from argparse import RawTextHelpFormatter + +import argparse, math, glob + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + + parser = argparse.ArgumentParser( + description=description, formatter_class=RawTextHelpFormatter + ) + + model_config = Config.instance() + + CIME.utils.setup_standard_logging_options(parser) + + config = get_cime_config() + + parser.add_argument( + "--no-run", action="store_true", help="Do not run generated tests" + ) + + parser.add_argument( + "--no-build", + action="store_true", + help="Do not build generated tests, implies --no-run", + ) + + parser.add_argument( + "--no-setup", + action="store_true", + help="Do not setup generated tests, implies --no-build and --no-run", + ) + + parser.add_argument( + "-u", + "--use-existing", + action="store_true", + help="Use pre-existing case directories they will pick up at the " + "\nlatest PEND state or re-run the first failed state. Requires test-id", + ) + + default = get_default_setting(config, "SAVE_TIMING", False, check_main=False) + + parser.add_argument( + "--save-timing", + action="store_true", + default=default, + help="Enable archiving of performance data.", + ) + + parser.add_argument( + "--no-batch", + action="store_true", + help="Do not submit jobs to batch system, run locally." + "\nIf false, this will default to machine setting.", + ) + + parser.add_argument( + "--single-exe", + action="store_true", + default=False, + help="Use a single build for all cases. This can " + "\ndrastically improve test throughput but is currently use-at-your-own risk." + "\nIt's up to the user to ensure that all cases are build-compatible." + "\nE3SM tests belonging to a suite with share enabled will always share exes.", + ) + + default = get_default_setting(config, "SINGLE_SUBMIT", False, check_main=False) + + parser.add_argument( + "--single-submit", + action="store_true", + default=default, + help="Use a single interactive allocation to run all the tests. 
This can " + "\ndrastically reduce queue waiting but only makes sense on batch machines.", + ) + + default = get_default_setting(config, "TEST_ROOT", None, check_main=False) + + parser.add_argument( + "-r", + "--test-root", + default=default, + help="Where test cases will be created. The default is output root" + "\nas defined in the config_machines file", + ) + + default = get_default_setting(config, "OUTPUT_ROOT", None, check_main=False) + + parser.add_argument( + "--output-root", default=default, help="Where the case output is written." + ) + + default = get_default_setting(config, "BASELINE_ROOT", None, check_main=False) + + parser.add_argument( + "--baseline-root", + default=default, + help="Specifies a root directory for baseline datasets that will " + "\nbe used for Bit-for-bit generate and/or compare testing.", + ) + + default = get_default_setting(config, "CLEAN", False, check_main=False) + + parser.add_argument( + "--clean", + action="store_true", + default=default, + help="Specifies if tests should be cleaned after run. If set, all object" + "\nexecutables and data files will be removed after the tests are run.", + ) + + default = get_default_setting(config, "MACHINE", None, check_main=True) + + parser.add_argument( + "-m", + "--machine", + default=default, + help="The machine for creating and building tests. This machine must be defined" + "\nin the config_machines.xml file for the given model. The default is to " + "\nto match the name of the machine in the test name or the name of the " + "\nmachine this script is run on to the NODENAME_REGEX field in " + "\nconfig_machines.xml. WARNING: This option is highly unsafe and should " + "\nonly be used if you are an expert.", + ) + + default = get_default_setting(config, "MPILIB", None, check_main=True) + + parser.add_argument( + "--mpilib", + default=default, + help="Specify the mpilib. To see list of supported MPI libraries for each machine, " + "\ninvoke ./query_config. The default is the first listing .", + ) + + if model_config.create_test_flag_mode == "cesm": + parser.add_argument( + "-c", + "--compare", + help="While testing, compare baselines against the given compare directory. ", + ) + + parser.add_argument( + "-g", + "--generate", + help="While testing, generate baselines in the given generate directory. " + "\nNOTE: this can also be done after the fact with bless_test_results", + ) + + parser.add_argument( + "--xml-machine", + help="Use this machine key in the lookup in testlist.xml. " + "\nThe default is all if any --xml- argument is used.", + ) + + parser.add_argument( + "--xml-compiler", + help="Use this compiler key in the lookup in testlist.xml. " + "\nThe default is all if any --xml- argument is used.", + ) + + parser.add_argument( + "--xml-category", + help="Use this category key in the lookup in testlist.xml. " + "\nThe default is all if any --xml- argument is used.", + ) + + parser.add_argument( + "--xml-testlist", + help="Use this testlist to lookup tests.The default is specified in config_files.xml", + ) + + parser.add_argument( + "--driver", + choices=model_config.driver_choices, + help="Override driver specified in tests and use this one.", + ) + + parser.add_argument( + "testargs", + nargs="*", + help="Tests to run. Testname form is TEST.GRID.COMPSET[.MACHINE_COMPILER]", + ) + + else: + + parser.add_argument( + "testargs", + nargs="+", + help="Tests or test suites to run." 
+ " Testname form is TEST.GRID.COMPSET[.MACHINE_COMPILER]", + ) + + parser.add_argument( + "-b", + "--baseline-name", + help="If comparing or generating baselines, use this directory under baseline root. " + "\nDefault will be current branch name.", + ) + + parser.add_argument( + "-c", + "--compare", + action="store_true", + help="While testing, compare baselines", + ) + + parser.add_argument( + "-g", + "--generate", + action="store_true", + help="While testing, generate baselines. " + "\nNOTE: this can also be done after the fact with bless_test_results", + ) + + parser.add_argument( + "--driver", + help="Override driver specified in tests and use this one.", + ) + + default = get_default_setting(config, "COMPILER", None, check_main=True) + + parser.add_argument( + "--compiler", + default=default, + help="Compiler for building cime. Default will be the name in the " + "\nTestname or the default defined for the machine.", + ) + + parser.add_argument( + "-n", + "--namelists-only", + action="store_true", + help="Only perform namelist actions for tests", + ) + + parser.add_argument( + "-p", + "--project", + help="Specify a project id for the case (optional)." + "\nUsed for accounting and directory permissions when on a batch system." + "\nThe default is user or machine specified by PROJECT." + "\nAccounting (only) may be overridden by user or machine specified CHARGE_ACCOUNT.", + ) + + parser.add_argument( + "-t", + "--test-id", + help="Specify an 'id' for the test. This is simply a string that is appended " + "\nto the end of a test name. If no test-id is specified, a time stamp plus a " + "\nrandom string will be used (ensuring a high probability of uniqueness). " + "\nIf a test-id is specified, it is the user's responsibility to ensure that " + "\neach run of create_test uses a unique test-id. WARNING: problems will occur " + "\nif you use the same test-id twice on the same file system, even if the test " + "\nlists are completely different.", + ) + + default = get_default_setting(config, "PARALLEL_JOBS", None, check_main=False) + + parser.add_argument( + "-j", + "--parallel-jobs", + type=int, + default=default, + help="Number of tasks create_test should perform simultaneously. The default " + "\n is min(num_cores, num_tests).", + ) + + default = get_default_setting(config, "PROC_POOL", None, check_main=False) + + parser.add_argument( + "--proc-pool", + type=int, + default=default, + help="The size of the processor pool that create_test can use. The default is " + "\nMAX_MPITASKS_PER_NODE + 25 percent.", + ) + + default = os.getenv("CIME_GLOBAL_WALLTIME") + if default is None: + default = get_default_setting(config, "WALLTIME", None, check_main=True) + + parser.add_argument( + "--walltime", + default=default, + help="Set the wallclock limit for all tests in the suite. 
" + "\nUse the variable CIME_GLOBAL_WALLTIME to set this for all tests.", + ) + + default = get_default_setting(config, "JOB_QUEUE", None, check_main=True) + + parser.add_argument( + "-q", + "--queue", + default=default, + help="Force batch system to use a certain queue", + ) + + parser.add_argument( + "-f", "--testfile", help="A file containing an ascii list of tests to run" + ) + + default = get_default_setting( + config, "ALLOW_BASELINE_OVERWRITE", False, check_main=False + ) + + default = get_default_setting( + config, "SKIP_TESTS_WITH_EXISTING_BASELINES", False, check_main=False + ) + + # Don't allow -o/--allow-baseline-overwrite AND --skip-tests-with-existing-baselines + existing_baseline_group = parser.add_mutually_exclusive_group() + + existing_baseline_group.add_argument( + "--allow-baseline-overwrite", + "-o", + action="store_true", + default=default, + help="If the --generate option is given, then an attempt to overwrite " + "\nan existing baseline directory will raise an error. WARNING: Specifying this " + "\noption will allow existing baseline directories to be silently overwritten. " + "\nIncompatible with --skip-tests-with-existing-baselines.", + ) + + existing_baseline_group.add_argument( + "--skip-tests-with-existing-baselines", + action="store_true", + default=default, + help="If the --generate option is given, then an attempt to overwrite " + "\nan existing baseline directory will raise an error. WARNING: Specifying this " + "\noption will allow tests with existing baseline directories to be silently skipped. " + "\nIncompatible with -o/--allow-baseline-overwrite.", + ) + + default = get_default_setting(config, "WAIT", False, check_main=False) + + parser.add_argument( + "--wait", + action="store_true", + default=default, + help="On batch systems, wait for submitted jobs to complete", + ) + + default = get_default_setting(config, "ALLOW_PNL", False, check_main=False) + + parser.add_argument( + "--allow-pnl", + action="store_true", + default=default, + help="Do not pass skip-pnl to case.submit", + ) + + parser.add_argument( + "--check-throughput", + action="store_true", + help="Fail if throughput check fails. Requires --wait on batch systems", + ) + + parser.add_argument( + "--check-memory", + action="store_true", + help="Fail if memory check fails. Requires --wait on batch systems", + ) + + parser.add_argument( + "--ignore-namelists", + action="store_true", + help="Do not fail if there namelist diffs", + ) + + parser.add_argument( + "--ignore-diffs", + action="store_true", + help="Do not fail if there history file diffs", + ) + + parser.add_argument( + "--ignore-memleak", action="store_true", help="Do not fail if there's a memleak" + ) + + default = get_default_setting(config, "FORCE_PROCS", None, check_main=False) + + parser.add_argument( + "--force-procs", + type=int, + default=default, + help="For all tests to run with this number of processors", + ) + + default = get_default_setting(config, "FORCE_THREADS", None, check_main=False) + + parser.add_argument( + "--force-threads", + type=int, + default=default, + help="For all tests to run with this number of threads", + ) + + default = get_default_setting(config, "INPUT_DIR", None, check_main=True) + + parser.add_argument( + "-i", + "--input-dir", + default=default, + help="Use a non-default location for input files", + ) + + default = get_default_setting(config, "PESFILE", None, check_main=True) + + parser.add_argument( + "--pesfile", + default=default, + help="Full pathname of an optional pes specification file. 
The file" + "\ncan follow either the config_pes.xml or the env_mach_pes.xml format.", + ) + + default = get_default_setting(config, "RETRY", 0, check_main=False) + + parser.add_argument( + "--retry", + type=int, + default=default, + help="Automatically retry failed tests. >0 implies --wait", + ) + + parser.add_argument( + "-N", + "--non-local", + action="store_true", + help="Use when you've requested a machine that you aren't on. " + "Will reduce errors for missing directories etc.", + ) + + if config and config.has_option("main", "workflow"): + workflow_default = config.get("main", "workflow") + else: + workflow_default = "default" + + parser.add_argument( + "--workflow", + default=workflow_default, + help="A workflow from config_workflow.xml to apply to this case. ", + ) + + parser.add_argument( + "--chksum", action="store_true", help="Verifies input data checksums." + ) + + srcroot_default = utils.get_src_root() + + parser.add_argument( + "--srcroot", + default=srcroot_default, + help="Alternative pathname for source root directory. " + f"The default is {srcroot_default}", + ) + + parser.add_argument( + "--force-rebuild", + action="store_true", + help="When used with 'use-existing' and 'test-id', the" + "tests will have their 'BUILD_SHAREDLIB' phase reset to 'PEND'.", + ) + + CIME.utils.add_mail_type_args(parser) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + CIME.utils.resolve_mail_type_args(args) + + if args.force_rebuild: + expect( + args.use_existing and args.test_id, + "Cannot force a rebuild without 'use-existing' and 'test-id'", + ) + + # generate and compare flags may not point to the same directory + if model_config.create_test_flag_mode == "cesm": + if args.generate is not None: + expect( + not (args.generate == args.compare), + "Cannot generate and compare baselines at the same time", + ) + + if args.xml_testlist is not None: + expect( + not ( + args.xml_machine is None + and args.xml_compiler is None + and args.xml_category is None + ), + "If an xml-testlist is present at least one of --xml-machine, " + "--xml-compiler, --xml-category must also be present", + ) + + else: + expect( + not ( + args.baseline_name is not None + and (not args.compare and not args.generate) + ), + "Provided baseline name but did not specify compare or generate", + ) + expect( + not (args.compare and args.generate), + "Tried to compare and generate at same time", + ) + + expect( + not (args.namelists_only and not (args.generate or args.compare)), + "Must provide either --compare or --generate with --namelists-only", + ) + + if args.retry > 0: + args.wait = True + + if args.parallel_jobs is not None: + expect( + args.parallel_jobs > 0, + "Invalid value for parallel_jobs: %d" % args.parallel_jobs, + ) + + if args.use_existing: + expect(args.test_id is not None, "Must provide test-id of pre-existing cases") + + if args.no_setup: + args.no_build = True + + if args.no_build: + args.no_run = True + + # Namelist-only forces some other options: + if args.namelists_only: + expect(not args.no_setup, "Cannot compare namelists without setup") + args.no_build = True + args.no_run = True + args.no_batch = True + + expect( + not (args.non_local and not args.no_build), "Cannot build on non-local machine" + ) + + if args.single_submit: + expect( + not args.no_run, + "Doesn't make sense to request single-submit if no-run is on", + ) + args.no_build = True + args.no_run = True + args.no_batch = True + + if args.test_id is None: + args.test_id = "%s_%s" % 
(CIME.utils.get_timestamp(), CIME.utils.id_generator()) + else: + expect( + CIME.utils.check_name(args.test_id, additional_chars="."), + "invalid test-id argument provided", + ) + + if args.testfile is not None: + with open(args.testfile, "r") as fd: + args.testargs.extend( + [ + line.strip() + for line in fd.read().splitlines() + if line.strip() and not line.startswith("#") + ] + ) + + # Propagate `srcroot` to `GenericXML` to resolve $SRCROOT + # See call to `Machines` below + utils.GLOBAL["SRCROOT"] = args.srcroot + + # Compute list of fully-resolved test_names + test_extra_data = {} + if model_config.check_machine_name_from_test_name: + machine_name = args.xml_machine if args.machine is None else args.machine + + # If it's still unclear what machine to use, look at test names + if machine_name is None: + for test in args.testargs: + testsplit = CIME.utils.parse_test_name(test) + if testsplit[4] is not None: + if machine_name is None: + machine_name = testsplit[4] + else: + expect( + machine_name == testsplit[4], + "ambiguity in machine, please use the --machine option", + ) + + mach_obj = Machines(machine=machine_name) + if args.testargs: + args.compiler = ( + mach_obj.get_default_compiler() + if args.compiler is None + else args.compiler + ) + test_names = get_tests.get_full_test_names( + args.testargs, mach_obj.get_machine_name(), args.compiler + ) + else: + expect( + not ( + args.xml_machine is None + and args.xml_compiler is None + and args.xml_category is None + and args.xml_testlist is None + ), + "At least one of --xml-machine, --xml-testlist, " + "--xml-compiler, --xml-category or a valid test name must be provided.", + ) + + test_data = get_tests_from_xml( + xml_machine=args.xml_machine, + xml_category=args.xml_category, + xml_compiler=args.xml_compiler, + xml_testlist=args.xml_testlist, + machine=machine_name, + compiler=args.compiler, + driver=args.driver, + ) + test_names = [item["name"] for item in test_data] + for test_datum in test_data: + test_extra_data[test_datum["name"]] = test_datum + + logger.info("Testnames: %s" % test_names) + else: + inf_machine, inf_compilers = get_tests.infer_arch_from_tests(args.testargs) + if args.machine is None: + args.machine = inf_machine + + mach_obj = Machines(machine=args.machine) + if args.compiler is None: + if len(inf_compilers) == 0: + args.compiler = mach_obj.get_default_compiler() + elif len(inf_compilers) == 1: + args.compiler = inf_compilers[0] + else: + # User has multiple compiler specifications in their testargs + args.compiler = inf_compilers[0] + expect( + not args.compare and not args.generate, + "It is not safe to do baseline operations with heterogenous compiler set: {}".format( + inf_compilers + ), + ) + + test_names = get_tests.get_full_test_names( + args.testargs, mach_obj.get_machine_name(), args.compiler + ) + + expect( + mach_obj.is_valid_compiler(args.compiler), + "Compiler %s not valid for machine %s" + % (args.compiler, mach_obj.get_machine_name()), + ) + + if not args.wait and mach_obj.has_batch_system() and not args.no_batch: + expect( + not args.check_throughput, + "Makes no sense to use --check-throughput without --wait", + ) + expect( + not args.check_memory, "Makes no sense to use --check-memory without --wait" + ) + + # Normalize compare/generate between the models + baseline_cmp_name = None + baseline_gen_name = None + if args.compare or args.generate: + if model_config.create_test_flag_mode == "cesm": + if args.compare is not None: + baseline_cmp_name = args.compare + if args.generate is not None: + 
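+                # In CESM mode -c/--compare and -g/--generate carry the baseline
+                # directory names directly, so no branch-derived name is needed.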
baseline_gen_name = args.generate + else: + baseline_name = ( + args.baseline_name + if args.baseline_name + else CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) + ) + expect( + baseline_name is not None, + "Could not determine baseline name from branch, please use -b option", + ) + if args.compare: + baseline_cmp_name = baseline_name + elif args.generate: + baseline_gen_name = baseline_name + + if args.input_dir is not None: + args.input_dir = os.path.abspath(args.input_dir) + + # sanity check + for name in test_names: + dot_count = name.count(".") + expect(dot_count > 1 and dot_count <= 4, "Invalid test Name, '{}'".format(name)) + + # for e3sm, sort by walltime + if model_config.sort_tests: + if args.walltime is None: + # Longest tests should run first + test_names.sort(key=get_tests.key_test_time, reverse=True) + else: + test_names.sort() + + return ( + test_names, + test_extra_data, + args.compiler, + mach_obj.get_machine_name(), + args.no_run, + args.no_build, + args.no_setup, + args.no_batch, + args.test_root, + args.baseline_root, + args.clean, + baseline_cmp_name, + baseline_gen_name, + args.namelists_only, + args.project, + args.test_id, + args.parallel_jobs, + args.walltime, + args.single_submit, + args.proc_pool, + args.use_existing, + args.save_timing, + args.queue, + args.allow_baseline_overwrite, + args.skip_tests_with_existing_baselines, + args.output_root, + args.wait, + args.force_procs, + args.force_threads, + args.mpilib, + args.input_dir, + args.pesfile, + args.retry, + args.mail_user, + args.mail_type, + args.check_throughput, + args.check_memory, + args.ignore_namelists, + args.ignore_diffs, + args.ignore_memleak, + args.allow_pnl, + args.non_local, + args.single_exe, + args.workflow, + args.chksum, + args.force_rebuild, + args.driver, + ) + + +############################################################################### +def get_default_setting(config, varname, default_if_not_found, check_main=False): + ############################################################################### + if config.has_option("create_test", varname): + default = config.get("create_test", varname) + elif check_main and config.has_option("main", varname): + default = config.get("main", varname) + else: + default = default_if_not_found + return default + + +############################################################################### +def single_submit_impl( + machine_name, test_id, proc_pool, _, args, job_cost_map, wall_time, test_root +): + ############################################################################### + mach = Machines(machine=machine_name) + expect( + mach.has_batch_system(), + "Single submit does not make sense on non-batch machine '%s'" + % mach.get_machine_name(), + ) + + machine_name = mach.get_machine_name() + + # + # Compute arg list for second call to create_test + # + new_args = list(args) + new_args.remove("--single-submit") + new_args.append("--no-batch") + new_args.append("--use-existing") + no_arg_is_a_test_id_arg = True + no_arg_is_a_proc_pool_arg = True + no_arg_is_a_machine_arg = True + for arg in new_args: + if arg == "-t" or arg.startswith("--test-id"): + no_arg_is_a_test_id_arg = False + elif arg.startswith("--proc-pool"): + no_arg_is_a_proc_pool_arg = False + elif arg == "-m" or arg.startswith("--machine"): + no_arg_is_a_machine_arg = True + + if no_arg_is_a_test_id_arg: + new_args.append("-t %s" % test_id) + if no_arg_is_a_proc_pool_arg: + new_args.append("--proc-pool %d" % proc_pool) + if no_arg_is_a_machine_arg: + 
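+        # pass the machine name explicitly so the nested, no-batch create_test
+        # invocation runs against the same machine definition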
new_args.append("-m %s" % machine_name) + + # + # Resolve batch directives manually. There is currently no other way + # to do this without making a Case object. Make a throwaway case object + # to help us here. + # + testcase_dirs = glob.glob("%s/*%s*/TestStatus" % (test_root, test_id)) + expect(testcase_dirs, "No test case dirs found!?") + first_case = os.path.abspath(os.path.dirname(testcase_dirs[0])) + with Case(first_case, read_only=False) as case: + env_batch = case.get_env("batch") + + submit_cmd = env_batch.get_value("batch_submit", subgroup=None) + submit_args = env_batch.get_submit_args(case, "case.test") + + tasks_per_node = mach.get_value("MAX_MPITASKS_PER_NODE") + num_nodes = int(math.ceil(float(proc_pool) / tasks_per_node)) + if wall_time is None: + wall_time = compute_total_time(job_cost_map, proc_pool) + wall_time_bab = convert_to_babylonian_time(int(wall_time)) + else: + wall_time_bab = wall_time + + queue = env_batch.select_best_queue(num_nodes, proc_pool, walltime=wall_time_bab) + wall_time_max_bab = env_batch.get_queue_specs(queue)[3] + if wall_time_max_bab is not None: + wall_time_max = convert_to_seconds(wall_time_max_bab) + if wall_time_max < wall_time: + wall_time = wall_time_max + wall_time_bab = convert_to_babylonian_time(wall_time) + + overrides = { + "job_id": "create_test_single_submit_%s" % test_id, + "num_nodes": num_nodes, + "tasks_per_node": tasks_per_node, + "totaltasks": tasks_per_node * num_nodes, + "job_wallclock_time": wall_time_bab, + "job_queue": env_batch.text(queue), + } + + directives = env_batch.get_batch_directives(case, "case.test", overrides=overrides) + + # + # Make simple submit script and submit + # + + script = "#! /bin/bash\n" + script += "\n%s" % directives + script += "\n" + script += "cd %s\n" % os.getcwd() + script += "%s %s\n" % (__file__, " ".join(new_args)) + + submit_cmd = "%s %s" % (submit_cmd, submit_args) + logger.info("Script:\n%s" % script) + + run_cmd_no_fail( + submit_cmd, input_str=script, arg_stdout=None, arg_stderr=None, verbose=True + ) + + +############################################################################### +# pragma pylint: disable=protected-access +def create_test( + test_names, + test_data, + compiler, + machine_name, + no_run, + no_build, + no_setup, + no_batch, + test_root, + baseline_root, + clean, + baseline_cmp_name, + baseline_gen_name, + namelists_only, + project, + test_id, + parallel_jobs, + walltime, + single_submit, + proc_pool, + use_existing, + save_timing, + queue, + allow_baseline_overwrite, + skip_tests_with_existing_baselines, + output_root, + wait, + force_procs, + force_threads, + mpilib, + input_dir, + pesfile, + run_count, + mail_user, + mail_type, + check_throughput, + check_memory, + ignore_namelists, + ignore_diffs, + ignore_memleak, + allow_pnl, + non_local, + single_exe, + workflow, + chksum, + force_rebuild, + driver, +): + ############################################################################### + impl = TestScheduler( + test_names, + test_data=test_data, + no_run=no_run, + no_build=no_build, + no_setup=no_setup, + no_batch=no_batch, + test_root=test_root, + test_id=test_id, + baseline_root=baseline_root, + baseline_cmp_name=baseline_cmp_name, + baseline_gen_name=baseline_gen_name, + clean=clean, + machine_name=machine_name, + compiler=compiler, + namelists_only=namelists_only, + project=project, + parallel_jobs=parallel_jobs, + walltime=walltime, + proc_pool=proc_pool, + use_existing=use_existing, + save_timing=save_timing, + queue=queue, + 
allow_baseline_overwrite=allow_baseline_overwrite, + skip_tests_with_existing_baselines=skip_tests_with_existing_baselines, + output_root=output_root, + force_procs=force_procs, + force_threads=force_threads, + mpilib=mpilib, + input_dir=input_dir, + pesfile=pesfile, + run_count=run_count, + mail_user=mail_user, + mail_type=mail_type, + allow_pnl=allow_pnl, + non_local=non_local, + single_exe=single_exe, + workflow=workflow, + chksum=chksum, + force_rebuild=force_rebuild, + driver=driver, + ) + + success = impl.run_tests( + wait=wait, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_diffs=ignore_diffs, + ignore_memleak=ignore_memleak, + ) + + if success and single_submit: + # Get real test root + test_root = impl._test_root + + job_cost_map = {} + largest_case = 0 + for test in impl._tests: + test_dir = impl._get_test_dir(test) + procs_needed = impl._get_procs_needed(test, RUN_PHASE) + time_needed = convert_to_seconds( + run_cmd_no_fail( + "./xmlquery JOB_WALLCLOCK_TIME -value -subgroup case.test", + from_dir=test_dir, + ) + ) + job_cost_map[test] = (procs_needed, time_needed) + if procs_needed > largest_case: + largest_case = procs_needed + + if proc_pool is None: + # Based on size of created jobs, choose a reasonable proc_pool. May need to put + # more thought into this. + proc_pool = 2 * largest_case + + # Create submit script + single_submit_impl( + machine_name, + test_id, + proc_pool, + project, + sys.argv[1:], + job_cost_map, + walltime, + test_root, + ) + + return success + + +############################################################################### +def _main_func(description=None): + ############################################################################### + customize_path = os.path.join(utils.get_src_root(), "cime_config", "customize") + + if os.path.exists(customize_path): + Config.instance().load(customize_path) + + ( + test_names, + test_data, + compiler, + machine_name, + no_run, + no_build, + no_setup, + no_batch, + test_root, + baseline_root, + clean, + baseline_cmp_name, + baseline_gen_name, + namelists_only, + project, + test_id, + parallel_jobs, + walltime, + single_submit, + proc_pool, + use_existing, + save_timing, + queue, + allow_baseline_overwrite, + skip_tests_with_existing_baselines, + output_root, + wait, + force_procs, + force_threads, + mpilib, + input_dir, + pesfile, + retry, + mail_user, + mail_type, + check_throughput, + check_memory, + ignore_namelists, + ignore_diffs, + ignore_memleak, + allow_pnl, + non_local, + single_exe, + workflow, + chksum, + force_rebuild, + driver, + ) = parse_command_line(sys.argv, description) + + success = False + run_count = 0 + while not success and run_count <= retry: + use_existing = use_existing if run_count == 0 else True + allow_baseline_overwrite = allow_baseline_overwrite if run_count == 0 else True + success = create_test( + test_names, + test_data, + compiler, + machine_name, + no_run, + no_build, + no_setup, + no_batch, + test_root, + baseline_root, + clean, + baseline_cmp_name, + baseline_gen_name, + namelists_only, + project, + test_id, + parallel_jobs, + walltime, + single_submit, + proc_pool, + use_existing, + save_timing, + queue, + allow_baseline_overwrite, + skip_tests_with_existing_baselines, + output_root, + wait, + force_procs, + force_threads, + mpilib, + input_dir, + pesfile, + run_count, + mail_user, + mail_type, + check_throughput, + check_memory, + ignore_namelists, + ignore_diffs, + ignore_memleak, + allow_pnl, + non_local, + 
single_exe, + workflow, + chksum, + force_rebuild, + driver, + ) + run_count += 1 + + # For testing only + os.environ["TESTBUILDFAIL_PASS"] = "True" + os.environ["TESTRUNFAIL_PASS"] = "True" + + sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) + + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/scripts/query_config.py b/CIME/scripts/query_config.py new file mode 100755 index 00000000000..41b8d804578 --- /dev/null +++ b/CIME/scripts/query_config.py @@ -0,0 +1,384 @@ +#!/usr/bin/env python3 +""" +Displays information about available compsets, component settings, grids and/or +machines. Typically run with one of the arguments --compsets, --settings, +--grids or --machines; if you specify more than one of these arguments, +information will be listed for each. +""" + +import os +import sys +import logging +import argparse + +from CIME.Tools.standard_script_setup import * +from CIME import utils +from CIME.XML.files import Files +from CIME.XML.component import Component +from CIME.XML.compsets import Compsets +from CIME.XML.grids import Grids +from CIME.XML.machines import Machines +from CIME.config import Config + +logger = logging.getLogger(__name__) + +customize_path = os.path.join(utils.get_src_root(), "cime_config", "customize") + +config = Config.load(customize_path) + + +def _main_func(description=__doc__): + kwargs = parse_command_line(description) + + if kwargs["grids"]: + query_grids(**kwargs) + + if kwargs["compsets"] is not None: + query_compsets(**kwargs) + + if kwargs["components"] is not None: + query_component_settings(**kwargs) + + if kwargs["machines"] is not None: + query_machines(**kwargs) + + +def parse_command_line(description): + files = {x: Files(x) for x in list(config.driver_choices)} + + compset_active_components = get_compset_active_components(files) + compset_active_components.extend(["all"]) + + components = get_components(files) + components.extend(["all"]) + + default_driver = config.driver_default + config_file = files[default_driver].get_value("MACHINES_SPEC_FILE") + utils.expect( + os.path.isfile(config_file), + "Cannot find config_file {} on disk".format(config_file), + ) + machines = Machines(config_file, machine="Query") + machine_names = ["all", "current"] + machine_names.extend(machines.list_available_machines()) + + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter + ) + + config_group = parser.add_argument_group("Config options") + + config_group.add_argument( + "--compsets", + nargs="?", + const="all", + choices=sorted(set(compset_active_components)), + help="Query compsets for active component. If no value is passed, compsets for all active components will be printed.", + ) + + config_group.add_argument( + "--components", + nargs="?", + const="all", + choices=sorted(set(components)), + help="Query settings for component. If not value is passed, settings for all components will be printed.", + ) + + config_group.add_argument( + "--grids", action="store_true", help="Query grids for model." + ) + + config_group.add_argument( + "--machines", + nargs="?", + const="all", + choices=machine_names, + help="Query machines for model. 
If not value is passed, all machines will be printed.", + ) + + config_group.add_argument( + "--compiler", + help="Prints compiler details when combined with --machines", + ) + + output_group = parser.add_argument_group("Output options") + + output_group.add_argument( + "--long", action="store_true", help="Print extended output for queries." + ) + + output_group.add_argument("--xml", action="store_true", help="Print output in xml.") + + filter_group = parser.add_argument_group("Filter options") + + default_driver = config.driver_default + + filter_group.add_argument( + "--driver", + choices=config.driver_choices, + default=default_driver, + help=f"Filter by driver, defaults to {default_driver!r}.", + ) + + filter_group.add_argument( + "--comp_interface", + choices=config.driver_choices, + default="mct", + action=utils.deprecate_action(", use --driver argument"), + help="DEPRECATED: Use --driver argument", + ) + + utils.setup_standard_logging_options(parser) + + kwargs = vars(parser.parse_args()) + + utils.configure_logging(**kwargs) + + # make sure at least one argument has been passed + if not any([kwargs[x] for x in ["grids", "compsets", "components", "machines"]]): + parser.print_help(sys.stderr) + + if kwargs["compiler"] is not None and ( + kwargs["machines"] is None or kwargs["machines"] == "all" + ): + parser.print_help(sys.stderr) + + print("") + print( + "The --compiler argument must be used when specifying a machine with --machines " + ) + + sys.exit(1) + + kwargs["files"] = files[kwargs["driver"]] + + return kwargs + + +def get_compset_active_components(files): + values = [] + + for file in files.values(): + active_components = file.get_components("COMPSETS_SPEC_FILE") + + values.extend([x for x in active_components if x is not None]) + + return values + + +def get_components(files): + values = [] + + for file in files.values(): + classes = get_component_classes(file) + + for c in classes: + components = file.get_components(f"COMP_ROOT_DIR_{c}") + + values.extend([x for x in components if x is not None]) + + return values + + +def get_component_classes(files): + infile = files.get_value("CONFIG_CPL_FILE") + + config_drv = Component(infile, "CPL") + + return config_drv.get_valid_model_components() + + +def query_grids(files, long, xml, **_): + config_file = files.get_value("GRIDS_SPEC_FILE") + + utils.expect( + os.path.isfile(config_file), + "Cannot find config_file {} on disk".format(config_file), + ) + + grids = Grids(config_file) + + if xml: + print("{}".format(grids.get_raw_record().decode("UTF-8"))) + else: + grids.print_values(long=long) + + +def query_compsets(files, compsets, **kwargs): + if compsets == "all": + active_components = files.get_components("COMPSETS_SPEC_FILE") + + for component in active_components: + # the all_components flag will only print available components + print_compset(component, files, all_components=True, **kwargs) + else: + print_compset(compsets, files, **kwargs) + + +def print_compset(name, files, xml=False, all_components=False, **_): + """ + print compsets associated with the component name, but if all_components is true only + print the details if the associated component is available + """ + + # Determine the config_file for the target component + config_file = files.get_value("COMPSETS_SPEC_FILE", attribute={"component": name}) + # only error out if we aren't printing all otherwise exit quitely + if not all_components: + utils.expect( + (config_file), + "Cannot find any config_component.xml file for {}".format(name), + ) + + # Check that 
file exists on disk + utils.expect( + os.path.isfile(config_file), + "Cannot find config_file {} on disk".format(config_file), + ) + elif config_file is None or not os.path.isfile(config_file): + return + + if config.test_mode not in ("e3sm", "cesm") and name == "drv": + return + + print("\nActive component: {}".format(name)) + # Now parse the compsets file and write out the compset alias and longname as well as the help text + # determine component xml content + compsets = Compsets(config_file) + # print compsets associated with component without help text + if xml: + print("{}".format(compsets.get_raw_record().decode("UTF-8"))) + else: + compsets.print_values(arg_help=False) + + +def query_component_settings(components, files, **kwargs): + classes = get_component_classes(files) + + if components == "all": + # Loop through the elements for each component class (in config_files.xml) + for comp in classes: + string = "CONFIG_{}_FILE".format(comp) + + # determine all components in string + components = files.get_components(string) + + for item in components: + _query_component_settings(item, files, all_components=True, **kwargs) + else: + _query_component_settings(components, files, **kwargs) + + +def _query_component_settings(component, files, xml=False, all_components=False, **_): + # Determine the valid component classes (e.g. atm) for the driver/cpl + # These are then stored in comps_array + classes = get_component_classes(files) + + # Loop through the elements for each component class (in config_files.xml) + # and see if there is a match for the the target component in the component attribute + match_found = False + valid_components = [] + config_exists = False + for comp in classes: + string = "CONFIG_{}_FILE".format(comp) + config_file = None + # determine all components in string + root_dir_node_name = "COMP_ROOT_DIR_{}".format(comp) + _components = files.get_components(root_dir_node_name) + if _components is None: + _components = files.get_components(string) + for item in _components: + valid_components.append(item) + logger.debug("{}: valid_components {}".format(comp, valid_components)) + # determine if config_file is on disk + if component is None: + config_file = files.get_value(string) + elif component in valid_components: + config_file = files.get_value(string, attribute={"component": component}) + logger.debug("query {}".format(config_file)) + if config_file is not None: + match_found = True + config_exists = os.path.isfile(config_file) + break + + if not all_components and not config_exists: + utils.expect( + config_exists, "Cannot find config_file {} on disk".format(config_file) + ) + elif all_components and not config_exists: + print("WARNING: Couldn't find config_file {} on disk".format(config_file)) + return + # If name is not a valid argument - exit with error + utils.expect( + match_found, + "Invalid input argument {}, valid input arguments are {}".format( + component, valid_components + ), + ) + + # Check that file exists on disk, if not exit with error + utils.expect( + (config_file), + "Cannot find any config_component.xml file for {}".format(component), + ) + + # determine component xml content + component = Component(config_file, "CPL") + if xml: + print("{}".format(component.get_raw_record().decode("UTF-8"))) + else: + component.print_values() + + +def query_machines(files, machines, xml, compiler, **_): + config_file = files.get_value("MACHINES_SPEC_FILE") + utils.expect( + os.path.isfile(config_file), + "Cannot find config_file {} on disk".format(config_file), + 
)
+    # Provide a special machine name indicating no need for a machine name
+    xml_machines = Machines(config_file, machine="Query")
+    if xml:
+        if machines == "all":
+            print("{}".format(xml_machines.get_raw_record().decode("UTF-8")))
+        else:
+            xml_machines.set_machine(machines)
+            print(
+                "{}".format(
+                    xml_machines.get_raw_record(root=xml_machines.machine_node).decode(
+                        "UTF-8"
+                    )
+                )
+            )
+    else:
+        print_machine_values(xml_machines, compiler, machines)
+
+
+def print_machine_values(
+    machine,
+    compiler,
+    machine_name="all",
+):  # pylint: disable=arguments-differ
+    """Prints machine values
+
+    Args:
+        machine (CIME.XML.machines.Machines): Machines object.
+        compiler (str): Compiler to print details for.
+        machine_name (str, optional): Which machine to print values for, can be "all", "current", or a specific name. Defaults to "all".
+    """
+    if machine_name == "current":
+        machine_name = machine.probe_machine_name(False)
+
+    if machine_name == "all":
+        machine_names = machine.list_available_machines()
+    else:
+        machine_names = [machine_name]
+
+    print("Machine(s)\n")
+
+    for name in machine_names:
+        machine.set_machine(name)
+        machine.print_values(compiler)
+
+
+if __name__ == "__main__":
+    _main_func(__doc__)
diff --git a/CIME/scripts/query_testlists.py b/CIME/scripts/query_testlists.py
new file mode 100755
index 00000000000..ad00733cd61
--- /dev/null
+++ b/CIME/scripts/query_testlists.py
@@ -0,0 +1,269 @@
+#!/usr/bin/env python3
+
+"""
+Script to query XML test lists, displaying all tests in human-readable form.
+
+Usage:
+   ./query_testlists [--show-options] [--define-testtypes]
+      Display a list of tests
+   ./query_testlists --count
+      Count tests by category/machine/compiler
+   ./query_testlists --list {category,categories,machine,machines,compiler,compilers}
+      List the available options for --xml-category, --xml-machine, or --xml-compiler
+
+   All of the above support the various --xml-* arguments for subsetting which tests are included.
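+
+   For example, to count only the tests defined for one machine (the machine
+   name is illustrative):
+      ./query_testlists --count --xml-machine cheyenne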
+""" + +from CIME.Tools.standard_script_setup import * +from CIME.test_utils import get_tests_from_xml, test_to_string +from CIME.XML.tests import Tests +from CIME.utils import expect + +logger = logging.getLogger(__name__) + +############################################################################### +def parse_command_line(args, description): + ############################################################################### + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawTextHelpFormatter + ) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument( + "--count", + action="store_true", + help="Rather than listing tests, just give counts by category/machine/compiler.", + ) + + parser.add_argument( + "--list", + dest="list_type", + choices=[ + "category", + "categories", + "machine", + "machines", + "compiler", + "compilers", + ], + help="Rather than listing tests, list the available options for\n" + "--xml-category, --xml-machine, or --xml-compiler.\n" + "(The singular and plural forms are equivalent - so '--list category'\n" + "is equivalent to '--list categories', etc.)", + ) + + parser.add_argument( + "--show-options", + action="store_true", + help="For each test, also show options for that test\n" + "(wallclock time, memory leak tolerance, etc.).\n" + "(Has no effect with --list or --count options.)", + ) + + parser.add_argument( + "--define-testtypes", + action="store_true", + help="At the top of the list of tests, define all of the possible test types.\n" + "(Has no effect with --list or --count options.)", + ) + + parser.add_argument( + "--xml-category", + help="Only include tests in this category; default is all categories.", + ) + + parser.add_argument( + "--xml-machine", + help="Only include tests for this machine; default is all machines.", + ) + + parser.add_argument( + "--xml-compiler", + help="Only include tests for this compiler; default is all compilers.", + ) + + parser.add_argument( + "--xml-testlist", + help="Path to testlist file from which tests are gathered;\n" + "default is all files specified in config_files.xml.", + ) + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + _check_argument_compatibility(args) + + if args.list_type: + _process_list_type(args) + + return args + + +############################################################################### +def _check_argument_compatibility(args): + ############################################################################### + """Ensures there are no incompatible arguments + + If incompatible arguments are found, aborts with a helpful error + message. 
+ """ + + expect( + not (args.count and args.list_type), + "Cannot specify both --count and --list arguments.", + ) + + if args.count: + expect(not args.show_options, "--show-options is incompatible with --count") + expect( + not args.define_testtypes, "--define-testtypes is incompatible with --count" + ) + + if args.list_type: + expect(not args.show_options, "--show-options is incompatible with --list") + expect( + not args.define_testtypes, "--define-testtypes is incompatible with --list" + ) + + +############################################################################### +def _process_list_type(args): + ############################################################################### + """Convert args.list_type into a name that matches one of the keys of the + test data dictionaries + + Args: + args: object containing list_type string attribute + """ + + if args.list_type == "categories": + args.list_type = "category" + elif args.list_type == "machines": + args.list_type = "machine" + elif args.list_type == "compilers": + args.list_type = "compiler" + + +############################################################################### +def print_test_data(test_data, show_options, define_testtypes): + ############################################################################### + """ + Args: + test_data (dict): dictionary of test data, containing at least these keys: + - name: full test name + - category: test category + """ + + if define_testtypes: + print("#" * 72) + print("Test types") + print("----------") + test_definitions = Tests() + test_definitions.print_values(skip_infrastructure_tests=True) + print("#" * 72) + + categories = sorted(set([item["category"] for item in test_data])) + max_category_len = max([len(category) for category in categories]) + max_test_len = max([len(item["name"]) for item in test_data]) + for category in categories: + test_subset = [ + one_test for one_test in test_data if one_test["category"] == category + ] + for one_test in test_subset: + print( + test_to_string( + test=one_test, + category_field_width=max_category_len, + test_field_width=max_test_len, + show_options=show_options, + ) + ) + + +############################################################################### +def count_test_data(test_data): + ############################################################################### + """ + Args: + test_data (dict): dictionary of test data, containing at least these keys: + - name: full test name + - category: test category + - machine + - compiler + """ + + tab_stop = " " * 4 + + categories = sorted(set([item["category"] for item in test_data])) + for category in categories: + tests_this_category = [ + one_test for one_test in test_data if one_test["category"] == category + ] + print("%s: %d" % (category, len(tests_this_category))) + + machines = sorted(set([item["machine"] for item in tests_this_category])) + for machine in machines: + tests_this_machine = [ + one_test + for one_test in tests_this_category + if one_test["machine"] == machine + ] + print("%s%s: %d" % (tab_stop, machine, len(tests_this_machine))) + + compilers = sorted(set([item["compiler"] for item in tests_this_machine])) + for compiler in compilers: + tests_this_compiler = [ + one_test + for one_test in tests_this_machine + if one_test["compiler"] == compiler + ] + print("%s%s: %d" % (tab_stop * 2, compiler, len(tests_this_compiler))) + + +############################################################################### +def list_test_data(test_data, list_type): + 
############################################################################### + """List categories, machines or compilers + + Args: + test_data (dict): dictionary of test data, containing at least these keys: + - category + - machine + - compiler + list_type (str): one of 'category', 'machine' or 'compiler' + """ + + items = sorted(set([one_test[list_type] for one_test in test_data])) + for item in items: + print(item) + + +############################################################################### +def _main_func(description=None): + ############################################################################### + args = parse_command_line(sys.argv, description) + + test_data = get_tests_from_xml( + xml_machine=args.xml_machine, + xml_category=args.xml_category, + xml_compiler=args.xml_compiler, + xml_testlist=args.xml_testlist, + ) + + expect( + test_data, + "No tests found with the following options (where 'None' means no subsetting on that attribute):\n" + "\tMachine = %s\n\tCategory = %s\n\tCompiler = %s\n\tTestlist = %s" + % (args.xml_machine, args.xml_category, args.xml_compiler, args.xml_testlist), + ) + + if args.count: + count_test_data(test_data) + elif args.list_type: + list_test_data(test_data, args.list_type) + else: + print_test_data(test_data, args.show_options, args.define_testtypes) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/simple_compare.py b/CIME/simple_compare.py new file mode 100644 index 00000000000..515457af017 --- /dev/null +++ b/CIME/simple_compare.py @@ -0,0 +1,248 @@ +import os, re + +from CIME.utils import expect + +############################################################################### +def _normalize_string_value(value, case): + ############################################################################### + """ + Some of the strings are inherently prone to diffs, like file + paths, etc. This function attempts to normalize that data so that + it will not cause diffs. + """ + # Any occurance of case must be normalized because test-ids might not match + if case is not None: + case_re = re.compile(r"{}[.]([GC])[.]([^./\s]+)".format(case)) + value = case_re.sub("{}.ACTION.TESTID".format(case), value) + + if "/" in value: + # File path, just return the basename + return os.path.basename(value) + elif "username" in value: + return "" + elif ".log." in value: + # Remove the part that's prone to diff + components = value.split(".") + return os.path.basename(".".join(components[0:-1])) + else: + return value + + +############################################################################### +def _skip_comments_and_whitespace(lines, idx): + ############################################################################### + """ + Starting at idx, return next valid idx of lines that contains real data + """ + if idx == len(lines): + return idx + + comment_re = re.compile(r"^[#!]") + + lines_slice = lines[idx:] + for line in lines_slice: + line = line.strip() + if comment_re.match(line) is not None or line == "": + idx += 1 + else: + return idx + + return idx + + +############################################################################### +def _compare_data(gold_lines, comp_lines, case, offset_method=False): + ############################################################################### + """ + >>> teststr = ''' + ... data1 + ... data2 data3 + ... data4 data5 data6 + ... + ... # Comment + ... data7 data8 data9 data10 + ... 
'''
+    >>> _compare_data(teststr.splitlines(), teststr.splitlines(), None)
+    ('', 0)
+
+    >>> teststr2 = '''
+    ... data1
+    ... data2 data30
+    ... data4 data5 data6
+    ... data7 data8 data9 data10
+    ... data00
+    ... '''
+    >>> results,_ = _compare_data(teststr.splitlines(), teststr2.splitlines(), None)
+    >>> print(results)
+    Inequivalent lines data2 data3 != data2 data30
+      NORMALIZED: data2 data3 != data2 data30
+    Found extra lines
+    data00
+
+    >>> teststr3 = '''
+    ... data1
+    ... data4 data5 data6
+    ... data7 data8 data9 data10
+    ... data00
+    ... '''
+    >>> results,_ = _compare_data(teststr3.splitlines(), teststr2.splitlines(), None, offset_method=True)
+    >>> print(results)
+    Inequivalent lines data4 data5 data6 != data2 data30
+      NORMALIZED: data4 data5 data6 != data2 data30
+
+    """
+    comments = ""
+    cnt = 0
+    gidx, cidx = 0, 0
+    gnum, cnum = len(gold_lines), len(comp_lines)
+    while gidx < gnum or cidx < cnum:
+        gidx = _skip_comments_and_whitespace(gold_lines, gidx)
+        cidx = _skip_comments_and_whitespace(comp_lines, cidx)
+
+        if gidx == gnum:
+            if cidx == cnum:
+                return comments, cnt
+            else:
+                comments += "Found extra lines\n"
+                comments += "\n".join(comp_lines[cidx:]) + "\n"
+                return comments, cnt
+        elif cidx == cnum:
+            comments += "Missing lines\n"
+            comments += "\n".join(gold_lines[gidx:]) + "\n"
+            return comments, cnt
+
+        gold_value = gold_lines[gidx].strip()
+        gold_value = gold_value.replace('"', "'")
+        comp_value = comp_lines[cidx].strip()
+        comp_value = comp_value.replace('"', "'")
+
+        norm_gold_value = _normalize_string_value(gold_value, case)
+        norm_comp_value = _normalize_string_value(comp_value, case)
+        if norm_gold_value != norm_comp_value:
+            comments += "Inequivalent lines {} != {}\n".format(gold_value, comp_value)
+            comments += "  NORMALIZED: {} != {}\n".format(
+                norm_gold_value, norm_comp_value
+            )
+            cnt += 1
+        if offset_method and (norm_gold_value != norm_comp_value):
+            if gnum > cnum:
+                gidx += 1
+            else:
+                cidx += 1
+        else:
+            gidx += 1
+            cidx += 1
+
+    return comments, cnt
+
+
+###############################################################################
+def compare_files(gold_file, compare_file, case=None):
+    ###############################################################################
+    """
+    Returns true if files are the same, comments are returned too:
+    (success, comments)
+    """
+    expect(os.path.exists(gold_file), "File not found: {}".format(gold_file))
+    expect(os.path.exists(compare_file), "File not found: {}".format(compare_file))
+
+    comments, cnt = _compare_data(
+        open(gold_file, "r").readlines(), open(compare_file, "r").readlines(), case
+    )
+
+    if cnt > 0:
+        comments2, cnt2 = _compare_data(
+            open(gold_file, "r").readlines(),
+            open(compare_file, "r").readlines(),
+            case,
+            offset_method=True,
+        )
+        if cnt2 < cnt:
+            comments = comments2
+
+    return comments == "", comments
+
+
+###############################################################################
+def compare_runconfigfiles(gold_file, compare_file, case=None):
+    ###############################################################################
+    """
+    Returns true if files are the same, comments are returned too:
+    (success, comments)
+    """
+    expect(os.path.exists(gold_file), "File not found: {}".format(gold_file))
+    expect(os.path.exists(compare_file), "File not found: {}".format(compare_file))
+
+    # create dictionaries of the runconfig files and compare them
+    gold_dict = _parse_runconfig(gold_file)
+    compare_dict = _parse_runconfig(compare_file)
+    comments = findDiff(gold_dict, compare_dict, 
case=case) + comments = comments.replace(" d1", " " + gold_file) + comments = comments.replace(" d2", " " + compare_file) + # this picks up the case that an entry in compare is not in gold + if comments == "": + comments = findDiff(compare_dict, gold_dict, case=case) + comments = comments.replace(" d2", " " + gold_file) + comments = comments.replace(" d1", " " + compare_file) + + return comments == "", comments + + +def _parse_runconfig(filename): + runconfig = {} + inrunseq = False + insubsection = None + subsection_re = re.compile(r"\s*(\S+)::") + group_re = re.compile(r"\s*(\S+)\s*:\s*(\S+)") + var_re = re.compile(r"\s*(\S+)\s*=\s*(\S+)") + with open(filename, "r") as fd: + for line in fd: + # remove comments + line = line.split("#")[0] + subsection_match = subsection_re.match(line) + group_match = group_re.match(line) + var_match = var_re.match(line) + if re.match(r"\s*runSeq\s*::", line): + runconfig["runSeq"] = [] + inrunseq = True + elif re.match(r"\s*::\s*", line): + inrunseq = False + elif inrunseq: + runconfig["runSeq"].append(line) + elif subsection_match: + insubsection = subsection_match.group(1) + runconfig[insubsection] = {} + elif group_match: + runconfig[group_match.group(1)] = group_match.group(2) + elif insubsection and var_match: + runconfig[insubsection][var_match.group(1)] = var_match.group(2) + return runconfig + + +def findDiff(d1, d2, path="", case=None): + comment = "" + for k in d1.keys(): + if not k in d2: + comment += path + ":\n" + comment += k + " as key not in d2\n" + else: + if type(d1[k]) is dict: + if path == "": + path = k + else: + path = path + "->" + k + comment += findDiff(d1[k], d2[k], path=path, case=case) + else: + if case in d1[k]: + pass + elif "username" in k: + pass + elif "logfile" in k: + pass + elif "model_version" in k: + pass + elif d1[k] != d2[k]: + comment += path + ":\n" + comment += " - {} : {}\n".format(k, d1[k]) + comment += " + {} : {}\n".format(k, d2[k]) + return comment diff --git a/CIME/status.py b/CIME/status.py new file mode 100644 index 00000000000..08618c7219d --- /dev/null +++ b/CIME/status.py @@ -0,0 +1,140 @@ +# These routines were moved from utils.py to avoid circular dependancies +import time, os, sys, logging +from CIME.utils import Timeout, CASE_SUCCESS, CASE_FAILURE + +logger = logging.getLogger(__name__) + + +def append_status(msg, sfile, caseroot="."): + """ + Append msg to sfile in caseroot + """ + ctime = time.strftime("%Y-%m-%d %H:%M:%S: ") + + # Reduce empty lines in CaseStatus. 
It's a very concise file + # and does not need extra newlines for readability + line_ending = "\n" + with open(os.path.join(caseroot, sfile), "a") as fd: + fd.write(ctime + msg + line_ending) + fd.write(" ---------------------------------------------------" + line_ending) + + +def append_testlog(msg, caseroot="."): + """ + Add to TestStatus.log file + """ + append_status(msg, "TestStatus.log", caseroot) + + +def append_case_status(phase, status, msg=None, caseroot=".", gitinterface=None): + """ + Update CaseStatus file + """ + msg = msg if msg else "" + append_status( + "{} {} {}".format(phase, status, msg), + "CaseStatus", + caseroot, + ) + if gitinterface: + filelist = gitinterface.git_operation( + "ls-files", "--deleted", "--exclude-standard" + ) + # First delete files that have been removed + if filelist: + for f in filelist.splitlines(): + logger.debug("removing file {}".format(f)) + gitinterface.git_operation("rm", f) + filelist = gitinterface.git_operation( + "ls-files", "--others", "--modified", "--exclude-standard" + ) + # Files that should not be added should have been excluded by the .gitignore file + if filelist: + for f in filelist.splitlines(): + logger.debug("adding file {}".format(f)) + gitinterface.git_operation("add", f) + msg = msg if msg else " no message provided" + push = True + try: + gitinterface.git_operation("commit", "-m", '"' + msg + '"') + except Exception as e: + print(e) + push = False + + remote = gitinterface.git_operation("remote") + if remote and push: + with Timeout(30): + gitinterface.git_operation("push", remote) + + +def run_and_log_case_status( + func, + phase, + caseroot=".", + custom_starting_msg_functor=None, + custom_success_msg_functor=None, + is_batch=False, + gitinterface=None, +): + starting_msg = None + + if custom_starting_msg_functor is not None: + starting_msg = custom_starting_msg_functor() + + # Delay appending "starting" on "case.subsmit" phase when batch system is + # present since we don't have the jobid yet + if phase != "case.submit" or not is_batch: + append_case_status( + phase, + "starting", + msg=starting_msg, + caseroot=caseroot, + gitinterface=gitinterface, + ) + rv = None + try: + rv = func() + except BaseException: + custom_success_msg = ( + custom_success_msg_functor(rv) + if custom_success_msg_functor and rv is not None + else None + ) + if phase == "case.submit" and is_batch: + append_case_status( + phase, + "starting", + msg=custom_success_msg, + caseroot=caseroot, + gitinterface=gitinterface, + ) + e = sys.exc_info()[1] + append_case_status( + phase, + CASE_FAILURE, + msg=("\n{}".format(e)), + caseroot=caseroot, + gitinterface=gitinterface, + ) + raise + else: + custom_success_msg = ( + custom_success_msg_functor(rv) if custom_success_msg_functor else None + ) + if phase == "case.submit" and is_batch: + append_case_status( + phase, + "starting", + msg=custom_success_msg, + caseroot=caseroot, + gitinterface=gitinterface, + ) + append_case_status( + phase, + CASE_SUCCESS, + msg=custom_success_msg, + caseroot=caseroot, + gitinterface=gitinterface, + ) + + return rv diff --git a/CIME/test_scheduler.py b/CIME/test_scheduler.py new file mode 100644 index 00000000000..2f05d946e03 --- /dev/null +++ b/CIME/test_scheduler.py @@ -0,0 +1,1508 @@ +""" +A library for scheduling/running through the phases of a set +of system tests. Supports phase-level parallelism (can make progres +on multiple system tests at once). + +TestScheduler will handle the TestStatus for the 1-time setup +phases. 
All other phases need to handle their own status because +they can be run outside the context of TestScheduler. +""" + +import os +import traceback, stat, threading, time, glob +from collections import OrderedDict + +from CIME.XML.standard_module_setup import * +from CIME.get_tests import get_recommended_test_time, get_build_groups, is_perf_test +from CIME.status import append_status, append_testlog +from CIME.utils import ( + TESTS_FAILED_ERR_CODE, + parse_test_name, + get_full_test_name, + get_model, + convert_to_seconds, + get_cime_root, + get_src_root, + get_tools_path, + get_template_path, + get_project, + get_timestamp, + get_cime_default_driver, + clear_folder, + CIMEError, +) +from CIME.config import Config +from CIME.test_status import * +from CIME.XML.machines import Machines +from CIME.XML.generic_xml import GenericXML +from CIME.XML.env_test import EnvTest +from CIME.XML.env_mach_pes import EnvMachPes +from CIME.XML.files import Files +from CIME.XML.component import Component +from CIME.XML.tests import Tests +from CIME.case import Case +from CIME.wait_for_tests import wait_for_tests +from CIME.provenance import get_recommended_test_time_based_on_past +from CIME.locked_files import lock_file +from CIME.cs_status_creator import create_cs_status +from CIME.hist_utils import generate_teststatus +from CIME.build import post_build +from CIME.SystemTests.test_mods import find_test_mods + +logger = logging.getLogger(__name__) + +# Phases managed by TestScheduler +TEST_START = "INIT" # Special pseudo-phase just for test_scheduler bookkeeping +PHASES = [ + TEST_START, + CREATE_NEWCASE_PHASE, + XML_PHASE, + SETUP_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + RUN_PHASE, +] # Order matters + +############################################################################### +def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads): + ############################################################################### + new_test_names = [] + caseopts = [] + for test_name in test_names: + ( + testcase, + caseopts, + grid, + compset, + machine, + compiler, + testmods, + ) = parse_test_name(test_name) + rewrote_caseopt = False + if caseopts is not None: + for idx, caseopt in enumerate(caseopts): + if caseopt.startswith("P"): + caseopt = caseopt[1:] + if "x" in caseopt: + old_procs, old_thrds = caseopt.split("x") + else: + old_procs, old_thrds = caseopt, None + + new_procs = force_procs if force_procs is not None else old_procs + new_thrds = ( + force_threads if force_threads is not None else old_thrds + ) + + newcaseopt = ( + ("P{}".format(new_procs)) + if new_thrds is None + else ("P{}x{}".format(new_procs, new_thrds)) + ) + caseopts[idx] = newcaseopt + + rewrote_caseopt = True + break + + if not rewrote_caseopt: + force_procs = "M" if force_procs is None else force_procs + newcaseopt = ( + ("P{}".format(force_procs)) + if force_threads is None + else ("P{}x{}".format(force_procs, force_threads)) + ) + if caseopts is None: + caseopts = [newcaseopt] + else: + caseopts.append(newcaseopt) + + new_test_name = get_full_test_name( + testcase, + caseopts=caseopts, + grid=grid, + compset=compset, + machine=machine, + compiler=compiler, + testmods_list=testmods, + ) + new_test_names.append(new_test_name) + + return new_test_names + + +_TIME_CACHE = {} +############################################################################### +def _get_time_est(test, baseline_root, as_int=False, use_cache=False, raw=False): + 
############################################################################### + if test in _TIME_CACHE and use_cache: + return _TIME_CACHE[test] + + recommended_time = get_recommended_test_time_based_on_past( + baseline_root, test, raw=raw + ) + + if recommended_time is None: + recommended_time = get_recommended_test_time(test) + + if as_int: + if recommended_time is None: + recommended_time = 9999999999 + else: + recommended_time = convert_to_seconds(recommended_time) + + if use_cache: + _TIME_CACHE[test] = recommended_time + + return recommended_time + + +############################################################################### +def _order_tests_by_runtime(tests, baseline_root): + ############################################################################### + tests.sort( + key=lambda x: _get_time_est( + x, baseline_root, as_int=True, use_cache=True, raw=True + ), + reverse=True, + ) + + +############################################################################### +class TestScheduler(object): + ############################################################################### + + ########################################################################### + def __init__( + self, + test_names, + test_data=None, + no_run=False, + no_build=False, + no_setup=False, + no_batch=None, + test_root=None, + test_id=None, + machine_name=None, + compiler=None, + baseline_root=None, + baseline_cmp_name=None, + baseline_gen_name=None, + clean=False, + namelists_only=False, + project=None, + parallel_jobs=None, + walltime=None, + proc_pool=None, + use_existing=False, + save_timing=False, + queue=None, + allow_baseline_overwrite=False, + skip_tests_with_existing_baselines=False, + output_root=None, + force_procs=None, + force_threads=None, + mpilib=None, + input_dir=None, + pesfile=None, + run_count=0, + mail_user=None, + mail_type=None, + allow_pnl=False, + non_local=False, + single_exe=False, + workflow=None, + chksum=False, + force_rebuild=False, + driver=None, + ): + ########################################################################### + self._cime_root = get_cime_root() + self._cime_model = get_model() + self._cime_driver = driver if driver is not None else get_cime_default_driver() + self._save_timing = save_timing + self._queue = queue + self._test_data = ( + {} if test_data is None else test_data + ) # Format: {test_name -> {data_name -> data}} + self._mpilib = mpilib # allow override of default mpilib + self._completed_tests = 0 + self._input_dir = input_dir + self._pesfile = pesfile + self._allow_baseline_overwrite = allow_baseline_overwrite + self._skip_tests_with_existing_baselines = skip_tests_with_existing_baselines + self._single_exe = single_exe + if self._single_exe: + self._allow_pnl = True + else: + self._allow_pnl = allow_pnl + self._non_local = non_local + self._build_groups = [] + self._workflow = workflow + + self._mail_user = mail_user + self._mail_type = mail_type + + self._machobj = Machines(machine=machine_name) + + self._config = Config.instance() + + if self._config.calculate_mode_build_cost: + # Current build system is unlikely to be able to productively use more than 16 cores + self._model_build_cost = min( + 16, int((self._machobj.get_value("GMAKE_J") * 2) / 3) + 1 + ) + else: + self._model_build_cost = 4 + + # If user is forcing procs or threads, re-write test names to reflect this. 
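+        # For example (hypothetical values, machine/compiler are placeholders):
+        # with force_procs=16 and force_threads=2, a name such as
+        # SMS.f19_g16.A.mymachine_gnu becomes SMS_P16x2.f19_g16.A.mymachine_gnu.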
+ if force_procs or force_threads: + test_names = _translate_test_names_for_new_pecount( + test_names, force_procs, force_threads + ) + + self._no_setup = no_setup + self._no_build = no_build or no_setup or namelists_only + self._no_run = no_run or self._no_build + self._output_root = output_root + # Figure out what project to use + if project is None: + self._project = get_project(machobj=self._machobj) + else: + self._project = project + + # We will not use batch system if user asked for no_batch or if current + # machine is not a batch machine + self._no_batch = no_batch or not self._machobj.has_batch_system() + expect( + not (self._no_batch and self._queue is not None), + "Does not make sense to request a queue without batch system", + ) + + # Determine and resolve test_root + if test_root is not None: + self._test_root = test_root + elif self._output_root is not None: + self._test_root = self._output_root + else: + self._test_root = self._machobj.get_value("CIME_OUTPUT_ROOT") + + if self._project is not None: + self._test_root = self._test_root.replace("$PROJECT", self._project) + + self._test_root = os.path.abspath(self._test_root) + self._test_id = test_id if test_id is not None else get_timestamp() + + self._compiler = ( + self._machobj.get_default_compiler() if compiler is None else compiler + ) + + self._clean = clean + + self._namelists_only = namelists_only + + self._walltime = walltime + + if parallel_jobs is None: + mach_parallel_jobs = self._machobj.get_value("NTEST_PARALLEL_JOBS") + if mach_parallel_jobs is None: + mach_parallel_jobs = self._machobj.get_value("MAX_MPITASKS_PER_NODE") + self._parallel_jobs = min(len(test_names), mach_parallel_jobs) + else: + self._parallel_jobs = parallel_jobs + + logger.info( + "create_test will do up to {} tasks simultaneously".format( + self._parallel_jobs + ) + ) + + self._baseline_cmp_name = ( + baseline_cmp_name # Implies comparison should be done if not None + ) + self._baseline_gen_name = ( + baseline_gen_name # Implies generation should be done if not None + ) + + # Compute baseline_root. Need to set some properties on machobj in order for + # the baseline_root to resolve correctly. 
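+        # (BASELINE_ROOT definitions in config_machines.xml commonly reference
+        # $COMPILER and/or $PROJECT, so both are set before the lookup below.)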
+ self._machobj.set_value("COMPILER", self._compiler) + self._machobj.set_value("PROJECT", self._project) + self._baseline_root = ( + os.path.abspath(baseline_root) + if baseline_root is not None + else self._machobj.get_value("BASELINE_ROOT") + ) + + if baseline_cmp_name or baseline_gen_name: + if self._baseline_cmp_name: + full_baseline_dir = os.path.join( + self._baseline_root, self._baseline_cmp_name + ) + expect( + os.path.isdir(full_baseline_dir), + "Missing baseline comparison directory {}".format( + full_baseline_dir + ), + ) + + # the following is to assure that the existing generate directory is not overwritten + if self._baseline_gen_name: + full_baseline_dir = os.path.join( + self._baseline_root, self._baseline_gen_name + ) + existing_baselines = [] + if skip_tests_with_existing_baselines: + tests_to_skip = [] + for test_name in test_names: + test_baseline = os.path.join(full_baseline_dir, test_name) + if os.path.isdir(test_baseline): + existing_baselines.append(test_baseline) + if allow_baseline_overwrite and run_count == 0: + if self._namelists_only: + clear_folder(os.path.join(test_baseline, "CaseDocs")) + else: + clear_folder(test_baseline) + elif skip_tests_with_existing_baselines: + tests_to_skip.append(test_name) + expect( + allow_baseline_overwrite + or len(existing_baselines) == 0 + or skip_tests_with_existing_baselines, + "Baseline directories already exists {}\n" + "Use -o or --skip-tests-with-existing-baselines to avoid this error".format( + existing_baselines + ), + ) + if skip_tests_with_existing_baselines: + test_names = [ + test for test in test_names if test not in tests_to_skip + ] + + if self._config.sort_tests: + _order_tests_by_runtime(test_names, self._baseline_root) + + # This is the only data that multiple threads will simultaneously access + # Each test has it's own value and setting/retrieving items from a dict + # is atomic, so this should be fine to use without mutex. + # name -> (phase, status) + self._tests = OrderedDict() + for test_name in test_names: + self._tests[test_name] = (TEST_START, TEST_PASS_STATUS) + + # Oversubscribe by 1/4 + if proc_pool is None: + pes = int(self._machobj.get_value("MAX_TASKS_PER_NODE")) + self._proc_pool = int(pes * 1.25) + else: + self._proc_pool = int(proc_pool) + + logger.info( + "create_test will use up to {} cores simultaneously".format(self._proc_pool) + ) + + self._procs_avail = self._proc_pool + + # Setup phases + self._phases = list(PHASES) + if self._no_setup: + self._phases.remove(SETUP_PHASE) + if self._no_build: + self._phases.remove(SHAREDLIB_BUILD_PHASE) + self._phases.remove(MODEL_BUILD_PHASE) + if self._no_run: + self._phases.remove(RUN_PHASE) + + if use_existing: + for test in self._tests: + with TestStatus(self._get_test_dir(test)) as ts: + if force_rebuild: + ts.set_status(SHAREDLIB_BUILD_PHASE, TEST_PEND_STATUS) + + for phase, status in ts: + if phase in CORE_PHASES: + if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]: + if status == TEST_FAIL_STATUS: + # Import for potential subsequent waits + ts.set_status( + phase, TEST_PEND_STATUS, TEST_RERUN_COMMENT + ) + + # We need to pick up here + break + + else: + if phase != SUBMIT_PHASE: + # Somewhat subtle. 
Create_test considers submit/run to be the run phase, + # so don't try to update test status for a passed submit phase + self._update_test_status( + test, phase, TEST_PEND_STATUS + ) + self._update_test_status(test, phase, status) + + if phase == RUN_PHASE: + logger.info( + "Test {} passed and will not be re-run".format( + test + ) + ) + + logger.info( + "Using existing test directory {}".format(self._get_test_dir(test)) + ) + else: + # None of the test directories should already exist. + for test in self._tests: + expect( + not os.path.exists(self._get_test_dir(test)), + "Cannot create new case in directory '{}', it already exists." + " Pick a different test-id".format(self._get_test_dir(test)), + ) + logger.info( + "Creating test directory {}".format(self._get_test_dir(test)) + ) + + # Setup build groups + if single_exe: + self._build_groups = [tuple(self._tests.keys())] + elif self._config.share_exes: + # Any test that's in a shared-enabled suite with other tests should share exes + self._build_groups = get_build_groups(self._tests) + else: + self._build_groups = [(item,) for item in self._tests] + + # Build group to exeroot map + self._build_group_exeroots = {} + for build_group in self._build_groups: + self._build_group_exeroots[build_group] = None + + logger.debug("Build groups are:") + for build_group in self._build_groups: + for test_name in build_group: + logger.debug( + "{}{}".format( + " " if test_name == build_group[0] else " ", test_name + ) + ) + + self._chksum = chksum + # By the end of this constructor, this program should never hard abort, + # instead, errors will be placed in the TestStatus files for the various + # tests cases + + ########################################################################### + def get_testnames(self): + ########################################################################### + return list(self._tests.keys()) + + ########################################################################### + def _log_output(self, test, output): + ########################################################################### + test_dir = self._get_test_dir(test) + if not os.path.isdir(test_dir): + # Note: making this directory could cause create_newcase to fail + # if this is run before. 
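+            # We create it anyway so that output from an early failure (for
+            # example, a create_newcase error) still has somewhere to be logged.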
+ os.makedirs(test_dir) + append_testlog(output, caseroot=test_dir) + + ########################################################################### + def _get_case_id(self, test): + ########################################################################### + baseline_action_code = "" + if self._baseline_gen_name: + baseline_action_code += "G" + if self._baseline_cmp_name: + baseline_action_code += "C" + if len(baseline_action_code) > 0: + return "{}.{}.{}".format(test, baseline_action_code, self._test_id) + else: + return "{}.{}".format(test, self._test_id) + + ########################################################################### + def _get_test_dir(self, test): + ########################################################################### + return os.path.join(self._test_root, self._get_case_id(test)) + + ########################################################################### + def _get_test_data(self, test): + ########################################################################### + # Must be atomic + return self._tests[test] + + ########################################################################### + def _is_broken(self, test): + ########################################################################### + status = self._get_test_status(test) + return status != TEST_PASS_STATUS and status != TEST_PEND_STATUS + + ########################################################################### + def _work_remains(self, test): + ########################################################################### + test_phase, test_status = self._get_test_data(test) + return ( + test_status == TEST_PASS_STATUS or test_status == TEST_PEND_STATUS + ) and test_phase != self._phases[-1] + + ########################################################################### + def _get_test_status(self, test, phase=None): + ########################################################################### + curr_phase, curr_status = self._get_test_data(test) + if phase is None or phase == curr_phase: + return curr_status + else: + # Assume all future phases are PEND + if phase is not None and self._phases.index(phase) > self._phases.index( + curr_phase + ): + return TEST_PEND_STATUS + + # Assume all older phases PASSed + return TEST_PASS_STATUS + + ########################################################################### + def _get_test_phase(self, test): + ########################################################################### + return self._get_test_data(test)[0] + + ########################################################################### + def _update_test_status(self, test, phase, status): + ########################################################################### + phase_idx = self._phases.index(phase) + old_phase, old_status = self._get_test_data(test) + + if old_phase == phase: + expect( + old_status == TEST_PEND_STATUS, + "Only valid to transition from PEND to something else, found '{}' for phase '{}'".format( + old_status, phase + ), + ) + expect(status != TEST_PEND_STATUS, "Cannot transition from PEND -> PEND") + else: + expect( + old_status == TEST_PASS_STATUS, + "Why did we move on to next phase when prior phase did not pass?", + ) + expect( + status == TEST_PEND_STATUS, "New phase should be set to pending status" + ) + expect( + self._phases.index(old_phase) == phase_idx - 1, + "Skipped phase? 
{} {}".format(old_phase, phase_idx), + ) + + # Must be atomic + self._tests[test] = (phase, status) + + ########################################################################### + def _shell_cmd_for_phase(self, test, cmd, phase, from_dir=None): + ########################################################################### + env = os.environ.copy() + env["PYTHONPATH"] = f"{get_cime_root()}:{get_tools_path()}" + + while True: + rc, output, errput = run_cmd(cmd, from_dir=from_dir, env=env) + if rc != 0: + self._log_output( + test, + "{} FAILED for test '{}'.\nCommand: {}\nOutput: {}\n".format( + phase, test, cmd, output + "\n" + errput + ), + ) + # Temporary hack to get around odd file descriptor use by + # buildnml scripts. + if "bad interpreter" in output: + time.sleep(1) + continue + else: + return False, errput + else: + # We don't want "RUN PASSED" in the TestStatus.log if the only thing that + # succeeded was the submission. + phase = "SUBMIT" if phase == RUN_PHASE else phase + self._log_output( + test, + "{} PASSED for test '{}'.\nCommand: {}\nOutput: {}\n".format( + phase, test, cmd, output + "\n" + errput + ), + ) + return True, errput + + ########################################################################### + def _create_newcase_phase(self, test): + ########################################################################### + test_dir = self._get_test_dir(test) + + _, case_opts, grid, compset, machine, compiler, test_mods = parse_test_name( + test + ) + + os.environ["FROM_CREATE_TEST"] = "True" + create_newcase_cmd = "{} {} --case {} --res {} --compset {} --test".format( + sys.executable, + os.path.join(self._cime_root, "CIME", "scripts", "create_newcase.py"), + test_dir, + grid, + compset, + ) + + if machine is not None: + create_newcase_cmd += " --machine {}".format(machine) + if compiler is not None: + create_newcase_cmd += " --compiler {}".format(compiler) + if self._project is not None: + create_newcase_cmd += " --project {} ".format(self._project) + if self._output_root is not None: + create_newcase_cmd += " --output-root {} ".format(self._output_root) + if self._input_dir is not None: + create_newcase_cmd += " --input-dir {} ".format(self._input_dir) + if self._non_local: + create_newcase_cmd += " --non-local" + if self._workflow: + create_newcase_cmd += " --workflow {}".format(self._workflow) + if self._pesfile is not None: + create_newcase_cmd += " --pesfile {} ".format(self._pesfile) + + create_newcase_cmd += f" --srcroot {get_src_root()}" + + mpilib = None + ninst = 1 + ncpl = 1 + driver = self._cime_driver + if case_opts is not None: + for case_opt in case_opts: # pylint: disable=not-an-iterable + if case_opt.startswith("M"): + mpilib = case_opt[1:] + create_newcase_cmd += " --mpilib {}".format(mpilib) + logger.debug(" MPILIB set to {}".format(mpilib)) + elif case_opt.startswith("N"): + expect(ncpl == 1, "Cannot combine _C and _N options") + ninst = case_opt[1:] + create_newcase_cmd += " --ninst {}".format(ninst) + logger.debug(" NINST set to {}".format(ninst)) + elif case_opt.startswith("C"): + expect(ninst == 1, "Cannot combine _C and _N options") + ncpl = case_opt[1:] + create_newcase_cmd += " --ninst {} --multi-driver".format(ncpl) + logger.debug(" NCPL set to {}".format(ncpl)) + elif case_opt.startswith("P"): + pesize = case_opt[1:] + create_newcase_cmd += " --pecount {}".format(pesize) + elif case_opt.startswith("V"): + driver = case_opt[1:] + + create_newcase_cmd += " --driver {}".format(driver) + + if ( + "--ninst" in create_newcase_cmd + and not 
"--multi-driver" in create_newcase_cmd + ): + if "--driver nuopc" in create_newcase_cmd or ( + "--driver" not in create_newcase_cmd and driver == "nuopc" + ): + expect(False, "_N option not supported by nuopc driver, use _C instead") + + if test_mods is not None: + create_newcase_cmd += " --user-mods-dir " + + try: + test_mods_paths = find_test_mods(self._cime_driver, test_mods) + except CIMEError as e: + error = f"{e}" + + self._log_output(test, error) + + return False, error + else: + test_mods_paths = " ".join(test_mods_paths) + + create_newcase_cmd += f"{test_mods_paths}" + + # create_test mpilib option overrides default but not explicitly set case_opt mpilib + if mpilib is None and self._mpilib is not None: + create_newcase_cmd += " --mpilib {}".format(self._mpilib) + logger.debug(" MPILIB set to {}".format(self._mpilib)) + + if self._queue is not None: + create_newcase_cmd += " --queue={}".format(self._queue) + else: + # We need to hard code the queue for this test on cheyenne + # otherwise it runs in share and fails intermittently + test_case = parse_test_name(test)[0] + if test_case == "NODEFAIL": + machine = ( + machine if machine is not None else self._machobj.get_machine_name() + ) + if machine == "cheyenne": + create_newcase_cmd += " --queue=regular" + + if self._walltime is not None: + create_newcase_cmd += " --walltime {}".format(self._walltime) + else: + # model specific ways of setting time + if self._config.sort_tests: + recommended_time = _get_time_est(test, self._baseline_root) + + if recommended_time is not None: + create_newcase_cmd += " --walltime {}".format(recommended_time) + + else: + if ( + test in self._test_data + and "options" in self._test_data[test] + and "wallclock" in self._test_data[test]["options"] + ): + create_newcase_cmd += " --walltime {}".format( + self._test_data[test]["options"]["wallclock"] + ) + if ( + test in self._test_data + and "options" in self._test_data[test] + and "workflow" in self._test_data[test]["options"] + ): + create_newcase_cmd += " --workflow {}".format( + self._test_data[test]["options"]["workflow"] + ) + + logger.debug("Calling create_newcase: " + create_newcase_cmd) + return self._shell_cmd_for_phase(test, create_newcase_cmd, CREATE_NEWCASE_PHASE) + + ########################################################################### + def _xml_phase(self, test): + ########################################################################### + test_case, case_opts, _, _, _, compiler, _ = parse_test_name(test) + + # Create, fill and write an envtest object + test_dir = self._get_test_dir(test) + envtest = EnvTest(test_dir) + + # Find driver. It may be different for the current test if V testopt is used + driver = self._cime_driver + if case_opts is not None: + for case_opt in case_opts: # pylint: disable=not-an-iterable + if case_opt.startswith("V"): + driver = case_opt[1:] + + # Determine list of component classes that this coupler/driver knows how + # to deal with. This list follows the same order as compset longnames follow. 
+ files = Files(comp_interface=driver) + ufs_driver = os.environ.get("UFS_DRIVER") + attribute = None + if ufs_driver: + attribute = {"component": ufs_driver} + + drv_config_file = files.get_value("CONFIG_CPL_FILE", attribute=attribute) + + if driver == "nuopc" and not os.path.exists(drv_config_file): + drv_config_file = files.get_value("CONFIG_CPL_FILE", {"component": "cpl"}) + expect( + os.path.exists(drv_config_file), + "File {} not found, cime driver {}".format(drv_config_file, driver), + ) + + drv_comp = Component(drv_config_file, "CPL") + + envtest.add_elements_by_group(files, {}, "env_test.xml") + envtest.add_elements_by_group(drv_comp, {}, "env_test.xml") + envtest.set_value("TESTCASE", test_case) + envtest.set_value("TEST_TESTID", self._test_id) + envtest.set_value("CASEBASEID", test) + memleak_tolerance = self._machobj.get_value( + "TEST_MEMLEAK_TOLERANCE", resolved=False + ) + if ( + test in self._test_data + and "options" in self._test_data[test] + and "memleak_tolerance" in self._test_data[test]["options"] + ): + memleak_tolerance = self._test_data[test]["options"]["memleak_tolerance"] + + envtest.set_value( + "TEST_MEMLEAK_TOLERANCE", + 0.10 if memleak_tolerance is None else memleak_tolerance, + ) + + test_argv = "-testname {} -testroot {}".format(test, self._test_root) + if self._baseline_gen_name: + test_argv += " -generate {}".format(self._baseline_gen_name) + basegen_case_fullpath = os.path.join( + self._baseline_root, self._baseline_gen_name, test + ) + logger.debug("basegen_case is {}".format(basegen_case_fullpath)) + envtest.set_value("BASELINE_NAME_GEN", self._baseline_gen_name) + envtest.set_value( + "BASEGEN_CASE", os.path.join(self._baseline_gen_name, test) + ) + if self._baseline_cmp_name: + test_argv += " -compare {}".format(self._baseline_cmp_name) + envtest.set_value("BASELINE_NAME_CMP", self._baseline_cmp_name) + envtest.set_value( + "BASECMP_CASE", os.path.join(self._baseline_cmp_name, test) + ) + + envtest.set_value("TEST_ARGV", test_argv) + envtest.set_value("CLEANUP", self._clean) + + envtest.set_value("BASELINE_ROOT", self._baseline_root) + envtest.set_value("GENERATE_BASELINE", self._baseline_gen_name is not None) + envtest.set_value("COMPARE_BASELINE", self._baseline_cmp_name is not None) + envtest.set_value( + "CCSM_CPRNC", self._machobj.get_value("CCSM_CPRNC", resolved=False) + ) + tput_tolerance = self._machobj.get_value("TEST_TPUT_TOLERANCE", resolved=False) + if ( + test in self._test_data + and "options" in self._test_data[test] + and "tput_tolerance" in self._test_data[test]["options"] + ): + tput_tolerance = self._test_data[test]["options"]["tput_tolerance"] + + envtest.set_value( + "TEST_TPUT_TOLERANCE", 0.25 if tput_tolerance is None else tput_tolerance + ) + + # Add the test instructions from config_test to env_test in the case + config_test = Tests() + testnode = config_test.get_test_node(test_case) + envtest.add_test(testnode) + + if compiler == "nag": + envtest.set_value("FORCE_BUILD_SMP", "FALSE") + + # Determine case_opts from the test_case + if case_opts is not None: + logger.debug("case_opts are {} ".format(case_opts)) + for opt in case_opts: # pylint: disable=not-an-iterable + + logger.debug("case_opt is {}".format(opt)) + if opt == "D": + envtest.set_test_parameter("DEBUG", "TRUE") + logger.debug(" DEBUG set to TRUE") + + elif opt == "E": + envtest.set_test_parameter("USE_ESMF_LIB", "TRUE") + logger.debug(" USE_ESMF_LIB set to TRUE") + + elif opt == "CG": + envtest.set_test_parameter("CALENDAR", "GREGORIAN") + logger.debug(" CALENDAR 
set to {}".format(opt)) + + elif opt.startswith("L"): + match = re.match("L([A-Za-z])([0-9]*)", opt) + stop_option = { + "y": "nyears", + "m": "nmonths", + "d": "ndays", + "h": "nhours", + "s": "nseconds", + "n": "nsteps", + } + opt = match.group(1) + envtest.set_test_parameter("STOP_OPTION", stop_option[opt]) + opti = match.group(2) + envtest.set_test_parameter("STOP_N", opti) + + logger.debug(" STOP_OPTION set to {}".format(stop_option[opt])) + logger.debug(" STOP_N set to {}".format(opti)) + + elif opt.startswith("R"): + # R option is for testing in PTS_MODE or Single Column Model + # (SCM) mode + envtest.set_test_parameter("PTS_MODE", "TRUE") + + # For PTS_MODE, set all tasks and threads to 1 + comps = ["ATM", "LND", "ICE", "OCN", "CPL", "GLC", "ROF", "WAV"] + + for comp in comps: + envtest.set_test_parameter("NTASKS_" + comp, "1") + envtest.set_test_parameter("NTHRDS_" + comp, "1") + envtest.set_test_parameter("ROOTPE_" + comp, "0") + envtest.set_test_parameter("PIO_TYPENAME", "netcdf") + + elif opt.startswith("A"): + # A option is for testing in ASYNC IO mode, only available with nuopc driver and pio2 + envtest.set_test_parameter("PIO_ASYNC_INTERFACE", "TRUE") + expect( + driver == "nuopc", "ASYNC IO mode only works with nuopc driver" + ) + envtest.set_test_parameter("PIO_VERSION", "2") + match = re.match("A([0-9]+)x?([0-9])*", opt) + envtest.set_test_parameter("PIO_NUMTASKS_CPL", match.group(1)) + if match.group(2): + envtest.set_test_parameter("PIO_STRIDE_CPL", match.group(2)) + + elif ( + opt.startswith("I") + or opt.startswith( # Marker to distinguish tests with same name - ignored + "M" + ) + or opt.startswith("P") # handled in create_newcase + or opt.startswith("N") # handled in create_newcase + or opt.startswith("C") # handled in create_newcase + or opt.startswith("V") # handled in create_newcase + or opt.startswith("G") # handled in create_newcase + or opt == "B" # handled in create_newcase + ): # handled in run_phase + pass + + elif opt.startswith("IOP"): + logger.warning("IOP test option not yet implemented") + else: + expect(False, "Could not parse option '{}' ".format(opt)) + + envtest.write() + lock_file("env_run.xml", caseroot=test_dir, newname="env_run.orig.xml") + + with Case(test_dir, read_only=False, non_local=self._non_local) as case: + if self._output_root is None: + self._output_root = case.get_value("CIME_OUTPUT_ROOT") + # if we are running a single test we don't need sharedlibroot + if len(self._tests) > 1 and self._config.common_sharedlibroot: + case.set_value( + "SHAREDLIBROOT", + os.path.join( + self._output_root, "sharedlibroot.{}".format(self._test_id) + ), + ) + envtest.set_initial_values(case) + case.set_value("TEST", True) + if is_perf_test(test): + case.set_value("SAVE_TIMING", True) + else: + case.set_value("SAVE_TIMING", self._save_timing) + + # handle single-exe here, all cases will use the EXEROOT from + # the first case in the build group + is_first_test, _, my_build_group = self._get_build_group(test) + if is_first_test: + expect( + self._build_group_exeroots[my_build_group] is None, + "Should not already have exeroot", + ) + self._build_group_exeroots[my_build_group] = case.get_value("EXEROOT") + else: + build_group_exeroot = self._build_group_exeroots[my_build_group] + expect(build_group_exeroot is not None, "Should already have exeroot") + case.set_value("EXEROOT", build_group_exeroot) + + # Scale back build parallelism on systems with few cores + if self._model_build_cost > self._proc_pool: + case.set_value("GMAKE_J", self._proc_pool) + 
self._model_build_cost = self._proc_pool + + return True, "" + + ########################################################################### + def _setup_phase(self, test): + ########################################################################### + test_dir = self._get_test_dir(test) + rv = self._shell_cmd_for_phase( + test, "./case.setup", SETUP_PHASE, from_dir=test_dir + ) + + # It's OK for this command to fail with baseline diffs but not catastrophically + if rv[0]: + env = os.environ.copy() + env["PYTHONPATH"] = f"{get_cime_root()}:{get_tools_path()}" + cmdstat, output, _ = run_cmd( + "./case.cmpgen_namelists", + combine_output=True, + from_dir=test_dir, + env=env, + ) + try: + expect( + cmdstat in [0, TESTS_FAILED_ERR_CODE], + "Fatal error in case.cmpgen_namelists: {}".format(output), + ) + except Exception: + self._update_test_status_file(test, SETUP_PHASE, TEST_FAIL_STATUS) + raise + + if self._single_exe: + with Case(self._get_test_dir(test), read_only=False) as case: + tests = Tests() + + try: + tests.support_single_exe(case) + except Exception: + self._update_test_status_file(test, SETUP_PHASE, TEST_FAIL_STATUS) + + raise + + return rv + + ########################################################################### + def _sharedlib_build_phase(self, test): + ########################################################################### + is_first_test, first_test, _ = self._get_build_group(test) + if not is_first_test: + if ( + self._get_test_status(first_test, phase=SHAREDLIB_BUILD_PHASE) + == TEST_PASS_STATUS + ): + return True, "" + else: + return False, "Cannot use build for test {} because it failed".format( + first_test + ) + + test_dir = self._get_test_dir(test) + return self._shell_cmd_for_phase( + test, + "./case.build --sharedlib-only", + SHAREDLIB_BUILD_PHASE, + from_dir=test_dir, + ) + + ########################################################################### + def _get_build_group(self, test): + ########################################################################### + for build_group in self._build_groups: + if test in build_group: + return test == build_group[0], build_group[0], build_group + + expect(False, "No build group for test '{}'".format(test)) + + ########################################################################### + def _model_build_phase(self, test): + ########################################################################### + is_first_test, first_test, _ = self._get_build_group(test) + + test_dir = self._get_test_dir(test) + + if not is_first_test: + if ( + self._get_test_status(first_test, phase=MODEL_BUILD_PHASE) + == TEST_PASS_STATUS + ): + with Case(test_dir, read_only=False) as case: + post_build( + case, [], build_complete=True, save_build_provenance=False + ) + + return True, "" + else: + return False, "Cannot use build for test {} because it failed".format( + first_test + ) + + return self._shell_cmd_for_phase( + test, "./case.build --model-only", MODEL_BUILD_PHASE, from_dir=test_dir + ) + + ########################################################################### + def _run_phase(self, test): + ########################################################################### + test_dir = self._get_test_dir(test) + + case_opts = parse_test_name(test)[1] + if ( + case_opts is not None + and "B" in case_opts # pylint: disable=unsupported-membership-test + ): + self._log_output(test, "{} SKIPPED for test '{}'".format(RUN_PHASE, test)) + self._update_test_status_file(test, SUBMIT_PHASE, TEST_PASS_STATUS) + 
self._update_test_status_file(test, RUN_PHASE, TEST_PASS_STATUS) + + return True, "SKIPPED" + else: + cmd = "./case.submit" + if not self._allow_pnl: + cmd += " --skip-preview-namelist" + if self._no_batch: + cmd += " --no-batch" + if self._mail_user: + cmd += " --mail-user={}".format(self._mail_user) + if self._mail_type: + cmd += " -M={}".format(",".join(self._mail_type)) + if self._chksum: + cmd += " --chksum" + + return self._shell_cmd_for_phase(test, cmd, RUN_PHASE, from_dir=test_dir) + + ########################################################################### + def _run_catch_exceptions(self, test, phase, run): + ########################################################################### + try: + return run(test) + except Exception as e: + exc_tb = sys.exc_info()[2] + errput = "Test '{}' failed in phase '{}' with exception '{}'\n".format( + test, phase, str(e) + ) + errput += "".join(traceback.format_tb(exc_tb)) + self._log_output(test, errput) + return False, errput + + ########################################################################### + def _get_procs_needed(self, test, phase, threads_in_flight=None, no_batch=False): + ########################################################################### + """ + Return the number of processors/cores needed to run phase of test. + + Returns None if the phase of this test is currently ineligible to run. + """ + # For build pools, we must wait for the first case to complete XML, SHAREDLIB, + # and MODEL_BUILD phases before the other cases can do those phases + is_first_test, first_test, _ = self._get_build_group(test) + + if not is_first_test: + build_group_dep_phases = [ + XML_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + ] + if phase in build_group_dep_phases: + if self._get_test_status(first_test, phase=phase) == TEST_PEND_STATUS: + return None # None indicates job is ineligible to run + else: + return 1 + + if phase == RUN_PHASE and (self._no_batch or no_batch): + test_dir = self._get_test_dir(test) + total_pes = EnvMachPes(test_dir, read_only=True).get_value("TOTALPES") + return total_pes + + elif phase == SHAREDLIB_BUILD_PHASE: + if self._config.serialize_sharedlib_builds: + # Will force serialization of sharedlib builds + # TODO - instead of serializing, compute all library configs needed and build + # them all in parallel + for _, _, running_phase in threads_in_flight.values(): + if running_phase == SHAREDLIB_BUILD_PHASE: + return None + + return 1 + elif phase == MODEL_BUILD_PHASE: + # Model builds now happen in parallel + return self._model_build_cost + else: + return 1 + + ########################################################################### + def _wait_for_something_to_finish(self, threads_in_flight): + ########################################################################### + expect(len(threads_in_flight) <= self._parallel_jobs, "Oversubscribed?") + finished_tests = [] + while not finished_tests: + for test, thread_info in threads_in_flight.items(): + if not thread_info[0].is_alive(): + finished_tests.append((test, thread_info[1])) + + if not finished_tests: + time.sleep(0.2) + + for finished_test, procs_needed in finished_tests: + self._procs_avail += procs_needed + del threads_in_flight[finished_test] + + ########################################################################### + def _update_test_status_file(self, test, test_phase, status): + ########################################################################### + """ + In general, test_scheduler should not be responsible for updating + the 
TestStatus file, but there are a few cases where it has to. + """ + test_dir = self._get_test_dir(test) + with TestStatus(test_dir=test_dir, test_name=test) as ts: + ts.set_status(test_phase, status) + + ########################################################################### + def _consumer(self, test, test_phase, phase_method): + ########################################################################### + before_time = time.time() + success, errors = self._run_catch_exceptions(test, test_phase, phase_method) + elapsed_time = time.time() - before_time + status = ( + ( + TEST_PEND_STATUS + if test_phase == RUN_PHASE and not self._no_batch + else TEST_PASS_STATUS + ) + if success + else TEST_FAIL_STATUS + ) + + if status != TEST_PEND_STATUS: + self._update_test_status(test, test_phase, status) + + if not self._work_remains(test): + self._completed_tests += 1 + total = len(self._tests) + status_str = "Finished {} for test {} in {:f} seconds ({}). [COMPLETED {:d} of {:d}]".format( + test_phase, test, elapsed_time, status, self._completed_tests, total + ) + else: + status_str = "Finished {} for test {} in {:f} seconds ({})".format( + test_phase, test, elapsed_time, status + ) + + if not success: + status_str += "\n Case dir: {}\n".format(self._get_test_dir(test)) + status_str += " Errors were:\n {}\n".format( + "\n ".join(errors.splitlines()) + ) + + logger.info(status_str) + + is_first_test = self._get_build_group(test)[0] + + if test_phase in [CREATE_NEWCASE_PHASE, XML_PHASE] or ( + not is_first_test + and test_phase in [SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE] + ): + # These are the phases for which TestScheduler is reponsible for + # updating the TestStatus file + self._update_test_status_file(test, test_phase, status) + + if test_phase == XML_PHASE: + append_status( + "Case Created using: " + " ".join(sys.argv), + "README.case", + caseroot=self._get_test_dir(test), + ) + + # On batch systems, we want to immediately submit to the queue, because + # it's very cheap to submit and will get us a better spot in line + if ( + success + and not self._no_run + and not self._no_batch + and test_phase == MODEL_BUILD_PHASE + ): + logger.info( + "Starting {} for test {} with 1 proc on interactive node and {:d} procs on compute nodes".format( + RUN_PHASE, + test, + self._get_procs_needed(test, RUN_PHASE, no_batch=True), + ) + ) + self._update_test_status(test, RUN_PHASE, TEST_PEND_STATUS) + self._consumer(test, RUN_PHASE, self._run_phase) + + ########################################################################### + def _producer_indv_test_launch(self, test, threads_in_flight): + ########################################################################### + """ + Launch the next phase of test if possible. 
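+ Eligibility is decided by _get_procs_needed: None means the phase cannot run yet,
+ a request larger than the entire processor pool immediately fails the RUN phase,
+ and otherwise the phase is started once enough processors are free.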
Return True if launched + """ + test_phase, test_status = self._get_test_data(test) + expect(test_status != TEST_PEND_STATUS, test) + next_phase = self._phases[self._phases.index(test_phase) + 1] + procs_needed = self._get_procs_needed(test, next_phase, threads_in_flight) + + if procs_needed is None: + # This test cannot run now so skip + return False + + elif procs_needed > self._proc_pool: + # This test is asking for more than we can ever provide + # This should only ever happen for RUN_PHASE + msg = f"Test {test} phase {next_phase} requested more ({procs_needed}) than entire pool (self._proc_pool)" + expect(next_phase == RUN_PHASE, msg) + + # CIME phase won't be run, so we need to update TEST_STATUS ourselves + self._update_test_status_file(test, SUBMIT_PHASE, TEST_PASS_STATUS) + self._update_test_status_file(test, RUN_PHASE, TEST_FAIL_STATUS) + + # Update our internal state that this test failed + self._update_test_status(test, next_phase, TEST_PEND_STATUS) + self._update_test_status(test, next_phase, TEST_FAIL_STATUS) + + logger.warning(msg) + self._log_output(test, msg) + + # We did run the phase in some sense in that we instantly failed it + return True + + elif procs_needed <= self._procs_avail: + # We can run this test! + self._procs_avail -= procs_needed + + # Necessary to print this way when multiple threads printing + logger.info( + f"Starting {next_phase} for test {test} with {procs_needed} procs" + ) + + self._update_test_status(test, next_phase, TEST_PEND_STATUS) + phase_method = getattr(self, f"_{next_phase.lower()}_phase") + new_thread = threading.Thread( + target=self._consumer, + args=(test, next_phase, phase_method), + ) + threads_in_flight[test] = (new_thread, procs_needed, next_phase) + new_thread.start() + + logger.debug(" Current workload:") + total_procs = 0 + for the_test, the_data in threads_in_flight.items(): + logger.debug(f" {the_test}: {the_data[2]} -> {the_data[1]}") + total_procs += the_data[1] + + logger.debug(f" Total procs in use: {total_procs}") + + return True + + else: + # There aren't enough free procs to run this phase, so skip + return False + + ########################################################################### + def _producer(self): + ########################################################################### + threads_in_flight = {} # test-name -> (thread, procs, phase) + while True: + work_to_do = False + num_threads_launched_this_iteration = 0 + for test in self._tests: + logger.debug("test_name: " + test) + + if self._work_remains(test): + work_to_do = True + + # If we have no workers available, immediately break out of loop so we can wait + if len(threads_in_flight) == self._parallel_jobs: + break + + # Check if this test is already running a phase. If so, we can't + # launch a new phase now. 
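+ # (threads_in_flight holds at most one entry per test, test-name -> (thread, procs, phase),
+ # so a test never runs two phases at once.)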
+ if test not in threads_in_flight: + launched = self._producer_indv_test_launch( + test, threads_in_flight + ) + if launched: + num_threads_launched_this_iteration += 1 + + if not work_to_do: + break + + if num_threads_launched_this_iteration == 0: + # No free resources, wait for something in flight to finish + self._wait_for_something_to_finish(threads_in_flight) + + for unfinished_thread, _, _ in threads_in_flight.values(): + unfinished_thread.join() + + ########################################################################### + def _setup_cs_files(self): + ########################################################################### + try: + template_path = get_template_path() + + create_cs_status(test_root=self._test_root, test_id=self._test_id) + + template_file = os.path.join(template_path, "cs.submit.template") + template = open(template_file, "r").read() + setup_cmd = "./case.setup" if self._no_setup else ":" + build_cmd = "./case.build" if self._no_build else ":" + test_cmd = "./case.submit" + template = ( + template.replace("", setup_cmd) + .replace("", build_cmd) + .replace("", test_cmd) + .replace("", self._test_id) + ) + + if self._no_run: + cs_submit_file = os.path.join( + self._test_root, "cs.submit.{}".format(self._test_id) + ) + with open(cs_submit_file, "w") as fd: + fd.write(template) + os.chmod( + cs_submit_file, + os.stat(cs_submit_file).st_mode | stat.S_IXUSR | stat.S_IXGRP, + ) + + if self._config.use_testreporter_template: + template_file = os.path.join(template_path, "testreporter.template") + template = open(template_file, "r").read() + template = template.replace("", get_tools_path()) + testreporter_file = os.path.join(self._test_root, "testreporter") + with open(testreporter_file, "w") as fd: + fd.write(template) + os.chmod( + testreporter_file, + os.stat(testreporter_file).st_mode | stat.S_IXUSR | stat.S_IXGRP, + ) + + except Exception as e: + logger.warning("FAILED to set up cs files: {}".format(str(e))) + + ########################################################################### + def run_tests( + self, + wait=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_diffs=False, + ignore_memleak=False, + ): + ########################################################################### + """ + Main API for this class. + + Return True if all tests passed. + """ + start_time = time.time() + + # Tell user what will be run + logger.info("RUNNING TESTS:") + for test in self._tests: + logger.info(" {}".format(test)) + + # Setup cs files + self._setup_cs_files() + + GenericXML.DISABLE_CACHING = True + self._producer() + GenericXML.DISABLE_CACHING = False + + expect(threading.active_count() == 1, "Leftover threads?") + + config = Config.instance() + + # Copy TestStatus files to baselines for tests that have already failed. 
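+ # This only applies when baselines are being generated; tests that passed or are
+ # still pending are left alone.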
+ if config.baseline_store_teststatus: + for test in self._tests: + status = self._get_test_data(test)[1] + if ( + status not in [TEST_PASS_STATUS, TEST_PEND_STATUS] + and self._baseline_gen_name + ): + basegen_case_fullpath = os.path.join( + self._baseline_root, self._baseline_gen_name, test + ) + test_dir = self._get_test_dir(test) + generate_teststatus(test_dir, basegen_case_fullpath) + + no_need_to_wait = self._no_run or self._no_batch + if no_need_to_wait: + wait = False + + expect_test_complete = not self._no_run and (self._no_batch or wait) + + logger.info("Waiting for tests to finish") + rv = wait_for_tests( + glob.glob( + os.path.join(self._test_root, "*{}/TestStatus".format(self._test_id)) + ), + no_wait=not wait, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_diffs=ignore_diffs, + ignore_memleak=ignore_memleak, + no_run=self._no_run, + expect_test_complete=expect_test_complete, + ) + + if not no_need_to_wait and not wait: + logger.info( + "Due to presence of batch system, create_test will exit before tests are complete.\n" + "To force create_test to wait for full completion, use --wait" + ) + + logger.info("test-scheduler took {} seconds".format(time.time() - start_time)) + + return rv diff --git a/CIME/test_status.py b/CIME/test_status.py new file mode 100644 index 00000000000..93b2c34a032 --- /dev/null +++ b/CIME/test_status.py @@ -0,0 +1,620 @@ +""" +Contains the crucial TestStatus class which manages phase-state of a test +case and ensure that this state is represented by the TestStatus file in +the case. + +TestStatus objects are only modifiable via the set_status method and this +is only allowed if the object is being accessed within the context of a +context manager. Example: + + with TestStatus(test_dir=caseroot) as ts: + ts.set_status(RUN_PHASE, TEST_PASS_STATUS) + +This file also contains all of the hardcoded phase information which includes +the phase names, phase orders, potential phase states, and which phases are +required (core phases). + +Additional important design decisions: +1) In order to ensure that incomplete tests are always left in a PEND + state, updating a core phase to a PASS state will automatically set the next + core state to PEND. +2) If the user repeats a core state, that invalidates all subsequent state. For + example, if a user rebuilds their case, then any of the post-run states like the + RUN state are no longer valid. +""" + +from CIME.XML.standard_module_setup import * +import os, itertools +from CIME import expected_fails + +TEST_STATUS_FILENAME = "TestStatus" + +# The statuses that a phase can be in +TEST_PEND_STATUS = "PEND" +TEST_PASS_STATUS = "PASS" +TEST_FAIL_STATUS = "FAIL" + +ALL_PHASE_STATUSES = [TEST_PEND_STATUS, TEST_PASS_STATUS, TEST_FAIL_STATUS] + +# Special statuses that the overall test can be in +TEST_DIFF_STATUS = "DIFF" # Implies a failure in the BASELINE phase +NAMELIST_FAIL_STATUS = "NLFAIL" # Implies a failure in the NLCOMP phase + +# Special strings that can appear in comments, indicating particular types of failures +TEST_NO_BASELINES_COMMENT = "BFAIL" # Implies baseline directory is missing in the +# baseline comparison phase +TEST_RERUN_COMMENT = "RERUN" # Added to a PEND status to indicate that the test +# system has changed this phase to PEND in order to +# rerun it (e.g., to retry a failed test). 
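+# A line in the TestStatus file has the form "<STATUS> <test name> <phase> [comments]".
+# For example (hypothetical test name), a rerun of the RUN phase would appear as:
+#   PEND ERS.f19_g16.A.mymachine_gnu RUN RERUN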
+# The expected and unexpected failure comments aren't used directly in this module, but +# are included here for symmetry, so other modules can access them from here. +TEST_EXPECTED_FAILURE_COMMENT = expected_fails.EXPECTED_FAILURE_COMMENT +TEST_UNEXPECTED_FAILURE_COMMENT_START = expected_fails.UNEXPECTED_FAILURE_COMMENT_START + +# The valid phases +CREATE_NEWCASE_PHASE = "CREATE_NEWCASE" +XML_PHASE = "XML" +SETUP_PHASE = "SETUP" +NAMELIST_PHASE = "NLCOMP" +SHAREDLIB_BUILD_PHASE = "SHAREDLIB_BUILD" +MODEL_BUILD_PHASE = "MODEL_BUILD" +SUBMIT_PHASE = "SUBMIT" +RUN_PHASE = "RUN" +THROUGHPUT_PHASE = "TPUTCOMP" +MEMCOMP_PHASE = "MEMCOMP" +MEMLEAK_PHASE = "MEMLEAK" +STARCHIVE_PHASE = "SHORT_TERM_ARCHIVER" +COMPARE_PHASE = "COMPARE" # This is one special, real phase will be COMPARE_$WHAT, this is for internal test comparisons, there could be multiple variations of this phase in one test +BASELINE_PHASE = "BASELINE" +GENERATE_PHASE = "GENERATE" + +ALL_PHASES = [ + CREATE_NEWCASE_PHASE, + XML_PHASE, + SETUP_PHASE, + NAMELIST_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + SUBMIT_PHASE, + RUN_PHASE, + COMPARE_PHASE, + BASELINE_PHASE, + THROUGHPUT_PHASE, + MEMCOMP_PHASE, + MEMLEAK_PHASE, + STARCHIVE_PHASE, + GENERATE_PHASE, +] + +# These are mandatory phases that a test must go through +CORE_PHASES = [ + CREATE_NEWCASE_PHASE, + XML_PHASE, + SETUP_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + SUBMIT_PHASE, + RUN_PHASE, +] + + +def _test_helper1(file_contents): + ts = TestStatus(test_dir="/", test_name="ERS.foo.A") + ts._parse_test_status(file_contents) # pylint: disable=protected-access + return ts._phase_statuses # pylint: disable=protected-access + + +def _test_helper2( + file_contents, + wait_for_run=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_diffs=False, + no_run=False, + no_perm=False, +): + lines = file_contents.splitlines() + rv = None + perms = [lines] if no_perm else itertools.permutations(lines) + for perm in perms: + ts = TestStatus(test_dir="/", test_name="ERS.foo.A") + ts._parse_test_status("\n".join(perm)) # pylint: disable=protected-access + the_status = ts.get_overall_test_status( + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_diffs=ignore_diffs, + no_run=no_run, + ) + if rv is not None and the_status != rv: + return "{} != {}".format(rv, the_status) + else: + rv = the_status + + return rv + + +class TestStatus(object): + def __init__(self, test_dir=None, test_name=None, no_io=False): + """ + Create a TestStatus object + + If test_dir is not specified, it is set to the current working directory + + no_io is intended only for testing, and should be kept False in + production code + """ + test_dir = os.getcwd() if test_dir is None else test_dir + self._filename = os.path.join(test_dir, TEST_STATUS_FILENAME) + self._phase_statuses = {} # {name -> (status, comments)} + self._test_name = test_name + self._ok_to_modify = False + self._no_io = no_io + + if os.path.exists(self._filename): + self._parse_test_status_file() + if not os.access(self._filename, os.W_OK): + self._no_io = True + else: + expect( + test_name is not None, + "Must provide test_name if TestStatus file doesn't exist", + ) + + def __enter__(self): + self._ok_to_modify = True + return self + + def __exit__(self, *_): + self._ok_to_modify = False + self.flush() + + def __iter__(self): + for phase, data in self._phase_statuses.items(): + yield phase, data[0] + + def 
__eq__(self, rhs): + return ( + self._phase_statuses == rhs._phase_statuses + ) # pylint: disable=protected-access + + def __ne__(self, rhs): + return not self.__eq__(rhs) + + def get_name(self): + return self._test_name + + def set_status(self, phase, status, comments=""): + """ + Update the status of this test by changing the status of given phase to the + given status. + + >>> with TestStatus(test_dir="/", test_name="ERS.foo.A", no_io=True) as ts: + ... ts.set_status(CREATE_NEWCASE_PHASE, "PASS") + ... ts.set_status(XML_PHASE, "PASS") + ... ts.set_status(SETUP_PHASE, "FAIL") + ... ts.set_status(SETUP_PHASE, "PASS") + ... ts.set_status("{}_base_rest".format(COMPARE_PHASE), "FAIL") + ... ts.set_status(SHAREDLIB_BUILD_PHASE, "PASS", comments='Time=42') + >>> ts._phase_statuses + {'CREATE_NEWCASE': ('PASS', ''), 'XML': ('PASS', ''), 'SETUP': ('PASS', ''), 'SHAREDLIB_BUILD': ('PASS', 'Time=42'), 'COMPARE_base_rest': ('FAIL', ''), 'MODEL_BUILD': ('PEND', '')} + >>> with TestStatus(test_dir="/", test_name="ERS.foo.A", no_io=True) as ts: + ... ts.set_status(CREATE_NEWCASE_PHASE, "PASS") + ... ts.set_status(XML_PHASE, "PASS") + ... ts.set_status(SETUP_PHASE, "FAIL") + ... ts.set_status(SETUP_PHASE, "PASS") + ... ts.set_status(BASELINE_PHASE, "PASS") + ... ts.set_status("{}_base_rest".format(COMPARE_PHASE), "FAIL") + ... ts.set_status(SHAREDLIB_BUILD_PHASE, "PASS", comments='Time=42') + ... ts.set_status(SETUP_PHASE, "PASS") + >>> ts._phase_statuses + {'CREATE_NEWCASE': ('PASS', ''), 'XML': ('PASS', ''), 'SETUP': ('PASS', ''), 'SHAREDLIB_BUILD': ('PEND', '')} + >>> with TestStatus(test_dir="/", test_name="ERS.foo.A", no_io=True) as ts: + ... ts.set_status(CREATE_NEWCASE_PHASE, "FAIL") + >>> ts._phase_statuses + {'CREATE_NEWCASE': ('FAIL', '')} + """ + expect( + self._ok_to_modify, + "TestStatus not in a modifiable state, use 'with' syntax", + ) + expect( + phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), + "Invalid phase '{}'".format(phase), + ) + expect(status in ALL_PHASE_STATUSES, "Invalid status '{}'".format(status)) + + if phase in CORE_PHASES and phase != CORE_PHASES[0]: + previous_core_phase = CORE_PHASES[CORE_PHASES.index(phase) - 1] + # TODO: enable check below + # expect(previous_core_phase in self._phase_statuses, "Core phase '{}' was skipped".format(previous_core_phase)) + + if previous_core_phase in self._phase_statuses: + expect( + self._phase_statuses[previous_core_phase][0] == TEST_PASS_STATUS, + "Cannot move past core phase '{}', it didn't pass: ".format( + previous_core_phase + ), + ) + + reran_phase = ( + phase in self._phase_statuses + and self._phase_statuses[phase][0] != TEST_PEND_STATUS + and phase in CORE_PHASES + ) + if reran_phase: + # All subsequent phases are invalidated + phase_idx = ALL_PHASES.index(phase) + for subsequent_phase in ALL_PHASES[phase_idx + 1 :]: + if subsequent_phase in self._phase_statuses: + del self._phase_statuses[subsequent_phase] + if subsequent_phase.startswith(COMPARE_PHASE): + for stored_phase in list(self._phase_statuses.keys()): + if stored_phase.startswith(COMPARE_PHASE): + del self._phase_statuses[stored_phase] + + self._phase_statuses[phase] = (status, comments) # Can overwrite old phase info + + if ( + status == TEST_PASS_STATUS + and phase in CORE_PHASES + and phase != CORE_PHASES[-1] + ): + next_core_phase = CORE_PHASES[CORE_PHASES.index(phase) + 1] + self._phase_statuses[next_core_phase] = (TEST_PEND_STATUS, "") + + def get_status(self, phase): + return self._phase_statuses[phase][0] if phase in self._phase_statuses else None + 
+ def get_comment(self, phase): + return self._phase_statuses[phase][1] if phase in self._phase_statuses else "" + + def current_is(self, phase, status): + try: + latest = self.get_latest_phase() + except KeyError: + return False + + return latest == phase and self.get_status(phase) == status + + def get_latest_phase(self): + return list(self._phase_statuses.keys())[-1] + + def phase_statuses_dump( + self, prefix="", skip_passes=False, skip_phase_list=None, xfails=None + ): + """ + Args: + prefix: string printed at the start of each line + skip_passes: if True, do not output lines that have a PASS status + skip_phase_list: list of phases (from the phases given by + ALL_PHASES) for which we skip output + xfails: object of type ExpectedFails, giving expected failures for this test + """ + if skip_phase_list is None: + skip_phase_list = [] + if xfails is None: + xfails = expected_fails.ExpectedFails() + result = "" + if self._phase_statuses: + for phase, data in self._phase_statuses.items(): + if phase in skip_phase_list: + continue + status, comments = data + xfail_comment = xfails.expected_fails_comment(phase, status) + if skip_passes: + if status == TEST_PASS_STATUS and not xfail_comment: + # Note that we still print the result of a PASSing test if there + # is a comment related to the expected failure status. Typically + # this will indicate that this is an unexpected PASS (and so + # should be removed from the expected fails list). + continue + result += "{}{} {} {}".format(prefix, status, self._test_name, phase) + if comments: + result += " {}".format(comments) + if xfail_comment: + result += " {}".format(xfail_comment) + result += "\n" + + return result + + def increment_non_pass_counts(self, non_pass_counts): + """ + Increment counts of the number of times given phases did not pass + + non_pass_counts is a dictionary whose keys are phases of + interest and whose values are running counts of the number of + non-passes. This method increments those counts based on results + in the given TestStatus object. + """ + for phase in non_pass_counts: + if phase in self._phase_statuses: + status, _ = self._phase_statuses[phase] + if status != TEST_PASS_STATUS: + non_pass_counts[phase] += 1 + + def flush(self): + if self._phase_statuses and not self._no_io: + with open(self._filename, "w") as fd: + fd.write(self.phase_statuses_dump()) + + def _parse_test_status(self, file_contents): + """ + >>> contents = ''' + ... PASS ERS.foo.A CREATE_NEWCASE + ... PASS ERS.foo.A XML + ... FAIL ERS.foo.A SETUP + ... PASS ERS.foo.A COMPARE_base_rest + ... PASS ERS.foo.A SHAREDLIB_BUILD Time=42 + ... 
''' + >>> _test_helper1(contents) + {'CREATE_NEWCASE': ('PASS', ''), 'XML': ('PASS', ''), 'SETUP': ('FAIL', ''), 'COMPARE_base_rest': ('PASS', ''), 'SHAREDLIB_BUILD': ('PASS', 'Time=42')} + """ + for line in file_contents.splitlines(): + line = line.strip() + tokens = line.split() + if line == "": + pass # skip blank lines + elif len(tokens) >= 3: + status, curr_test_name, phase = tokens[:3] + if self._test_name is None: + self._test_name = curr_test_name + else: + expect( + self._test_name == curr_test_name, + "inconsistent test name in parse_test_status: '{}' != '{}'".format( + self._test_name, curr_test_name + ), + ) + + expect( + status in ALL_PHASE_STATUSES, + "Unexpected status '{}' in parse_test_status for test '{}'".format( + status, self._test_name + ), + ) + expect( + phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), + "phase '{}' not expected in parse_test_status for test '{}'".format( + phase, self._test_name + ), + ) + expect( + phase not in self._phase_statuses, + "Should not have seen multiple instances of phase '{}' for test '{}'".format( + phase, self._test_name + ), + ) + + self._phase_statuses[phase] = (status, " ".join(tokens[3:])) + else: + logging.warning( + "In TestStatus file for test '{}', line '{}' not in expected format".format( + self._test_name, line + ) + ) + + def _parse_test_status_file(self): + with open(self._filename, "r") as fd: + self._parse_test_status(fd.read()) + + def _get_overall_status_based_on_phases( + self, + phases, + wait_for_run=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_diffs=False, + ignore_memleak=False, + no_run=False, + ): + + rv = TEST_PASS_STATUS + run_phase_found = False + phase_responsible_for_status = None + for phase in phases: # ensure correct order of processing phases + if phase in self._phase_statuses: + data = self._phase_statuses[phase] + else: + continue + + status = data[0] + + if ( + phase in CORE_PHASES + and rv in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] + and status != TEST_PEND_STATUS + ): + phase_responsible_for_status = phase + + if phase == RUN_PHASE: + run_phase_found = True + + if phase in [SUBMIT_PHASE, RUN_PHASE] and no_run: + break + + if status == TEST_PEND_STATUS and rv in [ + TEST_PASS_STATUS, + NAMELIST_FAIL_STATUS, + ]: + if not no_run: + rv = TEST_PEND_STATUS + phase_responsible_for_status = phase + break + + elif status == TEST_FAIL_STATUS: + if ( + (not check_throughput and phase == THROUGHPUT_PHASE) + or (not check_memory and phase == MEMCOMP_PHASE) + or (ignore_namelists and phase == NAMELIST_PHASE) + or (ignore_diffs and phase == BASELINE_PHASE) + or (ignore_memleak and phase == MEMLEAK_PHASE) + ): + continue + + if phase == NAMELIST_PHASE: + if rv == TEST_PASS_STATUS: + rv = NAMELIST_FAIL_STATUS + + elif phase in [BASELINE_PHASE, THROUGHPUT_PHASE, MEMCOMP_PHASE]: + if rv in [NAMELIST_FAIL_STATUS, TEST_PASS_STATUS]: + phase_responsible_for_status = phase + # need to further inspect message to determine + # phase status. 
BFAILs need to be a DIFF + if "DIFF" in data[1] or TEST_NO_BASELINES_COMMENT in data[1]: + rv = TEST_DIFF_STATUS + elif "ERROR" in data[1]: + rv = TEST_FAIL_STATUS + else: + rv = TEST_DIFF_STATUS + else: + pass # a DIFF does not trump a FAIL + + elif phase in CORE_PHASES: + phase_responsible_for_status = phase + return TEST_FAIL_STATUS, phase_responsible_for_status + + else: + phase_responsible_for_status = phase + rv = TEST_FAIL_STATUS + + # The test did not fail but the RUN phase was not found, so if the user requested + # that we wait for the RUN phase, then the test must still be considered pending. + if ( + rv in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] + and not run_phase_found + and wait_for_run + ): + phase_responsible_for_status = RUN_PHASE + rv = TEST_PEND_STATUS + + return rv, phase_responsible_for_status + + def get_overall_test_status( + self, + wait_for_run=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_diffs=False, + ignore_memleak=False, + no_run=False, + ): + r""" + Given the current phases and statuses, produce a single results for this test. Preference + is given to PEND since we don't want to stop waiting for a test + that hasn't finished. Namelist diffs are given the lowest precedence. + + >>> _test_helper2('PASS ERS.foo.A RUN') + ('PASS', 'RUN') + >>> _test_helper2('PASS ERS.foo.A SHAREDLIB_BUILD\nPEND ERS.foo.A RUN') + ('PEND', 'RUN') + >>> _test_helper2('FAIL ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN') + ('FAIL', 'MODEL_BUILD') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN') + ('PASS', 'RUN') + >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP') + ('PASS', 'RUN') + >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP', check_throughput=True) + ('DIFF', 'TPUTCOMP') + >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A MEMCOMP', check_memory=True) + ('DIFF', 'MEMCOMP') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') + ('NLFAIL', 'RUN') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') + ('PEND', 'RUN') + >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A MEMCOMP') + ('PASS', 'RUN') + >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP', ignore_namelists=True) + ('PASS', 'RUN') + >>> _test_helper2('PASS ERS.foo.A COMPARE_1\nFAIL ERS.foo.A NLCOMP\nFAIL ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') + ('FAIL', 'COMPARE_2') + >>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nPASS ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') + ('DIFF', 'BASELINE') + >>> _test_helper2('FAIL ERS.foo.A BASELINE\nPASS ERS.foo.A NLCOMP\nPASS ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN', ignore_diffs=True) + ('PASS', 'RUN') + >>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nPASS ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN', ignore_diffs=True) + ('NLFAIL', 'RUN') + >>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nFAIL ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') + ('FAIL', 'COMPARE_2') + >>> _test_helper2('PEND ERS.foo.A COMPARE_2\nFAIL ERS.foo.A RUN') + ('FAIL', 'RUN') + >>> _test_helper2('PEND ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') + ('PEND', 'COMPARE_2') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD') + ('PASS', 'MODEL_BUILD') + >>> _test_helper2('PEND ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN') + ('PEND', 'MODEL_BUILD') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD', wait_for_run=True) + ('PEND', 'RUN') + >>> _test_helper2('FAIL ERS.foo.A MODEL_BUILD', 
wait_for_run=True) + ('FAIL', 'MODEL_BUILD') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN', wait_for_run=True) + ('PEND', 'RUN') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nFAIL ERS.foo.A RUN', wait_for_run=True) + ('FAIL', 'RUN') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN', wait_for_run=True) + ('PASS', 'RUN') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nFAIL ERS.foo.A RUN\nPEND ERS.foo.A COMPARE') + ('FAIL', 'RUN') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN', no_run=True) + ('PASS', 'MODEL_BUILD') + >>> s = '''PASS ERS.foo.A CREATE_NEWCASE + ... PASS ERS.foo.A XML + ... PASS ERS.foo.A SETUP + ... PASS ERS.foo.A SHAREDLIB_BUILD time=454 + ... PASS ERS.foo.A NLCOMP + ... PASS ERS.foo.A MODEL_BUILD time=363 + ... PASS ERS.foo.A SUBMIT + ... PASS ERS.foo.A RUN time=73 + ... PEND ERS.foo.A COMPARE_base_single_thread + ... FAIL ERS.foo.A BASELINE master: DIFF + ... PASS ERS.foo.A TPUTCOMP + ... PASS ERS.foo.A MEMLEAK insuffiencient data for memleak test + ... PASS ERS.foo.A SHORT_TERM_ARCHIVER + ... ''' + >>> _test_helper2(s, no_perm=True) + ('PEND', 'COMPARE_base_single_thread') + >>> s = '''PASS ERS.foo.A CREATE_NEWCASE + ... PASS ERS.foo.A XML + ... PASS ERS.foo.A SETUP + ... PEND ERS.foo.A SHAREDLIB_BUILD + ... FAIL ERS.foo.A NLCOMP + ... ''' + >>> _test_helper2(s, no_run=True) + ('NLFAIL', 'SETUP') + >>> _test_helper2(s, no_run=False) + ('PEND', 'SHAREDLIB_BUILD') + """ + # Core phases take priority + core_rv, phase = self._get_overall_status_based_on_phases( + CORE_PHASES, + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_diffs=ignore_diffs, + ignore_memleak=ignore_memleak, + no_run=no_run, + ) + if core_rv != TEST_PASS_STATUS: + return core_rv, phase + else: + phase_order = list(CORE_PHASES) + phase_order.extend( + [item for item in self._phase_statuses if item not in CORE_PHASES] + ) + + return self._get_overall_status_based_on_phases( + phase_order, + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_diffs=ignore_diffs, + ignore_memleak=ignore_memleak, + no_run=no_run, + ) diff --git a/CIME/test_utils.py b/CIME/test_utils.py new file mode 100644 index 00000000000..d4daa131496 --- /dev/null +++ b/CIME/test_utils.py @@ -0,0 +1,173 @@ +""" +Utility functions used in test_scheduler.py, and by other utilities that need to +get test lists. 
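+The main entry point is get_tests_from_xml, which reads each component's
+TESTS_SPEC_FILE testlist and returns a list of test dictionaries
+(name, machine, compiler, category, options, ...).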
+""" +import glob +from CIME.XML.standard_module_setup import * +from CIME.XML.testlist import Testlist +from CIME.XML.files import Files +from CIME.test_status import TEST_STATUS_FILENAME +import CIME.utils + +logger = logging.getLogger(__name__) + + +def get_tests_from_xml( + xml_machine=None, + xml_category=None, + xml_compiler=None, + xml_testlist=None, + machine=None, + compiler=None, + driver=None, +): + """ + Parse testlists for a list of tests + """ + listoftests = [] + testlistfiles = [] + if machine is not None: + thismach = machine + if compiler is not None: + thiscompiler = compiler + + if xml_testlist is not None: + expect( + os.path.isfile(xml_testlist), + "Testlist not found or not readable " + xml_testlist, + ) + testlistfiles.append(xml_testlist) + else: + files = Files() + comps = files.get_components("TESTS_SPEC_FILE") + for comp in comps: + test_spec_file = files.get_value("TESTS_SPEC_FILE", {"component": comp}) + if os.path.isfile(test_spec_file): + testlistfiles.append(test_spec_file) + # We need to make nuopc the default for cesm testing, then we can remove this block + files = Files(comp_interface="nuopc") + test_spec_file = files.get_value("TESTS_SPEC_FILE", {"component": "drv"}) + if os.path.isfile(test_spec_file): + testlistfiles.append(test_spec_file) + + for testlistfile in testlistfiles: + thistestlistfile = Testlist(testlistfile) + logger.debug("Testlist file is " + testlistfile) + logger.debug( + "xml_machine {} xml_category {} xml_compiler {}".format( + xml_machine, xml_category, xml_compiler + ) + ) + newtests = thistestlistfile.get_tests(xml_machine, xml_category, xml_compiler) + for test in newtests: + if machine is None: + thismach = test["machine"] + if compiler is None: + thiscompiler = test["compiler"] + test["name"] = CIME.utils.get_full_test_name( + test["testname"], + grid=test["grid"], + compset=test["compset"], + machine=thismach, + compiler=thiscompiler, + testmods_string=None if "testmods" not in test else test["testmods"], + ) + if driver: + # override default or specified driver + founddriver = False + for specdriver in ("Vnuopc", "Vmct", "Vmoab"): + if specdriver in test["name"]: + test["name"] = test["name"].replace( + specdriver, "V{}".format(driver) + ) + founddriver = True + if not founddriver: + name = test["name"] + index = name.find(".") + test["name"] = name[:index] + "_V{}".format(driver) + name[index:] + + logger.debug( + "Adding test {} with compiler {}".format(test["name"], test["compiler"]) + ) + listoftests += newtests + logger.debug("Found {:d} tests".format(len(listoftests))) + + return listoftests + + +def test_to_string( + test, category_field_width=0, test_field_width=0, show_options=False +): + """Given a test dictionary, return a string representation suitable for printing + + Args: + test (dict): dictionary for a single test - e.g., one element from the + list returned by get_tests_from_xml + category_field_width (int): minimum amount of space to use for printing the test category + test_field_width (int): minimum amount of space to use for printing the test category + show_options (bool): if True, print test options, too (note that the 'comment' + option is always printed, if present) + + Basic functionality: + >>> mytest = {'name': 'SMS.f19_g16.A.cheyenne_intel', 'category': 'prealpha', 'options': {}} + >>> test_to_string(mytest, 10) + 'prealpha : SMS.f19_g16.A.cheyenne_intel' + + Printing comments: + >>> mytest = {'name': 'SMS.f19_g16.A.cheyenne_intel', 'category': 'prealpha', 'options': {'comment': 'my remarks'}} + 
>>> test_to_string(mytest, 10) + 'prealpha : SMS.f19_g16.A.cheyenne_intel # my remarks' + + Newlines in comments are converted to spaces + >>> mytest = {'name': 'SMS.f19_g16.A.cheyenne_intel', 'category': 'prealpha', 'options': {'comment': 'my\\nremarks'}} + >>> test_to_string(mytest, 10) + 'prealpha : SMS.f19_g16.A.cheyenne_intel # my remarks' + + Printing other options, too: + >>> mytest = {'name': 'SMS.f19_g16.A.cheyenne_intel', 'category': 'prealpha', 'options': {'comment': 'my remarks', 'wallclock': '0:20', 'memleak_tolerance': 0.2}} + >>> test_to_string(mytest, 10, show_options=True) + 'prealpha : SMS.f19_g16.A.cheyenne_intel # my remarks # memleak_tolerance: 0.2 # wallclock: 0:20' + """ + + mystr = "%-*s: %-*s" % ( + category_field_width, + test["category"], + test_field_width, + test["name"], + ) + if "options" in test: + myopts = test["options"].copy() + comment = myopts.pop("comment", None) + if comment: + comment = comment.replace("\n", " ") + mystr += " # {}".format(comment) + if show_options: + for one_opt in sorted(myopts): + mystr += " # {}: {}".format(one_opt, myopts[one_opt]) + + return mystr + + +def get_test_status_files(test_root, compiler, test_id=None): + test_id_glob = ( + "*{}*".format(compiler) + if test_id is None + else "*{}*{}*".format(compiler, test_id) + ) + test_status_files = glob.glob( + "{}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME) + ) + test_status_files = [ + item + for item in test_status_files + if not os.path.dirname(item).endswith("ref1") + and not os.path.dirname(item).endswith("ref2") + ] + + expect( + test_status_files, + "No matching test cases found in for {}/{}/{}".format( + test_root, test_id_glob, TEST_STATUS_FILENAME + ), + ) + return test_status_files diff --git a/CIME/tests/README b/CIME/tests/README new file mode 100644 index 00000000000..9141da2ace8 --- /dev/null +++ b/CIME/tests/README @@ -0,0 +1,50 @@ +The directory structure is expected to stay flat. The following naming schema +should be used to maintain this pattern. + +Unit test should be prefixed with `_unit` while System Tests are prefixed with +`_sys`. + +For example, writing unit tests for CIME/foo.py would map to +CIME/tests/test_unit_foo.py and CIME/SystemTests/bar.py would map to +CIME/tests/test_sys_bar.py. + +There are a few methods you can use to run the unit tests here: + +Both `scripts_regression_tests.py` and `pytest` support the same CLI arguments. +See `--help` using either command. + +- Using `scripts_regression_tests.py` from scripts/tests, run: + + `./scripts_regression_tests.py` + + This runs all the unit and sys tests. + + `./scripts_regression_tests.py test_unit_doctest` + + This runs a specific test file. + + `./scripts_regression_tests.py test_unit_doctest.TestDocs` + + This runs a specific test class. + + `./scripts_regression_tests.py test_unit_doctest.TestDocs.test_lib_docs` + + This runs a specific test case. + +- Using `pytest` from scripts/tests, run: + + `pytest` + + This runs all the unit and sys tests. + + `pytest test_unit_doctest.py` + + This runs a specific test file. + + `pytest test_unit_doctest::TestDocs` + + This runs a specific test class. + + `pytest test_unit_doctest::TestDocs::test_lib_docs` + + This runs a specific test case. 
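+
+Standard `pytest` selection flags also work, for example:
+
+  `pytest -k doctest`
+
+  This runs every test whose name matches the given keyword expression.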
diff --git a/CIME/tests/__init__.py b/CIME/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/CIME/tests/base.py b/CIME/tests/base.py new file mode 100644 index 00000000000..8873924b60a --- /dev/null +++ b/CIME/tests/base.py @@ -0,0 +1,341 @@ +#!/usr/bin/env python3 + +import glob +import os +import tempfile +import time +import signal +import shutil +import stat +import sys +import unittest + +from CIME import utils +from CIME.config import Config +from CIME.XML.machines import Machines + + +def typed_os_environ(key, default_value, expected_type=None): + # Infer type if not explicitly set + dst_type = expected_type or type(default_value) + + value = os.environ.get(key, default_value) + + if value is not None and dst_type == bool: + # Any else is false, might want to be more strict + return value.lower() == "true" if isinstance(value, str) else value + + if value is None: + return None + + return dst_type(value) + + +class BaseTestCase(unittest.TestCase): + # These static values are set when scripts/lib/CIME/tests/scripts_regression_tests.py is called. + MACHINE = None + SCRIPT_DIR = utils.get_scripts_root() + TOOLS_DIR = os.path.join(utils.get_cime_root(), "CIME", "Tools") + TEST_ROOT = None + TEST_COMPILER = None + TEST_MPILIB = None + NO_FORTRAN_RUN = None + FAST_ONLY = None + NO_BATCH = None + NO_CMAKE = None + NO_TEARDOWN = None + GLOBAL_TIMEOUT = None + + def setUp(self): + self._thread_error = None + self._unset_proxy = self.setup_proxy() + self._machine = self.MACHINE.get_machine_name() + self._compiler = ( + self.MACHINE.get_default_compiler() + if self.TEST_COMPILER is None + else self.TEST_COMPILER + ) + self._baseline_name = "fake_testing_only_%s" % utils.get_timestamp() + self._baseline_area = os.path.join(self.TEST_ROOT, "baselines") + self._testroot = self.TEST_ROOT + self._hasbatch = self.MACHINE.has_batch_system() and not self.NO_BATCH + self._do_teardown = not self.NO_TEARDOWN + self._root_dir = os.getcwd() + self._cprnc = self.MACHINE.get_value("CCSM_CPRNC") + customize_path = os.path.join(utils.get_src_root(), "cime_config", "customize") + self._config = Config.load(customize_path) + self._driver = utils.get_cime_default_driver() + + def tearDown(self): + self.kill_subprocesses() + + os.chdir(self._root_dir) + + if self._unset_proxy: + del os.environ["http_proxy"] + + files_to_clean = [] + + baselines = os.path.join(self._baseline_area, self._baseline_name) + if os.path.isdir(baselines): + files_to_clean.append(baselines) + + for test_id in ["master", self._baseline_name]: + for leftover in glob.glob(os.path.join(self._testroot, "*%s*" % test_id)): + files_to_clean.append(leftover) + + do_teardown = self._do_teardown and sys.exc_info() == (None, None, None) + if not do_teardown and files_to_clean: + print("Detected failed test or user request no teardown") + print("Leaving files:") + for file_to_clean in files_to_clean: + print(" " + file_to_clean) + else: + # For batch machines need to avoid race condition as batch system + # finishes I/O for the case. 
+ if self._hasbatch: + time.sleep(5) + + for file_to_clean in files_to_clean: + if os.path.isdir(file_to_clean): + shutil.rmtree(file_to_clean) + else: + os.remove(file_to_clean) + + def assert_test_status(self, test_name, test_status_obj, test_phase, expected_stat): + test_status = test_status_obj.get_status(test_phase) + self.assertEqual( + test_status, + expected_stat, + msg="Problem with {}: for phase '{}': has status '{}', expected '{}'".format( + test_name, test_phase, test_status, expected_stat + ), + ) + + def run_cmd_assert_result( + self, cmd, from_dir=None, expected_stat=0, env=None, verbose=False, shell=True + ): + from_dir = os.getcwd() if from_dir is None else from_dir + stat, output, errput = utils.run_cmd( + cmd, from_dir=from_dir, env=env, verbose=verbose, shell=shell + ) + if expected_stat == 0: + expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat + else: + expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % ( + expected_stat, + stat, + ) + msg = """ + COMMAND: %s + FROM_DIR: %s + %s + OUTPUT: %s + ERRPUT: %s + """ % ( + cmd, + from_dir, + expectation, + output, + errput, + ) + self.assertEqual(stat, expected_stat, msg=msg) + + return output + + def setup_proxy(self): + if "http_proxy" not in os.environ: + proxy = self.MACHINE.get_value("PROXY") + if proxy is not None: + os.environ["http_proxy"] = proxy + return True + + return False + + def assert_dashboard_has_build(self, build_name, expected_count=1): + # Do not test E3SM dashboard if model is CESM + if self._config.test_mode == "e3sm": + time.sleep(10) # Give chance for cdash to update + + wget_file = tempfile.mktemp() + + utils.run_cmd_no_fail( + "wget https://my.cdash.org/api/v1/index.php?project=ACME_test --no-check-certificate -O %s" + % wget_file + ) + + raw_text = open(wget_file, "r").read() + os.remove(wget_file) + + num_found = raw_text.count(build_name) + self.assertEqual( + num_found, + expected_count, + msg="Dashboard did not have expected num occurances of build name '%s'. Expected %s, found %s" + % (build_name, expected_count, num_found), + ) + + def kill_subprocesses( + self, name=None, sig=signal.SIGKILL, expected_num_killed=None + ): + # Kill all subprocesses + proc_ids = utils.find_proc_id(proc_name=name, children_only=True) + if expected_num_killed is not None: + self.assertEqual( + len(proc_ids), + expected_num_killed, + msg="Expected to find %d processes to kill, found %d" + % (expected_num_killed, len(proc_ids)), + ) + for proc_id in proc_ids: + try: + os.kill(proc_id, sig) + except OSError: + pass + + def kill_python_subprocesses(self, sig=signal.SIGKILL, expected_num_killed=None): + self.kill_subprocesses("[Pp]ython", sig, expected_num_killed) + + def _create_test( + self, + extra_args, + test_id=None, + run_errors=False, + env_changes="", + default_baseline_area=False, + expect_cases_made=True, + ): + """ + Convenience wrapper around create_test. Returns list of full paths to created cases. If multiple cases, + the order of the returned list is not guaranteed to match the order of the arguments. 
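+
+        For example (illustrative), a call such as
+        self._create_test(["--no-build", "SMS.f19_g16.A"]) creates a single
+        case without building it; when exactly one case is created, the bare
+        case directory is returned rather than a list.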
+ """ + # All stub model not supported in nuopc driver + if self._driver == "nuopc" and "cime_developer" in extra_args: + extra_args.append( + " ^SMS_Ln3.T42_T42.S ^PRE.f19_f19.ADESP_TEST ^PRE.f19_f19.ADESP ^DAE.ww3a.ADWAV ^IRT_N2_Vmct_Ln9.f19_g16.A" + ) + + test_id = ( + "{}-{}".format(self._baseline_name, utils.get_timestamp()) + if test_id is None + else test_id + ) + extra_args.append("-t {}".format(test_id)) + if not default_baseline_area: + extra_args.append("--baseline-root {}".format(self._baseline_area)) + if self.NO_BATCH: + extra_args.append("--no-batch") + if self.TEST_COMPILER and ( + [extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == [] + ): + extra_args.append("--compiler={}".format(self.TEST_COMPILER)) + if self.TEST_MPILIB and ( + [extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == [] + ): + extra_args.append("--mpilib={}".format(self.TEST_MPILIB)) + if [extra_arg for extra_arg in extra_args if "--machine" in extra_arg] == []: + extra_args.append(f"--machine {self.MACHINE.get_machine_name()}") + extra_args.append("--test-root={0} --output-root={0}".format(self._testroot)) + + full_run = ( + set(extra_args) + & set(["-n", "--namelist-only", "--no-setup", "--no-build", "--no-run"]) + ) == set() + if full_run and not self.NO_BATCH: + extra_args.append("--wait") + + expected_stat = 0 if not run_errors else utils.TESTS_FAILED_ERR_CODE + + output = self.run_cmd_assert_result( + "{} {}/create_test {}".format( + env_changes, self.SCRIPT_DIR, " ".join(extra_args) + ), + expected_stat=expected_stat, + ) + cases = [] + for line in output.splitlines(): + if "Case dir:" in line: + casedir = line.split()[-1] + self.assertTrue( + os.path.isdir(casedir), msg="Missing casedir {}".format(casedir) + ) + cases.append(casedir) + + if expect_cases_made: + self.assertTrue(len(cases) > 0, "create_test made no cases") + else: + self.assertTrue( + len(cases) == 0, + "create_test unexpectedly made {} case(s)".format(len(cases)), + ) + + return cases[0] if len(cases) == 1 else cases + + def _wait_for_tests(self, test_id, expect_works=True, always_wait=False): + if self._hasbatch or always_wait: + timeout_arg = ( + "--timeout={}".format(self.GLOBAL_TIMEOUT) + if self.GLOBAL_TIMEOUT is not None + else "" + ) + expected_stat = 0 if expect_works else utils.TESTS_FAILED_ERR_CODE + self.run_cmd_assert_result( + "{}/wait_for_tests {} *{}/TestStatus".format( + self.TOOLS_DIR, timeout_arg, test_id + ), + from_dir=self._testroot, + expected_stat=expected_stat, + ) + + def get_casedir(self, case_fragment, all_cases): + potential_matches = [item for item in all_cases if case_fragment in item] + self.assertTrue( + len(potential_matches) == 1, + "Ambiguous casedir selection for {}, found {} among {}".format( + case_fragment, potential_matches, all_cases + ), + ) + return potential_matches[0] + + def verify_perms(self, root_dir): + for root, dirs, files in os.walk(root_dir): + for filename in files: + full_path = os.path.join(root, filename) + st = os.stat(full_path) + self.assertTrue( + st.st_mode & stat.S_IWGRP, + msg="file {} is not group writeable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IRGRP, + msg="file {} is not group readable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IROTH, + msg="file {} is not world readable".format(full_path), + ) + + for dirname in dirs: + full_path = os.path.join(root, dirname) + st = os.stat(full_path) + + self.assertTrue( + st.st_mode & stat.S_IWGRP, + msg="dir {} is not group 
writable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IRGRP, + msg="dir {} is not group readable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IXGRP, + msg="dir {} is not group executable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IROTH, + msg="dir {} is not world readable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IXOTH, + msg="dir {} is not world executable".format(full_path), + ) diff --git a/scripts/lib/CIME/tests/case_fake.py b/CIME/tests/case_fake.py similarity index 76% rename from scripts/lib/CIME/tests/case_fake.py rename to CIME/tests/case_fake.py index 5f39c7e8b0f..2b1117edf63 100644 --- a/scripts/lib/CIME/tests/case_fake.py +++ b/CIME/tests/case_fake.py @@ -6,6 +6,7 @@ import os from copy import deepcopy + class CaseFake(object): def __init__(self, case_root, create_case_root=True): """ @@ -18,20 +19,22 @@ def __init__(self, case_root, create_case_root=True): self.vars = dict() if create_case_root: os.makedirs(case_root) - self.set_value('CASEROOT', case_root) + self.set_value("CASEROOT", case_root) casename = os.path.basename(case_root) # Typically, CIME_OUTPUT_ROOT is independent of the case. Here, # we nest it under CASEROOT so that (1) tests don't interfere # with each other; (2) a cleanup that removes CASEROOT will also # remove CIME_OUTPUT_ROOT. - self.set_value('CIME_OUTPUT_ROOT', - os.path.join(case_root, 'CIME_OUTPUT_ROOT')) - self.set_value('CASE', casename) - self.set_value('CASEBASEID', casename) - self.set_value('RUN_TYPE', 'startup') + self.set_value("CIME_OUTPUT_ROOT", os.path.join(case_root, "CIME_OUTPUT_ROOT")) + self.set_value("CASE", casename) + self.set_value("CASEBASEID", casename) + self.set_value("RUN_TYPE", "startup") self.set_exeroot() self.set_rundir() + def set_initial_test_values(self): + pass + def get_value(self, item): """ Get the value of the given item @@ -63,16 +66,24 @@ def copy(self, newcasename, newcaseroot): newcaseroot (str): new value for CASEROOT """ newcase = deepcopy(self) - newcase.set_value('CASE', newcasename) - newcase.set_value('CASEBASEID', newcasename) - newcase.set_value('CASEROOT', newcaseroot) + newcase.set_value("CASE", newcasename) + newcase.set_value("CASEBASEID", newcasename) + newcase.set_value("CASEROOT", newcaseroot) newcase.set_exeroot() newcase.set_rundir() return newcase - def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, - cime_output_root=None, exeroot=None, rundir=None): + def create_clone( + self, + newcase, + keepexe=False, + mach_dir=None, + project=None, + cime_output_root=None, + exeroot=None, + rundir=None, + ): # Need to disable unused-argument checking: keepexe is needed to match # the interface of Case, but is not used in this fake implementation # @@ -96,13 +107,13 @@ def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, newcaseroot = os.path.abspath(newcase) newcasename = os.path.basename(newcase) os.makedirs(newcaseroot) - clone = self.copy(newcasename = newcasename, newcaseroot = newcaseroot) + clone = self.copy(newcasename=newcasename, newcaseroot=newcaseroot) if cime_output_root is not None: - clone.set_value('CIME_OUTPUT_ROOT', cime_output_root) + clone.set_value("CIME_OUTPUT_ROOT", cime_output_root) if exeroot is not None: - clone.set_value('EXEROOT', exeroot) + clone.set_value("EXEROOT", exeroot) if rundir is not None: - clone.set_value('RUNDIR', rundir) + clone.set_value("RUNDIR", rundir) return clone @@ -113,21 +124,21 @@ def make_rundir(self): """ Make directory 
given by RUNDIR """ - os.makedirs(self.get_value('RUNDIR')) + os.makedirs(self.get_value("RUNDIR")) def set_exeroot(self): """ Assumes CASEROOT is already set; sets an appropriate EXEROOT (nested inside CASEROOT) """ - self.set_value('EXEROOT', os.path.join(self.get_value('CASEROOT'), 'bld')) + self.set_value("EXEROOT", os.path.join(self.get_value("CASEROOT"), "bld")) def set_rundir(self): """ Assumes CASEROOT is already set; sets an appropriate RUNDIR (nested inside CASEROOT) """ - self.set_value('RUNDIR', os.path.join(self.get_value('CASEROOT'), 'run')) + self.set_value("RUNDIR", os.path.join(self.get_value("CASEROOT"), "run")) def case_setup(self, clean=False, test_mode=False, reset=False): pass diff --git a/CIME/tests/custom_assertions_test_status.py b/CIME/tests/custom_assertions_test_status.py new file mode 100644 index 00000000000..16dc44e5cea --- /dev/null +++ b/CIME/tests/custom_assertions_test_status.py @@ -0,0 +1,94 @@ +""" +This module contains a class that extends unittest.TestCase, adding custom assertions that +can be used when testing TestStatus. +""" + +from CIME.XML.standard_module_setup import * + +import unittest +import re +from CIME import test_status + + +class CustomAssertionsTestStatus(unittest.TestCase): + def assert_status_of_phase(self, output, status, phase, test_name, xfail=None): + """Asserts that 'output' contains a line showing the given + status for the given phase for the given test_name. + + 'xfail' should have one of the following values: + - None (the default): assertion passes regardless of whether there is an + EXPECTED/UNEXPECTED string + - 'no': The line should end with the phase, with no additional text after that + - 'expected': After the phase, the line should contain '(EXPECTED FAILURE)' + - 'unexpected': After the phase, the line should contain '(UNEXPECTED' + """ + expected = r"^ *{} +".format( + re.escape(status) + ) + self._test_name_and_phase_regex(test_name, phase) + + if xfail == "no": + # There should be no other text after the testname and phase regex + expected += r" *$" + elif xfail == "expected": + expected += r" *{}".format( + re.escape(test_status.TEST_EXPECTED_FAILURE_COMMENT) + ) + elif xfail == "unexpected": + expected += r" *{}".format( + re.escape(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START) + ) + else: + expect(xfail is None, "Unhandled value of xfail argument") + + expected_re = re.compile(expected, flags=re.MULTILINE) + + self.assertRegex(output, expected_re) + + def assert_phase_absent(self, output, phase, test_name): + """Asserts that 'output' does not contain a status line for the + given phase and test_name""" + expected = re.compile( + r"^.* +" + self._test_name_and_phase_regex(test_name, phase), + flags=re.MULTILINE, + ) + + self.assertNotRegex(output, expected) + + def assert_core_phases(self, output, test_name, fails): + """Asserts that 'output' contains a line for each of the core test + phases for the given test_name. All results should be PASS + except those given by the fails list, which should be FAILS. 
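+
+        For example (illustrative), fails=['RUN'] requires the RUN phase to be
+        reported as FAIL while every other core phase is reported as PASS.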
+ """ + for phase in test_status.CORE_PHASES: + if phase in fails: + status = test_status.TEST_FAIL_STATUS + else: + status = test_status.TEST_PASS_STATUS + self.assert_status_of_phase( + output=output, status=status, phase=phase, test_name=test_name + ) + + def assert_num_expected_unexpected_fails( + self, output, num_expected, num_unexpected + ): + """Asserts that the number of occurrences of expected and unexpected fails in + 'output' matches the given numbers""" + self.assertEqual( + output.count(test_status.TEST_EXPECTED_FAILURE_COMMENT), num_expected + ) + self.assertEqual( + output.count(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START), + num_unexpected, + ) + + @staticmethod + def _test_name_and_phase_regex(test_name, phase): + """Returns a regex matching the portion of a TestStatus line + containing the test name and phase""" + # The main purpose of extracting this into a shared method is: + # assert_phase_absent could wrongly pass if the format of the + # TestStatus output changed without that method's regex + # changing. By making its regex shared as much as possible with + # the regex in assert_status_of_phase, we decrease the chances + # of these false passes. + return r"{} +{}".format(re.escape(test_name), re.escape(phase)) diff --git a/CIME/tests/scripts_regression_tests.py b/CIME/tests/scripts_regression_tests.py new file mode 100755 index 00000000000..66f4c015298 --- /dev/null +++ b/CIME/tests/scripts_regression_tests.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python3 + +""" +Script containing CIME python regression test suite. This suite should be run +to confirm overall CIME correctness. +""" + +import glob, os, re, shutil, signal, sys, tempfile, threading, time, logging, unittest, getpass, filecmp, time, atexit, functools + +CIMEROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) +sys.path.insert(0, CIMEROOT) + +from xml.etree.ElementTree import ParseError + +import subprocess, argparse + +subprocess.call('/bin/rm -f $(find . 
-name "*.pyc")', shell=True, cwd=CIMEROOT) +import stat as osstat + +import collections + +from CIME.utils import ( + run_cmd, + run_cmd_no_fail, + get_lids, + get_current_commit, + safe_copy, + CIMEError, + get_cime_root, + get_src_root, + Timeout, + import_from_file, + get_model, +) +import CIME.test_scheduler, CIME.wait_for_tests +from CIME import get_tests +from CIME.test_scheduler import TestScheduler +from CIME.XML.env_run import EnvRun +from CIME.XML.machines import Machines +from CIME.XML.files import Files +from CIME.case import Case +from CIME.code_checker import check_code, get_all_checkable_files +from CIME.test_status import * +from CIME.provenance import get_test_success, save_test_success +from CIME import utils +from CIME.tests.base import BaseTestCase +from CIME.config import Config + +os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" + +TEST_RESULT = None + + +def write_provenance_info(machine, test_compiler, test_mpilib, test_root): + curr_commit = get_current_commit(repo=CIMEROOT) + logging.info("Testing commit %s" % curr_commit) + cime_model = get_model() + logging.info("Using cime_model = %s" % cime_model) + logging.info("Testing machine = %s" % machine.get_machine_name()) + if test_compiler is not None: + logging.info("Testing compiler = %s" % test_compiler) + if test_mpilib is not None: + logging.info("Testing mpilib = %s" % test_mpilib) + logging.info("Test root: %s" % test_root) + logging.info("Test driver: %s" % CIME.utils.get_cime_default_driver()) + logging.info("Python version {}\n".format(sys.version)) + + +def cleanup(test_root): + if ( + os.path.exists(test_root) + and TEST_RESULT is not None + and TEST_RESULT.wasSuccessful() + ): + testreporter = os.path.join(test_root, "testreporter") + files = os.listdir(test_root) + if len(files) == 1 and os.path.isfile(testreporter): + os.unlink(testreporter) + if not os.listdir(test_root): + print("All pass, removing directory:", test_root) + os.rmdir(test_root) + + +def setup_arguments(parser): + parser.add_argument( + "--fast", + action="store_true", + help="Skip full system tests, which saves a lot of time", + ) + + parser.add_argument( + "--no-batch", + action="store_true", + help="Do not submit jobs to batch system, run locally." + " If false, will default to machine setting.", + ) + + parser.add_argument( + "--no-fortran-run", + action="store_true", + help="Do not run any fortran jobs. 
Implies --fast" " Used for github actions", + ) + + parser.add_argument( + "--no-cmake", action="store_true", help="Do not run cmake tests" + ) + + parser.add_argument( + "--no-teardown", + action="store_true", + help="Do not delete directories left behind by testing", + ) + + parser.add_argument( + "--machine", help="Select a specific machine setting for cime", default=None + ) + + parser.add_argument( + "--compiler", help="Select a specific compiler setting for cime", default=None + ) + + parser.add_argument( + "--mpilib", help="Select a specific compiler setting for cime", default=None + ) + + parser.add_argument( + "--test-root", + help="Select a specific test root for all cases created by the testing", + default=None, + ) + + parser.add_argument( + "--timeout", + type=int, + help="Select a specific timeout for all tests", + default=None, + ) + + +def configure_tests( + timeout, + no_fortran_run, + fast, + no_batch, + no_cmake, + no_teardown, + machine, + compiler, + mpilib, + test_root, + **kwargs +): + config = CIME.utils.get_cime_config() + + customize_path = os.path.join(utils.get_src_root(), "cime_config", "customize") + Config.load(customize_path) + + if timeout: + BaseTestCase.GLOBAL_TIMEOUT = str(timeout) + + BaseTestCase.NO_FORTRAN_RUN = no_fortran_run or False + BaseTestCase.FAST_ONLY = fast or no_fortran_run + BaseTestCase.NO_BATCH = no_batch or False + BaseTestCase.NO_CMAKE = no_cmake or False + BaseTestCase.NO_TEARDOWN = no_teardown or False + + # make sure we have default values + MACHINE = None + TEST_COMPILER = None + TEST_MPILIB = None + + if machine is not None: + MACHINE = Machines(machine=machine) + os.environ["CIME_MACHINE"] = machine + elif "CIME_MACHINE" in os.environ: + MACHINE = Machines(machine=os.environ["CIME_MACHINE"]) + elif config.has_option("create_test", "MACHINE"): + MACHINE = Machines(machine=config.get("create_test", "MACHINE")) + elif config.has_option("main", "MACHINE"): + MACHINE = Machines(machine=config.get("main", "MACHINE")) + else: + MACHINE = Machines() + + BaseTestCase.MACHINE = MACHINE + + if compiler is not None: + TEST_COMPILER = compiler + elif config.has_option("create_test", "COMPILER"): + TEST_COMPILER = config.get("create_test", "COMPILER") + elif config.has_option("main", "COMPILER"): + TEST_COMPILER = config.get("main", "COMPILER") + + BaseTestCase.TEST_COMPILER = TEST_COMPILER + + if mpilib is not None: + TEST_MPILIB = mpilib + elif config.has_option("create_test", "MPILIB"): + TEST_MPILIB = config.get("create_test", "MPILIB") + elif config.has_option("main", "MPILIB"): + TEST_MPILIB = config.get("main", "MPILIB") + + BaseTestCase.TEST_MPILIB = TEST_MPILIB + + if test_root is not None: + TEST_ROOT = test_root + elif config.has_option("create_test", "TEST_ROOT"): + TEST_ROOT = config.get("create_test", "TEST_ROOT") + else: + TEST_ROOT = os.path.join( + MACHINE.get_value("CIME_OUTPUT_ROOT"), + "scripts_regression_test.%s" % CIME.utils.get_timestamp(), + ) + + BaseTestCase.TEST_ROOT = TEST_ROOT + + write_provenance_info(MACHINE, TEST_COMPILER, TEST_MPILIB, TEST_ROOT) + + atexit.register(functools.partial(cleanup, TEST_ROOT)) + + +def _main_func(description): + help_str = """ +{0} [TEST] [TEST] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run the full suite \033[0m + > {0} + + \033[1;32m# Run single test file (with or without extension) \033[0m + > {0} test_unit_doctest + + \033[1;32m# Run single test class from a test file \033[0m + > {0} test_unit_doctest.TestDocs + + \033[1;32m# Run single test case from a test class 
\033[0m + > {0} test_unit_doctest.TestDocs.test_lib_docs +""".format( + os.path.basename(sys.argv[0]) + ) + + parser = argparse.ArgumentParser( + usage=help_str, + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + + setup_arguments(parser) + + parser.add_argument("--verbose", action="store_true", help="Enable verbose logging") + + parser.add_argument("--debug", action="store_true", help="Enable debug logging") + + parser.add_argument("--silent", action="store_true", help="Disable all logging") + + parser.add_argument( + "tests", nargs="*", help="Specific tests to run e.g. test_unit*" + ) + + ns, args = parser.parse_known_args() + + # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone) + sys.argv[1:] = args + + utils.configure_logging(ns.verbose, ns.debug, ns.silent) + + configure_tests(**vars(ns)) + + os.chdir(CIMEROOT) + + if len(ns.tests) == 0: + test_root = os.path.join(CIMEROOT, "CIME", "tests") + + test_suite = unittest.defaultTestLoader.discover(test_root) + else: + # Fixes handling shell expansion e.g. test_unit_*, by removing python extension + tests = [x.replace(".py", "").replace("/", ".") for x in ns.tests] + + # Try to load tests by just names + test_suite = unittest.defaultTestLoader.loadTestsFromNames(tests) + + test_runner = unittest.TextTestRunner(verbosity=2) + + global TEST_RESULT + + TEST_RESULT = test_runner.run(test_suite) + + # Implements same behavior as unittesst.main + # https://github.com/python/cpython/blob/b6d68aa08baebb753534a26d537ac3c0d2c21c79/Lib/unittest/main.py#L272-L273 + sys.exit(not TEST_RESULT.wasSuccessful()) + + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/CIME/tests/test_sys_bless_tests_results.py b/CIME/tests/test_sys_bless_tests_results.py new file mode 100644 index 00000000000..2f2beeb461d --- /dev/null +++ b/CIME/tests/test_sys_bless_tests_results.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python3 + +import glob +import re +import os +import stat + +from CIME import utils +from CIME.tests import base + + +class TestBlessTestResults(base.BaseTestCase): + def setUp(self): + super().setUp() + + # Set a restrictive umask so we can test that SharedAreas used for + # recording baselines are working + restrictive_mask = 0o027 + self._orig_umask = os.umask(restrictive_mask) + if not self._cprnc: + self.skipTest( + "Test cannot run without cprnc program defined in config_machines.xml" + ) + + def tearDown(self): + super().tearDown() + + if "TESTRUNDIFF_ALTERNATE" in os.environ: + del os.environ["TESTRUNDIFF_ALTERNATE"] + + os.umask(self._orig_umask) + + def test_bless_test_results(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + # Test resubmit scenario if Machine has a batch system + if self.MACHINE.has_batch_system(): + test_names = [ + "TESTRUNDIFFRESUBMIT_Mmpi-serial.f19_g16.A", + "TESTRUNDIFF_Mmpi-serial.f19_g16.A", + ] + else: + test_names = ["TESTRUNDIFF_P1.f19_g16.A"] + + # Generate some baselines + for test_name in test_names: + if self._config.create_test_flag_mode == "e3sm": + genargs = ["-g", "-o", "-b", self._baseline_name, test_name] + compargs = ["-c", "-b", self._baseline_name, test_name] + else: + genargs = [ + "-g", + self._baseline_name, + "-o", + test_name, + "--baseline-root ", + self._baseline_area, + ] + compargs = [ + "-c", + self._baseline_name, + test_name, + "--baseline-root ", + self._baseline_area, + ] + + self._create_test(genargs) + # Hist compare should pass + self._create_test(compargs) + # Change behavior + 
os.environ["TESTRUNDIFF_ALTERNATE"] = "True" + + # Hist compare should now fail + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + self._create_test(compargs, test_id=test_id, run_errors=True) + + # compare_test_results should detect the fail + cpr_cmd = "{}/compare_test_results --test-root {} -t {} ".format( + self.TOOLS_DIR, self._testroot, test_id + ) + output = self.run_cmd_assert_result( + cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE + ) + + # use regex + expected_pattern = re.compile(r"FAIL %s[^\s]* BASELINE" % test_name) + the_match = expected_pattern.search(output) + self.assertNotEqual( + the_match, + None, + msg="Cmd '%s' failed to display failed test %s in output:\n%s" + % (cpr_cmd, test_name, output), + ) + # Bless + utils.run_cmd_no_fail( + "{}/bless_test_results --test-root {} --hist-only --force -t {}".format( + self.TOOLS_DIR, self._testroot, test_id + ) + ) + # Hist compare should now pass again + self._create_test(compargs) + self.verify_perms(self._baseline_area) + if "TESTRUNDIFF_ALTERNATE" in os.environ: + del os.environ["TESTRUNDIFF_ALTERNATE"] + + def test_rebless_namelist(self): + # Generate some namelist baselines + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + test_to_change = "TESTRUNPASS_P1.f19_g16.A" + if self._config.create_test_flag_mode == "e3sm": + genargs = ["-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"] + compargs = ["-c", "-b", self._baseline_name, "cime_test_only_pass"] + else: + genargs = ["-g", self._baseline_name, "-o", "cime_test_only_pass"] + compargs = ["-c", self._baseline_name, "cime_test_only_pass"] + + self._create_test(genargs) + + # Basic namelist compare + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + cases = self._create_test(compargs, test_id=test_id) + casedir = self.get_casedir(test_to_change, cases) + + # Check standalone case.cmpgen_namelists + self.run_cmd_assert_result("./case.cmpgen_namelists", from_dir=casedir) + + # compare_test_results should pass + cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} ".format( + self.TOOLS_DIR, self._testroot, test_id + ) + output = self.run_cmd_assert_result(cpr_cmd) + + # use regex + expected_pattern = re.compile(r"PASS %s[^\s]* NLCOMP" % test_to_change) + the_match = expected_pattern.search(output) + msg = f"Cmd {cpr_cmd} failed to display passed test in output:\n{output}" + self.assertNotEqual( + the_match, + None, + msg=msg, + ) + + # Modify namelist + fake_nl = """ + &fake_nml + fake_item = 'fake' + fake = .true. 
+/""" + baseline_area = self._baseline_area + baseline_glob = glob.glob( + os.path.join(baseline_area, self._baseline_name, "TEST*") + ) + self.assertEqual( + len(baseline_glob), + 3, + msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob), + ) + + for baseline_dir in baseline_glob: + nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in") + self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path) + + os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR) + with open(nl_path, "a") as nl_file: + nl_file.write(fake_nl) + + # Basic namelist compare should now fail + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + self._create_test(compargs, test_id=test_id, run_errors=True) + casedir = self.get_casedir(test_to_change, cases) + + # Unless namelists are explicitly ignored + test_id2 = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + self._create_test(compargs + ["--ignore-namelists"], test_id=test_id2) + + self.run_cmd_assert_result( + "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100 + ) + + # preview namelists should work + self.run_cmd_assert_result("./preview_namelists", from_dir=casedir) + + # This should still fail + self.run_cmd_assert_result( + "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100 + ) + + # compare_test_results should fail + cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} ".format( + self.TOOLS_DIR, self._testroot, test_id + ) + output = self.run_cmd_assert_result( + cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE + ) + + # use regex + expected_pattern = re.compile(r"FAIL %s[^\s]* NLCOMP" % test_to_change) + the_match = expected_pattern.search(output) + self.assertNotEqual( + the_match, + None, + msg="Cmd '%s' failed to display passed test in output:\n%s" + % (cpr_cmd, output), + ) + + # Bless + new_test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + utils.run_cmd_no_fail( + "{}/bless_test_results --test-root {} -n --force -t {} --new-test-root={} --new-test-id={}".format( + self.TOOLS_DIR, self._testroot, test_id, self._testroot, new_test_id + ) + ) + + # Basic namelist compare should now pass again + self._create_test(compargs) + + self.verify_perms(self._baseline_area) diff --git a/CIME/tests/test_sys_build_system.py b/CIME/tests/test_sys_build_system.py new file mode 100644 index 00000000000..7875c33b5fd --- /dev/null +++ b/CIME/tests/test_sys_build_system.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 + +from CIME.tests import base + + +class TestBuildSystem(base.BaseTestCase): + def test_clean_rebuild(self): + casedir = self._create_test( + ["--no-run", "SMS.f19_g16.A"], test_id=self._baseline_name + ) + + # Clean a component and a sharedlib + self.run_cmd_assert_result("./case.build --clean atm", from_dir=casedir) + self.run_cmd_assert_result("./case.build --clean gptl", from_dir=casedir) + + # Repeating should not be an error + self.run_cmd_assert_result("./case.build --clean atm", from_dir=casedir) + self.run_cmd_assert_result("./case.build --clean gptl", from_dir=casedir) + + self.run_cmd_assert_result("./case.build", from_dir=casedir) diff --git a/CIME/tests/test_sys_cime_case.py b/CIME/tests/test_sys_cime_case.py new file mode 100644 index 00000000000..227221173a5 --- /dev/null +++ b/CIME/tests/test_sys_cime_case.py @@ -0,0 +1,773 @@ +#!/usr/bin/env python3 + +import collections +import os +import re +import shutil +import sys +import time + +from CIME import utils +from CIME.tests import base +from CIME.case.case import Case +from CIME.XML.env_run import 
EnvRun + +try: + collectionsAbc = collections.abc +except AttributeError: + collectionsAbc = collections + + +class TestCimeCase(base.BaseTestCase): + def test_cime_case(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_P1.f19_g16.A"], test_id=self._baseline_name + ) + + self.assertEqual(type(self.MACHINE.get_value("MAX_TASKS_PER_NODE")), int) + self.assertTrue( + type(self.MACHINE.get_value("PROJECT_REQUIRED")) in [type(None), bool] + ) + + with Case(casedir, read_only=False) as case: + build_complete = case.get_value("BUILD_COMPLETE") + self.assertFalse( + build_complete, + msg="Build complete had wrong value '%s'" % build_complete, + ) + + case.set_value("BUILD_COMPLETE", True) + build_complete = case.get_value("BUILD_COMPLETE") + self.assertTrue( + build_complete, + msg="Build complete had wrong value '%s'" % build_complete, + ) + + case.flush() + + build_complete = utils.run_cmd_no_fail( + "./xmlquery BUILD_COMPLETE --value", from_dir=casedir + ) + self.assertEqual( + build_complete, + "TRUE", + msg="Build complete had wrong value '%s'" % build_complete, + ) + + # Test some test properties + self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS") + + def _batch_test_fixture(self, testcase_name): + if not self.MACHINE.has_batch_system() or self.NO_BATCH: + self.skipTest("Skipping testing user prerequisites without batch systems") + testdir = os.path.join(self._testroot, testcase_name) + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = "--case {name} --script-root {testdir} --compset X --res f19_g16 --handle-preexisting-dirs=r --output-root {testdir}".format( + name=testcase_name, testdir=testdir + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + + self.run_cmd_assert_result( + "{}/create_newcase {}".format(self.SCRIPT_DIR, args), + from_dir=self.SCRIPT_DIR, + ) + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + + return testdir + + def test_cime_case_prereq(self): + testcase_name = "prereq_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + if case.get_value("depend_string") is None: + self.skipTest( + "Skipping prereq test, depend_string was not provided for this batch system" + ) + job_name = "case.run" + prereq_name = "prereq_test" + batch_commands = case.submit_jobs( + prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True + ) + self.assertTrue( + isinstance(batch_commands, collectionsAbc.Sequence), + "case.submit_jobs did not return a sequence for a dry run", + ) + self.assertTrue( + len(batch_commands) > 0, + "case.submit_jobs did not return any job submission string", + ) + # The first element in the internal sequence should just be the job name + # The second one (batch_cmd_index) should be the actual batch submission command + batch_cmd_index = 1 + # The prerequisite should be applied to all jobs, though we're only expecting one + for batch_cmd in batch_commands: + self.assertTrue( + isinstance(batch_cmd, collectionsAbc.Sequence), + "case.submit_jobs did not return a sequence of sequences", + ) + self.assertTrue( + len(batch_cmd) > batch_cmd_index, + "case.submit_jobs returned internal sequences with length <= {}".format( + batch_cmd_index + ), + ) + self.assertTrue( + isinstance(batch_cmd[1], str), + "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format( + batch_cmd[1] + ), + ) + batch_cmd_args = batch_cmd[1] + + jobid_ident = "jobid" + dep_str_fmt = case.get_env("batch").get_value( + 
"depend_string", subgroup=None + ) + self.assertTrue( + jobid_ident in dep_str_fmt, + "dependency string doesn't include the jobid identifier {}".format( + jobid_ident + ), + ) + dep_str = dep_str_fmt[: dep_str_fmt.index(jobid_ident)] + + prereq_substr = None + while dep_str in batch_cmd_args: + dep_id_pos = batch_cmd_args.find(dep_str) + len(dep_str) + batch_cmd_args = batch_cmd_args[dep_id_pos:] + prereq_substr = batch_cmd_args[: len(prereq_name)] + if prereq_substr == prereq_name: + break + + self.assertTrue( + prereq_name in prereq_substr, + "Dependencies added, but not the user specified one", + ) + + def test_cime_case_allow_failed_prereq(self): + testcase_name = "allow_failed_prereq_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + depend_allow = case.get_value("depend_allow_string") + if depend_allow is None: + self.skipTest( + "Skipping allow_failed_prereq test, depend_allow_string was not provided for this batch system" + ) + job_name = "case.run" + prereq_name = "prereq_allow_fail_test" + depend_allow = depend_allow.replace("jobid", prereq_name) + batch_commands = case.submit_jobs( + prereq=prereq_name, + allow_fail=True, + job=job_name, + skip_pnl=True, + dry_run=True, + ) + self.assertTrue( + isinstance(batch_commands, collectionsAbc.Sequence), + "case.submit_jobs did not return a sequence for a dry run", + ) + num_submissions = 1 + if case.get_value("DOUT_S"): + num_submissions = 2 + self.assertTrue( + len(batch_commands) == num_submissions, + "case.submit_jobs did not return any job submission strings", + ) + self.assertTrue(depend_allow in batch_commands[0][1]) + + def test_cime_case_resubmit_immediate(self): + testcase_name = "resubmit_immediate_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + depend_string = case.get_value("depend_string") + if depend_string is None: + self.skipTest( + "Skipping resubmit_immediate test, depend_string was not provided for this batch system" + ) + depend_string = re.sub("jobid.*$", "", depend_string) + job_name = "case.run" + num_submissions = 6 + case.set_value("RESUBMIT", num_submissions - 1) + batch_commands = case.submit_jobs( + job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True + ) + self.assertTrue( + isinstance(batch_commands, collectionsAbc.Sequence), + "case.submit_jobs did not return a sequence for a dry run", + ) + if case.get_value("DOUT_S"): + num_submissions = 12 + self.assertTrue( + len(batch_commands) == num_submissions, + "case.submit_jobs did not return {} submitted jobs".format( + num_submissions + ), + ) + for i, cmd in enumerate(batch_commands): + if i > 0: + self.assertTrue(depend_string in cmd[1]) + + def test_cime_case_st_archive_resubmit(self): + testcase_name = "st_archive_resubmit_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + case.case_setup(clean=False, test_mode=False, reset=True) + orig_resubmit = 2 + case.set_value("RESUBMIT", orig_resubmit) + case.case_st_archive(resubmit=False) + new_resubmit = case.get_value("RESUBMIT") + self.assertTrue( + orig_resubmit == new_resubmit, "st_archive resubmitted when told not to" + ) + case.case_st_archive(resubmit=True) + new_resubmit = case.get_value("RESUBMIT") + self.assertTrue( + (orig_resubmit - 1) == new_resubmit, + "st_archive did not resubmit when told to", + ) + + def test_cime_case_build_threaded_1(self): + casedir = self._create_test( + ["--no-build", 
"TESTRUNPASS_P1x1.f19_g16.A"], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=False) as case: + build_threaded = case.get_value("BUILD_THREADED") + self.assertFalse(build_threaded) + + build_threaded = case.get_build_threaded() + self.assertFalse(build_threaded) + + case.set_value("FORCE_BUILD_SMP", True) + + build_threaded = case.get_build_threaded() + self.assertTrue(build_threaded) + + def test_cime_case_build_threaded_2(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_P1x2.f19_g16.A"], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=False) as case: + build_threaded = case.get_value("BUILD_THREADED") + self.assertTrue(build_threaded) + + build_threaded = case.get_build_threaded() + self.assertTrue(build_threaded) + + def test_cime_case_mpi_serial(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16.A"], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=True) as case: + + # Serial cases should not be using pnetcdf + self.assertEqual(case.get_value("CPL_PIO_TYPENAME"), "netcdf") + + # Serial cases should be using 1 task + self.assertEqual(case.get_value("TOTALPES"), 1) + + self.assertEqual(case.get_value("NTASKS_CPL"), 1) + + def test_cime_case_force_pecount(self): + casedir = self._create_test( + [ + "--no-build", + "--force-procs=16", + "--force-threads=8", + "TESTRUNPASS.f19_g16.A", + ], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=True) as case: + self.assertEqual(case.get_value("NTASKS_CPL"), 16) + + self.assertEqual(case.get_value("NTHRDS_CPL"), 8) + + def test_cime_case_xmlchange_append(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_P1x1.f19_g16.A"], + test_id=self._baseline_name, + ) + + self.run_cmd_assert_result( + "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir + ) + result = self.run_cmd_assert_result( + "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir + ) + self.assertEqual(result, "-opt1") + + self.run_cmd_assert_result( + "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir + ) + result = self.run_cmd_assert_result( + "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir + ) + self.assertEqual(result, "-opt1 -opt2") + + def test_cime_case_test_walltime_mgmt_1(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping walltime test. Depends on E3SM batch settings") + + test_name = "ERS.f19_g16.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", "--non-local", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME -N --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "00:10:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE -N --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") + + def test_cime_case_test_walltime_mgmt_2(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") + + test_name = "ERS_P64.f19_g16.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", "--non-local", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME -N --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "01:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE -N --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") + + def test_cime_case_test_walltime_mgmt_3(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping walltime test. Depends on E3SM batch settings") + + test_name = "ERS_P64.f19_g16.A" + casedir = self._create_test( + [ + "--no-setup", + "--machine=blues", + "--non-local", + "--walltime=0:10:00", + test_name, + ], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME -N --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "00:10:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE -N --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") # Not smart enough to select faster queue + + def test_cime_case_test_walltime_mgmt_4(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping walltime test. Depends on E3SM batch settings") + + test_name = "ERS_P1.f19_g16.A" + casedir = self._create_test( + [ + "--no-setup", + "--machine=blues", + "--non-local", + "--walltime=2:00:00", + test_name, + ], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME -N --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "01:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE -N --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") + + def test_cime_case_test_walltime_mgmt_5(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping walltime test. Depends on E3SM batch settings") + + test_name = "ERS_P1.f19_g16.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", "--non-local", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_QUEUE=slartibartfast -N --subgroup=case.test", + from_dir=casedir, + expected_stat=1, + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_QUEUE=slartibartfast -N --force --subgroup=case.test", + from_dir=casedir, + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME -N --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "01:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE -N --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "slartibartfast") + + def test_cime_case_test_walltime_mgmt_6(self): + if not self._hasbatch: + self.skipTest("Skipping walltime test. 
Depends on batch system") + + test_name = "ERS_P1.f19_g16.A" + casedir = self._create_test( + ["--no-build", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", + from_dir=casedir, + ) + + self.run_cmd_assert_result("./case.setup --reset", from_dir=casedir) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + with Case(casedir) as case: + walltime_format = case.get_value("walltime_format", subgroup=None) + if walltime_format is not None and walltime_format.count(":") == 1: + self.assertEqual(result, "421:32") + else: + self.assertEqual(result, "421:32:11") + + def test_cime_case_test_walltime_mgmt_7(self): + if not self._hasbatch: + self.skipTest("Skipping walltime test. Depends on batch system") + + test_name = "ERS_P1.f19_g16.A" + casedir = self._create_test( + ["--no-build", "--walltime=01:00:00", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", + from_dir=casedir, + ) + + self.run_cmd_assert_result("./case.setup --reset", from_dir=casedir) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + with Case(casedir) as case: + walltime_format = case.get_value("walltime_format", subgroup=None) + if walltime_format is not None and walltime_format.count(":") == 1: + self.assertEqual(result, "421:32") + else: + self.assertEqual(result, "421:32:11") + + def test_cime_case_test_walltime_mgmt_8(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") + + # Frontier has 56 MAX_MPITASKS_PER_NODE so 5600 should require 100 nodes + # which should land us in 6 hour queue + test_name = "SMS_P5600.f19_g16.A" + machine, compiler = "frontier", "craygnu" + casedir = self._create_test( + [ + "--no-setup", + "--non-local", + "--machine={}".format(machine), + "--compiler={}".format(compiler), + "--project e3sm", + test_name, + ], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME -N --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "12:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE -N --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "batch") + + def test_cime_case_test_custom_project(self): + test_name = "ERS_P1.f19_g16.A" + # have to use a machine both models know and one that doesn't put PROJECT in any key paths + machine = self._config.test_custom_project_machine + compiler = "gnu" + casedir = self._create_test( + [ + "--no-setup", + "--machine={}".format(machine), + "--compiler={}".format(compiler), + "--project=testproj", + test_name, + "--mpilib=mpi-serial", + "--non-local", + ], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery --non-local --value PROJECT --subgroup=case.test", + from_dir=casedir, + ) + self.assertEqual(result, "testproj") + + def test_create_test_longname(self): + self._create_test( + ["SMS.f19_g16.2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV", "--no-build"] + ) + + def test_env_loading(self): + if self._machine != "mappy": + self.skipTest("Skipping env load test - Only works on mappy") + + casedir = self._create_test( + ["--no-build", "TESTRUNPASS.f19_g16.A"], test_id=self._baseline_name + ) + + with Case(casedir, read_only=True) as case: + env_mach = case.get_env("mach_specific") + orig_env = dict(os.environ) + + env_mach.load_env(case) + module_env = dict(os.environ) + + os.environ.clear() + os.environ.update(orig_env) + + env_mach.load_env(case, force_method="generic") + generic_env = dict(os.environ) + + os.environ.clear() + os.environ.update(orig_env) + + problems = "" + for mkey, mval in module_env.items(): + if mkey not in generic_env: + if not mkey.startswith("PS") and mkey != "OLDPWD": + problems += "Generic missing key: {}\n".format(mkey) + elif ( + mval != generic_env[mkey] + and mkey not in ["_", "SHLVL", "PWD"] + and not mkey.endswith("()") + ): + problems += "Value mismatch for key {}: {} != {}\n".format( + mkey, repr(mval), repr(generic_env[mkey]) + ) + + for gkey in generic_env.keys(): + if gkey not in module_env: + problems += "Modules missing key: {}\n".format(gkey) + + self.assertEqual(problems, "", msg=problems) + + def test_case_submit_interface(self): + # the current directory may not exist, so make sure we are in a real directory + os.chdir(os.getenv("HOME")) + sys.path.append(self.TOOLS_DIR) + case_submit_path = os.path.join(self.TOOLS_DIR, "case.submit") + + module = utils.import_from_file("case.submit", case_submit_path) + + sys.argv = [ + "case.submit", + "--batch-args", + "'random_arguments_here.%j'", + "--mail-type", + "fail", + "--mail-user", + "'random_arguments_here.%j'", + ] + module._main_func(None, True) + + def test_xml_caching(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS.f19_g16.A"], test_id=self._baseline_name + ) + + active = os.path.join(casedir, 
"env_run.xml") + backup = os.path.join(casedir, "env_run.xml.bak") + + utils.safe_copy(active, backup) + + with Case(casedir, read_only=False) as case: + env_run = EnvRun(casedir, read_only=True) + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + self.assertEqual(env_run.get_value("RUN_TYPE"), "branch") + + with Case(casedir) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + + time.sleep(0.2) + utils.safe_copy(backup, active) + + with Case(casedir, read_only=False) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + + with Case(casedir, read_only=False) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + time.sleep(0.2) + utils.safe_copy(backup, active) + case.read_xml() # Manual re-sync + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + + with Case(casedir) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + time.sleep(0.2) + utils.safe_copy(backup, active) + env_run = EnvRun(casedir, read_only=True) + self.assertEqual(env_run.get_value("RUN_TYPE"), "startup") + + with Case(casedir, read_only=False) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + + # behind the back detection. + with self.assertRaises(utils.CIMEError): + with Case(casedir, read_only=False) as case: + case.set_value("RUN_TYPE", "startup") + time.sleep(0.2) + utils.safe_copy(backup, active) + + with Case(casedir, read_only=False) as case: + case.set_value("RUN_TYPE", "branch") + + # If there's no modications within CIME, the files should not be written + # and therefore no timestamp check + with Case(casedir) as case: + time.sleep(0.2) + utils.safe_copy(backup, active) + + def test_configure(self): + testname = "SMS.f09_g16.X" + casedir = self._create_test( + [testname, "--no-build"], test_id=self._baseline_name + ) + + manual_config_dir = os.path.join(casedir, "manual_config") + os.mkdir(manual_config_dir) + + utils.run_cmd_no_fail( + "{} --machine={} --compiler={}".format( + os.path.join(utils.get_cime_root(), "CIME", "scripts", "configure"), + self._machine, + self._compiler, + ), + from_dir=manual_config_dir, + ) + + with open(os.path.join(casedir, "env_mach_specific.xml"), "r") as fd: + case_env_contents = fd.read() + + with open(os.path.join(manual_config_dir, "env_mach_specific.xml"), "r") as fd: + man_env_contents = fd.read() + + self.assertEqual(case_env_contents, man_env_contents) + + def test_self_build_cprnc(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + if self.TEST_COMPILER and "gpu" in self.TEST_COMPILER: + self.skipTest("Skipping cprnc test for gpu compiler") + + testname = "ERS_Ln7.f19_g16.A" + casedir = self._create_test( + [testname, "--no-build"], test_id=self._baseline_name + ) + + self.run_cmd_assert_result( + "./xmlchange CCSM_CPRNC=this_is_a_broken_cprnc --file env_test.xml", + from_dir=casedir, + ) + self.run_cmd_assert_result("./case.build", from_dir=casedir) + self.run_cmd_assert_result("./case.submit", from_dir=casedir) + + self._wait_for_tests(self._baseline_name, always_wait=True) + + def test_case_clean(self): + testname = "ERS_Ln7.f19_g16.A" + casedir = self._create_test( + [testname, "--no-build"], test_id=self._baseline_name + ) + + self.run_cmd_assert_result("./case.setup --clean", 
from_dir=casedir) + self.run_cmd_assert_result("./case.setup --clean", from_dir=casedir) + self.run_cmd_assert_result("./case.setup", from_dir=casedir) + + def test_skip_run_with_existing_baseline(self): + test_name = "TESTRUNPASS_P1.f19_g16.A" + + if self._config.test_mode == "cesm": + create_test_extra_args = ["--generate", "baseline", "--no-build", test_name] + else: + create_test_extra_args = ["-g", "--no-build", test_name] + + orig_testroot = self._testroot + self._testroot = os.path.join(orig_testroot, "case0") + casedir_0 = self._create_test( + create_test_extra_args, + test_id=self._baseline_name, + expect_cases_made=True, + ) + self._testroot = os.path.join(orig_testroot, "case1") + casedir_1 = self._create_test( + ["--skip-tests-with-existing-baselines"] + create_test_extra_args, + test_id=self._baseline_name, + expect_cases_made=False, + ) + self._testroot = orig_testroot diff --git a/CIME/tests/test_sys_cime_performance.py b/CIME/tests/test_sys_cime_performance.py new file mode 100644 index 00000000000..a0282c7759a --- /dev/null +++ b/CIME/tests/test_sys_cime_performance.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 + +import time + +from CIME.tests import base + + +class TestCimePerformance(base.BaseTestCase): + def test_cime_case_ctrl_performance(self): + + ts = time.time() + + num_repeat = 5 + for _ in range(num_repeat): + self._create_test(["cime_tiny", "--no-build"]) + + elapsed = time.time() - ts + + print("Perf test result: {:0.2f}".format(elapsed)) diff --git a/CIME/tests/test_sys_create_newcase.py b/CIME/tests/test_sys_create_newcase.py new file mode 100644 index 00000000000..1be636aff36 --- /dev/null +++ b/CIME/tests/test_sys_create_newcase.py @@ -0,0 +1,869 @@ +#!/usr/bin/env python3 + +import filecmp +import os +import re +import shutil +import sys + +from CIME import utils +from CIME.tests import base +from CIME.case.case import Case +from CIME.build import CmakeTmpBuildDir + + +class TestCreateNewcase(base.BaseTestCase): + @classmethod + def setUpClass(cls): + cls._testdirs = [] + cls._do_teardown = [] + cls._testroot = os.path.join(cls.TEST_ROOT, "TestCreateNewcase") + cls._root_dir = os.getcwd() + + def tearDown(self): + cls = self.__class__ + os.chdir(cls._root_dir) + + def test_a_createnewcase(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testcreatenewcase") + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = " --case %s --compset X --output-root %s --handle-preexisting-dirs=r" % ( + testdir, + cls._testroot, + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + cls._testdirs.append(testdir) + self.run_cmd_assert_result( + "./create_newcase %s" % (args), from_dir=self.SCRIPT_DIR + ) + self.assertTrue(os.path.exists(testdir)) + self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) + + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + self.run_cmd_assert_result("./case.build", from_dir=testdir) + + with Case(testdir, read_only=False) as case: + ntasks = case.get_value("NTASKS_ATM") + case.set_value("NTASKS_ATM", ntasks + 1) + + # this should fail with a locked file issue + self.run_cmd_assert_result("./case.build", from_dir=testdir, 
expected_stat=1) + + self.run_cmd_assert_result("./case.setup --reset", from_dir=testdir) + self.run_cmd_assert_result("./case.build", from_dir=testdir) + with Case(testdir, read_only=False) as case: + case.set_value("CHARGE_ACCOUNT", "fred") + # to be used in next test + batch_system = case.get_value("BATCH_SYSTEM") + + # on systems (like github workflow) that do not have batch, set this for the next test + if batch_system == "none": + self.run_cmd_assert_result( + r'./xmlchange --subgroup case.run BATCH_COMMAND_FLAGS="-q \$JOB_QUEUE"', + from_dir=testdir, + ) + + # this should not fail with a locked file issue + self.run_cmd_assert_result("./case.build", from_dir=testdir) + + self.run_cmd_assert_result("./case.st_archive --test-all", from_dir=testdir) + + with Case(testdir, read_only=False) as case: + batch_command = case.get_value("BATCH_COMMAND_FLAGS", subgroup="case.run") + + self.run_cmd_assert_result( + './xmlchange --append --subgroup case.run BATCH_COMMAND_FLAGS="-l trythis"', + from_dir=testdir, + ) + # Test that changes to BATCH_COMMAND_FLAGS work + with Case(testdir, read_only=False) as case: + new_batch_command = case.get_value( + "BATCH_COMMAND_FLAGS", subgroup="case.run" + ) + + self.assertTrue( + new_batch_command == batch_command + " -l trythis", + msg=f"Failed to correctly append BATCH_COMMAND_FLAGS {new_batch_command} {batch_command}#", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_QUEUE=fred --subgroup case.run --force", from_dir=testdir + ) + + with Case(testdir, read_only=False) as case: + new_batch_command = case.get_value( + "BATCH_COMMAND_FLAGS", subgroup="case.run" + ) + self.assertTrue( + "fred" in new_batch_command, + msg="Failed to update JOB_QUEUE in BATCH_COMMAND_FLAGS {}".format( + new_batch_command + ), + ) + + # Trying to set values outside of context manager should fail + case = Case(testdir, read_only=False) + with self.assertRaises(utils.CIMEError): + case.set_value("NTASKS_ATM", 42) + + # Trying to read_xml with pending changes should fail + with self.assertRaises(utils.CIMEError): + with Case(testdir, read_only=False) as case: + case.set_value("CHARGE_ACCOUNT", "fouc") + case.read_xml() + + cls._do_teardown.append(testdir) + + def test_aa_no_flush_on_instantiate(self): + testdir = os.path.join(self.__class__._testroot, "testcreatenewcase") + with Case(testdir, read_only=False) as case: + for env_file in case._files: + self.assertFalse( + env_file.needsrewrite, + msg="Instantiating a case should not trigger a flush call", + ) + + with Case(testdir, read_only=False) as case: + case.set_value("HIST_OPTION", "nyears") + runfile = case.get_env("run") + self.assertTrue( + runfile.needsrewrite, msg="Expected flush call not triggered" + ) + for env_file in case._files: + if env_file != runfile: + self.assertFalse( + env_file.needsrewrite, + msg="Unexpected flush triggered for file {}".format( + env_file.filename + ), + ) + # Flush the file + runfile.write() + # set it again to the same value + case.set_value("HIST_OPTION", "nyears") + # now the file should not need to be flushed + for env_file in case._files: + self.assertFalse( + env_file.needsrewrite, + msg="Unexpected flush triggered for file {}".format( + env_file.filename + ), + ) + + # Check once more with a new instance + with Case(testdir, read_only=False) as case: + case.set_value("HIST_OPTION", "nyears") + for env_file in case._files: + self.assertFalse( + env_file.needsrewrite, + msg="Unexpected flush triggered for file {}".format( + env_file.filename + ), + ) + + def 
test_b_user_mods(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testusermods") + if os.path.exists(testdir): + shutil.rmtree(testdir) + + cls._testdirs.append(testdir) + + user_mods_dir = os.path.join(os.path.dirname(__file__), "user_mods_test1") + args = ( + " --case %s --compset X --user-mods-dir %s --output-root %s --handle-preexisting-dirs=r" + % (testdir, user_mods_dir, cls._testroot) + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s " % (self.SCRIPT_DIR, args), from_dir=self.SCRIPT_DIR + ) + + self.assertTrue( + os.path.isfile( + os.path.join(testdir, "SourceMods", "src.drv", "somefile.F90") + ), + msg="User_mods SourceMod missing", + ) + + with open(os.path.join(testdir, "user_nl_cpl"), "r") as fd: + contents = fd.read() + self.assertTrue( + "a different cpl test option" in contents, + msg="User_mods contents of user_nl_cpl missing", + ) + self.assertTrue( + "a cpl namelist option" in contents, + msg="User_mods contents of user_nl_cpl missing", + ) + cls._do_teardown.append(testdir) + + def test_c_create_clone_keepexe(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "test_create_clone_keepexe") + if os.path.exists(testdir): + shutil.rmtree(testdir) + prevtestdir = cls._testdirs[0] + user_mods_dir = os.path.join(os.path.dirname(__file__), "user_mods_test3") + + cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" % ( + self.SCRIPT_DIR, + prevtestdir, + testdir, + user_mods_dir, + ) + self.run_cmd_assert_result(cmd, from_dir=self.SCRIPT_DIR, expected_stat=1) + cls._do_teardown.append(testdir) + + def test_d_create_clone_new_user(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "test_create_clone_new_user") + if os.path.exists(testdir): + shutil.rmtree(testdir) + prevtestdir = cls._testdirs[0] + cls._testdirs.append(testdir) + # change the USER and CIME_OUTPUT_ROOT to nonsense values + # this is intended as a test of whether create_clone is independent of user + self.run_cmd_assert_result( + "./xmlchange USER=this_is_not_a_user", from_dir=prevtestdir + ) + + fakeoutputroot = cls._testroot.replace( + os.environ.get("USER"), "this_is_not_a_user" + ) + self.run_cmd_assert_result( + "./xmlchange CIME_OUTPUT_ROOT=%s" % fakeoutputroot, from_dir=prevtestdir + ) + + # this test should pass (user name is replaced) + self.run_cmd_assert_result( + "%s/create_clone --clone %s --case %s " + % (self.SCRIPT_DIR, prevtestdir, testdir), + from_dir=self.SCRIPT_DIR, + ) + + shutil.rmtree(testdir) + # this test should pass + self.run_cmd_assert_result( + "%s/create_clone --clone %s --case %s --cime-output-root %s" + % (self.SCRIPT_DIR, prevtestdir, testdir, cls._testroot), + from_dir=self.SCRIPT_DIR, + ) + + cls._do_teardown.append(testdir) + + def test_dd_create_clone_not_writable(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "test_create_clone_not_writable") + if os.path.exists(testdir): + shutil.rmtree(testdir) + prevtestdir = cls._testdirs[0] + cls._testdirs.append(testdir) + + with Case(prevtestdir, read_only=False) as case1: + case2 = case1.create_clone(testdir) + 
with self.assertRaises(utils.CIMEError): + case2.set_value("CHARGE_ACCOUNT", "fouc") + cls._do_teardown.append(testdir) + + def test_e_xmlquery(self): + # Set script and script path + xmlquery = "./xmlquery" + cls = self.__class__ + casedir = cls._testdirs[0] + + # Check for environment + self.assertTrue(os.path.isdir(self.SCRIPT_DIR)) + self.assertTrue(os.path.isdir(self.TOOLS_DIR)) + self.assertTrue(os.path.isfile(os.path.join(casedir, xmlquery))) + + # Test command line options + with Case(casedir, read_only=True, non_local=True) as case: + STOP_N = case.get_value("STOP_N") + COMP_CLASSES = case.get_values("COMP_CLASSES") + BUILD_COMPLETE = case.get_value("BUILD_COMPLETE") + cmd = xmlquery + " --non-local STOP_N --value" + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue(output == str(STOP_N), msg="%s != %s" % (output, STOP_N)) + cmd = xmlquery + " --non-local BUILD_COMPLETE --value" + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + output = output == "TRUE" + self.assertTrue( + output == BUILD_COMPLETE, msg="%s != %s" % (output, BUILD_COMPLETE) + ) + # we expect DOCN_MODE to be undefined in this X compset + # this test assures that we do not try to resolve this as a compvar + cmd = xmlquery + " --non-local DOCN_MODE --value" + _, output, error = utils.run_cmd(cmd, from_dir=casedir) + self.assertTrue( + error == "ERROR: No results found for variable DOCN_MODE", + msg="unexpected result for DOCN_MODE, output {}, error {}".format( + output, error + ), + ) + + for comp in COMP_CLASSES: + caseresult = case.get_value("NTASKS_%s" % comp) + cmd = xmlquery + " --non-local NTASKS_%s --value" % comp + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue( + output == str(caseresult), msg="%s != %s" % (output, caseresult) + ) + cmd = xmlquery + " --non-local NTASKS --subgroup %s --value" % comp + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue( + output == str(caseresult), msg="%s != %s" % (output, caseresult) + ) + if self.MACHINE.has_batch_system(): + JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run") + cmd = xmlquery + " --non-local JOB_QUEUE --subgroup case.run --value" + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue( + output == JOB_QUEUE, msg="%s != %s" % (output, JOB_QUEUE) + ) + + cmd = xmlquery + " --non-local --listall" + utils.run_cmd_no_fail(cmd, from_dir=casedir) + + cls._do_teardown.append(cls._testroot) + + def test_f_createnewcase_with_user_compset(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testcreatenewcase_with_user_compset") + if os.path.exists(testdir): + shutil.rmtree(testdir) + + cls._testdirs.append(testdir) + + if self._config.test_mode == "cesm": + pesfile = os.path.join( + utils.get_src_root(), + "components", + "cmeps", + "cime_config", + "config_pes.xml", + ) + else: + pesfile = os.path.join( + utils.get_src_root(), "driver-mct", "cime_config", "config_pes.xml" + ) + + args = ( + "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" + % (testdir, pesfile, cls._testroot) + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s" % (self.SCRIPT_DIR, args), 
from_dir=self.SCRIPT_DIR + ) + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + self.run_cmd_assert_result("./case.build", from_dir=testdir) + + cls._do_teardown.append(testdir) + + def test_g_createnewcase_with_user_compset_and_env_mach_pes(self): + cls = self.__class__ + + testdir = os.path.join( + cls._testroot, "testcreatenewcase_with_user_compset_and_env_mach_pes" + ) + if os.path.exists(testdir): + shutil.rmtree(testdir) + previous_testdir = cls._testdirs[-1] + cls._testdirs.append(testdir) + + pesfile = os.path.join(previous_testdir, "env_mach_pes.xml") + args = ( + "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" + % (testdir, pesfile, cls._testroot) + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s" % (self.SCRIPT_DIR, args), from_dir=self.SCRIPT_DIR + ) + self.run_cmd_assert_result( + "diff env_mach_pes.xml %s" % (previous_testdir), from_dir=testdir + ) + # this line should cause the diff to fail (I assume no machine is going to default to 17 tasks) + self.run_cmd_assert_result("./xmlchange NTASKS=17", from_dir=testdir) + self.run_cmd_assert_result( + "diff env_mach_pes.xml %s" % (previous_testdir), + from_dir=testdir, + expected_stat=1, + ) + + cls._do_teardown.append(testdir) + + def test_h_primary_component(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testprimarycomponent") + if os.path.exists(testdir): + shutil.rmtree(testdir) + + cls._testdirs.append(testdir) + args = ( + " --case CreateNewcaseTest --script-root %s --compset X --output-root %s --handle-preexisting-dirs u" + % (testdir, cls._testroot) + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s" % (self.SCRIPT_DIR, args), from_dir=self.SCRIPT_DIR + ) + self.assertTrue(os.path.exists(testdir)) + self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) + + with Case(testdir, read_only=False) as case: + case._compsetname = case.get_value("COMPSET") + case.set_comp_classes(case.get_values("COMP_CLASSES")) + primary = case._find_primary_component() + self.assertEqual( + primary, + "drv", + msg="primary component test expected drv but got %s" % primary, + ) + # now we are going to corrupt the case so that we can do more primary_component testing + case.set_valid_values("COMP_GLC", "%s,fred" % case.get_value("COMP_GLC")) + case.set_value("COMP_GLC", "fred") + primary = case._find_primary_component() + self.assertEqual( + primary, + "fred", + msg="primary component test expected fred but got %s" % primary, + ) + case.set_valid_values("COMP_ICE", "%s,wilma" % case.get_value("COMP_ICE")) + case.set_value("COMP_ICE", "wilma") + primary = case._find_primary_component() + self.assertEqual( + primary, + "wilma", + msg="primary component test expected wilma but got %s" % primary, + ) + + 
case.set_valid_values( + "COMP_OCN", "%s,bambam,docn" % case.get_value("COMP_OCN") + ) + case.set_value("COMP_OCN", "bambam") + primary = case._find_primary_component() + self.assertEqual( + primary, + "bambam", + msg="primary component test expected bambam but got %s" % primary, + ) + + case.set_valid_values("COMP_LND", "%s,barney" % case.get_value("COMP_LND")) + case.set_value("COMP_LND", "barney") + primary = case._find_primary_component() + # This is a "J" compset + self.assertEqual( + primary, + "allactive", + msg="primary component test expected allactive but got %s" % primary, + ) + case.set_value("COMP_OCN", "docn") + case.set_valid_values("COMP_LND", "%s,barney" % case.get_value("COMP_LND")) + case.set_value("COMP_LND", "barney") + primary = case._find_primary_component() + self.assertEqual( + primary, + "barney", + msg="primary component test expected barney but got %s" % primary, + ) + case.set_valid_values("COMP_ATM", "%s,wilma" % case.get_value("COMP_ATM")) + case.set_value("COMP_ATM", "wilma") + primary = case._find_primary_component() + self.assertEqual( + primary, + "wilma", + msg="primary component test expected wilma but got %s" % primary, + ) + # this is a "E" compset + case._compsetname = case._compsetname.replace("XOCN", "DOCN%SOM") + primary = case._find_primary_component() + self.assertEqual( + primary, + "allactive", + msg="primary component test expected allactive but got %s" % primary, + ) + # finally a "B" compset + case.set_value("COMP_OCN", "bambam") + primary = case._find_primary_component() + self.assertEqual( + primary, + "allactive", + msg="primary component test expected allactive but got %s" % primary, + ) + + cls._do_teardown.append(testdir) + + def test_j_createnewcase_user_compset_vs_alias(self): + """ + Create a compset using the alias and another compset using the full compset name + and make sure they are the same by comparing the namelist files in CaseDocs. + Ignore the modelio files and clean the directory names out first. 
+ """ + cls = self.__class__ + + testdir1 = os.path.join(cls._testroot, "testcreatenewcase_user_compset") + if os.path.exists(testdir1): + shutil.rmtree(testdir1) + cls._testdirs.append(testdir1) + + args = " --case CreateNewcaseTest --script-root {} --compset 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV --output-root {} --handle-preexisting-dirs u".format( + testdir1, cls._testroot + ) + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "{}/create_newcase {}".format(self.SCRIPT_DIR, args), + from_dir=self.SCRIPT_DIR, + ) + self.run_cmd_assert_result("./case.setup ", from_dir=testdir1) + self.run_cmd_assert_result("./preview_namelists ", from_dir=testdir1) + + dir1 = os.path.join(testdir1, "CaseDocs") + dir2 = os.path.join(testdir1, "CleanCaseDocs") + os.mkdir(dir2) + for _file in os.listdir(dir1): + if "modelio" in _file: + continue + with open(os.path.join(dir1, _file), "r") as fi: + file_text = fi.read() + file_text = file_text.replace(os.path.basename(testdir1), "PATH") + file_text = re.sub(r"logfile =.*", "", file_text) + with open(os.path.join(dir2, _file), "w") as fo: + fo.write(file_text) + cleancasedocs1 = dir2 + + testdir2 = os.path.join(cls._testroot, "testcreatenewcase_alias_compset") + if os.path.exists(testdir2): + shutil.rmtree(testdir2) + cls._testdirs.append(testdir2) + args = " --case CreateNewcaseTest --script-root {} --compset ADSOMAQP --output-root {} --handle-preexisting-dirs u".format( + testdir2, cls._testroot + ) + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "{}/create_newcase {}".format(self.SCRIPT_DIR, args), + from_dir=self.SCRIPT_DIR, + ) + self.run_cmd_assert_result("./case.setup ", from_dir=testdir2) + self.run_cmd_assert_result("./preview_namelists ", from_dir=testdir2) + + dir1 = os.path.join(testdir2, "CaseDocs") + dir2 = os.path.join(testdir2, "CleanCaseDocs") + os.mkdir(dir2) + for _file in os.listdir(dir1): + if "modelio" in _file: + continue + with open(os.path.join(dir1, _file), "r") as fi: + file_text = fi.read() + file_text = file_text.replace(os.path.basename(testdir2), "PATH") + file_text = re.sub(r"logfile =.*", "", file_text) + with open(os.path.join(dir2, _file), "w") as fo: + fo.write(file_text) + + cleancasedocs2 = dir2 + dcmp = filecmp.dircmp(cleancasedocs1, cleancasedocs2) + self.assertTrue( + len(dcmp.diff_files) == 0, "CaseDocs differ {}".format(dcmp.diff_files) + ) + + cls._do_teardown.append(testdir1) + cls._do_teardown.append(testdir2) + + def test_k_append_config(self): + machlist_before = self.MACHINE.list_available_machines() + self.assertEqual( + len(machlist_before) > 1, True, msg="Problem reading machine list" + ) + + newmachfile = os.path.join( + utils.get_cime_root(), + "CIME", + "data", + "config", + "xml_schemas", + "config_machines_template.xml", + ) + 
self.MACHINE.read(newmachfile) + machlist_after = self.MACHINE.list_available_machines() + + self.assertEqual( + len(machlist_after) - len(machlist_before), + 1, + msg="Not able to append config_machines.xml {} {}".format( + len(machlist_after), len(machlist_before) + ), + ) + self.assertEqual( + "mymachine" in machlist_after, + True, + msg="Not able to append config_machines.xml", + ) + + def test_ka_createnewcase_extra_machines_dir(self): + # Test that we pick up changes in both config_machines.xml and + # cmake macros in a directory specified with the --extra-machines-dir + # argument to create_newcase. + cls = self.__class__ + casename = "testcreatenewcase_extra_machines_dir" + + # Setup: stage some xml files in a temporary directory + extra_machines_dir = os.path.join( + cls._testroot, "{}_machine_config".format(casename) + ) + os.makedirs(os.path.join(extra_machines_dir, "cmake_macros")) + cls._do_teardown.append(extra_machines_dir) + newmachfile = os.path.join( + utils.get_cime_root(), + "CIME", + "data", + "config", + "xml_schemas", + "config_machines_template.xml", + ) + utils.safe_copy( + newmachfile, os.path.join(extra_machines_dir, "config_machines.xml") + ) + cmake_macro_text = """\ +set(NETCDF_PATH /my/netcdf/path) +""" + cmake_macro_path = os.path.join( + extra_machines_dir, "cmake_macros", "mymachine.cmake" + ) + with open(cmake_macro_path, "w") as cmake_macro: + cmake_macro.write(cmake_macro_text) + + # Create the case + testdir = os.path.join(cls._testroot, casename) + if os.path.exists(testdir): + shutil.rmtree(testdir) + # In the following, note that 'mymachine' is the machine name defined in + # config_machines_template.xml + args = ( + " --case {testdir} --compset X --mach mymachine" + " --output-root {testroot} --non-local" + " --extra-machines-dir {extra_machines_dir}".format( + testdir=testdir, + testroot=cls._testroot, + extra_machines_dir=extra_machines_dir, + ) + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + self.run_cmd_assert_result( + "./create_newcase {}".format(args), from_dir=self.SCRIPT_DIR + ) + + args += f" --machine {self.MACHINE.get_machine_name()}" + + cls._do_teardown.append(testdir) + + # Run case.setup + self.run_cmd_assert_result("./case.setup --non-local", from_dir=testdir) + + # Make sure Macros file contains expected text + + with Case(testdir, non_local=True) as case: + with CmakeTmpBuildDir(macroloc=testdir) as cmaketmp: + macros_contents = cmaketmp.get_makefile_vars(case=case) + + expected_re = re.compile("NETCDF_PATH.*/my/netcdf/path") + self.assertTrue( + expected_re.search(macros_contents), + msg="{} not found in:\n{}".format(expected_re.pattern, macros_contents), + ) + + def test_m_createnewcase_alternate_drivers(self): + # Test that case.setup runs for nuopc and moab drivers + cls = self.__class__ + + # TODO refactor + if self._config.test_mode == "cesm": + alternative_driver = ("nuopc",) + else: + alternative_driver = ("moab",) + + for driver in alternative_driver: + if driver == "moab" and not os.path.exists( + os.path.join(utils.get_cime_root(), "src", "drivers", driver) + ): + self.skipTest( + "Skipping driver test for {}, driver not found".format(driver) + ) + if driver == "nuopc" and not os.path.exists( + os.path.join(utils.get_src_root(), "components", "cmeps") + ): + self.skipTest( + "Skipping driver test for {}, driver not found".format(driver) + ) + + testdir = 
os.path.join(cls._testroot, "testcreatenewcase.{}".format(driver)) + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} --handle-preexisting-dirs=r".format( + driver, testdir, cls._testroot + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + cls._testdirs.append(testdir) + self.run_cmd_assert_result( + "./create_newcase %s" % (args), from_dir=self.SCRIPT_DIR + ) + self.assertTrue(os.path.exists(testdir)) + self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) + + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + with Case(testdir, read_only=False) as case: + comp_interface = case.get_value("COMP_INTERFACE") + self.assertTrue( + driver == comp_interface, msg="%s != %s" % (driver, comp_interface) + ) + + cls._do_teardown.append(testdir) + + def test_n_createnewcase_bad_compset(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testcreatenewcase_bad_compset") + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = ( + " --case %s --compset InvalidCompsetName --output-root %s --handle-preexisting-dirs=r " + % (testdir, cls._testroot) + ) + if self._config.allow_unsupported: + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "./create_newcase %s" % (args), from_dir=self.SCRIPT_DIR, expected_stat=1 + ) + self.assertFalse(os.path.exists(testdir)) + + @classmethod + def tearDownClass(cls): + do_teardown = ( + len(cls._do_teardown) > 0 + and sys.exc_info() == (None, None, None) + and not cls.NO_TEARDOWN + ) + rmtestroot = True + for tfile in cls._testdirs: + if tfile not in cls._do_teardown: + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s" % tfile) + rmtestroot = False + elif do_teardown: + try: + print("Attempt to remove directory {}".format(tfile)) + shutil.rmtree(tfile) + except BaseException: + print("Could not remove directory {}".format(tfile)) + if rmtestroot and do_teardown: + shutil.rmtree(cls._testroot) diff --git a/CIME/tests/test_sys_full_system.py b/CIME/tests/test_sys_full_system.py new file mode 100644 index 00000000000..ad4485d2b81 --- /dev/null +++ b/CIME/tests/test_sys_full_system.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 + +import os + +from CIME import get_tests +from CIME import test_status +from CIME import utils +from CIME import wait_for_tests +from CIME.tests import base + + +class TestFullSystem(base.BaseTestCase): + def test_full_system(self): + # Put this inside any test that's slow + if self.FAST_ONLY: + self.skipTest("Skipping slow test") + + driver = utils.get_cime_default_driver() + if driver == "mct": + cases = self._create_test( + ["--walltime=0:15:00", "cime_developer"], test_id=self._baseline_name + ) + else: + cases = self._create_test( + ["--walltime=0:30:00", "cime_developer"], test_id=self._baseline_name + ) + + self.run_cmd_assert_result( + "%s/cs.status.%s" % 
(self._testroot, self._baseline_name), + from_dir=self._testroot, + ) + + # Ensure that we can get test times + for case_dir in cases: + tstatus = os.path.join(case_dir, "TestStatus") + test_time = wait_for_tests.get_test_time(os.path.dirname(tstatus)) + self.assertIs( + type(test_time), int, msg="get time did not return int for %s" % tstatus + ) + self.assertTrue(test_time > 0, msg="test time was zero for %s" % tstatus) + + # Test that re-running works + skip_tests = None + if utils.get_cime_default_driver() == "nuopc": + skip_tests = [ + "SMS_Ln3.T42_T42.S", + "PRE.f19_f19.ADESP_TEST", + "PRE.f19_f19.ADESP", + "DAE.ww3a.ADWAV", + "IRT_N2_Vmct_Ln9.f19_g16.A", + ] + tests = get_tests.get_test_suite( + "cime_developer", + machine=self._machine, + compiler=self._compiler, + skip_tests=skip_tests, + ) + for test in tests: + casedir = self.get_casedir(test, cases) + + # Subtle issue: The run phases of these tests will be in the PASS state until + # the submitted case.test script is run, which could take a while if the system is + # busy. This potentially leaves a window where the wait_for_tests command below will + # not wait for the re-submitted jobs to run because it sees the original PASS. + # The code below forces things back to PEND to avoid this race condition. Note + # that we must use the MEMLEAK phase, not the RUN phase, because RUN being in a non-PEND + # state is how system tests know they are being re-run and must reset certain + # case settings. + if self._hasbatch: + with test_status.TestStatus(test_dir=casedir) as ts: + ts.set_status( + test_status.MEMLEAK_PHASE, test_status.TEST_PEND_STATUS + ) + + self.run_cmd_assert_result( + "./case.submit --skip-preview-namelist", from_dir=casedir + ) + + self._wait_for_tests(self._baseline_name) diff --git a/CIME/tests/test_sys_grid_generation.py b/CIME/tests/test_sys_grid_generation.py new file mode 100644 index 00000000000..5a889faa9b0 --- /dev/null +++ b/CIME/tests/test_sys_grid_generation.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +import os +import shutil +import sys + +from CIME import utils +from CIME.tests import base + + +class TestGridGeneration(base.BaseTestCase): + @classmethod + def setUpClass(cls): + cls._do_teardown = [] + cls._testroot = os.path.join(cls.TEST_ROOT, "TestGridGeneration") + cls._testdirs = [] + + def test_gen_domain(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping gen_domain test. 
Depends on E3SM tools") + cime_root = utils.get_cime_root() + inputdata = self.MACHINE.get_value("DIN_LOC_ROOT") + + tool_name = "test_gen_domain" + tool_location = os.path.join( + cime_root, "tools", "mapping", "gen_domain_files", "test_gen_domain.sh" + ) + args = "--cime_root={} --inputdata_root={}".format(cime_root, inputdata) + + cls = self.__class__ + test_dir = os.path.join(cls._testroot, tool_name) + cls._testdirs.append(test_dir) + os.makedirs(test_dir) + self.run_cmd_assert_result( + "{} {}".format(tool_location, args), from_dir=test_dir + ) + cls._do_teardown.append(test_dir) + + @classmethod + def tearDownClass(cls): + do_teardown = ( + len(cls._do_teardown) > 0 + and sys.exc_info() == (None, None, None) + and not cls.NO_TEARDOWN + ) + teardown_root = True + for tfile in cls._testdirs: + if tfile not in cls._do_teardown: + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s" % tfile) + teardown_root = False + elif do_teardown: + shutil.rmtree(tfile) + + if teardown_root and do_teardown: + shutil.rmtree(cls._testroot) diff --git a/CIME/tests/test_sys_jenkins_generic_job.py b/CIME/tests/test_sys_jenkins_generic_job.py new file mode 100644 index 00000000000..30b31c5c8d6 --- /dev/null +++ b/CIME/tests/test_sys_jenkins_generic_job.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3 + +import glob +import os +import signal +import stat +import threading +import time + +from CIME import get_tests +from CIME import utils +from CIME.tests import base + + +class TestJenkinsGenericJob(base.BaseTestCase): + def setUp(self): + super().setUp() + + if self._config.test_mode == "cesm": + self.skipTest("Skipping Jenkins tests. E3SM feature") + + # Need to run in a subdir in order to not have CTest clash. Name it + # such that it should be cleaned up by the parent tearDown + self._testdir = os.path.join( + self._testroot, "jenkins_test_%s" % self._baseline_name + ) + os.makedirs(self._testdir) + + # Change root to avoid clashing with other jenkins_generic_jobs + self._jenkins_root = os.path.join(self._testdir, "J") + + def tearDown(self): + super().tearDown() + + if "TESTRUNDIFF_ALTERNATE" in os.environ: + del os.environ["TESTRUNDIFF_ALTERNATE"] + + def simple_test(self, expect_works, extra_args, build_name=None): + if self.NO_BATCH: + extra_args += " --no-batch" + + # Need these flags to test dashboard if e3sm + if self._config.test_mode == "e3sm" and build_name is not None: + extra_args += ( + " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" + % build_name + ) + + self.run_cmd_assert_result( + "%s/jenkins_generic_job -r %s %s -B %s" + % (self.TOOLS_DIR, self._testdir, extra_args, self._baseline_area), + from_dir=self._testdir, + expected_stat=(0 if expect_works else utils.TESTS_FAILED_ERR_CODE), + shell=False, + ) + + def threaded_test(self, expect_works, extra_args, build_name=None): + try: + self.simple_test(expect_works, extra_args, build_name) + except AssertionError as e: + self._thread_error = str(e) + + def assert_num_leftovers(self, suite): + num_tests_in_suite = len(get_tests.get_test_suite(suite)) + + case_glob = "%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize()) + jenkins_dirs = glob.glob(case_glob) # Case dirs + # scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs + + self.assertEqual( + num_tests_in_suite, + len(jenkins_dirs), + msg="Wrong number of leftover directories in %s, expected %d, see %s. 
Glob checked %s" + % (self._jenkins_root, num_tests_in_suite, jenkins_dirs, case_glob), + ) + + # JGF: Can't test this at the moment due to root change flag given to jenkins_generic_job + # self.assertEqual(num_tests_in_tiny + 1, len(scratch_dirs), + # msg="Wrong number of leftover directories in %s, expected %d, see %s" % \ + # (self._testroot, num_tests_in_tiny, scratch_dirs)) + + def test_jenkins_generic_job(self): + # Generate fresh baselines so that this test is not impacted by + # unresolved diffs + self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name) + self.assert_num_leftovers("cime_test_only_pass") + + build_name = "jenkins_generic_job_pass_%s" % utils.get_timestamp() + self.simple_test( + True, + "-t cime_test_only_pass -b %s" % self._baseline_name, + build_name=build_name, + ) + self.assert_num_leftovers( + "cime_test_only_pass" + ) # jenkins_generic_job should have automatically cleaned up leftovers from prior run + self.assert_dashboard_has_build(build_name) + + def test_jenkins_generic_job_save_timing(self): + self.simple_test( + True, "-t cime_test_timing --save-timing -b %s" % self._baseline_name + ) + self.assert_num_leftovers("cime_test_timing") + + jenkins_dirs = glob.glob( + "%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize()) + ) # case dirs + case = jenkins_dirs[0] + result = self.run_cmd_assert_result( + "./xmlquery --value SAVE_TIMING", from_dir=case + ) + self.assertEqual(result, "TRUE") + + def test_jenkins_generic_job_kill(self): + build_name = "jenkins_generic_job_kill_%s" % utils.get_timestamp() + run_thread = threading.Thread( + target=self.threaded_test, + args=(False, " -t cime_test_only_slow_pass -b master", build_name), + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(120) + + self.kill_subprocesses(sig=signal.SIGTERM) + + run_thread.join(timeout=30) + + self.assertFalse( + run_thread.is_alive(), msg="jenkins_generic_job should have finished" + ) + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + self.assert_dashboard_has_build(build_name) + + def test_jenkins_generic_job_realistic_dash(self): + # The actual quality of the cdash results for this test can only + # be inspected manually + + # Generate fresh baselines so that this test is not impacted by + # unresolved diffs + self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name) + self.assert_num_leftovers("cime_test_all") + + # Should create a diff + os.environ["TESTRUNDIFF_ALTERNATE"] = "True" + + # Should create a nml diff + # Modify namelist + fake_nl = """ + &fake_nml + fake_item = 'fake' + fake = .true. 
+/""" + baseline_glob = glob.glob( + os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*") + ) + self.assertEqual( + len(baseline_glob), + 1, + msg="Expected one match, got:\n%s" % "\n".join(baseline_glob), + ) + + for baseline_dir in baseline_glob: + nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in") + self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path) + + os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR) + with open(nl_path, "a") as nl_file: + nl_file.write(fake_nl) + + build_name = "jenkins_generic_job_mixed_%s" % utils.get_timestamp() + self.simple_test( + False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name + ) + self.assert_num_leftovers( + "cime_test_all" + ) # jenkins_generic_job should have automatically cleaned up leftovers from prior run + self.assert_dashboard_has_build(build_name) diff --git a/CIME/tests/test_sys_manage_and_query.py b/CIME/tests/test_sys_manage_and_query.py new file mode 100644 index 00000000000..31ae3392bdb --- /dev/null +++ b/CIME/tests/test_sys_manage_and_query.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 + +from CIME import utils +from CIME.tests import base +from CIME.XML.files import Files + + +class TestManageAndQuery(base.BaseTestCase): + """Tests various scripts to manage and query xml files""" + + def setUp(self): + super().setUp() + + if self._config.test_mode == "e3sm": + self.skipTest("Skipping XML test management tests. E3SM does not use this.") + + def _run_and_assert_query_testlist(self, extra_args=""): + """Ensure that query_testlist runs successfully with the given extra arguments""" + files = Files(self._driver) + testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component": "drv"}) + + self.run_cmd_assert_result( + "{}/query_testlists --xml-testlist {} {}".format( + self.SCRIPT_DIR, testlist_drv, extra_args + ) + ) + + def test_query_testlists_runs(self): + """Make sure that query_testlists runs successfully + + This simply makes sure that query_testlists doesn't generate any errors + when it runs. This helps ensure that changes in other utilities don't + break query_testlists. 
+ """ + self._run_and_assert_query_testlist(extra_args="--show-options") + + def test_query_testlists_define_testtypes_runs(self): + """Make sure that query_testlists runs successfully with the --define-testtypes argument""" + self._run_and_assert_query_testlist(extra_args="--define-testtypes") + + def test_query_testlists_count_runs(self): + """Make sure that query_testlists runs successfully with the --count argument""" + self._run_and_assert_query_testlist(extra_args="--count") + + def test_query_testlists_list_runs(self): + """Make sure that query_testlists runs successfully with the --list argument""" + self._run_and_assert_query_testlist(extra_args="--list categories") diff --git a/CIME/tests/test_sys_query_config.py b/CIME/tests/test_sys_query_config.py new file mode 100644 index 00000000000..1aee428cc16 --- /dev/null +++ b/CIME/tests/test_sys_query_config.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 + +from CIME import utils +from CIME.tests import base + + +class TestQueryConfig(base.BaseTestCase): + def setUp(self): + super().setUp() + + def test_query_compsets(self): + utils.run_cmd_no_fail("{}/query_config --compsets".format(self.SCRIPT_DIR)) + + def test_query_components(self): + utils.run_cmd_no_fail("{}/query_config --components".format(self.SCRIPT_DIR)) + + def test_query_grids(self): + utils.run_cmd_no_fail("{}/query_config --grids".format(self.SCRIPT_DIR)) + + def test_query_machines(self): + utils.run_cmd_no_fail("{}/query_config --machines".format(self.SCRIPT_DIR)) diff --git a/CIME/tests/test_sys_run_restart.py b/CIME/tests/test_sys_run_restart.py new file mode 100644 index 00000000000..4a7fb9e2391 --- /dev/null +++ b/CIME/tests/test_sys_run_restart.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +import os + +from CIME import utils +from CIME.tests import base + + +class TestRunRestart(base.BaseTestCase): + def test_run_restart(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + driver = utils.get_cime_default_driver() + if driver == "mct": + walltime = "00:15:00" + else: + walltime = "00:30:00" + + casedir = self._create_test( + ["--walltime " + walltime, "NODEFAIL_P1.f09_g16.X"], + test_id=self._baseline_name, + ) + rundir = utils.run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) + fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL") + self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel) + + self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 3) + + def test_run_restart_too_many_fails(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + driver = utils.get_cime_default_driver() + if driver == "mct": + walltime = "00:15:00" + else: + walltime = "00:30:00" + + casedir = self._create_test( + ["--walltime " + walltime, "NODEFAIL_P1.f09_g16.X"], + test_id=self._baseline_name, + env_changes="NODEFAIL_NUM_FAILS=5", + run_errors=True, + ) + rundir = utils.run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) + fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL") + self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel) + + self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 4) diff --git a/CIME/tests/test_sys_save_timings.py b/CIME/tests/test_sys_save_timings.py new file mode 100644 index 00000000000..ee4a964c4b3 --- /dev/null +++ b/CIME/tests/test_sys_save_timings.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 + +import getpass +import glob +import os + +from CIME import provenance +from CIME import utils +from CIME.tests import base 
+from CIME.case.case import Case + + +class TestSaveTimings(base.BaseTestCase): + def simple_test(self, manual_timing=False): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + timing_flag = "" if manual_timing else "--save-timing" + driver = utils.get_cime_default_driver() + if driver == "mct": + walltime = "00:15:00" + else: + walltime = "00:30:00" + self._create_test( + ["SMS_Ln9_P1.f19_g16.A", timing_flag, "--walltime=" + walltime], + test_id=self._baseline_name, + ) + + statuses = glob.glob( + "%s/*%s/TestStatus" % (self._testroot, self._baseline_name) + ) + self.assertEqual( + len(statuses), + 1, + msg="Should have had exactly one match, found %s" % statuses, + ) + casedir = os.path.dirname(statuses[0]) + + with Case(casedir, read_only=True) as case: + lids = utils.get_lids(case) + timing_dir = case.get_value("SAVE_TIMING_DIR") + casename = case.get_value("CASE") + + self.assertEqual(len(lids), 1, msg="Expected one LID, found %s" % lids) + + if manual_timing: + self.run_cmd_assert_result( + "cd %s && %s/save_provenance postrun" % (casedir, self.TOOLS_DIR) + ) + if self._config.test_mode == "e3sm": + provenance_glob = os.path.join( + timing_dir, + "performance_archive", + getpass.getuser(), + casename, + lids[0] + "*", + ) + provenance_dirs = glob.glob(provenance_glob) + self.assertEqual( + len(provenance_dirs), + 1, + msg="wrong number of provenance dirs, expected 1, got {}, looked for {}".format( + provenance_dirs, provenance_glob + ), + ) + self.verify_perms("".join(provenance_dirs)) + + def test_save_timings(self): + self.simple_test() + + def test_save_timings_manual(self): + self.simple_test(manual_timing=True) + + def _record_success( + self, + test_name, + test_success, + commit, + exp_last_pass, + exp_trans_fail, + baseline_dir, + ): + provenance.save_test_success( + baseline_dir, None, test_name, test_success, force_commit_test=commit + ) + was_success, last_pass, trans_fail = provenance.get_test_success( + baseline_dir, None, test_name, testing=True + ) + self.assertEqual( + test_success, + was_success, + msg="Broken was_success {} {}".format(test_name, commit), + ) + self.assertEqual( + last_pass, + exp_last_pass, + msg="Broken last_pass {} {}".format(test_name, commit), + ) + self.assertEqual( + trans_fail, + exp_trans_fail, + msg="Broken trans_fail {} {}".format(test_name, commit), + ) + if test_success: + self.assertEqual(exp_last_pass, commit, msg="Should never") + + def test_success_recording(self): + if self._config.test_mode == "e3sm": + self.skipTest("Skipping success recording tests. 
E3SM feature") + + fake_test1 = "faketest1" + fake_test2 = "faketest2" + baseline_dir = os.path.join(self._baseline_area, self._baseline_name) + + # Test initial state + was_success, last_pass, trans_fail = provenance.get_test_success( + baseline_dir, None, fake_test1, testing=True + ) + self.assertFalse(was_success, msg="Broken initial was_success") + self.assertEqual(last_pass, None, msg="Broken initial last_pass") + self.assertEqual(trans_fail, None, msg="Broken initial trans_fail") + + # Test first result (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "AAA", None, "AAA", baseline_dir) + self._record_success(fake_test2, True, "AAA", "AAA", None, baseline_dir) + + # Test second result matches first (no transition) (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "BBB", None, "AAA", baseline_dir) + self._record_success(fake_test2, True, "BBB", "BBB", None, baseline_dir) + + # Test transition to new state (first real transition) (test1 passes, test2 fails) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, True, "CCC", "CCC", "AAA", baseline_dir) + self._record_success(fake_test2, False, "CCC", "BBB", "CCC", baseline_dir) + + # Test transition to new state (second real transition) (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "DDD", "CCC", "DDD", baseline_dir) + self._record_success(fake_test2, True, "DDD", "DDD", "CCC", baseline_dir) + + # Test final repeat (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "EEE", "CCC", "DDD", baseline_dir) + self._record_success(fake_test2, True, "EEE", "EEE", "CCC", baseline_dir) + + # Test final transition (test1 passes, test2 fails) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, True, "FFF", "FFF", "DDD", baseline_dir) + self._record_success(fake_test2, False, "FFF", "EEE", "FFF", baseline_dir) diff --git a/CIME/tests/test_sys_single_submit.py b/CIME/tests/test_sys_single_submit.py new file mode 100644 index 00000000000..fed850263df --- /dev/null +++ b/CIME/tests/test_sys_single_submit.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 + +from CIME import utils +from CIME.tests import base + + +class TestSingleSubmit(base.BaseTestCase): + def test_single_submit(self): + # Skip unless on a batch system and users did not select no-batch + if not self._hasbatch: + self.skipTest("Skipping single submit. Not valid without batch") + if self._config.test_mode == "cesm": + self.skipTest("Skipping single submit. E3SM experimental feature") + if self._machine not in ["sandiatoss3"]: + self.skipTest("Skipping single submit. 
Only works on sandiatoss3") + + # Keep small enough for now that we don't have to worry about load balancing + self._create_test( + ["--single-submit", "SMS_Ln9_P8.f45_g37.A", "SMS_Ln9_P8.f19_g16.A"], + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) diff --git a/CIME/tests/test_sys_test_scheduler.py b/CIME/tests/test_sys_test_scheduler.py new file mode 100755 index 00000000000..c2520223224 --- /dev/null +++ b/CIME/tests/test_sys_test_scheduler.py @@ -0,0 +1,591 @@ +#!/usr/bin/env python3 + +import re +import glob +import logging +import os +import unittest +from unittest import mock + +from CIME import get_tests +from CIME import utils +from CIME import test_status +from CIME import test_scheduler +from CIME.tests import base + + +class TestTestScheduler(base.BaseTestCase): + def get_default_tests(self): + # exclude the MEMLEAK tests here. + return get_tests.get_full_test_names( + [ + "cime_test_only", + "^TESTMEMLEAKFAIL_P1.f09_g16.X", + "^TESTMEMLEAKPASS_P1.f09_g16.X", + "^TESTRUNSTARCFAIL_P1.f19_g16.A", + "^TESTTESTDIFF_P1.f19_g16.A", + "^TESTBUILDFAILEXC_P1.f19_g16.A", + "^TESTRUNFAILEXC_P1.f19_g16.A", + ], + self._machine, + self._compiler, + ) + + @mock.patch("time.strftime", return_value="00:00:00") + def test_chksum(self, strftime): # pylint: disable=unused-argument + if self._config.test_mode == "e3sm": + self.skipTest("Skipping chksum test. Depends on CESM settings") + + ts = test_scheduler.TestScheduler( + ["SEQ_Ln9.f19_g16.A.perlmutter_gnu"], + machine_name="perlmutter", + chksum=True, + test_root="/tests", + ) + + with mock.patch.object(ts, "_shell_cmd_for_phase") as _shell_cmd_for_phase: + ts._run_phase( + "SEQ_Ln9.f19_g16.A.perlmutter_gnu" + ) # pylint: disable=protected-access + + _shell_cmd_for_phase.assert_called_with( + "SEQ_Ln9.f19_g16.A.perlmutter_gnu", + "./case.submit --skip-preview-namelist --chksum", + "RUN", + from_dir="/tests/SEQ_Ln9.f19_g16.A.perlmutter_gnu.00:00:00", + ) + + def test_testmods(self): + if self._config.test_mode == "cesm": + self.skipTest("Skipping testmods test. 
Depends on E3SM settings") + + tests = self.get_default_tests() + ct = test_scheduler.TestScheduler( + tests, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + with mock.patch.object(ct, "_shell_cmd_for_phase"): + ct._create_newcase_phase("TESTRUNPASS_P1.f19_g16.A.docker_gnu.eam-rrtmgp") + + create_newcase_cmd = ct._shell_cmd_for_phase.call_args.args[1] + + assert ( + re.search(r"--user-mods-dir .*eam/rrtmgp", create_newcase_cmd) + is not None + ), create_newcase_cmd + + def test_testmods_malformed(self): + tests = self.get_default_tests() + ct = test_scheduler.TestScheduler( + tests, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + with mock.patch.object(ct, "_shell_cmd_for_phase"): + success, message = ct._create_newcase_phase( + "TESTRUNPASS_P1.f19_g16.A.docker_gnu.notacomponent?fun" + ) + + assert not success + assert ( + message + == "Invalid testmod, format should be `${component}-${testmod}`, got 'notacomponent?fun'" + ), message + + def test_testmods_missing(self): + tests = self.get_default_tests() + ct = test_scheduler.TestScheduler( + tests, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + with mock.patch.object(ct, "_shell_cmd_for_phase"): + success, message = ct._create_newcase_phase( + "TESTRUNPASS_P1.f19_g16.A.docker_gnu.notacomponent-fun" + ) + + assert not success + assert ( + re.search("Could not locate testmod 'fun'", message) is not None + ), message + + def test_a_phases(self): + tests = self.get_default_tests() + self.assertEqual(len(tests), 3) + ct = test_scheduler.TestScheduler( + tests, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0] + run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0] + pass_test = [item for item in tests if "TESTRUNPASS" in item][0] + + self.assertTrue( + "BUILDFAIL" in build_fail_test, msg="Wrong test '%s'" % build_fail_test + ) + self.assertTrue( + "RUNFAIL" in run_fail_test, msg="Wrong test '%s'" % run_fail_test + ) + self.assertTrue("RUNPASS" in pass_test, msg="Wrong test '%s'" % pass_test) + + for idx, phase in enumerate(ct._phases): + for test in ct._tests: + if phase == test_scheduler.TEST_START: + continue + elif phase == test_status.MODEL_BUILD_PHASE: + ct._update_test_status(test, phase, test_status.TEST_PEND_STATUS) + + if test == build_fail_test: + ct._update_test_status( + test, phase, test_status.TEST_FAIL_STATUS + ) + self.assertTrue(ct._is_broken(test)) + self.assertFalse(ct._work_remains(test)) + else: + ct._update_test_status( + test, phase, test_status.TEST_PASS_STATUS + ) + self.assertFalse(ct._is_broken(test)) + self.assertTrue(ct._work_remains(test)) + + elif phase == test_status.RUN_PHASE: + if test == build_fail_test: + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_PEND_STATUS + ) + else: + ct._update_test_status( + test, phase, test_status.TEST_PEND_STATUS + ) + self.assertFalse(ct._work_remains(test)) + + if test == run_fail_test: + ct._update_test_status( + test, phase, test_status.TEST_FAIL_STATUS + ) + 
self.assertTrue(ct._is_broken(test)) + else: + ct._update_test_status( + test, phase, test_status.TEST_PASS_STATUS + ) + self.assertFalse(ct._is_broken(test)) + + self.assertFalse(ct._work_remains(test)) + + else: + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, ct._phases[idx + 1], test_status.TEST_PEND_STATUS + ) + + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_PASS_STATUS + ) + + ct._update_test_status(test, phase, test_status.TEST_PEND_STATUS) + self.assertFalse(ct._is_broken(test)) + self.assertTrue(ct._work_remains(test)) + + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_PEND_STATUS + ) + + ct._update_test_status(test, phase, test_status.TEST_PASS_STATUS) + + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_FAIL_STATUS + ) + + self.assertFalse(ct._is_broken(test)) + self.assertTrue(ct._work_remains(test)) + + def test_b_full(self): + tests = get_tests.get_full_test_names( + ["cime_test_only"], self._machine, self._compiler + ) + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + build_fail_test = [item for item in tests if "TESTBUILDFAIL_" in item][0] + build_fail_exc_test = [item for item in tests if "TESTBUILDFAILEXC" in item][0] + run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0] + run_fail_exc_test = [item for item in tests if "TESTRUNFAILEXC" in item][0] + pass_test = [item for item in tests if "TESTRUNPASS" in item][0] + test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0] + mem_fail_test = [item for item in tests if "TESTMEMLEAKFAIL" in item][0] + mem_pass_test = [item for item in tests if "TESTMEMLEAKPASS" in item][0] + st_arch_fail_test = [item for item in tests if "TESTRUNSTARCFAIL" in item][0] + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + self._wait_for_tests(test_id, expect_works=False) + + test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) + self.assertEqual(len(tests), len(test_statuses)) + + for x in test_statuses: + ts = test_status.TestStatus(test_dir=os.path.dirname(x)) + test_name = ts.get_name() + log_files = glob.glob( + "%s/%s*%s/TestStatus.log" % (self._testroot, test_name, test_id) + ) + self.assertEqual( + len(log_files), + 1, + "Expected exactly one test_status.TestStatus.log file, found %d" + % len(log_files), + ) + log_file = log_files[0] + if test_name == build_fail_test: + + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_FAIL_STATUS, + ) + data = open(log_file, "r").read() + self.assertTrue( + "Intentional fail for testing infrastructure" in data, + "Broken test did not report build error:\n%s" % data, + ) + elif test_name == build_fail_exc_test: + data = open(log_file, "r").read() + self.assert_test_status( + test_name, + ts, + test_status.SHAREDLIB_BUILD_PHASE, + test_status.TEST_FAIL_STATUS, + ) + self.assertTrue( + "Exception from init" in data, + "Broken test did not report build error:\n%s" % data, + ) + elif test_name == run_fail_test: + self.assert_test_status( + 
test_name, ts, test_status.RUN_PHASE, test_status.TEST_FAIL_STATUS + ) + elif test_name == run_fail_exc_test: + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_FAIL_STATUS + ) + data = open(log_file, "r").read() + self.assertTrue( + "Exception from run_phase" in data, + "Broken test did not report run error:\n%s" % data, + ) + elif test_name == mem_fail_test: + self.assert_test_status( + test_name, + ts, + test_status.MEMLEAK_PHASE, + test_status.TEST_FAIL_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + elif test_name == test_diff_test: + self.assert_test_status( + test_name, ts, "COMPARE_base_rest", test_status.TEST_FAIL_STATUS + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + elif test_name == st_arch_fail_test: + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + self.assert_test_status( + test_name, + ts, + test_status.STARCHIVE_PHASE, + test_status.TEST_FAIL_STATUS, + ) + else: + self.assertTrue(test_name in [pass_test, mem_pass_test]) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + if test_name == mem_pass_test: + self.assert_test_status( + test_name, + ts, + test_status.MEMLEAK_PHASE, + test_status.TEST_PASS_STATUS, + ) + + def test_force_rebuild(self): + tests = get_tests.get_full_test_names( + [ + "TESTBUILDFAIL_P1.f19_g16.A", + "TESTRUNFAIL_P1.f19_g16.A", + "TESTRUNPASS_P1.f19_g16.A", + ], + self._machine, + self._compiler, + ) + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + force_rebuild=True, + use_existing=True, + ) + + test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) + + for x in test_statuses: + casedir = os.path.dirname(x) + + ts = test_status.TestStatus(test_dir=casedir) + + self.assertTrue( + ts.get_status(test_status.SHAREDLIB_BUILD_PHASE) + == test_status.TEST_PEND_STATUS + ) + + def test_c_use_existing(self): + tests = get_tests.get_full_test_names( + [ + "TESTBUILDFAIL_P1.f19_g16.A", + "TESTRUNFAIL_P1.f19_g16.A", + "TESTRUNPASS_P1.f19_g16.A", + ], + self._machine, + self._compiler, + ) + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0] + run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0] + pass_test = [item for item in tests if "TESTRUNPASS" in item][0] + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: 
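+ # Run the suite with logging silenced, since build/run failures are intentional in these tests; the saved level is reapplied in the finally block below.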
+ ct.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) + self.assertEqual(len(tests), len(test_statuses)) + + self._wait_for_tests(test_id, expect_works=False) + + for x in test_statuses: + casedir = os.path.dirname(x) + ts = test_status.TestStatus(test_dir=casedir) + test_name = ts.get_name() + if test_name == build_fail_test: + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_FAIL_STATUS, + ) + with test_status.TestStatus(test_dir=casedir) as ts: + ts.set_status( + test_status.MODEL_BUILD_PHASE, test_status.TEST_PEND_STATUS + ) + elif test_name == run_fail_test: + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_FAIL_STATUS + ) + with test_status.TestStatus(test_dir=casedir) as ts: + ts.set_status( + test_status.SUBMIT_PHASE, test_status.TEST_PEND_STATUS + ) + else: + self.assertTrue(test_name == pass_test) + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, + ts, + test_status.SUBMIT_PHASE, + test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + + os.environ["TESTBUILDFAIL_PASS"] = "True" + os.environ["TESTRUNFAIL_PASS"] = "True" + ct2 = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + use_existing=True, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct2.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + self._wait_for_tests(test_id) + + for x in test_statuses: + ts = test_status.TestStatus(test_dir=os.path.dirname(x)) + test_name = ts.get_name() + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.SUBMIT_PHASE, test_status.TEST_PASS_STATUS + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + + del os.environ["TESTBUILDFAIL_PASS"] + del os.environ["TESTRUNFAIL_PASS"] + + # test that passed tests are not re-run + + ct2 = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + use_existing=True, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct2.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + self._wait_for_tests(test_id) + + for x in test_statuses: + ts = test_status.TestStatus(test_dir=os.path.dirname(x)) + test_name = ts.get_name() + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.SUBMIT_PHASE, test_status.TEST_PASS_STATUS + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + + def test_d_retry(self): + args = [ + "TESTBUILDFAIL_P1.f19_g16.A", + "TESTRUNFAILRESET_P1.f19_g16.A", + "TESTRUNPASS_P1.f19_g16.A", + "--retry=1", + ] + + self._create_test(args) + + def 
test_e_test_inferred_compiler(self): + if self._config.test_mode != "e3sm" or self._machine != "docker": + self.skipTest("Skipping create_test test. Depends on E3SM settings") + + args = ["SMS.f19_g16.A.docker_gnuX", "--no-setup"] + + case = self._create_test(args, default_baseline_area=True) + result = self.run_cmd_assert_result( + "./xmlquery --value BASELINE_ROOT", from_dir=case + ) + self.assertEqual(os.path.split(result)[1], "gnuX") + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_sys_unittest.py b/CIME/tests/test_sys_unittest.py new file mode 100755 index 00000000000..3baeacac038 --- /dev/null +++ b/CIME/tests/test_sys_unittest.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 + +import os +import shutil +import sys +import re + +from CIME import utils +from CIME.tests import base +from CIME.XML.files import Files + + +class TestUnitTest(base.BaseTestCase): + @classmethod + def setUpClass(cls): + cls._do_teardown = [] + cls._testroot = os.path.join(cls.TEST_ROOT, "TestUnitTests") + cls._testdirs = [] + + def setUp(self): + super().setUp() + + self._driver = utils.get_cime_default_driver() + self._has_pfunit = self._has_unit_test_support() + + def _has_unit_test_support(self): + cmake_macros_dir = Files().get_value("CMAKE_MACROS_DIR") + cmake_machine_macros_dir = os.path.join(cmake_macros_dir, "..", self._machine) + + macros_to_check = [ + os.path.join( + cmake_macros_dir, + "{}_{}.cmake".format(self._compiler, self._machine), + ), + os.path.join(cmake_macros_dir, "{}.cmake".format(self._machine)), + os.path.join( + os.environ.get("HOME"), + ".cime", + "{}_{}.cmake".format(self._compiler, self._machine), + ), + os.path.join( + os.environ.get("HOME"), ".cime", "{}.cmake".format(self._machine) + ), + os.path.join( + cmake_machine_macros_dir, + "{}_{}.cmake".format(self._compiler, self._machine), + ), + os.path.join(cmake_machine_macros_dir, "{}.cmake".format(self._machine)), + ] + env_ref_re = re.compile(r"\$ENV\{(\w+)\}") + + for macro_to_check in macros_to_check: + if os.path.exists(macro_to_check): + with open(macro_to_check, "r") as f: + while True: + line = f.readline().strip() + if not line: + break + if "PFUNIT_PATH" in line: + path = line.split(" ")[1][1:-2] + m = env_ref_re.match(path) + if m: + env_var = m.groups()[0] + env_var_exists = env_var in os.environ + if env_var_exists: + path = path.replace( + "$ENV{" + env_var + "}", os.environ[env_var] + ) + if os.path.exists(path): + return True + + return False + + def test_a_unit_test(self): + cls = self.__class__ + if not self._has_pfunit: + self.skipTest( + "Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine" + ) + test_dir = os.path.join(cls._testroot, "unit_tester_test") + cls._testdirs.append(test_dir) + os.makedirs(test_dir) + unit_test_tool = os.path.abspath( + os.path.join( + utils.get_cime_root(), "scripts", "fortran_unit_testing", "run_tests.py" + ) + ) + test_spec_dir = os.path.join( + os.path.dirname(unit_test_tool), "Examples", "interpolate_1d", "tests" + ) + args = f"--build-dir {test_dir} --test-spec-dir {test_spec_dir} --machine {self._machine} --compiler {self._compiler} --comp-interface {self._driver}" + utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args)) + cls._do_teardown.append(test_dir) + + def test_b_cime_f90_unit_tests(self): + cls = self.__class__ + if self.FAST_ONLY: + self.skipTest("Skipping slow test") + + if not self._has_unit_test_support(): + self.skipTest( + "Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler 
on this machine" + ) + + test_dir = os.path.join(cls._testroot, "driver_f90_tests") + cls._testdirs.append(test_dir) + os.makedirs(test_dir) + test_spec_dir = utils.get_cime_root() + unit_test_tool = os.path.abspath( + os.path.join( + test_spec_dir, "scripts", "fortran_unit_testing", "run_tests.py" + ) + ) + args = f"--build-dir {test_dir} --test-spec-dir {test_spec_dir} --machine {self._machine} --compiler {self._compiler} --comp-interface {self._driver}" + utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args)) + cls._do_teardown.append(test_dir) + + @classmethod + def tearDownClass(cls): + do_teardown = ( + len(cls._do_teardown) > 0 + and sys.exc_info() == (None, None, None) + and not cls.NO_TEARDOWN + ) + + teardown_root = True + for tfile in cls._testdirs: + if tfile not in cls._do_teardown: + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s" % tfile) + teardown_root = False + elif do_teardown: + shutil.rmtree(tfile) + + if teardown_root and do_teardown: + shutil.rmtree(cls._testroot) diff --git a/CIME/tests/test_sys_user_concurrent_mods.py b/CIME/tests/test_sys_user_concurrent_mods.py new file mode 100644 index 00000000000..c8173b34215 --- /dev/null +++ b/CIME/tests/test_sys_user_concurrent_mods.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 + +import os +import time + +from CIME import utils +from CIME.tests import base + + +class TestUserConcurrentMods(base.BaseTestCase): + def test_user_concurrent_mods(self): + # Put this inside any test that's slow + if self.FAST_ONLY: + self.skipTest("Skipping slow test") + + casedir = self._create_test( + ["--walltime=0:30:00", "TESTRUNUSERXMLCHANGE_Mmpi-serial.f19_g16.X"], + test_id=self._baseline_name, + ) + + with utils.Timeout(3000): + while True: + with open(os.path.join(casedir, "CaseStatus"), "r") as fd: + self._wait_for_tests(self._baseline_name) + contents = fd.read() + if contents.count("model execution success") == 2: + break + + time.sleep(5) + + rundir = utils.run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) + if utils.get_cime_default_driver() == "nuopc": + chk_file = "nuopc.runconfig" + else: + chk_file = "drv_in" + with open(os.path.join(rundir, chk_file), "r") as fd: + contents = fd.read() + self.assertTrue("stop_n = 6" in contents) diff --git a/CIME/tests/test_sys_wait_for_tests.py b/CIME/tests/test_sys_wait_for_tests.py new file mode 100644 index 00000000000..0377d65771e --- /dev/null +++ b/CIME/tests/test_sys_wait_for_tests.py @@ -0,0 +1,380 @@ +#!/usr/bin/env python3 + +import os +import signal +import shutil +import sys +import time +import threading + +from CIME import utils +from CIME import test_status +from CIME.tests import base +from CIME.tests import utils as test_utils + + +class TestWaitForTests(base.BaseTestCase): + def setUp(self): + super().setUp() + + self._testroot = os.path.join(self.TEST_ROOT, "TestWaitForTests") + self._timestamp = utils.get_timestamp() + + # basic tests + self._testdir_all_pass = os.path.join( + self._testroot, "scripts_regression_tests.testdir_all_pass" + ) + self._testdir_with_fail = os.path.join( + self._testroot, "scripts_regression_tests.testdir_with_fail" + ) + self._testdir_unfinished = os.path.join( + self._testroot, "scripts_regression_tests.testdir_unfinished" + ) + self._testdir_unfinished2 = os.path.join( + self._testroot, "scripts_regression_tests.testdir_unfinished2" + ) + + # live tests + self._testdir_teststatus1 = os.path.join( + self._testroot, "scripts_regression_tests.testdir_teststatus1" + ) + 
self._testdir_teststatus2 = os.path.join( + self._testroot, "scripts_regression_tests.testdir_teststatus2" + ) + + self._testdirs = [ + self._testdir_all_pass, + self._testdir_with_fail, + self._testdir_unfinished, + self._testdir_unfinished2, + self._testdir_teststatus1, + self._testdir_teststatus2, + ] + basic_tests = self._testdirs[: self._testdirs.index(self._testdir_teststatus1)] + + for testdir in self._testdirs: + if os.path.exists(testdir): + shutil.rmtree(testdir) + os.makedirs(testdir) + + for r in range(10): + for testdir in basic_tests: + os.makedirs(os.path.join(testdir, str(r))) + test_utils.make_fake_teststatus( + os.path.join(testdir, str(r)), + "Test_%d" % r, + test_status.TEST_PASS_STATUS, + test_status.RUN_PHASE, + ) + + test_utils.make_fake_teststatus( + os.path.join(self._testdir_with_fail, "5"), + "Test_5", + test_status.TEST_FAIL_STATUS, + test_status.RUN_PHASE, + ) + test_utils.make_fake_teststatus( + os.path.join(self._testdir_unfinished, "5"), + "Test_5", + test_status.TEST_PEND_STATUS, + test_status.RUN_PHASE, + ) + test_utils.make_fake_teststatus( + os.path.join(self._testdir_unfinished2, "5"), + "Test_5", + test_status.TEST_PASS_STATUS, + test_status.SUBMIT_PHASE, + ) + + integration_tests = self._testdirs[len(basic_tests) :] + for integration_test in integration_tests: + os.makedirs(os.path.join(integration_test, "0")) + test_utils.make_fake_teststatus( + os.path.join(integration_test, "0"), + "Test_0", + test_status.TEST_PASS_STATUS, + test_status.CORE_PHASES[0], + ) + + # Set up proxy if possible + self._unset_proxy = self.setup_proxy() + + self._thread_error = None + + def tearDown(self): + super().tearDown() + + do_teardown = sys.exc_info() == (None, None, None) and not self.NO_TEARDOWN + + if do_teardown: + for testdir in self._testdirs: + shutil.rmtree(testdir) + + def simple_test(self, testdir, expected_results, extra_args="", build_name=None): + # Need these flags to test dashboard if e3sm + if self._config.create_test_flag_mode == "e3sm" and build_name is not None: + extra_args += " -b %s" % build_name + + expected_stat = 0 + for expected_result in expected_results: + if not ( + expected_result == "PASS" + or (expected_result == "PEND" and "-n" in extra_args) + ): + expected_stat = utils.TESTS_FAILED_ERR_CODE + + output = self.run_cmd_assert_result( + "%s/wait_for_tests -p ACME_test */TestStatus %s" + % (self.TOOLS_DIR, extra_args), + from_dir=testdir, + expected_stat=expected_stat, + ) + + lines = [ + line + for line in output.splitlines() + if ( + line.startswith("PASS") + or line.startswith("FAIL") + or line.startswith("PEND") + ) + ] + self.assertEqual(len(lines), len(expected_results)) + for idx, line in enumerate(lines): + testname, status = test_utils.parse_test_status(line) + self.assertEqual(status, expected_results[idx]) + self.assertEqual(testname, "Test_%d" % idx) + + def threaded_test(self, testdir, expected_results, extra_args="", build_name=None): + try: + self.simple_test(testdir, expected_results, extra_args, build_name) + except AssertionError as e: + self._thread_error = str(e) + + def test_wait_for_test_all_pass(self): + self.simple_test(self._testdir_all_pass, ["PASS"] * 10) + + def test_wait_for_test_with_fail(self): + expected_results = ["FAIL" if item == 5 else "PASS" for item in range(10)] + self.simple_test(self._testdir_with_fail, expected_results) + + def test_wait_for_test_no_wait(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + self.simple_test(self._testdir_unfinished, 
expected_results, "-n") + + def test_wait_for_test_timeout(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + self.simple_test(self._testdir_unfinished, expected_results, "--timeout=3") + + def test_wait_for_test_wait_for_pend(self): + run_thread = threading.Thread( + target=self.threaded_test, args=(self._testdir_unfinished, ["PASS"] * 10) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) # Kinda hacky + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + with test_status.TestStatus( + test_dir=os.path.join(self._testdir_unfinished, "5") + ) as ts: + ts.set_status(test_status.RUN_PHASE, test_status.TEST_PASS_STATUS) + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_wait_for_missing_run_phase(self): + run_thread = threading.Thread( + target=self.threaded_test, args=(self._testdir_unfinished2, ["PASS"] * 10) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) # Kinda hacky + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + with test_status.TestStatus( + test_dir=os.path.join(self._testdir_unfinished2, "5") + ) as ts: + ts.set_status(test_status.RUN_PHASE, test_status.TEST_PASS_STATUS) + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_wait_kill(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + run_thread = threading.Thread( + target=self.threaded_test, args=(self._testdir_unfinished, expected_results) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + self.kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1) + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_cdash_pass(self): + expected_results = ["PASS"] * 10 + build_name = "regression_test_pass_" + self._timestamp + run_thread = threading.Thread( + target=self.threaded_test, + args=(self._testdir_all_pass, expected_results, "", build_name), + ) + run_thread.daemon = True + run_thread.start() + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + self.assert_dashboard_has_build(build_name) + + def test_wait_for_test_cdash_kill(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + build_name = "regression_test_kill_" + self._timestamp + run_thread = threading.Thread( + target=self.threaded_test, + args=(self._testdir_unfinished, expected_results, "", build_name), + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + self.kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1) + + run_thread.join(timeout=10) + + 
self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + self.assert_dashboard_has_build(build_name) + + if self._config.test_mode == "e3sm": + cdash_result_dir = os.path.join(self._testdir_unfinished, "Testing") + tag_file = os.path.join(cdash_result_dir, "TAG") + self.assertTrue(os.path.isdir(cdash_result_dir)) + self.assertTrue(os.path.isfile(tag_file)) + + tag = open(tag_file, "r").readlines()[0].strip() + xml_file = os.path.join(cdash_result_dir, tag, "Test.xml") + self.assertTrue(os.path.isfile(xml_file)) + + xml_contents = open(xml_file, "r").read() + self.assertTrue( + r"Test_0Test_1Test_2Test_3Test_4Test_5Test_6Test_7Test_8Test_9" + in xml_contents + ) + self.assertTrue( + r'Test_5' in xml_contents + ) + + # TODO: Any further checking of xml output worth doing? + + def live_test_impl(self, testdir, expected_results, last_phase, last_status): + run_thread = threading.Thread( + target=self.threaded_test, args=(testdir, expected_results) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + for core_phase in test_status.CORE_PHASES[1:]: + with test_status.TestStatus( + test_dir=os.path.join(self._testdir_teststatus1, "0") + ) as ts: + ts.set_status( + core_phase, + last_status + if core_phase == last_phase + else test_status.TEST_PASS_STATUS, + ) + + time.sleep(5) + + if core_phase != last_phase: + self.assertTrue( + run_thread.is_alive(), + msg="wait_for_tests should have waited after passing phase {}".format( + core_phase + ), + ) + else: + run_thread.join(timeout=10) + self.assertFalse( + run_thread.is_alive(), + msg="wait_for_tests should have finished after phase {}".format( + core_phase + ), + ) + break + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_test_status_integration_pass(self): + self.live_test_impl( + self._testdir_teststatus1, + ["PASS"], + test_status.RUN_PHASE, + test_status.TEST_PASS_STATUS, + ) + + def test_wait_for_test_test_status_integration_submit_fail(self): + self.live_test_impl( + self._testdir_teststatus1, + ["FAIL"], + test_status.SUBMIT_PHASE, + test_status.TEST_FAIL_STATUS, + ) diff --git a/CIME/tests/test_unit_aprun.py b/CIME/tests/test_unit_aprun.py new file mode 100644 index 00000000000..1767c68484c --- /dev/null +++ b/CIME/tests/test_unit_aprun.py @@ -0,0 +1,128 @@ +import unittest +from unittest import mock + +from CIME import aprun + +# NTASKS, NTHRDS, ROOTPE, PSTRID +DEFAULT_COMP_ATTRS = [ + 512, + 2, + 0, + 1, + 675, + 2, + 0, + 1, + 168, + 2, + 512, + 1, + 512, + 2, + 0, + 1, + 128, + 4, + 680, + 1, + 168, + 2, + 512, + 1, + 168, + 2, + 512, + 1, + 512, + 2, + 0, + 1, + 1, + 1, + 0, + 1, +] + +# MAX_TASKS_PER_NODE, MAX_MPITASKS_PER_NODE, PIO_NUMTASKS, PIO_ASYNC_INTERFACE, COMPILER, MACH +DEFAULT_ARGS = [ + 16, + 16, + -1, + False, + "gnu", + "docker", +] + + +class TestUnitAprun(unittest.TestCase): + def test_aprun_extra_args(self): + case = mock.MagicMock() + + case.get_values.return_value = [ + "CPL", + "ATM", + "LND", + "ICE", + "OCN", + "ROF", + "GLC", + "WAV", + "IAC", + ] + + case.get_value.side_effect = DEFAULT_COMP_ATTRS + DEFAULT_ARGS + + extra_args = { + "-e DEBUG=true": {"position": "global"}, + "-j 20": {"position": "per"}, + } + + ( + aprun_args, + total_node_count, + total_task_count, + 
min_tasks_per_node, + max_thread_count, + ) = aprun.get_aprun_cmd_for_case(case, "e3sm.exe", extra_args=extra_args) + + assert ( + aprun_args + == " -e DEBUG=true -n 680 -N 8 -d 2 -j 20 e3sm.exe : -n 128 -N 4 -d 4 -j 20 e3sm.exe " + ) + assert total_node_count == 117 + assert total_task_count == 808 + assert min_tasks_per_node == 4 + assert max_thread_count == 4 + + def test_aprun(self): + case = mock.MagicMock() + + case.get_values.return_value = [ + "CPL", + "ATM", + "LND", + "ICE", + "OCN", + "ROF", + "GLC", + "WAV", + "IAC", + ] + + case.get_value.side_effect = DEFAULT_COMP_ATTRS + DEFAULT_ARGS + + ( + aprun_args, + total_node_count, + total_task_count, + min_tasks_per_node, + max_thread_count, + ) = aprun.get_aprun_cmd_for_case(case, "e3sm.exe") + + assert ( + aprun_args == " -n 680 -N 8 -d 2 e3sm.exe : -n 128 -N 4 -d 4 e3sm.exe " + ) + assert total_node_count == 117 + assert total_task_count == 808 + assert min_tasks_per_node == 4 + assert max_thread_count == 4 diff --git a/CIME/tests/test_unit_baselines_performance.py b/CIME/tests/test_unit_baselines_performance.py new file mode 100644 index 00000000000..48437857b9e --- /dev/null +++ b/CIME/tests/test_unit_baselines_performance.py @@ -0,0 +1,658 @@ +#!/usr/bin/env python3 + +import gzip +import tempfile +import unittest +import os +from unittest import mock +from pathlib import Path + +from CIME.baselines import performance +from CIME.tests.test_unit_system_tests import CPLLOG + + +def create_mock_case(tempdir, get_latest_cpl_logs=None): + caseroot = Path(tempdir, "0", "caseroot") + + rundir = caseroot / "run" + + if get_latest_cpl_logs is not None: + get_latest_cpl_logs.return_value = (str(rundir / "cpl.log.gz"),) + + baseline_root = Path(tempdir, "baselines") + + baseline_root.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + + return case, caseroot, rundir, baseline_root + + +class TestUnitBaselinesPerformance(unittest.TestCase): + @mock.patch("CIME.baselines.performance._perf_get_memory") + def test_perf_get_memory_default(self, _perf_get_memory): + _perf_get_memory.return_value = ("1000", "a") + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_memory.side_effect = AttributeError + + mem = performance.perf_get_memory(case, config) + + assert mem == ("1000", "a") + + def test_perf_get_memory(self): + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_memory.return_value = ("1000", "a") + + mem = performance.perf_get_memory(case, config) + + assert mem == ("1000", "a") + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + def test_perf_get_throughput_default(self, _perf_get_throughput): + _perf_get_throughput.return_value = ("100", "a") + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_throughput.side_effect = AttributeError + + tput = performance.perf_get_throughput(case, config) + + assert tput == ("100", "a") + + def test_perf_get_throughput(self): + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_throughput.return_value = ("100", "a") + + tput = performance.perf_get_throughput(case, config) + + assert tput == ("100", "a") + + def test_get_cpl_throughput_no_file(self): + throughput = performance.get_cpl_throughput("/tmp/cpl.log") + + assert throughput is None + + def test_get_cpl_throughput(self): + with tempfile.TemporaryDirectory() as tempdir: + cpl_log_path = Path(tempdir, "cpl.log.gz") + + with gzip.open(cpl_log_path, "w") as fd: + fd.write(CPLLOG.encode("utf-8")) + + throughput = 
performance.get_cpl_throughput(str(cpl_log_path)) + + assert throughput == 719.635 + + def test_get_cpl_mem_usage_gz(self): + with tempfile.TemporaryDirectory() as tempdir: + cpl_log_path = Path(tempdir, "cpl.log.gz") + + with gzip.open(cpl_log_path, "w") as fd: + fd.write(CPLLOG.encode("utf-8")) + + mem_usage = performance.get_cpl_mem_usage(str(cpl_log_path)) + + assert mem_usage == [ + (10102.0, 1673.89), + (10103.0, 1673.89), + (10104.0, 1673.89), + (10105.0, 1673.89), + ] + + @mock.patch("CIME.baselines.performance.os.path.isfile") + def test_get_cpl_mem_usage(self, isfile): + isfile.return_value = True + + with mock.patch( + "builtins.open", mock.mock_open(read_data=CPLLOG.encode("utf-8")) + ) as mock_file: + mem_usage = performance.get_cpl_mem_usage("/tmp/cpl.log") + + assert mem_usage == [ + (10102.0, 1673.89), + (10103.0, 1673.89), + (10104.0, 1673.89), + (10105.0, 1673.89), + ] + + def test_read_baseline_file_multi_line(self): + with mock.patch( + "builtins.open", + mock.mock_open( + read_data="sha:1df0 date:2023 1000.0\nsha:3b05 date:2023 2000.0" + ), + ) as mock_file: + baseline = performance.read_baseline_file("/tmp/cpl-mem.log") + + mock_file.assert_called_with("/tmp/cpl-mem.log") + assert baseline == "sha:1df0 date:2023 1000.0\nsha:3b05 date:2023 2000.0" + + def test_read_baseline_file_content(self): + if not os.path.exists("/tmp/cpl-mem.log"): + os.mknod("/tmp/cpl-mem.log") + with mock.patch( + "builtins.open", mock.mock_open(read_data="sha:1df0 date:2023 1000.0") + ) as mock_file: + baseline = performance.read_baseline_file("/tmp/cpl-mem.log") + + mock_file.assert_called_with("/tmp/cpl-mem.log") + assert baseline == "sha:1df0 date:2023 1000.0" + + def test_write_baseline_file(self): + with mock.patch("builtins.open", mock.mock_open()) as mock_file: + performance.write_baseline_file("/tmp/cpl-tput.log", "1000") + + mock_file.assert_called_with("/tmp/cpl-tput.log", "a") + + @mock.patch("CIME.baselines.performance.get_cpl_throughput") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_throughput(self, get_latest_cpl_logs, get_cpl_throughput): + get_cpl_throughput.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + with self.assertRaises(RuntimeError): + performance._perf_get_throughput(case) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_memory_override(self, get_latest_cpl_logs, get_cpl_mem_usage): + get_cpl_mem_usage.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + with self.assertRaises(RuntimeError): + performance._perf_get_memory(case, "/tmp/override") + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_memory(self, get_latest_cpl_logs, get_cpl_mem_usage): + get_cpl_mem_usage.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + with self.assertRaises(RuntimeError): + performance._perf_get_memory(case) + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def 
test_write_baseline_skip( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.return_value = "100" + + perf_get_memory.return_value = "1000" + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline( + case, + baseline_root, + False, + False, + ) + + perf_get_throughput.assert_not_called() + perf_get_memory.assert_not_called() + write_baseline_file.assert_not_called() + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def test_write_baseline_runtimeerror( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.side_effect = RuntimeError + + perf_get_memory.side_effect = RuntimeError + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline(case, baseline_root) + + perf_get_throughput.assert_called() + perf_get_memory.assert_called() + write_baseline_file.assert_not_called() + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def test_perf_write_baseline( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.return_value = ("100", "a") + + perf_get_memory.return_value = ("1000", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline(case, baseline_root) + + perf_get_throughput.assert_called() + perf_get_memory.assert_called() + write_baseline_file.assert_any_call( + str(baseline_root / "cpl-tput.log"), "100", "a" + ) + write_baseline_file.assert_any_call( + str(baseline_root / "cpl-mem.log"), "1000", "a" + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_baseline_file( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.side_effect = FileNotFoundError + + _perf_get_throughput.return_value = 504 + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + with self.assertRaises(FileNotFoundError): + performance.perf_compare_throughput_baseline(case) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_baseline( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = 
performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance is None + assert ( + comment + == "Could not compare throughput to baseline, as baseline had no value." + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_tolerance( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "500" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + None, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance + assert ( + comment + == "TPUTCOMP: Throughput changed by -0.80%: baseline=500.000 sypd, tolerance=10%, current=504.000 sypd" + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_above_threshold( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "1000" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert not below_tolerance + assert ( + comment + == "Error: TPUTCOMP: Throughput changed by 49.60%: baseline=1000.000 sypd, tolerance=5%, current=504.000 sypd" + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "500" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance + assert ( + comment + == "TPUTCOMP: Throughput changed by -0.80%: baseline=500.000 sypd, tolerance=5%, current=504.000 sypd" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_baseline( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + 
(3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater changed by 0.00%: baseline=0.000 MB, tolerance=5%, current=1003.000 MB" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_not_enough_samples( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = ["1000.0"] + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance is None + assert comment == "Found 2 memory usage samples, need atleast 4" + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_baseline_file( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.side_effect = FileNotFoundError + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + with self.assertRaises(FileNotFoundError): + performance.perf_compare_memory_baseline(case) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_tolerance( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + None, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater changed by 0.30%: baseline=1000.000 MB, tolerance=10%, current=1003.000 MB" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def 
test_perf_compare_memory_baseline_above_threshold( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 2000.0), + (2, 2001.0), + (3, 2002.0), + (4, 2003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert not below_tolerance + assert ( + comment + == "Error: MEMCOMP: Memory usage highwater changed by 100.30%: baseline=1000.000 MB, tolerance=5%, current=2003.000 MB" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater changed by 0.30%: baseline=1000.000 MB, tolerance=5%, current=1003.000 MB" + ) + + def test_get_latest_cpl_logs_found_multiple(self): + with tempfile.TemporaryDirectory() as tempdir: + run_dir = Path(tempdir) / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + cpl_log_path = run_dir / "cpl.log.gz" + cpl_log_path.touch() + + cpl_log_2_path = run_dir / "cpl-2023-01-01.log.gz" + cpl_log_2_path.touch() + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(run_dir), + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 2 + assert sorted(latest_cpl_logs) == sorted( + [str(cpl_log_path), str(cpl_log_2_path)] + ) + + def test_get_latest_cpl_logs_found_single(self): + with tempfile.TemporaryDirectory() as tempdir: + run_dir = Path(tempdir) / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + cpl_log_path = run_dir / "cpl.log.gz" + cpl_log_path.touch() + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(run_dir), + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 1 + assert latest_cpl_logs[0] == str(cpl_log_path) + + def test_get_latest_cpl_logs(self): + case = mock.MagicMock() + case.get_value.side_effect = ( + f"/tmp/run", + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 0 diff --git a/CIME/tests/test_unit_bless_test_results.py b/CIME/tests/test_unit_bless_test_results.py new file mode 100644 index 00000000000..92aab87b4df --- /dev/null +++ b/CIME/tests/test_unit_bless_test_results.py @@ -0,0 +1,1051 @@ +import re +import unittest +import tempfile +from unittest import mock +from pathlib import Path + +from CIME.bless_test_results import ( + bless_test_results, + 
_bless_throughput, + _bless_memory, + bless_history, + bless_namelists, + is_hist_bless_needed, +) +from CIME.test_status import ALL_PHASES, GENERATE_PHASE + + +class TestUnitBlessTestResults(unittest.TestCase): + @mock.patch("CIME.bless_test_results.generate_baseline") + @mock.patch("CIME.bless_test_results.compare_baseline") + def test_bless_history_fail(self, compare_baseline, generate_baseline): + generate_baseline.return_value = (False, "") + + compare_baseline.return_value = (False, "") + + case = mock.MagicMock() + case.get_value.side_effect = [ + "USER", + "SMS.f19_g16.S", + "/tmp/run", + ] + + success, comment = bless_history( + "SMS.f19_g16.S", case, "master", "/tmp/baselines", False, True + ) + + assert not success + assert comment == "Generate baseline failed: " + + @mock.patch("CIME.bless_test_results.generate_baseline") + @mock.patch("CIME.bless_test_results.compare_baseline") + def test_bless_history_force(self, compare_baseline, generate_baseline): + generate_baseline.return_value = (True, "") + + compare_baseline.return_value = (False, "") + + case = mock.MagicMock() + case.get_value.side_effect = [ + "USER", + "SMS.f19_g16.S", + "/tmp/run", + ] + + success, comment = bless_history( + "SMS.f19_g16.S", case, "master", "/tmp/baselines", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.compare_baseline") + def test_bless_history(self, compare_baseline): + compare_baseline.return_value = (True, "") + + case = mock.MagicMock() + case.get_value.side_effect = [ + "USER", + "SMS.f19_g16.S", + "/tmp/run", + ] + + success, comment = bless_history( + "SMS.f19_g16.S", case, "master", "/tmp/baselines", True, False + ) + + assert success + assert comment is None + + def test_bless_namelists_report_only(self): + success, comment = bless_namelists( + "SMS.f19_g16.S", + True, + False, + None, + "master", + "/tmp/baselines", + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_pes_file(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + "/tmp/pes/new_layout.xml", + "master", + "/tmp/baselines", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --pesfile /tmp/pes/new_layout.xml --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_new_test_id(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + new_test_root="/tmp/other-test-root", + new_test_id="hello", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --test-root=/tmp/other-test-root --output-root=/tmp/other-test-root -t hello --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + 
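+ # run_cmd is mocked, so the generated create_test command is only inspected, never executed.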
@mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_new_test_root(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + new_test_root="/tmp/other-test-root", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --test-root=/tmp/other-test-root --output-root=/tmp/other-test-root --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_fail(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_force(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [0, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + ) + + assert success + assert comment is None + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_force_error( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_write_baseline.side_effect = Exception + + perf_compare_memory_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert not success + assert ( + comment + == "Failed to write baseline memory usage for test 'SMS.f19_g16.S': " + ) + perf_write_baseline.assert_called() + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_force( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_compare_memory_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + perf_write_baseline.assert_called() + + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_report_only(self, perf_compare_memory_baseline): + perf_compare_memory_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", True, False + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + 
@mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_general_error( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_compare_memory_baseline.side_effect = Exception + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_file_not_found_error( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_compare_memory_baseline.side_effect = FileNotFoundError + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory(self, perf_compare_memory_baseline): + perf_compare_memory_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, False + ) + + assert success + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_force_error( + self, perf_compare_throughput_baseline, perf_write_baseline + ): + perf_write_baseline.side_effect = Exception + + perf_compare_throughput_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert not success + assert comment == "Failed to write baseline throughput for 'SMS.f19_g16.S': " + perf_write_baseline.assert_called() + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_force( + self, perf_compare_throughput_baseline, perf_write_baseline + ): + perf_compare_throughput_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + perf_write_baseline.assert_called() + + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_report_only(self, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", True, False + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_general_error(self, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.side_effect = Exception + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_file_not_found_error( + self, + perf_compare_throughput_baseline, + perf_write_baseline, + ): + perf_compare_throughput_baseline.side_effect = FileNotFoundError + + case = mock.MagicMock() + + 
success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput(self, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, False + ) + + assert success + + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_perf( + self, + get_test_status_files, + TestStatus, + Case, + _bless_memory, + _bless_throughput, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL", "FAIL"] + + case = Case.return_value.__enter__.return_value + + _bless_memory.return_value = (True, "") + + _bless_throughput.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_perf=True, + ) + + assert success + _bless_memory.assert_called() + _bless_throughput.assert_called() + + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_memory_only( + self, + get_test_status_files, + TestStatus, + Case, + _bless_memory, + _bless_throughput, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("DIFF", "MEMCOMP") + ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL"] + + case = Case.return_value.__enter__.return_value + + _bless_memory.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_mem=True, + ) + + assert success + _bless_memory.assert_called() + _bless_throughput.assert_not_called() + + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_throughput_only( + self, + get_test_status_files, + TestStatus, + Case, + _bless_memory, + _bless_throughput, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("DIFF", "TPUTCOMP") + ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL"] + + case = Case.return_value.__enter__.return_value + + _bless_throughput.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + 
bless_tput=True, + ) + + assert success + _bless_memory.assert_not_called() + _bless_throughput.assert_called() + + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_namelists_only( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["FAIL", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + bless_namelists.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + namelists_only=True, + ) + + assert success + bless_namelists.assert_called() + + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_hist_only( + self, + get_test_status_files, + TestStatus, + Case, + bless_history, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "FAIL"] + + case = Case.return_value.__enter__.return_value + + bless_history.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + hist_only=True, + ) + + assert success + bless_history.assert_called() + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_specific(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS"] * 10 + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_tests=["SMS"], + ) + + assert success + + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_tests_results_homme( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + bless_history, + _bless_throughput, + _bless_memory, + ): + _bless_memory.return_value = (False, "") + + _bless_throughput.return_value = (False, "") + + bless_history.return_value = (False, "") + + bless_namelists.return_value = (False, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + 
"/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.HOMME.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + no_skip_pass=True, + ) + + assert not success + + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_tests_results_fail( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + bless_history, + _bless_throughput, + _bless_memory, + ): + _bless_memory.return_value = (False, "") + + _bless_throughput.return_value = (False, "") + + bless_history.return_value = (False, "") + + bless_namelists.return_value = (False, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + no_skip_pass=True, + ) + + assert not success + + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_no_skip_pass( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + bless_history, + _bless_throughput, + _bless_memory, + ): + _bless_memory.return_value = (True, "") + + _bless_throughput.return_value = (True, "") + + bless_history.return_value = (True, "") + + bless_namelists.return_value = (True, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + no_skip_pass=True, + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_baseline_root_none(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = 
TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["FAIL"] + ["PASS"] * 9 + + case = Case.return_value.__enter__.return_value + case.get_value.side_effect = [None, None] + + success = bless_test_results( + "master", + None, + "/tmp/cases", + "gnu", + force=True, + ) + + assert not success + + @mock.patch("CIME.utils.get_current_branch") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_baseline_name_none( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + get_current_branch, + ): + get_current_branch.return_value = "master" + + bless_namelists.return_value = (True, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["FAIL"] + ["PASS"] * 9 + + case = Case.return_value.__enter__.return_value + case.get_value.side_effect = [None, None] + + success = bless_test_results( + None, + "/tmp/baselines", + "/tmp/cases", + "gnu", + force=True, + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_exclude(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + exclude="SMS", + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_multiple_files(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/SMS.f19_g16.S.docker-gnu.23456/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_tests_no_match(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value 
= ("PASS", "RUN") + ts.get_status.side_effect = ["PASS"] * 10 + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_tests=["SEQ"], + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_all(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + ) + + assert success + + def test_is_bless_needed_no_skip_fail(self): + ts = mock.MagicMock() + ts.get_status.side_effect = [ + "PASS", + ] + + broken_blesses = [] + + needed = is_hist_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", True, "RUN" + ) + + assert needed + assert broken_blesses == [] + + def test_is_bless_needed_overall_fail(self): + ts = mock.MagicMock() + + # get_status() calls in is_hist_bless_needed() + ts.get_status.side_effect = [ + "PASS", # Check of RUN_PHASE at top of function + "PASS", # Check of GENERATE_PHASE + ] + + broken_blesses = [] + + needed = is_hist_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "FAIL", False, "RUN" + ) + + assert not needed + assert broken_blesses == [("SMS.f19_g16.A", "test did not pass")] + + def test_is_bless_needed_generate_fail(self): + ts = mock.MagicMock() + + # First two get_status() calls in is_hist_bless_needed() + side_effect = [ + "PASS", # Check of RUN_PHASE at top of function + "FAIL", # Check of GENERATE_PHASE + ] + # Checks in `for p in ALL_PHASES` loop + side_effect += ["PASS" for p in ALL_PHASES if p != GENERATE_PHASE] + # Save + ts.get_status.side_effect = side_effect + + broken_blesses = [] + + needed = is_hist_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "FAIL", False, "RUN" + ) + + assert needed + + def test_is_bless_needed_baseline_fail(self): + ts = mock.MagicMock() + ts.get_status.side_effect = ["PASS", "FAIL"] + + broken_blesses = [] + + needed = is_hist_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert needed + assert broken_blesses == [] + + def test_is_bless_needed_run_phase_fail(self): + ts = mock.MagicMock() + ts.get_status.side_effect = [ + "FAIL", + ] + + broken_blesses = [] + + needed = is_hist_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert not needed + assert broken_blesses == [("SMS.f19_g16.A", "run phase did not pass")] + + def test_is_bless_needed_no_run_phase(self): + ts = mock.MagicMock() + ts.get_status.side_effect = [None] + + broken_blesses = [] + + needed = is_hist_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert not needed + assert broken_blesses == [("SMS.f19_g16.A", "no run phase")] + + def test_is_bless_needed(self): + ts = mock.MagicMock() + ts.get_status.side_effect = ["PASS", "PASS"] + + broken_blesses = [] + + needed = is_hist_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert not needed diff --git a/CIME/tests/test_unit_case.py 
b/CIME/tests/test_unit_case.py new file mode 100755 index 00000000000..fff3fa5f7c8 --- /dev/null +++ b/CIME/tests/test_unit_case.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 + +import os +import unittest +from unittest import mock +import tempfile + +from CIME.case import case_submit +from CIME.case import Case +from CIME import utils as cime_utils + + +def make_valid_case(path): + """Make the given path look like a valid case to avoid errors""" + # Case validity is determined by checking for an env_case.xml file. So put one there + # to suggest that this directory is a valid case directory. Open in append mode in + # case the file already exists. + with open(os.path.join(path, "env_case.xml"), "a"): + pass + + +class TestCaseSubmit(unittest.TestCase): + def test_check_case(self): + case = mock.MagicMock() + # get_value arguments TEST, COMP_WAV, COMP_INTERFACE, BUILD_COMPLETE + case.get_value.side_effect = ["/tmp/caseroot", "", "", True] + case_submit.check_case(case, chksum=True) + + case.check_all_input_data.assert_called_with(chksum=True) + + @mock.patch("CIME.case.case_submit.lock_file") + @mock.patch("CIME.case.case_submit.unlock_file") + @mock.patch("os.path.basename") + def test__submit( + self, lock_file, unlock_file, basename + ): # pylint: disable=unused-argument + case = mock.MagicMock() + + case_submit._submit(case, chksum=True) # pylint: disable=protected-access + + case.check_case.assert_called_with(skip_pnl=False, chksum=True) + + @mock.patch("CIME.case.case_submit._submit") + @mock.patch("CIME.case.case.Case.initialize_derived_attributes") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.Case.read_xml") + def test_submit( + self, read_xml, get_value, init, _submit + ): # pylint: disable=unused-argument + with tempfile.TemporaryDirectory() as tempdir: + get_value.side_effect = [ + tempdir, + tempdir, + tempdir, + "test", + tempdir, + True, + "baseid", + None, + True, + ] + + make_valid_case(tempdir) + with Case(tempdir, non_local=True) as case: + case.submit(chksum=True) + + _submit.assert_called_with( + case, + job=None, + no_batch=False, + prereq=None, + allow_fail=False, + resubmit=False, + resubmit_immediate=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + workflow=True, + chksum=True, + dryrun=False, + ) + + +class TestCase(unittest.TestCase): + def setUp(self): + self.srcroot = os.path.abspath(cime_utils.get_src_root()) + self.tempdir = tempfile.TemporaryDirectory() + + @mock.patch("CIME.case.case.Case.read_xml") + def test_fix_sys_argv_quotes(self, read_xml): + input_data = ["./xmlquery", "--val", "PIO"] + expected_data = ["./xmlquery", "--val", "PIO"] + + with tempfile.TemporaryDirectory() as tempdir: + make_valid_case(tempdir) + + with Case(tempdir) as case: + output_data = case.fix_sys_argv_quotes(input_data) + + assert output_data == expected_data + + @mock.patch("CIME.case.case.Case.read_xml") + def test_fix_sys_argv_quotes_incomplete(self, read_xml): + input_data = ["./xmlquery", "--val"] + expected_data = ["./xmlquery", "--val"] + + with tempfile.TemporaryDirectory() as tempdir: + make_valid_case(tempdir) + + with Case(tempdir) as case: + output_data = case.fix_sys_argv_quotes(input_data) + + assert output_data == expected_data + + @mock.patch("CIME.case.case.Case.read_xml") + def test_fix_sys_argv_quotes_val(self, read_xml): + input_data = ["./xmlquery", "--val", "-test"] + expected_data = ["./xmlquery", "--val", "-test"] + + with tempfile.TemporaryDirectory() as tempdir: + make_valid_case(tempdir) + 
+ with Case(tempdir) as case: + output_data = case.fix_sys_argv_quotes(input_data) + + assert output_data == expected_data + + @mock.patch("CIME.case.case.Case.read_xml") + def test_fix_sys_argv_quotes_val_quoted(self, read_xml): + input_data = ["./xmlquery", "--val", " -nlev 267 "] + expected_data = ["./xmlquery", "--val", '" -nlev 267 "'] + + with tempfile.TemporaryDirectory() as tempdir: + make_valid_case(tempdir) + + with Case(tempdir) as case: + output_data = case.fix_sys_argv_quotes(input_data) + + assert output_data == expected_data + + @mock.patch("CIME.case.case.Case.read_xml") + def test_fix_sys_argv_quotes_kv(self, read_xml): + input_data = ["./xmlquery", "CAM_CONFIG_OPTS= -nlev 267", "OTHER_OPTS=-test"] + expected_data = [ + "./xmlquery", + 'CAM_CONFIG_OPTS=" -nlev 267"', + "OTHER_OPTS=-test", + ] + + with tempfile.TemporaryDirectory() as tempdir: + make_valid_case(tempdir) + + with Case(tempdir) as case: + output_data = case.fix_sys_argv_quotes(input_data) + + assert output_data == expected_data + + @mock.patch("CIME.case.case.Case.read_xml") + @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"]) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("socket.getfqdn", return_value="host1") + @mock.patch("getpass.getuser", side_effect=["root", "root", "johndoe"]) + def test_new_hash( + self, getuser, getfqdn, strftime, read_xml + ): # pylint: disable=unused-argument + with self.tempdir as tempdir: + make_valid_case(tempdir) + with Case(tempdir) as case: + expected = ( + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a" + ) + + # Check idempotency + for _ in range(2): + value = case.new_hash() + + self.assertTrue( + value == expected, "{} != {}".format(value, expected) + ) + + expected = ( + "bb59f1c473ac07e9dd30bfab153c0530a777f89280b716cf42e6fe2f49811a6e" + ) + + value = case.new_hash() + + self.assertTrue(value == expected, "{} != {}".format(value, expected)) + + @mock.patch("CIME.case.case.Case.read_xml") + @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"]) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("CIME.case.case.lock_file") + @mock.patch("CIME.case.case.Case.set_lookup_value") + @mock.patch("CIME.case.case.Case.apply_user_mods") + @mock.patch("CIME.case.case.Case.create_caseroot") + @mock.patch("CIME.case.case.Case.configure") + @mock.patch("socket.getfqdn", return_value="host1") + @mock.patch("getpass.getuser", return_value="root") + @mock.patch.dict(os.environ, {"CIME_MODEL": "cesm"}) + def test_copy( + self, + getuser, + getfqdn, + configure, + create_caseroot, # pylint: disable=unused-argument + apply_user_mods, + set_lookup_value, + lock_file, + strftime, # pylint: disable=unused-argument + read_xml, + ): # pylint: disable=unused-argument + expected_first_hash = ( + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a" + ) + expected_second_hash = ( + "3561339a49daab999e3c4ea2f03a9c6acc33296a5bc35f1bfb82e7b5e10bdf38" + ) + + with self.tempdir as tempdir: + caseroot = os.path.join(tempdir, "test1") + with Case(caseroot, read_only=False) as case: + case.create( + "test1", + self.srcroot, + "A", + "f19_g16", + machine_name="perlmutter", + ) + + # Check that they're all called + configure.assert_called_with( + "A", + "f19_g16", + machine_name="perlmutter", + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + 
run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ) + create_caseroot.assert_called() + apply_user_mods.assert_called() + lock_file.assert_called() + + set_lookup_value.assert_called_with("CASE_HASH", expected_first_hash) + + strftime.return_value = "10:00:00" + with mock.patch( + "CIME.case.case.Case.set_value" + ) as set_value, mock.patch("sys.argv", ["/src/create_clone"]): + case.copy("test2", "{}_2".format(tempdir)) + + set_value.assert_called_with("CASE_HASH", expected_second_hash) + + @mock.patch("CIME.case.case.Case.read_xml") + @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"]) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("CIME.case.case.lock_file") + @mock.patch("CIME.case.case.Case.set_lookup_value") + @mock.patch("CIME.case.case.Case.apply_user_mods") + @mock.patch("CIME.case.case.Case.create_caseroot") + @mock.patch("CIME.case.case.Case.configure") + @mock.patch("socket.getfqdn", return_value="host1") + @mock.patch("getpass.getuser", return_value="root") + @mock.patch.dict(os.environ, {"CIME_MODEL": "cesm"}) + def test_create( + self, + get_user, + getfqdn, + configure, + create_caseroot, # pylint: disable=unused-argument + apply_user_mods, + set_lookup_value, + lock_file, + strftime, # pylint: disable=unused-argument + read_xml, + ): # pylint: disable=unused-argument + with self.tempdir as tempdir: + caseroot = os.path.join(tempdir, "test1") + with Case(caseroot, read_only=False) as case: + case.create( + "test1", + self.srcroot, + "A", + "f19_g16", + machine_name="perlmutter", + ) + + # Check that they're all called + configure.assert_called_with( + "A", + "f19_g16", + machine_name="perlmutter", + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ) + create_caseroot.assert_called() + apply_user_mods.assert_called() + lock_file.assert_called() + + set_lookup_value.assert_called_with( + "CASE_HASH", + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a", + ) + + +class TestCase_RecordCmd(unittest.TestCase): + def setUp(self): + self.tempdir = tempfile.TemporaryDirectory() + + def assert_calls_match(self, calls, expected): + self.assertTrue(len(calls) == len(expected), calls) + + for x, y in zip(calls, expected): + self.assertTrue(x == y, calls) + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.open", mock.mock_open()) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("sys.argv", ["/src/create_newcase"]) + def test_error( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + with self.tempdir as tempdir, mock.patch( + "CIME.case.case.open", mock.mock_open() + ) as m: + m.side_effect = PermissionError() + + with Case(tempdir) as case: + get_value.side_effect = [tempdir, "/src"] + + # We didn't need to make tempdir look like a valid case for the Case + # constructor because we mock that constructor, but we *do* need to make + # it look like a valid case for 
record_cmd. + make_valid_case(tempdir) + case.record_cmd() + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.open", mock.mock_open()) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("sys.argv", ["/src/create_newcase"]) + def test_init( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + mocked_open = mock.mock_open() + + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): + with Case(tempdir) as case: + get_value.side_effect = [tempdir, "/src"] + + case.record_cmd(init=True) + + mocked_open.assert_called_with(f"{tempdir}/replay.sh", "a") + + handle = mocked_open() + + handle.writelines.assert_called_with( + [ + "#!/bin/bash\n\n", + "set -e\n\n", + "# Created 00:00:00\n\n", + 'CASEDIR="{}"\n\n'.format(tempdir), + "/src/create_newcase\n\n", + 'cd "${CASEDIR}"\n\n', + ] + ) + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.open", mock.mock_open()) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("sys.argv", ["/src/scripts/create_newcase"]) + def test_sub_relative( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + mocked_open = mock.mock_open() + + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): + with Case(tempdir) as case: + get_value.side_effect = [tempdir, "/src"] + + case.record_cmd(init=True) + + expected = [ + "#!/bin/bash\n\n", + "set -e\n\n", + "# Created 00:00:00\n\n", + 'CASEDIR="{}"\n\n'.format(tempdir), + "/src/scripts/create_newcase\n\n", + 'cd "${CASEDIR}"\n\n', + ] + + handle = mocked_open() + handle.writelines.assert_called_with(expected) + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + def test_cmd_arg(self, get_value, flush, init): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + mocked_open = mock.mock_open() + + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): + with Case(tempdir) as case: + get_value.side_effect = [ + tempdir, + "/src", + ] + + # We didn't need to make tempdir look like a valid case for the Case + # constructor because we mock that constructor, but we *do* need to make + # it look like a valid case for record_cmd. 
+ make_valid_case(tempdir) + case.record_cmd(["/some/custom/command", "arg1"]) + + expected = [ + "/some/custom/command arg1\n\n", + ] + + handle = mocked_open() + handle.writelines.assert_called_with(expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_case_fake.py b/CIME/tests/test_unit_case_fake.py new file mode 100755 index 00000000000..448931ecc7c --- /dev/null +++ b/CIME/tests/test_unit_case_fake.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of CaseFake +""" + +import unittest +import tempfile +import os +import shutil + +from CIME.tests.case_fake import CaseFake + + +class TestCaseFake(unittest.TestCase): + def setUp(self): + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tempdir, ignore_errors=True) + + def test_create_clone(self): + # Setup + old_caseroot = os.path.join(self.tempdir, "oldcase") + oldcase = CaseFake(old_caseroot) + oldcase.set_value("foo", "bar") + + # Exercise + new_caseroot = os.path.join(self.tempdir, "newcase") + clone = oldcase.create_clone(new_caseroot) + + # Verify + self.assertEqual("bar", clone.get_value("foo")) + self.assertEqual("newcase", clone.get_value("CASE")) + self.assertEqual("newcase", clone.get_value("CASEBASEID")) + self.assertEqual(new_caseroot, clone.get_value("CASEROOT")) + self.assertEqual(os.path.join(new_caseroot, "run"), clone.get_value("RUNDIR")) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_case_run.py b/CIME/tests/test_unit_case_run.py new file mode 100644 index 00000000000..8f188925d57 --- /dev/null +++ b/CIME/tests/test_unit_case_run.py @@ -0,0 +1,50 @@ +import unittest +from unittest import mock + +from CIME.utils import CIMEError +from CIME.case.case_run import TERMINATION_TEXT +from CIME.case.case_run import _post_run_check + + +def _case_post_run_check(): + case = mock.MagicMock() + + # RUNDIR, COMP_INTERFACE, COMP_CPL, COMP_ATM, COMP_OCN, MULTI_DRIVER + case.get_value.side_effect = ("/tmp/run", "mct", "cpl", "satm", "socn", False) + + # COMP_CLASSES + case.get_values.return_value = ("CPL", "ATM", "OCN") + + return case + + +class TestCaseSubmit(unittest.TestCase): + @mock.patch("os.stat") + @mock.patch("os.path.isfile") + def test_post_run_check(self, isfile, stat): + isfile.return_value = True + + stat.return_value.st_size = 1024 + + # no exceptions means success + for x in TERMINATION_TEXT: + case = _case_post_run_check() + + with mock.patch("builtins.open", mock.mock_open(read_data=x)) as mock_file: + _post_run_check(case, "1234") + + @mock.patch("os.stat") + @mock.patch("os.path.isfile") + def test_post_run_check_no_termination(self, isfile, stat): + isfile.return_value = True + + stat.return_value.st_size = 1024 + + case = _case_post_run_check() + + with self.assertRaises(CIMEError): + with mock.patch( + "builtins.open", + mock.mock_open(read_data="I DONT HAVE A TERMINATION MESSAGE"), + ) as mock_file: + _post_run_check(case, "1234") diff --git a/CIME/tests/test_unit_case_setup.py b/CIME/tests/test_unit_case_setup.py new file mode 100644 index 00000000000..a00dcf1b413 --- /dev/null +++ b/CIME/tests/test_unit_case_setup.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 + +import os +import unittest +import tempfile +import contextlib +from pathlib import Path +from unittest import mock + +from CIME.case import case_setup +from CIME.tests.utils import chdir + + +@contextlib.contextmanager +def create_machines_dir(): + """Creates temp machines directory with fake content""" + with 
tempfile.TemporaryDirectory() as temp_path: + machines_path = os.path.join(temp_path, "machines") + cmake_path = os.path.join(machines_path, "cmake_macros") + Path(cmake_path).mkdir(parents=True) + Path(os.path.join(cmake_path, "Macros.cmake")).touch() + Path(os.path.join(cmake_path, "test.cmake")).touch() + + yield temp_path + + +# pylint: disable=protected-access +class TestCaseSetup(unittest.TestCase): + @mock.patch("CIME.case.case_setup.copy_depends_files") + def test_create_macros_cmake(self, copy_depends_files): + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + + machines_path = os.path.join(root_path, "machines") + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + case_setup._create_macros_cmake( + case_path, + os.path.join(machines_path, "cmake_macros"), + machine_mock, + "gnu-test", + os.path.join(case_path, "cmake_macros"), + ) + + assert os.path.exists(os.path.join(case_path, "Macros.cmake")) + assert os.path.exists(os.path.join(case_path, "cmake_macros", "test.cmake")) + + copy_depends_files.assert_called_with( + "test", machines_path, case_path, "gnu-test" + ) + + @mock.patch("CIME.case.case_setup._create_macros_cmake") + def test_create_macros(self, _create_macros_cmake): + case_mock = mock.MagicMock() + + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + + cmake_macros_path = os.path.join(root_path, "machines", "cmake_macros") + case_mock.get_value.return_value = cmake_macros_path + + machines_path = os.path.join(root_path, "machines") + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # do not generate env_mach_specific.xml + Path(os.path.join(case_path, "env_mach_specific.xml")).touch() + + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) + + case_mock.get_value.assert_any_call("CMAKE_MACROS_DIR") + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + _create_macros_cmake.assert_called_with( + case_path, + cmake_macros_path, + machine_mock, + "gnu-test", + os.path.join(case_path, "cmake_macros"), + ) + + def test_create_macros_copy_user(self): + case_mock = mock.MagicMock() + + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + user_path = stack.enter_context(tempfile.TemporaryDirectory()) + + user_cime_path = Path(os.path.join(user_path, ".cime")) + user_cime_path.mkdir() + user_cmake = user_cime_path / "user.cmake" + user_cmake.touch() + + cmake_macros_path = os.path.join(root_path, "machines", "cmake_macros") + case_mock.get_value.return_value = cmake_macros_path + + machines_path = os.path.join(root_path, "machines") 
+ type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # do not generate env_mach_specific.xml + Path(os.path.join(case_path, "env_mach_specific.xml")).touch() + + stack.enter_context(mock.patch.dict(os.environ, {"HOME": user_path})) + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) + + case_mock.get_value.assert_any_call("CMAKE_MACROS_DIR") + + assert os.path.exists(os.path.join(case_path, "cmake_macros", "user.cmake")) + + def test_create_macros_copy_extra(self): + case_mock = mock.MagicMock() + + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + extra_path = stack.enter_context(tempfile.TemporaryDirectory()) + + extra_cmake_path = Path(extra_path, "cmake_macros") + extra_cmake_path.mkdir() + + extra_macros_path = extra_cmake_path / "extra.cmake" + extra_macros_path.touch() + + cmake_macros_path = os.path.join(root_path, "machines", "cmake_macros") + case_mock.get_value.side_effect = [cmake_macros_path, extra_path] + + machines_path = os.path.join(root_path, "machines") + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # do not generate env_mach_specific.xml + Path(os.path.join(case_path, "env_mach_specific.xml")).touch() + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) + + case_mock.get_value.assert_any_call("EXTRA_MACHDIR") + + assert os.path.exists( + os.path.join(case_path, "cmake_macros", "extra.cmake") + ) diff --git a/CIME/tests/test_unit_compare_test_results.py b/CIME/tests/test_unit_compare_test_results.py new file mode 100755 index 00000000000..4844a96c1a6 --- /dev/null +++ b/CIME/tests/test_unit_compare_test_results.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests for compare_test_results +""" + +import unittest +import tempfile +import os +import shutil + +from CIME import utils +from CIME import compare_test_results +from CIME.test_status import * +from CIME.tests.case_fake import CaseFake + + +class TestCaseFake(unittest.TestCase): + def setUp(self): + self.tempdir = tempfile.mkdtemp() + self.test_root = os.path.join(self.tempdir, "tests") + self.baseline_root = os.path.join(self.test_root, "baselines") + + # TODO switch to unittest.mock + self._old_strftime = utils.time.strftime + utils.time.strftime = lambda *args: "2021-02-20" + + self._old_init = CaseFake.__init__ + CaseFake.__init__ = lambda x, y, *args: self._old_init( + x, y, create_case_root=False + ) + + self._old_case = compare_test_results.Case + compare_test_results.Case = CaseFake + + def tearDown(self): + utils.time.strftime = self._old_strftime + CaseFake.__init__ = self._old_init + compare_test_results.Case = self._old_case + + shutil.rmtree(self.tempdir, ignore_errors=True) + + def _compare_test_results(self, baseline, test_id, phases, **kwargs): + test_status_root = os.path.join(self.test_root, "gnu." 
+ test_id) + os.makedirs(test_status_root) + + with TestStatus(test_status_root, "test") as status: + for x in phases: + status.set_status(x[0], x[1]) + + compare_test_results.compare_test_results( + baseline, self.baseline_root, self.test_root, "gnu", test_id, **kwargs + ) + + compare_log = os.path.join( + test_status_root, "compare.log.{}.2021-02-20".format(baseline) + ) + + self.assertTrue(os.path.exists(compare_log)) + + def test_namelists_only(self): + compare_test_results.compare_namelists = lambda *args: True + compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (SETUP_PHASE, "PASS"), + (RUN_PHASE, "PASS"), + ] + + self._compare_test_results( + "test1", "test-baseline", phases, namelists_only=True + ) + + def test_hist_only(self): + compare_test_results.compare_namelists = lambda *args: True + compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (SETUP_PHASE, "PASS"), + (RUN_PHASE, "PASS"), + ] + + self._compare_test_results("test1", "test-baseline", phases, hist_only=True) + + def test_failed_early(self): + compare_test_results.compare_namelists = lambda *args: True + compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (CREATE_NEWCASE_PHASE, "PASS"), + ] + + self._compare_test_results("test1", "test-baseline", phases) + + def test_baseline(self): + compare_test_results.compare_namelists = lambda *args: True + compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (SETUP_PHASE, "PASS"), + (RUN_PHASE, "PASS"), + ] + + self._compare_test_results("test1", "test-baseline", phases) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_compare_two.py b/CIME/tests/test_unit_compare_two.py new file mode 100755 index 00000000000..3ce0abb9b05 --- /dev/null +++ b/CIME/tests/test_unit_compare_two.py @@ -0,0 +1,667 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of the core logic in SystemTestsCompareTwo. 
+""" + +# Ignore privacy concerns for unit tests, so that unit tests can access +# protected members of the system under test +# +# pylint:disable=protected-access + +import unittest +from collections import namedtuple +import functools +import os +import shutil +import tempfile +from unittest import mock + +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +import CIME.test_status as test_status +from CIME.tests.case_fake import CaseFake + +# ======================================================================== +# Structure for storing information about calls made to methods +# ======================================================================== + +# You can create a Call object to record a single call made to a method: +# +# Call(method, arguments) +# method (str): name of method +# arguments (dict): dictionary mapping argument names to values +# +# Example: +# If you want to record a call to foo(bar = 1, baz = 2): +# somecall = Call(method = 'foo', arguments = {'bar': 1, 'baz': 2}) +# Or simply: +# somecall = Call('foo', {'bar': 1, 'baz': 2}) +Call = namedtuple("Call", ["method", "arguments"]) + +# ======================================================================== +# Names of methods for which we want to record calls +# ======================================================================== + +# We use constants for these method names because, in some cases, a typo in a +# hard-coded string could cause a test to always pass, which would be a Bad +# Thing. +# +# For now the names of the constants match the strings they equate to, which +# match the actual method names. But it's fine if this doesn't remain the case +# moving forward (which is another reason to use constants rather than +# hard-coded strings in the tests). + +METHOD_case_one_custom_prerun_action = "_case_one_custom_prerun_action" +METHOD_case_one_custom_postrun_action = "_case_one_custom_postrun_action" +METHOD_case_two_custom_prerun_action = "_case_two_custom_prerun_action" +METHOD_case_two_custom_postrun_action = "_case_two_custom_postrun_action" +METHOD_link_to_case2_output = "_link_to_case2_output" +METHOD_run_indv = "_run_indv" + +# ======================================================================== +# Fake version of SystemTestsCompareTwo that overrides some functionality for +# the sake of unit testing +# ======================================================================== + +# A SystemTestsCompareTwoFake object can be controlled to fail at a given +# point. See the documentation in its __init__ method for details. +# +# It logs what stubbed-out methods have been called in its log attribute; this +# is a list of Call objects (see above for their definition). + + +class SystemTestsCompareTwoFake(SystemTestsCompareTwo): + def __init__( + self, + case1, + run_one_suffix="base", + run_two_suffix="test", + separate_builds=False, + multisubmit=False, + case2setup_raises_exception=False, + run_one_should_pass=True, + run_two_should_pass=True, + compare_should_pass=True, + ): + """ + Initialize a SystemTestsCompareTwoFake object + + The core test phases prior to RUN_PHASE are set to TEST_PASS_STATUS; + RUN_PHASE is left unset (as is any later phase) + + Args: + case1 (CaseFake): existing case + run_one_suffix (str, optional): Suffix used for first run. Defaults + to 'base'. Currently MUST be 'base'. + run_two_suffix (str, optional): Suffix used for the second run. Defaults to 'test'. 
+ separate_builds (bool, optional): Passed to SystemTestsCompareTwo.__init__ + multisubmit (bool, optional): Passed to SystemTestsCompareTwo.__init__ + case2setup_raises_exception (bool, optional): If True, then the call + to _case_two_setup will raise an exception. Default is False. + run_one_should_pass (bool, optional): Whether the run_indv method should + pass for the first run. Default is True, meaning it will pass. + run_two_should_pass (bool, optional): Whether the run_indv method should + pass for the second run. Default is True, meaning it will pass. + compare_should_pass (bool, optional): Whether the comparison between the two + cases should pass. Default is True, meaning it will pass. + """ + + self._case2setup_raises_exception = case2setup_raises_exception + + # NOTE(wjs, 2016-08-03) Currently, due to limitations in the test + # infrastructure, run_one_suffix MUST be 'base'. However, I'm keeping it + # as an explicit argument to the constructor so that it's easy to relax + # this requirement later: To relax this assumption, remove the following + # assertion and add run_one_suffix as an argument to + # SystemTestsCompareTwo.__init__ + assert run_one_suffix == "base" + + SystemTestsCompareTwo.__init__( + self, + case1, + separate_builds=separate_builds, + run_two_suffix=run_two_suffix, + multisubmit=multisubmit, + ) + + # Need to tell test status that all phases prior to the run phase have + # passed, since this is checked in the run call (at least for the build + # phase status) + with self._test_status: + for phase in test_status.CORE_PHASES: + if phase == test_status.RUN_PHASE: + break + self._test_status.set_status(phase, test_status.TEST_PASS_STATUS) + + self.run_pass_caseroot = [] + if run_one_should_pass: + self.run_pass_caseroot.append(self._case1.get_value("CASEROOT")) + if run_two_should_pass: + self.run_pass_caseroot.append(self._case2.get_value("CASEROOT")) + + self.compare_should_pass = compare_should_pass + + self.log = [] + + # ------------------------------------------------------------------------ + # Stubs of methods called by SystemTestsCommon.__init__ that interact with + # the system or case object in ways we want to avoid here + # ------------------------------------------------------------------------ + + def _init_environment(self, caseroot): + pass + + def _init_locked_files(self, caseroot, expected): + pass + + def _init_case_setup(self): + pass + + # ------------------------------------------------------------------------ + # Fake implementations of methods that are typically provided by + # SystemTestsCommon + # ------------------------------------------------------------------------ + + def run_indv( + self, + suffix="base", + st_archive=False, + submit_resubmits=None, + keep_init_generated_files=False, + ): + """ + This fake implementation appends to the log and raises an exception if + it's supposed to + + Note that the Call object appended to the log has the current CASE name + in addition to the method arguments. (This is mainly to ensure that the + proper suffix is used for the proper case, but this extra check can be + removed if it's a maintenance problem.) 
+ """ + caseroot = self._case.get_value("CASEROOT") + self.log.append(Call(METHOD_run_indv, {"suffix": suffix, "CASEROOT": caseroot})) + + # Determine whether we should raise an exception + # + # It's important that this check be based on some attribute of the + # self._case object, to ensure that the right case has been activated + # for this call to run_indv (e.g., to catch if we forgot to activate + # case2 before the second call to run_indv). + if caseroot not in self.run_pass_caseroot: + raise RuntimeError("caseroot not in run_pass_caseroot") + + def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): + """ + This fake implementation allows controlling whether compare_test + passes or fails + """ + return (self.compare_should_pass, "no comment", None) + + def _check_for_memleak(self): + pass + + def _st_archive_case_test(self): + pass + + # ------------------------------------------------------------------------ + # Fake implementations of methods that are typically provided by + # SystemTestsCompareTwo + # + # Since we're overriding these, their functionality is untested here! + # (Though note that _link_to_case2_output is tested elsewhere.) + # ------------------------------------------------------------------------ + + def _case_from_existing_caseroot(self, caseroot): + """ + Returns a CaseFake object instead of a Case object + """ + return CaseFake(caseroot, create_case_root=False) + + def _link_to_case2_output(self): + self.log.append(Call(METHOD_link_to_case2_output, {})) + + # ------------------------------------------------------------------------ + # Fake implementations of methods that are typically provided by the + # individual test + # + # The values set here are asserted against in some unit tests + # ------------------------------------------------------------------------ + + def _common_setup(self): + self._case.set_value("var_set_in_common_setup", "common_val") + + def _case_one_setup(self): + self._case.set_value("var_set_in_setup", "case1val") + + def _case_two_setup(self): + self._case.set_value("var_set_in_setup", "case2val") + if self._case2setup_raises_exception: + raise RuntimeError + + def _case_one_custom_prerun_action(self): + self.log.append(Call(METHOD_case_one_custom_prerun_action, {})) + + def _case_one_custom_postrun_action(self): + self.log.append(Call(METHOD_case_one_custom_postrun_action, {})) + + def _case_two_custom_prerun_action(self): + self.log.append(Call(METHOD_case_two_custom_prerun_action, {})) + + def _case_two_custom_postrun_action(self): + self.log.append(Call(METHOD_case_two_custom_postrun_action, {})) + + +# ======================================================================== +# Test class itself +# ======================================================================== + + +class TestSystemTestsCompareTwo(unittest.TestCase): + def setUp(self): + self.original_wd = os.getcwd() + # create a sandbox in which case directories can be created + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + # Some tests trigger a chdir call in the SUT; make sure we return to the + # original directory at the end of the test + os.chdir(self.original_wd) + + shutil.rmtree(self.tempdir, ignore_errors=True) + + def get_caseroots(self, casename="mytest"): + """ + Returns a tuple (case1root, case2root) + """ + case1root = os.path.join(self.tempdir, casename) + case2root = os.path.join(case1root, "case2", casename) + return case1root, case2root + + def get_compare_phase_name(self, mytest): + """ + Returns a string giving the 
compare phase name for this test + """ + run_one_suffix = mytest._run_one_suffix + run_two_suffix = mytest._run_two_suffix + compare_phase_name = "{}_{}_{}".format( + test_status.COMPARE_PHASE, run_one_suffix, run_two_suffix + ) + return compare_phase_name + + def test_resetup_case_single_exe(self): + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + case1._read_only_mode = False + + mytest = SystemTestsCompareTwoFake(case1) + + case1.set_value = mock.MagicMock() + case1.get_value = mock.MagicMock() + case1.get_value.side_effect = ["/tmp", "/tmp/bld", False] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_not_called() + + case1.get_value.side_effect = ["/tmp", "/tmp/bld", True] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_not_called() + + case1.get_value.side_effect = ["/tmp", "/other/bld", False] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_not_called() + + case1.get_value.side_effect = ["/tmp", "/other/bld", True] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_called_with("BUILD_COMPLETE", True) + + def test_setup(self): + # Ensure that test setup properly sets up case 1 and case 2 + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + case1.set_value("var_preset", "preset_value") + + # Exercise + mytest = SystemTestsCompareTwoFake(case1) + + # Verify + # Make sure that pre-existing values in case1 are copied to case2 (via + # clone) + self.assertEqual("preset_value", mytest._case2.get_value("var_preset")) + + # Make sure that _common_setup is called for both + self.assertEqual( + "common_val", mytest._case1.get_value("var_set_in_common_setup") + ) + self.assertEqual( + "common_val", mytest._case2.get_value("var_set_in_common_setup") + ) + + # Make sure that _case_one_setup and _case_two_setup are called + # appropriately + self.assertEqual("case1val", mytest._case1.get_value("var_set_in_setup")) + self.assertEqual("case2val", mytest._case2.get_value("var_set_in_setup")) + + def test_setup_separate_builds_sharedlibroot(self): + # If we're using separate_builds, the two cases should still use + # the same sharedlibroot + + # Setup + case1root, _ = self.get_caseroots() + case1 = CaseFake(case1root) + case1.set_value("SHAREDLIBROOT", os.path.join(case1root, "sharedlibroot")) + + # Exercise + mytest = SystemTestsCompareTwoFake(case1, separate_builds=True) + + # Verify + self.assertEqual( + case1.get_value("SHAREDLIBROOT"), mytest._case2.get_value("SHAREDLIBROOT") + ) + + def test_setup_case2_exists(self): + # If case2 already exists, then setup code should not be called + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + os.makedirs(os.path.join(case1root, "case2", "case1")) + + # Exercise + mytest = SystemTestsCompareTwoFake(case1, run_two_suffix="test") + + # Verify: + + # Make sure that case2 object is set (i.e., that it doesn't remain None) + self.assertEqual("case1", mytest._case2.get_value("CASE")) + + # Variables set in various setup methods should not be set + # (In the real world - i.e., outside of this unit testing fakery - these + # values would be set when the Case objects are created.) 
+ self.assertIsNone(mytest._case1.get_value("var_set_in_common_setup")) + self.assertIsNone(mytest._case2.get_value("var_set_in_common_setup")) + self.assertIsNone(mytest._case1.get_value("var_set_in_setup")) + self.assertIsNone(mytest._case2.get_value("var_set_in_setup")) + + def test_setup_error(self): + # If there is an error in setup, an exception should be raised and the + # case2 directory should be removed + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + + # Exercise + with self.assertRaises(Exception): + SystemTestsCompareTwoFake( + case1, run_two_suffix="test", case2setup_raises_exception=True + ) + + # Verify + self.assertFalse(os.path.exists(os.path.join(case1root, "case1.test"))) + + def test_run_phase_passes(self): + # Make sure the run phase behaves properly when all runs succeed. + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1) + + # Exercise + mytest.run() + + # Verify + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) + + def test_run_phase_internal_calls(self): + # Make sure that the correct calls are made to methods stubbed out by + # SystemTestsCompareTwoFake (when runs succeed) + # + # The point of this is: A number of methods called from the run_phase + # method are stubbed out in the Fake test implementation, because their + # actions are awkward in these unit tests. But we still want to make + # sure that those methods actually got called correctly. + + # Setup + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, case2root = self.get_caseroots() + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake( + case1, run_one_suffix=run_one_suffix, run_two_suffix=run_two_suffix + ) + + # Exercise + mytest.run() + + # Verify + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_one_suffix, "CASEROOT": case1root}), + Call(METHOD_case_one_custom_postrun_action, {}), + Call(METHOD_case_two_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_two_suffix, "CASEROOT": case2root}), + Call(METHOD_case_two_custom_postrun_action, {}), + Call(METHOD_link_to_case2_output, {}), + ] + self.assertEqual(expected_calls, mytest.log) + + def test_run_phase_internal_calls_multisubmit_phase1(self): + # Make sure that the correct calls are made to methods stubbed out by + # SystemTestsCompareTwoFake (when runs succeed), when we have a + # multi-submit test, in the first phase + + # Setup + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, _ = self.get_caseroots() + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake( + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + ) + # RESUBMIT=1 signals first phase + case1.set_value("RESUBMIT", 1) + + # Exercise + mytest.run() + + # Verify + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_one_suffix, "CASEROOT": case1root}), + Call(METHOD_case_one_custom_postrun_action, {}), + ] + self.assertEqual(expected_calls, mytest.log) + + # Also verify that comparison is NOT called: + compare_phase_name = self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_PEND_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + def test_run_phase_internal_calls_multisubmit_phase2(self): + # Make sure that the correct calls are 
made to methods stubbed out by + # SystemTestsCompareTwoFake (when runs succeed), when we have a + # multi-submit test, in the second phase + + # Setup + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, case2root = self.get_caseroots() + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake( + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + compare_should_pass=True, + ) + # RESUBMIT=0 signals second phase + case1.set_value("RESUBMIT", 0) + + # Exercise + mytest.run() + + # Verify + expected_calls = [ + Call(METHOD_case_two_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_two_suffix, "CASEROOT": case2root}), + Call(METHOD_case_two_custom_postrun_action, {}), + Call(METHOD_link_to_case2_output, {}), + ] + self.assertEqual(expected_calls, mytest.log) + + # Also verify that comparison is called: + compare_phase_name = self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + def test_internal_calls_multisubmit_failed_state(self): + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, _ = self.get_caseroots() + case1 = CaseFake(case1root) + + def _set_initial_test_values(x): + x.set_value("RESUBMIT", 1) + + case1.set_initial_test_values = functools.partial( + _set_initial_test_values, case1 + ) + + # Standard first phase + case1.set_value("IS_FIRST_RUN", True) + case1.set_value("RESUBMIT", 1) + + mytest = SystemTestsCompareTwoFake( + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + ) + + mytest.run() + + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"CASEROOT": case1root, "suffix": "base"}), + Call(METHOD_case_one_custom_postrun_action, {}), + ] + + self.assertEqual(expected_calls, mytest.log) + + # Emulate a rerun ensure phase 1 still runs + case1.set_value("IS_FIRST_RUN", True) + case1.set_value("RESUBMIT", 0) + + # Reset the log + mytest.log = [] + + mytest.run() + + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"CASEROOT": case1root, "suffix": "base"}), + Call(METHOD_case_one_custom_postrun_action, {}), + ] + + self.assertEqual(expected_calls, mytest.log) + + def test_run1_fails(self): + # Make sure that a failure in run1 is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, run_one_should_pass=False) + + # Exercise + try: + mytest.run() + except Exception: + pass + + # Verify + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) + + def test_run2_fails(self): + # Make sure that a failure in run2 is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, run_two_should_pass=False) + + # Exercise + try: + mytest.run() + except Exception: + pass + + # Verify + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) + + def test_compare_passes(self): + # Make sure that a pass in the comparison is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, compare_should_pass=True) + + # Exercise + mytest.run() + + # Verify + compare_phase_name = 
self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + def test_compare_fails(self): + # Make sure that a failure in the comparison is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, compare_should_pass=False) + + # Exercise + mytest.run() + + # Verify + compare_phase_name = self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + +if __name__ == "__main__": + unittest.main(verbosity=2, catchbreak=True) diff --git a/CIME/tests/test_unit_config.py b/CIME/tests/test_unit_config.py new file mode 100644 index 00000000000..93da359b046 --- /dev/null +++ b/CIME/tests/test_unit_config.py @@ -0,0 +1,191 @@ +import os +import unittest +import tempfile +from unittest import mock +from pathlib import Path + +from CIME.config import Config + + +def spy(method): + m = mock.MagicMock() + + def wrapper(self, *args, **kwargs): + m(*args, **kwargs) + return method(self, *args, **kwargs) + + wrapper.mock = m + return wrapper + + +class TestConfig(unittest.TestCase): + def test_ignore(self): + test_paths = ( + ("valid.py", False), + ("module/valid.py", False), + ("valid_test.py", False), + ("test_something.py", True), + ("tests/test_something.py", True), + ("conftest.py", True), + ("tests/conftest.py", True), + ("tests/generic/test_something.py", True), + ("tests/generic/conftest.py", True), + ) + + with tempfile.TemporaryDirectory() as _tempdir: + for src_path_name in ("generic", "test", "tests"): + customize_path = Path( + _tempdir, src_path_name, "cime_config", "customize" + ) + + for test_path_name, _ in test_paths: + test_file = customize_path / test_path_name + + test_file.parent.mkdir(parents=True, exist_ok=True) + + test_file.touch() + + with mock.patch( + "CIME.config.Config._load_file", spy(Config._load_file) + ) as mock_load_file: + _ = Config.load(f"{customize_path}") + + loaded_files = [ + f'{Path(x[0][0]).relative_to(f"{customize_path}")}' + for x in mock_load_file.mock.call_args_list + ] + + for test_path_name, ignored in test_paths: + if ignored: + assert test_path_name not in loaded_files + else: + assert test_path_name in loaded_files + + def test_class_external(self): + with tempfile.TemporaryDirectory() as tempdir: + complex_file = os.path.join(tempdir, "01_complex.py") + + with open(complex_file, "w") as fd: + fd.write( + """ +class TestComplex: + def do_something(self): + print("Something complex") + """ + ) + + test_file = os.path.join(tempdir, "02_test.py") + + with open(test_file, "w") as fd: + fd.write( + """ +from CIME.customize import TestComplex + +use_feature1 = True +use_feature2 = False + +def prerun_provenance(case, **kwargs): + print("prerun_provenance") + + external = TestComplex() + + external.do_something() + + return True + """ + ) + + config = Config.load(tempdir) + + assert config.use_feature1 + assert not config.use_feature2 + assert config.prerun_provenance + assert config.prerun_provenance("test") + + with self.assertRaises(AttributeError): + config.postrun_provenance("test") + + def test_class(self): + with tempfile.TemporaryDirectory() as tempdir: + test_file = os.path.join(tempdir, "test.py") + + with open(test_file, "w") as fd: + fd.write( + """ +use_feature1 = True +use_feature2 = False + +class TestComplex: + def do_something(self): + print("Something complex") + +def 
prerun_provenance(case, **kwargs): + print("prerun_provenance") + + external = TestComplex() + + external.do_something() + + return True + """ + ) + + config = Config.load(tempdir) + + assert config.use_feature1 + assert not config.use_feature2 + assert config.prerun_provenance + assert config.prerun_provenance("test") + + with self.assertRaises(AttributeError): + config.postrun_provenance("test") + + def test_load(self): + with tempfile.TemporaryDirectory() as tempdir: + test_file = os.path.join(tempdir, "test.py") + + with open(test_file, "w") as fd: + fd.write( + """ +use_feature1 = True +use_feature2 = False + +def prerun_provenance(case, **kwargs): + print("prerun_provenance") + + return True + """ + ) + + config = Config.load(tempdir) + + assert config.use_feature1 + assert not config.use_feature2 + assert config.prerun_provenance + assert config.prerun_provenance("test") + + with self.assertRaises(AttributeError): + config.postrun_provenance("test") + + def test_overwrite(self): + with tempfile.TemporaryDirectory() as tempdir: + test_file = os.path.join(tempdir, "test.py") + + with open(test_file, "w") as fd: + fd.write( + """ +use_feature1 = True +use_feature2 = False + +def prerun_provenance(case, **kwargs): + print("prerun_provenance") + + return True + """ + ) + + Config.use_feature1 = False + + config = Config.load(tempdir) + + assert config.use_feature1 diff --git a/CIME/tests/test_unit_configure.py b/CIME/tests/test_unit_configure.py new file mode 100644 index 00000000000..693abccdfbd --- /dev/null +++ b/CIME/tests/test_unit_configure.py @@ -0,0 +1,49 @@ +import os +import unittest +import tempfile +import shutil + +from CIME.utils import expect + +from CIME.BuildTools.configure import generate_env_mach_specific +from CIME.XML.machines import Machines + + +class TestConfigure(unittest.TestCase): + def setUp(self): + self.mach_obj = Machines() + self.compiler = self.mach_obj.get_default_compiler() + self.sysos = self.mach_obj.get_value("OS") + self.mpilib = self.mach_obj.get_default_MPIlib( + attributes={"compiler": self.compiler} + ) + self.debug = "FALSE" + self.comp_interface = "nuopc" + + self.tempdir = tempfile.mkdtemp() + self.caseroot = os.path.join(self.tempdir, "newcase") + os.mkdir(self.caseroot) + self.cwd = os.getcwd() + os.chdir(self.caseroot) + + def tearDown(self): + # Make sure we aren't in the temp directory to remove + + os.chdir(self.cwd) + if os.getcwd() == self.tempdir: + expect(False, "CWD is the tempdir to be removed") + shutil.rmtree(self.tempdir, ignore_errors=True) + + def test_generate_env_mach_specific(self): + + generate_env_mach_specific( + self.caseroot, + self.mach_obj, + self.compiler, + self.mpilib, + self.debug, + self.comp_interface, + self.sysos, + unit_testing=False, + threaded=False, + ) diff --git a/CIME/tests/test_unit_cs_status.py b/CIME/tests/test_unit_cs_status.py new file mode 100755 index 00000000000..70efd47935f --- /dev/null +++ b/CIME/tests/test_unit_cs_status.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python3 + +import io +import unittest +import shutil +import os +import tempfile +import re +from CIME.cs_status import cs_status +from CIME import test_status +from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + + +class TestCsStatus(CustomAssertionsTestStatus): + + # ------------------------------------------------------------------------ + # Test helper functions + # ------------------------------------------------------------------------ + + # An arbitrary phase we can use when we want to work with a 
non-core phase + _NON_CORE_PHASE = test_status.MEMLEAK_PHASE + + # Another arbitrary phase if we need two different non-core phases + _NON_CORE_PHASE2 = test_status.BASELINE_PHASE + + def setUp(self): + self._testroot = tempfile.mkdtemp() + self._output = io.StringIO() + + def tearDown(self): + self._output.close() + shutil.rmtree(self._testroot, ignore_errors=True) + + def create_test_dir(self, test_dir): + """Creates the given test directory under testroot. + + Returns the full path to the created test directory. + """ + fullpath = os.path.join(self._testroot, test_dir) + os.makedirs(fullpath) + return fullpath + + @staticmethod + def create_test_status_core_passes(test_dir_path, test_name): + """Creates a TestStatus file in the given path, with PASS status + for all core phases""" + with test_status.TestStatus(test_dir=test_dir_path, test_name=test_name) as ts: + for phase in test_status.CORE_PHASES: + ts.set_status(phase, test_status.TEST_PASS_STATUS) + + def set_last_core_phase_to_fail(self, test_dir_path, test_name): + """Sets the last core phase to FAIL + + Returns the name of this phase""" + fail_phase = test_status.CORE_PHASES[-1] + self.set_phase_to_status( + test_dir_path=test_dir_path, + test_name=test_name, + phase=fail_phase, + status=test_status.TEST_FAIL_STATUS, + ) + return fail_phase + + @staticmethod + def set_phase_to_status(test_dir_path, test_name, phase, status): + """Sets the given phase to the given status for this test""" + with test_status.TestStatus(test_dir=test_dir_path, test_name=test_name) as ts: + ts.set_status(phase, status) + + # ------------------------------------------------------------------------ + # Begin actual tests + # ------------------------------------------------------------------------ + + def test_force_rebuild(self): + test_name = "my.test.name" + test_dir = "my.test.name.testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + cs_status( + [os.path.join(test_dir_path, "TestStatus")], + force_rebuild=True, + out=self._output, + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_PEND_STATUS, + test_status.SHAREDLIB_BUILD_PHASE, + test_name, + ) + + def test_single_test(self): + """cs_status for a single test should include some minimal expected output""" + test_name = "my.test.name" + test_dir = "my.test.name.testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + cs_status([os.path.join(test_dir_path, "TestStatus")], out=self._output) + self.assert_core_phases(self._output.getvalue(), test_name, fails=[]) + + def test_two_tests(self): + """cs_status for two tests (one with a FAIL) should include some minimal expected output""" + test_name1 = "my.test.name1" + test_name2 = "my.test.name2" + test_dir1 = test_name1 + ".testid" + test_dir2 = test_name2 + ".testid" + test_dir_path1 = self.create_test_dir(test_dir1) + test_dir_path2 = self.create_test_dir(test_dir2) + self.create_test_status_core_passes(test_dir_path1, test_name1) + self.create_test_status_core_passes(test_dir_path2, test_name2) + test2_fail_phase = self.set_last_core_phase_to_fail(test_dir_path2, test_name2) + cs_status( + [ + os.path.join(test_dir_path1, "TestStatus"), + os.path.join(test_dir_path2, "TestStatus"), + ], + out=self._output, + ) + self.assert_core_phases(self._output.getvalue(), test_name1, fails=[]) + self.assert_core_phases( + self._output.getvalue(), test_name2, fails=[test2_fail_phase] + ) + + def 
test_fails_only(self): + """With fails_only flag, only fails and pends should appear in the output""" + test_name = "my.test.name" + test_dir = "my.test.name.testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + fail_phase = self.set_last_core_phase_to_fail(test_dir_path, test_name) + pend_phase = self._NON_CORE_PHASE + self.set_phase_to_status( + test_dir_path, + test_name, + phase=pend_phase, + status=test_status.TEST_PEND_STATUS, + ) + cs_status( + [os.path.join(test_dir_path, "TestStatus")], + fails_only=True, + out=self._output, + ) + self.assert_status_of_phase( + output=self._output.getvalue(), + status=test_status.TEST_FAIL_STATUS, + phase=fail_phase, + test_name=test_name, + ) + self.assert_status_of_phase( + output=self._output.getvalue(), + status=test_status.TEST_PEND_STATUS, + phase=pend_phase, + test_name=test_name, + ) + for phase in test_status.CORE_PHASES: + if phase != fail_phase: + self.assert_phase_absent( + output=self._output.getvalue(), phase=phase, test_name=test_name + ) + self.assertNotRegex(self._output.getvalue(), r"Overall:") + + def test_count_fails(self): + """Test the count of fails with three tests + + For first phase of interest: First test FAILs, second PASSes, + third FAILs; count should be 2, and this phase should not appear + individually for each test. + + For second phase of interest: First test PASSes, second PASSes, + third FAILs; count should be 1, and this phase should not appear + individually for each test. + """ + # Note that this test does NOT cover: + # - combining count_fails_phase_list with fails_only: currently, + # this wouldn't cover any additional code/logic + # - ensuring that PENDs are also counted: currently, this + # wouldn't cover any additional code/logic + phase_of_interest1 = self._NON_CORE_PHASE + phase_of_interest2 = self._NON_CORE_PHASE2 + statuses1 = [ + test_status.TEST_FAIL_STATUS, + test_status.TEST_PASS_STATUS, + test_status.TEST_FAIL_STATUS, + ] + statuses2 = [ + test_status.TEST_PASS_STATUS, + test_status.TEST_PASS_STATUS, + test_status.TEST_FAIL_STATUS, + ] + test_paths = [] + test_names = [] + for testnum in range(3): + test_name = "my.test.name" + str(testnum) + test_names.append(test_name) + test_dir = test_name + ".testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + self.set_phase_to_status( + test_dir_path, + test_name, + phase=phase_of_interest1, + status=statuses1[testnum], + ) + self.set_phase_to_status( + test_dir_path, + test_name, + phase=phase_of_interest2, + status=statuses2[testnum], + ) + test_paths.append(os.path.join(test_dir_path, "TestStatus")) + + cs_status( + test_paths, + count_fails_phase_list=[phase_of_interest1, phase_of_interest2], + out=self._output, + ) + + for testnum in range(3): + self.assert_phase_absent( + output=self._output.getvalue(), + phase=phase_of_interest1, + test_name=test_names[testnum], + ) + self.assert_phase_absent( + output=self._output.getvalue(), + phase=phase_of_interest2, + test_name=test_names[testnum], + ) + count_regex1 = r"{} +non-passes: +2".format(re.escape(phase_of_interest1)) + self.assertRegex(self._output.getvalue(), count_regex1) + count_regex2 = r"{} +non-passes: +1".format(re.escape(phase_of_interest2)) + self.assertRegex(self._output.getvalue(), count_regex2) + + def test_expected_fails(self): + """With the expected_fails_file flag, expected failures should be flagged as such""" + test_name1 = "my.test.name1" + 
test_name2 = "my.test.name2" + test_dir1 = test_name1 + ".testid" + test_dir2 = test_name2 + ".testid" + test_dir_path1 = self.create_test_dir(test_dir1) + test_dir_path2 = self.create_test_dir(test_dir2) + self.create_test_status_core_passes(test_dir_path1, test_name1) + self.create_test_status_core_passes(test_dir_path2, test_name2) + test1_fail_phase = self.set_last_core_phase_to_fail(test_dir_path1, test_name1) + test2_fail_phase = self.set_last_core_phase_to_fail(test_dir_path2, test_name2) + + # One phase is labeled as an expected failure for test1, nothing for test2: + expected_fails_contents = """ + + + + {fail_status} + + + +""".format( + test_name1=test_name1, + test1_fail_phase=test1_fail_phase, + fail_status=test_status.TEST_FAIL_STATUS, + ) + expected_fails_filepath = os.path.join(self._testroot, "ExpectedFails.xml") + with open(expected_fails_filepath, "w") as expected_fails_file: + expected_fails_file.write(expected_fails_contents) + + cs_status( + [ + os.path.join(test_dir_path1, "TestStatus"), + os.path.join(test_dir_path2, "TestStatus"), + ], + expected_fails_filepath=expected_fails_filepath, + out=self._output, + ) + + # Both test1 and test2 should have a failure for one phase, but this should be + # marked as expected only for test1. + self.assert_core_phases( + self._output.getvalue(), test_name1, fails=[test1_fail_phase] + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_FAIL_STATUS, + test1_fail_phase, + test_name1, + xfail="expected", + ) + self.assert_core_phases( + self._output.getvalue(), test_name2, fails=[test2_fail_phase] + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_FAIL_STATUS, + test2_fail_phase, + test_name2, + xfail="no", + ) + # Make sure that no other phases are mistakenly labeled as expected failures: + self.assert_num_expected_unexpected_fails( + self._output.getvalue(), num_expected=1, num_unexpected=0 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_custom_assertions_test_status.py b/CIME/tests/test_unit_custom_assertions_test_status.py new file mode 100755 index 00000000000..99e6cb05d04 --- /dev/null +++ b/CIME/tests/test_unit_custom_assertions_test_status.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of CustomAssertionsTestStatus +""" + +import unittest +from CIME import test_status +from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + + +class TestCustomAssertions(CustomAssertionsTestStatus): + + _UNEXPECTED_COMMENT = test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START + " blah)" + + @staticmethod + def output_line(status, test_name, phase, extra=""): + output = status + " " + test_name + " " + phase + if extra: + output += " " + extra + output += "\n" + return output + + def test_assertPhaseAbsent_passes(self): + """assert_phase_absent should pass when the phase is absent for + the given test_name""" + test_name1 = "my.test.name1" + test_name2 = "my.test.name2" + output = self.output_line("PASS", test_name1, "PHASE1") + output += self.output_line("PASS", test_name2, "PHASE2") + + self.assert_phase_absent(output, "PHASE2", test_name1) + self.assert_phase_absent(output, "PHASE1", test_name2) + + def test_assertPhaseAbsent_fails(self): + """assert_phase_absent should fail when the phase is present for + the given test_name""" + test_name = "my.test.name" + output = self.output_line("PASS", test_name, "PHASE1") + + with self.assertRaises(AssertionError): + 
self.assert_phase_absent(output, "PHASE1", test_name) + + def test_assertCorePhases_passes(self): + """assert_core_phases passes when it should""" + output = "" + fails = [test_status.CORE_PHASES[1]] + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + if phase in fails: + status = test_status.TEST_FAIL_STATUS + else: + status = test_status.TEST_PASS_STATUS + output = output + self.output_line(status, test_name, phase) + + self.assert_core_phases(output, test_name, fails) + + def test_assertCorePhases_missingPhase_fails(self): + """assert_core_phases fails if there is a missing phase""" + output = "" + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + if phase != test_status.CORE_PHASES[1]: + output = output + self.output_line( + test_status.TEST_PASS_STATUS, test_name, phase + ) + + with self.assertRaises(AssertionError): + self.assert_core_phases(output, test_name, fails=[]) + + def test_assertCorePhases_wrongStatus_fails(self): + """assert_core_phases fails if a phase has the wrong status""" + output = "" + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + output = output + self.output_line( + test_status.TEST_PASS_STATUS, test_name, phase + ) + + with self.assertRaises(AssertionError): + self.assert_core_phases( + output, test_name, fails=[test_status.CORE_PHASES[1]] + ) + + def test_assertCorePhases_wrongName_fails(self): + """assert_core_phases fails if the test name is wrong""" + output = "" + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + output = output + self.output_line( + test_status.TEST_PASS_STATUS, test_name, phase + ) + + with self.assertRaises(AssertionError): + self.assert_core_phases(output, "my.test", fails=[]) + + # Note: Basic functionality of assert_status_of_phase is covered sufficiently via + # tests of assert_core_phases. Below we just cover some other aspects that aren't + # already covered. 
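+    # Illustrative note (not part of the original suite): output_line() builds
+    # lines in the same "STATUS test_name PHASE [extra]" shape that these
+    # assertions parse, e.g.
+    #
+    #     self.output_line("FAIL", "my.test.name", "PHASE1",
+    #                      extra=test_status.TEST_EXPECTED_FAILURE_COMMENT)
+    #
+    # The tests below exercise how assert_status_of_phase treats that optional
+    # trailing "extra" text, in particular the expected/unexpected-failure comments.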
+ + def test_assertStatusOfPhase_withExtra_passes(self): + """Make sure assert_status_of_phase passes when there is some extra text at the + end of the line""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, test_status.CORE_PHASES[0], test_name + ) + + def test_assertStatusOfPhase_xfailNo_passes(self): + """assert_status_of_phase should pass when xfail='no' and there is no + EXPECTED/UNEXPECTED on the line""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, test_name, test_status.CORE_PHASES[0] + ) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="no", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_assertStatusOfPhase_xfailNo_fails(self): + """assert_status_of_phase should fail when xfail='no' but the line contains the + EXPECTED comment""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + + with self.assertRaises(AssertionError): + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="no", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + def test_assertStatusOfPhase_xfailExpected_passes(self): + """assert_status_of_phase should pass when xfail='expected' and the line contains + the EXPECTED comment""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="expected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + def test_assertStatusOfPhase_xfailExpected_fails(self): + """assert_status_of_phase should fail when xfail='expected' but the line does NOT contain + the EXPECTED comment""" + test_name = "my.test.name" + # Note that the line contains the UNEXPECTED comment, but not the EXPECTED comment + # (we assume that if the assertion correctly fails in this case, then it will also + # correctly handle the case where neither the EXPECTED nor UNEXPECTED comment is + # present). 
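+        # Sketch of the distinction under test (the exact wording of the real
+        # comments is an assumption): an expected failure ends its line with
+        # TEST_EXPECTED_FAILURE_COMMENT, while an unexpected one starts its
+        # trailing comment with TEST_UNEXPECTED_FAILURE_COMMENT_START (see
+        # _UNEXPECTED_COMMENT at the top of this class), so xfail='expected'
+        # must reject a line carrying only the UNEXPECTED marker.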
+ output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=self._UNEXPECTED_COMMENT, + ) + + with self.assertRaises(AssertionError): + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="expected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=1 + ) + + def test_assertStatusOfPhase_xfailUnexpected_passes(self): + """assert_status_of_phase should pass when xfail='unexpected' and the line contains + the UNEXPECTED comment""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=self._UNEXPECTED_COMMENT, + ) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="unexpected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=1 + ) + + def test_assertStatusOfPhase_xfailUnexpected_fails(self): + """assert_status_of_phase should fail when xfail='unexpected' but the line does NOT + contain the UNEXPECTED comment""" + test_name = "my.test.name" + # Note that the line contains the EXPECTED comment, but not the UNEXPECTED comment + # (we assume that if the assertion correctly fails in this case, then it will also + # correctly handle the case where neither the EXPECTED nor UNEXPECTED comment is + # present). + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + + with self.assertRaises(AssertionError): + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="unexpected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_doctest.py b/CIME/tests/test_unit_doctest.py new file mode 100644 index 00000000000..571b9af41db --- /dev/null +++ b/CIME/tests/test_unit_doctest.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +import glob +import re +import os +import stat +import doctest +import sys +import pkgutil +import unittest +import functools + +import CIME +from CIME import utils +from CIME.tests import base + + +class TestDocs(base.BaseTestCase): + def test_lib_docs(self): + cime_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) + + ignore_patterns = [ + "/tests/", + "mvk.py", + "pgn.py", + "tsc.py", + ] + + for dirpath, _, filenames in os.walk(os.path.join(cime_root, "CIME")): + for filepath in map(lambda x: os.path.join(dirpath, x), filenames): + if not filepath.endswith(".py") or any( + [x in filepath for x in ignore_patterns] + ): + continue + + # Couldn't use doctest.DocFileSuite due to sys.path issue + self.run_cmd_assert_result( + f"PYTHONPATH={cime_root}:$PYTHONPATH python3 -m doctest {filepath} 2>&1", + from_dir=cime_root, + ) diff --git a/CIME/tests/test_unit_expected_fails_file.py b/CIME/tests/test_unit_expected_fails_file.py new file mode 100755 index 00000000000..1e0e5878e2b --- /dev/null +++ b/CIME/tests/test_unit_expected_fails_file.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 + +import unittest +import 
os +import shutil +import tempfile +from CIME.XML.expected_fails_file import ExpectedFailsFile +from CIME.utils import CIMEError +from CIME.expected_fails import ExpectedFails + + +class TestExpectedFailsFile(unittest.TestCase): + def setUp(self): + self._workdir = tempfile.mkdtemp() + self._xml_filepath = os.path.join(self._workdir, "expected_fails.xml") + + def tearDown(self): + shutil.rmtree(self._workdir) + + def test_basic(self): + """Basic test of the parsing of an expected fails file""" + contents = """ + + + + FAIL + #404 + + + PEND + #404 + Because of the RUN failure, this phase is listed as PEND + + + + + FAIL + ESMCI/cime#2917 + + + FAIL + ESMCI/cime#2917 + + + +""" + with open(self._xml_filepath, "w") as xml_file: + xml_file.write(contents) + expected_fails_file = ExpectedFailsFile(self._xml_filepath) + xfails = expected_fails_file.get_expected_fails() + + expected_test1 = ExpectedFails() + expected_test1.add_failure("RUN", "FAIL") + expected_test1.add_failure("COMPARE_base_rest", "PEND") + expected_test2 = ExpectedFails() + expected_test2.add_failure("GENERATE", "FAIL") + expected_test2.add_failure("BASELINE", "FAIL") + expected = {"my.test.1": expected_test1, "my.test.2": expected_test2} + + self.assertEqual(xfails, expected) + + def test_same_test_appears_twice(self): + """If the same test appears twice, its information should be appended. + + This is not the typical, expected layout of the file, but it should be handled + correctly in case the file is written this way. + """ + contents = """ + + + + FAIL + #404 + + + + + PEND + #404 + Because of the RUN failure, this phase is listed as PEND + + + +""" + with open(self._xml_filepath, "w") as xml_file: + xml_file.write(contents) + expected_fails_file = ExpectedFailsFile(self._xml_filepath) + xfails = expected_fails_file.get_expected_fails() + + expected_test1 = ExpectedFails() + expected_test1.add_failure("RUN", "FAIL") + expected_test1.add_failure("COMPARE_base_rest", "PEND") + expected = {"my.test.1": expected_test1} + + self.assertEqual(xfails, expected) + + def test_invalid_file(self): + """Given an invalid file, an exception should be raised in schema validation""" + + # This file is missing a element in the block. + # + # It's important to have the expectedFails version number be greater than 1, + # because schema validation isn't done in cime for files with a version of 1. 
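+        # For readability, a sketch of the shape a complete, valid entry takes
+        # (mirroring the file used in test_basic above; treat the exact tag
+        # names as an assumption rather than part of this test):
+        #
+        #     <test name="my.test.1">
+        #       <phase name="RUN">
+        #         <status>FAIL</status>
+        #         <issue>#404</issue>
+        #       </phase>
+        #     </test>
+        #
+        # The contents below deliberately omit a required child element so that
+        # schema validation fails.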
+ contents = """ + + + + + + +""" + with open(self._xml_filepath, "w") as xml_file: + xml_file.write(contents) + + with self.assertRaisesRegex(CIMEError, "Schemas validity error"): + _ = ExpectedFailsFile(self._xml_filepath) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_fake_case.py b/CIME/tests/test_unit_fake_case.py new file mode 100755 index 00000000000..c4cebebcd2c --- /dev/null +++ b/CIME/tests/test_unit_fake_case.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of FakeCase from configure +This is seperate from FakeCase that's under CIME/tests +""" + +import unittest +import os +from CIME.utils import get_model, CIMEError, GLOBAL, get_src_root + +from CIME.BuildTools.configure import FakeCase + + +class TestFakeCase(unittest.TestCase): + def setUp(self): + self.compiler = "intel" + self.mpilib = "mpich" + self.debug = "FALSE" + self.comp_interface = "nuopc" + self.model = get_model() + self.srcroot = get_src_root() + + def create_fake_case( + self, compiler, mpilib, debug, comp_interface, threading=False, gpu_type="none" + ): + + pio_version = 2 + fake_case = FakeCase( + compiler, + mpilib, + debug, + comp_interface, + threading=threading, + gpu_type=gpu_type, + ) + + # Verify + self.assertEqual(compiler, fake_case.get_value("COMPILER")) + self.assertEqual(mpilib, fake_case.get_value("MPILIB")) + self.assertEqual(debug, fake_case.get_value("DEBUG")) + self.assertEqual(comp_interface, fake_case.get_value("COMP_INTERFACE")) + self.assertEqual(gpu_type, fake_case.get_value("GPU_TYPE")) + self.assertEqual(pio_version, fake_case.get_value("PIO_VERSION")) + self.assertEqual(self.model, fake_case.get_value("MODEL")) + self.assertEqual(self.srcroot, fake_case.get_value("SRCROOT")) + self.assertEqual(threading, fake_case.get_build_threaded()) + + return fake_case + + def test_create_simple(self): + fake_case = self.create_fake_case( + self.compiler, self.mpilib, self.debug, self.comp_interface + ) + + def test_get_bad_setting(self): + fake_case = self.create_fake_case( + self.compiler, self.mpilib, self.debug, self.comp_interface + ) + + with self.assertRaisesRegex( + CIMEError, "ERROR: FakeCase does not support getting value of 'ZZTOP'" + ): + fake_case.get_value("ZZTOP") + + def test_get_case_root(self): + fake_case = self.create_fake_case( + self.compiler, self.mpilib, self.debug, self.comp_interface + ) + + caseroot = os.path.join(self.srcroot, "newcase") + fake_case.set_value("CASEROOT", caseroot) + self.assertEqual(fake_case.get_case_root(), caseroot) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_grids.py b/CIME/tests/test_unit_grids.py new file mode 100755 index 00000000000..8417177e83e --- /dev/null +++ b/CIME/tests/test_unit_grids.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python3 + +""" +This module tests *some* functionality of CIME.XML.grids +""" + +# Ignore privacy concerns for unit tests, so that unit tests can access +# protected members of the system under test +# +# pylint:disable=protected-access + +# Also ignore too-long lines, since these are common in unit tests +# +# pylint:disable=line-too-long + +import unittest +import os +import shutil +import string +import tempfile +from CIME.XML.grids import Grids, _ComponentGrids, _add_grid_info, _strip_grid_from_name +from CIME.utils import CIMEError + + +class TestGrids(unittest.TestCase): + """Tests some functionality of CIME.XML.grids + + Note that much of the functionality of CIME.XML.grids is NOT covered here + """ + + 
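+    # The class-level strings below are fragments of a config_grids.xml file.
+    # _CONFIG_GRIDS_TEMPLATE is a string.Template whose $MODEL_GRID_ENTRIES,
+    # $DOMAIN_ENTRIES, $EXTRA_REQUIRED_GRIDMAPS and $GRIDMAP_ENTRIES
+    # placeholders are filled in by _create_grids_xml() via Template.substitute();
+    # a typical test therefore assembles a file roughly like this (illustrative):
+    #
+    #     self._create_grids_xml(
+    #         model_grid_entries=self._MODEL_GRID_F09_G17,
+    #         domain_entries=self._DOMAIN_F09 + self._DOMAIN_G17,
+    #         gridmap_entries=self._GRIDMAP_F09_G17,
+    #     )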
_CONFIG_GRIDS_TEMPLATE = string.Template( + """ + + + + + + + + atm_default_grid + lnd_default_grid + ocnice_default_grid + rof_default_grid + glc_default_grid + wav_default_grid + null + + +$MODEL_GRID_ENTRIES + + + + + + 0 0 + unset + null is no grid: + + +$DOMAIN_ENTRIES + + + + ATM2OCN_FMAPNAME + OCN2ATM_FMAPNAME +$EXTRA_REQUIRED_GRIDMAPS + + + +$GRIDMAP_ENTRIES + + +""" + ) + + _MODEL_GRID_F09_G17 = """ + + 0.9x1.25 + 0.9x1.25 + gx1v7 + gx1v7 + +""" + + # For testing multiple GLC grids + _MODEL_GRID_F09_G17_3GLC = """ + + 0.9x1.25 + 0.9x1.25 + gx1v7 + ais8:gris4:lis12 + gx1v7 + +""" + + _DOMAIN_F09 = """ + + 288 192 + fv0.9x1.25_ESMFmesh.nc + 0.9x1.25 is FV 1-deg grid: + +""" + + _DOMAIN_G17 = """ + + 320 384 + gx1v7_ESMFmesh.nc + gx1v7 is displaced Greenland pole 1-deg grid with Caspian as a land feature: + +""" + + _DOMAIN_GRIS4 = """ + + 416 704 + greenland_4km_ESMFmesh.nc + 4-km Greenland grid + +""" + + _DOMAIN_AIS8 = """ + + 704 576 + antarctica_8km_ESMFmesh.nc + 8-km Antarctica grid + +""" + + _DOMAIN_LIS12 = """ + + 123 456 + laurentide_12km_ESMFmesh.nc + 12-km Laurentide grid + +""" + + _GRIDMAP_F09_G17 = """ + + + map_foo_TO_gx1v7_aave.nc + map_gx1v7_TO_foo_aave.nc + map_gx1v7_TO_foo_xxx.nc + + + + + map_fv0.9x1.25_TO_gx1v7_aave.nc + map_gx1v7_TO_fv0.9x1.25_aave.nc + + + + + map_fv0.9x1.25_TO_foo_aave.nc + map_foo_TO_fv0.9x1.25_aave.nc + map_foo_TO_fv0.9x1.25_xxx.nc + +""" + + _GRIDMAP_GRIS4_G17 = """ + + map_gris4_to_gx1v7_liq.nc + map_gris4_to_gx1v7_ice.nc + +""" + + _GRIDMAP_AIS8_G17 = """ + + map_ais8_to_gx1v7_liq.nc + map_ais8_to_gx1v7_ice.nc + +""" + + _GRIDMAP_LIS12_G17 = """ + + map_lis12_to_gx1v7_liq.nc + map_lis12_to_gx1v7_ice.nc + +""" + + def setUp(self): + self._workdir = tempfile.mkdtemp() + self._xml_filepath = os.path.join(self._workdir, "config_grids.xml") + + def tearDown(self): + shutil.rmtree(self._workdir) + + def _create_grids_xml( + self, + model_grid_entries, + domain_entries, + gridmap_entries, + extra_required_gridmaps="", + ): + grids_xml = self._CONFIG_GRIDS_TEMPLATE.substitute( + { + "MODEL_GRID_ENTRIES": model_grid_entries, + "DOMAIN_ENTRIES": domain_entries, + "EXTRA_REQUIRED_GRIDMAPS": extra_required_gridmaps, + "GRIDMAP_ENTRIES": gridmap_entries, + } + ) + with open(self._xml_filepath, "w", encoding="UTF-8") as xml_file: + xml_file.write(grids_xml) + + def assert_grid_info_f09_g17(self, grid_info): + """Asserts that expected grid info is present and correct when using _MODEL_GRID_F09_G17""" + self.assertEqual(grid_info["ATM_NX"], 288) + self.assertEqual(grid_info["ATM_NY"], 192) + self.assertEqual(grid_info["ATM_GRID"], "0.9x1.25") + self.assertEqual(grid_info["ATM_DOMAIN_MESH"], "fv0.9x1.25_ESMFmesh.nc") + + self.assertEqual(grid_info["LND_NX"], 288) + self.assertEqual(grid_info["LND_NY"], 192) + self.assertEqual(grid_info["LND_GRID"], "0.9x1.25") + self.assertEqual(grid_info["LND_DOMAIN_MESH"], "fv0.9x1.25_ESMFmesh.nc") + + self.assertEqual(grid_info["OCN_NX"], 320) + self.assertEqual(grid_info["OCN_NY"], 384) + self.assertEqual(grid_info["OCN_GRID"], "gx1v7") + self.assertEqual(grid_info["OCN_DOMAIN_MESH"], "gx1v7_ESMFmesh.nc") + + self.assertEqual(grid_info["ICE_NX"], 320) + self.assertEqual(grid_info["ICE_NY"], 384) + self.assertEqual(grid_info["ICE_GRID"], "gx1v7") + self.assertEqual(grid_info["ICE_DOMAIN_MESH"], "gx1v7_ESMFmesh.nc") + + self.assertEqual( + grid_info["ATM2OCN_FMAPNAME"], "map_fv0.9x1.25_TO_gx1v7_aave.nc" + ) + self.assertEqual( + grid_info["OCN2ATM_FMAPNAME"], "map_gx1v7_TO_fv0.9x1.25_aave.nc" + ) + 
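+        # OCN2ATM_SHOULDBEABSENT is only defined in _GRIDMAP_F09_G17 for a
+        # non-matching ("foo") grid pair, so it must not leak into the reduced
+        # grid info for f09_g17 (a reading of the fixture, not a new requirement):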
self.assertFalse("OCN2ATM_SHOULDBEABSENT" in grid_info) + + def assert_grid_info_f09_g17_3glc(self, grid_info): + """Asserts that all domain info is present & correct for _MODEL_GRID_F09_G17_3GLC""" + self.assert_grid_info_f09_g17(grid_info) + + # Note that we don't assert GLC_NX and GLC_NY here: these are unused for this + # multi-grid case, so we don't care what arbitrary values they have. + self.assertEqual(grid_info["GLC_GRID"], "ais8:gris4:lis12") + self.assertEqual( + grid_info["GLC_DOMAIN_MESH"], + "antarctica_8km_ESMFmesh.nc:greenland_4km_ESMFmesh.nc:laurentide_12km_ESMFmesh.nc", + ) + self.assertEqual( + grid_info["GLC2OCN_LIQ_RMAPNAME"], + "map_ais8_to_gx1v7_liq.nc:map_gris4_to_gx1v7_liq.nc:map_lis12_to_gx1v7_liq.nc", + ) + self.assertEqual( + grid_info["GLC2OCN_ICE_RMAPNAME"], + "map_ais8_to_gx1v7_ice.nc:map_gris4_to_gx1v7_ice.nc:map_lis12_to_gx1v7_ice.nc", + ) + + def test_get_grid_info_basic(self): + """Basic test of get_grid_info""" + model_grid_entries = self._MODEL_GRID_F09_G17 + domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17 + gridmap_entries = self._GRIDMAP_F09_G17 + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17(grid_info) + + def test_get_grid_info_extra_required_gridmaps(self): + """Test of get_grid_info with some extra required gridmaps""" + model_grid_entries = self._MODEL_GRID_F09_G17 + domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17 + gridmap_entries = self._GRIDMAP_F09_G17 + # These are some extra required gridmaps that aren't explicitly specified + extra_required_gridmaps = """ + ATM2OCN_EXTRA + OCN2ATM_EXTRA +""" + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + extra_required_gridmaps=extra_required_gridmaps, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17(grid_info) + self.assertEqual(grid_info["ATM2OCN_EXTRA"], "unset") + self.assertEqual(grid_info["OCN2ATM_EXTRA"], "unset") + + def test_get_grid_info_extra_gridmaps(self): + """Test of get_grid_info with some extra gridmaps""" + model_grid_entries = self._MODEL_GRID_F09_G17 + domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17 + gridmap_entries = self._GRIDMAP_F09_G17 + # These are some extra gridmaps that aren't in the required list + gridmap_entries += """ + + map_fv0.9x1.25_TO_gx1v7_extra.nc + map_gx1v7_TO_fv0.9x1.25_extra.nc + +""" + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17(grid_info) + self.assertEqual(grid_info["ATM2OCN_EXTRA"], "map_fv0.9x1.25_TO_gx1v7_extra.nc") + self.assertEqual(grid_info["OCN2ATM_EXTRA"], "map_gx1v7_TO_fv0.9x1.25_extra.nc") + + def test_get_grid_info_3glc(self): + """Test of get_grid_info with 3 glc grids""" + model_grid_entries = self._MODEL_GRID_F09_G17_3GLC + domain_entries = ( + self._DOMAIN_F09 + + self._DOMAIN_G17 + + self._DOMAIN_GRIS4 + + self._DOMAIN_AIS8 + + self._DOMAIN_LIS12 + ) + gridmap_entries = ( + 
self._GRIDMAP_F09_G17 + + self._GRIDMAP_GRIS4_G17 + + self._GRIDMAP_AIS8_G17 + + self._GRIDMAP_LIS12_G17 + ) + # Claim that a glc2atm gridmap is required in order to test the logic that handles + # an unset required gridmap for a component with multiple grids. + extra_required_gridmaps = """ + GLC2ATM_EXTRA +""" + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + extra_required_gridmaps=extra_required_gridmaps, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17_3glc", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17_3glc(grid_info) + self.assertEqual(grid_info["GLC2ATM_EXTRA"], "unset") + + +class TestComponentGrids(unittest.TestCase): + """Tests the _ComponentGrids helper class defined in CIME.XML.grids""" + + # A valid grid long name used in a lot of these tests; there are two rof grids and + # three glc grids, and one grid for each other component + _GRID_LONGNAME = "a%0.9x1.25_l%0.9x1.25_oi%gx1v7_r%r05:r01_g%ais8:gris4:lis12_w%ww3a_z%null_m%gx1v7" + + # ------------------------------------------------------------------------ + # Tests of check_num_elements + # + # These tests cover a lot of the code in _ComponentGrids + # + # We don't cover all of the branches in check_num_elements because many of the + # branches that lead to a successful pass are already covered by unit tests in the + # TestGrids class. + # ------------------------------------------------------------------------ + + def test_check_num_elements_right_ndomains(self): + """With the right number of domains for a component, check_num_elements should pass""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + gridinfo = {"GLC_DOMAIN_MESH": "foo:bar:baz"} + + # The test passes as long as the following call doesn't generate any errors + component_grids.check_num_elements(gridinfo) + + def test_check_num_elements_wrong_ndomains(self): + """With the wrong number of domains for a component, check_num_elements should fail""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + # In the following, there should be 3 elements, but we only specify 2 + gridinfo = {"GLC_DOMAIN_MESH": "foo:bar"} + + self.assertRaisesRegex( + CIMEError, + "Unexpected number of colon-delimited elements", + component_grids.check_num_elements, + gridinfo, + ) + + def test_check_num_elements_right_nmaps(self): + """With the right number of maps between two components, check_num_elements should pass""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + gridinfo = {"GLC2ROF_RMAPNAME": "map1:map2:map3:map4:map5:map6"} + + # The test passes as long as the following call doesn't generate any errors + component_grids.check_num_elements(gridinfo) + + def test_check_num_elements_wrong_nmaps(self): + """With the wrong number of maps between two components, check_num_elements should fail""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + # In the following, there should be 6 elements, but we only specify 5 + gridinfo = {"GLC2ROF_RMAPNAME": "map1:map2:map3:map4:map5"} + + self.assertRaisesRegex( + CIMEError, + "Unexpected number of colon-delimited elements", + component_grids.check_num_elements, + gridinfo, + ) + + +class TestGridsFunctions(unittest.TestCase): + """Tests helper functions defined in CIME.XML.grids + + These tests are in a separate class to avoid the unnecessary setUp and tearDown + function of the main test class. 
+ + """ + + # ------------------------------------------------------------------------ + # Tests of _add_grid_info + # ------------------------------------------------------------------------ + + def test_add_grid_info_initial(self): + """Test of _add_grid_info for the initial add of a given key""" + grid_info = {"foo": "a"} + _add_grid_info(grid_info, "bar", "b") + self.assertEqual(grid_info, {"foo": "a", "bar": "b"}) + + def test_add_grid_info_existing(self): + """Test of _add_grid_info when the given key already exists""" + grid_info = {"foo": "bar"} + _add_grid_info(grid_info, "foo", "baz") + self.assertEqual(grid_info, {"foo": "bar:baz"}) + + def test_add_grid_info_existing_with_value_for_multiple(self): + """Test of _add_grid_info when the given key already exists and value_for_multiple is provided""" + grid_info = {"foo": 1} + _add_grid_info(grid_info, "foo", 2, value_for_multiple=0) + self.assertEqual(grid_info, {"foo": 0}) + + # ------------------------------------------------------------------------ + # Tests of strip_grid_from_name + # ------------------------------------------------------------------------ + + def test_strip_grid_from_name_basic(self): + """Basic test of _strip_grid_from_name""" + result = _strip_grid_from_name("atm_grid") + self.assertEqual(result, "atm") + + def test_strip_grid_from_name_badname(self): + """_strip_grid_from_name should raise an exception for a name not ending with _grid""" + self.assertRaisesRegex( + CIMEError, "does not end with _grid", _strip_grid_from_name, name="atm" + ) + + # ------------------------------------------------------------------------ + # Tests of _check_grid_info_component_counts + # ------------------------------------------------------------------------ + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_hist_utils.py b/CIME/tests/test_unit_hist_utils.py new file mode 100644 index 00000000000..fe6d4866c34 --- /dev/null +++ b/CIME/tests/test_unit_hist_utils.py @@ -0,0 +1,66 @@ +import io +import unittest +from unittest import mock + +from CIME.hist_utils import copy_histfiles +from CIME.XML.archive import Archive + + +class TestHistUtils(unittest.TestCase): + @mock.patch("CIME.hist_utils.safe_copy") + def test_copy_histfiles_exclude(self, safe_copy): + case = mock.MagicMock() + + case.get_env.return_value.get_latest_hist_files.side_effect = [ + ["/tmp/testing.cpl.hi.nc"], + ["/tmp/testing.atm.hi.nc"], + ] + + case.get_env.return_value.exclude_testing.side_effect = [True, False] + + case.get_value.side_effect = [ + "/tmp", # RUNDIR + None, # RUN_REFCASE + "testing", # CASE + True, # TEST + True, # TEST + ] + + case.get_compset_components.return_value = ["atm"] + + test_files = [ + "testing.cpl.hi.nc", + ] + + with mock.patch("os.listdir", return_value=test_files): + comments, num_copied = copy_histfiles(case, "base") + + assert num_copied == 1 + + @mock.patch("CIME.hist_utils.safe_copy") + def test_copy_histfiles(self, safe_copy): + case = mock.MagicMock() + + case.get_env.return_value.get_latest_hist_files.return_value = [ + "/tmp/testing.cpl.hi.nc", + ] + + case.get_env.return_value.exclude_testing.return_value = False + + case.get_value.side_effect = [ + "/tmp", # RUNDIR + None, # RUN_REFCASE + "testing", # CASE + True, # TEST + ] + + case.get_compset_components.return_value = [] + + test_files = [ + "testing.cpl.hi.nc", + ] + + with mock.patch("os.listdir", return_value=test_files): + comments, num_copied = copy_histfiles(case, "base") + + assert num_copied == 1 diff --git 
a/CIME/tests/test_unit_locked_files.py b/CIME/tests/test_unit_locked_files.py new file mode 100644 index 00000000000..fc871e3b768 --- /dev/null +++ b/CIME/tests/test_unit_locked_files.py @@ -0,0 +1,344 @@ +import tempfile +import unittest +from unittest import mock +from pathlib import Path + +from CIME import locked_files +from CIME.utils import CIMEError +from CIME.XML.entry_id import EntryID +from CIME.XML.env_batch import EnvBatch +from CIME.XML.files import Files + + +def create_batch_system(env_batch, batch_submit_value=None): + batch_system = env_batch.make_child( + name="batch_system", attributes={"type": "slurm"} + ) + env_batch.make_child(name="batch_query", attributes={"args": ""}, root=batch_system) + batch_submit = env_batch.make_child( + name="batch_submit", root=batch_system, text=batch_submit_value + ) + env_batch.make_child(name="batch_cancel", root=batch_system) + env_batch.make_child(name="batch_redirect", root=batch_system) + env_batch.make_child(name="batch_directive", root=batch_system) + directives = env_batch.make_child(name="directives", root=batch_system) + env_batch.make_child(name="directive", root=directives) + + return batch_system + + +def create_fake_env(tempdir): + locked_files_dir = Path(tempdir, locked_files.LOCKED_DIR) + + locked_files_dir.mkdir(parents=True) + + locked_file_path = locked_files_dir / "env_batch.xml" + + env_batch = EnvBatch(tempdir) + + env_batch.write(force_write=True) + + batch_system = create_batch_system(env_batch, "sbatch") + + env_batch.write(str(locked_file_path), force_write=True) + + env_batch.remove_child(batch_system) + + batch_system = create_batch_system(env_batch) + + env_batch.write(force_write=True) + + return env_batch + + +class TestLockedFiles(unittest.TestCase): + def test_check_diff_reset_and_rebuild(self): + case = mock.MagicMock() + + # reset triggered by env_mach_pes + # rebuild triggered by REBUILD_TRIGGER_ATM and REBUILD_TRIGGER_LND + # COMP_CLASSES, REBUILD_TRIGGER_CPL, REBUILD_TRIGGER_ATM, REBUILD_TRIGGER_LND + case.get_values.side_effect = ( + ("CPL", "ATM", "LND"), + (), + ("NTASKS",), + ("NTASKS",), + ) + + diff = { + "NTASKS": ("32", "16"), + } + + expected_msg = """ERROR: For your changes to take effect, run: +./case.setup --reset +./case.build --clean atm lnd +./case.build""" + + with self.assertRaisesRegex(CIMEError, expected_msg): + locked_files.check_diff(case, "env_mach_pes.xml", "env_mach_pes", diff) + + def test_check_diff_reset_and_rebuild_single(self): + case = mock.MagicMock() + + # reset triggered by env_mach_pes + # rebuild triggered only by REBUILD_TRIGGER_ATM + # COMP_CLASSES, REBUILD_TRIGGER_CPL, REBUILD_TRIGGER_ATM, REBUILD_TRIGGER_LND + case.get_values.side_effect = (("CPL", "ATM", "LND"), (), ("NTASKS",), ()) + + diff = { + "NTASKS": ("32", "16"), + } + + expected_msg = """ERROR: For your changes to take effect, run: +./case.setup --reset +./case.build --clean atm +./case.build""" + + with self.assertRaisesRegex(CIMEError, expected_msg): + locked_files.check_diff(case, "env_mach_pes.xml", "env_mach_pes", diff) + + def test_check_diff_env_mach_pes(self): + case = mock.MagicMock() + + diff = { + "NTASKS": ("32", "16"), + } + + expected_msg = """ERROR: For your changes to take effect, run: +./case.setup --reset""" + + with self.assertRaisesRegex(CIMEError, expected_msg): + locked_files.check_diff(case, "env_mach_pes.xml", "env_mach_pes", diff) + + def test_check_diff_env_build_no_diff(self): + case = mock.MagicMock() + + diff = {} + + locked_files.check_diff(case, "env_build.xml", 
"env_build", diff) + + case.set_value.assert_not_called() + + def test_check_diff_env_build_pio_version(self): + case = mock.MagicMock() + + diff = { + "some_key": ("value1", "value2"), + "PIO_VERSION": ("1", "2"), + } + + expected_msg = """ERROR: For your changes to take effect, run: +./case.build --clean-all +./case.build""" + + with self.assertRaisesRegex(CIMEError, expected_msg): + locked_files.check_diff(case, "env_build.xml", "env_build", diff) + + case.set_value.assert_any_call("BUILD_COMPLETE", False) + case.set_value.assert_any_call("BUILD_STATUS", 2) + + def test_check_diff_env_build(self): + case = mock.MagicMock() + + diff = { + "some_key": ("value1", "value2"), + } + + expected_msg = """ERROR: For your changes to take effect, run: +./case.build --clean-all +./case.build""" + + with self.assertRaisesRegex(CIMEError, expected_msg): + locked_files.check_diff(case, "env_build.xml", "env_build", diff) + + case.set_value.assert_any_call("BUILD_COMPLETE", False) + case.set_value.assert_any_call("BUILD_STATUS", 1) + + def test_check_diff_env_batch(self): + case = mock.MagicMock() + + diff = { + "some_key": ("value1", "value2"), + } + + expected_msg = """ERROR: For your changes to take effect, run: +./case.setup --reset""" + + with self.assertRaisesRegex(CIMEError, expected_msg): + locked_files.check_diff(case, "env_batch.xml", "env_batch", diff) + + def test_check_diff_env_case(self): + case = mock.MagicMock() + + diff = { + "some_key": ("value1", "value2"), + } + + expected_msg = ( + "ERROR: Cannot change `env_case.xml`, please restore origin 'env_case.xml'" + ) + + with self.assertRaisesRegex(CIMEError, expected_msg): + locked_files.check_diff(case, "env_case.xml", "env_case", diff) + + def test_diff_lockedfile_detect_difference(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + env_batch = create_fake_env(tempdir) + + case.get_env.return_value = env_batch + + _, diff = locked_files.diff_lockedfile(case, tempdir, "env_batch.xml") + + assert diff + assert diff["batch_submit"] == [None, "sbatch"] + + def test_diff_lockedfile_not_supported(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + locked_file_path = Path(tempdir, locked_files.LOCKED_DIR, "env_new.xml") + + locked_file_path.parent.mkdir(parents=True) + + locked_file_path.touch() + + _, diff = locked_files.diff_lockedfile(case, tempdir, "env_new.xml") + + assert not diff + + def test_diff_lockedfile_does_not_exist(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + locked_files.diff_lockedfile(case, tempdir, "env_batch.xml") + + def test_diff_lockedfile(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + create_fake_env(tempdir) + + locked_files.diff_lockedfile(case, tempdir, "env_batch.xml") + + def test_check_lockedfile(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + create_fake_env(tempdir) + + with self.assertRaises(CIMEError): + locked_files.check_lockedfile(case, "env_batch.xml") + + def test_check_lockedfiles_skip(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + create_fake_env(tempdir) + + locked_files.check_lockedfiles(case, skip="env_batch.xml") + + 
def test_check_lockedfiles(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + create_fake_env(tempdir) + + with self.assertRaises(CIMEError): + locked_files.check_lockedfiles(case) + + def test_check_lockedfiles_quiet(self): + case = mock.MagicMock() + + with tempfile.TemporaryDirectory() as tempdir: + case.get_value.side_effect = (tempdir,) + + create_fake_env(tempdir) + + # Should not raise exception + locked_files.check_lockedfiles(case, quiet=True) + + def test_is_locked(self): + with tempfile.TemporaryDirectory() as tempdir: + src_path = Path(tempdir, locked_files.LOCKED_DIR, "env_case.xml") + + src_path.parent.mkdir(parents=True) + + src_path.touch() + + assert locked_files.is_locked("env_case.xml", tempdir) + + src_path.unlink() + + assert not locked_files.is_locked("env_case.xml", tempdir) + + def test_unlock_file_error_path(self): + with tempfile.TemporaryDirectory() as tempdir: + src_path = Path(tempdir, locked_files.LOCKED_DIR, "env_case.xml") + + src_path.parent.mkdir(parents=True) + + src_path.touch() + + with self.assertRaises(CIMEError): + locked_files.unlock_file("/env_case.xml", tempdir) + + def test_unlock_file(self): + with tempfile.TemporaryDirectory() as tempdir: + src_path = Path(tempdir, locked_files.LOCKED_DIR, "env_case.xml") + + src_path.parent.mkdir(parents=True) + + src_path.touch() + + locked_files.unlock_file("env_case.xml", tempdir) + + assert not src_path.exists() + + def test_lock_file_newname(self): + with tempfile.TemporaryDirectory() as tempdir: + src_path = Path(tempdir, "env_case.xml") + + src_path.touch() + + locked_files.lock_file("env_case.xml", tempdir, newname="env_case-old.xml") + + dst_path = Path(tempdir, locked_files.LOCKED_DIR, "env_case-old.xml") + + assert dst_path.exists() + + def test_lock_file_error_path(self): + with tempfile.TemporaryDirectory() as tempdir: + src_path = Path(tempdir, "env_case.xml") + + src_path.touch() + + with self.assertRaises(CIMEError): + locked_files.lock_file("/env_case.xml", tempdir) + + def test_lock_file(self): + with tempfile.TemporaryDirectory() as tempdir: + src_path = Path(tempdir, "env_case.xml") + + src_path.touch() + + locked_files.lock_file("env_case.xml", tempdir) + + dst_path = Path(tempdir, locked_files.LOCKED_DIR, "env_case.xml") + + assert dst_path.exists() diff --git a/CIME/tests/test_unit_nmlgen.py b/CIME/tests/test_unit_nmlgen.py new file mode 100644 index 00000000000..52e04d28856 --- /dev/null +++ b/CIME/tests/test_unit_nmlgen.py @@ -0,0 +1,59 @@ +from collections import OrderedDict +import tempfile +import unittest +from unittest import mock + +from CIME.nmlgen import NamelistGenerator + +# pylint: disable=protected-access +class TestNamelistGenerator(unittest.TestCase): + def test_init_defaults(self): + test_nml_infile = b"""&test +test1 = 'test1_updated' +/""" + + test_data = """ + + + + + char + test + test_nml + test1_value,test1_updated + + test1_value + + + + char + test + test_nml + + test2_value + + +""" + + with tempfile.NamedTemporaryFile() as temp, tempfile.NamedTemporaryFile() as temp2: + temp.write(test_data.encode()) + temp.flush() + + temp2.write(test_nml_infile) + temp2.flush() + + case = mock.MagicMock() + + nmlgen = NamelistGenerator(case, [temp.name]) + + nmlgen.init_defaults([temp2.name], None) + + expected_groups = OrderedDict( + {"test_nml": {"test1": ["'test1_updated'"], "test2": ['"test2_value"']}} + ) + + assert nmlgen._namelist._groups == expected_groups + + +if __name__ == "__main__": + 
unittest.main() diff --git a/CIME/tests/test_unit_paramgen.py b/CIME/tests/test_unit_paramgen.py new file mode 100755 index 00000000000..26b2be4ac6e --- /dev/null +++ b/CIME/tests/test_unit_paramgen.py @@ -0,0 +1,468 @@ +#!/usr/bin/env python3 + +""" +This module tests *some* functionality of CIME.ParamGen.paramgen's ParamGen class +""" + +# Ignore privacy concerns for unit tests, so that unit tests can access +# protected members of the system under test +# +# pylint:disable=protected-access + +# Also ignore too-long lines, since these are common in unit tests +# +# pylint:disable=line-too-long + +import unittest +import tempfile +from CIME.ParamGen.paramgen import ParamGen + +############### +# Example inputs +############### + +_MOM_INPUT_YAML = """ +Global: + INPUTDIR: + value: ${DIN_LOC_ROOT}/ocn/mom/${OCN_GRID} + RESTORE_SALINITY: + value: + $OCN_GRID == "tx0.66v1" and $COMP_ATM == "datm": True # for C and G compsets on tx0.66v1 + else: False + INIT_LAYERS_FROM_Z_FILE: + value: + $OCN_GRID == "gx1v6": True + $OCN_GRID == "tx0.66v1": True + $OCN_GRID == "tx0.25v1": True + TEMP_SALT_Z_INIT_FILE: + value: + $OCN_GRID == "gx1v6": "WOA05_pottemp_salt.nc" + $OCN_GRID == "tx0.66v1": "woa18_04_initial_conditions.nc" + $OCN_GRID == "tx0.25v1": "MOM6_IC_TS.nc" +""" + +_MOM_INPUT_DATA_LIST_YAML = """ +mom.input_data_list: + ocean_hgrid: + $OCN_GRID == "gx1v6": "${INPUTDIR}/ocean_hgrid.nc" + $OCN_GRID == "tx0.66v1": "${INPUTDIR}/ocean_hgrid_180829.nc" + $OCN_GRID == "tx0.25v1": "${INPUTDIR}/ocean_hgrid.nc" + tempsalt: + $OCN_GRID in ["gx1v6", "tx0.66v1", "tx0.25v1"]: + $INIT_LAYERS_FROM_Z_FILE == "True": + "${INPUTDIR}/${TEMP_SALT_Z_INIT_FILE}" +""" + +_MY_TEMPLATE_XML = """ + + + + + string + test_nml + a dummy parameter for testing single key=value guards + + alpha + beta + gamma + + + + + string + test_nml + another dummy parameter for multiple key=value guards mixed with explicit (flexible) guards + + delta + epsilon + + + + + string + test_nml + parameter to test the case where there is no match + + zeta + eta + + + + +""" + +_DUPLICATE_IDS_XML = """ + + + + + string + test_nml + a dummy parameter for testing single key=value guards + + alpha + beta + gamma + + + + + string + test_nml + another dummy parameter for multiple key=value guards mixed with explicit (flexible) guards + + delta + epsilon + + + + +""" + +############################ +# Dummy functions and classes +############################ + + +class DummyCase: + """A dummy Case class that mimics CIME class objects' get_value method.""" + + def get_value(self, varname): + d = { + "DIN_LOC_ROOT": "/foo/inputdata", + "OCN_GRID": "tx0.66v1", + "COMP_ATM": "datm", + } + return d[varname] if varname in d else None + + +case = DummyCase() + +##### + + +def _expand_func_demo(varname): + return { + "ICE_GRID": "gx1v6", + "DIN_LOC_ROOT": "/glade/p/cesmdata/cseg/inputdata", + "cice_mode": "thermo_only", + "some_bool": "True", + "some_int": 2, + "some_float": "3.1415", + }[varname] + + +################ +# Unitest classes +################ + + +class TestParamGen(unittest.TestCase): + """ + Tests some basic functionality of the + CIME.ParamGen.paramgen's ParamGen class + """ + + def test_init_data(self): + """Tests the ParamGen initializer with and without an initial data.""" + # empty + _ = ParamGen({}) + # with data + data_dict = {"a": 1, "b": 2} + _ = ParamGen(data_dict) + + def test_reduce(self): + """Tests the reduce method of ParamGen on data with explicit guards (True or False).""" + data_dict = {"False": 1, "True": 2} + obj = 
ParamGen(data_dict) + obj.reduce() + self.assertEqual(obj.data, 2) + + def test_nested_reduce(self): + """Tests the reduce method of ParamGen on data with nested guards.""" + data_dict = {"False": 1, "True": {"2>3": 0, "2<3": 2}} + obj = ParamGen(data_dict) + obj.reduce() + self.assertEqual(obj.data, 2) + + def test_outer_guards(self): + """Tests the reduce method on data with outer guards enclosing parameter definitions.""" + data_dict = { + "False": {"param": "foo"}, + "True": {"param": "bar"}, + } + obj = ParamGen(data_dict) + obj.reduce() + self.assertEqual(obj.data, {"param": "bar"}) + + def test_match(self): + """Tests the default behavior of returning the last match and the optional behavior of returning the + first match.""" + + data_dict = { + "1<2": "foo", + "2<3": "bar", + "3<4": "baz", + } + + obj = ParamGen(data_dict) # by default, match='last' + obj.reduce() + self.assertEqual(obj.data, "baz") + + obj = ParamGen(data_dict, match="first") + obj.reduce() + self.assertEqual(obj.data, "foo") + + def test_undefined_var(self): + """Tests the reduce method of ParamGen on nested guards where an undefined expandable var is specified + below a guard that evaluates to False. The undefined var should not lead to an error since the enclosing + guard evaluates to false.""" + + # define an expansion function, i.e., a mapping for expandable var names to their values + test_map = {"alpha": 1, "beta": False} + expand_func = lambda var: test_map[var] + + # define a data dict + data_dict = {"param": {"$alpha >= 1": "foo", "${beta}": {"${zeta}": "bar"}}} + + # Instantiate a ParamGen object and reduce its data to obtain the final parameter set + obj = ParamGen(data_dict) + obj.reduce(expand_func) + self.assertEqual(obj.data, {"param": "foo"}) + + def test_expandable_vars(self): + """Tests the reduce method of ParamGen expandable vars in guards.""" + + # define an expansion function, i.e., a mapping for expandable var names to their values + test_map = {"alpha": 1, "beta": False, "gamma": "xyz"} + expand_func = lambda var: test_map[var] + + # define a data dict + data_dict = { + "param": {"$alpha > 1": "foo", "${beta}": "bar", '"x" in $gamma': "baz"} + } + + # Instantiate a ParamGen object and reduce its data to obtain the final parameter set + obj = ParamGen(data_dict) + obj.reduce(expand_func) + self.assertEqual(obj.data, {"param": "baz"}) + + def test_formula_expansion(self): + """Tests the formula expansion feature of ParamGen.""" + + # define an expansion function, i.e., a mapping for expandable var names to their values + test_map = {"alpha": 3} + expand_func = lambda var: test_map[var] + + # define a data dict + data_dict = {"x": "= $alpha **2", "y": "= [i for i in range(3)]"} + + # Instantiate a ParamGen object and reduce its data to obtain the final parameter set + obj = ParamGen(data_dict) + obj.reduce(expand_func) + self.assertEqual(obj.data["x"], 9) + self.assertEqual(obj.data["y"], [0, 1, 2]) + + +##### + + +class TestParamGenYamlConstructor(unittest.TestCase): + """A unit test class for testing ParamGen's yaml constructor.""" + + def test_mom_input(self): + """Test MOM_input file generation via a subset of original MOM_input.yaml""" + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_MOM_INPUT_YAML.encode()) + temp.flush() + + # Open YAML file using ParamGen: + mom_input = ParamGen.from_yaml(temp.name) + + # Define a local ParamGen reducing function: + def input_data_list_expand_func(varname): + val = case.get_value(varname) + if val == None: 
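+                    # When the (dummy) case does not define the variable, fall
+                    # back to the value recorded under MOM_input's "Global"
+                    # section (this is how e.g. INPUTDIR gets resolved when the
+                    # same expand function is reused in test_input_data_list).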
+ val = str(mom_input.data["Global"][varname]["value"]).strip() + if val == None: + raise RuntimeError("Cannot determine the value of variable: " + varname) + return val + + # Reduce ParamGen entries: + mom_input.reduce(input_data_list_expand_func) + + # Check output: + self.assertEqual( + mom_input.data, + { + "Global": { + "INPUTDIR": {"value": "/foo/inputdata/ocn/mom/tx0.66v1"}, + "RESTORE_SALINITY": {"value": True}, + "INIT_LAYERS_FROM_Z_FILE": {"value": True}, + "TEMP_SALT_Z_INIT_FILE": { + "value": "woa18_04_initial_conditions.nc" + }, + } + }, + ) + + def test_input_data_list(self): + """Test mom.input_data_list file generation via a subset of original input_data_list.yaml""" + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_MOM_INPUT_YAML.encode()) + temp.flush() + + # Open YAML file using ParamGen: + mom_input = ParamGen.from_yaml(temp.name) + + # Define a local ParamGen reducing function: + def input_data_list_expand_func(varname): + val = case.get_value(varname) + if val == None: + val = str(mom_input.data["Global"][varname]["value"]).strip() + if val == None: + raise RuntimeError("Cannot determine the value of variable: " + varname) + return val + + # Reduce ParamGen entries: + mom_input.reduce(input_data_list_expand_func) + + # Create a second temporary YAML file: + with tempfile.NamedTemporaryFile() as temp2: + temp2.write(_MOM_INPUT_DATA_LIST_YAML.encode()) + temp2.flush() + + # Open second YAML file using ParamGen: + input_data_list = ParamGen.from_yaml(temp2.name) + + # Reduce ParamGen entries: + input_data_list.reduce(input_data_list_expand_func) + + # Check output: + self.assertEqual( + input_data_list.data, + { + "mom.input_data_list": { + "ocean_hgrid": "/foo/inputdata/ocn/mom/tx0.66v1/ocean_hgrid_180829.nc", + "tempsalt": "/foo/inputdata/ocn/mom/tx0.66v1/woa18_04_initial_conditions.nc", + } + }, + ) + + +##### + + +class TestParamGenXmlConstructor(unittest.TestCase): + """A unit test class for testing ParamGen's xml constructor.""" + + def test_single_key_val_guard(self): + """Test xml entry values with single key=value guards""" + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_MY_TEMPLATE_XML.encode()) + temp.flush() + + # Open XML file using ParamGen: + pg = ParamGen.from_xml_nml(temp.name) + + # Reduce ParamGen entries: + pg.reduce(_expand_func_demo) + + # Check output: + self.assertEqual(pg.data["test_nml"]["foo"]["values"], "beta") + + def test_mixed_guard(self): + """Tests multiple key=value guards mixed with explicit (flexible) guards.""" + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_MY_TEMPLATE_XML.encode()) + temp.flush() + + # Open XML file using ParamGen: + pg = ParamGen.from_xml_nml(temp.name) + + # Reduce ParamGen entries: + pg.reduce(_expand_func_demo) + + # Check output: + self.assertEqual(pg.data["test_nml"]["bar"]["values"], "epsilon") + + def test_mixed_guard_first(self): + """Tests multiple key=value guards mixed with explicit (flexible) guards + with match=first option.""" + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_MY_TEMPLATE_XML.encode()) + temp.flush() + + # Open XML file using ParamGen: + pg = ParamGen.from_xml_nml(temp.name, match="first") + + # Reduce ParamGen entries: + pg.reduce(_expand_func_demo) + + # Check output: + self.assertEqual(pg.data["test_nml"]["bar"]["values"], "delta") + + def test_no_match(self): + """Tests an xml entry with no match, i.e., 
no guards evaluating to True.""" + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_MY_TEMPLATE_XML.encode()) + temp.flush() + + # Open XML file using ParamGen: + pg = ParamGen.from_xml_nml(temp.name) + + # Reduce ParamGen entries: + pg.reduce(_expand_func_demo) + + # Check output: + self.assertEqual(pg.data["test_nml"]["baz"]["values"], None) + + def test_default_var(self): + """Test to check if default val is assigned when all guards eval to False""" + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_MY_TEMPLATE_XML.encode()) + temp.flush() + + # Open XML file using ParamGen: + pg = ParamGen.from_xml_nml(temp.name) + + # Reduce ParamGen entries: + pg.reduce(lambda varname: "_") + + # Check output: + self.assertEqual(pg.data["test_nml"]["foo"]["values"], "alpha") + + def test_duplicate_entry_error(self): + """ + Test to make sure duplicate ids raise the correct error + when the "no_duplicates" flag is True. + """ + with self.assertRaises(ValueError) as verr: + + # Create temporary YAML file: + with tempfile.NamedTemporaryFile() as temp: + temp.write(_DUPLICATE_IDS_XML.encode()) + temp.flush() + + _ = ParamGen.from_xml_nml(temp.name, no_duplicates=True) + + emsg = "Entry id 'foo' listed twice in file:\n'./xml_test_files/duplicate_ids.xml'" + self.assertEqual(emsg, str(verr.exception)) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_system_tests.py b/CIME/tests/test_unit_system_tests.py new file mode 100644 index 00000000000..8e0ad633820 --- /dev/null +++ b/CIME/tests/test_unit_system_tests.py @@ -0,0 +1,653 @@ +#!/usr/bin/env python3 + +import os +import tempfile +import gzip +import re +from re import A +import unittest +from unittest import mock +from pathlib import Path + +from CIME.config import Config +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.SystemTests.system_tests_compare_n import SystemTestsCompareN + +CPLLOG = """ + tStamp_write: model date = 00010102 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010102 0 memory = 1673.89 MB (highwater) 387.77 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010103 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010103 0 memory = 1673.89 MB (highwater) 390.09 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010104 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010104 0 memory = 1673.89 MB (highwater) 391.64 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010105 0 wall clock = 2023-09-19 19:39:43 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010105 0 memory = 1673.89 MB (highwater) 392.67 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010106 0 wall clock = 2023-09-19 19:39:43 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010106 0 memory = 1673.89 MB (highwater) 393.44 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + +(seq_mct_drv): =============== SUCCESSFUL TERMINATION OF CPL7-e3sm =============== +(seq_mct_drv): =============== at YMD,TOD = 00010106 0 =============== +(seq_mct_drv): =============== # simulated days (this run) = 5.000 =============== 
+(seq_mct_drv): =============== compute time (hrs) = 0.000 =============== +(seq_mct_drv): =============== # simulated years / cmp-day = 719.635 =============== +(seq_mct_drv): =============== pes min memory highwater (MB) 851.957 =============== +(seq_mct_drv): =============== pes max memory highwater (MB) 1673.891 =============== +(seq_mct_drv): =============== pes min memory last usage (MB) 182.742 =============== +(seq_mct_drv): =============== pes max memory last usage (MB) 393.441 =============== +""" + + +def create_mock_case(tempdir, idx=None, cpllog_data=None): + if idx is None: + idx = 0 + + case = mock.MagicMock() + + caseroot = Path(tempdir, str(idx), "caseroot") + baseline_root = caseroot.parent / "baselines" + run_dir = caseroot / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + if cpllog_data is not None: + cpllog = run_dir / "cpl.log.gz" + + with gzip.open(cpllog, "w") as fd: + fd.write(cpllog_data.encode("utf-8")) + + case.get_latest_cpl_log.return_value = str(cpllog) + + hist_file = run_dir / "cpl.hi.2023-01-01.nc" + hist_file.touch() + + case.get_env.return_value.get_latest_hist_files.return_value = [str(hist_file)] + + case.get_compset_components.return_value = [] + + return case, caseroot, baseline_root, run_dir + + +class TestUnitSystemTests(unittest.TestCase): + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_runtime_error( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.side_effect = RuntimeError + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + "rpointer.cpl", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="insufficient data for memleak test" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_not_enough_samples( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.return_value = [ + (1, 1000.0), + (2, 0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + 
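+            # Point the mocked log discovery at the coupler log path inside the temporary run directory.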
get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + None, + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="data for memleak test is insufficient" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_found( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.return_value = [ + (1, 1000.0), + (2, 2000.0), + (3, 3000.0), + (4, 3000.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + None, + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + expected_comment = "memleak detected, memory went from 2000.000000 to 3000.000000 in 2 days" + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "FAIL", comments=expected_comment + ) + + append_testlog.assert_any_call(expected_comment, str(caseroot)) + + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.return_value = [ + (1, 3040.0), + (2, 3002.0), + (3, 3030.0), + (4, 3008.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + None, + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput(self, append_testlog, perf_compare_throughput_baseline): + 
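+        # Simulate a throughput comparison that passes while reporting a small change relative to the baseline.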
perf_compare_throughput_baseline.return_value = ( + True, + "TPUTCOMP: Computation time changed by 2.00% relative to baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + None, + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "TPUTCOMP: Computation time changed by 2.00% relative to baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput_error_diff( + self, append_testlog, perf_compare_throughput_baseline + ): + perf_compare_throughput_baseline.return_value = (None, "Error diff value") + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + "rpointer.cpl.0001-01-01", + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput_fail( + self, append_testlog, perf_compare_throughput_baseline + ): + perf_compare_throughput_baseline.return_value = ( + False, + "Error: TPUTCOMP: Computation time increase > 5% from baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + None, + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "Error: TPUTCOMP: Computation time increase > 5% from baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory(self, append_testlog, perf_compare_memory_baseline): + perf_compare_memory_baseline.return_value = ( + True, + "MEMCOMP: Memory usage highwater has changed by 2.00% relative to baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + "rpointer.cpl", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "MEMCOMP: Memory usage highwater has changed by 2.00% relative to baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def 
test_compare_memory_error_diff( + self, append_testlog, perf_compare_memory_baseline + ): + perf_compare_memory_baseline.return_value = (None, "Error diff value") + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + None, + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory_error_fail( + self, append_testlog, perf_compare_memory_baseline + ): + perf_compare_memory_baseline.return_value = ( + False, + "Error: Memory usage increase >5% from baseline's 1000.000000 to 1002.000000", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + "rpointer.cpl", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "Error: Memory usage increase >5% from baseline's 1000.000000 to 1002.000000", + str(caseroot), + ) + + def test_generate_baseline(self): + with tempfile.TemporaryDirectory() as tempdir: + case, caseroot, baseline_root, run_dir = create_mock_case( + tempdir, cpllog_data=CPLLOG + ) + + get_value_calls = [ + str(caseroot), + "ERIO.ne30_g16.A.docker_gnu", + "mct", + None, + str(run_dir), + "case.std", + str(baseline_root), + "master/ERIO.ne30_g16.A.docker_gnu", + "ERIO.ne30_g16.A.docker_gnu.G.20230919_193255_z9hg2w", + "ERIO", + "mct", + str(run_dir), + "ERIO", + "ERIO.ne30_g16.A.docker_gnu", + "master/ERIO.ne30_g16.A.docker_gnu", + str(baseline_root), + "master/ERIO.ne30_g16.A.docker_gnu", + str(run_dir), + "mct", + "/tmp/components/cpl", + str(run_dir), + "mct", + str(run_dir), + "mct", + ] + + if Config.instance().create_bless_log: + get_value_calls.insert(12, os.getcwd()) + + case.get_value.side_effect = get_value_calls + + common = SystemTestsCommon(case) + + common._generate_baseline() + + baseline_dir = baseline_root / "master" / "ERIO.ne30_g16.A.docker_gnu" + assert (baseline_dir / "cpl.log.gz").exists() + assert (baseline_dir / "cpl-tput.log").exists() + assert (baseline_dir / "cpl-mem.log").exists() + assert (baseline_dir / "cpl.hi.2023-01-01.nc").exists() + + with open(baseline_dir / "cpl-tput.log") as fd: + lines = fd.readlines() + + assert len(lines) == 1 + assert re.match("sha:.* date:.* (\d+\.\d+)", lines[0]) + + with open(baseline_dir / "cpl-mem.log") as fd: + lines = fd.readlines() + + assert len(lines) == 1 + assert re.match("sha:.* date:.* (\d+\.\d+)", lines[0]) + + def test_kwargs(self): + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + _ = SystemTestsCommon(case, something="random") + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + orig1 = SystemTestsCompareTwo._get_caseroot + orig2 = SystemTestsCompareTwo._get_caseroot2 + + 
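+        # Temporarily stub the caseroot lookups so SystemTestsCompareTwo can be constructed without real case directories; the originals are restored below.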
SystemTestsCompareTwo._get_caseroot = mock.MagicMock() + SystemTestsCompareTwo._get_caseroot2 = mock.MagicMock() + + _ = SystemTestsCompareTwo(case, something="random") + + SystemTestsCompareTwo._get_caseroot = orig1 + SystemTestsCompareTwo._get_caseroot2 = orig2 + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + orig = SystemTestsCompareN._get_caseroots + + SystemTestsCompareN._get_caseroots = mock.MagicMock() + + _ = SystemTestsCompareN(case, something="random") + + SystemTestsCompareN._get_caseroots = orig + + def test_dry_run(self): + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + None, + "/caseroot", + "SMS.f19_g16.S", + ) + + orig = SystemTestsCompareTwo._setup_cases_if_not_yet_done + + SystemTestsCompareTwo._setup_cases_if_not_yet_done = mock.MagicMock() + + system_test = SystemTestsCompareTwo(case, dry_run=True) + + system_test._setup_cases_if_not_yet_done.assert_not_called() + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "rpointer.cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + system_test = SystemTestsCompareTwo(case) + + system_test._setup_cases_if_not_yet_done.assert_called() + + SystemTestsCompareTwo._setup_cases_if_not_yet_done = orig + + orig = SystemTestsCompareN._setup_cases_if_not_yet_done + + SystemTestsCompareN._setup_cases_if_not_yet_done = mock.MagicMock() + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + system_test = SystemTestsCompareN(case, dry_run=True) + + system_test._setup_cases_if_not_yet_done.assert_not_called() + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + system_test = SystemTestsCompareN(case) + + system_test._setup_cases_if_not_yet_done.assert_called() + + SystemTestsCompareN._setup_cases_if_not_yet_done = orig diff --git a/CIME/tests/test_unit_system_tests_mvk.py b/CIME/tests/test_unit_system_tests_mvk.py new file mode 100644 index 00000000000..0bb33f8d605 --- /dev/null +++ b/CIME/tests/test_unit_system_tests_mvk.py @@ -0,0 +1,730 @@ +#!/usr/bin/env python3 + +import os +import json +import unittest +import tempfile +import contextlib +import sysconfig +from pathlib import Path +from unittest import mock +from CIME.tests.utils import chdir + +evv4esm = False +try: + from CIME.SystemTests.mvk import MVK +except: + unittest.SkipTest("Skipping mvk tests. 
E3SM feature") +else: + from CIME.SystemTests.mvk import MVKConfig + + evv4esm = True + + +def create_complex_case( + case_name, + temp_dir, + run_dir, + baseline_dir, + compare_baseline=False, + mock_evv_output=False, +): + case = mock.MagicMock() + + side_effect = [ + str(temp_dir), # CASEROOT + "MVK.f19_g16.S.docker_gnu", # CASEBASEID + "mct", # COMP_INTERFACE + "mct", # COMP_INTERFACE + False, # DRV_RESTART_POINTER + ] + + # single extra call for _compare_baseline + if compare_baseline: + side_effect.append("e3sm") # MODEL + + side_effect.extend( + [ + 0, # RESUBMIT + False, # GENERATE_BASELINE + 0, # RESUBMIT + str(run_dir), # RUNDIR + case_name, # CASE + str(baseline_dir), # BASELINE_ROOT + "", # BASECMP_CASE + "docker", # MACH + ] + ) + + case.get_value.side_effect = side_effect + + run_dir.mkdir(parents=True, exist_ok=True) + + evv_output = run_dir / f"{case_name}.evv" / "index.json" + + evv_output.parent.mkdir(parents=True, exist_ok=True) + + write_evv_output(evv_output, mock_evv_output=mock_evv_output) + + return case + + +def write_evv_output(evv_output_path, mock_evv_output): + if mock_evv_output: + evv_output_data = { + "Page": { + "elements": [ + { + "Table": { + "data": { + "Test status": ["pass"], + "Variables analyzed": ["v1", "v2"], + "Rejecting": [2], + "Critical value": [12], + } + } + } + ] + } + } + else: + evv_output_data = {"Page": {"elements": []}} + + with open(evv_output_path, "w") as fd: + fd.write(json.dumps(evv_output_data)) + + +def create_simple_case(model="e3sm", resubmit=0, generate_baseline=False): + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/tmp/case", # CASEROOT + "MVK.f19_g16.S.docker_gnu", # CASEBASEID + "mct", # COMP_INTERFACE + False, # DRV_RESTART_POINTER + "MVK.f19_g16.S.docker_gnu", # CASEBASEID + model, + resubmit, + generate_baseline, + ) + + return case + + +class TestSystemTestsMVK(unittest.TestCase): + def tearDown(self): + # reset singleton + try: + delattr(MVKConfig, "_instance") + except: + pass + + @mock.patch("CIME.SystemTests.mvk.test_mods.find_test_mods") + @mock.patch("CIME.SystemTests.mvk.evv") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_testmod_complex(self, evv, find_test_mods): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + print(temp_dir) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + testmods_dir = temp_dir / "testmods" / "eam" + + testmods_dir.mkdir(parents=True) + + find_test_mods.return_value = [str(testmods_dir)] + + with open(testmods_dir / "params.py", "w") as fd: + fd.write( + """ +import os +from CIME.namelist import Namelist +from CIME.SystemTests.mvk import EVV_LIB_DIR + +component = "new-comp" +components = ["new-comp", "secondary-comp"] +ninst = 8 + +def generate_namelist(case, component, i, filename): + nml = Namelist() + + if component == "new-comp": + nml.set_variable_value("", "var1", "value1") + elif component == "secondary-comp": + nml.set_variable_value("", "var2", "value2") + + nml.write(filename) + +def evv_test_config(case, config): + config["module"] = os.path.join(EVV_LIB_DIR, "extensions", "kso.py") + config["component"] = "someother-comp" + + return config + """ + ) + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + case = create_complex_case(case_name, temp_dir, run_dir, baseline_dir) + test = MVK(case) + + stack.enter_context(mock.patch.object(test, 
"build_indv")) + + test.build_phase(False, True) + test._compare_baseline() + + with open(run_dir / f"{case_name}.json", "r") as fd: + config = json.load(fd) + + expected_config = { + "20240515_212034_41b5u2": { + "component": "someother-comp", + "ninst": 8, + "ref-case": "Baseline", + "ref-dir": f"{temp_dir}/baselines/", + "test-case": "Test", + "test-dir": f"{temp_dir}/run", + "var-set": "default", + } + } + + module = config["20240515_212034_41b5u2"].pop("module") + + assert ( + f'{sysconfig.get_paths()["purelib"]}/evv4esm/extensions/kso.py' + == module + ) + + assert config == expected_config + + nml_files = [x for x in os.listdir(temp_dir) if x.startswith("user_nl")] + + assert len(nml_files) == 16 + + with open(sorted(nml_files)[0], "r") as fd: + lines = fd.readlines() + + assert lines == ["var1 = value1\n"] + + with open(sorted(nml_files)[-1], "r") as fd: + lines = fd.readlines() + + assert lines == ["var2 = value2\n"] + + @mock.patch("CIME.SystemTests.mvk.append_testlog") + @mock.patch("CIME.SystemTests.mvk.Machines") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_update_testlog(self, machines, append_testlog): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + + run_dir.mkdir(parents=True) + + evv_output_path = run_dir / "index.json" + + write_evv_output(evv_output_path, True) + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + machines.return_value.get_value.return_value = "docker" + + case = create_complex_case(case_name, temp_dir, run_dir, baseline_dir) + + test = MVK(case) + + test.update_testlog("test1", case_name, str(run_dir)) + + append_testlog.assert_any_call( + """BASELINE PASS for test 'test1'. + Test status: pass; Variables analyzed: v1; Rejecting: 2; Critical value: 12 + EVV results can be viewed at: + docker/evv/MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2/index.html""", + str(temp_dir), + ) + + @mock.patch("CIME.SystemTests.mvk.utils.get_urlroot") + @mock.patch("CIME.SystemTests.mvk.append_testlog") + @mock.patch("CIME.SystemTests.mvk.Machines") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_update_testlog_urlroot_None(self, machines, append_testlog, get_urlroot): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + + run_dir.mkdir(parents=True) + + evv_output_path = run_dir / "index.json" + + write_evv_output(evv_output_path, True) + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + machines.return_value.get_value.return_value = "docker" + + get_urlroot.return_value = None + + case = create_complex_case(case_name, temp_dir, run_dir, baseline_dir) + + test = MVK(case) + + test.update_testlog("test1", case_name, str(run_dir)) + + print(append_testlog.call_args_list) + append_testlog.assert_any_call( + f"""BASELINE PASS for test 'test1'. 
+ Test status: pass; Variables analyzed: v1; Rejecting: 2; Critical value: 12 + EVV results can be viewed at: + [{run_dir!s}_URL]/evv/MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2/index.html""", + str(temp_dir), + ) + + @mock.patch("CIME.SystemTests.mvk.utils.get_htmlroot") + @mock.patch("CIME.SystemTests.mvk.append_testlog") + @mock.patch("CIME.SystemTests.mvk.Machines") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_update_testlog_htmlroot(self, machines, append_testlog, get_htmlroot): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + + run_dir.mkdir(parents=True) + + evv_output_path = run_dir / "index.json" + + write_evv_output(evv_output_path, True) + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + machines.return_value.get_value.return_value = "docker" + + get_htmlroot.return_value = None + + case = create_complex_case(case_name, temp_dir, run_dir, baseline_dir) + + test = MVK(case) + + test.update_testlog("test1", case_name, str(run_dir)) + + append_testlog.assert_any_call( + f"""BASELINE PASS for test 'test1'. + Test status: pass; Variables analyzed: v1; Rejecting: 2; Critical value: 12 + EVV results can be viewed at: + {run_dir!s} + EVV viewing instructions can be found at: https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/climate_reproducibility/README.md#test-passfail-and-extended-output""", + str(temp_dir), + ) + + @mock.patch("CIME.SystemTests.mvk.test_mods.find_test_mods") + @mock.patch("CIME.SystemTests.mvk.evv") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_testmod_simple(self, evv, find_test_mods): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + testmods_dir = temp_dir / "testmods" / "eam" + + testmods_dir.mkdir(parents=True) + + find_test_mods.return_value = [str(testmods_dir)] + + with open(testmods_dir / "params.py", "w") as fd: + fd.write( + """ +component = "new-comp" +components = ["new-comp", "second-comp"] +ninst = 8 +var_set = "special" +ref_case = "Reference" +test_case = "Default" + """ + ) + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + case = create_complex_case(case_name, temp_dir, run_dir, baseline_dir) + + test = MVK(case) + + stack.enter_context(mock.patch.object(test, "build_indv")) + + test.build_phase(False, True) + test._compare_baseline() + + with open(run_dir / f"{case_name}.json", "r") as fd: + config = json.load(fd) + + expected_config = { + "20240515_212034_41b5u2": { + "test-case": "Default", + "test-dir": f"{run_dir}", + "ref-case": "Reference", + "ref-dir": f"{baseline_dir}/", + "var-set": "special", + "ninst": 8, + "component": "new-comp", + } + } + + module = config["20240515_212034_41b5u2"].pop("module") + + assert ( + f'{sysconfig.get_paths()["purelib"]}/evv4esm/extensions/ks.py' == module + ) + + assert config == expected_config + + nml_files = [x for x in os.listdir(temp_dir) if x.startswith("user_nl")] + + assert len(nml_files) == 16 + + with open(sorted(nml_files)[0], "r") as fd: + lines = fd.readlines() + + assert lines == [ + "new_random = .true.\n", + "pertlim = 1.0e-10\n", + "seed_clock = 
.true.\n", + "seed_custom = 1\n", + ] + + with open(sorted(nml_files)[-1], "r") as fd: + lines = fd.readlines() + + assert lines == [ + "new_random = .true.\n", + "pertlim = 1.0e-10\n", + "seed_clock = .true.\n", + "seed_custom = 8\n", + ] + + @mock.patch("CIME.SystemTests.mvk.case_setup") + @mock.patch("CIME.SystemTests.mvk.MVK.build_indv") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_build_phase(self, build_indv, case_setup): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + case = create_complex_case( + case_name, temp_dir, run_dir, baseline_dir, True, mock_evv_output=True + ) + + case.get_values.side_effect = (("CPL", "LND"),) + + side_effect = [x for x in case.get_value.side_effect] + + n = 7 + side_effect.insert(n, 8) + side_effect.insert(n, 16) + side_effect.insert(n, None) + + case.get_value.side_effect = side_effect + + test = MVK(case) + + test.build_phase(sharedlib_only=True) + + case.set_value.assert_any_call("NTHRDS_CPL", 1) + case.set_value.assert_any_call("NTASKS_CPL", 480) + case.set_value.assert_any_call("NTHRDS_LND", 1) + case.set_value.assert_any_call("NTASKS_LND", 240) + case.set_value.assert_any_call("NINST_LND", 30) + + case.flush.assert_called() + + case_setup.assert_any_call(case, test_mode=False, reset=True) + + @mock.patch("CIME.SystemTests.mvk.SystemTestsCommon._generate_baseline") + @mock.patch("CIME.SystemTests.mvk.append_testlog") + @mock.patch("CIME.SystemTests.mvk.evv") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test__generate_baseline(self, evv, append_testlog, _generate_baseline): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + case = create_complex_case( + case_name, temp_dir, run_dir, baseline_dir, True, mock_evv_output=True + ) + + # use original 5 args + side_effect = [x for x in case.get_value.side_effect][:7] + + side_effect.extend( + [ + None, + str(baseline_dir), + "MVK.f19_g16.S", + str(run_dir), + "MVK.f19_g16.S", + case_name, + ] + ) + + case.get_value.side_effect = side_effect + + case_baseline_dir = baseline_dir / "MVK.f19_g16.S" / "eam" + + case_baseline_dir.mkdir(parents=True, exist_ok=True) + + (run_dir / "eam").mkdir(parents=True, exist_ok=True) + + (run_dir / "eam" / "test1.nc").touch() + (run_dir / "eam" / "test2.nc").touch() + + case.get_env.return_value.get_all_hist_files.return_value = ( + "eam/test1.nc", + "eam/test2.nc", + ) + + test = MVK(case) + + test._generate_baseline() + + files = os.listdir(case_baseline_dir) + + assert sorted(files) == sorted(["test1.nc", "test2.nc"]) + + # reset side_effect + case.get_value.side_effect = side_effect + + test = MVK(case) + + # test baseline_dir already exists + test._generate_baseline() + + files = os.listdir(case_baseline_dir) + + assert sorted(files) == sorted(["test1.nc", "test2.nc"]) + + @mock.patch("CIME.SystemTests.mvk.append_testlog") + @mock.patch("CIME.SystemTests.mvk.evv") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def 
test__compare_baseline_resubmit(self, evv, append_testlog): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + case = create_complex_case( + case_name, temp_dir, run_dir, baseline_dir, True, mock_evv_output=True + ) + + side_effect = [x for x in case.get_value.side_effect][:-8] + + side_effect.extend([1, 1]) + + case.get_value.side_effect = side_effect + + test = MVK(case) + + with mock.patch.object(test, "_test_status") as _test_status: + test._compare_baseline() + + _test_status.set_status.assert_any_call("BASELINE", "PASS") + + @mock.patch("CIME.SystemTests.mvk.append_testlog") + @mock.patch("CIME.SystemTests.mvk.evv") + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test__compare_baseline(self, evv, append_testlog): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + # convert to Path + temp_dir = Path(temp_dir) + run_dir = temp_dir / "run" + baseline_dir = temp_dir / "baselines" + + case_name = "MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2" # CASE + + case = create_complex_case( + case_name, temp_dir, run_dir, baseline_dir, True, mock_evv_output=True + ) + + test = MVK(case) + + test._compare_baseline() + + with open(run_dir / f"{case_name}.json", "r") as fd: + config = json.load(fd) + + expected_config = { + "20240515_212034_41b5u2": { + "test-case": "Test", + "test-dir": f"{run_dir}", + "ref-case": "Baseline", + "ref-dir": f"{baseline_dir}/", + "var-set": "default", + "ninst": 30, + "component": "eam", + } + } + + module = config["20240515_212034_41b5u2"].pop("module") + + assert ( + f'{sysconfig.get_paths()["purelib"]}/evv4esm/extensions/ks.py' == module + ) + + assert config == expected_config + + expected_comments = f"""BASELINE PASS for test '20240515_212034_41b5u2'. 
+ Test status: pass; Variables analyzed: v1; Rejecting: 2; Critical value: 12 + EVV results can be viewed at: + {run_dir}/MVK.f19_g16.S.docker_gnu.20240515_212034_41b5u2.evv + EVV viewing instructions can be found at: https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/climate_reproducibility/README.md#test-passfail-and-extended-output""" + + append_testlog.assert_any_call( + expected_comments, str(temp_dir) + ), append_testlog.call_args.args + + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_generate_namelist_multiple_components(self): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + case = create_simple_case() + + test = MVK(case) + + stack.enter_context(mock.patch.object(test, "build_indv")) + + test._config.components = ["eam", "elm"] + + test.build_phase(False, True) + + nml_files = os.listdir(temp_dir) + + assert len(nml_files) == 60 + + with open(sorted(nml_files)[0], "r") as fd: + lines = fd.readlines() + + assert lines == [ + "new_random = .true.\n", + "pertlim = 1.0e-10\n", + "seed_clock = .true.\n", + "seed_custom = 1\n", + ] + + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_generate_namelist(self): + with contextlib.ExitStack() as stack: + temp_dir = stack.enter_context(tempfile.TemporaryDirectory()) + + stack.enter_context(chdir(temp_dir)) + + case = create_simple_case() + + test = MVK(case) + + stack.enter_context(mock.patch.object(test, "build_indv")) + + test.build_phase(False, True) + + nml_files = os.listdir(temp_dir) + + assert len(nml_files) == 30 + + with open(sorted(nml_files)[0], "r") as fd: + lines = fd.readlines() + + assert lines == [ + "new_random = .true.\n", + "pertlim = 1.0e-10\n", + "seed_clock = .true.\n", + "seed_custom = 1\n", + ] + + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_compare_baseline(self): + case = create_simple_case() + + MVK(case) + + case.set_value.assert_any_call("COMPARE_BASELINE", True) + + case = create_simple_case(generate_baseline=True) + + MVK(case) + + case.set_value.assert_any_call("COMPARE_BASELINE", False) + + case = create_simple_case(resubmit=1, generate_baseline=True) + + MVK(case) + + case.set_value.assert_any_call("COMPARE_BASELINE", False) + + @unittest.skipUnless(evv4esm, "evv4esm module not found") + def test_mvk(self): + case = create_simple_case() + + test = MVK(case) + + assert test._config.component == "eam" + assert test._config.components == ["eam"] + + case = create_simple_case("cesm") + + test = MVK(case) + + assert test._config.component == "cam" + assert test._config.components == ["cam"] diff --git a/CIME/tests/test_unit_test_status.py b/CIME/tests/test_unit_test_status.py new file mode 100755 index 00000000000..9b3036801fc --- /dev/null +++ b/CIME/tests/test_unit_test_status.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 + +import unittest +import os +from CIME import test_status +from CIME import expected_fails +from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + + +class TestTestStatus(CustomAssertionsTestStatus): + + _TESTNAME = "fake_test" + + # An arbitrary phase we can use when we want to work with a non-core phase + _NON_CORE_PHASE = test_status.MEMLEAK_PHASE + + def setUp(self): + self._ts = test_status.TestStatus( + test_dir=os.path.join("nonexistent", "path"), + test_name=self._TESTNAME, + no_io=True, + ) + self._set_core_phases_to_pass() + + def _set_core_phases_to_pass(self): + """Set all core 
phases of self._ts to pass status""" + with self._ts: + for phase in test_status.CORE_PHASES: + self._ts.set_status(phase, test_status.TEST_PASS_STATUS) + + def _set_last_core_phase_to_fail(self): + """Sets the last core phase to FAIL + + Returns the name of this phase""" + fail_phase = test_status.CORE_PHASES[-1] + self._set_phase_to_status(fail_phase, test_status.TEST_FAIL_STATUS) + return fail_phase + + def _set_phase_to_status(self, phase, status): + """Set given phase to given status""" + with self._ts: + self._ts.set_status(phase, status) + + def test_get_latest_phase(self): + assert self._ts.get_latest_phase() == test_status.RUN_PHASE + + def test_current_is(self): + assert self._ts.current_is(test_status.RUN_PHASE, test_status.TEST_PASS_STATUS) + + assert not self._ts.current_is( + test_status.RUN_PHASE, test_status.TEST_PEND_STATUS + ) + + assert not self._ts.current_is( + test_status.SUBMIT_PHASE, test_status.TEST_PASS_STATUS + ) + + # ------------------------------------------------------------------------ + # Tests of TestStatus.phase_statuses_dump + # ------------------------------------------------------------------------ + + def test_psdump_corePhasesPass(self): + output = self._ts.phase_statuses_dump() + self.assert_core_phases(output, self._TESTNAME, fails=[]) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_psdump_oneCorePhaseFails(self): + fail_phase = self._set_last_core_phase_to_fail() + output = self._ts.phase_statuses_dump() + self.assert_core_phases(output, self._TESTNAME, fails=[fail_phase]) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_psdump_oneCorePhaseFailsAbsentFromXFails(self): + """One phase fails. There is an expected fails list, but that phase is not in it.""" + fail_phase = self._set_last_core_phase_to_fail() + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=self._NON_CORE_PHASE, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(xfails=xfails) + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, fail_phase, self._TESTNAME, xfail="no" + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_psdump_oneCorePhaseFailsInXFails(self): + """One phase fails. 
That phase is in the expected fails list.""" + fail_phase = self._set_last_core_phase_to_fail() + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=fail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(xfails=xfails) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + fail_phase, + self._TESTNAME, + xfail="expected", + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + def test_psdump_oneCorePhasePassesInXFails(self): + """One phase passes despite being in the expected fails list.""" + xfail_phase = test_status.CORE_PHASES[-1] + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=xfail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(xfails=xfails) + self.assert_status_of_phase( + output, + test_status.TEST_PASS_STATUS, + xfail_phase, + self._TESTNAME, + xfail="unexpected", + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=1 + ) + + def test_psdump_skipPasses(self): + """With the skip_passes argument, only non-passes should appear""" + fail_phase = self._set_last_core_phase_to_fail() + output = self._ts.phase_statuses_dump(skip_passes=True) + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, fail_phase, self._TESTNAME, xfail="no" + ) + for phase in test_status.CORE_PHASES: + if phase != fail_phase: + self.assert_phase_absent(output, phase, self._TESTNAME) + + def test_psdump_unexpectedPass_shouldBePresent(self): + """Even with the skip_passes argument, an unexpected PASS should be present""" + xfail_phase = test_status.CORE_PHASES[-1] + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=xfail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(skip_passes=True, xfails=xfails) + self.assert_status_of_phase( + output, + test_status.TEST_PASS_STATUS, + xfail_phase, + self._TESTNAME, + xfail="unexpected", + ) + for phase in test_status.CORE_PHASES: + if phase != xfail_phase: + self.assert_phase_absent(output, phase, self._TESTNAME) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_two_link_to_case2_output.py b/CIME/tests/test_unit_two_link_to_case2_output.py new file mode 100755 index 00000000000..2984fa4c802 --- /dev/null +++ b/CIME/tests/test_unit_two_link_to_case2_output.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of the method +SystemTestsCompareTwo._link_to_case2_output +""" + +# Ignore privacy concerns for unit tests, so that unit tests can access +# protected members of the system under test +# +# pylint:disable=protected-access + +import unittest +import os +import shutil +import tempfile +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.tests.case_fake import CaseFake + +# ======================================================================== +# Fake version of SystemTestsCompareTwo that overrides some functionality for +# the sake of unit testing +# ======================================================================== + + +class SystemTestsCompareTwoFake(SystemTestsCompareTwo): + def __init__(self, case1, run_two_suffix="test"): + + SystemTestsCompareTwo.__init__( + self, case1, separate_builds=False, run_two_suffix=run_two_suffix + ) + + # ------------------------------------------------------------------------ + # Stubs of methods called by 
SystemTestsCommon.__init__ that interact with + # the system or case object in ways we want to avoid here + # ------------------------------------------------------------------------ + + def _init_environment(self, caseroot): + pass + + def _init_locked_files(self, caseroot, expected): + pass + + def _init_case_setup(self): + pass + + # ------------------------------------------------------------------------ + # Stubs of methods that are typically provided by the individual test + # ------------------------------------------------------------------------ + + def _case_one_setup(self): + pass + + def _case_two_setup(self): + pass + + +# ======================================================================== +# Test class itself +# ======================================================================== + + +class TestLinkToCase2Output(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self.original_wd = os.getcwd() + # Create a sandbox in which case directories can be created + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + # Some tests trigger a chdir call in the SUT; make sure we return to the + # original directory at the end of the test + os.chdir(self.original_wd) + + shutil.rmtree(self.tempdir, ignore_errors=True) + + def setup_test_and_directories(self, casename1, run2_suffix): + """ + Returns test object + """ + + case1root = os.path.join(self.tempdir, casename1) + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, run_two_suffix=run2_suffix) + mytest._case1.make_rundir() # pylint: disable=maybe-no-member + mytest._case2.make_rundir() # pylint: disable=maybe-no-member + + return mytest + + def create_file_in_rundir2(self, mytest, core_filename, run2_suffix): + """ + Creates a file in rundir2 named CASE2.CORE_FILENAME.nc.RUN2_SUFFIX + (where CASE2 is the casename of case2) + + Returns full path to the file created + """ + filename = "{}.{}.nc.{}".format( + mytest._case2.get_value("CASE"), core_filename, run2_suffix + ) + filepath = os.path.join(mytest._case2.get_value("RUNDIR"), filename) + open(filepath, "w").close() + return filepath + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_basic(self): + # Setup + casename1 = "mytest" + run2_suffix = "run2" + + mytest = self.setup_test_and_directories(casename1, run2_suffix) + filepath1 = self.create_file_in_rundir2(mytest, "clm2.h0", run2_suffix) + filepath2 = self.create_file_in_rundir2(mytest, "clm2.h1", run2_suffix) + + # Exercise + mytest._link_to_case2_output() + + # Verify + expected_link_filename1 = "{}.clm2.h0.nc.{}".format(casename1, run2_suffix) + expected_link_filepath1 = os.path.join( + mytest._case1.get_value("RUNDIR"), expected_link_filename1 + ) + self.assertTrue(os.path.islink(expected_link_filepath1)) + self.assertEqual(filepath1, os.readlink(expected_link_filepath1)) + + expected_link_filename2 = "{}.clm2.h1.nc.{}".format(casename1, run2_suffix) + expected_link_filepath2 = os.path.join( + mytest._case1.get_value("RUNDIR"), expected_link_filename2 + ) + self.assertTrue(os.path.islink(expected_link_filepath2)) + self.assertEqual(filepath2, os.readlink(expected_link_filepath2)) + + def test_existing_link(self): + # Setup + casename1 = "mytest" + run2_suffix = "run2" + + mytest = 
self.setup_test_and_directories(casename1, run2_suffix) + self.create_file_in_rundir2(mytest, "clm2.h0", run2_suffix) + + # Create initial link via a call to _link_to_case2_output + mytest._link_to_case2_output() + + # Exercise + # See what happens when we try to recreate that link + mytest._link_to_case2_output() + + # (No verification: Test passes if no exception was raised) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_user_mod_support.py b/CIME/tests/test_unit_user_mod_support.py new file mode 100755 index 00000000000..51ffb4778ca --- /dev/null +++ b/CIME/tests/test_unit_user_mod_support.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python3 + +import unittest +import shutil +import tempfile +import os +from CIME.user_mod_support import apply_user_mods +from CIME.utils import CIMEError + +# ======================================================================== +# Define some parameters +# ======================================================================== + +_SOURCEMODS = os.path.join("SourceMods", "src.drv") + + +class TestUserModSupport(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self._caseroot = tempfile.mkdtemp() + self._caseroot_sourcemods = os.path.join(self._caseroot, _SOURCEMODS) + os.makedirs(self._caseroot_sourcemods) + self._user_mods_parent_dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._caseroot, ignore_errors=True) + shutil.rmtree(self._user_mods_parent_dir, ignore_errors=True) + + def createUserMod(self, name, include_dirs=None): + """Create a user_mods directory with the given name. + + This directory is created within self._user_mods_parent_dir + + For name='foo', it will contain: + + - A user_nl_cpl file with contents: + foo + + - A shell_commands file with contents: + echo foo >> /PATH/TO/CASEROOT/shell_commands_result + + - A file in _SOURCEMODS named myfile.F90 with contents: + foo + + If include_dirs is given, it should be a list of strings, giving names + of other user_mods directories to include. e.g., if include_dirs is + ['foo1', 'foo2'], then this will create a file 'include_user_mods' that + contains paths to the 'foo1' and 'foo2' user_mods directories, one per + line. 
+ """ + + mod_dir = os.path.join(self._user_mods_parent_dir, name) + os.makedirs(mod_dir) + mod_dir_sourcemods = os.path.join(mod_dir, _SOURCEMODS) + os.makedirs(mod_dir_sourcemods) + + with open(os.path.join(mod_dir, "user_nl_cpl"), "w") as user_nl_cpl: + user_nl_cpl.write(name + "\n") + with open(os.path.join(mod_dir, "shell_commands"), "w") as shell_commands: + command = "echo {} >> {}/shell_commands_result\n".format( + name, self._caseroot + ) + shell_commands.write(command) + with open(os.path.join(mod_dir_sourcemods, "myfile.F90"), "w") as f90_file: + f90_file.write(name + "\n") + + if include_dirs: + with open( + os.path.join(mod_dir, "include_user_mods"), "w" + ) as include_user_mods: + for one_include in include_dirs: + include_user_mods.write( + os.path.join(self._user_mods_parent_dir, one_include) + "\n" + ) + + def assertResults( + self, + expected_user_nl_cpl, + expected_shell_commands_result, + expected_sourcemod, + msg="", + ): + """Asserts that the contents of the files in self._caseroot match expectations + + If msg is provided, it is printed for some failing assertions + """ + + path_to_user_nl_cpl = os.path.join(self._caseroot, "user_nl_cpl") + self.assertTrue( + os.path.isfile(path_to_user_nl_cpl), + msg=msg + ": user_nl_cpl does not exist", + ) + with open(path_to_user_nl_cpl, "r") as user_nl_cpl: + contents = user_nl_cpl.read() + self.assertEqual(expected_user_nl_cpl, contents) + + path_to_shell_commands_result = os.path.join( + self._caseroot, "shell_commands_result" + ) + self.assertTrue( + os.path.isfile(path_to_shell_commands_result), + msg=msg + ": shell_commands_result does not exist", + ) + with open(path_to_shell_commands_result, "r") as shell_commands_result: + contents = shell_commands_result.read() + self.assertEqual(expected_shell_commands_result, contents) + + path_to_sourcemod = os.path.join(self._caseroot_sourcemods, "myfile.F90") + self.assertTrue( + os.path.isfile(path_to_sourcemod), + msg=msg + ": sourcemod file does not exist", + ) + with open(path_to_sourcemod, "r") as sourcemod: + contents = sourcemod.read() + self.assertEqual(expected_sourcemod, contents) + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_basic(self): + self.createUserMod("foo") + apply_user_mods(self._caseroot, os.path.join(self._user_mods_parent_dir, "foo")) + self.assertResults( + expected_user_nl_cpl="foo\n", + expected_shell_commands_result="foo\n", + expected_sourcemod="foo\n", + msg="test_basic", + ) + + def test_keepexe(self): + self.createUserMod("foo") + with self.assertRaisesRegex(CIMEError, "cannot have any source mods"): + apply_user_mods( + self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo"), + keepexe=True, + ) + + def test_two_applications(self): + """If apply_user_mods is called twice, the second should appear after the first so that it takes precedence.""" + + self.createUserMod("foo1") + self.createUserMod("foo2") + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "foo1") + ) + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "foo2") + ) + self.assertResults( + expected_user_nl_cpl="foo1\nfoo2\n", + expected_shell_commands_result="foo1\nfoo2\n", + expected_sourcemod="foo2\n", + msg="test_two_applications", + ) + + def test_include(self): + """If there is an included mod, the main one should appear after the included one so that it takes precedence.""" + + 
self.createUserMod("base") + self.createUserMod("derived", include_dirs=["base"]) + + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "derived") + ) + + self.assertResults( + expected_user_nl_cpl="base\nderived\n", + expected_shell_commands_result="base\nderived\n", + expected_sourcemod="derived\n", + msg="test_include", + ) + + def test_duplicate_includes(self): + """Test multiple includes, where both include the same base mod. + + The base mod should only be included once. + """ + + self.createUserMod("base") + self.createUserMod("derived1", include_dirs=["base"]) + self.createUserMod("derived2", include_dirs=["base"]) + self.createUserMod("derived_combo", include_dirs=["derived1", "derived2"]) + + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "derived_combo") + ) + + # NOTE(wjs, 2017-04-15) The ordering of derived1 vs. derived2 is not + # critical here: If this aspect of the behavior changes, the + # expected_contents can be changed to match the new behavior in this + # respect. + expected_contents = """base +derived2 +derived1 +derived_combo +""" + self.assertResults( + expected_user_nl_cpl=expected_contents, + expected_shell_commands_result=expected_contents, + expected_sourcemod="derived_combo\n", + msg="test_duplicate_includes", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_user_nl_utils.py b/CIME/tests/test_unit_user_nl_utils.py new file mode 100755 index 00000000000..9220182eeeb --- /dev/null +++ b/CIME/tests/test_unit_user_nl_utils.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 + +import unittest +import os +import shutil +import tempfile +from CIME.SystemTests.test_utils import user_nl_utils + + +class TestUserNLCopier(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self._caseroot = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._caseroot, ignore_errors=True) + + def write_user_nl_file(self, component, contents, suffix=""): + """Write contents to a user_nl file in the case directory. Returns the + basename (i.e., not the full path) of the file that is created. + + For a component foo, with the default suffix of '', the file name will + be user_nl_foo + + If the suffix is '_0001', the file name will be user_nl_foo_0001 + """ + + filename = "user_nl_" + component + suffix + + with open(os.path.join(self._caseroot, filename), "w") as user_nl_file: + user_nl_file.write(contents) + + return filename + + def assertFileContentsEqual(self, expected, filepath, msg=None): + """Asserts that the contents of the file given by 'filepath' are equal to + the string given by 'expected'. 
'msg' gives an optional message to be + printed if the assertion fails.""" + + with open(filepath, "r") as myfile: + contents = myfile.read() + + self.assertEqual(expected, contents, msg=msg) + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_append(self): + # Define some variables + component = "foo" + # deliberately exclude new line from file contents, to make sure that's + # handled correctly + orig_contents = "bar = 42" + contents_to_append = "baz = 101" + + # Setup + filename = self.write_user_nl_file(component, orig_contents) + + # Exercise + user_nl_utils.append_to_user_nl_files( + caseroot=self._caseroot, component=component, contents=contents_to_append + ) + + # Verify + expected_contents = orig_contents + "\n" + contents_to_append + "\n" + self.assertFileContentsEqual( + expected_contents, os.path.join(self._caseroot, filename) + ) + + def test_append_list(self): + # Define some variables + component = "foo" + # deliberately exclude new line from file contents, to make sure that's + # handled correctly + orig_contents = "bar = 42" + contents_to_append_1 = "baz = 101" + contents_to_append_2 = "qux = 987" + contents_to_append = [ + contents_to_append_1, + contents_to_append_2, + ] + + # Setup + filename = self.write_user_nl_file(component, orig_contents) + + # Exercise + user_nl_utils.append_to_user_nl_files( + caseroot=self._caseroot, component=component, contents=contents_to_append + ) + + # Verify + expected_contents = ( + orig_contents + + "\n" + + contents_to_append_1 + + "\n" + + contents_to_append_2 + + "\n" + ) + self.assertFileContentsEqual( + expected_contents, os.path.join(self._caseroot, filename) + ) + + def test_append_multiple_files(self): + # Simulates a multi-instance test + component = "foo" + orig_contents1 = "bar = 42" + orig_contents2 = "bar = 17" + contents_to_append = "baz = 101" + + # Setup + filename1 = self.write_user_nl_file(component, orig_contents1, suffix="_0001") + filename2 = self.write_user_nl_file(component, orig_contents2, suffix="_0002") + + # Exercise + user_nl_utils.append_to_user_nl_files( + caseroot=self._caseroot, component=component, contents=contents_to_append + ) + + # Verify + expected_contents1 = orig_contents1 + "\n" + contents_to_append + "\n" + expected_contents2 = orig_contents2 + "\n" + contents_to_append + "\n" + self.assertFileContentsEqual( + expected_contents1, os.path.join(self._caseroot, filename1) + ) + self.assertFileContentsEqual( + expected_contents2, os.path.join(self._caseroot, filename2) + ) + + def test_append_without_files_raises_exception(self): + # This test verifies that you get an exception if you call + # append_to_user_nl_files when there are no user_nl files of interest + + # Define some variables + component_exists = "foo" + component_for_append = "bar" + + # Setup + # Create file in caseroot for component_exists, but not for component_for_append + self.write_user_nl_file(component_exists, "irrelevant contents") + + self.assertRaisesRegex( + RuntimeError, + "No user_nl files found", + user_nl_utils.append_to_user_nl_files, + caseroot=self._caseroot, + component=component_for_append, + contents="irrelevant contents to append", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_utils.py b/CIME/tests/test_unit_utils.py new file mode 100755 index 00000000000..8abde4d8fef --- /dev/null +++ b/CIME/tests/test_unit_utils.py @@ -0,0 +1,441 @@ 
+#!/usr/bin/env python3 + +import os +import stat +import shutil +import sys +import tempfile + +import unittest +from unittest import mock +from CIME.status import run_and_log_case_status +from CIME.utils import ( + indent_string, + import_from_file, + _line_defines_python_function, + file_contains_python_function, + copy_globs, + import_and_run_sub_or_cmd, +) + + +class TestIndentStr(unittest.TestCase): + """Test the indent_string function.""" + + def test_indent_string_singleline(self): + """Test the indent_string function with a single-line string""" + mystr = "foo" + result = indent_string(mystr, 4) + expected = " foo" + self.assertEqual(expected, result) + + def test_indent_string_multiline(self): + """Test the indent_string function with a multi-line string""" + mystr = """hello +hi +goodbye +""" + result = indent_string(mystr, 2) + expected = """ hello + hi + goodbye +""" + self.assertEqual(expected, result) + + +class TestLineDefinesPythonFunction(unittest.TestCase): + """Tests of _line_defines_python_function""" + + # ------------------------------------------------------------------------ + # Tests of _line_defines_python_function that should return True + # ------------------------------------------------------------------------ + + def test_def_foo(self): + """Test of a def of the function of interest""" + line = "def foo():" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_def_foo_space(self): + """Test of a def of the function of interest, with an extra space before the parentheses""" + line = "def foo ():" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_foo(self): + """Test of an import of the function of interest""" + line = "from bar.baz import foo" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_foo_space(self): + """Test of an import of the function of interest, with trailing spaces""" + line = "from bar.baz import foo " + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_foo_then_others(self): + """Test of an import of the function of interest, along with others""" + line = "from bar.baz import foo, bar" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_others_then_foo(self): + """Test of an import of the function of interest, after others""" + line = "from bar.baz import bar, foo" + self.assertTrue(_line_defines_python_function(line, "foo")) + + # ------------------------------------------------------------------------ + # Tests of _line_defines_python_function that should return False + # ------------------------------------------------------------------------ + + def test_def_barfoo(self): + """Test of a def of a different function""" + line = "def barfoo():" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_def_foobar(self): + """Test of a def of a different function""" + line = "def foobar():" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_def_foo_indented(self): + """Test of a def of the function of interest, but indented""" + line = " def foo():" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_def_foo_no_parens(self): + """Test of a def of the function of interest, but without parentheses""" + line = "def foo:" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_import_foo_indented(self): + """Test of an import of the function of interest, but indented""" + line = " from bar.baz import foo" + 
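+        # An indented import is not a top-level statement, so it should not be
+        # treated as defining "foo" in the module.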
self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_import_barfoo(self): + """Test of an import of a different function""" + line = "from bar.baz import barfoo" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_import_foobar(self): + """Test of an import of a different function""" + line = "from bar.baz import foobar" + self.assertFalse(_line_defines_python_function(line, "foo")) + + +class TestFileContainsPythonFunction(unittest.TestCase): + """Tests of file_contains_python_function""" + + def setUp(self): + self._workdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._workdir, ignore_errors=True) + + def create_test_file(self, contents): + """Creates a test file with the given contents, and returns the path to that file""" + + filepath = os.path.join(self._workdir, "testfile") + with open(filepath, "w") as fd: + fd.write(contents) + + return filepath + + def test_contains_correct_def_and_others(self): + """Test file_contains_python_function with a correct def mixed with other defs""" + contents = """ +def bar(): +def foo(): +def baz(): +""" + filepath = self.create_test_file(contents) + self.assertTrue(file_contains_python_function(filepath, "foo")) + + def test_does_not_contain_correct_def(self): + """Test file_contains_python_function without the correct def""" + contents = """ +def bar(): +def notfoo(): +def baz(): +""" + filepath = self.create_test_file(contents) + self.assertFalse(file_contains_python_function(filepath, "foo")) + + +class MockTime(object): + def __init__(self): + self._old = None + + def __enter__(self): + self._old = getattr(sys.modules["time"], "strftime") + setattr(sys.modules["time"], "strftime", lambda *args: "00:00:00 ") + + def __exit__(self, *args, **kwargs): + setattr(sys.modules["time"], "strftime", self._old) + + +def match_all_lines(data, lines): + for line in data: + for i, x in enumerate(lines): + if x == line: + lines.pop(i) + + continue + + if len(lines) == 0: + return True, [] + + return False, lines + + +class TestUtils(unittest.TestCase): + def setUp(self): + self.base_func = lambda *args: None + + # pylint: disable=unused-argument + def _error_func(*args): + raise Exception("Something went wrong") + + self.error_func = _error_func + + def test_import_and_run_sub_or_cmd(self): + with self.assertRaisesRegex( + Exception, "ERROR: Could not find buildnml file for component test" + ): + import_and_run_sub_or_cmd( + "/tmp/buildnml", + "arg1 arg2 -vvv", + "buildnml", + (self, "arg1"), + "/tmp", + "test", + ) + + @mock.patch("importlib.import_module") + def test_import_and_run_sub_or_cmd_cime_py(self, importmodule): + importmodule.side_effect = Exception("Module has a problem") + + with self.assertRaisesRegex(Exception, "Module has a problem") as e: + import_and_run_sub_or_cmd( + "/tmp/buildnml", + "arg1, arg2 -vvv", + "buildnml", + (self, "arg1"), + "/tmp", + "test", + ) + + # check that we avoid exception chaining + self.assertTrue(e.exception.__context__ is None) + + @mock.patch("importlib.import_module") + def test_import_and_run_sub_or_cmd_import(self, importmodule): + importmodule.side_effect = Exception("I am being imported") + + with self.assertRaisesRegex(Exception, "I am being imported") as e: + import_and_run_sub_or_cmd( + "/tmp/buildnml", + "arg1 arg2 -vvv", + "buildnml", + (self, "arg1"), + "/tmp", + "test", + ) + + # check that we avoid exception chaining + self.assertTrue(e.exception.__context__ is None) + + @mock.patch("os.path.isfile") + 
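+    # These patches make the buildnml path look like an existing file and force
+    # run_sub_or_cmd to fail, so the FAILED error is propagated without touching
+    # the filesystem or running a real script.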
@mock.patch("CIME.utils.run_sub_or_cmd") + def test_import_and_run_sub_or_cmd_run(self, func, isfile): + isfile.return_value = True + + func.side_effect = Exception( + "ERROR: /tmp/buildnml arg1 arg2 -vvv FAILED, see above" + ) + + with self.assertRaisesRegex( + Exception, "ERROR: /tmp/buildnml arg1 arg2 -vvv FAILED, see above" + ): + import_and_run_sub_or_cmd( + "/tmp/buildnml", + "arg1 arg2 -vvv", + "buildnml", + (self, "arg1"), + "/tmp", + "test", + ) + + @mock.patch("glob.glob") + @mock.patch("CIME.utils.safe_copy") + def test_copy_globs(self, safe_copy, glob): + glob.side_effect = [ + [], + ["/src/run/test.sh", "/src/run/.hidden.sh"], + [ + "/src/bld/test.nc", + ], + ] + + copy_globs(["CaseDocs/*", "run/*.sh", "bld/*.nc"], "/storage/output", "uid") + + safe_copy.assert_any_call( + "/src/run/test.sh", "/storage/output/test.sh.uid", preserve_meta=False + ) + safe_copy.assert_any_call( + "/src/run/.hidden.sh", "/storage/output/hidden.sh.uid", preserve_meta=False + ) + safe_copy.assert_any_call( + "/src/bld/test.nc", "/storage/output/test.nc.uid", preserve_meta=False + ) + + def assertMatchAllLines(self, tempdir, test_lines): + with open(os.path.join(tempdir, "CaseStatus")) as fd: + data = fd.readlines() + + result, missing = match_all_lines(data, test_lines) + + error = [] + + if len(missing) != 0: + error.extend(["Missing Lines", ""]) + error.extend([x.rstrip("\n") for x in missing]) + error.extend(["", "Tempfile contents", ""]) + error.extend([x.rstrip("\n") for x in data]) + + self.assertTrue(result, msg="\n".join(error)) + + def test_import_from_file(self): + with tempfile.NamedTemporaryFile() as fd: + fd.writelines( + [ + b"def test():\n", + b" return 'value'", + ] + ) + + fd.flush() + + module = import_from_file("test.py", fd.name) + + assert module.test() == "value" + + def test_run_and_log_case_status(self): + test_lines = [ + "00:00:00 default starting \n", + "00:00:00 default success \n", + ] + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status(self.base_func, "default", caseroot=tempdir) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_case_submit_on_batch(self): + test_lines = [ + "00:00:00 case.submit starting \n", + "00:00:00 case.submit success \n", + ] + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + self.base_func, "case.submit", caseroot=tempdir, is_batch=True + ) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_case_submit_no_batch(self): + test_lines = [ + "00:00:00 case.submit starting \n", + "00:00:00 case.submit success \n", + ] + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + self.base_func, "case.submit", caseroot=tempdir, is_batch=False + ) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_case_submit_error_on_batch(self): + test_lines = [ + "00:00:00 case.submit starting \n", + "00:00:00 case.submit error \n", + "Something went wrong\n", + ] + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + with self.assertRaises(Exception): + run_and_log_case_status( + self.error_func, "case.submit", caseroot=tempdir, is_batch=True + ) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_custom_msg(self): + test_lines = [ + "00:00:00 default starting starting extra\n", + "00:00:00 default success success extra\n", + ] + + starting_func = mock.MagicMock(return_value="starting extra") + success_func 
= mock.MagicMock(return_value="success extra") + + def normal_func(): + return "data" + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + normal_func, + "default", + custom_starting_msg_functor=starting_func, + custom_success_msg_functor=success_func, + caseroot=tempdir, + ) + + self.assertMatchAllLines(tempdir, test_lines) + + starting_func.assert_called_with() + success_func.assert_called_with("data") + + def test_run_and_log_case_status_custom_msg_error_on_batch(self): + test_lines = [ + "00:00:00 default starting starting extra\n", + "00:00:00 default success success extra\n", + ] + + starting_func = mock.MagicMock(return_value="starting extra") + success_func = mock.MagicMock(return_value="success extra") + + def error_func(): + raise Exception("Error") + + with tempfile.TemporaryDirectory() as tempdir, MockTime(), self.assertRaises( + Exception + ): + run_and_log_case_status( + error_func, + "default", + custom_starting_msg_functor=starting_func, + custom_success_msg_functor=success_func, + caseroot=tempdir, + ) + + self.assertMatchAllLines(tempdir, test_lines) + + starting_func.assert_called_with() + success_func.assert_not_called() + + def test_run_and_log_case_status_error(self): + test_lines = [ + "00:00:00 default starting \n", + "00:00:00 default error \n", + "Something went wrong\n", + ] + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + with self.assertRaises(Exception): + run_and_log_case_status(self.error_func, "default", caseroot=tempdir) + + self.assertMatchAllLines(tempdir, test_lines) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_xml_archive_base.py b/CIME/tests/test_unit_xml_archive_base.py new file mode 100644 index 00000000000..98f58055c9c --- /dev/null +++ b/CIME/tests/test_unit_xml_archive_base.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 + +import os +import io +import unittest +import tempfile +from contextlib import contextmanager +from pathlib import Path +from unittest import mock + +from CIME.XML.archive_base import ArchiveBase + +TEST_CONFIG = """ + + unique\.name\.unique.* + +""" + +EXACT_TEST_CONFIG = """ + + unique\.name\.unique.nc + +""" + +EXCLUDE_TEST_CONFIG = """ + + unique\.name\.unique.nc + + + unique\.name\.unique.nc + + + unique\.name\.unique.nc + +""" + + +class TestXMLArchiveBase(unittest.TestCase): + @contextmanager + def _setup_environment(self, test_files): + with tempfile.TemporaryDirectory() as temp_dir: + for x in test_files: + Path(temp_dir, x).touch() + + yield temp_dir + + def test_exclude_testing(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(EXCLUDE_TEST_CONFIG)) + + # no attribute + assert not archiver.exclude_testing("eam") + + # not in config + assert not archiver.exclude_testing("mpassi") + + # set false + assert not archiver.exclude_testing("mpasso") + + # set true + assert archiver.exclude_testing("cpl") + + def test_match_files(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(TEST_CONFIG)) + + fail_files = [ + "othername.eam.unique.name.unique.0001-01-01-0000.nc", # casename mismatch + "casename.satm.unique.name.unique.0001-01-01-0000.nc", # model (component?) 
mismatch + "casename.eam.0001-01-01-0000.nc", # missing hist_file_extension + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + ] + + test_files = [ + "casename.eam1.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam11990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc.base", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc.base", + ] + + with self._setup_environment(fail_files + test_files) as temp_dir: + hist_files = archiver.get_all_hist_files( + "casename", "eam", from_dir=temp_dir + ) + + test_files.sort() + hist_files.sort() + + assert len(hist_files) == len(test_files) + + # assert all match except first + for x, y in zip(test_files, hist_files): + assert x == y, f"{x} != {y}" + + def test_extension_included(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(EXACT_TEST_CONFIG)) + + fail_files = [ + "othername.eam.unique.name.unique.0001-01-01-0000.nc", # casename mismatch + "casename.satm.unique.name.unique.0001-01-01-0000.nc", # model (component?) mismatch + "casename.eam.0001-01-01-0000.nc", # missing hist_file_extension + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc.base", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc.base", + ] + + test_files = [ + "casename.eam1.unique.name.unique.nc", + "casename.eam1_.unique.name.unique.nc", + "casename.eam_.unique.name.unique.nc", + "casename.eam1990.unique.name.unique.nc", + "casename.eam_1990.unique.name.unique.nc", + "casename.eam1_1990.unique.name.unique.nc", + "casename.eam11990.unique.name.unique.nc", + "casename.eam.unique.name.unique.nc", + ] + + with self._setup_environment(fail_files + test_files) as temp_dir: + hist_files = archiver.get_all_hist_files( + "casename", "eam", suffix="nc", from_dir=temp_dir + ) + + test_files.sort() + hist_files.sort() + + assert len(hist_files) == len(test_files) + + # assert all match except first + for x, y in zip(test_files, hist_files): + assert x == y, f"{x} != {y}" + + def test_suffix(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(TEST_CONFIG)) + + fail_files = [ + "othername.eam.unique.name.unique.0001-01-01-0000.nc", # casename mismatch + "casename.satm.unique.name.unique.0001-01-01-0000.nc", # model (component?) 
mismatch + "casename.eam.0001-01-01-0000.nc", # missing hist_file_extension + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + # ensure these do not match when suffix is provided + "casename.eam1.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam11990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + ] + + test_files = [ + "casename.eam.unique.name.unique.0001-01-01-0000.nc.base", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc.base", + ] + + with self._setup_environment(fail_files + test_files) as temp_dir: + hist_files = archiver.get_all_hist_files( + "casename", "eam", suffix="base", from_dir=temp_dir + ) + + assert len(hist_files) == len(test_files) + + hist_files.sort() + test_files.sort() + + for x, y in zip(hist_files, test_files): + assert x == y, f"{x} != {y}" diff --git a/CIME/tests/test_unit_xml_env_batch.py b/CIME/tests/test_unit_xml_env_batch.py new file mode 100755 index 00000000000..e657b57aa04 --- /dev/null +++ b/CIME/tests/test_unit_xml_env_batch.py @@ -0,0 +1,1266 @@ +#!/usr/bin/env python3 + +import os +import unittest +import tempfile +from contextlib import ExitStack +from unittest import mock + +from CIME.utils import CIMEError, expect +from CIME.XML.env_batch import EnvBatch, get_job_deps +from CIME.XML.env_workflow import EnvWorkflow +from CIME.BuildTools.configure import FakeCase + +# pylint: disable=unused-argument + +XML_BASE = b""" + +
+ These variables may be changed anytime during a run, they + control arguments to the batch submit command. +
+ + + char + miller_slurm,nersc_slurm,lc_slurm,moab,pbs,lsf,slurm,cobalt,cobalt_theta,none + The batch system type to use for this machine. + + + + + logical + TRUE,FALSE + whether the PROJECT value is required on this machine + + + + squeue + sbatch + scancel + #SBATCH + (\\d+)$ + --dependency=afterok:jobid + --dependency=afterany:jobid + : + %H:%M:%S + --mail-user + --mail-type + none, all, begin, end, fail + + + + + + + --job-name={{ job_id }} + --nodes={{ num_nodes }} + --output={{ job_id }}.%j + --exclusive + + + + + -w docker + + + debug + big + smallfast + + +
""" + +XML_DIFF = b""" + +
+ These variables may be changed anytime during a run, they + control arguments to the batch submit command. +
+ + + char + miller_slurm,nersc_slurm,lc_slurm,moab,pbs,lsf,slurm,cobalt,cobalt_theta,none + The batch system type to use for this machine. + + + + + logical + TRUE,FALSE + whether the PROJECT value is required on this machine + + + + squeue + batch + scancel + #SBATCH + (\\d+)$ + --dependency=afterok:jobid + --dependency=afterany:jobid + : + %H:%M:%S + --mail-user + --mail-type + none, all, begin, end, fail + + + + + + + + --job-name={{ job_id }} + --nodes=10 + --output={{ job_id }}.%j + --exclusive + --qos=high + + + + + -w docker + + + debug + big + + +
""" + + +XML_CHECK = b""" + +
+ These variables may be changed anytime during a run, they + control arguments to the batch submit command. +
+ + + char + miller_slurm,nersc_slurm,lc_slurm,moab,pbs,pbspro,lsf,slurm,cobalt,cobalt_theta,slurm_single_node,none + The batch system type to use for this machine. + + + + + logical + TRUE,FALSE + whether the PROJECT value is required on this machine + + + + + --constraint=gpu + + + --gpus-per-node=4 + --gpu-bind=none + + + --gpus-per-task=1 + --gpu-bind=map_gpu:0,1,2,3 + + + --gpus-per-node=4 + --gpu-bind=none + + + -G 0 + + + -G 0 + + + regular + preempt + shared + overrun + debug + + +
""" + +XML_WORKFLOW = b""" + +
+ These variables may be changed anytime during a run, they
+ control jobs that will be submitted and their dependencies.
+
+ + + char + + + char + + + char + + +
""" + + +def _open_temp_file(stack, data): + tfile = stack.enter_context(tempfile.NamedTemporaryFile()) + + tfile.write(data) + + tfile.seek(0) + + return tfile + + +class FakeCaseWWorkflow(FakeCase): + """ + Extend the FakeCase class to have the functions needed for testing get_jobs_overrides + Use FakeCase rather than a class mock in order to return a more complex and dynamic + env_workflow object + """ + + def __init__( + self, + compiler, + mpilib, + debug, + comp_interface, + task_count, + thread_count, + tasks_per_node, + mem_per_task, + max_mem, + ): + super().__init__(compiler, mpilib, debug, comp_interface) + self._vals["task_count"] = task_count + self._vals["thread_count"] = thread_count + self._vals["tasks_per_node"] = tasks_per_node + self._vals["mem_per_task"] = mem_per_task + self._vals["max_mem"] = max_mem + + def get_env(self, short_name): + expect( + short_name == "workflow", + "FakeWWorkflow only can handle workflow as short_name sent in", + ) + with ExitStack() as stack: + WorkflowFile = _open_temp_file(stack, XML_WORKFLOW) + env_workflow = EnvWorkflow(infile=WorkflowFile.name) + env_workflow.set_value( + "task_count", str(self.get_value("task_count")), subgroup="case.test" + ) + env_workflow.set_value( + "thread_count", + str(self.get_value("thread_count")), + subgroup="case.test", + ) + env_workflow.set_value( + "tasks_per_node", + str(self.get_value("tasks_per_node")), + subgroup="case.test", + ) + + return env_workflow + + return None + + def get_resolved_value( + self, item, attribute=None, subgroup="PRIMARY", resolved=True + ): + print(item) + expect(isinstance(item, str), "item must be a string") + expect(("$" not in item), "$ not allowed in item for this fake") + return item + + def get_mpirun_cmd(self, job, overrides): + if self.get_value("MPILIB") == "mpi-serial": + mpirun = "" + else: + mpirun = "mpirun" + return mpirun + + +class TestXMLEnvBatch(unittest.TestCase): + def test_compare_xml(self): + with ExitStack() as stack: + file1 = _open_temp_file(stack, XML_DIFF) + batch1 = EnvBatch(infile=file1.name) + + file2 = _open_temp_file(stack, XML_BASE) + batch2 = EnvBatch(infile=file2.name) + + diff = batch1.compare_xml(batch2) + diff2 = batch2.compare_xml(batch1) + + expected_diff = { + "BATCH_SYSTEM": ["pbs", "slurm"], + "arg1": ["-p pbatch", "-p $JOB_QUEUE"], + "arg3": ["-m plane", ""], + "batch_submit": ["batch", "sbatch"], + "directive1": [" --nodes=10", " --nodes={{ num_nodes }}"], + "directive4": [" --qos=high ", ""], + "queue1": ["big", "big"], + "queue2": ["", "smallfast"], + } + + assert diff == expected_diff + + expected_diff2 = { + "BATCH_SYSTEM": ["slurm", "pbs"], + "arg1": ["-p $JOB_QUEUE", "-p pbatch"], + "arg3": ["", "-m plane"], + "batch_submit": ["sbatch", "batch"], + "directive1": [" --nodes={{ num_nodes }}", " --nodes=10"], + "directive4": ["", " --qos=high "], + "queue1": ["big", "big"], + "queue2": ["smallfast", ""], + } + + assert diff2 == expected_diff2 + + def test_compare_xml_same(self): + with ExitStack() as stack: + file1 = _open_temp_file(stack, XML_CHECK) + batch1 = EnvBatch(infile=file1.name) + + file2 = _open_temp_file(stack, XML_CHECK) + batch2 = EnvBatch(infile=file2.name) + + diff = batch1.compare_xml(batch2) + diff2 = batch2.compare_xml(batch1) + + expected_diff = {} + assert diff == expected_diff, f"{diff}" + assert diff2 == expected_diff, f"{diff2}" + + @mock.patch("CIME.XML.env_batch.EnvBatch._submit_single_job") + def test_submit_jobs(self, _submit_single_job): + case = mock.MagicMock() + + case.get_value.side_effect = [ + 
False, + ] + + env_batch = EnvBatch() + + with self.assertRaises(CIMEError): + env_batch.submit_jobs(case) + + @mock.patch("CIME.XML.env_batch.os.path.isfile") + @mock.patch("CIME.XML.env_batch.get_batch_script_for_job") + @mock.patch("CIME.XML.env_batch.EnvBatch._submit_single_job") + def test_submit_jobs_dependency( + self, _submit_single_job, get_batch_script_for_job, isfile + ): + case = mock.MagicMock() + + case.get_env.return_value.get_jobs.return_value = [ + "case.build", + "case.run", + ] + + case.get_env.return_value.get_value.side_effect = [ + None, + "", + None, + "case.build", + ] + + case.get_value.side_effect = [ + False, + ] + + _submit_single_job.side_effect = ["0", "1"] + + isfile.return_value = True + + get_batch_script_for_job.side_effect = [".case.build", ".case.run"] + + env_batch = EnvBatch() + + depid = env_batch.submit_jobs(case) + + _submit_single_job.assert_any_call( + case, + "case.build", + skip_pnl=False, + resubmit_immediate=False, + dep_jobs=[], + allow_fail=False, + no_batch=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ) + _submit_single_job.assert_any_call( + case, + "case.run", + skip_pnl=False, + resubmit_immediate=False, + dep_jobs=[ + "0", + ], + allow_fail=False, + no_batch=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ) + assert depid == {"case.build": "0", "case.run": "1"} + + @mock.patch("CIME.XML.env_batch.os.path.isfile") + @mock.patch("CIME.XML.env_batch.get_batch_script_for_job") + @mock.patch("CIME.XML.env_batch.EnvBatch._submit_single_job") + def test_submit_jobs_single( + self, _submit_single_job, get_batch_script_for_job, isfile + ): + case = mock.MagicMock() + + case.get_env.return_value.get_jobs.return_value = [ + "case.run", + ] + + case.get_env.return_value.get_value.return_value = None + + case.get_value.side_effect = [ + False, + ] + + _submit_single_job.return_value = "0" + + isfile.return_value = True + + get_batch_script_for_job.side_effect = [ + ".case.run", + ] + + env_batch = EnvBatch() + + depid = env_batch.submit_jobs(case) + + _submit_single_job.assert_any_call( + case, + "case.run", + skip_pnl=False, + resubmit_immediate=False, + dep_jobs=[], + allow_fail=False, + no_batch=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ) + assert depid == {"case.run": "0"} + + def test_get_job_deps(self): + # no jobs + job_deps = get_job_deps("", {}) + + assert job_deps == [] + + # dependency doesn't exist + job_deps = get_job_deps("case.run", {}) + + assert job_deps == [] + + job_deps = get_job_deps("case.run", {"case.run": 0}) + + assert job_deps == [ + "0", + ] + + job_deps = get_job_deps( + "case.run case.post_run_io", {"case.run": 0, "case.post_run_io": 1} + ) + + assert job_deps == ["0", "1"] + + # old syntax + job_deps = get_job_deps("case.run and case.post_run_io", {"case.run": 0}) + + assert job_deps == [ + "0", + ] + + # old syntax + job_deps = get_job_deps( + "(case.run and case.post_run_io) or case.test", {"case.run": 0} + ) + + assert job_deps == [ + "0", + ] + + job_deps = get_job_deps("", {}, user_prereq="2") + + assert job_deps == [ + "2", + ] + + job_deps = get_job_deps("", {}, prev_job="1") + + assert job_deps == [ + "1", + ] + + def test_get_submit_args_job_queue(self): + with tempfile.NamedTemporaryFile() as tfile: + tfile.write( + b""" + +
+ These variables may be changed anytime during a run, they + control arguments to the batch submit command. +
+ + + char + miller_slurm,nersc_slurm,lc_slurm,moab,pbs,lsf,slurm,cobalt,cobalt_theta,none + The batch system type to use for this machine. + + + + + logical + TRUE,FALSE + whether the PROJECT value is required on this machine + + + + + -w default + -w short + -w long + -A $VARIABLE_THAT_DOES_NOT_EXIST + + + long + short + + +
+""" + ) + + tfile.seek(0) + + batch = EnvBatch(infile=tfile.name) + + case = mock.MagicMock() + + case.get_value.side_effect = ("long", "long", None) + + case.get_resolved_value.return_value = None + + case.filename = mock.PropertyMock(return_value=tfile.name) + + submit_args = batch.get_submit_args(case, ".case.run") + + expected_args = " -w default -w long" + assert submit_args == expected_args + + @mock.patch.dict(os.environ, {"TEST": "GOOD"}) + def test_get_submit_args(self): + with tempfile.NamedTemporaryFile() as tfile: + tfile.write( + b""" + +
+ These variables may be changed anytime during a run, they + control arguments to the batch submit command. +
+ + + char + miller_slurm,nersc_slurm,lc_slurm,moab,pbs,lsf,slurm,cobalt,cobalt_theta,none + The batch system type to use for this machine. + + + + + logical + TRUE,FALSE + whether the PROJECT value is required on this machine + + + + squeue + sbatch + scancel + #SBATCH + (\\d+)$ + --dependency=afterok:jobid + --dependency=afterany:jobid + : + %H:%M:%S + --mail-user + --mail-type + none, all, begin, end, fail + + + + + + + + + --job-name={{ job_id }} + --nodes={{ num_nodes }} + --output={{ job_id }}.%j + --exclusive + + + + + -w docker + + + long + short + + +
+""" + ) + + tfile.seek(0) + + batch = EnvBatch(infile=tfile.name) + + case = mock.MagicMock() + + case.get_value.side_effect = [ + os.path.dirname(tfile.name), + "00:30:00", + "long", + "CIME", + "/test", + ] + + def my_get_resolved_value(val): + return val + + # value for --path + case.get_resolved_value.side_effect = my_get_resolved_value + + case.filename = mock.PropertyMock(return_value=tfile.name) + + submit_args = batch.get_submit_args(case, ".case.run") + + expected_args = " --time 00:30:00 -p long --account CIME --no-arg --path /test -w docker" + + assert submit_args == expected_args + + @mock.patch("CIME.XML.env_batch.EnvBatch.get") + def test_get_queue_specs(self, get): + node = mock.MagicMock() + + batch = EnvBatch() + + get.side_effect = [ + "1", + "1", + None, + None, + "case.run", + "08:00:00", + "05:00:00", + "12:00:00", + "false", + ] + + ( + nodemin, + nodemax, + jobname, + walltimedef, + walltimemin, + walltimemax, + jobmin, + jobmax, + strict, + ) = batch.get_queue_specs(node) + + self.assertTrue(nodemin == 1) + self.assertTrue(nodemax == 1) + self.assertTrue(jobname == "case.run") + self.assertTrue(walltimedef == "08:00:00") + self.assertTrue(walltimemin == "05:00:00") + self.assertTrue(walltimemax == "12:00:00") + self.assertTrue(jobmin == None) + self.assertTrue(jobmax == None) + self.assertFalse(strict) + + @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") + # nodemin, nodemax, jobname, walltimemin, walltimemax, jobmin, jobmax, strict + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) + @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") + @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") + def test_set_job_defaults_honor_walltimemax( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): + case = mock.MagicMock() + + batch_jobs = [ + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) + ] + + def get_value(*args, **kwargs): + if args[0] == "USER_REQUESTED_WALLTIME": + return "20:00:00" + + return mock.MagicMock() + + case.get_value = get_value + + case.get_env.return_value.get_jobs.return_value = ["case.run"] + + batch = EnvBatch() + + batch.set_job_defaults(batch_jobs, case) + + env_workflow = case.get_env.return_value + + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "20:00:00", subgroup="case.run" + ) + + @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") + # nodemin, nodemax, jobname, walltimemin, walltimemax, jobmin, jobmax, strict + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) + @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") + @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") + def test_set_job_defaults_honor_walltimemin( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): + case = mock.MagicMock() + + batch_jobs = [ + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) + ] + + def get_value(*args, **kwargs): + if args[0] == "USER_REQUESTED_WALLTIME": + return "05:00:00" + + return mock.MagicMock() + + case.get_value = get_value + + 
case.get_env.return_value.get_jobs.return_value = ["case.run"] + + batch = EnvBatch() + + batch.set_job_defaults(batch_jobs, case) + + env_workflow = case.get_env.return_value + + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "05:00:00", subgroup="case.run" + ) + + @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") + # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) + @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") + @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") + def test_set_job_defaults_user_walltime( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): + case = mock.MagicMock() + + batch_jobs = [ + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) + ] + + def get_value(*args, **kwargs): + if args[0] == "USER_REQUESTED_WALLTIME": + return "10:00:00" + + return mock.MagicMock() + + case.get_value = get_value + + case.get_env.return_value.get_jobs.return_value = ["case.run"] + + batch = EnvBatch() + + batch.set_job_defaults(batch_jobs, case) + + env_workflow = case.get_env.return_value + + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "10:00:00", subgroup="case.run" + ) + + @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") + # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "05:00:00", + None, + 1, + 1, + False, + ], + ) + @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") + @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") + def test_set_job_defaults_walltimemax_none( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): + case = mock.MagicMock() + + batch_jobs = [ + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) + ] + + def get_value(*args, **kwargs): + if args[0] == "USER_REQUESTED_WALLTIME": + return "08:00:00" + + return mock.MagicMock() + + case.get_value = get_value + + case.get_env.return_value.get_jobs.return_value = ["case.run"] + + batch = EnvBatch() + + batch.set_job_defaults(batch_jobs, case) + + env_workflow = case.get_env.return_value + + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "08:00:00", subgroup="case.run" + ) + + @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") + # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + None, + "12:00:00", + 1, + 1, + False, + ], + ) + @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") + @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") + def test_set_job_defaults_walltimemin_none( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): + case = mock.MagicMock() + + batch_jobs = [ + ( + "case.run", 
+ { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) + ] + + def get_value(*args, **kwargs): + if args[0] == "USER_REQUESTED_WALLTIME": + return "08:00:00" + + return mock.MagicMock() + + case.get_value = get_value + + case.get_env.return_value.get_jobs.return_value = ["case.run"] + + batch = EnvBatch() + + batch.set_job_defaults(batch_jobs, case) + + env_workflow = case.get_env.return_value + + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "08:00:00", subgroup="case.run" + ) + + @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") + # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) + @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") + @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") + def test_set_job_defaults_walltimedef( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): + case = mock.MagicMock() + + batch_jobs = [ + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) + ] + + def get_value(*args, **kwargs): + if args[0] == "USER_REQUESTED_WALLTIME": + return None + + return mock.MagicMock() + + case.get_value = get_value + + case.get_env.return_value.get_jobs.return_value = ["case.run"] + + batch = EnvBatch() + + batch.set_job_defaults(batch_jobs, case) + + env_workflow = case.get_env.return_value + + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "10:00:00", subgroup="case.run" + ) + + @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") + # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + None, + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) + @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") + @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") + def test_set_job_defaults( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): + case = mock.MagicMock() + + batch_jobs = [ + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) + ] + + def get_value(*args, **kwargs): + if args[0] == "USER_REQUESTED_WALLTIME": + return None + + return mock.MagicMock() + + case.get_value = get_value + + case.get_env.return_value.get_jobs.return_value = ["case.run"] + + batch = EnvBatch() + + batch.set_job_defaults(batch_jobs, case) + + env_workflow = case.get_env.return_value + + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "12:00:00", subgroup="case.run" + ) + + def test_get_job_overrides_mpi_serial_single_task(self): + """Test that get_job_overrides gives expected results for an mpi-serial case with a single task""" + task_count = 1 + thread_count = 1 + mem_per_task = 10 + tasks_per_node = task_count + max_mem = 235 + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + 
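+        # For a single mpi-serial task the expected memory request is just
+        # mem_per_task and no mpirun launcher string is expected.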
self.assertEqual(overrides["mem_per_node"], mem_per_task) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count) + self.assertEqual(overrides["mpirun"], "") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def test_get_job_overrides_two_tasks(self): + """Test that get_job_overrides gives expected results for a case with two tasks""" + task_count = 2 + thread_count = 1 + mem_per_task = 10 + tasks_per_node = task_count + max_mem = 235 + # import pdb; pdb.set_trace() + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + self.assertEqual(overrides["mem_per_node"], mem_per_task * task_count) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count) + self.assertEqual(overrides["mpirun"], "mpirun") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def test_get_job_overrides_sixteen_tasks(self): + """Test that get_job_overrides gives expected results for a case with sixteen tasks""" + task_count = 16 + thread_count = 1 + mem_per_task = 10 + tasks_per_node = task_count + max_mem = 235 + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + self.assertEqual(overrides["mem_per_node"], int(max_mem * task_count / 128)) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count) + self.assertEqual(overrides["mpirun"], "mpirun") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def test_get_job_overrides_twentyfive_tasks(self): + """Test that get_job_overrides gives expected results for a case with 25 tasks""" + # This test is mportant for the CTSM regional amazon case that can use 25 tasks + task_count = 25 + thread_count = 1 + mem_per_task = 10 + tasks_per_node = task_count + max_mem = 235 + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + self.assertEqual(overrides["mem_per_node"], int(max_mem * task_count / 128)) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count) + self.assertEqual(overrides["mpirun"], "mpirun") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def test_get_job_overrides_eight_tasks_eight_threads(self): + """Test that get_job_overrides gives expected results for a case with 8 tasks and 8 threads""" + task_count = 8 + thread_count = 8 + mem_per_task = 10 + tasks_per_node = task_count + max_mem = 235 + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + self.assertEqual(overrides["mem_per_node"], int(max_mem / 2)) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count * thread_count) + self.assertEqual(overrides["mpirun"], "mpirun") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def test_get_job_overrides_sixtyfour_tasks(self): + """Test that get_job_overrides gives expected results for a case with 64 tasks""" + task_count = 64 + thread_count = 1 + mem_per_task = 10 + tasks_per_node = 
task_count + max_mem = 235 + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + self.assertEqual(overrides["mem_per_node"], int(max_mem / 2)) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count) + self.assertEqual(overrides["mpirun"], "mpirun") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def test_get_job_overrides_ninetysix_tasks(self): + """Test that get_job_overrides gives expected results for a case with ninetysix tasks""" + task_count = 96 + thread_count = 1 + mem_per_task = 10 + tasks_per_node = task_count + max_mem = 235 + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + self.assertEqual(overrides["mem_per_node"], int(max_mem * 3 / 4)) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count) + self.assertEqual(overrides["mpirun"], "mpirun") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def test_get_job_overrides_hundredtwentyseven_tasks(self): + """Test that get_job_overrides gives expected results for a case with 127 tasks""" + task_count = 127 + thread_count = 1 + mem_per_task = 10 + tasks_per_node = task_count + max_mem = 235 + overrides = self.run_get_job_overrides( + task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ) + self.assertEqual(overrides["mem_per_node"], int(max_mem * task_count / 128)) + self.assertEqual(overrides["tasks_per_node"], task_count) + self.assertEqual(overrides["max_tasks_per_node"], task_count) + self.assertEqual(overrides["mpirun"], "mpirun") + self.assertEqual(overrides["thread_count"], str(thread_count)) + self.assertEqual(overrides["num_nodes"], 1) + + def run_get_job_overrides( + self, task_count, thread_count, mem_per_task, tasks_per_node, max_mem + ): + """Setup and run get_job_overrides so it can be tested from a variety of tests""" + + env_batch = EnvBatch() + # NOTE: GPU_TYPE is assumed to be none, so no GPU settings will be done + mpilib = "mpich" + if task_count == 1: + mpilib = "mpi-serial" + case = FakeCaseWWorkflow( + compiler="intel", + mpilib=mpilib, + debug="FALSE", + comp_interface="nuopc", + task_count=task_count, + thread_count=thread_count, + mem_per_task=mem_per_task, + tasks_per_node=tasks_per_node, + max_mem=max_mem, + ) + totalpes = task_count * thread_count + + case.set_value("TOTALPES", totalpes) + case.set_value("MAX_TASKS_PER_NODE", 128) + case.set_value("MEM_PER_TASK", mem_per_task) + case.set_value("MAX_MEM_PER_NODE", max_mem) + + case.set_value("MAX_GPUS_PER_NODE", 4) + case.set_value("NGPUS_PER_NODE", 0) + overrides = env_batch.get_job_overrides("case.test", case) + self.assertEqual(overrides["ngpus_per_node"], 0) + + return overrides + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_xml_env_mach_specific.py b/CIME/tests/test_unit_xml_env_mach_specific.py new file mode 100644 index 00000000000..a900034fad8 --- /dev/null +++ b/CIME/tests/test_unit_xml_env_mach_specific.py @@ -0,0 +1,345 @@ +#!/usr/bin/env python3 + +import unittest +import tempfile +from unittest import mock + +from CIME import utils +from CIME.XML.env_mach_specific import EnvMachSpecific + +# pylint: disable=unused-argument + + +class TestXMLEnvMachSpecific(unittest.TestCase): + def test_aprun_get_args(self): + 
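+        # The temp file below holds a minimal machine config for EnvMachSpecific;
+        # "-n {{ total_tasks }}" is expected to expand to "-n 4" (position "global"),
+        # while "-j 10" and "--oversubscribe" keep position "per".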
with tempfile.NamedTemporaryFile() as temp: + temp.write( + b""" + +
+ These variables control the machine dependent environment including + the paths to compilers and libraries external to cime such as netcdf, + environment variables for use in the running job should also be set here. +
+ + + char + executable name + + + char + redirect for job output + + + + + 1 + 1 + + + override + aprun + + -j 10 + -n {{ total_tasks }} + --oversubscribe + + +
+""" + ) + temp.seek(0) + + mach_specific = EnvMachSpecific(infile=temp.name) + + attribs = {"compiler": "gnu", "mpilib": "openmpi", "threaded": False} + + case = mock.MagicMock() + + type(case).total_tasks = mock.PropertyMock(return_value=4) + + extra_args = mach_specific.get_aprun_args(case, attribs, "case.run") + + expected_args = { + "-j 10": {"position": "per"}, + "--oversubscribe": {"position": "per"}, + "-n 4": {"position": "global"}, + } + + assert extra_args == expected_args + + def test_get_aprun_mode_not_valid(self): + with tempfile.NamedTemporaryFile() as temp: + temp.write( + b""" + +
+ These variables control the machine dependent environment including + the paths to compilers and libraries external to cime such as netcdf, + environment variables for use in the running job should also be set here. +
+ + + char + executable name + + + char + redirect for job output + + + + + 1 + 1 + + + custom + aprun + + -n {{ total_tasks }} + --oversubscribe + + +
+""" + ) + temp.seek(0) + + mach_specific = EnvMachSpecific(infile=temp.name) + + attribs = {"compiler": "gnu", "mpilib": "openmpi", "threaded": False} + + with self.assertRaises(utils.CIMEError) as e: + mach_specific.get_aprun_mode(attribs) + + assert ( + str(e.exception) + == "ERROR: Value 'custom' for \"aprun_mode\" is not valid, options are 'ignore, default, override'" + ) + + def test_get_aprun_mode_user_defined(self): + with tempfile.NamedTemporaryFile() as temp: + temp.write( + b""" + +
+ These variables control the machine dependent environment including + the paths to compilers and libraries external to cime such as netcdf, + environment variables for use in the running job should also be set here. +
+ + + char + executable name + + + char + redirect for job output + + + + + 1 + 1 + + + default + aprun + + -n {{ total_tasks }} + --oversubscribe + + +
+""" + ) + temp.seek(0) + + mach_specific = EnvMachSpecific(infile=temp.name) + + attribs = {"compiler": "gnu", "mpilib": "openmpi", "threaded": False} + + aprun_mode = mach_specific.get_aprun_mode(attribs) + + assert aprun_mode == "default" + + def test_get_aprun_mode_default(self): + with tempfile.NamedTemporaryFile() as temp: + temp.write( + b""" + +
+ These variables control the machine dependent environment including + the paths to compilers and libraries external to cime such as netcdf, + environment variables for use in the running job should also be set here. +
+ + + char + executable name + + + char + redirect for job output + + + + + 1 + 1 + + + aprun + + -n {{ total_tasks }} + --oversubscribe + + +
+""" + ) + temp.seek(0) + + mach_specific = EnvMachSpecific(infile=temp.name) + + attribs = {"compiler": "gnu", "mpilib": "openmpi", "threaded": False} + + aprun_mode = mach_specific.get_aprun_mode(attribs) + + assert aprun_mode == "default" + + def test_find_best_mpirun_match(self): + with tempfile.NamedTemporaryFile() as temp: + temp.write( + b""" + +
+ These variables control the machine dependent environment including + the paths to compilers and libraries external to cime such as netcdf, + environment variables for use in the running job should also be set here. +
+ + + char + executable name + + + char + redirect for job output + + + + + 1 + 1 + + + aprun + + -n {{ total_tasks }} + --oversubscribe + + + + srun + +
+""" + ) + temp.seek(0) + + mach_specific = EnvMachSpecific(infile=temp.name) + + mock_case = mock.MagicMock() + + type(mock_case).total_tasks = mock.PropertyMock(return_value=4) + + attribs = {"compiler": "gnu", "mpilib": "openmpi", "threaded": False} + + executable, args, run_exe, run_misc_suffix = mach_specific.get_mpirun( + mock_case, attribs, "case.run" + ) + + assert executable == "srun" + assert args == [] + assert run_exe is None + assert run_misc_suffix is None + + def test_get_mpirun(self): + with tempfile.NamedTemporaryFile() as temp: + temp.write( + b""" + +
+ These variables control the machine dependent environment including + the paths to compilers and libraries external to cime such as netcdf, + environment variables for use in the running job should also be set here. +
+ + + char + executable name + + + char + redirect for job output + + + + + 1 + 1 + + + aprun + + -n {{ total_tasks }} + --oversubscribe + + +
+""" + ) + temp.seek(0) + + mach_specific = EnvMachSpecific(infile=temp.name) + + mock_case = mock.MagicMock() + + type(mock_case).total_tasks = mock.PropertyMock(return_value=4) + + attribs = {"compiler": "gnu", "mpilib": "openmpi", "threaded": False} + + executable, args, run_exe, run_misc_suffix = mach_specific.get_mpirun( + mock_case, attribs, "case.run" + ) + + assert executable == "aprun" + assert args == ["-n 4", "--oversubscribe"] + assert run_exe is None + assert run_misc_suffix is None + + @mock.patch("CIME.XML.env_mach_specific.EnvMachSpecific.get_optional_child") + @mock.patch("CIME.XML.env_mach_specific.EnvMachSpecific.text") + @mock.patch.dict("os.environ", {"TEST_VALUE": "/testexec"}) + def test_init_path(self, text, get_optional_child): + text.return_value = "$ENV{TEST_VALUE}/init/python" + + mach_specific = EnvMachSpecific() + + value = mach_specific.get_module_system_init_path("python") + + assert value == "/testexec/init/python" + + @mock.patch("CIME.XML.env_mach_specific.EnvMachSpecific.get_optional_child") + @mock.patch("CIME.XML.env_mach_specific.EnvMachSpecific.text") + @mock.patch.dict("os.environ", {"TEST_VALUE": "/testexec"}) + def test_cmd_path(self, text, get_optional_child): + text.return_value = "$ENV{TEST_VALUE}/python" + + mach_specific = EnvMachSpecific() + + value = mach_specific.get_module_system_cmd_path("python") + + assert value == "/testexec/python" diff --git a/CIME/tests/test_unit_xml_grids.py b/CIME/tests/test_unit_xml_grids.py new file mode 100644 index 00000000000..17c01e355de --- /dev/null +++ b/CIME/tests/test_unit_xml_grids.py @@ -0,0 +1,164 @@ +import os +import io +import unittest +import tempfile +from contextlib import contextmanager +from pathlib import Path +from unittest import mock + +from CIME.utils import CIMEError +from CIME.XML.grids import Grids + + +TEST_CONFIG = """ + + + null + null + null + null + rx1 + r05 + r05 + rx1 + r05 + r05 + null + null + null + null + null + + + + T62 + T62 + gx3v7 + rx1 + null + null + gx3v7 + + + 0.47x0.63 + 0.47x0.63 + gx1v6 + r05 + null + null + gx1v6 + + + 0.23x0.31 + 0.23x0.31 + gx1v6 + r05 + null + null + gx1v6 + + + T31 + T31 + gx3v7 + rx1 + null + null + gx3v7 + + + 0.9x1.25 + 0.9x1.25 + gx1v6 + r05 + null + null + gx1v6 + + + 1.9x2.5 + 1.9x2.5 + gx1v6 + r05 + null + null + + +""" + + +def write_config_grids(tempdir, config): + config_grids_path = os.path.join(tempdir, "config_grids.xml") + + with open(config_grids_path, "w") as fd: + fd.write(TEST_CONFIG) + + return config_grids_path + + +class TestXMLGrids(unittest.TestCase): + def test_read_config_grids(self): + with tempfile.TemporaryDirectory() as tempdir: + config_grids_path = write_config_grids(tempdir, TEST_CONFIG) + + grids = Grids(config_grids_path) + + lname = grids._read_config_grids("T62_g37", "DATM") + + assert lname == "a%T62_l%T62_oi%gx3v7_r%rx1_g%null_w%null_z%null_m%gx3v7" + + with self.assertRaisesRegex( + CIMEError, "ERROR: grid alias T62_g37 not valid for compset SCREAM" + ): + grids._read_config_grids("T62_g37", "SCREAM") + + lname = grids._read_config_grids("f02_g16", "DATM") + + assert ( + lname + == "a%0.23x0.31_l%0.23x0.31_oi%gx1v6_r%r05_g%null_w%null_z%null_m%gx1v6" + ) + + lname = grids._read_config_grids("f05_g16", "SCREAM") + + assert ( + lname + == "a%0.47x0.63_l%0.47x0.63_oi%gx1v6_r%r05_g%null_w%null_z%null_m%gx1v6" + ) + + with self.assertRaisesRegex( + CIMEError, "ERROR: grid alias f05_g16 not valid for compset DATM" + ): + grids._read_config_grids("f05_g16", "DATM") + + lname = 
grids._read_config_grids("T31_g37", "_DROF") + + assert lname == "a%T31_l%T31_oi%gx3v7_r%rx1_g%null_w%null_z%null_m%gx3v7" + + lname = grids._read_config_grids("f09_g16", "DATM3TEST") + + assert ( + lname + == "a%0.9x1.25_l%0.9x1.25_oi%gx1v6_r%r05_g%null_w%null_z%null_m%gx1v6" + ) + + with self.assertRaisesRegex( + CIMEError, "ERROR: grid alias f09_g16 not valid for compset DATM2TEST" + ): + grids._read_config_grids("f09_g16", "DATM2TEST") + + lname = grids._read_config_grids("f19_g16", "DATM") + + assert ( + lname + == "a%1.9x2.5_l%1.9x2.5_oi%gx1v6_r%r05_g%null_w%null_z%null_m%gx1v6" + ) + + lname = grids._read_config_grids( + "f19_g16", "DATM", atmnlev="2", lndnlev="4" + ) + + assert ( + lname + == "a%1.9x2.5z2_l%1.9x2.5z4_oi%gx1v6_r%r05_g%null_w%null_z%null_m%gx1v6" + ) diff --git a/CIME/tests/test_unit_xml_machines.py b/CIME/tests/test_unit_xml_machines.py new file mode 100644 index 00000000000..7d0eacfdc62 --- /dev/null +++ b/CIME/tests/test_unit_xml_machines.py @@ -0,0 +1,348 @@ +import io +import os +import re +import unittest +from contextlib import redirect_stdout + +from CIME.XML.machines import Machines + +MACHINE_TEST_XML = """ + + Some default machine definition + ubuntu + gnu,intel + mpi-serial + custom + /data/timings + testing + /data/scratch + /data/inputdata + /data/inputdata/atm/datm7 + $CIME_OUTPUT_ROOT/archive/$CASE + /data/baselines/$COMPILER + /data/tools/cprnc + 8 + e3sm_developer + 4 + slurm + developers + 8 + 8 + FALSE + + srun + + -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit + -c $SHELL{echo 128/ {{ tasks_per_node }} |bc} + $SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} + -m plane={{ tasks_per_node }} + + + + /opt/ubuntu/pe/modules/default/init/perl.pm + /opt/ubuntu/pe/modules/default/init/python.py + /opt/ubuntu/pe/modules/default/init/sh + /opt/ubuntu/pe/modules/default/init/csh + /opt/ubuntu/pe/modules/default/bin/modulecmd perl + /opt/ubuntu/pe/modules/default/bin/modulecmd python + module + module + + ubuntupe + ubuntu-mpich + ubuntu-parallel-netcdf + ubuntu-hdf5-parallel + ubuntu-hdf5 + ubuntu-netcdf + ubuntu-netcdf-hdf5parallel + ubuntupe/2.7.15 + + + PrgEnv-ubuntu + PrgEnv-gnu + PrgEnv-gnu/8.3.3 + gcc/12.1.0 + + + ubuntu-mpich/8.1.16 + ubuntu-hdf5-parallel/1.12.1.3 + ubuntu-netcdf-hdf5parallel/4.8.1.3 + ubuntu-parallel-netcdf/1.12.2.3 + + + + $CIME_OUTPUT_ROOT/$CASE/run + $CIME_OUTPUT_ROOT/$CASE/bld + 0.1 + 1000 + + /usr/lib/perl5/5.26.2 + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + $SHELL{dirname $(dirname $(which pnetcdf_version))} + + + 128M + + + cores + + + + Some default machine definition + ubuntu + gnu,intel + mpi-serial + custom + /data/timings + testing + /data/scratch + /data/inputdata + /data/inputdata/atm/datm7 + $CIME_OUTPUT_ROOT/archive/$CASE + /data/baselines/$COMPILER + /data/tools/cprnc + 8 + e3sm_developer + 4 + none + developers + 8 + 8 + FALSE + + srun + + -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit + -c $SHELL{echo 128/ {{ tasks_per_node }} |bc} + $SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} + -m plane={{ tasks_per_node }} + + + $CIME_OUTPUT_ROOT/$CASE/run + $CIME_OUTPUT_ROOT/$CASE/bld + 0.1 + 1000 + + /usr/lib/perl5/5.26.2 + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + $SHELL{dirname $(dirname $(which 
pnetcdf_version))} + + + 128M + + + cores + + + + Some default machine definition + ubuntu + gnu,gnugpu,intel + mpi-serial,openmpi,mpich2 + custom + /data/timings + testing + /data/scratch + /data/inputdata + /data/inputdata/atm/datm7 + $CIME_OUTPUT_ROOT/archive/$CASE + /data/baselines/$COMPILER + /data/tools/cprnc + 8 + e3sm_developer + 4 + slurm + developers + 8 + 8 + FALSE + + srun + + -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit + -c $SHELL{echo 128/ {{ tasks_per_node }} |bc} + $SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} + -m plane={{ tasks_per_node }} + + + + /opt/ubuntu/pe/modules/default/init/perl.pm + /opt/ubuntu/pe/modules/default/init/python.py + /opt/ubuntu/pe/modules/default/init/sh + /opt/ubuntu/pe/modules/default/init/csh + /opt/ubuntu/pe/modules/default/bin/modulecmd perl + /opt/ubuntu/pe/modules/default/bin/modulecmd python + module + module + + + ubuntupe/2.7.15 + + + debug + + > + PrgEnv-intel/8.3.3 + + + PrgEnv-gnu/8.3.3 + + + PrgEnv-gnu/8.3.3 + PrgEnv-gnugpu/8.3.3 + + > + PrgEnv-gnugpu/8.3.3 + + + ubuntu-mpich/8.1.16 + + + + $CIME_OUTPUT_ROOT/$CASE/run + $CIME_OUTPUT_ROOT/$CASE/bld + 0.1 + 1000 + + /usr/lib/perl5/5.26.2 + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + $SHELL{dirname $(dirname $(which pnetcdf_version))} + + + 128M + + + cores + + + cores + + + gpu + + + 128M + + + +""" + + +class TestUnitXMLMachines(unittest.TestCase): + def setUp(self): + Machines._FILEMAP = {} + # read_only=False for github testing + # MACHINE IS SET BELOW TO USE DEFINITION IN "MACHINE_TEST_XML" + self.machine = Machines() + + self.machine.read_fd(io.StringIO(MACHINE_TEST_XML)) + + def test_get_resolved_environment_variable(self): + self.machine.set_machine("multi-compiler") + + assert os.environ["HOME"] == self.machine._get_resolved_environment_variable( + "$ENV{HOME}" + ) + assert "" == self.machine._get_resolved_environment_variable("") + + pattern = r"Failed to resolve '\$SHELL{\./xmlquery MODEL}' with: ERROR: Command: '\./xmlquery MODEL' failed with error '/bin/sh: 1: \./xmlquery: not found' from dir '.*/cime'" + output = self.machine._get_resolved_environment_variable( + "$SHELL{./xmlquery MODEL}" + ) + + assert re.match(pattern, output) is not None + + def test_filter_children_by_compiler(self): + self.machine.set_machine("multi-compiler") + + module_system_node = self.machine.get_child("module_system") + + def command_formatter(x): + return ( + f"{x.attrib['name']}" + if x.text is None + else f"{x.attrib['name']} {x.text}" + ) + + nodes = list( + self.machine._filter_children_by_compiler( + "modules", "command", "intel", command_formatter, module_system_node + ) + ) + + assert len(nodes) == 4 + assert nodes[0] == ("", ["purge", "load ubuntupe/2.7.15"]) + assert nodes[1] == ("DEBUG='TRUE'", ["load debug"]) + assert nodes[2] == ("", ["load PrgEnv-intel/8.3.3"]) + assert nodes[3] == ("", ["load ubuntu-mpich/8.1.16"]) + + nodes = list( + self.machine._filter_children_by_compiler( + "modules", "command", "gnugpu", command_formatter, module_system_node + ) + ) + + assert len(nodes) == 6 + assert nodes[0] == ("", ["purge", "load ubuntupe/2.7.15"]) + assert nodes[1] == ("DEBUG='TRUE'", ["load debug"]) + assert nodes[2] == ("", ["load PrgEnv-gnu/8.3.3"]) + assert nodes[3] == ("", ["load PrgEnv-gnu/8.3.3", "load PrgEnv-gnugpu/8.3.3"]) + assert nodes[4] == ("", ["load PrgEnv-gnugpu/8.3.3"]) + assert nodes[5] == ("", ["load 
ubuntu-mpich/8.1.16"]) + + def test_print_values(self): + self.machine.set_machine("default") + + with io.StringIO() as buffer, redirect_stdout(buffer): + self.machine.print_values() + + output = buffer.getvalue() + + assert "ubuntu" in output + assert "gnu,intel" in output + assert "mpi-serial" in output + assert "unload ubuntupe" not in output + + def test_print_values_details(self): + self.machine.set_machine("default") + + with io.StringIO() as buffer, redirect_stdout(buffer): + self.machine.print_values("intel") + + output = buffer.getvalue() + + assert "ubuntu" in output + assert "gnu,intel" in output + assert "mpi-serial" in output + assert "unload ubuntupe" in output + assert "unload PrgEnv-ubuntu" not in output + assert "load ubuntu-mpich/8.1.16" in output + assert "(with BUILD_THREADED='TRUE')" in output + assert "PERL5LIB: /usr/lib/perl5/5.26.2" in output + assert "OMP_PLACES: cores" not in output + + def test_has_batch_system(self): + self.machine.set_machine("default") + + assert self.machine.has_batch_system() + + self.machine.set_machine("default-no-batch") + + assert not self.machine.has_batch_system() + + def test_is_valid_MPIlib(self): + self.machine.set_machine("default") + + assert self.machine.is_valid_MPIlib("mpi-serial") + + assert not self.machine.is_valid_MPIlib("mpi-bogus") + + def test_is_valid_compiler(self): + self.machine.set_machine("default") + + assert self.machine.is_valid_compiler("gnu") + + assert not self.machine.is_valid_compiler("bogus") diff --git a/CIME/tests/test_unit_xml_namelist_definition.py b/CIME/tests/test_unit_xml_namelist_definition.py new file mode 100644 index 00000000000..96cc9f6a527 --- /dev/null +++ b/CIME/tests/test_unit_xml_namelist_definition.py @@ -0,0 +1,42 @@ +import tempfile +import unittest + +from CIME.XML.namelist_definition import NamelistDefinition + +# pylint: disable=protected-access + + +class TestXMLNamelistDefinition(unittest.TestCase): + def test_set_nodes(self): + test_data = """ + + + + + char + test + + + char + test + +""" + + with tempfile.NamedTemporaryFile() as temp: + temp.write(test_data.encode()) + temp.flush() + + nmldef = NamelistDefinition(temp.name) + + nmldef.set_nodes() + + assert len(nmldef._entry_nodes) == 2 + assert nmldef._entry_ids == ["test1", "test2"] + assert len(nmldef._nodes) == 2 + assert nmldef._entry_types == {"test1": "char", "test2": "char"} + assert nmldef._valid_values == {"test1": None, "test2": None} + assert nmldef._group_names == {"test1": None, "test2": None} + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_xml_tests.py b/CIME/tests/test_unit_xml_tests.py new file mode 100644 index 00000000000..a79bb3b9c0a --- /dev/null +++ b/CIME/tests/test_unit_xml_tests.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 + +import re +import unittest +import tempfile +from pathlib import Path +from unittest import mock + +from CIME.XML.tests import Tests + + +class TestXMLTests(unittest.TestCase): + def setUp(self): + # reset file caching + Tests._FILEMAP = {} + + # skip hard to mock function call + @mock.patch( + "CIME.SystemTests.system_tests_compare_two.SystemTestsCompareTwo._setup_cases_if_not_yet_done" + ) + def test_support_single_exe(self, _setup_cases_if_not_yet_done): + with tempfile.TemporaryDirectory() as tdir: + test_file = Path(tdir) / "sms.py" + + test_file.touch(exist_ok=True) + + caseroot = Path(tdir) / "caseroot1" + + caseroot.mkdir(exist_ok=True) + + case = mock.MagicMock() + + case.get_compset_components.return_value = () + + 
case.get_value.side_effect = ( + "SMS", + tdir, + f"{caseroot}", + "SMS.f19_g16.S", + "cpl", + "SMS.f19_g16.S", + f"{caseroot}", + "SMS.f19_g16.S", + ) + + tests = Tests() + + tests.support_single_exe(case) + + # skip hard to mock function call + @mock.patch( + "CIME.SystemTests.system_tests_compare_two.SystemTestsCompareTwo._setup_cases_if_not_yet_done" + ) + def test_support_single_exe_error(self, _setup_cases_if_not_yet_done): + with tempfile.TemporaryDirectory() as tdir: + test_file = Path(tdir) / "erp.py" + + test_file.touch(exist_ok=True) + + caseroot = Path(tdir) / "caseroot1" + + caseroot.mkdir(exist_ok=True) + + case = mock.MagicMock() + + case.get_compset_components.return_value = () + + case.get_value.side_effect = ( + "ERP", + tdir, + f"{caseroot}", + "ERP.f19_g16.S", + "cpl", + None, + "ERP.f19_g16.S", + f"{caseroot}", + "ERP.f19_g16.S", + ) + + tests = Tests() + + with self.assertRaises(Exception) as e: + tests.support_single_exe(case) + + assert ( + re.search( + r"does not support the '--single-exe' option as it requires separate builds", + f"{e.exception}", + ) + is not None + ), f"{e.exception}" + + +if __name__ == "__main__": + unittest.main() diff --git a/scripts/tests/user_mods_test1/include_user_mods b/CIME/tests/user_mods_test1/include_user_mods similarity index 100% rename from scripts/tests/user_mods_test1/include_user_mods rename to CIME/tests/user_mods_test1/include_user_mods diff --git a/scripts/tests/user_mods_test1/user_nl_comp b/CIME/tests/user_mods_test1/user_nl_comp similarity index 100% rename from scripts/tests/user_mods_test1/user_nl_comp rename to CIME/tests/user_mods_test1/user_nl_comp diff --git a/scripts/tests/user_mods_test1/user_nl_cpl b/CIME/tests/user_mods_test1/user_nl_cpl similarity index 100% rename from scripts/tests/user_mods_test1/user_nl_cpl rename to CIME/tests/user_mods_test1/user_nl_cpl diff --git a/scripts/tests/user_mods_test2/SourceMods/src.drv/somefile.F90 b/CIME/tests/user_mods_test2/SourceMods/src.drv/somefile.F90 similarity index 100% rename from scripts/tests/user_mods_test2/SourceMods/src.drv/somefile.F90 rename to CIME/tests/user_mods_test2/SourceMods/src.drv/somefile.F90 diff --git a/scripts/tests/user_mods_test2/user_nl_cpl b/CIME/tests/user_mods_test2/user_nl_cpl similarity index 100% rename from scripts/tests/user_mods_test2/user_nl_cpl rename to CIME/tests/user_mods_test2/user_nl_cpl diff --git a/CIME/tests/user_mods_test3/shell_commands b/CIME/tests/user_mods_test3/shell_commands new file mode 100755 index 00000000000..476661b9b56 --- /dev/null +++ b/CIME/tests/user_mods_test3/shell_commands @@ -0,0 +1,8 @@ +#!/usr/bin/sh +pioversion=`./xmlquery --value PIO_VERSION` +if [[ "$pioversion" -eq "1" ]] +then + ./xmlchange PIO_VERSION=2 +else + ./xmlchange PIO_VERSION=1 +fi diff --git a/CIME/tests/utils.py b/CIME/tests/utils.py new file mode 100644 index 00000000000..0f75fa5ad24 --- /dev/null +++ b/CIME/tests/utils.py @@ -0,0 +1,449 @@ +import io +import os +import tempfile +import signal +import shutil +import sys +import time +import contextlib +from collections.abc import Iterable + +from CIME import utils +from CIME import test_status +from CIME.utils import expect + +MACRO_PRESERVE_ENV = [ + "ADDR2LINE", + "AR", + "AS", + "CC", + "CC_FOR_BUILD", + "CMAKE_ARGS", + "CONDA_EXE", + "CONDA_PYTHON_EXE", + "CPP", + "CXX", + "CXXFILT", + "CXX_FOR_BUILD", + "ELFEDIT", + "F77", + "F90", + "F95", + "FC", + "GCC", + "GCC_AR", + "GCC_NM", + "GCC_RANLIB", + "GFORTRAN", + "GPROF", + "GXX", + "LD", + "LD_GOLD", + "NM", + "OBJCOPY", + 
"OBJDUMP", + "PATH", + "RANLIB", + "READELF", + "SIZE", + "STRINGS", + "STRIP", +] + + +@contextlib.contextmanager +def chdir(path): + old_path = os.getcwd() + os.chdir(path) + + try: + yield + finally: + os.chdir(old_path) + + +def parse_test_status(line): + status, test = line.split()[0:2] + return test, status + + +def make_fake_teststatus(path, testname, status, phase): + expect(phase in test_status.CORE_PHASES, "Bad phase '%s'" % phase) + with test_status.TestStatus(test_dir=path, test_name=testname) as ts: + for core_phase in test_status.CORE_PHASES: + if core_phase == phase: + ts.set_status( + core_phase, + status, + comments=("time=42" if phase == test_status.RUN_PHASE else ""), + ) + break + else: + ts.set_status( + core_phase, + test_status.TEST_PASS_STATUS, + comments=("time=42" if phase == test_status.RUN_PHASE else ""), + ) + + +class MockMachines(object): + """A mock version of the Machines object to simplify testing.""" + + def __init__(self, name, os_): + """Store the name.""" + self.name = name + self.os = os_ + + def get_machine_name(self): + """Return the name we were given.""" + return self.name + + def get_value(self, var_name): + """Allow the operating system to be queried.""" + assert var_name == "OS", ( + "Build asked for a value not " "implemented in the testing infrastructure." + ) + return self.os + + def is_valid_compiler(self, _): # pylint:disable=no-self-use + """Assume all compilers are valid.""" + return True + + def is_valid_MPIlib(self, _): + """Assume all MPILIB settings are valid.""" + return True + + # pragma pylint: disable=unused-argument + def get_default_MPIlib(self, attributes=None): + return "mpich2" + + def get_default_compiler(self): + return "intel" + + +class MakefileTester(object): + + """Helper class for checking Makefile output. + + Public methods: + __init__ + query_var + assert_variable_equals + assert_variable_matches + """ + + # Note that the following is a Makefile and the echo line must begin with a tab + _makefile_template = """ +include Macros +query: +\techo '$({})' > query.out +""" + + def __init__(self, parent, make_string): + """Constructor for Makefile test helper class. + + Arguments: + parent - The TestCase object that is using this item. + make_string - Makefile contents to test. + """ + self.parent = parent + self.make_string = make_string + + def query_var(self, var_name, env, var): + """Request the value of a variable in the Makefile, as a string. + + Arguments: + var_name - Name of the variable to query. + env - A dict containing extra environment variables to set when calling + make. + var - A dict containing extra make variables to set when calling make. + (The distinction between env and var actually matters only for + CMake, though.) + """ + if env is None: + env = dict() + if var is None: + var = dict() + + # Write the Makefile strings to temporary files. 
+ temp_dir = tempfile.mkdtemp() + macros_file_name = os.path.join(temp_dir, "Macros") + makefile_name = os.path.join(temp_dir, "Makefile") + output_name = os.path.join(temp_dir, "query.out") + + with open(macros_file_name, "w") as macros_file: + macros_file.write(self.make_string) + with open(makefile_name, "w") as makefile: + makefile.write(self._makefile_template.format(var_name)) + + # environment = os.environ.copy() + environment = dict(PATH=os.environ["PATH"]) + environment.update(env) + environment.update(var) + for x in MACRO_PRESERVE_ENV: + if x in os.environ: + environment[x] = os.environ[x] + gmake_exe = self.parent.MACHINE.get_value("GMAKE") + if gmake_exe is None: + gmake_exe = "gmake" + self.parent.run_cmd_assert_result( + "%s query --directory=%s 2>&1" % (gmake_exe, temp_dir), env=environment + ) + + with open(output_name, "r") as output: + query_result = output.read().strip() + + # Clean up the Makefiles. + shutil.rmtree(temp_dir) + + return query_result + + def assert_variable_equals(self, var_name, value, env=None, var=None): + """Assert that a variable in the Makefile has a given value. + + Arguments: + var_name - Name of variable to check. + value - The string that the variable value should be equal to. + env - Optional. Dict of environment variables to set when calling make. + var - Optional. Dict of make variables to set when calling make. + """ + self.parent.assertEqual(self.query_var(var_name, env, var), value) + + def assert_variable_matches(self, var_name, regex, env=None, var=None): + """Assert that a variable in the Makefile matches a regex. + + Arguments: + var_name - Name of variable to check. + regex - The regex to match. + env - Optional. Dict of environment variables to set when calling make. + var - Optional. Dict of make variables to set when calling make. + """ + self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex) + + +class CMakeTester(object): + + """Helper class for checking CMake output. + + Public methods: + __init__ + query_var + assert_variable_equals + assert_variable_matches + """ + + _cmakelists_template = """ +include(./Macros.cmake) +file(WRITE query.out "${{{}}}") +""" + + def __init__(self, parent, cmake_string): + """Constructor for CMake test helper class. + + Arguments: + parent - The TestCase object that is using this item. + cmake_string - CMake contents to test. + """ + self.parent = parent + self.cmake_string = cmake_string + + def query_var(self, var_name, env, var): + """Request the value of a variable in Macros.cmake, as a string. + + Arguments: + var_name - Name of the variable to query. + env - A dict containing extra environment variables to set when calling + cmake. + var - A dict containing extra CMake variables to set when calling cmake. + """ + if env is None: + env = dict() + if var is None: + var = dict() + + # Write the CMake strings to temporary files. 
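+        # The CMakeLists.txt written below includes Macros.cmake and uses
+        # file(WRITE ...) to dump the requested variable into query.out,
+        # which is read back after running cmake.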
+ temp_dir = tempfile.mkdtemp() + macros_file_name = os.path.join(temp_dir, "Macros.cmake") + cmakelists_name = os.path.join(temp_dir, "CMakeLists.txt") + output_name = os.path.join(temp_dir, "query.out") + + with open(macros_file_name, "w") as macros_file: + for key in var: + macros_file.write("set({} {})\n".format(key, var[key])) + macros_file.write(self.cmake_string) + with open(cmakelists_name, "w") as cmakelists: + cmakelists.write(self._cmakelists_template.format(var_name)) + + # environment = os.environ.copy() + environment = dict(PATH=os.environ["PATH"]) + environment.update(env) + for x in MACRO_PRESERVE_ENV: + if x in os.environ: + environment[x] = os.environ[x] + os_ = self.parent.MACHINE.get_value("OS") + # cmake will not work on cray systems without this flag + if os_ == "CNL": + cmake_args = "-DCMAKE_SYSTEM_NAME=Catamount" + else: + cmake_args = "" + + self.parent.run_cmd_assert_result( + "cmake %s . 2>&1" % cmake_args, from_dir=temp_dir, env=environment + ) + + with open(output_name, "r") as output: + query_result = output.read().strip() + + # Clean up the CMake files. + shutil.rmtree(temp_dir) + + return query_result + + def assert_variable_equals(self, var_name, value, env=None, var=None): + """Assert that a variable in the CMakeLists has a given value. + + Arguments: + var_name - Name of variable to check. + value - The string that the variable value should be equal to. + env - Optional. Dict of environment variables to set when calling cmake. + var - Optional. Dict of CMake variables to set when calling cmake. + """ + self.parent.assertEqual(self.query_var(var_name, env, var), value) + + def assert_variable_matches(self, var_name, regex, env=None, var=None): + """Assert that a variable in the CMkeLists matches a regex. + + Arguments: + var_name - Name of variable to check. + regex - The regex to match. + env - Optional. Dict of environment variables to set when calling cmake. + var - Optional. Dict of CMake variables to set when calling cmake. 
+ """ + self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex) + + +# TODO after dropping python 2.7 replace with tempfile.TemporaryDirectory +class TemporaryDirectory(object): + def __init__(self): + self._tempdir = None + + def __enter__(self): + self._tempdir = tempfile.mkdtemp() + return self._tempdir + + def __exit__(self, *args, **kwargs): + if os.path.exists(self._tempdir): + shutil.rmtree(self._tempdir) + + +# TODO replace with actual mock once 2.7 is dropped +class Mocker: + def __init__(self, ret=None, cmd=None, return_value=None, side_effect=None): + self._orig = [] + self._ret = ret or return_value + self._cmd = cmd + self._calls = [] + + if isinstance(side_effect, (list, tuple)): + self._side_effect = iter(side_effect) + else: + self._side_effect = side_effect + + self._method_calls = {} + + @property + def calls(self): + return self._calls + + @property + def method_calls(self): + return dict((x, y.calls) for x, y in self._method_calls.items()) + + @property + def ret(self): + return self._ret + + @ret.setter + def ret(self, value): + self._ret = value + + def assert_called(self): + assert len(self.calls) > 0 + + def assert_called_with(self, i=None, args=None, kwargs=None): + if i is None: + i = 0 + + call = self.calls[i] + + if args is not None: + _call_args = set(call["args"]) + _exp_args = set(args) + assert _exp_args <= _call_args, "Got {} missing {}".format( + _call_args, _exp_args - _call_args + ) + + if kwargs is not None: + call_kwargs = call["kwargs"] + + for x, y in kwargs.items(): + assert call_kwargs[x] == y, "Missing {}".format(x) + + def __getattr__(self, name): + if name in self._method_calls: + new_method = self._method_calls[name] + else: + new_method = Mocker(self, cmd=name) + self._method_calls[name] = new_method + + return new_method + + def __call__(self, *args, **kwargs): + self._calls.append({"args": args, "kwargs": kwargs}) + + if self._side_effect is not None and isinstance(self._side_effect, Iterable): + rv = next(self._side_effect) + else: + rv = self._ret + + return rv + + def __del__(self): + self.revert_mocks() + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.revert_mocks() + + def revert_mocks(self): + for m, module, method in self._orig: + if isinstance(module, str): + setattr(sys.modules[module], method, m) + else: + setattr(module, method, m) + + def patch( + self, module, method=None, ret=None, is_property=False, update_value_only=False + ): + rv = None + if isinstance(module, str): + x = module.split(".") + main = ".".join(x[:-1]) + if not update_value_only: + self._orig.append((getattr(sys.modules[main], x[-1]), main, x[-1])) + if is_property: + setattr(sys.modules[main], x[-1], ret) + else: + rv = Mocker(ret, cmd=x[-1]) + setattr(sys.modules[main], x[-1], rv) + elif method != None: + if not update_value_only: + self._orig.append((getattr(module, method), module, method)) + rv = Mocker(ret) + setattr(module, method, rv) + else: + raise Exception("Could not patch") + + return rv diff --git a/CIME/user_mod_support.py b/CIME/user_mod_support.py new file mode 100644 index 00000000000..2c182a19dbd --- /dev/null +++ b/CIME/user_mod_support.py @@ -0,0 +1,167 @@ +""" +user_mod_support.py +""" + +from CIME.XML.standard_module_setup import * +from CIME.utils import expect, run_cmd_no_fail, safe_copy +import glob + +logger = logging.getLogger(__name__) + + +def apply_user_mods(caseroot, user_mods_path, keepexe=None): + """ + Recursivlely apply user_mods to caseroot - this includes updating 
user_nl_xxx,
+    updating SourceMods and creating case shell_commands and xmlchange_cmnds files
+
+    First remove case shell_commands files if any already exist
+
+    If this function is called multiple times, settings from later calls will
+    take precedence over earlier calls, if there are conflicts.
+
+    keepexe is an optional argument that is needed for cases where apply_user_mods is
+    called from create_clone
+    """
+    case_shell_command_files = [
+        os.path.join(caseroot, "shell_commands"),
+        os.path.join(caseroot, "xmlchange_cmnds"),
+    ]
+    for shell_command_file in case_shell_command_files:
+        if os.path.isfile(shell_command_file):
+            os.remove(shell_command_file)
+
+    include_dirs = build_include_dirs_list(user_mods_path)
+    # If a user_mods dir 'foo' includes 'bar', the include_dirs list returned
+    # from build_include_dirs has 'foo' before 'bar'. But with the below code,
+    # directories that occur later in the list take precedence over the earlier
+    # ones, and we want 'foo' to take precedence over 'bar' in this case (in
+    # general: we want a given user_mods directory to take precedence over any
+    # mods that it includes). So we reverse include_dirs to accomplish this.
+    include_dirs.reverse()
+    logger.debug("include_dirs are {}".format(include_dirs))
+    for include_dir in include_dirs:
+        # write user_nl_xxx file in caseroot
+        for user_nl in glob.iglob(os.path.join(include_dir, "user_nl_*")):
+            with open(os.path.join(include_dir, user_nl), "r") as fd:
+                newcontents = fd.read()
+            if len(newcontents) == 0:
+                continue
+            case_user_nl = user_nl.replace(include_dir, caseroot)
+            # If the same variable is set twice in a user_nl file, the later one
+            # takes precedence. So by appending the new contents, later entries
+            # in the include_dirs list take precedence over earlier entries.
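+            # For example (hypothetical settings): if an earlier include_dir
+            # wrote "my_setting = 5" and a later one appends "my_setting = 10",
+            # the namelist machinery picks up the later value, 10.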
+ with open(case_user_nl, "a") as fd: + fd.write(newcontents) + + # update SourceMods in caseroot + for root, _, files in os.walk(include_dir, followlinks=True, topdown=False): + if "src" in os.path.basename(root): + if keepexe is not None: + expect( + False, + "cannot have any source mods in {} if keepexe is an option".format( + user_mods_path + ), + ) + for sfile in files: + source_mods = os.path.join(root, sfile) + case_source_mods = source_mods.replace(include_dir, caseroot) + # We overwrite any existing SourceMods file so that later + # include_dirs take precedence over earlier ones + if os.path.isfile(case_source_mods): + logger.warning( + "WARNING: Overwriting existing SourceMods in {}".format( + case_source_mods + ) + ) + else: + logger.info( + "Adding SourceMod to case {}".format(case_source_mods) + ) + try: + safe_copy(source_mods, case_source_mods) + except Exception: + expect( + False, + "Could not write file {} in caseroot {}".format( + case_source_mods, caseroot + ), + ) + + # create xmlchange_cmnds and shell_commands in caseroot + shell_command_files = glob.glob( + os.path.join(include_dir, "shell_commands") + ) + glob.glob(os.path.join(include_dir, "xmlchange_cmnds")) + for shell_commands_file in shell_command_files: + case_shell_commands = shell_commands_file.replace(include_dir, caseroot) + # add commands from both shell_commands and xmlchange_cmnds to + # the same file (caseroot/shell_commands) + case_shell_commands = case_shell_commands.replace( + "xmlchange_cmnds", "shell_commands" + ) + # Note that use of xmlchange_cmnds has been deprecated and will soon + # be removed altogether, so new tests should rely on shell_commands + if shell_commands_file.endswith("xmlchange_cmnds"): + logger.warning( + "xmlchange_cmnds is deprecated and will be removed " + + "in a future release; please rename {} shell_commands".format( + shell_commands_file + ) + ) + with open(shell_commands_file, "r") as fd: + new_shell_commands = fd.read().replace("xmlchange", "xmlchange --force") + # By appending the new commands to the end, settings from later + # include_dirs take precedence over earlier ones + with open(case_shell_commands, "a") as fd: + fd.write(new_shell_commands) + + for shell_command_file in case_shell_command_files: + if os.path.isfile(shell_command_file): + os.chmod(shell_command_file, 0o777) + run_cmd_no_fail(shell_command_file, verbose=True) + + +def build_include_dirs_list(user_mods_path, include_dirs=None): + """ + If user_mods_path has a file "include_user_mods" read that + file and add directories to the include_dirs, recursively check + each of those directories for further directories. 
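+    For example, a (hypothetical) include_user_mods file containing the single
+    line "../common_mods" pulls in the sibling directory "common_mods"; relative
+    paths are resolved against user_mods_path.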
+    The file may also include comments delineated with # in the first column
+    """
+    include_dirs = [] if include_dirs is None else include_dirs
+    if user_mods_path is None or user_mods_path == "UNSET":
+        return include_dirs
+    expect(
+        os.path.isabs(user_mods_path),
+        "Expected full directory path, got '{}'".format(user_mods_path),
+    )
+    expect(
+        os.path.isdir(user_mods_path), "Directory not found {}".format(user_mods_path)
+    )
+    norm_path = os.path.normpath(user_mods_path)
+
+    for dir_ in include_dirs:
+        if norm_path == dir_:
+            include_dirs.remove(norm_path)
+            break
+
+    logger.info("Adding user mods directory {}".format(norm_path))
+    include_dirs.append(norm_path)
+    include_file = os.path.join(norm_path, "include_user_mods")
+    if os.path.isfile(include_file):
+        with open(include_file, "r") as fd:
+            for newpath in fd:
+                newpath = newpath.rstrip()
+                if len(newpath) > 0 and not newpath.startswith("#"):
+                    if not os.path.isabs(newpath):
+                        newpath = os.path.join(user_mods_path, newpath)
+                    if os.path.isabs(newpath):
+                        build_include_dirs_list(newpath, include_dirs)
+                    else:
+                        logger.warning(
+                            "Could not resolve path '{}' in file '{}'".format(
+                                newpath, include_file
+                            )
+                        )
+
+    return include_dirs
diff --git a/CIME/utils.py b/CIME/utils.py
new file mode 100644
index 00000000000..61808eb8779
--- /dev/null
+++ b/CIME/utils.py
@@ -0,0 +1,2685 @@
+"""
+Common functions used by cime python scripts
+Warning: you cannot use CIME Classes in this module as it causes circular dependencies
+"""
+
+import shlex
+import configparser
+import io, logging, gzip, sys, os, time, re, shutil, glob, string, random, importlib, fnmatch
+import importlib.util
+import errno, signal, warnings, filecmp
+import stat as statlib
+from argparse import Action
+from contextlib import contextmanager
+
+# Return this error code if the scripts worked but tests failed
+TESTS_FAILED_ERR_CODE = 100
+logger = logging.getLogger(__name__)
+
+# Fix to pass user defined `srcroot` to `CIME.XML.generic_xml.GenericXML`
+# where it's used to resolve $SRCROOT in XML config files.
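+# A minimal usage sketch (an assumption, for illustration only):
+#     GLOBAL["SRCROOT"] = "/path/to/model/source"
+# get_src_root() below falls back to this value when neither the SRCROOT
+# environment variable nor a ~/.cime/config entry is set.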
+GLOBAL = {} +CASE_SUCCESS = "success" +CASE_FAILURE = "error" + + +def deprecate_action(message): + class ActionStoreDeprecated(Action): + def __call__(self, parser, namespace, values, option_string=None): + raise DeprecationWarning(f"{option_string} is deprecated{message}") + + return ActionStoreDeprecated + + +def import_from_file(name, file_path): + loader = importlib.machinery.SourceFileLoader(name, file_path) + + spec = importlib.util.spec_from_loader(loader.name, loader) + + module = importlib.util.module_from_spec(spec) + + sys.modules[name] = module + + spec.loader.exec_module(module) + + return module + + +@contextmanager +def redirect_stdout(new_target): + old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout + try: + yield new_target # run some code with the replaced stdout + finally: + sys.stdout = old_target # restore to the previous value + + +@contextmanager +def redirect_stderr(new_target): + old_target, sys.stderr = sys.stderr, new_target # replace sys.stdout + try: + yield new_target # run some code with the replaced stdout + finally: + sys.stderr = old_target # restore to the previous value + + +@contextmanager +def redirect_stdout_stderr(new_target): + old_stdout, old_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = new_target, new_target + try: + yield new_target + finally: + sys.stdout, sys.stderr = old_stdout, old_stderr + + +@contextmanager +def redirect_logger(new_target, logger_name): + ch = logging.StreamHandler(stream=new_target) + ch.setLevel(logging.DEBUG) + log = logging.getLogger(logger_name) + root_log = logging.getLogger() + orig_handlers = log.handlers + orig_root_loggers = root_log.handlers + + try: + root_log.handlers = [] + log.handlers = [ch] + yield log + finally: + root_log.handlers = orig_root_loggers + log.handlers = orig_handlers + + +class IndentFormatter(logging.Formatter): + def __init__(self, indent, fmt=None, datefmt=None): + logging.Formatter.__init__(self, fmt, datefmt) + self._indent = indent + + def format(self, record): + record.msg = "{}{}".format(self._indent, record.msg) + out = logging.Formatter.format(self, record) + return out + + +def set_logger_indent(indent): + root_log = logging.getLogger() + root_log.handlers = [] + formatter = IndentFormatter(indent) + + handler = logging.StreamHandler() + handler.setFormatter(formatter) + root_log.addHandler(handler) + + +class EnvironmentContext(object): + """ + Context manager for environment variables + Usage: + os.environ['MYVAR'] = 'oldvalue' + with EnvironmentContex(MYVAR='myvalue', MYVAR2='myvalue2'): + print os.getenv('MYVAR') # Should print myvalue. + print os.getenv('MYVAR2') # Should print myvalue2. + print os.getenv('MYVAR') # Should print oldvalue. + print os.getenv('MYVAR2') # Should print None. + + CREDIT: https://github.com/sakurai-youhei/envcontext + """ + + def __init__(self, **kwargs): + self.envs = kwargs + self.old_envs = {} + + def __enter__(self): + self.old_envs = {} + for k, v in self.envs.items(): + self.old_envs[k] = os.environ.get(k) + os.environ[k] = v + + def __exit__(self, *args): + for k, v in self.old_envs.items(): + if v: + os.environ[k] = v + else: + del os.environ[k] + + +# This should be the go-to exception for CIME use. It's a subclass +# of SystemExit in order suppress tracebacks, which users generally +# hate seeing. It's a subclass of Exception because we want it to be +# "catchable". If you are debugging CIME and want to see the stacktrace, +# run your CIME command with the --debug flag. 
+class CIMEError(SystemExit, Exception): + pass + + +def expect(condition, error_msg, exc_type=CIMEError, error_prefix="ERROR:"): + """ + Similar to assert except doesn't generate an ugly stacktrace. Useful for + checking user error, not programming error. + + >>> expect(True, "error1") + >>> expect(False, "error2") # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: error2 + """ + # Without this line we get a futurewarning on the use of condition below + warnings.filterwarnings("ignore") + if not condition: + if logger.isEnabledFor(logging.DEBUG): + import pdb + + pdb.set_trace() # pylint: disable=forgotten-debug-statement + + msg = error_prefix + " " + error_msg + raise exc_type(msg) + + +def id_generator(size=6, chars=string.ascii_lowercase + string.digits): + return "".join(random.choice(chars) for _ in range(size)) + + +def check_name(fullname, additional_chars=None, fullpath=False): + """ + check for unallowed characters in name, this routine only + checks the final name and does not check if path exists or is + writable + + >>> check_name("test.id", additional_chars=".") + False + >>> check_name("case.name", fullpath=False) + True + >>> check_name("/some/file/path/case.name", fullpath=True) + True + >>> check_name("mycase+mods") + False + >>> check_name("mycase?mods") + False + >>> check_name("mycase*mods") + False + >>> check_name("/some/full/path/name/") + False + """ + + chars = r"+*?<>/{}[\]~`@:" + if additional_chars is not None: + chars += additional_chars + if fullname.endswith("/"): + return False + if fullpath: + _, name = os.path.split(fullname) + else: + name = fullname + match = re.search(r"[" + re.escape(chars) + "]", name) + if match is not None: + logger.warning( + "Illegal character {} found in name {}".format(match.group(0), name) + ) + return False + return True + + +# Should only be called from get_cime_config() +def _read_cime_config_file(): + """ + READ the config file in ~/.cime, this file may contain + [main] + CIME_MODEL=e3sm,cesm,ufs + PROJECT=someprojectnumber + """ + allowed_sections = ("main", "create_test") + + allowed_in_main = ( + "cime_model", + "project", + "charge_account", + "srcroot", + "mail_type", + "mail_user", + "machine", + "mpilib", + "compiler", + "input_dir", + "cime_driver", + ) + allowed_in_create_test = ( + "mail_type", + "mail_user", + "save_timing", + "single_submit", + "test_root", + "output_root", + "baseline_root", + "clean", + "machine", + "mpilib", + "compiler", + "parallel_jobs", + "proc_pool", + "walltime", + "job_queue", + "allow_baseline_overwrite", + "skip_tests_with_existing_baselines", + "wait", + "force_procs", + "force_threads", + "input_dir", + "pesfile", + "retry", + "walltime", + ) + + cime_config_file = os.path.abspath( + os.path.join(os.path.expanduser("~"), ".cime", "config") + ) + cime_config = configparser.ConfigParser() + if os.path.isfile(cime_config_file): + cime_config.read(cime_config_file) + for section in cime_config.sections(): + expect( + section in allowed_sections, + "Unknown section {} in .cime/config\nallowed sections are {}".format( + section, allowed_sections + ), + ) + if cime_config.has_section("main"): + for item, _ in cime_config.items("main"): + expect( + item in allowed_in_main, + 'Unknown option in config section "main": "{}"\nallowed options are {}'.format( + item, allowed_in_main + ), + ) + if cime_config.has_section("create_test"): + for item, _ in cime_config.items("create_test"): + expect( + item in allowed_in_create_test, + 'Unknown option 
in config section "test": "{}"\nallowed options are {}'.format( + item, allowed_in_create_test + ), + ) + else: + logger.debug("File {} not found".format(cime_config_file)) + cime_config.add_section("main") + + return cime_config + + +_CIMECONFIG = None + + +def get_cime_config(): + global _CIMECONFIG + if not _CIMECONFIG: + _CIMECONFIG = _read_cime_config_file() + + return _CIMECONFIG + + +def reset_cime_config(): + """ + Useful to keep unit tests from interfering with each other + """ + global _CIMECONFIG + _CIMECONFIG = None + + +def copy_local_macros_to_dir(destination, extra_machdir=None): + """ + Copy any local macros files to the path given by 'destination'. + + Local macros files are potentially found in: + (1) extra_machdir/cmake_macros/*.cmake + (2) $HOME/.cime/*.cmake + """ + local_macros = [] + if extra_machdir: + if os.path.isdir(os.path.join(extra_machdir, "cmake_macros")): + local_macros.extend( + glob.glob(os.path.join(extra_machdir, "cmake_macros/*.cmake")) + ) + + dotcime = None + home = os.environ.get("HOME") + if home: + dotcime = os.path.join(home, ".cime") + if dotcime and os.path.isdir(dotcime): + local_macros.extend(glob.glob(dotcime + "/*.cmake")) + + for macro in local_macros: + safe_copy(macro, destination) + + +def get_python_libs_location_within_cime(): + """ + From within CIME, return subdirectory of python libraries + """ + return os.path.join("scripts", "lib") + + +def get_cime_root(case=None): + """ + Return the absolute path to the root of CIME that contains this script + """ + real_file_dir = os.path.dirname(os.path.realpath(__file__)) + cimeroot = os.path.abspath(os.path.join(real_file_dir, "..")) + + if case is not None: + case_cimeroot = os.path.abspath(case.get_value("CIMEROOT")) + cimeroot = os.path.abspath(cimeroot) + expect( + cimeroot == case_cimeroot, + "Inconsistent CIMEROOT variable: case -> '{}', file location -> '{}'".format( + case_cimeroot, cimeroot + ), + ) + + logger.debug("CIMEROOT is " + cimeroot) + return cimeroot + + +def get_config_path(): + cimeroot = get_cime_root() + + return os.path.join(cimeroot, "CIME", "data", "config") + + +def get_schema_path(): + config_path = get_config_path() + + return os.path.join(config_path, "xml_schemas") + + +def get_template_path(): + cimeroot = get_cime_root() + + return os.path.join(cimeroot, "CIME", "data", "templates") + + +def get_tools_path(): + cimeroot = get_cime_root() + + return os.path.join(cimeroot, "CIME", "Tools") + + +def get_src_root(): + """ + Return the absolute path to the root of SRCROOT. + + """ + cime_config = get_cime_config() + + if "SRCROOT" in os.environ: + srcroot = os.environ["SRCROOT"] + + logger.debug("SRCROOT from environment: {}".format(srcroot)) + elif cime_config.has_option("main", "SRCROOT"): + srcroot = cime_config.get("main", "SRCROOT") + + logger.debug("SRCROOT from user config: {}".format(srcroot)) + elif "SRCROOT" in GLOBAL: + srcroot = GLOBAL["SRCROOT"] + + logger.debug("SRCROOT from internal GLOBAL: {}".format(srcroot)) + else: + # If the share directory exists in the CIME root then it's + # assumed it's also the source root. This should only + # occur when the local "Externals.cfg" is used to install + # requirements for running/testing without a specific model. 
+ if os.path.isdir(os.path.join(get_cime_root(), "share")): + srcroot = os.path.abspath(os.path.join(get_cime_root())) + else: + srcroot = os.path.abspath(os.path.join(get_cime_root(), "..")) + + logger.debug("SRCROOT from implicit detection: {}".format(srcroot)) + + return srcroot + + +def get_cime_default_driver(): + driver = os.environ.get("CIME_DRIVER") + if driver: + logger.debug("Setting CIME_DRIVER={} from environment".format(driver)) + else: + cime_config = get_cime_config() + if cime_config.has_option("main", "CIME_DRIVER"): + driver = cime_config.get("main", "CIME_DRIVER") + if driver: + logger.debug( + "Setting CIME_driver={} from ~/.cime/config".format(driver) + ) + + from CIME.config import Config + + config = Config.load_defaults() + + if not driver: + driver = config.driver_default + + expect( + driver in config.driver_choices, + "Attempt to set invalid driver {}".format(driver), + ) + return driver + + +def get_all_cime_models(): + config_path = get_config_path() + models = [] + + for entry in os.listdir(config_path): + if os.path.isdir(os.path.join(config_path, entry)): + models.append(entry) + + models.remove("xml_schemas") + + return models + + +def set_model(model): + """ + Set the model to be used in this session + """ + cime_config = get_cime_config() + cime_models = get_all_cime_models() + if not cime_config.has_section("main"): + cime_config.add_section("main") + expect( + model in cime_models, + "model {} not recognized. The acceptable values of CIME_MODEL currently are {}".format( + model, cime_models + ), + ) + cime_config.set("main", "CIME_MODEL", model) + + +def get_model(): + """ + Get the currently configured model value + The CIME_MODEL env variable may or may not be set + + >>> os.environ["CIME_MODEL"] = "garbage" + >>> get_model() # doctest:+ELLIPSIS +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: model garbage not recognized + >>> del os.environ["CIME_MODEL"] + >>> set_model('rocky') # doctest:+ELLIPSIS +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: model rocky not recognized + >>> set_model('e3sm') + >>> get_model() + 'e3sm' + >>> reset_cime_config() + """ + model = os.environ.get("CIME_MODEL") + cime_models = get_all_cime_models() + if model in cime_models: + logger.debug("Setting CIME_MODEL={} from environment".format(model)) + else: + expect( + model is None, + "model {} not recognized. 
The acceptable values of CIME_MODEL currently are {}".format( + model, cime_models + ), + ) + cime_config = get_cime_config() + if cime_config.has_option("main", "CIME_MODEL"): + model = cime_config.get("main", "CIME_MODEL") + if model is not None: + logger.debug("Setting CIME_MODEL={} from ~/.cime/config".format(model)) + + # One last try + if model is None: + srcroot = get_src_root() + + if os.path.isfile(os.path.join(srcroot, "bin", "git-fleximod")): + model = "cesm" + elif os.path.isfile(os.path.join(srcroot, "Externals.cfg")): + model = "cesm" + with open(os.path.join(srcroot, "Externals.cfg")) as fd: + for line in fd: + if re.search("ufs", line): + model = "ufs" + else: + model = "e3sm" + # This message interfers with the correct operation of xmlquery + # logger.debug("Guessing CIME_MODEL={}, set environment variable if this is incorrect".format(model)) + + if model is not None: + set_model(model) + return model + + modelroot = os.path.join(get_cime_root(), "CIME", "config") + models = os.listdir(modelroot) + msg = ".cime/config or environment variable CIME_MODEL must be set to one of: " + msg += ", ".join( + [ + model + for model in models + if os.path.isdir(os.path.join(modelroot, model)) and model != "xml_schemas" + ] + ) + expect(False, msg) + + +def _get_path(filearg, from_dir): + if not filearg.startswith("/") and from_dir is not None: + filearg = os.path.join(from_dir, filearg) + + return filearg + + +def _convert_to_fd(filearg, from_dir, mode="a"): + filearg = _get_path(filearg, from_dir) + + return open(filearg, mode) + + +_hack = object() + + +def _line_defines_python_function(line, funcname): + """Returns True if the given line defines the function 'funcname' as a top-level definition + + ("top-level definition" means: not something like a class method; i.e., the def should + be at the start of the line, not indented) + + """ + if re.search(r"^def\s+{}\s*\(".format(funcname), line) or re.search( + r"^from\s.+\simport.*\s{}(?:,|\s|$)".format(funcname), line + ): + return True + return False + + +def file_contains_python_function(filepath, funcname): + """Checks whether the given file contains a top-level definition of the function 'funcname' + + Returns a boolean value (True if the file contains this function definition, False otherwise) + """ + has_function = False + with open(filepath, "r") as fd: + for line in fd.readlines(): + if _line_defines_python_function(line, funcname): + has_function = True + break + + return has_function + + +def fixup_sys_path(*additional_paths): + cimeroot = get_cime_root() + + if cimeroot not in sys.path or sys.path.index(cimeroot) > 0: + sys.path.insert(0, cimeroot) + + tools_path = get_tools_path() + + if tools_path not in sys.path or sys.path.index(tools_path) > 1: + sys.path.insert(1, tools_path) + + for i, x in enumerate(additional_paths): + if x not in sys.path or sys.path.index(x) > 2 + i: + sys.path.insert(2 + i, x) + + +def import_and_run_sub_or_cmd( + cmd, + cmdargs, + subname, + subargs, + config_dir, + compname, + logfile=None, + case=None, + from_dir=None, + timeout=None, +): + sys_path_old = sys.path + # ensure we provide `get_src_root()` and `get_tools_path()` to sys.path + # allowing imported modules to correctly import `CIME` module or any + # tool under `CIME/Tools`. 
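+    # Illustrative dispatch (component name is hypothetical): for compname
+    # "xyz" this first tries `import xyz_cime_py` and calls
+    # getattr(mod, subname)(*subargs); if the module or the attribute is
+    # missing, it falls back to handing `cmd` off to run_sub_or_cmd.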
+ fixup_sys_path(config_dir) + try: + mod = importlib.import_module(f"{compname}_cime_py") + getattr(mod, subname)(*subargs) + except (ModuleNotFoundError, AttributeError) as e: + # * ModuleNotFoundError if importlib can not find module, + # * AttributeError if importlib finds the module but + # {subname} is not defined in the module + expect( + os.path.isfile(cmd), + f"Could not find {subname} file for component {compname}", + ) + + # TODO shouldn't need to use logger.isEnabledFor for debug logging + if isinstance(e, ModuleNotFoundError) and logger.isEnabledFor(logging.DEBUG): + logger.info( + "WARNING: Could not import module '{}_cime_py'".format(compname) + ) + + try: + run_sub_or_cmd( + cmd, cmdargs, subname, subargs, logfile, case, from_dir, timeout + ) + except Exception as e1: + raise e1 from None + except Exception: + if logfile: + with open(logfile, "a") as log_fd: + log_fd.write(str(sys.exc_info()[1])) + expect(False, "{} FAILED, cat {}".format(cmd, logfile)) + else: + raise + sys.path = sys_path_old + + +def run_sub_or_cmd( + cmd, cmdargs, subname, subargs, logfile=None, case=None, from_dir=None, timeout=None +): + """ + This code will try to import and run each cmd as a subroutine + if that fails it will run it as a program in a seperate shell + + Raises exception on failure. + """ + if file_contains_python_function(cmd, subname): + do_run_cmd = False + else: + do_run_cmd = True + + if not do_run_cmd: + # ensure we provide `get_src_root()` and `get_tools_path()` to sys.path + # allowing imported modules to correctly import `CIME` module or any + # tool under `CIME/Tools`. + fixup_sys_path() + + try: + mod = import_from_file(subname, cmd) + logger.info(" Calling {}".format(cmd)) + # Careful: logfile code is not thread safe! + if logfile: + with open(logfile, "w") as log_fd: + with redirect_logger(log_fd, subname): + with redirect_stdout_stderr(log_fd): + getattr(mod, subname)(*subargs) + else: + getattr(mod, subname)(*subargs) + + except (SyntaxError, AttributeError) as _: + pass # Need to try to run as shell command + + except Exception: + if logfile: + with open(logfile, "a") as log_fd: + log_fd.write(str(sys.exc_info()[1])) + + expect(False, "{} FAILED, cat {}".format(cmd, logfile)) + else: + raise + + else: + return # Running as python function worked, we're done + + logger.info(" Running {} ".format(cmd)) + if case is not None: + case.flush() + + fullcmd = cmd + if isinstance(cmdargs, list): + for arg in cmdargs: + fullcmd += " " + str(arg) + else: + fullcmd += " " + cmdargs + + if logfile: + fullcmd += " >& {} ".format(logfile) + + stat, output, _ = run_cmd( + "{}".format(fullcmd), combine_output=True, from_dir=from_dir, timeout=timeout + ) + if output: # Will be empty if logfile + logger.info(output) + + if stat != 0: + if logfile: + expect(False, "{} FAILED, cat {}".format(fullcmd, logfile)) + else: + expect(False, "{} FAILED, see above".format(fullcmd)) + + # refresh case xml object from file + if case is not None: + case.read_xml() + + +def run_cmd( + cmd, + input_str=None, + from_dir=None, + verbose=None, + arg_stdout=_hack, + arg_stderr=_hack, + env=None, + combine_output=False, + timeout=None, + executable=None, + shell=True, +): + """ + Wrapper around subprocess to make it much more convenient to run shell commands + + >>> run_cmd('ls file_i_hope_doesnt_exist')[0] != 0 + True + """ + import subprocess # Not safe to do globally, module not available in older pythons + + # Real defaults for these value should be subprocess.PIPE + if arg_stdout is _hack: + arg_stdout 
= subprocess.PIPE + elif isinstance(arg_stdout, str): + arg_stdout = _convert_to_fd(arg_stdout, from_dir) + + if arg_stderr is _hack: + arg_stderr = subprocess.STDOUT if combine_output else subprocess.PIPE + elif isinstance(arg_stderr, str): + arg_stderr = _convert_to_fd(arg_stdout, from_dir) + + if verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG)): + logger.info( + "RUN: {}\nFROM: {}".format( + cmd, os.getcwd() if from_dir is None else from_dir + ) + ) + + if input_str is not None: + stdin = subprocess.PIPE + else: + stdin = None + + if not shell: + cmd = shlex.split(cmd) + + # ensure we have an environment to use if not being over written by parent + if env is None: + # persist current environment + env = os.environ.copy() + + # Always provide these variables for anything called externally. + # `CIMEROOT` is provided for external scripts, makefiles, etc that + # may reference it. `PYTHONPATH` is provided to ensure external + # python can correctly import the CIME module and anything under + # `CIME/tools`. + # + # `get_tools_path()` is provided for backwards compatibility. + # External python prior to the CIME module move would use `CIMEROOT` + # or build a relative path and append `sys.path` to import + # `standard_script_setup`. Providing `PYTHONPATH` fixes protential + # broken paths in external python. + env_pythonpath = os.environ.get("PYTHONPATH", "").split(":") + cime_pythonpath = [f"{get_cime_root()}", f"{get_tools_path()}"] + env_pythonpath + env["PYTHONPATH"] = ":".join(filter(None, cime_pythonpath)) + env["CIMEROOT"] = f"{get_cime_root()}" + + if timeout: + with Timeout(timeout): + proc = subprocess.Popen( + cmd, + shell=shell, + stdout=arg_stdout, + stderr=arg_stderr, + stdin=stdin, + cwd=from_dir, + executable=executable, + env=env, + ) + + output, errput = proc.communicate(input_str) + else: + proc = subprocess.Popen( + cmd, + shell=shell, + stdout=arg_stdout, + stderr=arg_stderr, + stdin=stdin, + cwd=from_dir, + executable=executable, + env=env, + ) + + output, errput = proc.communicate(input_str) + + # In Python3, subprocess.communicate returns bytes. We want to work with strings + # as much as possible, so we convert bytes to string (which is unicode in py3) via + # decode. For python2, we do NOT want to do this since decode will yield unicode + # strings which are not necessarily compatible with the system's default base str type. 
+ if output is not None: + try: + output = output.decode("utf-8", errors="ignore") + except AttributeError: + pass + if errput is not None: + try: + errput = errput.decode("utf-8", errors="ignore") + except AttributeError: + pass + + # Always strip outputs + if output: + output = output.strip() + if errput: + errput = errput.strip() + + stat = proc.wait() + if isinstance(arg_stdout, io.IOBase): + arg_stdout.close() # pylint: disable=no-member + if isinstance(arg_stderr, io.IOBase) and arg_stderr is not arg_stdout: + arg_stderr.close() # pylint: disable=no-member + + if verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG)): + if stat != 0: + logger.info(" stat: {:d}\n".format(stat)) + if output: + logger.info(" output: {}\n".format(output)) + if errput: + logger.info(" errput: {}\n".format(errput)) + + return stat, output, errput + + +def run_cmd_no_fail( + cmd, + input_str=None, + from_dir=None, + verbose=None, + arg_stdout=_hack, + arg_stderr=_hack, + env=None, + combine_output=False, + timeout=None, + executable=None, +): + """ + Wrapper around subprocess to make it much more convenient to run shell commands. + Expects command to work. Just returns output string. + + >>> run_cmd_no_fail('echo foo') == 'foo' + True + >>> run_cmd_no_fail('echo THE ERROR >&2; false') # doctest:+ELLIPSIS +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: Command: 'echo THE ERROR >&2; false' failed with error ... + + >>> run_cmd_no_fail('grep foo', input_str=b'foo') == 'foo' + True + >>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) == 'THE ERROR' + True + """ + stat, output, errput = run_cmd( + cmd, + input_str, + from_dir, + verbose, + arg_stdout, + arg_stderr, + env, + combine_output, + executable=executable, + timeout=timeout, + ) + if stat != 0: + # If command produced no errput, put output in the exception since we + # have nothing else to go on. 
+ errput = output if not errput else errput + if errput is None: + if combine_output: + if isinstance(arg_stdout, str): + errput = "See {}".format(_get_path(arg_stdout, from_dir)) + else: + errput = "" + elif isinstance(arg_stderr, str): + errput = "See {}".format(_get_path(arg_stderr, from_dir)) + else: + errput = "" + + expect( + False, + "Command: '{}' failed with error '{}' from dir '{}'".format( + cmd, errput, os.getcwd() if from_dir is None else from_dir + ), + ) + + return output + + +def normalize_case_id(case_id): + """ + Given a case_id, return it in form TESTCASE.GRID.COMPSET.PLATFORM + + >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel') + 'ERT.ne16_g37.B1850C5.sandiatoss3_intel' + >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod') + 'ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod' + >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel.G.20151121') + 'ERT.ne16_g37.B1850C5.sandiatoss3_intel' + >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod.G.20151121') + 'ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod' + """ + sep_count = case_id.count(".") + expect( + sep_count >= 3 and sep_count <= 6, + "Case '{}' needs to be in form: TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD] or TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD].GC.TESTID".format( + case_id + ), + ) + if sep_count in [5, 6]: + return ".".join(case_id.split(".")[:-2]) + else: + return case_id + + +def parse_test_name(test_name): + """ + Given a CIME test name TESTCASE[_CASEOPTS].GRID.COMPSET[.MACHINE_COMPILER[.TESTMODS]], + return each component of the testname with machine and compiler split. + Do not error if a partial testname is provided (TESTCASE or TESTCASE.GRID) instead + parse and return the partial results. + + TESTMODS use hyphens in a special way: + - A single hyphen stands for a path separator (for example, 'test-mods' resolves to + the path 'test/mods') + - A double hyphen separates multiple test mods (for example, 'test-mods--other-dir-path' + indicates two test mods: 'test/mods' and 'other/dir/path') + + If there are one or more TESTMODS, then the testmods component of the result will be a + list, where each element of the list is one testmod, and hyphens have been replaced by + slashes. (If there are no TESTMODS in this test, then the TESTMODS component of the + result is None, as for other optional components.) 
+ + >>> parse_test_name('ERS') + ['ERS', None, None, None, None, None, None] + >>> parse_test_name('ERS.fe12_123') + ['ERS', None, 'fe12_123', None, None, None, None] + >>> parse_test_name('ERS.fe12_123.JGF') + ['ERS', None, 'fe12_123', 'JGF', None, None, None] + >>> parse_test_name('ERS_D.fe12_123.JGF') + ['ERS', ['D'], 'fe12_123', 'JGF', None, None, None] + >>> parse_test_name('ERS_D_P1.fe12_123.JGF') + ['ERS', ['D', 'P1'], 'fe12_123', 'JGF', None, None, None] + >>> parse_test_name('ERS_D_G2.fe12_123.JGF') + ['ERS', ['D', 'G2'], 'fe12_123', 'JGF', None, None, None] + >>> parse_test_name('SMS_D_Ln9_Mmpi-serial.f19_g16_rx1.A') + ['SMS', ['D', 'Ln9', 'Mmpi-serial'], 'f19_g16_rx1', 'A', None, None, None] + >>> parse_test_name('ERS.fe12_123.JGF.machine_compiler') + ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', None] + >>> parse_test_name('ERS.fe12_123.JGF.machine_compiler.test-mods') + ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', ['test/mods']] + >>> parse_test_name('ERS.fe12_123.JGF.*_compiler.test-mods') + ['ERS', None, 'fe12_123', 'JGF', None, 'compiler', ['test/mods']] + >>> parse_test_name('ERS.fe12_123.JGF.machine_*.test-mods') + ['ERS', None, 'fe12_123', 'JGF', 'machine', None, ['test/mods']] + >>> parse_test_name('ERS.fe12_123.JGF.*_*.test-mods') + ['ERS', None, 'fe12_123', 'JGF', None, None, ['test/mods']] + >>> parse_test_name('ERS.fe12_123.JGF.machine_compiler.test-mods--other-dir-path--and-one-more') + ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', ['test/mods', 'other/dir/path', 'and/one/more']] + >>> parse_test_name('SMS.f19_g16.2000_DATM%QI.A_XLND_SICE_SOCN_XROF_XGLC_SWAV.mach-ine_compiler.test-mods') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + CIMEError: ERROR: Expected 4th item of 'SMS.f19_g16.2000_DATM%QI.A_XLND_SICE_SOCN_XROF_XGLC_SWAV.mach-ine_compiler.test-mods' ('A_XLND_SICE_SOCN_XROF_XGLC_SWAV') to be in form machine_compiler + >>> parse_test_name('SMS.f19_g16.2000_DATM%QI/A_XLND_SICE_SOCN_XROF_XGLC_SWAV.mach-ine_compiler.test-mods') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... 
+ CIMEError: ERROR: Invalid compset name 2000_DATM%QI/A_XLND_SICE_SOCN_XROF_XGLC_SWAV + """ + rv = [None] * 7 + num_dots = test_name.count(".") + + rv[0 : num_dots + 1] = test_name.split(".") + testcase_field_underscores = rv[0].count("_") + rv.insert(1, None) # Make room for caseopts + rv.pop() + if testcase_field_underscores > 0: + full_str = rv[0] + rv[0] = full_str.split("_")[0] + rv[1] = full_str.split("_")[1:] + + if num_dots >= 3: + expect(check_name(rv[3]), "Invalid compset name {}".format(rv[3])) + + expect( + rv[4].count("_") == 1, + "Expected 4th item of '{}' ('{}') to be in form machine_compiler".format( + test_name, rv[4] + ), + ) + rv[4:5] = rv[4].split("_") + if rv[4] == "*": + rv[4] = None + if rv[5] == "*": + rv[5] = None + rv.pop() + + if rv[-1] is not None: + # The last element of the return value - testmods - will be a list of testmods, + # built by separating the TESTMODS component on strings of double hyphens + testmods = rv[-1].split("--") + rv[-1] = [one_testmod.replace("-", "/") for one_testmod in testmods] + + expect( + num_dots <= 4, + "'{}' does not look like a CIME test name, expect TESTCASE.GRID.COMPSET[.MACHINE_COMPILER[.TESTMODS]]".format( + test_name + ), + ) + + return rv + + +def get_full_test_name( + partial_test, + caseopts=None, + grid=None, + compset=None, + machine=None, + compiler=None, + testmods_list=None, + testmods_string=None, +): + """ + Given a partial CIME test name, return in form TESTCASE.GRID.COMPSET.MACHINE_COMPILER[.TESTMODS] + Use the additional args to fill out the name if needed + + Testmods can be provided through one of two arguments, but *not* both: + - testmods_list: a list of one or more testmods (as would be returned by + parse_test_name, for example) + - testmods_string: a single string containing one or more testmods; if there is more + than one, then they should be separated by a string of two hyphens ('--') + + For both testmods_list and testmods_string, any slashes as path separators ('/') are + replaced by hyphens ('-'). 
+ + >>> get_full_test_name("ERS", grid="ne16_fe16", compset="JGF", machine="melvin", compiler="gnu") + 'ERS.ne16_fe16.JGF.melvin_gnu' + >>> get_full_test_name("ERS", caseopts=["D", "P16"], grid="ne16_fe16", compset="JGF", machine="melvin", compiler="gnu") + 'ERS_D_P16.ne16_fe16.JGF.melvin_gnu' + >>> get_full_test_name("ERS.ne16_fe16", compset="JGF", machine="melvin", compiler="gnu") + 'ERS.ne16_fe16.JGF.melvin_gnu' + >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu") + 'ERS.ne16_fe16.JGF.melvin_gnu' + >>> get_full_test_name("ERS.ne16_fe16.JGF.melvin_gnu.mods", machine="melvin", compiler="gnu") + 'ERS.ne16_fe16.JGF.melvin_gnu.mods' + + testmods_list can be a single element: + >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu", testmods_list=["mods/test"]) + 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test' + + testmods_list can also have multiple elements, separated either by slashes or hyphens: + >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu", testmods_list=["mods/test", "mods2/test2/subdir2", "mods3/test3/subdir3"]) + 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3' + >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu", testmods_list=["mods-test", "mods2-test2-subdir2", "mods3-test3-subdir3"]) + 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3' + + The above testmods_list tests should also work with equivalent testmods_string arguments: + >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu", testmods_string="mods/test") + 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test' + >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu", testmods_string="mods/test--mods2/test2/subdir2--mods3/test3/subdir3") + 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3' + >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu", testmods_string="mods-test--mods2-test2-subdir2--mods3-test3-subdir3") + 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3' + + The following tests the consistency check between the test name and various optional arguments: + >>> get_full_test_name("ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3", machine="melvin", compiler="gnu", testmods_list=["mods/test", "mods2/test2/subdir2", "mods3/test3/subdir3"]) + 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3' + """ + ( + partial_testcase, + partial_caseopts, + partial_grid, + partial_compset, + partial_machine, + partial_compiler, + partial_testmods, + ) = parse_test_name(partial_test) + + required_fields = [ + (partial_grid, grid, "grid"), + (partial_compset, compset, "compset"), + (partial_machine, machine, "machine"), + (partial_compiler, compiler, "compiler"), + ] + + result = partial_test + for partial_val, arg_val, name in required_fields: + if partial_val is None: + # Add to result based on args + expect( + arg_val is not None, + "Could not fill-out test name, partial string '{}' had no {} information and you did not provide any".format( + partial_test, name + ), + ) + if name == "machine" and "*_" in result: + result = result.replace("*_", arg_val + "_") + elif name == "compiler" and "_*" in result: + result = result.replace("_*", "_" + arg_val) + else: + result = "{}{}{}".format( + result, "_" if name == "compiler" else ".", arg_val + ) + elif arg_val is not None and partial_val != 
partial_compiler: + expect( + arg_val == partial_val, + "Mismatch in field {}, partial string '{}' indicated it should be '{}' but you provided '{}'".format( + name, partial_test, partial_val, arg_val + ), + ) + + if testmods_string is not None: + expect( + testmods_list is None, + "Cannot provide both testmods_list and testmods_string", + ) + # Convert testmods_string to testmods_list; after this point, the code will work + # the same regardless of whether testmods_string or testmods_list was provided. + testmods_list = testmods_string.split("--") + if partial_testmods is None: + if testmods_list is None: + # No testmods for this test and that's OK + pass + else: + testmods_hyphenated = [ + one_testmod.replace("/", "-") for one_testmod in testmods_list + ] + result += ".{}".format("--".join(testmods_hyphenated)) + elif testmods_list is not None: + expect( + testmods_list == partial_testmods, + "Mismatch in field testmods, partial string '{}' indicated it should be '{}' but you provided '{}'".format( + partial_test, partial_testmods, testmods_list + ), + ) + + if partial_caseopts is None: + if caseopts is None: + # No casemods for this test and that's OK + pass + else: + result = result.replace( + partial_testcase, + "{}_{}".format(partial_testcase, "_".join(caseopts)), + 1, + ) + elif caseopts is not None: + expect( + caseopts == partial_caseopts, + "Mismatch in field caseopts, partial string '{}' indicated it should be '{}' but you provided '{}'".format( + partial_test, partial_caseopts, caseopts + ), + ) + + return result + + +def get_current_branch(repo=None): + """ + Return the name of the current branch for a repository + + >>> if "GIT_BRANCH" in os.environ: + ... get_current_branch() is not None + ... else: + ... os.environ["GIT_BRANCH"] = "foo" + ... 
get_current_branch() == "foo"
+    True
+    """
+    if "GIT_BRANCH" in os.environ:
+        # This approach works better for Jenkins jobs because the Jenkins
+        # git plugin does not use local tracking branches, it just checks out
+        # to a commit
+        branch = os.environ["GIT_BRANCH"]
+        if branch.startswith("origin/"):
+            branch = branch.replace("origin/", "", 1)
+        return branch
+    else:
+        stat, output, _ = run_cmd("git symbolic-ref HEAD", from_dir=repo)
+        if stat != 0:
+            return None
+        else:
+            return output.replace("refs/heads/", "")
+
+
+def get_current_commit(short=False, repo=None, tag=False):
+    """
+    Return the sha1 of the current HEAD commit
+
+    >>> get_current_commit() is not None
+    True
+    """
+    if tag:
+        rc, output, _ = run_cmd(
+            "git describe --tags $(git log -n1 --pretty='%h')", from_dir=repo
+        )
+    else:
+        rc, output, _ = run_cmd(
+            "git rev-parse {} HEAD".format("--short" if short else ""), from_dir=repo
+        )
+
+    return output if rc == 0 else "unknown"
+
+
+def get_model_config_location_within_cime(model=None):
+    model = get_model() if model is None else model
+    return os.path.join("config", model)
+
+
+def get_scripts_root():
+    """
+    Get absolute path to scripts
+
+    >>> os.path.isdir(get_scripts_root())
+    True
+    """
+    return os.path.join(get_cime_root(), "scripts")
+
+
+def get_model_config_root(model=None):
+    """
+    Get absolute path to model config area
+
+    >>> os.environ["CIME_MODEL"] = "e3sm" # Set up the test so it does not depend on external resources
+    >>> os.path.isdir(get_model_config_root())
+    True
+    """
+    model = get_model() if model is None else model
+    return os.path.join(
+        get_cime_root(), "CIME", "data", get_model_config_location_within_cime(model)
+    )
+
+
+def stop_buffering_output():
+    """
+    All stdout, stderr will not be buffered after this is called.
+    """
+    os.environ["PYTHONUNBUFFERED"] = "1"
+
+
+def start_buffering_output():
+    """
+    All stdout, stderr will be buffered after this is called. This is python's
+    default behavior.
+    """
+    sys.stdout.flush()
+    sys.stdout = os.fdopen(sys.stdout.fileno(), "w")
+
+
+def match_any(item, re_counts):
+    """
+    Return true if item matches any regex in re_counts' keys. Increments
+    count if a match was found.
+    """
+    for regex_str in re_counts:
+        regex = re.compile(regex_str)
+        if regex.match(item):
+            re_counts[regex_str] += 1
+            return True
+
+    return False
+
+
+def get_current_submodule_status(recursive=False, repo=None):
+    """
+    Return the sha1s of the currently checked out commit for each submodule,
+    along with the submodule path and the output of git describe for the SHA-1.
+
+    >>> get_current_submodule_status() is not None
+    True
+    """
+    rc, output, _ = run_cmd(
+        "git submodule status {}".format("--recursive" if recursive else ""),
+        from_dir=repo,
+    )
+
+    return output if rc == 0 else "unknown"
+
+
+def copy_globs(globs_to_copy, output_directory, lid=None):
+    """
+    Takes a list of globs and copies all files to `output_directory`.
+
+    Hidden files become unhidden, i.e. the leading dot is removed.
+
+    The output filename is derived from the basename of the input path and can
+    be appended with the `lid`.
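+
+    For example (illustrative names), with lid="230101_120000" a matched file
+    ".gitignore" would be copied to "<output_directory>/gitignore.230101_120000".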
+ + """ + for glob_to_copy in globs_to_copy: + for item in glob.glob(glob_to_copy): + item_basename = os.path.basename(item).lstrip(".") + + if lid is None: + filename = item_basename + else: + filename = f"{item_basename}.{lid}" + + safe_copy( + item, os.path.join(output_directory, filename), preserve_meta=False + ) + + +def copy_over_file(src_path, tgt_path): + """ + Copy a file over a file that already exists + """ + st = os.stat(tgt_path) + owner_uid = st.st_uid + + # Handle read-only files if possible + if not os.access(tgt_path, os.W_OK): + if owner_uid == os.getuid(): + # I am the owner, make writeable + os.chmod(tgt_path, st.st_mode | statlib.S_IWRITE) + else: + # I won't be able to copy this file + raise OSError( + "Cannot copy over file {}, it is readonly and you are not the owner".format( + tgt_path + ) + ) + + if owner_uid == os.getuid(): + # I am the owner, copy file contents, permissions, and metadata + try: + shutil.copy2(src_path, tgt_path) + # ignore same file error + except shutil.SameFileError: + pass + + else: + # I am not the owner, just copy file contents + shutil.copyfile(src_path, tgt_path) + + +def safe_copy(src_path, tgt_path, preserve_meta=True): + """ + A flexbile and safe copy routine. Will try to copy file and metadata, but this + can fail if the current user doesn't own the tgt file. A fallback data-only copy is + attempted in this case. Works even if overwriting a read-only file. + + tgt_path can be a directory, src_path must be a file + + most of the complexity here is handling the case where the tgt_path file already + exists. This problem does not exist for the tree operations so we don't need to wrap those. + + preserve_meta toggles if file meta-data, like permissions, should be preserved. If you are + copying baseline files, you should be within a SharedArea context manager and preserve_meta + should be false so that the umask set up by SharedArea can take affect regardless of the + permissions of the src files. + """ + + tgt_path = ( + os.path.join(tgt_path, os.path.basename(src_path)) + if os.path.isdir(tgt_path) + else tgt_path + ) + + # Handle pre-existing file + try: + if os.path.isfile(tgt_path): + copy_over_file(src_path, tgt_path) + + elif preserve_meta: + # We are making a new file, copy file contents, permissions, and metadata. + # This can fail if the underlying directory is not writable by current user. + shutil.copy2(src_path, tgt_path) + + else: + shutil.copy(src_path, tgt_path) + + except OSError: + # Some systems get weird OSErrors when using shutil copy, try an + # old-fashioned cp as a last resort + cp_path = shutil.which("cp") + # cp is not in PATH, we must give up and raise the original err + if cp_path is None: + raise + run_cmd_no_fail(f"{cp_path} -f {src_path} {tgt_path}") + + # If src file was executable, then the tgt file should be too + st = os.stat(tgt_path) + if os.access(src_path, os.X_OK) and st.st_uid == os.getuid(): + os.chmod( + tgt_path, st.st_mode | statlib.S_IXUSR | statlib.S_IXGRP | statlib.S_IXOTH + ) + + +def safe_recursive_copy(src_dir, tgt_dir, file_map): + """ + Copies a set of files from one dir to another. Works even if overwriting a + read-only file. Files can be relative paths and the relative path will be + matched on the tgt side. 
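+
+    For example (illustrative names), file_map=[("a.txt", "b.txt")] copies
+    <src_dir>/a.txt to <tgt_dir>/b.txt.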
+
+    """
+    for src_file, tgt_file in file_map:
+        full_tgt = os.path.join(tgt_dir, tgt_file)
+        full_src = (
+            src_file if os.path.isabs(src_file) else os.path.join(src_dir, src_file)
+        )
+        expect(
+            os.path.isfile(full_src),
+            "Source dir '{}' missing file '{}'".format(src_dir, src_file),
+        )
+        safe_copy(full_src, full_tgt)
+
+
+def symlink_force(target, link_name):
+    """
+    Create a symlink at link_name pointing to target. Unlike the standard
+    os.symlink, this will work even if link_name already exists (in
+    which case link_name will be overwritten).
+    """
+    try:
+        os.symlink(target, link_name)
+    except OSError as e:
+        if e.errno == errno.EEXIST:
+            os.remove(link_name)
+            os.symlink(target, link_name)
+        else:
+            raise e
+
+
+def find_proc_id(proc_name=None, children_only=False, of_parent=None):
+    """
+    Children implies recursive.
+    """
+    expect(
+        proc_name is not None or children_only,
+        "Must provide proc_name if not searching for children",
+    )
+    expect(
+        not (of_parent is not None and not children_only),
+        "of_parent only used with children_only",
+    )
+
+    parent = of_parent if of_parent is not None else os.getpid()
+
+    pgrep_cmd = "pgrep {} {}".format(
+        proc_name if proc_name is not None else "",
+        "-P {:d}".format(parent) if children_only else "",
+    )
+    stat, output, errput = run_cmd(pgrep_cmd)
+    expect(stat in [0, 1], "pgrep failed with error: '{}'".format(errput))
+
+    rv = set([int(item.strip()) for item in output.splitlines()])
+    if children_only:
+        pgrep_cmd = "pgrep -P {}".format(parent)
+        stat, output, errput = run_cmd(pgrep_cmd)
+        expect(stat in [0, 1], "pgrep failed with error: '{}'".format(errput))
+
+        for child in output.splitlines():
+            rv = rv.union(
+                set(find_proc_id(proc_name, children_only, int(child.strip())))
+            )
+
+    return list(rv)
+
+
+def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False):
+    """
+    Get a string representing the current time (local time by default, UTC if
+    utc_time=True) in format: YYYYMMDD_HHMMSS
+
+    The format can be changed if needed.
+    """
+    if utc_time:
+        time_tuple = time.gmtime()
+    else:
+        time_tuple = time.localtime()
+    return time.strftime(timestamp_format, time_tuple)
+
+
+def get_project(machobj=None):
+    """
+    Hierarchy for choosing PROJECT:
+    0. Command line flag to create_newcase or create_test
+    1. Environment variable PROJECT
+    2. Environment variable ACCOUNT (this is for backward compatibility)
+    3. 
File $HOME/.cime/config (this is new) + 4 File $HOME/.cesm_proj (this is for backward compatibility) + 5 config_machines.xml (if machobj provided) + """ + project = os.environ.get("PROJECT") + if project is not None: + logger.info("Using project from env PROJECT: " + project) + return project + project = os.environ.get("ACCOUNT") + if project is not None: + logger.info("Using project from env ACCOUNT: " + project) + return project + + cime_config = get_cime_config() + if cime_config.has_option("main", "PROJECT"): + project = cime_config.get("main", "PROJECT") + if project is not None: + logger.info("Using project from .cime/config: " + project) + return project + + projectfile = os.path.abspath(os.path.join(os.path.expanduser("~"), ".cesm_proj")) + if os.path.isfile(projectfile): + with open(projectfile, "r") as myfile: + for line in myfile: + project = line.rstrip() + if not project.startswith("#"): + break + if project is not None: + logger.info("Using project from .cesm_proj: " + project) + cime_config.set("main", "PROJECT", project) + return project + + if machobj is not None: + project = machobj.get_value("PROJECT") + if project is not None: + logger.info("Using project from config_machines.xml: " + project) + return project + + logger.info("No project info available") + return None + + +def get_charge_account(machobj=None, project=None): + """ + Hierarchy for choosing CHARGE_ACCOUNT: + 1. Environment variable CHARGE_ACCOUNT + 2. File $HOME/.cime/config + 3. config_machines.xml (if machobj provided) + 4. default to same value as PROJECT + + >>> import CIME + >>> import CIME.XML.machines + >>> machobj = CIME.XML.machines.Machines(machine="ubuntu-latest") + >>> project = get_project(machobj) + >>> charge_account = get_charge_account(machobj, project) + >>> project == charge_account + True + >>> os.environ["CHARGE_ACCOUNT"] = "ChargeAccount" + >>> get_charge_account(machobj, project) + 'ChargeAccount' + >>> del os.environ["CHARGE_ACCOUNT"] + """ + charge_account = os.environ.get("CHARGE_ACCOUNT") + if charge_account is not None: + logger.info("Using charge_account from env CHARGE_ACCOUNT: " + charge_account) + return charge_account + + cime_config = get_cime_config() + if cime_config.has_option("main", "CHARGE_ACCOUNT"): + charge_account = cime_config.get("main", "CHARGE_ACCOUNT") + if charge_account is not None: + logger.info("Using charge_account from .cime/config: " + charge_account) + return charge_account + + if machobj is not None: + charge_account = machobj.get_value("CHARGE_ACCOUNT") + if charge_account is not None: + logger.info( + "Using charge_account from config_machines.xml: " + charge_account + ) + return charge_account + + logger.info("No charge_account info available, using value from PROJECT") + return project + + +def find_files(rootdir, pattern): + """ + recursively find all files matching a pattern + """ + result = [] + for root, _, files in os.walk(rootdir): + for filename in files: + if fnmatch.fnmatch(filename, pattern): + result.append(os.path.join(root, filename)) + + return result + + +def setup_standard_logging_options(parser): + group = parser.add_argument_group("Logging options") + + helpfile = os.path.join(os.getcwd(), os.path.basename("{}.log".format(sys.argv[0]))) + + group.add_argument( + "-d", + "--debug", + action="store_true", + help="Print debug information (very verbose) to file {}".format(helpfile), + ) + + group.add_argument( + "-v", + "--verbose", + action="store_true", + help="Add additional context (time and file) to log messages", + ) + + 
group.add_argument( + "-s", + "--silent", + action="store_true", + help="Print only warnings and error messages", + ) + + +class _LessThanFilter(logging.Filter): + def __init__(self, exclusive_maximum, name=""): + super(_LessThanFilter, self).__init__(name) + self.max_level = exclusive_maximum + + def filter(self, record): + # non-zero return means we log this message + return 1 if record.levelno < self.max_level else 0 + + +def configure_logging(verbose, debug, silent, **_): + root_logger = logging.getLogger() + + verbose_formatter = logging.Formatter( + fmt="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M" + ) + + # Change info to go to stdout. This handle applies to INFO exclusively + stdout_stream_handler = logging.StreamHandler(stream=sys.stdout) + stdout_stream_handler.setLevel(logging.INFO) + stdout_stream_handler.addFilter(_LessThanFilter(logging.WARNING)) + + # Change warnings and above to go to stderr + stderr_stream_handler = logging.StreamHandler(stream=sys.stderr) + stderr_stream_handler.setLevel(logging.WARNING) + + # --verbose adds to the message format but does not impact the log level + if verbose: + stdout_stream_handler.setFormatter(verbose_formatter) + stderr_stream_handler.setFormatter(verbose_formatter) + + root_logger.addHandler(stdout_stream_handler) + root_logger.addHandler(stderr_stream_handler) + + if debug: + # Set up log file to catch ALL logging records + log_file = "{}.log".format(os.path.basename(sys.argv[0])) + + debug_log_handler = logging.FileHandler(log_file, mode="w") + debug_log_handler.setFormatter(verbose_formatter) + debug_log_handler.setLevel(logging.DEBUG) + root_logger.addHandler(debug_log_handler) + + root_logger.setLevel(logging.DEBUG) + elif silent: + root_logger.setLevel(logging.WARN) + else: + root_logger.setLevel(logging.INFO) + + +def parse_args_and_handle_standard_logging_options(args, parser=None): + """ + Guide to logging in CIME. + + logger.debug -> Verbose/detailed output, use for debugging, off by default. Goes to a .log file + logger.info -> Goes to stdout (and log if --debug). Use for normal program output + logger.warning -> Goes to stderr (and log if --debug). Use for minor problems + logger.error -> Goes to stderr (and log if --debug) + """ + # scripts_regression_tests is the only thing that should pass a None argument in parser + if parser is not None: + if "--help" not in args[1:]: + _check_for_invalid_args(args[1:]) + args = parser.parse_args(args[1:]) + + configure_logging(args.verbose, args.debug, args.silent) + + return args + + +def get_logging_options(): + """ + Use to pass same logging options as was used for current + executable to subprocesses. + """ + root_logger = logging.getLogger() + + if root_logger.level == logging.DEBUG: + return "--debug" + elif root_logger.level == logging.WARN: + return "--silent" + else: + return "" + + +def convert_to_type(value, type_str, vid=""): + """ + Convert value from string to another type. + vid is only for generating better error messages. 
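+
+    Illustrative examples:
+
+    >>> convert_to_type("4", "integer")
+    4
+    >>> convert_to_type("TRUE", "logical")
+    True
+    >>> convert_to_type("2.5", "real")
+    2.5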
+ """ + if value is not None: + if type_str == "char": + pass + + elif type_str == "integer": + try: + value = int(eval(value)) + except Exception: + expect( + False, + "Entry {} was listed as type int but value '{}' is not valid int".format( + vid, value + ), + ) + + elif type_str == "logical": + expect( + value.upper() in ["TRUE", "FALSE"], + "Entry {} was listed as type logical but had val '{}' instead of TRUE or FALSE".format( + vid, value + ), + ) + value = value.upper() == "TRUE" + + elif type_str == "real": + try: + value = float(value) + except Exception: + expect( + False, + "Entry {} was listed as type real but value '{}' is not valid real".format( + vid, value + ), + ) + + else: + expect(False, "Unknown type '{}'".format(type_str)) + + return value + + +def convert_to_unknown_type(value): + """ + Convert value to it's real type by probing conversions. + """ + if value is not None: + # Attempt to convert to logical + if value.upper() in ["TRUE", "FALSE"]: + return value.upper() == "TRUE" + + # Attempt to convert to integer + try: + value = int(eval(value)) + except Exception: + pass + else: + return value + + # Attempt to convert to float + try: + value = float(value) + except Exception: + pass + else: + return value + + # Just treat as string + + return value + + +def convert_to_string(value, type_str=None, vid=""): + """ + Convert value back to string. + vid is only for generating better error messages. + >>> convert_to_string(6, type_str="integer") == '6' + True + >>> convert_to_string('6', type_str="integer") == '6' + True + >>> convert_to_string('6.0', type_str="real") == '6.0' + True + >>> convert_to_string(6.01, type_str="real") == '6.01' + True + """ + if value is not None and not isinstance(value, str): + if type_str == "char": + expect( + isinstance(value, str), + "Wrong type for entry id '{}'".format(vid), + ) + elif type_str == "integer": + expect( + isinstance(value, int), + "Wrong type for entry id '{}'".format(vid), + ) + value = str(value) + elif type_str == "logical": + expect(type(value) is bool, "Wrong type for entry id '{}'".format(vid)) + value = "TRUE" if value else "FALSE" + elif type_str == "real": + expect(type(value) is float, "Wrong type for entry id '{}'".format(vid)) + value = str(value) + else: + expect(False, "Unknown type '{}'".format(type_str)) + if value is None: + value = "" + logger.debug("Attempt to convert None value for vid {} {}".format(vid, value)) + + return value + + +def convert_to_seconds(time_str): + """ + Convert time value in [[HH:]MM:]SS to seconds + + We assume that XX:YY is likely to be HH:MM, not MM:SS + + >>> convert_to_seconds("42") + 42 + >>> convert_to_seconds("01:01:01") + 3661 + >>> convert_to_seconds("01:01") + 3660 + """ + components = time_str.split(":") + expect(len(components) < 4, "Unusual time string: '{}'".format(time_str)) + + components.reverse() + result = 0 + starting_exp = 1 if len(components) == 2 else 0 + for idx, component in enumerate(components): + result += int(component) * pow(60, idx + starting_exp) + + return result + + +def convert_to_babylonian_time(seconds): + """ + Convert time value to seconds to HH:MM:SS + + >>> convert_to_babylonian_time(3661) + '01:01:01' + >>> convert_to_babylonian_time(360000) + '100:00:00' + """ + hours = int(seconds / 3600) + seconds %= 3600 + minutes = int(seconds / 60) + seconds %= 60 + + return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds) + + +def get_time_in_seconds(timeval, unit): + """ + Convert a time from 'unit' to seconds + """ + if "nyear" in unit: + 
dmult = 365 * 24 * 3600 + elif "nmonth" in unit: + dmult = 30 * 24 * 3600 + elif "nday" in unit: + dmult = 24 * 3600 + elif "nhour" in unit: + dmult = 3600 + elif "nminute" in unit: + dmult = 60 + else: + dmult = 1 + + return dmult * timeval + + +def compute_total_time(job_cost_map, proc_pool): + """ + Given a map: jobname -> (procs, est-time), return a total time + estimate for a given processor pool size + + >>> job_cost_map = {"A" : (4, 3000), "B" : (2, 1000), "C" : (8, 2000), "D" : (1, 800)} + >>> compute_total_time(job_cost_map, 8) + 5160 + >>> compute_total_time(job_cost_map, 12) + 3180 + >>> compute_total_time(job_cost_map, 16) + 3060 + """ + current_time = 0 + waiting_jobs = dict(job_cost_map) + running_jobs = {} # name -> (procs, est-time, start-time) + while len(waiting_jobs) > 0 or len(running_jobs) > 0: + launched_jobs = [] + for jobname, data in waiting_jobs.items(): + procs_for_job, time_for_job = data + if procs_for_job <= proc_pool: + proc_pool -= procs_for_job + launched_jobs.append(jobname) + running_jobs[jobname] = (procs_for_job, time_for_job, current_time) + + for launched_job in launched_jobs: + del waiting_jobs[launched_job] + + completed_jobs = [] + for jobname, data in running_jobs.items(): + procs_for_job, time_for_job, time_started = data + if (current_time - time_started) >= time_for_job: + proc_pool += procs_for_job + completed_jobs.append(jobname) + + for completed_job in completed_jobs: + del running_jobs[completed_job] + + current_time += 60 # minute time step + + return current_time + + +def format_time(time_format, input_format, input_time): + """ + Converts the string input_time from input_format to time_format + Valid format specifiers are "%H", "%M", and "%S" + % signs must be followed by an H, M, or S and then a separator + Separators can be any string without digits or a % sign + Each specifier can occur more than once in the input_format, + but only the first occurence will be used. 
+ An example of a valid format: "%H:%M:%S" + Unlike strptime, this does support %H >= 24 + + >>> format_time("%H:%M:%S", "%H", "43") + '43:00:00' + >>> format_time("%H %M", "%M,%S", "59,59") + '0 59' + >>> format_time("%H, %S", "%H:%M:%S", "2:43:9") + '2, 09' + """ + input_fields = input_format.split("%") + expect( + input_fields[0] == input_time[: len(input_fields[0])], + "Failed to parse the input time '{}'; does not match the header string '{}'".format( + input_time, input_format + ), + ) + input_time = input_time[len(input_fields[0]) :] + timespec = {"H": None, "M": None, "S": None} + maxvals = {"M": 60, "S": 60} + DIGIT_CHECK = re.compile("[^0-9]*") + # Loop invariants given input follows the specs: + # field starts with H, M, or S + # input_time starts with a number corresponding with the start of field + for field in input_fields[1:]: + # Find all of the digits at the start of the string + spec = field[0] + value_re = re.match(r"\d*", input_time) + expect( + value_re is not None, + "Failed to parse the input time for the '{}' specifier, expected an integer".format( + spec + ), + ) + value = value_re.group(0) + expect(spec in timespec, "Unknown time specifier '" + spec + "'") + # Don't do anything if the time field is already specified + if timespec[spec] is None: + # Verify we aren't exceeding the maximum value + if spec in maxvals: + expect( + int(value) < maxvals[spec], + "Failed to parse the '{}' specifier: A value less than {:d} is expected".format( + spec, maxvals[spec] + ), + ) + timespec[spec] = value + input_time = input_time[len(value) :] + # Check for the separator string + expect( + len(re.match(DIGIT_CHECK, field).group(0)) == len(field), + "Numbers are not permissible in separator strings", + ) + expect( + input_time[: len(field) - 1] == field[1:], + "The separator string ({}) doesn't match '{}'".format( + field[1:], input_time + ), + ) + input_time = input_time[len(field) - 1 :] + output_fields = time_format.split("%") + output_time = output_fields[0] + # Used when a value isn't given + min_len_spec = {"H": 1, "M": 2, "S": 2} + # Loop invariants given input follows the specs: + # field starts with H, M, or S + # output_time + for field in output_fields[1:]: + expect( + field == output_fields[-1] or len(field) > 1, + "Separator strings are required to properly parse times", + ) + spec = field[0] + expect(spec in timespec, "Unknown time specifier '" + spec + "'") + if timespec[spec] is not None: + output_time += "0" * (min_len_spec[spec] - len(timespec[spec])) + output_time += timespec[spec] + else: + output_time += "0" * min_len_spec[spec] + output_time += field[1:] + return output_time + + +def does_file_have_string(filepath, text): + """ + Does the text string appear in the filepath file + """ + return os.path.isfile(filepath) and text in open(filepath).read() + + +def is_last_process_complete(filepath, expect_text, fail_text): + """ + Search the filepath in reverse order looking for expect_text + before finding fail_text. This utility is used by archive_metadata. 
+ + """ + complete = False + fh = open(filepath, "r") + fb = fh.readlines() + + rfb = "".join(reversed(fb)) + + findex = re.search(fail_text, rfb) + if findex is None: + findex = 0 + else: + findex = findex.start() + + eindex = re.search(expect_text, rfb) + if eindex is None: + eindex = 0 + else: + eindex = eindex.start() + + if findex > eindex: + complete = True + + return complete + + +def transform_vars(text, case=None, subgroup=None, overrides=None, default=None): + """ + Do the variable substitution for any variables that need transforms + recursively. + + >>> transform_vars("{{ cesm_stdout }}", default="cesm.stdout") + 'cesm.stdout' + >>> member_store = lambda : None + >>> member_store.foo = "hi" + >>> transform_vars("I say {{ foo }}", overrides={"foo":"hi"}) + 'I say hi' + """ + directive_re = re.compile(r"{{ (\w+) }}", flags=re.M) + # loop through directive text, replacing each string enclosed with + # template characters with the necessary values. + while directive_re.search(text): + m = directive_re.search(text) + variable = m.groups()[0] + whole_match = m.group() + if ( + overrides is not None + and variable.lower() in overrides + and overrides[variable.lower()] is not None + ): + repl = overrides[variable.lower()] + logger.debug( + "from overrides: in {}, replacing {} with {}".format( + text, whole_match, str(repl) + ) + ) + text = text.replace(whole_match, str(repl)) + + elif ( + case is not None + and hasattr(case, variable.lower()) + and getattr(case, variable.lower()) is not None + ): + repl = getattr(case, variable.lower()) + logger.debug( + "from case members: in {}, replacing {} with {}".format( + text, whole_match, str(repl) + ) + ) + text = text.replace(whole_match, str(repl)) + + elif ( + case is not None + and case.get_value(variable.upper(), subgroup=subgroup) is not None + ): + repl = case.get_value(variable.upper(), subgroup=subgroup) + logger.debug( + "from case: in {}, replacing {} with {}".format( + text, whole_match, str(repl) + ) + ) + text = text.replace(whole_match, str(repl)) + + elif default is not None: + logger.debug( + "from default: in {}, replacing {} with {}".format( + text, whole_match, str(default) + ) + ) + text = text.replace(whole_match, default) + + else: + # If no queue exists, then the directive '-q' by itself will cause an error + if "-q {{ queue }}" in text: + text = "" + else: + logger.warning("Could not replace variable '{}'".format(variable)) + text = text.replace(whole_match, "") + + return text + + +def wait_for_unlocked(filepath): + locked = True + file_object = None + while locked: + try: + buffer_size = 8 + # Opening file in append mode and read the first 8 characters. + file_object = open(filepath, "a", buffer_size) + if file_object: + locked = False + except IOError: + locked = True + time.sleep(1) + finally: + if file_object: + file_object.close() + + +def gunzip_existing_file(filepath): + with gzip.open(filepath, "rb") as fd: + return fd.read() + + +def gzip_existing_file(filepath): + """ + Gzips an existing file, removes the unzipped version, returns path to zip file. + Note the that the timestamp of the original file will be maintained in + the zipped file. 
+ + >>> import tempfile + >>> fd, filename = tempfile.mkstemp(text=True) + >>> _ = os.write(fd, b"Hello World") + >>> os.close(fd) + >>> gzfile = gzip_existing_file(filename) + >>> gunzip_existing_file(gzfile) == b'Hello World' + True + >>> os.remove(gzfile) + """ + expect(os.path.exists(filepath), "{} does not exists".format(filepath)) + + st = os.stat(filepath) + orig_atime, orig_mtime = st[statlib.ST_ATIME], st[statlib.ST_MTIME] + + gzpath = "{}.gz".format(filepath) + with open(filepath, "rb") as f_in: + with gzip.open(gzpath, "wb") as f_out: + shutil.copyfileobj(f_in, f_out) + + os.remove(filepath) + + os.utime(gzpath, (orig_atime, orig_mtime)) + + return gzpath + + +def touch(fname): + if os.path.exists(fname): + os.utime(fname, None) + else: + open(fname, "a").close() + + +def find_system_test(testname, case): + """ + Find and import the test matching testname + Look through the paths set in config_files.xml variable SYSTEM_TESTS_DIR + for components used in this case to find a test matching testname. Add the + path to that directory to sys.path if its not there and return the test object + Fail if the test is not found in any of the paths. + """ + from importlib import import_module + + system_test_path = None + if testname.startswith("TEST"): + system_test_path = "CIME.SystemTests.system_tests_common.{}".format(testname) + else: + components = ["any"] + components.extend(case.get_compset_components()) + fdir = [] + for component in components: + tdir = case.get_value( + "SYSTEM_TESTS_DIR", attribute={"component": component} + ) + if tdir is not None: + tdir = os.path.abspath(tdir) + system_test_file = os.path.join(tdir, "{}.py".format(testname.lower())) + if os.path.isfile(system_test_file): + fdir.append(tdir) + logger.debug("found " + system_test_file) + if component == "any": + system_test_path = "CIME.SystemTests.{}.{}".format( + testname.lower(), testname + ) + else: + system_test_dir = os.path.dirname(system_test_file) + if system_test_dir not in sys.path: + sys.path.append(system_test_dir) + system_test_path = "{}.{}".format(testname.lower(), testname) + expect(len(fdir) > 0, "Test {} not found, aborting".format(testname)) + expect( + len(fdir) == 1, + "Test {} found in multiple locations {}, aborting".format(testname, fdir), + ) + expect(system_test_path is not None, "No test {} found".format(testname)) + + path, m = system_test_path.rsplit(".", 1) + mod = import_module(path) + return getattr(mod, m) + + +def _get_most_recent_lid_impl(files): + """ + >>> files = ['/foo/bar/e3sm.log.20160905_111212', '/foo/bar/e3sm.log.20160906_111212.gz'] + >>> _get_most_recent_lid_impl(files) + ['20160905_111212', '20160906_111212'] + >>> files = ['/foo/bar/e3sm.log.20160905_111212', '/foo/bar/e3sm.log.20160905_111212.gz'] + >>> _get_most_recent_lid_impl(files) + ['20160905_111212'] + """ + results = [] + for item in files: + basename = os.path.basename(item) + components = basename.split(".") + if len(components) > 2: + results.append(components[2]) + else: + logger.warning( + "Apparent model log file '{}' did not conform to expected name format".format( + item + ) + ) + + return sorted(list(set(results))) + + +def ls_sorted_by_mtime(path): + """return list of path sorted by timestamp oldest first""" + mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime + return list(sorted(os.listdir(path), key=mtime)) + + +def get_lids(case): + model = case.get_value("MODEL") + rundir = case.get_value("RUNDIR") + return _get_most_recent_lid_impl(glob.glob("{}/{}.log*".format(rundir, model))) + + 
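+# Illustrative values: new_lid() below returns something like "240131-142530",
+# or "<batch job id>.240131-142530" when running inside a batch job; the chosen
+# LID is also exported as os.environ["LID"].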
+def new_lid(case=None): + lid = time.strftime("%y%m%d-%H%M%S") + jobid = batch_jobid(case=case) + if jobid is not None: + lid = jobid + "." + lid + os.environ["LID"] = lid + return lid + + +def batch_jobid(case=None): + jobid = os.environ.get("PBS_JOBID") + if jobid is None: + jobid = os.environ.get("SLURM_JOB_ID") + if jobid is None: + jobid = os.environ.get("LSB_JOBID") + if jobid is None: + jobid = os.environ.get("COBALT_JOBID") + if case: + jobid = case.get_job_id(jobid) + return jobid + + +def analyze_build_log(comp, log, compiler): + """ + Capture and report warning count, + capture and report errors and undefined references. + """ + warncnt = 0 + if "intel" in compiler: + warn_re = re.compile(r" warning #") + error_re = re.compile(r" error #") + undefined_re = re.compile(r" undefined reference to ") + elif "gnu" in compiler or "nag" in compiler: + warn_re = re.compile(r"^Warning: ") + error_re = re.compile(r"^Error: ") + undefined_re = re.compile(r" undefined reference to ") + else: + # don't know enough about this compiler + return + + with open(log, "r") as fd: + for line in fd: + if re.search(warn_re, line): + warncnt += 1 + if re.search(error_re, line): + logger.warning(line) + if re.search(undefined_re, line): + logger.warning(line) + + if warncnt > 0: + logger.info( + "Component {} build complete with {} warnings".format(comp, warncnt) + ) + + +def is_python_executable(filepath): + first_line = None + if os.path.isfile(filepath): + with open(filepath, "rt") as f: + try: + first_line = f.readline() + except Exception: + pass + + return ( + first_line is not None + and first_line.startswith("#!") + and "python" in first_line + ) + return False + + +def get_umask(): + current_umask = os.umask(0) + os.umask(current_umask) + + return current_umask + + +def stringify_bool(val): + val = False if val is None else val + expect(type(val) is bool, "Wrong type for val '{}'".format(repr(val))) + return "TRUE" if val else "FALSE" + + +def indent_string(the_string, indent_level): + """Indents the given string by a given number of spaces + + Args: + the_string: str + indent_level: int + + Returns a new string that is the same as the_string, except that + each line is indented by 'indent_level' spaces. + + In python3, this can be done with textwrap.indent. + """ + + lines = the_string.splitlines(True) + padding = " " * indent_level + lines_indented = [padding + line for line in lines] + return "".join(lines_indented) + + +def verbatim_success_msg(return_val): + return return_val + + +def _check_for_invalid_args(args): + # Prevent circular import + from CIME.config import Config + + # TODO Is this really model specific + if Config.instance().check_invalid_args: + for arg in args: + # if arg contains a space then it was originally quoted and we can ignore it here. + if " " in arg or arg.startswith("--"): + continue + if arg.startswith("-") and len(arg) > 2: + sys.stderr.write( + 'WARNING: The {} argument is deprecated. Multi-character arguments should begin with "--" and single character with "-"\n Use --help for a complete list of available options\n'.format( + arg + ) + ) + + +def add_mail_type_args(parser): + parser.add_argument("--mail-user", help="Email to be used for batch notification.") + + parser.add_argument( + "-M", + "--mail-type", + action="append", + help="When to send user email. 
Options are: never, all, begin, end, fail.\n"
+        "You can specify multiple types with either comma-separated args or multiple -M flags.",
+    )
+
+
+def resolve_mail_type_args(args):
+    if args.mail_type is not None:
+        resolved_mail_types = []
+        for mail_type in args.mail_type:
+            resolved_mail_types.extend(mail_type.split(","))
+
+        for mail_type in resolved_mail_types:
+            expect(
+                mail_type in ("never", "all", "begin", "end", "fail"),
+                "Unsupported mail-type '{}'".format(mail_type),
+            )
+
+        args.mail_type = resolved_mail_types
+
+
+def copyifnewer(src, dest):
+    """copy src to dest if dest does not exist or its contents differ from src"""
+    if not os.path.isfile(dest) or not filecmp.cmp(src, dest):
+        safe_copy(src, dest)
+
+
+class SharedArea(object):
+    """
+    Enable 0002 umask within this manager
+    """
+
+    def __init__(self, new_perms=0o002):
+        self._orig_umask = None
+        self._new_perms = new_perms
+
+    def __enter__(self):
+        self._orig_umask = os.umask(self._new_perms)
+
+    def __exit__(self, *_):
+        os.umask(self._orig_umask)
+
+
+class Timeout(object):
+    """
+    A context manager that implements a timeout. By default it
+    will raise an exception, but a custom function call can be provided.
+    Providing None as seconds makes this class a no-op
+    """
+
+    def __init__(self, seconds, action=None):
+        self._seconds = seconds
+        self._action = action if action is not None else self._handle_timeout
+
+    def _handle_timeout(self, *_):
+        raise RuntimeError("Timeout expired")
+
+    def __enter__(self):
+        if self._seconds is not None:
+            signal.signal(signal.SIGALRM, self._action)
+            signal.alarm(self._seconds)
+
+    def __exit__(self, *_):
+        if self._seconds is not None:
+            signal.alarm(0)
+
+
+def filter_unicode(unistr):
+    """
+    Sometimes unicode chars can cause problems
+    """
+    return "".join([i if ord(i) < 128 else " " for i in unistr])
+
+
+def run_bld_cmd_ensure_logging(cmd, arg_logger, from_dir=None, timeout=None):
+    arg_logger.info(cmd)
+    stat, output, errput = run_cmd(cmd, from_dir=from_dir, timeout=timeout)
+    arg_logger.info(output)
+    arg_logger.info(errput)
+    expect(stat == 0, filter_unicode(errput))
+
+
+def get_batch_script_for_job(job, hidden=None):
+    # this if statement is for backward compatibility
+    if hidden is None:
+        hidden = job != "case.st_archive"
+    return "." + job if hidden else job
+
+
+def string_in_list(_string, _list):
+    """Case insensitive search for string in list
+    returns the matching list value
+    >>> string_in_list("Brack",["bar", "bracK", "foo"])
+    'bracK'
+    >>> string_in_list("foo", ["FFO", "FOO", "foo2", "foo3"])
+    'FOO'
+    >>> string_in_list("foo", ["FFO", "foo2", "foo3"])
+    """
+    for x in _list:
+        if _string.lower() == x.lower():
+            return x
+    return None
+
+
+def model_log(model, arg_logger, msg, debug_others=True):
+    if get_model() == model:
+        arg_logger.info(msg)
+    elif debug_others:
+        arg_logger.debug(msg)
+
+
+def get_htmlroot(machobj=None):
+    """Get location for test HTML output
+
+    Hierarchy for choosing CIME_HTML_ROOT:
+    0. Environment variable CIME_HTML_ROOT
+    1. File $HOME/.cime/config
+    2. 
config_machines.xml (if machobj provided) + """ + htmlroot = os.environ.get("CIME_HTML_ROOT") + if htmlroot is not None: + logger.info("Using htmlroot from env CIME_HTML_ROOT: {}".format(htmlroot)) + return htmlroot + + cime_config = get_cime_config() + if cime_config.has_option("main", "CIME_HTML_ROOT"): + htmlroot = cime_config.get("main", "CIME_HTML_ROOT") + if htmlroot is not None: + logger.info("Using htmlroot from .cime/config: {}".format(htmlroot)) + return htmlroot + + if machobj is not None: + htmlroot = machobj.get_value("CIME_HTML_ROOT") + if htmlroot is not None: + logger.info("Using htmlroot from config_machines.xml: {}".format(htmlroot)) + return htmlroot + + logger.info("No htmlroot info available") + return None + + +def get_urlroot(machobj=None): + """Get URL to htmlroot + + Hierarchy for choosing CIME_URL_ROOT: + 0. Environment variable CIME_URL_ROOT + 1. File $HOME/.cime/config + 2. config_machines.xml (if machobj provided) + """ + urlroot = os.environ.get("CIME_URL_ROOT") + if urlroot is not None: + logger.info("Using urlroot from env CIME_URL_ROOT: {}".format(urlroot)) + return urlroot + + cime_config = get_cime_config() + if cime_config.has_option("main", "CIME_URL_ROOT"): + urlroot = cime_config.get("main", "CIME_URL_ROOT") + if urlroot is not None: + logger.info("Using urlroot from .cime/config: {}".format(urlroot)) + return urlroot + + if machobj is not None: + urlroot = machobj.get_value("CIME_URL_ROOT") + if urlroot is not None: + logger.info("Using urlroot from config_machines.xml: {}".format(urlroot)) + return urlroot + + logger.info("No urlroot info available") + return None + + +def clear_folder(_dir): + if os.path.exists(_dir): + for the_file in os.listdir(_dir): + file_path = os.path.join(_dir, the_file) + try: + if os.path.isfile(file_path): + os.unlink(file_path) + else: + clear_folder(file_path) + os.rmdir(file_path) + except Exception as e: + print(e) + + +def add_flag_to_cmd(flag, val): + """ + Given a flag and value for a shell command, return a string + + >>> add_flag_to_cmd("-f", "hi") + '-f hi' + >>> add_flag_to_cmd("--foo", 42) + '--foo 42' + >>> add_flag_to_cmd("--foo=", 42) + '--foo=42' + >>> add_flag_to_cmd("--foo:", 42) + '--foo:42' + >>> add_flag_to_cmd("--foo:", " hi ") + '--foo:hi' + """ + no_space_chars = "=:" + no_space = False + for item in no_space_chars: + if flag.endswith(item): + no_space = True + + separator = "" if no_space else " " + return "{}{}{}".format(flag, separator, str(val).strip()) + + +def is_comp_standalone(case): + """ + Test if the case is a single component standalone + such as FKESSLER + """ + stubcnt = 0 + classes = case.get_values("COMP_CLASSES") + for comp in classes: + if case.get_value("COMP_{}".format(comp)) == "s{}".format(comp.lower()): + stubcnt = stubcnt + 1 + else: + model = comp.lower() + numclasses = len(classes) + if stubcnt >= numclasses - 2: + return True, model + return False, get_model() diff --git a/CIME/wait_for_tests.py b/CIME/wait_for_tests.py new file mode 100644 index 00000000000..928b010ba30 --- /dev/null +++ b/CIME/wait_for_tests.py @@ -0,0 +1,838 @@ +# pylint: disable=import-error +import queue +import os, time, threading, socket, signal, shutil, glob + +# pylint: disable=import-error +import logging +import xml.etree.ElementTree as xmlet + +import CIME.utils +from CIME.utils import expect, Timeout, run_cmd_no_fail, safe_copy, CIMEError +from CIME.XML.machines import Machines +from CIME.test_status import * +from CIME.provenance import save_test_success +from CIME.case.case import Case + 
+SIGNAL_RECEIVED = False +E3SM_MAIN_CDASH = "E3SM" +CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest" +SLEEP_INTERVAL_SEC = 0.1 + +############################################################################### +def signal_handler(*_): + ############################################################################### + logging.warning("RECEIVED SIGNAL!") + global SIGNAL_RECEIVED + SIGNAL_RECEIVED = True + + +############################################################################### +def set_up_signal_handlers(): + ############################################################################### + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + + +############################################################################### +def get_test_time(test_path): + ############################################################################### + ts = TestStatus(test_dir=test_path) + comment = ts.get_comment(RUN_PHASE) + if "time=" not in comment: + logging.warning("No run-phase time data found in {}".format(test_path)) + return 0 + else: + time_data = [token for token in comment.split() if token.startswith("time=")][0] + return int(time_data.split("=")[1]) + + +############################################################################### +def get_test_phase(test_path, phase): + ############################################################################### + ts = TestStatus(test_dir=test_path) + return ts.get_status(phase) + + +############################################################################### +def get_nml_diff(test_path): + ############################################################################### + test_log = os.path.join(test_path, "TestStatus.log") + + diffs = "" + with open(test_log, "r") as fd: + started = False + for line in fd.readlines(): + if "NLCOMP" in line: + started = True + elif started: + if "------------" in line: + break + else: + diffs += line + + return diffs + + +############################################################################### +def get_test_output(test_path): + ############################################################################### + output_file = os.path.join(test_path, "TestStatus.log") + if os.path.exists(output_file): + return open(output_file, "r").read() + else: + logging.warning("File '{}' not found".format(output_file)) + return "" + + +############################################################################### +def create_cdash_xml_boiler( + phase, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, +): + ############################################################################### + site_elem = xmlet.Element("Site") + + if "JENKINS_START_TIME" in os.environ: + time_info_str = "Total testing time: {:d} seconds".format( + int(current_time) - int(os.environ["JENKINS_START_TIME"]) + ) + else: + time_info_str = "" + + site_elem.attrib["BuildName"] = cdash_build_name + site_elem.attrib["BuildStamp"] = "{}-{}".format(utc_time, cdash_build_group) + site_elem.attrib["Name"] = hostname + site_elem.attrib["OSName"] = "Linux" + site_elem.attrib["Hostname"] = hostname + site_elem.attrib["OSVersion"] = "Commit: {}{}".format(git_commit, time_info_str) + + phase_elem = xmlet.SubElement(site_elem, phase) + + xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time) + xmlet.SubElement( + phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase) + ).text = str(int(current_time)) + + return site_elem, phase_elem + + 
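+# For orientation, create_cdash_xml_boiler() above produces a skeleton roughly like
+# (values illustrative):
+#   <Site BuildName="..." BuildStamp="<utc_time>-<build_group>" Name="<hostname>" ...>
+#     <Configure>   <!-- or <Build> / <Testing> -->
+#       <StartDateTime>...</StartDateTime>
+#       <StartConfigureTime>...</StartConfigureTime>   <!-- Start<Build|Test>Time -->
+#     </Configure>
+#   </Site>
+# The create_cdash_*_xml helpers below add their phase-specific children to it.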
+############################################################################### +def create_cdash_config_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, +): + ############################################################################### + site_elem, config_elem = create_cdash_xml_boiler( + "Configure", + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, + ) + + xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists" + + config_results = [] + for test_name in sorted(results): + test_path = results[test_name][0] + test_norm_path = ( + test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + ) + nml_phase_result = get_test_phase(test_norm_path, NAMELIST_PHASE) + if nml_phase_result == TEST_FAIL_STATUS: + nml_diff = get_nml_diff(test_norm_path) + cdash_warning = "CMake Warning:\n\n{} NML DIFF:\n{}\n".format( + test_name, nml_diff + ) + config_results.append(cdash_warning) + + xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results) + + xmlet.SubElement(config_elem, "ConfigureStatus").text = "0" + xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now + + etree = xmlet.ElementTree(site_elem) + etree.write(os.path.join(data_rel_path, "Configure.xml")) + + +############################################################################### +def create_cdash_build_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, +): + ############################################################################### + site_elem, build_elem = create_cdash_xml_boiler( + "Build", + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, + ) + + xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build" + + build_results = [] + for test_name in sorted(results): + build_results.append(test_name) + + xmlet.SubElement(build_elem, "Log").text = "\n".join(build_results) + + for idx, test_name in enumerate(sorted(results)): + test_path, test_status, _ = results[test_name] + test_norm_path = ( + test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + ) + if test_status == TEST_FAIL_STATUS and get_test_time(test_norm_path) == 0: + error_elem = xmlet.SubElement(build_elem, "Error") + xmlet.SubElement(error_elem, "Text").text = test_name + xmlet.SubElement(error_elem, "BuildLogLine").text = str(idx) + xmlet.SubElement(error_elem, "PreContext").text = test_name + xmlet.SubElement(error_elem, "PostContext").text = "" + xmlet.SubElement(error_elem, "RepeatCount").text = "0" + + xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now + + etree = xmlet.ElementTree(site_elem) + etree.write(os.path.join(data_rel_path, "Build.xml")) + + +############################################################################### +def create_cdash_test_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, +): + ############################################################################### + site_elem, testing_elem = create_cdash_xml_boiler( + "Testing", + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, + ) + + test_list_elem = xmlet.SubElement(testing_elem, "TestList") + for test_name in sorted(results): + xmlet.SubElement(test_list_elem, "Test").text = test_name + + for test_name in sorted(results): 
+ test_path, test_status, _ = results[test_name] + test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] + test_norm_path = ( + test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + ) + + full_test_elem = xmlet.SubElement(testing_elem, "Test") + if test_passed: + full_test_elem.attrib["Status"] = "passed" + elif test_status == TEST_PEND_STATUS: + full_test_elem.attrib["Status"] = "notrun" + else: + full_test_elem.attrib["Status"] = "failed" + + xmlet.SubElement(full_test_elem, "Name").text = test_name + + xmlet.SubElement(full_test_elem, "Path").text = test_norm_path + + xmlet.SubElement(full_test_elem, "FullName").text = test_name + + xmlet.SubElement(full_test_elem, "FullCommandLine") + # text ? + + results_elem = xmlet.SubElement(full_test_elem, "Results") + + named_measurements = ( + ("text/string", "Exit Code", test_status), + ("text/string", "Exit Value", "0" if test_passed else "1"), + ("numeric_double", "Execution Time", str(get_test_time(test_norm_path))), + ( + "text/string", + "Completion Status", + "Not Completed" if test_status == TEST_PEND_STATUS else "Completed", + ), + ("text/string", "Command line", "create_test"), + ) + + for type_attr, name_attr, value in named_measurements: + named_measurement_elem = xmlet.SubElement(results_elem, "NamedMeasurement") + named_measurement_elem.attrib["type"] = type_attr + named_measurement_elem.attrib["name"] = name_attr + + xmlet.SubElement(named_measurement_elem, "Value").text = value + + measurement_elem = xmlet.SubElement(results_elem, "Measurement") + + value_elem = xmlet.SubElement(measurement_elem, "Value") + value_elem.text = "".join( + [item for item in get_test_output(test_norm_path) if ord(item) < 128] + ) + + xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now + + etree = xmlet.ElementTree(site_elem) + + etree.write(os.path.join(data_rel_path, "Test.xml")) + + +############################################################################### +def create_cdash_xml_fakes( + results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname +): + ############################################################################### + # We assume all cases were created from the same code repo + first_result_case = os.path.dirname(list(results.items())[0][1][0]) + try: + srcroot = run_cmd_no_fail( + "./xmlquery --value SRCROOT", from_dir=first_result_case + ) + except CIMEError: + # Use repo containing this script as last resort + srcroot = os.path.join(CIME.utils.get_cime_root(), "..") + + git_commit = CIME.utils.get_current_commit(repo=srcroot) + + data_rel_path = os.path.join("Testing", utc_time) + + create_cdash_config_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, + ) + + create_cdash_build_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, + ) + + create_cdash_test_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, + ) + + +############################################################################### +def create_cdash_upload_xml( + results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload +): + ############################################################################### + + data_rel_path = os.path.join("Testing", utc_time) + + try: + log_dir = "{}_logs".format(cdash_build_name) + + need_to_upload = False + 
+        for test_name, test_data in results.items():
+            test_path, test_status, _ = test_data
+
+            if test_status != TEST_PASS_STATUS or force_log_upload:
+                test_case_dir = os.path.dirname(test_path)
+
+                case_dirs = [test_case_dir]
+                case_base = os.path.basename(test_case_dir)
+                test_case2_dir = os.path.join(test_case_dir, "case2", case_base)
+                if os.path.exists(test_case2_dir):
+                    case_dirs.append(test_case2_dir)
+
+                for case_dir in case_dirs:
+                    for param in ["EXEROOT", "RUNDIR", "CASEDIR"]:
+                        if param == "CASEDIR":
+                            log_src_dir = case_dir
+                        else:
+                            # Tests that failed very early or badly, as well as fake
+                            # cases used for testing, may not support xmlquery
+                            try:
+                                log_src_dir = run_cmd_no_fail(
+                                    "./xmlquery {} --value".format(param),
+                                    from_dir=case_dir,
+                                )
+                            except:
+                                continue
+
+                        log_dst_dir = os.path.join(
+                            log_dir,
+                            "{}{}_{}_logs".format(
+                                test_name,
+                                "" if case_dir == test_case_dir else ".case2",
+                                param,
+                            ),
+                        )
+                        os.makedirs(log_dst_dir)
+                        for log_file in glob.glob(os.path.join(log_src_dir, "*log*")):
+                            if os.path.isdir(log_file):
+                                shutil.copytree(
+                                    log_file,
+                                    os.path.join(
+                                        log_dst_dir, os.path.basename(log_file)
+                                    ),
+                                )
+                            else:
+                                safe_copy(log_file, log_dst_dir)
+                        for log_file in glob.glob(
+                            os.path.join(log_src_dir, "*.cprnc.out*")
+                        ):
+                            safe_copy(log_file, log_dst_dir)
+
+                need_to_upload = True
+
+        if need_to_upload:
+
+            tarball = "{}.tar.gz".format(log_dir)
+            if os.path.exists(tarball):
+                os.remove(tarball)
+
+            run_cmd_no_fail(
+                "tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball
+            )
+            base64 = run_cmd_no_fail("base64 {}".format(tarball))
+
+            xml_text = r"""<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
+<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
+<Upload>
+<File filename="{}">
+<Content encoding="base64">
+{}
+</Content>
+</File>
+</Upload>
+</Site>
+""".format(
+                cdash_build_name,
+                utc_time,
+                cdash_build_group,
+                hostname,
+                os.path.abspath(tarball),
+                base64,
+            )
+
+            with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
+                fd.write(xml_text)
+
+    finally:
+        if os.path.isdir(log_dir):
+            shutil.rmtree(log_dir)
+
+
+###############################################################################
+def create_cdash_xml(
+    results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False
+):
+    ###############################################################################
+
+    #
+    # Create dart config file
+    #
+
+    current_time = time.time()
+
+    utc_time_tuple = time.gmtime(current_time)
+    cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)
+
+    hostname = Machines().get_machine_name()
+    if hostname is None:
+        hostname = socket.gethostname().split(".")[0]
+        logging.warning(
+            "Could not convert hostname '{}' into an E3SM machine name".format(hostname)
+        )
+
+    for drop_method in ["https", "http"]:
+        dart_config = """
+SourceDirectory: {0}
+BuildDirectory: {0}
+
+# Site is something like machine.domain, i.e. pragmatic.crd
+Site: {1}
+
+# Build name is osname-revision-compiler, i.e.
Linux-2.4.2-2smp-c++ +BuildName: {2} + +# Submission information +IsCDash: TRUE +CDashVersion: +QueryCDashVersion: +DropSite: my.cdash.org +DropLocation: /submit.php?project={3} +DropSiteUser: +DropSitePassword: +DropSiteMode: +DropMethod: {6} +TriggerSite: +ScpCommand: {4} + +# Dashboard start time +NightlyStartTime: {5} UTC + +UseLaunchers: +CurlOptions: CURLOPT_SSL_VERIFYPEER_OFF;CURLOPT_SSL_VERIFYHOST_OFF +""".format( + os.getcwd(), + hostname, + cdash_build_name, + cdash_project, + shutil.which("scp"), + cdash_timestamp, + drop_method, + ) + + with open("DartConfiguration.tcl", "w") as dart_fd: + dart_fd.write(dart_config) + + utc_time = time.strftime("%Y%m%d-%H%M", utc_time_tuple) + testing_dir = os.path.join("Testing", utc_time) + if os.path.isdir(testing_dir): + shutil.rmtree(testing_dir) + + os.makedirs(os.path.join("Testing", utc_time)) + + # Make tag file + with open("Testing/TAG", "w") as tag_fd: + tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group)) + + create_cdash_xml_fakes( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + ) + + create_cdash_upload_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + hostname, + force_log_upload, + ) + + stat, out, _ = run_cmd("ctest -VV -D NightlySubmit", combine_output=True) + if stat != 0: + logging.warning( + "ctest upload drop method {} FAILED:\n{}".format(drop_method, out) + ) + else: + logging.info("Upload SUCCESS:\n{}".format(out)) + return + + expect(False, "All cdash upload attempts failed") + + +############################################################################### +def wait_for_test( + test_path, + results, + wait, + check_throughput, + check_memory, + ignore_namelists, + ignore_diffs, + ignore_memleak, + no_run, +): + ############################################################################### + if os.path.isdir(test_path): + test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME) + else: + test_status_filepath = test_path + + logging.debug("Watching file: '{}'".format(test_status_filepath)) + test_log_path = os.path.join( + os.path.dirname(test_status_filepath), ".internal_test_status.log" + ) + + # We don't want to make it a requirement that wait_for_tests has write access + # to all case directories + try: + fd = open(test_log_path, "w") + fd.close() + except (IOError, OSError): + test_log_path = "/dev/null" + + prior_ts = None + with open(test_log_path, "w") as log_fd: + while True: + if os.path.exists(test_status_filepath): + ts = TestStatus(test_dir=os.path.dirname(test_status_filepath)) + test_name = ts.get_name() + test_status, test_phase = ts.get_overall_test_status( + wait_for_run=not no_run, # Important + no_run=no_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_diffs=ignore_diffs, + ignore_memleak=ignore_memleak, + ) + + if prior_ts is not None and prior_ts != ts: + log_fd.write(ts.phase_statuses_dump()) + log_fd.write("OVERALL: {}\n\n".format(test_status)) + + prior_ts = ts + + if test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED): + time.sleep(SLEEP_INTERVAL_SEC) + logging.debug("Waiting for test to finish") + else: + results.put((test_name, test_path, test_status, test_phase)) + break + + else: + if wait and not SIGNAL_RECEIVED: + logging.debug( + "File '{}' does not yet exist".format(test_status_filepath) + ) + time.sleep(SLEEP_INTERVAL_SEC) + else: + test_name = os.path.abspath(test_status_filepath).split("/")[-2] + results.put( + ( + 
test_name, + test_path, + "File '{}' doesn't exist".format(test_status_filepath), + CREATE_NEWCASE_PHASE, + ) + ) + break + + +############################################################################### +def wait_for_tests_impl( + test_paths, + no_wait=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_diffs=False, + ignore_memleak=False, + no_run=False, +): + ############################################################################### + results = queue.Queue() + + wft_threads = [] + for test_path in test_paths: + t = threading.Thread( + target=wait_for_test, + args=( + test_path, + results, + not no_wait, + check_throughput, + check_memory, + ignore_namelists, + ignore_diffs, + ignore_memleak, + no_run, + ), + ) + t.daemon = True + t.start() + wft_threads.append(t) + + for wft_thread in wft_threads: + wft_thread.join() + + test_results = {} + completed_test_paths = [] + while not results.empty(): + test_name, test_path, test_status, test_phase = results.get() + if test_name in test_results: + prior_path, prior_status, _ = test_results[test_name] + if test_status == prior_status: + logging.warning( + "Test name '{}' was found in both '{}' and '{}'".format( + test_name, test_path, prior_path + ) + ) + else: + raise CIMEError( + "Test name '{}' was found in both '{}' and '{}' with different results".format( + test_name, test_path, prior_path + ) + ) + + expect( + test_name is not None, + "Failed to get test name for test_path: {}".format(test_path), + ) + test_results[test_name] = (test_path, test_status, test_phase) + completed_test_paths.append(test_path) + + expect( + set(test_paths) == set(completed_test_paths), + "Missing results for test paths: {}".format( + set(test_paths) - set(completed_test_paths) + ), + ) + return test_results + + +############################################################################### +def wait_for_tests( + test_paths, + no_wait=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_diffs=False, + ignore_memleak=False, + cdash_build_name=None, + cdash_project=E3SM_MAIN_CDASH, + cdash_build_group=CDASH_DEFAULT_BUILD_GROUP, + timeout=None, + force_log_upload=False, + no_run=False, + update_success=False, + expect_test_complete=True, +): + ############################################################################### + # Set up signal handling, we want to print results before the program + # is terminated + set_up_signal_handlers() + + with Timeout(timeout, action=signal_handler): + test_results = wait_for_tests_impl( + test_paths, + no_wait, + check_throughput, + check_memory, + ignore_namelists, + ignore_diffs, + ignore_memleak, + no_run, + ) + + all_pass = True + env_loaded = False + for test_name, test_data in sorted(test_results.items()): + test_path, test_status, phase = test_data + case_dir = os.path.dirname(test_path) + + if test_status not in [ + TEST_PASS_STATUS, + TEST_PEND_STATUS, + NAMELIST_FAIL_STATUS, + ]: + # Report failed phases + logging.info("{} {} (phase {})".format(test_status, test_name, phase)) + all_pass = False + else: + # Be cautious about telling the user that the test passed since we might + # not know that the test passed yet. 
+ if test_status == TEST_PEND_STATUS: + if expect_test_complete: + logging.info( + "{} {} (phase {} unexpectedly left in PEND)".format( + TEST_PEND_STATUS, test_name, phase + ) + ) + all_pass = False + else: + logging.info( + "{} {} (phase {} has not yet completed)".format( + TEST_PEND_STATUS, test_name, phase + ) + ) + + elif test_status == NAMELIST_FAIL_STATUS: + logging.info( + "{} {} (but otherwise OK) {}".format( + NAMELIST_FAIL_STATUS, test_name, phase + ) + ) + all_pass = False + else: + expect( + test_status == TEST_PASS_STATUS, + "Expected pass if we made it here, instead: {}".format(test_status), + ) + logging.info("{} {} {}".format(test_status, test_name, phase)) + + logging.info(" Case dir: {}".format(case_dir)) + + if update_success or (cdash_build_name and not env_loaded): + try: + # This can fail if the case crashed before setup completed + with Case(case_dir, read_only=True) as case: + srcroot = case.get_value("SRCROOT") + baseline_root = case.get_value("BASELINE_ROOT") + # Submitting to cdash requires availability of cmake. We can't guarantee + # that without loading the env for a case + if cdash_build_name and not env_loaded: + case.load_env() + env_loaded = True + + if update_success: + save_test_success( + baseline_root, + srcroot, + test_name, + test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS], + ) + + except CIMEError as e: + logging.warning( + "Failed to update success / load_env for Case {}: {}".format( + case_dir, e + ) + ) + + if cdash_build_name: + create_cdash_xml( + test_results, + cdash_build_name, + cdash_project, + cdash_build_group, + force_log_upload, + ) + + return all_pass diff --git a/CMakeLists.txt b/CMakeLists.txt index d960c60622b..adf4d7d6d98 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 2.8) +cmake_minimum_required(VERSION 3.14) include(ExternalProject) set(CIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}") @@ -9,7 +9,11 @@ project(cime_tests Fortran C) # We rely on pio for cmake utilities like findnetcdf.cmake, so that we don't # need to duplicate this cmake code -list(APPEND CMAKE_MODULE_PATH "${CIME_ROOT}/src/externals/pio2/cmake") +if (EXISTS ${SRC_ROOT}/libraries/parallelio/cmake) + list(APPEND CMAKE_MODULE_PATH "${SRC_ROOT}/libraries/parallelio/cmake") +else() + list(APPEND CMAKE_MODULE_PATH "${SRC_ROOT}/externals/scorpio/cmake") +endif() include(CIME_utils) find_package(NetCDF COMPONENTS C Fortran) @@ -18,82 +22,20 @@ include_directories(${NetCDF_C_INCLUDE_DIRS} ${NetCDF_Fortran_INCLUDE_DIRS}) # TODO: Some of the below should be done in the relevant directories, not in # this top level CMakeLists. -# ------------------------------------------------------------------------ -# Build mct -# ------------------------------------------------------------------------ -set(MCT_ROOT "${CIME_ROOT}/src/externals/mct") - -if (USE_MPI_SERIAL) - set(ENABLE_MPI_SERIAL "--enable-mpiserial") -else() - set(ENABLE_MPI_SERIAL "") -endif() - -ExternalProject_add(mct_project - PREFIX ${CMAKE_CURRENT_BINARY_DIR} - SOURCE_DIR ${MCT_ROOT} - BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/mct - CONFIGURE_COMMAND ${MCT_ROOT}/configure ${ENABLE_MPI_SERIAL} --enable-debugging --prefix=${CMAKE_CURRENT_BINARY_DIR} CC=${CMAKE_C_COMPILER} FC=${CMAKE_Fortran_COMPILER} CFLAGS=${CFLAGS} FCFLAGS=${FFLAGS} SRCDIR=${MCT_ROOT} DEBUG="-g" - BUILD_COMMAND $(MAKE) SRCDIR=${MCT_ROOT} - # Leave things in rather than "installing", because we have - # no need to move things around inside of the CMake binary directory. 
Also, - # mpi-serial doesn't install properly in the out-of-source build - INSTALL_COMMAND : - ) -# This copy_makefiles step is needed because mct currently doesn't support an -# out-of-source build. I am replicating what is done for the CIME system build. -ExternalProject_add_step(mct_project copy_makefiles - DEPENDEES configure - DEPENDERS build - WORKING_DIRECTORY - COMMAND cp -p /Makefile . - COMMAND mkdir -p mct - COMMAND cp -p /mct/Makefile mct/ - COMMAND mkdir -p mpeu - COMMAND cp -p /mpeu/Makefile mpeu/ - ) -if (USE_MPI_SERIAL) - ExternalProject_add_step(mct_project copy_mpi_serial_files - DEPENDEES configure - DEPENDERS build - WORKING_DIRECTORY - COMMAND mkdir -p mpi-serial - COMMAND cp -p /mpi-serial/Makefile mpi-serial/ - COMMAND cp /mpi-serial/mpif.h mpi-serial/ - COMMAND cp /mpi-serial/mpi.h mpi-serial/ - ) -endif() - -# Tell cmake to look for libraries & mod files here, because this is where we built libraries -include_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mct) -include_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpeu) -link_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mct) -link_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpeu) -if (USE_MPI_SERIAL) - # We need to list the mpi-serial include directory before system-level - # directories so that we're sure to use mpi-serial's mpif.h instead of - # an mpif.h from a system path. - include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/mct/mpi-serial) - link_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/mct/mpi-serial) -endif() - -# ------------------------------------------------------------------------ -# Done MCT build -# ------------------------------------------------------------------------ - # Now a bunch of includes for share code. # csm_share (we don't build it here because it seems to be built differently # by different tests?) -set(SHARE_ROOT "${CIME_ROOT}/src/share") -add_subdirectory(${SHARE_ROOT}/util csm_share) -add_subdirectory(${SHARE_ROOT}/unit_test_stubs/util csm_share_stubs) -include_directories(${SHARE_ROOT}/include) -# esmf_wrf_timemgr not built here because it depends on csm_share. -add_subdirectory(${SHARE_ROOT}/esmf_wrf_timemgr esmf_wrf_timemgr) -include_directories(${SHARE_ROOT}/esmf_wrf_timemgr) +if (EXISTS ${SRC_ROOT}/share/src) + add_subdirectory(${SRC_ROOT}/share/src share_src) + add_subdirectory(${SRC_ROOT}/share/unit_test_stubs/util csm_share_stubs) + include_directories(${SRC_ROOT}/share/include) +else() + add_subdirectory(${SRC_ROOT}/share/util csm_share) + add_subdirectory(${SRC_ROOT}/share/unit_test_stubs/util csm_share_stubs) + include_directories(${SRC_ROOT}/share/include) +endif() # Now the actual test directories. -add_subdirectory(${CIME_ROOT}/src/drivers/mct/unit_test) -add_subdirectory(${SHARE_ROOT}/test/unit) +add_subdirectory(${SRC_ROOT}/share/test/unit ${CMAKE_BINARY_DIR}/unittests) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bfa36c38c4d..a70f247e9d4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -27,7 +27,7 @@ find useful? Have a few minutes to tackle an issue? In this guide we will get yo integrated into contributing to CIME! ## What Can I Do? -* Tackle any unassigned [issues](https://github.com/ESMCI/CIME/issues) you wish! +* Tackle any unassigned [issues](https://github.com/ESMCI/CIME/issues) you wish! * Contribute code you already have. It doesn’t need to be perfect! We will help you clean things up, test it, etc. @@ -62,17 +62,32 @@ We love pull requests from everyone. 
Fork, then clone the repo: git clone git@github.com:your-username/CIME.git +Additionally you may need to checkout the submodules with: + + cd CIME + git submodule update --init + You will need to install CIME dependencies and edit config files to tell CIME about your development machine. See the [CIME users guide](https://esmci.github.io/cime/users_guide/porting-cime.html) -Run the scripts_regression_test: +Run the scripts_regression_tests: + + cd CIME/tests + python scripts_regression_tests.py - cd scripts/tests - scripts_regression_tests.py +Alternatively with `pytest`: + + pytest CIME/tests Make your change. Add tests for your change. Make the tests pass to the same level as before your changes. - scripts_regression_tests.py + cd CIME/tests + python scripts_regression_tests.py + +Run [pre-commit](https://pre-commit.com/#usage) before committing changes and submitting a PR. + + pip install pre-commit + pre-commit run -a Commit the changes you made. Chris Beams has written a [guide](https://chris.beams.io/posts/git-commit/) on how to write good commit messages. @@ -90,8 +105,9 @@ Some things that will increase the chance that your pull request is accepted: * Follow [PEP8][pep8] for style. (The `flake8` utility can help with this.) * Write a [good commit message][commit]. -Pull requests will automatically have tests run by Travis. This includes -running both the unit tests as well as the `flake8` code linter. +Pull requests will automatically have tests run by a Github Action. This +includes running both the unit tests as well as `pre-commit`, which checks +linting. [pep8]: http://pep8.org [commit]: https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/ChangeLog b/ChangeLog index a98ff92e66e..d153136c2d6 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7627 @@ ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer +Date: 4-27-2022 +Tag: cime6.0.17 +Answer Changes: None +Tests: scripts_regression_tests, cesm prealpha +Dependencies: + +Brief Summary: + - Make pio_async_interface a compvar. + - Fixes a revert from python reorg. + - Add help to the --driver option in create_newcase. + - Some component builds care about RUNDIR. + - Fix error in parsing ESMF_Profile.summary in ESMF 8.3. + - Add Total_Build time to end of build_times.txt. + - Fixes issue with model detection on E3SM machines. + - Revert ignoring special syntax. + - Correct ATM_GRID definition for CAM SE grids. + - Fix machine check for some unusual cases. + - Add a valid PTS_DOMAIN_FILE in aquaplanet mode. + - Try multiple ctest drop methods. + - Python package reorg. + - Fixes special syntax being ignored in XML fields. + - Extend configure to get cmake macros files from the .cime directory. 
+ +User interface changes: + - Add help to the --driver option in create_newcase + +PR summary: git log --oneline --first-parent [previous_tag]..master +e42cdfd75 Merge pull request #4199 from jedwards4b/async_io_in_esmf +f88e19a48 Merge pull request #4227 from jasonb5/fix_revert +40b4114f3 Merge pull request #4222 from ekluzek/add_driver_help_to_create_newcase +bf7e2a1ca Merge pull request #4224 from ESMCI/jgfouca/add_rundir_cmake +5f41dd0e5 Merge pull request #4225 from jedwards4b/esmf83_timing_fix +524842660 Merge pull request #4217 from ESMCI/jgfouca/e3sm_prov +dd48d0627 Merge pull request #4218 from jasonb5/fix_model_detection +87580b755 Merge pull request #4219 from jasonb5/revert_special_syntax +3a90325b7 Merge pull request #4215 from jedwards4b/fix_se_grid_def +77dde7a40 Merge pull request #4206 from billsacks/fix_machine_check +52fca626d add a valid PTS_DOMAIN_FILE in aquaplanet mode (#4212) +e6fa10a68 Merge pull request #4207 from ESMCI/jgfouca/ctest_multiple_drop_methods +d5f998391 Merge pull request #4162 from jasonb5/python_package_reorg +1dfd2f507 Merge pull request #4209 from jasonb5/fix_special_syntax +078f1d0ba Merge pull request #4205 from billsacks/configure_gets_macros_from_dotcime + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M .github/workflows/srt_nuopc.yml +M .gitignore +A .gitmodules +M .pre-commit-config.yaml +R100 scripts/Tools/__init__.py CIME/BuildTools/__init__.py +R100 scripts/lib/CIME/BuildTools/cmakemacroswriter.py CIME/BuildTools/cmakemacroswriter.py +R091 scripts/lib/CIME/BuildTools/configure.py CIME/BuildTools/configure.py +R100 scripts/lib/CIME/BuildTools/macroconditiontree.py CIME/BuildTools/macroconditiontree.py +R098 scripts/lib/CIME/BuildTools/macrowriterbase.py CIME/BuildTools/macrowriterbase.py +R100 scripts/lib/CIME/BuildTools/makemacroswriter.py CIME/BuildTools/makemacroswriter.py +R100 scripts/lib/CIME/BuildTools/possiblevalues.py CIME/BuildTools/possiblevalues.py +R100 scripts/lib/CIME/BuildTools/valuesetting.py CIME/BuildTools/valuesetting.py +R100 scripts/lib/CIME/Servers/__init__.py CIME/Servers/__init__.py +R100 scripts/lib/CIME/Servers/ftp.py CIME/Servers/ftp.py +R100 scripts/lib/CIME/Servers/generic_server.py CIME/Servers/generic_server.py +R100 scripts/lib/CIME/Servers/gftp.py CIME/Servers/gftp.py +R100 scripts/lib/CIME/Servers/svn.py CIME/Servers/svn.py +R100 scripts/lib/CIME/Servers/wget.py CIME/Servers/wget.py +R100 scripts/lib/CIME/SystemTests/README CIME/SystemTests/README +R100 scripts/lib/CIME/BuildTools/__init__.py CIME/SystemTests/__init__.py +R100 scripts/lib/CIME/SystemTests/dae.py CIME/SystemTests/dae.py +R100 scripts/lib/CIME/SystemTests/eri.py CIME/SystemTests/eri.py +R100 scripts/lib/CIME/SystemTests/erio.py CIME/SystemTests/erio.py +R100 scripts/lib/CIME/SystemTests/erp.py CIME/SystemTests/erp.py +R100 scripts/lib/CIME/SystemTests/err.py CIME/SystemTests/err.py +R100 scripts/lib/CIME/SystemTests/erri.py CIME/SystemTests/erri.py +R100 scripts/lib/CIME/SystemTests/ers.py CIME/SystemTests/ers.py +R100 scripts/lib/CIME/SystemTests/ers2.py CIME/SystemTests/ers2.py +R100 scripts/lib/CIME/SystemTests/ert.py CIME/SystemTests/ert.py +R100 scripts/lib/CIME/SystemTests/funit.py CIME/SystemTests/funit.py +R100 scripts/lib/CIME/SystemTests/homme.py CIME/SystemTests/homme.py +R100 scripts/lib/CIME/SystemTests/hommebaseclass.py CIME/SystemTests/hommebaseclass.py +R100 scripts/lib/CIME/SystemTests/hommebfb.py CIME/SystemTests/hommebfb.py +R100 scripts/lib/CIME/SystemTests/icp.py 
CIME/SystemTests/icp.py +R100 scripts/lib/CIME/SystemTests/irt.py CIME/SystemTests/irt.py +R100 scripts/lib/CIME/SystemTests/ldsta.py CIME/SystemTests/ldsta.py +R100 scripts/lib/CIME/SystemTests/mcc.py CIME/SystemTests/mcc.py +R100 scripts/lib/CIME/SystemTests/mvk.py CIME/SystemTests/mvk.py +R100 scripts/lib/CIME/SystemTests/nck.py CIME/SystemTests/nck.py +R100 scripts/lib/CIME/SystemTests/ncr.py CIME/SystemTests/ncr.py +R100 scripts/lib/CIME/SystemTests/nodefail.py CIME/SystemTests/nodefail.py +R100 scripts/lib/CIME/SystemTests/pea.py CIME/SystemTests/pea.py +R100 scripts/lib/CIME/SystemTests/pem.py CIME/SystemTests/pem.py +R100 scripts/lib/CIME/SystemTests/pet.py CIME/SystemTests/pet.py +R100 scripts/lib/CIME/SystemTests/pfs.py CIME/SystemTests/pfs.py +R100 scripts/lib/CIME/SystemTests/pgn.py CIME/SystemTests/pgn.py +R100 scripts/lib/CIME/SystemTests/pre.py CIME/SystemTests/pre.py +R100 scripts/lib/CIME/SystemTests/rep.py CIME/SystemTests/rep.py +R100 scripts/lib/CIME/SystemTests/restart_tests.py CIME/SystemTests/restart_tests.py +R100 scripts/lib/CIME/SystemTests/seq.py CIME/SystemTests/seq.py +R100 scripts/lib/CIME/SystemTests/sms.py CIME/SystemTests/sms.py +R099 scripts/lib/CIME/SystemTests/system_tests_common.py CIME/SystemTests/system_tests_common.py +R100 scripts/lib/CIME/SystemTests/system_tests_compare_n.py CIME/SystemTests/system_tests_compare_n.py +R100 scripts/lib/CIME/SystemTests/system_tests_compare_two.py CIME/SystemTests/system_tests_compare_two.py +R100 scripts/lib/CIME/SystemTests/__init__.py CIME/SystemTests/test_utils/__init__.py +R100 scripts/lib/CIME/SystemTests/test_utils/user_nl_utils.py CIME/SystemTests/test_utils/user_nl_utils.py +R100 scripts/lib/CIME/SystemTests/tsc.py CIME/SystemTests/tsc.py +R099 scripts/Tools/Makefile CIME/Tools/Makefile +R100 scripts/lib/CIME/SystemTests/test_utils/__init__.py CIME/Tools/__init__.py +R100 scripts/Tools/advanced-py-prof CIME/Tools/advanced-py-prof +R099 scripts/Tools/archive_metadata CIME/Tools/archive_metadata +R100 scripts/Tools/bld_diff CIME/Tools/bld_diff +R100 scripts/Tools/bless_test_results CIME/Tools/bless_test_results +R100 scripts/Tools/case.build CIME/Tools/case.build +R100 scripts/Tools/case.cmpgen_namelists CIME/Tools/case.cmpgen_namelists +R100 scripts/Tools/case.qstatus CIME/Tools/case.qstatus +R093 scripts/Tools/case.setup CIME/Tools/case.setup +R099 scripts/Tools/case.submit CIME/Tools/case.submit +R100 scripts/Tools/case_diff CIME/Tools/case_diff +R100 scripts/Tools/check_case CIME/Tools/check_case +R100 scripts/Tools/check_input_data CIME/Tools/check_input_data +R100 scripts/Tools/check_lockedfiles CIME/Tools/check_lockedfiles +R100 scripts/Tools/cime_bisect CIME/Tools/cime_bisect +R100 scripts/Tools/code_checker CIME/Tools/code_checker +R100 scripts/Tools/compare_namelists CIME/Tools/compare_namelists +R100 scripts/Tools/compare_test_results CIME/Tools/compare_test_results +R100 scripts/Tools/component_compare_baseline CIME/Tools/component_compare_baseline +R100 scripts/Tools/component_compare_copy CIME/Tools/component_compare_copy +R100 scripts/Tools/component_compare_test CIME/Tools/component_compare_test +R100 scripts/Tools/component_generate_baseline CIME/Tools/component_generate_baseline +R100 scripts/Tools/concat_daily_hist.csh CIME/Tools/concat_daily_hist.csh +R100 scripts/Tools/cs.status CIME/Tools/cs.status +R100 scripts/Tools/e3sm_check_env CIME/Tools/e3sm_check_env +R100 scripts/Tools/e3sm_compile_wrap.py CIME/Tools/e3sm_compile_wrap.py +R100 scripts/Tools/generate_cylc_workflow.py 
CIME/Tools/generate_cylc_workflow.py +R100 scripts/Tools/getTiming CIME/Tools/getTiming +R100 scripts/Tools/get_case_env CIME/Tools/get_case_env +R100 scripts/Tools/get_standard_makefile_args CIME/Tools/get_standard_makefile_args +R099 scripts/Tools/jenkins_generic_job CIME/Tools/jenkins_generic_job +R100 scripts/Tools/jenkins_script CIME/Tools/jenkins_script +R099 scripts/Tools/list_e3sm_tests CIME/Tools/list_e3sm_tests +R100 scripts/Tools/mkDepends CIME/Tools/mkDepends +R100 scripts/Tools/mkSrcfiles CIME/Tools/mkSrcfiles +R097 scripts/Tools/mvsource CIME/Tools/mvsource +R100 scripts/Tools/normalize_cases CIME/Tools/normalize_cases +R100 scripts/Tools/pelayout CIME/Tools/pelayout +R100 scripts/Tools/preview_namelists CIME/Tools/preview_namelists +R100 scripts/Tools/preview_run CIME/Tools/preview_run +R100 scripts/Tools/save_provenance CIME/Tools/save_provenance +R100 scripts/Tools/simple-py-prof CIME/Tools/simple-py-prof +R100 scripts/Tools/simple_compare CIME/Tools/simple_compare +R082 scripts/Tools/standard_script_setup.py CIME/Tools/standard_script_setup.py +R100 scripts/Tools/testreporter.py CIME/Tools/testreporter.py +R100 scripts/Tools/wait_for_tests CIME/Tools/wait_for_tests +R100 scripts/Tools/xmlchange CIME/Tools/xmlchange +R097 scripts/Tools/xmlconvertors/config_pes_converter.py CIME/Tools/xmlconvertors/config_pes_converter.py +R100 scripts/Tools/xmlconvertors/convert-grid-v1-to-v2 CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 +R098 scripts/Tools/xmlconvertors/grid_xml_converter.py CIME/Tools/xmlconvertors/grid_xml_converter.py +R100 scripts/Tools/xmlquery CIME/Tools/xmlquery +R100 scripts/Tools/xmltestentry CIME/Tools/xmltestentry +R100 scripts/lib/CIME/XML/__init__.py CIME/XML/__init__.py +R100 scripts/lib/CIME/XML/archive.py CIME/XML/archive.py +R100 scripts/lib/CIME/XML/archive_base.py CIME/XML/archive_base.py +R100 scripts/lib/CIME/XML/batch.py CIME/XML/batch.py +R100 scripts/lib/CIME/XML/compilerblock.py CIME/XML/compilerblock.py +R099 scripts/lib/CIME/XML/compilers.py CIME/XML/compilers.py +R100 scripts/lib/CIME/XML/component.py CIME/XML/component.py +R100 scripts/lib/CIME/XML/compsets.py CIME/XML/compsets.py +R099 scripts/lib/CIME/XML/entry_id.py CIME/XML/entry_id.py +R092 scripts/lib/CIME/XML/env_archive.py CIME/XML/env_archive.py +R100 scripts/lib/CIME/XML/env_base.py CIME/XML/env_base.py +R099 scripts/lib/CIME/XML/env_batch.py CIME/XML/env_batch.py +R083 scripts/lib/CIME/XML/env_build.py CIME/XML/env_build.py +R083 scripts/lib/CIME/XML/env_case.py CIME/XML/env_case.py +R098 scripts/lib/CIME/XML/env_mach_pes.py CIME/XML/env_mach_pes.py +R098 scripts/lib/CIME/XML/env_mach_specific.py CIME/XML/env_mach_specific.py +R085 scripts/lib/CIME/XML/env_run.py CIME/XML/env_run.py +R100 scripts/lib/CIME/XML/env_test.py CIME/XML/env_test.py +R098 scripts/lib/CIME/XML/env_workflow.py CIME/XML/env_workflow.py +R094 scripts/lib/CIME/XML/expected_fails_file.py CIME/XML/expected_fails_file.py +R090 scripts/lib/CIME/XML/files.py CIME/XML/files.py +R098 scripts/lib/CIME/XML/generic_xml.py CIME/XML/generic_xml.py +R099 scripts/lib/CIME/XML/grids.py CIME/XML/grids.py +R093 scripts/lib/CIME/XML/headers.py CIME/XML/headers.py +R100 scripts/lib/CIME/XML/inputdata.py CIME/XML/inputdata.py +R100 scripts/lib/CIME/XML/machines.py CIME/XML/machines.py +R099 scripts/lib/CIME/XML/namelist_definition.py CIME/XML/namelist_definition.py +R100 scripts/lib/CIME/XML/pes.py CIME/XML/pes.py +R100 scripts/lib/CIME/XML/pio.py CIME/XML/pio.py +R100 scripts/lib/CIME/XML/standard_module_setup.py 
CIME/XML/standard_module_setup.py +R100 scripts/lib/CIME/XML/stream.py CIME/XML/stream.py +R100 scripts/lib/CIME/XML/test_reporter.py CIME/XML/test_reporter.py +R100 scripts/lib/CIME/XML/testlist.py CIME/XML/testlist.py +R100 scripts/lib/CIME/XML/tests.py CIME/XML/tests.py +R100 scripts/lib/CIME/XML/testspec.py CIME/XML/testspec.py +R100 scripts/lib/CIME/XML/workflow.py CIME/XML/workflow.py +R100 scripts/lib/CIME/__init__.py CIME/__init__.py +R100 scripts/lib/CIME/aprun.py CIME/aprun.py +R098 scripts/lib/CIME/bless_test_results.py CIME/bless_test_results.py +R099 scripts/lib/CIME/build.py CIME/build.py +R100 scripts/lib/CIME/tests/__init__.py CIME/build_scripts/__init__.py +R083 src/build_scripts/buildlib.cprnc CIME/build_scripts/buildlib.cprnc +R066 src/build_scripts/buildlib.gptl CIME/build_scripts/buildlib.gptl +R094 src/build_scripts/buildlib.internal_components CIME/build_scripts/buildlib.internal_components +R094 src/build_scripts/buildlib.kokkos CIME/build_scripts/buildlib.kokkos +R093 src/build_scripts/buildlib.mct CIME/build_scripts/buildlib.mct +R094 src/build_scripts/buildlib.mpi-serial CIME/build_scripts/buildlib.mpi-serial +R096 src/build_scripts/buildlib.pio CIME/build_scripts/buildlib.pio +R094 src/build_scripts/buildlib_cmake.internal_components CIME/build_scripts/buildlib_cmake.internal_components +R100 scripts/lib/CIME/buildlib.py CIME/buildlib.py +R100 scripts/lib/CIME/buildnml.py CIME/buildnml.py +R100 scripts/lib/CIME/case/README CIME/case/README +R100 scripts/lib/CIME/case/__init__.py CIME/case/__init__.py +R098 scripts/lib/CIME/case/case.py CIME/case/case.py +R100 scripts/lib/CIME/case/case_clone.py CIME/case/case_clone.py +R100 scripts/lib/CIME/case/case_cmpgen_namelists.py CIME/case/case_cmpgen_namelists.py +R100 scripts/lib/CIME/case/case_run.py CIME/case/case_run.py +R093 scripts/lib/CIME/case/case_setup.py CIME/case/case_setup.py +R100 scripts/lib/CIME/case/case_st_archive.py CIME/case/case_st_archive.py +R099 scripts/lib/CIME/case/case_submit.py CIME/case/case_submit.py +R100 scripts/lib/CIME/case/case_test.py CIME/case/case_test.py +R100 scripts/lib/CIME/case/check_input_data.py CIME/case/check_input_data.py +R098 scripts/lib/CIME/case/check_lockedfiles.py CIME/case/check_lockedfiles.py +R100 scripts/lib/CIME/case/preview_namelists.py CIME/case/preview_namelists.py +R076 scripts/lib/CIME/code_checker.py CIME/code_checker.py +R099 scripts/lib/CIME/compare_namelists.py CIME/compare_namelists.py +R100 scripts/lib/CIME/compare_test_results.py CIME/compare_test_results.py +R100 scripts/lib/CIME/cs_status.py CIME/cs_status.py +R088 scripts/lib/CIME/cs_status_creator.py CIME/cs_status_creator.py +A CIME/data/__init__.py +A CIME/data/config/__init__.py +R085 config/cesm/config_files.xml CIME/data/config/cesm/config_files.xml +R100 config/config_headers.xml CIME/data/config/config_headers.xml +R100 config/config_tests.xml CIME/data/config/config_tests.xml +R084 config/e3sm/config_files.xml CIME/data/config/e3sm/config_files.xml +R079 config/ufs/config_files.xml CIME/data/config/ufs/config_files.xml +R100 config/xml_schemas/cimeteststatus.xsd CIME/data/config/xml_schemas/cimeteststatus.xsd +R100 config/xml_schemas/config_archive.xsd CIME/data/config/xml_schemas/config_archive.xsd +R100 config/xml_schemas/config_batch.xsd CIME/data/config/xml_schemas/config_batch.xsd +R100 config/xml_schemas/config_compilers_v2.xsd CIME/data/config/xml_schemas/config_compilers_v2.xsd +R100 config/xml_schemas/config_compsets.xsd CIME/data/config/xml_schemas/config_compsets.xsd +R100 
config/xml_schemas/config_grids_v2.2.xsd CIME/data/config/xml_schemas/config_grids_v2.2.xsd +R100 config/xml_schemas/config_grids_v2.xsd CIME/data/config/xml_schemas/config_grids_v2.xsd +R100 config/xml_schemas/config_inputdata.xsd CIME/data/config/xml_schemas/config_inputdata.xsd +R100 config/xml_schemas/config_machines.xsd CIME/data/config/xml_schemas/config_machines.xsd +R100 config/xml_schemas/config_machines_template.xml CIME/data/config/xml_schemas/config_machines_template.xml +R100 config/xml_schemas/config_pes.xsd CIME/data/config/xml_schemas/config_pes.xsd +R100 config/xml_schemas/config_workflow.xsd CIME/data/config/xml_schemas/config_workflow.xsd +R100 config/xml_schemas/entry_id.xsd CIME/data/config/xml_schemas/entry_id.xsd +R100 config/xml_schemas/entry_id_base.xsd CIME/data/config/xml_schemas/entry_id_base.xsd +R100 config/xml_schemas/entry_id_base_version3.xsd CIME/data/config/xml_schemas/entry_id_base_version3.xsd +R100 config/xml_schemas/entry_id_namelist.xsd CIME/data/config/xml_schemas/entry_id_namelist.xsd +R100 config/xml_schemas/entry_id_version3.xsd CIME/data/config/xml_schemas/entry_id_version3.xsd +R100 config/xml_schemas/env_archive.xsd CIME/data/config/xml_schemas/env_archive.xsd +R100 config/xml_schemas/env_batch.xsd CIME/data/config/xml_schemas/env_batch.xsd +R100 config/xml_schemas/env_entry_id.xsd CIME/data/config/xml_schemas/env_entry_id.xsd +R100 config/xml_schemas/env_mach_pes.xsd CIME/data/config/xml_schemas/env_mach_pes.xsd +R100 config/xml_schemas/env_mach_specific.xsd CIME/data/config/xml_schemas/env_mach_specific.xsd +R100 config/xml_schemas/expected_fails_file.xsd CIME/data/config/xml_schemas/expected_fails_file.xsd +R100 config/xml_schemas/testlist.xsd CIME/data/config/xml_schemas/testlist.xsd +A CIME/data/templates/__init__.py +R100 scripts/lib/cs.status.template CIME/data/templates/cs.status.template +R100 scripts/lib/cs.submit.template CIME/data/templates/cs.submit.template +R100 scripts/lib/testreporter.template CIME/data/templates/testreporter.template +R100 scripts/lib/CIME/date.py CIME/date.py +R100 scripts/lib/CIME/expected_fails.py CIME/expected_fails.py +R099 scripts/lib/get_tests.py CIME/get_tests.py +R099 scripts/lib/CIME/get_timing.py CIME/get_timing.py +R100 scripts/lib/CIME/hist_utils.py CIME/hist_utils.py +R100 scripts/lib/jenkins_generic_job.py CIME/jenkins_generic_job.py +R100 scripts/lib/CIME/locked_files.py CIME/locked_files.py +R099 scripts/lib/CIME/namelist.py CIME/namelist.py +R099 scripts/lib/CIME/nmlgen.py CIME/nmlgen.py +R089 tools/cprnc/CMakeLists.txt CIME/non_py/cprnc/CMakeLists.txt +R100 tools/cprnc/Depends CIME/non_py/cprnc/Depends +R098 tools/cprnc/Makefile CIME/non_py/cprnc/Makefile +R099 tools/cprnc/README CIME/non_py/cprnc/README +R100 tools/cprnc/compare_vars_mod.F90.in CIME/non_py/cprnc/compare_vars_mod.F90.in +R100 tools/cprnc/cprnc.F90 CIME/non_py/cprnc/cprnc.F90 +R100 tools/cprnc/filestruct.F90 CIME/non_py/cprnc/filestruct.F90 +R100 tools/cprnc/prec.F90 CIME/non_py/cprnc/prec.F90 +R100 tools/cprnc/run_tests CIME/non_py/cprnc/run_tests +R100 tools/cprnc/summarize_cprnc_diffs CIME/non_py/cprnc/summarize_cprnc_diffs +R100 tools/cprnc/test_inputs/README CIME/non_py/cprnc/test_inputs/README +R100 tools/cprnc/test_inputs/clm2.h0.subset.control.nc CIME/non_py/cprnc/test_inputs/clm2.h0.subset.control.nc +R100 tools/cprnc/test_inputs/clm2.h0.subset.test.nc CIME/non_py/cprnc/test_inputs/clm2.h0.subset.test.nc +R100 tools/cprnc/test_inputs/clm2.h1.subset.control.nc CIME/non_py/cprnc/test_inputs/clm2.h1.subset.control.nc 
+R100 tools/cprnc/test_inputs/clm2.h1.subset.test.nc CIME/non_py/cprnc/test_inputs/clm2.h1.subset.test.nc +R100 tools/cprnc/test_inputs/control.nc CIME/non_py/cprnc/test_inputs/control.nc +R100 tools/cprnc/test_inputs/control_0d.nc CIME/non_py/cprnc/test_inputs/control_0d.nc +R100 tools/cprnc/test_inputs/control_attributes.nc CIME/non_py/cprnc/test_inputs/control_attributes.nc +R100 tools/cprnc/test_inputs/control_char.nc CIME/non_py/cprnc/test_inputs/control_char.nc +R100 tools/cprnc/test_inputs/control_floatDoubleNan.nc CIME/non_py/cprnc/test_inputs/control_floatDoubleNan.nc +R100 tools/cprnc/test_inputs/control_int.nc CIME/non_py/cprnc/test_inputs/control_int.nc +R100 tools/cprnc/test_inputs/control_multipleTimes_someTimeless.nc CIME/non_py/cprnc/test_inputs/control_multipleTimes_someTimeless.nc +R100 tools/cprnc/test_inputs/control_noTime.nc CIME/non_py/cprnc/test_inputs/control_noTime.nc +R100 tools/cprnc/test_inputs/copy.nc CIME/non_py/cprnc/test_inputs/copy.nc +R100 tools/cprnc/test_inputs/copy_char.nc CIME/non_py/cprnc/test_inputs/copy_char.nc +R100 tools/cprnc/test_inputs/cpl.hi.subset.control.nc CIME/non_py/cprnc/test_inputs/cpl.hi.subset.control.nc +R100 tools/cprnc/test_inputs/cpl.hi.subset.test.nc CIME/non_py/cprnc/test_inputs/cpl.hi.subset.test.nc +R100 tools/cprnc/test_inputs/diffs_0d.nc CIME/non_py/cprnc/test_inputs/diffs_0d.nc +R100 tools/cprnc/test_inputs/diffs_in_attribute.nc CIME/non_py/cprnc/test_inputs/diffs_in_attribute.nc +R100 tools/cprnc/test_inputs/diffs_in_fill.nc CIME/non_py/cprnc/test_inputs/diffs_in_fill.nc +R100 tools/cprnc/test_inputs/diffs_in_nans.nc CIME/non_py/cprnc/test_inputs/diffs_in_nans.nc +R100 tools/cprnc/test_inputs/diffs_in_vals.nc CIME/non_py/cprnc/test_inputs/diffs_in_vals.nc +R100 tools/cprnc/test_inputs/diffs_in_vals_and_diffs_in_fill.nc CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_diffs_in_fill.nc +R100 tools/cprnc/test_inputs/diffs_in_vals_and_extra_and_missing.nc CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_extra_and_missing.nc +R100 tools/cprnc/test_inputs/diffs_in_vals_and_fill.nc CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_fill.nc +R100 tools/cprnc/test_inputs/extra_variables.nc CIME/non_py/cprnc/test_inputs/extra_variables.nc +R100 tools/cprnc/test_inputs/int_missing.nc CIME/non_py/cprnc/test_inputs/int_missing.nc +R100 tools/cprnc/test_inputs/lon_differs.nc CIME/non_py/cprnc/test_inputs/lon_differs.nc +R100 tools/cprnc/test_inputs/missing_variables.nc CIME/non_py/cprnc/test_inputs/missing_variables.nc +R100 tools/cprnc/test_inputs/multipleTimes_someTimeless_diffs_in_vals_and_fill.nc CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_diffs_in_vals_and_fill.nc +R100 tools/cprnc/test_inputs/multipleTimes_someTimeless_extra_and_missing.nc CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_extra_and_missing.nc +R100 tools/cprnc/test_inputs/noTime_diffs_in_vals_and_fill.nc CIME/non_py/cprnc/test_inputs/noTime_diffs_in_vals_and_fill.nc +R100 tools/cprnc/test_inputs/noTime_extra_and_missing.nc CIME/non_py/cprnc/test_inputs/noTime_extra_and_missing.nc +R100 tools/cprnc/test_inputs/vals_differ_by_1.1.nc CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1.nc +R100 tools/cprnc/test_inputs/vals_differ_by_1.1_somewhere.nc CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1_somewhere.nc +R100 tools/cprnc/test_inputs/vals_differ_by_varying_amounts.nc CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts.nc +R100 tools/cprnc/test_inputs/vals_differ_by_varying_amounts2.nc 
CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts2.nc +R100 tools/cprnc/utils.F90 CIME/non_py/cprnc/utils.F90 +A CIME/non_py/externals/genf90 +R100 src/CMake/.gitignore CIME/non_py/src/CMake/.gitignore +R100 src/CMake/CESM_utils.cmake CIME/non_py/src/CMake/CESM_utils.cmake +R100 src/CMake/CIME_initial_setup.cmake CIME/non_py/src/CMake/CIME_initial_setup.cmake +R100 src/CMake/CIME_utils.cmake CIME/non_py/src/CMake/CIME_utils.cmake +R100 src/CMake/ChangeLog CIME/non_py/src/CMake/ChangeLog +R100 src/CMake/Compilers.cmake CIME/non_py/src/CMake/Compilers.cmake +R100 src/CMake/CorrectWindowsPaths.cmake CIME/non_py/src/CMake/CorrectWindowsPaths.cmake +R100 src/CMake/FindPETSc.cmake CIME/non_py/src/CMake/FindPETSc.cmake +R100 src/CMake/FindPackageMultipass.cmake CIME/non_py/src/CMake/FindPackageMultipass.cmake +R100 src/CMake/FindpFUnit.cmake CIME/non_py/src/CMake/FindpFUnit.cmake +R100 src/CMake/LICENSE CIME/non_py/src/CMake/LICENSE +R100 src/CMake/README.md CIME/non_py/src/CMake/README.md +R100 src/CMake/ResolveCompilerPaths.cmake CIME/non_py/src/CMake/ResolveCompilerPaths.cmake +R100 src/CMake/Sourcelist_utils.cmake CIME/non_py/src/CMake/Sourcelist_utils.cmake +R100 src/CMake/TryCSizeOf.f90 CIME/non_py/src/CMake/TryCSizeOf.f90 +R100 src/CMake/TryMPIIO.f90 CIME/non_py/src/CMake/TryMPIIO.f90 +R100 src/CMake/TryMPIMod.f90 CIME/non_py/src/CMake/TryMPIMod.f90 +R100 src/CMake/TryMPISERIAL.f90 CIME/non_py/src/CMake/TryMPISERIAL.f90 +R100 src/CMake/TryPnetcdf_inc.f90 CIME/non_py/src/CMake/TryPnetcdf_inc.f90 +R100 src/CMake/TryPnetcdf_mod.f90 CIME/non_py/src/CMake/TryPnetcdf_mod.f90 +R100 src/CMake/genf90_utils.cmake CIME/non_py/src/CMake/genf90_utils.cmake +R100 src/CMake/mpiexec.cmake CIME/non_py/src/CMake/mpiexec.cmake +R100 src/CMake/pFUnit_utils.cmake CIME/non_py/src/CMake/pFUnit_utils.cmake +R100 src/share/README CIME/non_py/src/README +A CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/satm/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/buildnml +R100 src/components/stub_comps_nuopc/satm/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/satm/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/sesp/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/buildnml +R100 src/components/stub_comps_nuopc/sesp/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/sesp/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/sglc/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/buildnml +R100 src/components/stub_comps_nuopc/sglc/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/sglc/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/siac/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/buildnml +R100 
src/components/stub_comps_nuopc/siac/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/siac/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/sice/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/buildnml +R100 src/components/stub_comps_nuopc/sice/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/sice/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/slnd/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/buildnml +R100 src/components/stub_comps_nuopc/slnd/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/slnd/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/socn/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/buildnml +R100 src/components/stub_comps_nuopc/socn/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/socn/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/srof/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/buildnml +R100 src/components/stub_comps_nuopc/srof/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/srof/cime_config/config_component.xml +A CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib +A CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildlib_cmake +R100 src/components/stub_comps_nuopc/swav/cime_config/buildnml CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/buildnml +R100 src/components/stub_comps_nuopc/swav/cime_config/config_component.xml CIME/non_py/src/components/stub_comps_nuopc/swav/cime_config/config_component.xml +A CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib +A CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib_cmake +R091 src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml +R100 src/components/xcpl_comps_nuopc/xatm/cime_config/config_component.xml CIME/non_py/src/components/xcpl_comps_nuopc/xatm/cime_config/config_component.xml +R100 src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 +A CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib +A CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib_cmake +R091 src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml +R100 src/components/xcpl_comps_nuopc/xglc/cime_config/config_component.xml CIME/non_py/src/components/xcpl_comps_nuopc/xglc/cime_config/config_component.xml +R100 src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +A 
CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib +A CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildlib_cmake +R091 src/components/xcpl_comps_nuopc/xice/cime_config/buildnml CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/buildnml +R100 src/components/xcpl_comps_nuopc/xice/cime_config/config_component.xml CIME/non_py/src/components/xcpl_comps_nuopc/xice/cime_config/config_component.xml +R100 src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 +A CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib +A CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib_cmake +R091 src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml +R100 src/components/xcpl_comps_nuopc/xlnd/cime_config/config_component.xml CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/cime_config/config_component.xml +R100 src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 +A CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib +A CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib_cmake +R091 src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml +R100 src/components/xcpl_comps_nuopc/xocn/cime_config/config_component.xml CIME/non_py/src/components/xcpl_comps_nuopc/xocn/cime_config/config_component.xml +R100 src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 +A CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib +A CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib_cmake +R091 src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml +R100 src/components/xcpl_comps_nuopc/xrof/cime_config/config_component.xml CIME/non_py/src/components/xcpl_comps_nuopc/xrof/cime_config/config_component.xml +R100 src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 +R100 src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 +R100 src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 +A CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib +A CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib_cmake +R091 src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml +R100 src/components/xcpl_comps_nuopc/xwav/cime_config/config_component.xml CIME/non_py/src/components/xcpl_comps_nuopc/xwav/cime_config/config_component.xml +R100 src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 +R100 src/share/timing/CMakeLists.txt CIME/non_py/src/timing/CMakeLists.txt +R100 src/share/timing/COPYING CIME/non_py/src/timing/COPYING +R100 src/share/timing/ChangeLog CIME/non_py/src/timing/ChangeLog +R100 src/share/timing/GPTLget_memusage.c CIME/non_py/src/timing/GPTLget_memusage.c +R100 src/share/timing/GPTLprint_memusage.c CIME/non_py/src/timing/GPTLprint_memusage.c 
+R100 src/share/timing/GPTLutil.c CIME/non_py/src/timing/GPTLutil.c +R100 src/share/timing/Makefile CIME/non_py/src/timing/Makefile +R100 src/share/timing/README CIME/non_py/src/timing/README +R100 src/share/timing/f_wrappers.c CIME/non_py/src/timing/f_wrappers.c +R100 src/share/timing/gptl.c CIME/non_py/src/timing/gptl.c +R100 src/share/timing/gptl.h CIME/non_py/src/timing/gptl.h +R100 src/share/timing/gptl.inc CIME/non_py/src/timing/gptl.inc +R100 src/share/timing/gptl_papi.c CIME/non_py/src/timing/gptl_papi.c +R100 src/share/timing/perf_mod.F90 CIME/non_py/src/timing/perf_mod.F90 +R100 src/share/timing/perf_utils.F90 CIME/non_py/src/timing/perf_utils.F90 +R100 src/share/timing/private.h CIME/non_py/src/timing/private.h +R098 scripts/lib/CIME/provenance.py CIME/provenance.py +A CIME/scripts/__init__.py +R096 tools/configure CIME/scripts/configure +A CIME/scripts/create_clone.py +A CIME/scripts/create_newcase.py +A CIME/scripts/create_test.py +A CIME/scripts/query_config.py +A CIME/scripts/query_testlists.py +R100 scripts/lib/CIME/simple_compare.py CIME/simple_compare.py +R098 scripts/lib/CIME/test_scheduler.py CIME/test_scheduler.py +R100 scripts/lib/CIME/test_status.py CIME/test_status.py +R100 scripts/lib/CIME/test_utils.py CIME/test_utils.py +R100 scripts/lib/CIME/tests/README CIME/tests/README +A CIME/tests/__init__.py +R099 scripts/lib/CIME/tests/base.py CIME/tests/base.py +R100 scripts/lib/CIME/tests/case_fake.py CIME/tests/case_fake.py +R096 scripts/lib/CIME/tests/custom_assertions_test_status.py CIME/tests/custom_assertions_test_status.py +R098 scripts/lib/CIME/tests/scripts_regression_tests.py CIME/tests/scripts_regression_tests.py +R100 scripts/lib/CIME/tests/test_sys_bless_tests_results.py CIME/tests/test_sys_bless_tests_results.py +R100 scripts/lib/CIME/tests/test_sys_build_system.py CIME/tests/test_sys_build_system.py +R099 scripts/lib/CIME/tests/test_sys_cime_case.py CIME/tests/test_sys_cime_case.py +R100 scripts/lib/CIME/tests/test_sys_cime_performance.py CIME/tests/test_sys_cime_performance.py +R100 scripts/lib/CIME/tests/test_sys_cmake_macros.py CIME/tests/test_sys_cmake_macros.py +R099 scripts/lib/CIME/tests/test_sys_create_newcase.py CIME/tests/test_sys_create_newcase.py +R099 scripts/lib/CIME/tests/test_sys_full_system.py CIME/tests/test_sys_full_system.py +R100 scripts/lib/CIME/tests/test_sys_grid_generation.py CIME/tests/test_sys_grid_generation.py +R099 scripts/lib/CIME/tests/test_sys_jenkins_generic_job.py CIME/tests/test_sys_jenkins_generic_job.py +R100 scripts/lib/CIME/tests/test_sys_macro_basic.py CIME/tests/test_sys_macro_basic.py +R100 scripts/lib/CIME/tests/test_sys_make_macros.py CIME/tests/test_sys_make_macros.py +R100 scripts/lib/CIME/tests/test_sys_manage_and_query.py CIME/tests/test_sys_manage_and_query.py +R100 scripts/lib/CIME/tests/test_sys_query_config.py CIME/tests/test_sys_query_config.py +R100 scripts/lib/CIME/tests/test_sys_run_restart.py CIME/tests/test_sys_run_restart.py +R100 scripts/lib/CIME/tests/test_sys_save_timings.py CIME/tests/test_sys_save_timings.py +R100 scripts/lib/CIME/tests/test_sys_single_submit.py CIME/tests/test_sys_single_submit.py +R099 scripts/lib/CIME/tests/test_sys_test_scheduler.py CIME/tests/test_sys_test_scheduler.py +R100 scripts/lib/CIME/tests/test_sys_unittest.py CIME/tests/test_sys_unittest.py +R100 scripts/lib/CIME/tests/test_sys_user_concurrent_mods.py CIME/tests/test_sys_user_concurrent_mods.py +R100 scripts/lib/CIME/tests/test_sys_wait_for_tests.py CIME/tests/test_sys_wait_for_tests.py +R099 
scripts/lib/CIME/tests/test_unit_case.py CIME/tests/test_unit_case.py +R100 scripts/lib/CIME/tests/test_unit_case_fake.py CIME/tests/test_unit_case_fake.py +R100 scripts/lib/CIME/tests/test_unit_case_setup.py CIME/tests/test_unit_case_setup.py +R100 scripts/lib/CIME/tests/test_unit_compare_test_results.py CIME/tests/test_unit_compare_test_results.py +R100 scripts/lib/CIME/tests/test_unit_compare_two.py CIME/tests/test_unit_compare_two.py +R097 scripts/lib/CIME/tests/test_unit_cs_status.py CIME/tests/test_unit_cs_status.py +R100 scripts/lib/CIME/tests/test_unit_custom_assertions_test_status.py CIME/tests/test_unit_custom_assertions_test_status.py +R092 scripts/lib/CIME/tests/test_unit_doctest.py CIME/tests/test_unit_doctest.py +R097 scripts/lib/CIME/tests/test_unit_expected_fails_file.py CIME/tests/test_unit_expected_fails_file.py +R100 scripts/lib/CIME/tests/test_unit_grids.py CIME/tests/test_unit_grids.py +R100 scripts/lib/CIME/tests/test_unit_nmlgen.py CIME/tests/test_unit_nmlgen.py +R098 scripts/lib/CIME/tests/test_unit_provenance.py CIME/tests/test_unit_provenance.py +R100 scripts/lib/CIME/tests/test_unit_test_status.py CIME/tests/test_unit_test_status.py +R100 scripts/lib/CIME/tests/test_unit_two_link_to_case2_output.py CIME/tests/test_unit_two_link_to_case2_output.py +R098 scripts/lib/CIME/tests/test_unit_user_mod_support.py CIME/tests/test_unit_user_mod_support.py +R097 scripts/lib/CIME/tests/test_unit_user_nl_utils.py CIME/tests/test_unit_user_nl_utils.py +R095 scripts/lib/CIME/tests/test_unit_utils.py CIME/tests/test_unit_utils.py +R100 scripts/lib/CIME/tests/test_unit_xml_env_batch.py CIME/tests/test_unit_xml_env_batch.py +A CIME/tests/test_unit_xml_env_mach_specific.py +R100 scripts/lib/CIME/tests/test_unit_xml_namelist_definition.py CIME/tests/test_unit_xml_namelist_definition.py +R100 scripts/tests/user_mods_test1/include_user_mods CIME/tests/user_mods_test1/include_user_mods +R100 scripts/tests/user_mods_test1/user_nl_comp CIME/tests/user_mods_test1/user_nl_comp +R100 scripts/tests/user_mods_test1/user_nl_cpl CIME/tests/user_mods_test1/user_nl_cpl +R100 scripts/tests/user_mods_test2/SourceMods/src.drv/somefile.F90 CIME/tests/user_mods_test2/SourceMods/src.drv/somefile.F90 +R100 scripts/tests/user_mods_test2/user_nl_cpl CIME/tests/user_mods_test2/user_nl_cpl +R100 scripts/tests/user_mods_test3/shell_commands CIME/tests/user_mods_test3/shell_commands +R099 scripts/lib/CIME/tests/utils.py CIME/tests/utils.py +R100 scripts/lib/CIME/user_mod_support.py CIME/user_mod_support.py +R092 scripts/lib/CIME/utils.py CIME/utils.py +R094 scripts/lib/CIME/wait_for_tests.py CIME/wait_for_tests.py +M CONTRIBUTING.md +M Externals.cfg +A MANIFEST.in +M conftest.py +M doc/source/users_guide/unit_testing.rst +M doc/tools_autodoc.py +A pyproject.toml +M scripts/create_clone +M scripts/create_newcase +M scripts/create_test +M scripts/fortran_unit_testing/run_tests.py +D scripts/lib/six.py +D scripts/lib/six_additions.py +M scripts/query_config +M scripts/query_testlists +M scripts/tests/CMakeLists.txt +M scripts/tests/list_tests +M setup.cfg +A setup.py +D src/components/stub_comps_nuopc/satm/cime_config/buildlib +D src/components/stub_comps_nuopc/satm/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/sesp/cime_config/buildlib +D src/components/stub_comps_nuopc/sesp/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/sglc/cime_config/buildlib +D src/components/stub_comps_nuopc/sglc/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/siac/cime_config/buildlib +D 
src/components/stub_comps_nuopc/siac/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/sice/cime_config/buildlib +D src/components/stub_comps_nuopc/sice/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/slnd/cime_config/buildlib +D src/components/stub_comps_nuopc/slnd/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/socn/cime_config/buildlib +D src/components/stub_comps_nuopc/socn/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/srof/cime_config/buildlib +D src/components/stub_comps_nuopc/srof/cime_config/buildlib_cmake +D src/components/stub_comps_nuopc/swav/cime_config/buildlib +D src/components/stub_comps_nuopc/swav/cime_config/buildlib_cmake +D src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib +D src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib_cmake +D src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib +D src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib_cmake +D src/components/xcpl_comps_nuopc/xice/cime_config/buildlib +D src/components/xcpl_comps_nuopc/xice/cime_config/buildlib_cmake +D src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib +D src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib_cmake +D src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib +D src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib_cmake +D src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib +D src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib_cmake +D src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib +D src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib_cmake +D src/externals/genf90/ChangeLog +D src/externals/genf90/genf90.pl +A tools/README.md +M tools/mapping/gen_domain_files/test_gen_domain.sh +====================================================================== +====================================================================== + +Originator: Chris Fischer +Date: 3-25-2022 +Tag: cime6.0.16 +Answer Changes: all active cice cases, same climate +Tests: scripts_regression_tests, aux_cice +Dependencies: + +Brief Summary: + - Fix run_tests.py when no machine is given. + - Changes to CIME to make CICE6 the default component. + - Bump pillow from 9.0.0 to 9.0.1 in pyCECT documents. + - Only clear baselines when run_count is 0 and -o is used. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +0b89e444d Merge pull request #4203 from billsacks/run_tests_no_explicit_machine +62fede4fd Merge pull request #4172 from dabail10/cime_cice6 +b3d4147ff Merge pull request #4196 from ESMCI/dependabot/pip/tools/statistical_ensemble_test/pyCECT/docs/pillow-9.0.1 +ff645a6d9 Merge pull request #4197 from ESMCI/fischer/retryruncount + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M scripts/create_test +M scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/test_scheduler.py +M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt + + +====================================================================== +====================================================================== + +Originator: Chris Fischer +Date: 3-11-2022 +Tag: cime6.0.15 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Prevent generated baselines from disappearing when using --retry. + - Comparison test class with arbitrary number of instances.
+ - Fixes extraneous warnings and machine detection + - neon inputdata update + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +4227e709c Merge pull request #4195 from ESMCI/fischer/retry +3cbf732a4 Merge pull request #4169 from alperaltuntas/system_tests_compare_multi +88f216f97 Merge pull request #4187 from jasonb5/fix_machine_warning +479181641 Merge pull request #4190 from jedwards4b/neon_inputdata_update + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M scripts/create_test +M scripts/lib/CIME/Servers/ftp.py +M scripts/lib/CIME/Servers/wget.py +A scripts/lib/CIME/SystemTests/system_tests_compare_n.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/test_scheduler.py + + +====================================================================== +====================================================================== + +Originator: Chris Fischer +Date: 3-1-2022 +Tag: cime6.0.14 +Answer Changes: [None, Round Off, Climate Changing] +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Needed some non-local flags added. + - Allow unbounded compilers to modify max tasks per node. + - Fix for correct path for PTS_DOMAINFILE. + - Check current machine and exit if not case machine. + - Use of optional .exclude files for selective exclusion during build. + - Automate documentation. + - Changes for implementing nemo in cesm2.3. + - Need to move version check out of utils.py. + - Update tests to get status from new EVV output. + - Remove the remaining users of config_compilers.xml. + - Reorder fox libraries for nvhpc. + - Add missing doctest. + - Fixes for a number of fails in scripts_regression for e3sm on mappy. + - Update grids.rst, removing $CCSMROOT references. + - Remove test_release from test_categories. + - Bump pillow from 8.3.2 to 9.0.0 in /tools/statistical_ensemble_test/pyCECT/docs. + - Fix removing test directories on failure. + - Remove rather than overwrite baseline contents with the create_test -o flag. 
+ - Remove trailing whitespace + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +fa02da2ae Merge pull request #4185 from ESMCI/jgfouca/fix_e3sm_tests +bbd38af81 Merge pull request #4184 from ESMCI/azamat/modify-max-unbounded-compilers +0fda6b095 Merge pull request #4183 from mvertens/feature/fix_scam +ac3cd93ce Merge pull request #4179 from jedwards4b/check_current_machine +2bb0deea6 Merge pull request #4180 from CESM-GC/feature/file_build_exclude_list +4f9ce69d2 Merge pull request #4177 from jasonb5/automate_docs +108b0cc25 Merge pull request #4170 from ESMCI/peano/nemo_port +2c4e6fc3e Merge pull request #4168 from jedwards4b/move_version_check +726136e78 Merge pull request #4166 from ESMCI/mkstratos/evv_fix +7d0ef4b30 Merge pull request #4161 from ESMCI/jgfouca/remove_remaining_users_of_compilers_xml +11fb0364b reorder fox libraries for nvhpc +3f2fe1ab8 Merge pull request #4159 from jasonb5/add_missing_doctest +6dde219c8 Merge pull request #4156 from ESMCI/jgfouca/e3sm_fixes +433bdeaac Merge pull request #4147 from johnsonbk/fix_ccsmroot_references +a4d865847 Merge pull request #4155 from ESMCI/jgfouca/remove_test_release +ff92490f6 Merge pull request #4158 from ESMCI/dependabot/pip/tools/statistical_ensemble_test/pyCECT/docs/pillow-9.0.0 +334f5622b Merge pull request #4157 from jasonb5/fix_test_directory_removal +83cc09df4 Merge pull request #4154 from jedwards4b/remove_not_overwrite_baseline +dd17a1543 Remove trailing whitespace + + +Modified files: git diff --name-status [previous_tag] +A .github/workflows/docs.yml +M ChangeLog +M config/xml_schemas/config_machines.xsd +A doc/requirements.txt +M doc/source/conf.py +M doc/source/index.rst +M doc/source/users_guide/grids.rst +M doc/source/xml_files/cesm.rst +M doc/source/xml_files/e3sm.rst +M doc/source/xml_files/index.rst +M doc/tools_autodoc.py +M scripts/Tools/Makefile +M scripts/Tools/case.setup +M scripts/Tools/mkDepends +M scripts/Tools/mkSrcfiles +M scripts/Tools/standard_script_setup.py +M scripts/Tools/xmlchange +M scripts/Tools/xmlquery +M scripts/create_newcase +M scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/SystemTests/funit.py +M scripts/lib/CIME/SystemTests/mvk.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/compilers.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/tests/README +M scripts/lib/CIME/tests/base.py +M scripts/lib/CIME/tests/scripts_regression_tests.py +M scripts/lib/CIME/tests/test_sys_bless_tests_results.py +M scripts/lib/CIME/tests/test_sys_cime_case.py +M scripts/lib/CIME/tests/test_sys_cmake_macros.py +M scripts/lib/CIME/tests/test_sys_create_newcase.py +M scripts/lib/CIME/tests/test_sys_grid_generation.py +M scripts/lib/CIME/tests/test_sys_jenkins_generic_job.py +M scripts/lib/CIME/tests/test_sys_macro_basic.py +M scripts/lib/CIME/tests/test_sys_make_macros.py +M scripts/lib/CIME/tests/test_sys_test_scheduler.py +M scripts/lib/CIME/tests/test_sys_unittest.py +M scripts/lib/CIME/tests/test_unit_case.py +A scripts/lib/CIME/tests/test_unit_doctest.py +R100 scripts/lib/CIME/tests/test_xml_env_batch.py scripts/lib/CIME/tests/test_unit_xml_env_batch.py +M scripts/lib/CIME/tests/utils.py +M scripts/lib/CIME/utils.py +M tools/cprnc/README +M 
tools/mapping/gen_domain_files/test_gen_domain.sh +M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt +====================================================================== +====================================================================== + +Originator: Chris Fischer +Date: 1-11-22 +Tag: cime6.0.13 +Answer Changes: None +Tests: scripts_regression_tests, pytest, & by hand +Dependencies: + +Brief Summary: + - Handle ESMF_Profile format change (backward compatible). + - Extend throughput and memory checking in baseline tests. + - FUNIT needs CIME_NO_CMAKE_MACRO ON for now. + - New compset naming convention. + - Changes needed for new cesm grid split in ccs_config_cesm. + - Move cesm config files to new repository - ccs_config_cesm. + - Add enforcing code formatting to pre-commit. + - Support importing buildnml from cime_config/{compname}_cime_py. + - Update the documentation of --extra-machines-dir. + - Fix returning correct test results. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +163752d4 Merge pull request #4150 from jedwards4b/esmf_profile_update +b0ada2143 Merge pull request #4151 from ESMCI/azamat/extend-perf-checks +f58bb131a Merge pull request #4149 from billsacks/funit_no_cmake_macro +7e63749f2 Merge pull request #4148 from jedwards4b/jedwards/refactor_compset_naming +bc80870bf changes needed for new cesm grid split in ccs_config_cesm (#4145) +a633c4521 Merge pull request #4144 from mvertens/feature/split_cesm_config +281c3d829 Merge pull request #4141 from jasonb5/precommit +178e9f83e Merge pull request #4130 from mnlevy1981/improve_cime_config +aba326d08 Merge pull request #4137 from billsacks/doc_extra_machines_dir +d6ffc88e3 Merge pull request #4143 from jasonb5/fix_testing + + +Modified files: git diff --name-status [previous_tag] +A .git-blame-ignore-revs +M .github/PULL_REQUEST_TEMPLATE +M .github/workflows/srt.yml +M .github/workflows/srt_nuopc.yml +M .gitignore +M .pre-commit-config.yaml +M CONTRIBUTING.md +M ChangeLog +M ChangeLog_template +M Externals.cfg +M LICENSE.TXT +D config/cesm/config_archive.xml +M config/cesm/config_files.xml +D config/cesm/config_grids.xml +D config/cesm/config_grids_common.xml +D config/cesm/config_grids_mct.xml +D config/cesm/config_grids_nuopc.xml +D config/cesm/config_inputdata.xml +D config/cesm/machines/Depends.babbageKnc +D config/cesm/machines/Depends.bluewaters +D config/cesm/machines/Depends.corip1 +D config/cesm/machines/Depends.cray +D config/cesm/machines/Depends.gnu +D config/cesm/machines/Depends.intel +D config/cesm/machines/Depends.intel14 +D config/cesm/machines/Depends.intelmic +D config/cesm/machines/Depends.intelmic14 +D config/cesm/machines/Depends.mira +D config/cesm/machines/Depends.nag +D config/cesm/machines/Depends.nvhpc-gpu +D config/cesm/machines/Depends.pgi-gpu +D config/cesm/machines/README +D config/cesm/machines/cmake_macros/CMakeLists.txt +D config/cesm/machines/cmake_macros/CNL.cmake +D config/cesm/machines/cmake_macros/Darwin.cmake +D config/cesm/machines/cmake_macros/Macros.cmake +D config/cesm/machines/cmake_macros/arm.cmake +D config/cesm/machines/cmake_macros/armgcc.cmake +D config/cesm/machines/cmake_macros/athena.cmake +D config/cesm/machines/cmake_macros/bluewaters.cmake +D config/cesm/machines/cmake_macros/casper.cmake +D config/cesm/machines/cmake_macros/centos7-linux.cmake +D config/cesm/machines/cmake_macros/cheyenne.cmake +D config/cesm/machines/cmake_macros/container.cmake +D config/cesm/machines/cmake_macros/cray.cmake +D 
config/cesm/machines/cmake_macros/cray_daint.cmake +D config/cesm/machines/cmake_macros/euler2.cmake +D config/cesm/machines/cmake_macros/euler3.cmake +D config/cesm/machines/cmake_macros/euler4.cmake +D config/cesm/machines/cmake_macros/frontera.cmake +D config/cesm/machines/cmake_macros/gnu.cmake +D config/cesm/machines/cmake_macros/gnu_cheyenne.cmake +D config/cesm/machines/cmake_macros/gnu_coeus.cmake +D config/cesm/machines/cmake_macros/gnu_hobart.cmake +D config/cesm/machines/cmake_macros/gnu_homebrew.cmake +D config/cesm/machines/cmake_macros/gnu_melvin.cmake +D config/cesm/machines/cmake_macros/gnu_modex.cmake +D config/cesm/machines/cmake_macros/hobart.cmake +D config/cesm/machines/cmake_macros/ibm.cmake +D config/cesm/machines/cmake_macros/ibm_AIX.cmake +D config/cesm/machines/cmake_macros/ibm_BGQ.cmake +D config/cesm/machines/cmake_macros/ibm_mira.cmake +D config/cesm/machines/cmake_macros/intel.cmake +D config/cesm/machines/cmake_macros/intel_Darwin.cmake +D config/cesm/machines/cmake_macros/intel_aleph.cmake +D config/cesm/machines/cmake_macros/intel_athena.cmake +D config/cesm/machines/cmake_macros/intel_bluewaters.cmake +D config/cesm/machines/cmake_macros/intel_casper.cmake +D config/cesm/machines/cmake_macros/intel_cheyenne.cmake +D config/cesm/machines/cmake_macros/intel_constance.cmake +D config/cesm/machines/cmake_macros/intel_cori-haswell.cmake +D config/cesm/machines/cmake_macros/intel_cori-knl.cmake +D config/cesm/machines/cmake_macros/intel_eastwind.cmake +D config/cesm/machines/cmake_macros/intel_edison.cmake +D config/cesm/machines/cmake_macros/intel_euler2.cmake +D config/cesm/machines/cmake_macros/intel_euler3.cmake +D config/cesm/machines/cmake_macros/intel_euler4.cmake +D config/cesm/machines/cmake_macros/intel_greenplanet-sib29.cmake +D config/cesm/machines/cmake_macros/intel_greenplanet-sky24.cmake +D config/cesm/machines/cmake_macros/intel_hobart.cmake +D config/cesm/machines/cmake_macros/intel_izumi.cmake +D config/cesm/machines/cmake_macros/intel_laramie.cmake +D config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake +D config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake +D config/cesm/machines/cmake_macros/intel_sandiatoss3.cmake +D config/cesm/machines/cmake_macros/intel_stampede2-knl.cmake +D config/cesm/machines/cmake_macros/intel_stampede2-skx.cmake +D config/cesm/machines/cmake_macros/intel_theia.cmake +D config/cesm/machines/cmake_macros/intel_zeus.cmake +D config/cesm/machines/cmake_macros/izumi.cmake +D config/cesm/machines/cmake_macros/laramie.cmake +D config/cesm/machines/cmake_macros/lonestar5.cmake +D config/cesm/machines/cmake_macros/nag.cmake +D config/cesm/machines/cmake_macros/nvhpc-gpu.cmake +D config/cesm/machines/cmake_macros/nvhpc-gpu_casper.cmake +D config/cesm/machines/cmake_macros/nvhpc.cmake +D config/cesm/machines/cmake_macros/nvhpc_casper.cmake +D config/cesm/machines/cmake_macros/pgi-gpu.cmake +D config/cesm/machines/cmake_macros/pgi-gpu_casper.cmake +D config/cesm/machines/cmake_macros/pgi.cmake +D config/cesm/machines/cmake_macros/pgi_bluewaters.cmake +D config/cesm/machines/cmake_macros/pgi_casper.cmake +D config/cesm/machines/cmake_macros/pgi_cheyenne.cmake +D config/cesm/machines/cmake_macros/pgi_constance.cmake +D config/cesm/machines/cmake_macros/pgi_daint.cmake +D config/cesm/machines/cmake_macros/pgi_eastwind.cmake +D config/cesm/machines/cmake_macros/pgi_euler2.cmake +D config/cesm/machines/cmake_macros/pgi_euler3.cmake +D config/cesm/machines/cmake_macros/pgi_euler4.cmake +D 
config/cesm/machines/cmake_macros/pgi_hobart.cmake +D config/cesm/machines/cmake_macros/pgi_izumi.cmake +D config/cesm/machines/cmake_macros/pgi_olympus.cmake +D config/cesm/machines/cmake_macros/pleiades-bro.cmake +D config/cesm/machines/cmake_macros/pleiades-has.cmake +D config/cesm/machines/cmake_macros/pleiades-ivy.cmake +D config/cesm/machines/cmake_macros/pleiades-san.cmake +D config/cesm/machines/cmake_macros/stampede2-knl.cmake +D config/cesm/machines/cmake_macros/stampede2-skx.cmake +D config/cesm/machines/cmake_macros/theta.cmake +D config/cesm/machines/cmake_macros/universal.cmake +D config/cesm/machines/cmake_macros/userdefined.cmake +D config/cesm/machines/cmake_macros/zeus.cmake +D config/cesm/machines/config_batch.xml +D config/cesm/machines/config_compilers.xml +D config/cesm/machines/config_machines.xml +D config/cesm/machines/config_pio.xml +D config/cesm/machines/config_workflow.xml +D config/cesm/machines/cylc_suite.rc.template +D config/cesm/machines/mpi_run_gpu.casper +D config/cesm/machines/nag_mpi_argument.txt +D config/cesm/machines/template.case.run +D config/cesm/machines/template.case.test +D config/cesm/machines/template.st_archive +D config/cesm/machines/userdefined_laptop_template/README.md +D config/cesm/machines/userdefined_laptop_template/config_compilers.xml +D config/cesm/machines/userdefined_laptop_template/config_machines.xml +D config/cesm/machines/userdefined_laptop_template/config_pes.xml +D config/ufs/config_archive.xml +M config/ufs/config_files.xml +D config/ufs/config_grids.xml +D config/ufs/config_inputdata.xml +D config/ufs/machines/Depends.cray +D config/ufs/machines/Depends.gnu +D config/ufs/machines/Depends.intel +D config/ufs/machines/README +D config/ufs/machines/config_batch.xml +D config/ufs/machines/config_compilers.xml +D config/ufs/machines/config_machines.xml +D config/ufs/machines/config_pio.xml +D config/ufs/machines/config_workflow.xml +D config/ufs/machines/cylc_suite.rc.template +D config/ufs/machines/template.case.run +D config/ufs/machines/template.case.test +D config/ufs/machines/template.chgres.run +D config/ufs/machines/template.gfs_post.run +D config/ufs/machines/template.st_archive +D config/ufs/machines/userdefined_laptop_template/README.md +D config/ufs/machines/userdefined_laptop_template/config_compilers.xml +D config/ufs/machines/userdefined_laptop_template/config_machines.xml +D config/ufs/machines/userdefined_laptop_template/config_pes.xml +M config/xml_schemas/config_grids_v2.2.xsd +M config/xml_schemas/config_machines.xsd +M conftest.py +M doc/Makefile +M doc/README +M doc/source/Tools_user/index.rst.template +M doc/source/_templates/layout.html +M doc/source/build_cpl/adding-components.rst +M doc/source/build_cpl/index.rst +M doc/source/build_cpl/introduction.rst +M doc/source/conf.py +M doc/source/glossary/index.rst +M doc/source/index.rst +M doc/source/misc_tools/ect.rst +M doc/source/misc_tools/load-balancing-tool.rst +M doc/source/users_guide/cime-internals.rst +M doc/source/users_guide/porting-cime.rst +M doc/source/users_guide/setting-up-a-case.rst +M doc/source/users_guide/testing.rst +M doc/source/users_guide/unit_testing.rst +M doc/source/what_cime/index.rst +M doc/source/xml_files/atmosphere.rst +M doc/source/xml_files/cesm.rst +M doc/source/xml_files/common.rst +M doc/source/xml_files/components.rst +M doc/source/xml_files/drivers.rst +M doc/source/xml_files/e3sm.rst +M doc/source/xml_files/esp.rst +M doc/source/xml_files/index.rst +M doc/source/xml_files/land.rst +M doc/source/xml_files/landice.rst 
+M doc/source/xml_files/ocean.rst +M doc/source/xml_files/river.rst +M doc/source/xml_files/seaice.rst +M doc/source/xml_files/wave.rst +M doc/tools_autodoc.cfg +M doc/tools_autodoc.py +M scripts/Tools/archive_metadata +M scripts/Tools/bld_diff +M scripts/Tools/bless_test_results +M scripts/Tools/case.build +M scripts/Tools/case.cmpgen_namelists +M scripts/Tools/case.qstatus +M scripts/Tools/case.setup +M scripts/Tools/case.submit +M scripts/Tools/case_diff +M scripts/Tools/check_case +M scripts/Tools/check_input_data +M scripts/Tools/check_lockedfiles +M scripts/Tools/cime_bisect +M scripts/Tools/code_checker +M scripts/Tools/compare_namelists +M scripts/Tools/compare_test_results +M scripts/Tools/component_compare_baseline +M scripts/Tools/component_compare_copy +M scripts/Tools/component_compare_test +M scripts/Tools/component_generate_baseline +M scripts/Tools/cs.status +M scripts/Tools/e3sm_check_env +M scripts/Tools/e3sm_compile_wrap.py +M scripts/Tools/generate_cylc_workflow.py +M scripts/Tools/getTiming +M scripts/Tools/get_case_env +M scripts/Tools/get_standard_makefile_args +M scripts/Tools/jenkins_generic_job +M scripts/Tools/list_e3sm_tests +M scripts/Tools/mvsource +M scripts/Tools/normalize_cases +M scripts/Tools/pelayout +M scripts/Tools/preview_namelists +M scripts/Tools/preview_run +M scripts/Tools/save_provenance +M scripts/Tools/simple_compare +M scripts/Tools/standard_script_setup.py +M scripts/Tools/testreporter.py +M scripts/Tools/wait_for_tests +M scripts/Tools/xmlchange +M scripts/Tools/xmlconvertors/config_pes_converter.py +M scripts/Tools/xmlconvertors/convert-grid-v1-to-v2 +M scripts/Tools/xmlconvertors/grid_xml_converter.py +M scripts/Tools/xmlquery +M scripts/Tools/xmltestentry +M scripts/climate_reproducibility/README.md +M scripts/create_clone +M scripts/create_newcase +M scripts/create_test +M scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +M scripts/fortran_unit_testing/python/printer.py +M scripts/fortran_unit_testing/python/test_xml_test_list.py +M scripts/fortran_unit_testing/python/xml_test_list.py +M scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/BuildTools/cmakemacroswriter.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/BuildTools/macroconditiontree.py +M scripts/lib/CIME/BuildTools/macrowriterbase.py +M scripts/lib/CIME/BuildTools/makemacroswriter.py +M scripts/lib/CIME/BuildTools/possiblevalues.py +M scripts/lib/CIME/BuildTools/valuesetting.py +M scripts/lib/CIME/Servers/__init__.py +M scripts/lib/CIME/Servers/ftp.py +M scripts/lib/CIME/Servers/generic_server.py +M scripts/lib/CIME/Servers/gftp.py +M scripts/lib/CIME/Servers/svn.py +M scripts/lib/CIME/Servers/wget.py +M scripts/lib/CIME/SystemTests/dae.py +M scripts/lib/CIME/SystemTests/eri.py +M scripts/lib/CIME/SystemTests/erio.py +M scripts/lib/CIME/SystemTests/erp.py +M scripts/lib/CIME/SystemTests/err.py +M scripts/lib/CIME/SystemTests/erri.py +M scripts/lib/CIME/SystemTests/ers.py +M scripts/lib/CIME/SystemTests/ers2.py +M scripts/lib/CIME/SystemTests/ert.py +M scripts/lib/CIME/SystemTests/funit.py +M scripts/lib/CIME/SystemTests/homme.py +M scripts/lib/CIME/SystemTests/hommebaseclass.py +M scripts/lib/CIME/SystemTests/hommebfb.py +M scripts/lib/CIME/SystemTests/icp.py +M scripts/lib/CIME/SystemTests/irt.py +M scripts/lib/CIME/SystemTests/ldsta.py +M scripts/lib/CIME/SystemTests/mcc.py +M scripts/lib/CIME/SystemTests/mvk.py +M scripts/lib/CIME/SystemTests/nck.py +M scripts/lib/CIME/SystemTests/ncr.py +M 
scripts/lib/CIME/SystemTests/nodefail.py +M scripts/lib/CIME/SystemTests/pea.py +M scripts/lib/CIME/SystemTests/pem.py +M scripts/lib/CIME/SystemTests/pet.py +M scripts/lib/CIME/SystemTests/pfs.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/SystemTests/pre.py +M scripts/lib/CIME/SystemTests/rep.py +M scripts/lib/CIME/SystemTests/restart_tests.py +M scripts/lib/CIME/SystemTests/seq.py +M scripts/lib/CIME/SystemTests/sms.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/SystemTests/system_tests_compare_two.py +M scripts/lib/CIME/SystemTests/test_utils/user_nl_utils.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/archive.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/XML/batch.py +M scripts/lib/CIME/XML/compilerblock.py +M scripts/lib/CIME/XML/compilers.py +M scripts/lib/CIME/XML/component.py +M scripts/lib/CIME/XML/compsets.py +M scripts/lib/CIME/XML/entry_id.py +M scripts/lib/CIME/XML/env_archive.py +M scripts/lib/CIME/XML/env_base.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_build.py +M scripts/lib/CIME/XML/env_case.py +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/env_run.py +M scripts/lib/CIME/XML/env_test.py +M scripts/lib/CIME/XML/env_workflow.py +M scripts/lib/CIME/XML/expected_fails_file.py +M scripts/lib/CIME/XML/files.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/headers.py +M scripts/lib/CIME/XML/inputdata.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/XML/namelist_definition.py +M scripts/lib/CIME/XML/pes.py +M scripts/lib/CIME/XML/pio.py +M scripts/lib/CIME/XML/standard_module_setup.py +M scripts/lib/CIME/XML/stream.py +M scripts/lib/CIME/XML/test_reporter.py +M scripts/lib/CIME/XML/testlist.py +M scripts/lib/CIME/XML/tests.py +M scripts/lib/CIME/XML/testspec.py +M scripts/lib/CIME/XML/workflow.py +M scripts/lib/CIME/aprun.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/buildnml.py +M scripts/lib/CIME/case/README +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_clone.py +M scripts/lib/CIME/case/case_cmpgen_namelists.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/case/case_test.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/case/check_lockedfiles.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/compare_namelists.py +M scripts/lib/CIME/compare_test_results.py +M scripts/lib/CIME/cs_status.py +M scripts/lib/CIME/cs_status_creator.py +M scripts/lib/CIME/date.py +M scripts/lib/CIME/expected_fails.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/locked_files.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/simple_compare.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_status.py +M scripts/lib/CIME/test_utils.py +M scripts/lib/CIME/tests/base.py +M scripts/lib/CIME/tests/case_fake.py +M scripts/lib/CIME/tests/custom_assertions_test_status.py +M scripts/lib/CIME/tests/scripts_regression_tests.py +M scripts/lib/CIME/tests/test_sys_bless_tests_results.py +M scripts/lib/CIME/tests/test_sys_build_system.py +M 
scripts/lib/CIME/tests/test_sys_cime_case.py +M scripts/lib/CIME/tests/test_sys_cime_performance.py +M scripts/lib/CIME/tests/test_sys_cmake_macros.py +M scripts/lib/CIME/tests/test_sys_create_newcase.py +M scripts/lib/CIME/tests/test_sys_full_system.py +M scripts/lib/CIME/tests/test_sys_grid_generation.py +M scripts/lib/CIME/tests/test_sys_jenkins_generic_job.py +M scripts/lib/CIME/tests/test_sys_macro_basic.py +M scripts/lib/CIME/tests/test_sys_make_macros.py +M scripts/lib/CIME/tests/test_sys_manage_and_query.py +M scripts/lib/CIME/tests/test_sys_run_restart.py +M scripts/lib/CIME/tests/test_sys_save_timings.py +M scripts/lib/CIME/tests/test_sys_single_submit.py +M scripts/lib/CIME/tests/test_sys_test_scheduler.py +M scripts/lib/CIME/tests/test_sys_unittest.py +M scripts/lib/CIME/tests/test_sys_user_concurrent_mods.py +M scripts/lib/CIME/tests/test_sys_wait_for_tests.py +M scripts/lib/CIME/tests/test_unit_case.py +M scripts/lib/CIME/tests/test_unit_case_fake.py +M scripts/lib/CIME/tests/test_unit_case_setup.py +M scripts/lib/CIME/tests/test_unit_compare_test_results.py +M scripts/lib/CIME/tests/test_unit_compare_two.py +M scripts/lib/CIME/tests/test_unit_cs_status.py +M scripts/lib/CIME/tests/test_unit_custom_assertions_test_status.py +M scripts/lib/CIME/tests/test_unit_expected_fails_file.py +M scripts/lib/CIME/tests/test_unit_grids.py +M scripts/lib/CIME/tests/test_unit_nmlgen.py +M scripts/lib/CIME/tests/test_unit_provenance.py +M scripts/lib/CIME/tests/test_unit_test_status.py +M scripts/lib/CIME/tests/test_unit_two_link_to_case2_output.py +M scripts/lib/CIME/tests/test_unit_user_mod_support.py +M scripts/lib/CIME/tests/test_unit_user_nl_utils.py +M scripts/lib/CIME/tests/test_unit_utils.py +M scripts/lib/CIME/tests/test_unit_xml_namelist_definition.py +M scripts/lib/CIME/tests/test_xml_env_batch.py +M scripts/lib/CIME/tests/utils.py +M scripts/lib/CIME/user_mod_support.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/get_tests.py +M scripts/lib/jenkins_generic_job.py +M scripts/lib/six.py +M scripts/lib/six_additions.py +M scripts/query_config +M scripts/query_testlists +M scripts/tests/CMakeLists.txt +M scripts/tests/list_tests +M scripts/tests/user_mods_test3/shell_commands +M setup.cfg +M src/CMake/CESM_utils.cmake +M src/CMake/mpiexec.cmake +M src/build_scripts/buildlib.cprnc +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.internal_components +M src/build_scripts/buildlib.kokkos +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M src/build_scripts/buildlib.pio +M src/build_scripts/buildlib_cmake.internal_components +M src/components/stub_comps_nuopc/satm/cime_config/buildnml +M src/components/stub_comps_nuopc/sesp/cime_config/buildnml +M src/components/stub_comps_nuopc/sglc/cime_config/buildnml +M src/components/stub_comps_nuopc/sice/cime_config/buildnml +M src/components/stub_comps_nuopc/slnd/cime_config/buildnml +M src/components/stub_comps_nuopc/socn/cime_config/buildnml +M src/components/stub_comps_nuopc/srof/cime_config/buildnml +M src/components/stub_comps_nuopc/swav/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xice/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml +M 
src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 +M src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml +M src/externals/genf90/ChangeLog +M src/share/README +M src/share/timing/ChangeLog +M src/share/timing/GPTLutil.c +M src/share/timing/gptl.c +M src/share/timing/gptl_papi.c +M tools/configure +M tools/load_balancing_tool/layouts.py +M tools/load_balancing_tool/load_balancing_solve.py +M tools/load_balancing_tool/load_balancing_submit.py +M tools/load_balancing_tool/optimize_model.py +M tools/load_balancing_tool/tests/atm_lnd.py +M tools/load_balancing_tool/tests/example.json +M tools/load_balancing_tool/tests/load_balancing_test.py +M tools/load_balancing_tool/tests/timing/timing_1 +M tools/load_balancing_tool/tests/timing/timing_2 +M tools/load_balancing_tool/tests/timing/timing_3 +M tools/mapping/check_maps/check_map.sh +M tools/mapping/check_maps/src/Makefile +M tools/mapping/gen_domain_files/INSTALL +M tools/mapping/gen_domain_files/README +M tools/mapping/gen_domain_files/src/gen_domain.F90 +M tools/mapping/gen_mapping_files/README +M tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README +M tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL +M tools/mapping/gen_mapping_files/runoff_to_ocn/README +M tools/mapping/map_field/INSTALL +M tools/mapping/map_field/README +M tools/statistical_ensemble_test/ensemble.py +M tools/statistical_ensemble_test/pyCECT/.gitignore +M tools/statistical_ensemble_test/pyCECT/CHANGES.rst +M tools/statistical_ensemble_test/pyCECT/EET.py +M tools/statistical_ensemble_test/pyCECT/README.rst +M tools/statistical_ensemble_test/pyCECT/docs/conf.py +M tools/statistical_ensemble_test/pyCECT/docs/source/installation.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/pyCECT.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSum.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSumPop.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/readme.rst +M tools/statistical_ensemble_test/pyCECT/pyCECT.py +M tools/statistical_ensemble_test/pyCECT/pyEnsLib.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSum.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py +M tools/statistical_ensemble_test/pyCECT/pyPlots.py +M tools/statistical_ensemble_test/pyCECT/test_pyEnsSum.sh +M tools/statistical_ensemble_test/single_run.py + +====================================================================== +====================================================================== + +Originator: Chris Fischer +Date: 12-16-21 +Tag: cime6.0.12 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Rename izumi.unified to izumi.cgd. + - Fix test ptr to run_cmd_no_fail. + - chdir is redundant; it is done in CMakeLists.txt. + - Fix paths in cdash testing. + - Hot fix cdash testing. + - Update unit tests and linting. + - Fix SSL certificate handling for the testing database. + - Fix machine probing. + - cprnc compares NaN. + - Update izumi ESMF lib to 8.2.0 release. + - Update CCE for crayenv2 on cheyenne. + - Cleanup Scorpio I/O performance data in the run directory. + - Fix the N_TestUnitTest by setting CIME_NO_CMAKE_MACRO. + - Add python 3.7.0 module load for izumi (CESM only). + - Fix broken links in documentation. + - Update cime externals. + - tools/configure was using the deprecated config_compilers.xml system. + - Remove documentation sections for pieces no longer included in CIME. + - More grids for mizuRoute.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +5e99566ae Merge pull request #4142 from ESMCI/fischer/izumi_cgd +6ffadc5fd fix test ptr to run_cmd_no_fail +d271ebbd8 chdir here is redundent, done in CMakeLists.txt +1faf45586 fix paths in cdash testing +3955917de hot fix cdash testing +baf320df2 Merge pull request #4129 from jasonb5/updates_testing +e725ab7cb Merge pull request #4128 from ESMCI/fischer/testreporter_ssl +f0dca0d96 Merge pull request #4134 from billsacks/fix_machine_probe +4961f0357 Merge pull request #4132 from jedwards4b/cprnc_nan_detect +188e9fe2b Merge pull request #4124 from ESMCI/fischer/izumi_esmf +b0f7080a9 update CCE for crayenv2 on cheyenne +8c515e31c Merge pull request #4127 from ESMCI/jayeshkrishna/consolidate_spio_timing +5a98a511b fix the N_TestUnitTest by setting CIME_NO_CMAKE_MACRO +cde1dafda Merge pull request #4117 from peverwhee/python_load_module +f9fe41210 Merge pull request #4123 from billsacks/fix_doc_links +1686f03b8 update externals +9f263ed3d Merge pull request #4122 from ESMCI/jgfouca/fix_tools_configure +7f709b709 Merge pull request #4108 from billsacks/remove_some_docs +c7813bce7 Merge pull request #3823 from ekluzek/mizuRoute + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M .github/workflows/srt_nuopc.yml +A .pre-commit-config.yaml +M Externals.cfg +M README.md +M config/cesm/config_grids.xml +M config/cesm/config_grids_nuopc.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_machines.xml +A conftest.py +D doc/source/data_models/data-atm.rst +D doc/source/data_models/data-lnd.rst +D doc/source/data_models/data-model-science.rst +D doc/source/data_models/data-ocean.rst +D doc/source/data_models/data-river.rst +D doc/source/data_models/data-seaice.rst +D doc/source/data_models/data-wave.rst +D doc/source/data_models/design-details.rst +D doc/source/data_models/index.rst +D doc/source/data_models/input-namelists.rst +D doc/source/data_models/input-streams.rst +D doc/source/data_models/introduction.rst +D doc/source/driver_cpl/bit-for-bit-flag.rst +D doc/source/driver_cpl/budgets.rst +D doc/source/driver_cpl/cplug-02.1-figx1.jpg +D doc/source/driver_cpl/design.rst +D doc/source/driver_cpl/driver_threading_control.rst +D doc/source/driver_cpl/grids.rst +D doc/source/driver_cpl/history-and-restarts.rst +D doc/source/driver_cpl/implementation.rst +D doc/source/driver_cpl/index.rst +D doc/source/driver_cpl/initialization-and-restart.rst +D doc/source/driver_cpl/introduction.rst +D doc/source/driver_cpl/multi-instance.rst +D doc/source/driver_cpl/namelist-overview.rst +D doc/source/driver_cpl/time-management.rst +M doc/source/index.rst +M doc/source/users_guide/cime-change-namelist.rst +M doc/source/users_guide/grids.rst +M doc/source/users_guide/running-a-case.rst +M doc/source/users_guide/testing.rst +M doc/source/what_cime/index.rst +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/BuildTools/macrowriterbase.py +M scripts/lib/CIME/SystemTests/ers2.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/XML/test_reporter.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/provenance.py +D scripts/lib/CIME/tests/SystemTests/__init__.py +D scripts/lib/CIME/tests/SystemTests/test_utils/__init__.py +D scripts/lib/CIME/tests/XML/__init__.py +A scripts/lib/CIME/tests/base.py +A scripts/lib/CIME/tests/scripts_regression_tests.py +A 
scripts/lib/CIME/tests/test_sys_bless_tests_results.py +A scripts/lib/CIME/tests/test_sys_build_system.py +A scripts/lib/CIME/tests/test_sys_cime_case.py +A scripts/lib/CIME/tests/test_sys_cime_performance.py +A scripts/lib/CIME/tests/test_sys_cmake_macros.py +A scripts/lib/CIME/tests/test_sys_create_newcase.py +A scripts/lib/CIME/tests/test_sys_full_system.py +A scripts/lib/CIME/tests/test_sys_grid_generation.py +A scripts/lib/CIME/tests/test_sys_jenkins_generic_job.py +A scripts/lib/CIME/tests/test_sys_macro_basic.py +A scripts/lib/CIME/tests/test_sys_make_macros.py +A scripts/lib/CIME/tests/test_sys_manage_and_query.py +A scripts/lib/CIME/tests/test_sys_query_config.py +A scripts/lib/CIME/tests/test_sys_run_restart.py +A scripts/lib/CIME/tests/test_sys_save_timings.py +A scripts/lib/CIME/tests/test_sys_single_submit.py +A scripts/lib/CIME/tests/test_sys_test_scheduler.py +A scripts/lib/CIME/tests/test_sys_unittest.py +A scripts/lib/CIME/tests/test_sys_user_concurrent_mods.py +A scripts/lib/CIME/tests/test_sys_wait_for_tests.py +D scripts/lib/CIME/tests/test_test_scheduler.py +R100 scripts/lib/CIME/tests/test_case.py scripts/lib/CIME/tests/test_unit_case.py +R100 scripts/lib/CIME/tests/test_case_fake.py scripts/lib/CIME/tests/test_unit_case_fake.py +R100 scripts/lib/CIME/tests/test_case_setup.py scripts/lib/CIME/tests/test_unit_case_setup.py +R100 scripts/lib/CIME/tests/test_compare_test_results.py scripts/lib/CIME/tests/test_unit_compare_test_results.py +R100 scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py scripts/lib/CIME/tests/test_unit_compare_two.py +R100 scripts/lib/CIME/tests/test_cs_status.py scripts/lib/CIME/tests/test_unit_cs_status.py +R100 scripts/lib/CIME/tests/test_custom_assertions_test_status.py scripts/lib/CIME/tests/test_unit_custom_assertions_test_status.py +R100 scripts/lib/CIME/tests/XML/test_expected_fails_file.py scripts/lib/CIME/tests/test_unit_expected_fails_file.py +R100 scripts/lib/CIME/tests/XML/test_grids.py scripts/lib/CIME/tests/test_unit_grids.py +R100 scripts/lib/CIME/tests/test_nmlgen.py scripts/lib/CIME/tests/test_unit_nmlgen.py +R100 scripts/lib/CIME/tests/test_provenance.py scripts/lib/CIME/tests/test_unit_provenance.py +R100 scripts/lib/CIME/tests/test_test_status.py scripts/lib/CIME/tests/test_unit_test_status.py +R100 scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py scripts/lib/CIME/tests/test_unit_two_link_to_case2_output.py +R100 scripts/lib/CIME/tests/test_user_mod_support.py scripts/lib/CIME/tests/test_unit_user_mod_support.py +R100 scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py scripts/lib/CIME/tests/test_unit_user_nl_utils.py +R100 scripts/lib/CIME/tests/test_utils.py scripts/lib/CIME/tests/test_unit_utils.py +R100 scripts/lib/CIME/tests/test_xml_namelist_definition.py scripts/lib/CIME/tests/test_unit_xml_namelist_definition.py +M scripts/lib/CIME/tests/utils.py +M scripts/lib/CIME/utils.py +M scripts/tests/CMakeLists.txt +M scripts/tests/list_tests +D scripts/tests/scripts_regression_tests.py +A setup.cfg +M tools/cprnc/compare_vars_mod.F90.in +M tools/cprnc/run_tests +M tools/cprnc/test_inputs/README +A tools/cprnc/test_inputs/control_floatDoubleNan.nc +A tools/cprnc/test_inputs/diffs_in_nans.nc +M tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 11-1-21 
+Tag: cime6.0.11 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Add IRT test to exceptions for generate history. + - Replace use of MODEL with COMP_NAME in cmake_macros directory. + - Improve Macro.make generation. + - Update centos7-linux settings. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +b763b20a9 Merge pull request #4110 from jedwards4b/fix_IRT_generate +48f8ad154 Merge pull request #4119 from jedwards4b/cmake_MODEL_TO_COMP_NAME +90b4e37b7 Merge pull request #4113 from ESMCI/jgfouca/improve_make_macro_generation +5868385ad update centos7-linux settings + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/cmake_macros/CNL.cmake +M config/cesm/machines/cmake_macros/cheyenne.cmake +M config/cesm/machines/cmake_macros/container.cmake +M config/cesm/machines/cmake_macros/cray.cmake +M config/cesm/machines/cmake_macros/frontera.cmake +M config/cesm/machines/cmake_macros/gnu_cheyenne.cmake +M config/cesm/machines/cmake_macros/gnu_modex.cmake +M config/cesm/machines/cmake_macros/hobart.cmake +M config/cesm/machines/cmake_macros/ibm.cmake +M config/cesm/machines/cmake_macros/ibm_AIX.cmake +M config/cesm/machines/cmake_macros/intel_aleph.cmake +M config/cesm/machines/cmake_macros/intel_athena.cmake +M config/cesm/machines/cmake_macros/intel_cori-haswell.cmake +M config/cesm/machines/cmake_macros/intel_cori-knl.cmake +M config/cesm/machines/cmake_macros/intel_edison.cmake +M config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake +M config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake +M config/cesm/machines/cmake_macros/intel_zeus.cmake +M config/cesm/machines/cmake_macros/izumi.cmake +M config/cesm/machines/cmake_macros/laramie.cmake +M config/cesm/machines/cmake_macros/nag.cmake +M config/cesm/machines/cmake_macros/nvhpc-gpu.cmake +M config/cesm/machines/cmake_macros/nvhpc.cmake +M config/cesm/machines/cmake_macros/pgi-gpu.cmake +M config/cesm/machines/cmake_macros/pgi.cmake +M config/cesm/machines/cmake_macros/universal.cmake +M config/cesm/machines/cmake_macros/zeus.cmake +M config/cesm/machines/config_machines.xml +M scripts/lib/CIME/SystemTests/irt.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/hist_utils.py +M scripts/tests/scripts_regression_tests.py + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 10-25-2021 +Tag: cime6.0.10 +Answer Changes: [None, Round Off, Climate Changing] +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Update Cheyenne to use ESMF_8_2_0_beta_snapshot_23. + - Remove reference to Argonne from License file. + - Remove debug print statements. + - Port to nersc perlmutter and CPE on cheyenne. + - config_archive changes for running CISM with multiple ice sheets. + - wpc erio improvement. + - Hot fix Makefile for cesm. + - Fix copying user cmake files. + - Remove MODEL as a setting for the build system, use COMP_NAME instead. + - Hot fix for scripts regression tests. + - Merge branch 'master' of github.com:/ESMCI/cime. + - Update Externals. + - Hot fix for nuopc mpi-serial tests. + - Remove dependency of cprnc on non-existent COMPARE_VARS target. + - Add cmake macros. + - cime_bisect: Improve robustness when using custom script. + - Update PGN and TSC test scripts for E3SM. + - Fix limited github workflow nuopc scripts regression testing.
+ - Hot fix, add another cam target that uses kokkos. + - Hot fix for better cmake-macro toggling. + - Change E3SM to use cmake macro file system. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +1a77ba628 Merge pull request #4114 from ESMCI/fischer/ESMF_update +3708b0e26 Merge pull request #4112 from ESMCI/fischer/license +e6fb20994 hot fix, remove debug print statements +adc6adc3f Merge pull request #4106 from jedwards4b/perlmutter_port +c8af27966 Merge pull request #4101 from billsacks/remove_cism_config_archive +7eeb2c8e6 Merge pull request #4109 from ESMCI/wpc_erio_improvement +a26c8e393 hot fix Makefile for cesm +fb5934c63 Merge pull request #4105 from jasonb5/fixes_user_cmake +4a6b60438 Merge pull request #4104 from ESMCI/jgfouca/remove_MODEL_from_bld +828fd3227 hot fix for scripts regression tests +79159c14e Merge branch 'master' of github.com:/ESMCI/cime +ea1597e79 update externals +d6956db5c hot fix for nuopc mpi-serial test +3d1be0ed6 Merge pull request #4098 from bartgol/bartgol/remove-compare-vars-dep +2b6088308 Merge pull request #4093 from jedwards4b/replace_config_compilers_with_cmake_macros +258737591 Merge pull request #4099 from ESMCI/jgfouca/imprv_cime_bisect +802206017 Merge pull request #4092 from ESMCI/wlin/atm_pgn_tsc_tests +c89e85d10 Merge pull request #4094 from ESMCI/fischer/nuopc_srt +6a2cc11fd hotfix, add another cam target that uses kokkos +fbb407916 Hotfix for better cmake-macro toggling +86a0db6f3 Merge pull request #4088 from ESMCI/jgfouca/e3sm_cmake_macros + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt_nuopc.yml +M CMakeLists.txt +M Externals.cfg +M LICENSE.TXT +M config/cesm/config_archive.xml +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +A config/cesm/machines/cmake_macros/CMakeLists.txt +A config/cesm/machines/cmake_macros/CNL.cmake +A config/cesm/machines/cmake_macros/Darwin.cmake +A config/cesm/machines/cmake_macros/Macros.cmake +A config/cesm/machines/cmake_macros/arm.cmake +A config/cesm/machines/cmake_macros/armgcc.cmake +A config/cesm/machines/cmake_macros/athena.cmake +A config/cesm/machines/cmake_macros/bluewaters.cmake +A config/cesm/machines/cmake_macros/casper.cmake +A config/cesm/machines/cmake_macros/centos7-linux.cmake +A config/cesm/machines/cmake_macros/cheyenne.cmake +A config/cesm/machines/cmake_macros/container.cmake +A config/cesm/machines/cmake_macros/cray.cmake +A config/cesm/machines/cmake_macros/cray_daint.cmake +A config/cesm/machines/cmake_macros/euler2.cmake +A config/cesm/machines/cmake_macros/euler3.cmake +A config/cesm/machines/cmake_macros/euler4.cmake +A config/cesm/machines/cmake_macros/frontera.cmake +A config/cesm/machines/cmake_macros/gnu.cmake +A config/cesm/machines/cmake_macros/gnu_cheyenne.cmake +A config/cesm/machines/cmake_macros/gnu_coeus.cmake +A config/cesm/machines/cmake_macros/gnu_hobart.cmake +A config/cesm/machines/cmake_macros/gnu_homebrew.cmake +A config/cesm/machines/cmake_macros/gnu_melvin.cmake +A config/cesm/machines/cmake_macros/gnu_modex.cmake +A config/cesm/machines/cmake_macros/hobart.cmake +A config/cesm/machines/cmake_macros/ibm.cmake +A config/cesm/machines/cmake_macros/ibm_AIX.cmake +A config/cesm/machines/cmake_macros/ibm_BGQ.cmake +A config/cesm/machines/cmake_macros/ibm_mira.cmake +A config/cesm/machines/cmake_macros/intel.cmake +A config/cesm/machines/cmake_macros/intel_Darwin.cmake +A config/cesm/machines/cmake_macros/intel_aleph.cmake +A 
config/cesm/machines/cmake_macros/intel_athena.cmake +A config/cesm/machines/cmake_macros/intel_bluewaters.cmake +A config/cesm/machines/cmake_macros/intel_casper.cmake +A config/cesm/machines/cmake_macros/intel_cheyenne.cmake +A config/cesm/machines/cmake_macros/intel_constance.cmake +A config/cesm/machines/cmake_macros/intel_cori-haswell.cmake +A config/cesm/machines/cmake_macros/intel_cori-knl.cmake +A config/cesm/machines/cmake_macros/intel_eastwind.cmake +A config/cesm/machines/cmake_macros/intel_edison.cmake +A config/cesm/machines/cmake_macros/intel_euler2.cmake +A config/cesm/machines/cmake_macros/intel_euler3.cmake +A config/cesm/machines/cmake_macros/intel_euler4.cmake +A config/cesm/machines/cmake_macros/intel_greenplanet-sib29.cmake +A config/cesm/machines/cmake_macros/intel_greenplanet-sky24.cmake +A config/cesm/machines/cmake_macros/intel_hobart.cmake +A config/cesm/machines/cmake_macros/intel_izumi.cmake +A config/cesm/machines/cmake_macros/intel_laramie.cmake +A config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake +A config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake +A config/cesm/machines/cmake_macros/intel_sandiatoss3.cmake +A config/cesm/machines/cmake_macros/intel_stampede2-knl.cmake +A config/cesm/machines/cmake_macros/intel_stampede2-skx.cmake +A config/cesm/machines/cmake_macros/intel_theia.cmake +A config/cesm/machines/cmake_macros/intel_zeus.cmake +A config/cesm/machines/cmake_macros/izumi.cmake +A config/cesm/machines/cmake_macros/laramie.cmake +A config/cesm/machines/cmake_macros/lonestar5.cmake +A config/cesm/machines/cmake_macros/nag.cmake +A config/cesm/machines/cmake_macros/nvhpc-gpu.cmake +A config/cesm/machines/cmake_macros/nvhpc-gpu_casper.cmake +A config/cesm/machines/cmake_macros/nvhpc.cmake +A config/cesm/machines/cmake_macros/nvhpc_casper.cmake +A config/cesm/machines/cmake_macros/pgi-gpu.cmake +A config/cesm/machines/cmake_macros/pgi-gpu_casper.cmake +A config/cesm/machines/cmake_macros/pgi.cmake +A config/cesm/machines/cmake_macros/pgi_bluewaters.cmake +A config/cesm/machines/cmake_macros/pgi_casper.cmake +A config/cesm/machines/cmake_macros/pgi_cheyenne.cmake +A config/cesm/machines/cmake_macros/pgi_constance.cmake +A config/cesm/machines/cmake_macros/pgi_daint.cmake +A config/cesm/machines/cmake_macros/pgi_eastwind.cmake +A config/cesm/machines/cmake_macros/pgi_euler2.cmake +A config/cesm/machines/cmake_macros/pgi_euler3.cmake +A config/cesm/machines/cmake_macros/pgi_euler4.cmake +A config/cesm/machines/cmake_macros/pgi_hobart.cmake +A config/cesm/machines/cmake_macros/pgi_izumi.cmake +A config/cesm/machines/cmake_macros/pgi_olympus.cmake +A config/cesm/machines/cmake_macros/pleiades-bro.cmake +A config/cesm/machines/cmake_macros/pleiades-has.cmake +A config/cesm/machines/cmake_macros/pleiades-ivy.cmake +A config/cesm/machines/cmake_macros/pleiades-san.cmake +A config/cesm/machines/cmake_macros/stampede2-knl.cmake +A config/cesm/machines/cmake_macros/stampede2-skx.cmake +A config/cesm/machines/cmake_macros/theta.cmake +A config/cesm/machines/cmake_macros/universal.cmake +A config/cesm/machines/cmake_macros/userdefined.cmake +A config/cesm/machines/cmake_macros/zeus.cmake +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/config_tests.xml +M config/e3sm/config_files.xml +M config/xml_schemas/config_archive.xsd +M doc/source/users_guide/cime-config.rst +M doc/source/users_guide/machine.rst +M doc/source/users_guide/porting-cime.rst +M 
doc/source/users_guide/troubleshooting.rst +M doc/source/users_guide/unit_testing.rst +M doc/source/xml_files/cesm.rst +M scripts/Tools/Makefile +M scripts/Tools/cime_bisect +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/SystemTests/erio.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/test_scheduler.py +A scripts/lib/CIME/tests/test_case_setup.py +M scripts/lib/get_tests.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M src/build_scripts/buildlib.pio +M tools/cprnc/CMakeLists.txt + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 9-09-2021 +Tag: cime6.0.9 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Fix scripts_regression_tests on mappy for e3sm. + - Update the test_scheduler to reflect the default driver change. + - Adds a container machine definition. + - Bump pillow from 8.2.0 to 8.3.2 in /tools/statistical_ensemble_test/pyCECT/docs. + - Update modules and build flags on cori. + - Add a SourceMods/src.cdeps directory for shared cdeps sourcemods. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +4fa1194a2 Merge pull request #4085 from ESMCI/jgfouca/fix_scripts_reg +0eaecd9f0 Merge pull request #4082 from jedwards4b/update_for_nuopc_default +4981c6277 Merge pull request #4081 from jedwards4b/better_neon_data_handling +27df61824 Merge pull request #4084 from ESMCI/dependabot/pip/tools/statistical_ensemble_test/pyCECT/docs/pillow-8.3.2 +cd6c7eaf0 Merge pull request #4047 from jedwards4b/cori_update +4906230e4 Merge pull request #4079 from jedwards4b/add_cdeps_SourceMods + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M scripts/lib/CIME/XML/inputdata.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/tests/test_case.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.mct +M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 9-02-2021 +Tag: cime6.0.8 +Answer Changes: None +Tests: +Dependencies: + +Brief Summary: + - Add ESMF modules for PGI on izumi. + - Allow test_reporter to post a long list of test results to the database. + - Set nuopc as the default. + - Remove moab.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +1c41537f7 Merge pull request #4080 from ESMCI/fischer/izumi_esmf +5ba4a6b12 Merge pull request #4075 from ESMCI/fischer/test_reporter_update +13d2a36eb Merge pull request #4078 from jedwards4b/change_cesm_default_driver_nuopc +4e9e4f409 Merge pull request #4037 from ESMCI/fischer/moab + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M .github/workflows/srt_nuopc.yml +M config/cesm/machines/config_machines.xml +M scripts/lib/CIME/XML/test_reporter.py +M scripts/lib/CIME/utils.py +D src/drivers/moab + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 8-25-2021 +Tag: cime6.0.7 +Answer Changes: None +Tests: ran the tests in testlist_cdeps.xml +Dependencies: + +Brief Summary: + - Changes to add new user_nl_xxx_streams files for cdeps. + +User interface changes: + - New user_nl_xxx_streams file to change CDEPS stream files + + +PR summary: git log --oneline --first-parent [previous_tag]..master +173f5c35b Merge pull request #4074 from mvertens/feature/new_userstream_file + +Modified files: git diff --name-status [previous_tag] +M scripts/lib/CIME/case/case_setup.py + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 8-20-2021 +Tag: cime6.0.6 +Answer Changes: [None, Round Off, Climate Changing] +Tests: +Dependencies: + +Brief Summary: + - Make sure CLM_USRDAT_NAME is not UNSET if set. + - New mesh file and grid for running CISM over Antarctica. + - Handle neon data better by first checking the neon server. + - Fixes provenance in git worktree. + - Fix namelist duplications. + - Changed Addendum to Appendices. + - Allow multiple methods for running unit tests. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +2ceb39021 make sure CLM_USRDAT_NAME is not UNSET if set +99d111786 Merge pull request #4071 from billsacks/update_antarctica_grid +e54b73654 Merge pull request #4073 from jedwards4b/better_neon_data_handling +db69f21d4 Merge pull request #4060 from jasonb5/fix_git_worktree +e66984b24 Merge pull request #4066 from jasonb5/fix_namelist_duplications +48fc713a9 Merge pull request #4070 from Sukhamjot-Singh/patch-1 +322d52bc6 Merge pull request #4008 from billsacks/unit_tests_multiple_methods + + +Modified files: git diff --name-status [previous_tag] + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 8-6-2021 +Tag: cime6.0.5 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Fix an issue with file locks when not run from caseroot. + - Strip % from job_id. + - Update esmf version. + - Support the creation of multiple user_nl files for a component.
+ +User interface changes: + - Allows components to define a get_user_nl_list function in their + buildnml file, which supports the creation of multiple user_nl files for + the component + +PR summary: git log --oneline --first-parent [previous_tag]..master +949b0e798 (HEAD -> master, origin/master, origin/HEAD) Merge pull request #4065 from jedwards4b/fix_not_in_caseroot +a2b4a90d7 Merge pull request #4064 from jedwards4b/strip_job_id +9d731e9b3 update esmf version +e90399193 Merge pull request #4058 from billsacks/multiple_user_nls + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt_nuopc.yml +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/tests/test_utils.py +M scripts/lib/CIME/utils.py + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 8-2-2021 +Tag: cime6.0.4 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Fix replay archive. + - Prepend the casename to mpas component restart file search in st_archive. + - Fix e3sm archiving. + - Check the size of read-only xml files. + - Ensure CMake is available for wait_for_tests. + - Fix the failure of ensemble consistency tests for PGI/NVHPC compiler on Casper. + - Fix typo in centos7-linux definition. + - jenkins_generic_job: no reason for this magic config setting here. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +c6f489356 Merge pull request #4053 from jasonb5/fix_replay_archive +dbaa9b2d2 Merge pull request #4057 from ESMCI/jonbob/fix_mpas_archive_naming +1ee229961 Merge pull request #4055 from jasonb5/fix_e3sm_archiving +705640271 Merge pull request #4056 from jedwards4b/jedwards/checksize_of_xml +231c8e783 Merge pull request #4052 from ESMCI/jgfouca/fix_cmake_avail_for_wft +4961a210a Merge pull request #4048 from sjsprecious/fix_ect_failure +d72fc45f1 fix typo in centos7-linux definition +49342dca5 Merge pull request #4046 from ESMCI/jgfouca/remove_dumb_config + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/Depends.nvhpc-gpu +M config/cesm/machines/Depends.pgi-gpu +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M scripts/Tools/jenkins_generic_job +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/tests/test_case.py +M scripts/lib/CIME/tests/test_provenance.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/tests/scripts_regression_tests.py + + +====================================================================== + +====================================================================== + +Originator: Bill Sacks +Date: 7-15-2021 +Tag: cime6.0.3 +Answer Changes: [None, Round Off, Climate Changing] +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - WW3 support for E3SM + - Allow a test to have multiple testmods + - Update python version check to 3.6 and load python module on cheyenne + - Update ESMF libraries on cheyenne and izumi + +User interface changes: + - Allow multiple user mods directories in tests as well as in + create_newcase and create_clone.
This is completely backwards + compatible. + +PR summary: git log --oneline --first-parent [previous_tag]..master +45a8622dc Merge pull request #4042 from jedwards4b/jedwards/testmods_path +4ac7bfb6b Merge pull request #4040 from ESMCI/fischer/esmf +0067c2dc3 Merge pull request #4038 from ESMCI/mkstratos/fix_clim_repro_bugs +5e2046fee update standalone externals +2709797d1 Merge pull request #4035 from billsacks/cheyenne_python_module +26e38f5e2 Merge pull request #4030 from ESMCI/fix_auto_r8 +5f055413e Merge pull request #4031 from billsacks/multiple_testmods +790be3994 Merge pull request #4029 from jasonb5/fix_external_mct +4312d8506 Merge pull request #4028 from jasonb5/add_missing_desp_definition +2f363cffd Merge pull request #4027 from ESMCI/jonbob/e3sm/support-add-ww3 +cdc40cba4 Merge pull request #4026 from jedwards4b/jedwards/centos7-update +6788449eb fix pylint issue + +Modified files: git diff --name-status [previous_tag] +M CMakeLists.txt +M ChangeLog +M Externals.cfg +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/e3sm/config_files.xml +M scripts/Tools/Makefile +M scripts/Tools/standard_script_setup.py +M scripts/create_clone +M scripts/create_newcase +M scripts/lib/CIME/SystemTests/mvk.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_clone.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_utils.py +M scripts/lib/CIME/tests/test_utils.py +M scripts/lib/CIME/utils.py +M scripts/lib/get_tests.py +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M tools/cprnc/CMakeLists.txt + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-29-2021 +Tag: cime6.0.2 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Update externals. + - Fix tests. + - Add esmf support for casper pgi. + - Fix testreporter issues with python3. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +95644f2 update externals +52f6cca Merge pull request #4022 from jedwards4b/jedwards/fix_tests +cb316dd Merge pull request #4023 from ESMCI/fischer/casper +4246060 Merge pull request #4021 from ESMCI/fischer/testreporter_python3 + + +Modified files: git diff --name-status [previous_tag] +M CMakeLists.txt +M Externals.cfg +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M scripts/lib/CIME/XML/test_reporter.py +M src/build_scripts/buildlib.cprnc +D src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.pio + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-29-2021 +Tag: cime6.0.1 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Allow multiple copies of this variable. + - Updates from imp to importlib. + - Set default python to 'python3' for scripts tests CMake file. + - Add a new version of WW3 as an optional component. + - Multi-GPU support on casper. + - CIME SmartSim support. + - Refactor lilac build - only relevant to CESM. + - Adds default walltime for queues.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +ccdf7376e Merge pull request #4019 from jedwards4b/fix_max_tasks_per_node +792be93f1 Merge pull request #4015 from jasonb5/update_importlib +f919e5809 Merge pull request #4016 from ESMCI/jgfouca/fix_mappy_py_ctest +7989160f2 Merge pull request #3976 from ESMCI/feature/add_ww3 +1ca7db61f merge multi_gpus_casper +915ab904c Merge branch 'cime_smartsim' +b42da1b94 Merge pull request #4014 from ESMCI/feature/refactor_lilac_build +523e9afc5 Merge pull request #4011 from jasonb5/add_default_walltime + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/config_inputdata.xml +M config/cesm/machines/Depends.nvhpc-gpu +M config/cesm/machines/Depends.pgi-gpu +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_workflow.xml +A config/cesm/machines/mpi_run_gpu.casper +M config/xml_schemas/config_batch.xsd +M config/xml_schemas/config_inputdata.xsd +M config/xml_schemas/config_machines.xsd +M scripts/Tools/Makefile +M scripts/Tools/xmlconvertors/config_pes_converter.py +M scripts/Tools/xmlquery +M scripts/create_newcase +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/XML/entry_id.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_workflow.py +M scripts/lib/CIME/XML/inputdata.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/XML/namelist_definition.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/tests/test_case.py +M scripts/lib/CIME/tests/test_utils.py +M scripts/lib/CIME/tests/test_xml_env_batch.py +M scripts/lib/CIME/utils.py +M scripts/query_config +M scripts/tests/CMakeLists.txt +M scripts/tests/scripts_regression_tests.py + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-23-2021 +Tag: cime6.0.0 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Remove files that were moved out in #3990. + - Remove all python2 invocations, except some stuff from tools. + - Separation of fortran code into new repositories. + - Change cdash upload settings. + - Fix walltime check. + - Record preview_run provenance. + - Fixes error in parsing and comparing namelist values. + - Bump pillow from 8.1.1 to 8.2.0 in /tools/statistical_ensemble_test/pyCECT/docs. + - Update the esmf library module for all compilers, update gnu compiler to 10.1.0 on cheyenne. + - Fix unittest raising error on py37. + - Allow multiple grids for a component. + - Update climate reproducibility tests. + - Adds --chksum argument to case.submit and create_test. + - Fixes new_hash method. + - Adds CASE_HASH to env_case. + - Modify ERI test for EAM. + - Switch location of E3SM share buildlib scripts. + - Updates for pleiades systems. + - Add feature to capture CIME commands in a script.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +866d4f507 Merge pull request #4010 from ESMCI/fischer/rm_moved_share +19c689b3a Merge pull request #4009 from ESMCI/jgfouca/more_python_churn +ca5bcf0a8 Seperation of fortran code to new repositories. (#3990) +6b7a8e201 Merge pull request #4006 from ESMCI/jgfouca/change_cdash_drop_settings +8d916c1a8 Merge pull request #4005 from jasonb5/fix_walltime_check +71ddf1ced Merge pull request #3998 from jasonb5/fix_record_run_details +4d5fdbab2 Merge pull request #3994 from jasonb5/fix_namelist_cmp +bc9733021 Merge pull request #3991 from ESMCI/dependabot/pip/tools/statistical_ensemble_test/pyCECT/docs/pillow-8.2.0 +b5301eb2f Merge pull request #3985 from jedwards4b/fix_make_bld_flags +8a3320cb6 Merge pull request #3986 from jasonb5/fix_unittest_py37 +2ae0ec9e3 Merge pull request #3982 from billsacks/glc_multigrids +e4f7ac800 Merge pull request #3967 from ESMCI/mkstratos/update_repro_tests +8ec59c02d Merge pull request #3977 from jasonb5/add_chksum_argument +7653760bc Merge pull request #3975 from jasonb5/fix_new_hash +d224588ef Merge pull request #3968 from jasonb5/add_case_hash +ad68a5d35 Merge pull request #3963 from ESMCI/rljacob/fix-eri4eam +9e5b743ef Merge pull request #3959 from ESMCI/rljacob/e3sm-mvshare2 +353730e0f Merge pull request #3946 from fvitt/pleiades_updates +3591f0afe Merge pull request #3958 from jasonb5/add_script_capture + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +A .github/workflows/srt_nuopc.yml +M .gitignore +M CMakeLists.txt +A Externals.cfg +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/template.case.run +M config/cesm/machines/template.case.test +M config/cesm/machines/template.st_archive +M config/e3sm/config_files.xml +M config/ufs/machines/template.case.run +M config/ufs/machines/template.case.test +M config/ufs/machines/template.st_archive +M doc/source/users_guide/running-a-case.rst +M doc/tools_autodoc.py +M scripts/Tools/Makefile +M scripts/Tools/bld_diff +M scripts/Tools/case.build +M scripts/Tools/case.setup +M scripts/Tools/case.submit +M scripts/Tools/case_diff +M scripts/Tools/check_case +M scripts/Tools/pelayout +M scripts/Tools/preview_namelists +M scripts/Tools/preview_run +M scripts/Tools/xmlchange +M scripts/Tools/xmlconvertors/config_pes_converter.py +M scripts/Tools/xmlconvertors/convert-grid-v1-to-v2 +M scripts/Tools/xmlconvertors/grid_xml_converter.py +M scripts/create_newcase +M scripts/create_test +M scripts/fortran_unit_testing/python/test_xml_test_list.py +M scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/SystemTests/eri.py +M scripts/lib/CIME/SystemTests/mvk.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/compare_namelists.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +M 
scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py +M scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py +M scripts/lib/CIME/tests/XML/test_expected_fails_file.py +A scripts/lib/CIME/tests/XML/test_grids.py +A scripts/lib/CIME/tests/test_case.py +M scripts/lib/CIME/tests/test_case_fake.py +M scripts/lib/CIME/tests/test_compare_test_results.py +M scripts/lib/CIME/tests/test_cs_status.py +M scripts/lib/CIME/tests/test_custom_assertions_test_status.py +M scripts/lib/CIME/tests/test_provenance.py +A scripts/lib/CIME/tests/test_test_scheduler.py +M scripts/lib/CIME/tests/test_test_status.py +M scripts/lib/CIME/tests/test_user_mod_support.py +M scripts/lib/CIME/tests/test_utils.py +A scripts/lib/CIME/tests/test_xml_env_batch.py +A scripts/lib/CIME/tests/utils.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/tests/CTestConfig.cmake +M scripts/tests/scripts_regression_tests.py +M src/CMake/CIME_utils.cmake +M src/build_scripts/buildlib.cprnc +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.internal_components +M src/build_scripts/buildlib.kokkos +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M src/build_scripts/buildlib.pio +M src/build_scripts/buildlib_cmake.internal_components +D src/components/data_comps_mct/datm/cime_config/buildlib +D src/components/data_comps_mct/datm/cime_config/buildlib_cmake +D src/components/data_comps_mct/datm/cime_config/buildnml +D src/components/data_comps_mct/datm/cime_config/config_archive.xml +D src/components/data_comps_mct/datm/cime_config/config_component.xml +D src/components/data_comps_mct/datm/cime_config/namelist_definition_datm.xml +D src/components/data_comps_mct/datm/cime_config/user_nl_datm +D src/components/data_comps_mct/datm/src/atm_comp_mct.F90 +D src/components/data_comps_mct/datm/src/datm_comp_mod.F90 +D src/components/data_comps_mct/datm/src/datm_shr_mod.F90 +D src/components/data_comps_mct/desp/cime_config/buildlib +D src/components/data_comps_mct/desp/cime_config/buildlib_cmake +D src/components/data_comps_mct/desp/cime_config/buildnml +D src/components/data_comps_mct/desp/cime_config/config_component.xml +D src/components/data_comps_mct/desp/cime_config/namelist_definition_desp.xml +D src/components/data_comps_mct/desp/cime_config/user_nl_desp +D src/components/data_comps_mct/desp/desp_comp_mod.F90 +D src/components/data_comps_mct/desp/esp_utils.F90 +D src/components/data_comps_mct/desp/src/esp_comp_mct.F90 +D src/components/data_comps_mct/dice/cime_config/buildlib +D src/components/data_comps_mct/dice/cime_config/buildlib_cmake +D src/components/data_comps_mct/dice/cime_config/buildnml +D src/components/data_comps_mct/dice/cime_config/config_archive.xml +D src/components/data_comps_mct/dice/cime_config/config_component.xml +D src/components/data_comps_mct/dice/cime_config/namelist_definition_dice.xml +D src/components/data_comps_mct/dice/cime_config/user_nl_dice +D src/components/data_comps_mct/dice/src/dice_comp_mod.F90 +D src/components/data_comps_mct/dice/src/dice_flux_atmice_mod.F90 +D src/components/data_comps_mct/dice/src/dice_shr_mod.F90 +D src/components/data_comps_mct/dice/src/ice_comp_mct.F90 +D src/components/data_comps_mct/dlnd/cime_config/buildlib +D src/components/data_comps_mct/dlnd/cime_config/buildlib_cmake +D src/components/data_comps_mct/dlnd/cime_config/buildnml +D src/components/data_comps_mct/dlnd/cime_config/config_archive.xml +D 
src/components/data_comps_mct/dlnd/cime_config/config_component.xml +D src/components/data_comps_mct/dlnd/cime_config/namelist_definition_dlnd.xml +D src/components/data_comps_mct/dlnd/cime_config/user_nl_dlnd +D src/components/data_comps_mct/dlnd/src/dlnd_comp_mod.F90 +D src/components/data_comps_mct/dlnd/src/dlnd_shr_mod.F90 +D src/components/data_comps_mct/dlnd/src/lnd_comp_mct.F90 +D src/components/data_comps_mct/docn/cime_config/buildlib +D src/components/data_comps_mct/docn/cime_config/buildlib_cmake +D src/components/data_comps_mct/docn/cime_config/buildnml +D src/components/data_comps_mct/docn/cime_config/config_archive.xml +D src/components/data_comps_mct/docn/cime_config/config_component.xml +D src/components/data_comps_mct/docn/cime_config/namelist_definition_docn.xml +D src/components/data_comps_mct/docn/cime_config/user_nl_docn +D src/components/data_comps_mct/docn/src/docn_comp_mod.F90 +D src/components/data_comps_mct/docn/src/docn_shr_mod.F90 +D src/components/data_comps_mct/docn/src/ocn_comp_mct.F90 +D src/components/data_comps_mct/docn/tools/pop_som_frc/README +D src/components/data_comps_mct/docn/tools/pop_som_frc/SOM.doc +D src/components/data_comps_mct/docn/tools/pop_som_frc/SOM.pdf +D src/components/data_comps_mct/docn/tools/pop_som_frc/pop_frc.csh +D src/components/data_comps_mct/docn/tools/pop_som_frc/pop_frc_mlann.ncl +D src/components/data_comps_mct/docn/tools/pop_som_frc/pop_frc_mlt.ncl +D src/components/data_comps_mct/docn/tools/pop_som_frc/pop_interp.ncl +D src/components/data_comps_mct/docn/tools/pop_som_frc/read_from_mss.csh +D src/components/data_comps_mct/docn/tools/pop_som_frc/read_from_mss_month.csh +D src/components/data_comps_mct/drof/cime_config/buildlib +D src/components/data_comps_mct/drof/cime_config/buildlib_cmake +D src/components/data_comps_mct/drof/cime_config/buildnml +D src/components/data_comps_mct/drof/cime_config/config_archive.xml +D src/components/data_comps_mct/drof/cime_config/config_component.xml +D src/components/data_comps_mct/drof/cime_config/namelist_definition_drof.xml +D src/components/data_comps_mct/drof/cime_config/user_nl_drof +D src/components/data_comps_mct/drof/src/drof_comp_mod.F90 +D src/components/data_comps_mct/drof/src/drof_shr_mod.F90 +D src/components/data_comps_mct/drof/src/rof_comp_mct.F90 +D src/components/data_comps_mct/dwav/README +D src/components/data_comps_mct/dwav/cime_config/buildlib +D src/components/data_comps_mct/dwav/cime_config/buildlib_cmake +D src/components/data_comps_mct/dwav/cime_config/buildnml +D src/components/data_comps_mct/dwav/cime_config/config_archive.xml +D src/components/data_comps_mct/dwav/cime_config/config_component.xml +D src/components/data_comps_mct/dwav/cime_config/namelist_definition_dwav.xml +D src/components/data_comps_mct/dwav/cime_config/user_nl_dwav +D src/components/data_comps_mct/dwav/src/dwav_comp_mod.F90 +D src/components/data_comps_mct/dwav/src/dwav_shr_mod.F90 +D src/components/data_comps_mct/dwav/src/wav_comp_mct.F90 +D src/components/stub_comps_mct/satm/cime_config/buildlib +D src/components/stub_comps_mct/satm/cime_config/buildlib_cmake +D src/components/stub_comps_mct/satm/cime_config/buildnml +D src/components/stub_comps_mct/satm/cime_config/config_component.xml +D src/components/stub_comps_mct/satm/src/atm_comp_mct.F90 +D src/components/stub_comps_mct/sesp/cime_config/buildlib +D src/components/stub_comps_mct/sesp/cime_config/buildlib_cmake +D src/components/stub_comps_mct/sesp/cime_config/buildnml +D 
src/components/stub_comps_mct/sesp/cime_config/config_component.xml +D src/components/stub_comps_mct/sesp/src/esp_comp_mct.F90 +D src/components/stub_comps_mct/sglc/cime_config/buildlib +D src/components/stub_comps_mct/sglc/cime_config/buildlib_cmake +D src/components/stub_comps_mct/sglc/cime_config/buildnml +D src/components/stub_comps_mct/sglc/cime_config/config_component.xml +D src/components/stub_comps_mct/sglc/src/glc_comp_mct.F90 +D src/components/stub_comps_mct/siac/cime_config/buildlib +D src/components/stub_comps_mct/siac/cime_config/buildlib_cmake +D src/components/stub_comps_mct/siac/cime_config/buildnml +D src/components/stub_comps_mct/siac/cime_config/config_component.xml +D src/components/stub_comps_mct/siac/src/iac_comp_mct.F90 +D src/components/stub_comps_mct/sice/cime_config/buildlib +D src/components/stub_comps_mct/sice/cime_config/buildlib_cmake +D src/components/stub_comps_mct/sice/cime_config/buildnml +D src/components/stub_comps_mct/sice/cime_config/config_component.xml +D src/components/stub_comps_mct/sice/src/ice_comp_mct.F90 +D src/components/stub_comps_mct/slnd/cime_config/buildlib +D src/components/stub_comps_mct/slnd/cime_config/buildlib_cmake +D src/components/stub_comps_mct/slnd/cime_config/buildnml +D src/components/stub_comps_mct/slnd/cime_config/config_component.xml +D src/components/stub_comps_mct/slnd/src/lnd_comp_mct.F90 +D src/components/stub_comps_mct/socn/cime_config/buildlib +D src/components/stub_comps_mct/socn/cime_config/buildlib_cmake +D src/components/stub_comps_mct/socn/cime_config/buildnml +D src/components/stub_comps_mct/socn/cime_config/config_component.xml +D src/components/stub_comps_mct/socn/src/ocn_comp_mct.F90 +D src/components/stub_comps_mct/srof/cime_config/buildlib +D src/components/stub_comps_mct/srof/cime_config/buildlib_cmake +D src/components/stub_comps_mct/srof/cime_config/buildnml +D src/components/stub_comps_mct/srof/cime_config/config_component.xml +D src/components/stub_comps_mct/srof/src/rof_comp_mct.F90 +D src/components/stub_comps_mct/swav/cime_config/buildlib +D src/components/stub_comps_mct/swav/cime_config/buildlib_cmake +D src/components/stub_comps_mct/swav/cime_config/buildnml +D src/components/stub_comps_mct/swav/cime_config/config_component.xml +D src/components/stub_comps_mct/swav/src/wav_comp_mct.F90 +M src/components/stub_comps_nuopc/satm/cime_config/buildnml +M src/components/stub_comps_nuopc/sesp/cime_config/buildnml +M src/components/stub_comps_nuopc/sglc/cime_config/buildnml +M src/components/stub_comps_nuopc/siac/cime_config/buildnml +M src/components/stub_comps_nuopc/sice/cime_config/buildnml +M src/components/stub_comps_nuopc/slnd/cime_config/buildnml +M src/components/stub_comps_nuopc/socn/cime_config/buildnml +M src/components/stub_comps_nuopc/srof/cime_config/buildnml +M src/components/stub_comps_nuopc/swav/cime_config/buildnml +D src/components/xcpl_comps_mct/xatm/cime_config/buildlib +D src/components/xcpl_comps_mct/xatm/cime_config/buildlib_cmake +D src/components/xcpl_comps_mct/xatm/cime_config/buildnml +D src/components/xcpl_comps_mct/xatm/cime_config/config_component.xml +D src/components/xcpl_comps_mct/xatm/src/atm_comp_mct.F90 +D src/components/xcpl_comps_mct/xglc/cime_config/buildlib +D src/components/xcpl_comps_mct/xglc/cime_config/buildlib_cmake +D src/components/xcpl_comps_mct/xglc/cime_config/buildnml +D src/components/xcpl_comps_mct/xglc/cime_config/config_component.xml +D src/components/xcpl_comps_mct/xglc/src/glc_comp_mct.F90 +D src/components/xcpl_comps_mct/xice/cime_config/buildlib +D 
src/components/xcpl_comps_mct/xice/cime_config/buildlib_cmake +D src/components/xcpl_comps_mct/xice/cime_config/buildnml +D src/components/xcpl_comps_mct/xice/cime_config/config_component.xml +D src/components/xcpl_comps_mct/xice/src/ice_comp_mct.F90 +D src/components/xcpl_comps_mct/xlnd/cime_config/buildlib +D src/components/xcpl_comps_mct/xlnd/cime_config/buildlib_cmake +D src/components/xcpl_comps_mct/xlnd/cime_config/buildnml +D src/components/xcpl_comps_mct/xlnd/cime_config/config_component.xml +D src/components/xcpl_comps_mct/xlnd/src/lnd_comp_mct.F90 +D src/components/xcpl_comps_mct/xocn/cime_config/buildlib +D src/components/xcpl_comps_mct/xocn/cime_config/buildlib_cmake +D src/components/xcpl_comps_mct/xocn/cime_config/buildnml +D src/components/xcpl_comps_mct/xocn/cime_config/config_component.xml +D src/components/xcpl_comps_mct/xocn/src/ocn_comp_mct.F90 +D src/components/xcpl_comps_mct/xrof/cime_config/buildlib +D src/components/xcpl_comps_mct/xrof/cime_config/buildlib_cmake +D src/components/xcpl_comps_mct/xrof/cime_config/buildnml +D src/components/xcpl_comps_mct/xrof/cime_config/config_component.xml +D src/components/xcpl_comps_mct/xrof/src/rof_comp_mct.F90 +D src/components/xcpl_comps_mct/xshare/dead_data_mod.F90 +D src/components/xcpl_comps_mct/xshare/dead_mct_mod.F90 +D src/components/xcpl_comps_mct/xshare/dead_mod.F90 +D src/components/xcpl_comps_mct/xwav/cime_config/buildlib +D src/components/xcpl_comps_mct/xwav/cime_config/buildlib_cmake +D src/components/xcpl_comps_mct/xwav/cime_config/buildnml +D src/components/xcpl_comps_mct/xwav/cime_config/config_component.xml +D src/components/xcpl_comps_mct/xwav/src/wav_comp_mct.F90 +M src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xice/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml +D src/drivers/mct/cime_config/buildexe +D src/drivers/mct/cime_config/buildlib_cmake +D src/drivers/mct/cime_config/buildnml +D src/drivers/mct/cime_config/config_archive.xml +D src/drivers/mct/cime_config/config_component.xml +D src/drivers/mct/cime_config/config_component_cesm.xml +D src/drivers/mct/cime_config/config_component_e3sm.xml +D src/drivers/mct/cime_config/config_compsets.xml +D src/drivers/mct/cime_config/config_pes.xml +D src/drivers/mct/cime_config/namelist_definition_drv.xml +D src/drivers/mct/cime_config/namelist_definition_drv_flds.xml +D src/drivers/mct/cime_config/namelist_definition_modelio.xml +D src/drivers/mct/cime_config/testdefs/testlist_drv.xml +D src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/5steps/shell_commands +D src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/default/shell_commands +D src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/som/shell_commands +D src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/README +D src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/shell_commands +D src/drivers/mct/cime_config/user_nl_cpl +D src/drivers/mct/main/CMakeLists.txt +D src/drivers/mct/main/cime_comp_mod.F90 +D src/drivers/mct/main/cime_driver.F90 +D src/drivers/mct/main/component_mod.F90 +D src/drivers/mct/main/component_type_mod.F90 +D src/drivers/mct/main/cplcomp_exchange_mod.F90 +D src/drivers/mct/main/map_glc2lnd_mod.F90 +D 
src/drivers/mct/main/map_lnd2glc_mod.F90 +D src/drivers/mct/main/map_lnd2rof_irrig_mod.F90 +D src/drivers/mct/main/mrg_mod.F90 +D src/drivers/mct/main/prep_aoflux_mod.F90 +D src/drivers/mct/main/prep_atm_mod.F90 +D src/drivers/mct/main/prep_glc_mod.F90 +D src/drivers/mct/main/prep_iac_mod.F90 +D src/drivers/mct/main/prep_ice_mod.F90 +D src/drivers/mct/main/prep_lnd_mod.F90 +D src/drivers/mct/main/prep_ocn_mod.F90 +D src/drivers/mct/main/prep_rof_mod.F90 +D src/drivers/mct/main/prep_wav_mod.F90 +D src/drivers/mct/main/seq_diag_mct.F90 +D src/drivers/mct/main/seq_domain_mct.F90 +D src/drivers/mct/main/seq_flux_mct.F90 +D src/drivers/mct/main/seq_frac_mct.F90 +D src/drivers/mct/main/seq_hist_mod.F90 +D src/drivers/mct/main/seq_io_mod.F90 +D src/drivers/mct/main/seq_map_mod.F90 +D src/drivers/mct/main/seq_map_type_mod.F90 +D src/drivers/mct/main/seq_rest_mod.F90 +D src/drivers/mct/main/t_driver_timers_mod.F90 +D src/drivers/mct/shr/CMakeLists.txt +D src/drivers/mct/shr/glc_elevclass_mod.F90 +D src/drivers/mct/shr/seq_cdata_mod.F90 +D src/drivers/mct/shr/seq_comm_mct.F90 +D src/drivers/mct/shr/seq_drydep_mod.F90 +D src/drivers/mct/shr/seq_flds_mod.F90 +D src/drivers/mct/shr/seq_infodata_mod.F90 +D src/drivers/mct/shr/seq_io_read_mod.F90 +D src/drivers/mct/shr/seq_pauseresume_mod.F90 +D src/drivers/mct/shr/seq_timemgr_mod.F90 +D src/drivers/mct/shr/shr_carma_mod.F90 +D src/drivers/mct/shr/shr_expr_parser_mod.F90 +D src/drivers/mct/shr/shr_fire_emis_mod.F90 +D src/drivers/mct/shr/shr_megan_mod.F90 +D src/drivers/mct/shr/shr_ndep_mod.F90 +D src/drivers/mct/unit_test/CMakeLists.txt +D src/drivers/mct/unit_test/avect_wrapper_test/CMakeLists.txt +D src/drivers/mct/unit_test/avect_wrapper_test/test_avect_wrapper.pf +D src/drivers/mct/unit_test/check_fields_test/CMakeLists.txt +D src/drivers/mct/unit_test/check_fields_test/test_check_fields.pf +D src/drivers/mct/unit_test/glc_elevclass_test/CMakeLists.txt +D src/drivers/mct/unit_test/glc_elevclass_test/test_glc_elevclass.pf +D src/drivers/mct/unit_test/map_glc2lnd_test/CMakeLists.txt +D src/drivers/mct/unit_test/map_glc2lnd_test/test_map_glc2lnd.pf +D src/drivers/mct/unit_test/map_lnd2rof_irrig_test/CMakeLists.txt +D src/drivers/mct/unit_test/map_lnd2rof_irrig_test/test_map_lnd2rof_irrig.pf +D src/drivers/mct/unit_test/seq_map_test/CMakeLists.txt +D src/drivers/mct/unit_test/seq_map_test/test_seq_map.pf +D src/drivers/mct/unit_test/stubs/CMakeLists.txt +D src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 +D src/drivers/mct/unit_test/utils/CMakeLists.txt +D src/drivers/mct/unit_test/utils/avect_wrapper_mod.F90 +D src/drivers/mct/unit_test/utils/create_mapper_mod.F90 +D src/drivers/mct/unit_test/utils/mct_wrapper_mod.F90 +D src/drivers/mct/unit_test/utils/simple_map_mod.F90 +M src/drivers/moab/cime_config/buildexe +D src/externals/mct/.gitignore +D src/externals/mct/COPYRIGHT +D src/externals/mct/Makefile +D src/externals/mct/Makefile.conf.in +D src/externals/mct/README +D src/externals/mct/aclocal.m4 +D src/externals/mct/benchmarks/.gitignore +D src/externals/mct/benchmarks/Makefile +D src/externals/mct/benchmarks/RouterTestDis.F90 +D src/externals/mct/benchmarks/RouterTestOvr.F90 +D src/externals/mct/benchmarks/T42.8pC +D src/externals/mct/benchmarks/T42.8pR +D src/externals/mct/benchmarks/gx1.8pC +D src/externals/mct/benchmarks/gx1.8pR +D src/externals/mct/benchmarks/importBench.F90 +D src/externals/mct/config.h.in +D src/externals/mct/configure +D src/externals/mct/configure.ac +D src/externals/mct/doc/.gitignore +D src/externals/mct/doc/Makefile 
+D src/externals/mct/doc/README +D src/externals/mct/doc/coupler.bib +D src/externals/mct/doc/mct_APIs.tex +D src/externals/mct/doc/texsrc/.gitignore +D src/externals/mct/doc/texsrc/Makefile +D src/externals/mct/doc/texsrc/SRCS_tex.mk +D src/externals/mct/examples/Makefile +D src/externals/mct/examples/README +D src/externals/mct/examples/climate_concur1/.gitignore +D src/externals/mct/examples/climate_concur1/Makefile +D src/externals/mct/examples/climate_concur1/README +D src/externals/mct/examples/climate_concur1/coupler.F90 +D src/externals/mct/examples/climate_concur1/master.F90 +D src/externals/mct/examples/climate_concur1/model.F90 +D src/externals/mct/examples/climate_sequen1/.gitignore +D src/externals/mct/examples/climate_sequen1/Makefile +D src/externals/mct/examples/climate_sequen1/README +D src/externals/mct/examples/climate_sequen1/TS1.dat +D src/externals/mct/examples/climate_sequen1/coupler.F90 +D src/externals/mct/examples/climate_sequen1/dst.rc +D src/externals/mct/examples/climate_sequen1/dstmodel.F90 +D src/externals/mct/examples/climate_sequen1/master.F90 +D src/externals/mct/examples/climate_sequen1/mutils.F90 +D src/externals/mct/examples/climate_sequen1/src.rc +D src/externals/mct/examples/climate_sequen1/srcmodel.F90 +D src/externals/mct/examples/simple/.gitignore +D src/externals/mct/examples/simple/Makefile +D src/externals/mct/examples/simple/README +D src/externals/mct/examples/simple/script.babyblue +D src/externals/mct/examples/simple/twocmp.con.F90 +D src/externals/mct/examples/simple/twocmp.seq.F90 +D src/externals/mct/examples/simple/twocmp.seqNB.F90 +D src/externals/mct/examples/simple/twocmp.seqUnvn.F90 +D src/externals/mct/install-sh +D src/externals/mct/m4/README +D src/externals/mct/m4/acx_mpi.m4 +D src/externals/mct/m4/ax_fc_version.m4 +D src/externals/mct/m4/fortran.m4 +D src/externals/mct/mct/Makefile +D src/externals/mct/mct/README +D src/externals/mct/mct/m_Accumulator.F90 +D src/externals/mct/mct/m_AccumulatorComms.F90 +D src/externals/mct/mct/m_AttrVect.F90 +D src/externals/mct/mct/m_AttrVectComms.F90 +D src/externals/mct/mct/m_AttrVectReduce.F90 +D src/externals/mct/mct/m_ConvertMaps.F90 +D src/externals/mct/mct/m_ExchangeMaps.F90 +D src/externals/mct/mct/m_GeneralGrid.F90 +D src/externals/mct/mct/m_GeneralGridComms.F90 +D src/externals/mct/mct/m_GlobalMap.F90 +D src/externals/mct/mct/m_GlobalSegMap.F90 +D src/externals/mct/mct/m_GlobalSegMapComms.F90 +D src/externals/mct/mct/m_GlobalToLocal.F90 +D src/externals/mct/mct/m_MCTWorld.F90 +D src/externals/mct/mct/m_MatAttrVectMul.F90 +D src/externals/mct/mct/m_Merge.F90 +D src/externals/mct/mct/m_Navigator.F90 +D src/externals/mct/mct/m_Rearranger.F90 +D src/externals/mct/mct/m_Router.F90 +D src/externals/mct/mct/m_SPMDutils.F90 +D src/externals/mct/mct/m_SparseMatrix.F90 +D src/externals/mct/mct/m_SparseMatrixComms.F90 +D src/externals/mct/mct/m_SparseMatrixDecomp.F90 +D src/externals/mct/mct/m_SparseMatrixPlus.F90 +D src/externals/mct/mct/m_SparseMatrixToMaps.F90 +D src/externals/mct/mct/m_SpatialIntegral.F90 +D src/externals/mct/mct/m_SpatialIntegralV.F90 +D src/externals/mct/mct/m_Transfer.F90 +D src/externals/mct/mkinstalldirs +D src/externals/mct/mpeu/Makefile +D src/externals/mct/mpeu/README +D src/externals/mct/mpeu/assertmpeu.H +D src/externals/mct/mpeu/get_zeits.c +D src/externals/mct/mpeu/m_FcComms.F90 +D src/externals/mct/mpeu/m_FileResolv.F90 +D src/externals/mct/mpeu/m_Filename.F90 +D src/externals/mct/mpeu/m_IndexBin_char.F90 +D src/externals/mct/mpeu/m_IndexBin_integer.F90 +D 
src/externals/mct/mpeu/m_IndexBin_logical.F90 +D src/externals/mct/mpeu/m_List.F90 +D src/externals/mct/mpeu/m_MergeSorts.F90 +D src/externals/mct/mpeu/m_Permuter.F90 +D src/externals/mct/mpeu/m_SortingTools.F90 +D src/externals/mct/mpeu/m_StrTemplate.F90 +D src/externals/mct/mpeu/m_String.F90 +D src/externals/mct/mpeu/m_StringLinkedList.F90 +D src/externals/mct/mpeu/m_TraceBack.F90 +D src/externals/mct/mpeu/m_chars.F90 +D src/externals/mct/mpeu/m_die.F90 +D src/externals/mct/mpeu/m_dropdead.F90 +D src/externals/mct/mpeu/m_flow.F90 +D src/externals/mct/mpeu/m_inpak90.F90 +D src/externals/mct/mpeu/m_ioutil.F90 +D src/externals/mct/mpeu/m_mall.F90 +D src/externals/mct/mpeu/m_mpif.F90 +D src/externals/mct/mpeu/m_mpif90.F90 +D src/externals/mct/mpeu/m_mpout.F90 +D src/externals/mct/mpeu/m_rankMerge.F90 +D src/externals/mct/mpeu/m_realkinds.F90 +D src/externals/mct/mpeu/m_stdio.F90 +D src/externals/mct/mpeu/m_zeit.F90 +D src/externals/mct/mpi-serial/.gitignore +D src/externals/mct/mpi-serial/Makefile +D src/externals/mct/mpi-serial/Makefile.conf.in +D src/externals/mct/mpi-serial/README +D src/externals/mct/mpi-serial/aclocal.m4 +D src/externals/mct/mpi-serial/cart.c +D src/externals/mct/mpi-serial/collective.c +D src/externals/mct/mpi-serial/comm.c +D src/externals/mct/mpi-serial/config.h.in +D src/externals/mct/mpi-serial/configure +D src/externals/mct/mpi-serial/configure.in +D src/externals/mct/mpi-serial/copy.c +D src/externals/mct/mpi-serial/error.c +D src/externals/mct/mpi-serial/fort.F90 +D src/externals/mct/mpi-serial/getcount.c +D src/externals/mct/mpi-serial/group.c +D src/externals/mct/mpi-serial/handles.c +D src/externals/mct/mpi-serial/ic_merge.c +D src/externals/mct/mpi-serial/info.c +D src/externals/mct/mpi-serial/list.c +D src/externals/mct/mpi-serial/list.h +D src/externals/mct/mpi-serial/listP.h +D src/externals/mct/mpi-serial/listops.h +D src/externals/mct/mpi-serial/m4/README +D src/externals/mct/mpi-serial/m4/ax_fc_version.m4 +D src/externals/mct/mpi-serial/mpi.c +D src/externals/mct/mpi-serial/mpi.h +D src/externals/mct/mpi-serial/mpiP.h +D src/externals/mct/mpi-serial/mpif.F90 +D src/externals/mct/mpi-serial/mpif.h +D src/externals/mct/mpi-serial/op.c +D src/externals/mct/mpi-serial/pack.c +D src/externals/mct/mpi-serial/probe.c +D src/externals/mct/mpi-serial/protify.awk +D src/externals/mct/mpi-serial/recv.c +D src/externals/mct/mpi-serial/req.c +D src/externals/mct/mpi-serial/send.c +D src/externals/mct/mpi-serial/tests/.gitignore +D src/externals/mct/mpi-serial/tests/Makefile +D src/externals/mct/mpi-serial/tests/ctest.c +D src/externals/mct/mpi-serial/tests/ctest_old.c +D src/externals/mct/mpi-serial/tests/ftest.F90 +D src/externals/mct/mpi-serial/tests/ftest_internal.F90 +D src/externals/mct/mpi-serial/tests/ftest_old.F90 +D src/externals/mct/mpi-serial/time.c +D src/externals/mct/mpi-serial/type.c +D src/externals/mct/mpi-serial/type.h +D src/externals/mct/mpi-serial/type_const.c +D src/externals/mct/protex/protex +D src/externals/mct/testsystem/Makefile +D src/externals/mct/testsystem/testall/.gitignore +D src/externals/mct/testsystem/testall/Makefile +D src/externals/mct/testsystem/testall/ReadSparseMatrixAsc.F90 +D src/externals/mct/testsystem/testall/UNTESTED +D src/externals/mct/testsystem/testall/ccm.F90 +D src/externals/mct/testsystem/testall/convertPOPT.F90 +D src/externals/mct/testsystem/testall/convertgauss.F90 +D src/externals/mct/testsystem/testall/cpl.F90 +D src/externals/mct/testsystem/testall/job.ut-all.jaguar +D 
src/externals/mct/testsystem/testall/m_ACTEST.F90 +D src/externals/mct/testsystem/testall/m_AVTEST.F90 +D src/externals/mct/testsystem/testall/m_GGRIDTEST.F90 +D src/externals/mct/testsystem/testall/m_GMAPTEST.F90 +D src/externals/mct/testsystem/testall/m_GSMAPTEST.F90 +D src/externals/mct/testsystem/testall/m_MCTWORLDTEST.F90 +D src/externals/mct/testsystem/testall/m_ROUTERTEST.F90 +D src/externals/mct/testsystem/testall/m_SMATTEST.F90 +D src/externals/mct/testsystem/testall/master.F90 +D src/externals/mct/testsystem/testall/mph.F90 +D src/externals/mct/testsystem/testall/pop.F90 +D src/externals/mct/testsystem/testall/processors_map.in +D src/externals/mct/testsystem/testall/script.jag +D src/externals/mct/testsystem/testall/ut_SparseMatrix.rc +D src/externals/mct/testunit/.gitignore +D src/externals/mct/testunit/AttrVect_Test.F90 +D src/externals/mct/testunit/Makefile +D src/externals/mct/testunit/master.F90 +D src/externals/pio1/CMakeLists.txt +D src/externals/pio1/ChangeLog +D src/externals/pio1/ChangeLog_template +D src/externals/pio1/Doxyfile +D src/externals/pio1/DoxygenLayout.xml +D src/externals/pio1/SVN_EXTERNAL_DIRECTORIES +D src/externals/pio1/customdoxygen.css +D src/externals/pio1/doc/CAMexample.txt +D src/externals/pio1/doc/Decomp.txt +D src/externals/pio1/doc/DoxygenLayout.xml +D src/externals/pio1/doc/Error.txt +D src/externals/pio1/doc/Examples.txt +D src/externals/pio1/doc/GettingStarted.txt +D src/externals/pio1/doc/Installing.txt +D src/externals/pio1/doc/api.txt +D src/externals/pio1/doc/base.txt +D src/externals/pio1/doc/example/errorhandle +D src/externals/pio1/doc/example/simple-bc +D src/externals/pio1/doc/example/simple-bc-rearr +D src/externals/pio1/doc/example/simple-bc-rearr-pe1 +D src/externals/pio1/doc/example/simple-bc-rearr-pe2 +D src/externals/pio1/doc/example/simple-dof +D src/externals/pio1/doc/example/simple-dof-rearr +D src/externals/pio1/doc/faq.txt +D src/externals/pio1/doc/footer.html +D src/externals/pio1/doc/header.html +D src/externals/pio1/doc/images/baseimage.graffle +D src/externals/pio1/doc/images/block-cyclic-rearr.eps +D src/externals/pio1/doc/images/block-cyclic-rearr.graffle +D src/externals/pio1/doc/images/block-cyclic-rearr.png +D src/externals/pio1/doc/images/block-cyclic.eps +D src/externals/pio1/doc/images/block-cyclic.graffle +D src/externals/pio1/doc/images/block-cyclic.png +D src/externals/pio1/doc/images/dof-rearr.eps +D src/externals/pio1/doc/images/dof-rearr.graffle +D src/externals/pio1/doc/images/dof-rearr.png +D src/externals/pio1/doc/images/dof.eps +D src/externals/pio1/doc/images/dof.graffle +D src/externals/pio1/doc/images/dof.png +D src/externals/pio1/doc/testpio_example.txt +D src/externals/pio1/doxygen.sty +D src/externals/pio1/pio/CMakeLists.txt +D src/externals/pio1/pio/C_interface_mod.F90 +D src/externals/pio1/pio/Makefile.conf.in +D src/externals/pio1/pio/README.config +D src/externals/pio1/pio/alloc_mod.F90.in +D src/externals/pio1/pio/box_rearrange.F90.in +D src/externals/pio1/pio/calcdecomp.F90 +D src/externals/pio1/pio/calcdisplace_mod.F90 +D src/externals/pio1/pio/config.h.in +D src/externals/pio1/pio/dtypes.h +D src/externals/pio1/pio/fdepends.awk +D src/externals/pio1/pio/iompi_mod.F90.in +D src/externals/pio1/pio/ionf_mod.F90 +D src/externals/pio1/pio/nf_mod.F90 +D src/externals/pio1/pio/pio.F90 +D src/externals/pio1/pio/pio_kinds.F90 +D src/externals/pio1/pio/pio_mpi_utils.F90 +D src/externals/pio1/pio/pio_msg_callbacks.F90 +D src/externals/pio1/pio/pio_msg_getput_callbacks.F90.in +D 
src/externals/pio1/pio/pio_msg_mod.F90 +D src/externals/pio1/pio/pio_nf_utils.F90 +D src/externals/pio1/pio/pio_spmd_utils.F90.in +D src/externals/pio1/pio/pio_support.F90 +D src/externals/pio1/pio/pio_types.F90 +D src/externals/pio1/pio/pio_utils.F90 +D src/externals/pio1/pio/piodarray.F90.in +D src/externals/pio1/pio/piolib_mod.F90 +D src/externals/pio1/pio/pionfatt_mod.F90.in +D src/externals/pio1/pio/pionfget_mod.F90.in +D src/externals/pio1/pio/pionfput_mod.F90.in +D src/externals/pio1/pio/pionfread_mod.F90.in +D src/externals/pio1/pio/pionfwrite_mod.F90.in +D src/externals/pio1/pio/piovdc.F90 +D src/externals/pio1/pio/rearr_options.h +D src/externals/pio1/pio/rearrange.F90.in +D src/externals/pio1/pio/topology.c +D src/externals/pio1/scripts/Utils.pm +D src/externals/pio1/scripts/config.pl +D src/externals/pio1/scripts/testpio_yellowstone.pl +D src/externals/pio1/tests/testpio/CAM05.csh +D src/externals/pio1/tests/testpio/CMakeLists.txt +D src/externals/pio1/tests/testpio/MPASA30km.csh +D src/externals/pio1/tests/testpio/MPASA60km.csh +D src/externals/pio1/tests/testpio/POPB.csh +D src/externals/pio1/tests/testpio/POPC.csh +D src/externals/pio1/tests/testpio/POPD.csh +D src/externals/pio1/tests/testpio/POPDv0.csh +D src/externals/pio1/tests/testpio/POPDv1.csh +D src/externals/pio1/tests/testpio/POPDv2.csh +D src/externals/pio1/tests/testpio/POPDv3.csh +D src/externals/pio1/tests/testpio/POPDv4.csh +D src/externals/pio1/tests/testpio/POPDv5.csh +D src/externals/pio1/tests/testpio/README.testpio +D src/externals/pio1/tests/testpio/WRFB.csh +D src/externals/pio1/tests/testpio/build_defaults.xml +D src/externals/pio1/tests/testpio/check_mod.F90 +D src/externals/pio1/tests/testpio/config_bench.xml +D src/externals/pio1/tests/testpio/fdepends.awk +D src/externals/pio1/tests/testpio/gdecomp_mod.F90 +D src/externals/pio1/tests/testpio/kinds_mod.F90 +D src/externals/pio1/tests/testpio/kraken.128.csh +D src/externals/pio1/tests/testpio/kraken.1K.csh +D src/externals/pio1/tests/testpio/kraken.256.csh +D src/externals/pio1/tests/testpio/kraken.512.csh +D src/externals/pio1/tests/testpio/kraken.64.csh +D src/externals/pio1/tests/testpio/namelist_mod.F90 +D src/externals/pio1/tests/testpio/namelists/testpio_in.apb05 +D src/externals/pio1/tests/testpio/namelists/testpio_in.asb01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.asb04 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b04 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b05 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b06 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b07 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b08 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b09 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b10 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b11 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b12 +D src/externals/pio1/tests/testpio/namelists/testpio_in.b13 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bb01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bb02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bb03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bb04 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bb05 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bb06 +D 
src/externals/pio1/tests/testpio/namelists/testpio_in.bb07 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bb08 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bn01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bn02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.bn03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b04 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b05 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b06 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b07 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4b08 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4n01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4n02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.n4n03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb04 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb05 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb06 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb07 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pb08 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pn01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pn02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.pn03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb04 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb05 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb06 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb07 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sb08 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sn01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sn02 +D src/externals/pio1/tests/testpio/namelists/testpio_in.sn03 +D src/externals/pio1/tests/testpio/namelists/testpio_in.wr01 +D src/externals/pio1/tests/testpio/namelists/testpio_in.wr02 +D src/externals/pio1/tests/testpio/perl5lib/ChangeLog +D src/externals/pio1/tests/testpio/perl5lib/README +D src/externals/pio1/tests/testpio/perl5lib/XML/Changes +D src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm +D src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm +D src/externals/pio1/tests/testpio/perl5lib/XML/README +D src/externals/pio1/tests/testpio/test.csh +D src/externals/pio1/tests/testpio/test_lib.F90 +D src/externals/pio1/tests/testpio/testdecomp.F90 +D src/externals/pio1/tests/testpio/testdecomp.bluefire.run +D src/externals/pio1/tests/testpio/testdecomp_in +D src/externals/pio1/tests/testpio/testpio.F90 +D src/externals/pio1/tests/testpio/testpio_bench.pl +D src/externals/pio1/tests/testpio/testpio_build.pl +D src/externals/pio1/tests/testpio/testpio_run.pl +D src/externals/pio1/tests/testpio/utils_mod.F90 +D src/externals/pio1/tests/testpio/ystest.sh +D src/externals/pio1/tests/unittests/CMakeLists.txt +D src/externals/pio1/tests/unittests/Levy_Notes +D src/externals/pio1/tests/unittests/README +D 
src/externals/pio1/tests/unittests/basic_tests.F90 +D src/externals/pio1/tests/unittests/driver.F90 +D src/externals/pio1/tests/unittests/global_vars.F90 +D src/externals/pio1/tests/unittests/input.nl +D src/externals/pio1/tests/unittests/nc_set_log_level2.c +D src/externals/pio1/tests/unittests/ncdf_tests.F90 +D src/externals/pio1/tests/unittests/not_netcdf.ieee +D src/externals/pio1/timing/CMakeLists.txt +D src/externals/pio1/timing/COPYING +D src/externals/pio1/timing/ChangeLog +D src/externals/pio1/timing/GPTLget_memusage.c +D src/externals/pio1/timing/GPTLprint_memusage.c +D src/externals/pio1/timing/GPTLutil.c +D src/externals/pio1/timing/Makefile +D src/externals/pio1/timing/README +D src/externals/pio1/timing/XXXdotF/perf_mod.F +D src/externals/pio1/timing/XXXdotF/perf_utils.F +D src/externals/pio1/timing/f_wrappers.c +D src/externals/pio1/timing/gptl.c +D src/externals/pio1/timing/gptl.h +D src/externals/pio1/timing/gptl.inc +D src/externals/pio1/timing/gptl_papi.c +D src/externals/pio1/timing/perf_mod.F90 +D src/externals/pio1/timing/perf_utils.F90 +D src/externals/pio1/timing/private.h +D src/externals/pio1/timing/threadutil.c +D src/externals/pio2/.github/workflows/autotools.yml +D src/externals/pio2/.github/workflows/cmake.yml +D src/externals/pio2/.github/workflows/cmake_ncint.yml +D src/externals/pio2/.github/workflows/cmake_netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml +D src/externals/pio2/.github/workflows/netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml +D src/externals/pio2/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_no_pnetcdf_ncint_mpich-3.3.yml +D src/externals/pio2/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_pnetcdf-12.2_ncint_mpich-3.3_asan.yml +D src/externals/pio2/.github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_mpich-3.3.yml +D src/externals/pio2/.github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_openmpi_4.0.4.yml +D src/externals/pio2/.github/workflows/strict_autotools_ubuntu_latest.yml +D src/externals/pio2/.gitignore +D src/externals/pio2/.travis.yml +D src/externals/pio2/CMakeLists.txt +D src/externals/pio2/COPYRIGHT +D src/externals/pio2/CTestConfig.cmake +D src/externals/pio2/CTestScript.cmake +D src/externals/pio2/Makefile.am +D src/externals/pio2/README.md +D src/externals/pio2/cmake/FindGPTL.cmake +D src/externals/pio2/cmake/FindHDF5.cmake +D src/externals/pio2/cmake/FindLIBRT.cmake +D src/externals/pio2/cmake/FindLIBZ.cmake +D src/externals/pio2/cmake/FindMPE.cmake +D src/externals/pio2/cmake/FindMPISERIAL.cmake +D src/externals/pio2/cmake/FindNetCDF.cmake +D src/externals/pio2/cmake/FindPAPI.cmake +D src/externals/pio2/cmake/FindPnetCDF.cmake +D src/externals/pio2/cmake/FindSZIP.cmake +D src/externals/pio2/cmake/LibCheck.cmake +D src/externals/pio2/cmake/LibFind.cmake +D src/externals/pio2/cmake/LibMPI.cmake +D src/externals/pio2/cmake/Makefile.am +D src/externals/pio2/cmake/TryHDF5_HAS_SZIP.c +D src/externals/pio2/cmake/TryNetCDF_DAP.c +D src/externals/pio2/cmake/TryNetCDF_PARALLEL.c +D src/externals/pio2/cmake/TryNetCDF_PNETCDF.c +D src/externals/pio2/cmake/mpiexec.alcf +D src/externals/pio2/cmake/mpiexec.ncsa +D src/externals/pio2/cmake/mpiexec.nersc +D src/externals/pio2/cmake/mpiexec.nwscla +D src/externals/pio2/cmake/mpiexec.olcf +D src/externals/pio2/cmake_config.h.in +D src/externals/pio2/configure.ac +D src/externals/pio2/ctest/CTestEnvironment-alcf.cmake +D src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake +D src/externals/pio2/ctest/CTestEnvironment-cgd.cmake +D 
src/externals/pio2/ctest/CTestEnvironment-ncsa.cmake +D src/externals/pio2/ctest/CTestEnvironment-nersc.cmake +D src/externals/pio2/ctest/CTestEnvironment-nwscla.cmake +D src/externals/pio2/ctest/CTestEnvironment-unknown.cmake +D src/externals/pio2/ctest/CTestScript-Test.cmake +D src/externals/pio2/ctest/runcdash-alcf-ibm.sh +D src/externals/pio2/ctest/runcdash-anlworkstation.sh +D src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh +D src/externals/pio2/ctest/runcdash-cgd-nag.sh +D src/externals/pio2/ctest/runcdash-nersc-cray.sh +D src/externals/pio2/ctest/runcdash-nersc-intel.sh +D src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh +D src/externals/pio2/ctest/runcdash-nwsc-intel.sh +D src/externals/pio2/ctest/runcdash-nwscla-gnu.sh +D src/externals/pio2/ctest/runcdash-nwscla-intel.sh +D src/externals/pio2/ctest/runcdash-nwscla-pgi.sh +D src/externals/pio2/ctest/runctest-alcf.sh +D src/externals/pio2/ctest/runctest-anlworkstation.sh +D src/externals/pio2/ctest/runctest-cgd.sh +D src/externals/pio2/ctest/runctest-ncsa.sh +D src/externals/pio2/ctest/runctest-nersc.sh +D src/externals/pio2/ctest/runctest-nwscla.sh +D src/externals/pio2/ctest/runctest-unknown.sh +D src/externals/pio2/doc/CMakeFiles/3.2.3/CMakeSystem.cmake +D src/externals/pio2/doc/CMakeFiles/CMakeOutput.log +D src/externals/pio2/doc/CMakeFiles/cmake.check_cache +D src/externals/pio2/doc/CMakeLists.txt +D src/externals/pio2/doc/Doxyfile.in +D src/externals/pio2/doc/DoxygenLayout.xml +D src/externals/pio2/doc/Makefile.am +D src/externals/pio2/doc/customdoxygen.css +D src/externals/pio2/doc/doxygen.sty +D src/externals/pio2/doc/images/I_O_on_Few.png +D src/externals/pio2/doc/images/I_O_on_Many_Async.png +D src/externals/pio2/doc/images/I_O_on_Many_Intracomm.png +D src/externals/pio2/doc/images/I_O_on_many_async_small.png +D src/externals/pio2/doc/images/Makefile.am +D src/externals/pio2/doc/images/PIO_Async.png +D src/externals/pio2/doc/images/PIO_Decomposition.png +D src/externals/pio2/doc/images/PIO_Intracomm1.png +D src/externals/pio2/doc/images/PIO_Library_Architecture1.jpg +D src/externals/pio2/doc/images/baseimage.graffle +D src/externals/pio2/doc/images/block-cyclic-rearr.eps +D src/externals/pio2/doc/images/block-cyclic-rearr.graffle +D src/externals/pio2/doc/images/block-cyclic-rearr.png +D src/externals/pio2/doc/images/block-cyclic.eps +D src/externals/pio2/doc/images/block-cyclic.graffle +D src/externals/pio2/doc/images/block-cyclic.png +D src/externals/pio2/doc/images/dof-rearr.eps +D src/externals/pio2/doc/images/dof-rearr.graffle +D src/externals/pio2/doc/images/dof-rearr.png +D src/externals/pio2/doc/images/dof.eps +D src/externals/pio2/doc/images/dof.graffle +D src/externals/pio2/doc/images/dof.png +D src/externals/pio2/doc/source/CAMexample.txt +D src/externals/pio2/doc/source/Decomp.txt +D src/externals/pio2/doc/source/Error.txt +D src/externals/pio2/doc/source/Examples.txt +D src/externals/pio2/doc/source/Installing.txt +D src/externals/pio2/doc/source/Introduction.txt +D src/externals/pio2/doc/source/Makefile.am +D src/externals/pio2/doc/source/Testing.txt +D src/externals/pio2/doc/source/api.txt +D src/externals/pio2/doc/source/base.txt +D src/externals/pio2/doc/source/c_api.txt +D src/externals/pio2/doc/source/contributing_code.txt +D src/externals/pio2/doc/source/example/errorhandle +D src/externals/pio2/doc/source/example/simple-bc +D src/externals/pio2/doc/source/example/simple-bc-rearr +D src/externals/pio2/doc/source/example/simple-bc-rearr-pe1 +D 
src/externals/pio2/doc/source/example/simple-bc-rearr-pe2 +D src/externals/pio2/doc/source/example/simple-dof +D src/externals/pio2/doc/source/example/simple-dof-rearr +D src/externals/pio2/doc/source/faq.txt +D src/externals/pio2/doc/source/iosystem.txt +D src/externals/pio2/doc/source/mach_walkthrough.txt +D src/externals/pio2/doc/source/netcdf_integration.txt +D src/externals/pio2/doc/source/testpio_example.txt +D src/externals/pio2/doc/source/users_guide.txt +D src/externals/pio2/examples/CMakeLists.txt +D src/externals/pio2/examples/Makefile.am +D src/externals/pio2/examples/basic/CAM05.csh +D src/externals/pio2/examples/basic/CMakeLists.txt +D src/externals/pio2/examples/basic/MPASA30km.csh +D src/externals/pio2/examples/basic/MPASA60km.csh +D src/externals/pio2/examples/basic/POPB.csh +D src/externals/pio2/examples/basic/POPC.csh +D src/externals/pio2/examples/basic/POPD.csh +D src/externals/pio2/examples/basic/POPDv0.csh +D src/externals/pio2/examples/basic/POPDv1.csh +D src/externals/pio2/examples/basic/POPDv2.csh +D src/externals/pio2/examples/basic/POPDv3.csh +D src/externals/pio2/examples/basic/POPDv4.csh +D src/externals/pio2/examples/basic/POPDv5.csh +D src/externals/pio2/examples/basic/README.testpio +D src/externals/pio2/examples/basic/WRFB.csh +D src/externals/pio2/examples/basic/alloc_mod.F90.in +D src/externals/pio2/examples/basic/build_defaults.xml +D src/externals/pio2/examples/basic/check_mod.F90 +D src/externals/pio2/examples/basic/config_bench.xml +D src/externals/pio2/examples/basic/fdepends.awk +D src/externals/pio2/examples/basic/gdecomp_mod.F90 +D src/externals/pio2/examples/basic/kinds_mod.F90 +D src/externals/pio2/examples/basic/kraken.128.csh +D src/externals/pio2/examples/basic/kraken.1K.csh +D src/externals/pio2/examples/basic/kraken.256.csh +D src/externals/pio2/examples/basic/kraken.512.csh +D src/externals/pio2/examples/basic/kraken.64.csh +D src/externals/pio2/examples/basic/namelist_mod.F90 +D src/externals/pio2/examples/basic/namelists/testpio_in.apb05 +D src/externals/pio2/examples/basic/namelists/testpio_in.asb01 +D src/externals/pio2/examples/basic/namelists/testpio_in.asb04 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b01 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b02 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b03 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b04 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b05 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b06 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b07 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4b08 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4n01 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4n02 +D src/externals/pio2/examples/basic/namelists/testpio_in.n4n03 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb01 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb02 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb03 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb04 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb05 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb06 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb07 +D src/externals/pio2/examples/basic/namelists/testpio_in.pb08 +D src/externals/pio2/examples/basic/namelists/testpio_in.pn01 +D src/externals/pio2/examples/basic/namelists/testpio_in.pn02 +D src/externals/pio2/examples/basic/namelists/testpio_in.pn03 +D 
src/externals/pio2/examples/basic/namelists/testpio_in.ps01 +D src/externals/pio2/examples/basic/namelists/testpio_in.ps02 +D src/externals/pio2/examples/basic/namelists/testpio_in.ps03 +D src/externals/pio2/examples/basic/namelists/testpio_in.ps04 +D src/externals/pio2/examples/basic/namelists/testpio_in.ps05 +D src/externals/pio2/examples/basic/namelists/testpio_in.ps06 +D src/externals/pio2/examples/basic/namelists/testpio_in.ps07 +D src/externals/pio2/examples/basic/namelists/testpio_in.ps08 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb01 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb02 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb03 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb04 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb05 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb06 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb07 +D src/externals/pio2/examples/basic/namelists/testpio_in.sb08 +D src/externals/pio2/examples/basic/namelists/testpio_in.sn01 +D src/externals/pio2/examples/basic/namelists/testpio_in.sn02 +D src/externals/pio2/examples/basic/namelists/testpio_in.sn03 +D src/externals/pio2/examples/basic/namelists/testpio_in.wr01 +D src/externals/pio2/examples/basic/namelists/testpio_in.wr02 +D src/externals/pio2/examples/basic/perl5lib/ChangeLog +D src/externals/pio2/examples/basic/perl5lib/README +D src/externals/pio2/examples/basic/perl5lib/XML/Changes +D src/externals/pio2/examples/basic/perl5lib/XML/Lite.pm +D src/externals/pio2/examples/basic/perl5lib/XML/Lite/Element.pm +D src/externals/pio2/examples/basic/perl5lib/XML/README +D src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite.3 +D src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite_Element.3 +D src/externals/pio2/examples/basic/test.csh +D src/externals/pio2/examples/basic/testdecomp.F90 +D src/externals/pio2/examples/basic/testdecomp.bluefire.run +D src/externals/pio2/examples/basic/testdecomp_in +D src/externals/pio2/examples/basic/testpio.F90 +D src/externals/pio2/examples/basic/testpio_bench.pl +D src/externals/pio2/examples/basic/testpio_build.pl +D src/externals/pio2/examples/basic/testpio_run.pl +D src/externals/pio2/examples/basic/utils_mod.F90 +D src/externals/pio2/examples/basic/wstest.c +D src/externals/pio2/examples/basic/ystest.sh +D src/externals/pio2/examples/c/CMakeLists.txt +D src/externals/pio2/examples/c/Makefile.am +D src/externals/pio2/examples/c/darray_async.c +D src/externals/pio2/examples/c/darray_no_async.c +D src/externals/pio2/examples/c/example1.c +D src/externals/pio2/examples/c/example2.c +D src/externals/pio2/examples/c/examplePio.c +D src/externals/pio2/examples/c/run_tests.sh.in +D src/externals/pio2/examples/c/valsupp_example1.supp +D src/externals/pio2/examples/cxx/CMakeLists.txt +D src/externals/pio2/examples/cxx/examplePio.cxx +D src/externals/pio2/examples/f03/CMakeLists.txt +D src/externals/pio2/examples/f03/Makefile.am +D src/externals/pio2/examples/f03/exampleAsyncPio.F90 +D src/externals/pio2/examples/f03/examplePio.F90 +D src/externals/pio2/examples/f03/run_tests.sh.in +D src/externals/pio2/libpio.settings.in +D src/externals/pio2/scripts/Makefile.am +D src/externals/pio2/scripts/genf90.pl +D src/externals/pio2/scripts/prune_decomps.pl +D src/externals/pio2/src/CMakeLists.txt +D src/externals/pio2/src/Makefile.am +D src/externals/pio2/src/clib/CMakeLists.txt +D src/externals/pio2/src/clib/Makefile.am +D src/externals/pio2/src/clib/pio.h +D 
src/externals/pio2/src/clib/pio_darray.c +D src/externals/pio2/src/clib/pio_darray_int.c +D src/externals/pio2/src/clib/pio_error.c +D src/externals/pio2/src/clib/pio_error.h +D src/externals/pio2/src/clib/pio_file.c +D src/externals/pio2/src/clib/pio_get_nc.c +D src/externals/pio2/src/clib/pio_get_vard.c +D src/externals/pio2/src/clib/pio_getput_int.c +D src/externals/pio2/src/clib/pio_internal.h +D src/externals/pio2/src/clib/pio_lists.c +D src/externals/pio2/src/clib/pio_meta.h.in +D src/externals/pio2/src/clib/pio_msg.c +D src/externals/pio2/src/clib/pio_nc.c +D src/externals/pio2/src/clib/pio_nc4.c +D src/externals/pio2/src/clib/pio_put_nc.c +D src/externals/pio2/src/clib/pio_put_vard.c +D src/externals/pio2/src/clib/pio_rearrange.c +D src/externals/pio2/src/clib/pio_spmd.c +D src/externals/pio2/src/clib/pioc.c +D src/externals/pio2/src/clib/pioc_sc.c +D src/externals/pio2/src/clib/pioc_support.c +D src/externals/pio2/src/clib/topology.c +D src/externals/pio2/src/clib/uthash.h +D src/externals/pio2/src/flib/CMakeLists.txt +D src/externals/pio2/src/flib/Makefile.am +D src/externals/pio2/src/flib/ncint_mod.F90 +D src/externals/pio2/src/flib/pio.F90 +D src/externals/pio2/src/flib/pio_kinds.F90 +D src/externals/pio2/src/flib/pio_nf.F90 +D src/externals/pio2/src/flib/pio_support.F90 +D src/externals/pio2/src/flib/pio_types.F90 +D src/externals/pio2/src/flib/piodarray.F90.in +D src/externals/pio2/src/flib/piolib_mod.F90 +D src/externals/pio2/src/flib/pionfatt_mod.F90.in +D src/externals/pio2/src/flib/pionfget_mod.F90.in +D src/externals/pio2/src/flib/pionfput_mod.F90.in +D src/externals/pio2/src/gptl/CMakeLists.txt +D src/externals/pio2/src/gptl/COPYING +D src/externals/pio2/src/gptl/ChangeLog +D src/externals/pio2/src/gptl/GPTLget_memusage.c +D src/externals/pio2/src/gptl/GPTLprint_memusage.c +D src/externals/pio2/src/gptl/GPTLutil.c +D src/externals/pio2/src/gptl/Makefile.am +D src/externals/pio2/src/gptl/README +D src/externals/pio2/src/gptl/f_wrappers.c +D src/externals/pio2/src/gptl/f_wrappers_2.c +D src/externals/pio2/src/gptl/gptl.c +D src/externals/pio2/src/gptl/gptl.h +D src/externals/pio2/src/gptl/gptl.inc +D src/externals/pio2/src/gptl/gptl_papi.c +D src/externals/pio2/src/gptl/perf_mod.F90 +D src/externals/pio2/src/gptl/perf_utils.F90 +D src/externals/pio2/src/gptl/private.h +D src/externals/pio2/src/gptl/threadutil.c +D src/externals/pio2/src/ncint/Makefile.am +D src/externals/pio2/src/ncint/nc_get_vard.c +D src/externals/pio2/src/ncint/nc_put_vard.c +D src/externals/pio2/src/ncint/ncint_pio.c +D src/externals/pio2/src/ncint/ncintdispatch.c +D src/externals/pio2/src/ncint/ncintdispatch.h +D src/externals/pio2/tests/CMakeLists.txt +D src/externals/pio2/tests/Makefile.am +D src/externals/pio2/tests/cunit/CMakeLists.txt +D src/externals/pio2/tests/cunit/Makefile.am +D src/externals/pio2/tests/cunit/pio_tests.h +D src/externals/pio2/tests/cunit/run_tests.sh.in +D src/externals/pio2/tests/cunit/test_async_1d.c +D src/externals/pio2/tests/cunit/test_async_3proc.c +D src/externals/pio2/tests/cunit/test_async_4proc.c +D src/externals/pio2/tests/cunit/test_async_manyproc.c +D src/externals/pio2/tests/cunit/test_async_mpi.c +D src/externals/pio2/tests/cunit/test_async_multi2.c +D src/externals/pio2/tests/cunit/test_async_multicomp.c +D src/externals/pio2/tests/cunit/test_async_perf.c +D src/externals/pio2/tests/cunit/test_async_simple.c +D src/externals/pio2/tests/cunit/test_common.c +D src/externals/pio2/tests/cunit/test_darray.c +D src/externals/pio2/tests/cunit/test_darray_1d.c +D 
src/externals/pio2/tests/cunit/test_darray_2sync.c +D src/externals/pio2/tests/cunit/test_darray_3d.c +D src/externals/pio2/tests/cunit/test_darray_append.c +D src/externals/pio2/tests/cunit/test_darray_async.c +D src/externals/pio2/tests/cunit/test_darray_async_from_comm.c +D src/externals/pio2/tests/cunit/test_darray_async_many.c +D src/externals/pio2/tests/cunit/test_darray_async_simple.c +D src/externals/pio2/tests/cunit/test_darray_fill.c +D src/externals/pio2/tests/cunit/test_darray_frame.c +D src/externals/pio2/tests/cunit/test_darray_multi.c +D src/externals/pio2/tests/cunit/test_darray_multivar.c +D src/externals/pio2/tests/cunit/test_darray_multivar2.c +D src/externals/pio2/tests/cunit/test_darray_multivar3.c +D src/externals/pio2/tests/cunit/test_darray_vard.c +D src/externals/pio2/tests/cunit/test_decomp_frame.c +D src/externals/pio2/tests/cunit/test_decomp_uneven.c +D src/externals/pio2/tests/cunit/test_decomps.c +D src/externals/pio2/tests/cunit/test_intercomm2.c +D src/externals/pio2/tests/cunit/test_iosystem2.c +D src/externals/pio2/tests/cunit/test_iosystem2_simple.c +D src/externals/pio2/tests/cunit/test_iosystem2_simple2.c +D src/externals/pio2/tests/cunit/test_iosystem3.c +D src/externals/pio2/tests/cunit/test_iosystem3_simple.c +D src/externals/pio2/tests/cunit/test_iosystem3_simple2.c +D src/externals/pio2/tests/cunit/test_perf2.c +D src/externals/pio2/tests/cunit/test_pioc.c +D src/externals/pio2/tests/cunit/test_pioc_fill.c +D src/externals/pio2/tests/cunit/test_pioc_putget.c +D src/externals/pio2/tests/cunit/test_pioc_unlim.c +D src/externals/pio2/tests/cunit/test_rearr.c +D src/externals/pio2/tests/cunit/test_shared.c +D src/externals/pio2/tests/cunit/test_simple.c +D src/externals/pio2/tests/cunit/test_spmd.c +D src/externals/pio2/tests/fncint/Makefile.am +D src/externals/pio2/tests/fncint/ftst_pio.f90 +D src/externals/pio2/tests/fncint/ftst_pio_orig.f90 +D src/externals/pio2/tests/fncint/run_tests.sh.in +D src/externals/pio2/tests/fncint/tst_c_pio.c +D src/externals/pio2/tests/general/CMakeLists.txt +D src/externals/pio2/tests/general/Makefile.am +D src/externals/pio2/tests/general/README.md +D src/externals/pio2/tests/general/ncdf_fail.F90.in +D src/externals/pio2/tests/general/ncdf_get_put.F90.in +D src/externals/pio2/tests/general/ncdf_inq.F90.in +D src/externals/pio2/tests/general/ncdf_simple_tests.F90.in +D src/externals/pio2/tests/general/pio_decomp_fillval.F90.in +D src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +D src/externals/pio2/tests/general/pio_decomp_tests.F90.in +D src/externals/pio2/tests/general/pio_decomp_tests_1d.F90.in +D src/externals/pio2/tests/general/pio_decomp_tests_2d.F90.in +D src/externals/pio2/tests/general/pio_decomp_tests_2d_async.F90.in +D src/externals/pio2/tests/general/pio_decomp_tests_2d_halo.F90.in +D src/externals/pio2/tests/general/pio_decomp_tests_3d.F90.in +D src/externals/pio2/tests/general/pio_decomphalo_tests_2d.F90.in +D src/externals/pio2/tests/general/pio_fail.F90.in +D src/externals/pio2/tests/general/pio_file_fail.F90.in +D src/externals/pio2/tests/general/pio_file_simple_tests.F90.in +D src/externals/pio2/tests/general/pio_init_finalize.F90.in +D src/externals/pio2/tests/general/pio_iosystem_async_tests.F90.in +D src/externals/pio2/tests/general/pio_iosystem_tests.F90.in +D src/externals/pio2/tests/general/pio_iosystem_tests2.F90.in +D src/externals/pio2/tests/general/pio_iosystem_tests3.F90.in +D src/externals/pio2/tests/general/pio_rearr.F90.in +D 
src/externals/pio2/tests/general/pio_rearr_opts.F90.in +D src/externals/pio2/tests/general/pio_rearr_opts2.F90.in +D src/externals/pio2/tests/general/run_tests.sh.in +D src/externals/pio2/tests/general/test_memleak.c +D src/externals/pio2/tests/general/util/Makefile.am +D src/externals/pio2/tests/general/util/pio_tf_f90gen.pl +D src/externals/pio2/tests/general/util/pio_tutil.F90 +D src/externals/pio2/tests/ncint/CMakeLists.txt +D src/externals/pio2/tests/ncint/Makefile.am +D src/externals/pio2/tests/ncint/pio_err_macros.h +D src/externals/pio2/tests/ncint/run_perf.sh.in +D src/externals/pio2/tests/ncint/run_tests.sh.in +D src/externals/pio2/tests/ncint/tst_async_multi.c +D src/externals/pio2/tests/ncint/tst_ncint_async_perf.c +D src/externals/pio2/tests/ncint/tst_ncint_perf.c +D src/externals/pio2/tests/ncint/tst_pio_async.c +D src/externals/pio2/tests/ncint/tst_pio_udf.c +D src/externals/pio2/tests/ncint/tst_var_compress.c +D src/externals/pio2/tests/performance/CMakeLists.txt +D src/externals/pio2/tests/performance/Makefile.am +D src/externals/pio2/tests/performance/Pioperformance.md +D src/externals/pio2/tests/performance/gensimple.pl +D src/externals/pio2/tests/performance/kt.PIO1.perfmakefile +D src/externals/pio2/tests/performance/pioperf.nl +D src/externals/pio2/tests/performance/pioperformance.F90 +D src/externals/pio2/tests/performance/run_tests.sh.in +D src/externals/pio2/tests/unit/CMakeLists.txt +D src/externals/pio2/tests/unit/Levy_Notes +D src/externals/pio2/tests/unit/Makefile.am +D src/externals/pio2/tests/unit/basic_tests.F90 +D src/externals/pio2/tests/unit/driver.F90 +D src/externals/pio2/tests/unit/ftst_vars_chunking.F90 +D src/externals/pio2/tests/unit/global_vars.F90 +D src/externals/pio2/tests/unit/input.nl +D src/externals/pio2/tests/unit/nc_set_log_level2.c +D src/externals/pio2/tests/unit/ncdf_tests.F90 +D src/externals/pio2/tests/unit/not_netcdf.ieee +D src/externals/pio2/tests/unit/run_tests.sh.in +D src/share/RandNum/include/dSFMT-common.h +D src/share/RandNum/include/dSFMT-params.h +D src/share/RandNum/include/dSFMT-params19937.h +D src/share/RandNum/include/dSFMT.h +D src/share/RandNum/src/dsfmt_f03/dSFMT.c +D src/share/RandNum/src/dsfmt_f03/dSFMT_interface.F90 +D src/share/RandNum/src/dsfmt_f03/dSFMT_utils.c +D src/share/RandNum/src/kissvec/kissvec.c +D src/share/RandNum/src/kissvec/kissvec_mod.F90 +D src/share/RandNum/src/mt19937/mersennetwister_mod.F90 +D src/share/RandNum/src/shr_RandNum_mod.F90 +D src/share/RandNum/test/bench/Makefile +D src/share/RandNum/test/bench/test_shr_RandNum.F90 +D src/share/esmf_wrf_timemgr/CMakeLists.txt +D src/share/esmf_wrf_timemgr/ESMF.F90 +D src/share/esmf_wrf_timemgr/ESMF_AlarmClockMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_AlarmMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_BaseMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_BaseTimeMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_CalendarMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_ClockMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_FractionMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_Macros.inc +D src/share/esmf_wrf_timemgr/ESMF_ShrTimeMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_Stubs.F90 +D src/share/esmf_wrf_timemgr/ESMF_TimeIntervalMod.F90 +D src/share/esmf_wrf_timemgr/ESMF_TimeMgr.inc +D src/share/esmf_wrf_timemgr/ESMF_TimeMod.F90 +D src/share/esmf_wrf_timemgr/Makefile +D src/share/esmf_wrf_timemgr/MeatMod.F90 +D src/share/esmf_wrf_timemgr/README +D src/share/esmf_wrf_timemgr/unittests/Makefile +D src/share/esmf_wrf_timemgr/unittests/go.csh +D src/share/esmf_wrf_timemgr/unittests/test.F90 
+D src/share/esmf_wrf_timemgr/unittests/wrf_stuff.F90 +D src/share/esmf_wrf_timemgr/wrf_error_fatal.F90 +D src/share/esmf_wrf_timemgr/wrf_message.F90 +D src/share/include/dynamic_vector_procdef.inc +D src/share/include/dynamic_vector_typedef.inc +D src/share/include/shr_assert.h +D src/share/nuopc/glc_elevclass_mod.F90 +D src/share/nuopc/seq_drydep_mod.F90 +D src/share/nuopc/shr_carma_mod.F90 +D src/share/nuopc/shr_expr_parser_mod.F90 +D src/share/nuopc/shr_fire_emis_mod.F90 +D src/share/nuopc/shr_megan_mod.F90 +D src/share/nuopc/shr_ndep_mod.F90 +D src/share/streams/shr_dmodel_mod.F90 +D src/share/streams/shr_strdata_mod.F90 +D src/share/streams/shr_stream_mod.F90 +D src/share/streams/shr_tInterp_mod.F90 +D src/share/test/old_unit_testers/Makefile +D src/share/test/old_unit_testers/Mkdepends +D src/share/test/old_unit_testers/Mksrcfiles +D src/share/test/old_unit_testers/bundle_expected.F90 +D src/share/test/old_unit_testers/config.h +D src/share/test/old_unit_testers/make.Macros +D src/share/test/old_unit_testers/namelist +D src/share/test/old_unit_testers/nl/atm.stdin +D src/share/test/old_unit_testers/nl/cpl.stdin +D src/share/test/old_unit_testers/nl/ice.stdin +D src/share/test/old_unit_testers/nl/lnd.stdin +D src/share/test/old_unit_testers/nl/ocn.stdin +D src/share/test/old_unit_testers/run_dshr_bundle_test +D src/share/test/old_unit_testers/run_file_test +D src/share/test/old_unit_testers/test_mod.F90 +D src/share/test/old_unit_testers/test_shr_file.F90 +D src/share/test/old_unit_testers/test_shr_log.F90 +D src/share/test/old_unit_testers/test_shr_mpi.F90 +D src/share/test/old_unit_testers/test_shr_orb.F90 +D src/share/test/old_unit_testers/test_shr_scam.F90 +D src/share/test/old_unit_testers/test_shr_streams.F90 +D src/share/test/old_unit_testers/test_shr_sys.F90 +D src/share/test/old_unit_testers/test_shr_tInterp.F90 +D src/share/test/unit/CMakeLists.txt +D src/share/test/unit/dynamic_vector/CMakeLists.txt +D src/share/test/unit/dynamic_vector/character16_vector_tests.pf.in +D src/share/test/unit/dynamic_vector/dynamic_vector_base_tests.inc +D src/share/test/unit/dynamic_vector/dynamic_vector_character16.F90 +D src/share/test/unit/dynamic_vector/dynamic_vector_int_ptr.F90 +D src/share/test/unit/dynamic_vector/dynamic_vector_integer.F90 +D src/share/test/unit/dynamic_vector/dynamic_vector_r8.F90 +D src/share/test/unit/dynamic_vector/int_ptr_vector_tests.pf.in +D src/share/test/unit/dynamic_vector/integer_vector_tests.pf.in +D src/share/test/unit/dynamic_vector/ptr_wrapper.F90 +D src/share/test/unit/dynamic_vector/r8_vector_tests.pf.in +D src/share/test/unit/mock/CMakeLists.txt +D src/share/test/unit/mock/README +D src/share/test/unit/mock/shr_sys_mod.nompi_abortthrows.F90 +D src/share/test/unit/shr_abort_test/CMakeLists.txt +D src/share/test/unit/shr_abort_test/README +D src/share/test/unit/shr_abort_test/test_shr_abort.pf +D src/share/test/unit/shr_assert_test/CMakeLists.txt +D src/share/test/unit/shr_assert_test/test_assert.pf +D src/share/test/unit/shr_assert_test/test_assert_array.pf +D src/share/test/unit/shr_assert_test/test_macro.pf +D src/share/test/unit/shr_assert_test/test_ndebug.pf +D src/share/test/unit/shr_cal_test/CMakeLists.txt +D src/share/test/unit/shr_cal_test/test_shr_cal.pf +D src/share/test/unit/shr_infnan_test/CMakeLists.txt +D src/share/test/unit/shr_infnan_test/test_infnan.F90 +D src/share/test/unit/shr_log_test/CMakeLists.txt +D src/share/test/unit/shr_log_test/test_error_printers.pf +D src/share/test/unit/shr_precip_test/CMakeLists.txt +D 
src/share/test/unit/shr_precip_test/test_shr_precip.pf +D src/share/test/unit/shr_spfn_test/CMakeLists.txt +D src/share/test/unit/shr_spfn_test/test_erf_r4.pf +D src/share/test/unit/shr_spfn_test/test_erf_r8.pf +D src/share/test/unit/shr_spfn_test/test_gamma_factorial.pf +D src/share/test/unit/shr_spfn_test/test_igamma.pf +D src/share/test/unit/shr_strconvert_test/CMakeLists.txt +D src/share/test/unit/shr_strconvert_test/test_toString.pf +D src/share/test/unit/shr_string_test/CMakeLists.txt +D src/share/test/unit/shr_string_test/test_shr_string.pf +D src/share/test/unit/shr_vmath_test/CMakeLists.txt +D src/share/test/unit/shr_vmath_test/test_vmath.F90 +D src/share/test/unit/shr_wv_sat_test/CMakeLists.txt +D src/share/test/unit/shr_wv_sat_test/test_wv_sat.pf +D src/share/test/unit/shr_wv_sat_test/test_wv_sat_each_method.pf +D src/share/unit_test_stubs/README +D src/share/unit_test_stubs/pio/CMakeLists.txt +D src/share/unit_test_stubs/pio/README +D src/share/unit_test_stubs/pio/pio.F90.in +D src/share/unit_test_stubs/util/CMakeLists.txt +D src/share/unit_test_stubs/util/README +D src/share/unit_test_stubs/util/shr_abort_mod.abortthrows.F90 +D src/share/util/CMakeLists.txt +D src/share/util/mct_mod.F90 +D src/share/util/shr_abort_mod.F90 +D src/share/util/shr_assert_mod.F90.in +D src/share/util/shr_cal_mod.F90 +D src/share/util/shr_const_mod.F90 +D src/share/util/shr_file_mod.F90 +D src/share/util/shr_flds_mod.F90 +D src/share/util/shr_flux_mod.F90 +D src/share/util/shr_frz_mod.F90.in +D src/share/util/shr_infnan_mod.F90.in +D src/share/util/shr_kind_mod.F90 +D src/share/util/shr_log_mod.F90 +D src/share/util/shr_map_mod.F90 +D src/share/util/shr_mct_mod.F90 +D src/share/util/shr_mem_mod.F90 +D src/share/util/shr_mpi_mod.F90 +D src/share/util/shr_msg_mod.F90 +D src/share/util/shr_ncread_mod.F90 +D src/share/util/shr_nl_mod.F90 +D src/share/util/shr_orb_mod.F90 +D src/share/util/shr_pcdf_mod.F90 +D src/share/util/shr_pio_mod.F90 +D src/share/util/shr_precip_mod.F90 +D src/share/util/shr_reprosum_mod.F90 +D src/share/util/shr_reprosumx86.c +D src/share/util/shr_scam_mod.F90 +D src/share/util/shr_spfn_mod.F90 +D src/share/util/shr_strconvert_mod.F90 +D src/share/util/shr_string_mod.F90 +D src/share/util/shr_sys_mod.F90 +D src/share/util/shr_taskmap_mod.F90 +D src/share/util/shr_timer_mod.F90 +D src/share/util/shr_vmath_mod.F90 +D src/share/util/shr_wv_sat_mod.F90 +D src/share/util/water_isotopes.F90 +D src/share/util/water_types.F90 +M tools/configure +M tools/cprnc/CMakeLists.txt +M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 5-11-2021 +Tag: cime5.8.47 +Answer Changes: None +Tests: SMS_D_Lm1_Mmpi-serial.CLM_USRDAT.I1PtClm50SpRs.cheyenne_intel.clm-USUMB +Dependencies: + +Brief Summary: + - Add back domain for CLM_USRDAT for mct driver. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +f01e48081 Merge pull request #3954 from jedwards4b/fix_clm_usrdata_mct + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 5-10-2021 +Tag: cime5.8.46 +Answer Changes: None +Tests: subset of aux_glc +Dependencies: + +Brief Summary: + - Rename GLC Greenland grid from "gland" to "gris" + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +a52e7faf6 Merge pull request #3943 from billsacks/rename_gland + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/config_grids_mct.xml +M src/drivers/mct/cime_config/config_component.xml + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 5-7-2021 +Tag: cime5.8.45 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Update this so that paths with % in them do not break cism build. + - Fix res for nuopc. + - Add cice6 as a new CESM component. + - Change behavior of check on DIN_LOC_ROOT in case_setup.py. + - Reset a test that is rerun after a run failure. + - Adds git submodule info to provenance. + - Fixes phase 1 of multisubmit not executing during rerun. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +1e72b5db7 Merge pull request #3956 from jedwards4b/GLCMakefile +5c15fb06e Merge pull request #3945 from jedwards4b/srt_fix_for_nuopc +b325c7109 Merge pull request #3952 from mvertens/features/add_cice6 +7c99238d7 Merge pull request #3925 from briandobbins/container_inputdata +b95a28b41 Merge pull request #3939 from billsacks/fix_retry +56a2ca18e Merge pull request #3949 from jasonb5/update_git_provenance +205c5dd48 Merge pull request #3924 from jasonb5/fix_pet_multisubmit + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/config_tests.xml +M scripts/Tools/Makefile +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/SystemTests/system_tests_compare_two.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_status.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +M scripts/lib/CIME/tests/case_fake.py +A scripts/lib/CIME/tests/test_provenance.py +M scripts/tests/scripts_regression_tests.py + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 5-3-2021 +Tag: cime5.8.44 +Answer Changes: None +Tests: ERS_Ld3.f19_f19_mg17.FXHIST.cheyenne_intel.cam-waccmx_weimer + scripts_regression_tests +Dependencies: + +Brief Summary: + - Revert cheyenne intel mpt to 2.22. + - Change shebang to explicit python3.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +6cbfdd632 Merge pull request #3951 from ESMCI/fischer/mpt_cheyenne +296aeaf4a Merge pull request #3948 from jedwards4b/move_to_py3 + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M config/cesm/machines/config_machines.xml +M scripts/Tools/archive_metadata +M scripts/Tools/bless_test_results +M scripts/Tools/case.build +M scripts/Tools/case.cmpgen_namelists +M scripts/Tools/case.qstatus +M scripts/Tools/case.setup +M scripts/Tools/case.submit +M scripts/Tools/check_case +M scripts/Tools/check_input_data +M scripts/Tools/check_lockedfiles +M scripts/Tools/cime_bisect +M scripts/Tools/code_checker +M scripts/Tools/compare_namelists +M scripts/Tools/compare_test_results +M scripts/Tools/component_compare_baseline +M scripts/Tools/component_compare_copy +M scripts/Tools/component_compare_test +M scripts/Tools/component_generate_baseline +M scripts/Tools/cs.status +M scripts/Tools/e3sm_check_env +M scripts/Tools/generate_cylc_workflow.py +M scripts/Tools/getTiming +M scripts/Tools/get_case_env +M scripts/Tools/get_standard_makefile_args +M scripts/Tools/jenkins_generic_job +M scripts/Tools/list_e3sm_tests +M scripts/Tools/mvsource +M scripts/Tools/normalize_cases +M scripts/Tools/pelayout +M scripts/Tools/preview_namelists +M scripts/Tools/preview_run +M scripts/Tools/save_provenance +M scripts/Tools/simple_compare +M scripts/Tools/testreporter.py +M scripts/Tools/wait_for_tests +M scripts/Tools/xmlchange +M scripts/Tools/xmlquery +M scripts/create_clone +M scripts/create_newcase +M scripts/create_test +M scripts/query_config +M scripts/query_testlists +M scripts/tests/list_tests +M scripts/tests/scripts_regression_tests.py + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 4-26-2021 +Tag: cime5.8.43 +Answer Changes: None +Tests: scripts_regression_tests, aux_cime_baselines +Dependencies: + +Brief Summary: + - Update intel compiler and esmf on cheyenne. + - Fix issue to build a testcase on Casper through an interactive node. + - Fix py3 error in srun syntax. + - Remove "Code review" line from PR template. + - Add support for WAV as primary component. + - Update case status to include jobid on batch system. + - Allow E3SM to use its versions of coupler and data, stub, and x models. + - Add neon server to inputdata. + - Fix create_test --retry when generating baselines. + - Generalize build directory paths, and prep for CESM share repository. + - Fix ChangeLog. + - Update configuration xml for Casper and add openacc directives to enable GPU simulation. + - Fix query_config for py3 output format.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +40be61f38 Merge pull request #3941 from ESMCI/fischer/cheyenne_intel +2b2a18dd0 Merge pull request #3942 from sjsprecious/master +83ddb3e4e fix py3 error in srun syntax +8b48a2e2d Merge pull request #3938 from billsacks/pr_template_cleanup +6407bc297 Merge pull request #3935 from sbrus89/wav_primary_fix +44b241d26 Merge pull request #3926 from jasonb5/update_case_status +ebdb29719 Merge pull request #3930 from ESMCI/rljacob/move-for-e3sm +e256ea7ad Merge pull request #3927 from jedwards4b/add_neon_to_inputdata +f5433921b Merge pull request #3914 from billsacks/retry_baseline_overwrite +599e0bdd5 Merge pull request #3917 from ESMCI/fischer/cime_sep +25d85e50c Update for cime5.8.41 and cime5.8.42 +86f0fc800 Merge pull request #3922 from sjsprecious/master +7aa6b0c26 Merge pull request #3921 from jedwards4b/query_config_py3_update + + +Modified files: git diff --name-status [previous_tag] +M .github/PULL_REQUEST_TEMPLATE +M ChangeLog +M config/cesm/config_inputdata.xml +A config/cesm/machines/Depends.nvhpc-gpu +A config/cesm/machines/Depends.pgi-gpu +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/e3sm/config_files.xml +M scripts/Tools/Makefile +M scripts/create_test +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/inputdata.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_clone.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/tests/test_utils.py +M scripts/lib/CIME/utils.py +M scripts/query_config +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.internal_components +M src/build_scripts/buildlib_cmake.internal_components +M src/share/util/shr_spfn_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 4-8-2021 +Tag: cime5.8.42 +Answer Changes: None +Tests: hand tested and scripts_regression_tests +Dependencies: + +Brief Summary: + - No need to test ESMF_AWARE_THREADING here. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +b655c9127 Merge pull request #3918 from jedwards4b/ninst_fix + +Modified files: git diff --name-status [previous_tag] +M scripts/lib/CIME/XML/env_mach_pes.py + +====================================================================== + +====================================================================== + +Originator: Jim Edwards +Date: 4-6-2021 +Tag: cime5.8.41 +Answer Changes: Climate Changing just for T compsets. +Tests: scripts_regression_tests + SMS_Vnuopc_D_P1x1.f10_f10_mg37.I2000Clm50Sp.izumi_nag.clm-default +Dependencies: + +Brief Summary: + - Save Scorpio I/O statistics. + - Support for threading in two different ways for esmf. + - Fix issue with build of nag mpiserial. + - Handle case when domain_root is not present. + - Point to new dlnd scpl forcing data. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +81d4f2033 Merge pull request #3904 from ESMCI/jayeshkrishna/spio_stats_provenance +ada43e1c9 Merge pull request #3899 from jedwards4b/esmf_threading +b86d2e1b5 Merge pull request #3908 from jedwards4b/izumi_nag_mpiserial_fix +000b3c7d5 Merge pull request #3903 from jedwards4b/domain_root_fix +af4d0ae47 Merge pull request #3901 from billsacks/dlnd_new_glc_forcings + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M config/cesm/machines/config_machines.xml +M scripts/Tools/pelayout +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/provenance.py +M src/components/data_comps_mct/dlnd/cime_config/config_component.xml + +====================================================================== + + +====================================================================== + +Originator: Chris Fischer +Date: 3-30-2021 +Tag: cime5.8.40 +Answer Changes: +Tests: +Dependencies: + +Brief Summary: + - Bug fix for generating C/G nuopc compsets + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +a900c8ef6 Merge pull request #3900 from mvertens/feature/nuopc_grids_bugfix + + +Modified files: git diff --name-status [previous_tag] +M scripts/lib/CIME/XML/grids.py + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 3-26-2021 +Tag: cime5.8.39 +Answer Changes: Round Off for nuopc/cmeps configurations. +Tests: scripts_regression_tests, SMS_D_Ld6_Mmpi-serial_Vnuopc.1x1_smallvilleIA.IHistClm45BgcCropQianRs, + SMS_D_Ld6_Mmpi-serial_Vnuopc.1x1_smallvilleIA.IHistClm45BgcCropQianRs +Dependencies: + +Brief Summary: + - Add ROF support in the primary component computation. + - Bug fix for single column mode for nuopc/cmeps. + - Fixes to nuopc single column PTS_LAT and PTS_LON. + - Switch e3sm config to COMP_ROOT_DIR. + - Fix format descriptor in mem-logging for IBM compiler. + - Bump pillow from 7.1.2 to 8.1.1 in /tools/statistical_ensemble_test/pyCECT/docs. + - Bug fix for specifying mesh for 0.25 degree runoff grid. + - Changes for zeus port. + - Avoid adding CDEPS things to inc & lib paths with LILAC. + - New nuopc/cmeps single/column single/point functionality. + - Logs tail of cprnc outputs to TestStatus.log. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +1e705ee39 Merge pull request #3895 from ESMCI/jgfouca/add_rof_support_to_primary_comp +f00ba9e2c Merge pull request #3898 from ESMCI/mvertens/scol_bugfix +4d16c5b91 Merge pull request #3894 from ESMCI/mvertens/scol_bugfix +92e0ecfbe Merge pull request #3891 from ESMCI/rljacob/update-e3sm-config +de4a1e282 Merge pull request #3879 from ESMCI/azamat/mprof/replace-indef-fmt-xlf +cd997cab8 Merge pull request #3890 from ESMCI/dependabot/pip/tools/statistical_ensemble_test/pyCECT/docs/pillow-8.1.1 +0cd2268d5 Merge pull request #3889 from ESMCI/mvertens/drof_mesh_bugfix +4822938a0 Merge pull request #3880 from ESMCI/peano/zeus_update_port +929fd466e Merge pull request #3882 from billsacks/no_cdeps_libs_with_lilac +17a980fbf Merge pull request #3884 from ESMCI/mvertens/scol +47514026c Merge pull request #3861 from jasonb5/log_cprnc_tail + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/config_grids_mct.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/e3sm/config_files.xml +R076 config/xml_schemas/config_grids_v2.1.xsd config/xml_schemas/config_grids_v2.2.xsd +M scripts/Tools/Makefile +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_setup.py +M src/drivers/mct/main/cime_comp_mod.F90 +M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 3-12-2021 +Tag: cime5.8.38 +Answer Changes: bit-for-bit, climate-changing for trigrid +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Disable kokkos tests when building for E3SM. + - Remove references to CISM1-only grid for CESM. + - NAG port for nuopc. + - Generalize the file pattern match for cism.config files. + - Add header row and perf-archiving to memory profiling logs. + - Add run scripts to provenance capture. + - Make 64bit data default netcdf format. + - PIO2 update. + - Enable 2010 SST climatology for all variants of F-2010 compsets. + - Improve check_input_data tool. + - Fixes case where a skipped test would result in logfile_name being undefined. + - SCORPIO needs _PNETCDF. + - nuopc result is in drv.log not med.log. + - Adds support of DATM_MODE=CLM1PT for ELM. + - Introduction of MASK_GRID for CMEPS. + - Fixes cleaning up only on success. + - bless/compare_test_results should not process build-only tests. + - Minor changes to Makefile needed for izumi nag build to link to ESMF lib. + - Update driver-mct support for trigrid configuration where lnd is not on the atm grid. + - Make update success more robust. + - env_mach_specific needs to process all mpirun nodes from machobj. + - Add a new env_run XML value for E3SM: CASE_GROUP. + - Turn PIO_USE_MALLOC back on. + - Add machine frontera and fix run_exe setting issue. + - Remove collapse directives for Cray compiler. + - Fix some documentation of EXEROOT. + - Fix i4 in shr_reprosum_mod and add comments. + +User interface changes:
+ +PR summary: git log --oneline --first-parent [previous_tag]..master +33f236c48 Merge pull request #3881 from ESMCI/jgfouca/disable_kokkos_tests +3321317ff Merge pull request #3785 from billsacks/remove_cism1 +d67b18ec7 Merge pull request #3878 from jedwards4b/nag_port +75e8f7af6 Merge pull request #3869 from billsacks/generalize_cism_config +fd30d9c1f Merge pull request #3875 from ESMCI/azamat/mprof/add-header-archiving +b164a7f5b Merge pull request #3873 from sarats/sarats/capture-runscripts-provenance +56c121ed8 Merge pull request #3870 from jedwards4b/make_64bit_data_default +162767bde Merge pull request #3859 from jedwards4b/pio2_update +5253df01c Merge pull request #3867 from ESMCI/wlin/F2010_sstdata +e76f2cef8 Merge pull request #3711 from ESMCI/wpcoomb/improve_check_input_data_tool +2b49cf54f Merge pull request #3860 from jasonb5/fix_compare_test_results_undefined +5382666a7 Merge pull request #3864 from ESMCI/jgfouca/fix_for_makefile +9fd7ee81c Merge pull request #3863 from jedwards4b/nuopc_log_fix +3f5a34720 Merge pull request #3845 from ESMCI/bishtgautam/user-defined-forcing-for-elm +2604dd663 Merge pull request #3841 from ESMCI/mvertens/bugfixes_auxcam +af03e9868 Merge pull request #3847 from jasonb5/fix_directory_removal +f90d23a50 Merge pull request #3855 from ESMCI/jgfouca/fix_test_results_for_B +dbf9f538a Merge pull request #3856 from fvitt/izumi_esmflib +55704c82a Merge pull request #3853 from ESMCI/jonbob/update-trigrid-support +78448a114 Merge pull request #3851 from ESMCI/jgfouca/update_success_robust +6f2688798 Merge pull request #3848 from ESMCI/jgfouca/fix_mpirun_env_mach_specific +743a2c967 Merge pull request #3844 from ESMCI/jgfouca/add_case_group +d088f9826 Merge pull request #3843 from ESMCI/jgfouca/e3sm_cime_temp +ef6b28c80 Merge pull request #3836 from jedwards4b/add_frontera_fix_run_exe_xml_issue +6335dd8b8 Merge pull request #3830 from ESMCI/azamat/mct-cime/rm-collapse-for-cray +664376d67 Merge pull request #3840 from billsacks/fix_doc_exeroot +0940e6a6d Merge pull request #3839 from ESMCI/worleyph/reprosum_i4_fix + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/config_grids_mct.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M doc/source/users_guide/running-a-case.rst +M scripts/Tools/Makefile +M scripts/create_newcase +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/SystemTests/nodefail.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/compare_test_results.py +M scripts/lib/CIME/provenance.py +A scripts/lib/CIME/tests/test_compare_test_results.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.kokkos +M src/components/data_comps_mct/datm/cime_config/buildnml +M src/components/data_comps_mct/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps_mct/docn/cime_config/config_component.xml +M src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +M src/drivers/mct/cime_config/config_component.xml +M 
src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/prep_rof_mod.F90 +M src/drivers/mct/main/seq_diag_mct.F90 +M src/drivers/mct/main/seq_frac_mct.F90 +M src/externals/mct/mct/m_AttrVect.F90 +M src/externals/pio2/.github/workflows/cmake.yml +A src/externals/pio2/.github/workflows/cmake_netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml +A src/externals/pio2/.github/workflows/netcdf-4.7.4_hdf5-1.10.7_pnetcdf-12.1_ncint_mpich-3.3_asan.yml +A src/externals/pio2/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_no_pnetcdf_ncint_mpich-3.3.yml +A src/externals/pio2/.github/workflows/netcdf-4.7.4_hdf5-1.12.0_pnetcdf-12.2_ncint_mpich-3.3_asan.yml +R100 src/externals/pio2/.github/workflows/a4.yml src/externals/pio2/.github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_mpich-3.3.yml +R100 src/externals/pio2/.github/workflows/a3.yml src/externals/pio2/.github/workflows/netcdf-4.7.4_pnetcdf-12.1_ncint_openmpi_4.0.4.yml +R100 src/externals/pio2/.github/workflows/strict_autotools.yml src/externals/pio2/.github/workflows/strict_autotools_ubuntu_latest.yml +M src/externals/pio2/README.md +M src/externals/pio2/configure.ac +M src/externals/pio2/ctest/CTestScript-Test.cmake +A src/externals/pio2/doc/images/I_O_on_many_async_small.png +M src/externals/pio2/src/clib/pio.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_darray_int.c +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_sc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/Makefile.am +M src/externals/pio2/src/flib/piolib_mod.F90 +M src/externals/pio2/src/ncint/ncintdispatch.c +M src/externals/pio2/src/ncint/ncintdispatch.h +M src/externals/pio2/tests/cunit/CMakeLists.txt +M src/externals/pio2/tests/cunit/pio_tests.h +M src/externals/pio2/tests/cunit/run_tests.sh.in +M src/externals/pio2/tests/cunit/test_async_1d.c +M src/externals/pio2/tests/cunit/test_async_perf.c +M src/externals/pio2/tests/cunit/test_darray_multivar.c +M src/externals/pio2/tests/cunit/test_darray_multivar3.c +M src/externals/pio2/tests/cunit/test_perf2.c +M src/externals/pio2/tests/cunit/test_simple.c +M src/externals/pio2/tests/fncint/ftst_pio_orig.f90 +M src/externals/pio2/tests/ncint/pio_err_macros.h +M src/externals/pio2/tests/ncint/tst_var_compress.c +M src/externals/pio2/tests/unit/basic_tests.F90 +M src/externals/pio2/tests/unit/driver.F90 +M src/externals/pio2/tests/unit/ncdf_tests.F90 +M src/share/unit_test_stubs/pio/pio.F90.in +M src/share/util/shr_pio_mod.F90 +M src/share/util/shr_reprosum_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 1-27-2021 +Tag: cime5.8.37 +Answer Changes: Some fill_values and pgi on cheyenne +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - wait_for_tests: tweak cdash nml compare column. + - Pio bld fixes. + - pnetcdf detection correction. + - Added deleted f10_f10_musgs grid back into config_grids.xml. + - Changes needed to create fractions/masks at runtime with NUOPC. + - Better create_test/wait_for_tests integration. + - Add support for Chrysalis in saving provenance info. + - Remove unused logic. + - Update PIO2 to latest version and add testing support for PIO async tests. + - Update esmf to latest on cheyenne. + - Add pnetcdf module to centos7-linux build. + - Allow srt to skip some tests. 
+ - Pass integer and not floating point for cpus-per-task. + - Need to get queue from case, not batch object. + - Update cheyenne esmf build. + - Seq io fix pio2. + - Add some SCREAM config dirs. + - Update to PyCECT 3.2.0. + - Update pgi and esmf on cheyenne. + - Add memory usage logging. + - Facilitate adding defaults for a namelist group conditionally. + - Add a mask. + - Check if TEST_ROOT exists earlier in cleanup from scripts_regression_tests. + - query_testlists: replace newlines in comments with spaces. + +User interface changes: + - Slight change to output of query_testlists + +PR summary: git log --oneline --first-parent [previous_tag]..master +d005fc583 Merge pull request #3829 from ESMCI/jgfouca/tweak_cdash_nml_output +1c949c056 Merge pull request #3837 from jedwards4b/pio_bld_fixes +738bd23c4 Merge pull request #3822 from jedwards4b/pio2_pnetcdf_detection +6df28a93b Merge pull request #3832 from ESMCI/mvertens/bugfix_f10maskusgs +79ad32f53 Merge pull request #3827 from ESMCI/mvertens/dynfrac +6834f9da8 Merge pull request #3824 from ESMCI/jgfouca/misc_cime_updates +10675d59e Merge pull request #3819 from ESMCI/worleyph/chrysalis_support +cc2fa1c85 Merge pull request #3809 from ESMCI/jgfouca/minor_makefile_cleanup +b71c1f341 Merge pull request #3817 from jedwards4b/pio_async +c0b4e0714 update esmf to latest on cheyenne +52e50f9f1 add pnetcdf module to centos7-linux build +c9c7f88e2 Merge pull request #3821 from jedwards4b/skip_tests +93243a4d8 Merge pull request #3820 from adityakavalur/patch-1 +7e9ee92fa need to get queue from case, not batch object +c6a0fe00e update cheyenne esmf build +d74ba19a7 Merge pull request #3811 from jedwards4b/seq_io_fix_pio2 +56a4ebb62 Merge pull request #3813 from ESMCI/jgfouca/add_scream_config_files +00a9e6205 Merge pull request #3812 from ESMCI/fischer/pycect_update +2818cf59c Merge pull request #3803 from jedwards4b/cheyenne_pgi +aecb9b1dc Merge pull request #3801 from ESMCI/azamat/driver/mem-profile-logging +ac45f7a7e Merge pull request #3804 from billsacks/nmlgen_skip_groups +6dc716233 Merge pull request #3800 from jedwards4b/add_a_mask +0d18eabfa Merge pull request #3798 from ESMCI/wpcoomb_fix_cleanup_in_regression_tests +22276aad9 Merge pull request #3797 from billsacks/no_newline_in_query_testlists + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M config/cesm/config_grids.xml +M config/cesm/machines/config_machines.xml +M config/e3sm/config_files.xml +M config/xml_schemas/config_grids_v2.1.xsd +M config/xml_schemas/config_grids_v2.xsd +M scripts/Tools/Makefile +M scripts/Tools/cime_bisect +M scripts/Tools/wait_for_tests +M scripts/create_test +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_run.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/namelist_definition.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/compare_namelists.py +M scripts/lib/CIME/cs_status.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_status.py +M scripts/lib/CIME/test_utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/get_tests.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.pio +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/config_component_e3sm.xml +M 
src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/seq_hist_mod.F90 +M src/drivers/mct/main/seq_io_mod.F90 +M src/drivers/mct/shr/seq_comm_mct.F90 +M src/externals/pio2/.gitignore +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/configure.ac +M src/externals/pio2/examples/CMakeLists.txt +M src/externals/pio2/examples/c/example1.c +M src/externals/pio2/examples/c/example2.c +M src/externals/pio2/examples/f03/CMakeLists.txt +A src/externals/pio2/examples/f03/exampleAsyncPio.F90 +M src/externals/pio2/examples/f03/examplePio.F90 +M src/externals/pio2/src/clib/pio.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_error.h +M src/externals/pio2/src/clib/pio_getput_int.c +M src/externals/pio2/src/clib/pio_internal.h +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_rearrange.c +M src/externals/pio2/src/clib/pio_spmd.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/ncint_mod.F90 +M src/externals/pio2/src/flib/pio.F90 +M src/externals/pio2/src/flib/pio_nf.F90 +M src/externals/pio2/src/flib/pio_types.F90 +M src/externals/pio2/src/flib/piolib_mod.F90 +M src/externals/pio2/src/flib/pionfget_mod.F90.in +M src/externals/pio2/tests/cunit/CMakeLists.txt +M src/externals/pio2/tests/cunit/test_async_1d.c +M src/externals/pio2/tests/cunit/test_async_3proc.c +M src/externals/pio2/tests/cunit/test_async_4proc.c +M src/externals/pio2/tests/cunit/test_async_manyproc.c +M src/externals/pio2/tests/cunit/test_async_multi2.c +M src/externals/pio2/tests/cunit/test_async_multicomp.c +M src/externals/pio2/tests/cunit/test_async_perf.c +M src/externals/pio2/tests/cunit/test_async_simple.c +M src/externals/pio2/tests/cunit/test_darray_2sync.c +M src/externals/pio2/tests/cunit/test_darray_async.c +A src/externals/pio2/tests/cunit/test_darray_async_from_comm.c +M src/externals/pio2/tests/cunit/test_darray_async_many.c +M src/externals/pio2/tests/cunit/test_darray_async_simple.c +M src/externals/pio2/tests/cunit/test_perf2.c +M src/externals/pio2/tests/cunit/test_rearr.c +M src/externals/pio2/tests/fncint/ftst_pio.f90 +M src/externals/pio2/tests/general/CMakeLists.txt +M src/externals/pio2/tests/general/Makefile.am +M src/externals/pio2/tests/general/ncdf_get_put.F90.in +A src/externals/pio2/tests/general/pio_decomp_tests_2d_async.F90.in +A src/externals/pio2/tests/general/pio_decomp_tests_2d_halo.F90.in +A src/externals/pio2/tests/general/pio_decomphalo_tests_2d.F90.in +A src/externals/pio2/tests/general/pio_iosystem_async_tests.F90.in +M src/externals/pio2/tests/general/run_tests.sh.in +M src/externals/pio2/tests/general/util/pio_tf_f90gen.pl +M src/externals/pio2/tests/general/util/pio_tutil.F90 +M src/externals/pio2/tests/ncint/pio_err_macros.h +M src/externals/pio2/tests/performance/pioperformance.F90 +M src/share/nuopc/seq_drydep_mod.F90 +M src/share/streams/shr_dmodel_mod.F90 +M src/share/streams/shr_strdata_mod.F90 +M src/share/unit_test_stubs/pio/pio.F90.in +M src/share/util/shr_pio_mod.F90 +A tools/statistical_ensemble_test/pyCECT/CHANGES.rst +M tools/statistical_ensemble_test/pyCECT/EET.py +M tools/statistical_ensemble_test/pyCECT/LICENSE.txt +A tools/statistical_ensemble_test/pyCECT/README.rst +D tools/statistical_ensemble_test/pyCECT/README_pyCECT.rst +D tools/statistical_ensemble_test/pyCECT/README_pyEnsSum.rst +D 
tools/statistical_ensemble_test/pyCECT/README_pyEnsSumPop.rst +D tools/statistical_ensemble_test/pyCECT/beta06_ens_excluded_varlist.json +D tools/statistical_ensemble_test/pyCECT/beta06_ens_excluded_varlist_a.json +A tools/statistical_ensemble_test/pyCECT/docs/Makefile +A tools/statistical_ensemble_test/pyCECT/docs/conf.py +A tools/statistical_ensemble_test/pyCECT/docs/index.rst +A tools/statistical_ensemble_test/pyCECT/docs/make.bat +A tools/statistical_ensemble_test/pyCECT/docs/requirements.txt +A tools/statistical_ensemble_test/pyCECT/docs/source/installation.rst +A tools/statistical_ensemble_test/pyCECT/docs/source/pyCECT.rst +A tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSum.rst +A tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSumPop.rst +A tools/statistical_ensemble_test/pyCECT/docs/source/readme.rst +R100 tools/statistical_ensemble_test/pyCECT/exclude_empty.json tools/statistical_ensemble_test/pyCECT/empty_excluded.json +D tools/statistical_ensemble_test/pyCECT/ens_excluded_varlist.json +D tools/statistical_ensemble_test/pyCECT/ens_sub.pbs +D tools/statistical_ensemble_test/pyCECT/ens_sub.sh +A tools/statistical_ensemble_test/pyCECT/excluded_varlist.json +D tools/statistical_ensemble_test/pyCECT/gm_cumul.sh +M tools/statistical_ensemble_test/pyCECT/pyCECT.py +M tools/statistical_ensemble_test/pyCECT/pyEnsLib.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSum.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py +A tools/statistical_ensemble_test/pyCECT/pyPlots.py +A tools/statistical_ensemble_test/pyCECT/readthedocs.yml +M tools/statistical_ensemble_test/pyCECT/test_pop_CECT.sh +A tools/statistical_ensemble_test/pyCECT/test_pyEnsSum.sh +M tools/statistical_ensemble_test/pyCECT/test_pyEnsSumPop.sh +A tools/statistical_ensemble_test/pyCECT/test_uf_cam_ect.sh +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 12-11-2020 +Tag: cime5.8.36 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Add a useful check to wait_for_tests. + - Fix issue with time variable in ha files. + - Revert "Merge pull request #3788 from ESMCI/azamat/driver/mem-usage-logging." + - Add memory usage logging for memory profiling. + - Update OS process id error-checking in GPTL's get_memusage. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +a0fb01a33 Merge pull request #3796 from ESMCI/jgfouca/wait_for_tests_expect +7c9523ef3 Merge pull request #3794 from jedwards4b/cplhistavg_fix +c258cdf08 Merge pull request #3791 from ESMCI/jgfouca/revert_mem-usage-logging +9c64b5ac2 Merge pull request #3788 from ESMCI/azamat/driver/mem-usage-logging +5cef5558c Merge pull request #3781 from ESMCI/azamat/gptl/increase-pid-ceiling + +Modified files: git diff --name-status [previous_tag] +A .github/workflows/srt.yml +D .travis.yml +M ChangeLog +M config/cesm/config_archive.xml +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/config_grids_mct.xml +M config/cesm/config_grids_nuopc.xml +M config/cesm/config_inputdata.xml +M config/cesm/machines/Depends.intel +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M config/config_tests.xml +D config/e3sm/allactive/config_compsets.xml +D config/e3sm/allactive/config_pesall.xml +D config/e3sm/allactive/testlist_allactive.xml +D config/e3sm/config_archive.xml +M config/e3sm/config_files.xml +D config/e3sm/config_grids.xml +D config/e3sm/config_inputdata.xml +D config/e3sm/machines/Depends.cray +D config/e3sm/machines/Depends.cray.cmake +D config/e3sm/machines/Depends.gnu +D config/e3sm/machines/Depends.gnu.cmake +D config/e3sm/machines/Depends.ibm +D config/e3sm/machines/Depends.ibm.cmake +D config/e3sm/machines/Depends.intel +D config/e3sm/machines/Depends.intel.cmake +D config/e3sm/machines/Depends.nag +D config/e3sm/machines/Depends.nag.cmake +D config/e3sm/machines/Depends.summit.cmake +D config/e3sm/machines/Depends.summit.pgiacc +D config/e3sm/machines/Depends.summit.pgiacc.cmake +D config/e3sm/machines/Depends.summit.pgigpu +D config/e3sm/machines/Depends.summit.pgigpu.cmake +D config/e3sm/machines/Depends.summitdev.pgiacc +D config/e3sm/machines/Depends.summitdev.pgiacc.cmake +D config/e3sm/machines/README +D config/e3sm/machines/config_batch.xml +D config/e3sm/machines/config_compilers.xml +D config/e3sm/machines/config_machines.xml +D config/e3sm/machines/config_pio.xml +D config/e3sm/machines/config_workflow.xml +D config/e3sm/machines/syslog.anvil +D config/e3sm/machines/syslog.compy +D config/e3sm/machines/syslog.cori-haswell +D config/e3sm/machines/syslog.cori-knl +D config/e3sm/machines/syslog.noop +D config/e3sm/machines/syslog.summit +D config/e3sm/machines/syslog.theta +D config/e3sm/machines/syslog.titan +D config/e3sm/machines/template.case.run +D config/e3sm/machines/template.case.run.sh +D config/e3sm/machines/template.case.test +D config/e3sm/machines/template.ocn_diagnostics +D config/e3sm/machines/template.st_archive +D config/e3sm/machines/template.timeseries +D config/e3sm/machines/userdefined_laptop_template/README.md +D config/e3sm/machines/userdefined_laptop_template/config_compilers.xml +D config/e3sm/machines/userdefined_laptop_template/config_machines.xml +D config/e3sm/machines/userdefined_laptop_template/config_pes.xml +D config/e3sm/testmods_dirs/allactive/force_netcdf_pio/shell_commands +D config/e3sm/testmods_dirs/allactive/mach/pet/shell_commands +D config/e3sm/testmods_dirs/allactive/mach_mods/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgc/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_clm +D 
config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_clm +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1bgceca/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_clm +D config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_clm +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1cmip6/README +D config/e3sm/testmods_dirs/allactive/v1cmip6/shell_commands +D config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_clm +D config/e3sm/testmods_dirs/bench/gmpas_noio/shell_commands +D config/e3sm/testmods_dirs/bench/gmpas_noio/user_nl_mpaso +D config/e3sm/testmods_dirs/bench/gmpas_noio/user_nl_mpassi +D config/e3sm/testmods_dirs/bench/wcycl/hires/shell_commands +D config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_cam +D config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_mpaso +D config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_mpassi +D config/e3sm/testmods_dirs/bench/wcycl/lores/shell_commands +D config/e3sm/testmods_dirs/bench/wcycl/lores/user_nl_cam +D config/e3sm/testmods_dirs/bench/wcycl/lores/user_nl_clm +D config/e3sm/tests.py +D config/e3sm/usermods_dirs/README +M config/ufs/config_files.xml +M config/ufs/config_grids.xml +M config/ufs/config_inputdata.xml +M config/ufs/machines/config_compilers.xml +M config/ufs/machines/config_machines.xml +M config/xml_schemas/config_compilers_v2.xsd +M config/xml_schemas/config_machines.xsd +M config/xml_schemas/entry_id_base.xsd +M config/xml_schemas/entry_id_base_version3.xsd +M config/xml_schemas/env_mach_specific.xsd +M config/xml_schemas/testlist.xsd +M scripts/Tools/Makefile +M scripts/Tools/case.build +D scripts/Tools/e3sm_cime_merge +D scripts/Tools/e3sm_cime_split +A scripts/Tools/e3sm_compile_wrap.py +M scripts/Tools/jenkins_generic_job +A scripts/Tools/mvsource +M scripts/Tools/preview_run +M scripts/Tools/xmlchange +M scripts/create_newcase +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/archive.py +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/env_test.py +M scripts/lib/CIME/XML/env_workflow.py +M scripts/lib/CIME/XML/files.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/XML/pes.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/buildnml.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/simple_compare.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +M 
scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +D scripts/lib/e3sm_cime_mgmt.py +M scripts/lib/get_tests.py +M scripts/tests/CTestConfig.cmake +M scripts/tests/scripts_regression_tests.py +M scripts/tests/user_mods_test3/shell_commands +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.internal_components +M src/build_scripts/buildlib.pio +M src/build_scripts/buildlib_cmake.internal_components +M src/components/data_comps_mct/datm/cime_config/buildnml +M src/components/data_comps_mct/datm/cime_config/config_component.xml +M src/components/data_comps_mct/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps_mct/datm/src/datm_comp_mod.F90 +M src/components/data_comps_mct/datm/src/datm_shr_mod.F90 +M src/components/data_comps_mct/dlnd/cime_config/config_component.xml +M src/components/data_comps_mct/dlnd/cime_config/namelist_definition_dlnd.xml +M src/components/data_comps_mct/dlnd/src/dlnd_comp_mod.F90 +M src/components/data_comps_mct/dlnd/src/dlnd_shr_mod.F90 +M src/components/data_comps_mct/docn/cime_config/config_component.xml +M src/components/data_comps_mct/docn/src/docn_comp_mod.F90 +M src/components/data_comps_mct/docn/src/ocn_comp_mct.F90 +M src/components/data_comps_mct/drof/cime_config/config_component.xml +M src/components/data_comps_mct/drof/cime_config/namelist_definition_drof.xml +D src/components/data_comps_nuopc/datm/cime_config/buildlib +D src/components/data_comps_nuopc/datm/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/datm/cime_config/buildnml +D src/components/data_comps_nuopc/datm/cime_config/config_archive.xml +D src/components/data_comps_nuopc/datm/cime_config/config_component.xml +D src/components/data_comps_nuopc/datm/cime_config/namelist_definition_datm.xml +D src/components/data_comps_nuopc/datm/cime_config/user_nl_datm +D src/components/data_comps_nuopc/datm/src/atm_comp_nuopc.F90 +D src/components/data_comps_nuopc/dice/cime_config/buildlib +D src/components/data_comps_nuopc/dice/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/dice/cime_config/buildnml +D src/components/data_comps_nuopc/dice/cime_config/config_archive.xml +D src/components/data_comps_nuopc/dice/cime_config/config_component.xml +D src/components/data_comps_nuopc/dice/cime_config/namelist_definition_dice.xml +D src/components/data_comps_nuopc/dice/cime_config/user_nl_dice +D src/components/data_comps_nuopc/dice/src/dice_flux_atmice_mod.F90 +D src/components/data_comps_nuopc/dice/src/ice_comp_nuopc.F90 +D src/components/data_comps_nuopc/dlnd/cime_config/buildlib +D src/components/data_comps_nuopc/dlnd/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/dlnd/cime_config/buildnml +D src/components/data_comps_nuopc/dlnd/cime_config/config_archive.xml +D src/components/data_comps_nuopc/dlnd/cime_config/config_component.xml +D src/components/data_comps_nuopc/dlnd/cime_config/namelist_definition_dlnd.xml +D src/components/data_comps_nuopc/dlnd/cime_config/user_nl_dlnd +D src/components/data_comps_nuopc/dlnd/src/lnd_comp_nuopc.F90 +D src/components/data_comps_nuopc/docn/cime_config/buildlib +D src/components/data_comps_nuopc/docn/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/docn/cime_config/buildnml +D src/components/data_comps_nuopc/docn/cime_config/config_archive.xml +D src/components/data_comps_nuopc/docn/cime_config/config_component.xml +D src/components/data_comps_nuopc/docn/cime_config/namelist_definition_docn.xml +D src/components/data_comps_nuopc/docn/cime_config/user_nl_docn +D 
src/components/data_comps_nuopc/docn/src/ocn_comp_nuopc.F90 +D src/components/data_comps_nuopc/drof/cime_config/buildlib +D src/components/data_comps_nuopc/drof/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/drof/cime_config/buildnml +D src/components/data_comps_nuopc/drof/cime_config/config_archive.xml +D src/components/data_comps_nuopc/drof/cime_config/config_component.xml +D src/components/data_comps_nuopc/drof/cime_config/namelist_definition_drof.xml +D src/components/data_comps_nuopc/drof/cime_config/user_nl_drof +D src/components/data_comps_nuopc/drof/src/rof_comp_nuopc.F90 +D src/components/data_comps_nuopc/dshr_nuopc/dshr_dfield_mod.F90 +D src/components/data_comps_nuopc/dshr_nuopc/dshr_fldlist_mod.F90 +D src/components/data_comps_nuopc/dshr_nuopc/dshr_mod.F90 +D src/components/data_comps_nuopc/dwav/cime_config/buildlib +D src/components/data_comps_nuopc/dwav/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/dwav/cime_config/buildnml +D src/components/data_comps_nuopc/dwav/cime_config/config_archive.xml +D src/components/data_comps_nuopc/dwav/cime_config/config_component.xml +D src/components/data_comps_nuopc/dwav/cime_config/namelist_definition_dwav.xml +D src/components/data_comps_nuopc/dwav/cime_config/user_nl_dwav +D src/components/data_comps_nuopc/dwav/src/wav_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 +M src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 +M src/drivers/mct/cime_config/buildlib_cmake +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/config_component_e3sm.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/cime_driver.F90 +M src/drivers/mct/main/prep_rof_mod.F90 +M src/drivers/mct/main/seq_hist_mod.F90 +M src/drivers/mct/main/seq_io_mod.F90 +M src/drivers/mct/shr/seq_drydep_mod.F90 +M src/drivers/mct/shr/seq_flds_mod.F90 +M src/drivers/mct/shr/seq_infodata_mod.F90 +M src/externals/mct/configure +M src/externals/mct/mpeu/m_inpak90.F90 +M src/externals/mct/mpeu/m_ioutil.F90 +M src/externals/mct/mpi-serial/configure +A src/externals/pio2/.github/workflows/a3.yml +A src/externals/pio2/.github/workflows/a4.yml +A src/externals/pio2/.github/workflows/autotools.yml +A src/externals/pio2/.github/workflows/cmake.yml +A src/externals/pio2/.github/workflows/cmake_ncint.yml +A src/externals/pio2/.github/workflows/strict_autotools.yml +M src/externals/pio2/.gitignore +M src/externals/pio2/.travis.yml +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/Makefile.am +M src/externals/pio2/cmake/LibMPI.cmake +M src/externals/pio2/cmake_config.h.in +M src/externals/pio2/configure.ac +M src/externals/pio2/ctest/runcdash-nwscla-gnu.sh +M src/externals/pio2/ctest/runcdash-nwscla-intel.sh +M src/externals/pio2/ctest/runcdash-nwscla-pgi.sh +M src/externals/pio2/examples/basic/gdecomp_mod.F90 +M src/externals/pio2/examples/basic/testpio.F90 +M src/externals/pio2/examples/c/Makefile.am +M src/externals/pio2/examples/c/example2.c +R087 
src/externals/pio2/examples/c/run_tests.sh src/externals/pio2/examples/c/run_tests.sh.in +M src/externals/pio2/examples/f03/CMakeLists.txt +M src/externals/pio2/examples/f03/Makefile.am +R099 src/externals/pio2/examples/f03/examplePio.f90 src/externals/pio2/examples/f03/examplePio.F90 +R091 src/externals/pio2/examples/f03/run_tests.sh src/externals/pio2/examples/f03/run_tests.sh.in +A src/externals/pio2/libpio.settings.in +M src/externals/pio2/scripts/genf90.pl +D src/externals/pio2/set_flags.am +M src/externals/pio2/src/CMakeLists.txt +M src/externals/pio2/src/clib/CMakeLists.txt +M src/externals/pio2/src/clib/Makefile.am +D src/externals/pio2/src/clib/bget.c +D src/externals/pio2/src/clib/bget.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_darray_int.c +A src/externals/pio2/src/clib/pio_error.c +M src/externals/pio2/src/clib/pio_error.h +M src/externals/pio2/src/clib/pio_file.c +M src/externals/pio2/src/clib/pio_getput_int.c +M src/externals/pio2/src/clib/pio_internal.h +A src/externals/pio2/src/clib/pio_meta.h.in +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_nc4.c +M src/externals/pio2/src/clib/pio_rearrange.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/CMakeLists.txt +M src/externals/pio2/src/flib/Makefile.am +M src/externals/pio2/src/flib/pio.F90 +M src/externals/pio2/src/flib/pio_kinds.F90 +M src/externals/pio2/src/flib/pio_nf.F90 +M src/externals/pio2/src/flib/pio_support.F90 +M src/externals/pio2/src/flib/pio_types.F90 +M src/externals/pio2/src/flib/piodarray.F90.in +M src/externals/pio2/src/flib/piolib_mod.F90 +M src/externals/pio2/src/flib/pionfatt_mod.F90.in +M src/externals/pio2/src/flib/pionfget_mod.F90.in +M src/externals/pio2/src/flib/pionfput_mod.F90.in +M src/externals/pio2/src/gptl/CMakeLists.txt +M src/externals/pio2/src/ncint/ncintdispatch.c +M src/externals/pio2/src/ncint/ncintdispatch.h +M src/externals/pio2/tests/CMakeLists.txt +M src/externals/pio2/tests/cunit/CMakeLists.txt +M src/externals/pio2/tests/cunit/Makefile.am +M src/externals/pio2/tests/cunit/pio_tests.h +R078 src/externals/pio2/tests/cunit/run_tests.sh src/externals/pio2/tests/cunit/run_tests.sh.in +M src/externals/pio2/tests/cunit/test_async_3proc.c +M src/externals/pio2/tests/cunit/test_async_4proc.c +M src/externals/pio2/tests/cunit/test_async_simple.c +M src/externals/pio2/tests/cunit/test_common.c +A src/externals/pio2/tests/cunit/test_darray_append.c +M src/externals/pio2/tests/cunit/test_darray_frame.c +M src/externals/pio2/tests/cunit/test_darray_multivar3.c +M src/externals/pio2/tests/cunit/test_iosystem3.c +M src/externals/pio2/tests/cunit/test_pioc.c +M src/externals/pio2/tests/cunit/test_pioc_fill.c +M src/externals/pio2/tests/cunit/test_pioc_putget.c +A src/externals/pio2/tests/cunit/test_simple.c +M src/externals/pio2/tests/fncint/Makefile.am +R090 src/externals/pio2/tests/fncint/run_tests.sh src/externals/pio2/tests/fncint/run_tests.sh.in +M src/externals/pio2/tests/general/CMakeLists.txt +M src/externals/pio2/tests/general/Makefile.am +M src/externals/pio2/tests/general/ncdf_fail.F90.in +M src/externals/pio2/tests/general/ncdf_get_put.F90.in +M src/externals/pio2/tests/general/ncdf_inq.F90.in +M src/externals/pio2/tests/general/ncdf_simple_tests.F90.in +M src/externals/pio2/tests/general/pio_decomp_fillval.F90.in +M src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +M 
src/externals/pio2/tests/general/pio_decomp_tests.F90.in +M src/externals/pio2/tests/general/pio_decomp_tests_1d.F90.in +M src/externals/pio2/tests/general/pio_decomp_tests_2d.F90.in +M src/externals/pio2/tests/general/pio_decomp_tests_3d.F90.in +M src/externals/pio2/tests/general/pio_fail.F90.in +M src/externals/pio2/tests/general/pio_file_fail.F90.in +M src/externals/pio2/tests/general/pio_file_simple_tests.F90.in +M src/externals/pio2/tests/general/pio_init_finalize.F90.in +M src/externals/pio2/tests/general/pio_iosystem_tests.F90.in +M src/externals/pio2/tests/general/pio_iosystem_tests2.F90.in +M src/externals/pio2/tests/general/pio_iosystem_tests3.F90.in +M src/externals/pio2/tests/general/pio_rearr.F90.in +M src/externals/pio2/tests/general/pio_rearr_opts.F90.in +M src/externals/pio2/tests/general/pio_rearr_opts2.F90.in +R094 src/externals/pio2/tests/general/run_tests.sh src/externals/pio2/tests/general/run_tests.sh.in +M src/externals/pio2/tests/general/util/pio_tf_f90gen.pl +M src/externals/pio2/tests/general/util/pio_tutil.F90 +A src/externals/pio2/tests/ncint/CMakeLists.txt +M src/externals/pio2/tests/ncint/Makefile.am +R091 src/externals/pio2/tests/ncint/run_perf.sh src/externals/pio2/tests/ncint/run_perf.sh.in +R079 src/externals/pio2/tests/ncint/run_tests.sh src/externals/pio2/tests/ncint/run_tests.sh.in +M src/externals/pio2/tests/ncint/tst_ncint_async_perf.c +M src/externals/pio2/tests/ncint/tst_ncint_perf.c +A src/externals/pio2/tests/ncint/tst_var_compress.c +M src/externals/pio2/tests/performance/Makefile.am +M src/externals/pio2/tests/performance/pioperformance.F90 +R090 src/externals/pio2/tests/performance/run_tests.sh src/externals/pio2/tests/performance/run_tests.sh.in +M src/externals/pio2/tests/unit/CMakeLists.txt +M src/externals/pio2/tests/unit/Makefile.am +M src/externals/pio2/tests/unit/basic_tests.F90 +M src/externals/pio2/tests/unit/driver.F90 +A src/externals/pio2/tests/unit/ftst_vars_chunking.F90 +M src/externals/pio2/tests/unit/global_vars.F90 +M src/externals/pio2/tests/unit/ncdf_tests.F90 +R081 src/externals/pio2/tests/unit/run_tests.sh src/externals/pio2/tests/unit/run_tests.sh.in +M src/share/nuopc/seq_drydep_mod.F90 +M src/share/streams/shr_dmodel_mod.F90 +M src/share/streams/shr_strdata_mod.F90 +M src/share/streams/shr_stream_mod.F90 +D src/share/streams_nuopc/dshr_methods_mod.F90 +D src/share/streams_nuopc/dshr_strdata_mod.F90 +D src/share/streams_nuopc/dshr_stream_mod.F90 +D src/share/streams_nuopc/dshr_tInterp_mod.F90 +M src/share/timing/GPTLget_memusage.c +M src/share/timing/Makefile +M src/share/timing/perf_mod.F90 +M src/share/util/shr_cal_mod.F90 +M src/share/util/shr_kind_mod.F90 +M src/share/util/shr_pio_mod.F90 +M src/share/util/shr_scam_mod.F90 +M src/share/util/shr_string_mod.F90 +M tools/cprnc/CMakeLists.txt +M tools/cprnc/compare_vars_mod.F90.in +M tools/cprnc/run_tests +M tools/cprnc/test_inputs/README +A tools/cprnc/test_inputs/control_int.nc +A tools/cprnc/test_inputs/int_missing.nc +M tools/statistical_ensemble_test/ensemble.py +M tools/statistical_ensemble_test/single_run.py + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 12-7-2020 +Tag: cime5.8.35 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Make the default pio_version = 2. + - Changes for nuopc xcpl_comps needed for new ice sheets capability. + - Print TPUTCOMP difference to TestStatus.log. 
+ - Correct typo in pgi module on cheyenne. + - Changes to implement ESMF aware threading in the nuopc driver. + - Add back pio version cpp. + - Fix pgi nuopc build issue. + - More e3sm build upgrades. + - Fix some build issues, for nuopc and cam_dycore. + - Build nuopc_cap with csm_share. + - Update seq_io_write_time to work for CPLHIST run. + - Fix python3 incompatibilities in case.py. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +184f5886f Merge pull request #3732 from jedwards4b/change_pio_default_version +bfe14e4aa Merge pull request #3787 from ESMCI/mvertens/icesheets +ca6712bb8 Merge pull request #3786 from billsacks/log_tputcomp +aff5b52e2 correct typo in pgi module on cheyenne +13996b82d Merge pull request #3784 from jedwards4b/nuopc_timing_cheyenne_esmf +1bcacca70 add back pio version cpp +073f83989 fix pgi nuopc build issue +55b6096ec Merge pull request #3782 from ESMCI/jgfouca/more_e3sm_build_upgrades +c06710482 Merge pull request #3779 from jedwards4b/fix_bld_issues +9db73e464 Merge pull request #3777 from jedwards4b/add_cap_share_to_csm +e23562c59 Merge pull request #3754 from ESMCI/jonbob/update-seq-io-write-time +1b7eb3e18 Merge pull request #3765 from johnsonb-ucar/fix_python3_incompatibility_in_case + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M config/xml_schemas/config_machines.xsd +M config/xml_schemas/env_mach_specific.xsd +M scripts/Tools/Makefile +M scripts/Tools/preview_run +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/provenance.py +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.pio +M src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/main/seq_hist_mod.F90 +M src/drivers/mct/main/seq_io_mod.F90 +A src/externals/pio2/.github/workflows/a3.yml +A src/externals/pio2/.github/workflows/a4.yml +M src/externals/pio2/.github/workflows/autotools.yml +M src/externals/pio2/.github/workflows/cmake.yml +A src/externals/pio2/.github/workflows/cmake_ncint.yml +M src/externals/pio2/.github/workflows/strict_autotools.yml +M src/externals/pio2/.travis.yml +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/Makefile.am +M src/externals/pio2/cmake/LibMPI.cmake +M src/externals/pio2/cmake_config.h.in +M src/externals/pio2/configure.ac +M src/externals/pio2/ctest/runcdash-nwscla-gnu.sh +M src/externals/pio2/ctest/runcdash-nwscla-intel.sh +M src/externals/pio2/ctest/runcdash-nwscla-pgi.sh +M src/externals/pio2/examples/basic/gdecomp_mod.F90 +M src/externals/pio2/examples/basic/testpio.F90 +M src/externals/pio2/examples/c/Makefile.am +R087 src/externals/pio2/examples/c/run_tests.sh src/externals/pio2/examples/c/run_tests.sh.in +M src/externals/pio2/examples/f03/CMakeLists.txt +M src/externals/pio2/examples/f03/Makefile.am +R099 
src/externals/pio2/examples/f03/examplePio.f90 src/externals/pio2/examples/f03/examplePio.F90 +R091 src/externals/pio2/examples/f03/run_tests.sh src/externals/pio2/examples/f03/run_tests.sh.in +M src/externals/pio2/libpio.settings.in +D src/externals/pio2/set_flags.am +M src/externals/pio2/src/clib/CMakeLists.txt +M src/externals/pio2/src/clib/Makefile.am +D src/externals/pio2/src/clib/bget.c +D src/externals/pio2/src/clib/bget.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_darray_int.c +M src/externals/pio2/src/clib/pio_file.c +M src/externals/pio2/src/clib/pio_getput_int.c +M src/externals/pio2/src/clib/pio_internal.h +M src/externals/pio2/src/clib/pio_meta.h.in +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_nc4.c +M src/externals/pio2/src/clib/pio_rearrange.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/CMakeLists.txt +M src/externals/pio2/src/flib/Makefile.am +M src/externals/pio2/src/flib/pio.F90 +M src/externals/pio2/src/flib/pio_kinds.F90 +M src/externals/pio2/src/flib/pio_nf.F90 +M src/externals/pio2/src/flib/pio_support.F90 +M src/externals/pio2/src/flib/pio_types.F90 +M src/externals/pio2/src/flib/piodarray.F90.in +M src/externals/pio2/src/flib/piolib_mod.F90 +M src/externals/pio2/src/flib/pionfatt_mod.F90.in +M src/externals/pio2/src/flib/pionfget_mod.F90.in +M src/externals/pio2/src/flib/pionfput_mod.F90.in +M src/externals/pio2/src/gptl/CMakeLists.txt +M src/externals/pio2/src/ncint/ncintdispatch.c +M src/externals/pio2/src/ncint/ncintdispatch.h +M src/externals/pio2/tests/CMakeLists.txt +M src/externals/pio2/tests/cunit/CMakeLists.txt +M src/externals/pio2/tests/cunit/Makefile.am +M src/externals/pio2/tests/cunit/pio_tests.h +R089 src/externals/pio2/tests/cunit/run_tests.sh src/externals/pio2/tests/cunit/run_tests.sh.in +M src/externals/pio2/tests/cunit/test_common.c +M src/externals/pio2/tests/cunit/test_darray_append.c +M src/externals/pio2/tests/cunit/test_darray_frame.c +M src/externals/pio2/tests/cunit/test_iosystem3.c +M src/externals/pio2/tests/cunit/test_pioc.c +A src/externals/pio2/tests/cunit/test_simple.c +M src/externals/pio2/tests/fncint/Makefile.am +R090 src/externals/pio2/tests/fncint/run_tests.sh src/externals/pio2/tests/fncint/run_tests.sh.in +M src/externals/pio2/tests/general/Makefile.am +M src/externals/pio2/tests/general/ncdf_fail.F90.in +M src/externals/pio2/tests/general/ncdf_get_put.F90.in +M src/externals/pio2/tests/general/ncdf_inq.F90.in +M src/externals/pio2/tests/general/ncdf_simple_tests.F90.in +M src/externals/pio2/tests/general/pio_decomp_fillval.F90.in +M src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +M src/externals/pio2/tests/general/pio_decomp_tests.F90.in +M src/externals/pio2/tests/general/pio_decomp_tests_1d.F90.in +M src/externals/pio2/tests/general/pio_decomp_tests_2d.F90.in +M src/externals/pio2/tests/general/pio_decomp_tests_3d.F90.in +M src/externals/pio2/tests/general/pio_fail.F90.in +M src/externals/pio2/tests/general/pio_file_fail.F90.in +M src/externals/pio2/tests/general/pio_file_simple_tests.F90.in +M src/externals/pio2/tests/general/pio_init_finalize.F90.in +M src/externals/pio2/tests/general/pio_iosystem_tests.F90.in +M src/externals/pio2/tests/general/pio_iosystem_tests2.F90.in +M src/externals/pio2/tests/general/pio_iosystem_tests3.F90.in +M src/externals/pio2/tests/general/pio_rearr.F90.in +M src/externals/pio2/tests/general/pio_rearr_opts.F90.in +M 
src/externals/pio2/tests/general/pio_rearr_opts2.F90.in +R094 src/externals/pio2/tests/general/run_tests.sh src/externals/pio2/tests/general/run_tests.sh.in +M src/externals/pio2/tests/general/util/pio_tf_f90gen.pl +M src/externals/pio2/tests/general/util/pio_tutil.F90 +A src/externals/pio2/tests/ncint/CMakeLists.txt +M src/externals/pio2/tests/ncint/Makefile.am +R091 src/externals/pio2/tests/ncint/run_perf.sh src/externals/pio2/tests/ncint/run_perf.sh.in +R079 src/externals/pio2/tests/ncint/run_tests.sh src/externals/pio2/tests/ncint/run_tests.sh.in +M src/externals/pio2/tests/ncint/tst_ncint_async_perf.c +M src/externals/pio2/tests/ncint/tst_ncint_perf.c +A src/externals/pio2/tests/ncint/tst_var_compress.c +M src/externals/pio2/tests/performance/Makefile.am +M src/externals/pio2/tests/performance/pioperformance.F90 +R090 src/externals/pio2/tests/performance/run_tests.sh src/externals/pio2/tests/performance/run_tests.sh.in +M src/externals/pio2/tests/unit/CMakeLists.txt +M src/externals/pio2/tests/unit/Makefile.am +M src/externals/pio2/tests/unit/basic_tests.F90 +M src/externals/pio2/tests/unit/driver.F90 +A src/externals/pio2/tests/unit/ftst_vars_chunking.F90 +M src/externals/pio2/tests/unit/global_vars.F90 +M src/externals/pio2/tests/unit/ncdf_tests.F90 +R081 src/externals/pio2/tests/unit/run_tests.sh src/externals/pio2/tests/unit/run_tests.sh.in +M src/share/timing/perf_mod.F90 +M src/share/util/shr_pio_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 11-12-2020 +Tag: cime5.8.34 +Answer Changes: None +Tests: code-checker, ERP_Ln9.f09_f09_mg17.F2000climo.cheyenne_intel.cam-outfrq9s_mg3 + github actions, scripts_regression_tests.py, P_TestJenkinsGenericJob +Dependencies: + +Brief Summary: + - MPAS build fix for nag and pgi. + - Remove timing flags for e3sm build. + - New build wrapper for e3sm. + - More build updates for e3sm. + - Revert one e3sm-only line change for timing. + - Avoid building CDEPS with CTSM's LILAC. + - Add ability to add timings to all build operations for E3SM. + - Rename _separate_builds to _user_separate_builds. + - Support separate_builds for tests. + - Add an error check for separate builds. + - Merge branch 'master'. + - Merge branch 'master'. + - Fix merge issue. + - Remove the flags that fail the ERP test. + - Fix E3SM nightly TSC test failure. + - Revert "Use enhanced compiler flags for PUMAS/MG3 related F90 codes". + - Use enhanced compiler flags for PUMAS/MG3 related F90 codes. + - Create srt.yml. + - ESMF and pgi cheyenne update. + - Correctly attribute scorpio timers in cime_pre_init2. + - Fix test Y for nuopc driver. + - Add nuopc tests to cdash. + - Fix hang in TESTRUNDIFFRESUBMIT. + - Fix for pop ecosys rh files. + - Merge branch 'master'. + - Update ESMF on cheyenne. + - Change name ccsm cppdefs. + - Fixes Y_TestUserConcurrentMods by adding a more-robust waiting scheme. + - Add a no-submit option to jenkins_generic_job. + - In MEMLEAK checks, skip first day memory highwater while initializing. + - Some updates to E3SM test infrastructure. + - Add EAM case for 20TR CMIP6 docn component. + - Fix check_input_data to fail over to svn when a wget download fails. + - Change permissions on ref case rpointer files. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +726e53cf7 Merge pull request #3775 from ESMCI/fischer/MPAS_build +a64bdaef6 Merge pull request #3774 from ESMCI/jgfouca/remove_timing +f80d0d085 Merge pull request #3773 from ESMCI/jgfouca/e3sm_compile_wrap +e8bda07ed Merge pull request #3772 from ESMCI/jgfouca/more_build_updates +c23554bdd Revert one e3sm-only line change for timing +9d4b101a1 Merge pull request #3770 from billsacks/lilac_no_cdeps_v2 +7855ee406 Merge pull request #3767 from ESMCI/jgfouca/e3sm_build_timings +12d98a4a5 Merge pull request #3764 from ESMCI/jgfouca/another_build_fix + +5316eabdf Merge pull request #3761 from ESMCI/jgfouca/sep_builds_for_tests +c75b863a7 Merge pull request #3760 from ESMCI/jgfouca/add_error_check_sep_builds +798bea6b9 Merge branch 'master' of https://github.com/ESMCI/cime +775b67815 Merge branch 'master' of https://github.com/ESMCI/cime +0c0227c58 fix merge issue +3adf301d8 Merge pull request #3757 from sjsprecious/enhanced_compiler_flags +810e0e7a7 Merge pull request #3755 from ESMCI/mkstratos/fix_tsc_test +9df7abc1b Merge pull request #3753 from ESMCI/revert-3752-enhanced_compiler_flags +068b57aa4 Merge pull request #3752 from sjsprecious/enhanced_compiler_flags +d1fac4348 Merge pull request #3748 from ESMCI/add_github_action +10e20076d Merge pull request #3747 from ESMCI/esmf_and_pgi_cheyenne_update +7b9ba680a Merge pull request #3740 from ESMCI/worleyph/driver/fix-e3sm-issue-3886 +f7dfa69db Merge pull request #3746 from jedwards4b/nuopc_Y_testfix +0d621adf8 add nuopc tests to cdash +c892b0fec Merge pull request #3744 from ESMCI/jgfouca/fix_test_rundiff_resubmit_hang +634b33d3b fix for pop ecosys rh files +01b0e9f7b Merge branch 'master' of https://github.com/ESMCI/cime +4021365b2 update esmf on cheyenne +fb2a56cf5 Merge pull request #3737 from ESMCI/wpcoomb_change_name_CCSM_cppdefs +dffe4b939 Merge pull request #3739 from ESMCI/jgfouca/fix_current_mods_test +af7c1e726 Merge pull request #3738 from ESMCI/jgfouca/jenkins_no_submit +c97d17e47 Merge pull request #3736 from ESMCI/azamat/memleak-tests/skip-first-mem-highwater +83a941283 Merge pull request #3734 from ESMCI/jgfouca/update_e3sm_test_infra +83ed19271 Merge pull request #3729 from ESMCI/rljacob/docn/add-eam +64bdbbbdb Merge pull request #3730 from lukaszlacinski/lukaszlacinski/check_input_data +82ac63dc7 Merge pull request #3726 from jedwards4b/rpointer_perms + + +Modified files: git diff --name-status [previous_tag] +A .github/workflows/srt.yml +D .travis.yml +M ChangeLog +M config/cesm/config_archive.xml +M config/cesm/machines/Depends.intel +M config/cesm/machines/config_machines.xml +M config/config_tests.xml +M scripts/Tools/Makefile +M scripts/Tools/case.build +A scripts/Tools/e3sm_compile_wrap.py +M scripts/Tools/jenkins_generic_job +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/tests/CTestConfig.cmake +M scripts/tests/scripts_regression_tests.py +M scripts/tests/user_mods_test3/shell_commands +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.internal_components +M 
src/build_scripts/buildlib_cmake.internal_components +M src/components/data_comps_mct/docn/cime_config/config_component.xml +M src/drivers/mct/cime_config/buildlib_cmake +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/cime_driver.F90 + + +====================================================================== + + +====================================================================== + +Originator: Chris Fischer +Date: 11-2-2020 +Tag: cime5.8.33 +Answer Changes: None +Tests: code-checker, ERP_Ln9.f09_f09_mg17.F2000climo.cheyenne_intel.cam-outfrq9s_mg3 + github actions, scripts_regression_tests.py, P_TestJenkinsGenericJob +Dependencies: + +Brief Summary: + - Support separate_builds for tests. + - Add an error check for separate builds. + - Merge branch 'master'. + - Merge branch 'master'. + - Fix merge issue. + - Remove the flags that fail the ERP test. + - Fix E3SM nightly TSC test failure. + - Revert "Use enhanced compiler flags for PUMAS/MG3 related F90 codes". + - Use enhanced compiler flags for PUMAS/MG3 related F90 codes. + - Create srt.yml. + - ESMF and pgi cheyenne update. + - Correctly attribute scorpio timers in cime_pre_init2. + - Fix test Y for nuopc driver. + - Add nuopc tests to cdash. + - Fix hang in TESTRUNDIFFRESUBMIT. + - Fix for pop ecosys rh files. + - Merge branch 'master'. + - Update ESMF on cheyenne. + - Change name ccsm cppdefs. + - Fixes Y_TestUserConcurrentMods by adding a more-robust waiting scheme. + - Add a no-submit option to jenkins_generic_job. + - In MEMLEAK checks, skip first day memory highwater while initializing. + - Some updates to E3SM test infrastructure. + - Add EAM case for 20TR CMIP6 docn component. + - Fix check_input_data to fail over to svn when a wget download fails. + - Change permissions on ref case rpointer files. + - Add support for MPAS grids. + - Allow for different atm names in chem_mech file generation. + - Change name of cam to eam in E3SM. + - Add mpi_init timer and output memory usage before CIME Run loop. + - Fix path to FMS in CESM. + - Support csh source cmd. + - Skip preview namelist. + - Downgrade Intel Compiler for UFS applications. + - Add JRA Repeat Year Forcing data modes. + - Change name of clm to elm in E3SM. + - Update cmeps paths. + - Fix mct build issue. + - Fix path to cmeps buildexe. + - Fix path to cdeps buildlib for cesm. + - Update impi version for ufs model. + - Change name of clm to elm in E3SM. + - Change NCPL's for G-case default and grids EC30 to 60E2r2 and WC14 to 60E2r3. + - Sets coupling frequencies for new MPAS-Ocean grid in E3SM. + - Include submodule status in provenance. + - Cime changes required by the topounit implementation in E3SM. + - Update for UFS app.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +5316eabdf Merge pull request #3761 from ESMCI/jgfouca/sep_builds_for_tests +c75b863a7 Merge pull request #3760 from ESMCI/jgfouca/add_error_check_sep_builds +798bea6b9 Merge branch 'master' of https://github.com/ESMCI/cime +775b67815 Merge branch 'master' of https://github.com/ESMCI/cime +0c0227c58 fix merge issue +3adf301d8 Merge pull request #3757 from sjsprecious/enhanced_compiler_flags +810e0e7a7 Merge pull request #3755 from ESMCI/mkstratos/fix_tsc_test +9df7abc1b Merge pull request #3753 from ESMCI/revert-3752-enhanced_compiler_flags +068b57aa4 Merge pull request #3752 from sjsprecious/enhanced_compiler_flags +d1fac4348 Merge pull request #3748 from ESMCI/add_github_action +10e20076d Merge pull request #3747 from ESMCI/esmf_and_pgi_cheyenne_update +7b9ba680a Merge pull request #3740 from ESMCI/worleyph/driver/fix-e3sm-issue-3886 +f7dfa69db Merge pull request #3746 from jedwards4b/nuopc_Y_testfix +0d621adf8 add nuopc tests to cdash +c892b0fec Merge pull request #3744 from ESMCI/jgfouca/fix_test_rundiff_resubmit_hang +634b33d3b fix for pop ecosys rh files +01b0e9f7b Merge branch 'master' of https://github.com/ESMCI/cime +4021365b2 update esmf on cheyenne +fb2a56cf5 Merge pull request #3737 from ESMCI/wpcoomb_change_name_CCSM_cppdefs +dffe4b939 Merge pull request #3739 from ESMCI/jgfouca/fix_current_mods_test +af7c1e726 Merge pull request #3738 from ESMCI/jgfouca/jenkins_no_submit +c97d17e47 Merge pull request #3736 from ESMCI/azamat/memleak-tests/skip-first-mem-highwater +83a941283 Merge pull request #3734 from ESMCI/jgfouca/update_e3sm_test_infra +83ed19271 Merge pull request #3729 from ESMCI/rljacob/docn/add-eam +64bdbbbdb Merge pull request #3730 from lukaszlacinski/lukaszlacinski/check_input_data +82ac63dc7 Merge pull request #3726 from jedwards4b/rpointer_perms +14e85613b Merge pull request #3727 from ESMCI/fischer/mpas_bld_grds +7188df18b Merge pull request #3725 from jedwards4b/fix_atm_chem_mech +516f7c275 Merge pull request #3723 from ESMCI/rljacob/e3smconfig/eam-name +10004fe33 Merge pull request #3716 from ESMCI/worleyph/driver/init_profiling2 +19b07d7ce fix path to FMS in CESM +19c9e54d0 Merge pull request #3718 from jedwards4b/support_csh_source_cmd +2e25aa97d Merge pull request #3719 from jedwards4b/skip_preview_namelist +098ac2ea7 Merge pull request #3721 from ESMCI/ufs_down_intel +24996612a Merge pull request #3691 from alperaltuntas/add_jra_ryf +2d0d0a9d1 Merge pull request #3714 from ESMCI/rljacob/e3smconfig/elm-name +329f400c1 update cmeps paths +91904cc35 fix mct build issue +f3aaa582f fix path to cmeps buildexe +99484d5f5 fix path to cdeps buildlib for cesm +f7791fbf3 update impi version for ufs model +4a3dccde4 Merge pull request #3703 from jedwards4b/s2sport +84df62862 Merge pull request #3701 from ESMCI/jonbob/add-new-ocnice-grids +e0dcc5902 Merge pull request #3694 from ESMCI/darincomeau/add_MPASO_grid_ECwISC30to60E1r2 +07b7d3bc5 Merge pull request #3696 from ESMCI/wpcoomb/include_submodule_status_in_provenance +edbd2d08a Merge branch 'cime_for_topounits2' (PR #3695) +35fb2d7e9 Merge pull request #3697 from ESMCI/ufs_fix + +Modified files: git diff --name-status [previous_tag] +A .github/workflows/srt.yml +D .travis.yml +M config/cesm/config_archive.xml +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/machines/Depends.intel +M config/cesm/machines/config_machines.xml +M config/config_tests.xml +M config/e3sm/config_files.xml +M 
config/ufs/config_files.xml +M config/ufs/config_grids.xml +M config/ufs/config_inputdata.xml +M config/ufs/machines/config_compilers.xml +M config/ufs/machines/config_machines.xml +M scripts/Tools/Makefile +M scripts/Tools/case.build +M scripts/Tools/jenkins_generic_job +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/pes.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/tests/CTestConfig.cmake +M scripts/tests/scripts_regression_tests.py +M scripts/tests/user_mods_test3/shell_commands +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.internal_components +M src/build_scripts/buildlib_cmake.internal_components +M src/components/data_comps_mct/datm/cime_config/config_component.xml +M src/components/data_comps_mct/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps_mct/datm/src/datm_comp_mod.F90 +M src/components/data_comps_mct/datm/src/datm_shr_mod.F90 +M src/components/data_comps_mct/docn/cime_config/config_component.xml +M src/components/data_comps_mct/drof/cime_config/config_component.xml +M src/components/data_comps_mct/drof/cime_config/namelist_definition_drof.xml +M src/drivers/mct/cime_config/buildlib_cmake +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component_e3sm.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/cime_driver.F90 +M src/drivers/mct/shr/seq_flds_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 8-28-2020 +Tag: cime5.8.32 +Answer Changes: None +Tests: scripts_regression_tests, create_test_cime_developer, + SMS_D_Ld5_Vnuopc.f10_f10_musgs.I2000Clm50BgcCropGs.cheyenne_intel.clm-default, + SMS.f19_g17.X.cheyenne_intel, cdeps_aux tests and testlist_drv.xml test +Dependencies: + +Brief Summary: + - Support concurrent env XML changes during case.run. + - Allow for a sh format file to be sourced and incorporated into the cime environment. + - Avoid artificial limit on string lengths in shr_string_listMerge. + - Don't try to set a variable from env_test.xml if it does not otherwise exist in the case. + - Fix logic in cdeps build. + - New cdeps stream schema and stream definition file. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +5ab9bf4bd Merge pull request #3693 from ESMCI/jgfouca/volatile_env +bf51eb598 Merge pull request #3689 from jedwards4b/add_source_file_feature +6e6bf6b23 Merge pull request #3686 from billsacks/listmerge_relax_limit +e996ec4f5 Merge pull request #3688 from jedwards4b/pfs_test_fix +503d6c568 Merge pull request #3685 from jedwards4b/cdeps_bld_logic_fix +ae676698c Merge pull request #3680 from ESMCI/mvertens/new_stream_schema + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/cesm/machines/config_machines.xml +M config/config_tests.xml +M config/xml_schemas/config_machines.xsd +M config/xml_schemas/env_mach_specific.xsd +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/env_test.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/nmlgen.py +M scripts/tests/scripts_regression_tests.py +M src/share/util/shr_string_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 8-20-2020 +Tag: cime5.8.31 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Fix FV3 grid problem and by default end the GSWP3 forcing in 2013. + - CMEPS driver env var. + - Add flexibility in dlnd domain_fracname. + - Fix K_TestCimeCase scripts regression tests on Mappy. + - Add cuda entries. + - Remove fox build from makefile. + - Revert PR #3658. + - shell_commands used before env_test.xml is written. + - Revert dlnd source code to how it was prior to PR 3619. + - Add a warning if the user attempts to change a value set by env_test.xml. + - Skip over namelist vars ending with "input_data" in check_input_data. + - CIME changes to support upcoming addition of MALI gis1to10km configuration. + - Changes needed to have cdeps 3d stream input working.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +d575f612e Merge pull request #3679 from ekluzek/fv3gridfix2013endGSWP3 +cb20f8725 Merge pull request #3666 from jedwards4b/cmeps_driver_env_var +bea8a7fcd Merge pull request #3678 from billsacks/dlnd_frac_more_flexible +5408413b9 Merge pull request #3675 from ESMCI/wpcoomb/fix_K_TestCimeCase_scripts_regression_tests_on_Mappy +4ebf75a1d Merge pull request #3677 from ESMCI/mrnorman/add-cuda-entries-xml +e673f457b Merge pull request #3674 from jedwards4b/remove_fox +43036f386 Merge pull request #3676 from ESMCI/jgfouca/revert_input_chg +a27d3abef shell_commands useds before env_test.xml is written +ce6fc2ba1 Merge pull request #3671 from billsacks/fix_dlnd_for_sno +eeb1e3d2c Merge pull request #3670 from jedwards4b/add_env_test_warning +de05b6aa8 Merge pull request #3665 from fvitt/data_path +896bdf552 Merge pull request #3660 from ESMCI/jonbob/add-gis1to10km +3a86badd6 Merge pull request #3659 from ESMCI/mvertens/cdeps3d + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/cesm/config_grids_mct.xml +M config/ufs/config_inputdata.xml +M config/ufs/machines/config_machines.xml +M config/xml_schemas/config_compilers_v2.xsd +M scripts/Tools/Makefile +M scripts/Tools/xmlchange +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/test_scheduler.py +M scripts/tests/scripts_regression_tests.py +M src/components/data_comps_mct/datm/cime_config/config_component.xml +M src/components/data_comps_mct/dlnd/cime_config/namelist_definition_dlnd.xml +M src/components/data_comps_mct/dlnd/src/dlnd_comp_mod.F90 +M src/components/data_comps_mct/dlnd/src/dlnd_shr_mod.F90 +M src/drivers/mct/cime_config/config_component.xml +M src/externals/mct/mpeu/m_inpak90.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 8-12-2020 +Tag: cime5.8.30 +Answer Changes: [None, Round Off, Climate Changing] +Tests: cprnc run_tests, scripts_regression_tests.py, ctsm aux_clm with nag +Dependencies: + +Brief Summary: + - Fix single-variable integer output in cprnc. + - Changes required for noaa hafs app. + - Improve check_input_data.py to use os.path.isfile(). + - Remove hard-wired nuopc mapping files except for required custom ones. + - Fix python 3 issue. + - Nag build fix. + - Remove the FoX library. It's now handled by CDEPS. + - Fix issue that the e3sm config_files.xml points to several non-existent xml files. + - Reorder drof options. + - Update the pio2 external library. + - Add support to the data runoff model for E3SM Cryosphere. + - Fix G_TestBuildSystem test. + - Better logic when compset is tested in aux_ test. + - Another hotfix for e3sm. + - PET Test Baseline Generation and Comparison Improvement. + - Archive atm_chunk_costs with other performance data. + - Use SRCROOT instead of CIMEROOT in key places.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +899fcd90e Merge pull request #3663 from billsacks/cprnc_fix_int_singlevar +7e2655994 Merge pull request #3648 from jedwards4b/hafs_port +5176e710d Merge pull request #3658 from ESMCI/wpcoomb_check_input_data_improvements +1db9682bc Merge pull request #3652 from ESMCI/mvertens/remove_mapping_files +ef97cbed3 fix py3 issue +30c6b6af1 Merge pull request #3646 from jedwards4b/nag_build_fix +3298bb2a2 Merge pull request #3651 from jedwards4b/remove_fox +99867f17d Merge pull request #3650 from ESMCI/wpcoomb_query_config_components_improvements +923954285 Merge pull request #3644 from ESMCI/darincomeau/reorder_drof_options +5b34da18a Merge pull request #3643 from jedwards4b/pio2_update_0731 +22e2a2f00 Merge pull request #3641 from ESMCI/darincomeau/add_JRA_e3sm_cryo_compsets +0d9ae78ca Merge pull request #3642 from ESMCI/jgfouca/fix_g_build_test +092e50f51 Merge pull request #3639 from mnlevy1981/update_supported_test +3eca1452a Another hotfix for e3sm +3a765464b Merge pull request #3635 from ESMCI/PET_test_baseline_generation_and_comparison_improvement +562a9a489 Merge pull request #3636 from ESMCI/singhbalwinder/pat-worley-provenance-changes +dd45995ff Merge pull request #3638 from ESMCI/jgfouca/fix_jenkins_commit + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/config_grids_mct.xml +M config/cesm/config_grids_nuopc.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/e3sm/config_files.xml +M config/ufs/config_grids.xml +M config/ufs/machines/config_machines.xml +M scripts/Tools/Makefile +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/buildnml.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/tests/scripts_regression_tests.py +D src/build_scripts/buildlib.fox +M src/components/data_comps_mct/drof/cime_config/config_component.xml +M src/components/data_comps_mct/drof/cime_config/namelist_definition_drof.xml +M src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 +M src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 +M src/externals/mct/mpeu/m_ioutil.F90 +A src/externals/pio2/.github/workflows/autotools.yml +A src/externals/pio2/.github/workflows/cmake.yml +A src/externals/pio2/.github/workflows/strict_autotools.yml +M src/externals/pio2/.gitignore +M src/externals/pio2/.travis.yml +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/Makefile.am +M src/externals/pio2/configure.ac +M src/externals/pio2/examples/c/example2.c +A src/externals/pio2/libpio.settings.in +M src/externals/pio2/src/CMakeLists.txt +M src/externals/pio2/src/clib/CMakeLists.txt +M src/externals/pio2/src/clib/Makefile.am +A 
src/externals/pio2/src/clib/pio_error.c +M src/externals/pio2/src/clib/pio_error.h +A src/externals/pio2/src/clib/pio_meta.h.in +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/CMakeLists.txt +M src/externals/pio2/src/flib/Makefile.am +M src/externals/pio2/src/flib/pio_types.F90 +M src/externals/pio2/src/flib/piolib_mod.F90 +M src/externals/pio2/src/flib/pionfput_mod.F90.in +M src/externals/pio2/tests/cunit/CMakeLists.txt +M src/externals/pio2/tests/cunit/Makefile.am +M src/externals/pio2/tests/cunit/run_tests.sh +M src/externals/pio2/tests/cunit/test_common.c +A src/externals/pio2/tests/cunit/test_darray_append.c +M src/externals/pio2/tests/cunit/test_darray_multivar3.c +M src/externals/pio2/tests/general/CMakeLists.txt +M tools/cprnc/compare_vars_mod.F90.in +M tools/cprnc/run_tests +M tools/cprnc/test_inputs/README +A tools/cprnc/test_inputs/control_int.nc +A tools/cprnc/test_inputs/int_missing.nc +M tools/statistical_ensemble_test/single_run.py + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 7-24-2020 +Tag: cime5.8.29 +Answer Changes: MOM6 +Tests: e3sm_land_developer, scripts_regression_tests, aux_cdeps +Dependencies: + +Brief Summary: + - Minor fix to 'wget' mode of downloading CESM input data. + - stampede2 and cori machine updates. + - Modifications in MCT to support MOSART stratification. + - Add default ATM_NCPL=72 for ne45 (E3SM). + - Remove E3SM CIME subtree management tools and libraries. + - Emergency hotfix for e3sm. + - CIME changes to support SCREAM. + - Update TL319 -> tx0.1v3 ATM2OCN map. + - Fix build clean for e3sm. + - Make hist_utils a bit more robust to existing cprnc.out files. + - Pet test system tests common fix. + - Branch for acme split 2020-07-16 merge. + - Gptl threaded. + - Reapply PR #3591. + - Addition of new aux_cdeps tests to CDEPS. + - Make cprnc rpath system more robust. + - buildlib.cprnc: make sure to call cprnc through wrapper. + - Branch for acme split 2020-07-07 merge. + - Refactor ensemble.py and single_run.py for python3. + - Dict has_key is removed in python3. + - Update TFREEZE_SALTWATER_OPTION value for MOM6. + - Set PIO_REARR_COMM_TYPE: coll for mpi-serial.
+ + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +8cda0e75b Merge pull request #3633 from ESMCI/bdobbins-wget-inputdata-bugfix +8ac220cfe Merge pull request #3634 from ESMCI/fischer/stampede2_cori_updates +9a4936785 Merge pull request #3627 from ESMCI/bishtgautam/mosart/stratification +7b238f284 Merge pull request #3628 from ESMCI/whannah/add_ne45_support +8e4329c1f Merge pull request #3632 from ESMCI/jgfouca/remove_e3sm_subtree_mgmt_code +a82674b68 Emergency hotfix for e3sm +e1ae39f25 Merge pull request #3626 from ESMCI/jgfouca/scream_cime +cf6c5c3df Merge pull request #3624 from mnlevy1981/update_highres_maps +df201d467 Merge pull request #3622 from ESMCI/jgfouca/fix_clean +0d2b31adf Merge pull request #3625 from ESMCI/jgfouca/fix_false_pass +893441098 Merge pull request #3620 from ESMCI/PET_test_system_tests_common_fix +283c01819 Merge pull request #3619 from ESMCI/jgfouca/branch-for-acme-split-2020-07-16 +543c3e5cc Merge pull request #3600 from jedwards4b/gptl_threaded +f2a94676b Merge pull request #3618 from jedwards4b/force_smp_fix_again +1b973968e Merge pull request #3614 from ESMCI/mvertens/inline_updates +30470cec1 Merge pull request #3616 from ESMCI/jgfouca/further_cprnc_build_improvements +8340214b2 Merge pull request #3611 from ESMCI/jgfouca/fix_cprnc_build +ca7d0bb34 Merge pull request #3610 from ESMCI/jgfouca/branch-for-acme-split-2020-07-07 +3a5df36cf Merge pull request #3609 from johnsonb-ucar/master +23c3f42d9 Merge pull request #3605 from billsacks/no_has_key +97cd72c38 Merge pull request #3603 from alperaltuntas/change_tfreeze_saltwater_option +366ec5c65 Merge pull request #3597 from jedwards4b/redo_mpi_serial_pio_comm_type + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/config_grids_mct.xml +M config/cesm/config_inputdata.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +D config/e3sm/allactive/config_compsets.xml +D config/e3sm/allactive/config_pesall.xml +D config/e3sm/allactive/testlist_allactive.xml +D config/e3sm/config_archive.xml +M config/e3sm/config_files.xml +D config/e3sm/config_grids.xml +D config/e3sm/config_inputdata.xml +D config/e3sm/machines/Depends.cray +D config/e3sm/machines/Depends.cray.cmake +D config/e3sm/machines/Depends.gnu +D config/e3sm/machines/Depends.gnu.cmake +D config/e3sm/machines/Depends.ibm +D config/e3sm/machines/Depends.ibm.cmake +D config/e3sm/machines/Depends.intel +D config/e3sm/machines/Depends.intel.cmake +D config/e3sm/machines/Depends.nag +D config/e3sm/machines/Depends.nag.cmake +D config/e3sm/machines/Depends.summit.cmake +D config/e3sm/machines/Depends.summit.pgiacc +D config/e3sm/machines/Depends.summit.pgiacc.cmake +D config/e3sm/machines/Depends.summit.pgigpu +D config/e3sm/machines/Depends.summit.pgigpu.cmake +D config/e3sm/machines/Depends.summitdev.pgiacc +D config/e3sm/machines/Depends.summitdev.pgiacc.cmake +D config/e3sm/machines/README +D config/e3sm/machines/config_batch.xml +D config/e3sm/machines/config_compilers.xml +D config/e3sm/machines/config_machines.xml +D config/e3sm/machines/config_pio.xml +D config/e3sm/machines/config_workflow.xml +D config/e3sm/machines/syslog.anvil +D config/e3sm/machines/syslog.compy +D config/e3sm/machines/syslog.cori-haswell +D config/e3sm/machines/syslog.cori-knl +D config/e3sm/machines/syslog.noop +D config/e3sm/machines/syslog.summit +D config/e3sm/machines/syslog.theta +D 
config/e3sm/machines/syslog.titan +D config/e3sm/machines/template.case.run +D config/e3sm/machines/template.case.run.sh +D config/e3sm/machines/template.case.test +D config/e3sm/machines/template.ocn_diagnostics +D config/e3sm/machines/template.st_archive +D config/e3sm/machines/template.timeseries +D config/e3sm/machines/userdefined_laptop_template/README.md +D config/e3sm/machines/userdefined_laptop_template/config_compilers.xml +D config/e3sm/machines/userdefined_laptop_template/config_machines.xml +D config/e3sm/machines/userdefined_laptop_template/config_pes.xml +D config/e3sm/testmods_dirs/allactive/force_netcdf_pio/shell_commands +D config/e3sm/testmods_dirs/allactive/mach/pet/shell_commands +D config/e3sm/testmods_dirs/allactive/mach_mods/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgc/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_clm +D config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_clm +D config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1bgceca/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_clm +D config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/shell_commands +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_clm +D config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_mpaso +D config/e3sm/testmods_dirs/allactive/v1cmip6/README +D config/e3sm/testmods_dirs/allactive/v1cmip6/shell_commands +D config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_cam +D config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_clm +D config/e3sm/testmods_dirs/bench/gmpas_noio/shell_commands +D config/e3sm/testmods_dirs/bench/gmpas_noio/user_nl_mpaso +D config/e3sm/testmods_dirs/bench/gmpas_noio/user_nl_mpassi +D config/e3sm/testmods_dirs/bench/wcycl/hires/shell_commands +D config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_cam +D config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_mpaso +D config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_mpassi +D config/e3sm/testmods_dirs/bench/wcycl/lores/shell_commands +D config/e3sm/testmods_dirs/bench/wcycl/lores/user_nl_cam +D config/e3sm/testmods_dirs/bench/wcycl/lores/user_nl_clm +D config/e3sm/tests.py +D config/e3sm/usermods_dirs/README +M config/xml_schemas/config_compilers_v2.xsd +M config/xml_schemas/testlist.xsd +M scripts/Tools/Makefile +D scripts/Tools/e3sm_cime_merge +D scripts/Tools/e3sm_cime_split +M scripts/Tools/jenkins_generic_job +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/simple_compare.py +M scripts/lib/CIME/test_scheduler.py +D scripts/lib/e3sm_cime_mgmt.py +M scripts/lib/get_tests.py +M scripts/tests/scripts_regression_tests.py +M src/components/data_comps_mct/datm/cime_config/config_component.xml +M src/components/data_comps_mct/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps_mct/dlnd/cime_config/config_component.xml +M src/components/data_comps_mct/dlnd/cime_config/namelist_definition_dlnd.xml +M 
src/components/data_comps_mct/dlnd/src/dlnd_comp_mod.F90 +M src/components/data_comps_mct/docn/cime_config/config_component.xml +M src/components/data_comps_mct/docn/src/docn_comp_mod.F90 +M src/components/data_comps_mct/docn/src/ocn_comp_mct.F90 +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/config_component_e3sm.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/prep_rof_mod.F90 +M src/drivers/mct/shr/seq_flds_mod.F90 +M src/drivers/mct/shr/seq_infodata_mod.F90 +M src/externals/mct/configure +M src/externals/mct/mpi-serial/configure +M src/share/streams/shr_dmodel_mod.F90 +M src/share/streams/shr_strdata_mod.F90 +M src/share/timing/Makefile +M src/share/util/shr_pio_mod.F90 +M src/share/util/shr_scam_mod.F90 +M tools/cprnc/CMakeLists.txt +M tools/statistical_ensemble_test/ensemble.py +M tools/statistical_ensemble_test/single_run.py + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-30-2020 +Tag: cime5.8.28 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Update some domain and mapping files. + - Improve error message for create newcase test option. + - Clean up screen output & logging in check_input_data. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +a2de0598c Merge pull request #3602 from ESMCI/fischer/mapping_file_update +01c50ea71 Merge pull request #3599 from ESMCI/wpcoomb/improve_error_message_for_create_newcase_test_option +87ab750cd Merge pull request #3589 from billsacks/check_input_data_logging + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/config_grids_mct.xml +M scripts/create_newcase +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/test_scheduler.py + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-23-2020 +Tag: cime5.8.27 +Answer Changes: [None, Round Off, Climate Changing] +Tests: SMS.ne30pg3_ne30pg3_mg17.A.cheyenne_intel + scripts_regression_tests + SMS_D_Ld3.f45_g37_rx1.A.izumi_nag +Dependencies: + +Brief Summary: + - Rename ne30pg3 grid name to ne30np4.pg3. + - ERA5 data stream related changes. + - Fix link order for cdeps. + - Give the file name that results in the "empty read-only file" error. + - Uncomment error check. + - Use pio1 even for mpi-serial cases in CESM. + - Fix print format issue and turn off debug flags. + - Update paths for cdeps. + - Merge branch 'master' of https://github.com/ESMCI/cime. + - Minor adjust to cdeps link step. + - Add cmake to cheyenne modules, point to CDEPS buildlib. + - Updates for cdeps streams. + - Remove nuopc data models (moved to new repo CDEPS). + - Fixed and cleaned-up scripts_regression_tests.py test Q_TestBlessTestResults with CIME_MODEL = cesm.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +b3fb69d87 Merge pull request #3587 from ESMCI/fischer/rename_ne30pg3_grid +d1058c937 Merge pull request #3586 from ESMCI/add_era5_cime_only +231c70059 fix link order for cdeps +bdef181e4 Merge pull request #3585 from billsacks/improve_error_message +4427dd051 uncomment error check +86509cc45 Merge pull request #3583 from billsacks/use_pio1 +4412c39b2 Merge pull request #3582 from jedwards4b/nag_fixes +8a4859e5a update paths for cdeps +40b2eb7b3 Merge branch 'master' of https://github.com/ESMCI/cime +87951ae07 minor adjust to CDEPS link step +a740bedfd add cmake to cheyenne modules, point to CDEPS buildlib +d10f946d6 updates for cdeps streams +74e537532 Merge pull request #3578 from jedwards4b/remove_nuopc_data_models +9052cc17f Merge pull request #3579 from ESMCI/wpcoomb/fix_scripts_regression_tests.py_test_Q_TestBlessTestResults + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/config_grids_mct.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M config/ufs/config_files.xml +M config/ufs/config_grids.xml +M config/ufs/machines/config_machines.xml +M config/xml_schemas/entry_id_base.xsd +M config/xml_schemas/entry_id_base_version3.xsd +M scripts/Tools/Makefile +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/archive.py +M scripts/lib/CIME/XML/env_workflow.py +M scripts/lib/CIME/XML/files.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/nmlgen.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.internal_components +D src/components/data_comps_nuopc/datm/cime_config/buildlib +D src/components/data_comps_nuopc/datm/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/datm/cime_config/buildnml +D src/components/data_comps_nuopc/datm/cime_config/config_archive.xml +D src/components/data_comps_nuopc/datm/cime_config/config_component.xml +D src/components/data_comps_nuopc/datm/cime_config/namelist_definition_datm.xml +D src/components/data_comps_nuopc/datm/cime_config/user_nl_datm +D src/components/data_comps_nuopc/datm/src/atm_comp_nuopc.F90 +D src/components/data_comps_nuopc/datm/src/datm_datamode_clmncep_mod.F90 +D src/components/data_comps_nuopc/datm/src/datm_datamode_core2_mod.F90 +D src/components/data_comps_nuopc/datm/src/datm_datamode_jra_mod.F90 +D src/components/data_comps_nuopc/dice/cime_config/buildlib +D src/components/data_comps_nuopc/dice/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/dice/cime_config/buildnml +D src/components/data_comps_nuopc/dice/cime_config/config_archive.xml +D src/components/data_comps_nuopc/dice/cime_config/config_component.xml +D src/components/data_comps_nuopc/dice/cime_config/namelist_definition_dice.xml +D src/components/data_comps_nuopc/dice/cime_config/user_nl_dice +D src/components/data_comps_nuopc/dice/src/dice_datamode_ssmi_mod.F90 +D src/components/data_comps_nuopc/dice/src/dice_flux_atmice_mod.F90 +D src/components/data_comps_nuopc/dice/src/ice_comp_nuopc.F90 +D src/components/data_comps_nuopc/dlnd/cime_config/buildlib +D src/components/data_comps_nuopc/dlnd/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/dlnd/cime_config/buildnml +D 
src/components/data_comps_nuopc/dlnd/cime_config/config_archive.xml +D src/components/data_comps_nuopc/dlnd/cime_config/config_component.xml +D src/components/data_comps_nuopc/dlnd/cime_config/namelist_definition_dlnd.xml +D src/components/data_comps_nuopc/dlnd/cime_config/user_nl_dlnd +D src/components/data_comps_nuopc/dlnd/src/lnd_comp_nuopc.F90 +D src/components/data_comps_nuopc/docn/cime_config/buildlib +D src/components/data_comps_nuopc/docn/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/docn/cime_config/buildnml +D src/components/data_comps_nuopc/docn/cime_config/config_archive.xml +D src/components/data_comps_nuopc/docn/cime_config/config_component.xml +D src/components/data_comps_nuopc/docn/cime_config/namelist_definition_docn.xml +D src/components/data_comps_nuopc/docn/cime_config/user_nl_docn +D src/components/data_comps_nuopc/docn/src/docn_datamode_aquaplanet_mod.F90 +D src/components/data_comps_nuopc/docn/src/docn_datamode_copyall_mod.F90 +D src/components/data_comps_nuopc/docn/src/docn_datamode_iaf_mod.F90 +D src/components/data_comps_nuopc/docn/src/docn_datamode_som_mod.F90 +D src/components/data_comps_nuopc/docn/src/docn_set_ofrac_mod.F90 +D src/components/data_comps_nuopc/docn/src/ocn_comp_nuopc.F90 +D src/components/data_comps_nuopc/drof/cime_config/buildlib +D src/components/data_comps_nuopc/drof/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/drof/cime_config/buildnml +D src/components/data_comps_nuopc/drof/cime_config/config_archive.xml +D src/components/data_comps_nuopc/drof/cime_config/config_component.xml +D src/components/data_comps_nuopc/drof/cime_config/namelist_definition_drof.xml +D src/components/data_comps_nuopc/drof/cime_config/user_nl_drof +D src/components/data_comps_nuopc/drof/src/rof_comp_nuopc.F90 +D src/components/data_comps_nuopc/dshr_nuopc/dshr_dfield_mod.F90 +D src/components/data_comps_nuopc/dshr_nuopc/dshr_fldlist_mod.F90 +D src/components/data_comps_nuopc/dshr_nuopc/dshr_mod.F90 +D src/components/data_comps_nuopc/dwav/cime_config/buildlib +D src/components/data_comps_nuopc/dwav/cime_config/buildlib_cmake +D src/components/data_comps_nuopc/dwav/cime_config/buildnml +D src/components/data_comps_nuopc/dwav/cime_config/config_archive.xml +D src/components/data_comps_nuopc/dwav/cime_config/config_component.xml +D src/components/data_comps_nuopc/dwav/cime_config/namelist_definition_dwav.xml +D src/components/data_comps_nuopc/dwav/cime_config/user_nl_dwav +D src/components/data_comps_nuopc/dwav/src/wav_comp_nuopc.F90 +M src/share/streams/shr_strdata_mod.F90 +M src/share/streams/shr_stream_mod.F90 +D src/share/streams_nuopc/dshr_methods_mod.F90 +D src/share/streams_nuopc/dshr_strdata_mod.F90 +D src/share/streams_nuopc/dshr_stream_mod.F90 +D src/share/streams_nuopc/dshr_tInterp_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-16-2020 +Tag: cime5.8.26 +Answer Changes: Climate Changing +Tests: hand tested with ERS_Vnuopc_D.f09_g17.A.cheyenne_intel modified for era5 + ../src/drivers/nuopc/cime_config/testdefs/testlist_drv.xml (cheyenne, intel) + scripts_regression_tests (with PIO_VERSION=2) +Dependencies: + +Brief Summary: + - One line bug fix for regional amazon grid and nuopc rpointer name. + - scorpio (e3sm pio2 version) does not support the PIO_SHORT type added to the pio2 library. + - Add mvsource script. + - Add support for compressed ERA5 data streams.
+ - Major refactor of nuopc datamodel caps and share code. + - Changes to add NCAR's Casper/DAV systems to the machine lists. + - Updates to dry deposition data. + - Updates to the latest pio2 code. + - Allow a user supplied config_grids.xml file. + - Pleiades machines updates. + - Improved error message for incorrect setting of cime model. + +User interface changes: + - For all CIME data models. + +PR summary: git log --oneline --first-parent [previous_tag]..master +c09bafe53 minor one line bug fixes for regional amazon grid and nuopc rpointer name +cd1238247 Merge pull request #3575 from jedwards4b/fix_scorpio_bld +50c768dec Merge pull request #3543 from jedwards4b/movesrc +9e31c070f Merge pull request #3573 from jedwards4b/add_era5_support_nuopc_data_models +dab733cba Merge pull request #3572 from ESMCI/esmf_datamodels +bb4932212 Merge pull request #3548 from gdicker1/add_DAV +72a32ce65 Merge pull request #3557 from fvitt/drydep_cime5.8.23 +0c7d278c1 Merge pull request #3568 from jedwards4b/pio2_update +e2dbd62e6 Merge pull request #3565 from jedwards4b/user_mods_config_grids2 +84391ac37 Merge pull request #3546 from fvitt/pleiades_cime5.8.23 +31a009936 Merge pull request #3567 from ESMCI/wpcoomb/Improved_error_message_for_incorrect_setting_of_CIME_MODEL + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M scripts/Tools/Makefile +A scripts/Tools/mvsource +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/buildnml.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/utils.py +A src/build_scripts/buildlib.fox +M src/build_scripts/buildlib.pio +M src/components/data_comps_mct/datm/cime_config/buildnml +M src/components/data_comps_nuopc/datm/cime_config/buildnml +M src/components/data_comps_nuopc/datm/cime_config/config_component.xml +M src/components/data_comps_nuopc/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps_nuopc/datm/src/atm_comp_nuopc.F90 +A src/components/data_comps_nuopc/datm/src/datm_datamode_clmncep_mod.F90 +A src/components/data_comps_nuopc/datm/src/datm_datamode_core2_mod.F90 +A src/components/data_comps_nuopc/datm/src/datm_datamode_jra_mod.F90 +M src/components/data_comps_nuopc/dice/cime_config/buildnml +M src/components/data_comps_nuopc/dice/cime_config/config_component.xml +M src/components/data_comps_nuopc/dice/cime_config/namelist_definition_dice.xml +A src/components/data_comps_nuopc/dice/src/dice_datamode_ssmi_mod.F90 +M src/components/data_comps_nuopc/dice/src/ice_comp_nuopc.F90 +M src/components/data_comps_nuopc/dlnd/cime_config/buildnml +M src/components/data_comps_nuopc/dlnd/cime_config/config_component.xml +M src/components/data_comps_nuopc/dlnd/cime_config/namelist_definition_dlnd.xml +M src/components/data_comps_nuopc/dlnd/src/lnd_comp_nuopc.F90 +M src/components/data_comps_nuopc/docn/cime_config/buildnml +M src/components/data_comps_nuopc/docn/cime_config/config_component.xml +M src/components/data_comps_nuopc/docn/cime_config/namelist_definition_docn.xml +A src/components/data_comps_nuopc/docn/src/docn_datamode_aquaplanet_mod.F90 +A src/components/data_comps_nuopc/docn/src/docn_datamode_copyall_mod.F90 +A 
src/components/data_comps_nuopc/docn/src/docn_datamode_iaf_mod.F90 +A src/components/data_comps_nuopc/docn/src/docn_datamode_som_mod.F90 +A src/components/data_comps_nuopc/docn/src/docn_set_ofrac_mod.F90 +M src/components/data_comps_nuopc/docn/src/ocn_comp_nuopc.F90 +M src/components/data_comps_nuopc/drof/cime_config/buildnml +M src/components/data_comps_nuopc/drof/cime_config/config_component.xml +M src/components/data_comps_nuopc/drof/cime_config/namelist_definition_drof.xml +M src/components/data_comps_nuopc/drof/src/rof_comp_nuopc.F90 +M src/components/data_comps_nuopc/dshr_nuopc/dshr_dfield_mod.F90 +M src/components/data_comps_nuopc/dshr_nuopc/dshr_fldlist_mod.F90 +M src/components/data_comps_nuopc/dshr_nuopc/dshr_mod.F90 +M src/components/data_comps_nuopc/dwav/cime_config/buildnml +M src/components/data_comps_nuopc/dwav/cime_config/config_component.xml +M src/components/data_comps_nuopc/dwav/cime_config/namelist_definition_dwav.xml +M src/components/data_comps_nuopc/dwav/src/wav_comp_nuopc.F90 +M src/drivers/mct/shr/seq_drydep_mod.F90 +M src/externals/pio2/scripts/genf90.pl +M src/externals/pio2/set_flags.am +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_nc4.c +M src/externals/pio2/src/flib/CMakeLists.txt +M src/externals/pio2/src/flib/Makefile.am +M src/externals/pio2/src/flib/pio.F90 +M src/externals/pio2/src/flib/pio_kinds.F90 +M src/externals/pio2/src/flib/pio_types.F90 +M src/externals/pio2/src/flib/piodarray.F90.in +M src/externals/pio2/src/flib/pionfatt_mod.F90.in +M src/externals/pio2/src/flib/pionfget_mod.F90.in +M src/externals/pio2/src/flib/pionfput_mod.F90.in +M src/externals/pio2/src/ncint/ncintdispatch.c +M src/externals/pio2/src/ncint/ncintdispatch.h +M src/externals/pio2/tests/cunit/test_async_3proc.c +M src/externals/pio2/tests/cunit/test_async_4proc.c +M src/externals/pio2/tests/cunit/test_async_simple.c +M src/externals/pio2/tests/cunit/test_pioc.c +M src/externals/pio2/tests/cunit/test_pioc_fill.c +M src/externals/pio2/tests/cunit/test_pioc_putget.c +M src/externals/pio2/tests/general/util/pio_tf_f90gen.pl +M src/externals/pio2/tests/general/util/pio_tutil.F90 +M src/externals/pio2/tests/unit/ncdf_tests.F90 +M src/share/nuopc/seq_drydep_mod.F90 +M src/share/streams/shr_strdata_mod.F90 +M src/share/streams/shr_stream_mod.F90 +M src/share/streams_nuopc/dshr_methods_mod.F90 +M src/share/streams_nuopc/dshr_strdata_mod.F90 +M src/share/streams_nuopc/dshr_stream_mod.F90 +M src/share/streams_nuopc/dshr_tInterp_mod.F90 +M src/share/util/shr_cal_mod.F90 +M src/share/util/shr_kind_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-8-2020 +Tag: cime5.8.25 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Change grid alias for arctic var-res grids. + - Couples HYCOM with data atmosphere for UFS HAFS application.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +e8dfe9356 Merge pull request #3566 from ESMCI/fischer/var_res_grids +3d03e31f2 Merge pull request #3556 from ESMCI/ufs-hafs-app_updated_v2 + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/ufs/config_files.xml +M config/ufs/config_grids.xml +M config/ufs/machines/config_compilers.xml +M config/ufs/machines/config_machines.xml +M scripts/lib/CIME/XML/files.py +M scripts/lib/CIME/case/case.py +M src/components/data_comps_nuopc/datm/src/atm_comp_nuopc.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 6-3-2020 +Tag: cime5.8.24 +Answer Changes: Round Off on izumi +Tests: scripts_regression_tests, izumi tests +Dependencies: + +Brief Summary: + - Machine configuration updates for izumi. + - Add Makefile target to write various flags. + - Improve PET test baseline generation and comparison. + - Allow gptl build to recognize ESMF_LIBDIR. + - When writing .env_mach_specific files, only write settings for main job. + - Start adding option for MizuRoute. + - Merge ACME branch 2020-05-15. + - Allow compiler to modify MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE. + - Improve cime buildlib function debug logging. + - Fix cime_model to stdout issue. + - Extend allowed string length. + - Fix div0s in cprnc comparisons. + - Change type to double for derived vars in cprnc. + - Fix typo. + +User interface changes: + - New component added for cesm. + +PR summary: git log --oneline --first-parent [previous_tag]..master +837ffbf25 Merge pull request #3560 from ESMCI/fischer/izumi_update +99a0b1f96 Merge pull request #3558 from billsacks/makefile_rule_write_link_flags +cacf02562 Merge pull request #3434 from ESMCI/wpcoomb/improve_PETtest_baseline_generation_and_comparison +2547d0d11 Merge pull request #3551 from billsacks/gptl_esmflibdir +27021a67d Merge pull request #3555 from billsacks/env_mach_specific_main_job +1b049969f Merge pull request #3529 from ekluzek/mizuRoute +b847e9fb1 Merge pull request #3538 from ESMCI/jgfouca/branch-for-acme-split-2020-05-15 +64e9f9202 Merge pull request #3545 from jedwards4b/modify_max_with_compiler +588039c8e Merge pull request #3539 from ESMCI/wpcoomb/improve_cime_buildlib_function +50a0edc49 Merge pull request #3536 from jedwards4b/cime_model_fix +29508e9d6 Merge pull request #3535 from jedwards4b/cprnc_improvement +9b650d311 Merge pull request #3530 from rgknox/fix-div0-compare_vars +f914cb653 Merge pull request #3531 from jedwards4b/cprnc_improvement +34c9b8f19 Merge pull request #3524 from christophergeiger3/patch-1 + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/machines/config_machines.xml +M config/config_tests.xml +M config/e3sm/allactive/config_compsets.xml +M config/e3sm/allactive/config_pesall.xml +M config/e3sm/config_grids.xml +M config/e3sm/machines/Depends.summit.cmake +A config/e3sm/machines/Depends.summit.pgigpu +A config/e3sm/machines/Depends.summit.pgigpu.cmake +M config/e3sm/machines/config_batch.xml +M config/e3sm/machines/config_compilers.xml +M config/e3sm/machines/config_machines.xml +M config/e3sm/machines/config_pio.xml +M config/e3sm/machines/template.case.run.sh +A config/e3sm/testmods_dirs/allactive/v1bgc/shell_commands +A config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_cam 
+A config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_clm +A config/e3sm/testmods_dirs/allactive/v1bgc/user_nl_mpaso +A config/e3sm/testmods_dirs/allactive/v1bgc_1850/shell_commands +A config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_cam +A config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_clm +A config/e3sm/testmods_dirs/allactive/v1bgc_1850/user_nl_mpaso +A config/e3sm/testmods_dirs/allactive/v1bgceca/shell_commands +A config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_cam +A config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_clm +A config/e3sm/testmods_dirs/allactive/v1bgceca/user_nl_mpaso +A config/e3sm/testmods_dirs/allactive/v1bgceca_1850/shell_commands +A config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_cam +A config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_clm +A config/e3sm/testmods_dirs/allactive/v1bgceca_1850/user_nl_mpaso +M config/e3sm/testmods_dirs/allactive/v1cmip6/shell_commands +M config/e3sm/tests.py +M config/xml_schemas/config_machines.xsd +M doc/source/what_cime/index.rst +M scripts/Tools/Makefile +M scripts/Tools/cime_bisect +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/BuildTools/makemacroswriter.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/pio.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/jenkins_generic_job.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.internal_components +M src/drivers/mct/cime_config/config_component_e3sm.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/prep_rof_mod.F90 +M src/drivers/mct/shr/seq_flds_mod.F90 +M src/share/timing/Makefile +M src/share/util/shr_mem_mod.F90 +M tools/cprnc/Makefile +M tools/cprnc/compare_vars_mod.F90.in +M tools/cprnc/filestruct.F90 +D tools/cprnc/test.csh + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 4-19-2020 +Tag: cime5.8.23 +Answer Changes: roundoff for some compsets, bfb for others +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Improvement to scripts regression tests on the Melvin machine. + - Fix atm level spec so that it does not match 1x1_brazil. + - Document provenance from COARE/Fairall surface flux scheme option. + - Add missing ROF2OCN grid for r05_TO_gx3v7. + - Updates to CIME used for 0.1 degree JRA-forced G run with BGC enabled. + - FV3 bug fix and Makefile update. + - Update cheyenne ESMF libs. + - case.setup: fix merge error from prev PR. + - case.setup: fix case.setup --clean. + - First steps to ESMF data models. + - Allow user to specify an extra machines directory in create_newcase. + - Remove code specific to ORNL machine. + + +User interface changes: + - Adds --extra-machines-dir argument to create_newcase.
+ +PR summary: git log --oneline --first-parent [previous_tag]..master +332d8c9b3 Merge pull request #3522 from ESMCI/wpcoomb/melvin_regression_tests_improvements +6d745aa6b Merge pull request #3519 from jedwards4b/atm_level_fix +850bef781 Merge pull request #3520 from mt5555/patch-1 +ebb9dd6c9 Merge pull request #3517 from ESMCI/fischer/r05_gx3v7 +5805a40a5 Merge pull request #3515 from mnlevy1981/highres_JRA_BGC +1bc586676 Merge pull request #3516 from jtruesdal/fv3_3452 +6a00e248c Merge pull request #3514 from ESMCI/fischer/esmf_libs +793764fd2 Merge pull request #3513 from ESMCI/jgfouca/fix_case_setup +25840072d Merge pull request #3511 from ESMCI/jgfouca/fix_case_clean +509f2bfa8 Merge pull request #3505 from ESMCI/mvertens/dmodels_refactor_to_esmfpre +51ac93be5 Merge pull request #3508 from billsacks/alternate_dot_cime_location +7303682ed Merge pull request #3502 from jedwards4b/remove_titan_piocode + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/machines/config_machines.xml +M config/xml_schemas/config_machines_template.xml +M scripts/Tools/Makefile +M scripts/create_newcase +M scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/SystemTests/erio.py +M scripts/lib/CIME/XML/batch.py +M scripts/lib/CIME/XML/compilers.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_setup.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.csm_share +M src/components/data_comps_nuopc/datm/cime_config/buildnml +M src/components/data_comps_nuopc/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps_nuopc/datm/src/atm_comp_nuopc.F90 +D src/components/data_comps_nuopc/datm/src/datm_comp_mod.F90 +M src/components/data_comps_nuopc/dice/cime_config/buildnml +M src/components/data_comps_nuopc/dice/cime_config/namelist_definition_dice.xml +D src/components/data_comps_nuopc/dice/src/dice_comp_mod.F90 +M src/components/data_comps_nuopc/dice/src/ice_comp_nuopc.F90 +M src/components/data_comps_nuopc/dlnd/cime_config/buildnml +M src/components/data_comps_nuopc/dlnd/cime_config/namelist_definition_dlnd.xml +D src/components/data_comps_nuopc/dlnd/src/dlnd_comp_mod.F90 +M src/components/data_comps_nuopc/dlnd/src/lnd_comp_nuopc.F90 +M src/components/data_comps_nuopc/docn/cime_config/buildnml +M src/components/data_comps_nuopc/docn/cime_config/namelist_definition_docn.xml +D src/components/data_comps_nuopc/docn/src/docn_comp_mod.F90 +M src/components/data_comps_nuopc/docn/src/ocn_comp_nuopc.F90 +M src/components/data_comps_nuopc/drof/cime_config/buildnml +M src/components/data_comps_nuopc/drof/cime_config/namelist_definition_drof.xml +D src/components/data_comps_nuopc/drof/src/drof_comp_mod.F90 +M src/components/data_comps_nuopc/drof/src/rof_comp_nuopc.F90 +M src/components/data_comps_nuopc/dshr_nuopc/dshr_dfield_mod.F90 +M src/components/data_comps_nuopc/dshr_nuopc/dshr_fldlist_mod.F90 +R063 src/components/data_comps_nuopc/dshr_nuopc/dshr_nuopc_mod.F90 src/components/data_comps_nuopc/dshr_nuopc/dshr_mod.F90 +M src/components/data_comps_nuopc/dwav/cime_config/buildnml +M src/components/data_comps_nuopc/dwav/cime_config/namelist_definition_dwav.xml +D src/components/data_comps_nuopc/dwav/src/dwav_comp_mod.F90 +M src/components/data_comps_nuopc/dwav/src/wav_comp_nuopc.F90 +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component.xml +M 
src/drivers/mct/cime_config/config_component_cesm.xml +M src/share/streams/shr_stream_mod.F90 +R058 src/components/data_comps_nuopc/dshr_nuopc/dshr_methods_mod.F90 src/share/streams_nuopc/dshr_methods_mod.F90 +A src/share/streams_nuopc/dshr_strdata_mod.F90 +A src/share/streams_nuopc/dshr_stream_mod.F90 +A src/share/streams_nuopc/dshr_tInterp_mod.F90 +M src/share/util/shr_flux_mod.F90 +M utils/perl5lib/Config/SetupTools.pm +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 4-26-2020 +Tag: cime5.8.22 +Answer Changes: b4b +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Fix issue with mpi-serial io. + - Use esmf8.1.0b14 on cheyenne. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +3b4ea8a99 Merge pull request #3499 from jedwards4b/pio_serialio_fix +eb2e04e52 Merge pull request #3494 from jedwards4b/update_cheyenne_esmf + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M scripts/Tools/Makefile +M src/externals/pio2/src/flib/piolib_mod.F90 +M src/share/util/shr_pio_mod.F90 + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 4-17-2020 +Tag: cime5.8.21 +Answer Changes: None +Tests: scripts_regression_tests + CIME_MODEL=cesm; CIME_DRIVER=nuopc; ./scripts_regression_tests.py +Dependencies: + +Brief Summary: + - Use shrmap by reference instead of copy. + - nuopc_updates for scripts_regression_tests, esmf on cheyenne + - e3sm_cime_mgmt hotfix for both-removed conflicts + - Allow for specification of vertical grid in ATM_GRID. + - Recognize when a machine listed in config_machines.xml has no entries. + - Fix case reset on some machines. + - Fix pylint issues. + - Bring back the setting of dlnd strm_domdir and strm_domfil to null. + - Bugfixes and changes that permit cmeps to run without a mediator. + - Some changes to cprnc's CMakeLists.txt. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +979ab8433 Merge pull request #3470 from jedwards4b/shr_map_getARptr +4525d6a4e Merge pull request #3490 from jedwards4b/nuopc_updates +3c9fa143f e3sm_cime_mgmt hotfix for both-removed conflicts +22aab810e Merge pull request #3492 from jedwards4b/atm_vert_grid_fix +37a74cf5b Merge pull request #3468 from ESMCI/wpcoomb/user_defined_env_variables +79a8e11e6 Merge pull request #3475 from ESMCI/jgfouca/fix_case_reset +abbf05745 fix pylint issue +7bbbc40fa Merge pull request #3486 from billsacks/fix_dlnd_streams +15893bcaf Merge pull request #3484 from ESMCI/mvertens/nuopc_bugfixes +4bc65d82b Merge pull request #3483 from ESMCI/jgfouca/cprnc_improve_cmake + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids_nuopc.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M scripts/Tools/Makefile +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/pio.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/e3sm_cime_mgmt.py +M scripts/tests/scripts_regression_tests.py +M src/components/data_comps_mct/dlnd/cime_config/namelist_definition_dlnd.xml +M src/components/data_comps_nuopc/dice/src/dice_comp_mod.F90 +M src/components/data_comps_nuopc/docn/src/docn_comp_mod.F90 +M src/components/data_comps_nuopc/drof/src/drof_comp_mod.F90 +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component.xml +M src/share/nuopc/seq_drydep_mod.F90 +M src/share/nuopc/shr_fire_emis_mod.F90 +M src/share/nuopc/shr_megan_mod.F90 +M src/share/nuopc/shr_ndep_mod.F90 +M src/share/streams/shr_dmodel_mod.F90 +M src/share/timing/perf_mod.F90 +M src/share/util/shr_map_mod.F90 +M src/share/util/shr_sys_mod.F90 +M tools/cprnc/CMakeLists.txt + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 4-7-2020 +Tag: cime5.8.20 +Answer Changes: None +Tests: scripts_regression_tests, check_inputdata +Dependencies: + +Brief Summary: + - Add ftp timeout, fix failover to svn. + - E3SM 03-16-2020 merger. + - Remove unneeded encodes. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +708a9338b Merge pull request #3479 from jedwards4b/ftp_timeout +263de7e86 Merge pull request #3457 from ESMCI/jgfouca/branch-for-acme-split-2020-03-16 +611ca9c1e wget: hotfix, remove unneeded encodes + + +Modified files: git diff --name-status [previous_tag] +M config/config_tests.xml +M config/e3sm/allactive/config_pesall.xml +M config/e3sm/config_files.xml +M config/e3sm/config_grids.xml +M config/e3sm/machines/Depends.gnu.cmake +M config/e3sm/machines/Depends.ibm.cmake +M config/e3sm/machines/Depends.intel.cmake +M config/e3sm/machines/Depends.summit.pgiacc.cmake +M config/e3sm/machines/Depends.summitdev.pgiacc.cmake +D config/e3sm/machines/Depends.theta.intel +D config/e3sm/machines/Depends.theta.intel.cmake +M config/e3sm/machines/config_batch.xml +M config/e3sm/machines/config_compilers.xml +M config/e3sm/machines/config_machines.xml +A config/e3sm/testmods_dirs/bench/gmpas_noio/shell_commands +A config/e3sm/testmods_dirs/bench/gmpas_noio/user_nl_mpaso +A config/e3sm/testmods_dirs/bench/gmpas_noio/user_nl_mpassi +A config/e3sm/testmods_dirs/bench/wcycl/hires/shell_commands +A config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_cam +A config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_mpaso +A config/e3sm/testmods_dirs/bench/wcycl/hires/user_nl_mpassi +A config/e3sm/testmods_dirs/bench/wcycl/lores/shell_commands +A config/e3sm/testmods_dirs/bench/wcycl/lores/user_nl_cam +A config/e3sm/testmods_dirs/bench/wcycl/lores/user_nl_clm +M config/e3sm/tests.py +M config/xml_schemas/config_compilers_v2.xsd +M config/xml_schemas/config_grids_v2.1.xsd +M config/xml_schemas/config_grids_v2.xsd +M scripts/Tools/bld_diff +M scripts/Tools/case.build +A scripts/Tools/get_case_env +M scripts/Tools/get_standard_makefile_args +M scripts/create_newcase +M scripts/lib/CIME/BuildTools/cmakemacroswriter.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/Servers/ftp.py +M scripts/lib/CIME/Servers/wget.py +M scripts/lib/CIME/SystemTests/homme.py +A scripts/lib/CIME/SystemTests/hommebaseclass.py +A scripts/lib/CIME/SystemTests/hommebfb.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/jenkins_generic_job.py +M scripts/tests/scripts_regression_tests.py +A src/CMake/CorrectWindowsPaths.cmake +A src/CMake/FindPackageMultipass.cmake +A src/CMake/ResolveCompilerPaths.cmake +M src/build_scripts/buildlib.cprnc +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.kokkos +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M src/build_scripts/buildlib.pio +A src/build_scripts/buildlib_cmake.internal_components +A src/components/data_comps_mct/datm/cime_config/buildlib_cmake +A src/components/data_comps_mct/desp/cime_config/buildlib_cmake +A src/components/data_comps_mct/dice/cime_config/buildlib_cmake +A src/components/data_comps_mct/dlnd/cime_config/buildlib_cmake +M src/components/data_comps_mct/dlnd/cime_config/config_component.xml +M src/components/data_comps_mct/dlnd/cime_config/namelist_definition_dlnd.xml +A 
src/components/data_comps_mct/docn/cime_config/buildlib_cmake +M src/components/data_comps_mct/docn/cime_config/config_component.xml +A src/components/data_comps_mct/drof/cime_config/buildlib_cmake +A src/components/data_comps_mct/dwav/cime_config/buildlib_cmake +A src/components/data_comps_nuopc/datm/cime_config/buildlib_cmake +M src/components/data_comps_nuopc/datm/cime_config/config_component.xml +M src/components/data_comps_nuopc/datm/cime_config/namelist_definition_datm.xml +A src/components/data_comps_nuopc/dice/cime_config/buildlib_cmake +A src/components/data_comps_nuopc/dlnd/cime_config/buildlib_cmake +A src/components/data_comps_nuopc/docn/cime_config/buildlib_cmake +A src/components/data_comps_nuopc/drof/cime_config/buildlib_cmake +A src/components/data_comps_nuopc/dwav/cime_config/buildlib_cmake +A src/components/stub_comps_mct/satm/cime_config/buildlib_cmake +A src/components/stub_comps_mct/sesp/cime_config/buildlib_cmake +A src/components/stub_comps_mct/sglc/cime_config/buildlib_cmake +A src/components/stub_comps_mct/siac/cime_config/buildlib_cmake +A src/components/stub_comps_mct/sice/cime_config/buildlib_cmake +A src/components/stub_comps_mct/slnd/cime_config/buildlib_cmake +A src/components/stub_comps_mct/socn/cime_config/buildlib_cmake +A src/components/stub_comps_mct/srof/cime_config/buildlib_cmake +A src/components/stub_comps_mct/swav/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/satm/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/sesp/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/sglc/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/siac/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/sice/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/slnd/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/socn/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/srof/cime_config/buildlib_cmake +A src/components/stub_comps_nuopc/swav/cime_config/buildlib_cmake +A src/components/xcpl_comps_mct/xatm/cime_config/buildlib_cmake +A src/components/xcpl_comps_mct/xglc/cime_config/buildlib_cmake +A src/components/xcpl_comps_mct/xice/cime_config/buildlib_cmake +A src/components/xcpl_comps_mct/xlnd/cime_config/buildlib_cmake +A src/components/xcpl_comps_mct/xocn/cime_config/buildlib_cmake +A src/components/xcpl_comps_mct/xrof/cime_config/buildlib_cmake +A src/components/xcpl_comps_mct/xwav/cime_config/buildlib_cmake +A src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib_cmake +A src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib_cmake +A src/components/xcpl_comps_nuopc/xice/cime_config/buildlib_cmake +A src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib_cmake +A src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib_cmake +A src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib_cmake +A src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib_cmake +A src/drivers/mct/cime_config/buildlib_cmake +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/cime_config/config_component_e3sm.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/prep_rof_mod.F90 +M src/drivers/mct/shr/seq_flds_mod.F90 +M src/drivers/mct/shr/seq_infodata_mod.F90 +M src/share/timing/GPTLget_memusage.c +M src/share/timing/GPTLprint_memusage.c +M src/share/timing/gptl.c +M src/share/util/shr_mem_mod.F90 +M src/share/util/shr_orb_mod.F90 +M src/share/util/shr_sys_mod.F90 +M 
tools/mapping/gen_domain_files/src/gen_domain.F90 +A tools/mapping/gen_domain_files/test_gen_domain.sh + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 4-3-2020 +Tag: cime5.8.19 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + + - Clean up following the maint-5.6 merge. + - Change sst_aquap11 to sst_aquap_constant. + - case.setup: Add --keep option. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +2656300d5 Merge pull request #3462 from mnlevy1981/cleanup_maint-5.6_merge +06760c7c2 Merge pull request #3469 from ESMCI/fischer/sst_aquap_constant +bb2d15680 Merge pull request #3464 from ESMCI/jgfouca/case_setup_keep_list + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids_common.xml +M scripts/Tools/case.setup +M scripts/lib/CIME/case/case_setup.py +M src/components/data_comps_mct/datm/cime_config/config_component.xml +M src/components/data_comps_mct/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps_mct/datm/src/datm_comp_mod.F90 +M src/components/data_comps_mct/drof/cime_config/namelist_definition_drof.xml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 3-20-2020 +Tag: cime5.8.18 +Answer Changes: None +Tests: scripts_regression_tests, SMS.ne5pg4_ne5pg4_mg37.A +Dependencies: + +Brief Summary: + - Add ne5np4.pg4 grid. + - Ensure that API documentation is rebuilt whenever rebuilding + documentation.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +f990945a7 Merge pull request #3460 from ESMCI/fischer/ne5np4 +f25ecc408 Merge pull request #3454 from billsacks/doc_clean_api + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M doc/Makefile + +====================================================================== + +====================================================================== + +Originator: Bill Sacks +Date: 3-15-2020 +Tag: cime5.8.17 +Answer Changes: None +Tests: scripts_regression_tests, aux_clm +Dependencies: none + +Brief Summary: + - Separate mct and nuopc data, xcpl and stub components + - Update pio2 + - Documentation: provide a version dropdown menu and change theme + - Add ne5pg2_ne5pg2_mg37 grid + - Fix some issues + +User interface changes: None + +PR summary: git log --oneline --first-parent [previous_tag]..master +4f1880756 Merge pull request #3439 from billsacks/versioned_docs3 +38c2b9f3c Merge pull request #3441 from jedwards4b/thunder_port +5d9ff806a Merge pull request #3451 from jedwards4b/check_inputdata_fix +3444503c2 Merge pull request #3449 from ESMCI/jgfouca/improve_case_setup_clean +02a8eebaa fix default for PIO_REARRANGER +49e82c611 Merge pull request #3447 from jedwards4b/pio2_update +ada1f0901 Merge pull request #3445 from ESMCI/fischer/ne5pg2 +bbba1afdb Merge pull request #3444 from alperaltuntas/datm_mesh_tol +acc2d239c Merge commit 'e5265f03a6a8e1806e1b463b00c006b365e149d4' +fadc3ee16 resolve conflicts +614b3117c Merge pull request #3440 from ESMCI/fischer/aq_scam +328e3872a Merge pull request #3392 from ESMCI/mvertens/new_component_directories3 +a93ced5eb need to explicitly state compiler used +bf09ad8e8 minor change to get FFLAGS +20856d803 Merge pull request #3431 from jedwards4b/complete_cprnc_fix +7fd3daebc Merge pull request #3430 from ESMCI/jgfouca/sharedlib_bld_serialization +e75142fae Merge pull request #3424 from ESMCI/cprnc_build_machine + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M config/e3sm/config_files.xml +M config/ufs/config_files.xml +M config/ufs/config_inputdata.xml +M config/ufs/machines/config_batch.xml +M config/ufs/machines/config_compilers.xml +M config/ufs/machines/config_machines.xml +M config/ufs/machines/config_workflow.xml +M config/ufs/machines/template.chgres.run +M config/ufs/machines/template.gfs_post.run +M doc/README +D doc/source/_static +A doc/source/_static/pop_ver.js +A doc/source/_templates/footer.html +A doc/source/_templates/layout.html +M doc/source/conf.py +M scripts/lib/CIME/SystemTests/pre.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/test_scheduler.py +M scripts/tests/CMakeLists.txt +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.cprnc +D src/components/data_comps/datm/doc/bookinfo.xml +D src/components/data_comps/datm/doc/datacomps.xml +D src/components/data_comps/datm/doc/intro.xml +D src/components/data_comps/datm/doc/rundocbook.csh +D src/components/data_comps/datm/doc/streams.xml +D 
src/components/data_comps/datm/doc/ug.xml +D src/components/data_comps/docn/tools/pop_som_frc/SOM.pdf +D src/components/data_comps/dwav/bld/README +R100 src/components/data_comps/datm/cime_config/buildlib src/components/data_comps_mct/datm/cime_config/buildlib +R100 src/components/data_comps/datm/cime_config/buildnml src/components/data_comps_mct/datm/cime_config/buildnml +R100 src/components/data_comps/datm/cime_config/config_archive.xml src/components/data_comps_mct/datm/cime_config/config_archive.xml +R100 src/components/data_comps/datm/cime_config/config_component.xml src/components/data_comps_mct/datm/cime_config/config_component.xml +R100 src/components/data_comps/datm/cime_config/namelist_definition_datm.xml src/components/data_comps_mct/datm/cime_config/namelist_definition_datm.xml +R100 src/components/data_comps/datm/cime_config/user_nl_datm src/components/data_comps_mct/datm/cime_config/user_nl_datm +R100 src/components/data_comps/datm/mct/atm_comp_mct.F90 src/components/data_comps_mct/datm/src/atm_comp_mct.F90 +R100 src/components/data_comps/datm/mct/datm_comp_mod.F90 src/components/data_comps_mct/datm/src/datm_comp_mod.F90 +R100 src/components/data_comps/datm/mct/datm_shr_mod.F90 src/components/data_comps_mct/datm/src/datm_shr_mod.F90 +R100 src/components/data_comps/desp/cime_config/buildlib src/components/data_comps_mct/desp/cime_config/buildlib +R099 src/components/data_comps/desp/cime_config/buildnml src/components/data_comps_mct/desp/cime_config/buildnml +R100 src/components/data_comps/desp/cime_config/config_component.xml src/components/data_comps_mct/desp/cime_config/config_component.xml +R100 src/components/data_comps/desp/cime_config/namelist_definition_desp.xml src/components/data_comps_mct/desp/cime_config/namelist_definition_desp.xml +R100 src/components/data_comps/desp/cime_config/user_nl_desp src/components/data_comps_mct/desp/cime_config/user_nl_desp +R100 src/components/data_comps/desp/desp_comp_mod.F90 src/components/data_comps_mct/desp/desp_comp_mod.F90 +R100 src/components/data_comps/desp/esp_utils.F90 src/components/data_comps_mct/desp/esp_utils.F90 +R100 src/components/data_comps/desp/mct/esp_comp_mct.F90 src/components/data_comps_mct/desp/src/esp_comp_mct.F90 +R100 src/components/data_comps/dice/cime_config/buildlib src/components/data_comps_mct/dice/cime_config/buildlib +R100 src/components/data_comps/dice/cime_config/buildnml src/components/data_comps_mct/dice/cime_config/buildnml +R100 src/components/data_comps/dice/cime_config/config_archive.xml src/components/data_comps_mct/dice/cime_config/config_archive.xml +R100 src/components/data_comps/dice/cime_config/config_component.xml src/components/data_comps_mct/dice/cime_config/config_component.xml +R100 src/components/data_comps/dice/cime_config/namelist_definition_dice.xml src/components/data_comps_mct/dice/cime_config/namelist_definition_dice.xml +R100 src/components/data_comps/dice/cime_config/user_nl_dice src/components/data_comps_mct/dice/cime_config/user_nl_dice +R100 src/components/data_comps/dice/mct/dice_comp_mod.F90 src/components/data_comps_mct/dice/src/dice_comp_mod.F90 +R100 src/components/data_comps/dice/mct/dice_flux_atmice_mod.F90 src/components/data_comps_mct/dice/src/dice_flux_atmice_mod.F90 +R100 src/components/data_comps/dice/mct/dice_shr_mod.F90 src/components/data_comps_mct/dice/src/dice_shr_mod.F90 +R100 src/components/data_comps/dice/mct/ice_comp_mct.F90 src/components/data_comps_mct/dice/src/ice_comp_mct.F90 +R100 src/components/data_comps/dlnd/cime_config/buildlib 
src/components/data_comps_mct/dlnd/cime_config/buildlib +R100 src/components/data_comps/dlnd/cime_config/buildnml src/components/data_comps_mct/dlnd/cime_config/buildnml +R100 src/components/data_comps/dlnd/cime_config/config_archive.xml src/components/data_comps_mct/dlnd/cime_config/config_archive.xml +R100 src/components/data_comps/dlnd/cime_config/config_component.xml src/components/data_comps_mct/dlnd/cime_config/config_component.xml +R100 src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml src/components/data_comps_mct/dlnd/cime_config/namelist_definition_dlnd.xml +R100 src/components/data_comps/dlnd/cime_config/user_nl_dlnd src/components/data_comps_mct/dlnd/cime_config/user_nl_dlnd +R100 src/components/data_comps/dlnd/mct/dlnd_comp_mod.F90 src/components/data_comps_mct/dlnd/src/dlnd_comp_mod.F90 +R100 src/components/data_comps/dlnd/mct/dlnd_shr_mod.F90 src/components/data_comps_mct/dlnd/src/dlnd_shr_mod.F90 +R100 src/components/data_comps/dlnd/mct/lnd_comp_mct.F90 src/components/data_comps_mct/dlnd/src/lnd_comp_mct.F90 +R100 src/components/data_comps/docn/cime_config/buildlib src/components/data_comps_mct/docn/cime_config/buildlib +R100 src/components/data_comps/docn/cime_config/buildnml src/components/data_comps_mct/docn/cime_config/buildnml +R100 src/components/data_comps/docn/cime_config/config_archive.xml src/components/data_comps_mct/docn/cime_config/config_archive.xml +R100 src/components/data_comps/docn/cime_config/config_component.xml src/components/data_comps_mct/docn/cime_config/config_component.xml +R100 src/components/data_comps/docn/cime_config/namelist_definition_docn.xml src/components/data_comps_mct/docn/cime_config/namelist_definition_docn.xml +R100 src/components/data_comps/docn/cime_config/user_nl_docn src/components/data_comps_mct/docn/cime_config/user_nl_docn +R099 src/components/data_comps/docn/mct/docn_comp_mod.F90 src/components/data_comps_mct/docn/src/docn_comp_mod.F90 +R100 src/components/data_comps/docn/mct/docn_shr_mod.F90 src/components/data_comps_mct/docn/src/docn_shr_mod.F90 +R100 src/components/data_comps/docn/mct/ocn_comp_mct.F90 src/components/data_comps_mct/docn/src/ocn_comp_mct.F90 +R100 src/components/data_comps/docn/tools/pop_som_frc/README src/components/data_comps_mct/docn/tools/pop_som_frc/README +R100 src/components/data_comps/docn/tools/pop_som_frc/SOM.doc src/components/data_comps_mct/docn/tools/pop_som_frc/SOM.doc +R100 src/components/data_comps/datm/doc/SOM.pdf src/components/data_comps_mct/docn/tools/pop_som_frc/SOM.pdf +R100 src/components/data_comps/docn/tools/pop_som_frc/pop_frc.csh src/components/data_comps_mct/docn/tools/pop_som_frc/pop_frc.csh +R100 src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlann.ncl src/components/data_comps_mct/docn/tools/pop_som_frc/pop_frc_mlann.ncl +R100 src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlt.ncl src/components/data_comps_mct/docn/tools/pop_som_frc/pop_frc_mlt.ncl +R100 src/components/data_comps/docn/tools/pop_som_frc/pop_interp.ncl src/components/data_comps_mct/docn/tools/pop_som_frc/pop_interp.ncl +R100 src/components/data_comps/docn/tools/pop_som_frc/read_from_mss.csh src/components/data_comps_mct/docn/tools/pop_som_frc/read_from_mss.csh +R100 src/components/data_comps/docn/tools/pop_som_frc/read_from_mss_month.csh src/components/data_comps_mct/docn/tools/pop_som_frc/read_from_mss_month.csh +R100 src/components/data_comps/drof/cime_config/buildlib src/components/data_comps_mct/drof/cime_config/buildlib +R100 
src/components/data_comps/drof/cime_config/buildnml src/components/data_comps_mct/drof/cime_config/buildnml +R100 src/components/data_comps/drof/cime_config/config_archive.xml src/components/data_comps_mct/drof/cime_config/config_archive.xml +R100 src/components/data_comps/drof/cime_config/config_component.xml src/components/data_comps_mct/drof/cime_config/config_component.xml +R100 src/components/data_comps/drof/cime_config/namelist_definition_drof.xml src/components/data_comps_mct/drof/cime_config/namelist_definition_drof.xml +R100 src/components/data_comps/drof/cime_config/user_nl_drof src/components/data_comps_mct/drof/cime_config/user_nl_drof +R100 src/components/data_comps/drof/mct/drof_comp_mod.F90 src/components/data_comps_mct/drof/src/drof_comp_mod.F90 +R100 src/components/data_comps/drof/mct/drof_shr_mod.F90 src/components/data_comps_mct/drof/src/drof_shr_mod.F90 +R100 src/components/data_comps/drof/mct/rof_comp_mct.F90 src/components/data_comps_mct/drof/src/rof_comp_mct.F90 +R100 src/components/data_comps/dwav/README src/components/data_comps_mct/dwav/README +R100 src/components/data_comps/dwav/cime_config/buildlib src/components/data_comps_mct/dwav/cime_config/buildlib +R100 src/components/data_comps/dwav/cime_config/buildnml src/components/data_comps_mct/dwav/cime_config/buildnml +R100 src/components/data_comps/dwav/cime_config/config_archive.xml src/components/data_comps_mct/dwav/cime_config/config_archive.xml +R100 src/components/data_comps/dwav/cime_config/config_component.xml src/components/data_comps_mct/dwav/cime_config/config_component.xml +R100 src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml src/components/data_comps_mct/dwav/cime_config/namelist_definition_dwav.xml +R100 src/components/data_comps/dwav/cime_config/user_nl_dwav src/components/data_comps_mct/dwav/cime_config/user_nl_dwav +R100 src/components/data_comps/dwav/mct/dwav_comp_mod.F90 src/components/data_comps_mct/dwav/src/dwav_comp_mod.F90 +R100 src/components/data_comps/dwav/mct/dwav_shr_mod.F90 src/components/data_comps_mct/dwav/src/dwav_shr_mod.F90 +R100 src/components/data_comps/dwav/mct/wav_comp_mct.F90 src/components/data_comps_mct/dwav/src/wav_comp_mct.F90 +R100 src/components/stub_comps/satm/cime_config/buildlib src/components/data_comps_nuopc/datm/cime_config/buildlib +A src/components/data_comps_nuopc/datm/cime_config/buildnml +A src/components/data_comps_nuopc/datm/cime_config/config_archive.xml +A src/components/data_comps_nuopc/datm/cime_config/config_component.xml +A src/components/data_comps_nuopc/datm/cime_config/namelist_definition_datm.xml +A src/components/data_comps_nuopc/datm/cime_config/user_nl_datm +R099 src/components/data_comps/datm/nuopc/atm_comp_nuopc.F90 src/components/data_comps_nuopc/datm/src/atm_comp_nuopc.F90 +R099 src/components/data_comps/datm/nuopc/datm_comp_mod.F90 src/components/data_comps_nuopc/datm/src/datm_comp_mod.F90 +R100 src/components/stub_comps/sesp/cime_config/buildlib src/components/data_comps_nuopc/dice/cime_config/buildlib +A src/components/data_comps_nuopc/dice/cime_config/buildnml +A src/components/data_comps_nuopc/dice/cime_config/config_archive.xml +A src/components/data_comps_nuopc/dice/cime_config/config_component.xml +A src/components/data_comps_nuopc/dice/cime_config/namelist_definition_dice.xml +A src/components/data_comps_nuopc/dice/cime_config/user_nl_dice +R097 src/components/data_comps/dice/nuopc/dice_comp_mod.F90 src/components/data_comps_nuopc/dice/src/dice_comp_mod.F90 +R100 
src/components/data_comps/dice/nuopc/dice_flux_atmice_mod.F90 src/components/data_comps_nuopc/dice/src/dice_flux_atmice_mod.F90 +R099 src/components/data_comps/dice/nuopc/ice_comp_nuopc.F90 src/components/data_comps_nuopc/dice/src/ice_comp_nuopc.F90 +R100 src/components/stub_comps/sglc/cime_config/buildlib src/components/data_comps_nuopc/dlnd/cime_config/buildlib +A src/components/data_comps_nuopc/dlnd/cime_config/buildnml +A src/components/data_comps_nuopc/dlnd/cime_config/config_archive.xml +A src/components/data_comps_nuopc/dlnd/cime_config/config_component.xml +A src/components/data_comps_nuopc/dlnd/cime_config/namelist_definition_dlnd.xml +A src/components/data_comps_nuopc/dlnd/cime_config/user_nl_dlnd +R100 src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90 src/components/data_comps_nuopc/dlnd/src/dlnd_comp_mod.F90 +R100 src/components/data_comps/dlnd/nuopc/lnd_comp_nuopc.F90 src/components/data_comps_nuopc/dlnd/src/lnd_comp_nuopc.F90 +R100 src/components/stub_comps/siac/cime_config/buildlib src/components/data_comps_nuopc/docn/cime_config/buildlib +A src/components/data_comps_nuopc/docn/cime_config/buildnml +A src/components/data_comps_nuopc/docn/cime_config/config_archive.xml +A src/components/data_comps_nuopc/docn/cime_config/config_component.xml +A src/components/data_comps_nuopc/docn/cime_config/namelist_definition_docn.xml +A src/components/data_comps_nuopc/docn/cime_config/user_nl_docn +R100 src/components/data_comps/docn/nuopc/docn_comp_mod.F90 src/components/data_comps_nuopc/docn/src/docn_comp_mod.F90 +R098 src/components/data_comps/docn/nuopc/ocn_comp_nuopc.F90 src/components/data_comps_nuopc/docn/src/ocn_comp_nuopc.F90 +R100 src/components/stub_comps/sice/cime_config/buildlib src/components/data_comps_nuopc/drof/cime_config/buildlib +A src/components/data_comps_nuopc/drof/cime_config/buildnml +A src/components/data_comps_nuopc/drof/cime_config/config_archive.xml +A src/components/data_comps_nuopc/drof/cime_config/config_component.xml +A src/components/data_comps_nuopc/drof/cime_config/namelist_definition_drof.xml +A src/components/data_comps_nuopc/drof/cime_config/user_nl_drof +R100 src/components/data_comps/drof/nuopc/drof_comp_mod.F90 src/components/data_comps_nuopc/drof/src/drof_comp_mod.F90 +R100 src/components/data_comps/drof/nuopc/rof_comp_nuopc.F90 src/components/data_comps_nuopc/drof/src/rof_comp_nuopc.F90 +R100 src/components/data_comps/dshr_nuopc/dshr_dfield_mod.F90 src/components/data_comps_nuopc/dshr_nuopc/dshr_dfield_mod.F90 +R100 src/components/data_comps/dshr_nuopc/dshr_fldlist_mod.F90 src/components/data_comps_nuopc/dshr_nuopc/dshr_fldlist_mod.F90 +R100 src/components/data_comps/dshr_nuopc/dshr_methods_mod.F90 src/components/data_comps_nuopc/dshr_nuopc/dshr_methods_mod.F90 +R100 src/components/data_comps/dshr_nuopc/dshr_nuopc_mod.F90 src/components/data_comps_nuopc/dshr_nuopc/dshr_nuopc_mod.F90 +R100 src/components/stub_comps/slnd/cime_config/buildlib src/components/data_comps_nuopc/dwav/cime_config/buildlib +A src/components/data_comps_nuopc/dwav/cime_config/buildnml +A src/components/data_comps_nuopc/dwav/cime_config/config_archive.xml +A src/components/data_comps_nuopc/dwav/cime_config/config_component.xml +A src/components/data_comps_nuopc/dwav/cime_config/namelist_definition_dwav.xml +A src/components/data_comps_nuopc/dwav/cime_config/user_nl_dwav +R100 src/components/data_comps/dwav/nuopc/dwav_comp_mod.F90 src/components/data_comps_nuopc/dwav/src/dwav_comp_mod.F90 +R100 src/components/data_comps/dwav/nuopc/wav_comp_nuopc.F90 
src/components/data_comps_nuopc/dwav/src/wav_comp_nuopc.F90 +R100 src/components/stub_comps/socn/cime_config/buildlib src/components/stub_comps_mct/satm/cime_config/buildlib +R100 src/components/stub_comps/satm/cime_config/buildnml src/components/stub_comps_mct/satm/cime_config/buildnml +R100 src/components/stub_comps/satm/cime_config/config_component.xml src/components/stub_comps_mct/satm/cime_config/config_component.xml +R100 src/components/stub_comps/satm/mct/atm_comp_mct.F90 src/components/stub_comps_mct/satm/src/atm_comp_mct.F90 +R100 src/components/stub_comps/srof/cime_config/buildlib src/components/stub_comps_mct/sesp/cime_config/buildlib +R100 src/components/stub_comps/sesp/cime_config/buildnml src/components/stub_comps_mct/sesp/cime_config/buildnml +R100 src/components/stub_comps/sesp/cime_config/config_component.xml src/components/stub_comps_mct/sesp/cime_config/config_component.xml +R100 src/components/stub_comps/sesp/mct/esp_comp_mct.F90 src/components/stub_comps_mct/sesp/src/esp_comp_mct.F90 +R100 src/components/stub_comps/swav/cime_config/buildlib src/components/stub_comps_mct/sglc/cime_config/buildlib +R100 src/components/stub_comps/sglc/cime_config/buildnml src/components/stub_comps_mct/sglc/cime_config/buildnml +R100 src/components/stub_comps/sglc/cime_config/config_component.xml src/components/stub_comps_mct/sglc/cime_config/config_component.xml +R100 src/components/stub_comps/sglc/mct/glc_comp_mct.F90 src/components/stub_comps_mct/sglc/src/glc_comp_mct.F90 +R100 src/components/xcpl_comps/xatm/cime_config/buildlib src/components/stub_comps_mct/siac/cime_config/buildlib +R100 src/components/stub_comps/siac/cime_config/buildnml src/components/stub_comps_mct/siac/cime_config/buildnml +R100 src/components/stub_comps/siac/cime_config/config_component.xml src/components/stub_comps_mct/siac/cime_config/config_component.xml +R100 src/components/stub_comps/siac/mct/iac_comp_mct.F90 src/components/stub_comps_mct/siac/src/iac_comp_mct.F90 +R100 src/components/xcpl_comps/xglc/cime_config/buildlib src/components/stub_comps_mct/sice/cime_config/buildlib +R100 src/components/stub_comps/sice/cime_config/buildnml src/components/stub_comps_mct/sice/cime_config/buildnml +R100 src/components/stub_comps/sice/cime_config/config_component.xml src/components/stub_comps_mct/sice/cime_config/config_component.xml +R100 src/components/stub_comps/sice/mct/ice_comp_mct.F90 src/components/stub_comps_mct/sice/src/ice_comp_mct.F90 +R100 src/components/xcpl_comps/xice/cime_config/buildlib src/components/stub_comps_mct/slnd/cime_config/buildlib +R100 src/components/stub_comps/slnd/cime_config/buildnml src/components/stub_comps_mct/slnd/cime_config/buildnml +R100 src/components/stub_comps/slnd/cime_config/config_component.xml src/components/stub_comps_mct/slnd/cime_config/config_component.xml +R100 src/components/stub_comps/slnd/mct/lnd_comp_mct.F90 src/components/stub_comps_mct/slnd/src/lnd_comp_mct.F90 +R100 src/components/xcpl_comps/xlnd/cime_config/buildlib src/components/stub_comps_mct/socn/cime_config/buildlib +R100 src/components/stub_comps/socn/cime_config/buildnml src/components/stub_comps_mct/socn/cime_config/buildnml +R100 src/components/stub_comps/socn/cime_config/config_component.xml src/components/stub_comps_mct/socn/cime_config/config_component.xml +R100 src/components/stub_comps/socn/mct/ocn_comp_mct.F90 src/components/stub_comps_mct/socn/src/ocn_comp_mct.F90 +R100 src/components/xcpl_comps/xocn/cime_config/buildlib src/components/stub_comps_mct/srof/cime_config/buildlib +R100 
src/components/stub_comps/srof/cime_config/buildnml src/components/stub_comps_mct/srof/cime_config/buildnml +R100 src/components/stub_comps/srof/cime_config/config_component.xml src/components/stub_comps_mct/srof/cime_config/config_component.xml +R100 src/components/stub_comps/srof/mct/rof_comp_mct.F90 src/components/stub_comps_mct/srof/src/rof_comp_mct.F90 +R100 src/components/xcpl_comps/xrof/cime_config/buildlib src/components/stub_comps_mct/swav/cime_config/buildlib +R100 src/components/stub_comps/swav/cime_config/buildnml src/components/stub_comps_mct/swav/cime_config/buildnml +R100 src/components/stub_comps/swav/cime_config/config_component.xml src/components/stub_comps_mct/swav/cime_config/config_component.xml +R100 src/components/stub_comps/swav/mct/wav_comp_mct.F90 src/components/stub_comps_mct/swav/src/wav_comp_mct.F90 +R100 src/components/xcpl_comps/xwav/cime_config/buildlib src/components/stub_comps_nuopc/satm/cime_config/buildlib +A src/components/stub_comps_nuopc/satm/cime_config/buildnml +A src/components/stub_comps_nuopc/satm/cime_config/config_component.xml +A src/components/stub_comps_nuopc/sesp/cime_config/buildlib +A src/components/stub_comps_nuopc/sesp/cime_config/buildnml +A src/components/stub_comps_nuopc/sesp/cime_config/config_component.xml +A src/components/stub_comps_nuopc/sglc/cime_config/buildlib +A src/components/stub_comps_nuopc/sglc/cime_config/buildnml +A src/components/stub_comps_nuopc/sglc/cime_config/config_component.xml +A src/components/stub_comps_nuopc/siac/cime_config/buildlib +A src/components/stub_comps_nuopc/siac/cime_config/buildnml +A src/components/stub_comps_nuopc/siac/cime_config/config_component.xml +A src/components/stub_comps_nuopc/sice/cime_config/buildlib +A src/components/stub_comps_nuopc/sice/cime_config/buildnml +A src/components/stub_comps_nuopc/sice/cime_config/config_component.xml +A src/components/stub_comps_nuopc/slnd/cime_config/buildlib +A src/components/stub_comps_nuopc/slnd/cime_config/buildnml +A src/components/stub_comps_nuopc/slnd/cime_config/config_component.xml +A src/components/stub_comps_nuopc/socn/cime_config/buildlib +A src/components/stub_comps_nuopc/socn/cime_config/buildnml +A src/components/stub_comps_nuopc/socn/cime_config/config_component.xml +A src/components/stub_comps_nuopc/srof/cime_config/buildlib +A src/components/stub_comps_nuopc/srof/cime_config/buildnml +A src/components/stub_comps_nuopc/srof/cime_config/config_component.xml +A src/components/stub_comps_nuopc/swav/cime_config/buildlib +A src/components/stub_comps_nuopc/swav/cime_config/buildnml +A src/components/stub_comps_nuopc/swav/cime_config/config_component.xml +A src/components/xcpl_comps_mct/xatm/cime_config/buildlib +R100 src/components/xcpl_comps/xatm/cime_config/buildnml src/components/xcpl_comps_mct/xatm/cime_config/buildnml +R100 src/components/xcpl_comps/xatm/cime_config/config_component.xml src/components/xcpl_comps_mct/xatm/cime_config/config_component.xml +R100 src/components/xcpl_comps/xatm/mct/atm_comp_mct.F90 src/components/xcpl_comps_mct/xatm/src/atm_comp_mct.F90 +A src/components/xcpl_comps_mct/xglc/cime_config/buildlib +R100 src/components/xcpl_comps/xglc/cime_config/buildnml src/components/xcpl_comps_mct/xglc/cime_config/buildnml +R100 src/components/xcpl_comps/xglc/cime_config/config_component.xml src/components/xcpl_comps_mct/xglc/cime_config/config_component.xml +R100 src/components/xcpl_comps/xglc/mct/glc_comp_mct.F90 src/components/xcpl_comps_mct/xglc/src/glc_comp_mct.F90 +A 
src/components/xcpl_comps_mct/xice/cime_config/buildlib +R100 src/components/xcpl_comps/xice/cime_config/buildnml src/components/xcpl_comps_mct/xice/cime_config/buildnml +R100 src/components/xcpl_comps/xice/cime_config/config_component.xml src/components/xcpl_comps_mct/xice/cime_config/config_component.xml +R100 src/components/xcpl_comps/xice/mct/ice_comp_mct.F90 src/components/xcpl_comps_mct/xice/src/ice_comp_mct.F90 +A src/components/xcpl_comps_mct/xlnd/cime_config/buildlib +R100 src/components/xcpl_comps/xlnd/cime_config/buildnml src/components/xcpl_comps_mct/xlnd/cime_config/buildnml +R100 src/components/xcpl_comps/xlnd/cime_config/config_component.xml src/components/xcpl_comps_mct/xlnd/cime_config/config_component.xml +R100 src/components/xcpl_comps/xlnd/mct/lnd_comp_mct.F90 src/components/xcpl_comps_mct/xlnd/src/lnd_comp_mct.F90 +A src/components/xcpl_comps_mct/xocn/cime_config/buildlib +R100 src/components/xcpl_comps/xocn/cime_config/buildnml src/components/xcpl_comps_mct/xocn/cime_config/buildnml +R100 src/components/xcpl_comps/xocn/cime_config/config_component.xml src/components/xcpl_comps_mct/xocn/cime_config/config_component.xml +R100 src/components/xcpl_comps/xocn/mct/ocn_comp_mct.F90 src/components/xcpl_comps_mct/xocn/src/ocn_comp_mct.F90 +A src/components/xcpl_comps_mct/xrof/cime_config/buildlib +R100 src/components/xcpl_comps/xrof/cime_config/buildnml src/components/xcpl_comps_mct/xrof/cime_config/buildnml +R100 src/components/xcpl_comps/xrof/cime_config/config_component.xml src/components/xcpl_comps_mct/xrof/cime_config/config_component.xml +R100 src/components/xcpl_comps/xrof/mct/rof_comp_mct.F90 src/components/xcpl_comps_mct/xrof/src/rof_comp_mct.F90 +R100 src/components/xcpl_comps/xshare/mct/dead_data_mod.F90 src/components/xcpl_comps_mct/xshare/dead_data_mod.F90 +R100 src/components/xcpl_comps/xshare/mct/dead_mct_mod.F90 src/components/xcpl_comps_mct/xshare/dead_mct_mod.F90 +R100 src/components/xcpl_comps/xshare/mct/dead_mod.F90 src/components/xcpl_comps_mct/xshare/dead_mod.F90 +A src/components/xcpl_comps_mct/xwav/cime_config/buildlib +R100 src/components/xcpl_comps/xwav/cime_config/buildnml src/components/xcpl_comps_mct/xwav/cime_config/buildnml +R100 src/components/xcpl_comps/xwav/cime_config/config_component.xml src/components/xcpl_comps_mct/xwav/cime_config/config_component.xml +R100 src/components/xcpl_comps/xwav/mct/wav_comp_mct.F90 src/components/xcpl_comps_mct/xwav/src/wav_comp_mct.F90 +A src/components/xcpl_comps_nuopc/xatm/cime_config/buildlib +A src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml +A src/components/xcpl_comps_nuopc/xatm/cime_config/config_component.xml +R100 src/components/xcpl_comps/xatm/nuopc/atm_comp_nuopc.F90 src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 +A src/components/xcpl_comps_nuopc/xglc/cime_config/buildlib +A src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml +A src/components/xcpl_comps_nuopc/xglc/cime_config/config_component.xml +R100 src/components/xcpl_comps/xglc/nuopc/glc_comp_nuopc.F90 src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +A src/components/xcpl_comps_nuopc/xice/cime_config/buildlib +A src/components/xcpl_comps_nuopc/xice/cime_config/buildnml +A src/components/xcpl_comps_nuopc/xice/cime_config/config_component.xml +R100 src/components/xcpl_comps/xice/nuopc/ice_comp_nuopc.F90 src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 +A src/components/xcpl_comps_nuopc/xlnd/cime_config/buildlib +A src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml +A 
src/components/xcpl_comps_nuopc/xlnd/cime_config/config_component.xml +R100 src/components/xcpl_comps/xlnd/nuopc/lnd_comp_nuopc.F90 src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 +A src/components/xcpl_comps_nuopc/xocn/cime_config/buildlib +A src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml +A src/components/xcpl_comps_nuopc/xocn/cime_config/config_component.xml +R100 src/components/xcpl_comps/xocn/nuopc/ocn_comp_nuopc.F90 src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 +A src/components/xcpl_comps_nuopc/xrof/cime_config/buildlib +A src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml +A src/components/xcpl_comps_nuopc/xrof/cime_config/config_component.xml +R100 src/components/xcpl_comps/xrof/nuopc/rof_comp_nuopc.F90 src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 +R100 src/components/xcpl_comps/xshare/nuopc/dead_methods_mod.F90 src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 +R100 src/components/xcpl_comps/xshare/nuopc/dead_nuopc_mod.F90 src/components/xcpl_comps_nuopc/xshare/dead_nuopc_mod.F90 +A src/components/xcpl_comps_nuopc/xwav/cime_config/buildlib +A src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml +A src/components/xcpl_comps_nuopc/xwav/cime_config/config_component.xml +R100 src/components/xcpl_comps/xwav/nuopc/wav_comp_nuopc.F90 src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 +M src/drivers/mct/cime_config/config_component.xml +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/Makefile.am +A src/externals/pio2/cmake/Makefile.am +M src/externals/pio2/configure.ac +M src/externals/pio2/doc/source/base.txt +M src/externals/pio2/doc/source/netcdf_integration.txt +M src/externals/pio2/examples/Makefile.am +M src/externals/pio2/examples/c/CMakeLists.txt +M src/externals/pio2/examples/c/Makefile.am +M src/externals/pio2/examples/c/darray_async.c +M src/externals/pio2/src/clib/CMakeLists.txt +M src/externals/pio2/src/clib/Makefile.am +M src/externals/pio2/src/clib/pio.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_darray_int.c +M src/externals/pio2/src/clib/pio_file.c +M src/externals/pio2/src/clib/pio_internal.h +M src/externals/pio2/src/clib/pio_lists.c +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_rearrange.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/Makefile.am +M src/externals/pio2/src/gptl/Makefile.am +M src/externals/pio2/src/ncint/ncint_pio.c +M src/externals/pio2/src/ncint/ncintdispatch.c +M src/externals/pio2/tests/cunit/CMakeLists.txt +M src/externals/pio2/tests/cunit/Makefile.am +M src/externals/pio2/tests/cunit/run_tests.sh +A src/externals/pio2/tests/cunit/test_async_1d.c +M src/externals/pio2/tests/cunit/test_darray.c +M src/externals/pio2/tests/cunit/test_spmd.c +M src/externals/pio2/tests/fncint/Makefile.am +M src/externals/pio2/tests/ncint/Makefile.am +A src/externals/pio2/tests/ncint/pio_err_macros.h +A src/externals/pio2/tests/ncint/run_perf.sh +M src/externals/pio2/tests/ncint/run_tests.sh +A src/externals/pio2/tests/ncint/tst_async_multi.c +A src/externals/pio2/tests/ncint/tst_ncint_async_perf.c +A src/externals/pio2/tests/ncint/tst_ncint_perf.c +A src/externals/pio2/tests/ncint/tst_pio_async.c +M src/externals/pio2/tests/ncint/tst_pio_udf.c +M src/externals/pio2/tests/unit/Makefile.am +M src/share/util/shr_infnan_mod.F90.in +M tools/cprnc/CMakeLists.txt + 
+====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 2-27-2020 +Tag: cime5.8.16 +Answer Changes: None +Tests: scripts_regression_tests, aux_cam using fv3, various hand tests. +Dependencies: + +Brief Summary: + - Add SE grids for CAM. + - Improved cprnc build process. + - FV3 port to CAM. + - Fix izumi submit. + - maint-5.6 branch merge (2020-02-20). + - Fix the -n parameter to mpirun in chgres workflow template. + - Initial port of CIME to Hera for the UFS MR Weather App. + - Add rpath for netcdf. + - Update laramie modules. + - Fix typo. + - Fix pylint issue. + - Add ic_filepath to config_inputdata. + - Change homebrew to macos. + - Non-netcdf files should not be compared with cprnc. + - Fix an issue in downloading inputdata. + - Update cheyenne build. + - More updates of macros. + - Update macros. + - Strictly ufs updates. + - Fix for post-processing to work with netcdf files. + - Fix auto-triage. + - Add D0 to all namelist values added in PR 3231 and 3251. + - Revert "Update nuopc esmf". + - More fixes for UFS release. + - Put MAX_MPITASKS_PER_NODE back to config_workflow.xml. + - Fix chgres issue on cheyenne. + - Update for UFS model. + - Allow writing T-forcing cplhist files even when running with SGLC. + - Update nuopc esmf. + - Fix misspelling in comment. + - Fix issue with resubmit on systems that use ssh. + - Move orbital updates to nuopc caps and update nuopc dmodels. + - Latest developments from the CIME_MODEL=ufs project. + - Fix issue when input_ic_root is None. + - Fix misleading casestatus output. + - More work related to UFS. + - Update description for FORCE_BUILD_SMP. + - Revert "update esmf and netcdf on cheyenne". + - Remove redundant logic in _get_extension. + - Pop hist dd fix. + - Homebrew build fix. + - Cheyenne machine updates. + - Correction in get_model(). + - Update ufs config_files. + - Implement automatic test triaging in e3sm test infrastructure. + - Rename fv3gfs to ufsatm. + - Extend archiving support to accept a regex pattern as well as a cesm style file extension. + - Updates needed to get aux_clm tests working with CMEPS. + - Add troubleshooting doc. + - Update input directory for UFS application. + - Changes to build fv3gfs standalone using cime. + - Support new inputdata for ufs model. + - Fix crash caused by an empty .cesm_proj file. + - Improve scripts_regression_tests test cleanup.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +b2e292597 Merge pull request #3420 from ESMCI/fischer/segrids +5f75c600f Merge pull request #3400 from ESMCI/wpcoomb/improve_cprnc_build_process +f6395d42d Merge pull request #3415 from jtruesdal/fv3port +894562c87 Merge pull request #3413 from jedwards4b/fix_izumi_submit +1d406ebfa Merge pull request #3409 from ESMCI/fischer/m_maint5.6 +a1ee2a564 Merge pull request #3405 from ESMCI/rsdunlapiv/chgres_mpirun_fix +d7d30df71 Merge pull request #3407 from ESMCI/rsdunlapiv/heraport2 +3b42be3f0 Merge pull request #3406 from jedwards4b/mac_build_fix +1f64cbe2a Merge pull request #3402 from ESMCI/laramie_update +2a8bbd8e5 fix typo +7fe3af99b fix pylint issue +8884e2acf Merge pull request #3398 from jedwards4b/ufs_autodownload +39ce78718 change homebrew to macos +3835a137c Merge pull request #3397 from jedwards4b/dont_compare_nonnetcdf_hists +3399dd0be Merge pull request #3395 from jedwards4b/fix_issue_in_download +ae6b1a267 update cheyenne build for compatibility +e11de4814 more updates for macos +0c262b64b updates for macos +7339b0b66 strictly ufs updates +5b59975cf fix for post-processing to work with netcdf files +b5c3ca048 Merge pull request #3388 from ESMCI/jgfouca/fix_auto_triage +4c26f56e6 Merge pull request #3383 from cacraigucar/add_namelist_double +e8e9f655a Merge pull request #3386 from ESMCI/revert-3377-update_nuopc_esmf +e3b71c2f3 Merge pull request #3384 from ESMCI/uturuncoglu/ufs_release +5fe1b20e2 put MAX_MPITASKS_PER_NODE back to config_workflow.xml +846d089d8 fix chgres issue on cheyenne +f09afb134 Merge pull request #3379 from ESMCI/ufs_release +ce075935d Merge pull request #3358 from billsacks/t_forcing_cplhist_with_sglc_master +6cfcb072d Merge pull request #3377 from jedwards4b/update_nuopc_esmf +0d7fc13ab Fix misspelling in comment +307d4bb4a Merge pull request #3375 from jedwards4b/ssh_resubmit_fix +4de404425 Merge pull request #3373 from ESMCI/mvertens/update_nuopc_dmodels +d3624d963 Merge pull request #3371 from ESMCI/ufs_release +44673e0b8 fix issue when input_ic_root is None +8b8b1df5a Merge pull request #3354 from ESMCI/wpcoomb/fix_misleading_output_casestatus +bd4525df7 Merge pull request #3353 from ESMCI/ufs_release +e6ddef63f Merge pull request #3351 from ESMCI/jgfouca/update_force_build_smp_desc +5c32f2315 Merge pull request #3350 from ESMCI/revert-3343-cheyenne_machine_updates +27fd20a50 Merge pull request #3348 from billsacks/cleanup_extension_dd_match +d261fea15 Merge pull request #3346 from jedwards4b/pop_hist_dd_fix +207d1ab7d Merge pull request #3345 from jedwards4b/homebrew_build_fix +27b6d3405 Merge pull request #3343 from jedwards4b/cheyenne_machine_updates +b373a0240 correction in get_model() +2fd32ebef update ufs config_files +6d5b5ba84 Merge pull request #3340 from ESMCI/jgfouca/cime_auto_triage +5a35958e2 Merge pull request #3337 from jedwards4b/rename_fv3gfs_ufsatm +047f4ffbf Merge pull request #3326 from jedwards4b/archive_extensions +2f3876d4d Merge pull request #3335 from ESMCI/mvertens/updates_for_nuopc_regional +33ad6e314 Merge pull request #3332 from billsacks/doc_troubleshooting +00cfb7ed9 Merge pull request #3327 from jedwards4b/ufs_release_3 +d13f34f15 Merge pull request #3322 from jedwards4b/fv3gfs_cime_build2 +93931a0ec Merge pull request #3323 from jedwards4b/ufs_inputdata +c00ce8ff4 Merge pull request #3319 from ESMCI/wpcoomb/fix_empty_.cesm_proj_crash +2a7d36454 Merge pull request #3317 from ESMCI/jgfouca/better_regr_test_cleanup + +Modified 
files: git diff --name-status [previous_tag] +M config/cesm/config_archive.xml +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/config_grids_common.xml +M config/cesm/config_grids_mct.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_workflow.xml +M config/cesm/machines/cylc_suite.rc.template +M config/ufs/config_files.xml +M config/ufs/config_grids.xml +D config/ufs/config_grids_common.xml +D config/ufs/config_grids_mct.xml +D config/ufs/config_grids_nuopc.xml +M config/ufs/config_inputdata.xml +M config/ufs/machines/config_batch.xml +M config/ufs/machines/config_compilers.xml +M config/ufs/machines/config_machines.xml +M config/ufs/machines/config_workflow.xml +A config/ufs/machines/template.chgres.run +A config/ufs/machines/template.gfs_post.run +M config/xml_schemas/config_compilers_v2.xsd +M config/xml_schemas/config_inputdata.xsd +M config/xml_schemas/config_workflow.xsd +M config/xml_schemas/testlist.xsd +M doc/source/users_guide/troubleshooting.rst +M scripts/Tools/Makefile +M scripts/Tools/archive_metadata +M scripts/Tools/bless_test_results +M scripts/Tools/component_compare_copy +M scripts/Tools/jenkins_generic_job +M scripts/Tools/jenkins_script +M scripts/Tools/wait_for_tests +M scripts/create_test +M scripts/lib/CIME/Servers/ftp.py +M scripts/lib/CIME/Servers/wget.py +M scripts/lib/CIME/SystemTests/mvk.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/archive.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/XML/env_archive.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_workflow.py +M scripts/lib/CIME/XML/inputdata.py +M scripts/lib/CIME/XML/testlist.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/jenkins_generic_job.py +M scripts/query_config +M scripts/tests/scripts_regression_tests.py +A src/build_scripts/buildlib.cprnc +M src/components/data_comps/datm/cime_config/config_component.xml +M src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps/datm/nuopc/atm_comp_nuopc.F90 +M src/components/data_comps/datm/nuopc/datm_comp_mod.F90 +D src/components/data_comps/datm/nuopc/datm_shr_mod.F90 +M src/components/data_comps/dice/nuopc/dice_comp_mod.F90 +M src/components/data_comps/dice/nuopc/dice_flux_atmice_mod.F90 +D src/components/data_comps/dice/nuopc/dice_shr_mod.F90 +M src/components/data_comps/dice/nuopc/ice_comp_nuopc.F90 +M src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90 +D src/components/data_comps/dlnd/nuopc/dlnd_shr_mod.F90 +M src/components/data_comps/dlnd/nuopc/lnd_comp_nuopc.F90 +M src/components/data_comps/docn/mct/docn_comp_mod.F90 +M src/components/data_comps/docn/nuopc/docn_comp_mod.F90 +D src/components/data_comps/docn/nuopc/docn_shr_mod.F90 +M src/components/data_comps/docn/nuopc/ocn_comp_nuopc.F90 +M 
src/components/data_comps/drof/cime_config/config_component.xml +M src/components/data_comps/drof/cime_config/namelist_definition_drof.xml +M src/components/data_comps/drof/nuopc/drof_comp_mod.F90 +D src/components/data_comps/drof/nuopc/drof_shr_mod.F90 +M src/components/data_comps/drof/nuopc/rof_comp_nuopc.F90 +A src/components/data_comps/dshr_nuopc/dshr_dfield_mod.F90 +A src/components/data_comps/dshr_nuopc/dshr_fldlist_mod.F90 +M src/components/data_comps/dshr_nuopc/dshr_methods_mod.F90 +M src/components/data_comps/dshr_nuopc/dshr_nuopc_mod.F90 +M src/components/data_comps/dwav/nuopc/dwav_comp_mod.F90 +D src/components/data_comps/dwav/nuopc/dwav_shr_mod.F90 +M src/components/data_comps/dwav/nuopc/wav_comp_nuopc.F90 +M src/components/xcpl_comps/xatm/nuopc/atm_comp_nuopc.F90 +M src/components/xcpl_comps/xglc/nuopc/glc_comp_nuopc.F90 +M src/components/xcpl_comps/xice/nuopc/ice_comp_nuopc.F90 +M src/components/xcpl_comps/xlnd/nuopc/lnd_comp_nuopc.F90 +M src/components/xcpl_comps/xocn/nuopc/ocn_comp_nuopc.F90 +M src/components/xcpl_comps/xrof/nuopc/rof_comp_nuopc.F90 +M src/components/xcpl_comps/xwav/nuopc/wav_comp_nuopc.F90 +M src/drivers/mct/cime_config/buildexe +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/prep_glc_mod.F90 +M src/drivers/mct/main/seq_flux_mct.F90 +M src/share/timing/perf_mod.F90 +M tools/cprnc/CMakeLists.txt + +====================================================================== + +====================================================================== + +Originator: Bill Sacks +Date: 11-26-2019 +Tag: cime5.8.15 +Answer Changes: None +Tests: scripts_regression_tests, aux_clm +Dependencies: none + +Brief Summary: + - Modifications for UFS Global Mid-range Weather application + - Changes for nuopc mediator updates + - Fix clean build for CLM + - Fix dlnd for gnu + - Documentation updates for building pFUnit + +User interface changes: none + +PR summary: git log --oneline --first-parent [previous_tag]..master +67a54f588 Merge pull request #3315 from billsacks/pass_comp_lnd_to_build +8884a9e2f config_compilers had a duplicate entry for athena +4e556447e Merge pull request #3311 from billsacks/doc_pfunit_build +3c00449df Merge pull request #3312 from ESMCI/mvertens/update_nuopc_externals +7365a820b Merge pull request #3309 from ESMCI/fischer/T1850G1_fix +e67646129 Merge pull request #3306 from ESMCI/ufs-release +1c04f4453 Update location for obtaining pFUnit (#3300) + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M config/cesm/machines/config_compilers.xml +A config/ufs/config_archive.xml +A config/ufs/config_files.xml +A config/ufs/config_grids.xml +A config/ufs/config_grids_common.xml +A config/ufs/config_grids_mct.xml +A config/ufs/config_grids_nuopc.xml +A config/ufs/config_inputdata.xml +A config/ufs/machines/Depends.cray +A config/ufs/machines/Depends.gnu +A config/ufs/machines/Depends.intel +A config/ufs/machines/README +A config/ufs/machines/config_batch.xml +A config/ufs/machines/config_compilers.xml +A config/ufs/machines/config_machines.xml +A config/ufs/machines/config_pio.xml +A config/ufs/machines/config_workflow.xml +A config/ufs/machines/cylc_suite.rc.template +A config/ufs/machines/template.case.run +A config/ufs/machines/template.case.test +A config/ufs/machines/template.st_archive +A 
config/ufs/machines/userdefined_laptop_template/README.md +A config/ufs/machines/userdefined_laptop_template/config_compilers.xml +A config/ufs/machines/userdefined_laptop_template/config_machines.xml +A config/ufs/machines/userdefined_laptop_template/config_pes.xml +M doc/source/users_guide/unit_testing.rst +M scripts/Tools/Makefile +M scripts/create_test +M scripts/lib/CIME/build.py +M scripts/lib/CIME/utils.py +M scripts/tests/CTestConfig.cmake +M src/components/data_comps/dlnd/mct/dlnd_comp_mod.F90 +M src/share/nuopc/seq_drydep_mod.F90 +M src/share/nuopc/shr_carma_mod.F90 +M src/share/nuopc/shr_fire_emis_mod.F90 +M src/share/nuopc/shr_megan_mod.F90 +M src/share/nuopc/shr_ndep_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 11-11-2019 +Tag: cime5.8.14 +Answer Changes: None (except for X compsets with nuopc) +Tests: scripts_regression_tests, ERIO.f09_g16.X.cheyenne_intel +Dependencies: + +Brief Summary: + - Revert "initialize albdif and albdir in seq_flux_mct" + - Initialize albdif and albdir in seq_flux_mct. + - Clean up env_batch. + - Test too short. + - set mpilib for custom test. + - Shorten the cime developer tests. + - Update time of day string in ERI test. + - Shr code cleanup. + - Update for compatibility with latest CMEPS. + - Omit broken netcdf4 on cheyenne with mpt. + - Exclude .ref1 and .ref2 from TestStatus files and move get_test_status_files to a common place. + - Fix issue with e3sm field. + - Minor fix to e3sm_cime_mgmt to properly handle submodules. + - Refactor how run_cmd handles strings. + - ACME merge 2019-10-25. + + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +cad8477bb Merge pull request #3297 from ESMCI/revert-3296-master_seq_flux_init +5f7bfb6a1 Merge pull request #3296 from jedwards4b/master_seq_flux_init +96455e0d6 Merge pull request #3292 from ESMCI/jgfouca/cleanup_env_batch +73dddb262 test too short +ff5a4c07c set mpilib for custom test +de2f7c3bb Merge pull request #3289 from jedwards4b/shorten_cime_developer +62a77d1d5 Merge pull request #3288 from ESMCI/fischer/eri_test +0bd9a353e Merge pull request #3285 from jedwards4b/shr_code_cleanup +1529bac14 Merge pull request #3283 from ESMCI/nems_integration_fixed2 +edb013cb7 Merge pull request #3284 from jedwards4b/ERIO_on_cheyenne +cafc94139 Merge pull request #3282 from jedwards4b/fix_get_test_status_files +0945a4760 Merge pull request #3277 from jedwards4b/demand_better +db4d8df17 Minor fix to e3sm_cime_mgmt to properly handle submodules +4d6798451 Merge pull request #3273 from ESMCI/jgfouca/run_cmd_str_refactor +bb9fc0a99 Merge pull request #3272 from ESMCI/jgfouca/branch-for-acme-split-2019-10-25 + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/config_machines.xml +M config/e3sm/allactive/config_pesall.xml +M config/e3sm/config_grids.xml +M config/e3sm/machines/config_batch.xml +M config/e3sm/machines/config_compilers.xml +M config/e3sm/machines/config_machines.xml +M config/e3sm/tests.py +M scripts/create_test +M scripts/lib/CIME/Servers/wget.py +M scripts/lib/CIME/SystemTests/eri.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/compare_test_results.py +M
scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_utils.py +M scripts/lib/CIME/utils.py +M scripts/lib/e3sm_cime_mgmt.py +M scripts/lib/get_tests.py +M scripts/tests/scripts_regression_tests.py +M src/components/data_comps/dlnd/mct/dlnd_comp_mod.F90 +M src/components/data_comps/docn/nuopc/docn_comp_mod.F90 +M src/components/data_comps/docn/nuopc/docn_shr_mod.F90 +M src/components/xcpl_comps/xlnd/nuopc/lnd_comp_nuopc.F90 +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/prep_aoflux_mod.F90 +M src/drivers/mct/main/prep_rof_mod.F90 +M src/drivers/mct/shr/seq_flds_mod.F90 +M src/drivers/mct/shr/seq_timemgr_mod.F90 +M src/share/nuopc/glc_elevclass_mod.F90 +M src/share/nuopc/seq_drydep_mod.F90 +M src/share/streams/shr_strdata_mod.F90 +M src/share/util/mct_mod.F90 +M src/share/util/shr_const_mod.F90 +M src/share/util/shr_flux_mod.F90 +M src/share/util/shr_mct_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 10-24-2019 +Tag: cime5.8.13 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Add a raw xml option to the query_config interface. + - Add --only-job option to case.submit. + +User interface changes: + - Add --xml to query_config. + - Add option --only-job to case.submit. + - Fix an issue with Lockedfile error when case.submit is used with the --no-batch flag. + - Change globally uniform SST mode from AQP11 to AQPCONST. + +PR summary: git log --oneline --first-parent [previous_tag]..master +f22ebdafa Merge pull request #3268 from jedwards4b/add_xml_to_query_config +ab4d53999 Merge pull request #3267 from jedwards4b/no_workflow +4cd12246f Merge pull request #3265 from jedwards4b/fix_batch_lock_issue +a45130996 Merge pull request #3257 from ESMCI/whannah/add-AQPCONST-mode + +Modified files: git diff --name-status [previous_tag] +M scripts/Tools/case.submit +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/utils.py +M scripts/query_config +M src/components/data_comps/docn/cime_config/buildnml +M src/components/data_comps/docn/cime_config/config_component.xml +M src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +M src/components/data_comps/docn/mct/docn_comp_mod.F90 +M src/components/data_comps/docn/mct/docn_shr_mod.F90 +M src/drivers/mct/cime_config/config_component.xml +M src/share/util/shr_scam_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 10-15-2019 +Tag: cime5.8.12 +Answer Changes: None +Tests: ERS.T5_T5_mg37.QPC4, scripts_regression_tests +Dependencies: + +Brief Summary: + - Add missing domain files for T5_T5_mg37 grid. + - Merge maint-5.6 to master 2019-11-10. + - Updates for CAM testing. + - Fix nuopc esmf paths.
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +7f1d48fe9 Merge pull request #3263 from ESMCI/fischer/T5_grid_fix +3468404c1 Merge pull request #3260 from ESMCI/fischer/maint-5.6_merge +7070d8a5c Merge pull request #3256 from ESMCI/fischer/cam_testing +4a9b008f2 fix nuopc esmf paths + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_workflow.xml +M config/e3sm/machines/config_workflow.xml +M config/xml_schemas/config_workflow.xsd +M scripts/create_newcase +M scripts/lib/CIME/XML/workflow.py +M scripts/lib/CIME/case/case.py +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/main/seq_flux_mct.F90 +M src/share/util/shr_flux_mod.F90 + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 10-01-2019 +Tag: cime5.8.11 +Answer Changes: None +Tests: scripts_regression_tests, Hand tested SMS_D_Vnuopc.f09_g17.X.cheyenne_intel +Dependencies: + +Brief Summary: + - Correct the way esmf is built to avoid library mismatch. + - Update gnu compiler on cheyenne, bit for bit. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +693383167 Merge pull request #3252 from jedwards4b/nuopc_esmf_build_correction +fcc629127 Merge pull request #3253 from ESMCI/fischer/gnu_fix + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/config_machines.xml +M scripts/Tools/Makefile + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 9-26-2019 +Tag: cime5.8.10 +Answer Changes: None +Tests: scripts_regression_tests, tested with nuopc +Dependencies: + +Brief Summary: + - Don't call mpi_bcast on null communicator. + - Fix nag compiler flags. + - Fix issue with workflow prior to run on master. + - Merge maint-5.6. + - Update esmf paths on cheyenne for nuopc. + - Fix issue in scripts_regression_tests.py. + - Fix issue 3232 + - Update DROF(JRA) to MOM6 mapping file. + - acme split 2019-09-05 merge. + - Clean up some issues in scripts_regression_tests.py. + - Add /cluster/torque/bin to path on izumi. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +809bcb3b8 Merge pull request #3248 from jedwards4b/mct_null_comm_fix +2a2b9ecfe Merge pull request #3244 from ESMCI/fischer/cam_nag_fix +5f4f1c6da Merge pull request #3243 from jedwards4b/workflow_before_run_master +23409d7e9 Merge pull request #3241 from ESMCI/fischer/maint-5.6_09182019 +47b630012 Merge pull request #3239 from jedwards4b/cheyenne_esmf_update +00d664666 fix issue in scripts_regression_tests.py +474dc5fc0 fix issue 3232 +209b1e19b Merge pull request #3233 from alperaltuntas/update_TL319_t061 +35bc87f4f Merge pull request #3230 from ESMCI/jgfouca/branch-for-acme-split-2019-09-05 +d2f7157b8 Merge pull request #3227 from jedwards4b/srt_update +8bffca6d3 Merge pull request #3221 from billsacks/izumi_add_to_path + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_archive.xml +M config/cesm/config_grids_common.xml +M config/cesm/machines/Depends.nag +M config/cesm/machines/config_machines.xml +M config/e3sm/allactive/config_compsets.xml +M config/e3sm/allactive/config_pesall.xml +M config/e3sm/machines/config_batch.xml +M config/e3sm/machines/config_machines.xml +M config/e3sm/machines/config_pio.xml +M config/e3sm/tests.py +M config/xml_schemas/config_machines.xsd +M scripts/Tools/Makefile +M scripts/Tools/archive_metadata +M scripts/Tools/preview_run +M scripts/climate_reproducibility/README.md +M scripts/lib/CIME/SystemTests/mvk.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/machines.py +A scripts/lib/CIME/XML/stream.py +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/utils.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.pio +M src/components/data_comps/datm/cime_config/buildnml +M src/components/data_comps/dice/cime_config/buildnml +M src/components/data_comps/dlnd/cime_config/buildnml +M src/components/data_comps/docn/cime_config/buildnml +M src/components/data_comps/docn/cime_config/config_component.xml +M src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +M src/components/data_comps/docn/mct/docn_comp_mod.F90 +M src/components/data_comps/docn/mct/docn_shr_mod.F90 +M src/components/data_comps/drof/cime_config/buildnml +M src/components/data_comps/dwav/cime_config/buildnml +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/config_component_e3sm.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +M src/drivers/mct/cime_config/testdefs/testlist_drv.xml +M src/drivers/mct/main/cime_comp_mod.F90 +M src/drivers/mct/main/seq_flux_mct.F90 +M src/drivers/mct/shr/seq_infodata_mod.F90 +M src/share/streams/shr_tInterp_mod.F90 +M src/share/util/shr_orb_mod.F90 +M src/share/util/shr_scam_mod.F90 +
+====================================================================== + +====================================================================== + +Originator: Chris Fischer Date: 8-29-2019 Tag: cime5.8.9 Answer Changes: None @@ -10,7 +7631,7 @@ Dependencies: Brief Summary: - Fix nag compiler for CAM. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master - 6026ca341 Merge pull request #3219 from ESMCI/fischer/nag @@ -23,11 +7644,11 @@ M config/cesm/machines/Depends.nag ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-26-2019 Tag: cime5.8.8 Answer Changes: None -Tests: scripts_regression_tests, aux_mom +Tests: scripts_regression_tests, aux_mom Dependencies: Brief Summary: @@ -48,7 +7669,7 @@ Brief Summary: - ACME merge 2019-07-22 - Remove test SMS.T42_T42.S when testing with nuopc driver. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master a9c9289f1 Merge branch 'maint-5.6' @@ -212,7 +7833,7 @@ M src/share/util/shr_pio_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 7-19-209 Tag: cime5.8.7 Answer Changes: Climate changing for CAM ne120, ne120pg3, ne0CONUS grids @@ -224,7 +7845,7 @@ Brief Summary: - Updates and fixes for MOM6 in cime. - Maint 5.6 merge. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 6efe21745 Merge pull request #3177 from ESMCI/fischer/SE_gx1v7MaskFix @@ -279,17 +7900,17 @@ M src/share/util/shr_file_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 7-16-2019 Tag: cime5.8.6 Answer Changes: None -Tests: scripts_regression_tests, SMS_Ld1.f19_f19_mg17.FXSD.cheyenne_intel.cam-outfrq1d +Tests: scripts_regression_tests, SMS_Ld1.f19_f19_mg17.FXSD.cheyenne_intel.cam-outfrq1d Dependencies: Brief Summary: - Add back ESMF modules on cheyenne. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 07f0945e6 Merge pull request #3172 from ESMCI/fischer/fix_esmfmodules @@ -303,11 +7924,11 @@ M config/cesm/machines/config_machines.xml ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 7-12-2019 Tag: cime5.8.5 Answer Changes: bit for bit -Tests: scripts_regression_tests on izumi, SMS_Ln9.ne0CONUSne30x8_ne0CONUSne30x8_mg17.F2000climo +Tests: scripts_regression_tests on izumi, SMS_Ln9.ne0CONUSne30x8_ne0CONUSne30x8_mg17.F2000climo Dependencies: Brief Summary: @@ -317,7 +7938,7 @@ Brief Summary: - Case insensitive user_nl. - ACME merge 2019-06-25.
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master ba5ce7f68 Merge pull request #3167 from ESMCI/fischer/SE_conusmt12 @@ -349,11 +7970,11 @@ M src/share/util/shr_flux_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-24-2019 Tag: cime5.8.4 Answer Changes: None -Tests: scripts_regression_tests, code checker +Tests: scripts_regression_tests, code checker Dependencies: Brief Summary: @@ -367,7 +7988,7 @@ Brief Summary: - ACME merge 2019-06-10 - Change nodefail test to allow for nuopc file name difference from mct. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 8f0e3d3 Merge pull request #3150 from ESMCI/theia_to_slurm @@ -597,8 +8218,8 @@ A src/externals/pio2/tests/unit/run_tests.sh ====================================================================== -Originator: Chris Fischer -Date: 6-7-2019 +Originator: Chris Fischer +Date: 6-7-2019 Tag: cime5.8.3 Answer Changes: [None, Round Off, Climate Changing] Tests: scripts_regression_tests.py with CIME_DRIVER=nuopc, @@ -609,8 +8230,8 @@ Brief Summary: - Add logic to control activation of glcshelf_c2_ice. - Fix nuopc build, update for stamepede esmf lib. - Fixes for nuopc scripts_regressions_tests. - - Merge branch master. - - Merge branch master. + - Merge branch master. + - Merge branch master. - Merge acme split 2019-5-28 - Remove the nuopc driver and mediator to a separate repository https://github.com/ESCOMP/CMEPS.git - New documentation explaining how to invoked the --user-mods-dir option to create_newcase. @@ -633,7 +8254,7 @@ Brief Summary: - Update modules cheyenne. - Poperly link nag f90. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 9f39361 Add logic to control activation of glcshelf_c2_ice (#3131) @@ -850,7 +8471,7 @@ M tools/statistical_ensemble_test/README ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 4-23-2019 Tag: cime5.8.2 Answer Changes: None @@ -866,7 +8487,7 @@ Brief Summary: - Introduces a new stub IAC. - Master merge to nuopc cmeps. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 57cf4a5 Merge pull request #3086 from ESMCI/fischer/ne_conus @@ -1018,11 +8639,11 @@ M src/share/util/shr_pio_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 04-08-2019 Tag: cime5.8.1 Answer Changes: None -Tests: scripts_regression_tests, many create_newcase with mangled compset names +Tests: scripts_regression_tests, many create_newcase with mangled compset names hand test xmllint Dependencies: @@ -1042,7 +8663,7 @@ Brief Summary: - PET and ERP tests were not setting compile_threaded correctly. - Implement 'share' field of test suites. -User interface changes: +User interface changes: - Stub components are now optional in compset long names. Also there is less order dependency. 
PR summary: git log --oneline --first-parent [previous_tag]..master @@ -1128,7 +8749,7 @@ M tools/mapping/gen_mapping_files/runoff_to_ocn/src/Makefile ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 03-12-2019 Tag: cime5.8.0 Answer Changes: None @@ -1139,7 +8760,7 @@ Brief Summary: - Merge maint-5.6 branch. - Cleanup of build. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 5f939ae Merge pull request #3039 from ESMCI/maint-5.6 @@ -1280,7 +8901,7 @@ Brief Summary: - Fix test issue for cesm. - Better handling of file permissions when copying files. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 5f31182 Merge pull request #3014 from jedwards4b/config_grids_v2.1 @@ -1471,11 +9092,11 @@ A tools/cprnc/test_inputs/missing_variables.nc ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 1-15-2019 Tag: cime5.7.7 Answer Changes: None -Tests: scripts_regression_tests +Tests: scripts_regression_tests Dependencies: Brief Summary: @@ -1491,7 +9112,7 @@ Brief Summary: - docn_comp_mod needs fix for optional variable. - Remove support for CLM4.0. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master e5c2368 Minor fix to e3sm_cime_mgmt to properly handle exe files @@ -1589,11 +9210,11 @@ M src/share/util/shr_wv_sat_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 01-07-2019 Tag: cime5.7.6 Answer Changes: None -Tests: scripts_regression_tests.py, hand testing of cesm cases +Tests: scripts_regression_tests.py, hand testing of cesm cases scripts_regression_tests.py with PIO_VERSION=2 code-checker, by-hand Dependencies: @@ -1619,7 +9240,7 @@ Brief Summary: - Merge latest maint-5.6 changes into master. - Fix new pylint errors. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 486c579 Merge pull request #2966 from jedwards4b/cmpgen_needs_write @@ -1907,20 +9528,20 @@ Brief Summary: Miscellaneous minor bug fixes, including getting User interface changes: none -Originator: Chris Fischer +Originator: Chris Fischer Date: 11-29-2018 Tag: cime_cesm2_1_rel_06 Answer Changes: None -Tests: scripts_regression_tests +Tests: scripts_regression_tests Dependencies: Brief Summary: - Fix new pylint errors. - Update addmetadata for POP-ECT tests. 
-User interface changes: +User interface changes: -PR summary: git log --oneline --first-parent [previous_tag]..master +PR summary: git log --oneline --first-parent [previous_tag]..master 5a86646 Merge pull request #2921 from jedwards4b/fixpylint3errors 98821ff Merge pull request #2919 from ESMCI/fischer/addmetadata @@ -1945,7 +9566,7 @@ M tools/statistical_ensemble_test/addmetadata.sh ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer PR summary: git log --oneline --first-parent [previous_tag]..master e882c1c06 Merge pull request #2897 from billsacks/fix_histutils_regex fc14d5870 Merge pull request #2895 from billsacks/fix_lnd2glc_averaged_now diff --git a/ChangeLog_template b/ChangeLog_template index e368a3a8ccb..27821ec1540 100644 --- a/ChangeLog_template +++ b/ChangeLog_template @@ -1,19 +1,18 @@ ====================================================================== -Originator: -Date: +Originator: +Date: Tag: cimeX.Y.Z Answer Changes: [None, Round Off, Climate Changing] -Tests: +Tests: Dependencies: Brief Summary: -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master Modified files: git diff --name-status [previous_tag] ====================================================================== - diff --git a/LICENSE.TXT b/LICENSE.TXT index 63bfa9ae5d3..2424196e5a6 100644 --- a/LICENSE.TXT +++ b/LICENSE.TXT @@ -1,12 +1,9 @@ Copyright (c) 2017, University Corporation for Atmospheric Research (UCAR) All rights reserved. and -Copyright (c) 2017, Sandia Corporation. +Copyright (c) 2017, Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. - and -Copyright (c) 2017, UChicago Argonne, LLC, All Rights Reserved -under Contract No. DE-AC02-06CH11357 with the Department of Energy Developed by: University Corporation for Atmospheric Research - National Center for Atmospheric Research diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000000..984c159960f --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +recursive-include CIME * +recursive-exclude CIME/tests * +recursive-exclude **/__pycache__ * diff --git a/README.md b/README.md index 7ce3cc3c4ff..d4162788230 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,14 @@ # cime Common Infrastructure for Modeling the Earth -CIME, pronounced “SEAM”, contains the support scripts (configure, build, run, test), data models, essential -utility libraries, a “main” and other tools that are needed to build a single-executable coupled Earth System Model. -CIME is available in a stand-alone package that can be compiled and tested without active prognostic components -but is typically included in the source of a climate model. CIME does not contain: any active components, -any intra-component coupling capability (such as atmosphere physics-dynamics coupling). +CIME, pronounced "SEAM", primarily consists of a Case Control System that supports the configuration, compilation, execution, system testing and unit testing of an Earth System Model. The two main components of the Case Control System are: -*cime* (pronounced: seem) is currently used by the +1. Scripts to enable simple generation of model executables and associated input files for different scientific cases, component resolutions and combinations of full, data and stub components with a handful of commands. +2. 
Testing utilities to run defined system tests and report results for different configurations of the coupled system. + +CIME does **not** contain the source code for any Earth System Model drivers or components. It is typically included alongside the source code of a host model. However, CIME does include pointers to external repositories that contain drivers, data models and other test components. These external components can be easily assembled to facilitate end-to-end system tests of the CIME infrastructure, which are defined in the CIME repository. + +CIME is currently used by the Community Earth System Model (CESM) and the Energy Exascale Earth System Model (E3SM). @@ -19,16 +20,10 @@ See esmci.github.io/cime # Developers ## Lead Developers -Case Control System: Jim Edwards (NCAR), Jim Foucar (SNL) - -MCT-based Coupler/Driver: Mariana Vertenstein (NCAR), Robert Jacob (ANL) - -Data Models: Mariana Vertenstein (NCAR) +Jim Edwards (NCAR), Jim Foucar (SNL) ## Also Developed by -Alice Bertini (NCAR), Tony Craig (NCAR), Michael Deakin (SNL), Chris Fischer (NCAR), Steve Goldhaber (NCAR), -Erich Foster (SNL), Mike Levy (NCAR), Bill Sacks (NCAR), Andrew Salinger (SNL), Sean Santos (NCAR), Jason Sarich (ANL), -Andreas Wilke (ANL). +Alice Bertini (NCAR), Jason Boutte (LLNL), Tony Craig (NCAR), Michael Deakin (SNL), Chris Fischer (NCAR), Erich Foster (SNL), Steve Goldhaber (NCAR), Robert Jacob (ANL), Mike Levy (NCAR), Bill Sacks (NCAR), Andrew Salinger (SNL), Sean Santos (NCAR), Jason Sarich (ANL), Mariana Vertenstein (NCAR), Andreas Wilke (ANL). # Acknowledgements diff --git a/config/cesm/config_archive.xml b/config/cesm/config_archive.xml deleted file mode 100644 index 7e48a550d05..00000000000 --- a/config/cesm/config_archive.xml +++ /dev/null @@ -1,128 +0,0 @@ - - - r - rh\d? 
- h\d*.*\.nc$ - e - locfnh - - rpointer.lnd$NINST_STRING - ./$CASE.clm2$NINST_STRING.r.$DATENAME.nc - - - rpointer.lnd - rpointer.lnd_9999 - casename.clm2.r.1976-01-01-00000.nc - casename.clm2.rh4.1976-01-01-00000.nc - casename.clm2.h0.1976-01-01-00000.nc - casename.clm2.h0.1976-01-01-00000.nc.base - casename.clm2_0002.e.postassim.1976-01-01-00000.nc - casename.clm2_0002.e.preassim.1976-01-01-00000.nc - anothercasename.clm2.i.1976-01-01-00000.nc - - - - - [ri] - h\d*.*\.nc$ - unset - - rpointer.ice$NINST_STRING - ./$CASE.cice$NINST_STRING.r.$DATENAME.nc - - - rpointer.ice - casename.cice.r.1976-01-01-00000.nc - casename.cice.h.1976-01-01-00000.nc - - - - - - r - r[ho] - h\d*.*\.nc$ - d[dovt] - unset - - rpointer.ocn$NINST_STRING.restart - ./$CASE.pop$NINST_STRING.r.$DATENAME.nc,RESTART_FMT=nc - - - rpointer.ocn$NINST_STRING.ovf - ./$CASE.pop$NINST_STRING.ro.$DATENAME - - - rpointer.ocn$NINST_STRING.tavg - ./$CASE.pop$NINST_STRING.rh.$DATENAME.nc - - - rpointer.pop - casename.pop_0001.r.1976-01-01-00000.nc - casename.pop.r.1976-01-01-00000.nc - casename.pop.h.1976-01-01-00000.nc - casename.pop.h.1975-02-01-00000.nc - casename.pop.h0.1976-01-01-00000.nc - casename.pop.dd.1976-01-01-00000.nc - casename.pop.r.1975-01-01-00000.nc - anothercasename.pop.r.1976-01-01-00000.nc - - - - - [ri] - h\d*.*\.nc$ - initial_hist - unset - - rpointer.glc$NINST_STRING - ./$CASE.cism$NINST_STRING.r.$DATENAME.nc - - - - rpointer.glc - rpointer.glc_9999 - - casename.cism.r.1975-01-01-00000.nc - casename.cism.r.1976-01-01-00000.nc - - casename.cism.initial_hist.0001-01-01-00000.nc - casename.cism.h.1975-01-01-00000.nc - casename.cism.h.1976-01-01-00000.nc - - casename.cism.h.1976-01-01-00000.nc.base - anothercasename.cism.r.1976-01-01-00000.nc - - - - - r - hi.*\.nc$ - unset - - rpointer.wav$NINST_STRING - unset - - - - - r - rh\d? 
- [ei] - restart_hist - - rpointer.unset - unset - - - - casename.dart.r.1976-01-01-00000.nc - casename.dart.rh.pop_preassim_priorinf_mean.1976-01-01-00000.nc - casename.dart.rh.cam_preassim_priorinf_mean.1976-01-01-00000.nc - - casename.dart.e.cam_postassim_mean.1976-01-01-00000.nc - casename.dart.i.cam_output_mean.1976-01-01-00000.nc - casename.dart.e.cam_obs_seq_final.1976-01-01-00000.nc - - - diff --git a/config/cesm/config_files.xml b/config/cesm/config_files.xml deleted file mode 100644 index 12464b5ef39..00000000000 --- a/config/cesm/config_files.xml +++ /dev/null @@ -1,573 +0,0 @@ - - - - - - - - char - cesm - case_der - env_case.xml - model system name - - - - - - - - char - $CIMEROOT/config/config_headers.xml - case_der - env_case.xml - contains both header and group information for all the case env_*.xml files - - - - char - $CIMEROOT/config/$MODEL/config_grids.xml - case_last - env_case.xml - file containing specification of all supported model grids, domains and mapping files (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_grids_v2.1.xsd - - - - char - $CIMEROOT/config/$MODEL/machines/config_machines.xml - case_last - env_case.xml - file containing machine specifications for target model primary component (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_machines.xsd - - - - char - $CIMEROOT/config/$MODEL/machines/config_batch.xml - case_last - env_case.xml - file containing batch system details for target system (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_batch.xsd - - - - char - $CIMEROOT/config/$MODEL/machines/config_workflow.xml - case_last - env_case.xml - file containing workflow (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_workflow.xsd - - - - char - $CIMEROOT/config/$MODEL/config_inputdata.xml - case_last - env_case.xml - file containing inputdata server descriptions (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_inputdata.xsd - - - - char - $CIMEROOT/config/$MODEL/machines/config_compilers.xml - case_last - env_case.xml - file containing compiler specifications for target model primary component (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_compilers_v2.xsd - - - - char - $CIMEROOT/config/$MODEL/machines/config_pio.xml - case_last - env_case.xml - file containing specification of pio settings for target model possible machine, compiler, mpilib, compset and/or grid attributes (for documentation only - DO NOT EDIT) - - - - char - - $CIMEROOT/config/config_tests.xml - - $COMP_ROOT_DIR_LND/cime_config/config_tests.xml - - test - env_test.xml - file containing system test descriptions - - - - - - - - - char - - $CIMEROOT/src/components/data_comps/datm - $CIMEROOT/src/components/stub_comps/satm - $CIMEROOT/src/components/xcpl_comps/xatm - $SRCROOT/components/cam/ - $SRCROOT/components/fv3/ - - case_comps - env_case.xml - Root directory of the case atmospheric component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - - $CIMEROOT/src/drivers/$COMP_INTERFACE - - case_comps - env_case.xml - Root directory of the case driver/coupler component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/components/pop/ - $SRCROOT/components/mom/ - $SRCROOT/components/nemo/ - $CIMEROOT/src/components/data_comps/docn - $CIMEROOT/src/components/stub_comps/socn - $CIMEROOT/src/components/xcpl_comps/xocn - - case_comps - env_case.xml - Root directory of the case 
ocean component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/components/ww3/ - $CIMEROOT/src/components/data_comps/dwav - $CIMEROOT/src/components/stub_comps/swav - $CIMEROOT/src/components/xcpl_comps/xwav - - case_comps - env_case.xml - Root directory of the case wave model component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/components/cism/ - $CIMEROOT/src/components/data_comps/dglc - $CIMEROOT/src/components/stub_comps/sglc - $CIMEROOT/src/components/xcpl_comps/xglc - - case_comps - env_case.xml - Root directory of the case land ice component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/components/cice/ - $CIMEROOT/src/components/data_comps/dice - $CIMEROOT/src/components/stub_comps/sice - $CIMEROOT/src/components/xcpl_comps/xice - - case_comps - env_case.xml - Root directory of the case sea ice component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/components/rtm/ - $SRCROOT/components/mosart/ - $CIMEROOT/src/components/data_comps/drof - $CIMEROOT/src/components/stub_comps/srof - $CIMEROOT/src/components/xcpl_comps/xrof - - case_comps - env_case.xml - Root directory of the case river runoff model component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/components/clm/ - $CIMEROOT/src/components/data_comps/dlnd - $CIMEROOT/src/components/stub_comps/slnd - $CIMEROOT/src/components/xcpl_comps/xlnd - - case_comps - env_case.xml - Root directory of the case land model component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $CIMEROOT/src/components/stub_comps/siac - $CIMEROOT/src/components/xcpl_comps/xiac - - case_comps - env_case.xml - Root directory of the case integrated assessment component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $CIMEROOT/src/components/data_comps/desp - $CIMEROOT/src/components/stub_comps/sesp - - case_comps - env_case.xml - Root directory of the case external system processing (esp) component - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/cime_config/config_compsets.xml - $COMP_ROOT_DIR_CPL/cime_config/config_compsets.xml - $COMP_ROOT_DIR_ATM/cime_config/config_compsets.xml - $COMP_ROOT_DIR_ATM/cime_config/config_compsets.xml - $COMP_ROOT_DIR_GLC/cime_config/config_compsets.xml - $COMP_ROOT_DIR_LND/cime_config/config_compsets.xml - $COMP_ROOT_DIR_ICE/cime_config/config_compsets.xml - $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml - $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml - $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml - - case_last - env_case.xml - file containing specification of all compsets for primary component (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_compsets.xsd - - - - char - unset - - $SRCROOT/cime_config/config_pes.xml - $COMP_ROOT_DIR_CPL/cime_config/config_pes.xml - $COMP_ROOT_DIR_ATM/cime_config/config_pes.xml - $COMP_ROOT_DIR_ATM/cime_config/config_pes.xml - $COMP_ROOT_DIR_GLC/cime_config/config_pes.xml - $COMP_ROOT_DIR_LND/cime_config/config_pes.xml - $COMP_ROOT_DIR_ICE/cime_config/config_pes.xml - $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml - $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml - $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml - - case_last - env_case.xml - file containing specification of all pe-layouts for primary component (for documentation only - DO NOT EDIT) - 
$CIMEROOT/config/xml_schemas/config_pes.xsd - - - - char - - $CIMEROOT/config/cesm/config_archive.xml - $COMP_ROOT_DIR_CPL/cime_config/config_archive.xml - - $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml - $COMP_ROOT_DIR_ATM/cime_config/config_archive.xml - $COMP_ROOT_DIR_ICE/cime_config/config_archive.xml - $COMP_ROOT_DIR_LND/cime_config/config_archive.xml - $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml - $COMP_ROOT_DIR_WAV/cime_config/config_archive.xml - - $COMP_ROOT_DIR_ATM/cime_config/config_archive.xml - $COMP_ROOT_DIR_GLC/cime_config/config_archive.xml - $COMP_ROOT_DIR_LND/cime_config/config_archive.xml - $COMP_ROOT_DIR_ICE/cime_config/config_archive.xml - $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml - $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml - $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml - $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml - $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml - - case_last - env_case.xml - file containing specification of archive files for each component (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/config_archive.xsd - - - - char - - $CIMEROOT/scripts/lib/CIME/SystemTests - $COMP_ROOT_DIR_LND/cime_config/SystemTests - $COMP_ROOT_DIR_ATM/cime_config/SystemTests - $COMP_ROOT_DIR_OCN/cime_config/SystemTests - $COMP_ROOT_DIR_OCN/cime_config/SystemTests - $COMP_ROOT_DIR_OCN/cime_config/SystemTests - $COMP_ROOT_DIR_ICE/cime_config/SystemTests - $COMP_ROOT_DIR_GLC/cime_config/SystemTests - $COMP_ROOT_DIR_ROF/cime_config/SystemTests - $COMP_ROOT_DIR_ROF/cime_config/SystemTests - - test - env_test.xml - directories containing cime compatible system test modules - - - - char - unset - - $SRCROOT/cime_config/testlist_allactive.xml - $COMP_ROOT_DIR_CPL/cime_config/testdefs/testlist_drv.xml - $COMP_ROOT_DIR_ATM/cime_config/testdefs/testlist_cam.xml - $COMP_ROOT_DIR_GLC/cime_config/testdefs/testlist_cism.xml - $COMP_ROOT_DIR_LND/cime_config/testdefs/testlist_clm.xml - $COMP_ROOT_DIR_ICE/cime_config/testdefs/testlist_cice.xml - $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_pop.xml - $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_mom.xml - $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_nemo.xml - $COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_rtm.xml - $COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_mosart.xml - - case_last - env_case.xml - file containing specification of all system tests for primary component (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/testlist.xsd - - - - char - unset - - $SRCROOT/cime_config/testmods_dirs - $COMP_ROOT_DIR_CPL/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_ATM/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_GLC/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_LND/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_ICE/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs - $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs - - case_last - env_case.xml - directory containing test modifications for primary component tests (for documentation only - DO NOT EDIT) - - - - char - unset - - $SRCROOT/cime_config/usermods_dirs - $COMP_ROOT_DIR_CPL/cime_config/usermods_dirs - $COMP_ROOT_DIR_ATM/cime_config/usermods_dirs - $COMP_ROOT_DIR_GLC/cime_config/usermods_dirs - $COMP_ROOT_DIR_LND/cime_config/usermods_dirs - 
$COMP_ROOT_DIR_ICE/cime_config/usermods_dirs - $COMP_ROOT_DIR_ROF/cime_config/usermods_dirs - $COMP_ROOT_DIR_ROF/cime_config/usermods_dirs - $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs - $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs - $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs - - case_last - env_case.xml - directory containing user modifications for primary components (for documentation only - DO NOT EDIT) - - - - - char - unset - - $COMP_ROOT_DIR_CPL/cime_config/namelist_definition_drv.xml - - $CIMEROOT/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml - $CIMEROOT/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml - $CIMEROOT/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml - $CIMEROOT/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml - $CIMEROOT/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml - $CIMEROOT/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml - - - - case_last - env_case.xml - file containing namelist_definitions for all components - $CIMEROOT/config/xml_schemas/entry_id_namelist.xsd - - - - - - - - char - - $COMP_ROOT_DIR_CPL/cime_config/config_component.xml - - case_last - env_case.xml - file containing all non-component specific case configuration variables (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $CIMEROOT/src/drivers/$COMP_INTERFACE/cime_config/config_component_$MODEL.xml - - case_last - env_case.xml - file containing all component specific driver configuration variables (for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - unset - - $COMP_ROOT_DIR_ATM/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $COMP_ROOT_DIR_LND/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $COMP_ROOT_DIR_ROF/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $COMP_ROOT_DIR_ICE/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $COMP_ROOT_DIR_OCN/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $COMP_ROOT_DIR_GLC/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - 
DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - unset - - $COMP_ROOT_DIR_IAC/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $COMP_ROOT_DIR_WAV/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - - char - - $COMP_ROOT_DIR_ESP/cime_config/config_component.xml - - case_last - env_case.xml - file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT) - $CIMEROOT/config/xml_schemas/entry_id.xsd - $CIMEROOT/config/xml_schemas/entry_id_version3.xsd - - - diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml deleted file mode 100644 index 9097a7cdb26..00000000000 --- a/config/cesm/config_grids.xml +++ /dev/null @@ -1,1738 +0,0 @@ - - - - - - ========================================= - GRID naming convention - ========================================= - The notation for the grid longname is - a%name_l%name_oi%name_r%name_m%mask_g%name_w%name - where - a% => atm, l% => lnd, oi% => ocn/ice, r% => river, m% => mask, g% => glc, w% => wav - - Supported out of the box grid configurations are given via alias specification in - the file "config_grids.xml". Each grid alias can also be associated with the - following optional attributes - - compset (Regular expression for compset matches that are required for this grid) - not_compset (Regular expression for compset matches that are not permitted this grid) - - Using the alias and the optional "compset" and "not_compset" attributes a grid longname is created - Note that the mask is for information only - and is not an attribute of the grid - By default, if the mask is not specified below, it will be set to the ocnice grid - And if there is no ocnice grid (such as for single column, the mask is null since it does not mean anything) - - - - - null - null - null - null - rx1 - r05 - r05 - rx1 - r05 - r05 - null - gland5UM - gland4 - gland4 - null - ww3a - ww3a - ww3a - null - - - - gx1v6 - gx1v6 - gx1v6 - Non-standard grid for testing of the interpolation in DATM rather than coupler - - - - gx1v7 - gx1v7 - gx1v7 - Non-standard grid for testing of the interpolation in DATM rather than coupler - - - - 01col - 01col - Non-standard grid for running POP in true 1D mode - - - - CLM_USRDAT - CLM_USRDAT - null - - - - 1x1_numaIA - 1x1_numaIA - null - - - - 1x1_brazil - 1x1_brazil - null - - - - 1x1_smallvilleIA - 1x1_smallvilleIA - null - - - - 1x1_camdenNJ - 1x1_camdenNJ - null - - - - 1x1_mexicocityMEX - 1x1_mexicocityMEX - null - - - - 1x1_vancouverCAN - 1x1_vancouverCAN - null - - - - 1x1_urbanc_alpha - 1x1_urbanc_alpha - null - - - - 5x5_amazon - 5x5_amazon - null - - - - - 0.125nldas2 - 0.125nldas2 - 0.125nldas2 - 0.125nldas2 - nldas2 - - - - 360x720cru - 360x720cru - - - - - - T31 - T31 - gx3v7 - gx3v7 - - - - T31 - T31 - gx3v7 - gland4 - gx3v7 - - - - T31 - T31 - gx3v7 - gland20 - gx3v7 - - - - T31 - T31 - gx3v7 - gland5UM - gx3v7 - - - - T42 - T42 - T42 - usgs - - - - T42 - T42 - T42 - gx1v7 - - - - T42 - T42 - T42 - gx1v6 - - - - T42 - T42 - T42 - 
gx1v7 - - - - T85 - T85 - T85 - gx1v6 - - - - T85 - T85 - T85 - gx1v7 - - - - T85 - T85 - T85 - usgs - - - - T85 - 0.9x1.25 - tx0.1v2 - tx0.1v2 - - - - T341 - 0.23x0.31 - tx0.1v2 - tx0.1v2 - - - - T62 - T62 - gx3v7 - gx3v7 - - - - T62 - T62 - tx1v1 - tx1v1 - - - - T62 - T62 - tn1v3 - tn1v3 - - - - T62 - T62 - tn0.25v3 - tn0.25v3 - - - - T62 - T62 - tx0.1v2 - tx0.1v2 - - - - T62 - T62 - tx0.1v3 - tx0.1v3 - - - - TL319 - TL319 - gx1v7 - JRA025 - gx1v7 - - - - TL319 - TL319 - tx0.66v1 - JRA025 - - - - TL319 - TL319 - tx0.1v2 - JRA025 - - - - TL319 - TL319 - tx0.1v3 - JRA025 - - - - T62 - T62 - tx0.66v1 - - - - T62 - T62 - tx0.25v1 - - - 0.9x1.25 - 0.9x1.25 - tx0.66v1 - - - - T62 - T62 - gx1v6 - gx1v6 - - - - T62 - T62 - gx1v7 - gx1v7 - - - - T62 - T62 - oQU120 - oQU120 - - - - - - 0.23x0.31 - 0.23x0.31 - gx1v6 - gx1v6 - - - - 0.23x0.31 - 0.23x0.31 - gx1v7 - gx1v7 - - - - 0.23x0.31 - 0.23x0.31 - tn1v3 - tn1v3 - - - - 0.23x0.31 - 0.23x0.31 - tn0.25v3 - tn0.25v3 - - - - 0.23x0.31 - 0.23x0.31 - tx0.1v2 - tx0.1v2 - - - - 0.47x0.63 - 0.47x0.63 - gx1v6 - gx1v6 - - - - 0.47x0.63 - 0.47x0.63 - gx1v7 - gx1v7 - - - - 0.47x0.63 - 0.47x0.63 - tx0.1v2 - tx0.1v2 - - - - 0.9x1.25 - 0.9x1.25 - gx1v6 - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - tn1v3 - tn1v3 - - - - 0.9x1.25 - 0.9x1.25 - tn0.25v3 - tn0.25v3 - - - - 0.9x1.25 - 0.9x1.25 - gx1v6 - gland4 - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gland4 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - gx1v6 - gland20 - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gland20 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - gx1v6 - gland5UM - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gland5UM - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - null - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - gx1v7 - - - - 0.47x0.63 - 0.47x0.63 - 0.47x0.63 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - gland5UM - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - gland5UM - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - gland5UM - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - r01 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - r01 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - gland4 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gland4 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - gland5UM - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gland5UM - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gland5UM - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gland5UM - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gland5UM - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - null - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gx1v7 - - - - 4x5 - 4x5 - gx3v7 - gx3v7 - - - - 0.23x0.31 - 0.23x0.31 - 0.23x0.31 - gx1v6 - - - - 0.23x0.31 - 0.23x0.31 - 0.23x0.31 - gx1v7 - - - - 2.5x3.33 - 2.5x3.33 - 2.5x3.33 - gx1v6 - - - - 2.5x3.33 - 2.5x3.33 - 2.5x3.33 - gx1v7 - - - - 4x5 - 4x5 - 4x5 - gx3v7 - - - - 10x15 - 10x15 - 10x15 - gx3v7 - - - - 10x15 - 10x15 - 10x15 - usgs - - - - 10x15 - 10x15 - gx3v7 - gx3v7 - - - - - - ne5np4 - ne5np4 - ne5np4 - gx3v7 - - - - ne16np4 - ne16np4 - gx1v7 - gx1v7 - - - - ne16np4 - ne16np4 - ne16np4 - gx1v7 - - - - ne30np4 - ne30np4 - gx1v6 - gx1v6 - - - - ne30np4 - ne30np4 - gx1v7 - gx1v7 - - - - ne30pg3 - ne30pg3 - gx1v7 - gx1v7 - - - - ne30np4 - 1.9x2.5 - gx1v6 - For testing tri-grid - gx1v6 - - - - ne30np4 - 1.9x2.5 - gx1v7 - For testing tri-grid - gx1v7 - - - - ne30np4 - 0.9x1.25 - 
gx1v6 - For testing tri-grid - gx1v6 - - - - ne30np4 - 0.9x1.25 - gx1v7 - For testing tri-grid - gx1v7 - - - - ne30np4 - ne30np4 - ne30np4 - gx1v6 - - - - ne30np4 - ne30np4 - ne30np4 - gx1v7 - - - - ne60np4 - ne60np4 - gx1v6 - gx1v6 - - - - ne60np4 - ne60np4 - gx1v7 - gx1v7 - - - - ne60np4 - ne60np4 - ne60np4 - gx1v6 - - - - ne120np4 - ne120np4 - gx1v6 - gx1v6 - - - - ne120np4 - ne120np4 - gx1v7 - gx1v7 - - - - ne120np4 - ne120np4 - tx0.1v2 - tx0.1v2 - - - - ne120np4 - ne120np4 - ne120np4 - gx1v6 - - - - ne120np4 - ne120np4 - ne120np4 - gx1v7 - - - - ne240np4 - 0.23x0.31 - gx1v6 - For testing high resolution tri-grid - gx1v6 - - - - ne240np4 - 0.23x0.31 - gx1v7 - For testing high resolution tri-grid - gx1v7 - - - - ne240np4 - ne240np4 - tx0.1v2 - tx0.1v2 - - - - ne240np4 - ne240np4 - ne240np4 - gx1v6 - - - - ne240np4 - ne240np4 - ne240np4 - gx1v7 - - - - - - ne30np4.pg2 - ne30np4.pg2 - ne30np4.pg2 - gx1v7 - - - - ne60np4.pg2 - ne60np4.pg2 - ne60np4.pg2 - gx1v7 - - - - ne120np4.pg2 - ne120np4.pg2 - ne120np4.pg2 - gx1v7 - - - - ne240np4.pg2 - ne240np4.pg2 - ne240np4.pg2 - gx1v7 - - - - - - ne5np4.pg3 - ne5np4.pg3 - ne5np4.pg3 - gx3v7 - - - - ne16np4.pg3 - ne16np4.pg3 - ne16np4.pg3 - gx1v7 - - - - ne30pg3 - ne30pg3 - ne30pg3 - gx1v7 - - - - ne60np4.pg3 - ne60np4.pg3 - ne60np4.pg3 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - ne120np4.pg3 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - ne120np4.pg3 - tx0.1v3 - - - - ne240np4.pg3 - ne240np4.pg3 - ne240np4.pg3 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - gx1v7 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - gx1v7 - tx0.1v3 - - - - - - ne30np4.pg4 - ne30np4.pg4 - ne30np4.pg4 - gx1v7 - - - - ne60np4.pg4 - ne60np4.pg4 - ne60np4.pg4 - gx1v7 - - - - ne120np4.pg4 - ne120np4.pg4 - ne120np4.pg4 - gx1v7 - - - - - - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - gx1v7 - gx1v7 - - - - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - gx1v7 - - - - ne0np4TESTONLY.ne5x4 - ne0np4TESTONLY.ne5x4 - ne0np4TESTONLY.ne5x4 - gx3v7 - - - - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - tx0.1v2 - - - - - - T31 - T31 - gx3v7 - gx3v7 - - - - 4x5 - 4x5 - gx3v7 - gx3v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gx1v7 - - - - ne30np4 - ne30np4 - gx1v6 - gx1v6 - - - - ne30np4 - ne30np4 - gx1v7 - gx1v7 - - - - C96 - C96 - gx1v7 - gland4 - gx1v7 - - - - C96 - C96 - C96 - tx0.66v1 - - - - C96 - C96 - tx0.66v1 - tx0.66v1 - - - - C96 - C96 - tx0.25v1 - tx0.25v1 - - - - C384 - C384 - tx0.25v1 - tx0.25v1 - - - - - ww3a - - - - - - - - - - - - - 0 0 - unset - null is no grid: - - - - - - - 1 1 - domain.ocn.01col.ArcticOcean.20150824.nc - domain.ocn.01col.ArcticOcean.20150824.nc - 01col is a single-column grid for datm and POP: - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.${CLM_USRDAT_NAME}_navy.nc - user specified domain - only valid for DATM/CLM compset - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-numaIA_navy.110106.nc - 1x1 Numa Iowa -- only valid for DATM/CLM compset - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-brazil_navy.090715.nc - 1x1 Brazil -- only valid for DATM/CLM compset - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-smallvilleIA_test.110106.nc - 1x1 Smallville Iowa Crop Test Case -- only valid for DATM/CLM compset - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-camdenNJ_navy.111004.nc - 1x1 Camden New Jersey -- only valid for DATM/CLM compset - - - - 1 1 - 
$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-mexicocityMEX_navy.090715.nc - 1x1 Mexico City Mexico -- only valid for DATM/CLM compset - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-vancouverCAN_navy.090715.nc - 1x1 Vancouver Canada -- only valid for DATM/CLM compset - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-urbanc_alpha_test.110201.nc - 1x1 Urban C Alpha Test Case -- only valid for DATM/CLM compset - - - - 1 1 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.5x5pt-amazon_navy.090715.nc - 5x5 Amazon regional case -- only valid for DATM/CLM compset - - - - - 464 224 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.0.125nldas2_0.125nldas2.190410.nc - $DIN_LOC_ROOT/share/domains/domain.clm/domain.ocn.0.125nldas2.190410.nc - Regional NLDAS-2 grid over the U.S. (0.125 degree resolution; 25-53N, 235-293E) - - - - - - 720 360 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.360x720_cruncep.100429.nc - Exact half-degree CRUNCEP datm forcing grid with CRUNCEP land-mask -- only valid for DATM/CLM compset - - - - 1152 768 - domain.lnd.fv0.23x0.31_gx1v6.100517.nc - domain.ocn.0.23x0.31_gx1v6_101108.nc - domain.lnd.fv0.23x0.31_tn1v3.160414.nc - domain.ocn.fv0.23x0.31_tn1v3.160414.nc - domain.lnd.fv0.23x0.31_tn0.25v3.160721.nc - domain.ocn.fv0.23x0.31_tn0.25v3.160721.nc - 0.23x0.31 is FV 1/4-deg grid: - - - - 576 384 - domain.lnd.fv0.47x0.63_gx1v6.090407.nc - domain.ocn.0.47x0.63_gx1v6_090408.nc - domain.lnd.fv0.47x0.63_gx1v7.180521.nc - domain.ocn.fv0.47x0.63_gx1v7.180521.nc - $DIN_LOC_ROOT/share/meshes/fv0.47x0.63_141008_ESMFmesh.nc - 0.47x0.63 is FV 1/2-deg grid: - - - - 288 192 - domain.lnd.fv0.9x1.25_gx1v6.090309.nc - domain.ocn.0.9x1.25_gx1v6_090403.nc - domain.lnd.fv0.9x1.25_gx1v7.151020.nc - domain.ocn.fv0.9x1.25_gx1v7.151020.nc - domain.lnd.fv0.9x1.25_tx0.66v1.190314.nc - domain.ocn.fv0.9x1.25_tx0.66v1.190314.nc - domain.lnd.fv0.9x1.25_tn1v3.160414.nc - domain.ocn.fv0.9x1.25_tn1v3.160414.nc - domain.lnd.fv0.9x1.25_tn0.25v3.160721.nc - domain.ocn.fv0.9x1.25_tn0.25v3.160721.nc - /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc - /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc - $DIN_LOC_ROOT/share/meshes/fv0.9x1.25_141008_polemod_ESMFmesh.nc - 0.9x1.25 is FV 1-deg grid: - - - - - 144 96 - domain.lnd.fv1.9x2.5_gx1v6.090206.nc - domain.ocn.1.9x2.5_gx1v6_090403.nc - domain.lnd.fv1.9x2.5_gx1v7.181205.nc - domain.ocn.fv1.9x2.5_gx1v7.181205.nc - domain.aqua.fv1.9x2.5.nc - $DIN_LOC_ROOT/share/meshes/fv1.9x2.5_141008_ESMFmesh.nc - 1.9x2.5 is FV 2-deg grid: - - - - 72 46 - domain.lnd.fv4x5_gx3v7.091218.nc - domain.ocn.4x5_gx3v7_100120.nc - $DIN_LOC_ROOT/share/meshes/fv4x5_050615_polemod_ESMFmesh.nc - 4x5 is FV 4-deg grid: - - - - 108 72 - domain.lnd.fv2.5x3.33_gx3v7.110223.nc - domain.ocn.fv2.5x3.33_gx3v7_110223.nc - 2.5x3.33 is FV 3-deg grid: - - - - 24 19 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.fv10x15_USGS.110713.nc - $DIN_LOC_ROOT/share/domains/domain.clm/domain.ocn.fv10x15_USGS_070807.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.fv10x15_gx3v7.180321.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.fv10x15_gx3v7.180321.nc - $DIN_LOC_ROOT/share/meshes/10x15_nomask_c110308_ESMFmesh.nc - 10x15 is FV 10-deg grid: - For low resolution testing - - - - 1024 512 - - - domain.lnd.T341_gx1v6.111226.nc - T341 is Gaussian grid: - Backward compatible for very high resolution Spectral-dycore experiments - - - - - 256 128 - domain.lnd.T85_gx1v4.060403.nc - domain.lnd.T85_gx1v4.060403.nc - T85 is Gaussian grid: - Backward 
compatible for high resolution Spectral-dycore experiments - - - - 192 96 - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx1v7.151008.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx1v6.090320.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx3v7.090911.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx1v1.090122.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.1v2_090623.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.1v3.170929.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_oQU120.160325.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx1v6.130409.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx1v7.151008.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx3v7.130409.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn1v3.160414.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn0.25v3.160721.nc - $DIN_LOC_ROOT/share/meshes/T62_040121_ESMFmesh.nc - T62 is Gaussian grid: - - - - 96 48 - $DIN_LOC_ROOT/share/domains/domain.lnd.T31_gx3v7.130409.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T31_gx3v7.130409.nc - $DIN_LOC_ROOT/share/meshes/T31_040122_ESMFmesh.nc - T31 is Gaussian grid: - - - - 128 64 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.T42_USGS.111004.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.64x128_USGS_070807.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T42_gx1v7.180727.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T42_gx1v7.180727.nc - T42 is Gaussian grid: - - - - 1352 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4_gx3v7.140810.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4_gx3v7.140810.nc - ne5np4 is Spectral Elem 6-deg grid: - For ultra-low resolution spectral element grid testing - - - - 1350 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4.pg3_gx3v7.170605.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4.pg3_gx3v7.170605.nc - ne5np4 is Spectral Elem 6-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 13826 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne16np4_gx1v7.171018.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne16np4_gx1v7.171018.nc - $DIN_LOC_ROOT/share/meshes/ne16np4_scrip_171002_ESMFmesh.nc - ne16np4 is Spectral Elem 2-deg grid: - For low resolution spectral element grid testing - - - - 13824 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne16pg3_gx1v7.171003.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne16pg3_gx1v7.171003.nc - ne16np4.pg3 is a Spectral Elem 2-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 48602 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4_gx1v6.110905.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4_gx1v6_110217.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30_gx1v7.171003.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30_gx1v7.171003.nc - $DIN_LOC_ROOT/share/meshes/ne30np4_091226_pentagons_ESMFmesh.nc - ne30np4 is Spectral Elem 1-deg grid: - - - - 21600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg2_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg2_gx1v7.170628.nc - ne30np4.pg2 is a Spectral Elem 1-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 48600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg3_gx1v7.170605.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg3_gx1v7_170605.nc - ne30pg3 is a Spectral Elem ne30 grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 86400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg4_gx1v7.170628.nc - 
$DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg4_gx1v7.170628.nc - ne30np4.pg4 is a Spectral Elem 1-deg grid with a 4x4 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 194402 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4_gx1v6.120406.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4_gx1v6.121113.nc - $DIN_LOC_ROOT/share/meshes/ne60np4_pentagons_100408_ESMFmesh.nc - ne60np4 is Spectral Elem 1/2-deg grid: - - - - 86400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg2_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg2_gx1v7.170628.nc - ne60np4.pg2 is a Spectral Elem 0.5-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 194400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg3_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg3_gx1v7.170628.nc - ne60np4.pg3 is a Spectral Elem 0.5-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 345600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg4_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg4_gx1v7.170628.nc - ne60np4.pg4 is a Spectral Elem 0.5-deg grid with a 4x4 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 777602 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v6.110502.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v6.121113.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v7.190718.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v7.190718.nc - ne120np4 is Spectral Elem 1/4-deg grid: - - - - 345600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg2_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg2_gx1v7.170629.nc - $DIN_LOC_ROOT/share/meshes/ne120np4_pentagons_100310_ESMFmesh.nc - ne120np4.pg2 is a Spectral Elem 0.25-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 777600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_gx1v7.190718.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_gx1v7.190718.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_tx0.1v3.190820.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_tx0.1v3.190820.nc - ne120np4.pg3 is a Spectral Elem 0.25-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 1382400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg4_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg4_gx1v7.170629.nc - ne120np4.pg4 is a Spectral Elem 0.25-deg grid with a 4x4 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 3110402 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4_gx1v6.111226.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4_gx1v6.111226.nc - ne240np4 is Spectral Elem 1/8-deg grid: - Experimental for very high resolution experiments - - - - 3863 1 - ne0np4TESTONLY.ne5x4 is a low-resolution refined SE grid for testing: - Test support only - - - - 174098 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne0CONUSne30x8_gx1v7.190322.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne0CONUSne30x8_gx1v7.190322.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne0CONUSne30x8_tx0.1v2.171010.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne0CONUSne30x8_tx0.1v2.171010.nc - ne0np4CONUS.ne30x8 is a Spectral Elem 1-deg grid with a 1/8 deg refined region over the continental United States: - Test support only - - - - 640 320 - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_gx1v7.170705.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.TL319_gx1v7.170705.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.66v1.190425.nc - 
$DIN_LOC_ROOT/share/domains/domain.ocn.TL319_tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.1v2.161014.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v2.161014.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.1v3.170730.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v3.170730.nc - $DIN_LOC_ROOT/share/meshes/TL319_151007_ESMFmesh.nc - TL319 grid for JRA55 - - - - 1382400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg2_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg2_gx1v7.170629.nc - ne240np4.pg2 is a Spectral Elem 0.125-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 3110400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg3_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg3_gx1v7.170629.nc - ne240np4.pg3 is a Spectral Elem 0.125-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - - - - - 320 384 - $DIN_LOC_ROOT/share/domains/domain.ocn.gx1v6.090206.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.gx1v6.090206.nc - $DIN_LOC_ROOT/share/meshes/gx1v6_090205_ESMFmesh.nc - gx1v6 is displaced Greenland pole v6 1-deg grid: - - - - 320 384 - $DIN_LOC_ROOT/share/domains/domain.ocn.gx1v7.151008.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.gx1v7.151008.nc - $DIN_LOC_ROOT/share/meshes/gx1v7_151008_ESMFmesh.nc - gx1v7 is displaced Greenland pole 1-deg grid with Caspian as a land feature: - - - - 100 116 - $DIN_LOC_ROOT/share/domains/domain.ocn.gx3v7.120323.nc - $DIN_LOC_ROOT/share/meshes/gx3v7_120309_ESMFmesh.nc - gx3v7 is displaced Greenland pole v7 3-deg grid: - - - - 540 458 - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/meshes/tx0.66v1_190314_ESMFmesh.nc - tx0.66v1 is tripole v1 0.66-deg MOM6 grid: - Experimental for MOM6 experiments - - - - 1440 1080 - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.25v1.190207.nc - $DIN_LOC_ROOT/share/meshes/tx0.25v1_190204_ESMFmesh.nc - tx0.25v1 is tripole v1 0.25-deg MOM6 grid: - Experimental for MOM6 experiments - - - - 3600 2400 - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v2.161014.nc - tx0.1v2 is tripole v2 1/10-deg grid: - Experimental for high resolution experiments - - - - 3600 2400 - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v3.170730.nc - tx0.1v3 is tripole v3 1/10-deg grid: - Experimental for high resolution experiments - - - - 360 240 - $DIN_LOC_ROOT/share/domains/domain.ocn.tx1v1.090122.nc - tripole v1 1-deg grid: testing proxy for high-res tripole ocean grids- do not use for scientific experiments - Experimental tripole ocean grid - - - - 28574 1 - $DIN_LOC_ROOT/share/domains/domain.ocn.oQU120.160325.nc - oQU120 is a MPAS ocean grid that is roughly 1 degree resolution: - Experimental, under development - - - - - - - - 360 291 - $DIN_LOC_ROOT/share/domains/domain.ocn.tn1v3.160414.nc - tn1v3 is NEMO ORCA1 tripole grid at 1 deg (reduced eORCA): - NEMO ORCA1 tripole ocean grid - - - - 1440 1050 - $DIN_LOC_ROOT/share/domains/domain.ocn.tn0.25v3.160721.nc - tn0.25v3 is NEMO ORCA1 tripole grid at 1/4 deg (reduced eORCA): - NEMO ORCA1 tripole ocean grid - - - - 360 180 - $DIN_LOC_ROOT/share/meshes/rx1_nomask_181022_ESMFmesh.nc - rx1 is 1 degree river routing grid (only valid for DROF): - Can only be used by DROF - - - - 720 360 - $DIN_LOC_ROOT/share/meshes/r05_nomask_c110308_ESMFmesh.nc - r05 is 1/2 degree river routing grid: - - - - 3600 1800 - - r01 is 1/10 degree river routing grid: - For experimental use by high resolution grids - - - - 1440 720 - - 
$DIN_LOC_ROOT/share/meshes/JRA025m.170209_ESMFmesh.nc - JRA is 0.25 degree runoff grid for use with JRA-55 runoff data - - - - - - 76 141 - $DIN_LOC_ROOT/share/meshes/gland_20km_c150511_ESMFmesh.nc - 20-km Greenland grid - - - - 301 561 - 5-km Greenland grid (new version from U. Montana) - - - - 416 704 - 4-km Greenland grid, for use with the glissade dycore - - - - - - 90 50 - $DIN_LOC_ROOT/share/domains/domain.lnd.ww3a_ww3a.120222.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ww3a_ww3a.120222.nc - $DIN_LOC_ROOT/share/meshes/ww3a_120222_ESMFmesh.nc - WW3 90 x 50 global grid - For testing of the WAV model - - - - - - 55296 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.C96_gx1v6.181018.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.C96_gx1v6.181018.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.C96_tx0.66v1.181210.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.C96_tx0.66v1.181210.nc - $DIN_LOC_ROOT/share/meshes/C96_181018_ESMFmesh.nc - C96 is a fvcubed xx-deg grid: - Experimental for fv3 dycore - - - - - 100000 1 - C384 is a fvcubed xx-deg grid: - Experimental for fv3 dycore - - - - - - - - - - - - - ATM2OCN_FMAPNAME - ATM2OCN_SMAPNAME - ATM2OCN_VMAPNAME - OCN2ATM_FMAPNAME - OCN2ATM_SMAPNAME - ATM2LND_FMAPNAME - ATM2LND_SMAPNAME - LND2ATM_FMAPNAME - LND2ATM_SMAPNAME - ATM2WAV_SMAPNAME - OCN2WAV_SMAPNAME - ICE2WAV_SMAPNAME - - ROF2OCN_LIQ_RMAPNAME - ROF2OCN_ICE_RMAPNAME - LND2ROF_FMAPNAME - ROF2LND_FMAPNAME - - - - - - - diff --git a/config/cesm/config_grids_common.xml b/config/cesm/config_grids_common.xml deleted file mode 100644 index 9dd8f9efbb3..00000000000 --- a/config/cesm/config_grids_common.xml +++ /dev/null @@ -1,539 +0,0 @@ - - - - - - - - - - lnd/clm2/mappingdata/maps/0.1x0.1/map_360x720_nomask_to_0.1x0.1_nomask_aave_da_c130107.nc - lnd/clm2/mappingdata/maps/360x720/map_0.1x0.1_nomask_to_360x720_nomask_aave_da_c130104.nc - - - - lnd/clm2/mappingdata/maps/0.1x0.1/map_1.9x2.5_nomask_to_0.1x0.1_nomask_aave_da_c120709.nc - lnd/clm2/mappingdata/maps/1.9x2.5/map_0.1x0.1_nomask_to_1.9x2.5_nomask_aave_da_c120709.nc - - - - lnd/clm2/mappingdata/maps/ne30pg3/map_ne30pg3_to_0.5x0.5_nomask_aave_da_c180515.nc - lnd/clm2/mappingdata/maps/ne30pg3/map_0.5x0.5_nomask_to_ne30pg3_aave_da_c180515.nc - - - - lnd/clm2/mappingdata/maps/0.1x0.1/map_ne120np4_nomask_to_0.1x0.1_nomask_aave_da_c120711.nc - lnd/clm2/mappingdata/maps/ne120np4/map_0.1x0.1_nomask_to_ne120np4_nomask_aave_da_c120706.nc - - - - lnd/clm2/mappingdata/maps/0.1x0.1/map_ne240np4_nomask_to_0.1x0.1_nomask_aave_da_c120711.nc - lnd/clm2/mappingdata/maps/ne240np4/map_0.1x0.1_nomask_to_ne240np4_nomask_aave_da_c120706.nc - - - - cpl/gridmaps/ne0np4CONUS.ne30x8/map_ne0CONUSne30x8_TO_0.5x0.5_nomask_aave.190322.nc - cpl/gridmaps/ne0np4CONUS.ne30x8/map_0.5x0.5_nomask_TO_ne0CONUSne30x8_aave.190322.nc - - - - lnd/clm2/mappingdata/maps/0.5x0.5/map_360x720_nomask_to_0.5x0.5_nomask_aave_da_c130103.nc - lnd/clm2/mappingdata/maps/360x720/map_0.5x0.5_nomask_to_360x720_nomask_aave_da_c120830.nc - - - - lnd/clm2/mappingdata/maps/ne16np4/map_ne16np4_nomask_to_0.5x0.5_nomask_aave_da_c110922.nc - lnd/clm2/mappingdata/maps/ne16np4/map_0.5x0.5_nomask_to_ne16np4_nomask_aave_da_c110922.nc - - - - lnd/clm2/mappingdata/maps/ne30np4/map_ne30np4_to_0.5x0.5rtm_aave_da_110320.nc - lnd/clm2/mappingdata/maps/ne30np4/map_0.5x0.5_nomask_to_ne30np4_nomask_aave_da_c121019.nc - - - - lnd/clm2/mappingdata/maps/ne60np4/map_ne60np4_nomask_to_0.5x0.5_nomask_aave_da_c110922.nc - lnd/clm2/mappingdata/maps/ne60np4/map_0.5x0.5_nomask_to_ne60np4_nomask_aave_da_c110922.nc - - - - 
lnd/clm2/mappingdata/maps/ne120np4/map_ne120np4_TO_0.5x0.5_nomask_aave.190502.nc - lnd/clm2/mappingdata/maps/ne120np4/map_0.5x0.5_nomask_TO_ne120np4_aave.190502.nc - - - - lnd/clm2/mappingdata/maps/ne120np4.pg3/map_ne120np4.pg3_TO_0.5x0.5_nomask_aave.190503.nc - lnd/clm2/mappingdata/maps/ne120np4.pg3/map_0.5x0.5_nomask_TO_ne120np4.pg3_aave.190503.nc - - - - lnd/clm2/mappingdata/maps/ne240np4/map_ne240np4_nomask_to_0.5x0.5_nomask_aave_da_c110922.nc - lnd/clm2/mappingdata/maps/ne240np4/map_0.5x0.5_nomask_to_ne240np4_nomask_aave_da_c121019.nc - - - - lnd/clm2/mappingdata/maps/0.23x0.31/map_0.23x0.31_nomask_to_0.5x0.5_nomask_aave_da_c110920.nc - lnd/clm2/mappingdata/maps/0.23x0.31/map_0.5x0.5_nomask_to_0.23x0.31_nomask_aave_da_c110920.nc - - - - lnd/clm2/mappingdata/maps/0.47x0.63/map_0.47x0.63_nomask_to_0.5x0.5_nomask_aave_da_c120306.nc - lnd/clm2/mappingdata/maps/0.47x0.63/map_0.5x0.5_nomask_to_0.47x0.63_nomask_aave_da_c120306.nc - - - - lnd/clm2/mappingdata/maps/0.9x1.25/map_0.9x1.25_nomask_to_0.5x0.5_nomask_aave_da_c120522.nc - lnd/clm2/mappingdata/maps/0.9x1.25/map_0.5x0.5_nomask_to_0.9x1.25_nomask_aave_da_c121019.nc - - - - lnd/clm2/mappingdata/maps/1.9x2.5/map_1.9x2.5_nomask_to_0.5x0.5_nomask_aave_da_c120522.nc - lnd/clm2/mappingdata/maps/1.9x2.5/map_0.5x0.5_nomask_to_1.9x2.5_nomask_aave_da_c120709.nc - - - - lnd/clm2/mappingdata/maps/2.5x3.33/map_2.5x3.33_nomask_to_0.5x0.5_nomask_aave_da_c110823.nc - lnd/clm2/mappingdata/maps/2.5x3.33/map_0.5x0.5_nomask_to_2.5x3.33_nomask_aave_da_c110823.nc - - - - lnd/clm2/mappingdata/maps/10x15/map_10x15_to_0.5x0.5rtm_aave_da_c20190725.nc - lnd/clm2/mappingdata/maps/10x15/map_0.5x0.5_nomask_to_10x15_nomask_aave_da_c20190725.nc - - - - lnd/clm2/mappingdata/maps/4x5/map_4x5_nomask_to_0.5x0.5_nomask_aave_da_c110822.nc - lnd/clm2/mappingdata/maps/4x5/map_0.5x0.5_nomask_to_4x5_nomask_aave_da_c110822.nc - - - - lnd/clm2/mappingdata/maps/512x1024/map_512x1024_nomask_to_0.5x0.5_nomask_aave_da_c110920.nc - lnd/clm2/mappingdata/maps/512x1024/map_0.5x0.5_nomask_to_512x1024_nomask_aave_da_c110920.nc - - - - lnd/clm2/mappingdata/maps/128x256/map_128x256_nomask_to_0.5x0.5_nomask_aave_da_c110920.nc - lnd/clm2/mappingdata/maps/128x256/map_0.5x0.5_nomask_to_128x256_nomask_aave_da_c110920.nc - - - - lnd/clm2/mappingdata/maps/64x128/map_64x128_nomask_to_0.5x0.5_nomask_aave_da_c110920.nc - lnd/clm2/mappingdata/maps/64x128/map_0.5x0.5_nomask_to_64x128_nomask_aave_da_c110920.nc - - - - lnd/clm2/mappingdata/maps/48x96/map_48x96_nomask_to_0.5x0.5_nomask_aave_da_c110822.nc - lnd/clm2/mappingdata/maps/48x96/map_0.5x0.5_nomask_to_48x96_nomask_aave_da_c110822.nc - - - - - - - - cpl/cpl6/map_r05_TO_g16_aave.120920.nc - - - - cpl/gridmaps/r05/map_r05_TO_gx1v7_aave.161012.nc - - - - cpl/gridmaps/rx1/map_rx1_to_gx3v7_nnsm_e1000r500_180430.nc - cpl/gridmaps/rx1/map_rx1_to_gx3v7_nnsm_e1000r500_180430.nc - - - cpl/gridmaps/rx1/map_rx1_to_gx1v6_nn_open_ocean_nnsm_e1000r300_marginal_sea_170503.nc - cpl/gridmaps/rx1/map_rx1_to_gx1v6_nnsm_e1000r300_170503.nc - - - cpl/gridmaps/rx1/map_rx1_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_170413.nc - cpl/gridmaps/rx1/map_rx1_to_gx1v7_nnsm_e1000r300_170413.nc - - - cpl/gridmaps/rx1/map_rx1_to_tx1v1_e1000r300_161214.nc - cpl/gridmaps/rx1/map_rx1_to_tx1v1_e1000r300_161214.nc - - - cpl/gridmaps/rx1/map_rx1_to_tx0.66v1_nnsm_e1000r300_190315.nc - cpl/gridmaps/rx1/map_rx1_to_tx0.66v1_nnsm_e1000r300_190315.nc - - - cpl/cpl6/map_rx1_to_tx0.1v2_e1000r200_090624.nc - cpl/cpl6/map_rx1_to_tx0.1v2_e1000r200_090624.nc - - - 
cpl/cpl6/map_rx1_to_tx0.1v3_nnsm_e1000r200_170914.nc - cpl/cpl6/map_rx1_to_tx0.1v3_nnsm_e1000r200_170914.nc - - - cpl/gridmaps/rx1/map_rx1_to_oQU120_nn.160527.nc - cpl/gridmaps/rx1/map_rx1_to_oQU120_nn.160527.nc - - - - cpl/gridmaps/r05/map_r05_to_gx3v7_nnsm_e1000r500_180430.nc - cpl/gridmaps/r05/map_r05_to_gx3v7_nnsm_e1000r500_180430.nc - - - cpl/gridmaps/r05/map_r05_to_gx1v6_nn_open_ocean_nnsm_e1000r300_marginal_sea_170503.nc - cpl/gridmaps/r05/map_r05_to_gx1v6_nnsm_e1000r300_170503.nc - - - cpl/gridmaps/r05/map_r05_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_170413.nc - cpl/gridmaps/r05/map_r05_to_gx1v7_nnsm_e1000r300_170413.nc - - - cpl/gridmaps/r05/map_r05_to_tx1v1_e1000r500_161214.nc - cpl/gridmaps/r05/map_r05_to_tx1v1_e1000r500_161214.nc - - - cpl/cpl6/map_r05_to_tx0.1v2_r500e1000_080620.nc - cpl/cpl6/map_r05_to_tx0.1v2_r500e1000_080620.nc - - - - cpl/cpl6/map_r01_to_gx1v6_120711.nc - cpl/cpl6/map_r01_to_gx1v6_120711.nc - - - - cpl/gridmaps/rJRA025/map_JRA025m_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_170801.nc - cpl/gridmaps/rJRA025/map_JRA025m_to_gx1v7_e1000r300_170801.nc - - - cpl/gridmaps/rJRA025/map_JRA025m_to_tx0.1v2_e333r100_170619.nc - cpl/gridmaps/rJRA025/map_JRA025m_to_tx0.1v2_e333r100_170619.nc - - - cpl/gridmaps/rJRA025/map_JRA025m_to_tx0.1v3_e333r100_170830.nc - cpl/gridmaps/rJRA025/map_JRA025m_to_tx0.1v3_e333r100_170830.nc - - - cpl/gridmaps/r05/map_r05_to_tx0.66v1_e1000r300_190314.nc - cpl/gridmaps/r05/map_r05_to_tx0.66v1_e1000r300_190314.nc - - - cpl/gridmaps/rJRA025/map_JRA025m_to_tx0.66v1_nnsm_e333r100_190326.nc - cpl/gridmaps/rJRA025/map_JRA025m_to_tx0.66v1_nnsm_e333r100_190326.nc - - - - - - - - - - - - - - - - cpl/gridmaps/fv0.47x0.63/map_fv0.47x0.63_TO_gland4km_aave.171105.nc - cpl/gridmaps/fv0.47x0.63/map_fv0.47x0.63_TO_gland4km_blin.171105.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv0.47x0.63_aave.171105.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv0.47x0.63_aave.171105.nc - - - - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gland4km_aave.170429.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv0.9x1.25_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv0.9x1.25_aave.170429.nc - - - - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gland4km_aave.170429.nc - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv1.9x2.5_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv1.9x2.5_aave.170429.nc - - - - cpl/gridmaps/T31/map_T31_TO_gland4km_aave.170429.nc - cpl/gridmaps/T31/map_T31_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_T31_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_T31_aave.170429.nc - - - - cpl/gridmaps/360x720/map_360x720_TO_gland4km_aave.170429.nc - cpl/gridmaps/360x720/map_360x720_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_360x720_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_360x720_aave.170429.nc - - - - cpl/gridmaps/fv10x15/map_fv10x15_TO_gland4km_aave.170429.nc - cpl/gridmaps/fv10x15/map_fv10x15_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv10x15_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv10x15_aave.170429.nc - - - - cpl/gridmaps/fv4x5/map_fv4x5_TO_gland4km_aave.170429.nc - cpl/gridmaps/fv4x5/map_fv4x5_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv4x5_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_fv4x5_aave.170429.nc - - - - cpl/gridmaps/ne16np4/map_ne16np4_TO_gland4km_aave.170429.nc - 
cpl/gridmaps/ne16np4/map_ne16np4_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne16np4_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne16np4_aave.170429.nc - - - - cpl/gridmaps/ne30np4/map_ne30np4_TO_gland4km_aave.170429.nc - cpl/gridmaps/ne30np4/map_ne30np4_TO_gland4km_blin.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne30np4_aave.170429.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne30np4_aave.170429.nc - - - - cpl/gridmaps/ne30pg3/map_ne30pg3_TO_gland4km_aave.180515.nc - cpl/gridmaps/ne30pg3/map_ne30pg3_TO_gland4km_blin.180515.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne30pg3_aave.180510.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne30pg3_aave.180510.nc - - - - cpl/gridmaps/ne0np4CONUS.ne30x8/map_ne0CONUSne30x8_TO_gland4km_aave.190322.nc - cpl/gridmaps/ne0np4CONUS.ne30x8/map_ne0CONUSne30x8_TO_gland4km_blin.190322.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne0CONUSne30x8_aave.190322.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne0CONUSne30x8_aave.190322.nc - - - - cpl/gridmaps/ne120np4/map_ne120np4_TO_gland4km_aave.190502.nc - cpl/gridmaps/ne120np4/map_ne120np4_TO_gland4km_blin.190502.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne120np4_aave.190502.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne120np4_aave.190502.nc - - - - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_gland4km_aave.190503.nc - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_gland4km_blin.190503.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne120np4.pg3_aave.190503.nc - cpl/gridmaps/gland4km/map_gland4km_TO_ne120np4.pg3_aave.190503.nc - - - - - - - - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gland5km_aave.150514.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gland5km_blin.150514.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv0.9x1.25_aave.150514.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv0.9x1.25_aave.150514.nc - - - - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gland5km_aave.150514.nc - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gland5km_blin.150514.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv1.9x2.5_aave.150514.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv1.9x2.5_aave.150514.nc - - - - cpl/gridmaps/T31/map_T31_TO_gland5km_aave.150514.nc - cpl/gridmaps/T31/map_T31_TO_gland5km_blin.150514.nc - cpl/gridmaps/gland5km/map_gland5km_TO_T31_aave.150514.nc - cpl/gridmaps/gland5km/map_gland5km_TO_T31_aave.150514.nc - - - - cpl/gridmaps/360x720/map_360x720_TO_gland5km_aave.160329.nc - cpl/gridmaps/360x720/map_360x720_TO_gland5km_blin.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_360x720_aave.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_360x720_aave.160329.nc - - - - cpl/gridmaps/fv10x15/map_fv10x15_TO_gland5km_aave.160329.nc - cpl/gridmaps/fv10x15/map_fv10x15_TO_gland5km_blin.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv10x15_aave.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv10x15_aave.160329.nc - - - - cpl/gridmaps/fv4x5/map_fv4x5_TO_gland5km_aave.160329.nc - cpl/gridmaps/fv4x5/map_fv4x5_TO_gland5km_blin.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv4x5_aave.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_fv4x5_aave.160329.nc - - - - cpl/gridmaps/ne16np4/map_ne16np4_TO_gland5km_aave.160329.nc - cpl/gridmaps/ne16np4/map_ne16np4_TO_gland5km_blin.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_ne16np4_aave.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_ne16np4_aave.160329.nc - - - - cpl/gridmaps/ne30np4/map_ne30np4_TO_gland5km_aave.160329.nc - cpl/gridmaps/ne30np4/map_ne30np4_TO_gland5km_blin.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_ne30np4_aave.160329.nc - 
cpl/gridmaps/gland5km/map_gland5km_TO_ne30np4_aave.160329.nc - - - - cpl/gridmaps/ne120np4/map_ne120np4_TO_gland5km_aave.160329.nc - cpl/gridmaps/ne120np4/map_ne120np4_TO_gland5km_blin.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_ne120np4_aave.160329.nc - cpl/gridmaps/gland5km/map_gland5km_TO_ne120np4_aave.160329.nc - - - - - - - - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gland20km_aave.150514.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gland20km_blin.150514.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv0.9x1.25_aave.150514.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv0.9x1.25_aave.150514.nc - - - - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gland20km_aave.150514.nc - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gland20km_blin.150514.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv1.9x2.5_aave.150514.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv1.9x2.5_aave.150514.nc - - - - cpl/gridmaps/T31/map_T31_TO_gland20km_aave.150514.nc - cpl/gridmaps/T31/map_T31_TO_gland20km_blin.150514.nc - cpl/gridmaps/gland20km/map_gland20km_TO_T31_aave.150514.nc - cpl/gridmaps/gland20km/map_gland20km_TO_T31_aave.150514.nc - - - - cpl/gridmaps/360x720/map_360x720_TO_gland20km_aave.160329.nc - cpl/gridmaps/360x720/map_360x720_TO_gland20km_blin.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_360x720_aave.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_360x720_aave.160329.nc - - - - cpl/gridmaps/fv10x15/map_fv10x15_TO_gland20km_aave.160329.nc - cpl/gridmaps/fv10x15/map_fv10x15_TO_gland20km_blin.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv10x15_aave.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv10x15_aave.160329.nc - - - - cpl/gridmaps/fv4x5/map_fv4x5_TO_gland20km_aave.160329.nc - cpl/gridmaps/fv4x5/map_fv4x5_TO_gland20km_blin.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv4x5_aave.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_fv4x5_aave.160329.nc - - - - cpl/gridmaps/ne16np4/map_ne16np4_TO_gland20km_aave.160329.nc - cpl/gridmaps/ne16np4/map_ne16np4_TO_gland20km_blin.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_ne16np4_aave.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_ne16np4_aave.160329.nc - - - - cpl/gridmaps/ne30np4/map_ne30np4_TO_gland20km_aave.160329.nc - cpl/gridmaps/ne30np4/map_ne30np4_TO_gland20km_blin.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_ne30np4_aave.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_ne30np4_aave.160329.nc - - - - cpl/gridmaps/ne120np4/map_ne120np4_TO_gland20km_aave.160329.nc - cpl/gridmaps/ne120np4/map_ne120np4_TO_gland20km_blin.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_ne120np4_aave.160329.nc - cpl/gridmaps/gland20km/map_gland20km_TO_ne120np4_aave.160329.nc - - - - - - - - - - - - - - - cpl/gridmaps/gland4km/map_gland4km_to_gx1v6_nn_open_ocean_nnsm_e1000r300_marginal_sea_171105.nc - cpl/gridmaps/gland4km/map_gland4km_to_gx1v6_nnsm_e1000r300_171105.nc - - - cpl/gridmaps/gland4km/map_gland4km_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_171105.nc - cpl/gridmaps/gland4km/map_gland4km_to_gx1v7_nnsm_e1000r300_171105.nc - - - - cpl/gridmaps/gland4km/map_gland4km_to_gx3v7_nnsm_e1000r500_180502.nc - cpl/gridmaps/gland4km/map_gland4km_to_gx3v7_nnsm_e1000r500_180502.nc - - - cpl/gridmaps/gland5km/map_gland5km_to_gx1v6_nn_open_ocean_nnsm_e1000r300_marginal_sea_171105.nc - cpl/gridmaps/gland5km/map_gland5km_to_gx1v6_nnsm_e1000r300_171105.nc - - - cpl/gridmaps/gland5km/map_gland5km_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_171105.nc - cpl/gridmaps/gland5km/map_gland5km_to_gx1v7_nnsm_e1000r300_171105.nc - 
- - - cpl/gridmaps/gland5km/map_gland5km_to_gx3v7_nnsm_e1000r500_180502.nc - cpl/gridmaps/gland5km/map_gland5km_to_gx3v7_nnsm_e1000r500_180502.nc - - - - cpl/gridmaps/gland20km/map_gland20km_to_gx1v6_nn_open_ocean_nnsm_e1000r300_marginal_sea_171105.nc - cpl/gridmaps/gland20km/map_gland20km_to_gx1v6_nnsm_e1000r300_171105.nc - - - cpl/gridmaps/gland20km/map_gland20km_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_171105.nc - cpl/gridmaps/gland20km/map_gland20km_to_gx1v7_nnsm_e1000r300_171105.nc - - - - cpl/gridmaps/gland20km/map_gland20km_to_gx3v7_nnsm_e1000r500_180502.nc - cpl/gridmaps/gland20km/map_gland20km_to_gx3v7_nnsm_e1000r500_180502.nc - - - - - - - - cpl/gridmaps/ww3a/map_ww3a_TO_gx3v7_splice_150428.nc - cpl/gridmaps/gx3v7/map_gx3v7_TO_ww3a_splice_150428.nc - cpl/gridmaps/gx3v7/map_gx3v7_TO_ww3a_splice_150428.nc - - - - cpl/gridmaps/ww3a/map_ww3a_TO_gx1v6_splice_150428.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_ww3a_splice_150428.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_ww3a_splice_150428.nc - - - - cpl/gridmaps/ww3a/map_ww3a_TO_gx1v7_splice_170214.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ww3a_splice_170214.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ww3a_splice_170214.nc - - - cpl/gridmaps/gland4km/map_gland4km_to_tx0.66v1_nnsm_e1000r300_190314.nc - cpl/gridmaps/gland4km/map_gland4km_to_tx0.66v1_nnsm_e1000r300_190314.nc - - - diff --git a/config/cesm/config_grids_mct.xml b/config/cesm/config_grids_mct.xml deleted file mode 100644 index e1aac8a469b..00000000000 --- a/config/cesm/config_grids_mct.xml +++ /dev/null @@ -1,420 +0,0 @@ - - - - - - - - cpl/gridmaps/C96/map_C96_TO_tx0.66v1_aave.181210.nc - cpl/gridmaps/C96/map_C96_TO_tx0.66v1_blin.181210.nc - cpl/gridmaps/C96/map_C96_TO_tx0.66v1_patc.181210.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_C96_aave.181210.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_C96_blin.181210.nc - - - - cpl/cpl6/map_fv0.23x0.31_to_gx1v6_aave_da_100423.nc - cpl/cpl6/map_fv0.23x0.31_to_gx1v6_bilin_da_100423.nc - cpl/cpl6/map_fv0.23x0.31_to_gx1v6_bilin_da_100423.nc - cpl/cpl6/map_gx1v6_to_fv0.23x0.31_aave_da_100423.nc - cpl/cpl6/map_gx1v6_to_fv0.23x0.31_aave_da_100423.nc - - - - cpl/cpl6/map_fv0.23x0.31_to_tx0.1v2_aave_da_090127.nc - cpl/cpl6/map_fv0.23x0.31_to_tx0.1v2_bilin_da_090127.nc - cpl/cpl6/map_fv0.23x0.31_to_tx0.1v2_bilin_da_090127.nc - cpl/cpl6/map_tx0.1v2_to_fv0.23x0.31_aave_da_090127.nc - cpl/cpl6/map_tx0.1v2_to_fv0.23x0.31_aave_da_090127.nc - - - - cpl/cpl6/map_fv0.47x0.63_to_gx1v6_aave_da_090407.nc - cpl/cpl6/map_fv0.47x0.63_to_gx1v6_patch_090401.nc - cpl/cpl6/map_fv0.47x0.63_to_gx1v6_patch_090401.nc - cpl/cpl6/map_gx1v6_to_fv0.47x0.63_aave_da_090407.nc - cpl/cpl6/map_gx1v6_to_fv0.47x0.63_aave_da_090407.nc - - - - cpl/cpl6/map_fv0.47x0.63_to_tx0.1v2_aave_da_090218.nc - cpl/cpl6/map_fv0.47x0.63_to_tx0.1v2_bilin_da_090218.nc - cpl/cpl6/map_fv0.47x0.63_to_tx0.1v2_bilin_da_090218.nc - cpl/cpl6/map_tx0.1v2_to_fv0.47x0.63_aave_da_090218.nc - cpl/cpl6/map_tx0.1v2_to_fv0.47x0.63_aave_da_090218.nc - - - - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v6_aave.130322.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v6_blin.130322.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v6_patc.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc - - - - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v7_aave.151008.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v7_blin.151008.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v7_patc.151008.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_fv0.9x1.25_aave.151008.nc - 
cpl/gridmaps/gx1v7/map_gx1v7_TO_fv0.9x1.25_aave.151008.nc - - - - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_tx0.66v1_aave.190314.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_tx0.66v1_blin.190314.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_tx0.66v1_patc.190314.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_fv0.9x1.25_aave.190314.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_fv0.9x1.25_aave.190314.nc - - - - cpl/cpl6/map_fv0.9x1.25_to_mp120v1_aave_da_111004.nc - cpl/cpl6/map_fv0.9x1.25_to_mp120v1_aave_da_111004.nc - cpl/cpl6/map_fv0.9x1.25_to_mp120v1_aave_da_111004.nc - cpl/cpl6/map_mp120v1_to_fv0.9x1.25_aave_da_111004.nc - cpl/cpl6/map_mp120v1_to_fv0.9x1.25_aave_da_111004.nc - - - - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gx1v6_aave.130322.nc - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gx1v6_blin.130322.nc - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gx1v6_patc.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_fv1.9x2.5_aave.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_fv1.9x2.5_aave.130322.nc - - - - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gx1v7_aave.181205.nc - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gx1v7_blin.181205.nc - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_gx1v7_patc.181205.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_fv1.9x2.5_aave.181205.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_fv1.9x2.5_aave.181205.nc - - - - cpl/cpl6/map_fv1.9x2.5_to_tx1v1_aave_da_090710.nc - cpl/cpl6/map_fv1.9x2.5_to_tx1v1_bilin_da_090710.nc - cpl/cpl6/map_fv1.9x2.5_to_tx1v1_bilin_da_090710.nc - cpl/cpl6/map_tx1v1_to_fv1.9x2.5_aave_da_090710.nc - cpl/cpl6/map_tx1v1_to_fv1.9x2.5_aave_da_090710.nc - - - - cpl/cpl6/map_fv4x5_to_gx3v7_aave_da_091218.nc - cpl/cpl6/map_fv4x5_to_gx3v7_bilin_da_091218.nc - cpl/cpl6/map_fv4x5_to_gx3v7_bilin_da_091218.nc - cpl/cpl6/map_gx3v7_to_fv4x5_aave_da_091218.nc - cpl/cpl6/map_gx3v7_to_fv4x5_aave_da_091218.nc - - - - cpl/gridmaps/ne16np4/map_ne16np4_TO_gx1v7_aave.171018.nc - cpl/gridmaps/ne16np4/map_ne16np4_TO_gx1v7_aave.171018.nc - cpl/gridmaps/ne16np4/map_ne16np4_TO_gx1v7_aave.171018.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne16np4_aave.171018.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne16np4_aave.171018.nc - - - - cpl/cpl6/map_ne30np4_to_gx1v6_aave_110121.nc - cpl/cpl6/map_ne30np4_to_gx1v6_native_110328.nc - cpl/cpl6/map_ne30np4_to_gx1v6_native_110328.nc - cpl/cpl6/map_gx1v6_to_ne30np4_aave_110121.nc - cpl/cpl6/map_gx1v6_to_ne30np4_aave_110121.nc - - - - cpl/gridmaps/ne30np4/map_ne30np4_TO_fv0.9x1.25_aave.120712.nc - cpl/gridmaps/ne30np4/map_ne30np4_TO_fv0.9x1.25_aave.120712.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_ne30np4_aave.120712.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_ne30np4_aave.120712.nc - - - cpl/cpl6/map_ne30np4_to_fv1.9x2.5_aave_da_091230.nc - cpl/cpl6/map_ne30np4_to_fv1.9x2.5_aave_da_091230.nc - cpl/cpl6/map_fv1.9x2.5_to_ne30np4_aave_da_091230.nc - cpl/cpl6/map_fv1.9x2.5_to_ne30np4_aave_da_091230.nc - - - cpl/gridmaps/ne30np4/map_ne30_TO_gx1v7_aave.190214.nc - cpl/gridmaps/ne30np4/map_ne30_TO_gx1v7_blin.190214.nc - cpl/gridmaps/ne30np4/map_ne30_TO_gx1v7_blin.190214.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne30_aave.190214.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne30_aave.190214.nc - - - cpl/gridmaps/ne30np4/map_ne30_TO_ww3a_blin.190214.nc - - - - - cpl/gridmaps/ne30pg3/map_ne30pg3_TO_gx1v7_aave.190215.nc - cpl/gridmaps/ne30pg3/map_ne30pg3_TO_gx1v7_blin.190215.nc - cpl/gridmaps/ne30pg3/map_ne30pg3_TO_gx1v7_blin.190215.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne30pg3_aave.190215.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne30pg3_aave.190215.nc - - - cpl/gridmaps/ne30pg3/map_ne30pg3_TO_ww3a_blin.190215.nc - 
- - - - cpl/gridmaps/ne60np4/map_ne60np4_TO_gx1v6_aave.120406.nc - cpl/gridmaps/ne60np4/map_ne60np4_TO_gx1v6_blin.120406.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_ne60np4_aave.120406.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_ne60np4_aave.120406.nc - - - - cpl/gridmaps/ne120np4/map_ne120np4_to_gx1v6_aave_110428.nc - cpl/gridmaps/ne120np4/map_ne120np4_to_gx1v6_bilin_110428.nc - cpl/gridmaps/ne120np4/map_ne120np4_to_gx1v6_bilin_110428.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_ne120np4_aave_110428.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_ne120np4_aave_110428.nc - - - cpl/gridmaps/ne120np4/map_ne120np4_to_gx1v7_aave_190718.nc - cpl/gridmaps/ne120np4/map_ne120np4_to_gx1v7_bilin_190718.nc - cpl/gridmaps/ne120np4/map_ne120np4_to_gx1v7_bilin_190718.nc - cpl/gridmaps/gx1v7/map_gx1v7_to_ne120np4_aave_190718.nc - cpl/gridmaps/gx1v7/map_gx1v7_to_ne120np4_aave_190718.nc - - - cpl/gridmaps/ne120np4/map_ne120np4_TO_ww3a_blin.190502.nc - - - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_gx1v7_aave.190718.nc - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_gx1v7_blin.190718.nc - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_gx1v7_blin.190718.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne120np4.pg3_aave.190718.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne120np4.pg3_aave.190718.nc - - - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_tx0.1v3_aave.190820.nc - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_tx0.1v3_blin.190820.nc - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_tx0.1v3_blin.190820.nc - cpl/gridmaps/tx0.1v3/map_tx0.1v3_TO_ne120np4.pg3_aave.190820.nc - cpl/gridmaps/tx0.1v3/map_tx0.1v3_TO_ne120np4.pg3_aave.190820.nc - - - cpl/gridmaps/ne120np4.pg3/map_ne120np4.pg3_TO_ww3a_blin.190503.nc - - - cpl/gridmaps/ne120np4/map_ne120np4_to_tx0.1v2_aave_110331.nc - cpl/gridmaps/ne120np4/map_ne120np4_to_tx0.1v2_090127_bilin_110331.nc - cpl/gridmaps/ne120np4/map_ne120np4_to_tx0.1v2_090127_bilin_110331.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_ne120np4_aave_110331.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_ne120np4_aave_110331.nc - - - cpl/gridmaps/ne120np4/map_ne120np4_TO_fv0.9x1.25_aave.120712.nc - cpl/gridmaps/ne120np4/map_ne120np4_TO_fv0.9x1.25_aave.120712.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_ne120np4_aave.120712.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_ne120np4_aave.120712.nc - - - cpl/gridmaps/ne120np4/map_ne120np4_to_fv0.23x0.31_aave_110331.nc - cpl/gridmaps/ne120np4/map_ne120np4_to_fv0.23x0.31_aave_110331.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_ne120np4_aave_110331.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_ne120np4_aave_110331.nc - - - - cpl/gridmaps/ne240np4/map_ne240np4_to_gx1v6_aave_110428.nc - cpl/gridmaps/ne240np4/map_ne240np4_to_gx1v6_aave_110428.nc - cpl/gridmaps/ne240np4/map_ne240np4_to_gx1v6_aave_110428.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_ne240np4_aave_110428.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_ne240np4_aave_110428.nc - - - cpl/gridmaps/ne240np4/map_ne240np4_to_tx0.1v2_aave_110419.nc - cpl/gridmaps/ne240np4/map_ne240np4_to_tx0.1v2_aave_110419.nc - cpl/gridmaps/ne240np4/map_ne240np4_to_tx0.1v2_aave_110419.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_ne240np4_aave_110419.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_ne240np4_aave_110419.nc - - - cpl/gridmaps/ne240np4/map_ne240np4_to_fv0.23x0.31_aave_110428.nc - cpl/gridmaps/ne240np4/map_ne240np4_to_fv0.23x0.31_aave_110428.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_ne240np4_aave_110428.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_ne240np4_aave_110428.nc - - - - cpl/gridmaps/ne0np4CONUS.ne30x8/map_ne0CONUSne30x8_TO_gx1v7_aave.190718.nc - 
cpl/gridmaps/ne0np4CONUS.ne30x8/map_ne0CONUSne30x8_TO_gx1v7_blin.190718.nc - cpl/gridmaps/ne0np4CONUS.ne30x8/map_ne0CONUSne30x8_TO_gx1v7_patc.190718.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne0CONUSne30x8_aave.190718.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_ne0CONUSne30x8_aave.190718.nc - - - - cpl/gridmaps/ne0np4CONUS.ne30x8/map_ne0CONUSne30x8_TO_ww3a_blin.190322.nc - - - - cpl/gridmaps/TL319/map_TL319_TO_gx1v7_aave.170705.nc - cpl/gridmaps/TL319/map_TL319_TO_gx1v7_blin.170705.nc - cpl/gridmaps/TL319/map_TL319_TO_gx1v7_patc.170705.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_TL319_aave.170705.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_TL319_aave.170705.nc - - - cpl/gridmaps/TL319/map_TL319_TO_tx0.66v1_aave.190326.nc - cpl/gridmaps/TL319/map_TL319_TO_tx0.66v1_blin.190326.nc - cpl/gridmaps/TL319/map_TL319_TO_tx0.66v1_patc.190326.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_TL319_aave.190326.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_TL319_aave.190326.nc - - - cpl/gridmaps/TL319/map_TL319_TO_tx0.1v2_patc.161014.nc - cpl/gridmaps/TL319/map_TL319_TO_tx0.1v2_patc.161014.nc - cpl/gridmaps/TL319/map_TL319_TO_tx0.1v2_patc.161014.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_TO_TL319_aave.161014.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_TO_TL319_aave.161014.nc - - - cpl/gridmaps/TL319/map_TL319_TO_tx0.1v3_patc.170730.nc - cpl/gridmaps/TL319/map_TL319_TO_tx0.1v3_patc.170730.nc - cpl/gridmaps/TL319/map_TL319_TO_tx0.1v3_patc.170730.nc - cpl/gridmaps/tx0.1v3/map_tx0.1v3_TO_TL319_aave.170730.nc - cpl/gridmaps/tx0.1v3/map_tx0.1v3_TO_TL319_aave.170730.nc - - - cpl/gridmaps/T62/map_T62_TO_gx3v7_aave.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx3v7_blin.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx3v7_patc.130322.nc - cpl/gridmaps/gx3v7/map_gx3v7_TO_T62_aave.130322.nc - cpl/gridmaps/gx3v7/map_gx3v7_TO_T62_aave.130322.nc - - - cpl/gridmaps/T62/map_T62_TO_gx1v6_aave.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx1v6_blin.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx1v6_patc.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_T62_aave.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_T62_aave.130322.nc - - - cpl/gridmaps/T62/map_T62_TO_gx1v7_aave.151008.nc - cpl/gridmaps/T62/map_T62_TO_gx1v7_blin.151008.nc - cpl/gridmaps/T62/map_T62_TO_gx1v7_patc.151008.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_T62_aave.151008.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_T62_aave.151008.nc - - - cpl/gridmaps/T62/map_T62_TO_tx0.66v1_aave.190314.nc - cpl/gridmaps/T62/map_T62_TO_tx0.66v1_blin.190314.nc - cpl/gridmaps/T62/map_T62_TO_tx0.66v1_blin.190314.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_T62_aave.190314.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_T62_aave.190314.nc - - - cpl/cpl6/map_T62_to_tx1v1_aave_da_090122.nc - cpl/cpl6/map_T62_to_tx1v1_bilin_da_090122.nc - cpl/cpl6/map_T62_to_tx1v1_bilin_da_090122.nc - cpl/cpl6/map_tx1v1_to_T62_aave_da_090122.nc - cpl/cpl6/map_tx1v1_to_T62_aave_da_090122.nc - - - cpl/cpl6/map_T62_to_tx0.1v2_aave_da_090220.nc - cpl/cpl6/map_T62_to_tx0.1v2_bilin_da_090220.nc - cpl/cpl6/map_T62_to_tx0.1v2_bilin_da_090220.nc - cpl/cpl6/map_tx0.1v2_to_T62_aave_da_090220.nc - cpl/cpl6/map_tx0.1v2_to_T62_aave_da_090220.nc - - - cpl/cpl6/map_T62_TO_tx0.1v3_patc.170928.nc - cpl/cpl6/map_T62_TO_tx0.1v3_patc.170928.nc - cpl/cpl6/map_T62_TO_tx0.1v3_patc.170928.nc - cpl/cpl6/map_tx0.1v3_TO_T62_aave.170928.nc - cpl/cpl6/map_tx0.1v3_TO_T62_aave.170928.nc - - - cpl/gridmaps/T62/map_T62_TO_oQU120_aave.151209.nc - cpl/gridmaps/T62/map_T62_TO_oQU120_aave.151209.nc - cpl/gridmaps/T62/map_T62_TO_oQU120_aave.151209.nc - cpl/gridmaps/oQU120/map_oQU120_TO_T62_aave.151209.nc - 
cpl/gridmaps/oQU120/map_oQU120_TO_T62_aave.151209.nc - - - - - cpl/cpl6/map_T31_to_gx3v7_aave_da_090903.nc - cpl/cpl6/map_T31_to_gx3v7_patch_090903.nc - cpl/cpl6/map_T31_to_gx3v7_patch_090903.nc - cpl/cpl6/map_gx3v7_to_T31_aave_da_090903.nc - cpl/cpl6/map_gx3v7_to_T31_aave_da_090903.nc - - - - cpl/gridmaps/T85/map_T85_to_gx1v6_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_gx1v6_bilin_110411.nc - cpl/gridmaps/T85/map_T85_to_gx1v6_bilin_110411.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_T85_aave_110411.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_T85_aave_110411.nc - - - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_bilin_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_aave_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_bilin_110411.nc - - - cpl/gridmaps/T85/map_T85_to_tx0.1v2_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_tx0.1v2_bilin_110411.nc - cpl/gridmaps/T85/map_T85_to_tx0.1v2_bilin_110411.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T85_bilin_110411.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T85_aave_110411.nc - - - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_bilin_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_aave_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_aave_110411.nc - - - - cpl/gridmaps/T341/map_T341_to_tx0.1v2_aave_110413.nc - cpl/gridmaps/T341/map_T341_to_tx0.1v2_aave_110413.nc - cpl/gridmaps/T341/map_T341_to_tx0.1v2_aave_110413.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T341_aave_110413.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T341_aave_110413.nc - - - cpl/gridmaps/T341/map_T341_to_fv0.23x0.31_aave_110413.nc - cpl/gridmaps/T341/map_T341_to_fv0.23x0.31_aave_110413.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_T341_aave_110413.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_T341_aave_110413.nc - - - - - - cpl/gridmaps/ww3a/map_ww3a_TO_tx1v1_blin.170523.nc - cpl/gridmaps/tx1v1/map_tx1v1_TO_ww3a_blin.170523.nc - cpl/gridmaps/tx1v1/map_tx1v1_TO_ww3a_blin.170523.nc - - - - cpl/gridmaps/T31/map_T31_TO_ww3a_bilin_131104.nc - - - - cpl/gridmaps/T62/map_T62_TO_ww3a_bilin.150617.nc - - - - cpl/gridmaps/TL319/map_TL319_TO_ww3a_bilin.170707.nc - - - - cpl/gridmaps/fv1.9x2.5/map_fv1.9x2.5_TO_ww3a_bilin_140702.nc - - - - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_ww3a_bilin.160324.nc - - - diff --git a/config/cesm/config_grids_nuopc.xml b/config/cesm/config_grids_nuopc.xml deleted file mode 100644 index 0cd403e264e..00000000000 --- a/config/cesm/config_grids_nuopc.xml +++ /dev/null @@ -1,125 +0,0 @@ - - - - - - - cpl/gridmaps/T62/map_T62_TO_tx0.66v1_aave.190314.nc - cpl/gridmaps/T62/map_T62_TO_tx0.66v1_blin.190314.nc - cpl/gridmaps/T62/map_T62_TO_tx0.66v1_blin.190314.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_T62_aave.190314.nc - cpl/gridmaps/tx0.66v1/map_tx0.66v1_TO_T62_aave.190314.nc - - - cpl/gridmaps/T62/map_T62_TO_gx3v7_aave.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx3v7_blin.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx3v7_patc.130322.nc - cpl/gridmaps/gx3v7/map_gx3v7_TO_T62_aave.130322.nc - cpl/gridmaps/gx3v7/map_gx3v7_TO_T62_aave.130322.nc - - - cpl/gridmaps/T62/map_T62_TO_gx1v6_aave.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx1v6_blin.130322.nc - cpl/gridmaps/T62/map_T62_TO_gx1v6_patc.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_T62_aave.130322.nc - cpl/gridmaps/gx1v6/map_gx1v6_TO_T62_aave.130322.nc - - - cpl/gridmaps/T62/map_T62_TO_gx1v7_aave.151008.nc - cpl/gridmaps/T62/map_T62_TO_gx1v7_blin.151008.nc - cpl/gridmaps/T62/map_T62_TO_gx1v7_patc.151008.nc - 
cpl/gridmaps/gx1v7/map_gx1v7_TO_T62_aave.151008.nc - cpl/gridmaps/gx1v7/map_gx1v7_TO_T62_aave.151008.nc - - - cpl/cpl6/map_T62_to_tx1v1_aave_da_090122.nc - cpl/cpl6/map_T62_to_tx1v1_bilin_da_090122.nc - cpl/cpl6/map_T62_to_tx1v1_bilin_da_090122.nc - cpl/cpl6/map_tx1v1_to_T62_aave_da_090122.nc - cpl/cpl6/map_tx1v1_to_T62_aave_da_090122.nc - - - cpl/cpl6/map_T62_to_tx0.1v2_aave_da_090220.nc - cpl/cpl6/map_T62_to_tx0.1v2_bilin_da_090220.nc - cpl/cpl6/map_T62_to_tx0.1v2_bilin_da_090220.nc - cpl/cpl6/map_tx0.1v2_to_T62_aave_da_090220.nc - cpl/cpl6/map_tx0.1v2_to_T62_aave_da_090220.nc - - - cpl/cpl6/map_T62_TO_tx0.1v3_patc.170928.nc - cpl/cpl6/map_T62_TO_tx0.1v3_patc.170928.nc - cpl/cpl6/map_T62_TO_tx0.1v3_patc.170928.nc - cpl/cpl6/map_tx0.1v3_TO_T62_aave.170928.nc - cpl/cpl6/map_tx0.1v3_TO_T62_aave.170928.nc - - - cpl/gridmaps/T62/map_T62_TO_oQU120_aave.151209.nc - cpl/gridmaps/T62/map_T62_TO_oQU120_aave.151209.nc - cpl/gridmaps/T62/map_T62_TO_oQU120_aave.151209.nc - cpl/gridmaps/oQU120/map_oQU120_TO_T62_aave.151209.nc - cpl/gridmaps/oQU120/map_oQU120_TO_T62_aave.151209.nc - - - - - cpl/cpl6/map_T31_to_gx3v7_aave_da_090903.nc - cpl/cpl6/map_T31_to_gx3v7_patch_090903.nc - cpl/cpl6/map_T31_to_gx3v7_patch_090903.nc - cpl/cpl6/map_gx3v7_to_T31_aave_da_090903.nc - cpl/cpl6/map_gx3v7_to_T31_aave_da_090903.nc - - - - cpl/gridmaps/T85/map_T85_to_gx1v6_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_gx1v6_bilin_110411.nc - cpl/gridmaps/T85/map_T85_to_gx1v6_bilin_110411.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_T85_aave_110411.nc - cpl/gridmaps/gx1v6/map_gx1v6_to_T85_aave_110411.nc - - - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_bilin_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_aave_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_bilin_110411.nc - - - cpl/gridmaps/T85/map_T85_to_tx0.1v2_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_tx0.1v2_bilin_110411.nc - cpl/gridmaps/T85/map_T85_to_tx0.1v2_bilin_110411.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T85_bilin_110411.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T85_aave_110411.nc - - - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_aave_110411.nc - cpl/gridmaps/T85/map_T85_to_fv0.9x1.25_bilin_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_aave_110411.nc - cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_to_T85_aave_110411.nc - - - - cpl/gridmaps/T341/map_T341_to_tx0.1v2_aave_110413.nc - cpl/gridmaps/T341/map_T341_to_tx0.1v2_aave_110413.nc - cpl/gridmaps/T341/map_T341_to_tx0.1v2_aave_110413.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T341_aave_110413.nc - cpl/gridmaps/tx0.1v2/map_tx0.1v2_to_T341_aave_110413.nc - - - cpl/gridmaps/T341/map_T341_to_fv0.23x0.31_aave_110413.nc - cpl/gridmaps/T341/map_T341_to_fv0.23x0.31_aave_110413.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_T341_aave_110413.nc - cpl/gridmaps/fv0.23x0.31/map_fv0.23x0.31_to_T341_aave_110413.nc - - - - cpl/gridmaps/T31/map_T31_TO_ww3a_bilin_131104.nc - - - - cpl/gridmaps/T62/map_T62_TO_ww3a_bilin.150617.nc - - - - cpl/gridmaps/TL319/map_TL319_TO_ww3a_bilin.170707.nc - - - diff --git a/config/cesm/config_inputdata.xml b/config/cesm/config_inputdata.xml deleted file mode 100644 index 985f5ee02fe..00000000000 --- a/config/cesm/config_inputdata.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - - - - - grid ftp requires the globus-url-copy tool on the client side - gftp -
ftp://gridanon.cgd.ucar.edu:2811/cesm/inputdata/
- ../inputdata_checksum.dat -
- - - wget -
ftp://ftp.cgd.ucar.edu/cesm/inputdata
- anonymous - user@example.edu - ../inputdata_checksum.dat -
- - - ftp requires the python package ftplib - ftp -
ftp.cgd.ucar.edu/cesm/inputdata
- anonymous - user@example.edu - ../inputdata_checksum.dat -
- - - svn -
https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata
-
- -
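For context on the data-server entries being removed above (the deleted config_inputdata.xml lists an anonymous FTP server at ftp.cgd.ucar.edu/cesm/inputdata with user "anonymous" and password "user@example.edu", a checksum file ../inputdata_checksum.dat, and notes that the ftp protocol "requires the python package ftplib"), the following is a minimal illustrative sketch of what a client-side fetch with ftplib could look like. It is not CIME's actual check_input_data implementation; the function name and the file path in the commented usage line are hypothetical, while the host, login, and server root are taken from the configuration shown above.

    # Sketch only: anonymous-FTP download along the lines of the deleted
    # config_inputdata.xml "ftp" server entry. Helper name and example path
    # are hypothetical; this does not reproduce CIME's own download logic.
    import os
    from ftplib import FTP

    def fetch_inputdata(rel_path, dest_root,
                        host="ftp.cgd.ucar.edu", root="/cesm/inputdata",
                        user="anonymous", passwd="user@example.edu"):
        """Download one inputdata file from the anonymous FTP server."""
        dest = os.path.join(dest_root, rel_path)
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with FTP(host) as ftp:
            ftp.login(user=user, passwd=passwd)
            with open(dest, "wb") as fout:
                # RETR takes the server-side path relative to the FTP root
                ftp.retrbinary(f"RETR {root}/{rel_path}", fout.write)
        return dest

    # Hypothetical usage (the file path is made up for illustration):
    # fetch_inputdata("share/domains/domain.ocn.gx1v7.nc", "/path/to/DIN_LOC_ROOT")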
diff --git a/config/cesm/machines/Depends.babbageKnc b/config/cesm/machines/Depends.babbageKnc deleted file mode 100644 index 130ade09928..00000000000 --- a/config/cesm/machines/Depends.babbageKnc +++ /dev/null @@ -1,6 +0,0 @@ - -shr_ncread_mod.o: shr_ncread_mod.F90 - $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $< - -quadrature_mod.o: quadrature_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -DIS_ACCELERATOR $< diff --git a/config/cesm/machines/Depends.bluewaters b/config/cesm/machines/Depends.bluewaters deleted file mode 100755 index e113298e08a..00000000000 --- a/config/cesm/machines/Depends.bluewaters +++ /dev/null @@ -1,5 +0,0 @@ -# - ifeq ($(strip $(COMPILER)),pgi) - progseasalts_intr.o: progseasalts_intr.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -Mnovect $< -endif diff --git a/config/cesm/machines/Depends.corip1 b/config/cesm/machines/Depends.corip1 deleted file mode 100644 index 81be8f3f16e..00000000000 --- a/config/cesm/machines/Depends.corip1 +++ /dev/null @@ -1,5 +0,0 @@ -# Workaround for ICE in intel/2016.0.109 -ifeq (CPRINTEL,$(findstring CPRINTEL, $(FFLAGS))) -RtmMod.o: RtmMod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $(CONTIGUOUS_FLAG) -O1 $< -endif diff --git a/config/cesm/machines/Depends.cray b/config/cesm/machines/Depends.cray deleted file mode 100644 index bbe5a712d97..00000000000 --- a/config/cesm/machines/Depends.cray +++ /dev/null @@ -1,6 +0,0 @@ -NOOPTOBJS= ice_boundary.o dyn_comp.o unicon.o - -$(NOOPTOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< - - diff --git a/config/cesm/machines/Depends.gnu b/config/cesm/machines/Depends.gnu deleted file mode 100644 index 2d53247217e..00000000000 --- a/config/cesm/machines/Depends.gnu +++ /dev/null @@ -1,2 +0,0 @@ -geopk.o:geopk.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fcray-pointer $< diff --git a/config/cesm/machines/Depends.intel b/config/cesm/machines/Depends.intel deleted file mode 100644 index 3dd4e885e27..00000000000 --- a/config/cesm/machines/Depends.intel +++ /dev/null @@ -1,40 +0,0 @@ -# -PERFOBJS=\ -prim_advection_mod.o \ -edge_mod.o \ -derivative_mod.o \ -bndry_mod.o \ -prim_advance_mod.o - -# CLM's SatellitePhenologyMod is compiled incorrectly with intel 15.0.0 at -O2 -REDUCED_OPT_OBJS=\ -SatellitePhenologyMod.o - -# shr_wv_sat_mod does not need to have better than ~0.1% precision, and benefits -# enormously from a lower precision in the vector functions. 
-REDUCED_PRECISION_OBJS=\ -shr_wv_sat_mod.o - -SHR_RANDNUM_FORT_OBJS=\ -kissvec_mod.o \ -mersennetwister_mod.o \ -dSFMT_interface.o \ -shr_RandNum_mod.o - -SHR_RANDNUM_C_OBJS=\ -dSFMT.o \ -dSFMT_utils.o \ -kissvec.o - -ifeq ($(DEBUG),FALSE) - $(PERFOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $< - $(REDUCED_OPT_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $< - $(REDUCED_PRECISION_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $< - $(SHR_RANDNUM_FORT_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fp-model fast -no-prec-div -no-prec-sqrt -qoverride-limits $< - $(SHR_RANDNUM_C_OBJS): %.o: %.c - $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fp-model fast $< -endif diff --git a/config/cesm/machines/Depends.intel14 b/config/cesm/machines/Depends.intel14 deleted file mode 100644 index 32e4747d7a3..00000000000 --- a/config/cesm/machines/Depends.intel14 +++ /dev/null @@ -1,28 +0,0 @@ -# -# 12/03/2012 the intel compiler on yellowstone 12.1.5 20120612 -# does not converge the pH computation without the -CU flag -# root cause has not been determined. JPE -# this problem is resolved in intel 13.0.1 -#ecosys_mod.o: ecosys_mod.F90 -# $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -CU $< - - - -PERFOBJS=\ -prim_advection_mod_base.o \ -vertremap_mod_base.o \ -edge_mod_base.o \ -derivative_mod_base.o \ -bndry_mod_base.o \ -prim_advance_mod.o \ -uwshcu.o \ -wetdep.o - -ifeq ($(DEBUG),FALSE) - $(PERFOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $< - $(REDUCED_OPT_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $< - $(REDUCED_PRECISION_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $< -endif diff --git a/config/cesm/machines/Depends.intelmic b/config/cesm/machines/Depends.intelmic deleted file mode 100644 index 5b90a1de374..00000000000 --- a/config/cesm/machines/Depends.intelmic +++ /dev/null @@ -1,6 +0,0 @@ - -#derivative_mod_base.o: derivative_mod_base.F90 -# $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS_NOOPT) -O1 $< - -shr_ncread_mod.o: shr_ncread_mod.F90 - $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $< diff --git a/config/cesm/machines/Depends.intelmic14 b/config/cesm/machines/Depends.intelmic14 deleted file mode 100644 index 5b90a1de374..00000000000 --- a/config/cesm/machines/Depends.intelmic14 +++ /dev/null @@ -1,6 +0,0 @@ - -#derivative_mod_base.o: derivative_mod_base.F90 -# $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS_NOOPT) -O1 $< - -shr_ncread_mod.o: shr_ncread_mod.F90 - $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $< diff --git a/config/cesm/machines/Depends.mira b/config/cesm/machines/Depends.mira deleted file mode 100644 index c786f6248a4..00000000000 --- a/config/cesm/machines/Depends.mira +++ /dev/null @@ -1,22 +0,0 @@ -# These routines have problems with stacksize when omp is invoked add -qsmallstack to resolve -SSOBJS = shr_reprosum_mod.o mo_sethet.o mo_drydep.o - -$(SSOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -qsmallstack $< - -time_management.o: time_management.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -qsmp=noauto:noomp $< - -# These routines benefit from -qnostrict without violating the bfb test -PERFOBJS=\ -prim_advection_mod.o \ -edge_mod.o \ 
-derivative_mod.o \ -bndry_mod.o \ -prim_advance_mod.o \ -uwshcu.o \ -wetdep.o -ifeq ($(DEBUG),FALSE) - $(PERFOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -qnostrict $< -endif diff --git a/config/cesm/machines/Depends.nag b/config/cesm/machines/Depends.nag deleted file mode 100644 index b22c26e1e65..00000000000 --- a/config/cesm/machines/Depends.nag +++ /dev/null @@ -1,4 +0,0 @@ -wrap_mpi.o: wrap_mpi.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -mismatch_all $(FFLAGS_NOOPT) $(FREEFLAGS) $< -fft99.o: fft99.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< diff --git a/config/cesm/machines/README b/config/cesm/machines/README deleted file mode 100644 index 3bafbe274ee..00000000000 --- a/config/cesm/machines/README +++ /dev/null @@ -1,15 +0,0 @@ -config_pes_pop.xml -current assumptions: - prognostic: pop, cice - data: datm, drof - stub: slnd, sglc -DATM.+XLND.+CICE.+POP.+DROF.+SGLC -The current attributes that are supported are - lcompset_matchN= (where N can be any number) - pecount=[S,M,L,XL] - -Please refer to the documentation in the config_machines.xml and config_compilers.xml files. - - - - diff --git a/config/cesm/machines/config_batch.xml b/config/cesm/machines/config_batch.xml deleted file mode 100644 index eb099adc07b..00000000000 --- a/config/cesm/machines/config_batch.xml +++ /dev/null @@ -1,605 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - qstat - qsub - qdel - -v - - (\d+) - --dependencies - %H:%M:%s - -M - - - - - - - - - - - - - - - qstat - qsub - qdel - --env - #COBALT - (\d+) - --dependencies - -M - - - - - - - - - - - - - bjobs - bsub - bkill - < - - #BSUB - <(\d+)> - -w 'done(jobid)' - -w 'ended(jobid)' - && - %H:%M - -u - - - - -J {{ job_id }} - -n {{ total_tasks }} - -W $JOB_WALLCLOCK_TIME - -o {{ job_id }}.%J - -e {{ job_id }}.%J - - - - - qstat - qsub - qdel - -v - #PBS - ^(\S+)$ - -W depend=afterok:jobid - -W depend=afterany:jobid - : - %H:%M:%S - -M - -m - , bea, b, e, a - - - - - - - -N {{ job_id }} - -r {{ rerunnable }} - - -j oe - -V - - - - - squeue - scancel - #SBATCH - (\d+)$ - --dependency=afterok:jobid - --dependency=afterany:jobid - , - %H:%M:%S - --mail-user - --mail-type - none, all, begin, end, fail - - --job-name={{ job_id }} - --nodes={{ num_nodes }} - --ntasks-per-node={{ tasks_per_node }} - --output={{ job_id }} - --exclusive - - - - - - -l nodes={{ num_nodes }} - -q {{ queue }} - - - iccp - - - - - - - - - - - - -R "span[ptile={{ tasks_per_node }}]" - -N - -a {{ poe }} - - - poe_short - poe_medium - poe_long - - - - - - (\d+.bw)$ - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}:xe - -S {{ shell }} - - - normal - debug - - - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - -S {{ shell }} - -l select=1:mpiprocs={{ total_tasks }}:ompthreads={{ thread_count }} - - - - regular - - - regular - premium - share - economy - - - - - - squeue - sbatch - scancel - #SBATCH - (\d+)$ - , - %H:%M:%S - --mail-user - --mail-type - none, all, begin, end, fail - - --job-name={{ job_id }} - --nodes={{ num_nodes }} - --ntasks-per-node={{ tasks_per_node }} - --output={{ job_id }} - --exclusive - - - medium - - - - - sbatch - - 
- - - - - - - sbatch - - - - - - - -C haswell - - - regular - - - - - - sbatch - - - - - - - -C knl,quad,cache - -S 2 - - - regular - - - - - - sbatch - - - - - - - default - - - - - sbatch - - - - - - - batch - - - - - sbatch - - - - - - - regular - debug - - - - - - - -R "select[model==XeonE5_2680v3]" - - - normal.24h - normal.4h - - - - - - - -R "span[ptile=4] select[model==XeonE3_1585Lv5]" - - - normal.24h - normal.4h - - - - - - - -R "select[model==XeonGold_6150]" - - - normal.24h - normal.4h - - - - - - -A cpo - -l {{ partition }} - -l size={{ mppsize }} - -E - -d $RUNDIR - -o $RUNDIR/$CASE.out - -S /bin/bash - - - debug - batch - - - - - - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - -S {{ shell }} - - - short - medium - long - verylong - overnight - monster - - - - - ssh izumi cd $CASEROOT ; qsub - (\d+.izumi.unified.ucar.edu)$ - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - -S {{ shell }} - - - short - medium - long - verylong - overnight - monster - - - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - regular - - - - - sbatch - - --qos=lr_normal - --partition=lr3 - --account={{ project }} - --ntasks-per-node={{ tasks_per_node }} - - - lr3 - - - - - sbatch - - --qos=lr_normal - --partition=lr2 - --account={{ project }} - --ntasks-per-node={{ tasks_per_node }} - - - lr2 - - - - - ssh login1.ls5.tacc.utexas.edu cd $CASEROOT ; sbatch - - - - - - - normal - large - development - - - - - - default - - - - - - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - -S {{ shell }} - - - batch - - - - - sbatch - - - - - - - queue - - - - - - - -W group_list=$PROJECT - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=bro - -l place=scatter:excl - -S {{ shell }} - - - normal - - - - - - -W group_list=$PROJECT - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=has - -l place=scatter:excl - -S {{ shell }} - - - normal - - - - - - -W group_list=$PROJECT - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=ivy - -l place=scatter:excl - -S {{ shell }} - - - normal - - - - - - -W group_list=$PROJECT - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=san - -l place=scatter:excl - -S {{ shell }} - - - normal - - - - - ssh stampede2.tacc.utexas.edu cd $CASEROOT ; sbatch - - - - - - - skx-normal - skx-dev - - - - - ssh stampede2.tacc.utexas.edu cd $CASEROOT ; sbatch - - - - - - - normal - development - - - - - sbatch - - - - - - - --partition=theia - - - batch - - - - - - default - - - diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml deleted file mode 100644 index 8af5d247e3f..00000000000 --- a/config/cesm/machines/config_compilers.xml +++ /dev/null @@ -1,1305 +0,0 @@ - - - - - - - - -DCESMCOUPLED - -D_USE_FLOW_CONTROL - -DSPMD - - - - -I$(EXEROOT)/atm/obj/FMS - - - $(FC_AUTO_R8) - $(FC_AUTO_R8) -Duse_LARGEFILE - - FALSE - - - - - -h noomp - -g -O0 - -O2 - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY - -DDIR=NOOP - -DDIR=NOOP - - - -s real64 - - - -f free -N 255 -h byteswapio -x dir - -h noomp - -g -O0 -K trap=fp -m1 - -O2,ipa2 -em - - - -O1,fp2,ipa0,scalar0,vector0 - - TRUE - - -Wl,--allow-multiple-definition -h byteswapio - - - - - - -std=gnu99 - 
-fopenmp - -g -Wall -Og -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=bounds - -O - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU - - FORTRAN - - -fdefault-real-8 - - - - -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none - -fopenmp - - -g -Wall -Og -fbacktrace -ffpe-trap=zero,overflow -fcheck=bounds - -O - - - -O0 - - - -ffixed-form - - - -ffree-form - - FALSE - - -fopenmp - - mpicc - mpicxx - mpif90 - gcc - g++ - gfortran - TRUE - - - - - -g -qfullpath -qmaxmem=-1 - -O3 - -qsmp=omp - -qsmp=omp:noopt - - - - -DFORTRAN_SAME -DCPRIBM - - -WF,-D - - -qrealsize=8 - - - -g -qfullpath -qmaxmem=-1 - -O2 -qstrict -qinline=auto - -qsmp=omp - -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en - -qsmp=omp:noopt - -C - - - -qsuffix=f=f -qfixed=132 - - - -qsuffix=f=f90:cpp=F90 - - TRUE - - -qsmp=omp - -qsmp=omp:noopt - - - - - - -qno-opt-dynamic-align -fp-model precise -std=gnu99 - -qopenmp - -O2 -debug minimal - -O0 -g - - - - -DFORTRANUNDERSCORE -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -qno-opt-dynamic-align -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source - -qopenmp - -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created - -O2 -debug minimal - - - -O0 - - - -fixed - - - -free - - - -qopenmp - - mpicc - mpicxx - mpif90 - icc - icpc - ifort - - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl - - TRUE - - - - - -std=gnu99 - -g - - - -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG - - - -r8 - - - - - -Wp,-macro=no_com -convert=BIG_ENDIAN -indirect $ENV{CIMEROOT}/config/cesm/machines/nag_mpi_argument.txt - - -ieee=full -O2 - - - -C=all -g -time -f2003 -ieee=stop - -gline - - -mismatch_all - - - -O0 - - - -fixed - - - -free - - FALSE - mpicc - mpif90 - gcc - nagfor - - - -lpthread - - - FCLIBS="-Wl,--as-needed,--allow-shlib-undefined -L$(COMPILER_PATH)/lib/NAG_Fortran -lf62rts" - - - - - - - -gopt -time - -mp - - - - - - - - - - - - - - - - - - - - - - - - - - - - -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI - - CXX - - -r8 - - - -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee - -mp - -O0 -g -Ktrap=fp -Mbounds -Kieee - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - - - -O0 - - - -Mfixed - - - -Mfree - - - - FALSE - - -time -Wl,--allow-multiple-definition - -mp - - mpicc - mpicxx - mpif90 - pgcc - pgc++ - pgf95 - - - - - -qarch=auto -qtune=auto -qcache=auto - - /usr/bin/bash - - -qarch=auto -qtune=auto -qcache=auto -qsclk=micro - -qspill=6000 - - - -qsigtrap=xl__trcedump - -bdatapsize:64K -bstackpsize:64K -btextpsize:32K - - mpcc_r - mpxlf2003_r - cc_r - xlf2003_r - - -lmassv -lessl - -lmass - - - - - - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - - - -DLINUX - - - -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush - -O3 -qstrict -qinline=auto - -qsmp=omp - -qsmp=omp:noopt - - - -Wl,--relax -Wl,--allow-multiple-definition - - - - - - -DCMAKE_SYSTEM_NAME=Catamount - - - -DLINUX - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - cc - CC - ftn - $ENV{NETCDF_DIR} - lustre - $ENV{PARALLEL_NETCDF_DIR} - cc - CC - ftn - - - - - -DSYSDARWIN - - - - - - -heap-arrays - - - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - $ENV{NETCDF} - gpfs - $ENV{PNETCDF} - xiar - - cru - - - - - - -xHost - - - -DINTEL_MKL -DHAVE_SSE2 - - - -xHost - 
- mpiicpc - mpiicc - mpiifort - icc - ifort - $ENV{TRILINOS_PATH} - - - - - --host=Linux - - - -xCORE-AVX2 - - - -xCORE-AVX2 - - - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - - - -DHAVE_PAPI -DHAVE_SLASHPROC - - - -mkl - - - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - $ENV{NETCDF} - gpfs - $ENV{PNETCDF} - xiar - - cru - - - - - - -xHost - - - -DINTEL_MKL -DHAVE_SSE2 - - - -xHost - - - $(FC_AUTO_R8) -O3 -assume norealloc_lhs - - - $SHELL{${NETCDF_PATH}/bin/nc-config --flibs} - - mpiicpc - mpiicc - mpiifort - icc - ifort - $ENV{TRILINOS_PATH} - - - - - -DHAVE_PAPI - - lustre - - - - FALSE - - -dynamic -mkl=sequential -no-fma - - - -dynamic -mkl=sequential -no-fma - - - - - - -O2 - -nofma - - - -lmpichf90_pgi $ENV{PGI_PATH}/linux86-64/$ENV{PGI_VERSION}/lib/f90main.o - - - -O2 - -nofma - - TRUE - - - - - -I/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/build/lib/mct -I/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/build/lib/psmile.MPI1 - - - -llapack -lblas - -L/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/lib -lpsmile.MPI1 -lscrip -lmct_oasis -lmpeu_oasis - - - - - - -I/project/s824/edavin/OASIS3-MCT_2.0/build.cray/build/lib/mct -I/project/s824/edavin/OASIS3-MCT_2.0/build.cray/build/lib/psmile.MPI1 - - - -L/project/s824/edavin/OASIS3-MCT_2.0/build.cray/lib -lpsmile.MPI1 -lscrip -lmct_oasis -lmpeu_oasis - - - - - -L$(NETCDF_PATH)/lib -Wl,-rpath,$(NETCDF_PATH)/lib -lnetcdff -lnetcdf - - - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - $ENV{NETCDF} - gpfs - $ENV{PNETCDF} - - - - - -DNO_MPIMOD - - - -ldl - - - - - - -qopt-report -xCORE_AVX2 -no-fma - - - -qopt-report -xCORE_AVX2 -no-fma - - - -DPIO_ENABLE_LOGGING=ON - - $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP - $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP - - TRUE - - - - - -llapack -lblas - -ldl - - - - - /vol/apps/hpc/stow/netcdf/4.4.1.1/gcc-6.3.0 - - -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi - - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_HOME} - lustre - - -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt - - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_HOME} - lustre - - -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi - - - - - - --host=Linux - - - -xCORE-AVX2 - - - -xCORE-AVX2 - - - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - - - -DHAVE_SLASHPROC - - - -mkl - - - FALSE - - - - - --host=Linux - - - -xMIC-AVX512 - - - -xMIC-AVX512 - - - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - - - -DHAVE_SLASHPROC - - - -mkl -lmemkind -zmuldefs - - - FALSE - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_HOME} - lustre - - -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi - - - - - - -O2 - -nomp - - - --host=Linux - - - -DLINUX - - - -O2 - -nomp - - - -nomp - - $ENV{NETCDF_HOME} - lustre - - -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi - - - - - - -O2 - - - --host=Linux - - - -DHAVE_PAPI - - - -O2 - - - -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf - - - - - - -DLINUX - - $ENV{NETCDF} - lustre - $ENV{PNETCDF} - - -L$ENV{NETCDF}/lib -lnetcdf -lnetcdff - - - - - - -xCORE-AVX2 - - - -xCORE-AVX2 - - - -mkl - - - - - - -O2 - - - -O2 - - - - - - -DLINUX - - $ENV{NETCDF} 
- lustre - $ENV{PNETCDF} - - -L$ENV{NETCDF}/lib -lnetcdf -lnetcdff - - - - - - -xCORE-AVX2 - - - -xCORE-AVX2 - - - -mkl - - - - - - -O2 - - - -O2 - - - - - - -DLINUX - - $ENV{NETCDF} - lustre - $ENV{PNETCDF} - - -L$ENV{NETCDF}/lib -lnetcdf -lnetcdff - - - - - - -xCORE-AVX2 - - - -xCORE-AVX2 - - - -mkl - - - - - - -O2 - - - -O2 - - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - /usr/lib64 - mpich - $ENV{NETCDF_PATH} - - -L$NETCDF_PATH/lib -lnetcdff -lnetcdf - - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - /usr/lib64 - mpich - $ENV{NETCDF_PATH} - - -L$NETCDF_PATH/lib -lnetcdff -lnetcdf - - - - - - -lifcore - - - -lifcore - -mcmodel medium - - - -lquadmath - -Wl,-rpath,${NETCDF_PATH}/lib - -Wl,-rpath,$ENV{COMPILER_PATH}/lib/intel64 - -Wl,-rpath,$ENV{COMPILER_PATH}/mkl/lib/intel64 - -Wl,-rpath,$ENV{MPI_PATH}/lib - -lifcore - - - -mkl=cluster - - /fs/cgd/csm/tools/pFUnit/pFUnit3.2.8_hobart_Intel15.0.2_noMPI_noOpenMP - - - - - -O0 - - - -O0 - - - -lgomp - -Wl,-R${NETCDF_PATH}/lib - -Wl,-R$ENV{COMPILER_PATH}/lib - -Wl,-R$ENV{COMPILER_PATH}/libso - - - - - - -lm -ldl - - - - - - - -framework Accelerate - - - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - $ENV{NETCDF} - gpfs - $ENV{PNETCDF} - - - - -vec-report - - - -vec-report - - - -DPIO_ENABLE_LOGGING=ON - - - - - - -DHAVE_VPRINTF -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - - -lnetcdff -lnetcdf -mkl - - - -ftrapuv - - - -ftrapuv - - $ENV{NETCDF_DIR} - /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib - - - - - -DHAVE_VPRINTF -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - - -lnetcdff -lnetcdf -mkl - - - -ftrapuv - - - -ftrapuv - - $ENV{NETCDF_DIR} - /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib - - - - /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install - - -O2 - - - --host=Linux - - - -lstdc++ -lmpi_cxx - - - -O2 - - $ENV{NETCDFROOT} - $ENV{PNETCDFROOT} - - $SHELL{${NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack - - - - - - -qfloat=nomaf - - - -qfloat=nomaf - - $ENV{HDF5} - - - /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf77_r - /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r - /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf2003_r - /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ - gpfs - /soft/libraries/pnetcdf/1.6.1/cnk-xl/current/ - /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r - /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf2003_r - - -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf -L$ENV{HDF5}/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib - - - - - - -L$ENV{HDF5_HOME}/lib -lhdf5_fortran -lhdf5 -lhdf5_hl -lhdf5hl_fortran - -L$ENV{NETCDF_PATH}/lib/ -lnetcdff -lnetcdf -lcurl -lblas -llapack - - - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE - - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_LIB}/.. 
- lustre - - -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi - - - - - - -O2 -xCORE-AVX2 - - icc - mpi - $ENV{MPI_ROOT} - $ENV{NETCDF} - - -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf - - /home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default - - - - - -O2 -xCORE-AVX2 - - icc - mpi - $ENV{MPI_ROOT} - $ENV{NETCDF} - - -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf - - /home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default - - - - - -O2 -xAVX - - icc - mpi - $ENV{MPI_ROOT} - $ENV{NETCDF} - - -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf - - /home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default - - - - - -O2 -xAVX - - icc - mpi - $ENV{MPI_ROOT} - $ENV{NETCDF} - - -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf - - /home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default - - - - - -O2 - - - --host=Linux - - /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default - - -O2 - - $ENV{NETCDFROOT} - lustre - $ENV{PNETCDFROOT} - - -L${NETCDF_PATH}/lib -lnetcdff -L/projects/ccsm/BLAS-intel -lblas_LINUX - - - - - - -DHAVE_NANOTIME - - $ENV{TACC_NETCDF_DIR} - lustre - $ENV{TACC_PNETCDF_DIR} - - -Wl,-rpath,${NETCDF_PATH}/lib - - - -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf - - - - - - -DHAVE_NANOTIME - - $ENV{TACC_NETCDF_DIR} - lustre - $ENV{TACC_PNETCDF_DIR} - - - - - -xCOMMON-AVX512 -no-fma - - - -xCOMMON-AVX512 -no-fma - -mcmodel medium - - - -L$ENV{TACC_HDF5_LIB} -lhdf5 $(MKL) -zmuldefs -xCOMMON-AVX512 - - - $SHELL{${NETCDF_PATH}/bin/nf-config --flibs} -L$ENV{TACC_HDF5_LIB} -lhdf5 - - $ENV{TRILINOS_PATH} - FALSE - - - - - -DHAVE_NANOTIME - - $ENV{TACC_NETCDF_DIR} - lustre - $ENV{TACC_PNETCDF_DIR} - - - - - -xCOMMON-AVX512 -no-fma - - - -xCOMMON-AVX512 -no-fma - -mcmodel medium - - - -L$ENV{TACC_HDF5_LIB} -lhdf5 $(MKL) -zmuldefs -xCOMMON-AVX512 - - - $SHELL{${NETCDF_PATH}/bin/nf-config --flibs} -L$ENV{TACC_HDF5_LIB} -lhdf5 - - $ENV{TRILINOS_PATH} - FALSE - - - - mpiicc - mpiicpc - mpiifort - /apps/netcdf/4.3.0-intel - - - - - -xMIC-AVX512 - - - -xMIC-AVX512 - - - --host=Linux - - - -L$(NETCDF_DIR)/lib -lnetcdff -L$(NETCDF_DIR)/lib -lnetcdf -Wl,-rpath -Wl,$(NETCDF_DIR)/lib - - - - - - - - - - - - - - USERDEFINED_MUST_EDIT_THIS - - - # USERDEFINED $SHELL{${NETCDF_PATH}/bin/nc-config --flibs} - - - - diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml deleted file mode 100644 index 737aa5d6a6e..00000000000 --- a/config/cesm/machines/config_machines.xml +++ /dev/null @@ -1,2601 +0,0 @@ - - - - - - - XC50 SkyLake, os is CNL, 40 pes/node, batch system is PBSPro - .*eth\d - CNL - intel,gnu,cray - mpt,mpi-serial - $ENV{BASEDIR} - $ENV{DIN_LOC_ROOT} - $DIN_LOC_ROOT - ${CIME_OUTPUT_ROOT}/archive/$CASE - ${CIME_OUTPUT_ROOT}/cesm_baselines - /home/jedwards/cesm_tools/cprnc/cprnc - 8 - pbs - @ pusan.ac.kr - 40 - 40 - - aprun - - -j {{ hyperthreading }} - -n {{ total_tasks }} - -N $MAX_MPITASKS_PER_NODE - -S {{ tasks_per_numa }} - -d $ENV{OMP_NUM_THREADS} - --mpmd-env OMP_NUM_THREADS=$OMP_NUM_THREADS - - - - /opt/modules/default/init/perl.pm - /opt/modules/default/init/python.py - /opt/modules/default/init/sh - /opt/modules/default/init/csh - /opt/modules/default/bin/modulecmd perl - /opt/modules/default/bin/modulecmd python - module - module - - craype-x86-skylake - PrgEnv-pgi - PrgEnv-intel - PrgEnv-cray - PrgEnv-gnu - cray-netcdf - cray-hdf5 - cray-parallel-netcdf - papi - - - PrgEnv-intel - craype-x86-skylake - craype-hugepages2M - perftools-base/7.0.4 - cray-netcdf/4.6.1.3 - cray-hdf5/1.10.2.0 - cray-parallel-netcdf/1.8.1.3 - papi/5.6.0.4 - 
[config/cesm/machines/config_machines.xml (deleted), continued. This hunk closes the preceding machine entry (gridftp/6.0 and cray-python/3.6.5.1 modules, a 256M stack setting, and a CESM_postprocessing workflow path) and then removes the following machine definitions, identified here by their description strings:]

- "CMCC IBM iDataPlex, os is Linux, 16 pes/node, batch system is LSF" (NODENAME_REGEX .*.cluster.net) — intel/intel15 compilers with mpich2, LSF batch, an mpirun_Impi5 launcher, Intel 2015 / ESMF 6.3.0rp1 / HDF5 1.8.15 / NetCDF 4.3.3.1 modules, and an XIOS path for NEMO.
- "ORNL XE6, os is CNL, 32 pes/node, batch system is PBS" (node name h2o) — intel, pgi, cray, and gnu PrgEnv stacks, aprun launcher with `-n {{ total_tasks }} -N $MAX_MPITASKS_PER_NODE -d $ENV{OMP_NUM_THREADS}`, Cray netcdf/pnetcdf modules.
- "Example port to centos7 linux system with gcc, netcdf, pnetcdf and mpich using modules from http://www.admin-magazine.com/HPC/Articles/Environment-Modules" — gnu/mpich, no batch system, case directories under $ENV{HOME}/cesm, mpiexec launcher, gcc 8.2.0 / mpich 3.3 / netcdf 4.6.1 modules.
- "NCAR SGI platform, os is Linux, 36 pes/node, batch system is PBS" (NODENAME_REGEX .*.?cheyenne\d?.ucar.edu) — intel, gnu, and pgi with mpt or openmpi; mpiexec_mpt/mpirun launchers wrapped in omplace; a retry rule for "MPT: Launcher network accept (MPI_LAUNCH_TIMEOUT) timed out"; Lmod module stacks (intel/19.0.2, mpt/2.19, netcdf-mpi, pnetcdf, ESMF 7.1.0r); ESMF_LIBDIR esmf.mk entries; and FV3GFS/UFS input paths.
- "Portland State University Coeus Cluster Dec 2019 CentOS 7" — gnu/mvapich2, Slurm, srun with socket binding.
- "PNL Haswell cluster, OS is Linux, batch system is SLURM" — intel and pgi with mvapich2, openmpi, intelmpi, or mvapich; srun and mpirun launchers.

Each entry also records its environment-module setup: init scripts for perl, python, sh, and csh plus the matching `modulecmd perl` / `modulecmd python` (or Lmod `lmod`) command.
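Several of the entries above drive their software environment through classic environment modules, pointing at init scripts plus a `modulecmd perl` / `modulecmd python` executable. As a minimal illustration (not CIME's own module handling), the sketch below shows the usual way the python-mode output of `modulecmd` is consumed; the `/usr/bin/modulecmd` path and the module names are assumptions taken from the entries above and will differ on other systems.

```python
import subprocess

MODULECMD = "/usr/bin/modulecmd"  # path taken from the machine entries above; adjust for your system


def module(command, *args):
    """Run `modulecmd python <command> <args...>` and apply its output.

    In python mode, modulecmd prints python statements (os.environ updates)
    on stdout, which the caller is expected to exec().
    """
    proc = subprocess.run(
        [MODULECMD, "python", command, *args],
        stdout=subprocess.PIPE,
        check=True,
    )
    exec(proc.stdout.decode())  # applies the environment changes in-process


if __name__ == "__main__":
    # Hypothetical example: reset the environment and load a NetCDF module.
    module("purge")
    module("load", "netcdf/4.6.1")
```

Lmod installations expose the same python mode through `<lmod_root>/libexec/lmod python`, which is the form the NCAR entries above use.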
[config_machines.xml (deleted), continued — further machine entries removed in this hunk:]

- "NERSC XC40 Haswell, os is CNL, 32 pes/node, batch system is Slurm" (node name cori) — intel, gnu, cray; srun with `--label -n {{ total_tasks }} -c {{ srun_binding }}`; Cray PrgEnv, cray-mpich, cray-netcdf/hdf5 (serial and parallel), and ESMF 7.1.0r modules.
- "NERSC XC* KNL, os is CNL, 68 pes/node, batch system is Slurm" — as above, plus craype-mic-knl and `--cpu_bind=cores`.
- "CSCS Cray XC50, os is SUSE SLES, 12 pes/node, batch system is SLURM" — pgi, cray, gnu with mpich; srun.
- "PNL IBM Xeon cluster, os is Linux (pgi), batch system is SLURM" — pgi and intel with mvapich2/mvapich; srun; pgi 15.5 and netcdf 4.1.2 modules.
- "NERSC XC30, os is CNL, 24 pes/node, batch system is SLURM" (node name edison) — intel, gnu, cray with mpt; srun; Cray PrgEnv and ESMF modules.
- "Euler II Linux Cluster ETH, 24 pes/node, InfiniBand, XeonE5_2680v3, batch system LSF" — intel and pgi with openmpi or mpich; mpirun; intel/2018.1, netcdf/4.3.1, pgi/14.1, mvapich2/1.8.1, open_mpi/1.6.5 modules.
- "Euler III Linux Cluster ETH, 4 pes/node, Ethernet, XeonE3_1585Lv5, batch system LSF" — the same stack plus an interconnect/ethernet module.
- "Euler IV Linux Cluster ETH, 36 pes/node, InfiniBand, XeonGold_6150, batch system LSF".
- "NOAA XE6, os is CNL, 24 pes/node, batch system is PBS" — pgi/mpich; aprun with `-j {{ hyperthreading }} -S {{ tasks_per_numa }}`; Cray PrgEnv, torque, and parallel-netcdf modules.
- "NCAR CGD Linux Cluster 48 pes/node, batch system is PBS" (NODENAME_REGEX ^h.*\.cgd\.ucar\.edu) — intel, pgi, nag, gnu with mvapich2 or openmpi; mpiexec with a $PBS_NODEFILE machinefile; per-compiler netcdf modules and an ESMF esmf.mk path.
- The generic "homebrew" workstation entry (Darwin, gnu, mpich, case directories under $ENV{HOME}/projects, `mpirun -np {{ total_tasks }} -prepend-rank`). Its description reads:

  Customize these fields as appropriate for your system, particularly changing MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE to the number of cores on your machine. You may also want to change instances of '$ENV{HOME}/projects' to your desired directory organization. You can use this in either of two ways: (1) without making any changes, by adding `--machine homebrew` to create_newcase or create_test, or (2) by copying this into a config_machines.xml file in your personal .cime directory and then changing the machine name (MACH="homebrew") to your machine name and the NODENAME_REGEX to something matching your machine's hostname. With (2) you should not need the `--machine` argument, because the machine should be determined automatically; however, you will also need to copy the homebrew-specific settings in config_compilers.xml into a config_compilers.xml file in your personal .cime directory, again changing the machine name (MACH="homebrew") to your machine name.

- "NCAR CGD Linux Cluster 48 pes/node, batch system is PBS" (NODENAME_REGEX ^i.*\.ucar\.edu) — intel, pgi, nag, gnu with mvapich2 or openmpi; mpiexec; an izumi-specific cprnc path.
- "NCAR SGI test platform, os is Linux, 36 pes/node, batch system is PBS" (NODENAME_REGEX .*.laramie.ucar.edu) — intel and gnu with mpt; mpiexec_mpt wrapped in omplace; Lmod module stack (continued in the next hunk).
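For option (2) in the homebrew description, the copy-and-rename step can be scripted. The sketch below is one possible way to do it, assuming the stock file keeps its entries as `<machine MACH="...">` nodes with a `<NODENAME_REGEX>` child (the two names the description itself calls out); the TEMPLATE path is a placeholder for wherever your CIME checkout lives, and the same rename would still be needed in config_compilers.xml.

```python
import os
import socket
import xml.etree.ElementTree as ET

# Placeholder paths: point TEMPLATE at the config_machines.xml in your CIME checkout.
TEMPLATE = os.path.expanduser("~/cime/config/cesm/machines/config_machines.xml")
PERSONAL = os.path.expanduser("~/.cime/config_machines.xml")

tree = ET.parse(TEMPLATE)
root = tree.getroot()

# Drop every entry except the homebrew template, then rename it after this host.
for machine in list(root.findall("machine")):
    if machine.get("MACH") != "homebrew":
        root.remove(machine)

machine = root.find("machine")
machine.set("MACH", socket.gethostname().split(".")[0])
machine.find("NODENAME_REGEX").text = socket.getfqdn()

os.makedirs(os.path.dirname(PERSONAL), exist_ok=True)
tree.write(PERSONAL)
print("wrote", PERSONAL)
```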
[config_machines.xml (deleted), continued — further machine entries removed in this hunk:]

- (tail of the NCAR SGI test platform entry: Lmod init scripts plus ncarenv, intel/gnu, mpt, netcdf, and pnetcdf modules.)
- "Lawrencium LR3 cluster at LBL, OS is Linux (intel), batch system is SLURM" — intel/openmpi, `mpirun -np {{ total_tasks }} -npernode $MAX_MPITASKS_PER_NODE`, intel 2016.4.072 and netcdf 4.4.1.1 modules.
- "Lawrencium LR2 cluster at LBL, OS is Linux (intel), batch system is SLURM" — the same stack on 12-core nodes.
- "Lonestar5 cluster at TACC, OS is Linux (intel), batch system is SLURM" (NODENAME_REGEX .*ls5\.tacc\.utexas\.edu) — intel/mpich, srun, TACC Lmod modules.
- "Linux workstation for Jenkins testing" (melvin|watson) — gnu and intel with openmpi, a Sandia HTTP proxy, SEMS module stack, `mpirun --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread`.
- "ANL IBM BG/Q, os is BGP, 16 pes/node, batch system is cobalt" (.*.fst.alcf.anl.gov) — ibm compiler and MPI, a `/usr/bin/runjob` launcher with ranks-per-node and BG_*/OMP environment settings, softenv modules.
- "Medium sized linux cluster at BNL, torque scheduler." — gnu with openmpi or mpi-serial, FATES-oriented hdf5/netcdf modules.
- "PNL cluster, os is Linux (pgi), batch system is SLURM" — pgi compiler, mpiexec_mpt launcher, pgi 11.8 / mvapich2 1.7 / netcdf 4.1.3 modules.
- Four "NASA/AMES Linux Cluster" entries (Broadwell 28 pes/node, Haswell 24 pes/node, Sandy Bridge 16 cores/node, Ivy Bridge 20 cores/node) — all intel with SGI MPT, PBS, mpiexec_mpt, comp-intel/2018.3.222, mpi-sgi/mpt.2.15r20, and netcdf/4.4.1.1_mpt modules plus a handful of runtime environment settings.
- "Linux workstation at Sandia on SRN with SEMS TPL modules" (s999964|climate|penn) — gnu/openmpi, SEMS modules.
- "SNL clust" ((skybridge|chama)-login) — intel/openmpi, Slurm, SEMS hdf5/netcdf modules.
- "Intel Xeon Platinum 8160 ("Skylake"), 48 cores on two sockets (24 cores/socket), batch system is SLURM" (.*stampede2) — intel with impi or mvapich2, ibrun launcher, TACC modules, an ESMF esmf.mk path, and FV3GFS/UFS input paths.
- "Intel Xeon Phi 7250 ("Knights Landing"), batch system is SLURM" — the same layout for the KNL queue.
- "theia" (node name tfe) — intel/impi, project nems (continued in the next hunk).

Throughout these entries the launcher arguments are templated: `{{ total_tasks }}`, `{{ tasks_per_numa }}`, and `{{ srun_binding }}` fields and `$MAX_MPITASKS_PER_NODE` / `$ENV{...}` references are substituted per case.
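Those `{{ ... }}` placeholders are Jinja-style fields that get filled in per case. Purely as an illustration of that placeholder style (this is not CIME's own resolution code, and the values below are made up), they can be expanded with the third-party jinja2 package:

```python
from jinja2 import Template  # third-party: pip install jinja2

# Argument strings copied from the machine entries above.
args = [
    "-np {{ total_tasks }}",
    "--map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS}",
]

values = {"total_tasks": 128, "tasks_per_numa": 16}  # made-up example values

rendered = " ".join(Template(arg).render(**values) for arg in args)
print(rendered)
# -np 128 --map-by ppr:16:socket:PE=$ENV{OMP_NUM_THREADS}
# Note: the $ENV{...} form is a separate, CIME-side substitution and is left alone here.
```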
[config_machines.xml (deleted), continued — final entries and file tail:]

- theia (continued): case directories and baselines under /scratch4/NCEPDEV/nems/noscrub, Slurm with `srun -n $TOTALPES`, Lmod modules (intel/15.1.133, impi/5.1.1.109, netcdf/4.3.0, pnetcdf, yaml-cpp, esmf/8.0.0bs29g), and FV3GFS/UFS input paths.
- "ALCF Cray XC* KNL, os is CNL, 64 pes/node, batch system is cobalt" (theta.*) — intel, gnu, cray with mpt, project CESM_Highres_Testing, aprun with `--cc depth -d $OMP_NUM_THREADS` plus OMP_STACKSIZE/OMP_NUM_THREADS exports, Cray PrgEnv module stacks.
- The file closes with the default run suffix: `${EXEROOT}/cesm.exe >> cesm.log.$LID 2>&1`.

diff --git a/config/cesm/machines/config_pio.xml b/config/cesm/machines/config_pio.xml
deleted file mode 100644
index 40f22b32a07..00000000000
--- a/config/cesm/machines/config_pio.xml
+++ /dev/null
@@ -1,347 +0,0 @@
[deleted: default PIO settings — I/O layout defaults (values such as $MAX_MPITASKS_PER_NODE and 60) and I/O type defaults (pnetcdf, with netcdf in places) keyed on the configuration.]

diff --git a/config/cesm/machines/config_workflow.xml b/config/cesm/machines/config_workflow.xml
deleted file mode 100644
index f4bcf29e49b..00000000000
--- a/config/cesm/machines/config_workflow.xml
+++ /dev/null
@@ -1,138 +0,0 @@
[deleted: the default case workflow — case.run (prereq "$BUILD_COMPLETE and not $TEST") and case.test (prereq "$BUILD_COMPLETE and $TEST"); case.st_archive depending on "case.run or case.test" with prereq $DOUT_S and a 0:20:00 walltime; a timeseries job gated on $TIMESERIES; and postprocessing jobs (timeseriesL and atm/lnd/ice/ocn averages) whose prereqs query $CASEROOT/postprocess/pp_config for STANDARDIZE_TIMESERIES and GENERATE_AVGS_ATM.]

diff --git a/config/cesm/machines/cylc_suite.rc.template b/config/cesm/machines/cylc_suite.rc.template
deleted file mode 100644
index 5511f6ab2e3..00000000000
--- a/config/cesm/machines/cylc_suite.rc.template
+++ /dev/null
@@ -1,24 +0,0 @@
[deleted: a Cylc suite template titled "CESM CYLC workflow for {{ workflow_description }}" using integer cycling from 1 to {{ cycles }}. The first cycle runs set_external_workflow => run => st_archive; each later cycle runs st_archive[-P1] => run => st_archive. In {{ case_path_string }}, set_external_workflow does `./xmlchange EXTERNAL_WORKFLOW=TRUE` and st_archive does `./case.submit --job case.st_archive; ./xmlchange CONTINUE_RUN=TRUE`.]

diff --git a/config/cesm/machines/nag_mpi_argument.txt b/config/cesm/machines/nag_mpi_argument.txt
deleted file mode 100644
index 95e1380aaed..00000000000
--- a/config/cesm/machines/nag_mpi_argument.txt
+++ /dev/null
@@ -1,4 +0,0 @@
[deleted: four lines of NAG `-wmismatch=` options naming MPI procedures (point-to-point sends/receives, collectives and their nonblocking variants, neighborhood collectives, one-sided RMA, MPI-IO read/write routines, mpi_startall, mpi_waitall, and the mpibcast/mpiscatterv wrappers) so the compiler's argument-mismatch checking is relaxed for those interfaces.]
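The deleted workflow file gates each job on a small boolean expression over case variables (for example `$BUILD_COMPLETE and not $TEST`). The toy sketch below only illustrates that expression style — it is not the parser CIME actually uses, and the variable values are invented:

```python
import re


def evaluate_prereq(expression, case_values):
    """Substitute $VARIABLES with python literals and evaluate the result.

    Only meant to illustrate the expression style used in the workflow file;
    real prereq handling lives inside CIME itself.
    """
    def repl(match):
        return repr(case_values[match.group(1)])

    python_expr = re.sub(r"\$(\w+)", repl, expression)
    return eval(python_expr, {"__builtins__": {}}, {})  # toy example only


if __name__ == "__main__":
    values = {"BUILD_COMPLETE": True, "TEST": False, "DOUT_S": True}
    print(evaluate_prereq("$BUILD_COMPLETE and not $TEST", values))  # True
    print(evaluate_prereq("$DOUT_S", values))                        # True
```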
diff --git a/config/cesm/machines/template.case.run b/config/cesm/machines/template.case.run
deleted file mode 100755
index 9f035f0ce76..00000000000
--- a/config/cesm/machines/template.case.run
+++ /dev/null
@@ -1,70 +0,0 @@
[deleted: the batch template for case.run — a python wrapper whose only purpose is to carry the {{ batchdirectives }} block; its docstring warns "DO NOT RUN THIS SCRIPT MANUALLY" and directs users to case.submit. It chdirs to {{ caseroot }}, adds {{ cimeroot }}/scripts/Tools to sys.path, splices extra arguments from the ARGS_FOR_SCRIPT environment variable into sys.argv, parses --caseroot, --skip-preview-namelist, --completion-sets-continue-run (clear CONTINUE_RUN for an initial run but set it for subsequent runs), and --resubmit, then calls case.case_run(skip_pnl=..., set_continue_run=..., submit_resubmits=...) and exits 0 on success, 1 otherwise.]

diff --git a/config/cesm/machines/template.case.test b/config/cesm/machines/template.case.test
deleted file mode 100755
index 0bb8a49506c..00000000000
--- a/config/cesm/machines/template.case.test
+++ /dev/null
@@ -1,64 +0,0 @@
[deleted: the matching batch template for case.test. Same structure; it accepts an optional positional testname (default taken from TESTCASE in env_test.xml), --caseroot, --reset (return the case to its original state as defined by config_tests.xml), --resubmit (ignored in tests but accepted so all templates share an interface), and --skip-preview-namelist, then calls case.case_test(testname=..., reset=..., skip_pnl=...).]
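Both templates (and template.st_archive below) use the same trick for argument passing on batch systems: any extra options are read from the ARGS_FOR_SCRIPT environment variable and spliced into sys.argv before argparse runs. A standalone sketch of just that pattern:

```python
#!/usr/bin/env python
import argparse
import os
import sys


def main():
    # Same idiom as the deleted templates: a batch scheduler may not forward
    # command-line arguments, so they can be smuggled in via ARGS_FOR_SCRIPT.
    sys.argv.extend(
        [] if "ARGS_FOR_SCRIPT" not in os.environ
        else os.environ["ARGS_FOR_SCRIPT"].split()
    )

    parser = argparse.ArgumentParser(description="ARGS_FOR_SCRIPT demo")
    parser.add_argument("--caseroot", default=os.getcwd())
    parser.add_argument("--resubmit", action="store_true")
    print(parser.parse_args())


if __name__ == "__main__":
    main()
```

Running the sketch as `ARGS_FOR_SCRIPT="--resubmit" python demo.py` (demo.py being whatever you save it as) behaves exactly like passing `--resubmit` on the command line.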
diff --git a/config/cesm/machines/template.st_archive b/config/cesm/machines/template.st_archive
deleted file mode 100755
index 3f010ef1270..00000000000
--- a/config/cesm/machines/template.st_archive
+++ /dev/null
@@ -1,97 +0,0 @@
[deleted: the batch template for short-term archiving. Its docstring: "Performs short term archiving for restart files, history and rpointer files in the $RUNDIR associated with $CASEROOT. Normally this script is called by case.submit on batch systems." It accepts --caseroot (default: the current directory), --no-incomplete-logs (do not archive logs that have not completed), --copy-only (copy rather than move the archived files), --last-date (last simulation date to archive, given as 'Year-Month-Day' with a four-digit year; the help text warns that combining it with --force-move may corrupt the run directory), --force-move (move even when it is unsafe), --test-all (exercise the short-term archiver against config_archive.xml), --test-case (exercise it against env_archive.xml), and --resubmit (perform resubmissions; primarily for use by case.submit). --last-date implies --copy-only unless --force-move is given, and --force-move clears --copy-only. After the usual ARGS_FOR_SCRIPT splice it calls case.test_st_archive(), case.test_env_archive(), or case.case_st_archive(last_date_str=..., archive_incomplete_logs=not ..., copy_only=..., resubmit=...) and exits accordingly.]
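The small piece of flag logic in that template is worth spelling out: `--last-date` forces a copy unless the caller explicitly passes `--force-move`, which in turn wins over `--copy-only`. A reduced sketch of just that interplay, with everything else omitted:

```python
import argparse

parser = argparse.ArgumentParser(description="st_archive flag logic (reduced)")
parser.add_argument("--last-date", default=None)
parser.add_argument("--copy-only", default=False, action="store_true")
parser.add_argument("--force-move", default=False, action="store_true")
args = parser.parse_args()

# Archiving only up to a given date is safe only as a copy, unless the caller
# explicitly forces a move; an explicit force-move always disables copy-only.
if args.last_date is not None and not args.force_move:
    args.copy_only = True
if args.force_move:
    args.copy_only = False

print({"last_date": args.last_date,
       "copy_only": args.copy_only,
       "force_move": args.force_move})
```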
diff --git a/config/cesm/machines/userdefined_laptop_template/README.md b/config/cesm/machines/userdefined_laptop_template/README.md
deleted file mode 100644
index 1a0e03b663f..00000000000
--- a/config/cesm/machines/userdefined_laptop_template/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
[deleted README, reproduced here in cleaned form:]

Building CIME on an UNSUPPORTED local machine
---------------------------------------------

These directions are for a Mac OS X 10.9 or 10.10 laptop using homebrew or
macports to install the required software. The procedure is similar for a
linux workstation or cluster; you will just use different package management
tools to install the third party libraries.

Setup
=====

- Install xcode, including the command line tools. Failure to install the
  command line tools is the most likely cause if you get an error about the
  compilers not being able to create executables.

- Install third party libraries from homebrew or macports.

  - home brew — install the science tap, then:

        brew install gcc --without-multilib cmake mpich hdf5 --enable-fortran netcdf --enable-fortran

  - macports:

        sudo port install mpich +gcc48 hdf5-18 +mpich netcdf-fortran +gcc48 +mpich cmake

    Note: If you see an error while running create_newcase that indicates perl
    can't find XML::LibXML, you may need to install p5-xml-libxml as well.

- Some of the shell scripts used by cesm hard code "gmake" instead of using
  the GMAKE variable from env_build.xml. To work around this, you should
  install gnu make, or simply create a link from make to gmake in your path:
        mkdir -p ${HOME}/local/bin
        ln -s `whereis make` ${HOME}/local/bin/gmake
        cat >> ${HOME}/.bashrc <
        [...]

[deleted: the remainder of the README plus the laptop-template compiler settings — CPP defines -DFORTRANUNDERSCORE and -DNO_R16, the -fopenmp flag, gfortran/cc/c++ serial compilers and mpif90/mpicc/mpicxx MPI wrappers under /usr/local, and NetCDF link flags taken from `$(NETCDF_PATH)/bin/nf-config --flibs` plus `-framework Accelerate`.]

diff --git a/config/cesm/machines/userdefined_laptop_template/config_machines.xml b/config/cesm/machines/userdefined_laptop_template/config_machines.xml
deleted file mode 100644
index 2d1005c5d0d..00000000000
--- a/config/cesm/machines/userdefined_laptop_template/config_machines.xml
+++ /dev/null
@@ -1,33 +0,0 @@
[deleted: the single-machine laptop template — placeholders __USEFUL_DESCRIPTION__, "something.matching.your.machine.hostname", and __YOUR_NAME_HERE__; Darwin, gnu, mpich; case directories under $ENV{HOME}/projects; 4 tasks per node; `mpiexec_mpt -np $TOTALPES --prepend-rank`.]

diff --git a/config/cesm/machines/userdefined_laptop_template/config_pes.xml b/config/cesm/machines/userdefined_laptop_template/config_pes.xml
deleted file mode 100644
index 0464137703c..00000000000
--- a/config/cesm/machines/userdefined_laptop_template/config_pes.xml
+++ /dev/null
@@ -1,31 +0,0 @@
[deleted: minimal single-node PE layouts for the laptop template — one component given $MAX_TASKS_PER_NODE tasks, the remaining components one task, one thread, root PE 0.]

diff --git a/config/e3sm/allactive/config_compsets.xml b/config/e3sm/allactive/config_compsets.xml
deleted file mode 100644
index 118beecef09..00000000000
--- a/config/e3sm/allactive/config_compsets.xml
+++ /dev/null
@@ -1,327 +0,0 @@
[deleted: the E3SM all-active compset definitions. The file opens with this naming-convention comment, reproduced in cleaned form:]

  compset naming convention
  =========================
  The compset longname below has the specified order:
  atm, lnd, ice, ocn, river, glc, wave, model-options.

  The notation for the compset longname is:
  TIME_ATM[%phys]_LND[%phys]_ICE[%phys]_OCN[%phys]_ROF[%phys]_GLC[%phys]_WAV[%phys][_ESP%phys][_BGC%phys]
  Where for the CAM specific compsets below the following is supported:
  TIME = Time period (e.g. 2000, HIST, RCP8...)
  ATM  = [CAM4, CAM5, SATM]
  LND  = [CLM45, SLND]
  ICE  = [MPASSI, CICE, DICE, SICE]
  OCN  = [MPASO, DOCN, SOCN]
  ROF  = [MOSART, SROF]
  GLC  = [MALI, SGLC]
  WAV  = [DWAV, XWAV, SWAV]
  ESP  = [SESP]
  BGC  = optional BGC scenario

  The OPTIONAL %phys attributes specify submodes of the given system; for
  example, DOCN%DOM is the data ocean model for DOCN. ALL data models must
  have a %phys option that corresponds to the data model mode.

  Each compset node is associated with the elements lname, alias, and an
  optional support note, and may carry a grid attribute (an optional regular
  expression restricting which grids the compset works with).
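Given that layout, a compset longname can be split mechanically into its per-component pieces. A small parser sketch under the assumptions stated in the comment above — the first eight underscore-separated fields are always TIME through WAV with optional %phys modifiers, and anything after WAV is an optional trailing field such as a _BGC%... scenario or a _TEST marker:

```python
from pprint import pprint

COMPONENTS = ("TIME", "ATM", "LND", "ICE", "OCN", "ROF", "GLC", "WAV")


def parse_compset(longname):
    """Split a compset longname into components and their %phys modifiers."""
    fields = longname.split("_")
    parsed = {}
    for key, field in zip(COMPONENTS, fields):
        name, _, phys = field.partition("%")
        parsed[key] = {"name": name, "phys": phys or None}
    # Anything beyond the first eight fields is an optional trailing modifier.
    parsed["extras"] = fields[len(COMPONENTS):]
    return parsed


if __name__ == "__main__":
    # One of the longnames removed in this file.
    pprint(parse_compset(
        "1850_CAM5%CMIP6_CLM45%SPBC_MPASSI%SPUNUP_MPASO%SPUNUP_MOSART_SGLC_SWAV"
    ))
```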
[config_compsets.xml (deleted), continued — the compset aliases removed in this file fall into these groups:]

- Coupled water-cycle compsets: A_WCYCL1850S_CMIP6, A_WCYCL20TRS_CMIP6, the 1950 control variants (A_WCYCL1950S_CMIP6_LR/_HR/_LRtunedHR), the AV1C-L set (A_WCYCL2000, A_WCYCL2000S, A_WCYCL1850, A_WCYCL1850S, A_WCYCL20TR, A_WCYCL20TRS, A_CRYO), and the high-resolution H01A variants.
- BGC experiment compsets (BGCEXP_BCRC/BCRD/BDRC/BDRD with CNPRDCTC or CNPECACNT land biogeochemistry, for 1850 and 20TR, in coupled and spun-up forms).
- Data-iceberg and ice-shelf-melt-flux variants (A_WCYCL1850-DIB, A_WCYCL1850-DIB-ISMF, and their CMIP6 counterparts).
- v0atm variants of the 1850/2000 coupled compsets, A_BG1850CN, and MPAS_LISIO_TEST.
- Slab-ocean E compsets (ETEST, E1850C5, E1850C5TEST, E1850CN, E1850C5CN, E1850C5CNTEST, E1850C5CLM45, E1850C5CLM45CN, E1850C5CLM45BGC, E1850C5CNECACTCBC, E20TRC5CNECACTCBC).

diff --git a/config/e3sm/allactive/config_pesall.xml b/config/e3sm/allactive/config_pesall.xml
deleted file mode 100644
index 17fb664c6fe..00000000000
--- a/config/e3sm/allactive/config_pesall.xml
+++ /dev/null
@@ -1,8793 +0,0 @@
[deleted: PE layouts for the all-active compsets — per-component task, thread, and root-PE counts for many grid and machine combinations. The portion shown in this hunk includes generic small layouts (16, 64, 128, 2048, and 4800 tasks and similar), cori-knl ne120 layouts ("338 nodes, 64x4" with 21600 tasks and 4 threads, plus 169- and 85-node versions), and cori-knl ne30 layouts (85-, 43-, 22-, and 4-node variants).]
- 0 - - - - - - - - none - - 1280 - 1280 - 1280 - 1280 - 1280 - 1280 - 1280 - 1280 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 2000 - 960 - 2000 - 1040 - 32 - 1 - 2000 - 960 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 960 - 2000 - 0 - 0 - 0 - - - - - - - - MALISIA. - - 8 - 8 - 8 - 8 - 8 - 8 - 8 - 8 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 128 - 128 - 128 - 128 - 128 - 128 - 128 - 128 - - - 4 - 4 - 4 - 1 - 1 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 256 - 256 - 256 - 256 - 256 - 256 - 256 - 256 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 12 - 12 - 12 - 12 - 12 - 12 - 12 - 12 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 8 - 8 - 8 - 8 - 8 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 48 - 48 - 48 - 48 - 48 - 48 - 48 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - 2 nodes, 64x2 - 64 - 128 - - 128 - 128 - 128 - 128 - 128 - 128 - 128 - 128 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 128 - 128 - 128 - 128 - 128 - 128 - 128 - 128 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 24 - 24 - 24 - 24 - 24 - 24 - 24 - 24 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 48 - 48 - 48 - 48 - 48 - 48 - 48 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 96 - 96 - 96 - 96 - 96 - 96 - 96 - 96 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 
0 - 0 - - - - - - - - none - - 96 - 96 - 96 - 96 - 96 - 96 - 96 - 96 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - 4 nodes, 64x2 - 64 - 128 - - 256 - 256 - 256 - 256 - 256 - 256 - 256 - 256 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 24 - 24 - 24 - 24 - 24 - 24 - 24 - 24 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - - none - - 48 - 48 - 48 - 48 - 48 - 48 - 48 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 192 - 192 - 192 - 192 - 192 - 192 - 192 - 192 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - 1 node, 64x2 - 64 - 128 - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 48 - 48 - 48 - 48 - 48 - 48 - 48 - 48 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 48 - 48 - 48 - 48 - 48 - 48 - 48 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 128 - 128 - 128 - 128 - 128 - 128 - 128 - 128 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 96 - 96 - 96 - 96 - 96 - 96 - 96 - 96 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - cori-knl, 6 nodes, 64x4, sypd=2.93 (for F-compset) - 64 - 256 - - 384 - 384 - 384 - 256 - 256 - 64 - 64 - 384 - - - 4 - 4 - 4 - 1 - 1 - 1 - 1 - 4 - - - - - - - - 6 nodes, 64x2, sypd=11.1 (for F-compset) - 64 - 128 - - 363 - 363 - 363 - 128 - 128 - 64 - 64 - 363 - - - 2 - 2 - 1 - 1 - 1 - 1 - 1 - 2 - - - - - - - - none - - 960 - 48 - 960 - 912 - 48 - 1 - 960 - 960 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 48 - 960 - 0 - 0 - 0 - - - - - - - - none - - 256 - 256 - 256 - 256 - 256 - 256 - 256 - 256 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 512 - 512 - 512 - 512 - 512 - 512 - 512 - 512 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 128 - 128 - 128 - 128 - 128 - 128 - 128 - 128 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 25 - 25 - 25 - 25 - 25 - 25 - 25 - 25 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 25 - 25 
- 25 - 25 - 25 - 25 - 25 - 25 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 40 - 40 - 40 - 40 - 40 - 40 - 40 - 40 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - default and minimal 512-node partition - - 2048 - 2048 - 2048 - 2048 - 2048 - 2048 - 2048 - 2048 - - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - default 64x16 PEs for acme_developer tests - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - -compset MALI - - 1 - 1 - 1 - 1 - 1 - 64 - 1 - 64 - - - 1 - 1 - 1 - 1 - 1 - 16 - 1 - 16 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 8 - 8 - 8 - 8 - 8 - - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 5 - 4 - 8 - 8 - 8 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 1 - 1 - 1 - 32 - 96 - 1 - 1 - 32 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 16 - 0 - 0 - 32 - 0 - 0 - 0 - - - - - - - - none - - 1 - 1 - 1 - 80 - 192 - 1 - 1 - 80 - - - 1 - 1 - 1 - 4 - 4 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 80 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 8 - 480 - 8 - 8 - 32 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 8 - 0 - 16 - 32 - 24 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 480 - 8 - 8 - 8 - 32 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 8 - 0 - 32 - 16 - 24 - 0 - 0 - - - - - - - - none - - 16 - 8 - 16 - 480 - 480 - 8 - 16 - 32 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 16 - 0 - 32 - 32 - 24 - 0 - 0 - - - - - - - - none - - 48 - 48 - 48 - 48 - 1024 - 48 - 48 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 48 - 0 - 192 - 0 - 96 - 0 - 144 - - - - - - - - none - - 48 - 48 - 48 - 1024 - 48 - 48 - 48 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 48 - 0 - 0 - 192 - 96 - 0 - 144 - - - - - - - - none - - 48 - 48 - 48 - 1024 - 1024 - 48 - 48 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 48 - 0 - 0 - 0 - 96 - 0 - 144 - - - - - - - - none - - 48 - 48 - 48 - 960 - 960 - 48 - 48 - 48 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 48 - 0 - 0 - 0 - 96 - 0 - 144 - - - - - - - - none - - 96 - 96 - 96 - 96 - 96 - 96 - 96 - 96 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 6 - 8 - 8 - 8 - 8 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 5 - 4 - 8 - 8 - 8 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 5 - 4 - 8 - 8 - 8 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 160 - 160 - 160 - 160 - 240 - 160 - 160 - 160 - - - 2 - 1 - 2 - 2 - 1 - 2 - 2 - 1 - - - 0 - 0 - 0 - 0 - 160 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 
0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 32 - 64 - 32 - 64 - 64 - 64 - 64 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 32 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 384 - 64 - 384 - 320 - 32 - 384 - 384 - 64 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 1 - - - 0 - 0 - 0 - 64 - 384 - 0 - 0 - 0 - - - - - - - - none - - 128 - 128 - 128 - 128 - 128 - 128 - 128 - 128 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 120 - 96 - 120 - 12 - 12 - 1 - 120 - 96 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 96 - 108 - 0 - 0 - 0 - - - - - - - - none - - 25 - 25 - 25 - 32 - 32 - 1 - 25 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 256 - 1 - 256 - 256 - 256 - 1 - 256 - 256 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 256 - 1 - 256 - 16 - 16 - 1 - 256 - 256 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 1525 - 71 - 71 - 1454 - 256 - 1 - 1525 - 1525 - - - 16 - 16 - 16 - 16 - 1 - 1 - 16 - 4 - - - 0 - 0 - 0 - 71 - 1525 - 0 - 0 - 0 - - - - none - - 512 - 512 - 512 - 512 - 512 - 512 - 512 - 512 - - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 1525 - 71 - 71 - 1454 - 256 - 1 - 1525 - 1525 - - - 16 - 16 - 16 - 16 - 1 - 1 - 16 - 4 - - - 0 - 0 - 0 - 71 - 1525 - 0 - 0 - 0 - - - - - - - - none - - 496 - 176 - 496 - 320 - 16 - 496 - 496 - 160 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 320 - 0 - 0 - 496 - 0 - 0 - 0 - - - - - - - - none - - 512 - 192 - 512 - 320 - 64 - 512 - 512 - 512 - - - 4 - 4 - 4 - 1 - 1 - 4 - 4 - 1 - - - 0 - 320 - 0 - 0 - 512 - 0 - 0 - 0 - - - - - - - - none - - 480 - 416 - 480 - 480 - 480 - 480 - 480 - 480 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 1024 - 416 - 1024 - 1024 - 1024 - 1024 - 1024 - 1024 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 480 - 416 - 480 - 480 - 1232 - 480 - 480 - 432 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 480 - 0 - 0 - 0 - - - - - - - - none - - 1024 - 416 - 1024 - 1024 - 2356 - 1024 - 1024 - 432 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 1024 - 0 - 0 - 0 - - - - - - - - none - - 1664 - 416 - 1664 - 1800 - 3476 - 1664 - 1664 - 432 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 1800 - 0 - 0 - 0 - - - - - - - - none - - 496 - 336 - 496 - 160 - 16 - 496 - 496 - 160 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 160 - 0 - 0 - 496 - 0 - 0 - 0 - - - - - - - - ne30_ne30 grid on 40 nodes 36 ppn pure-MPI - - 1350 - 72 - 72 - 72 - 72 - 72 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 1368 - 1368 - 1368 - 1368 - 1368 - 0 - 0 - - - - 77x36x1 - - 2700 - 72 - 72 - 72 - 72 - 2700 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 2700 - 2700 - 2628 - 2700 - 0 - 0 - 0 - - - - 152x36x1 - - 5400 - 72 - 72 - 72 - 72 - 72 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 5400 - 5400 - 5400 - 5400 - 5400 - 0 - 0 - - - - - - ne30_ne30 grid on 23 nodes 40 ppn pure-MPI - - 900 - 900 - 900 - 900 - 900 - 900 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - - - - ne30_ne30 grid on 68 nodes 40 ppn pure-MPI - - 2700 - 2700 - 2700 - 2700 - 2700 - 2700 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - - - - 
ne30_ne30 grid on 135 nodes 40 ppn pure-MPI - - 5400 - 5400 - 5400 - 5400 - 5400 - 5400 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - default,4nodes*36tasks*1threads - - 144 - 144 - 144 - 144 - 144 - 144 - 144 - 144 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - default,4nodes*40tasks*1thread - - 160 - 160 - 160 - 160 - 160 - 160 - 160 - 160 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - ne30 grid on 128x16x4 PEs - 16 - - 1350 - 176 - 176 - 1360 - 512 - 1 - 1 - 1360 - 1 - - - 4 - 4 - 4 - 4 - 4 - 1 - 1 - 4 - 1 - - - 0 - 1360 - 1360 - 0 - 1536 - 0 - 0 - 0 - 0 - - - - - - - - none - - 2560 - 512 - 2560 - 512 - 512 - 2560 - 2560 - 512 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 1536 - 512 - 1536 - 0 - 0 - 1536 - 1536 - 1023 - - - - - - - - none - - 9600 - 960 - 960 - 960 - 960 - 960 - 9600 - 960 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 5800 - 960 - 1920 - 4840 - 0 - 3880 - - - - - - - - none - - 2048 - 112 - 2048 - 1800 - 4028 - 2048 - 2048 - 2048 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 2048 - 0 - 2160 - 3960 - 0 - 0 - 0 - - - - - - - - none - - 512 - 512 - 512 - 512 - 512 - 512 - 512 - 512 - - - 6 - 6 - 6 - 1 - 1 - 6 - 6 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 2048 - 112 - 2048 - 1800 - 4028 - 2048 - 2048 - 1800 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 2048 - 0 - 2160 - 3960 - 0 - 0 - 0 - - - - - - - - none - - 1824 - 112 - 1824 - 1600 - 3600 - 1824 - 1824 - 1600 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 1824 - 0 - 1936 - 3536 - 0 - 0 - 0 - - - - - - - - none - - 512 - 512 - 512 - 1800 - 2048 - 512 - 512 - 512 - - - 6 - 6 - 6 - 1 - 1 - 6 - 6 - 1 - - - 0 - 512 - 0 - 512 - 2312 - 0 - 0 - 0 - - - - - - - - none - - 512 - 512 - 512 - 1800 - 2048 - 512 - 512 - 512 - - - 6 - 6 - 6 - 1 - 1 - 6 - 6 - 1 - - - 0 - 512 - 0 - 512 - 2312 - 0 - 0 - 0 - - - - - - - - none - - 1440 - 512 - 1440 - 512 - 2048 - 1440 - 1440 - 512 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 1440 - 0 - 0 - 0 - - - - - - - - none - - 3600 - 512 - 3600 - 512 - 512 - 3600 - 3600 - 512 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 14400 - 512 - 14400 - 512 - 512 - 14400 - 14400 - 512 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 192 - 192 - 192 - 192 - 192 - 192 - 192 - 192 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 96 - 96 - 96 - 96 - 96 - 96 - 96 - 96 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 1 - 16 - 16 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 2 - 2 - 4 - 8 - 4 - 8 - 4 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 8 - 0 - 2 - 4 - 24 - 20 - 8 - 16 - - - - - - - - none - - 8 - 4 - 4 - 8 - 8 - 4 - 4 - 8 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 8 - 0 - 12 - 16 - 
24 - 32 - 44 - 36 - - - - - - - - none - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 4 - 0 - 8 - 12 - 16 - 20 - 28 - 24 - - - - - - - - none - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 8 - 8 - 8 - 8 - 8 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 6 - 6 - 6 - 6 - 6 - 6 - 6 - 6 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 8 - 8 - 8 - 8 - 8 - 8 - 8 - 8 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - -1 - -1 - -1 - -1 - -1 - -1 - -1 - -1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - -1 - -1 - -1 - -1 - -1 - -1 - -1 - -1 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 5 - 5 - 5 - 5 - 5 - 5 - 5 - 5 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 64 - 64 - 64 - 64 - 64 - 8 - 64 - 64 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - "113-node 12x4 F-compset sypd=10.8" - 12 - 48 - - 1350 - 1350 - 1350 - 1350 - 1350 - 12 - 12 - 1350 - - - 4 - 2 - 2 - 1 - 1 - 1 - 1 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - "133 node version gets 6 SYPD. This will be the default and M size" - - 2700 - 312 - 312 - 2400 - 480 - 312 - 2400 - 2400 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 2400 - 2400 - 0 - 2712 - 2400 - 0 - 0 - - - - - - - - "39 node version gets 2.1 SYPD." 
- - 675 - 56 - 56 - 640 - 240 - 56 - 56 - 640 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 640 - 640 - 0 - 696 - 640 - 0 - 0 - - - - - - - - "285 node version gets 11.5 SYPD" - - 5400 - 600 - 600 - 3200 - 1440 - 600 - 4800 - 4800 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 4800 - 4800 - 0 - 5400 - 4800 - 0 - 0 - - - - - - - - "185 nodes, 32x1, ~5sypd (wmod185)" - - 5400 - 608 - 608 - 3200 - 512 - 32 - 32 - 4800 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 4800 - 4800 - 0 - 5408 - 0 - 0 - 0 - - - - "15 nodes, 32x1, ~.5sypd (wmod015)" - - 288 - 32 - 32 - 256 - 192 - 32 - 32 - 288 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 256 - 256 - 0 - 288 - 0 - 0 - 0 - - - - - - - - "cori-knl ne30 coupled compest on 120 nodes, 64x1 (2 threads CPL/OCN/ICE), (kmod125) sypd=4.1" - 64 - 128 - - 6080 - 822 - 128 - 5120 - 1920 - 64 - 64 - 5952 - - - 1 - 1 - 2 - 2 - 2 - 1 - 1 - 2 - - - 0 - 5120 - 5952 - 0 - 6080 - 0 - 0 - 0 - - - - "cori-knl ne30 coupled compest on 60 nodes, 67x2, (kmod060b) sypd=2.86" - 67 - 268 - - 2700 - 268 - 134 - 2560 - 960 - 67 - 67 - 2881 - - - 2 - 2 - 2 - 2 - 2 - 1 - 1 - 2 - - - 0 - 2613 - 2881 - 0 - 3015 - 0 - 0 - 0 - - - - "cori-knl ne30 coupled compest on 31 nodes, 67x2, (kmod031b) sypd=1.71" - 67 - 134 - - 1350 - 670 - 64 - 1350 - 600 - 67 - 67 - 1350 - - - 2 - 2 - 2 - 2 - 2 - 1 - 1 - 2 - - - 0 - 0 - 1407 - 0 - 1474 - 0 - 0 - 0 - - - - "cori-knl ne30 coupled compest on 17 nodes, 67x4, (kmod017) sypd=1.12" - 67 - 268 - - 737 - 670 - 64 - 640 - 384 - 67 - 67 - 737 - - - 4 - 2 - 2 - 2 - 2 - 1 - 1 - 2 - - - 0 - 0 - 670 - 0 - 737 - 0 - 0 - 0 - - - - cori-knl ne30 F-compset on 81 nodes, 67x1, sypd=6.1 - 67 - 134 - - 5427 - 5427 - 5427 - 5427 - 5427 - 33 - 33 - 5427 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - - cori-knl ne30 F-compset on 41 nodes, 33x4, sypd=4.4 - 33 - 132 - - 1350 - 1350 - 1350 - 1200 - 1200 - 33 - 33 - 1350 - - - 4 - 4 - 1 - 1 - 1 - 1 - 1 - 4 - - - - cori-knl ne30 F-compset on 21 nodes, 33x4, sypd=2.35 - 33 - 132 - - 693 - 693 - 693 - 693 - 693 - 33 - 33 - 693 - - - 4 - 4 - 1 - 1 - 1 - 1 - 1 - 4 - - - - cori-knl ne30 F-compset on 4 nodes, 34x8, sypd=0.61 - 34 - 268 - - 136 - 136 - 136 - 136 - 136 - 33 - 33 - 136 - - - 8 - 4 - 4 - 1 - 1 - 1 - 1 - 4 - - - - - - - - none - - 675 - 168 - 168 - 512 - 512 - 1 - 512 - 512 - - - 2 - 2 - 2 - 2 - 1 - 2 - 2 - 2 - - - 0 - 512 - 512 - 0 - 680 - 512 - 0 - 0 - - - - - - - - none - - 1024 - 1024 - 1024 - 1024 - 1024 - 1024 - 1024 - 1024 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - T62_oQU120 grid for MPAS tests on 20 nodes pure-MPI - - 720 - 720 - 720 - 720 - 720 - 720 - 720 - 720 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 9600 - 9600 - 9600 - 9600 - 9600 - 9600 - 1 - 9600 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 9600 - 9600 - 9600 - 9600 - 9600 - 9600 - 1 - 9600 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - ne120 coupled-compset on 1024 nodes - - 2700 - 300 - 300 - 2400 - 1396 - 1 - 1 - 2400 - - - 8 - 8 - 8 - 8 - 2 - 1 - 1 - 8 - - - 0 - 2400 - 2400 - 0 - 2700 - 0 - 0 - 0 - - - - ne120 coupled-compset on 2048 nodes - - 5400 - 2608 - 2608 - 2792 - 2792 - 1 - 1 - 2792 - - - 8 - 8 - 8 - 8 - 2 - 1 - 1 - 8 - - - 0 - 2792 - 2792 - 0 - 5400 - 0 - 0 - 0 - - - - ne120 coupled-compset on 4096 nodes - - 10800 - 2608 - 2608 - 8192 - 5584 - 1 - 1 - 8192 - - - 8 - 8 - 8 - 8 - 2 - 1 - 1 - 8 - - - 0 - 8192 - 8192 - 0 - 10800 - 0 - 0 - 0 - - - - ne120 
F-compset on 512 nodes - 64 - 8 - - 3600 - 2048 - 2048 - 2048 - 2048 - 2048 - 1 - 1 - - - 8 - 8 - 8 - 8 - 8 - 8 - 1 - 1 - - - 0 - 0 - 2048 - 0 - 0 - 0 - 0 - 0 - - - - ne120 F-compset on 1024 nodes - 64 - 8 - - 7200 - 7200 - 7200 - 992 - 992 - 992 - 1 - 1 - - - 6 - 6 - 6 - 6 - 6 - 6 - 1 - 1 - - - 0 - 0 - 0 - 7200 - 7200 - 7200 - 0 - 0 - - - - ne120 F-compset on 2048 nodes - 64 - 8 - - 14400 - 14400 - 14400 - 1984 - 1984 - 1984 - 1 - 1 - - - 8 - 8 - 8 - 8 - 8 - 8 - 1 - 1 - - - 0 - 0 - 0 - 14400 - 14400 - 14400 - 0 - 0 - - - - - - ne120 F-compset on 128 nodes - 256 - 64 - - 7200 - 7200 - 7200 - 960 - 960 - 960 - 1 - 1 - - - 4 - 4 - 4 - 4 - 4 - 4 - 1 - 1 - - - 0 - 0 - 0 - 7232 - 7232 - 7232 - 0 - 0 - - - - ne120 F-compset on 384 nodes - 256 - 32 - - 10800 - 10800 - 10800 - 1472 - 1472 - 1472 - 1 - 1 - - - 8 - 8 - 8 - 8 - 8 - 8 - 1 - 1 - - - 0 - 0 - 0 - 10816 - 10816 - 10816 - 0 - 0 - - - - ne120-wcycl on 145 nodes, MPI-only - 64 - 64 - - 7200 - 7200 - 6400 - 832 - 832 - 2048 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 6400 - 6400 - 7232 - 0 - 0 - - - - ne120-wcycl on 145 nodes, threaded - 256 - 64 - - 7200 - 7200 - 6400 - 832 - 832 - 2048 - 1 - 1 - - - 4 - 1 - 2 - 4 - 4 - 4 - 1 - 1 - - - 0 - 0 - 0 - 6400 - 6400 - 7232 - 0 - 0 - - - - ne120 coupled-compset on 466 nodes - 64 - 64 - - 21600 - 16384 - 16384 - 5248 - 5248 - 8192 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 16384 - 16384 - 21632 - 0 - 0 - - - - ne120-wcycl on 863 nodes, MPI-only - 64 - 64 - - 43200 - 43200 - 24000 - 4800 - 4800 - 12000 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 38400 - 33600 - 43200 - 0 - 0 - - - - ne120-wcycl on 863 nodes, threaded - 128 - 64 - - 43200 - 43200 - 24000 - 4800 - 4800 - 12000 - 1 - 1 - - - 2 - 1 - 2 - 2 - 2 - 2 - 1 - 1 - - - 0 - 0 - 0 - 38400 - 33600 - 43200 - 0 - 0 - - - - ne120-wcycl on 825 nodes, threaded, 32 tasks/node - 128 - 32 - - 21600 - 21600 - 9600 - 4800 - 4800 - 4800 - 1 - 1 - - - 4 - 4 - 4 - 4 - 4 - 4 - 1 - 1 - - - 0 - 0 - 0 - 16800 - 12000 - 21600 - 0 - 0 - - - - ne120-wcycl on 800 nodes, threaded, 32 tasks/node - 128 - 32 - - 21600 - 21600 - 12000 - 4800 - 4800 - 4000 - 1 - 1 - - - 4 - 4 - 4 - 4 - 4 - 4 - 1 - 1 - - - 0 - 0 - 0 - 16800 - 12000 - 21600 - 0 - 0 - - - - - - cori-knl ne120 coupled compset on 1025 nodes, 33x8, (hmod1025vc) s=1.0 - 33 - 264 - - 29007 - 27852 - 19200 - 4950 - 1155 - 4800 - 33 - 33 - - - 4 - 4 - 4 - 2 - 2 - 4 - 1 - 1 - - - 0 - 0 - 0 - 19800 - 27852 - 29007 - 0 - 0 - - - - cori-knl ne120 coupled-compset on 448 nodes, 33x8, (hmod448b) sypd=0.69 wcosplite s=0.54 - 33 - 264 - - 12375 - 11880 - 9600 - 2277 - 495 - 2400 - 33 - 33 - - - 4 - 4 - 4 - 2 - 2 - 4 - 1 - 1 - - - 0 - 0 - 0 - 9603 - 11880 - 12375 - 0 - 0 - - - - cori-knl ne120 coupled-compset on 207 nodes, 33x8, (hmod207) sypd=0.37 - 33 - 264 - - 5610 - 4620 - 5584 - 4620 - 990 - 1200 - 33 - 33 - - - 8 - 8 - 4 - 4 - 2 - 4 - 1 - 1 - - - 0 - 0 - 0 - 0 - 4620 - 5610 - 0 - 0 - - - - cori-knl ne120 coupled-compset on 131 nodes, 33x8, (hmod131) sypd=0.25 - 33 - 264 - - 3333 - 3333 - 3200 - 2871 - 462 - 960 - 33 - 33 - - - 8 - 8 - 4 - 4 - 4 - 4 - 1 - 1 - - - 0 - 0 - 0 - 0 - 2871 - 3333 - 0 - 0 - - - - cori-knl ne120 F-compset on 675 nodes, 64x2, sypd=1.95 - 64 - 128 - - 43200 - 43200 - 43200 - 43200 - 43200 - 64 - 64 - 43200 - - - 2 - 2 - 1 - 1 - 1 - 1 - 1 - 2 - - - - cori-knl ne120 F-compset on 323 nodes, 67x4, sypd=1.18 - 67 - 268 - - 21600 - 21600 - 21600 - 21600 - 21600 - 67 - 67 - 21600 - - - 4 - 4 - 1 - 1 - 1 - 1 - 1 - 4 - - - - cori-knl ne120 F-compset on 
162 nodes, 67x4, sypd=0.69 - 67 - 268 - - 10800 - 10800 - 10800 - 10800 - 10800 - 67 - 67 - 10800 - - - 4 - 4 - 4 - 1 - 1 - 1 - 1 - 4 - - - - cori-knl ne120 F-compset on 81 nodes, 67x4, sypd=0.35 - 67 - 268 - - 5427 - 5427 - 5427 - 5427 - 5427 - 67 - 67 - 5427 - - - 4 - 4 - 4 - 1 - 1 - 1 - 1 - 4 - - - - cori-knl ne120 F-compset on 42 nodes, 67x4, sypd=0.19 - 67 - 268 - - 2814 - 2814 - 2814 - 2814 - 2814 - 67 - 67 - 2814 - - - 4 - 2 - 2 - 1 - 1 - 1 - 1 - 4 - - - - - - compy ne120 W-cycle on 310 nodes, 40x1, sypd=1.2 - - 9600 - 9600 - 7200 - 2800 - 2400 - 2400 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 9600 - 7200 - 7200 - - - - - - - - none - - 240 - 240 - 240 - 240 - 240 - 240 - 240 - 240 - - - 4 - 4 - 4 - 1 - 1 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - ne4 grid on 4 nodes pure-MPI - - 108 - 108 - 108 - 36 - 36 - 36 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 108 - 108 - 108 - 0 - 0 - - - - - - - - -compset A_WCYCL* -res ne30_oEC* on 32 nodes pure-MPI - - 675 - 72 - 72 - 720 - 360 - 720 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 720 - 720 - 0 - 792 - 0 - 0 - 0 - - - - -compset A_WCYCL* -res ne30_oEC* on 54 nodes pure-MPI - - 1350 - 216 - 216 - 1152 - 576 - 1152 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 1152 - 1152 - 0 - 1368 - 0 - 0 - 0 - - - - -compset A_WCYCL* -res ne30_oEC* on 105 nodes pure-MPI - - 2700 - 540 - 540 - 2160 - 1080 - 2160 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 2160 - 2160 - 0 - 2700 - 0 - 0 - 0 - - - - - - ne30-wcycl on 8 nodes - 128 - 64 - - 338 - 128 - 128 - 256 - 128 - 256 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 256 - 256 - 0 - 384 - 0 - 0 - 0 - - - - ne30-wcycl on 128 nodes - 128 - 64 - - 5400 - 640 - 640 - 2752 - 2752 - 5400 - 1 - 1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 4800 - 4800 - 0 - 5440 - 0 - 0 - 0 - - - - - - -compset A_WCYCL* -res ne30_oEC* on 27 nodes pure-MPI - - 900 - 900 - 900 - 900 - 160 - 900 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 920 - 0 - - - - -compset A_WCYCL* -res ne30_oEC* on 40 nodes pure-MPI - - 1350 - 1350 - 1350 - 1350 - 240 - 1350 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 1360 - 0 - - - - -compset A_WCYCL* -res ne30_oEC* on 80 nodes pure-MPI - - 2700 - 2700 - 2700 - 2700 - 480 - 2700 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 2720 - 0 - - - - -compset A_WCYCL* -res ne30_oEC* on 160 nodes pure-MPI - - 5400 - 5400 - 5400 - 5400 - 1000 - 5400 - - - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 5400 - 0 - - - - - - - - none - - 32 - 32 - 32 - 32 - 32 - 32 - 32 - 32 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - none - - 48 - 48 - 48 - 48 - 48 - 48 - 48 - 48 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - none - - 96 - 96 - 96 - 96 - 96 - 96 - 96 - 96 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - none - - 32 - 16 - 32 - 16 - 16 - 32 - 32 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 32 - 0 - 32 - 32 - 0 - 0 - 0 - - - - none - - 96 - 16 - 96 - 96 - 16 - 96 - 96 - 96 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 96 - 0 - 0 - 96 - 0 - 0 - 0 - - - - none - - 96 - 32 - 96 - 96 - 32 - 96 - 96 - 96 - - - 4 - 4 - 4 - 4 - 4 - 4 - 4 - 4 - - - 0 - 96 - 0 - 0 - 96 - 0 - 0 - 0 - - - - - - - - none - - 32 - 16 - 32 - 16 - 16 - 32 - 32 - 48 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 32 - 0 - 32 - 32 - 0 - 0 - 0 - - - - - - - - any compset on ne4 grid - - 6 - 6 - 6 - 6 - 6 - 6 - 6 - 6 - - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 
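The node counts quoted in the deleted layout comments (for example "cori-knl, generic ne120, 338 nodes, 64x4") follow from the task counts and the tasks-per-node value. Below is a minimal sketch of that arithmetic, assuming every component roots at PE 0 so the largest NTASKS value sets the footprint; nodes_needed and its arguments are illustrative names, not part of CIME.

import math

def nodes_needed(ntasks_by_component, tasks_per_node):
    """Estimate nodes for a layout where all components share ROOTPE 0,
    so the component with the most MPI tasks determines the footprint."""
    return math.ceil(max(ntasks_by_component.values()) / tasks_per_node)

# "cori-knl, generic ne120, 338 nodes, 64x4": the largest NTASKS is 21600
# and 64 MPI tasks are placed per node, so ceil(21600 / 64) = 338 nodes.
print(nodes_needed({"atm": 21600, "lnd": 21600, "ocn": 19200, "ice": 21600}, 64))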
diff --git a/config/e3sm/allactive/testlist_allactive.xml b/config/e3sm/allactive/testlist_allactive.xml
deleted file mode 100644
index c694b09823d..00000000000
--- a/config/e3sm/allactive/testlist_allactive.xml
+++ /dev/null
@@ -1,370 +0,0 @@
[370 deleted lines: the all-active test list, with entries naming the machines each test runs on (yellowstone, hobart, mira, bluewaters, edison, eos, titan, janus, babbageKnc)]
diff --git a/config/e3sm/config_archive.xml b/config/e3sm/config_archive.xml
deleted file mode 100644
index afe05a45aa0..00000000000
--- a/config/e3sm/config_archive.xml
+++ /dev/null
@@ -1,144 +0,0 @@
[144 deleted lines: short-term archive specifications for cam, clm, mosart, cice, mpassi, mpaso, mali, and dart: restart and history file suffix patterns (e.g. "[ri]", "rh\d*", "h\d*.*\.nc$", "rst.am.timeSeriesStatsMonthly"), rpointer file names such as rpointer.atm$NINST_STRING with content $CASE.cam$NINST_STRING.r.$DATENAME.nc, and test file names such as casename.cam.rh4.1976-01-01-00000.nc]
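The suffix patterns in the archive entries above select which files the short-term archiver treats as restart or history output. The snippet below is a toy illustration of that selection, not st_archive's actual matching code; it uses the cam patterns and sample file names copied from the deleted entry.

import re

# Suffix patterns and sample file names copied from the deleted cam entry.
patterns = [r"rh\d*", r"h\d*.*\.nc$"]

def matches_any(filename, patterns):
    """Return True if any archive suffix pattern occurs in the file name."""
    return any(re.search(p, filename) for p in patterns)

print(matches_any("casename.cam.rh4.1976-01-01-00000.nc", patterns))  # True
print(matches_any("casename.cam.i.1976-01-01-00000.nc", patterns))    # False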
diff --git a/config/e3sm/config_files.xml b/config/e3sm/config_files.xml
deleted file mode 100644
index 1f8b4e1998b..00000000000
--- a/config/e3sm/config_files.xml
+++ /dev/null
@@ -1,420 +0,0 @@
[420 deleted lines: entries locating every other configuration file for the e3sm model: case headers, batch, workflow, inputdata, grids, compilers, machines, pio, and system-test definitions, the primary-component config_compsets, config_pes, config_archive, test list, testmods, and usermods locations, namelist definition files, and the driver plus per-component config_component.xml files under $CIMEROOT and $SRCROOT, most validated against schemas in $CIMEROOT/config/xml_schemas]
diff --git a/config/e3sm/config_grids.xml b/config/e3sm/config_grids.xml
deleted file mode 100644
index 2f7480419db..00000000000
--- a/config/e3sm/config_grids.xml
+++ /dev/null
@@ -1,3409 +0,0 @@
-  <!--
-  =========================================
-  GRID naming convention
-  =========================================
-  The notation for the grid longname is
-    a%name_l%name_oi%name_r%name_m%mask_g%name_w%name
-  where
-    a% => atm, l% => lnd, oi% => ocn/ice, r% => river, m% => mask, g% => glc, w% => wav
-
-  Supported out-of-the-box grid configurations are given via alias specification in
-  the file "config_grids.xml". Each grid alias can also be associated with the
-  following optional attributes:
-
-    compset     (regular expression for compset matches that are required for this grid)
-    not_compset (regular expression for compset matches that are not permitted for this grid)
-
-  Using the alias and the optional "compset" and "not_compset" attributes, a grid longname is created.
-  Note that the mask is for information only and is not an attribute of the grid.
-  By default, if the mask is not specified below, it is set to the ocnice grid;
-  if there is no ocnice grid (such as for single column), the mask is null since it does not mean anything.
-  -->
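A minimal illustration of the longname notation described in the comment above, composing a grid longname from per-component grid names that appear elsewhere in this file; grid_longname is a hypothetical helper written for this example, not a CIME function.

def grid_longname(atm, lnd, ocnice, river, mask, glc, wav):
    """Compose a grid longname following the a%/l%/oi%/r%/m%/g%/w% notation."""
    return f"a%{atm}_l%{lnd}_oi%{ocnice}_r%{river}_m%{mask}_g%{glc}_w%{wav}"

# An ne30 atmosphere/land paired with the oEC60to30v3 ocean-ice mesh and r05 runoff:
print(grid_longname("ne30np4", "ne30np4", "oEC60to30v3", "r05", "oEC60to30v3", "null", "null"))
# a%ne30np4_l%ne30np4_oi%oEC60to30v3_r%r05_m%oEC60to30v3_g%null_w%null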
- [deleted grid alias definitions continue: T62 and TL319 atmospheres paired with the remaining MPAS meshes (oRRS30to10[v3][wLI], oRRS18to6[v3], oRRS15to5, oQU480/240/240wLI/120, oARRM60to10/6, oEC60to30v3) and the JRA025 runoff grid; finite-volume aliases (0.23x0.31, 0.47x0.63, 0.9x1.25, 1.9x2.5, 2.5x3.33, 4x5, 10x15) with gx1v6/gx3v7/usgs masks and r05/r01 runoff; spectral-element aliases ne4np4 through ne240np4 (ne4, ne8, ne11, ne16, ne25, ne30, ne32, ne35, ne40, ne45, ne60, ne64, ne120, ne240), including .pg1/.pg2 physics-grid variants, regionally refined meshes (ne0np4_arm_x8v3_lowcon, ne0np4_conus_x4v1_lowcon, ne0np4_svalbard_x8v1_lowcon, ne0np4_sooberingoa_x4x8v1_lowcon, ne0np4_enax4v1, ne0np4_twpx4v1) and *_ICG spun-up ocean variants; aliases coupling the MALI ice-sheet meshes (mpas.gis20km, mpas.ais20km, mpas.aisgis20km) to 0.9x1.25 and ne30np4 cases; data-atmosphere aliases with rx1 runoff; and wave-enabled aliases using ww3a]
- [the domain-definition section then begins: notes on the regional grid mask, the USGS mask and the null grid, followed by the single-point and regional CLM domains (CLM_USRDAT, 1x1_numaIA, 1x1_brazil, 1x1_smallvilleIA, 1x1_camdenNJ, 1x1_mexicocityMEX, 1x1_vancouverCAN, 1x1_tropicAtl, 1x1_urbanc_alpha, 5x5_amazon) plus 360x720cru and NLDAS, each pointing at a domain file under $DIN_LOC_ROOT/share/domains/domain.clm/ and valid only for DATM/CLM compsets]
- [deleted domain definitions continue; each entry gives the grid size (nx, ny), the land/ocean domain file(s) under $DIN_LOC_ROOT/share/domains/, and a one-line description, covering: the finite-volume grids 0.23x0.31 (1152x768), 0.47x0.63 (576x384), 0.9x1.25 (288x192), 1.9x2.5 (144x96), 2.5x3.33 (108x72), 4x5 (72x46) and 10x15 (24x19); the Gaussian grids T341 (1024x512), T85 (256x128), T62 (192x96), T42 (128x64) and T31 (96x48), plus the JRA TL319 grid (640x320); the spectral-element grids ne4np4 through ne240np4 with their .pg1/.pg2 variants (several pg domains marked DUMMY); the ocean/ice meshes gx1v6 (320x384), gx3v7 (100x116), mpasgx1, mpas120, mp120v1, mp120r10v1, oEC60to30[v3][wLI] (with and without ocean under land-ice cavities), the oRRS30to10, oRRS18to6 and oARRM60to10/6 variants and the *_ICG spun-up variants; the runoff grids rx1 (360x180), r05 (720x360), r01 (3600x1800) and r0125 (2880x1440); the MALI grids mpas.aisgis20km, mpas.gis20km and mpas.ais20km (uniform 20 km meshes intended primarily for testing); the ww3a wave grid (90x50); the regionally refined domains armx8v3, enax4v1, twpx4v1 and conusx4v1; the tx0.1v2 mask; and the quasi-uniform MPAS meshes oQU480, oQU240 and oQU240wLI]
- [domain entries conclude with oQU120 (28571 cells, domain.ocn.oQU120.160401.nc, nominally 1 degree) and oRRS15to5 (5778136 cells, domain.ocn.oRRS15to5.160207.nc)]
- [the gridmap section then declares, for each grid combination, the required mapping-file variables:]
- ATM2OCN_FMAPNAME
- ATM2OCN_SMAPNAME
- ATM2OCN_VMAPNAME
- OCN2ATM_FMAPNAME
- OCN2ATM_SMAPNAME
- ATM2LND_FMAPNAME
- ATM2LND_SMAPNAME
- LND2ATM_FMAPNAME
- LND2ATM_SMAPNAME
- ATM2WAV_SMAPNAME
- OCN2WAV_SMAPNAME
- ICE2WAV_SMAPNAME
- ROF2OCN_LIQ_RMAPNAME
- ROF2OCN_ICE_RMAPNAME
- LND2ROF_FMAPNAME
- ROF2LND_FMAPNAME
- [per-grid-pair gridmap entries begin (see the sketch below): atm<->ocn maps for fv0.23x0.31, fv0.47x0.63, fv0.9x1.25, fv1.9x2.5 and fv4x5 against gx1v6, gx3v7, mpas120 and mp120v1, and for ne4np4 against gx3v7, oQU480 and oQU240, each listing the aave/blin/patc (or conserve) map files under cpl/cpl6/ and cpl/gridmaps/, for example cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v6_aave.130322.nc, map_fv0.9x1.25_TO_gx1v6_blin.130322.nc, map_fv0.9x1.25_TO_gx1v6_patc.130322.nc and cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc]
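For each supported pair of component grids, the variables above name the mapping files the coupler reads for that pair (the F/S/V/R suffixes presumably distinguish flux, state, vector and runoff maps). A rough sketch of how one pair's files could line up with those variables, using the fv0.9x1.25 to gx1v6 entries listed just above (the assignment of the aave/blin/patc files to the F/S/V variables is an assumption based solely on the order in which the entries appear; the lookup helper is made up):

```python
# Illustrative sketch only (not CIME code): key one atm/ocn grid pair's
# mapping files by the gridmap variable names listed above.

ATM_OCN_MAPS = {
    ("0.9x1.25", "gx1v6"): {
        "ATM2OCN_FMAPNAME": "cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v6_aave.130322.nc",
        "ATM2OCN_SMAPNAME": "cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v6_blin.130322.nc",
        "ATM2OCN_VMAPNAME": "cpl/gridmaps/fv0.9x1.25/map_fv0.9x1.25_TO_gx1v6_patc.130322.nc",
        "OCN2ATM_FMAPNAME": "cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc",
        "OCN2ATM_SMAPNAME": "cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc",
    },
}


def lookup_maps(atm_grid, ocn_grid):
    """Return the mapping-file variables for an atm/ocn grid pair, if known."""
    return ATM_OCN_MAPS.get((atm_grid, ocn_grid), {})


if __name__ == "__main__":
    for name, path in lookup_maps("0.9x1.25", "gx1v6").items():
        print(f"{name} = {path}")
```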
- [deleted gridmap entries continue, each listing the aave/blin/patc (or conserve) mapping files under cpl/cpl6/ and cpl/gridmaps/: atm<->ocn maps for ne11np4, ne16np4, ne30np4, ne60np4, ne120np4, ne240np4 and the enax4v1 refined mesh against gx1v6, gx3v7, oQU120/240, oEC60to30[v3][wLI], the oRRS30to10 and oRRS18to6 variants, oRRS15to5 and tx0.1v2; T62, TL319, T31, T85 and T341 maps to the POP and MPAS ocean meshes (including oARRM60to10/6); atm/ocn/ice-to-ww3a wave splice and bilinear maps; lnd<->atm and lnd<->rof maps under lnd/clm2/mappingdata/maps/ between the FV, Gaussian and spectral-element grids and the 0.1x0.1, 0.125 and 0.5x0.5 grids; rof->ocn liquid and ice runoff maps (nearest-neighbor and smoothed) from rx1, r05, r01, r0125 and JRA025 to gx1v6, gx3v7, mpas120, mpasgx1 and the oQU/oEC/oRRS/oARRM meshes; and glc gridmaps coupling mpas.gis20km, mpas.aisgis20km and mpas.ais20km to fv0.9x1.25, ne30np4 and the MPAS ocean meshes, ending with]
- cpl/gridmaps/mpas.ais20km/map_ais20km_to_oEC60to30v3wLI_nomask_aave.190207.nc -
cpl/gridmaps/mpas.ais20km/map_ais20km_to_oEC60to30v3wLI_nomask_nearestdtos.190207.nc - cpl/gridmaps/mpas.ais20km/map_ais20km_to_oEC60to30v3wLI_nomask_aave.190207.nc - cpl/gridmaps/mpas.ais20km/map_ais20km_to_oEC60to30v3wLI_nomask_nearestdtos.190207.nc - cpl/gridmaps/mpas.ais20km/map_ais20km_to_oEC60to30v3wLI_nomask_nearestdtos.190207.nc - cpl/gridmaps/mpas.ais20km/map_ais20km_to_oEC60to30v3wLI_nomask_nearestdtos.190207.nc - cpl/gridmaps/oEC60to30v3wLI/map_oEC60to30v3wLI_nomask_to_ais20km_aave.190207.nc - cpl/gridmaps/oEC60to30v3wLI/map_oEC60to30v3wLI_nomask_to_ais20km_neareststod.190207.nc - - - - - diff --git a/config/e3sm/config_inputdata.xml b/config/e3sm/config_inputdata.xml deleted file mode 100644 index 254830b7d68..00000000000 --- a/config/e3sm/config_inputdata.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - wget -
https://web.lcrc.anl.gov/public/e3sm/inputdata/
-
- - svn -
https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata
-
- -
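[editor's note] The server addresses above are the only part of config_inputdata.xml that
survives here; the download itself is handled by CIME's case scripts, not by this file. Purely
for orientation, the sketch below shows what a fetch from the wget server amounts to. It is not
CIME's actual download code, the helper name and destination layout are made up, and the example
path is just one of the mapping files listed earlier (whether it is still served at that exact
location is not guaranteed).

    import os
    import requests

    # Base URL of the "wget" input-data server recorded in the deleted config.
    INPUTDATA_URL = "https://web.lcrc.anl.gov/public/e3sm/inputdata/"

    def fetch_input_file(rel_path, dest_root="inputdata"):
        """Illustrative helper: mirror one file from the public server into dest_root."""
        dest = os.path.join(dest_root, rel_path)
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with requests.get(INPUTDATA_URL + rel_path, stream=True, timeout=60) as resp:
            resp.raise_for_status()
            with open(dest, "wb") as out:
                for chunk in resp.iter_content(chunk_size=1 << 20):
                    out.write(chunk)
        return dest

    if __name__ == "__main__":
        # Example relative path taken from the removed grid-mapping list above.
        print(fetch_input_file("cpl/cpl6/map_r05_to_gx1v6_e1000r300_090226.nc"))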
diff --git a/config/e3sm/machines/Depends.cray b/config/e3sm/machines/Depends.cray deleted file mode 100644 index 021e39b8004..00000000000 --- a/config/e3sm/machines/Depends.cray +++ /dev/null @@ -1,5 +0,0 @@ -NOOPTOBJS= ice_boundary.o dyn_comp.o - -$(NOOPTOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< - diff --git a/config/e3sm/machines/Depends.cray.cmake b/config/e3sm/machines/Depends.cray.cmake deleted file mode 100644 index 53e5823fa38..00000000000 --- a/config/e3sm/machines/Depends.cray.cmake +++ /dev/null @@ -1 +0,0 @@ -list(NOOPT_FILES APPEND cice/src/mpi/ice_boundary.F90 cice/src/serial/ice_boundary.F90) diff --git a/config/e3sm/machines/Depends.gnu b/config/e3sm/machines/Depends.gnu deleted file mode 100644 index 2d53247217e..00000000000 --- a/config/e3sm/machines/Depends.gnu +++ /dev/null @@ -1,2 +0,0 @@ -geopk.o:geopk.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fcray-pointer $< diff --git a/config/e3sm/machines/Depends.gnu.cmake b/config/e3sm/machines/Depends.gnu.cmake deleted file mode 100644 index 627b9085d8d..00000000000 --- a/config/e3sm/machines/Depends.gnu.cmake +++ /dev/null @@ -1 +0,0 @@ -set_property(SOURCE cam/src/dynamics/fv/geopk.F90 APPEND_STRING PROPERTY COMPILE_FLAGS " -fcray-pointer ") diff --git a/config/e3sm/machines/Depends.ibm b/config/e3sm/machines/Depends.ibm deleted file mode 100644 index 373f5fad78a..00000000000 --- a/config/e3sm/machines/Depends.ibm +++ /dev/null @@ -1,49 +0,0 @@ -# These routines have problems with stacksize when omp is invoked add -qsmallstack to resolve -SSOBJS = mo_sethet.o mo_drydep.o time_management.o - -$(SSOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -qsmallstack $< - -QSMPFLAGS:= -ifeq ($(compile_threaded), TRUE) - QSMPFLAGS += -qsmp=noauto:noomp -endif -shr_reprosum_mod.o: shr_reprosum_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $(QSMPFLAGS) $< - -# These routines benefit from -qnostrict without violating the bfb test -PERFOBJS=\ -prim_advection_base.o \ -vertremap_base.o \ -edge_mod_base.o \ -derivative_mod_base.o \ -bndry_mod_base.o \ -prim_advance_mod.o \ -uwshcu.o \ -wetdep.o - - -ifeq ($(DEBUG),FALSE) - $(PERFOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -qnostrict $< -#Model crashes if these files are compiled with O3(default) optimizations - seasalt_model.o: seasalt_model.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O2 $< - linoz_data.o: linoz_data.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O2 $< -endif - -### These files take long time to compile with default optimization flags. -### Reducing optimization gives <1min build-times and little impact on model run time. -### begin -## atm files taking more than a minute to compile -# this takes 9 mins to compile at default -O3 level -buffer.o: buffer.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O0 $< - -# disable inlining (some issues with pure functions therein) -advance_xm_wpxp_module.o: advance_xm_wpxp_module.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -Q! $< -advance_wp2_wp3_module.o: advance_wp2_wp3_module.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -Q! 
$< -### end diff --git a/config/e3sm/machines/Depends.ibm.cmake b/config/e3sm/machines/Depends.ibm.cmake deleted file mode 100644 index 5ebefbf91f7..00000000000 --- a/config/e3sm/machines/Depends.ibm.cmake +++ /dev/null @@ -1,56 +0,0 @@ -# These routines have problems with stacksize when omp is invoked add -qsmallstack to resolve -set(SSOBJS - cam/src/chemistry/mozart/mo_sethet.F90 - cam/src/chemistry/mozart/mo_drydep.F90) - -foreach(ITEM IN LISTS SSOBJS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -qsmallstack ") -endforeach() - -if (compile_threaded) - set_property(SOURCE share/util/shr_reprosum_mod.F90 APPEND_STRING PROPERTY COMPILE_FLAGS " -qsmp=noauto:noomp ") -endif() - -# These routines benefit from -qnostrict without violating the bfb test -set(PERFOBJS - homme/src/share/prim_advection_base.F90 - homme/src/share/vertremap_base.F90 - homme/src/share/edge_mod_base.F90 - homme/src/share/derivative_mod_base.F90 - homme/src/share/bndry_mod_base.F90 - homme/src/theta-l/prim_advance_mod.F90 - homme/src/pese/prim_advance_mod.F90 - homme/src/theta/prim_advance_mod.F90 - homme/src/preqx/share/prim_advance_mod.F90 - cam/src/physics/cam/uwshcu.F90 - cam/src/chemistry/aerosol/wetdep.F90) - -#Model crashes if these files are compiled with O3(default) optimizations -set(REDUCEOPT - cam/src/chemistry/bulk_aero/seasalt_model.F90 - cam/src/chemistry/modal_aero/seasalt_model.F90 - cam/src/chemistry/mozart/linoz_data.F90) - -set(NOINLINE - cam/src/physics/clubb/advance_xm_wpxp_module.F90 - cam/src/physics/clubb/advance_wp2_wp3_module.F90) - -if (NOT DEBUG) - foreach(ITEM IN LISTS PERFOBJS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -qnostrict ") - endforeach() - - foreach(ITEM IN LISTS REDUCEOPT) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -O2 ") - endforeach() - -endif() - -# These files take long time to compile with default optimization flags. -# Reducing optimization gives <1min build-times and little impact on model run time. -# begin -list(APPEND NOOPT_FILES cam/src/utils/buffer.F90) - -foreach(ITEN IN LISTS NOINLINE) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -Q! ") -endforeach() diff --git a/config/e3sm/machines/Depends.intel b/config/e3sm/machines/Depends.intel deleted file mode 100644 index af5beb5382d..00000000000 --- a/config/e3sm/machines/Depends.intel +++ /dev/null @@ -1,44 +0,0 @@ -# -PERFOBJS=\ -prim_advection_base.o \ -vertremap_base.o \ -edge_mod_base.o \ -derivative_mod_base.o \ -bndry_mod_base.o \ -prim_advance_mod.o \ -viscosity_preqx_base.o \ -viscosity_base.o \ -viscosity_theta.o \ -eos.o \ -hevi_mod.o \ -uwshcu.o - -# shr_wv_sat_mod does not need to have better than ~0.1% precision, and benefits -# enormously from a lower precision in the vector functions. -REDUCED_PRECISION_OBJS=\ -shr_wv_sat_mod.o - -SHR_RANDNUM_FORT_OBJS=\ -kissvec_mod.o \ -mersennetwister_mod.o \ -dSFMT_interface.o \ -shr_RandNum_mod.o - -SHR_RANDNUM_C_OBJS=\ -dSFMT.o \ -dSFMT_utils.o \ -kissvec.o - -# Note: FFLAGS contains flags such as -fp-model consistent (and -fimf-use-svml for intel version 18) -# The -fp-model fast flags below will effectively override other -fp-model settings. 
- -ifeq ($(DEBUG),FALSE) - $(PERFOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fp-model fast -no-prec-div $< - $(REDUCED_PRECISION_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $< - $(SHR_RANDNUM_FORT_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fp-model fast -no-prec-div -no-prec-sqrt -qoverride-limits $< - $(SHR_RANDNUM_C_OBJS): %.o: %.c - $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fp-model fast $< -endif diff --git a/config/e3sm/machines/Depends.intel.cmake b/config/e3sm/machines/Depends.intel.cmake deleted file mode 100644 index a4f86e63fe3..00000000000 --- a/config/e3sm/machines/Depends.intel.cmake +++ /dev/null @@ -1,50 +0,0 @@ -set(PERFOBJS - homme/src/share/prim_advection_base.F90 - homme/src/share/vertremap_base.F90 - homme/src/share/edge_mod_base.F90 - homme/src/share/derivative_mod_base.F90 - homme/src/share/bndry_mod_base.F90 - homme/src/theta-l/prim_advance_mod.F90 - homme/src/pese/prim_advance_mod.F90 - homme/src/theta/prim_advance_mod.F90 - homme/src/preqx/share/prim_advance_mod.F90 - homme/src/preqx/share/viscosity_preqx_base.F90 - homme/src/share/viscosity_base.F90 - homme/src/theta-l/viscosity_theta.F90 - homme/src/theta/viscosity_theta.F90 - homme/src/theta-l/eos.F90 - homme/src/theta/eos.F90 - homme/src/theta/hevi_mod.F90 - cam/src/physics/cam/uwshcu.F90) - -set(REDUCED_PRECISION_OBJS share/util/shr_wv_sat_mod.F90) - -set(SHR_RANDNUM_FORT_OBJS - share/RandNum/src/kissvec/kissvec_mod.F90 - share/RandNum/src/mt19937/mersennetwister_mod.F90 - share/RandNum/src/dsfmt_f03/dSFMT_interface.F90 - share/RandNum/src/shr_RandNum_mod.F90) - -set(SHR_RANDNUM_C_OBJS - share/RandNum/src/dsfmt_f03/dSFMT.c - share/RandNum/src/dsfmt_f03/dSFMT_utils.c - share/RandNum/src/kissvec/kissvec.c) - -if (NOT DEBUG) - foreach(ITEM IN LISTS PERFOBJS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -O3 -fp-model fast -no-prec-div ") - endforeach() - - foreach(ITEM IN LISTS REDUCED_PRECISION_OBJS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -fimf-precision=low -fp-model fast ") - endforeach() - - foreach(ITEM IN LISTS SHR_RANDNUM_FORT_OBJS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -O3 -fp-model fast -no-prec-div -no-prec-sqrt -qoverride-limits ") - endforeach() - - foreach(ITEM IN LISTS SHR_RANDNUM_C_OBJS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -O3 -fp-model fast ") - endforeach() - -endif() \ No newline at end of file diff --git a/config/e3sm/machines/Depends.nag b/config/e3sm/machines/Depends.nag deleted file mode 100644 index e2816dc488b..00000000000 --- a/config/e3sm/machines/Depends.nag +++ /dev/null @@ -1,4 +0,0 @@ -wrap_mpi.o: wrap_mpi.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< -fft99.o: fft99.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< diff --git a/config/e3sm/machines/Depends.nag.cmake b/config/e3sm/machines/Depends.nag.cmake deleted file mode 100644 index a344c429ebd..00000000000 --- a/config/e3sm/machines/Depends.nag.cmake +++ /dev/null @@ -1 +0,0 @@ -list(NOOPT_FILES APPEND cam/src/control/wrap_mpi.F90 cam/src/utils/fft99.F90) diff --git a/config/e3sm/machines/Depends.summit.cmake b/config/e3sm/machines/Depends.summit.cmake deleted file mode 100644 index 1ec5b4dd5e0..00000000000 --- a/config/e3sm/machines/Depends.summit.cmake +++ /dev/null @@ -1,22 +0,0 @@ -list(APPEND NOOPT_FILES - 
cam/src/dynamics/eul/dyn_comp.F90 - cam/src/dynamics/fv/dyn_comp.F90 - cam/src/dynamics/se/dyn_comp.F90 - cam/src/dynamics/sld/dyn_comp.F90 - cam/src/physics/cam/microp_aero.F90) - -set(FILES_NEED_CUDA_FLAGS - homme/src/preqx_acc/bndry_mod.F90 - homme/src/preqx_acc/derivative_mod.F90 - homme/src/preqx_acc/edge_mod.F90 - homme/src/share/element_mod.F90 - homme/src/preqx_acc/element_state.F90 - homme/src/preqx_acc/openacc_utils_mod.F90 - homme/src/preqx_acc/prim_advection_mod.F90 - homme/src/share/prim_si_mod.F90 - homme/src/preqx_acc/model_init_mod.F90 - homme/src/preqx_acc/viscosity_mod.F90 - homme/src/preqx_acc/prim_driver_mod.F90 - homme/src/share/prim_driver_base.F90 - homme/src/share/physics_mod.F90 - cam/src/control/physconst.F90) diff --git a/config/e3sm/machines/Depends.summit.pgiacc b/config/e3sm/machines/Depends.summit.pgiacc deleted file mode 100644 index fe74aec4684..00000000000 --- a/config/e3sm/machines/Depends.summit.pgiacc +++ /dev/null @@ -1,56 +0,0 @@ -dyn_comp.o: dyn_comp.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< - -microp_aero.o: microp_aero.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< - - - -bndry_mod.o: bndry_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -derivative_mod.o: derivative_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -edge_mod.o: edge_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -element_mod.o: element_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -element_state.o: element_state.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -openacc_utils_mod.o: openacc_utils_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_advance_mod.o: prim_advance_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_advection_mod.o: prim_advection_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_si_mod.o: prim_si_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -model_init_mod.o: model_init_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -vertremap_mod.o: vertremap_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -viscosity_mod.o: viscosity_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_driver_mod.o: prim_driver_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_driver_base.o: prim_driver_base.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -physics_mod.o: physics_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - -physconst.o: physconst.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc 
-ta=tesla,cc70,pinned -Minfo=accel $(FREEFLAGS) $< - diff --git a/config/e3sm/machines/Depends.summit.pgiacc.cmake b/config/e3sm/machines/Depends.summit.pgiacc.cmake deleted file mode 100644 index 890bc438771..00000000000 --- a/config/e3sm/machines/Depends.summit.pgiacc.cmake +++ /dev/null @@ -1,3 +0,0 @@ -foreach(ITEM IN LISTS FILES_NEED_CUDA_FLAGS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -DUSE_OPENACC=1 -acc -ta=tesla,cc70,pinned -Minfo=accel ") -endforeach() diff --git a/config/e3sm/machines/Depends.summitdev.pgiacc b/config/e3sm/machines/Depends.summitdev.pgiacc deleted file mode 100644 index 791d7ae5dda..00000000000 --- a/config/e3sm/machines/Depends.summitdev.pgiacc +++ /dev/null @@ -1,56 +0,0 @@ -dyn_comp.o: dyn_comp.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< - -microp_aero.o: microp_aero.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< - - - -bndry_mod.o: bndry_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -derivative_mod.o: derivative_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -edge_mod.o: edge_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -element_mod.o: element_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -element_state.o: element_state.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -openacc_utils_mod.o: openacc_utils_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_advance_mod.o: prim_advance_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_advection_mod.o: prim_advection_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_si_mod.o: prim_si_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -model_init_mod.o: model_init_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -vertremap_mod.o: vertremap_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -viscosity_mod.o: viscosity_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_driver_mod.o: prim_driver_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -prim_driver_base.o: prim_driver_base.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -physics_mod.o: physics_mod.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - -physconst.o: physconst.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel $(FREEFLAGS) $< - diff --git a/config/e3sm/machines/Depends.summitdev.pgiacc.cmake b/config/e3sm/machines/Depends.summitdev.pgiacc.cmake deleted file mode 100644 index e9626664baa..00000000000 --- 
a/config/e3sm/machines/Depends.summitdev.pgiacc.cmake +++ /dev/null @@ -1,3 +0,0 @@ -foreach(ITEM IN LISTS FILES_NEED_CUDA_FLAGS) - set_property(SOURCE ${ITEM} APPEND_STRING PROPERTY COMPILE_FLAGS " -DUSE_OPENACC=1 -acc -ta=tesla,cc60,pinned -Minfo=accel ") -endforeach() diff --git a/config/e3sm/machines/Depends.theta.intel b/config/e3sm/machines/Depends.theta.intel deleted file mode 100644 index d131cb41ad6..00000000000 --- a/config/e3sm/machines/Depends.theta.intel +++ /dev/null @@ -1,44 +0,0 @@ -# -PERFOBJS=\ -prim_advection_base.o \ -vertremap_base.o \ -edge_mod_base.o \ -derivative_mod_base.o \ -bndry_mod_base.o \ -prim_advance_mod.o \ -viscosity_preqx_base.o \ -viscosity_base.o \ -viscosity_theta.o \ -eos.o \ -hevi_mod.o \ -uwshcu.o - -# shr_wv_sat_mod does not need to have better than ~0.1% precision, and benefits -# enormously from a lower precision in the vector functions. -REDUCED_PRECISION_OBJS=\ -shr_wv_sat_mod.o - -SHR_RANDNUM_FORT_OBJS=\ -kissvec_mod.o \ -mersennetwister_mod.o \ -dSFMT_interface.o \ -shr_RandNum_mod.o - -SHR_RANDNUM_C_OBJS=\ -dSFMT.o \ -dSFMT_utils.o \ -kissvec.o - -# Note: FFLAGS contains flags such as -fp-model consistent (and -fimf-use-svml for intel version 18) -# The -fp-model fast flags below will effectively override other -fp-model settings. - -ifeq ($(DEBUG),FALSE) - $(PERFOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $< - $(REDUCED_PRECISION_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $< - $(SHR_RANDNUM_FORT_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fp-model fast -no-prec-div -no-prec-sqrt -qoverride-limits $< - $(SHR_RANDNUM_C_OBJS): %.o: %.c - $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fp-model fast $< -endif diff --git a/config/e3sm/machines/Depends.theta.intel.cmake b/config/e3sm/machines/Depends.theta.intel.cmake deleted file mode 100644 index 12a852fd20d..00000000000 --- a/config/e3sm/machines/Depends.theta.intel.cmake +++ /dev/null @@ -1,9 +0,0 @@ -# Same as generic intel except remove fp-model fast - -if (NOT DEBUG) - foreach(ITEM IN LISTS PERFOBJS) - get_property(ITEM_FLAGS SOURCE ${ITEM} PROPERTY COMPILE_FLAGS) - string(REPLACE "-fp-model fast" "" ITEM_FLAGS "${ITEM_FLAGS}") - set_property(SOURCE ${ITEM} PROPERTY COMPILE_FLAGS "${ITEM_FLAGS}") - endforeach() -endif() diff --git a/config/e3sm/machines/README b/config/e3sm/machines/README deleted file mode 100644 index 341f5fae443..00000000000 --- a/config/e3sm/machines/README +++ /dev/null @@ -1,5 +0,0 @@ -Please refer to the documentation in the config_machines.xml and config_compilers.xml files. 
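[editor's note] The config_batch.xml removed just below mapped each batch system (Cobalt, LSF,
PBS, Moab, Slurm) to its query/submit/cancel commands, directive prefix, and per-machine queues;
CIME reads it through its own XML classes. The sketch below is only a stand-alone illustration
of that kind of lookup: the element and attribute names (batch_system, type, submit) are
hypothetical, since the real markup did not survive extraction and the actual schema differs.

    import xml.etree.ElementTree as ET

    def submit_command(xml_path, batch_type):
        """Return the submit command recorded for one batch system, or None.

        Assumes a layout like <batch_system type="slurm"><submit>sbatch</submit>...;
        the real CIME file is accessed via CIME's XML classes, not parsed directly.
        """
        root = ET.parse(xml_path).getroot()
        for system in root.findall("batch_system"):
            if system.get("type") == batch_type:
                node = system.find("submit")
                if node is not None and node.text:
                    return node.text.strip()
        return None

    # Under the assumed layout, submit_command("config_batch.xml", "slurm") would
    # return "sbatch", matching the commands visible in the deleted text below.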
- - - - diff --git a/config/e3sm/machines/config_batch.xml b/config/e3sm/machines/config_batch.xml deleted file mode 100644 index a97163dcf34..00000000000 --- a/config/e3sm/machines/config_batch.xml +++ /dev/null @@ -1,572 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - qstat - qsub - qdel - --env - - (\d+) - --dependencies - %H:%M:%s - -M - - - - - - - - - - - - - - qstat - /projects/ccsm/acme/tools/cobalt/dsub - qdel - --env - #COBALT - (\d+) - --dependencies jobid - : - -M - - - - - - - - - - - - - - bjobs - bsub - bkill - -env - #BSUB - <(\d+)> - -w 'done(jobid)' - -w 'ended(jobid)' - && - %H:%M - -u - - , -B -N, -B,-N,-N - - - - - - - -nnodes {{ num_nodes }} - -o {{ output_error_path }}.%J - -e {{ output_error_path }}.%J - -J {{ job_id }} - - - - - qstat - qsub - qdel - -v - #PBS - ^(\S+)$ - -W depend=afterok:jobid - -W depend=afterany:jobid - : - %H:%M:%S - -M - -m - , bea, b, e, a - - - - - - - -N {{ job_id }} - -r {{ rerunnable }} - - -j oe - -V - - - - - showq - msub - canceljob - #MSUB - (\d+)$ - -W depend=afterok:jobid - -W depend=afterany:jobid - : - %H:%M:%S - -M - -m - , bea, b, e, a - - - - - - -N {{ job_id }} - -j oe - -r {{ rerunnable }} - -S {{ shell }} - - - - - - squeue - sbatch - scancel - #SBATCH - (\d+)$ - --dependency=afterok:jobid - --dependency=afterany:jobid - : - %H:%M:%S - --mail-user - --mail-type - none, all, begin, end, fail - - --export=ALL - -p {{ job_queue }} - -J {{ job_id }} - -N {{ num_nodes }} - -n {{ total_tasks }} - -t {{ job_wallclock_time }} - -o {{ job_id }}.out - -e {{ job_id }}.err - -A {{ project }} - - - pbatch - pdebug - - - - - - - squeue - sbatch - scancel - #SBATCH - (\d+)$ - --dependency=afterok:jobid - --dependency=afterany:jobid - : - %H:%M:%S - --mail-user - --mail-type - none, all, begin, end, fail - - - - - - - --job-name={{ job_id }} - --nodes={{ num_nodes }} - --output={{ job_id }}.%j - --exclusive - - - - - squeue - sbatch - scancel - #SBATCH - (\d+)$ - --dependency=afterok:jobid - --dependency=afterany:jobid - : - %H:%M:%S - --mail-user - --mail-type - none, all, begin, end, fail - - - - - - - --job-name={{ job_id }} - --nodes={{ num_nodes }} - --output={{ job_id }}.%j - --exclusive - - - - - - -A {{ PROJECT }} - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - - - shared - batch - - - - - - acme-centos6 - - - - - - debug - bdw - knl - - - - - - -A {{ project }} - -l nodes={{ num_nodes }} - - - batch - - - - - - debug - regular - - - - - - --constraint=haswell - - - debug - regular - - - - - - --constraint=knl,quad,cache - - - debug - regular - - - - - - -n {{ total_tasks }} - - - skx-dev - skx-large - skx-normal - - - - - - default - - - - - - default - - - - - - debug-cache-quad - default - - - - - qsub - - skylake_8180 - - - - - - --output=slurm.out - --error=slurm.err - - - small - medium - large - - - - - - --output=slurm.out - --error=slurm.err - - - slurm - - - - - - slurm - - - - - - --ntasks-per-node={{ tasks_per_node }} - --output=slurm.out - --error=slurm.err - - - slurm - - - - - - short,batch - batch - - - - - - short,batch - batch - - - - - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - - - - - - --nodes={{ num_nodes }} - --ntasks-per-node={{ tasks_per_node }} - --qos=standard - - - standard - - - - - - --nodes={{ num_nodes }} - --ntasks-per-node={{ tasks_per_node }} - --qos=standard - - - standard - - - - - - mesabi - debug - - - - - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - -q esd13q - - - esd13q - esddbg13q - - - - - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - -W 
group_list=cades-ccsi - - - batch - - - - - - batch - debug - - - - - - -A {{ project }} - -l nodes={{ num_nodes }} - -env "all" - - - batch - debug - - - - - - -P {{ project }} - - - -alloc_flags smt2 - - - -alloc_flags "gpumps smt2" - - - batch - - - - - - - -P {{ project }} - -alloc_flags gpumps - - - - batch - - - - - - - rhel7G - - - - - - blake - - - - - - --ntasks-per-node={{ tasks_per_node }} - --qos=lr_normal - --account={{ project }} - - - lr2 - - - - - - --ntasks-per-node={{ tasks_per_node }} - --qos=lr_normal - --account={{ project }} - - - lr3 - - - - - - --ntasks-per-node={{ tasks_per_node }} - --qos=condo_esd2 - - - lr6 - - - - diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml deleted file mode 100644 index c15bf6390a5..00000000000 --- a/config/e3sm/machines/config_compilers.xml +++ /dev/null @@ -1,2201 +0,0 @@ - - - - - - FALSE - - - - - -h noomp - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY - -DDIR=NOOP - - - -s real64 - - - -O2 -f free -N 255 -h byteswapio -em - -h noomp - -g -trapuv -Wuninitialized - - - -O0 - - TRUE - - -Wl,--allow-multiple-definition -h byteswapio - -h noomp - - - - - - -mcmodel=medium - -fopenmp - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - -std=c99 - - - -D CISM_GNU=ON - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU - - FORTRAN - - -fdefault-real-8 - - - - -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none - -fopenmp - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - - - -O0 - - - -ffixed-form - - - -ffree-form - - FALSE - - -fopenmp - - mpicc - mpicxx - mpif90 - gcc - g++ - gfortran - TRUE - - - - - -mcmodel=medium - -fopenmp - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - - - -D CISM_GNU=ON - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU - - FORTRAN - - -fdefault-real-8 - - - - -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none - -fopenmp - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - - - -O0 - - - -ffixed-form - - - -ffree-form - - FALSE - - -fopenmp - - mpicc - mpicxx - mpif90 - gcc - g++ - gfortran - TRUE - - - - - -g -qfullpath -qmaxmem=-1 -qphsinfo - -O3 - -qsmp=omp -qsuppress=1520-045 - -qsmp=omp:noopt -qsuppress=1520-045 - - - - -DFORTRAN_SAME -DCPRIBM - - -WF,-D - - -qrealsize=8 - - - -g -qfullpath -qmaxmem=-1 -qphsinfo - -O2 -qstrict -Q - -qsmp=omp -qsuppress=1520-045 - -qsmp=omp:noopt -qsuppress=1520-045 - -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en - - - -O0 - - - -qsuffix=f=f -qfixed=132 - - - -qsuffix=f=f90:cpp=F90 - - TRUE - - - - - -O2 -fp-model precise -std=gnu99 - -qopenmp - -O2 -debug minimal - -O0 -g - - - -std=c++11 -fp-model source - -qopenmp - -O0 -g - -O2 - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source - -qopenmp - - -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created - -O2 -debug minimal - - - -O0 - - - -fixed -132 - - - -free - - TRUE - - -qopenmp - - mpicc - mpicxx - mpif90 - icc - icpc - ifort - TRUE - - - - - -g - -std=c99 - - - -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG - - - -r8 - - - - - - - - 
-wmismatch=mpi_send,mpi_recv,mpi_bcast,mpi_allreduce,mpi_reduce,mpi_isend,mpi_irecv,mpi_irsend,mpi_rsend,mpi_gatherv,mpi_gather,mpi_scatterv,mpi_allgather,mpi_alltoallv,mpi_file_read_all,mpi_file_write_all,mpibcast,mpiscatterv,mpi_alltoallw,nfmpi_get_vara_all,NFMPI_IPUT_VARA,NFMPI_GET_VAR_ALL,NFMPI_PUT_VARA,NFMPI_PUT_ATT_REAL,NFMPI_PUT_ATT_DOUBLE,NFMPI_PUT_ATT_INT,NFMPI_GET_ATT_REAL,NFMPI_GET_ATT_INT,NFMPI_GET_ATT_DOUBLE,NFMPI_PUT_VARA_DOUBLE_ALL,NFMPI_PUT_VARA_REAL_ALL,NFMPI_PUT_VARA_INT_ALL -convert=BIG_ENDIAN - - -ieee=full -O2 - -g -time -f2003 -ieee=stop - - - -C=all -g -time -f2003 -ieee=stop - -gline - -openmp - - - -fixed - - - -free - - FALSE - - -openmp - - mpicc - mpif90 - gcc - nagfor - - - - - -mp - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRPATHSCALE - - - -r8 - - - -O -extend_source -ftpp -fno-second-underscore -funderscoring -byteswapio - -mp - -g -trapuv -Wuninitialized - - - -O0 - - FALSE - - -mp - - mpicc - mpif90 - - - - - -gopt -time - - -mp - - - - - - - - - - - - - - - - - - - - - - - - - - - -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI - - CXX - - -r8 - - - -i4 -gopt -time -Mstack_arrays -Mextend -byteswapio -Mflushz -Kieee -Mallocatable=03 - - -mp - -O0 -g -Ktrap=fp -Mbounds -Kieee - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - - - -O0 - - - -Mfixed - - - -Mfree - - - - FALSE - - -time -Wl,--allow-multiple-definition - - -mp - - mpicc - mpicxx - mpif90 - pgcc - pgc++ - pgf95 - - - - - -time - - -mp - - - - - - - - - - - - - - - - - - - - - - - - - - - -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DUSE_CUDA_FORTRAN -DCPRPGI - - CXX - - -r8 - - - -i4 -time -Mstack_arrays -Mextend -byteswapio -Mflushz -Kieee - - -mp - - -O0 -g -Ktrap=fp -Mbounds -Kieee - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - - - -O0 - - - -Mfixed - - - -Mfree - - - - FALSE - - -time -Wl,--allow-multiple-definition -acc - - -mp - - mpicc - mpicxx - mpif90 - pgcc - pgc++ - pgf95 - - - - - -qarch=auto -qtune=auto -qcache=auto - - /usr/bin/bash - - -qarch=auto -qtune=auto -qcache=auto -qsclk=micro - -qspill=6000 - - - -qsigtrap=xl__trcedump - -bdatapsize:64K -bstackpsize:64K -btextpsize:32K - - mpcc_r - mpxlf2003_r - cc_r - xlf2003_r - - -lmassv -lessl - -lmass - - - - - - -O3 -qstrict - -qtune=440 -qarch=440d - - - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - - - -DLINUX -DnoI8 - - - -qtune=440 -qarch=440d - -O3 -qstrict -Q - -qinitauto=FF911299 -qflttrap=ov:zero:inv:en - -qextname=flush - - - -Wl,--relax -Wl,--allow-multiple-definition - - - -L/bgl/BlueLight/ppcfloor/bglsys/lib -lmpich.rts -lmsglayer.rts -lrts.rts -ldevices.rts - - blrts_xlc - blrts_xlf2003 - mpich.rts - /bgl/BlueLight/ppcfloor/bglsys - blrts_xlc - blrts_xlf2003 - - - - - -qtune=450 -qarch=450 -I/bgsys/drivers/ppcfloor/arch/include/ - - - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - - - -DLINUX -DnoI8 - - - -qspillsize=2500 -qtune=450 -qarch=450 - -qextname=flush - - - -Wl,--relax -Wl,--allow-multiple-definition - - - - - - -qsmp=omp:nested_par -qsuppress=1520-045 - -qsmp=omp:nested_par:noopt -qsuppress=1520-045 - - - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - - - -DLINUX - - - -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush -qphsinfo - -O3 -qstrict -Q - -qsmp=omp:nested_par -qsuppress=1520-045 - -qsmp=omp:nested_par:noopt -qsuppress=1520-045 - - - -Wl,--relax -Wl,--allow-multiple-definition - - - - - - -DCMAKE_SYSTEM_NAME=Catamount - - - -DLINUX - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C 
-DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - cc - CC - ftn - mpich - $ENV{MPICH_DIR} - $ENV{NETCDF_DIR} - lustre - $ENV{PARALLEL_NETCDF_DIR} - cc - CC - ftn - - - - - -DSYSDARWIN - - - -all_load - - - - - - -heap-arrays - - - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl - - - - - - -mcmodel medium -shared-intel - - - - - /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install - - -O2 - - - --host=Linux - - - -lstdc++ -lmpi_cxx - - - -O2 - - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack - - $ENV{NETCDF_PATH} - $ENV{PNETCDF_PATH} - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY - - gpfs - - $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} -L$ENV{MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl - $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs} - - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - $ENV{PNETCDF_PATH} - - - - - -static-intel - -heap-arrays - - - -DHAVE_SLASHPROC - - - -O2 -debug minimal -qno-opt-dynamic-align - $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --fflags} - -static-intel - -heap-arrays - - - -static-intel - - gpfs - - $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} - $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs} - -mkl - - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - $ENV{PNETCDF_PATH} - - - - gpfs - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas - -rpath $ENV{NETCDF_PATH}/lib - - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - $ENV{PNETCDF_PATH} - - - - /soft/climate/AlbanyTrilinos_06262017/Albany/buildintel/install - - -DHAVE_SLASHPROC - - - -lstdc++ - - - -O2 -debug minimal -qno-opt-dynamic-align - - mpiicc - mpiicpc - mpiifort - - $SHELL{nf-config --flibs} -mkl - - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - $ENV{PNETCDF_PATH} - - - - /soft/climate/AlbanyTrilinos_06262017/Albany/build/install - - -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY - - - -lstdc++ - - mpi - /blues/gpfs/home/software/spack/opt/spack/linux-x86_64/gcc-5.3.0/mvapich2-2.2b-sdh7nhddicl4sh5mgxjyzxtxox3ajqey - $ENV{NETCDFROOT} - gpfs - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas - - - - - mpi - /soft/mvapich2/2.2b_psm/intel-15.0 - $ENV{NETCDFROOT} - gpfs - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas - -Wl,-rpath -Wl,$ENV{NETCDFROOT}/lib - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl - - - - - mpi - mpich - /soft/openmpi/1.8.2/intel-13.1 - /soft/mpich2/1.4.1-intel-13.1 - $ENV{NETCDFROOT} - gpfs - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl - - - - - mpi - /home/robl/soft/mpich-3.1.4-nag-6.0 - $ENV{NETCDFROOT} - gpfs - $ENV{PNETCDFROOT} - - $(shell $NETCDF_PATH/bin/nf-config --flibs) $SHELL{$NETCDF_PATH/bin/nc-config --libs} -llapack -lblas - - - - - mpi - mpi - mpich - /soft/openmpi/1.8.2/pgi-13.9 - /soft/mpich2/1.4.1-pgi-13.9/ - $ENV{NETCDFROOT} - gpfs - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas - -rpath $ENV{NETCDFROOT}/lib - - - - - - -fopenmp - - - -D CISM_GNU=ON - - - -DFORTRANUNDERSCORE -DNO_R16 - - - FORTRAN - - -fdefault-real-8 - - - - -O -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none -fno-range-check - -fopenmp - -g -Wall - - - - -ffixed-form - - 
- -ffree-form - - /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/hdf5-parallel/1.8.17/centos7.2_gnu5.3.0 - /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/netcdf-hdf5parallel/4.3.3.1/centos7.2_gnu5.3.0 - /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/pnetcdf/1.9.0/centos7.2_gnu5.3.0 - /software/tools/compilers/intel_2017/mkl/lib/intel64 - - -fopenmp - -L$NETCDF_PATH/lib -Wl,-rpath=$NETCDF_PATH/lib -lnetcdff -lnetcdf - - - mpicc - mpic++ - mpif90 - gcc - gcpp - gfortran - TRUE - - - - - --host=Linux --enable-filesystem-hints=lustre - - - -DLINUX - - - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - - $ENV{NETCDF_HOME} - lustre - $ENV{PNETCDFROOT} - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH}/lib/intel64 -lmkl_rt - -mkl=cluster - -mkl - - - - - - -DnoI8 - - - - -C=all -g -O0 -v - -C=all -g -nan -O0 -v - - - - - $ENV{MPI_LIB} - $ENV{NETCDF_ROOT} - lustre - $ENV{PNETCDFROOT} - - -L$ENV{NETCDF_ROOT}/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH} -lmkl_rt - - - - - /projects/ccsm/libs/AlbanyTrilinos/Albany/build/install - - -DMPASLI_EXTERNAL_INTERFACE_DISABLE_MANGLING - - - -llapack -lblas -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp - - CXX - /soft/libraries/hdf5/1.8.14/cnk-xl/current/ - mpixlf77_r - mpixlc_r - /soft/compilers/bgclang/mpi/bgclang/bin/mpic++11 - mpixlf2003_r - /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ - /soft/libraries/petsc/3.5.3.1 - gpfs - /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ - mpixlc_r - mpixlf2003_r - - -L$NETCDF_PATH/lib -lnetcdff -lnetcdf -L$HDF5_PATH/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib - -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp - - TRUE - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - - $ENV{NETCDF_HOME} - lustre - $ENV{PNETCDFROOT} - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt - - - - - - -O2 -kind=byte - - - --host=Linux - - - -DLINUX - - - -O2 -kind=byte - -C=all -g -O0 -v - - $ENV{NETCDF_HOME} - lustre - $ENV{PNETCDFROOT} - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt - - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - -C -Mbounds -traceback -Mchkfpstk -Mchkstk -Mdalign -Mdepchk -Mextend -Miomutex -Mrecursive -Ktrap=fp -O0 -g -byteswapio -Meh_frame - - $ENV{NETCDF_HOME} - lustre - $ENV{PNETCDFROOT} - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MPI_LIB} -lmpich - - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv -init=snan - - $ENV{NETCDF_HOME} - $ENV{PNETCDF_HOME} - lustre - - -lpmi -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH}/lib/intel64/ -lmkl_rt $ENV{PNETCDF_LIBRARIES} - - mpiicc - mpiicpc - mpiifort - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - -C -Mbounds -traceback -Mchkfpstk -Mchkstk -Mdalign -Mdepchk -Mextend -Miomutex -Mrecursive -Ktrap=fp -O0 -g -byteswapio -Meh_frame - - $ENV{NETCDF_HOME} - lustre - $ENV{PNETCDF_HOME} - - -lpmi -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH}/lib/intel64/ -lmkl_rt $ENV{PNETCDF_LIBRARIES} - - - - - - 
--host=Linux - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -O2 -debug minimal -qno-opt-dynamic-align - - $ENV{PETSC_DIR} - icc - icpc - ifort - - -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf - -mkl -lpthread - - - - - - -axMIC-AVX512 -xCORE-AVX2 - - - --host=Linux - - - -DARCH_MIC_KNL - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -O2 -debug minimal -qno-opt-dynamic-align - -xMIC-AVX512 - -DHAVE_ERF_INTRINSICS - - mpiicc - mpiicpc - mpiifort - - impi - $ENV{PETSC_DIR} - icc - icpc - ifort - - -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf - -mkl -lpthread - - - - - - -O2 -fp-model precise -std=gnu99 - -qopenmp - -O2 -debug minimal - -O0 -g - - - --host=Linux - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -qopenmp - -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created - -O2 -debug minimal -qno-opt-dynamic-align - -xMIC-AVX512 - - - -fixed -132 - - - -free - - TRUE - - -qopenmp - - $ENV{PETSC_DIR} - icc - icpc - ifort - mpiicc - mpiicpc - mpiifort - - impi - - -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf - -mkl -lpthread - - TRUE - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_HOME} - lustre - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi - - - - - - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE - - $ENV{NETCDF_HOME} - - -L$ENV{NETCDF_HOME}/lib/ -lnetcdff -lnetcdf -lcurl -llapack -lblas - - - - - - -O2 - - - --host=Linux - - - -DHAVE_PAPI - - - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - - cc - CC - ftn - - -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf - $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm - - - - - /projects/ccsm/AlbanyTrilinos_06262017/Albany/build/install - - -O2 - - - --host=Linux - - /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default - - -O2 - - /opt/openmpi-1.8-intel - $ENV{NETCDFROOT} - /projects/ccsm/pfunit/3.2.9/mpi-serial - lustre - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -L/projects/ccsm/BLAS-intel -lblas_LINUX - -mkl=cluster - -mkl - - - - - - -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - -openmp - - - -DFORTRANUNDERSCORE -DNO_R16 - -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - -openmp - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - - - -fixed -132 - - - -free - - - -openmp - -lnetcdff - - mpiicc - mpiicpc - mpiifort - icc - icpc - ifort - - -L/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib -lnetcdff -L/soft/hdf5/hdf5-1.8.13-intel-2013-sp1-update3-impi-5.0.0.028/lib -openmp -fPIC -lnetcdf -lnetcdf -L/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/lib/intel64 
-lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm - - TRUE - - - - - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY - - /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib - $ENV{NETCDF_DIR} - - -lnetcdff -lnetcdf -mkl - - - - - - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY - - /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib - $ENV{NETCDF_DIR} - - -lnetcdff -lnetcdf -mkl - - - - - - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY - - /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib - $ENV{NETCDF_DIR} - - -lnetcdff -lnetcdf -mkl - - - - - $ENV{LAPACK_DIR}/lib - - -O2 - - - --host=Linux - - - -lstdc++ -lmpi_cxx - - - -O2 - -I$ENV{NETCDF_DIR}/include - - $ENV{NETCDF_DIR} - $ENV{PNETCDF_DIR} - - -L/global/software/sl-7.x86_64/modules/gcc/6.3.0/netcdf/4.4.1.1-gcc-p/lib -lnetcdff -lnetcdf -lnetcdf -lblas -llapack - - - - - $ENV{NETCDF_PATH} - $ENV{PNETCDF_PATH} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} - - - - - - -framework Accelerate - - $ENV{NETCDF_PATH} - - -L$NETCDF_PATH/lib -lnetcdff -lnetcdf - - - - - /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install - - -O2 - - - --host=Linux - - - -lstdc++ - - - -O2 - -I$ENV{NETCDFROOT}/include - - $ENV{NETCDFROOT} - $ENV{SEMS_PFUNIT_ROOT} - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack - - - - - /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install - - -O2 - - - --host=Linux - - - -lstdc++ -lmpi_cxx - - - -O2 - - $ENV{NETCDFROOT} - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack - - - - - $ENV{E3SM_SRCROOT}/externals/kokkos/bin/nvcc_wrapper - $ENV{E3SM_SRCROOT}/externals/kokkos/bin/nvcc_wrapper - --arch=Pascal60 --with-cuda=$ENV{CUDA_ROOT} --with-cuda-options=enable_lambda - - -expt-extended-lambda -DCUDA_BUILD - - - -lstdc++ -lcudart - - $ENV{NETCDF_FORTRAN_PATH} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack - - - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - - - -xCORE_AVX512 -mkl -std=gnu99 - -O3 -g -debug minimal - -O0 -g - - - -xCORE_AVX512 -mkl -std=c++11 - -O3 -g -debug minimal - -O0 -g - -qopenmp - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -xCORE_AVX512 -mkl - -qopenmp - -O3 -g -debug minimal - -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created - - - -mkl -lstdc++ - -qopenmp - -L$(NETCDF_FORTRAN_PATH)/lib64 - - - -fixed -132 - - - -free - - TRUE - - -r8 - - ifort - icc - icpc - FORTRAN - - -cxxlib - - TRUE - - - - - -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/inc -lude - -openmp - - - -DFORTRANUNDERSCORE -DNO_R16 - -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -I/soft/i -ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - -openmp - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - - - -fixed -132 - - - -free - - - -openmp - -lnetcdff - - mpiicc - mpiicpc - mpiifort - icc - icpc - ifort - - -L/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib -lnetcdff -L/soft/hdf5/hdf5-1.8.13-intel-2013-sp1-update3-impi-5.0.0.028/lib -openmp -fPIC -lnetcdf -lnetcdf -L/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/lib/intel64 -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm - - TRUE - - - - /projects/ccsm/libs/AlbanyTrilinos/Albany/build/install - - 
-DMPASLI_EXTERNAL_INTERFACE_DISABLE_MANGLING - - - -llapack -lblas -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp - - CXX - /soft/libraries/hdf5/1.8.14/cnk-xl/current/ - - - mpixlf77_r - mpixlc_r - /soft/compilers/bgclang/mpi/bgclang/bin/mpic++11 - mpixlf2003_r - /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ - /soft/libraries/petsc/3.5.3.1 - gpfs - /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ - mpixlc_r - mpixlf2003_r - - -L$NETCDF_PATH/lib -lnetcdff -lnetcdf -L$HDF5_PATH/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib - -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp - - TRUE - - - - /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpicc - /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpif90 - /projects/cesm/devtools/netcdf-4.1.3-gcc4.8.1-mpich3.0.4/ - /projects/cesm/devtools/gcc-4.8.1/bin/gcc - /projects/cesm/devtools/gcc-4.8.1/bin/g++ - /projects/cesm/devtools/gcc-4.8.1/bin/gfortran - - -L/user/lib64 -llapack -lblas -lnetcdff - - - - - /home/zdr/opt/netcdf-4.1.3_pgf95 - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_LIB}/.. - lustre - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi - - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_LIB}/.. - lustre - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi - - - - - /projects/ccsm/AlbanyTrilinos_06262017/Albany/build/install - - -O2 - - - --host=Linux - - /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default - - -O2 - - $ENV{MPIHOME} - $ENV{NETCDFROOT} - /projects/ccsm/pfunit/3.2.9/mpi-serial - lustre - $ENV{PNETCDFROOT} - - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -L/projects/ccsm/BLAS-intel -lblas_LINUX -L$ENV{MKL_LIBS} -lmkl_rt - -mkl=cluster - -mkl - - - - - - -std=c99 - - - --host=Linux - - - -DLINUX -DCPRINTEL - - - -O2 -debug minimal - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - - $ENV{NETCDF_PATH} - lustre - $ENV{PNETCDFROOT} - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKLROOT} -lmkl_rt - - - - - - -O2 - - - --host=Linux - - - -DLINUX - - - -O2 - - $ENV{NETCDF_LIB}/.. 
- lustre - - -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi - - - - - - - -xCORE-AVX2 - - - --host=Linux - - - -DLINUX - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - -DARCH_MIC_KNL - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -O2 -debug minimal -qno-opt-dynamic-align - - - -xCORE-AVX2 - - $ENV{TACC_HDF5_DIR} - mpicc - mpicxx - mpif90 - impi - $ENV{TACC_NETCDF_DIR} - $ENV{TACC_NETCDF_DIR} - $ENV{PETSC_DIR} - $ENV{TACC_PNETCDF_DIR} - icc - icpc - ifort - - -L$NETCDF_PATH -lnetcdff -Wl,--as-needed,-L$NETCDF_PATH/lib -lnetcdff -lnetcdf - -L$NETCDF_PATH -lnetcdff -Wl,--as-needed,-L$NETCDF_PATH/lib -lnetcdff -lnetcdf - -mkl -lpthread - - - - - - -O2 - - - --host=Linux - - - -O2 - - - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 - - mpicc - mpiCC - mpif90 - gcc - g++ - gfortran - gpfs - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - TRUE - - - - - --host=Linux - - - -DLINUX - - - -qzerosize -qfree=f90 -qxlf2003=polymorphic - -qspillsize=2500 -qextname=flush - - - -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 - -Wl,--relax -Wl,--allow-multiple-definition - - mpicc - mpiCC - mpif90 - gpfs - xlc_r - xlf90_r - xlc++_r - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - TRUE - - - - - -O2 - - - --host=Linux - - - -O2 -DSUMMITDEV_PGI - - - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 - - - mpicc - mpiCC - mpif90 - gpfs - pgcc - pgc++ - pgfortran - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - TRUE - - - - - -O2 - - - --host=Linux - - - -O2 -DSUMMITDEV_PGI - - - -ta=tesla:cc70,pinned - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 - - mpicc - mpiCC - mpif90 - gpfs - pgcc - pgfortran - $ENV{NETCDF_C_PATH} - $ENV{NETCDF_FORTRAN_PATH} - TRUE - - - - - --host=Linux - - - -qzerosize -qfree=f90 -qxlf2003=polymorphic - - - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib64 -llapack - - mpixlc - mpixlC - mpixlf - lustre - xlc_r - xlf_r - - - - - -O2 - - - --host=Linux - - - -O2 -DSUMMITDEV_PGI - - - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - - mpicc - mpiCC - mpif90 - lustre - pgcc - pgfortran - - - - - -O2 - - - --host=Linux - - - -O2 -DSUMMITDEV_PGI - - - - -ta=tesla:cc60,cuda8.0,pinned - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib 
-llapack - - mpicc - mpiCC - mpif90 - lustre - pgcc - pgfortran - - - - - -O2 - - - --host=Linux - - - -DNO_SHR_VMATH -DCNL - - - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - - - -llapack -lblas - - mpich - /usr/tce/packages/mvapich2/mvapich2-2.2-intel-18.0.1/ - /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/ - - $SHELL{/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/bin/nf-config --flibs} - - - - - - -O2 - - - --host=Linux - - - -DNO_SHR_VMATH -DCNL - - - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - - - -llapack -lblas - - mpich - /usr/tce/packages/mvapich2/mvapich2-2.2-intel-18.0.1/ - /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/ - - $SHELL{/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/bin/nf-config --flibs} - - - - - - --host=Linux - - - -O2 - - - -O2 - - - $SHELL{nf-config --flibs} - - - - - - --host=Linux - - - -DARCH_MIC_KNL - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent - -O2 -debug minimal -qno-opt-dynamic-align -fp-speculation=off - -DHAVE_ERF_INTRINSICS - - icc - icpc - ifort - - -L$ENV{NETCDF_DIR}/lib -lnetcdff -L$ENV{NETCDF_DIR}/lib -lnetcdf -Wl,-rpath -Wl,$ENV{NETCDF_DIR}/lib - -mkl -lpthread - - - - - - --host=Linux - - - -DHAVE_SLASHPROC - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent - -O2 -debug minimal -qno-opt-dynamic-align -fp-speculation=off - - mpiifort - mpiicc - mpiicpc - icc - icpc - ifort - - $SHELL{$(NETCDF_PATH)/bin/nf-config --flibs} -Wl,-rpath -Wl,$ENV{NETCDF_PATH}/lib -mkl - - - - - - --host=Linux - - - -O2 - - - -O2 - - - $SHELL{$(NETCDF_PATH)/bin/nf-config --flibs} -Wl,-rpath -Wl,$(NETCDF_PATH)/lib -L/home/azamat/soft/libs -llapack -lblas - - - - - - -O2 - - - --host=Linux - - - -O2 - - - $SHELL{nf-config --flibs} -mkl=cluster - $SHELL{nf-config --flibs} -mkl=cluster - $SHELL{nf-config --flibs} -mkl=cluster - $SHELL{nf-config --flibs} -mkl=cluster - $SHELL{nf-config --flibs} -mkl=cluster - $SHELL{nf-config --flibs} -mkl=cluster - - -L/opt/cray/netcdf/4.4.1.1.3/INTEL/16.0/lib -lnetcdff -L/opt/cray/hdf5/1.10.0.3/GNU/4.9/lib -lnetcdf -mkl - - TRUE - - - - /ccs/proj/cli106/AlbanyTrilinos/Albany/build/install - - -O2 - - - --host=Linux - - - -lfmpich -lmpichf90_pgi $ENV{PGI_PATH}/linux86-64/$ENV{PGI_VERSION}/lib/f90main.o /opt/gcc/default/snos/lib64/libstdc++.a - - - -O2 - -target-cpu=istanbul - - - $SHELL{nf-config --flibs} - $SHELL{nf-config --flibs} - $SHELL{nf-config --flibs} - $SHELL{nf-config --flibs} - $SHELL{nf-config --flibs} - $SHELL{nf-config --flibs} - - -L/opt/cray/netcdf/4.4.1.1.3/PGI/15.3/lib -lnetcdff -L/opt/cray/hdf5/1.10.0.3/GNU/4.9/lib -lnetcdf - - TRUE - - - - - -O2 - - - --host=Linux - - - -lfmpich -lmpichf90_pgi $ENV{PGI_PATH}/linux86-64/$ENV{PGI_VERSION}/lib/f90main.o - - - -O2 - - - -ta=nvidia,cc35,cuda7.5 - - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} - - TRUE - - - - lustre - mpicc - mpif90 - mpic++ - gfortran - gcc - g++ - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas - $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm -z muldefs - - - -lstdc++ -lmpi_cxx - - - - - lustre - mpicc - mpif90 - mpic++ - ifort - icc - icpc - - 
$SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas - -mkl -lpthread - - - -lstdc++ -lmpi_cxx - - - - - lustre - mpicc - mpif90 - mpic++ - gfortran - gcc - g++ - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas - $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm -z muldefs - - - -lstdc++ -lmpi_cxx - - - - - lustre - mpicc - mpif90 - mpic++ - ifort - icc - icpc - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas - -mkl -lpthread - - - -lstdc++ -lmpi_cxx - - - compile_threaded="TRUE"> -qopenmp - - - compile_threaded="TRUE"> -qopenmp - - - compile_threaded="TRUE"> -qopenmp - - - - - - - - - - - - - - USERDEFINED_MUST_EDIT_THIS - - - # USERDEFINED $SHELL{$NETCDF_PATH/bin/nf-config --flibs} - - - - diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml deleted file mode 100644 index b25f53fc61c..00000000000 --- a/config/e3sm/machines/config_machines.xml +++ /dev/null @@ -1,3077 +0,0 @@ - - - - - - - Cori. XC40 Cray system at NERSC. Haswell partition. os is CNL, 32 pes/node, batch system is SLURM - cori-knl-is-default - CNL - intel,gnu - mpt - acme - /project/projectdirs/acme - acme,m3411,m3412 - $ENV{SCRATCH}/acme_scratch/cori-haswell - /project/projectdirs/acme/inputdata - /project/projectdirs/acme/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /project/projectdirs/acme/baselines/$COMPILER - /project/projectdirs/acme/tools/cprnc.cori/cprnc - 8 - e3sm_developer - 4 - nersc_slurm - e3sm - 32 - 32 - TRUE - - srun - - --label - -n {{ total_tasks }} - -c $SHELL{echo 64/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc} - $SHELL{if [ 32 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} - -m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`} - - - - /opt/modules/default/init/perl - /opt/modules/default/init/python - /opt/modules/default/init/sh - /opt/modules/default/init/csh - /opt/modules/default/bin/modulecmd perl - /opt/modules/default/bin/modulecmd python - module - module - - - PrgEnv-intel - PrgEnv-cray - PrgEnv-gnu - intel - cce - gcc - cray-parallel-netcdf - cray-hdf5-parallel - pmi - cray-libsci - cray-mpich2 - cray-mpich - cray-netcdf - cray-hdf5 - cray-netcdf-hdf5parallel - craype-sandybridge - craype-ivybridge - craype - papi - cmake - cray-petsc - esmf - zlib - craype-hugepages2M - - - craype - PrgEnv-intel - cray-mpich - craype-mic-knl - craype-haswell - - - - cray-mpich cray-mpich/7.7.6 - - - - PrgEnv-intel/6.0.5 - intel - intel/19.0.3.199 - - - - PrgEnv-intel PrgEnv-gnu/6.0.5 - gcc - gcc/8.2.0 - cray-libsci - cray-libsci/19.02.1 - - - - craype craype/2.5.18 - pmi - pmi/5.0.14 - craype-mic-knl - craype-haswell - - - - cray-netcdf-hdf5parallel - cray-hdf5-parallel - cray-parallel-netcdf - cray-netcdf/4.6.1.3 - cray-hdf5/1.10.2.0 - - - cray-netcdf-hdf5parallel - cray-netcdf-hdf5parallel/4.6.1.3 - cray-hdf5-parallel/1.10.2.0 - cray-parallel-netcdf/1.8.1.4 - - - - git - git - cmake - cmake/3.14.4 - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - - 1 - 1 - - - 128M - spread - threads - FALSE - - /project/projectdirs/acme/perl5/lib/perl5/x86_64-linux-thread-multi - - - yes - - - - - - Cori. XC40 Cray system at NERSC. KNL partition. 
os is CNL, 68 pes/node (for now only use 64), batch system is SLURM - cori - CNL - intel,gnu,intel19 - mpt,impi - acme - /project/projectdirs/acme - acme,m3411,m3412 - $ENV{SCRATCH}/acme_scratch/cori-knl - /project/projectdirs/acme/inputdata - /project/projectdirs/acme/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /project/projectdirs/acme/baselines/$COMPILER - /project/projectdirs/acme/tools/cprnc.cori/cprnc - 8 - e3sm_developer - 4 - nersc_slurm - e3sm - 128 - 64 - TRUE - - srun - - --label - -n {{ total_tasks }} - -c $SHELL{mpn=`./xmlquery --value MAX_MPITASKS_PER_NODE`; if [ 68 -ge $mpn ]; then c0=`expr 272 / $mpn`; c1=`expr $c0 / 4`; cflag=`expr $c1 \* 4`; echo $cflag|bc ; else echo 272/$mpn|bc;fi;} - $SHELL{if [ 68 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} - -m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`} - - - - /opt/modules/default/init/perl - /opt/modules/default/init/python - /opt/modules/default/init/sh - /opt/modules/default/init/csh - /opt/modules/default/bin/modulecmd perl - /opt/modules/default/bin/modulecmd python - module - module - - craype - craype-mic-knl - craype-haswell - PrgEnv-intel - PrgEnv-cray - PrgEnv-gnu - intel - cce - gcc - cray-parallel-netcdf - cray-hdf5-parallel - pmi - cray-mpich2 - cray-mpich - cray-netcdf - cray-hdf5 - cray-netcdf-hdf5parallel - cray-libsci - papi - cmake - cray-petsc - esmf - zlib - craype-hugepages2M - - - craype - PrgEnv-intel - cray-mpich - craype-haswell - craype-mic-knl - - - - cray-mpich cray-mpich/7.7.6 - - - - cray-mpich impi/2019.up3 - - - - PrgEnv-intel/6.0.5 - intel - intel/18.0.1.163 - - - - PrgEnv-intel/6.0.5 - intel - intel/19.0.3.199 - - - - PrgEnv-intel PrgEnv-gnu/6.0.5 - gcc - gcc/8.2.0 - cray-libsci - cray-libsci/19.02.1 - - - - craype craype/2.5.18 - pmi - pmi/5.0.14 - craype-haswell - craype-mic-knl - - - - cray-netcdf-hdf5parallel - cray-hdf5-parallel - cray-parallel-netcdf - cray-netcdf/4.6.1.3 - cray-hdf5/1.10.2.0 - - - cray-netcdf-hdf5parallel - cray-netcdf-hdf5parallel/4.6.1.3 - cray-hdf5-parallel/1.10.2.0 - cray-parallel-netcdf/1.8.1.4 - - - - git - git - cmake - cmake/3.14.4 - - - - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - 1 - 1 - - - 128M - spread - threads - FALSE - - /project/projectdirs/acme/perl5/lib/perl5/x86_64-linux-thread-multi - - - - disabled - - - yes - 1 - - - 1 - - - - - - Stampede2. Intel skylake nodes at TACC. 
48 cores per node, batch system is SLURM - .*stampede2.* - LINUX - intel,gnu - impi - $ENV{SCRATCH} - acme - $ENV{SCRATCH}/acme_scratch/stampede2 - $ENV{SCRATCH}/inputdata - $ENV{SCRATCH}/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - $ENV{SCRATCH}/baselines/$COMPILER - $ENV{SCRATCH}/tools/cprnc.cori/cprnc - 8 - e3sm_developer - slurm - e3sm - 96 - 48 - FALSE - - ibrun - - - /opt/apps/lmod/lmod/init/perl - /opt/apps/lmod/lmod/init/python - /opt/apps/lmod/lmod/init/sh - /opt/apps/lmod/lmod/init/csh - /opt/apps/lmod/lmod/libexec/lmod perl - /opt/apps/lmod/lmod/libexec/lmod python - module -q - module -q - - - - - - - intel/18.0.0 - - - - gcc/6.3.0 - - - - impi/18.0.0 - - - - hdf5/1.8.16 - netcdf/4.3.3.1 - - - phdf5/1.8.16 - parallel-netcdf/4.3.3.1 - pnetcdf/1.8.1 - - - git - cmake - autotools - xalt - - - - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - 1 - 1 - - 128M - spread - threads - 1 - -l - - - - - Mac OS/X workstation or laptop - - Darwin - gnu - openmpi,mpich - $ENV{HOME}/projects/acme/scratch - $ENV{HOME}/projects/acme/cesm-inputdata - $ENV{HOME}/projects/acme/ptclm-data - $ENV{HOME}/projects/acme/scratch/archive/$CASE - $ENV{HOME}/projects/acme/baselines/$COMPILER - $CCSMROOT/tools/cprnc/build/cprnc - 4 - e3sm_developer - none - jnjohnson at lbl dot gov - 4 - 2 - - mpirun - - - - $ENV{HOME}/projects/acme/scratch/$CASE/run - $ENV{HOME}/projects/acme/scratch/$CASE/bld - - - - - - Linux workstation or laptop - none - LINUX - gnu - openmpi,mpich - $ENV{HOME}/projects/acme/scratch - $ENV{HOME}/projects/acme/cesm-inputdata - $ENV{HOME}/projects/acme/ptclm-data - $ENV{HOME}/projects/acme/scratch/archive/$CASE - $ENV{HOME}/projects/acme/baselines/$COMPILER - $CCSMROOT/tools/cprnc/build/cprnc - 4 - e3sm_developer - none - jayesh at mcs dot anl dot gov - 4 - 2 - - mpirun - - -np {{ total_tasks }} - - - - $ENV{HOME}/projects/acme/scratch/$CASE/run - $ENV{HOME}/projects/acme/scratch/$CASE/bld - - - - - - Linux workstation for Jenkins testing - (melvin|watson|s999964|climate|penn|sems) - LINUX - sonproxy.sandia.gov:80 - gnu,intel - openmpi - /sems-data-store/ACME/timings - .* - $ENV{HOME}/acme/scratch - /sems-data-store/ACME/inputdata - /sems-data-store/ACME/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /sems-data-store/ACME/baselines/$COMPILER - /sems-data-store/ACME/cprnc/build.new/cprnc - 32 - e3sm_developer - none - jgfouca at sandia dot gov - 48 - 48 - - mpirun - - -np {{ total_tasks }} - --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread:overload-allowed - - - - /usr/share/Modules/init/python.py - /usr/share/Modules/init/perl.pm - /usr/share/Modules/init/sh - /usr/share/Modules/init/csh - /usr/bin/modulecmd python - /usr/bin/modulecmd perl - module - module - - - sems-env - acme-env - sems-git - acme-binutils - sems-python/2.7.9 - sems-cmake/3.12.2 - - - sems-gcc/7.3.0 - - - sems-intel/16.0.3 - - - sems-netcdf/4.4.1/exo - acme-pfunit/3.2.8/base - - - acme-openmpi/2.1.5/acme - acme-netcdf/4.4.1/acme - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - 1000 - - - $ENV{SEMS_NETCDF_ROOT} - 64M - spread - threads - - - $ENV{SEMS_NETCDF_ROOT} - - - - - IBM Power 8 Testbed machine - white - LINUX - gnu - openmpi - $ENV{HOME}/projects/e3sm/scratch - $ENV{HOME}/projects/e3sm/cesm-inputdata - $ENV{HOME}/projects/e3sm/ptclm-data - $ENV{HOME}/projects/e3sm/scratch/archive/$CASE - $ENV{HOME}/projects/e3sm/baselines/$COMPILER - $CCSMROOT/tools/cprnc/build/cprnc - 32 - e3sm_developer - lsf - mdeakin at 
sandia dot gov - 4 - 1 - - mpirun - - - - /usr/share/Modules/init/sh - /usr/share/Modules/init/python.py - module - /usr/bin/modulecmd python - - devpack/20181011/openmpi/2.1.2/gcc/7.2.0/cuda/9.2.88 - - - $ENV{HOME}/projects/e3sm/scratch/$CASE/run - $ENV{HOME}/projects/e3sm/scratch/$CASE/bld - - $ENV{NETCDF_ROOT} - /ascldap/users/jgfouca/packages/netcdf-fortran-4.4.4-white - $SRCROOT - - - - - Skylake Testbed machine - blake - LINUX - intel18 - openmpi - $ENV{HOME}/projects/e3sm/scratch - $ENV{HOME}/projects/e3sm/cesm-inputdata - $ENV{HOME}/projects/e3sm/ptclm-data - $ENV{HOME}/projects/e3sm/scratch/archive/$CASE - $ENV{HOME}/projects/e3sm/baselines/$COMPILER - $CCSMROOT/tools/cprnc/build/cprnc - 48 - e3sm_developer - slurm - mdeakin at sandia dot gov - 48 - 48 - - mpirun - - - - /usr/share/Modules/init/sh - /usr/share/Modules/init/python.py - module - module - - zlib/1.2.11 - intel/compilers/18.1.163 - openmpi/2.1.2/intel/18.1.163 - hdf5/1.10.1/openmpi/2.1.2/intel/18.1.163 - netcdf-exo/4.4.1.1/openmpi/2.1.2/intel/18.1.163 - - - $ENV{HOME}/projects/e3sm/scratch/$CASE/run - $ENV{HOME}/projects/e3sm/scratch/$CASE/bld - - $ENV{NETCDF_ROOT} - $ENV{NETCDFF_ROOT} - - - - - Linux workstation for ANL - compute.*mcs.anl.gov - LINUX - gnu - mpich - $ENV{HOME}/acme/scratch - /home/climate1/acme/inputdata - /home/climate1/acme/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /home/climate1/acme/baselines/$COMPILER - /home/climate1/acme/cprnc/build/cprnc - make - 32 - e3sm_developer - none - jgfouca at sandia dot gov - 32 - 32 - - mpirun - - -l -np {{ total_tasks }} - - - - /software/common/adm/packages/softenv-1.6.2/etc/softenv-load.csh - /software/common/adm/packages/softenv-1.6.2/etc/softenv-load.sh - source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.csh ; soft - source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.sh ; soft - - +cmake-3.12.4 - - - +gcc-6.2.0 - +szip-2.1-gcc-6.2.0 - - - +netcdf-4.4.1c-4.2cxx-4.4.4f-serial-gcc6.2.0 - - - +mpich-3.2-gcc-6.2.0 - +hdf5-1.8.16-gcc-6.2.0-mpich-3.2-parallel - +netcdf-4.4.1c-4.2cxx-4.4.4f-parallel-gcc6.2.0-mpich-3.2 - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - $SHELL{dirname $(dirname $(which ncdump))} - - - - /soft/apps/packages/climate/hdf5/1.8.16-serial/gcc-6.2.0/lib:$ENV{LD_LIBRARY_PATH} - - - $SHELL{dirname $(dirname $(which h5dump))} - - /soft/apps/packages/climate/pnetcdf/1.8.1/gcc-6.2.0 - - - 64M - - - - - SNL clust - (skybridge|chama) - LINUX - wwwproxy.sandia.gov:80 - intel - openmpi - fy190158 - /projects/ccsm/timings - .* - /gpfs1/$USER/acme_scratch/sandiatoss3 - /projects/ccsm/inputdata - /projects/ccsm/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /projects/ccsm/ccsm_baselines/$COMPILER - /projects/ccsm/cprnc/build.toss3/cprnc_wrap - 8 - e3sm_integration - slurm - jgfouca at sandia dot gov - 16 - 16 - TRUE - - mpiexec - - --n {{ total_tasks }} - --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core - - - - - - - /usr/share/lmod/lmod/init/python.py - /usr/share/lmod/lmod/init/perl.pm - /usr/share/lmod/lmod/init/sh - /usr/share/lmod/lmod/init/csh - /usr/share/lmod/lmod/libexec/lmod python - /usr/share/lmod/lmod/libexec/lmod perl - module - module - - - sems-env - sems-git - sems-python/2.7.9 - sems-cmake/3.12.2 - gnu/4.9.2 - sems-intel/17.0.0 - - - sems-openmpi/1.10.5 - sems-netcdf/4.4.1/exo_parallel - - - sems-netcdf/4.4.1/exo - - - /gscratch/$USER/acme_scratch/sandiatoss3/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - 0.1 - - - 
$ENV{SEMS_NETCDF_ROOT} - $ENV{SEMS_NETCDF_ROOT}/include - $ENV{SEMS_NETCDF_ROOT}/lib - 64M - - - $ENV{SEMS_NETCDF_ROOT} - - - - - SNL clust - ghost-login - LINUX - wwwproxy.sandia.gov:80 - intel - openmpi - fy190158 - - /gscratch/$USER/acme_scratch/ghost - /projects/ccsm/inputdata - /projects/ccsm/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /projects/ccsm/ccsm_baselines/$COMPILER - /projects/ccsm/cprnc/build.toss3/cprnc_wrap - 8 - e3sm_integration - slurm - jgfouca at sandia dot gov - 36 - 36 - TRUE - - mpiexec - - --n {{ total_tasks }} - --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core - - - - - - - /usr/share/lmod/lmod/init/python.py - /usr/share/lmod/lmod/init/perl.pm - /usr/share/lmod/lmod/init/sh - /usr/share/lmod/lmod/init/csh - /usr/share/lmod/lmod/libexec/lmod python - /usr/share/lmod/lmod/libexec/lmod perl - module - module - - - sems-env - sems-git - sems-python/2.7.9 - sems-cmake - gnu/4.9.2 - sems-intel/16.0.2 - mkl/16.0 - sems-netcdf/4.4.1/exo_parallel - - - sems-openmpi/1.10.5 - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - - $ENV{SEMS_NETCDF_ROOT} - $ENV{SEMS_NETCDF_ROOT}/include - $ENV{SEMS_NETCDF_ROOT}/lib - 64M - - - $ENV{SEMS_NETCDF_ROOT} - - - - - ANL/LCRC Linux Cluster - blogin.*.lcrc.anl.gov - LINUX - gnu,pgi,intel,nag - mvapich,mpich,openmpi - ACME - /lcrc/project/$PROJECT/$USER/acme_scratch - /home/ccsm-data/inputdata - /home/ccsm-data/inputdata/atm/datm7 - /lcrc/project/ACME/$USER/archive/$CASE - /lcrc/group/acme/acme_baselines/blues/$COMPILER - /home/ccsm-data/tools/cprnc - 4 - e3sm_integration - 4 - pbs - acme - 16 - 16 - TRUE - - mpiexec - - -n {{ total_tasks }} - - - - mpiexec - - -n {{ total_tasks }} - - - - - - - /etc/profile.d/a_softenv.csh - /etc/profile.d/a_softenv.sh - soft - soft - - +cmake-2.8.12 - +python-2.7 - - - +gcc-5.3.0 - +hdf5-1.10.0-gcc-5.3.0-serial - +netcdf-c-4.4.0-f77-4.4.3-gcc-5.3.0-serial - - - +gcc-5.2 - +netcdf-4.3.3.1-gnu5.2-serial - - - +mvapich2-2.2b-gcc-5.3.0 - +pnetcdf-1.6.1-gcc-5.3.0-mvapich2-2.2b - - - +mvapich2-2.2b-gcc-5.2 - - - +intel-15.0 - +mkl-11.2.1 - - - +mvapich2-2.2b-intel-15.0 - +pnetcdf-1.6.1-mvapich2-2.2a-intel-15.0 - - - +pgi-15.7 - +binutils-2.27 - +netcdf-c-4.4.1-f77-4.4.4-pgi-15.7-serial - - - +mvapich2-2.2-pgi-15.7 - +pnetcdf-1.7.0-pgi-15.7-mvapich2-2.2 - - - +nag-6.0 - +hdf5-1.8.12-serial-nag - +netcdf-4.3.1-serial-nag - - - +mpich3-3.1.4-nag-6.0 - +pnetcdf-1.6.1-mpich-3.1.4-nag-6.0 - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - $SHELL{dirname $(dirname $(which ncdump))} - - - $SHELL{dirname $(dirname $(which pnetcdf_version))} - - - 64M - - - - - ANL/LCRC Linux Cluster - b51.*.lcrc.anl.gov - LINUX - intel,gnu,pgi - mvapich,openmpi - condo - /lcrc/group/acme - .* - /lcrc/group/acme/$USER/acme_scratch/anvil - /home/ccsm-data/inputdata - /home/ccsm-data/inputdata/atm/datm7 - /lcrc/group/acme/$USER/archive/$CASE - /lcrc/group/acme/acme_baselines/$COMPILER - /lcrc/group/acme/tools/cprnc/cprnc - 8 - e3sm_integration - 4 - slurm - E3SM - 36 - 36 - FALSE - - srun - - -l -N {{ num_nodes }} -n {{ total_tasks }} - --cpu_bind=cores - -c $SHELL{if [ FALSE = `./xmlquery --value SMP_PRESENT` ];then echo 1;else echo $OMP_NUM_THREADS;fi} - -m plane=$SHELL{if [ FALSE = `./xmlquery --value SMP_PRESENT` ];then echo 36;else echo 36/$OMP_NUM_THREADS|bc;fi} - - - - - - - /etc/profile.d/a_softenv.csh - /etc/profile.d/a_softenv.sh - soft - soft - - +cmake-3.12.3 - +python-2.7 - - - +gcc-5.3.0 - +intel-17.0.0 - 
+netcdf-c-4.4.1-f77-4.4.4-intel-17.0.0-serial - - - +mvapich2-2.2-intel-17.0.0-acme - +pnetcdf-1.7.0-intel-17.0.0-mvapich2-2.2-acme - - - +openmpi-2.0.1-intel-17.0.0-acme - +pnetcdf-1.7.0-intel-17.0.0-openmpi-2.0.1-acme - - - +gcc-5.3.0 - +netcdf-c-4.4.0-f77-4.4.3-gcc-5.3.0-serial - - - +mvapich2-2.2b-gcc-5.3.0-acme - +pnetcdf-1.6.1-gcc-5.3.0-mvapich2-2.2b-acme - - - +openmpi-1.10.2-gcc-5.3.0-acme - +pnetcdf-1.6.1-gcc-5.3.0-openmpi-1.10.2-acme - - - +pgi-16.3 - +netcdf-c-4.4.0-f77-4.4.3-pgi-16.3-serial - - - +mvapich2-2.2b-pgi-16.3-acme - +pnetcdf-1.6.1-pgi-16.3-mvapich2-2.2b-acme - - - +openmpi-1.10.2-pgi-16.3-acme - +pnetcdf-1.6.1-pgi-16.3-openmpi-1.10.2-acme - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - 1000 - - $SHELL{which nc-config | xargs dirname | xargs dirname} - $SHELL{which nf-config | xargs dirname | xargs dirname} - - - $SHELL{which pnetcdf_version | xargs dirname | xargs dirname} - - - 0 - 1 - - - 1 - 2 - - - 10 - - - 64M - 1 - - - granularity=thread,scatter - 1 - - - spread - threads - - - - - ANL/LCRC Cluster, Cray CS400, 352-nodes Xeon Phi 7230 KNLs 64C/1.3GHz + 672-nodes Xeon E5-2695v4 Broadwells 36C/2.10GHz, Intel Omni-Path network, SLURM batch system, Lmod module environment. - beboplogin.* - LINUX - intel,gnu - impi,mpich,mvapich,openmpi - acme - /lcrc/group/acme/$USER/acme_scratch/bebop - /home/ccsm-data/inputdata - /home/ccsm-data/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /lcrc/group/acme/acme_baselines/bebop/$COMPILER - /lcrc/group/acme/tools/cprnc/cprnc - 8 - e3sm_integration - 4 - slurm - E3SM - 36 - 36 - TRUE - - mpirun - - -l -n {{ total_tasks }} - - - - - - - /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh - /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh - /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py - /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python - module - module - - - - - intel/17.0.4-74uvhji - intel-mkl/2017.3.196-jyjmyut - - - gcc/7.1.0-4bgguyp - - - intel-mpi/2017.3-dfphq6k - parallel-netcdf/1.6.1 - - - mvapich2/2.2-n6lclff - parallel-netcdf/1.6.1-mvapich2.2 - - - cmake - netcdf/4.4.1.1-prsuusl - netcdf-fortran/4.4.4-ojwazvy - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - $SHELL{which nc-config | xargs dirname | xargs dirname} - $SHELL{which nf-config | xargs dirname | xargs dirname} - /lcrc/group/acme/soft/perl/5.26.0/bin:$ENV{PATH} - - - $SHELL{which pnetcdf_version | xargs dirname | xargs dirname} - - - 128M - spread - threads - - - shm:tmi - - - - - ANL IBM BG/Q, os is BGQ, 16 cores/node, batch system is cobalt - cetus - BGQ - ibm - ibm - ClimateEnergy_2 - ClimateEnergy - /projects/$PROJECT/$USER - /projects/ccsm/inputdata - /projects/ccsm/inputdata/atm/datm7 - /projects/$PROJECT/$USER/archive/$CASE - /projects/ccsm/ccsm_baselines//$COMPILER - /projects/ccsm/tools/cprnc/cprnc - 8 - e3sm_developer - cobalt - jayesh -at- mcs.anl.gov - 64 - 4 - TRUE - - /usr/bin/runjob - - --label short - --ranks-per-node $MAX_MPITASKS_PER_NODE - --np {{ total_tasks }} - --block $COBALT_PARTNAME $LOCARGS - $ENV{BGQ_SMP_VARS} - $ENV{BGQ_STACKSIZE} - - - - /etc/profile.d/00softenv.csh - /etc/profile.d/00softenv.sh - soft - soft - - 
+mpiwrapper-xl - @ibm-compilers-2016-05 - +cmake - +python - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - 10000 - - - - - --envs BG_THREADLAYOUT=1 XL_BG_SPREADLAYOUT=YES OMP_DYNAMIC=FALSE OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} - - - --envs OMP_STACKSIZE=64M - - - --envs OMP_STACKSIZE=16M - - - - - LLNL Linux Cluster, Linux (pgi), 16 pes/node, batch system is Slurm - LINUX - intel - mpich - /p/lscratchh/$USER - /usr/gdata/climdat/ccsm3data/inputdata - /usr/gdata/climdat/ccsm3data/inputdata/atm/datm7 - /p/lscratchh/$CCSMUSER/archive/$CASE - /p/lscratchh/$CCSMUSER/ccsm_baselines/$COMPILER - /p/lscratchd/ma21/ccsm3data/tools/cprnc/cprnc - 8 - lc_slurm - donahue5 -at- llnl.gov - 16 - 16 - - - - - srun - - - /usr/share/lmod/lmod/init/env_modules_python.py - /usr/share/lmod/lmod/init/perl - /usr/share/lmod/lmod/init/sh - /usr/share/lmod/lmod/init/csh - module - module - /usr/share/lmod/lmod/libexec/lmod python - /usr/share/lmod/lmod/libexec/lmod perl - - python - git - intel/19.0.4 - mvapich2/2.3 - cmake/3.14.5 - netcdf-fortran/4.4.4 - pnetcdf/1.9.0 - - - /p/lscratchh/$CCSMUSER/ACME/$CASE/run - /p/lscratchh/$CCSMUSER/$CASE/bld - - /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/ - /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/ - - - /usr/tce/packages/pnetcdf/pnetcdf-1.9.0-intel-18.0.1-mvapich2-2.2/ - - - - - LLNL Linux Cluster, Linux (pgi), 36 pes/node, batch system is Slurm - LINUX - intel - mpich - /p/lscratchh/$USER - /usr/gdata/climdat/ccsm3data/inputdata - /usr/gdata/climdat/ccsm3data/inputdata/atm/datm7 - /p/lscratchh/$CCSMUSER/archive/$CASE - /p/lscratchh/$CCSMUSER/ccsm_baselines/$COMPILER - /p/lscratchd/ma21/ccsm3data/tools/cprnc/cprnc - 8 - lc_slurm - donahue5 -at- llnl.gov - 36 - 36 - - - - - srun - - - /usr/share/lmod/lmod/init/env_modules_python.py - /usr/share/lmod/lmod/init/perl - /usr/share/lmod/lmod/init/sh - /usr/share/lmod/lmod/init/csh - module - module - /usr/share/lmod/lmod/libexec/lmod python - /usr/share/lmod/lmod/libexec/lmod perl - - python - git - intel/19.0.4 - mvapich2/2.3 - cmake/3.14.5 - netcdf-fortran/4.4.4 - pnetcdf/1.9.0 - - - /p/lscratchh/$CCSMUSER/ACME/$CASE/run - /p/lscratchh/$CCSMUSER/$CASE/bld - - /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/ - /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/ - - - /usr/tce/packages/pnetcdf/pnetcdf-1.9.0-intel-18.0.1-mvapich2-2.2/ - - - - - ANL IBM BG/Q, os is BGQ, 16 cores/node, batch system is cobalt - mira.* - BGQ - ibm - ibm - ClimateEnergy_2 - /projects/$PROJECT - ClimateEnergy_2 - /projects/$PROJECT/$USER - /projects/ccsm/inputdata - /projects/ccsm/inputdata/atm/datm7 - /projects/$PROJECT/$USER/archive/$CASE - /projects/ccsm/ccsm_baselines//$COMPILER - /projects/ccsm/tools/cprnc/cprnc - 8 - e3sm_developer - cobalt - mickelso -at- mcs.anl.gov - 64 - 4 - TRUE - - /usr/bin/runjob - - --label short - --ranks-per-node $MAX_MPITASKS_PER_NODE - --np {{ total_tasks }} - --block $COBALT_PARTNAME $LOCARGS - $ENV{BGQ_SMP_VARS} - $ENV{BGQ_STACKSIZE} - - - - /etc/profile.d/00softenv.csh - /etc/profile.d/00softenv.sh - soft - soft - - +mpiwrapper-xl - @ibm-compilers-2016-05 - +cmake - +python - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - 10000 - - - - - --envs BG_THREADLAYOUT=1 XL_BG_SPREADLAYOUT=YES OMP_DYNAMIC=FALSE OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} - - - --envs OMP_STACKSIZE=64M - - - --envs OMP_STACKSIZE=16M - - - - - ALCF Cray XC40 KNL, os is CNL, 64 pes/node, batch system is cobalt - theta.* 
- CNL - intel,gnu,cray - mpt - /projects/$PROJECT - ClimateEnergy_3,OceanClimate_2 - /projects/$PROJECT/$USER - /projects/ccsm/acme/inputdata - /projects/ccsm/acme/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /projects/$PROJECT/acme/baselines/$COMPILER - /projects/ccsm/acme/tools/cprnc/cprnc - 8 - e3sm_developer - 4 - cobalt_theta - E3SM - 128 - 64 - TRUE - - aprun - - -n {{ total_tasks }} - -N $SHELL{if [ `./xmlquery --value MAX_MPITASKS_PER_NODE` -gt `./xmlquery --value TOTAL_TASKS` ];then echo `./xmlquery --value TOTAL_TASKS`;else echo `./xmlquery --value MAX_MPITASKS_PER_NODE`;fi;} - --cc depth -d $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc} -j $SHELL{if [ 64 -ge `./xmlquery --value MAX_TASKS_PER_NODE` ];then echo 1;else echo `./xmlquery --value MAX_TASKS_PER_NODE`/64|bc;fi;} - $ENV{SMP_VARS} $ENV{labeling} - - - - /opt/modules/default/init/perl.pm - /opt/modules/default/init/python.py - /opt/modules/default/init/sh - /opt/modules/default/init/csh - /opt/modules/default/bin/modulecmd perl - /opt/modules/default/bin/modulecmd python - module - module - - craype-mic-knl - PrgEnv-intel - PrgEnv-cray - PrgEnv-gnu - intel - cce - cray-mpich - cray-parallel-netcdf - cray-hdf5-parallel - cray-hdf5 - cray-netcdf - cray-netcdf-hdf5parallel - cray-libsci - craype - craype/2.5.12 - cmake/3.11.4 - - - intel/18.0.0.128 - PrgEnv-intel/6.0.4 - - - cce/8.6.2 - PrgEnv-cray/6.0.4 - - - gcc/7.3.0 - PrgEnv-gnu/6.0.4 - - - cray-libsci/17.09.1 - - - craype-mic-knl - cray-mpich/7.6.2 - - - cray-netcdf/4.4.1.1.3 - cray-parallel-netcdf/1.8.1.3 - - - cray-netcdf/4.4.1.1.3 - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - 0.1 - - 1 - 1 - - /projects/ccsm/acme/tools/mpas - 2 - - - - - -e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e KMP_AFFINITY=granularity=thread,scatter - - - -e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e OMP_PROC_BIND=spread -e OMP_PLACES=threads - - - -e PMI_LABEL_ERROUT=1 - - - - - ANL experimental/evaluation cluster, batch system is cobalt - jlse.* - LINUX - intel,gnu - mpich - $ENV{HOME}/acme/scratch - /home/azamat/acme/inputdata - /home/azamat/acme/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - $ENV{HOME}/acme/baselines/$COMPILER - /home/azamat/acme/tools/cprnc - 8 - acme_developer - cobalt_theta - e3sm - 128 - 64 - FALSE - - mpirun - - -n $TOTALPES - - - - /etc/bashrc - source - - /soft/compilers/intel/compilers_and_libraries/linux/bin/compilervars.sh intel64 - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - 1 - 1 - 1 - 1 - /home/azamat/perl5/bin:$ENV{PATH} - /home/azamat/perl5/lib/perl5 - /home/azamat/perl5 - "--install_base \"/home/azamat/perl5\"" - "INSTALL_BASE=/home/azamat/perl5" - - - /home/azamat/soft/netcdf/4.3.3.1c-4.2cxx-4.4.2f/intel18 - /home/azamat/soft/pnetcdf/1.6.1/intel18 - 10 - core - - - /home/azamat/soft/netcdf/4.3.3.1c-4.2cxx-4.4.2f/gnu-arm - /home/azamat/soft/pnetcdf/1.6.1/gnu-arm - - - verbose,granularity=thread,scatter - 256M - - - spread - threads - 256M - - - - - PNL cluster, OS is Linux, batch system is SLURM - sooty - LINUX - intel,pgi - mvapich2 - /lustre/$USER/cime_output_root - /lustre/climate/csmdata/ - /lustre/climate/csmdata/atm/datm7 - /lustre/$USER/archive/$CASE - /lustre/climate/acme_baselines/$COMPILER - /lustre/climate/acme_baselines/cprnc/cprnc - 8 - slurm - balwinder.singh -at- pnnl.gov - 8 - 8 - FALSE - - - - - srun - - --mpi=none - --ntasks={{ total_tasks }} - --cpu_bind=sockets --cpu_bind=verbose - 
--kill-on-bad-exit - - - - /share/apps/modules/Modules/3.2.10/init/perl.pm - /share/apps/modules/Modules/3.2.10/init/python.py - /etc/profile.d/modules.csh - /etc/profile.d/modules.sh - /share/apps/modules/Modules/3.2.10/bin/modulecmd perl - /share/apps/modules/Modules/3.2.10/bin/modulecmd python - module - module - - - - - perl/5.20.0 - cmake/3.3.0 - python/2.7.8 - svn/1.8.13 - - - intel/15.0.1 - mkl/15.0.1 - - - pgi/14.10 - - - mvapich2/2.1 - - - netcdf/4.3.2 - - - /lustre/$USER/csmruns/$CASE/run - /lustre/$USER/csmruns/$CASE/bld - - $ENV{MKLROOT} - $ENV{NETCDF_LIB}/../ - 64M - - - - - PNNL Intel KNC cluster, OS is Linux, batch system is SLURM - glogin - LINUX - intel - impi,mvapich2 - /dtemp/$PROJECT/$USER - /dtemp/st49401/sing201/acme/inputdata/ - /dtemp/st49401/sing201/acme/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - $CIME_OUTPUT_ROOT/acme/acme_baselines - $CIME_OUTPUT_ROOT/acme/acme_baselines/cprnc/cprnc - 8 - slurm - balwinder.singh -at- pnnl.gov - 16 - 16 - TRUE - - - - - mpirun - - -np {{ total_tasks }} - - - - srun - - --mpi=none - --ntasks={{ total_tasks }} - --cpu_bind=sockets --cpu_bind=verbose - --kill-on-bad-exit - - - - /opt/lmod/7.8.4/init/env_modules_python.py - /etc/profile.d/modules.csh - /etc/profile.d/modules.sh - /opt/lmod/7.8.4/libexec/lmod python - module - module - - - - - python/2.7.9 - - - intel/ips_18 - mkl/14.0 - - - impi/4.1.2.040 - - - mvapich2/1.9 - - - netcdf/4.3.0 - - - $CIME_OUTPUT_ROOT/csmruns/$CASE/run - $CIME_OUTPUT_ROOT/csmruns/$CASE/bld - - 64M - $ENV{NETCDF_ROOT} - - - $ENV{MLIBHOME} - intel - - - - - PNL Haswell cluster, OS is Linux, batch system is SLURM - constance - LINUX - intel,pgi,nag - mvapich2,openmpi,intelmpi,mvapich - /pic/scratch/$USER - /pic/projects/climate/csmdata/ - /pic/projects/climate/csmdata/atm/datm7 - /pic/scratch/$USER/archive/$CASE - /pic/projects/climate/acme_baselines/$COMPILER - /pic/projects/climate/acme_baselines/cprnc/cprnc - 8 - slurm - balwinder.singh -at- pnnl.gov - 24 - 24 - FALSE - - - - - srun - - --mpi=none - --ntasks={{ total_tasks }} - --cpu_bind=sockets --cpu_bind=verbose - --kill-on-bad-exit - - - - srun - - --ntasks={{ total_tasks }} - --cpu_bind=sockets --cpu_bind=verbose - --kill-on-bad-exit - - - - mpirun - - -n {{ total_tasks }} - - - - mpirun - - -n {{ total_tasks }} - - - - /share/apps/modules/Modules/3.2.10/init/perl.pm - /share/apps/modules/Modules/3.2.10/init/python.py - /etc/profile.d/modules.csh - /etc/profile.d/modules.sh - /share/apps/modules/Modules/3.2.10/bin/modulecmd perl - /share/apps/modules/Modules/3.2.10/bin/modulecmd python - module - module - - - - - perl/5.20.0 - cmake/3.3.0 - python/2.7.8 - - - intel/15.0.1 - mkl/15.0.1 - - - pgi/14.10 - - - nag/6.0 - mkl/15.0.1 - - - mvapich2/2.1 - - - mvapich2/2.1 - - - mvapich2/2.1 - - - mvapich2/2.3b - - - intelmpi/5.0.1.035 - - - openmpi/1.8.3 - - - netcdf/4.3.2 - - - netcdf/4.3.2 - - - netcdf/4.4.1.1 - - - /pic/scratch/$USER/csmruns/$CASE/run - /pic/scratch/$USER/csmruns/$CASE/bld - - 64M - $ENV{NETCDF_LIB}/../ - - - $ENV{MLIB_LIB} - - - $ENV{MLIB_LIB} - - - - - PNL E3SM Intel Xeon Gold 6148(Skylake) nodes, OS is Linux, SLURM - compy - LINUX - intel,pgi - impi,mvapich2 - /compyfs - .* - /compyfs/$USER/e3sm_scratch - /compyfs/inputdata - /compyfs/inputdata/atm/datm7 - /compyfs/$USER/e3sm_scratch/archive/$CASE - /compyfs/e3sm_baselines/$COMPILER - /compyfs/e3sm_baselines/cprnc/cprnc - 8 - e3sm_integration - slurm - bibi.mathew -at- pnnl.gov - 40 - 40 - TRUE - - - - - srun - - --mpi=none - --ntasks={{ total_tasks }} - 
--kill-on-bad-exit - -l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc} - - - - srun - - --mpi=pmi2 - --ntasks={{ total_tasks }} - --kill-on-bad-exit - -l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc} - - - - /share/apps/modules/init/perl.pm - /share/apps/modules/init/python.py - /etc/profile.d/modules.csh - /etc/profile.d/modules.sh - /share/apps/modules/bin/modulecmd perl - /share/apps/modules/bin/modulecmd python - module - module - - - - - cmake/3.11.4 - - - intel/19.0.3 - - - pgi/18.10 - - - mvapich2/2.3.1 - - - intelmpi/2019u3 - - - netcdf/4.6.3 - pnetcdf/1.9.0 - mkl/2019u3 - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - $ENV{NETCDF_ROOT}/ - $ENV{MKLROOT} - - - 0 - 1 - - - 1 - - - 10 - - - 64M - spread - threads - - - - - ORNL XK6, os is Linux, 32 pes/node, batch system is PBS - oic5 - LINUX - gnu - mpich,openmpi - /home/$USER/models/ACME - /home/zdr/models/ccsm_inputdata - /home/zdr/models/ccsm_inputdata/atm/datm7 - /home/$USER/models/ACME/run/archive/$CASE - 32 - e3sm_developer - pbs - dmricciuto - 32 - 32 - - /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpirun - - -np {{ total_tasks }} - --hostfile $ENV{PBS_NODEFILE} - - - - - - - /home/$USER/models/ACME/run/$CASE/run - /home/$USER/models/ACME/run/$CASE/bld - - - - OR-CONDO, CADES-CCSI, os is Linux, 16 pes/nodes, batch system is PBS - or-condo - LINUX - gnu,intel - openmpi - /lustre/or-hydra/cades-ccsi/scratch/$USER - /lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata - /lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /lustre/or-hydra/cades-ccsi/proj-shared/project_acme/baselines/$COMPILER - /lustre/or-hydra/cades-ccsi/proj-shared/tools/cprnc.orcondo - 4 - e3sm_developer - pbs - yinj -at- ornl.gov - 32 - 32 - FALSE - - mpirun - - -np {{ total_tasks }} - --hostfile $ENV{PBS_NODEFILE} - - - - - - - /usr/share/Modules/init/sh - /usr/share/Modules/init/csh - /usr/share/Modules/init/perl.pm - /usr/share/Modules/init/python.py - module - module - /usr/bin/modulecmd perl - /usr/bin/modulecmd python - - - - - PE-gnu - - - mkl/2017 - /lustre/or-hydra/cades-ccsi/proj-shared/tools/cmake/3.6.1 - python/2.7.12 - /lustre/or-hydra/cades-ccsi/proj-shared/tools/nco/4.6.4 - hdf5-parallel/1.8.17 - netcdf-hdf5parallel/4.3.3.1 - pnetcdf/1.9.0 - - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - /software/user_tools/current/cades-ccsi/petsc4pf/openmpi-1.10-gcc-5.3 - - - - - ORNL XK6, os is CNL, 16 pes/node, batch system is PBS - titan - Received node event ec_node - CNL - pgi,pgiacc,intel,cray - mpich - cli115 - $ENV{PROJWORK}/$PROJECT - cli106,cli115,cli127,cli133,csc190 - $ENV{HOME}/acme_scratch/$PROJECT - /lustre/atlas1/cli900/world-shared/cesm/inputdata - /lustre/atlas1/cli900/world-shared/cesm/inputdata/atm/datm7 - $ENV{MEMBERWORK}/$PROJECT/archive/$CASE - /lustre/atlas1/cli115/world-shared/E3SM/baselines/$COMPILER - /lustre/atlas1/cli900/world-shared/cesm/tools/cprnc/cprnc.titan - 8 - e3sm_developer - 4 - pbs - TRUE - E3SM - 16 - 16 - TRUE - - aprun - - - - aprun - - - - - /opt/modules/default/init/sh - /opt/modules/default/init/csh - /opt/modules/default/init/python.py - /opt/modules/default/init/perl.pm - - /opt/modules/default/bin/modulecmd perl - /opt/modules/default/bin/modulecmd python - module - module - - - - python/2.7.9 - subversion - subversion/1.9.3 - cmake - cmake3/3.6.0 - - - - PrgEnv-cray - PrgEnv-gnu - PrgEnv-intel - 
PrgEnv-pathscale - PrgEnv-pgi - pgi pgi/17.5.0 - cray-mpich - cray-libsci - atp - esmf - cudatoolkit - cray-mpich/7.6.3 - cray-libsci/16.11.1 - atp/2.1.1 - esmf/5.2.0rp2 - cudatoolkit - - - PrgEnv-cray - PrgEnv-gnu - PrgEnv-intel - PrgEnv-pathscale - PrgEnv-pgi - pgi pgi/17.5.0 - cray-mpich - cray-libsci - atp - esmf - cray-mpich/7.6.3 - cray-libsci/16.11.1 - atp/2.1.1 - esmf/5.2.0rp2 - - - PrgEnv-pgi - PrgEnv-cray - PrgEnv-gnu - PrgEnv-pathscale - PrgEnv-intel - intel - cray-libsci - cray-mpich - atp - intel/18.0.1.163 - cray-mpich/7.6.3 - atp/2.1.1 - - - PrgEnv-pgi - PrgEnv-gnu - PrgEnv-intel - PrgEnv-pathscale - PrgEnv-cray - cce - cray-mpich - cce/8.6.4 - cray-mpich/7.6.3 - - - - cray-netcdf - cray-netcdf-hdf5parallel - cray-netcdf/4.4.1.1.3 - - - cray-netcdf - cray-netcdf-hdf5parallel - cray-netcdf/4.4.1.1.3 - cray-parallel-netcdf/1.8.1.3 - - - $ENV{PROJWORK}/$PROJECT/$USER/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - 0.1 - - - - $COMPILER - $MPILIB - 1 - 1 - - 128M - 128M - - - - - - istanbul - 1 - - - dynamic - - - - - ORNL XC30, os is CNL, 16 pes/node, batch system is PBS - eos - CNL - intel - mpich - $ENV{PROJWORK}/$PROJECT - cli115,cli127,cli106,csc190 - $ENV{HOME}/acme_scratch/$PROJECT - /lustre/atlas1/cli900/world-shared/cesm/inputdata - /lustre/atlas1/cli900/world-shared/cesm/inputdata/atm/datm7 - $ENV{MEMBERWORK}/$PROJECT/archive/$CASE - /lustre/atlas1/cli900/world-shared/cesm/baselines/$COMPILER - /lustre/atlas1/cli900/world-shared/cesm/tools/cprnc/cprnc.eos - 8 - e3sm_developer - pbs - E3SM - 32 - 16 - TRUE - - aprun - - -j {{ hyperthreading }} - -S {{ tasks_per_numa }} - -n {{ total_tasks }} - -N $MAX_MPITASKS_PER_NODE - -d $ENV{OMP_NUM_THREADS} - -cc numa_node - - - - - - - $MODULESHOME/init/sh - $MODULESHOME/init/csh - $MODULESHOME/init/perl.pm - $MODULESHOME/init/python.py - module - module - $MODULESHOME/bin/modulecmd perl - $MODULESHOME/bin/modulecmd python - - intel - cray - cray-parallel-netcdf - cray-libsci - cray-netcdf - cray-netcdf-hdf5parallel - netcdf - - - intel/18.0.1.163 - papi - - - PrgEnv-cray - cce cce/8.1.9 - cray-libsci/12.1.00 - - - PrgEnv-gnu - gcc gcc/4.8.0 - cray-libsci/12.1.00 - - - cray-netcdf/4.3.2 - - - cray-netcdf-hdf5parallel/4.3.3.1 - cray-parallel-netcdf/1.6.1 - - - cmake3/3.2.3 - python/2.7.9 - - - $ENV{MEMBERWORK}/$PROJECT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - 1 - 1 - - 64M - - - - - - LANL Linux Cluster, 36 pes/node, batch system slurm - gr-fe.*.lanl.gov - LINUX - gnu,intel - mvapich,openmpi - climateacme - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/scratch - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/atm/datm7 - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/archive/$CASE - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER - /turquoise/usr/projects/climate/SHARED_CLIMATE/software/wolf/cprnc/v0.40/cprnc - 4 - e3sm_developer - slurm - luke.vanroekel @ gmail.com - 36 - 32 - TRUE - - mpirun - - -n {{ total_tasks }} - - - - srun - - -n {{ total_tasks }} - - - - mpirun - - -n {{ total_tasks }} - - - - - - - /usr/share/Modules/init/perl.pm - /usr/share/Modules/init/python.py - /etc/profile.d/z00_lmod.sh - /etc/profile.d/z00_lmod.csh - /usr/share/lmod/lmod/libexec/lmod perl - /usr/share/lmod/lmod/libexec/lmod python - module - module - - - /usr/projects/climate/SHARED_CLIMATE/modulefiles/all - python/anaconda-2.7-climate - - - gcc/5.3.0 - - - intel/17.0.1 - - - openmpi/1.10.5 - - - mvapich2/2.2 - - - netcdf/4.4.1 - - - parallel-netcdf/1.5.0 - - - 
mkl - - - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/cases/$CASE/run - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/cases/$CASE/bld - - romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable - - - /opt/intel/17.0/mkl - - - - - LANL Linux Cluster, 36 pes/node, batch system slurm - ba-fe.*.lanl.gov - LINUX - gnu,intel - mvapich,openmpi - climateacme - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/scratch - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/atm/datm7 - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/archive/$CASE - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER - /turquoise/usr/projects/climate/SHARED_CLIMATE/software/wolf/cprnc/v0.40/cprnc - 4 - e3sm_developer - slurm - e3sm - 36 - 32 - TRUE - - mpirun - - -n {{ total_tasks }} - - - - srun - - -n {{ total_tasks }} - - - - mpirun - - -n {{ total_tasks }} - - - - - - - /usr/share/Modules/init/perl.pm - /usr/share/Modules/init/python.py - /etc/profile.d/z00_lmod.sh - /etc/profile.d/z00_lmod.csh - /usr/share/lmod/lmod/libexec/lmod perl - /usr/share/lmod/lmod/libexec/lmod python - module - module - - - /usr/projects/climate/SHARED_CLIMATE/modulefiles/all - python/anaconda-2.7-climate - - - gcc/6.4.0 - - - intel/17.0.4 - - - openmpi/2.1.2 - - - mvapich2/2.2 - - - netcdf/4.4.1.1 - - - parallel-netcdf/1.8.1 - - - mkl - - - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/cases/$CASE/run - /lustre/scratch3/turquoise/$ENV{USER}/E3SM/cases/$CASE/bld - - romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable - - - /opt/intel/17.0/mkl - - - - - Mesabi batch queue - LINUX - intel - openmpi - /home/reichpb/scratch - /home/reichpb/shared/cesm_inputdata - /home/reichpb/shared/cesm_inputdata/atm/datm7 - USERDEFINED_optional_run - USERDEFINED_optional_run/$COMPILER - USERDEFINED_optional_test - 2 - pbs - chen1718 at umn dot edu - 24 - 24 - TRUE - - aprun - - -n {{ total_tasks }} - -S {{ tasks_per_numa }} - -N $MAX_MPITASKS_PER_NODE - -d $ENV{OMP_NUM_THREADS} - - - - $CASEROOT/run - - $CASEROOT/exedir - - - - - - - - - - - - - - Itasca batch queue - LINUX - intel - openmpi - /home/reichpb/scratch - /home/reichpb/shared/cesm_inputdata - /home/reichpb/shared/cesm_inputdata/atm/datm7 - USERDEFINED_optional_run - USERDEFINED_optional_run/$COMPILER - USERDEFINED_optional_test - 2 - pbs - chen1718 at umn dot edu - 8 - 8 - - aprun - - -n {{ total_tasks }} - -S {{ tasks_per_numa }} - -N $MAX_MPITASKS_PER_NODE - -d $ENV{OMP_NUM_THREADS} - - - - $CASEROOT/run - - $CASEROOT/exedir - - - - - - - - - - - - - - Lawrencium LR6 cluster at LBL, OS is Linux (intel), batch system is SLURM - n000* - LINUX - intel,gnu - openmpi - ac_acme - /global/scratch/$ENV{USER} - /global/scratch/$ENV{USER}/cesm_input_datasets/ - /global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7 - $CIME_OUTPUT_ROOT/cesm_archive/$CASE - $CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER - /$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc - 4 - slurm - gbisht at lbl dot gov - 12 - 12 - TRUE - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - /etc/profile.d/modules.sh - /etc/profile.d/modules.csh - /usr/Modules/init/perl.pm - /usr/Modules/python.py - module - module - /usr/Modules/bin/modulecmd perl - /usr/Modules/bin/modulecmd python - - - cmake - perl xml-libxml switch python/2.7 - - - intel/2016.4.072 - mkl - - - netcdf/4.4.1.1-intel-s - - - openmpi - 
netcdf/4.4.1.1-intel-p - - - gcc/6.3.0 - lapack/3.8.0-gcc - - - netcdf/5.4.1.1-gcc-s - openmpi/2.0.2-gcc - - - openmpi/3.0.1-gcc - netcdf/4.4.1.1-gcc-p - openmpi/2.0.2-gcc - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - - Lawrencium LR6 cluster at LBL, OS is Linux (intel), batch system is SLURM - n000* - LINUX - intel,gnu - openmpi - ac_acme - /global/scratch/$ENV{USER} - /global/scratch/$ENV{USER}/cesm_input_datasets/ - /global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7 - $CIME_OUTPUT_ROOT/cesm_archive/$CASE - $CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER - /$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc - 4 - slurm - gbisht at lbl dot gov - 12 - 12 - TRUE - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - /etc/profile.d/modules.sh - /etc/profile.d/modules.csh - /usr/Modules/init/perl.pm - /usr/Modules/python.py - module - module - /usr/Modules/bin/modulecmd perl - /usr/Modules/bin/modulecmd python - - - cmake - perl xml-libxml switch python/2.7 - - - intel/2016.4.072 - mkl - - - netcdf/4.4.1.1-intel-s - - - openmpi - netcdf/4.4.1.1-intel-p - - - gcc/6.3.0 - lapack/3.8.0-gcc - - - netcdf/5.4.1.1-gcc-s - openmpi/2.0.2-gcc - - - openmpi/3.0.1-gcc - netcdf/4.4.1.1-gcc-p - openmpi/2.0.2-gcc - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - - Lawrencium LR6 cluster at LBL, OS is Linux (intel), batch system is SLURM - n000* - LINUX - intel,gnu - openmpi - ac_acme - /global/scratch/$ENV{USER} - /global/scratch/$ENV{USER}/cesm_input_datasets/ - /global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7 - $CIME_OUTPUT_ROOT/cesm_archive/$CASE - $CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER - /$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc - 4 - slurm - gbisht at lbl dot gov - 12 - 12 - TRUE - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - /etc/profile.d/modules.sh - /etc/profile.d/modules.csh - /usr/Modules/init/perl.pm - /usr/Modules/python.py - module - module - /usr/Modules/bin/modulecmd perl - /usr/Modules/bin/modulecmd python - - - cmake - perl xml-libxml switch python/2.7 - - - intel/2016.4.072 - mkl - - - netcdf/4.4.1.1-intel-s - - - openmpi - netcdf/4.4.1.1-intel-p - - - gcc/6.3.0 - lapack/3.8.0-gcc - - - netcdf/5.4.1.1-gcc-s - openmpi/2.0.2-gcc - - - openmpi/3.0.1-gcc - netcdf/4.4.1.1-gcc-p - openmpi/2.0.2-gcc - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - - small developer workhorse at lbl climate sciences - LINUX - gnu - openmpi - ngeet - /home/lbleco/acme/ - /home/lbleco/cesm/cesm_input_datasets/ - /home/lbleco/cesm/cesm_input_datasets/atm/datm7/ - /home/lbleco/acme/cesm_archive/$CASE - /home/lbleco/acme/cesm_baselines/$COMPILER - /home/lbleco/cesm/cesm_tools/cprnc/cprnc - 1 - none - rgknox at lbl gov - 4 - 4 - FALSE - - - - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - - - - ORNL pre-Summit testbed. Node: 2x POWER8 + 4x Tesla P100, 20 cores/node, 8 HW threads/core. 
- summitdev-* - LINUX - ibm,pgi,pgiacc - spectrum-mpi,mpi-serial - csc249 - CSC249ADSE15 - /lustre/atlas/proj-shared/$PROJECT - cli115,cli127,cli106,csc190 - $ENV{HOME}/acme_scratch/$PROJECT - /lustre/atlas1/cli900/world-shared/cesm/inputdata - /lustre/atlas1/cli900/world-shared/cesm/inputdata/atm/datm7 - /lustre/atlas/scratch/$ENV{USER}/$PROJECT/archive/$CASE - /lustre/atlas1/cli900/world-shared/cesm/baselines/$COMPILER - /lustre/atlas1/cli900/world-shared/cesm/tools/cprnc/cprnc - 32 - e3sm_developer - lsf - acme - 160 - 80 - TRUE - - /lustre/atlas/world-shared/cli900/helper_scripts/mpirun.summitdev - - - -n {{ total_tasks }} -N $MAX_MPITASKS_PER_NODE - - - - - - - /sw/summitdev/lmod/7.4.0/rhel7.2_gnu4.8.5/lmod/7.4/init/sh - /sw/summitdev/lmod/7.4.0/rhel7.2_gnu4.8.5/lmod/7.4/init/csh - /sw/summitdev/lmod/7.4.0/rhel7.2_gnu4.8.5/lmod/7.4/init/env_modules_python.py - /sw/summitdev/lmod/7.4.0/rhel7.2_gnu4.8.5/lmod/7.4/init/perl - - module - /sw/summitdev/lmod/7.4.0/rhel7.2_gnu4.8.5/lmod/lmod/libexec/lmod python - module - module - - - - - - - DefApps - python/3.5.2 - subversion/1.9.3 - git/2.13.0 - cmake/3.6.1 - essl/5.5.0-20161110 - netlib-lapack/3.6.1 - - - - xl - pgi/17.9 - spectrum-mpi/10.1.0.4-20170915 - - - - pgi - xl/20170914-beta - spectrum-mpi/10.1.0.4-20170915 - - - - - - netcdf/4.4.1 - netcdf-fortran/4.4.4 - - - - netcdf/4.4.1 - netcdf-fortran/4.4.4 - parallel-netcdf/1.7.0 - hdf5/1.10.0-patch1 - - - netcdf/4.4.1 - netcdf-fortran/4.4.4 - parallel-netcdf/1.7.0 - hdf5/1.10.0-patch1 - - - /lustre/atlas/scratch/$ENV{USER}/$PROJECT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - - - - $COMPILER - $MPILIB - 128M - $ENV{OLCF_NETCDF_ROOT} - $ENV{OLCF_NETCDF_FORTRAN_ROOT} - $ENV{OLCF_HDF5_ROOT} - $ENV{OLCF_ESSL_ROOT} - $ENV{OLCF_NETLIB_LAPACK_ROOT} - - - - - - $ENV{OLCF_PARALLEL_NETCDF_ROOT} - - - - - ORNL Summit. Node: 2x POWER9 + 6x Volta V100, 22 cores/socket, 4 HW threads/core. 
- .*summit.* - LINUX - ibm,pgi,pgiacc,gnu - spectrum-mpi,mpi-serial - cli115 - cli115 - /gpfs/alpine/proj-shared/$PROJECT - cli115,cli127 - /gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch - - - - /gpfs/alpine/cli115/world-shared/e3sm/inputdata - /gpfs/alpine/cli115/world-shared/e3sm/inputdata/atm/datm7 - /gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/archive/$CASE - /gpfs/alpine/cli115/world-shared/e3sm/baselines/$COMPILER - /gpfs/alpine/cli115/world-shared/e3sm/tools/cprnc.summit/cprnc - 32 - e3sm_developer - 4 - lsf - e3sm - 84 - 84 - TRUE - - - /gpfs/alpine/world-shared/cli115/mpirun.summit - - - -n {{ total_tasks }} -N $MAX_MPITASKS_PER_NODE - - - - - /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/sh - /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/csh - /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/env_modules_python.py - /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/perl - - module - /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/7.7.10/libexec/lmod python - module - module - - - - - - DefApps - python/3.5.2 - subversion/1.9.3 - git/2.13.0 - cmake/3.13.4 - essl/6.1.0-2 - netlib-lapack/3.8.0 - - - - pgi/19.4 - - - xl/16.1.1-3 - - - gcc/6.4.0 - - - - netcdf/4.6.1 - netcdf-fortran/4.4.4 - - - - - spectrum-mpi/10.3.0.1-20190611 - - - spectrum-mpi/10.3.0.1-20190611 - - - spectrum-mpi/10.3.0.1-20190611 - - - - parallel-netcdf/1.8.1 - hdf5/1.10.3 - - - - - $CIME_OUTPUT_ROOT/$CASE/run - $CIME_OUTPUT_ROOT/$CASE/bld - - - - - - - - $COMPILER - $MPILIB - 128M - $ENV{OLCF_NETCDF_ROOT} - $ENV{OLCF_NETCDF_FORTRAN_ROOT} - $ENV{OLCF_NETCDF_FORTRAN_ROOT} - $ENV{OLCF_NETCDF_FORTRAN_ROOT} - $ENV{OLCF_ESSL_ROOT} - $ENV{OLCF_NETLIB_LAPACK_ROOT} - - - $ENV{OMP_NUM_THREADS} - - - $ENV{OLCF_HDF5_ROOT} - $ENV{OLCF_PARALLEL_NETCDF_ROOT} - - - - - ${EXEROOT}/e3sm.exe - >> e3sm.log.$LID 2>&1 - - - diff --git a/config/e3sm/machines/config_pio.xml b/config/e3sm/machines/config_pio.xml deleted file mode 100644 index 65f503aeada..00000000000 --- a/config/e3sm/machines/config_pio.xml +++ /dev/null @@ -1,368 +0,0 @@ - - - - - - - - - - - $MAX_MPITASKS_PER_NODE - 60 - 128 - -99 - - - - - - 0 - - - - - - - - pnetcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - netcdf - - - - - - $PIO_VERSION - - - - - - - - - - 1 - 1 - - - - - - - - - - - 16 - - - - - - - - - - - - - 16 - - - - - - netcdf - - - - - - - - - - 16 - - - - - - - - - - - - 16 - - - - - - - - - - - - 16 - - - - - - netcdf - - - - - - - - - - 16 - - - - - - netcdf - - - - - - - - - - 1 - - - - - - - - - - - - 1 - - - - - - - - - - - - 1 - - - - - - diff --git a/config/e3sm/machines/config_workflow.xml b/config/e3sm/machines/config_workflow.xml deleted file mode 100644 index 7fcdcec8083..00000000000 --- a/config/e3sm/machines/config_workflow.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - - $BUILD_COMPLETE and not $TEST - - - - $BUILD_COMPLETE and $TEST - - - - - case.run or case.test - $DOUT_S - - 1 - 0:20:00 - - - - diff --git a/config/e3sm/machines/syslog.anvil b/config/e3sm/machines/syslog.anvil deleted file mode 100755 index c342495f20d..00000000000 --- a/config/e3sm/machines/syslog.anvil +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/csh -f -# anvil syslog script: -# mach_syslog - -set sample_interval = $1 -set jid = $2 -set lid = $3 -set run = $4 -set timing = $5 -set dir = $6 - -# Wait until job task-to-node mapping information is output before saving output file. 
-# Target length was determined empirically (maximum number of lines before job mapping
-# information starts + number of nodes), and it may need to be adjusted in the future.
-# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail.)
-set nnodes = `squeue --noheader -o '%D' --job $jid | sed 's/^0*\([0-9]*\)/\1/' `
-if ("X$nnodes" == "X") set nnodes = 0
-@ target_lines = 150 + $nnodes
-sleep 10
-set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' `
-while ($outlth < $target_lines)
-  sleep 60
-  set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' `
-end
-
-set TimeLeft = `squeue --noheader -O 'timeleft' --job $jid `
-set TimeLeftwday = `echo $TimeLeft | grep '-' `
-if ("X$TimeLeftwday" == "X") then
-  set left_days = 0
-  set TimeLeftwhour = `echo $TimeLeft | grep '.*:.*:.*' `
-  if ("X$TimeLeftwhour" == "X") then
-    set left_hours = 0
-    set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\1/' `
-    set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\2/' `
-  else
-    set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' `
-    set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' `
-    set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' `
-  endif
-else
-  set left_days = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' `
-  set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' `
-  set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' `
-  set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\4/' `
-endif
-
-if ("X$left_days" == "X") set left_days = 0
-if ("X$left_hours" == "X") set left_hours = 0
-if ("X$left_mins" == "X") set left_mins = 0
-if ("X$left_secs" == "X") set left_secs = 0
-@ remaining = 86400 * $left_days + 3600 * $left_hours + 60 * $left_mins + $left_secs
-cat > $run/Walltime.Remaining << EOF1
-$remaining $sample_interval
-EOF1
-if ($remaining > 0) then
-  squeue -t R -o "%.10i %.10P %.15u %.20a %.2t %.6D %.8C %.12M %.12l %j" > $dir/squeuef.$lid.$remaining
-  squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining
-endif
-
-while ($remaining > 0)
-  echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step
-  grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step
-  echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step
-  grep -Fa -e "timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step
-  echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step
-  grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step
-  echo "Wallclock time remaining: $remaining" >> $dir/ice.log.$lid.step
-  grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step
-  echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step
-  grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step
-  grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all
-  echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step
-  tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step
-  /bin/cp --preserve=timestamps -u $timing/* $dir
-  squeue -t R -o "%.10i %.10P %.15u %.20a %.2t %.6D %.8C %.12M %.12l %j" > $dir/squeuef.$lid.$remaining
-  squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining
-  chmod a+r $dir/*
-  # sleep $sample_interval
-  set sleep_remaining = $sample_interval
-  while ($sleep_remaining > 120)
-    sleep 120
-    @ sleep_remaining = $sleep_remaining - 120
-  end
-  sleep $sleep_remaining
-  # query remaining time
-  set TimeLeft = `squeue --noheader -O 'timeleft' --job $jid `
-  set TimeLeftwday = `echo $TimeLeft | grep '-' `
-  if ("X$TimeLeftwday" == "X") then
-    set left_days = 0
-    set TimeLeftwhour = `echo $TimeLeft | grep '.*:.*:.*' `
-    if ("X$TimeLeftwhour" == "X") then
-      set left_hours = 0
-      set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\1/' `
-      set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\2/' `
-    else
-      set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' `
-      set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' `
-      set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' `
-    endif
-  else
-    set left_days = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' `
-    set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' `
-    set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' `
-    set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\4/' `
-  endif
-  if ("X$left_days" == "X") set left_days = 0
-  if ("X$left_hours" == "X") set left_hours = 0
-  if ("X$left_mins" == "X") set left_mins = 0
-  if ("X$left_secs" == "X") set left_secs = 0
-  @ remaining = 86400 * $left_days + 3600 * $left_hours + 60 * $left_mins + $left_secs
-  cat > $run/Walltime.Remaining << EOF2
-$remaining $sample_interval
-EOF2
-
-end
diff --git a/config/e3sm/machines/syslog.compy b/config/e3sm/machines/syslog.compy
deleted file mode 100755
index 4c1ada6139c..00000000000
--- a/config/e3sm/machines/syslog.compy
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/bin/csh -f
-# compy syslog script:
-# mach_syslog
-
-set sample_interval = $1
-set jid = $2
-set lid = $3
-set run = $4
-set timing = $5
-set dir = $6
-
-# Wait until job task-to-node mapping information is output before saving output file.
-# Target length was determined empirically (maximum number of lines before job mapping
-# information starts + number of nodes), and it may need to be adjusted in the future.
-# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail.)
-set nnodes = `squeue --noheader -o '%D' --job $jid | sed 's/^0*\([0-9]*\)/\1/' ` -if ("X$nnodes" == "X") set nnodes = 0 -@ target_lines = 150 + $nnodes -sleep 10 -set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -while ($outlth < $target_lines) - sleep 60 - set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -end - -set TimeLeft = `squeue --noheader -O 'timeleft' --job $jid ` -set TimeLeftwday = `echo $TimeLeft | grep '-' ` -if ("X$TimeLeftwday" == "X") then - set left_days = 0 - set TimeLeftwhour = `echo $TimeLeft | grep '.*:.*:.*' ` - if ("X$TimeLeftwhour" == "X") then - set left_hours = 0 - set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - else - set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` - endif -else - set left_days = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` - set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\4/' ` -endif - -if ("X$left_days" == "X") set left_days = 0 -if ("X$left_hours" == "X") set left_hours = 0 -if ("X$left_mins" == "X") set left_mins = 0 -if ("X$left_secs" == "X") set left_secs = 0 -@ remaining = 86400 * $left_days + 3600 * $left_hours + 60 * $left_mins + $left_secs -cat > $run/Walltime.Remaining < $dir/squeuef.$lid.$remaining - squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining -endif - -while ($remaining > 0) - echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step - grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step - grep -Fa -e "timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step - grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ice.log.$lid.step - grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all - echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step - tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step - /bin/cp --preserve=timestamps -u $timing/* $dir - squeue -t R -o "%.10i %.10P %.15u %.20a %.2t %.6D %.8C %.12M %.12l %j" > $dir/squeuef.$lid.$remaining - squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining - chmod a+r $dir/* - # sleep $sample_interval - set sleep_remaining = $sample_interval - while ($sleep_remaining > 120) - sleep 120 - @ sleep_remaining = $sleep_remaining - 120 - end - sleep $sleep_remaining - # query remaining time - set TimeLeft = `squeue --noheader -O 'timeleft' --job $jid ` - set TimeLeftwday = `echo 
$TimeLeft | grep '-' ` - if ("X$TimeLeftwday" == "X") then - set left_days = 0 - set TimeLeftwhour = `echo $TimeLeft | grep '.*:.*:.*' ` - if ("X$TimeLeftwhour" == "X") then - set left_hours = 0 - set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - else - set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` - endif - else - set left_days = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set left_hours = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - set left_mins = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` - set left_secs = `echo $TimeLeft | sed 's/^0*\([0-9]*\)-0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\4/' ` - endif - if ("X$left_days" == "X") set left_days = 0 - if ("X$left_hours" == "X") set left_hours = 0 - if ("X$left_mins" == "X") set left_mins = 0 - if ("X$left_secs" == "X") set left_secs = 0 - @ remaining = 86400 * $left_days + 3600 * $left_hours + 60 * $left_mins + $left_secs - cat > $run/Walltime.Remaining << EOF2 -$remaining $sample_interval -EOF2 - -end diff --git a/config/e3sm/machines/syslog.cori-haswell b/config/e3sm/machines/syslog.cori-haswell deleted file mode 100755 index a57e02bab18..00000000000 --- a/config/e3sm/machines/syslog.cori-haswell +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/csh -f -# cori-haswell syslog script: -# mach_syslog - -set sample_interval = $1 -set jid = $2 -set lid = $3 -set run = $4 -set timing = $5 -set dir = $6 - -# Wait until job task-to-node mapping information is output before saving output file. -# Target length was determined empirically (maximum number of lines before job mapping -# information starts + number of nodes), and it may need to be adjusted in the future. -# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail.) 
-set nnodes = `sqs -f $jid | grep -F NumNodes | sed 's/ *NumNodes=\([0-9]*\) .*/\1/' ` -@ target_lines = 150 + $nnodes -sleep 10 -set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -while ($outlth < $target_lines) - sleep 60 - set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -end - -set TimeLimit = `sqs -f $jid | grep -F TimeLimit | sed 's/^ *RunTime=.*TimeLimit=\([0-9]*:[0-9]*:[0-9]*\) .*/\1/' ` -set limit_hours = `echo $TimeLimit | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` -set limit_mins = `echo $TimeLimit | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` -set limit_secs = `echo $TimeLimit | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` -if ("X$limit_hours" == "X") set limit_hours = 0 -if ("X$limit_mins" == "X") set limit_mins = 0 -if ("X$limit_secs" == "X") set limit_secs = 0 -@ limit = 3600 * $limit_hours + 60 * $limit_mins + $limit_secs - -set RunTime = `sqs -f $jid | grep -F RunTime | sed 's/^ *RunTime=\([0-9]*:[0-9]*:[0-9]*\) .*/\1/' ` -set runt_hours = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` -set runt_mins = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` -set runt_secs = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` -if ("X$runt_hours" == "X") set runt_hours = 0 -if ("X$runt_mins" == "X") set runt_mins = 0 -if ("X$runt_secs" == "X") set runt_secs = 0 -@ runt = 3600 * $runt_hours + 60 * $runt_mins + $runt_secs - -@ remaining = $limit - $runt -cat > $run/Walltime.Remaining < $dir/squeuef.$lid.$remaining - squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining - # squeue -t R -o "%.10i %R" > $dir/squeueR.$lid.$remaining -endif - -while ($remaining > 0) - echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step - grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step - grep -Fa -e "timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step - grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ice.log.$lid.step - grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all - echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step - tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step - /bin/cp --preserve=timestamps -u $timing/* $dir - # sqs -w -a | grep "^[0-9]* *R *"> $dir/sqswr.$lid.$remaining - squeue -t R -o "%.10i %.15P %.20j %.10u %.7a %.2t %.6D %.8C %.10M %.10l" > $dir/squeuef.$lid.$remaining - squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining - # squeue -t R -o "%.10i %R" > $dir/squeueR.$lid.$remaining - chmod a+r $dir/* - # sleep $sample_interval - set sleep_remaining = $sample_interval - while ($sleep_remaining > 120) - sleep 120 - @ sleep_remaining = $sleep_remaining - 120 - end - sleep $sleep_remaining - set RunTime = `sqs -f $jid | grep -F RunTime | sed 's/^ *RunTime=\([0-9]*:[0-9]*:[0-9]*\) .*/\1/' ` - set runt_hours = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` 
- set runt_mins = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - set runt_secs = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` - if ("X$runt_hours" == "X") set runt_hours = 0 - if ("X$runt_mins" == "X") set runt_mins = 0 - if ("X$runt_secs" == "X") set runt_secs = 0 - @ runt = 3600 * $runt_hours + 60 * $runt_mins + $runt_secs - @ remaining = $limit - $runt - cat > $run/Walltime.Remaining << EOF2 -$remaining $sample_interval -EOF2 - -end diff --git a/config/e3sm/machines/syslog.cori-knl b/config/e3sm/machines/syslog.cori-knl deleted file mode 100755 index 80876f1a8d6..00000000000 --- a/config/e3sm/machines/syslog.cori-knl +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/csh -f -# cori-knl syslog script: -# mach_syslog - -set sample_interval = $1 -set jid = $2 -set lid = $3 -set run = $4 -set timing = $5 -set dir = $6 - -# Wait until job task-to-node mapping information is output before saving output file. -# Target length was determined empirically (maximum number of lines before job mapping -# information starts + number of nodes), and it may need to be adjusted in the future. -# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail.) -set nnodes = `sqs -f $jid | grep -F NumNodes | sed 's/ *NumNodes=\([0-9]*\) .*/\1/' ` -@ target_lines = 150 + $nnodes -sleep 10 -set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -while ($outlth < $target_lines) - sleep 60 - set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -end - -set TimeLimit = `sqs -f $jid | grep -F TimeLimit | sed 's/^ *RunTime=.*TimeLimit=\([0-9]*:[0-9]*:[0-9]*\) .*/\1/' ` -set limit_hours = `echo $TimeLimit | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` -set limit_mins = `echo $TimeLimit | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` -set limit_secs = `echo $TimeLimit | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` -if ("X$limit_hours" == "X") set limit_hours = 0 -if ("X$limit_mins" == "X") set limit_mins = 0 -if ("X$limit_secs" == "X") set limit_secs = 0 -@ limit = 3600 * $limit_hours + 60 * $limit_mins + $limit_secs - -set RunTime = `sqs -f $jid | grep -F RunTime | sed 's/^ *RunTime=\([0-9]*:[0-9]*:[0-9]*\) .*/\1/' ` -set runt_hours = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` -set runt_mins = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` -set runt_secs = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` -if ("X$runt_hours" == "X") set runt_hours = 0 -if ("X$runt_mins" == "X") set runt_mins = 0 -if ("X$runt_secs" == "X") set runt_secs = 0 -@ runt = 3600 * $runt_hours + 60 * $runt_mins + $runt_secs - -@ remaining = $limit - $runt -cat > $run/Walltime.Remaining < $dir/squeuef.$lid.$remaining - squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining - # squeue -t R -o "%.10i %R" > $dir/squeueR.$lid.$remaining -endif - -while ($remaining > 0) - echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step - grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step - grep -Fa -e "timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step - grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step - echo "Wallclock time remaining: 
$remaining" >> $dir/ice.log.$lid.step - grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all - echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step - tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step - /bin/cp --preserve=timestamps -u $timing/* $dir - # sqs -w -a | grep "^[0-9]* *R *"> $dir/sqswr.$lid.$remaining - squeue -t R -o "%.10i %.15P %.20j %.10u %.7a %.2t %.6D %.8C %.10M %.10l" > $dir/squeuef.$lid.$remaining - squeue -s | grep -v -F extern > $dir/squeues.$lid.$remaining - # squeue -t R -o "%.10i %R" > $dir/squeueR.$lid.$remaining - chmod a+r $dir/* - # sleep $sample_interval - set sleep_remaining = $sample_interval - while ($sleep_remaining > 120) - sleep 120 - @ sleep_remaining = $sleep_remaining - 120 - end - sleep $sleep_remaining - set RunTime = `sqs -f $jid | grep -F RunTime | sed 's/^ *RunTime=\([0-9]*:[0-9]*:[0-9]*\) .*/\1/' ` - set runt_hours = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set runt_mins = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - set runt_secs = `echo $RunTime | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` - if ("X$runt_hours" == "X") set runt_hours = 0 - if ("X$runt_mins" == "X") set runt_mins = 0 - if ("X$runt_secs" == "X") set runt_secs = 0 - @ runt = 3600 * $runt_hours + 60 * $runt_mins + $runt_secs - @ remaining = $limit - $runt - cat > $run/Walltime.Remaining << EOF2 -$remaining $sample_interval -EOF2 - -end diff --git a/config/e3sm/machines/syslog.noop b/config/e3sm/machines/syslog.noop deleted file mode 100755 index fb37463b9b4..00000000000 --- a/config/e3sm/machines/syslog.noop +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/csh -f -# "no op" syslog script: -# mach_syslog - -# set sec = 0 -set sample_interval = $1 -set jid = $2 -set lid = $3 -set run = $4 -set timing = $5 -set dir = $6 - -# while (1) -# sleep $sample_interval -# end - diff --git a/config/e3sm/machines/syslog.summit b/config/e3sm/machines/syslog.summit deleted file mode 100755 index c238b9b1b92..00000000000 --- a/config/e3sm/machines/syslog.summit +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/csh -f -# summit syslog script: -# mach_syslog - -set sample_interval = $1 -set jid = $2 -set lid = $3 -set run = $4 -set timing = $5 -set dir = $6 - -# Wait until some model output appears before saving output file. -# Target length was determined empirically, so it may need to be adjusted in the future. -# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail.) 
-# set ntasks = `bjobs -l -UF $jid | grep -F Started | sed 's/.* Started *\([0-9]*\) *Task.*/\1/' ` -# @ nnodes = ($ntasks - 1) / 42 -set nnodes = 0 -@ target_lines = 20 + $nnodes -sleep 10 -set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -while ($outlth < $target_lines) - sleep 60 - set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -end - -set time_left = `bjobs -noheader -hms -o "time_left" $jid` -set remaining_hours = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\1/' ` -set remaining_mins = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\2/' ` -set remaining_secs = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\3/' ` -if ("X$remaining_hours" == "X") set remaining_hours = 0 -if ("X$remaining_mins" == "X") set remaining_mins = 0 -if ("X$remaining_secs" == "X") set remaining_secs = 0 -@ remaining = 3600 * $remaining_hours + 60 * $remaining_mins + $remaining_secs -cat > $run/Walltime.Remaining < $dir/bjobsru_all.$lid.$remaining - bjobs -r -u all -o 'jobid slots exec_host' > $dir/bjobsru_allo.$lid.$remaining -endif - -while ($remaining > 0) - echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step - grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step - grep -Fa -e "timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step - grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ice.log.$lid.step - grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all - echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step - tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step - /bin/cp --preserve=timestamps -u $timing/* $dir - bjobs -r -u all > $dir/bjobsru_all.$lid.$remaining - bjobs -r -u all -o 'jobid slots exec_host' > $dir/bjobsru_allo.$lid.$remaining - chmod a+r $dir/* - # sleep $sample_interval - set sleep_remaining = $sample_interval - while ($sleep_remaining > 120) - sleep 120 - @ sleep_remaining = $sleep_remaining - 120 - end - sleep $sleep_remaining - set time_left = `bjobs -noheader -hms -o "time_left" $jid` - set remaining_hours = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\1/' ` - set remaining_mins = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\2/' ` - set remaining_secs = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\3/' ` - if ("X$remaining_hours" == "X") set remaining_hours = 0 - if ("X$remaining_mins" == "X") set remaining_mins = 0 - if ("X$remaining_secs" == "X") set remaining_secs = 0 - @ remaining = 3600 * $remaining_hours + 60 * $remaining_mins + $remaining_secs - cat > $run/Walltime.Remaining << EOF2 -$remaining $sample_interval -EOF2 - -end diff --git a/config/e3sm/machines/syslog.theta b/config/e3sm/machines/syslog.theta deleted file mode 100755 index 38220677930..00000000000 --- a/config/e3sm/machines/syslog.theta +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/csh -f -# theta syslog 
script: -# mach_syslog - -set sample_interval = $1 -set jid = $2 -set lid = $3 -set run = $4 -set timing = $5 -set dir = $6 - -# Wait until job task-to-node mapping information is output before saving output file. -# Target length was determined empirically (maximum number of lines before job mapping -# information starts + number of nodes), and it may need to be adjusted in the future. -# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail.) -set nnodes = `qstat -lf $jid | grep -F Nodes | sed 's/ *Nodes *: *\([0-9]*\) */\1/' ` -@ target_lines = 150 + $nnodes -sleep 10 -set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -while ($outlth < $target_lines) - sleep 60 - set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -end - -set TimeRemaining = `qstat -l --header TimeRemaining $jid | grep -F TimeRemaining | sed 's/^ *TimeRemaining *: *\([0-9]*:[0-9]*:[0-9]*\) */\1/' ` -set rem_hours = `echo $TimeRemaining | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` -set rem_mins = `echo $TimeRemaining | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` -set rem_secs = `echo $TimeRemaining | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` -if ("X$rem_hours" == "X") set rem_hours = 0 -if ("X$rem_mins" == "X") set rem_mins = 0 -if ("X$rem_secs" == "X") set rem_secs = 0 -@ remaining = 3600 * $rem_hours + 60 * $rem_mins + $rem_secs -cat > $run/Walltime.Remaining < $dir/xtnodestat.$lid.$remaining - qstat --header JobID:State:Nodes:Location | grep -Fa -e "running" -e "State" -e "starting" -e "exiting" > $dir/qstatn.$lid.$remaining - qstat --header JobID:JobName:User:Project:WallTime:RunTime:TimeRemaining:Nodes:State:StartTime:attrs | grep -Fa -e "running" -e "State" -e "starting" -e "exiting" > $dir/qstatr.$lid.$remaining -endif - -while ($remaining > 0) - echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step - grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step - grep -Fa -e "timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step - grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ice.log.$lid.step - grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all - echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step - tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step - /bin/cp --preserve=timestamps -u $timing/* $dir - xtnodestat > $dir/xtnodestat.$lid.$remaining - qstat --header JobID:State:Nodes:Location | grep -Fa -e "running" -e "State" -e "starting" -e "exiting" > $dir/qstatn.$lid.$remaining - qstat --header JobID:JobName:User:Project:WallTime:RunTime:TimeRemaining:Nodes:State:StartTime:attrs | grep -Fa -e "running" -e "State" -e "starting" -e "exiting" > $dir/qstatr.$lid.$remaining - chmod a+r $dir/* - # sleep $sample_interval - set sleep_remaining = $sample_interval - while ($sleep_remaining > 120) - sleep 120 - @ sleep_remaining = $sleep_remaining - 120 
- end - sleep $sleep_remaining - set TimeRemaining = `qstat -l --header TimeRemaining $jid | grep -F TimeRemaining | sed 's/^ *TimeRemaining *: *\([0-9]*:[0-9]*:[0-9]*\) */\1/' ` - set rem_hours = `echo $TimeRemaining | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\1/' ` - set rem_mins = `echo $TimeRemaining | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\2/' ` - set rem_secs = `echo $TimeRemaining | sed 's/^0*\([0-9]*\):0*\([0-9]*\):0*\([0-9]*\)/\3/' ` - if ("X$rem_hours" == "X") set rem_hours = 0 - if ("X$rem_mins" == "X") set rem_mins = 0 - if ("X$rem_secs" == "X") set rem_secs = 0 - @ remaining = 3600 * $rem_hours + 60 * $rem_mins + $rem_secs - cat > $run/Walltime.Remaining << EOF2 -$remaining $sample_interval -EOF2 - -end diff --git a/config/e3sm/machines/syslog.titan b/config/e3sm/machines/syslog.titan deleted file mode 100755 index 50800390b9b..00000000000 --- a/config/e3sm/machines/syslog.titan +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/csh -f -# titan syslog script: -# mach_syslog - -set sample_interval = $1 -set jid = $2 -set lid = $3 -set run = $4 -set timing = $5 -set dir = $6 - -# Wait until job task-to-node mapping information is output before saving output file. -# Target length was determined empirically (maximum number of lines before job mapping -# information starts + number of nodes), and it may need to be adjusted in the future. -# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail) -set nnodes = `qstat -f $jid | grep -F Resource_List.nodes | sed 's/ *Resource_List.nodes = *\([0-9]*\):ppn=*\([0-9]*\) */\1/' ` -@ target_lines = 150 + $nnodes -sleep 10 -set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -while ($outlth < $target_lines) - sleep 60 - set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` -end -set remaining = `qstat -f $jid | grep -F Walltime.Remaining | sed 's/ *Walltime.Remaining = *\([0-9]*\) */\1/' ` -cat > $run/Walltime.Remaining < $dir/xtnodestat.$lid.$remaining - # showq > $dir/showq.$lid.$remaining - showq -r > $dir/showqr.$lid.$remaining -endif - -while ($remaining > 0) - echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step - grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step - grep -Fa -e "timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step - grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/ice.log.$lid.step - grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step - echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step - grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all - echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step - tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step - /bin/cp --preserve=timestamps -u $timing/* $dir - xtnodestat > $dir/xtnodestat.$lid.$remaining - # showq > $dir/showq.$lid.$remaining - showq -r > $dir/showqr.$lid.$remaining - chmod a+r $dir/* - # sleep $sample_interval - set sleep_remaining = $sample_interval - while ($sleep_remaining > 120) - sleep 120 - @ sleep_remaining = 
$sleep_remaining - 120 - end - sleep $sleep_remaining - set remaining = `qstat -f $jid | grep -F Walltime.Remaining | sed 's/ *Walltime.Remaining = *\([0-9]*\) */\1/' ` - if ("X$remaining" == "X") set remaining = 0 - cat > $run/Walltime.Remaining << EOF2 -$remaining $sample_interval -EOF2 - -end - diff --git a/config/e3sm/machines/template.case.run b/config/e3sm/machines/template.case.run deleted file mode 100755 index 99a8fa66e90..00000000000 --- a/config/e3sm/machines/template.case.run +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python - -# Batch system directives -{{ batchdirectives }} - -""" -template to create a case run script. This should only ever be called -by case.submit when on batch system. This script only exists as a way of providing -batch directives. Use case.submit from the command line to run your case. - -DO NOT RUN THIS SCRIPT MANUALLY -""" - -import os, sys -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * - -from CIME.case import Case - -logger = logging.getLogger(__name__) - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# case.run SMS\033[0m - > {0} -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--caseroot", - help="Case directory to build") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run") - - parser.add_argument("--completion-sets-continue-run", action="store_true", - help="This is used to ensure CONTINUE_RUN is cleared for an initial run, " - "but set for subsequent runs.") - - parser.add_argument("--resubmit", default=False, action="store_true", - help="If RESUBMIT is set, this performs the resubmissions.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - if args.skip_preview_namelist is None: - args.skip_preview_namelist = False - - return args.caseroot, args.skip_preview_namelist, args.completion_sets_continue_run, args.resubmit - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - - caseroot, skip_pnl, set_continue_run, resubmit = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - success = case.case_run(skip_pnl=skip_pnl, set_continue_run=set_continue_run, submit_resubmits=resubmit) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/config/e3sm/machines/template.case.run.sh b/config/e3sm/machines/template.case.run.sh deleted file mode 100755 index 48aef6be008..00000000000 --- a/config/e3sm/machines/template.case.run.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -e - -# Batch system directives -{{ batchdirectives }} - -# template to create a case run shell script. 
This should only ever be called -# by case.submit when on batch. Use case.submit from the command line to run your case. - -# cd to case -cd {{ caseroot }} - -# Set PYTHONPATH so we can make cime calls if needed -LIBDIR={{ cimeroot }}/scripts/lib -export PYTHONPATH=$LIBDIR:$PYTHONPATH - -# get new lid -lid=$(python -c 'import CIME.utils; print CIME.utils.new_lid()') -export LID=$lid - -# setup environment -source .env_mach_specific.sh - -# Clean/make timing dirs -RUNDIR=$(./xmlquery RUNDIR --value) -if [ -e $RUNDIR/timing ]; then - /bin/rm $RUNDIR/timing -fi -mkdir -p $RUNDIR/timing/checkpoints - -# minimum namelist action -./preview_namelists --component cpl -#./preview_namelists # uncomment for full namelist generation - -# uncomment for lockfile checking -# ./check_lockedfiles - -# setup OMP_NUM_THREADS -export OMP_NUM_THREADS=$(./xmlquery THREAD_COUNT --value) - -# save prerun provenance? - -# MPIRUN! -cd $(./xmlquery RUNDIR --value) -{{ mpirun }} - -# save logs? - -# save postrun provenance? - -# resubmit ? diff --git a/config/e3sm/machines/template.case.test b/config/e3sm/machines/template.case.test deleted file mode 100755 index aef2be3f911..00000000000 --- a/config/e3sm/machines/template.case.test +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -{{ batchdirectives }} -""" -This is the system test submit script for CIME. This should only ever be called -by case.submit when on batch system. This script only exists as a way of providing -batch directives. Use case.submit from the command line to run your case. - -DO NOT RUN THIS SCRIPT MANUALLY -""" -import os, sys -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * - -from CIME.case import Case - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# case.test SMS\033[0m - > {0} -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("testname", nargs="?",default=None, - help="Name of the test to run, default is set in TESTCASE in env_test.xml") - - parser.add_argument("--caseroot", - help="Case directory to build") - - parser.add_argument("--reset", action="store_true", - help="Reset the case to its original state as defined by config_tests.xml") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - return args.caseroot, args.testname, args.reset, args.skip_preview_namelist - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - - caseroot, testname, reset, skip_pnl = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - success = case.case_test(testname=testname, reset=reset, 
skip_pnl=skip_pnl) - - sys.exit(0 if success else 1) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/config/e3sm/machines/template.ocn_diagnostics b/config/e3sm/machines/template.ocn_diagnostics deleted file mode 100644 index 98b9671f54b..00000000000 --- a/config/e3sm/machines/template.ocn_diagnostics +++ /dev/null @@ -1,23 +0,0 @@ -#! /usr/bin/env bash -# -# template.diags_generate used to create the $CASEROOT/$CASE.ocn_diagnostics -# -# TODO - need to get the PE count from the config_pes.xml tag -# for the specified machine -# ## -# - -{{ batchdirectives }} - -mpirun={{ mpirun }} - -# xmlquery to get POSTPROCESS_VIRTUALENV, BATCHSUBMIT variable settings -virtualEnvDir=`./xmlquery POSTPROCESS_VIRTUALENV -value` -caseRoot=`./xmlquery CASEROOT -value` - -cd $virtualEnvDir/bin -activate virtualenv - -$mpirun ./ocn_diags_generator.py --caseroot $caseRoot >> $caseRoot/tSeriesStatus 2>&1 - -deactivate diff --git a/config/e3sm/machines/template.st_archive b/config/e3sm/machines/template.st_archive deleted file mode 100755 index 8f60cae3bc2..00000000000 --- a/config/e3sm/machines/template.st_archive +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -# Batch system directives -{{ batchdirectives }} - -""" -Performs short term archiving for restart files, history and rpointer -files in the $RUNDIR associated with $CASEROOT. Normally this script -is called by case.submit on batch systems. - -""" - -import sys, os, time -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * -from CIME.case import Case - -logger = logging.getLogger(__name__) - - -############################################################################### -def parse_command_line(args, description): -############################################################################### - - parser = argparse.ArgumentParser(description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to build") - - parser.add_argument("--no-incomplete-logs", default=False, action="store_true", - help="Whether to archive logs which have been completed or not") - - parser.add_argument("--copy-only", default=False, action="store_true", - help="Copy instead of move the files to be archived") - - parser.add_argument("--last-date", default=None, - help="WARNING: This option with --force-move may corrupt your run directory! Use at your own risk! " - "Last simulation date to archive, specified as 'Year-Month-Day'. " - "Year must be specified with 4 digits, while month and day can be specified without zero padding. " - "'0003-11-4' would archive at most files for the simulated year 3, month 11, day 4." - "This option implies --copy-only unless --force-move is specified ") - - parser.add_argument("--force-move", default=False, action="store_true", - help="Move the files even if it's unsafe to do so, dangerous if used with --copy-only.") - - parser.add_argument("--test-all", default=False, action="store_true", - help="Run tests of st_archiver functionality on config_arvchive.xml") - - parser.add_argument("--test-case", default=False, action="store_true", - help="Run tests of st_archiver functionality on env_arvchive.xml") - - parser.add_argument("--resubmit", default=False, action="store_true", - help="If RESUBMIT is set, this performs the resubmissions." 
- "This is primarily meant for use by case.submit") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - if args.last_date is not None and args.force_move is False: - args.copy_only = True - - if args.force_move is True: - args.copy_only = False - - return (args.caseroot, args.last_date, args.no_incomplete_logs, args.copy_only, - args.test_all, args.test_case, args.resubmit) - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - caseroot, last_date, no_incomplete_logs, copy_only, testall, testcase, resubmit = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - if testall: - success = case.test_st_archive() - elif testcase: - success = case.test_env_archive() - else: - success = case.case_st_archive(last_date_str=last_date, - archive_incomplete_logs=not no_incomplete_logs, - copy_only=copy_only, resubmit=resubmit) - - sys.exit(0 if success else 1) - -############################################################################### - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/config/e3sm/machines/template.timeseries b/config/e3sm/machines/template.timeseries deleted file mode 100644 index 2add89bb22b..00000000000 --- a/config/e3sm/machines/template.timeseries +++ /dev/null @@ -1,24 +0,0 @@ -#! /usr/bin/env bash -# -# template.tseries_generate used to create the $CASEROOT/$CASE.timeseries -# -# TODO - need to get the PE count from the config_pes.xml tag -# for the specified machine -# ## -# - -{_batchdirectives_} - -mpirun={_mpirun_} - -# xmlquery to get POSTPROCESS_VIRTUALENV, BATCHSUBMIT variable settings -virtualEnvDir=`./xmlquery POSTPROCESS_VIRTUALENV -value` -caseRoot=`./xmlquery CASEROOT -value` - -cd $virtualEnvDir/bin -activate virtualenv - -$mpirun ./cesm_tseries_generator.py --caseroot $caseRoot >> $caseRoot/tSeriesStatus 2>&1 - -deactivate - diff --git a/config/e3sm/machines/userdefined_laptop_template/README.md b/config/e3sm/machines/userdefined_laptop_template/README.md deleted file mode 100644 index 353d397c3fe..00000000000 --- a/config/e3sm/machines/userdefined_laptop_template/README.md +++ /dev/null @@ -1,135 +0,0 @@ -Building CESM on an UNSUPPORTED local machine ---------------------------------------------- - -These directions are for a Mac OS X 10.9 or 10.10 laptop using -homebrew or macports to install the required software. The procedure -is similar for a linux workstation or cluster, you will just use -different package management tools to install the third party -libraries. - -Setup -===== - - - install xcode, including the command line tools. Failure to - install the command line tools is the most likely cause if you - get an error about the compilers not being able to create - executables. - - - install third party libraries from homebrew or macports. - - - home brew - - Install science tap : - - brew install gcc --without-multilib cmake mpich hdf5 --enable-fortran netcdf --enable-fortran - - - - macports - - sudo port install mpich +gcc48 hdf5-18 +mpich netcdf-fortran +gcc48 +mpich cmake - - Note: If you see an error while running create_newcase that - indicates perl can't find XML::LibXML, you may need to install - p5-xml-libxml as well. 
- - - - Some of the shell scripts used by cesm hard code "gmake" instead - of using the GMAKE variable from env_build.xml. To work around - this, you should install gnu make, or simply create a link from - make to gmake in you path. - - mkdir -p ${HOME}/local/bin - ln -s `whereis make` ${HOME}/local/bin/gmake - cat >> ${HOME}/.bashrc < - - - - - -DFORTRANUNDERSCORE -DNO_R16 - -fopenmp - -fopenmp - -L /usr/local/Cellar/gcc/4.9.2/lib/gcc/x86_64-apple-darwin14.0.0/4.9.2 -fopenmp - -D CISM_GNU=ON - -ffixed-form - -ffree-form - -g -Wall - - -O -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none - -O0 - -fdefault-real-8 - - /usr/local/bin/gfortran - /usr/bin/cc - /usr/bin/c++ - /usr/local/bin/mpif90 - /usr/local/bin/mpicc - /usr/local/bin/mpicxx - FORTRAN - TRUE - /usr/local - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e - - - - - -DFORTRANUNDERSCORE -DNO_R16 - -fopenmp - -fopenmp - -fopenmp - -D CISM_GNU=ON - -ffixed-form - -ffree-form - -g -Wall - - -O -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none - -O0 - -fdefault-real-8 - /opt/local/bin/gfortran-mp-4.8 - /usr/bin/cc - /usr/bin/c++ - /opt/local/bin/mpif90-mpich-gcc48 - /opt/local/bin/mpicc-mpich-gcc48 - /opt/local/bin/mpicxx-mpich-gcc48 - FORTRAN - TRUE - /opt/local - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - $ENV{HOME}/local/pfunit/pfunit-sf.git.ae92605e8e - - diff --git a/config/e3sm/machines/userdefined_laptop_template/config_machines.xml b/config/e3sm/machines/userdefined_laptop_template/config_machines.xml deleted file mode 100644 index f6cc9587d0b..00000000000 --- a/config/e3sm/machines/userdefined_laptop_template/config_machines.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - __USEFUL_DESCRIPTION__ - Darwin - something.matching.your.machine.hostname - gnu - mpich - $ENV{HOME}/projects/cesm-inputdata - $ENV{HOME}/projects/ptclm-data - $ENV{HOME}/projects/scratch/archive/$CASE - $ENV{HOME}/projects/scratch - $ENV{HOME}/projects/baselines - $CIMEROOT/tools/cprnc/build/cprnc - none - __YOUR_NAME_HERE__ - 4 - 4 - 2 - - - - diff --git a/config/e3sm/machines/userdefined_laptop_template/config_pes.xml b/config/e3sm/machines/userdefined_laptop_template/config_pes.xml deleted file mode 100644 index 774b6f66388..00000000000 --- a/config/e3sm/machines/userdefined_laptop_template/config_pes.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - $MAX_TASKS_PER_NODE 1 0 - 2cm - - - - 1 1 0 - 1r - - - - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1r - - - 2 1 - 2 - - - - diff --git a/config/e3sm/testmods_dirs/allactive/force_netcdf_pio/shell_commands b/config/e3sm/testmods_dirs/allactive/force_netcdf_pio/shell_commands deleted file mode 100644 index db652859278..00000000000 --- a/config/e3sm/testmods_dirs/allactive/force_netcdf_pio/shell_commands +++ /dev/null @@ -1 +0,0 @@ -./xmlchange PIO_TYPENAME=netcdf diff --git a/config/e3sm/testmods_dirs/allactive/mach/pet/shell_commands b/config/e3sm/testmods_dirs/allactive/mach/pet/shell_commands deleted file mode 100644 index 236f1ce50df..00000000000 --- a/config/e3sm/testmods_dirs/allactive/mach/pet/shell_commands +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# mod for multi-submit pet tests on theta in debug queue -if [ `./xmlquery --value MACH` == theta ]&& - [ `./xmlquery --value JOB_QUEUE --subgroup case.test` == debug-cache-quad ]; then - echo "theta pet test in debug queue" - # get number of nodes - nodes=$(./preview_run | grep nodes | grep -o 
'[[:digit:]]*') - echo "nodes=$nodes" - if [ $nodes -lt 128 ]; then - # increase ntasks to get into production queue: 128 nodes, 64 mpi/node - ./xmlchange --id NTASKS --val 8192 - echo "new NTASKS=8192" - fi - ./xmlchange --id JOB_QUEUE --val default - echo "new queue=default" - # lnd on ne4 grid can't run with more than 128 tasks - if [ `./xmlquery --value GRID | grep -Eo 'a%ne4np4_l%ne4.+' | wc -l` -gt 0 ]; then - echo "ne4 grid, setting NTASKS_LND to 128" - ./xmlchange --id NTASKS_LND --val 128 - echo "ne4 grid, setting NTASKS_ATM to 96" - ./xmlchange --id NTASKS_ATM --val 96 - fi -fi diff --git a/config/e3sm/testmods_dirs/allactive/mach_mods/shell_commands b/config/e3sm/testmods_dirs/allactive/mach_mods/shell_commands deleted file mode 100644 index ff03e1e4a39..00000000000 --- a/config/e3sm/testmods_dirs/allactive/mach_mods/shell_commands +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -if [ `./xmlquery --value MACH` == melvin ]; then ./xmlchange --id NTHRDS --val 1; fi diff --git a/config/e3sm/testmods_dirs/allactive/v1cmip6/README b/config/e3sm/testmods_dirs/allactive/v1cmip6/README deleted file mode 100644 index c96997ea2b8..00000000000 --- a/config/e3sm/testmods_dirs/allactive/v1cmip6/README +++ /dev/null @@ -1,6 +0,0 @@ -These modifications should result in a case that has the same namelist settings as the -v1 low-res CMIP6 production runs. - -Run these for at least 1 day to see all output. -Also use the CMIP6 compsets. -If running longer, change the nhtfrq for the first history file. diff --git a/config/e3sm/testmods_dirs/allactive/v1cmip6/shell_commands b/config/e3sm/testmods_dirs/allactive/v1cmip6/shell_commands deleted file mode 100644 index 00d910015b9..00000000000 --- a/config/e3sm/testmods_dirs/allactive/v1cmip6/shell_commands +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -./xmlchange --append CAM_CONFIG_OPTS='-cosp' -./xmlchange --id BUDGETS --val TRUE -./xmlchange PIO_TYPENAME=netcdf -if [ `./xmlquery --value MACH` == cetus ]||[ `./xmlquery --value MACH` == mira ]; then sed s/64M/128M/ env_mach_specific.xml >tmp && mv tmp env_mach_specific.xml; fi - diff --git a/config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_cam b/config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_cam deleted file mode 100644 index 1a67054ba16..00000000000 --- a/config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_cam +++ /dev/null @@ -1,11 +0,0 @@ - - nhtfrq = -24,-24,-6,-6,-3,-24 - mfilt = 1,30,120,120,240,30 - avgflag_pertape = 'A','A','I','A','A','A' - fexcl1 = 'CFAD_SR532_CAL' - fincl1 = 'IEFLX','extinct_sw_inp','extinct_lw_bnd7','extinct_lw_inp','CLD_CAL' - fincl2 = 'FLUT','PRECT','U200','V200','U850','V850','Z500','OMEGA500','UBOT','VBOT','TREFHT','TREFHTMN','TREFHTMX','QREFHT','TS','PS','TMQ','TUQ','TVQ' - fincl3 = 'PSL','T200','T500','U850','V850','UBOT','VBOT','TREFHT' - fincl4 = 'FLUT','U200','U850','PRECT','OMEGA500' - fincl5 = 'PRECT','PRECC' - fincl6 = 'CLDTOT_ISCCP','MEANCLDALB_ISCCP','MEANTAU_ISCCP','MEANPTOP_ISCCP','MEANTB_ISCCP','CLDTOT_CAL','CLDTOT_CAL_LIQ','CLDTOT_CAL_ICE','CLDTOT_CAL_UN','CLDHGH_CAL','CLDHGH_CAL_LIQ','CLDHGH_CAL_ICE','CLDHGH_CAL_UN','CLDMED_CAL','CLDMED_CAL_LIQ','CLDMED_CAL_ICE','CLDMED_CAL_UN','CLDLOW_CAL','CLDLOW_CAL_LIQ','CLDLOW_CAL_ICE','CLDLOW_CAL_UN' diff --git a/config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_clm b/config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_clm deleted file mode 100644 index 29108bc46c7..00000000000 --- a/config/e3sm/testmods_dirs/allactive/v1cmip6/user_nl_clm +++ /dev/null @@ -1,26 +0,0 @@ 
-!---------------------------------------------------------------------------------- -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! -! Include namelist variables for drv_flds_in ONLY if -megan and/or -drydep options -! are set in the CLM_NAMELIST_OPTS env variable. -! -! EXCEPTIONS: -! Set use_cndv by the compset you use and the CLM_BLDNML_OPTS -dynamic_vegetation setting -! Set use_vichydro by the compset you use and the CLM_BLDNML_OPTS -vichydro setting -! Set use_cn by the compset you use and CLM_BLDNML_OPTS -bgc setting -! Set use_crop by the compset you use and CLM_BLDNML_OPTS -crop setting -! Set spinup_state by the CLM_BLDNML_OPTS -bgc_spinup setting -! Set irrigate by the CLM_BLDNML_OPTS -irrig setting -! Set co2_ppmv with CCSM_CO2_PPMV option -! Set dtime with L_NCPL option -! Set fatmlndfrc with LND_DOMAIN_PATH/LND_DOMAIN_FILE options -! Set finidat with RUN_REFCASE/RUN_REFDATE/RUN_REFTOD options for hybrid or branch cases -! (includes $inst_string for multi-ensemble cases) -! Set glc_grid with CISM_GRID option -! Set glc_smb with GLC_SMB option -! Set maxpatch_glcmec with GLC_NEC option -! Set glc_do_dynglacier with GLC_TWO_WAY_COUPLING env variable -!---------------------------------------------------------------------------------- - - check_finidat_year_consistency = .false. diff --git a/config/e3sm/tests.py b/config/e3sm/tests.py deleted file mode 100644 index 7a2def01c98..00000000000 --- a/config/e3sm/tests.py +++ /dev/null @@ -1,226 +0,0 @@ -# Here are the tests belonging to e3sm suites. Format is -# ..[.] -# -# suite_name : { -# "inherit" : (suite1, suite2, ...), # Optional. Suites to inherit tests from. Default is None. Tuple, list, or str. -# "time" : "HH:MM:SS", # Optional. Recommended upper-limit on test time. -# "share" : True|False, # Optional. If True, all tests in this suite share a build. Default is False. -# "tests" : (test1, test2, ...) # Optional. The list of tests for this suite. See above for format. Tuple, list, or str. 
-# } - -_TESTS = { - - "e3sm_land_developer" : { - "time" : "0:45:00", - "tests" : ( - "ERS.f19_f19.ICLM45", - "ERS.f19_f19.I1850CLM45CN", - "ERS.f09_g16.I1850CLM45CN", - "ERS.f19_f19.I20TRCLM45CN", - "SMS_Ld1.hcru_hcru.I1850CRUCLM45CN", - "ERS.f19_g16.I1850CNECACNTBC.clm-eca", - "ERS.f19_g16.I1850CNECACTCBC.clm-eca", - "SMS_Ly2_P1x1.1x1_smallvilleIA.ICLM45CNCROP.clm-force_netcdf_pio", - "ERS_Ld3.f45_f45.ICLM45ED.clm-fates", - "ERS.f19_g16.I1850CLM45.clm-betr", - "ERS.f19_g16.I1850CLM45.clm-vst", - "ERS.f09_g16.I1850CLM45CN.clm-bgcinterface", - "ERS.ne11_oQU240.I20TRCLM45", - "ERS.f19_g16.I1850CNRDCTCBC.clm-rd", - "ERS.f19_g16.I1850GSWCNPECACNTBC.clm-eca_f19_g16_I1850GSWCNPECACNTBC", - "ERS.f19_g16.I20TRGSWCNPECACNTBC.clm-eca_f19_g16_I20TRGSWCNPECACNTBC", - "ERS.f19_g16.I1850GSWCNPRDCTCBC.clm-ctc_f19_g16_I1850GSWCNPRDCTCBC", - "ERS.f19_g16.I20TRGSWCNPRDCTCBC.clm-ctc_f19_g16_I20TRGSWCNPRDCTCBC", - "ERS.f09_g16.ICLM45BC", - ) - }, - - "e3sm_atm_developer" : { - "tests" : ( - "ERP_Ln9.ne4_ne4.FC5AV1C-L", - "SMS_Ln9.ne4_ne4.FC5AV1C-L.cam-outfrq9s", - "SMS.ne4_ne4.FC5AV1C-L.cam-cosplite", - "SMS_R_Ld5.ne4_ne4.FSCM5A97", - "SMS_D_Ln5.ne4_ne4.FC5AV1C-L", - "SMS_Ln5.ne4pg2_ne4pg2.FC5AV1C-L", - ) - }, - - "e3sm_atm_integration" : { - "inherit" : ("eam_preqx", "eam_theta"), - "tests" : ( - "ERP_Ln9.ne4_ne4.F-EAMv1-AQP1", - "SMS_Ld1.ne4_ne4.F-EAMv1-AQP1.cam-clubb_only", - "PET_Ln5.ne4_ne4.FC5AV1C-L.allactive-mach-pet", - "PEM_Ln5.ne4_ne4.FC5AV1C-L", - "SMS_D_Ln5.ne4_ne4.FC5AV1C-L.cam-cosplite_nhtfrq5", - "ERS_Ld5.ne4_ne4.FC5AV1C-L.cam-rrtmgp", - "ERS_Ld5.ne4_ne4.FC5AV1C-L.cam-gust_param", - "REP_Ln5.ne4_ne4.FC5AV1C-L", - ) - }, - - #atmopheric tests for extra coverage - "e3sm_atm_extra_coverage" : { - "tests" : ( - "SMS_Lm1.ne4_ne4.FC5AV1C-L", - "ERS_Ld31.ne4_ne4.FC5AV1C-L", - "ERP_Lm3.ne4_ne4.FC5AV1C-L", - "SMS_D_Ln5.ne30_ne30.FC5AV1C-L", - "ERP_Ln5.ne30_ne30.FC5AV1C-L", - "SMS_Ly1.ne4_ne4.FC5AV1C-L", - ) - }, - - #atmopheric tests for hi-res - "e3sm_atm_hi_res" : { - "time" : "01:30:00", - "tests" : "SMS.ne120_ne120.FC5AV1C-H01A" - }, - - #atmopheric tests to mimic low res production runs - "e3sm_atm_prod" : { - "tests" : "SMS_Ln5.ne30_ne30.FC5AV1C-L.cam-cosplite" - }, - - #atmopheric nbfb tests - "e3sm_atm_nbfb" : { - "tests" : ( - "PGN_P1x1.ne4_ne4.FC5AV1C-L", - "TSC.ne4_ne4.FC5AV1C-L", - "MVK_PL.ne4_ne4.FC5AV1C-L", - ) - }, - - "e3sm_ocnice_extra_coverage" : { - "tests" : ( - "ERS_P480_Ld5.T62_oEC60to30v3wLI.GMPAS-DIB-IAF-ISMF", - "PEM_P480_Ld5.T62_oEC60to30v3wLI.GMPAS-DIB-IAF-ISMF", - ) - }, - - "e3sm_developer" : { - "inherit" : ("e3sm_land_developer", "e3sm_atm_developer"), - "time" : "0:45:00", - "tests" : ( - "ERS.f19_g16_rx1.A", - "ERS.ne30_g16_rx1.A", - "SEQ.f19_g16.X", - "ERIO.ne30_g16_rx1.A", - "HOMME_P24.f19_g16_rx1.A", - "NCK.f19_g16_rx1.A", - "SMS.ne30_f19_g16_rx1.A", - "ERS_Ld5.T62_oQU120.CMPASO-NYF", - "ERS.f09_g16_g.MALISIA", - "SMS.T62_oQU120_ais20.MPAS_LISIO_TEST", - "SMS.f09_g16_a.IGCLM45_MLI", - "SMS_P12x2.ne4_oQU240.A_WCYCL1850.allactive-mach_mods", - "SMS_B.ne4_ne4.F-EAMv1-AQP1.cam-hommexx", - ) - }, - - "e3sm_integration" : { - "inherit" : ("e3sm_developer", "e3sm_atm_integration"), - "time" : "03:00:00", - "tests" : ( - "ERS.ne11_oQU240.A_WCYCL1850", - "SMS_D_Ld1.ne30_oECv3_ICG.A_WCYCL1850S_CMIP6.allactive-v1cmip6", - "ERS_Ln9.ne4_ne4.FC5AV1C-L", - #"ERT_Ld31.ne16_g37.B1850C5",#add this line back in with the new correct compset - "NCK.ne11_oQU240.A_WCYCL1850", - "PET.f19_g16.X.allactive-mach-pet", - "PET.f45_g37_rx1.A.allactive-mach-pet", - 
"PET_Ln9_PS.ne30_oECv3_ICG.A_WCYCL1850S.allactive-mach-pet", - "PEM_Ln9.ne30_oECv3_ICG.A_WCYCL1850S", - "ERP_Ld3.ne30_oECv3_ICG.A_WCYCL1850S", - "SMS.f09_g16_a.MALI", - "SMS_D_Ln5.conusx4v1_conusx4v1.FC5AV1C-L", - "SMS.ne30_oECv3.BGCEXP_BCRC_CNPECACNT_1850.clm-bgcexp", - "SMS.ne30_oECv3.BGCEXP_BCRC_CNPRDCTC_1850.clm-bgcexp", - ) - }, - - #e3sm tests for extra coverage - "e3sm_extra_coverage" : { - "inherit" : ("e3sm_atm_extra_coverage", "e3sm_ocnice_extra_coverage"), - "tests" : ( - "SMS_D_Ln5.enax4v1_enax4v1.FC5AV1C-L", - "SMS_D_Ln5.twpx4v1_twpx4v1.FC5AV1C-L", - ) - }, - - #e3sm tests for hi-res - "e3sm_hi_res" : { - "inherit" : "e3sm_atm_hi_res", - "tests" : ( - "SMS.ne120_oRRS18v3_ICG.A_WCYCL2000_H01AS.cam-cosplite", - "SMS.T62_oRRS30to10v3wLI.GMPAS-IAF", - ) - }, - - #e3sm tests for RRM grids - "e3sm_rrm" : { - "tests" : ( - "SMS_D_Ln5.conusx4v1_conusx4v1.FC5AV1C-L", - "SMS_D_Ln5.enax4v1_enax4v1.FC5AV1C-L", - "SMS_D_Ln5.twpx4v1_twpx4v1.FC5AV1C-L", - ) - }, - - #e3sm tests to mimic production runs - "e3sm_prod" : { - "inherit" : "e3sm_atm_prod", - "tests" : "SMS_Ld2.ne30_oECv3_ICG.A_WCYCL1850S_CMIP6.allactive-v1cmip6" - }, - - "fates" : { - "tests" : ( - "ERS_Ld9.1x1_brazil.ICLM45ED", - "ERS_D_Ld9.1x1_brazil.ICLM45ED", - "SMS_D_Lm6.1x1_brazil.ICLM45ED", - ) - }, - - - #atmopheric tests for ftypes with 2 builds only - #ftype2 is a default and tested in other suites for preqx - # preqx ftype0 - # preqx ftype1 - # preqx ftype4 - # theta-l hy ftype0 - # theta-l hy ftype1 - # theta-l hy ftype2 - # theta-l hy ftype4 - # theta-l nh ftype0 - # theta-l nh ftype1 - # theta-l nh ftype2 - # theta-l nh ftype4 - # theta-l hy SL - "eam_preqx" : { - "share" : True, - "time" : "01:00:00", - "tests" : ( - "SMS.ne4_ne4.FC5AV1C-L.cam-preqx_ftype0", - "SMS.ne4_ne4.FC5AV1C-L.cam-preqx_ftype1", - "SMS.ne4_ne4.FC5AV1C-L.cam-preqx_ftype4", - ) - }, - "eam_theta" : { - "share" : True, - "time" : "02:00:00", - "tests" : ( - "SMS.ne4_ne4.FC5AV1C-L.cam-thetahy_ftype0", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetahy_ftype1", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetahy_ftype2", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetahy_ftype4", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetanh_ftype0", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetanh_ftype1", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetanh_ftype2", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetanh_ftype4", - "SMS.ne4_ne4.FC5AV1C-L.cam-thetahy_sl", - "ERS.ne4_ne4.FC5AV1C-L.cam-thetahy_ftype2", - "ERS.ne4_ne4.FC5AV1C-L.cam-thetanh_ftype2", - ) - }, - -} diff --git a/config/e3sm/usermods_dirs/README b/config/e3sm/usermods_dirs/README deleted file mode 100644 index 9a0be8bc90d..00000000000 --- a/config/e3sm/usermods_dirs/README +++ /dev/null @@ -1 +0,0 @@ -Add subdirectories with mods for allactive cases. 
diff --git a/config/xml_schemas/config_archive.xsd b/config/xml_schemas/config_archive.xsd deleted file mode 100644 index cf24e0ec7d7..00000000000 --- a/config/xml_schemas/config_archive.xsd +++ /dev/null @@ -1,70 +0,0 @@
diff --git a/config/xml_schemas/config_compilers_v2.xsd b/config/xml_schemas/config_compilers_v2.xsd deleted file mode 100644 index c1b289303cd..00000000000 --- a/config/xml_schemas/config_compilers_v2.xsd +++ /dev/null @@ -1,193 +0,0 @@
diff --git a/config/xml_schemas/config_grids_v2.1.xsd b/config/xml_schemas/config_grids_v2.1.xsd deleted file mode 100644 index 1504274bec5..00000000000 --- a/config/xml_schemas/config_grids_v2.1.xsd +++ /dev/null @@ -1,174 +0,0 @@
diff --git a/config/xml_schemas/config_inputdata.xsd b/config/xml_schemas/config_inputdata.xsd deleted file mode 100644 index 583bb3430fc..00000000000 --- a/config/xml_schemas/config_inputdata.xsd +++ /dev/null @@ -1,20 +0,0 @@ - server precedence is order in this file.
Highest preference at top - - - - - - - - - - - - - diff --git a/conftest.py b/conftest.py new file mode 100644 index 00000000000..fcee11dfa00 --- /dev/null +++ b/conftest.py @@ -0,0 +1,48 @@ +import os +import sys + +CIMEROOT = os.path.abspath(os.path.join(os.path.dirname(__file__))) +sys.path.insert(0, CIMEROOT) + +import pytest + +from CIME import utils +from CIME.config import Config +from CIME.tests import scripts_regression_tests + +os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" + + +def pytest_addoption(parser): + # set addoption as add_argument to use common argument setup + # pytest's addoption has same signature as add_argument + setattr(parser, "add_argument", parser.addoption) + + scripts_regression_tests.setup_arguments(parser) + + # verbose and debug flags already exist + parser.addoption("--silent", action="store_true", help="Disable all logging") + + +def pytest_configure(config): + kwargs = vars(config.option) + + utils.configure_logging(kwargs["verbose"], kwargs["debug"], kwargs["silent"]) + + scripts_regression_tests.configure_tests(**kwargs) + + +@pytest.fixture(scope="module", autouse=True) +def setup(pytestconfig): + # ensure we start from CIMEROOT for each module + os.chdir(CIMEROOT) + + srcroot = utils.get_src_root() + + customize_path = os.path.join(srcroot, "cime_config", "customize") + + if os.path.exists(customize_path): + Config.instance().load(customize_path) + + # ensure GLOABL is reset + utils.GLOBAL = {} diff --git a/doc/Makefile b/doc/Makefile index efc61d8aa3f..93d79cf7e08 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -8,29 +8,44 @@ SPHINXPROJ = on SOURCEDIR = source BUILDDIR = build SPHINXAPI = sphinx-apidoc -SCRIPTSDIR = ../scripts # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) # exit 0 is to prevent the api rule from being run by the catchall target -# and generating an error. -api: CIME_api Tools_api Tools_user +# and generating an error. +api: CIME_api Tools_api Tools_user exit 0 -CIME_api: - @$(SPHINXAPI) --force -o $(SOURCEDIR)/$@ $(SCRIPTSDIR)/lib/CIME +CIME_api: + @$(SPHINXAPI) --force -o $(SOURCEDIR)/$@ ../CIME -Tools_api: - @$(SPHINXAPI) --force -o $(SOURCEDIR)/$@ $(SCRIPTSDIR)/Tools +Tools_api: + @$(SPHINXAPI) --force -o $(SOURCEDIR)/$@ ../CIME/Tools Tools_user: rm -f $(SOURCEDIR)/$@/*.rst - rm -f $(SOURCEDIR)/$@/temp_files/*.* + rm -f $(SOURCEDIR)/$@/temp_files/* ./tools_autodoc.py -.PHONY: help Makefile CIME_api Tools_api Tools_user +# It's too easy to forget to run 'make api' before running 'make html', +# so add a rule that ensures that the api documentation is regenerated +# whenever regenerating the html. +html: api + +clean: clean_api + +# Note that all of the files removed here are built using 'make api'; +# these are not - or at least, should not be - files that exist in the +# repository. +clean_api: + rm -f $(SOURCEDIR)/CIME_api/*.rst + rm -f $(SOURCEDIR)/Tools_api/*.rst + rm -f $(SOURCEDIR)/Tools_user/*.rst + rm -f $(SOURCEDIR)/Tools_user/temp_files/* + +.PHONY: help Makefile CIME_api Tools_api Tools_user clean_api # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). diff --git a/doc/README b/doc/README index 96462605d3d..deac674eb74 100644 --- a/doc/README +++ b/doc/README @@ -1,12 +1,17 @@ -This requires Sphinx v1.7 or greater. 
+This requires Sphinx v1.7 or greater, as well as some add-ons, which can +be installed with: + +pip install sphinx +pip install sphinxcontrib-programoutput +pip install git+https://github.com/esmci/sphinx_rtd_theme.git@version-dropdown-with-fixes Check the sphinx version as follows: >sphinx-build --version -The documentation source is stored with the CIME master code base. However, -the built html files are stored seperately in the orphan gh-pages branch +The documentation source is stored with the CIME master code base. However, +the built html files are stored separately in the orphan gh-pages branch and can be viewed from a browser at URL: http://esmci.github.io/cime @@ -19,17 +24,10 @@ https://github.com/ESMCI/cime/wiki/Working-with-Sphinx-and-reStructuredText Use the following commands to auto-build the html documentation from the main cime/doc directory: ->make clean ->make api ->make html - -To copy the html to the orphan gh-pages, follow these steps: - ->git clone -b gh-pages https://github.com/ESMCI/cime.git cime.gh-pages ->cd cime.gh-pages ->rm -rf * ->cp -R /path/to/cime-master/doc/build/html/* . ->git commit -m 'update HTML for PR #...' ->git push origin gh-pages - +make clean +make api +make html +To publish the docs to the orphan gh-pages branch, follow the steps in +https://github.com/ESMCI/cime/wiki/Working-with-Sphinx-and-reStructuredText +to ensure proper versioning of the documentation. diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 00000000000..e6edcf66b06 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,5 @@ +sphinx +sphinxcontrib-programoutput +sphinx-rtd-theme +sphinx-copybutton +evv4esm diff --git a/doc/source/Tools_user/index.rst.template b/doc/source/Tools_user/index.rst.template index 3da4239b722..e7c67e2a107 100644 --- a/doc/source/Tools_user/index.rst.template +++ b/doc/source/Tools_user/index.rst.template @@ -11,5 +11,3 @@ and **case.setup**. .. toctree:: :maxdepth: 1 - - diff --git a/doc/source/_templates/footer.html b/doc/source/_templates/footer.html new file mode 100644 index 00000000000..a7c22a302a3 --- /dev/null +++ b/doc/source/_templates/footer.html @@ -0,0 +1,5 @@ +{% extends "!footer.html" %} +{% block extrafooter %} + {{ super() }} + +{% endblock %} diff --git a/doc/source/_templates/versions.html b/doc/source/_templates/versions.html new file mode 100644 index 00000000000..b97bea56e39 --- /dev/null +++ b/doc/source/_templates/versions.html @@ -0,0 +1,64 @@ +{% if READTHEDOCS or display_lower_left %} +{# Add rst-badge after rst-versions for small badge style. #} + +
+  Read the Docs  v: {{ current_version }}
+  {% if languages|length >= 1 %}
+    {{ _('Languages') }}
+    {% for slug, url in languages %}
+      {% if slug == current_language %}{% endif %}
+      {{ slug }}
+      {% if slug == current_language %}{% endif %}
+    {% endfor %}
+  {% endif %}
+  Versions
+  {% if downloads|length >= 1 %}
+    {{ _('Downloads') }}
+    {% for type, url in downloads %}
+      {{ type }}
+    {% endfor %}
+  {% endif %}
+  {% if READTHEDOCS %}
+    {{ _('On Read the Docs') }}
+    {{ _('Project Home') }}
+    {{ _('Builds') }}
+  {% endif %}
+
+{% endif %} diff --git a/doc/source/build_cpl/adding-components.rst b/doc/source/build_cpl/adding-components.rst index 09ed5ebfd94..c9fcf234dd1 100644 --- a/doc/source/build_cpl/adding-components.rst +++ b/doc/source/build_cpl/adding-components.rst @@ -4,7 +4,7 @@ Adding components =================== -Here are the steps to add prognostic components to CIME models. +Here are the steps to add prognostic components to CIME models. There are a couple of aspects of a component interface to CIME, the scripts interface which controls setting up component inputs and diff --git a/doc/source/build_cpl/index.rst b/doc/source/build_cpl/index.rst index 46984c1a59f..a492a431548 100644 --- a/doc/source/build_cpl/index.rst +++ b/doc/source/build_cpl/index.rst @@ -12,7 +12,7 @@ Building a Coupled Model with CIME .. toctree:: :maxdepth: 3 :numbered: - + introduction.rst adding-components.rst @@ -22,4 +22,3 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` - diff --git a/doc/source/build_cpl/introduction.rst b/doc/source/build_cpl/introduction.rst index f45c4b47844..0352da61bf7 100644 --- a/doc/source/build_cpl/introduction.rst +++ b/doc/source/build_cpl/introduction.rst @@ -7,7 +7,7 @@ How to add a new component model to cime. How to replace an existing cime model with another one. -How to integrate your model in to the cime build/configure system and coupler. +How to integrate your model in to the cime build/configure system and coupler. How to work with the CIME-supplied models. diff --git a/doc/source/change.rst b/doc/source/change.rst new file mode 100644 index 00000000000..e69de29bb2d diff --git a/doc/source/conf.py b/doc/source/conf.py index f94a5ec8960..c6a1de2db7e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# -* coding: utf-8 -*- # # on documentation build configuration file, created by # sphinx-quickstart on Tue Jan 31 19:46:36 2017. @@ -18,8 +18,14 @@ # import os import sys -sys.path.insert(0, os.path.abspath('../../scripts/lib')) -sys.path.insert(1, os.path.abspath('../../scripts')) + +# Note that we need a specific version of sphinx_rtd_theme. This can be obtained with: +# pip install git+https://github.com/esmci/sphinx_rtd_theme.git@version-dropdown-with-fixes +import sphinx_rtd_theme + +sys.path.insert(0, os.path.abspath("../../")) +sys.path.insert(1, os.path.abspath("../../scripts")) +sys.path.insert(2, os.path.abspath("../../CIME")) # -- General configuration ------------------------------------------------ @@ -31,49 +37,51 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.githubpages', - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.viewcode', - 'sphinx.ext.todo', - 'sphinxcontrib.programoutput' + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.githubpages", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.viewcode", + "sphinx.ext.todo", + "sphinxcontrib.programoutput", + "sphinx_rtd_theme", + "sphinx_copybutton", ] -todo_include_todos=True +todo_include_todos = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. 
-master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'CIME' -copyright = u'2017, U.S. National Science Foundation and U.S. Department of Energy' -author = u'Staff of the NSF/CESM and DOE/E3SM projects' +project = "CIME" +copyright = "2017, U.S. National Science Foundation and U.S. Department of Energy" +author = "Staff of the NSF/CESM and DOE/E3SM projects" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'5.6' +version = "master" # The full version, including alpha/beta/rc tags. -release = u'5.6' +release = "master" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -81,7 +89,7 @@ exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -92,28 +100,38 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -#html_theme = 'alabaster' -#html_theme = 'bizstyle' -html_theme = 'classic' -#html_theme = 'sphinx_rtd_theme' -#html_theme = 'sphinxdoc' +# html_theme = 'alabaster' +# html_theme = 'bizstyle' +# html_theme = 'classic' +# html_theme = 'sphinxdoc' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # -html_theme_options = {"stickysidebar": "true"} +# html_theme_options = {"stickysidebar": "true"} + +# The 'versions' option needs to have at least two versions to work, but it doesn't need +# to have all versions: others will be added dynamically. Note that this maps from version +# names to html links. The current version can link to the current location (i.e., do +# nothing). For the other version, we just add a place-holder; its name and value are +# unimportant because these versions will get replaced dynamically. +html_theme_options = {} +html_theme_options["versions"] = {version: ""} +html_theme_options["versions"]["[placeholder]"] = "" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] + +html_css_files = ["custom.css"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'ondoc' - +htmlhelp_basename = "ondoc" # -- Options for LaTeX output --------------------------------------------- @@ -121,15 +139,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. 
# # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -139,8 +154,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'on.tex', u'on Documentation', - u'Staff of the NSF/CESM and DOE/E3SM projects', 'manual'), + ( + master_doc, + "on.tex", + "on Documentation", + "Staff of the NSF/CESM and DOE/E3SM projects", + "manual", + ), ] @@ -148,10 +168,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'on', u'on Documentation', - [author], 1) -] +man_pages = [(master_doc, "on", "on Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -160,16 +177,38 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'on', u'on Documentation', - author, 'on', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "on", + "on Documentation", + author, + "on", + "One line description of project.", + "Miscellaneous", + ), ] # -- Options for pdf output ------------------------------------------------- pdf_documents = [ - (master_doc, - u'CIME_Users_Guide', - u'CIME Users Guide (PDF)', - u'Staff of the NSF/CESM and DOE/E3SM projects'), + ( + master_doc, + "CIME_Users_Guide", + "CIME Users Guide (PDF)", + "Staff of the NSF/CESM and DOE/E3SM projects", + ), ] + +try: + html_context +except NameError: + html_context = dict() + +html_context["display_lower_left"] = True + +html_context["current_language"] = language + +current_version = "master" + +html_context["current_version"] = current_version +html_context["version"] = current_version diff --git a/doc/source/data_models/data-atm.rst b/doc/source/data_models/data-atm.rst deleted file mode 100644 index 311b265a294..00000000000 --- a/doc/source/data_models/data-atm.rst +++ /dev/null @@ -1,284 +0,0 @@ -.. _data-atm: - -Data Atmosphere (DATM) -====================== - -DATM is normally used to provide observational forcing data (or forcing data produced by a previous run using active components) to drive prognostic components. -In the case of CESM, these would be: CLM (I compset), POP2 (C compset), and POP2/CICE (G compset). -As a result, DATM variable settings are specific to the compset that will be targeted. -As examples, CORE2_NYF (CORE2 normal year forcing) is the DATM mode used in C and G compsets. -CLM_QIAN, CLMCRUNCEP, CLMGSWP3 and CLM1PT are DATM modes using observational data for forcing CLM in I compsets. - -.. _datm-xml-vars: - ------------------- -xml variables ------------------- -The following are ``$CASEROOT`` xml variables that CIME supports for DATM. -These variables are defined in ``$CIMEROOT/src/components/data_comps/datm/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and the resulting values are compset dependent. - -.. note:: These xml variables are used by the the datm's **cime_config/buildnml** script in conjunction with datm's **cime_config/namelist_definition_datm.xml** file to generate the namelist file ``datm_in``. - -.. 
csv-table:: "DATM xml variables" - :header: "xml variable", "description" - :widths: 20, 80 - - "DATM_MODE", "Mode for atmospheric component" - "", "Valid values are: CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP," - "", "CLMCRUNCEP_V5,CLMGSWP3,WW3,CPLHIST,CORE_IAF_JRA" - - "DATM_PRESAERO", "Optional prescribed aerosol forcing" - "DATM_TOPO", "Optional Surface topography" - "DATM_CO2_TSERIES", "Optional CO2 time series type" - - "DATM_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file " - "DATM_CPLHIST_CASE", "Coupler history forcing data mode - case name" - "DATM_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history data" - "DATM_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DATM_CPLHIST_YR_START" - "DATM_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop data over" - "DATM_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop data over" - - "DATM_CLMNCEP_YR_ALIGN", "I compsets only - simulation year corresponding to data starting year" - "DATM_CLMNCEP_YR_START", "I compsets only - data model starting year to loop data over" - "DATM_CLMNCEP_YR_END", "I compsets only - data model ending year to loop data over" - -.. note:: If ``DATM_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``ATM_DOMAIN_PATH`` and ``ATM_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DATM_CPLHIST_DOMAIN_FILE``. If ``DATM_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the datm component domain information is read in from the first coupler history file in the target stream and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. This is the default that should be used for this mode. Alternatively, ``DATM_CPLHIST_DOMAIN_FILE`` can be set to ``$ATM_DOMAIN_PATH/$ATM_DOMAIN_FILE`` in a non-default configuration. - -.. _datm-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DATM_MODE`` (described in :ref:`datm_mode`) sets the streams that are associated with DATM and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DATM on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DATM ``datamode`` values, as defined in the file ``namelist_definition_datm.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "This mode turns off the data model as a provider of data to the coupler. The ``atm_present`` flag will be set to ``false`` and the coupler assumes no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero except for aerosol deposition fields which will be set to a special value. 
" - "CLMNCEP", "In conjunction with NCEP climatological atmosphere data, provides the atmosphere forcing favored by the Land Model Working Group when coupling an active land model with observed atmospheric forcing. This mode replicates code previously found in CLM (circa 2005), before the LMWG started using the CIME coupling infrastructure and data models to do active-land-only simulations." - "CORE2_NYF", "Coordinated Ocean-ice Reference Experiments (CORE) Version 2 Normal Year Forcing." - "CORE2_IAF", "In conjunction with CORE Version 2 atmospheric forcing data, provides the atmosphere forcing favored by the Ocean Model Working Group when coupling an active ocean model with observed atmospheric forcing. This mode and associated data sets implement the CORE-IAF Version 2 forcing data, as developed by Large and Yeager (2008) at NCAR. Note that CORE2_NYF and CORE2_IAF work exactly the same way." - "CORE_IAF_JRA", "In conjunction with JRA-55 Project, provides the atmosphere forcing when coupling an active ocean model with observed atmospheric forcing. This mode and associated data sets implement the JRA-55 v1.3 forcing data." - -.. _datm_mode: - -------------------------------- -DATM_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DATM_MODE`` (defined in the ``config_component.xml`` file for DATM), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DATM_MODE`` based on the compset. - -.. csv-table:: "Relationship between DATM_MODE, datamode and streams" - :header: "DATM_MODE", "description-streams-datamode" - :widths: 15, 85 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: NULL" - "CORE2_NYF","CORE2 normal year forcing (C ang G compsets)" - "", "streams: CORE2_NYF.GISS,CORE2_NYF.GXGXS,CORE2_NYF.NCEP" - "", "datamode: CORE2_NYF" - "CORE2_IAF","CORE2 interannual year forcing (C ang G compsets)" - "", "streams: CORE2_IAF.GCGCS.PREC,CORE2_IAF.GISS.LWDN,CORE2_IAF.GISS.SWDN,CORE2_IAF.GISS.SWUP," - "", "CORE2_IAF.NCEP.DN10,CORE2_IAF.NCEP.Q_10,CORE2_IAF.NCEP.SLP_,CORE2_IAF.NCEP.T_10,CORE2_IAF.NCEP.U_10," - "", "CORE2_IAF.NCEP.V_10,CORE2_IAF.CORE2.ArcFactor" - "", "datamode: CORE2_IAF" - "CORE_IAF_JRA",JRA-55 intra-annual year forcing(C ang G compsets)" - "", "streams: CORE_IAF_JRA.PREC,CORE_IAF_JRA.LWDN,CORE_IAF_JRA.SWDN," - "", "CORE_IAF_JRA.Q_10,CORE_IAF_JRA.SLP_,CORE_IAF_JRA.T_10,CORE_IAF_JRA.U_10," - "", "CORE_IAF_JRA.V_10,CORE_IAF_JRA.CORE2.ArcFactor" - "", "datamode: CORE_IAF_JRA" - "CLM_QIAN_WISO","QIAN atm input data with water isotopes (I compsets)" - "", "streams: CLM_QIAN_WISO.Solar,CLM_QIAN_WISO.Precip,CLM_QIAN_WISO.TPQW" - "", "datamode: CLMNCEP" - "CLM_QIAN", "QIAN atm input data (I compsets)" - "", "streams: CLM_QIAN.Solar,CLM_QIAN.Precip,CLM_QIAN.TPQW" - "", "datamode: CLMNCEP" - "CLMCRUNCEP","CRUNCEP atm input data (I compsets)" - "", "streams: CLMCRUNCEP.Solar,CLMCRUNCEP.Precip,CLMCRUNCEP.TPQW" - "", "datamode: CLMNCEP" - "CLMCRUNCEP_V5","CRUNCEP atm input data (I compsets)" - "","streams: CLMCRUNCEP_V5.Solar,CLMCRUNCEP_V5.Precip,CLMCRUNCEP_V5.TPQW" - "","datamode: CLMNCEP" - "CLMGSWP3","GSWP3 atm input data (I compsets)" - "","streams: CLMGSWP3.Solar,CLMGSWP3.Precip,CLMGSWP3.TPQW" - "","datamode: CLMNCEP" - "CLM1PT", "single point tower site atm input data" - "","streams: CLM1PT.$ATM_GRID" - "","datamode: CLMNCEP" - "CPLHIST","user generated forcing data from using coupler history files used to spinup relevant prognostic 
components (for CESM this is CLM, POP and CISM)" - "","streams: CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux," - "","CPLHISTForcing.State3hr,CPLHISTForcing.State1hr" - "","datamode: CPLHIST" - "WW3","WW3 wave watch data from a short period of hi WW3 wave watch data from a short period of hi temporal frequency COREv2 data" - "","streams: WW3" - "","datamode: COPYALL" - --------------- -Namelists --------------- - -The DATM namelist file is ``datm_in`` (or ``datm_in_NNN`` for multiple instances). DATM namelists can be separated into two groups: *stream-independent* namelist variables that are specific to the DATM model and *stream-specific* namelist variables whose names are common to all the data models. - -Stream dependent input is in the namelist group ``"shr_strdata_nml`` which is discussed in :ref:`input streams ` and is the same for all data models. - -.. _datm-stream-independent-namelists: - -The stream-independent group is ``datm_nml`` and the DATM stream-independent namelist variables are: - -===================== ============================================================================================= -datm_nml vars description -===================== ============================================================================================= -decomp decomposition strategy (1d, root) - - 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename -force_prognostic_true TRUE => force prognostic behavior -bias_correct if set, include bias correction streams in namelist -anomaly_forcing if set, includ anomaly forcing streams in namelist -factorfn filename containing correction factors for use in CORE2 modes (CORE2_IAF and CORE2_NYF) -presaero if true, prescribed aerosols are sent from datm -iradsw frequency to update radiation in number of time steps (of hours if negative) -wiso_datm if true, turn on water isotopes -===================== ============================================================================================= - -.. _datm-mode-independent-streams: - ------------------------------------------- -Streams independent of DATM_MODE value ------------------------------------------- - -In general, each ``DATM_MODE`` xml variable is identified with a unique set of streams. -However, there are several streams in DATM that can accompany any ``DATM_MODE`` setting. -Currently, these are streams associated with prescribed aerosols, co2 time series, topography, anomoly forcing and bias correction. -These mode-independent streams are activated different, depending on the stream. - -- ``prescribed aerosol stream:`` - To add this stream, set ``$DATM_PRESAERO`` to a supported value other than ``none``. - -- ``co2 time series stream``: - To add this stream, set ``$DATM_CO2_TSERIES`` to a supported value other than ``none``. - -- ``topo stream``: - To add this stream, set ``$DATM_TOPO`` to a supported value other than ``none``. 
- -- ``anomaly forcing stream:`` - To add this stream, you need to add any of the following keywword/value pair to the end of ``user_nl_datm``: - :: - - Anomaly.Forcing.Precip = - Anomaly.Forcing.Temperature = - Anomaly.Forcing.Pressure = - Anomaly.Forcing.Humidity = - Anomaly.Forcing.Uwind = - Anomaly.Forcing.Vwind = - Anomaly.Forcing.Shortwave = - Anomaly.Forcing.Longwave = - -- ``bias_correct stream:`` - To add this stream, you need to add any of the following keywword/value pair to the end of ``user_nl_datm``: - :: - - BC.QIAN.CMAP.Precip = - BC.QIAN.GPCP.Precip = - BC.CRUNCEP.CMAP.Precip = - BC.CRUNCEP.GPCP.Precip = - -.. _datm-fields: - ----------------- -DATM Field names ----------------- - -DATM defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``datm_fld`` names for use within the data atmosphere model. - -.. csv-table:: "DATM internal field names" - :header: "datm_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "z", "Sa_z" - "topo", "Sa_topo" - "u", "Sa_u" - "v", "Sa_v" - "tbot", "Sa_tbot" - "ptem", "Sa_ptem" - "shum", "Sa_shum" - "dens", "Sa_dens" - "pbot", "Sa_pbot" - "pslv", "Sa_pslv" - "lwdn", "Faxa_lwdn" - "rainc", "Faxa_rainc" - "rainl", "Faxa_rainl" - "snowc", "Faxa_snowc" - "snowl", "Faxa_snowl" - "swndr", "Faxa_swndr" - "swvdr", "Faxa_swvdr" - "swndf", "Faxa_swndf" - "swvdf", "Faxa_swvdf" - "swnet", "Faxa_swnet" - "co2prog", "Sa_co2prog" - "co2diag", "Sa_co2diag" - "bcphidry", "Faxa_bcphidry" - "bcphodry", "Faxa_bcphodry" - "bcphiwet", "Faxa_bcphiwet" - "ocphidry", "Faxa_ocphidry" - "ocphodry", "Faxa_ocphodry" - "ocphiwet", "Faxa_ocphiwet" - "dstwet1", "Faxa_dstwet1" - "dstwet2", "Faxa_dstwet2" - "dstwet3", "Faxa_dstwet3" - "dstwet4", "Faxa_dstwet4" - "dstdry1", "Faxa_dstdry1" - "dstdry2", "Faxa_dstdry2" - "dstdry3", "Faxa_dstdry3" - "dstdry4", "Faxa_dstdry4" - "tref", "Sx_tref" - "qref", "Sx_qref" - "avsdr", "Sx_avsdr" - "anidr", "Sx_anidr" - "avsdf", "Sx_avsdf" - "anidf", "Sx_anidf" - "ts", "Sx_t" - "to", "So_t" - "snowhl", "Sl_snowh" - "lfrac", "Sf_lfrac" - "ifrac", "Sf_ifrac" - "ofrac", "Sf_ofrac" - "taux", "Faxx_taux" - "tauy", "Faxx_tauy" - "lat", "Faxx_lat" - "sen", "Faxx_sen" - "lwup", "Faxx_lwup" - "evap", "Faxx_evap" - "co2lnd", "Fall_fco2_lnd" - "co2ocn", "Faoo_fco2_ocn" - "dms", "Faoo_fdms_ocn" - "precsf", "Sa_precsf" - "prec_af", "Sa_prec_af" - "u_af", "Sa_u_af" - "v_af", "Sa_v_af" - "tbot_af", "Sa_tbot_af" - "pbot_af", "Sa_pbot_af" - "shum_af", "Sa_shum_af" - "swdn_af", "Sa_swdn_af" - "lwdn_af", "Sa_lwdn_af" - "rainc_18O", "Faxa_rainc_18O" - "rainc_HDO", "Faxa_rainc_HDO" - "rainl_18O", "Faxa_rainl_18O" - "rainl_HDO", "Faxa_rainl_HDO" - "snowc_18O", "Faxa_snowc_18O" - "snowc_HDO", "Faxa_snowc_HDO" - "snowl_18O", "Faxa_snowl_18O" - "snowl_HDO", "Faxa_snowl_HDO" - "shum_16O", "Sa_shum_16O" - "shum_18O", "Sa_shum_18O" diff --git a/doc/source/data_models/data-lnd.rst b/doc/source/data_models/data-lnd.rst deleted file mode 100644 index e22f6ec9f1f..00000000000 --- a/doc/source/data_models/data-lnd.rst +++ /dev/null @@ -1,154 +0,0 @@ -.. _data-lnd: - -Data Land (DLND) -================ - -The land model is unique because it supports land data and snow data (*lnd and sno*) almost as if they were two separate components, but they are in fact running in one component model through one interface. -The lnd (land) data consist of fields sent to the atmosphere. 
-This set of data is used when running DLND with an active atmosphere. -In general this is not a mode that is used or supported. -The sno (snow) data consist of fields sent to the glacier model. This set of data is used when running dlnd with an active glacier model (TG compsets). Both sets of data are assumed to be on the same grid. - -.. _dlnd-xml-vars: - ---------------- -xml variables ---------------- - -The following are xml variables that CIME supports for DLND. -These variables are defined in ``$CIMEROOT/src/components/data_comps/dlnd/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and are used by the DLND ``cime_config/buildnml`` script to generate the DLND namelist file ``dlnd_in`` and the required associated stream files for the case. - -.. note:: These xml variables are used by the the dlnd's **cime_config/buildnml** script in conjunction with dlnd's **cime_config/namelist_definition_dlnd.xml** file to generate the namelist file ``dlnd_in``. - -.. csv-table:: "DLND xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DLND_MODE", "Mode for data land component" - "", "Valid values are: NULL, CPLHIST, GLC_CPLHIST" - - "DLND_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file" - "DLND_CPLHIST_CASE", "Coupler history forcing data mode - case name" - "DLND_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history data" - "DLND_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DLND_CPLHIST_YR_START" - "DLND_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop data over" - "DLND_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop data over" - -.. note:: If ``DLND_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``LND_DOMAIN_PATH`` and ``LND_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DLND_CPLHIST_DOMAIN_FILE``. If ``DLND_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the dlnd component domain information is read in from the first coupler history file in the target stream and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. Alternatively, ``DLND_CPLHIST_DOMAIN_FILE`` can be set to ``$LND_DOMAIN_PATH/$LND_DOMAIN_FILE``. - -.. _dlnd-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DLND_MODE`` (described in :ref:`dlnd_mode`) sets the streams that are associated with DLND and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DLND on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DLND ``datamode`` values, as defined in the file ``namelist_definition_dlnd.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The ``lnd_present`` flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. 
This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." - -.. _dlnd_mode: - -------------------------------- -DLND_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DLND_MODE`` (defined in the ``config_component.xml`` file for DLND), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DLND_MODE`` based on the compset. - -.. csv-table:: "Relationship between DLND_MODE, datamode and streams" - :header: "DLND_MODE", "description-streams-datamode" - :widths: 20, 80 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: null" - "CPLHIST", "land forcing data (e.g. produced by CESM/CLM) from a previous model run are read in from coupler history files" - "", "streams: lnd.cplhist" - "", "datamode: COPYALL" - "GLC_CPLHIST", "glc coupling fields (e.g. produced by CESM/CLM) from a previous model run are read in from coupler history files" - "", "streams: sno.cplhist" - "", "datamode: COPYALL" - ---------- -Namelists ---------- - -The namelist file for DLND is ``dlnd_in`` (or ``dlnd_in_NNN`` for multiple instances). - -As is the case for all data models, DLND namelists can be separated into two groups, stream-independent and stream-dependent. - -The stream dependent group is :ref:`shr_strdata_nml`. - -.. _dlnd-stream-independent-namelists: - -The stream-independent group is ``dlnd_nml`` and the DLND stream-independent namelist variables are: - -===================== ====================================================== -decomp decomposition strategy (1d, root) - - 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``dlnd_in``, edit the file ``user_nl_dlnd``. - -.. _dlnd-mode-independent-streams: - --------------------------------------- -Streams independent of DLND_MODE value --------------------------------------- - -There are no datamode independent streams for DLND. - -.. _dlnd-fields: - ------------ -Field names ------------ - -DLND defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``dlnd_fld`` names below for use within the data land model. - -.. 
csv-table:: "DLND internal field names" - :header: "dlnd_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "t", "Sl_t" - "tref", "Sl_tref" - "qref", "Sl_qref" - "avsdr", "Sl_avsdr" - "anidr", "Sl_anidr" - "avsdf", "Sl_avsdf" - "anidf", "Sl_anidf" - "snowh", "Sl_snowh" - "taux", "Fall_taux" - "tauy", "Fall_tauy" - "lat", "Fall_lat" - "sen", "Fall_sen" - "lwup", "Fall_lwup" - "evap", "Fall_evap" - "swnet", "Fall_swnet" - "lfrac", "Sl_landfrac" - "fv", "Sl_fv" - "ram1", "Sl_ram1" - "flddst1", "Fall_flxdst1" - "flxdst2", "Fall_flxdst2" - "flxdst3", "Fall_flxdst3" - "flxdst4", "Fall_flxdst4" - "tsrfNN", "Sl_tsrfNN" - "topoNN", "Sl_topoNN" - "qiceNN", "Flgl_qiceNN" - -where NN = (00,01,02,..., ``glc_nec``), and ``glc_nec`` is the number of glacier elevation classes. -Note that the number of elevation classes on the input files must be the same as in the run. diff --git a/doc/source/data_models/data-model-science.rst b/doc/source/data_models/data-model-science.rst deleted file mode 100644 index b92a7331e51..00000000000 --- a/doc/source/data_models/data-model-science.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. _data-model-science: - -Data Model Science -================== - -When a given data model is run, the user must specify which *science mode* it will run in. -Each data model has a fixed set of fields that it must send to the coupler, but it is the choice of mode that specifies how that set of fields is to be computed. -Each mode activates various assumptions about what input fields are available from the input data streams, what input fields are available from the the coupler, and how to use this input data to compute the output fields sent to the coupler. - -In general, a mode might specify... - -- that fields be set to a time invariant constant (so that no input data is needed) -- that fields be taken directly from input data files (the input streams) -- that fields be computed using data read in from input files -- that fields be computed using data received from the coupler -- some combination of the above. - -If a science mode is chosen that is not consistent with the input data provided, the model may abort (perhaps with a "missing data" error message), or the model may send erroneous data to the coupler (for example, if a mode assumes an input stream has temperature in Kelvin, but it really has temperature in Celsius). -Such an error is unlikely unless a user has edited the run scripts to specify either non-standard input data or a non-standard science mode. -When editing the run scripts to use non-standard stream data or modes, users must be careful that the input data is consistent with the science mode and should verify that the data model is providing data to the coupler as expected. - -The data model mode is a character string that is set in the namelist variable ``datamode`` in the namelist group ``shr_strdata_nml``. Although each data model, -``datm``, ``dlnd``, ``drof``, ``docn``, ``dice`` and ``dwav`` has its own set of valid datamode values, two modes are common to all data models: ``COPYALL`` and ``NULL``. - -``dataMode = "COPYALL"`` - The default mode is ``COPYALL`` -- the model will assume *all* the data that must be sent to the coupler will be found in the input data streams, and that this data can be sent to the coupler, unaltered, except for spatial and temporal interpolation. - -``dataMode = "NULL"`` - ``NULL`` mode turns off the data model as a provider of data to the coupler. The ``model_present`` flag (eg. 
``atm_present``) will be set to false and the coupler will assume no exchange of data to or from the data model. diff --git a/doc/source/data_models/data-ocean.rst b/doc/source/data_models/data-ocean.rst deleted file mode 100644 index 899b6b40529..00000000000 --- a/doc/source/data_models/data-ocean.rst +++ /dev/null @@ -1,309 +0,0 @@ -.. _data-ocean: - -=================== -Data Ocean (DOCN) -=================== - -Data ocean can be run both as a prescribed component, simply reading -in SST data from a stream, or as a prognostic slab ocean model -component. - -The data ocean component (DOCN) always returns SSTs to the driver. -The atmosphere/ocean fluxes are computed in the coupler. Therefore, -the data ocean model does not compute fluxes like the data ice (DICE) -model. DOCN has two distinct modes of operation. DOCN can run as a -pure data model, reading in ocean SSTs (normally climatological) from -input datasets, performing time/spatial interpolations, and passing -these to the coupler. Alternatively, DOCN can compute updated SSTs by -running as a slab ocean model where bottom ocean heat flux convergence -and boundary layer depths are read in and used with the -atmosphere/ocean and ice/ocean fluxes obtained from the driver. - -DOCN running in prescribed mode assumes that the only field in the -input stream is SST and also that SST is in Celsius and must be -converted to Kelvin. All other fields are set to zero except for -ocean salinity, which is set to a constant reference salinity value. -Normally the ice fraction data (used for prescribed CICE) is found in -the same data files that provide SST data to the data ocean model -since SST and ice fraction data are derived from the same -observational data sets and are consistent with each other. For DOCN -prescribed mode, default yearly climatological datasets are provided -for various model resolutions. - -DOCN running as a slab ocean model is used in conjunction with active -ice mode running in full prognostic mode (e.g. CICE for CESM). This -mode computes a prognostic sea surface temperature and a freeze/melt -potential (surface Q-flux) used by the sea ice model. This -calculation requires an external SOM forcing data file that includes -ocean mixed layer depths and bottom-of-the-slab Q-fluxes. -Scientifically appropriate bottom-of-the-slab Q-fluxes are normally -ocean resolution dependent and are derived from the ocean model output -of a fully coupled CCSM run. Note that this mode no longer runs out -of the box, the default testing SOM forcing file is not scientifically -appropriate and is provided for testing and development purposes only. -Users must create scientifically appropriate data for their particular -application or use one of the standard SOM forcing files from the full -prognostic control runs. For CESM, some of these are available in the -`inputdata repository -`_. -The user then modifies the ``$DOCN_SOM_FILENAME`` variable in -env_run.xml to point to the appropriate SOM forcing dataset. - -.. note:: A tool is available to derive valid `SOM forcing `_ and more information on creating the SOM forcing is also available. - -.. _docn-xml-vars: - -------------- -xml variables -------------- - -The following are xml variables that CIME supports for DOCN. These -variables are defined in -``$CIMEROOT/src/components/data_comps/docn/cime_config/config_component.xml``. 
-These variables will appear in ``env_run.xml`` and are used by the -DOCN ``cime_config/buildnml`` script to generate the DOCN namelist -file ``docn_in`` and the required associated stream files for the -case. - -.. note:: These xml variables are used by the the docn's **cime_config/buildnml** script in conjunction with docn's **cime_config/namelist_definition_docn.xml** file to generate the namelist file ``docn_in``. - -.. csv-table:: "DOCN xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DOCN_MODE", "Data mode" - "", "Valid values are: null, prescribed, som, interannual, ww3" - "DOCN_SOM_FILENAME", "Sets SOM forcing data filename for pres runs, only used in D and E compset" - "SSTICE_STREAM", "Prescribed SST and ice coverage stream name." - "", "Sets SST and ice coverage stream name for prescribed runs." - "SSTICE_DATA_FILENAME", "Prescribed SST and ice coverage data file name." - "", "Sets SST and ice coverage data file name for DOCN prescribed runs." - "SSTICE_YEAR_ALIGN", "The model year that corresponds to SSTICE_YEAR_START on the data file." - "", "Prescribed SST and ice coverage data will be aligned so that the first year of" - "", "data corresponds to SSTICE_YEAR_ALIGN in the model. For instance, if the first" - "", "year of prescribed data is the same as the first year of the model run, this" - "", "should be set to the year given in RUN_STARTDATE." - "", "If SSTICE_YEAR_ALIGN is later than the model's starting year, or if the model is" - "", "run after the prescribed data ends (as determined by SSTICE_YEAR_END), the" - "", "default behavior is to assume that the data from SSTICE_YEAR_START to SSTICE_YEAR_END" - "", "cyclically repeats. This behavior is controlled by the *taxmode* stream option" - "SSTICE_YEAR_START", "The first year of data to use from SSTICE_DATA_FILENAME." - "", "This is the first year of prescribed SST and ice coverage data to use. For" - "", "example, if a data file has data for years 0-99, and SSTICE_YEAR_START is 10," - "", "years 0-9 in the file will not be used." - "SSTICE_YEAR_END", "The last year of data to use from SSTICE_DATA_FILENAME." - "", "This is the last year of prescribed SST and ice coverage data to use. For" - "", "example, if a data file has data for years 0-99, and value is 49," - "", "years 50-99 in the file will not be used." - -.. note:: For multi-year runs requiring AMIP datasets of sst/ice_cov fields, you need to set the xml variables for ``DOCN_SSTDATA_FILENAME``, ``DOCN_SSTDATA_YEAR_START``, and ``DOCN_SSTDATA_YEAR_END``. CICE in prescribed mode also uses these values. - -.. _docn-datamodes: - ---------------- -datamode values ---------------- - -The xml variable ``DOCN_MODE`` (described in :ref:`docn_mode`) sets the streams that are associated with DOCN and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DOCN on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DOCN ``datamode`` values, as defined in the file ``namelist_definition_docn.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The ocn_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." 
- "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." - "SSTDATA", "assumes the only field in the input stream is SST. It also assumes the SST is in Celsius and must be converted to Kelvin. All other fields are set to zero except for ocean salinity, which is set to a constant reference salinity value. Normally the ice fraction data is found in the same data files that provide SST data to the data ocean model. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other." - "IAF", "is the interannually varying version of SSTDATA" - "SOM", "(slab ocean model) mode is a prognostic mode. This mode computes a prognostic sea surface temperature and a freeze/melt potential (surface Q-flux) used by the sea ice model. This calculation requires an external SOM forcing data file that includes ocean mixed layer depths and bottom-of-the-slab Q-fluxes. Scientifically appropriate bottom-of-the-slab Q-fluxes are normally ocean resolution dependent and are derived from the ocean model output of a fully coupled CCSM run. Note that while this mode runs out of the box, the default SOM forcing file is not scientifically appropriate and is provided for testing and development purposes only. Users must create scientifically appropriate data for their particular application. A tool is available to derive valid SOM forcing." - -.. _docn_mode: - -------------------------------- -DOCN_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DOCN_MODE`` (defined in the ``config_component.xml`` file for DOCN), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DOCN_MODE`` based on the compset. - -.. csv-table:: "Relationship between DOCN_MODE, datamode and streams" - :header: "DOCN_MODE, "description-streams-datamode" - :widths: 20, 80 - - "null", "null mode" - "", "streams: none" - "", "datamode: null" - "prescribed","run with prescribed climatological SST and ice-coverage" - "","streams: prescribed" - "","datamode: SSTDATA" - "interannual", "run with interannual SST and ice-coverage" - "","streams: prescribed" - "","datamode: SSTDATA" - "som", "run in slab ocean mode" - "","streams: som" - "","datamode: SOM" - "ww3", "ww3 mode" - "", "streams: ww3" - "", "datamode: COPYALL" - -.. _docn-namelists: - ---------- -Namelists ---------- - -As is the case for all data models, DOCN namelists can be separated into two groups, stream-independent and stream-dependent. - -The namelist file for DOCN is ``docn_in`` (or ``docn_in_NNN`` for multiple instances). - -The stream dependent group is :ref:`shr_strdata_nml` . - -As part of the stream dependent namelist input, DOCN supports two science modes, ``SSTDATA`` (prescribed mode) and ``SOM`` (slab ocean mode). - -.. 
_docn-stream-independent-namelists: - -The stream-independent group is ``docn_nml`` and the DOCN stream-independent namelist variables are: - -===================== ====================================================== -decomp decomposition strategy (1d, root) - - 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in docn_in, edit the file user_nl_docn. - -.. _docn-mode-independent-streams: - ---------------------------------- -Datamode independent streams ---------------------------------- - -There are no datamode independent streams for DOCN. - -.. _docn-fields: - ------------ -Field names ------------ - -DOCN defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. - -.. note:: In general, the stream input file should translate the stream input variable names into the ``docn_fld`` names below for use within the data ocn model. - -.. csv-table:: "DOCN internal field names" - :header: "docn_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "t", "So_t" - "u", "So_u" - "v", "So_v" - "dhdx", "So_dhdx" - "dhdy", "So_dhdy" - "s", "So_s" - "h", "strm_h (internal to docn_comp_mod only)" - "qbot", "strm_qbot (internal to docn_comp_mod only)" - -.. _creating-sstdata-input-from-prognostic-run: - ---------------------------------------------------------------------- -Creating SSTDATA mode input from a fully prognostic run (CESM only) ---------------------------------------------------------------------- - -The following outlines the steps you would take to create monthly averages of SST and ice coverage from a previous fully prognostic run that can then be read as as stream data by DOCN. - -As an example, the following uses an f09_g16 CESM B-configuration simulation using CAM5 physics and with cosp enabled. The procedure to create the SST/ICE file is as follows: - -1. Save monthly averaged 'aice' information from cice code (this is the default). - -2. Save monthly averaged SST information from pop2. To do this, copy $SRCROOT/pop2/input_templates/gx1v6_tavg_contents to $CASEROOT/SourceMods/src.pop2 and change the 2 in front of SST to 1 for monthly frequency. - -3. Extract (using ncrcat) SST from monthly pop2 history files and form a single netcdf file containing just SST; change SST to SST_cpl. - :: - - > ncrcat -v SST case.pop.h.*.nc temp.nc - > ncrename -v SST,SST_cpl temp.nc sst_cpl.nc - -4. Extract aice from monthly cice history files and form a single netcdf file containing aice; change aice to ice_cov; divide values by 100 (to convert from percent to fraction). - :: - - > ncrcat -v aice case.cice.h.*.nc temp.nc - > ncrename -v aice,ice_cov temp.nc temp2.nc - > ncap2 -s 'ice_cov=ice_cov/100.' temp2.nc ice_cov.nc - -5. Modify fill values in the sst_cpl file (which are over land points) to have value -1.8 and remove fill and missing value designators; change coordinate lengths and names: to accomplish this, first run ncdump, then replace _ with -1.8 in SST_cpl, then remove lines with _FillValue and missing_value. - (Note: although it might be possible to merely change the fill value to -1.8, this is conforming to other SST/ICE files, which have SST_cpl explicitly set to -1.8 over land.) 
- To change coordinate lengths and names, replace nlon by lon, nlat by lat, TLONG by lon, TLAT by lat. - The last step is to run ncgen. Note: when using ncdump followed by ncgen, precision will be lost; however, one can specify -d 9,17 to maximize precision - as in the following example: - :: - - > ncdump -d 9,17 old.nc > old - > ncgen -o new.nc new - -6. Modify fill values in the ice_cov file (which are over land points) to have value 1 and remove fill and missing value designators; change coordinate lengths and names; patch longitude and latitude to replace missing values. - To accomplish this, first run ncdump, then replace _ with 1 in ice_cov, then remove lines with _FillValue and missing_value. - To change coordinate lengths and names, replace ni by lon, nj by lat, TLON by lon, TLAT by lat. - To patch longitude and latitude arrays, replace values of those arrays with those in sst_cpl file. - The last step is to run ncgen. - (Note: the replacement of longitude and latitude missing values by actual values should not be necessary but is safer.) - -7. Combine (using ncks) the two netcdf files. - :: - - > ncks -v ice_cov ice_cov.nc sst_cpl.nc - - Rename the file to ssticetemp.nc. - The time variable will refer to the number of days at the end of each month, counting from year 0, whereas the actual simulation began at year 1. - However, we want time values to be in the middle of each month, referenced to the first year of the simulation (first time value equals 15.5). - Extract (using ncks) time variable from existing amip sst file (for correct number of months - 132 in this example) into working netcdf file. - :: - - > ncks -d time,0,131 -v time amipsst.nc ssticetemp.nc - - Add date variable: ncdump date variable from existing amip sst file; modify first year to be year 0 instead of 1949 (do not including leading zeroes or it will interpret as octal) and use correct number of months; ncgen to new netcdf file; extract date (using ncks) and place in working netcdf file. - :: - - > ncks -v date datefile.nc ssticetemp.nc - - Add datesec variable: extract (using ncks) datesec (correct number of months) from existing amip sst file and place in working netcdf file. - :: - - > ncks -d time,0,131 -v datesec amipsst.nc ssticetemp.nc - -8. At this point, you have an SST/ICE file in the correct format. - -9. Due to CAM's linear interpolation between mid-month values, you need to apply a procedure to assure that the computed monthly means are consistent with the input data. - To do this, invoke ``$SRCROOT/components/cam/tools/icesst/bcgen`` and following the following steps: - - a. Rename SST_cpl to SST, and ice_cov to ICEFRAC in the current SST/ICE file: - :: - - > ncrename -v SST_cpl,SST -v ice_cov,ICEFRAC ssticetemp.nc - - b. In driver.f90, sufficiently expand the lengths of variables prev_history and history (16384 should be sufficient); also comment out the test that the climate year be between 1982 and 2001 (lines 152-158). - - c. In bcgen.f90 and setup_outfile.f90, change the dimensions of xlon and ???TODO xlat to (nlon,nlat); this is to accommodate use of non-cartesian ocean grid. - - d. In setup_outfile.f90, modify the 4th and 5th ???TODO arguments in the calls to wrap_nf_def_var for *lon* and *lat* to be *2* and *dimids*; this is to accommodate use of non-cartesian ocean grid. - - e. Adjust Makefile to have proper path for LIB_NETCDF and INC_NETCDF. - - f. Modify namelist accordingly. - - g. Make bcgen and execute per instructions. The resulting sstice_ts.nc file is the desired ICE/SST file. 
-
-10. Place the new SST/ICE file in the desired location and modify ``env_run.xml`` to have:
-
-    a. ``SSTICE_DATA_FILENAME`` point to the complete path of your SST/ICE file.
-
-    b. ``SSTICE_GRID_FILENAME`` correspond to the full path of (in this case) the gx1v6 grid file.
-
-    c. ``SSTICE_YEAR_START`` set to 0.
-
-    d. ``SSTICE_YEAR_END`` set to one less than the total number of years.
-
-    e. ``SSTICE_YEAR_ALIGN`` set to 1 (for CESM, since CESM starts counting at year 1).
diff --git a/doc/source/data_models/data-river.rst b/doc/source/data_models/data-river.rst
deleted file mode 100644
index 25cc22b656a..00000000000
--- a/doc/source/data_models/data-river.rst
+++ /dev/null
@@ -1,132 +0,0 @@
-.. _data-river:
-
-=================
-Data River (DROF)
-=================
-
-The data river model (DROF) provides river runoff data primarily to be used by the prognostic ocean component.
-This data can either be observational (climatological or interannual river data) or data from a previous model run that is output to coupler history files and then read back in by DROF.
-
-.. _drof-xml-vars:
-
--------------
-xml variables
--------------
-
-The following are xml variables that CIME supports for DROF.
-These variables are defined in ``$CIMEROOT/src/components/data_comps/drof/cime_config/config_component.xml``.
-These variables will appear in ``env_run.xml`` and are used by the DROF ``cime_config/buildnml`` script to generate the DROF namelist file ``drof_in`` and the required associated stream files for the case.
-
-.. note:: These xml variables are used by the drof's **cime_config/buildnml** script in conjunction with drof's **cime_config/namelist_definition_drof.xml** file to generate the namelist file ``drof_in``.
-
-.. csv-table:: "DROF xml variables"
-   :header: "xml variable", "description"
-   :widths: 15, 85
-
-   "DROF_MODE", "Data mode"
-   "", "Valid values are: NULL,CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1,IAF_JRA"
-
-   "DROF_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file"
-   "DROF_CPLHIST_CASE", "Coupler history forcing data mode - case name"
-   "DROF_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history forcing data"
-   "DROF_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DROF_CPLHIST_YR_START"
-   "DROF_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop forcing data over"
-   "DROF_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop forcing data over"
-
-.. note:: If ``DROF_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``ROF_DOMAIN_PATH`` and ``ROF_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DROF_CPLHIST_DOMAIN_FILE``. If ``DROF_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the drof component domain information is read in from the first coupler history file in the target stream, and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. This is the default and recommended setting for ``CPLHIST`` mode. Alternatively, ``DROF_CPLHIST_DOMAIN_FILE`` can be set to ``$ROF_DOMAIN_PATH/$ROF_DOMAIN_FILE`` in a non-default configuration.
-
-.. 
_drof-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DROF_MODE`` (described in :ref:`drof_mode`) sets the streams that are associated with DROF and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DROF on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DROF ``datamode`` values, as defined in the file ``namelist_definition_drof.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The rof_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "Copies all fields directly from the input data streams Any required fields not found on an input stream will be set to zero." - ---------- -Namelists ---------- - -The data river runoff model (DROF) provides data river input to prognostic components such as the ocean. - -The namelist file for DROF is ``drof_in``. - -As is the case for all data models, DROF namelists can be separated into two groups, stream-independent and stream-dependent. -The stream dependent group is :ref:`shr_strdata_nml`. -The stream-independent group is ``drof_nml`` and the DROF stream-independent namelist variables are: - -.. _drof-stream-independent-namelists: - -===================== ====================================================== -decomp decomposition strategy (1d, root) - - 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``drof_in``, edit the file ``user_nl_drof`` in your case directory. - -.. _drof_mode: - -------------------------------- -DROF_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DROF_MODE`` (defined in the ``config_component.xml`` file for DROF), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DROF_MODE`` based on the compset. - -.. csv-table:: "Relationship between DROF_MODE, datamode and streams" - :header: "DROF_MODE", "description-streams-datamode" - :widths: 15, 85 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: NULL" - "DIATREN_ANN_RX1", "Reads in annual forcing river data used for CORE2 forcing runs." - "", "streams: rof.diatren_ann_rx1" - "", "datamode: COPYALL" - "DIATREN_IAF_RX1", "Reads in intra-annual forcing river data used for CORE2 forcing runs." - "", "streams: rof.diatren_iaf_rx1" - "", "datamode: COPYALL" - "CPLHIST", "Reads in data from coupler history files generated by a previous run." - "", "streams: rof.cplhist" - "", "datamode: COPYALL" - "IAF_JRA", "Reads in intra-annual forcing river data used for JRA-55 forcing runs." - "", "streams: rof.iaf_jra" - "", "datamode: COPYALL" - -.. _drof-mode-independent-streams: - ------------------------------------------- -Streams independent of DROF_MODE value ------------------------------------------- - -There are no datamode independent streams for DROF. - -.. 
_drof-fields: - ----------------- -DROF Field names ----------------- - -DROF defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``drof_fld`` names for use within the data rofosphere model. - -.. csv-table:: "DROF internal field names" - :header: "drof_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "roff", "Forr_rofl" - "ioff", "Forr_rofi" diff --git a/doc/source/data_models/data-seaice.rst b/doc/source/data_models/data-seaice.rst deleted file mode 100644 index 1c44c623b59..00000000000 --- a/doc/source/data_models/data-seaice.rst +++ /dev/null @@ -1,180 +0,0 @@ -.. _data-seaice: - -Data Ice (DICE) -================ - -DICE is a combination of a data model and a prognostic model. -The data functionality reads in ice coverage. -The prognostic functionality calculates the ice/atmosphere and ice/ocean fluxes. -DICE receives the same atmospheric input from the coupler as the active CICE model (i.e., atmospheric states, shortwave fluxes, and ocean ice melt flux) and acts very similarly to CICE running in prescribed mode. -Currently, this component is only used to drive POP in "C" compsets. - -.. _dice-xml-vars: - ---------------- -xml variables ---------------- -The following are xml variables that CIME supports for DICE. -These variables are defined in ``$CIMEROOT/src/components/data_comps/dice/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and are used by the DICE ``cime_config/buildnml`` script to generate the DICE namelist file ``dice_in`` and the required associated stream files for the case. - -.. note:: These xml variables are used by the the dice's **cime_config/buildnml** script in conjunction with dice's **cime_config/namelist_definition_dice.xml** file to generate the namelist file ``dice_in``. - -.. csv-table:: "DICE xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DICE_MODE", "Mode for sea-ice component" - "","Valid values are: null, prescribed, ssmi, ssmi_iaf, ww3 " - - -.. _dice-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DICE_MODE`` (described in :ref:`dice_mode`) sets the streams that are associated with DICE and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DICE on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DICE ``datamode`` values, as defined in the file ``namelist_definition_dice.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The ice_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." - "SSTDATA","Is a prognostic mode. It requires data be sent to the ice model. 
Ice fraction (extent) data is read from an input stream, atmosphere state variables are received from the coupler, and then an atmosphere-ice surface flux is computed and sent to the coupler. It is called ``SSTDATA`` mode because normally the ice fraction data is found in the same data files that provide SST data to the data ocean model. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other."
-
-.. _dice_mode:
-
--------------------------------
-DICE_MODE, datamode and streams
--------------------------------
-
-The following table describes the valid values of ``DICE_MODE`` (defined in the ``config_component.xml`` file for DICE), and how they relate to the associated input streams and the ``datamode`` namelist variable.
-CIME will generate a value of ``DICE_MODE`` based on the compset.
-
-.. csv-table:: "Relationship between DICE_MODE, datamode and streams"
-   :header: "DICE_MODE", "description-streams-datamode"
-   :widths: 20, 80
-
-   "null", "null mode"
-   "", "streams: none"
-   "", "datamode: null"
-   "prescribed","prognostic mode - requires data to be sent to DICE"
-   "","streams: prescribed"
-   "","datamode: SSTDATA"
-   "ssmi", "Special Sensor Microwave Imager climatological data"
-   "","streams: SSMI"
-   "","datamode: SSTDATA"
-   "ssmi_iaf", "Special Sensor Microwave Imager inter-annual forcing data"
-   "","streams: SSMI_IAF"
-   "","datamode: SSTDATA"
-   "ww3", "ww3 mode"
-   "", "streams: ww3"
-   "", "datamode: COPYALL"
-
-If DICE_MODE is set to ``ssmi``, ``ssmi_iaf`` or ``prescribed``, it is a prognostic mode and requires data to be sent to the ice model.
-Ice fraction (extent) data is read from an input stream, atmosphere state variables are received from the coupler, and then an atmosphere-ice surface flux is computed and sent to the coupler.
-Normally the ice fraction data is found in the same data files that provide SST data to the data ocean model.
-They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other.
-
-.. _dice-namelists:
-
----------
-Namelists
----------
-
-The namelist file for DICE is ``dice_in`` (or ``dice_in_NNN`` for multiple instances).
-
-As is the case for all data models, DICE namelists can be separated into two groups, stream-independent and stream-dependent.
-
-The stream dependent group is :ref:`shr_strdata_nml`.
-
-.. _dice-stream-independent-namelists:
-
-The stream-independent group is ``dice_nml`` and the DICE stream-independent namelist variables are:
-
-===================== ======================================================
-decomp                decomposition strategy (1d, root)
-
-                      1d => vector decomposition, root => run on master task
-flux_qacc             activates water accumulation/melt wrt Q
-flux_qacc0            initial water accumulation value
-flux_qmin             bound on melt rate
-flux_swpf             short-wave penetration factor
-restfilm              master restart filename
-restfils              stream restart filename
-force_prognostic_true TRUE => force prognostic behavior
-===================== ======================================================
-
-To change the namelist settings in ``dice_in``, edit the file ``user_nl_dice``; a short illustrative example is given at the end of this section.
-
-.. _dice-mode-independent-streams:
-
---------------------------------------
-Streams independent of DICE_MODE value
---------------------------------------
-
-There are no datamode independent streams for DICE.
-
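-As with the other data models, changes to the stream-independent variables listed above are made by adding keyword/value pairs to ``user_nl_dice``. A minimal, purely illustrative sketch (the values shown are not recommendations and should be checked against **namelist_definition_dice.xml**):
-::
-
-   ! user_nl_dice -- illustrative values only
-   decomp = 'root'
-   restfilm = 'my_dice_restart_file.nc'
-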
-.. _dice-fields:
-
------------
-Field names
------------
-
-DICE defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler.
-
-.. note:: In general, the stream input file should translate the stream input variable names into the ``dice_fld`` names below for use within the data ice model.
-
-.. csv-table:: "DICE internal field names"
-   :header: "dice_fld (avifld)", "driver_fld (avofld)"
-   :widths: 30, 30
-
-   "ifrac", "Si_ifrac"
diff --git a/doc/source/data_models/data-wave.rst b/doc/source/data_models/data-wave.rst
deleted file mode 100644
index 53ea0f9ae46..00000000000
--- a/doc/source/data_models/data-wave.rst
+++ /dev/null
@@ -1,127 +0,0 @@
-.. _data-wave:
-
-=================
-Data Wave (DWAV)
-=================
-
-The data wave model (DWAV) provides data wave forcing primarily to be used by the prognostic ocean component.
-Currently, this data is climatological.
-
-.. _dwav-xml-vars:
-
--------------
-xml variables
--------------
-
-The following are XML variables that CIME supports for DWAV.
-These variables will appear in ``env_run.xml`` and are used by the DWAV ``cime_config/buildnml`` script to generate the DWAV namelist file ``dwav_in`` and the required associated stream files for the case.
-
-.. note:: These XML variables are used by the DWAV **cime_config/buildnml** script in conjunction with the DWAV **cime_config/namelist_definition_dwav.xml** file to generate the namelist file ``dwav_in``.
-
-.. csv-table:: "DWAV xml variables"
-   :header: "xml variable", "description"
-   :widths: 15, 85
-
-   "DWAV_MODE", "Data mode"
-   "", "Valid values are: NULL, CLIMO"
-
-.. _dwav-datamodes:
-
---------------------
-DWAV datamode values
---------------------
-
-One of the variables in ``shr_strdata_nml`` is ``datamode``, whose value is a character string.
-Each data model has a unique set of ``datamode`` values that it supports.
-
-The valid values for ``datamode`` are set by the XML variable ``DWAV_MODE`` in the ``config_component.xml`` file for DWAV.
-CIME will generate a value of ``datamode`` that is compset dependent.
-
-The following are the supported DWAV datamode values and their relationship to the ``DWAV_MODE`` xml variable value.
-
-.. csv-table:: Relationship between ``DWAV_MODE`` xml variables and ``datamode`` namelist variables
-   :header: "DWAV_MODE (xml)", "datamode (namelist)"
-   :widths: 15, 90
-
-   "NULL", "NULL"
-   "", "This mode turns off the data model as a provider of data to the coupler."
-   "", "The ``wav_present`` flag will be set to ``false`` and the coupler assumes no exchange of data to or from the data model."
-   "CLIMO", "COPYALL"
-   "", "Examines the fields found in all input data streams and if any input field names match the field names used internally,"
-   "", "they are copied into the export array and passed directly to the coupler without any special user code."
-
-
-.. _dwav-namelists:
-
----------
-Namelists
----------
-
-The data wave model (DWAV) provides data wave input to prognostic components such as the ocean.
-
-The namelist file for DWAV is ``dwav_in``.
-
-As is the case for all data models, DWAV namelists can be separated into two groups, stream-independent and stream-dependent.
-The stream dependent group is :ref:`shr_strdata_nml`.
-The stream-independent group is ``dwav_nml`` and the DWAV stream-independent namelist variables are:
-
-.. 
_dwav-stream-independent-namelists: - -===================== ====================================================== -decomp decomposition strategy (1d, root) - - 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``dwav_in``, edit the file ``user_nl_dwav`` in your case directory. - -.. _dwav_mode: - -------------------------------- -DWAV_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DWAV_MODE`` (defined in the ``config_component.xml`` file for DWAV), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DWAV_MODE`` based on the compset. - -.. csv-table:: "Relationship between DWAV_MODE, datamode and streams" - :header: "DWAV_MODE", "description-streams-datamode" - :widths: 15, 85 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: NULL" - - -.. _dwav-mode-independent-streams: - --------------------------------------- -Streams independent of DWAV_MODE value --------------------------------------- - -There are no datamode independent streams for DWAV. - -.. _dwav-fields: - ----------------- -Field names ----------------- - -DWAV defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``dwav_fld`` names below for use within the data wave model. - -.. csv-table:: "DWAV internal field names" - :header: "dwav_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "lamult", "Sw_lamult" - "ustokes","Sw_ustokes" - "vstokes", "Sw_vstokes" - - - - - diff --git a/doc/source/data_models/design-details.rst b/doc/source/data_models/design-details.rst deleted file mode 100644 index ad24102b214..00000000000 --- a/doc/source/data_models/design-details.rst +++ /dev/null @@ -1,225 +0,0 @@ -.. _design-details: - -================ - Design Details -================ - ----------------------- -Data Model Performance ----------------------- - -There are two primary costs associated with strdata: reading data and spatially mapping data. -Time interpolation is relatively cheap in the current implementation. -As much as possible, redundant operations are minimized. -Fill and mapping weights are generated at initialization and saved. -The upper and lower bound mapped input data is saved between time steps to reduce mapping costs in cases where data is time interpolated more often than new data is read. -If the input data timestep is relatively small (for example, hourly data as opposed to daily or monthly data) the cost of reading input data can be quite large. -Also, there can be significant variation in cost of the data model over the coarse of the run, for instance, when new inputdata must be read and interpolated, although it's relatively predictable. -The present implementation doesn't support changing the order of operations, for instance, time interpolating the data before spatial mapping. -Because the present computations are always linear, changing the order of operations will not fundamentally change the results. -The present order of operations generally minimizes the mapping cost for typical data model use cases. 
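-
-One way to trim the mapping setup cost just described is to compute the interpolation weights once, write them to a file, and reuse that file on subsequent runs via the map read/write entries of ``shr_strdata_nml`` (described in the namelist table later in this documentation). A hedged sketch with a purely illustrative file name; the exact namelist spelling (``mapwrite`` or ``mapwrit``) should be verified against the generated ``*_in`` file in ``CaseDocs``:
-::
-
-   &shr_strdata_nml
-     ! first run: write the weights computed on the fly to a reusable file
-     mapwrite = 'map_stream_to_model_bilin.nc'
-     ! later runs: read the saved weights instead of recomputing them
-     !mapread  = 'map_stream_to_model_bilin.nc'
-   /
-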
- ----------------------- -Data Model Limitations ----------------------- - -There are several limitations in both options and usage within the data models at the present time. -Spatial interpolation can only be performed from a two-dimensional latitude-longitude input grid. -The target grid can be arbitrary but the source grid must be able to be described by simple one-dimensional lists of longitudes and latitudes, although they don't have to be equally spaced. - ----------------------- -IO Through Data Models ----------------------- - -At the present time, data models can only read netcdf data, and IO is handled through either standard netcdf interfaces or through the PIO library using either netcdf or pnetcdf. -If standard netcdf is used, global fields are read and then scattered one field at a time. -If PIO is used, then data will be read either serially or in parallel in chunks that are approximately the global field size divided by the number of IO tasks. -If pnetcdf is used through PIO, then the pnetcdf library must be included during the build of the model. -The pnetcdf path and option is hardwired into the ``Macros.make`` file for the specific machine. -To turn on ``pnetcdf`` in the build, make sure the ``Macros.make`` variables ``PNETCDF_PATH``, ``INC_PNETCDF``, and ``LIB_PNETCDF`` are set and that the PIO ``CONFIG_ARGS`` sets the ``PNETCDF_PATH`` argument. - -Beyond just the option of selecting IO with PIO, several namelist variables are available to help optimize PIO IO performance. -Those are **TODO** - list these. -The total mpi tasks that can be used for IO is limited to the total number of tasks used by the data model. -Often though, using fewer IO tasks results in improved performance. -In general, [io_root + (num_iotasks-1)*io_stride + 1] has to be less than the total number of data model tasks. -In practice, PIO seems to perform optimally somewhere between the extremes of 1 task and all tasks, and is highly machine and problem dependent. - -------------- -Restart Files -------------- -Restart files are generated automatically by the data models based on a flag sent from the driver. -The restart files must meet the CIME naming convention and an ``rpointer`` file is generated at the same time. -An ``rpointer`` file is a *restart pointer* file which contains the name of the most recently created restart file. -Normally, if restart files are read, the restart filenames are specified in the ``rpointer`` file. -Optionally though, there are namelist variables such as ``restfilm`` to specify the restart filenames via namelist. If those namelist variables are set, the ``rpointer`` file will be ignored. - -In most cases, no restart file is required for the data models to restart exactly. -This is because there is no memory between timesteps in many of the data model science modes. -If a restart file is required, it will be written automatically and then must be used to continue the previous run. - -There are separate stream restart files that only exist for performance reasons. -A stream restart file contains information about the time axis of the input streams. -This information helps reduce the startup costs associated with reading the input dataset time axis information. -If a stream restart file is missing, the code will restart without it but may need to reread data from the input data files that would have been stored in the stream restart file. -This will take extra time but will not impact the results. - -.. 
_data-structures: - ---------------- -Data Structures ---------------- - -The data models all use three fundamental routines. - -- $CIMEROOT/src/utils/shr_dmodel_mod.F90 - -- $CIMEROOT/src/utils/shr_stream_mod.F90 - -- $CIMEROOT/src/utils/shr_strdata.F90 - -These routines contain three data structures that are leveraged by all the data model code. - -The most basic type, ``shr_stream_fileType`` is contained in ``shr_stream_mod.F90`` and specifies basic information related to a given stream file. - -.. code-block:: Fortran - - type shr_stream_fileType - character(SHR_KIND_CL) :: name = shr_stream_file_null ! the file name - logical :: haveData = .false. ! has t-coord data been read in? - integer (SHR_KIND_IN) :: nt = 0 ! size of time dimension - integer (SHR_KIND_IN),allocatable :: date(:) ! t-coord date: yyyymmdd - integer (SHR_KIND_IN),allocatable :: secs(:) ! t-coord secs: elapsed on date - end type shr_stream_fileType - -The following type, ``shr_stream_streamType`` contains information -that encapsulates the information related to all files specific to a -target stream. These are the list of files found in the ``domainInfo`` -and ``fieldInfo`` blocks of the target stream description file (see the overview of the :ref:`stream_description_file`). - -.. code-block:: Fortran - - type shr_stream_streamType - !private ! no public access to internal components - !--- input data file names and data --- - logical :: init ! has stream been initialized? - integer (SHR_KIND_IN),pointer :: initarr(:) => null()! surrogate for init flag - integer (SHR_KIND_IN) :: nFiles ! number of data files - character(SHR_KIND_CS) :: dataSource ! meta data identifying data source - character(SHR_KIND_CL) :: filePath ! remote location of data files - type(shr_stream_fileType), allocatable :: file(:) ! data specific to each file - - !--- specifies how model dates align with data dates --- - integer(SHR_KIND_IN) :: yearFirst ! first year to use in t-axis (yyyymmdd) - integer(SHR_KIND_IN) :: yearLast ! last year to use in t-axis (yyyymmdd) - integer(SHR_KIND_IN) :: yearAlign ! align yearFirst with this model year - integer(SHR_KIND_IN) :: offset ! offset in seconds of stream data - character(SHR_KIND_CS) :: taxMode ! cycling option for time axis - - !--- useful for quicker searching --- - integer(SHR_KIND_IN) :: k_lvd,n_lvd ! file/sample of least valid date - logical :: found_lvd ! T <=> k_lvd,n_lvd have been set - integer(SHR_KIND_IN) :: k_gvd,n_gvd ! file/sample of greatest valid date - logical :: found_gvd ! T <=> k_gvd,n_gvd have been set - - !---- for keeping files open - logical :: fileopen ! is current file open - character(SHR_KIND_CL) :: currfile ! current filename - type(file_desc_t) :: currpioid ! current pio file desc - - !--- stream data not used by stream module itself --- - character(SHR_KIND_CXX):: fldListFile ! field list: file's field names - character(SHR_KIND_CXX):: fldListModel ! field list: model's field names - character(SHR_KIND_CL) :: domFilePath ! domain file: file path of domain file - character(SHR_KIND_CL) :: domFileName ! domain file: name - character(SHR_KIND_CS) :: domTvarName ! domain file: time-dim var name - character(SHR_KIND_CS) :: domXvarName ! domain file: x-dim var name - character(SHR_KIND_CS) :: domYvarName ! domain file: y-dim var name - character(SHR_KIND_CS) :: domZvarName ! domain file: z-dim var name - character(SHR_KIND_CS) :: domAreaName ! domain file: area var name - character(SHR_KIND_CS) :: domMaskName ! 
domain file: mask var name - - character(SHR_KIND_CS) :: tInterpAlgo ! Algorithm to use for time interpolation - character(SHR_KIND_CL) :: calendar ! stream calendar - end type shr_stream_streamType - -Finally, the ``shr_strdata_type`` is the heart of the CIME data -model implemenentation and contains information for all the streams -that are active for the target data model. The first part of the -``shr_strdata_type`` is filled in by the namelist values read in from the -namelist group (see the :ref:`stream data namelist section `). - -.. code-block:: Fortran - - type shr_strdata_type - ! --- set by input namelist --- - character(CL) :: dataMode ! flags physics options wrt input data - character(CL) :: domainFile ! file containing domain info - character(CL) :: streams (nStrMax) ! stream description file names - character(CL) :: taxMode (nStrMax) ! time axis cycling mode - real(R8) :: dtlimit (nStrMax) ! dt max/min limit - character(CL) :: vectors (nVecMax) ! define vectors to vector map - character(CL) :: fillalgo(nStrMax) ! fill algorithm - character(CL) :: fillmask(nStrMax) ! fill mask - character(CL) :: fillread(nStrMax) ! fill mapping file to read - character(CL) :: fillwrit(nStrMax) ! fill mapping file to write - character(CL) :: mapalgo (nStrMax) ! scalar map algorithm - character(CL) :: mapmask (nStrMax) ! scalar map mask - character(CL) :: mapread (nStrMax) ! regrid mapping file to read - character(CL) :: mapwrit (nStrMax) ! regrid mapping file to write - character(CL) :: tintalgo(nStrMax) ! time interpolation algorithm - integer(IN) :: io_type ! io type, currently pnetcdf or netcdf - - !--- data required by cosz t-interp method, --- - real(R8) :: eccen ! orbital eccentricity - real(R8) :: mvelpp ! moving vernal equinox long - real(R8) :: lambm0 ! mean long of perihelion at vernal equinox (radians) - real(R8) :: obliqr ! obliquity in degrees - integer(IN) :: modeldt ! data model dt in seconds (set to the coupling frequency) - - ! --- data model grid, public --- - integer(IN) :: nxg ! data model grid lon size - integer(IN) :: nyg ! data model grid lat size - integer(IN) :: nzg ! data model grid vertical size - integer(IN) :: lsize ! data model grid local size - type(mct_gsmap) :: gsmap ! data model grid global seg map - type(mct_ggrid) :: grid ! data model grid ggrid - type(mct_avect) :: avs(nStrMax) ! data model stream attribute vectors - - ! --- stream specific arrays, stream grid --- - type(shr_stream_streamType) :: stream(nStrMax) - type(iosystem_desc_t), pointer :: pio_subsystem => null() - type(io_desc_t) :: pio_iodesc(nStrMax) - integer(IN) :: nstreams ! actual number of streams - integer(IN) :: strnxg(nStrMax) ! stream grid lon sizes - integer(IN) :: strnyg(nStrMax) ! stream grid lat sizes - integer(IN) :: strnzg(nStrMax) ! tream grid global sizes - logical :: dofill(nStrMax) ! true if stream grid is different from data model grid - logical :: domaps(nStrMax) ! true if stream grid is different from data model grid - integer(IN) :: lsizeR(nStrMax) ! stream local size of gsmapR on processor - type(mct_gsmap) :: gsmapR(nStrMax) ! stream global seg map - type(mct_rearr) :: rearrR(nStrMax) ! rearranger - type(mct_ggrid) :: gridR(nStrMax) ! local stream grid on processor - type(mct_avect) :: avRLB(nStrMax) ! Read attrvect - type(mct_avect) :: avRUB(nStrMax) ! Read attrvect - type(mct_avect) :: avFUB(nStrMax) ! Final attrvect - type(mct_avect) :: avFLB(nStrMax) ! Final attrvect - type(mct_avect) :: avCoszen(nStrMax) ! 
data assocaited with coszen time interp - type(mct_sMatP) :: sMatPf(nStrMax) ! sparse matrix map for fill on stream grid - type(mct_sMatP) :: sMatPs(nStrMax) ! sparse matrix map for mapping from stream to data model grid - integer(IN) :: ymdLB(nStrMax) ! lower bound time for stream - integer(IN) :: todLB(nStrMax) ! lower bound time for stream - integer(IN) :: ymdUB(nStrMax) ! upper bound time for stream - integer(IN) :: todUB(nStrMax) ! upper bound time for stream - real(R8) :: dtmin(nStrMax) - real(R8) :: dtmax(nStrMax) - - ! --- internal --- - integer(IN) :: ymd ,tod - character(CL) :: calendar ! model calendar for ymd,tod - integer(IN) :: nvectors ! number of vectors - integer(IN) :: ustrm (nVecMax) - integer(IN) :: vstrm (nVecMax) - character(CL) :: allocstring - end type shr_strdata_type - diff --git a/doc/source/data_models/index.rst b/doc/source/data_models/index.rst deleted file mode 100644 index fb7660a4bee..00000000000 --- a/doc/source/data_models/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _data-models: - -.. on documentation master file, created by - sphinx-quickstart on Tue Jan 31 19:46:36 2017. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -#################### - Data Models -#################### - -.. toctree:: - :maxdepth: 2 - :numbered: - - introduction.rst - input-namelists.rst - input-streams.rst - design-details.rst - data-model-science.rst - data-atm.rst - data-lnd.rst - data-seaice.rst - data-ocean.rst - data-river.rst - data-wave.rst - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/doc/source/data_models/input-namelists.rst b/doc/source/data_models/input-namelists.rst deleted file mode 100644 index 533f1c1b2f9..00000000000 --- a/doc/source/data_models/input-namelists.rst +++ /dev/null @@ -1,87 +0,0 @@ -.. _input-namelists: - -Input Namelists -=============== - -Each data model has two namelist groups in its input namelist file: a **stream-dependent** and a **stream-independent** namelist group. - -The stream-dependent namelist group (``shr_strdata_nml``) specifies the data model mode, stream description text files, and interpolation options. -The stream description files will be provided as separate input files and contain the files and fields that need to be read. -The stream-independent namelist group (one of ``[datm_nml, dice_nml, dlnd_nml, docn_nml, drof_nml, dwav_nml]``) contains namelist input such as the data model decomposition, etc. - -For users wanting to introduce new data sources for any data model, it is important to know what modes are supported and the internal field names in the data model. -That information will be used in the ``shr_strdata_nml`` namelist and stream input files. - -Users will primarily set up different data model configurations through namelist settings. -**The stream input options and format are identical for all data models**. -The data model-specific namelist has significant overlap between data models, but each data model has a slightly different set of input namelist variables and each model reads that namelist from a unique filename. -The detailed namelist options for each data model will be described later, but each model will specify a filename or filenames for stream namelist input and each ``shr_strdata_nml`` namelist will specify a set of stream input files. 
- -The following example illustrates the basic set of namelist inputs:: - - &dlnd_nml - decomp = '1d' - / - &shr_strdata_nml - dataMode = 'CPLHIST' - domainFile = 'grid.nc' - streams = 'streama', 'streamb', 'streamc' - mapalgo = 'interpa', 'interpb', 'interpc' - / - -As mentioned above, the ``dataMode`` namelist variable that is associated with each data model specifies if there are any additional operations that need to be performed on that data model's input streams before return to the driver. -At a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``. - -- ``NULL`` - turns off the data model as a provider of data to the coupler. - -- ``COPYALL`` - copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero. - -Three stream description files (see :ref:`input streams`) are then expected to be available, ``streama``, ``streamb`` and ``streamc``. -Those files specify the input data filenames, input data grids, and input fields that are expected, among other things. -The stream files are **not** Fortran namelist format. -Their format and options will be described later. -As an example, one of the stream description files might look like -:: - - - - GENERIC - - - - dn10 dens - slp_ pslv - q_10 shum - t_10 tbot - u_10 u - v_10 v - - - /glade/proj3/cseg/inputdata/atm/datm7/NYF - - - 0 - - - nyf.ncep.T62.050923.nc - - - - - time time - lon lon - lat lat - area area - mask mask - - - /glade/proj3/cseg/inputdata/atm/datm7/NYF - - - nyf.ncep.T62.050923.nc - - - - - -In general, these examples of input files are not complete, but they do show the general hierarchy and feel of the data model input. diff --git a/doc/source/data_models/input-streams.rst b/doc/source/data_models/input-streams.rst deleted file mode 100644 index 3c4c0080924..00000000000 --- a/doc/source/data_models/input-streams.rst +++ /dev/null @@ -1,469 +0,0 @@ -.. _input-streams: - -Input Streams -============= - --------- -Overview --------- -An *input data stream* is a time-series of input data files where all the fields in the stream are located in the same data file and all share the same spatial and temporal coordinates (ie. are all on the same grid and share the same time axis). Normally a time axis has a uniform dt, but this is not a requirement. - -The data models can have multiple input streams. - -The data for one stream may be all in one file or may be spread over several files. For example, 50 years of monthly average data might be contained all in one data file or it might be spread over 50 files, each containing one year of data. - -The data models can *loop* over stream data -- i.e., repeatedly cycle over some subset of an input stream's time axis. When looping, the models can only loop over whole years. For example, an input stream might have SST data for years 1950 through 2000, but a model could loop over the data for years 1960 through 1980. A model *cannot* loop over partial years, for example, from 1950-Feb-10 through 1980-Mar-15. - -The input data must be in a netcdf file and the time axis in that file must be CF-1.0 compliant. - -There are two main categories of information that the data models need to know about a stream: - -- data that describes what a user wants -- what streams to use and how to use them -- things that can be changed by a user. -- data that describes the stream data -- meta-data about the inherent properties of the data itself -- things that cannot be changed by a user. 
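-
-As a concrete illustration of the year-looping behavior described above (for example, looping over data years 1960 through 1980), the loop is requested through the ``streams`` entry of ``shr_strdata_nml`` documented in the next section. A minimal sketch, with a hypothetical stream description file name:
-::
-
-   &shr_strdata_nml
-     ! loop over data years 1960-1980, aligning data year 1960 with model year 1
-     streams = 'sst.stream.txt 1 1960 1980'
-   /
-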
- -Generally, information about what streams a user wants to use and how to use them is input via the strdata ("stream data") Fortran namelist, while meta-data that describes the stream data itself is found in an xml-like text file called a "stream description file." - --------------------------------------------------- -Stream Data and shr_strdata_nml namelists --------------------------------------------------- -The stream data (referred to as *strdata*) input is set via a Fortran namelist called ``shr_strdata_nml``. -That namelist, the associated strdata datatype, and the methods are contained in the share source code file, ``shr_strdata_mod.F90``. -In general, strdata input defines an array of input streams and operations to perform on those streams. -Therefore, many namelist inputs are arrays of character strings. -Different variables of the same index are associated. For instance, mapalgo(1) spatial interpolation will be performed between streams(1) and the target domain. - -Each data model has an associated input namelist file, ``xxx_in``, where ``xxx=[datm,dlnd,dice,docn,drof,dwav]``. - -The input namelist file for each data model has a stream dependent namelist group, ``shr_strdata_nml``, and a stream independent namelist group. -The ``shr_strdata_nml`` namelist variables **are the same for all data models**. - -=========== ========================================================================================================================== -File Namelist Groups -=========== ========================================================================================================================== -datm_in datm_nml, shr_strdata_nml -dice_in dice_nml, shr_strdata_nml -dlnd_in dlnd_nml, shr_strdata_nml -docn_in docn_nml, shr_strdata_nml -drof_in drof_nml, shr_strdata_nml -dwav_in dwav_nml, shr_strdata_nml -=========== ========================================================================================================================== - -.. _shr-strdata-nml: - -The following table summaries the ``shr_strdata_nml`` entries. - -=========== ========================================================================================================================== -Namelist Description -=========== ========================================================================================================================== -dataMode component specific mode. - - Each CIME data model has its own datamode values as described below: - - :ref:`datm dataMode` - - :ref:`dice dataMode` - - :ref:`dlnd dataMode` - - :ref:`docn dataMode` - - :ref:`drof dataMode` - - :ref:`dwav dataMode` - -domainFile component domain (all streams will be mapped to this domain). - - Spatial gridfile associated with the strdata. grid information will - be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - If the value is **null** then the domain of the first stream - will be used as the component domain - - default="null" - -streams character array (up to 30 elements) of input stream filenames and associated years of data. - - Each array entry consists of a stream_input_filename year_align year_first year_last. - The stream_input_filename is a stream text input file and the format and options are described elsewhere. - The year_align, year_first, and year_last provide information about the time axis of the file and how to relate - the input time axis to the model time axis. - - default="null". 
- -fillalgo array (up to 30 elements) of fill algorithms associated with the array of streams. - - Valid options are just copy (ie. no fill), special value, nearest neighbor, nearest neighbor in "i" direction, - or nearest neighbor in "j" direction. - - valid values: 'copy','spval','nn','nnoni','nnonj' - - default value='nn' - -fillmask array (up to 30 elements) of fill masks. - - valid values: "nomask,srcmask,dstmask,bothmask" - - default="nomask" - -fillread array (up to 30 elements) fill mapping files to read. Secifies the weights file to read in instead of - computing the weights on the fly for the fill operation. If this is set, fillalgo and fillmask are ignored. - - default='NOT_SET' - -fillwrite array of fill mapping file to write - - default='NOT_SET' - -mapalgo array of spatial interpolation algorithms - - default="bilinear" - -mapmask array of spatial interpolation mask - - default='NOT_SET' - -mapread array of spatial interpolation mapping files to read (optional) - - default='NOT_SET' - -mapwrite array (up to 30 elements) of spatial interpolation mapping files to write (optional). Specifies the weights file - to generate after weights are computed on the fly for the mapping (interpolation) operation, thereby allowing - users to save and reuse a set of weights later. - default='NOT_SET' - -tintalgo array (up to 30 elements) of time interpolation algorithm options associated with the array of streams. - - valid values: lower,upper,nearest,linear,coszen - lower = Use lower time-value - - upper = Use upper time-value - - nearest = Use the nearest time-value - - linear = Linearly interpolate between the two time-values - - coszen = Scale according to the cosine of the solar zenith angle (for solar) - - default="linear" - -taxMode array (up to 30 elements) of time interpolation modes. - - Time axis interpolation modes are associated with the array of streams for - handling data outside the specified stream time axis. - - Valid options are to cycle the data based on the first, last, and align - settings associated with the stream dataset, to extend the first and last - valid value indefinitely, or to limit the interpolated data to fall only between - the least and greatest valid value of the time array. - - valid values: cycle,extend,limit - - extend = extrapolate before and after the period by using the first or last value. - - cycle = cycle between the range of data - - limit = restrict to the period for which the data is valid - - default="cycle" - -dtlimit array (up to 30 elements) of setting delta time axis limit. - - Specifies delta time ratio limits placed on the time interpolation - associated with the array of streams. Causes the model to stop if - the ratio of the running maximum delta time divided by the minimum delta time - is greater than the dtlimit for that stream. For instance, with daily data, - the delta time should be exactly one day throughout the dataset and - the computed maximum divided by minimum delta time should always be 1.0. - For monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. The running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. 
- - default=1.5 - -vectors paired vector field names -=========== ========================================================================================================================== - - -``shr_strdata_nml`` contains a namelist variable, ``streams``, that specifies a list of input stream description files and for each file what years of data to use, and how to align the input stream time axis with the model run time axis. - -The general input format for the ``streams`` namelist variable is: -:: - - &shr_strdata_nml - streams = 'stream1.txt year_align year_first year_last ', - 'stream2.txt year_align year_first year_last ', - ... - 'streamN.txt year_align year_first year_last ' - / - -where: - -.. code-block:: none - - streamN.txt - the stream description file, a plain text file containing details about the input stream (see below) - year_first - the first year of data that will be used - year_last - the last year of data that will be used - year_align - a model year that will be aligned with data for year_first - ---------------------- -Details on year_align ---------------------- - -The ``year_align`` value gives the simulation year corresponding to -``year_first``. A common usage is to set this to the year of -``RUN_STARTDATE``. With this setting, the forcing in the first year of -the run will be the forcing of year ``year_first``. Another use case is -to align the calendar of transient forcing with the model calendar. For -example, setting ``year_align`` = ``year_first`` will lead to the -forcing calendar being the same as the model calendar. The forcing for a -given model year would be the forcing of the same year. This would be -appropriate in transient runs where the model calendar is setup to span -the same year range as the forcing data. - -For some data model modes, ``year_align`` can be set via an xml variable -whose name ends with ``YR_ALIGN`` (there are a few such xml variables, -each pertaining to a particular data model mode). - -An example of this is land-only historical simulations in which we run -the model for 1850 to 2010 using atmospheric forcing data that is only -available for 1901 to 2010. In this case, we want to run the model for -years 1850 (so ``RUN_STARTDATE`` has year 1850) through 1900 by looping -over the forcing data for 1901-1920, and then run the model for years -1901-2010 using the forcing data from 1901-2010. To do this, we -initially set:: - - ./xmlchange DATM_CLMNCEP_YR_ALIGN=1901 - ./xmlchange DATM_CLMNCEP_YR_START=1901 - ./xmlchange DATM_CLMNCEP_YR_END=1920 - -When the model has completed year 1900, then we set:: - - ./xmlchange DATM_CLMNCEP_YR_ALIGN=1901 - ./xmlchange DATM_CLMNCEP_YR_START=1901 - ./xmlchange DATM_CLMNCEP_YR_END=2010 - -With this setup, the correlation between model run year and forcing year -looks like this:: - - RUN Year : 1850 ... 1860 1861 ... 1870 ... 1880 1881 ... 1890 ... 1900 1901 ... 2010 - FORCE Year : 1910 ... 1920 1901 ... 1910 ... 1920 1901 ... 1910 ... 1920 1901 ... 2010 - -Setting ``DATM_CLMNCEP_YR_ALIGN`` to 1901 tells the code that you want -to align model year 1901 with forcing data year 1901, and then it -calculates what the forcing year should be if the model starts in -year 1850. - --------------------------------------------------- -Customizing shr_strdata_nml values --------------------------------------------------- - -The contents of ``shr_strdata_nml`` are automatically generated by that data model's **cime_config/buildnml** script. -These contents are easily customizable for your target experiment. 
-As an example we refer to the following ``datm_in`` contents (that would appear in both ``$CASEROOT/CaseDocs`` and ``$RUNDIR``): -:: - - \&shr_strdata_nml - datamode = 'CLMNCEP' - domainfile = '/glade/proj3/cseg/inputdata/share/domains/domain.lnd.fv1.9x2.5_gx1v6.090206.nc' - dtlimit = 1.5,1.5,1.5,1.5 - fillalgo = 'nn','nn','nn','nn' - fillmask = 'nomask','nomask','nomask','nomask' - mapalgo = 'bilinear','bilinear','bilinear','bilinear' - mapmask = 'nomask','nomask','nomask','nomask' - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1972 ", - "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1972 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1972 ", - "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" - taxmode = 'cycle','cycle','cycle','cycle' - tintalgo = 'coszen','nearest','linear','linear' - vectors = 'null' - / - - -As is discussed in the :ref:`CIME User's Guide`, to change the contents of ``datm_in``, you must edit ``$CASEROOT/user_nl_datm``. -In the above example, you can to this to change any of the above settings **except for the names** - -.. code-block:: none - - datm.streams.txt.CLM_QIAN.Solar - datm.streams.txt.CLM_QIAN.Precip - datm.streams.txt.CLM_QIAN.TPQW - datm.streams.txt.presaero.trans_1850-2000 - -Other than these names, any namelist variable from ``shr_strdata_nml`` can be modified by adding the appropriate keyword/value pairs to ``user_nl_datm``. - -As an example, the following could be the contents of ``$CASEROOT/user_nl_datm``: -:: - - !------------------------------------------------------------------------ - ! Users should ONLY USE user_nl_datm to change namelists variables - ! Users should add all user specific namelist changes below in the form of - ! namelist_var = new_namelist_value - ! Note that any namelist variable from shr_strdata_nml and datm_nml can - ! be modified below using the above syntax - ! User preview_namelists to view (not modify) the output namelist in the - ! directory $CASEROOT/CaseDocs - ! To modify the contents of a stream txt file, first use preview_namelists - ! to obtain the contents of the stream txt files in CaseDocs, and then - ! place a copy of the modified stream txt file in $CASEROOT with the string - ! user_ prepended. - !------------------------------------------------------------------------ - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", - "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" - -and the contents of ``shr_strdata_nml`` (in both ``$CASEROOT/CaseDocs`` and ``$RUNDIR``) would be -:: - - datamode = 'CLMNCEP' - domainfile = '/glade/proj3/cseg/inputdata/share/domains/domain.lnd.fv1.9x2.5_gx1v6.090206.nc' - dtlimit = 1.5,1.5,1.5,1.5 - fillalgo = 'nn','nn','nn','nn' - fillmask = 'nomask','nomask','nomask','nomask' - mapalgo = 'bilinear','bilinear','bilinear','bilinear' - mapmask = 'nomask','nomask','nomask','nomask' - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", - "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" - taxmode = 'cycle','cycle','cycle','cycle' - tintalgo = 'coszen','nearest','linear','linear' - vectors = 'null' - -As is discussed in the :ref:`CIME User's Guide`, you should use **preview_namelists** to view (not modify) the output namelist in ``CaseDocs``. - - -.. 
_stream_description_file: - ------------------------ -Stream Description File ------------------------ -The *stream description file* is not a Fortran namelist, but a locally built xml-like parsing implementation. -Sometimes it is called a "stream dot-text file" because it has a ".txt." in the filename. -Stream description files contain data that specifies the names of the fields in the stream, the names of the input data files, and the file system directory where the data files are located. - -The data elements found in the stream description file are: - -``dataSource`` - A comment about the source of the data -- always set to GENERIC and is there only for backwards compatibility. - -``domainInfo`` - Information about the domain data for this stream specified by the following 3 sub elements. - - ``variableNames`` - A list of the domain variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. This data models require five variables in this list. The names of model's variables (names on the right) must be: "time," "lon," "lat," "area," and "mask." - - ``filePath`` - The file system directory where the domain data file is located. - - ``fileNames`` - The name of the domain data file. Often the domain data is located in the same file as the field data (above), in which case the name of the domain file could simply be the name of the first field data file. Sometimes the field data files don't contain the domain data required by the data models, in this case, one new file can be created that contains the required data. - - -``fieldInfo`` - Information about the stream data for this stream specified by the following 3 required sub elements and optional offset element. - - ``variableNames`` - A list of the field variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. This is the list of fields to read in from the data file; there may be other fields in the file which are not read in (i.e., they won't be used). - - ``filePath`` - The file system directory where the data files are located. - - ``fileNames`` - The list of data files to use. If there is more than one file, the files must be in chronological order, that is, the dates in time axis of the first file are before the dates in the time axis of the second file. - - ``offset`` - The offset allows a user to shift the time axis of a data stream by a fixed and constant number of seconds. For instance, if a data set contains daily average data with timestamps for the data at the end of the day, it might be appropriate to shift the time axis by 12 hours so the data is taken to be at the middle of the day instead of the end of the day. This feature supports only simple shifts in seconds as a way of correcting input data time axes without having to modify the input data time axis manually. This feature does not support more complex shifts such as end of month to mid-month. But in conjunction with the time interpolation methods in the strdata input, hopefully most user needs can be accommodated with the two settings. Note that a positive offset advances the input data time axis forward by that number of seconds. - -The data models advance in time discretely. -At a given time, they read/derive fields from input files. -Those input files have data on a discrete time axis as well. 
-Each data point in the input files is associated with a discrete time (as opposed to a time interval). -Depending on whether you pick lower, upper, nearest, linear, or coszen, the data in the input file will be "interpolated" to the time in the model. - -The offset shifts the time axis of the input data the given number of seconds. -So if the input data is at 0, 3600, 7200, 10800 seconds (hourly) and you set an offset of 1800, then the input data will be set at times 1800, 5400, 9000, and 12600. -So a model at time 3600 using linear interpolation would have data at "n=2" with offset of 0 will have data at "n=(2+3)/2" with an offset of 1800. -n=2 is the 2nd data in the time list 0, 3600, 7200, 10800 in this example. -n=(2+3)/2 is the average of the 2nd and 3rd data in the time list 0, 3600, 7200, 10800. -offset can be positive or negative. - -Actual example: -:: - - - - - time time - lon lon - lat lat - area area - mask mask - - - /glade/proj3/cseg/inputdata/atm/datm7/NYF - - - nyf.ncep.T62.050923.nc - - - - - dn10 dens - slp_ pslv - q10 shnum - t_10 tbot - u_10 u - v_10 v - - - /glade/proj3/cseg/inputdata/atm/datm7/NYF - - - 0 - - - nyf.ncep.T62.050923.nc - - - - --------------------------------------------------- -Customizing stream description files --------------------------------------------------- - -Each data model's **cime-config/buildnml** utility automatically generates the required stream description files for the case. -The directory contents of each data model will look like the following (using DATM as an example) -:: - - $CIMEROOT/components/data_comps/datm/cime_config/buildnml - $CIMEROOT/components/data_comps/datm/cime_config/namelist_definition_datm.xml - -The ``namelist_definition_datm.xml`` file defines and sets default values for all the namelist variables and associated groups and also provides out-of-the box settings for the target data model and target stream. -**buildnml** utilizes these two files to construct the stream files for the given compset settings. You can modify the generated stream files for your particular needs by doing the following: - - -1. Copy the relevant description file from ``$CASEROOT/CaseDocs`` to ``$CASEROOT`` and pre-pend a "\user_"\ string to the filename. Change the permission of the file to write. For example, assuming you are in **$CASEROOT** - :: - - cp $CASEROOT/CaseDocs/datm.streams.txt.CLM_QIAN.Solar $CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar - chmod u+w $CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar - -2. Edit ``$CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar`` with your desired changes. - - - *Be sure not to put any tab characters in the file: use spaces instead*. - - - In contrast to other user_nl_xxx files, be sure to set all relevant data model settings in the xml files, issue the **preview_namelist** command and THEN edit the ``user_datm.streams.txt.CLM_QIAN.Solar`` file. - - - **Once you have created a user_xxx.streams.txt.* file, further modifications to the relevant data model settings in the xml files will be ignored.** - - - If you later realize that you need to change some settings in an xml file, you should remove the user_xxx.streams.txt.* file(s), make the modifications in the xml file, rerun **preview_namelists**, and then reintroduce your modifications into a new user_xxx.streams.txt.* stream file(s). - -3. Call **preview_namelists** and verify that your changes do indeed appear in the resultant stream description file appear in ``CaseDocs/datm.streams.txt.CLM_QIAN.Solar``. 
These changes will also appear in ``$RUNDIR/datm.streams.txt.CLM_QIAN.Solar``. diff --git a/doc/source/data_models/introduction.rst b/doc/source/data_models/introduction.rst deleted file mode 100644 index c2b668ff380..00000000000 --- a/doc/source/data_models/introduction.rst +++ /dev/null @@ -1,98 +0,0 @@ -.. _data-model-introduction: - -Introduction -============ - --------- -Overview --------- -The CIME data models perform the basic function of reading external data files, modifying those data, and then sending the data to the driver via the CIME coupling interfaces. -The fields sent to the driver are the same as those that would be sent by an active component. -This takes advantage of the fact that the driver and other models have no fundamental knowledge of whether another component is fully active or just a data model. -So, for example, the data atmosphere model (datm) sends the same fields as the prognostic Community Atmosphere Model (CAM). -However, rather than determining these fields prognostically, most data models simply read prescribed data. - -The data models typically read gridded data from observations or reanalysis products. -Out of the box, they often provide a few possible data sources and/or time periods that you can choose from when setting up a case. -However, data models can also be configured to read output from a previous coupled run. -For example, you can perform a fully-coupled run in which you ask for particular extra output streams; you can then use these saved "coupler history" files as inputs to datm to run a later land-only spinup. - -In some cases, data models have prognostic functionality, that is, they also receive and use data sent by the driver. -However, in most cases, the data models are not running prognostically and have no need to receive any data from the driver. - -The CIME data models have parallel capability and share significant amounts of source code. -Methods for reading and interpolating data have been established and can easily be reused: -The data model calls strdata ("stream data") methods which then call stream methods. -The stream methods are responsible for managing lists of input data files and their time axes. -The information is then passed up to the strdata methods where the data is read and interpolated in space and time. -The interpolated data is passed up to the data model where final fields are derived, packed, and returned to the driver. - ------- -Design ------- -Data models function by reading in different streams of input data and interpolating those data both spatially and temporally to the appropriate final model grid and model time. -The strdata implementation does the following: - -1. determines nearest lower and upper bound data from the input dataset -2. if that is new data then read lower and upper bound data -3. fill lower and upper bound data -4. spatially map lower and upper bound data to model grid -5. time interpolate lower and upper bound data to model time -6. return fields to data model - -The two timestamps of input data that bracket the present model time are read first. -These are called the lower and upper bounds of data and will change as the model advances. -Those two sets of inputdata are first filled based on the user setting of the namelist variables ``str_fillalgo`` and ``str_fillmask``. -That operation occurs on the input data grid. -The lower and upper bound data are then spatially mapped to the model grid based upon the user setting of the namelist variables ``str_mapalgo`` and ``str_mapmask``. 
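The six strdata steps listed above can be illustrated with a short, schematic Python sketch. This is not CIME source code; the helper callables (``read_record``, ``fill_missing``, ``map_to_model_grid``) are hypothetical stand-ins for the I/O, fill, and mapping machinery controlled by the namelist variables just described:
::

   import bisect

   def strdata_advance(model_time, stream_times, cache, read_record,
                       fill_missing, map_to_model_grid):
       # 1. determine the nearest lower and upper bound records
       iub = bisect.bisect_right(stream_times, model_time)
       ilb = iub - 1

       # 2-4. if the bounds are new, read, fill, and map them to the model grid
       for i in (ilb, iub):
           if i not in cache:
               data = read_record(i)               # read on the input data grid
               data = fill_missing(data)           # str_fillalgo / str_fillmask
               cache[i] = map_to_model_grid(data)  # str_mapalgo / str_mapmask

       # 5. time interpolate to the model time (linear shown; tintalgo selects the method)
       t0, t1 = stream_times[ilb], stream_times[iub]
       w = (model_time - t0) / (t1 - t0)
       lb, ub = cache[ilb], cache[iub]
       interpolated = [(1.0 - w) * a + w * b for a, b in zip(lb, ub)]

       # 6. return fields to the data model
       return interpolated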
-Spatial interpolation only occurs if the input data grid and model grid are not identical, and this is determined in the strdata module automatically. -Time interpolation is the final step and is done using a time interpolation method specified by the user in namelist (via the ``shr_strdata_nml`` namelist variable ``tintalgo``). -A final set of fields is then available to the data model on the model grid and for the current model time. -(See the :ref:`stream data namelist section ` for details on these and other namelist variables.) - -**Each data model** - -- communicates with the driver with fields on only the data model grid - -- can be associated with multiple :ref:`streams` - -- has an xml variable in ``env_run.xml`` that specifies its mode. - These are: ``DATM_MODE``, ``DICE_MODE``, ``DLND_MODE``, ``DOCN_MODE``, ``DROF_MODE``, ``DWAV_MODE``. - Each data model mode specifies the streams that are associated with that data model. - -- has two :ref:`namelist` groups in its input namelist file: a **stream-dependent** and a **stream-independent** namelist group. - -- is associated with only one stream-independent namelist variable ``datamode`` (specified in the ``shr_strdata_nml`` namelist group) that determines if additional operations need to be performed on the input streams before returning to the driver. - - -**Each** ``DXXX_MODE`` **xml variable variable specfies 2 things:** - -- the list of streams that are associated with the data model. - -- a ``datamode`` namelist variable that is associated with each data model and that determines if additional operations need to be performed on the input streams before returning to the driver. - - At a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``. - - - ``NULL`` - turns off the data model as a provider of data to the coupler. - - - ``COPYALL`` - copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero. - - -**Each data model stream** - -- can be associated with multiple stream input files (specified in the ``shr_strdata_nml`` namelist group). - - -**Each stream input file** - -- can contain data on a unique grid and unique temporal time stamps. - -- is interpolated to a single model grid and the present model time. - -More details of the data model design are covered in :ref:`design details`. - -------------- -Next Sections -------------- -In the next sections, more details will be presented, including a full description of the science modes and namelist settings for the data atmosphere, data land, data runoff, data ocean, and data ice models; namelist settings for the strdata namelist input; a description of the format and options for the stream description input files; and a list of internal field names for each of the data components. -The internal data model field names are important because they are used to setup the stream description files and to map the input data fields to the internal data model field names. diff --git a/doc/source/driver_cpl/bit-for-bit-flag.rst b/doc/source/driver_cpl/bit-for-bit-flag.rst deleted file mode 100644 index 0505e53441b..00000000000 --- a/doc/source/driver_cpl/bit-for-bit-flag.rst +++ /dev/null @@ -1,12 +0,0 @@ -Bit-for-bit flag -============================ - -The driver namelist variable ``bfbflag`` provides the option of preserving bit-for-bit results on different coupler processor counts. 
-This flag has no impact on other components and their ability to generate bit-for-bit results on different pe counts.
-When this flag is set, all mappings become "X" types where the source data is rearranged to the destination processor and then local mapping is carried out.
-The order of operations of this mapping is independent of the pe count or decomposition of the grids.
-
-The other feature that is changed by the ``bfbflag`` is the global sum diagnostics.
-
-- When ``bfbflag`` is set to *.false.*, a partial sum is done on each processor and those partial sums are added together to form a global sum. This is generally not order-of-operations independent for different pe counts or decompositions.
-- When ``bfbflag`` is set to *.true.*, the global sums are computed by gathering the global field on the root processor and doing an ordered sum there.
diff --git a/doc/source/driver_cpl/budgets.rst b/doc/source/driver_cpl/budgets.rst
deleted file mode 100644
index 4c595494e74..00000000000
--- a/doc/source/driver_cpl/budgets.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-Mass and Heat Budgets
-=====================
-
-Mass and heat are conserved in the coupler to several digits over centuries.
-Several steps have been taken to ensure this level of conservation, and these are described in other sections of the document.
-In addition, efforts have been made to make sure each component is internally conservative with respect to mass and heat.
-
-The budgets can be turned on and off using the namelist variable ``do_budgets``.
-The value of that namelist variable is set by the ``$CASEROOT/env_run.xml`` variable, ``BUDGETS``.
-
-The driver coupler can diagnose mass and heat budgets at several levels and over different periods.
-The periods are *instantaneous*, *daily average*, *monthly average*, *annual average*, or since the start of the run.
-The budget output for each of these periods is controlled by the driver namelist variables ``budget_inst``, ``budget_daily``, ``budget_month``, ``budget_ann``, ``budget_ltann``, and ``budget_ltend``.
-``budget_ltann`` and ``budget_ltend`` are used to write the long-term budget at either the end of every year or the end of every run.
-Other budgets are written at their period interval.
-
-The namelist input is an integer specifying what to write.
-The budget flags are controlled by the ``$CASEROOT/env_run.xml`` variables ``BUDGET_INST``, ``BUDGET_DAILY``, ``BUDGET_MONTHLY``, ``BUDGET_ANNUAL``, ``BUDGET_LONGTERM_EOY``, and ``BUDGET_LONGTERM_STOP`` respectively.
-Valid values are 0, 1, 2, or 3.
-If 0 is set, no budget data is written.
-The value 1 generates a net heat and water budget for each component, 2 adds a detailed heat and water budget for each component, and 3 adds a detailed heat and water budget of the different components on the atmosphere grid.
-Normally values of 0 or 1 are specified.
-Values of 2 or 3 are generally used only when debugging problems involving conservation.
diff --git a/doc/source/driver_cpl/cplug-02.1-figx1.jpg b/doc/source/driver_cpl/cplug-02.1-figx1.jpg
deleted file mode 100644
index 17fd91a1993..00000000000
Binary files a/doc/source/driver_cpl/cplug-02.1-figx1.jpg and /dev/null differ
diff --git a/doc/source/driver_cpl/design.rst b/doc/source/driver_cpl/design.rst
deleted file mode 100644
index a2cf516529f..00000000000
--- a/doc/source/driver_cpl/design.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-Design
-======
-
---------
-Overview
---------
-cpl7 is built as a single executable with a single high-level driver.
-The driver runs on all processors and handles coupler sequencing, model concurrency, and communication of data between components.
-The driver calls all model components via common and standard interfaces.
-The driver also directly calls coupler methods for mapping (interpolation), rearranging, merging, an atmosphere/ocean flux calculation, and diagnostics.
-The model components and the coupler methods can run on subsets of all the processors.
-In other words, cpl7 consists of a driver that controls the top level sequencing, the processor decomposition, and communication between components and the coupler, while coupler operations such as mapping and merging run under the driver on a subset of processors as if there were a unique coupler model component.
-
-In general, an active component both needs data from and provides data to the coupler, while data models generally read data from I/O and then just provide data to the coupler.
-Currently, the atmosphere, land, river, and sea ice models are always tightly coupled to better resolve the diurnal cycle.
-This coupling is typically half-hourly, although at higher resolutions it can be more frequent.
-The ocean model coupling is typically once or a few times per day.
-The diurnal cycle of ocean surface albedo is computed in the coupler for use by the atmosphere model.
-The looser ocean coupling frequency means the ocean forcing and response are lagged in the system.
-There is an option in cpl7 to run the ocean tightly coupled without any lags, but this is more often used only when running with data ocean components.
-
---------------------------
-Sequencing and Concurrency
---------------------------
-The component processor layouts and MPI communicators are derived from namelist input.
-At the present time, there are ten basic processor groups in cpl7.
-These are associated with the atmosphere, land, river, ocean, sea ice, land ice, wave, external-system-process, coupler, and global groups, although others could be easily added later.
-Each of the processor groups can be distinct, but that is not a requirement of the system.
-A user can overlap processor groups relatively arbitrarily.
-If all processor sets overlap each other in at least one processor, then the model runs sequentially.
-If all processor sets are distinct, the model runs as concurrently as science allows.
-The processor sets for each component group are described via three basic scalar parameters at the present time: the number of mpi tasks, the number of openmp threads per mpi task, and the global mpi task rank of the root mpi task for that group.
-For example, a layout where the number of mpi tasks is 8, the number of threads per mpi task is 4, and the root mpi task is 16 would create a processor group that consists of 32 hardware processors, starts on global mpi task number 16, and contains 8 mpi tasks.
-The global group would have at least 24 tasks and at least 48 hardware processors.
-The driver derives all MPI communicators at initialization and passes them to the component models for use.
-More information on the coupler concurrency can be found in the Craig et al IJHPCA 2012 reference mentioned in the top section of this document.
-
-As mentioned above, there are two issues related to whether the component models run concurrently.
-The first is whether unique chunks of work are running on distinct processor sets.
-The second is the sequencing of this work in the driver.
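As a brief aside, the arithmetic of the example layout above can be made explicit with a tiny sketch (illustrative only, not CIME code; the function name is invented for this illustration):
::

   # ntasks: mpi tasks; nthrds: openmp threads per task; rootpe: global rank of the root task
   def group_footprint(ntasks, nthrds, rootpe):
       first_task = rootpe
       last_task = rootpe + ntasks - 1
       hardware_procs = ntasks * nthrds
       return first_task, last_task, hardware_procs

   # the example layout above: 8 mpi tasks, 4 threads per task, root task 16
   first, last, procs = group_footprint(ntasks=8, nthrds=4, rootpe=16)
   print(first, last, procs)   # 16 23 32  -> a 32-processor group on tasks 16-23

   # the global group must then span at least last + 1 = 24 mpi tasks and,
   # if tasks 0-15 are unthreaded, at least 16 + 32 = 48 hardware processors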
-As much as possible, the driver sequencing has been implemented to maximize the potential amount of concurrency of work between different components. -Ideally, in a single coupling step, the forcing for all models would be computed first, the models could then all run concurrently, and then the driver would advance. -However, scientific requirements such as the coordination of surface albedo and atmosphere radiation computations as well as general computational stability issues prevents this ideal implementation in cpl7. -`Figure 1 `_ shows the maximum amount of concurrency supported for a fully active system. -In practice, the scientific constraints mean the active atmosphere model cannot run concurrently with the land, runoff, and sea-ice models. -Again, `figure 1 `_ does not necessarily represent the optimum processor layout for performance for any configuration, but it provides a practical limit to the amount of concurrency in the system due to scientific constraints. -Results are bit-for-bit identical regardless of the component sequencing because the scientific lags are fixed by the implementation, not the processor layout. - -image:: cplug-02.1-figx1.jpg - -Figure 1: Maximum potential processor concurrency designed to support scientific requirements and stability. - --------------------- -Component Interfaces --------------------- -The standard cpl7 component model interfaces are based upon the ESMF design. -Each component provides an init, run, and finalize method with consistent arguments. -The component interface arguments currently consist of Fortran and MCT datatypes. -The physical coupling fields are passed through the interfaces in the init, run, and finalize phases. -As part of initialization, an MPI communicator is passed from the driver to the component, and grid and decomposition information is passed from the component back to the driver. -The driver/coupler acquires all information about resolution, configurations, and processor layout at run-time from either namelist or from communication with components. - - -Initialization of the system is relatively straight-forward. -First, the MPI communicators are computed in the driver. -Then the component model initialization methods are called on the appropriate processor sets, and an mpi communicator is sent, and the grid and decomposition information are passed back to the driver. -Once the driver has all the grid and decomposition information from the components, various rearrangers and mappers are initialized that will move data between processors, decompositions, and grids as needed at the driver level. -No distinction is made in the coupler implementation for sequential versus concurrent execution. -In general, even for cases where two components have identical grids and processor layouts, often their decomposition is different for performance reasons. -In cases where the grid, decomposition, and processor layout are identical between components, the mapping or rearranging operation will degenerate to a local data copy. - -The interface to the components' run method consists of two distinct bundles of fields. -One is the data sent to force the model. -The second is data received from the model for coupling to other components. -The run interface also contains a clock that specifies the current time and the run length for the model and a data type that encapsulates grid, decomposition, and scalar coupling information. -These interfaces generally follow the ESMF design principles. 
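The init/run/finalize contract described above can be summarized with a schematic sketch. The real interfaces are Fortran with MCT datatypes; the Python class and argument names below are illustrative only and are not CIME identifiers:
::

   class DataComponentSketch:
       # schematic component: init/run/finalize methods with consistent arguments
       def __init__(self, grid):
           self.grid = grid
           self.comm = None

       def init(self, mpicomm, clock):
           # the driver sends an MPI communicator and a clock;
           # grid and decomposition information is passed back to the driver
           self.comm = mpicomm
           return self.grid

       def run(self, clock, x2c_fields):
           # two distinct bundles: x2c forces the model, c2x is returned for
           # coupling to other components; the clock gives the run length
           c2x_fields = dict(x2c_fields)
           return c2x_fields

       def finalize(self):
           return None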
- -------------------------------- -MCT, The Model Coupling Toolkit -------------------------------- -In cpl7, the MCT attribute_vector, global_segmap, and general_grid datatypes have been adopted at the highest levels of the driver, and they are used directly in the component init, run, and finalize interfaces. -In addition, MCT is used for all data rearranging and mapping (interpolation). -The clock used by cpl7 at the driver level is based on the ESMF specification. -Mapping weights are still generated off-line using the SCRIP or ESMF packages as a preprocessing step. -They are read using a subroutine that reads and distributes the mapping weights in reasonably small chunks to minimize the memory footprint. -Development of the cpl7 coupler not only relies on MCT, but MCT developers contributed significantly to the design and implementation of the cpl7 driver. -Development of cpl7 coupler resulted from a particularly strong and close collaboration between NCAR and the Department of Energy Argonne National Lab. - ------------------------------------- -Memory, Parallel IO, and Performance ------------------------------------- -Scaling to tens-of-thousands of processors requires reasonable performance scaling of the models, and all components have worked at improving scaling via changes to algorithms, infrastructure, or decompositions. -In particular, decompositions using shared memory blocking, space filling curves, and all three spatial dimensions have been implemented to varying degrees in all components to increase parallelization and improve scalability. -The Craig et al IJHPCA 2012 reference mentioned in the first section of this document provides a summary of scaling performance of cpl7 for several coupler kernals. - -In practice, performance, load balance, and scalability are limited as a result of the size, complexity, and multiple model character of the system. -Within the system, each component has its own scaling characteristics. -In particular, each may have processor count "sweet-spots" where the individual component model performs particularly well. -This might occur within a component because of internal load balance, decomposition capabilities, communication patterns, or cache usage. -Second, component performance can vary over the length of the model run. -This occurs because of seasonal variability of the cost of physics in models, changes in performance during an adjustment (spin-up) phase, and temporal variability in calling certain model operations like radiation, dynamics, or I/O. -Third, the hardware or batch queueing system might have some constraints on the total number of processors that are available. -For instance, on 16 or 32 way shared memory node, a user is typically charged based on node usage, not processor usage. -So there is no cost savings running on 40 processors versus 64 processors on a 32-way node system. -As a result of all of these issues, perfect load-balancing is generally not possible. -But to a large degree, if one accepts the limitations, a load balance configuration with acceptable idle-time and reasonably good throughput is nearly always possible to configure. - - -Load-balancing requires a number of considerations such as which components are run, their absolute resolution, and their relative resolution; cost, scaling and processor count sweet-spots for each component; and internal load imbalance within a component. 
-It is often best to load balance the system with all significant run-time I/O turned off because this generally occurs very infrequently (typically one timestep per month), is best treated as a separate cost, and can bias interpretation of the overall model load balance. -Also, the use of OpenMP threading in some or all of the system is dependent on the hardware/OS support as well as whether the system supports running all MPI and mixed MPI/OpenMP on overlapping processors for different components. -Finally, should the components run sequentially, concurrently, or some combination of the two. -Typically, a series of short test runs is done with the desired production configuration to establish a reasonable load balance setup for the production job. - - - diff --git a/doc/source/driver_cpl/driver_threading_control.rst b/doc/source/driver_cpl/driver_threading_control.rst deleted file mode 100644 index ddc8cfd70fa..00000000000 --- a/doc/source/driver_cpl/driver_threading_control.rst +++ /dev/null @@ -1,12 +0,0 @@ -Driver Threading Control -======================== - -OpenMP thread counts are controlled at three levels. - -- The coarsest level is prior to launching the model. The environment variable OMP_NUM_THREADS is usually set to the largest value any mpi task will use. At a minimum, this will ensure threading is turned on to the maximum desired value in the run. - -- The next level is during the driver initialization phase. When the mpi communicators are initialized, the maximum number of threads per mpi task can be computed based on the ccsm_pes namelist input. At that point, there is an initial fortran call to the intrinsic, omp_set_num_threads. When that happens and if that call is successful, the number of threads will be set to the maximum needed in the system on an mpi task by task basis. - -- Finally, there is the ability of CESM to change the thread count per task as each component is individually called and as the model integrates through the driver run loop. In other words, for components that share the same hardware processor but have different threads per task, this feature allows those components to run with the exact value set by the user in the ccsm_pes namelist. This final level of thread control is turned off by default, but it can be turned on using the driver namelist variable ``drv_threading``. - -This fine control of threading is likely of limited use at this point given the current driver implementation. diff --git a/doc/source/driver_cpl/grids.rst b/doc/source/driver_cpl/grids.rst deleted file mode 100644 index a40e546b90c..00000000000 --- a/doc/source/driver_cpl/grids.rst +++ /dev/null @@ -1,285 +0,0 @@ -===================== -Grids -===================== - ----------------------------- -Standard Grid Configurations ----------------------------- -The standard implementation for grids in CIME has been that the atmosphere and land models are run on identical grids and the ocean and sea ice model are run on identical grids. -The ocean model mask is used to derive a complementary mask for the land grid such that for any given combination of atmosphere/land and ocean/ice grids, there is a unique land mask. -This approach for dealing with grids is still used a majority of the time. -But there is a new capability, called ``trigrid`` that allows the atmosphere and land grids to be unique. -A typical grid is the finite volume "1 degree" atmosphere/land grid matched with the "1 degree" ocean/ice grid. 
-The runoff grid is generally unique to runoff and the land ice grid is coupled on the land grid with interpolation carried out to a unique land ice grid inside that component. - -Historically, the ocean grid has been the higher resolution grid in CIME model configurations. -While that is no longer always the case, the current driver implementation largely reflects that presumption. -The atmosphere/ocean fluxes in the coupler are computed on the ocean grid. -The driver namelist variable ``aoflux_grid`` allows users to specify the atmosphere/ocean flux computation grid in the coupler in the future. -In addition, the default mapping approach used also reflects the presumption that the ocean is generally higher resolution. -Fluxes are always mapped using a locally conservative area average methods to preserve conservation. -However, states are mapped using bilinear interpolation from the atmosphere grid to the ocean grid to better preserve gradients, while they are mapped using a locally conservative area average approach from the ocean grid to the atmosphere grid. -These choices are based on the presumption that the ocean grid is higher resolution. - -There has always been an option that all grids (atmosphere, land, ocean, and ice) could be identical, and this is still supported. -There are a couple of namelist variables, ``samegrid_ao``, ``samegrid_al``, and ``samegrid_ro`` that tell the coupler whether to expect that the following grids; atmosphere/ocean, atmosphere/land, and runoff/ocean respectively are identical. -These are set automaticaly in the driver namelist depending on the grid chosen and impact mapping as well as domain checking. - ----------------------- -Trigrid Configurations ----------------------- -Grid configurations are allowed where the atmosphere and land grids are unique. - -The trigrid implementation introduces an ambiguity in the definition of the mask. -This ambiguity is associated with an inability to define an absolutely consistent ocean/land mask across all grids in the system. -A summary of trigrid support follows: -- The land mask is defined on the atmosphere grid as the complement of the ocean mask mapped conservatively to the atmosphere grid. -- Then the land and ocean masks are exactly complementary on the atmosphere grid where conservative merging are critical. -- No precise land fraction needs to be defined in the land grid. -- The only requirement is that the land model compute data on a masked grid such that when mapped to the atmosphere grid, all atmosphere grid points that contain some fraction of land have valid values computed in the land model. -- There are an infinite number of land fraction masks that can accomplish this including a fraction field that is exactly one at every grid cell. -- In the land model, all land fraction masks produce internally conservative results. -- The only place where the land fraction becomes important is mapping the land model output to the runoff model. -- In that case, the land fraction on the land grid is applied to the land to runoff mapping. - ---------- -Fractions ---------- -The component grid fractions in the coupler are defined and computed in ``$CIMEROOT/driver_cpl/driver/seq_frac_mct``. -A slightly modified version of the notes from this file is pasted below. -Just to clarify some of the terms. -- fractions_a, fractions_l, fractions_i, and fractions_o are the fractions on the atmosphere, land, ice, and ocean grids. 
-- afrac, lfrac, ifrac, and ofrac are the atmosphere, land, ice, and ocean fractions on those grids. -So fractions_a(lfrac) is the land fraction on the atmosphere grid. -lfrin in the land fraction defined in the land model. -This can be different from lfrac because of the trigrid implementation. -lfrac is the land fraction consistent with the ocean mask and lfrin is the land fraction in the land model. -ifrad and ofrad are fractions at the last radiation timestep. -These fractions preserve conservation of heat in the net shortwave calculation because the net shortwave calculation is one timestep behind the ice fraction evolution in the system. -When the variable "dom" is mentioned below, that refers to a field sent from a component at initialization. -:: - - ! the fractions fields are now afrac, ifrac, ofrac, lfrac, and lfrin. - ! afrac = fraction of atm on a grid - ! lfrac = fraction of lnd on a grid - ! ifrac = fraction of ice on a grid - ! ofrac = fraction of ocn on a grid - ! lfrin = land fraction defined by the land model - ! ifrad = fraction of ocn on a grid at last radiation time - ! ofrad = fraction of ice on a grid at last radiation time - ! afrac, lfrac, ifrac, and ofrac are the self-consistent values in the - ! system. lfrin is the fraction on the land grid and is allowed to - ! vary from the self-consistent value as descibed below. ifrad - ! and ofrad are needed for the swnet calculation. - ! the fractions fields are defined for each grid in the fraction bundles as - ! needed as follows. - ! character(*),parameter :: fraclist_a = 'afrac:ifrac:ofrac:lfrac:lfrin' - ! character(*),parameter :: fraclist_o = 'afrac:ifrac:ofrac:ifrad:ofrad' - ! character(*),parameter :: fraclist_i = 'afrac:ifrac:ofrac' - ! character(*),parameter :: fraclist_l = 'afrac:lfrac:lfrin' - ! character(*),parameter :: fraclist_g = 'gfrac' - ! - ! we assume ocean and ice are on the same grids, same masks - ! we assume ocn2atm and ice2atm are masked maps - ! we assume lnd2atm is a global map - ! we assume that the ice fraction evolves in time but that - ! the land model fraction does not. the ocean fraction then - ! is just the complement of the ice fraction over the region - ! of the ocean/ice mask. - ! we assume that component domains are filled with the total - ! potential mask/fraction on that grid, but that the fractions - ! sent at run time are always the relative fraction covered. - ! for example, if an atm cell can be up to 50% covered in - ! ice and 50% land, then the ice domain should have a fraction - ! value of 0.5 at that grid cell. at run time though, the ice - ! fraction will be between 0.0 and 1.0 meaning that grid cells - ! is covered with between 0.0 and 0.5 by ice. the "relative" fractions - ! sent at run-time are corrected by the model to be total fractions - ! such that - ! in general, on every grid, - ! fractions_*(afrac) = 1.0 - ! fractions_*(ifrac) + fractions_*(ofrac) + fractions_*(lfrac) = 1.0 - ! where fractions_* are a bundle of fractions on a particular grid and - ! *frac (ie afrac) is the fraction of a particular component in the bundle. - ! - ! the fractions are computed fundamentally as follows (although the - ! detailed implementation might be slightly different) - ! initialization (frac_init): - ! afrac is set on all grids - ! fractions_a(afrac) = 1.0 - ! fractions_o(afrac) = mapa2o(fractions_a(afrac)) - ! fractions_i(afrac) = mapa2i(fractions_a(afrac)) - ! fractions_l(afrac) = mapa2l(fractions_a(afrac)) - ! initially assume ifrac on all grids is zero - ! 
fractions_*(ifrac) = 0.0 - ! fractions/masks provided by surface components - ! fractions_o(ofrac) = dom_o(frac) ! ocean "mask" - ! fractions_l(lfrin) = dom_l(frac) ! land model fraction - ! then mapped to the atm model - ! fractions_a(ofrac) = mapo2a(fractions_o(ofrac)) - ! fractions_a(lfrin) = mapl2a(fractions_l(lfrin)) - ! and a few things are then derived - ! fractions_a(lfrac) = 1.0 - fractions_a(ofrac) - ! this is truncated to zero for very small values (< 0.001) - ! to attempt to preserve non-land gridcells. - ! fractions_l(lfrac) = mapa2l(fractions_a(lfrac)) - ! one final term is computed - ! dom_a(ascale) = fractions_a(lfrac)/fractions_a(lfrin) - ! dom_l(ascale) = mapa2l(dom_a(ascale)) - ! these are used to correct land fluxes in budgets and lnd2rtm coupling - ! and are particularly important when the land model is running on - ! a different grid than the atm model. in the old system, this term - ! was treated as effectively 1.0 since there was always a check that - ! fractions_a(lfrac) ~ fractions_a(lfrin), namely that the land model - ! provided a land frac that complemented the ocean grid. this is - ! no longer a requirement in this new system and as a result, the - ! ascale term can be thought of as a rescaling of the land fractions - ! in the land model to be exactly complementary to the ocean model - ! on whatever grid it may be running. - ! run-time (frac_set): - ! update fractions on ice grid - ! fractions_i(ifrac) = i2x_i(Si_ifrac) ! ice frac from ice model - ! fractions_i(ofrac) = 1.0 - fractions_i(ifrac) - ! note: the relative fractions are corrected to total fractions - ! fractions_o(ifrac) = mapi2o(fractions_i(ifrac)) - ! fractions_o(ofrac) = mapi2o(fractions_i(ofrac)) - ! fractions_a(ifrac) = mapi2a(fractions_i(ifrac)) - ! fractions_a(ofrac) = mapi2a(fractions_i(ofrac)) - ! - ! fractions used in merging are as follows - ! mrg_x2a uses fractions_a(lfrac,ofrac,ifrac) - ! mrg_x2o needs to use fractions_o(ofrac,ifrac) normalized to one - ! normalization happens in mrg routine - ! - ! fraction corrections in mapping are as follows - ! mapo2a uses *fractions_o(ofrac) and /fractions_a(ofrac) - ! mapi2a uses *fractions_i(ifrac) and /fractions_a(ifrac) - ! mapl2a uses *fractions_l(lfrin) and /fractions_a(lfrin) - ! mapa2* should use *fractions_a(afrac) and /fractions_*(afrac) but this - ! has been defered since the ratio always close to 1.0 - ! - ! budgets use the standard afrac, ofrac, ifrac, and lfrac to compute - ! quantities except in the land budget which uses lfrin multiplied - ! by the scale factor, dom_l(ascale) to compute budgets. - ! - ! fraction and domain checks - ! initialization: - ! dom_i = mapo2i(dom_o) ! lat, lon, mask, area - ! where fractions_a(lfrac) > 0.0, fractions_a(lfrin) is also > 0.0 - ! this ensures the land will provide data everywhere the atm needs it - ! and allows the land frac to be subtlely different from the - ! land fraction specified in the atm. - ! dom_a = mapl2a(dom_l) ! if atm/lnd same grids - ! dom_a = mapo2a(dom_o) ! if atm/ocn same grids - ! dom_a = mapi2a(dom_i) ! if atm/ocn same grids - ! 0.0-eps < fractions_*(*) < 1.0+eps - ! fractions_l(lfrin) = fractions_l(lfrac) - ! only if atm/lnd same grids (but this is not formally required) - ! this is needed until dom_l(ascale) is sent to the land model - ! as an additional field for use in l2r mapping. - ! run time: - ! fractions_a(lfrac) + fractions_a(ofrac) + fractions_a(ifrac) ~ 1.0 - ! 
0.0-eps < fractions_*(*) < 1.0+eps - ---------------- -Domain Checking ---------------- -Domain checking is a very important initialization step in the system. -Domain checking verifies that the longitudes, latitudes, areas, masks, and fractions of different grids are consistent with each other. -The subroutine that carries out domain checking is in ``$CIMEROOT/driver_cpl/driver/seq_domain_mct``. -Tolerances for checking the domains can be set in the drv_in driver namelist via the namelist variables, ``eps_frac``, ``eps_amask``, ``eps_agrid``, ``eps_aarea``, ``eps_omask``, ``eps_ogrid``, and ``eps_oarea``. -These values are derived in the coupler namelist from the script env variables, EPS_FRAC, EPS_AMASK, EPS_AGRID, EPS_AAREA, EPS_OMASK, EPS_OGRID, and EPS_OAREA in the env_run.xml. -If an error is detected in the domain checking, the model will write an error message and abort. - -The domain checking is dependent on the grids and in particular, the samegrid input namelist settings. But it basically does the following, -:: - - ocean/ice grid comparison: - verifies the grids are the same size - verifies the difference in longitudes and latitudes is less than eps_ogrid. - verifies the difference in masks is less than eps_omask - verifies the difference in areas is less than eps_oarea - - atmosphere/land grid comparison (if samegrid_al): - verifies the grids are the same size - verifies the difference in longitudes and latitudes is less than eps_agrid. - verifies the difference in masks is less than eps_amask - verifies the difference in areas is less than eps_aarea - - atmosphere/ocean grid comparison (if samegrid_ao): - verifies the grids are the same size - verifies the difference in longitudes and latitudes is less than eps_agrid. - verifies the difference in masks is less than eps_amask - verifies the difference in areas is less than eps_aarea - - fractions - verifies that the land fraction on the atmosphere grid and the ocean fraction - on the atmosphere grid add to one within a tolerance of eps_frac. - -There are a number of subtle aspects in the domain checking like whether to check over masked grid cells, but these issues are less important than recognizing that errors in the domain checking should be treated seriously. -It is easy to make the errors go away by changing the tolerances, but by doing so, critical grid errors that can impact conservation and consistency in a simulation might be overlooked. - - ------------------------ -Mapping (Interpolation) ------------------------ -Mapping files to support interpolation of fields between grids are computed offline. -This is done using the ESMF offline regridding utility. -First, note that historically, the ocean grid has been the higher resolution grid. -While that is no longer always the case, the current implementation largely reflects that presumption. -In general, mapping of fluxes is done using a locally conservative area average approach to preserve conservation. -State fields are generally mapped using bilinear interpolation from the atmosphere grid to the ocean grid to better preserve gradients, but state fields are generally mapped using the conservative area average approach from the ocean grid to the atmosphere grid. -But this is not a requirement of the system. -The individual state and flux mapping files are specified at runtime using the ``seq_maps.rc`` input file, and any valid mapping file using any mapping approach can be specified in that input file. 
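How a mapping file is applied can be sketched schematically: each mapping "link" carries a destination index, a source index, and a weight, and each destination value is a weighted sum of source cells. The sketch below is illustrative only (not CIME code); it also includes the scale-by-fraction and unscale-by-mapped-fraction correction for ice and ocean fields described later in this section, and it reproduces the ice-temperature numbers used in that discussion:
::

   def apply_map(links, src, src_frac=None):
       # links: list of (dst_index, src_index, weight) from an offline mapping file
       dst, dst_frac = {}, {}
       for i_dst, i_src, w in links:
           f = 1.0 if src_frac is None else src_frac[i_src]
           dst[i_dst] = dst.get(i_dst, 0.0) + w * f * src[i_src]
           dst_frac[i_dst] = dst_frac.get(i_dst, 0.0) + w * f
       if src_frac is not None:
           # unscale by the mapped fraction after mapping
           dst = {k: v / dst_frac[k] for k, v in dst.items() if dst_frac[k] > 0.0}
       return dst

   # two ice cells of equal area fully covering one atmosphere cell
   links = [(0, 0, 0.5), (0, 1, 0.5)]
   t_ice = [-1.0, -2.0]

   print(apply_map(links, t_ice))                       # {0: -1.5}   unweighted
   print(apply_map(links, t_ice, src_frac=[0.3, 0.5]))  # {0: -1.625} fraction weighted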
-The ``seq_maps.rc`` file contains information about the mapping files as well as the mapping type.
-There are currently two types of mapping implementations, "X" and "Y".
-
-- "X" mapping rearranges the source data to the destination grid decomposition and then a local mapping is done from the source to the destination grid on the destination decomposition. In the "X" type, the source grid is rearranged.
-- "Y" mapping does a local mapping from the source grid to the destination grid on the source grid decomposition. That generates a partial sum of the destination values which are then rearranged to the destination decomposition and summed. Both options produce reasonable results, although they may differ in value by "roundoff" due to differences in order of operations. The type chosen impacts performance. In both implementations, the number of flops is basically identical. The difference is the communication. In the "Y" type, the destination grid is rearranged.
-
-Since, historically, the ocean grid has been higher resolution than the atmosphere grid, "X" mapping is used for atmosphere to ocean/ice mapping and "Y" mapping is used for ocean/ice to atmosphere mapping to optimize mapping performance.
-
-Mapping corrections are made in some cases in the polar region.
-In particular, the current bilinear and area conservative mapping approaches introduce relatively large errors in mapping vector fields around the pole.
-The current coupler can correct the interpolated surface wind velocity near the pole when mapping from the atmosphere to the ocean and ice grids.
-There are several options that correct the vector mapping, and these are set in the env variable VECT_MAP.
-The npfix option only affects ocean and ice grid cells that are northward of the last latitude line of the atmospheric grid.
-The algorithm is contained in the file models/drv/driver/map_atmocn_mct.F90 and is only valid when the atmosphere grid is a longitude/latitude grid.
-This feature is generally on by default.
-The other alternative is the cart3d option, which converts the surface u and v velocity to 3d x, y, z vectors, maps those three vectors, and then converts back to u and v east and north directions on the surface.
-Both vector mapping methods introduce errors of different degrees but are generally much better than just mapping vector fields as if they were individual scalars.
-The ``vect_map`` namelist input is set in the ``drv_in`` file.
-
-The input mapping files are assumed to be valid for grids with masks of value zero or one, where grid points with a mask of zero are never considered in the mapping.
-Well defined, locally conservative area mapping files as well as bilinear mapping files can be generated using this masked approach.
-However, there is another issue: a grid fraction in an active cell might actually change over time.
-This is not the case for the land fraction, but it is the case for the relative ice and ocean fractions.
-The ice fraction is constantly evolving in the system in general.
-To improve the accuracy of the ice and ocean mapping, the ocean/ice fields are scaled by the local fraction before mapping and unscaled by the mapped fraction after mapping.
-The easiest way to demonstrate this is via an example.
-Consider a case where two ice cells of equal area underlie a single atmosphere cell completely.
-The mapping weight of each ice cell generated offline would be 0.5 in this case and if ice temperatures of -1.0 and -2.0 in the two cells respectively were mapped to the atmosphere grid, a resulting ice temperature on the atmosphere grid of -1.5 would result. -Consider the case where one cell has an ice fraction of 0.3 and the other has a fraction of 0.5. -Mapping the ice fraction to the atmospheric cell results in a value of 0.4. -If the same temperatures are mapped in the same way, a temperature of -1.5 results which is reasonable, but not entirely accurate. -Because of the relative ice fractions, the weight of the second cell should be greater than the weight of the first cell. -Taking this into account properly results in a fraction weighted ice temperature of -1.625 in this example. -This is the fraction correction that is carried out whenever ocean and ice fields are mapped to the atmosphere grid. -Time varying fraction corrections are not required in other mappings to improve accuracy because their relative fractions remain static. - -------------------------- -Area Correction of Fluxes -------------------------- -To improve conservation in the system, all fluxes sent to and received from components are corrected for the area differences between the components. -There are many reasonable ways to compute an area of a grid cell, but they are not generally consistent. -One assumption with respect to conservation of fluxes is that the area acting upon the flux is well defined. -Differences in area calculations can result in differences of areas up to a few percent and if these are not corrected, will impact overall mass and heat conservation. -Areas are extracted for each grid from the mapping files. -In this implementation, it is assumed that the areas in all mapping files are computed reasonably and consistently for each grid and on different grids. -Those mapping areas are used to correct the fluxes for each component by scaling the fluxes sent to and received by the component by the ratio of the mapping area and the component area. -The areas from the components are provided to the coupler by the component at initialization. -The minimum and maximum value of each area corrections is written to the coupler log file at initialization. -One critical point is that if mapping files are generated by different tools offline and used in the driver, an error could be introduced that is related to inconsistent areas provided by different mapping files. - - diff --git a/doc/source/driver_cpl/history-and-restarts.rst b/doc/source/driver_cpl/history-and-restarts.rst deleted file mode 100644 index eed38f345de..00000000000 --- a/doc/source/driver_cpl/history-and-restarts.rst +++ /dev/null @@ -1,24 +0,0 @@ -History and Restarts -==================== - -In addition to log files, component models also produce history and restart files. -History files are generally netcdf format and contain fields associated with the state of the model. -History files are implemented and controlled independently in the component models, although support for monthly average history files is a standard output of most production runs. -The driver has a file naming standard for history files which includes the case names, component name, and model date. - -All CIME-compliant component models must be able to stop in the middle of a run and then subsequently restart in a bit-for-bit fashion. -For most models, this requires the writing of a restart file. 
-The restart file can be any format, although netcdf has become relatively standard, and it should contain any scalars, fields, or information that is required to restart the component model in exactly the same state as when the restart was written and the model was stopped. -The expectation in CIME is that a restart of a model run will be bit-for-bit identical and this is regularly tested as part of component model development by running the model 10 days, writing a restart at the end of 5 days, and then restarting at day 5 and comparing the result with the 10 day run. -Unlike history files, restart files must be coordinated across different components. -The restart frequency is set in the driver time manager namelist by driver namelist variables ``restart_option``, ``restart_n``, and ``restart_ymd``. -The driver will trigger a restart alarm in clocks when a coordinated restart is requested. -The components are required to check this alarm whenever they are called and to write a restart file at the end of the current coupling period. -This method ensures all components are writing restart files at a consistent timestamp. -The restart filenames are normally set in a generic rpointer file. -The rpointer file evolves over the integration and keeps track of the current restart filenames. -When a model is restarted, both the rpointer file and the actual restart file are generally required. - -Many models are also able to restart accumulating history files in the middle of an accumulation period, but this is not a current requirement for CIME compliant components. -In production, the model is usually started and stopped on monthly boundaries so monthly average history files are produced cleanly. -The run length of a CESM1 production run is usually specified using the nmonths or nyears option and restart files are normally written only at the end of the run. diff --git a/doc/source/driver_cpl/implementation.rst b/doc/source/driver_cpl/implementation.rst deleted file mode 100644 index e4d74740f15..00000000000 --- a/doc/source/driver_cpl/implementation.rst +++ /dev/null @@ -1,15 +0,0 @@ -Implementation -============== - -.. toctree:: - :maxdepth: 3 - - time-management.rst - grids.rst - initialization-and-restart.rst - driver_threading_control.rst - bit-for-bit-flag.rst - history-and-restarts.rst - budgets.rst - multi-instance.rst - namelist-overview.rst diff --git a/doc/source/driver_cpl/index.rst b/doc/source/driver_cpl/index.rst deleted file mode 100644 index 18ffa1b9023..00000000000 --- a/doc/source/driver_cpl/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. _driver-cpl: - -.. on documentation master file, created by - sphinx-quickstart on Tue Jan 31 19:46:36 2017. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -####################### - Driver/Coupler -####################### - -.. 
toctree:: - :maxdepth: 3 - :numbered: - - introduction.rst - design.rst - implementation.rst - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc/source/driver_cpl/initialization-and-restart.rst b/doc/source/driver_cpl/initialization-and-restart.rst deleted file mode 100644 index e7680922d6c..00000000000 --- a/doc/source/driver_cpl/initialization-and-restart.rst +++ /dev/null @@ -1,34 +0,0 @@ -==================================== -Initialization and Restart -==================================== - -The initialization has been developed over the last two decades to meet the scientific goals, minimize the communication required, and ensure a consistent and well defined climate system. -The order of operations is critical. The initialization is basically as follows: - -- The ``ccsm_pes`` namelist is read and mpi communicators are initialized. -- The ``seq_infodata`` namelist is read and configuration settings are established. -- The ``prof_inparm`` namelist is read and the timing tool is initialized. -- The ``pio_inparm`` namelist is read and the driver IO is initialized. -- The ``seq_timemgr`` namelist is read and the driver time manager and clocks are initialized. -- The atmosphere init routine is called, the mpi communicator and clock are sent, and the atmosphere grid is returned. -- The land init routine is called, the mpi communicator and clock are sent, and the land grid is returned. -- The runoff init routine is called, the mpi communicator and clock are sent, and the runoff grid is returned. -- The ocean init routine is called, the mpi communicator and clock are sent, and the ocean grid is returned. -- The ice init routine is called, the mpi communicator and clock are sent, and the ice grid is returned. -- The land ice init routine is called, the mpi communicator and clock are sent, and the land ice grid is returned. -- The infodata buffer is synchronized across all processors. This buffer contains many model configuration settings set by the driver but also sent from the components. -- The atmosphere, land, runoff, ice, land ice, and ocean rearrangers are initialized. - These rearrangers move component data between the component pes and the coupler pes. -- The Remaining attribute datatypes associated are initialized -- The mapping weights and areas are read. -- The Component grids are checked using the domain checking method. -- The flux area corrections are initialized on the component pes and applied to the initial fields sent by each component on the component pes. Those initial fields are then rearranged to the coupler pes. -- The fractions are initialized on the coupler pes. -- The atmosphere/ocean flux computation is initialized and initial ocean albedos are computed on the coupler pes. -- The land, ocean, and ice initial albedos are mapped to the atmosphere grid and merged to generate initial surface albedos. -- The initial atmosphere forcing data (albedos) is rearranged from the coupler pes to the atmosphere pes, and the area corrections are applied. -- The second phase of the atmosphere init method is to initialize the atmosphere radiation from the surface albedos. -- The new atmosphere initial data is area corrected and rearranged to the coupler pes. -- The budget diagnostics are zeroed out. -- The coupler restart file is read. -- Initialization is complete. 
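A highly condensed sketch (illustrative only, not CIME code) of the initialization handshake listed above; the component objects and the helper callables passed in are hypothetical placeholders:
::

   def driver_init(components, comms, clocks, build_rearrangers_and_maps, check_domains):
       grids = {}
       # each component receives its mpi communicator and clock and returns its grid
       for name, comp in components.items():
           grids[name] = comp.init(comms[name], clocks[name])

       # only after all grids are known can the driver build the rearrangers and
       # mappers that move data between processors, decompositions, and grids,
       # and then verify that the domains are consistent
       maps = build_rearrangers_and_maps(grids)
       check_domains(grids, maps)
       return maps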
- diff --git a/doc/source/driver_cpl/introduction.rst b/doc/source/driver_cpl/introduction.rst deleted file mode 100644 index f7fe10aaceb..00000000000 --- a/doc/source/driver_cpl/introduction.rst +++ /dev/null @@ -1,18 +0,0 @@ -Introduction -============ - -The following provides an overview of the CIME driver/coupler. -We will cover the top level driver implementation as well as the coupler component within the system. -The driver runs on all hardware processors, runs the top level instructions, and, executes the driver time loop. -The coupler is a component of the CIME infrastructure that is run from within the driver. -It can be run on a subset of the total processors, and carries out mapping (interpolation), merging, diagnostics, and other calculations. -The name cpl7 refers to the source code associated with both the driver and the coupler parts of the model. -cpl7 code is located in the CIME source tree under driver_cpl/ and the main program of ``driver_cpl/driver/cesm_driver.F90``. - -We also provide a general overview of the cpl7 design. -Specific implementation issues are then discussed individually. -Finally, there is a section summarizing all of the cpl7 namelist input. -This document is written primarily to help users understand the inputs and controls within the cpl7 system, but to also provide some background about the associated implementation. -`Coupler flow diagrams `_ are provided in a separate document. -Some additional documentation on how the coupler works can be found in Craig et al, `"A New Flexible Coupler for Earth System Modeling Developed for CCSM4 and CESM1" `_, International Journal of High Performance Computing Applications 2012 26: 31 DOI: 10.1177/1094342011428141. - diff --git a/doc/source/driver_cpl/multi-instance.rst b/doc/source/driver_cpl/multi-instance.rst deleted file mode 100644 index ebfe8d923c6..00000000000 --- a/doc/source/driver_cpl/multi-instance.rst +++ /dev/null @@ -1,15 +0,0 @@ -Multi-instance Functionality -============================= - -The multi-instance feature allows multiple instances of a given component to run in a single CESM run. -This might be useful for data assimilation or to average results from multiple instances to force another model. - -The multi-instance implementation is fairly basic at this point. -It does not do any averaging or other statistics between multiple instances, and it requires that all prognostic components must run the same multiple instances to ensure correct coupling. -The multi-instance feature is set via the ``$CASEROOT/env_mach_pes.xml`` variables that have an ``NINST_`` prefix. -The tasks and threads that are specified in multi-instance cases are distributed evenly between the multiple instances. -In other words, if 16 tasks are requested for each of two atmosphere instances, each instance will run on 8 of those tasks. -The ``NINST_*_LAYOUT`` value should always be set to *concurrent* at this time. -Sequential running on multiple instances is still not a robust feature. -Multiple instances is a build time setting in env_mach_pes.xml. -Multiple instance capabilities are expected to be extended in the future. 
diff --git a/doc/source/driver_cpl/namelist-overview.rst b/doc/source/driver_cpl/namelist-overview.rst
deleted file mode 100644
index ad0641d65bf..00000000000
--- a/doc/source/driver_cpl/namelist-overview.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-More on Driver Namelists
-=========================
-
-There are a series of driver/coupler namelist input files created by the driver namelist generator ``$CIMEROOT/driver_cpl/cime_config/buildnml``. These are:
-
-- drv_in
-- drv_flds_in
-- cpl_modelio.nml, atm_modelio.nml, esp_modelio.nml, glc_modelio.nml, ice_modelio.nml, lnd_modelio.nml, ocn_modelio.nml, rof_modelio.nml, wav_modelio.nml
-- seq_maps.rc
-
-The ``*_modelio.nml`` files set the filename for the primary standard output file and also provide settings for the parallel IO library, PIO.
-The drv_in namelist file contains several different namelist groups associated with general options, time manager options, pe layout, timing output, and parallel IO settings.
-The seq_maps.rc file specifies the mapping files for the configuration.
-Note that seq_maps.rc is NOT a Fortran namelist file, but the format should be relatively clear from the default settings.
diff --git a/doc/source/driver_cpl/time-management.rst b/doc/source/driver_cpl/time-management.rst
deleted file mode 100644
index da2ea973dda..00000000000
--- a/doc/source/driver_cpl/time-management.rst
+++ /dev/null
@@ -1,194 +0,0 @@
-.. _time-management:
-
-===============
-Time Management
-===============
-
--------------
-Driver Clocks
--------------
-The driver manages the main clock in the system. The main clock
-advances at the shortest coupling period and uses alarms to trigger
-component coupling and other events. In addition, the driver
-maintains a clock that is associated with each component. The
-driver's component clocks have a timestep associated with the coupling
-period of that component. The main driver clock and the component
-clocks in the driver advance in a coordinated manner and are always
-synchronized. The advancement of time is managed as follows in the
-main run loop. First, the main driver clock advances one timestep and
-the component clocks are advanced in a synchronous fashion. The clock
-time represents the time at the end of the next model timestep.
-Alarms may be triggered at that timestep to call the atmosphere,
-land, runoff, sea ice, land ice, or ocean run methods. If a component
-run alarm is triggered, the run method is called and the driver passes
-that component's clock to that component. The component clock
-contains information about the length of the next component
-integration and the expected time of the component at the end of the
-integration period.
-
-Generally, the component models have independent time management
-software. When a component run method is called, the component must
-advance the proper period and also check that its internal clock is
-consistent with the coupling clock before returning to the driver.
-The clock passed to the component by the driver contains this
-information. Component models are also responsible for making sure
-the coupling period is consistent with their internal timestep.
-History files are managed independently by each component, but restart
-files are coordinated by the driver.
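The clock-and-alarm behavior described above can be sketched in a few lines. This is a schematic illustration only, not the ESMF or ``esmf_wrf_timemgr`` implementation, and the component names and coupling periods are examples:
::

   def run_loop(stop_time, cpl_periods, run_method):
       dt = min(cpl_periods.values())          # the main clock advances at the
       time = 0                                # shortest coupling period
       while time < stop_time:
           time += dt                          # advance the main clock first
           for comp, period in cpl_periods.items():
               if time % period == 0:          # this component's alarm rings
                   run_method(comp, time)      # driver passes the component clock

   calls = []
   run_loop(stop_time=86400,
            cpl_periods={"atm": 1800, "lnd": 1800, "ice": 1800, "ocn": 86400},
            run_method=lambda comp, t: calls.append((comp, t)))
   # atm/lnd/ice run every half hour; the ocean alarm rings once, at the end of the day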
- -The driver clocks are based on ESMF clock datatype are are supported -in software by either an official ESMF library or by software included -in CIME called ``esmf_wrf_timemgr``, which is a much simplified -Fortran implementation of a subset of the ESMF time manager -interfaces. - --------------------- -The Driver Time Loop --------------------- -The driver time loop is hardwired to sequence the component models in -a specific way to meet scientific requirements and to otherwise -provide the maximum amount of potential concurrency of work. The -results of the model integration are not dependent on the processor -layout of the components. See the Craig et al IJHPCA 2012 reference -for further details. - -In addition, the driver is currently configured to couple the -atmosphere, land, and sea ice models using the same coupling frequency -while the runoff, land ice, and ocean model can be coupled at the same -or at a lower frequency. To support this feature, the driver does -temporal averaging of coupling inputs to the ocean and runoff, and the -driver also computes the surface ocean albedo at the higher coupling -frequency. There is no averaging of coupling fields for other -component coupling interactions and the land and sea ice models' -surface albedos are computed inside those components. Averaging -functionality could be added to the driver to support alternative -relative coupling schemes in the future if desired with the additional -caveat that the interaction between the surface albedo computation in -each component and the atmospheric radiation calculation have to be -carefully considered. In addition, some other features may need to be -extended to support other coupling schemes and still allow model -concurrency. - -The coupler processors (pes) handle the interaction of data between -components, so there are separate tasks associated with deriving -fields on the coupler pes, transfering data to and from the coupler -pes and other components, and then running the component models on -their processors. The driver time loop is basically sequenced as -follows, -:: - - - The driver clock is advanced first and alarms set. - - Input data for ocean, land, sea ice, and runoff is computed. - - Ocean data is rearranged from the coupler to the ocean pes. - - Land data is rearranged from the coupler to the land pes. - - Ice data is rearranged from the coupler to the ice pes. - - Runoff data is rearranged from the coupler to the ice pes. - - The ice model is run. - - The land model is run. - - The runoff model is run. - - The ocean model is run. - - The ocean inputs are accumulated, and the atmosphere/ocean fluxes are - computed on the coupler pes based on the results from the previous - atmosphere and ocean coupled timestep. - - Land data is rearranged from the land pes to the coupler pes. - - Land ice input is computed. - - Land ice data is rearranged from the coupler to the land ice pes. - - River output (runoff) data is rearranged from the runoff pes to the coupler pes. - - Ice data is rearranged from the ice pes to the coupler pes. - - Coupler fractions are updated. - - Atmospheric forcing data is computed on the coupler pes. - - Atmospheric data is rearranged from the coupler pes to the atmosphere pes. - - The atmosphere model is run. - - The land ice model is run. - - Land ice data is rearranged from the land ice pes to the coupler pes. - - Atmospheric data is rearranged from the atmosphere pes to the coupler pes. - - Ocean data is rearranged from the ocean pes to the coupler pes. 
- - The loop returns - - Within this loop, as much as possible, coupler work associated - with mapping data, merging fields, diagnosing, applying area corrections, - and computing fluxes is overlapped with component work. - -The land ice model interaction is slightly different. -:: - - - The land ice model is run on the land grid - - Land model output is passed to the land ice model every land coupling period. - - The driver accumluates this data, interpolates the data to the land ice grid, - and the land ice model advances the land ice model about once a year. - -The runoff coupling should be coupled at a frequency between the land -coupling and ocean coupling frequencies. The runoff model runs at the -same time as the land and sea ice models when it runs. - -The current driver sequencing has been developed over nearly two -decades, and it plays a critical role in conserving mass and heat, -minimizing lags, and providing stability in the system. The above -description is consistent with the `concurrency limitations -`_. -Just to reiterate, the land, runoff, and sea ice models will always -run before the atmospheric model, and the coupler and ocean models are -able to run concurrently with all other components. The coupling -between the atmosphere, land, sea ice, and atmosphere/ocean flux -computation incurs no lags but the coupling to the ocean state is -lagged by one ocean coupling period in the system. `Mass and heat -`_ -are conserved in the system with more description. - - -It is possible to reduce the ocean lag in the system. A driver -namelist variable, ``ocean_tight_coupling``, moves the step where -ocean data is rearranged from the ocean pes to the coupler pes from -the end of the loop to before the atmosphere/ocean flux computation. -If ocean_tight_coupling is set to true, then the ocean lag is reduced -by one atmosphere coupling period, but the ability of the ocean model -to run concurrently with the atmosphere model is also reduced or -eliminated. This flag is most useful when the ocean coupling -frequency matches the other components. - ------------------- -Coupling Frequency ------------------- -In the current implementation, the coupling period must be identical -for the atmosphere, sea ice, and land components. The ocean coupling -period can be the same or greater. The runoff coupling period should -be between or the same as the land and ocean coupling period. All -coupling periods must be multiple integers of the smallest coupling -period and will evenly divide the NCPL_BASE_PERIOD, typically one day, -set in env_run.xml. The coupling periods are set using the NCPL env -variables in env_run.xml. - -The coupling periods are set in the driver namelist for each component -via variables called something like atm_cpl_dt and atm_cpl_offset. -The units of these inputs are seconds. The coupler template file -derives these values from CIME script variable names like ATM_NCPL -which is the coupling frequency per day. The \*_cpl_dt input -specifies the coupling period in seconds and the \*_cpl_offset input -specifies the temporal offset of the coupling time relative to initial -time. An example of an offset might be a component that couples every -six hours. That would normally be on the 6th, 12th, 18th, and 24th -hour of every day. An offset of 3600 seconds would change the -coupling to the 1st, 7th, 13th, and 19th hour of every day. 
The -offsets cannot be larger than the coupling period and the sign of the -offsets is such that a positive offset shifts the alarm time forward -by that number of seconds. The offsets are of limited use right now -because of the limitations of the relative coupling frequencies. - -Offsets play an important role in supporting concurrency. There is an -offset of the smallest coupling period automatically introduced in -every coupling run alarm for each component clock. This is only -mentioned because it is an important but subtle point of the -implementation and changing the coupling offset could have an impact -on concurrency performance. Without this explicit automatic offset, -the component run alarms would trigger at the end of the coupling -period. This is fine for components that are running at the shortest -coupling period, but will limit the ability of models to run -concurrently for models that couple at longer periods. What is really -required for concurrency is that the run alarm be triggered as early -as possible and that the data not be copied from that component to the -coupler pes until the coupling period has ended. The detailed -implementation of this feature is documented in the seq_timemgr_mod. -90 file and the impact of it for the ocean coupling is implemented in -the ccsm_driver.F90 code via use of the ocnrun_alarm and ocnnext_alarm -variables. - diff --git a/doc/source/glossary/index.rst b/doc/source/glossary/index.rst index e8fc6fb8303..6ab8f033fa9 100644 --- a/doc/source/glossary/index.rst +++ b/doc/source/glossary/index.rst @@ -14,14 +14,14 @@ General .. glossary:: - active or prognostic component + active or prognostic component Solves a complex set of equations to describe a sub-model’s behavior. case (CASE) An instance of a global climate model simulation. A case is defined by a component set, a model grid, a machine, a compiler, and any other additional customizations. - component + component A sub-model coupled with other components to constitute a global climate modeling system. Example components: atmosphere, ocean, land, etc. @@ -29,17 +29,17 @@ General A complete set of components to be linked together into a climate model to run a specific case. - data component + data component Replacement for an active component. Sends and receives the same variables to and from other models (but ignores the variables received). grid (GRID) A set of numerical grids of a case. Each active component operates on its own numerical grid. - resolution + resolution Used to refer to a set of grids. Each grid within a set may have different resolution. - stub component + stub component Simply occupies the required place in the climate execution sequence and does send or receive any data. @@ -49,25 +49,25 @@ Coupling .. glossary:: - coupler + coupler A component of the CIME infrastructure that is run from within the driver. It can be run on a subset of the total processors, and carries out mapping (interpolation), merging, diagnostics, and other calculations. - driver + driver The hub that connects all components. CIME driver runs on all hardware processors, runs the top level instructions, and, executes the driver time loop. forcing An imposed perturbation of Earth's energy balance - Model Coupling Toolkit or MCT + Model Coupling Toolkit or MCT A library used by CIME for all data rearranging and mapping (interpolation) - mask + mask Determines land/ocean boundaries in the model - mapping + mapping Interpolation of fields between components. 
********************* @@ -85,13 +85,13 @@ Files and Directories case root (CASEROOT) The directory where the case is created. Includes namelist files, xml files, and scripts to setup, - build, and run the case. Also, includes logs and timing output files. + build, and run the case. Also, includes logs and timing output files. CIME root (CIMEROOT) The directory where the CIME source code resides - history files - NetCDF files that contain fields associated with the state of the model at a given time slice. + history files + NetCDF files that contain fields associated with the state of the model at a given time slice. initial files Files required to start a file @@ -100,22 +100,22 @@ Files and Directories A time-series of input data files where all the fields in the stream are located in the same data file and all share the same spatial and temporal coordinates. - namelist files + namelist files Each namelist file includes input parameters for a specific component. run directory (RUNDIR) Where the case is run. - restart files + restart files Written and read by each component in the RUNDIR to stop and subsequently restart in a bit-for-bit fashion. - rpointer files + rpointer files Text file written by the coupler in the RUNDIR with a list of necessary files required for model restart. - XML files + XML files Elements and attributes in these files configure a case. (building, running, batch, etc.) These files include env_archive.xml, env_batch.xml, env_build.xml, env_case.xml, env_mach_pes.xml, env_mach_specific.xml, env_run.xml - in CASEROOT and can be queried and modifed using the xmlquery and xmlchange tools. + in CASEROOT and can be queried and modifed using the xmlquery and xmlchange tools. *********** Development @@ -131,7 +131,7 @@ Development one or more source files that are modified by the user. Before building a case, CIME replaces the original source files with these files. - tag + tag A snapshot of the source code. With each consecutive tag (one or more) answer-changing modifications to the source code of a component are introduced. @@ -144,7 +144,7 @@ Testing .. glossary:: - baseline + baseline A set of test cases that is run using a tag which is complete, tested, and has no modifications in the source code. Used to assess the performance/accuracy of a case that is run using a sandbox. @@ -152,13 +152,13 @@ Testing A test that fails in its comparison with a baseline. blessing - Part of the unit testing framework used by CIME scripts regression tests. + Part of the unit testing framework used by CIME scripts regression tests. regression test A test that compares with baseline results to determine if any new errors have been introduced into the code base. - unit testing + unit testing A fast, self-verifying test of a small piece of code. ************* @@ -167,5 +167,5 @@ Miscellaneous .. glossary:: - ESP - External System Processing: handles data assimilation \ No newline at end of file + ESP + External System Processing: handles data assimilation diff --git a/doc/source/index.rst b/doc/source/index.rst index 23edd2b13b1..89cc7155218 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -15,16 +15,14 @@ Table of contents ----------------- .. toctree:: :maxdepth: 2 - + what_cime/index.rst users_guide/index.rst - data_models/index.rst - driver_cpl/index.rst build_cpl/index.rst misc_tools/index.rst -Addendum ---------- +Appendices +---------- .. 
toctree:: :maxdepth: 2 @@ -43,6 +41,6 @@ Python Module Indices and Search -CIME is developed by the -`E3SM `_ and +CIME is developed by the +`E3SM `_ and `CESM `_ projects. diff --git a/doc/source/misc_tools/ect.rst b/doc/source/misc_tools/ect.rst index 58113747e0a..5a2af9737de 100644 --- a/doc/source/misc_tools/ect.rst +++ b/doc/source/misc_tools/ect.rst @@ -14,16 +14,16 @@ UF-CAM-ECT - detects issues in CAM and CLM (9 time step runs) POP-ECT - detects issues in POP and CICE (12 month runs) The ECT process involves comparing runs generated with -the new scenario ( 3 for CAM-ECT and UF-CAM-ECT, and 1 for POP-ECT) +the new scenario ( 3 for CAM-ECT and UF-CAM-ECT, and 1 for POP-ECT) to an ensemble built on a trusted machine (currently cheyenne). The python ECT tools are located in the pyCECT subdirectory or https://github.com/NCAR/PyCECT/releases. -OR- - -We now provide a web server for CAM-ECT and UF-CAM-ECT, where + +We now provide a web server for CAM-ECT and UF-CAM-ECT, where you can upload the (3) generated runs for comparison to our ensemble. -Please see the webpage at http://www.cesm.ucar.edu/models/cesm2/verification/ +Please see the webpage at http://www.cesm.ucar.edu/models/cesm2/verification/ for further instructions. ----------------------------------- @@ -42,12 +42,12 @@ $CESMDATAROOT/inputdata/validation/pop_ensembles If none of our ensembles are suitable for your needs, then you may create your own ensemble (and summary file) using the following instructions: - -(1) To create a new ensemble, use the ensemble.py script in this directory. + +(1) To create a new ensemble, use the ensemble.py script in this directory. This script creates and compiles a case, then creates clones of the original case, where the initial temperature perturbation is slightly modified for each ensemble member. At this time, cime includes functionality -to create ensembles for CAM-ECT, UF-CAM-ECT, and POP-ECT. +to create ensembles for CAM-ECT, UF-CAM-ECT, and POP-ECT. (2) Use --ect to specify whether ensemble is for CAM or POP. (See 'python ensemble.py -h' for additional details). @@ -73,21 +73,21 @@ POP-ECT: python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/uf_ensemble/ensemble.cesm_tag.000 --mach cheyenne --ensemble 40 --ect pop --project P99999999 -Notes: +Notes: (a) ensemble.py accepts (most of) the argumenets of create_newcase (b) case name must end in ".000" and include the full path (c) ensemble size must be specified, and suggested defaults are listed - above. Note that for CAM-ECT and UF-CAM-ECT, the ensemble size + above. Note that for CAM-ECT and UF-CAM-ECT, the ensemble size needs to be larger than the number of variables that ECT will evaluate. -(5) Once all ensemble simulations have run successfully, copy every cam history -file (*.cam.h0.*) for CAM-ECT and UF-CAM-ECT) or monthly pop history file -(*.pop.h.*) for POP-ECT from each ensemble run directory into a separate directory. +(5) Once all ensemble simulations have run successfully, copy every cam history +file (*.cam.h0.*) for CAM-ECT and UF-CAM-ECT) or monthly pop history file +(*.pop.h.*) for POP-ECT from each ensemble run directory into a separate directory. Next create the ensemble summary using the pyCECT tool pyEnsSum.py (for CAM-ECT and -UF-CAM-ECT) or pyEnsSumPop.py (for POP-ECT). For details see README_pyEnsSum.rst +UF-CAM-ECT) or pyEnsSumPop.py (for POP-ECT). For details see README_pyEnsSum.rst and README_pyEnsSumPop.rst with the pyCECT tools. 
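Step (5) above amounts to collecting one directory of history files for the summary tool. A hedged sketch for CAM-ECT, with placeholder paths; substitute the run directories of your own ensemble members. ::

   # gather the CAM monthly history files from every ensemble member
   mkdir -p /path/to/ens_hist
   cp /path/to/ensemble_runs/camcase.cesm_tag.*/run/*.cam.h0.* /path/to/ens_hist/
   # then point pyEnsSum.py (or pyEnsSumPop.py for POP-ECT) at this directory,
   # following README_pyEnsSum.rst and README_pyEnsSumPop.rst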
------------------- @@ -105,14 +105,14 @@ attributes give this information. (2) For example, for CAM-ECT: python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/camcase.cesm_tag.000 --ect cam --mach cheyenne --project P99999999 ---compset F2000climo --res f19_f19 +--compset F2000climo --res f19_f19 For example, for UF-CAM-ECT: -python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/uf.camcase.cesm_tag.000 --ect cam --uf --mach cheyenne --project P99999999 --compset F2000climo --res f19_f19 +python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/uf.camcase.cesm_tag.000 --ect cam --uf --mach cheyenne --project P99999999 --compset F2000climo --res f19_f19 For example, for POP-ECT: -python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/popcase.cesm_tag.000 --ect pop --mach cheyenne --project P99999999 --compset G --res T62_g17 +python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/popcase.cesm_tag.000 --ect pop --mach cheyenne --project P99999999 --compset G --res T62_g17 (3) Next verify the new simulation(s) with the pyCECT tool pyCECT.py (see README_pyCECT.rst with the pyCECT tools). diff --git a/doc/source/misc_tools/load-balancing-tool.rst b/doc/source/misc_tools/load-balancing-tool.rst index da96f353632..2701b9e1b6f 100644 --- a/doc/source/misc_tools/load-balancing-tool.rst +++ b/doc/source/misc_tools/load-balancing-tool.rst @@ -36,13 +36,13 @@ Also in this documentation is:: 1. set PYTHONPATH to include $CIME_DIR/scripts:$CIME_DIR/tools/load_balancing_tool - + 2. create PE XML file to describe the PE layouts for the timing runs 3. $ ./load_balancing_submit.py --res --compset --pesfile - + 4. ... wait for jobs to run ... - + 5. $ ./load_balancing_solve.py --total-tasks --blocksize 8 @@ -55,7 +55,7 @@ Simulations can be run on a given system by executing the load_balancing_tool.py script, located in cime/tools/load_balancing_tool/load_balancing_tool_submit.py. This creates timing files in the case directory which will be used to solve a mixed integer linear program optimizing the layout. If there is already timing -information available, then a +information available, then a As with the create_newcase and create_test scripts, command line options are used to tailor the simulations for a given model. These values will be @@ -345,7 +345,7 @@ To permanently add to CIME: Testing ******* -To run the provided test suite: +To run the provided test suite: 1. set PYTHONPATH to include CIME libraries:: @@ -377,6 +377,3 @@ To run the provided test suite: $ cd $CIME_DIR/tools/load_balancing_tool $ ./tests/load_balancing_test.py - - - diff --git a/doc/source/users_guide/cime-change-namelist.rst b/doc/source/users_guide/cime-change-namelist.rst index 34bda3feec0..54497898d2b 100644 --- a/doc/source/users_guide/cime-change-namelist.rst +++ b/doc/source/users_guide/cime-change-namelist.rst @@ -195,7 +195,7 @@ Each data model can be runtime-configured with its own namelist. Data Atmosphere (DATM) ~~~~~~~~~~~~~~~~~~~~~~ -DATM is discussed in detail in :ref:`data atmosphere overview `. +DATM is discussed in detail in :ref:`data atmosphere overview ` (**link currently broken**). DATM can be user-customized by changing either its *namelist input files* or its *stream files*. The namelist file for DATM is **datm_in** (or **datm_in_NNN** for multiple instances). @@ -217,7 +217,7 @@ After calling `preview_namelists <../Tools_user/preview_namelists.html>`_ again, Data Ocean (DOCN) ~~~~~~~~~~~~~~~~~~~~~~ -DOCN is discussed in detail in :ref:`data ocean overview `. 
+DOCN is discussed in detail in :ref:`data ocean overview ` (**link currently broken**). DOCN can be user-customized by changing either its namelist input or its stream files. The namelist file for DOCN is **docn_in** (or **docn_in_NNN** for multiple instances). @@ -239,7 +239,7 @@ After changing this file and calling `preview_namelists <../Tools_user/preview_n Data Sea-ice (DICE) ~~~~~~~~~~~~~~~~~~~~~~ -DICE is discussed in detail in :ref:`data sea-ice overview `. +DICE is discussed in detail in :ref:`data sea-ice overview ` (**link currently broken**). DICE can be user-customized by changing either its namelist input or its stream files. The namelist file for DICE is ``dice_in`` (or ``dice_in_NNN`` for multiple instances) and its values can be changed by editing the ``$CASEROOT`` file ``user_nl_dice`` (or ``user_nl_dice_NNN`` for multiple instances). @@ -256,7 +256,7 @@ The namelist file for DICE is ``dice_in`` (or ``dice_in_NNN`` for multiple insta Data Land (DLND) ~~~~~~~~~~~~~~~~~~~~~~ -DLND is discussed in detail in :ref:`data land overview `. +DLND is discussed in detail in :ref:`data land overview ` (**link currently broken**). DLND can be user-customized by changing either its namelist input or its stream files. The namelist file for DLND is ``dlnd_in`` (or ``dlnd_in_NNN`` for multiple instances) and its values can be changed by editing the ``$CASEROOT`` file ``user_nl_dlnd`` (or ``user_nl_dlnd_NNN`` for multiple instances). @@ -273,7 +273,7 @@ The namelist file for DLND is ``dlnd_in`` (or ``dlnd_in_NNN`` for multiple insta Data River (DROF) ~~~~~~~~~~~~~~~~~~~~~~ -DROF is discussed in detail in :ref:`data river overview `. +DROF is discussed in detail in :ref:`data river overview ` (**link currently broken**). DROF can be user-customized by changing either its namelist input or its stream files. The namelist file for DROF is ``drof_in`` (or ``drof_in_NNN`` for multiple instances) and its values can be changed by editing the ``$CASEROOT`` file ``user_nl_drof`` (or ``user_nl_drof_NNN`` for multiple instances). @@ -298,7 +298,7 @@ CIME calls **$SRCROOT/components/cam/cime_config/buildnml** to generate the CAM' CAM-specific CIME xml variables are set in **$SRCROOT/components/cam/cime_config/config_component.xml** and are used by CAM's **buildnml** script to generate the namelist. -For complete documentation of namelist settings, see `CAM namelist variables `_. +For complete documentation of namelist settings, see `CAM namelist variables `_. To modify CAM namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_cam** file. (See the documentation for each file at the top of that file.) @@ -316,7 +316,7 @@ CIME calls **$SRCROOT/components/clm/cime_config/buildnml** to generate the CLM CLM-specific CIME xml variables are set in **$SRCROOT/components/clm/cime_config/config_component.xml** and are used by CLM's **buildnml** script to generate the namelist. -For complete documentation of namelist settings, see `CLM namelist variables `_. +For complete documentation of namelist settings, see `CLM namelist variables `_. To modify CLM namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_clm** file. @@ -336,34 +336,32 @@ CICE CIME calls **$SRCROOT/components/cice/cime_config/buildnml** to generate the CICE namelist variables. -For complete documentation of namelist settings, see `CICE namelist variables `_. +For complete documentation of namelist settings, see `CICE namelist variables `_. 
To modify CICE namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_cice** file. (See the documentation for each file at the top of that file.) To see the result of your change, call `preview_namelists <../Tools_user/preview_namelists.html>`_ and verify that the changes appear correctly in **CaseDocs/ice_in**. -In addition, `case.setup <../Tools_user/case.setup.html>`_ creates CICE's compile time `block decomposition variables `_ in **env_build.xml** as follows: +In addition, `case.setup <../Tools_user/case.setup.html>`_ creates CICE's compile time `block decomposition variables `_ in **env_build.xml**. POP2 ~~~~ CIME calls **$SRCROOT/components/pop2/cime_config/buildnml** to generate the POP2 namelist variables. -For complete documentation of namelist settings, see `POP2 namelist variables `_. +For complete documentation of namelist settings, see `POP2 namelist variables `_. To modify POP2 namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_pop2** file. (See the documentation for each file at the top of that file.) To see the result of your change, call `preview_namelists <../Tools_user/preview_namelists.html>`_ and verify that the changes appear correctly in **CaseDocs/ocn_in**. -In addition, `case.setup <../Tools_user/case.setup.html>`_ generates POP2's compile-time `block decomposition variables `_ in **env_build.xml** as shown here: - CISM ~~~~ -See `CISM namelist variables `_ for a complete description of the CISM runtime namelist variables. This includes variables that appear both in **cism_in** and in **cism.config**. +See `CISM namelist variables `_ for a complete description of the CISM runtime namelist variables. This includes variables that appear both in **cism_in** and in **cism.config**. To modify any of these settings, add the appropriate keyword/value pair at the end of the **user_nl_cism** file. (See the documentation for each file at the top of that file.) Note that there is no distinction between variables that will appear in **cism_in** and those that will appear in **cism.config**: simply add a new variable setting in **user_nl_cism**, and it will be added to the appropriate place in **cism_in** or **cism.config**. To see the result of your change, call `preview_namelists <../Tools_user/preview_namelists.html>`_ and verify that the changes appear correctly in **CaseDocs/cism_in** and **CaseDocs/cism.config**. -Some CISM runtime settings are sets via **env_run.xml**, as documented in `CISM runtime variables `_. +Some CISM runtime settings are set via **env_run.xml**, as documented in `CISM runtime variables `_. diff --git a/doc/source/users_guide/cime-config.rst b/doc/source/users_guide/cime-config.rst index 29870414982..f2953f0e9e0 100644 --- a/doc/source/users_guide/cime-config.rst +++ b/doc/source/users_guide/cime-config.rst @@ -53,7 +53,20 @@ CIME recognizes a user-created custom configuration directory, ``$HOME/.cime``. For an example of a **config_machines.xml** file for a linux cluster, look at **$CIMEROOT/config/xml_schemas/config_machines_template.xml**. -* ``config_compilers.xml`` +* ``cmake_macros`` + + This subdirectory contains a hierarchy of cmake macro files which + are used to generate the flags to be used in the compilation of a + case. The cmake macro files are examined in the following order, with later files taking precedence over earlier ones.
+ + * universal.cmake + * *COMPILER*.cmake + * *OS*.cmake + * *MACHINE*.cmake + * *COMPILER*_*OS*.cmake + * *COMPILER*_*MACHINE*.cmake + +* ``config_compilers.xml`` **DEPRECATED use cmake_macros** This file permits you to customize compiler settings for your machine and is appended to the file **$CIMEROOT/config/$model/machines/config_compilers.xml**. diff --git a/doc/source/users_guide/cime-customize.rst b/doc/source/users_guide/cime-customize.rst new file mode 100644 index 00000000000..6431f5c388a --- /dev/null +++ b/doc/source/users_guide/cime-customize.rst @@ -0,0 +1,76 @@ +.. _customizing-cime: + +=========================== +CIME config and hooks +=========================== + +CIME provides the ability to define model specific config and hooks. + +The config alters CIME's runtime and the hooks are triggered during their event. + +----------------------------------- +How does CIME load customizations? +----------------------------------- + +CIME will search ``cime_config/customize`` and load any python found under this directory or it's children. + +Any variables, functions or classes loaded are available from the ``CIME.customize`` module. + +--------------------------- +CIME config +--------------------------- + +Available config and descriptions. + +================================= ======================= ===== ================================================================================================================================================================================================================================ +Variable Default Type Description +================================= ======================= ===== ================================================================================================================================================================================================================================ +additional_archive_components ('drv', 'dart') tuple Additional components to archive. +allow_unsupported True bool If set to `True` then unsupported compsets and resolutions are allowed. +baseline_store_teststatus True bool If set to `True` and GENERATE_BASELINE is set then a teststatus.log is created in the case's baseline. +build_cime_component_lib True bool If set to `True` then `Filepath`, `CIME_cppdefs` and `CCSM_cppdefs` directories are copied from CASEBUILD directory to BUILDROOT in order to build CIME's internal components. +build_model_use_cmake False bool If set to `True` the model is built using using CMake otherwise Make is used. +calculate_mode_build_cost False bool If set to `True` then the TestScheduler will set the number of processors for building the model to min(16, (($GMAKE_J * 2) / 3) + 1) otherwise it's set to 4. +case_setup_generate_namelist False bool If set to `True` and case is a test then namelists are created during `case.setup`. +check_invalid_args True bool If set to `True` then script arguments are checked for being valid. +check_machine_name_from_test_name True bool If set to `True` then the TestScheduler will use testlists to parse for a list of tests. +common_sharedlibroot True bool If set to `True` then SHAREDLIBROOT is set for the case and SystemTests will only build the shared libs once. +copy_cesm_tools True bool If set to `True` then CESM specific tools are copied into the case directory. +copy_cism_source_mods True bool If set to `True` then `$CASEROOT/SourceMods/src.cism/source_cism` is created and a README is written to directory. 
+copy_e3sm_tools False bool If set to `True` then E3SM specific tools are copied into the case directory. +create_bless_log False bool If set to `True` and comparing test to baselines the most recent bless is added to comments. +create_test_flag_mode cesm str Sets the flag mode for the `create_test` script. When set to `cesm`, the `-c` flag will compare baselines against a give directory. +default_short_term_archiving True bool If set to `True` and the case is not a test then DOUT_S is set to True and TIMER_LEVEL is set to 4. +driver_choices ('mct', 'nuopc') tuple Sets the available driver choices for the model. +driver_default nuopc str Sets the default driver for the model. +enable_smp True bool If set to `True` then `SMP=` is added to model compile command. +make_case_run_batch_script False bool If set to `True` and case is not a test then `case.run.sh` is created in case directory from `$MACHDIR/template.case.run.sh`. +mct_path {srcroot}/libraries/mct str Sets the path to the mct library. +serialize_sharedlib_builds True bool If set to `True` then the TestScheduler will use `proc_pool + 1` processors to build shared libraries otherwise a single processor is used. +set_comp_root_dir_cpl True bool If set to `True` then COMP_ROOT_DIR_CPL is set for the case. +share_exes False bool If set to `True` then the TestScheduler will share exes between tests. +shared_clm_component True bool If set to `True` and then the `clm` land component is built as a shared lib. +sort_tests False bool If set to `True` then the TestScheduler will sort tests by runtime. +test_custom_project_machine melvin str Sets the machine name to use when testing a machine with no PROJECT. +test_mode cesm str Sets the testing mode, this changes various configuration for CIME's unit and system tests. +ufs_alternative_config False bool If set to `True` and UFS_DRIVER is set to `nems` then model config dir is set to `$CIMEROOT/../src/model/NEMS/cime/cime_config`. +use_kokkos False bool If set to `True` and CAM_TARGET is `preqx_kokkos`, `theta-l` or `theta-l_kokkos` then kokkos is built with the shared libs. +use_nems_comp_root_dir False bool If set to `True` then COMP_ROOT_DIR_CPL is set using UFS_DRIVER if defined. +use_testreporter_template True bool If set to `True` then the TestScheduler will create `testreporter` in $CIME_OUTPUT_ROOT. +verbose_run_phase False bool If set to `True` then after a SystemTests successful run phase the elapsed time is recorded to BASELINE_ROOT, on a failure the test is checked against the previous run and potential breaking merges are listed in the testlog. +xml_component_key COMP_ROOT_DIR_{} str The string template used as the key to query the XML system to find a components root directory e.g. the template `COMP_ROOT_DIR_{}` and component `LND` becomes `COMP_ROOT_DIR_LND`. +================================= ======================= ===== ================================================================================================================================================================================================================================ + +--------------------------- +CIME hooks +--------------------------- + +Available hooks and descriptions. + +======================================= ================================= +Function Description +======================================= ================================= +``save_build_provenance(case, lid)`` Called after the model is built. +``save_prerun_provenance(case, lid)`` Called before the model is run. 
+``save_postrun_provenance(case, lid)`` Called after the model is run. +======================================= ================================= diff --git a/doc/source/users_guide/cime-dir.rst b/doc/source/users_guide/cime-dir.rst index 15643de5f05..9bdb6540a63 100644 --- a/doc/source/users_guide/cime-dir.rst +++ b/doc/source/users_guide/cime-dir.rst @@ -25,25 +25,22 @@ CIME's content is split into several subdirectories. Users should start in the * ========================== ================================================================== Directory or Filename Description ========================== ================================================================== + **CIME/** **The main CIME source** + CIME/ParamGen Python tool for generating runtime params + CIME/Servers Scripts to interact with input data servers + CIME/SystemTests Scripts for create_test tests. + CIME/Tools Auxiliary tools, scripts and functions. CMakeLists.txt For building with CMake + CONTRIBUTING.md Guide for contributing to CIME ChangeLog Developer-maintained record of changes to CIME ChangeLog_template Template for an entry in ChangeLog LICENSE.TXT The CIME license - README Brief intro to CIME + MANIFEST.in README.md README in markdown language - README.unit_testing Instructions for running unit tests with CIME - **config/** **Shared and model-specific configuration files** - config/cesm/ CESM-specific configuration options - config/e3sm/ E3SM-specific configuration options + conftest.py + doc Documentation for CIME in rst format + docker Container for CIME testing **scripts/** **The CIME user interface** - scripts/lib/ Infrastructure source code for CIME scripts and functions - scripts/Tools/ Auxiliary tools; scripts and functions - **src/** **Model source code provided by CIME** - src/components/ CIME-provided components including data and stub models - src/drivers/ CIME-provided main driver for a climate model - src/externals/ Software provided with CIME for building a climate model - src/share/ Model source code provided by CIME and used by multiple components - **tests/** **Tests** **tools/** **Standalone climate modeling tools** utils/ Some Perl source code needed by some prognostic components ========================== ================================================================== diff --git a/doc/source/users_guide/cime-internals.rst b/doc/source/users_guide/cime-internals.rst index 17d8e5633d1..3f31dd7cac6 100644 --- a/doc/source/users_guide/cime-internals.rst +++ b/doc/source/users_guide/cime-internals.rst @@ -48,4 +48,3 @@ The file **$CIMEROOT/config/[cesm,e3sm]/config_files.xml** contains all model-sp user-mods directories: - diff --git a/doc/source/users_guide/components.rst b/doc/source/users_guide/components.rst new file mode 100644 index 00000000000..3b48da0c4cc --- /dev/null +++ b/doc/source/users_guide/components.rst @@ -0,0 +1,62 @@ +.. _components: + +========== +Components +========== + +A single component is the smallest unit within a model. Multiple components make up a component set. + +Configuration +-------------- + +The configuration for a component can be found under `cime_config` in the component directory. + +Example contents of a component's `config_component.xml`.
+ +:: + + + + + + + + Stub atm component + + + + char + satm + satm + case_comp + env_case.xml + Name of atmosphere component + + + + + ========================================= + SATM naming conventions in compset name + ========================================= + + + + +Triggering a rebuild +-------------------- + +It's the responsibility of a component to define which settings will require a component to be rebuilt. + +These triggers can be defined as follows. + +:: + + + char + NTASKS,NTHREADS,NINST + rebuild_triggers + env_build.xml + Settings that will trigger a rebuild + + +If a user was to change `NTASKS`, `NTHREADS`, or `NINST` in a case using the component, then a rebuild would be required before the case could be submitted again. diff --git a/doc/source/users_guide/create-a-case.rst b/doc/source/users_guide/create-a-case.rst index 71b39b5cbd8..3a74a0c87c2 100644 --- a/doc/source/users_guide/create-a-case.rst +++ b/doc/source/users_guide/create-a-case.rst @@ -63,7 +63,7 @@ In the argument to ``--case``, the case name is taken from the string after the The output from create_newcase includes information such as. - The compset longname is ``2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV`` -- The model resolution is ``a%0.9x1.25_l%0.9x1.25_oi%gx1v6_r%r05_m%gx1v6_g%null_w%null`` +- The grid set is ``a%0.9x1.25_l%0.9x1.25_oi%gx1v6_r%r05_m%gx1v6_g%null_w%null`` `create_newcase <../Tools_user/create_newcase.html>`_ installs files in ``$CASEROOT`` that will build and run the model and to optionally archive the case on the target platform. @@ -74,23 +74,26 @@ Running `create_newcase <../Tools_user/create_newcase.html>`_ creates the follo - `case.build <../Tools_user/case.build.html>`_ Script to build component and utility libraries and model executable. +- `case.cmpgen_namelist <../Tools_user/case.submit.html>`_ + Script to perform namelist baseline operations (compare, generate, or both)." + +- case.qstatus + Script to query the queue on any queue system. + - `case.setup <../Tools_user/case.setup.html>`_ Script used to set up the case (create the case.run script, Macros file and user_nl_xxx files). -- `case.st_archive <../Tools_user/case.st_archive.html>`_ - Script to perform short term archiving to disk for your case output. Note that this script is run automatically by the normal CIME workflow. - - `case.submit <../Tools_user/case.submit.html>`_ Script to submit the case to run using the machine's batch queueing system. -- `case.cmpgen_namelist <../Tools_user/case.submit.html>`_ - Script to perform namelist baseline operations (compare, generate, or both)." +- `check_case <../Tools_user/check_case.html>`_ + Script to verify case is set up correctly. -- `xmlchange <../Tools_user/xmlchange.html>`_ - Script to modify values in the xml files. +- `check_input_data <../Tools_user/check_input_data.html>`_ + Script for checking for various input data sets and moving them into place. -- `xmlquery <../Tools_user/xmlquery.html>`_ - Script to query values in the xml files. +- `pelayout <../Tools_user/pelayout.html>`_ + Script to query and modify the NTASKS, ROOTPE, and NTHRDS for each component model. - `preview_namelists <../Tools_user/preview_namelists.html>`_ Script for users to see their component namelists in ``$CASEROOT/CaseDocs`` before running the model. @@ -98,15 +101,12 @@ Running `create_newcase <../Tools_user/create_newcase.html>`_ creates the follo - `preview_run <../Tools_user/preview_run.html>`_ Script for users to see batch submit and mpirun command." 
-- `check_input_data <../Tools_user/check_input_data.html>`_ - Script for checking for various input data sets and moving them into place. +- `xmlchange <../Tools_user/xmlchange.html>`_ + Script to modify values in the xml files. -- `check_case <../Tools_user/check_case.html>`_ - Script to verify case is set up correctly. +- `xmlquery <../Tools_user/xmlquery.html>`_ + Script to query values in the xml files. -- `pelayout <../Tools_user/pelayout.html>`_ - Script to query and modify the NTASKS, ROOTPE, and NTHRDS for each component model. - This a convenience script that can be used in place of `xmlchange <../Tools_user/xmlchange.html>`_ and `xmlquery <../Tools_user/xmlquery.html>`_. **XML Files** @@ -114,24 +114,30 @@ Running `create_newcase <../Tools_user/create_newcase.html>`_ creates the follo Defines patterns of files to be sent to the short-term archive. You can edit this file at any time. You **CANNOT** use `xmlchange <../Tools_user/xmlchange.html>`_ to modify variables in this file." -- env_mach_specific.xml - Sets a number of machine-specific environment variables for building and/or running. - You **CANNOT** use `xmlchange <../Tools_user/xmlchange.html>`_ to modify variables in this file. +- env_batch.xml + Sets batch system settings such as wallclock time and queue name." - env_build.xml Sets model build settings. This includes component resolutions and component compile-time configuration options. You must run the case.build command after changing this file. -- env_run.xml - Sets runtime settings such as length of run, frequency of restarts, output of coupler diagnostics, and short-term and long-term archiving. - This file can be edited at any time before a job starts. +- env_case.xml + Parameters set by create_newcase - env_mach_pes.xml Sets component machine-specific processor layout (see changing pe layout ). The settings in this are critical to a well-load-balanced simulation (see :ref:`load balancing `). -- env_batch.xml - Sets batch system settings such as wallclock time and queue name." +- env_mach_specific.xml + Sets a number of machine-specific environment variables for building and/or running. + You **CANNOT** use `xmlchange <../Tools_user/xmlchange.html>`_ to modify variables in this file. + +- env_run.xml + Sets runtime settings such as length of run, frequency of restarts, output of coupler diagnostics, and short-term and long-term archiving. + This file can be edited at any time before a job starts. + +- env_workflow.xml + Sets paramateres for the runtime workflow. **User Source Mods Directory** @@ -207,6 +213,17 @@ As an example, the directory could contain the following files: :: > shell_commands (this would contain ./xmlchange commands) > SourceMods/src.cam/dyncomp.F90 +It is important to note that the file containing the **xmlchange** +commands must be named ``shell_commands`` in order for it to be recognised +and run upon case creation. + +The structure of the component directories do not need to be the +same as in the component source code. As an example, should the user +want to modify the ``src/dynamics/eul/dyncomp.F90`` file within the +CAM source code, the modified file should be put into the directory +``SourceMods/src.cam`` directly. There is no need to mimic the source +code structure, such as ``SourceMods/src.cam/dynamics/eul``. + When the user calls **create_newcase** with the ``--user-mods-dir`` pointing to the full pathname of the directory containing these changes, then the ``CASEROOT`` will be created with these changes applied. 
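Putting the pieces above together, here is a hedged sketch of a user-mods directory and how it is passed to ``create_newcase``; the directory path, compset alias and grid alias are illustrative only. ::

   > cat /path/to/my_mods/shell_commands
     ./xmlchange STOP_OPTION=nmonths
     ./xmlchange STOP_N=1
   > ls /path/to/my_mods/SourceMods/src.cam
     dyncomp.F90
   > create_newcase --case mycase --compset F2000climo --res f19_f19 --user-mods-dir /path/to/my_mods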
diff --git a/doc/source/users_guide/grids.rst b/doc/source/users_guide/grids.rst index a5fe82a3e88..1ebe44f8171 100644 --- a/doc/source/users_guide/grids.rst +++ b/doc/source/users_guide/grids.rst @@ -94,7 +94,7 @@ The steps for adding a new component grid to the model system follow. This proce If you are introducing just one new grid, you can leverage SCRIP grid files that are already in place for the other components. There is no supported functionality for creating the SCRIP format file. -2. Build the **check_map** utility by following the instructions in **$CCSMROOT/mapping/check_maps/INSTALL**. Also confirm that the `ESMF `_ toolkit is installed on your machine. +2. Build the **check_map** utility by following the instructions in **$CIMEROOT/tools/mapping/check_maps/INSTALL**. Also confirm that the ESMF toolkit is installed on your machine. When you add new user-defined grid files, you also need to generate a set of mapping files so the coupler can send data from a component on one grid to a component on another grid. There is an ESMF tool that tests the mapping file by comparing a mapping of a smooth function to its true value on the destination grid. @@ -113,14 +113,14 @@ The steps for adding a new component grid to the model system follow. This proce Using the SCRIP grid files from Step 1, generate a set of conservative (area-averaged) and non-conservative (patch and bilinear) mapping files. - You can do this by calling **gen_cesm_maps.sh** in ``$CCSMROOT/tools/mapping/gen_mapping_files/``. + You can do this by calling **gen_cesm_maps.sh** in ``$CIMEROOT/tools/mapping/gen_mapping_files/``. This script generates all the mapping files needed except ``rof -> ocn``, which is discussed below. This script uses the ESMF offline weight generation utility, which you must build *prior* to running **gen_cesm_maps.sh**. The **README** file in the **gen_mapping_files/** directory describes how to run **gen_cesm_maps.sh**. The basic usage is shown here: :: - > cd $CCSMROOT/mapping/gen_mapping_files + > cd $CIMEROOT/tools/mapping/gen_mapping_files > ./gen_cesm_maps.sh \ --fileocn \ --fileatm \ @@ -150,12 +150,12 @@ The steps for adding a new component grid to the model system follow. This proce If you also omit the runoff grid, then only the 5 atm<->ocn maps will be generated. .. note:: ESMF_RegridWeightGen runs in parallel, and the ``gen_cesm_maps.sh`` script has been written to run on yellowstone. - To run on any other machine, you may need to add some environment variables to ``$CCSMROOT/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh`` -- search for hostname to see where to edit the file. + To run on any other machine, you may need to add some environment variables to ``$CIMEROOT/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh`` -- search for hostname to see where to edit the file. 4. Generate atmosphere, land and ocean / ice domain files. Using the conservative ocean to land and ocean to atmosphere mapping files created in the previous step, you can create domain files for the atmosphere, land, and ocean; these are basically grid files with consistent masks and fractions. - You make these files by calling **gen_domain** in **$CCSMROOT/mapping/gen_domain_files**. + You make these files by calling **gen_domain** in **$CIMEROOT/tools/mapping/gen_domain_files**. The **INSTALL** file in the **gen_domain_files/** directory describes how to build the **gen_domain** executable. The **README** file in the same directory explains how to use the tool. 
The basic usage is: :: @@ -184,14 +184,14 @@ The steps for adding a new component grid to the model system follow. This proce You need to first generate mapping files for CLM surface dataset (since this is a non-standard grid). :: - > cd $CCSMROOT/models/lnd/clm/tools/mkmapdata + > cd $CIMEROOT/../components/clm/tools/mkmapdata > ./mkmapdata.sh --gridfile --res --gridtype global These mapping files are then used to generate CLM surface dataset. Below is an example for a current day surface dataset (model year 2000). :: - > cd $CCSMROOT/models/lnd/clm/tools/mksurfdata_map + > cd $CIMEROOT/../components/clm/tools/mksurfdata_map > ./mksurfdata.pl -res usrspec -usr_gname -usr_gdate yymmdd -y 2000 7. Create grid file needed for create_newcase. diff --git a/doc/source/users_guide/index.rst b/doc/source/users_guide/index.rst index f86173b5795..376518783b4 100644 --- a/doc/source/users_guide/index.rst +++ b/doc/source/users_guide/index.rst @@ -6,7 +6,7 @@ .. _users-guide1: ####################################### -Case Control System Part 1: Basic Usage +Using the Case Control System ####################################### .. toctree:: @@ -21,12 +21,14 @@ Case Control System Part 1: Basic Usage cloning-a-case.rst cime-change-namelist.rst cime-config.rst + cime-customize.rst + testing.rst troubleshooting.rst .. _users-guide2: ####################################################################################### -Case Control System Part 2: Configuration, Porting, Testing and Use Cases +Configuring the Case Control System ####################################################################################### .. toctree:: @@ -34,13 +36,12 @@ Case Control System Part 2: Configuration, Porting, Testing and Use Cases :numbered: cime-internals.rst + components.rst compsets.rst grids.rst machine.rst pes-threads.rst porting-cime.rst - timers.rst - testing.rst unit_testing.rst multi-instance.rst workflows.rst diff --git a/doc/source/users_guide/introduction-and-overview.rst b/doc/source/users_guide/introduction-and-overview.rst index 2371ef0fddc..4dbf673f2bf 100644 --- a/doc/source/users_guide/introduction-and-overview.rst +++ b/doc/source/users_guide/introduction-and-overview.rst @@ -26,7 +26,7 @@ Other prerequisites: CIME's commands are Python scripts and require a correct version of the Python interpreter to be installed. The Python version must be -greater than 2.7. Determine which version you have +greater than 2.11. Determine which version you have like this: :: @@ -37,7 +37,7 @@ Consult your local documentation if you need to update your python version. Key Terms and concepts ====================== -The following key terms and concepts are ingrained in CIME and used frequently in this documentation. +The following key terms and concepts are ingrained in the CCS and used frequently in this documentation. See the :ref:`glossary` for a more complete list of terms. **components** @@ -63,26 +63,61 @@ See the :ref:`glossary` for a more complete list of terms. *data*: For some climate problems, it is necessary to reduce feedbacks within the system by replacing an active model with a version that sends and receives the same variables to and from other models, but with the values read from files rather than computed from the equations. The values received are ignored. These active-model substitutes are called *data models*. - CIME provides data models for each of the possible components. You could add your own data model implementation of a component - but as for active models only one at a time can be used. 
- *stub*: For some configurations, no data model is needed, so CIME provides *stub* versions that simply occupy the - required place in the driver and do not send or receive any data. + *stub*: For some configurations, no data model is needed and one instead uses a *stub* version that simply occupies the + required place in the driver and does not send or receive any data. For example, if you are setting up an aqua-planet case + you would only need a stub for the land model. -**component set** or **compset**: The particular combination of active, data and stub versions of the 7 components is referred to +**component set** or **compset**: + + The particular combination of active, data and stub versions of the 7 components is referred to as a *component set* or *compset*. The Case Control System allows one to define - several possible compsets and configure and run them on supported platforms. See :ref:`Component Sets` for more information. + several possible compsets and configure and run them on supported platforms. + Here is an example of a component set *longname* from E3SM for a fully coupled active case: +:: + + 1850SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI_MPASO_MOSART_SGLC_SWAV + +See :ref:`Component Sets` for more information. "GLC" originally meant "glacier model" and is now an ice-sheet model but the GLC letters have stuck. + +**compset alias**: + + Typing a compset longname like the above can be exhausting so the CCS allows defining a shorter *compset alias* + which is a short string that substitutes for the longname. In E3SM, the above longname can be reffered to as "WCYCL1850". + +.. note:: + + Long ago, CESM established a convention for the first letter in a compset alias based + on the combination of active, data and stub components. + If you see mention of "B-case" or "F-case", it comes from these conventions. + They pre-date the introduction of a wave model as an option. + + === ======================================================================================== + A All data models + B All models fully active with stub glc + C Active ocean with data atm, river and sea ice. stub lnd, glc + D Active sea ice with data atm, ocean (slab) and river. stub lnd, glc + E Active atm, lnd, sea-ice, river, data ocean (slab), stub glc + F Active atm, lnd, river, sea-ice (thermodynamics only), data ocean (fixed SST), stub glc + G Active ocean and sea ice, data atmosphere and river, stub lnd and glc + H Active ocean and sea ice, data atmosphere, stub lnd, river and glc + I Active land and river model, data atmosphere, stub ocn, sea-ice, glc + IG Active land, river and ice-sheet, data atmosphere, stub ocn, sea-ice + S All stub models (for testing only) + X All x-compsets (2D sine waves for each component except stub glc; for testing only) + === ======================================================================================== +.. +**grid set**: -**grid** or **model grid**: Each active model must solve its equations on a numerical grid. CIME allows models within the system to have - different grids. The resulting set of all numerical grids is called the *model grid* or sometimes just the *grid*, where - *grid* is a unique name that denotes a set of numerical grids. Sometimes the *resolution* also refers to a specific set - of grids. + different grids. The resulting set of all numerical grids is called the *grid set* or usually just the *grid*. Like + the compset longnamme, the CCS allows one to define an alias to represent a grid set. 
This alias is also referred to + as the *grid* or sometimes the *resolution*. **machine and compilers**: The *machine* is the computer you are using to run CIME and build and run the climate model. It could be a workstation or a national supercomputer. The exact name of *machine* is typically the UNIX hostname but it could be any string. A machine - may have one more more versions of Fortran, C and C++ *compilers* that are needed to compile the model's source code and CIME + may have one more more versions of Fortran, C and C++ *compilers* that are needed to compile the model's source code and CIME. **case**: To build and execute a CIME-enabled climate model, you have to make choices of compset, model grid, @@ -95,10 +130,10 @@ See the :ref:`glossary` for a more complete list of terms. the model source code and version-controlled together, its possible to match supported out-of-the-box cases with specific versions of the model source code, promoting reproducibility and provenance. An out-of-the-box case is also called a *base case* -CIME and your environment +CCS and your environment ========================= -Before using any CIME commands, set the ``CIME_MODEL`` environment variable. In bash, use **export** as shown and replace +Before using any CCS commands, set the ``CIME_MODEL`` environment variable. In bash, use **export** as shown and replace **** with the appropriate text. Current possibilities are "e3sm" or "cesm." :: @@ -139,17 +174,14 @@ After you submit the case, you can follow the progress of your run by monitoring Repeat the command until you see the message ``case.run success``. -Discovering available cases with **query_config** +Discovering available pre-defined compsets with **query_config** ================================================= -Your CIME-driven model has many more possible cases besides the simple one in the above Quick Start. +Your CIME-driven model likely has many compset and gridset aliases defined for cases that are widely used by the +model developers. Use the utility `query_config <../Tools_user/query_config.html>`_ to see which out-of-the-box compsets, components, grids and machines are available for your model. -If CIME is downloaded in standalone mode, only standalone CIME compsets can be queried. - -If CIME is part of a CIME-driven model, `query_config <../Tools_user/query_config.html>`_ will allow you to query all prognostic component compsets. - To see lists of available compsets, components, grids and machines, look at the **help** text:: > query_config --help diff --git a/doc/source/users_guide/machine.rst b/doc/source/users_guide/machine.rst index 7be970b5e7b..b349c61f6b0 100644 --- a/doc/source/users_guide/machine.rst +++ b/doc/source/users_guide/machine.rst @@ -57,9 +57,35 @@ Each ```` tag requires the following input: * May have optional attributes of ``compiler``, ``mpilib`` and/or ``threaded`` * May have an optional ```` element which in turn contains one or more ```` elements. These specify the arguments to the mpi executable and are dependent on your mpi library implementation. - * May have an option ```` element which overrides the ``default_run_exe`` - * May have an option ```` element which overrides the ``default_run_misc_suffix`` + * May have an optional ```` element which overrides the ``default_run_exe`` + * May have an optional ```` element which overrides the ``default_run_misc_suffix`` + * May have an optional ```` element which controls how CIME generates arguments when ```` contains ``aprun``. 
+   The ```` element can be one of the following. The default value is ``ignore``.
+
+   * ``ignore`` will cause CIME to ignore its aprun module and join the values found in ````.
+   * ``default`` will use CIME's aprun module to generate arguments.
+   * ``override`` behaves the same as ``default`` except it will use ```` to mutate the generated arguments. When using this mode a ``position`` attribute can be placed on ```` tags to specify how it's used.
+
+   The ``position`` attribute on ```` can take one of the following values. The default value is ``per``.
+
+   * ``global`` causes the value of the ```` element to be used as a global argument for ``aprun``.
+   * ``per`` causes the value of the ```` element to be appended to each separate binary's arguments.
+
+   Example using ``override``:
+   ::
+
+       aprun
+       override
+
+       -e DEBUG=true
+       -j 20
+
+
+   Sample command output:
+   ::
+
+       aprun -e DEBUG=true ... -j 20 e3sm.exe : ... -j 20 e3sm.exe

 * ``module_system``: How and what modules to load on this system. Module systems allow you to easily load multiple compiler environments on a machine. CIME provides support for two types of module tools: `module `_ and `soft `_. If neither of these is available on your machine, simply set ````.

@@ -104,7 +130,7 @@ The **config_batch.xml** schema is defined in **$CIMEROOT/config/xml_schemas/con

 CIME supports these batch systems: pbs, cobalt, lsf and slurm.

-As is the case for **config_compilers.xml**, the entries in **config_batch.xml** are hierarchical.
+The entries in **config_batch.xml** are hierarchical.

 #. General configurations for each system are provided at the top of the file.

@@ -132,24 +158,22 @@ In addition, there is **case.test** job that is used by the CIME system test wor

 Compiler settings
 -----------------

-CIME looks at the xml element ``COMPILERS_SPEC_FILE`` in the **config_files.xml** file to identify supported out-of-the-box compiler details for the target model. The node has the following contents:
+CIME looks at the xml element ``CMAKE_MACROS_DIR`` in the **config_files.xml** file to identify supported out-of-the-box compiler details for the target model. The node has the following contents:
 ::

-
+
   char
-  $CIMEROOT/cime_config/$MODEL/machines/config_compilers.xml
+  $CIMEROOT/config/$MODEL/machines/cmake_macros
   case_last
   env_case.xml
-  file containing compiler specifications for target model primary component (for documentation only - DO NOT EDIT)
-  $CIMEROOT/cime_config/xml_schemas/config_compilers_v2.xsd
+  Directory containing cmake macros (for documentation only - DO NOT EDIT)

-Additional compilers are made avilable by adding entries to the files pointed to by COMPILERS_SPEC_FILE or to a config_compilers.xml file
-in your CIME config directory.
+Additional compilers are made available by adding cmake macros files to the directory pointed to by CMAKE_MACROS_DIR or to your $HOME/.cime directory.

 .. _compilerfile:

-config_compilers.xml - compiler paths and options
+config_compilers.xml - compiler paths and options **DEPRECATED use cmake_macros**
 -------------------------------------------------

 The **config_compilers.xml** file defines compiler flags for building CIME (and also CESM and E3SM prognostic CIME-driven components).
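+In the cmake macros scheme that replaces it, the same kind of per-machine, per-compiler information is expressed
+as CMake commands. The following is a minimal sketch of such a file; the file name, compiler and flag values are
+illustrative only and are not taken from any shipped configuration:
+
+.. code-block:: cmake
+
+   # $HOME/.cime/gnu_mymachine.cmake  (illustrative path and file name)
+   # Flags appended for every build with this compiler on this machine
+   string(APPEND FFLAGS " -fconvert=big-endian")
+   # Extra run-time checking only for DEBUG builds
+   if (DEBUG)
+     string(APPEND FFLAGS " -g -O0 -fcheck=bounds")
+   endif()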
diff --git a/doc/source/users_guide/porting-cime.rst b/doc/source/users_guide/porting-cime.rst index d1f94df1364..1e9236a646b 100644 --- a/doc/source/users_guide/porting-cime.rst +++ b/doc/source/users_guide/porting-cime.rst @@ -105,8 +105,8 @@ In what follows we outline the process for method (2) above: xmllint --noout --schema $CIME/config/xml_schemas/config_machines.xsd $HOME/.cime/config_machines.xml -- If you find that you need to introduce compiler settings specific to your machine, create a **$HOME/.cime/config_compilers.xml** file. - The default compiler settings are defined in **$CIME/config/$model/machines/config_compilers.xml**. There is no template for **config_compilers.xml**. +- If you find that you need to introduce compiler settings specific to your machine, create a **$HOME/.cime/*.cmake** file. + The default compiler settings are defined in **$CIME/config/$model/machines/cmake_macros/**. - If you have a batch system, you may also need to create a **$HOME/.cime/config_batch.xml** file. Out-of-the-box batch settings are set in **$CIME/config/$model/machines/config_batch.xml**. diff --git a/doc/source/users_guide/running-a-case.rst b/doc/source/users_guide/running-a-case.rst index cad2a52032a..b8366775716 100644 --- a/doc/source/users_guide/running-a-case.rst +++ b/doc/source/users_guide/running-a-case.rst @@ -321,7 +321,7 @@ The case initialization type is set using the ``$RUN_TYPE`` variable in branch runs. To set up a branch run, locate the restart tar file or restart directory for ``$RUN_REFCASE`` and ``$RUN_REFDATE`` from a previous run, then place those files in the ``$RUNDIR`` directory. - See :ref:`setting up a branch run`. + See :ref:`Starting from a reference case`. ``hybrid`` @@ -360,7 +360,7 @@ Starting from a reference case (REFCASE) ---------------------------------------- There are several xml variables that control how either a branch or a hybrid case can start up from another case. -The initial/restart files needed to start up a run from another case are required to be in $EXEROOT. +The initial/restart files needed to start up a run from another case are required to be in ``$RUNDIR``. The xml variable ``$GET_REFCASE`` is a flag that if set will automatically prestaging the refcase restart data. - If ``$GET_REFCASE`` is ``TRUE``, then the the values set by ``$RUN_REFDIR``, ``$RUN_REFCASE``, ``$RUN_REFDATE`` and ``$RUN_TOD`` are @@ -375,7 +375,7 @@ The xml variable ``$GET_REFCASE`` is a flag that if set will automatically prest - If ``$RUN_REFDIR`` is a relative pathname AND is not available in ``$DIN_LOC_ROOT`` then CIME will attempt to download the data from the input data repositories. -- If ``$GET_REFCASE`` is ``FALSE`` then the data is assumed to already exist in ``$EXEROOT``. +- If ``$GET_REFCASE`` is ``FALSE`` then the data is assumed to already exist in ``$RUNDIR``. .. _controlling-output-data: @@ -459,10 +459,10 @@ files as well as the time evolution of the model. Runs that are initialized as branch or hybrid runs require restart/initial files from previous model runs (as specified by the -variables ``$RUN_REFCASE`` and ``$RUN_REFDATE``). Pre-stage these -iles to the case ``$RUNDIR`` (normally ``$EXEROOT/run``) -before the model run starts. Normally this is done by copying the contents -of the relevant **$RUN_REFCASE/rest/$RUN_REFDATE.00000** directory. +variables ``$RUN_REFCASE`` and ``$RUN_REFDATE``). Pre-stage these files +to the case ``$RUNDIR`` (normally ``$EXEROOT/../run``) before the model +run starts. 
Normally this is done by copying the contents of the +relevant **$RUN_REFCASE/rest/$RUN_REFDATE.00000** directory. Whenever a component writes a restart file, it also writes a restart pointer file in the format **rpointer.$component**. Upon a restart, each @@ -484,8 +484,7 @@ for a component set using full active model components. If short-term archiving is turned on, the model archives the component restart data sets and pointer files into **$DOUT_S_ROOT/rest/yyyy-mm-dd-sssss**, where yyyy-mm-dd-sssss is the -model date at the time of the restart. (See `below for more details -`_.) +model date at the time of the restart. (See below for more details.) --------------------------------- Backing up to a previous restart @@ -526,7 +525,7 @@ Short-term archiving If short-term archiving is enabled, component output files are moved to the short-term archiving area on local disk, as specified by -``$DOUT_S_ROOT``. The directory normally is **$EXEROOT/../archive/$CASE.** +``$DOUT_S_ROOT``. The directory normally is **$EXEROOT/../../archive/$CASE.** and has the following directory structure: :: rest/yyyy-mm-dd-sssss/ @@ -604,7 +603,7 @@ as an external shell script. :: - #!/usr/bin/env python + #!/usr/bin/env python3 import sys from CIME.case import Case diff --git a/doc/source/users_guide/setting-up-a-case.rst b/doc/source/users_guide/setting-up-a-case.rst index aea931bf54a..2ffc473fafd 100644 --- a/doc/source/users_guide/setting-up-a-case.rst +++ b/doc/source/users_guide/setting-up-a-case.rst @@ -16,22 +16,39 @@ After creating a case or changing aspects of a case, such as the pe-layout, call This creates the following additional files and directories in ``$CASEROOT``: ============================= =============================================================================================================================== - .case.run A (hidden) file with the commands that will be used to run the model (such as “mpirunâ€) and any batch directives needed. - The directive values are generated using the contents - of **env_mach_pes.xml**. Running `case.setup --clean <../Tools_user/case.setup.html>`_ will remove this file. - This file should not be edited directly and instead controlled through XML variables in **env_batch.xml**. It should also - *never* be run directly. + .case.run A (hidden) file with the commands that will be used to run the model (such as “mpirunâ€) + + and any batch directives needed. The directive values are generated using the contents + + of **env_mach_pes.xml**. Running `case.setup --clean <../Tools_user/case.setup.html>`_ + + will remove this file. This file should not be edited directly and instead controlled + + through XML variables in **env_batch.xml**. It should also *never* be run directly. + + CaseStatus File containing a list of operations done in the current case. + + case.st_archive Script to perform short term archiving to disk for your case + + output. Note that this script is run automatically by the normal CIME workflow. + + Depends.* Lists of source code files that needs special build options. + + Macros.cmake File containing machine-specific makefile directives for your target platform/compiler. - Macros.make File containing machine-specific makefile directives for your target platform/compiler. This file is created if it does not already exist. The user can modify the file to change certain aspects of the build, such as compiler flags. + Running `case.setup --clean <../Tools_user/case.setup.html>`_ will not remove the file once it has been created. - However. 
if you remove or rename the Macros.make file, running `case.setup <../Tools_user/case.setup.html>`_ recreates it. + However. if you remove or rename the Macros.make file, running + + `case.setup <../Tools_user/case.setup.html>`_ recreates it. user_nl_xxx[_NNNN] Files where all user modifications to component namelists are made. **xxx** is any one of the set of components targeted for the case. + For example, for a full active CESM compset, **xxx** is cam, clm or rtm, and so on. NNNN goes from 0001 to the number of instances of that component. @@ -45,17 +62,22 @@ This creates the following additional files and directories in ``$CASEROOT``: Calling `case.setup --clean <../Tools_user/case.setup.html>`_ will *not remove* any user_nl files. Changing the number of instances in the **env_mach_pes.xml** file will cause only + new user_nl files to be added to ``$CASEROOT``. CaseDocs/ Directory that contains all the component namelists for the run. This is for reference only and files in this directory SHOULD NOT BE EDITED since they will + be overwritten at build time and runtime. .env_mach_specific.* Files summarizing the **module load** commands and environment variables that are set when + the scripts in ``$CASEROOT`` are called. These files are not used by the case but can be + useful for debugging **module load** and environment settings. software_environment.txt This file records some aspects of the computing system on which the case is built, + such as the shell environment. ============================= =============================================================================================================================== diff --git a/doc/source/users_guide/testing.rst b/doc/source/users_guide/testing.rst index 1d89dd9eeb5..f604e93f7d8 100644 --- a/doc/source/users_guide/testing.rst +++ b/doc/source/users_guide/testing.rst @@ -1,119 +1,98 @@ .. _testing: -********** -Testing -********** +************** +Testing Cases +************** -`create_test <../Tools_user/create_test.html>`_ -is the tool we use to test both CIME and CIME-driven models. -It can be used as an easy way to run a single basic test or an entire suite of tests. -`create_test <../Tools_user/create_test.html>`_ runs a test suite in parallel for improved performance. -It is the driver behind the automated nightly testing of cime-driven models. - -Running create_test is generally resource intensive, so run it in a manner appropriate for your system, -e.g. using 'nice', batch queues, nohup, the ``--parallel-jobs`` option to create_test, etc. -It will create and submit additional jobs to the batch queue (if one is available). +The `create_test <../Tools_user/create_test.html>`_ command provides +a powerful tool capable of testing a Case. The command can create, +setup, build and run a case according to the :ref:`testname ` syntax, returning +a PASS or FAIL result. .. _individual: An individual test can be run as:: - $CIMEROOT/scripts/create_test $test_name - -Multiple tests can be run similarly, by listing all of the test names on the command line:: - - $CIMEROOT/scripts/create_test $test_name $test_name2 - -or by putting the test names into a file, one name per line:: - - $CIMEROOT/scripts/create_test -f $file_of_test_names - -A pre-defined suite of tests can by run using the ``--xml`` options to create_test, -which harvest test names from testlist*.xml files. 
-As described in https://github.com/ESCOMP/ctsm/wiki/System-Testing-Guide, -to determine what pre-defined test suites are available and what tests they contain, -you can run query_testlists_. - -Test suites are retrieved in create_test via 3 selection attributes:: + $CIMEROOT/scripts/create_test - --xml-category your_category The test category. - --xml-machine your_machine The machine. - --xml-compiler your_compiler The compiler. +Everything the test will do is controlled by the :ref:`testname `. -| If none of these 3 are used, the default values are 'none'. -| If any of them are used, the default for the unused options is 'all'. -| Existing values of these attributes can be seen by running query_testlists_. +.. _`testname syntax`: -The search for test names can be restricted to a single test list using:: - - --xml-testlist your_testlist +================ +Testname syntax +================ -Omitting this results in searching all testlists listed in:: +Tests are defined by the following format, where anything enclosed in ``[]`` is optional:: - cime/config/{cesm,e3sm}/config_files.xml + TESTTYPE[_MODIFIERS].GRID.COMPSET[.MACHINE_COMPILER][.GROUP-TESTMODS] -================= -Testname syntax -================= -.. _`Test naming`: +For example using the minimum TESTTYPE_, `GRID <../users_guide/grids.html>`_, and `COMPSET <../users_guide/compsets.html>`_:: -Tests must be named with the following forms, [ ]=optional:: + ERP.ne4pg2_oQU480.F2010 - TESTTYPE[_MODIFIERS].GRID.COMPSET[.MACHINE_COMPILER][.GROUP-TESTMODS] +Below is a break-down of the different parts of the ``testname`` syntax. ================= ===================================================================================== NAME PART ================= ===================================================================================== TESTTYPE_ the general type of test, e.g. SMS. Options are listed in the following table and config_tests.xml. -MODIFIERS_ These are changes to the default settings for the test. +MODIFIERS_ Changes to the default settings for the test type. See the following table and test_scheduler.py. -GRID The model grid (can be an alias). -COMPSET alias of the compset, or long name, if no ``--xml`` arguments are used. -MACHINE This is optional; if this value is not supplied, `create_test <../Tools_user/create_test.html>`_ - will probe the underlying machine. +GRID The grid set (usually a grid alias). +COMPSET The compset, Can be a longname but usually a compset alias +MACHINE This is optional; if this value is not supplied, `create_test <../Tools_user/create_test.html>`_ + will probe the underlying machine. COMPILER If this value is not supplied, use the default compiler for MACHINE. -GROUP-TESTMODS_ This is optional. This points to a directory with ``user_nl_xxx`` files or a ``shell_commands`` - that can be used to make namelist and ``XML`` modifications prior to running a test. - | - +GROUP-TESTMODS_ This is optional. This points to a directory with ``user_nl_xxx`` files or a ``shell_commands`` + that can be used to make namelist and other modifications prior to running a test. ================= ===================================================================================== - + .. _TESTTYPE: - + +------------- +TESTTYPE +------------- +The test types in CIME are all system tests: they compile all the code needed in a case, They test +functionality of the model such as restart capability, invariance with MPI task count, and short +term archiving. At this time, they do not test for scientific correctness. 
+ +The currently supported test types are: + ============ ===================================================================================== TESTTYPE Description ============ ===================================================================================== - ERS Exact restart from startup (default 6 days + 5 days) - | Do an 11 day initial test - write a restart at day 6. (file suffix: base) - | Do a 5 day restart test, starting from restart at day 6. (file suffix: rest) - | Compare component history files '.base' and '.rest' at day 11. - | They should be identical. + ERS Exact restart from startup (default 6 days + 5 days) + | Do an 11 day initial test - write a restart at day 6. (file suffix: base) + | Do a 5 day restart test, starting from restart at day 6. (file suffix: rest) + | Compare component history files '.base' and '.rest' at day 11 with cprnc + | PASS if they are identical. ERS2 Exact restart from startup (default 6 days + 5 days). | Do an 11 day initial test without making restarts. (file suffix: base) - | Do an 11 day restart test stopping at day 6 with a restart, + | Do an 11 day restart test stopping at day 6 with a restart, then resuming from restart at day 6. (file suffix: rest) | Compare component history files ".base" and ".rest" at day 11. - ERT Exact restart from startup, default 2 month + 1 month (ERS with info DBUG = 1). + ERT Longer version of ERS. Exact restart from startup, default 2 month + 1 month (ERS with info DBUG = 1). IRT Exact restart from startup, (default 4 days + 7 days) with restart from interim file. - ERIO Exact restart from startup with different PIO methods, (default 6 days + 5 days). + ERIO Exact restart from startup with different IO file types, (default 6 days + 5 days). ERR Exact restart from startup with resubmit, (default 4 days + 3 days). ERRI Exact restart from startup with resubmit, (default 4 days + 3 days). Tests incomplete logs option for st_archive. - ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) + ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) ref1case Do an initial run for 3 days writing restarts at day 3. ref1case is a clone of the main case. Short term archiving is on. ref2case (Suffix hybrid) Do a hybrid run for default 19 days running with ref1 restarts from day 3, - and writing restarts at day 10. + and writing restarts at day 10. ref2case is a clone of the main case. Short term archiving is on. case @@ -161,12 +140,12 @@ TESTTYPE Description Do an initial run test with NINST 2. (file suffix: multiinst for both _0001 and _0002) Compare base and _0001 and _0002. - REP Reproducibility: Two identical runs are bit for bit. (default 5 days) + REP Reproducibility: Two identical initial runs are bit for bit. (default 5 days) SBN Smoke build-namelist test (just run preview_namelist and check_input_data). - SMS Smoke startup test (default 5 days) - Do a 5 day initial test. (file suffix: base) + SMS Smoke test (default 5 days) + Do a 5 day initial test that runs to completing without error. (file suffix: base) SEQ Different sequencing bit for bit test. (default 10 days) Do an initial run test with out-of-box PE-layout. (file suffix: base) @@ -181,8 +160,16 @@ TESTTYPE Description ============ ===================================================================================== +The tests run for a default length indicated above, will use default pelayouts for the case +on the machine the test runs on and its default coupler and MPI library. 
It's possible to modify
+elements of the test through a test type modifier.
+
 .. _MODIFIERS:

+-------------------
+MODIFIERS
+-------------------
+
 ============ =====================================================================================
 MODIFIERS    Description
 ============ =====================================================================================
@@ -195,7 +182,7 @@ MODIFIERS Description

   _I          Marker to distinguish tests with same name - ignored.

   _Lo#        Run length set by o (STOP_OPTION) and # (STOP_N).
-              | o = {"y":"nyears", "m":"nmonths", "d":"ndays",
+              | o = {"y":"nyears", "m":"nmonths", "d":"ndays",
               | \ "h":"nhours", "s":"nseconds", "n":"nsteps"}

   _Mx         Set MPI library to x.
@@ -206,32 +193,267 @@ MODIFIERS Description
   _R          For testing in PTS_MODE or Single Column Model (SCM) mode.
               For PTS_MODE, compile with mpi-serial.
-
+
   _Vx         Set driver to x.
               |
 ============ =====================================================================================

+For example, this will run the ERP test with debugging turned on during compilation::
+
+   $CIMEROOT/scripts/create_test ERP_D.ne4pg2_oQU480.F2010
+
+This will run the ERP test for 3 days instead of the default 11 days::
+
+   $CIMEROOT/scripts/create_test ERP_Ld3.ne4pg2_oQU480.F2010
+
+You can combine testtype modifiers::
+
+   $CIMEROOT/scripts/create_test ERP_D_Ld3.ne4pg2_oQU480.F2010
+
 .. _GROUP-TESTMODS:

+-------------------
+GROUP-TESTMODS
+-------------------
+
+The `create_test <../Tools_user/create_test.html>`_ command runs with out-of-the-box compsets and grid sets.
+Sometimes you may want to run a test with modifications to a namelist or other settings without creating an
+entire compset. The Case Control System (CCS) provides the testmods capability for this situation.
+
+The ``GROUP-TESTMODS`` string is at the end of the full :ref:`testname ` (including machine and compiler).
+The ``GROUP-TESTMODS`` form is parsed as follows.
+
 ============ =====================================================================================
-TESTMODS     Description
+PART         Description
 ============ =====================================================================================
-GROUP        A subdirectory of testmods_dirs and the parent directory of various testmods.
-`-`          Replaces '/' in the path name where the testmods are found.
-TESTMODS     A subdirectory of GROUP containing files which set non-default values
-             of the set-up and run-time variables via namelists or xml_change commands.
-             See "Adding tests": CESM_.
-             Examples include
-
-             | GROUP-TESTMODS = cam-outfrq9s points to
-             | $cesm/components/cam/cime_config/testdefs/testmods_dirs/cam/outfrq9s
-             | while allactive-defaultio points to
-             | $cesm/cime_config/testmods_dirs/allactive/defaultio
+GROUP        Name of the directory under ``TESTS_MODS_DIR`` that contains ``TESTMODS``.
+TESTMODS     Any combination of `user_nl_* `_, `shell_commands `_,
+             `user_mods `_, or `params.py `_ in a directory under the
+             ``GROUP`` directory.
 ============ =====================================================================================

+For example, the *ERP* test for an E3SM *F-case* can be modified to use a different radiation scheme by using ``eam-rrtmgp``::
+
+   ERP_D_Ld3.ne4pg2_oQU480.F2010.pm-cpu_intel.eam-rrtmgp
+
+If ``TESTS_MODS_DIR`` was set to ``$E3SM/components/eam/cime_config/testdefs/testmods_dirs`` then the
+directory containing the testmods would be ``$E3SM/components/eam/cime_config/testdefs/testmods_dirs/eam/rrtmgp``.
+
+In this directory you'd find a ``shell_commands`` file containing the following::
+
+   #!/bin/bash
+   ./xmlchange --append CAM_CONFIG_OPTS='-rad rrtmgp'
+
+These commands are applied after the testcase is created and case.setup is called.
+
+Note: do not use '-' in the testmods directory name because it has a special meaning to create_test.
+
+.. _USER_NL:
+
+``````````````````
+Example *user_nl_*
+``````````````````
+
+A component's namelist can be modified by providing a ``user_nl_*`` file in a GROUP-TESTMODS_ directory.
+For example, to change the namelist for the *eam* component a file named ``user_nl_eam`` would be used.
+
+::
+
+   # user_nl_eam
+   deep_scheme = 'off',
+   zmconv_microp = .false.
+   shallow_scheme = 'CLUBB_SGS',
+   l_tracer_aero = .false.
+   l_rayleigh = .false.
+   l_gw_drag = .false.
+   l_ac_energy_chk = .true.
+   l_bc_energy_fix = .true.
+   l_dry_adj = .false.
+   l_st_mac = .true.
+   l_st_mic = .false.
+   l_rad = .false.
+
+.. _SHELL_COMMANDS:
+
+````````````````````````
+Example *shell_commands*
+````````````````````````
+
+A test can be modified by providing a ``shell_commands`` file in a GROUP-TESTMODS_ directory.
+This shell file can contain any arbitrary commands, for example::
+
+   # shell_commands
+   #!/bin/bash
+
+   # Remove exe if chem pp exe (campp) already exists (it ensures that exe is always built)
+   /bin/rm -f $CIMEROOT/../components/eam/chem_proc/campp
+
+   # Invoke campp (using v3 mechanism file)
+   ./xmlchange --append CAM_CONFIG_OPTS='-usr_mech_infile $CIMEROOT/../components/eam/chem_proc/inputs/pp_chemUCI_linozv3_mam5_vbs.in'
+
+   # Assuming atmchange is available via $PATH
+   atmchange initial_conditions::perturbation_random_seed = 32
+
+.. _USER_MODS:
+
+```````````````````
+Example *user_mods*
+```````````````````
+
+Additional GROUP-TESTMODS_ can be applied by providing a list in a ``user_mods`` file in a GROUP-TESTMODS_ directory.
+
+::
+
+   # user_mods
+   eam/cosp
+   eam/hommexx
+
+.. _TESTYPE_MOD:
+
+```````````````````
+Example *params.py*
+```````````````````
+
+Supported TESTTYPE_ tests can further be modified by providing a ``params.py`` file in the GROUP-TESTMODS_ directory.
+
+^^^^^^^^^^^^
+MVK
+^^^^^^^^^^^^
+The `MVK` system test can be configured by defining :ref:`variables ` and :ref:`methods ` in ``params.py``.
+
+See :ref:`examples ` for a simple and complex use case.
+
+.. _MVKConfig Variables:
+
+"""""""""
+Variables
+"""""""""
+========== ======== ==== ===============================================
+Variable   Default  Type Description
+========== ======== ==== ===============================================
+component           str  The main component.
+components []       list Components that require namelist customization.
+ninst      30       int  The number of instances.
+var_set    default  str  Name of the variable set to analyze.
+ref_case   Baseline str  Name of the reference case.
+test_case  Test     str  Name of the test case.
+========== ======== ==== ===============================================
+
+.. _MVKConfig Methods:
+
+"""""""
+Methods
+"""""""
+.. code-block::
+
+   def evv_test_config(case, config):
+       """
+       Customize the evv4esm configuration.
+
+       This method is used to customize the default evv4esm configuration
+       or generate a completely new one.
+
+       The return configuration will be written to `$RUNDIR/$CASE.json`.
+
+       Args:
+           case (CIME.case.case.Case): The case instance.
+           config (dict): Default evv4esm configuration.
+
+       Returns:
+           dict: Dictionary with test configuration.
+       """
+.. code-block::
+
+   def generate_namelist(case, component, i, filename):
+       """
+       Generate per instance namelist.
+ + This method is called for each instance to generate the desired + modifications. + + Args: + case (CIME.case.case.Case): The case instance. + component (str): Component the namelist belongs to. + i (int): Instance unique number. + filename (str): Name of the namelist that needs to be created. + """ + +.. _MVK Examples: + +"""""""""" +Examples +"""""""""" +.. _MVK Simple: +In the simplest form just :ref:`variables ` need to be defined in ``params.py``. + +For this case the default ``evv_test_config`` and ``generate_namelist`` functions will be called. + +.. code-block:: + + component = "eam" + # components = [] can be omitted when modifying a single component + ninst = 10 + +.. _MVK Complex: + +If more control over the evv4esm configuration file or the per instance configuration is desired then +the ``evv_test_config`` and ``generate_namelist`` functions can be overridden in the ``params.py`` file. + +The :ref:`variables ` will still need to be defined to generate the default +evv4esm config or ``config`` in the ``evv_test_config`` function can be ignored and a completely new +dictionary can be returned. + +In the following example, the default ``module`` is changed as well as ``component`` and ``ninst``. +The ``generate_namelist`` function creates namelists for certain components while running a shell +command to customize others. + +Note; this is a toy example, no scientific usage. + +.. code-block:: + + import os + from CIME.SystemTests.mvk import EVV_LIB_DIR + from CIME.namelist import Namelist + from CIME.utils import safe_copy + from CIME.utils import run_cmd + + component "eam" + # The generate_namelist function will be called `ninst` times per component + components = ["eam", "clm", "eamxx"] + ninst = 30 + + # This can be omitted if the default evv4esm configuration is sufficient + def evv_test_config(case, config): + config["module"] = os.path.join(EVV_LIB_DIR, "extensions", "kso.py") + config["component"] = "clm" + config["ninst"] = 20 + + return config + + def generate_namelist(case, component, i, filename): + namelist = Namelist() + + if component in ["eam", "clm"]: + with namelist(filename) as nml: + if component == "eam": + # arguments group, key, value + nml.set_variable_value("", "eam_specific", f"perturn-{i}") + elif component == "clm": + if i % 2 == 0: + nml.set_variable_value("", "clm_specific", "even") + else: + nml.set_variable_value("", "clm_specific", "odd") + else: + stat, output, err = run_cmd(f"atmchange initial_conditions::perturbation_random_seed = {i*32}") + + safe_copy("namelist_scream.xml", f"namelist_scream_{i:04}.xml") + + +======================== +Test progress and output +======================== Each test run by `create_test <../Tools_user/create_test.html>`_ includes the following mandatory steps: @@ -253,50 +475,23 @@ And the following optional phases: * GENERATE: Generate baseline results * BASELINE: Compare results against baselines -Each test may be in one of the following states: +Each phase within the test may be in one of the following states: * PASS: The phase was executed successfully * FAIL: We attempted to execute this phase, but it failed. If this phase is mandatory, no further progress will be made on this test. A detailed explanation of the failure should be in TestStatus.log. 
* PEND: This phase will be run or is currently running but not complete -The current state of a test is represented in the file $CASEROOT/TestStatus - -All output from the CIME infrastructure regarding this test will be put in the file $CASEROOT/TestStatus.log - -A cs.status.$testid script will be put in the test root. This script will allow you to see the -current status of all your tests. - -=================== -Query_testlists -=================== -.. _query_testlists: - -**$CIMEROOT/scripts/query_testlists** gathers descriptions of the tests and testlists available -for CESM, the components, and projects. - -The ``--xml-{compiler,machine,category,testlist}`` arguments can be used -as in create_test (above) to focus the search. -The 'category' descriptor of a test can be used to run a group of associated tests at the same time. -The available categories, with the tests they encompass, can be listed by:: - - ./query_testlists --define-testtypes - -The ``--show-options`` argument does the same, but displays the 'options' defined for the tests, -such as queue, walltime, etc.. - -============================ -Using **create_test** (E3SM) -============================ -.. _`Using create_test (E3SM)`: - +====================================================== +Running multiple tests and other command line examples +====================================================== -Usage will differ slightly depending on if you're using E3SM or CESM. +Multiple tests can be run by listing all of the test names on the command line:: -Using examples to illustrate common use cases + $CIMEROOT/scripts/create_test $test_name $test_name2 -To run a test:: +or by putting the test names into a file, one name per line:: - ./create_test SMS.f19_f19.A + $CIMEROOT/scripts/create_test -f $file_of_test_names To run a test with a non-default compiler:: @@ -326,25 +521,106 @@ To run a test and force it to go into a certain batch queue:: ./create_test SMS.f19_f19.A -q myqueue -To run a test and use a non-default project (can impact things like directory paths and acct for batch system):: +The Case Control System supports more sophisticated ways to specify a suite of tests and +how they should be run. One approach uses XML files and the other uses python dictionaries. + +=========================== +Test control with XML files +=========================== +.. _query_testlists: + +A pre-defined suite of tests can by run using the ``--xml`` options to create_test, +which harvest test names from testlist*.xml files. +As described in https://github.com/ESCOMP/ctsm/wiki/System-Testing-Guide, +to determine what pre-defined test suites are available and what tests they contain, +you can run query_testlists_. + +Test suites are retrieved in create_test via 3 selection attributes:: + + --xml-category your_category The test category. + --xml-machine your_machine The machine. + --xml-compiler your_compiler The compiler. + +| If none of these 3 are used, the default values are 'none'. +| If any of them are used, the default for the unused options is 'all'. +| Existing values of these attributes can be seen by running query_testlists_. + +The search for test names can be restricted to a single test list using:: + + --xml-testlist your_testlist + +Omitting this results in searching all testlists listed in:: + + cime/config/{cesm,e3sm}/config_files.xml + +**$CIMEROOT/scripts/query_testlists** gathers descriptions of the tests and testlists available +in the XML format, the components, and projects. 
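+Putting the selection attributes together, a pre-defined suite can be run with a command like the following
+(the category, machine and compiler values are placeholders; query_testlists will show what is actually
+defined for your model)::
+
+   ./create_test --xml-category prealpha --xml-machine mymachine --xml-compiler gnu
+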
+ +The ``--xml-{compiler,machine,category,testlist}`` arguments can be used +as in create_test (above) to focus the search. +The 'category' descriptor of a test can be used to run a group of associated tests at the same time. +The available categories, with the tests they encompass, can be listed by:: + + ./query_testlists --define-testtypes + +The ``--show-options`` argument does the same, but displays the 'options' defined for the tests, +such as queue, walltime, etc.. + +Adding a test requires first deciding which compset will be tested +and then finding the appropriate testlist_$component.xml file:: + + components/$component/cime_config/testdefs/ + testlist_$component.xml + testmods_dirs/$component/{TESTMODS1,TESTMODS2,...} + cime_config/ + testlist_allactive.xml + testmods_dirs/allactive/{defaultio,...} + +You can optionally add testmods for that test in the testmods_dirs. +Testlists and testmods live in different paths for cime, drv, and components. - ./create_test SMS.f19_f19.A -p myproj +If this test will only be run as a single test, you can now create a test name +and follow the individual_ test instructions for create_test. -To run two tests:: +===================================== +Test control with python dictionaries +===================================== +.. _`python dict testing`: - ./create_test SMS.f19_f19.A SMS.f19_f19.B +One can also define suites of tests in a file called tests.py typically located in $MODEL/cime_config/tests.py -To run a test suite:: +To run a test suite called e3sm_developer:: ./create_test e3sm_developer -To run a test suite excluding a specific test:: +One can exclude a specific test from a suite:: ./create_test e3sm_developer ^SMS.f19_f19.A See create_test -h for the full list of options +` + +To add a test, open the MODEL/cime_config/tests.py file, you'll see a python dict at the top +of the file called _TESTS, find the test category you want to +change in this dict and add your testcase to the list. Note the +comment at the top of this file indicating that you add a test with +this format: test>.., and then there is a second +argument for mods. Machine and compiler are added later depending on where +create_test is invoked and its arguments. + +Existing tests can be listed using the cime/CIME/Tools/list_e3sm_tests script. + +For example:: + + /list_e3sm_tests -t compsets e3sm_developer -Interpreting test output is pretty easy, looking at an example:: +Will list all the compsets tested in the e3sm_developer test suite. + +============================ +Create_test output +============================ + +Interpreting test output is pretty easy. Looking at an example:: % ./create_test SMS.f19_f19.A @@ -371,110 +647,195 @@ Interpreting test output is pretty easy, looking at an example:: You can see that `create_test <../Tools_user/create_test.html>`_ informs the user of the case directory and of the progress and duration of the various test phases. -=================== -Managing baselines -=================== -.. _`Managing baselines`: +The $CASEDIR for the test will be created in $CIME_OUTPUT_ROOT. The name will be of the form:: -A big part of testing is managing your baselines (sometimes called gold results). We have provided -tools to help the user do this without having to repeat full runs of test cases with `create_test <../Tools_user/create_test.html>`_ . 
+   TESTTYPE[_MODIFIERS].GRID.COMPSET.MACHINE_COMPILER[.GROUP-TESTMODS].YYYYMMDD_HHMMSS_hash

-bless_test_results: Takes a batch of cases of tests that have already been run and copy their
-results to a baseline area.
+If MODIFIERS or GROUP-TESTMODS are used, those will be included in the test output directory name. The
+extra string with YYYYMMDD_HHMMSS_hash is the testid and is used to distinguish multiple runs of the
+same test. That string
+can be replaced with the --test-id argument to create_test.

-compare_test_results: Takes a batch of cases of tests that have already been run and compare their
-results to a baseline area.
+For a test, the $CASEDIR will have $EXEROOT and $RUNDIR as subdirectories.

-Take a batch of results for the jenkins user for the testid 'mytest' and copy the results to
-baselines for 'master'::
+The current state of a test is represented in the file $CASEDIR/TestStatus. Example output::

-  ./bless_test_results -r /home/jenkins/e3sm/scratch/jenkins/ -t mytest -b master
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel CREATE_NEWCASE
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel XML
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel SETUP
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel SHAREDLIB_BUILD time=277
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel MODEL_BUILD time=572
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel SUBMIT
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel RUN time=208
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel COMPARE_base_rest
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel MEMLEAK insufficient data for memleak test
+   PASS ERP_D_Ld3.ne4pg2_oQU480.F2010.chrysalis_intel SHORT_TERM_ARCHIVER

-Take a batch of results for the jenkins user for the testid 'mytest' and compare the results to
-baselines for 'master'::
+All other stdout output from the CIME case control system produced by running this test will
+be put in the file $CASEDIR/TestStatus.log.

-  ./compare_test_results -r /home/jenkins/e3sm/scratch/jenkins/ -t mytest -b master
+A cs.status.$testid script will also be put in the test root. This script will allow you to see the
+current status of all your tests.

-=============
-Adding tests
-=============
-.. _`Adding tests`:

+==============================
+Baselines and Baseline Testing
+==============================
+.. _`Baselines`:

-E3SM
+A big part of testing is managing your baselines (sometimes called gold results) and doing additional tests against
+the baseline. The baseline for a test will be a copy of the (history) files created in the run of the test.

-Open the config/e3sm/tests.py file, you'll see a python dict at the top
-of the file called _TESTS, find the test category you want to
-change in this dict and add your testcase to the list. Note the
-comment at the top of this file indicating that you add a test with
-this format: test>.., and then there is a second
-argument for mods.
+create_test can
+be asked to perform bit-for-bit comparisons between the files generated by the current run of the test and
+the files stored in the baseline. They must be bit-for-bit identical for the baseline test to pass.

-CESM
+Baseline testing adds an additional
+test criterion to the one that comes from the test type and is used as a way to guard against unintentionally
+changing the results from a deterministic climate model.

-.. _CESM:

+-------------------
+Creating a baseline
+-------------------
+.. _`Creating a baseline`:

-Select a compset to test.
If you need to test a non-standard compset, -define an alias for it in the most appropriate config_compsets.xml in :: +A baseline can be generated by passing ``-g`` to `create_test <../Tools_user/create_test.html>`_. There +are additional options to control generating baselines.:: - $cesm/components/$component/cime_config - $cesm/cime/src/drivers/mct/cime_config - $cesm/cime_config + ./scripts/create_test -b master -g SMS.ne30_f19_g16_rx1.A -If you want to test non-default namelist or xml variable values for your chosen compset, -you might find them in a suitable existing testmods directory (see "branching", this section, for locations). -If not, then populate a new testmods directory with the needed files (see "contents", below). -Note; do not use '-' in the testmods directory name because it has a special meaning to create_test. -Testlists and testmods live in different paths for cime, drv, and components. -The relevant directory branching looks like -:: +-------------------- +Comparing a baseline +-------------------- +.. _`Comparing a baseline`: - components/$component/cime_config/testdefs/ - testlist_$component.xml - testmods_dirs/$component/{TESTMODS1,TESTMODS2,...} - cime/src/drivers/mct/cime_config/testdefs/ - testlist_drv.xml - testmods_dirs/drv/{default,5steps,...} - cime_config/ - testlist_allactive.xml - testmods_dirs/allactive/{defaultio,...} - -The contents of each testmods directory can include -:: +Comparing the output of a test to a baseline is achieved by passing ``-c`` to `create_test <../Tools_user/create_test.html>`_.:: - user_nl_$components namelist variable=value pairs - shell_commands xmlchange commands - user_mods a list of other GROUP-TESTMODS which should be imported - but at a lower precedence than the local testmods. + ./scripts/create_test -b master -c SMS.ne30_f19_g16_rx1.A -If this test will only be run as a single test, you can now create a test name -and follow the individual_ test instructions for create_test. -If you want this test to be part of a suite, then it must be described in the relevant testlists_YYY.xml file. +Suppose you accidentally changed something in the source code that does not cause the model to crash but +does cause it to change the answers it produces. In this case, the SMS test would pass (it still runs) but the +comparison with baselines would FAIL (answers are not bit-for-bit identical to the baseline) and so the test +as a whole would FAIL. -=============================== -CIME's scripts regression tests -=============================== -.. _`CIME's scripts regression tests`: - -**$CIMEROOT/scripts/tests/scripts_regression_tests.py** is the suite of internal tests we run -for the stand-alone CIME testing. With no arguments, it will run the full suite. You can limit testing to a specific -test class or even a specific test within a test class. - -Run full suite:: - - ./scripts_regression_tests.py - -Run a test class:: - - ./scripts_regression_tests.py K_TestCimeCase - -Run a specific test:: - - ./scripts_regression_tests.py K_TestCimeCase.test_cime_case - -If a test fails, the unittest module that drives scripts_regression_tests wil note the failure, but -won't print the output of the test until testing has completed. When there are failures for a -test, the case directories for that test will not be cleaned up so that the user can do a post-mortem -analysis. The user will be notified of the specific directories that will be left for them to -examine. +------------------ +Managing baselines +------------------ +.. 
_`Managing baselines`: -More description can be found in https://github.com/ESCOMP/ctsm/wiki/System-Testing-Guide +If you intended to change the answers, you need to update the baseline with new files. This is referred to +as "blessing" the test. +This is done with the `bless_test_results <../Tools_user/bless_test_results.html>`_ tool. The tool provides the ability to bless different features of the baseline. The currently supported features are namelist files, history files, and performance metrics. The performance metrics are separated into throughput and memory usage. + +The following command can be used to compare a test to a baseline and bless an update to the history file.:: + + ./CIME/Tools/bless_test_results -b master --hist-only SMS.ne30_f19_g16_rx1.A + +The `compare_test_results <../Tools_user/compare_test_results.html>_` tool can be used to quickly compare tests to baselines and report any `diffs`.:: + + ./CIME/Tools/compare_test_results -b master SMS.ne30_f19_g16_rx1.A + +--------------------- +Performance baselines +--------------------- +.. _`Performance baselines`: +By default performance baselines are generated by parsing the coupler log and comparing the throughput in SYPD (Simulated Years Per Day) and the memory usage high water. + +This can be customized by creating a python module under ``$DRIVER_ROOT/cime_config/customize``. There are four hooks that can be used to customize the generation and comparison. + +- perf_get_throughput +- perf_get_memory +- perf_compare_throughput_baseline +- perf_compare_memory_baseline + +.. + TODO need to add api docs and link +The following pseudo code is an example of this customization.:: + + # $DRIVER/cime_config/customize/perf_baseline.py + + def perf_get_throughput(case): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str + Storing throughput value. + str + Open baseline file for writing. + """ + current = analyze_throughput(...) + + return json.dumps(current), "w" + + def perf_get_memory(case): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str + Storing memory value. + str + Open baseline file for writing. + """ + current = analyze_memory(case) + + return json.dumps(current), "w" + + def perf_compare_throughput_baseline(case, baseline, tolerance): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : str + Baseline throughput value. + tolerance : float + Allowed difference tolerance. + + Returns + ------- + bool + Whether throughput diff is below tolerance. + str + Comments about the results. + """ + current = analyze_throughput(case) + + baseline = json.loads(baseline) + + diff, comments = generate_diff(...) + + return diff, comments + + def perf_compare_memory_baseline(case, baseline, tolerance): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : str + Baseline memory value. + tolerance : float + Allowed difference tolerance. + + Returns + ------- + bool + Whether memory diff is below tolerance. + str + Comments about the results. + """ + current = analyze_memory(case) + + baseline = json.loads(baseline) + + diff, comments = generate_diff(...) 
+ + return diff, comments diff --git a/doc/source/users_guide/troubleshooting.rst b/doc/source/users_guide/troubleshooting.rst index 54034b5f8d1..77ef823d9c0 100644 --- a/doc/source/users_guide/troubleshooting.rst +++ b/doc/source/users_guide/troubleshooting.rst @@ -13,6 +13,11 @@ If `create_newcase <../Tools_user/create_newcase.html>`_ fails on a relatively > create_newcase --help +Troubleshooting problems in cime scripts +---------------------------------------- + +If any of the python-based cime scripts are dying in a mysterious way, more information can be obtained by rerunning the script with the ``--debug`` option. + Troubleshooting job submission ------------------------------- @@ -100,3 +105,22 @@ Compare the restart files against the sizes of a previous restart. If they don't See `Restarting a run `_. It is not uncommon for nodes to fail on HPC systems or for access to large file systems to hang. Before you file a bug report, make sure a case fails consistently in the same place. + +**Rerunning with additional debugging information** + +There are a few changes you can make to your case to get additional information that aids in debugging: + +- Increase the value of the run-time xml variable ``INFO_DBUG``: ``./xmlchange INFO_DBUG=2``. + This adds more information to the cpl.log file that can be useful if you can't tell what component is aborting the run, or where bad coupling fields are originating. + (This does NOT require rebuilding.) + +- Try rebuilding and rerunning with the build-time xml variable ``DEBUG`` set to ``TRUE``: ``./xmlchange DEBUG=TRUE``. + + - This adds various runtime checks that trap conditions such as out-of-bounds array indexing, divide by 0, and other floating point exceptions (the exact conditions checked depend on flags set in macros defined in the cmake_macros subdirectory of the caseroot). + + - The best way to do this is often to create a new case and run ``./xmlchange DEBUG=TRUE`` before running ``./case.build``. + However, if it is hard for you to recreate your case, then you can run that xmlchange command from your existing case; then you must run ``./case.build --clean-all`` before rerunning ``./case.build``. + + - Note that the model will run significantly slower in this mode, so this may not be feasible if the model has to run a long time before producing the error. + (Sometimes it works well to run the model until shortly before the error in non-debug mode, have it write restart files, then restart after rebuilding in debug mode.) + Also note that answers will change slightly, so if the error arises from a rare condition, then it may not show up in this mode. diff --git a/doc/source/users_guide/unit_testing.rst b/doc/source/users_guide/unit_testing.rst index 25a81c0957e..af8025a6f44 100644 --- a/doc/source/users_guide/unit_testing.rst +++ b/doc/source/users_guide/unit_testing.rst @@ -35,7 +35,7 @@ These consist of: #. A Python script that provides a simple front end for the CMake-based tests. -The Fortran unit tests use `pFUnit `_, which is a Fortran testing framework that follows conventions of other xUnit frameworks. +The Fortran unit tests use `pFUnit `_, which is a Fortran testing framework that follows conventions of other xUnit frameworks. CIME's support for pFUnit requires pFUnit version 4 or greater. .. 
_running_unit_tests: @@ -98,22 +98,31 @@ These unit tests are run automatically as part of **scripts_regression_tests** o How to add unit testing support on your machine ----------------------------------------------- -The following instructions assume that you have ported CIME to your machine by following the instructions in :doc:`/users_guide/porting-cime`. -If you have done that, you can add unit testing support by building pFUnit on your machine and then pointing to the build in your **config_compilers.xml** file. Those processes are described in the following sections. +The following instructions assume that you have ported CIME to your +machine by following the instructions in +:doc:`/users_guide/porting-cime`. If you have done that, you can add +unit testing support by building pFUnit on your machine and then +pointing to the build in your ** *MACH*_*COMPILER*.cmake** file. Those +processes are described in the following sections. -At a minimum, do a serial build of pFUnit (without MPI or OpenMP) using the default compiler on your machine. -That is the default that **run_tests.py** and that is required for **scripts_regression_tests.py** to run the unit tests on your machine. +Building pFUnit +~~~~~~~~~~~~~~~ +Follow the instructions below to build pFUnit using the default compiler on your machine. +That is the default for **run_tests.py** and that is required for **scripts_regression_tests.py** to run the unit tests on your machine. +For the CMake step, we typically build with ``-DSKIP_MPI=YES``, ``-DSKIP_OPENMP=YES`` and ``-DCMAKE_INSTALL_PREFIX`` set to the directory where you want pFUnit to be installed. +(At this time, no unit tests require parallel support, so we build without MPI support to keep things simple.) Optionally, you can also provide pFUnit builds with other supported compilers on your machine. -You can also provide additional pFUnit builds with other combinations of MPI and OpenMP on or off. -At this time, however, no unit tests require parallel support so no benefit is gained by providing MPI-enabled builds. -Building pFUnit -~~~~~~~~~~~~~~~ +#. Obtain pFUnit from https://github.com/Goddard-Fortran-Ecosystem/pFUnit (see + https://github.com/Goddard-Fortran-Ecosystem/pFUnit#obtaining-pfunit for details) + +#. Create a directory for the build and cd to that directory: -For a serial build of pFUnit, follow these instructions: + .. code-block:: shell -#. Download pFUnit from https://sourceforge.net/projects/pfunit/. + > mkdir build-dir + > cd build-dir #. Set up your environment to be similar to the environment used in CIME system builds. For example, load the appropriate compilers into your path. @@ -121,25 +130,30 @@ For a serial build of pFUnit, follow these instructions: .. code-block:: shell - > $CIMEROOT/tools/configure --mpilib mpi-serial + > $CIMEROOT/CIME/scripts/configure --mpilib mpi-serial - If you are doing an MPI-enabled build, also change the ``--mpilib`` argument. Then source either **./.env_mach_specific.sh** or **./.env_mach_specific.csh**, depending on your shell. + On some systems, you may need to explicitly set the ``FC`` and ``CC`` environment + variables so that pFUnit's CMake build picks up the correct compilers, e.g., with: + + .. code-block:: shell + + > export FC=ifort + > export CC=icc + #. For convenience, set the ``PFUNIT`` environment variable to point to the location where you want to install pFUnit. For example (in bash): .. 
code-block:: shell - > export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP + > export PFUNIT=$CESMDATAROOT/tools/pFUnit/pFUnit4.7.0_cheyenne_Intel19.1.1_noMPI_noOpenMP #. Configure and build pFUnit: .. code-block:: shell - > mkdir build - > cd build - > cmake -DMPI=NO -DOPENMP=NO -DCMAKE_INSTALL_PREFIX=$PFUNIT .. - > make -j 4 + > cmake -DSKIP_MPI=YES -DSKIP_OPENMP=YES -DCMAKE_INSTALL_PREFIX=$PFUNIT .. + > make -j 8 #. Run pFUnit's self-tests: @@ -153,27 +167,21 @@ For a serial build of pFUnit, follow these instructions: > make install -You can repeat this process with different compiler environments and/or different choices of ``-DMPI`` and ``-DOPENMP`` in the cmake step. (Each of them can have the value ``NO`` or ``YES``.) +You can repeat this process with different compiler environments. Make sure to choose a different installation directory for each build by setting the ``PFUNIT`` variable differently. -Adding to the xml file -~~~~~~~~~~~~~~~~~~~~~~ +Adding to the appropriate cmake file +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After you build pFUnit, tell CIME about your build or builds. -To do this, specify the appropriate path(s) using the ``PFUNIT_PATH`` element in **config_compilers.xml**. -For a serial build, your setting will look like this example: +To do this, specify the appropriate path using the ``PFUNIT_PATH`` CMake variable in the ** *MACH*_*COMPILER*.cmake** file. +For a build with no MPI or openMP support (as recommended above), the block should look like this (with the actual path replaced with the PFUNIT path you specified when doing the build): -.. code-block:: xml + .. code-block:: cmake - $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP - -The ``MPILIB`` attribute should be either: - -* ``mpi-serial`` for a pFUnit build where ``-DMPI=NO``, or - -* the name of the MPI library you used for a pFUnit build where ``-DMPI=YES``. (For example, you might use ``mpich``, which should be one of the machine's MPI libraries specified by ``MPILIBS`` in **config_machines.xml**.) - -The ``compile_threaded`` attribute should be either ``TRUE`` or ``FALSE`` depending on the value of ``-DOPENMP``. + if (MPILIB STREQUAL mpi-serial AND NOT compile_threaded) + set(PFUNIT_PATH "$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit4.7.0_cheyenne_Intel19.1.1_noMPI_noOpenMP") + endif() Once you have specified the path for your build(s), you should be able to run the unit tests by following the instructions in :ref:`running_unit_tests`. @@ -400,25 +408,12 @@ You can also see examples of the unit test build scripts by viewing the Other pFUnit documentation sources ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Some pFUnit documentation is available here: http://pfunit.sourceforge.net/. - -Extensive documentation and examples are included in the following when you download -pFUnit from http://sourceforge.net/projects/pfunit/: - -* documentation/pFUnit3-ReferenceManual.pdf - -* Examples/ - -* tests/ - -The tests are tests of the pFUnit code itself, written in pFUnit. They demonstrate -many uses of pFUnit features. Other documentation includes additional assertion -methods that are available. +Unfortunately, the documentation inside the pFUnit repository (in the documentation and Examples directories) is out-of-date (at least as of April, 2023): much of this documentation refers to version 3 of pFUnit, which differs in some ways from version 4. However, some working examples are provided in https://github.com/Goddard-Fortran-Ecosystem/pFUnit_demos. 
Documentation of the unit test build system ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The CMake build infrastructure is in **$CIMEROOT/src/externals/CMake**. +The CMake build infrastructure is in **$CIMEROOT/CIME/non_py/src/CMake**. The infrastructure for building and running tests with **run_tests.py** is in **$CIMEROOT/scripts/fortran_unit_testing**. That directory also contains general diff --git a/doc/source/what_cime/index.rst b/doc/source/what_cime/index.rst index 86e3e4bd5bc..e2e3eade94a 100644 --- a/doc/source/what_cime/index.rst +++ b/doc/source/what_cime/index.rst @@ -6,56 +6,33 @@ contain the root `toctree` directive. ##################################### - What is CIME? + What is CIME? ##################################### .. toctree:: :maxdepth: 3 :numbered: - - -CIME, pronounced "SEAM", contains the support scripts (configure, -build, run, test), data models, essential utility libraries, a “main” -and other tools that are needed to build a single-executable coupled -Earth System Model. CIME is available in a stand-alone package that -can be compiled and tested without active prognostic components but is -typically included in the source of a climate model. CIME does not -contain: any active components, any intra-component coupling -capability (such as atmosphere physics-dynamics coupling). ********* Overview ********* -CIME is comprised of: - -1. A Case Control System to support configuration, compilation, execution, system testing and unit testing of a earth system model: - - i. Scripts to enable simple generation of model executables and associated input files for different scientific cases, component resolutions and combinations of full, data and stub components with a handful of commands. - ii. Testing utilities to run defined system tests and report results for different configurations of the coupled system. - -2. A default coupled model architecture: - - i. A programmer interface and libraries to implement a hub-and-spoke inter-component coupling architecture. - ii. An implementation of a "hub" that needs 7 components (atm, ocn, lnd, sea-ice, land-ice, river, wave). a.k.a. “the driver”. - iii. The ability to allow active and data components to be mixed in any combination as long as each component implements the coupling programmer interface. +CIME (Common Infrastructure for Modeling the Earth, pronounced "SEAM") primarily consists of a Case Control System that supports the configuration, compilation, execution, system testing and unit testing of an Earth System Model. The three main components of the Case Control System are: -3. Non-active Data and Stub components: +1. XML files that describe the available machines, models and model configurations. +2. Python scripts that take user commands and parse the XML files to configure a set of models with their + build and runtime options for a specified machine and then provide additional commands to build executables and + submit jobs. +3. Testing utilities to run defined system tests and report results for different configurations of the coupled system. - i. “Data-only” versions of 6 of the 7 components that can replace active components at build-time. - ii. “Stub” versions of all 7 components for building a complete system. +CIME also contains additional stand-alone tools useful for Earth system modeling including: -4. Source code for externall libraries useful in scientific applications in general and climate models in particular. - i. Parallel I/O library. - ii. The Model Coupling Toolkit. - iii. Timing library. +1. 
Parallel regridding weight generation program +2. Scripts to automate off-line load-balancing. +3. Scripts to conduct ensemble-based statistical consistency tests. +4. Netcdf file comparison program (for bit-for-bit). -5. Additional stand-alone tools: - - i. Parallel regridding weight generation program - ii. Scripts to automate off-line load-balancing. - iii. Scripts to conduct ensemble-based statistical consistency tests. - iv. Netcdf file comparison program (for bit-for-bit). +CIME does **not** contain the source code for any Earth System Model drivers or components. It is typically included alongside the source code of a host model. However, CIME does include pointers to external repositories that contain drivers, data models and other test components. These external components can be easily assembled to facilitate end-to-end system tests of the CIME infrastructure, which are defined in the CIME repository. ************************* Development @@ -64,6 +41,3 @@ Development CIME is developed in an open-source, public repository hosted under the Earth System Model Computational Infrastructure (ESMCI) organization on Github at http://github.com/ESMCI/cime. - - - diff --git a/doc/source/xml_files/atmosphere.rst b/doc/source/xml_files/atmosphere.rst index 6bb2a2282f7..a6b544bc02c 100644 --- a/doc/source/xml_files/atmosphere.rst +++ b/doc/source/xml_files/atmosphere.rst @@ -4,7 +4,7 @@ CIME Atmosphere Data and Stub XML Files ####################################### -Atmosphere component XML files for data, stub, and dead components. +Atmosphere component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,7 +19,7 @@ XML specification for archiving datm output files. .. literalinclude:: ../../../src/components/data_comps/datm/cime_config/config_archive.xml -XML variables and component descriptions specific to datm. +XML variables and component descriptions specific to datm. .. literalinclude:: ../../../src/components/data_comps/datm/cime_config/config_component.xml @@ -35,7 +35,7 @@ CIMEROOT/src/components/stub_comps/satm/cime_config The atmosphere stub model, **satm**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to satm. +XML variables and component descriptions specific to satm. .. literalinclude:: ../../../src/components/stub_comps/satm/cime_config/config_component.xml @@ -47,11 +47,6 @@ CIMEROOT/src/components/xcpl_comps/xatm/cime_config The atmosphere dead model, **xatm**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xatm. +XML variables and component descriptions specific to xatm. .. literalinclude:: ../../../src/components/xcpl_comps/xatm/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/cesm.rst b/doc/source/xml_files/cesm.rst index 1b37a9b2789..4fe7748ac0a 100644 --- a/doc/source/xml_files/cesm.rst +++ b/doc/source/xml_files/cesm.rst @@ -4,7 +4,7 @@ CESM Coupled Model XML Files ############################# -XML files for CESM in CIMEROOT/config/cesm. +XML files for CESM in CIMEROOT/config/cesm. .. toctree:: :maxdepth: 1 @@ -13,57 +13,6 @@ XML files for CESM in CIMEROOT/config/cesm. CIMEROOT/config/cesm ******************** -CESM XML settings for short term archiver. - -.. literalinclude:: ../../../config/cesm/config_archive.xml - CESM XML settings for defining CASEROOT env_*.xml file entries. .. 
literalinclude:: ../../../config/cesm/config_files.xml - -CESM XML settings for defining supported grids. - -.. literalinclude:: ../../../config/cesm/config_grids.xml - - -***************************** -CIMEROOT/config/cesm/machines -***************************** - -CESM XML settings for supported batch queuing systems. - -.. literalinclude:: ../../../config/cesm/machines/config_batch.xml - -CESM XML settings for supported compilers. - -.. literalinclude:: ../../../config/cesm/machines/config_compilers.xml - -CESM XML settings for supported machines. - -.. literalinclude:: ../../../config/cesm/machines/config_machines.xml - -CESM XML settings for Parallel Input/Output (PIO) library. - -.. literalinclude:: ../../../config/cesm/machines/config_pio.xml - - -****************************** -allactive SRCROOT/cime_config -****************************** - -The CESM all-active model settings are stored in the `CESM cime_config github repository -`_. That repository includes -the following XML files. - -CESM XML settings for all-active component set (compset) configurations. - -.. literalinclude:: ../../../../cime_config/config_compsets.xml - -CESM XML settings for all-active test configurations. - -.. literalinclude:: ../../../../cime_config/testlist_allactive.xml - -CESM XML settings for optimized processor elements (PEs) layout configurations. - -.. literalinclude:: ../../../../cime_config/config_pes.xml - diff --git a/doc/source/xml_files/common.rst b/doc/source/xml_files/common.rst index 1f57771df56..4fb201eea5a 100644 --- a/doc/source/xml_files/common.rst +++ b/doc/source/xml_files/common.rst @@ -4,7 +4,7 @@ Common XML Files ################# -Common XML files in CIMEROOT/config. +Common XML files in CIMEROOT/config. .. toctree:: :maxdepth: 1 @@ -22,9 +22,6 @@ Headers for the CASEROOT env_*.xml files created by create_newcase. CIMEROOT/config/config_tests.xml ******************************** -Descriptions and XML settings for the CIME regression tests. +Descriptions and XML settings for the CIME regression tests. .. literalinclude:: ../../../config/config_tests.xml - - - diff --git a/doc/source/xml_files/components.rst b/doc/source/xml_files/components.rst index 7053df0301a..7fdb45dc8bd 100644 --- a/doc/source/xml_files/components.rst +++ b/doc/source/xml_files/components.rst @@ -16,7 +16,4 @@ Component XML files in CIMEROOT/src sub-directories. ocean.rst river.rst seaice.rst - wave.rst - - - + wave.rst diff --git a/doc/source/xml_files/drivers.rst b/doc/source/xml_files/drivers.rst index 4153e903957..a3c9ab8ea89 100644 --- a/doc/source/xml_files/drivers.rst +++ b/doc/source/xml_files/drivers.rst @@ -15,7 +15,7 @@ CIMEROOT/src/drivers/mct/cime_config The Model Coupling Toolkit (MCT) based driver/coupler is treated as a component by CIME with associated XML files -to define behavior. +to define behavior. XML specification for archiving coupler output files. @@ -43,7 +43,7 @@ XML settings for driver/coupler defined component set (compset) PE layouts. ************************************************************** -CIMEROOT/src/drivers/mct/cime_config/namelist_definition_*.xml +CIMEROOT/src/drivers/mct/cime_config/namelist_definition_*.xml ************************************************************** XML namelist definitions for the driver/coupler. @@ -57,6 +57,3 @@ XML namelist definitions for the driver/coupler fields. XML namelist definitions for the driver/coupler model input/output settings. .. 
literalinclude:: ../../../src/drivers/mct/cime_config/namelist_definition_modelio.xml - - - diff --git a/doc/source/xml_files/e3sm.rst b/doc/source/xml_files/e3sm.rst index a52c1248140..7c259f4bb95 100644 --- a/doc/source/xml_files/e3sm.rst +++ b/doc/source/xml_files/e3sm.rst @@ -4,7 +4,7 @@ E3SM Coupled Model XML Files ############################# -XML files for E3SM in CIMEROOT/config/e3sm. +XML files for E3SM in CIMEROOT/config/e3sm. .. toctree:: :maxdepth: 1 @@ -13,53 +13,6 @@ XML files for E3SM in CIMEROOT/config/e3sm. CIMEROOT/config/e3sm ******************** -E3SM XML settings for short term archiver. - -.. literalinclude:: ../../../config/e3sm/config_archive.xml - E3SM XML settings for defining CASEROOT env_*.xml file entries. .. literalinclude:: ../../../config/e3sm/config_files.xml - -E3SM XML settings for defining supported grids. - -.. literalinclude:: ../../../config/e3sm/config_grids.xml - - -****************************** -CIMEROOT/config/e3sm/allactive -****************************** - -E3SM XML settings for all-active component set (compset) configurations. - -.. literalinclude:: ../../../config/e3sm/allactive/config_compsets.xml - -E3SM XML settings for all-active test configurations. - -.. literalinclude:: ../../../config/e3sm/allactive/testlist_allactive.xml - -E3SM XML settings for optimized processor elements (PEs) layout configurations. - -.. literalinclude:: ../../../config/e3sm/allactive/config_pesall.xml - - -***************************** -CIMEROOT/config/e3sm/machines -***************************** - -E3SM XML settings for supported batch queuing systems. - -.. literalinclude:: ../../../config/e3sm/machines/config_batch.xml - -E3SM XML settings for supported compilers. - -.. literalinclude:: ../../../config/e3sm/machines/config_compilers.xml - -E3SM XML settings for supported machines. - -.. literalinclude:: ../../../config/e3sm/machines/config_machines.xml - -E3SM XML settings for Parallel Input/Output (PIO) library. - -.. literalinclude:: ../../../config/e3sm/machines/config_pio.xml - diff --git a/doc/source/xml_files/esp.rst b/doc/source/xml_files/esp.rst index 9688fa0c1b9..1874c535111 100644 --- a/doc/source/xml_files/esp.rst +++ b/doc/source/xml_files/esp.rst @@ -4,7 +4,7 @@ CIME ESP Data and Stub XML Files ################################ -External System Processing **ESP** component XML files for data, stub, and dead components. +External System Processing **ESP** component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -15,7 +15,7 @@ CIMEROOT/src/components/data_comps/desp/cime_config ESP data model, **desp**, XML files and settings. -XML variables and component descriptions specific to desp. +XML variables and component descriptions specific to desp. .. literalinclude:: ../../../src/components/data_comps/desp/cime_config/config_component.xml @@ -31,12 +31,6 @@ CIMEROOT/src/components/stub_comps/sesp/cime_config The ESP stub model, **sesp**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to sesp. +XML variables and component descriptions specific to sesp. .. literalinclude:: ../../../src/components/stub_comps/sesp/cime_config/config_component.xml - - - - - - diff --git a/doc/source/xml_files/index.rst b/doc/source/xml_files/index.rst index 923e4b8e1ec..62547bce7b5 100644 --- a/doc/source/xml_files/index.rst +++ b/doc/source/xml_files/index.rst @@ -11,9 +11,9 @@ CIMEROOT. 
Modifcations to XML settings is case specific and the tools and modify these settings while ensuring the continued schema integrity of the XML. -For advanced CIME developers, there are XML schema definition files +For advanced CIME developers, there are XML schema definition files in the CIMEROOT/config/xml_schemas directory that can be used with -**xmllint** to verify the XML. +**xmllint** to verify the XML. .. toctree:: :maxdepth: 2 @@ -21,9 +21,3 @@ in the CIMEROOT/config/xml_schemas directory that can be used with e3sm.rst cesm.rst common.rst - components.rst - drivers.rst - - - - diff --git a/doc/source/xml_files/land.rst b/doc/source/xml_files/land.rst index dbe556e5f16..e275f466434 100644 --- a/doc/source/xml_files/land.rst +++ b/doc/source/xml_files/land.rst @@ -4,7 +4,7 @@ CIME Land Data and Stub XML Files ################################# -Land component XML files for data, stub, and dead components. +Land component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,7 +19,7 @@ XML specification for archiving dlnd output files. .. literalinclude:: ../../../src/components/data_comps/dlnd/cime_config/config_archive.xml -XML variables and component descriptions specific to dlnd. +XML variables and component descriptions specific to dlnd. .. literalinclude:: ../../../src/components/data_comps/dlnd/cime_config/config_component.xml @@ -35,7 +35,7 @@ CIMEROOT/src/components/stub_comps/slnd/cime_config The land stub model, **slnd**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to slnd. +XML variables and component descriptions specific to slnd. .. literalinclude:: ../../../src/components/stub_comps/slnd/cime_config/config_component.xml @@ -47,11 +47,6 @@ CIMEROOT/src/components/xcpl_comps/xlnd/cime_config The land dead model, **xlnd**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xlnd. +XML variables and component descriptions specific to xlnd. .. literalinclude:: ../../../src/components/xcpl_comps/xlnd/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/landice.rst b/doc/source/xml_files/landice.rst index a0873c54040..cdb00e69d90 100644 --- a/doc/source/xml_files/landice.rst +++ b/doc/source/xml_files/landice.rst @@ -4,8 +4,8 @@ CIME Land Ice Data and Stub XML Files ##################################### -Land ice component XML files for stub and dead components. -The land ice component does not currently have a data model. +Land ice component XML files for stub and dead components. +The land ice component does not currently have a data model. .. toctree:: :maxdepth: 1 @@ -17,7 +17,7 @@ CIMEROOT/src/components/stub_comps/sglc/cime_config The land ice stub model, **sglc**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to sglc. +XML variables and component descriptions specific to sglc. .. literalinclude:: ../../../src/components/stub_comps/sglc/cime_config/config_component.xml @@ -29,11 +29,6 @@ CIMEROOT/src/components/xcpl_comps/xglc/cime_config The land ice dead model, **xglc**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xglc. +XML variables and component descriptions specific to xglc. .. 
literalinclude:: ../../../src/components/xcpl_comps/xglc/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/ocean.rst b/doc/source/xml_files/ocean.rst index 2c21990cc3b..40f1d69583b 100644 --- a/doc/source/xml_files/ocean.rst +++ b/doc/source/xml_files/ocean.rst @@ -4,7 +4,7 @@ CIME Ocean Data and Stub XML Files ################################## -Ocean component XML files for data, stub, and dead components. +Ocean component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,7 +19,7 @@ XML specification for archiving docn output files. .. literalinclude:: ../../../src/components/data_comps/docn/cime_config/config_archive.xml -XML variables and component descriptions specific to docn. +XML variables and component descriptions specific to docn. .. literalinclude:: ../../../src/components/data_comps/docn/cime_config/config_component.xml @@ -35,7 +35,7 @@ CIMEROOT/src/components/stub_comps/socn/cime_config The ocean stub model, **socn**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to socn. +XML variables and component descriptions specific to socn. .. literalinclude:: ../../../src/components/stub_comps/socn/cime_config/config_component.xml @@ -47,11 +47,6 @@ CIMEROOT/src/components/xcpl_comps/xocn/cime_config The ocean dead model, **xocn**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xocn. +XML variables and component descriptions specific to xocn. .. literalinclude:: ../../../src/components/xcpl_comps/xocn/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/river.rst b/doc/source/xml_files/river.rst index 116b1c42993..84543d59879 100644 --- a/doc/source/xml_files/river.rst +++ b/doc/source/xml_files/river.rst @@ -4,7 +4,7 @@ CIME River Runoff Data and Stub XML Files ######################################### -River runoff component XML files for data, stub, and dead components. +River runoff component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,11 +19,11 @@ XML specification for archiving drof output files. .. literalinclude:: ../../../src/components/data_comps/drof/cime_config/config_archive.xml -XML variables and component descriptions specific to drof. +XML variables and component descriptions specific to drof. .. literalinclude:: ../../../src/components/data_comps/drof/cime_config/config_component.xml -XML variables and component descriptions specific to drof. +XML variables and component descriptions specific to drof. .. literalinclude:: ../../../src/components/data_comps/drof/cime_config/config_component.xml @@ -39,7 +39,7 @@ CIMEROOT/src/components/stub_comps/srof/cime_config The river runoff stub model, **srof**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to srof. +XML variables and component descriptions specific to srof. .. literalinclude:: ../../../src/components/stub_comps/srof/cime_config/config_component.xml @@ -54,8 +54,3 @@ does it have any namelist settings. XML variables and component descriptions specific to xrof. .. 
literalinclude:: ../../../src/components/xcpl_comps/xrof/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/seaice.rst b/doc/source/xml_files/seaice.rst index 2492493b1e1..7c9d9e04e98 100644 --- a/doc/source/xml_files/seaice.rst +++ b/doc/source/xml_files/seaice.rst @@ -4,7 +4,7 @@ CIME Sea Ice Data and Stub XML Files #################################### -Sea Ice component XML files for data, stub, and dead components. +Sea Ice component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,11 +19,11 @@ XML specification for archiving dice output files. .. literalinclude:: ../../../src/components/data_comps/dice/cime_config/config_archive.xml -XML variables and component descriptions specific to dice. +XML variables and component descriptions specific to dice. .. literalinclude:: ../../../src/components/data_comps/dice/cime_config/config_component.xml -XML variables and component descriptions specific to dice. +XML variables and component descriptions specific to dice. .. literalinclude:: ../../../src/components/data_comps/dice/cime_config/config_component.xml @@ -39,7 +39,7 @@ CIMEROOT/src/components/stub_comps/sice/cime_config The sea ice stub model, **sice**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to sice. +XML variables and component descriptions specific to sice. .. literalinclude:: ../../../src/components/stub_comps/sice/cime_config/config_component.xml @@ -51,11 +51,6 @@ CIMEROOT/src/components/xcpl_comps/xice/cime_config The sea ice dead model, **xice**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to satm. +XML variables and component descriptions specific to satm. .. literalinclude:: ../../../src/components/xcpl_comps/xice/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/wave.rst b/doc/source/xml_files/wave.rst index fa6e91f3865..e9d54792c1e 100644 --- a/doc/source/xml_files/wave.rst +++ b/doc/source/xml_files/wave.rst @@ -4,7 +4,7 @@ CIME Wave Data and Stub XML Files ################################# -Wave component XML files for data, stub, and dead components. +Wave component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,11 +19,11 @@ XML specification for archiving dwav output files. .. literalinclude:: ../../../src/components/data_comps/dwav/cime_config/config_archive.xml -XML variables and component descriptions specific to dwav. +XML variables and component descriptions specific to dwav. .. literalinclude:: ../../../src/components/data_comps/dwav/cime_config/config_component.xml -XML variables and component descriptions specific to dwav. +XML variables and component descriptions specific to dwav. .. literalinclude:: ../../../src/components/data_comps/dwav/cime_config/config_component.xml @@ -39,7 +39,7 @@ CIMEROOT/src/components/stub_comps/swav/cime_config The wave stub model, **swav**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to swav. +XML variables and component descriptions specific to swav. .. literalinclude:: ../../../src/components/stub_comps/swav/cime_config/config_component.xml @@ -51,11 +51,6 @@ CIMEROOT/src/components/xcpl_comps/xwav/cime_config The wave dead model, **xwav**, does not output any files in the RUNDIR nor does it have any namelist settings. 
-XML variables and component descriptions specific to xwav. +XML variables and component descriptions specific to xwav. .. literalinclude:: ../../../src/components/xcpl_comps/xwav/cime_config/config_component.xml - - - - - diff --git a/doc/tools_autodoc.cfg b/doc/tools_autodoc.cfg index ca1300b63a0..c0883b64e88 100644 --- a/doc/tools_autodoc.cfg +++ b/doc/tools_autodoc.cfg @@ -1,14 +1,14 @@ [tools] -tools_dir: ../scripts/Tools +tools_dir: ../CIME/Tools exclude_files: __init__.py load.awk standard_script_setup.py Makefile exclude_ext: ~ pyc exclude_prefix: JENKINS_ [scripts] scripts_dir: ../scripts -exclude_files: +exclude_files: exclude_ext: ~ pyc -exclude_prefix: +exclude_prefix: [templates] templates_dir: ../config/cesm/machines @@ -18,4 +18,4 @@ exclude_prefix: Depends [doc] doc_dir: ./source/Tools_user -index_template: index.rst.template \ No newline at end of file +index_template: index.rst.template diff --git a/doc/tools_autodoc.py b/doc/tools_autodoc.py index 5877f005f43..c2a420751f6 100755 --- a/doc/tools_autodoc.py +++ b/doc/tools_autodoc.py @@ -1,5 +1,5 @@ -#!/usr/bin/env python -"""script to auto generate rst documentation for cime/scripts/Tools +#!/usr/bin/env python3 +"""script to auto generate rst documentation for cime/scripts/Tools user facing utilities. """ @@ -11,8 +11,11 @@ if sys.hexversion < 0x02070000: print(70 * "*") print("ERROR: {0} requires python >= 2.7.x. ".format(sys.argv[0])) - print("It appears that you are running python {0}".format( - ".".join(str(x) for x in sys.version_info[0:3]))) + print( + "It appears that you are running python {0}".format( + ".".join(str(x) for x in sys.version_info[0:3]) + ) + ) print(70 * "*") sys.exit(1) @@ -30,27 +33,30 @@ from configparser import ConfigParser as config_parser # define rst templates -_tool_template = Template(''' +_tool_template = Template( + """ .. _$tool_name: #################################################### -$tool_name +$tool_name #################################################### -**$tool_name** is a script in CIMEROOT/scripts/Tools. +**$tool_name** is a script in CIMEROOT/CIME/Tools. .. toctree:: :maxdepth: 1 .. command-output:: ./$tool_name --help :cwd: ../../$tools_dir -''') +""" +) -_script_template = Template(''' +_script_template = Template( + """ .. _$script_name: #################################################### -$script_name +$script_name #################################################### **$script_name** is a script in CIMEROOT/scripts. @@ -60,13 +66,15 @@ .. command-output:: ./$script_name --help :cwd: ../../$scripts_dir -''') +""" +) -_tmpl_template = Template(''' +_tmpl_template = Template( + """ .. _$tmpl_name: #################################################### -$tmpl_name +$tmpl_name #################################################### **$tmpl_name** is a script template in CIMEROOT/config/cesm/machines @@ -79,29 +87,32 @@ .. command-output:: ./$tmpl_name --help :cwd: ./temp_files -''') +""" +) # ------------------------------------------------------------------------------- # User input # ------------------------------------------------------------------------------- -def commandline_options(): - """Process the command line arguments. - """ +def commandline_options(): + """Process the command line arguments.""" parser = argparse.ArgumentParser( - description='Auto generate rst documentation for cime/scripts/Tools.') + description="Auto generate rst documentation for cime/scripts/Tools." 
+ ) - parser.add_argument('--backtrace', action='store_true', - help='show exception backtraces as extra debugging ' - 'output') + parser.add_argument( + "--backtrace", + action="store_true", + help="show exception backtraces as extra debugging " "output", + ) - parser.add_argument('--debug', action='store_true', - help='extra debugging output') + parser.add_argument("--debug", action="store_true", help="extra debugging output") - parser.add_argument('--config', nargs=1, default=['tools_autodoc.cfg'], - help='path to config file') + parser.add_argument( + "--config", nargs=1, default=["tools_autodoc.cfg"], help="path to config file" + ) options = parser.parse_args() return options @@ -111,9 +122,7 @@ def commandline_options(): # read the tools_autodoc.cfg configuration file # ------------------------------------------------------------------------------- def read_config_file(filename): - """Read the configuration file and process - - """ + """Read the configuration file and process""" print("tools_autodoc.py - Reading configuration file : {0}".format(filename)) cfg_file = os.path.abspath(filename) @@ -125,29 +134,30 @@ def read_config_file(filename): return config + # ------------------------------------------------------------------------------- # create the rst files for the Tools configuration settings # ------------------------------------------------------------------------------- def get_tools(config, doc_dir): # get the input tools dir - tools_dir = config.get('tools','tools_dir') + tools_dir = config.get("tools", "tools_dir") tools_dir = os.path.abspath(tools_dir) - + # get list of files to exclude - exclude_files = config.get('tools','exclude_files').split() + exclude_files = config.get("tools", "exclude_files").split() # get list of files to exclude - exclude_ext = config.get('tools','exclude_ext').split() + exclude_ext = config.get("tools", "exclude_ext").split() # get list of files to exclude - exclude_prefix = config.get('tools','exclude_prefix').split() + exclude_prefix = config.get("tools", "exclude_prefix").split() # get a list of all files in the tools_dir all_files = next(os.walk(tools_dir))[2] tools_files = list() - # exclude files + # exclude files for f in all_files: f = f.strip() include = True @@ -163,10 +173,10 @@ def get_tools(config, doc_dir): if include: tools_files.append(f) - tools_dir = config.get('tools','tools_dir') + tools_dir = config.get("tools", "tools_dir") for f in tools_files: - tool_file = os.path.join(doc_dir, '{0}.rst'.format(f)) - with open(tool_file,'w') as tf: + tool_file = os.path.join(doc_dir, "{0}.rst".format(f)) + with open(tool_file, "w") as tf: contents = _tool_template.substitute(tool_name=f, tools_dir=tools_dir) tf.write(contents) @@ -179,23 +189,23 @@ def get_tools(config, doc_dir): def get_scripts(config, doc_dir): # get the input scripts dir - scripts_dir = config.get('scripts','scripts_dir') + scripts_dir = config.get("scripts", "scripts_dir") scripts_dir = os.path.abspath(scripts_dir) # get list of files to exclude - exclude_files = config.get('scripts','exclude_files').split() + exclude_files = config.get("scripts", "exclude_files").split() # get list of files to exclude - exclude_ext = config.get('scripts','exclude_ext').split() + exclude_ext = config.get("scripts", "exclude_ext").split() # get list of files to exclude - exclude_prefix = config.get('scripts','exclude_prefix').split() + exclude_prefix = config.get("scripts", "exclude_prefix").split() # get a list of all files in the scripts_dir all_files = 
next(os.walk(scripts_dir))[2] scripts_files = list() - # exclude files + # exclude files for f in all_files: f = f.strip() include = True @@ -211,39 +221,42 @@ def get_scripts(config, doc_dir): if include: scripts_files.append(f) - scripts_dir = config.get('scripts','scripts_dir') + scripts_dir = config.get("scripts", "scripts_dir") for f in scripts_files: - script_file = os.path.join(doc_dir, '{0}.rst'.format(f)) - with open(script_file,'w') as tf: - contents = _script_template.substitute(script_name=f, scripts_dir=scripts_dir) + script_file = os.path.join(doc_dir, "{0}.rst".format(f)) + with open(script_file, "w") as tf: + contents = _script_template.substitute( + script_name=f, scripts_dir=scripts_dir + ) tf.write(contents) return scripts_files + # ------------------------------------------------------------------------------- # get the template files and substitute the {{...}} patterns so they can be # run with the --help command # ------------------------------------------------------------------------------- def get_templates(config, doc_dir): - # get the input template dir - templates_dir = config.get('templates','templates_dir') + # get the input template dir + templates_dir = config.get("templates", "templates_dir") templates_dir = os.path.abspath(templates_dir) # get list of files to exclude - exclude_files = config.get('templates','exclude_files').split() + exclude_files = config.get("templates", "exclude_files").split() # get list of files to exclude - exclude_ext = config.get('templates','exclude_ext').split() + exclude_ext = config.get("templates", "exclude_ext").split() # get list of files to exclude - exclude_prefix = config.get('templates','exclude_prefix').split() + exclude_prefix = config.get("templates", "exclude_prefix").split() # get a list of all files in the templates_dir all_files = next(os.walk(templates_dir))[2] template_files = list() - # exclude files + # exclude files for f in all_files: f = f.strip() include = True @@ -261,75 +274,86 @@ def get_templates(config, doc_dir): # create temporary files with the {{..}} stripped out temp_files = list() - temp_dir = '{0}/temp_files'.format(doc_dir) + temp_dir = "{0}/temp_files".format(doc_dir) if not os.path.exists(temp_dir): os.makedirs(temp_dir) for fname in template_files: - with open(os.path.join(templates_dir,fname),'r') as f: + with open(os.path.join(templates_dir, fname), "r") as f: content = f.read() content = content.replace("{{ batchdirectives }}", "# {{ batchdirective }}", 1) - content = content.replace("os.chdir( '{{ caseroot }}')", "# os.chdir( '{{ caseroot }}')", 1) - content = content.replace('os.path.join("{{ cimeroot }}", "scripts", "Tools")', 'os.path.join("../../../..","scripts", "Tools")',1) + content = content.replace( + "os.chdir( '{{ caseroot }}')", "# os.chdir( '{{ caseroot }}')", 1 + ) + content = content.replace( + 'os.path.join("{{ cimeroot }}", "CIME", "Tools")', + 'os.path.join("../../../..","CIME", "Tools")', + 1, + ) # create a temporary file - tf = fname.split('.') - tfname = '.'.join(tf[1:]) - if tfname == 'st_archive': - tfname = 'case.st_archive' + tf = fname.split(".") + tfname = ".".join(tf[1:]) + if tfname == "st_archive": + tfname = "case.st_archive" tfile = os.path.join(temp_dir, tfname) - with open(tfile, 'w') as tf: + with open(tfile, "w") as tf: tf.write(content) - + temp_files.append(tfname) for f in temp_files: - tmpl_file = os.path.join(doc_dir, '{0}.rst'.format(f)) - with open(tmpl_file,'w') as tf: + tmpl_file = os.path.join(doc_dir, "{0}.rst".format(f)) + with 
open(tmpl_file, "w") as tf: contents = _tmpl_template.substitute(tmpl_name=f, temp_dir=temp_dir) tf.write(contents) tf.close() - exefile = os.path.join(doc_dir, 'temp_files/{0}'.format(f)) + exefile = os.path.join(doc_dir, "temp_files/{0}".format(f)) st = os.stat(exefile) os.chmod(exefile, st.st_mode | stat.S_IEXEC) return temp_files + # ------------------------------------------------------------------------------- # main # ------------------------------------------------------------------------------- + def main(options): all_files = list() config = read_config_file(options.config[0]) # get the output doc dir - doc_dir = config.get('doc','doc_dir') + doc_dir = config.get("doc", "doc_dir") doc_dir = os.path.abspath(doc_dir) # gather the files from different locations in the CIMEROOT tools_files = get_tools(config, doc_dir) scripts_files = get_scripts(config, doc_dir) - template_files = get_templates(config, doc_dir) + # Disabling templates, they are no longer included in + # config/cesm/machines + # template_files = get_templates(config, doc_dir) - all_files = tools_files + scripts_files + template_files + # all_files = tools_files + scripts_files + template_files + all_files = tools_files + scripts_files all_files.sort() # copy the index.rst.template to index.rst - doc_dir = config.get('doc','doc_dir') + doc_dir = config.get("doc", "doc_dir") doc_dir = os.path.abspath(doc_dir) - index_template = config.get('doc','index_template') - index_rst_file = index_template.split('.')[0:-1] - index_template = os.path.join(doc_dir,index_template) - index_rst_file = '.'.join(index_rst_file) - index_rst_file = os.path.join(doc_dir,index_rst_file) + index_template = config.get("doc", "index_template") + index_rst_file = index_template.split(".")[0:-1] + index_template = os.path.join(doc_dir, index_template) + index_rst_file = ".".join(index_rst_file) + index_rst_file = os.path.join(doc_dir, index_rst_file) shutil.copy2(index_template, index_rst_file) # open index_rst_file in append mode - with open(index_rst_file,'a') as index_rst: + with open(index_rst_file, "a") as index_rst: for f in all_files: - index_rst.write(' {0}\n'.format(f)) + index_rst.write(" {0}\n".format(f)) return 0 diff --git a/docker/.cime/config_machines.v2.xml b/docker/.cime/config_machines.v2.xml new file mode 100644 index 00000000000..242150d750c --- /dev/null +++ b/docker/.cime/config_machines.v2.xml @@ -0,0 +1,44 @@ + + + + + Docker + + LINUX + + gnu,gnuX + openmpi + CIME + /storage/timings + CIME + /storage/cases + /storage/inputdata + /storage/inputdata-clmforc + /storage/archive/$CASE + /storage/baselines/$COMPILER + /storage/tools/cprnc + make + 4 + e3sm_developer + none + boutte3@llnl.gov + 8 + 8 + + mpiexec + + -n {{ total_tasks }} + --oversubscribe + + + + $CASEROOT/run + $CASEROOT/bld + + 1 + 1 + /opt/conda + /opt/conda + + + diff --git a/docker/.cime/config_machines.v3.xml b/docker/.cime/config_machines.v3.xml new file mode 100644 index 00000000000..98a0cba3f66 --- /dev/null +++ b/docker/.cime/config_machines.v3.xml @@ -0,0 +1,7 @@ + + + + + docker + + diff --git a/docker/.cime/docker.cmake b/docker/.cime/docker.cmake new file mode 100644 index 00000000000..c60655fd6be --- /dev/null +++ b/docker/.cime/docker.cmake @@ -0,0 +1,15 @@ +string(APPEND CXXFLAGS " -std=c++14") +string(APPEND CXX_LIBS " -lstdc++") + +# DEBUGGING variables +# get_cmake_property(_variableNames VARIABLES) +# foreach (_variableName ${_variableNames}) +# message("${_variableName}=${${_variableName}}") +# endforeach() +# message( FATAL_ERROR "EXIT") + 
+# required for grid generation tests that use make +if (CMAKE_SOURCE_DIR MATCHES "^.*TestGridGeneration.*$") +string(APPEND FFLAGS " -I/opt/conda/include") +string(APPEND SLIBS " -L/opt/conda/lib -lnetcdf -lnetcdff") +endif() diff --git a/docker/.cime/docker/config_machines.xml b/docker/.cime/docker/config_machines.xml new file mode 100644 index 00000000000..e15fd7eaa49 --- /dev/null +++ b/docker/.cime/docker/config_machines.xml @@ -0,0 +1,39 @@ + + Docker + LINUX + + gnu,gnuX + openmpi + CIME + /storage/timings + CIME + /storage/cases + /storage/inputdata + /storage/inputdata-clmforc + /storage/archive/$CASE + /storage/baselines/$COMPILER + /storage/tools/cprnc + make + 4 + e3sm_developer + none + boutte3@llnl.gov + 8 + 8 + + mpiexec + + -n {{ total_tasks }} + --oversubscribe + + + + $CASEROOT/run + $CASEROOT/bld + + 1 + 1 + /opt/conda + /opt/conda + + diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000000..58f68286f50 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,112 @@ +ARG BASE_TAG=24.3.0-0 +FROM condaforge/miniforge3:${BASE_TAG} AS base + +SHELL ["/bin/bash", "-c"] + +# First layer as they never change, required for E3SM testing, TODO: fix in unittesting as well +RUN mkdir -p \ + /storage/inputdata/cpl/gridmaps/oQU240 \ + /storage/inputdata/share/domains \ + /storage/timings && \ + wget -O /storage/inputdata/cpl/gridmaps/oQU240/map_oQU240_to_ne4np4_aave.160614.nc https://portal.nersc.gov/project/e3sm/inputdata/cpl/gridmaps/oQU240/map_oQU240_to_ne4np4_aave.160614.nc && \ + wget -O /storage/inputdata/share/domains/domain.ocn.ne4np4_oQU240.160614.nc https://portal.nersc.gov/project/e3sm/inputdata/share/domains/domain.ocn.ne4np4_oQU240.160614.nc && \ + wget -O /storage/inputdata/share/domains/domain.lnd.ne4np4_oQU240.160614.nc https://portal.nersc.gov/project/e3sm/inputdata/share/domains/domain.lnd.ne4np4_oQU240.160614.nc + +COPY cime.yaml /cime.yaml + +RUN mamba env update -f /cime.yaml && \ + rm -rf /opt/conda/pkgs/* && \ + ln -sf /opt/conda/bin/x86_64-conda-linux-gnu-ar /opt/conda/bin/ar && \ + ln -sf /opt/conda/bin/x86_64-conda-linux-gnu-ranlib /opt/conda/bin/ranlib + +ARG PNETCDF_VERSION=1.12.3 +ENV PNETCDF_VERSION=${PNETCDF_VERSION} + +# Build pnetcdf +RUN curl -L -k -o "${PWD}/pnetcdf.tar.gz" \ + https://parallel-netcdf.github.io/Release/pnetcdf-${PNETCDF_VERSION}.tar.gz && \ + mkdir "${PWD}/pnetcdf" && \ + tar -xvf "${PWD}/pnetcdf.tar.gz" -C "${PWD}/pnetcdf" --strip-components=1 && \ + rm -rf "${PWD}/pnetcdf.tar.gz" && \ + cd "${PWD}/pnetcdf" && \ + source /opt/conda/etc/profile.d/conda.sh && \ + conda activate base && \ + ./configure --prefix /opt/conda --disable-cxx --enable-shared \ + MPICC=/opt/conda/bin/mpicc \ + MPICXX=/opt/conda/bin/mpicxx \ + MPIF77=/opt/conda/bin/mpif77 \ + MPIF90=/opt/conda/bin/mpif90 && \ + make -j4 && \ + make install && \ + rm -rf "${PWD}/pnetcdf" + +ARG CIME_BRANCH=master +ARG CIME_REPO=https://github.com/esmci/cime + +# Separate layer, it's most likely to change +RUN git clone -b ${CIME_BRANCH} ${CIME_REPO} /src/cime + +# General variables +ENV USER=root +ENV LOGNAME=root +ENV ESMFMKFILE=/opt/conda/lib/esmf.mk +ENV SRC_PATH=/src + +WORKDIR /src + +COPY .cime /root/.cime +COPY entrypoint.sh /entrypoint.sh + +ENTRYPOINT [ "/entrypoint.sh" ] + +FROM base AS slurm + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get install -y --no-install-recommends \ + munge slurmd slurm-client slurmctld && \ + rm -rf /var/lib/apt/lists/* && \ + sed -i"" "s/\(.*\)[^<]*\(<\/BATCH_SYSTEM>\)/\1slurm\2/g" 
~/.cime/config_machines.xml + +COPY slurm/slurm.conf /etc/slurm-llnl/ +COPY slurm/config_batch.xml /root/.cime/ +COPY slurm/entrypoint_batch.sh /entrypoint_batch.sh + +FROM base AS pbs + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get install -y --no-install-recommends \ + curl ca-certificates software-properties-common \ + gcc make libtool libhwloc-dev libx11-dev libxt-dev libedit-dev \ + libical-dev ncurses-dev python-dev tcl-dev tk-dev swig libexpat-dev libssl-dev \ + libxext-dev libxft-dev autoconf automake \ + postgresql-12 postgresql-server-dev-all postgresql-contrib \ + expat libedit2 python3 sendmail-bin sudo tcl tk && \ + add-apt-repository ppa:deadsnakes/ppa && \ + apt-get update && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get install -y python3.7 python3.7-dev && \ + rm -rf /var/lib/apt/lists/* + +RUN mkdir /src && pushd /src && \ + curl -LO https://github.com/openpbs/openpbs/archive/refs/tags/v20.0.1.tar.gz && \ + tar -xvf v20.0.1.tar.gz && \ + cd openpbs-20.0.1 && \ + sed -i"" 's/\(#include "list_link.h"\)/\1\n#include /' /src/openpbs-20.0.1/src/lib/Libifl/list_link.c && \ + export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin && \ + ./autogen.sh && \ + PYTHON=/usr/bin/python3.7 \ + CFLAGS="`/usr/bin/python3.7m-config --cflags`" \ + LDFLAGS="`/usr/bin/python3.7m-config --ldflags`" \ + LIBS="-lpthread -lm -lpython3.7m" \ + ./configure --prefix=/opt/pbs && \ + make -j8 && \ + make install && \ + popd && \ + rm -rf /src && \ + sed -i"" "s/\(.*\)[^<]*\(<\/BATCH_SYSTEM>\)/\1pbs\2/g" ~/.cime/config_machines.xml + +COPY pbs/pbs.conf /etc/ +COPY pbs/config_batch.xml /root/.cime/ +COPY pbs/entrypoint_batch.sh /entrypoint_batch.sh diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000000..d1e5ad53608 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,114 @@ +# Docker development/testing container +This container is built for development and testing purposes. + +The default base image is version `24.3.0-0` of `condaforge/miniforge3`. + +The supported compiler is `gnu` provided by `conda-forge`. + +The `OpenMPI` version of libraries is installed by default. + +## Build the container +```bash +docker build -t {image}:{tag} --target {target} docker/ + +# e.g. building the base image +docker build -t cime:latest --target base docker/ +``` + +### Customize the container +When building the container some features can be customized. Multiple `--build-arg` arguments can be passed. + +```bash +docker build -t {image}:{tag} --build-arg {name}={value} docker/ +``` + +| Argument | Description | Default +| -------- | ----------- | ------- +| BASE_TAG | Version of the condaforge/miniforge3 image used as a base | 24.3.0-0 +| PNETCDF_VERSION | Parallel NetCDF version to build | 1.12.3 +| LIBNETCDF_VERSION | Version of libnetcdf, the default will install the latest | 4.8.1 +| NETCDF_FORTRAN_VERSION | Version of netcdf-fortran, the default will install the latest | 4.5.4 +| ESMF_VERSION | Version of ESMF, the default will install the latest | 8.2.0 + +## Targets +There are three possible build targets in the Dockerfile. The `slurm` and `pbs` targets are built on top of the `base`. + +When running either `slurm` or `pbs` it's important to use the `--hostname docker` argument since both batch systems are looking for a host named docker. + +| Target | Description +| ------ | ----------- +| base | Base image with no batch system. +| slurm | Slurm batch system with configuration and single queue. 
+| pbs | PBS batch system with configuration and single queue. + +```bash +docker build -t {image}:{tag} --target {target} docker/ + +# e.g. building the slurm image +docker build -t cime:latest --target slurm docker/ +``` + +## Running the container +The default environment is similar to the one used by GitHub Actions. It will clone CIME into `/src/cime`, set `CIME_MODEL=cesm` and run CESM's `checkout_externals`. This will create a minimal base environment to run both unit and system tests. + +The `CIME_MODEL` environment variable will change the environment that is created. + +Setting it to `E3SM` will clone E3SM into `/src/E3SM`, checkout the submodules and update the CIME repository using `CIME_REPO` and `CIME_BRANCH`. + +Setting it to `CESM` will clone CESM into `/src/CESM`, run `checkout_externals` and update the CIME repository using `CIME_REPO` and `CIME_BRANCH`. + +The container can further be modified using the environment variables defined below. + +```bash +docker run -it --name cime --hostname docker cime:latest bash + +# Run with E3SM +docker run -it --name cime --hostname docker -e CIME_MODEL=e3sm cime:latest bash +``` + +> It's recommended when running the container to pass `--hostname docker` as it will match the custom machine defined in `config_machines.xml`. If this is omitted, `--machine docker` must be passed to CIME commands in order to use the correct machine definition. + +### Environment variables + +Environment variables to modify the container environment. + +| Name | Description | Default | +| ---- | ----------- | ------- | +| INIT | Set to false to skip init | true | +| GIT_SHALLOW | Performs shallow checkouts, to save time | false | +| UPDATE_CIME | Setting this will cause the CIME repository to be updated using `CIME_REPO` and `CIME_BRANCH` | "false" | +| CIME_MODEL | Setting this will change which environment is loaded | | +| CIME_REPO | CIME repository URL | https://github.com/ESMCI/cime | +| CIME_BRANCH | CIME branch that will be cloned | master | +| E3SM_REPO | E3SM repository URL | https://github.com/E3SM-Project/E3SM | +| E3SM_BRANCH | E3SM branch that will be cloned | master | +| CESM_REPO | CESM repository URL | https://github.com/ESCOMP/CESM | +| CESM_BRANCH | CESM branch that will be cloned | master | + +## Persisting data + +The `config_machines.xml` definition has been set up to provide persistence for inputdata, cases, archives and tools. The following paths can be mounted as volumes to provide persistence. + +* /storage/inputdata +* /storage/cases +* /storage/archives +* /storage/tools + +```bash +docker run -it -v {hostpath}:{container_path} cime:latest bash + +e.g. +docker run -it -v ${PWD}/data-cache:/storage/inputdata cime:latest bash +``` + +It's also possible to persist the source git repositories. +```bash +docker run -it -v ${PWD}/src:/src cime:latest bash +``` + +Local git repositories can be mounted as well; see the examples below. 
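+
+For a fuller picture, here is a hedged sketch that simply combines options already shown above (model selection, the `docker` hostname, and a persistent inputdata volume); the host-side `data-cache` path is only an example:
+
+```bash
+# Illustrative combination (not a tested recipe): E3SM environment, persistent
+# inputdata cache, and the "docker" hostname expected by config_machines.xml.
+docker run -it --name cime --hostname docker \
+    -e CIME_MODEL=e3sm \
+    -v ${PWD}/data-cache:/storage/inputdata \
+    cime:latest bash
+```
+
+In the same spirit, a local checkout can be mounted directly over the container's copy of CIME or E3SM: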
+```bash +docker run -v ${PWD}:/src/cime cime:latest bash + +docker run -v ${PWD}:/src/E3SM cime:latest bash +``` diff --git a/docker/cime.yaml b/docker/cime.yaml new file mode 100644 index 00000000000..cd25a71118e --- /dev/null +++ b/docker/cime.yaml @@ -0,0 +1,30 @@ +name: base +channels: + - conda-forge +dependencies: + - cmake + - make + - wget + - curl + - subversion + - m4 + - pkg-config + - pytest + - pytest-cov + - pyyaml + - vim + - rsync + - openssh + - lapack + - blas + - libnetcdf=4.9.2=*openmpi* + - netcdf-fortran=*=*openmpi* + - esmf=*=*openmpi* + - gcc_linux-64=12.* + - gxx_linux-64=12.* + - gfortran_linux-64=12.* + - openmpi-mpifort + - gcc + - gxx + - gfortran +prefix: /opt/conda diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh new file mode 100755 index 00000000000..ae5007a68a3 --- /dev/null +++ b/docker/entrypoint.sh @@ -0,0 +1,146 @@ +#!/bin/bash + +DEBUG="${DEBUG:-false}" +SRC_PATH="${SRC_PATH:-`pwd`}" +# Treeless clone +GIT_FLAGS="${GIT_FLAGS:---filter=tree:0}" +# Shallow submodule checkout +GIT_SUBMODULE_FLAGS="${GIT_SUBMODULE_FLAGS:---recommend-shallow}" +SKIP_MODEL_SETUP="${SKIP_MODEL_SETUP:-false}" +CIME_REMOTE="${CIME_REMOTE:-https://github.com/ESMCI/cime}" +CIME_BRANCH="${CIME_BRANCH:-master}" +SKIP_CIME_UPDATE="${SKIP_CIME_UPDATE:-false}" + +echo "DEBUG = ${DEBUG}" +echo "SRC_PATH = ${SRC_PATH}" +echo "GIT_FLAGS = ${GIT_FLAGS}" +echo "GIT_SUBMODULE_FLAGS = ${GIT_SUBMODULE_FLAGS}" +echo "SKIP_MODEL_SETUP = ${SKIP_MODEL_SETUP}" +echo "CIME_REMOTE = ${CIME_REMOTE}" +echo "CIME_BRANCH = ${CIME_BRANCH}" +echo "SKIP_CIME_UPDATE = ${SKIP_CIME_UPDATE}" + +function to_lowercase() { + echo "${!1}" | tr -s '[:upper:]' '[:lower:]' +} + +if [[ "$(to_lowercase DEBUG)" == "true" ]]; then + set -x +fi + +####################################### +# Fixes mct/mpeu to use ARFLAGS environment variable +# +# TODO need to make an official PR this is temporary. +####################################### +function fix_mct_arflags() { + local mct_path="${1}" + + # TODO make PR to fix + if [[ ! -e "${mct_path}/mct/Makefile.bak" ]] + then + echo "Fixing AR variable in ${mct_path}/mct/Makefile" + + sed -i".bak" "s/\$(AR)/\$(AR) \$(ARFLAGS)/g" "${mct_path}/mct/Makefile" + fi + + if [[ ! -e "${mct_path}/mpeu/Makefile.bak" ]] + then + echo "Fixing AR variable in ${mct_path}/mpeu/Makefile" + + sed -i".bak" "s/\$(AR)/\$(AR) \$(ARFLAGS)/g" "${mct_path}/mpeu/Makefile" + fi +} + +####################################### +# Fixes gitmodules to use https rather than ssh +####################################### +function fix_gitmodules() { + sed -i".bak" "s/git@github.com:/https:\/\/github.com\//g" "${1}/.gitmodules" +} + +if [[ "${SKIP_MODEL_SETUP}" == "false" ]]; then + if [[ "${CIME_MODEL}" == "e3sm" ]]; then + echo "Setting up E3SM" + + [[ ! -e "${SRC_PATH}/E3SM" ]] && git clone -b ${E3SM_BRANCH:-master} ${GIT_FLAGS} ${E3SM_REPO:-https://github.com/E3SM-Project/E3SM} "${SRC_PATH}/E3SM" + + pushd "${SRC_PATH}/E3SM" + + git config --global --add safe.directory "*" + + # fix E3SM gitmodules + fix_gitmodules "${PWD}" + + # checkout submodules + git submodule update --init "${GIT_SUBMODULE_FLAGS}" + + # fix mct arflags flags + fix_mct_arflags "${SRC_PATH}/E3SM/externals/mct" + + git status + git submodule status + + pushd cime + elif [[ "${CIME_MODEL}" == "cesm" ]]; then + echo "Setting up CESM" + + [[ !
-e "${SRC_PATH}/CESM" ]] && git clone -b ${CESM_BRANCH:-cesm3.0-alphabranch} ${GIT_FLAGS} ${E3SM_REPO:-https://github.com/ESCOMP/CESM} "${SRC_PATH}/CESM" + + pushd "${SRC_PATH}/CESM" + + git config --global --add safe.directory "*" + + ./bin/git-fleximod update + + git status + git submodule status + + pushd cime + fi +fi + +git config --global --add safe.directory "`pwd`" + +if [[ "$(to_lowercase SKIP_CIME_UPDATE)" == "false" ]]; then + fix_gitmodules "${PWD}" + + # Expect current directory to be CIME + git remote set-url origin "${CIME_REMOTE}" + git remote set-branches origin "*" + git fetch origin + git checkout "${CIME_BRANCH}" + + # Sync submodules + git submodule update --init +fi + +git status +git submodule status + +if [[ "${CIME_MODEL}" == "e3sm" ]]; then + # link v2 config_machines + ln -sf /root/.cime/config_machines.v2.xml /root/.cime/config_machines.xml +elif [[ "${CIME_MODEL}" == "cesm" ]]; then + # link v3 config_machines + ln -sf /root/.cime/config_machines.v3.xml /root/.cime/config_machines.xml +fi + +# load batch specific entrypoint +if [[ -e "/entrypoint_batch.sh" ]] +then + echo "Sourcing batch entrypoint" + + . "/entrypoint_batch.sh" +fi + +function create_environment() { + mamba create -n cime-$1 python=$1 + mamba env update -n cime-$1 -f /cime.yaml + + source /opt/conda/etc/profile.d/conda.sh + + conda activate cime-$1 +} + +exec "${@}" diff --git a/docker/pbs/config_batch.xml b/docker/pbs/config_batch.xml new file mode 100644 index 00000000000..b019a4e2812 --- /dev/null +++ b/docker/pbs/config_batch.xml @@ -0,0 +1,9 @@ + + + + + workq + workq + + + diff --git a/docker/pbs/entrypoint_batch.sh b/docker/pbs/entrypoint_batch.sh new file mode 100755 index 00000000000..afcb11fb758 --- /dev/null +++ b/docker/pbs/entrypoint_batch.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +/opt/pbs/libexec/pbs_postinstall + +service pbs start + +. /etc/profile.d/pbs.sh + +qmgr -c "set server flatuid=true" +qmgr -c "set server acl_roots+=root@*" +qmgr -c "set server operators+=root@*" +qmgr -c "set server job_history_enable=true" diff --git a/docker/pbs/pbs.conf b/docker/pbs/pbs.conf new file mode 100644 index 00000000000..a3efa3c8508 --- /dev/null +++ b/docker/pbs/pbs.conf @@ -0,0 +1,9 @@ +PBS_SERVER=docker +PBS_START_SERVER=1 +PBS_START_SCHED=1 +PBS_START_COMM=1 +PBS_START_MOM=1 +PBS_EXEC=/opt/pbs +PBS_HOME=/var/spool/pbs +PBS_CORE_LIMIT=unlimited +PBS_SCP=/opt/conda/bin/scp diff --git a/docker/slurm/config_batch.xml b/docker/slurm/config_batch.xml new file mode 100644 index 00000000000..07cdd64a842 --- /dev/null +++ b/docker/slurm/config_batch.xml @@ -0,0 +1,12 @@ + + + + + -w docker + + + long + short + + + diff --git a/docker/slurm/entrypoint_batch.sh b/docker/slurm/entrypoint_batch.sh new file mode 100755 index 00000000000..d0436371ad9 --- /dev/null +++ b/docker/slurm/entrypoint_batch.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +service munge start +service slurmctld start +service slurmd start diff --git a/docker/slurm/slurm.conf b/docker/slurm/slurm.conf new file mode 100644 index 00000000000..f4148286226 --- /dev/null +++ b/docker/slurm/slurm.conf @@ -0,0 +1,152 @@ +# slurm.conf file generated by configurator.html. +# Put this file on all nodes of your cluster. +# See the slurm.conf man page for more information. 
+ +ClusterName=docker +SlurmctldHost=docker +#SlurmctldHost= + +#DisableRootJobs=NO +#EnforcePartLimits=NO +#Epilog= +#EpilogSlurmctld= +#FirstJobId=1 +#MaxJobId=67043328 +#GresTypes= +#GroupUpdateForce=0 +#GroupUpdateTime=600 +#JobFileAppend=0 +#JobRequeue=1 +#JobSubmitPlugins=lua +#KillOnBadExit=0 +#LaunchType=launch/slurm +#Licenses=foo*4,bar +#MailProg=/bin/mail +#MaxJobCount=10000 +#MaxStepCount=40000 +#MaxTasksPerNode=512 +MpiDefault=none +#MpiParams=ports=#-# +#PluginDir= +#PlugStackConfig= +#PrivateData=jobs +ProctrackType=proctrack/pgid +#Prolog= +#PrologFlags= +#PrologSlurmctld= +#PropagatePrioProcess=0 +#PropagateResourceLimits= +#PropagateResourceLimitsExcept= +#RebootProgram= +ReturnToService=1 +SlurmctldPidFile=/var/run/slurmctld.pid +SlurmctldPort=6817 +SlurmdPidFile=/var/run/slurmd.pid +SlurmdPort=6818 +SlurmdSpoolDir=/var/spool/slurmd +SlurmUser=root +#SlurmdUser=root +#SrunEpilog= +#SrunProlog= +StateSaveLocation=/var/spool/slurmctld +SwitchType=switch/none +#TaskEpilog= +TaskPlugin=task/affinity +#TaskProlog= +#TopologyPlugin=topology/tree +#TmpFS=/tmp +#TrackWCKey=no +#TreeWidth= +#UnkillableStepProgram= +#UsePAM=0 + + +# TIMERS +#BatchStartTimeout=10 +#CompleteWait=0 +#EpilogMsgTime=2000 +#GetEnvTimeout=2 +#HealthCheckInterval=0 +#HealthCheckProgram= +InactiveLimit=0 +KillWait=30 +#MessageTimeout=10 +#ResvOverRun=0 +MinJobAge=300 +#OverTimeLimit=0 +SlurmctldTimeout=120 +SlurmdTimeout=300 +#UnkillableStepTimeout=60 +#VSizeFactor=0 +Waittime=0 + + +# SCHEDULING +#DefMemPerCPU=0 +#MaxMemPerCPU=0 +#SchedulerTimeSlice=30 +SchedulerType=sched/backfill +SelectType=select/cons_tres +SelectTypeParameters=CR_Core + + +# JOB PRIORITY +#PriorityFlags= +#PriorityType=priority/basic +#PriorityDecayHalfLife= +#PriorityCalcPeriod= +#PriorityFavorSmall= +#PriorityMaxAge= +#PriorityUsageResetPeriod= +#PriorityWeightAge= +#PriorityWeightFairshare= +#PriorityWeightJobSize= +#PriorityWeightPartition= +#PriorityWeightQOS= + + +# LOGGING AND ACCOUNTING +#AccountingStorageEnforce=0 +#AccountingStorageHost= +#AccountingStoragePass= +#AccountingStoragePort= +AccountingStorageType=accounting_storage/none +#AccountingStorageUser= +#AccountingStoreFlags= +#JobCompHost= +#JobCompLoc= +#JobCompPass= +#JobCompPort= +JobCompType=jobcomp/none +#JobCompUser= +#JobContainerType=job_container/none +JobAcctGatherFrequency=30 +JobAcctGatherType=jobacct_gather/linux +SlurmctldDebug=info +SlurmctldLogFile=/var/log/slurmctld.log +SlurmdDebug=info +SlurmdLogFile=/var/log/slurmd.log +#SlurmSchedLogFile= +#SlurmSchedLogLevel= +#DebugFlags= + + +# POWER SAVE SUPPORT FOR IDLE NODES (optional) +#SuspendProgram= +#ResumeProgram= +#SuspendTimeout= +#ResumeTimeout= +#ResumeRate= +#SuspendExcNodes= +#SuspendExcParts= +#SuspendRate= +#SuspendTime= + + +# COMPUTE NODES +NodeName=docker CPUs=4 State=UNKNOWN + +# Partitions +PartitionName=DEFAULT Nodes=ALL Shared=FORCE:1 MaxTime=INFINITY State=UP +PartitionName=long Nodes=ALL MaxTime=01:00:00 Default=YES +PartitionName=short Nodes=ALL MaxTime=00:30:00 Default=NO diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000000..9787c3bdf00 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" diff --git a/scripts/Tools/Makefile b/scripts/Tools/Makefile deleted file mode 100644 index 9a24fa4d5ce..00000000000 --- a/scripts/Tools/Makefile +++ /dev/null @@ -1,1070 +0,0 @@ -#=============================================================================== -# -# Common 
Makefile: a framework for building all CIME components and more -# -#=============================================================================== - -# Set up special characters -null := -comma := , - -# Load dependency search path. -dirs := . -dirs += $(shell cat Filepath) - -cpp_dirs := $(dirs) -# Add INCROOT to path for Depends and Include -MINCROOT := -ifdef INCROOT - cpp_dirs += $(INCROOT) - MINCROOT := $(INCROOT) -endif - -# Expand any tildes in directory names. Change spaces to colons. -VPATH := $(foreach dir,$(cpp_dirs),$(wildcard $(dir))) -VPATH := $(subst $(space),:,$(VPATH)) - -RM := rm -CP := cp - -exec_se: $(EXEC_SE) Depends -complib: $(COMPLIB) Depends - -# Determine whether to compile threaded or not -# Set the THREADDIR for the shared build -# based on the threaded build status -ifeq ($(strip $(SMP)),TRUE) - THREADDIR = threads - compile_threaded = TRUE -else - ifeq ($(strip $(SMP_PRESENT)),TRUE) - THREADDIR = threads - compile_threaded = TRUE - else - THREADDIR = nothreads - compile_threaded = FALSE - endif -endif - -# set the debug directory based on the debug status -ifeq ($(strip $(DEBUG)),TRUE) - DEBUGDIR = debug -else - DEBUGDIR = nodebug -endif - -ifeq ($(strip $(USE_ESMF_LIB)), TRUE) - ESMFDIR = esmf -else - ESMFDIR = noesmf -endif - -# Determine whether any C++ code will be included in the build; -# currently, C++ code is included if and only if we're linking to the -# trilinos library or the Albany library. -USE_CXX = FALSE -ifeq ($(strip $(USE_TRILINOS)), TRUE) - USE_CXX = TRUE -endif -ifeq ($(strip $(USE_ALBANY)), TRUE) - USE_CXX = TRUE -endif -ifeq ($(strip $(USE_KOKKOS)), TRUE) - USE_CXX = TRUE -endif - -ifeq ($(strip $(USE_FMS)), TRUE) - SLIBS += -lfms -endif - -ifndef MOD_SUFFIX - MOD_SUFFIX := mod -endif - -#=============================================================================== -# set CPP options (must use this before any flags or cflags settings) -#=============================================================================== - -CPPDEFS := $(USER_CPPDEFS) -D$(OS) - -include $(CASEROOT)/Macros.make - -# Unless DEBUG mode is enabled, use NDEBUG to turn off assert statements. -ifeq ($(strip $(DEBUG)),TRUE) - # e3sm still has components that cannot build with -DDEBUG - ifeq ($(CIME_MODEL),cesm) - CPPDEFS += -DDEBUG - endif -else - CPPDEFS += -DNDEBUG -endif - -# USE_ESMF_LIB is currently only defined in env_build.xml -ifeq ($(USE_ESMF_LIB), TRUE) - CPPDEFS += -DUSE_ESMF_LIB -endif - -ifeq ($(COMP_INTERFACE), nuopc) - CPPDEFS += -DNUOPC_INTERFACE -else - CPPDEFS += -DMCT_INTERFACE -endif - -ifeq ($(COMPARE_TO_NUOPC), TRUE) - CPPDEFS += -DCOMPARE_TO_NUOPC -endif - -ifeq ($(strip $(MPILIB)),mpi-serial) - CPPDEFS += -DNO_MPI2 -else - CPPDEFS += -DHAVE_MPI -endif - -ifeq ($(strip $(PIO_VERSION)),1) - CPPDEFS += -DPIO1 -else - USE_CXX = TRUE -endif - -ifeq (,$(SHAREDPATH)) - SHAREDPATH = $(COMPILER)/$(MPILIB)/$(DEBUGDIR)/$(THREADDIR)/$(COMP_INTERFACE) - INSTALL_SHAREDPATH = $(EXEROOT)/$(SHAREDPATH) -endif - -ifeq ($(USE_CXX), TRUE) - ifeq ($(SUPPORTS_CXX), FALSE) - $(error Fatal attempt to include C++ code on a compiler/machine combo that has not been set up to support C++) - endif -endif - -# Not clear how to escape commas for libraries with their own configure -# script, and they don't need this defined anyway, so leave this out of -# FPPDEFS. 
-ifeq ($(HAS_F2008_CONTIGUOUS),TRUE) - CONTIGUOUS_FLAG := -DUSE_CONTIGUOUS=contiguous, -else - CONTIGUOUS_FLAG := -DUSE_CONTIGUOUS= -endif - -ifdef CPRE - CONTIGUOUS_FLAG := $(subst $(comma),\\$(comma),$(CONTIGUOUS_FLAG)) - CONTIGUOUS_FLAG := $(patsubst -D%,$(CPRE)%,$(CONTIGUOUS_FLAG)) -endif - -AR ?= ar -ARFLAGS ?= -r - -ifdef NETCDF_C_PATH - ifndef NETCDF_FORTRAN_PATH - $(error "NETCDF_C_PATH specified without NETCDF_FORTRAN_PATH") - endif - NETCDF_SEPARATE:=TRUE - ifndef INC_NETCDF_C - INC_NETCDF_C:=$(NETCDF_C_PATH)/include - endif - ifndef INC_NETCDF_FORTRAN - INC_NETCDF_FORTRAN:=$(NETCDF_FORTRAN_PATH)/include - endif - ifndef LIB_NETCDF_C - LIB_NETCDF_C:=$(NETCDF_C_PATH)/lib - endif - ifndef LIB_NETCDF_FORTRAN - LIB_NETCDF_FORTRAN:=$(NETCDF_FORTRAN_PATH)/lib - endif - else ifdef NETCDF_FORTRAN_PATH - $(error "NETCDF_FORTRAN_PATH specified without NETCDF_C_PATH") - else ifdef NETCDF_PATH - NETCDF_SEPARATE:=FALSE - ifndef INC_NETCDF - INC_NETCDF:=$(NETCDF_PATH)/include - endif - ifndef LIB_NETCDF - LIB_NETCDF:=$(NETCDF_PATH)/lib - endif -else - # No Netcdf is an error unless target is clean or DEP - ifneq ($(MAKECMDGOALS), db_files) - ifneq ($(MAKECMDGOALS), db_flags) - ifeq (,$(findstring clean,$(MAKECMDGOALS))) - $(error NETCDF not found: Define NETCDF_PATH or NETCDF_C_PATH and NETCDF_FORTRAN_PATH in config_machines.xml or config_compilers.xml) - endif - endif - endif -endif - - -ifeq ($(MPILIB),mpi-serial) - ifdef PNETCDF_PATH - undefine PNETCDF_PATH - endif -else - ifdef PNETCDF_PATH - ifndef $(INC_PNETCDF) - INC_PNETCDF:=$(PNETCDF_PATH)/include - endif - ifndef LIB_PNETCDF - LIB_PNETCDF:=$(PNETCDF_PATH)/lib - endif - endif -endif -# Set PETSc info if it is being used -ifeq ($(strip $(USE_PETSC)), TRUE) - ifdef PETSC_PATH - ifndef INC_PETSC - INC_PETSC:=$(PETSC_PATH)/include - endif - ifndef LIB_PETSC - LIB_PETSC:=$(PETSC_PATH)/lib - endif - else - $(error PETSC_PATH must be defined when USE_PETSC is TRUE) - endif - - # Get the "PETSC_LIB" list an env var - include $(PETSC_PATH)/lib/petsc/conf/variables - -endif - -# Set Trilinos info if it is being used -ifeq ($(strip $(USE_TRILINOS)), TRUE) - ifdef TRILINOS_PATH - ifndef INC_TRILINOS - INC_TRILINOS:=$(TRILINOS_PATH)/include - endif - ifndef LIB_TRILINOS - LIB_TRILINOS:=$(TRILINOS_PATH)/lib - endif - else - $(error TRILINOS_PATH must be defined when USE_TRILINOS is TRUE) - endif - - # get a bunch of variables related to this trilinos installation; - # these variables begin with "Trilinos_" - include $(INC_TRILINOS)/Makefile.export.Trilinos -endif - -# Set Albany info if it is being used -ifeq ($(strip $(USE_ALBANY)), TRUE) - ifdef ALBANY_PATH - ifndef INC_ALBANY - INC_ALBANY:=$(ALBANY_PATH)/include - endif - ifndef LIB_ALBANY - LIB_ALBANY:=$(ALBANY_PATH)/lib - endif - else - $(error ALBANY_PATH must be defined when USE_ALBANY is TRUE) - endif - - # get the "ALBANY_LINK_LIBS" list as an env var - include $(ALBANY_PATH)/export_albany.in -endif - -ifeq ($(strip $(USE_KOKKOS)), TRUE) - include $(INSTALL_SHAREDPATH)/Makefile.kokkos - SLIBS += $(KOKKOS_LIBS) - CXXFLAGS += $(KOKKOS_CXXFLAGS) - CXX_LDFLAGS += $(KOKKOS_LDFLAGS) -endif - -# Set MOAB info if it is being used -ifeq ($(strip $(USE_MOAB)), TRUE) - ifdef MOAB_PATH - CPPDEFS += -DHAVE_MOAB - ifndef INC_MOAB - INC_MOAB:=$(MOAB_PATH)/include - endif - ifndef LIB_MOAB - LIB_MOAB:=$(MOAB_PATH)/lib - endif - else - $(error MOAB_PATH must be defined when USE_MOAB is TRUE) - endif - - # get the "IMESH_LIBS" list as an env var - include $(LIB_MOAB)/iMesh-Defs.inc - -endif - -# Set 
HAVE_SLASHPROC on LINUX systems which are not bluegene or Darwin (OSx) - -ifeq ($(findstring -DLINUX,$(CPPDEFS)),-DLINUX) - ifneq ($(findstring DBG,$(CPPDEFS)),DBG) - ifneq ($(findstring Darwin,$(CPPDEFS)),Darwin) - CPPDEFS += -DHAVE_SLASHPROC - endif - endif -endif - -# Atleast on Titan+cray mpi, MPI_Irsends() are buggy, causing hangs during I/O -# Force PIO to use MPI_Isends instead of the default, MPI_Irsends -ifeq ($(PIO_VERSION),2) - EXTRA_PIO_CPPDEFS = -DUSE_MPI_ISEND_FOR_FC -else - EXTRA_PIO_CPPDEFS = -D_NO_MPI_RSEND -endif - -ifdef LIB_PNETCDF - CPPDEFS += -D_PNETCDF - SLIBS += -L$(LIB_PNETCDF) -lpnetcdf -endif - -# Set esmf.mk location with ESMF_LIBDIR having precedent over ESMFMKFILE -CIME_ESMFMKFILE := undefined_ESMFMKFILE -ifdef ESMFMKFILE - CIME_ESMFMKFILE := $(ESMFMKFILE) -endif -ifdef ESMF_LIBDIR - CIME_ESMFMKFILE := $(ESMF_LIBDIR)/esmf.mk -endif -# For compiling and linking with external ESMF. -# If linking to external ESMF library then include esmf.mk -# ESMF_F90COMPILEPATHS -# ESMF_F90LINKPATHS -# ESMF_F90LINKRPATHS -# ESMF_F90ESMFLINKLIBS -ifeq ($(USE_ESMF_LIB), TRUE) - -include $(CIME_ESMFMKFILE) - CPPDEFS += -DESMF_VERSION_MAJOR=$(ESMF_VERSION_MAJOR) -DESMF_VERSION_MINOR=$(ESMF_VERSION_MINOR) - FFLAGS += $(ESMF_F90COMPILEPATHS) - SLIBS += $(ESMF_F90LINKPATHS) $(ESMF_F90LINKRPATHS) $(ESMF_F90ESMFLINKLIBS) -endif - -# Stub libraries do not need to be built for nuopc driver -# so it will override these settings on the command line -ATM_PRESENT ?= TRUE -ICE_PRESENT ?= TRUE -LND_PRESENT ?= TRUE -OCN_PRESENT ?= TRUE -ROF_PRESENT ?= TRUE -GLC_PRESENT ?= TRUE -WAV_PRESENT ?= TRUE -ESP_PRESENT ?= TRUE -IAC_PRESENT ?= TRUE -ifeq ($(ULIBDEP),$(null)) - ifneq ($(LIBROOT),$(null)) - ifeq ($(ATM_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/libatm.a - CPPDEFS += -DATM_PRESENT - endif - ifeq ($(ICE_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/libice.a - CPPDEFS += -DICE_PRESENT - endif - ifeq ($(LND_PRESENT),TRUE) - ULIBDEP += $(LNDLIBDIR)/$(LNDLIB) - CPPDEFS += -DLND_PRESENT - endif - ifeq ($(OCN_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/libocn.a - CPPDEFS += -DOCN_PRESENT - endif - ifeq ($(ROF_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/librof.a - CPPDEFS += -DROF_PRESENT - endif - ifeq ($(GLC_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/libglc.a - CPPDEFS += -DGLC_PRESENT - endif - ifeq ($(WAV_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/libwav.a - CPPDEFS += -DWAV_PRESENT - endif - ifeq ($(ESP_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/libesp.a - CPPDEFS += -DESP_PRESENT - endif - ifeq ($(IAC_PRESENT),TRUE) - ULIBDEP += $(LIBROOT)/libiac.a - endif - endif -endif - -ifdef CPRE - FPPDEFS := $(subst $(comma),\\$(comma),$(CPPDEFS)) - FPPDEFS := $(patsubst -D%,$(CPRE)%,$(FPPDEFS)) - EXTRA_PIO_FPPDEFS := $(subst $(comma),\\$(comma),$(EXTRA_PIO_CPPDEFS)) - EXTRA_PIO_FPPDEFS := $(patsubst -D%,$(CPRE)%,$(EXTRA_PIO_FPPDEFS)) -else - FPPDEFS := $(CPPDEFS) - EXTRA_PIO_FPPDEFS := $(EXTRA_PIO_CPPDEFS) -endif - -#=============================================================================== -# Set config args for pio and mct to blank and then enable serial -#=============================================================================== -ifndef CONFIG_ARGS - CONFIG_ARGS := -endif -ifeq ($(findstring pio,$(MODEL)),pio) - CONFIG_ARGS+= --enable-timing - ifeq ($DEBUG,TRUE) - CONFIG_ARGS+= --enable-debug - endif -endif - -#=============================================================================== -# User-specified INCLDIR -#=============================================================================== - -INCLDIR := -I. 
-ifdef USER_INCLDIR - INCLDIR += $(USER_INCLDIR) -endif - -#=============================================================================== -# MPI-serial library (part of MCT) -#=============================================================================== - -ifeq ($(strip $(MPILIB)), mpi-serial) - CC := $(SCC) - FC := $(SFC) - CXX := $(SCXX) - MPIFC := $(SFC) - MPICC := $(SCC) - MPICXX := $(SCXX) - CONFIG_ARGS += MCT_PATH=$(SHAREDLIBROOT)/$(SHAREDPATH)/mct/mpi-serial -else - CC := $(MPICC) - FC := $(MPIFC) - CXX := $(MPICXX) - ifdef MPI_PATH - INC_MPI := $(MPI_PATH)/include - LIB_MPI := $(MPI_PATH)/lib - endif -endif -LD := $(MPIFC) -# Decide whether to use a C++ or Fortran linker, based on whether we -# are using any C++ code and the compiler-dependent CXX_LINKER variable -ifeq ($(USE_CXX), TRUE) - # The following is essentially an "if... elseif... else", but gmake - # 3.80 and earlier doesn't support elseif - ifeq ($(CXX_LINKER), CXX) - LD := $(MPICXX) - endif - ifeq ($(CXX_LINKER), FORTRAN) - LD := $(MPIFC) - endif -endif - -CSM_SHR_INCLUDE:=$(INSTALL_SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/include -# This is needed so that dependancies are found -VPATH+=$(CSM_SHR_INCLUDE) - -#=============================================================================== -# Set include paths (needed after override for any model specific builds below) -#=============================================================================== -INCLDIR += -I$(INSTALL_SHAREDPATH)/include -I$(INSTALL_SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/include - -ifeq ($(NETCDF_SEPARATE), FALSE) - INCLDIR += -I$(INC_NETCDF) -else ifeq ($(NETCDF_SEPARATE), TRUE) - INCLDIR += -I$(INC_NETCDF_C) -I$(INC_NETCDF_FORTRAN) -endif -ifdef MOD_NETCDF - INCLDIR += -I$(MOD_NETCDF) -endif -ifdef INC_MPI - INCLDIR += -I$(INC_MPI) -endif -ifdef INC_PNETCDF - INCLDIR += -I$(INC_PNETCDF) -endif -ifdef INC_PETSC - INCLDIR += -I$(INC_PETSC) -endif -ifdef INC_TRILINOS - INCLDIR += -I$(INC_TRILINOS) -endif -ifdef INC_ALBANY - INCLDIR += -I$(INC_ALBANY) -endif -ifdef INC_MOAB - INCLDIR += -I$(INC_MOAB) -endif - -ifeq ($(MODEL),driver) - INCLDIR += -I$(EXEROOT)/atm/obj -I$(EXEROOT)/ice/obj -I$(EXEROOT)/ocn/obj -I$(EXEROOT)/glc/obj -I$(EXEROOT)/rof/obj -I$(EXEROOT)/wav/obj -I$(EXEROOT)/esp/obj -I$(EXEROOT)/iac/obj -# nagfor and gcc have incompatible LDFLAGS. -# nagfor requires the weird "-Wl,-Wl,," syntax. -# If done in config_compilers.xml, we break MCT. - ifeq ($(strip $(COMPILER)),nag) - ifeq ($(NETCDF_SEPARATE), false) - SLIBS += -Wl,-Wl,,-rpath=$(LIB_NETCDF) - else ifeq ($(NETCDF_SEPARATE), true) - SLIBS += -Wl,-Wl,,-rpath=$(LIB_NETCDF_C) - SLIBS += -Wl,-Wl,,-rpath=$(LIB_NETCDF_FORTRAN) - endif - endif -else - ifeq ($(strip $(COMPILER)),nag) - ifeq ($(DEBUG), TRUE) - ifneq (,$(filter $(strip $(MACH)),hobart izumi)) - # GCC needs to be able to link to - # nagfor runtime to get autoconf - # tests to work. 
- CFLAGS += -Wl,--as-needed,--allow-shlib-undefined - SLIBS += -L$(COMPILER_PATH)/lib/NAG_Fortran -lf62rts - endif - endif - endif -endif - -ifndef MCT_LIBDIR - MCT_LIBDIR=$(INSTALL_SHAREDPATH)/lib -endif - -ifdef PIO_LIBDIR - ifeq ($(PIO_VERSION),$(PIO_VERSION_MAJOR)) - INCLDIR += -I$(PIO_INCDIR) - SLIBS += -L$(PIO_LIBDIR) - else - # If PIO_VERSION_MAJOR doesnt match, build from source - unexport PIO_LIBDIR - endif -endif -PIO_LIBDIR ?= $(INSTALL_SHAREDPATH)/lib - -ifndef GPTL_LIBDIR - GPTL_LIBDIR=$(INSTALL_SHAREDPATH)/lib -endif - -ifndef GLC_DIR - GLC_DIR=$(EXEROOT)/glc -endif -ifndef CISM_LIBDIR - CISM_LIBDIR=$(GLC_DIR)/lib -endif -ifndef GLCROOT - # Backwards compatibility - GLCROOT=$(CIMEROOT)/../components/cism -endif - -INCLDIR += -I$(INSTALL_SHAREDPATH)/include - -# -# Use the MCT dir for the cache for all configure calls because it is the first one -# -CFLAGS+=$(CPPDEFS) -CXXFLAGS+=$(CPPDEFS) -CONFIG_ARGS += CC="$(CC)" FC="$(FC)" MPICC="$(MPICC)" \ - MPIFC="$(MPIFC)" FCFLAGS="$(FFLAGS) $(FREEFLAGS) $(INCLDIR)" \ - CPPDEFS="$(CPPDEFS)" CFLAGS="$(CFLAGS) -I.. $(INCLDIR)" \ - LDFLAGS="$(LDFLAGS)" - -ifeq ($(NETCDF_SEPARATE), FALSE) - CONFIG_ARGS += NETCDF_PATH=$(NETCDF_PATH) -else ifeq ($(NETCDF_SEPARATE), TRUE) - # The mct library needs the NetCDF_C library - CONFIG_ARGS += NETCDF_PATH=$(NETCDF_C_PATH) -endif - -FFLAGS += $(FPPDEFS) -FFLAGS_NOOPT += $(FPPDEFS) - -ifeq ($(findstring -cosp,$(CAM_CONFIG_OPTS)),-cosp) - # The following is for the COSP simulator code: - COSP_LIBDIR:=$(abspath $(EXEROOT)/atm/obj/cosp) - ifeq ($(MODEL),driver) - INCLDIR+=-I$(COSP_LIBDIR) - endif -endif - -ifeq ($(MODEL),cam) - # These RRTMG files take an extraordinarily long time to compile with optimization. - # Until mods are made to read the data from files, just remove optimization from - # their compilation. -rrtmg_lw_k_g.o: rrtmg_lw_k_g.f90 - $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $< - -rrtmg_sw_k_g.o: rrtmg_sw_k_g.f90 - $(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $< - - -ifdef COSP_LIBDIR -INCLDIR+=-I$(COSP_LIBDIR) -I$(COSP_LIBDIR)/../ -I../$(INSTALL_SHAREDPATH)/include -I../$(CSM_SHR_INCLUDE) -$(COSP_LIBDIR)/libcosp.a: cam_abortutils.o - $(MAKE) -C $(COSP_LIBDIR) F90='$(FC)' F90FLAGS='$(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FC_AUTO_R8)' \ - F90FLAGS_noauto='$(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS)' \ - F90FLAGS_fixed='$(INCLDIR) $(INCS) $(FIXEDFLAGS) $(FFLAGS) $(FC_AUTO_R8)' - -cospsimulator_intr.o: $(COSP_LIBDIR)/libcosp.a -endif - -endif - - -# System libraries (netcdf, mpi, pnetcdf, esmf, trilinos, etc.) -ifndef SLIBS - ifeq ($(NETCDF_SEPARATE), FALSE) - SLIBS := -L$(LIB_NETCDF) -lnetcdff -lnetcdf - else ifeq ($(NETCDF_SEPARATE), TRUE) - SLIBS := -L$(LIB_NETCDF_FORTRAN) -L$(LIB_NETCDF_C) -lnetcdff -lnetcdf - endif -endif - -ifdef LAPACK_LIBDIR - SLIBS += -L$(LAPACK_LIBDIR) -llapack -lblas -endif -ifdef LIB_MPI - ifndef MPI_LIB_NAME - SLIBS += -L$(LIB_MPI) -lmpi - else - SLIBS += -L$(LIB_MPI) -l$(MPI_LIB_NAME) - endif -endif - -# For compiling and linking with external ESMF. 
-# If linking to external ESMF library then include esmf.mk -# ESMF_F90COMPILEPATHS -# ESMF_F90LINKPATHS -# ESMF_F90LINKRPATHS -# ESMF_F90ESMFLINKLIBS -ifeq ($(USE_ESMF_LIB), TRUE) - -include $(CCSM_ESMFMKFILE) - FFLAGS += $(ESMF_F90COMPILEPATHS) - SLIBS += $(ESMF_F90LINKPATHS) $(ESMF_F90LINKRPATHS) $(ESMF_F90ESMFLINKLIBS) -endif - -# Add PETSc libraries -ifeq ($(strip $(USE_PETSC)), TRUE) - SLIBS += ${PETSC_LIB} -endif - -# Add trilinos libraries; too be safe, we include all libraries included in the trilinos build, -# as well as all necessary third-party libraries -ifeq ($(strip $(USE_TRILINOS)), TRUE) - SLIBS += -L$(LIB_TRILINOS) $(Trilinos_LIBRARIES) $(Trilinos_TPL_LIBRARY_DIRS) $(Trilinos_TPL_LIBRARIES) -endif - -# Add Albany libraries. These are defined in the ALBANY_LINK_LIBS env var that was included above -ifeq ($(strip $(USE_ALBANY)), TRUE) - SLIBS += $(ALBANY_LINK_LIBS) -endif - -# Add MOAB libraries. These are defined in the MOAB_LINK_LIBS env var that was included above -ifeq ($(strip $(USE_MOAB)), TRUE) - SLIBS += $(IMESH_LIBS) -endif - -# Add libraries and flags that we need on the link line when C++ code is included -# We need to do these additions after CONFIG_ARGS is set, because they can sometimes break configure for mct, etc., -# if they are added to LDFLAGS in CONFIG_ARGS. -ifeq ($(USE_CXX), TRUE) - ifdef CXX_LIBS - SLIBS += $(CXX_LIBS) - endif - - ifdef CXX_LDFLAGS - LDFLAGS += $(CXX_LDFLAGS) - endif -endif - -# Remove arch flag if it exists -F90_LDFLAGS := $(filter-out -arch%,$(LDFLAGS)) - -# Machine stuff to appear last on the link step -ifndef MLIBS - MLIBS := -endif - -#------------------------------------------------------------------------------ -# Drive configure scripts for support libraries (mct) -#------------------------------------------------------------------------------ - -$(SHAREDLIBROOT)/$(SHAREDPATH)/mct/Makefile.conf: - @echo "SHAREDLIBROOT |$(SHAREDLIBROOT)| SHAREDPATH |$(SHAREDPATH)|"; \ - $(CONFIG_SHELL) $(CIMEROOT)/src/externals/mct/configure $(CONFIG_ARGS) --srcdir $(CIMEROOT)/src/externals/mct - -$(SHAREDLIBROOT)/$(SHAREDPATH)/mct/mpi-serial/Makefile.conf: - @echo "SHAREDLIBROOT |$(SHAREDLIBROOT)| SHAREDPATH |$(SHAREDPATH)|"; \ - $(CONFIG_SHELL) $(CIMEROOT)/src/externals/mct/mpi-serial/configure $(CONFIG_ARGS) --srcdir $(CIMEROOT)/src/externals/mct/mpi-serial - -ifeq ($(PIO_VERSION),2) -# This is a pio2 library - PIOLIB = $(PIO_LIBDIR)/libpiof.a $(PIO_LIBDIR)/libpioc.a - PIOLIBNAME = -lpiof -lpioc - PIO_SRC_DIR = $(CIMEROOT)/src/externals/pio2 -else -# This is a pio1 library - PIOLIB = $(PIO_LIBDIR)/libpio.a - PIOLIBNAME = -lpio - ifneq ("$(wildcard $(CIMEROOT)/src/externals/pio1/pio)", "") - PIO_SRC_DIR = $(CIMEROOT)/src/externals/pio1 - else - PIO_SRC_DIR = $(CIMEROOT)/src/externals/pio1/pio - endif -endif -#endif - -MCTLIBS = $(MCT_LIBDIR)/libmct.a $(MCT_LIBDIR)/libmpeu.a - -GPTLLIB = $(GPTL_LIBDIR)/libgptl.a - -ULIBS += -L$(INSTALL_SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/lib -lcsm_share -L$(INSTALL_SHAREDPATH)/lib $(PIOLIBNAME) -lgptl -lmct -lmpeu - -#------------------------------------------------------------------------------ -# Drive cmake script for cism and pio -#------------------------------------------------------------------------------ - -ifndef CMAKE_OPTS - CMAKE_OPTS := -endif -# note that the fortran flags include neither the FREEFLAGS nor the -# FIXEDFLAGS, so that both free & fixed code can be built (cmake -# doesn't seem to be able to differentiate between free & fixed -# fortran flags) -CMAKE_OPTS += -D 
CMAKE_Fortran_FLAGS:STRING="$(FFLAGS) $(EXTRA_PIO_FPPDEFS) $(INCLDIR)" \ - -D CMAKE_C_FLAGS:STRING="$(CFLAGS) $(EXTRA_PIO_CPPDEFS) $(INCLDIR)" \ - -D CMAKE_CXX_FLAGS:STRING="$(CXXFLAGS) $(EXTRA_PIO_CPPDEFS) $(INCLDIR)" \ - -D CMAKE_VERBOSE_MAKEFILE:BOOL=ON \ - -D GPTL_PATH:STRING=$(INSTALL_SHAREDPATH) \ - -D PIO_ENABLE_TESTS:BOOL=OFF \ - -D PIO_USE_MALLOC:BOOL=OFF \ - -D USER_CMAKE_MODULE_PATH:LIST="$(CIMEROOT)/src/CMake;$(CIMEROOT)/src/externals/pio2/cmake" \ - -# Allow for separate installations of the NetCDF C and Fortran libraries -ifeq ($(NETCDF_SEPARATE), FALSE) - CMAKE_OPTS += -D NetCDF_PATH:PATH=$(NETCDF_PATH) -else ifeq ($(NETCDF_SEPARATE), TRUE) - # NETCDF_Fortran_DIR points to the separate - # installation of Fortran NetCDF for PIO - CMAKE_OPTS += -D NetCDF_C_PATH:PATH=$(NETCDF_C_PATH) \ - -D NetCDF_Fortran_PATH:PATH=$(NETCDF_FORTRAN_PATH) -endif - -ifdef HDF5_PATH - CMAKE_OPTS += -D HDF5_PATH:STRING="$(HDF5_PATH)" -endif - -ifdef PNETCDF_PATH - CMAKE_OPTS += -D PnetCDF_PATH:STRING="$(PNETCDF_PATH)" -else - CMAKE_OPTS += -D WITH_PNETCDF:LOGICAL=FALSE -D PIO_USE_MPIIO:LOGICAL=FALSE -endif -ifdef PIO_FILESYSTEM_HINTS - CMAKE_OPTS += -D PIO_FILESYSTEM_HINTS:STRING="$(PIO_FILESYSTEM_HINTS)" -endif -ifeq ($(MPILIB),mpi-serial) - CMAKE_OPTS += -D PIO_USE_MPISERIAL=TRUE -D MPISERIAL_PATH=$(INSTALL_SHAREDPATH) -endif - -# This captures the many cism-specific options to cmake -CMAKE_OPTS += $(USER_CMAKE_OPTS) - -# CMake doesn't seem to like it when you define compilers via -D -# CMAKE_C_COMPILER, etc., when you rerun cmake with an existing -# cache. So doing this via environment variables instead. -ifndef CMAKE_ENV_VARS - CMAKE_ENV_VARS := -endif -CMAKE_ENV_VARS += CC=$(CC) \ - CXX=$(CXX) \ - FC=$(FC) \ - LDFLAGS="$(LDFLAGS)" - - -# We declare $(GLC_DIR)/Makefile to be a phony target so that cmake is -# always rerun whenever invoking 'make $(GLC_DIR)/Makefile'; this is -# desirable to pick up any new source files that may have been added -.PHONY: $(GLC_DIR)/Makefile -$(GLC_DIR)/Makefile: - cd $(GLC_DIR); \ - $(CMAKE_ENV_VARS) cmake $(CMAKE_OPTS) $(GLCROOT)/source_cism - -$(PIO_LIBDIR)/Makefile: - cd $(PIO_LIBDIR); \ - $(CMAKE_ENV_VARS) cmake $(CMAKE_OPTS) $(PIO_SRC_DIR) - -#------------------------------------------------------------------------------- -# Build & include dependency files -#------------------------------------------------------------------------------- - -touch_filepath: - touch Filepath - -# Get list of files and build dependency file for all .o files -# using perl scripts mkSrcfiles and mkDepends -# if a source is of form .F90.in strip the .in before creating the list of objects -SOURCES := $(shell cat Srcfiles) -BASENAMES := $(basename $(basename $(SOURCES))) -OBJS := $(addsuffix .o, $(BASENAMES)) -INCS := $(foreach dir,$(cpp_dirs),-I$(dir)) - -CURDIR := $(shell pwd) - -Depends: Srcfiles Deppath - $(CASETOOLS)/mkDepends $(USER_MKDEPENDS_OPTS) Deppath Srcfiles > $@ - -Deppath: Filepath - $(CP) -f Filepath $@ - @echo "$(MINCROOT)" >> $@ - -Srcfiles: Filepath - $(CASETOOLS)/mkSrcfiles - -Filepath: - @echo "$(VPATH)" > $@ - - -#------------------------------------------------------------------------------- -# echo file names, paths, compile flags, etc. 
used during build -#------------------------------------------------------------------------------- - -db_files: - @echo " " - @echo "* VPATH := $(VPATH)" - @echo "* INCS := $(INCS)" - @echo "* OBJS := $(OBJS)" -db_flags: - @echo " " - @echo "* cc := $(CC) $(CFLAGS) $(INCS) $(INCLDIR)" - @echo "* .F.o := $(FC) $(FFLAGS) $(FIXEDFLAGS) $(INCS) $(INCLDIR)" - @echo "* .F90.o := $(FC) $(FFLAGS) $(FREEFLAGS) $(INCS) $(INCLDIR)" - ifeq ($(USE_CXX), TRUE) - @echo "* .cpp.o := $(CXX) $(CXXFLAGS) $(INCS) $(INCLDIR)" - endif - -#------------------------------------------------------------------------------- -# Rules used for the tests run by "configure -test" -#------------------------------------------------------------------------------- - -test_fc: test_fc.o - $(LD) -o $@ test_fc.o $(F90_LDFLAGS) -ifeq ($(NETCDF_SEPARATE), FALSE) -test_nc: test_nc.o - $(LD) -o $@ test_nc.o -L$(LIB_NETCDF) -lnetcdff -lnetcdf $(F90_LDFLAGS) -else ifeq ($(NETCDF_SEPARATE), TRUE) -test_nc: test_nc.o - $(LD) -o $@ test_nc.o -L$(LIB_NETCDF_FORTRAN) -L$(LIB_NETCDF_C) -lnetcdff -lnetcdf $(F90_LDFLAGS) -endif -test_mpi: test_mpi.o - $(LD) -o $@ test_mpi.o $(F90_LDFLAGS) -test_esmf: test_esmf.o - $(LD) -o $@ test_esmf.o $(F90_LDFLAGS) - -#------------------------------------------------------------------------------- -# create list of component libraries - hard-wired for current ccsm components -#------------------------------------------------------------------------------- -ifeq ($(CIME_MODEL),cesm) - ifeq ($(COMP_LND),clm) - USE_SHARED_CLM=TRUE - else - USE_SHARED_CLM=FALSE - endif -else - USE_SHARED_CLM=FALSE -endif -ifeq ($(USE_SHARED_CLM),FALSE) - LNDOBJDIR = $(EXEROOT)/lnd/obj - LNDLIBDIR=$(LIBROOT) - ifeq ($(COMP_LND),clm) - LNDLIB := libclm.a - else - LNDLIB := liblnd.a - endif - INCLDIR += -I$(LNDOBJDIR) -else - LNDLIB := libclm.a - LNDOBJDIR = $(SHAREDLIBROOT)/$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/clm/obj - LNDLIBDIR = $(EXEROOT)/$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/lib - INCLDIR += -I$(INSTALL_SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/include - ifeq ($(MODEL),clm) - INCLUDE_DIR = $(INSTALL_SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/include - endif -endif -ifeq ($(LND_PRESENT),TRUE) - INCLDIR += -I$(LNDOBJDIR) -endif -ifeq ($(COMP_GLC), cism) - ULIBDEP += $(CISM_LIBDIR)/libglimmercismfortran.a - ifeq ($(CISM_USE_TRILINOS), TRUE) - ULIBDEP += $(CISM_LIBDIR)/libglimmercismcpp.a - endif -endif -ifeq ($(OCN_SUBMODEL),moby) - ULIBDEP += $(LIBROOT)/libmoby.a -endif - -ifdef COSP_LIBDIR - ifeq ($(CIME_MODEL),cesm) - ULIBDEP += $(COSP_LIBDIR)/libcosp.a - endif -endif - - -ifndef CLIBS - ifdef ULIBDEP - # For each occurrence of something like /path/to/foo/libbar.a in ULIBDEP, - # CLIBS will contain -L/path/to/foo -lbar - CLIBS := $(foreach LIBDEP,$(strip $(ULIBDEP)), -L$(dir $(LIBDEP)) $(patsubst lib%.a,-l%,$(notdir $(LIBDEP)))) - endif -endif - -# libcsm_share.a is in ULIBDEP, but -lcsm_share is in ULIBS rather than CLIBS, -# so this needs to be added after creating CLIBS above -CSMSHARELIB = $(INSTALL_SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/lib/libcsm_share.a -ULIBDEP += $(CSMSHARELIB) - -#------------------------------------------------------------------------------- -# build rules: -#------------------------------------------------------------------------------- - -.SUFFIXES: -.SUFFIXES: .F90 .F .f90 .f .c .cpp .o .in - -ifeq ($(MPILIB),mpi-serial) - MPISERIAL = $(INSTALL_SHAREDPATH)/lib/libmpi-serial.a - MLIBS += $(MPISERIAL) - CMAKE_OPTS += -DMPI_C_INCLUDE_PATH=$(INSTALL_SHAREDPATH)/include \ 
- -DMPI_Fortran_INCLUDE_PATH=$(INSTALL_SHAREDPATH)/include \ - -DMPI_C_LIBRARIES=$(INSTALL_SHAREDPATH)/lib/libmpi-serial.a \ - -DMPI_Fortran_LIBRARIES=$(INSTALL_SHAREDPATH)/lib/libmpi-serial.a -endif - -$(MCTLIBS) : $(MPISERIAL) - -$(PIOLIB) : $(MPISERIAL) $(GPTLLIB) - -$(CSMSHARELIB): $(MCTLIBS) $(PIOLIB) $(GPTLLIB) - -ifneq ($(MODEL),csm_share) - $(OBJS): $(CSMSHARELIB) -else - complib: install_lib -endif - -install_lib: $(COMPLIB) - $(CP) -p $(COMPLIB) $(CSMSHARELIB) - $(CP) -p *.$(MOD_SUFFIX) *.h $(INCLUDE_DIR) - - -$(EXEC_SE): $(OBJS) $(ULIBDEP) $(CSMSHARELIB) $(MCTLIBS) $(PIOLIB) $(GPTLLIB) - $(LD) -o $(EXEC_SE) $(OBJS) $(CLIBS) $(ULIBS) $(SLIBS) $(MLIBS) $(F90_LDFLAGS) - -$(COMPLIB): $(OBJS) - $(AR) $(ARFLAGS) $(COMPLIB) $(OBJS) - -.c.o: - $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) $< -.F.o: - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FIXEDFLAGS) $< -.f.o: - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FIXEDFLAGS) $< -.f90.o: - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $< -.F90.o: - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $(CONTIGUOUS_FLAG) $< -.cpp.o: - $(CXX) -c $(INCLDIR) $(INCS) $(CXXFLAGS) $< - -%.F90: %.F90.in - $(CIMEROOT)/src/externals/genf90/genf90.pl $< > $@ - -clean_dependsatm: - $(RM) -f $(EXEROOT)/atm/obj/Srcfiles - -clean_dependscpl: - $(RM) -f $(EXEROOT)/cpl/obj/Srcfiles - -clean_dependsocn: - $(RM) -f $(EXEROOT)/ocn/obj/Srcfiles - -clean_dependswav: - $(RM) -f $(EXEROOT)/wav/obj/Srcfiles - -clean_dependsiac: - $(RM) -f $(EXEROOT)/iac/obj/Srcfiles - -clean_dependsglc: - $(RM) -f $(EXEROOT)/glc/obj/Srcfiles - -clean_dependsice: - $(RM) -f $(EXEROOT)/ice/obj/Srcfiles - -clean_dependsrof: - $(RM) -f $(EXEROOT)/rof/obj/Srcfiles - -clean_dependsesp: - $(RM) -f $(EXEROOT)/esp/obj/Srcfiles - -clean_dependslnd: - $(RM) -f $(LNDOBJDIR)/Srcfiles - -clean_dependscsmshare: - $(RM) -f $(SHAREDLIBROOT)/$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share/Srcfiles - -clean_depends: clean_dependsatm clean_dependscpl clean_dependswav clean_dependsglc clean_dependsice clean_dependsrof clean_dependslnd clean_dependscsmshare clean_dependsesp clean_dependsiac - - -cleanatm: - $(RM) -f $(LIBROOT)/libatm.a - $(RM) -fr $(EXEROOT)/atm/obj - -cleancpl: - $(RM) -fr $(EXEROOT)/cpl/obj - -cleanocn: - $(RM) -f $(LIBROOT)/libocn.a - $(RM) -fr $(EXEROOT)/ocn/obj - -cleanwav: - $(RM) -f $(LIBROOT)/libwav.a - $(RM) -fr $(EXEROOT)/wav/obj - -cleaniac: - $(RM) -f $(LIBROOT)/libiac.a - $(RM) -fr $(EXEROOT)/iac/obj - -cleanesp: - $(RM) -f $(LIBROOT)/libesp.a - $(RM) -fr $(EXEROOT)/esp/obj - -cleanglc: - $(RM) -f $(LIBROOT)/libglc.a - $(RM) -fr $(EXEROOT)/glc - -cleanice: - $(RM) -f $(LIBROOT)/libice.a - $(RM) -fr $(EXEROOT)/ice/obj - -cleanrof: - $(RM) -f $(LIBROOT)/librof.a - $(RM) -fr $(EXEROOT)/rof/obj - -cleanlnd: - $(RM) -f $(LIBROOT)/$(LNDLIB) - $(RM) -fr $(EXEROOT)/lnd/obj - -cleancsmshare: - $(RM) -f $(CSMSHARELIB) - $(RM) -fr $(SHAREDLIBROOT)/$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share - -cleanpio: - $(RM) -f $(PIO_LIBDIR)/libpio* - $(RM) -fr $(SHAREDLIBROOT)/$(SHAREDPATH)/pio - -cleanmct: - $(RM) -f $(MCTLIBS) - $(RM) -fr $(SHAREDLIBROOT)/$(SHAREDPATH)/mct - -cleangptl: - $(RM) -f $(GPTLLIB) - $(RM) -fr $(SHAREDLIBROOT)/$(SHAREDPATH)/gptl - -clean: cleanatm cleanocn cleanwav cleanglc cleanice cleanrof cleanlnd cleanesp cleaniac - -realclean: clean cleancsmshare cleanpio cleanmct cleangptl - -# the if-tests prevent DEPS files from being created when they're not needed -ifneq ($(MAKECMDGOALS), db_files) -ifneq ($(MAKECMDGOALS), db_flags) -ifeq 
(,$(findstring clean,$(MAKECMDGOALS))) - -include Depends $(CASEROOT)/Depends.$(COMPILER) $(CASEROOT)/Depends.$(MACH) $(CASEROOT)/Depends.$(MACH).$(COMPILER) -endif -endif -endif -ifeq ($(MODEL),csm_share) - shr_assert_mod.mod: shr_assert_mod.o -endif diff --git a/scripts/Tools/archive_metadata b/scripts/Tools/archive_metadata deleted file mode 100755 index 472a6187fa5..00000000000 --- a/scripts/Tools/archive_metadata +++ /dev/null @@ -1,1523 +0,0 @@ -#!/usr/bin/env python -""" -Gather all the case metadata and send it to the experiments databases -via a web post and SVN check-in - -Author: CSEG -""" -import argparse -import datetime -import filecmp -import getpass -import glob -import gzip -import json -import io -from os.path import expanduser -import re -import shutil -import ssl -import subprocess -import sys -from string import Template - -from standard_script_setup import * -from CIME.case import Case -from CIME.utils import is_last_process_complete -# six is for py2/py3 compatibility -from six.moves import configparser, urllib - -# define global constants -logger = logging.getLogger(__name__) -_svn_expdb_url = 'https://svn-cesm2-expdb.cgd.ucar.edu' -_exp_types = ['CMIP6', 'production', 'tuning'] -_xml_vars = ['CASE', 'COMPILER', 'COMPSET', 'CONTINUE_RUN', 'DOUT_S', 'DOUT_S_ROOT', - 'GRID', 'MACH', 'MPILIB', 'MODEL', 'MODEL_VERSION', 'REST_N', 'REST_OPTION', - 'RUNDIR', 'RUN_REFCASE', 'RUN_REFDATE', 'RUN_STARTDATE', 'RUN_TYPE', - 'STOP_N', 'STOP_OPTION', 'USER'] -_run_vars = ['JOB_QUEUE', 'JOB_WALLCLOCK_TIME', 'PROJECT'] -_archive_list = ['Buildconf', 'CaseDocs', 'CaseStatus', 'LockedFiles', - 'Macros.make', 'README.case', 'SourceMods', 'software_environment.txt'] -_call_template = Template('in "$function" - Ignoring SVN repo update\n' - 'SVN error executing command "$cmd". \n' - '$error: $strerror') -_copy_template = Template('in "$function" - Unable to copy "$source" to "$dest"' - '$error: $strerror') -_svn_error_template = Template('in "$function" - SVN client unavailable\n' - 'SVN error executing command "$cmd". 
\n' - '$error: $strerror') -_ignore_patterns = ['*.pyc', '^.git', 'tmp', '.svn', '*~'] -_pp_xml_vars = {'atm' : 'ATMDIAG_test_path_climo', - 'glc' : '', - 'lnd' : 'LNDDIAG_PTMPDIR_1', - 'ice' : 'ICEDIAG_PATH_CLIMO_CONT', - 'ocn' : 'OCNDIAG_TAVGDIR', - 'rof' : '', - 'timeseries' : 'TIMESERIES_OUTPUT_ROOTDIR', - 'xconform' : 'CONFORM_OUTPUT_DIR'} -_pp_diag_vars = {'atm' : ['ATMDIAG_test_first_yr', 'ATMDIAG_test_nyrs'], - 'ice' : ['ICEDIAG_BEGYR_CONT', 'ICEDIAG_ENDYR_CONT', 'ICEDIAG_YRS_TO_AVG'], - 'lnd' : ['LNDDIAG_clim_first_yr_1', 'LNDDIAG_clim_num_yrs_1', - 'LNDDIAG_trends_first_yr_1', 'LNDDIAG_trends_num_yrs_1'], - 'ocn' : ['OCNDIAG_YEAR0', 'OCNDIAG_YEAR1', - 'OCNDIAG_TSERIES_YEAR0', 'OCNDIAG_TSERIES_YEAR1']} -_pp_tseries_comps = ['atm', 'glc', 'ice', 'lnd', 'ocn', 'rof'] - -# setting the ssl context to avoid issues with CGD certificates -_context = ssl._create_unverified_context() # pylint:disable=protected-access - -# ------------------------------------------------------------------------------- -class PasswordPromptAction(argparse.Action): -# ------------------------------------------------------------------------------- - """ SVN developer's password class handler - """ - # pylint: disable=redefined-builtin - def __init__(self, - option_strings=None, - dest=None, - default=None, - required=False, - nargs=0, - help=None): - super(PasswordPromptAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - required=required, - nargs=nargs, - help=help) - - def __call__(self, parser, args, values, option_string=None): - # check if ~/.subversion/cmip6.conf exists - home = expanduser("~") - conf_path = os.path.join(home, ".subversion/cmip6.conf") - if os.path.exists(conf_path): - # read the .cmip6.conf file - config = configparser.SafeConfigParser() - config.read(conf_path) - password = config.get('svn', 'password') - else: - password = getpass.getpass() - setattr(args, self.dest, password) - -# --------------------------------------------------------------------- -def basic_authorization(user, password): -# --------------------------------------------------------------------- - """ Basic authentication encoding - """ - sauth = user + ":" + password - return "Basic " + sauth.encode("base64").rstrip() - -# --------------------------------------------------------------------- -class SVNException(Exception): -# --------------------------------------------------------------------- - """ SVN command exception handler - """ - def __init__(self, value): - super(SVNException, self).__init__(value) - self.value = value - - def __str__(self): - return repr(self.value) - -# ------------------------------------------------------------------------------- -def commandline_options(args): -# ------------------------------------------------------------------------------- - """ Process the command line arguments. - """ - parser = argparse.ArgumentParser( - description='Query and parse the caseroot files to gather metadata information' \ - ' that can be posted to the CESM experiments database.' \ - ' ' \ - ' CMIP6 experiment case names must be reserved already in the' \ - ' experiment database. 
Please see:' \ - ' https://csesgweb.cgd.ucar.edu/expdb2.0 for details.') - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument('--user', dest='user', type=str, default=None, required=True, - help='User name for SVN CESM developer access (required)') - - parser.add_argument('--password', dest='password', action=PasswordPromptAction, - default='', required=True, - help='Password for SVN CESM developer access (required)') - - parser.add_argument('--caseroot', nargs=1, required=False, - help='Fully quailfied path to case root directory (optional). ' \ - 'Defaults to current working directory.') - - parser.add_argument('--workdir', nargs=1, required=False, - help='Fully quailfied path to directory for storing intermediate ' \ - 'case files. A sub-directory called ' \ - 'archive_temp_dir is created, populated ' \ - 'with case files, and posted to the CESM experiments database and ' \ - 'SVN repository at URL "{0}". ' \ - 'This argument can be used to archive a caseroot when the user ' \ - 'does not have write permission in the caseroot (optional). ' \ - 'Defaults to current working directory.'.format(_svn_expdb_url)) - - parser.add_argument('--expType', dest='expType', nargs=1, required=True, choices=_exp_types, - help='Experiment type. For CMIP6 experiments, the case must already ' \ - 'exist in the experiments database at URL ' \ - ' "http://csegweb.cgd.ucar.edu/expdb2.0" (required). ' \ - 'Must be one of "{0}"'.format(_exp_types)) - - parser.add_argument('--title', nargs=1, required=False, default=None, - help='Title of experiment (optional).') - - parser.add_argument('--ignore-logs', dest='ignore_logs', action='store_true', - help='Ignore updating the SVN repository with the caseroot/logs files. ' \ - 'The experiments database will be updated (optional).') - - parser.add_argument('--ignore-timing', dest='ignore_timing', action='store_true', - help='Ignore updating the the SVN repository with caseroot/timing files.' \ - 'The experiments database will be updated (optional).') - - parser.add_argument('--ignore-repo-update', dest='ignore_repo_update', action='store_true', - help='Ignore updating the SVN repository with all the caseroot files. ' \ - 'The experiments database will be updated (optional).') - - parser.add_argument('--add-files', dest='user_add_files', required=False, - help='Comma-separated list with no spaces of files or directories to be ' \ - 'added to the SVN repository. These are in addition to the default added ' \ - 'caseroot files and directories: '\ - '"{0}, *.xml, user_nl_*" (optional).'.format(_archive_list)) - - parser.add_argument('--dryrun', action='store_true', - help='Parse settings and print what actions will be taken but ' \ - 'do not execute the action (optional).') - - parser.add_argument('--query_cmip6', nargs=2, required=False, - help='Query the experiments database global attributes ' \ - 'for specified CMIP6 casename as argument 1. ' \ - 'Writes a json formatted output file, specified by argument 2, ' \ - 'to subdir archive_files (optional).') - - parser.add_argument('--test-post', dest='test_post', action='store_true', - help='Post metadata to the test expdb2.0 web application server ' \ - 'at URL "http://csegwebdev.cgd.ucar.edu/expdb2.0". 
' \ - 'No --test-post argument defaults to posting metadata to the ' \ - 'production expdb2.0 web application server '\ - 'at URL "http://csegweb.cgd.ucar.edu/expdb2.0" (optional).') - - opts = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return opts - -# --------------------------------------------------------------------- -def get_case_vars(case_dict, case): -# --------------------------------------------------------------------- - """ get_case_vars - loop through the global list of XML vars and get the values - from the case object into a case dictionary - - Arguments: - case_dict (dict) - case dictionary to store XML variables - case (object) - case object - """ - logger.debug('get_case_vars') - - for xml_id in _xml_vars: - case_dict[xml_id] = case.get_value(xml_id, resolved=True, subgroup=None) - - for xml_id in _run_vars: - case_dict[xml_id] = case.get_value(xml_id, resolved=True, subgroup='case.run') - - return case_dict - -# --------------------------------------------------------------------- -def get_disk_usage(path): -# --------------------------------------------------------------------- - """get_disk_usage - return the total disk usage in bytes for a given path. - - Arguments: - path - path to start - """ - logger.debug('get_disk_usage') - total_size = 0 - cwd = os.getcwd() - if os.path.exists(path): - os.chdir(path) - cmd = ['du', '--summarize', '--block-size=1'] - try: - total_size = subprocess.check_output(cmd) - total_size = total_size.replace('\t.\n', '') - except subprocess.CalledProcessError: - msg = "Error executing command = '{0}'".format(cmd) - logger.warning(msg) - os.chdir(cwd) - return int(total_size) - - -# --------------------------------------------------------------------- -def get_ocn_disk_usage(path): -# --------------------------------------------------------------------- - """get_ocn_disk_usage - return the total disk usage in bytes for a given path. - - Arguments: - path - path to start - """ - logger.debug('get_ocn_disk_usage') - total_size = 0 - paths = glob.glob(path) - for path in paths: - total_size += get_disk_usage(path) - return int(total_size) - -# --------------------------------------------------------------------- -def get_pp_path(pp_dir, process): -# --------------------------------------------------------------------- - """get_pp_path - return the XML path for process - - Arguments: - pp_dir - path to postprocess directory - process - process name - """ - logger.debug('get_pp_path') - - cwd = os.getcwd() - os.chdir(pp_dir) - - pp_path_var = '' - if process == 'timeseries': - pp_path_var = _pp_xml_vars['timeseries'] - elif process == 'xconform': - pp_path_var = _pp_xml_vars['xconform'] - - cmd = ['./pp_config', '--get', pp_path_var, '--value'] - try: - pp_path = subprocess.check_output(cmd) - except subprocess.CalledProcessError: - msg = "Error executing command = '{0}'".format(cmd) - logger.warning(msg) - - if (len(pp_path) > 2): - pp_path = pp_path.rstrip() - else: - pp_path = '' - - os.chdir(cwd) - return pp_path - -# --------------------------------------------------------------------- -def get_diag_dates(comp, pp_dir): -# --------------------------------------------------------------------- - """ get_diag_dates - - Query the postprocessing env_diags_[comp].xml file to get the model diag - dates for the given component. 
- """ - logger.debug('get_diag_dates') - - cwd = os.getcwd() - os.chdir(pp_dir) - - model_dates = '' - pp_vars = _pp_diag_vars.get(comp) - for pp_var in pp_vars: - cmd = ['./pp_config', '--get', pp_var, '--value'] - try: - pp_value = subprocess.check_output(cmd) - except subprocess.CalledProcessError: - msg = "Error executing command = '{0}'".format(cmd) - logger.warning(msg) - tmp_dates = '{0} = {1}'.format(pp_var, pp_value) - model_dates = model_dates + tmp_dates - - os.chdir(cwd) - return model_dates - -# --------------------------------------------------------------------- -def get_pp_status(case_dict): -# --------------------------------------------------------------------- - """ get_pp_status - Parse the postprocessing log files - looking for status information - - Arguments: - case_dict (dict) - case dictionary to store XML variables - """ - logger.debug('get_pp_status') - - # initialize status variables - msg_avg = dict() - msg_diags = dict() - diag_comps = ['atm', 'ice', 'lnd', 'ocn'] - - pp_dir = os.path.join(case_dict['CASEROOT'], 'postprocess') - pp_log_dir = os.path.join(case_dict['CASEROOT'], 'postprocess', 'logs') - - msg_avg['atm'] = "COMPLETED SUCCESSFULLY" - msg_diags['atm'] = "Successfully completed generating atmosphere diagnostics" - case_dict['atm_avg_dates'] = case_dict['atm_diag_dates'] = get_diag_dates('atm', pp_dir) - - msg_avg['ice'] = "Successfully completed generating ice climatology averages" - msg_diags['ice'] = "Successfully completed generating ice diagnostics" - case_dict['ice_avg_dates'] = case_dict['ice_diag_dates'] = get_diag_dates('ice', pp_dir) - - msg_avg['lnd'] = "COMPLETED SUCCESSFULLY" - msg_diags['lnd'] = "Successfully completed generating land diagnostics" - case_dict['lnd_avg_dates'] = case_dict['lnd_diag_dates'] = get_diag_dates('lnd', pp_dir) - - msg_avg['ocn'] = "Successfully completed generating ocean climatology averages" - msg_diags['ocn'] = "Successfully completed generating ocean diagnostics" - case_dict['ocn_avg_dates'] = case_dict['ocn_diag_dates'] = get_diag_dates('ocn', pp_dir) - - - for comp in diag_comps: - case_dict[comp+'_avg_status'] = 'Unknown' - case_dict[comp+'_diag_status'] = 'Unknown' - - if (comp != 'ocn'): - case_dict[comp+'_avg_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/climo') - case_dict[comp+'_avg_size'] = get_disk_usage(case_dict[comp+'_avg_path']) - case_dict[comp+'_diag_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/diag') - case_dict[comp+'_diag_size'] = get_disk_usage(case_dict[comp+'_diag_path']) - else: - case_dict[comp+'_avg_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/climo*') - case_dict[comp+'_avg_size'] = get_ocn_disk_usage(case_dict[comp+'_avg_path']) - case_dict[comp+'_diag_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/diag*') - case_dict[comp+'_diag_size'] = get_ocn_disk_usage(case_dict[comp+'_diag_path']) - - avg_logs = list() - avg_file_pattern = ("{0}/{1}_averages.log.*".format(pp_log_dir, comp)) - avg_logs = glob.glob(avg_file_pattern) - - if avg_logs: - log_file = max(avg_logs, key=os.path.getctime) - if (is_last_process_complete(log_file, msg_avg[comp], - 'Average list complies with standards.')): - case_dict[comp+'_avg_status'] = 'Succeeded' - else: - case_dict[comp+'_avg_status'] = 'Started' - - diag_logs = list() - diag_file_pattern = ("{0}/{1}_diagnostics.log.*".format(pp_log_dir, comp)) - diag_logs = glob.glob(diag_file_pattern) - - if diag_logs: - log_file = max(diag_logs, key=os.path.getctime) - if 
is_last_process_complete(log_file, msg_diags[comp], 'ncks version'): - case_dict[comp+'_diag_status'] = 'Succeeded' - else: - case_dict[comp+'_diag_status'] = 'Started' - - # get overall timeseries status - case_dict['timeseries_status'] = 'Unknown' - case_dict['timeseries_path'] = get_pp_path(pp_dir, 'timeseries') - case_dict['timeseries_size'] = 0 - case_dict['timeseries_dates'] = '{0}-{1}'.format(case_dict['RUN_STARTDATE'].replace("-", ""), - case_dict['RUN_STARTDATE'].replace("-", "")) - case_dict['timeseries_total_time'] = 0 - tseries_logs = list() - tseries_file_pattern = ("{0}/timeseries.log.*".format(pp_log_dir)) - tseries_logs = glob.glob(tseries_file_pattern) - if tseries_logs: - log_file = max(tseries_logs, key=os.path.getctime) - if is_last_process_complete(filepath=log_file, - expect_text='Successfully completed', - fail_text='opening'): - case_dict['timeseries_status'] = 'Succeeded' - with open(log_file, 'r') as fname: - log_content = fname.readlines() - total_time = [line for line in log_content if 'Total Time:' in line] - case_dict['timeseries_total_time'] = ' '.join(total_time[0].split()) - else: - case_dict['timeseries_status'] = 'Started' - sta_dates = case_dict['sta_last_date'].split("-") - case_dict['timeseries_dates'] = '{0}-{1}'.format(case_dict['RUN_STARTDATE'].replace("-", ""), - ''.join(sta_dates[:-1])) - for comp in _pp_tseries_comps: - tseries_path = "{0}/{1}/proc/tseries".format(case_dict['timeseries_path'], comp) - case_dict['timeseries_size'] += get_disk_usage(tseries_path) - - # get iconform status = this initializes files in the POSTPROCESS_PATH - case_dict['iconform_status'] = 'Unknown' - case_dict['iconform_path'] = '' - case_dict['iconform_size'] = 0 - case_dict['iconform_dates'] = case_dict['timeseries_dates'] - - iconform_logs = list() - iconform_file_pattern = ("{0}/iconform.log.*".format(pp_log_dir)) - iconform_logs = glob.glob(iconform_file_pattern) - if iconform_logs: - log_file = max(iconform_logs, key=os.path.getctime) - if (is_last_process_complete(log_file, 'Successfully created the conform tool', - 'Running createOutputSpecs')): - case_dict['iconform_status'] = 'Succeeded' - else: - case_dict['iconform_status'] = 'Started' - - # get xconform status - case_dict['xconform_path'] = '' - case_dict['xconform_path'] = get_pp_path(pp_dir, 'xconform') - case_dict['xconform_status'] = 'Unknown' - case_dict['xconform_size'] = get_disk_usage(case_dict['xconform_path']) - case_dict['xconform_dates'] = case_dict['timeseries_dates'] - case_dict['xconform_total_time'] = 0 - - xconform_logs = list() - xconform_file_pattern = ("{0}/xconform.log.*".format(pp_log_dir)) - xconform_logs = glob.glob(xconform_file_pattern) - if xconform_logs: - log_file = max(xconform_logs, key=os.path.getctime) - if (is_last_process_complete(log_file, - 'Successfully completed converting all files', - 'cesm_conform_generator INFO')): - case_dict['xconform_status'] = 'Succeeded' - case_dict['xconform_size'] = get_disk_usage(case_dict['xconform_path']) - with open(log_file, 'r') as fname: - log_content = fname.readlines() - total_time = [line for line in log_content if 'Total Time:' in line] - if total_time: - case_dict['xconform_total_time'] = ' '.join(total_time[0].split()) - else: - case_dict['xconform_status'] = 'Started' - - return case_dict - -# --------------------------------------------------------------------- -def get_run_last_date(casename, run_path): -# --------------------------------------------------------------------- - """ get_run_last_date - parse the last cpl.r 
file in the run_path to retrieve that last date. - - Arguments: - casename - run_path - path to run directory - """ - logger.debug('get_run_last_date') - - pattern = ('{0}.cpl.r.*.nc'.format(casename)) - cpl_files = sorted(glob.glob(os.path.join(run_path, pattern))) - - if cpl_files: - _, cpl_file = os.path.split(cpl_files[-1]) - fparts = cpl_file.split('.') - return fparts[-2] - - return '0000-00-00' - -# --------------------------------------------------------------------- -def get_sta_last_date(sta_path): -# --------------------------------------------------------------------- - """ get_sta_last_date - parse the last rest directory in the sta_path to retrieve that last date. - - Arguments: - sta_path - path to run directory - """ - logger.debug('get_sta_last_date') - - rest_dirs = sorted(glob.glob(os.path.join(sta_path, 'rest/*'))) - - if rest_dirs: - _, rest_dir = os.path.split(rest_dirs[-1]) - return rest_dir - - return '0000-00-00' - -# --------------------------------------------------------------------- -def get_case_status(case_dict): -# --------------------------------------------------------------------- - """ get_case_status - Parse the CaseStatus and postprocessing log files - looking for status information - - Arguments: - case_dict (dict) - case dictionary to store XML variables - """ - logger.debug('get_case_status') - - # initialize status variables - case_dict['run_status'] = 'Unknown' - case_dict['run_path'] = case_dict['RUNDIR'] - case_dict['run_size'] = 0 - case_dict['run_last_date'] = case_dict['RUN_STARTDATE'] - - case_dict['sta_status'] = 'Unknown' - case_dict['sta_path'] = case_dict['DOUT_S_ROOT'] - case_dict['sta_size'] = 0 - case_dict['sta_last_date'] = case_dict['RUN_STARTDATE'] - - cstatus = case_dict['CASEROOT']+'/CaseStatus' - if os.path.exists(cstatus): - # get the run status - run_status = is_last_process_complete(cstatus, "case.run success", "case.run starting") - if run_status is True: - case_dict['run_status'] = 'Succeeded' - case_dict['run_size'] = get_disk_usage(case_dict['run_path']) - case_dict['run_last_date'] = get_run_last_date(case_dict['CASE'], case_dict['run_path']) - - # get the STA status - if case_dict['DOUT_S']: - # get only the history, rest and logs dir - ignoring the proc subdirs - sta_status = is_last_process_complete(cstatus, "st_archive success", - "st_archive starting") - case_dict['sta_last_date'] = get_sta_last_date(case_dict['DOUT_S_ROOT']) - if sta_status is True: - case_dict['sta_status'] = 'Succeeded' - # exclude the proc directories in the sta size estimates - for subdir in ['atm/hist', 'cpl/hist', 'esp/hist', 'ice/hist', 'glc/hist', - 'lnd/hist', 'logs', 'ocn/hist', 'rest', 'rof/hist', - 'wav/hist', 'iac/hist']: - path = os.path.join(case_dict['sta_path'], subdir) - if os.path.isdir(path): - case_dict['sta_size'] += get_disk_usage(path) - - # check if the postprocess dir exists in the caseroot - case_dict['postprocess'] = False - if os.path.exists(case_dict['CASEROOT']+'/postprocess'): - case_dict['postprocess'] = True - case_dict = get_pp_status(case_dict) - - return case_dict - -# --------------------------------------------------------------------- -def check_expdb_case(case_dict, username, password): -# --------------------------------------------------------------------- - """ check_exp_case - Cross check the casename with the database for a CMIP6 experiment - - Arguments: - case_dict (dict) - case dictionary to store XML variables - username (string) - SVN developer's username - password (string) - SVN developer's 
password - - Return case_id value; 0 if does not exist or > 0 for exists. - - """ - logger.debug('check_expdb_case') - data_dict = {'casename':case_dict['CASE'], - 'queryType':'checkCaseExists', - 'expType':case_dict['expType']} - data = json.dumps(data_dict) - params = urllib.parse.urlencode(dict(username=username, password=password, data=data)) - try: - response = urllib.request.urlopen(case_dict['query_expdb_url'], params, context=_context) - output = json.loads(response.read().decode()) - except urllib.error.HTTPError as http_e: - logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) - sys.exit(1) - except urllib.error.URLError as url_e: - logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) - sys.exit(1) - - return int(output['case_id']) - -# --------------------------------------------------------------------- -def query_expdb_cmip6(case_dict, username, password): -# --------------------------------------------------------------------- - """ query_exp_case - Query the expdb for CMIP6 casename = case_dict['q_casename'] metadata. - Write out a json file to case_dict['q_outfile']. - - Arguments: - case_dict (dict) - case dictionary to store XML variables - username (string) - SVN developer's username - password (string) - SVN developer's password - - """ - logger.debug('query_expdb_cmip6') - exists = False - data_dict = {'casename':case_dict['q_casename'], - 'queryType':'CMIP6GlobalAtts', - 'expType':'CMIP6'} - data = json.dumps(data_dict) - params = urllib.parse.urlencode(dict(username=username, password=password, data=data)) - try: - response = urllib.request.urlopen(case_dict['query_expdb_url'], params, context=_context) - output = json.load(response) - except urllib.error.HTTPError as http_e: - logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) - except urllib.error.URLError as url_e: - logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) - - if output: - if not os.path.exists('{0}/archive_files'.format(case_dict['workdir'])): - os.makedirs('{0}/archive_files'.format(case_dict['workdir'])) - - filename = '{0}/archive_files/{1}'.format(case_dict['workdir'], case_dict['q_outfile']) - with io.open(filename, 'w+', encoding='utf-8') as fname: - fname.write(json.dumps(output, ensure_ascii=False)) - fname.close() - exists = True - - return exists - -# --------------------------------------------------------------------- -def create_json(case_dict): -# --------------------------------------------------------------------- - """ create_json - Create a JSON file in the caseroot/archive_files dir. - - Arguments: - case_dict (dict) - case dictionary to store XML variables - """ - logger.debug('create_json') - - if not os.path.exists('{0}/archive_files'.format(case_dict['workdir'])): - os.makedirs('{0}/archive_files'.format(case_dict['workdir'])) - - filename = '{0}/archive_files/json.{1}'.format(case_dict['workdir'], - datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) - with io.open(filename, 'wb') as fname: - jstr = str(json.dumps(case_dict, indent=4, sort_keys=True, ensure_ascii=False)) - if isinstance(jstr, str): - jstr = jstr.decode('utf-8') - fname.write(jstr) - fname.close() - -# --------------------------------------------------------------------- -def post_json(case_dict, username, password): -# --------------------------------------------------------------------- - """ post_json - Post a JSON file in the caseroot/archive_files to the - remote expdb URL. 
- - Arguments: - case_dict (dict) - case dictionary to store XML variables - username (string) - SVN developers username - password (string) - SVN developers password - """ - logger.debug('post_json') - - case_dict['COMPSET'] = urllib.parse.quote(case_dict['COMPSET']) - case_dict['GRID'] = urllib.parse.quote(case_dict['GRID']) - data = json.dumps(case_dict) - params = urllib.parse.urlencode(dict(username=username, password=password, data=data)) - try: - urllib.request.urlopen(case_dict['json_expdb_url'], params, context=_context) - except urllib.error.HTTPError as http_e: - logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) - except urllib.error.URLError as url_e: - logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) - -# --------------------------------------------------------------------- -def check_svn(): -# --------------------------------------------------------------------- - """ check_svn - - make sure svn client is installed and accessible - """ - logger.debug('check_svn') - - cmd = ['svn', '--version'] - svn_exists = True - result = '' - try: - result = subprocess.check_output(cmd) - except subprocess.CalledProcessError as error: - msg = _svn_error_template.substitute(function='check_svn', cmd=cmd, - error=error.returncode, strerror=error.output) - svn_exists = False - logger.info(msg) - raise SVNException(msg) - - if 'version' not in result: - msg = 'SVN is not available. Ignoring SVN update' - svn_exists = False - raise SVNException(msg) - - return svn_exists - -# --------------------------------------------------------------------- -def create_temp_archive(case_dict): -# --------------------------------------------------------------------- - """ create_temp_archive - - Create a temporary SVN sandbox directory in the current caseroot - """ - archive_temp_dir = '{0}/archive_temp_dir'.format(case_dict['workdir']) - logger.debug('create_temp_archive %s', archive_temp_dir) - - if not os.path.exists(archive_temp_dir): - os.makedirs(archive_temp_dir) - else: - logger.info('ERROR archive_metadata archive_temp_dir already exists. exiting...') - sys.exit(1) - - return archive_temp_dir - -# --------------------------------------------------------------------- -def check_svn_repo(case_dict, username, password): -# --------------------------------------------------------------------- - """ check_svn_repo - - check if a SVN repo exists for this case - """ - logger.debug('check_svn_repo') - - repo_exists = False - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) - cmd = ['svn', 'list', svn_repo, '--username', username, '--password', password] - result = '' - try: - result = subprocess.check_output(cmd) - except subprocess.CalledProcessError: - msg = 'SVN repo does not exist for this case. A new one will be created.' 
- logger.warning(msg) - - if re.search('README.archive', result): - repo_exists = True - - return repo_exists - -# --------------------------------------------------------------------- -def get_trunk_tag(case_dict, username, password): -# --------------------------------------------------------------------- - """ get_trunk_tag - - return the most recent trunk tag as an integer - """ - logger.debug('get_trunk_tag') - - tag = 0 - svn_repo = '{0}/trunk_tags'.format(case_dict['svn_repo_url']) - cmd = ['svn', 'list', svn_repo, '--username', username, '--password', password] - result = '' - try: - result = subprocess.check_output(cmd) - except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'list', svn_repo, '--username', username, '--password', '******'] - msg = _call_template.substitute(function='get_trunk_tag', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - - if result: - last_tag = [i for i in result.split('\n') if i][-1] - last_tag = last_tag[:-1].split('_')[-1] - tag = int(last_tag.lstrip('0')) - - return tag - -# --------------------------------------------------------------------- -def checkout_repo(case_dict, username, password): -# --------------------------------------------------------------------- - """ checkout_repo - - checkout the repo into the archive_temp_dir - """ - logger.debug('checkout_repo') - - os.chdir(case_dict['archive_temp_dir']) - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) - cmd = ['svn', 'co', '--username', username, '--password', password, svn_repo, '.'] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'co', '--username', username, '--password', '******', svn_repo, '.'] - msg = _call_template.substitute(function='checkout_repo', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - - os.chdir(case_dict['CASEROOT']) - -# --------------------------------------------------------------------- -def create_readme(case_dict): -# --------------------------------------------------------------------- - """ create_readme - - Create a generic README.archive file - """ - logger.debug('create_readme') - os.chdir(case_dict['archive_temp_dir']) - - fname = open('README.archive', 'w') - fname.write('Archived metadata is available for this case at URL:\n') - fname.write(case_dict['base_expdb_url']) - fname.close() - -# --------------------------------------------------------------------- -def update_repo_add_file(filename, dir1, dir2): -# --------------------------------------------------------------------- - """ update_repo_add_file - - Add a file to the SVN repository - """ - src = os.path.join(dir1, filename) - dest = os.path.join(dir2, filename) - logger.debug('left_only: '+src+' -> '+dest) - if not os.path.exists(dest): - shutil.copy2(src, dest) - cmd = ['svn', 'add', '--parents', dest] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - -# --------------------------------------------------------------------- -def update_repo_rm_file(filename, dir1, dir2): -# --------------------------------------------------------------------- - """ update_repo_rm_file - - Remove a file from the SVN repository - """ - src = os.path.join(dir2, filename) - dest = 
os.path.join(dir1, filename) - logger.debug('right_only: '+src+' -> '+dest) - if os.path.exists(dest): - cmd = ['svn', 'rm', dest] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - -# --------------------------------------------------------------------- -def update_repo_copy_file(filename, dir1, dir2): -# --------------------------------------------------------------------- - """ update_repo_copy_file - - Copy a file into the SVN local repo - """ - src = os.path.join(dir1, filename) - dest = os.path.join(dir2, filename) - shutil.copy2(src, dest) - -# --------------------------------------------------------------------- -def compare_dir_trees(dir1, dir2, archive_list): -# --------------------------------------------------------------------- - """ compare_dir_trees - - Compare two directories recursively. Files in each directory are - assumed to be equal if their names and contents are equal. - """ - xml_files = glob.glob(os.path.join(dir1, '*.xml')) - user_nl_files = glob.glob(os.path.join(dir1, 'user_nl_*')) - dirs_cmp = filecmp.dircmp(dir1, dir2, _ignore_patterns) - - left_only = [fn for fn in dirs_cmp.left_only if not os.path.islink(fn) - and (fn in xml_files or fn in user_nl_files or fn in archive_list)] - right_only = [fn for fn in dirs_cmp.right_only if not os.path.islink(fn) - and (fn in xml_files or fn in user_nl_files or fn in archive_list)] - funny_files = [fn for fn in dirs_cmp.funny_files if not os.path.islink(fn) - and (fn in xml_files or fn in user_nl_files or fn in archive_list)] - - # files and directories need to be added to svn repo from the caseroot - if left_only: - for filename in left_only: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': - update_repo_add_file(filename, dir1, dir2) - else: - new_dir1 = os.path.join(dir1, filename) - new_dir2 = os.path.join(dir2, filename) - os.makedirs(new_dir2) - cmd = ['svn', 'add', new_dir2] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - - # recurse through this new subdir - new_archive_list = [filename] - compare_dir_trees(new_dir1, new_dir2, new_archive_list) - - # files need to be removed from svn repo that are no longer in the caseroot - if right_only: - for filename in right_only: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': - update_repo_rm_file(filename, dir1, dir2) - - # files are the same but could not be compared so copy the caseroot version - if funny_files: - for filename in funny_files: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': - update_repo_copy_file(filename, dir1, dir2) - - # common files have changed in the caseroot and need to be copied to the svn repo - (_, mismatch, errors) = filecmp.cmpfiles( - dir1, dir2, dirs_cmp.common_files, shallow=False) - if mismatch: - for filename in mismatch: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': - update_repo_copy_file(filename, dir1, dir2) - - # error in file comparison so copy the caseroot file to the svn repo - if errors: - for filename in errors: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': - 
update_repo_copy_file(filename, dir1, dir2) - - # recurse through the subdirs - common_dirs = dirs_cmp.common_dirs - if common_dirs: - for common_dir in common_dirs: - if common_dir in archive_list: - new_dir1 = os.path.join(dir1, common_dir) - new_dir2 = os.path.join(dir2, common_dir) - compare_dir_trees(new_dir1, new_dir2, archive_list) - else: - return - -# --------------------------------------------------------------------- -def update_local_repo(case_dict, ignore_logs, ignore_timing): -# --------------------------------------------------------------------- - """ update_local_repo - - Compare and update local SVN sandbox - """ - logger.debug('update_local_repo') - from_dir = case_dict['CASEROOT'] - to_dir = case_dict['archive_temp_dir'] - - compare_dir_trees(from_dir, to_dir, case_dict['archive_list']) - - # check if ignore_logs is specified - if ignore_logs: - os.chdir(to_dir) - if os.path.isdir('./logs'): - try: - shutil.rmtree('./logs') - except OSError: - logger.warning('in "update_local_repo" - Unable to remove "logs" in archive dir.') - - cmd = ['svn', 'delete', './logs'] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - - if os.path.isdir('./postprocess/logs'): - os.chdir('./postprocess') - try: - shutil.rmtree('./logs') - except OSError: - logger.warning('in "update_local_repo" - '\ - 'Unable to remove "postprocess/logs" in archive dir.') - - cmd = ['svn', 'delete', './logs'] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - else: - # add log files - if os.path.exists('{0}/logs'.format(from_dir)): - if not os.path.exists('{0}/logs'.format(to_dir)): - os.makedirs('{0}/logs'.format(to_dir)) - os.chdir(os.path.join(from_dir, 'logs')) - for filename in glob.glob('*.*'): - update_repo_add_file(filename, os.path.join(from_dir, 'logs'), - os.path.join(to_dir, 'logs')) - - if os.path.exists('{0}/postprocess/logs'.format(from_dir)): - if not os.path.exists('{0}/postprocess/logs'.format(to_dir)): - os.makedirs('{0}/postprocess/logs'.format(to_dir)) - os.chdir(os.path.join(from_dir, 'postprocess/logs')) - for filename in glob.glob('*.*'): - update_repo_add_file(filename, os.path.join(from_dir, 'postprocess', 'logs'), - os.path.join(to_dir, 'postprocess', 'logs')) - - - # check if ignore_timing is specified - if ignore_timing: - os.chdir(case_dict['archive_temp_dir']) - if os.path.isdir('./timing'): - try: - shutil.rmtree('./timing') - except OSError: - logger.warning('in "update_local_repo" - Unable to remove "timing" in archive dir.') - - cmd = ['svn', 'delete', './timing'] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - else: - # add timing files - if os.path.exists('{0}/timing'.format(from_dir)): - if not os.path.exists('{0}/timing'.format(to_dir)): - os.makedirs('{0}/timing'.format(to_dir)) - os.chdir(os.path.join(from_dir, 'timing')) - for filename in glob.glob('*.*'): - update_repo_add_file(filename, os.path.join(from_dir, 'timing'), - os.path.join(to_dir, 'timing')) - - 
-# --------------------------------------------------------------------- -def populate_local_repo(case_dict, ignore_logs, ignore_timing): -# --------------------------------------------------------------------- - """ populate_local_repo - - Populate local SVN sandbox - """ - logger.debug('populate_local_repo') - os.chdir(case_dict['CASEROOT']) - - # loop through the archive_list and copy to the temp archive dir - for archive in case_dict['archive_list']: - if os.path.exists(archive): - if os.path.isdir(archive): - try: - target = case_dict['archive_temp_dir']+'/'+archive - shutil.copytree(archive, target, symlinks=False, - ignore=shutil.ignore_patterns(*_ignore_patterns)) - except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=archive, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) - logger.warning(msg) - else: - try: - shutil.copy2(archive, case_dict['archive_temp_dir']) - except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=archive, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) - logger.warning(msg) - - # add files with .xml as the suffix - xml_files = glob.glob('*.xml') - for xml_file in xml_files: - if os.path.isfile(xml_file): - try: - shutil.copy2(xml_file, case_dict['archive_temp_dir']) - except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=xml_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) - logger.warning(msg) - - # add files with .xml as the suffix from the postprocess directory - if os.path.isdir('./postprocess'): - pp_path = '{0}/{1}'.format(case_dict['archive_temp_dir'], 'postprocess') - if not os.path.exists(pp_path): - os.mkdir(pp_path) - xml_files = glob.glob('./postprocess/*.xml') - for xml_file in xml_files: - if os.path.isfile(xml_file): - try: - shutil.copy2(xml_file, pp_path) - except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=xml_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) - logger.warning(msg) - - # add files with user_nl_ as the prefix - user_files = glob.glob('user_nl_*') - for user_file in user_files: - if os.path.isfile(user_file): - try: - shutil.copy2(user_file, case_dict['archive_temp_dir']) - except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=user_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) - logger.warning(msg) - - # add files with Depends as the prefix - conf_files = glob.glob('Depends.*') - for conf_file in conf_files: - if os.path.isfile(conf_file): - try: - shutil.copy2(conf_file, case_dict['archive_temp_dir']) - except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=conf_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) - logger.warning(msg) - - # check if ignore_logs is specified - if ignore_logs: - os.chdir(case_dict['archive_temp_dir']) - if os.path.isdir('./logs'): - try: - shutil.rmtree('./logs') - except OSError: - logger.warning('in "populate_local_repo" - Unable to remove "logs" in archive_temp_dir.') - if os.path.isdir('./postprocess/logs'): - os.chdir('./postprocess') - try: - shutil.rmtree('./logs') - except OSError: - logger.warning('in "populate_local_repo" - ' \ - 'Unable to remove "postprocess/logs" in 
archive_temp_dir.') - os.chdir(case_dict['CASEROOT']) - - # check if ignore_timing is specified - if ignore_timing: - os.chdir(case_dict['archive_temp_dir']) - if os.path.isdir('./timing'): - try: - shutil.rmtree('./timing') - except OSError: - logger.warning('in "populate_local_repo" - Unable to remove "timing" in archive_temp_dir.') - os.chdir(case_dict['CASEROOT']) - - -# --------------------------------------------------------------------- -def checkin_trunk(case_dict, svn_cmd, message, username, password): -# --------------------------------------------------------------------- - """ checkin_trunk - - Check in the local SVN sandbox to the remote trunk - """ - logger.debug('checkin_trunk') - - os.chdir(case_dict['archive_temp_dir']) - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) - msg = '"{0}"'.format(message) - cmd = ['svn', svn_cmd, '--username', username, - '--password', password, '.', '--message', msg] - - if svn_cmd in ['import']: - # create the trunk dir - msg = '"create trunk"' - cmd = ['svn', 'mkdir', '--parents', svn_repo, - '--username', username, '--password', password, '--message', msg] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'mkdir', '--parents', svn_repo, - '--username', username, '--password', '******', - '--message', msg] - msg = _call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - - # create the trunk_tags dir - tags = '{0}/trunk_tags'.format(case_dict['svn_repo_url']) - msg = '"create trunk_tags"' - cmd = ['svn', 'mkdir', tags, '--username', username, - '--password', password, '--message', msg] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'mkdir', tags, '--username', username, - '--password', '******', '--message', msg] - msg = _call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - - msg = '"{0}"'.format(message) - cmd = ['svn', svn_cmd, '--username', username, '--password', password, '.', - svn_repo, '--message', msg] - - # check-in the trunk to svn - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', svn_cmd, '--username', username, - '--password', '******', '.', '--message', msg] - msg = _call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - -# --------------------------------------------------------------------- -def create_tag(case_dict, new_tag, username, password): -# --------------------------------------------------------------------- - """ create_tag - - create a new trunk tag in the remote repo - """ - logger.debug('create_tag') - - # create a new trunk tag - os.chdir(case_dict['archive_temp_dir']) - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) - svn_repo_tag = '{0}/trunk_tags/{1}'.format(case_dict['svn_repo_url'], new_tag) - msg = '"create new trunk tag"' - cmd = ['svn', 'copy', '--username', username, '--password', password, - svn_repo, svn_repo_tag, '--message', msg] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'copy', '--username', username, '--password', '******', - svn_repo, svn_repo_tag, '--message', msg] - msg = 
_call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) - logger.warning(msg) - raise SVNException(msg) - -# ------------------------------------------------------------------------- -def update_repo(ignore_logs, ignore_timing, case_dict, username, password): -# ------------------------------------------------------------------------- - """ update_repo - - Update SVN repo - """ - logger.debug('update_repo') - - try: - # check if svn client is installed - svn_exists = check_svn() - - if svn_exists: - # check if the case repo exists - case_dict['svn_repo_url'] = '{0}/{1}'.format(_svn_expdb_url, case_dict['CASE']) - repo_exists = check_svn_repo(case_dict, username, password) - case_dict['archive_temp_dir'] = create_temp_archive(case_dict) - case_dict['archive_list'] = _archive_list + case_dict['user_add_files'] - - if repo_exists: - # update trunk and make a new tag - last_tag = get_trunk_tag(case_dict, username, password) - new_tag = '{0}_{1}'.format(case_dict['CASE'], str(last_tag+1).zfill(4)) - checkout_repo(case_dict, username, password) - update_local_repo(case_dict, ignore_logs, ignore_timing) - msg = 'update case metadata for {0} by {1}'.format(case_dict['CASE'], username) - checkin_trunk(case_dict, 'ci', msg, username, password) - create_tag(case_dict, new_tag, username, password) - logger.info('SVN repository trunk updated at URL "%s"', case_dict['svn_repo_url']) - logger.info(' and a new trunk tag created "%s"', new_tag) - else: - # create a new case repo - new_tag = '{0}_0001'.format(case_dict['CASE']) - create_readme(case_dict) - populate_local_repo(case_dict, ignore_logs, ignore_timing) - msg = ('initial import of case metadata for {0} by {1}' - .format(case_dict['CASE'], username)) - checkin_trunk(case_dict, 'import', msg, username, password) - create_tag(case_dict, new_tag, username, password) - logger.info('SVN repository imported to trunk URL "%s"', case_dict['svn_repo_url']) - logger.info(' and a new trunk tag created for "%s"', new_tag) - - except SVNException: - pass - - return case_dict - -# --------------------------------------------------------------------- -def get_timing_data(case_dict): -# --------------------------------------------------------------------- - """ get_timing_data - parse the timing data file and add information to the case_dict - - Arguments: - case_dict (dict) - case dictionary to store XML variables - """ - logger.debug('get_timing_data') - - # initialize the timing values in the dictionary - case_dict['model_cost'] = 'undefined' - case_dict['model_throughput'] = 'undefined' - - timing_dir = case_dict['CASEROOT']+'/timing' - last_time = '' - if os.path.exists(timing_dir): - # check if timing files exists - timing_file_pattern = 'cesm_timing.'+case_dict['CASE'] - last_time = max(glob.glob(timing_dir+'/'+timing_file_pattern+'.*'), - key=os.path.getctime) - if last_time: - if 'gz' in last_time: - # gunzip file first - with gzip.open(last_time, 'rb') as fname: - file_content = fname.readlines() - else: - with open(last_time, 'r') as fname: - file_content = fname.readlines() - - # search the file content for matching lines - model_cost = [line for line in file_content if 'Model Cost:' in line] - model_throughput = [line for line in file_content if 'Model Throughput:' in line] - - case_dict['model_cost'] = ' '.join(model_cost[0].split()) - case_dict['model_throughput'] = ' '.join(model_throughput[0].split()) - - return case_dict - -# 
--------------------------------------------------------------------- -def initialize_main(options): -# --------------------------------------------------------------------- - """ initialize_main - - Initialize the case dictionary data structure with command line options - """ - logger.debug('intialize_main') - - case_dict = dict() - - case_dict['CASEROOT'] = os.getcwd() - if options.caseroot: - case_dict['CASEROOT'] = options.caseroot[0] - - case_dict['workdir'] = case_dict['CASEROOT'] - if options.workdir: - case_dict['workdir'] = options.workdir[0] - - username = None - if options.user: - username = options.user - case_dict['svnlogin'] = username - - password = None - if options.password: - password = options.password - - if options.expType: - case_dict['expType'] = options.expType[0] - - case_dict['title'] = None - if options.title: - case_dict['title'] = options.title[0] - - case_dict['dryrun'] = False - if options.dryrun: - case_dict['dryrun'] = True - - case_dict['archive_temp_dir'] = '' - - case_dict['user_add_files'] = list() - if options.user_add_files: - case_dict['user_add_files'] = options.user_add_files.split(',') - - case_dict['q_casename'] = '' - case_dict['q_outfile'] = '' - if options.query_cmip6: - case_dict['q_casename'] = options.query_cmip6[0] - case_dict['q_outfile'] = options.query_cmip6[1] - - case_dict['base_expdb_url'] = 'https://csegweb.cgd.ucar.edu/expdb2.0' - if options.test_post: - case_dict['base_expdb_url'] = 'https://csegwebdev.cgd.ucar.edu/expdb2.0' - case_dict['json_expdb_url'] = case_dict['base_expdb_url'] + '/cgi-bin/processJSON.cgi' - case_dict['query_expdb_url'] = case_dict['base_expdb_url'] + '/cgi-bin/query.cgi' - - return case_dict, username, password - -# --------------------------------------------------------------------- -def main_func(options): -# --------------------------------------------------------------------- - """ main function - - Arguments: - options (list) - input options from command line - """ - logger.debug('main_func') - - (case_dict, username, password) = initialize_main(options) - - # check if query_cmip6 argument is specified - if options.query_cmip6: - if case_dict['dryrun']: - logger.info('Dryrun - calling query_expdb_cmip6 for case metadata') - else: - if query_expdb_cmip6(case_dict, username, password): - logger.info('Casename "%s" CMIP6 global attribute '\ - 'metadata written to "%s/archive_files/%s" ' \ - 'from "%s"', - case_dict['workdir'], case_dict['q_casename'], - case_dict['q_outfile'], case_dict['query_expdb_url']) - logger.info('Successful completion of archive_metadata') - sys.exit(0) - else: - logger.info('ERROR archive_metadata failed to find "%s" '\ - 'in experiments database at "%s".', - case_dict['q_casename'], case_dict['query_expdb_url']) - sys.exit(1) - - # loop through the _xml_vars gathering values - with Case(case_dict['CASEROOT'], read_only=True) as case: - if case_dict['dryrun']: - logger.info('Dryrun - calling get_case_vars') - else: - case_dict = get_case_vars(case_dict, case) - - # check reserved casename expdb for CMIP6 experiments - if case_dict['expType'].lower() == 'cmip6': - if case_dict['dryrun']: - logger.info('Dryrun - calling check_expdb_case for CMIP6 experiment reservation') - else: - case_dict['case_id'] = check_expdb_case(case_dict, username, password) - if case_dict['case_id'] < 1: - logger.info('Unable to archive CMIP6 metadata. '\ - '"%s" casename does not exist in database. 
'\ - 'All CMIP6 experiments casenames must be '\ - 'reserved in the experiments database at URL: '\ - 'https://csegweb.cgd.ucar.edu/expdb2.0 '\ - 'prior to running archive_metadata.', case_dict['CASE']) - sys.exit(1) - - # get the case status into the case_dict - if case_dict['dryrun']: - logger.info('Dryrun - calling get_case_status') - else: - case_dict = get_case_status(case_dict) - - # create / update the cesm expdb repo with the caseroot files - if not options.ignore_repo_update: - if case_dict['dryrun']: - logger.info('Dryrun - calling update_repo') - else: - case_dict = update_repo(options.ignore_logs, options.ignore_timing, - case_dict, username, password) - - # parse the timing data into the case_dict - if not options.ignore_timing: - if case_dict['dryrun']: - logger.info('Dryrun - calling get_timing_data') - else: - case_dict = get_timing_data(case_dict) - - # Create a JSON file containing the case_dict with the date appended to the filename - if case_dict['dryrun']: - logger.info('Dryrun - calling create_json') - else: - create_json(case_dict) - - # post the JSON to the remote DB - if case_dict['dryrun']: - logger.info('Dryrun - calling post_json') - else: - post_json(case_dict, username, password) - - # clean-up the temporary archive files dir - if case_dict['dryrun']: - logger.info('Dryrun - deleting "./archive_temp_dir"') - else: - if not options.ignore_repo_update and os.path.exists(case_dict['archive_temp_dir']): - shutil.rmtree(case_dict['archive_temp_dir']) - - logger.info('Successful completion of archive_metadata') - - return 0 - -#=================================== -if __name__ == "__main__": - - try: - __status__ = main_func(commandline_options(sys.argv)) - sys.exit(__status__) - except Exception as error: - print("{}".format(str(error))) - sys.exit(1) diff --git a/scripts/Tools/bld_diff b/scripts/Tools/bld_diff deleted file mode 100755 index 8b5256c23f8..00000000000 --- a/scripts/Tools/bld_diff +++ /dev/null @@ -1,172 +0,0 @@ -#! 
/usr/bin/env python - -""" -Try to calculate and succinctly present the differences between two bld logs -for the same component -""" - -from standard_script_setup import * -from CIME.utils import expect, run_cmd_no_fail - -import argparse, sys, os, gzip - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} log1 log2 -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - > {0} case1 case2 -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("log1", help="First log.") - - parser.add_argument("log2", help="Second log.") - - parser.add_argument("-I", "--ignore-includes", action="store_true", - help="Ignore differences in include flags") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.log1, args.log2, args.ignore_includes - -############################################################################### -def is_compile_line(line): -############################################################################### - return line.count("-I") > 1 and not line.startswith("gmake ") and not line.startswith("make ") - -############################################################################### -def get_compile_lines_from_log(logfile_text): -############################################################################### - result = [] - for line in logfile_text.splitlines(): - if is_compile_line(line): - result.append(line) - - return result - -_SRCFILE_ENDINGS = (".F", ".f", ".c", ".F90", ".f90", ".cpp") -############################################################################### -def parse_log(logfile_text): -############################################################################### - compile_lines = get_compile_lines_from_log(logfile_text) - result = {} - for compile_line in compile_lines: - items = compile_line.split() - compiled_file = None - for item in items: - for ending in _SRCFILE_ENDINGS: - if item.endswith(ending): - expect(compiled_file is None, "Found multiple things that look like files in '{}'".format(compile_line)) - compiled_file = item - - if compiled_file is None: - print("WARNING: Found nothing that looks like a file in '{}'".format(compile_line)) - else: - expect(compiled_file not in result, "Found multiple compilations of {}?".format(compiled_file)) - result[compiled_file] = items - - # TODO - Need to capture link lines too - - return result - -############################################################################### -def get_case_from_log(logpath): -############################################################################### - return os.path.abspath(os.path.join(os.path.dirname(logpath), "..")) - -############################################################################### -def read_maybe_gzip(filepath): -############################################################################### - opener = lambda: gzip.open(filepath, "rt") if filepath.endswith(".gz") else open(filepath, "r") - with opener() as fd: - return fd.read() - -############################################################################### -def log_diff(log1, log2, repls, ignore_includes): -############################################################################### - """ - Search for build/link 
commands and compare them - """ - are_same = True - - # Read files - log1_contents = read_maybe_gzip(log1) - log2_contents = read_maybe_gzip(log2) - - # Normalize log2 - for replace_item, replace_with in repls.items(): - log2_contents = log2_contents.replace(replace_item, replace_with) - - # Transform log contents to a map of filename -> compile_args - compile_dict1 = parse_log(log1_contents) - compile_dict2 = parse_log(log2_contents) - - file_set1 = set(compile_dict1.keys()) - file_set2 = set(compile_dict2.keys()) - - for item in (file_set1 - file_set2): - print("{} is missing compilation of {}".format(log2, item)) - are_same = False - - for item in (file_set2 - file_set1): - print("{} has unexpected compilation of {}".format(log2, item)) - are_same = False - - for item in (file_set1 & file_set2): - print("Checking compilation of {}".format(item)) - flags1 = compile_dict1[item] - flags2 = compile_dict2[item] - - missing = set(flags1) - set(flags2) - extra = set(flags2) - set(flags1) - - # Let's not worry about order yet even though some flags are order-sensitive - for flag in missing: - if not (ignore_includes and flag.startswith("-I")): - print(" Missing flag {}".format(flag)) - are_same = False - - for flag in extra: - if flag != "-o" and not flag.startswith("CMakeFiles") and not (ignore_includes and flag.startswith("-I")): - print(" Extra flag {}".format(flag)) - are_same = False - - return are_same - -############################################################################### -def _main_func(description): -############################################################################### - log1, log2, ignore_includes = parse_command_line(sys.argv, description) - - xml_normalize_fields = ["TEST_TESTID", "SRCROOT"] - repls = {} - for xml_normalize_field in xml_normalize_fields: - try: - case1 = get_case_from_log(log1) - case2 = get_case_from_log(log2) - val1 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case1) - val2 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case2) - if os.sep in val1: - repls[os.path.normpath(val2)] = os.path.normpath(val1) - else: - repls[val2] = val1 - except Exception as e: - logging.warning("Warning, failed to normalize on {}: {}".format(xml_normalize_field, str(e))) - repls = {} - - same = log_diff(log1, log2, repls, ignore_includes) - sys.exit(0 if same == 0 else 1) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/bless_test_results b/scripts/Tools/bless_test_results deleted file mode 100755 index 8ff276b2811..00000000000 --- a/scripts/Tools/bless_test_results +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python - -""" -Analyze results from a test root area, finding namelist and non-BFB -changes, and updating baselines. Purpose is, instead of re-running tests -in generate mode, which is very slow, allow for very fast analsis and -blessing of diffs. - -You may need to load modules for cprnc to work. 
-""" - -from standard_script_setup import * - -from CIME.utils import expect -from CIME.XML.machines import Machines -from CIME.bless_test_results import bless_test_results - -import argparse, sys, os - -_MACHINE = Machines() - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [-n] [-r ] [-b ] [-c ] [ ...] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# From most recent run, bless any namelist changes \033[0m - > {0} -n - \033[1;32m# From most recent run, bless all changes \033[0m - > {0} - \033[1;32m# From most recent run, bless changes to test foo and bar only \033[0m - > {0} foo bar - \033[1;32m# From most recent run, bless only namelist changes to test foo and bar only \033[0m - > {0} -n foo bar - \033[1;32m# From most recent run of jenkins, bless history changes for next \033[0m - > {0} -r /home/jenkins/acme/scratch/jenkins -b next --hist-only -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - default_compiler = _MACHINE.get_default_compiler() - scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") - default_testroot = os.path.join(scratch_root) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("-n", "--namelists-only", action="store_true", - help="Only analyze namelists.") - - parser.add_argument("--hist-only", action="store_true", - help="Only analyze history files.") - - parser.add_argument("-b", "--baseline-name", - help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.") - - parser.add_argument("--baseline-root", - help="Root of baselines. Default will use the BASELINE_ROOT from the case.") - - parser.add_argument("-c", "--compiler", default=default_compiler, - help="Compiler of run you want to bless") - - parser.add_argument("-p", "--no-skip-pass", action="store_true", - help="Normally, if namelist or baseline phase exists and shows PASS, we assume no bless is needed. " - "This option forces the bless to happen regardless.") - - parser.add_argument("--report-only", action="store_true", - help="Only report what files will be overwritten and why. Caution is a good thing when updating baselines") - - parser.add_argument("-r", "--test-root", default=default_testroot, - help="Path to test results that are being blessed") - - parser.add_argument("-t", "--test-id", - help="Limit processes to case dirs matching this test-id. Can be useful if mutiple runs dumped into the same dir.") - - parser.add_argument("-f", "--force", action="store_true", - help="Update every diff without asking. VERY DANGEROUS. 
Should only be used within testing scripts.") - - parser.add_argument("bless_tests", nargs="*", - help="When blessing, limit the bless to tests matching these regex") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - expect(not (args.report_only and args.force), - "Makes no sense to use -r and -f simultaneously") - expect(not (args.namelists_only and args.hist_only), - "Makes no sense to use --namelists-only and --hist-only simultaneously") - - return args.baseline_name, args.baseline_root, args.test_root, args.compiler, args.test_id, args.namelists_only, args.hist_only, args.report_only, args.force, args.bless_tests, args.no_skip_pass - -############################################################################### -def _main_func(description): -############################################################################### - baseline_name, baseline_root, test_root, compiler, test_id, namelists_only, hist_only, report_only, force, bless_tests, no_skip_pass = \ - parse_command_line(sys.argv, description) - - success = bless_test_results(baseline_name, baseline_root, test_root, compiler, - test_id=test_id, namelists_only=namelists_only, hist_only=hist_only, - report_only=report_only, force=force, bless_tests=bless_tests, no_skip_pass=no_skip_pass) - sys.exit(0 if success else 1) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/case.build b/scripts/Tools/case.build deleted file mode 100755 index 162fd6123f8..00000000000 --- a/scripts/Tools/case.build +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env python - -""" -Builds the case. - -case.setup must be run before this. In addition, any changes to env_build.xml -must be made before running this. - -This must be run before running case.submit. - -There are two usage modes; both modes accept the --caseroot option, but -other options are specific to one mode or the other: - -1) To build the model: - - Typical usage is simply: - ./case.build - - This can be used for the initial build as well as for incrementally - rebuilding after changing some source files. - - Optionally, you can specify one of the following options, although this is - not common: - --sharedlib-only - --model-only - --build ... - - In addition, if you'd like to skip saving build provenance (typically because - there was some error in doing so), you can add: - --skip-provenance-check - -2) To clean part or all of the build: - - To clean the whole build; this should be done after modifying either - env_build.xml or Macros.make: - ./case.build --clean-all - - To clean select portions of the build, for example, after adding new source - files for one component: - ./case.build --clean ... - or: - ./case.build --clean-depends ... 
-""" - -from standard_script_setup import * - -import CIME.build as build -from CIME.case import Case -from CIME.utils import find_system_test -from CIME.test_status import * - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build.\n" - "Default is current directory.") - - mutex_group = parser.add_mutually_exclusive_group() - - # TODO mvertens: the following is hard-wired - otherwise it does not work with nuopc - # files = Files() - # config_file = files.get_value("CONFIG_CPL_FILE") - # component = Component(config_file, "CPL") - # comps = [x.lower() for x in component.get_valid_model_components()] - comps = ["cpl","atm","lnd","ice","ocn","rof","glc","wav","esp","iac"] - libs = ["csmshare", "mct", "pio", "gptl"] - allobjs = comps + libs - - mutex_group.add_argument("--sharedlib-only", action="store_true", - help="Only build shared libraries.") - - mutex_group.add_argument("-m", "--model-only", action="store_true", - help="Assume shared libraries are already built.") - - mutex_group.add_argument("-b", "--build", nargs="+", choices=allobjs, - help="Libraries to build.\n" - "Will cause namelist generation to be skipped.") - - mutex_group.add_argument("--skip-provenance-check", action="store_true", - help="Do not check and save build provenance") - - mutex_group.add_argument("--clean-all", action="store_true", - help="Clean all objects (including sharedlib objects that may be\n" - "used by other builds).") - - mutex_group.add_argument("--clean", nargs="*", choices=allobjs, - help="Clean objects associated with specific libraries.\n" - "With no arguments, clean all objects other than sharedlib objects.") - - mutex_group.add_argument("--clean-depends", nargs="*", choices=comps+["csmshare"], - help="Clean Depends and Srcfiles only.\n" - "This allows you to rebuild after adding new\n" - "files in the source tree or in SourceMods.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - clean_depends = args.clean_depends if args.clean_depends is None or len(args.clean_depends) else comps - - cleanlist = args.clean if args.clean is None or len(args.clean) else comps - buildlist = None if args.build is None or len(args.build) == 0 else args.build - - return args.caseroot, args.sharedlib_only, args.model_only, cleanlist, args.clean_all, buildlist, clean_depends, not args.skip_provenance_check - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, sharedlib_only, model_only, cleanlist, clean_all, buildlist,clean_depends, save_build_provenance = parse_command_line(sys.argv, description) - - success = True - with Case(caseroot, read_only=False) as case: - testname = case.get_value('TESTCASE') - - if cleanlist is not None or clean_all or clean_depends is not None: - build.clean(case, cleanlist=cleanlist, clean_all=clean_all, clean_depends=clean_depends) - elif(testname is not None): - logging.warning("Building test for {} in directory {}".format(testname, - caseroot)) - try: - # The following line can throw 
exceptions if the testname is - # not found or the test constructor throws. We need to be - # sure to leave TestStatus in the appropriate state if that - # happens. - test = find_system_test(testname, case)(case) - except BaseException: - phase_to_fail = MODEL_BUILD_PHASE if model_only else SHAREDLIB_BUILD_PHASE - with TestStatus(test_dir=caseroot) as ts: - ts.set_status(phase_to_fail, TEST_FAIL_STATUS, comments="failed to initialize") - raise - - expect(buildlist is None, "Build lists don't work with tests") - success = test.build(sharedlib_only=sharedlib_only, model_only=model_only) - else: - success = build.case_build(caseroot, case=case, sharedlib_only=sharedlib_only, - model_only=model_only, buildlist=buildlist, - save_build_provenance=save_build_provenance) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/case.cmpgen_namelists b/scripts/Tools/case.cmpgen_namelists deleted file mode 100755 index 61f6ea54a74..00000000000 --- a/scripts/Tools/case.cmpgen_namelists +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python - -""" -case.cmpgen_namelists - perform namelist baseline operations (compare, -generate, or both) for this case. -""" - -from standard_script_setup import * - -from CIME.case import Case -from argparse import RawTextHelpFormatter - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory for which namelists are compared/generated. " - "\nDefault is current directory.") - - parser.add_argument("-c", "--compare", action="store_true", - help="Force a namelist comparison against baselines. " - "\nDefault is to follow the case specification.") - - parser.add_argument("-g", "--generate", action="store_true", - help="Force a generation of namelist baselines. " - "\nDefault is to follow the case specification.") - - parser.add_argument("--compare-name", - help="Force comparison to use baselines with this name. " - "\nDefault is to follow the case specification.") - - parser.add_argument("--generate-name", - help="Force generation to use baselines with this name. " - "\nDefault is to follow the case specification.") - - parser.add_argument("--baseline-root", - help="Root of baselines. 
" - "\nDefault is the case's BASELINE_ROOT.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot, args.compare, args.generate, args.compare_name, args.generate_name, args.baseline_root - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, compare, generate, compare_name, generate_name, baseline_root \ - = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - success = case.case_cmpgen_namelists(compare, generate, compare_name, generate_name, baseline_root) - - sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/case.qstatus b/scripts/Tools/case.qstatus deleted file mode 100755 index 9cd2199b39a..00000000000 --- a/scripts/Tools/case.qstatus +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python - -""" -Shows the batch status of all jobs associated with this case. - -Typical usage is simply: - ./case.qstatus -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.test_status import * - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to query.\n" - "Default is current directory.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot - -############################################################################### -def _main_func(description): -############################################################################### - caseroot = parse_command_line(sys.argv, description) - - with Case(caseroot, read_only=False) as case: - case.report_job_status() - - sys.exit(0) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/case.setup b/scripts/Tools/case.setup deleted file mode 100755 index 8e720025608..00000000000 --- a/scripts/Tools/case.setup +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -""" -Creates various files and directories needed in order to build the case, -create namelists and run the case. - -Any changes to env_mach_pes.xml and env_mach_specific.xml must be made -before running this. - -This must be run before running case.build. 
- -To run this initially for the case, simply run: - ./case.setup - -To rerun after making changes to env_mach_pes.xml or env_mach_specific.xml, run: - ./case.setup --reset -""" - -from standard_script_setup import * -from CIME.case import Case - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to setup.\n" - "Default is current directory.") - - parser.add_argument("-c", "--clean", action="store_true", - help="Removes the batch run script for target machine.\n" - "If the testmode argument is present then keep the test\n" - "script if it is present - otherwise remove it.\n" - "The user_nl_xxx and Macros files are never removed by case.setup -\n" - "you must remove them manually.") - - parser.add_argument("-t", "--test-mode", action="store_true", - help="Keeps the test script when the --clean argument is used.") - - parser.add_argument("-r", "--reset", action="store_true", - help="Does a clean followed by setup.\n" - "This flag should be used when rerunning case.setup after it\n" - "has already been run for this case.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot, args.clean, args.test_mode, args.reset - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, clean, test_mode, reset = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - case.case_setup(clean=clean, test_mode=test_mode, reset=reset) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/case.submit b/scripts/Tools/case.submit deleted file mode 100755 index e6b4304135b..00000000000 --- a/scripts/Tools/case.submit +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -""" -Submits the case to the queuing system, or runs it if there is no queueing system. - -Also submits any other jobs (such as the short-term archiver) associated with this case. - -Running case.submit is the only way you should start a job. 
- -Typical usage is simply: - ./case.submit - -Other examples: - ./case.submit -m begin,end - Submits the case, requesting mail at job beginning and end -""" - -from standard_script_setup import * -from CIME.case import Case -from six.moves import configparser - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to submit.\n" - "Default is current directory.") - - parser.add_argument("--job", "-j", - help="Name of the job to be submitted;\n" - "can be any of the jobs listed in env_batch.xml.\n" - "Default is case.run.") - - parser.add_argument("--no-batch", action="store_true", - help="Do not submit jobs to batch system, run locally.") - - parser.add_argument("--prereq", - help="Specify a prerequisite job id, this job will not start until the\n" - "job with this id is completed (batch mode only).") - - parser.add_argument("--prereq-allow-failure", action="store_true", - help="Allows starting the run even if the prerequisite fails.\n" - "This also allows resubmits to run if the original failed and the\n" - "resubmit was submitted to the queue with the orginal as a dependency,\n" - "as in the case of --resubmit-immediate.") - - parser.add_argument("--resubmit", action="store_true", - help="Used with tests only, to continue rather than restart a test.") - - parser.add_argument("--resubmit-immediate", action="store_true", - help="This queues all of the resubmissions immediately after\n" - "the first job is queued. 
These rely on the queue system to\n" - "handle dependencies.") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run.") - - CIME.utils.add_mail_type_args(parser) - - parser.add_argument("-a", "--batch-args", - help="Used to pass additional arguments to batch system.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - CIME.utils.resolve_mail_type_args(args) - - return (args.caseroot, args.job, args.no_batch, args.prereq, args.prereq_allow_failure, - args.resubmit, args.resubmit_immediate, args.skip_preview_namelist, args.mail_user, - args.mail_type, args.batch_args) - -############################################################################### -def _main_func(description, test_args=False): -############################################################################### - caseroot, job, no_batch, prereq, allow_fail, resubmit, resubmit_immediate, skip_pnl, \ - mail_user, mail_type, batch_args = parse_command_line(sys.argv, description) - - # save these options to a hidden file for use during resubmit - config_file = os.path.join(caseroot,".submit_options") - if skip_pnl or mail_user or mail_type or batch_args: - config = configparser.RawConfigParser() - config.add_section("SubmitOptions") - if skip_pnl: - config.set("SubmitOptions", "skip_pnl", "True") - if mail_user: - config.set("SubmitOptions", "mail_user", mail_user) - if mail_type: - config.set("SubmitOptions", "mail_type", ",".join(mail_type)) - if batch_args: - config.set("SubmitOptions", "batch_args", batch_args) - with open(config_file, "w") as fd: - config.write(fd) - elif os.path.exists(config_file): - os.remove(config_file) - - if not test_args: - with Case(caseroot, read_only=False) as case: - case.submit(job=job, no_batch=no_batch, prereq=prereq, allow_fail=allow_fail, - resubmit=resubmit, resubmit_immediate=resubmit_immediate, skip_pnl=skip_pnl, - mail_user=mail_user, mail_type=mail_type, batch_args=batch_args) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/case_diff b/scripts/Tools/case_diff deleted file mode 100755 index 484c9d448c2..00000000000 --- a/scripts/Tools/case_diff +++ /dev/null @@ -1,145 +0,0 @@ -#! /usr/bin/env python - -""" -Try to calculate and succinctly present the differences between two large -directory trees. -""" - -from standard_script_setup import * -from CIME.utils import run_cmd, run_cmd_no_fail - -import argparse, sys, os - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} case1 case2 [skip-files] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - > {0} case1 case2 -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("case1", help="First case.") - - parser.add_argument("case2", help="Second case.") - - parser.add_argument("skip_list", nargs="*", - help="skip these files. 
You'll probably want to skip the bld directory if it's inside the case") - - parser.add_argument("-b", "--show-binary", action="store_true", - help="Show binary diffs") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.case1, args.case2, args.show_binary, args.skip_list - -############################################################################### -def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): -############################################################################### - """ - Starting at dir1, dir2 respectively, compare their contents - """ - # The assertions below hurt performance - #assert os.path.isdir(dir1), dir1 + " not a directory" - #assert os.path.isdir(dir2), dir2 + " not a directory" - - # Get contents of both directories - dir1_contents = set(os.listdir(dir1)) - dir2_contents = set(os.listdir(dir2)) - - # Use set operations to figure out what they have in common - dir1_only = dir1_contents - dir2_contents - dir2_only = dir2_contents - dir1_contents - both = dir1_contents & dir2_contents - - num_differing_files = 0 - - # Print the unique items - for dirname, set_obj in [(dir1, dir1_only), (dir2, dir2_only)]: - for item in sorted(set_obj): - if (item not in skip_list): - print ("===============================================================================") - print (os.path.join(dirname, item), "is unique") - num_differing_files += 1 - - # Handling of the common items is trickier - for item in sorted(both): - if (item in skip_list): - continue - path1 = os.path.join(dir1, item) - path2 = os.path.join(dir2, item) - path1isdir = os.path.isdir(path1) - - # If the directory status of the files differs, report diff - if (path1isdir != os.path.isdir(path2)): - print ("===============================================================================") - print (path1 + " DIFFERS (directory status)") - num_differing_files += 1 - continue - - # If we've made it this far, the files' status is the same. 
If the - # files are directories, recursively check them, otherwise check - # that the file contents match - if (path1isdir): - num_differing_files += recursive_diff(path1, path2, repls, show_binary, skip_list) - else: - # # As a (huge) performance enhancement, if the files have the same - # # size, we assume the contents match - # if (os.path.getsize(path1) != os.path.getsize(path2)): - # print path1 + " DIFFERS (contents)" - - stat, out, err = run_cmd("file {}".format(path1)) - if (stat != 0): - logging.warning("Failed to probe file '{}', out: '{}', err: '{}'".format(path1, out, err)) - continue - - is_text_file = "text" in out - if (not (not show_binary and not is_text_file)): - the_text = open(path2, "r").read() - for replace_item, replace_with in repls.items(): - the_text = the_text.replace(replace_item, replace_with) - - stat, out, _ = run_cmd("diff -w {} -".format(path1), input_str=the_text) - if (stat != 0): - print ("===============================================================================") - print (path1 + " DIFFERS (contents)") - num_differing_files += 1 - print (" "+ out) - - return num_differing_files - -############################################################################### -def _main_func(description): -############################################################################### - case1, case2, show_binary, skip_list = parse_command_line(sys.argv, description) - - xml_normalize_fields = ["TEST_TESTID", "SRCROOT"] - repls = {} - for xml_normalize_field in xml_normalize_fields: - try: - val1 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case1) - val2 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case2) - if os.sep in val1: - repls[os.path.normpath(val2)] = os.path.normpath(val1) - else: - repls[val2] = val1 - except Exception: - logging.warning("Warning, failed to normalize on " + xml_normalize_field) - repls = {} - - num_differing_files = recursive_diff(case1, case2, repls, show_binary, skip_list) - logging.info(num_differing_files, "files are different") - sys.exit(0 if num_differing_files == 0 else 1) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/check_case b/scripts/Tools/check_case deleted file mode 100755 index 1a99a966bba..00000000000 --- a/scripts/Tools/check_case +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python - -""" -Script to verify that the case is ready for submission. - -Typical usage is simply: - ./check_case - -You can run this before running case.submit to: - - Ensure that all of the env xml files are in sync with the locked files - - Create namelists (thus verifying that there will be no problems with - namelist generation) - - Ensure that the build is complete - -Running this is completely optional: these checks will be done -automatically when running case.submit. However, you can run this if you -want to perform these checks without actually submitting the case. 
-""" - -from standard_script_setup import * - -from CIME.utils import expect -from CIME.case import Case - -import argparse - -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args, description): -############################################################################### - - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - -############################################################################### -def _main_func(description): -############################################################################### - parse_command_line(sys.argv, description) - - with Case(read_only=False) as case: - case.check_lockedfiles() - case.create_namelists() - build_complete = case.get_value("BUILD_COMPLETE") - - if not build_complete: - expect(False, - "Please rebuild the model interactively by calling case.build") - - logger.info( "check_case OK ") - -############################################################################### - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/check_input_data b/scripts/Tools/check_input_data deleted file mode 100755 index 35eedbd0f74..00000000000 --- a/scripts/Tools/check_input_data +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python - -""" -This script determines if the required data files for your case exist on local disk in the appropriate subdirectory of -$DIN_LOC_ROOT. It automatically downloads missing data required for your simulation. - -It is recommended that users on a given system share a common $DIN_LOC_ROOT directory to avoid duplication on -disk of large amounts of input data. You may need to talk to your system administrator in order to set this up. - -This script should be run from $CASEROOT. - -To verify the presence of required data use: - ./check_input_data - -To obtain missing datasets from the input data server(s) use: - ./check_input_data --download - -This script is automatically called by the case control system, when the case is built and submitted. -So manual usage of this script is optional. 
-""" -from standard_script_setup import * -from CIME.case import Case - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--protocol", default=None, - help="The input data protocol to download data.") - - parser.add_argument("--server", default=None, - help="The input data repository from which to download data.") - - - parser.add_argument("-i", "--input-data-root",default=None, - help="The root directory where input data goes,\n" - "use xmlquery DIN_LOC_ROOT to see default value.") - - - parser.add_argument("--data-list-dir", default="Buildconf", - help="Where to find list of input files") - - parser.add_argument("--download", action="store_true", - help="Attempt to download missing input files") - - parser.add_argument("--chksum", action="store_true", - help="chksum inputfiles against inputdata_chksum.dat (if available)") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.protocol, args.server, args.input_data_root, args.data_list_dir, args.download, args.chksum - -############################################################################### -def _main_func(description): -############################################################################### - protocol, address, input_data_root, data_list_dir, download, chksum = parse_command_line(sys.argv, description) - - with Case() as case: - sys.exit(0 if case.check_all_input_data(protocol=protocol, - address=address, - input_data_root=input_data_root, - data_list_dir=data_list_dir, - download=download, - chksum=chksum) else 1) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/check_lockedfiles b/scripts/Tools/check_lockedfiles deleted file mode 100755 index ff207a6e091..00000000000 --- a/scripts/Tools/check_lockedfiles +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -""" -This script compares xml files -""" - -from standard_script_setup import * -from CIME.case import Case - -def parse_command_line(args, description): - parser = argparse.ArgumentParser( - usage="""\n{0} [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# check_lockedfiles SMS\033[0m - > {0} -""".format(os.path.basename(args[0])), - - description=description, - - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to build") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot - -def _main_func(description): - caseroot = parse_command_line(sys.argv, description) - - with Case(case_root=caseroot, read_only=True) as case: - case.check_lockedfiles() - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/cime_bisect b/scripts/Tools/cime_bisect deleted file mode 100755 index 3d2654d25cd..00000000000 --- a/scripts/Tools/cime_bisect +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python - -""" -A script to help track down the commit that caused tests to fail. 
-""" - -from standard_script_setup import * -from CIME.utils import expect, run_cmd_no_fail, run_cmd -from CIME.XML.machines import Machines - -import argparse, sys, os, re - -logger = logging.getLogger(__name__) - -_MACHINE = Machines() - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--bad=] [--compare=] [--no-batch] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Bisect ERS.f45_g37.B1850C5 which got broken in the last 4 commits \033[0m - > cd - > {0} HEAD~4 ERS.f45_g37.B1850C5 - - \033[1;32m# Bisect ERS.f45_g37.B1850C5 which started to DIFF in the last 4 commits \033[0m - > cd - > {0} HEAD~4 'ERS.f45_g37.B1850C5 -c -b master' - - \033[1;32m# Bisect a build error for ERS.f45_g37.B1850C5 which got broken in the last 4 commits \033[0m - > cd - > {0} HEAD~4 'ERS.f45_g37.B1850C5 --no-run' - - \033[1;32m# Bisect two different failing tests which got broken in the last 4 commits \033[0m - > cd - > {0} HEAD~4 'ERS.f45_g37.B1850C5 --no-run' 'SMS.f45_g37.F' - -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("good", help="Name of most recent known good commit.") - - parser.add_argument("-B", "--bad", default="HEAD", - help="Name of bad commit, default is current HEAD.") - - parser.add_argument("-a", "--all-commits", action="store_true", - help="Test all commits, not just merges") - - parser.add_argument("-C", "--cime-integration", action="store_true", - help="Bisect CIME instead of the whole code. Useful for finding errors after CIME merges") - - parser.add_argument("-S", "--script", - help="Use your own custom script instead") - - ct_modifiers = parser.add_argument_group("create_test", "flags for modifying create_test call to be bisected") - - ct_modifiers.add_argument("testargs", nargs="*", help="String to pass to create_test. Combine with single quotes if it includes multiple args.") - - ct_modifiers.add_argument("-r", "--test-root", - help="Path to testroot to use for testcases for bisect. WARNING. This will be cleared by this script.") - - ct_modifiers.add_argument("-n", "--check-namelists", action="store_true", - help="Consider a commit to be broken if namelist check fails") - - ct_modifiers.add_argument("-t", "--check-throughput", action="store_true", - help="Consider a commit to be broken if throughput check fails (fail if tests slow down)") - - ct_modifiers.add_argument("-m", "--check-memory", action="store_true", - help="Consider a commit to be broken if memory check fails (fail if tests footprint grows)") - - ct_modifiers.add_argument("-l", "--check-memleak", action="store_true", - help="Consider a commit to be broken if a memleak is detected") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.test_root is None: - args.test_root = os.path.join(_MACHINE.get_value("CIME_OUTPUT_ROOT"), "cime_bisect") - - if args.cime_integration: - expect(os.path.basename(os.getcwd()) == "cime" and os.path.isdir(".git"), \ -""" -In order for --cime-integration mode to work, it is expected that you have deleted -your cime subtree and replaced it with a clone of ESMCI cime. It is also expected -that you run this script from the cime direcory. 
-""") - - expect(os.path.isdir(".git"), "Please run the root of a repo") - - expect( not (bool(args.script) and (bool(args.testargs) or args.check_namelists or args.check_throughput or args.check_memory or args.check_memleak) ), - "Makes no sense to use custom script but also ask for create_test modifiers") - - return args.testargs, args.good, args.bad, args.test_root, \ - args.check_namelists, args.check_throughput, args.check_memory, args.check_memleak, args.all_commits, args.script - -############################################################################### -def cime_bisect(testargs, good, bad, testroot, - check_namelists, check_throughput, check_memory, check_memleak, commits_to_skip, - custom_script): -############################################################################### - logger.info("####################################################") - logger.info("TESTING WITH ARGS '{}'".format(testargs)) - logger.info("####################################################") - - if os.path.exists("scripts/create_test"): - create_test = os.path.join(os.getcwd(), "scripts", "create_test") - else: - create_test = os.path.join(os.getcwd(), "cime", "scripts", "create_test") - - expect(os.path.exists(create_test), "Please run the root of a CIME repo") - - # Basic setup - run_cmd_no_fail("git bisect start") - run_cmd_no_fail("git bisect good {}".format(good), verbose=True) - run_cmd_no_fail("git bisect bad {}".format(bad), verbose=True) - if commits_to_skip: - run_cmd_no_fail("git bisect skip {}".format(" ".join(commits_to_skip))) - - # Formulate the create_test command, let create_test make the test-id, it will use - # a timestamp that will allow us to avoid collisions - bisect_cmd = "git submodule update && {} {} --test-root {}".format(create_test, testargs, testroot) - - is_batch = _MACHINE.has_batch_system() - if (is_batch and "--no-run" not in testargs and "--no-build" not in testargs and "--no-setup" not in testargs): - # Forumulate the wait_for_tests command. 
- - bisect_cmd += " --wait" - - if (check_throughput): - bisect_cmd += " --wait-check-throughput" - if (check_memory): - bisect_cmd += " --wait-check-memory" - if (not check_namelists): - bisect_cmd += " --wait-ignore-namelists" - if (not check_memleak): - bisect_cmd += " --wait-ignore-memleak" - - try: - if custom_script: - cmd = "git bisect run {}".format(custom_script) - else: - cmd = "git bisect run sh -c '{}'".format(bisect_cmd) - - output = run_cmd(cmd, verbose=True)[1] - - # Get list of potentially bad commits from output - lines = output.splitlines() - regex = re.compile(r'^([a-f0-9]{40}).*$') - bad_commits = set([regex.match(line).groups()[0] for line in lines if regex.match(line)]) - - bad_commits_filtered = bad_commits - commits_to_skip - - expect(len(bad_commits_filtered) == 1, bad_commits_filtered) - - logger.info("####################################################") - logger.info("BAD MERGE FOR ARGS '{}' IS:".format(testargs)) - logger.info("####################################################") - logger.warning(run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop()))) - - finally: - run_cmd_no_fail("git bisect reset && git submodule update") - -############################################################################### -def _main_func(description): -############################################################################### - testargs, good, bad, testroot, check_namelists, check_throughput, check_memory, check_memleak, all_commits, custom_script = \ - parse_command_line(sys.argv, description) - - # Important: we only want to test merges - if not all_commits: - commits_we_want_to_test = run_cmd_no_fail("git rev-list {}..{} --merges --first-parent".format(good, bad)).splitlines() - all_commits_ = run_cmd_no_fail("git rev-list {}..{}".format(good, bad)).splitlines() - commits_to_skip = set(all_commits_) - set(commits_we_want_to_test) - logger.info("Skipping {} non-merge commits".format(len(commits_to_skip))) - for item in commits_to_skip: - logger.debug(item) - else: - commits_to_skip = set() - - if custom_script: - cime_bisect(custom_script, good, bad, testroot, check_namelists, check_throughput, check_memory, check_memleak, commits_to_skip, custom_script) - else: - for set_of_test_args in testargs: - cime_bisect(set_of_test_args, good, bad, testroot, check_namelists, check_throughput, check_memory, check_memleak, commits_to_skip, custom_script) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/code_checker b/scripts/Tools/code_checker deleted file mode 100755 index b6623e23a65..00000000000 --- a/scripts/Tools/code_checker +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python - -""" -Ensure that all CIME python files are free of errors -and follow the PEP8 standard. 
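The removed code_checker delegates the real work to CIME.code_checker.check_code, which fans pylint out over files in parallel. A generic sketch of that idea (plain subprocess and multiprocessing, not the CIME implementation):

import subprocess
from multiprocessing import Pool

def pylint_one(path):
    # Returns (path, problems); an empty string means the file came back clean.
    proc = subprocess.run(
        ["pylint", path],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True,
    )
    return path, "" if proc.returncode == 0 else proc.stdout

def check_files(paths, num_procs=10):
    # Check files concurrently and map each path to its pylint report.
    with Pool(num_procs) as pool:
        return dict(pool.map(pylint_one, paths))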
-""" - -from standard_script_setup import * - -from CIME.code_checker import check_code, expect - -import argparse, sys, os -#pylint: disable=import-error -from distutils.spawn import find_executable - -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n{0} [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Check code \033[0m - > {0} - - \033[1;32m# Check code single file case.py \033[0m - \033[1;32m# Note, you do NOT have to provide the path to this file, the tool will find it \033[0m - > {0} case.py -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("-j", "--num-procs", type=int, default=10, - help="The number of files to check in parallel") - - parser.add_argument("files", nargs="*", - help="Restrict checking to specific files. Relative name is fine.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.num_procs, args.files - -############################################################################### -def _main_func(description): -############################################################################### - pylint = find_executable("pylint") - expect(pylint is not None, "pylint not found") - - num_procs, files = parse_command_line(sys.argv, description) - - results = check_code(files, num_procs=num_procs, interactive=True) - for result in results.values(): - if result != "": - sys.exit(1) - - sys.exit(0) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/compare_namelists b/scripts/Tools/compare_namelists deleted file mode 100755 index 6922fbc793b..00000000000 --- a/scripts/Tools/compare_namelists +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python - -""" -Compare namelists. Should be called by an ACME test. Designed -to not be sensitive to order or whitespace. -""" - -from standard_script_setup import * -import CIME.compare_namelists -from CIME.utils import expect - -import argparse, sys, os - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n{0} [-c ] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Compare namelist files\033[0m - > {0} baseline_dir/test/namelistfile mytestarea/namelistfile -c -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("gold_file", help="Path to gold file") - - parser.add_argument("new_file", help="Path to file to compare against gold") - - parser.add_argument("-c", "--case", action="store", dest="case", default=None, - help="The case base id (..). 
Helps us normalize data.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - # Normalize case - if (args.case is not None): - args.case = CIME.utils.normalize_case_id(args.case) - - return args.gold_file, args.new_file, args.case - -############################################################################### -def _main_func(description): -############################################################################### - gold_file, compare_file, case = \ - parse_command_line(sys.argv, description) - - if (case is None): - logging.warning("No case id data available, will not be able to normalize values as effectively") - else: - logging.info("Using case: '{}'".format(case)) - - success, comments = CIME.compare_namelists.compare_namelist_files(gold_file, compare_file, case) - expect(success, - "Namelist diff between files {} and {}\n{}".format(gold_file, compare_file, comments)) - - print("Files {} and {} MATCH".format(gold_file, compare_file)) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/compare_test_results b/scripts/Tools/compare_test_results deleted file mode 100755 index 1f7b969a1f5..00000000000 --- a/scripts/Tools/compare_test_results +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python - -"""Analyze results from a test root area, comparing non-BFB changes. - -Purpose is, instead of re-running tests in compare mode, which is very slow, -allow for very fast analysis of diffs. - -Outputs results for each test to stdout (one line per test); possible status -codes are: PASS, FAIL, SKIP. (A SKIP denotes a test that did not make it to the -run phase or a test for which the run phase did not pass: we skip baseline -comparisons in this case.) - -In addition, creates files named compare.log.BASELINE_NAME.TIMESTAMP in each -test directory, which contain more detailed output. Also creates -*.cprnc.out.BASELINE_NAME.TIMESTAMP files in each run directory. - -Returns a 0 exit status if all tests are bit-for-bit, and a non-zero exit status -(TESTS_FAILED_ERR_CODE) if any tests differed from the baseline. - -You may need to load modules for cprnc to work. - -""" - -from standard_script_setup import * - -from CIME.XML.machines import Machines -from CIME.compare_test_results import compare_test_results - -import argparse, sys, os - -_MACHINE = Machines() - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n{0} [-r ] [-b -c ] [-t ] [ ...] 
[--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# From most recent run, compare all changes \033[0m - > {0} - \033[1;32m# From most recent run, compare only changes for test foo and bar only \033[0m - > {0} foo bar - \033[1;32m# For an old run where you know test-id, compare only changes for test foo and bar only \033[0m - > {0} foo bar -t mytestid - \033[1;32m# From most recent run of jenkins, compare history changes for next \033[0m - > {0} -r /home/jenkins/acme/scratch/jenkins -b next - \033[1;32m# For typical CESM workflow, where baselines are named with tags \033[0m - > {0} -t TESTID -b BASELINE_TAG -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - default_compiler = _MACHINE.get_default_compiler() - scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") - default_testroot = os.path.join(scratch_root) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("-n", "--namelists-only", action="store_true", - help="Only analyze namelists.") - - parser.add_argument("--hist-only", action="store_true", - help="Only analyze history files.") - - parser.add_argument("-b", "--baseline-name", - help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.") - - parser.add_argument("--baseline-root", - help="Root of baselines. Default will use BASELINE_ROOT from the case.") - - parser.add_argument("-c", "--compiler", default=default_compiler, - help="Compiler of run you want to compare") - - parser.add_argument("-r", "--test-root", default=default_testroot, - help="Path to test results that are being compared") - - parser.add_argument("-t", "--test-id", - help="Limit processes to case dirs matching this test-id. 
Can be useful if mutiple runs dumped into the same dir.") - - parser.add_argument("compare_tests", nargs="*", - help="When comparing, limit the comparison to tests matching these regex") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.baseline_name, args.baseline_root, args.test_root, args.compiler, args.test_id, args.compare_tests, args.namelists_only, args.hist_only - -############################################################################### -def _main_func(description): -############################################################################### - baseline_name, baseline_root, test_root, compiler, test_id, compare_tests, namelists_only, hist_only = \ - parse_command_line(sys.argv, description) - - success = compare_test_results(baseline_name, baseline_root, test_root, compiler, test_id, compare_tests, namelists_only, hist_only) - sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/component_compare_baseline b/scripts/Tools/component_compare_baseline deleted file mode 100755 index 117ca672ed5..00000000000 --- a/scripts/Tools/component_compare_baseline +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python - -""" -Compares current component history files against baselines -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.hist_utils import compare_baseline - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Compare baselines \033[0m - > {0} -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") - - parser.add_argument("-b", "--baseline-dir", - help="Use custom baseline dir") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot, args.baseline_dir - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, baseline_dir = parse_command_line(sys.argv, description) - with Case(caseroot) as case: - success, comments = compare_baseline(case, baseline_dir) - print(comments) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/component_compare_copy b/scripts/Tools/component_compare_copy deleted file mode 100755 index d88f0b4185a..00000000000 --- a/scripts/Tools/component_compare_copy +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python - -""" -Copy the most recent batch of hist files in a case, adding the given suffix. -This allows us to save these results if we want to run the case again. 
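Suffixing the newest history files is what lets a later run of the same case proceed without clobbering earlier results. A minimal illustration of the idea (hypothetical run directory, not CIME.hist_utils.copy):

import glob
import os
import shutil

def copy_hist_with_suffix(rundir, suffix):
    # Copy every netCDF history file in rundir to "<name>.<suffix>".
    for hist in glob.glob(os.path.join(rundir, "*.nc")):
        shutil.copy2(hist, "{}.{}".format(hist, suffix))

copy_hist_with_suffix("/path/to/run", "base")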
-""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.hist_utils import copy - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} suffix [] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Setup case \033[0m - > {0} -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("suffix", - help="Suffix to append to hist files") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.suffix, args.caseroot - -############################################################################### -def _main_func(description): -############################################################################### - suffix, caseroot = parse_command_line(sys.argv, description) - with Case(caseroot) as case: - copy(case, suffix) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/component_compare_test b/scripts/Tools/component_compare_test deleted file mode 100755 index 952760e93a7..00000000000 --- a/scripts/Tools/component_compare_test +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python - -""" -Compares two component history files in the testcase directory -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.hist_utils import compare_test - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} suffix1 suffix2 [] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Setup case \033[0m - > {0} -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("suffix1", - help="The suffix of the first set of files") - - parser.add_argument("suffix2", - help="The suffix of the second set of files") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.suffix1, args.suffix2, args.caseroot - -############################################################################### -def _main_func(description): -############################################################################### - suffix1, suffix2, caseroot = parse_command_line(sys.argv, description) - with Case(caseroot) as case: - success, comments = compare_test(case, suffix1, suffix2) - print(comments) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/component_generate_baseline b/scripts/Tools/component_generate_baseline deleted file mode 100755 index 8bd4fc66374..00000000000 --- a/scripts/Tools/component_generate_baseline +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python - -""" -Copies current component history files into baselines -""" - -from standard_script_setup import * - -from CIME.case import Case -from 
CIME.hist_utils import generate_baseline - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Generate baselines \033[0m - > {0} -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") - - parser.add_argument("-b", "--baseline-dir", - help="Use custom baseline dir") - - parser.add_argument("-o", "--allow-baseline-overwrite", action="store_true", - help="By default an attempt to overwrite an existing baseline directory " - "will raise an error. Specifying this option allows " - "existing baseline directories to be silently overwritten.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot, args.baseline_dir, args.allow_baseline_overwrite - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, baseline_dir, allow_baseline_overwrite = \ - parse_command_line(sys.argv, description) - with Case(caseroot) as case: - success, comments = generate_baseline(case, baseline_dir, - allow_baseline_overwrite) - print(comments) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/cs.status b/scripts/Tools/cs.status deleted file mode 100755 index 86189594d00..00000000000 --- a/scripts/Tools/cs.status +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python -""" -List test results based on TestStatus files. - -Typical usage: - ./cs.status /path/to/testroot/*.testid/TestStatus - -Returns True if no errors occured (not based on test statuses). 
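The --count-fails and --count-performance-fails options reduce to counting, per phase, how many tests did not PASS. A generic sketch of that aggregation (illustrative phase names and data, not the cs_status implementation):

from collections import Counter

def count_non_passes(results, phases):
    # results: iterable of (test_name, phase, status) tuples.
    counts = Counter()
    for _test, phase, status in results:
        if phase in phases and status != "PASS":
            counts[phase] += 1
    return counts

example = [("t1", "THROUGHPUT", "FAIL"), ("t2", "THROUGHPUT", "PASS"), ("t2", "MEMCOMP", "PEND")]
print(count_non_passes(example, {"THROUGHPUT", "MEMCOMP"}))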
-""" - -from standard_script_setup import * -import argparse, sys, os, logging, glob -from CIME.utils import expect -from CIME.cs_status import cs_status -from CIME import test_status - -_PERFORMANCE_PHASES = [test_status.THROUGHPUT_PHASE, - test_status.MEMCOMP_PHASE] - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument("paths", nargs="*", help="Paths to TestStatus files.") - - options_group = parser.add_mutually_exclusive_group() - - options_group.add_argument("-s", "--summary", action="store_true", - help="Only show summary") - - options_group.add_argument("-f", "--fails-only", action="store_true", - help="Only show non-PASSes (this includes PENDs as well as FAILs)") - - parser.add_argument("-c", "--count-fails", action="append", default=[], - metavar="PHASE", - help="For this phase, do not give line-by-line output; instead, just report\n" - "the total number of tests that have not PASSed this phase\n" - "(this includes PENDs as well as FAILs).\n" - "This is typically used with the --fails-only option,\n" - "but it can also be used without that option.\n" - "(However, it cannot be used with the --summary option.)\n" - "(Can be specified multiple times.)") - - performance_fails_equivalent = ' '.join(["--count-fails {}".format(phase) - for phase in _PERFORMANCE_PHASES]) - parser.add_argument("-p", "--count-performance-fails", action="store_true", - help="For phases that involve performance comparisons with baseline:\n" - "Do not give line-by-line output; instead, just report the total number\n" - "of tests that have not PASSed this phase.\n" - "(This can be useful because these performance comparisons can be\n" - "subject to machine variability.)\n" - "This is equivalent to specifying:\n" - "{}".format(performance_fails_equivalent)) - - parser.add_argument("-x", "--expected-fails-file", - help="Path to XML file listing expected failures for this test suite") - - parser.add_argument("-t", "--test-id", action="append", default=[], - help="Include all tests with this test id.\n" - "(Can be specified multiple times.)") - - parser.add_argument("-r", "--test-root", default=os.getcwd(), - help="Test root used when --test-id is given") - - args = parser.parse_args(args[1:]) - - _validate_args(args) - - if args.count_performance_fails: - args.count_fails.extend(_PERFORMANCE_PHASES) - - return args.paths, args.summary, args.fails_only, args.count_fails, args.expected_fails_file, args.test_id, args.test_root - -def _validate_args(args): - expect(not (args.summary and args.count_fails), - "--count-fails cannot be specified with --summary") - expect(not (args.summary and args.count_performance_fails), - "--count-performance-fails cannot be specified with --summary") - _validate_phases(args.count_fails, '--count-fails') - -def _validate_phases(list_of_phases, arg_name): - for phase in list_of_phases: - expect(phase in test_status.ALL_PHASES, - "Phase {} specified with {} argument is not a valid TestStatus phase".format( - phase, arg_name)) - -############################################################################### -def _main_func(description): -############################################################################### - test_paths, summary, fails_only, count_fails, expected_fails_file, test_ids, test_root 
= parse_command_line(sys.argv, description) - for test_id in test_ids: - test_paths.extend(glob.glob(os.path.join(test_root, "*%s/TestStatus" % test_id))) - - cs_status(test_paths=test_paths, - summary=summary, - fails_only=fails_only, - count_fails_phase_list=count_fails, - expected_fails_filepath=expected_fails_file) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/e3sm_check_env b/scripts/Tools/e3sm_check_env deleted file mode 100755 index 921f877cf13..00000000000 --- a/scripts/Tools/e3sm_check_env +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python - -""" -A script to verify that the environment is compliant with E3SM's software requirements. - -Be sure to source your env_mach_specific file before running this check. -""" - -from standard_script_setup import * -from CIME.utils import run_cmd - -import sys, os, argparse - -# Here's where we keep the various reports and instructions. -LOG = [] - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--verbose] -OR -{0} --help -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - -############################################################################### -def check_sh(): -############################################################################### - stat = run_cmd('sh --version')[0] - if stat != 0: - LOG.append('* sh appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') - -############################################################################### -def check_csh(): # Can't believe I'm actually checking for csh. -JNJ -############################################################################### - stat = run_cmd('csh --version')[0] - if stat != 0: # Also tolerates tcsh - LOG.append('* csh appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') - -############################################################################### -def check_perl_module(module_name): -############################################################################### - stat = run_cmd('perl -e "require {};"'.format(module_name)[0]) - if stat != 0: - LOG.append('* E3SM requires the Perl module {}, but it is not available.'.format(module_name)) - LOG.append(' Please make sure that it exists in your @INC.') - -############################################################################### -def check_perl(): -############################################################################### - # First, make sure we have the right version of Perl. 
- e3sm_perl_major_version = 5 - e3sm_perl_minor_version = 16 - - stat, output, _ = run_cmd("perl -e 'print $^V;'") - if stat != 0: - LOG.append('* Perl appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') - return - - output = output[1:] # get rid of leading 'v' character - major_version, minor_version, _ = [int(item) for item in output.split('.')] - if major_version != e3sm_perl_major_version or minor_version < e3sm_perl_minor_version: - LOG.append('* E3SM requires Perl version {:d}.{:d}+. You appear to be using {:d}.{:d}.'.format(e3sm_perl_major_version, e3sm_perl_minor_version, major_version, minor_version)) - LOG.append(' Please check to see whether an appropriate version exists on this machine,') - LOG.append(' possibly via a loadable module.') - - # Okay, our version is good. What about all those pesky modules? - check_perl_module('XML::LibXML') - check_perl_module('XML::SAX') - check_perl_module('XML::SAX::Exception') - check_perl_module('Switch') - -############################################################################### -def check_git(): -############################################################################### - e3sm_git_major_version = 2 - e3sm_git_minor_version = 0 - - stat, output, _ = run_cmd('git --version') - if stat != 0: - LOG.append('* Git appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') - return - - version = output.split()[-1] - num_dots = version.count('.') - if num_dots == 1: - major_version, minor_version = [int(s) for s in version.split('.')] - elif num_dots == 2: - major_version, minor_version, _ = [int(s) for s in version.split('.')] - else: - LOG.append('* Unparseable git version string: "{}"'.format(output)) - return - - if major_version != e3sm_git_major_version or minor_version < e3sm_git_minor_version: - LOG.append('* E3SM requires Git version {:d}.{:d}+. You appear to be using version {:d}.{:d}.'.format(e3sm_git_major_version, e3sm_git_minor_version, major_version, minor_version)) - -############################################################################### -def check_svn(): -############################################################################### - e3sm_svn_major_version = 1 - e3sm_svn_minor_version = 4 - e3sm_svn_patch_version = 2 - - stat, output, _ = run_cmd('svn --version --quiet') - if stat != 0: - LOG.append('* Subversion appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') - return - - major_version, minor_version, patch_version = [int(s) for s in output.split('.')] - if major_version < e3sm_svn_major_version or minor_version < e3sm_svn_minor_version or patch_version < e3sm_svn_patch_version: - LOG.append('* E3SM requires Subversion version {:d}.{:d}.{:d}+. 
You appear to be using version {:d}.{:d}.{:d}.'.format(e3sm_svn_major_version, e3sm_svn_minor_version, e3sm_svn_patch_version, major_version, minor_version, patch_version)) - -############################################################################### -def _main_func(description): -############################################################################### - parse_command_line(sys.argv, description) - - check_sh() - check_csh() - check_perl() - check_git() - check_svn() - - if len(LOG) > 0: - print('e3sm_check_env found problems with your E3SM development environment:\n') - for line in LOG: - print(line) - - sys.exit(1) - else: - print('e3sm_check_env found no problems with your E3SM development environment.') - sys.exit(0) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/e3sm_cime_merge b/scripts/Tools/e3sm_cime_merge deleted file mode 100755 index b0ec5e276b5..00000000000 --- a/scripts/Tools/e3sm_cime_merge +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python - -""" -A script to merge ESMCI's cime development and make it into a PR for E3SM -""" - -from standard_script_setup import * -from e3sm_cime_mgmt import * - -import sys, os, argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--verbose] -OR -{0} --help -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("repo", nargs="?", - help="Location of repo to use, default is based on current location") - - parser.add_argument("-r", "--resume", action="store_true", - help="Resume merge after fixing conflicts") - - parser.add_argument("-a", "--abort", action="store_true", - help="Abort merge") - - parser.add_argument("--squash", action="store_true", - help="Do the merge as squashy as possible") - - parser.add_argument("--auto-conf", action="store_true", - help="Try to automatically resolve conflicts") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - expect(not (args.resume and args.abort), "Makes no sense to abort and resume") - expect(not (args.abort and args.squash), "Makes no sense to abort and squash") - - return args.repo, args.resume, args.abort, args.squash, args.auto_conf - -############################################################################### -def _main_func(description): -############################################################################### - repo, resume, abort, squash, auto_conf = parse_command_line(sys.argv, description) - - if repo is not None: - os.chdir(repo) - - if abort: - abort_merge() - else: - e3sm_cime_merge(resume, squash=squash, auto_conf=auto_conf) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/e3sm_cime_split b/scripts/Tools/e3sm_cime_split deleted file mode 100755 index ed4936a553a..00000000000 --- a/scripts/Tools/e3sm_cime_split +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python - -""" -A script to split-off E3SM's cime development and make it into a PR for ESMCI -""" - -from standard_script_setup import * -from e3sm_cime_mgmt import * -from CIME.utils 
import expect - -import sys, os, argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--verbose] -OR -{0} --help -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("repo", nargs="?", - help="Location of repo to use, default is based on current location") - - parser.add_argument("-r", "--resume", action="store_true", - help="Resume split after fixing conflicts") - - parser.add_argument("-a", "--abort", action="store_true", - help="Abort split") - - parser.add_argument("--squash", action="store_true", - help="Do the split as squashy as possible") - - parser.add_argument("--auto-conf", action="store_true", - help="Try to automatically resolve conflicts") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - expect(not (args.resume and args.abort), "Makes no sense to abort and resume") - expect(not (args.abort and args.squash), "Makes no sense to abort and squash") - - return args.repo, args.resume, args.abort, args.squash, args.auto_conf - -############################################################################### -def _main_func(description): -############################################################################### - repo, resume, abort, squash, auto_conf = parse_command_line(sys.argv, description) - - if repo is not None: - os.chdir(repo) - - if abort: - abort_split() - else: - e3sm_cime_split(resume, squash=squash, auto_conf=auto_conf) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/generate_cylc_workflow.py b/scripts/Tools/generate_cylc_workflow.py deleted file mode 100755 index ed7fb092f95..00000000000 --- a/scripts/Tools/generate_cylc_workflow.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python - -""" -Generates a cylc workflow file for the case. 
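The ensemble handling in the removed generate_cylc_workflow.py hinges on a single regular expression that splits a case name into a prefix and a trailing member number. A stand-alone illustration of that pattern (same regex as the script, hypothetical driver code):

import re

def split_ensemble_casename(casename, ensemble_size):
    # Names like "case.01" split into ("case.", 1); the last member follows
    # from the requested ensemble size.
    m = re.search(r"(.*[^\d])(\d+)$", casename)
    if m is None:
        raise ValueError("case name must end in an integer: " + casename)
    first = int(m.group(2))
    return m.group(1), first, first + ensemble_size - 1

print(split_ensemble_casename("case.01", 4))  # ('case.', 1, 4)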
See https://cylc.github.io for details about cylc -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.utils import expect, transform_vars - -import argparse, re -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory for which namelists are generated.\n" - "Default is current directory.") - - parser.add_argument('--cycles', default=1, - help="The number of cycles to run, default is RESUBMIT") - - parser.add_argument("--ensemble", default=1, - help="generate suite.rc for an ensemble of cases, the case name argument must end in an integer.\n" - "for example: ./generate_cylc_workflow.py --ensemble 4 \n" - "will generate a workflow file in the current case, if that case is named case.01," - "the workflow will include case.01, case.02, case.03 and case.04") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot, args.cycles, int(args.ensemble) - -def cylc_get_ensemble_first_and_last(case, ensemble): - if ensemble == 1: - return 1,None - casename = case.get_value("CASE") - m = re.search(r"(.*[^\d])(\d+)$", casename) - minval = int(m.group(2)) - maxval = minval+ensemble-1 - return minval,maxval - -def cylc_get_case_path_string(case, ensemble): - caseroot = case.get_value("CASEROOT") - casename = case.get_value("CASE") - if ensemble == 1: - return "{};".format(caseroot) - basepath = os.path.abspath(caseroot+"/..") - m = re.search(r"(.*[^\d])(\d+)$", casename) - - expect(m, "casename {} must end in an integer for ensemble method".format(casename)) - - return "{basepath}/{basename}$(printf \"%0{intlen}d\"".format(basepath=basepath, basename=m.group(1), intlen=len(m.group(2))) + " ${CYLC_TASK_PARAM_member});" - - -def cylc_batch_job_template(job, jobname, case, ensemble): - - env_batch = case.get_env("batch") - batch_system_type = env_batch.get_batch_system_type() - batchsubmit = env_batch.get_value("batch_submit") - submit_args = env_batch.get_submit_args(case, job) - case_path_string = cylc_get_case_path_string(case, ensemble) - - return """ - [[{jobname}]] - script = cd {case_path_string} ./case.submit --job {job} - [[[job]]] - batch system = {batch_system_type} - batch submit command template = {batchsubmit} {submit_args} '%(job)s' - [[[directives]]] -""".format(jobname=jobname, job=job, case_path_string=case_path_string, batch_system_type=batch_system_type, - batchsubmit=batchsubmit, submit_args=submit_args) + "{{ batchdirectives }}\n" - - -def cylc_script_job_template(job, case, ensemble): - case_path_string = cylc_get_case_path_string(case, ensemble) - return """ - [[{job}]] - script = cd {case_path_string} ./case.submit --job {job} -""".format(job=job, case_path_string=case_path_string) - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, cycles, ensemble = parse_command_line(sys.argv, description) - - expect(os.path.isfile(os.path.join(caseroot, "CaseStatus")), - "case.setup must be run prior to running 
{}".format(__file__)) - with Case(caseroot, read_only=True) as case: - if cycles == 1: - cycles = max(1, case.get_value('RESUBMIT')) - env_batch = case.get_env('batch') - env_workflow = case.get_env('workflow') - jobs = env_workflow.get_jobs() - casename = case.get_value('CASE') - input_template = os.path.join(case.get_value("MACHDIR"),"cylc_suite.rc.template") - - overrides = {"cycles":cycles, - 'casename':casename} - input_text = open(input_template).read() - - first,last = cylc_get_ensemble_first_and_last(case, ensemble) - if ensemble == 1: - overrides.update({'members':"{}".format(first)}) - overrides.update({"workflow_description":"case {}".format(case.get_value("CASE"))}) - else: - overrides.update({'members':"{}..{}".format(first,last)}) - firstcase = case.get_value("CASE") - intlen = len(str(last)) - lastcase = firstcase[:-intlen]+str(last) - overrides.update({"workflow_description":"ensemble from {} to {}".format(firstcase,lastcase)}) - overrides.update({"case_path_string":cylc_get_case_path_string(case, ensemble)}) - - for job in jobs: - jobname = job - if job == 'case.st_archive': - continue - if job == 'case.run': - jobname = 'run' - overrides.update(env_batch.get_job_overrides(job, case)) - overrides.update({'job_id':'run.'+casename}) - input_text = input_text + cylc_batch_job_template(job, jobname, case, ensemble) - else: - depends_on = env_workflow.get_value('dependency', subgroup=job) - if depends_on.startswith('case.'): - depends_on = depends_on[5:] - input_text = input_text.replace(' => '+depends_on,' => '+depends_on+' => '+job) - - - overrides.update(env_batch.get_job_overrides(job, case)) - overrides.update({'job_id':job+'.'+casename}) - if 'total_tasks' in overrides and overrides['total_tasks'] > 1: - input_text = input_text + cylc_batch_job_template(job, jobname, case, ensemble) - else: - input_text = input_text + cylc_script_job_template(jobname, case, ensemble) - - - overrides.update({'batchdirectives':env_batch.get_batch_directives(case,job, overrides=overrides, - output_format='cylc')}) - # we need to re-transform for each job to get job size correctly - input_text = transform_vars(input_text, case=case, subgroup=job, overrides=overrides) - - with open("suite.rc", "w") as f: - f.write(case.get_resolved_value(input_text)) - - - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/getTiming b/scripts/Tools/getTiming deleted file mode 100755 index 22878582ee5..00000000000 --- a/scripts/Tools/getTiming +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -""" -Get timing information from run -""" - -from standard_script_setup import * -import argparse, sys, os -from CIME.case import Case -from CIME.get_timing import get_timing - -def parse_command_line(args, description): - parser = argparse.ArgumentParser( - usage="\n%s [-lid|--lid] [-h|--help]" % os.path.basename(args[0]), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("-lid", "--lid", - help="print using yymmdd-hhmmss format", - default="999999-999999") - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to get timing for") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return args.caseroot, args.lid - -def __main_func(description): - """ - Reads timing information from $CASEROOT/timing/$MODEL_timing_stats.$lid and - outputs to $CASEROOT/timing/$MODEL_timing.$CASE.$lid - """ - caseroot, lid = 
parse_command_line(sys.argv, description) - with Case(caseroot, read_only=True) as case: - get_timing(case, lid) - -if __name__ == "__main__": - __main_func(__doc__) diff --git a/scripts/Tools/get_standard_makefile_args b/scripts/Tools/get_standard_makefile_args deleted file mode 100755 index ba0d306ac46..00000000000 --- a/scripts/Tools/get_standard_makefile_args +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python - -""" -Output the list of standard makefile args to the command line. This script -should only be used when the components buildlib is not written in python -""" - -from standard_script_setup import * - -from CIME.build import get_standard_makefile_args -from CIME.build import get_standard_cmake_args -from CIME.case import Case -from CIME.test_status import * - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build.\n" - "Default is current directory.") - - parser.add_argument("--cmake", action="store_true", help="Get cmake-style args instead of Make") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot, args.cmake - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, cmake = parse_command_line(sys.argv, description) - - success = True - with Case(caseroot) as case: - print("{}".format(get_standard_cmake_args(case) if cmake else get_standard_makefile_args(case))) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/jenkins_generic_job b/scripts/Tools/jenkins_generic_job deleted file mode 100755 index 0556160197f..00000000000 --- a/scripts/Tools/jenkins_generic_job +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python - -""" -Jenkins runs this script to perform a test of an e3sm -test suite. Essentially, a wrapper around create_test and -wait_for_tests that handles cleanup of old test results and -ensures that the batch system is left in a clean state. 
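Further down in parse_command_line, the default baseline name is derived from the current git branch with dots and slashes replaced, since both would break baseline paths and dashboard names. One way to reproduce that behavior outside CIME (plain git instead of CIME.utils; a sketch, not the script's code):

import subprocess

def default_baseline_name():
    # "git rev-parse --abbrev-ref HEAD" prints the current branch name.
    branch = subprocess.check_output(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"], universal_newlines=True
    ).strip()
    return branch.replace(".", "_").replace("/", "_")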
-""" - -from standard_script_setup import * - -import CIME.wait_for_tests -from CIME.utils import expect -from CIME.XML.machines import Machines -from jenkins_generic_job import jenkins_generic_job - -# Don't know if this belongs here longterm -MACHINES_THAT_MAINTAIN_BASELINES = ("melvin", "sandiatoss3") - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n{0} [-g] [-d] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run the tests and compare baselines \033[0m - > {0} - \033[1;32m# Run the tests, compare baselines, and update dashboard \033[0m - > {0} -d - \033[1;32m# Run the tests, generating a full set of baselines (useful for first run on a machine) \033[0m - > {0} -g -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - default_baseline = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) - if default_baseline is not None: - default_baseline = default_baseline.replace(".", "_").replace("/", "_") # Dots or slashes will mess things up - - parser.add_argument("-g", "--generate-baselines", action="store_true", - help="Generate baselines") - - parser.add_argument("--baseline-compare", - help="Do baseline comparisons (yes/no)") - - parser.add_argument("--submit-to-cdash", action="store_true", - help="Send results to CDash") - - parser.add_argument("--no-batch", action="store_true", - help="Do not use batch system even if on batch machine") - - parser.add_argument("-c", "--cdash-build-name", - help="Build name to use for CDash submission. Default will be __") - - parser.add_argument("-p", "--cdash-project", default=CIME.wait_for_tests.E3SM_MAIN_CDASH, - help="The name of the CDash project where results should be uploaded") - - parser.add_argument("-b", "--baseline-name", default=default_baseline, - help="Baseline name for baselines to use. Also impacts dashboard job name. Useful for testing a branch other than next or master") - - parser.add_argument("-B", "--baseline-root", - help="Baseline area for baselines to use. Default will be config_machine value for machine") - - parser.add_argument("-O", "--override-baseline-name", - help="Force comparison with these baseines without impacting dashboard or test-id.") - - parser.add_argument("-t", "--test-suite", - help="Override default e3sm test suite that will be run") - - parser.add_argument("-r", "--scratch-root", - help="Override default e3sm scratch root. Use this to avoid conflicting with other jenkins jobs") - - parser.add_argument("--cdash-build-group", default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, - help="The build group to be used to display results on the CDash dashboard.") - - parser.add_argument("-j", "--parallel-jobs", type=int, default=None, - help="Number of tasks create_test should perform simultaneously. Default " - "will be min(num_cores, num_tests).") - - parser.add_argument("--walltime", - help="Force a specific walltime for all tests.") - - parser.add_argument("-m", "--machine", - help="The machine for which to build tests, this machine must be defined" - " in the config_machines.xml file for the given model. 
" - "Default is to match the name of the machine in the test name or " - "the name of the machine this script is run on to the " - "NODENAME_REGEX field in config_machines.xml. This option is highly " - "unsafe and should only be used if you know what you're doing.") - - parser.add_argument("--compiler", - help="Compiler to use to build cime. Default will be the default defined for the machine.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - expect(not (args.submit_to_cdash and args.generate_baselines), - "Does not make sense to use --generate-baselines and --submit-to-cdash together") - expect(not (args.cdash_build_name is not None and not args.submit_to_cdash), - "Does not make sense to use --cdash-build-name without --submit-to-cdash") - expect(not (args.cdash_project is not CIME.wait_for_tests.E3SM_MAIN_CDASH and not args.submit_to_cdash), - "Does not make sense to use --cdash-project without --submit-to-cdash") - expect (args.baseline_compare in [None, "yes", "no"], - "Valid args for --baseline-compare are 'yes' or 'no'") - - machine = Machines(machine=args.machine) - machine_name = machine.get_machine_name() - - args.machine = machine - args.test_suite = machine.get_value("TESTS") if args.test_suite is None else args.test_suite - default_maintain_baselines = machine_name in MACHINES_THAT_MAINTAIN_BASELINES - args.baseline_compare = default_maintain_baselines if args.baseline_compare is None else args.baseline_compare == "yes" - args.scratch_root = machine.get_value("CIME_OUTPUT_ROOT") if args.scratch_root is None else args.scratch_root - args.compiler = machine.get_default_compiler() if args.compiler is None else args.compiler - - expect(args.baseline_name is not None, - "Failed to probe baseline_name from git branch, please provide one. 
It is essential for formulating the test-id even if baseline comparisons are not being done") - - if args.override_baseline_name is None: - args.override_baseline_name = args.baseline_name - - return args.generate_baselines, args.submit_to_cdash, args.no_batch, args.baseline_name, args.cdash_build_name, \ - args.cdash_project, args.test_suite, args.cdash_build_group, args.baseline_compare, args.scratch_root, args.parallel_jobs, args.walltime, args.machine, args.compiler, args.override_baseline_name, args.baseline_root - -############################################################################### -def _main_func(description): -############################################################################### - generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name, baseline_root = \ - parse_command_line(sys.argv, description) - - sys.exit(0 if jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name, baseline_root) - else CIME.utils.TESTS_FAILED_ERR_CODE) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/jenkins_script b/scripts/Tools/jenkins_script deleted file mode 100755 index 0f6bb9be0a8..00000000000 --- a/scripts/Tools/jenkins_script +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# -# Wrapper around jenkins_generic_job that will allow output -# from that script to always be printed to the screen and -# recoverable if Jenkins is forced to kill the job. This is the -# script that should be used from Jenkins. -# - -SCRIPT_DIR=$( cd "$( dirname "$0" )" && pwd ) -DATE_STAMP=$(date "+%Y-%m-%d_%H%M%S") -export JENKINS_START_TIME=$(date "+%s") - -umask 002 - -$SCRIPT_DIR/jenkins_generic_job --submit-to-cdash "$@" >& JENKINS_$DATE_STAMP diff --git a/scripts/Tools/list_e3sm_tests b/scripts/Tools/list_e3sm_tests deleted file mode 100755 index c45975d8f6b..00000000000 --- a/scripts/Tools/list_e3sm_tests +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python - -""" -List e3sm test suites. Can be used to show what's being tested. Can just -list tested grids, compsets, etc. -""" - -from standard_script_setup import * -from CIME.utils import expect -import get_tests - -import sys, argparse, os - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n{0} [ ...] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# List all tested compsets \033[0m - > {0} compsets - \033[1;32m# List all compsets tested by e3sm_developer \033[0m - > {0} compsets e3sm_developer - \033[1;32m# List all grids tested by e3sm_developer \033[0m - > {0} grid e3sm_developer -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("suites", nargs="+", - help="The tests suites to list. 
Test suites: {}".format(", ".join(get_tests.get_test_suites()))) - - parser.add_argument("-t", "--thing-to-list", choices=("compsets", "grids", "testcases", "tests"), default="tests", - help="The thing you want to list") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if (not args.suites): - args.suites = get_tests.get_test_suites() - - return args.thing_to_list, args.suites - -############################################################################### -def list_tests(thing_to_list, suites): -############################################################################### - things = set() - for suite in suites: - tests = get_tests.get_test_suite(suite) - for test in tests: - testcase, _, grid, compset = CIME.utils.parse_test_name(test)[:4] - if (thing_to_list == "compsets"): - things.add(compset) - elif (thing_to_list == "grids"): - things.add(grid) - elif (thing_to_list == "testcases"): - things.add(testcase) - elif (thing_to_list == "tests"): - things.add(test) - else: - expect(False, "Unrecognized thing to list '{}'".format(thing_to_list)) - - print("Tested {} for test suites: {}".format(thing_to_list, ", ".join(suites))) - for item in sorted(things): - print(" {}".format(item)) - -############################################################################### -def _main_func(description): -############################################################################### - thing_to_list, suites = parse_command_line(sys.argv, description) - - list_tests(thing_to_list, suites) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/mkSrcfiles b/scripts/Tools/mkSrcfiles deleted file mode 100755 index 74745bbb9b8..00000000000 --- a/scripts/Tools/mkSrcfiles +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env perl - -# Make list of files containing source code. The source list contains all -# .F90, .f90, .F, .f, .c and .cpp files in a specified list of directories. -# The directories are specified one per line in a file called Filepath which -# this script tries to open in the current directory. The current -# directory is prepended to the specified list of directories. If Filepath -# doesn't exist then only the source files in the current directory are -# listed. -# The list of source files is written to the file Srcfiles. - -# Check usage: -@ARGV == 0 or usage(); - -if ( open(FILEPATH,"< Filepath") ) { - @paths = ; - close( FILEPATH ); -} else { - @paths = (); -} -chomp @paths; -unshift(@paths, '.'); -foreach $dir (@paths) { # (could check that directories exist here) - $dir =~ s!/?\s*$!!; # remove / and any whitespace at end of directory name - ($dir) = glob $dir; # Expand tildes in path names. -} - -# Loop through the directories and add each filename as a hash key. This -# automatically eliminates redunancies. 
-%src = (); -my $skip_prefix = $ENV{mkSrcfiles_skip_prefix}; - -foreach $dir (@paths) { - @filenames = (glob("$dir/*.[Ffc]"), glob("$dir/*.[Ff]90"), glob("$dir/*.cpp")); - foreach $filename (@filenames) { - $filename =~ s!.*/!!; # remove part before last slash - if (defined $skip_prefix){ - if ($filename =~ /^${skip_prefix}/){ - print "WARNING: Skipping file $dir/$filename Source files beginning in $skip_prefix are ignored\n"; - next; - } - } - $src{$filename} = 1; - } - @templates = glob("$dir/*.F90.in"); - foreach $filename (@templates) { - $filename =~ s!.*/!!; # remove part before last slash - my $dfile = $filename; - $dfile =~ s/\.in//; - delete $src{$dfile} if(defined $src{$dfile}); - $src{$filename} = 1; - } -} - -my @srcfiles; -my $foundcnt=0; -my $writenew=1; -if(-e "Srcfiles"){ # file already exists, do not update if no changes are required - open(SRC,"Srcfiles"); - @srcfiles = ; - close(SRC); - $writenew=0; - foreach $file (@srcfiles){ - chomp $file; - if($src{$file}){ - $src{$file}=0; - }else{ - $writenew=1; # A srcfile was removed - last; - } - - } - foreach $file (keys %src){ - if($src{$file} == 1){ - $writenew=1; # A srcfile was added - last; - } - } -} - -if($writenew==1){ - open(SRC,"> Srcfiles") or die "Can't open Srcfiles\n"; - - foreach $file ( sort keys %src ) { - print SRC "$file\n"; - } - - close( SRC ); -} -#-------------------------------------------------------------------------------------- - -sub usage { - ($ProgName = $0) =~ s!.*/!!; # name of program - die < {0} case1 case2 -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("case1", help="First case. This one will be changed") - - parser.add_argument("case2", help="Second case. 
This one will not be changed") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.case1, args.case2 - -############################################################################### -def normalize_cases(case1, case2): -############################################################################### - # gunzip all logs - for case_dir in [case1, case2]: - for log_dir in ["bld", "run"]: - gzips = glob.glob(os.path.join(case_dir, log_dir, "*.gz")) - if (gzips): - run_cmd_no_fail("gunzip -f {}".format(" ".join(gzips))) - - # Change case1 to be as if it had same test-id as case2 - test_id1 = run_cmd_no_fail("./xmlquery --value TEST_TESTID", from_dir=case1) - test_id2 = run_cmd_no_fail("./xmlquery --value TEST_TESTID", from_dir=case2) - run_cmd_no_fail("for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format(test_id1, test_id2), - from_dir=case1) - - # Change case1 to look as if it is was built/run at exact same time as case2 - for log_dir in ["bld", "run"]: - case1_lids = set() - for logfile in glob.glob("{}/{}/*.bldlog.*".format(case1, log_dir)): - case1_lids.add(logfile.split(".")[-1]) - - case2_lids = set() - for logfile in glob.glob("{}/{}/*.bldlog.*".format(case2, log_dir)): - case2_lids.add(logfile.split(".")[-1]) - - case1_lids = list(sorted(case1_lids)) - case2_lids = list(sorted(case2_lids)) - - for case1_lid, case2_lid in zip(case1_lids, case2_lids): - run_cmd_no_fail("for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format(case1_lid, case2_lid), - from_dir=case1) - - for case1_lid, case2_lid in zip(case1_lids, case2_lids): - files_needing_rename = run_cmd_no_fail('find -depth -name "*.{}"'.format(case1_lid), from_dir=case1).splitlines() - for file_needing_rename in files_needing_rename: - expect(file_needing_rename.endswith(case1_lid), "broken") - new_name = file_needing_rename.rstrip(case1_lid) + case2_lid - os.rename(os.path.join(case1, file_needing_rename), os.path.join(case1, new_name)) - - # Normalize CIMEROOT - case1_root = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=case1) - case2_root = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=case2) - if (case1_root != case2_root): - run_cmd_no_fail("for item in $(find -type f); do sed -i 's:{}:{}:g' $item; done".format(case1_root, case2_root), - from_dir=case1) - -############################################################################### -def _main_func(description): -############################################################################### - case1, case2 = parse_command_line(sys.argv, description) - - normalize_cases(case1, case2) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/pelayout b/scripts/Tools/pelayout deleted file mode 100755 index d2edd9bad52..00000000000 --- a/scripts/Tools/pelayout +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/env python - -""" -This utility allows the CIME user to view and modify a case's PE layout. -With this script, a user can: - -1) View the PE layout of a case - ./pelayout - ./pelayout --format "%C: %06T/%+H" --header "Comp: Tasks /Th" -2) Attempt to scale the number of tasks used by a case - ./pelayout --set-ntasks 144 -3) Set the number of threads used by a case - ./pelayout --set-nthrds 2 - -The --set-ntasks option attempts to scale all components so that the -job will run in the provided number of tasks. 
For a component using the -maximum number of tasks, this will merely set that component to the new -number. However, for components running in parallel using a portion of -the maximum tasks, --set-ntasks will attempt to scale the tasks -proportionally, changing the value of ROOTPE to maintain the same level -of parallel behavior. If the --set-ntasks algorithm is unable to -automatically find a new layout, it will print an error message -indicating the component(s) it was unable to reset and no changes will -be made to the case. - -Interpreted FORMAT sequences are: -%% a literal % -%C the component name -%T the task count for the component -%H the thread count for the component -%R the PE root for the component - -Standard format extensions, such as a field length and padding are supported. -Python dictionary-format strings are also supported. For instance, ---format "{C:4}", will print the component name padded to 4 spaces. - -If you encounter problems with this tool or find it is missing any -feature that you need, please open an issue on https://github.com/ESMCI/cime -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.utils import expect, convert_to_string -import sys -import re - -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args, description): -############################################################################### - # Start with usage description - parser = argparse.ArgumentParser(description=description , - formatter_class=argparse.RawDescriptionHelpFormatter) - CIME.utils.setup_standard_logging_options(parser) - - # Set command line options - parser.add_argument("--set-ntasks", default=None, - help="Total number of tasks to set for the case") - - parser.add_argument("--set-nthrds", "--set-nthreads", default=None, - help="Number of threads to set for all components") - - parser.add_argument("--format", - default="%4C: %6T/%6H; %6R", - help="Format the PE layout items for each component (see below)") - - parser.add_argument("--header", - default="Comp NTASKS NTHRDS ROOTPE", - help="Custom header for PE layout display") - - parser.add_argument("--no-header", default=False , action="store_true" , - help="Do not print any PE layout header") - - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to reference") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if (args.no_header): - args.header = None - # End if - - return args.format, args.set_ntasks, args.set_nthrds, args.header, args.caseroot -# End def parse_command_line - - -############################################################################### -def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None): -############################################################################### - thistype = case.get_type_info(var) - value = case.get_value(var, attribute=attribute, resolved=resolved, subgroup=subgroup) - if value is not None and thistype: - value = convert_to_string(value, thistype, var) - return value - -############################################################################### -def format_pelayout(comp, ntasks, nthreads, rootpe, arg_format): -############################################################################### - """ - Format the PE layout information for each component, using a default format, - or using the arg_format input, if it exists. 
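As a standalone rendition of the FORMAT sequences listed above, the sketch below mirrors the token-to-str.format substitutions that format_pelayout performs; the component values are made up for illustration.

    import re

    def render(comp, ntasks, nthreads, rootpe, fmt="%4C: %6T/%6H; %6R"):
        # Translate %-tokens into str.format fields, as format_pelayout does.
        fmt = re.sub(r"%([0-9]*)C", r"{C:\1}", fmt)
        fmt = re.sub(r"%([-+0-9]*)T", r"{T:\1}", fmt)
        fmt = re.sub(r"%([-+0-9]*)H", r"{H:\1}", fmt)
        fmt = re.sub(r"%([-+0-9]*)R", r"{R:\1}", fmt)
        return fmt.format(C=comp, T=ntasks, H=nthreads, R=rootpe)

    print(render("ATM", 128, 2, 0))   # "ATM :    128/     2;      0"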
- """ - subs = { 'C': comp, 'T': ntasks, 'H': nthreads, 'R': rootpe } - layout_str = re.sub(r"%([0-9]*)C", r"{C:\1}", arg_format) - layout_str = re.sub(r"%([-+0-9]*)T", r"{T:\1}", layout_str) - layout_str = re.sub(r"%([-+0-9]*)H", r"{H:\1}", layout_str) - layout_str = re.sub(r"%([-+0-9]*)R", r"{R:\1}", layout_str) - layout_str = layout_str.format(**subs) - return layout_str -# End def format_pelayout - -############################################################################### -def print_pelayout(case, ntasks, nthreads, rootpes, arg_format, header): -############################################################################### - """ - Print the PE layout information for each component, using the format, - if it exists. - """ - comp_classes = case.get_values("COMP_CLASSES") - - if (header is not None): - print(header) - # End if - for comp in comp_classes: - print(format_pelayout(comp, ntasks[comp], nthreads[comp], rootpes[comp], arg_format)) - # End for - -# End def print_pelayout - -############################################################################### -def gather_pelayout(case): -############################################################################### - """ - Gather the PE layout information for each component - """ - ntasks = {} - nthreads = {} - rootpes = {} - comp_classes = case.get_values("COMP_CLASSES") - - for comp in comp_classes: - ntasks[comp] = int(case.get_value("NTASKS_"+comp)) - nthreads[comp] = int(case.get_value("NTHRDS_"+comp)) - rootpes[comp] = int(case.get_value("ROOTPE_"+comp)) - # End for - return ntasks, nthreads, rootpes -# End def gather_pelayout - -############################################################################### -def set_nthreads(case, nthreads): -############################################################################### - comp_classes = case.get_values("COMP_CLASSES") - - for comp in comp_classes: - case.set_value("NTHRDS", nthreads, comp) - # End for -# End def set_nthreads - -############################################################################### -def modify_ntasks(case, new_tot_tasks): -############################################################################### - comp_classes = case.get_values("COMP_CLASSES") - new_tasks = {} - new_roots = {} - curr_tot_tasks = 0 - - # First, gather current task and root pe info - curr_tasks, _, curr_roots = gather_pelayout(case) - - # How many tasks are currently being used? - for comp in comp_classes: - if ((curr_tasks[comp] + curr_roots[comp]) > curr_tot_tasks): - curr_tot_tasks = curr_tasks[comp] + curr_roots[comp] - # End if - # End for - - if (new_tot_tasks != curr_tot_tasks): - # Compute new task counts and root pes - for comp in comp_classes: - new_tasks[comp] = curr_tasks[comp] * new_tot_tasks / curr_tot_tasks - new_roots[comp] = curr_roots[comp] * new_tot_tasks / curr_tot_tasks - # End for - - # Check for valid recomputation - mod_ok = True - for comp in comp_classes: - if (new_tasks[comp] * curr_tot_tasks / new_tot_tasks) != curr_tasks[comp]: - logger.error("Task change invalid for {}".format(comp)) - mod_ok = False - - if (new_roots[comp] * curr_tot_tasks / new_tot_tasks) != curr_roots[comp]: - logger.error("Root PE change invalid for {}".format(comp)) - mod_ok = False - # End for - expect(mod_ok, "pelayout unable to set ntasks to {}".format(new_tot_tasks)) - - # We got this far? 
Go ahead and change PE layout - for comp in comp_classes: - case.set_value("NTASKS_"+comp, new_tasks[comp], comp) - case.set_value("ROOTPE_"+comp, new_roots[comp], comp) - # End for - # End if (#tasks changed) -# End def modify_ntasks - -############################################################################### -def _main_func(description): -############################################################################### - # Initialize command line parser and get command line options - arg_format, set_ntasks, set_nthrds, header, caseroot = parse_command_line(sys.argv, description) - - # Initialize case ; read in all xml files from caseroot - with Case(caseroot) as case: - if (set_nthrds is not None): - set_nthreads(case, set_nthrds) - # End if - if (set_ntasks is not None): - modify_ntasks(case, int(set_ntasks)) - # End if - ntasks, nthreads, rootpes = gather_pelayout(case) - print_pelayout(case, ntasks, nthreads, rootpes, arg_format, header) - # End with - -# End def _main_func - -if (__name__ == "__main__"): - _main_func(__doc__) -# End if diff --git a/scripts/Tools/preview_namelists b/scripts/Tools/preview_namelists deleted file mode 100755 index dca2bf2d82f..00000000000 --- a/scripts/Tools/preview_namelists +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python - -""" -Creates namelist and other model input files for each component (by running each -component's buildnml script). Then copies the generated files to the CaseDocs -subdirectory for inspection. - -It is not required to run this manually: namelists will be generated -automatically when the run starts. However, this can be useful in order to -review the namelists before submitting the case. - -case.setup must be run before this. - -Typical usage is simply: - ./preview_namelists -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.utils import expect - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory for which namelists are generated.\n" - "Default is current directory.") - - parser.add_argument('--component', - help="Specify component's namelist to build.\n" - "If not specified, generates namelists for all components.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args - -############################################################################### -def _main_func(description): -############################################################################### - args = parse_command_line(sys.argv, description) - - expect(os.path.isfile(os.path.join(args.caseroot, "CaseStatus")), - "case.setup must be run prior to running preview_namelists") - with Case(args.caseroot, read_only=False) as case: - case.create_namelists(component=args.component) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/preview_run b/scripts/Tools/preview_run deleted file mode 100755 index 5024df2d9bb..00000000000 --- a/scripts/Tools/preview_run +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python - -""" -Queries key CIME shell commands (mpirun and batch submission). 
- -To force a certain mpirun command, use: - ./xmlchange MPI_RUN_COMMAND=$your_cmd - -Example: - ./xmlchange MPI_RUN_COMMAND='mpiexec -np 16 --some-flag' - -To force a certain qsub command, use: - ./xmlchange --subgroup=case.run BATCH_COMMAND_FLAGS=$your_flags - -Example: - ./xmlchange --subgroup=case.run BATCH_COMMAND_FLAGS='--some-flag --other-flag' -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.utils import set_logger_indent - -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to query.\n" - "Default is current directory.") - - parser.add_argument("-j", "--job", default=None, - help="The job you want to print.\n" - "Default is case.run (or case.test if this is a test).") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.caseroot, args.job - -############################################################################### -def _main_func(description): -############################################################################### - caseroot, job = parse_command_line(sys.argv, description) - logging.disable(logging.INFO) - - with Case(caseroot, read_only=False) as case: - print("CASE INFO:") - print(" nodes: {}".format(case.num_nodes)) - print(" total tasks: {}".format(case.total_tasks)) - print(" tasks per node: {}".format(case.tasks_per_node)) - print(" thread count: {}".format(case.thread_count)) - print("") - - print("BATCH INFO:") - if job is None: - job = case.get_primary_job() - - set_logger_indent(" ") - job_id_to_cmd = case.submit_jobs(dry_run=True, job=job) - env_batch = case.get_env('batch') - for job_id, cmd in job_id_to_cmd: - print(" FOR JOB: {}".format(job_id)) - print(" ENV:") - case.load_env(job=job_id, reset=True, verbose=True) - - if "OMP_NUM_THREADS" in os.environ: - print(" Setting Environment OMP_NUM_THREADS={}".format(os.environ["OMP_NUM_THREADS"])) - print("") - print(" SUBMIT CMD:") - print(" {}".format(case.get_resolved_value(cmd))) - print("") - - # get_job_overrides must come after the case.load_env since the cmd may use - # env vars. 
- overrides = env_batch.get_job_overrides(job_id, case) - print(" MPIRUN (job={}):".format(job_id)) - print (" {}".format(case.get_resolved_value(overrides["mpirun"]))) - print("") - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/save_provenance b/scripts/Tools/save_provenance deleted file mode 100755 index 731e9080ce3..00000000000 --- a/scripts/Tools/save_provenance +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python - -""" -This tool provide command-line access to provenance-saving functionality -""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.provenance import * -from CIME.utils import get_lids -from CIME.get_timing import get_timing - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Save run (timing) provenance for current case \033[0m - > {0} postrun -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("mode", choices=("build", "prerun", "postrun"), - help="Phase for which to save provenance. " - "prerun is mostly for infrastructure testing; " - "it does not make sense to store this information manually otherwise") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") - - parser.add_argument("-l", "--lid", - help="Force system to save provenance with this LID") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.mode, args.caseroot, args.lid - -############################################################################### -def _main_func(description): -############################################################################### - mode, caseroot, lid = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - if mode == "build": - expect(False, "Saving build provenance manually is not currently supported " - "but it should already always be happening automatically") - save_build_provenance(case, lid=lid) - elif mode == "prerun": - expect(lid is not None, "You must provide LID for prerun mode") - save_prerun_provenance(case, lid=lid) - elif mode == "postrun": - expect(lid is None, "Please allow me to autodetect LID") - model = case.get_value("MODEL") - caseid = case.get_value("CASE") - case.set_value("SAVE_TIMING", True) - lids = get_lids(case) - for lid in lids: - # call get_timing if needed - expected_timing_file = os.path.join(caseroot, "timing", "{}_timing.{}.{}.gz" .format(model, caseid, lid)) - if (not os.path.exists(expected_timing_file)): - get_timing(case, lid) - save_prerun_provenance(case, lid=lid) - save_postrun_provenance(case, lid=lid) - else: - expect(False, "Unhandled mode '{}'".format(mode)) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/scripts/Tools/simple_compare b/scripts/Tools/simple_compare deleted file mode 100755 index 97d21fd2e2f..00000000000 --- a/scripts/Tools/simple_compare +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python - -""" -Compare files in a normalized way. Used by create_test for -diffing non-namelist files. 
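The normalization itself lives in CIME.simple_compare; purely as an illustration of the idea (not that module's actual code), masking the run-specific case id before a plain text diff looks roughly like this:

    import difflib

    def normalize(text, case_id):
        # Replace the run-specific case id so otherwise identical files compare equal.
        return text.replace(case_id, "<CASE>")

    gold = normalize("log for SMS.f19_g16.A.gold_id\n", "SMS.f19_g16.A.gold_id")
    new = normalize("log for SMS.f19_g16.A.new_id\n", "SMS.f19_g16.A.new_id")
    print(list(difflib.unified_diff(gold.splitlines(), new.splitlines(), lineterm="")))   # []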
-""" - -from standard_script_setup import * -import CIME.simple_compare -from CIME.utils import expect - -import argparse, sys, os - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n{0} [-c ] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Compare files\033[0m - > {0} baseline_dir/test/file mytestarea/file -c -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("gold_file", help="Path to gold file") - - parser.add_argument("new_file", help="Path to file to compare against gold") - - parser.add_argument("-c", "--case", action="store", dest="case", default=None, - help="The case base id (..). Helps us normalize data.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - # Normalize case - if (args.case is not None): - args.case = CIME.utils.normalize_case_id(args.case) - - return args.gold_file, args.new_file, args.case - -############################################################################### -def _main_func(description): -############################################################################### - gold_file, compare_file, case = \ - parse_command_line(sys.argv, description) - - if (case is None): - logging.warning("No case id data available, will not be able to normalize values as effectively") - else: - logging.info("Using case: '{}'".format(case)) - - if gold_file.endswith('runconfig'): - success, comments = CIME.simple_compare.compare_runconfigfiles(gold_file, compare_file, case) - else: - success, comments = CIME.simple_compare.compare_files(gold_file, compare_file, case) - expect(success, - "Diff between files {} and {}:\n{}".format(gold_file, compare_file, comments)) - - print("Files {} and {} MATCH".format(gold_file, compare_file)) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/standard_script_setup.py b/scripts/Tools/standard_script_setup.py deleted file mode 100644 index a3f4801faf0..00000000000 --- a/scripts/Tools/standard_script_setup.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Encapsulate the importing of python utils and logging setup, things -that every script should do. -""" -# pylint: disable=unused-import - -import sys, os -import __main__ as main -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..") -_LIB_DIR = os.path.join(_CIMEROOT, "scripts", "lib") -sys.path.append(_LIB_DIR) - -# Important: Allows external tools to link up with CIME -os.environ["CIMEROOT"] = _CIMEROOT - -import CIME.utils -CIME.utils.check_minimum_python_version(2, 7) -CIME.utils.stop_buffering_output() -import logging, argparse diff --git a/scripts/Tools/testreporter.py b/scripts/Tools/testreporter.py deleted file mode 100755 index 455d9f7042e..00000000000 --- a/scripts/Tools/testreporter.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/env python - -""" -Simple script to populate CESM test database with test results. 
-""" - -from standard_script_setup import * - -from CIME.XML.env_build import EnvBuild -from CIME.XML.env_case import EnvCase -from CIME.XML.env_test import EnvTest -from CIME.XML.test_reporter import TestReporter -from CIME.utils import expect -from CIME.XML.generic_xml import GenericXML - -import glob - -############################################################################### -def parse_command_line(args): -############################################################################### - parser = argparse.ArgumentParser() - - CIME.utils.setup_standard_logging_options(parser) - - # Parse command line options - - #parser = argparse.ArgumentParser(description='Arguements for testreporter') - parser.add_argument("--tagname", - help="Name of the tag being tested.") - parser.add_argument("--testid", - help="Test id, ie c2_0_a6g_ing,c2_0_b6g_gnu.") - parser.add_argument("--testroot", - help="Root directory for tests to populate the database.") - parser.add_argument("--testtype", - help="Type of test, prealpha or prebeta.") - parser.add_argument("--dryrun",action="store_true", - help="Do a dry run, database will not be populated.") - parser.add_argument("--dumpxml",action="store_true", - help="Dump XML test results to sceen.") - args = parser.parse_args() - CIME.utils.parse_args_and_handle_standard_logging_options(args) - - return args.testroot, args.testid, args.tagname, args.testtype, args.dryrun, args.dumpxml - -############################################################################### -def get_testreporter_xml(testroot, testid, tagname, testtype): -############################################################################### - os.chdir(testroot) - - # - # Retrieve compiler name and mpi library - # - xml_file=glob.glob("*"+testid+"/env_build.xml") - expect(len(xml_file) > 0, "Tests not found. It's possible your testid, {} is wrong.".format(testid)) - envxml=(EnvBuild(".",infile=xml_file[0])) - compiler=envxml.get_value("COMPILER") - mpilib=envxml.get_value("MPILIB") - - # - # Retrieve machine name - # - xml_file=glob.glob("*"+testid+"/env_case.xml") - envxml=(EnvCase(".",infile=xml_file[0])) - machine=envxml.get_value("MACH") - - # - # Retrieve baseline tag to compare to - # - xml_file=glob.glob("*"+testid+"/env_test.xml") - envxml=(EnvTest(".",infile=xml_file[0])) - baseline = envxml.get_value("BASELINE_NAME_CMP") - - # - # Create XML header - # - - testxml=TestReporter() - testxml.setup_header(tagname,machine,compiler,mpilib,testroot,testtype,baseline) - - # - # Create lists on tests based on the testid in the testroot directory. - # - test_names=glob.glob("*"+testid) - # - # Loop over all tests and parse the test results - # - test_status={} - for test_name in test_names: - if not os.path.isfile(test_name+"/TestStatus"): - continue - test_status['COMMENT']="" - test_status['BASELINE']='----' - test_status['MEMCOMP']='----' - test_status['MEMLEAK']='----' - test_status['NLCOMP']='----' - test_status['STATUS']='----' - test_status['TPUTCOMP']='----' - # - # Check to see if TestStatus is present, if not then continue - # I might want to set the status to fail - # - try: - lines = [line.rstrip('\n') for line in open(test_name+"/TestStatus")] - except (IOError, OSError): - test_status['STATUS']="FAIL" - test_status['COMMENT']="TestStatus missing. " - continue - # - # Loop over each line of TestStatus, and check for different types of failures. 
- # - for line in lines: - if "NLCOMP" in line: - test_status['NLCOMP']=line[0:4] - if "MEMLEAK" in line: - test_status['MEMLEAK']=line[0:4] - if "MEMCOMP" in line: - test_status['MEMCOMP']=line[0:4] - if "BASELINE" in line: - test_status['BASELINE']=line[0:4] - if "TPUTCOMP" in line: - test_status['TPUTCOMP']=line[0:4] - if "FAIL PFS" in line: - test_status['STATUS']="FAIL" - if "INIT" in line: - test_status['INIT']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="INIT fail! " - break - if "CREATE_NEWCASE" in line: - test_status['CREATE_NEWCASE']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="CREATE_NEWCASE fail! " - break - if "XML" in line: - test_status['XML']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="XML fail! " - break - if "SETUP" in line: - test_status['SETUP']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="SETUP fail! " - break - if "SHAREDLIB_BUILD" in line: - test_status['SHAREDLIB_BUILD']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="CFAIL" - test_status['COMMENT']+="SHAREDLIB_BUILD fail! " - break - if "MODEL_BUILD" in line: - test_status['MODEL_BUILD']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="CFAIL" - test_status['COMMENT']+="MODEL_BUILD fail! " - break - if "SUBMIT" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="SUBMIT fail! " - break - if "RUN" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="RUN fail! " - break - if "COMPARE_base_rest" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Restart fail! " - break - if "COMPARE_base_hybrid" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Hybrid fail! " - break - if "COMPARE_base_multiinst" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Multi instance fail! " - break - if "COMPARE_base_test" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Base test fail! " - break - if "COMPARE_base_single_thread" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Thread test fail! " - break - - # - # Do not include time comments. Just a preference to have cleaner comments in the test database - # - try: - if 'time=' not in line and 'GENERATE' not in line: - if 'BASELINE' not in line: - test_status['COMMENT']+=line.split(' ',3)[3]+' ' - else: - test_status['COMMENT']+=line.split(' ',4)[4]+' ' - except Exception: # Probably want to be more specific here - pass - - # - # Fill in the xml with the test results - # - testxml.add_result(test_name,test_status) - - return testxml - -############################################################################## -def _main_func(): -############################################################################### - - testroot, testid, tagname, testtype, dryrun, dumpxml = parse_command_line(sys.argv) - - testxml = get_testreporter_xml(testroot, testid, tagname, testtype) - - # - # Dump xml to a file. 
- # - if dumpxml: - GenericXML.write(testxml,outfile="TestRecord.xml") - - - # - # Prompt for username and password, then post the XML string to the test database website - # - if not dryrun: - testxml.push2testdb() - -############################################################################### - -if __name__ == "__main__": - _main_func() diff --git a/scripts/Tools/wait_for_tests b/scripts/Tools/wait_for_tests deleted file mode 100755 index 12566bc267d..00000000000 --- a/scripts/Tools/wait_for_tests +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python - -""" -Wait for a queued set of E3SM tests to finish by watching the -TestStatus files. If all tests pass, 0 is returned, otherwise a -non-zero error code is returned. Note that this program waits -for the RUN phase specifically and will not terminate if the -RUN phase didn't happen. -""" - -from standard_script_setup import * - -import CIME.wait_for_tests - -import argparse, sys, os - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( -usage="""\n{0} [ ...] [--verbose] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Wait for test in current dir\033[0m - > {0} - \033[1;32m# Wait for test in user specified tests\033[0m - > {0} path/to/testdir - \033[1;32m# Wait for all tests in a test area\033[0m - > {0} path/to/testarea/*/TestStatus -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("paths", default=".", nargs="*", help="Paths to test directories or status file. 
Pwd default.") - - parser.add_argument("-n", "--no-wait", action="store_true", - help="Do not wait for tests to finish") - - parser.add_argument("--no-run", action="store_true", - help="Do not expect run phase to be completed") - - parser.add_argument("-t", "--check-throughput", action="store_true", - help="Fail if throughput check fails (fail if tests slow down)") - - parser.add_argument("-m", "--check-memory", action="store_true", - help="Fail if memory check fails (fail if tests footprint grows)") - - parser.add_argument("-i", "--ignore-namelist-diffs", action="store_true", - help="Do not fail a test if the only problem is diffing namelists") - - parser.add_argument("--ignore-memleak", action="store_true", - help="Do not fail a test if the only problem is a memleak") - - parser.add_argument("--force-log-upload", action="store_true", - help="Always upload logs to cdash, even if test passed") - - parser.add_argument("-b", "--cdash-build-name", - help="Build name, implies you want results send to Cdash") - - parser.add_argument("-p", "--cdash-project", default=CIME.wait_for_tests.E3SM_MAIN_CDASH, - help="The name of the CDash project where results should be uploaded") - - parser.add_argument("-g", "--cdash-build-group", default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, - help="The build group to be used to display results on the CDash dashboard.") - - parser.add_argument("--timeout", type=int, - help="Timeout wait in seconds.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.paths, args.no_wait, args.check_throughput, args.check_memory, args.ignore_namelist_diffs, args.ignore_memleak, args.cdash_build_name, args.cdash_project, args.cdash_build_group, args.timeout, args.force_log_upload, args.no_run - -############################################################################### -def _main_func(description): -############################################################################### - test_paths, no_wait, check_throughput, check_memory, ignore_namelist_diffs, ignore_memleak, cdash_build_name, cdash_project, cdash_build_group, timeout, force_log_upload, no_run = \ - parse_command_line(sys.argv, description) - - sys.exit(0 if CIME.wait_for_tests.wait_for_tests(test_paths, - no_wait=no_wait, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelist_diffs, - ignore_memleak=ignore_memleak, - cdash_build_name=cdash_build_name, - cdash_project=cdash_project, - cdash_build_group=cdash_build_group, - timeout=timeout, - force_log_upload=force_log_upload, - no_run=no_run) - else CIME.utils.TESTS_FAILED_ERR_CODE) - -############################################################################### - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange deleted file mode 100755 index 64f3c2fd6c0..00000000000 --- a/scripts/Tools/xmlchange +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env python - -""" -Allows changing variables in env_*xml files via a command-line interface. - -This provides two main benefits over editing the xml files by hand: - - Settings are checked immediately for validity - - Settings are echoed to the CaseStatus file, providing a "paper trail" of - changes made by the user. 
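Ahead of the tool's own examples below, here is a small standalone sketch of how a comma-separated settings string splits into (variable, value) pairs; the escaped-delimiter handling is an assumption about the intended behaviour, not code taken from this script.

    import re

    def parse_settings(listofsettings, delimiter=","):
        # Split on the delimiter unless it is backslash-escaped (assumed behaviour).
        pairs = re.split(r"(?<!\\)" + re.escape(delimiter), listofsettings)
        return [pair.split("=", 1) for pair in pairs]

    print(parse_settings("REST_OPTION=ndays,REST_N=4"))
    # [['REST_OPTION', 'ndays'], ['REST_N', '4']]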
- -Examples: - - To set a single variable: - ./xmlchange REST_N=4 - - To set multiple variables at once: - ./xmlchange REST_OPTION=ndays,REST_N=4 - - Alternative syntax (no longer recommended, but supported for backwards - compatibility; only works for a single variable at a time): - ./xmlchange --id REST_N --val 4 - - Several xml variables that have settings for each component have somewhat special treatment. - The variables that this currently applies to are: - NTASKS, NTHRDS, ROOTPE, PIO_TYPENAME, PIO_STRIDE, PIO_NUMTASKS - For example, to set the number of tasks for all components to 16, use: - ./xmlchange NTASKS=16 - To set just the number of tasks for the atm component, use: - ./xmlchange NTASKS_ATM=16 - - The CIME case xml variables are grouped together in xml elements . - This is done to associate together xml variables with common features. - Most variables are only associated with one group. However, in env_batch.xml, - there are also xml variables that are associated with each potential batch job. - For these variables, the '--subgroup' option may be used to specify a particular - group for which the variable's value will be adjusted. - - As an example, in env_batch.xml, the xml variables JOB_QUEUE and JOB_WALLCLOCK_TIME - appear in each of the batch job groups (defined in config_batch.xml): - - - - To set the variable JOB_WALLCLOCK_TIME only for case.run: - ./xmlchange JOB_WALLCLOCK_TIME=0:30 --subgroup case.run - To set the variable JOB_WALLCLOCK_TIME for all jobs: - ./xmlchange JOB_WALLCLOCK_TIME=0:30 -""" - -from standard_script_setup import * - -from CIME.utils import expect, convert_to_type, append_case_status, get_batch_script_for_job -from CIME.case import Case - -import re - -# Set logger -logger = logging.getLogger("xmlchange") - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("listofsettings", nargs="?", default='', - help="Comma-separated list of settings in the form: var1=value,var2=value,...") - - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to change.\n" - "Default is current directory.") - - # Need to support older single dash version of arguments for compatibility with components - - parser.add_argument("--append","-append", action="store_true", - help="Append to the existing value rather than overwriting it.") - - parser.add_argument("--subgroup","-subgroup", - help="Apply to this subgroup only.") - - parser.add_argument("--id", "-id", - help="The variable to set.\n" - "(Used in the alternative --id var --val value form, rather than\n" - "the recommended var=value form.)") - - parser.add_argument("--val","-val", - help="The value to set.\n" - "(Used in the alternative --id var --val value form, rather than\n" - "the recommended var=value form.)") - - parser.add_argument("--file", "-file", - help="XML file to edit.\n" - "Generally not needed, but can be specified to ensure that only the\n" - "expected file is being changed. 
(If a variable is not found in this file,\n" - "an error will be generated.)") - - parser.add_argument("--delimiter","-delimiter", type=str, default="," , - help="Delimiter string in listofvalues.\n" - "Default is ','.") - - parser.add_argument("--dryrun","-dryrun", action="store_true", - help="Parse settings and print key-value pairs, but don't actually change anything.") - - parser.add_argument("--noecho", "-noecho", action="store_true", - help="Do not update CaseStatus with this change.\n" - "This option is mainly meant to be used by cime scripts: the 'paper trail' in\n" - "CaseStatus is meant to show changes made by the user, so we generally don't\n" - "want this to be contaminated by changes made automatically by cime scripts.") - - parser.add_argument("-f","--force", action="store_true", - help="Ignore typing checks and store value.") - - parser.add_argument("-loglevel", - help="Ignored, only for backwards compatibility.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - listofsettings = [] - if( len(args.listofsettings )): - expect(args.id is None, "Cannot specify both listofsettings and --id") - expect(args.val is None, "Cannot specify both listofsettings and --val") - delimiter = re.escape(args.delimiter) - listofsettings = re.split(r'(? %s " % (argstr) - append_case_status("xmlchange", "success", msg=msg, caseroot=caseroot) - -def _main_func(description): - # pylint: disable=unused-variable - caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force , dry = parse_command_line(sys.argv, description) - - xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force, dry) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/xmlconvertors/config_pes_converter.py b/scripts/Tools/xmlconvertors/config_pes_converter.py deleted file mode 100755 index a02bf7cfab0..00000000000 --- a/scripts/Tools/xmlconvertors/config_pes_converter.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/env python -""" -config_pes_converter.py -- convert (or verify) config_pes elements from CIME2 -format to CIME5. This tool will compare the two versions and suggest updates to -the CIME5 file. 
- -The location of these files are needed by the script: - CIME2: cime/machines-acme/config_pes.xml - CIME5: config/acme/allactive/config_pesall.xml -""" - - -from standard_script_setup import * -from CIME.utils import run_cmd -from distutils.spawn import find_executable -import xml.etree.ElementTree as ET -import grid_xml_converter -LOGGER = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args): -############################################################################### - parser = argparse.ArgumentParser(description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - # Set command line options - parser.add_argument("-cime2file", "--cime2file", - help="location of config_grid.xml file in CIME2 format", - required=True) - parser.add_argument("-cime5file", "--cime5file", - help="location of config_grids.xml file in CIME5 format", - required=True) - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.cime2file is None or args.cime5file is None: - parser.print_help() - exit() - - return args.cime2file, args.cime5file - -class PesNode(grid_xml_converter.DataNode): - def __init__(self,root): - self.ignore = False - super(PesNode, self).__init__(root) - - def __str__(self): - return ET.tostring(self.xmlnode) - - def setattrib(self, node, tag, key=None): - if key is None: - key = tag - if key in self.data: - node.set(tag, self.data[key]) - else: - node.set(tag, 'any') - - def keyvalue(self): - return "{}:{}:{}:{}".format(self.data['gridname'], self.data['machname'], - self.data['pesize'], self.data['compset']) - - - def to_cime5(self): - gridnode = ET.Element('grid') - self.setattrib(gridnode, 'name', 'gridname') - machnode = ET.SubElement(gridnode, 'mach') - self.setattrib(machnode, 'name', 'machname') - pesnode = ET.SubElement(machnode, 'pes') - self.setattrib(pesnode, 'compset') - self.setattrib(pesnode, 'pesize') - commentnode = ET.SubElement(pesnode, 'comment') - commentnode.text = "none" - for d in ['ntasks', 'nthrds', 'rootpe']: - newnode = ET.SubElement(pesnode, d) - for comp in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl', 'iac']: - tag = d + '_' + comp - if tag in self.data[d]: - ET.SubElement(newnode, tag).text = str(self.data[d][tag]) - - return gridnode - - - - def __eq__(self, other): - for k in ['gridname', 'machname', 'pesize', 'compset']: - if k not in self.data and k not in other.data: - continue - if k not in self.data or k not in other.data: - return False - if self.data[k] != other.data[k]: - return False - for d in ['ntasks', 'nthrds', 'rootpe']: - for k in self.data[d]: - if k not in self.data[d] and k not in other.data[d]: - continue - if k not in self.data[d] or k not in other.data[d]: - return False - if self.data[d][k] != other.data[d][k]: - return False - return True - -class Cime5PesNode(PesNode): - def set_data(self, xmlnode): - for d in ['ntasks', 'nthrds', 'rootpe']: - self.data[d] = {} - self.xmlnode = xmlnode - self.data['gridname'] = xmlnode.get('name') - machnode = xmlnode.find('mach') - self.data['machname'] = machnode.get('name') - pesnode = machnode.find('pes') - self.data['pesize'] = pesnode.get('pesize') - self.data['compset'] = pesnode.get('compset') - commentnode = pesnode.find('comment') - if commentnode is not None: - self.data['comment'] = commentnode.text - for tag in ['ntasks', 'nthrds', 'rootpe']: - node = pesnode.find(tag) - for child in 
node.getchildren(): - self.data[tag][child.tag] = child.text.strip() - -class Cime2PesNode(PesNode): - ISDEFAULT = "-999999" - DEFAULTS = {'ntasks':'16', 'nthrds':'1', 'rootpe':'0'} - def set_data(self, xmlnode): - # Set Defaults - for d in ['ntasks', 'nthrds', 'rootpe']: - self.data[d] = {} - for comp in ['atm', 'lnd', 'ice', 'ocn', 'glc', 'rof', 'wav', 'cpl', 'iac']: - self.data['ntasks']['ntasks_' + comp] = self.ISDEFAULT - self.data['nthrds']['nthrds_' + comp] = self.ISDEFAULT - self.data['rootpe']['rootpe_' + comp] = self.ISDEFAULT - - # Read in node - self.xmlnode = xmlnode - for checktag in ['OS', 'TEST']: - check = xmlnode.get(checktag) - if check is not None: - self.ignore = True - return - self.data['machname'] = xmlnode.get('MACH', default='any') - self.data['gridname'] = xmlnode.get('GRID', default='any') - self.data['pesize'] = xmlnode.get('PECOUNT', default='any') - self.data['compset'] = xmlnode.get('CCSM_LCOMPSET', default='any') - for d in ['ntasks', 'nthrds', 'rootpe']: - for comp in ['atm', 'lnd', 'ice', 'ocn', 'glc', 'rof', 'wav', 'cpl', 'iac']: - tag = d + '_' + comp - node = xmlnode.find(tag.upper()) - if node is not None: - val = node.text.strip() - if val[0] == '$': - resolvetag = val[1:] - if resolvetag == "MAX_TASKS_PER_NODE": - val = '-1' - else: - refnode = xmlnode.find(resolvetag) - if refnode is None: - # use default value - val = self.data[resolvetag.lower()[0:6]][resolvetag.lower()] - else: - val = xmlnode.find(resolvetag).text.strip() - - self.data[d][tag] = val - # Set to defaults. CIME2 had unresolved defaults that referred - # back to the ATM value, so setting just the ATM value would in effect - # set all values - for d in ['ntasks', 'nthrds', 'rootpe']: - atmtag = d + '_atm' - if self.data[d][atmtag] == self.ISDEFAULT: - self.data[d][atmtag] = self.DEFAULTS[d] - for comp in ['lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl', 'iac']: - tag = d + '_' + comp - if self.data[d][tag] == self.ISDEFAULT: - self.data[d][tag] = self.data[d][atmtag] - - - - -class PesTree(grid_xml_converter.DataTree): - def __init__(self, xmlfilename): - # original xml file has bad comments - import re, StringIO - if os.access(xmlfilename, os.R_OK): - with open(xmlfilename, 'r') as xmlfile: - t1 = xmlfile.read() - t2 = re.sub(r'(?<=)', - lambda x: x.group(0).replace('-', ' '), t2) - tempxml = StringIO.StringIO(t3) - super(PesTree, self).__init__(tempxml) - tempxml.close() - - else: - super(PesTree, self).__init__(xmlfilename) - - def populate(self): - if self.root is None: - return - xmlnodes = self.root.findall('grid') - nodeclass = Cime5PesNode - - if len(xmlnodes) == 0: - xmlnodes = self.root.findall('pes') - nodeclass = Cime2PesNode - for xmlnode in xmlnodes: - datanode = nodeclass(self.root) - datanode.set_data(xmlnode) - if not datanode.ignore: - self.nodes.append(datanode) - - - - def writexml(self, addlist, newfilename): - root = ET.Element('config_pes') - for a, b in addlist: - if b is not None: - root.append(ET.Element('REPLACE')) - root.append(b.to_cime5()) - root.append(ET.Element('WITH')) - if a is not None: - root.append(a.to_cime5()) - xmllint = find_executable("xmllint") - if xmllint is not None: - run_cmd("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) - -def diff_tree(atree, btree): - afound = [] - bfound = [] - oklist = [] - fixlist = [] - addlist = [] - duplist = [] - bkeys = [] - for bnode in btree.nodes: - if bnode.keyvalue() in bkeys: - duplist.append(bnode.keyvalue()) - else: - bkeys.append(bnode.keyvalue()) - - - for 
anode in atree.nodes: - for bnode in btree.nodes: - if bnode in bfound: - continue - if anode.keyvalue() == bnode.keyvalue(): - afound.append(anode) - bfound.append(bnode) - - if anode == bnode: - oklist.append([anode, bnode]) - else: - fixlist.append([anode, bnode]) - break - - if anode in afound: - continue - - addlist.append([anode, None]) - - - - LOGGER.info("Number of ok nodes: {:d}".format(len(oklist))) - LOGGER.info("Number of wrong nodes: {:d}".format(len(fixlist))) - LOGGER.info("Number of missing nodes: {:d}".format(len(addlist))) - for miss in addlist: - LOGGER.debug(miss[0].keyvalue()) - LOGGER.info("Number of duplicate nodes: {:d}".format(len(duplist))) - for dup in duplist: - LOGGER.info(dup) - return [oklist, fixlist, addlist] - - -def pes_compare(): - cime2file, cime5file = parse_command_line(sys.argv) - - cime2pestree = PesTree(cime2file) - cime5pestree = PesTree(cime5file) - - LOGGER.info("Comparing config_pes files...") - oklist, fixlist, addlist = diff_tree(cime2pestree, cime5pestree) - cime5pestree.postprocess(fixlist, addlist, "tempgrid.xml", cime5file, - "badgrid.xml") - -if __name__ == "__main__": - pes_compare() - - diff --git a/scripts/Tools/xmlconvertors/convert-grid-v1-to-v2 b/scripts/Tools/xmlconvertors/convert-grid-v1-to-v2 deleted file mode 100755 index 37fbb86988d..00000000000 --- a/scripts/Tools/xmlconvertors/convert-grid-v1-to-v2 +++ /dev/null @@ -1,217 +0,0 @@ -#! /usr/bin/env python - -""" -Convert a grid file from v1 to v2. -""" - -import argparse, sys, os -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) - -from standard_script_setup import * -from CIME.utils import expect -from CIME.XML.generic_xml import GenericXML -import xml.etree.ElementTree as ET - -from collections import OrderedDict - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} -OR -{0} --help -""".format(os.path.basename(args[0])), - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("v1file", - help="v1 file path") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.v1file - -############################################################################### -def convert_gridmaps(v1file_obj, v2file_obj): -############################################################################### - gridmap_data = [] # (attribs, {name->file}) - - v1gridmaps = v1file_obj.get_child(name="gridmaps") - v1gridmap = v1file_obj.get_children(name="gridmap", root=v1gridmaps) - for gridmap_block in v1gridmap: - attribs = v1file_obj.attrib(gridmap_block) - children = [] - for child in v1file_obj.get_children(root=gridmap_block): - children.append( (v1file_obj.name(child), v1file_obj.text(child)) ) - - gridmap_data.append( (attribs, children) ) - - v2gridmaps = v2file_obj.make_child("gridmaps") - - for attribs, children in gridmap_data: - gridmap = v2file_obj.make_child("gridmap", attributes=attribs, root=v2gridmaps) - for name, text in children: - v2file_obj.make_child("map", attributes={"name": name}, root=gridmap, text=text) - -############################################################################### -def convert_domains(v1file_obj, v2file_obj): -############################################################################### - 
domain_data = [] # (name, nx, ny, {filemask->mask->file}, {pathmask->mask->path}, desc) - - v1domains = v1file_obj.get_child(name="domains") - v1domain = v1file_obj.get_children(name="domain", root=v1domains) - for domain_block in v1domain: - attrib = v1file_obj.attrib(domain_block) - expect(attrib.keys() == ["name"], - "Unexpected attribs: {}".format(attrib)) - - name = attrib["name"] - - desc = v1file_obj.get_element_text("desc", root=domain_block) - sup = v1file_obj.get_element_text("support", root=domain_block) - nx = v1file_obj.get_element_text("nx", root=domain_block) - ny = v1file_obj.get_element_text("ny", root=domain_block) - - if sup and not desc: - desc = sup - - file_masks, path_masks = OrderedDict(), OrderedDict() - - for child_name, masks in [("file", file_masks), ("path", path_masks)]: - children = v1file_obj.get_children(name=child_name, root=domain_block) - for child in children: - attrib = v1file_obj.attrib(child) - expect(len(attrib) == 1, "Bad {} attrib: {}".format(child_name, attrib)) - mask_key, mask_value = attrib.items()[0] - - component, _ = mask_key.split("_") - masks.setdefault(component, OrderedDict())[mask_value] = v1file_obj.text(child) - - for child in v1file_obj.get_children(root=domain_block): - expect(v1file_obj.name(child) in ["nx", "ny", "file", "path", "desc", "support"], - "Unhandled child of grid '{}'".format(v1file_obj.name(child))) - - domain_data.append( (name, nx, ny, file_masks, path_masks, desc) ) - - v2domains = v2file_obj.make_child("domains") - - for name, nx, ny, file_masks, path_masks, desc in domain_data: - attribs = {"name":name} if name else {} - domain_block = v2file_obj.make_child("domain", attributes=attribs, root=v2domains) - - v2file_obj.make_child("nx", root=domain_block, text=nx) - v2file_obj.make_child("ny", root=domain_block, text=ny) - - file_to_attrib = OrderedDict() - for component, mask_values in file_masks.iteritems(): - for mask_value, filename in mask_values.iteritems(): - if filename is None: - continue - - try: - path = path_masks[component][mask_value] - except KeyError: - path = "$DIN_LOC_ROOT/share/domains" - - fullfile = os.path.join(path, filename) - mask_value = mask_value if mask_value not in ["reg", name] else "" - file_to_attrib.setdefault(fullfile, OrderedDict()).setdefault(mask_value, []).append(component) - - for filename, masks in file_to_attrib.iteritems(): - attrib = {} - expect(len(masks) == 1, "Bad mask") - for mask, components in masks.iteritems(): - attrib["grid"] = "|".join(components) - - if mask: - attrib["mask"] = mask - - v2file_obj.make_child("file", attributes=attrib, root=domain_block, text=filename) - - if desc: - v2file_obj.make_child("desc", root=domain_block, text=desc) - -############################################################################### -def convert_grids(v1file_obj, v2file_obj): -############################################################################### - grid_data = [] # (compset, lname, sname, alias, support) - - v1grids = v1file_obj.get_child(name="grids") - v1grid = v1file_obj.get_children(name="grid", root=v1grids) - for grid_block in v1grid: - attrib = v1file_obj.attrib(grid_block) - - compset = attrib["compset"] if "compset" in attrib else None - expect(attrib.keys() in [ ["compset"], [] ], - "Unexpected attribs: {}".format(attrib)) - - lname = v1file_obj.get_element_text("lname", root=grid_block) - sname = v1file_obj.get_element_text("sname", root=grid_block) - alias = v1file_obj.get_element_text("alias", root=grid_block) - support = 
v1file_obj.get_element_text("support", root=grid_block) - - for child in v1file_obj.get_children(root=grid_block): - expect(v1file_obj.name(child) in ["lname", "sname", "alias", "support"], - "Unhandled child of grid '{}'".format(v1file_obj.name(child))) - - grid_data.append((compset, lname, sname, alias, support)) - - v2grids = v2file_obj.make_child("grids") - - # TODO: How to leverage model_grid_defaults - - for compset, lname, sname, alias, support in grid_data: - v2_alias = alias if alias else sname - attribs = {"alias":v2_alias} if v2_alias else {} - attribs.update({"compset":compset} if compset else {}) - v2grid = v2file_obj.make_child("model_grid", attributes=attribs, root=v2grids) - - pieces_raw = lname.split("_") - pieces = [] - for raw_piece in pieces_raw: - if "%" in raw_piece: - pieces.append(raw_piece) - else: - pieces[-1] += ("_" + raw_piece) - - ctype_map = {"a":"atm", "l":"lnd", "oi":"ocnice", "r":"rof", "m":"mask", "g":"glc", "w":"wav"} - mask = None - for piece in pieces: - ctype, data = piece.split("%") - cname = ctype_map[ctype.strip()] - if cname == "mask": - expect(mask is None, "Multiple masks") - mask = data - else: - v2file_obj.make_child("grid", attributes={"name":cname}, text=data, root=v2grid) - - if mask is not None: - v2file_obj.make_child("mask", text=mask, root=v2grid) - -############################################################################### -def convert_to_v2(v1file): -############################################################################### - v1file_obj = GenericXML(infile=v1file, read_only=True) - v2file_obj = GenericXML(infile="out.xml", read_only=False, root_name_override="grid_data", root_attrib_override={"version":"2.0"}) - - convert_grids(v1file_obj, v2file_obj) - - convert_domains(v1file_obj, v2file_obj) - - convert_gridmaps(v1file_obj, v2file_obj) - - v2file_obj.write(outfile=sys.stdout) - -############################################################################### -def _main_func(description): -############################################################################### - v1file = parse_command_line(sys.argv, description) - - convert_to_v2(v1file) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/Tools/xmlconvertors/grid_xml_converter.py b/scripts/Tools/xmlconvertors/grid_xml_converter.py deleted file mode 100755 index da26917a493..00000000000 --- a/scripts/Tools/xmlconvertors/grid_xml_converter.py +++ /dev/null @@ -1,460 +0,0 @@ -#!/usr/bin/env python -""" -grid_xml_converter.py -- convert (or verify) grid elements from CIME2 format -to CIME5. This tool will compare the two versions and suggest updates -to the CIME5 file. 
- -The location of these files are needed by the script: - CIME2: cime/scripts/Tools/config_grid.xml - CIME5: config/acme/config_grids.xml -""" - -# make sure cime2, cime roots are defined -# use categories -# GRID CONFIGURATIONS grid list domain grid maps -# CIME2: cime/scripts/Tools/config_grid.xml -# CIME5: config/acme/config_grids.xml -# - -from standard_script_setup import * -from CIME.utils import run_cmd_no_fail -from distutils.spawn import find_executable -import xml.etree.ElementTree as ET -import operator - -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args): -############################################################################### - parser = argparse.ArgumentParser(description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - # Set command line options - parser.add_argument("-cime2file", "--cime2file", - help="location of config_grid.xml file in CIME2 format", - required=True) - parser.add_argument("-cime5file", "--cime5file", - help="location of config_grids.xml file in CIME5 format", - required=True) - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.cime2file, args.cime5file - - - -class DataNode(object): - """ - non-demoninational dictionary of node data: - """ - def __init__(self, xmlroot): - self.xmlroot = xmlroot # in case additional information needed - self.data = {} - self.name = None - self.xmlnode = None - - def keyvalue(self): - return self.data[self.key] - -class GridNode(DataNode): - key = 'lname' - def __str__(self): - return ET.tostring(self.xmlnode) - - - def to_cime5(self): - node = ET.Element('grid') - if 'compset' in self.data and self.data['compset'] is not None: - node.set('compset', self.data['compset']) - - for k in ['sname', 'lname', 'alias', 'support']: - if k in self.data and self.data[k] is not None: - ET.SubElement(node, k).text = self.data[k] - - return node - - - - def __eq__(self, other): - for k in ['lname', 'sname', 'compset', 'alias']: - if k not in self.data and k not in other.data: - continue - if k not in self.data or k not in other.data: - return False - if self.data[k] != other.data[k]: - return False - return True - -class Cime2GridNode(GridNode): - def set_data(self, xmlnode): - self.xmlnode = xmlnode - if xmlnode.text is not None: - self.data['lname'] = xmlnode.text - for k in ['lname', 'sname', 'alias', 'compset']: - tmpval = xmlnode.get(k) - if tmpval is not None: - self.data[k] = tmpval.strip() - tmpval = xmlnode.get('support_level') - if tmpval is not None: - self.data['support'] = tmpval.strip() - -class Cime5GridNode(GridNode): - def set_data(self, xmlnode): - self.xmlnode = xmlnode - for k in ['sname', 'lname', 'support', 'alias']: - if xmlnode.find(k) is not None: - self.data[k] = xmlnode.find(k).text.strip() - if xmlnode.get('compset') is not None: - self.data['compset'] = xmlnode.get('compset').strip() - -class GridmapNode(DataNode): - def set_data(self, xmlnode): - self.keys = [] - self.data['maps'] = {} - self.xmlnode = xmlnode - for k in ['atm_grid', 'lnd_grid', 'ocn_grid', 'rof_grid', 'glc_grid', - 'wav_grid', 'ice_grid', 'iac_grid' ]: - att = xmlnode.get(k) - if att is not None: - self.data[k] = att.strip() - self.keys.append(k) - self.sort() - for child in xmlnode.getchildren(): - self.data['maps'][child.tag] = child.text.strip() - def sort(self): - newlist = sorted(self.keys, 
key=operator.itemgetter(0)) - self.keys = newlist - def to_cime5(self): - node = ET.Element('gridmap') - for k in ['atm_grid', 'lnd_grid', 'ocn_grid', 'rof_grid', 'glc_grid']: - if k in self.data: - node.set(k, self.data[k]) - for key, value in self.data['maps'].items(): - ET.SubElement(node, key).text = value - return node - def __str__(self): - return str(self.keyvalue()) + str(self.data) - def __eq__(self, other): - if self.keyvalue() != other.keyvalue(): - return False - if len(self.data['maps']) != len(other.data['maps']): - return False - for key, value in self.data['maps'].items(): - if key not in other.data['maps'] or value != other.data['maps'][key]: - return False - return True - - def keyvalue(self): - return "{}:{}:{}:{}".format(self.keys[0], self.data[self.keys[0]], - self.keys[1], self.data[self.keys[1]]) -class DomainNode(DataNode): - """ - non-demoninational dictionary of domain node information: - """ - key = 'name' - - def to_cime5(self): - node = ET.Element('domain') - node.set('name', self.data['name']) - for tag in ['nx', 'ny', 'desc', 'support']: - if tag in self.data: - ET.SubElement(node, tag).text = self.data[tag] - for fop in ['file', 'path']: - if fop in self.data: - for comp, mask, filename in self.data[fop]: - attribs = {'{}_mask'.format(comp:mask)} - ET.SubElement(node, fop, attribs).text = filename - return node - - - - def sort(self): - for fop in ['file', 'path']: - newlist = sorted(self.data[fop], key=operator.itemgetter(0)) - self.data[fop] = newlist - - def __eq__(self, other): - # Check for different name, nx, or ny values - for k in ['name', 'nx', 'ny']: - if k not in self.data and k not in other.data: - continue - if k not in self.data or k not in other.data: - return False - if self.data[k] != other.data[k]: - return False - # Compare (sorted) file, path lists for equality - for fop in ['file', 'path']: - if fop not in self.data and fop not in other.data: - contine - if fop not in self.data or fop not in other.data: - return False - if len(self.data[fop]) != len(other.data[fop]): - return False - - for i in range(0, len(self.data[fop])): - for j in range(0, 2): - if self.data[fop][i][j] != other.data[fop][i][j]: - return False - - return True - - def __str__(self): - return str(self.data) - -class Cime2DomainNode(DomainNode): - """ - Read in a domain node from Cime2 xml format - """ - def set_data(self, xmlnode): - self.xmlnode = xmlnode - self.data['name'] = xmlnode.get('name').strip() - self.data['file'] = [] - self.data['path'] = [] - for tag in ['nx', 'ny', 'desc']: - child = xmlnode.find(tag) - if child is not None: - self.data[tag] = child.text - - # Find any griddom nodes that match this name - griddoms = self.xmlroot.findall('.griddom[@grid="{}"]'.format(self.data['name'])) - for gd in griddoms: - mask = gd.get('mask') - for comp in ['ATM', 'LND', 'OCN', 'ICE']: - for fop in ['FILE', 'PATH']: - tag = '{}_DOMAIN_{}'.format(comp, fop) - n = gd.find(tag) - if n is not None: - self.data[fop.lower()].append([comp.lower(), mask, - n.text]) - # sort the file and path entries - self.sort() - -class Cime5DomainNode(DomainNode): - """ - Read in a domain node from Cime5 xml format - """ - def set_data(self, xmlnode): - self.xmlnode = xmlnode - self.data['name'] = xmlnode.get('name') - self.data['file'] = [] - self.data['path'] = [] - for tag in ['nx', 'ny', 'desc', 'support']: - child = xmlnode.find(tag) - if child is not None: - self.data[tag] = child.text - for comp in ['lnd', 'atm', 'ocn', 'ice']: - masktag = '{}_mask'.format(comp) - for fop in 
['file', 'path']: - fopnodes = xmlnode.findall('{}[@{}]'.format(fop, masktag)) - for n in fopnodes: - mask = n.get(masktag) - filename = n.text.strip() - self.data[fop].append([comp, mask, filename]) - - # sort the file and path entries - self.sort() - -class DataTree(object): - def __init__(self, xmlfilename): - self.xmlfilename = xmlfilename - - if hasattr(xmlfilename, 'read') or os.access(xmlfilename, os.R_OK): - self.doc = ET.parse(xmlfilename) - else: - self.doc = ET.ElementTree() - - self.root = self.doc.getroot() - self.index = 0 - self.n = 0 - self.nodes = [] - self.populate() - - def next(self): - if self.index >= len(self.nodes): - self.index = 0 - raise StopIteration - if self.index < len(self.nodes): - self.index += 1 - return self.nodes[self.index-1] - - def __iter__(self): - return self - - def postprocess(self, fixlist, addlist, newxmlfile, currentxmlfile, - badxmlfile): - if len(addlist) > 0: - logger.info("\n\nWriting suggested nodes to {}".format(newxmlfile)) - logger.info("Copy 'grid' nodes into corresponding location in") - logger.info(currentxmlfile) - self.writexml(addlist, newxmlfile) - self.writexml(fixlist, badxmlfile) - if len(fixlist) > 0: - logger.info("Some nodes should be removed from") - logger.info("config/acme/config_grids.xml. These nodes") - logger.info("have been written to {}".format(badxmlfile)) - -class GridTree(DataTree): - def populate(self): - if self.root is None: - return - xmlnodes = self.root.findall('GRID') - nodeclass = Cime2GridNode - if len(xmlnodes) == 0: - xmlnodes = self.root.findall('./grids/grid') - nodeclass = Cime5GridNode - - for xmlnode in xmlnodes: - datanode = nodeclass(self.root) - datanode.set_data(xmlnode) - self.nodes.append(datanode) - - def writexml(self, addlist, newfilename): - root = ET.Element('grid_data') - grids = ET.SubElement(root, 'grids') - for a, b in addlist: - if b is not None: - grids.append(ET.Element('REPLACE')) - grids.append(b.to_cime5()) - grids.append(ET.Element('WITH')) - - if a is not None: - grids.append(a.to_cime5()) - xmllint = find_executable("xmllint") - if xmllint is not None: - run_cmd_no_fail("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) - - -class DomainTree(DataTree): - def populate(self): - if self.root is None: - return - - xmlnodes = self.root.findall('gridhorz') - nodeclass = Cime2DomainNode - if len(xmlnodes) == 0: - xmlnodes = self.root.findall('./domains/domain') - nodeclass = Cime5DomainNode - - for node in xmlnodes: - datanode = nodeclass(self.root) - datanode.set_data(node) - self.nodes.append(datanode) - - def writexml(self, addlist, newfilename): - root = ET.Element('grid_data') - domains = ET.SubElement(root, 'domains') - for a, b in addlist: - if b is not None: - domains.append(ET.Element('REPLACE')) - domains.append(b.to_cime5()) - domains.append(ET.Element('WITH')) - if a is not None: - domains.append(a.to_cime5()) - xmllint = find_executable("xmllint") - if xmllint is not None: - run_cmd_no_fail("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) - -class GridmapTree(DataTree): - def populate(self): - if self.root is None: - return - xmlnodes = self.root.findall('gridmap') - if len(xmlnodes) == 0: - xmlnodes = self.root.findall('./gridmaps/gridmap') - for xmlnode in xmlnodes: - datanode = GridmapNode(self.root) - datanode.set_data(xmlnode) - self.nodes.append(datanode) - - def writexml(self, addlist, newfilename): - root = ET.Element('gridmaps') - gridmaps = ET.SubElement(root, 'gridmap') - for a, 
b in addlist: - if b is not None: - gridmaps.append(ET.Element('REPLACE')) - gridmaps.append(b.to_cime5()) - gridmaps.append(ET.Element('WITH')) - if a is not None: - gridmaps.append(a.to_cime5()) - xmllint = find_executable("xmllint") - if xmllint is not None: - run_cmd_no_fail("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) - -def diff_tree(atree, btree): - afound = [] - bfound = [] - oklist = [] - fixlist = [] - addlist = [] - duplist = [] - bkeys = [] - for bnode in btree.nodes: - if bnode.keyvalue() in bkeys: - duplist.append(bnode.keyvalue()) - else: - bkeys.append(bnode.keyvalue()) - - - for anode in atree.nodes: - for bnode in btree.nodes: - if bnode in bfound: - continue - if anode.keyvalue() == bnode.keyvalue(): - afound.append(anode) - bfound.append(bnode) - - if anode == bnode: - oklist.append([anode, bnode]) - else: - fixlist.append([anode, bnode]) - break - - if anode in afound: - continue - - addlist.append([anode, None]) - - - - logger.info("Number of ok nodes: {:d}".format(len(oklist))) - logger.info("Number of wrong nodes: {:d}".format(len(fixlist))) - logger.info("Number of missing nodes: {:d}".format(len(addlist))) - logger.info("Number of duplicate nodes: {:d}".format(len(duplist))) - for dup in duplist: - logger.info(dup) - return [oklist, fixlist, addlist] - - - - -def grid_compare(): - cime2file, cime5file = parse_command_line(sys.argv) - - cime2gridtree = GridTree(cime2file) - cime5gridtree = GridTree(cime5file) - cime2domaintree = DomainTree(cime2file) - cime5domaintree = DomainTree(cime5file) - cime2gridmaptree = GridmapTree(cime2file) - cime5gridmaptree = GridmapTree(cime5file) - - logger.info("Comparing grid nodes...") - oklist, fixlist, addlist = diff_tree(cime2gridtree, cime5gridtree) - cime5gridtree.postprocess(fixlist, addlist, "tempgrid.xml", cime5file, - "badgrid.xml") - - oklist, fixlist, addlist = diff_tree(cime2domaintree, cime5domaintree) - cime5domaintree.postprocess(fixlist, addlist, "tempdomain.xml", - cime5file, "baddomain.xml") - - oklist, fixlist, addlist = diff_tree(cime2gridmaptree, cime5gridmaptree) - cime5gridmaptree.postprocess(fixlist, addlist, "tempgridmap.xml", - cime5file, "badgridmap.xml") - -if __name__ == "__main__": - grid_compare() diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery deleted file mode 100755 index 055ab7f5364..00000000000 --- a/scripts/Tools/xmlquery +++ /dev/null @@ -1,373 +0,0 @@ -#!/usr/bin/env python - -""" -Allows querying variables from env_*xml files and listing all available variables. - -There are two usage modes: - -1) Querying variables: - - - You can query a variable, or a list of variables via - ./xmlquery var1 - - or, for multiple variables (either comma or space separated) - ./xmlquery var1,var2,var3 .... - ./xmlquery var1 var2 var3 .... - where var1, var2 and var3 are variables that appear in a CIME case xml file - - Several xml variables that have settings for each component have somewhat special treatment - The variables that this currently applies to are - NTASKS, NTHRDS, ROOTPE, PIO_TYPENAME, PIO_STRIDE, PIO_NUMTASKS - As examples: - - to show the number of tasks for each component, issue - ./xmlquery NTASKS - - to show the number of tasks just for the atm component, issue - ./xmlquery NTASKS_ATM - - - The CIME case xml variables are grouped together in xml elements . - This is done to associate together xml variables with common features. - Most variables are only associated with one group. 
However, in env_batch.xml, - there are also xml variables that are associated with each potential batch job. - For these variables, the '--subgroup' option may be used to query the variable's - value for a particular group. - - As an example, in env_batch.xml, the xml variable JOB_QUEUE appears in each of - the batch job groups (defined in config_batch.xml): - - - - - To query the variable JOB_QUEUE only for one group in case.run, you need - to specify a sub-group argument to xmlquery. - ./xmlquery JOB_QUEUE --subgroup case.run - JOB_QUEUE: regular - ./xmlquery JOB_QUEUE - Results in group case.run - JOB_QUEUE: regular - Results in group case.st_archive - JOB_QUEUE: caldera - Results in group case.test - JOB_QUEUE: regular - - - You can tailor the query by adding ONE of the following possible qualifier arguments: - [--full --fileonly --value --raw --description --get-group --type --valid-values ] - as examples: - ./xmlquery var1,var2 --full - ./xmlquery var1,var2 --fileonly - - - You can query variables via a partial-match, using --partial-match or -p - as examples: - ./xmlquery STOP --partial-match - Results in group run_begin_stop_restart - STOP_DATE: -999 - STOP_N: 5 - STOP_OPTION: ndays - ./xmlquery STOP_N - STOP_N: 5 - - - By default variable values are resolved prior to output. If you want to see the unresolved - value(s), use the --no-resolve qualifier - as examples: - ./xmlquery RUNDIR - RUNDIR: /glade/scratch/mvertens/atest/run - ./xmlquery RUNDIR --no-resolve - RUNDIR: $CIME_OUTPUT_ROOT/$CASE/run - -2) Listing all groups and variables in those groups - - ./xmlquery --listall - - - You can list a subset of variables by adding one of the following qualifier arguments: - [--subgroup GROUP --file FILE] - - As examples: - - If you want to see the all of the variables in group 'case.run' issue - ./xmlquery --listall --subgroup case.run - - If you want to see all of the variables in 'env_run.xml' issue - ./xmlquery --listall --file env_run.xml - - If you want to see all of the variables in LockedFiles/env_build.xml issue - ./xmlquery --listall --file LockedFiles/env_build.xml - - - You can tailor the query by adding ONE of the following possible qualifier arguments: - [--full --fileonly --raw --description --get-group --type --valid-values] - - - The env_mach_specific.xml and env_archive.xml files are not supported by this tool. 
-""" - -from standard_script_setup import * - -from CIME.case import Case -from CIME.utils import expect, convert_to_string - -import textwrap, sys, re - -logger = logging.getLogger("xmlquery") -unsupported_files = ["env_mach_specific.xml", "env_archive.xml"] -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - # Set command line options - parser.add_argument("variables", nargs="*" , - help="Variable name(s) to query from env_*.xml file(s)\n" - "( 'variable_name' from value ).\n" - "Multiple variables can be given, separated by commas or spaces.\n") - - parser.add_argument("--caseroot" , "-caseroot", default=os.getcwd(), - help="Case directory to reference.\n" - "Default is current directory.") - - parser.add_argument("--listall", "-listall" , default=False , action="store_true" , - help="List all variables and their values.") - - parser.add_argument("--file" , "-file", - help="The file you want to query. If not given, queries all files.\n" - "Typically used with the --listall option.") - - parser.add_argument("--subgroup","-subgroup", - help="Apply to this subgroup only.") - - parser.add_argument("-p", "--partial-match", action="store_true", - help="Allow partial matches of variable names, treats args as regex.") - - parser.add_argument("--no-resolve", "-no-resolve", action="store_true", - help="Do not resolve variable values.") - - group = parser.add_mutually_exclusive_group() - - group.add_argument("--full", default=False, action="store_true", - help="Print a full listing for each variable, including value, type,\n" - "valid values, description and file.") - - group.add_argument("--fileonly", "-fileonly", default=False, action="store_true", - help="Only print the filename that each variable is defined in.") - - group.add_argument("--value", "-value", default=False, action="store_true", - help="Only print one value without newline character.\n" - "If more than one has been found print first value in list.") - - group.add_argument("--raw", default=False, action="store_true", - help="Print the complete raw record associated with each variable.") - - group.add_argument("--description", default=False, action="store_true", - help="Print the description associated with each variable.") - - group.add_argument("--get-group", default=False, action="store_true", - help="Print the group associated with each variable.") - - group.add_argument("--type", default=False, action="store_true", - help="Print the data type associated with each variable.") - - group.add_argument("--valid-values", default=False, action="store_true", - help="Print the valid values associated with each variable, if defined.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if (len(sys.argv) == 1) : - parser.print_help() - exit() - - if len(args.variables) == 1: - variables = args.variables[0].split(',') - else: - variables = args.variables - - return variables, args.subgroup, args.caseroot, args.listall, args.fileonly, \ - args.value, args.no_resolve, args.raw, args.description, args.get_group, args.full, \ - args.type, args.valid_values, args.partial_match, args.file - -def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None): - if var in 
["THREAD_COUNT", "TOTAL_TASKS", "TASKS_PER_NODE", "NUM_NODES", "SPARE_NODES", "TASKS_PER_NUMA", "CORES_PER_TASK"]: - value = str(getattr(case, var.lower())) - else: - thistype = case.get_type_info(var) - value = case.get_value(var, attribute=attribute, resolved=resolved, subgroup=subgroup) - if value is not None and thistype: - value = convert_to_string(value, thistype, var) - - return value - -def xmlquery_sub(case, variables, subgroup=None, fileonly=False, - resolved=True, raw=False, description=False, get_group=False, - full=False, dtype=False, valid_values=False, xmlfile=None): - """ - Return list of attributes and their values, print formatted - - """ - results = {} - comp_classes = case.get_values("COMP_CLASSES") - if xmlfile: - case.set_file(xmlfile) - - # Loop over variables - for var in variables: - if subgroup is not None: - groups = [subgroup] - else: - groups = case.get_record_fields(var, "group") - if not groups: - groups = ['none'] - - if xmlfile: - expect(xmlfile not in unsupported_files, - "XML file {} is unsupported by this tool." - .format(xmlfile)) - - if not groups: - value = case.get_value(var, resolved=resolved) - results['none'] = {} - results['none'][var] = {} - results['none'][var]['value'] = value - elif not groups: - results['none'] = {} - results['none'][var] = {} - - for group in groups: - if not group in results: - results[group] = {} - if not var in results[group]: - results[group][var] = {} - - expect(group, "No group found for var {}".format(var)) - if get_group: - results[group][var]['get_group'] = group - - value = get_value_as_string(case, var, resolved=resolved, subgroup=group) - if value is None: - var, comp, iscompvar = case.check_if_comp_var(var) - if iscompvar: - value = [] - for comp in comp_classes: - try: - nextval = get_value_as_string(case,var, attribute={"compclass" : comp}, resolved=resolved, subgroup=group) - except Exception: # probably want to be more specific - nextval = get_value_as_string(case,var, attribute={"compclass" : comp}, resolved=False, subgroup=group) - - if nextval is not None: - value.append(comp + ":" + "{}".format(nextval)) - else: - value = get_value_as_string(case, var, resolved=resolved, subgroup=group) - - if value is None: - if xmlfile: - expect(False, " No results found for variable {} in file {}".format(var, xmlfile)) - else: - expect(False, " No results found for variable {}".format(var)) - - results[group][var]['value'] = value - - if raw: - results[group][var]['raw'] = case.get_record_fields(var, "raw") - if description or full: - results[group][var]['desc'] = case.get_record_fields(var, "desc") - if fileonly or full: - results[group][var]['file'] = case.get_record_fields(var, "file") - if dtype or full: - results[group][var]['type'] = case.get_type_info(var) - if valid_values or full: - results[group][var]['valid_values'] = case.get_record_fields(var, "valid_values") #*** this is the problem *** - - return results - -def _main_func(description): - # Initialize command line parser and get command line options - variables, subgroup, caseroot, listall, fileonly, \ - value, no_resolve, raw, description, get_group, full, dtype, \ - valid_values, partial_match, xmlfile = parse_command_line(sys.argv, description) - - expect(xmlfile not in unsupported_files, - "XML file {} is unsupported by this tool." 
- .format(xmlfile)) - - # Initialize case ; read in all xml files from caseroot - with Case(caseroot) as case: - if listall or partial_match: - if xmlfile: - case.set_file(xmlfile) - all_variables = sorted(case.get_record_fields(None, "varid")) - logger.debug("all_variables: {}".format(all_variables)) - if partial_match: - all_matching_vars = [] - for variable in variables: - regex = re.compile(variable) - for all_variable in all_variables: - if regex.search(all_variable): - if subgroup is not None: - vargroups = case.get_record_fields(all_variable, "group") - if subgroup not in vargroups: - continue - - all_matching_vars.append(all_variable) - - variables = all_matching_vars - else: - if subgroup is not None: - all_matching_vars = [] - for all_variable in all_variables: - vargroups = case.get_record_fields(all_variable, "group") - if subgroup not in vargroups: - continue - else: - all_matching_vars.append(all_variable) - - variables = all_matching_vars - else: - variables = all_variables - expect(variables, "No variables found") - results = xmlquery_sub(case, variables, subgroup, fileonly, resolved=not no_resolve, - raw=raw, description=description, get_group=get_group, full=full, - dtype=dtype, valid_values=valid_values, xmlfile=xmlfile) - - if full or description: - wrapper=textwrap.TextWrapper() - wrapper.subsequent_indent = "\t\t\t" - wrapper.fix_sentence_endings = True - - cnt = 0 - for group in sorted(iter(results)): - if (len(variables) > 1 or len(results) > 1 or full) and not get_group and not value: - print("\nResults in group {}".format(group)) - for var in variables: - if var in results[group]: - if raw: - print(results[group][var]['raw']) - elif get_group: - print("\t{}: {}".format(var, results[group][var]['get_group'])) - elif value: - if cnt > 0: - sys.stdout.write(",") - sys.stdout.write("{}".format(results[group][var]['value'])) - cnt += 1 - elif description: - if results[group][var]['desc'][0] is not None: - desc_text = ' '.join(results[group][var]['desc'][0].split()) - print("\t{}: {}".format(var, wrapper.fill(desc_text))) - elif fileonly: - print("\t{}: {}".format(var, results[group][var]['file'])) - elif dtype: - print("\t{}: {}".format(var, results[group][var]['type'])) - elif valid_values: - if 'valid_values' in results[group][var]: - print("\t{}: {}".format(var, results[group][var]["valid_values"])) - elif full: - if results[group][var]['desc'][0] is not None: - desc_text = ' '.join(results[group][var]['desc'][0].split()) - print("\t{}: value={}".format(var, results[group][var]['value'])) - print("\t\ttype: {}".format(results[group][var]['type'][0])) - if 'valid_values' in results[group][var]: - print("\t\tvalid_values: {}".format(results[group][var]["valid_values"])) - print("\t\tdescription: {}".format(wrapper.fill(desc_text))) - print("\t\tfile: {}".format(results[group][var]['file'][0])) - else: - print("\t{}: {}".format(var, results[group][var]['value'])) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/climate_reproducibility/README.md b/scripts/climate_reproducibility/README.md index 5d2ec104e58..38640369b38 100644 --- a/scripts/climate_reproducibility/README.md +++ b/scripts/climate_reproducibility/README.md @@ -1,7 +1,6 @@ -Climate reproducibility testing -=============================== +# Climate reproducibility testing -Requiring model changes to pass stringent tests before being accepted as part of E3SM’s main development +Requiring model changes to pass stringent tests before being accepted as part of E3SM’s main development 
branch is critical for quickly and efficiently producing a trustworthy model. Depending on their impacts on model output, code modifications can be classified into three types: @@ -10,54 +9,53 @@ impacts on model output, code modifications can be classified into three types: averaged over a sufficiently long time 3. Changes that lead to a different model climate -Only (3) impacts model climate, and changes of this type should only be implemented within the code +Only (3) impacts model climate, and changes of this type should only be implemented within the code after an in-depth demonstration of improvement. However, distinguishing between (2) and (3) requires -a comprehensive analysis of both a baseline climate and the currently produced climate. +a comprehensive analysis of both a baseline climate and the currently produced climate. Through the CMDV Software project, we've provided a set of climate reproducibility tests to determine whether or not non-bit-for-bit (nb4b) model changes are climate changing. The current tests provided are: - * **MVK** -- This tests the null hypothesis that the baseline (n) and modified (m) model Short Independent - Simulation Ensembles (SISE) represent the same climate state, based on the equality of distribution - of each variable's annual global average in the standard monthly model output between the two - simulations. The (per variable) null hypothesis uses the non-parametric, two-sample (n and m) + * **MVK** -- This tests the null hypothesis that the baseline (n) and modified (m) model Short Independent + Simulation Ensembles (SISE) represent the same climate state, based on the equality of distribution + of each variable's annual global average in the standard monthly model output between the two + simulations. The (per variable) null hypothesis uses the non-parametric, two-sample (n and m) Kolmogorov-Smirnov test as the univariate test of of equality of distribution of global means. - + * **PGN** -- This tests the null hypothesis that the reference (n) and modified (m) model ensembles represent the same atmospheric state after each physics parameterization is applied within a single time-step using the two-sample (n and m) T-test for equal averages at a 95% confidence level. Ensembles are generated by repeating the simulation for many initial conditions, with each initial condition subject to multiple perturbations. - + * **TSC** -- This tests the null hypothesis that the convergence of the time stepping error for a set of key atmospheric variables is the same for a reference ensemble and a test ensemble. Both the reference and test ensemble are generated with a two-second time step, and for each variable the RMSD between each ensemble and - a truth ensemble, generated with a one-second time step, is calculated. At each + a truth ensemble, generated with a one-second time step, is calculated. At each 10 second interval during the 10 minute long simulations, the difference in the reference and test RMSDs for each variable, each ensemble member, and each domain are calculated and these ΔRMSDs should be zero for identical climates. A one sided (due to self convergence) Student's T Test is used to test the null hypothesis that the ensemble mean ΔRMSD is statistically zero. - - - -Running the tests ------------------ -These tests are built into E3SM-CIME as system tests and will be launched using the `create_test` scripts. + + +## Running the tests + +These tests are built into E3SM-CIME as system tests and will be launched using the `create_test` scripts. 
*However*, because these tests use high level statistics, they have additional python dependencies which need to be installed on your system and accessible via the compute nodes (if you're on a batch machine). -Primarily, the statistical analysis of the climates is done through [EVV](https://github.com/LIVVkit/evv4esm) -which will generate a portable test website to describe the results (pass or fail) in detail (see the extended output -section below). +Primarily, the statistical analysis of the climates is done through [EVV](https://github.com/LIVVkit/evv4esm) +which will generate a portable test website to describe the results (pass or fail) in detail (see the extended output +section below). -For E3SM supported machines, the `e3sm_simple` conda environment is provided for these tests and includes the `EVV` -conda package. You can activate the `e3sm_simple` environment in the same way as `e3sm_unified` environment: +For E3SM supported machines, the `cime_env` conda environment is provided for these tests and includes the `EVV` +conda package. You can activate the `cime_env` environment in the same way as `e3sm_unified` environment: ``` -source /load_latest_e3sm_simple.sh +source /load_latest_cime_env.sh ``` where `` is the machine-specific location of the activation script as described on this confluence page: @@ -65,174 +63,205 @@ where `` is the machine-specific location of the activation scrip https://acme-climate.atlassian.net/wiki/spaces/EIDMG/pages/780271950/Diagnostics+and+Analysis+Quickstart#DiagnosticsandAnalysisQuickstart-Accessingmetapackagesoftwarebyactivatingacondaenvironment If you don't have access to confluence or are unable to activate this environment for whatever reason, you can install -your own `e3sm_simple` conda environment with this command (once you have anaconda/miniconda installed): +your own `cime_env` conda environment with this command (once you have anaconda/miniconda installed): ``` -conda create -n e3sm-simple -c conda-forge -c e3sm e3sm-simple +conda create -n cime-env -c conda-forge -c e3sm cime-env ``` -*NOTE: If you run into problems with getting this environment working on your machine, please open an issue on E3SM's -Github and tag @jhkennedy, or send Joseph H. Kennedy an email.* +*NOTE: If you run into problems with getting this environment working on your machine, please open an issue on E3SM's +Github and tag @mkstratos. -After you've activated the `e3sm_simple` environment, change to the `$E3SM/cime/scripts` directory (where `$E3SM` is the -directory containing E3SM). Then to run one of the tests, you will use the `create_test` script like normal. -To run the `MVK` test and generate a baseline, you would run a command like: +After you've activated the `cime_env` environment, change to the `$E3SM/cime/scripts` directory (where `$E3SM` is the +directory containing E3SM). Then to run one of the tests, you will use the `create_test` script like normal. 
+To run the `MVK` test and generate a baseline, you would run a command like: ``` -./create_test MVK_PL.ne4_oQU240.FC5AV1C-04P2 -g --baseline-root "/PATH/TO/BASELINE" +./create_test MVK_PS.ne4pg2_oQU480.F2010 -g --baseline-root "/PATH/TO/BASELINE" ``` -And to compare to the baseline, you would run a command like: +And to compare to the baseline, you would run a command like: ``` -./create_test MVK_PL.ne4_oQU240.FC5AV1C-04P2 -c --baseline-root "/PATH/TO/BASELINE" +./create_test MVK_PS.ne4pg2_oQU480.F2010 -c --baseline-root "/PATH/TO/BASELINE" ``` -*NOTE: The MVK run a 20 member ensemble for at least 13 months (using the last 12 for the -statistical tests) and, depending on the machine, may take some fiddling to execute within a particular -queue's wallclock time limit. You may want to over-ride the requested walltime using `--walltime HH:MM:SS` -option to `create_test`.* +*NOTE: The MVK runs a 30 member ensemble for 13 months (using the last 12 for the +statistical tests) and, depending on the machine, may take some fiddling to execute within a particular +queue's wallclock time limit. You may want to over-ride the requested walltime using `--walltime HH:MM:SS` +option to `create_test`.* -The full set of commands to run the MVK test used on Cori are: +The full set of commands to run the MVK test used on Perlmutter are: -*Generate a baseline:* +*Generate a baseline* ``` cd $E3SM/cime/scripts -source /global/project/projectdirs/acme/software/anaconda_envs/load_latest_e3sm_simple.sh +source /global/common/software/e3sm/anaconda_envs/load_latest_cime_env.sh -./create_test MVK_PL.ne4_ne4.FC5AV1C-04P2 --baseline-root "${CSCRATCH}/baselines" --project acme -g -o --walltime 01:00:00 +./create_test MVK_PS.ne4pg2_oQU480.F2010 --baseline-root "${PSCRATCH}/baselines" --project e3sm -g -o --walltime 01:00:00 ``` -*Compare to a baseline:* +*Compare to a baseline* ``` cd $E3SM/cime/scripts -source /global/project/projectdirs/acme/software/anaconda_envs/load_latest_e3sm_simple.sh +source /global/common/software/e3sm/anaconda_envs/load_latest_cime_env.sh -./create_test MVK_PL.ne4_ne4.FC5AV1C-04P2 --baseline-root "${CSCRATCH}/baselines" --project acme -c --walltime 01:00:00 +./create_test MVK_PS.ne4pg2_oQU480.F2010 --baseline-root "${PSCRATCH}/baselines" --project e3sm -c --walltime 01:00:00 ``` -Test pass/fail and extended output ----------------------------------- +## Test pass/fail and extended output -When you launch these tests, CIME will ouput the location of the case directory, which will look +When you launch these tests and compare to a baseline, CIME will output the location of the case directory, which will look something like this: ``` -# On cori-knl: -./create_test MVK_PL.ne4_ne4.FC5AV1C-04P2 --baseline-root "${CSCRATCH}/baselines" --project acme -c --walltime 01:00:00 - Creating test directory /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-04P2.cori-knl_intel.G.20190807_140111_rhfxn9 +# On pm-cpu: +./create_test MVK_PS.ne4pg2_oQU480.F2010 --baseline-root "${PSCRATCH}/baselines" --project e3sm -c --walltime 01:00:00 + Creating test directory ${PSCRATCH}/e3sm_scratch/pm-cpu/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID ``` -Let's call that directory `$CASE_DIR`. Once all the jobs are finished, navigate to that directory and -you can `cat TestStatus` to determine if the test passed or failed by looking at the `BASELINE` status: +Let's call that directory `$CASE_DIR`. 
Once all the jobs are finished, navigate to that directory and +you can `cat TestStatus` to determine if the test passed or failed by looking at the `BASELINE` status: ``` cd $CASE_DIR cat TestStatus ... - PASS MVK_PL.ne4_ne4.FC5AV1C-04P2.cori-knl_intel BASELINE + PASS MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel BASELINE ... ``` -To get some basic summary statistics about the test that was run, look in the output of the final job -submission for EVV's analysis: +To get some basic summary statistics about the test that was run, look in the `TestStatus.log` file: - ``` -view MVK_PL.ne4_ne4.FC5AV1C-04P2.cori-knl_intel.G.20190807_140111_rhfxn9.C.YYYYMMDD_HHMMSS_RANDOMID.test.oJOBID - ... - 2019-08-14 22:09:02 CASE.RUN HAS FINISHED - -------------------------------------------------------------------- - ______ __ __ __ __ - | ____| \ \ / / \ \ / / - | |__ \ \ / / \ \ / / - | __| \ \/ / \ \/ / - | |____ \ / \ / - |______| \/ \/ - - Extended Verification and Validation for Earth System Models - -------------------------------------------------------------------- - - Current run: 2019-08-14 21:51:32 - User: ${USER} - OS Type: Linux 4.12.14-25.22_5.0.70-cray_ari_c - Machine: nid06598 - - ------------------------------------------------------------------- - ----------------------------------------------------------------- - Beginning extensions test suite - ----------------------------------------------------------------- - - - Kolmogorov-Smirnov test: YYYYMMDD_HHMMSS_RANDOMID - Variables analyzed: 378 - Rejecting: 9 - Critical value: 13.0 - Ensembles: identical - - ----------------------------------------------------------------- - Extensions test suite complete - ----------------------------------------------------------------- - - ------------------------------------------------------------------- - Done! Results can be seen in a web browser at: - /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-04P2.cori-knl_intel.G.20190807_140111_rhfxn9/run/MVK_PL.ne4_ne4.FC5AV1C-04P2.cori-knl_intel.G.20190807_140111_rhfxn9.evv/index.html - ------------------------------------------------------------------- - ... +``` +2019-08-14 22:09:02: BASELINE PASS for test 'YYYYMMDD_HHMMSS_RANDOMID'. + Case: YYYYMMDD_HHMMSS_RANDOMID; Test status: pass; Variables analyzed: 118; Rejecting: 0; Critical value: 13; Ensembles: statistically identical + EVV results can be viewed at: ${PSCRATCH}/e3sm_scratch/pm-cpu/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/ + EVV viewing instructions can be found at: https://github.com/ESMCI/CIME/blob/master/scripts/climate_reproducibility/README.md#test-passfail-and-extended-output ``` -EVV also prints the location of the output website where you can see the details of the analysis. For -the MVK test, you will be able to view per variable Q-Q plots, P-P plots, the K-S test statistic, and -whether it rejects or accepts the null hypothesis, as well as a description of the test itself -- you +EVV reports the location of the output website where you can see the details of the analysis. For +the MVK test, you will be able to view per variable Q-Q plots, P-P plots, the K-S test statistic, and +whether it rejects or accepts the null hypothesis, as well as a description of the test itself -- you can see an example of the output website [here](http://livvkit.github.io/evv4esm/). 
-Please note: the output website uses some JavaScript to render elements of the page (especially figures), -and opening up the `index.html` file using the `file://` protocol in a web browser will likely not work -well (most browser have stopped allowing access to "local resources" like JavaScript through the `file://` -protocol). You can view the website by either copying it to a hosted location (`~/WWW` which is hosted at -`http://users.nccs.gov/~user` on Titan, for example) or copying it to your local machine and running a -local http server (included in python!) and viewing it through an address like `http://0.0.0.0:8000/index.html`. +To view the website, you can either tunnel the website to your local machine through ssh, or copy +the website directory to your machine and view it using EVV. + +### View via ssh + +For this example, we'll assume the tests were run on Perlmutter at NERSC, but these instructions should be +easily adaptable to any E3SM supported machine. First, log into Perlmutter via ssh and connect your local +8080 port to the 8080 port on Perlmutter: + +``` +ssh -L 8080:localhost:8080 [USER]@saul-p1.nersc.gov +``` + +Activate the `cime_env` environment: + +``` +source /global/common/software/e3sm/anaconda_envs/load_latest_cime_env.sh +``` + +Navigate to the case's run directory: + +``` +pushd ${CASE_DIR}/run +``` + +Then, using EVV, serve the website over port 8080: + +``` +evv -o PGN_P1x1.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s 8080 +``` + +Evv will then report to you the URL where you can view the website: + +``` + +-------------------------------------------------------------------- + ______ __ __ __ __ + | ____| \ \ / / \ \ / / + | |__ \ \ / / \ \ / / + | __| \ \/ / \ \/ / + | |____ \ / \ / + |______| \/ \/ + + Extended Verification and Validation for Earth System Models +-------------------------------------------------------------------- -**For the easiest viewing** we recommend copying the website to your local machine, and using EVV to -view it. you can install EVV locally by running this command: + Current run: 2024-03-06 07:56:37 + User: mek + OS Type: Linux 5.14.21-150400.24.81_12.0.87-cray_shasta_c + Machine: login31 + +Serving HTTP on 0.0.0.0 port 8080 (http://0.0.0.0:8080/) + +View the generated website by navigating to: + + http://0.0.0.0:8080/PGN_P1x1.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html + +Exit by pressing `ctrl+c` to send a keyboard interrupt. ``` -conda install evv4esm -``` -Then, copy the website to your local machine, and view it: +You can now either click that link or copy-paste that link into your favorite web +browser to view the output website. + +### View a local copy + +For this example, we'll assume the tests were run on Perlmutter at NERSC, but these instructions should be +easily adaptable to any E3SM supported machine. Install `cime_env` locally and activate it: +``` +conda create -n cime_env -c conda-forge -c e3sm cime-env +conda activate cime_env +``` + +Then, copy the website to your local machine, and view it: ``` # on your local machine -scp -r /lustre/atlas/proj-shared/cli115/$USER/MVK_PL.ne4_oQU240.FC5AV1C-04P2.titan_pgi.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PL.ne4_oQU240.FC5AV1C-04P2.titan_pgi.C.YYYYMMDD_HHMMSS_RANDOMID.eve . 
-evv -o MVK_PL.ne4_oQU240.FC5AV1C-04P2.titan_pgi.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s +scp -r ${PSCRATCH}/e3sm_scratch/pm-cpu/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv . +evv -o MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s -------------------------------------------------------------------- - ______ __ __ __ __ - | ____| \ \ / / \ \ / / - | |__ \ \ / / \ \ / / - | __| \ \/ / \ \/ / - | |____ \ / \ / - |______| \/ \/ - - Extended Verification and Validation for Earth System Models + ______ __ __ __ __ + | ____| \ \ / / \ \ / / + | |__ \ \ / / \ \ / / + | __| \ \/ / \ \/ / + | |____ \ / \ / + |______| \/ \/ + + Extended Verification and Validation for Earth System Models -------------------------------------------------------------------- - + Current run: 2018-08-06 15:15:03 User: ${USER} OS Type: Linux 4.15.0-29-generic Machine: pc0101123 - - + + Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) - + View the generated website by navigating to: - - http://0.0.0.0:8000/MVK_PL.ne4_oQU240.FC5AV1C-04P2.titan_pgi.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html - + + http://0.0.0.0:8000/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html + Exit by pressing `ctrl+c` to send a keyboard interrupt. - + ``` +You can now either click that link or copy-paste that link into your favorite web +browser to view the output website. + + +**Please note:** the output website uses some JavaScript to render elements of the page (especially figures), +and opening up the `index.html` file using the `file://` protocol in a web browser will likely not work +well (most browser have stopped allowing access to "local resources" like JavaScript through the `file://` +protocol). You can view the website by either copying it to a hosted location (`/global/cfs/projectdirs/e3sm/www/${USER}` which is hosted at +`https://portal.nersc.gov/project/e3sm/${USER}` on NERSC, for example) or copying it to your local machine and running a +local http server (included in python!) and viewing it through an address like `http://0.0.0.0:8000/index.html`. diff --git a/scripts/create_clone b/scripts/create_clone index c024892d90d..41d3fb13cfc 100755 --- a/scripts/create_clone +++ b/scripts/create_clone @@ -1,106 +1,18 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -from Tools.standard_script_setup import * +import os +import sys -from CIME.utils import expect -from CIME.case import Case -from argparse import RawTextHelpFormatter -import re -logger = logging.getLogger(__name__) +real_file_dir = os.path.dirname(os.path.realpath(__file__)) +cimeroot = os.path.abspath(os.path.join(real_file_dir, "..")) +# Set tools path as well so external scripts can import either +# CIME.Tools.standard_script_setup or just standard_script_setup +tools_path = os.path.join(cimeroot, "CIME", "Tools") -############################################################################### -def parse_command_line(args): -############################################################################### - parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) +sys.path.insert(0, cimeroot) +sys.path.insert(1, tools_path) - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--case", "-case", required=True, - help="(required) Specify a new case name. 
If not a full pathname, " - "\nthe new case will be created under then current working directory.") - - parser.add_argument("--clone", "-clone", required=True, - help="(required) Specify a case to be cloned. If not a full pathname, " - "\nthe case to be cloned is assumed to be under then current working directory.") - - parser.add_argument("--ensemble", default=1, - help="clone an ensemble of cases, the case name argument must end in an integer.\n" - "for example: ./create_clone --clone case.template --case case.001 --ensemble 4 \n" - "will create case.001, case.002, case.003, case.004 from existing case.template" ) - - parser.add_argument("--user-mods-dir", - help="Full pathname to a directory containing any combination of user_nl_* files " - "\nand shell_commands script (typically containing xmlchange commands). " - "\nThe directory can also contain an SourceMods/ directory with the same structure " - "\nas would be found in a case directory. If this argument is used in conjunction " - "\nwith the --keepexe flag, then no changes will be permitted to the env_build.xml " - "\nin the newly created case directory. ") - - parser.add_argument("--keepexe", "-keepexe", action="store_true", - help="Sets EXEROOT to point to original build. It is HIGHLY recommended " - "\nthat the original case be built BEFORE cloning it if the --keepexe flag is specfied. " - "\nThis flag will make the SourceMods/ directory in the newly created case directory a " - "\nsymbolic link to the SourceMods/ directory in the original case directory. ") - - parser.add_argument("--mach-dir", "-mach_dir", - help="Specify the locations of the Machines directory, other than the default. " - "\nThe default is CIMEROOT/machines.") - - parser.add_argument("--project", "-project", - help="Specify a project id for the case (optional)." - "\nUsed for accounting and directory permissions when on a batch system." - "\nThe default is user or machine specified by PROJECT." - "\nAccounting (only) may be overridden by user or machine specified CHARGE_ACCOUNT.") - - parser.add_argument("--cime-output-root", - help="Specify the root output directory. The default is the setting in the original" - "\ncase directory. 
NOTE: create_clone will fail if this directory is not writable.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.case is None: - expect(False, - "Must specify -case as an input argument") - - if args.clone is None: - expect(False, - "Must specify -clone as an input argument") - - startval = '1' - if int(args.ensemble) > 1: - m = re.search(r'(\d+)$', args.case) - expect(m, " case name must end in an integer to use this feature") - startval = m.group(1) - - return args.case, args.clone, args.keepexe, args.mach_dir, args.project, \ - args.cime_output_root, args.user_mods_dir, int(args.ensemble), startval - -############################################################################## -def _main_func(): -############################################################################### - - case, clone, keepexe, mach_dir, project, cime_output_root, user_mods_dir, \ - ensemble, startval = parse_command_line(sys.argv) - - cloneroot = os.path.abspath(clone) - expect(os.path.isdir(cloneroot), - "Missing cloneroot directory %s " % cloneroot) - - if user_mods_dir is not None: - if os.path.isdir(user_mods_dir): - user_mods_dir = os.path.abspath(user_mods_dir) - nint = len(startval) - - for i in range(int(startval), int(startval)+ensemble): - if ensemble > 1: - case = case[:-nint] + '{{0:0{0:d}d}}'.format(nint).format(i) - with Case(cloneroot, read_only=False) as clone: - clone.create_clone(case, keepexe=keepexe, mach_dir=mach_dir, - project=project, - cime_output_root=cime_output_root, - user_mods_dir=user_mods_dir) - -############################################################################### +from CIME.scripts.create_clone import _main_func if __name__ == "__main__": _main_func() diff --git a/scripts/create_newcase b/scripts/create_newcase index 183d9dae2b7..af0b5634a79 100755 --- a/scripts/create_newcase +++ b/scripts/create_newcase @@ -1,227 +1,18 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -""" -Script to create a new CIME Case Control System (CSS) experimental case. -""" +import os +import sys -from Tools.standard_script_setup import * -from CIME.utils import expect, get_model, get_cime_config, get_cime_default_driver -from CIME.case import Case -from argparse import RawTextHelpFormatter +real_file_dir = os.path.dirname(os.path.realpath(__file__)) +cimeroot = os.path.abspath(os.path.join(real_file_dir, "..")) +# Set tools path as well so external scripts can import either +# CIME.Tools.standard_script_setup or just standard_script_setup +tools_path = os.path.join(cimeroot, "CIME", "Tools") -logger = logging.getLogger(__name__) +sys.path.insert(0, cimeroot) +sys.path.insert(1, tools_path) -############################################################################### -def parse_command_line(args, cimeroot, description): -############################################################################### - parser = argparse.ArgumentParser(description=description, - formatter_class=RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - try: - cime_config = get_cime_config() - except Exception: - cime_config = None - - parser.add_argument("--case", "-case", required=True, metavar="CASENAME", - help="(required) Specify the case name. " - "\nIf this is simply a name (not a path), the case directory is created in the current working directory." 
- "\nThis can also be a relative or absolute path specifying where the case should be created;" - "\nwith this usage, the name of the case will be the last component of the path.") - - parser.add_argument("--compset", "-compset", required=True, - help="(required) Specify a compset. " - "\nTo see list of current compsets, use the utility ./query_config --compsets in this directory.\n") - - parser.add_argument("--res", "-res", required=True, metavar="GRID", - help="(required) Specify a model grid resolution. " - "\nTo see list of current model resolutions, use the utility " - "\n./query_config --grids in this directory.") - - parser.add_argument("--machine", "-mach", - help="Specify a machine. " - "The default value is the match to NODENAME_REGEX in config_machines.xml. To see " - "\nthe list of current machines, invoke ./query_config --machines.") - - parser.add_argument("--compiler", "-compiler", - help="Specify a compiler. " - "\nTo see list of supported compilers for each machine, use the utility " - "\n./query_config --machines in this directory. " - "\nThe default value will be the first one listed.") - - parser.add_argument("--multi-driver",action="store_true", - help="Specify that --ninst should modify the number of driver/coupler instances. " - "\nThe default is to have one driver/coupler supporting multiple component instances.") - - parser.add_argument("--ninst",default=1, - help="Specify number of model ensemble instances. " - "\nThe default is multiple components and one driver/coupler. " - "\nUse --multi-driver to run multiple driver/couplers in the ensemble.") - - parser.add_argument("--mpilib", "-mpilib", - help="Specify the MPI library. " - "To see list of supported mpilibs for each machine, invoke ./query_config --machines." - "\nThe default is the first listing in MPILIBS in config_machines.xml.\n") - - parser.add_argument("--project", "-project", - help="Specify a project id for the case (optional)." - "\nUsed for accounting and directory permissions when on a batch system." - "\nThe default is user or machine specified by PROJECT." - "\nAccounting (only) may be overridden by user or machine specified CHARGE_ACCOUNT.") - - parser.add_argument("--pecount", "-pecount", default="M", - help="Specify a target size description for the number of cores. " - "\nThis is used to query the appropriate config_pes.xml file and find the " - "\noptimal PE-layout for your case - if it exists there. " - "\nAllowed options are ('S','M','L','X1','X2','[0-9]x[0-9]','[0-9]').\n") - - parser.add_argument("--user-mods-dir", - help="Full pathname to a directory containing any combination of user_nl_* files " - "\nand a shell_commands script (typically containing xmlchange commands). " - "\nThe directory can also contain an SourceMods/ directory with the same structure " - "\nas would be found in a case directory.") - - parser.add_argument("--pesfile", - help="Full pathname of an optional pes specification file. " - "\nThe file can follow either the config_pes.xml or the env_mach_pes.xml format.") - - parser.add_argument("--gridfile", - help="Full pathname of config grid file to use. " - "\nThis should be a copy of config/config_grids.xml with the new user grid changes added to it. \n") - - if cime_config and cime_config.has_option('main','workflow'): - workflow_default = cime_config.get('main', 'workflow') - else: - workflow_default = "default" - - parser.add_argument("--workflow-case",default=workflow_default, - help="A workflow from config_workflow.xml to apply to this case. 
") - - - if cime_config and cime_config.has_option('main','SRCROOT'): - srcroot_default = cime_config.get('main', 'srcroot') - else: - srcroot_default = os.path.dirname(cimeroot) - - parser.add_argument("--srcroot", default=srcroot_default, - help="Alternative pathname for source root directory. " - "The default is cimeroot/../") - - parser.add_argument("--output-root", - help="Alternative pathname for the directory where case output is written.") - - # The following is a deprecated option - parser.add_argument("--script-root", dest="script_root", default=None, help=argparse.SUPPRESS) - if cime_config: - model = get_model() - else: - model = None - - if model == "cesm": - parser.add_argument("--run-unsupported", action="store_true", - help="Force the creation of a case that is not tested or supported by CESM developers.") - # hidden argument indicating called from create_test - # Indicates that create_newcase was called from create_test - do not use otherwise. - parser.add_argument("--test", "-test", action="store_true", help=argparse.SUPPRESS) - - parser.add_argument("--walltime", default=os.getenv("CIME_GLOBAL_WALLTIME"), - help="Set the wallclock limit for this case in the format (the usual format is HH:MM:SS). " - "\nYou may use env var CIME_GLOBAL_WALLTIME to set this. " - "\nIf CIME_GLOBAL_WALLTIME is not defined in the environment, then the walltime" - "\nwill be the maximum allowed time defined for the queue in config_batch.xml.") - - parser.add_argument("-q", "--queue", default=None, - help="Force batch system to use the specified queue. ") - - parser.add_argument("--handle-preexisting-dirs", dest="answer", choices=("a", "r", "u"), default=None, - help="Do not query how to handle pre-existing bld/exe dirs. " - "\nValid options are (a)bort (r)eplace or (u)se existing. " - "\nThis can be useful if you need to run create_newcase non-iteractively.") - - parser.add_argument("-i", "--input-dir", - help="Use a non-default location for input files. This will change the xml value of DIN_LOC_ROOT.") - parser.add_argument("--driver", default=get_cime_default_driver(), - choices=('mct','nuopc', 'moab'), - help=argparse.SUPPRESS) - - parser.add_argument("-n", "--non-local", action="store_true", - help="Use when you've requested a machine that you aren't on. 
" - "Will reduce errors for missing directories etc.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.srcroot is not None: - expect(os.path.isdir(args.srcroot), - "Input non-default directory srcroot {} does not exist ".format(args.srcroot)) - args.srcroot = os.path.abspath(args.srcroot) - - if args.gridfile is not None: - expect(os.path.isfile(args.gridfile), - "Grid specification file {} does not exist ".format(args.gridfile)) - - if args.pesfile is not None: - expect(os.path.isfile(args.pesfile), - "Pes specification file {} cannot be found ".format(args.pesfile)) - - run_unsupported = False - if model == "cesm": - run_unsupported = args.run_unsupported - - expect(CIME.utils.check_name(args.case, fullpath=True), - "Illegal case name argument provided") - - if args.input_dir is not None: - args.input_dir = os.path.abspath(args.input_dir) - elif cime_config and cime_config.has_option("main","input_dir"): - args.input_dir = os.path.abspath(cime_config.get("main","input_dir")) - - return args.case, args.compset, args.res, args.machine, args.compiler,\ - args.mpilib, args.project, args.pecount, \ - args.user_mods_dir, args.pesfile, \ - args.gridfile, args.srcroot, args.test, args.multi_driver, \ - args.ninst, args.walltime, args.queue, args.output_root, args.script_root, \ - run_unsupported, args.answer, args.input_dir, args.driver, args.workflow_case, args.non_local - -############################################################################### -def _main_func(description): -############################################################################### - cimeroot = os.path.abspath(CIME.utils.get_cime_root()) - - casename, compset, grid, machine, compiler, \ - mpilib, project, pecount, \ - user_mods_dir, pesfile, \ - gridfile, srcroot, test, multi_driver, ninst, walltime, \ - queue, output_root, script_root, run_unsupported, \ - answer, input_dir, driver, \ - workflow, non_local = parse_command_line(sys.argv, cimeroot, description) - - - if script_root is None: - caseroot = os.path.abspath(casename) - else: - caseroot = os.path.abspath(script_root) - - if user_mods_dir is not None: - if os.path.isdir(user_mods_dir): - user_mods_dir = os.path.abspath(user_mods_dir) - - # create_test creates the caseroot before calling create_newcase - # otherwise throw an error if this directory exists - expect(not (os.path.exists(caseroot) and not test), - "Case directory {} already exists".format(caseroot)) - - with Case(caseroot, read_only=False) as case: - # Configure the Case - case.create(casename, srcroot, compset, grid, user_mods_dir=user_mods_dir, - machine_name=machine, project=project, - pecount=pecount, compiler=compiler, mpilib=mpilib, - pesfile=pesfile,gridfile=gridfile, - multi_driver=multi_driver, ninst=ninst, test=test, - walltime=walltime, queue=queue, output_root=output_root, - run_unsupported=run_unsupported, answer=answer, - input_dir=input_dir, driver=driver, workflow_case=workflow, non_local=non_local) - -############################################################################### +from CIME.scripts.create_newcase import _main_func if __name__ == "__main__": - _main_func(__doc__) + _main_func() diff --git a/scripts/create_test b/scripts/create_test index bb9b1fce1d3..dc3ef859f5c 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -1,644 +1,18 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -""" -Script to create, build and run CIME tests. 
This script can: +import os +import sys -1) Run a single test, or more than one test - ./create_test TESTNAME - ./create_test TESTNAME1 TESTNAME2 ... -2) Run a test suite from a text file with one test per line - ./create_test -f TESTFILE -3) Run an E3SM test suite: - Below, a suite name, SUITE, is defined in $CIMEROOT/scripts/lib/get_tests.py - - Run a single suite - ./create_test SUITE - - Run two suites - ./create_test SUITE1 SUITE2 - - Run all tests in a suite except for one - ./create_test SUITE ^TESTNAME - - Run all tests in a suite except for tests that are in another suite - ./create_test SUITE1 ^SUITE2 - - Run all tests in a suite with baseline comparisons against master baselines - ./create_test SUITE1 -c -b master -4) Run a CESM test suite(s): - ./create_test --xml-category XML_CATEGORY [--xml-machine XML_MACHINE] [--xml-compiler XML_COMPILER] [ --xml-testlist XML_TESTLIST] +real_file_dir = os.path.dirname(os.path.realpath(__file__)) +cimeroot = os.path.abspath(os.path.join(real_file_dir, "..")) +# Set tools path as well so external scripts can import either +# CIME.Tools.standard_script_setup or just standard_script_setup +tools_path = os.path.join(cimeroot, "CIME", "Tools") -If this tool is missing any feature that you need, please add an issue on -https://github.com/ESMCI/cime -""" -from Tools.standard_script_setup import * +sys.path.insert(0, cimeroot) +sys.path.insert(1, tools_path) -import get_tests -from CIME.test_scheduler import TestScheduler, RUN_PHASE -from CIME.utils import expect, convert_to_seconds, compute_total_time, convert_to_babylonian_time, run_cmd_no_fail, get_cime_config -from CIME.XML.machines import Machines -from CIME.case import Case -from CIME.test_utils import get_tests_from_xml -from argparse import RawTextHelpFormatter - -import argparse, math, glob - -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args, description): -############################################################################### - - parser = argparse.ArgumentParser(description=description, - formatter_class=RawTextHelpFormatter) - - model = CIME.utils.get_model() - - CIME.utils.setup_standard_logging_options(parser) - - config = get_cime_config() - - parser.add_argument("--no-run", action="store_true", - help="Do not run generated tests") - - parser.add_argument("--no-build", action="store_true", - help="Do not build generated tests, implies --no-run") - - parser.add_argument("--no-setup", action="store_true", - help="Do not setup generated tests, implies --no-build and --no-run") - - parser.add_argument("-u", "--use-existing", action="store_true", - help="Use pre-existing case directories they will pick up at the " - "\nlatest PEND state or re-run the first failed state. Requires test-id") - - default = get_default_setting(config, "SAVE_TIMING", False, check_main=False) - - parser.add_argument("--save-timing", action="store_true", - default=default, - help="Enable archiving of performance data.") - - parser.add_argument("--no-batch", action="store_true", - help="Do not submit jobs to batch system, run locally." - "\nIf false, this will default to machine setting.") - - parser.add_argument("--single-exe", action="store_true", - default=False, - help="Use a single build for all cases. This can " - "\ndrastically improve test throughput but is currently use-at-your-own risk." - "\nIt's up to the user to ensure that all cases are build-compatible." 
- "\nE3SM tests belonging to a suite with share enabled will always share exes.") - - default = get_default_setting(config, "SINGLE_SUBMIT", False, check_main=False) - - parser.add_argument("--single-submit", action="store_true", - default=default, - help="Use a single interactive allocation to run all the tests. This can " - "\ndrastically reduce queue waiting but only makes sense on batch machines.") - - default = get_default_setting(config, "TEST_ROOT", None, check_main=False) - - parser.add_argument("-r", "--test-root", - default=default, - help="Where test cases will be created. The default is output root" - "\nas defined in the config_machines file") - - default = get_default_setting(config, "OUTPUT_ROOT", None, check_main=False) - - parser.add_argument("--output-root", - default=default, - help="Where the case output is written.") - - default = get_default_setting(config, "BASELINE_ROOT", None, check_main=False) - - parser.add_argument("--baseline-root", - default=default, - help="Specifies a root directory for baseline datasets that will " - "\nbe used for Bit-for-bit generate and/or compare testing.") - - default = get_default_setting(config, "CLEAN", False, check_main=False) - - parser.add_argument("--clean", action="store_true", - default=default, - help="Specifies if tests should be cleaned after run. If set, all object" - "\nexecutables and data files will be removed after the tests are run.") - - default = get_default_setting(config, "MACHINE", None, check_main=True) - - parser.add_argument("-m", "--machine", - default=default, - help="The machine for creating and building tests. This machine must be defined" - "\nin the config_machines.xml file for the given model. The default is to " - "\nto match the name of the machine in the test name or the name of the " - "\nmachine this script is run on to the NODENAME_REGEX field in " - "\nconfig_machines.xml. WARNING: This option is highly unsafe and should " - "\nonly be used if you are an expert.") - - default = get_default_setting(config, "MPILIB", None, check_main=True) - - parser.add_argument("--mpilib", - default=default, - help="Specify the mpilib. To see list of supported MPI libraries for each machine, " - "\ninvoke ./query_config. The default is the first listing .") - - if model == "cesm": - parser.add_argument("-c", "--compare", - help="While testing, compare baselines against the given compare directory. ") - - parser.add_argument("-g", "--generate", - help="While testing, generate baselines in the given generate directory. " - "\nNOTE: this can also be done after the fact with bless_test_results") - - parser.add_argument("--xml-machine", - help="Use this machine key in the lookup in testlist.xml. " - "\nThe default is all if any --xml- argument is used.") - - parser.add_argument("--xml-compiler", - help="Use this compiler key in the lookup in testlist.xml. " - "\nThe default is all if any --xml- argument is used.") - - parser.add_argument("--xml-category", - help="Use this category key in the lookup in testlist.xml. " - "\nThe default is all if any --xml- argument is used.") - - parser.add_argument("--xml-testlist", - help="Use this testlist to lookup tests.The default is specified in config_files.xml") - - parser.add_argument("--xml-driver", choices=('mct', 'nuopc', 'moab'), - help="Override driver specified in tests and use this one.") - - parser.add_argument("testargs", nargs="*", - help="Tests to run. 
Testname form is TEST.GRID.COMPSET[.MACHINE_COMPILER]") - - else: - - parser.add_argument("testargs", nargs="+", - help="Tests or test suites to run." - " Testname form is TEST.GRID.COMPSET[.MACHINE_COMPILER]") - - parser.add_argument("-b", "--baseline-name", - help="If comparing or generating baselines, use this directory under baseline root. " - "\nDefault will be current branch name.") - - parser.add_argument("-c", "--compare", action="store_true", - help="While testing, compare baselines") - - parser.add_argument("-g", "--generate", action="store_true", - help="While testing, generate baselines. " - "\nNOTE: this can also be done after the fact with bless_test_results") - - default = get_default_setting(config, "COMPILER", None, check_main=True) - - parser.add_argument("--compiler", - default=default, - help="Compiler for building cime. Default will be the name in the " - "\nTestname or the default defined for the machine.") - - parser.add_argument("-n", "--namelists-only", action="store_true", - help="Only perform namelist actions for tests") - - parser.add_argument("-p", "--project", - help="Specify a project id for the case (optional)." - "\nUsed for accounting and directory permissions when on a batch system." - "\nThe default is user or machine specified by PROJECT." - "\nAccounting (only) may be overridden by user or machine specified CHARGE_ACCOUNT.") - - parser.add_argument("-t", "--test-id", - help="Specify an 'id' for the test. This is simply a string that is appended " - "\nto the end of a test name. If no test-id is specified, a time stamp plus a " - "\nrandom string will be used (ensuring a high probability of uniqueness). " - "\nIf a test-id is specified, it is the user's responsibility to ensure that " - "\neach run of create_test uses a unique test-id. WARNING: problems will occur " - "\nif you use the same test-id twice on the same file system, even if the test " - "\nlists are completely different.") - - default = get_default_setting(config, "PARALLEL_JOBS", None, check_main=False) - - parser.add_argument("-j", "--parallel-jobs", type=int, default=default, - help="Number of tasks create_test should perform simultaneously. The default " - "\n is min(num_cores, num_tests).") - - default = get_default_setting(config, "PROC_POOL", None, check_main=False) - - parser.add_argument("--proc-pool", type=int, default=default, - help="The size of the processor pool that create_test can use. The default is " - "\nMAX_MPITASKS_PER_NODE + 25 percent.") - - default = os.getenv("CIME_GLOBAL_WALLTIME") - if default is None: - default = get_default_setting(config, "WALLTIME", None, check_main=True) - - parser.add_argument("--walltime", default=default, - help="Set the wallclock limit for all tests in the suite. " - "\nUse the variable CIME_GLOBAL_WALLTIME to set this for all tests.") - - default = get_default_setting(config, "JOB_QUEUE", None, check_main=True) - - parser.add_argument("-q", "--queue", default=default, - help="Force batch system to use a certain queue") - - parser.add_argument("-f", "--testfile", - help="A file containing an ascii list of tests to run") - - default = get_default_setting(config, "ALLOW_BASELINE_OVERWRITE", False, check_main=False) - - parser.add_argument("-o", "--allow-baseline-overwrite", action="store_true", - default=default, - help="If the --generate option is given, then an attempt to overwrite " - "\nan existing baseline directory will raise an error. 
WARNING: Specifying this " - "\noption will allow existing baseline directories to be silently overwritten.") - - default = get_default_setting(config, "WAIT", False, check_main=False) - - parser.add_argument("--wait", action="store_true", - default=default, - help="On batch systems, wait for submitted jobs to complete") - - default = get_default_setting(config, "ALLOW_PNL", False, check_main=False) - - parser.add_argument("--allow-pnl", action="store_true", - default=default, - help="Do not pass skip-pnl to case.submit") - - parser.add_argument("--wait-check-throughput", action="store_true", - help="If waiting, fail if throughput check fails") - - parser.add_argument("--wait-check-memory", action="store_true", - help="If waiting, fail if memory check fails") - - parser.add_argument("--wait-ignore-namelists", action="store_true", - help="If waiting, ignore if namelist diffs") - - parser.add_argument("--wait-ignore-memleak", action="store_true", - help="If waiting, ignore if there's a memleak") - - default = get_default_setting(config, "FORCE_PROCS", None, check_main=False) - - parser.add_argument("--force-procs", type=int, default=default, - help="For all tests to run with this number of processors") - - default = get_default_setting(config, "FORCE_THREADS", None, check_main=False) - - parser.add_argument("--force-threads", type=int, default=default, - help="For all tests to run with this number of threads") - - default = get_default_setting(config, "INPUT_DIR", None, check_main=True) - - parser.add_argument("-i", "--input-dir", - default=default, - help="Use a non-default location for input files") - - default = get_default_setting(config, "PESFILE", None, check_main=True) - - parser.add_argument("--pesfile",default=default, - help="Full pathname of an optional pes specification file. The file" - "\ncan follow either the config_pes.xml or the env_mach_pes.xml format.") - - default = get_default_setting(config, "RETRY", 0, check_main=False) - - parser.add_argument("--retry", type=int, default=default, - help="Automatically retry failed tests. >0 implies --wait") - - parser.add_argument("-N", "--non-local", action="store_true", - help="Use when you've requested a machine that you aren't on. 
" - "Will reduce errors for missing directories etc.") - - CIME.utils.add_mail_type_args(parser) - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - CIME.utils.resolve_mail_type_args(args) - - # generate and compare flags may not point to the same directory - if model == "cesm": - if args.generate is not None: - expect(not (args.generate == args.compare), - "Cannot generate and compare baselines at the same time") - - if args.xml_testlist is not None: - expect(not (args.xml_machine is None and args.xml_compiler - is None and args.xml_category is None), - "If an xml-testlist is present at least one of --xml-machine, " - "--xml-compiler, --xml-category must also be present") - - else: - expect(not (args.baseline_name is not None and (not args.compare and not args.generate)), - "Provided baseline name but did not specify compare or generate") - expect(not (args.compare and args.generate), - "Tried to compare and generate at same time") - - expect(not (args.namelists_only and not (args.generate or args.compare)), - "Must provide either --compare or --generate with --namelists-only") - - if args.retry > 0: - args.wait = True - - if args.parallel_jobs is not None: - expect(args.parallel_jobs > 0, - "Invalid value for parallel_jobs: %d" % args.parallel_jobs) - - if args.use_existing: - expect(args.test_id is not None, "Must provide test-id of pre-existing cases") - - if args.no_setup: - args.no_build = True - - if args.no_build: - args.no_run = True - - # Namelist-only forces some other options: - if args.namelists_only: - expect(not args.no_setup, "Cannot compare namelists without setup") - args.no_build = True - args.no_run = True - args.no_batch = True - - expect(not (args.non_local and not args.no_build), "Cannot build on non-local machine") - - if args.single_submit: - expect(not args.no_run, "Doesn't make sense to request single-submit if no-run is on") - args.no_build = True - args.no_run = True - args.no_batch = True - - if args.test_id is None: - args.test_id = "%s_%s"%(CIME.utils.get_timestamp(), CIME.utils.id_generator()) - else: - expect(CIME.utils.check_name(args.test_id, additional_chars="."), - "invalid test-id argument provided") - - if args.testfile is not None: - with open(args.testfile, "r") as fd: - args.testargs.extend( [line.strip() for line in fd.read().splitlines() if line.strip() and not line.startswith('#')] ) - - # Compute list of fully-resolved test_names - test_extra_data = {} - if model == "cesm": - machine_name = args.xml_machine if args.machine is None else args.machine - - # If it's still unclear what machine to use, look at test names - if machine_name is None: - for test in args.testargs: - testsplit = CIME.utils.parse_test_name(test) - if testsplit[4] is not None: - if machine_name is None: - machine_name = testsplit[4] - else: - expect(machine_name == testsplit[4], - "ambiguity in machine, please use the --machine option") - - mach_obj = Machines(machine=machine_name) - if args.testargs: - args.compiler = mach_obj.get_default_compiler() if args.compiler is None else args.compiler - test_names = get_tests.get_full_test_names(args.testargs, - mach_obj.get_machine_name(), args.compiler) - else: - expect(not (args.xml_machine is None and args.xml_compiler - is None and args.xml_category is None and args.xml_testlist is None), - "At least one of --xml-machine, --xml-testlist, " - "--xml-compiler, --xml-category or a valid test name must be provided.") - - test_data = get_tests_from_xml(xml_machine=args.xml_machine, 
xml_category=args.xml_category, - xml_compiler=args.xml_compiler, xml_testlist=args.xml_testlist, - machine=machine_name, compiler=args.compiler, driver=args.xml_driver) - test_names = [item["name"] for item in test_data] - for test_datum in test_data: - test_extra_data[test_datum["name"]] = test_datum - - logger.info("Testnames: %s" % test_names) - else: - if args.machine is None: - args.machine = get_tests.infer_machine_name_from_tests(args.testargs) - - mach_obj = Machines(machine=args.machine) - args.compiler = mach_obj.get_default_compiler() if args.compiler is None else args.compiler - - test_names = get_tests.get_full_test_names(args.testargs, mach_obj.get_machine_name(), args.compiler) - - expect(mach_obj.is_valid_compiler(args.compiler), - "Compiler %s not valid for machine %s" % (args.compiler, mach_obj.get_machine_name())) - - if not args.wait: - expect(not args.wait_check_throughput, "Makes no sense to use --wait-check-throughput without --wait") - expect(not args.wait_check_memory, "Makes no sense to use --wait-check-memory without --wait") - expect(not args.wait_ignore_namelists, "Makes no sense to use --wait-ignore-namelists without --wait") - expect(not args.wait_ignore_memleak, "Makes no sense to use --wait-ignore-memleak without --wait") - - # Normalize compare/generate between the models - baseline_cmp_name = None - baseline_gen_name = None - if args.compare or args.generate: - if model == "cesm": - if args.compare is not None: - baseline_cmp_name = args.compare - if args.generate is not None: - baseline_gen_name = args.generate - else: - baseline_name = args.baseline_name if args.baseline_name else CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) - expect(baseline_name is not None, - "Could not determine baseline name from branch, please use -b option") - if args.compare: - baseline_cmp_name = baseline_name - elif args.generate: - baseline_gen_name = baseline_name - - if args.input_dir is not None: - args.input_dir = os.path.abspath(args.input_dir) - - # sanity check - for name in test_names: - dot_count = name.count('.') - expect(dot_count > 1 and dot_count <= 4, "Invalid test Name, '{}'".format(name)) - - # for e3sm, sort by walltime - if model == "e3sm": - if args.walltime is None: - # Longest tests should run first - test_names.sort(key=get_tests.key_test_time, reverse=True) - else: - test_names.sort() - - return test_names, test_extra_data, args.compiler, mach_obj.get_machine_name(), args.no_run, args.no_build, args.no_setup, args.no_batch,\ - args.test_root, args.baseline_root, args.clean, baseline_cmp_name, baseline_gen_name, \ - args.namelists_only, args.project, \ - args.test_id, args.parallel_jobs, args.walltime, \ - args.single_submit, args.proc_pool, args.use_existing, args.save_timing, args.queue, \ - args.allow_baseline_overwrite, args.output_root, args.wait, args.force_procs, args.force_threads, args.mpilib, args.input_dir, args.pesfile, args.retry, args.mail_user, args.mail_type, args.wait_check_throughput, args.wait_check_memory, args.wait_ignore_namelists, args.wait_ignore_memleak, args.allow_pnl, args.non_local, args.single_exe - -############################################################################### -def get_default_setting(config, varname, default_if_not_found, check_main=False): -############################################################################### - if config.has_option("create_test",varname): - default = config.get("create_test",varname) - elif check_main and config.has_option("main", varname): - default = 
config.get("main",varname) - else: - default=default_if_not_found - return default - -############################################################################### -def single_submit_impl(machine_name, test_id, proc_pool, _, args, job_cost_map, wall_time, test_root): -############################################################################### - mach = Machines(machine=machine_name) - expect(mach.has_batch_system(), "Single submit does not make sense on non-batch machine '%s'" % mach.get_machine_name()) - - machine_name = mach.get_machine_name() - - # - # Compute arg list for second call to create_test - # - new_args = list(args) - new_args.remove("--single-submit") - new_args.append("--no-batch") - new_args.append("--use-existing") - no_arg_is_a_test_id_arg = True - no_arg_is_a_proc_pool_arg = True - no_arg_is_a_machine_arg = True - for arg in new_args: - if arg == "-t" or arg.startswith("--test-id"): - no_arg_is_a_test_id_arg = False - elif arg.startswith("--proc-pool"): - no_arg_is_a_proc_pool_arg = False - elif arg == "-m" or arg.startswith("--machine"): - no_arg_is_a_machine_arg = True - - if no_arg_is_a_test_id_arg: - new_args.append("-t %s" % test_id) - if no_arg_is_a_proc_pool_arg: - new_args.append("--proc-pool %d" % proc_pool) - if no_arg_is_a_machine_arg: - new_args.append("-m %s" % machine_name) - - # - # Resolve batch directives manually. There is currently no other way - # to do this without making a Case object. Make a throwaway case object - # to help us here. - # - testcase_dirs = glob.glob("%s/*%s*/TestStatus" % (test_root, test_id)) - expect(testcase_dirs, "No test case dirs found!?") - first_case = os.path.abspath(os.path.dirname(testcase_dirs[0])) - with Case(first_case, read_only=False) as case: - env_batch = case.get_env("batch") - - submit_cmd = env_batch.get_value("batch_submit", subgroup=None) - submit_args = env_batch.get_submit_args(case, "case.test") - - tasks_per_node = mach.get_value("MAX_MPITASKS_PER_NODE") - num_nodes = int(math.ceil(float(proc_pool) / tasks_per_node)) - if wall_time is None: - wall_time = compute_total_time(job_cost_map, proc_pool) - wall_time_bab = convert_to_babylonian_time(int(wall_time)) - else: - wall_time_bab = wall_time - - queue = env_batch.select_best_queue(num_nodes, proc_pool, walltime=wall_time_bab) - wall_time_max_bab = env_batch.get_queue_specs(queue)[3] - if wall_time_max_bab is not None: - wall_time_max = convert_to_seconds(wall_time_max_bab) - if wall_time_max < wall_time: - wall_time = wall_time_max - wall_time_bab = convert_to_babylonian_time(wall_time) - - overrides = { - "job_id" : "create_test_single_submit_%s" % test_id, - "num_nodes" : num_nodes, - "tasks_per_node": tasks_per_node, - "totaltasks" : tasks_per_node * num_nodes, - "job_wallclock_time": wall_time_bab, - "job_queue": queue - } - - directives = env_batch.get_batch_directives(case, "case.test", overrides=overrides) - - # - # Make simple submit script and submit - # - - script = "#! 
/bin/bash\n" - script += "\n%s" % directives - script += "\n" - script += "cd %s\n"%os.getcwd() - script += "%s %s\n" % (__file__, " ".join(new_args)) - - submit_cmd = "%s %s" % (submit_cmd, submit_args) - logger.info("Script:\n%s" % script) - - run_cmd_no_fail(submit_cmd, input_str=script, arg_stdout=None, arg_stderr=None, verbose=True) - -############################################################################### -# pragma pylint: disable=protected-access -def create_test(test_names, test_data, compiler, machine_name, no_run, no_build, no_setup, no_batch, test_root, - baseline_root, clean, baseline_cmp_name, baseline_gen_name, namelists_only, project, test_id, parallel_jobs, - walltime, single_submit, proc_pool, use_existing, save_timing, queue, allow_baseline_overwrite, output_root, wait, - force_procs, force_threads, mpilib, input_dir, pesfile, mail_user, mail_type, - wait_check_throughput, wait_check_memory, wait_ignore_namelists, wait_ignore_memleak, allow_pnl, non_local, single_exe): -############################################################################### - impl = TestScheduler(test_names, test_data=test_data, - no_run=no_run, no_build=no_build, no_setup=no_setup, no_batch=no_batch, - test_root=test_root, test_id=test_id, - baseline_root=baseline_root, baseline_cmp_name=baseline_cmp_name, - baseline_gen_name=baseline_gen_name, - clean=clean, machine_name=machine_name, compiler=compiler, - namelists_only=namelists_only, - project=project, parallel_jobs=parallel_jobs, walltime=walltime, - proc_pool=proc_pool, use_existing=use_existing, save_timing=save_timing, - queue=queue, allow_baseline_overwrite=allow_baseline_overwrite, - output_root=output_root, force_procs=force_procs, force_threads=force_threads, - mpilib=mpilib, input_dir=input_dir, pesfile=pesfile, mail_user=mail_user, mail_type=mail_type, allow_pnl=allow_pnl, - non_local=non_local, single_exe=single_exe) - - success = impl.run_tests(wait=wait, - wait_check_throughput=wait_check_throughput, - wait_check_memory=wait_check_memory, - wait_ignore_namelists=wait_ignore_namelists, - wait_ignore_memleak=wait_ignore_memleak) - - if success and single_submit: - # Get real test root - test_root = impl._test_root - - job_cost_map = {} - largest_case = 0 - for test in impl._tests: - test_dir = impl._get_test_dir(test) - procs_needed = impl._get_procs_needed(test, RUN_PHASE) - time_needed = convert_to_seconds(run_cmd_no_fail("./xmlquery JOB_WALLCLOCK_TIME -value -subgroup case.test", from_dir=test_dir)) - job_cost_map[test] = (procs_needed, time_needed) - if procs_needed > largest_case: - largest_case = procs_needed - - if proc_pool is None: - # Based on size of created jobs, choose a reasonable proc_pool. May need to put - # more thought into this. 
- proc_pool = 2 * largest_case - - # Create submit script - single_submit_impl(machine_name, test_id, proc_pool, project, sys.argv[1:], job_cost_map, walltime, test_root) - - return success - -############################################################################### -def _main_func(description): -############################################################################### - test_names, test_data, compiler, machine_name, no_run, no_build, no_setup, no_batch, \ - test_root, baseline_root, clean, baseline_cmp_name, baseline_gen_name, namelists_only, \ - project, test_id, parallel_jobs, walltime, single_submit, proc_pool, use_existing, \ - save_timing, queue, allow_baseline_overwrite, output_root, wait, force_procs, force_threads, mpilib, input_dir, pesfile, \ - retry, mail_user, mail_type, wait_check_throughput, wait_check_memory, wait_ignore_namelists, wait_ignore_memleak, allow_pnl, non_local, single_exe = \ - parse_command_line(sys.argv, description) - - success = False - run_count = 0 - while not success and run_count <= retry: - use_existing = use_existing if run_count == 0 else True - success = create_test(test_names, test_data, compiler, machine_name, no_run, no_build, no_setup, no_batch, test_root, - baseline_root, clean, baseline_cmp_name, baseline_gen_name, namelists_only, - project, test_id, parallel_jobs, walltime, single_submit, proc_pool, use_existing, save_timing, - queue, allow_baseline_overwrite, output_root, wait, force_procs, force_threads, mpilib, input_dir, pesfile, - mail_user, mail_type, wait_check_throughput, wait_check_memory, wait_ignore_namelists, wait_ignore_memleak, allow_pnl, non_local, single_exe) - run_count += 1 - - # For testing only - os.environ["TESTBUILDFAIL_PASS"] = "True" - os.environ["TESTRUNFAIL_PASS"] = "True" - - sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) - -############################################################################### +from CIME.scripts.create_test import _main_func if __name__ == "__main__": - _main_func(__doc__) + _main_func() diff --git a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt index 5e018dad2eb..dac4fec1f30 100644 --- a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +++ b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt @@ -1,5 +1,6 @@ set(sources_needed circle.F90) extract_sources("${sources_needed}" "${circle_area_sources}" test_sources) -create_pFUnit_test(pFunit_circle_area pFunittest_circle_area_exe "test_circle.pf" ${test_sources}) - +add_pfunit_ctest(pFunit_circle_area + TEST_SOURCES "test_circle.pf" + OTHER_SOURCES "${test_sources}") diff --git a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf index 6d557d8af56..b59af13d15c 100644 --- a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf +++ b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf @@ -1,6 +1,6 @@ module test_circle -use pfunit_mod +use funit use circle, only: circle_area, pi, r8 diff --git a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt index f370d036242..1bddfc1cbd4 100644 --- a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt +++ 
b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt @@ -1,2 +1,5 @@ extract_sources("interpolate_1d.F90" "${interpolate_sources}" test_sources) -create_pFUnit_test(pFunit_interpolate pFunit_interpolate_exe "test_interpolate_point.pf" ${test_sources}) + +add_pfunit_ctest(pFunit_interpolate + TEST_SOURCES "test_interpolate_point.pf" + OTHER_SOURCES "${test_sources}") diff --git a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf index 0e92bf7e686..295aad0228c 100644 --- a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf +++ b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf @@ -1,6 +1,6 @@ module test_interpolate_point -use pfunit_mod +use funit use interpolate_1d diff --git a/scripts/fortran_unit_testing/README b/scripts/fortran_unit_testing/README index 8bd194ce57d..b91da3e0c65 100644 --- a/scripts/fortran_unit_testing/README +++ b/scripts/fortran_unit_testing/README @@ -104,8 +104,8 @@ Quick guide to the CIME unit testing framework ** Compilers - CIME compiler options This module is also part of the run_tests.py interface; the primary - purpose is to read in flags generated from the config_compilers.xml - file. However, it's also a catch-all for compiler-specific + purpose is to read in flags generated from the cmake_macros + directory. However, it's also a catch-all for compiler-specific information. The details of this module shouldn't be important to most users, most of @@ -130,55 +130,6 @@ Quick guide to the CIME unit testing framework Preprocesses genf90 files and puts them in the output directory. The named list will have generated sources appended to it. -** FindpFUnit - Find module for the pFUnit library - - This is a typical CMake Find module; it defines the following variables - with their conventional CMake meanings: - - - PFUNIT_FOUND - - PFUNIT_LIBRARY - - PFUNIT_LIBRARIES - - PFUNIT_INCLUDE_DIR - - PFUNIT_INCLUDE_DIRS - - Three additional, pFUnit-specific variables are defined: - - - PFUNIT_MODULE_DIR :: Directory with *.mod files. This is already - included in PFUNIT_INCLUDE_DIRS, so you usually shouldn't need - this. - - PFUNIT_DRIVER :: Path to the pFUnit driver source. - - PFUNIT_PARSER :: Path to pFUnitParser.py (the pFUnit preprocessor). - - If run_tests.py can find the pFUnit directory in config_compilers.xml, - the variable $PFUNIT will be set to assist the FindpFUnit module. - Otherwise, you must do one of the following: - - - Define the environment variable $PFUNIT with the location of the - installation. - - Put the pFUnit "bin" directory in your $PATH. - -** pFUnit_utils - pFUnit preprocessing and driver tools - - This module aims to greatly simplify use of the pFUnit parser and - driver. (Currently, it assumes that both are being used.) - - This module requires the variables defined by the FindpFUnit module. - - - add_pFUnit_executable(name pf_file_list output_directory - fortran_source_list). - - This function automatically processes the .pf files to create tests, - then links them with the Fortran files, pFUnit's library, and the - pFUnit driver to create a test executable with the given name. - - The output_directory is a location where generated sources should be - placed; ${CMAKE_CURRENT_BUILD_DIR} is usually a safe place. 
- - - define_pFUnit_failure(test_name) - - This tells CTest to detect test success or failure using regular - expressions appropriate for pFUnit. - ** Sourcelist_utils - Utilities for VPATH emulation This module provides functions for working with lists of source code @@ -282,7 +233,7 @@ end program test_driver module test_circle -use pfunit_mod +use funit use circle, only: circle_area, pi, r8 diff --git a/scripts/fortran_unit_testing/python/printer.py b/scripts/fortran_unit_testing/python/printer.py index dcd5551f5c4..7aa74b3a648 100644 --- a/scripts/fortran_unit_testing/python/printer.py +++ b/scripts/fortran_unit_testing/python/printer.py @@ -54,7 +54,7 @@ def print(self, item, end="\n"): item - Object to be printed. end - String appended to the end. """ - self._output.write(str(item)+end) + self._output.write(str(item) + end) def comment(self, string): """Print the input str as a comment. @@ -72,7 +72,7 @@ def draw_rule(self, char="=", length=50): char - Character that the line is composed of. length - Horizontal line length. """ - self.comment(char*length) + self.comment(char * length) def print_header(self, string): """Write a string into a header, denoting a new output section.""" @@ -87,14 +87,15 @@ def print_error(self, error_message): if self._color: # ANSI sequence turns the text bright red. esc_char = chr(curses.ascii.ESC) - to_red_text = esc_char+"[1;31m" - to_default_text = esc_char+"[0m" + to_red_text = esc_char + "[1;31m" + to_default_text = esc_char + "[0m" else: to_red_text = "" to_default_text = "" - self._error.write(to_red_text+"ERROR: "+ - error_message+to_default_text+"\n") + self._error.write( + to_red_text + "ERROR: " + error_message + to_default_text + "\n" + ) class ScriptPrinter(Printer): @@ -128,7 +129,7 @@ def __init__(self, output=sys.stdout, error=sys.stderr, indent_size=2): def comment(self, string): """Write a comment (prepends "#").""" - self.print("# "+string) + self.print("# " + string) def print_header(self, string): """Write a header in a comment. @@ -147,5 +148,5 @@ def print(self, item, end="\n"): item - Object to be printed. end - String appended to the end. """ - new_item = (" "*self.indent_size*self.indent_level)+str(item) + new_item = (" " * self.indent_size * self.indent_level) + str(item) super(ScriptPrinter, self).print(new_item, end) diff --git a/scripts/fortran_unit_testing/python/test_xml_test_list.py b/scripts/fortran_unit_testing/python/test_xml_test_list.py index 565f12d89f6..df80572ad72 100755 --- a/scripts/fortran_unit_testing/python/test_xml_test_list.py +++ b/scripts/fortran_unit_testing/python/test_xml_test_list.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Unit tests for the xml_test_list module. 
Public classes: @@ -14,6 +14,7 @@ __all__ = ("TestSuitesFromXML", "TestSuitesFromXML") + class TestTestSuiteSpec(unittest.TestCase): """Tests for the TestSuiteSpec class.""" @@ -27,8 +28,7 @@ def test_absolute_path(self): def test_relative_path(self): """TestSuiteSpec works as intended on relative paths.""" spec = TestSuiteSpec("name", [None, None], ["path", "./path"]) - self.assertEqual([abspath("path"), abspath("./path")], - spec.directories) + self.assertEqual([abspath("path"), abspath("./path")], spec.directories) def test_no_path(self): """TestSuiteSpec works with no paths.""" @@ -50,15 +50,16 @@ def test_no_label(self): def test_iterate(self): """TestSuiteSpec provides an iterator over directories.""" spec = TestSuiteSpec("name", ["foo", "bar"], ["/foo", "/bar"]) - self.assertEqual([("foo", "/foo"), ("bar", "/bar")], - list(d for d in spec)) + self.assertEqual([("foo", "/foo"), ("bar", "/bar")], list(d for d in spec)) + class TestSuitesFromXML(unittest.TestCase): """Tests for the suites_from_xml function.""" - def check_spec_list(self, xml_str, names, directories, - known_paths=None, labels=None): + def check_spec_list( + self, xml_str, names, directories, known_paths=None, labels=None + ): """Check that a spec list matches input names and directories. This is used by the following tests to do the dirty work of making @@ -69,25 +70,31 @@ def check_spec_list(self, xml_str, names, directories, xml_tree = ElementTree(XML(xml_str)) spec_list = list(suites_from_xml(xml_tree, known_paths)) - self.assertEqual(len(names), len(directories), - msg="Internal test suite error: name and "+ - "directories lists are different sizes!") + self.assertEqual( + len(names), + len(directories), + msg="Internal test suite error: name and " + + "directories lists are different sizes!", + ) - self.assertEqual(len(spec_list), len(names), - msg="Wrong number of suite specs returned.") + self.assertEqual( + len(spec_list), len(names), msg="Wrong number of suite specs returned." + ) - self.assertEqual(names, - [spec.name for spec in spec_list], - msg="Wrong suite name(s).") + self.assertEqual( + names, [spec.name for spec in spec_list], msg="Wrong suite name(s)." + ) - self.assertEqual(directories, - [spec.directories for spec in spec_list], - msg="Wrong suite path(s).") + self.assertEqual( + directories, + [spec.directories for spec in spec_list], + msg="Wrong suite path(s).", + ) if labels is not None: - self.assertEqual(labels, - [spec.labels for spec in spec_list], - msg="Wrong suite label(s).") + self.assertEqual( + labels, [spec.labels for spec in spec_list], msg="Wrong suite label(s)." 
+ ) def test_no_suites(self): """suites_from_xml output returns empty list for no matches.""" @@ -123,9 +130,9 @@ def test_multiple_suites(self): """ - self.check_spec_list(xml_str, - ["suite1", "suite2"], - [["/the/path"], ["/other/path"]]) + self.check_spec_list( + xml_str, ["suite1", "suite2"], [["/the/path"], ["/other/path"]] + ) def test_path_relative_to_known(self): """suites_from_xml handles a relative_to directory attribute.""" @@ -137,10 +144,9 @@ def test_path_relative_to_known(self): """ - self.check_spec_list(xml_str, - ["suite1"], - [["/foodir/path"]], - known_paths={"foo": "/foodir"}) + self.check_spec_list( + xml_str, ["suite1"], [["/foodir/path"]], known_paths={"foo": "/foodir"} + ) def test_path_with_whitespace(self): """suites_from_xml handles a directory with whitespace added.""" @@ -166,8 +172,7 @@ def test_path_with_label(self): """ - self.check_spec_list(xml_str, ["suite1"], [["/foo"]], - labels=[["foo"]]) + self.check_spec_list(xml_str, ["suite1"], [["/foo"]], labels=[["foo"]]) if __name__ == "__main__": diff --git a/scripts/fortran_unit_testing/python/xml_test_list.py b/scripts/fortran_unit_testing/python/xml_test_list.py index ec0cf68796e..af95bb52ca9 100644 --- a/scripts/fortran_unit_testing/python/xml_test_list.py +++ b/scripts/fortran_unit_testing/python/xml_test_list.py @@ -10,6 +10,7 @@ __all__ = ("TestSuiteSpec", "suites_from_xml") + class TestSuiteSpec(object): """Specification for the location of a test suite. @@ -35,9 +36,10 @@ def __init__(self, name, labels, directories): directories - Path to the test suite. """ - assert (len(labels) == len(directories)), \ - "TestSuiteSpec: Number of spec labels and number of spec "+ \ - "directories do not match." + assert len(labels) == len(directories), ( + "TestSuiteSpec: Number of spec labels and number of spec " + + "directories do not match." + ) self.name = name self.labels = [] @@ -48,15 +50,15 @@ def __init__(self, name, labels, directories): else: self.labels.append(self.UNLABELED_STRING) - self.directories = [os.path.abspath(directory) - for directory in directories] + self.directories = [os.path.abspath(directory) for directory in directories] def __iter__(self): """Iterate over directories. Each iteration yields a (label, directory) pair. """ - return ( (l, d) for l, d in zip(self.labels, self.directories) ) + return ((l, d) for l, d in zip(self.labels, self.directories)) + def suites_from_xml(xml_tree, known_paths=None): """Generate test suite descriptions from XML. @@ -89,10 +91,10 @@ def suites_from_xml(xml_tree, known_paths=None): path = directory.text.strip() if "relative_to" in directory.keys(): relative_to_key = directory.get("relative_to") - assert relative_to_key in known_paths, \ - "suites_from_xml: Unrecognized relative_to attribute." - path = os.path.join(known_paths[relative_to_key], - path) + assert ( + relative_to_key in known_paths + ), "suites_from_xml: Unrecognized relative_to attribute." 
+ path = os.path.join(known_paths[relative_to_key], path) directories.append(path) if "label" in directory.keys(): labels.append(directory.get("label")) diff --git a/scripts/fortran_unit_testing/run_tests.py b/scripts/fortran_unit_testing/run_tests.py index 45e945f3549..6a7df6f1adf 100755 --- a/scripts/fortran_unit_testing/run_tests.py +++ b/scripts/fortran_unit_testing/run_tests.py @@ -1,135 +1,142 @@ -#!/usr/bin/env python -from __future__ import print_function +#!/usr/bin/env python3 import os, sys + _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) +sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) sys.path.append(os.path.join(_CIMEROOT, "scripts", "utils", "python")) sys.path.append(os.path.join(_CIMEROOT, "scripts", "fortran_unit_testing", "python")) from standard_script_setup import * from CIME.BuildTools.configure import configure, FakeCase -from CIME.utils import run_cmd_no_fail, stringify_bool, expect +from CIME.utils import run_cmd_no_fail, stringify_bool, expect, get_src_root, safe_copy from CIME.XML.machines import Machines -from CIME.XML.compilers import Compilers from CIME.XML.env_mach_specific import EnvMachSpecific +from CIME.build import CmakeTmpBuildDir from xml_test_list import TestSuiteSpec, suites_from_xml -import socket -#================================================= +import socket, shutil +from pathlib import Path + +# ================================================= # Standard library modules. -#================================================= +# ================================================= from printer import Printer from shutil import rmtree + # This violates CIME policy - move to CIME/XML directory from xml.etree.ElementTree import ElementTree logger = logging.getLogger(__name__) + def parse_command_line(args): """Command line argument parser for configure.""" - description="""Within build_directory (--build-dir), runs cmake on test + description = """Within build_directory (--build-dir), runs cmake on test specification directories (from --test-spec-dir or --xml-test-list), then builds and runs the tests defined via CMake.""" parser = argparse.ArgumentParser(description=description) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("--build-dir", default=".", - help="""Directory where tests are built and run. Will be created if it does not exist.""" - ) + parser.add_argument( + "--build-dir", + default=".", + help="""Directory where tests are built and run. Will be created if it does not exist.""", + ) - parser.add_argument("--build-optimized", action="store_true", - help="""By default, tests are built with debug flags. + parser.add_argument( + "--build-optimized", + action="store_true", + help="""By default, tests are built with debug flags. If this option is provided, then tests are instead built - in optimized mode.""") - - parser.add_argument("--machine", - help="The machine to create build information for.") - - parser.add_argument("--machines-dir", - help="The machines directory to take build information " - "from. Overrides the CIME_MODEL environment variable, " - "and must be specified if that variable is not set.") - - parser.add_argument("--clean", action="store_true", - help="""Clean build directory before building. 
Removes CMake cache and -runs "make clean".""" - ) - parser.add_argument("--cmake-args", - help="""Additional arguments to pass to CMake.""" - ) - parser.add_argument("--comp-interface", - default="mct", - help="""The cime driver/cpl interface to use.""" - ) - parser.add_argument("--color", action="store_true", - default=sys.stdout.isatty(), - help="""Turn on colorized output.""" - ) - parser.add_argument("--no-color", action="store_false", - help="""Turn off colorized output.""" + in optimized mode.""", + ) + + parser.add_argument( + "--machine", help="The machine to create build information for." + ) + + parser.add_argument( + "--machines-dir", + help="The machines directory to take build information " + "from. Overrides the CIME_MODEL environment variable, " + "and must be specified if that variable is not set.", + ) + + parser.add_argument( + "--clean", + action="store_true", + help="""Clean build directory before building. Removes CMake cache and +runs "make clean".""", + ) + parser.add_argument( + "--cmake-args", help="""Additional arguments to pass to CMake.""" + ) + parser.add_argument( + "--comp-interface", + default="mct", + help="""The cime driver/cpl interface to use.""", + ) + parser.add_argument( + "--color", + action="store_true", + default=sys.stdout.isatty(), + help="""Turn on colorized output.""", + ) + parser.add_argument( + "--no-color", action="store_false", help="""Turn off colorized output.""" ) - parser.add_argument("--compiler", - help="""Compiler vendor for build (supported depends on machine). - If not specified, use the default for this machine.""" - ) - parser.add_argument("--enable-genf90", action="store_true", - default=True, - help="""Use genf90.pl to regenerate out-of-date sources from .F90.in + parser.add_argument( + "--compiler", + help="""Compiler vendor for build (supported depends on machine). + If not specified, use the default for this machine.""", + ) + parser.add_argument( + "--enable-genf90", + action="store_true", + default=True, + help="""Use genf90.pl to regenerate out-of-date sources from .F90.in templates. Not enabled by default because it creates in-source output, and because it -requires genf90.pl to be in the user's path.""" - ) - - parser.add_argument("--make-j", type=int, default=8, - help="""Number of processes to use for build.""" - ) - - parser.add_argument("--use-mpi", action="store_true", - help="""If specified, run unit tests with an mpi-enabled version - of pFUnit, via mpirun. (Default is to use a serial build without - mpirun.) This requires a pFUnit build with MPI support.""") - - parser.add_argument("--mpilib", - help="""MPI Library to use in build. - If not specified, use the default for this machine/compiler. - Must match an MPILIB option in config_compilers.xml. - e.g., for cheyenne, can use 'mpt'. - Only relevant if --use-mpi is specified.""" +requires genf90.pl to be in the user's path.""", ) - parser.add_argument("--mpirun-command", - help="""Command to use to run an MPI executable. - If not specified, uses the default for this machine. - Only relevant if --use-mpi is specified.""" + parser.add_argument( + "--make-j", + type=int, + default=8, + help="""Number of processes to use for build.""", ) + parser.add_argument( - "--test-spec-dir", default=".", + "--test-spec-dir", + default=".", help="""Location where tests are specified. 
- Defaults to current directory.""" - ) + Defaults to current directory.""", + ) parser.add_argument( - "-T", "--ctest-args", - help="""Additional arguments to pass to CTest.""" + "-T", "--ctest-args", help="""Additional arguments to pass to CTest.""" ) parser.add_argument( - "--use-env-compiler", action="store_true", + "--use-env-compiler", + action="store_true", default=False, help="""Always use environment settings to set compiler commands. This is only necessary if using a CIME build type, if the user wants to -override the command provided by Machines.""" +override the command provided by Machines.""", ) parser.add_argument( - "--use-openmp", action="store_true", + "--use-openmp", + action="store_true", help="""If specified, include OpenMP support for tests. (Default is to run without OpenMP support.) This requires a pFUnit build with - OpenMP support.""" + OpenMP support.""", ) parser.add_argument( "--xml-test-list", - help="""Path to an XML file listing directories to run tests from.""" - ) + help="""Path to an XML file listing directories to run tests from.""", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) output = Printer(color=args.color) @@ -137,20 +144,45 @@ def parse_command_line(args): if args.xml_test_list is None and args.test_spec_dir is None: output.print_error( "You must specify either --test-spec-dir or --xml-test-list." - ) + ) raise Exception("Missing required argument.") if args.make_j < 1: raise Exception("--make-j must be >= 1") - return output, args.build_dir, args.build_optimized, args.clean,\ - args.cmake_args, args.compiler, args.enable_genf90, args.machine, args.machines_dir,\ - args.make_j, args.use_mpi, args.mpilib, args.mpirun_command, args.test_spec_dir, args.ctest_args,\ - args.use_openmp, args.xml_test_list, args.verbose, args.comp_interface + return ( + output, + args.build_dir, + args.build_optimized, + args.clean, + args.cmake_args, + args.compiler, + args.enable_genf90, + args.machine, + args.machines_dir, + args.make_j, + args.test_spec_dir, + args.ctest_args, + args.use_openmp, + args.xml_test_list, + args.verbose, + args.comp_interface, + ) -def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_command, output, pfunit_path, - cmake_args=None, clean=False, verbose=False, enable_genf90=True, color=True): +def cmake_stage( + name, + test_spec_dir, + build_optimized, + use_mpiserial, + output, + pfunit_path, + cmake_args=None, + clean=False, + verbose=False, + enable_genf90=True, + color=True, +): """Run cmake in the current working directory. 
Arguments: @@ -168,7 +200,7 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm if not os.path.isfile("CMakeCache.txt"): - output.print_header("Running cmake for "+name+".") + output.print_header("Running cmake for " + name + ".") # This build_type only has limited uses, and should probably be removed, # but for now it's still needed @@ -181,12 +213,13 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm "cmake", "-C Macros.cmake", test_spec_dir, - "-DCIMEROOT="+_CIMEROOT, - "-DCIME_CMAKE_MODULE_DIRECTORY="+os.path.abspath(os.path.join(_CIMEROOT,"src","CMake")), - "-DCMAKE_BUILD_TYPE="+build_type, - "-DPFUNIT_MPIRUN='"+mpirun_command+"'", - "-DPFUNIT_PATH="+pfunit_path - ] + "-DCIMEROOT=" + _CIMEROOT, + "-DSRC_ROOT=" + get_src_root(), + "-DCIME_CMAKE_MODULE_DIRECTORY=" + + os.path.abspath(os.path.join(_CIMEROOT, "CIME", "non_py", "src", "CMake")), + "-DCMAKE_BUILD_TYPE=" + build_type, + "-DCMAKE_PREFIX_PATH=" + pfunit_path, + ] if use_mpiserial: cmake_command.append("-DUSE_MPI_SERIAL=ON") if verbose: @@ -194,10 +227,8 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm if enable_genf90: cmake_command.append("-DENABLE_GENF90=ON") - genf90_dir = os.path.join( - _CIMEROOT,"src","externals","genf90" - ) - cmake_command.append("-DCMAKE_PROGRAM_PATH="+genf90_dir) + genf90_dir = os.path.join(_CIMEROOT, "CIME", "non_py", "externals", "genf90") + cmake_command.append("-DCMAKE_PROGRAM_PATH=" + genf90_dir) if not color: cmake_command.append("-DUSE_COLOR=OFF") @@ -207,6 +238,7 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm run_cmd_no_fail(" ".join(cmake_command), combine_output=True) + def make_stage(name, output, make_j, clean=False, verbose=True): """Run make in the current working directory. @@ -214,54 +246,71 @@ def make_stage(name, output, make_j, clean=False, verbose=True): name - Name for output messages. make_j (int) - number of processes to use for make """ - output.print_header("Running make for "+name+".") + output.print_header("Running make for " + name + ".") if clean: run_cmd_no_fail("make clean") - make_command = ["make","-j",str(make_j)] + make_command = ["make", "-j", str(make_j)] if verbose: make_command.append("VERBOSE=1") run_cmd_no_fail(" ".join(make_command), combine_output=True) -def find_pfunit(compilerobj, mpilib, use_openmp): + +def find_pfunit(caseroot, cmake_args): """Find the pfunit installation we'll be using, and print its path Aborts if necessary information cannot be found. Args: - - compilerobj: Object of type Compilers - - mpilib: String giving the mpi library we're using - - use_openmp: Boolean + - caseroot: The dir with the macros + - cmake_args: The cmake args used to invoke cmake (so that we get the correct makefile vars) """ - attrs = {"MPILIB": mpilib, - "compile_threaded": "TRUE" if use_openmp else "FALSE" - } - - pfunit_path = compilerobj.get_optional_compiler_node("PFUNIT_PATH", attributes=attrs) - expect(pfunit_path is not None, - """PFUNIT_PATH not found for this machine and compiler, with MPILIB={} and compile_threaded={}. 
-You must specify PFUNIT_PATH in config_compilers.xml, with attributes MPILIB and compile_threaded.""".format(mpilib, attrs['compile_threaded'])) - logger.info("Using PFUNIT_PATH: {}".format(compilerobj.text(pfunit_path))) - return compilerobj.text(pfunit_path) - -#================================================= + with CmakeTmpBuildDir(macroloc=caseroot) as cmaketmp: + all_vars = cmaketmp.get_makefile_vars(cmake_args=cmake_args) + + all_vars_list = all_vars.splitlines() + for all_var in all_vars_list: + if ":=" in all_var: + expect(all_var.count(":=") == 1, "Bad makefile line: {}".format(all_var)) + varname, value = [item.strip() for item in all_var.split(":=")] + if varname == "PFUNIT_PATH": + logger.info("Using PFUNIT_PATH: {}".format(value)) + return value + + expect(False, "PFUNIT_PATH not found for this machine and compiler") + return None + +# ================================================= # Iterate over input suite specs, building the tests. -#================================================= +# ================================================= def _main(): - output, build_dir, build_optimized, clean,\ - cmake_args, compiler, enable_genf90, machine, machines_dir,\ - make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args,\ - use_openmp, xml_test_list, verbose, comp_interface \ - = parse_command_line(sys.argv) - -#================================================= -# Find directory and file paths. -#================================================= + ( + output, + build_dir, + build_optimized, + clean, + cmake_args, + compiler, + enable_genf90, + machine, + machines_dir, + make_j, + test_spec_dir, + ctest_args, + use_openmp, + xml_test_list, + verbose, + comp_interface, + ) = parse_command_line(sys.argv) + + # ================================================= + # Find directory and file paths. + # ================================================= suite_specs = [] # TODO: this violates cime policy of direct access to xml # should be moved to CIME/XML @@ -270,15 +319,16 @@ def _main(): test_xml_tree.parse(xml_test_list) known_paths = { "here": os.path.abspath(os.path.dirname(xml_test_list)), - } + } suite_specs.extend(suites_from_xml(test_xml_tree, known_paths)) if test_spec_dir is not None: suite_specs.append( - TestSuiteSpec("__command_line_test__", - ["__command_line_test__"], - [os.path.abspath(test_spec_dir)]) + TestSuiteSpec( + "__command_line_test__", + ["__command_line_test__"], + [os.path.abspath(test_spec_dir)], ) - + ) if machines_dir is not None: machines_file = os.path.join(machines_dir, "config_machines.xml") @@ -298,69 +348,84 @@ def _main(): pwd_contents = os.listdir(os.getcwd()) # Clear CMake cache. for file_ in pwd_contents: - if file_ in ("Macros.cmake", "env_mach_specific.xml") \ - or file_.startswith('Depends') or file_.startswith(".env_mach_specific"): + if ( + file_ in ("Macros.cmake", "env_mach_specific.xml") + or file_.startswith("Depends") + or file_.startswith(".env_mach_specific") + ): os.remove(file_) - #================================================= + # ================================================= # Functions to perform various stages of build. 
- #================================================= + # ================================================= - if not use_mpi: - mpilib = "mpi-serial" - elif mpilib is None: - mpilib = machobj.get_default_MPIlib() - logger.info("Using mpilib: {}".format(mpilib)) + # In the switch from pFUnit3 to pFUnit4, we have dropped support for MPI for now + # because it seems like the way this is done differs for pFUnit4 and we weren't + # leveraging the parallel capabilities of pFUnit anyway. So we force mpilib = + # "mpi-serial" and use_mpiserial = True for now until we need to generalize this. + mpilib = "mpi-serial" + use_mpiserial = True if compiler is None: compiler = machobj.get_default_compiler() logger.info("Compiler is {}".format(compiler)) - compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib) - - pfunit_path = find_pfunit(compilerobj, mpilib=mpilib, use_openmp=use_openmp) - debug = not build_optimized os_ = machobj.get_value("OS") # Create the environment, and the Macros.cmake file # # - configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, - comp_interface, os_, unit_testing=True) + configure( + machobj, + build_dir, + ["CMake"], + compiler, + mpilib, + debug, + comp_interface, + os_, + unit_testing=True, + ) machspecific = EnvMachSpecific(build_dir, unit_testing=True) - fake_case = FakeCase(compiler, mpilib, debug, comp_interface) + fake_case = FakeCase(compiler, mpilib, debug, comp_interface, threading=use_openmp) machspecific.load_env(fake_case) - cmake_args = "{}-DOS={} -DCOMPILER={} -DDEBUG={} -DMPILIB={} -Dcompile_threaded={}".format( - "" if not cmake_args else " ", os_, compiler, stringify_bool(debug), mpilib, stringify_bool(use_openmp)) + cmake_args = ( + "{}-DOS={} -DMACH={} -DCOMPILER={} -DDEBUG={} -DMPILIB={} -Dcompile_threaded={} -DCASEROOT={}".format( + "" if not cmake_args else "{} ".format(cmake_args), + os_, + machobj.get_machine_name(), + compiler, + stringify_bool(debug), + mpilib, + stringify_bool(use_openmp), + build_dir + ) + ) + + pfunit_path = find_pfunit(build_dir, cmake_args) os.environ["UNIT_TEST_HOST"] = socket.gethostname() if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ: # The CMake Netcdf find utility that we use (from pio2) seems to key off # of the environment variable NETCDF, but not NETCDF_PATH - logger.info("Setting NETCDF environment variable: {}".format(os.environ["NETCDF_PATH"])) + logger.info( + "Setting NETCDF environment variable: {}".format(os.environ["NETCDF_PATH"]) + ) os.environ["NETCDF"] = os.environ["NETCDF_PATH"] - if not use_mpi: - mpirun_command = "" - elif mpirun_command is None: - mpi_attribs = { - "compiler" : compiler, - "mpilib" : mpilib, - "threaded" : use_openmp, - "comp_interface" : comp_interface, - "unit_testing" : True - } - - # We can get away with specifying case=None since we're using exe_only=True - mpirun_command, _, _, _ = machspecific.get_mpirun(None, mpi_attribs, None, exe_only=True) - mpirun_command = machspecific.get_resolved_value(mpirun_command) - logger.info("mpirun command is '{}'".format(mpirun_command)) + if "NETCDFROOT" in os.environ and not "NETCDF" in os.environ: + # The CMake Netcdf find utility that we use (from pio2) seems to key off + # of the environment variable NETCDF, but not NETCDFROOT + logger.info( + "Setting NETCDF environment variable: {}".format(os.environ["NETCDFROOT"]) + ) + os.environ["NETCDF"] = os.environ["NETCDFROOT"] -#================================================= -# Run tests. 
-#================================================= + # ================================================= + # Run tests. + # ================================================= for spec in suite_specs: os.chdir(build_dir) @@ -372,29 +437,36 @@ def _main(): os.mkdir(spec.name) for label, directory in spec: - os.chdir(os.path.join(build_dir,spec.name)) + os.chdir(os.path.join(build_dir, spec.name)) if not os.path.isdir(label): os.mkdir(label) os.chdir(label) - name = spec.name+"/"+label + name = spec.name + "/" + label if not os.path.islink("Macros.cmake"): - os.symlink(os.path.join(build_dir,"Macros.cmake"), "Macros.cmake") - use_mpiserial = not use_mpi - cmake_stage(name, directory, build_optimized, use_mpiserial, mpirun_command, output, pfunit_path, verbose=verbose, - enable_genf90=enable_genf90, cmake_args=cmake_args) + os.symlink(os.path.join(build_dir, "Macros.cmake"), "Macros.cmake") + cmake_stage( + name, + directory, + build_optimized, + use_mpiserial, + output, + pfunit_path, + verbose=verbose, + enable_genf90=enable_genf90, + cmake_args=cmake_args, + ) make_stage(name, output, make_j, clean=clean, verbose=verbose) - for spec in suite_specs: - os.chdir(os.path.join(build_dir,spec.name)) + os.chdir(os.path.join(build_dir, spec.name)) for label, directory in spec: - name = spec.name+"/"+label + name = spec.name + "/" + label - output.print_header("Running CTest tests for "+name+".") + output.print_header("Running CTest tests for " + name + ".") ctest_command = ["ctest", "--output-on-failure"] @@ -405,8 +477,11 @@ def _main(): ctest_command.extend(ctest_args.split(" ")) logger.info("Running '{}'".format(" ".join(ctest_command))) - output = run_cmd_no_fail(" ".join(ctest_command), from_dir=label, combine_output=True) + output = run_cmd_no_fail( + " ".join(ctest_command), from_dir=label, combine_output=True + ) logger.info(output) + if __name__ == "__main__": _main() diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py deleted file mode 100644 index 6a42b4ef382..00000000000 --- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py +++ /dev/null @@ -1,139 +0,0 @@ -"""Classes used to write build system files. - -The classes here are used to write out settings for use by Makefile and CMake -build systems. The two relevant classes are CMakeMacroWriter and -MakeMacroWriter, which encapsulate the information necessary to write CMake and -Makefile formatted text, respectively. See the docstrings for those classes for -more. -""" - -# This is not the most useful check. -# pylint: disable=invalid-name - -from CIME.BuildTools.macrowriterbase import MacroWriterBase -from CIME.XML.standard_module_setup import * -logger = logging.getLogger(__name__) - - -class CMakeMacroWriter(MacroWriterBase): - - """Macro writer for the CMake format. - - For details on the provided methods, see MacroWriterBase, which this - class inherits from. - """ - - def __init__(self, output): - """Initialize a CMake macro writer. - - Arguments: - output - File-like object (probably an io.TextIOWrapper), which - will be written to. - """ - super(CMakeMacroWriter, self).__init__(output) - # This counter is for avoiding name conflicts in temporary - # variables used for shell commands. - self._var_num = 0 - - def environment_variable_string(self, name): - """Return an environment variable reference. 
- - >>> import io - >>> s = io.StringIO() - >>> CMakeMacroWriter(s).environment_variable_string("foo") - '$ENV{foo}' - """ - return "$ENV{" + name + "}" - - def shell_command_strings(self, command): - # pylint: disable=line-too-long - """Return strings used to get the output of a shell command. - - >>> import io - >>> s = io.StringIO() - >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings("echo bar") - >>> set_up - 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)' - >>> inline - '${CIME_TEMP_SHELL0}' - >>> tear_down - 'unset(CIME_TEMP_SHELL0)' - """ - # pylint: enable=line-too-long - # Create a unique variable name, then increment variable number - # counter so that we get a different value next time. - var_name = "CIME_TEMP_SHELL" + str(self._var_num) - self._var_num += 1 - set_up = "execute_process(COMMAND " + command + \ - " OUTPUT_VARIABLE " + var_name + \ - " OUTPUT_STRIP_TRAILING_WHITESPACE)" - tear_down = "unset(" + var_name + ")" - return (set_up, "${" + var_name + "}", tear_down) - - def variable_string(self, name): - """Return a string to refer to a variable with the given name. - - >>> import io - >>> s = io.StringIO() - >>> CMakeMacroWriter(s).variable_string("foo") - '${foo}' - """ - return "${" + name + "}" - - def set_variable(self, name, value): - """Write out a statement setting a variable to some value. - - >>> import io - >>> s = io.StringIO() - >>> CMakeMacroWriter(s).set_variable("foo", "bar") - >>> str(s.getvalue()) - 'set(foo "bar")\\n' - """ - value_transformed = self._transform_value(value) - self.write_line("set(" + name + ' "' + value_transformed + '")') - - def start_ifeq(self, left, right): - """Write out a statement to start a conditional block. - - >>> import io - >>> s = io.StringIO() - >>> CMakeMacroWriter(s).start_ifeq("foo", "bar") - >>> str(s.getvalue()) - 'if("foo" STREQUAL "bar")\\n' - """ - self.write_line('if("' + left + '" STREQUAL "' + right + '")') - self.indent_right() - - def end_ifeq(self): - """Write out a statement to end a block started with start_ifeq. - - >>> import io - >>> s = io.StringIO() - >>> writer = CMakeMacroWriter(s) - >>> writer.start_ifeq("foo", "bar") - >>> writer.set_variable("foo2", "bar2") - >>> writer.end_ifeq() - >>> str(s.getvalue()) - 'if("foo" STREQUAL "bar")\\n set(foo2 "bar2")\\nendif()\\n' - """ - self.indent_left() - self.write_line("endif()") - - def _transform_value(self, value): - """Some elements need their values transformed in some way for CMake to handle them properly. - This method does those transformations. - - Args: - - value (str): value of element - - Returns transformed value - """ - - # Not all variables need leading & trailing whitespace removed, but some - # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC, - # SCXX, SFC) are only handled correctly if leading & trailing whitespace - # is removed. It doesn't seem to hurt to remove whitespace from all - # variables. - value_transformed = value.strip() - - return value_transformed diff --git a/scripts/lib/CIME/BuildTools/configure.py b/scripts/lib/CIME/BuildTools/configure.py deleted file mode 100644 index b5980590393..00000000000 --- a/scripts/lib/CIME/BuildTools/configure.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python - -"""This script writes CIME build information to a directory. - -The pieces of information that will be written include: - -1. Machine-specific build settings (i.e. the "Macros" file). -2. File-specific build settings (i.e. 
"Depends" files). -3. Environment variable loads (i.e. the env_mach_specific files). - -The .env_mach_specific.sh and .env_mach_specific.csh files are specific to a -given compiler, MPI library, and DEBUG setting. By default, these will be the -machine's default compiler, the machine's default MPI library, and FALSE, -respectively. These can be changed by setting the environment variables -COMPILER, MPILIB, and DEBUG, respectively. -""" - -from CIME.XML.standard_module_setup import * -from CIME.utils import expect, safe_copy -from CIME.XML.compilers import Compilers -from CIME.XML.env_mach_specific import EnvMachSpecific - -logger = logging.getLogger(__name__) - -def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, - comp_interface, sysos, unit_testing=False): - """Add Macros, Depends, and env_mach_specific files to a directory. - - Arguments: - machobj - Machines argument for this machine. - output_dir - Directory in which to place output. - macros_format - Container containing the string 'Makefile' to produce - Makefile Macros output, and/or 'CMake' for CMake output. - compiler - String containing the compiler vendor to configure for. - mpilib - String containing the MPI implementation to configure for. - debug - Boolean specifying whether debugging options are enabled. - unit_testing - Boolean specifying whether we're running unit tests (as - opposed to a system run) - """ - # Macros generation. - suffixes = {'Makefile': 'make', 'CMake': 'cmake'} - macro_maker = Compilers(machobj, compiler=compiler, mpilib=mpilib) - for form in macros_format: - out_file_name = os.path.join(output_dir,"Macros."+suffixes[form]) - macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form]) - - _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler) - _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, - debug, comp_interface, sysos, unit_testing) - -def _copy_depends_files(machine_name, machines_dir, output_dir, compiler): - """ - Copy any system or compiler Depends files if they do not exist in the output directory - If there is a match for Depends.machine_name.compiler copy that and ignore the others - """ - for suffix in [machine_name, compiler, "{}.{}".format(machine_name, compiler)]: - for extra_suffix in ["", ".cmake"]: - basename = "Depends.{}{}".format(suffix, extra_suffix) - dfile = os.path.join(machines_dir, basename) - outputdfile = os.path.join(output_dir, basename) - if os.path.isfile(dfile) and not os.path.exists(outputdfile): - safe_copy(dfile, outputdfile) - -class FakeCase(object): - - def __init__(self, compiler, mpilib, debug, comp_interface): - # PIO_VERSION is needed to parse config_machines.xml but isn't otherwise used - # by FakeCase - self._vals = {"COMPILER":compiler, "MPILIB":mpilib, "DEBUG":debug, - "COMP_INTERFACE":comp_interface, "PIO_VERSION":2} - - def get_value(self, attrib): - expect(attrib in self._vals, "FakeCase does not support getting value of '%s'" % attrib) - return self._vals[attrib] - -def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, - comp_interface, sysos, unit_testing): - """ - env_mach_specific generation. 
- """ - ems_path = os.path.join(output_dir, "env_mach_specific.xml") - if os.path.exists(ems_path): - logger.warning("{} already exists, delete to replace".format(ems_path)) - return - ems_file = EnvMachSpecific(output_dir, unit_testing=unit_testing) - ems_file.populate(machobj) - ems_file.write() - fake_case = FakeCase(compiler, mpilib, debug, comp_interface) - ems_file.load_env(fake_case) - for shell in ('sh', 'csh'): - ems_file.make_env_mach_specific_file(shell, fake_case) - shell_path = os.path.join(output_dir, ".env_mach_specific." + shell) - with open(shell_path, 'a') as shell_file: - if shell == 'sh': - shell_file.write("\nexport COMPILER={}\n".format(compiler)) - shell_file.write("export MPILIB={}\n".format(mpilib)) - shell_file.write("export DEBUG={}\n".format(repr(debug).upper())) - shell_file.write("export OS={}\n".format(sysos)) - else: - shell_file.write("\nsetenv COMPILER {}\n".format(compiler)) - shell_file.write("setenv MPILIB {}\n".format(mpilib)) - shell_file.write("setenv DEBUG {}\n".format(repr(debug).upper())) - shell_file.write("setenv OS {}\n".format(sysos)) diff --git a/scripts/lib/CIME/BuildTools/macroconditiontree.py b/scripts/lib/CIME/BuildTools/macroconditiontree.py deleted file mode 100644 index 4a7026b88bf..00000000000 --- a/scripts/lib/CIME/BuildTools/macroconditiontree.py +++ /dev/null @@ -1,170 +0,0 @@ -from CIME.XML.standard_module_setup import * -logger = logging.getLogger(__name__) - -class MacroConditionTree(object): # pylint: disable=too-many-instance-attributes - - """Tree containing the various possible settings of a specific macro. - - Unlike the PossibleValues class, this class assumes that we have - finished determining which settings could apply on a given machine. It - also sorts the settings based on the conditions under which they take - effect, in preparation for writing out the Macros file itself. - - Public methods: - merge - write_out - """ - - def __init__(self, name, settings): - """Create a MacroConditionTree recursively. - - Arguments: - name - Name of the variable. - settings - A list of all settings for this variable. - """ - # Search for any conditions controlling the number of settings. - condition = None - # Prefer the COMPILER attribute as the top level attribute, for - # readability of the merged file. - if any("COMPILER" in setting.conditions for setting in settings): - condition = "COMPILER" - else: - # To make merging more effective, sort the conditions. - all_conditions = [] - for setting in settings: - all_conditions += setting.conditions.keys() - if all_conditions: - condition = sorted(all_conditions)[0] - if condition is None: - # If there are no conditions, we have reached a leaf. - # We combine whatever settings are left; there should be at - # most one non-appending setting, or an arbitrary number of - # appending settings. - self._is_leaf = True - self._assignments = [] - self._set_up = [] - self._tear_down = [] - self._do_append = True - for setting in settings: - if not setting.do_append: - self._do_append = False - assert len(settings) == 1, \ - "Internal error in macros: An ambiguity was " \ - "found after the ambiguity check was complete, " \ - "or there is a mixture of appending and initial " \ - "settings in the condition tree." - self._assignments.append((name, setting.value)) - self._set_up += setting.set_up - self._tear_down += setting.tear_down - else: - # If a condition was found, partition the settings depending on - # how they use it, and recursively create a tree for each - # partition. 
- self._is_leaf = False - self._condition = condition - partition = dict() - for setting in settings: - # If some of the settings don't use a condition, we use - # None to represent that. - cond_val = setting.conditions.pop(condition, None) - if cond_val in partition: - partition[cond_val].append(setting) - else: - partition[cond_val] = [setting] - branches = dict() - for cond_val in partition: - branches[cond_val] = \ - MacroConditionTree(name, partition[cond_val]) - self._branches = branches - - # pylint shouldn't concern itself with the way that we access other, since - # it's actually a member of the same class. - # pylint:disable=protected-access - def merge(self, other): - """Merge another tree with this one. - - This should be considered destructive to both trees. The only valid - value is the one that's returned. - """ - if self._is_leaf: - if other._is_leaf: - assert self._do_append == other._do_append, \ - "Internal error in macros: Tried to merge an " \ - "appending tree with a tree containing initial "\ - "settings." - # If both are leaves, just merge the values. - self._assignments += other._assignments - self._set_up += other._set_up - self._tear_down += other._tear_down - return self - else: - # If other is not a leaf, swap the arguments so that self - # is the one that's not a leaf, handled below. - return other.merge(self) - else: - # If self is not a leaf but other is, it should go in - # self._branches[None]. The same goes for the case where the - # conditions don't match, and self._condition is last - # alphabetically. - if other._is_leaf or self._condition > other._condition: - if None in self._branches: - self._branches[None] = self._branches[None].merge(other) - else: - self._branches[None] = other - return self - else: - # If the other condition comes last alphabetically, swap - # the order. - if self._condition < other._condition: - return other.merge(self) - # If neither is a leaf and their conditions match, merge - # their sets of branches. - for (cond_val, other_branch) in other._branches.items(): - if cond_val in self._branches: - self._branches[cond_val] = \ - self._branches[cond_val].merge(other_branch) - else: - self._branches[cond_val] = other_branch - return self - # pylint:enable=protected-access - - def write_out(self, writer): - """Write tree to file. - - The writer argument is an object inheriting from MacroWriterBase. - This function first writes out all the initial settings with - appropriate conditionals, then the appending settings. - """ - if self._is_leaf: - for line in self._set_up: - writer.write_line(line) - for (name, value) in self._assignments: - if self._do_append: - writer.append_variable(name, value) - else: - writer.set_variable(name, value) - for line in self._tear_down: - writer.write_line(line) - else: - condition = self._condition - # Take care of the settings that don't use this condition. - if None in self._branches: - self._branches[None].write_out(writer) - # Now all the if statements. 
- for cond_val in self._branches: - if cond_val is None: - continue - env_ref = writer.variable_string(condition) - writer.start_ifeq(env_ref, cond_val) - self._branches[cond_val].write_out(writer) - writer.end_ifeq() - -def merge_optional_trees(tree, big_tree): - """Merge two MacroConditionTrees when one or both objects may be `None`.""" - if tree is not None: - if big_tree is None: - return tree - else: - return big_tree.merge(tree) - else: - return big_tree diff --git a/scripts/lib/CIME/BuildTools/macrowriterbase.py b/scripts/lib/CIME/BuildTools/macrowriterbase.py deleted file mode 100644 index 7f91ba6b085..00000000000 --- a/scripts/lib/CIME/BuildTools/macrowriterbase.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Classes used to write build system files. - -The classes here are used to write out settings for use by Makefile and CMake -build systems. The two relevant classes are CMakeMacroWriter and -MakeMacroWriter, which encapsulate the information necessary to write CMake and -Makefile formatted text, respectively. See the docstrings for those classes for -more. -""" - -# This is not the most useful check. -# pylint: disable=invalid-name - -from abc import ABCMeta, abstractmethod -from CIME.XML.standard_module_setup import * -from CIME.utils import get_cime_root -from six import add_metaclass - -logger = logging.getLogger(__name__) - -def _get_components(value): - """ - >>> value = '-something ${shell ${NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack' - >>> _get_components(value) - [(False, '-something'), (True, '${NETCDF_PATH}/bin/nf-config --flibs'), (False, '-lblas -llapack')] - >>> value = '${shell ${NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack' - >>> _get_components(value) - [(True, '${NETCDF_PATH}/bin/nf-config --flibs'), (False, '-lblas -llapack')] - >>> value = '${shell ${NETCDF_PATH}/bin/nf-config --flibs}' - >>> _get_components(value) - [(True, '${NETCDF_PATH}/bin/nf-config --flibs')] - """ - value = value.strip() - components = [] - curr_comp = "" - idx = 0 - while idx < len(value): - if value[idx:idx+8] == "${shell ": - if curr_comp: - components.append((False, curr_comp.strip())) - curr_comp = "" - - idx += 8 - brace_cnt = 0 - done = False - while not done: - if value[idx] == "{": - brace_cnt += 1 - curr_comp += value[idx] - - elif value[idx] == "}": - if brace_cnt == 0: - done = True - else: - brace_cnt -= 1 - curr_comp += value[idx] - - else: - curr_comp += value[idx] - - idx += 1 - - components.append((True, curr_comp.strip())) - curr_comp = "" - else: - curr_comp += value[idx] - idx += 1 - - if curr_comp: - components.append((False, curr_comp.strip())) - - return components - -@add_metaclass(ABCMeta) -class MacroWriterBase(object): - - """Abstract base class for macro file writers. - - The methods here come in three flavors: - 1. indent_left/indent_right change the level of indent used internally by - the class. - 2. The various methods ending in "_string" return strings relevant to the - build system. - 3. The other methods write information to the file handle associated with - an individual writer instance. - - Public attributes: - indent_increment - Number of spaces to indent if blocks (does not apply - to format-specific indentation, e.g. cases where - Makefiles must use tabs). - output - File-like object that output is written to. 
- - Public methods: - indent_string - indent_left - indent_right - write_line - environment_variable_string - shell_command_string - variable_string - set_variable - append_variable - start_ifeq - end_ifeq - """ - - indent_increment = 2 - - def __init__(self, output): - """Initialize a macro writer. - - Arguments: - output - File-like object (probably an io.TextIOWrapper), which - will be written to. - """ - self.output = output - self._indent_num = 0 - - def indent_string(self): - """Return an appropriate number of spaces for the indent.""" - return ' ' * self._indent_num - - def indent_left(self): - """Decrease the amount of line indent.""" - self._indent_num -= 2 - - def indent_right(self): - """Increase the amount of line indent.""" - self._indent_num += 2 - - def write_line(self, line): - """Write a single line of output, appropriately indented. - - A trailing newline is added, whether or not the input has one. - """ - self.output.write(u"{}{}\n".format(self.indent_string(), line)) - - @abstractmethod - def environment_variable_string(self, name): - """Return an environment variable reference.""" - - @abstractmethod - def shell_command_strings(self, command): - """Return strings used to get the output of a shell command. - - Implementations should return a tuple of three strings: - 1. A line that is needed to get the output of the command (or None, - if a command can be run inline). - 2. A string that can be used within a line to refer to the output. - 3. A line that does any cleanup of temporary variables (or None, if - no cleanup is necessary). - - Example usage: - - # Get strings and write initial command. - (pre, var, post) = writer.shell_command_strings(command) - if pre is not None: - writer.write(pre) - - # Use the variable to write an if block. - writer.start_ifeq(var, "TRUE") - writer.set_variable("foo", "bar") - writer.end_ifeq() - - # Cleanup - if post is not None: - writer.write(post) - """ - - @abstractmethod - def variable_string(self, name): - """Return a string to refer to a variable with the given name.""" - - @abstractmethod - def set_variable(self, name, value): - """Write out a statement setting a variable to some value.""" - - def append_variable(self, name, value): - """Write out a statement appending a value to a string variable.""" - var_string = self.variable_string(name) - self.set_variable(name, var_string + " " + value) - - @abstractmethod - def start_ifeq(self, left, right): - """Write out a statement to start a conditional block. - - The arguments to this method are compared, and the block is entered - only if they are equal. - """ - - @abstractmethod - def end_ifeq(self): - """Write out a statement to end a block started with start_ifeq.""" diff --git a/scripts/lib/CIME/BuildTools/makemacroswriter.py b/scripts/lib/CIME/BuildTools/makemacroswriter.py deleted file mode 100644 index 559047f9f33..00000000000 --- a/scripts/lib/CIME/BuildTools/makemacroswriter.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Classes used to write build system files. - -The classes here are used to write out settings for use by Makefile and CMake -build systems. The two relevant classes are CMakeMacroWriter and -MakeMacroWriter, which encapsulate the information necessary to write CMake and -Makefile formatted text, respectively. See the docstrings for those classes for -more. -""" - -from CIME.BuildTools.macrowriterbase import MacroWriterBase -from CIME.XML.standard_module_setup import * -#logger = logging.getLogger(__name__) - -# This is not the most useful check. 
-# pylint: disable=invalid-name - -class MakeMacroWriter(MacroWriterBase): - - """Macro writer for the Makefile format. - - For details on the provided methods, see MacroWriterBase, which this - class inherits from. - """ - - def environment_variable_string(self, name): - """Return an environment variable reference. - - >>> import io - >>> s = io.StringIO() - >>> MakeMacroWriter(s).environment_variable_string("foo") - '$(foo)' - """ - return "$(" + name + ")" - - def shell_command_strings(self, command): - """Return strings used to get the output of a shell command. - - >>> import io - >>> s = io.StringIO() - >>> MakeMacroWriter(s).shell_command_strings("echo bar") - (None, '$(shell echo bar)', None) - """ - return (None, "$(shell " + command + ")", None) - - def variable_string(self, name): - """Return a string to refer to a variable with the given name. - - >>> import io - >>> s = io.StringIO() - >>> MakeMacroWriter(s).variable_string("foo") - '$(foo)' - """ - return "$(" + name + ")" - - def set_variable(self, name, value): - """Write out a statement setting a variable to some value. - - >>> import io - >>> s = io.StringIO() - >>> MakeMacroWriter(s).set_variable("foo", "bar") - >>> str(s.getvalue()) - 'foo := bar\\n' - """ - # Note that ":=" is used so that we can control the behavior for - # both Makefile and CMake variables similarly. - self.write_line(name + " := " + value) - - def start_ifeq(self, left, right): - """Write out a statement to start a conditional block. - - >>> import io - >>> s = io.StringIO() - >>> MakeMacroWriter(s).start_ifeq("foo", "bar") - >>> str(s.getvalue()) - 'ifeq (foo,bar)\\n' - """ - self.write_line("ifeq (" + left + "," + right + ")") - self.indent_right() - - def end_ifeq(self): - """Write out a statement to end a block started with start_ifeq. - - >>> import io - >>> s = io.StringIO() - >>> writer = MakeMacroWriter(s) - >>> writer.start_ifeq("foo", "bar") - >>> writer.set_variable("foo2", "bar2") - >>> writer.end_ifeq() - >>> str(s.getvalue()) - 'ifeq (foo,bar)\\n foo2 := bar2\\nendif\\n' - """ - self.indent_left() - self.write_line("endif") diff --git a/scripts/lib/CIME/BuildTools/possiblevalues.py b/scripts/lib/CIME/BuildTools/possiblevalues.py deleted file mode 100644 index 837c702824c..00000000000 --- a/scripts/lib/CIME/BuildTools/possiblevalues.py +++ /dev/null @@ -1,159 +0,0 @@ -from CIME.XML.standard_module_setup import * -from CIME.BuildTools.macroconditiontree import MacroConditionTree - -logger = logging.getLogger(__name__) - -class PossibleValues(object): - - """Holds a list of settings for a single "Macros" variable. - - This helper class takes in variable settings and, for each one, decides - whether to throw it out, add it to the list of values, or replace the - existing list of values with the new, more specific setting. - - This class also performs ambiguity checking; if it is possible at build - time for more than one setting to match the same variable, this is - considered an error. - - Public attributes: - name - The name of the variable. - settings - The current list of possible initial settings for the - variable. - append_settings - A dictionary of lists of possible appending settings - for the variable, with the specificity of each list - as the associated dictionary key. - - Public methods: - add_setting - ambiguity_check - dependencies - to_cond_trees - """ - - def __init__(self, name, setting, specificity, depends): - """Construct a PossibleValues object. - - The name argument refers to the name of the variable. 
The other - arguments are the same as for append_match. - """ - self.name = name - # If this is an appending setting, its specificity can't cause it - # to overwrite other settings. - if setting.do_append: - self.settings = [] - self.append_settings = [setting] - self._specificities = [] - self._depends = [] - self._append_depends = depends - else: - self.settings = [setting] - self.append_settings = [] - self._specificities = [specificity] - self._depends = [depends] - self._append_depends = set() - - def add_setting(self, setting, specificity, depends): - """Add a possible value for a variable. - - Arguments: - setting - A ValueSetting to start the list. - specificity - An integer representing how specific the setting is. - Low-specificity settings that will never be used will be - dropped from the list. The lowest allowed specificity is - 0. - depends - A set of variable names, specifying the variables that - have to be set before this setting can be used (e.g. if - SLIBS refers to NETCDF_PATH, then NETCDF_PATH has to be - set first). - - >>> from CIME.BuildTools.valuesetting import ValueSetting - >>> a = ValueSetting('foo', False, dict(), [], []) - >>> b = ValueSetting('bar', False, dict(), [], []) - >>> vals = PossibleValues('var', a, 0, {'dep1'}) - >>> vals.add_setting(b, 1, {'dep2'}) - >>> a not in vals.settings and b in vals.settings - True - >>> 'dep1' not in vals.dependencies() and 'dep2' in vals.dependencies() - True - >>> vals.add_setting(a, 1, {'dep1'}) - >>> a in vals.settings and b in vals.settings - True - >>> 'dep1' in vals.dependencies() and 'dep2' in vals.dependencies() - True - """ - if setting.do_append: - self.append_settings.append(setting) - self._append_depends |= depends - else: - mark_deletion = [] - for i in range(len(self.settings)): - other_setting = self.settings[i] - other_specificity = self._specificities[i] - # Ignore this case if it's less specific than one we have. - if other_specificity > specificity: - if other_setting.has_special_case(setting): - return - # Override cases that are less specific than this one. - elif other_specificity < specificity: - if setting.has_special_case(other_setting): - mark_deletion.append(i) - mark_deletion.reverse() - for i in mark_deletion: - del self.settings[i] - del self._specificities[i] - del self._depends[i] - self.settings.append(setting) - self._specificities.append(specificity) - self._depends.append(depends) - - def ambiguity_check(self): - """Check the current list of settings for ambiguity. - - This function raises an error if an ambiguity is found. - """ - for i in range(len(self.settings)-1): - for j in range(i+1, len(self.settings)): - if self._specificities[i] != self._specificities[j]: - continue - other = self.settings[j] - expect(not self.settings[i].is_ambiguous_with(other), - "Variable "+self.name+" is set ambiguously in " - "config_compilers.xml. Check the file for these " - "conflicting settings: \n1: {}\n2: {}".format( - self.settings[i].conditions, other.conditions)) - - def dependencies(self): - """Returns a set of names of variables needed to set this variable.""" - depends = self._append_depends.copy() - for other in self._depends: - depends |= other - return depends - - def to_cond_trees(self): - """Convert this object to a pair of MacroConditionTree objects. - - This represents the step where the list of possible values is - frozen and we're ready to convert it into an actual text file. This - object is checked for ambiguities before conversion. - - The return value is a tuple of two items. 
The first is a dict of - condition trees containing all initial settings, with the specificities - as the dictionary keys. The second is a single tree containing all - appending settings. If the appending tree would be empty, None is - returned instead. - """ - self.ambiguity_check() - # Get all values of specificity for which we need to make a tree. - specificities = sorted(list(set(self._specificities))) - # Build trees, starting from the least specific and working up. - normal_trees = {} - for specificity in specificities: - settings_for_tree = [self.settings[i] - for i in range(len(self.settings)) - if self._specificities[i] == specificity] - normal_trees[specificity] = MacroConditionTree(self.name, settings_for_tree) - if self.append_settings: - append_tree = MacroConditionTree(self.name, self.append_settings) - else: - append_tree = None - return (normal_trees, append_tree) diff --git a/scripts/lib/CIME/BuildTools/valuesetting.py b/scripts/lib/CIME/BuildTools/valuesetting.py deleted file mode 100644 index 706e8fb01f4..00000000000 --- a/scripts/lib/CIME/BuildTools/valuesetting.py +++ /dev/null @@ -1,144 +0,0 @@ -from CIME.XML.standard_module_setup import * - -logger = logging.getLogger(__name__) - -class ValueSetting(object): - - """Holds data about how a value can be assigned to a variable. - - Note that this class doesn't know or care *which* variable might be - assigned in this way, only that there is a procedure to perform that - operation - - Public attributes: - value - The actual value that will be set. - do_append - Boolean describing whether the value should be - appended to the existing value of the variable rather - than overwriting other settings. - conditions - Dictionary containing the set of values that different - variables have to have to use this setting (e.g. - DEBUG="TRUE" might be a condition on a debug flag). - set_up - List of any commands that have to be executed in the build - system before this setting can occur. - tear_down - List of any commands that should be executed to clean up - after setting the variable. - - Public methods: - is_ambiguous_with - has_special_case - """ - - def __init__(self, value, do_append, conditions, set_up, tear_down): # pylint: disable=too-many-arguments - """Create a ValueSetting object by specifying all its data.""" - self.value = value - self.do_append = do_append - self.conditions = conditions - self.set_up = set_up - self.tear_down = tear_down - - def is_ambiguous_with(self, other): - """Check to see if this setting conflicts with another one. - - The purpose of this routine is to see if two settings can coexist - in the same Macros file, or if doing so would raise an ambiguity - about which one should be preferred over the other. Note that this - is a symmetric relation (this function returns the same value if - self and other are swapped). - - The rules to determine this are as follows: - - 1) If one or both settings are appending to the value, there's no - ambiguity, because both can cooperate to set the value. - - >>> a = ValueSetting('foo', True, dict(), [], []) - >>> b = ValueSetting('bar', False, dict(), [], []) - >>> a.is_ambiguous_with(b) - False - >>> b.is_ambiguous_with(a) - False - - 2) If the two settings have conflicting conditions, then there - is no ambiguity because they can't both apply to the same - build. 
- - >>> a = ValueSetting('foo', False, {"DEBUG": "TRUE"}, [], []) - >>> b = ValueSetting('bar', False, {"DEBUG": "FALSE"}, [], []) - >>> a.is_ambiguous_with(b) - False - - 3) If one setting is strictly more specific than the other, then - there's no ambiguity, because we prefer the more specific - setting whenever both apply to a build. - - >>> a = ValueSetting('foo', False, {"DEBUG": "TRUE"}, [], []) - >>> b = ValueSetting('bar', False, {"DEBUG": "TRUE", "MPILIB": "mpich2"}, [], []) - >>> a.is_ambiguous_with(b) - False - >>> b.is_ambiguous_with(a) - False - - 4) All other cases are considered ambiguous. - - >>> a = ValueSetting('foo', False, dict(), [], []) - >>> b = ValueSetting('bar', False, dict(), [], []) - >>> a.is_ambiguous_with(b) - True - >>> a = ValueSetting('foo', False, {"DEBUG": "TRUE"}, [], []) - >>> b = ValueSetting('bar', False, {"MPILIB": "mpich2"}, [], []) - >>> a.is_ambiguous_with(b) - True - """ - # Append check. - if self.do_append or other.do_append: - return False - # Consistency check. - for var_name in self.conditions: - if var_name not in other.conditions: - continue - if self.conditions[var_name] != other.conditions[var_name]: - return False - # Specificity check. - self_set = set(self.conditions.keys()) - other_set = set(other.conditions.keys()) - if self_set < other_set or other_set < self_set: - return False - # Any situation we couldn't resolve is ambiguous. - return True - - def has_special_case(self, other): - """Check to see if another setting is a special case of this one. - - The purpose of this routine is to see if one of the settings requires - conditions that are a strict subset of another's conditions. This is - used to check whether a setting can be thrown out entirely in favor of a - more general, but machine-specific setting. - - >>> a = ValueSetting('foo', False, {"DEBUG": "TRUE"}, [], []) - >>> b = ValueSetting('bar', False, {"DEBUG": "TRUE", "MPILIB": "mpich2"}, [], []) - >>> c = ValueSetting('bar', False, {"DEBUG": "TRUE", "compile_threaded": "FALSE"}, [], []) - >>> d = ValueSetting('foo', False, {"DEBUG": "FALSE"}, [], []) - >>> a.has_special_case(b) - True - >>> b.has_special_case(a) - False - >>> a.has_special_case(c) - True - >>> c.has_special_case(a) - False - >>> b.has_special_case(c) - False - >>> c.has_special_case(b) - False - >>> a.has_special_case(a) - True - >>> d.has_special_case(a) - False - >>> d.has_special_case(b) - False - """ - for var_name in self.conditions: - if var_name not in other.conditions: - return False - if self.conditions[var_name] != other.conditions[var_name]: - return False - return True diff --git a/scripts/lib/CIME/Servers/__init__.py b/scripts/lib/CIME/Servers/__init__.py deleted file mode 100644 index 8d22604875c..00000000000 --- a/scripts/lib/CIME/Servers/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -#pylint: disable=import-error -from distutils.spawn import find_executable -has_gftp = find_executable("globus-url-copy") -has_svn = find_executable("svn") -has_wget = find_executable("wget") -has_ftp = True -try: - from ftplib import FTP -except ImportError: - has_ftp = False -if has_ftp: - from CIME.Servers.ftp import FTP -if has_svn: - from CIME.Servers.svn import SVN -if has_wget: - from CIME.Servers.wget import WGET -if has_gftp: - from CIME.Servers.gftp import GridFTP diff --git a/scripts/lib/CIME/Servers/ftp.py b/scripts/lib/CIME/Servers/ftp.py deleted file mode 100644 index dcf94c41f91..00000000000 --- a/scripts/lib/CIME/Servers/ftp.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -FTP Server class. 
Interact with a server using FTP protocol -""" -# pylint: disable=super-init-not-called -from CIME.XML.standard_module_setup import * -from CIME.Servers.generic_server import GenericServer -from ftplib import FTP as FTPpy -from ftplib import all_errors as all_ftp_errors - - -logger = logging.getLogger(__name__) -# I think that multiple inheritence would be useful here, but I couldnt make it work -# in a py2/3 compatible way. -class FTP(GenericServer): - def __init__(self, address, user='', passwd=''): - ftp_server, root_address = address.split('/', 1) - logger.info("server address {} root path {}".format(ftp_server, root_address)) - self.ftp = FTPpy(ftp_server) - if not user: - user = '' - if not passwd: - passwd = '' - - self._ftp_server = address - - stat = self.ftp.login(user, passwd) - logger.debug("login stat {}".format(stat)) - if "Login successful" not in stat: - logging.warning("FAIL: Could not login to ftp server {}\n error {}".format(ftp_server, stat)) - return None - stat = self.ftp.cwd(root_address) - logger.debug("cwd {} stat {}".format(root_address,stat)) - if "Directory successfully changed" not in stat: - logging.warning("FAIL: Could not cd to server root directory {}\n error {}".format(root_address, stat)) - return None - - def fileexists(self, rel_path): - try: - stat = self.ftp.nlst(rel_path) - except all_ftp_errors: - logger.warning("ERROR from ftp server, trying next server") - return False - - if rel_path not in stat: - if not stat or not stat[0].startswith(rel_path): - logging.warning("FAIL: File {} not found.\nerror {}".format(rel_path, stat)) - return False - return True - - def getfile(self, rel_path, full_path): - try: - stat = self.ftp.retrbinary('RETR {}'.format(rel_path), open(full_path, "wb").write) - except all_ftp_errors: - if os.path.isfile(full_path): - os.remove(full_path) - logger.warning("ERROR from ftp server, trying next server") - return False - - if (stat != '226 Transfer complete.'): - logging.warning("FAIL: Failed to retreve file '{}' from FTP repo '{}' stat={}\n". - format(rel_path, self._ftp_server, stat)) - return False - return True - - def getdirectory(self, rel_path, full_path): - try: - stat = self.ftp.nlst(rel_path) - except all_ftp_errors: - logger.warning("ERROR from ftp server, trying next server") - return False - - for _file in stat: - self.getfile(_file, full_path+os.sep+os.path.basename(_file)) diff --git a/scripts/lib/CIME/Servers/generic_server.py b/scripts/lib/CIME/Servers/generic_server.py deleted file mode 100644 index 6cebb46bba5..00000000000 --- a/scripts/lib/CIME/Servers/generic_server.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Generic Server class. 
There should be little or no functionality in this class, it serves only -to make sure that specific server classes maintain a consistant argument list and functionality -so that they are interchangable objects -""" -# pylint: disable=unused-argument - -from CIME.XML.standard_module_setup import * -from socket import _GLOBAL_DEFAULT_TIMEOUT -logger = logging.getLogger(__name__) - -class GenericServer(object): - def __init__(self, host=' ',user=' ', passwd=' ', acct=' ', timeout=_GLOBAL_DEFAULT_TIMEOUT): - raise NotImplementedError - - def fileexists(self, rel_path): - ''' Returns True if rel_path exists on server ''' - raise NotImplementedError - - def getfile(self, rel_path, full_path): - ''' Get file from rel_path on server and place in location full_path on client - fail if full_path already exists on client, return True if successful ''' - raise NotImplementedError diff --git a/scripts/lib/CIME/Servers/gftp.py b/scripts/lib/CIME/Servers/gftp.py deleted file mode 100644 index b99c9f3049f..00000000000 --- a/scripts/lib/CIME/Servers/gftp.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -GridFTP Server class. Interact with a server using GridFTP protocol -""" -# pylint: disable=super-init-not-called -from CIME.XML.standard_module_setup import * -from CIME.Servers.generic_server import GenericServer -from CIME.utils import run_cmd - -logger = logging.getLogger(__name__) - -class GridFTP(GenericServer): - def __init__(self, address, user='', passwd=''): - self._root_address = address - - def fileexists(self, rel_path): - stat,out,err = run_cmd("globus-url-copy -list {}".format(os.path.join(self._root_address, os.path.dirname(rel_path))+os.sep)) - if stat or os.path.basename(rel_path) not in out: - logging.warning("FAIL: File {} not found.\nstat={} error={}".format(rel_path, stat, err)) - return False - return True - - def getfile(self, rel_path, full_path): - stat, _,err = run_cmd("globus-url-copy -v {} file://{}".format(os.path.join(self._root_address, rel_path), full_path)) - - if (stat != 0): - logging.warning("FAIL: GridFTP repo '{}' does not have file '{}' error={}\n". - format(self._root_address,rel_path, err)) - return False - return True - - def getdirectory(self, rel_path, full_path): - stat, _,err = run_cmd("globus-url-copy -v -r {}{} file://{}{}".format(os.path.join(self._root_address, rel_path), os.sep, full_path, os.sep)) - - if (stat != 0): - logging.warning("FAIL: GridFTP repo '{}' does not have directory '{}' error={}\n". - format(self._root_address,rel_path, err)) - return False - return True diff --git a/scripts/lib/CIME/Servers/svn.py b/scripts/lib/CIME/Servers/svn.py deleted file mode 100644 index 3b8dd18ea83..00000000000 --- a/scripts/lib/CIME/Servers/svn.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -SVN Server class. Interact with a server using SVN protocol -""" -# pylint: disable=super-init-not-called -from CIME.XML.standard_module_setup import * -from CIME.Servers.generic_server import GenericServer - -logger = logging.getLogger(__name__) - -class SVN(GenericServer): - def __init__(self, address, user='', passwd=''): - self._args = '' - if user: - self._args += "--username {}".format(user) - if passwd: - self._args += "--password {}".format(passwd) - - self._svn_loc = address - - err = run_cmd("svn --non-interactive --trust-server-cert {} ls {}".format(self._args, address))[0] - if err != 0: - logging.warning( -""" -Could not connect to svn repo '{0}' -This is most likely either a credential, proxy, or network issue . 
-To check connection and store your credential run 'svn ls {0}' and permanently store your password""".format(address)) - return None - - def fileexists(self, rel_path): - full_url = os.path.join(self._svn_loc, rel_path) - stat, out, err = run_cmd("svn --non-interactive --trust-server-cert {} ls {}".format(self._args, full_url)) - if (stat != 0): - logging.warning("FAIL: SVN repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._svn_loc, full_url, out, err)) - return False - return True - - def getfile(self, rel_path, full_path): - if not rel_path: - return False - full_url = os.path.join(self._svn_loc, rel_path) - stat, output, errput = \ - run_cmd("svn --non-interactive --trust-server-cert {} export {} {}".format(self._args, full_url, full_path)) - if (stat != 0): - logging.warning("svn export failed with output: {} and errput {}\n".format(output, errput)) - return False - else: - logging.info("SUCCESS\n") - return True - - def getdirectory(self, rel_path, full_path): - full_url = os.path.join(self._svn_loc, rel_path) - stat, output, errput = \ - run_cmd("svn --non-interactive --trust-server-cert {} export --force {} {}".format(self._args, full_url, full_path)) - if (stat != 0): - logging.warning("svn export failed with output: {} and errput {}\n".format(output, errput)) - return False - else: - logging.info("SUCCESS\n") - return True diff --git a/scripts/lib/CIME/Servers/wget.py b/scripts/lib/CIME/Servers/wget.py deleted file mode 100644 index d96f0cd414f..00000000000 --- a/scripts/lib/CIME/Servers/wget.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -WGET Server class. Interact with a server using WGET protocol -""" -# pylint: disable=super-init-not-called -from CIME.XML.standard_module_setup import * -from CIME.Servers.generic_server import GenericServer -logger = logging.getLogger(__name__) - -class WGET(GenericServer): - def __init__(self, address, user='', passwd=''): - self._args = '' - if user: - self._args += "--user {} ".format(user) - if passwd: - self._args += "--password {} ".format(passwd) - self._server_loc = address - - cmd = "wget {} --no-check-certificate --spider {}".format(self._args, address) - err, output, _ = run_cmd(cmd, combine_output=True) - expect(err == 0,"Could not connect to repo via '{}'\nThis is most likely either a proxy, or network issue.\nOutput:\n{}".format(cmd, output.encode('utf-8'))) - - def fileexists(self, rel_path): - full_url = os.path.join(self._server_loc, rel_path) - stat, out, err = run_cmd("wget {} --no-check-certificate --spider {}".format(self._args, full_url)) - if (stat != 0): - logging.warning("FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8'))) - return False - return True - - def getfile(self, rel_path, full_path): - full_url = os.path.join(self._server_loc, rel_path) - stat, output, errput = \ - run_cmd("wget {} {} -nc --no-check-certificate --output-document {}".format(self._args, full_url, full_path)) - if (stat != 0): - logging.warning("wget failed with output: {} and errput {}\n".format(output.encode('utf-8'), errput.encode('utf-8'))) - # wget puts an empty file if it fails. 
- try: - os.remove(full_path) - except OSError: - pass - return False - else: - logging.info("SUCCESS\n") - return True - - def getdirectory(self, rel_path, full_path): - full_url = os.path.join(self._server_loc, rel_path) - stat, output, errput = \ - run_cmd("wget {} {} -r -N --no-check-certificate --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path) - logger.debug(output) - logger.debug(errput) - if (stat != 0): - logging.warning("wget failed with output: {} and errput {}\n".format(output.encode('utf-8'), errput.encode('utf-8'))) - # wget puts an empty file if it fails. - try: - os.remove(full_path) - except OSError: - pass - return False - else: - logging.info("SUCCESS\n") - return True diff --git a/scripts/lib/CIME/SystemTests/README b/scripts/lib/CIME/SystemTests/README deleted file mode 100644 index 31ee7c4f3e3..00000000000 --- a/scripts/lib/CIME/SystemTests/README +++ /dev/null @@ -1,153 +0,0 @@ -The following are the test functionality categories: - 1) smoke tests - 2) basic reproducibility tests - 3) restart tests - 4) threading/pe-count modification tests - 5) sequencing (layout) modification tests - 6) multi-instance tests - 7) performance tests - 8) spinup tests (TODO) - 9) other component-specific tests - -Some tests not yet implemented in python. They can be found in -cime/scripts/Testing/Testcases - - -NOTES: -- IOP is currently not functional - -====================================================================== - Smoke Tests -====================================================================== - -SMS smoke startup test (default length) - do a 5 day initial test (suffix: base) - if $IOP_ON is set then suffix is base_iop - success for non-iop is just a successful coupler - -====================================================================== - Basic reproducibility Tests -====================================================================== - -REP reproducibility: do two identical runs give the same results? 
- -====================================================================== - Restart Tests -====================================================================== - -ERS exact restart from startup (default 6 days + 5 days) - do an 11 day initial test - write a restart at day 6 (suffix: base) - if $IOP_ON is set then suffix is base_iop - do a 5 day restart test starting from restart at day 6 (suffix: rest) - if $IOP_ON is set then suffix is rest_iop - compare component history files ".base" and ".rest" at day 11 - -ERP pes counts hybrid (open-MP/MPI) restart bfb test from startup, default 6 days + 5 days (previousy PER) - initial pes set up out of the box - do an 11 day initial test - write a restart at day 6 (suffix base) - half the number of tasks and threads for each component - do a 5 day restart test starting from restart at day 6 (suffix rest) - this is just like an ERS test but the pe-counts/threading count are modified on retart - -ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) - (1) ref1case - do an initial for ${STOP_N}/6 writing restarts at ${STOP_N}/6 - ref1 case is a clone of the main case (by default this will be 4 days) - short term archiving is on - (2) ref2case - do a hybrid for ${STOP_N}-${STOP_N}/6 running with ref1 restarts from ${STOP_N}/6 - and writing restarts at ( ${STOP_N} - ${STOP_N}/6 )/2 +1 - (by default will run for 18 days and write a restart after 10 days) - ref2 case is a clone of the main case - short term archiving is on - (3) case - do a branch run starting from restart written in ref2 case - and run for ??? days - (4) case do a restart run from the branch case - -ERT Similar to ERS but longer. 2 months + 1 month - - -====================================================================== - Restart and Archive Tests -====================================================================== -ERR does an ERS test except that after the initial run the short term archive tool is run - which moves model output out of the run directory into the short-term archive directory - then the restart run is staged from the short term archive directory. In batch mode there are - four submitted jobs for this test (mira excepted) these are run1, sta1, run2 and sta2 - run1 and sta1 are submitted together with RESUBMIT=1. sta1 has a batch system dependancy - on successful completion of run1, when sta1 is completed it uses the cime resubmit capabilty - to submit run2. 
- - -====================================================================== - Threading/PE-Counts/Pe-Sequencing Tests -====================================================================== - -PET modified threading openmp bfb test (seq tests) - do an initial run where all components are threaded by default (suffix: base) - do another initial run with nthrds=1 for all components (suffix: single_thread) - compare base and single_thread - -PEM modified pe counts mpi bfb test (seq tests) - do an initial run with default pe layout (suffix: base) - do another initial run with modified pes (NTASKS_XXX => NTASKS_XXX/2) (suffix: modpes) - compare base and single_thread - -PEA single pe bfb test - do an initial run on 1 pe with mpi (suffix: base) - do the same run on 1 pe with mpiserial (suffix: mpiserial) - -====================================================================== - Sequencing (layout) Tests (smoke) -====================================================================== - -SEQ different sequencing bfb test - do an initial run test with out-of-box PE-layout (suffix: base) - do a second run where all root pes are at pe-0 (suffix: seq) - compare base and seq - -====================================================================== - Multi-Instance Tests (smoke) -====================================================================== - -NCK multi-instance validation vs single instance - sequential PE for instances (default length) - do an initial run test with NINST 1 (suffix: base) - do an initial run test with NINST 2 (suffix: multiinst for both _0001 and _0002) - compare base and _0001 and _0002 - -NCR multi-instance validation vs single instance - concurrent PE for instances (default length) - do an initial run test with NINST 1 (suffix: base) - do an initial run test with NINST 2 (suffix: multiinst for both _0001 and _0002) - compare base and _0001 and _0002 - (***note that NCR_script and NCK_script are the same - but NCR_build.csh and NCK_build.csh are different***) - -NOC multi-instance validation for single instance ocean (default length) - do an initial run test with NINST 2 (other than ocn), with mod to instance 1 (suffix: inst1_base, inst2_mod) - do an initial run test with NINST 2 (other than ocn), with mod to instance 2 (suffix: inst1_base, inst2_mod) - compare inst1_base with inst2_base - compare inst1_mod with inst2_mod - - -====================================================================== - Performance Tests -====================================================================== - -PFS system performance test. 
Do 20 day run, no restarts -ICP cice performance test - -====================================================================== - SPINUP tests -====================================================================== - -SSP smoke CLM spinup test (only valid for CLM compsets with CN or BGC) (TODO - change to SPL) - do an initial spin test (setting CLM_BLDNML_OTPS to -bgc_spinup_on) - write restarts at the end of the run - short term archiving is on - do a hybrid non-spinup run run from the restart files generated in the first phase - -====================================================================== - Other component-specific tests -====================================================================== - -LII CLM initial condition interpolation test diff --git a/scripts/lib/CIME/SystemTests/dae.py b/scripts/lib/CIME/SystemTests/dae.py deleted file mode 100644 index bc4a9a24e5b..00000000000 --- a/scripts/lib/CIME/SystemTests/dae.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -Implementation of the CIME data assimilation test: -Compares standard run with run broken into two data assimilation cycles. -Runs a simple DA script on each cycle which performs checks but does not -change any model state (restart files). Compares answers of two runs. - -""" - -import os.path -import logging -import glob -import gzip - -import CIME.XML.standard_module_setup as sms -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo -from CIME.utils import expect - -############################################################################### -class DAE(SystemTestsCompareTwo): -############################################################################### - """ - Implementation of the CIME data assimilation test: - Compares standard run with a run broken into two data assimilation cycles. - Runs a simple DA script on each cycle which performs checks but does not - change any model state (restart files). Compares answers of two runs. 
- Refers to a faux data assimilation script in the - cime/scripts/data_assimilation directory - """ - - ########################################################################### - def __init__(self, case): - ########################################################################### - SystemTestsCompareTwo.__init__(self, case, - separate_builds=False, - run_two_suffix='da', - run_one_description='no data assimilation', - run_two_description='data assimilation') - - ########################################################################### - def _case_one_setup(self): - ########################################################################### - # Even though there may be test mods turning on data assimilation, - # case1 is the control so turn it off - self._case.set_value("DATA_ASSIMILATION_SCRIPT", "") - self._case.set_value("DATA_ASSIMILATION_CYCLES", 1) - - ########################################################################### - def _case_two_setup(self): - ########################################################################### - # Allow testmods to set an assimilation script - if len(self._case.get_value("DATA_ASSIMILATION_SCRIPT")) == 0: - # We need to find the scripts/data_assimilation directory - # LIB_DIR should be our parent dir - da_dir = os.path.join(os.path.dirname(sms.LIB_DIR), "data_assimilation") - expect(os.path.isdir(da_dir), "ERROR: da_dir, '{}', does not exist".format(da_dir)) - da_file = os.path.join(da_dir, "da_no_data_mod.sh") - expect(os.path.isfile(da_file), "ERROR: da_file, '{}', does not exist".format(da_file)) - # Set up two data assimilation cycles each half of the full run - self._case.set_value("DATA_ASSIMILATION_SCRIPT", da_file) - - # We need at least 2 DA cycles - da_cycles = self._case.get_value("DATA_ASSIMILATION_CYCLES") - if da_cycles < 2: - da_cycles = 2 - self._case.set_value("DATA_ASSIMILATION_CYCLES", da_cycles) - stopn = self._case.get_value("STOP_N") - expect((stopn % da_cycles) == 0, "ERROR: DAE test with {0} cycles requires that STOP_N be divisible by {0}".format(da_cycles)) - stopn = int(stopn / da_cycles) - self._case.set_value("STOP_N", stopn) - - self._case.flush() - - ########################################################################### - def run_phase(self): # pylint: disable=arguments-differ - ########################################################################### - # Clean up any da.log files in case this is a re-run. - self._activate_case2() - case_root = self._get_caseroot2() - rundir2 = self._case.get_value("RUNDIR") - da_files = glob.glob(os.path.join(rundir2, 'da.log.*')) - for file_ in da_files: - os.remove(file_) - # End for - - # CONTINUE_RUN ends up TRUE, set it back in case this is a re-run. 
- with self._case: - self._case.set_value("CONTINUE_RUN", False) - # Turn off post DA in case this is a re-run - for comp in self._case.get_values("COMP_CLASSES"): - if comp == "ESP": - continue - else: - self._case.set_value("DATA_ASSIMILATION_{}".format(comp), False) - - # Start normal run here - self._activate_case1() - SystemTestsCompareTwo.run_phase(self) - - # Do some checks on the data assimilation 'output' from case2 - self._activate_case2() - da_files = glob.glob(os.path.join(rundir2, 'da.log.*')) - if da_files is None: - logger = logging.getLogger(__name__) - path = os.path.join(case_root, 'da.log.*') - logger.warning("No DA files in {}".format(path)) - - da_cycles = self._case.get_value("DATA_ASSIMILATION_CYCLES") - expect((da_files is not None) and (len(da_files) == da_cycles), - "ERROR: There were {:d} DA cycles in run but {:d} DA files were found".format(da_cycles, len(da_files) if da_files is not None else 0)) - da_files.sort() - cycle_num = 0 - compset = self._case.get_value("COMPSET") - # Special case for DWAV so we can make sure other variables are set - is_dwav = '_DWAV' in compset - for fname in da_files: - found_caseroot = False - found_cycle = False - found_signal = 0 - found_init = 0 - if is_dwav: - expected_init = self._case.get_value("NINST_WAV") - else: - # Expect a signal from every instance of every DA component - expected_init = 0 - for comp in self._case.get_values("COMP_CLASSES"): - if comp == "ESP": - continue - elif self._case.get_value("DATA_ASSIMILATION_{}".format(comp)): - expected_init = expected_init + self._case.get_value("NINST_{}".format(comp)) - - # Adjust expected initial run and post-DA numbers - if cycle_num == 0: - expected_signal = 0 - else: - expected_signal = expected_init - expected_init = 0 - - with gzip.open(fname, "r") as dfile: - for bline in dfile: - line = bline.decode("utf-8") - expect(not 'ERROR' in line, "ERROR, error line {} found in {}".format(line, fname)) - if 'caseroot' in line[0:8]: - found_caseroot = True - elif 'cycle' in line[0:5]: - found_cycle = True - expect(int(line[7:]) == cycle_num, - "ERROR: Wrong cycle ({:d}) found in {} (expected {:d})".format(int(line[7:]), fname, cycle_num)) - elif 'resume signal' in line: - found_signal = found_signal + 1 - expect('Post-DA resume signal found' in line[0:27], - "ERROR: bad post-DA message found in {}".format(fname)) - elif 'Initial run' in line: - found_init = found_init + 1 - expect('Initial run signal found' in line[0:24], - "ERROR: bad Initial run message found in {}".format(fname)) - else: - expect(False, "ERROR: Unrecognized line ('{}') found in {}".format(line, fname)) - - # End for - expect(found_caseroot, "ERROR: No caseroot found in {}".format(fname)) - expect(found_cycle, "ERROR: No cycle found in {}".format(fname)) - expect(found_signal == expected_signal, - "ERROR: Expected {} post-DA resume signal message(s), {} found in {}".format(expected_signal, found_signal, fname)) - expect(found_init == expected_init, - "ERROR: Expected {} Initial run message(s), {} found in {}".format(expected_init, found_init, fname)) - # End with - cycle_num = cycle_num + 1 - # End for diff --git a/scripts/lib/CIME/SystemTests/eri.py b/scripts/lib/CIME/SystemTests/eri.py deleted file mode 100644 index 35877bcd748..00000000000 --- a/scripts/lib/CIME/SystemTests/eri.py +++ /dev/null @@ -1,226 +0,0 @@ -""" -CIME ERI test This class inherits from SystemTestsCommon -""" -from CIME.XML.standard_module_setup import * -from CIME.utils import safe_copy -from CIME.SystemTests.system_tests_common 
import SystemTestsCommon -import shutil, glob, os - -logger = logging.getLogger(__name__) - -def _helper(dout_sr, refdate, refsec, rundir): - rest_path = os.path.join(dout_sr, "rest", "{}-{}".format(refdate, refsec)) - - for item in glob.glob("{}/*{}*".format(rest_path, refdate)): - dst = os.path.join(rundir, os.path.basename(item)) - if os.path.exists(dst): - os.remove(dst) - os.symlink(item, dst) - - for item in glob.glob("{}/*rpointer*".format(rest_path)): - safe_copy(item, rundir) - -class ERI(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the ERI system test - """ - SystemTestsCommon.__init__(self, case) - self._testname = "ERI" - - def run_phase(self): - caseroot = self._case.get_value("CASEROOT") - clone1_path = "{}.ref1".format(caseroot) - clone2_path = "{}.ref2".format(caseroot) - #self._case.set_value("CHECK_TIMING", False) - - # - # clone the main case to create ref1 and ref2 cases - # - for clone_path in [clone1_path, clone2_path]: - if os.path.exists(clone_path): - shutil.rmtree(clone_path) - - clone1, clone2 = [self._case.create_clone(clone_path, keepexe=True) for clone_path in [clone1_path, clone2_path]] - orig_case = self._case - orig_casevar = orig_case.get_value("CASE") - # - # determine run lengths needed below - # - stop_n = self._case.get_value("STOP_N") - stop_option = self._case.get_value("STOP_OPTION") - run_startdate = self._case.get_value("RUN_STARTDATE") - - stop_n1 = int(stop_n / 6) - rest_n1 = stop_n1 - start_1 = run_startdate - - stop_n2 = stop_n - stop_n1 - rest_n2 = int(stop_n2 / 2 + 1) - hist_n = stop_n2 - - start_1_year, start_1_month, start_1_day = [int(item) for item in start_1.split("-")] - start_2_year = start_1_year + 2 - start_2 = "{:04d}-{:02d}-{:02d}".format(start_2_year, start_1_month, start_1_day) - - stop_n3 = stop_n2 - rest_n2 - rest_n3 = int(stop_n3 / 2 + 1) - - stop_n4 = stop_n3 - rest_n3 - - expect(stop_n4 >= 1 and stop_n1 >= 1, "Run length too short") - - # - # (1) Test run: - # do an initial ref1 case run - # cloned the case and running there - # (NOTE: short term archiving is on) - # - - os.chdir(clone1_path) - self._set_active_case(clone1) - - logger.info("ref1 startup: doing a {} {} startup run from {} and 00000 seconds".format(stop_n1, stop_option, start_1)) - logger.info(" writing restarts at {} {}".format(rest_n1, stop_option)) - logger.info(" short term archiving is on ") - - with clone1: - clone1.set_value("CONTINUE_RUN", False) - clone1.set_value("RUN_STARTDATE", start_1) - clone1.set_value("STOP_N", stop_n1) - clone1.set_value("REST_OPTION", stop_option) - clone1.set_value("REST_N", rest_n1) - clone1.set_value("HIST_OPTION", "never") - - dout_sr1 = clone1.get_value("DOUT_S_ROOT") - - # force cam namelist to write out initial file at end of run - if os.path.exists("user_nl_cam"): - if "inithist" not in open("user_nl_cam", "r").read(): - with open("user_nl_cam", "a") as fd: - fd.write("inithist = 'ENDOFRUN'\n") - - with clone1: - clone1.case_setup(test_mode=True, reset=True) - # if the initial case is hybrid this will put the reference data in the correct location - clone1.check_all_input_data() - - self._skip_pnl = False - self.run_indv(st_archive=True, suffix=None) - - # - # (2) Test run: - # do a hybrid ref2 case run - # cloned the main case and running with ref1 restarts - # (NOTE: short term archiving is on) - # - - os.chdir(clone2_path) - self._set_active_case(clone2) - - # Set startdate to start2, set ref date based on ref1 restart - refdate_2 = run_cmd_no_fail(r'ls -1dt 
{}/rest/*-00000* | head -1 | sed "s/-00000.*//" | sed "s/^.*rest\///"'.format(dout_sr1)) - refsec_2 = "00000" - - logger.info("ref2 hybrid: doing a {} {} startup hybrid run".format(stop_n2, stop_option)) - logger.info(" starting from {} and using ref1 {} and {} seconds".format(start_2, refdate_2, refsec_2)) - logger.info(" writing restarts at {} {}".format(rest_n2, stop_option)) - logger.info(" short term archiving is on ") - - # setup ref2 case - with clone2: - clone2.set_value("RUN_TYPE", "hybrid") - clone2.set_value("RUN_STARTDATE", start_2) - clone2.set_value("RUN_REFCASE", "{}.ref1".format(orig_casevar)) - clone2.set_value("RUN_REFDATE", refdate_2) - clone2.set_value("RUN_REFTOD", refsec_2) - clone2.set_value("GET_REFCASE", False) - clone2.set_value("CONTINUE_RUN", False) - clone2.set_value("STOP_N", stop_n2) - clone2.set_value("REST_OPTION", stop_option) - clone2.set_value("REST_N", rest_n2) - clone2.set_value("HIST_OPTION", stop_option) - clone2.set_value("HIST_N", hist_n) - - rundir2 = clone2.get_value("RUNDIR") - dout_sr2 = clone2.get_value("DOUT_S_ROOT") - - _helper(dout_sr1, refdate_2, refsec_2, rundir2) - - # run ref2 case (all component history files will go to short term archiving) - with clone2: - clone2.case_setup(test_mode=True, reset=True) - - self._skip_pnl = False - self.run_indv(suffix="hybrid", st_archive=True) - - # - # (3a) Test run: - # do a branch run from ref2 restart (short term archiving is off) - # - - os.chdir(caseroot) - self._set_active_case(orig_case) - - refdate_3 = run_cmd_no_fail(r'ls -1dt {}/rest/*-00000* | head -1 | sed "s/-00000.*//" | sed "s/^.*rest\///"'.format(dout_sr2)) - refsec_3 = "00000" - - logger.info("branch: doing a {} {} branch".format(stop_n3, stop_option)) - logger.info(" starting from ref2 {} and {} seconds restarts".format(refdate_3, refsec_3)) - logger.info(" writing restarts at {} {}".format(rest_n3, stop_option)) - logger.info(" short term archiving is off") - - self._case.set_value("RUN_TYPE" , "branch") - self._case.set_value("RUN_REFCASE" , "{}.ref2".format(self._case.get_value("CASE"))) - self._case.set_value("RUN_REFDATE" , refdate_3) - self._case.set_value("RUN_REFTOD" , refsec_3) - self._case.set_value("GET_REFCASE" , False) - self._case.set_value("CONTINUE_RUN" , False) - self._case.set_value("STOP_N" , stop_n3) - self._case.set_value("REST_OPTION" , stop_option) - self._case.set_value("REST_N" , rest_n3) - self._case.set_value("HIST_OPTION" , stop_option) - self._case.set_value("HIST_N" , stop_n2) - self._case.set_value("DOUT_S" , False) - self._case.flush() - - rundir = self._case.get_value("RUNDIR") - if not os.path.exists(rundir): - os.makedirs(rundir) - - _helper(dout_sr2, refdate_3, refsec_3, rundir) - - # link the hybrid history files from ref2 to the run dir for comparison - for item in glob.iglob("%s/*.hybrid"%rundir2): - newfile = "{}".format(item.replace(".ref2", "")) - newfile = os.path.basename(newfile) - dst = os.path.join(rundir, newfile) - if os.path.exists(dst): - os.remove(dst) - os.symlink(item, dst) - - self._skip_pnl = False - # run branch case (short term archiving is off) - self.run_indv() - - # - # (3b) Test run: - # do a restart continue from (3a) (short term archiving off) - # - - logger.info("branch restart: doing a {} {} continue restart test".format(stop_n4, stop_option)) - - self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("STOP_N", stop_n4) - self._case.set_value("REST_OPTION", "never") - self._case.set_value("DOUT_S", False) - self._case.set_value("HIST_OPTION", stop_option) 
- self._case.set_value("HIST_N", hist_n) - self._case.flush() - - # do the restart run (short term archiving is off) - self.run_indv(suffix="rest") - - self._component_compare_test("base", "hybrid") - self._component_compare_test("base", "rest") diff --git a/scripts/lib/CIME/SystemTests/erio.py b/scripts/lib/CIME/SystemTests/erio.py deleted file mode 100644 index a52dd9a8433..00000000000 --- a/scripts/lib/CIME/SystemTests/erio.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -ERIO tests restart with different PIO methods - -This class inherits from SystemTestsCommon -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon - -logger = logging.getLogger(__name__) - -class ERIO(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to file env_test.xml in the case directory - """ - SystemTestsCommon.__init__(self, case, expected=["TEST"]) - - self._pio_types = self._case.get_env("run").get_valid_values("PIO_TYPENAME") - self._stop_n = self._case.get_value("STOP_N") - - def _full_run(self, pio_type): - stop_option = self._case.get_value("STOP_OPTION") - expect(self._stop_n > 0, "Bad STOP_N: {:d}".format(self._stop_n)) - - # Move to config_tests.xml once that's ready - rest_n = int(self._stop_n/2) + 1 - self._case.set_value("REST_N", rest_n) - self._case.set_value("REST_OPTION", stop_option) - self._case.set_value("HIST_N", self._stop_n) - self._case.set_value("HIST_OPTION", stop_option) - self._case.set_value("CONTINUE_RUN", False) - self._case.flush() - - expect(self._stop_n > 2, "ERROR: stop_n value {:d} too short".format(self._stop_n)) - logger.info("doing an {0} {1} initial test with restart file at {2} {1} with pio type {3}".format(str(self._stop_n), stop_option, str(rest_n), pio_type)) - self.run_indv(suffix=pio_type) - - def _restart_run(self, pio_type, other_pio_type): - stop_option = self._case.get_value("STOP_OPTION") - - rest_n = int(self._stop_n/2) + 1 - stop_new = self._stop_n - rest_n - expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,self._stop_n,rest_n)) - - self._case.set_value("STOP_N", stop_new) - self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") - self._case.flush() - logger.info("doing an {} {} restart test with {} against {}".format(str(stop_new), stop_option, pio_type, other_pio_type)) - - suffix = "{}.{}".format(other_pio_type, pio_type) - self.run_indv(suffix=suffix) - - # Compare restart file - self._component_compare_test(other_pio_type, suffix) - - def run_phase(self): - - for idx, pio_type1 in enumerate(self._pio_types): - if pio_type1 != "default": - self._case.set_value("PIO_TYPENAME", pio_type1) - self._full_run(pio_type1) - for pio_type2 in self._pio_types[idx+1:]: - if pio_type2 != "default": - self._case.set_value("PIO_TYPENAME", pio_type2) - self._restart_run(pio_type2, pio_type1) diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py deleted file mode 100644 index f80c82ce7ff..00000000000 --- a/scripts/lib/CIME/SystemTests/erp.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -CIME ERP test. This class inherits from RestartTest - -This is a pes counts hybrid (open-MP/MPI) restart bfb test from -startup. This is just like an ERS test but the pe-counts/threading -count are modified on restart. 
-(1) Do an initial run with pes set up out of the box (suffix base) -(2) Do a restart test with half the number of tasks and threads (suffix rest) -""" - -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.restart_tests import RestartTest - -logger = logging.getLogger(__name__) - -class ERP(RestartTest): - - def __init__(self, case): - """ - initialize a test object - """ - RestartTest.__init__(self, case, - separate_builds = True, - run_two_suffix = 'rest', - run_one_description = 'initial', - run_two_description = 'restart') - - def _case_two_setup(self): - # halve the number of tasks and threads - for comp in self._case.get_values("COMP_CLASSES"): - ntasks = self._case1.get_value("NTASKS_{}".format(comp)) - nthreads = self._case1.get_value("NTHRDS_{}".format(comp)) - rootpe = self._case1.get_value("ROOTPE_{}".format(comp)) - if ( nthreads > 1 ): - self._case.set_value("NTHRDS_{}".format(comp), int(nthreads/2)) - if ( ntasks > 1 ): - self._case.set_value("NTASKS_{}".format(comp), int(ntasks/2)) - self._case.set_value("ROOTPE_{}".format(comp), int(rootpe/2)) - - RestartTest._case_two_setup(self) - self._case.case_setup(test_mode=True, reset=True) - # Note, some components, like CESM-CICE, have - # decomposition information in env_build.xml that - # needs to be regenerated for the above new tasks and thread counts - - def _case_one_custom_postrun_action(self): - self.copy_case1_restarts_to_case2() diff --git a/scripts/lib/CIME/SystemTests/err.py b/scripts/lib/CIME/SystemTests/err.py deleted file mode 100644 index 9c0cd885af7..00000000000 --- a/scripts/lib/CIME/SystemTests/err.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -CIME ERR test This class inherits from ERS -ERR tests short term archiving and restart capabilities -""" -import glob, os -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.restart_tests import RestartTest -from CIME.utils import ls_sorted_by_mtime, safe_copy - -logger = logging.getLogger(__name__) - -class ERR(RestartTest): - def __init__(self, case): # pylint: disable=super-init-not-called - """ - initialize an object interface to the ERR system test - """ - super(ERR, self).__init__(case, - separate_builds = False, - run_two_suffix = 'rest', - run_one_description = 'initial', - run_two_description = 'restart', - multisubmit = True) - - def _case_one_setup(self): - super(ERR, self)._case_one_setup() - self._case.set_value("DOUT_S", True) - - def _case_two_setup(self): - super(ERR, self)._case_two_setup() - self._case.set_value("DOUT_S", False) - - def _case_two_custom_prerun_action(self): - dout_s_root = self._case1.get_value("DOUT_S_ROOT") - rest_root = os.path.abspath(os.path.join(dout_s_root,"rest")) - restart_list = ls_sorted_by_mtime(rest_root) - expect(len(restart_list) >= 1, "No restart files found in {}".format(rest_root)) - self._case.restore_from_archive(rest_dir= - os.path.join(rest_root, restart_list[0])) - - def _case_two_custom_postrun_action(self): - # Link back to original case1 name - # This is needed so that the necessary files are present for - # baseline comparison and generation, - # since some of them may have been moved to the archive directory - for case_file in glob.iglob(os.path.join(self._case1.get_value("RUNDIR"), - "*.nc.{}".format(self._run_one_suffix))): - orig_file = case_file[:-(1+len(self._run_one_suffix))] - if not os.path.isfile(orig_file): - safe_copy(case_file, orig_file) diff --git a/scripts/lib/CIME/SystemTests/ers.py b/scripts/lib/CIME/SystemTests/ers.py deleted file mode 100644 index 
17dc50c77c3..00000000000 --- a/scripts/lib/CIME/SystemTests/ers.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -CIME restart test This class inherits from SystemTestsCommon -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon - -logger = logging.getLogger(__name__) - -class ERS(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the ERS system test - """ - SystemTestsCommon.__init__(self, case) - - def _ers_first_phase(self): - stop_n = self._case.get_value("STOP_N") - stop_option = self._case.get_value("STOP_OPTION") - rest_n = self._case.get_value("REST_N") - expect(stop_n > 0, "Bad STOP_N: {:d}".format(stop_n)) - - expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n)) - logger.info("doing an {0} {1} initial test with restart file at {2} {1}".format(str(stop_n), stop_option, str(rest_n))) - self.run_indv() - - def _ers_second_phase(self): - stop_n = self._case.get_value("STOP_N") - stop_option = self._case.get_value("STOP_OPTION") - - rest_n = int(stop_n/2 + 1) - stop_new = stop_n - rest_n - expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n)) - - self._case.set_value("HIST_N", stop_n) - self._case.set_value("STOP_N", stop_new) - self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") - self._case.flush() - logger.info("doing an {} {} restart test".format(str(stop_new), stop_option)) - self._skip_pnl=False - self.run_indv(suffix="rest") - - # Compare restart file - self._component_compare_test("base", "rest") - - def run_phase(self): - self._ers_first_phase() - self._ers_second_phase() diff --git a/scripts/lib/CIME/SystemTests/ers2.py b/scripts/lib/CIME/SystemTests/ers2.py deleted file mode 100644 index fbfc185f98f..00000000000 --- a/scripts/lib/CIME/SystemTests/ers2.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -CIME restart test 2 This class inherits from SystemTestsCommon -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon - -logger = logging.getLogger(__name__) - -class ERS2(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the ERS2 system test - """ - SystemTestsCommon.__init__(self, case) - - def _ers2_first_phase(self): - stop_n = self._case.get_value("STOP_N") - stop_option = self._case.get_value("STOP_OPTION") - rest_n = self._case.get_value("REST_N") - - # Don't need restarts for first run - self._case.set_value("REST_OPTION","never") - - expect(stop_n > 0, "Bad STOP_N: {:d}".format(stop_n)) - expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n)) - - logger.info("doing an {0} {1} initial test with restart file at {2} {1}".format(str(stop_n), stop_option, str(rest_n))) - self.run_indv() - - def _ers2_second_phase(self): - stop_n = self._case.get_value("STOP_N") - stop_option = self._case.get_value("STOP_OPTION") - - rest_n = stop_n/2 + 1 - stop_new = rest_n - - self._case.set_value("REST_OPTION",stop_option) - self._case.set_value("STOP_N", stop_new) - self._case.flush() - logger.info("doing first part {} {} restart test".format(str(stop_new), stop_option)) - self.run_indv(suffix="intermediate") - - stop_new = stop_n - rest_n - self._case.set_value("STOP_N", stop_new) - self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") - - logger.info("doing second part {} {} restart test".format(str(stop_new), stop_option)) - 
self.run_indv(suffix="rest") - - # Compare restart file - self._component_compare_test("base", "rest") - - def run_phase(self): - self._ers2_first_phase() - self._ers2_second_phase() diff --git a/scripts/lib/CIME/SystemTests/funit.py b/scripts/lib/CIME/SystemTests/funit.py deleted file mode 100644 index f1c084eba06..00000000000 --- a/scripts/lib/CIME/SystemTests/funit.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -CIME FUNIT test. This class inherits from SystemTestsCommon. It runs -the fortran unit tests; grid and compset are ignored. -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon -from CIME.build import post_build -from CIME.utils import append_testlog, get_cime_root -from CIME.test_status import * - -logger = logging.getLogger(__name__) - -class FUNIT(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the FUNIT system test - """ - SystemTestsCommon.__init__(self, case) - case.load_env() - - def build_phase(self, sharedlib_only=False, model_only=False): - if not sharedlib_only: - exeroot = self._case.get_value("EXEROOT") - logfile = os.path.join(exeroot, "funit.bldlog") - with open(logfile, "w") as fd: - fd.write("No-op\n") - - post_build(self._case, [logfile], build_complete=True) - - def get_test_spec_dir(self): - """ - Override this to change what gets tested. - """ - return get_cime_root() - - def run_phase(self): - - rundir = self._case.get_value("RUNDIR") - exeroot = self._case.get_value("EXEROOT") - mach = self._case.get_value("MACH") - - log = os.path.join(rundir, "funit.log") - if os.path.exists(log): - os.remove(log) - - test_spec_dir = self.get_test_spec_dir() - unit_test_tool = os.path.abspath(os.path.join(get_cime_root(),"scripts","fortran_unit_testing","run_tests.py")) - args = "--build-dir {} --test-spec-dir {} --machine {}".format(exeroot, test_spec_dir, mach) - stat = run_cmd("{} {} >& funit.log".format(unit_test_tool, args), from_dir=rundir)[0] - - append_testlog(open(os.path.join(rundir, "funit.log"), "r").read()) - - expect(stat == 0, "RUN FAIL for FUNIT") - - # Funit is a bit of an oddball test since it's not really running the E3SM model - # We need to override some methods to make the core infrastructure work. - - def _generate_baseline(self): - with self._test_status: - self._test_status.set_status(GENERATE_PHASE, TEST_PASS_STATUS) - - def _compare_baseline(self): - with self._test_status: - self._test_status.set_status(BASELINE_PHASE, TEST_PASS_STATUS) diff --git a/scripts/lib/CIME/SystemTests/homme.py b/scripts/lib/CIME/SystemTests/homme.py deleted file mode 100644 index 277a72f0049..00000000000 --- a/scripts/lib/CIME/SystemTests/homme.py +++ /dev/null @@ -1,99 +0,0 @@ -""" -CIME HOMME test. 
This class inherits from SystemTestsCommon -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon -from CIME.build import post_build -from CIME.utils import append_testlog, SharedArea -from CIME.test_status import * - -import shutil -from distutils import dir_util - -logger = logging.getLogger(__name__) - -class HOMME(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the SMS system test - """ - SystemTestsCommon.__init__(self, case) - case.load_env() - - def build_phase(self, sharedlib_only=False, model_only=False): - if not sharedlib_only: - # Build HOMME - srcroot = self._case.get_value("SRCROOT") - mach = self._case.get_value("MACH") - procs = self._case.get_value("TOTALPES") - exeroot = self._case.get_value("EXEROOT") - baseline = self._case.get_value("BASELINE_ROOT") - basecmp = self._case.get_value("BASECMP_CASE") - compare = self._case.get_value("COMPARE_BASELINE") - gmake = self._case.get_value("GMAKE") - gmake_j = self._case.get_value("GMAKE_J") - cprnc = self._case.get_value("CCSM_CPRNC") - - if compare: - basename = basecmp - baselinedir = baseline - else: - basename = "" - baselinedir = exeroot - - cmake_cmd = "cmake -C {0}/components/homme/cmake/machineFiles/{1}.cmake -DUSE_NUM_PROCS={2} {0}/components/homme -DHOMME_BASELINE_DIR={3}/{4} -DCPRNC_DIR={5}/..".format(srcroot, mach, procs, baselinedir, basename, cprnc) - - run_cmd_no_fail(cmake_cmd, arg_stdout="homme.bldlog", combine_output=True, from_dir=exeroot) - run_cmd_no_fail("{} -j{} VERBOSE=1 test-execs".format(gmake, gmake_j), arg_stdout="homme.bldlog", combine_output=True, from_dir=exeroot) - - post_build(self._case, [os.path.join(exeroot, "homme.bldlog")], build_complete=True) - - def run_phase(self): - - rundir = self._case.get_value("RUNDIR") - exeroot = self._case.get_value("EXEROOT") - baseline = self._case.get_value("BASELINE_ROOT") - compare = self._case.get_value("COMPARE_BASELINE") - generate = self._case.get_value("GENERATE_BASELINE") - basegen = self._case.get_value("BASEGEN_CASE") - gmake = self._case.get_value("GMAKE") - - log = os.path.join(rundir, "homme.log") - if os.path.exists(log): - os.remove(log) - - if generate: - full_baseline_dir = os.path.join(baseline, basegen, "tests", "baseline") - stat = run_cmd("{} -j 4 baseline".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] - if stat == 0: - if os.path.isdir(full_baseline_dir): - shutil.rmtree(full_baseline_dir) - - with SharedArea(): - dir_util.copy_tree(os.path.join(exeroot, "tests", "baseline"), full_baseline_dir, preserve_mode=False) - - elif compare: - stat = run_cmd("{} -j 4 check".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] - - else: - stat = run_cmd("{} -j 4 baseline".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] - stat = run_cmd("{} -j 4 check".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] - - # Add homme.log output to TestStatus.log so that it can - # appear on the dashboard. Otherwise, the TestStatus.log - # is pretty useless for this test. - append_testlog(open(log, "r").read()) - - expect(stat == 0, "RUN FAIL for HOMME") - - # Homme is a bit of an oddball test since it's not really running the E3SM model - # We need to override some methods to make the core infrastructure work. 
- - def _generate_baseline(self): - with self._test_status: - self._test_status.set_status(GENERATE_PHASE, TEST_PASS_STATUS) - - def _compare_baseline(self): - with self._test_status: - self._test_status.set_status(BASELINE_PHASE, TEST_PASS_STATUS) diff --git a/scripts/lib/CIME/SystemTests/icp.py b/scripts/lib/CIME/SystemTests/icp.py deleted file mode 100644 index 86195c193e4..00000000000 --- a/scripts/lib/CIME/SystemTests/icp.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -CIME ICP test This class inherits from SystemTestsCommon -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon - -class ICP(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to file env_test.xml in the case directory - """ - SystemTestsCommon.__init__(self, case) - - def build_phase(self, sharedlib_only=False, model_only=False): - self._case.set_value("CICE_AUTO_DECOMP", "false") - - def run_phase(self): - self._case.set_value("CONTINUE_RUN",False) - self._case.set_value("REST_OPTION","none") - self._case.set_value("HIST_OPTION","$STOP_OPTION") - self._case.set_value("HIST_N","$STOP_N") - self._case.flush() - - self.run_indv(self) diff --git a/scripts/lib/CIME/SystemTests/irt.py b/scripts/lib/CIME/SystemTests/irt.py deleted file mode 100644 index 68dd744d896..00000000000 --- a/scripts/lib/CIME/SystemTests/irt.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Implementation of the CIME IRT. (Interim Restart Test) -This test the model's restart capability as well as the short term archiver's interim restart capability - -(1) Do a Run of length N with restart at N/2 and DOUT_S_SAVE_INTERIM_RESTART set to TRUE -(2) Archive Run using ST archive tools -(3) Recover first interim restart to the case2 run directory -(4) Start case2 from restart and run to the end of case1 -(5) compare results. - -""" - -from CIME.SystemTests.restart_tests import RestartTest -from CIME.XML.standard_module_setup import * -from CIME.utils import ls_sorted_by_mtime - -logger = logging.getLogger(__name__) - -class IRT(RestartTest): - - def __init__(self, case): - RestartTest.__init__(self, case, - separate_builds=False, - run_two_suffix = 'restart', - run_one_description = 'initial', - run_two_description = 'restart', - multisubmit = False) - self._skip_pnl = False - - def _case_one_custom_postrun_action(self): - self._case.case_st_archive() - # Since preview namelist is run before _case_two_prerun_action, we need to do this here. - dout_s_root = self._case1.get_value("DOUT_S_ROOT") - restart_list = ls_sorted_by_mtime(os.path.join(dout_s_root,"rest")) - logger.info("Restart directory list is {}".format(restart_list)) - expect(len(restart_list) >=2,"Expected at least two restart directories") - # Get the older of the two restart directories - self._case2.restore_from_archive(rest_dir=os.path.abspath( - os.path.join(dout_s_root, "rest", restart_list[0]))) diff --git a/scripts/lib/CIME/SystemTests/ldsta.py b/scripts/lib/CIME/SystemTests/ldsta.py deleted file mode 100644 index 442780b7a32..00000000000 --- a/scripts/lib/CIME/SystemTests/ldsta.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -CIME last date short term archiver test. 
This class inherits from SystemTestsCommon -It does a run without restarting, then runs the archiver with various last-date parameters -The test verifies the archive directory contains the expected files -""" - -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon -from CIME.utils import expect -from CIME.date import get_file_date - -import datetime -import glob -import os -import random -import shutil - -logger = logging.getLogger(__name__) - -# datetime objects can't be used anywhere else -def _date_to_datetime(date_obj): - return datetime.datetime(year = date_obj.year(), - month = date_obj.month(), - day = date_obj.day(), - hour = date_obj.hour(), - minute = date_obj.minute(), - second = date_obj.second()) - -class LDSTA(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the SMS system test - """ - SystemTestsCommon.__init__(self, case) - - def run_phase(self): - archive_dir = self._case.get_value('DOUT_S_ROOT') - if os.path.isdir(archive_dir): - shutil.rmtree(archive_dir) - self.run_indv() - # finished running, so all archive files should exist - start_date = _date_to_datetime(get_file_date(self._case.get_value('RUN_STARTDATE'))) - rest_dir = os.path.join(archive_dir, 'rest') - delta_day = datetime.timedelta(1) - current_date = start_date + delta_day - next_datecheck = current_date - days_left = self._case.get_value('STOP_N') - final_date = start_date + delta_day * days_left - while current_date < final_date: - logger.info('Testing archiving with last date: {}'.format(current_date)) - current_date_str = '{:04}-{:02}-{:02}'.format(current_date.year, - current_date.month, - current_date.day) - self._case.case_st_archive(last_date_str=current_date_str, copy_only=False) - archive_dates = [_date_to_datetime(get_file_date(fname)) - for fname in glob.glob(os.path.join(rest_dir, '*'))] - while next_datecheck <= current_date: - expect(next_datecheck in archive_dates, - 'Not all dates generated and/or archived: ' - + '{} is missing'.format(next_datecheck)) - next_datecheck += delta_day - for date in archive_dates: - expect(date <= current_date, - 'Archived date greater than specified by last-date: ' - + '{}'.format(date)) - num_days = random.randint(1, min(3, days_left)) - days_left -= num_days - current_date += num_days * delta_day diff --git a/scripts/lib/CIME/SystemTests/mcc.py b/scripts/lib/CIME/SystemTests/mcc.py deleted file mode 100644 index 96ab0c28ce0..00000000000 --- a/scripts/lib/CIME/SystemTests/mcc.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Implemetation of CIME MCC test: Compares ensemble methods - -This does two runs: In the first we run a three member ensemble using the - MULTI_DRIVER capability, then we run a second single instance case and compare -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo - -logger = logging.getLogger(__name__) - - -class MCC(SystemTestsCompareTwo): - - def __init__(self, case): - self._comp_classes = [] - self._test_instances = 3 - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'single_instance', - run_two_description = 'single instance', - run_one_description = 'multi driver') - - def _case_one_setup(self): - # The multicoupler case will increase the number of tasks by the - # number of requested couplers. 
- self._case.set_value("MULTI_DRIVER",True) - self._case.set_value("NINST", self._test_instances) - - def _case_two_setup(self): - self._case.set_value("NINST", 1) diff --git a/scripts/lib/CIME/SystemTests/mvk.py b/scripts/lib/CIME/SystemTests/mvk.py deleted file mode 100644 index c7d26309a06..00000000000 --- a/scripts/lib/CIME/SystemTests/mvk.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -Multivariate test for climate reproducibility using the Kolmogrov-Smirnov (K-S) -test and based on The CESM/E3SM model's multi-instance capability is used to -conduct an ensemble of simulations starting from different initial conditions. - -This class inherits from SystemTestsCommon. -""" - -import os -import json -import logging - -import CIME.test_status -from CIME.SystemTests.system_tests_common import SystemTestsCommon -from CIME.case.case_setup import case_setup -from CIME.hist_utils import _get_all_hist_files -from CIME.utils import safe_copy, SharedArea - -import evv4esm # pylint: disable=import-error -from evv4esm.__main__ import main as evv # pylint: disable=import-error - -evv_lib_dir = os.path.abspath(os.path.dirname(evv4esm.__file__)) -logger = logging.getLogger(__name__) - -NINST = 20 - - -class MVK(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the MVK test - """ - SystemTestsCommon.__init__(self, case) - - if self._case.get_value("RESUBMIT") == 0 \ - and self._case.get_value("GENERATE_BASELINE") is False: - self._case.set_value("COMPARE_BASELINE", True) - else: - self._case.set_value("COMPARE_BASELINE", False) - - def build_phase(self, sharedlib_only=False, model_only=False): - # Only want this to happen once. It will impact the sharedlib build - # so it has to happen there. - if not model_only: - logging.warning('Starting to build multi-instance exe') - for comp in self._case.get_values("COMP_CLASSES"): - self._case.set_value('NTHRDS_{}'.format(comp), 1) - - ntasks = self._case.get_value("NTASKS_{}".format(comp)) - - self._case.set_value('NTASKS_{}'.format(comp), ntasks * NINST) - if comp != 'CPL': - self._case.set_value('NINST_{}'.format(comp), NINST) - - self._case.set_value('ATM_NCPL', 18) - - self._case.flush() - - case_setup(self._case, test_mode=False, reset=True) - - self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) - - for iinst in range(1, NINST + 1): - with open('user_nl_cam_{:04d}'.format(iinst), 'w') as nl_atm_file: - nl_atm_file.write('new_random = .true.\n') - nl_atm_file.write('pertlim = 1.0e-10\n') - nl_atm_file.write('seed_custom = {}\n'.format(iinst)) - - def _generate_baseline(self): - """ - generate a new baseline case based on the current test - """ - super(MVK, self)._generate_baseline() - - with SharedArea(): - basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASEGEN_CASE")) - - rundir = self._case.get_value("RUNDIR") - ref_case = self._case.get_value("RUN_REFCASE") - - model = 'cam' - hists = _get_all_hist_files(model, rundir, [r'h\d*.*\.nc'], ref_case=ref_case) - logger.debug("MVK additional baseline files: {}".format(hists)) - for hist in hists: - basename = hist[hist.rfind(model):] - baseline = os.path.join(basegen_dir, basename) - if os.path.exists(baseline): - os.remove(baseline) - - safe_copy(hist, baseline, preserve_meta=False) - - def _compare_baseline(self): - with self._test_status: - if int(self._case.get_value("RESUBMIT")) > 0: - # This is here because the comparison is run for each submission - # and we only want to compare once the whole run is 
finished. We - # need to return a pass here to continue the submission process. - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) - return - - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_FAIL_STATUS) - - run_dir = self._case.get_value("RUNDIR") - case_name = self._case.get_value("CASE") - base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASECMP_CASE")) - - test_name = "{}".format(case_name.split('.')[-1]) - evv_config = { - test_name: { - "module": os.path.join(evv_lib_dir, "extensions", "ks.py"), - "test-case": "Test", - "test-dir": run_dir, - "ref-case": "Baseline", - "ref-dir": base_dir, - "var-set": "default", - "ninst": NINST, - "critical": 13 - } - } - - json_file = os.path.join(run_dir, '.'.join([case_name, 'json'])) - with open(json_file, 'w') as config_file: - json.dump(evv_config, config_file, indent=4) - - evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv'])) - evv(['-e', json_file, '-o', evv_out_dir]) - - with open(os.path.join(evv_out_dir, 'index.json')) as evv_f: - evv_status = json.load(evv_f) - - for evv_elem in evv_status['Data']['Elements']: - if evv_elem['Type'] == 'ValSummary' \ - and evv_elem['TableTitle'] == 'Kolmogorov-Smirnov test': - if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass': - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) - break diff --git a/scripts/lib/CIME/SystemTests/pea.py b/scripts/lib/CIME/SystemTests/pea.py deleted file mode 100644 index cb3d3f9344c..00000000000 --- a/scripts/lib/CIME/SystemTests/pea.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Implementation of the CIME PEA test. - -Builds runs and compares a single processor mpi model to a model built using mpi-serial -(1) do a run with default mpi library (suffix base) -(2) do a run with mpi-serial (suffix mpi-serial) -""" - -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo -from CIME.XML.standard_module_setup import * -from CIME.XML.machines import Machines - -logger = logging.getLogger(__name__) - -class PEA(SystemTestsCompareTwo): - - def __init__(self, case): - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'mpi-serial', - run_one_description = 'default mpi library', - run_two_description = 'mpi-serial') - - def _common_setup(self): - for comp in self._case.get_values("COMP_CLASSES"): - self._case.set_value("NTASKS_{}".format(comp), 1) - self._case.set_value("NTHRDS_{}".format(comp), 1) - self._case.set_value("ROOTPE_{}".format(comp), 0) - - def _case_one_setup(self): - pass - - def _case_two_setup(self): - mach_name = self._case.get_value("MACH") - mach_obj = Machines(machine=mach_name) - if mach_obj.is_valid_MPIlib("mpi-serial"): - self._case.set_value("MPILIB","mpi-serial") - else: - logger.warning("mpi-serial is not supported on machine '{}', " - "so we have to fall back to default MPI and " - "therefore very little is being tested".format(mach_name)) - - if os.path.isfile("Macros"): - os.remove("Macros") - self._case.case_setup(test_mode=True, reset=True) diff --git a/scripts/lib/CIME/SystemTests/pem.py b/scripts/lib/CIME/SystemTests/pem.py deleted file mode 100644 index 34bb67ae8c1..00000000000 --- a/scripts/lib/CIME/SystemTests/pem.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Implementation of the CIME PEM test: Tests bfb with different MPI -processor counts - -This is just like running a smoke test twice - but the 
pe-counts -are modified the second time. -(1) Run with pes set up out of the box (suffix base) -(2) Run with half the number of tasks (suffix modpes) -""" - -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo - -logger = logging.getLogger(__name__) - -class PEM(SystemTestsCompareTwo): - - def __init__(self, case): - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'modpes', - run_one_description = 'default pe counts', - run_two_description = 'halved pe counts') - - def _case_one_setup(self): - pass - - def _case_two_setup(self): - for comp in self._case.get_values("COMP_CLASSES"): - ntasks = self._case.get_value("NTASKS_{}".format(comp)) - if ( ntasks > 1 ): - self._case.set_value("NTASKS_{}".format(comp), int(ntasks/2)) - self._case.case_setup(test_mode=True, reset=True) diff --git a/scripts/lib/CIME/SystemTests/pet.py b/scripts/lib/CIME/SystemTests/pet.py deleted file mode 100644 index 17f1fbe8c8e..00000000000 --- a/scripts/lib/CIME/SystemTests/pet.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -Implementation of the CIME PET test. This class inherits from SystemTestsCommon - -This is an openmp test to determine that changing thread counts does not change answers. -(1) do an initial run where all components are threaded by default (suffix: base) -(2) do another initial run with nthrds=1 for all components (suffix: single_thread) -""" - -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo - -logger = logging.getLogger(__name__) - -class PET(SystemTestsCompareTwo): - - def __init__(self, case): - """ - initialize a test object - """ - SystemTestsCompareTwo.__init__(self, case, - separate_builds = False, - multisubmit=True, - run_two_suffix = 'single_thread', - run_one_description = 'default threading', - run_two_description = 'threads set to 1') - - def _case_one_setup(self): - # first make sure that all components have threaded settings - for comp in self._case.get_values("COMP_CLASSES"): - if self._case.get_value("NTHRDS_{}".format(comp)) <= 1: - self._case.set_value("NTHRDS_{}".format(comp), 2) - - # Need to redo case_setup because we may have changed the number of threads - - - def _case_two_setup(self): - #Do a run with all threads set to 1 - for comp in self._case.get_values("COMP_CLASSES"): - self._case.set_value("NTHRDS_{}".format(comp), 1) - - # Need to redo case_setup because we may have changed the number of threads - self._case.case_setup(reset=True, test_mode=True) diff --git a/scripts/lib/CIME/SystemTests/pgn.py b/scripts/lib/CIME/SystemTests/pgn.py deleted file mode 100644 index 026c7a6e599..00000000000 --- a/scripts/lib/CIME/SystemTests/pgn.py +++ /dev/null @@ -1,248 +0,0 @@ -""" -Perturbation Growth New (PGN) - The CESM/ACME model's -multi-instance capability is used to conduct an ensemble -of simulations starting from different initial conditions. - -This class inherits from SystemTestsCommon. 
- -""" - -from __future__ import division - -import os -import re -import json -import shutil -import logging - -import pandas as pd -import numpy as np -from collections import OrderedDict - -import CIME.test_status -from CIME.SystemTests.system_tests_common import SystemTestsCommon -from CIME.case.case_setup import case_setup -from CIME.utils import expect - -import evv4esm # pylint: disable=import-error -from evv4esm.extensions import pg # pylint: disable=import-error -from evv4esm.__main__ import main as evv # pylint: disable=import-error - -evv_lib_dir = os.path.abspath(os.path.dirname(evv4esm.__file__)) - -logger = logging.getLogger(__name__) - -NUMBER_INITIAL_CONDITIONS = 6 -PERTURBATIONS = OrderedDict([('woprt', 0.0), - ('posprt', 1.0e-14), - ('negprt', -1.0e-14), - ]) -FCLD_NC = 'cam.h0.cloud.nc' -INIT_COND_FILE_TEMPLATE = \ - "SMS_Ly5.ne4_ne4.FC5AV1C-04P2.eos_intel.ne45y.{}.{}.0002-{:02d}-01-00000.nc" -# FIXME: should 'cam' be 'atm' now? -INSTANCE_FILE_TEMPLATE = '{}cam_{:04d}.h0.0001-01-01-00000{}.nc' - - -class PGN(SystemTestsCommon): - - def __init__(self, case): - """ - initialize an object interface to the PGN test - """ - super(PGN, self).__init__(case) - - def build_phase(self, sharedlib_only=False, model_only=False): - ninst = NUMBER_INITIAL_CONDITIONS * len(PERTURBATIONS) - logger.debug('PGN_INFO: number of instance: '+str(ninst)) - - default_ninst = self._case.get_value("NINST_ATM") - - if default_ninst == 1: # if multi-instance is not already set - # Only want this to happen once. It will impact the sharedlib build - # so it has to happen here. - if not model_only: - # Lay all of the components out concurrently - logger.debug("PGN_INFO: Updating NINST for multi-instance in " - "env_mach_pes.xml") - for comp in ['ATM', 'OCN', 'WAV', 'GLC', 'ICE', 'ROF', 'LND']: - ntasks = self._case.get_value("NTASKS_{}".format(comp)) - self._case.set_value("ROOTPE_{}".format(comp), 0) - self._case.set_value("NINST_{}".format(comp), ninst) - self._case.set_value("NTASKS_{}".format(comp), ntasks*ninst) - - self._case.set_value("ROOTPE_CPL", 0) - self._case.set_value("NTASKS_CPL", ntasks*ninst) - self._case.flush() - - case_setup(self._case, test_mode=False, reset=True) - - self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) - - logger.debug("PGN_INFO: Updating user_nl_* files") - - csmdata_root = self._case.get_value("DIN_LOC_ROOT") - csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v1_init") - csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_v1_init/b58d55680") - - iinst = 1 - for icond in range(1, NUMBER_INITIAL_CONDITIONS + 1): - fatm_in = os.path.join(csmdata_atm, INIT_COND_FILE_TEMPLATE.format('cam', 'i', icond)) - flnd_in = os.path.join(csmdata_lnd, INIT_COND_FILE_TEMPLATE.format('clm2', 'r', icond)) - for iprt in PERTURBATIONS.values(): - with open('user_nl_cam_{:04d}'.format(iinst), 'w') as atmnlfile, \ - open('user_nl_clm_{:04d}'.format(iinst), 'w') as lndnlfile: - - atmnlfile.write("ncdata = '{}' \n".format(fatm_in)) - lndnlfile.write("finidat = '{}' \n".format(flnd_in)) - - atmnlfile.write("avgflag_pertape = 'I' \n") - atmnlfile.write("nhtfrq = 1 \n") - atmnlfile.write("mfilt = 2 \n") - atmnlfile.write("ndens = 1 \n") - atmnlfile.write("pergro_mods = .true. \n") - atmnlfile.write("pergro_test_active = .true. 
\n") - - if iprt != 0.0: - atmnlfile.write("pertlim = {} \n".format(iprt)) - - iinst += 1 - - self._case.set_value("STOP_N", "1") - self._case.set_value("STOP_OPTION", "nsteps") - - def get_var_list(self): - """ - Get variable list for pergro specific output vars - """ - rundir = self._case.get_value("RUNDIR") - prg_fname = 'pergro_ptend_names.txt' - var_file = os.path.join(rundir, prg_fname) - expect(os.path.isfile(var_file), - "File {} does not exist in: {}".format(prg_fname, rundir)) - - with open(var_file, 'r') as fvar: - var_list = fvar.readlines() - - return list(map(str.strip, var_list)) - - def _compare_baseline(self): - """ - Compare baselines in the pergro test sense. That is, - compare PGE from the test simulation with the baseline - cloud - """ - with self._test_status: - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_FAIL_STATUS) - - logger.debug("PGN_INFO:BASELINE COMPARISON STARTS") - - run_dir = self._case.get_value("RUNDIR") - case_name = self._case.get_value("CASE") - base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASECMP_CASE")) - - var_list = self.get_var_list() - - test_name = "{}".format(case_name.split('.')[-1]) - evv_config = { - test_name: { - "module": os.path.join(evv_lib_dir, "extensions", "pg.py"), - "test-case": case_name, - "test-name": "Test", - "test-dir": run_dir, - "ref-name": "Baseline", - "ref-dir": base_dir, - "variables": var_list, - "perturbations": PERTURBATIONS, - "pge-cld": FCLD_NC, - "ninit": NUMBER_INITIAL_CONDITIONS, - "init-file-template": INIT_COND_FILE_TEMPLATE, - "instance-file-template": INSTANCE_FILE_TEMPLATE, - } - } - - json_file = os.path.join(run_dir, '.'.join([case_name, 'json'])) - with open(json_file, 'w') as config_file: - json.dump(evv_config, config_file, indent=4) - - evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv'])) - evv(['-e', json_file, '-o', evv_out_dir]) - - with open(os.path.join(evv_out_dir, 'index.json'), 'r') as evv_f: - evv_status = json.load(evv_f) - - for evv_elem in evv_status['Data']['Elements']: - if evv_elem['Type'] == 'ValSummary' \ - and evv_elem['TableTitle'] == 'Perturbation growth test': - if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass': - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) - break - - def run_phase(self): - logger.debug("PGN_INFO: RUN PHASE") - - self.run_indv() - - # Here were are in case directory, we need to go to the run directory - # and rename files - rundir = self._case.get_value("RUNDIR") - casename = self._case.get_value("CASE") - logger.debug("PGN_INFO: Case name is:{}".format(casename)) - - for icond in range(NUMBER_INITIAL_CONDITIONS): - for iprt, (prt_name, prt_value) in enumerate(PERTURBATIONS.items()): - iinst = pg._sub2instance(icond, iprt, len(PERTURBATIONS)) - fname = os.path.join(rundir, INSTANCE_FILE_TEMPLATE.format(casename + '.', iinst, '')) - renamed_fname = re.sub(r'\.nc$', '_{}.nc'.format(prt_name), fname) - - logger.debug("PGN_INFO: fname to rename:{}".format(fname)) - logger.debug("PGN_INFO: Renamed file:{}".format(renamed_fname)) - try: - shutil.move(fname, renamed_fname) - except IOError: - expect(os.path.isfile(renamed_fname), - "ERROR: File {} does not exist".format(renamed_fname)) - logger.debug("PGN_INFO: Renamed file already exists:" - "{}".format(renamed_fname)) - - logger.debug("PGN_INFO: RUN PHASE ENDS") - - def _generate_baseline(self): - super(PGN, self)._generate_baseline() - - basegen_dir 
= os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASEGEN_CASE")) - - rundir = self._case.get_value("RUNDIR") - casename = self._case.get_value("CASE") - - var_list = self.get_var_list() - nvar = len(var_list) - nprt = len(PERTURBATIONS) - rmse_prototype = {} - for icond in range(NUMBER_INITIAL_CONDITIONS): - prt_rmse = {} - for iprt, prt_name in enumerate(PERTURBATIONS): - if prt_name == 'woprt': - continue - iinst_ctrl = pg._sub2instance(icond, 0, nprt) - ifile_ctrl = os.path.join(rundir, - INSTANCE_FILE_TEMPLATE.format(casename + '.', iinst_ctrl, '_woprt')) - - iinst_test = pg._sub2instance(icond, iprt, nprt) - ifile_test = os.path.join(rundir, - INSTANCE_FILE_TEMPLATE.format(casename + '.', iinst_test, '_' + prt_name)) - - prt_rmse[prt_name] = pg.variables_rmse(ifile_test, ifile_ctrl, var_list, 't_') - rmse_prototype[icond] = pd.concat(prt_rmse) - rmse = pd.concat(rmse_prototype) - cld_rmse = np.reshape(rmse.RMSE.values, (NUMBER_INITIAL_CONDITIONS, nprt - 1, nvar)) - - pg.rmse_writer(os.path.join(rundir, FCLD_NC), - cld_rmse, list(PERTURBATIONS.keys()), var_list, INIT_COND_FILE_TEMPLATE) - - logger.debug("PGN_INFO:copy:{} to {}".format(FCLD_NC, basegen_dir)) - shutil.copy(os.path.join(rundir, FCLD_NC), basegen_dir) diff --git a/scripts/lib/CIME/SystemTests/pre.py b/scripts/lib/CIME/SystemTests/pre.py deleted file mode 100644 index b1578b51a6b..00000000000 --- a/scripts/lib/CIME/SystemTests/pre.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -Implementation of the CIME pause/resume test: Tests having driver -'pause' (write cpl restart file) and 'resume' (read cpl restart file) -possibly changing the restart file. Compared to non-pause/resume run. -Test can also be run with other component combinations. -Test requires DESP component to function correctly. -""" - -import os.path -import logging -import glob - -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo -from CIME.utils import expect -from CIME.hist_utils import cprnc - -############################################################################### -class PRE(SystemTestsCompareTwo): -############################################################################### - """ - Implementation of the CIME pause/resume test: Tests having driver - 'pause' (write cpl and/or other restart file(s)) and 'resume' - (read cpl and/or other restart file(s)) possibly changing restart - file. Compare to non-pause/resume run. 
- """ - - ########################################################################### - def __init__(self, case): - ########################################################################### - SystemTestsCompareTwo.__init__(self, case, - separate_builds=False, - run_two_suffix='pr', - run_one_description='no pause/resume', - run_two_description='pause/resume') - self._stopopt = '' - self._stopn = 0 - - ########################################################################### - def _case_one_setup(self): - ########################################################################### - pass - - ########################################################################### - def _case_two_setup(self): - ########################################################################### - # Set up a pause/resume run - stopopt = self._case1.get_value("STOP_OPTION") - stopn = self._case1.get_value("STOP_N") - self._case.set_value("STOP_OPTION", stopopt) - self._case.set_value("STOP_N", stopn) - self._case.set_value("ESP_RUN_ON_PAUSE", "TRUE") - if stopn > 3: - pausen = 2 - else: - pausen = 1 - # End if - - self._case.set_value("PAUSE_OPTION", stopopt) - self._case.set_value("PAUSE_N", pausen) - comps = self._case.get_values("COMP_CLASSES") - pause_active = [] - for comp in comps: - pause_active.append(self._case.get_value("PAUSE_ACTIVE_{}".format(comp))) - - expect(any(pause_active), "No pause_active flag is set") - - self._case.flush() - - ########################################################################### - def run_phase(self): # pylint: disable=arguments-differ - ########################################################################### - self._activate_case2() - should_match = (self._case.get_value("DESP_MODE") == "NOCHANGE") - SystemTestsCompareTwo.run_phase(self, success_change=not should_match) - # Look for expected coupler restart files - logger = logging.getLogger(__name__) - self._activate_case1() - rundir1 = self._case.get_value("RUNDIR") - self._activate_case2() - rundir2 = self._case.get_value("RUNDIR") - compare_ok = True - multi_driver = self._case.get_value("MULTI_DRIVER") - comps = self._case.get_values("COMP_CLASSES") - for comp in comps: - if not self._case.get_value("PAUSE_ACTIVE_{}".format(comp)): - continue - if comp == "CPL": - if multi_driver: - ninst = self._case.get_value("NINST_MAX") - else: - ninst = 1 - else: - ninst = self._case.get_value("NINST_{}".format(comp)) - - comp_name = self._case.get_value('COMP_{}'.format(comp)) - for index in range(1,ninst+1): - if ninst == 1: - rname = '*.{}.r.*'.format(comp_name) - else: - rname = '*.{}_{:04d}.r.*'.format(comp_name, index) - - restart_files_1 = glob.glob(os.path.join(rundir1, rname)) - expect((len(restart_files_1) > 0), "No case1 restart files for {}".format(comp)) - restart_files_2 = glob.glob(os.path.join(rundir2, rname)) - expect((len(restart_files_2) > len(restart_files_1)), - "No pause (restart) files found in case2 for {}".format(comp)) - # Do cprnc of restart files. 
- rfile1 = restart_files_1[len(restart_files_1) - 1] - # rfile2 has to match rfile1 (same time string) - parts = os.path.basename(rfile1).split(".") - glob_str = "*.{}".format(".".join(parts[len(parts)-4:])) - restart_files_2 = glob.glob(os.path.join(rundir2, glob_str)) - expect((len(restart_files_2) == 1), - "Missing case2 restart file, {}", glob_str) - rfile2 = restart_files_2[0] - ok = cprnc(comp, rfile1, rfile2, self._case, rundir2)[0] - logger.warning("CPRNC result for {}: {}".format(os.path.basename(rfile1), "PASS" if (ok == should_match) else "FAIL")) - compare_ok = compare_ok and (should_match == ok) - - expect(compare_ok, - "Not all restart files {}".format("matched" if should_match else "failed to match")) diff --git a/scripts/lib/CIME/SystemTests/rep.py b/scripts/lib/CIME/SystemTests/rep.py deleted file mode 100644 index a3cef73fbfc..00000000000 --- a/scripts/lib/CIME/SystemTests/rep.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Implementation of the CIME REP test - -This test verifies that two identical runs give bit-for-bit results -""" - -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo - -class REP(SystemTestsCompareTwo): - - def __init__(self, case): - SystemTestsCompareTwo.__init__(self, case, - separate_builds = False, - run_two_suffix = 'rep2') - - def _case_one_setup(self): - pass - - def _case_two_setup(self): - pass diff --git a/scripts/lib/CIME/SystemTests/restart_tests.py b/scripts/lib/CIME/SystemTests/restart_tests.py deleted file mode 100644 index cce4ca79241..00000000000 --- a/scripts/lib/CIME/SystemTests/restart_tests.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -Abstract class for restart tests - -""" - -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo -from CIME.XML.standard_module_setup import * - -logger = logging.getLogger(__name__) - -class RestartTest(SystemTestsCompareTwo): - - def __init__(self, case, - separate_builds, - run_two_suffix = 'restart', - run_one_description = 'initial', - run_two_description = 'restart', - multisubmit = False): - SystemTestsCompareTwo.__init__(self, case, - separate_builds, - run_two_suffix = run_two_suffix, - run_one_description = run_one_description, - run_two_description = run_two_description, - multisubmit = multisubmit) - - - def _case_one_setup(self): - stop_n = self._case1.get_value("STOP_N") - expect(stop_n >= 3,"STOP_N must be at least 3, STOP_N = {}".format(stop_n)) - - def _case_two_setup(self): - rest_n = self._case1.get_value("REST_N") - stop_n = self._case1.get_value("STOP_N") - stop_new = stop_n - rest_n - expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n)) - # hist_n is set to the stop_n value of case1 - self._case.set_value("HIST_N", stop_n) - self._case.set_value("STOP_N", stop_new) - self._case.set_value("CONTINUE_RUN",True) - self._case.set_value("REST_OPTION", "never") diff --git a/scripts/lib/CIME/SystemTests/seq.py b/scripts/lib/CIME/SystemTests/seq.py deleted file mode 100644 index 716215db567..00000000000 --- a/scripts/lib/CIME/SystemTests/seq.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -sequencing bfb test (10 day seq,conc tests) -""" -from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo - -logger = logging.getLogger(__name__) - -class SEQ(SystemTestsCompareTwo): - - def __init__(self, case): - """ - initialize an object interface to file env_test.xml in the case directory - """ - SystemTestsCompareTwo.__init__(self, - case, - 
separate_builds=True, - run_two_suffix="seq", - run_one_description = "base", - run_two_description = "sequence") - - def _case_one_setup(self): - pass - - def _case_two_setup(self): - comp_classes = self._case.get_values("COMP_CLASSES") - any_changes = False - for comp in comp_classes: - any_changes |= self._case.get_value("ROOTPE_{}".format(comp)) != 0 - if any_changes: - for comp in comp_classes: - self._case.set_value("ROOTPE_{}".format(comp), 0) - else: - totalpes = self._case.get_value("TOTALPES") - newntasks = max(1, totalpes//len(comp_classes)) - rootpe = newntasks - - for comp in comp_classes: - # here we set the cpl to have the first 2 tasks - # and each component to have a different ROOTPE - if comp == "CPL": - self._case.set_value("NTASKS_CPL", newntasks) - else: - self._case.set_value("NTASKS_{}".format(comp), newntasks) - self._case.set_value("ROOTPE_{}".format(comp), rootpe) - rootpe += newntasks - - self._case.flush() - self._case.case_setup(test_mode=True, reset=True) diff --git a/scripts/lib/CIME/SystemTests/system_tests_common.py b/scripts/lib/CIME/SystemTests/system_tests_common.py deleted file mode 100644 index 77c7c05ece8..00000000000 --- a/scripts/lib/CIME/SystemTests/system_tests_common.py +++ /dev/null @@ -1,715 +0,0 @@ -""" -Base class for CIME system tests -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.env_run import EnvRun -from CIME.utils import append_testlog, get_model, safe_copy, get_timestamp, CIMEError -from CIME.test_status import * -from CIME.hist_utils import * -from CIME.provenance import save_test_time -from CIME.locked_files import LOCKED_DIR, lock_file, is_locked -import CIME.build as build - -import glob, gzip, time, traceback, six - -logger = logging.getLogger(__name__) - -class SystemTestsCommon(object): - - def __init__(self, case, expected=None): - """ - initialize a CIME system test object, if the locked env_run.orig.xml - does not exist copy the current env_run.xml file. If it does exist restore values - changed in a previous run of the test. - """ - self._case = case - caseroot = case.get_value("CASEROOT") - self._caseroot = caseroot - self._orig_caseroot = caseroot - self._runstatus = None - self._casebaseid = self._case.get_value("CASEBASEID") - self._test_status = TestStatus(test_dir=caseroot, test_name=self._casebaseid) - self._init_environment(caseroot) - self._init_locked_files(caseroot, expected) - self._skip_pnl = False - self._cpllog = "med" if self._case.get_value("COMP_INTERFACE")=="nuopc" else "cpl" - - def _init_environment(self, caseroot): - """ - Do initializations of environment variables that are needed in __init__ - """ - # Needed for sh scripts - os.environ["CASEROOT"] = caseroot - - def _init_locked_files(self, caseroot, expected): - """ - If the locked env_run.orig.xml does not exist, copy the current - env_run.xml file. If it does exist, restore values changed in a previous - run of the test. - """ - if is_locked("env_run.orig.xml"): - self.compare_env_run(expected=expected) - elif os.path.isfile(os.path.join(caseroot, "env_run.xml")): - lock_file("env_run.xml", caseroot=caseroot, newname="env_run.orig.xml") - - def _resetup_case(self, phase, reset=False): - """ - Re-setup this case. This is necessary if user is re-running an already-run - phase. 
- """ - # We never want to re-setup if we're doing the resubmitted run - phase_status = self._test_status.get_status(phase) - if reset or (self._case.get_value("IS_FIRST_RUN") and phase_status != TEST_PEND_STATUS): - - logging.warning("Resetting case due to detected re-run of phase {}".format(phase)) - self._case.set_initial_test_values() - - self._case.case_setup(reset=True, test_mode=True) - - def build(self, sharedlib_only=False, model_only=False): - """ - Do NOT override this method, this method is the framework that - controls the build phase. build_phase is the extension point - that subclasses should use. - """ - success = True - for phase_name, phase_bool in [(SHAREDLIB_BUILD_PHASE, not model_only), - (MODEL_BUILD_PHASE, not sharedlib_only)]: - if phase_bool: - self._resetup_case(phase_name) - with self._test_status: - self._test_status.set_status(phase_name, TEST_PEND_STATUS) - - start_time = time.time() - try: - self.build_phase(sharedlib_only=(phase_name==SHAREDLIB_BUILD_PHASE), - model_only=(phase_name==MODEL_BUILD_PHASE)) - except BaseException as e: # We want KeyboardInterrupts to generate FAIL status - success = False - if isinstance(e, CIMEError): - # Don't want to print stacktrace for a build failure since that - # is not a CIME/infrastructure problem. - excmsg = str(e) - else: - excmsg = "Exception during build:\n{}\n{}".format(str(e), traceback.format_exc()) - - append_testlog(excmsg, self._orig_caseroot) - raise - - finally: - time_taken = time.time() - start_time - with self._test_status: - self._test_status.set_status(phase_name, TEST_PASS_STATUS if success else TEST_FAIL_STATUS, comments=("time={:d}".format(int(time_taken)))) - - return success - - def build_phase(self, sharedlib_only=False, model_only=False): - """ - This is the default build phase implementation, it just does an individual build. - This is the subclass' extension point if they need to define a custom build - phase. - - PLEASE THROW EXCEPTION ON FAIL - """ - self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) - - def build_indv(self, sharedlib_only=False, model_only=False): - """ - Perform an individual build - """ - model = self._case.get_value('MODEL') - build.case_build(self._caseroot, case=self._case, - sharedlib_only=sharedlib_only, model_only=model_only, - save_build_provenance=not model=='cesm') - - def clean_build(self, comps=None): - if comps is None: - comps = [x.lower() for x in self._case.get_values("COMP_CLASSES")] - build.clean(self._case, cleanlist=comps) - - def run(self, skip_pnl=False): - """ - Do NOT override this method, this method is the framework that controls - the run phase. run_phase is the extension point that subclasses should use. 
- """ - success = True - start_time = time.time() - self._skip_pnl = skip_pnl - try: - self._resetup_case(RUN_PHASE) - with self._test_status: - self._test_status.set_status(RUN_PHASE, TEST_PEND_STATUS) - - self.run_phase() - - if self._case.get_value("GENERATE_BASELINE"): - self._phase_modifying_call(GENERATE_PHASE, self._generate_baseline) - - if self._case.get_value("COMPARE_BASELINE"): - self._phase_modifying_call(BASELINE_PHASE, self._compare_baseline) - self._phase_modifying_call(MEMCOMP_PHASE, self._compare_memory) - self._phase_modifying_call(THROUGHPUT_PHASE, self._compare_throughput) - - self._phase_modifying_call(MEMLEAK_PHASE, self._check_for_memleak) - - self._phase_modifying_call(STARCHIVE_PHASE, self._st_archive_case_test) - - except BaseException as e: # We want KeyboardInterrupts to generate FAIL status - success = False - if isinstance(e, CIMEError): - # Don't want to print stacktrace for a model failure since that - # is not a CIME/infrastructure problem. - excmsg = str(e) - else: - excmsg = "Exception during run:\n{}\n{}".format(str(e), traceback.format_exc()) - - append_testlog(excmsg, self._orig_caseroot) - raise - - finally: - # Writing the run status should be the very last thing due to wait_for_tests - time_taken = time.time() - start_time - status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS - with self._test_status: - self._test_status.set_status(RUN_PHASE, status, comments=("time={:d}".format(int(time_taken)))) - - if success and get_model() == "e3sm": - save_test_time(self._case.get_value("BASELINE_ROOT"), self._casebaseid, time_taken) - - if get_model() == "cesm" and self._case.get_value("GENERATE_BASELINE"): - baseline_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), self._case.get_value("BASEGEN_CASE")) - generate_teststatus(self._caseroot, baseline_dir) - - # We return success if the run phase worked; memleaks, diffs will not be taken into account - # with this return value. - return success - - def run_phase(self): - """ - This is the default run phase implementation, it just does an individual run. - This is the subclass' extension point if they need to define a custom run phase. - - PLEASE THROW AN EXCEPTION ON FAIL - """ - self.run_indv() - - def _get_caseroot(self): - """ - Returns the current CASEROOT value - """ - return self._caseroot - - def _set_active_case(self, case): - """ - Use for tests that have multiple cases - """ - self._case = case - self._case.load_env(reset=True) - self._caseroot = case.get_value("CASEROOT") - - def run_indv(self, suffix="base", st_archive=False): - """ - Perform an individual run. Raises an EXCEPTION on fail. 
- """ - stop_n = self._case.get_value("STOP_N") - stop_option = self._case.get_value("STOP_OPTION") - run_type = self._case.get_value("RUN_TYPE") - rundir = self._case.get_value("RUNDIR") - is_batch = self._case.get_value("BATCH_SYSTEM") != "none" - - # remove any cprnc output leftover from previous runs - for compout in glob.iglob(os.path.join(rundir,"*.cprnc.out")): - os.remove(compout) - - infostr = "doing an {:d} {} {} test".format(stop_n, stop_option, run_type) - - rest_option = self._case.get_value("REST_OPTION") - if rest_option == "none" or rest_option == "never": - infostr += ", no restarts written" - else: - rest_n = self._case.get_value("REST_N") - infostr += ", with restarts every {:d} {}".format(rest_n, rest_option) - - logger.info(infostr) - - self._case.case_run(skip_pnl=self._skip_pnl, submit_resubmits=is_batch) - - if not self._coupler_log_indicates_run_complete(): - expect(False, "Coupler did not indicate run passed") - - if suffix is not None: - self._component_compare_copy(suffix) - - if st_archive: - self._case.case_st_archive(resubmit=True) - - def _coupler_log_indicates_run_complete(self): - newestcpllogfiles = self._get_latest_cpl_logs() - logger.debug("Latest Coupler log file(s) {}" .format(newestcpllogfiles)) - # Exception is raised if the file is not compressed - allgood = len(newestcpllogfiles) - for cpllog in newestcpllogfiles: - try: - if six.b("SUCCESSFUL TERMINATION") in gzip.open(cpllog, 'rb').read(): - allgood = allgood - 1 - except Exception as e: # Probably want to be more specific here - msg = e.__str__() - - logger.info("{} is not compressed, assuming run failed {}".format(cpllog, msg)) - - return allgood==0 - - def _component_compare_copy(self, suffix): - comments = copy(self._case, suffix) - append_testlog(comments, self._orig_caseroot) - - def _component_compare_test(self, suffix1, suffix2, - success_change=False, - ignore_fieldlist_diffs=False): - """ - Return value is not generally checked, but is provided in case a custom - run case needs indirection based on success. - If success_change is True, success requires some files to be different. - If ignore_fieldlist_diffs is True, then: If the two cases differ only in their - field lists (i.e., all shared fields are bit-for-bit, but one case has some - diagnostic fields that are missing from the other case), treat the two cases - as identical. - """ - success, comments = self._do_compare_test(suffix1, suffix2, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) - if success_change: - success = not success - - append_testlog(comments, self._orig_caseroot) - status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS - with self._test_status: - self._test_status.set_status("{}_{}_{}".format(COMPARE_PHASE, suffix1, suffix2), status) - return success - - def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): - """ - Wraps the call to compare_test to facilitate replacement in unit - tests - """ - return compare_test(self._case, suffix1, suffix2, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) - - def _st_archive_case_test(self): - result = self._case.test_env_archive() - with self._test_status: - if result: - self._test_status.set_status(STARCHIVE_PHASE, TEST_PASS_STATUS) - else: - self._test_status.set_status(STARCHIVE_PHASE, TEST_FAIL_STATUS) - - def _get_mem_usage(self, cpllog): - """ - Examine memory usage as recorded in the cpl log file and look for unexpected - increases. 
- """ - memlist = [] - meminfo = re.compile(r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater") - if cpllog is not None and os.path.isfile(cpllog): - if '.gz' == cpllog[-3:]: - fopen = gzip.open - else: - fopen = open - with fopen(cpllog, "rb") as f: - for line in f: - m = meminfo.match(line.decode('utf-8')) - if m: - memlist.append((float(m.group(1)), float(m.group(2)))) - # Remove the last mem record, it's sometimes artificially high - if len(memlist) > 0: - memlist.pop() - return memlist - - def _get_throughput(self, cpllog): - """ - Examine memory usage as recorded in the cpl log file and look for unexpected - increases. - """ - if cpllog is not None and os.path.isfile(cpllog): - with gzip.open(cpllog, "rb") as f: - cpltext = f.read().decode('utf-8') - m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s",cpltext) - if m: - return float(m.group(1)) - return None - - def _phase_modifying_call(self, phase, function): - """ - Ensures that unexpected exceptions from phases will result in a FAIL result - in the TestStatus file for that phase. - """ - try: - function() - except Exception as e: # Do NOT want to catch KeyboardInterrupt - msg = e.__str__() - excmsg = "Exception during {}:\n{}\n{}".format(phase, msg, traceback.format_exc()) - - logger.warning(excmsg) - append_testlog(excmsg, self._orig_caseroot) - - with self._test_status: - self._test_status.set_status(phase, TEST_FAIL_STATUS, comments="exception") - - def _check_for_memleak(self): - """ - Examine memory usage as recorded in the cpl log file and look for unexpected - increases. - """ - with self._test_status: - latestcpllogs = self._get_latest_cpl_logs() - for cpllog in latestcpllogs: - memlist = self._get_mem_usage(cpllog) - - if len(memlist)<3: - self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS, comments="insuffiencient data for memleak test") - else: - finaldate = int(memlist[-1][0]) - originaldate = int(memlist[0][0]) - finalmem = float(memlist[-1][1]) - originalmem = float(memlist[0][1]) - memdiff = -1 - if originalmem > 0: - memdiff = (finalmem - originalmem)/originalmem - tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") - if tolerance is None: - tolerance = 0.1 - expect(tolerance > 0.0, "Bad value for memleak tolerance in test") - if memdiff < 0: - self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS, comments="insuffiencient data for memleak test") - elif memdiff < tolerance: - self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS) - else: - comment = "memleak detected, memory went from {:f} to {:f} in {:d} days".format(originalmem, finalmem, finaldate-originaldate) - append_testlog(comment, self._orig_caseroot) - self._test_status.set_status(MEMLEAK_PHASE, TEST_FAIL_STATUS, comments=comment) - - def compare_env_run(self, expected=None): - """ - Compare env_run file to original and warn about differences - """ - components = self._case.get_values("COMP_CLASSES") - f1obj = self._case.get_env("run") - f2obj = EnvRun(self._caseroot, os.path.join(LOCKED_DIR, "env_run.orig.xml"), components=components) - diffs = f1obj.compare_xml(f2obj) - for key in diffs.keys(): - if expected is not None and key in expected: - logging.warning(" Resetting {} for test".format(key)) - f1obj.set_value(key, f2obj.get_value(key, resolved=False)) - else: - print("WARNING: Found difference in test {}: case: {} original value {}".format(key, diffs[key][0], diffs[key][1])) - return False - return True - - def _get_latest_cpl_logs(self): - """ - find and return the latest cpl log file in the run 
directory - """ - coupler_log_path = self._case.get_value("RUNDIR") - cpllogs = glob.glob(os.path.join(coupler_log_path, '{}*.log.*'.format(self._cpllog))) - lastcpllogs = [] - if cpllogs: - lastcpllogs.append(max(cpllogs, key=os.path.getctime)) - basename = os.path.basename(lastcpllogs[0]) - suffix = basename.split('.',1)[1] - for log in cpllogs: - if log in lastcpllogs: - continue - - if log.endswith(suffix): - lastcpllogs.append(log) - - return lastcpllogs - - def _compare_memory(self): - with self._test_status: - # compare memory usage to baseline - baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), baseline_name) - newestcpllogfiles = self._get_latest_cpl_logs() - if len(newestcpllogfiles) > 0: - memlist = self._get_mem_usage(newestcpllogfiles[0]) - for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog),cpllog) - if m is not None: - baselog = os.path.join(basecmp_dir, m.group(1))+".gz" - if baselog is None or not os.path.isfile(baselog): - # for backward compatibility - baselog = os.path.join(basecmp_dir, self._cpllog+".log") - if os.path.isfile(baselog) and len(memlist) > 3: - blmem = self._get_mem_usage(baselog) - blmem = 0 if blmem == [] else blmem[-1][1] - curmem = memlist[-1][1] - diff = (curmem-blmem)/blmem - if diff < 0.1 and self._test_status.get_status(MEMCOMP_PHASE) is None: - self._test_status.set_status(MEMCOMP_PHASE, TEST_PASS_STATUS) - elif self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS: - comment = "Error: Memory usage increase > 10% from baseline" - self._test_status.set_status(MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment) - append_testlog(comment, self._orig_caseroot) - - def _compare_throughput(self): - with self._test_status: - # compare memory usage to baseline - baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), baseline_name) - newestcpllogfiles = self._get_latest_cpl_logs() - for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) - if m is not None: - baselog = os.path.join(basecmp_dir, m.group(1))+".gz" - if baselog is None or not os.path.isfile(baselog): - # for backward compatibility - baselog = os.path.join(basecmp_dir, self._cpllog) - - if os.path.isfile(baselog): - # compare throughput to baseline - current = self._get_throughput(cpllog) - baseline = self._get_throughput(baselog) - #comparing ypd so bigger is better - if baseline is not None and current is not None: - diff = (baseline - current)/baseline - tolerance = self._case.get_value("TEST_TPUT_TOLERANCE") - if tolerance is None: - tolerance = 0.25 - expect(tolerance > 0.0, "Bad value for throughput tolerance in test") - if diff < tolerance and self._test_status.get_status(THROUGHPUT_PHASE) is None: - self._test_status.set_status(THROUGHPUT_PHASE, TEST_PASS_STATUS) - elif self._test_status.get_status(THROUGHPUT_PHASE) != TEST_FAIL_STATUS: - comment = "Error: Computation time increase > {:d} pct from baseline".format(int(tolerance*100)) - self._test_status.set_status(THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment) - append_testlog(comment, self._orig_caseroot) - - def _compare_baseline(self): - """ - compare the current test output to a baseline result - """ - with self._test_status: - # compare baseline - success, comments = compare_baseline(self._case) - append_testlog(comments, self._orig_caseroot) - status = TEST_PASS_STATUS if success else 
TEST_FAIL_STATUS - baseline_name = self._case.get_value("BASECMP_CASE") - ts_comments = os.path.dirname(baseline_name) + ": " + get_ts_synopsis(comments) - self._test_status.set_status(BASELINE_PHASE, status, comments=ts_comments) - - def _generate_baseline(self): - """ - generate a new baseline case based on the current test - """ - with self._test_status: - # generate baseline - success, comments = generate_baseline(self._case) - append_testlog(comments, self._orig_caseroot) - status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS - baseline_name = self._case.get_value("BASEGEN_CASE") - self._test_status.set_status(GENERATE_PHASE, status, comments=os.path.dirname(baseline_name)) - basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), self._case.get_value("BASEGEN_CASE")) - # copy latest cpl log to baseline - # drop the date so that the name is generic - newestcpllogfiles = self._get_latest_cpl_logs() - for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog),cpllog) - if m is not None: - baselog = os.path.join(basegen_dir, m.group(1))+".gz" - safe_copy(cpllog, - os.path.join(basegen_dir,baselog)) - -class FakeTest(SystemTestsCommon): - """ - Inheriters of the FakeTest Class are intended to test the code. - - All members of the FakeTest Class must - have names beginnig with "TEST" this is so that the find_system_test - in utils.py will work with these classes. - """ - def _set_script(self, script): - self._script = script # pylint: disable=attribute-defined-outside-init - - def build_phase(self, sharedlib_only=False, model_only=False): - if (not sharedlib_only): - exeroot = self._case.get_value("EXEROOT") - cime_model = self._case.get_value("MODEL") - modelexe = os.path.join(exeroot, "{}.exe".format(cime_model)) - - with open(modelexe, 'w') as f: - f.write("#!/bin/bash\n") - f.write(self._script) - - os.chmod(modelexe, 0o755) - - build.post_build(self._case, [], build_complete=True) - - def run_indv(self, suffix="base", st_archive=False): - mpilib = self._case.get_value("MPILIB") - # This flag is needed by mpt to run a script under mpiexec - if mpilib == "mpt": - os.environ["MPI_SHEPHERD"] = "true" - super(FakeTest, self).run_indv(suffix, st_archive) - -class TESTRUNPASS(FakeTest): - - def build_phase(self, sharedlib_only=False, model_only=False): - rundir = self._case.get_value("RUNDIR") - cimeroot = self._case.get_value("CIMEROOT") - case = self._case.get_value("CASE") - script = \ -""" -echo Insta pass -echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID -cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) - self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) - -class TESTRUNDIFF(FakeTest): - """ - You can generate a diff with this test as follows: - 1) Run the test and generate a baseline - 2) set TESTRUNDIFF_ALTERNATE environment variable to TRUE - 3) Re-run the same test from step 1 but do a baseline comparison instead of generation - 3.a) This should give you a DIFF - """ - def build_phase(self, sharedlib_only=False, model_only=False): - rundir = self._case.get_value("RUNDIR") - cimeroot = self._case.get_value("CIMEROOT") - case = self._case.get_value("CASE") - script = \ -""" -echo Insta pass -echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID -if [ -z "$TESTRUNDIFF_ALTERNATE" ]; then - cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -else - cp 
{root}/scripts/tests/cpl.hi2.nc.test {rundir}/{case}.cpl.hi.0.nc -fi -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) - self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) - -class TESTTESTDIFF(FakeTest): - - def build_phase(self, sharedlib_only=False, model_only=False): - rundir = self._case.get_value("RUNDIR") - cimeroot = self._case.get_value("CIMEROOT") - case = self._case.get_value("CASE") - script = \ -""" -echo Insta pass -echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID -cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -cp {root}/scripts/tests/cpl.hi2.nc.test {rundir}/{case}.cpl.hi.0.nc.rest -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) - self._set_script(script) - super(TESTTESTDIFF, self).build_phase(sharedlib_only=sharedlib_only, - model_only=model_only) - - def run_phase(self): - super(TESTTESTDIFF, self).run_phase() - self._component_compare_test("base", "rest") - -class TESTRUNFAIL(FakeTest): - - def build_phase(self, sharedlib_only=False, model_only=False): - rundir = self._case.get_value("RUNDIR") - cimeroot = self._case.get_value("CIMEROOT") - case = self._case.get_value("CASE") - script = \ -""" -if [ -z "$TESTRUNFAIL_PASS" ]; then - echo Insta fail - echo model failed > {rundir}/{log}.log.$LID - exit -1 -else - echo Insta pass - echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID - cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -fi -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) - self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) - -class TESTRUNFAILEXC(TESTRUNPASS): - - def run_phase(self): - raise RuntimeError("Exception from run_phase") - -class TESTRUNSTARCFAIL(TESTRUNPASS): - - def _st_archive_case_test(self): - raise RuntimeError("Exception from st archive") - -class TESTBUILDFAIL(TESTRUNPASS): - - def build_phase(self, sharedlib_only=False, model_only=False): - if "TESTBUILDFAIL_PASS" in os.environ: - TESTRUNPASS.build_phase(self, sharedlib_only, model_only) - else: - if (not sharedlib_only): - blddir = self._case.get_value("EXEROOT") - bldlog = os.path.join(blddir, "{}.bldlog.{}".format(get_model(), get_timestamp("%y%m%d-%H%M%S"))) - with open(bldlog, "w") as fd: - fd.write("BUILD FAIL: Intentional fail for testing infrastructure") - - expect(False, "BUILD FAIL: Intentional fail for testing infrastructure") - -class TESTBUILDFAILEXC(FakeTest): - - def __init__(self, case): - FakeTest.__init__(self, case) - raise RuntimeError("Exception from init") - -class TESTRUNSLOWPASS(FakeTest): - - def build_phase(self, sharedlib_only=False, model_only=False): - rundir = self._case.get_value("RUNDIR") - cimeroot = self._case.get_value("CIMEROOT") - case = self._case.get_value("CASE") - script = \ -""" -sleep 300 -echo Slow pass -echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID -cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) - self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) - -class TESTMEMLEAKFAIL(FakeTest): - def build_phase(self, sharedlib_only=False, model_only=False): - rundir = self._case.get_value("RUNDIR") - cimeroot = self._case.get_value("CIMEROOT") - case = self._case.get_value("CASE") - testfile = os.path.join(cimeroot,"scripts","tests","cpl.log.failmemleak.gz") - script = \ 
-""" -echo Insta pass -gunzip -c {testfile} > {rundir}/{log}.log.$LID -cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case) - self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) - -class TESTMEMLEAKPASS(FakeTest): - def build_phase(self, sharedlib_only=False, model_only=False): - rundir = self._case.get_value("RUNDIR") - cimeroot = self._case.get_value("CIMEROOT") - case = self._case.get_value("CASE") - testfile = os.path.join(cimeroot,"scripts","tests","cpl.log.passmemleak.gz") - script = \ -""" -echo Insta pass -gunzip -c {testfile} > {rundir}/{log}.log.$LID -cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case) - self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) diff --git a/scripts/lib/CIME/SystemTests/test_utils/user_nl_utils.py b/scripts/lib/CIME/SystemTests/test_utils/user_nl_utils.py deleted file mode 100644 index f91b832b943..00000000000 --- a/scripts/lib/CIME/SystemTests/test_utils/user_nl_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -This module contains functions for working with user_nl files in system tests. -""" - -import os -import glob - -def append_to_user_nl_files(caseroot, component, contents): - """ - Append the string given by 'contents' to the end of each user_nl file for - the given component (there may be multiple such user_nl files in the case of - a multi-instance test). - - Also puts new lines before and after the appended text - so 'contents' - does not need to contain a trailing new line (but it's also okay if it - does). - - Args: - caseroot (str): Full path to the case directory - - component (str): Name of component (e.g., 'clm'). This is used to - determine which user_nl files are appended to. For example, for - component='clm', this function will operate on all user_nl files - matching the pattern 'user_nl_clm*'. (We do a wildcard match to - handle multi-instance tests.) - - contents (str): Contents to append to the end of each user_nl file - """ - - files = _get_list_of_user_nl_files(caseroot, component) - - if len(files) == 0: - raise RuntimeError('No user_nl files found for component ' + component) - - for one_file in files: - with open(one_file, 'a') as user_nl_file: - user_nl_file.write('\n' + contents + '\n') - -def _get_list_of_user_nl_files(path, component): - """Get a list of all user_nl files in the current path for the component - of interest. For a component 'foo', we match all files of the form - user_nl_foo* - with a wildcard match at the end in order to match files - in a multi-instance case. - - The list of returned files gives their full path. - """ - - file_pattern = 'user_nl_' + component + '*' - file_list = glob.glob(os.path.join(path, file_pattern)) - - return file_list diff --git a/scripts/lib/CIME/SystemTests/tsc.py b/scripts/lib/CIME/SystemTests/tsc.py deleted file mode 100644 index 86e50f7eb26..00000000000 --- a/scripts/lib/CIME/SystemTests/tsc.py +++ /dev/null @@ -1,181 +0,0 @@ -""" -Solution reproducibility test based on time-step convergence -The CESM/ACME model's -multi-instance capability is used to conduct an ensemble -of simulations starting from different initial conditions. - -This class inherits from SystemTestsCommon. 
-""" - -import os -import json -import logging - -import CIME.test_status -from CIME.SystemTests.system_tests_common import SystemTestsCommon -from CIME.case.case_setup import case_setup -from CIME.hist_utils import rename_all_hist_files, _get_all_hist_files -from CIME.utils import safe_copy, SharedArea - -import evv4esm # pylint: disable=import-error -from evv4esm.__main__ import main as evv # pylint: disable=import-error - -evv_lib_dir = os.path.abspath(os.path.dirname(evv4esm.__file__)) - -logger = logging.getLogger(__name__) - - -NINST = 12 -SIM_LENGTH = 600 # seconds -OUT_FREQ = 10 # seconds -INSPECT_AT = [300, 450, 600] # seconds -INIT_COND_FILE_TEMPLATE = \ - "SMS_Ly5.ne4_ne4.FC5AV1C-04P2.eos_intel.ne45y.{}.{}.0002-{:02d}-01-00000.nc" -VAR_LIST = ["T", "Q", "V", "CLDLIQ", "CLDICE", "NUMLIQ", "NUMICE", "num_a1", "num_a2", "num_a3"] -P_THRESHOLD = 0.005 - - -class TSC(SystemTestsCommon): - def __init__(self, case): - """ - initialize an object interface to the TSC test - """ - super(TSC, self).__init__(case) - - def build_phase(self, sharedlib_only=False, model_only=False): - # Only want this to happen once. It will impact the sharedlib build - # so it has to happen there. - if not model_only: - logging.warning("Starting to build multi-instance exe") - for comp in ['ATM', 'OCN', 'WAV', 'GLC', 'ICE', 'ROF', 'LND']: - ntasks = self._case.get_value("NTASKS_{}".format(comp)) - self._case.set_value("ROOTPE_{}".format(comp), 0) - self._case.set_value("NINST_{}".format(comp), NINST) - self._case.set_value("NTASKS_{}".format(comp), ntasks * NINST) - - self._case.set_value("ROOTPE_CPL", 0) - self._case.set_value("NTASKS_CPL", ntasks * NINST) - self._case.flush() - - case_setup(self._case, test_mode=False, reset=True) - - self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) - - def _run_with_specified_dtime(self, dtime=2): - """ - Conduct one multi-instance run with a specified time step size. - - :param dtime (int): Specified time step size in seconds - """ - coupling_frequency = 86400 // dtime - self._case.set_value("ATM_NCPL", str(coupling_frequency)) - - nsteps = SIM_LENGTH // dtime - self._case.set_value("STOP_N", str(nsteps)) - self._case.set_value("STOP_OPTION", "nsteps") - - csmdata_root = self._case.get_value("DIN_LOC_ROOT") - csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v1_init") - csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_v1_init/b58d55680") - - nstep_output = OUT_FREQ // dtime - for iinst in range(1, NINST+1): - with open('user_nl_cam_'+str(iinst).zfill(4), 'w') as atmnlfile, \ - open('user_nl_clm_'+str(iinst).zfill(4), 'w') as lndnlfile: - - fatm_in = os.path.join(csmdata_atm, INIT_COND_FILE_TEMPLATE.format('cam', 'i', iinst)) - flnd_in = os.path.join(csmdata_lnd, INIT_COND_FILE_TEMPLATE.format('clm2', 'r', iinst)) - atmnlfile.write("ncdata = '{}' \n".format(fatm_in)) - lndnlfile.write("finidat = '{}' \n".format(flnd_in)) - - lndnlfile.write("dtime = {} \n".format(dtime)) - - atmnlfile.write("dtime = {} \n".format(dtime)) - atmnlfile.write("iradsw = 2 \n") - atmnlfile.write("iradlw = 2 \n") - - atmnlfile.write("avgflag_pertape = 'I' \n") - atmnlfile.write("nhtfrq = {} \n".format(nstep_output)) - atmnlfile.write("mfilt = 1 \n") - atmnlfile.write("ndens = 1 \n") - atmnlfile.write("empty_htapes = .true. 
\n") - atmnlfile.write("fincl1 = 'PS','U','LANDFRAC',{} \n".format( - ''.join(["'{}',".format(s) for s in VAR_LIST])[:-1] - )) - - # Force rebuild namelists - self._skip_pnl = False - - self.run_indv() - - rename_all_hist_files(self._case, suffix="DT{:04d}".format(dtime)) - - def run_phase(self): - self._run_with_specified_dtime(dtime=2) - - if self._case.get_value("GENERATE_BASELINE"): - self._run_with_specified_dtime(dtime=1) - - def _compare_baseline(self): - with self._test_status as ts: - ts.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_FAIL_STATUS) - - run_dir = self._case.get_value("RUNDIR") - case_name = self._case.get_value("CASE") - base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASECMP_CASE")) - - test_name = "{}".format(case_name.split('.')[-1]) - evv_config = { - test_name: { - "module": os.path.join(evv_lib_dir, "extensions", "tsc.py"), - "test-case": case_name, - "test-dir": run_dir, - "ref-case": "Baseline", - "ref-dir": base_dir, - "time-slice": [OUT_FREQ, SIM_LENGTH], - "inspect-times": INSPECT_AT, - "variables": VAR_LIST, - "p-threshold": P_THRESHOLD, - } - } - - json_file = os.path.join(run_dir, '.'.join([case_name, 'json'])) - with open(json_file, 'w') as config_file: - json.dump(evv_config, config_file, indent=4) - - evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv'])) - evv(['-e', json_file, '-o', evv_out_dir]) - - with open(os.path.join(evv_out_dir, 'index.json'), 'r') as evv_f: - evv_status = json.load(evv_f) - - for evv_elem in evv_status['Data']['Elements']: - if evv_elem['Type'] == 'ValSummary' \ - and evv_elem['TableTitle'] == 'Time step convergence test': - if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass': - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) - break - - def _generate_baseline(self): - super(TSC, self)._generate_baseline() - - with SharedArea(): - basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASEGEN_CASE")) - - rundir = self._case.get_value("RUNDIR") - ref_case = self._case.get_value("RUN_REFCASE") - - model = 'cam' - hists = _get_all_hist_files(model, rundir, [r'h\d*.*\.nc\.DT\d*'], ref_case=ref_case) - logger.debug("TSC additional baseline files: {}".format(hists)) - for hist in hists: - basename = hist[hist.rfind(model):] - baseline = os.path.join(basegen_dir, basename) - if os.path.exists(baseline): - os.remove(baseline) - - safe_copy(hist, baseline, preserve_meta=False) diff --git a/scripts/lib/CIME/XML/archive.py b/scripts/lib/CIME/XML/archive.py deleted file mode 100644 index 0c7b781e86f..00000000000 --- a/scripts/lib/CIME/XML/archive.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -Interface to the archive.xml file. 
This class inherits from GenericXML.py -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.archive_base import ArchiveBase -from CIME.XML.files import Files -from CIME.utils import expect, get_model - -logger = logging.getLogger(__name__) - -class Archive(ArchiveBase): - - def __init__(self, infile=None, files=None): - """ - initialize an object - """ - if files is None: - files = Files() - schema = files.get_schema("ARCHIVE_SPEC_FILE") - super(Archive, self).__init__(infile, schema) - - def setup(self, env_archive, components, files=None): - if files is None: - files = Files() - - components_node = env_archive.make_child("components", attributes={"version":"2.0"}) - - model = get_model() - if 'drv' not in components: - components.append('drv') - if 'dart' not in components and model == 'cesm': - components.append('dart') - - for comp in components: - infile = files.get_value("ARCHIVE_SPEC_FILE", {"component":comp}) - - if infile is not None and os.path.isfile(infile): - arch = Archive(infile=infile, files=files) - specs = arch.get_optional_child(name="comp_archive_spec", attributes={"compname":comp}) - else: - if infile is None: - logger.debug("No archive file defined for component {}".format(comp)) - else: - logger.debug("Archive file {} for component {} not found".format(infile,comp)) - - specs = self.get_optional_child(name="comp_archive_spec", attributes={"compname":comp}) - - if specs is None: - logger.debug("No archive specs found for component {}".format(comp)) - else: - logger.debug("adding archive spec for {}".format(comp)) - env_archive.add_child(specs, root=components_node) - - def get_all_config_archive_files(self, files): - """ - Returns the list of ARCHIVE_SPEC_FILES that exist on disk as defined in config_files.xml - """ - archive_spec_node = files.get_child("entry", {"id" : "ARCHIVE_SPEC_FILE"}) - component_nodes = files.get_children("value", root=files.get_child("values", root=archive_spec_node)) - config_archive_files = [] - for comp in component_nodes: - attr = self.get(comp,"component") - if attr: - compval = files.get_value("ARCHIVE_SPEC_FILE", attribute={"component":attr}) - else: - compval = self.text(comp) - if os.path.isfile(compval): - config_archive_files.append(compval) - - config_archive_files = list(set(config_archive_files)) - return config_archive_files diff --git a/scripts/lib/CIME/XML/archive_base.py b/scripts/lib/CIME/XML/archive_base.py deleted file mode 100644 index e134ea74f68..00000000000 --- a/scripts/lib/CIME/XML/archive_base.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Base class for archive files. 
This class inherits from generic_xml.py -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML - -logger = logging.getLogger(__name__) - -class ArchiveBase(GenericXML): - - def get_entry(self, compname): - return self.scan_optional_child('comp_archive_spec', - attributes={"compname":compname}) - - - def get_rest_file_extensions(self, archive_entry): - file_extensions = [] - nodes = self.get_children('rest_file_extension', root=archive_entry) - for node in nodes: - file_extensions.append(self.text(node)) - return file_extensions - - def get_hist_file_extensions(self, archive_entry): - file_extensions = [] - nodes = self.get_children('hist_file_extension', root=archive_entry) - for node in nodes: - file_extensions.append(self.text(node)) - return file_extensions - - def get_entry_value(self, name, archive_entry): - node = self.get_optional_child(name, root=archive_entry) - if node is not None: - return self.text(node) - return None diff --git a/scripts/lib/CIME/XML/batch.py b/scripts/lib/CIME/XML/batch.py deleted file mode 100644 index b9e4385496d..00000000000 --- a/scripts/lib/CIME/XML/batch.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -Interface to the config_batch.xml file. This class inherits from GenericXML.py - -The batch_system type="foo" blocks define most things. Machine-specific overrides -can be defined by providing a batch_system MACH="mach" block. -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files -from CIME.utils import expect - -logger = logging.getLogger(__name__) - -class Batch(GenericXML): - - def __init__(self, batch_system=None, machine=None, infile=None, files=None): - """ - initialize an object - """ - if files is None: - files = Files() - if infile is None: - infile = files.get_value("BATCH_SPEC_FILE") - - schema = files.get_schema("BATCH_SPEC_FILE") - - GenericXML.__init__(self, infile, schema=schema) - - self.batch_system_node = None - self.machine_node = None - self.batch_system = batch_system - self.machine = machine - - #Append the contents of $HOME/.cime/config_batch.xml if it exists - #This could cause problems if node matchs are repeated when only one is expected - infile = os.path.join(os.environ.get("HOME"),".cime","config_batch.xml") - if os.path.exists(infile): - GenericXML.read(self, infile) - - if self.batch_system is not None: - self.set_batch_system(self.batch_system, machine=machine) - - def get_batch_system(self): - """ - Return the name of the batch system - """ - return self.batch_system - - def get_optional_batch_node(self, nodename, attributes=None): - """ - Return data on a node for a batch system - """ - expect(self.batch_system_node is not None, "Batch system not set, use parent get_node?") - - if self.machine_node is not None: - result = self.get_optional_child(nodename, attributes, root=self.machine_node) - if result is None: - return self.get_optional_child(nodename, attributes, root=self.batch_system_node) - else: - return result - else: - return self.get_optional_child(nodename, attributes, root=self.batch_system_node) - - def set_batch_system(self, batch_system, machine=None): - """ - Sets the batch system block in the Batch object - """ - machine = machine if machine is not None else self.machine - if self.batch_system != batch_system or self.batch_system_node is None: - nodes = self.get_children("batch_system",{"type" : batch_system}) - for node in nodes: - mach = self.get(node, "MACH") - if mach is None: - 
self.batch_system_node = node - elif mach == machine: - self.machine = machine - self.machine_node = node - - expect(self.batch_system_node is not None, "No batch system '{}' found".format(batch_system)) - - return batch_system - - #pylint: disable=arguments-differ - def get_value(self, name, attribute=None, resolved=True, subgroup=None): - """ - Get Value of fields in the config_batch.xml file - """ - expect(self.batch_system_node is not None, "Batch object has no batch system defined") - expect(subgroup is None, "This class does not support subgroups") - value = None - - node = self.get_optional_batch_node(name) - if node is not None: - value = self.text(node) - - if resolved: - if value is not None: - value = self.get_resolved_value(value) - elif name in os.environ: - value = os.environ[name] - - return value - - def get_batch_jobs(self): - """ - Return a list of jobs with the first element the name of the case script - and the second a dict of qualifiers for the job - """ - jobs = [] - bnode = self.get_optional_child("batch_jobs") - if bnode: - for jnode in self.get_children(root=bnode): - if self.name(jnode) == "job": - name = self.get(jnode, "name") - jdict = {} - for child in self.get_children(root=jnode): - jdict[self.name(child)] = self.text(child) - - jobs.append((name, jdict)) - - return jobs diff --git a/scripts/lib/CIME/XML/compilerblock.py b/scripts/lib/CIME/XML/compilerblock.py deleted file mode 100644 index 679b360c6d9..00000000000 --- a/scripts/lib/CIME/XML/compilerblock.py +++ /dev/null @@ -1,250 +0,0 @@ -""" -Classes used to build the CIME Macros file. - -The main "public" class here is Build. It is initialized with machine-specific -information, and its write_macros method is the driver for translating the -config_compilers.xml file into a Makefile or CMake-format Macros file. - -For developers, here's the role of the other classes in the process: - -- A CompilerBlock is responsible for translating the XML code in a - tag into Python data structures. - -- A PossibleValues object keeps track of all the settings that could affect a - particular variable, and is the main way that these settings are stored. - -- A MacroConditionTree is the structure that is responsible for writing out the - settings. While the PossibleValues objects are organized by variable name, the - MacroConditionTree is organized by conditional blocks, and thus roughly - plays the role of a syntax tree corresponding to the Makefile/CMake output. - -In more detail: - -- Build.write_macros immediately creates a MakeMacroWriter or CMakeMacroWriter - to translate strings for the build system. - -- It also creates value_lists, a dictionary of PossibleValues objects, with - variable names as the keys. Each variable has a single PossibleValues object - associated with it. - -- For each element, Build.write_macros creates a CompilerBlock - instance. This object is responsible for translating the XML in its block, in - order to populate the PossibleValues instances. This includes handling the - $VAR, $ENV{...} and $SHELL{...} and keeping track of dependencies induced by one - variable referencing another's value. - -- The PossibleValues object holds the information about how one variable can be - set, based on various build options. It has two main roles: - 1. As we iterate through the XML input file, each setting is added to the - relevant PossibleValues object. The PossibleValues object contains lists - of settings sorted by how machine-specific those settings are. - 2. 
The PossibleValues object iterates through the list of settings to check - for ambiguities. E.g. if there is a setting for DEBUG=TRUE, and another - setting for MPILIB=mpi-serial, it is ambiguous in the case where both - conditions hold. - -- A ValueSetting object is a simple struct that a setting from the XML file is - translated to. The lists in the PossibleValues class contain these objects. - -- Once the XML has all been read in and the PossibleValues objects are - populated, the dependencies among variables are checked in Build.write_macros. - For each variable, if all its dependencies have been handled, it is converted - to a MacroConditionTree merged with all other trees for variables that are - ready, and written out. Then we loop through the variable list again to check - for variables whose dependencies are all handled. - -- The MacroConditionTree acts as a primitive syntax tree. Its __init__ method - reorganizes the data into conditional blocks, and its write_out method writes - uses the MakeMacroWriter/CMakeMacroWrite object to write to the Macros file. - MacroConditionTree objects can be merged to reduce the length of the output. -""" - -# These don't seem to be particularly useful checks. -# pylint: disable=invalid-name,too-few-public-methods,unused-wildcard-import -# pylint: disable=wildcard-import - -from CIME.XML.standard_module_setup import * -from CIME.BuildTools.valuesetting import ValueSetting -from CIME.BuildTools.possiblevalues import PossibleValues - -logger = logging.getLogger(__name__) - -class CompilerBlock(object): - - """Data used to translate a single element. - - This is used during write_macros to traverse the XML and create a list - of settings specified in the element. - - Public methods: - add_settings_to_lists - matches_machine - """ - - def __init__(self, writer, compiler_elem, machobj, db): - """Construct a CompilerBlock. - - Arguments: - writer - The Makefile/CMake writer object. - compiler_elem - An xml.ElementTree.Element corresponding to this - element. - machobj - Machines object for this machine. - """ - self._writer = writer - self._compiler_elem = compiler_elem - self._db = db - self._machobj = machobj - # If there's no COMPILER attribute, self._compiler is None. - self._compiler = db.get(compiler_elem, "COMPILER") - self._specificity = 0 - - def _handle_references(self, elem, set_up, tear_down, depends): - """Expand markup used internally. - - This function is responsible for expanding $ENV{...}, $VAR, and - $SHELL{...} syntax into Makefile/CMake syntax. - - Arguments: - elem - An ElementTree.Element containing text to expand. - set_up - A list to add any preparation commands to. - tear_down - A list to add any cleanup commands to. - depends - A set of variables that need to be set before this one. - - Note that while the return value of this function is the expanded - text, the set_up, tear_down, and depends variables are also - modified and thus serve as additional outputs. 
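For orientation, a minimal standalone sketch of the kind of reference expansion the docstring above describes: $ENV{NAME} markup is resolved from a supplied environment mapping, $VAR / ${VAR} markup is rewritten into Make-style $(VAR) text, and the referenced variable names are collected as dependencies. The expand_references helper, its regexes, and the hard-coded Make spelling are illustrative stand-ins only, not the CompilerBlock/writer implementation that follows in the diff.

    import re

    def expand_references(text, env):
        """Expand $VAR and $ENV{NAME} markup; return (new_text, depends)."""
        depends = set()

        # $ENV{NAME} -> value taken from the supplied environment mapping
        def sub_env(match):
            return env.get(match.group(1), "")

        text = re.sub(r"\$ENV\{(\w+)\}", sub_env, text)

        # $VAR or ${VAR} -> $(VAR), recording the dependency on VAR
        def sub_var(match):
            name = match.group(1)
            depends.add(name)
            return "$({})".format(name)

        text = re.sub(r"\$\{?(\w+)\}?", sub_var, text)
        return text, depends

    if __name__ == "__main__":
        out, deps = expand_references(
            "-I$NETCDF_PATH/include $ENV{EXTRA_FLAGS}", {"EXTRA_FLAGS": "-O2"})
        print(out)   # -I$(NETCDF_PATH)/include -O2
        print(deps)  # {'NETCDF_PATH'}

In this toy version the environment lookup happens inline; the real code instead emits build-system syntax and defers shell commands to set_up/tear_down hooks, which is why those lists appear as extra outputs in the docstring above.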
- """ - writer = self._writer - output = self._db.text(elem) - if output is None: - output = "" - - logger.debug("Initial output={}".format(output)) - reference_re = re.compile(r'\${?(\w+)}?') - env_ref_re = re.compile(r'\$ENV\{(\w+)\}') - shell_prefix = "$SHELL{" - - for m in reference_re.finditer(output): - var_name = m.groups()[0] - if var_name not in ("SHELL","ENV"): - output = output.replace(m.group(), writer.variable_string(var_name)) - depends.add(var_name) - - logger.debug("preenv pass output={}".format(output)) - - for m in env_ref_re.finditer(output): - logger.debug("look for {} in env {}".format(output,writer.environment_variable_string(m.groups()[0]))) - output = output.replace(m.group(), - writer.environment_variable_string(m.groups()[0])) - logger.debug("and output {}".format(output)) - - logger.debug("postenv pass output={}".format(output)) - - while shell_prefix in output: - sidx = output.index(shell_prefix) - brace_count = 1 - idx = 0 - for idx in range(sidx + len(shell_prefix), len(output)): - if output[idx] == "{": - brace_count += 1 - elif output[idx] == "}": - brace_count -= 1 - if brace_count == 0: - break - - command = output[sidx + len(shell_prefix) : idx] - logger.debug("execute {} in shell, command {}".format(output, command)) - new_set_up, inline, new_tear_down = \ - writer.shell_command_strings(command) - output = output.replace(output[sidx:idx+1], inline, 1) - if new_set_up is not None: - set_up.append(new_set_up) - if new_tear_down is not None: - tear_down.append(new_tear_down) - logger.debug("set_up {} inline {} tear_down {}".format(new_set_up,inline,new_tear_down)) - - logger.debug("First pass output={}".format(output)) - - return output - - def _elem_to_setting(self, elem): - """Take an element and convert it to a ValueSetting. - - Arguments: - elem - An ElementTree.Element with data to add. - - This function returns a tuple containing a ValueSetting - corresponding to the element, along with a set of names of - variables that this setting depends on. - """ - # Attributes on an element are the conditions on that element. - conditions = self._db.attrib(elem) - if self._compiler is not None: - conditions["COMPILER"] = self._compiler - # Deal with internal markup. - set_up = [] - tear_down = [] - depends = set() - value_text = self._handle_references(elem, set_up, - tear_down, depends) - # Create the setting object. - append = self._db.name(elem) == "append" - setting = ValueSetting(value_text, append, - conditions, set_up, tear_down) - - return (setting, depends) - - def _add_elem_to_lists(self, name, elem, value_lists): - """Add an element's data to an appropriate list of value settings. - - Arguments: - name - The name of the variable being set by this element. - elem - The element to translate into a ValueSetting. - value_lists - A dictionary of PossibleValues, containing the lists - of all settings for each variable. - """ - setting, depends = self._elem_to_setting(elem) - if name not in value_lists: - value_lists[name] = PossibleValues(name, setting, - self._specificity, depends) - else: - value_lists[name].add_setting(setting, self._specificity,depends) - - def add_settings_to_lists(self, flag_vars, value_lists): - """Add all data in the element to lists of settings. - - Arguments: - flag_vars - A set of variables containing "flag-like" data. - value_lists - A dictionary of PossibleValues, containing the lists - of all settings for each variable. - """ - for elem in self._db.get_children(root=self._compiler_elem): - # Deal with "flag"-type variables. 
- if self._db.name(elem) in flag_vars: - for child in self._db.get_children(root=elem): - self._add_elem_to_lists(self._db.name(elem), child, value_lists) - else: - self._add_elem_to_lists(self._db.name(elem), elem, value_lists) - - def matches_machine(self): - """Check whether this block matches a machine/os. - This also sets the specificity of the block, so this must be called - before add_settings_to_lists if machine-specific output is needed. - """ - self._specificity = 0 - if self._db.has(self._compiler_elem, "MACH"): - if self._machobj.get_machine_name() == \ - self._db.get(self._compiler_elem, "MACH"): - self._specificity += 2 - else: - return False - if self._db.has(self._compiler_elem, "OS"): - if self._machobj.get_value("OS") == self._db.get(self._compiler_elem, "OS"): - self._specificity += 1 - else: - return False - # Check if the compiler is valid on this machine. - if self._compiler is not None: - return self._machobj.is_valid_compiler(self._compiler) - else: - return True diff --git a/scripts/lib/CIME/XML/compilers.py b/scripts/lib/CIME/XML/compilers.py deleted file mode 100644 index 0215b35fc77..00000000000 --- a/scripts/lib/CIME/XML/compilers.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -Interface to the config_compilers.xml file. This class inherits from GenericXML.py -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files -from CIME.XML.compilerblock import CompilerBlock -from CIME.BuildTools.makemacroswriter import MakeMacroWriter -from CIME.BuildTools.cmakemacroswriter import CMakeMacroWriter -from CIME.BuildTools.macroconditiontree import merge_optional_trees -import six - -logger = logging.getLogger(__name__) - -class Compilers(GenericXML): - - def __init__(self, machobj, infile=None, compiler=None, mpilib=None, files=None, version=None): - """ - initialize an object - """ - - if infile is None: - if files is None: - files = Files() - infile = files.get_value("COMPILERS_SPEC_FILE") - schema = files.get_schema("COMPILERS_SPEC_FILE") - - GenericXML.__init__(self, infile, schema) - if version is not None: - # this is used in scripts_regression_tests to force version 2, it should not be used otherwise - self._version = version - else: - self._version = self.get_version() - - self._machobj = machobj - self.machine = machobj.get_machine_name() - self.os = machobj.get_value("OS") - if compiler is None: - compiler = machobj.get_default_compiler() - self.compiler = compiler - - if mpilib is None: - if compiler is None: - mpilib = machobj.get_default_MPIlib() - else: - mpilib = machobj.get_default_MPIlib(attributes={'compiler':compiler}) - self.mpilib = mpilib - - self.compiler_nodes = None # Listed from last to first - #Append the contents of $HOME/.cime/config_compilers.xml if it exists - #This could cause problems if node matchs are repeated when only one is expected - infile = os.path.join(os.environ.get("HOME"),".cime","config_compilers.xml") - if os.path.exists(infile): - GenericXML.read(self, infile, schema=schema) - - if self.compiler is not None: - self.set_compiler(compiler) - - if self._version > 1.0: - schema_db = GenericXML(infile=schema) - compiler_vars = schema_db.get_child("{http://www.w3.org/2001/XMLSchema}group", attributes={"name":"compilerVars"}) - choice = schema_db.get_child(name="{http://www.w3.org/2001/XMLSchema}choice", root=compiler_vars) - self.flag_vars = set(schema_db.get(elem, "name") for elem in schema_db.get_children(root=choice, attributes={"type":"flagsVar"})) - - def 
get_compiler(self): - """ - Return the name of the compiler - """ - return self.compiler - - def get_optional_compiler_node(self, nodename, attributes=None): - """ - Return data on a node for a compiler - """ - expect(self.compiler_nodes is not None, "Compiler not set, use parent get_node?") - for compiler_node in self.compiler_nodes: - result = self.get_optional_child(name=nodename, attributes=attributes, root=compiler_node) - if result is not None: - return result - - return None - - def _is_compatible(self, compiler_node, compiler, machine, os_, mpilib): - for xmlid, value in [ ("COMPILER", compiler), ("MACH", machine), ("OS", os_), ("MPILIB", mpilib) ]: - if value is not None and self.has(compiler_node, xmlid) and value != self.get(compiler_node, xmlid): - return False - - return True - - def set_compiler(self, compiler, machine=None, os_=None, mpilib=None): - """ - Sets the compiler block in the Compilers object - >>> from CIME.XML.machines import Machines - >>> compobj = Compilers(Machines(machine="melvin")) - >>> compobj.set_compiler("gnu") - >>> compobj.get_compiler() - 'gnu' - """ - machine = machine if machine else self.machine - os_ = os_ if os_ else self.os - mpilib = mpilib if mpilib else self.mpilib - - if self.compiler != compiler or self.machine != machine or self.os != os_ or self.mpilib != mpilib or self.compiler_nodes is None: - self.compiler_nodes = [] - nodes = self.get_children(name="compiler") - for node in nodes: - if self._is_compatible(node, compiler, machine, os_, mpilib): - self.compiler_nodes.append(node) - - self.compiler_nodes.reverse() - - self.compiler = compiler - self.machine = machine - self.os = os_ - self.mpilib = mpilib - - #pylint: disable=arguments-differ - def get_value(self, name, attribute=None, resolved=True, subgroup=None): - """ - Get Value of fields in the config_compilers.xml file - """ - expect(self.compiler_nodes is not None, "Compiler object has no compiler defined") - expect(subgroup is None, "This class does not support subgroups") - value = None - - node = self.get_optional_compiler_node(name, attributes=attribute) - if node is not None: - value = self.text(node) - - if resolved: - if value is not None: - value = self.get_resolved_value(value) - elif name in os.environ: - value = os.environ[name] - - return value - - def write_macros_file(self, macros_file="Macros.make", output_format="make", xml=None): - if self._version <= 1.0: - expect(False, "config_compilers.xml version '{}' is no longer supported".format(self._version)) - else: - if output_format == "make": - format_ = "Makefile" - elif output_format == "cmake": - format_ = "CMake" - else: - format_ = output_format - - if isinstance(macros_file, six.string_types): - with open(macros_file, "w") as macros: - self._write_macros_file(format_, macros) - else: - self._write_macros_file(format_, macros_file, xml) - - def _write_macros_file(self, build_system, output, xml=None): - """Write a Macros file for this machine. - - Arguments: - build_system - Format of the file to be written. Currently the only - valid values are "Makefile" and "CMake". - output - Text I/O object (inheriting from io.TextIOBase) that - output should be written to. Typically, this will be the - Macros file, opened for writing. - """ - # Set up writer for this build system. 
- if build_system == "Makefile": - writer = MakeMacroWriter(output) - elif build_system == "CMake": - writer = CMakeMacroWriter(output) - else: - expect(False, - "Unrecognized build system provided to write_macros: " + - build_system) - - # Start processing the file. - value_lists = dict() - node_list = [] - if xml is None: - node_list = self.get_children(name="compiler") - else: - gen_xml = GenericXML() - gen_xml.read_fd(xml) - node_list = gen_xml.get_children(name="compiler") - - for compiler_elem in node_list: - block = CompilerBlock(writer, compiler_elem, self._machobj, self) - # If this block matches machine settings, use it. - if block.matches_machine(): - block.add_settings_to_lists(self.flag_vars, value_lists) - # Now that we've scanned through the input, output the variable - # settings. - vars_written = set() - while value_lists: - # Variables that are ready to be written. - ready_variables = [ - var_name for var_name in value_lists - if value_lists[var_name].dependencies() <= vars_written - ] - expect(len(ready_variables) > 0, - "The file {} has bad $VAR references. " - "Check for circular references or variables that " - "are used in a $VAR but not actually defined.".format(self.filename)) - big_normal_trees = {} - big_append_tree = None - for var_name in ready_variables: - # Note that we're writing this variable. - vars_written.add(var_name) - # Make the conditional trees and write them out. - normal_trees, append_tree = \ - value_lists[var_name].to_cond_trees() - for spec in normal_trees: - if spec in big_normal_trees: - big_normal_trees[spec] = merge_optional_trees(normal_trees[spec], - big_normal_trees[spec]) - else: - big_normal_trees[spec] = normal_trees[spec] - big_append_tree = merge_optional_trees(append_tree, - big_append_tree) - # Remove this variable from the list of variables to handle - # next iteration. - del value_lists[var_name] - specificities = sorted(list(big_normal_trees.keys())) - for spec in specificities: - big_normal_trees[spec].write_out(writer) - if big_append_tree is not None: - big_append_tree.write_out(writer) diff --git a/scripts/lib/CIME/XML/component.py b/scripts/lib/CIME/XML/component.py deleted file mode 100644 index dcf6cb5fd96..00000000000 --- a/scripts/lib/CIME/XML/component.py +++ /dev/null @@ -1,318 +0,0 @@ -""" -Interface to the config_component.xml files. This class inherits from EntryID.py -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.entry_id import EntryID -from CIME.XML.files import Files -from CIME.utils import get_cime_root - -logger = logging.getLogger(__name__) - -class Component(EntryID): - - def __init__(self, infile, comp_class): - """ - initialize a Component obect from the component xml file in infile - associate the component class with comp_class if provided. - """ - self._comp_class = comp_class - if infile == 'testingonly': - self.filename = infile - return - files = Files() - schema = None - EntryID.__init__(self, infile) - schema = files.get_schema("CONFIG_{}_FILE".format(comp_class), attributes={"version":"{}".format(self.get_version())}) - - if schema is not None: - self.validate_xml_file(infile, schema) - - #pylint: disable=arguments-differ - def get_value(self, name, attribute=None, resolved=False, subgroup=None): - expect(subgroup is None, "This class does not support subgroups") - return EntryID.get_value(self, name, attribute, resolved) - - def get_valid_model_components(self): - """ - return a list of all possible valid generic (e.g. atm, clm, ...) 
model components - from the entries in the model CONFIG_CPL_FILE - """ - components = [] - comps_node = self.get_child("entry", {"id":"COMP_CLASSES"}) - comps = self.get_default_value(comps_node) - components = comps.split(',') - return components - - def _get_value_match(self, node, attributes=None, exact_match=False): - """ - return the best match for the node entries - Note that a component object uses a different matching algorithm than an entryid object - For a component object the _get_value_match used is below and is not the one in entry_id.py - """ - match_value = None - match_max = 0 - match_count = 0 - match_values = [] - expect(not exact_match, " exact_match not implemented in this method") - expect(node is not None," Empty node in _get_value_match") - values = self.get_optional_child("values", root=node) - if values is None: - return - - # determine match_type if there is a tie - # ASSUME a default of "last" if "match" attribute is not there - match_type = self.get(values, "match", default="last") - - # use the default_value if present - val_node = self.get_optional_child("default_value", root=node) - if val_node is None: - logger.debug("No default_value for {}".format(self.get(node, "id"))) - return val_node - value = self.text(val_node) - if value is not None and len(value) > 0 and value != "UNSET": - match_values.append(value) - - for valnode in self.get_children("value", root=values): - # loop through all the keys in valnode (value nodes) attributes - for key,value in self.attrib(valnode).items(): - # determine if key is in attributes dictionary - match_count = 0 - if attributes is not None and key in attributes: - if re.search(value, attributes[key]): - logger.debug("Value {} and key {} match with value {}".format(value, key, attributes[key])) - match_count += 1 - else: - match_count = 0 - break - - # a match is found - if match_count > 0: - # append the current result - if self.get(values, "modifier") == "additive": - match_values.append(self.text(valnode)) - - # replace the current result if it already contains the new value - # otherwise append the current result - elif self.get(values, "modifier") == "merge": - if self.text(valnode) in match_values: - del match_values[:] - match_values.append(self.text(valnode)) - - else: - if match_type == "last": - # take the *last* best match - if match_count >= match_max: - del match_values[:] - match_max = match_count - match_value = self.text(valnode) - elif match_type == "first": - # take the *first* best match - if match_count > match_max: - del match_values[:] - match_max = match_count - match_value = self.text(valnode) - else: - expect(False, "match attribute can only have a value of 'last' or 'first'") - - if len(match_values) > 0: - match_value = " ".join(match_values) - - return match_value - - #pylint: disable=arguments-differ - def get_description(self, compsetname): - if self.get_version() == 3.0: - return self._get_description_v3(compsetname, self._comp_class) - else: - return self._get_description_v2(compsetname) - - def get_forcing_description(self, compsetname): - if self.get_version() == 3.0: - return self._get_description_v3(compsetname, 'forcing') - else: - return "" - - def _get_description_v3(self, compsetname, comp_class): - """ - version 3 of the config_component.xml file has the description section at the top of the file - the description field has one attribute 'modifier_mode' which has allowed values - '*' 0 or more modifiers (default) - '1' exactly 1 modifier - '?' 
0 or 1 modifiers - '+' 1 or more modifiers - - modifiers are fields in the component section of the compsetname following the % symbol. - - The desc field can have an attribute which is the component class ('cpl', 'atm', 'lnd' etc) - or it can have an attribute 'option' which provides descriptions of each optional modifier - or (in the config_component_{model}.xml in the driver only) it can have the attribute 'forcing' - - component descriptions are matched to the compsetname using a set method - """ - expect(comp_class is not None,"comp_class argument required for version3 files") - comp_class = comp_class.lower() - rootnode = self.get_child("description") - desc = "" - desc_nodes = self.get_children("desc", root=rootnode) - - modifier_mode = self.get(rootnode, 'modifier_mode') - if modifier_mode is None: - modifier_mode = '*' - expect(modifier_mode in ('*','1','?','+'), - "Invalid modifier_mode {} in file {}".format(modifier_mode, self.filename)) - optiondesc = {} - if comp_class == "forcing": - for node in desc_nodes: - forcing = self.get(node, 'forcing') - if forcing is not None and compsetname.startswith(forcing+'_'): - expect(len(desc)==0, - "Too many matches on forcing field {} in file {}".\ - format(forcing, self.filename)) - desc = self.text(node) - if desc is None: - desc = compsetname.split('_')[0] - return desc - - - # first pass just make a hash of the option descriptions - for node in desc_nodes: - option = self.get(node, 'option') - if option is not None: - optiondesc[option] = self.text(node) - - #second pass find a comp_class match - desc = "" - for node in desc_nodes: - compdesc = self.get(node, comp_class) - - if compdesc is not None: - opt_parts = [ x.rstrip("]") for x in compdesc.split("[%") ] - parts = opt_parts.pop(0).split("%") - reqset = set(parts) - fullset = set(parts+opt_parts) - match, complist = self._get_description_match(compsetname, reqset, fullset, modifier_mode) - if match: - desc = self.text(node) - for opt in complist: - if opt in optiondesc: - desc += optiondesc[opt] - - - # cpl and esp components may not have a description - if comp_class not in ['cpl','esp']: - expect(len(desc) > 0, - "No description found for comp_class {} matching compsetname {} in file {}, expected match in {} % {}"\ - .format(comp_class,compsetname, self.filename, list(reqset), list(opt_parts))) - return desc - - def _get_description_match(self, compsetname, reqset, fullset, modifier_mode): - """ - - >>> obj = Component('testingonly', 'ATM') - >>> obj._get_description_match("1850_DATM%CRU_FRED",set(["DATM"]), set(["DATM","CRU","HSI"]), "*") - (True, ['DATM', 'CRU']) - >>> obj._get_description_match("1850_DATM%FRED_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "*") - (False, None) - >>> obj._get_description_match("1850_DATM_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "?") - (True, ['DATM']) - >>> obj._get_description_match("1850_DATM_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "1") # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: Expected exactly one modifer found 0 in ['DATM'] - >>> obj._get_description_match("1850_DATM%CRU%HSI_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "1") # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... 
- CIMEError: ERROR: Expected exactly one modifer found 2 in ['DATM', 'CRU', 'HSI'] - >>> obj._get_description_match("1850_CAM50%WCCM%RCO2_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "*") - (True, ['CAM50', 'WCCM', 'RCO2']) - - # The following is not allowed because the required WCCM field is missing - >>> obj._get_description_match("1850_CAM50%RCO2_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "*") - (False, None) - >>> obj._get_description_match("1850_CAM50_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+") - (False, None) - >>> obj._get_description_match("1850_CAM50%WCCM_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+") - (True, ['CAM50', 'WCCM']) - """ - match = False - comparts = compsetname.split('_') - matchcomplist = None - - for comp in comparts: - complist = comp.split('%') - cset = set(complist) - if cset == reqset or (cset > reqset and cset <= fullset): - if modifier_mode == '1': - expect(len(complist) == 2, - "Expected exactly one modifer found {} in {}".format(len(complist)-1,complist)) - elif modifier_mode == '+': - expect(len(complist) >= 2, - "Expected one or more modifers found {} in {}".format(len(complist)-1, list(reqset))) - elif modifier_mode == '?': - expect(len(complist) <= 2, - "Expected 0 or one modifers found {} in {}".format(len(complist)-1, complist)) - expect(not match,"Found multiple matches in file {} for {}".format(self.filename,comp)) - match = True - matchcomplist = complist - # found a match - - return match, matchcomplist - - def _get_description_v2(self, compsetname): - rootnode = self.get_child("description") - desc = "" - desc_nodes = self.get_children("desc", root=rootnode) - for node in desc_nodes: - compsetmatch = self.get(node, "compset") - if compsetmatch is not None and re.search(compsetmatch, compsetname): - desc += self.text(node) - - return desc - - def print_values(self): - """ - print values for help and description in target config_component.xml file - """ - helpnode = self.get_child("help") - helptext = self.text(helpnode) - logger.info(" {}".format(helptext)) - entries = self.get_children("entry") - for entry in entries: - name = self.get(entry, "id") - text = self.text(self.get_child("desc", root=entry)) - logger.info(" {:20s} : {}".format(name, text.encode('utf-8'))) - - def return_values(self): - """ - return a list of hashes from target config_component.xml file - This routine is used by external tools in https://github.com/NCAR/CESM_xml2html - """ - entry_dict = dict() - items = list() - helpnode = self.get_optional_child("help") - if helpnode: - helptext = self.text(helpnode) - else: - helptext = '' - entries = self.get_children("entry") - for entry in entries: - item = dict() - name = self.get(entry, "id") - datatype = self.text(self.get_child("type", root=entry)) - valid_values = self.get_valid_values(name) - default_value = self.get_default_value(node=entry) - group = self.text(self.get_child("group", root=entry)) - filename = self.text(self.get_child("file", root=entry)) - text = self.text(self.get_child("desc", root=entry)) - item = {"name":name, - "datatype":datatype, - "valid_values":valid_values, - "value":default_value, - "group":group, - "filename":filename, - "desc":text.encode('utf-8')} - items.append(item) - entry_dict = {"items" : items} - - return helptext, entry_dict diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py deleted file mode 100644 index 737e3cc1202..00000000000 --- a/scripts/lib/CIME/XML/compsets.py +++ /dev/null @@ 
-1,98 +0,0 @@ -""" -Common interface to XML files which follow the compsets format, -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML -from CIME.XML.entry_id import EntryID -from CIME.XML.files import Files - -logger = logging.getLogger(__name__) - -class Compsets(GenericXML): - - def __init__(self, infile=None, files=None): - if files is None: - files = Files() - schema = files.get_schema("COMPSETS_SPEC_FILE") - GenericXML.__init__(self, infile, schema=schema) - - def get_compset_match(self, name): - """ - science support is used in cesm to determine if this compset and grid - is scientifically supported. science_support is returned as an array of grids for this compset - """ - nodes = self.get_children("compset") - alias = None - lname = None - - science_support = [] - - for node in nodes: - alias = self.get_element_text("alias",root=node) - lname = self.get_element_text("lname",root=node) - if alias == name or lname == name: - science_support_nodes = self.get_children("science_support", root=node) - for snode in science_support_nodes: - science_support.append(self.get(snode, "grid")) - logger.debug("Found node match with alias: {} and lname: {}".format(alias, lname)) - return (lname, alias, science_support) - return (None, None, [False]) - - def get_compset_var_settings(self, compset, grid): - ''' - Variables can be set in config_compsets.xml in entry id settings with compset and grid attributes - find and return id value pairs here - ''' - entries = self.get_optional_child("entries") - result = [] - if entries is not None: - nodes = self.get_children("entry", root=entries) - # Get an empty entryid obj to use - entryidobj = EntryID() - for node in nodes: - value = entryidobj.get_default_value(node, {"grid":grid, "compset":compset}) - if value is not None: - result.append((self.get(node, "id"), value)) - - return result - - #pylint: disable=arguments-differ - def get_value(self, name, attribute=None, resolved=False, subgroup=None): - expect(subgroup is None, "This class does not support subgroups") - if name == "help": - rootnode = self.get_child("help") - helptext = self.text(rootnode) - return helptext - else: - compsets = {} - nodes = self.get_children("compset") - for node in nodes: - for child in node: - logger.debug ("Here child is {} with value {}".format(self.name(child),self.text(child))) - if self.name(child) == "alias": - alias = self.text(child) - if self.name(child) == "lname": - lname = self.text(child) - compsets[alias] = lname - return compsets - - def print_values(self, arg_help=True): - help_text = self.get_value(name="help") - compsets = self.get_children("compset") - if arg_help: - logger.info(" {} ".format(help_text)) - - logger.info(" --------------------------------------") - logger.info(" Compset Alias: Compset Long Name ") - logger.info(" --------------------------------------") - for compset in compsets: - logger.info(" {:20} : {}".format(self.text(self.get_child("alias",root=compset)), - self.text(self.get_child("lname", root=compset)))) - - def get_compset_longnames(self): - compset_nodes = self.get_children("compset") - longnames = [] - for comp in compset_nodes: - longnames.append(self.text(self.get_child("lname", root=comp))) - return(longnames) diff --git a/scripts/lib/CIME/XML/entry_id.py b/scripts/lib/CIME/XML/entry_id.py deleted file mode 100644 index d02e88680bb..00000000000 --- a/scripts/lib/CIME/XML/entry_id.py +++ /dev/null @@ -1,447 +0,0 @@ -""" -Common interface to XML files which follow the entry id 
format, -this is an abstract class and is expected to -be used by other XML interface modules and not directly. -""" -from CIME.XML.standard_module_setup import * -from CIME.utils import expect, convert_to_string, convert_to_type -from CIME.XML.generic_xml import GenericXML - -import six - -logger = logging.getLogger(__name__) - -class EntryID(GenericXML): - - def __init__(self, infile=None, schema=None, read_only=True): - GenericXML.__init__(self, infile, schema, read_only=read_only) - self.groups={} - - def get_default_value(self, node, attributes=None): - """ - Set the value of an entry to the default value for that entry - """ - value = self._get_value_match(node, attributes) - if value is None: - # Fall back to default value - value = self.get_element_text("default_value", root=node) - else: - logger.debug("node is {} value is {}".format(self.get(node, "id"), value)) - - if value is None: - logger.debug("For vid {} value is none".format(self.get(node, "id"))) - value = "" - - return value - - def set_default_value(self, vid, val): - node = self.get_optional_child("entry", {"id":vid}) - if node is not None: - val = self.set_element_text("default_value", val, root=node) - if val is None: - logger.warning("Called set_default_value on a node without default_value field") - - return val - - def get_value_match(self, vid, attributes=None, exact_match=False, entry_node=None): - # Handle this case: - # - # - # X - # Y - # Z - # - # - - if entry_node is not None: - value = self._get_value_match(entry_node, attributes, exact_match) - else: - node = self.get_optional_child("entry", {"id":vid}) - value = None - if node is not None: - value = self._get_value_match(node, attributes, exact_match) - logger.debug("(get_value_match) vid {} value {}".format(vid, value)) - return value - - def _get_value_match(self, node, attributes=None, exact_match=False): - ''' - Note that the component class has a specific version of this function - ''' - # if there is a element - check to see if there is a match attribute - # if there is NOT a match attribute, then set the default to "first" - # this is different than the component class _get_value_match where the default is "last" - values_node = self.get_optional_child("values", root=node) - if values_node is not None: - match_type = self.get(values_node, "match", default="first") - node = values_node - else: - match_type = "first" - - # Store nodes that match the attributes and their scores. - matches = [] - nodes = self.get_children("value", root=node) - for vnode in nodes: - # For each node in the list start a score. - score = 0 - if attributes: - for attribute in self.attrib(vnode).keys(): - # For each attribute, add to the score. - score += 1 - # If some attribute is specified that we don't know about, - # or the values don't match, it's not a match we want. - if exact_match: - if attribute not in attributes or \ - attributes[attribute] != self.get(vnode, attribute): - score = -1 - break - else: - if attribute not in attributes or not \ - re.search(self.get(vnode, attribute),attributes[attribute]): - score = -1 - break - - # Add valid matches to the list. 
- if score >= 0: - matches.append((score, vnode)) - - if not matches: - return None - - # Get maximum score using either a "last" or "first" match in case of a tie - max_score = -1 - mnode = None - for score,node in matches: - if match_type == "last": - # take the *last* best match - if score >= max_score: - max_score = score - mnode = node - elif match_type == "first": - # take the *first* best match - if score > max_score: - max_score = score - mnode = node - else: - expect(False, - "match attribute can only have a value of 'last' or 'first', value is %s" %match_type) - - return self.text(mnode) - - def get_node_element_info(self, vid, element_name): - node = self.get_optional_child("entry", {"id":vid}) - if node is None: - return None - else: - return self._get_node_element_info(node, element_name) - - def _get_node_element_info(self, node, element_name): - return self.get_element_text(element_name, root=node) - - def _get_type_info(self, node): - if node is None: - return None - val = self._get_node_element_info(node, "type") - if val is None: - return "char" - return val - - def get_type_info(self, vid): - vid, _, _ = self.check_if_comp_var(vid) - node = self.scan_optional_child("entry", {"id":vid}) - return self._get_type_info(node) - - # pylint: disable=unused-argument - def check_if_comp_var(self, vid, attribute=None, node=None): - # handled in classes - return vid, None, False - - def _get_default(self, node): - return self._get_node_element_info(node, "default_value") - - # Get description , expect child with tag "description" for parent node - def get_description (self, node): - return self._get_node_element_info(node, "desc") - - # Get group , expect node with tag "group" - # entry id nodes are children of group nodes - def get_groups(self, node): - groups = self.get_children("group") - result = [] - nodes = [] - vid = self.get(node, "id") - for group in groups: - nodes = self.get_children("entry", attributes={"id":vid}, root=group) - if nodes: - result.append(self.get(group, "id")) - - return result - - def get_valid_values(self, vid): - node = self.scan_optional_child("entry", {"id":vid}) - if node is None: - return None - return self._get_valid_values(node) - - def _get_valid_values(self, node): - valid_values = self.get_element_text("valid_values", root=node) - valid_values_list = [] - if valid_values: - valid_values_list = [item.lstrip() for item in valid_values.split(',')] - return valid_values_list - - def set_valid_values(self, vid, new_valid_values): - node = self.scan_optional_child("entry", {"id":vid}) - if node is None: - return None - return self._set_valid_values(node, new_valid_values) - - def get_nodes_by_id(self, vid): - return self.scan_children("entry", {"id":vid}) - - def _set_valid_values(self, node, new_valid_values): - old_vv = self._get_valid_values(node) - if old_vv is None: - self.make_child("valid_values", text=new_valid_values) - logger.debug("Adding valid_values {} for {}".format(new_valid_values, self.get(node, "id"))) - else: - vv_text = self.set_element_text("valid_values", new_valid_values, root=node) - logger.debug("Replacing valid_values {} with {} for {}".format(old_vv, vv_text, self.get(node, "id"))) - - current_value = self.get(node, "value") - valid_values_list = self._get_valid_values(node) - if current_value is not None and current_value not in valid_values_list: - logger.warning("WARNING: Current setting for {} not in new valid values. 
Updating setting to \"{}\"".format(self.get(node, "id"), valid_values_list[0])) - self._set_value(node, valid_values_list[0]) - return new_valid_values - - def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False): - """ - Set the value of an entry-id field to value - Returns the value or None if not found - subgroup is ignored in the general routine and applied in specific methods - """ - expect(subgroup is None, "Subgroup not supported") - str_value = self.get_valid_value_string(node, value, vid, ignore_type) - self.set(node, "value", str_value) - return value - - def get_valid_value_string(self, node, value,vid=None, ignore_type=False): - valid_values = self._get_valid_values(node) - if ignore_type: - expect(isinstance(value, six.string_types), "Value must be type string if ignore_type is true") - str_value = value - return str_value - type_str = self._get_type_info(node) - str_value = convert_to_string(value, type_str, vid) - - if valid_values and not str_value.startswith('$'): - expect(str_value in valid_values, "Did not find {} in valid values for {}: {}".format(value, vid, valid_values)) - return str_value - - def set_value(self, vid, value, subgroup=None, ignore_type=False): - """ - Set the value of an entry-id field to value - Returns the value or None if not found - subgroup is ignored in the general routine and applied in specific methods - """ - val = None - root = self.root if subgroup is None else self.get_optional_child("group", {"id":subgroup}) - node = self.get_optional_child("entry", {"id":vid}, root=root) - if node is not None: - val = self._set_value(node, value, vid, subgroup, ignore_type) - return val - - def get_values(self, vid, attribute=None, resolved=True, subgroup=None): - """ - Same functionality as get_value but it returns a list, if the - value in xml contains commas the list have multiple elements split on - commas - """ - results = [] - node = self.scan_optional_child("entry", {"id":vid}) - if node is None: - return results - str_result = self._get_value(node, attribute=attribute, resolved=resolved, subgroup=subgroup) - str_results = str_result.split(',') - for result in str_results: - # Return value as right type if we were able to fully resolve - # otherwise, we have to leave as string. - if "$" in result: - results.append(result) - else: - type_str = self._get_type_info(node) - results.append( convert_to_type(result, type_str, vid)) - return results - - #pylint: disable=arguments-differ - def get_value(self, vid, attribute=None, resolved=True, subgroup=None): - """ - Get a value for entry with id attribute vid. - or from the values field if the attribute argument is provided - and matches - """ - root = self.root if subgroup is None else self.get_optional_child("group", {"id":subgroup}) - node = self.scan_optional_child("entry", {"id":vid}, root=root) - if node is None: - return - - val = self._get_value(node, attribute=attribute, resolved=resolved, subgroup=subgroup) - # Return value as right type if we were able to fully resolve - # otherwise, we have to leave as string. 
- if val is None: - return val - elif "$" in val: - return val - else: - type_str = self._get_type_info(node) - return convert_to_type(val, type_str, vid) - - def _get_value(self, node, attribute=None, resolved=True, subgroup=None): - """ - internal get_value, does not convert to type - """ - logger.debug("(_get_value) ({}, {}, {})".format(attribute, resolved, subgroup)) - val = None - if node is None: - logger.debug("No node") - return val - - logger.debug("Found node {} with attributes {}".format(self.name(node) , self.attrib(node))) - if attribute: - vals = self.get_optional_child("values", root=node) - node = vals if vals is not None else node - val = self.get_element_text("value", attributes=attribute, root=node) - elif self.get(node, "value") is not None: - val = self.get(node, "value") - else: - val = self.get_default_value(node) - - if resolved: - val = self.get_resolved_value(val) - - return val - - def get_child_content(self, vid, childname): - val = None - node = self.get_optional_child("entry", {"id" : vid}) - if node is not None: - val = self.get_element_text(childname, root=node) - return val - - def get_elements_from_child_content(self, childname, childcontent): - nodes = self.get_children("entry") - elements = [] - for node in nodes: - content = self.get_element_text(childname, root=node) - expect(content is not None,"No childname {} for id {}".format(childname, self.get(node, "id"))) - if content == childcontent: - elements.append(node) - - return elements - - def add_elements_by_group(self, srcobj, attributes=None, infile=None): - """ - Add elements from srcobj to self under the appropriate - group element, entries to be added must have a child element - with value "infile" - """ - if infile is None: - infile = os.path.basename(self.filename) - - # First get the list of entries in srcobj with matching file children - nodelist = srcobj.get_elements_from_child_content('file', infile) - - # For matchs found: Remove {, , } - # children from each entry and set the default value for the - # new entries in self - putting the entries as children of - # group elements in file $file - for src_node in nodelist: - node = self.copy(src_node) - gname = srcobj.get_element_text("group", root=src_node) - if gname is None: - gname = "group_not_set" - - # If group with id=$gname does not exist in self.groups - # then create the group node and add it to infile file - if gname not in self.groups.keys(): - # initialize an empty list - newgroup = self.make_child(name="group", attributes={"id":gname}) - self.groups[gname] = newgroup - - # Remove {, , } from the entry element - self.cleanupnode(node) - - # Add the entry element to the group - self.add_child(node, root=self.groups[gname]) - - # Set the default value, it may be determined by a regular - # expression match to a dictionary value in attributes matching a - # value attribute in node - value = srcobj.get_default_value(src_node, attributes) - if value is not None and len(value): - self._set_value(node, value) - - logger.debug ("Adding to group " + gname) - - return nodelist - - def cleanupnode(self, node): - """ - in env_base.py, not expected to get here - """ - expect(False, " Not expected to be here {}".format(self.get(node, "id"))) - - def compare_xml(self, other, root=None, otherroot=None): - xmldiffs = {} - if root is not None: - expect(otherroot is not None," inconsistant request") - f1nodes = self.scan_children("entry", root=root) - for node in f1nodes: - vid = self.get(node, "id") - logger.debug("Compare vid {}".format(vid)) - 
f2match = other.scan_optional_child("entry", attributes={"id":vid},root=otherroot) - expect(f2match is not None,"Could not find {} in Locked file".format(vid)) - if node != f2match: - f1val = self.get_value(vid, resolved=False) - if f1val is not None: - f2val = other.get_value(vid, resolved=False) - if f1val != f2val: - xmldiffs[vid] = [f1val, f2val] - elif hasattr(self, "_components"): - # pylint: disable=no-member - for comp in self._components: - f1val = self.get_value("{}_{}".format(vid,comp), resolved=False) - if f1val is not None: - f2val = other.get_value("{}_{}".format(vid,comp), resolved=False) - if f1val != f2val: - xmldiffs[vid] = [f1val, f2val] - else: - if node != f2match: - f1value_nodes = self.get_children("value", root=node) - for valnode in f1value_nodes: - f2valnodes = other.get_children("value", root=f2match, attributes=self.attrib(valnode)) - for f2valnode in f2valnodes: - if self.attrib(valnode) is None and self.attrib(f2valnode) is None or \ - self.attrib(f2valnode) == self.attrib(valnode): - if other.get_resolved_value(self.text(f2valnode)) != self.get_resolved_value(self.text(valnode)): - xmldiffs["{}:{}".format(vid, self.attrib(valnode))] = [self.text(valnode), self.text(f2valnode)] - return xmldiffs - - def overwrite_existing_entries(self): - # if there exist two nodes with the same id delete the first one. - for node in self.get_children("entry"): - vid = self.get(node, "id") - samenodes = self.get_nodes_by_id(vid) - if len(samenodes) > 1: - expect(len(samenodes) == 2, "Too many matchs for id {} in file {}".format(vid, self.filename)) - logger.debug("Overwriting node {}".format(vid)) - read_only = self.read_only - if read_only: - self.read_only = False - self.remove_child(samenodes[0]) - self.read_only = read_only - - def __iter__(self): - for node in self.scan_children("entry"): - vid = self.get(node, "id") - yield vid, self.get_value(vid) diff --git a/scripts/lib/CIME/XML/env_archive.py b/scripts/lib/CIME/XML/env_archive.py deleted file mode 100644 index 469ac39d2e7..00000000000 --- a/scripts/lib/CIME/XML/env_archive.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Interface to the env_archive.xml file. 
This class inherits from EnvBase -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.archive_base import ArchiveBase -from CIME.XML.env_base import EnvBase - -logger = logging.getLogger(__name__) -# pylint: disable=super-init-not-called -class EnvArchive(ArchiveBase,EnvBase): - - def __init__(self, case_root=None, infile="env_archive.xml", read_only=False): - """ - initialize an object interface to file env_archive.xml in the case directory - """ - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_archive.xsd") - EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) - - def get_entries(self): - return self.get_children('comp_archive_spec') - - def get_entry_info(self, archive_entry): - compname = self.get(archive_entry, 'compname') - compclass = self.get(archive_entry, 'compclass') - return compname,compclass - - def get_rpointer_contents(self, archive_entry): - rpointer_items = [] - rpointer_nodes = self.get_children('rpointer', root=archive_entry) - for rpointer_node in rpointer_nodes: - file_node = self.get_child('rpointer_file', root=rpointer_node) - content_node = self.get_child('rpointer_content', root=rpointer_node) - rpointer_items.append([self.text(file_node),self.text(content_node)]) - return rpointer_items - - def get_type_info(self, vid): - return "char" diff --git a/scripts/lib/CIME/XML/env_base.py b/scripts/lib/CIME/XML/env_base.py deleted file mode 100644 index 801a315a87b..00000000000 --- a/scripts/lib/CIME/XML/env_base.py +++ /dev/null @@ -1,228 +0,0 @@ -""" -Base class for env files. This class inherits from EntryID.py -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.entry_id import EntryID -from CIME.XML.headers import Headers -from CIME.utils import convert_to_type -logger = logging.getLogger(__name__) - -class EnvBase(EntryID): - - def __init__(self, case_root, infile, schema=None, read_only=False): - if case_root is None: - case_root = os.getcwd() - self._caseroot = case_root - if os.path.isabs(infile): - fullpath = infile - else: - fullpath = os.path.join(case_root, infile) - - EntryID.__init__(self, fullpath, schema=schema, read_only=read_only) - - self._id_map = None - self._group_map = None - - if not os.path.isfile(fullpath): - headerobj = Headers() - headernode = headerobj.get_header_node(os.path.basename(fullpath)) - self.add_child(headernode) - else: - self._setup_cache() - - def _setup_cache(self): - self._id_map = {} # map id directly to nodes - self._group_map = {} # map group name to entry id dict - - group_elems = self.get_children("group") - for group_elem in group_elems: - group_name = self.get(group_elem, "id") - expect(group_name not in self._group_map, "Repeat group '{}'".format(group_name)) - group_map = {} - self._group_map[group_name] = group_map - entry_elems = self.get_children("entry", root=group_elem) - for entry_elem in entry_elems: - entry_id = self.get(entry_elem, "id") - expect(entry_id not in group_map, "Repeat entry '{}' in group '{}'".format(entry_id, group_name)) - group_map[entry_id] = entry_elem - if entry_id in self._id_map: - self._id_map[entry_id].append(entry_elem) - else: - self._id_map[entry_id] = [entry_elem] - - self.lock() - - def change_file(self, newfile, copy=False): - self.unlock() - EntryID.change_file(self, newfile, copy=copy) - self._setup_cache() - - def get_children(self, name=None, attributes=None, root=None): - if self.locked and name == "entry" and attributes is not None and attributes.keys() == ["id"]: - entry_id = attributes["id"] - if 
root is None or self.name(root) == "file": - if entry_id in self._id_map: - return self._id_map[entry_id] - else: - return [] - else: - expect(self.name(root) == "group", "Unexpected elem '{}' for {}, attrs {}".format(self.name(root), self.filename, self.attrib(root))) - group_id = self.get(root, "id") - if group_id in self._group_map and entry_id in self._group_map[group_id]: - return [self._group_map[group_id][entry_id]] - else: - return [] - - else: - # Non-compliant look up - return EntryID.get_children(self, name=name, attributes=attributes, root=root) - - def scan_children(self, nodename, attributes=None, root=None): - if self.locked and nodename == "entry" and attributes is not None and attributes.keys() == ["id"]: - return EnvBase.get_children(self, name=nodename, attributes=attributes, root=root) - else: - return EntryID.scan_children(self, nodename, attributes=attributes, root=root) - - def set_components(self, components): - if hasattr(self, '_components'): - # pylint: disable=attribute-defined-outside-init - self._components = components - - def check_if_comp_var(self, vid, attribute=None, node=None): - comp = None - if node is None: - nodes = self.scan_children("entry", {"id" : vid}) - if len(nodes): - node = nodes[0] - - if node: - valnodes = self.scan_children("value", attributes={"compclass":None}, root=node) - if len(valnodes) == 0: - logger.debug("vid {} is not a compvar".format(vid)) - return vid, None, False - else: - logger.debug("vid {} is a compvar".format(vid)) - if attribute is not None: - comp = attribute["compclass"] - return vid, comp, True - else: - if hasattr(self, "_components") and self._components: - new_vid = None - for comp in self._components: - if vid.endswith('_'+comp): - new_vid = vid.replace('_'+comp, '', 1) - elif vid.startswith(comp+'_'): - new_vid = vid.replace(comp+'_', '', 1) - elif '_' + comp + '_' in vid: - new_vid = vid.replace(comp+'_','', 1) - if new_vid is not None: - break - if new_vid is not None: - logger.debug("vid {} is a compvar with comp {}".format(vid, comp)) - return new_vid, comp, True - - return vid, None, False - - def get_value(self, vid, attribute=None, resolved=True, subgroup=None): - """ - Get a value for entry with id attribute vid. 
- or from the values field if the attribute argument is provided
- and matches
- """
- value = None
- vid, comp, iscompvar = self.check_if_comp_var(vid, attribute)
- logger.debug("vid {} comp {} iscompvar {}".format(vid, comp, iscompvar))
- if iscompvar:
- if comp is None:
- if subgroup is not None:
- comp = subgroup
- else:
- logger.debug("Not enough info to get value for {}".format(vid))
- return value
- if attribute is None:
- attribute = {"compclass" : comp}
- else:
- attribute["compclass"] = comp
- node = self.scan_optional_child("entry", {"id":vid})
- if node is not None:
- type_str = self._get_type_info(node)
- values = self.get_optional_child("values", root=node)
- node = values if values is not None else node
- val = self.get_element_text("value", attribute, root=node)
- if val is not None:
- if val.startswith("$"):
- value = val
- else:
- value = convert_to_type(val,type_str, vid)
- return value
-
- return EntryID.get_value(self, vid, attribute=attribute, resolved=resolved, subgroup=subgroup)
-
- def set_value(self, vid, value, subgroup=None, ignore_type=False):
- """
- Set the value of an entry-id field to value
- Returns the value or None if not found
- subgroup is ignored in the general routine and applied in specific methods
- """
- vid, comp, iscompvar = self.check_if_comp_var(vid, None)
- val = None
- root = self.root if subgroup is None else self.get_optional_child("group", {"id":subgroup})
- node = self.scan_optional_child("entry", {"id":vid}, root=root)
- if node is not None:
- if iscompvar and comp is None:
- # pylint: disable=no-member
- for comp in self._components:
- val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp)
- else:
- val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp)
- return val
-
- # pylint: disable=arguments-differ
- def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None):
- if vid is None:
- vid = self.get(node, "id")
- vid, _, iscompvar = self.check_if_comp_var(vid, node=node)
-
- if iscompvar:
- expect(compclass is not None, "compclass must be specified if is comp var")
- attribute = {"compclass":compclass}
- str_value = self.get_valid_value_string(node, value, vid, ignore_type)
- values = self.get_optional_child("values", root=node)
- node = values if values is not None else node
- val = self.set_element_text("value", str_value, attribute, root=node)
- else:
- val = EntryID._set_value(self, node, value, vid, subgroup, ignore_type)
- return val
-
- def get_nodes_by_id(self, varid):
- varid, _, _ = self.check_if_comp_var(varid, None)
- return EntryID.get_nodes_by_id(self, varid)
-
- def cleanupnode(self, node):
- """
- Remove the file, group, and default_value child nodes from node
- """
- fnode = self.get_child("file", root=node)
- self.remove_child(fnode, node)
- gnode = self.get_child("group", root=node)
- self.remove_child(gnode, node)
- dnode = self.get_optional_child("default_value", root=node)
- if dnode is not None:
- self.remove_child(dnode, node)
-
- vnode = self.get_optional_child("values", root=node)
- if vnode is not None:
- componentatt = self.get_children("value", attributes={"component":"ATM"}, root=vnode)
- # backward compatibility (compclasses and component were mixed
- # now we separated into component and compclass)
- if len(componentatt) > 0:
- for ccnode in self.get_children("value", attributes={"component":None}, root=vnode):
- val = self.get(ccnode, "component")
- self.pop(ccnode, "component")
- self.set(ccnode, "compclass", val)
-
- compclassatt =
self.get_children("value", attributes={"compclass":None}, root=vnode) - if len(compclassatt) == 0: - self.remove_child(vnode, root=node) - - return node diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py deleted file mode 100644 index 1ab8cd4244c..00000000000 --- a/scripts/lib/CIME/XML/env_batch.py +++ /dev/null @@ -1,912 +0,0 @@ -""" -Interface to the env_batch.xml file. This class inherits from EnvBase -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.env_base import EnvBase -from CIME.utils import transform_vars, get_cime_root, convert_to_seconds, get_cime_config, get_batch_script_for_job, get_logging_options - -from collections import OrderedDict -import stat, re, math - -logger = logging.getLogger(__name__) - -# pragma pylint: disable=attribute-defined-outside-init - -class EnvBatch(EnvBase): - - def __init__(self, case_root=None, infile="env_batch.xml", read_only=False): - """ - initialize an object interface to file env_batch.xml in the case directory - """ - self._batchtype = None - # This arbitrary setting should always be overwritten - self._default_walltime = "00:20:00" - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_batch.xsd") - super(EnvBatch,self).__init__(case_root, infile, schema=schema, read_only=read_only) - - # pylint: disable=arguments-differ - def set_value(self, item, value, subgroup=None, ignore_type=False): - """ - Override the entry_id set_value function with some special cases for this class - """ - val = None - - if item == "JOB_QUEUE": - expect(value in self._get_all_queue_names() or ignore_type, - "Unknown Job Queue specified use --force to set") - - # allow the user to set item for all jobs if subgroup is not provided - if subgroup is None: - gnodes = self.get_children("group") - for gnode in gnodes: - node = self.get_optional_child("entry", {"id":item}, root=gnode) - if node is not None: - self._set_value(node, value, vid=item, ignore_type=ignore_type) - val = value - else: - group = self.get_optional_child("group", {"id":subgroup}) - if group is not None: - node = self.get_optional_child("entry", {"id":item}, root=group) - if node is not None: - val = self._set_value(node, value, vid=item, ignore_type=ignore_type) - - return val - - # pylint: disable=arguments-differ - def get_value(self, item, attribute=None, resolved=True, subgroup=None): - """ - Must default subgroup to something in order to provide single return value - """ - - value = None - node = self.get_optional_child(item, attribute) - if node is None: - # this will take the last instance of item listed in all batch_system elements - bs_nodes = self.get_children("batch_system") - for bsnode in bs_nodes: - cnode = self.get_optional_child(item, attribute, root=bsnode) - if cnode is not None: - node = cnode - if node is None or item in ("BATCH_SYSTEM", "PROJECT_REQUIRED"): - value = super(EnvBatch, self).get_value(item,attribute,resolved) - else: - value = self.text(node) - if resolved: - value = self.get_resolved_value(value) - - return value - - def get_type_info(self, vid): - gnodes = self.get_children("group") - for gnode in gnodes: - nodes = self.get_children("entry",{"id":vid}, root=gnode) - type_info = None - for node in nodes: - new_type_info = self._get_type_info(node) - if type_info is None: - type_info = new_type_info - else: - expect( type_info == new_type_info, - "Inconsistent type_info for entry id={} {} {}".format(vid, new_type_info, type_info)) - return type_info - - def get_jobs(self): - groups = 
self.get_children("group") - results = [] - for group in groups: - if self.get(group, "id") not in ["job_submission", "config_batch"]: - results.append(self.get(group, "id")) - - return results - - def create_job_groups(self, batch_jobs, is_test): - # Subtle: in order to support dynamic batch jobs, we need to remove the - # job_submission group and replace with job-based groups - - orig_group = self.get_child("group", {"id":"job_submission"}, - err_msg="Looks like job groups have already been created") - orig_group_children = super(EnvBatch, self).get_children(root=orig_group) - - childnodes = [] - for child in reversed(orig_group_children): - childnodes.append(child) - - self.remove_child(orig_group) - - for name, jdict in batch_jobs: - if name == "case.run" and is_test: - pass # skip - elif name == "case.test" and not is_test: - pass # skip - elif name == "case.run.sh": - pass # skip - else: - new_job_group = self.make_child("group", {"id":name}) - for field in jdict.keys(): - val = jdict[field] - node = self.make_child("entry", {"id":field,"value":val}, root=new_job_group) - self.make_child("type", root=node, text="char") - - for child in childnodes: - self.add_child(self.copy(child), root=new_job_group) - - def cleanupnode(self, node): - if self.get(node, "id") == "batch_system": - fnode = self.get_child(name="file", root=node) - self.remove_child(fnode, root=node) - gnode = self.get_child(name="group", root=node) - self.remove_child(gnode, root=node) - vnode = self.get_optional_child(name="values", root=node) - if vnode is not None: - self.remove_child(vnode, root=node) - else: - node = super(EnvBatch, self).cleanupnode(node) - return node - - def set_batch_system(self, batchobj, batch_system_type=None): - if batch_system_type is not None: - self.set_batch_system_type(batch_system_type) - - if batchobj.batch_system_node is not None and batchobj.machine_node is not None: - for node in batchobj.get_children("",root=batchobj.machine_node): - name = self.name(node) - if name != 'directives': - oldnode = batchobj.get_optional_child(name, root=batchobj.batch_system_node) - if oldnode is not None: - logger.debug( "Replacing {}".format(self.name(oldnode))) - batchobj.remove_child(oldnode, root=batchobj.batch_system_node) - - if batchobj.batch_system_node is not None: - self.add_child(self.copy(batchobj.batch_system_node)) - if batchobj.machine_node is not None: - self.add_child(self.copy(batchobj.machine_node)) - self.set_value("BATCH_SYSTEM", batch_system_type) - - def get_job_overrides(self, job, case): - env_workflow = case.get_env('workflow') - total_tasks, num_nodes, tasks_per_node, thread_count = env_workflow.get_job_specs(job) - overrides = {} - - if total_tasks: - overrides["total_tasks"] = total_tasks - overrides["num_nodes"] = num_nodes - overrides["tasks_per_node"] = tasks_per_node - if thread_count: - overrides["thread_count"] = thread_count - else: - total_tasks = case.get_value("TOTALPES")*int(case.thread_count) - thread_count = case.thread_count - if int(total_tasks)*int(thread_count) < case.get_value("MAX_TASKS_PER_NODE"): - overrides["max_tasks_per_node"] = int(total_tasks) - - overrides["mpirun"] = case.get_mpirun_cmd(job=job, overrides=overrides) - return overrides - - def make_batch_script(self, input_template, job, case, outfile=None): - expect(os.path.exists(input_template), "input file '{}' does not exist".format(input_template)) - overrides = self.get_job_overrides(job, case) - ext = os.path.splitext(job)[-1] - if len(ext) == 0: - ext = job - if ext.startswith('.'): - 
ext = ext[1:] - overrides["job_id"] = ext + '.' + case.get_value("CASE") - if "pleiades" in case.get_value("MACH"): - # pleiades jobname needs to be limited to 15 chars - overrides["job_id"] = overrides["job_id"][:15] - overrides["batchdirectives"] = self.get_batch_directives(case, job, overrides=overrides) - output_text = transform_vars(open(input_template,"r").read(), case=case, subgroup=job, overrides=overrides) - output_name = get_batch_script_for_job(job) if outfile is None else outfile - logger.info("Creating file {}".format(output_name)) - with open(output_name, "w") as fd: - fd.write(output_text) - - # make sure batch script is exectuble - os.chmod(output_name, os.stat(output_name).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) - - def set_job_defaults(self, batch_jobs, case): - if self._batchtype is None: - self._batchtype = self.get_batch_system_type() - - if self._batchtype == "none": - return - env_workflow = case.get_env('workflow') - known_jobs = env_workflow.get_jobs() - - for job, jsect in batch_jobs: - if job not in known_jobs: - continue - - walltime = case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) if case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) else None - force_queue = case.get_value("USER_REQUESTED_QUEUE", subgroup=job) if case.get_value("USER_REQUESTED_QUEUE", subgroup=job) else None - logger.info("job is {} USER_REQUESTED_WALLTIME {} USER_REQUESTED_QUEUE {}".format(job, walltime, force_queue)) - task_count = int(jsect["task_count"]) if "task_count" in jsect else case.total_tasks - walltime = jsect["walltime"] if ("walltime" in jsect and walltime is None) else walltime - if "task_count" in jsect: - # job is using custom task_count, need to compute a node_count based on this - node_count = int(math.ceil(float(task_count)/float(case.tasks_per_node))) - else: - node_count = case.num_nodes - - if force_queue: - if not self.queue_meets_spec(force_queue, node_count, task_count, walltime=walltime, job=job): - logger.warning("WARNING: User-requested queue '{}' does not meet requirements for job '{}'".format(force_queue, job)) - if self.queue_meets_spec(force_queue, node_count, task_count, walltime=None, job=job): - if case.get_value("TEST"): - walltime = self.get_queue_specs(force_queue)[3] - logger.warning(" Using walltime '{}' instead".format(walltime)) - else: - logger.warning(" Continuing with suspect walltime, batch submission may fail") - - queue = force_queue - else: - queue = self.select_best_queue(node_count, task_count, walltime=walltime, job=job) - if queue is None and walltime is not None: - # Try to see if walltime was the holdup - queue = self.select_best_queue(node_count, task_count, walltime=None, job=job) - if queue is not None: - # It was, override the walltime if a test, otherwise just warn the user - new_walltime = self.get_queue_specs(queue)[3] - expect(new_walltime is not None, "Should never make it here") - logger.warning("WARNING: Requested walltime '{}' could not be matched by any queue".format(walltime)) - if case.get_value("TEST"): - logger.warning(" Using walltime '{}' instead".format(new_walltime)) - walltime = new_walltime - else: - logger.warning(" Continuing with suspect walltime, batch submission may fail") - - if queue is None: - logger.warning("WARNING: No queue on this system met the requirements for this job. 
Falling back to defaults") - default_queue_node = self.get_default_queue() - queue = self.text(default_queue_node) - walltime = self.get_queue_specs(queue)[3] - - specs = self.get_queue_specs(queue) - if walltime is None: - # Figure out walltime - if specs is None: - # Queue is unknown, use specs from default queue - walltime = self.get(self.get_default_queue(), "walltimemax") - else: - walltime = specs[3] - - walltime = self._default_walltime if walltime is None else walltime # last-chance fallback - env_workflow.set_value("JOB_QUEUE", queue, subgroup=job, ignore_type=specs is None) - env_workflow.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job) - logger.debug("Job {} queue {} walltime {}".format(job, queue, walltime)) - - def _match_attribs(self, attribs, case, queue): - # check for matches with case-vars - for attrib in attribs: - if attrib in ["default", "prefix"]: - # These are not used for matching - continue - - elif attrib == "queue": - if not self._match(queue, attribs["queue"]): - return False - - else: - val = case.get_value(attrib.upper()) - expect(val is not None, "Cannot match attrib '%s', case has no value for it" % attrib.upper()) - if not self._match(val, attribs[attrib]): - return False - - return True - - def _match(self, my_value, xml_value): - if xml_value.startswith("!"): - result = re.match(xml_value[1:],str(my_value)) is None - elif isinstance(my_value, bool): - if my_value: result = xml_value == "TRUE" - else: result = xml_value == "FALSE" - else: - result = re.match(xml_value,str(my_value)) is not None - - logger.debug("(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result)) - return result - - def get_batch_directives(self, case, job, overrides=None, output_format='default'): - """ - """ - result = [] - directive_prefix = None - - roots = self.get_children("batch_system") - queue = self.get_value("JOB_QUEUE", subgroup=job) - if self._batchtype != "none" and not queue in self._get_all_queue_names(): - unknown_queue = True - qnode = self.get_default_queue() - default_queue = self.text(qnode) - else: - unknown_queue = False - - for root in roots: - if root is not None: - if directive_prefix is None: - if output_format == 'default': - directive_prefix = self.get_element_text("batch_directive", root=root) - elif output_format == 'cylc': - directive_prefix = " " - if unknown_queue: - unknown_queue_directives = self.get_element_text("unknown_queue_directives", - root=root) - if unknown_queue_directives is None: - queue = default_queue - else: - queue = unknown_queue_directives - - dnodes = self.get_children("directives", root=root) - for dnode in dnodes: - nodes = self.get_children("directive", root=dnode) - if self._match_attribs(self.attrib(dnode), case, queue): - for node in nodes: - directive = self.get_resolved_value("" if self.text(node) is None else self.text(node)) - if output_format == 'cylc': - if self._batchtype == 'pbs': - # cylc includes the -N itself, no need to add - if directive.startswith("-N"): - directive='' - continue - m = re.match(r'\s*(-[\w])', directive) - if m: - directive = re.sub(r'(-[\w]) ','{} = '.format(m.group(1)), directive) - - default = self.get(node, "default") - if default is None: - directive = transform_vars(directive, case=case, subgroup=job, default=default, overrides=overrides) - else: - directive = transform_vars(directive, default=default) - - custom_prefix = self.get(node, "prefix") - prefix = directive_prefix if custom_prefix is None else custom_prefix - - result.append("{}{}".format("" if not prefix 
else (prefix + " "), directive)) - - return "\n".join(result) - - def get_submit_args(self, case, job): - ''' - return a list of touples (flag, name) - ''' - submitargs = " " - bs_nodes = self.get_children("batch_system") - submit_arg_nodes = [] - - for node in bs_nodes: - sanode = self.get_optional_child("submit_args", root=node) - if sanode is not None: - submit_arg_nodes += self.get_children("arg",root=sanode) - - for arg in submit_arg_nodes: - flag = self.get(arg, "flag") - name = self.get(arg, "name") - if self._batchtype == "cobalt" and job == "case.st_archive": - if flag == "-n": - name = 'task_count' - if flag == "--mode": - continue - - if name is None: - submitargs+=" {}".format(flag) - else: - if name.startswith("$"): - name = name[1:] - - if '$' in name: - # We have a complex expression and must rely on get_resolved_value. - # Hopefully, none of the values require subgroup - val = case.get_resolved_value(name) - else: - val = case.get_value(name, subgroup=job) - - if val is not None and len(str(val)) > 0 and val != "None": - # Try to evaluate val if it contains any whitespace - if " " in val: - try: - rval = eval(val) - except Exception: - rval = val - else: - rval = val - # need a correction for tasks per node - if flag == "-n" and rval<= 0: - rval = 1 - - if flag == "-q" and rval == "batch" and case.get_value("MACH") == "blues": - # Special case. Do not provide '-q batch' for blues - continue - - if flag.rfind("=", len(flag)-1, len(flag)) >= 0 or\ - flag.rfind(":", len(flag)-1, len(flag)) >= 0: - submitargs+=" {}{}".format(flag,str(rval).strip()) - else: - submitargs+=" {} {}".format(flag,str(rval).strip()) - - return submitargs - - def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None, skip_pnl=False, - allow_fail=False, resubmit_immediate=False, mail_user=None, mail_type=None, - batch_args=None, dry_run=False): - env_workflow = case.get_env('workflow') - external_workflow = case.get_value("EXTERNAL_WORKFLOW") - alljobs = env_workflow.get_jobs() - alljobs = [j for j in alljobs - if os.path.isfile(os.path.join(self._caseroot,get_batch_script_for_job(j)))] - startindex = 0 - jobs = [] - firstjob = job - if job is not None: - expect(job in alljobs, "Do not know about batch job {}".format(job)) - startindex = alljobs.index(job) - for index, job in enumerate(alljobs): - logger.debug( "Index {:d} job {} startindex {:d}".format(index, job, startindex)) - if index < startindex: - continue - try: - prereq = env_workflow.get_value('prereq', subgroup=job, resolved=False) - if external_workflow or prereq is None or job == firstjob or (dry_run and prereq == "$BUILD_COMPLETE"): - prereq = True - else: - prereq = case.get_resolved_value(prereq) - prereq = eval(prereq) - except Exception: - expect(False,"Unable to evaluate prereq expression '{}' for job '{}'".format(self.get_value('prereq',subgroup=job), job)) - if prereq: - jobs.append((job, env_workflow.get_value('dependency', subgroup=job))) - - if self._batchtype == "cobalt": - break - depid = OrderedDict() - jobcmds = [] - - if resubmit_immediate: - num_submit = case.get_value("RESUBMIT") + 1 - case.set_value("RESUBMIT", 0) - if num_submit <= 0: - num_submit = 1 - else: - num_submit = 1 - - prev_job = None - batch_job_id = None - for _ in range(num_submit): - for job, dependency in jobs: - if dependency is not None: - deps = dependency.split() - else: - deps = [] - dep_jobs = [] - if user_prereq is not None: - dep_jobs.append(user_prereq) - for dep in deps: - if dep in depid.keys() and depid[dep] is not None: - 
dep_jobs.append(str(depid[dep])) - if prev_job is not None: - dep_jobs.append(prev_job) - - logger.debug("job {} depends on {}".format(job, dep_jobs)) - result = self._submit_single_job(case, job, - skip_pnl=skip_pnl, - resubmit_immediate=resubmit_immediate, - dep_jobs=dep_jobs, - allow_fail=allow_fail, - no_batch=no_batch, - mail_user=mail_user, - mail_type=mail_type, - batch_args=batch_args, - dry_run=dry_run) - batch_job_id = str(alljobs.index(job)) if dry_run else result - depid[job] = batch_job_id - jobcmds.append( (job, result) ) - - if self._batchtype == "cobalt" or external_workflow: - break - - if not external_workflow and not no_batch: - expect(batch_job_id, "No result from jobs {}".format(jobs)) - prev_job = batch_job_id - - if dry_run: - return jobcmds - else: - return depid - - @staticmethod - def _get_supported_args(job, no_batch): - """ - Returns a map of the supported parameters and their arguments to the given script - TODO: Maybe let each script define this somewhere? - - >>> EnvBatch._get_supported_args("", False) - {} - >>> EnvBatch._get_supported_args("case.test", False) - {'skip_pnl': '--skip-preview-namelist'} - >>> EnvBatch._get_supported_args("case.st_archive", True) - {'resubmit': '--resubmit'} - """ - supported = {} - if job in ["case.run", "case.test"]: - supported["skip_pnl"] = "--skip-preview-namelist" - if job == "case.run": - supported["set_continue_run"] = "--completion-sets-continue-run" - if job in ["case.st_archive", "case.run"]: - if job == "case.st_archive" and no_batch: - supported["resubmit"] = "--resubmit" - else: - supported["submit_resubmits"] = "--resubmit" - return supported - - @staticmethod - def _build_run_args(job, no_batch, **run_args): - """ - Returns a map of the filtered parameters for the given script, - as well as the values passed and the equivalent arguments for calling the script - - >>> EnvBatch._build_run_args("case.run", False, skip_pnl=True, cthulu="f'taghn") - {'skip_pnl': (True, '--skip-preview-namelist')} - >>> EnvBatch._build_run_args("case.run", False, skip_pnl=False, cthulu="f'taghn") - {} - """ - supported_args = EnvBatch._get_supported_args(job, no_batch) - args = {} - for arg_name, arg_value in run_args.items(): - if arg_value and (arg_name in supported_args.keys()): - args[arg_name] = (arg_value, supported_args[arg_name]) - return args - - def _build_run_args_str(self, job, no_batch, **run_args): - """ - Returns a string of the filtered arguments for the given script, - based on the arguments passed - """ - args = self._build_run_args(job, no_batch, **run_args) - run_args_str = " ".join(param for _, param in args.values()) - logging_options = get_logging_options() - if logging_options: - run_args_str += " {}".format(logging_options) - - batch_env_flag = self.get_value("batch_env", subgroup=None) - if not batch_env_flag: - return run_args_str - elif len(run_args_str) > 0: - batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) - logger.debug("batch_system: {}: ".format(batch_system)) - if batch_system == "lsf": - return "{} \"all, ARGS_FOR_SCRIPT={}\"".format(batch_env_flag, run_args_str) - else: - return "{} ARGS_FOR_SCRIPT='{}'".format(batch_env_flag, run_args_str) - else: - return "" - - def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False, - no_batch=False, skip_pnl=False, mail_user=None, mail_type=None, - batch_args=None, dry_run=False, resubmit_immediate=False): - - if not dry_run: - logger.warning("Submit job {}".format(job)) - batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) 
- if batch_system is None or batch_system == "none" or no_batch: - logger.info("Starting job script {}".format(job)) - function_name = job.replace(".", "_") - job_name = "."+job - if not dry_run: - args = self._build_run_args(job, True, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate, - submit_resubmits=not resubmit_immediate) - try: - if hasattr(case, function_name): - getattr(case, function_name)(**{k: v for k, (v, _) in args.items()}) - else: - expect(os.path.isfile(job_name),"Could not find file {}".format(job_name)) - run_cmd_no_fail(os.path.join(self._caseroot,job_name), combine_output=True, verbose=True, from_dir=self._caseroot) - except Exception as e: - # We don't want exception from the run phases getting into submit phase - logger.warning("Exception from {}: {}".format(function_name, str(e))) - - return - - submitargs = self.get_submit_args(case, job) - args_override = self.get_value("BATCH_COMMAND_FLAGS", subgroup=job) - if args_override: - submitargs = args_override - - if dep_jobs is not None and len(dep_jobs) > 0: - logger.debug("dependencies: {}".format(dep_jobs)) - if allow_fail: - dep_string = self.get_value("depend_allow_string", subgroup=None) - if dep_string is None: - logger.warning("'depend_allow_string' is not defined for this batch system, " + - "falling back to the 'depend_string'") - dep_string = self.get_value("depend_string", subgroup=None) - else: - dep_string = self.get_value("depend_string", subgroup=None) - expect(dep_string is not None, "'depend_string' is not defined for this batch system") - - separator_string = self.get_value("depend_separator", subgroup=None) - expect(separator_string is not None,"depend_separator string not defined") - - expect("jobid" in dep_string, "depend_string is missing jobid for prerequisite jobs") - dep_ids_str = str(dep_jobs[0]) - for dep_id in dep_jobs[1:]: - dep_ids_str += separator_string + str(dep_id) - dep_string = dep_string.replace("jobid",dep_ids_str.strip()) # pylint: disable=maybe-no-member - submitargs += " " + dep_string - - if batch_args is not None: - submitargs += " " + batch_args - - cime_config = get_cime_config() - - if mail_user is None and cime_config.has_option("main", "MAIL_USER"): - mail_user = cime_config.get("main", "MAIL_USER") - - if mail_user is not None: - mail_user_flag = self.get_value('batch_mail_flag', subgroup=None) - if mail_user_flag is not None: - submitargs += " " + mail_user_flag + " " + mail_user - - if mail_type is None: - if job == "case.test" and cime_config.has_option("create_test", "MAIL_TYPE"): - mail_type = cime_config.get("create_test", "MAIL_TYPE") - elif cime_config.has_option("main", "MAIL_TYPE"): - mail_type = cime_config.get("main", "MAIL_TYPE") - else: - mail_type = self.get_value("batch_mail_default") - - if mail_type: - mail_type = mail_type.split(",") # pylint: disable=no-member - - if mail_type: - mail_type_flag = self.get_value("batch_mail_type_flag", subgroup=None) - if mail_type_flag is not None: - mail_type_args = [] - for indv_type in mail_type: - mail_type_arg = self.get_batch_mail_type(indv_type) - mail_type_args.append(mail_type_arg) - - if mail_type_flag == "-m": - # hacky, PBS-type systems pass multiple mail-types differently - submitargs += " {} {}".format(mail_type_flag, "".join(mail_type_args)) - else: - submitargs += " {} {}".format(mail_type_flag, " {} ".format(mail_type_flag).join(mail_type_args)) - batchsubmit = self.get_value("batch_submit", subgroup=None) - expect(batchsubmit is not None, - "Unable to determine the correct command for batch 
submission.") - batchredirect = self.get_value("batch_redirect", subgroup=None) - batch_env_flag = self.get_value("batch_env", subgroup=None) - run_args = self._build_run_args_str(job, False, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate, - submit_resubmits=not resubmit_immediate) - if batch_system == 'lsf' and not batch_env_flag: - sequence = (run_args, batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job)) - elif batch_env_flag: - sequence = (batchsubmit, submitargs, run_args, batchredirect, get_batch_script_for_job(job)) - else: - sequence = (batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job), run_args) - - submitcmd = " ".join(s.strip() for s in sequence if s is not None) - if dry_run: - return submitcmd - else: - logger.info("Submitting job script {}".format(submitcmd)) - output = run_cmd_no_fail(submitcmd, combine_output=True) - jobid = self.get_job_id(output) - logger.info("Submitted job id is {}".format(jobid)) - return jobid - - def get_batch_mail_type(self, mail_type): - raw = self.get_value("batch_mail_type", subgroup=None) - mail_types = [item.strip() for item in raw.split(",")] # pylint: disable=no-member - idx = ["never", "all", "begin", "end", "fail"].index(mail_type) - - return mail_types[idx] if idx < len(mail_types) else None - - def get_batch_system_type(self): - nodes = self.get_children("batch_system") - for node in nodes: - type_ = self.get(node, "type") - if type_ is not None: - self._batchtype = type_ - return self._batchtype - - def set_batch_system_type(self, batchtype): - self._batchtype = batchtype - - def get_job_id(self, output): - jobid_pattern = self.get_value("jobid_pattern", subgroup=None) - expect(jobid_pattern is not None, "Could not find jobid_pattern in env_batch.xml") - search_match = re.search(jobid_pattern, output) - expect(search_match is not None, - "Couldn't match jobid_pattern '{}' within submit output:\n '{}'".format(jobid_pattern, output)) - jobid = search_match.group(1) - return jobid - - def queue_meets_spec(self, queue, num_nodes, num_tasks, walltime=None, job=None): - specs = self.get_queue_specs(queue) - if specs is None: - logger.warning("WARNING: queue '{}' is unknown to this system".format(queue)) - return True - - nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict = specs - - # A job name match automatically meets spec - if job is not None and jobname is not None: - return jobname == job - - if nodemin is not None and num_nodes < nodemin or \ - nodemax is not None and num_nodes > nodemax or \ - jobmin is not None and num_tasks < jobmin or \ - jobmax is not None and num_tasks > jobmax: - return False - - if walltime is not None and walltimemax is not None and strict: - walltime_s = convert_to_seconds(walltime) - walltimemax_s = convert_to_seconds(walltimemax) - if walltime_s > walltimemax_s: - return False - - return True - - def _get_all_queue_names(self): - all_queues = [] - all_queues = self.get_all_queues() - # Default queue needs to be first - all_queues.insert(0, self.get_default_queue()) - - queue_names = [] - for queue in all_queues: - queue_names.append(self.text(queue)) - - return queue_names - - def select_best_queue(self, num_nodes, num_tasks, walltime=None, job=None): - # Make sure to check default queue first. - qnames = self._get_all_queue_names() - for qname in qnames: - if self.queue_meets_spec(qname, num_nodes, num_tasks, walltime=walltime, job=job): - return qname - - return None - - def get_queue_specs(self, queue): - """ - Get queue specifications by name. 
- - Returns (nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, is_strict) - """ - for queue_node in self.get_all_queues(): - if self.text(queue_node) == queue: - nodemin = self.get(queue_node, "nodemin") - nodemin = None if nodemin is None else int(nodemin) - nodemax = self.get(queue_node, "nodemax") - nodemax = None if nodemax is None else int(nodemax) - - jobmin = self.get(queue_node, "jobmin") - jobmin = None if jobmin is None else int(jobmin) - jobmax = self.get(queue_node, "jobmax") - jobmax = None if jobmax is None else int(jobmax) - - expect( nodemin is None or jobmin is None, "Cannot specify both nodemin and jobmin for a queue") - expect( nodemax is None or jobmax is None, "Cannot specify both nodemax and jobmax for a queue") - - jobname = self.get(queue_node, "jobname") - walltimemax = self.get(queue_node, "walltimemax") - strict = self.get(queue_node, "strict") == "true" - - return nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict - - return None - - def get_default_queue(self): - bs_nodes = self.get_children("batch_system") - node = None - for bsnode in bs_nodes: - qnodes = self.get_children("queues", root=bsnode) - for qnode in qnodes: - node = self.get_optional_child("queue", attributes={"default" : "true"}, root=qnode) - if node is None: - node = self.get_optional_child("queue", root=qnode) - - expect(node is not None, "No queues found") - return node - - def get_all_queues(self): - bs_nodes = self.get_children("batch_system") - nodes = [] - for bsnode in bs_nodes: - qnode = self.get_optional_child("queues", root=bsnode) - if qnode is not None: - nodes.extend(self.get_children("queue", root=qnode)) - return nodes - - def get_children(self, name=None, attributes=None, root=None): - if name == "PROJECT_REQUIRED": - nodes = super(EnvBatch, self).get_children("entry", attributes={"id":name}, root=root) - else: - nodes = super(EnvBatch, self).get_children(name, attributes=attributes, root=root) - - return nodes - - def get_status(self, jobid): - batch_query = self.get_optional_child("batch_query") - if batch_query is None: - logger.warning("Batch queries not supported on this platform") - else: - cmd = self.text(batch_query) + " " - if self.has(batch_query, "per_job_arg"): - cmd += self.get(batch_query, "per_job_arg") + " " - - cmd += jobid - - status, out, err = run_cmd(cmd) - if status != 0: - logger.warning("Batch query command '{}' failed with error '{}'".format(cmd, err)) - else: - return out.strip() - - def cancel_job(self, jobid): - batch_cancel = self.get_optional_child("batch_cancel") - if batch_cancel is None: - logger.warning("Batch cancellation not supported on this platform") - return False - else: - cmd = self.text(batch_cancel) + " " + str(jobid) - - status, out, err = run_cmd(cmd) - if status != 0: - logger.warning("Batch cancel command '{}' failed with error '{}'".format(cmd, out + "\n" + err)) - else: - return True - - def compare_xml(self, other): - xmldiffs = {} - f1batchnodes = self.get_children("batch_system") - for bnode in f1batchnodes: - f2bnodes = other.get_children("batch_system", - attributes = self.attrib(bnode)) - f2bnode=None - if len(f2bnodes): - f2bnode = f2bnodes[0] - f1batchnodes = self.get_children(root=bnode) - for node in f1batchnodes: - name = self.name(node) - text1 = self.text(node) - text2 = "" - attribs = self.attrib(node) - f2matches = other.scan_children(name, attributes=attribs, root=f2bnode) - foundmatch=False - for chkmatch in f2matches: - name2 = other.name(chkmatch) - attribs2 = other.attrib(chkmatch) - text2 = 
other.text(chkmatch) - if(name == name2 and attribs==attribs2 and text1==text2): - foundmatch=True - break - if not foundmatch: - xmldiffs[name] = [text1, text2] - - f1groups = self.get_children("group") - for node in f1groups: - group = self.get(node, "id") - f2group = other.get_child("group", attributes={"id":group}) - xmldiffs.update(super(EnvBatch, self).compare_xml(other, - root=node, otherroot=f2group)) - return xmldiffs - - def make_all_batch_files(self, case): - machdir = case.get_value("MACHDIR") - env_workflow = case.get_env("workflow") - logger.info("Creating batch scripts") - jobs = env_workflow.get_jobs() - for job in jobs: - template = case.get_resolved_value(env_workflow.get_value('template', subgroup=job)) - - if os.path.isabs(template): - input_batch_script = template - else: - input_batch_script = os.path.join(machdir,template) - if os.path.isfile(input_batch_script): - logger.info("Writing {} script from input template {}".format(job, input_batch_script)) - self.make_batch_script(input_batch_script, job, case) - else: - logger.warning("Input template file {} for job {} does not exist or cannot be read.".format(input_batch_script, job)) diff --git a/scripts/lib/CIME/XML/env_build.py b/scripts/lib/CIME/XML/env_build.py deleted file mode 100644 index 2c022a40691..00000000000 --- a/scripts/lib/CIME/XML/env_build.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Interface to the env_build.xml file. This class inherits from EnvBase -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.env_base import EnvBase - -logger = logging.getLogger(__name__) - -class EnvBuild(EnvBase): - # pylint: disable=unused-argument - def __init__(self, case_root=None, infile="env_build.xml",components=None, read_only=False): - """ - initialize an object interface to file env_build.xml in the case directory - """ - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd") - EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) diff --git a/scripts/lib/CIME/XML/env_case.py b/scripts/lib/CIME/XML/env_case.py deleted file mode 100644 index ad2a2317f2b..00000000000 --- a/scripts/lib/CIME/XML/env_case.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Interface to the env_case.xml file. This class inherits from EnvBase -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.env_base import EnvBase - -logger = logging.getLogger(__name__) - -class EnvCase(EnvBase): - # pylint: disable=unused-argument - def __init__(self, case_root=None, infile="env_case.xml", components=None, read_only=False): - """ - initialize an object interface to file env_case.xml in the case directory - """ - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd") - EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) diff --git a/scripts/lib/CIME/XML/env_mach_pes.py b/scripts/lib/CIME/XML/env_mach_pes.py deleted file mode 100644 index 207e2880fc7..00000000000 --- a/scripts/lib/CIME/XML/env_mach_pes.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Interface to the env_mach_pes.xml file. 
This class inherits from EntryID -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.env_base import EnvBase -import math - -logger = logging.getLogger(__name__) - -class EnvMachPes(EnvBase): - - def __init__(self, case_root=None, infile="env_mach_pes.xml", components=None, read_only=False): - """ - initialize an object interface to file env_mach_pes.xml in the case directory - """ - self._components = components - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_mach_pes.xsd") - EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) - - def add_comment(self, comment): - if comment is not None: - node = self.make_child("comment", text=comment) - # make_child adds to the end of the file but we want it to follow the header - # so we need to remove it and add it in the correct position - self.remove_child(node) - self.add_child(node, position=1) - - def get_value(self, vid, attribute=None, resolved=True, subgroup=None, max_mpitasks_per_node=None): # pylint: disable=arguments-differ - # Special variable NINST_MAX is used to determine the number of - # drivers in multi-driver mode. - if vid == "NINST_MAX": - # in the nuopc driver there is only a single NINST value - value = 1 - for comp in self._components: - if comp != "CPL": - value = max(value, self.get_value("NINST_{}".format(comp))) - return value - - value = EnvBase.get_value(self, vid, attribute, resolved, subgroup) - - if "NTASKS" in vid or "ROOTPE" in vid: - if max_mpitasks_per_node is None: - max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") - if value is not None and value < 0: - value = -1*value*max_mpitasks_per_node - # in the nuopc driver there is only one NINST value - # so that NINST_{comp} = NINST - if "NINST_" in vid and value is None: - value = self.get_value("NINST") - return value - - def set_value(self, vid, value, subgroup=None, ignore_type=False): - """ - Set the value of an entry-id field to value - Returns the value or None if not found - subgroup is ignored in the general routine and applied in specific methods - """ - if vid == "MULTI_DRIVER" and value: - ninst_max = self.get_value("NINST_MAX") - for comp in self._components: - if comp == "CPL": - continue - ninst = self.get_value("NINST_{}".format(comp)) - expect(ninst == ninst_max, - "All components must have the same NINST value in multi_driver mode. 
NINST_{}={} should be {}".format(comp,ninst,ninst_max))
- if "NTASKS" in vid or "NTHRDS" in vid:
- expect(value != 0, "Cannot set NTASKS or NTHRDS to 0")
-
-
- return EnvBase.set_value(self, vid, value, subgroup=subgroup, ignore_type=ignore_type)
-
-
- def get_max_thread_count(self, comp_classes):
- ''' Find the maximum number of openmp threads for any component in the case '''
- max_threads = 1
- for comp in comp_classes:
- threads = self.get_value("NTHRDS",attribute={"compclass":comp})
- expect(threads is not None, "Error no thread count found for component class {}".format(comp))
- if threads > max_threads:
- max_threads = threads
- return max_threads
-
- def get_total_tasks(self, comp_classes):
- total_tasks = 0
- maxinst = self.get_value("NINST")
- if maxinst:
- comp_interface = "nuopc"
- else:
- comp_interface = 'unknown'
- maxinst = 1
- for comp in comp_classes:
- ntasks = self.get_value("NTASKS", attribute={"compclass":comp})
- rootpe = self.get_value("ROOTPE", attribute={"compclass":comp})
- pstrid = self.get_value("PSTRID", attribute={"compclass":comp})
- if comp != "CPL" and comp_interface!="nuopc":
- ninst = self.get_value("NINST", attribute={"compclass":comp})
- maxinst = max(maxinst, ninst)
- tt = rootpe + (ntasks - 1) * pstrid + 1
- total_tasks = max(tt, total_tasks)
- if self.get_value("MULTI_DRIVER"):
- total_tasks *= maxinst
- return total_tasks
-
- def get_tasks_per_node(self, total_tasks, max_thread_count):
- expect(total_tasks > 0,"totaltasks > 0 expected, totaltasks = {}".format(total_tasks))
- tasks_per_node = min(self.get_value("MAX_TASKS_PER_NODE")// max_thread_count,
- self.get_value("MAX_MPITASKS_PER_NODE"), total_tasks)
- return tasks_per_node if tasks_per_node > 0 else 1
-
- def get_total_nodes(self, total_tasks, max_thread_count):
- """
- Return (num_active_nodes, num_spare_nodes)
- """
- tasks_per_node = self.get_tasks_per_node(total_tasks, max_thread_count)
- num_nodes = int(math.ceil(float(total_tasks) / tasks_per_node))
- return num_nodes, self.get_spare_nodes(num_nodes)
-
- def get_spare_nodes(self, num_nodes):
- force_spare_nodes = self.get_value("FORCE_SPARE_NODES")
- if force_spare_nodes != -999:
- return force_spare_nodes
-
- if self.get_value("ALLOCATE_SPARE_NODES"):
- ten_pct = int(math.ceil(float(num_nodes) * 0.1))
- if ten_pct < 1:
- return 1 # Always provide at least one spare node
- elif ten_pct > 10:
- return 10 # Never provide more than 10 spare nodes
- else:
- return ten_pct
- else:
- return 0
diff --git a/scripts/lib/CIME/XML/env_mach_specific.py b/scripts/lib/CIME/XML/env_mach_specific.py
deleted file mode 100644
index 86a6bfc75dc..00000000000
--- a/scripts/lib/CIME/XML/env_mach_specific.py
+++ /dev/null
@@ -1,491 +0,0 @@
-"""
-Interface to the env_mach_specific.xml file.
This class inherits from EnvBase -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.env_base import EnvBase -from CIME.utils import transform_vars, get_cime_root -import string, resource -from collections import OrderedDict - -logger = logging.getLogger(__name__) - -# Is not of type EntryID but can use functions from EntryID (e.g -# get_type) otherwise need to implement own functions and make GenericXML parent class -class EnvMachSpecific(EnvBase): - # pylint: disable=unused-argument - def __init__(self, caseroot=None, infile="env_mach_specific.xml", - components=None, unit_testing=False, read_only=False): - """ - initialize an object interface to file env_mach_specific.xml in the case directory - """ - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_mach_specific.xsd") - EnvBase.__init__(self, caseroot, infile, schema=schema, read_only=read_only) - self._allowed_mpi_attributes = ("compiler", "mpilib", "threaded", "unit_testing", "queue") - self._unit_testing = unit_testing - - def populate(self, machobj): - """Add entries to the file using information from a Machines object.""" - items = ("module_system", "environment_variables", "resource_limits", "mpirun", "run_exe","run_misc_suffix") - default_run_suffix = machobj.get_child("default_run_suffix", root=machobj.root) - default_run_exe_node = machobj.get_child("default_run_exe", root=default_run_suffix) - default_run_misc_suffix_node = machobj.get_child("default_run_misc_suffix", root=default_run_suffix) - - group_node = self.make_child("group", {"id":"compliant_values"}) - - for item in items: - nodes = machobj.get_first_child_nodes(item) - if item == "run_exe" or item == "run_misc_suffix": - if len(nodes) == 0: - value = self.text(default_run_exe_node) if item == "run_exe" else self.text(default_run_misc_suffix_node) - else: - value = self.text(nodes[0]) - - entity_node = self.make_child("entry", {"id":item, "value":value}, root=group_node) - - self.make_child("type", root=entity_node, text="char") - self.make_child("desc", root=entity_node, text=("executable name" if item == "run_exe" else "redirect for job output")) - - else: - for node in nodes: - self.add_child(node) - - def _get_modules_for_case(self, case, job=None): - module_nodes = self.get_children("modules", root=self.get_child("module_system")) - modules_to_load = None - if module_nodes is not None: - modules_to_load = self._compute_module_actions(module_nodes, case, job=job) - - return modules_to_load - - def _get_envs_for_case(self, case, job=None): - env_nodes = self.get_children("environment_variables") - - envs_to_set = None - if env_nodes is not None: - envs_to_set = self._compute_env_actions(env_nodes, case, job=job) - - return envs_to_set - - def load_env(self, case, force_method=None, job=None, verbose=False): - """ - Should only be called by case.load_env - """ - # Do the modules so we can refer to env vars set by the modules - # in the environment_variables block - modules_to_load = self._get_modules_for_case(case) - if (modules_to_load is not None): - self._load_modules(modules_to_load, force_method=force_method, verbose=verbose) - - envs_to_set = self._get_envs_for_case(case, job=job) - if (envs_to_set is not None): - self._load_envs(envs_to_set, verbose=verbose) - - self._get_resources_for_case(case) - - def _get_resources_for_case(self, case): - resource_nodes = self.get_children("resource_limits") - if resource_nodes is not None: - nodes = self._compute_resource_actions(resource_nodes, case) - for name, val in nodes: - attr 
= getattr(resource, name) - limits = resource.getrlimit(attr) - logger.info("Setting resource.{} to {} from {}".format(name, val, limits)) - limits = (int(val), limits[1]) - resource.setrlimit(attr, limits) - - def _load_modules(self, modules_to_load, force_method=None, verbose=False): - module_system = self.get_module_system_type() if force_method is None else force_method - if (module_system == "module"): - self._load_module_modules(modules_to_load, verbose=verbose) - elif (module_system == "soft"): - self._load_modules_generic(modules_to_load, verbose=verbose) - elif (module_system == "generic"): - self._load_modules_generic(modules_to_load, verbose=verbose) - elif (module_system == "none"): - self._load_none_modules(modules_to_load) - else: - expect(False, "Unhandled module system '{}'".format(module_system)) - - def list_modules(self): - module_system = self.get_module_system_type() - - # If the user's login shell is not sh, it's possible that modules - # won't be configured so we need to be sure to source the module - # setup script if it exists. - init_path = self.get_module_system_init_path("sh") - if init_path: - source_cmd = "source {} && ".format(init_path) - else: - source_cmd = "" - - if (module_system in ["module"]): - return run_cmd_no_fail("{}module list".format(source_cmd), combine_output=True) - elif (module_system == "soft"): - # Does soft really not provide this capability? - return "" - elif (module_system == "generic"): - return run_cmd_no_fail("{}use -lv".format(source_cmd)) - elif (module_system == "none"): - return "" - else: - expect(False, "Unhandled module system '{}'".format(module_system)) - - def save_all_env_info(self, filename): - """ - Get a string representation of all current environment info and - save it to file. 
- """ - with open(filename, "w") as f: - f.write(self.list_modules()) - run_cmd_no_fail("echo -e '\n' && env", arg_stdout=filename) - - def make_env_mach_specific_file(self, shell, case): - module_system = self.get_module_system_type() - sh_init_cmd = self.get_module_system_init_path(shell) - sh_mod_cmd = self.get_module_system_cmd_path(shell) - lines = ["# This file is for user convenience only and is not used by the model"] - - lines.append("# Changes to this file will be ignored and overwritten") - lines.append("# Changes to the environment should be made in env_mach_specific.xml") - lines.append("# Run ./case.setup --reset to regenerate this file") - if sh_init_cmd: - lines.append("source {}".format(sh_init_cmd)) - - if "SOFTENV_ALIASES" in os.environ: - lines.append("source $SOFTENV_ALIASES") - if "SOFTENV_LOAD" in os.environ: - lines.append("source $SOFTENV_LOAD") - - modules_to_load = self._get_modules_for_case(case) - envs_to_set = self._get_envs_for_case(case) - filename = ".env_mach_specific.{}".format(shell) - if modules_to_load is not None: - if module_system == "module": - lines.extend(self._get_module_commands(modules_to_load, shell)) - else: - for action, argument in modules_to_load: - lines.append("{} {} {}".format(sh_mod_cmd, action, "" if argument is None else argument)) - - if envs_to_set is not None: - for env_name, env_value in envs_to_set: - if shell == "sh": - lines.append("export {}={}".format(env_name, env_value)) - elif shell == "csh": - lines.append("setenv {} {}".format(env_name, env_value)) - else: - expect(False, "Unknown shell type: '{}'".format(shell)) - - with open(filename, "w") as fd: - fd.write("\n".join(lines)) - - # Private API - - def _load_envs(self, envs_to_set, verbose=False): - for env_name, env_value in envs_to_set: - logger_func = logger.warning if verbose else logger.debug - if env_value is None and env_name in os.environ: - del os.environ[env_name] - logger_func("Unsetting Environment {}".format(env_name)) - elif env_value is not None: - os.environ[env_name] = env_value - logger_func("Setting Environment {}={}".format(env_name, env_value)) - - def _compute_module_actions(self, module_nodes, case, job=None): - return self._compute_actions(module_nodes, "command", case, job=job) - - def _compute_env_actions(self, env_nodes, case, job=None): - return self._compute_actions(env_nodes, "env", case, job=job) - - def _compute_resource_actions(self, resource_nodes, case, job=None): - return self._compute_actions(resource_nodes, "resource", case, job=job) - - def _compute_actions(self, nodes, child_tag, case, job=None): - result = [] # list of tuples ("name", "argument") - compiler, mpilib = case.get_value("COMPILER"), case.get_value("MPILIB") - - for node in nodes: - if (self._match_attribs(self.attrib(node), case, job=job)): - for child in self.get_children(root=node): - expect(self.name(child) == child_tag, "Expected {} element".format(child_tag)) - if (self._match_attribs(self.attrib(child), case, job=job)): - val = self.text(child) - if val is not None: - # We allow a couple special substitutions for these fields - for repl_this, repl_with in [("$COMPILER", compiler), ("$MPILIB", mpilib)]: - val = val.replace(repl_this, repl_with) - - val = self.get_resolved_value(val) - expect("$" not in val, "Not safe to leave unresolved items in env var value: '{}'".format(val)) - - # intentional unindent, result is appended even if val is None - result.append( (self.get(child, "name"), val) ) - - return result - - def _match_attribs(self, attribs, case, 
job=None): - # check for matches with case-vars - for attrib in attribs: - if attrib == "unit_testing": # special case - if not self._match(self._unit_testing, attribs["unit_testing"].upper()): - return False - elif attrib == "queue": - if job is not None: - val = case.get_value("JOB_QUEUE", subgroup=job) - expect(val is not None, "Cannot match attrib '%s', case has no value for it" % attrib.upper()) - if not self._match(val, attribs[attrib]): - return False - elif attrib == "name": - pass - else: - val = case.get_value(attrib.upper()) - expect(val is not None, "Cannot match attrib '%s', case has no value for it" % attrib.upper()) - if not self._match(val, attribs[attrib]): - return False - - return True - - def _match(self, my_value, xml_value): - if xml_value.startswith("!"): - result = re.match(xml_value[1:] + "$",str(my_value)) is None - elif isinstance(my_value, bool): - if my_value: result = xml_value == "TRUE" - else: result = xml_value == "FALSE" - else: - result = re.match(xml_value + "$",str(my_value)) is not None - - logger.debug("(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result)) - return result - - def _get_module_commands(self, modules_to_load, shell): - # Note this is independent of module system type - mod_cmd = self.get_module_system_cmd_path(shell) - cmds = [] - last_action = None - last_cmd = None - - # Normally, we will try to combine or batch module commands together... - # - # module load X - # module load Y - # module load Z - # - # is the same as ... - # - # module load X Y Z - # - # ... except the latter is significatly faster due to performing 1/3 as - # many forks. - # - # Not all module commands support batching though and we enurmerate those - # here. - actions_that_cannot_be_batched = ["swap", "switch"] - - for action, argument in modules_to_load: - if argument is None: - argument = "" - - if action == last_action and action not in actions_that_cannot_be_batched: - last_cmd = "{} {}".format(last_cmd, argument) - else: - if last_cmd is not None: - cmds.append(last_cmd) - - last_cmd = "{} {} {}".format(mod_cmd, action, "" if argument is None else argument) - last_action = action - - if last_cmd: - cmds.append(last_cmd) - - return cmds - - def _load_module_modules(self, modules_to_load, verbose=False): - logger_func = logger.warning if verbose else logger.debug - for cmd in self._get_module_commands(modules_to_load, "python"): - logger_func("module command is {}".format(cmd)) - stat, py_module_code, errout = run_cmd(cmd) - expect(stat==0 and (len(errout) == 0 or self.allow_error()), - "module command {} failed with message:\n{}".format(cmd, errout)) - exec(py_module_code) - - def _load_modules_generic(self, modules_to_load, verbose=False): - sh_init_cmd = self.get_module_system_init_path("sh") - sh_mod_cmd = self.get_module_system_cmd_path("sh") - logger_func = logger.warning if verbose else logger.debug - - # Purpose is for environment management system that does not have - # a python interface and therefore can only determine what they - # do by running shell command and looking at the changes - # in the environment. - - cmd = "source {}".format(sh_init_cmd) - - if "SOFTENV_ALIASES" in os.environ: - cmd += " && source $SOFTENV_ALIASES" - if "SOFTENV_LOAD" in os.environ: - cmd += " && source $SOFTENV_LOAD" - - for action,argument in modules_to_load: - cmd += " && {} {} {}".format(sh_mod_cmd, action, "" if argument is None else argument) - - # Use null terminated lines to give us something more definitive to split on. 
- # Env vars can contain newlines, so splitting on newlines can be ambiguous
- cmd += " && env -0"
- logger_func("cmd: {}".format(cmd))
- output = run_cmd_no_fail(cmd)
-
- ###################################################
- # Parse the output to set the os.environ dictionary
- ###################################################
- newenv = OrderedDict()
- for line in output.split('\0'):
- if "=" in line:
- key, val = line.split("=", 1)
- newenv[key] = val
-
- # resolve variables
- for key, val in newenv.items():
- newenv[key] = string.Template(val).safe_substitute(newenv)
-
- # Set environment with new or updated values
- for key in newenv:
- if key in os.environ and os.environ[key] == newenv[key]:
- pass
- else:
- os.environ[key] = newenv[key]
-
- for oldkey in list(os.environ.keys()):
- if oldkey not in newenv:
- del os.environ[oldkey]
-
- def _load_none_modules(self, modules_to_load):
- """
- No Action required
- """
- expect(not modules_to_load,
- "Module system was specified as 'none' yet there are modules that need to be loaded?")
-
- def _mach_specific_header(self, shell):
- '''
- write a shell module file for this case.
- '''
- header = '''
-#!/usr/bin/env {}
-#===============================================================================
-# Automatically generated module settings for $self->{{machine}}
-# DO NOT EDIT THIS FILE DIRECTLY! Please edit env_mach_specific.xml
-# in your CASEROOT. This file is overwritten every time modules are loaded!
-#===============================================================================
-'''.format(shell)
- header += "source {}".format(self.get_module_system_init_path(shell))
- return header
-
- def get_module_system_type(self):
- """
- Return the module system used on this machine
- """
- module_system = self.get_child("module_system")
- return self.get(module_system, "type")
-
- def allow_error(self):
- """
- Return True if stderr output from module commands should be assumed
- to be an error. Default False. This is necessary since implementations
- of environment modules are highly variable and some systems produce
- stderr output even when things are working fine.
- """ - module_system = self.get_child("module_system") - value = self.get(module_system, "allow_error") - return value.upper() == "TRUE" if value is not None else False - - def get_module_system_init_path(self, lang): - init_nodes = self.get_optional_child("init_path", attributes={"lang":lang}, root=self.get_child("module_system")) - return self.text(init_nodes) if init_nodes is not None else None - - def get_module_system_cmd_path(self, lang): - cmd_nodes = self.get_optional_child("cmd_path", attributes={"lang":lang}, root=self.get_child("module_system")) - return self.text(cmd_nodes) if cmd_nodes is not None else None - - def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None): - """ - Find best match, return (executable, {arg_name : text}) - """ - mpirun_nodes = self.get_children("mpirun") - best_match = None - best_num_matched = -1 - default_match = None - best_num_matched_default = -1 - args = [] - for mpirun_node in mpirun_nodes: - xml_attribs = self.attrib(mpirun_node) - all_match = True - matches = 0 - is_default = False - - for key, value in attribs.items(): - expect(key in self._allowed_mpi_attributes, "Unexpected key {} in mpirun attributes".format(key)) - if key in xml_attribs: - if xml_attribs[key].lower() == "false": - xml_attrib = False - elif xml_attribs[key].lower() == "true": - xml_attrib = True - else: - xml_attrib = xml_attribs[key] - - if xml_attrib == value: - matches += 1 - elif key == "mpilib" and value != "mpi-serial" and xml_attrib == "default": - is_default = True - else: - all_match = False - break - - if all_match: - if is_default: - if matches > best_num_matched_default: - default_match = mpirun_node - best_num_matched_default = matches - else: - if matches > best_num_matched: - best_match = mpirun_node - best_num_matched = matches - - # if there are no special arguments required for mpi-serial it need not have an entry in config_machines.xml - if "mpilib" in attribs and attribs["mpilib"] == "mpi-serial" and best_match is None: - return "",[],None,None - - expect(best_match is not None or default_match is not None, - "Could not find a matching MPI for attributes: {}".format(attribs)) - - the_match = best_match if best_match is not None else default_match - - # Now that we know the best match, compute the arguments - if not exe_only: - arg_node = self.get_optional_child("arguments", root=the_match) - if arg_node: - arg_nodes = self.get_children("arg", root=arg_node) - for arg_node in arg_nodes: - arg_value = transform_vars(self.text(arg_node), - case=case, - subgroup=job,overrides=overrides, - default=self.get(arg_node, "default")) - args.append(arg_value) - - exec_node = self.get_child("executable", root=the_match) - expect(exec_node is not None,"No executable found") - executable = self.text(exec_node) - run_exe = None - run_misc_suffix = None - - run_exe_node = self.get_optional_child('run_exe', root=the_match) - if run_exe_node: - run_exe = self.text(run_exe_node) - - run_misc_suffix_node = self.get_optional_child('run_misc_suffix', root=the_match) - if run_misc_suffix_node: - run_misc_suffix = self.text(run_misc_suffix_node) - - return executable, args, run_exe, run_misc_suffix - - def get_type_info(self, vid): - return "char" diff --git a/scripts/lib/CIME/XML/env_run.py b/scripts/lib/CIME/XML/env_run.py deleted file mode 100644 index f4b5301e315..00000000000 --- a/scripts/lib/CIME/XML/env_run.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Interface to the env_run.xml file. 
This class inherits from EnvBase -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.env_base import EnvBase - -logger = logging.getLogger(__name__) - -class EnvRun(EnvBase): - - def __init__(self, case_root=None, infile="env_run.xml", components=None, read_only=False): - """ - initialize an object interface to file env_run.xml in the case directory - """ - self._components = components - schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd") - - EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) diff --git a/scripts/lib/CIME/XML/env_workflow.py b/scripts/lib/CIME/XML/env_workflow.py deleted file mode 100644 index 5a70e4d5987..00000000000 --- a/scripts/lib/CIME/XML/env_workflow.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -Interface to the env_workflow.xml file. This class inherits from EnvBase -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.env_base import EnvBase -from CIME.utils import get_cime_root -import re, math - -logger = logging.getLogger(__name__) - -# pragma pylint: disable=attribute-defined-outside-init - -class EnvWorkflow(EnvBase): - - def __init__(self, case_root=None, infile="env_workflow.xml", read_only=False): - """ - initialize an object interface to file env_workflow.xml in the case directory - """ - # This arbitrary setting should always be overwritten - # schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_workflow.xsd") - # TODO: define schema for this file - schema = None - super(EnvWorkflow,self).__init__(case_root, infile, schema=schema, read_only=read_only) - - def create_job_groups(self, batch_jobs, is_test): - # Subtle: in order to support dynamic batch jobs, we need to remove the - # job_submission group and replace with job-based groups - - orig_group = self.get_child("group", {"id":"job_submission"}, - err_msg="Looks like job groups have already been created") - orig_group_children = super(EnvWorkflow, self).get_children(root=orig_group) - - childnodes = [] - for child in reversed(orig_group_children): - childnodes.append(child) - - self.remove_child(orig_group) - - for name, jdict in batch_jobs: - if name == "case.run" and is_test: - pass # skip - elif name == "case.test" and not is_test: - pass # skip - elif name == "case.run.sh": - pass # skip - else: - new_job_group = self.make_child("group", {"id":name}) - for field in jdict.keys(): - if field == "runtime_parameters": - continue - val = jdict[field] - node = self.make_child("entry", {"id":field,"value":val}, root=new_job_group) - self.make_child("type", root=node, text="char") - - for child in childnodes: - self.add_child(self.copy(child), root=new_job_group) - - def get_jobs(self): - groups = self.get_children("group") - results = [] - for group in groups: - results.append(self.get(group, "id")) - return results - - def get_type_info(self, vid): - gnodes = self.get_children("group") - type_info = None - for gnode in gnodes: - nodes = self.get_children("entry",{"id":vid}, root=gnode) - type_info = None - for node in nodes: - new_type_info = self._get_type_info(node) - if type_info is None: - type_info = new_type_info - else: - expect( type_info == new_type_info, - "Inconsistent type_info for entry id={} {} {}".format(vid, new_type_info, type_info)) - return type_info - - def get_job_specs(self, job): - task_count = self.get_value("task_count", subgroup=job) - tasks_per_node = self.get_value("tasks_per_node", subgroup=job) - thread_count = self.get_value("thread_count", subgroup=job) - num_nodes = 
None - if task_count is not None and tasks_per_node is not None: - task_count = int(task_count) - num_nodes = int(math.ceil(float(task_count)/float(tasks_per_node))) - tasks_per_node = task_count//num_nodes - if not thread_count: - thread_count = 1 - - return task_count, num_nodes, tasks_per_node, thread_count - - # pylint: disable=arguments-differ - def get_value(self, item, attribute=None, resolved=True, subgroup="PRIMARY"): - """ - Must default subgroup to something in order to provide single return value - """ - value = None - if subgroup == "PRIMARY": - subgroup = "case.test" if "case.test" in self.get_jobs() else "case.run" - - #pylint: disable=assignment-from-none - if value is None: - value = super(EnvWorkflow, self).get_value(item, attribute=attribute, resolved=resolved, subgroup=subgroup) - - return value - - # pylint: disable=arguments-differ - def set_value(self, item, value, subgroup=None, ignore_type=False): - """ - Override the entry_id set_value function with some special cases for this class - """ - val = None - - # allow the user to set item for all jobs if subgroup is not provided - if subgroup is None: - gnodes = self.get_children("group") - for gnode in gnodes: - node = self.get_optional_child("entry", {"id":item}, root=gnode) - if node is not None: - self._set_value(node, value, vid=item, ignore_type=ignore_type) - val = value - else: - group = self.get_optional_child("group", {"id":subgroup}) - if group is not None: - node = self.get_optional_child("entry", {"id":item}, root=group) - if node is not None: - val = self._set_value(node, value, vid=item, ignore_type=ignore_type) - - return val - - def get_children(self, name=None, attributes=None, root=None): - if name in ("JOB_WALLCLOCK_TIME", "PROJECT", "CHARGE_ACCOUNT", - "JOB_QUEUE", "BATCH_COMMAND_FLAGS"): - nodes = super(EnvWorkflow, self).get_children("entry", attributes={"id":name}, root=root) - else: - nodes = super(EnvWorkflow, self).get_children(name, attributes=attributes, root=root) - - return nodes diff --git a/scripts/lib/CIME/XML/files.py b/scripts/lib/CIME/XML/files.py deleted file mode 100644 index 66b8b28c688..00000000000 --- a/scripts/lib/CIME/XML/files.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -Interface to the config_files.xml file. 
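The node-count arithmetic in get_job_specs above, as a stand-alone calculation with invented task counts: the node count is rounded up, then tasks_per_node is rebalanced so the tasks spread evenly across those nodes:

import math

task_count, tasks_per_node = 130, 64          # illustrative values
num_nodes = int(math.ceil(float(task_count) / float(tasks_per_node)))
tasks_per_node = task_count // num_nodes
print(num_nodes, tasks_per_node)              # 3 43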
This class inherits from EntryID.py -""" -import re -from CIME.XML.standard_module_setup import * - -from CIME.XML.entry_id import EntryID -from CIME.utils import expect, get_cime_root, get_model - -logger = logging.getLogger(__name__) - -class Files(EntryID): - - def __init__(self, comp_interface="mct"): - """ - initialize an object - - >>> files = Files() - >>> files.get_value('CASEFILE_HEADERS',resolved=False) - '$CIMEROOT/config/config_headers.xml' - """ - cimeroot = get_cime_root() - infile = os.path.join(cimeroot, "config", get_model(), "config_files.xml") - expect(os.path.isfile(infile), "Could not find or open file {}".format(infile)) - schema = os.path.join(cimeroot, "config", "xml_schemas", "entry_id.xsd") - EntryID.__init__(self, infile, schema=schema) - config_files_override = os.path.join(os.path.dirname(cimeroot),".config_files.xml") - # variables COMP_ROOT_DIR_{} are mutable, all other variables are read only - self.COMP_ROOT_DIR = {} - self._comp_interface = comp_interface - # .config_file.xml at the top level may overwrite COMP_ROOT_DIR_ nodes in config_files - - if os.path.isfile(config_files_override): - self.read(config_files_override) - self.overwrite_existing_entries() - - def get_value(self, vid, attribute=None, resolved=True, subgroup=None): - if "COMP_ROOT_DIR" in vid: - if vid in self.COMP_ROOT_DIR: - if attribute is not None: - if vid+attribute["component"] in self.COMP_ROOT_DIR: - return self.COMP_ROOT_DIR[vid+attribute["component"]] - else: - return self.COMP_ROOT_DIR[vid] - - value = super(Files, self).get_value(vid, attribute=attribute, resolved=False, subgroup=subgroup) - if value is None and attribute is not None: - value = super(Files, self).get_value(vid, attribute=None, resolved=False, subgroup=subgroup) - - if "COMP_ROOT_DIR" not in vid and value is not None and "COMP_ROOT_DIR" in value: - m = re.search("(COMP_ROOT_DIR_[^/]+)/", value) - comp_root_dir_var_name = m.group(1) - comp_root_dir = self.get_value(comp_root_dir_var_name, attribute=attribute, resolved=False, subgroup=subgroup) - self.set_value(comp_root_dir_var_name, comp_root_dir,subgroup=attribute) - if resolved: - value = value.replace("$"+comp_root_dir_var_name, comp_root_dir) - - if resolved and value is not None: - value = value.replace("$COMP_INTERFACE", self._comp_interface) - value = self.get_resolved_value(value) - return value - - def set_value(self, vid, value,subgroup=None,ignore_type=False): - if "COMP_ROOT_DIR" in vid: - if subgroup is not None: - self.COMP_ROOT_DIR[vid+subgroup["component"]] = value - else: - self.COMP_ROOT_DIR[vid] = value - - else: - expect(False, "Attempt to set a nonmutable variable {}".format(vid)) - return value - - - def get_schema(self, nodename, attributes=None): - node = self.get_optional_child("entry", {"id":nodename}) - schemanode = self.get_optional_child("schema", root=node, attributes=attributes) - if schemanode is not None: - logger.debug("Found schema for {}".format(nodename)) - return self.get_resolved_value(self.text(schemanode)) - return None - - def get_components(self, nodename): - node = self.get_optional_child("entry", {"id":nodename}) - if node is not None: - valnodes = self.get_children("value", root=self.get_child("values", root=node)) - values = [] - for valnode in valnodes: - value = self.get(valnode, "component") - values.append(value) - return values - - return None diff --git a/scripts/lib/CIME/XML/generic_xml.py b/scripts/lib/CIME/XML/generic_xml.py deleted file mode 100644 index d2d40d1a021..00000000000 --- 
a/scripts/lib/CIME/XML/generic_xml.py +++ /dev/null @@ -1,546 +0,0 @@ -""" -Common interface to XML files, this is an abstract class and is expected to -be used by other XML interface modules and not directly. -""" -from CIME.XML.standard_module_setup import * -from CIME.utils import safe_copy - -import xml.etree.ElementTree as ET -#pylint: disable=import-error -from distutils.spawn import find_executable -import getpass -import six -from copy import deepcopy -from collections import namedtuple - -logger = logging.getLogger(__name__) - -class _Element(object): # private class, don't want users constructing directly or calling methods on it - - def __init__(self, xml_element): - self.xml_element = xml_element - - def __eq__(self, rhs): - expect(isinstance(rhs, _Element), "Wrong type") - return self.xml_element == rhs.xml_element # pylint: disable=protected-access - - def __ne__(self, rhs): - expect(isinstance(rhs, _Element), "Wrong type") - return self.xml_element != rhs.xml_element # pylint: disable=protected-access - - def __hash__(self): - return hash(self.xml_element) - - def __deepcopy__(self, _): - return _Element(deepcopy(self.xml_element)) - -class GenericXML(object): - - _FILEMAP = {} - DISABLE_CACHING = False - CacheEntry = namedtuple("CacheEntry", ["tree", "root", "modtime"]) - - @classmethod - def invalidate(cls, filename): - if filename in cls._FILEMAP: - del cls._FILEMAP[filename] - - def __init__(self, infile=None, schema=None, root_name_override=None, root_attrib_override=None, read_only=True): - """ - Initialize an object - """ - logger.debug("Initializing {}".format(infile)) - self.tree = None - self.root = None - self.locked = False - self.read_only = read_only - self.filename = infile - self.needsrewrite = False - if infile is None: - return - - if os.path.isfile(infile) and os.access(infile, os.R_OK): - # If file is defined and exists, read it - self.read(infile, schema) - else: - # if file does not exist create a root xml element - # and set it's id to file - expect(not self.read_only, "Makes no sense to have empty read-only file") - logger.debug("File {} does not exists.".format(infile)) - expect("$" not in infile,"File path not fully resolved {}".format(infile)) - - root = _Element(ET.Element("xml")) - - if root_name_override: - self.root = self.make_child(root_name_override, root=root, attributes=root_attrib_override) - else: - self.root = self.make_child("file", root=root, attributes={"id":os.path.basename(infile), "version":"2.0"}) - - self.tree = ET.ElementTree(root) - - self._FILEMAP[infile] = self.CacheEntry(self.tree, self.root, 0.0) - - def read(self, infile, schema=None): - """ - Read and parse an xml file into the object - """ - cached_read = False - if not self.DISABLE_CACHING and infile in self._FILEMAP: - timestamp_cache = self._FILEMAP[infile].modtime - timestamp_file = os.path.getmtime(infile) - if timestamp_file == timestamp_cache: - logger.debug("read (cached): {}".format(infile)) - expect(self.read_only or not self.filename or not self.needsrewrite, "Reading into object marked for rewrite, file {}" - .format(self.filename)) - self.tree, self.root, _ = self._FILEMAP[infile] - cached_read = True - - if not cached_read: - logger.debug("read: {}".format(infile)) - file_open = (lambda x: open(x, 'r', encoding='utf-8')) if six.PY3 else (lambda x: open(x, 'r')) - with file_open(infile) as fd: - self.read_fd(fd) - - if schema is not None and self.get_version() > 1.0: - self.validate_xml_file(infile, schema) - - logger.debug("File version is 
{}".format(str(self.get_version()))) - - self._FILEMAP[infile] = self.CacheEntry(self.tree, self.root, os.path.getmtime(infile)) - - def read_fd(self, fd): - expect(self.read_only or not self.filename or not self.needsrewrite, "Reading into object marked for rewrite, file {}" .format(self.filename)) - read_only = self.read_only - if self.tree: - addroot = _Element(ET.parse(fd).getroot()) - # we need to override the read_only mechanism here to append the xml object - self.read_only = False - if addroot.xml_element.tag == self.name(self.root): - for child in self.get_children(root=addroot): - self.add_child(child) - else: - self.add_child(addroot) - self.read_only = read_only - else: - self.tree = ET.parse(fd) - self.root = _Element(self.tree.getroot()) - include_elems = self.scan_children("xi:include") - # First remove all includes found from the list - for elem in include_elems: - self.read_only = False - self.remove_child(elem) - self.read_only = read_only - # Then recursively add the included files. - for elem in include_elems: - path = os.path.abspath( - os.path.join(os.getcwd(), os.path.dirname(self.filename), - self.get(elem, "href"))) - logger.debug("Include file {}".format(path)) - self.read(path) - - def lock(self): - """ - A subclass is doing caching, we need to lock the tree structure - in order to avoid invalidating cache. - """ - self.locked = True - - def unlock(self): - self.locked = False - - def change_file(self, newfile, copy=False): - if copy: - new_case = os.path.dirname(newfile) - if not os.path.exists(new_case): - os.makedirs(new_case) - safe_copy(self.filename, newfile) - - self.tree = None - self.filename = newfile - self.read(newfile) - - # - # API for individual node operations - # - - def get(self, node, attrib_name, default=None): - return node.xml_element.get(attrib_name, default=default) - - def has(self, node, attrib_name): - return attrib_name in node.xml_element.attrib - - def set(self, node, attrib_name, value): - if self.get(node, attrib_name) != value: - expect(not self.read_only, "read_only: cannot set attrib[{}]={} for node {} in file {}".format(attrib_name, value, self.name(node), self.filename)) - if attrib_name == "id": - expect(not self.locked, "locked: cannot set attrib[{}]={} for node {} in file {}".format(attrib_name, value, self.name(node), self.filename)) - self.needsrewrite = True - return node.xml_element.set(attrib_name, value) - - def pop(self, node, attrib_name): - expect(not self.read_only, "read_only: cannot pop attrib[{}] for node {} in file {}".format(attrib_name, self.name(node), self.filename)) - if attrib_name == "id": - expect(not self.locked, "locked: cannot pop attrib[{}] for node {} in file {}".format(attrib_name, self.name(node), self.filename)) - self.needsrewrite = True - return node.xml_element.attrib.pop(attrib_name) - - def attrib(self, node): - # Return a COPY. 
We do not want clients making changes directly - return None if node.xml_element.attrib is None else dict(node.xml_element.attrib) - - def set_name(self, node, name): - expect(not self.read_only, "read_only: set node name {} in file {}".format(name, self.filename)) - if node.xml_element.tag != name: - self.needsrewrite = True - node.xml_element.tag = name - - def set_text(self, node, text): - expect(not self.read_only, "read_only: set node text {} for node {} in file {}".format(text, self.name(node), self.filename)) - if node.xml_element.text != text: - node.xml_element.text = text - self.needsrewrite = True - - def name(self, node): - return node.xml_element.tag - - def text(self, node): - return node.xml_element.text - - def add_child(self, node, root=None, position=None): - """ - Add element node to self at root - """ - expect(not self.locked and not self.read_only, "{}: cannot add child {} in file {}".format("read_only" if self.read_only else "locked", self.name(node), self.filename)) - self.needsrewrite = True - root = root if root is not None else self.root - if position is not None: - root.xml_element.insert(position, node.xml_element) - else: - root.xml_element.append(node.xml_element) - - def copy(self, node): - return deepcopy(node) - - def remove_child(self, node, root=None): - expect(not self.locked and not self.read_only, "{}: cannot remove child {} in file {}".format("read_only" if self.read_only else "locked", self.name(node), self.filename)) - self.needsrewrite = True - root = root if root is not None else self.root - root.xml_element.remove(node.xml_element) - - def make_child(self, name, attributes=None, root=None, text=None): - expect(not self.locked and not self.read_only, "{}: cannot make child {} in file {}".format("read_only" if self.read_only else "locked", name, self.filename)) - root = root if root is not None else self.root - self.needsrewrite = True - if attributes is None: - node = _Element(ET.SubElement(root.xml_element, name)) - else: - node = _Element(ET.SubElement(root.xml_element, name, attrib=attributes)) - - if text: - self.set_text(node, text) - - return node - - def get_children(self, name=None, attributes=None, root=None): - """ - This is the critical function, its interface and performance are crucial. - - You can specify attributes={key:None} if you want to select chilren - with the key attribute but you don't care what its value is. 
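Underneath the read-only and locking checks above, make_child amounts to ET.SubElement; a bare-bones equivalent with invented entry names:

import xml.etree.ElementTree as ET

root = ET.Element("file", attrib={"id": "env_demo.xml", "version": "2.0"})
entry = ET.SubElement(root, "entry", attrib={"id": "STOP_N", "value": "5"})
ET.SubElement(entry, "type").text = "integer"
print(ET.tostring(root).decode())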
- """ - root = root if root is not None else self.root - children = [] - for child in root.xml_element: - if name is not None: - if child.tag != name: - continue - - if attributes is not None: - if child.attrib is None: - continue - else: - match = True - for key, value in attributes.items(): - if key not in child.attrib: - match = False - break - elif value is not None: - if child.attrib[key] != value: - match = False - break - - if not match: - continue - - children.append(_Element(child)) - - return children - - def get_child(self, name=None, attributes=None, root=None, err_msg=None): - children = self.get_children(root=root, name=name, attributes=attributes) - expect(len(children) == 1, err_msg if err_msg else "Expected one child, found {} with name '{}' and attribs '{}' in file {}".format(len(children), name, attributes, self.filename)) - return children[0] - - def get_optional_child(self, name=None, attributes=None, root=None, err_msg=None): - children = self.get_children(root=root, name=name, attributes=attributes) - expect(len(children) <= 1, err_msg if err_msg else "Multiple matches for name '{}' and attribs '{}' in file {}".format(name, attributes, self.filename)) - return children[0] if children else None - - def get_element_text(self, element_name, attributes=None, root=None): - element_node = self.get_optional_child(name=element_name, attributes=attributes, root=root) - if element_node is not None: - return self.text(element_node) - return None - - def set_element_text(self, element_name, new_text, attributes=None, root=None): - element_node = self.get_optional_child(name=element_name, attributes=attributes, root=root) - if element_node is not None: - self.set_text(element_node, new_text) - return new_text - return None - - def to_string(self, node, method="xml", encoding="us-ascii"): - return ET.tostring(node.xml_element, method=method, encoding=encoding) - - # - # API for operations over the entire file - # - - def get_version(self): - version = self.get(self.root, "version") - version = 1.0 if version is None else float(version) - return version - - def check_timestamp(self): - timestamp_cache = self._FILEMAP[self.filename].modtime - if timestamp_cache != 0.0: - timestamp_file = os.path.getmtime(self.filename) - expect(timestamp_file == timestamp_cache, - "File {} appears to have changed without a corresponding invalidation, modtimes {:0.2f} != {:0.2f}".format(self.filename, timestamp_cache, timestamp_file)) - - def write(self, outfile=None, force_write=False): - """ - Write an xml file from data in self - """ - self.check_timestamp() - - if not (self.needsrewrite or force_write): - return - - if outfile is None: - outfile = self.filename - - logger.debug("write: " + (outfile if isinstance(outfile, six.string_types) else str(outfile))) - - xmlstr = self.get_raw_record() - - # xmllint provides a better format option for the output file - xmllint = find_executable("xmllint") - if xmllint is not None: - if isinstance(outfile, six.string_types): - run_cmd_no_fail("{} --format --output {} -".format(xmllint, outfile), input_str=xmlstr) - else: - outfile.write(run_cmd_no_fail("{} --format -".format(xmllint), input_str=xmlstr)) - - else: - with open(outfile,'w') as xmlout: - xmlout.write(xmlstr) - - self._FILEMAP[self.filename] = self.CacheEntry(self.tree, self.root, os.path.getmtime(self.filename)) - - self.needsrewrite = False - - def scan_child(self, nodename, attributes=None, root=None): - """ - Get an xml element matching nodename with optional attributes. 
- - Error unless exactly one match. - """ - - nodes = self.scan_children(nodename, attributes=attributes, root=root) - - expect(len(nodes) == 1, "Incorrect number of matches, {:d}, for nodename '{}' and attrs '{}' in file '{}'".format(len(nodes), nodename, attributes, self.filename)) - return nodes[0] - - def scan_optional_child(self, nodename, attributes=None, root=None): - """ - Get an xml element matching nodename with optional attributes. - - Return None if no match. - """ - nodes = self.scan_children(nodename, attributes=attributes, root=root) - - expect(len(nodes) <= 1, "Multiple matches for nodename '{}' and attrs '{}' in file '{}', found {} matches".format(nodename, attributes, self.filename, len(nodes))) - return nodes[0] if nodes else None - - def scan_children(self, nodename, attributes=None, root=None): - - logger.debug("(get_nodes) Input values: {}, {}, {}, {}".format(self.__class__.__name__, nodename, attributes, root)) - - if root is None: - root = self.root - nodes = [] - - namespace = {"xi" : "http://www.w3.org/2001/XInclude"} - - xpath = ".//" + (nodename if nodename else "") - - if attributes: - # xml.etree has limited support for xpath and does not allow more than - # one attribute in an xpath query so we query seperately for each attribute - # and create a result with the intersection of those lists - - for key, value in attributes.items(): - if value is None: - xpath = ".//{}[@{}]".format(nodename, key) - else: - xpath = ".//{}[@{}=\'{}\']".format(nodename, key, value) - - logger.debug("xpath is {}".format(xpath)) - - try: - newnodes = root.xml_element.findall(xpath, namespace) - except Exception as e: - expect(False, "Bad xpath search term '{}', error: {}".format(xpath, e)) - - if not nodes: - nodes = newnodes - else: - for node in nodes[:]: - if node not in newnodes: - nodes.remove(node) - if not nodes: - return [] - - else: - logger.debug("xpath: {}".format(xpath)) - nodes = root.xml_element.findall(xpath, namespace) - - logger.debug("Returning {} nodes ({})".format(len(nodes), nodes)) - - return [_Element(node) for node in nodes] - - def get_value(self, item, attribute=None, resolved=True, subgroup=None): # pylint: disable=unused-argument - """ - get_value is expected to be defined by the derived classes, if you get here - the value was not found in the class. - """ - logger.debug("Get Value for " + item) - return None - - def get_values(self, vid, attribute=None, resolved=True, subgroup=None):# pylint: disable=unused-argument - logger.debug("Get Values for " + vid) - return [] - - def set_value(self, vid, value, subgroup=None, ignore_type=True): # pylint: disable=unused-argument - """ - ignore_type is not used in this flavor - """ - valnodes = self.get_children(vid) - for node in valnodes: - self.set_text(node, value) - - return value if valnodes else None - - def get_resolved_value(self, raw_value, allow_unresolved_envvars=False): - """ - A value in the xml file may contain references to other xml - variables or to environment variables. These are refered to in - the perl style with $name and $ENV{name}. 
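The reference syntax just described can be exercised with regular expressions like the ones used below; a trimmed sketch that resolves $ENV{...} against the process environment and $NAME against a lookup table (the $SHELL{...} and arithmetic branches are left out):

import os
import re

env_ref_re = re.compile(r'\$ENV\{(\w+)\}')
reference_re = re.compile(r'\${?(\w+)}?')

def resolve(text, lookup):
    for m in env_ref_re.finditer(text):
        text = text.replace(m.group(), os.environ.get(m.groups()[0], ""))
    for m in reference_re.finditer(text):
        var = m.groups()[0]
        if var in lookup:
            text = text.replace(m.group(), str(lookup[var]))
    return text

os.environ["FOO"] = "BAR"
print(resolve("run $ENV{FOO} in $CASEROOT", {"CASEROOT": "/scratch/case1"}))
# run BAR in /scratch/case1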
- - >>> obj = GenericXML() - >>> os.environ["FOO"] = "BAR" - >>> os.environ["BAZ"] = "BARF" - >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three") - 'one BAR two BARF three' - >>> obj.get_resolved_value("2 + 3 - 1") - '4' - >>> obj.get_resolved_value("0001-01-01") - '0001-01-01' - >>> obj.get_resolved_value("$SHELL{echo hi}") == 'hi' - True - """ - logger.debug("raw_value {}".format(raw_value)) - reference_re = re.compile(r'\${?(\w+)}?') - env_ref_re = re.compile(r'\$ENV\{(\w+)\}') - shell_ref_re = re.compile(r'\$SHELL\{([^}]+)\}') - math_re = re.compile(r'\s[+-/*]\s') - item_data = raw_value - - if item_data is None: - return None - - if not isinstance(item_data, six.string_types): - return item_data - - for m in env_ref_re.finditer(item_data): - logger.debug("look for {} in env".format(item_data)) - env_var = m.groups()[0] - env_var_exists = env_var in os.environ - if not allow_unresolved_envvars: - expect(env_var_exists, "Undefined env var '{}'".format(env_var)) - if env_var_exists: - item_data = item_data.replace(m.group(), os.environ[env_var]) - - for s in shell_ref_re.finditer(item_data): - logger.debug("execute {} in shell".format(item_data)) - shell_cmd = s.groups()[0] - item_data = item_data.replace(s.group(), run_cmd_no_fail(shell_cmd)) - - for m in reference_re.finditer(item_data): - var = m.groups()[0] - logger.debug("find: {}".format(var)) - # The overridden versions of this method do not simply return None - # so the pylint should not be flagging this - ref = self.get_value(var) # pylint: disable=assignment-from-none - - if ref is not None: - logger.debug("resolve: " + str(ref)) - item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref))) - elif var == "CIMEROOT": - cimeroot = get_cime_root() - item_data = item_data.replace(m.group(), cimeroot) - elif var == "SRCROOT": - srcroot = os.path.normpath(os.path.join(get_cime_root(),"..")) - item_data = item_data.replace(m.group(), srcroot) - elif var == "USER": - item_data = item_data.replace(m.group(), getpass.getuser()) - - if math_re.search(item_data): - try: - tmp = eval(item_data) - except Exception: - tmp = item_data - item_data = str(tmp) - - return item_data - - def validate_xml_file(self, filename, schema): - """ - validate an XML file against a provided schema file using pylint - """ - expect(os.path.isfile(filename),"xml file not found {}".format(filename)) - expect(os.path.isfile(schema),"schema file not found {}".format(schema)) - xmllint = find_executable("xmllint") - if xmllint is not None: - logger.debug("Checking file {} against schema {}".format(filename, schema)) - run_cmd_no_fail("{} --xinclude --noout --schema {} {}".format(xmllint, schema, filename)) - else: - logger.warning("xmllint not found, could not validate file {}".format(filename)) - - def get_raw_record(self, root=None): - logger.debug("writing file {}".format(self.filename)) - if root is None: - root = self.root - try: - xmlstr = ET.tostring(root.xml_element) - except ET.ParseError as e: - ET.dump(root.xml_element) - expect(False, "Could not write file {}, xml formatting error '{}'".format(self.filename, e)) - return xmlstr - - def get_id(self): - xmlid = self.get(self.root, "id") - if xmlid is not None: - return xmlid - return self.name(self.root) diff --git a/scripts/lib/CIME/XML/grids.py b/scripts/lib/CIME/XML/grids.py deleted file mode 100644 index 64402bbeb67..00000000000 --- a/scripts/lib/CIME/XML/grids.py +++ /dev/null @@ -1,416 +0,0 @@ -""" -Common interface to XML files which follow the grids format, -This is not 
an abstract class - but inherits from the abstact class GenericXML -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.files import Files -from CIME.XML.generic_xml import GenericXML - -logger = logging.getLogger(__name__) - -class Grids(GenericXML): - - def __init__(self, infile=None, files=None): - if files is None: - files = Files() - if infile is None: - infile = files.get_value("GRIDS_SPEC_FILE") - logger.debug(" Grid specification file is {}".format(infile)) - schema = files.get_schema("GRIDS_SPEC_FILE") - - GenericXML.__init__(self, infile, schema) - self._version = self.get_version() - - self._comp_gridnames = self._get_grid_names() - - def _get_grid_names(self): - grids = self.get_child("grids") - model_grid_defaults = self.get_child("model_grid_defaults", root=grids) - nodes = self.get_children("grid", root=model_grid_defaults) - gridnames = [] - for node in nodes: - gn = self.get(node, "name") - if gn not in gridnames: - gridnames.append(gn) - if "mask" not in gridnames: - gridnames.append("mask") - - return gridnames - - def get_grid_info(self, name, compset, driver): - """ - Find the matching grid node - """ - gridinfo = {} - atmnlev = None - lndnlev = None - - #mechanism to specify atm levels - atmlevregex = re.compile(r"([^_]+)z(\d+)(.*)$") - levmatch = re.match(atmlevregex, name) - if levmatch: - atmnlev = levmatch.group(2) - name = levmatch.group(1)+levmatch.group(3) - - #mechanism to specify lnd levels - lndlevregex = re.compile(r"(.*_)([^_]+)z(\d+)(_[^m].*)$") - levmatch = re.match(lndlevregex, name) - if levmatch: - lndnlev = levmatch.group(3) - name = levmatch.group(1)+levmatch.group(2)+levmatch.group(4) - - # determine component_grids dictionary and grid longname - lname, component_grids = self._read_config_grids(name, compset, atmnlev, lndnlev) - gridinfo["GRID"] = lname - - # determine domains given component_grids - domains = self._get_domains(component_grids, atmlevregex, lndlevregex, driver) - - gridinfo.update(domains) - - # determine gridmaps given component_grids - gridmaps = self._get_gridmaps(component_grids, driver) - gridinfo.update(gridmaps) - - return gridinfo - - def _read_config_grids(self, name, compset, atmnlev, lndnlev): - """ - read config_grids.xml with version 2.0 schema - """ - component_grids = {} - model_grid = {} - for comp_gridname in self._comp_gridnames: - model_grid[comp_gridname] = None - - # (1) set array of component grid defaults that match current compset - grids_node = self.get_child("grids") - grid_defaults_node = self.get_child("model_grid_defaults", root=grids_node) - for grid_node in self.get_children("grid", root=grid_defaults_node): - name_attrib = self.get(grid_node, "name") - compset_attrib = self.get(grid_node, "compset") - compset_match = re.search(compset_attrib, compset) - if compset_match is not None: - model_grid[name_attrib] = self.text(grid_node) - - # (2)loop over all of the "model grid" nodes and determine is there an alias match with the - # input grid name - if there is an alias match determine if the "compset" and "not_compset" - # regular expression attributes match the match the input compset - - model_gridnodes = self.get_children("model_grid", root=grids_node) - model_gridnode = None - foundalias = False - for node in model_gridnodes: - alias = self.get(node, "alias") - if alias == name: - foundalias = True - foundcompset = False - compset_attrib = self.get(node, "compset") - not_compset_attrib = self.get(node, "not_compset") - if compset_attrib and not_compset_attrib: - compset_match = 
re.search(compset_attrib, compset) - not_compset_match = re.search(not_compset_attrib, compset) - if compset_match is not None and not_compset_match is not None: - foundcompset = True - model_gridnode = node - logger.debug("Found match for {} with compset_match {} and not_compset_match {}" - .format(alias, compset_attrib, not_compset_attrib)) - break - elif compset_attrib: - compset_match = re.search(compset_attrib, compset) - if compset_match is not None: - foundcompset = True - model_gridnode = node - logger.debug("Found match for {} with compset_match {}" - .format(alias, compset_attrib)) - break - elif not_compset_attrib: - not_compset_match = re.search(not_compset_attrib, compset) - if not_compset_match is None: - foundcompset = True - model_gridnode = node - logger.debug("Found match for {} with not_compset_match {}" - .format(alias, not_compset_attrib)) - break - else: - foundcompset = True - model_gridnode = node - logger.debug("Found match for {}".format(alias)) - break - expect(foundalias, "no alias {} defined".format(name)) - # if no match is found in config_grids.xml - exit - expect(foundcompset, "grid alias {} not valid for compset {}".format(name, compset)) - - # for the match - find all of the component grid settings - grid_nodes = self.get_children("grid", root=model_gridnode) - for grid_node in grid_nodes: - name = self.get(grid_node, "name") - value = self.text(grid_node) - if model_grid[name] != "null": - model_grid[name] = value - mask_node = self.get_optional_child("mask",root=model_gridnode) - if mask_node is not None: - model_grid["mask"] = self.text(mask_node) - else: - model_grid["mask"] = model_grid["ocnice"] - - # determine component grids and associated required domains and gridmaps - # TODO: this should be in XML, not here - prefix = {"atm":"a%", "lnd":"l%", "ocnice":"oi%", "rof":"r%", "wav":"w%", "glc":"g%", "mask":"m%", "iac":"z%"} - lname = "" - for component_gridname in self._comp_gridnames: - if lname: - lname = lname + "_" + prefix[component_gridname] - else: - lname = prefix[component_gridname] - if model_grid[component_gridname] is not None: - lname += model_grid[component_gridname] - if component_gridname == 'atm' and atmnlev is not None: - if not ("a{:n}ull" in lname): - lname += "z" + atmnlev - - elif component_gridname == 'lnd' and lndnlev is not None: - if not ("l{:n}ull" in lname): - lname += "z" + lndnlev - - else: - lname += 'null' - component_grids = self._get_component_grids_from_longname(lname) - return lname, component_grids - - def _get_component_grids_from_longname(self, name): - gridRE = re.compile(r"[_]{0,1}[a-z]{1,2}%") - grids = gridRE.split(name)[1:] - prefixes = re.findall("[a-z]+%",name) - component_grids = {} - i = 0 - while i < len(grids): - prefix = prefixes[i] - grid = grids[i] - component_grids[prefix] = grid - i += 1 - component_grids["i%"] = component_grids["oi%"] - component_grids["o%"] = component_grids["oi%"] - return component_grids - - def _get_component_grids(self, name): - gridRE = re.compile(r"[_]{0,1}[a-z]{1,2}%") - component_grids = gridRE.split(name)[1:] - return component_grids - - def _get_domains(self, component_grids, atmlevregex, lndlevregex, driver): - """ determine domains dictionary for config_grids.xml v2 schema""" - # use component_grids to create grids dictionary - # TODO: this should be in XML, not here - grids = [("atm", "a%"), ("lnd", "l%"), ("ocn", "o%"), ("mask", "m%"),\ - ("ice", "i%"), ("rof", "r%"), ("glc", "g%"), ("wav", "w%"), ("iac", "z%")] - domains = {} - mask_name = None - if 'm%' in 
component_grids: - mask_name = component_grids['m%'] - else: - mask_name = component_grids['oi%'] - - for grid in grids: - grid_name = component_grids[grid[1]] - - # Determine grid name with no nlev suffix if there is one - grid_name_nonlev = grid_name - levmatch = re.match(atmlevregex, grid_name) - if levmatch: - grid_name_nonlev = levmatch.group(1)+levmatch.group(3) - levmatch = re.match(lndlevregex, grid_name) - if levmatch: - grid_name_nonlev = levmatch.group(1)+levmatch.group(2)+levmatch.group(4) - - # Determine all domain information search for the grid name with no level suffix in config_grids.xml - domain_node = self.get_optional_child("domain", attributes={"name":grid_name_nonlev}, - root=self.get_child("domains")) - if domain_node is not None: - comp_name = grid[0].upper() - - # determine xml variable name - if not comp_name == "MASK": - domains[comp_name + "_NX"] = int(self.get_element_text("nx", root=domain_node)) - domains[comp_name + "_NY"] = int(self.get_element_text("ny", root=domain_node)) - file_name = comp_name + "_DOMAIN_FILE" - path_name = comp_name + "_DOMAIN_PATH" - mesh_name = comp_name + "_DOMAIN_MESH" - - # set up dictionary of domain files for every component - domains[comp_name + "_GRID"] = grid_name - - file_nodes = self.get_children("file", root=domain_node) - for file_node in file_nodes: - grid_attrib = self.get(file_node, "grid") - mask_attrib = self.get(file_node, "mask") - domain_name = "" - if grid_attrib is not None and mask_attrib is not None: - grid_match = re.search(comp_name.lower(), grid_attrib) - mask_match = False - if mask_name is not None: - mask_match = mask_name == mask_attrib - if grid_match is not None and mask_match: - domain_name = self.text(file_node) - elif grid_attrib is not None: - grid_match = re.search(comp_name.lower(), grid_attrib) - if grid_match is not None: - domain_name = self.text(file_node) - elif mask_attrib is not None: - mask_match = mask_name == mask_attrib - if mask_match: - domain_name = self.text(file_node) - if domain_name: - domains[file_name] = os.path.basename(domain_name) - path = os.path.dirname(domain_name) - if len(path) > 0: - domains[path_name] = path - - if not comp_name == "MASK": - mesh_nodes = self.get_children("mesh", root=domain_node) - for mesh_node in mesh_nodes: - driver_attrib = self.get(mesh_node, "driver") - if driver == driver_attrib: - domains[mesh_name] = self.text(mesh_node) - - return domains - - def _get_gridmaps(self, component_grids, driver): - """ - set all mapping files for config_grids.xml v2 schema - """ - grids = [("atm_grid","a%"), ("lnd_grid","l%"), ("ocn_grid","o%"), \ - ("rof_grid","r%"), ("glc_grid","g%"), ("wav_grid","w%"), ("iac_grid","z%")] - gridmaps = {} - - # (1) set all possibly required gridmaps to idmap - required_gridmaps_node = self.get_child("required_gridmaps") - required_gridmap_nodes = self.get_children("required_gridmap", root=required_gridmaps_node) - for node in required_gridmap_nodes: - gridmaps[self.text(node)] = "idmap" - - # (2) determine values gridmaps for target grid - for idx, grid in enumerate(grids): - for other_grid in grids[idx+1:]: - gridname = grid[0] - other_gridname = other_grid[0] - gridvalue = component_grids[grid[1]] - if gridname == "atm_grid": - atm_gridvalue = gridvalue - other_gridvalue = component_grids[other_grid[1]] - gridmaps_roots = self.get_children("gridmaps") - gridmap_nodes = [] - for root in gridmaps_roots: - gmdriver = self.get(root, "driver") - if gmdriver is None or gmdriver == driver: - 
gridmap_nodes.extend(self.get_children("gridmap", root=root, - attributes={gridname:gridvalue, other_gridname:other_gridvalue})) - for gridmap_node in gridmap_nodes: - expect(len(self.attrib(gridmap_node)) == 2, - " Bad attribute count in gridmap node %s"%self.attrib(gridmap_node)) - map_nodes = self.get_children("map",root=gridmap_node) - for map_node in map_nodes: - name = self.get(map_node, "name") - value = self.text(map_node) - if name is not None and value is not None: - gridmaps[name] = value - logger.debug(" gridmap name,value are {}: {}" - .format(name,value)) - - # (3) check that all necessary maps are not set to idmap - griddict = dict(grids) - for node in required_gridmap_nodes: - grid1_name = self.get(node, "grid1") - grid2_name = self.get(node, "grid2") - prefix1 = griddict[grid1_name] - prefix2 = griddict[grid2_name] - grid1_value = component_grids[prefix1] - grid2_value = component_grids[prefix2] - if grid1_value is not None and grid2_value is not None: - if grid1_value != grid2_value and grid1_value != 'null' and grid2_value != 'null': - map_ = gridmaps[self.text(node)] - if map_ == 'idmap': - if grid1_name == "ocn_grid" and grid1_value == atm_gridvalue: - logger.debug('ocn_grid == atm_grid so this is not an idmap error') - else: - if driver == "nuopc": - gridmaps[self.text(node)] = 'unset' - else: - logger.warning("Warning: missing non-idmap {} for {}, {} and {} {} ".format(self.text(node), grid1_name, grid1_value, grid2_name, grid2_value)) - - return gridmaps - - def print_values(self, long_output=None): - # write out help message - helptext = self.get_element_text("help") - logger.info("{} ".format(helptext)) - - logger.info("{:5s}-------------------------------------------------------------".format("")) - logger.info("{:10s} default component grids:\n".format("")) - logger.info(" component compset value " ) - logger.info("{:5s}-------------------------------------------------------------".format("")) - default_nodes = self.get_children("model_grid_defaults", root=self.get_child("grids")) - for default_node in default_nodes: - grid_nodes = self.get_children("grid", root=default_node) - for grid_node in grid_nodes: - name = self.get(grid_node, "name") - compset = self.get(grid_node, "compset") - value = self.text(grid_node) - logger.info(" {:6s} {:15s} {:10s}".format(name, compset, value)) - logger.info("{:5s}-------------------------------------------------------------".format("")) - - domains = {} - if long_output is not None: - domain_nodes = self.get_children("domain",root=self.get_child("domains")) - for domain_node in domain_nodes: - name = self.get(domain_node, 'name') - if name == 'null': - continue - desc = self.text(self.get_child("desc", root=domain_node)) - files = "" - file_nodes = self.get_children("file", root=domain_node) - for file_node in file_nodes: - filename = self.text(file_node) - mask_attrib = self.get(file_node, "mask") - grid_attrib = self.get(file_node, "grid") - files += "\n " + filename - if mask_attrib or grid_attrib: - files += " (only for" - if mask_attrib: - files += " mask: " + mask_attrib - if grid_attrib: - files += " grid match: " + grid_attrib - if mask_attrib or grid_attrib: - files += ")" - domains[name] = "\n {} with domain file(s): {} ".format(desc, files) - - model_grid_nodes = self.get_children("model_grid", root=self.get_child("grids")) - for model_grid_node in model_grid_nodes: - alias = self.get(model_grid_node, "alias") - compset = self.get(model_grid_node, "compset") - not_compset = self.get(model_grid_node, "not_compset") 
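Roughly, the gridmap resolution above starts every required map at "idmap" and then lets any gridmap entry whose grid attributes all match the chosen component grids override it; a dictionary sketch with invented map and file names:

required = ["ATM2OCN_FMAPNAME", "ATM2OCN_SMAPNAME"]   # illustrative map names
gridmaps = {name: "idmap" for name in required}

component_grids = {"atm_grid": "ne30np4", "ocn_grid": "gx1v7"}
candidates = [   # stands in for <gridmap> nodes and their <map> children
    {"attrs": {"atm_grid": "ne30np4", "ocn_grid": "gx1v7"},
     "maps": {"ATM2OCN_FMAPNAME": "map_atm_to_ocn_example.nc"}},
]

for cand in candidates:
    if all(component_grids.get(key) == value for key, value in cand["attrs"].items()):
        gridmaps.update(cand["maps"])

print(gridmaps)   # the unmatched map stays 'idmap'; the matched one is overridden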
- restriction = "" - if compset: - restriction += "only for compsets that are {} ".format(compset) - if not_compset: - restriction += "only for compsets that are not {} ".format(not_compset) - if restriction: - logger.info("\n alias: {} ({})".format(alias,restriction)) - else: - logger.info("\n alias: {}".format(alias)) - grid_nodes = self.get_children("grid", root=model_grid_node) - grids = "" - gridnames = [] - for grid_node in grid_nodes: - gridnames.append(self.text(grid_node)) - grids += self.get(grid_node, "name") + ":" + self.text(grid_node) + " " - logger.info(" non-default grids are: {}".format(grids)) - mask_nodes = self.get_children("mask", root=model_grid_node) - for mask_node in mask_nodes: - logger.info(" mask is: {}".format(self.text(mask_node))) - if long_output is not None: - gridnames = set(gridnames) - for gridname in gridnames: - if gridname != "null": - logger.info (" {}".format(domains[gridname])) diff --git a/scripts/lib/CIME/XML/headers.py b/scripts/lib/CIME/XML/headers.py deleted file mode 100644 index d2e98ca9c68..00000000000 --- a/scripts/lib/CIME/XML/headers.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Interface to the config_headers.xml file. This class inherits from EntryID.py -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files - -logger = logging.getLogger(__name__) - -class Headers(GenericXML): - def __init__(self,infile=None): - """ - initialize an object - - >>> files = Files() - >>> files.get_value('CASEFILE_HEADERS',resolved=False) - '$CIMEROOT/config/config_headers.xml' - """ - if infile is None: - files = Files() - infile = files.get_value('CASEFILE_HEADERS', resolved=True) - super(Headers, self).__init__(infile) - - def get_header_node(self, fname): - fnode = self.get_child("file", attributes={"name" : fname}) - headernode = self.get_child("header", root=fnode) - return headernode diff --git a/scripts/lib/CIME/XML/inputdata.py b/scripts/lib/CIME/XML/inputdata.py deleted file mode 100644 index 47f140dc512..00000000000 --- a/scripts/lib/CIME/XML/inputdata.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Interface to the config_inputdata.xml file. 
This class inherits from GenericXML.py -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files -from CIME.utils import expect - -logger = logging.getLogger(__name__) - -class Inputdata(GenericXML): - - def __init__(self, infile=None, files=None): - """ - initialize a files object given input pes specification file - """ - if files is None: - files = Files() - if infile is None: - infile = files.get_value("INPUTDATA_SPEC_FILE") - schema = files.get_schema("INPUTDATA_SPEC_FILE") - logger.debug("DEBUG: infile is {}".format(infile)) - GenericXML.__init__(self, infile, schema=schema) - - self._servernode = None - - def get_next_server(self): - protocol = None - address = None - user = '' - passwd = '' - chksum_file = None - servernodes = self.get_children("server") - if self._servernode is None: - self._servernode = servernodes[0] - else: - prevserver = self._servernode - for i, node in enumerate(servernodes): - if self._servernode == node and len(servernodes)>i+1: - self._servernode = servernodes[i+1] - break - if prevserver is not None and self._servernode == prevserver: - self._servernode = None - - if self._servernode is not None: - protocol = self.text(self.get_child("protocol", root = self._servernode)) - address = self.text(self.get_child("address", root = self._servernode)) - unode = self.get_optional_child("user", root = self._servernode) - if unode: - user = self.text(unode) - pnode = self.get_optional_child("password", root = self._servernode) - if pnode: - passwd = self.text(pnode) - csnode = self.get_optional_child("checksum", root = self._servernode) - if csnode: - chksum_file = self.text(csnode) - return protocol, address, user, passwd, chksum_file diff --git a/scripts/lib/CIME/XML/machines.py b/scripts/lib/CIME/XML/machines.py deleted file mode 100644 index 25a91ce2059..00000000000 --- a/scripts/lib/CIME/XML/machines.py +++ /dev/null @@ -1,350 +0,0 @@ -""" -Interface to the config_machines.xml file. 
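The server rotation in get_next_server above, reduced to a list walk: each call advances to the next configured server and returns None once the list is exhausted. Protocols and addresses here are invented:

class ServerRotation(object):
    """Stand-in for the <server> entries in the inputdata specification."""

    def __init__(self, servers):
        self._servers = servers
        self._index = None

    def get_next_server(self):
        if self._index is None:
            self._index = 0
        elif self._index + 1 < len(self._servers):
            self._index += 1
        else:
            return None
        return self._servers[self._index]

rotation = ServerRotation([("wget", "https://data.example.org/inputdata"),
                           ("ftp", "ftp://ftp.example.org/inputdata")])
print(rotation.get_next_server())   # ('wget', 'https://data.example.org/inputdata')
print(rotation.get_next_server())   # ('ftp', 'ftp://ftp.example.org/inputdata')
print(rotation.get_next_server())   # None, no servers left to fall back to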
This class inherits from GenericXML.py -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files -from CIME.utils import convert_to_unknown_type, get_cime_config - -import socket - -logger = logging.getLogger(__name__) - -class Machines(GenericXML): - - def __init__(self, infile=None, files=None, machine=None): - """ - initialize an object - if a filename is provided it will be used, - otherwise if a files object is provided it will be used - otherwise create a files object from default values - """ - - self.machine_node = None - self.machine = None - self.machines_dir = None - schema = None - if files is None: - files = Files() - if infile is None: - infile = files.get_value("MACHINES_SPEC_FILE") - schema = files.get_schema("MACHINES_SPEC_FILE") - logger.debug("Verifying using schema {}".format(schema)) - - self.machines_dir = os.path.dirname(infile) - - GenericXML.__init__(self, infile, schema) - - # Append the contents of $HOME/.cime/config_machines.xml if it exists - # This could cause problems if node matchs are repeated when only one is expected - local_infile = os.path.join(os.environ.get("HOME"),".cime","config_machines.xml") - logger.debug("Infile: {}".format(local_infile)) - if os.path.exists(local_infile): - GenericXML.read(self, local_infile, schema) - - if machine is None: - if "CIME_MACHINE" in os.environ: - machine = os.environ["CIME_MACHINE"] - else: - cime_config = get_cime_config() - if cime_config.has_option("main", "machine"): - machine = cime_config.get("main", "machine") - if machine is None: - machine = self.probe_machine_name() - - expect(machine is not None, "Could not initialize machine object from {} or {}".format(infile, local_infile)) - self.set_machine(machine) - - def get_child(self, name=None, attributes=None, root=None, err_msg=None): - if root is None: - root = self.machine_node - return super(Machines, self).get_child(name, attributes, root, err_msg) - - def get_machines_dir(self): - """ - Return the directory of the machines file - """ - return self.machines_dir - - def get_machine_name(self): - """ - Return the name of the machine - """ - return self.machine - - def get_node_names(self): - """ - Return the names of all the child nodes for the target machine - """ - nodes = self.get_children(root=self.machine_node) - node_names = [] - for node in nodes: - node_names.append(self.name(node)) - return node_names - - def get_first_child_nodes(self, nodename): - """ - Return the names of all the child nodes for the target machine - """ - nodes = self.get_children(nodename, root=self.machine_node) - return nodes - - def list_available_machines(self): - """ - Return a list of machines defined for a given CIME_MODEL - """ - machines = [] - nodes = self.get_children("machine") - for node in nodes: - mach = self.get(node, "MACH") - machines.append(mach) - return machines - - def probe_machine_name(self, warn=True): - """ - Find a matching regular expression for hostname - in the NODENAME_REGEX field in the file. First match wins. 
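A stripped-down version of the probing just described: the fully qualified name and the bare hostname are each tried against every machine's NODENAME_REGEX, and the first match wins. The machine table below is invented:

import re
import socket

machines = {                      # invented NODENAME_REGEX table for illustration
    "cluster-like": r"login\d+\.hpc\.example\.org",
    "workstation": r".*\.example\.org",
}

def probe_machine_name():
    for candidate in (socket.getfqdn(), socket.gethostname()):
        for mach, regex_str in machines.items():
            if re.match(regex_str, candidate):
                return mach
    return None

print(probe_machine_name())       # a machine name, or None if nothing matches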
- """ - - names_not_found = [] - - nametomatch = socket.getfqdn() - machine = self._probe_machine_name_one_guess(nametomatch) - - if machine is None: - names_not_found.append(nametomatch) - - nametomatch = socket.gethostname() - machine = self._probe_machine_name_one_guess(nametomatch) - - if machine is None: - names_not_found.append(nametomatch) - - names_not_found_quoted = ["'" + name + "'" for name in names_not_found] - names_not_found_str = ' or '.join(names_not_found_quoted) - if warn: - logger.warning("Could not find machine match for {}".format(names_not_found_str)) - - return machine - - def _probe_machine_name_one_guess(self, nametomatch): - """ - Find a matching regular expression for nametomatch in the NODENAME_REGEX - field in the file. First match wins. Returns None if no match is found. - """ - - machine = None - nodes = self.get_children("machine") - - for node in nodes: - machtocheck = self.get(node, "MACH") - logger.debug("machine is " + machtocheck) - regex_str_node = self.get_optional_child("NODENAME_REGEX", root=node) - regex_str = machtocheck if regex_str_node is None else self.text(regex_str_node) - - if regex_str is not None: - logger.debug("machine regex string is " + regex_str) - regex = re.compile(regex_str) - if regex.match(nametomatch): - logger.debug("Found machine: {} matches {}".format(machtocheck, nametomatch)) - machine = machtocheck - break - - return machine - - def set_machine(self, machine): - """ - Sets the machine block in the Machines object - - >>> machobj = Machines(machine="melvin") - >>> machobj.get_machine_name() - 'melvin' - >>> machobj.set_machine("trump") # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: No machine trump found - """ - if machine == "Query": - self.machine = machine - elif self.machine != machine or self.machine_node is None: - self.machine_node = super(Machines,self).get_child("machine", {"MACH" : machine}, err_msg="No machine {} found".format(machine)) - self.machine = machine - - return machine - #pylint: disable=arguments-differ - def get_value(self, name, attributes=None, resolved=True, subgroup=None): - """ - Get Value of fields in the config_machines.xml file - """ - expect(self.machine_node is not None, "Machine object has no machine defined") - expect(subgroup is None, "This class does not support subgroups") - value = None - - # COMPILER and MPILIB are special, if called without arguments they get the default value from the - # COMPILERS and MPILIBS lists in the file. 
- if name == "COMPILER": - value = self.get_default_compiler() - elif name == "MPILIB": - value = self.get_default_MPIlib(attributes) - else: - node = self.get_optional_child(name, root=self.machine_node, attributes=attributes) - if node is not None: - value = self.text(node) - if resolved: - if value is not None: - value = self.get_resolved_value(value) - elif name in os.environ: - value = os.environ[name] - - value = convert_to_unknown_type(value) - - return value - - def get_field_from_list(self, listname, reqval=None, attributes=None): - """ - Some of the fields have lists of valid values in the xml, parse these - lists and return the first value if reqval is not provided and reqval - if it is a valid setting for the machine - """ - expect(self.machine_node is not None, "Machine object has no machine defined") - supported_values = self.get_value(listname, attributes=attributes) - # if no match with attributes, try without - if supported_values is None: - supported_values = self.get_value(listname, attributes=None) - - expect(supported_values is not None, - "No list found for " + listname + " on machine " + self.machine) - supported_values = supported_values.split(",") #pylint: disable=no-member - - if reqval is None or reqval == "UNSET": - return supported_values[0] - - for val in supported_values: - if val == reqval: - return reqval - return None - - def get_default_compiler(self): - """ - Get the compiler to use from the list of COMPILERS - """ - cime_config = get_cime_config() - if cime_config.has_option('main','COMPILER'): - value = cime_config.get('main', 'COMPILER') - expect(self.is_valid_compiler(value), "User-selected compiler {} is not supported on machine {}".format(value, self.machine)) - else: - value = self.get_field_from_list("COMPILERS") - return value - - def get_default_MPIlib(self, attributes=None): - """ - Get the MPILIB to use from the list of MPILIBS - """ - return self.get_field_from_list("MPILIBS", attributes=attributes) - - def is_valid_compiler(self,compiler): - """ - Check the compiler is valid for the current machine - - >>> machobj = Machines(machine="edison") - >>> machobj.get_default_compiler() - 'intel' - >>> machobj.is_valid_compiler("gnu") - True - >>> machobj.is_valid_compiler("nag") - False - """ - return self.get_field_from_list("COMPILERS", reqval=compiler) is not None - - def is_valid_MPIlib(self, mpilib, attributes=None): - """ - Check the MPILIB is valid for the current machine - - >>> machobj = Machines(machine="edison") - >>> machobj.is_valid_MPIlib("mpi-serial") - True - >>> machobj.is_valid_MPIlib("fake-mpi") - False - """ - return mpilib == "mpi-serial" or \ - self.get_field_from_list("MPILIBS", reqval=mpilib, attributes=attributes) is not None - - def has_batch_system(self): - """ - Return if this machine has a batch system - - >>> machobj = Machines(machine="edison") - >>> machobj.has_batch_system() - True - >>> machobj.set_machine("melvin") - 'melvin' - >>> machobj.has_batch_system() - False - """ - result = False - batch_system = self.get_optional_child("BATCH_SYSTEM", root=self.machine_node) - if batch_system is not None: - result = (self.text(batch_system) is not None and self.text(batch_system) != "none") - logger.debug("Machine {} has batch: {}".format(self.machine, result)) - return result - - def get_suffix(self, suffix_type): - node = self.get_optional_child("default_run_suffix") - if node is not None: - suffix_node = self.get_optional_child(suffix_type, root=node) - if suffix_node is not None: - return self.text(suffix_node) - - 
return None - - def set_value(self, vid, value, subgroup=None, ignore_type=True): - tmproot = self.root - self.root = self.machine_node - #pylint: disable=assignment-from-no-return - result = super(Machines, self).set_value(vid, value, subgroup=subgroup, - ignore_type=ignore_type) - self.root = tmproot - return result - - - def print_values(self): - # write out machines - machines = self.get_children("machine") - logger.info("Machines") - for machine in machines: - name = self.get(machine, "MACH") - desc = self.get_child("DESC", root=machine) - os_ = self.get_child("OS", root=machine) - compilers = self.get_child("COMPILERS", root=machine) - max_tasks_per_node = self.get_child("MAX_TASKS_PER_NODE", root=machine) - max_mpitasks_per_node = self.get_child("MAX_MPITASKS_PER_NODE", root=machine) - - print( " {} : {} ".format(name , self.text(desc))) - print( " os ", self.text(os_)) - print( " compilers ",self.text(compilers)) - if max_mpitasks_per_node is not None: - print(" pes/node ",self.text(max_mpitasks_per_node)) - if max_tasks_per_node is not None: - print(" max_tasks/node ",self.text(max_tasks_per_node)) - - def return_values(self): - """ return a dictionary of machine info - This routine is used by external tools in https://github.com/NCAR/CESM_xml2html - """ - machines = self.get_children("machine") - mach_dict = dict() - logger.debug("Machines return values") - for machine in machines: - name = self.get(machine, "MACH") - desc = self.get_child("DESC", root=machine) - mach_dict[(name,"description")] = self.text(desc) - os_ = self.get_child("OS", root=machine) - mach_dict[(name,"os")] = self.text(os_) - compilers = self.get_child("COMPILERS", root=machine) - mach_dict[(name,"compilers")] = self.text(compilers) - max_tasks_per_node = self.get_child("MAX_TASKS_PER_NODE", root=machine) - mach_dict[(name,"max_tasks_per_node")] = self.text(max_tasks_per_node) - max_mpitasks_per_node = self.get_child("MAX_MPITASKS_PER_NODE", root=machine) - mach_dict[(name,"max_mpitasks_per_node")] = self.text(max_mpitasks_per_node) - - return mach_dict diff --git a/scripts/lib/CIME/XML/namelist_definition.py b/scripts/lib/CIME/XML/namelist_definition.py deleted file mode 100644 index 849e65b529a..00000000000 --- a/scripts/lib/CIME/XML/namelist_definition.py +++ /dev/null @@ -1,448 +0,0 @@ -"""Interface to `namelist_definition.xml`. - -This module contains only one class, `NamelistDefinition`, inheriting from -`EntryID`. -""" - -# Warnings we typically ignore. -# pylint:disable=invalid-name - -# Disable warnings due to using `standard_module_setup` -# pylint:disable=wildcard-import,unused-wildcard-import - -import re -import collections - -from CIME.namelist import fortran_namelist_base_value, \ - is_valid_fortran_namelist_literal, character_literal_to_string, \ - expand_literal_list, Namelist, get_fortran_name_only - -from CIME.XML.standard_module_setup import * -from CIME.XML.entry_id import EntryID -from CIME.XML.files import Files - -logger = logging.getLogger(__name__) - -_array_size_re = re.compile(r'^(?P[^(]+)\((?P[^)]+)\)$') - -class CaseInsensitiveDict(dict): - - """Basic case insensitive dict with strings only keys. 
- From https://stackoverflow.com/a/27890005 """ - - proxy = {} - - def __init__(self, data): - dict.__init__(self) - self.proxy = dict((k.lower(), k) for k in data) - for k in data: - self[k] = data[k] - - def __contains__(self, k): - return k.lower() in self.proxy - - def __delitem__(self, k): - key = self.proxy[k.lower()] - super(CaseInsensitiveDict, self).__delitem__(key) - del self.proxy[k.lower()] - - def __getitem__(self, k): - key = self.proxy[k.lower()] - return super(CaseInsensitiveDict, self).__getitem__(key) - - def get(self, k, default=None): - return self[k] if k in self else default - - def __setitem__(self, k, v): - super(CaseInsensitiveDict, self).__setitem__(k, v) - self.proxy[k.lower()] = k - -class NamelistDefinition(EntryID): - - """Class representing variable definitions for a namelist. - This class inherits from `EntryID`, and supports most inherited methods; - however, `set_value` is unsupported. - - Additional public methods: - - dict_to_namelist. - - is_valid_value - - validate - """ - - def __init__(self, infile, files=None): - """Construct a `NamelistDefinition` from an XML file.""" - - # if the file is invalid we may not be able to check the version - # but we need to do it this way until we remove the version 1 files - schema = None - if files is None: - files = Files() - schema = files.get_schema("NAMELIST_DEFINITION_FILE") - expect(os.path.isfile(infile), "File {} does not exist".format(infile)) - super(NamelistDefinition, self).__init__(infile, schema=schema) - - self._attributes = {} - self._entry_nodes = [] - self._entry_ids = [] - self._valid_values = {} - self._entry_types = {} - self._group_names = CaseInsensitiveDict({}) - self._nodes = {} - - def set_nodes(self, skip_groups=None): - """ - populates the object data types for all nodes that are not part of the skip_groups array - returns nodes that do not have attributes of `skip_default_entry` or `per_stream_entry` - """ - default_nodes = [] - for node in self.get_children("entry"): - name = self.get(node, "id") - skip_default_entry = self.get(node, "skip_default_entry") == "true" - per_stream_entry = self.get(node, "per_stream_entry") == "true" - set_node_values = False - if skip_groups: - group_name = self._get_group_name(node) - if not group_name in skip_groups: - self._entry_nodes.append(node) - set_node_values = True - if not skip_default_entry and not per_stream_entry: - default_nodes.append(node) - else: - self._entry_nodes.append(node) - set_node_values = True - if not skip_default_entry and not per_stream_entry: - default_nodes.append(node) - if set_node_values: - self._entry_nodes.append(node) - self._entry_ids.append(name) - self._nodes[name] = node - self._entry_types[name] = self._get_type(node) - self._valid_values[name] = self._get_valid_values(node) - self._group_names[name] = self._get_group_name(node) - return default_nodes - - def _get_group_name(self, node=None): - if self.get_version() == 1.0: - group = self.get(node, 'group') - elif self.get_version() >= 2.0: - group = self.get_element_text("group", root=node) - return(group) - - def _get_type(self, node): - if self.get_version() == 1.0: - type_info = self.get(node, 'type') - elif self.get_version() >= 2.0: - type_info = self._get_type_info(node) - return(type_info) - - def _get_valid_values(self, node): - # The "valid_values" attribute is not required, and an empty string has - # the same effect as not specifying it. 
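# Editor's illustration (not part of the patch): the proxy-map idea behind
# CaseInsensitiveDict above, shown standalone. Keys keep their original
# spelling while lookups ignore case. Class and sample keys are hypothetical.
class CIDict(dict):
    def __init__(self, data):
        super().__init__(data)
        self._proxy = {k.lower(): k for k in data}   # lower-cased key -> real key
    def __contains__(self, k):
        return k.lower() in self._proxy
    def __getitem__(self, k):
        return super().__getitem__(self._proxy[k.lower()])

if __name__ == "__main__":
    groups = CIDict({"Aerosol_NL": "phys_grp"})
    print("aerosol_nl" in groups)      # True, despite the different case
    print(groups["AEROSOL_NL"])        # 'phys_grp'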
- # Returns a list from a comma seperated string in xml - valid_values = '' - if self.get_version() == 1.0: - valid_values = self.get(node, 'valid_values') - elif self.get_version() >= 2.0: - valid_values = self._get_node_element_info(node, "valid_values") - if valid_values == '': - valid_values = None - if valid_values is not None: - valid_values = valid_values.split(',') - return valid_values - - def get_group(self, name): - return self._group_names[name] - - def add_attributes(self, attributes): - self._attributes = attributes - - def get_entry_nodes(self): - return self._entry_nodes - - def get_per_stream_entries(self): - entries = [] - nodes = self.get_children("entry") - for node in nodes: - per_stream_entry = self.get(node, "per_stream_entry") == "true" - if per_stream_entry: - entries.append(self.get(node, "id")) - return entries - - # Currently we don't use this object to construct new files, and it's no - # good for that purpose anyway, so stop this function from being called. - def set_value(self, vid, value, subgroup=None, ignore_type=True): - """This function is not implemented.""" - raise TypeError("NamelistDefinition does not support `set_value`.") - - def get_value_match(self, vid, attributes=None, exact_match=True, entry_node=None): - """Return the default value for the variable named `vid`. - - The return value is a list of strings corresponding to the - comma-separated list of entries for the value (length 1 for scalars). If - there is no default value in the file, this returns `None`. - """ - # Merge internal attributes with those passed in. - all_attributes = {} - if self._attributes is not None: - all_attributes.update(self._attributes) - if attributes is not None: - all_attributes.update(attributes) - - if entry_node is None: - entry_node = self._nodes[vid] - value = super(NamelistDefinition, self).get_value_match(vid.lower(),attributes=all_attributes, exact_match=exact_match, - entry_node=entry_node) - if value is None: - value = '' - else: - value = self._split_defaults_text(value) - - return value - - @staticmethod - def _split_defaults_text(string): - """Take a comma-separated list in a string, and split it into a list.""" - # Some trickiness here; we want to split items on commas, but not inside - # quote-delimited strings. Stripping whitespace is also useful. - value = [] - if len(string): - pos = 0 - delim = None - for i, char in enumerate(string): - if delim is None: - # If not inside a string... - if char in ('"', "'"): - # if we have a quote character, start a string. - delim = char - elif char == ',': - # if we have a comma, this is a new value. - value.append(string[pos:i].strip()) - pos = i+1 - else: - # If inside a string, the only thing that can happen is the end - # of the string. - if char == delim: - delim = None - value.append(string[pos:].strip()) - return value - - def split_type_string(self, name): - """Split a 'type' attribute string into its component parts. - - The `name` argument is the variable name. - This is used for error reporting purposes. - - The return value is a tuple consisting of the type itself, a length - (which is an integer for character variables, otherwise `None`), and the - size of the array (which is 1 for scalar variables). - """ - type_string = self._entry_types[name] - - # 'char' is frequently used as an abbreviation of 'character'. - type_string = type_string.replace('char', 'character') - - # Separate into a size and the rest of the type. 
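# Editor's usage sketch (not in the original patch): what the quote-aware
# splitting in _split_defaults_text above is meant to do, written as a
# standalone function with a hypothetical sample default string.
def split_defaults_text(string):
    value, pos, delim = [], 0, None
    for i, char in enumerate(string):
        if delim is None:
            if char in ('"', "'"):
                delim = char                      # entering a quoted literal
            elif char == ',':
                value.append(string[pos:i].strip())
                pos = i + 1
        elif char == delim:
            delim = None                          # leaving the quoted literal
    if string:
        value.append(string[pos:].strip())
    return value

if __name__ == "__main__":
    # The comma inside the quoted string must not start a new element.
    print(split_defaults_text("'a, b', 3, 'c'"))   # ["'a, b'", '3', "'c'"]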
- size_match = _array_size_re.search(type_string) - if size_match: - type_string = size_match.group('type') - size_string = size_match.group('size') - try: - size = int(size_string) - except ValueError: - expect(False, - "In namelist definition, variable {} had the non-integer string {!r} specified as an array size.".format(name, size_string)) - else: - size = 1 - - # Separate into a type and an optional length. - type_, star, length = type_string.partition('*') - if star == '*': - # Length allowed only for character variables. - expect(type_ == 'character', - "In namelist definition, length specified for non-character " - "variable {}.".format(name)) - # Check that the length is actually an integer, to make the error - # message a bit cleaner if the xml input is bad. - try: - max_len = int(length) - except ValueError: - expect(False, - "In namelist definition, character variable {} had the non-integer string {!r} specified as a length.".format(name, length)) - else: - max_len = None - return type_, max_len, size - - @staticmethod - def _canonicalize_value(type_, value): - """Create 'canonical' version of a value for comparison purposes.""" - canonical_value = [fortran_namelist_base_value(scalar) - for scalar in value] - canonical_value = [scalar for scalar in canonical_value if scalar != ''] - if type_ == 'character': - canonical_value = [character_literal_to_string(scalar) - for scalar in canonical_value] - elif type_ == 'integer': - canonical_value = [int(scalar) for scalar in canonical_value] - return canonical_value - - def is_valid_value(self, name, value): - """Determine whether a value is valid for the named variable. - - The `value` argument must be a list of strings formatted as they would - appear in the namelist (even for scalar variables, in which case the - length of the list is always 1). - """ - # Separate into a type, optional length, and optional size. - type_, max_len, size = self.split_type_string(name) - invalid = [] - - # Check value against type. - for scalar in value: - if not is_valid_fortran_namelist_literal(type_, scalar): - invalid.append(scalar) - if len(invalid) > 0: - logger.warning("Invalid values {}".format(invalid)) - return False - - # Now that we know that the strings as input are valid Fortran, do some - # canonicalization for further checks. - canonical_value = self._canonicalize_value(type_, value) - - # Check maximum length (if applicable). - if max_len is not None: - for scalar in canonical_value: - if len(scalar) > max_len: - return False - - # Check valid value constraints (if applicable). - valid_values = self._valid_values[name] - if valid_values is not None: - expect(type_ in ('integer', 'character'), - "Found valid_values attribute for variable {} with type {}, but valid_values only allowed for character and integer variables.".format(name, type_)) - if type_ == 'integer': - compare_list = [int(vv) for vv in valid_values] - else: - compare_list = valid_values - for scalar in canonical_value: - if scalar not in compare_list: - invalid.append(scalar) - if len(invalid) > 0: - logger.warning("Invalid values {}".format(invalid)) - return False - - # Check size of input array. - if len(expand_literal_list(value)) > size: - expect(False, "Value index exceeds variable size for variable {}, allowed array length is {} value array size is {}".format(name, size, len(expand_literal_list(value)))) - return True - - def _expect_variable_in_definition(self, name, variable_template): - """Used to get a better error message for an unexpected variable. 
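# Editor's sketch (not part of the patch): the shape of the 'type' strings that
# split_type_string above takes apart, e.g. "char*256(30)" means a character
# variable of length 256 in an array of size 30. The named regex groups 'type'
# and 'size' are assumed from the group() calls in the surrounding code;
# sample values are hypothetical.
import re

_size_re = re.compile(r'^(?P<type>[^(]+)\((?P<size>[^)]+)\)$')

def split_type_string(type_string):
    type_string = type_string.replace('char', 'character')
    size = 1
    m = _size_re.search(type_string)
    if m:
        type_string, size = m.group('type'), int(m.group('size'))
    type_, star, length = type_string.partition('*')
    max_len = int(length) if star == '*' else None
    return type_, max_len, size

if __name__ == "__main__":
    print(split_type_string("char*256(30)"))   # ('character', 256, 30)
    print(split_type_string("integer"))        # ('integer', None, 1)
    print(split_type_string("logical(6)"))     # ('logical', None, 6)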
- case insensitve match""" - - expect(name in self._entry_ids, - (variable_template + " is not in the namelist definition.").format(str(name))) - - def _user_modifiable_in_variable_definition(self, name): - # Is name user modifiable? - node = self.get_optional_child("entry", attributes={'id': name}) - user_modifiable_only_by_xml = self.get(node, 'modify_via_xml') - if user_modifiable_only_by_xml is not None: - expect(False, - "Cannot change {} in user_nl file: set via xml variable {}".format(name, user_modifiable_only_by_xml)) - user_cannot_modify = self.get(node, 'cannot_modify_by_user_nl') - if user_cannot_modify is not None: - expect(False, - "Cannot change {} in user_nl file: {}".format(name, user_cannot_modify)) - def _generate_variable_template(self, filename): - # Improve error reporting when a file name is provided. - if filename is None: - variable_template = "Variable {!r}" - else: - # for the next step we want the name of the original user_nl file not the internal one - # We do this by extracting the component name from the filepath string - if "Buildconf" in filename and "namelist_infile" in filename: - msgfn = "user_nl_" + (filename.split(os.sep)[-2])[:-4] - else: - msgfn = filename - variable_template = "Variable {!r} from file " + repr(str(msgfn)) - return variable_template - - def validate(self, namelist,filename=None): - """Validate a namelist object against this definition. - - The optional `filename` argument can be used to assist in error - reporting when the namelist comes from a specific, known file. - """ - variable_template = self._generate_variable_template(filename) - - # Iterate through variables. - for group_name in namelist.get_group_names(): - for variable_name in namelist.get_variable_names(group_name): - # Check that the variable is defined... - qualified_variable_name = get_fortran_name_only(variable_name) - self._expect_variable_in_definition(qualified_variable_name, variable_template) - - # Check if can actually change this variable via filename change - if filename is not None: - self._user_modifiable_in_variable_definition(qualified_variable_name) - - # and has the right group name... - var_group = self.get_group(qualified_variable_name) - expect(var_group == group_name, - (variable_template + " is in a group named {!r}, but should be in {!r}.").format(str(variable_name), str(group_name), str(var_group))) - - # and has a valid value. - value = namelist.get_variable_value(group_name, variable_name) - expect(self.is_valid_value(qualified_variable_name, value), - (variable_template + " has invalid value {!r}.").format(str(variable_name), [str(scalar) for scalar in value])) - - def dict_to_namelist(self, dict_, filename=None): - """Converts a dictionary of name-value pairs to a `Namelist`. - - The input is assumed to be similar to the output of `parse` when - `groupless=True` is set. This function uses the namelist definition file - to look up the namelist group associated with each variable, and uses - this information to create a true `Namelist` object. - - The optional `filename` argument can be used to assist in error - reporting when the namelist comes from a specific, known file. - """ - # Improve error reporting when a file name is provided. 
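# Editor's illustration (not in the patch): how _generate_variable_template
# above recovers the "user_nl_" plus component name from an internal Buildconf
# namelist_infile path for friendlier error messages. The example path is
# hypothetical.
import os

def user_nl_name_from_infile(filename):
    # e.g. .../Buildconf/cplconf/namelist_infile -> "user_nl_cpl"
    if "Buildconf" in filename and "namelist_infile" in filename:
        return "user_nl_" + (filename.split(os.sep)[-2])[:-4]
    return filename

if __name__ == "__main__":
    path = os.sep.join(["case", "Buildconf", "cplconf", "namelist_infile"])
    print(user_nl_name_from_infile(path))   # -> user_nl_cpl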
- variable_template = self._generate_variable_template(filename) - groups = {} - for variable_name in dict_: - variable_lc = variable_name.lower() - qualified_varname = get_fortran_name_only(variable_lc) - self._expect_variable_in_definition(qualified_varname, variable_template) - group_name = self.get_group(qualified_varname) - expect (group_name is not None, "No group found for var {}".format(variable_lc)) - if group_name not in groups: - groups[group_name] = collections.OrderedDict() - groups[group_name][variable_lc] = dict_[variable_name] - return Namelist(groups) - - def get_input_pathname(self, name): - node = self._nodes[name] - if self.get_version() == 1.0: - input_pathname = self.get(node, 'input_pathname') - elif self.get_version() >= 2.0: - input_pathname = self._get_node_element_info(node, "input_pathname") - return(input_pathname) - - # pylint: disable=arguments-differ - def get_default_value(self, item, attribute=None): - """Return the default value for the variable named `item`. - - The return value is a list of strings corresponding to the - comma-separated list of entries for the value (length 1 for scalars). If - there is no default value in the file, this returns `None`. - """ - # Merge internal attributes with those passed in. - all_attributes = {} - if self._attributes is not None: - all_attributes.update(self._attributes) - if attribute is not None: - all_attributes.update(attribute) - - value = self.get_value_match(item.lower(), all_attributes, True) - return self._split_defaults_text(value) diff --git a/scripts/lib/CIME/XML/pes.py b/scripts/lib/CIME/XML/pes.py deleted file mode 100644 index 238e59efd3c..00000000000 --- a/scripts/lib/CIME/XML/pes.py +++ /dev/null @@ -1,172 +0,0 @@ -""" -Interface to the config_pes.xml file. This class inherits from GenericXML.py -""" -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files -from CIME.utils import expect - -logger = logging.getLogger(__name__) - -class Pes(GenericXML): - - def __init__(self, infile, files=None): - """ - initialize a files object given input pes specification file - """ - if files is None: - files = Files() - schema = files.get_schema("PES_SPEC_FILE") - logger.debug("DEBUG: infile is {}".format(infile)) - GenericXML.__init__(self, infile, schema=schema) - - def find_pes_layout(self, grid, compset, machine, pesize_opts='M', mpilib=None): - opes_ntasks = {} - opes_nthrds = {} - opes_rootpe = {} - opes_pstrid = {} - oother_settings = {} - other_settings = {} - o_grid_nodes = [] - comments = None - # Get any override nodes - overrides = self.get_optional_child("overrides") - ocomments = None - if overrides is not None: - o_grid_nodes = self.get_children("grid", root = overrides) - opes_ntasks, opes_nthrds, opes_rootpe, opes_pstrid, oother_settings, ocomments = self._find_matches(o_grid_nodes, grid, compset, machine, pesize_opts, True) - - # Get all the nodes - grid_nodes = self.get_children("grid") - if o_grid_nodes: - gn_set = set(grid_nodes) - ogn_set = set(o_grid_nodes) - gn_set.difference_update(ogn_set) - grid_nodes = list(gn_set) - - - pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings, comments = self._find_matches(grid_nodes, grid, compset, machine, pesize_opts, False) - pes_ntasks.update(opes_ntasks) - pes_nthrds.update(opes_nthrds) - pes_rootpe.update(opes_rootpe) - pes_pstrid.update(opes_pstrid) - other_settings.update(oother_settings) - if ocomments is not None: - comments = ocomments - - - if mpilib == 
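# Editor's sketch (not in the patch): the grouping step dict_to_namelist above
# performs, shown with plain dicts. The group lookup and variable names here
# are hypothetical stand-ins for what the namelist definition file supplies.
import collections

def group_variables(dict_, group_of):
    groups = {}
    for name, value in dict_.items():
        group = group_of[name.lower()]          # definition file gives the group
        groups.setdefault(group, collections.OrderedDict())[name.lower()] = value
    return groups

if __name__ == "__main__":
    raw = {"DTIME": "1800", "ncdata": "'ic.nc'"}
    lookup = {"dtime": "time_nml", "ncdata": "cam_inparm"}
    print(group_variables(raw, lookup))
    # {'time_nml': OrderedDict([('dtime', '1800')]),
    #  'cam_inparm': OrderedDict([('ncdata', "'ic.nc'")])}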
"mpi-serial": - for i in iter(pes_ntasks): - pes_ntasks[i] = 1 - for i in iter(pes_rootpe): - pes_rootpe[i] = 0 - for i in iter(pes_pstrid): - pes_pstrid[i] = 0 - - logger.info("Pes setting: grid is {} ".format(grid)) - logger.info("Pes setting: compset is {} ".format(compset)) - logger.info("Pes setting: tasks is {} ".format(pes_ntasks)) - logger.info("Pes setting: threads is {} ".format(pes_nthrds)) - logger.info("Pes setting: rootpe is {} ".format(pes_rootpe)) - logger.info("Pes setting: pstrid is {} ".format(pes_pstrid)) - logger.info("Pes other settings: {}".format(other_settings)) - if comments is not None: - logger.info("Pes comments: {}".format(comments)) - - return pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings, comments - - def _find_matches(self, grid_nodes, grid, compset, machine, pesize_opts, override=False): - grid_choice = None - mach_choice = None - compset_choice = None - pesize_choice = None - max_points = -1 - pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings = {}, {}, {}, {}, {} - pe_select = None - comment = None - for grid_node in grid_nodes: - grid_match = self.get(grid_node, "name") - if grid_match == "any" or re.search(grid_match,grid): - mach_nodes = self.get_children("mach",root=grid_node) - for mach_node in mach_nodes: - mach_match = self.get(mach_node, "name") - if mach_match == "any" or re.search(mach_match, machine): - pes_nodes = self.get_children("pes", root=mach_node) - for pes_node in pes_nodes: - pesize_match = self.get(pes_node, "pesize") - compset_match = self.get(pes_node, "compset") - if (pesize_match == "any" or (pesize_opts is not None and \ - pesize_match == pesize_opts)) and \ - (compset_match == "any" or \ - re.search(compset_match,compset)): - - points = int(grid_match!="any")*3+int(mach_match!="any")*7+\ - int(compset_match!="any")*2+int(pesize_match!="any") - if override and points > 0: - for node in self.get_children(root=pes_node): - vid = self.name(node) - logger.info("vid is {}".format(vid)) - if "comment" in vid: - comment = self.text(node) - elif "ntasks" in vid: - for child in self.get_children(root=node): - pes_ntasks[self.name(child).upper()] = int(self.text(child)) - elif "nthrds" in vid: - for child in self.get_children(root=node): - pes_nthrds[self.name(child).upper()] = int(self.text(child)) - elif "rootpe" in vid: - for child in self.get_children(root=node): - pes_rootpe[self.name(child).upper()] = int(self.text(child)) - elif "pstrid" in vid: - for child in self.get_children(root=node): - pes_pstrid[self.name(child).upper()] = int(self.text(child)) - # if the value is already upper case its something else we are trying to set - elif vid == self.name(node): - other_settings[vid] = self.text(node) - - else: - if points > max_points: - pe_select = pes_node - max_points = points - mach_choice = mach_match - grid_choice = grid_match - compset_choice = compset_match - pesize_choice = pesize_match - elif points == max_points: - logger.warning("mach_choice {} mach_match {}".format(mach_choice, mach_match)) - logger.warning("grid_choice {} grid_match {}".format(grid_choice, grid_match)) - logger.warning("compset_choice {} compset_match {}".format(compset_choice, compset_match)) - logger.warning("pesize_choice {} pesize_match {}".format(pesize_choice, pesize_match)) - logger.warning("points = {:d}".format(points)) - expect(False, "More than one PE layout matches given PE specs") - if not override: - for node in self.get_children(root=pe_select): - vid = self.name(node) - logger.debug("vid is {}".format(vid)) 
- if "comment" in vid: - comment = self.text(node) - elif "ntasks" in vid: - for child in self.get_children(root=node): - pes_ntasks[self.name(child).upper()] = int(self.text(child)) - elif "nthrds" in vid: - for child in self.get_children(root=node): - pes_nthrds[self.name(child).upper()] = int(self.text(child)) - elif "rootpe" in vid: - for child in self.get_children(root=node): - pes_rootpe[self.name(child).upper()] = int(self.text(child)) - elif "pstrid" in vid: - for child in self.get_children(root=node): - pes_pstrid[self.name(child).upper()] = int(self.text(child)) - # if the value is already upper case its something else we are trying to set - elif vid == self.name(node): - other_settings[vid] = self.text(node) - if grid_choice != 'any' or logger.isEnabledFor(logging.DEBUG): - logger.info("Pes setting: grid match is {} ".format(grid_choice )) - if mach_choice != 'any' or logger.isEnabledFor(logging.DEBUG): - logger.info("Pes setting: machine match is {} ".format(mach_choice)) - if compset_choice != 'any' or logger.isEnabledFor(logging.DEBUG): - logger.info("Pes setting: compset_match is {} ".format(compset_choice)) - if pesize_choice != 'any' or logger.isEnabledFor(logging.DEBUG): - logger.info("Pes setting: pesize match is {} ".format(pesize_choice)) - - return pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings, comment diff --git a/scripts/lib/CIME/XML/pio.py b/scripts/lib/CIME/XML/pio.py deleted file mode 100644 index ebe8e30ab12..00000000000 --- a/scripts/lib/CIME/XML/pio.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Class for config_pio files . This class inherits from EntryID.py -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.entry_id import EntryID -from CIME.XML.files import Files -logger = logging.getLogger(__name__) - -class PIO(EntryID): - - def __init__(self, infile=None, files=None): - if infile is None: - if files is None: - files = Files() - infile = files.get_value("PIO_SPEC_FILE") - - EntryID.__init__(self, infile) - - def get_defaults(self, grid=None, compset=None, mach=None, compiler=None, mpilib=None): # pylint: disable=unused-argument - # should we have a env_pio file - defaults = {} - - # Load args into attribute dict - attributes = {} - for attrib in ["grid", "compset", "mach", "compiler", "mpilib"]: - if locals()[attrib] is not None: - attributes[attrib] = locals()[attrib] - - # Find defauts - for node in self.get_children("entry"): - value = self.get_default_value(node, attributes) - if value: - defaults[self.get(node, "id")] = value - - return defaults diff --git a/scripts/lib/CIME/XML/test_reporter.py b/scripts/lib/CIME/XML/test_reporter.py deleted file mode 100644 index d296807992b..00000000000 --- a/scripts/lib/CIME/XML/test_reporter.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -Interface to the testreporter xml. This class inherits from GenericXML.py - -""" -#pylint: disable=import-error -from six.moves import urllib -import six -from CIME.XML.standard_module_setup import * -from CIME.XML.generic_xml import GenericXML -from CIME.utils import expect,get_model - -class TestReporter(GenericXML): - - def __init__(self): - """ - initialize an object - """ - - expect(get_model() == 'cesm', "testreport is only meant to populate the CESM test database." 
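# Editor's illustration (not in the patch): the specificity score _find_matches
# above uses to rank candidate pes blocks. A machine match weighs most (7),
# then grid (3), compset (2) and pesize (1); "any" contributes nothing.
# The sample match strings below are hypothetical.
def pes_points(grid_match, mach_match, compset_match, pesize_match):
    return (int(grid_match != "any") * 3
            + int(mach_match != "any") * 7
            + int(compset_match != "any") * 2
            + int(pesize_match != "any"))

if __name__ == "__main__":
    # A block pinned to a machine beats one pinned only to grid+compset+pesize.
    print(pes_points("any", "cheyenne", "any", "any"))        # 7
    print(pes_points("f19_g16", "any", "_CAM", "M"))          # 6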
) - self.root = None - - GenericXML.__init__(self, root_name_override="testrecord", read_only=False, infile="TestRecord.xml") - - def setup_header(self, tagname,machine,compiler,mpilib,testroot,testtype,baseline): - # - # Create the XML header that the testdb is expecting to recieve - # - for name, text, attribs in [ ("tag_name" , tagname , None), - ("mach" , machine , None), - ("compiler" , compiler, {"version":""}), - ("mpilib" , mpilib , {"version":""}), - ("testroot" , testroot, None), - ("testtype" , testtype, None), - ("baselinetag", baseline, None) ]: - self.make_child(name, attributes=attribs, text=text) - - def add_result(self,test_name,test_status): - # - # Add a test result to the XML structure. - # - tlelem = self.make_child("tests", {"testname":test_name}) - - for attrib_name, text in [ ("casestatus", None), - ("comment", test_status["COMMENT"]), - ("compare", test_status["BASELINE"]), - ("memcomp", test_status["MEMCOMP"]), - ("memleak", test_status["MEMLEAK"]), - ("nlcomp", test_status["NLCOMP"]), - ("status", test_status["STATUS"]), - ("tputcomp", test_status["TPUTCOMP"]) ]: - - self.make_child("category", attributes={"name": attrib_name}, text=text, root=tlelem) - - def push2testdb(self): - # - # Post test result XML to CESM test database - # - xmlstr = self.get_raw_record() - username=six.moves.input("Username:") - os.system("stty -echo") - password=six.moves.input("Password:") - os.system("stty echo") - params={'username':username,'password':password,'testXML':xmlstr} - url="https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" - params = urllib.parse.urlencode(params) - f = urllib.request.urlopen(url, params) - # - # Print any messages from the post command - # - print(f.read()) - print(f.code) diff --git a/scripts/lib/CIME/XML/testlist.py b/scripts/lib/CIME/XML/testlist.py deleted file mode 100644 index 9c14ce20e39..00000000000 --- a/scripts/lib/CIME/XML/testlist.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -Interface to the config_files.xml file. This class inherits from generic_xml.py -It supports version 2.0 of the testlist.xml file - -In version 2 of the file options can be specified to further refine a test or -set of tests. They can be specified either at the top level, in which case they -apply to all machines/compilers for this test: - - - - - - ... 
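# Editor's sketch (not in the patch): the shape of the HTTP POST that
# push2testdb above performs. The endpoint, credentials and payload below are
# placeholders; note that in Python 3 the urlencoded form data must be encoded
# to bytes before being passed to urlopen.
from urllib import parse, request

def post_test_record(url, username, password, xmlstr):
    params = parse.urlencode(
        {"username": username, "password": password, "testXML": xmlstr}
    ).encode("utf-8")
    with request.urlopen(url, params) as resp:   # real network call; shown for shape only
        return resp.getcode(), resp.read()

# Example call (not executed here):
# post_test_record("https://example.invalid/cgi-bin/process.cgi", "me", "secret", "<testrecord/>")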
- - -or at the level of a particular machine/compiler: - - - - - - - - - - - -Currently supported options are: - -- walltime: sets the wallclock limit in the queuing system - -- memleak_tolerance: specifies the relative memory growth expected for this test - -- comment: has no effect, but is written out when printing the test list - -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files - -logger = logging.getLogger(__name__) - -class Testlist(GenericXML): - - def __init__(self,infile, files=None): - """ - initialize an object - """ - schema = None - if files is None: - files = Files() - schema = files.get_schema("TESTS_SPEC_FILE") - GenericXML.__init__(self, infile, schema=schema) - expect(self.get_version() >= 2.0, - "{} is an unsupported version of the testfile format and will be ignored".format(infile)) - - def get_tests(self, machine=None, category=None, compiler=None, compset=None, grid=None, supported_only=False): - tests = [] - attributes = {} - if compset is not None: - attributes['compset'] = compset - if grid is not None: - attributes['grid'] = grid - - testnodes = self.get_children("test", attributes=attributes) - - machatts = {} - if machine is not None: - machatts["name"] = machine - if category is not None: - machatts["category"] = category - if compiler is not None: - machatts["compiler"] = compiler - - - for tnode in testnodes: - if supported_only and self.has(tnode, "supported") and self.get(tnode, "supported") == 'false': - continue - - machnode = self.get_optional_child("machines", root=tnode) - machnodes = None if machnode is None else self.get_children("machine",machatts,root=machnode) - if machnodes: - this_test_node = {} - for key, value in self.attrib(tnode).items(): - if key == "name": - this_test_node["testname"] = value - else: - this_test_node[key] = value - - - - # Get options that apply to all machines/compilers for this test - options = self.get_children("options", root=tnode) - if len(options) > 0: - optionnodes = self.get_children("option", root=options[0]) - else: - optionnodes = [] - for mach in machnodes: - # this_test_node can include multiple tests - this_test = dict(this_test_node) - for key, value in self.attrib(mach).items(): - if key == "name": - this_test["machine"] = value - else: - this_test[key] = value - this_test["options"] = {} - - for onode in optionnodes: - this_test['options'][self.get(onode, 'name')] = self.text(onode) - - # Now get options specific to this machine/compiler - options = self.get_optional_child("options", root=mach) - optionnodes = [] if options is None else self.get_children("option", root=options) - for onode in optionnodes: - this_test['options'][self.get(onode, 'name')] = self.text(onode) - - tests.append(this_test) - - return tests diff --git a/scripts/lib/CIME/XML/tests.py b/scripts/lib/CIME/XML/tests.py deleted file mode 100644 index 86b9c965e5d..00000000000 --- a/scripts/lib/CIME/XML/tests.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Interface to the config_tests.xml file. 
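# Editor's illustration (not in the patch): the option-merging order get_tests
# above applies - test-level options are read first, then machine-level options
# override them. The option names mirror the docstring above; values are
# hypothetical.
def merge_options(test_level, machine_level):
    options = {}
    options.update(test_level)      # applies to every machine/compiler
    options.update(machine_level)   # machine-specific values win
    return options

if __name__ == "__main__":
    print(merge_options({"walltime": "00:20", "comment": "short smoke test"},
                        {"walltime": "01:00"}))
    # {'walltime': '01:00', 'comment': 'short smoke test'}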
This class inherits from GenericEntry -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.generic_xml import GenericXML -from CIME.XML.files import Files - -logger = logging.getLogger(__name__) - -class Tests(GenericXML): - - def __init__(self, infile=None, files=None): - """ - initialize an object interface to file config_tests.xml - """ - if infile is None: - if files is None: - files = Files() - infile = files.get_value("CONFIG_TESTS_FILE") - GenericXML.__init__(self, infile) - # append any component specific config_tests.xml files - for comp in files.get_components("CONFIG_TESTS_FILE"): - if comp is None: - continue - infile = files.get_value("CONFIG_TESTS_FILE", attribute={"component":comp}) - if os.path.isfile(infile): - self.read(infile) - - def get_test_node(self, testname): - logger.debug("Get settings for {}".format(testname)) - node = self.get_child("test",{"NAME":testname}) - logger.debug("Found {}".format(self.text(node))) - return node - - def print_values(self, skip_infrastructure_tests=True): - """ - Print each test type and its description. - - If skip_infrastructure_tests is True, then this does not write - information for tests with the attribute - INFRASTRUCTURE_TEST="TRUE". - """ - all_tests = [] - root = self.get_optional_child("testlist") - if root is not None: - all_tests = self.get_children("test", root=root) - for one_test in all_tests: - if skip_infrastructure_tests: - infrastructure_test = self.get(one_test, "INFRASTRUCTURE_TEST") - if (infrastructure_test is not None and - infrastructure_test.upper() == "TRUE"): - continue - name = self.get(one_test, "NAME") - desc = self.get_element_text("DESC", root=one_test) - logger.info("{}: {}".format(name, desc)) diff --git a/scripts/lib/CIME/XML/testspec.py b/scripts/lib/CIME/XML/testspec.py deleted file mode 100644 index a3647d1dd42..00000000000 --- a/scripts/lib/CIME/XML/testspec.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -Interface to the testspec.xml file. 
This class inherits from generic_xml.py -""" -from CIME.XML.standard_module_setup import * - -from CIME.XML.generic_xml import GenericXML - -logger = logging.getLogger(__name__) - -class TestSpec(GenericXML): - - def __init__(self, infile): - """ - initialize an object - """ - GenericXML.__init__(self, infile) - self._testnodes = {} - self._testlist_node = None - if os.path.isfile(infile): - testnodes = self.get_children('test') - for node in testnodes: - self._testnodes[self.get(node, "name")] = node - - def set_header(self, testroot, machine, testid, baselinetag=None, baselineroot=None): - tlelem = self.make_child("testlist") - - for name, text in [ ("testroot", testroot), ("machine", machine), ("testid", testid), ("baselinetag", baselinetag), ("baselineroot", baselineroot) ]: - if text is not None: - self.make_child(name, root=tlelem, text=text) - - self._testlist_node = tlelem - - def add_test(self, compiler, mpilib, testname): - expect(testname not in self._testnodes, "Test {} already in testlist".format(testname)) - - telem = self.make_child("test", attributes={"name":testname}, root=self._testlist_node) - - for name, text in [ ("compiler", compiler), ("mpilib", mpilib) ]: - self.make_child(name, root=telem, text=text) - - self._testnodes[testname] = telem - - def update_test_status(self, testname, phase, status): - expect(testname in self._testnodes, "Test {} not defined in testlist".format(testname)) - root = self._testnodes[testname] - pnode = self.get_optional_child("section", {"name":phase}, root=root) - if pnode is not None: - self.set(pnode, "status", status) - else: - self.make_child("section", {"name":phase, "status":status}, root=root) diff --git a/scripts/lib/CIME/aprun.py b/scripts/lib/CIME/aprun.py deleted file mode 100755 index 30c053c07d0..00000000000 --- a/scripts/lib/CIME/aprun.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -Aprun is far too complex to handle purely through XML. We need python -code to compute and assemble aprun commands. -""" - -from CIME.XML.standard_module_setup import * - -import math - -logger = logging.getLogger(__name__) - -############################################################################### -def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, - max_tasks_per_node, max_mpitasks_per_node, - pio_numtasks, pio_async_interface, - compiler, machine, run_exe): -############################################################################### - """ - No one really understands this code, but we can at least test it. 
- - >>> ntasks = [512, 675, 168, 512, 128, 168, 168, 512, 1] - >>> nthreads = [2, 2, 2, 2, 4, 2, 2, 2, 1] - >>> rootpes = [0, 0, 512, 0, 680, 512, 512, 0, 0] - >>> pstrids = [1, 1, 1, 1, 1, 1, 1, 1, 1] - >>> max_tasks_per_node = 16 - >>> max_mpitasks_per_node = 16 - >>> pio_numtasks = -1 - >>> pio_async_interface = False - >>> compiler = "pgi" - >>> machine = "titan" - >>> run_exe = "e3sm.exe" - >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, max_mpitasks_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) - (' -S 4 -n 680 -N 8 -d 2 e3sm.exe : -S 2 -n 128 -N 4 -d 4 e3sm.exe ', 117, 808, 4, 4) - >>> compiler = "intel" - >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, max_mpitasks_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) - (' -S 4 -cc numa_node -n 680 -N 8 -d 2 e3sm.exe : -S 2 -cc numa_node -n 128 -N 4 -d 4 e3sm.exe ', 117, 808, 4, 4) - - >>> ntasks = [64, 64, 64, 64, 64, 64, 64, 64, 1] - >>> nthreads = [1, 1, 1, 1, 1, 1, 1, 1, 1] - >>> rootpes = [0, 0, 0, 0, 0, 0, 0, 0, 0] - >>> pstrids = [1, 1, 1, 1, 1, 1, 1, 1, 1] - >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, max_mpitasks_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) - (' -S 8 -cc numa_node -n 64 -N 16 -d 1 e3sm.exe ', 4, 64, 16, 1) - """ - max_tasks_per_node = 1 if max_tasks_per_node < 1 else max_tasks_per_node - - total_tasks = 0 - for ntask, rootpe, pstrid in zip(ntasks, rootpes, pstrids): - tt = rootpe + (ntask - 1) * pstrid + 1 - total_tasks = max(tt, total_tasks) - - # Check if we need to add pio's tasks to the total task count - if pio_async_interface: - total_tasks += pio_numtasks if pio_numtasks > 0 else max_mpitasks_per_node - - # Compute max threads for each mpi task - maxt = [0] * total_tasks - for ntask, nthrd, rootpe, pstrid in zip(ntasks, nthreads, rootpes, pstrids): - c2 = 0 - while c2 < ntask: - s = rootpe + c2 * pstrid - if nthrd > maxt[s]: - maxt[s] = nthrd - - c2 += 1 - - # make sure all maxt values at least 1 - for c1 in range(0, total_tasks): - if maxt[c1] < 1: - maxt[c1] = 1 - - # Compute task and thread settings for batch commands - tasks_per_node, min_tasks_per_node, task_count, thread_count, max_thread_count, total_node_count, total_task_count, aprun_args = \ - 0, max_mpitasks_per_node, 1, maxt[0], maxt[0], 0, 0, "" - c1list = list(range(1, total_tasks)) - c1list.append(None) - for c1 in c1list: - if c1 is None or maxt[c1] != thread_count: - tasks_per_node = min(max_mpitasks_per_node, int(max_tasks_per_node / thread_count)) - - tasks_per_node = min(task_count, tasks_per_node) - - # Compute for every subset - task_per_numa = int(math.ceil(tasks_per_node / 2.0)) - # Option for Titan - if machine == "titan" and tasks_per_node > 1: - aprun_args += " -S {:d}".format(task_per_numa) - if compiler == "intel": - aprun_args += " -cc numa_node" - - aprun_args += " -n {:d} -N {:d} -d {:d} {} {}".format(task_count, tasks_per_node, thread_count, run_exe, "" if c1 is None else ":") - - node_count = int(math.ceil(float(task_count) / tasks_per_node)) - total_node_count += node_count - total_task_count += task_count - - if tasks_per_node < min_tasks_per_node: - min_tasks_per_node = tasks_per_node - - if c1 is not None: - thread_count = maxt[c1] - max_thread_count = max(max_thread_count, maxt[c1]) - task_count = 1 - - else: - task_count += 1 - - return aprun_args, total_node_count, total_task_count, min_tasks_per_node, 
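# Editor's sketch (not in the patch): the first two steps of the aprun layout
# computation above - the total MPI task count derived from rootpe/ntasks/pstrid,
# then the per-rank maximum thread count. The small two-component layout below
# is hypothetical.
def total_tasks_and_threads(ntasks, nthreads, rootpes, pstrids):
    total = 0
    for ntask, rootpe, pstrid in zip(ntasks, rootpes, pstrids):
        total = max(total, rootpe + (ntask - 1) * pstrid + 1)
    maxt = [1] * total
    for ntask, nthrd, rootpe, pstrid in zip(ntasks, nthreads, rootpes, pstrids):
        for c in range(ntask):
            rank = rootpe + c * pstrid
            maxt[rank] = max(maxt[rank], nthrd)
    return total, maxt

if __name__ == "__main__":
    # two components: ranks 0-3 threaded x2, ranks 4-5 single-threaded
    total, maxt = total_tasks_and_threads([4, 2], [2, 1], [0, 4], [1, 1])
    print(total, maxt)   # 6 [2, 2, 2, 2, 1, 1]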
max_thread_count - -############################################################################### -def get_aprun_cmd_for_case(case, run_exe, overrides=None): -############################################################################### - """ - Given a case, construct and return the aprun command and optimized node count - """ - models = case.get_values("COMP_CLASSES") - ntasks, nthreads, rootpes, pstrids = [], [], [], [] - for model in models: - model = "CPL" if model == "DRV" else model - for the_list, item_name in zip([ntasks, nthreads, rootpes, pstrids], - ["NTASKS", "NTHRDS", "ROOTPE", "PSTRID"]): - the_list.append(case.get_value("_".join([item_name, model]))) - max_tasks_per_node = case.get_value("MAX_TASKS_PER_NODE") - if overrides: - if 'max_tasks_per_node' in overrides: - max_tasks_per_node = overrides['max_tasks_per_node'] - if 'total_tasks' in overrides: - ntasks = [overrides['total_tasks'] if x > 1 else x for x in ntasks] - if 'thread_count' in overrides: - nthreads = [overrides['thread_count'] if x > 1 else x for x in nthreads] - - - - return _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, - max_tasks_per_node, - case.get_value("MAX_MPITASKS_PER_NODE"), - case.get_value("PIO_NUMTASKS"), - case.get_value("PIO_ASYNC_INTERFACE"), - case.get_value("COMPILER"), - case.get_value("MACH"), - run_exe) diff --git a/scripts/lib/CIME/bless_test_results.py b/scripts/lib/CIME/bless_test_results.py deleted file mode 100644 index 3569e18a8de..00000000000 --- a/scripts/lib/CIME/bless_test_results.py +++ /dev/null @@ -1,183 +0,0 @@ -import CIME.compare_namelists, CIME.simple_compare -from CIME.test_scheduler import NAMELIST_PHASE -from CIME.utils import run_cmd, expect, get_scripts_root, get_model, EnvironmentContext -from CIME.test_status import * -from CIME.hist_utils import generate_baseline, compare_baseline -from CIME.case import Case - -import os, glob, time, six -logger = logging.getLogger(__name__) - -############################################################################### -def bless_namelists(test_name, report_only, force, baseline_name, baseline_root): -############################################################################### - # Be aware that restart test will overwrite the original namelist files - # with versions of the files that should not be blessed. This forces us to - # re-run create_test. - - # Update namelist files - logger.info("Test '{}' had namelist diff".format(test_name)) - if (not report_only and - (force or six.moves.input("Update namelists (y/n)? 
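# Editor's illustration (not in the patch): how the overrides accepted by
# get_aprun_cmd_for_case above are applied - components running a single task
# or thread are left alone, everything else is forced to the override value.
# The sample layout and override values are hypothetical.
def apply_overrides(ntasks, nthreads, overrides):
    if "total_tasks" in overrides:
        ntasks = [overrides["total_tasks"] if x > 1 else x for x in ntasks]
    if "thread_count" in overrides:
        nthreads = [overrides["thread_count"] if x > 1 else x for x in nthreads]
    return ntasks, nthreads

if __name__ == "__main__":
    print(apply_overrides([512, 1, 128], [2, 1, 4],
                          {"total_tasks": 64, "thread_count": 1}))
    # ([64, 1, 64], [1, 1, 1])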
").upper() in ["Y", "YES"])): - - create_test_gen_args = " -g {} ".format(baseline_name if get_model() == "cesm" else " -g -b {} ".format(baseline_name)) - stat, out, _ = run_cmd("{}/create_test {} -n {} --baseline-root {} -o".format(get_scripts_root(), test_name, create_test_gen_args, baseline_root), combine_output=True) - if stat != 0: - return False, "Namelist regen failed: '{}'".format(out) - else: - return True, None - else: - return True, None - -############################################################################### -def bless_history(test_name, case, baseline_name, baseline_root, report_only, force): -############################################################################### - real_user = case.get_value("REALUSER") - with EnvironmentContext(USER=real_user): - - baseline_full_dir = os.path.join(baseline_root, baseline_name, case.get_value("CASEBASEID")) - - cmp_result, cmp_comments = compare_baseline(case, baseline_dir=baseline_full_dir, outfile_suffix=None) - if cmp_result: - logger.info("Diff appears to have been already resolved.") - return True, None - else: - logger.info(cmp_comments) - if (not report_only and - (force or six.moves.input("Update this diff (y/n)? ").upper() in ["Y", "YES"])): - gen_result, gen_comments = generate_baseline(case, baseline_dir=baseline_full_dir) - if not gen_result: - logger.warning("Hist file bless FAILED for test {}".format(test_name)) - return False, "Generate baseline failed: {}".format(gen_comments) - else: - logger.info(gen_comments) - return True, None - else: - return True, None - -############################################################################### -def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_id=None, namelists_only=False, hist_only=False, - report_only=False, force=False, bless_tests=None, no_skip_pass=False): -############################################################################### - test_id_glob = "*{}*".format(compiler) if test_id is None else "*{}*".format(test_id) - test_status_files = glob.glob("{}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME)) - expect(test_status_files, "No matching test cases found in for {}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME)) - - # auto-adjust test-id if multiple rounds of tests were matched - timestamps = set() - for test_status_file in test_status_files: - timestamp = os.path.basename(os.path.dirname(test_status_file)).split(".")[-1] - timestamps.add(timestamp) - - if (len(timestamps) > 1): - logger.warning("Multiple sets of tests were matched! 
Selected only most recent tests.") - - most_recent = sorted(timestamps)[-1] - logger.info("Matched test batch is {}".format(most_recent)) - - broken_blesses = [] - for test_status_file in test_status_files: - if not most_recent in test_status_file: - logger.info("Skipping {}".format(test_status_file)) - continue - - test_dir = os.path.dirname(test_status_file) - ts = TestStatus(test_dir=test_dir) - test_name = ts.get_name() - if test_name is None: - case_dir = os.path.basename(test_dir) - test_name = CIME.utils.normalize_case_id(case_dir) - if (bless_tests in [[], None] or CIME.utils.match_any(test_name, bless_tests)): - broken_blesses.append(("unknown", "test had invalid TestStatus file: '{}'".format(test_status_file))) - continue - else: - continue - - if (bless_tests in [[], None] or CIME.utils.match_any(test_name, bless_tests)): - overall_result = ts.get_overall_test_status() - - # See if we need to bless namelist - if (not hist_only): - if no_skip_pass: - nl_bless = True - else: - nl_bless = ts.get_status(NAMELIST_PHASE) != TEST_PASS_STATUS - else: - nl_bless = False - - # See if we need to bless baselines - if (not namelists_only): - run_result = ts.get_status(RUN_PHASE) - if (run_result is None): - broken_blesses.append((test_name, "no run phase")) - logger.warning("Test '{}' did not make it to run phase".format(test_name)) - hist_bless = False - elif (run_result != TEST_PASS_STATUS): - broken_blesses.append((test_name, "test did not pass")) - logger.warning("Test '{}' did not pass, not safe to bless".format(test_name)) - hist_bless = False - elif no_skip_pass: - hist_bless = True - else: - hist_bless = ts.get_status(BASELINE_PHASE) != TEST_PASS_STATUS - else: - hist_bless = False - - # Now, do the bless - if not nl_bless and not hist_bless: - logger.info("Nothing to bless for test: {}, overall status: {}".format(test_name, overall_result)) - else: - - logger.info("###############################################################################") - logger.info("Blessing results for test: {}, most recent result: {}".format(test_name, overall_result)) - logger.info("Case dir: {}".format(test_dir)) - logger.info("###############################################################################") - if not force: - time.sleep(2) - - with Case(test_dir) as case: - # Resolve baseline_name and baseline_root - if baseline_name is None: - baseline_name_resolved = case.get_value("BASELINE_NAME_CMP") - if not baseline_name_resolved: - baseline_name_resolved = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) - else: - baseline_name_resolved = baseline_name - - if baseline_root is None: - baseline_root_resolved = case.get_value("BASELINE_ROOT") - else: - baseline_root_resolved = baseline_root - - if baseline_name_resolved is None: - broken_blesses.append((test_name, "Could not determine baseline name")) - continue - - if baseline_root_resolved is None: - broken_blesses.append((test_name, "Could not determine baseline root")) - continue - - # Bless namelists - if nl_bless: - success, reason = bless_namelists(test_name, report_only, force, baseline_name_resolved, baseline_root_resolved) - if not success: - broken_blesses.append((test_name, reason)) - - # Bless hist files - if hist_bless: - if "HOMME" in test_name: - success = False - reason = "HOMME tests cannot be blessed with bless_for_tests" - else: - success, reason = bless_history(test_name, case, baseline_name_resolved, baseline_root_resolved, report_only, force) - - if (not success): - broken_blesses.append((test_name, reason)) - 
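# Editor's sketch (not in the patch): the "most recent batch" selection used by
# bless_test_results above - the trailing dot-separated token of each test
# directory name is treated as the batch timestamp and only the newest batch is
# blessed. The sample directory names are hypothetical.
import os

def most_recent_batch(test_status_files):
    timestamps = {os.path.basename(os.path.dirname(f)).split(".")[-1]
                  for f in test_status_files}
    return sorted(timestamps)[-1]

if __name__ == "__main__":
    files = ["/scratch/ERS.f19_g16.A.mach_gnu.20200101_120000/TestStatus",
             "/scratch/ERS.f19_g16.A.mach_gnu.20200102_080000/TestStatus"]
    print(most_recent_batch(files))   # 20200102_080000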
- # Make sure user knows that some tests were not blessed - success = True - for broken_bless, reason in broken_blesses: - logger.warning("FAILED TO BLESS TEST: {}, reason {}".format(broken_bless, reason)) - success = False - - return success diff --git a/scripts/lib/CIME/build.py b/scripts/lib/CIME/build.py deleted file mode 100644 index 25a649f6f29..00000000000 --- a/scripts/lib/CIME/build.py +++ /dev/null @@ -1,572 +0,0 @@ -""" -functions for building CIME models -""" -import glob, shutil, time, threading, subprocess -from CIME.XML.standard_module_setup import * -from CIME.utils import get_model, analyze_build_log, stringify_bool, run_and_log_case_status, get_timestamp, run_sub_or_cmd, run_cmd, get_batch_script_for_job, gzip_existing_file, safe_copy -from CIME.provenance import save_build_provenance as save_build_provenance_sub -from CIME.locked_files import lock_file, unlock_file - -logger = logging.getLogger(__name__) - -_CMD_ARGS_FOR_BUILD = \ - ("CASEROOT", "CASETOOLS", "CIMEROOT", "COMP_INTERFACE", - "COMPILER", "DEBUG", "EXEROOT", "INCROOT", "LIBROOT", - "MACH", "MPILIB", "NINST_VALUE", "OS", "PIO_VERSION", - "SHAREDLIBROOT", "SMP_PRESENT", "USE_ESMF_LIB", "USE_MOAB", - "CAM_CONFIG_OPTS", "COMPARE_TO_NUOPC", "HOMME_TARGET", - "OCN_SUBMODEL", "CISM_USE_TRILINOS", "USE_ALBANY", "USE_PETSC") - -def get_standard_makefile_args(case, shared_lib=False): - make_args = "CIME_MODEL={} ".format(case.get_value("MODEL")) - make_args += " compile_threaded={} ".format(stringify_bool(case.get_build_threaded())) - if not shared_lib: - make_args += " USE_KOKKOS={} ".format(stringify_bool(uses_kokkos(case))) - for var in _CMD_ARGS_FOR_BUILD: - make_args += xml_to_make_variable(case, var) - - return make_args - -def get_standard_cmake_args(case, shared_lib=False): - cmake_args = "-DCIME_MODEL={} ".format(case.get_value("MODEL")) - if not shared_lib: - cmake_args += " -DUSE_KOKKOS={} ".format(stringify_bool(uses_kokkos(case))) - for var in _CMD_ARGS_FOR_BUILD: - cmake_args += xml_to_make_variable(case, var, cmake=True) - - # Disable compiler checks - cmake_args += " -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER_WORKS=1 -DCMAKE_Fortran_COMPILER_WORKS=1" - - return cmake_args - -def xml_to_make_variable(case, varname, cmake=False): - varvalue = case.get_value(varname) - if varvalue is None: - return "" - if type(varvalue) == type(True): - varvalue = stringify_bool(varvalue) - return "{}{}=\"{}\" ".format("-D" if cmake else "", varname, varvalue) - -############################################################################### -def uses_kokkos(case): -############################################################################### - cam_target = case.get_value("CAM_TARGET") - return get_model() == "e3sm" and cam_target in ("preqx_kokkos", "theta-l") - -############################################################################### -def _build_model(build_threaded, exeroot, incroot, complist, - lid, caseroot, cimeroot, compiler, buildlist, comp_interface, case): -############################################################################### - logs = [] - - thread_bad_results = [] - for model, comp, nthrds, _, config_dir in complist: - if buildlist is not None and model.lower() not in buildlist: - continue - - # aquap has a dependency on atm so we will build it after the threaded loop - if comp == "aquap": - logger.debug("Skip aquap ocn build here") - continue - - # coupler handled seperately - if model == "cpl": - continue - - # special case for clm - # clm 4_5 and newer is a shared (as in sharedlibs, 
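# Editor's sketch (not in the patch): the value-to-flag conversion that
# xml_to_make_variable above performs - booleans become TRUE/FALSE strings and
# the CMake form gets a -D prefix. Variable names and values are hypothetical.
def to_make_variable(varname, varvalue, cmake=False):
    if varvalue is None:
        return ""
    if isinstance(varvalue, bool):
        varvalue = "TRUE" if varvalue else "FALSE"
    return '{}{}="{}" '.format("-D" if cmake else "", varname, varvalue)

if __name__ == "__main__":
    print(to_make_variable("DEBUG", False))               # DEBUG="FALSE"
    print(to_make_variable("MPILIB", "mpt", cmake=True))  # -DMPILIB="mpt"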
shared by all tests) library - # (but not in E3SM) and should be built in build_libraries - if get_model() != "e3sm" and comp == "clm": - continue - else: - logger.info(" - Building {} Library ".format(model)) - - smp = nthrds > 1 or build_threaded - - bldroot = os.path.join(exeroot, model, "obj") - libroot = os.path.join(exeroot, "lib") - file_build = os.path.join(exeroot, "{}.bldlog.{}".format(model, lid)) - logger.debug("bldroot is {}".format(bldroot)) - logger.debug("libroot is {}".format(libroot)) - - # make sure bldroot and libroot exist - for build_dir in [bldroot, libroot]: - if not os.path.exists(build_dir): - os.makedirs(build_dir) - - # build the component library - # thread_bad_results captures error output from thread (expected to be empty) - # logs is a list of log files to be compressed and added to the case logs/bld directory - t = threading.Thread(target=_build_model_thread, - args=(config_dir, model, comp, caseroot, libroot, bldroot, incroot, file_build, - thread_bad_results, smp, compiler, case)) - t.start() - - logs.append(file_build) - - # Wait for threads to finish - while(threading.active_count() > 1): - time.sleep(1) - - expect(not thread_bad_results, "\n".join(thread_bad_results)) - - # - # Now build the executable - # - - if not buildlist: - cime_model = get_model() - file_build = os.path.join(exeroot, "{}.bldlog.{}".format(cime_model, lid)) - - config_dir = os.path.join(cimeroot, "src", "drivers", comp_interface, "cime_config") - bldroot = os.path.join(exeroot, "cpl", "obj") - if not os.path.isdir(bldroot): - os.makedirs(bldroot) - logger.info("Building {} with output to {} ".format(cime_model, file_build)) - - with open(file_build, "w") as fd: - stat = run_cmd("{}/buildexe {} {} {} " - .format(config_dir, caseroot, libroot, bldroot), - from_dir=bldroot, arg_stdout=fd, - arg_stderr=subprocess.STDOUT)[0] - - analyze_build_log("{} exe".format(cime_model), file_build, compiler) - expect(stat == 0, "BUILD FAIL: buildexe failed, cat {}".format(file_build)) - - # Copy the just-built ${MODEL}.exe to ${MODEL}.exe.$LID - safe_copy("{}/{}.exe".format(exeroot, cime_model), "{}/{}.exe.{}".format(exeroot, cime_model, lid)) - - logs.append(file_build) - - return logs - -############################################################################### -def _build_checks(case, build_threaded, comp_interface, use_esmf_lib, - debug, compiler, mpilib, complist, ninst_build, smp_value, - model_only, buildlist): -############################################################################### - """ - check if a build needs to be done and warn if a clean is warrented first - returns the relative sharedpath directory for sharedlibraries - """ - smp_build = case.get_value("SMP_BUILD") - build_status = case.get_value("BUILD_STATUS") - expect(comp_interface in ("mct", "moab", "nuopc"), - "Only supporting mct nuopc, or moab comp_interfaces at this time, found {}".format(comp_interface)) - smpstr = "" - ninst_value = "" - for model, _, nthrds, ninst, _ in complist: - if nthrds > 1: - build_threaded = True - if build_threaded: - smpstr += "{}1".format(model[0]) - else: - smpstr += "{}0".format(model[0]) - ninst_value += "{}{:d}".format((model[0]),ninst) - - case.set_value("SMP_VALUE", smpstr) - case.set_value("NINST_VALUE", ninst_value) - - debugdir = "debug" if debug else "nodebug" - threaddir = "threads" if build_threaded else "nothreads" - sharedpath = os.path.join(compiler, mpilib, debugdir, threaddir, comp_interface) - - logger.debug("compiler={} mpilib={} debugdir={} threaddir={}" - 
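# Editor's sketch (not in the patch): the per-component build threading pattern
# _build_model above relies on - one worker thread per component, errors
# collected in a shared list, and the main thread waiting until only it is left
# running. The worker below is a hypothetical stand-in for the real buildlib call.
import threading, time

def build_one(name, bad_results):
    time.sleep(0.1)                       # stands in for running buildlib
    if name == "broken":
        bad_results.append("BUILD FAIL: {}".format(name))

if __name__ == "__main__":
    bad_results = []
    for comp in ["atm", "lnd", "ocn"]:
        threading.Thread(target=build_one, args=(comp, bad_results)).start()
    while threading.active_count() > 1:   # wait for all component builds
        time.sleep(0.05)
    print("errors:", bad_results)         # errors: []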
.format(compiler,mpilib,debugdir,threaddir)) - - expect(ninst_build == ninst_value or ninst_build == "0", - """ -ERROR, NINST VALUES HAVE CHANGED - NINST_BUILD = {} - NINST_VALUE = {} - A manual clean of your obj directories is strongly recommended - You should execute the following: - ./case.build --clean - Then rerun the build script interactively - ---- OR ---- - You can override this error message at your own risk by executing: - ./xmlchange -file env_build.xml -id NINST_BUILD -val 0 - Then rerun the build script interactively -""".format(ninst_build, ninst_value)) - - expect(smp_build == smpstr or smp_build == "0", - """ -ERROR, SMP VALUES HAVE CHANGED - SMP_BUILD = {} - SMP_VALUE = {} - smpstr = {} - A manual clean of your obj directories is strongly recommended - You should execute the following: - ./case.build --clean - Then rerun the build script interactively - ---- OR ---- - You can override this error message at your own risk by executing: - ./xmlchange -file env_build.xml -id SMP_BUILD -val 0 - Then rerun the build script interactively -""".format(smp_build, smp_value, smpstr)) - - expect(build_status == 0, - """ -ERROR env_build HAS CHANGED - A manual clean of your obj directories is required - You should execute the following: - ./case.build --clean-all -""") - - - expect(mpilib != "mpi-serial" or not use_esmf_lib, - """ -ERROR MPILIB is mpi-serial and USE_ESMF_LIB IS TRUE - MPILIB can only be used with an ESMF library built with mpiuni on - Set USE_ESMF_LIB to FALSE with - ./xmlchange -file env_build.xml -id USE_ESMF_LIB -val FALSE - ---- OR ---- - Make sure the ESMF_LIBDIR used was built with mipuni (or change it to one that was) - And comment out this if block in Tools/models_buildexe -""") - - case.set_value("BUILD_COMPLETE", False) - - # User may have rm -rf their build directory - case.create_dirs() - - case.flush() - if not model_only and not buildlist: - logger.info("Generating component namelists as part of build") - case.create_namelists() - - return sharedpath - -############################################################################### -def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid, compiler, buildlist, comp_interface): -############################################################################### - - shared_lib = os.path.join(exeroot, sharedpath, "lib") - shared_inc = os.path.join(exeroot, sharedpath, "include") - for shared_item in [shared_lib, shared_inc]: - if (not os.path.exists(shared_item)): - os.makedirs(shared_item) - - mpilib = case.get_value("MPILIB") - libs = ["gptl", "mct", "pio", "csm_share"] - if mpilib == "mpi-serial": - libs.insert(0, mpilib) - - if uses_kokkos(case): - libs.append("kokkos") - - logs = [] - sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) - for lib in libs: - if buildlist is not None and lib not in buildlist: - continue - - if lib == "csm_share": - # csm_share adds its own dir name - full_lib_path = os.path.join(sharedlibroot, sharedpath) - elif lib == "mpi-serial": - full_lib_path = os.path.join(sharedlibroot, sharedpath, "mct", lib) - else: - full_lib_path = os.path.join(sharedlibroot, sharedpath, lib) - # pio build creates its own directory - if (lib != "pio" and not os.path.exists(full_lib_path)): - os.makedirs(full_lib_path) - - file_build = os.path.join(exeroot, "{}.bldlog.{}".format(lib, lid)) - my_file = os.path.join(cimeroot, "src", "build_scripts", "buildlib.{}".format(lib)) - logger.info("Building {} with output to file {}".format(lib,file_build)) - - 
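# Editor's illustration (not in the patch): the SMP_VALUE / NINST_VALUE
# signature strings that _build_checks above assembles, one letter-digit pair
# per component, so a later build can detect threading or instance-count
# changes. As in the loop above, once any component is threaded the remainder
# of the string reflects a threaded build. The component list is hypothetical.
def build_signature(complist, build_threaded=False):
    smpstr, ninststr = "", ""
    for model, nthrds, ninst in complist:
        if nthrds > 1:
            build_threaded = True
        smpstr += "{}{}".format(model[0], 1 if build_threaded else 0)
        ninststr += "{}{:d}".format(model[0], ninst)
    return smpstr, ninststr

if __name__ == "__main__":
    comps = [("atm", 2, 1), ("lnd", 1, 1), ("ocn", 1, 2)]
    print(build_signature(comps))   # ('a1l1o1', 'a1l1o2')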
run_sub_or_cmd(my_file, [full_lib_path, os.path.join(exeroot, sharedpath), caseroot], 'buildlib', - [full_lib_path, os.path.join(exeroot, sharedpath), case], logfile=file_build) - - analyze_build_log(lib, file_build, compiler) - logs.append(file_build) - if lib == "pio": - bldlog = open(file_build, "r") - for line in bldlog: - if re.search("Current setting for", line): - logger.warning(line) - - # clm not a shared lib for E3SM - if get_model() != "e3sm" and (buildlist is None or "lnd" in buildlist): - comp_lnd = case.get_value("COMP_LND") - if comp_lnd == "clm": - logging.info(" - Building clm library ") - esmfdir = "esmf" if case.get_value("USE_ESMF_LIB") else "noesmf" - bldroot = os.path.join(sharedlibroot, sharedpath, comp_interface, esmfdir, "clm","obj" ) - libroot = os.path.join(exeroot, sharedpath, comp_interface, esmfdir, "lib") - incroot = os.path.join(exeroot, sharedpath, comp_interface, esmfdir, "include") - file_build = os.path.join(exeroot, "lnd.bldlog.{}".format( lid)) - config_lnd_dir = os.path.dirname(case.get_value("CONFIG_LND_FILE")) - - for ndir in [bldroot, libroot, incroot]: - if (not os.path.isdir(ndir)): - os.makedirs(ndir) - - smp = "SMP" in os.environ and os.environ["SMP"] == "TRUE" - # thread_bad_results captures error output from thread (expected to be empty) - # logs is a list of log files to be compressed and added to the case logs/bld directory - thread_bad_results = [] - _build_model_thread(config_lnd_dir, "lnd", comp_lnd, caseroot, libroot, bldroot, incroot, - file_build, thread_bad_results, smp, compiler, case) - logs.append(file_build) - expect(not thread_bad_results, "\n".join(thread_bad_results)) - - case.flush() # python sharedlib subs may have made XML modifications - return logs - -############################################################################### -def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldroot, incroot, file_build, - thread_bad_results, smp, compiler, _): # (case not used yet) -############################################################################### - logger.info("Building {} with output to {}".format(compclass, file_build)) - t1 = time.time() - cmd = os.path.join(caseroot, "SourceMods", "src." 
+ compname, "buildlib") - if os.path.isfile(cmd): - logger.warning("WARNING: using local buildlib script for {}".format(compname)) - else: - cmd = os.path.join(config_dir, "buildlib") - expect(os.path.isfile(cmd), "Could not find buildlib for {}".format(compname)) - - with open(file_build, "w") as fd: - stat = run_cmd("MODEL={} SMP={} {} {} {} {} " - .format(compclass, stringify_bool(smp), cmd, caseroot, libroot, bldroot), - from_dir=bldroot, arg_stdout=fd, - arg_stderr=subprocess.STDOUT)[0] - - analyze_build_log(compclass, file_build, compiler) - - if stat != 0: - thread_bad_results.append("BUILD FAIL: {}.buildlib failed, cat {}".format(compname, file_build)) - - analyze_build_log(compclass, file_build, compiler) - - for mod_file in glob.glob(os.path.join(bldroot, "*_[Cc][Oo][Mm][Pp]_*.mod")): - safe_copy(mod_file, incroot) - - t2 = time.time() - logger.info("{} built in {:f} seconds".format(compname, (t2 - t1))) - -############################################################################### -def _clean_impl(case, cleanlist, clean_all, clean_depends): -############################################################################### - exeroot = os.path.abspath(case.get_value("EXEROOT")) - if clean_all: - # If cleanlist is empty just remove the bld directory - expect(exeroot is not None,"No EXEROOT defined in case") - if os.path.isdir(exeroot): - logging.info("cleaning directory {}".format(exeroot)) - shutil.rmtree(exeroot) - # if clean_all is True also remove the sharedlibpath - sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) - expect(sharedlibroot is not None,"No SHAREDLIBROOT defined in case") - if sharedlibroot != exeroot and os.path.isdir(sharedlibroot): - logging.warning("cleaning directory {}".format(sharedlibroot)) - shutil.rmtree(sharedlibroot) - else: - expect((cleanlist is not None and len(cleanlist) > 0) or - (clean_depends is not None and len(clean_depends)),"Empty cleanlist not expected") - gmake = case.get_value("GMAKE") - casetools = case.get_value("CASETOOLS") - - cmd = gmake + " -f " + os.path.join(casetools, "Makefile") - cmd += " {}".format(get_standard_makefile_args(case)) - if cleanlist is not None: - for item in cleanlist: - tcmd = cmd + " clean" + item - logger.info("calling {} ".format(tcmd)) - run_cmd_no_fail(tcmd) - else: - for item in clean_depends: - tcmd = cmd + " clean_depends" + item - logger.info("calling {} ".format(tcmd)) - run_cmd_no_fail(tcmd) - - # unlink Locked files directory - unlock_file("env_build.xml") - - # reset following values in xml files - case.set_value("SMP_BUILD",str(0)) - case.set_value("NINST_BUILD",str(0)) - case.set_value("BUILD_STATUS",str(0)) - case.set_value("BUILD_COMPLETE","FALSE") - case.flush() - -############################################################################### -def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, - save_build_provenance): -############################################################################### - - t1 = time.time() - - expect(not (sharedlib_only and model_only), - "Contradiction: both sharedlib_only and model_only") - logger.info("Building case in directory {}".format(caseroot)) - logger.info("sharedlib_only is {}".format(sharedlib_only)) - logger.info("model_only is {}".format(model_only)) - - expect(os.path.isdir(caseroot), "'{}' is not a valid directory".format(caseroot)) - os.chdir(caseroot) - - expect(os.path.exists(get_batch_script_for_job(case.get_primary_job())), - "ERROR: must invoke case.setup script before calling build script ") - - 
cimeroot = case.get_value("CIMEROOT") - - comp_classes = case.get_values("COMP_CLASSES") - - case.check_lockedfiles(skip="env_batch") - - # Retrieve relevant case data - # This environment variable gets set for cesm Make and - # needs to be unset before building again. - if "MODEL" in os.environ: - del os.environ["MODEL"] - build_threaded = case.get_build_threaded() - exeroot = os.path.abspath(case.get_value("EXEROOT")) - incroot = os.path.abspath(case.get_value("INCROOT")) - libroot = os.path.abspath(case.get_value("LIBROOT")) - multi_driver = case.get_value("MULTI_DRIVER") - complist = [] - ninst = 1 - comp_interface = case.get_value("COMP_INTERFACE") - for comp_class in comp_classes: - if comp_class == "CPL": - config_dir = None - if multi_driver: - ninst = case.get_value("NINST_MAX") - else: - config_dir = os.path.dirname(case.get_value("CONFIG_{}_FILE".format(comp_class))) - if multi_driver: - ninst = 1 - else: - ninst = case.get_value("NINST_{}".format(comp_class)) - - comp = case.get_value("COMP_{}".format(comp_class)) - if comp_interface == 'nuopc' and comp in ('satm', 'slnd', 'sesp', 'sglc', 'srof', 'sice', 'socn', 'swav', 'siac'): - continue - thrds = case.get_value("NTHRDS_{}".format(comp_class)) - expect(ninst is not None,"Failed to get ninst for comp_class {}".format(comp_class)) - complist.append((comp_class.lower(), comp, thrds, ninst, config_dir )) - os.environ["COMP_{}".format(comp_class)] = comp - - compiler = case.get_value("COMPILER") - mpilib = case.get_value("MPILIB") - use_esmf_lib = case.get_value("USE_ESMF_LIB") - debug = case.get_value("DEBUG") - ninst_build = case.get_value("NINST_BUILD") - smp_value = case.get_value("SMP_VALUE") - clm_use_petsc = case.get_value("CLM_USE_PETSC") - cism_use_trilinos = case.get_value("CISM_USE_TRILINOS") - mali_use_albany = case.get_value("MALI_USE_ALBANY") - mach = case.get_value("MACH") - - # Load some params into env - os.environ["BUILD_THREADED"] = stringify_bool(build_threaded) - - if get_model() == "e3sm" and mach == "titan" and compiler == "pgiacc": - case.set_value("CAM_TARGET", "preqx_acc") - - # This is a timestamp for the build , not the same as the testid, - # and this case may not be a test anyway. For a production - # experiment there may be many builds of the same case. - lid = get_timestamp("%y%m%d-%H%M%S") - os.environ["LID"] = lid - - # Set the overall USE_PETSC variable to TRUE if any of the - # *_USE_PETSC variables are TRUE. - # For now, there is just the one CLM_USE_PETSC variable, but in - # the future there may be others -- so USE_PETSC will be true if - # ANY of those are true. - - use_petsc = clm_use_petsc - case.set_value("USE_PETSC", use_petsc) - - # Set the overall USE_TRILINOS variable to TRUE if any of the - # *_USE_TRILINOS variables are TRUE. - # For now, there is just the one CISM_USE_TRILINOS variable, but in - # the future there may be others -- so USE_TRILINOS will be true if - # ANY of those are true. - - use_trilinos = False if cism_use_trilinos is None else cism_use_trilinos - case.set_value("USE_TRILINOS", use_trilinos) - - # Set the overall USE_ALBANY variable to TRUE if any of the - # *_USE_ALBANY variables are TRUE. - # For now, there is just the one MALI_USE_ALBANY variable, but in - # the future there may be others -- so USE_ALBANY will be true if - # ANY of those are true. 
- - use_albany = stringify_bool(mali_use_albany) - case.set_value("USE_ALBANY", use_albany) - - # Load modules - case.load_env() - - sharedpath = _build_checks(case, build_threaded, comp_interface, - use_esmf_lib, debug, compiler, mpilib, - complist, ninst_build, smp_value, model_only, buildlist) - - t2 = time.time() - logs = [] - - if not model_only: - logs = _build_libraries(case, exeroot, sharedpath, caseroot, - cimeroot, libroot, lid, compiler, buildlist, comp_interface) - - if not sharedlib_only: - os.environ["INSTALL_SHAREDPATH"] = os.path.join(exeroot, sharedpath) # for MPAS makefile generators - - logs.extend(_build_model(build_threaded, exeroot, incroot, complist, - lid, caseroot, cimeroot, compiler, buildlist, comp_interface, case)) - - if not buildlist: - # in case component build scripts updated the xml files, update the case object - case.read_xml() - # Note, doing buildlists will never result in the system thinking the build is complete - - post_build(case, logs, build_complete=not (buildlist or sharedlib_only), - save_build_provenance=save_build_provenance) - - t3 = time.time() - - if not sharedlib_only: - logger.info("Time spent not building: {:f} sec".format(t2 - t1)) - logger.info("Time spent building: {:f} sec".format(t3 - t2)) - logger.info("MODEL BUILD HAS FINISHED SUCCESSFULLY") - - return True - -############################################################################### -def post_build(case, logs, build_complete=False, save_build_provenance=True): -############################################################################### - for log in logs: - gzip_existing_file(log) - - if build_complete: - # must ensure there's an lid - lid = os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S") - if save_build_provenance: - save_build_provenance_sub(case, lid=lid) - # Set XML to indicate build complete - case.set_value("BUILD_COMPLETE", True) - case.set_value("BUILD_STATUS", 0) - if "SMP_VALUE" in os.environ: - case.set_value("SMP_BUILD", os.environ["SMP_VALUE"]) - - case.flush() - - lock_file("env_build.xml", caseroot=case.get_value("CASEROOT")) - -############################################################################### -def case_build(caseroot, case, sharedlib_only=False, model_only=False, buildlist=None, save_build_provenance=True): -############################################################################### - functor = lambda: _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, - save_build_provenance) - return run_and_log_case_status(functor, "case.build", caseroot=caseroot) - -############################################################################### -def clean(case, cleanlist=None, clean_all=False, clean_depends=None): -############################################################################### - functor = lambda: _clean_impl(case, cleanlist, clean_all, clean_depends) - return run_and_log_case_status(functor, "build.clean", caseroot=case.get_value("CASEROOT")) diff --git a/scripts/lib/CIME/buildlib.py b/scripts/lib/CIME/buildlib.py deleted file mode 100644 index d0a7941015d..00000000000 --- a/scripts/lib/CIME/buildlib.py +++ /dev/null @@ -1,89 +0,0 @@ -""" -common utilities for buildlib -""" - -from CIME.XML.standard_module_setup import * -from CIME.case import Case -from CIME.utils import parse_args_and_handle_standard_logging_options, setup_standard_logging_options -from CIME.build import get_standard_makefile_args -import sys, os, argparse -logger = logging.getLogger(__name__) - 
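
The buildlib.py module being removed here collected the helpers shared by per-component buildlib scripts. A hypothetical, minimal component buildlib built on parse_input and build_cime_component_lib (both defined just below); the component name "datm" is purely an example, and passing sys.argv follows the usual CIME script convention:

    import sys

    from CIME.buildlib import parse_input, build_cime_component_lib
    from CIME.case import Case

    def _main(argv):
        caseroot, libroot, bldroot = parse_input(argv)
        with Case(caseroot) as case:
            # "datm" is illustrative; a real script names its own component
            build_cime_component_lib(case, "datm", libroot, bldroot)

    if __name__ == "__main__":
        _main(sys.argv)
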
-############################################################################### -def parse_input(argv): -############################################################################### - - parser = argparse.ArgumentParser() - - setup_standard_logging_options(parser) - - parser.add_argument("caseroot", default=os.getcwd(), - help="Case directory") - - parser.add_argument("libroot", - help="root for creating the library") - - parser.add_argument("bldroot", - help="root for building library") - - args = parse_args_and_handle_standard_logging_options(argv, parser) - - # Some compilers have trouble with long include paths, setting - # EXEROOT to the relative path from bldroot solves the problem - # doing it in the environment means we don't need to change all of - # the component buildlib scripts - with Case(args.caseroot) as case: - os.environ["EXEROOT"] = os.path.relpath(case.get_value("EXEROOT"), args.bldroot) - - - return args.caseroot, args.libroot, args.bldroot - -############################################################################### -def build_cime_component_lib(case, compname, libroot, bldroot): -############################################################################### - - cimeroot = case.get_value("CIMEROOT") - compclass = compname[1:] - comp_interface = case.get_value("COMP_INTERFACE") - - with open(os.path.join(bldroot,'Filepath'), 'w') as out: - out.write(os.path.join(case.get_value('CASEROOT'), "SourceMods", - "src.{}\n".format(compname)) + "\n") - if compname.startswith('d'): - if (comp_interface == 'nuopc'): - out.write(os.path.join(cimeroot, "src", "components", "data_comps", "dshr_nuopc") + "\n") - out.write(os.path.join(cimeroot, "src", "components", "data_comps", compname, comp_interface) + "\n") - out.write(os.path.join(cimeroot, "src", "components", "data_comps", compname) + "\n") - elif compname.startswith('x'): - out.write(os.path.join(cimeroot, "src", "components", "xcpl_comps", "xshare") + "\n") - out.write(os.path.join(cimeroot, "src", "components", "xcpl_comps", "xshare", comp_interface) + "\n") - out.write(os.path.join(cimeroot, "src", "components", "xcpl_comps", compname, comp_interface) + "\n") - elif compname.startswith('s'): - out.write(os.path.join(cimeroot, "src", "components", "stub_comps", compname, comp_interface) + "\n") - - # Build the component - run_gmake(case, compclass, libroot, bldroot) - -############################################################################### -def run_gmake(case, compclass, libroot, bldroot, libname="", user_cppdefs=""): -############################################################################### - gmake_args = get_standard_makefile_args(case) - - gmake_j = case.get_value("GMAKE_J") - gmake = case.get_value("GMAKE") - - complib = "" - if libname: - complib = os.path.join(libroot, "lib{}.a".format(libname)) - else: - complib = os.path.join(libroot, "lib{}.a".format(compclass)) - - makefile = os.path.join(case.get_value("CASETOOLS"), "Makefile") - - cmd = "{} complib -j {:d} MODEL={} COMPLIB={} {} -f {} -C {} " \ - .format(gmake, gmake_j, compclass, complib, gmake_args, makefile, bldroot) - if user_cppdefs: - cmd = cmd + "USER_CPPDEFS='{}'".format(user_cppdefs ) - - _, out, _ = run_cmd(cmd, combine_output=True) - print(out.encode('utf-8')) diff --git a/scripts/lib/CIME/buildnml.py b/scripts/lib/CIME/buildnml.py deleted file mode 100644 index 04d3cbff653..00000000000 --- a/scripts/lib/CIME/buildnml.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -common implementation for building namelist commands - -These are used 
by components///cime_config/buildnml -""" - -from CIME.XML.standard_module_setup import * -from CIME.utils import expect, parse_args_and_handle_standard_logging_options, setup_standard_logging_options -import sys, os, argparse - -logger = logging.getLogger(__name__) - -############################################################################### -def parse_input(argv): -############################################################################### - - parser = argparse.ArgumentParser() - - setup_standard_logging_options(parser) - - parser.add_argument("caseroot", default=os.getcwd(), - help="Case directory") - - args = parse_args_and_handle_standard_logging_options(argv, parser) - - return args.caseroot - -############################################################################### -#pylint: disable=unused-argument -def build_xcpl_nml(case, caseroot, compname): -############################################################################### - compclasses = case.get_values("COMP_CLASSES") - compclass = None - for compclass in compclasses: - if case.get_value("COMP_{}".format(compclass)) == compname: - break - expect(compclass is not None, - "Could not identify compclass for compname {}".format(compname)) - rundir = case.get_value("RUNDIR") - comp_interface = case.get_value("COMP_INTERFACE") - - if comp_interface != "nuopc": - ninst = case.get_value("NINST_{}".format(compclass.upper())) - else: - ninst = case.get_value("NINST") - if not ninst: - ninst = 1 - nx = case.get_value("{}_NX".format(compclass.upper())) - ny = case.get_value("{}_NY".format(compclass.upper())) - if compname == "xrof": - flood_mode = case.get_value('XROF_FLOOD_MODE') - - extras = [] - dtype = 1 - npes = 0 - length = 0 - - if compname == "xatm": - if ny == 1: - dtype = 2 - extras = [["24", - "ncpl number of communications w/coupler per dat"], - ["0.0", - "simul time proxy (secs): time between cpl comms"]] - elif compname == "xglc" or compname == "xice": - dtype = 2 - elif compname == "xlnd": - dtype = 11 - elif compname == "xocn": - dtype = 4 - elif compname == "xrof": - dtype = 11 - if flood_mode == "ACTIVE": - extras = [[".true.", "flood flag"]] - else: - extras = [[".false.", "flood flag"]] - - for i in range(1, ninst + 1): - # If only 1 file, name is 'compclass_in' - # otherwise files are 'compclass_in0001', 'compclass_in0002', etc - if ninst == 1: - filename = os.path.join(rundir, "{}_in".format(compname)) - else: - filename = os.path.join(rundir, "{}_in_{:04d}".format(compname, i)) - - with open(filename, 'w') as infile: - infile.write("{:<20d} ! i-direction global dimension\n".format(nx)) - infile.write("{:<20d} ! j-direction global dimension\n".format(ny)) - infile.write("{:<20d} ! decomp_type 1=1d-by-lat, 2=1d-by-lon, 3=2d, 4=2d evensquare, 11=segmented\n".format(dtype)) - infile.write("{:<20d} ! num of pes for i (type 3 only)\n".format(npes)) - infile.write("{:<20d} ! length of segments (type 4 only)\n".format(length)) - for extra in extras: - #infile.write("{:-20s} ! {}\n".format(extra[0], extra[1])) - infile.write("{:<20s} ! 
{}\n".format(extra[0], extra[1])) - -############################################################################### -def create_namelist_infile(case, user_nl_file, namelist_infile, infile_text=""): -############################################################################### - lines_input = [] - if os.path.isfile(user_nl_file): - with open(user_nl_file, "r") as file_usernl: - lines_input = file_usernl.readlines() - else: - logger.warning("WARNING: No file {} found in case directory".format(user_nl_file)) - - lines_output = [] - lines_output.append("&comp_inparm \n") - if infile_text: - lines_output.append(infile_text) - logger.debug("file_infile {} ".format(infile_text)) - - for line in lines_input: - match1 = re.search(r"^[\&\/\!]", line) - match2 = re.search(r"\$([\w\_])+", line) - if match1 is None and match2 is not None: - line = case.get_resolved_value(line) - if match1 is None: - lines_output.append(line) - - lines_output.append("/ \n") - with open(namelist_infile, "w") as file_infile: - file_infile.write("\n".join(lines_output)) diff --git a/scripts/lib/CIME/case/README b/scripts/lib/CIME/case/README deleted file mode 100644 index de33f9e01b9..00000000000 --- a/scripts/lib/CIME/case/README +++ /dev/null @@ -1 +0,0 @@ -Files in this directory are members of the class Case defined in file case.py and should not be directly imported. \ No newline at end of file diff --git a/scripts/lib/CIME/case/case.py b/scripts/lib/CIME/case/case.py deleted file mode 100644 index 4e7c72719ee..00000000000 --- a/scripts/lib/CIME/case/case.py +++ /dev/null @@ -1,1641 +0,0 @@ -""" -Wrapper around all env XML for a case. - -All interaction with and between the module files in XML/ takes place -through the Case module. -""" -from copy import deepcopy -import glob, os, shutil, math, six -from CIME.XML.standard_module_setup import * -#pylint: disable=import-error,redefined-builtin -from six.moves import input -from CIME.utils import expect, get_cime_root, append_status -from CIME.utils import convert_to_type, get_model -from CIME.utils import get_project, get_charge_account, check_name -from CIME.utils import get_current_commit, safe_copy, get_cime_default_driver -from CIME.locked_files import LOCKED_DIR, lock_file -from CIME.XML.machines import Machines -from CIME.XML.pes import Pes -from CIME.XML.files import Files -from CIME.XML.testlist import Testlist -from CIME.XML.component import Component -from CIME.XML.compsets import Compsets -from CIME.XML.grids import Grids -from CIME.XML.batch import Batch -from CIME.XML.workflow import Workflow -from CIME.XML.pio import PIO -from CIME.XML.archive import Archive -from CIME.XML.env_test import EnvTest -from CIME.XML.env_mach_specific import EnvMachSpecific -from CIME.XML.env_case import EnvCase -from CIME.XML.env_mach_pes import EnvMachPes -from CIME.XML.env_build import EnvBuild -from CIME.XML.env_run import EnvRun -from CIME.XML.env_archive import EnvArchive -from CIME.XML.env_batch import EnvBatch -from CIME.XML.env_workflow import EnvWorkflow -from CIME.XML.generic_xml import GenericXML -from CIME.user_mod_support import apply_user_mods -from CIME.aprun import get_aprun_cmd_for_case - -logger = logging.getLogger(__name__) - -class Case(object): - """ - https://github.com/ESMCI/cime/wiki/Developers-Introduction - The Case class is the heart of the CIME Case Control system. All - interactions with a Case take part through this class. 
All of the - variables used to create and manipulate a case are defined in xml - files and for every xml file there is a python class to interact - with that file. - - XML files which are part of the CIME distribution and are meant to - be readonly with respect to a case are typically named - config_something.xml and the corresponding python Class is - Something and can be found in file CIME.XML.something.py. I'll - refer to these as the CIME config classes. - - XML files which are part of a case and thus are read/write to a - case are typically named env_whatever.xml and the cooresponding - python modules are CIME.XML.env_whatever.py and classes are - EnvWhatever. I'll refer to these as the Case env classes. - - The Case Class includes an array of the Case env classes, in the - configure function and it's supporting functions defined below - the case object creates and manipulates the Case env classes - by reading and interpreting the CIME config classes. - - This class extends across multiple files, class members external to this file - are listed in the following imports - """ - from CIME.case.case_setup import case_setup - from CIME.case.case_clone import create_clone, _copy_user_modified_to_clone - from CIME.case.case_test import case_test - from CIME.case.case_submit import check_DA_settings, check_case, submit - from CIME.case.case_st_archive import case_st_archive, restore_from_archive, \ - archive_last_restarts, test_st_archive, test_env_archive - from CIME.case.case_run import case_run - from CIME.case.case_cmpgen_namelists import case_cmpgen_namelists - from CIME.case.check_lockedfiles import check_lockedfile, check_lockedfiles, check_pelayouts_require_rebuild - from CIME.case.preview_namelists import create_dirs, create_namelists - from CIME.case.check_input_data import check_all_input_data, stage_refcase, check_input_data - - def __init__(self, case_root=None, read_only=True): - - if case_root is None: - case_root = os.getcwd() - self._caseroot = case_root - logger.debug("Initializing Case.") - self._read_only_mode = True - self._force_read_only = read_only - self._primary_component = None - - self._env_entryid_files = [] - self._env_generic_files = [] - self._files = [] - - self.read_xml() - - # Hold arbitary values. In create_newcase we may set values - # for xml files that haven't been created yet. We need a place - # to store them until we are ready to create the file. At file - # creation we get the values for those fields from this lookup - # table and then remove the entry. 
- self.lookups = {} - self.set_lookup_value('CIMEROOT',os.path.abspath(get_cime_root())) - self._cime_model = get_model() - self.set_lookup_value('MODEL', self._cime_model) - self._compsetname = None - self._gridname = None - self._pesfile = None - self._gridfile = None - self._components = [] - self._component_classes = [] - self._component_description = {} - self._is_env_loaded = False - - # these are user_mods as defined in the compset - # Command Line user_mods are handled seperately - - # Derived attributes - self.thread_count = None - self.total_tasks = None - self.tasks_per_node = None - self.num_nodes = None - self.spare_nodes = None - self.tasks_per_numa = None - self.cores_per_task = None - self.srun_binding = None - - # check if case has been configured and if so initialize derived - if self.get_value("CASEROOT") is not None: - self.initialize_derived_attributes() - - def check_if_comp_var(self, vid): - for env_file in self._env_entryid_files: - new_vid, new_comp, iscompvar = env_file.check_if_comp_var(vid) - if iscompvar: - return new_vid, new_comp, iscompvar - - return vid, None, False - - def initialize_derived_attributes(self): - """ - These are derived variables which can be used in the config_* files - for variable substitution using the {{ var }} syntax - """ - env_mach_pes = self.get_env("mach_pes") - env_mach_spec = self.get_env('mach_specific') - comp_classes = self.get_values("COMP_CLASSES") - max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") - - self.thread_count = env_mach_pes.get_max_thread_count(comp_classes) - - mpi_attribs = { - "compiler" : self.get_value("COMPILER"), - "mpilib" : self.get_value("MPILIB"), - "threaded" : self.get_build_threaded(), - } - - job = self.get_primary_job() - executable = env_mach_spec.get_mpirun(self, mpi_attribs, job, exe_only=True)[0] - if executable is not None and "aprun" in executable: - _, self.num_nodes, self.total_tasks, self.tasks_per_node, self.thread_count = get_aprun_cmd_for_case(self, "e3sm.exe") - self.spare_nodes = env_mach_pes.get_spare_nodes(self.num_nodes) - self.num_nodes += self.spare_nodes - else: - self.total_tasks = env_mach_pes.get_total_tasks(comp_classes) - self.tasks_per_node = env_mach_pes.get_tasks_per_node(self.total_tasks, self.thread_count) - - self.num_nodes, self.spare_nodes = env_mach_pes.get_total_nodes(self.total_tasks, self.thread_count) - self.num_nodes += self.spare_nodes - - logger.debug("total_tasks {} thread_count {}".format(self.total_tasks, self.thread_count)) - - self.tasks_per_numa = int(math.ceil(self.tasks_per_node / 2.0)) - smt_factor = max(1,int(self.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node)) - - threads_per_node = self.tasks_per_node * self.thread_count - threads_per_core = 1 if (threads_per_node <= max_mpitasks_per_node) else smt_factor - self.cores_per_task = self.thread_count / threads_per_core - - os.environ["OMP_NUM_THREADS"] = str(self.thread_count) - - self.srun_binding = smt_factor*max_mpitasks_per_node / self.tasks_per_node - - # Define __enter__ and __exit__ so that we can use this as a context manager - # and force a flush on exit. 
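
The comment above describes the contract implemented by __enter__/__exit__ just below: write access to a Case happens only inside a with block, and leaving the block flushes any modified env_*.xml files. A minimal usage sketch, assuming an existing case directory; the path and the STOP_N/STOP_OPTION ids are examples only:

    from CIME.case import Case

    with Case("/path/to/caseroot", read_only=False) as case:   # placeholder path
        stop_option = case.get_value("STOP_OPTION")
        case.set_value("STOP_N", 5)   # set_value refuses to run in read-only mode
    # on exit, __exit__ calls flush() and restores read-only mode
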
- def __enter__(self): - if not self._force_read_only: - self._read_only_mode = False - return self - - def __exit__(self, *_): - self.flush() - self._read_only_mode = True - return False - - def read_xml(self): - for env_file in self._files: - expect(not env_file.needsrewrite, "Potential loss of unflushed changes in {}".format(env_file.filename)) - - self._env_entryid_files = [] - self._env_entryid_files.append(EnvCase(self._caseroot, components=None, read_only=self._force_read_only)) - components = self._env_entryid_files[0].get_values("COMP_CLASSES") - self._env_entryid_files.append(EnvRun(self._caseroot, components=components, read_only=self._force_read_only)) - self._env_entryid_files.append(EnvBuild(self._caseroot, components=components, read_only=self._force_read_only)) - self._env_entryid_files.append(EnvMachPes(self._caseroot, components=components, read_only=self._force_read_only)) - self._env_entryid_files.append(EnvBatch(self._caseroot, read_only=self._force_read_only)) - self._env_entryid_files.append(EnvWorkflow(self._caseroot, read_only=self._force_read_only)) - - if os.path.isfile(os.path.join(self._caseroot,"env_test.xml")): - self._env_entryid_files.append(EnvTest(self._caseroot, components=components, read_only=self._force_read_only)) - self._env_generic_files = [] - self._env_generic_files.append(EnvMachSpecific(self._caseroot, read_only=self._force_read_only)) - self._env_generic_files.append(EnvArchive(self._caseroot, read_only=self._force_read_only)) - self._files = self._env_entryid_files + self._env_generic_files - - def get_case_root(self): - """Returns the root directory for this case.""" - return self._caseroot - - def get_env(self, short_name, allow_missing=False): - full_name = "env_{}.xml".format(short_name) - for env_file in self._files: - if os.path.basename(env_file.filename) == full_name: - return env_file - if allow_missing: - return None - expect(False,"Could not find object for {} in case".format(full_name)) - - def check_timestamps(self, short_name=None): - if short_name is not None: - env_file = self.get_env(short_name) - env_file.check_timestamp() - else: - for env_file in self._files: - env_file.check_timestamp() - - def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None): - newcase = deepcopy(self) - for env_file in newcase._files: # pylint: disable=protected-access - basename = os.path.basename(env_file.filename) - newfile = os.path.join(newcaseroot, basename) - env_file.change_file(newfile, copy=True) - - if newcimeroot is not None: - newcase.set_value("CIMEROOT", newcimeroot) - - if newsrcroot is not None: - newcase.set_value("SRCROOT", newsrcroot) - - newcase.set_value("CASE",newcasename) - newcase.set_value("CASEROOT",newcaseroot) - newcase.set_value("CONTINUE_RUN","FALSE") - newcase.set_value("RESUBMIT",0) - - # Important, and subtle: Writability should NOT be copied because - # this allows the copy to be modified without needing a "with" statement - # which opens the door to tricky errors such as unflushed writes. 
- newcase._read_only_mode = True # pylint: disable=protected-access - - return newcase - - def flush(self, flushall=False): - if not os.path.isdir(self._caseroot): - # do not flush if caseroot wasnt created - return - for env_file in self._files: - env_file.write(force_write=flushall) - - def get_values(self, item, attribute=None, resolved=True, subgroup=None): - for env_file in self._files: - # Wait and resolve in self rather than in env_file - results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup) - if len(results) > 0: - new_results = [] - if resolved: - for result in results: - if isinstance(result, six.string_types): - result = self.get_resolved_value(result) - vtype = env_file.get_type_info(item) - if vtype is not None or vtype != "char": - result = convert_to_type(result, vtype, item) - - new_results.append(result) - - else: - new_results.append(result) - - else: - new_results = results - - return new_results - - # Return empty result - return [] - - def get_value(self, item, attribute=None, resolved=True, subgroup=None): - result = None - for env_file in self._files: - # Wait and resolve in self rather than in env_file - result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup) - - if result is not None: - if resolved and isinstance(result, six.string_types): - result = self.get_resolved_value(result) - vtype = env_file.get_type_info(item) - if vtype is not None and vtype != "char": - result = convert_to_type(result, vtype, item) - - return result - - # Return empty result - return result - - def get_record_fields(self, variable, field): - """ get_record_fields gets individual requested field from an entry_id file - this routine is used only by xmlquery """ - # Empty result - result = [] - - for env_file in self._env_entryid_files: - # Wait and resolve in self rather than in env_file - logger.debug("(get_record_field) Searching in {}".format(env_file.__class__.__name__)) - if field == "varid": - roots = env_file.scan_children("entry") - else: - roots = env_file.get_nodes_by_id(variable) - - for root in roots: - if root is not None: - if field == "raw": - result.append(env_file.get_raw_record(root)) - elif field == "desc": - result.append(env_file.get_description(root)) - elif field == "varid": - result.append(env_file.get(root, "id")) - elif field == "group": - result.extend(env_file.get_groups(root)) - elif field == "valid_values": - # pylint: disable=protected-access - vv = env_file._get_valid_values(root) - if vv: - result.extend(vv) - elif field == "file": - result.append(env_file.filename) - - if not result: - for env_file in self._env_generic_files: - roots = env_file.scan_children(variable) - for root in roots: - if root is not None: - if field == "raw": - result.append(env_file.get_raw_record(root)) - elif field == "group": - result.extend(env_file.get_groups(root)) - elif field == "file": - result.append(env_file.filename) - - return list(set(result)) - - def get_type_info(self, item): - result = None - for env_file in self._env_entryid_files: - result = env_file.get_type_info(item) - if result is not None: - return result - - return result - - def get_resolved_value(self, item, recurse=0, allow_unresolved_envvars=False): - num_unresolved = item.count("$") if item else 0 - recurse_limit = 10 - if (num_unresolved > 0 and recurse < recurse_limit ): - for env_file in self._env_entryid_files: - item = env_file.get_resolved_value(item, - allow_unresolved_envvars=allow_unresolved_envvars) - if ("$" not in item): - return item - 
else: - item = self.get_resolved_value(item, recurse=recurse+1, - allow_unresolved_envvars=allow_unresolved_envvars) - - return item - - def set_value(self, item, value, subgroup=None, ignore_type=False, allow_undefined=False, return_file=False): - """ - If a file has been defined, and the variable is in the file, - then that value will be set in the file object and the resovled value - is returned unless return_file is True, in which case (resolved_value, filename) - is returned where filename is the name of the modified file. - """ - expect(not self._read_only_mode, "Cannot modify case, read_only. " - "Case must be opened with read_only=False and can only be modified within a context manager") - - if item == "CASEROOT": - self._caseroot = value - result = None - - for env_file in self._files: - result = env_file.set_value(item, value, subgroup, ignore_type) - if (result is not None): - logger.debug("Will rewrite file {} {}".format(env_file.filename, item)) - return (result, env_file.filename) if return_file else result - - if len(self._files) == 1: - expect(allow_undefined or result is not None, - "No variable {} found in file {}".format(item, self._files[0].filename)) - else: - expect(allow_undefined or result is not None, - "No variable {} found in case".format(item)) - - def set_valid_values(self, item, valid_values): - """ - Update or create a valid_values entry for item and populate it - """ - expect(not self._read_only_mode, "Cannot modify case, read_only. " - "Case must be opened with read_only=False and can only be modified within a context manager") - - result = None - for env_file in self._env_entryid_files: - result = env_file.set_valid_values(item, valid_values) - if (result is not None): - logger.debug("Will rewrite file {} {}".format(env_file.filename, item)) - return result - - def set_lookup_value(self, item, value): - if item in self.lookups and self.lookups[item] is not None: - logger.warning("Item {} already in lookups with value {}".format(item,self.lookups[item])) - else: - logger.debug("Setting in lookups: item {}, value {}".format(item,value)) - self.lookups[item] = value - - def clean_up_lookups(self, allow_undefined=False): - # put anything in the lookups table into existing env objects - for key,value in list(self.lookups.items()): - logger.debug("lookup key {} value {}".format(key, value)) - result = self.set_value(key,value, allow_undefined=allow_undefined) - if result is not None: - del self.lookups[key] - - def _set_compset(self, compset_name, files, driver="mct"): - """ - Loop through all the compset files and find the compset - specifation file that matches either the input 'compset_name'. - Note that the input compset name (i.e. compset_name) can be - either a longname or an alias. This will set various compset-related - info. - - Returns a tuple: (compset_alias, science_support, component_defining_compset) - (For a user-defined compset - i.e., a compset without an alias - these - return values will be None, [], None.) 
- """ - science_support = [] - compset_alias = None - components = files.get_components("COMPSETS_SPEC_FILE") - logger.debug(" Possible components for COMPSETS_SPEC_FILE are {}".format(components)) - - self.set_lookup_value("COMP_INTERFACE", driver) - if self._cime_model == 'cesm': - comp_root_dir_cpl = files.get_value("COMP_ROOT_DIR_CPL") - self.set_lookup_value("COMP_ROOT_DIR_CPL",comp_root_dir_cpl) - - # Loop through all of the files listed in COMPSETS_SPEC_FILE and find the file - # that has a match for either the alias or the longname in that order - for component in components: - - # Determine the compsets file for this component - compsets_filename = files.get_value("COMPSETS_SPEC_FILE", {"component":component}) - - # If the file exists, read it and see if there is a match for the compset alias or longname - if (os.path.isfile(compsets_filename)): - compsets = Compsets(compsets_filename) - match, compset_alias, science_support = compsets.get_compset_match(name=compset_name) - if match is not None: - self._compsetname = match - logger.info("Compset longname is {}".format(match)) - logger.info("Compset specification file is {}".format(compsets_filename)) - break - - if compset_alias is None: - logger.info("Did not find an alias or longname compset match for {} ".format(compset_name)) - self._compsetname = compset_name - - # Fill in compset name - self._compsetname, self._components = self.valid_compset(self._compsetname, compset_alias, files) - # if this is a valiid compset longname there will be at least 7 components. - components = self.get_compset_components() - expect(len(components) > 6, "No compset alias {} found and this does not appear to be a compset longname.".format(compset_name)) - - return compset_alias, science_support - - def get_primary_component(self): - if self._primary_component is None: - self._primary_component = self._find_primary_component() - return self._primary_component - - def _find_primary_component(self): - """ - try to glean the primary component based on compset name - """ - progcomps = {} - spec = {} - primary_component = None - for comp in self._component_classes: - if comp == "CPL": - continue - spec[comp] = self.get_value("COMP_{}".format(comp)) - notprogcomps = ("D{}".format(comp),"X{}".format(comp),"S{}".format(comp)) - if spec[comp].upper() in notprogcomps: - progcomps[comp] = False - else: - progcomps[comp] = True - expect("ATM" in progcomps and "LND" in progcomps and "OCN" in progcomps and \ - "ICE" in progcomps, " Not finding expected components in {}".format(self._component_classes)) - if progcomps["ATM"] and progcomps["LND"] and progcomps["OCN"] and \ - progcomps["ICE"]: - primary_component = "allactive" - elif progcomps["LND"] and progcomps["OCN"] and progcomps["ICE"]: - # this is a "J" compset - primary_component = "allactive" - elif progcomps["ATM"]: - if "DOCN%SOM" in self._compsetname and progcomps["LND"]: - # This is an "E" compset - primary_component = "allactive" - else: - # This is an "F" or "Q" compset - primary_component = spec["ATM"] - elif progcomps["LND"]: - # This is an "I" compset - primary_component = spec["LND"] - elif progcomps["OCN"]: - # This is a "C" or "G" compset - primary_component = spec["OCN"] - elif progcomps["ICE"]: - # This is a "D" compset - primary_component = spec["ICE"] - elif "GLC" in progcomps and progcomps["GLC"]: - # This is a "TG" compset - primary_component = spec["GLC"] - else: - # This is "A", "X" or "S" - primary_component = "drv" - - return primary_component - - def _valid_compset_impl(self, 
compset_name, compset_alias, comp_classes, comp_hash): - """Add stub models missing in , return full compset name. - is a list of all supported component classes. - is a dictionary where each key is a supported component - (e.g., datm) and the associated value is the index in of - that component's class (e.g., 1 for atm). - >>> Case(read_only=False)._valid_compset_impl('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) - ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) - >>> Case(read_only=False)._valid_compset_impl('2000_DATM%NYF_DICE%SSMI_DOCN%DOM_DROF%NYF', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) - ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) - >>> Case(read_only=False)._valid_compset_impl('2000_DICE%SSMI_DOCN%DOM_DATM%NYF_DROF%NYF', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) - ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) - >>> Case(read_only=False)._valid_compset_impl('2000_DICE%SSMI_DOCN%DOM_DATM%NYF_DROF%NYF_TEST', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8}) - ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP_TEST', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP']) - >>> Case(read_only=False)._valid_compset_impl('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_BGC%BDRD', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) - ('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_SESP_BGC%BDRD', ['1850', 'CAM60', 'CLM50%BGC-CROP', 'CICE', 'POP2%ECO%ABIO-DIC', 'MOSART', 'CISM2%NOEVOLVE', 'WW3', 'SESP']) - >>> Case(read_only=False)._valid_compset_impl('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_BGC%BDRD_TEST', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'IAC', 'ESP'], {'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) - ('1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO%ABIO-DIC_MOSART_CISM2%NOEVOLVE_WW3_SIAC_SESP_BGC%BDRD_TEST', ['1850', 'CAM60', 'CLM50%BGC-CROP', 'CICE', 'POP2%ECO%ABIO-DIC', 'MOSART', 'CISM2%NOEVOLVE', 'WW3', 'SIAC', 'SESP']) - >>> Case(read_only=False)._valid_compset_impl('1850_SATM_SLND_SICE_SOCN_SGLC_SWAV', 'S', ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'IAC', 'ESP'], 
{'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) - ('1850_SATM_SLND_SICE_SOCN_SROF_SGLC_SWAV_SIAC_SESP', ['1850', 'SATM', 'SLND', 'SICE', 'SOCN', 'SROF', 'SGLC', 'SWAV', 'SIAC', 'SESP']) - - >>> Case(read_only=False)._valid_compset_impl('1850_SATM_SLND_SICE_SOCN_SGLC_SWAV', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'IAC', 'ESP'], {'datm':1,'satm':1, 'cam':1,'dlnd':2,'clm':2,'slnd':2,'cice':3,'dice':3,'sice':3,'pop':4,'docn':4,'socn':4,'mosart':5,'drof':5,'srof':5,'cism':6,'sglc':6,'ww':7,'swav':7,'ww3':7,'sesp':8}) #doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - CIMEError: ERROR: Invalid compset name, 1850_SATM_SLND_SICE_SOCN_SGLC_SWAV, all stub components generated - """ - # Find the models declared in the compset - model_set = [None]*len(comp_classes) - components = compset_name.split('_') - model_set[0] = components[0] - noncomps = [] - allstubs = True - for model in components[1:]: - match = Case.__mod_match_re__.match(model.lower()) - expect(match is not None, "No model match for {}".format(model)) - mod_match = match.group(1) - # Check for noncomponent appends (BGC & TEST) - if mod_match in ('bgc', 'test'): - noncomps.append(model) - else: - expect(mod_match in comp_hash, - "Unknown model type, {}".format(model)) - comp_ind = comp_hash[mod_match] - model_set[comp_ind] = model - - # Fill in missing components with stubs - for comp_ind in range(1, len(model_set)): - if model_set[comp_ind] is None: - comp_class = comp_classes[comp_ind] - stub = 'S' + comp_class - logger.info("Automatically adding {} to compset".format(stub)) - model_set[comp_ind] = stub - elif model_set[comp_ind][0] != 'S': - allstubs = False - - expect((compset_alias is not None) or (not allstubs), - 'Invalid compset name, {}, all stub components generated'.format(compset_name)) - # Return the completed compset - compsetname = '_'.join(model_set) - for noncomp in noncomps: - compsetname = compsetname + '_' + noncomp - - return compsetname, model_set - - # RE to match component type name without optional piece (stuff after %). - # Drop any trailing digits (e.g., the 60 in CAM60) to ensure match - # Note, this will also drop trailing digits such as in ww3 but since it - # is handled consistenly, this should not affect functionality. - # Note: interstitial digits are included (e.g., in FV3GFS). - __mod_match_re__ = re.compile(r"([^%]*[^0-9%]+)") - def valid_compset(self, compset_name, compset_alias, files): - """Add stub models missing in , return full compset name. - is used to collect set of all supported components. - """ - # First, create hash of model names - # A note about indexing. Relevant component classes start at 1 - # because we ignore CPL for finding model components. 
- # Model components would normally start at zero but since we are - # dealing with a compset, 0 is reserved for the time field - drv_config_file = files.get_value("CONFIG_CPL_FILE") - drv_comp = Component(drv_config_file, "CPL") - comp_classes = drv_comp.get_valid_model_components() - comp_hash = {} # Hash model name to component class index - for comp_ind in range(1, len(comp_classes)): - comp = comp_classes[comp_ind] - # Find list of models for component class - # List can be in different locations, check CONFIG_XXX_FILE - node_name = 'CONFIG_{}_FILE'.format(comp) - models = files.get_components(node_name) - if (models is None) or (None in models): - # Backup, check COMP_ROOT_DIR_XXX - node_name = 'COMP_ROOT_DIR_' + comp - models = files.get_components(node_name) - - expect((models is not None) and (None not in models), - "Unable to find list of supported components") - - for model in models: - mod_match = Case.__mod_match_re__.match(model.lower()).group(1) - comp_hash[mod_match] = comp_ind - - return self._valid_compset_impl(compset_name, compset_alias, - comp_classes, comp_hash) - - - def _set_info_from_primary_component(self, files, pesfile=None): - """ - Sets file and directory paths that depend on the primary component of - this compset. - - Assumes that self._primary_component has already been set. - """ - component = self.get_primary_component() - - compset_spec_file = files.get_value("COMPSETS_SPEC_FILE", - {"component":component}, resolved=False) - - self.set_lookup_value("COMPSETS_SPEC_FILE" ,compset_spec_file) - if pesfile is None: - self._pesfile = files.get_value("PES_SPEC_FILE", {"component":component}) - pesfile_unresolved = files.get_value("PES_SPEC_FILE", {"component":component}, resolved=False) - logger.info("Pes specification file is {}".format(self._pesfile)) - else: - self._pesfile = pesfile - pesfile_unresolved = pesfile - expect(self._pesfile is not None,"No pesfile found for component {}".format(component)) - - self.set_lookup_value("PES_SPEC_FILE", pesfile_unresolved) - - tests_filename = files.get_value("TESTS_SPEC_FILE", {"component":component}, resolved=False) - tests_mods_dir = files.get_value("TESTS_MODS_DIR" , {"component":component}, resolved=False) - user_mods_dir = files.get_value("USER_MODS_DIR" , {"component":component}, resolved=False) - self.set_lookup_value("TESTS_SPEC_FILE", tests_filename) - self.set_lookup_value("TESTS_MODS_DIR" , tests_mods_dir) - self.set_lookup_value("USER_MODS_DIR" , user_mods_dir) - - - def get_compset_components(self): - #If are doing a create_clone then, self._compsetname is not set yet - components = [] - compset = self.get_value("COMPSET") - if compset is None: - compset = self._compsetname - expect(compset is not None, - "compset is not set") - # the first element is always the date operator - skip it - elements = compset.split('_')[1:] # pylint: disable=maybe-no-member - for element in elements: - # ignore the possible BGC or TEST modifier - if element.startswith("BGC%") or element.startswith("TEST"): - continue - else: - element_component = element.split('%')[0].lower() - if "ww" not in element_component and "fv3" not in element_component: - element_component = re.sub(r'[0-9]*',"",element_component) - components.append(element_component) - return components - - - def __iter__(self): - for entryid_file in self._env_entryid_files: - for key, val in entryid_file: - if isinstance(val, six.string_types) and '$' in val: - yield key, self.get_resolved_value(val) - else: - yield key, val - - def set_comp_classes(self, 
comp_classes): - self._component_classes = comp_classes - for env_file in self._env_entryid_files: - env_file.set_components(comp_classes) - - def _get_component_config_data(self, files, driver=None): - # attributes used for multi valued defaults - # attlist is a dictionary used to determine the value element that has the most matches - attlist = {"compset":self._compsetname, "grid":self._gridname, "cime_model":self._cime_model} - - # Determine list of component classes that this coupler/driver knows how - # to deal with. This list follows the same order as compset longnames follow. - - # Add the group and elements for the config_files.xml - for env_file in self._env_entryid_files: - env_file.add_elements_by_group(files, attlist) - - drv_config_file = files.get_value("CONFIG_CPL_FILE") - drv_comp = Component(drv_config_file, "CPL") - for env_file in self._env_entryid_files: - env_file.add_elements_by_group(drv_comp, attributes=attlist) - - drv_config_file_model_specific = files.get_value("CONFIG_CPL_FILE_MODEL_SPECIFIC") - expect(os.path.isfile(drv_config_file_model_specific), - "No {} specific file found for driver {}".format(get_model(),driver)) - drv_comp_model_specific = Component(drv_config_file_model_specific, 'CPL') - - self._component_description["forcing"] = drv_comp_model_specific.get_forcing_description(self._compsetname) - logger.info("Compset forcing is {}".format(self._component_description["forcing"])) - self._component_description["CPL"] = drv_comp_model_specific.get_description(self._compsetname) - if len(self._component_description["CPL"]) > 0: - logger.info("Com forcing is {}".format(self._component_description["CPL"])) - for env_file in self._env_entryid_files: - env_file.add_elements_by_group(drv_comp_model_specific, attributes=attlist) - - self.clean_up_lookups(allow_undefined=True) - - # loop over all elements of both component_classes and components - and get config_component_file for - # for each component - self.set_comp_classes(drv_comp.get_valid_model_components()) - - # will need a change here for new cpl components - root_dir_node_name = 'COMP_ROOT_DIR_CPL' - comp_root_dir = files.get_value(root_dir_node_name, {"component":driver}, resolved=False) - - if comp_root_dir is not None: - self.set_value(root_dir_node_name, comp_root_dir) - - for i in range(1,len(self._component_classes)): - comp_class = self._component_classes[i] - comp_name = self._components[i-1] - root_dir_node_name = 'COMP_ROOT_DIR_' + comp_class - node_name = 'CONFIG_' + comp_class + '_FILE' - comp_root_dir = files.get_value(root_dir_node_name, {"component":comp_name}, resolved=False) - if comp_root_dir is not None: - self.set_value(root_dir_node_name, comp_root_dir) - - compatt = {"component":comp_name} - # Add the group and elements for the config_files.xml - comp_config_file = files.get_value(node_name, compatt, resolved=False) - expect(comp_config_file is not None,"No component {} found for class {}".format(comp_name, comp_class)) - self.set_value(node_name, comp_config_file) - comp_config_file = files.get_value(node_name, compatt) - expect(comp_config_file is not None and os.path.isfile(comp_config_file), - "Config file {} for component {} not found.".format(comp_config_file, comp_name)) - compobj = Component(comp_config_file, comp_class) - # For files following version 3 schema this also checks the compsetname validity - - self._component_description[comp_class] = compobj.get_description(self._compsetname) - expect(self._component_description[comp_class] is not None, - "No description 
found in file {} for component {} in comp_class {}".format(comp_config_file, comp_name, comp_class)) - logger.info("{} component is {}".format(comp_class, self._component_description[comp_class])) - for env_file in self._env_entryid_files: - env_file.add_elements_by_group(compobj, attributes=attlist) - self.clean_up_lookups(allow_undefined=driver=='nuopc') - - def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): - #-------------------------------------------- - # pe layout - #-------------------------------------------- - mach_pes_obj = None - # self._pesfile may already be env_mach_pes.xml if so we can just return - gfile = GenericXML(infile=self._pesfile) - ftype = gfile.get_id() - expect(ftype == "env_mach_pes.xml" or ftype == "config_pes", " Do not recognize {} as a valid CIME pes file {}".format(self._pesfile, ftype)) - if ftype == "env_mach_pes.xml": - new_mach_pes_obj = EnvMachPes(infile=self._pesfile, components=self._component_classes) - self.update_env(new_mach_pes_obj, "mach_pes", blow_away=True) - return new_mach_pes_obj.get_value("TOTALPES") - - pesobj = Pes(self._pesfile) - - match1 = re.match('(.+)x([0-9]+)', "" if pecount is None else pecount) - match2 = re.match('([0-9]+)', "" if pecount is None else pecount) - - pes_ntasks = {} - pes_nthrds = {} - pes_rootpe = {} - pes_pstrid = {} - other = {} - comment = None - force_tasks = None - force_thrds = None - - if match1: - opti_tasks = match1.group(1) - if opti_tasks.isdigit(): - force_tasks = int(opti_tasks) - else: - pes_ntasks = pesobj.find_pes_layout(self._gridname, self._compsetname, machine_name, - pesize_opts=opti_tasks, mpilib=mpilib)[0] - force_thrds = int(match1.group(2)) - elif match2: - force_tasks = int(match2.group(1)) - pes_nthrds = pesobj.find_pes_layout(self._gridname, self._compsetname, machine_name, mpilib=mpilib)[1] - else: - pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other, comment = pesobj.find_pes_layout(self._gridname, self._compsetname, - machine_name, pesize_opts=pecount, mpilib=mpilib) - - if match1 or match2: - for component_class in self._component_classes: - if force_tasks is not None: - string_ = "NTASKS_" + component_class - pes_ntasks[string_] = force_tasks - - if force_thrds is not None: - string_ = "NTHRDS_" + component_class - pes_nthrds[string_] = force_thrds - - # Always default to zero rootpe if user forced procs and or threads - string_ = "ROOTPE_" + component_class - pes_rootpe[string_] = 0 - - mach_pes_obj = self.get_env("mach_pes") - mach_pes_obj.add_comment(comment) - - if other is not None: - for key, value in other.items(): - self.set_value(key, value) - - totaltasks = [] - for comp_class in self._component_classes: - ntasks_str = "NTASKS_{}".format(comp_class) - nthrds_str = "NTHRDS_{}".format(comp_class) - rootpe_str = "ROOTPE_{}".format(comp_class) - pstrid_str = "PSTRID_{}".format(comp_class) - - ntasks = pes_ntasks[ntasks_str] if ntasks_str in pes_ntasks else 1 - nthrds = pes_nthrds[nthrds_str] if nthrds_str in pes_nthrds else 1 - rootpe = pes_rootpe[rootpe_str] if rootpe_str in pes_rootpe else 0 - pstrid = pes_pstrid[pstrid_str] if pstrid_str in pes_pstrid else 1 - - totaltasks.append( (ntasks + rootpe) * nthrds ) - - mach_pes_obj.set_value(ntasks_str, ntasks) - mach_pes_obj.set_value(nthrds_str, nthrds) - mach_pes_obj.set_value(rootpe_str, rootpe) - mach_pes_obj.set_value(pstrid_str, pstrid) - - if multi_driver: - mach_pes_obj.set_value("MULTI_DRIVER", True) - - # Make sure that every component has been accounted for - # set, nthrds and ntasks 
to 1 otherwise. Also set the ninst values here. - for compclass in self._component_classes: - key = "NINST_{}".format(compclass) - if compclass == "CPL": - continue - mach_pes_obj.set_value(key, ninst) - - key = "NTASKS_{}".format(compclass) - if key not in pes_ntasks: - mach_pes_obj.set_value(key,1) - key = "NTHRDS_{}".format(compclass) - if compclass not in pes_nthrds: - mach_pes_obj.set_value(compclass,1) - - def configure(self, compset_name, grid_name, machine_name=None, - project=None, pecount=None, compiler=None, mpilib=None, - pesfile=None, gridfile=None, - multi_driver=False, ninst=1, test=False, - walltime=None, queue=None, output_root=None, - run_unsupported=False, answer=None, - input_dir=None, driver=None, workflow_case="default", - non_local=False): - - expect(check_name(compset_name, additional_chars='.'), "Invalid compset name {}".format(compset_name)) - - #-------------------------------------------- - # compset, pesfile, and compset components - #-------------------------------------------- - files = Files(comp_interface=driver) - - #-------------------------------------------- - # find and/or fill out compset name - #-------------------------------------------- - compset_alias, science_support = self._set_compset(compset_name, files, driver) - - self._components = self.get_compset_components() - - #-------------------------------------------- - # grid - #-------------------------------------------- - grids = Grids(gridfile) - - gridinfo = grids.get_grid_info(name=grid_name, compset=self._compsetname, driver=driver) - - self._gridname = gridinfo["GRID"] - for key,value in gridinfo.items(): - logger.debug("Set grid {} {}".format(key,value)) - self.set_lookup_value(key,value) - - #-------------------------------------------- - # component config data - #-------------------------------------------- - self._get_component_config_data(files, driver=driver) - - # This needs to be called after self.set_comp_classes, which is called - # from self._get_component_config_data - self._primary_component = self.get_primary_component() - - self._set_info_from_primary_component(files, pesfile=pesfile) - - self.clean_up_lookups(allow_undefined=True) - - self.get_compset_var_settings(files) - - self.clean_up_lookups(allow_undefined=True) - - #-------------------------------------------- - # machine - #-------------------------------------------- - # set machine values in env_xxx files - machobj = Machines(machine=machine_name) - probed_machine = machobj.probe_machine_name() - machine_name = machobj.get_machine_name() - self.set_value("MACH", machine_name) - if probed_machine != machine_name and probed_machine is not None: - logger.warning("WARNING: User-selected machine '{}' does not match probed machine '{}'".format(machine_name, probed_machine)) - else: - logger.info("Machine is {}".format(machine_name)) - - nodenames = machobj.get_node_names() - nodenames = [x for x in nodenames if - '_system' not in x and '_variables' not in x and 'mpirun' not in x and\ - 'COMPILER' not in x and 'MPILIB' not in x] - - for nodename in nodenames: - value = machobj.get_value(nodename, resolved=False) - type_str = self.get_type_info(nodename) - if type_str is not None: - logger.debug("machine nodname {} value {}".format(nodename, value)) - self.set_value(nodename, convert_to_type(value, type_str, nodename)) - - if compiler is None: - compiler = machobj.get_default_compiler() - else: - expect(machobj.is_valid_compiler(compiler), - "compiler {} is not supported on machine {}".format(compiler, machine_name)) - 
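# --- Editor's note (illustrative sketch, not part of the patch) -----------
# The deleted _setup_mach_pes above accepts a pecount override such as
# "16", "16x4", "S", or "Sx2" and splits it with two regexes into forced
# task/thread counts or a PE-size keyword. Below is a minimal stand-alone
# sketch of that parsing step; the helper name parse_pecount is hypothetical.
import re

def parse_pecount(pecount):
    """Return (force_tasks, force_thrds, pesize_opt) for a pecount string."""
    match1 = re.match(r"(.+)x([0-9]+)", "" if pecount is None else pecount)
    match2 = re.match(r"([0-9]+)", "" if pecount is None else pecount)
    if match1:
        tasks_part, force_thrds = match1.group(1), int(match1.group(2))
        if tasks_part.isdigit():
            return int(tasks_part), force_thrds, None   # e.g. "16x4"
        return None, force_thrds, tasks_part            # e.g. "Sx2"
    if match2:
        return int(match2.group(1)), None, None         # e.g. "16"
    return None, None, pecount                          # e.g. "S" or None

# parse_pecount("16x4") -> (16, 4, None); parse_pecount("S") -> (None, None, "S")
# ---------------------------------------------------------------------------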
- self.set_value("COMPILER",compiler) - - if mpilib is None: - mpilib = machobj.get_default_MPIlib({"compiler":compiler}) - else: - expect(machobj.is_valid_MPIlib(mpilib, {"compiler":compiler}), - "MPIlib {} is not supported on machine {}".format(mpilib, machine_name)) - self.set_value("MPILIB",mpilib) - - machdir = machobj.get_machines_dir() - self.set_value("MACHDIR", machdir) - - # Create env_mach_specific settings from machine info. - env_mach_specific_obj = self.get_env("mach_specific") - env_mach_specific_obj.populate(machobj) - - self._setup_mach_pes(pecount, multi_driver, ninst, machine_name, mpilib) - - if multi_driver and ninst>1: - logger.info(" Driver/Coupler has %s instances" % ninst) - - #-------------------------------------------- - # archiving system - #-------------------------------------------- - env_archive = self.get_env("archive") - infile_node = files.get_child("entry", {"id":"ARCHIVE_SPEC_FILE"}) - infile = files.get_default_value(infile_node) - infile = self.get_resolved_value(infile) - logger.debug("archive defaults located in {}".format(infile)) - archive = Archive(infile=infile, files=files) - archive.setup(env_archive, self._components, files=files) - - self.set_value("COMPSET",self._compsetname) - - self._set_pio_xml() - logger.info(" Compset is: {} ".format(self._compsetname)) - logger.info(" Grid is: {} ".format(self._gridname )) - logger.info(" Components in compset are: {} ".format(self._components)) - - if not test and not run_unsupported and self._cime_model == "cesm": - if grid_name in science_support: - logger.info("\nThis is a CESM scientifically supported compset at this resolution.\n") - else: - self._check_testlists(compset_alias, grid_name, files) - - self.set_value("REALUSER", os.environ["USER"]) - - # Set project id - if project is None: - project = get_project(machobj) - if project is not None: - self.set_value("PROJECT", project) - elif machobj.get_value("PROJECT_REQUIRED"): - expect(project is not None, "PROJECT_REQUIRED is true but no project found") - # Get charge_account id if it exists - charge_account = get_charge_account(machobj, project) - if charge_account is not None: - self.set_value("CHARGE_ACCOUNT", charge_account) - - # Resolve the CIME_OUTPUT_ROOT variable, other than this - # we don't want to resolve variables until we need them - if output_root is None: - output_root = self.get_value("CIME_OUTPUT_ROOT") - output_root = os.path.abspath(output_root) - self.set_value("CIME_OUTPUT_ROOT", output_root) - if non_local: - self.set_value("EXEROOT", os.path.join(output_root, self.get_value("CASE"), "bld")) - self.set_value("RUNDIR", os.path.join(output_root, self.get_value("CASE"), "run")) - self.set_value("NONLOCAL", True) - - # Overwriting an existing exeroot or rundir can cause problems - exeroot = self.get_value("EXEROOT") - rundir = self.get_value("RUNDIR") - for wdir in (exeroot, rundir): - logging.debug("wdir is {}".format(wdir)) - if os.path.exists(wdir): - expect(not test, "Directory {} already exists, aborting test".format(wdir)) - if answer is None: - response = input("\nDirectory {} already exists, (r)eplace, (a)bort, or (u)se existing?".format(wdir)) - else: - response = answer - - if response.startswith("r"): - shutil.rmtree(wdir) - else: - expect(response.startswith("u"), "Aborting by user request") - - # miscellaneous settings - if self.get_value("RUN_TYPE") == 'hybrid': - self.set_value("GET_REFCASE", True) - - # Turn on short term archiving as cesm default setting - model = get_model() - self.set_model_version(model) 
- if model == "cesm" and not test: - self.set_value("DOUT_S",True) - self.set_value("TIMER_LEVEL", 4) - - if test: - self.set_value("TEST",True) - - self.initialize_derived_attributes() - - #-------------------------------------------- - # batch system (must come after initialize_derived_attributes) - #-------------------------------------------- - env_batch = self.get_env("batch") - - batch_system_type = machobj.get_value("BATCH_SYSTEM") - logger.info("Batch_system_type is {}".format(batch_system_type)) - batch = Batch(batch_system=batch_system_type, machine=machine_name, files=files) - workflow = Workflow(files=files) - bjobs = workflow.get_workflow_jobs(machine=machine_name, workflow_case=workflow_case) - env_workflow = self.get_env("workflow") - - env_batch.set_batch_system(batch, batch_system_type=batch_system_type) - env_workflow.create_job_groups(bjobs, test) - - if walltime: - self.set_value("USER_REQUESTED_WALLTIME", walltime, subgroup=self.get_primary_job()) - if queue: - self.set_value("USER_REQUESTED_QUEUE", queue, subgroup=self.get_primary_job()) - - env_batch.set_job_defaults(bjobs, self) - - # Make sure that parallel IO is not specified if total_tasks==1 - if self.total_tasks == 1: - for compclass in self._component_classes: - key = "PIO_TYPENAME_{}".format(compclass) - pio_typename = self.get_value(key) - if pio_typename in ("pnetcdf", "netcdf4p"): - self.set_value(key, "netcdf") - - if input_dir is not None: - self.set_value("DIN_LOC_ROOT", os.path.abspath(input_dir)) - - def get_compset_var_settings(self, files): - infile=files.get_value("COMPSETS_SPEC_FILE", - attribute={"component":self._primary_component}) - compset_obj = Compsets(infile=infile, files=files) - matches = compset_obj.get_compset_var_settings(self._compsetname, self._gridname) - for name, value in matches: - if len(value) > 0: - logger.info("Compset specific settings: name is {} and value is {}".format(name, value)) - self.set_lookup_value(name, value) - - def set_initial_test_values(self): - testobj = self.get_env("test") - testobj.set_initial_values(self) - - def get_batch_jobs(self): - batchobj = self.get_env("batch") - return batchobj.get_jobs() - - def _set_pio_xml(self): - pioobj = PIO() - grid = self.get_value("GRID") - compiler = self.get_value("COMPILER") - mach = self.get_value("MACH") - compset = self.get_value("COMPSET") - mpilib = self.get_value("MPILIB") - defaults = pioobj.get_defaults(grid=grid,compset=compset,mach=mach,compiler=compiler, mpilib=mpilib) - - for vid, value in defaults.items(): - self.set_value(vid,value) - - def _create_caseroot_tools(self): - machines_dir = os.path.abspath(self.get_value("MACHDIR")) - machine = self.get_value("MACH") - toolsdir = os.path.join(self.get_value("CIMEROOT"),"scripts","Tools") - casetools = os.path.join(self._caseroot, "Tools") - # setup executable files in caseroot/ - exefiles = (os.path.join(toolsdir, "case.setup"), - os.path.join(toolsdir, "case.build"), - os.path.join(toolsdir, "case.submit"), - os.path.join(toolsdir, "case.qstatus"), - os.path.join(toolsdir, "case.cmpgen_namelists"), - os.path.join(toolsdir, "preview_namelists"), - os.path.join(toolsdir, "preview_run"), - os.path.join(toolsdir, "check_input_data"), - os.path.join(toolsdir, "check_case"), - os.path.join(toolsdir, "xmlchange"), - os.path.join(toolsdir, "xmlquery"), - os.path.join(toolsdir, "pelayout")) - try: - for exefile in exefiles: - destfile = os.path.join(self._caseroot,os.path.basename(exefile)) - os.symlink(exefile, destfile) - except Exception as e: - 
logger.warning("FAILED to set up exefiles: {}".format(str(e))) - - toolfiles = [os.path.join(toolsdir, "check_lockedfiles"), - os.path.join(toolsdir, "get_standard_makefile_args"), - os.path.join(toolsdir, "getTiming"), - os.path.join(toolsdir, "save_provenance"), - os.path.join(toolsdir,"Makefile"), - os.path.join(toolsdir,"mkSrcfiles"), - os.path.join(toolsdir,"mkDepends")] - - # used on Titan - if os.path.isfile( os.path.join(toolsdir,"mdiag_reduce.csh") ): - toolfiles.append( os.path.join(toolsdir,"mdiag_reduce.csh") ) - toolfiles.append( os.path.join(toolsdir,"mdiag_reduce.pl") ) - - for toolfile in toolfiles: - destfile = os.path.join(casetools, os.path.basename(toolfile)) - expect(os.path.isfile(toolfile)," File {} does not exist".format(toolfile)) - try: - os.symlink(toolfile, destfile) - except Exception as e: - logger.warning("FAILED to set up toolfiles: {} {} {}".format(str(e), toolfile, destfile)) - - if get_model() == "e3sm": - if os.path.exists(os.path.join(machines_dir, "syslog.{}".format(machine))): - safe_copy(os.path.join(machines_dir, "syslog.{}".format(machine)), os.path.join(casetools, "mach_syslog")) - else: - safe_copy(os.path.join(machines_dir, "syslog.noop"), os.path.join(casetools, "mach_syslog")) - - # add archive_metadata to the CASEROOT but only for CESM - if get_model() == "cesm": - try: - exefile = os.path.join(toolsdir, "archive_metadata") - destfile = os.path.join(self._caseroot,os.path.basename(exefile)) - os.symlink(exefile, destfile) - except Exception as e: - logger.warning("FAILED to set up exefiles: {}".format(str(e))) - - def _create_caseroot_sourcemods(self): - components = self.get_compset_components() - components.extend(['share', 'drv']) - readme_message = """Put source mods for the {component} library in this directory. - -WARNING: SourceMods are not kept under version control, and can easily -become out of date if changes are made to the source code on which they -are based. We only recommend using SourceMods for small, short-term -changes that just apply to one or two cases. For larger or longer-term -changes, including gradual, incremental changes towards a final -solution, we highly recommend making changes in the main source tree, -leveraging version control (git or svn). -""" - - for component in components: - directory = os.path.join(self._caseroot,"SourceMods","src.{}".format(component)) - if not os.path.exists(directory): - os.makedirs(directory) - # Besides giving some information on SourceMods, this - # README file serves one other important purpose: By - # putting a file inside each SourceMods subdirectory, we - # prevent aggressive scrubbers from scrubbing these - # directories due to being empty (which can cause builds - # to fail). - readme_file = os.path.join(directory, "README") - with open(readme_file, "w") as fd: - fd.write(readme_message.format(component=component)) - - if get_model() == "cesm": - # Note: this is CESM specific, given that we are referencing cism explitly - if "cism" in components: - directory = os.path.join(self._caseroot, "SourceMods", "src.cism", "source_cism") - if not os.path.exists(directory): - os.makedirs(directory) - readme_file = os.path.join(directory, "README") - str_to_write = """Put source mods for the source_cism library in this subdirectory. -This includes any files from $COMP_ROOT_DIR_GLC/source_cism. 
Anything -else (e.g., mods to source_glc or drivers) goes in the src.cism -directory, NOT in this subdirectory.""" - - with open(readme_file, "w") as fd: - fd.write(str_to_write) - - def create_caseroot(self, clone=False): - if not os.path.exists(self._caseroot): - # Make the case directory - logger.info(" Creating Case directory {}".format(self._caseroot)) - os.makedirs(self._caseroot) - os.chdir(self._caseroot) - - # Create relevant directories in $self._caseroot - if clone: - newdirs = (LOCKED_DIR, "Tools") - else: - newdirs = ("SourceMods", LOCKED_DIR, "Buildconf", "Tools") - for newdir in newdirs: - os.makedirs(newdir) - - # Open a new README.case file in $self._caseroot - append_status(" ".join(sys.argv), "README.case", caseroot=self._caseroot) - compset_info = "Compset longname is {}".format(self.get_value("COMPSET")) - append_status(compset_info, - "README.case", caseroot=self._caseroot) - append_status("Compset specification file is {}".format(self.get_value("COMPSETS_SPEC_FILE")), - "README.case", caseroot=self._caseroot) - append_status("Pes specification file is {}".format(self.get_value("PES_SPEC_FILE")), - "README.case", caseroot=self._caseroot) - if "forcing" in self._component_description: - append_status("Forcing is {}".format(self._component_description["forcing"]) - ,"README.case", caseroot=self._caseroot) - for component_class in self._component_classes: - if component_class in self._component_description and \ - len(self._component_description[component_class])>0: - append_status("Component {} is {}".format(component_class, self._component_description[component_class]),"README.case", caseroot=self._caseroot) - if component_class == "CPL": - append_status("Using %s coupler instances" % - (self.get_value("NINST_CPL")), - "README.case", caseroot=self._caseroot) - continue - comp_grid = "{}_GRID".format(component_class) - - append_status("{} is {}".format(comp_grid,self.get_value(comp_grid)), - "README.case", caseroot=self._caseroot) - comp = str(self.get_value("COMP_{}".format(component_class))) - user_mods = self._get_comp_user_mods(comp) - if user_mods is not None: - note = "This component includes user_mods {}".format(user_mods) - append_status(note, "README.case", caseroot=self._caseroot) - logger.info(note) - if not clone: - self._create_caseroot_sourcemods() - self._create_caseroot_tools() - - def apply_user_mods(self, user_mods_dir=None): - """ - User mods can be specified on the create_newcase command line (usually when called from create test) - or they can be in the compset definition, or both. - """ - all_user_mods = [] - for comp in self._component_classes: - component = str(self.get_value("COMP_{}".format(comp))) - if component == self._primary_component: - continue - comp_user_mods = self._get_comp_user_mods(component) - if comp_user_mods is not None: - all_user_mods.append(comp_user_mods) - # get the primary last so that it takes precidence over other components - comp_user_mods = self._get_comp_user_mods(self._primary_component) - if comp_user_mods is not None: - all_user_mods.append(comp_user_mods) - if user_mods_dir is not None: - all_user_mods.append(user_mods_dir) - - # This looping order will lead to the specified user_mods_dir taking - # precedence over self._user_mods, if there are any conflicts. 
- for user_mods in all_user_mods: - if os.path.isabs(user_mods): - user_mods_path = user_mods - else: - user_mods_path = self.get_value('USER_MODS_DIR') - user_mods_path = os.path.join(user_mods_path, user_mods) - apply_user_mods(self._caseroot, user_mods_path) - - # User mods may have modified underlying XML files - if all_user_mods: - self.read_xml() - - def _get_comp_user_mods(self, component): - """ - For a component 'foo', gets the value of FOO_USER_MODS. - - Returns None if no value was found, or if the value is an empty string. - """ - comp_user_mods = self.get_value("{}_USER_MODS".format(component.upper())) - #pylint: disable=no-member - if comp_user_mods is None or comp_user_mods == "" or comp_user_mods.isspace(): - return None - else: - return comp_user_mods - - def submit_jobs(self, no_batch=False, job=None, skip_pnl=None, prereq=None, allow_fail=False, - resubmit_immediate=False, mail_user=None, mail_type=None, batch_args=None, - dry_run=False): - env_batch = self.get_env('batch') - result = env_batch.submit_jobs(self, no_batch=no_batch, skip_pnl=skip_pnl, - job=job, user_prereq=prereq, allow_fail=allow_fail, - resubmit_immediate=resubmit_immediate, - mail_user=mail_user, mail_type=mail_type, - batch_args=batch_args, dry_run=dry_run) - return result - - def get_job_info(self): - """ - Get information on batch jobs associated with this case - """ - xml_job_ids = self.get_value("JOB_IDS") - if not xml_job_ids: - return {} - else: - result = {} - job_infos = xml_job_ids.split(", ") # pylint: disable=no-member - for job_info in job_infos: - jobname, jobid = job_info.split(":") - result[jobname] = jobid - - return result - - def report_job_status(self): - jobmap = self.get_job_info() - if not jobmap: - logger.info("No job ids associated with this case. Either case.submit was not run or was run with no-batch") - else: - for jobname, jobid in jobmap.items(): - status = self.get_env("batch").get_status(jobid) - if status: - logger.info("{}: {}".format(jobname, status)) - else: - logger.info("{}: Unable to get status. 
Job may be complete already.".format(jobname)) - - def cancel_batch_jobs(self, jobids): - env_batch = self.get_env('batch') - for jobid in jobids: - success = env_batch.cancel_job(jobid) - if not success: - logger.warning("Failed to kill {}".format(jobid)) - - def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True, overrides=None): - if job is None: - job = self.get_primary_job() - - env_mach_specific = self.get_env('mach_specific') - run_exe = env_mach_specific.get_value("run_exe") - run_misc_suffix = env_mach_specific.get_value("run_misc_suffix") - run_misc_suffix = "" if run_misc_suffix is None else run_misc_suffix - - mpirun_cmd_override = self.get_value("MPI_RUN_COMMAND") - if mpirun_cmd_override not in ["", None, "UNSET"]: - return self.get_resolved_value(mpirun_cmd_override + " " + run_exe + " " + run_misc_suffix) - - # Things that will have to be matched against mpirun element attributes - mpi_attribs = { - "compiler" : self.get_value("COMPILER"), - "mpilib" : self.get_value("MPILIB"), - "threaded" : self.get_build_threaded(), - "queue" : self.get_value("JOB_QUEUE", subgroup=job), - "unit_testing" : False - } - - executable, mpi_arg_list, custom_run_exe, custom_run_misc_suffix = env_mach_specific.get_mpirun(self, mpi_attribs, job) - if custom_run_exe: - logger.info('Using a custom run_exe {}'.format(custom_run_exe)) - run_exe = custom_run_exe - if custom_run_misc_suffix: - logger.info('Using a custom run_misc_suffix {}'.format(custom_run_misc_suffix)) - run_misc_suffix = custom_run_misc_suffix - - # special case for aprun - if executable is not None and "aprun" in executable and not "theta" in self.get_value("MACH"): - aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe, overrides=overrides)[0:2] - if job in ("case.run","case.test"): - expect( (num_nodes + self.spare_nodes) == self.num_nodes, "Not using optimized num nodes") - return self.get_resolved_value(executable + aprun_args + " " + run_misc_suffix, allow_unresolved_envvars=allow_unresolved_envvars) - - else: - mpi_arg_string = " ".join(mpi_arg_list) - - if self.get_value("BATCH_SYSTEM") == "cobalt": - mpi_arg_string += " : " - - return self.get_resolved_value("{} {} {} {}".format(executable if executable is not None else "", mpi_arg_string, run_exe, run_misc_suffix), allow_unresolved_envvars=allow_unresolved_envvars) - - def set_model_version(self, model): - version = "unknown" - srcroot = self.get_value("SRCROOT") - version = get_current_commit(True, srcroot, tag=(model=="cesm")) - - self.set_value("MODEL_VERSION", version) - - if version != "unknown": - logger.info("{} model version found: {}".format(model, version)) - else: - logger.warning("WARNING: No {} Model version found.".format(model)) - - def load_env(self, reset=False, job=None, verbose=False): - if not self._is_env_loaded or reset: - if job is None: - job = self.get_primary_job() - os.environ["OMP_NUM_THREADS"] = str(self.thread_count) - env_module = self.get_env("mach_specific") - env_module.load_env(self, job=job, verbose=verbose) - self._is_env_loaded = True - - def get_build_threaded(self): - """ - Returns True if current settings require a threaded build/run. 
- """ - force_threaded = self.get_value("FORCE_BUILD_SMP") - smp_present = force_threaded or self.thread_count > 1 - return smp_present - - def _check_testlists(self, compset_alias, grid_name, files): - """ - CESM only: check the testlist file for tests of this compset grid combination - - compset_alias should be None for a user-defined compset (i.e., a compset - without an alias) - """ - if "TESTS_SPEC_FILE" in self.lookups: - tests_spec_file = self.get_resolved_value(self.lookups["TESTS_SPEC_FILE"]) - else: - tests_spec_file = self.get_value("TESTS_SPEC_FILE") - - testcnt = 0 - if os.path.isfile(tests_spec_file) and compset_alias is not None: - # It's important that we not try to find matching tests if - # compset_alias is None, since compset=None tells get_tests to find - # tests of all compsets! - # Only collect supported tests as this _check_testlists is only - # called if run_unsupported is False. - tests = Testlist(tests_spec_file, files) - testlist = tests.get_tests(compset=compset_alias, grid=grid_name, supported_only=True) - test_categories = ["prealpha", "prebeta", "test_release", "aux_"] - for test in testlist: - if test["category"] in test_categories \ - or get_cime_default_driver() in test["category"]: - testcnt += 1 - if testcnt > 0: - logger.warning("\n*********************************************************************************************************************************") - logger.warning("This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format(testcnt)) - logger.warning("*********************************************************************************************************************************\n") - else: - expect(False, "\nThis compset and grid combination is untested in CESM. 
" - "Override this warning with the --run-unsupported option to create_newcase.", - error_prefix="STOP: ") - - def set_file(self, xmlfile): - """ - force the case object to consider only xmlfile - """ - expect(os.path.isfile(xmlfile), "Could not find file {}".format(xmlfile)) - - if not self._read_only_mode: - self.flush(flushall=True) - - gfile = GenericXML(infile=xmlfile) - ftype = gfile.get_id() - - logger.warning("setting case file to {}".format(xmlfile)) - components = self.get_value("COMP_CLASSES") - new_env_file = None - for env_file in self._files: - if os.path.basename(env_file.filename) == ftype: - if ftype == "env_run.xml": - new_env_file = EnvRun(infile=xmlfile, components=components) - elif ftype == "env_build.xml": - new_env_file = EnvBuild(infile=xmlfile, components=components) - elif ftype == "env_case.xml": - new_env_file = EnvCase(infile=xmlfile, components=components) - elif ftype == "env_mach_pes.xml": - new_env_file = EnvMachPes(infile=xmlfile, components=components) - elif ftype == "env_batch.xml": - new_env_file = EnvBatch(infile=xmlfile) - elif ftype == "env_workflow.xml": - new_env_file = EnvWorkflow(infile=xmlfile) - elif ftype == "env_test.xml": - new_env_file = EnvTest(infile=xmlfile) - elif ftype == "env_archive.xml": - new_env_file = EnvArchive(infile=xmlfile) - elif ftype == "env_mach_specific.xml": - new_env_file = EnvMachSpecific(infile=xmlfile) - else: - expect(False, "No match found for file type {}".format(ftype)) - - if new_env_file is not None: - self._env_entryid_files = [] - self._env_generic_files = [] - if ftype in ["env_archive.xml", "env_mach_specific.xml"]: - self._env_generic_files = [new_env_file] - else: - self._env_entryid_files = [new_env_file] - - break - - expect(new_env_file is not None, "No match found for file type {}".format(ftype)) - self._files = [new_env_file] - - def update_env(self, new_object, env_file, blow_away=False): - """ - Replace a case env object file - """ - old_object = self.get_env(env_file) - if not blow_away: - expect(not old_object.needsrewrite, "Potential loss of unflushed changes in {}".format(env_file)) - - new_object.filename = old_object.filename - if old_object in self._env_entryid_files: - self._env_entryid_files.remove(old_object) - self._env_entryid_files.append(new_object) - elif old_object in self._env_generic_files: - self._env_generic_files.remove(old_object) - self._env_generic_files.append(new_object) - self._files.remove(old_object) - self._files.append(new_object) - - def get_latest_cpl_log(self, coupler_log_path=None, cplname="cpl"): - """ - find and return the latest cpl log file in the - coupler_log_path directory - """ - if coupler_log_path is None: - coupler_log_path = self.get_value("RUNDIR") - cpllog = None - cpllogs = glob.glob(os.path.join(coupler_log_path, '{}.log.*'.format(cplname))) - if cpllogs: - cpllog = max(cpllogs, key=os.path.getctime) - return cpllog - else: - return None - - def create(self, casename, srcroot, compset_name, grid_name, - user_mods_dir=None, machine_name=None, - project=None, pecount=None, compiler=None, mpilib=None, - pesfile=None, gridfile=None, - multi_driver=False, ninst=1, test=False, - walltime=None, queue=None, output_root=None, - run_unsupported=False, answer=None, - input_dir=None, driver=None, workflow_case="default", non_local=False): - try: - # Set values for env_case.xml - self.set_lookup_value("CASE", os.path.basename(casename)) - self.set_lookup_value("CASEROOT", self._caseroot) - self.set_lookup_value("SRCROOT", srcroot) - - # Configure the Case - 
self.configure(compset_name, grid_name, machine_name=machine_name, - project=project, - pecount=pecount, compiler=compiler, mpilib=mpilib, - pesfile=pesfile, gridfile=gridfile, - multi_driver=multi_driver, ninst=ninst, test=test, - walltime=walltime, queue=queue, - output_root=output_root, - run_unsupported=run_unsupported, answer=answer, - input_dir=input_dir, driver=driver, - workflow_case=workflow_case, non_local=non_local) - - self.create_caseroot() - - # Write out the case files - self.flush(flushall=True) - self.apply_user_mods(user_mods_dir) - - # Lock env_case.xml - lock_file("env_case.xml", self._caseroot) - except Exception: - if os.path.exists(self._caseroot): - if not logger.isEnabledFor(logging.DEBUG) and not test: - logger.warning("Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format(self._caseroot)) - shutil.rmtree(self._caseroot) - else: - logger.warning("Leaving broken case dir {}".format(self._caseroot)) - - raise - - def is_save_timing_dir_project(self,project): - """ - Check whether the project is permitted to archive performance data in the location - specified for the current machine - """ - save_timing_dir_projects = self.get_value("SAVE_TIMING_DIR_PROJECTS") - if not save_timing_dir_projects: - return False - else: - save_timing_dir_projects = save_timing_dir_projects.split(",") # pylint: disable=no-member - for save_timing_dir_project in save_timing_dir_projects: - regex = re.compile(save_timing_dir_project) - if regex.match(project): - return True - - return False - - def get_primary_job(self): - return "case.test" if self.get_value("TEST") else "case.run" diff --git a/scripts/lib/CIME/case/case_clone.py b/scripts/lib/CIME/case/case_clone.py deleted file mode 100644 index 2a873340233..00000000000 --- a/scripts/lib/CIME/case/case_clone.py +++ /dev/null @@ -1,191 +0,0 @@ -""" -create_clone is a member of the Case class from file case.py -""" -import os, glob, shutil -from CIME.XML.standard_module_setup import * -from CIME.utils import expect, check_name, safe_copy -from CIME.simple_compare import compare_files -from CIME.locked_files import lock_file -from CIME.user_mod_support import apply_user_mods - -logger = logging.getLogger(__name__) - -def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, - cime_output_root=None, exeroot=None, rundir=None, - user_mods_dir=None): - """ - Create a case clone - - If exeroot or rundir are provided (not None), sets these directories - to the given paths; if not provided, uses default values for these - directories. It is an error to provide exeroot if keepexe is True. 
- """ - if cime_output_root is None: - cime_output_root = self.get_value("CIME_OUTPUT_ROOT") - - newcaseroot = os.path.abspath(newcaseroot) - expect(not os.path.isdir(newcaseroot), - "New caseroot directory {} already exists".format(newcaseroot)) - newcasename = os.path.basename(newcaseroot) - expect(check_name(newcasename), - "New case name invalid {} ".format(newcasename)) - newcase_cimeroot = os.path.abspath(get_cime_root()) - - # create clone from case to case - clone_cimeroot = self.get_value("CIMEROOT") - if newcase_cimeroot != clone_cimeroot: - logger.warning(" case CIMEROOT is {} ".format(newcase_cimeroot)) - logger.warning(" clone CIMEROOT is {} ".format(clone_cimeroot)) - logger.warning(" It is NOT recommended to clone cases from different versions of CIME.") - - # *** create case object as deepcopy of clone object *** - srcroot = os.path.join(newcase_cimeroot,"..") - newcase = self.copy(newcasename, newcaseroot, newsrcroot=srcroot) - with newcase: - newcase.set_value("CIMEROOT", newcase_cimeroot) - - # if we are cloning to a different user modify the output directory - olduser = self.get_value("USER") - newuser = os.environ.get("USER") - if olduser != newuser: - cime_output_root = cime_output_root.replace(olduser, newuser) - newcase.set_value("USER", newuser) - newcase.set_value("CIME_OUTPUT_ROOT", cime_output_root) - - # try to make the new output directory and raise an exception - # on any error other than directory already exists. - if os.path.isdir(cime_output_root): - expect(os.access(cime_output_root, os.W_OK), "Directory {} is not writable " - "by this user. Use the --cime-output-root flag to provide a writable " - "scratch directory".format(cime_output_root)) - else: - if not os.path.isdir(cime_output_root): - os.makedirs(cime_output_root) - - # determine if will use clone executable or not - if keepexe: - orig_exeroot = self.get_value("EXEROOT") - newcase.set_value("EXEROOT", orig_exeroot) - newcase.set_value("BUILD_COMPLETE","TRUE") - orig_bld_complete = self.get_value("BUILD_COMPLETE") - if not orig_bld_complete: - logger.warning("\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone") - logger.warning("Avoid this message by building case one before you clone.\n") - else: - newcase.set_value("BUILD_COMPLETE","FALSE") - - # set machdir - if mach_dir is not None: - newcase.set_value("MACHDIR", mach_dir) - - # set exeroot and rundir if requested - if exeroot is not None: - expect(not keepexe, "create_case_clone: if keepexe is True, " - "then exeroot cannot be set") - newcase.set_value("EXEROOT", exeroot) - if rundir is not None: - newcase.set_value("RUNDIR", rundir) - - # Set project id - # Note: we do not just copy this from the clone because it seems likely that - # users will want to change this sometimes, especially when cloning another - # user's case. However, note that, if a project is not given, the fallback will - # be to copy it from the clone, just like other xml variables are copied. - if project is None: - project = self.get_value("PROJECT", subgroup=self.get_primary_job()) - if project is not None: - newcase.set_value("PROJECT", project) - - # create caseroot - newcase.create_caseroot(clone=True) - - # Many files in the case will be links back to the source tree - # but users may have broken links to modify files locally. In this case - # copy the locally modified file. We only want to do this for files that - # already exist in the clone. 
- #pylint: disable=protected-access - self._copy_user_modified_to_clone(self.get_value("CASEROOT"), newcase.get_value("CASEROOT")) - self._copy_user_modified_to_clone(self.get_value("CASETOOLS"), newcase.get_value("CASETOOLS")) - - newcase.flush(flushall=True) - - # copy user_ files - cloneroot = self.get_case_root() - files = glob.glob(cloneroot + '/user_*') - - for item in files: - safe_copy(item, newcaseroot) - - # copy SourceMod and Buildconf files - # if symlinks exist, copy rather than follow links - for casesub in ("SourceMods", "Buildconf"): - shutil.copytree(os.path.join(cloneroot, casesub), - os.path.join(newcaseroot, casesub), - symlinks=True) - - # copy the postprocessing directory if it exists - if os.path.isdir(os.path.join(cloneroot, "postprocess")): - shutil.copytree(os.path.join(cloneroot, "postprocess"), - os.path.join(newcaseroot, "postprocess"), - symlinks=True) - - - - # lock env_case.xml in new case - lock_file("env_case.xml", newcaseroot) - - # apply user_mods if appropriate - newcase_root = newcase.get_value("CASEROOT") - if user_mods_dir is not None: - if keepexe: - # If keepexe CANNOT change any env_build.xml variables - so make a temporary copy of - # env_build.xml and verify that it has not been modified - safe_copy(os.path.join(newcaseroot, "env_build.xml"), - os.path.join(newcaseroot, "LockedFiles", "env_build.xml")) - - # Now apply contents of user_mods directory - apply_user_mods(newcase_root, user_mods_dir, keepexe=keepexe) - - # Determine if env_build.xml has changed - if keepexe: - success, comment = compare_files(os.path.join(newcaseroot, "env_build.xml"), - os.path.join(newcaseroot, "LockedFiles", "env_build.xml")) - if not success: - logger.warning(comment) - shutil.rmtree(newcase_root) - expect(False, "env_build.xml cannot be changed via usermods if keepexe is an option: \n " - "Failed to clone case, removed {}\n".format(newcase_root)) - - # if keep executable, then remove the new case SourceMods directory and link SourceMods to - # the clone directory - if keepexe: - shutil.rmtree(os.path.join(newcase_root, "SourceMods")) - os.symlink(os.path.join(cloneroot, "SourceMods"), - os.path.join(newcase_root, "SourceMods")) - - # Update README.case - fclone = open(cloneroot + "/README.case", "r") - fnewcase = open(newcaseroot + "/README.case", "a") - fnewcase.write("\n *** original clone README follows ****") - fnewcase.write("\n " + fclone.read()) - - clonename = self.get_value("CASE") - logger.info(" Successfully created new case {} from clone case {} ".format(newcasename, clonename)) - - newcase.case_setup() - - return newcase - -# pylint: disable=unused-argument -def _copy_user_modified_to_clone(self, origpath, newpath): - """ - If file_ exists and is a link in newpath, and exists but is not a - link in origpath, copy origpath file to newpath - """ - for file_ in os.listdir(newpath): - if (os.path.islink(os.path.join(newpath, file_)) and - os.path.isfile(os.path.join(origpath, file_)) and - not os.path.islink(os.path.join(origpath, file_))): - logger.info("Copying user modified file {} to clone".format(file_)) - os.unlink(os.path.join(newpath, file_)) - safe_copy(os.path.join(origpath, file_), newpath) diff --git a/scripts/lib/CIME/case/case_cmpgen_namelists.py b/scripts/lib/CIME/case/case_cmpgen_namelists.py deleted file mode 100644 index 5252c858592..00000000000 --- a/scripts/lib/CIME/case/case_cmpgen_namelists.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -Library for case.cmpgen_namelists. 
-case_cmpgen_namelists is a member of Class case from file case.py -""" - -from CIME.XML.standard_module_setup import * - -from CIME.compare_namelists import is_namelist_file, compare_namelist_files -from CIME.simple_compare import compare_files, compare_runconfigfiles -from CIME.utils import append_status, safe_copy, SharedArea -from CIME.test_status import * - -import os, shutil, traceback, stat, glob -from distutils import dir_util - -logger = logging.getLogger(__name__) - -def _do_full_nl_comp(case, test, compare_name, baseline_root=None): - test_dir = case.get_value("CASEROOT") - casedoc_dir = os.path.join(test_dir, "CaseDocs") - baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root - - all_match = True - baseline_dir = os.path.join(baseline_root, compare_name, test) - baseline_casedocs = os.path.join(baseline_dir, "CaseDocs") - - # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!) - # TODO: Namelist files should have consistent suffix - all_items_to_compare = [item for item in glob.glob("{}/*".format(casedoc_dir))\ - if "README" not in os.path.basename(item)\ - and not item.endswith("doc")\ - and not item.endswith("prescribed")\ - and not os.path.basename(item).startswith(".")] - - comments = "NLCOMP\n" - for item in all_items_to_compare: - baseline_counterpart = os.path.join(baseline_casedocs \ - if os.path.dirname(item).endswith("CaseDocs") \ - else baseline_dir,os.path.basename(item)) - if not os.path.exists(baseline_counterpart): - comments += "Missing baseline namelist '{}'\n".format(baseline_counterpart) - all_match = False - else: - if item.endswith("runconfig") or item.endswith("runseq"): - success, current_comments = compare_runconfigfiles(baseline_counterpart, item, test) - elif is_namelist_file(item): - success, current_comments = compare_namelist_files(baseline_counterpart, item, test) - else: - success, current_comments = compare_files(baseline_counterpart, item, test) - - all_match &= success - if not success: - comments += "Comparison failed between '{}' with '{}'\n".format(item, baseline_counterpart) - - comments += current_comments - - logging.info(comments) - return all_match, comments - -def _do_full_nl_gen_impl(case, test, generate_name, baseline_root=None): - test_dir = case.get_value("CASEROOT") - casedoc_dir = os.path.join(test_dir, "CaseDocs") - baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root - - baseline_dir = os.path.join(baseline_root, generate_name, test) - baseline_casedocs = os.path.join(baseline_dir, "CaseDocs") - - if not os.path.isdir(baseline_dir): - os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH) - - if os.path.isdir(baseline_casedocs): - shutil.rmtree(baseline_casedocs) - - dir_util.copy_tree(casedoc_dir, baseline_casedocs, preserve_mode=False) - - for item in glob.glob(os.path.join(test_dir, "user_nl*")): - preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item)) - if (os.path.exists(preexisting_baseline)): - os.remove(preexisting_baseline) - - safe_copy(item, baseline_dir, preserve_meta=False) - -def _do_full_nl_gen(case, test, generate_name, baseline_root=None): - with SharedArea(): - _do_full_nl_gen_impl(case, test, generate_name, baseline_root=baseline_root) - -def case_cmpgen_namelists(self, compare=False, generate=False, compare_name=None, generate_name=None, baseline_root=None, logfile_name="TestStatus.log"): - expect(self.get_value("TEST"), "Only makes sense to run this for 
a test case") - - caseroot, casebaseid = self.get_value("CASEROOT"), self.get_value("CASEBASEID") - - if not compare: - compare = self.get_value("COMPARE_BASELINE") - if not generate: - generate = self.get_value("GENERATE_BASELINE") - - if not compare and not generate: - logging.debug("No namelists compares requested") - return True - - # create namelists for case if they haven't been already - casedocs = os.path.join(caseroot, "CaseDocs") - if not os.path.exists(os.path.join(casedocs, "drv_in")): - self.create_namelists() - - test_name = casebaseid if casebaseid is not None else self.get_value("CASE") - with TestStatus(test_dir=caseroot, test_name=test_name) as ts: - try: - # Inside this try are where we catch non-fatal errors, IE errors involving - # baseline operations which may not directly impact the functioning of the viability of this case - if compare and not compare_name: - compare_name = self.get_value("BASELINE_NAME_CMP") - expect(compare_name, "Was asked to do baseline compare but unable to determine baseline name") - logging.info("Comparing namelists with baselines '{}'".format(compare_name)) - if generate and not generate_name: - generate_name = self.get_value("BASELINE_NAME_GEN") - expect(generate_name, "Was asked to do baseline generation but unable to determine baseline name") - logging.info("Generating namelists to baselines '{}'".format(generate_name)) - - success = True - output = "" - if compare: - success, output = _do_full_nl_comp(self, test_name, compare_name, baseline_root) - if not success and ts.get_status(RUN_PHASE) is not None: - run_warn = \ -"""NOTE: It is not necessarily safe to compare namelists after RUN -phase has completed. Running a case can pollute namelists. The namelists -kept in the baselines are pre-RUN namelists.""" - output += run_warn - logging.info(run_warn) - if generate: - _do_full_nl_gen(self, test_name, generate_name, baseline_root) - except Exception: - success = False - ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS) - warn = "Exception during namelist operations:\n{}\n{}".format(sys.exc_info()[1], traceback.format_exc()) - output += warn - logging.warning(warn) - finally: - ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS) - try: - append_status(output, logfile_name, caseroot=caseroot) - except IOError: - pass - - return success diff --git a/scripts/lib/CIME/case/case_run.py b/scripts/lib/CIME/case/case_run.py deleted file mode 100644 index 289d616ed5e..00000000000 --- a/scripts/lib/CIME/case/case_run.py +++ /dev/null @@ -1,346 +0,0 @@ -""" -case_run is a member of Class Case -'""" -from CIME.XML.standard_module_setup import * -from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status -from CIME.utils import run_sub_or_cmd, append_status, safe_copy, model_log -from CIME.get_timing import get_timing -from CIME.provenance import save_prerun_provenance, save_postrun_provenance - -import shutil, time, sys, os, glob - -logger = logging.getLogger(__name__) - -############################################################################### -def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0): -############################################################################### - - # Pre run initialization code.. 
- if da_cycle > 0: - case.create_namelists(component='cpl') - return - - caseroot = case.get_value("CASEROOT") - din_loc_root = case.get_value("DIN_LOC_ROOT") - rundir = case.get_value("RUNDIR") - - if case.get_value("TESTCASE") == "PFS": - env_mach_pes = os.path.join(caseroot,"env_mach_pes.xml") - safe_copy(env_mach_pes,"{}.{}".format(env_mach_pes, lid)) - - # check for locked files, may impact BUILD_COMPLETE - skip = None - if case.get_value("EXTERNAL_WORKFLOW"): - skip = "env_batch" - case.check_lockedfiles(skip=skip) - logger.debug("check_lockedfiles OK") - build_complete = case.get_value("BUILD_COMPLETE") - - # check that build is done - expect(build_complete, - "BUILD_COMPLETE is not true\nPlease rebuild the model interactively") - logger.debug("build complete is {} ".format(build_complete)) - - # load the module environment... - case.load_env(reset=True) - - # create the timing directories, optionally cleaning them if needed. - if os.path.isdir(os.path.join(rundir, "timing")): - shutil.rmtree(os.path.join(rundir, "timing")) - - os.makedirs(os.path.join(rundir, "timing", "checkpoints")) - - # This needs to be done everytime the LID changes in order for log files to be set up correctly - # The following also needs to be called in case a user changes a user_nl_xxx file OR an env_run.xml - # variable while the job is in the queue - model_log("e3sm", logger, "{} NAMELIST CREATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - if skip_pnl: - case.create_namelists(component='cpl') - else: - logger.info("Generating namelists for {}".format(caseroot)) - case.create_namelists() - - model_log("e3sm", logger, "{} NAMELIST CREATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - logger.info("-------------------------------------------------------------------------") - logger.info(" - Prestage required restarts into {}".format(rundir)) - logger.info(" - Case input data directory (DIN_LOC_ROOT) is {} ".format(din_loc_root)) - logger.info(" - Checking for required input datasets in DIN_LOC_ROOT") - logger.info("-------------------------------------------------------------------------") - -############################################################################### -def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0): -############################################################################### - - model_log("e3sm", logger, "{} PRE_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - _pre_run_check(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) - model_log("e3sm", logger, "{} PRE_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - model = case.get_value("MODEL") - - # Set OMP_NUM_THREADS - os.environ["OMP_NUM_THREADS"] = str(case.thread_count) - - # Run the model - cmd = case.get_mpirun_cmd(allow_unresolved_envvars=False) - logger.info("run command is {} ".format(cmd)) - - rundir = case.get_value("RUNDIR") - loop = True - - # MPIRUN_RETRY_REGEX allows the mpi command to be reattempted if the - # failure described by that regular expression is matched in the model log - # case.spare_nodes is overloaded and may also represent the number of - # retries to attempt if ALLOCATE_SPARE_NODES is False - retry_run_re = case.get_value("MPIRUN_RETRY_REGEX") - node_fail_re = case.get_value("NODE_FAIL_REGEX") - retry_count = 0 - if retry_run_re: - retry_run_regex = re.compile(re.escape(retry_run_re)) - retry_count = case.get_value("MPIRUN_RETRY_COUNT") - if node_fail_re: - node_fail_regex = re.compile(re.escape(node_fail_re)) - - while 
loop: - loop = False - - model_log("e3sm", logger, "{} SAVE_PRERUN_PROVENANCE BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - save_prerun_provenance(case) - model_log("e3sm", logger, "{} SAVE_PRERUN_PROVENANCE HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - model_log("e3sm", logger, "{} MODEL EXECUTION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - run_func = lambda: run_cmd(cmd, from_dir=rundir)[0] - stat = run_and_log_case_status(run_func, "model execution", caseroot=case.get_value("CASEROOT")) - model_log("e3sm", logger, "{} MODEL EXECUTION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - model_logfile = os.path.join(rundir, model + ".log." + lid) - # Determine if failure was due to a failed node, if so, try to restart - if retry_run_re or node_fail_re: - model_logfile = os.path.join(rundir, model + ".log." + lid) - if os.path.exists(model_logfile): - num_node_fails=0 - num_retry_fails=0 - if node_fail_re: - num_node_fails = len(node_fail_regex.findall(open(model_logfile, 'r').read())) - if retry_run_re: - num_retry_fails = len(retry_run_regex.findall(open(model_logfile, 'r').read())) - logger.debug ("RETRY: num_retry_fails {} spare_nodes {} retry_count {}". - format(num_retry_fails, case.spare_nodes, retry_count)) - if num_node_fails > 0 and case.spare_nodes >= num_node_fails: - # We failed due to node failure! - logger.warning("Detected model run failed due to node failure, restarting") - case.spare_nodes -= num_node_fails - loop = True - case.set_value("CONTINUE_RUN", - case.get_value("RESUBMIT_SETS_CONTINUE_RUN")) - elif num_retry_fails > 0 and retry_count >= num_retry_fails: - logger.warning("Detected model run failed, restarting") - retry_count -= 1 - loop = True - if loop: - # Archive the last consistent set of restart files and restore them - if case.get_value("DOUT_S"): - case.case_st_archive(resubmit=False) - case.restore_from_archive() - - lid = new_lid() - case.create_namelists() - - if stat != 0 and not loop: - # We failed and we're not restarting - expect(False, "RUN FAIL: Command '{}' failed\nSee log file for details: {}".format(cmd, model_logfile)) - - model_log("e3sm", logger, "{} POST_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - _post_run_check(case, lid) - model_log("e3sm", logger, "{} POST_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - return lid - -############################################################################### -def _run_model(case, lid, skip_pnl=False, da_cycle=0): -############################################################################### - functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) - return run_and_log_case_status(functor, "case.run", caseroot=case.get_value("CASEROOT")) - -############################################################################### -def _post_run_check(case, lid): -############################################################################### - - rundir = case.get_value("RUNDIR") - model = case.get_value("MODEL") - driver = case.get_value("COMP_INTERFACE") - - if driver == 'nuopc': - file_prefix = 'med' - else: - file_prefix = 'cpl' - - cpl_ninst = 1 - if case.get_value("MULTI_DRIVER"): - cpl_ninst = case.get_value("NINST_MAX") - cpl_logs = [] - - if cpl_ninst > 1: - for inst in range(cpl_ninst): - cpl_logs.append(os.path.join(rundir, file_prefix + "_%04d.log." % (inst+1) + lid)) - else: - cpl_logs = [os.path.join(rundir, file_prefix + ".log." 
+ lid)] - - cpl_logfile = cpl_logs[0] - - # find the last model.log and cpl.log - model_logfile = os.path.join(rundir, model + ".log." + lid) - - if not os.path.isfile(model_logfile): - expect(False, "Model did not complete, no {} log file ".format(model_logfile)) - elif os.stat(model_logfile).st_size == 0: - expect(False, "Run FAILED") - else: - count_ok = 0 - for cpl_logfile in cpl_logs: - if not os.path.isfile(cpl_logfile): - break - with open(cpl_logfile, 'r') as fd: - if 'SUCCESSFUL TERMINATION' in fd.read(): - count_ok += 1 - if count_ok != cpl_ninst: - expect(False, "Model did not complete - see {} \n " .format(cpl_logfile)) - -############################################################################### -def _save_logs(case, lid): -############################################################################### - rundir = case.get_value("RUNDIR") - logfiles = glob.glob(os.path.join(rundir, "*.log.{}".format(lid))) - for logfile in logfiles: - if os.path.isfile(logfile): - gzip_existing_file(logfile) - -###################################################################################### -def _resubmit_check(case): -############################################################################### - - # check to see if we need to do resubmission from this particular job, - # Note that Mira requires special logic - - dout_s = case.get_value("DOUT_S") - logger.warning("dout_s {} ".format(dout_s)) - mach = case.get_value("MACH") - logger.warning("mach {} ".format(mach)) - resubmit_num = case.get_value("RESUBMIT") - logger.warning("resubmit_num {}".format(resubmit_num)) - # If dout_s is True than short-term archiving handles the resubmit - # If dout_s is True and machine is mira submit the st_archive script - resubmit = False - if not dout_s and resubmit_num > 0: - resubmit = True - elif dout_s and mach == 'mira': - caseroot = case.get_value("CASEROOT") - cimeroot = case.get_value("CIMEROOT") - cmd = "ssh cooleylogin1 'cd {case}; CIMEROOT={root} ./case.submit {case} --job case.st_archive'".format(case=caseroot, root=cimeroot) - run_cmd(cmd, verbose=True) - - if resubmit: - job = case.get_primary_job() - - case.submit(job=job, resubmit=True) - - logger.debug("resubmit after check is {}".format(resubmit)) - -############################################################################### -def _do_external(script_name, caseroot, rundir, lid, prefix): -############################################################################### - expect(os.path.isfile(script_name), "External script {} not found".format(script_name)) - filename = "{}.external.log.{}".format(prefix, lid) - outfile = os.path.join(rundir, filename) - append_status("Starting script {}".format(script_name), "CaseStatus") - run_sub_or_cmd(script_name, [caseroot], (os.path.basename(script_name).split('.',1))[0], [caseroot], logfile=outfile) # For sub, use case? - append_status("Completed script {}".format(script_name), "CaseStatus") - -############################################################################### -def _do_data_assimilation(da_script, caseroot, cycle, lid, rundir): -############################################################################### - expect(os.path.isfile(da_script), "Data Assimilation script {} not found".format(da_script)) - filename = "da.log.{}".format(lid) - outfile = os.path.join(rundir, filename) - run_sub_or_cmd(da_script, [caseroot, cycle], os.path.basename(da_script), [caseroot, cycle], logfile=outfile) # For sub, use case? 
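# --- Editor's note (illustrative sketch, not part of the patch) -----------
# _do_external and _do_data_assimilation above invoke the user's hook via
# run_sub_or_cmd, passing the caseroot (plus the cycle index for data
# assimilation) and capturing output to <prefix>.external.log.<lid> or
# da.log.<lid> in RUNDIR. A minimal PRERUN_SCRIPT/POSTRUN_SCRIPT hook could
# look like the sketch below; the file name my_prerun.py is hypothetical,
# and run_sub_or_cmd may import the file and call a function named after it
# rather than executing it as a command, so both entry points are provided.
import sys

def my_prerun(caseroot):
    print("prerun hook running for case {}".format(caseroot))

if __name__ == "__main__":
    my_prerun(sys.argv[1])
# ---------------------------------------------------------------------------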
- -############################################################################### -def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=False): -############################################################################### - model_log("e3sm", logger, "{} CASE.RUN BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - # Set up the run, run the model, do the postrun steps - prerun_script = self.get_value("PRERUN_SCRIPT") - postrun_script = self.get_value("POSTRUN_SCRIPT") - - data_assimilation_cycles = self.get_value("DATA_ASSIMILATION_CYCLES") - data_assimilation_script = self.get_value("DATA_ASSIMILATION_SCRIPT") - data_assimilation = (data_assimilation_cycles > 0 and - len(data_assimilation_script) > 0 and - os.path.isfile(data_assimilation_script)) - - - # set up the LID - lid = new_lid() - - if prerun_script: - model_log("e3sm", logger, "{} PRERUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - self.flush() - _do_external(prerun_script, self.get_value("CASEROOT"), self.get_value("RUNDIR"), - lid, prefix="prerun") - self.read_xml() - model_log("e3sm", logger, "{} PRERUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - for cycle in range(data_assimilation_cycles): - # After the first DA cycle, runs are restart runs - if cycle > 0: - lid = new_lid() - self.set_value("CONTINUE_RUN", - self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) - - model_log("e3sm", logger, "{} RUN_MODEL BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - lid = _run_model(self, lid, skip_pnl, da_cycle=cycle) - model_log("e3sm", logger, "{} RUN_MODEL HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - if self.get_value("CHECK_TIMING") or self.get_value("SAVE_TIMING"): - model_log("e3sm", logger, "{} GET_TIMING BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - get_timing(self, lid) # Run the getTiming script - model_log("e3sm", logger, "{} GET_TIMING HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - - if data_assimilation: - model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - self.flush() - _do_data_assimilation(data_assimilation_script, self.get_value("CASEROOT"), cycle, lid, - self.get_value("RUNDIR")) - self.read_xml() - model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - _save_logs(self, lid) # Copy log files back to caseroot - - model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - save_postrun_provenance(self) - model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - if postrun_script: - model_log("e3sm", logger, "{} POSTRUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - self.flush() - _do_external(postrun_script, self.get_value("CASEROOT"), self.get_value("RUNDIR"), - lid, prefix="postrun") - self.read_xml() - _save_logs(self, lid) - model_log("e3sm", logger, "{} POSTRUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - if set_continue_run: - self.set_value("CONTINUE_RUN", - self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) - - external_workflow = self.get_value("EXTERNAL_WORKFLOW") - if not external_workflow: - logger.warning("check for resubmit") - - logger.debug("submit_resubmits is {}".format(submit_resubmits)) - if submit_resubmits: - _resubmit_check(self) - - - model_log("e3sm", logger, "{} CASE.RUN HAS 
FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - return True diff --git a/scripts/lib/CIME/case/case_setup.py b/scripts/lib/CIME/case/case_setup.py deleted file mode 100644 index 4966edf6f6f..00000000000 --- a/scripts/lib/CIME/case/case_setup.py +++ /dev/null @@ -1,250 +0,0 @@ -""" -Library for case.setup. -case_setup is a member of class Case from file case.py -""" - -from CIME.XML.standard_module_setup import * - -from CIME.XML.machines import Machines -from CIME.BuildTools.configure import configure -from CIME.utils import get_cime_root, run_and_log_case_status, get_model, get_batch_script_for_job, safe_copy -from CIME.test_status import * -from CIME.locked_files import unlock_file, lock_file - -logger = logging.getLogger(__name__) - -############################################################################### -def _build_usernl_files(case, model, comp): -############################################################################### - """ - Create user_nl_xxx files, expects cwd is caseroot - """ - model = model.upper() - if model == "DRV": - model_file = case.get_value("CONFIG_CPL_FILE") - else: - model_file = case.get_value("CONFIG_{}_FILE".format(model)) - expect(model_file is not None, - "Could not locate CONFIG_{}_FILE in config_files.xml".format(model)) - model_dir = os.path.dirname(model_file) - - expect(os.path.isdir(model_dir), - "cannot find cime_config directory {} for component {}".format(model_dir, comp)) - comp_interface = case.get_value("COMP_INTERFACE") - multi_driver = case.get_value("MULTI_DRIVER") - ninst = 1 - - if multi_driver: - ninst_max = case.get_value("NINST_MAX") - if comp_interface != "nuopc" and model not in ("DRV","CPL","ESP"): - ninst_model = case.get_value("NINST_{}".format(model)) - expect(ninst_model==ninst_max,"MULTI_DRIVER mode, all components must have same NINST value. 
NINST_{} != {}".format(model,ninst_max)) - if comp == "cpl": - if not os.path.exists("user_nl_cpl"): - safe_copy(os.path.join(model_dir, "user_nl_cpl"), ".") - else: - if comp_interface == "nuopc": - ninst = case.get_value("NINST") - elif ninst == 1: - ninst = case.get_value("NINST_{}".format(model)) - nlfile = "user_nl_{}".format(comp) - model_nl = os.path.join(model_dir, nlfile) - if ninst > 1: - for inst_counter in range(1, ninst+1): - inst_nlfile = "{}_{:04d}".format(nlfile, inst_counter) - if not os.path.exists(inst_nlfile): - # If there is a user_nl_foo in the case directory, copy it - # to user_nl_foo_INST; otherwise, copy the original - # user_nl_foo from model_dir - if os.path.exists(nlfile): - safe_copy(nlfile, inst_nlfile) - elif os.path.exists(model_nl): - safe_copy(model_nl, inst_nlfile) - else: - # ninst = 1 - if not os.path.exists(nlfile): - if os.path.exists(model_nl): - safe_copy(model_nl, nlfile) - -############################################################################### -def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): -############################################################################### - os.chdir(caseroot) - - non_local = case.get_value("NONLOCAL") - - # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests - if not non_local: - din_loc_root = case.get_value("DIN_LOC_ROOT") - testcase = case.get_value("TESTCASE") - expect(not (not os.path.isdir(din_loc_root) and testcase != "SBN"), - "inputdata root is not a directory: {}".format(din_loc_root)) - - # Remove batch scripts - if reset or clean: - # clean batch script - batch_script = get_batch_script_for_job(case.get_primary_job()) - if os.path.exists(batch_script): - os.remove(batch_script) - logger.info("Successfully cleaned batch script {}".format(batch_script)) - - if not test_mode: - # rebuild the models (even on restart) - case.set_value("BUILD_COMPLETE", False) - - if not clean: - if not non_local: - case.load_env() - - models = case.get_values("COMP_CLASSES") - mach = case.get_value("MACH") - compiler = case.get_value("COMPILER") - debug = case.get_value("DEBUG") - mpilib = case.get_value("MPILIB") - sysos = case.get_value("OS") - comp_interface = case.get_value("COMP_INTERFACE") - expect(mach is not None, "xml variable MACH is not set") - - # creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler - # and env_mach_specific.xml if they don't already exist. - if not os.path.isfile("Macros.make") or not os.path.isfile("env_mach_specific.xml"): - configure(Machines(machine=mach), caseroot, ["Makefile"], compiler, mpilib, debug, comp_interface, sysos) - - # Also write out Cmake macro file - if not os.path.isfile("Macros.cmake"): - configure(Machines(machine=mach), caseroot, ["CMake"], compiler, mpilib, debug, comp_interface, sysos) - - # Set tasks to 1 if mpi-serial library - if mpilib == "mpi-serial": - case.set_value("NTASKS", 1) - - # Check ninst. - # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component. 
- comp_interface = case.get_value("COMP_INTERFACE") - if comp_interface == "nuopc": - ninst = case.get_value("NINST") - - multi_driver = case.get_value("MULTI_DRIVER") - - for comp in models: - ntasks = case.get_value("NTASKS_{}".format(comp)) - if comp == "CPL": - continue - if comp_interface != "nuopc": - ninst = case.get_value("NINST_{}".format(comp)) - if multi_driver: - if comp_interface != "nuopc": - expect(case.get_value("NINST_LAYOUT_{}".format(comp)) == "concurrent", - "If multi_driver is TRUE, NINST_LAYOUT_{} must be concurrent".format(comp)) - case.set_value("NTASKS_PER_INST_{}".format(comp), ntasks) - else: - if ninst > ntasks: - if ntasks == 1: - case.set_value("NTASKS_{}".format(comp), ninst) - ntasks = ninst - else: - expect(False, "NINST_{comp} value {ninst} greater than NTASKS_{comp} {ntasks}".format(comp=comp, ninst=ninst, ntasks=ntasks)) - - case.set_value("NTASKS_PER_INST_{}".format(comp), max(1,int(ntasks / ninst))) - - if os.path.exists(get_batch_script_for_job(case.get_primary_job())): - logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") - - case.initialize_derived_attributes() - - case.set_value("SMP_PRESENT", case.get_build_threaded()) - - else: - case.check_pelayouts_require_rebuild(models) - - unlock_file("env_build.xml") - unlock_file("env_batch.xml") - - case.flush() - case.check_lockedfiles() - - case.initialize_derived_attributes() - - cost_per_node = case.get_value("COSTPES_PER_NODE") - case.set_value("COST_PES", case.num_nodes * cost_per_node) - threaded = case.get_build_threaded() - case.set_value("SMP_PRESENT", threaded) - if threaded and case.total_tasks * case.thread_count > cost_per_node: - smt_factor = max(1.0,int(case.get_value("MAX_TASKS_PER_NODE") / cost_per_node)) - case.set_value("TOTALPES", int(case.total_tasks * max(1.0,float(case.thread_count) / smt_factor))) - else: - case.set_value("TOTALPES", case.total_tasks*case.thread_count) - - # May need to select new batch settings if pelayout changed (e.g. 
problem is now too big for prev-selected queue) - env_batch = case.get_env("batch") - env_batch.set_job_defaults([(case.get_primary_job(), {})], case) - - # create batch files - env_batch.make_all_batch_files(case) - if get_model() == "e3sm" and not case.get_value("TEST"): - input_batch_script = os.path.join(case.get_value("MACHDIR"), "template.case.run.sh") - env_batch.make_batch_script(input_batch_script, "case.run", case, outfile=get_batch_script_for_job("case.run.sh")) - - # Make a copy of env_mach_pes.xml in order to be able - # to check that it does not change once case.setup is invoked - case.flush() - logger.debug("at copy TOTALPES = {}".format(case.get_value("TOTALPES"))) - lock_file("env_mach_pes.xml") - lock_file("env_batch.xml") - - # Create user_nl files for the required number of instances - if not os.path.exists("user_nl_cpl"): - logger.info("Creating user_nl_xxx files for components and cpl") - - # loop over models - for model in models: - comp = case.get_value("COMP_{}".format(model)) - logger.debug("Building {} usernl files".format(model)) - _build_usernl_files(case, model, comp) - if comp == "cism": - glcroot = case.get_value("COMP_ROOT_DIR_GLC") - run_cmd_no_fail("{}/cime_config/cism.template {}".format(glcroot, caseroot)) - - _build_usernl_files(case, "drv", "cpl") - - # Create needed directories for case - case.create_dirs() - - logger.info("If an old case build already exists, might want to run \'case.build --clean\' before building") - - # Some tests need namelists created here (ERP) - so do this if we are in test mode - if (test_mode or get_model() == "e3sm") and not non_local: - logger.info("Generating component namelists as part of setup") - case.create_namelists() - - # Record env information - env_module = case.get_env("mach_specific") - env_module.make_env_mach_specific_file("sh", case) - env_module.make_env_mach_specific_file("csh", case) - if not non_local: - env_module.save_all_env_info("software_environment.txt") - - logger.info("You can now run './preview_run' to get more info on how your case will be run") - -############################################################################### -def case_setup(self, clean=False, test_mode=False, reset=False): -############################################################################### - caseroot, casebaseid = self.get_value("CASEROOT"), self.get_value("CASEBASEID") - phase = "setup.clean" if clean else "case.setup" - functor = lambda: _case_setup_impl(self, caseroot, clean, test_mode, reset) - - if self.get_value("TEST") and not test_mode: - test_name = casebaseid if casebaseid is not None else self.get_value("CASE") - with TestStatus(test_dir=caseroot, test_name=test_name) as ts: - try: - run_and_log_case_status(functor, phase, caseroot=caseroot) - except BaseException: # Want to catch KeyboardInterrupt too - ts.set_status(SETUP_PHASE, TEST_FAIL_STATUS) - raise - else: - if clean: - ts.set_status(SETUP_PHASE, TEST_PEND_STATUS) - else: - ts.set_status(SETUP_PHASE, TEST_PASS_STATUS) - else: - run_and_log_case_status(functor, phase, caseroot=caseroot) diff --git a/scripts/lib/CIME/case/case_st_archive.py b/scripts/lib/CIME/case/case_st_archive.py deleted file mode 100644 index 9acb8e409a6..00000000000 --- a/scripts/lib/CIME/case/case_st_archive.py +++ /dev/null @@ -1,916 +0,0 @@ -""" -short term archiving -case_st_archive, restore_from_archive, archive_last_restarts -are members of class Case from file case.py -""" - -import shutil, glob, re, os - -from CIME.XML.standard_module_setup import * -from CIME.utils 
import run_and_log_case_status, ls_sorted_by_mtime, symlink_force, safe_copy, find_files -from CIME.date import get_file_date -from CIME.XML.archive import Archive -from CIME.XML.files import Files -from os.path import isdir, join - -logger = logging.getLogger(__name__) - -############################################################################### -def _get_archive_file_fn(copy_only): -############################################################################### - """ - Returns the function to use for archiving some files - """ - return safe_copy if copy_only else shutil.move - -############################################################################### -def _get_datenames(casename, rundir): -############################################################################### - """ - Returns the date objects specifying the times of each file - Note we are assuming that the coupler restart files exist and are consistent with other component datenames - Not doc-testable due to filesystem dependence - """ - expect(isdir(rundir), 'Cannot open directory {} '.format(rundir)) - - files = sorted(glob.glob(os.path.join(rundir, casename + '.cpl.r.*.nc'))) - if not files: - files = sorted(glob.glob(os.path.join(rundir, casename + '.cpl_0001.r.*.nc'))) - - logger.debug(" cpl files : {} ".format(files)) - - if not files: - logger.warning('Cannot find a {}.cpl*.r.*.nc file in directory {} '.format(casename, rundir)) - - datenames = [] - for filename in files: - file_date = get_file_date(filename) - datenames.append(file_date) - return datenames - -def _datetime_str(_date): - """ - Returns the standard format associated with filenames. - - >>> from CIME.date import date - >>> _datetime_str(date(5, 8, 22)) - '0005-08-22-00000' - >>> _datetime_str(get_file_date("0011-12-09-00435")) - '0011-12-09-00435' - """ - - format_string = "{year:04d}-{month:02d}-{day:02d}-{seconds:05d}" - return format_string.format(year = _date.year(), - month = _date.month(), - day = _date.day(), - seconds = _date.second_of_day()) - -def _datetime_str_mpas(_date): - """ - Returns the mpas format associated with filenames. 
- - >>> from CIME.date import date - >>> _datetime_str_mpas(date(5, 8, 22)) - '0005-08-22_00:00:00' - >>> _datetime_str_mpas(get_file_date("0011-12-09-00435")) - '0011-12-09_00:07:15' - """ - - format_string = "{year:04d}-{month:02d}-{day:02d}_{hours:02d}:{minutes:02d}:{seconds:02d}" - return format_string.format(year = _date.year(), - month = _date.month(), - day = _date.day(), - hours = _date.hour(), - minutes = _date.minute(), - seconds = _date.second()) - -############################################################################### -def _get_ninst_info(case, compclass): -############################################################################### - """ - Returns the number of instances used by a component and suffix strings for filenames - Not doc-testable due to case dependence - """ - - ninst = case.get_value('NINST_' + compclass.upper()) - ninst_strings = [] - if ninst is None: - ninst = 1 - for i in range(1,ninst+1): - if ninst > 1: - ninst_strings.append('_' + '{:04d}'.format(i)) - - logger.debug("ninst and ninst_strings are: {} and {} for {}".format(ninst, ninst_strings, compclass)) - return ninst, ninst_strings - -############################################################################### -def _get_component_archive_entries(components, archive): -############################################################################### - """ - Each time this generator function is called, it yields a tuple - (archive_entry, compname, compclass) for one component in this - case's compset components. - """ - for compname in components: - logger.debug("compname is {} ".format(compname)) - archive_entry = archive.get_entry(compname) - if archive_entry is None: - logger.debug("No entry found for {}".format(compname)) - compclass = None - else: - compclass = archive.get(archive_entry, "compclass") - yield(archive_entry, compname, compclass) - - -############################################################################### -def _archive_rpointer_files(casename, ninst_strings, rundir, save_interim_restart_files, archive, - archive_entry, archive_restdir, datename, datename_is_last): -############################################################################### - - if datename_is_last: - # Copy of all rpointer files for latest restart date - rpointers = glob.glob(os.path.join(rundir, 'rpointer.*')) - for rpointer in rpointers: - safe_copy(rpointer, os.path.join(archive_restdir, os.path.basename(rpointer))) - else: - # Generate rpointer file(s) for interim restarts for the one datename and each - # possible value of ninst_strings - if save_interim_restart_files: - - # parse env_archive.xml to determine the rpointer files - # and contents for the given archive_entry tag - rpointer_items = archive.get_rpointer_contents(archive_entry) - - # loop through the possible rpointer files and contents - for rpointer_file, rpointer_content in rpointer_items: - temp_rpointer_file = rpointer_file - temp_rpointer_content = rpointer_content - - # put in a temporary setting for ninst_strings if they are empty - # in order to have just one loop over ninst_strings below - if rpointer_content != 'unset': - if not ninst_strings: - ninst_strings = ["empty"] - - for ninst_string in ninst_strings: - rpointer_file = temp_rpointer_file - rpointer_content = temp_rpointer_content - if ninst_string == 'empty': - ninst_string = "" - for key, value in [('$CASE', casename), - ('$DATENAME', _datetime_str(datename)), - ('$MPAS_DATENAME', _datetime_str_mpas(datename)), - ('$NINST_STRING', ninst_string)]: - rpointer_file 
= rpointer_file.replace(key, value) - rpointer_content = rpointer_content.replace(key, value) - - # write out the respective files with the correct contents - rpointer_file = os.path.join(archive_restdir, rpointer_file) - logger.info("writing rpointer_file {}".format(rpointer_file)) - f = open(rpointer_file, 'w') - for output in rpointer_content.split(','): - f.write("{} \n".format(output)) - f.close() - else: - logger.info("rpointer_content unset, not creating rpointer file {}".format(rpointer_file)) - -############################################################################### -def _archive_log_files(dout_s_root, rundir, archive_incomplete, archive_file_fn): -############################################################################### - """ - Find all completed log files, or all log files if archive_incomplete is True, and archive them. - Each log file is required to have ".log." in its name, and completed ones will end with ".gz" - Not doc-testable due to file system dependence - """ - archive_logdir = os.path.join(dout_s_root, 'logs') - if not os.path.exists(archive_logdir): - os.makedirs(archive_logdir) - logger.debug("created directory {} ".format(archive_logdir)) - - if archive_incomplete == False: - log_search = '*.log.*.gz' - else: - log_search = '*.log.*' - - logfiles = glob.glob(os.path.join(rundir, log_search)) - for logfile in logfiles: - srcfile = join(rundir, os.path.basename(logfile)) - destfile = join(archive_logdir, os.path.basename(logfile)) - archive_file_fn(srcfile, destfile) - logger.info("moving {} to {}".format(srcfile, destfile)) - -############################################################################### -def _archive_history_files(archive, archive_entry, - compclass, compname, histfiles_savein_rundir, - last_date, archive_file_fn, dout_s_root, casename, rundir): -############################################################################### - """ - perform short term archiving on history files in rundir - - Not doc-testable due to case and file system dependence - """ - - # determine history archive directory (create if it does not exist) - - archive_histdir = os.path.join(dout_s_root, compclass, 'hist') - if not os.path.exists(archive_histdir): - os.makedirs(archive_histdir) - logger.debug("created directory {}".format(archive_histdir)) - # the compname is drv but the files are named cpl - if compname == 'drv': - compname = 'cpl' - - if compname == 'clm': - compname = r'clm2?' 
- - if compname == 'nemo': - archive_rblddir = os.path.join(dout_s_root, compclass, 'rebuild') - if not os.path.exists(archive_rblddir): - os.makedirs(archive_rblddir) - logger.debug("created directory {}".format(archive_rblddir)) - - sfxrbld = r'mesh_mask_' + r'[0-9]*' - pfile = re.compile(sfxrbld) - rbldfiles = [f for f in os.listdir(rundir) if pfile.search(f)] - logger.debug("rbldfiles = {} ".format(rbldfiles)) - - if rbldfiles: - for rbldfile in rbldfiles: - srcfile = join(rundir, rbldfile) - destfile = join(archive_rblddir, rbldfile) - logger.info("moving {} to {} ".format(srcfile, destfile)) - archive_file_fn(srcfile, destfile) - - sfxhst = casename + r'_[0-9][mdy]_' + r'[0-9]*' - pfile = re.compile(sfxhst) - hstfiles = [f for f in os.listdir(rundir) if pfile.search(f)] - logger.debug("hstfiles = {} ".format(hstfiles)) - - if hstfiles: - for hstfile in hstfiles: - srcfile = join(rundir, hstfile) - destfile = join(archive_histdir, hstfile) - logger.info("moving {} to {} ".format(srcfile, destfile)) - archive_file_fn(srcfile, destfile) - - # determine ninst and ninst_string - - # archive history files - the only history files that kept in the - # run directory are those that are needed for restarts - - for suffix in archive.get_hist_file_extensions(archive_entry): - if compname.find('mpas') == 0 or compname == 'mali': - newsuffix = compname + r'\d*' - else: - newsuffix = casename + r'\.' + compname + r'_?' + r'\d*' - newsuffix += r'\.' + suffix - if not suffix.endswith('$'): - newsuffix += r'\.' - - logger.debug("short term archiving suffix is {} ".format(newsuffix)) - - pfile = re.compile(newsuffix) - histfiles = [f for f in os.listdir(rundir) if pfile.search(f)] - logger.debug("histfiles = {} ".format(histfiles)) - - if histfiles: - for histfile in histfiles: - file_date = get_file_date(os.path.basename(histfile)) - if last_date is None or file_date is None or file_date <= last_date: - srcfile = join(rundir, histfile) - expect(os.path.isfile(srcfile), - "history file {} does not exist ".format(srcfile)) - destfile = join(archive_histdir, histfile) - if histfile in histfiles_savein_rundir: - logger.info("copying {} to {} ".format(srcfile, destfile)) - safe_copy(srcfile, destfile) - else: - logger.info("moving {} to {} ".format(srcfile, destfile)) - archive_file_fn(srcfile, destfile) - -############################################################################### -def get_histfiles_for_restarts(rundir, archive, archive_entry, restfile, testonly=False): -############################################################################### - """ - query restart files to determine history files that are needed for restarts - - Not doc-testable due to filesystem dependence - """ - - # Make certain histfiles is a set so we don't repeat - histfiles = set() - rest_hist_varname = archive.get_entry_value('rest_history_varname', archive_entry) - if rest_hist_varname != 'unset': - cmd = "ncdump -v {} {} ".format(rest_hist_varname, os.path.join(rundir, restfile)) - if testonly: - out = "{} =".format(rest_hist_varname) - else: - rc, out, error = run_cmd(cmd) - if rc != 0: - logger.info(" WARNING: {} failed rc={:d}\n out={}\n err={}".format(cmd, rc, out, error)) - logger.debug(" get_histfiles_for_restarts: \n out={}".format(out)) - - searchname = "{} =".format(rest_hist_varname) - if searchname in out: - offset = out.index(searchname) - items = out[offset:].split(",") - for item in items: - # the following match has an option of having any number of '.'s and '/'s - # at the beginning of the history 
filename - matchobj = re.search(r"\"\S+\s*\"", item) - if matchobj: - histfile = matchobj.group(0).strip('" ') - histfile = os.path.basename(histfile) - # append histfile to the list ONLY if it exists in rundir before the archiving - if histfile in histfiles: - logger.warning("WARNING, tried to add a duplicate file to histfiles") - if os.path.isfile(os.path.join(rundir,histfile)): - histfiles.add(histfile) - else: - logger.debug(" get_histfiles_for_restarts: histfile {} does not exist ".format(histfile)) - return histfiles - -############################################################################### -def _archive_restarts_date(case, casename, rundir, archive, - datename, datename_is_last, last_date, - archive_restdir, archive_file_fn, components=None, - link_to_last_restart_files=False, testonly=False): -############################################################################### - """ - Archive restart files for a single date - - Returns a dictionary of histfiles that need saving in the run - directory, indexed by compname - """ - logger.info('-------------------------------------------') - logger.info('Archiving restarts for date {}'.format(datename)) - logger.debug('last date {}'.format(last_date)) - logger.debug('date is last? {}'.format(datename_is_last)) - logger.debug('components are {}'.format(components)) - logger.info('-------------------------------------------') - logger.debug("last date: {}".format(last_date)) - - if components is None: - components = case.get_compset_components() - components.append('drv') - components.append('dart') - - histfiles_savein_rundir_by_compname = {} - - for (archive_entry, compname, compclass) in _get_component_archive_entries(components, archive): - if compclass: - logger.info('Archiving restarts for {} ({})'.format(compname, compclass)) - - # archive restarts - histfiles_savein_rundir = _archive_restarts_date_comp(case, casename, rundir, - archive, archive_entry, - compclass, compname, - datename, datename_is_last, - last_date, archive_restdir, - archive_file_fn, - link_to_last_restart_files= - link_to_last_restart_files, - testonly=testonly) - histfiles_savein_rundir_by_compname[compname] = histfiles_savein_rundir - - return histfiles_savein_rundir_by_compname - -############################################################################### -def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, - compclass, compname, datename, datename_is_last, - last_date, archive_restdir, archive_file_fn, - link_to_last_restart_files=False, testonly=False): -############################################################################### - """ - Archive restart files for a single date and single component - - If link_to_last_restart_files is True, then make a symlink to the - last set of restart files (i.e., the set with datename_is_last - True); if False (the default), copy them. (This has no effect on the - history files that are associated with these restart files.) 
- """ - datename_str = _datetime_str(datename) - - if datename_is_last or case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'): - if not os.path.exists(archive_restdir): - os.makedirs(archive_restdir) - - # archive the rpointer file(s) for this datename and all possible ninst_strings - _archive_rpointer_files(casename, _get_ninst_info(case, compclass)[1], rundir, - case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'), - archive, archive_entry, archive_restdir, datename, datename_is_last) - - # move all but latest restart files into the archive restart directory - # copy latest restart files to archive restart directory - histfiles_savein_rundir = [] - - # determine function to use for last set of restart files - if link_to_last_restart_files: - last_restart_file_fn = symlink_force - last_restart_file_fn_msg = "linking" - else: - last_restart_file_fn = safe_copy - last_restart_file_fn_msg = "copying" - - # the compname is drv but the files are named cpl - if compname == 'drv': - compname = 'cpl' - - # get file_extension suffixes - for suffix in archive.get_rest_file_extensions(archive_entry): -# logger.debug("suffix is {} ninst {}".format(suffix, ninst)) - restfiles = "" - if compname.find('mpas') == 0 or compname == 'mali': - pattern = compname + r'\.' + suffix + r'\.' + '_'.join(datename_str.rsplit('-', 1)) - pfile = re.compile(pattern) - restfiles = [f for f in os.listdir(rundir) if pfile.search(f)] - elif compname == 'nemo': - pattern = r'_*_' + suffix + r'[0-9]*' - pfile = re.compile(pattern) - restfiles = [f for f in os.listdir(rundir) if pfile.search(f)] - else: - pattern = r"^{}\.{}[\d_]*\.".format(casename, compname) - pfile = re.compile(pattern) - files = [f for f in os.listdir(rundir) if pfile.search(f)] - pattern = r'_?' + r'\d*' + r'\.' + suffix + r'\.' + r'[^\.]*' + r'\.?' 
+ datename_str - pfile = re.compile(pattern) - restfiles = [f for f in files if pfile.search(f)] - logger.debug("pattern is {} restfiles {}".format(pattern, restfiles)) - for rfile in restfiles: - rfile = os.path.basename(rfile) - - file_date = get_file_date(rfile) - if last_date is not None and file_date > last_date: - # Skip this file - continue - - if not os.path.exists(archive_restdir): - os.makedirs(archive_restdir) - - # obtain array of history files for restarts - # need to do this before archiving restart files - histfiles_for_restart = get_histfiles_for_restarts(rundir, archive, - archive_entry, rfile, - testonly=testonly) - - if datename_is_last and histfiles_for_restart: - for histfile in histfiles_for_restart: - if histfile not in histfiles_savein_rundir: - histfiles_savein_rundir.append(histfile) - - # archive restart files and all history files that are needed for restart - # Note that the latest file should be copied and not moved - if datename_is_last: - srcfile = os.path.join(rundir, rfile) - destfile = os.path.join(archive_restdir, rfile) - last_restart_file_fn(srcfile, destfile) - logger.info("{} file {} to {}".format(last_restart_file_fn_msg, srcfile, destfile)) - for histfile in histfiles_for_restart: - srcfile = os.path.join(rundir, histfile) - destfile = os.path.join(archive_restdir, histfile) - expect(os.path.isfile(srcfile), - "history restart file {} for last date does not exist ".format(srcfile)) - logger.info("Copying {} to {}".format(srcfile, destfile)) - safe_copy(srcfile, destfile) - logger.debug("datename_is_last + histfiles_for_restart copying \n {} to \n {}".format(srcfile, destfile)) - else: - # Only archive intermediate restarts if requested - otherwise remove them - if case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'): - srcfile = os.path.join(rundir, rfile) - destfile = os.path.join(archive_restdir, rfile) - expect(os.path.isfile(srcfile), - "restart file {} does not exist ".format(srcfile)) - archive_file_fn(srcfile, destfile) - logger.info("moving file {} to {}".format(srcfile, destfile)) - - # need to copy the history files needed for interim restarts - since - # have not archived all of the history files yet - for histfile in histfiles_for_restart: - srcfile = os.path.join(rundir, histfile) - destfile = os.path.join(archive_restdir, histfile) - expect(os.path.isfile(srcfile), - "hist file {} does not exist ".format(srcfile)) - logger.info("copying {} to {}".format(srcfile, destfile)) - safe_copy(srcfile, destfile) - else: - if compname == 'nemo': - flist = glob.glob(rundir + "/" + casename + "_*_restart*.nc") - logger.debug("nemo restart file {}".format(flist)) - if len(flist) > 2: - flist0 = glob.glob(rundir + "/" + casename + "_*_restart_0000.nc") - if len(flist0) > 1: - rstfl01 = flist0[0] - rstfl01spl = rstfl01.split("/") - logger.debug("splitted name {}".format(rstfl01spl)) - rstfl01nm = rstfl01spl[-1] - rstfl01nmspl = rstfl01nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl01nmspl)) - rsttm01 = rstfl01nmspl[-3] - - rstfl02 = flist0[1] - rstfl02spl = rstfl02.split("/") - logger.debug("splitted name {}".format(rstfl02spl)) - rstfl02nm = rstfl02spl[-1] - rstfl02nmspl = rstfl02nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl02nmspl)) - rsttm02 = rstfl02nmspl[-3] - - if int(rsttm01) > int(rsttm02): - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm02 + "_restart_*.nc") - else: - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm01 + "_restart_*.nc") - logger.debug("nemo restart list 
{}".format(restlist)) - if restlist: - for _restfile in restlist: - srcfile = os.path.join(rundir, _restfile) - logger.info("removing interim restart file {}".format(srcfile)) - if (os.path.isfile(srcfile)): - try: - os.remove(srcfile) - except OSError: - logger.warning("unable to remove interim restart file {}".format(srcfile)) - else: - logger.warning("interim restart file {} does not exist".format(srcfile)) - elif len(flist) == 2: - flist0 = glob.glob(rundir + "/" + casename + "_*_restart.nc") - if len(flist0) > 1: - rstfl01 = flist0[0] - rstfl01spl = rstfl01.split("/") - logger.debug("splitted name {}".format(rstfl01spl)) - rstfl01nm = rstfl01spl[-1] - rstfl01nmspl = rstfl01nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl01nmspl)) - rsttm01 = rstfl01nmspl[-2] - - rstfl02 = flist0[1] - rstfl02spl = rstfl02.split("/") - logger.debug("splitted name {}".format(rstfl02spl)) - rstfl02nm = rstfl02spl[-1] - rstfl02nmspl = rstfl02nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl02nmspl)) - rsttm02 = rstfl02nmspl[-2] - - if int(rsttm01) > int(rsttm02): - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm02 + "_restart_*.nc") - else: - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm01 + "_restart_*.nc") - logger.debug("nemo restart list {}".format(restlist)) - if restlist: - for _rfile in restlist: - srcfile = os.path.join(rundir, _rfile) - logger.info("removing interim restart file {}".format(srcfile)) - if (os.path.isfile(srcfile)): - try: - os.remove(srcfile) - except OSError: - logger.warning("unable to remove interim restart file {}".format(srcfile)) - else: - logger.warning("interim restart file {} does not exist".format(srcfile)) - else: - logger.warning("unable to find NEMO restart file in {}".format(rundir)) - - - else: - srcfile = os.path.join(rundir, rfile) - logger.info("removing interim restart file {}".format(srcfile)) - if (os.path.isfile(srcfile)): - try: - os.remove(srcfile) - except OSError: - logger.warning("unable to remove interim restart file {}".format(srcfile)) - else: - logger.warning("interim restart file {} does not exist".format(srcfile)) - - return histfiles_savein_rundir - -############################################################################### -def _archive_process(case, archive, last_date, archive_incomplete_logs, copy_only, - components=None,dout_s_root=None, casename=None, rundir=None, testonly=False): -############################################################################### - """ - Parse config_archive.xml and perform short term archiving - """ - - logger.debug('In archive_process...') - - if dout_s_root is None: - dout_s_root = case.get_value("DOUT_S_ROOT") - if rundir is None: - rundir = case.get_value("RUNDIR") - if casename is None: - casename = case.get_value("CASE") - if components is None: - components = case.get_compset_components() - components.append('drv') - components.append('dart') - - archive_file_fn = _get_archive_file_fn(copy_only) - - # archive log files - _archive_log_files(dout_s_root, rundir, - archive_incomplete_logs, archive_file_fn) - - # archive restarts and all necessary associated files (e.g. 
rpointer files) - datenames = _get_datenames(casename, rundir) - logger.debug("datenames {} ".format(datenames)) - histfiles_savein_rundir_by_compname = {} - for datename in datenames: - datename_is_last = False - if datename == datenames[-1]: - datename_is_last = True - - logger.debug("datename {} last_date {}".format(datename,last_date)) - if last_date is None or datename <= last_date: - archive_restdir = join(dout_s_root, 'rest', _datetime_str(datename)) - - histfiles_savein_rundir_by_compname_this_date = _archive_restarts_date( - case, casename, rundir, archive, datename, datename_is_last, - last_date, archive_restdir, archive_file_fn, components, testonly=testonly) - if datename_is_last: - histfiles_savein_rundir_by_compname = histfiles_savein_rundir_by_compname_this_date - - # archive history files - - for (archive_entry, compname, compclass) in _get_component_archive_entries(components, archive): - if compclass: - logger.info('Archiving history files for {} ({})'.format(compname, compclass)) - histfiles_savein_rundir = histfiles_savein_rundir_by_compname.get(compname, []) - logger.debug("_archive_process: histfiles_savein_rundir {} ".format(histfiles_savein_rundir)) - _archive_history_files(archive, archive_entry, - compclass, compname, histfiles_savein_rundir, - last_date, archive_file_fn, - dout_s_root, casename, rundir) - -############################################################################### -def restore_from_archive(self, rest_dir=None, dout_s_root=None, rundir=None): -############################################################################### - """ - Take archived restart files and load them into current case. Use rest_dir if provided otherwise use most recent - restore_from_archive is a member of Class Case - """ - if dout_s_root is None: - dout_s_root = self.get_value("DOUT_S_ROOT") - if rundir is None: - rundir = self.get_value("RUNDIR") - if rest_dir is not None: - if not os.path.isabs(rest_dir): - rest_dir = os.path.join(dout_s_root, "rest", rest_dir) - else: - rest_dir = os.path.join(dout_s_root, "rest", ls_sorted_by_mtime(os.path.join(dout_s_root, "rest"))[-1]) - - logger.info("Restoring restart from {}".format(rest_dir)) - - for item in glob.glob("{}/*".format(rest_dir)): - base = os.path.basename(item) - dst = os.path.join(rundir, base) - if os.path.exists(dst): - os.remove(dst) - logger.info("Restoring {} from {} to {}".format(item, rest_dir, rundir)) - - safe_copy(item, rundir) - -############################################################################### -def archive_last_restarts(self, archive_restdir, rundir, last_date=None, link_to_restart_files=False): -############################################################################### - """ - Convenience function for archiving just the last set of restart - files to a given directory. This also saves files attached to the - restart set, such as rpointer files and necessary history - files. However, it does not save other files that are typically - archived (e.g., history files, log files). - - Files are copied to the directory given by archive_restdir. - - If link_to_restart_files is True, then symlinks rather than copies - are done for the restart files. (This has no effect on the history - files that are associated with these restart files.) 
- """ - archive = self.get_env('archive') - casename = self.get_value("CASE") - datenames = _get_datenames(casename, rundir) - expect(len(datenames) >= 1, "No restart dates found") - last_datename = datenames[-1] - - # Not currently used for anything if we're only archiving the last - # set of restart files, but needed to satisfy the following interface - archive_file_fn = _get_archive_file_fn(copy_only=False) - - _ = _archive_restarts_date(case=self, - casename=casename, - rundir=rundir, - archive=archive, - datename=last_datename, - datename_is_last=True, - last_date=last_date, - archive_restdir=archive_restdir, - archive_file_fn=archive_file_fn, - link_to_last_restart_files=link_to_restart_files) - -############################################################################### -def case_st_archive(self, last_date_str=None, archive_incomplete_logs=True, copy_only=False, resubmit=True): -############################################################################### - """ - Create archive object and perform short term archiving - """ - logger.debug("resubmit {}".format(resubmit)) - caseroot = self.get_value("CASEROOT") - self.load_env(job="case.st_archive") - if last_date_str is not None: - try: - last_date = get_file_date(last_date_str) - except ValueError: - expect(False, 'Could not parse the last date to archive') - else: - last_date = None - - dout_s_root = self.get_value('DOUT_S_ROOT') - if dout_s_root is None or dout_s_root == 'UNSET': - expect(False, - 'XML variable DOUT_S_ROOT is required for short-term achiver') - if not isdir(dout_s_root): - os.makedirs(dout_s_root) - - dout_s_save_interim = self.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES') - if dout_s_save_interim == 'FALSE' or dout_s_save_interim == 'UNSET': - rest_n = self.get_value('REST_N') - stop_n = self.get_value('STOP_N') - if rest_n < stop_n: - logger.warning('Restart files from end of run will be saved' - 'interim restart files will be deleted') - - logger.info("st_archive starting") - - archive = self.get_env('archive') - functor = lambda: _archive_process(self, archive, last_date, archive_incomplete_logs, copy_only) - run_and_log_case_status(functor, "st_archive", caseroot=caseroot) - - logger.info("st_archive completed") - - # resubmit case if appropriate - if not self.get_value("EXTERNAL_WORKFLOW"): - resubmit_cnt = self.get_value("RESUBMIT") - logger.debug("resubmit_cnt {} resubmit {}".format(resubmit_cnt, resubmit)) - if resubmit_cnt > 0 and resubmit: - logger.info("resubmitting from st_archive, resubmit={:d}".format(resubmit_cnt)) - if self.get_value("MACH") == "mira": - expect(os.path.isfile(".original_host"), "ERROR alcf host file not found") - with open(".original_host", "r") as fd: - sshhost = fd.read() - run_cmd("ssh cooleylogin1 ssh {} '{case}/case.submit {case} --resubmit' "\ - .format(sshhost, case=caseroot), verbose=True) - else: - self.submit(resubmit=True) - - return True - -def test_st_archive(self, testdir="st_archive_test"): - archive = Archive() - files = Files() - components = [] -# expect(not self.get_value("MULTI_DRIVER"),"Test not configured for multi-driver cases") - - config_archive_files = archive.get_all_config_archive_files(files) - # create the run directory testdir and populate it with rest_file and hist_file from - # config_archive.xml test_file_names - if os.path.exists(testdir): - logger.info("Removing existing test directory {}".format(testdir)) - shutil.rmtree(testdir) - dout_s_root=os.path.join(testdir,"archive") - archive = Archive() - schema = 
files.get_schema("ARCHIVE_SPEC_FILE") - for config_archive_file in config_archive_files: - archive.read(config_archive_file, schema) - comp_archive_specs = archive.get_children("comp_archive_spec") - for comp_archive_spec in comp_archive_specs: - components.append(archive.get(comp_archive_spec, 'compname')) - test_file_names = archive.get_optional_child("test_file_names", root=comp_archive_spec) - if test_file_names is not None: - if not os.path.exists(testdir): - os.makedirs(os.path.join(testdir,"archive")) - - for file_node in archive.get_children("tfile", root=test_file_names): - fname = os.path.join(testdir,archive.text(file_node)) - disposition = archive.get(file_node, "disposition") - logger.info("Create file {} with disposition {}". - format(fname, disposition)) - with open(fname, 'w') as fd: - fd.write(disposition+"\n") - - logger.info("testing components: {} ".format(list(set(components)))) - _archive_process(self, archive, None, False, False,components=list(set(components)), - dout_s_root=dout_s_root, - casename="casename", rundir=testdir, testonly=True) - - _check_disposition(testdir) - - # Now test the restore capability - testdir2 = os.path.join(testdir,"run2") - os.makedirs(testdir2) - - restore_from_archive(self, rundir=testdir2, dout_s_root=dout_s_root) - - restfiles = [f for f in os.listdir(os.path.join(testdir,"archive","rest","1976-01-01-00000"))] - for _file in restfiles: - expect(os.path.isfile(os.path.join(testdir2,_file)), "Expected file {} to be restored from rest dir".format(_file)) - - return True - -def test_env_archive(self, testdir="env_archive_test"): - components = self.get_values("COMP_CLASSES") - comps_in_case = [] - # create the run directory testdir and populate it with rest_file and hist_file from - # config_archive.xml test_file_names - if os.path.exists(testdir): - logger.info("Removing existing test directory {}".format(testdir)) - shutil.rmtree(testdir) - dout_s_root=os.path.join(testdir,"archive") - archive = self.get_env('archive') - comp_archive_specs = archive.scan_children("comp_archive_spec") - - # ignore stub and dead components - for comp in list(components): - compname = self.get_value("COMP_{}".format(comp)) - if (compname == 's'+comp.lower() or compname == 'x'+comp.lower()) and comp != 'ESP': - logger.info("Not testing component {}".format(comp)) - components.remove(comp) - elif comp == 'ESP' and self.get_value('MODEL') == 'e3sm': - components.remove(comp) - else: - if compname == 'cpl': - compname = 'drv' - comps_in_case.append(compname) - - for comp_archive_spec in comp_archive_specs: - comp_expected = archive.get(comp_archive_spec, 'compname') - if comp_expected == "ww3": - comp_expected = "ww" - comp_class = archive.get(comp_archive_spec, 'compclass').upper() - if comp_class in components: - components.remove(comp_class) - else: - expect(False,"Error finding comp_class {} in components".format(comp_class)) - if comp_expected == 'cpl': - comp_expected = 'drv' - if comp_expected != 'dart': - expect(comp_expected in comps_in_case, "env_archive defines component {} not defined in case".format(comp_expected)) - - test_file_names = archive.get_optional_child("test_file_names", root=comp_archive_spec) - if test_file_names is not None: - if not os.path.exists(testdir): - os.makedirs(os.path.join(testdir,"archive")) - - for file_node in archive.get_children("tfile", root=test_file_names): - fname = os.path.join(testdir,archive.text(file_node)) - disposition = archive.get(file_node, "disposition") - logger.info("Create file {} with disposition 
{}". - format(fname, disposition)) - with open(fname, 'w') as fd: - fd.write(disposition+"\n") - - expect(not components, "No archive entry found for components: {}".format(components)) - if 'dart' not in comps_in_case: - comps_in_case.append('dart') - logger.info("testing components: {} ".format(comps_in_case)) - _archive_process(self, archive, None, False, False,components=comps_in_case, - dout_s_root=dout_s_root, - casename="casename", rundir=testdir, testonly=True) - - _check_disposition(testdir) - - # Now test the restore capability - testdir2 = os.path.join(testdir,"run2") - os.makedirs(testdir2) - - restore_from_archive(self, rundir=testdir2, dout_s_root=dout_s_root) - - restfiles = [f for f in os.listdir(os.path.join(testdir,"archive","rest","1976-01-01-00000"))] - for _file in restfiles: - expect(os.path.isfile(os.path.join(testdir2,_file)), "Expected file {} to be restored from rest dir".format(_file)) - - return True - -def _check_disposition(testdir): - copyfilelist = [] - for root, _, files in os.walk(testdir): - for _file in files: - with open(os.path.join(root, _file), "r") as fd: - disposition = fd.readline() - logger.info("Checking testfile {} with disposition {}".format(_file, disposition)) - if root == testdir: - if "move" in disposition: - if find_files(os.path.join(testdir, "archive"), _file): - expect(False, - "Copied file {} to archive with disposition move".format(_file)) - else: - expect(False, - "Failed to move file {} to archive".format(_file)) - if "copy" in disposition: - copyfilelist.append(_file) - elif "ignore" in disposition: - expect(False, "Moved file {} with dispostion ignore to directory {}".format(_file, root)) - elif "copy" in disposition: - expect(_file in copyfilelist, "File {} with disposition copy was moved to directory {}" - .format(_file, root)) - for _file in copyfilelist: - expect(find_files(os.path.join(testdir,"archive"), _file) != [], - "File {} was not copied to archive.".format(_file)) diff --git a/scripts/lib/CIME/case/case_submit.py b/scripts/lib/CIME/case/case_submit.py deleted file mode 100644 index 7559f8869e7..00000000000 --- a/scripts/lib/CIME/case/case_submit.py +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/env python - -""" -case.submit - Submit a cesm workflow to the queueing system or run it -if there is no queueing system. A cesm workflow may include multiple -jobs. 
-submit, check_case and check_da_settings are members of class Case in file case.py -""" -from six.moves import configparser -from CIME.XML.standard_module_setup import * -from CIME.utils import expect, run_and_log_case_status, verbatim_success_msg, CIMEError -from CIME.locked_files import unlock_file, lock_file -from CIME.test_status import * - -import socket - -logger = logging.getLogger(__name__) - -def _build_prereq_str(case, prev_job_ids): - delimiter = case.get_value("depend_separator") - prereq_str = "" - for job_id in prev_job_ids.values(): - prereq_str += str(job_id) + delimiter - return prereq_str[:-1] - -def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False, - resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None, - batch_args=None): - if job is None: - job = case.get_primary_job() - - # Check if CONTINUE_RUN value makes sense - if job != "case.test" and case.get_value("CONTINUE_RUN"): - rundir = case.get_value("RUNDIR") - expect(os.path.isdir(rundir), - "CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir)) - # only checks for the first instance in a multidriver case - if case.get_value("COMP_INTERFACE") == "nuopc": - rpointer = "rpointer.med" - elif case.get_value("MULTI_DRIVER"): - rpointer = "rpointer.drv_0001" - else: - rpointer = "rpointer.drv" - expect(os.path.exists(os.path.join(rundir,rpointer)), - "CONTINUE_RUN is true but this case does not appear to have restart files staged in {} {}".format(rundir,rpointer)) - # Finally we open the rpointer file and check that it's correct - casename = case.get_value("CASE") - with open(os.path.join(rundir,rpointer), "r") as fd: - ncfile = fd.readline().strip() - expect(ncfile.startswith(casename) and - os.path.exists(os.path.join(rundir,ncfile)), - "File {ncfile} not present or does not match case {casename}". - format(ncfile=os.path.join(rundir,ncfile),casename=casename)) - - # if case.submit is called with the no_batch flag then we assume that this - # flag will stay in effect for the duration of the RESUBMITs - env_batch = case.get_env("batch") - external_workflow = case.get_value("EXTERNAL_WORKFLOW") - if resubmit and env_batch.get_batch_system_type() == "none" or external_workflow: - no_batch = True - if no_batch: - batch_system = "none" - else: - batch_system = env_batch.get_batch_system_type() - - case.set_value("BATCH_SYSTEM", batch_system) - - env_batch_has_changed = False - if not external_workflow: - try: - case.check_lockedfile(os.path.basename(env_batch.filename)) - except: - env_batch_has_changed = True - - if batch_system != "none" and env_batch_has_changed and not external_workflow: - # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc) - logger.warning(\ -""" -env_batch.xml appears to have changed, regenerating batch scripts -manual edits to these file will be lost! 
-""") - env_batch.make_all_batch_files(case) - - unlock_file(os.path.basename(env_batch.filename)) - lock_file(os.path.basename(env_batch.filename)) - - if resubmit: - # This is a resubmission, do not reinitialize test values - if job == "case.test": - case.set_value("IS_FIRST_RUN", False) - - resub = case.get_value("RESUBMIT") - logger.info("Submitting job '{}', resubmit={:d}".format(job, resub)) - case.set_value("RESUBMIT", resub-1) - if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"): - case.set_value("CONTINUE_RUN", True) - - else: - if job == "case.test": - case.set_value("IS_FIRST_RUN", True) - - if no_batch: - batch_system = "none" - else: - batch_system = env_batch.get_batch_system_type() - - case.set_value("BATCH_SYSTEM", batch_system) - - env_batch_has_changed = False - try: - case.check_lockedfile(os.path.basename(env_batch.filename)) - except CIMEError: - env_batch_has_changed = True - - if env_batch.get_batch_system_type() != "none" and env_batch_has_changed: - # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc) - logger.warning(\ -""" -env_batch.xml appears to have changed, regenerating batch scripts -manual edits to these file will be lost! -""") - env_batch.make_all_batch_files(case) - - unlock_file(os.path.basename(env_batch.filename)) - lock_file(os.path.basename(env_batch.filename)) - - if job == case.get_primary_job(): - case.check_case() - case.check_DA_settings() - if case.get_value("MACH") == "mira": - with open(".original_host", "w") as fd: - fd.write( socket.gethostname()) - - #Load Modules - case.load_env() - - case.flush() - - logger.warning("submit_jobs {}".format(job)) - job_ids = case.submit_jobs(no_batch=no_batch, job=job, prereq=prereq, - skip_pnl=skip_pnl, resubmit_immediate=resubmit_immediate, - allow_fail=allow_fail, mail_user=mail_user, - mail_type=mail_type, batch_args=batch_args) - - xml_jobids = [] - for jobname, jobid in job_ids.items(): - logger.info("Submitted job {} with id {}".format(jobname, jobid)) - if jobid: - xml_jobids.append("{}:{}".format(jobname, jobid)) - - xml_jobid_text = ", ".join(xml_jobids) - if xml_jobid_text: - case.set_value("JOB_IDS", xml_jobid_text) - - return xml_jobid_text - -def submit(self, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False, - resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None, - batch_args=None): - if resubmit_immediate and self.get_value("MACH") in ['mira', 'cetus']: - logger.warning("resubmit_immediate does not work on Mira/Cetus, submitting normally") - resubmit_immediate = False - - caseroot = self.get_value("CASEROOT") - if self.get_value("TEST"): - casebaseid = self.get_value("CASEBASEID") - # This should take care of the race condition where the submitted job - # begins immediately and tries to set RUN phase. We proactively assume - # a passed SUBMIT phase. If this state is already PASS, don't set it again - # because then we'll lose RUN phase info if it's there. This info is important - # for system_tests_common to know if it needs to reinitialize the test or not. 
- with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts: - phase_status = ts.get_status(SUBMIT_PHASE) - if phase_status != TEST_PASS_STATUS: - ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS) - - # If this is a resubmit check the hidden file .submit_options for - # any submit options used on the original submit and use them again - submit_options = os.path.join(caseroot, ".submit_options") - if resubmit and os.path.exists(submit_options): - config = configparser.RawConfigParser() - config.read(submit_options) - if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'): - skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl') - if mail_user is None and config.has_option('SubmitOptions', 'mail_user'): - mail_user = config.get('SubmitOptions', 'mail_user') - if mail_type is None and config.has_option('SubmitOptions', 'mail_type'): - mail_type = str(config.get('SubmitOptions', 'mail_type')).split(',') - if batch_args is None and config.has_option('SubmitOptions', 'batch_args'): - batch_args = config.get('SubmitOptions', 'batch_args') - - try: - functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq, - allow_fail=allow_fail, resubmit=resubmit, - resubmit_immediate=resubmit_immediate, skip_pnl=skip_pnl, - mail_user=mail_user, mail_type=mail_type, - batch_args=batch_args) - run_and_log_case_status(functor, "case.submit", caseroot=caseroot, - custom_success_msg_functor=verbatim_success_msg) - except BaseException: # Want to catch KeyboardInterrupt too - # If something failed in the batch system, make sure to mark - # the test as failed if we are running a test. - if self.get_value("TEST"): - with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts: - ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS) - - raise - -def check_case(self): - self.check_lockedfiles() - self.create_namelists() # Must be called before check_all_input_data - logger.info("Checking that inputdata is available as part of case submission") - self.check_all_input_data() - - if self.get_value('COMP_WAV') == 'ww': - # the ww3 buildnml has dependancies on inputdata so we must run it again - self.create_namelists(component='WAV') - - expect(self.get_value("BUILD_COMPLETE"), "Build complete is " - "not True please rebuild the model by calling case.build") - logger.info("Check case OK") - -def check_DA_settings(self): - script = self.get_value("DATA_ASSIMILATION_SCRIPT") - cycles = self.get_value("DATA_ASSIMILATION_CYCLES") - if len(script) > 0 and os.path.isfile(script) and cycles > 0: - logger.info("Data Assimilation enabled using script {} with {:d} cycles".format(script, - cycles)) diff --git a/scripts/lib/CIME/case/check_input_data.py b/scripts/lib/CIME/case/check_input_data.py deleted file mode 100644 index 3d543404de6..00000000000 --- a/scripts/lib/CIME/case/check_input_data.py +++ /dev/null @@ -1,401 +0,0 @@ -""" -API for checking input for testcase -""" -from CIME.XML.standard_module_setup import * -from CIME.utils import SharedArea, find_files, safe_copy, expect -from CIME.XML.inputdata import Inputdata -import CIME.Servers - -import glob, hashlib, shutil - -logger = logging.getLogger(__name__) -# The inputdata_checksum.dat file will be read into this hash if it's available -chksum_hash = dict() -local_chksum_file = 'inputdata_checksum.dat' - -def _download_checksum_file(rundir): - """ - Download the checksum files from each server and merge them into rundir. - """ - inputdata = Inputdata() - protocol = "svn" - # download and merge all available chksum files. 
- while protocol is not None: - protocol, address, user, passwd, chksum_file = inputdata.get_next_server() - if protocol not in vars(CIME.Servers): - logger.warning("Client protocol {} not enabled".format(protocol)) - continue - logger.info("Using protocol {} with user {} and passwd {}".format(protocol, user, passwd)) - if protocol == "svn": - server = CIME.Servers.SVN(address, user, passwd) - elif protocol == "gftp": - server = CIME.Servers.GridFTP(address, user, passwd) - elif protocol == "ftp": - server = CIME.Servers.FTP(address, user, passwd) - elif protocol == "wget": - server = CIME.Servers.WGET(address, user, passwd) - else: - expect(False, "Unsupported inputdata protocol: {}".format(protocol)) - - if not chksum_file: - continue - - success = False - rel_path = chksum_file - full_path = os.path.join(rundir, local_chksum_file) - new_file = full_path + '.raw' - protocol = type(server).__name__ - logging.info("Trying to download file: '{}' to path '{}' using {} protocol.".format(rel_path, new_file, protocol)) - tmpfile = None - if os.path.isfile(full_path): - tmpfile = full_path+".tmp" - os.rename(full_path, tmpfile) - # Use umask to make sure files are group read/writable. As long as parent directories - # have +s, then everything should work. - with SharedArea(): - success = server.getfile(rel_path, new_file) - if success: - _reformat_chksum_file(full_path, new_file) - if tmpfile: - _merge_chksum_files(full_path, tmpfile) - chksum_hash.clear() - else: - if tmpfile and os.path.isfile(tmpfile): - os.rename(tmpfile, full_path) - logger.warning("Could not automatically download file "+full_path+ - " Restoring existing version.") - else: - logger.warning("Could not automatically download file {}". - format(full_path)) - - -def _reformat_chksum_file(chksum_file, server_file): - """ - The checksum file on the server has 8 space seperated columns, I need the first and last ones. 
- This function gets the first and last column of server_file and saves it to chksum_file - """ - with open(server_file) as fd, open(chksum_file,"w") as fout: - lines = fd.readlines() - for line in lines: - lsplit = line.split() - if len(lsplit) < 8 or ' DIR ' in line: - continue - - # remove the first directory ('inputdata/') from the filename - chksum = lsplit[0] - fname = (lsplit[7]).split('/',1)[1] - fout.write(" ".join((chksum, fname))+"\n") - os.remove(server_file) - -def _merge_chksum_files(new_file, old_file): - """ - If more than one server checksum file is available, this merges the files and removes - any duplicate lines - """ - with open(old_file) as fin: - lines = fin.readlines() - with open(new_file) as fin: - lines += fin.readlines() - lines = set(lines) - with open(new_file, "w") as fout: - fout.write("".join(lines)) - os.remove(old_file) - - - -def _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False): - """ - Return True if successfully downloaded - server is an object handle of type CIME.Servers - input_data_root is the local path to inputdata (DIN_LOC_ROOT) - rel_path is the path to the file or directory relative to input_data_root - user is the user name of the person running the script - isdirectory indicates that this is a directory download rather than a single file - """ - if not (rel_path or server.fileexists(rel_path)): - return False - - full_path = os.path.join(input_data_root, rel_path) - logging.info("Trying to download file: '{}' to path '{}' using {} protocol.".format(rel_path, full_path, type(server).__name__)) - # Make sure local path exists, create if it does not - if isdirectory or full_path.endswith(os.sep): - if not os.path.exists(full_path): - logger.info("Creating directory {}".format(full_path)) - os.makedirs(full_path+".tmp") - isdirectory = True - elif not os.path.exists(os.path.dirname(full_path)): - os.makedirs(os.path.dirname(full_path)) - - # Use umask to make sure files are group read/writable. As long as parent directories - # have +s, then everything should work. - with SharedArea(): - if isdirectory: - success = server.getdirectory(rel_path, full_path+".tmp") - # this is intended to prevent a race condition in which - # one case attempts to use a refdir before another one has - # completed the download - if success: - os.rename(full_path+".tmp",full_path) - else: - shutil.rmtree(full_path+".tmp") - else: - success = server.getfile(rel_path, full_path) - return success - -def check_all_input_data(self, protocol=None, address=None, input_data_root=None, data_list_dir="Buildconf", - download=True, chksum=False): - """ - Read through all files of the form *.input_data_list in the data_list_dir directory. These files - contain a list of input and boundary files needed by each model component. For each file in the - list confirm that it is available in input_data_root and if not (optionally download it from a - server at address using protocol. Perform a chksum of the downloaded file. 
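[Editor's illustration, not part of the diff] A hedged usage sketch of the check_all_input_data interface documented above. It assumes a working CIME checkout and an existing case; the caseroot path is hypothetical, and the call simply follows the signature shown in this deleted module.

    # Illustrative usage only: verify (and optionally download) all inputdata
    # for a case, per the signature documented above. Path is hypothetical.
    from CIME.case import Case

    with Case("/path/to/caseroot") as case:
        # Checks every Buildconf/*.input_data_list entry, downloading missing
        # files from the configured servers and verifying checksums.
        all_found = case.check_all_input_data(download=True, chksum=True)
        if not all_found:
            print("Some input files are still missing")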
- """ - success = False - if protocol is not None and address is not None: - success = self.check_input_data(protocol=protocol, address=address, download=download, - input_data_root=input_data_root, data_list_dir=data_list_dir, chksum=chksum) - else: - if chksum: - _download_checksum_file(self.get_value("RUNDIR")) - - success = self.check_input_data(protocol=protocol, address=address, download=False, - input_data_root=input_data_root, data_list_dir=data_list_dir, chksum=chksum) - if download and not success: - if not chksum: - _download_checksum_file(self.get_value("RUNDIR")) - success = _downloadfromserver(self, input_data_root, data_list_dir) - - expect(not download or (download and success), "Could not find all inputdata on any server") - self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir) - return success - -def _downloadfromserver(case, input_data_root, data_list_dir): - """ - Download files - """ - success = False - protocol = 'svn' - inputdata = Inputdata() - if not input_data_root: - input_data_root = case.get_value('DIN_LOC_ROOT') - - while not success and protocol is not None: - protocol, address, user, passwd, _ = inputdata.get_next_server() - logger.info("Checking server {} with protocol {}".format(address, protocol)) - success = case.check_input_data(protocol=protocol, address=address, download=True, - input_data_root=input_data_root, - data_list_dir=data_list_dir, - user=user, passwd=passwd) - return success - -def stage_refcase(self, input_data_root=None, data_list_dir=None): - """ - Get a REFCASE for a hybrid or branch run - This is the only case in which we are downloading an entire directory instead of - a single file at a time. - """ - get_refcase = self.get_value("GET_REFCASE") - run_type = self.get_value("RUN_TYPE") - continue_run = self.get_value("CONTINUE_RUN") - - # We do not fully populate the inputdata directory on every - # machine and do not expect every user to download the 3TB+ of - # data in our inputdata repository. This code checks for the - # existence of inputdata in the local inputdata directory and - # attempts to download data from the server if it's needed and - # missing. - if get_refcase and run_type != "startup" and not continue_run: - din_loc_root = self.get_value("DIN_LOC_ROOT") - run_refdate = self.get_value("RUN_REFDATE") - run_refcase = self.get_value("RUN_REFCASE") - run_refdir = self.get_value("RUN_REFDIR") - rundir = self.get_value("RUNDIR") - - if os.path.isabs(run_refdir): - refdir = run_refdir - expect(os.path.isdir(refdir), "Reference case directory {} does not exist or is not readable".format(refdir)) - - else: - refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate) - if not os.path.isdir(refdir): - logger.warning("Refcase not found in {}, will attempt to download from inputdata".format(refdir)) - with open(os.path.join("Buildconf","refcase.input_data_list"),"w") as fd: - fd.write("refdir = {}{}".format(refdir, os.sep)) - if input_data_root is None: - input_data_root = din_loc_root - if data_list_dir is None: - data_list_dir = "Buildconf" - success = _downloadfromserver(self, input_data_root=input_data_root, data_list_dir=data_list_dir) - expect(success, "Could not download refcase from any server") - - logger.info(" - Prestaging REFCASE ({}) to {}".format(refdir, rundir)) - - # prestage the reference case's files. 
- - if (not os.path.exists(rundir)): - logger.debug("Creating run directory: {}".format(rundir)) - os.makedirs(rundir) - rpointerfile = None - # copy the refcases' rpointer files to the run directory - for rpointerfile in glob.iglob(os.path.join("{}","*rpointer*").format(refdir)): - logger.info("Copy rpointer {}".format(rpointerfile)) - safe_copy(rpointerfile, rundir) - expect(rpointerfile,"Reference case directory {} does not contain any rpointer files".format(refdir)) - # link everything else - - for rcfile in glob.iglob(os.path.join(refdir,"*")): - rcbaseline = os.path.basename(rcfile) - if not os.path.exists("{}/{}".format(rundir, rcbaseline)): - logger.info("Staging file {}".format(rcfile)) - os.symlink(rcfile, "{}/{}".format(rundir, rcbaseline)) - # Backward compatibility, some old refcases have cam2 in the name - # link to local cam file. - for cam2file in glob.iglob(os.path.join("{}","*.cam2.*").format(rundir)): - camfile = cam2file.replace("cam2", "cam") - os.symlink(cam2file, camfile) - elif not get_refcase and run_type != "startup": - logger.info("GET_REFCASE is false, the user is expected to stage the refcase to the run directory.") - if os.path.exists(os.path.join("Buildconf","refcase.input_data_list")): - os.remove(os.path.join("Buildconf","refcase.input_data_list")) - return True - -def check_input_data(case, protocol="svn", address=None, input_data_root=None, data_list_dir="Buildconf", - download=False, user=None, passwd=None, chksum=False): - """ - For a given case check for the relevant input data as specified in data_list_dir/*.input_data_list - in the directory input_data_root, if not found optionally download it using the servers specified - in config_inputdata.xml. If a chksum file is available compute the chksum and compare it to that - in the file. 
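[Editor's illustration, not part of the diff] The loop further down in check_input_data reads *.input_data_list files whose entries have the form "description = path". A simplified standalone sketch of that parsing, with a hypothetical file name:

    # Illustrative sketch only: parse a Buildconf/*.input_data_list file into
    # (description, path) pairs, skipping blanks, comments, and datapath keys,
    # as the real loop below does.
    def parse_input_data_list(path):
        entries = []
        with open(path) as fd:
            for line in fd:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                description, _, full_path = line.partition("=")
                description, full_path = description.strip(), full_path.strip()
                if description.endswith("datapath") or not full_path:
                    continue
                entries.append((description, full_path))
        return entries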
- Return True if no files missing - """ - case.load_env(reset=True) - rundir = case.get_value("RUNDIR") - # Fill in defaults as needed - input_data_root = case.get_value("DIN_LOC_ROOT") if input_data_root is None else input_data_root - - expect(os.path.isdir(data_list_dir), "Invalid data_list_dir directory: '{}'".format(data_list_dir)) - - data_list_files = find_files(data_list_dir, "*.input_data_list") - expect(data_list_files, "No .input_data_list files found in dir '{}'".format(data_list_dir)) - - no_files_missing = True - if download: - if protocol not in vars(CIME.Servers): - logger.warning("Client protocol {} not enabled".format(protocol)) - return False - logger.info("Using protocol {} with user {} and passwd {}".format(protocol, user, passwd)) - if protocol == "svn": - server = CIME.Servers.SVN(address, user, passwd) - elif protocol == "gftp": - server = CIME.Servers.GridFTP(address, user, passwd) - elif protocol == "ftp": - server = CIME.Servers.FTP(address, user, passwd) - elif protocol == "wget": - server = CIME.Servers.WGET(address, user, passwd) - else: - expect(False, "Unsupported inputdata protocol: {}".format(protocol)) - - for data_list_file in data_list_files: - logging.info("Loading input file list: '{}'".format(data_list_file)) - with open(data_list_file, "r") as fd: - lines = fd.readlines() - - for line in lines: - line = line.strip() - if (line and not line.startswith("#")): - tokens = line.split('=') - description, full_path = tokens[0].strip(), tokens[1].strip() - if description.endswith('datapath'): - continue - if(full_path): - # expand xml variables - full_path = case.get_resolved_value(full_path) - rel_path = full_path.replace(input_data_root, "") - model = os.path.basename(data_list_file).split('.')[0] - - if ("/" in rel_path and rel_path == full_path): - # User pointing to a file outside of input_data_root, we cannot determine - # rel_path, and so cannot download the file. 
If it already exists, we can - # proceed - if not os.path.exists(full_path): - logging.warning("Model {} missing file {} = '{}'".format(model, description, full_path)) - if download: - logging.warning(" Cannot download file since it lives outside of the input_data_root '{}'".format(input_data_root)) - no_files_missing = False - else: - logging.debug(" Found input file: '{}'".format(full_path)) - else: - # There are some special values of rel_path that - # we need to ignore - some of the component models - # set things like 'NULL' or 'same_as_TS' - - # basically if rel_path does not contain '/' (a - # directory tree) you can assume it's a special - # value and ignore it (perhaps with a warning) - isdirectory=rel_path.endswith(os.sep) - - if ("/" in rel_path and not os.path.exists(full_path)): - logger.warning(" Model {} missing file {} = '{}'".format(model, description, full_path)) - no_files_missing = False - - if (download): - no_files_missing = _download_if_in_repo(server, - input_data_root, rel_path.strip(os.sep), - isdirectory=isdirectory) - if no_files_missing: - verify_chksum(input_data_root, rundir, rel_path.strip(os.sep), isdirectory) - else: - if chksum: - verify_chksum(input_data_root, rundir, rel_path.strip(os.sep), isdirectory) - logger.info("Chksum passed for file {}".format(os.path.join(input_data_root,rel_path))) - logging.debug(" Already had input file: '{}'".format(full_path)) - else: - model = os.path.basename(data_list_file).split('.')[0] - logging.warning("Model {} no file specified for {}".format(model, description)) - - return no_files_missing - -def verify_chksum(input_data_root, rundir, filename, isdirectory): - """ - For file in filename perform a chksum and compare the result to that stored in - the local checksumfile, if isdirectory chksum all files in the directory of form *.* - """ - hashfile = os.path.join(rundir, local_chksum_file) - if not chksum_hash: - if not os.path.isfile(hashfile): - logger.warning("Failed to find or download file {}".format(hashfile)) - return - - with open(hashfile) as fd: - lines = fd.readlines() - for line in lines: - fchksum, fname = line.split() - if fname in chksum_hash: - expect(chksum_hash[fname] == fchksum, " Inconsistent hashes in chksum for file {}".format(fname)) - else: - chksum_hash[fname] = fchksum - - if isdirectory: - filenames = glob.glob(os.path.join(filename,"*.*")) - else: - filenames = [filename] - for fname in filenames: - if not os.sep in fname: - continue - chksum = md5(os.path.join(input_data_root, fname)) - if chksum_hash: - if not fname in chksum_hash: - logger.warning("Did not find hash for file {} in chksum file {}".format(filename, hashfile)) - else: - expect(chksum == chksum_hash[fname], - "chksum mismatch for file {} expected {} found {}". - format(os.path.join(input_data_root,fname),chksum, chksum_hash[fname])) - -def md5(fname): - """ - performs an md5 sum one chunk at a time to avoid memory issues with large files. 
- """ - hash_md5 = hashlib.md5() - with open(fname, "rb") as f: - for chunk in iter(lambda: f.read(4096), b""): - hash_md5.update(chunk) - return hash_md5.hexdigest() diff --git a/scripts/lib/CIME/case/check_lockedfiles.py b/scripts/lib/CIME/case/check_lockedfiles.py deleted file mode 100644 index 3a4972fc28a..00000000000 --- a/scripts/lib/CIME/case/check_lockedfiles.py +++ /dev/null @@ -1,125 +0,0 @@ -""" -API for checking locked files -check_lockedfile, check_lockedfiles, check_pelayouts_require_rebuild are members -of Class case.py from file case.py -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.env_build import EnvBuild -from CIME.XML.env_case import EnvCase -from CIME.XML.env_mach_pes import EnvMachPes -from CIME.XML.env_batch import EnvBatch -from CIME.locked_files import unlock_file, LOCKED_DIR -from CIME.build import clean - -logger = logging.getLogger(__name__) - -import glob, six - -def check_pelayouts_require_rebuild(self, models): - """ - Create if we require a rebuild, expects cwd is caseroot - """ - locked_pes = os.path.join(LOCKED_DIR, "env_mach_pes.xml") - if os.path.exists(locked_pes): - # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined - # for any component - env_mach_pes_locked = EnvMachPes(infile=locked_pes, components=self.get_values("COMP_CLASSES")) - for comp in models: - if self.get_value("{}_PE_CHANGE_REQUIRES_REBUILD".format(comp)): - # Changing these values in env_mach_pes.xml will force - # you to clean the corresponding component - old_tasks = env_mach_pes_locked.get_value("NTASKS_{}".format(comp)) - old_threads = env_mach_pes_locked.get_value("NTHRDS_{}".format(comp)) - old_inst = env_mach_pes_locked.get_value("NINST_{}".format(comp)) - - new_tasks = self.get_value("NTASKS_{}".format(comp)) - new_threads = self.get_value("NTHRDS_{}".format(comp)) - new_inst = self.get_value("NINST_{}".format(comp)) - - if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst: - logging.warning("{} pe change requires clean build {} {}".format(comp, old_tasks, new_tasks)) - cleanflag = comp.lower() - clean(self, cleanlist=[cleanflag]) - - unlock_file("env_mach_pes.xml", self.get_value("CASEROOT")) - -def check_lockedfile(self, filebase): - caseroot = self.get_value("CASEROOT") - - cfile = os.path.join(caseroot, filebase) - lfile = os.path.join(caseroot, "LockedFiles", filebase) - components = self.get_values("COMP_CLASSES") - if os.path.isfile(cfile): - objname = filebase.split('.')[0] - if objname == "env_build": - f1obj = self.get_env('build') - f2obj = EnvBuild(caseroot, lfile, read_only=True) - elif objname == "env_mach_pes": - f1obj = self.get_env('mach_pes') - f2obj = EnvMachPes(caseroot, lfile, components=components, read_only=True) - elif objname == "env_case": - f1obj = self.get_env('case') - f2obj = EnvCase(caseroot, lfile, read_only=True) - elif objname == "env_batch": - f1obj = self.get_env('batch') - f2obj = EnvBatch(caseroot, lfile, read_only=True) - else: - logging.warning("Locked XML file '{}' is not current being handled".format(filebase)) - return - - diffs = f1obj.compare_xml(f2obj) - if diffs: - - logging.warning("File {} has been modified".format(lfile)) - toggle_build_status = False - for key in diffs.keys(): - if key != "BUILD_COMPLETE": - logging.warning(" found difference in {} : case {} locked {}" - .format(key, repr(diffs[key][0]), repr(diffs[key][1]))) - toggle_build_status = True - if objname == "env_mach_pes": - expect(False, "Invoke case.setup --reset ") - elif objname == "env_case": - 
expect(False, "Cannot change file env_case.xml, please" - " recover the original copy from LockedFiles") - elif objname == "env_build": - if toggle_build_status: - logging.warning("Setting build complete to False") - self.set_value("BUILD_COMPLETE", False) - if "PIO_VERSION" in diffs: - self.set_value("BUILD_STATUS", 2) - logging.critical("Changing PIO_VERSION requires running " - "case.build --clean-all and rebuilding") - else: - self.set_value("BUILD_STATUS", 1) - - elif objname == "env_batch": - expect(False, "Batch configuration has changed, please run case.setup --reset") - else: - expect(False, "'{}' diff was not handled".format(objname)) - -def check_lockedfiles(self, skip=None): - """ - Check that all lockedfiles match what's in case - - If caseroot is not specified, it is set to the current working directory - """ - caseroot = self.get_value("CASEROOT") - lockedfiles = glob.glob(os.path.join(caseroot, "LockedFiles", "*.xml")) - skip = [] if skip is None else skip - skip = [skip] if isinstance(skip, six.string_types) else skip - for lfile in lockedfiles: - fpart = os.path.basename(lfile) - # ignore files used for tests such as env_mach_pes.ERP1.xml by looking for extra dots in the name - if fpart.count('.') > 1: - continue - - do_skip = False - for item in skip: - if fpart.startswith(item): - do_skip = True - break - - if not do_skip: - self.check_lockedfile(fpart) diff --git a/scripts/lib/CIME/case/preview_namelists.py b/scripts/lib/CIME/case/preview_namelists.py deleted file mode 100644 index 60a719415eb..00000000000 --- a/scripts/lib/CIME/case/preview_namelists.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -API for preview namelist -create_dirs and create_namelists are members of Class case from file case.py -""" - -from CIME.XML.standard_module_setup import * -from CIME.utils import run_sub_or_cmd, safe_copy -import time, glob -logger = logging.getLogger(__name__) - -def create_dirs(self): - """ - Make necessary directories for case - """ - # Get data from XML - exeroot = self.get_value("EXEROOT") - libroot = self.get_value("LIBROOT") - incroot = self.get_value("INCROOT") - rundir = self.get_value("RUNDIR") - caseroot = self.get_value("CASEROOT") - docdir = os.path.join(caseroot, "CaseDocs") - dirs_to_make = [] - models = self.get_values("COMP_CLASSES") - for model in models: - dirname = model.lower() - dirs_to_make.append(os.path.join(exeroot, dirname, "obj")) - - dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir]) - - for dir_to_make in dirs_to_make: - if (not os.path.isdir(dir_to_make)): - try: - logger.debug("Making dir '{}'".format(dir_to_make)) - os.makedirs(dir_to_make) - except OSError as e: - # In a multithreaded situation, we may have lost a race to create this dir. - # We do not want to crash if that's the case. 
- if not os.path.isdir(dir_to_make): - expect(False, "Could not make directory '{}', error: {}".format(dir_to_make, e)) - - # As a convenience write the location of the case directory in the bld and run directories - for dir_ in (exeroot, rundir): - with open(os.path.join(dir_,"CASEROOT"),"w+") as fd: - fd.write(caseroot+"\n") - -def create_namelists(self, component=None): - """ - Create component namelists - """ - self.flush() - - create_dirs(self) - - casebuild = self.get_value("CASEBUILD") - caseroot = self.get_value("CASEROOT") - rundir = self.get_value("RUNDIR") - - docdir = os.path.join(caseroot, "CaseDocs") - - # Load modules - self.load_env() - - self.stage_refcase() - - logger.info("Creating component namelists") - - # Create namelists - must have cpl last in the list below - # Note - cpl must be last in the loop below so that in generating its namelist, - # it can use xml vars potentially set by other component's buildnml scripts - models = self.get_values("COMP_CLASSES") - models += [models.pop(0)] - for model in models: - model_str = model.lower() - logger.info(" {} {}".format(time.strftime("%Y-%m-%d %H:%M:%S"),model_str)) - config_file = self.get_value("CONFIG_{}_FILE".format(model_str.upper())) - config_dir = os.path.dirname(config_file) - if model_str == "cpl": - compname = "drv" - else: - compname = self.get_value("COMP_{}".format(model_str.upper())) - if component is None or component == model_str: - # first look in the case SourceMods directory - cmd = os.path.join(caseroot, "SourceMods", "src."+compname, "buildnml") - if os.path.isfile(cmd): - logger.warning("\nWARNING: Using local buildnml file {}\n".format(cmd)) - else: - # otherwise look in the component config_dir - cmd = os.path.join(config_dir, "buildnml") - expect(os.path.isfile(cmd), "Could not find buildnml file for component {}".format(compname)) - run_sub_or_cmd(cmd, (caseroot), "buildnml", (self, caseroot, compname), case=self) - - logger.info("Finished creating component namelists") - - # Save namelists to docdir - if (not os.path.isdir(docdir)): - os.makedirs(docdir) - try: - with open(os.path.join(docdir, "README"), "w") as fd: - fd.write(" CESM Resolved Namelist Files\n For documentation only DO NOT MODIFY\n") - except (OSError, IOError) as e: - expect(False, "Failed to write {}/README: {}".format(docdir, e)) - - for cpglob in ["*_in_[0-9]*", "*modelio*", "*_in", "nuopc.runconfig", - "*streams*txt*", "*stxt", "*maps.rc", "*cism.config*", "nuopc.runseq"]: - for file_to_copy in glob.glob(os.path.join(rundir, cpglob)): - logger.debug("Copy file from '{}' to '{}'".format(file_to_copy, docdir)) - safe_copy(file_to_copy, docdir) - - # Copy over chemistry mechanism docs if they exist - if (os.path.isdir(os.path.join(casebuild, "camconf"))): - for file_to_copy in glob.glob(os.path.join(casebuild, "camconf", "*chem_mech*")): - safe_copy(file_to_copy, docdir) diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py deleted file mode 100644 index f6854430efb..00000000000 --- a/scripts/lib/CIME/code_checker.py +++ /dev/null @@ -1,129 +0,0 @@ -""" -Libraries for checking python code with pylint -""" - -from CIME.XML.standard_module_setup import * - -from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable, get_cime_default_driver - -from multiprocessing.dummy import Pool as ThreadPool -#pylint: disable=import-error -from distutils.spawn import find_executable - -logger = logging.getLogger(__name__) - 
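[Editor's illustration, not part of the diff] The code checker below runs one pylint invocation per file across a small thread pool (see check_code further down). A generic sketch of that fan-out, with a placeholder lint function standing in for _run_pylint:

    # Illustrative sketch of the thread-pool fan-out used by check_code below;
    # run_one() is a placeholder for the real _run_pylint(on_file, interactive).
    from multiprocessing.dummy import Pool as ThreadPool

    def run_checks(files, num_procs=10):
        def run_one(path):
            # Pretend check: report the file as clean. The real code shells
            # out to pylint and returns (path, captured output).
            return (path, "")

        num_procs = min(num_procs, max(len(files), 1))  # no more threads than files
        pool = ThreadPool(num_procs)
        try:
            results = pool.map(run_one, files)
        finally:
            pool.close()
            pool.join()
        return dict(results)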
-############################################################################### -def _run_pylint(on_file, interactive): -############################################################################### - pylint = find_executable("pylint") - - cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" - cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" - cmd_options += ",logging-format-interpolation,no-name-in-module" - cimeroot = get_cime_root() - - if "scripts/Tools" in on_file: - cmd_options +=",relative-import" - - # add init-hook option - cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\",\"%s\"))'"%\ - (os.path.join(cimeroot,"scripts","lib"), - os.path.join(cimeroot,"scripts","Tools"), - os.path.join(cimeroot,"scripts","fortran_unit_testing","python"), - os.path.join(cimeroot,"src","drivers","nuopc","cime_config","runseq")) - - cmd = "%s %s %s" % (pylint, cmd_options, on_file) - logger.debug("pylint command is %s"%cmd) - stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot) - if stat != 0: - if interactive: - logger.info("File %s has pylint problems, please fix\n Use command: %s" % (on_file, cmd)) - logger.info(out + "\n" + err) - return (on_file, out + "\n" + err) - else: - if interactive: - logger.info("File %s has no pylint problems" % on_file) - return (on_file, "") - -############################################################################### -def _matches(file_path, file_ends): -############################################################################### - for file_end in file_ends: - if file_path.endswith(file_end): - return True - - return False - -############################################################################### -def _should_pylint_skip(filepath): -############################################################################### - # TODO - get rid of this - list_of_directories_to_ignore = ("xmlconvertors", "pointclm", "point_clm", "tools", "machines", "apidocs", "doc") - for dir_to_skip in list_of_directories_to_ignore: - if dir_to_skip + "/" in filepath: - return True - if filepath == "scripts/lib/six.py": - return True - # intended to be temporary, file needs update - if filepath.endswith("archive_metadata") or filepath.endswith("pgn.py"): - return True - - return False - -############################################################################### -def get_all_checkable_files(): -############################################################################### - cimeroot = get_cime_root() - all_git_files = run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False).splitlines() - if get_cime_default_driver() == "nuopc": - nuopc_git_files = run_cmd_no_fail("git ls-files", from_dir=os.path.join(cimeroot,"src","drivers","nuopc"), verbose=False).splitlines() - all_git_files.extend([os.path.join("src","drivers","nuopc",_file) for _file in nuopc_git_files]) - files_to_test = [item for item in all_git_files - if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))] - - return files_to_test - -############################################################################### -def check_code(files, num_procs=10, interactive=False): -############################################################################### - """ - Check all python files in the given directory - - Returns True if all files had no problems - """ - # Get list of files to check, we look to see if user-provided file argument - # is a valid 
file, if not, we search the repo for a file with similar name. - files_to_check = [] - if files: - repo_files = get_all_checkable_files() - for filearg in files: - if os.path.exists(filearg): - files_to_check.append(os.path.abspath(filearg)) - else: - found = False - for repo_file in repo_files: - if repo_file.endswith(filearg): - found = True - files_to_check.append(repo_file) # could have multiple matches - - if not found: - logger.warning("Could not find file matching argument '%s'" % filearg) - else: - # Check every python file - files_to_check = get_all_checkable_files() - - if "scripts/lib/six.py" in files_to_check: - files_to_check.remove("scripts/lib/six.py") - logger.info("Not checking contributed file six.py") - - expect(len(files_to_check) > 0, "No matching files found") - - # No point in using more threads than files - if len(files_to_check) < num_procs: - num_procs = len(files_to_check) - - pool = ThreadPool(num_procs) - results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check) - pool.close() - pool.join() - return dict(results) diff --git a/scripts/lib/CIME/compare_namelists.py b/scripts/lib/CIME/compare_namelists.py deleted file mode 100644 index ab559b88d19..00000000000 --- a/scripts/lib/CIME/compare_namelists.py +++ /dev/null @@ -1,550 +0,0 @@ -import os, re, logging, six - -from collections import OrderedDict -from CIME.utils import expect, CIMEError -logger=logging.getLogger(__name__) - -# pragma pylint: disable=unsubscriptable-object - -############################################################################### -def _normalize_lists(value_str): -############################################################################### - """ - >>> _normalize_lists("'one two' 'three four'") - "'one two','three four'" - >>> _normalize_lists("'one two' 'three four'") - "'one two','three four'" - >>> _normalize_lists("'one two' , 'three four'") - "'one two','three four'" - >>> _normalize_lists("'one two'") - "'one two'" - >>> _normalize_lists("1 2 3, 4 , 5") - '1,2,3,4,5' - """ - result = "" - inside_quotes = False - idx = 0 - while idx < len(value_str): - value_c = value_str[idx] - if value_c == "'": - inside_quotes = not inside_quotes - result += value_c - idx += 1 - elif value_c.isspace() or value_c == ",": - if inside_quotes: - result += value_c - idx += 1 - else: - result += "," - idx += 1 - while idx < len(value_str): - value_c = value_str[idx] - if not value_c.isspace() and value_c != ",": - break - idx += 1 - else: - result += value_c - idx += 1 - - return result - -############################################################################### -def _interpret_value(value_str, filename): -############################################################################### - comma_re = re.compile(r'\s*,\s*') - dict_re = re.compile(r"^'(\S+)\s*->\s*(\S+)\s*'") - - value_str = _normalize_lists(value_str) - - tokens = [item.strip() for item in comma_re.split(value_str) if item.strip() != ""] - if ("->" in value_str): - # dict - rv = OrderedDict() - for token in tokens: - m = dict_re.match(token) - expect(m is not None, "In file '{}', Dict entry '{}' does not match expected format".format(filename, token)) - k, v = m.groups() - rv[k] = _interpret_value(v, filename) - - return rv - else: - new_tokens = [] - for token in tokens: - if "*" in token: - try: - # the following ensure that the following to namelist settings trigger a match - # nmlvalue = 1,1,1 versus nmlvalue = 3*1 - sub_tokens = [item.strip() for item in token.split("*")] - expect(len(sub_tokens) == 2, 
"Incorrect usage of multiplication in token '{}'".format(token)) - new_tokens.extend([sub_tokens[1]] * int(sub_tokens[0])) - except Exception: - # User probably did not intend to use the * operator as a namelist multiplier - new_tokens.append(token) - else: - new_tokens.append(token) - - if "," in value_str or len(new_tokens) > 1: - return new_tokens - else: - return new_tokens[0] - -############################################################################### -def _parse_namelists(namelist_lines, filename): -############################################################################### - """ - Return data in form: {namelist -> {key -> value} }. - value can be an int, string, list, or dict - - >>> teststr = '''&nml - ... val = 'foo' - ... aval = 'one','two', 'three' - ... maval = 'one', 'two', - ... 'three', 'four' - ... dval = 'one->two', 'three -> four' - ... mdval = 'one -> two', - ... 'three -> four', - ... 'five -> six' - ... nval = 1850 - ... / - ... - ... # Hello - ... - ... &nml2 - ... val2 = .false. - ... / - ... ''' - >>> _parse_namelists(teststr.splitlines(), 'foo') - OrderedDict([('nml', OrderedDict([('val', "'foo'"), ('aval', ["'one'", "'two'", "'three'"]), ('maval', ["'one'", "'two'", "'three'", "'four'"]), ('dval', OrderedDict([('one', 'two'), ('three', 'four')])), ('mdval', OrderedDict([('one', 'two'), ('three', 'four'), ('five', 'six')])), ('nval', '1850')])), ('nml2', OrderedDict([('val2', '.false.')]))]) - - >>> teststr = '''&fire_emis_nl - ... fire_emis_factors_file = 'fire_emis_factors_c140116.nc' - ... fire_emis_specifier = 'bc_a1 = BC', 'pom_a1 = 1.4*OC', 'pom_a2 = A*B*C', 'SO2 = SO2' - ... / - ... ''' - >>> _parse_namelists(teststr.splitlines(), 'foo') - OrderedDict([('fire_emis_nl', OrderedDict([('fire_emis_factors_file', "'fire_emis_factors_c140116.nc'"), ('fire_emis_specifier', ["'bc_a1 = BC'", "'pom_a1 = 1.4*OC'", "'pom_a2 = A*B*C'", "'SO2 = SO2'"])]))]) - - >>> _parse_namelists('blah', 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: File 'foo' does not appear to be a namelist file, skipping - - >>> teststr = '''&nml - ... val = 'one', 'two', - ... val2 = 'three' - ... /''' - >>> _parse_namelists(teststr.splitlines(), 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: In file 'foo', Incomplete multiline variable: 'val' - - >>> teststr = '''&nml - ... val = 'one', 'two', - ... /''' - >>> _parse_namelists(teststr.splitlines(), 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: In file 'foo', Incomplete multiline variable: 'val' - - >>> teststr = '''&nml - ... val = 'one', 'two', - ... 'three -> four' - ... /''' - >>> _parse_namelists(teststr.splitlines(), 'foo') # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: In file 'foo', multiline list variable 'val' had dict entries - - >>> teststr = '''&nml - ... val = 2, 2*13 - ... /''' - >>> _parse_namelists(teststr.splitlines(), 'foo') - OrderedDict([('nml', OrderedDict([('val', ['2', '13', '13'])]))]) - - >>> teststr = '''&nml - ... val = 2 2 3 - ... /''' - >>> _parse_namelists(teststr.splitlines(), 'foo') - OrderedDict([('nml', OrderedDict([('val', ['2', '2', '3'])]))]) - - >>> teststr = '''&nml - ... val = 'a brown cow' 'a red hen' - ... 
/''' - >>> _parse_namelists(teststr.splitlines(), 'foo') - OrderedDict([('nml', OrderedDict([('val', ["'a brown cow'", "'a red hen'"])]))]) - """ - - comment_re = re.compile(r'^[#!]') - namelist_re = re.compile(r'^&(\S+)$') - name_re = re.compile(r"^([^\s=']+)\s*=\s*(.+)$") - rcline_re = re.compile(r"^([^&\s':]+)\s*:\s*(.+)$") - - rv = OrderedDict() - current_namelist = None - multiline_variable = None # (name, value) - for line in namelist_lines: - - line = line.strip() - line = line.replace('"',"'") - - logger.debug("Parsing line: '{}'".format(line)) - - if (line == "" or comment_re.match(line) is not None): - logger.debug(" Line was whitespace or comment, skipping.") - continue - - rcline = rcline_re.match(line) - if (rcline is not None): - # Defining a variable (AKA name) - name, value = rcline.groups() - - - logger.debug(" Parsing variable '{}' with data '{}'".format(name, value)) - - if 'seq_maps.rc' not in rv: - rv['seq_maps.rc'] = OrderedDict() - - expect(name not in rv['seq_maps.rc'], "In file '{}', Duplicate name: '{}'".format(filename, name)) - rv['seq_maps.rc'][name] = value - - elif (current_namelist is None): - # Must start a namelist - expect(multiline_variable is None, - "In file '{}', Incomplete multiline variable: '{}'".format(filename, multiline_variable[0] if multiline_variable is not None else "")) - - # Unfortunately, other tools were using the old compare_namelists.pl script - # to compare files that are not namelist files. We need a special error - # to signify this event - if (namelist_re.match(line) is None): - expect(rv != OrderedDict(), - "File '{}' does not appear to be a namelist file, skipping".format(filename)) - expect(False, - "In file '{}', Line '{}' did not begin a namelist as expected".format(filename, line)) - - current_namelist = namelist_re.match(line).groups()[0] - expect(current_namelist not in rv, - "In file '{}', Duplicate namelist '{}'".format(filename, current_namelist)) - - rv[current_namelist] = OrderedDict() - - logger.debug(" Starting namelist '{}'".format(current_namelist)) - - elif (line == "/"): - # Ends a namelist - logger.debug(" Ending namelist '{}'".format(current_namelist)) - - expect(multiline_variable is None, - "In file '{}', Incomplete multiline variable: '{}'".format(filename, multiline_variable[0] if multiline_variable is not None else "")) - - current_namelist = None - - elif (name_re.match(line)): - # Defining a variable (AKA name) - name, value_str = name_re.match(line).groups() - - logger.debug(" Parsing variable '{}' with data '{}'".format(name, value_str)) - - expect(multiline_variable is None, - "In file '{}', Incomplete multiline variable: '{}'".format(filename, multiline_variable[0] if multiline_variable is not None else "")) - expect(name not in rv[current_namelist], "In file '{}', Duplicate name: '{}'".format(filename, name)) - - real_value = _interpret_value(value_str, filename) - - rv[current_namelist][name] = real_value - logger.debug(" Adding value: {}".format(real_value)) - - if (line.endswith(",")): - # Value will continue on in subsequent lines - multiline_variable = (name, real_value) - - logger.debug(" Var is multiline...") - - elif (multiline_variable is not None): - # Continuation of list or dict variable - current_value = multiline_variable[1] - logger.debug(" Continuing multiline variable '{}' with data '{}'".format(multiline_variable[0], line)) - - real_value = _interpret_value(line, filename) - if (type(current_value) is list): - expect(type(real_value) is not OrderedDict, "In file '{}', multiline 
list variable '{}' had dict entries".format(filename, multiline_variable[0])) - real_value = real_value if type(real_value) is list else [real_value] - current_value.extend(real_value) - - elif (type(current_value) is OrderedDict): - expect(type(real_value) is OrderedDict, "In file '{}', multiline dict variable '{}' had non-dict entries".format(filename, multiline_variable[0])) - current_value.update(real_value) - - else: - expect(False, "In file '{}', Continuation should have been for list or dict, instead it was: '{}'".format(filename, type(current_value))) - - logger.debug(" Adding value: {}".format(real_value)) - - if (not line.endswith(",")): - # Completed - multiline_variable = None - - logger.debug(" Terminating multiline variable") - - else: - expect(False, "In file '{}', Unrecognized line: '{}'".format(filename, line)) - - return rv - -############################################################################### -def _normalize_string_value(name, value, case): -############################################################################### - """ - Some of the string in namelists will contain data that's inherently prone - to diffs, like file paths, etc. This function attempts to normalize that - data so that it will not cause diffs. - """ - # Any occurance of case must be normalized because test-ids might not match - if (case is not None): - case_re = re.compile(r'{}[.]([GC]+)[.]([^./\s]+)'.format(case)) - value = case_re.sub("{}.ACTION.TESTID".format(case), value) - - if (name in ["runid", "model_version", "username"]): - # Don't even attempt to diff these, we don't care - return name.upper() - elif (".log." in value): - # Remove the part that's prone to diff - components = value.split(".") - return os.path.basename(".".join(components[0:-1])) - elif (":" in value): - items = value.split(":") - items = [_normalize_string_value(name, item, case) for item in items] - return ":".join(items) - elif ("/" in value): - # File path, just return the basename unless its a seq_maps.rc mapping - # mapname or maptype - if "mapname" not in name and "maptype" not in name: - return os.path.basename(value) - else: - return value - else: - return value - -############################################################################### -def _compare_values(name, gold_value, comp_value, case): -############################################################################### - """ - Compare values for a specific variable in a namelist. 
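[Editor's illustration, not part of the diff] The normalization in _normalize_string_value above strips diff-prone details (case ids, log timestamps, absolute paths) before comparing strings. A reduced standalone sketch of just the path and colon-list handling, using made-up values:

    # Illustrative reduction of the string normalization above: absolute paths
    # compare by basename and ":"-separated lists are normalized element-wise.
    import os

    def normalize_value(value):
        if ":" in value:
            return ":".join(normalize_value(item) for item in value.split(":"))
        if "/" in value:
            return os.path.basename(value)
        return value

    # Two values differing only in their install prefix compare equal.
    assert normalize_value("/projects/ccsm/inputdata/foo.nc") == \
           normalize_value("/something/else/inputdata/foo.nc")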
- - Returns comments - - Note there will only be comments if values did not match - """ - comments = "" - if (type(gold_value) != type(comp_value)): - comments += " variable '{}' did not have expected type '{}', instead is type '{}'\n".format(name, type(gold_value), type(comp_value)) - return comments - - if (type(gold_value) is list): - # Note, list values remain order sensitive - for idx, gold_value_list_item in enumerate(gold_value): - if (idx < len(comp_value)): - comments += _compare_values("{} list item {:d}".format(name, idx), - gold_value_list_item, comp_value[idx], case) - else: - comments += " list variable '{}' missing value {}\n".format(name, gold_value_list_item) - - if (len(comp_value) > len(gold_value)): - for comp_value_list_item in comp_value[len(gold_value):]: - comments += " list variable '{}' has extra value {}\n".format(name, comp_value_list_item) - - elif (type(gold_value) is OrderedDict): - for key, gold_value_dict_item in gold_value.items(): - if (key in comp_value): - comments += _compare_values("{} dict item {}".format(name, key), - gold_value_dict_item, comp_value[key], case) - else: - comments += " dict variable '{}' missing key {} with value {}\n".format(name, key, gold_value_dict_item) - - for key in comp_value: - if (key not in gold_value): - comments += " dict variable '{}' has extra key {} with value {}\n".format(name, key, comp_value[key]) - - else: - expect(isinstance(gold_value, six.string_types), "Unexpected type found: '{}'".format(type(gold_value))) - norm_gold_value = _normalize_string_value(name, gold_value, case) - norm_comp_value = _normalize_string_value(name, comp_value, case) - - if (norm_gold_value != norm_comp_value): - comments += " BASE: {} = {}\n".format(name, norm_gold_value) - comments += " COMP: {} = {}\n".format(name, norm_comp_value) - - return comments - -############################################################################### -def _compare_namelists(gold_namelists, comp_namelists, case): -############################################################################### - """ - Compare two namelists. Print diff information if any. - Returns comments - Note there will only be comments if the namelists were not an exact match - - Expect args in form: {namelist -> {key -> value} }. - value can be an int, string, list, or dict - - >>> teststr = '''&nml - ... val = 'foo' - ... aval = 'one','two', 'three' - ... maval = 'one', 'two', 'three', 'four' - ... dval = 'one -> two', 'three -> four' - ... mdval = 'one -> two', 'three -> four', 'five -> six' - ... nval = 1850 - ... / - ... &nml2 - ... val2 = .false. - ... / - ... ''' - >>> _compare_namelists(_parse_namelists(teststr.splitlines(), 'foo'), _parse_namelists(teststr.splitlines(), 'bar'), None) - '' - >>> teststr1 = '''&nml1 - ... val11 = 'foo' - ... / - ... &nml2 - ... val21 = 'foo' - ... val22 = 'foo', 'bar', 'baz' - ... val23 = 'baz' - ... val24 = '1 -> 2', '2 -> 3', '3 -> 4' - ... / - ... &nml3 - ... val3 = .false. - ... /''' - >>> teststr2 = '''&nml01 - ... val11 = 'foo' - ... / - ... &nml2 - ... val21 = 'foo0' - ... val22 = 'foo', 'bar0', 'baz' - ... val230 = 'baz' - ... val24 = '1 -> 20', '2 -> 3', '30 -> 4' - ... / - ... &nml3 - ... val3 = .false. - ... 
/''' - >>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), None) - >>> print(comments) - Missing namelist: nml1 - Differences in namelist 'nml2': - BASE: val21 = 'foo' - COMP: val21 = 'foo0' - BASE: val22 list item 1 = 'bar' - COMP: val22 list item 1 = 'bar0' - missing variable: 'val23' - BASE: val24 dict item 1 = 2 - COMP: val24 dict item 1 = 20 - dict variable 'val24' missing key 3 with value 4 - dict variable 'val24' has extra key 30 with value 4 - found extra variable: 'val230' - Found extra namelist: nml01 - - - >>> teststr1 = '''&rad_cnst_nl - ... icecldoptics = 'mitchell' - ... logfile = 'cpl.log.150514-001533' - ... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-230221' - ... runid = 'FOO' - ... model_version = 'cam5_3_36' - ... username = 'jgfouca' - ... iceopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/iceoptics_c080917.nc' - ... liqcldoptics = 'gammadist' - ... liqopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc' - ... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+', - ... 'A:so4_a1:N:so4_c1:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+', - ... 'A:soa_a1:N:soa_c1:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/projects/ccsm/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+', - ... 'A:dst_a1:N:dst_c1:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', - ... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+', - ... 'A:so4_a2:N:so4_c2:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', - ... 'A:ncl_a2:N:ncl_c2:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=', - ... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', - ... 'A:ncl_a3:N:ncl_c3:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc' - ... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2', - ... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4', - ... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc', - ... 'M:mam3_mode2:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc' - ... /''' - >>> teststr2 = '''&rad_cnst_nl - ... icecldoptics = 'mitchell' - ... logfile = 'cpl.log.150514-2398745' - ... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-1274213' - ... runid = 'BAR' - ... model_version = 'cam5_3_36' - ... username = 'hudson' - ... iceopticsfile = '/something/else/inputdata/atm/cam/physprops/iceoptics_c080917.nc' - ... liqcldoptics = 'gammadist' - ... liqopticsfile = '/something/else/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc' - ... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+', - ... 
'A:so4_a1:N:so4_c1:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/something/else/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+', - ... 'A:soa_a1:N:soa_c1:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/something/else/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+', - ... 'A:dst_a1:N:dst_c1:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', - ... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+', - ... 'A:so4_a2:N:so4_c2:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', - ... 'A:ncl_a2:N:ncl_c2:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=', - ... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', - ... 'A:ncl_a3:N:ncl_c3:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc' - ... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2', - ... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4', - ... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/something/else/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc', - ... 'M:mam3_mode2:/something/else/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/something/else/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc' - ... /''' - >>> _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), 'ERB.f19_g16.B1850C5.sandiatoss3_intel') - '' - """ - different_namelists = OrderedDict() - for namelist, gold_names in gold_namelists.items(): - if (namelist not in comp_namelists): - different_namelists[namelist] = ["Missing namelist: {}\n".format(namelist)] - else: - comp_names = comp_namelists[namelist] - for name, gold_value in gold_names.items(): - if (name not in comp_names): - different_namelists.setdefault(namelist, []).append(" missing variable: '{}'\n".format(name)) - else: - comp_value = comp_names[name] - comments = _compare_values(name, gold_value, comp_value, case) - if comments != "": - different_namelists.setdefault(namelist, []).append(comments) - - for name in comp_names: - if (name not in gold_names): - different_namelists.setdefault(namelist, []).append(" found extra variable: '{}'\n".format(name)) - - for namelist in comp_namelists: - if (namelist not in gold_namelists): - different_namelists[namelist] = ["Found extra namelist: {}\n".format(namelist)] - - comments = "" - for namelist, nlcomment in different_namelists.items(): - if len(nlcomment) == 1: - comments += nlcomment[0] - else: - comments += "Differences in namelist '{}':\n".format(namelist) - comments += "".join(nlcomment) - - return comments - -############################################################################### -def compare_namelist_files(gold_file, compare_file, case=None): -############################################################################### - """ - Returns (is_match, comments) - """ - expect(os.path.exists(gold_file), "File not found: {}".format(gold_file)) - expect(os.path.exists(compare_file), "File not found: {}".format(compare_file)) - - gold_namelists = 
_parse_namelists(open(gold_file, "r").readlines(), gold_file) - comp_namelists = _parse_namelists(open(compare_file, "r").readlines(), compare_file) - comments = _compare_namelists(gold_namelists, comp_namelists, case) - return comments == "", comments - -############################################################################### -def is_namelist_file(file_path): -############################################################################### - try: - compare_namelist_files(file_path, file_path) - except CIMEError as e: - assert "does not appear to be a namelist file" in str(e), str(e) - return False - return True diff --git a/scripts/lib/CIME/compare_test_results.py b/scripts/lib/CIME/compare_test_results.py deleted file mode 100644 index a64e63bc62d..00000000000 --- a/scripts/lib/CIME/compare_test_results.py +++ /dev/null @@ -1,164 +0,0 @@ -import CIME.compare_namelists, CIME.simple_compare -from CIME.utils import expect, append_status, EnvironmentContext -from CIME.test_status import * -from CIME.hist_utils import compare_baseline, get_ts_synopsis -from CIME.case import Case - -import os, glob, logging - -############################################################################### -def append_status_cprnc_log(msg, logfile_name, test_dir): -############################################################################### - try: - append_status(msg, logfile_name, caseroot=test_dir) - except IOError: - pass - -############################################################################### -def compare_namelists(case, baseline_name, baseline_root, logfile_name): -############################################################################### - log_lvl = logging.getLogger().getEffectiveLevel() - logging.disable(logging.CRITICAL) - success = case.case_cmpgen_namelists(compare=True, compare_name=baseline_name, baseline_root=baseline_root, logfile_name=logfile_name) - logging.getLogger().setLevel(log_lvl) - return success - -############################################################################### -def compare_history(case, baseline_name, baseline_root, log_id): -############################################################################### - real_user = case.get_value("REALUSER") - with EnvironmentContext(USER=real_user): - baseline_full_dir = os.path.join(baseline_root, baseline_name, case.get_value("CASEBASEID")) - - outfile_suffix = "{}.{}".format(baseline_name, log_id) - try: - result, comments = compare_baseline(case, baseline_dir=baseline_full_dir, - outfile_suffix=outfile_suffix) - except IOError: - result, comments = compare_baseline(case, baseline_dir=baseline_full_dir, - outfile_suffix=None) - - return result, comments - -############################################################################### -def compare_test_results(baseline_name, baseline_root, test_root, compiler, test_id=None, compare_tests=None, namelists_only=False, hist_only=False): -############################################################################### - """ - Compares with baselines for all matching tests - - Outputs results for each test to stdout (one line per test); possible status - codes are: PASS, FAIL, SKIP. (A SKIP denotes a test that did not make it to - the run phase or a test for which the run phase did not pass: we skip - baseline comparisons in this case.) - - In addition, creates files named compare.log.BASELINE_NAME.TIMESTAMP in each - test directory, which contain more detailed output. Also creates - *.cprnc.out.BASELINE_NAME.TIMESTAMP files in each run directory. 
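[Editor's illustration, not part of the diff] A hedged usage sketch of the public comparison entry point defined in the deleted compare_namelists module above; the two file paths are hypothetical.

    # Illustrative usage only: compare two namelist files and report any
    # differences, per compare_namelist_files(gold_file, compare_file, case=None)
    # above. Paths are hypothetical.
    from CIME.compare_namelists import compare_namelist_files

    is_match, comments = compare_namelist_files("baseline/atm_in", "case/atm_in")
    if not is_match:
        print(comments)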
- - Returns True if all tests generated either PASS or SKIP results, False if - there was at least one FAIL result. - """ - test_id_glob = "*{}*".format(compiler) if test_id is None else "*{}".format(test_id) - test_status_files = glob.glob("{}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME)) - expect(test_status_files, "No matching test cases found in for {}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME)) - - # ID to use in the log file names, to avoid file name collisions with - # earlier files that may exist. - log_id = CIME.utils.get_timestamp() - - all_pass_or_skip = True - - for test_status_file in test_status_files: - test_dir = os.path.dirname(test_status_file) - ts = TestStatus(test_dir=test_dir) - test_name = ts.get_name() - if (compare_tests in [[], None] or CIME.utils.match_any(test_name, compare_tests)): - - if (not hist_only): - nl_compare_result = None - nl_compare_comment = "" - nl_result = ts.get_status(SETUP_PHASE) - if (nl_result is None): - nl_compare_result = "SKIP" - nl_compare_comment = "Test did not make it to setup phase" - nl_do_compare = False - else: - nl_do_compare = True - else: - nl_do_compare = False - - detailed_comments = "" - if (not namelists_only): - compare_result = None - compare_comment = "" - run_result = ts.get_status(RUN_PHASE) - if (run_result is None): - compare_result = "SKIP" - compare_comment = "Test did not make it to run phase" - do_compare = False - elif (run_result != TEST_PASS_STATUS): - compare_result = "SKIP" - compare_comment = "Run phase did not pass" - do_compare = False - else: - do_compare = True - else: - do_compare = False - - if nl_do_compare or do_compare: - with Case(test_dir) as case: - - if baseline_name is None: - baseline_name = case.get_value("BASELINE_NAME_CMP") - if not baseline_name: - baseline_name = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) - - if baseline_root is None: - baseline_root = case.get_value("BASELINE_ROOT") - - logfile_name = "compare.log.{}.{}".format(baseline_name.replace("/", "_"), log_id) - - append_status_cprnc_log( - "Comparing against baseline with compare_test_results:\n" - "Baseline: {}\n In baseline_root: {}".format(baseline_name, baseline_root), - logfile_name, - test_dir) - - if nl_do_compare: - nl_success = compare_namelists(case, baseline_name, baseline_root, logfile_name) - if nl_success: - nl_compare_result = TEST_PASS_STATUS - nl_compare_comment = "" - else: - nl_compare_result = TEST_FAIL_STATUS - nl_compare_comment = "See {}/{}".format(test_dir, logfile_name) - all_pass_or_skip = False - - if do_compare: - success, detailed_comments = compare_history(case, baseline_name, baseline_root, log_id) - if success: - compare_result = TEST_PASS_STATUS - else: - compare_result = TEST_FAIL_STATUS - all_pass_or_skip = False - - compare_comment = get_ts_synopsis(detailed_comments) - - brief_result = "" - if not hist_only: - brief_result += "{} {} {} {}\n".format(nl_compare_result, test_name, NAMELIST_PHASE, nl_compare_comment) - - if not namelists_only: - brief_result += "{} {} {}".format(compare_result, test_name, BASELINE_PHASE) - if compare_comment: - brief_result += " {}".format(compare_comment) - brief_result += "\n" - - print(brief_result) - - append_status_cprnc_log(brief_result, logfile_name, test_dir) - - if detailed_comments: - append_status_cprnc_log("Detailed comments:\n" + detailed_comments, logfile_name, test_dir) - - return all_pass_or_skip diff --git a/scripts/lib/CIME/cs_status.py b/scripts/lib/CIME/cs_status.py deleted file mode 
100644 index c69ef06b27a..00000000000 --- a/scripts/lib/CIME/cs_status.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Implementation of the cs.status script, which prints the status of all -of the tests in one or more test suites -""" - -from __future__ import print_function -from CIME.XML.standard_module_setup import * -from CIME.XML.expected_fails_file import ExpectedFailsFile -from CIME.test_status import TestStatus -import os -import sys -from collections import defaultdict - -def cs_status(test_paths, summary=False, fails_only=False, - count_fails_phase_list=None, - expected_fails_filepath=None, - out=sys.stdout): - """Print the test statuses of all tests in test_paths. The default - is to print to stdout, but this can be overridden with the 'out' - argument. - - If summary is True, then only the overall status of each test is printed - - If fails_only is True, then only test failures are printed (this - includes PENDs as well as FAILs). - - If count_fails_phase_list is provided, it should be a list of phases - (from the phases given by test_status.ALL_PHASES). For each phase in - this list: do not give line-by-line output; instead, just report the - total number of tests that have not PASSed this phase (this includes - PENDs and FAILs). (This is typically used with the fails_only - option, but it can also be used without that option.) - - If expected_fails_filepath is provided, it should be a string giving - the full path to a file listing expected failures for this test - suite. Expected failures are then labeled as such in the output. - """ - expect(not (summary and fails_only), - "Cannot have both summary and fails_only") - expect(not (summary and count_fails_phase_list), - "Cannot have both summary and count_fails_phase_list") - if count_fails_phase_list is None: - count_fails_phase_list = [] - non_pass_counts = dict.fromkeys(count_fails_phase_list, 0) - xfails = _get_xfails(expected_fails_filepath) - test_id_output = defaultdict(str) - test_id_counts = defaultdict(int) - for test_path in test_paths: - test_dir=os.path.dirname(test_path) - ts = TestStatus(test_dir=test_dir) - test_id = os.path.basename(test_dir).split(".")[-1] - if summary: - output = _overall_output(ts, " {status} {test_name}\n") - else: - if fails_only: - output = '' - else: - output = _overall_output(ts, " {test_name} (Overall: {status}) details:\n") - output += ts.phase_statuses_dump(prefix=" ", - skip_passes=fails_only, - skip_phase_list=count_fails_phase_list, - xfails=xfails.get(ts.get_name())) - if count_fails_phase_list: - ts.increment_non_pass_counts(non_pass_counts) - - test_id_output[test_id] += output - test_id_counts[test_id] += 1 - - for test_id in sorted(test_id_output): - count = test_id_counts[test_id] - print("{}: {} test{}".format(test_id, count, 's' if count > 1 else ''), file=out) - print(test_id_output[test_id], file=out) - print(' ', file=out) - - if count_fails_phase_list: - print(72*'=', file=out) - print('Non-PASS results for select phases:', file=out) - for phase in count_fails_phase_list: - print('{} non-passes: {}'.format(phase, non_pass_counts[phase]), file=out) - -def _get_xfails(expected_fails_filepath): - """Returns a dictionary of ExpectedFails objects, where the keys are test names - - expected_fails_filepath should be either a string giving the path to - the file containing expected failures, or None. If None, then this - returns an empty dictionary (as if expected_fails_filepath were - pointing to a file with no expected failures listed). 
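[Editor's illustration, not part of the diff] A hedged usage sketch of cs_status as documented above, globbing for per-test status files under a hypothetical test root (assuming the status file is named TestStatus, as the other deleted modules suggest):

    # Illustrative usage only: print a failures-only report for every test in a
    # (hypothetical) test root, per the cs_status signature above.
    import glob
    from CIME.cs_status import cs_status

    test_paths = glob.glob("/scratch/tests/*/TestStatus")
    cs_status(test_paths, fails_only=True,
              expected_fails_filepath=None)  # or a path to an expected-fails file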
- """ - if expected_fails_filepath is not None: - expected_fails_file = ExpectedFailsFile(expected_fails_filepath) - xfails = expected_fails_file.get_expected_fails() - else: - xfails = {} - return xfails - -def _overall_output(ts, format_str): - """Returns a string giving the overall test status - - Args: - ts: TestStatus object - format_str (string): string giving the format of the output; must - contain place-holders for status and test_name - """ - test_name = ts.get_name() - status = ts.get_overall_test_status() - return format_str.format(status=status, test_name=test_name) diff --git a/scripts/lib/CIME/cs_status_creator.py b/scripts/lib/CIME/cs_status_creator.py deleted file mode 100644 index 59871e3c648..00000000000 --- a/scripts/lib/CIME/cs_status_creator.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Creates a test suite-specific cs.status file from a template -""" - -from CIME.XML.standard_module_setup import * -import CIME.utils -import os -import stat - -def create_cs_status(test_root, test_id, extra_args='', filename=None): - """Create a test suite-specific cs.status file from the template - - Arguments: - test_root (string): path to test root; the file will be put here. If - this directory doesn't exist, it is created. - test_id (string): test id for this test suite. This can contain - shell wildcards if you want this one cs.status file to work - across multiple test suites. However, be careful not to make - this too general: for example, ending this with '*' will pick up - the *.ref1 directories for ERI and other tests, which is NOT - what you want. - extra_args (string): extra arguments to the cs.status command - (If there are multiple arguments, these should be in a space-delimited string.) - filename (string): name of the generated cs.status file. If not - given, this will be built from the test_id. 
- """ - python_libs_root = CIME.utils.get_python_libs_root() - cime_root = CIME.utils.get_cime_root() - template_file = os.path.join(python_libs_root, "cs.status.template") - template = open(template_file, "r").read() - template = template.replace("", - os.path.join(cime_root,"scripts","Tools")).replace\ - ("", extra_args).replace\ - ("", test_id).replace\ - ("", test_root) - if not os.path.exists(test_root): - os.makedirs(test_root) - if filename is None: - filename = "cs.status.{}".format(test_id) - cs_status_file = os.path.join(test_root, filename) - with open(cs_status_file, "w") as fd: - fd.write(template) - os.chmod(cs_status_file, os.stat(cs_status_file).st_mode | stat.S_IXUSR | stat.S_IXGRP) diff --git a/scripts/lib/CIME/expected_fails.py b/scripts/lib/CIME/expected_fails.py deleted file mode 100644 index 4fbe9eeba7f..00000000000 --- a/scripts/lib/CIME/expected_fails.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Contains the definition of a class to hold information on expected failures for a single test -""" - -from CIME.XML.standard_module_setup import * - -EXPECTED_FAILURE_COMMENT = "(EXPECTED FAILURE)" -UNEXPECTED_FAILURE_COMMENT_START = "(UNEXPECTED" # There will be some additional text after this, before the end parentheses - -class ExpectedFails(object): - - def __init__(self): - """Initialize an empty ExpectedFails object""" - self._fails = {} - - def __eq__(self, rhs): - expect(isinstance(rhs, ExpectedFails), "Wrong type") - return self._fails == rhs._fails # pylint: disable=protected-access - - def __ne__(self, rhs): - result = self.__eq__(rhs) - return not result - - def __repr__(self): - return repr(self._fails) - - def add_failure(self, phase, expected_status): - """Add an expected failure to the list""" - expect(phase not in self._fails, "Phase {} already present in list".format(phase)) - self._fails[phase] = expected_status - - def expected_fails_comment(self, phase, status): - """Returns a string giving the expected fails comment for this phase and status""" - if phase not in self._fails: - return '' - - if self._fails[phase] == status: - return EXPECTED_FAILURE_COMMENT - else: - return "{}: expected {})".format(UNEXPECTED_FAILURE_COMMENT_START, - self._fails[phase]) diff --git a/scripts/lib/CIME/get_timing.py b/scripts/lib/CIME/get_timing.py deleted file mode 100644 index b52f8204090..00000000000 --- a/scripts/lib/CIME/get_timing.py +++ /dev/null @@ -1,740 +0,0 @@ -#!/usr/bin/env python - -""" -Library for implementing getTiming tool which gets timing -information from a run. 
-""" - -from CIME.XML.standard_module_setup import * -from CIME.utils import safe_copy - -import datetime, re - -logger = logging.getLogger(__name__) - -class _GetTimingInfo: - def __init__(self, name): - self.name = name - self.tmin = 0 - self.tmax = 0 - self.adays = 0 - -class _TimingParser: - def __init__(self, case, lid="999999-999999"): - self.case = case - self.caseroot = case.get_value("CASEROOT") - self.lid = lid - self.finlines = None - self.fout = None - self.adays=0 - self._driver = case.get_value("COMP_INTERFACE") - self.models = {} - self.ncount = 0 - self.nprocs = 0 - - def write(self, text): - self.fout.write(text) - - def prttime(self, label, offset=None, div=None, coff=-999): - if offset is None: - offset=self.models['CPL'].offset - if div is None: - div = self.adays - datalen = 20 - cstr = "<---->" - clen = len(cstr) - - minval, maxval, found = self.gettime(label) - if div >= 1.0: - mind = minval/div - maxd = maxval/div - else: - mind = minval - maxd = maxval - - pstrlen = 25 - if mind >= 0 and maxd >= 0 and found: - if coff >= 0: - zoff = pstrlen + coff + int((datalen-clen)/2) - csp = offset - coff - int((datalen-clen)/2) - self.write(" {label:<{width1}}{cstr:<{width2}} {minv:8.3f}:{maxv:8.3f} \n".format(label=label, width1=zoff, cstr=cstr, width2=csp, minv=mind, maxv=maxd)) - else: - zoff = pstrlen + offset - self.write(" {label:<{width1}} {minv:8.3f}:{maxv:8.3f} \n".format(label=label, width1=zoff, minv=mind, maxv=maxd)) - - def gettime2(self, heading_padded): - if self._driver == 'mct': - return self._gettime2_mct(heading_padded) - elif self._driver == 'nuopc': - return self._gettime2_nuopc() - - def _gettime2_mct(self, heading_padded): - nprocs = 0 - ncount = 0 - - heading = '"' + heading_padded.strip() + '"' - for line in self.finlines: - m = re.match(r'\s*{}\s+\S\s+(\d+)\s*\d+\s*(\S+)'.format(heading), line) - if m: - nprocs = int(float(m.groups()[0])) - ncount = int(float(m.groups()[1])) - return (nprocs, ncount) - else: - m = re.match(r'\s*{}\s+\S\s+(\d+)\s'.format(heading), line) - if m: - nprocs = 1 - ncount = int(float(m.groups()[0])) - return (nprocs, ncount) - return (0, 0) - - def _gettime2_nuopc(self): - self.nprocs = 0 - self.ncount = 0 - expression = re.compile(r'\s*\MED:\(med_fraction_set\)\s+(\d+)\s+(\d+)') - - for line in self.finlines: - match = expression.match(line) - if match: - self.nprocs = int(match.group(1)) - self.ncount = int(match.group(2)) - return (self.nprocs, self.ncount) - - return (0, 0) - - def gettime(self, heading_padded): - if self._driver == 'mct': - return self._gettime_mct(heading_padded) - elif self._driver == 'nuopc': - return self._gettime_nuopc(heading_padded) - - - def _gettime_mct(self, heading_padded): - found = False - heading = '"' + heading_padded.strip() + '"' - minval = 0 - maxval = 0 - - for line in self.finlines: - m = re.match(r'\s*{}\s+\S\s+\d+\s*\d+\s*\S+\s*\S+\s*(\d*\.\d+)\s*\(.*\)\s*(\d*\.\d+)\s*\(.*\)'.format(heading), line) - if m: - maxval = float(m.groups()[0]) - minval = float(m.groups()[1]) - found = True - return (minval, maxval, found) - return (0, 0, False) - - def _gettime_nuopc(self, heading, instance='0001'): - if instance == '': - instance = '0001' - minval = 0 - maxval = 0 - m = None - # PETs Count Mean (s) Min (s) Min PET Max (s) Max PET - timeline = re.compile(r'\s*{}\s+\d+\s+\d+\s+(\d*\.\d+)\s+(\d*\.\d+)\s+\d+\s+(\d*\.\d+)\s+\d+'.format(re.escape(heading))) - phase = None - for line in self.finlines: - phase = self._get_nuopc_phase(line, instance, phase) - if phase != "run" and not 
"[ensemble]" in heading: - continue - if heading in line: - m = timeline.match(line) - if m: - minval = float(m.group(2)) - maxval = float(m.group(3)) - return (minval, maxval, True) - - return (0, 0, False) - - @staticmethod - def _get_nuopc_phase(line, instance, phase): - if "[ensemble] Init 1" in line: - phase = "init" - elif "[ESM"+instance+"] RunPhase1" in line: - phase = "run" - elif "[ESM"+instance+"] Finalize" in line: - phase = "finalize" - elif "[ESM" in line and "RunPhase1" in line: - phase = "other" - return phase - - def getMEDtime(self, instance): - if instance == '': - instance = '0001' - med_phase_line = re.compile(r'\s*(\[MED\] med_phases\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') - med_connector_line = re.compile(r'\s*(\[MED\] med_connectors\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') - med_fraction_line = re.compile(r'\s*(\[MED\] med_fraction\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') - - m = None - minval = 0 - maxval = 0 - phase = None - for line in self.finlines: - phase = self._get_nuopc_phase(line, instance, phase) - if phase != "run": - continue - m = med_phase_line.match(line) - if not m: - m = med_connector_line.match(line) - if not m: - m = med_fraction_line.match(line) - if m: - minval += float(m.group(2)) - maxval += float(m.group(2)) - - return(minval, maxval) - - def getCOMMtime(self, instance): - if instance == '': - instance = '0001' - comm_line = re.compile(r'\s*(\[\S+-TO-\S+\] RunPhase1)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') - m = None - maxval = 0 - phase = None - for line in self.finlines: - phase = self._get_nuopc_phase(line, instance, phase) - if phase != "run": - continue - m = comm_line.match(line) - if m: - heading = m.group(1) - maxv = float(m.group(2)) - maxval += maxv - logger.debug("{} time={} sum={}".format(heading, maxv, maxval)) - return maxval - - - def getTiming(self): - ninst = 1 - multi_driver = self.case.get_value("MULTI_DRIVER") - if multi_driver: - ninst = self.case.get_value("NINST_MAX") - - if ninst > 1: - for inst in range(ninst): - self._getTiming(inst+1) - else: - self._getTiming() - - def _getTiming(self, inst=0): - components=self.case.get_values("COMP_CLASSES") - for s in components: - self.models[s] = _GetTimingInfo(s) - - atm = self.models['ATM'] - lnd = self.models['LND'] - rof = self.models['ROF'] - ice = self.models['ICE'] - ocn = self.models['OCN'] - glc = self.models['GLC'] - cpl = self.models['CPL'] - cime_model = self.case.get_value("MODEL") - caseid = self.case.get_value("CASE") - mach = self.case.get_value("MACH") - user = self.case.get_value("USER") - continue_run = self.case.get_value("CONTINUE_RUN") - rundir = self.case.get_value("RUNDIR") - run_type = self.case.get_value("RUN_TYPE") - ncpl_base_period = self.case.get_value("NCPL_BASE_PERIOD") - ncpl = 0 - for compclass in self.case.get_values("COMP_CLASSES"): - comp_ncpl = self.case.get_value("{}_NCPL".format(compclass)) - if compclass == "OCN": - ocn_ncpl = comp_ncpl - if comp_ncpl is not None: - ncpl = max(ncpl, comp_ncpl) - - compset = self.case.get_value("COMPSET") - if compset is None: - compset = "" - grid = self.case.get_value("GRID") - run_type = self.case.get_value("RUN_TYPE") - stop_option = self.case.get_value("STOP_OPTION") - stop_n = self.case.get_value("STOP_N") - - cost_pes = self.case.get_value("COST_PES") - costpes_per_node = self.case.get_value("COSTPES_PER_NODE") - - totalpes = self.case.get_value("TOTALPES") - max_mpitasks_per_node = self.case.get_value("MAX_MPITASKS_PER_NODE") - smt_factor = max(1,int(self.case.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node)) - - if 
cost_pes > 0: - pecost = cost_pes - elif costpes_per_node: - pecost = self.case.num_nodes * costpes_per_node - else: - pecost = totalpes - - for m in self.models.values(): - for key in ["NTASKS", "ROOTPE", "PSTRID", "NTHRDS", "NINST"]: - if key == "NINST" and m.name == "CPL": - m.ninst = 1 - else: - setattr(m, key.lower(), - int(self.case.get_value("{}_{}".format(key, m.name)))) - - m.comp = self.case.get_value("COMP_{}".format(m.name)) - m.pemax = m.rootpe + m.ntasks * m.pstrid - 1 - - now = datetime.datetime.ctime(datetime.datetime.now()) - inittype = "FALSE" - if (run_type == "startup" or run_type == "hybrid") and \ - not continue_run: - inittype = "TRUE" - - if inst > 0: - inst_label = '_{:04d}'.format(inst) - else: - inst_label = '' - if self._driver == 'mct': - binfilename = os.path.join(rundir, "timing", "model_timing{}_stats" . format(inst_label)) - finfilename = os.path.join(self.caseroot, "timing", - "{}_timing{}_stats.{}".format(cime_model, inst_label, self.lid)) - elif self._driver == 'nuopc': - binfilename = os.path.join(rundir, "ESMF_Profile.summary") - finfilename = os.path.join(self.caseroot, "timing", - "{}.ESMF_Profile.summary.{}".format(cime_model, self.lid)) - - foutfilename = os.path.join(self.caseroot, "timing", - "{}_timing{}.{}.{}".format(cime_model, inst_label, caseid, self.lid)) - - timingDir = os.path.join(self.caseroot, "timing") - if not os.path.isfile(binfilename): - logger.warning("No timing file found in run directory") - return - - if not os.path.isdir(timingDir): - os.makedirs(timingDir) - - safe_copy(binfilename, finfilename) - - os.chdir(self.caseroot) - try: - fin = open(finfilename, "r") - self.finlines = fin.readlines() - fin.close() - except Exception as e: - logger.critical("Unable to open file {}".format(finfilename)) - raise e - - tlen = 1.0 - if ncpl_base_period == "decade": - tlen = 3650.0 - elif ncpl_base_period == "year": - tlen = 365.0 - elif ncpl_base_period == "day": - tlen = 1.0 - elif ncpl_base_period == "hour": - tlen = 1.0/24.0 - else: - logger.warning("Unknown NCPL_BASE_PERIOD={}".format(ncpl_base_period)) - - - # at this point the routine becomes driver specific - if self._driver == 'mct': - nprocs, ncount = self.gettime2('CPL:CLOCK_ADVANCE ') - nsteps = ncount / nprocs - elif self._driver == 'nuopc': - nprocs, nsteps = self.gettime2('') - adays = nsteps*tlen/ncpl - odays = nsteps*tlen/ncpl - if inittype == "TRUE": - odays = odays - (tlen/ocn_ncpl) - - peminmax = max([m.rootpe for m in self.models.values()])+1 - if ncpl_base_period in ["decade","year","day"] and int(adays) > 0: - adays = int(adays) - if tlen % ocn_ncpl == 0: - odays = int(odays) - self.adays = adays - maxoffset = 40 - extraoff = 20 - for m in self.models.values(): - m.offset = int((maxoffset*m.rootpe)/peminmax) + extraoff - cpl.offset = 0 - try: - self.fout = open(foutfilename, "w") - except Exception as e: - logger.critical("Could not open file for writing: {}".format(foutfilename)) - raise e - - self.write("---------------- TIMING PROFILE ---------------------\n") - - self.write(" Case : {}\n".format(caseid)) - self.write(" LID : {}\n".format(self.lid)) - self.write(" Machine : {}\n".format(mach)) - self.write(" Caseroot : {}\n".format(self.caseroot)) - self.write(" Timeroot : {}/Tools\n".format(self.caseroot)) - self.write(" User : {}\n".format(user)) - self.write(" Curr Date : {}\n".format(now)) - - self.write(" grid : {}\n".format(grid)) - self.write(" compset : {}\n".format(compset)) - self.write(" run type : {}, continue_run = {} (inittype = 
{})\n".format(run_type, str(continue_run).upper(), inittype)) - self.write(" stop option : {}, stop_n = {}\n".format(stop_option, stop_n)) - self.write(" run length : {} days ({} for ocean)\n\n".format(adays, odays)) - - self.write(" component comp_pes root_pe tasks " - "x threads" - " instances (stride) \n") - self.write(" --------- ------ ------- ------ " - "------ --------- ------ \n") - maxthrds = 0 - xmax = 0 - for k in self.case.get_values("COMP_CLASSES"): - m = self.models[k] - if m.comp == "cpl": - comp_label = m.comp + inst_label - else: - comp_label = m.comp - self.write(" {} = {:<8s} {:<6d} {:<6d} {:<6d} x {:<6d} {:<6d} ({:<6d}) \n".format(m.name.lower(), comp_label, (m.ntasks*m.nthrds *smt_factor), m.rootpe, m.ntasks, m.nthrds, m.ninst, m.pstrid)) - if m.nthrds > maxthrds: - maxthrds = m.nthrds - if self._driver == 'nuopc': - for k in components: - m = self.models[k] - if k != "CPL": - m.tmin, m.tmax, _ = self._gettime_nuopc(' [{}] RunPhase1 '.format(m.name), inst_label[1:]) - else: - m.tmin, m.tmax = self.getMEDtime(inst_label[1:]) - nmax = self.gettime("[ensemble] Init 1")[1] - tmax = self.gettime("[ensemble] RunPhase1")[1] - fmax = self.gettime("[ensemble] FinalizePhase1")[1] - xmax = self.getCOMMtime(inst_label[1:]) - - if self._driver == 'mct': - for k in components: - if k != "CPL": - m = self.models[k] - m.tmin, m.tmax, _ = self.gettime(' CPL:{}_RUN '.format(m.name)) - nmax = self.gettime(' CPL:INIT ')[1] - tmax = self.gettime(' CPL:RUN_LOOP ')[1] - wtmin = self.gettime(' CPL:TPROF_WRITE ')[0] - fmax = self.gettime(' CPL:FINAL ')[1] - otmin, otmax, _ = self.gettime(' CPL:OCNT_RUN ') - - # pick OCNT_RUN for tight coupling - if otmax > ocn.tmax: - ocn.tmin = otmin - ocn.tmax = otmax - - cpl.tmin, cpl.tmax, _ = self.gettime(' CPL:RUN ') - xmax = self.gettime(' CPL:COMM ')[1] - ocnwaittime = self.gettime(' CPL:C2O_INITWAIT')[0] - - if odays != 0: - ocnrunitime = ocn.tmax * (adays/odays - 1.0) - else: - ocnrunitime = 0.0 - - correction = max(0, ocnrunitime - ocnwaittime) - - tmax = tmax + wtmin + correction - ocn.tmax += ocnrunitime - - for m in self.models.values(): - m.tmaxr = 0 - if m.tmax > 0: - m.tmaxr = adays*86400.0/(m.tmax*365.0) - xmaxr = 0 - if xmax > 0: - xmaxr = adays*86400.0/(xmax*365.0) - tmaxr = 0 - if tmax > 0: - tmaxr = adays*86400.0/(tmax*365.0) - - self.write("\n") - self.write(" total pes active : {} \n".format(totalpes*maxthrds*smt_factor )) - self.write(" mpi tasks per node : {} \n".format(max_mpitasks_per_node)) - self.write(" pe count for cost estimate : {} \n".format(pecost)) - self.write("\n") - - self.write(" Overall Metrics: \n") - if adays > 0: - self.write(" Model Cost: {:10.2f} pe-hrs/simulated_year \n".format((tmax*365.0*pecost)/(3600.0*adays))) - if tmax > 0: - self.write(" Model Throughput: {:10.2f} simulated_years/day \n".format((86400.0*adays)/(tmax*365.0)) ) - - self.write("\n") - - self.write(" Init Time : {:10.3f} seconds \n".format(nmax)) - if adays > 0: - self.write(" Run Time : {:10.3f} seconds {:10.3f} seconds/day \n".format(tmax, tmax/adays)) - self.write(" Final Time : {:10.3f} seconds \n".format(fmax)) - - self.write("\n") - if self._driver == 'mct': - self.write(" Actual Ocn Init Wait Time : {:10.3f} seconds \n".format(ocnwaittime)) - self.write(" Estimated Ocn Init Run Time : {:10.3f} seconds \n".format(ocnrunitime)) - self.write(" Estimated Run Time Correction : {:10.3f} seconds \n".format(correction)) - self.write(" (This correction has been applied to the ocean and" - " total run times) \n") - - self.write("\n") - 
self.write("Runs Time in total seconds, seconds/model-day, and" - " model-years/wall-day \n") - self.write("CPL Run Time represents time in CPL pes alone, " - "not including time associated with data exchange " - "with other components \n") - self.write("\n") - - - if adays > 0: - self.write(" TOT Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(tmax, tmax/adays, tmaxr)) - for k in self.case.get_values("COMP_CLASSES"): - m = self.models[k] - self.write(" {} Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(k, m.tmax, m.tmax/adays, m.tmaxr)) - self.write(" CPL COMM Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(xmax, xmax/adays, xmaxr)) - - pstrlen = 25 - hoffset = 1 - self.write(" NOTE: min:max driver timers (seconds/day): \n") - - for k in self.case.get_values("COMP_CLASSES"): - m = self.models[k] - xspace = (pstrlen+hoffset+m.offset) * ' ' - self.write(" {} {} (pes {:d} to {:d}) \n".format(xspace, k, m.rootpe, m.pemax)) - self.write("\n") - - self.prttime(' CPL:CLOCK_ADVANCE ') - self.prttime(' CPL:OCNPRE1_BARRIER ') - self.prttime(' CPL:OCNPRE1 ') - self.prttime(' CPL:ATMOCN1_BARRIER ') - self.prttime(' CPL:ATMOCN1 ') - self.prttime(' CPL:OCNPREP_BARRIER ') - self.prttime(' CPL:OCNPREP ') - self.prttime(' CPL:C2O_BARRIER ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:C2O ', offset=ocn.offset, div=odays, coff=cpl.offset) - self.prttime(' CPL:LNDPREP_BARRIER ') - self.prttime(' CPL:LNDPREP ') - self.prttime(' CPL:C2L_BARRIER ', offset=lnd.offset, coff=cpl.offset) - self.prttime(' CPL:C2L ', offset=lnd.offset, coff=cpl.offset) - self.prttime(' CPL:ICEPREP_BARRIER ') - self.prttime(' CPL:ICEPREP ') - self.prttime(' CPL:C2I_BARRIER ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:C2I ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:WAVPREP_BARRIER ') - self.prttime(' CPL:WAVPREP ') - self.prttime(' CPL:C2W_BARRIER ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:C2W ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:ROFPREP_BARRIER ') - self.prttime(' CPL:ROFPREP ') - self.prttime(' CPL:C2R_BARRIER ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:C2R ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:ICE_RUN_BARRIER ', offset=ice.offset) - self.prttime(' CPL:ICE_RUN ', offset=ice.offset) - self.prttime(' CPL:LND_RUN_BARRIER ', offset=lnd.offset) - self.prttime(' CPL:LND_RUN ', offset=lnd.offset) - self.prttime(' CPL:ROF_RUN_BARRIER ', offset=rof.offset) - self.prttime(' CPL:ROF_RUN ', offset=rof.offset) - self.prttime(' CPL:WAV_RUN_BARRIER ', offset=rof.offset) - self.prttime(' CPL:WAV_RUN ', offset=rof.offset) - self.prttime(' CPL:OCNT_RUN_BARRIER ', offset=ocn.offset, div=odays) - self.prttime(' CPL:OCNT_RUN ', offset=ocn.offset, div=odays) - self.prttime(' CPL:O2CT_BARRIER ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:O2CT ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:OCNPOSTT_BARRIER ') - self.prttime(' CPL:OCNPOSTT ') - self.prttime(' CPL:ATMOCNP_BARRIER ') - self.prttime(' CPL:ATMOCNP ') - self.prttime(' CPL:L2C_BARRIER ', offset=lnd.offset, coff=cpl.offset) - self.prttime(' CPL:L2C ', offset=lnd.offset, div=cpl.offset) - self.prttime(' CPL:LNDPOST_BARRIER ') - self.prttime(' CPL:LNDPOST ') - self.prttime(' CPL:GLCPREP_BARRIER ') - self.prttime(' CPL:GLCPREP ') - self.prttime(' CPL:C2G_BARRIER ', offset=glc.offset, coff=cpl.offset) - self.prttime(' 
CPL:C2G ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:R2C_BARRIER ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:R2C ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:ROFPOST_BARRIER ') - self.prttime(' CPL:ROFPOST ') - self.prttime(' CPL:BUDGET1_BARRIER ') - self.prttime(' CPL:BUDGET1 ') - self.prttime(' CPL:I2C_BARRIER ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:I2C ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:ICEPOST_BARRIER ') - self.prttime(' CPL:ICEPOST ') - self.prttime(' CPL:FRACSET_BARRIER ') - self.prttime(' CPL:FRACSET ') - self.prttime(' CPL:ATMOCN2_BARRIER ') - self.prttime(' CPL:ATMOCN2 ') - self.prttime(' CPL:OCNPRE2_BARRIER ') - self.prttime(' CPL:OCNPRE2 ') - self.prttime(' CPL:C2O2_BARRIER ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:C2O2 ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:ATMOCNQ_BARRIER') - self.prttime(' CPL:ATMOCNQ ') - self.prttime(' CPL:ATMPREP_BARRIER ') - self.prttime(' CPL:ATMPREP ') - self.prttime(' CPL:C2A_BARRIER ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:C2A ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:OCN_RUN_BARRIER ', offset=ocn.offset, div=odays) - self.prttime(' CPL:OCN_RUN ', offset=ocn.offset, div=odays) - self.prttime(' CPL:ATM_RUN_BARRIER ', offset=atm.offset) - self.prttime(' CPL:ATM_RUN ', offset=atm.offset) - self.prttime(' CPL:GLC_RUN_BARRIER ', offset=glc.offset) - self.prttime(' CPL:GLC_RUN ', offset=glc.offset) - self.prttime(' CPL:W2C_BARRIER ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:W2C ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:WAVPOST_BARRIER ') - self.prttime(' CPL:WAVPOST ', cpl.offset) - self.prttime(' CPL:G2C_BARRIER ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:G2C ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:GLCPOST_BARRIER ') - self.prttime(' CPL:GLCPOST ') - self.prttime(' CPL:A2C_BARRIER ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:A2C ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:ATMPOST_BARRIER ') - self.prttime(' CPL:ATMPOST ') - self.prttime(' CPL:BUDGET2_BARRIER ') - self.prttime(' CPL:BUDGET2 ') - self.prttime(' CPL:BUDGET3_BARRIER ') - self.prttime(' CPL:BUDGET3 ') - self.prttime(' CPL:BUDGETF_BARRIER ') - self.prttime(' CPL:BUDGETF ') - self.prttime(' CPL:O2C_BARRIER ', offset=ocn.offset, - div=odays, coff=cpl.offset) - self.prttime(' CPL:O2C ', offset=ocn.offset, div=odays, coff=cpl.offset) - self.prttime(' CPL:OCNPOST_BARRIER ') - self.prttime(' CPL:OCNPOST ') - self.prttime(' CPL:RESTART_BARRIER ') - self.prttime(' CPL:RESTART') - self.prttime(' CPL:HISTORY_BARRIER ') - self.prttime(' CPL:HISTORY ') - self.prttime(' CPL:TSTAMP_WRITE ') - self.prttime(' CPL:TPROF_WRITE ') - self.prttime(' CPL:RUN_LOOP_BSTOP ') - - self.write("\n\n") - self.write("More info on coupler timing:\n") - - self.write("\n") - self.prttime(' CPL:OCNPRE1 ') - self.prttime(' CPL:ocnpre1_atm2ocn ') - - self.write("\n") - self.prttime(' CPL:OCNPREP ') - self.prttime(' CPL:OCNPRE2 ') - self.prttime(' CPL:ocnprep_avg ') - self.prttime(' CPL:ocnprep_diagav ') - - self.write("\n") - self.prttime(' CPL:LNDPREP ') - self.prttime(' CPL:lndprep_atm2lnd ') - self.prttime(' CPL:lndprep_mrgx2l ') - self.prttime(' CPL:lndprep_diagav ') - - self.write("\n") - self.prttime(' CPL:ICEPREP ') - self.prttime(' CPL:iceprep_ocn2ice ') - self.prttime(' CPL:iceprep_atm2ice ') - self.prttime(' CPL:iceprep_mrgx2i ') - 
self.prttime(' CPL:iceprep_diagav ') - - self.write("\n") - self.prttime(' CPL:WAVPREP ') - self.prttime(' CPL:wavprep_atm2wav ') - self.prttime(' CPL:wavprep_ocn2wav ') - self.prttime(' CPL:wavprep_ice2wav ') - self.prttime(' CPL:wavprep_mrgx2w ') - self.prttime(' CPL:wavprep_diagav ') - - self.write("\n") - self.prttime(' CPL:ROFPREP ') - self.prttime(' CPL:rofprep_l2xavg ') - self.prttime(' CPL:rofprep_lnd2rof ') - self.prttime(' CPL:rofprep_mrgx2r ') - self.prttime(' CPL:rofprep_diagav ') - - self.write("\n") - self.prttime(' CPL:GLCPREP ') - self.prttime(' CPL:glcprep_avg ') - self.prttime(' CPL:glcprep_lnd2glc ') - self.prttime(' CPL:glcprep_mrgx2g ') - self.prttime(' CPL:glcprep_diagav ') - - self.write("\n") - self.prttime(' CPL:ATMPREP ') - self.prttime(' CPL:atmprep_xao2atm ') - self.prttime(' CPL:atmprep_ocn2atm ') - self.prttime(' CPL:atmprep_alb2atm ') - self.prttime(' CPL:atmprep_ice2atm ') - self.prttime(' CPL:atmprep_lnd2atm ') - self.prttime(' CPL:atmprep_mrgx2a ') - self.prttime(' CPL:atmprep_diagav ') - - self.write("\n") - self.prttime(' CPL:ATMOCNP ') - self.prttime(' CPL:ATMOCN1 ') - self.prttime(' CPL:ATMOCN2 ') - self.prttime(' CPL:atmocnp_ice2ocn ') - self.prttime(' CPL:atmocnp_wav2ocn ') - self.prttime(' CPL:atmocnp_fluxo ') - self.prttime(' CPL:atmocnp_fluxe ') - self.prttime(' CPL:atmocnp_mrgx2o ') - self.prttime(' CPL:atmocnp_accum ') - self.prttime(' CPL:atmocnp_ocnalb ') - - self.write("\n") - self.prttime(' CPL:ATMOCNQ ') - self.prttime(' CPL:atmocnq_ocn2atm ') - self.prttime(' CPL:atmocnq_fluxa ') - self.prttime(' CPL:atmocnq_atm2ocnf ') - - self.write("\n") - self.prttime(' CPL:OCNPOSTT ') - self.prttime(' CPL:OCNPOST ') - self.prttime(' CPL:ocnpost_diagav ') - - self.write("\n") - self.prttime(' CPL:LNDPOST ') - self.prttime(' CPL:lndpost_diagav ') - self.prttime(' CPL:lndpost_acc2lr ') - self.prttime(' CPL:lndpost_acc2lg ') - - self.write("\n") - self.prttime(' CPL:ROFOST ') - self.prttime(' CPL:rofpost_diagav ') - self.prttime(' CPL:rofpost_histaux ') - self.prttime(' CPL:rofpost_rof2lnd ') - self.prttime(' CPL:rofpost_rof2ice ') - self.prttime(' CPL:rofpost_rof2ocn ') - - self.write("\n") - self.prttime(' CPL:ICEPOST ') - self.prttime(' CPL:icepost_diagav ') - - self.write("\n") - self.prttime(' CPL:WAVPOST ') - self.prttime(' CPL:wavpost_diagav ') - - self.write("\n") - self.prttime(' CPL:GLCPOST ') - self.prttime(' CPL:glcpost_diagav ') - self.prttime(' CPL:glcpost_glc2lnd ') - self.prttime(' CPL:glcpost_glc2ice ') - self.prttime(' CPL:glcpost_glc2ocn ') - - self.write("\n") - self.prttime(' CPL:ATMPOST ') - self.prttime(' CPL:atmpost_diagav ') - - self.write("\n") - self.prttime(' CPL:BUDGET ') - self.prttime(' CPL:BUDGET1 ') - self.prttime(' CPL:BUDGET2 ') - self.prttime(' CPL:BUDGET3 ') - self.prttime(' CPL:BUDGETF ') - self.write("\n\n") - - self.fout.close() - -def get_timing(case, lid): - parser = _TimingParser(case, lid) - parser.getTiming() diff --git a/scripts/lib/CIME/hist_utils.py b/scripts/lib/CIME/hist_utils.py deleted file mode 100644 index f757477857d..00000000000 --- a/scripts/lib/CIME/hist_utils.py +++ /dev/null @@ -1,658 +0,0 @@ -""" -Functions for actions pertaining to history files. 
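One way to read _get_all_hist_files below: the component name, an optional four-digit instance number, and each archive extension pattern are assembled into a regular expression, and every file in the run directory is tested against it. A simplified sketch with made-up file names (no directory walk, no ref_case filtering):

```python
import re

def find_hist_files(model, filenames, extensions, suffix=""):
    """Return the subset of filenames that look like history files for
    'model' with one of the given extensions; a simplified stand-in for
    _get_all_hist_files."""
    suffix = ".{}".format(suffix) if suffix else ""
    hits = set()
    for ext in extensions:
        # model name, optional _NNNN instance number, extension, anything, ".nc", optional suffix
        pattern = re.compile(model + r"\d?_?(\d{4})?\." + ext + r".*\.nc" + re.escape(suffix) + "$")
        hits.update(f for f in filenames if pattern.search(f))
    return sorted(hits)

files = [
    "mycase.cpl.hi.0001-01-02.nc",
    "mycase.cam_0002.h0.0001-01-02.nc.base",
    "mycase.cam.r.0001-01-02.nc",   # restart file, not a history file
]
print(find_hist_files("cpl", files, [r"hi"]))                   # ['mycase.cpl.hi.0001-01-02.nc']
print(find_hist_files("cam", files, [r"h\d*"], suffix="base"))  # ['mycase.cam_0002.h0.0001-01-02.nc.base']
```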
-""" -from CIME.XML.standard_module_setup import * -from CIME.test_status import TEST_NO_BASELINES_COMMENT, TEST_STATUS_FILENAME -from CIME.utils import get_current_commit, get_timestamp, get_model, safe_copy, SharedArea, parse_test_name - -import logging, os, re, filecmp -logger = logging.getLogger(__name__) - -BLESS_LOG_NAME = "bless_log" - -# ------------------------------------------------------------------------ -# Strings used in the comments generated by cprnc -# ------------------------------------------------------------------------ - -CPRNC_FIELDLISTS_DIFFER = "files differ only in their field lists" - -# ------------------------------------------------------------------------ -# Strings used in the comments generated by _compare_hists -# ------------------------------------------------------------------------ - -NO_COMPARE = "had no compare counterpart" -NO_ORIGINAL = "had no original counterpart" -FIELDLISTS_DIFFER = "had a different field list from" -DIFF_COMMENT = "did NOT match" -# COMPARISON_COMMENT_OPTIONS should include all of the above: these are any of the special -# comment strings that describe the reason for a comparison failure -COMPARISON_COMMENT_OPTIONS = set([NO_COMPARE, - NO_ORIGINAL, - FIELDLISTS_DIFFER, - DIFF_COMMENT]) -# Comments that indicate a true baseline comparison failure -COMPARISON_FAILURE_COMMENT_OPTIONS = (COMPARISON_COMMENT_OPTIONS - - set([NO_COMPARE, FIELDLISTS_DIFFER])) - -def _iter_model_file_substrs(case): - models = case.get_compset_components() - models.append('cpl') - for model in models: - yield model - -def _get_all_hist_files(model, from_dir, file_extensions, suffix="", ref_case=None): - suffix = (".{}".format(suffix) if suffix else "") - - test_hists = [] - # Match hist files produced by run - for extension in file_extensions: - if 'initial' in extension: - continue - if extension.endswith('$'): - extension = extension[:-1] - string = model+r'\d?_?(\d{4})?\.'+extension+suffix+'$' - logger.debug ("Regex is {}".format(string)) - pfile = re.compile(string) - test_hists.extend([os.path.join(from_dir,f) for f in os.listdir(from_dir) if pfile.search(f)]) - - if ref_case: - test_hists = [h for h in test_hists if not (ref_case in os.path.basename(h))] - - test_hists = list(set(test_hists)) - test_hists.sort() - logger.debug("_get_all_hist_files returns {} for model {}".format(test_hists, model)) - return test_hists - -def _get_latest_hist_files(model, from_dir, file_extensions, suffix="", ref_case=None): - test_hists = _get_all_hist_files(model, from_dir, file_extensions, suffix=suffix, ref_case=ref_case) - latest_files = {} - histlist = [] - for hist in test_hists: - ext = get_extension(model, hist) - latest_files[ext] = hist - - for key in latest_files.keys(): - histlist.append(latest_files[key]) - return histlist - -def copy(case, suffix): - """Copy the most recent batch of hist files in a case, adding the given suffix. - - This can allow you to temporarily "save" these files so they won't be blown - away if you re-run the case. - - case - The case containing the files you want to save - suffix - The string suffix you want to add to saved files, this can be used to find them later. 
- """ - rundir = case.get_value("RUNDIR") - ref_case = case.get_value("RUN_REFCASE") - # Loop over models - archive = case.get_env("archive") - comments = "Copying hist files to suffix '{}'\n".format(suffix) - num_copied = 0 - for model in _iter_model_file_substrs(case): - comments += " Copying hist files for model '{}'\n".format(model) - if model == 'cpl': - file_extensions = archive.get_hist_file_extensions(archive.get_entry('drv')) - else: - file_extensions = archive.get_hist_file_extensions(archive.get_entry(model)) - test_hists = _get_latest_hist_files(model, rundir, file_extensions, ref_case=ref_case) - num_copied += len(test_hists) - for test_hist in test_hists: - new_file = "{}.{}".format(test_hist, suffix) - if os.path.exists(new_file): - os.remove(new_file) - - comments += " Copying '{}' to '{}'\n".format(test_hist, new_file) - - # Need to copy rather than move in case there are some history files - # that will need to continue to be filled on the next phase; this - # can be the case for a restart run. - # - # (If it weren't for that possibility, a move/rename would be more - # robust here: The problem with a copy is that there can be - # confusion after the second run as to which files were created by - # the first run and which by the second. For example, if the second - # run fails to output any history files, the test will still pass, - # because the test system will think that run1's files were output - # by run2. But we live with that downside for the sake of the reason - # noted above.) - safe_copy(test_hist, new_file) - - expect(num_copied > 0, "copy failed: no hist files found in rundir '{}'".format(rundir)) - - return comments - -def rename_all_hist_files(case, suffix): - """Renaming all hist files in a case, adding the given suffix. - - case - The case containing the files you want to save - suffix - The string suffix you want to add to saved files, this can be used to find them later. 
- """ - rundir = case.get_value("RUNDIR") - ref_case = case.get_value("RUN_REFCASE") - # Loop over models - archive = case.get_env("archive") - comments = "Renaming hist files by adding suffix '{}'\n".format(suffix) - num_renamed = 0 - for model in _iter_model_file_substrs(case): - comments += " Renaming hist files for model '{}'\n".format(model) - if model == 'cpl': - file_extensions = archive.get_hist_file_extensions(archive.get_entry('drv')) - else: - file_extensions = archive.get_hist_file_extensions(archive.get_entry(model)) - test_hists = _get_all_hist_files(model, rundir, file_extensions, ref_case=ref_case) - num_renamed += len(test_hists) - for test_hist in test_hists: - new_file = "{}.{}".format(test_hist, suffix) - if os.path.exists(new_file): - os.remove(new_file) - - comments += " Renaming '{}' to '{}'\n".format(test_hist, new_file) - - os.rename(test_hist, new_file) - - expect(num_renamed > 0, "renaming failed: no hist files found in rundir '{}'".format(rundir)) - - return comments - -def _hists_match(model, hists1, hists2, suffix1="", suffix2=""): - """ - return (num in set 1 but not 2 , num in set 2 but not 1, matchups) - - >>> hists1 = ['FOO.G.cpl.h1.nc', 'FOO.G.cpl.h2.nc', 'FOO.G.cpl.h3.nc'] - >>> hists2 = ['cpl.h2.nc', 'cpl.h3.nc', 'cpl.h4.nc'] - >>> _hists_match('cpl', hists1, hists2) - (['FOO.G.cpl.h1.nc'], ['cpl.h4.nc'], [('FOO.G.cpl.h2.nc', 'cpl.h2.nc'), ('FOO.G.cpl.h3.nc', 'cpl.h3.nc')]) - >>> hists1 = ['FOO.G.cpl.h1.nc.SUF1', 'FOO.G.cpl.h2.nc.SUF1', 'FOO.G.cpl.h3.nc.SUF1'] - >>> hists2 = ['cpl.h2.nc.SUF2', 'cpl.h3.nc.SUF2', 'cpl.h4.nc.SUF2'] - >>> _hists_match('cpl', hists1, hists2, 'SUF1', 'SUF2') - (['FOO.G.cpl.h1.nc.SUF1'], ['cpl.h4.nc.SUF2'], [('FOO.G.cpl.h2.nc.SUF1', 'cpl.h2.nc.SUF2'), ('FOO.G.cpl.h3.nc.SUF1', 'cpl.h3.nc.SUF2')]) - >>> hists1 = ['cam.h0.1850-01-08-00000.nc'] - >>> hists2 = ['cam_0001.h0.1850-01-08-00000.nc','cam_0002.h0.1850-01-08-00000.nc'] - >>> _hists_match('cam', hists1, hists2, '', '') - ([], [], [('cam.h0.1850-01-08-00000.nc', 'cam_0001.h0.1850-01-08-00000.nc'), ('cam.h0.1850-01-08-00000.nc', 'cam_0002.h0.1850-01-08-00000.nc')]) - >>> hists1 = ['cam_0001.h0.1850-01-08-00000.nc.base','cam_0002.h0.1850-01-08-00000.nc.base'] - >>> hists2 = ['cam_0001.h0.1850-01-08-00000.nc.rest','cam_0002.h0.1850-01-08-00000.nc.rest'] - >>> _hists_match('cam', hists1, hists2, 'base', 'rest') - ([], [], [('cam_0001.h0.1850-01-08-00000.nc.base', 'cam_0001.h0.1850-01-08-00000.nc.rest'), ('cam_0002.h0.1850-01-08-00000.nc.base', 'cam_0002.h0.1850-01-08-00000.nc.rest')]) - """ - normalized1, normalized2 = [], [] - multi_normalized1, multi_normalized2 = [], [] - multiinst = False - - for hists, suffix, normalized, multi_normalized in [(hists1, suffix1, normalized1, multi_normalized1), (hists2, suffix2, normalized2, multi_normalized2)]: - for hist in hists: - normalized_name = hist[hist.rfind(model):] - if suffix != "": - expect(normalized_name.endswith(suffix), "How did '{}' not have suffix '{}'".format(hist, suffix)) - normalized_name = normalized_name[:len(normalized_name) - len(suffix) - 1] - - m = re.search("(.+)_[0-9]{4}(.+.nc)",normalized_name) - if m is not None: - multiinst = True - multi_normalized.append(m.group(1)+m.group(2)) - - normalized.append(normalized_name) - - set_of_1_not_2 = set(normalized1) - set(normalized2) - set_of_2_not_1 = set(normalized2) - set(normalized1) - - one_not_two = sorted([hists1[normalized1.index(item)] for item in set_of_1_not_2]) - two_not_one = sorted([hists2[normalized2.index(item)] for item in set_of_2_not_1]) - - both = 
set(normalized1) & set(normalized2) - - match_ups = sorted([ (hists1[normalized1.index(item)], hists2[normalized2.index(item)]) for item in both]) - - # Special case - comparing multiinstance to single instance files - - if multi_normalized1 != multi_normalized2: - # in this case hists1 contains multiinstance hists2 does not - if set(multi_normalized1) == set(normalized2): - for idx, norm_hist1 in enumerate(multi_normalized1): - for idx1, hist2 in enumerate(hists2): - norm_hist2 = normalized2[idx1] - if norm_hist1 == norm_hist2: - match_ups.append((hists1[idx], hist2)) - if hist2 in two_not_one: - two_not_one.remove(hist2) - if hists1[idx] in one_not_two: - one_not_two.remove(hists1[idx]) - # in this case hists2 contains multiinstance hists1 does not - if set(multi_normalized2) == set(normalized1): - for idx, norm_hist2 in enumerate(multi_normalized2): - for idx1, hist1 in enumerate(hists1): - norm_hist1 = normalized1[idx1] - if norm_hist2 == norm_hist1: - match_ups.append((hist1, hists2[idx])) - if hist1 in one_not_two: - one_not_two.remove(hist1) - if hists2[idx] in two_not_one: - two_not_one.remove(hists2[idx]) - - if not multiinst: - expect(len(match_ups) + len(set_of_1_not_2) == len(hists1), "Programming error1") - expect(len(match_ups) + len(set_of_2_not_1) == len(hists2), "Programming error2") - - return one_not_two, two_not_one, match_ups - -def _compare_hists(case, from_dir1, from_dir2, suffix1="", suffix2="", outfile_suffix="", - ignore_fieldlist_diffs=False): - if from_dir1 == from_dir2: - expect(suffix1 != suffix2, "Comparing files to themselves?") - - casename = case.get_value("CASE") - testcase = case.get_value("TESTCASE") - casedir = case.get_value("CASEROOT") - all_success = True - num_compared = 0 - comments = "Comparing hists for case '{}' dir1='{}', suffix1='{}', dir2='{}' suffix2='{}'\n".format(casename, from_dir1, suffix1, from_dir2, suffix2) - multiinst_driver_compare = False - archive = case.get_env('archive') - ref_case = case.get_value("RUN_REFCASE") - for model in _iter_model_file_substrs(case): - if model == 'cpl' and suffix2 == 'multiinst': - multiinst_driver_compare = True - comments += " comparing model '{}'\n".format(model) - if model == 'cpl': - file_extensions = archive.get_hist_file_extensions(archive.get_entry('drv')) - else: - file_extensions = archive.get_hist_file_extensions(archive.get_entry(model)) - hists1 = _get_latest_hist_files(model, from_dir1, file_extensions, suffix=suffix1, ref_case=ref_case) - hists2 = _get_latest_hist_files(model, from_dir2, file_extensions, suffix=suffix2, ref_case=ref_case) - if len(hists1) == 0 and len(hists2) == 0: - comments += " no hist files found for model {}\n".format(model) - continue - - one_not_two, two_not_one, match_ups = _hists_match(model, hists1, hists2, suffix1, suffix2) - for item in one_not_two: - comments += " File '{}' {} in '{}' with suffix '{}'\n".format(item, NO_COMPARE, from_dir2, suffix2) - all_success = False - - for item in two_not_one: - comments += " File '{}' {} in '{}' with suffix '{}'\n".format(item, NO_ORIGINAL, from_dir1, suffix1) - all_success = False - - num_compared += len(match_ups) - - for hist1, hist2 in match_ups: - success, cprnc_log_file, cprnc_comment = cprnc(model, hist1, hist2, case, from_dir1, - multiinst_driver_compare=multiinst_driver_compare, - outfile_suffix=outfile_suffix, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) - if success: - comments += " {} matched {}\n".format(hist1, hist2) - else: - if cprnc_comment == CPRNC_FIELDLISTS_DIFFER: - comments += " {} {} 
{}\n".format(hist1, FIELDLISTS_DIFFER, hist2) - else: - comments += " {} {} {}\n".format(hist1, DIFF_COMMENT, hist2) - comments += " cat " + cprnc_log_file + "\n" - expected_log_file = os.path.join(casedir, os.path.basename(cprnc_log_file)) - if not (os.path.exists(expected_log_file) and filecmp.cmp(cprnc_log_file, expected_log_file)): - try: - safe_copy(cprnc_log_file, casedir) - except (OSError, IOError) as _: - logger.warning("Could not copy {} to {}".format(cprnc_log_file, casedir)) - - all_success = False - # PFS test may not have any history files to compare. - if num_compared == 0 and testcase != "PFS": - all_success = False - comments += "Did not compare any hist files! Missing baselines?\n" - - comments += "PASS" if all_success else "FAIL" - - return all_success, comments - -def compare_test(case, suffix1, suffix2, ignore_fieldlist_diffs=False): - """ - Compares two sets of component history files in the testcase directory - - case - The case containing the hist files to compare - suffix1 - The suffix that identifies the first batch of hist files - suffix1 - The suffix that identifies the second batch of hist files - ignore_fieldlist_diffs (bool): If True, then: If the two cases differ only in their - field lists (i.e., all shared fields are bit-for-bit, but one case has some - diagnostic fields that are missing from the other case), treat the two cases as - identical. - - returns (SUCCESS, comments) - """ - rundir = case.get_value("RUNDIR") - - return _compare_hists(case, rundir, rundir, suffix1, suffix2, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) - -def cprnc(model, file1, file2, case, rundir, multiinst_driver_compare=False, outfile_suffix="", - ignore_fieldlist_diffs=False): - """ - Run cprnc to compare two individual nc files - - file1 - the full or relative path of the first file - file2 - the full or relative path of the second file - case - the case containing the files - rundir - the rundir for the case - outfile_suffix - if non-blank, then the output file name ends with this - suffix (with a '.' added before the given suffix). - Use None to avoid permissions issues in the case dir. - ignore_fieldlist_diffs (bool): If True, then: If the two cases differ only in their - field lists (i.e., all shared fields are bit-for-bit, but one case has some - diagnostic fields that are missing from the other case), treat the two cases as - identical. 
- - returns (True if the files matched, log_name, comment) - where 'comment' is either an empty string or one of the module-level constants - beginning with CPRNC_ (e.g., CPRNC_FIELDLISTS_DIFFER) - """ - cprnc_exe = case.get_value("CCSM_CPRNC") - basename = os.path.basename(file1) - multiinst_regex = re.compile(r'.*%s[^_]*(_[0-9]{4})[.]h.?[.][^.]+?[.]nc' % model) - mstr = '' - mstr1 = '' - mstr2 = '' - # If one is a multiinstance file but the other is not add an instance string - m1 = multiinst_regex.match(file1) - m2 = multiinst_regex.match(file2) - if m1 is not None: - mstr1 = m1.group(1) - if m2 is not None: - mstr2 = m2.group(1) - if mstr1 != mstr2: - mstr = mstr1+mstr2 - - output_filename = os.path.join(rundir, "{}{}.cprnc.out".format(basename, mstr)) - if outfile_suffix: - output_filename += ".{}".format(outfile_suffix) - - if outfile_suffix is None: - cpr_stat, out, _ = run_cmd("{} -m {} {}".format(cprnc_exe, file1, file2), combine_output=True) - else: - cpr_stat = run_cmd("{} -m {} {}".format(cprnc_exe, file1, file2), combine_output=True, arg_stdout=output_filename)[0] - with open(output_filename, "r") as fd: - out = fd.read() - - comment = '' - if cpr_stat == 0: - # Successful exit from cprnc - if multiinst_driver_compare: - # In a multiinstance test the cpl hist file will have a different number of - # dimensions and so cprnc will indicate that the files seem to be DIFFERENT - # in this case we only want to check that the fields we are able to compare - # have no differences. - files_match = " 0 had non-zero differences" in out - else: - if "files seem to be IDENTICAL" in out: - files_match = True - elif "the two files seem to be DIFFERENT" in out: - files_match = False - elif "the two files DIFFER only in their field lists" in out: - if ignore_fieldlist_diffs: - files_match = True - else: - files_match = False - comment = CPRNC_FIELDLISTS_DIFFER - else: - expect(False, "Did not find an expected summary string in cprnc output") - else: - # If there is an error in cprnc, we do the safe thing of saying the comparison failed - files_match = False - return (files_match, output_filename, comment) - -def compare_baseline(case, baseline_dir=None, outfile_suffix=""): - """ - compare the current test output to a baseline result - - case - The case containing the hist files to be compared against baselines - baseline_dir - Optionally, specify a specific baseline dir, otherwise it will be computed from case config - outfile_suffix - if non-blank, then the cprnc output file name ends with - this suffix (with a '.' added before the given suffix). if None, no output file saved. 
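When no explicit baseline directory is given, the comparison directory is derived from BASELINE_ROOT and BASECMP_CASE, and the code refuses to compare if either directory is missing. A minimal sketch of that resolution, with a plain dict standing in for the Case object:

```python
import os

def resolve_basecmp_dir(case_values, baseline_dir=None):
    """Return the resolved comparison directory and the list of required
    directories that do not exist, mirroring compare_baseline's setup."""
    if baseline_dir is None:
        baselineroot = case_values["BASELINE_ROOT"]
        basecmp_dir = os.path.join(baselineroot, case_values["BASECMP_CASE"])
        dirs_to_check = (baselineroot, basecmp_dir)
    else:
        basecmp_dir = baseline_dir
        dirs_to_check = (basecmp_dir,)
    missing = [d for d in dirs_to_check if not os.path.isdir(d)]
    return basecmp_dir, missing

# Hypothetical values, for illustration only:
print(resolve_basecmp_dir({"BASELINE_ROOT": "/baselines",
                           "BASECMP_CASE": "master/SMS.f19_g16.A"}))
```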
- - returns (SUCCESS, comments) - SUCCESS means all hist files matched their corresponding baseline - """ - rundir = case.get_value("RUNDIR") - if baseline_dir is None: - baselineroot = case.get_value("BASELINE_ROOT") - basecmp_dir = os.path.join(baselineroot, case.get_value("BASECMP_CASE")) - dirs_to_check = (baselineroot, basecmp_dir) - else: - basecmp_dir = baseline_dir - dirs_to_check = (basecmp_dir,) - - for bdir in dirs_to_check: - if not os.path.isdir(bdir): - return False, "ERROR {} baseline directory '{}' does not exist".format(TEST_NO_BASELINES_COMMENT,bdir) - - success, comments = _compare_hists(case, rundir, basecmp_dir, outfile_suffix=outfile_suffix) - if get_model() == "e3sm": - bless_log = os.path.join(basecmp_dir, BLESS_LOG_NAME) - if os.path.exists(bless_log): - lines = open(bless_log, "r").readlines() - if lines: - last_line = lines[-1] - comments += "\n Most recent bless: {}".format(last_line) - - return success, comments - -def get_extension(model, filepath): - """ - For a hist file for the given model, return what we call the "extension" - - model - The component model - filepath - The path of the hist file - - >>> get_extension("cpl", "cpl.hi.nc") - 'hi' - >>> get_extension("cpl", "cpl.h.nc") - 'h' - >>> get_extension("cpl", "cpl.h1.nc.base") - 'h1' - >>> get_extension("cpl", "TESTRUNDIFF.cpl.hi.0.nc.base") - 'hi' - >>> get_extension("cpl", "TESTRUNDIFF_Mmpi-serial.f19_g16_rx1.A.melvin_gnu.C.fake_testing_only_20160816_164150-20160816_164240.cpl.h.nc") - 'h' - >>> get_extension("clm","clm2_0002.h0.1850-01-06-00000.nc") - '0002.h0' - >>> get_extension("pop","PFS.f09_g16.B1850.cheyenne_intel.allactive-default.GC.c2_0_b1f2_int.pop.h.ecosys.nday1.0001-01-02.nc") - 'h' - >>> get_extension("mom", "ga0xnw.mom6.frc._0001_001.nc") - 'frc' - >>> get_extension("mom", "ga0xnw.mom6.sfc.day._0001_001.nc") - 'sfc.day' - >>> get_extension("mom", "bixmc5.mom6.prog._0001_01_05_84600.nc") - 'prog' - >>> get_extension("mom", "bixmc5.mom6.hm._0001_01_03_42300.nc") - 'hm' - >>> get_extension("mom", "bixmc5.mom6.hmz._0001_01_03_42300.nc") - 'hmz' - """ - basename = os.path.basename(filepath) - m = None - ext_regexes = [] - - # First add any model-specific extension regexes; these will be checked before the - # general regex - if model == "mom": - # Need to check 'sfc.day' specially: the embedded '.' messes up the - # general-purpose regex - ext_regexes.append(r'sfc\.day') - - # Now add the general-purpose extension regex - ext_regexes.append(r'\w+') - - for ext_regex in ext_regexes: - full_regex_str = model+r'\d?_?(\d{4})?\.('+ext_regex+r')[-\w\.]*\.nc\.?' - full_regex = re.compile(full_regex_str) - m = full_regex.search(basename) - if m is not None: - break - - expect(m is not None, "Failed to get extension for file '{}'".format(filepath)) - - if m.group(1) is not None: - result = m.group(1)+'.'+m.group(2) - else: - result = m.group(2) - - return result - -def generate_teststatus(testdir, baseline_dir): - """ - CESM stores it's TestStatus file in baselines. Do not let exceptions - escape from this function. 
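The body below wraps the whole copy in a try/except so baseline bookkeeping can never fail the test itself. A stripped-down stand-in (plain shutil.copy instead of CIME's safe_copy and SharedArea helpers, and no model check):

```python
import logging
import os
import shutil

logger = logging.getLogger(__name__)

def publish_test_status(testdir, baseline_dir, status_filename="TestStatus"):
    """Copy the TestStatus file into the baseline area, never raising."""
    try:
        if not os.path.isdir(baseline_dir):
            os.makedirs(baseline_dir)
        shutil.copy(os.path.join(testdir, status_filename), baseline_dir)
    except Exception as exc:
        # Baseline bookkeeping must not take the test run down with it.
        logger.warning("Could not copy %s to baselines: %s", status_filename, exc)
```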
- """ - if get_model() == "cesm": - try: - with SharedArea(): - if not os.path.isdir(baseline_dir): - os.makedirs(baseline_dir) - - safe_copy(os.path.join(testdir, TEST_STATUS_FILENAME), baseline_dir, preserve_meta=False) - except Exception as e: - logger.warning("Could not copy {} to baselines, {}".format(os.path.join(testdir, TEST_STATUS_FILENAME), str(e))) - -def _generate_baseline_impl(case, baseline_dir=None, allow_baseline_overwrite=False): - """ - copy the current test output to baseline result - - case - The case containing the hist files to be copied into baselines - baseline_dir - Optionally, specify a specific baseline dir, otherwise it will be computed from case config - allow_baseline_overwrite must be true to generate baselines to an existing directory. - - returns (SUCCESS, comments) - """ - rundir = case.get_value("RUNDIR") - ref_case = case.get_value("RUN_REFCASE") - if baseline_dir is None: - baselineroot = case.get_value("BASELINE_ROOT") - basegen_dir = os.path.join(baselineroot, case.get_value("BASEGEN_CASE")) - else: - basegen_dir = baseline_dir - testcase = case.get_value("CASE") - archive = case.get_env('archive') - - if not os.path.isdir(basegen_dir): - os.makedirs(basegen_dir) - - if (os.path.isdir(os.path.join(basegen_dir,testcase)) and - not allow_baseline_overwrite): - expect(False, " Cowardly refusing to overwrite existing baseline directory") - - comments = "Generating baselines into '{}'\n".format(basegen_dir) - num_gen = 0 - for model in _iter_model_file_substrs(case): - comments += " generating for model '{}'\n".format(model) - if model == 'cpl': - file_extensions = archive.get_hist_file_extensions(archive.get_entry('drv')) - else: - file_extensions = archive.get_hist_file_extensions(archive.get_entry(model)) - hists = _get_latest_hist_files(model, rundir, file_extensions, ref_case=ref_case) - logger.debug("latest_files: {}".format(hists)) - num_gen += len(hists) - for hist in hists: - basename = hist[hist.rfind(model):] - baseline = os.path.join(basegen_dir, basename) - if os.path.exists(baseline): - os.remove(baseline) - - safe_copy(hist, baseline, preserve_meta=False) - comments += " generating baseline '{}' from file {}\n".format(baseline, hist) - - # copy latest cpl log to baseline - # drop the date so that the name is generic - if case.get_value("COMP_INTERFACE") == "nuopc": - cplname = "med" - else: - cplname = "cpl" - - newestcpllogfile = case.get_latest_cpl_log(coupler_log_path=case.get_value("RUNDIR"), cplname=cplname) - if newestcpllogfile is None: - logger.warning("No {}.log file found in directory {}".format(cplname,case.get_value("RUNDIR"))) - else: - safe_copy(newestcpllogfile, os.path.join(basegen_dir, "{}.log.gz".format(cplname)), preserve_meta=False) - - testname = case.get_value("TESTCASE") - testopts = parse_test_name(case.get_value("CASEBASEID"))[1] - testopts = [] if testopts is None else testopts - expect(num_gen > 0 or (testname in ["PFS", "TSC"] or "B" in testopts), - "Could not generate any hist files for case '{}', something is seriously wrong".format(os.path.join(rundir, testcase))) - - if get_model() == "e3sm": - bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME) - with open(bless_log, "a") as fd: - fd.write("sha:{} date:{}\n".format(get_current_commit(repo=case.get_value("CIMEROOT")), - get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S"))) - - return True, comments - -def generate_baseline(case, baseline_dir=None, allow_baseline_overwrite=False): - with SharedArea(): - return _generate_baseline_impl(case, 
baseline_dir=baseline_dir, allow_baseline_overwrite=allow_baseline_overwrite) - -def get_ts_synopsis(comments): - r""" - Reduce case diff comments down to a single line synopsis so that we can put - something in the TestStatus file. It's expected that the comments provided - to this function came from compare_baseline, not compare_tests. - - >>> get_ts_synopsis('') - '' - >>> get_ts_synopsis('big error') - 'big error' - >>> get_ts_synopsis('big error\n') - 'big error' - >>> get_ts_synopsis('stuff\n File foo had a different field list from bar with suffix baz\nPass\n') - 'FIELDLIST field lists differ (otherwise bit-for-bit)' - >>> get_ts_synopsis('stuff\n File foo had no compare counterpart in bar with suffix baz\nPass\n') - 'ERROR BFAIL some baseline files were missing' - >>> get_ts_synopsis('stuff\n File foo had a different field list from bar with suffix baz\n File foo had no compare counterpart in bar with suffix baz\nPass\n') - 'MULTIPLE ISSUES: field lists differ and some baseline files were missing' - >>> get_ts_synopsis('stuff\n File foo did NOT match bar with suffix baz\nPass\n') - 'DIFF' - >>> get_ts_synopsis('stuff\n File foo did NOT match bar with suffix baz\n File foo had a different field list from bar with suffix baz\nPass\n') - 'DIFF' - >>> get_ts_synopsis('stuff\n File foo did NOT match bar with suffix baz\n File foo had no compare counterpart in bar with suffix baz\nPass\n') - 'DIFF' - >>> get_ts_synopsis('File foo had no compare counterpart in bar with suffix baz\n File foo had no original counterpart in bar with suffix baz\n') - 'DIFF' - """ - if not comments: - return "" - elif "\n" not in comments.strip(): - return comments.strip() - else: - has_fieldlist_differences = False - has_bfails = False - has_real_fails = False - for line in comments.splitlines(): - if FIELDLISTS_DIFFER in line: - has_fieldlist_differences = True - if NO_COMPARE in line: - has_bfails = True - for comparison_failure_comment in COMPARISON_FAILURE_COMMENT_OPTIONS: - if comparison_failure_comment in line: - has_real_fails = True - - if has_real_fails: - # If there are any real differences, we just report that: we assume that the - # user cares much more about those real differences than fieldlist or bfail - # issues, and we don't want to complicate the matter by trying to report all - # issues in this case. - return "DIFF" - else: - if has_fieldlist_differences and has_bfails: - # It's not clear which of these (if either) the user would care more - # about, so we report both. We deliberately avoid printing the keywords - # 'FIELDLIST' or TEST_NO_BASELINES_COMMENT (i.e., 'BFAIL'): if we printed - # those, then (e.g.) a 'grep -v FIELDLIST' (which the user might do if - # (s)he was expecting fieldlist differences) would also filter out this - # line, which we don't want. 
- return "MULTIPLE ISSUES: field lists differ and some baseline files were missing" - elif has_fieldlist_differences: - return "FIELDLIST field lists differ (otherwise bit-for-bit)" - elif has_bfails: - return "ERROR {} some baseline files were missing".format(TEST_NO_BASELINES_COMMENT) - else: - return "" diff --git a/scripts/lib/CIME/locked_files.py b/scripts/lib/CIME/locked_files.py deleted file mode 100644 index 4d30c1cfe7a..00000000000 --- a/scripts/lib/CIME/locked_files.py +++ /dev/null @@ -1,39 +0,0 @@ -from CIME.XML.standard_module_setup import * -from CIME.utils import safe_copy -from CIME.XML.generic_xml import GenericXML - -logger = logging.getLogger(__name__) - -LOCKED_DIR = "LockedFiles" - -def lock_file(filename, caseroot=None, newname=None): - expect("/" not in filename, "Please just provide basename of locked file") - caseroot = os.getcwd() if caseroot is None else caseroot - newname = filename if newname is None else newname - fulllockdir = os.path.join(caseroot, LOCKED_DIR) - if not os.path.exists(fulllockdir): - os.mkdir(fulllockdir) - - logging.debug("Locking file {}".format(filename)) - - # JGF: It is extremely dangerous to alter our database (xml files) without - # going through the standard API. The copy below invalidates all existing - # GenericXML instances that represent this file and all caching that may - # have involved this file. We should probably seek a safer way of locking - # files. - safe_copy(os.path.join(caseroot, filename), os.path.join(fulllockdir, newname)) - GenericXML.invalidate(os.path.join(fulllockdir, newname)) - -def unlock_file(filename, caseroot=None): - expect("/" not in filename, "Please just provide basename of locked file") - caseroot = os.getcwd() if caseroot is None else caseroot - locked_path = os.path.join(caseroot, LOCKED_DIR, filename) - if os.path.exists(locked_path): - os.remove(locked_path) - - logging.debug("Unlocking file {}".format(filename)) - -def is_locked(filename, caseroot=None): - expect("/" not in filename, "Please just provide basename of locked file") - caseroot = os.getcwd() if caseroot is None else caseroot - return os.path.exists(os.path.join(caseroot, LOCKED_DIR, filename)) diff --git a/scripts/lib/CIME/nmlgen.py b/scripts/lib/CIME/nmlgen.py deleted file mode 100644 index 1cb28db26a0..00000000000 --- a/scripts/lib/CIME/nmlgen.py +++ /dev/null @@ -1,708 +0,0 @@ -"""Class for generating component namelists.""" - -# Typically ignore this. 
-# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import - -import datetime -import re -import hashlib - -from CIME.XML.standard_module_setup import * -from CIME.namelist import Namelist, parse, \ - character_literal_to_string, string_to_character_literal, \ - expand_literal_list, compress_literal_list, merge_literal_lists -from CIME.XML.namelist_definition import NamelistDefinition -from CIME.utils import expect - -logger = logging.getLogger(__name__) - -_var_ref_re = re.compile(r"\$(\{)?(?P\w+)(?(1)\})") - -_ymd_re = re.compile(r"%(?P[1-9][0-9]*)?y(?Pm(?Pd)?)?") - -_stream_file_template = """ - - GENERIC - - - - {domain_varnames} - - - {domain_filepath} - - - {domain_filenames} - - - - - {data_varnames} - - - {data_filepath} - - - {data_filenames} - - - {offset} - - -""" - -class NamelistGenerator(object): - - """Utility class for generating namelists for a given component.""" - - _streams_variables = [] - - #pylint:disable=too-many-arguments - def __init__(self, case, definition_files, files=None): - """Construct a namelist generator. - - Arguments: - `case` - `Case` object corresponding to the current case. - `infiles` - List of files with user namelist options. - `definition_files` - List of XML files containing namelist definitions. - `config` - A dictionary of attributes for matching defaults. - """ - # Save off important information from inputs. - self._case = case - self._din_loc_root = case.get_value('DIN_LOC_ROOT') - - # Create definition object - this will validate the xml schema in the definition file - self._definition = NamelistDefinition(definition_files[0], files=files) - - # Determine array of _stream_variables from definition object - # This is only applicable to data models - self._streams_namelists = {"streams": []} - self._streams_variables = self._definition.get_per_stream_entries() - for variable in self._streams_variables: - self._streams_namelists[variable] = [] - - # Create namelist object. - self._namelist = Namelist() - - # Define __enter__ and __exit__ so that we can use this as a context manager - def __enter__(self): - return self - - def __exit__(self, *_): - return False - - def init_defaults(self, infiles, config, skip_groups=None, skip_entry_loop=False): - """Return array of names of all definition nodes - """ - # first clean out any settings left over from previous calls - self.new_instance() - - self._definition.set_nodes(skip_groups=skip_groups) - - # Determine the array of entry nodes that will be acted upon - entry_nodes = self._definition.set_nodes(skip_groups=skip_groups) - - # Add attributes to definition object - self._definition.add_attributes(config) - - # Parse the infile and create namelist settings for the contents of infile - # this will override all other settings in add_defaults - for file_ in infiles: - # Parse settings in "groupless" mode. - nml_dict = parse(in_file=file_, groupless=True) - - # Add groups using the namelist definition. - new_namelist = self._definition.dict_to_namelist(nml_dict, filename=file_) - - # Make sure that the input is actually valid. - self._definition.validate(new_namelist, filename=file_) - - # Merge into existing settings (earlier settings have precedence - # over later settings). 
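# (Restating the precedence rule with hypothetical values: if an earlier
#  infile sets some_var = 8 and a later infile sets some_var = 16, the value
#  8 from the earlier file is the one kept.)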
- self._namelist.merge_nl(new_namelist) - - if not skip_entry_loop: - for entry in entry_nodes: - self.add_default(self._definition.get(entry, "id")) - - return [self._definition.get(entry, "id") for entry in entry_nodes] - - @staticmethod - def quote_string(string): - """Convert a string to a quoted Fortran literal. - - Does nothing if the string appears to be quoted already. - """ - if string == "" or \ - (string[0] not in ('"', "'") or string[0] != string[-1]): - string = string_to_character_literal(string) - return string - - def _to_python_value(self, name, literals): - """Transform a literal list as needed for `get_value`.""" - var_type, _, var_size, = self._definition.split_type_string(name) - if len(literals) > 0: - values = expand_literal_list(literals) - else: - return "" - - for i, scalar in enumerate(values): - if scalar == '': - values[i] = None - elif var_type == 'character': - values[i] = character_literal_to_string(scalar) - - if var_size == 1: - return values[0] - else: - return values - - def _to_namelist_literals(self, name, values): - """Transform a literal list as needed for `set_value`. - - This is the inverse of `_to_python_value`, except that many of the - changes have potentially already been performed. - """ - var_type, _, var_size, = self._definition.split_type_string(name) - if var_size == 1 and not isinstance(values, list): - values = [values] - - for i, scalar in enumerate(values): - if scalar is None: - values[i] = "" - elif var_type == 'character': - expect(not isinstance(scalar, list), name) - values[i] = self.quote_string(scalar) - - return compress_literal_list(values) - - def get_value(self, name): - """Get the current value of a given namelist variable. - - Note that the return value of this function is always a string or a list - of strings. E.g. the scalar logical value .false. will be returned as - `".false."`, while an array of two .false. values will be returned as - `[".false.", ".false."]`. Whether or not a value is scalar is determined - by checking the array size in the namelist definition file. - - Null values are converted to `None`, and repeated values are expanded, - e.g. `['2*3']` is converted to `['3', '3', '3']`. - - For character variables, the value is converted to a Python string (e.g. - quotation marks are removed). - - All other literals are returned as the raw string values that will be - written to the namelist. - """ - return self._to_python_value(name, self._namelist.get_value(name)) - - def set_value(self, name, value): - """Set the current value of a given namelist variable. - - Usually, you should use `add_default` instead of this function. - - The `name` argument is the name of the variable to set, and the `value` - is a list of strings to use as settings. If the variable is scalar, the - list is optional; i.e. a scalar logical can be set using either - `value='.false.'` or `value=['.false.']`. If the variable is of type - character, and the input is missing quotes, quotes will be added - automatically. If `None` is provided in place of a string, this will be - translated to a null value. - - Note that this function will overwrite the current value, which may hold - a user-specified setting. Even if `value` is (or contains) a null value, - the old setting for the variable will be thrown out completely. 
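        A minimal, hypothetical usage sketch (the variable names are
        illustrative and not taken from any real namelist definition; quoting
        and null handling follow the rules described above):

            nmlgen.set_value("stream_year_first", "2000")
            nmlgen.set_value("input_filenames", ["a.nc", None, "b.nc"])

        Here character values would be quoted automatically and the None entry
        written as a Fortran null value.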
- """ - var_group = self._definition.get_group(name) - literals = self._to_namelist_literals(name, value) - _, _, var_size, = self._definition.split_type_string(name) - self._namelist.set_variable_value(var_group, name, literals, var_size) - - def get_default(self, name, config=None, allow_none=False): - """Get the value of a variable from the namelist definition file. - - The `config` argument is passed through to the underlying - `NamelistDefaults.get_value` call as the `attribute` argument. - - The return value of this function is a list of values that were found in - the defaults file. If there is no matching default, this function - returns `None` if `allow_none=True` is passed, otherwise an error is - raised. - - Note that we perform some translation of the values, since there are a - few differences between Fortran namelist literals and values in the - defaults file: - 1) In the defaults file, whitespace is ignored except within strings, so - the output of this function strips out most whitespace. (This implies - that commas are the only way to separate array elements in the - defaults file.) - 2) In the defaults file, quotes around character literals (strings) are - optional, as long as the literal does not contain whitespace, commas, - or (single or double) quotes. If a setting for a character variable - does not seem to have quotes (and is not a null value), this function - will add them. - 3) Default values may refer to variables in a case's `env_*.xml` files. - This function replaces references of the form `$VAR` or `${VAR}` with - the value of the variable `VAR` in an env file, if that variable - exists. This behavior is suppressed within single-quoted strings - (similar to parameter expansion in shell scripts). - """ - default = self._definition.get_value_match(name, attributes=config, exact_match=False) - if default is None: - expect(allow_none, "No default value found for {}.".format(name)) - return None - default = expand_literal_list(default) - - var_type,_,_ = self._definition.split_type_string(name) - - for i, scalar in enumerate(default): - # Skip single-quoted strings. - if var_type == 'character' and scalar != '' and \ - scalar[0] == scalar[-1] == "'": - continue - match = _var_ref_re.search(scalar) - while match: - env_val = self._case.get_value(match.group('name')) - expect(env_val is not None, - "Namelist default for variable {} refers to unknown XML variable {}.".format(name, match.group('name'))) - scalar = scalar.replace(match.group(0), str(env_val), 1) - match = _var_ref_re.search(scalar) - default[i] = scalar - - # Deal with missing quotes. - - if var_type == 'character': - for i, scalar in enumerate(default): - # Preserve null values. - if scalar != '': - default[i] = self.quote_string(scalar) - - default = self._to_python_value(name, default) - - return default - - def get_streams(self): - """Get a list of all streams used for the current data model mode.""" - return self.get_default("streamslist") - - def clean_streams(self): - for variable in self._streams_variables: - self._streams_namelists[variable] = [] - self._streams_namelists["streams"] = [] - - def new_instance(self): - """ Clean the object just enough to introduce a new instance """ - self.clean_streams() - self._namelist.clean_groups() - - def _sub_fields(self, varnames): - """Substitute indicators with given values in a list of fields. 
- - Replace any instance of the following substring indicators with the - appropriate values: - %glc = two-digit GLC elevation class from 00 through glc_nec - - The difference between this function and `_sub_paths` is that this - function is intended to be used for variable names (especially from the - `strm_datvar` defaults), whereas `_sub_paths` is intended for use on - input data file paths. - - Returns a string. - - Example: If `_sub_fields` is called with an array containing two - elements, each of which contains two strings, and glc_nec=3: - foo bar - s2x_Ss_tsrf%glc tsrf%glc - then the returned array will be: - foo bar - s2x_Ss_tsrf00 tsrf00 - s2x_Ss_tsrf01 tsrf01 - s2x_Ss_tsrf02 tsrf02 - s2x_Ss_tsrf03 tsrf03 - """ - lines = varnames.split("\n") - new_lines = [] - for line in lines: - if not line: - continue - if "%glc" in line: - if self._case.get_value('GLC_NEC') == 0: - glc_nec_indices = [] - else: - glc_nec_indices = range(self._case.get_value('GLC_NEC')+1) - for i in glc_nec_indices: - new_lines.append(line.replace("%glc", "{:02d}".format(i))) - else: - new_lines.append(line) - return "\n".join(new_lines) - - @staticmethod - def _days_in_month(month, year=1): - """Number of days in the given month (specified as an int, 1-12). - - The `year` argument gives the year for which to request the number of - days, in a Gregorian calendar. Defaults to `1` (not a leap year). - """ - month_start = datetime.date(year, month, 1) - if month == 12: - next_year = year+1 - next_month = 1 - else: - next_year = year - next_month = month + 1 - next_month_start = datetime.date(next_year, next_month, 1) - return (next_month_start - month_start).days - - def _sub_paths(self, filenames, year_start, year_end): - """Substitute indicators with given values in a list of filenames. - - Replace any instance of the following substring indicators with the - appropriate values: - %y = year from the range year_start to year_end - %ym = year-month from the range year_start to year_end with all 12 - months - %ymd = year-month-day from the range year_start to year_end with - all 12 months - - For the date indicators, the year may be prefixed with a number of - digits to use (the default is 4). E.g. `%2ymd` can be used to change the - number of year digits from 4 to 2. - - Note that we assume that there is no mixing and matching of date - indicators, i.e. you cannot use `%4ymd` and `%2y` in the same line. Note - also that we use a no-leap calendar, i.e. every month has the same - number of days every year. - - The difference between this function and `_sub_fields` is that this - function is intended to be used for file names (especially from the - `strm_datfil` defaults), whereas `_sub_fields` is intended for use on - variable names. - - Returns a string (filenames separated by newlines). 
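        As an illustration (filenames hypothetical): with year_start=2000 and
        year_end=2001, a line containing "%y" expands to two lines (2000 and
        2001), "%ym" expands to 24 lines ("2000-01" through "2001-12"), and
        "%ymd" expands to one line per day, so "file.%ymd.nc" becomes
        "file.2000-01-01.nc" ... "file.2001-12-31.nc", with February always
        given 28 days because of the no-leap calendar. A digit prefix such as
        "%2ymd" shortens the year to two digits ("00-01-01").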
- """ - lines = [line for line in filenames.split("\n") if line] - new_lines = [] - for line in lines: - match = _ymd_re.search(filenames) - if match is None: - new_lines.append(line) - continue - if match.group('digits'): - year_format = "{:0"+match.group('digits')+"d}" - else: - year_format = "{:04d}" - for year in range(year_start, year_end+1): - if match.group('day'): - for month in range(1, 13): - days = self._days_in_month(month) - for day in range(1, days+1): - date_string = (year_format + "-{:02d}-{:02d}").format(year, month, day) - new_line = line.replace(match.group(0), date_string) - new_lines.append(new_line) - elif match.group('month'): - for month in range(1, 13): - date_string = (year_format + "-{:02d}").format(year, month) - new_line = line.replace(match.group(0), date_string) - new_lines.append(new_line) - else: - date_string = year_format.format(year) - new_line = line.replace(match.group(0), date_string) - new_lines.append(new_line) - return "\n".join(new_lines) - - def create_stream_file_and_update_shr_strdata_nml(self, config, #pylint:disable=too-many-locals - stream, stream_path, data_list_path): - """Write the pseudo-XML file corresponding to a given stream. - - Arguments: - `config` - Used to look up namelist defaults. This is used *in addition* - to the `config` used to construct the namelist generator. The - main reason to supply additional configuration options here - is to specify stream-specific settings. - `stream` - Name of the stream. - `stream_path` - Path to write the stream file to. - `data_list_path` - Path of file to append input data information to. - """ - - # Stream-specific configuration. - config = config.copy() - config["stream"] = stream - - # Figure out the details of this stream. - if stream in ("prescribed", "copyall"): - # Assume only one file for prescribed mode! 
- grid_file = self.get_default("strm_grid_file", config) - domain_filepath, domain_filenames = os.path.split(grid_file) - data_file = self.get_default("strm_data_file", config) - data_filepath, data_filenames = os.path.split(data_file) - else: - domain_filepath = self.get_default("strm_domdir", config) - domain_filenames = self.get_default("strm_domfil", config) - data_filepath = self.get_default("strm_datdir", config) - data_filenames = self.get_default("strm_datfil", config) - - domain_varnames = self._sub_fields(self.get_default("strm_domvar", config)) - data_varnames = self._sub_fields(self.get_default("strm_datvar", config)) - offset = self.get_default("strm_offset", config) - year_start = int(self.get_default("strm_year_start", config)) - year_end = int(self.get_default("strm_year_end", config)) - data_filenames = self._sub_paths(data_filenames, year_start, year_end) - domain_filenames = self._sub_paths(domain_filenames, year_start, year_end) - - # Overwrite domain_file if should be set from stream data - if domain_filenames == 'null': - domain_filepath = data_filepath - domain_filenames = data_filenames.splitlines()[0] - - stream_file_text = _stream_file_template.format( - domain_varnames=domain_varnames, - domain_filepath=domain_filepath, - domain_filenames=domain_filenames, - data_varnames=data_varnames, - data_filepath=data_filepath, - data_filenames=data_filenames, - offset=offset, - ) - - with open(stream_path, 'w') as stream_file: - stream_file.write(stream_file_text) - - lines_hash = self._get_input_file_hash(data_list_path) - with open(data_list_path, 'a') as input_data_list: - for i, filename in enumerate(domain_filenames.split("\n")): - if filename.strip() == '': - continue - filepath, filename = os.path.split(filename) - if not filepath: - filepath = os.path.join(domain_filepath, os.path.dirname(filename.strip())) - string = "domain{:d} = {}\n".format(i+1, filepath) - hashValue = hashlib.md5(string.rstrip().encode('utf-8')).hexdigest() - if hashValue not in lines_hash: - input_data_list.write(string) - for i, filename in enumerate(data_filenames.split("\n")): - if filename.strip() == '': - continue - filepath = os.path.join(data_filepath, filename.strip()) - string = "file{:d} = {}\n".format(i+1, filepath) - hashValue = hashlib.md5(string.rstrip().encode('utf-8')).hexdigest() - if hashValue not in lines_hash: - input_data_list.write(string) - self.update_shr_strdata_nml(config, stream, stream_path) - - def update_shr_strdata_nml(self, config, stream, stream_path): - """Updates values for the `shr_strdata_nml` namelist group. - - This should be done once per stream, and it shouldn't usually be called - directly, since `create_stream_file` calls this method itself. - """ - assert config['stream'] == stream, \ - "config stream is {}, but input stream is {}".format(config['stream'], stream) - # Double-check the years for sanity. - year_start = int(self.get_default("strm_year_start", config)) - year_end = int(self.get_default("strm_year_end", config)) - year_align = int(self.get_default("strm_year_align", config)) - expect(year_end >= year_start, - "Stream {} starts at year {:d}, but ends at earlier year {:d}.".format(stream, year_start, year_end)) - # Add to streams file. 
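# (Illustrative note, values hypothetical: each entry has the form
#  "<stream file basename> <year_align> <year_start> <year_end>",
#  e.g. "datm.streams.txt.SOLAR 1 1948 1972".)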
- stream_string = "{} {:d} {:d} {:d}".format(os.path.basename(stream_path), - year_align, year_start, - year_end) - self._streams_namelists["streams"].append(stream_string) - for variable in self._streams_variables: - default = self.get_default(variable, config) - expect(len(default) == 1, - "Stream {} had multiple settings for variable {}.".format(stream, variable)) - self._streams_namelists[variable].append(default[0]) - - def set_abs_file_path(self, file_path): - """If `file_path` is relative, make it absolute using `DIN_LOC_ROOT`. - - If an absolute path is input, it is returned unchanged. - """ - if os.path.isabs(file_path): - return file_path - else: - fullpath = os.path.join(self._din_loc_root, file_path) - return fullpath - - def add_default(self, name, value=None, ignore_abs_path=None): - """Add a value for the specified variable to the namelist. - - If the specified variable is already defined in the object, the existing - value is preserved. Otherwise, the `value` argument, if provided, will - be used to set the value. If no such value is found, the defaults file - will be consulted. If null values are present in any of the above, the - result will be a merged array of values. - - If no value for the variable is found via any of the above, this method - will raise an exception. - """ - # pylint: disable=protected-access - group = self._definition.get_group(name) - - # Use this to see if we need to raise an error when nothing is found. - have_value = False - # Check for existing value. - current_literals = self._namelist.get_variable_value(group, name) - - # Check for input argument. - if value is not None: - have_value = True - # if compression were to occur, this is where it does - literals = self._to_namelist_literals(name, value) - current_literals = merge_literal_lists(literals, current_literals) - - # Check for default value. - default = self.get_default(name, allow_none=True) - if default is not None: - have_value = True - default_literals = self._to_namelist_literals(name, default) - current_literals = merge_literal_lists(default_literals, current_literals) - expect(have_value, "No default value found for {}.".format(name)) - - # Go through file names and prepend input data root directory for - # absolute pathnames. - var_type, _, var_size = self._definition.split_type_string(name) - if var_type == "character" and ignore_abs_path is None: - var_input_pathname = self._definition.get_input_pathname(name) - if var_input_pathname == 'abs': - current_literals = expand_literal_list(current_literals) - for i, literal in enumerate(current_literals): - if literal == '': - continue - file_path = character_literal_to_string(literal) - # NOTE - these are hard-coded here and a better way is to make these extensible - if file_path == 'UNSET' or file_path == 'idmap' or file_path == 'idmap_ignore' or file_path == 'unset': - continue - if file_path == 'null': - continue - file_path = self.set_abs_file_path(file_path) - if not os.path.exists(file_path): - logger.warning("File not found: {} = {}, will attempt to download in check_input_data phase".format(name, literal)) - current_literals[i] = string_to_character_literal(file_path) - current_literals = compress_literal_list(current_literals) - - # Set the new value. 
- self._namelist.set_variable_value(group, name, current_literals, var_size) - - def create_shr_strdata_nml(self): - """Set defaults for `shr_strdata_nml` variables other than the variable domainfile """ - self.add_default("datamode") - if self.get_value("datamode") != 'NULL': - self.add_default("streams", - value=self._streams_namelists["streams"]) - for variable in self._streams_variables: - self.add_default(variable, - value=self._streams_namelists[variable]) - - def get_group_variables(self, group_name): - return self._namelist.get_group_variables(group_name) - - def _get_input_file_hash(self, data_list_path): - lines_hash = set() - if os.path.isfile(data_list_path): - with open(data_list_path, "r") as input_data_list: - for line in input_data_list: - hashValue = hashlib.md5(line.rstrip().encode('utf-8')).hexdigest() - logger.debug( "Found line {} with hash {}".format(line,hashValue)) - lines_hash.add(hashValue) - return lines_hash - - def _write_input_files(self, data_list_path): - """Write input data files to list.""" - # append to input_data_list file - lines_hash = self._get_input_file_hash(data_list_path) - with open(data_list_path, "a") as input_data_list: - for group_name in self._namelist.get_group_names(): - for variable_name in self._namelist.get_variable_names(group_name): - input_pathname = self._definition.get_node_element_info(variable_name, "input_pathname") - if input_pathname is not None: - # This is where we end up for all variables that are paths - # to input data files. - literals = self._namelist.get_variable_value(group_name, variable_name) - for literal in literals: - file_path = character_literal_to_string(literal) - # NOTE - these are hard-coded here and a better way is to make these extensible - if file_path == 'UNSET' or file_path == 'idmap' or file_path == 'idmap_ignore': - continue - if input_pathname == 'abs': - # No further mangling needed for absolute paths. - # At this point, there are overwrites that should be ignored - if not os.path.isabs(file_path): - continue - else: - pass - elif input_pathname.startswith('rel:'): - # The part past "rel" is the name of a variable that - # this variable specifies its path relative to. - root_var = input_pathname[4:] - root_dir = self.get_value(root_var) - file_path = os.path.join(root_dir, file_path) - else: - expect(False, - "Bad input_pathname value: {}.".format(input_pathname)) - # Write to the input data list. - string = "{} = {}".format(variable_name, file_path) - hashValue = hashlib.md5(string.rstrip().encode('utf-8')).hexdigest() - if hashValue not in lines_hash: - logger.debug("Adding line {} with hash {}".format(string,hashValue)) - input_data_list.write(string+"\n") - else: - logger.debug("Line already in file {}".format(string)) - - def write_output_file(self, namelist_file, data_list_path=None, groups=None, sorted_groups=True): - """Write out the namelists and input data files. - - The `namelist_file` and `modelio_file` are the locations to which the - component and modelio namelists will be written, respectively. The - `data_list_path` argument is the location of the `*.input_data_list` - file, which will have the input data files added to it. 
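        A hedged usage sketch (file names are illustrative):

            nmlgen.write_output_file("datm_in",
                                     data_list_path="datm.input_data_list")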
- """ - self._definition.validate(self._namelist) - if groups is None: - groups = self._namelist.get_group_names() - - # remove groups that are never in namelist file - if "modelio" in groups: - groups.remove("modelio") - if "seq_maps" in groups: - groups.remove("seq_maps") - - # write namelist file - self._namelist.write(namelist_file, groups=groups, sorted_groups=sorted_groups) - - if data_list_path is not None: - self._write_input_files(data_list_path) - - - # For MCT - def add_nmlcontents(self, filename, group, append=True, format_="nmlcontents", sorted_groups=True): - """ Write only contents of nml group """ - self._namelist.write(filename, groups=[group], append=append, format_=format_, sorted_groups=sorted_groups) - - def write_seq_maps(self, filename): - """ Write mct out seq_maps.rc""" - self._namelist.write(filename, groups=["seq_maps"], format_="rc") - - def write_modelio_file(self, filename): - """ Write mct component modelio files""" - self._namelist.write(filename, groups=["modelio", "pio_inparm"], format_="nml") - - # For NUOPC - def write_nuopc_modelio_file(self, filename): - """ Write nuopc component modelio files""" - self._namelist.write(filename, groups=["pio_inparm"], format_="nml") - - - # For NUOPC - def write_nuopc_config_file(self, filename, data_list_path=None, skip_comps=None): - """ Write the nuopc config file""" - self._definition.validate(self._namelist) - groups = self._namelist.get_group_names() - self._namelist.write(filename, skip_comps=skip_comps, groups=groups, format_='nuopc', sorted_groups=False) - if data_list_path is not None: - # append to input_data_list file - self._write_input_files(data_list_path) diff --git a/scripts/lib/CIME/provenance.py b/scripts/lib/CIME/provenance.py deleted file mode 100644 index 53ffb300850..00000000000 --- a/scripts/lib/CIME/provenance.py +++ /dev/null @@ -1,458 +0,0 @@ -#!/usr/bin/env python - -""" -Library for saving build/run provenance. 
-""" - -from CIME.XML.standard_module_setup import * -from CIME.utils import touch, gzip_existing_file, SharedArea, convert_to_babylonian_time, get_current_commit, indent_string, run_cmd, run_cmd_no_fail, safe_copy - -import tarfile, getpass, signal, glob, shutil, sys - -logger = logging.getLogger(__name__) - -def _get_batch_job_id_for_syslog(case): - """ - mach_syslog only works on certain machines - """ - mach = case.get_value("MACH") - try: - if mach in ['titan']: - return os.environ["PBS_JOBID"] - elif mach in ['anvil', 'compy', 'cori-haswell', 'cori-knl']: - return os.environ["SLURM_JOB_ID"] - elif mach in ['mira', 'theta']: - return os.environ["COBALT_JOBID"] - elif mach in ['summit']: - return os.environ["LSB_JOBID"] - except KeyError: - pass - - return None - -def _save_build_provenance_e3sm(case, lid): - cimeroot = case.get_value("CIMEROOT") - exeroot = case.get_value("EXEROOT") - caseroot = case.get_value("CASEROOT") - - # Save git describe - describe_prov = os.path.join(exeroot, "GIT_DESCRIBE.{}".format(lid)) - desc = get_current_commit(tag=True, repo=cimeroot) - with open(describe_prov, "w") as fd: - fd.write(desc) - - # Save HEAD - headfile = os.path.join(cimeroot, ".git", "logs", "HEAD") - headfile_prov = os.path.join(exeroot, "GIT_LOGS_HEAD.{}".format(lid)) - if os.path.exists(headfile_prov): - os.remove(headfile_prov) - if os.path.exists(headfile): - safe_copy(headfile, headfile_prov, preserve_meta=False) - - # Save SourceMods - sourcemods = os.path.join(caseroot, "SourceMods") - sourcemods_prov = os.path.join(exeroot, "SourceMods.{}.tar.gz".format(lid)) - if os.path.exists(sourcemods_prov): - os.remove(sourcemods_prov) - if os.path.isdir(sourcemods): - with tarfile.open(sourcemods_prov, "w:gz") as tfd: - tfd.add(sourcemods, arcname="SourceMods") - - # Save build env - env_prov = os.path.join(exeroot, "build_environment.{}.txt".format(lid)) - if os.path.exists(env_prov): - os.remove(env_prov) - env_module = case.get_env("mach_specific") - env_module.save_all_env_info(env_prov) - - # For all the just-created post-build provenance files, symlink a generic name - # to them to indicate that these are the most recent or active. - for item in ["GIT_DESCRIBE", "GIT_LOGS_HEAD", "SourceMods", "build_environment"]: - globstr = "{}/{}.{}*".format(exeroot, item, lid) - matches = glob.glob(globstr) - expect(len(matches) < 2, "Multiple matches for glob {} should not have happened".format(globstr)) - if matches: - the_match = matches[0] - generic_name = the_match.replace(".{}".format(lid), "") - if os.path.exists(generic_name): - os.remove(generic_name) - os.symlink(the_match, generic_name) - -def _save_build_provenance_cesm(case, lid): # pylint: disable=unused-argument - version = case.get_value("MODEL_VERSION") - # version has already been recorded - srcroot = case.get_value("SRCROOT") - manic = os.path.join("manage_externals","checkout_externals") - manic_full_path = os.path.join(srcroot, manic) - out = None - if os.path.exists(manic_full_path): - args = " --status --verbose --no-logging" - stat, out, err = run_cmd(manic_full_path + args, from_dir=srcroot) - errmsg = """Error gathering provenance information from manage_externals. 
- -manage_externals error message: -{err} - -manage_externals output: -{out} - -To solve this, either: - -(1) Find and fix the problem: From {srcroot}, try to get this command to work: - {manic}{args} - -(2) If you don't need provenance information, rebuild with --skip-provenance-check -""".format(out=indent_string(out, 4), err=indent_string(err, 4), - srcroot=srcroot, manic=manic, args=args) - expect(stat==0,errmsg) - - caseroot = case.get_value("CASEROOT") - with open(os.path.join(caseroot, "CaseStatus"), "a") as fd: - if version is not None and version != "unknown": - fd.write("CESM version is {}\n".format(version)) - if out is not None: - fd.write("{}\n".format(out)) - -def save_build_provenance(case, lid=None): - with SharedArea(): - model = case.get_value("MODEL") - lid = os.environ["LID"] if lid is None else lid - - if model == "e3sm": - _save_build_provenance_e3sm(case, lid) - elif model == "cesm": - _save_build_provenance_cesm(case, lid) - -def _save_prerun_timing_e3sm(case, lid): - project = case.get_value("PROJECT", subgroup=case.get_primary_job()) - if not case.is_save_timing_dir_project(project): - return - - timing_dir = case.get_value("SAVE_TIMING_DIR") - if timing_dir is None or not os.path.isdir(timing_dir): - logger.warning("SAVE_TIMING_DIR {} is not valid. E3SM requires a valid SAVE_TIMING_DIR to archive timing data.".format(timing_dir)) - return - - logger.info("Archiving timing data and associated provenance in {}.".format(timing_dir)) - rundir = case.get_value("RUNDIR") - blddir = case.get_value("EXEROOT") - caseroot = case.get_value("CASEROOT") - cimeroot = case.get_value("CIMEROOT") - base_case = case.get_value("CASE") - full_timing_dir = os.path.join(timing_dir, "performance_archive", getpass.getuser(), base_case, lid) - if os.path.exists(full_timing_dir): - logger.warning("{} already exists. Skipping archive of timing data and associated provenance.".format(full_timing_dir)) - return - - try: - os.makedirs(full_timing_dir) - except OSError: - logger.warning("{} cannot be created. 
Skipping archive of timing data and associated provenance.".format(full_timing_dir)) - return - - mach = case.get_value("MACH") - compiler = case.get_value("COMPILER") - - # For some batch machines save queue info - job_id = _get_batch_job_id_for_syslog(case) - if job_id is not None: - if mach == "mira": - for cmd, filename in [("qstat -f", "qstatf"), ("qstat -lf %s" % job_id, "qstatf_jobid")]: - filename = "%s.%s" % (filename, lid) - run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir) - gzip_existing_file(os.path.join(full_timing_dir, filename)) - elif mach == "theta": - for cmd, filename in [("qstat -l --header JobID:JobName:User:Project:WallTime:QueuedTime:Score:RunTime:TimeRemaining:Nodes:State:Location:Mode:Command:Args:Procs:Queue:StartTime:attrs:Geometry", "qstatf"), - ("qstat -lf %s" % job_id, "qstatf_jobid"), - ("xtnodestat", "xtnodestat"), - ("xtprocadmin", "xtprocadmin")]: - filename = "%s.%s" % (filename, lid) - run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir) - gzip_existing_file(os.path.join(full_timing_dir, filename)) - elif mach in ["cori-haswell", "cori-knl"]: - for cmd, filename in [("sinfo -a -l", "sinfol"), ("sqs -f %s" % job_id, "sqsf_jobid"), - # ("sqs -f", "sqsf"), - ("squeue -o '%.10i %.15P %.20j %.10u %.7a %.2t %.6D %.8C %.10M %.10l %.20S %.20V'", "squeuef"), - ("squeue -t R -o '%.10i %R'", "squeues")]: - filename = "%s.%s" % (filename, lid) - run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir) - gzip_existing_file(os.path.join(full_timing_dir, filename)) - elif mach == "titan": - for cmd, filename in [("qstat -f %s >" % job_id, "qstatf_jobid"), - ("xtnodestat >", "xtnodestat"), - # ("qstat -f >", "qstatf"), - # ("xtdb2proc -f", "xtdb2proc"), - ("showq >", "showq")]: - full_cmd = cmd + " " + filename - run_cmd_no_fail(full_cmd + "." + lid, from_dir=full_timing_dir) - gzip_existing_file(os.path.join(full_timing_dir, filename + "." + lid)) - elif mach in ["anvil", "compy"]: - for cmd, filename in [("sinfo -l", "sinfol"), - ("squeue -o '%all' --job {}".format(job_id), "squeueall_jobid"), - ("squeue -o '%.10i %.10P %.15u %.20a %.2t %.6D %.8C %.12M %.12l %.20S %.20V %j'", "squeuef"), - ("squeue -t R -o '%.10i %R'", "squeues")]: - filename = "%s.%s" % (filename, lid) - run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir) - gzip_existing_file(os.path.join(full_timing_dir, filename)) - elif mach == "summit": - for cmd, filename in [("bjobs -u all >", "bjobsu_all"), - ("bjobs -r -u all -o 'jobid slots exec_host' >", "bjobsru_allo"), - ("bjobs -l -UF %s >" % job_id, "bjobslUF_jobid")]: - full_cmd = cmd + " " + filename - run_cmd_no_fail(full_cmd + "." + lid, from_dir=full_timing_dir) - gzip_existing_file(os.path.join(full_timing_dir, filename + "." 
+ lid)) - - # copy/tar SourceModes - source_mods_dir = os.path.join(caseroot, "SourceMods") - if os.path.isdir(source_mods_dir): - with tarfile.open(os.path.join(full_timing_dir, "SourceMods.{}.tar.gz".format(lid)), "w:gz") as tfd: - tfd.add(source_mods_dir, arcname="SourceMods") - - # Save various case configuration items - case_docs = os.path.join(full_timing_dir, "CaseDocs.{}".format(lid)) - os.mkdir(case_docs) - globs_to_copy = [ - "CaseDocs/*", - "*.run", - ".*.run", - "*.xml", - "user_nl_*", - "*env_mach_specific*", - "Macros*", - "README.case", - "Depends.{}".format(mach), - "Depends.{}".format(compiler), - "Depends.{}.{}".format(mach, compiler), - "software_environment.txt" - ] - for glob_to_copy in globs_to_copy: - for item in glob.glob(os.path.join(caseroot, glob_to_copy)): - safe_copy(item, os.path.join(case_docs, "{}.{}".format(os.path.basename(item).lstrip("."), lid)), preserve_meta=False) - - # Copy some items from build provenance - blddir_globs_to_copy = [ - "GIT_LOGS_HEAD", - "build_environment.txt" - ] - for blddir_glob_to_copy in blddir_globs_to_copy: - for item in glob.glob(os.path.join(blddir, blddir_glob_to_copy)): - safe_copy(item, os.path.join(full_timing_dir, os.path.basename(item) + "." + lid), preserve_meta=False) - - # Save state of repo - from_repo = cimeroot if os.path.exists(os.path.join(cimeroot, ".git")) else os.path.dirname(cimeroot) - desc = get_current_commit(tag=True, repo=from_repo) - with open(os.path.join(full_timing_dir, "GIT_DESCRIBE.{}".format(lid)), "w") as fd: - fd.write(desc) - - # What this block does is mysterious to me (JGF) - if job_id is not None: - - # Kill mach_syslog from previous run if one exists - syslog_jobid_path = os.path.join(rundir, "syslog_jobid.{}".format(job_id)) - if os.path.exists(syslog_jobid_path): - try: - with open(syslog_jobid_path, "r") as fd: - syslog_jobid = int(fd.read().strip()) - os.kill(syslog_jobid, signal.SIGTERM) - except (ValueError, OSError) as e: - logger.warning("Failed to kill syslog: {}".format(e)) - finally: - os.remove(syslog_jobid_path) - - # If requested, spawn a mach_syslog process to monitor job progress - sample_interval = case.get_value("SYSLOG_N") - if sample_interval > 0: - archive_checkpoints = os.path.join(full_timing_dir, "checkpoints.{}".format(lid)) - os.mkdir(archive_checkpoints) - touch("{}/e3sm.log.{}".format(rundir, lid)) - syslog_jobid = run_cmd_no_fail("./mach_syslog {si} {jobid} {lid} {rundir} {rundir}/timing/checkpoints {ac} >& /dev/null & echo $!".format(si=sample_interval, jobid=job_id, lid=lid, rundir=rundir, ac=archive_checkpoints), - from_dir=os.path.join(caseroot, "Tools")) - with open(os.path.join(rundir, "syslog_jobid.{}".format(job_id)), "w") as fd: - fd.write("{}\n".format(syslog_jobid)) - -def _save_prerun_provenance_e3sm(case, lid): - if case.get_value("SAVE_TIMING"): - _save_prerun_timing_e3sm(case, lid) - -def _save_prerun_provenance_cesm(case, lid): # pylint: disable=unused-argument - pass - -def save_prerun_provenance(case, lid=None): - with SharedArea(): - # Always save env - lid = os.environ["LID"] if lid is None else lid - env_module = case.get_env("mach_specific") - logdir = os.path.join(case.get_value("CASEROOT"), "logs") - if not os.path.isdir(logdir): - os.makedirs(logdir) - env_module.save_all_env_info(os.path.join(logdir, "run_environment.txt.{}".format(lid))) - - model = case.get_value("MODEL") - if model == "e3sm": - _save_prerun_provenance_e3sm(case, lid) - elif model == "cesm": - _save_prerun_provenance_cesm(case, lid) - -def 
_save_postrun_provenance_cesm(case, lid): - save_timing = case.get_value("SAVE_TIMING") - if save_timing: - rundir = case.get_value("RUNDIR") - timing_dir = os.path.join("timing", case.get_value("CASE")) - shutil.move(os.path.join(rundir,"timing"), - os.path.join(timing_dir,"timing."+lid)) - -def _save_postrun_timing_e3sm(case, lid): - caseroot = case.get_value("CASEROOT") - rundir = case.get_value("RUNDIR") - - # tar timings - rundir_timing_dir = os.path.join(rundir, "timing." + lid) - shutil.move(os.path.join(rundir, "timing"), rundir_timing_dir) - with tarfile.open("%s.tar.gz" % rundir_timing_dir, "w:gz") as tfd: - tfd.add(rundir_timing_dir, arcname=os.path.basename(rundir_timing_dir)) - - shutil.rmtree(rundir_timing_dir) - - gzip_existing_file(os.path.join(caseroot, "timing", "e3sm_timing_stats.%s" % lid)) - - # JGF: not sure why we do this - timing_saved_file = "timing.%s.saved" % lid - touch(os.path.join(caseroot, "timing", timing_saved_file)) - - project = case.get_value("PROJECT", subgroup=case.get_primary_job()) - if not case.is_save_timing_dir_project(project): - return - - timing_dir = case.get_value("SAVE_TIMING_DIR") - if timing_dir is None or not os.path.isdir(timing_dir): - return - - mach = case.get_value("MACH") - base_case = case.get_value("CASE") - full_timing_dir = os.path.join(timing_dir, "performance_archive", getpass.getuser(), base_case, lid) - - if not os.path.isdir(full_timing_dir): - return - - # Kill mach_syslog - job_id = _get_batch_job_id_for_syslog(case) - if job_id is not None: - syslog_jobid_path = os.path.join(rundir, "syslog_jobid.{}".format(job_id)) - if os.path.exists(syslog_jobid_path): - try: - with open(syslog_jobid_path, "r") as fd: - syslog_jobid = int(fd.read().strip()) - os.kill(syslog_jobid, signal.SIGTERM) - except (ValueError, OSError) as e: - logger.warning("Failed to kill syslog: {}".format(e)) - finally: - os.remove(syslog_jobid_path) - - # copy timings - safe_copy("%s.tar.gz" % rundir_timing_dir, full_timing_dir, preserve_meta=False) - - # - # save output files and logs - # - globs_to_copy = [] - if job_id is not None: - if mach == "titan": - globs_to_copy.append("%s*OU" % job_id) - elif mach == "anvil": - globs_to_copy.append("%s*run*%s" % (case.get_value("CASE"), job_id)) - elif mach == "compy": - globs_to_copy.append("slurm.err") - globs_to_copy.append("slurm.out") - elif mach in ["mira", "theta"]: - globs_to_copy.append("%s*error" % job_id) - globs_to_copy.append("%s*output" % job_id) - globs_to_copy.append("%s*cobaltlog" % job_id) - elif mach in ["cori-haswell", "cori-knl"]: - globs_to_copy.append("%s*run*%s" % (case.get_value("CASE"), job_id)) - elif mach == "summit": - globs_to_copy.append("e3sm.stderr.%s" % job_id) - globs_to_copy.append("e3sm.stdout.%s" % job_id) - - globs_to_copy.append("logs/run_environment.txt.{}".format(lid)) - globs_to_copy.append(os.path.join(rundir, "e3sm.log.{}.gz".format(lid))) - globs_to_copy.append(os.path.join(rundir, "cpl.log.{}.gz".format(lid))) - globs_to_copy.append("timing/*.{}*".format(lid)) - globs_to_copy.append("CaseStatus") - - for glob_to_copy in globs_to_copy: - for item in glob.glob(os.path.join(caseroot, glob_to_copy)): - basename = os.path.basename(item) - if basename != timing_saved_file: - if lid not in basename and not basename.endswith(".gz"): - safe_copy(item, os.path.join(full_timing_dir, "{}.{}".format(basename, lid)), preserve_meta=False) - else: - safe_copy(item, full_timing_dir, preserve_meta=False) - - # zip everything - for root, _, files in os.walk(full_timing_dir): - for 
filename in files: - if not filename.endswith(".gz"): - gzip_existing_file(os.path.join(root, filename)) - -def _save_postrun_provenance_e3sm(case, lid): - if case.get_value("SAVE_TIMING"): - _save_postrun_timing_e3sm(case, lid) - -def save_postrun_provenance(case, lid=None): - with SharedArea(): - model = case.get_value("MODEL") - lid = os.environ["LID"] if lid is None else lid - - if model == "e3sm": - _save_postrun_provenance_e3sm(case, lid) - elif model == "cesm": - _save_postrun_provenance_cesm(case, lid) - -_WALLTIME_BASELINE_NAME = "walltimes" -_WALLTIME_FILE_NAME = "walltimes" -_GLOBAL_MINUMUM_TIME = 900 -_GLOBAL_WIGGLE = 1000 -_WALLTIME_TOLERANCE = ( (600, 2.0), (1800, 1.5), (9999999999, 1.25) ) - -def get_recommended_test_time_based_on_past(baseline_root, test, raw=False): - if baseline_root is not None: - try: - the_path = os.path.join(baseline_root, _WALLTIME_BASELINE_NAME, test, _WALLTIME_FILE_NAME) - if os.path.exists(the_path): - last_line = int(open(the_path, "r").readlines()[-1]) - if raw: - best_walltime = last_line - else: - best_walltime = None - for cutoff, tolerance in _WALLTIME_TOLERANCE: - if last_line <= cutoff: - best_walltime = int(float(last_line) * tolerance) - break - - if best_walltime < _GLOBAL_MINUMUM_TIME: - best_walltime = _GLOBAL_MINUMUM_TIME - - best_walltime += _GLOBAL_WIGGLE - - return convert_to_babylonian_time(best_walltime) - except Exception: - # We NEVER want a failure here to kill the run - logger.warning("Failed to read test time: {}".format(sys.exc_info()[1])) - - return None - -def save_test_time(baseline_root, test, time_seconds): - if baseline_root is not None: - try: - with SharedArea(): - the_dir = os.path.join(baseline_root, _WALLTIME_BASELINE_NAME, test) - if not os.path.exists(the_dir): - os.makedirs(the_dir) - - the_path = os.path.join(the_dir, _WALLTIME_FILE_NAME) - with open(the_path, "a") as fd: - fd.write("{}\n".format(int(time_seconds))) - - except Exception: - # We NEVER want a failure here to kill the run - logger.warning("Failed to store test time: {}".format(sys.exc_info()[1])) diff --git a/scripts/lib/CIME/simple_compare.py b/scripts/lib/CIME/simple_compare.py deleted file mode 100644 index 69ef73f04b2..00000000000 --- a/scripts/lib/CIME/simple_compare.py +++ /dev/null @@ -1,236 +0,0 @@ -import os, re - -from CIME.utils import expect - -############################################################################### -def _normalize_string_value(value, case): -############################################################################### - """ - Some of the strings are inherently prone to diffs, like file - paths, etc. This function attempts to normalize that data so that - it will not cause diffs. - """ - # Any occurance of case must be normalized because test-ids might not match - if (case is not None): - case_re = re.compile(r'{}[.]([GC])[.]([^./\s]+)'.format(case)) - value = case_re.sub("{}.ACTION.TESTID".format(case), value) - - if ("/" in value): - # File path, just return the basename - return os.path.basename(value) - elif ("username" in value): - return '' - elif (".log." 
in value): - # Remove the part that's prone to diff - components = value.split(".") - return os.path.basename(".".join(components[0:-1])) - else: - return value - -############################################################################### -def _skip_comments_and_whitespace(lines, idx): -############################################################################### - """ - Starting at idx, return next valid idx of lines that contains real data - """ - if (idx == len(lines)): - return idx - - comment_re = re.compile(r'^[#!]') - - lines_slice = lines[idx:] - for line in lines_slice: - line = line.strip() - if (comment_re.match(line) is not None or line == ""): - idx += 1 - else: - return idx - - return idx - -############################################################################### -def _compare_data(gold_lines, comp_lines, case, offset_method=False): -############################################################################### - """ - >>> teststr = ''' - ... data1 - ... data2 data3 - ... data4 data5 data6 - ... - ... # Comment - ... data7 data8 data9 data10 - ... ''' - >>> _compare_data(teststr.splitlines(), teststr.splitlines(), None) - ('', 0) - - >>> teststr2 = ''' - ... data1 - ... data2 data30 - ... data4 data5 data6 - ... data7 data8 data9 data10 - ... data00 - ... ''' - >>> results,_ = _compare_data(teststr.splitlines(), teststr2.splitlines(), None) - >>> print(results) - Inequivalent lines data2 data3 != data2 data30 - NORMALIZED: data2 data3 != data2 data30 - Found extra lines - data00 - - >>> teststr3 = ''' - ... data1 - ... data4 data5 data6 - ... data7 data8 data9 data10 - ... data00 - ... ''' - >>> results,_ = _compare_data(teststr3.splitlines(), teststr2.splitlines(), None, offset_method=True) - >>> print(results) - Inequivalent lines data4 data5 data6 != data2 data30 - NORMALIZED: data4 data5 data6 != data2 data30 - - """ - comments = "" - cnt = 0 - gidx, cidx = 0, 0 - gnum, cnum = len(gold_lines), len(comp_lines) - while (gidx < gnum or cidx < cnum): - gidx = _skip_comments_and_whitespace(gold_lines, gidx) - cidx = _skip_comments_and_whitespace(comp_lines, cidx) - - if (gidx == gnum): - if (cidx == cnum): - return comments, cnt - else: - comments += "Found extra lines\n" - comments += "\n".join(comp_lines[cidx:]) + "\n" - return comments, cnt - elif (cidx == cnum): - comments += "Missing lines\n" - comments += "\n".join(gold_lines[gidx:1]) + "\n" - return comments, cnt - - gold_value = gold_lines[gidx].strip() - gold_value = gold_value.replace('"',"'") - comp_value = comp_lines[cidx].strip() - comp_value = comp_value.replace('"',"'") - - norm_gold_value = _normalize_string_value(gold_value, case) - norm_comp_value = _normalize_string_value(comp_value, case) - - if (norm_gold_value != norm_comp_value): - comments += "Inequivalent lines {} != {}\n".format(gold_value, comp_value) - comments += " NORMALIZED: {} != {}\n".format(norm_gold_value, norm_comp_value) - cnt += 1 - if offset_method and (norm_gold_value != norm_comp_value): - if gnum > cnum: - gidx += 1 - else: - cidx += 1 - else: - gidx += 1 - cidx += 1 - - return comments, cnt - -############################################################################### -def compare_files(gold_file, compare_file, case=None): -############################################################################### - """ - Returns true if files are the same, comments are returned too: - (success, comments) - """ - expect(os.path.exists(gold_file), "File not found: {}".format(gold_file)) - expect(os.path.exists(compare_file), 
"File not found: {}".format(compare_file)) - - comments, cnt = _compare_data(open(gold_file, "r").readlines(), - open(compare_file, "r").readlines(), case) - - if cnt > 0: - comments2, cnt2 = _compare_data(open(gold_file, "r").readlines(), - open(compare_file, "r").readlines(), - case, offset_method=True) - if cnt2 < cnt: - comments = comments2 - - return comments == "", comments - -############################################################################### -def compare_runconfigfiles(gold_file, compare_file, case=None): -############################################################################### - """ - Returns true if files are the same, comments are returned too: - (success, comments) - """ - expect(os.path.exists(gold_file), "File not found: {}".format(gold_file)) - expect(os.path.exists(compare_file), "File not found: {}".format(compare_file)) - - #create dictionary's of the runconfig files and compare them - gold_dict = _parse_runconfig(gold_file) - compare_dict = _parse_runconfig(compare_file) - - comments = findDiff(gold_dict, compare_dict, case=case) - comments = comments.replace(" d1", " " + gold_file) - comments = comments.replace(" d2", " " + compare_file) - # this picks up the case that an entry in compare is not in gold - if comments == "": - comments = findDiff(compare_dict, gold_dict, case=case) - comments = comments.replace(" d2", " " + gold_file) - comments = comments.replace(" d1", " " + compare_file) - - return comments == "", comments - -def _parse_runconfig(filename): - runconfig = {} - inrunseq = False - insubsection = None - subsection_re = re.compile(r'\s*(\S+)::') - group_re = re.compile(r'\s*(\S+)\s*:\s*(\S+)') - var_re = re.compile(r'\s*(\S+)\s*=\s*(\S+)') - with open(filename, "r") as fd: - for line in fd: - # remove comments - line = line.split('#')[0] - subsection_match = subsection_re.match(line) - group_match = group_re.match(line) - var_match = var_re.match(line) - if re.match(r'\s*runSeq\s*::', line): - runconfig['runSeq'] = [] - inrunseq = True - elif re.match(r'\s*::\s*', line): - inrunseq = False - elif inrunseq: - runconfig['runSeq'].append(line) - elif subsection_match: - insubsection = subsection_match.group(1) - runconfig[insubsection] = {} - elif group_match: - runconfig[group_match.group(1)] = group_match.group(2) - elif insubsection and var_match: - runconfig[insubsection][var_match.group(1)] = var_match.group(2) - return runconfig - -def findDiff(d1, d2, path="", case=None): - comment = "" - for k in d1.keys(): - if not d2.has_key(k): - comment += path + ":\n" - comment += k + " as key not in d2\n" - else: - if type(d1[k]) is dict: - if path == "": - path = k - else: - path = path + "->" + k - comment += findDiff(d1[k],d2[k], path=path, case=case) - else: - if case in d1[k]: - pass - elif "username" in k: - pass - elif "logfile" in k: - pass - elif d1[k] != d2[k]: - comment += path+":\n" - comment += " - {} : {}\n".format(k,d1[k]) - comment += " + {} : {}\n".format(k,d2[k]) - return comment diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py deleted file mode 100644 index 5d9a66c950a..00000000000 --- a/scripts/lib/CIME/test_scheduler.py +++ /dev/null @@ -1,1082 +0,0 @@ -""" -A library for scheduling/running through the phases of a set -of system tests. Supports phase-level parallelism (can make progres -on multiple system tests at once). - -TestScheduler will handle the TestStatus for the 1-time setup -phases. 
All other phases need to handle their own status because -they can be run outside the context of TestScheduler. -""" - -import traceback, stat, threading, time, glob -from collections import OrderedDict - -from CIME.XML.standard_module_setup import * -import six -from get_tests import get_recommended_test_time, get_build_groups -from CIME.utils import append_status, append_testlog, TESTS_FAILED_ERR_CODE, parse_test_name, get_full_test_name, get_model, \ - convert_to_seconds, get_cime_root, get_project, get_timestamp, get_python_libs_root -from CIME.test_status import * -from CIME.XML.machines import Machines -from CIME.XML.generic_xml import GenericXML -from CIME.XML.env_test import EnvTest -from CIME.XML.env_mach_pes import EnvMachPes -from CIME.XML.files import Files -from CIME.XML.component import Component -from CIME.XML.tests import Tests -from CIME.case import Case -from CIME.wait_for_tests import wait_for_tests -from CIME.provenance import get_recommended_test_time_based_on_past -from CIME.locked_files import lock_file -from CIME.cs_status_creator import create_cs_status -from CIME.hist_utils import generate_teststatus -from CIME.build import post_build - -logger = logging.getLogger(__name__) - -# Phases managed by TestScheduler -TEST_START = "INIT" # Special pseudo-phase just for test_scheduler bookkeeping -PHASES = [TEST_START, CREATE_NEWCASE_PHASE, XML_PHASE, SETUP_PHASE, - SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE, RUN_PHASE] # Order matters - -############################################################################### -def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads): -############################################################################### - new_test_names = [] - caseopts = [] - for test_name in test_names: - testcase, caseopts, grid, compset, machine, compiler, testmod = parse_test_name(test_name) - rewrote_caseopt = False - if caseopts is not None: - for idx, caseopt in enumerate(caseopts): - if caseopt.startswith("P"): - caseopt = caseopt[1:] - if "x" in caseopt: - old_procs, old_thrds = caseopt.split("x") - else: - old_procs, old_thrds = caseopt, None - - new_procs = force_procs if force_procs is not None else old_procs - new_thrds = force_threads if force_threads is not None else old_thrds - - newcaseopt = ("P{}".format(new_procs)) if new_thrds is None else ("P{}x{}".format(new_procs, new_thrds)) - caseopts[idx] = newcaseopt - - rewrote_caseopt = True - break - - if not rewrote_caseopt: - force_procs = "M" if force_procs is None else force_procs - newcaseopt = ("P{}".format(force_procs)) if force_threads is None else ("P{}x{}".format(force_procs, force_threads)) - if caseopts is None: - caseopts = [newcaseopt] - else: - caseopts.append(newcaseopt) - - new_test_name = get_full_test_name(testcase, caseopts=caseopts, grid=grid, compset=compset, machine=machine, compiler=compiler, testmod=testmod) - new_test_names.append(new_test_name) - - return new_test_names - -_TIME_CACHE = {} -############################################################################### -def _get_time_est(test, baseline_root, as_int=False, use_cache=False, raw=False): -############################################################################### - if test in _TIME_CACHE and use_cache: - return _TIME_CACHE[test] - - recommended_time = get_recommended_test_time_based_on_past(baseline_root, test, raw=raw) - - if recommended_time is None: - recommended_time = get_recommended_test_time(test) - - if as_int: - if recommended_time is None: - recommended_time = 
9999999999 - else: - recommended_time = convert_to_seconds(recommended_time) - - if use_cache: - _TIME_CACHE[test] = recommended_time - - return recommended_time - -############################################################################### -def _order_tests_by_runtime(tests, baseline_root): -############################################################################### - tests.sort(key=lambda x: _get_time_est(x, baseline_root, as_int=True, use_cache=True, raw=True), reverse=True) - -############################################################################### -class TestScheduler(object): -############################################################################### - - ########################################################################### - def __init__(self, test_names, test_data=None, - no_run=False, no_build=False, no_setup=False, no_batch=None, - test_root=None, test_id=None, - machine_name=None, compiler=None, - baseline_root=None, baseline_cmp_name=None, baseline_gen_name=None, - clean=False, namelists_only=False, - project=None, parallel_jobs=None, - walltime=None, proc_pool=None, - use_existing=False, save_timing=False, queue=None, - allow_baseline_overwrite=False, output_root=None, - force_procs=None, force_threads=None, mpilib=None, - input_dir=None, pesfile=None, mail_user=None, mail_type=None, allow_pnl=False, - non_local=False, single_exe=False): - ########################################################################### - self._cime_root = get_cime_root() - self._cime_model = get_model() - self._cime_driver = "mct" - self._save_timing = save_timing - self._queue = queue - self._test_data = {} if test_data is None else test_data # Format: {test_name -> {data_name -> data}} - self._mpilib = mpilib # allow override of default mpilib - self._completed_tests = 0 - self._input_dir = input_dir - self._pesfile = pesfile - self._allow_baseline_overwrite = allow_baseline_overwrite - self._allow_pnl = allow_pnl - self._non_local = non_local - self._build_groups = [] - - self._mail_user = mail_user - self._mail_type = mail_type - - self._machobj = Machines(machine=machine_name) - - self._model_build_cost = 4 - - # If user is forcing procs or threads, re-write test names to reflect this. 
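# (Illustrative sketch, test names hypothetical: with force_procs=16, a name
#  such as "ERS_P4x2.f19_g16.A" is rewritten to "ERS_P16x2.f19_g16.A", while a
#  name with no existing "P" option, e.g. "SMS.f19_g16.A", becomes
#  "SMS_P16.f19_g16.A".)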
- if force_procs or force_threads: - test_names = _translate_test_names_for_new_pecount(test_names, force_procs, force_threads) - - self._no_setup = no_setup - self._no_build = no_build or no_setup or namelists_only - self._no_run = no_run or self._no_build - self._output_root = output_root - # Figure out what project to use - if project is None: - self._project = get_project() - if self._project is None: - self._project = self._machobj.get_value("PROJECT") - else: - self._project = project - - # We will not use batch system if user asked for no_batch or if current - # machine is not a batch machine - self._no_batch = no_batch or not self._machobj.has_batch_system() - expect(not (self._no_batch and self._queue is not None), - "Does not make sense to request a queue without batch system") - - # Determine and resolve test_root - if test_root is not None: - self._test_root = test_root - elif self._output_root is not None: - self._test_root = self._output_root - else: - self._test_root = self._machobj.get_value("CIME_OUTPUT_ROOT") - - if self._project is not None: - self._test_root = self._test_root.replace("$PROJECT", self._project) - - self._test_root = os.path.abspath(self._test_root) - self._test_id = test_id if test_id is not None else get_timestamp() - - self._compiler = self._machobj.get_default_compiler() if compiler is None else compiler - - self._clean = clean - self._namelists_only = namelists_only - - self._walltime = walltime - - if parallel_jobs is None: - mach_parallel_jobs = self._machobj.get_value("NTEST_PARALLEL_JOBS") - if mach_parallel_jobs is None: - mach_parallel_jobs = self._machobj.get_value("MAX_MPITASKS_PER_NODE") - self._parallel_jobs = min(len(test_names), mach_parallel_jobs) - else: - self._parallel_jobs = parallel_jobs - - self._baseline_cmp_name = baseline_cmp_name # Implies comparison should be done if not None - self._baseline_gen_name = baseline_gen_name # Implies generation should be done if not None - - # Compute baseline_root - self._baseline_root = baseline_root if baseline_root is not None \ - else self._machobj.get_value("BASELINE_ROOT") - - if self._project is not None: - self._baseline_root = self._baseline_root.replace("$PROJECT", self._project) - - self._baseline_root = os.path.abspath(self._baseline_root) - - if baseline_cmp_name or baseline_gen_name: - if self._baseline_cmp_name: - full_baseline_dir = os.path.join(self._baseline_root, self._baseline_cmp_name) - expect(os.path.isdir(full_baseline_dir), - "Missing baseline comparison directory {}".format(full_baseline_dir)) - - # the following is to assure that the existing generate directory is not overwritten - if self._baseline_gen_name: - full_baseline_dir = os.path.join(self._baseline_root, self._baseline_gen_name) - existing_baselines = [] - for test_name in test_names: - test_baseline = os.path.join(full_baseline_dir, test_name) - if os.path.isdir(test_baseline): - existing_baselines.append(test_baseline) - - expect(allow_baseline_overwrite or len(existing_baselines) == 0, - "Baseline directories already exists {}\n" \ - "Use -o to avoid this error".format(existing_baselines)) - - if self._cime_model == "e3sm": - _order_tests_by_runtime(test_names, self._baseline_root) - - # This is the only data that multiple threads will simultaneously access - # Each test has it's own value and setting/retrieving items from a dict - # is atomic, so this should be fine to use without mutex. 
- # name -> (phase, status) - self._tests = OrderedDict() - for test_name in test_names: - self._tests[test_name] = (TEST_START, TEST_PASS_STATUS) - - # Oversubscribe by 1/4 - if proc_pool is None: - pes = int(self._machobj.get_value("MAX_TASKS_PER_NODE")) - self._proc_pool = int(pes * 1.25) - else: - self._proc_pool = int(proc_pool) - - self._procs_avail = self._proc_pool - - # Setup phases - self._phases = list(PHASES) - if self._no_setup: - self._phases.remove(SETUP_PHASE) - if self._no_build: - self._phases.remove(SHAREDLIB_BUILD_PHASE) - self._phases.remove(MODEL_BUILD_PHASE) - if self._no_run: - self._phases.remove(RUN_PHASE) - - if use_existing: - for test in self._tests: - with TestStatus(self._get_test_dir(test)) as ts: - for phase, status in ts: - if phase in CORE_PHASES: - if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]: - if status == TEST_FAIL_STATUS: - # Import for potential subsequent waits - ts.set_status(phase, TEST_PEND_STATUS) - - # We need to pick up here - break - - else: - if phase != SUBMIT_PHASE: - # Somewhat subtle. Create_test considers submit/run to be the run phase, - # so don't try to update test status for a passed submit phase - self._update_test_status(test, phase, TEST_PEND_STATUS) - self._update_test_status(test, phase, status) - - if phase == RUN_PHASE: - logger.info("Test {} passed and will not be re-run".format(test)) - - logger.info("Using existing test directory {}".format(self._get_test_dir(test))) - else: - # None of the test directories should already exist. - for test in self._tests: - expect(not os.path.exists(self._get_test_dir(test)), - "Cannot create new case in directory '{}', it already exists." - " Pick a different test-id".format(self._get_test_dir(test))) - logger.info("Creating test directory {}".format(self._get_test_dir(test))) - - # Setup build groups - if single_exe: - self._build_groups = [self._tests] - elif self._cime_model == "e3sm": - # Any test that's in a shared-enabled suite with other tests should share exes - self._build_groups = get_build_groups(self._tests) - else: - self._build_groups = [ (item,) for item in self._tests ] - - # Build group to exeroot map - self._build_group_exeroots = {} - for build_group in self._build_groups: - self._build_group_exeroots[build_group] = None - - logger.debug("Build groups are:") - for build_group in self._build_groups: - for test_name in build_group: - logger.debug("{}{}".format(" " if test_name == build_group[0] else " ", test_name)) - - # By the end of this constructor, this program should never hard abort, - # instead, errors will be placed in the TestStatus files for the various - # tests cases - - ########################################################################### - def get_testnames(self): - ########################################################################### - return list(self._tests.keys()) - - ########################################################################### - def _log_output(self, test, output): - ########################################################################### - test_dir = self._get_test_dir(test) - if not os.path.isdir(test_dir): - # Note: making this directory could cause create_newcase to fail - # if this is run before. 
- os.makedirs(test_dir) - append_testlog(output, caseroot=test_dir) - - ########################################################################### - def _get_case_id(self, test): - ########################################################################### - baseline_action_code = "" - if self._baseline_gen_name: - baseline_action_code += "G" - if self._baseline_cmp_name: - baseline_action_code += "C" - if len(baseline_action_code) > 0: - return "{}.{}.{}".format(test, baseline_action_code, self._test_id) - else: - return "{}.{}".format(test, self._test_id) - - ########################################################################### - def _get_test_dir(self, test): - ########################################################################### - return os.path.join(self._test_root, self._get_case_id(test)) - - ########################################################################### - def _get_test_data(self, test): - ########################################################################### - # Must be atomic - return self._tests[test] - - ########################################################################### - def _is_broken(self, test): - ########################################################################### - status = self._get_test_status(test) - return status != TEST_PASS_STATUS and status != TEST_PEND_STATUS - - ########################################################################### - def _work_remains(self, test): - ########################################################################### - test_phase, test_status = self._get_test_data(test) - return (test_status == TEST_PASS_STATUS or test_status == TEST_PEND_STATUS) and\ - test_phase != self._phases[-1] - - ########################################################################### - def _get_test_status(self, test, phase=None): - ########################################################################### - curr_phase, curr_status = self._get_test_data(test) - if phase is None or phase == curr_phase: - return curr_status - else: - # Assume all future phases are PEND - if phase is not None and self._phases.index(phase) > self._phases.index(curr_phase): - return TEST_PEND_STATUS - - # Assume all older phases PASSed - return TEST_PASS_STATUS - - ########################################################################### - def _get_test_phase(self, test): - ########################################################################### - return self._get_test_data(test)[0] - - ########################################################################### - def _update_test_status(self, test, phase, status): - ########################################################################### - phase_idx = self._phases.index(phase) - old_phase, old_status = self._get_test_data(test) - - if old_phase == phase: - expect(old_status == TEST_PEND_STATUS, - "Only valid to transition from PEND to something else, found '{}' for phase '{}'".format(old_status, phase)) - expect(status != TEST_PEND_STATUS, - "Cannot transition from PEND -> PEND") - else: - expect(old_status == TEST_PASS_STATUS, - "Why did we move on to next phase when prior phase did not pass?") - expect(status == TEST_PEND_STATUS, - "New phase should be set to pending status") - expect(self._phases.index(old_phase) == phase_idx - 1, - "Skipped phase? 
{} {}".format(old_phase, phase_idx)) - - # Must be atomic - self._tests[test] = (phase, status) - - ########################################################################### - def _shell_cmd_for_phase(self, test, cmd, phase, from_dir=None): - ########################################################################### - while True: - rc, output, errput = run_cmd(cmd, from_dir=from_dir) - if rc != 0: - self._log_output(test, - "{} FAILED for test '{}'.\nCommand: {}\nOutput: {}\n". - format(phase, test, cmd, - output.encode('utf-8') + b"\n" + errput.encode('utf-8'))) - # Temporary hack to get around odd file descriptor use by - # buildnml scripts. - if "bad interpreter" in output: - time.sleep(1) - continue - else: - return False, errput - else: - # We don't want "RUN PASSED" in the TestStatus.log if the only thing that - # succeeded was the submission. - phase = "SUBMIT" if phase == RUN_PHASE else phase - self._log_output(test, - "{} PASSED for test '{}'.\nCommand: {}\nOutput: {}\n". - format(phase, test, cmd, - output.encode('utf-8') + b"\n" + errput.encode('utf-8'))) - return True, errput - - ########################################################################### - def _create_newcase_phase(self, test): - ########################################################################### - test_dir = self._get_test_dir(test) - - _, case_opts, grid, compset,\ - machine, compiler, test_mods = parse_test_name(test) - - create_newcase_cmd = "{} --case {} --res {} --compset {}"\ - " --test".format(os.path.join(self._cime_root, "scripts", "create_newcase"), - test_dir, grid, compset) - if machine is not None: - create_newcase_cmd += " --machine {}".format(machine) - if compiler is not None: - create_newcase_cmd += " --compiler {}".format(compiler) - if self._project is not None: - create_newcase_cmd += " --project {} ".format(self._project) - if self._output_root is not None: - create_newcase_cmd += " --output-root {} ".format(self._output_root) - if self._input_dir is not None: - create_newcase_cmd += " --input-dir {} ".format(self._input_dir) - if self._non_local: - create_newcase_cmd += " --non-local" - - if self._pesfile is not None: - create_newcase_cmd += " --pesfile {} ".format(self._pesfile) - - if test_mods is not None: - files = Files(comp_interface=self._cime_driver) - - if test_mods.find('/') != -1: - (component, modspath) = test_mods.split('/', 1) - else: - error = "Missing testmod component. 
Testmods are specified as '${component}-${testmod}'" - self._log_output(test, error) - return False, error - - testmods_dir = files.get_value("TESTS_MODS_DIR", {"component": component}) - test_mod_file = os.path.join(testmods_dir, component, modspath) - if not os.path.exists(test_mod_file): - error = "Missing testmod file '{}'".format(test_mod_file) - self._log_output(test, error) - return False, error - - create_newcase_cmd += " --user-mods-dir {}".format(test_mod_file) - - mpilib = None - ninst = 1 - ncpl = 1 - if case_opts is not None: - for case_opt in case_opts: # pylint: disable=not-an-iterable - if case_opt.startswith('M'): - mpilib = case_opt[1:] - create_newcase_cmd += " --mpilib {}".format(mpilib) - logger.debug (" MPILIB set to {}".format(mpilib)) - elif case_opt.startswith('N'): - expect(ncpl == 1,"Cannot combine _C and _N options") - ninst = case_opt[1:] - create_newcase_cmd += " --ninst {}".format(ninst) - logger.debug (" NINST set to {}".format(ninst)) - elif case_opt.startswith('C'): - expect(ninst == 1,"Cannot combine _C and _N options") - ncpl = case_opt[1:] - create_newcase_cmd += " --ninst {} --multi-driver" .format(ncpl) - logger.debug (" NCPL set to {}" .format(ncpl)) - elif case_opt.startswith('P'): - pesize = case_opt[1:] - create_newcase_cmd += " --pecount {}".format(pesize) - elif case_opt.startswith('V'): - self._cime_driver = case_opt[1:] - create_newcase_cmd += " --driver {}".format(self._cime_driver) - - # create_test mpilib option overrides default but not explicitly set case_opt mpilib - if mpilib is None and self._mpilib is not None: - create_newcase_cmd += " --mpilib {}".format(self._mpilib) - logger.debug (" MPILIB set to {}".format(self._mpilib)) - - if self._queue is not None: - create_newcase_cmd += " --queue={}".format(self._queue) - else: - # We need to hard code the queue for this test on cheyenne - # otherwise it runs in share and fails intermittently - test_case = parse_test_name(test)[0] - if test_case == "NODEFAIL": - machine = machine if machine is not None else self._machobj.get_machine_name() - if machine == "cheyenne": - create_newcase_cmd += " --queue=regular" - - if self._walltime is not None: - create_newcase_cmd += " --walltime {}".format(self._walltime) - else: - # model specific ways of setting time - if self._cime_model == "e3sm": - recommended_time = _get_time_est(test, self._baseline_root) - - if recommended_time is not None: - create_newcase_cmd += " --walltime {}".format(recommended_time) - - else: - if test in self._test_data and "options" in self._test_data[test] and \ - "wallclock" in self._test_data[test]['options']: - create_newcase_cmd += " --walltime {}".format(self._test_data[test]['options']['wallclock']) - - logger.debug("Calling create_newcase: " + create_newcase_cmd) - return self._shell_cmd_for_phase(test, create_newcase_cmd, CREATE_NEWCASE_PHASE) - - ########################################################################### - def _xml_phase(self, test): - ########################################################################### - test_case = parse_test_name(test)[0] - - # Create, fill and write an envtest object - test_dir = self._get_test_dir(test) - envtest = EnvTest(test_dir) - - # Determine list of component classes that this coupler/driver knows how - # to deal with. This list follows the same order as compset longnames follow. 
- files = Files(comp_interface=self._cime_driver) - drv_config_file = files.get_value("CONFIG_CPL_FILE") - drv_comp = Component(drv_config_file, "CPL") - envtest.add_elements_by_group(files, {}, "env_test.xml") - envtest.add_elements_by_group(drv_comp, {}, "env_test.xml") - envtest.set_value("TESTCASE", test_case) - envtest.set_value("TEST_TESTID", self._test_id) - envtest.set_value("CASEBASEID", test) - if test in self._test_data and "options" in self._test_data[test] and \ - "memleak_tolerance" in self._test_data[test]['options']: - envtest.set_value("TEST_MEMLEAK_TOLERANCE", self._test_data[test]['options']['memleak_tolerance']) - - test_argv = "-testname {} -testroot {}".format(test, self._test_root) - if self._baseline_gen_name: - test_argv += " -generate {}".format(self._baseline_gen_name) - basegen_case_fullpath = os.path.join(self._baseline_root,self._baseline_gen_name, test) - logger.debug("basegen_case is {}".format(basegen_case_fullpath)) - envtest.set_value("BASELINE_NAME_GEN", self._baseline_gen_name) - envtest.set_value("BASEGEN_CASE", os.path.join(self._baseline_gen_name, test)) - if self._baseline_cmp_name: - test_argv += " -compare {}".format(self._baseline_cmp_name) - envtest.set_value("BASELINE_NAME_CMP", self._baseline_cmp_name) - envtest.set_value("BASECMP_CASE", os.path.join(self._baseline_cmp_name, test)) - - envtest.set_value("TEST_ARGV", test_argv) - envtest.set_value("CLEANUP", self._clean) - - envtest.set_value("BASELINE_ROOT", self._baseline_root) - envtest.set_value("GENERATE_BASELINE", self._baseline_gen_name is not None) - envtest.set_value("COMPARE_BASELINE", self._baseline_cmp_name is not None) - envtest.set_value("CCSM_CPRNC", self._machobj.get_value("CCSM_CPRNC", resolved=False)) - tput_tolerance = self._machobj.get_value("TEST_TPUT_TOLERANCE", resolved=False) - if test in self._test_data and "options" in self._test_data[test] and \ - "tput_tolerance" in self._test_data[test]['options']: - tput_tolerance = self._test_data[test]['options']['tput_tolerance'] - - envtest.set_value("TEST_TPUT_TOLERANCE", 0.25 if tput_tolerance is None else tput_tolerance) - - # Add the test instructions from config_test to env_test in the case - config_test = Tests() - testnode = config_test.get_test_node(test_case) - envtest.add_test(testnode) - # Determine the test_case from the test name - test_case, case_opts = parse_test_name(test)[:2] - - # Determine case_opts from the test_case - if case_opts is not None: - logger.debug("case_opts are {} ".format(case_opts)) - for opt in case_opts: # pylint: disable=not-an-iterable - - logger.debug("case_opt is {}".format(opt)) - if opt == 'D': - envtest.set_test_parameter("DEBUG", "TRUE") - logger.debug (" DEBUG set to TRUE") - - elif opt == 'E': - envtest.set_test_parameter("USE_ESMF_LIB", "TRUE") - logger.debug (" USE_ESMF_LIB set to TRUE") - - elif opt == 'CG': - envtest.set_test_parameter("CALENDAR", "GREGORIAN") - logger.debug (" CALENDAR set to {}".format(opt)) - - elif opt.startswith('L'): - match = re.match('L([A-Za-z])([0-9]*)', opt) - stop_option = {"y":"nyears", "m":"nmonths", "d":"ndays", "h":"nhours", - "s":"nseconds", "n":"nsteps"} - opt = match.group(1) - envtest.set_test_parameter("STOP_OPTION",stop_option[opt]) - opti = match.group(2) - envtest.set_test_parameter("STOP_N", opti) - - logger.debug (" STOP_OPTION set to {}".format(stop_option[opt])) - logger.debug (" STOP_N set to {}".format(opti)) - - elif opt.startswith('R'): - # R option is for testing in PTS_MODE or Single Column Model - # (SCM) mode - 
envtest.set_test_parameter("PTS_MODE", "TRUE") - - # For PTS_MODE, set all tasks and threads to 1 - comps=["ATM","LND","ICE","OCN","CPL","GLC","ROF","WAV"] - - for comp in comps: - envtest.set_test_parameter("NTASKS_"+comp, "1") - envtest.set_test_parameter("NTHRDS_"+comp, "1") - envtest.set_test_parameter("ROOTPE_"+comp, "0") - - elif (opt.startswith('I') or # Marker to distinguish tests with same name - ignored - opt.startswith('M') or # handled in create_newcase - opt.startswith('P') or # handled in create_newcase - opt.startswith('N') or # handled in create_newcase - opt.startswith('C') or # handled in create_newcase - opt.startswith('V') or # handled in create_newcase - opt == 'B'): # handled in run_phase - pass - - elif opt.startswith('IOP'): - logger.warning("IOP test option not yet implemented") - else: - expect(False, "Could not parse option '{}' ".format(opt)) - - envtest.write() - lock_file("env_run.xml", caseroot=test_dir, newname="env_run.orig.xml") - - with Case(test_dir, read_only=False) as case: - if self._output_root is None: - self._output_root = case.get_value("CIME_OUTPUT_ROOT") - # if we are running a single test we don't need sharedlibroot - if len(self._tests) > 1 and self._cime_model != "e3sm": - case.set_value("SHAREDLIBROOT", - os.path.join(self._output_root, - "sharedlibroot.{}".format(self._test_id))) - envtest.set_initial_values(case) - case.set_value("TEST", True) - case.set_value("SAVE_TIMING", self._save_timing) - - # handle single-exe here, all cases will use the EXEROOT from - # the first case in the build group - is_first_test, _, my_build_group = self._get_build_group(test) - if is_first_test: - expect(self._build_group_exeroots[my_build_group] is None, "Should not already have exeroot") - self._build_group_exeroots[my_build_group] = case.get_value("EXEROOT") - else: - build_group_exeroot = self._build_group_exeroots[my_build_group] - expect(build_group_exeroot is not None, "Should already have exeroot") - case.set_value("EXEROOT", build_group_exeroot) - - # Scale back build parallelism on systems with few cores - if self._model_build_cost > self._proc_pool: - case.set_value("GMAKE_J", self._proc_pool) - self._model_build_cost = self._proc_pool - - return True, "" - - ########################################################################### - def _setup_phase(self, test): - ########################################################################### - test_dir = self._get_test_dir(test) - rv = self._shell_cmd_for_phase(test, "./case.setup", SETUP_PHASE, from_dir=test_dir) - - # It's OK for this command to fail with baseline diffs but not catastrophically - if rv[0]: - cmdstat, output, _ = run_cmd("./case.cmpgen_namelists", combine_output=True, from_dir=test_dir) - expect(cmdstat in [0, TESTS_FAILED_ERR_CODE], "Fatal error in case.cmpgen_namelists: {}".format(output)) - - return rv - - ########################################################################### - def _sharedlib_build_phase(self, test): - ########################################################################### - is_first_test, first_test, _ = self._get_build_group(test) - if not is_first_test: - if self._get_test_status(first_test, phase=SHAREDLIB_BUILD_PHASE) == TEST_PASS_STATUS: - return True, "" - else: - return False, "Cannot use build for test {} because it failed".format(first_test) - - test_dir = self._get_test_dir(test) - return self._shell_cmd_for_phase(test, "./case.build --sharedlib-only", SHAREDLIB_BUILD_PHASE, from_dir=test_dir) - - 
########################################################################### - def _get_build_group(self, test): - ########################################################################### - for build_group in self._build_groups: - if test in build_group: - return test == build_group[0], build_group[0], build_group - - expect(False, "No build group for test '{}'".format(test)) - - ########################################################################### - def _model_build_phase(self, test): - ########################################################################### - is_first_test, first_test, _ = self._get_build_group(test) - - test_dir = self._get_test_dir(test) - - if not is_first_test: - if self._get_test_status(first_test, phase=MODEL_BUILD_PHASE) == TEST_PASS_STATUS: - with Case(test_dir, read_only=False) as case: - post_build(case, [], build_complete=True, save_build_provenance=False) - - return True, "" - else: - return False, "Cannot use build for test {} because it failed".format(first_test) - - return self._shell_cmd_for_phase(test, "./case.build --model-only", MODEL_BUILD_PHASE, from_dir=test_dir) - - ########################################################################### - def _run_phase(self, test): - ########################################################################### - test_dir = self._get_test_dir(test) - - case_opts = parse_test_name(test)[1] - if case_opts is not None and "B" in case_opts: # pylint: disable=unsupported-membership-test - self._log_output(test, "{} SKIPPED for test '{}'".format(RUN_PHASE, test)) - self._update_test_status_file(test, SUBMIT_PHASE, TEST_PASS_STATUS) - self._update_test_status_file(test, RUN_PHASE, TEST_PASS_STATUS) - - return True, "SKIPPED" - else: - cmd = "./case.submit" - if not self._allow_pnl: - cmd += " --skip-preview-namelist" - if self._no_batch: - cmd += " --no-batch" - if self._mail_user: - cmd += " --mail-user={}".format(self._mail_user) - if self._mail_type: - cmd += " -M={}".format(",".join(self._mail_type)) - - return self._shell_cmd_for_phase(test, cmd, RUN_PHASE, from_dir=test_dir) - - ########################################################################### - def _run_catch_exceptions(self, test, phase, run): - ########################################################################### - try: - return run(test) - except Exception as e: - exc_tb = sys.exc_info()[2] - errput = "Test '{}' failed in phase '{}' with exception '{}'\n".format(test, phase, str(e)) - errput += ''.join(traceback.format_tb(exc_tb)) - self._log_output(test, errput) - return False, errput - - ########################################################################### - def _get_procs_needed(self, test, phase, threads_in_flight=None, no_batch=False): - ########################################################################### - # For build pools, we must wait for the first case to complete XML, SHAREDLIB, - # and MODEL_BUILD phases before the other cases can do those phases - is_first_test, first_test, _ = self._get_build_group(test) - - if not is_first_test: - build_group_dep_phases = [XML_PHASE, SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE] - if phase in build_group_dep_phases: - if self._get_test_status(first_test, phase=phase) == TEST_PEND_STATUS: - return self._proc_pool + 1 - else: - return 1 - - if phase == RUN_PHASE and (self._no_batch or no_batch): - test_dir = self._get_test_dir(test) - total_pes = EnvMachPes(test_dir, read_only=True).get_value("TOTALPES") - return total_pes - - elif (phase == SHAREDLIB_BUILD_PHASE): - if 
self._cime_model == "cesm": - # Will force serialization of sharedlib builds - # TODO - instead of serializing, compute all library configs needed and build - # them all in parallel - for _, _, running_phase in threads_in_flight.values(): - if (running_phase == SHAREDLIB_BUILD_PHASE): - return self._proc_pool + 1 - - return 1 - elif (phase == MODEL_BUILD_PHASE): - # Model builds now happen in parallel - return self._model_build_cost - else: - return 1 - - ########################################################################### - def _wait_for_something_to_finish(self, threads_in_flight): - ########################################################################### - expect(len(threads_in_flight) <= self._parallel_jobs, "Oversubscribed?") - finished_tests = [] - while not finished_tests: - for test, thread_info in threads_in_flight.items(): - if not thread_info[0].is_alive(): - finished_tests.append((test, thread_info[1])) - - if not finished_tests: - time.sleep(0.2) - - for finished_test, procs_needed in finished_tests: - self._procs_avail += procs_needed - del threads_in_flight[finished_test] - - ########################################################################### - def _update_test_status_file(self, test, test_phase, status): - ########################################################################### - """ - In general, test_scheduler should not be responsible for updating - the TestStatus file, but there are a few cases where it has to. - """ - test_dir = self._get_test_dir(test) - with TestStatus(test_dir=test_dir, test_name=test) as ts: - ts.set_status(test_phase, status) - - ########################################################################### - def _consumer(self, test, test_phase, phase_method): - ########################################################################### - before_time = time.time() - success, errors = self._run_catch_exceptions(test, test_phase, phase_method) - elapsed_time = time.time() - before_time - status = (TEST_PEND_STATUS if test_phase == RUN_PHASE and not \ - self._no_batch else TEST_PASS_STATUS) if success else TEST_FAIL_STATUS - - if status != TEST_PEND_STATUS: - self._update_test_status(test, test_phase, status) - - if not self._work_remains(test): - self._completed_tests += 1 - total = len(self._tests) - status_str = "Finished {} for test {} in {:f} seconds ({}). 
[COMPLETED {:d} of {:d}]".format(test_phase, test, elapsed_time, status, self._completed_tests, total) - else: - status_str = "Finished {} for test {} in {:f} seconds ({})".format(test_phase, test, elapsed_time, status) - - if not success: - status_str += "\n Case dir: {}\n".format(self._get_test_dir(test)) - status_str += " Errors were:\n {}\n".format("\n ".join(str(errors.encode('utf-8')).splitlines())) - - logger.info(status_str) - - is_first_test = self._get_build_group(test)[0] - - if test_phase in [CREATE_NEWCASE_PHASE, XML_PHASE] or \ - (not is_first_test and test_phase in [SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE]): - # These are the phases for which TestScheduler is reponsible for - # updating the TestStatus file - self._update_test_status_file(test, test_phase, status) - - if test_phase == XML_PHASE: - append_status("Case Created using: "+" ".join(sys.argv), "README.case", caseroot=self._get_test_dir(test)) - - # On batch systems, we want to immediately submit to the queue, because - # it's very cheap to submit and will get us a better spot in line - if (success and not self._no_run and not self._no_batch and test_phase == MODEL_BUILD_PHASE): - logger.info("Starting {} for test {} with 1 proc on interactive node and {:d} procs on compute nodes".format(RUN_PHASE, test, self._get_procs_needed(test, RUN_PHASE, no_batch=True))) - self._update_test_status(test, RUN_PHASE, TEST_PEND_STATUS) - self._consumer(test, RUN_PHASE, self._run_phase) - - ########################################################################### - def _producer(self): - ########################################################################### - threads_in_flight = {} # test-name -> (thread, procs, phase) - while True: - work_to_do = False - num_threads_launched_this_iteration = 0 - for test in self._tests: - logger.debug("test_name: " + test) - - if self._work_remains(test): - work_to_do = True - - # If we have no workers available, immediately break out of loop so we can wait - if len(threads_in_flight) == self._parallel_jobs: - break - - if test not in threads_in_flight: - test_phase, test_status = self._get_test_data(test) - expect(test_status != TEST_PEND_STATUS, test) - next_phase = self._phases[self._phases.index(test_phase) + 1] - procs_needed = self._get_procs_needed(test, next_phase, threads_in_flight) - - if procs_needed <= self._procs_avail: - self._procs_avail -= procs_needed - - # Necessary to print this way when multiple threads printing - logger.info("Starting {} for test {} with {:d} procs".format(next_phase, test, procs_needed)) - - self._update_test_status(test, next_phase, TEST_PEND_STATUS) - new_thread = threading.Thread(target=self._consumer, - args=(test, next_phase, getattr(self, "_{}_phase".format(next_phase.lower())) )) - threads_in_flight[test] = (new_thread, procs_needed, next_phase) - new_thread.start() - num_threads_launched_this_iteration += 1 - - logger.debug(" Current workload:") - total_procs = 0 - for the_test, the_data in six.iteritems(threads_in_flight): - logger.debug(" {}: {} -> {}".format(the_test, the_data[2], the_data[1])) - total_procs += the_data[1] - - logger.debug(" Total procs in use: {}".format(total_procs)) - else: - if not threads_in_flight: - msg = "Phase '{}' for test '{}' required more processors, {:d}, than this machine can provide, {:d}".format(next_phase, test, procs_needed, self._procs_avail) - logger.warning(msg) - self._update_test_status(test, next_phase, TEST_PEND_STATUS) - self._update_test_status(test, next_phase, TEST_FAIL_STATUS) - 
self._log_output(test, msg) - if next_phase == RUN_PHASE: - self._update_test_status_file(test, SUBMIT_PHASE, TEST_PASS_STATUS) - self._update_test_status_file(test, next_phase, TEST_FAIL_STATUS) - else: - self._update_test_status_file(test, next_phase, TEST_FAIL_STATUS) - num_threads_launched_this_iteration += 1 - - if not work_to_do: - break - - if num_threads_launched_this_iteration == 0: - # No free resources, wait for something in flight to finish - self._wait_for_something_to_finish(threads_in_flight) - - for unfinished_thread, _, _ in threads_in_flight.values(): - unfinished_thread.join() - - ########################################################################### - def _setup_cs_files(self): - ########################################################################### - try: - python_libs_root = get_python_libs_root() - - create_cs_status(test_root=self._test_root, - test_id=self._test_id) - - template_file = os.path.join(python_libs_root, "cs.submit.template") - template = open(template_file, "r").read() - setup_cmd = "./case.setup" if self._no_setup else ":" - build_cmd = "./case.build" if self._no_build else ":" - test_cmd = "./case.submit" - template = template.replace("", setup_cmd).\ - replace("", build_cmd).\ - replace("", test_cmd).\ - replace("", self._test_id) - - if self._no_run: - cs_submit_file = os.path.join(self._test_root, "cs.submit.{}".format(self._test_id)) - with open(cs_submit_file, "w") as fd: - fd.write(template) - os.chmod(cs_submit_file, - os.stat(cs_submit_file).st_mode | stat.S_IXUSR | stat.S_IXGRP) - - if self._cime_model == "cesm": - template_file = os.path.join(python_libs_root, "testreporter.template") - template = open(template_file, "r").read() - template = template.replace("", - os.path.join(self._cime_root, "scripts", "Tools")) - testreporter_file = os.path.join(self._test_root, "testreporter") - with open(testreporter_file, "w") as fd: - fd.write(template) - os.chmod(testreporter_file, os.stat(testreporter_file).st_mode - | stat.S_IXUSR | stat.S_IXGRP) - - except Exception as e: - logger.warning("FAILED to set up cs files: {}".format(str(e))) - - ########################################################################### - def run_tests(self, wait=False, - wait_check_throughput=False, - wait_check_memory=False, - wait_ignore_namelists=False, - wait_ignore_memleak=False): - ########################################################################### - """ - Main API for this class. - - Return True if all tests passed. - """ - start_time = time.time() - - # Tell user what will be run - logger.info( "RUNNING TESTS:") - for test in self._tests: - logger.info( " {}".format(test)) - - # Setup cs files - self._setup_cs_files() - - GenericXML.DISABLE_CACHING = True - self._producer() - GenericXML.DISABLE_CACHING = False - - expect(threading.active_count() == 1, "Leftover threads?") - - # Copy TestStatus files to baselines for tests that have already failed. 
- if get_model() == "cesm": - for test in self._tests: - status = self._get_test_data(test)[1] - if status not in [TEST_PASS_STATUS, TEST_PEND_STATUS] and self._baseline_gen_name: - basegen_case_fullpath = os.path.join(self._baseline_root,self._baseline_gen_name, test) - test_dir = self._get_test_dir(test) - generate_teststatus(test_dir, basegen_case_fullpath) - - wait_handles_report = False - if not self._no_run and not self._no_batch: - if wait: - logger.info("Waiting for tests to finish") - rv = wait_for_tests(glob.glob(os.path.join(self._test_root, "*{}/TestStatus".format(self._test_id))), - check_throughput=wait_check_throughput, - check_memory=wait_check_memory, - ignore_namelists=wait_ignore_namelists, - ignore_memleak=wait_ignore_memleak) - wait_handles_report = True - else: - logger.info("Due to presence of batch system, create_test will exit before tests are complete.\n" \ - "To force create_test to wait for full completion, use --wait") - - # Return True if all tests passed from our point of view - if not wait_handles_report: - logger.info( "At test-scheduler close, state is:") - rv = True - for test in self._tests: - phase, status = self._get_test_data(test) - - # Give highest priority to fails in test schduler - if status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]: - logger.info( "{} {} (phase {})".format(status, test, phase)) - rv = False - - else: - # Be cautious about telling the user that the test passed. This - # status should match what they would see on the dashboard. Our - # self._test_states does not include comparison fail information, - # so we need to parse test status. - ts = TestStatus(self._get_test_dir(test)) - nlfail = ts.get_status(NAMELIST_PHASE) == TEST_FAIL_STATUS - ts_status = ts.get_overall_test_status(ignore_namelists=True, check_memory=False, check_throughput=False) - local_run = not self._no_run and self._no_batch - - if ts_status not in [TEST_PASS_STATUS, TEST_PEND_STATUS]: - logger.info( "{} {} (phase {})".format(ts_status, test, phase)) - rv = False - elif ts_status == TEST_PEND_STATUS and local_run: - logger.info( "{} {} (Some phases left in PEND)".format(TEST_FAIL_STATUS, test)) - rv = False - elif nlfail: - logger.info( "{} {} (but otherwise OK) {}".format(NAMELIST_FAIL_STATUS, test, phase)) - rv = False - else: - logger.info("{} {} {}".format(status, test, phase)) - - logger.info( " Case dir: {}".format(self._get_test_dir(test))) - - logger.info( "test-scheduler took {} seconds".format(time.time() - start_time)) - - return rv diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py deleted file mode 100644 index 9d2ffb0e6af..00000000000 --- a/scripts/lib/CIME/test_status.py +++ /dev/null @@ -1,470 +0,0 @@ -""" -Contains the crucial TestStatus class which manages phase-state of a test -case and ensure that this state is represented by the TestStatus file in -the case. - -TestStatus objects are only modifiable via the set_status method and this -is only allowed if the object is being accessed within the context of a -context manager. Example: - - with TestStatus(test_dir=caseroot) as ts: - ts.set_status(RUN_PHASE, TEST_PASS_STATUS) - -This file also contains all of the hardcoded phase information which includes -the phase names, phase orders, potential phase states, and which phases are -required (core phases). - -Additional important design decisions: -1) In order to ensure that incomplete tests are always left in a PEND - state, updating a core phase to a PASS state will automatically set the next - core state to PEND. 
-2) If the user repeats a core state, that invalidates all subsequent state. For - example, if a user rebuilds their case, then any of the post-run states like the - RUN state are no longer valid. -""" - -from CIME.XML.standard_module_setup import * - -from collections import OrderedDict - -import os, itertools -from CIME import expected_fails - -TEST_STATUS_FILENAME = "TestStatus" - -# The statuses that a phase can be in -TEST_PEND_STATUS = "PEND" -TEST_PASS_STATUS = "PASS" -TEST_FAIL_STATUS = "FAIL" - -ALL_PHASE_STATUSES = [TEST_PEND_STATUS, TEST_PASS_STATUS, TEST_FAIL_STATUS] - -# Special statuses that the overall test can be in -TEST_DIFF_STATUS = "DIFF" # Implies a failure in the BASELINE phase -NAMELIST_FAIL_STATUS = "NLFAIL" # Implies a failure in the NLCOMP phase - -# Special strings that can appear in comments, indicating particular types of failures -TEST_NO_BASELINES_COMMENT = "BFAIL" # Implies baseline directory is missing in the baseline comparison phase -# The expected and unexpected failure comments aren't used directly in this module, but -# are included here for symmetry, so other modules can access them from here. -TEST_EXPECTED_FAILURE_COMMENT = expected_fails.EXPECTED_FAILURE_COMMENT -TEST_UNEXPECTED_FAILURE_COMMENT_START = expected_fails.UNEXPECTED_FAILURE_COMMENT_START - -# The valid phases -CREATE_NEWCASE_PHASE = "CREATE_NEWCASE" -XML_PHASE = "XML" -SETUP_PHASE = "SETUP" -NAMELIST_PHASE = "NLCOMP" -SHAREDLIB_BUILD_PHASE = "SHAREDLIB_BUILD" -MODEL_BUILD_PHASE = "MODEL_BUILD" -SUBMIT_PHASE = "SUBMIT" -RUN_PHASE = "RUN" -THROUGHPUT_PHASE = "TPUTCOMP" -MEMCOMP_PHASE = "MEMCOMP" -MEMLEAK_PHASE = "MEMLEAK" -STARCHIVE_PHASE = "SHORT_TERM_ARCHIVER" -COMPARE_PHASE = "COMPARE" # This is one special, real phase will be COMPARE_$WHAT, this is for internal test comparisons, there could be multiple variations of this phase in one test -BASELINE_PHASE = "BASELINE" -GENERATE_PHASE = "GENERATE" - -ALL_PHASES = [CREATE_NEWCASE_PHASE, - XML_PHASE, - SETUP_PHASE, - NAMELIST_PHASE, - SHAREDLIB_BUILD_PHASE, - MODEL_BUILD_PHASE, - SUBMIT_PHASE, - RUN_PHASE, - COMPARE_PHASE, - BASELINE_PHASE, - THROUGHPUT_PHASE, - MEMCOMP_PHASE, - MEMLEAK_PHASE, - STARCHIVE_PHASE, - GENERATE_PHASE] - -# These are mandatory phases that a test must go through -CORE_PHASES = [CREATE_NEWCASE_PHASE, - XML_PHASE, - SETUP_PHASE, - SHAREDLIB_BUILD_PHASE, - MODEL_BUILD_PHASE, - SUBMIT_PHASE, - RUN_PHASE] - -def _test_helper1(file_contents): - ts = TestStatus(test_dir="/", test_name="ERS.foo.A") - ts._parse_test_status(file_contents) # pylint: disable=protected-access - return ts._phase_statuses # pylint: disable=protected-access - -def _test_helper2(file_contents, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, no_run=False, no_perm=False): - lines = file_contents.splitlines() - rv = None - perms = [lines] if no_perm else itertools.permutations(lines) - for perm in perms: - ts = TestStatus(test_dir="/", test_name="ERS.foo.A") - ts._parse_test_status("\n".join(perm)) # pylint: disable=protected-access - the_status = ts.get_overall_test_status(wait_for_run=wait_for_run, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists, - no_run=no_run) - if rv is not None and the_status != rv: - return "{} != {}".format(rv, the_status) - else: - rv = the_status - - return rv - -class TestStatus(object): - - def __init__(self, test_dir=None, test_name=None, no_io=False): - """ - Create a TestStatus object - - If test_dir is not specified, it is 
set to the current working directory - - no_io is intended only for testing, and should be kept False in - production code - """ - test_dir = os.getcwd() if test_dir is None else test_dir - self._filename = os.path.join(test_dir, TEST_STATUS_FILENAME) - self._phase_statuses = OrderedDict() # {name -> (status, comments)} - self._test_name = test_name - self._ok_to_modify = False - self._no_io = no_io - - if os.path.exists(self._filename): - self._parse_test_status_file() - if not os.access(self._filename, os.W_OK): - self._no_io = True - else: - expect(test_name is not None, "Must provide test_name if TestStatus file doesn't exist") - - def __enter__(self): - self._ok_to_modify = True - return self - - def __exit__(self, *_): - self._ok_to_modify = False - self.flush() - - def __iter__(self): - for phase, data in self._phase_statuses.items(): - yield phase, data[0] - - def __eq__(self, rhs): - return self._phase_statuses == rhs._phase_statuses # pylint: disable=protected-access - - def __ne__(self, rhs): - return not self.__eq__(rhs) - - def get_name(self): - return self._test_name - - def set_status(self, phase, status, comments=""): - """ - Update the status of this test by changing the status of given phase to the - given status. - - >>> with TestStatus(test_dir="/", test_name="ERS.foo.A", no_io=True) as ts: - ... ts.set_status(CREATE_NEWCASE_PHASE, "PASS") - ... ts.set_status(XML_PHASE, "PASS") - ... ts.set_status(SETUP_PHASE, "FAIL") - ... ts.set_status(SETUP_PHASE, "PASS") - ... ts.set_status("{}_base_rest".format(COMPARE_PHASE), "FAIL") - ... ts.set_status(SHAREDLIB_BUILD_PHASE, "PASS", comments='Time=42') - >>> ts._phase_statuses - OrderedDict([('CREATE_NEWCASE', ('PASS', '')), ('XML', ('PASS', '')), ('SETUP', ('PASS', '')), ('SHAREDLIB_BUILD', ('PASS', 'Time=42')), ('COMPARE_base_rest', ('FAIL', '')), ('MODEL_BUILD', ('PEND', ''))]) - - >>> with TestStatus(test_dir="/", test_name="ERS.foo.A", no_io=True) as ts: - ... ts.set_status(CREATE_NEWCASE_PHASE, "PASS") - ... ts.set_status(XML_PHASE, "PASS") - ... ts.set_status(SETUP_PHASE, "FAIL") - ... ts.set_status(SETUP_PHASE, "PASS") - ... ts.set_status(BASELINE_PHASE, "PASS") - ... ts.set_status("{}_base_rest".format(COMPARE_PHASE), "FAIL") - ... ts.set_status(SHAREDLIB_BUILD_PHASE, "PASS", comments='Time=42') - ... ts.set_status(SETUP_PHASE, "PASS") - >>> ts._phase_statuses - OrderedDict([('CREATE_NEWCASE', ('PASS', '')), ('XML', ('PASS', '')), ('SETUP', ('PASS', '')), ('SHAREDLIB_BUILD', ('PEND', ''))]) - - >>> with TestStatus(test_dir="/", test_name="ERS.foo.A", no_io=True) as ts: - ... 
ts.set_status(CREATE_NEWCASE_PHASE, "FAIL") - >>> ts._phase_statuses - OrderedDict([('CREATE_NEWCASE', ('FAIL', ''))]) - """ - expect(self._ok_to_modify, "TestStatus not in a modifiable state, use 'with' syntax") - expect(phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), - "Invalid phase '{}'".format(phase)) - expect(status in ALL_PHASE_STATUSES, "Invalid status '{}'".format(status)) - - if phase in CORE_PHASES and phase != CORE_PHASES[0]: - previous_core_phase = CORE_PHASES[CORE_PHASES.index(phase)-1] - #TODO: enable check below - #expect(previous_core_phase in self._phase_statuses, "Core phase '{}' was skipped".format(previous_core_phase)) - - if previous_core_phase in self._phase_statuses: - expect(self._phase_statuses[previous_core_phase][0] == TEST_PASS_STATUS, - "Cannot move past core phase '{}', it didn't pass: ".format(previous_core_phase)) - - reran_phase = (phase in self._phase_statuses and self._phase_statuses[phase][0] != TEST_PEND_STATUS and phase in CORE_PHASES) - if reran_phase: - # All subsequent phases are invalidated - phase_idx = ALL_PHASES.index(phase) - for subsequent_phase in ALL_PHASES[phase_idx+1:]: - if subsequent_phase in self._phase_statuses: - del self._phase_statuses[subsequent_phase] - if subsequent_phase.startswith(COMPARE_PHASE): - for stored_phase in list(self._phase_statuses.keys()): - if stored_phase.startswith(COMPARE_PHASE): - del self._phase_statuses[stored_phase] - - self._phase_statuses[phase] = (status, comments) # Can overwrite old phase info - - if status == TEST_PASS_STATUS and phase in CORE_PHASES and phase != CORE_PHASES[-1]: - next_core_phase = CORE_PHASES[CORE_PHASES.index(phase)+1] - self._phase_statuses[next_core_phase] = (TEST_PEND_STATUS, "") - - def get_status(self, phase): - return self._phase_statuses[phase][0] if phase in self._phase_statuses else None - - def get_comment(self, phase): - return self._phase_statuses[phase][1] if phase in self._phase_statuses else None - - def phase_statuses_dump(self, prefix='', skip_passes=False, skip_phase_list=None, xfails=None): - """ - Args: - prefix: string printed at the start of each line - skip_passes: if True, do not output lines that have a PASS status - skip_phase_list: list of phases (from the phases given by - ALL_PHASES) for which we skip output - xfails: object of type ExpectedFails, giving expected failures for this test - """ - if skip_phase_list is None: - skip_phase_list = [] - if xfails is None: - xfails = expected_fails.ExpectedFails() - result = "" - if self._phase_statuses: - for phase, data in self._phase_statuses.items(): - if phase in skip_phase_list: - continue - status, comments = data - xfail_comment = xfails.expected_fails_comment(phase, status) - if skip_passes: - if status == TEST_PASS_STATUS and not xfail_comment: - # Note that we still print the result of a PASSing test if there - # is a comment related to the expected failure status. Typically - # this will indicate that this is an unexpected PASS (and so - # should be removed from the expected fails list). - continue - result += "{}{} {} {}".format(prefix, status, self._test_name, phase) - if comments: - result += " {}".format(comments) - if xfail_comment: - result += " {}".format(xfail_comment) - result += "\n" - - return result - - def increment_non_pass_counts(self, non_pass_counts): - """ - Increment counts of the number of times given phases did not pass - - non_pass_counts is a dictionary whose keys are phases of - interest and whose values are running counts of the number of - non-passes. 
This method increments those counts based on results - in the given TestStatus object. - """ - for phase in non_pass_counts: - if phase in self._phase_statuses: - status, _ = self._phase_statuses[phase] - if status != TEST_PASS_STATUS: - non_pass_counts[phase] += 1 - - def flush(self): - if self._phase_statuses and not self._no_io: - with open(self._filename, "w") as fd: - fd.write(self.phase_statuses_dump()) - - def _parse_test_status(self, file_contents): - """ - >>> contents = ''' - ... PASS ERS.foo.A CREATE_NEWCASE - ... PASS ERS.foo.A XML - ... FAIL ERS.foo.A SETUP - ... PASS ERS.foo.A COMPARE_base_rest - ... PASS ERS.foo.A SHAREDLIB_BUILD Time=42 - ... ''' - >>> _test_helper1(contents) - OrderedDict([('CREATE_NEWCASE', ('PASS', '')), ('XML', ('PASS', '')), ('SETUP', ('FAIL', '')), ('COMPARE_base_rest', ('PASS', '')), ('SHAREDLIB_BUILD', ('PASS', 'Time=42'))]) - """ - for line in file_contents.splitlines(): - line = line.strip() - tokens = line.split() - if line == "": - pass # skip blank lines - elif len(tokens) >= 3: - status, curr_test_name, phase = tokens[:3] - if (self._test_name is None): - self._test_name = curr_test_name - else: - expect(self._test_name == curr_test_name, - "inconsistent test name in parse_test_status: '{}' != '{}'".format(self._test_name, curr_test_name)) - - expect(status in ALL_PHASE_STATUSES, - "Unexpected status '{}' in parse_test_status for test '{}'".format(status, self._test_name)) - expect(phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), - "phase '{}' not expected in parse_test_status for test '{}'".format(phase, self._test_name)) - expect(phase not in self._phase_statuses, - "Should not have seen multiple instances of phase '{}' for test '{}'".format(phase, self._test_name)) - - self._phase_statuses[phase] = (status, " ".join(tokens[3:])) - else: - logging.warning("In TestStatus file for test '{}', line '{}' not in expected format".format(self._test_name, line)) - - def _parse_test_status_file(self): - with open(self._filename, "r") as fd: - self._parse_test_status(fd.read()) - - def _get_overall_status_based_on_phases(self, phases, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): - - rv = TEST_PASS_STATUS - run_phase_found = False - for phase in phases: # ensure correct order of processing phases - if phase in self._phase_statuses: - data = self._phase_statuses[phase] - else: - continue - - status = data[0] - if phase == RUN_PHASE: - run_phase_found = True - - if phase in [SUBMIT_PHASE, RUN_PHASE] and no_run: - break - - if (status == TEST_PEND_STATUS): - rv = TEST_PEND_STATUS - if not no_run: - break - - elif (status == TEST_FAIL_STATUS): - if ( (not check_throughput and phase == THROUGHPUT_PHASE) or - (not check_memory and phase == MEMCOMP_PHASE) or - (ignore_namelists and phase == NAMELIST_PHASE) or - (ignore_memleak and phase == MEMLEAK_PHASE) ): - continue - - if (phase == NAMELIST_PHASE): - if (rv == TEST_PASS_STATUS): - rv = NAMELIST_FAIL_STATUS - - elif (rv in [NAMELIST_FAIL_STATUS, TEST_PASS_STATUS] and phase == BASELINE_PHASE): - rv = TEST_DIFF_STATUS - - elif phase in CORE_PHASES: - return TEST_FAIL_STATUS - - else: - rv = TEST_FAIL_STATUS - - # The test did not fail but the RUN phase was not found, so if the user requested - # that we wait for the RUN phase, then the test must still be considered pending. 
- if rv != TEST_FAIL_STATUS and not run_phase_found and wait_for_run: - rv = TEST_PEND_STATUS - - return rv - - def get_overall_test_status(self, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): - r""" - Given the current phases and statuses, produce a single results for this test. Preference - is given to PEND since we don't want to stop waiting for a test - that hasn't finished. Namelist diffs are given the lowest precedence. - - >>> _test_helper2('PASS ERS.foo.A RUN') - 'PASS' - >>> _test_helper2('PASS ERS.foo.A SHAREDLIB_BUILD\nPEND ERS.foo.A RUN') - 'PEND' - >>> _test_helper2('FAIL ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN') - 'FAIL' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN') - 'PASS' - >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP') - 'PASS' - >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP', check_throughput=True) - 'FAIL' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') - 'NLFAIL' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') - 'PEND' - >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A MEMCOMP') - 'PASS' - >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP', ignore_namelists=True) - 'PASS' - >>> _test_helper2('PASS ERS.foo.A COMPARE_1\nFAIL ERS.foo.A NLCOMP\nFAIL ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') - 'FAIL' - >>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nPASS ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') - 'DIFF' - >>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nFAIL ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') - 'FAIL' - >>> _test_helper2('PEND ERS.foo.A COMPARE_2\nFAIL ERS.foo.A RUN') - 'FAIL' - >>> _test_helper2('PEND ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN') - 'PEND' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD') - 'PASS' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD', wait_for_run=True) - 'PEND' - >>> _test_helper2('FAIL ERS.foo.A MODEL_BUILD', wait_for_run=True) - 'FAIL' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN', wait_for_run=True) - 'PEND' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nFAIL ERS.foo.A RUN', wait_for_run=True) - 'FAIL' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN', wait_for_run=True) - 'PASS' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nFAIL ERS.foo.A RUN\nPEND ERS.foo.A COMPARE') - 'FAIL' - >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN', no_run=True) - 'PASS' - >>> s = '''PASS ERS.foo.A CREATE_NEWCASE - ... PASS ERS.foo.A XML - ... PASS ERS.foo.A SETUP - ... PASS ERS.foo.A SHAREDLIB_BUILD time=454 - ... PASS ERS.foo.A NLCOMP - ... PASS ERS.foo.A MODEL_BUILD time=363 - ... PASS ERS.foo.A SUBMIT - ... PASS ERS.foo.A RUN time=73 - ... PEND ERS.foo.A COMPARE_base_single_thread - ... FAIL ERS.foo.A BASELINE master: DIFF - ... PASS ERS.foo.A TPUTCOMP - ... PASS ERS.foo.A MEMLEAK insuffiencient data for memleak test - ... PASS ERS.foo.A SHORT_TERM_ARCHIVER - ... 
''' - >>> _test_helper2(s, no_perm=True) - 'PEND' - """ - # Core phases take priority - core_rv = self._get_overall_status_based_on_phases(CORE_PHASES, - wait_for_run=wait_for_run, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak, - no_run=no_run) - if core_rv != TEST_PASS_STATUS: - return core_rv - else: - phase_order = list(CORE_PHASES) - phase_order.extend([item for item in self._phase_statuses if item not in CORE_PHASES]) - - return self._get_overall_status_based_on_phases(phase_order, - wait_for_run=wait_for_run, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak, - no_run=no_run) - diff --git a/scripts/lib/CIME/test_utils.py b/scripts/lib/CIME/test_utils.py deleted file mode 100644 index 25f2af0a0a0..00000000000 --- a/scripts/lib/CIME/test_utils.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Utility functions used in test_scheduler.py, and by other utilities that need to -get test lists. -""" - -from CIME.XML.standard_module_setup import * -from CIME.XML.testlist import Testlist -from CIME.XML.files import Files -import CIME.utils - -logger = logging.getLogger(__name__) - -def get_tests_from_xml(xml_machine=None,xml_category=None,xml_compiler=None, xml_testlist=None, - machine=None, compiler=None, driver=None): - """ - Parse testlists for a list of tests - """ - listoftests = [] - testlistfiles = [] - if(machine is not None): - thismach=machine - if(compiler is not None): - thiscompiler = compiler - - if(xml_testlist is not None): - expect(os.path.isfile(xml_testlist), "Testlist not found or not readable "+xml_testlist) - testlistfiles.append(xml_testlist) - else: - files = Files() - comps = files.get_components("TESTS_SPEC_FILE") - for comp in comps: - test_spec_file = files.get_value("TESTS_SPEC_FILE", {"component":comp}) - if(os.path.isfile(test_spec_file)): - testlistfiles.append(test_spec_file) - - for testlistfile in testlistfiles: - thistestlistfile = Testlist(testlistfile) - logger.debug("Testlist file is "+testlistfile) - logger.debug("xml_machine {} xml_category {} xml_compiler {}".format(xml_machine, xml_category, xml_compiler)) - newtests = thistestlistfile.get_tests(xml_machine, xml_category, xml_compiler) - for test in newtests: - if(machine is None): - thismach = test["machine"] - if(compiler is None): - thiscompiler = test["compiler"] - test["name"] = CIME.utils.get_full_test_name(test["testname"], grid=test["grid"], compset=test["compset"], - machine=thismach, compiler=thiscompiler, - testmod=None if "testmods" not in test else test["testmods"]) - if driver: - # override default or specified driver - founddriver = False - for specdriver in ("Vnuopc","Vmct","Vmoab"): - if specdriver in test["name"]: - test["name"] = test["name"].replace(specdriver,"V{}".format(driver)) - founddriver = True - if not founddriver: - name = test["name"] - index = name.find('.') - test["name"] = name[:index] + "_V{}".format(driver) + name[index:] - - - logger.debug("Adding test {} with compiler {}".format(test["name"], test["compiler"])) - listoftests += newtests - logger.debug("Found {:d} tests".format(len(listoftests))) - - return listoftests - -def test_to_string(test, category_field_width=0, test_field_width=0, show_options=False): - """Given a test dictionary, return a string representation suitable for printing - - Args: - test (dict): dictionary for a single test - e.g., one element from the - list returned by 
get_tests_from_xml - category_field_width (int): minimum amount of space to use for printing the test category - test_field_width (int): minimum amount of space to use for printing the test category - show_options (bool): if True, print test options, too (note that the 'comment' - option is always printed, if present) - - Basic functionality: - >>> mytest = {'name': 'SMS.f19_g16.A.cheyenne_intel', 'category': 'prealpha', 'options': {}} - >>> test_to_string(mytest, 10) - 'prealpha : SMS.f19_g16.A.cheyenne_intel' - - Printing comments: - >>> mytest = {'name': 'SMS.f19_g16.A.cheyenne_intel', 'category': 'prealpha', 'options': {'comment': 'my remarks'}} - >>> test_to_string(mytest, 10) - 'prealpha : SMS.f19_g16.A.cheyenne_intel # my remarks' - - Printing other options, too: - >>> mytest = {'name': 'SMS.f19_g16.A.cheyenne_intel', 'category': 'prealpha', 'options': {'comment': 'my remarks', 'wallclock': '0:20', 'memleak_tolerance': 0.2}} - >>> test_to_string(mytest, 10, show_options=True) - 'prealpha : SMS.f19_g16.A.cheyenne_intel # my remarks # memleak_tolerance: 0.2 # wallclock: 0:20' - """ - - mystr = "%-*s: %-*s"%(category_field_width, test['category'], test_field_width, test['name']) - if 'options' in test: - myopts = test['options'].copy() - comment = myopts.pop('comment', None) - if comment: - mystr += " # {}".format(comment) - if show_options: - for one_opt in sorted(myopts): - mystr += " # {}: {}".format(one_opt, myopts[one_opt]) - - return mystr diff --git a/scripts/lib/CIME/tests/README b/scripts/lib/CIME/tests/README deleted file mode 100644 index 0af6c386d82..00000000000 --- a/scripts/lib/CIME/tests/README +++ /dev/null @@ -1,6 +0,0 @@ -The directory structure of this tests directory mirrors the directory structure -of the parent (production code) directory. - -So, for example, unit tests for CIME/foo.py should live in -CIME/tests/test_foo.py, and unit tests for CIME/SystemTests/bar.py should live -in CIME/tests/SystemTests/test_bar.py. diff --git a/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py b/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py deleted file mode 100644 index d9757bb3706..00000000000 --- a/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +++ /dev/null @@ -1,562 +0,0 @@ -#!/usr/bin/env python - -""" -This module contains unit tests of the core logic in SystemTestsCompareTwo. 
-""" - -# Ignore privacy concerns for unit tests, so that unit tests can access -# protected members of the system under test -# -# pylint:disable=protected-access - -import unittest -from collections import namedtuple -import os -import shutil -import tempfile - -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo -import CIME.test_status as test_status -from CIME.tests.case_fake import CaseFake - -# ======================================================================== -# Structure for storing information about calls made to methods -# ======================================================================== - -# You can create a Call object to record a single call made to a method: -# -# Call(method, arguments) -# method (str): name of method -# arguments (dict): dictionary mapping argument names to values -# -# Example: -# If you want to record a call to foo(bar = 1, baz = 2): -# somecall = Call(method = 'foo', arguments = {'bar': 1, 'baz': 2}) -# Or simply: -# somecall = Call('foo', {'bar': 1, 'baz': 2}) -Call = namedtuple('Call', ['method', 'arguments']) - -# ======================================================================== -# Names of methods for which we want to record calls -# ======================================================================== - -# We use constants for these method names because, in some cases, a typo in a -# hard-coded string could cause a test to always pass, which would be a Bad -# Thing. -# -# For now the names of the constants match the strings they equate to, which -# match the actual method names. But it's fine if this doesn't remain the case -# moving forward (which is another reason to use constants rather than -# hard-coded strings in the tests). - -METHOD_case_one_custom_prerun_action = "_case_one_custom_prerun_action" -METHOD_case_one_custom_postrun_action = "_case_one_custom_postrun_action" -METHOD_case_two_custom_prerun_action = "_case_two_custom_prerun_action" -METHOD_case_two_custom_postrun_action = "_case_two_custom_postrun_action" -METHOD_link_to_case2_output = "_link_to_case2_output" -METHOD_run_indv = "_run_indv" - -# ======================================================================== -# Fake version of SystemTestsCompareTwo that overrides some functionality for -# the sake of unit testing -# ======================================================================== - -# A SystemTestsCompareTwoFake object can be controlled to fail at a given -# point. See the documentation in its __init__ method for details. -# -# It logs what stubbed-out methods have been called in its log attribute; this -# is a list of Call objects (see above for their definition). - -class SystemTestsCompareTwoFake(SystemTestsCompareTwo): - def __init__(self, - case1, - run_one_suffix = 'base', - run_two_suffix = 'test', - separate_builds = False, - multisubmit = False, - case2setup_raises_exception = False, - run_one_should_pass = True, - run_two_should_pass = True, - compare_should_pass = True): - """ - Initialize a SystemTestsCompareTwoFake object - - The core test phases prior to RUN_PHASE are set to TEST_PASS_STATUS; - RUN_PHASE is left unset (as is any later phase) - - Args: - case1 (CaseFake): existing case - run_one_suffix (str, optional): Suffix used for first run. Defaults - to 'base'. Currently MUST be 'base'. - run_two_suffix (str, optional): Suffix used for the second run. Defaults to 'test'. 
- separate_builds (bool, optional): Passed to SystemTestsCompareTwo.__init__ - multisubmit (bool, optional): Passed to SystemTestsCompareTwo.__init__ - case2setup_raises_exception (bool, optional): If True, then the call - to _case_two_setup will raise an exception. Default is False. - run_one_should_pass (bool, optional): Whether the run_indv method should - pass for the first run. Default is True, meaning it will pass. - run_two_should_pass (bool, optional): Whether the run_indv method should - pass for the second run. Default is True, meaning it will pass. - compare_should_pass (bool, optional): Whether the comparison between the two - cases should pass. Default is True, meaning it will pass. - """ - - self._case2setup_raises_exception = case2setup_raises_exception - - # NOTE(wjs, 2016-08-03) Currently, due to limitations in the test - # infrastructure, run_one_suffix MUST be 'base'. However, I'm keeping it - # as an explicit argument to the constructor so that it's easy to relax - # this requirement later: To relax this assumption, remove the following - # assertion and add run_one_suffix as an argument to - # SystemTestsCompareTwo.__init__ - assert(run_one_suffix == 'base') - - SystemTestsCompareTwo.__init__( - self, - case1, - separate_builds = separate_builds, - run_two_suffix = run_two_suffix, - multisubmit = multisubmit) - - # Need to tell test status that all phases prior to the run phase have - # passed, since this is checked in the run call (at least for the build - # phase status) - with self._test_status: - for phase in test_status.CORE_PHASES: - if phase == test_status.RUN_PHASE: - break - self._test_status.set_status(phase, test_status.TEST_PASS_STATUS) - - self.run_pass_caseroot = [] - if run_one_should_pass: - self.run_pass_caseroot.append(self._case1.get_value('CASEROOT')) - if run_two_should_pass: - self.run_pass_caseroot.append(self._case2.get_value('CASEROOT')) - - self.compare_should_pass = compare_should_pass - - self.log = [] - - # ------------------------------------------------------------------------ - # Stubs of methods called by SystemTestsCommon.__init__ that interact with - # the system or case object in ways we want to avoid here - # ------------------------------------------------------------------------ - - def _init_environment(self, caseroot): - pass - - def _init_locked_files(self, caseroot, expected): - pass - - def _init_case_setup(self): - pass - - # ------------------------------------------------------------------------ - # Fake implementations of methods that are typically provided by - # SystemTestsCommon - # ------------------------------------------------------------------------ - - def run_indv(self, suffix="base", st_archive=False): - """ - This fake implementation appends to the log and raises an exception if - it's supposed to - - Note that the Call object appended to the log has the current CASE name - in addition to the method arguments. (This is mainly to ensure that the - proper suffix is used for the proper case, but this extra check can be - removed if it's a maintenance problem.) - """ - caseroot = self._case.get_value('CASEROOT') - self.log.append(Call(METHOD_run_indv, - {'suffix': suffix, 'CASEROOT': caseroot})) - - # Determine whether we should raise an exception - # - # It's important that this check be based on some attribute of the - # self._case object, to ensure that the right case has been activated - # for this call to run_indv (e.g., to catch if we forgot to activate - # case2 before the second call to run_indv). 
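To make the call-recording idiom used by this fake easier to follow, here is a minimal, self-contained sketch of the same pattern; the FakeRunner class and do_run method are hypothetical names used only for this illustration and are not part of CIME:

from collections import namedtuple

# One record per call: the method name plus the keyword arguments it received,
# mirroring the Call namedtuple defined earlier in this module.
Call = namedtuple('Call', ['method', 'arguments'])

class FakeRunner(object):
    """Hypothetical stub that logs calls instead of doing real work."""

    def __init__(self):
        self.log = []

    def do_run(self, suffix):
        # Record the call so a test can later assert on the exact sequence of calls
        self.log.append(Call('do_run', {'suffix': suffix}))

runner = FakeRunner()
runner.do_run(suffix='base')
runner.do_run(suffix='test')
# The log preserves call order, so an exact-sequence assertion is straightforward:
assert runner.log == [Call('do_run', {'suffix': 'base'}),
                      Call('do_run', {'suffix': 'test'})]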
- if caseroot not in self.run_pass_caseroot: - raise RuntimeError('caseroot not in run_pass_caseroot') - - def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): - """ - This fake implementation allows controlling whether compare_test - passes or fails - """ - return (self.compare_should_pass, "no comment") - - def _check_for_memleak(self): - pass - - def _st_archive_case_test(self): - pass - - # ------------------------------------------------------------------------ - # Fake implementations of methods that are typically provided by - # SystemTestsCompareTwo - # - # Since we're overriding these, their functionality is untested here! - # (Though note that _link_to_case2_output is tested elsewhere.) - # ------------------------------------------------------------------------ - - def _case_from_existing_caseroot(self, caseroot): - """ - Returns a CaseFake object instead of a Case object - """ - return CaseFake(caseroot, create_case_root=False) - - def _link_to_case2_output(self): - self.log.append(Call(METHOD_link_to_case2_output, {})) - - # ------------------------------------------------------------------------ - # Fake implementations of methods that are typically provided by the - # individual test - # - # The values set here are asserted against in some unit tests - # ------------------------------------------------------------------------ - - def _common_setup(self): - self._case.set_value('var_set_in_common_setup', 'common_val') - - def _case_one_setup(self): - self._case.set_value('var_set_in_setup', 'case1val') - - def _case_two_setup(self): - self._case.set_value('var_set_in_setup', 'case2val') - if self._case2setup_raises_exception: - raise RuntimeError - - def _case_one_custom_prerun_action(self): - self.log.append(Call(METHOD_case_one_custom_prerun_action, {})) - - def _case_one_custom_postrun_action(self): - self.log.append(Call(METHOD_case_one_custom_postrun_action, {})) - - def _case_two_custom_prerun_action(self): - self.log.append(Call(METHOD_case_two_custom_prerun_action, {})) - - def _case_two_custom_postrun_action(self): - self.log.append(Call(METHOD_case_two_custom_postrun_action, {})) - -# ======================================================================== -# Test class itself -# ======================================================================== - -class TestSystemTestsCompareTwo(unittest.TestCase): - - def setUp(self): - self.original_wd = os.getcwd() - # create a sandbox in which case directories can be created - self.tempdir = tempfile.mkdtemp() - - def tearDown(self): - # Some tests trigger a chdir call in the SUT; make sure we return to the - # original directory at the end of the test - os.chdir(self.original_wd) - - shutil.rmtree(self.tempdir, ignore_errors=True) - - def get_caseroots(self, casename='mytest'): - """ - Returns a tuple (case1root, case2root) - """ - case1root = os.path.join(self.tempdir, casename) - case2root = os.path.join(case1root, 'case2', casename) - return case1root, case2root - - def get_compare_phase_name(self, mytest): - """ - Returns a string giving the compare phase name for this test - """ - run_one_suffix = mytest._run_one_suffix - run_two_suffix = mytest._run_two_suffix - compare_phase_name = "{}_{}_{}".format(test_status.COMPARE_PHASE, - run_one_suffix, - run_two_suffix) - return compare_phase_name - - def test_setup(self): - # Ensure that test setup properly sets up case 1 and case 2 - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - 
case1.set_value('var_preset', 'preset_value') - - # Exercise - mytest = SystemTestsCompareTwoFake(case1) - - # Verify - # Make sure that pre-existing values in case1 are copied to case2 (via - # clone) - self.assertEqual('preset_value', - mytest._case2.get_value('var_preset')) - - # Make sure that _common_setup is called for both - self.assertEqual('common_val', - mytest._case1.get_value('var_set_in_common_setup')) - self.assertEqual('common_val', - mytest._case2.get_value('var_set_in_common_setup')) - - # Make sure that _case_one_setup and _case_two_setup are called - # appropriately - self.assertEqual('case1val', - mytest._case1.get_value('var_set_in_setup')) - self.assertEqual('case2val', - mytest._case2.get_value('var_set_in_setup')) - - def test_setup_separate_builds_sharedlibroot(self): - # If we're using separate_builds, the two cases should still use - # the same sharedlibroot - - # Setup - case1root, _ = self.get_caseroots() - case1 = CaseFake(case1root) - case1.set_value("SHAREDLIBROOT", os.path.join(case1root, "sharedlibroot")) - - # Exercise - mytest = SystemTestsCompareTwoFake(case1, - separate_builds = True) - - # Verify - self.assertEqual(case1.get_value("SHAREDLIBROOT"), - mytest._case2.get_value("SHAREDLIBROOT")) - - def test_setup_case2_exists(self): - # If case2 already exists, then setup code should not be called - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - os.makedirs(os.path.join(case1root, 'case2','case1')) - - # Exercise - mytest = SystemTestsCompareTwoFake(case1, - run_two_suffix = 'test') - - # Verify: - - # Make sure that case2 object is set (i.e., that it doesn't remain None) - self.assertEqual('case1', mytest._case2.get_value('CASE')) - - # Variables set in various setup methods should not be set - # (In the real world - i.e., outside of this unit testing fakery - these - # values would be set when the Case objects are created.) - self.assertIsNone(mytest._case1.get_value('var_set_in_common_setup')) - self.assertIsNone(mytest._case2.get_value('var_set_in_common_setup')) - self.assertIsNone(mytest._case1.get_value('var_set_in_setup')) - self.assertIsNone(mytest._case2.get_value('var_set_in_setup')) - - def test_setup_error(self): - # If there is an error in setup, an exception should be raised and the - # case2 directory should be removed - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - - # Exercise - with self.assertRaises(Exception): - SystemTestsCompareTwoFake(case1, - run_two_suffix = 'test', - case2setup_raises_exception = True) - - # Verify - self.assertFalse(os.path.exists(os.path.join(case1root, 'case1.test'))) - - def test_run_phase_passes(self): - # Make sure the run phase behaves properly when all runs succeed. - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1) - - # Exercise - mytest.run() - - # Verify - self.assertEqual(test_status.TEST_PASS_STATUS, - mytest._test_status.get_status(test_status.RUN_PHASE)) - - def test_run_phase_internal_calls(self): - # Make sure that the correct calls are made to methods stubbed out by - # SystemTestsCompareTwoFake (when runs succeed) - # - # The point of this is: A number of methods called from the run_phase - # method are stubbed out in the Fake test implementation, because their - # actions are awkward in these unit tests. But we still want to make - # sure that those methods actually got called correctly. 
- - # Setup - run_one_suffix = 'base' - run_two_suffix = 'run2' - case1root, case2root = self.get_caseroots() - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - run_one_suffix = run_one_suffix, - run_two_suffix = run_two_suffix) - - # Exercise - mytest.run() - - # Verify - expected_calls = [ - Call(METHOD_case_one_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_one_suffix, 'CASEROOT': case1root}), - Call(METHOD_case_one_custom_postrun_action, {}), - Call(METHOD_case_two_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_two_suffix, 'CASEROOT': case2root}), - Call(METHOD_case_two_custom_postrun_action, {}), - Call(METHOD_link_to_case2_output, {}) - ] - self.assertEqual(expected_calls, mytest.log) - - def test_run_phase_internal_calls_multisubmit_phase1(self): - # Make sure that the correct calls are made to methods stubbed out by - # SystemTestsCompareTwoFake (when runs succeed), when we have a - # multi-submit test, in the first phase - - # Setup - run_one_suffix = 'base' - run_two_suffix = 'run2' - case1root, _ = self.get_caseroots() - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake( - case1 = case1, - run_one_suffix = run_one_suffix, - run_two_suffix = run_two_suffix, - multisubmit = True) - # RESUBMIT=1 signals first phase - case1.set_value("RESUBMIT", 1) - - # Exercise - mytest.run() - - # Verify - expected_calls = [ - Call(METHOD_case_one_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_one_suffix, 'CASEROOT': case1root}), - Call(METHOD_case_one_custom_postrun_action, {}), - ] - self.assertEqual(expected_calls, mytest.log) - - # Also verify that comparison is NOT called: - compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_PEND_STATUS, mytest._test_status.get_status(compare_phase_name)) - - def test_run_phase_internal_calls_multisubmit_phase2(self): - # Make sure that the correct calls are made to methods stubbed out by - # SystemTestsCompareTwoFake (when runs succeed), when we have a - # multi-submit test, in the second phase - - # Setup - run_one_suffix = 'base' - run_two_suffix = 'run2' - case1root, case2root = self.get_caseroots() - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake( - case1 = case1, - run_one_suffix = run_one_suffix, - run_two_suffix = run_two_suffix, - multisubmit = True, - compare_should_pass = True) - # RESUBMIT=0 signals second phase - case1.set_value("RESUBMIT", 0) - - # Exercise - mytest.run() - - # Verify - expected_calls = [ - Call(METHOD_case_two_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_two_suffix, 'CASEROOT': case2root}), - Call(METHOD_case_two_custom_postrun_action, {}), - Call(METHOD_link_to_case2_output, {}) - ] - self.assertEqual(expected_calls, mytest.log) - - # Also verify that comparison is called: - compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_PASS_STATUS, - mytest._test_status.get_status(compare_phase_name)) - - def test_run1_fails(self): - # Make sure that a failure in run1 is reported correctly - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - run_one_should_pass = False) - - # Exercise - try: - mytest.run() - except Exception: - pass - - # Verify - self.assertEqual(test_status.TEST_FAIL_STATUS, - mytest._test_status.get_status(test_status.RUN_PHASE)) - - def test_run2_fails(self): - # Make sure that a failure in run2 is reported 
correctly - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - run_two_should_pass = False) - - # Exercise - try: - mytest.run() - except Exception: - pass - - # Verify - self.assertEqual(test_status.TEST_FAIL_STATUS, - mytest._test_status.get_status(test_status.RUN_PHASE)) - - def test_compare_passes(self): - # Make sure that a pass in the comparison is reported correctly - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - compare_should_pass = True) - - # Exercise - mytest.run() - - # Verify - compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_PASS_STATUS, - mytest._test_status.get_status(compare_phase_name)) - - def test_compare_fails(self): - # Make sure that a failure in the comparison is reported correctly - - # Setup - case1root = os.path.join(self.tempdir, 'case1') - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - compare_should_pass = False) - - # Exercise - mytest.run() - - # Verify - compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_FAIL_STATUS, - mytest._test_status.get_status(compare_phase_name)) - -if __name__ == "__main__": - unittest.main(verbosity=2, catchbreak=True) diff --git a/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py b/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py deleted file mode 100644 index d0ea963cda1..00000000000 --- a/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python - -""" -This module contains unit tests of the method -SystemTestsCompareTwo._link_to_case2_output -""" - -# Ignore privacy concerns for unit tests, so that unit tests can access -# protected members of the system under test -# -# pylint:disable=protected-access - -import unittest -import os -import shutil -import tempfile -from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo -from CIME.tests.case_fake import CaseFake - -# ======================================================================== -# Fake version of SystemTestsCompareTwo that overrides some functionality for -# the sake of unit testing -# ======================================================================== - -class SystemTestsCompareTwoFake(SystemTestsCompareTwo): - def __init__(self, - case1, - run_two_suffix = 'test'): - - SystemTestsCompareTwo.__init__( - self, - case1, - separate_builds = False, - run_two_suffix = run_two_suffix) - - # ------------------------------------------------------------------------ - # Stubs of methods called by SystemTestsCommon.__init__ that interact with - # the system or case object in ways we want to avoid here - # ------------------------------------------------------------------------ - - def _init_environment(self, caseroot): - pass - - def _init_locked_files(self, caseroot, expected): - pass - - def _init_case_setup(self): - pass - - # ------------------------------------------------------------------------ - # Stubs of methods that are typically provided by the individual test - # ------------------------------------------------------------------------ - - def _case_one_setup(self): - pass - - def _case_two_setup(self): - pass - -# ======================================================================== -# Test 
class itself -# ======================================================================== - -class TestLinkToCase2Output(unittest.TestCase): - - # ======================================================================== - # Test helper functions - # ======================================================================== - - def setUp(self): - self.original_wd = os.getcwd() - # Create a sandbox in which case directories can be created - self.tempdir = tempfile.mkdtemp() - - def tearDown(self): - # Some tests trigger a chdir call in the SUT; make sure we return to the - # original directory at the end of the test - os.chdir(self.original_wd) - - shutil.rmtree(self.tempdir, ignore_errors=True) - - def setup_test_and_directories(self, casename1, run2_suffix): - """ - Returns test object - """ - - case1root = os.path.join(self.tempdir, casename1) - case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, run_two_suffix = run2_suffix) - mytest._case1.make_rundir() #pylint: disable=maybe-no-member - mytest._case2.make_rundir() #pylint: disable=maybe-no-member - - return mytest - - def create_file_in_rundir2(self, mytest, core_filename, run2_suffix): - """ - Creates a file in rundir2 named CASE2.CORE_FILENAME.nc.RUN2_SUFFIX - (where CASE2 is the casename of case2) - - Returns full path to the file created - """ - filename = '{}.{}.nc.{}'.format(mytest._case2.get_value('CASE'), - core_filename, - run2_suffix) - filepath = os.path.join(mytest._case2.get_value('RUNDIR'), filename) - open(filepath, 'w').close() - return filepath - - # ======================================================================== - # Begin actual tests - # ======================================================================== - - def test_basic(self): - # Setup - casename1 = 'mytest' - run2_suffix = 'run2' - - mytest = self.setup_test_and_directories(casename1, run2_suffix) - filepath1 = self.create_file_in_rundir2(mytest, 'clm2.h0', run2_suffix) - filepath2 = self.create_file_in_rundir2(mytest, 'clm2.h1', run2_suffix) - - # Exercise - mytest._link_to_case2_output() - - # Verify - expected_link_filename1 = '{}.clm2.h0.nc.{}'.format(casename1, run2_suffix) - expected_link_filepath1 = os.path.join(mytest._case1.get_value('RUNDIR'), - expected_link_filename1) - self.assertTrue(os.path.islink(expected_link_filepath1)) - self.assertEqual(filepath1, os.readlink(expected_link_filepath1)) - - expected_link_filename2 = '{}.clm2.h1.nc.{}'.format(casename1, run2_suffix) - expected_link_filepath2 = os.path.join(mytest._case1.get_value('RUNDIR'), - expected_link_filename2) - self.assertTrue(os.path.islink(expected_link_filepath2)) - self.assertEqual(filepath2, os.readlink(expected_link_filepath2)) - - def test_existing_link(self): - # Setup - casename1 = 'mytest' - run2_suffix = 'run2' - - mytest = self.setup_test_and_directories(casename1, run2_suffix) - self.create_file_in_rundir2(mytest, 'clm2.h0', run2_suffix) - - # Create initial link via a call to _link_to_case2_output - mytest._link_to_case2_output() - - # Exercise - # See what happens when we try to recreate that link - mytest._link_to_case2_output() - - # (No verification: Test passes if no exception was raised) diff --git a/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py b/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py deleted file mode 100644 index d504b82fd03..00000000000 --- a/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python - -import unittest 
-import os -import shutil -import tempfile -from CIME.SystemTests.test_utils import user_nl_utils -import six - -class TestUserNLCopier(unittest.TestCase): - - # ======================================================================== - # Test helper functions - # ======================================================================== - - def setUp(self): - self._caseroot = tempfile.mkdtemp() - - def tearDown(self): - shutil.rmtree(self._caseroot, ignore_errors=True) - - def write_user_nl_file(self, component, contents, suffix=''): - """Write contents to a user_nl file in the case directory. Returns the - basename (i.e., not the full path) of the file that is created. - - For a component foo, with the default suffix of '', the file name will - be user_nl_foo - - If the suffix is '_0001', the file name will be user_nl_foo_0001 - """ - - filename = 'user_nl_' + component + suffix - - with open(os.path.join(self._caseroot, filename), 'w') as user_nl_file: - user_nl_file.write(contents) - - return filename - - def assertFileContentsEqual(self, expected, filepath, msg=None): - """Asserts that the contents of the file given by 'filepath' are equal to - the string given by 'expected'. 'msg' gives an optional message to be - printed if the assertion fails.""" - - with open(filepath, 'r') as myfile: - contents = myfile.read() - - self.assertEqual(expected, contents, msg=msg) - - # ======================================================================== - # Begin actual tests - # ======================================================================== - - def test_append(self): - # Define some variables - component = 'foo' - # deliberately exclude new line from file contents, to make sure that's - # handled correctly - orig_contents = 'bar = 42' - contents_to_append = 'baz = 101' - - # Setup - filename = self.write_user_nl_file(component, orig_contents) - - # Exercise - user_nl_utils.append_to_user_nl_files(caseroot = self._caseroot, - component = component, - contents = contents_to_append) - - # Verify - expected_contents = orig_contents + '\n' + contents_to_append + '\n' - self.assertFileContentsEqual(expected_contents, - os.path.join(self._caseroot, filename)) - - def test_append_multiple_files(self): - # Simulates a multi-instance test - component = 'foo' - orig_contents1 = 'bar = 42' - orig_contents2 = 'bar = 17' - contents_to_append = 'baz = 101' - - # Setup - filename1 = self.write_user_nl_file(component, orig_contents1, suffix='_0001') - filename2 = self.write_user_nl_file(component, orig_contents2, suffix='_0002') - - # Exercise - user_nl_utils.append_to_user_nl_files(caseroot = self._caseroot, - component = component, - contents = contents_to_append) - - # Verify - expected_contents1 = orig_contents1 + '\n' + contents_to_append + '\n' - expected_contents2 = orig_contents2 + '\n' + contents_to_append + '\n' - self.assertFileContentsEqual(expected_contents1, - os.path.join(self._caseroot, filename1)) - self.assertFileContentsEqual(expected_contents2, - os.path.join(self._caseroot, filename2)) - - - def test_append_without_files_raises_exception(self): - # This test verifies that you get an exception if you call - # append_to_user_nl_files when there are no user_nl files of interest - - # Define some variables - component_exists = 'foo' - component_for_append = 'bar' - - # Setup - # Create file in caseroot for component_exists, but not for component_for_append - self.write_user_nl_file(component_exists, 'irrelevant contents') - - # Exercise & verify - six.assertRaisesRegex(self, 
RuntimeError, "No user_nl files found", - user_nl_utils.append_to_user_nl_files, - caseroot = self._caseroot, - component = component_for_append, - contents = 'irrelevant contents to append') diff --git a/scripts/lib/CIME/tests/XML/test_expected_fails_file.py b/scripts/lib/CIME/tests/XML/test_expected_fails_file.py deleted file mode 100644 index 0c909d9ddfe..00000000000 --- a/scripts/lib/CIME/tests/XML/test_expected_fails_file.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python - -import unittest -import os -import shutil -import tempfile -import six -from CIME.XML.expected_fails_file import ExpectedFailsFile -from CIME.utils import CIMEError -from CIME.expected_fails import ExpectedFails - -class TestExpectedFailsFile(unittest.TestCase): - - def setUp(self): - self._workdir = tempfile.mkdtemp() - self._xml_filepath = os.path.join(self._workdir, "expected_fails.xml") - - def tearDown(self): - shutil.rmtree(self._workdir) - - def test_basic(self): - """Basic test of the parsing of an expected fails file""" - contents = """ -<?xml version="1.0"?> -<expectedFails version="1.1"> - <test name="my.test.1"> - <phase name="RUN"> - <status>FAIL</status> - <issue>#404</issue> - </phase> - <phase name="COMPARE_base_rest"> - <status>PEND</status> - <issue>#404</issue> - <comment>Because of the RUN failure, this phase is listed as PEND</comment> - </phase> - </test> - <test name="my.test.2"> - <phase name="GENERATE"> - <status>FAIL</status> - <issue>ESMCI/cime#2917</issue> - </phase> - <phase name="BASELINE"> - <status>FAIL</status> - <issue>ESMCI/cime#2917</issue> - </phase> - </test> -</expectedFails> -""" - with open(self._xml_filepath, 'w') as xml_file: - xml_file.write(contents) - expected_fails_file = ExpectedFailsFile(self._xml_filepath) - xfails = expected_fails_file.get_expected_fails() - - expected_test1 = ExpectedFails() - expected_test1.add_failure('RUN', 'FAIL') - expected_test1.add_failure('COMPARE_base_rest', 'PEND') - expected_test2 = ExpectedFails() - expected_test2.add_failure('GENERATE', 'FAIL') - expected_test2.add_failure('BASELINE', 'FAIL') - expected = {'my.test.1': expected_test1, - 'my.test.2': expected_test2} - - self.assertEqual(xfails, expected) - - def test_same_test_appears_twice(self): - """If the same test appears twice, its information should be appended. - - This is not the typical, expected layout of the file, but it should be handled - correctly in case the file is written this way. - """ - contents = """ -<?xml version="1.0"?> -<expectedFails version="1.1"> - <test name="my.test.1"> - <phase name="RUN"> - <status>FAIL</status> - <issue>#404</issue> - </phase> - </test> - <test name="my.test.1"> - <phase name="COMPARE_base_rest"> - <status>PEND</status> - <issue>#404</issue> - <comment>Because of the RUN failure, this phase is listed as PEND</comment> - </phase> - </test> -</expectedFails> -""" - with open(self._xml_filepath, 'w') as xml_file: - xml_file.write(contents) - expected_fails_file = ExpectedFailsFile(self._xml_filepath) - xfails = expected_fails_file.get_expected_fails() - - expected_test1 = ExpectedFails() - expected_test1.add_failure('RUN', 'FAIL') - expected_test1.add_failure('COMPARE_base_rest', 'PEND') - expected = {'my.test.1': expected_test1} - - self.assertEqual(xfails, expected) - - def test_invalid_file(self): - """Given an invalid file, an exception should be raised in schema validation""" - - # This file is missing a <status> element in the <phase> block. - # - # It's important to have the expectedFails version number be greater than 1, - # because schema validation isn't done in cime for files with a version of 1.
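As a reading aid for the expected-fails layout exercised above, here is a minimal parsing sketch based on xml.etree.ElementTree. The element and attribute names follow the sample reconstructed in test_basic and are assumptions for illustration only; the real parsing and schema validation live in CIME.XML.expected_fails_file.ExpectedFailsFile:

import xml.etree.ElementTree as ET

sample = """
<expectedFails version="1.1">
  <test name="my.test.1">
    <phase name="RUN">
      <status>FAIL</status>
    </phase>
  </test>
</expectedFails>
"""

# Collect {test name: {phase name: expected status}} from the XML shown above
xfails = {}
for test_elem in ET.fromstring(sample).findall('test'):
    phases = {}
    for phase_elem in test_elem.findall('phase'):
        phases[phase_elem.get('name')] = phase_elem.findtext('status')
    xfails[test_elem.get('name')] = phases

assert xfails == {'my.test.1': {'RUN': 'FAIL'}}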
- contents = """ - - - - - - -""" - with open(self._xml_filepath, 'w') as xml_file: - xml_file.write(contents) - - with six.assertRaisesRegex(self, CIMEError, "Schemas validity error"): - _ = ExpectedFailsFile(self._xml_filepath) - -if __name__ == '__main__': - unittest.main() diff --git a/scripts/lib/CIME/tests/custom_assertions_test_status.py b/scripts/lib/CIME/tests/custom_assertions_test_status.py deleted file mode 100644 index 17c4d5bf737..00000000000 --- a/scripts/lib/CIME/tests/custom_assertions_test_status.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -This module contains a class that extends unittest.TestCase, adding custom assertions that -can be used when testing TestStatus. -""" - -from CIME.XML.standard_module_setup import * - -import unittest -import re -import six -import six_additions -from CIME import test_status - -class CustomAssertionsTestStatus(unittest.TestCase): - - def assert_status_of_phase(self, output, status, phase, test_name, xfail=None): - """Asserts that 'output' contains a line showing the given - status for the given phase for the given test_name. - - 'xfail' should have one of the following values: - - None (the default): assertion passes regardless of whether there is an - EXPECTED/UNEXPECTED string - - 'no': The line should end with the phase, with no additional text after that - - 'expected': After the phase, the line should contain '(EXPECTED FAILURE)' - - 'unexpected': After the phase, the line should contain '(UNEXPECTED' - """ - expected = (r'^ *{} +'.format(re.escape(status)) + - self._test_name_and_phase_regex(test_name, phase)) - - if xfail == 'no': - # There should be no other text after the testname and phase regex - expected += r' *$' - elif xfail == 'expected': - expected += r' *{}'.format(re.escape(test_status.TEST_EXPECTED_FAILURE_COMMENT)) - elif xfail == 'unexpected': - expected += r' *{}'.format(re.escape(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START)) - else: - expect(xfail is None, "Unhandled value of xfail argument") - - expected_re = re.compile(expected, flags=re.MULTILINE) - six.assertRegex(self, output, expected_re) - - def assert_phase_absent(self, output, phase, test_name): - """Asserts that 'output' does not contain a status line for the - given phase and test_name""" - expected = re.compile(r'^.* +' + - self._test_name_and_phase_regex(test_name, phase), - flags=re.MULTILINE) - - six_additions.assertNotRegex(self, output, expected) - - def assert_core_phases(self, output, test_name, fails): - """Asserts that 'output' contains a line for each of the core test - phases for the given test_name. All results should be PASS - except those given by the fails list, which should be FAILS. 
- """ - for phase in test_status.CORE_PHASES: - if phase in fails: - status = test_status.TEST_FAIL_STATUS - else: - status = test_status.TEST_PASS_STATUS - self.assert_status_of_phase(output=output, - status=status, - phase=phase, - test_name=test_name) - - def assert_num_expected_unexpected_fails(self, output, num_expected, num_unexpected): - """Asserts that the number of occurrences of expected and unexpected fails in - 'output' matches the given numbers""" - self.assertEqual(output.count(test_status.TEST_EXPECTED_FAILURE_COMMENT), num_expected) - self.assertEqual(output.count(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START), num_unexpected) - - @staticmethod - def _test_name_and_phase_regex(test_name, phase): - """Returns a regex matching the portion of a TestStatus line - containing the test name and phase""" - # The main purpose of extracting this into a shared method is: - # assert_phase_absent could wrongly pass if the format of the - # TestStatus output changed without that method's regex - # changing. By making its regex shared as much as possible with - # the regex in assert_status_of_phase, we decrease the chances - # of these false passes. - return r'{} +{}'.format(re.escape(test_name), re.escape(phase)) diff --git a/scripts/lib/CIME/tests/test_case_fake.py b/scripts/lib/CIME/tests/test_case_fake.py deleted file mode 100644 index e2997cdc9f3..00000000000 --- a/scripts/lib/CIME/tests/test_case_fake.py +++ /dev/null @@ -1,38 +0,0 @@ -#/usr/bin/env python - -""" -This module contains unit tests of CaseFake -""" - -import unittest -import tempfile -import os -import shutil - -from CIME.tests.case_fake import CaseFake - -class TestCaseFake(unittest.TestCase): - - def setUp(self): - self.tempdir = tempfile.mkdtemp() - - def tearDown(self): - shutil.rmtree(self.tempdir, ignore_errors=True) - - def test_create_clone(self): - # Setup - old_caseroot = os.path.join(self.tempdir, 'oldcase') - oldcase = CaseFake(old_caseroot) - oldcase.set_value('foo', 'bar') - - # Exercise - new_caseroot = os.path.join(self.tempdir, 'newcase') - clone = oldcase.create_clone(new_caseroot) - - # Verify - self.assertEqual('bar', clone.get_value('foo')) - self.assertEqual('newcase', clone.get_value('CASE')) - self.assertEqual('newcase', clone.get_value('CASEBASEID')) - self.assertEqual(new_caseroot, clone.get_value('CASEROOT')) - self.assertEqual(os.path.join(new_caseroot, 'run'), - clone.get_value('RUNDIR')) diff --git a/scripts/lib/CIME/tests/test_cs_status.py b/scripts/lib/CIME/tests/test_cs_status.py deleted file mode 100644 index 3bf38025261..00000000000 --- a/scripts/lib/CIME/tests/test_cs_status.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python - -import unittest -import shutil -import os -import tempfile -import re -import six -import six_additions -from CIME.cs_status import cs_status -from CIME import test_status -from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus - -class TestCsStatus(CustomAssertionsTestStatus): - - # ------------------------------------------------------------------------ - # Test helper functions - # ------------------------------------------------------------------------ - - # An arbitrary phase we can use when we want to work with a non-core phase - _NON_CORE_PHASE = test_status.MEMLEAK_PHASE - - # Another arbitrary phase if we need two different non-core phases - _NON_CORE_PHASE2 = test_status.BASELINE_PHASE - - def setUp(self): - self._testroot = tempfile.mkdtemp() - self._output = six.StringIO() - - def tearDown(self): - 
self._output.close() - shutil.rmtree(self._testroot, ignore_errors=True) - - def create_test_dir(self, test_dir): - """Creates the given test directory under testroot. - - Returns the full path to the created test directory. - """ - fullpath = os.path.join(self._testroot, test_dir) - os.makedirs(fullpath) - return fullpath - - @staticmethod - def create_test_status_core_passes(test_dir_path, test_name): - """Creates a TestStatus file in the given path, with PASS status - for all core phases""" - with test_status.TestStatus(test_dir=test_dir_path, - test_name=test_name) as ts: - for phase in test_status.CORE_PHASES: - ts.set_status(phase, test_status.TEST_PASS_STATUS) - - def set_last_core_phase_to_fail(self, test_dir_path, test_name): - """Sets the last core phase to FAIL - - Returns the name of this phase""" - fail_phase = test_status.CORE_PHASES[-1] - self.set_phase_to_status(test_dir_path=test_dir_path, - test_name=test_name, - phase=fail_phase, - status=test_status.TEST_FAIL_STATUS) - return fail_phase - - @staticmethod - def set_phase_to_status(test_dir_path, test_name, phase, status): - """Sets the given phase to the given status for this test""" - with test_status.TestStatus(test_dir=test_dir_path, - test_name=test_name) as ts: - ts.set_status(phase, status) - - # ------------------------------------------------------------------------ - # Begin actual tests - # ------------------------------------------------------------------------ - - def test_single_test(self): - """cs_status for a single test should include some minimal expected output""" - test_name = 'my.test.name' - test_dir = 'my.test.name.testid' - test_dir_path = self.create_test_dir(test_dir) - self.create_test_status_core_passes(test_dir_path, test_name) - cs_status([os.path.join(test_dir_path, 'TestStatus')], - out=self._output) - self.assert_core_phases(self._output.getvalue(), test_name, fails=[]) - - def test_two_tests(self): - """cs_status for two tests (one with a FAIL) should include some minimal expected output""" - test_name1 = 'my.test.name1' - test_name2 = 'my.test.name2' - test_dir1 = test_name1 + '.testid' - test_dir2 = test_name2 + '.testid' - test_dir_path1 = self.create_test_dir(test_dir1) - test_dir_path2 = self.create_test_dir(test_dir2) - self.create_test_status_core_passes(test_dir_path1, test_name1) - self.create_test_status_core_passes(test_dir_path2, test_name2) - test2_fail_phase = self.set_last_core_phase_to_fail(test_dir_path2, test_name2) - cs_status([os.path.join(test_dir_path1, 'TestStatus'), - os.path.join(test_dir_path2, 'TestStatus')], - out=self._output) - self.assert_core_phases(self._output.getvalue(), test_name1, fails=[]) - self.assert_core_phases(self._output.getvalue(), test_name2, fails=[test2_fail_phase]) - - def test_fails_only(self): - """With fails_only flag, only fails and pends should appear in the output""" - test_name = 'my.test.name' - test_dir = 'my.test.name.testid' - test_dir_path = self.create_test_dir(test_dir) - self.create_test_status_core_passes(test_dir_path, test_name) - fail_phase = self.set_last_core_phase_to_fail(test_dir_path, test_name) - pend_phase = self._NON_CORE_PHASE - self.set_phase_to_status(test_dir_path, test_name, - phase=pend_phase, - status=test_status.TEST_PEND_STATUS) - cs_status([os.path.join(test_dir_path, 'TestStatus')], - fails_only=True, - out=self._output) - self.assert_status_of_phase(output=self._output.getvalue(), - status=test_status.TEST_FAIL_STATUS, - phase=fail_phase, - test_name=test_name) - 
self.assert_status_of_phase(output=self._output.getvalue(), - status=test_status.TEST_PEND_STATUS, - phase=pend_phase, - test_name=test_name) - for phase in test_status.CORE_PHASES: - if phase != fail_phase: - self.assert_phase_absent(output=self._output.getvalue(), - phase=phase, - test_name=test_name) - six_additions.assertNotRegex(self, self._output.getvalue(), r'Overall:') - - def test_count_fails(self): - """Test the count of fails with three tests - - For first phase of interest: First test FAILs, second PASSes, - third FAILs; count should be 2, and this phase should not appear - individually for each test. - - For second phase of interest: First test PASSes, second PASSes, - third FAILs; count should be 1, and this phase should not appear - individually for each test. - """ - # Note that this test does NOT cover: - # - combining count_fails_phase_list with fails_only: currently, - # this wouldn't cover any additional code/logic - # - ensuring that PENDs are also counted: currently, this - # wouldn't cover any additional code/logic - phase_of_interest1 = self._NON_CORE_PHASE - phase_of_interest2 = self._NON_CORE_PHASE2 - statuses1 = [test_status.TEST_FAIL_STATUS, - test_status.TEST_PASS_STATUS, - test_status.TEST_FAIL_STATUS] - statuses2 = [test_status.TEST_PASS_STATUS, - test_status.TEST_PASS_STATUS, - test_status.TEST_FAIL_STATUS] - test_paths = [] - test_names = [] - for testnum in range(3): - test_name = 'my.test.name' + str(testnum) - test_names.append(test_name) - test_dir = test_name + '.testid' - test_dir_path = self.create_test_dir(test_dir) - self.create_test_status_core_passes(test_dir_path, test_name) - self.set_phase_to_status(test_dir_path, test_name, - phase=phase_of_interest1, - status=statuses1[testnum]) - self.set_phase_to_status(test_dir_path, test_name, - phase=phase_of_interest2, - status=statuses2[testnum]) - test_paths.append(os.path.join(test_dir_path, 'TestStatus')) - - cs_status(test_paths, - count_fails_phase_list=[phase_of_interest1, phase_of_interest2], - out=self._output) - - for testnum in range(3): - self.assert_phase_absent(output=self._output.getvalue(), - phase=phase_of_interest1, - test_name=test_names[testnum]) - self.assert_phase_absent(output=self._output.getvalue(), - phase=phase_of_interest2, - test_name=test_names[testnum]) - count_regex1 = r'{} +non-passes: +2'.format(re.escape(phase_of_interest1)) - six.assertRegex(self, self._output.getvalue(), count_regex1) - count_regex2 = r'{} +non-passes: +1'.format(re.escape(phase_of_interest2)) - six.assertRegex(self, self._output.getvalue(), count_regex2) - - def test_expected_fails(self): - """With the expected_fails_file flag, expected failures should be flagged as such""" - test_name1 = 'my.test.name1' - test_name2 = 'my.test.name2' - test_dir1 = test_name1 + '.testid' - test_dir2 = test_name2 + '.testid' - test_dir_path1 = self.create_test_dir(test_dir1) - test_dir_path2 = self.create_test_dir(test_dir2) - self.create_test_status_core_passes(test_dir_path1, test_name1) - self.create_test_status_core_passes(test_dir_path2, test_name2) - test1_fail_phase = self.set_last_core_phase_to_fail(test_dir_path1, test_name1) - test2_fail_phase = self.set_last_core_phase_to_fail(test_dir_path2, test_name2) - - # One phase is labeled as an expected failure for test1, nothing for test2: - expected_fails_contents = """ -<?xml version="1.0"?> -<expectedFails version="1.1"> - <test name="{test_name1}"> - <phase name="{test1_fail_phase}"> - <status>{fail_status}</status> - </phase> - </test> -</expectedFails> -""".format(test_name1=test_name1, - test1_fail_phase=test1_fail_phase, - fail_status=test_status.TEST_FAIL_STATUS) - expected_fails_filepath =
os.path.join(self._testroot, 'ExpectedFails.xml') - with open(expected_fails_filepath, 'w') as expected_fails_file: - expected_fails_file.write(expected_fails_contents) - - cs_status([os.path.join(test_dir_path1, 'TestStatus'), - os.path.join(test_dir_path2, 'TestStatus')], - expected_fails_filepath=expected_fails_filepath, - out=self._output) - - # Both test1 and test2 should have a failure for one phase, but this should be - # marked as expected only for test1. - self.assert_core_phases(self._output.getvalue(), test_name1, fails=[test1_fail_phase]) - self.assert_status_of_phase(self._output.getvalue(), - test_status.TEST_FAIL_STATUS, - test1_fail_phase, - test_name1, - xfail='expected') - self.assert_core_phases(self._output.getvalue(), test_name2, fails=[test2_fail_phase]) - self.assert_status_of_phase(self._output.getvalue(), - test_status.TEST_FAIL_STATUS, - test2_fail_phase, - test_name2, - xfail='no') - # Make sure that no other phases are mistakenly labeled as expected failures: - self.assert_num_expected_unexpected_fails(self._output.getvalue(), - num_expected=1, - num_unexpected=0) - -if __name__ == '__main__': - unittest.main() diff --git a/scripts/lib/CIME/tests/test_custom_assertions_test_status.py b/scripts/lib/CIME/tests/test_custom_assertions_test_status.py deleted file mode 100644 index 5b2324bf7c0..00000000000 --- a/scripts/lib/CIME/tests/test_custom_assertions_test_status.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/env python - -""" -This module contains unit tests of CustomAssertionsTestStatus -""" - -import unittest -from CIME import test_status -from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus - -class TestCustomAssertions(CustomAssertionsTestStatus): - - _UNEXPECTED_COMMENT = test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START + ' blah)' - - @staticmethod - def output_line(status, test_name, phase, extra=''): - output = status + ' ' + test_name + ' ' + phase - if extra: - output += ' ' + extra - output += '\n' - return output - - def test_assertPhaseAbsent_passes(self): - """assert_phase_absent should pass when the phase is absent for - the given test_name""" - test_name1 = 'my.test.name1' - test_name2 = 'my.test.name2' - output = self.output_line('PASS', test_name1, 'PHASE1') - output += self.output_line('PASS', test_name2, 'PHASE2') - - self.assert_phase_absent(output, 'PHASE2', test_name1) - self.assert_phase_absent(output, 'PHASE1', test_name2) - - def test_assertPhaseAbsent_fails(self): - """assert_phase_absent should fail when the phase is present for - the given test_name""" - test_name = 'my.test.name' - output = self.output_line('PASS', test_name, 'PHASE1') - - with self.assertRaises(AssertionError): - self.assert_phase_absent(output, 'PHASE1', test_name) - - def test_assertCorePhases_passes(self): - """assert_core_phases passes when it should""" - output = '' - fails = [test_status.CORE_PHASES[1]] - test_name = 'my.test.name' - for phase in test_status.CORE_PHASES: - if phase in fails: - status = test_status.TEST_FAIL_STATUS - else: - status = test_status.TEST_PASS_STATUS - output = output + self.output_line(status, test_name, phase) - - self.assert_core_phases(output, test_name, fails) - - def test_assertCorePhases_missingPhase_fails(self): - """assert_core_phases fails if there is a missing phase""" - output = '' - test_name = 'my.test.name' - for phase in test_status.CORE_PHASES: - if phase != test_status.CORE_PHASES[1]: - output = output + self.output_line(test_status.TEST_PASS_STATUS, test_name, phase) - - with 
self.assertRaises(AssertionError): - self.assert_core_phases(output, test_name, fails=[]) - - def test_assertCorePhases_wrongStatus_fails(self): - """assert_core_phases fails if a phase has the wrong status""" - output ='' - test_name = 'my.test.name' - for phase in test_status.CORE_PHASES: - output = output + self.output_line(test_status.TEST_PASS_STATUS, test_name, phase) - - with self.assertRaises(AssertionError): - self.assert_core_phases(output, test_name, fails=[test_status.CORE_PHASES[1]]) - - def test_assertCorePhases_wrongName_fails(self): - """assert_core_phases fails if the test name is wrong""" - output ='' - test_name = 'my.test.name' - for phase in test_status.CORE_PHASES: - output = output + self.output_line(test_status.TEST_PASS_STATUS, test_name, phase) - - with self.assertRaises(AssertionError): - self.assert_core_phases(output, 'my.test', fails=[]) - - # Note: Basic functionality of assert_status_of_phase is covered sufficiently via - # tests of assert_core_phases. Below we just cover some other aspects that aren't - # already covered. - - def test_assertStatusOfPhase_withExtra_passes(self): - """Make sure assert_status_of_phase passes when there is some extra text at the - end of the line""" - test_name = 'my.test.name' - output = self.output_line(test_status.TEST_FAIL_STATUS, - test_name, - test_status.CORE_PHASES[0], - extra=test_status.TEST_EXPECTED_FAILURE_COMMENT) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - test_status.CORE_PHASES[0], - test_name) - - def test_assertStatusOfPhase_xfailNo_passes(self): - """assert_status_of_phase should pass when xfail='no' and there is no - EXPECTED/UNEXPECTED on the line""" - test_name = 'my.test.name' - output = self.output_line(test_status.TEST_FAIL_STATUS, - test_name, - test_status.CORE_PHASES[0]) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - test_status.CORE_PHASES[0], - test_name, - xfail='no') - # While we're at it, also test assert_num_expected_unexpected_fails - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=0) - - def test_assertStatusOfPhase_xfailNo_fails(self): - """assert_status_of_phase should fail when xfail='no' but the line contains the - EXPECTED comment""" - test_name = 'my.test.name' - output = self.output_line(test_status.TEST_FAIL_STATUS, - test_name, - test_status.CORE_PHASES[0], - extra=test_status.TEST_EXPECTED_FAILURE_COMMENT) - - with self.assertRaises(AssertionError): - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - test_status.CORE_PHASES[0], - test_name, - xfail='no') - # While we're at it, also test assert_num_expected_unexpected_fails - self.assert_num_expected_unexpected_fails(output, - num_expected=1, - num_unexpected=0) - - def test_assertStatusOfPhase_xfailExpected_passes(self): - """assert_status_of_phase should pass when xfail='expected' and the line contains - the EXPECTED comment""" - test_name = 'my.test.name' - output = self.output_line(test_status.TEST_FAIL_STATUS, - test_name, - test_status.CORE_PHASES[0], - extra=test_status.TEST_EXPECTED_FAILURE_COMMENT) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - test_status.CORE_PHASES[0], - test_name, - xfail='expected') - # While we're at it, also test assert_num_expected_unexpected_fails - self.assert_num_expected_unexpected_fails(output, - num_expected=1, - num_unexpected=0) - - def test_assertStatusOfPhase_xfailExpected_fails(self): - """assert_status_of_phase should fail when xfail='expected' but the 
line does NOT contain - the EXPECTED comment""" - test_name = 'my.test.name' - # Note that the line contains the UNEXPECTED comment, but not the EXPECTED comment - # (we assume that if the assertion correctly fails in this case, then it will also - # correctly handle the case where neither the EXPECTED nor UNEXPECTED comment is - # present). - output = self.output_line(test_status.TEST_FAIL_STATUS, - test_name, - test_status.CORE_PHASES[0], - extra=self._UNEXPECTED_COMMENT) - - with self.assertRaises(AssertionError): - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - test_status.CORE_PHASES[0], - test_name, - xfail='expected') - # While we're at it, also test assert_num_expected_unexpected_fails - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=1) - - def test_assertStatusOfPhase_xfailUnexpected_passes(self): - """assert_status_of_phase should pass when xfail='unexpected' and the line contains - the UNEXPECTED comment""" - test_name = 'my.test.name' - output = self.output_line(test_status.TEST_FAIL_STATUS, - test_name, - test_status.CORE_PHASES[0], - extra=self._UNEXPECTED_COMMENT) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - test_status.CORE_PHASES[0], - test_name, - xfail='unexpected') - # While we're at it, also test assert_num_expected_unexpected_fails - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=1) - - def test_assertStatusOfPhase_xfailUnexpected_fails(self): - """assert_status_of_phase should fail when xfail='unexpected' but the line does NOT - contain the UNEXPECTED comment""" - test_name = 'my.test.name' - # Note that the line contains the EXPECTED comment, but not the UNEXPECTED comment - # (we assume that if the assertion correctly fails in this case, then it will also - # correctly handle the case where neither the EXPECTED nor UNEXPECTED comment is - # present). 
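The xfail checks exercised in these tests reduce to a multiline regular-expression search over TestStatus output; the sketch below shows that idea in isolation. The helper name has_status_line and the literal '(EXPECTED FAILURE)' text are assumptions for this example (the real marker strings are the TEST_EXPECTED_FAILURE_COMMENT and TEST_UNEXPECTED_FAILURE_COMMENT_START constants in CIME.test_status):

import re

def has_status_line(output, status, test_name, phase, expect_xfail=False):
    # Anchor at the start of a line: optional spaces, status, test name, then phase
    pattern = r'^ *{} +{} +{}'.format(re.escape(status), re.escape(test_name), re.escape(phase))
    if expect_xfail:
        # Additionally require the expected-failure marker after the phase name
        pattern += r' *\(EXPECTED FAILURE\)'
    return re.search(pattern, output, flags=re.MULTILINE) is not None

sample = "PASS my.test.name CREATE_NEWCASE\nFAIL my.test.name RUN (EXPECTED FAILURE)\n"
assert has_status_line(sample, 'FAIL', 'my.test.name', 'RUN', expect_xfail=True)
assert not has_status_line(sample, 'FAIL', 'my.test.name', 'MODEL_BUILD')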
- output = self.output_line(test_status.TEST_FAIL_STATUS, - test_name, - test_status.CORE_PHASES[0], - extra=test_status.TEST_EXPECTED_FAILURE_COMMENT) - - with self.assertRaises(AssertionError): - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - test_status.CORE_PHASES[0], - test_name, - xfail='unexpected') - # While we're at it, also test assert_num_expected_unexpected_fails - self.assert_num_expected_unexpected_fails(output, - num_expected=1, - num_unexpected=0) - -if __name__ == '__main__': - unittest.main() diff --git a/scripts/lib/CIME/tests/test_test_status.py b/scripts/lib/CIME/tests/test_test_status.py deleted file mode 100644 index 72b504eea0c..00000000000 --- a/scripts/lib/CIME/tests/test_test_status.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python - -import unittest -import os -from CIME import test_status -from CIME import expected_fails -from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus - -class TestTestStatus(CustomAssertionsTestStatus): - - _TESTNAME = 'fake_test' - - # An arbitrary phase we can use when we want to work with a non-core phase - _NON_CORE_PHASE = test_status.MEMLEAK_PHASE - - def setUp(self): - self._ts = test_status.TestStatus(test_dir=os.path.join('nonexistent', 'path'), - test_name=self._TESTNAME, - no_io=True) - self._set_core_phases_to_pass() - - def _set_core_phases_to_pass(self): - """Set all core phases of self._ts to pass status""" - with self._ts: - for phase in test_status.CORE_PHASES: - self._ts.set_status(phase, test_status.TEST_PASS_STATUS) - - def _set_last_core_phase_to_fail(self): - """Sets the last core phase to FAIL - - Returns the name of this phase""" - fail_phase = test_status.CORE_PHASES[-1] - self._set_phase_to_status(fail_phase, test_status.TEST_FAIL_STATUS) - return fail_phase - - def _set_phase_to_status(self, phase, status): - """Set given phase to given status""" - with self._ts: - self._ts.set_status(phase, status) - - # ------------------------------------------------------------------------ - # Tests of TestStatus.phase_statuses_dump - # ------------------------------------------------------------------------ - - def test_psdump_corePhasesPass(self): - output = self._ts.phase_statuses_dump() - self.assert_core_phases(output, self._TESTNAME, fails=[]) - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=0) - - def test_psdump_oneCorePhaseFails(self): - fail_phase = self._set_last_core_phase_to_fail() - output = self._ts.phase_statuses_dump() - self.assert_core_phases(output, self._TESTNAME, fails=[fail_phase]) - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=0) - - def test_psdump_oneCorePhaseFailsAbsentFromXFails(self): - """One phase fails. There is an expected fails list, but that phase is not in it.""" - fail_phase = self._set_last_core_phase_to_fail() - xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=self._NON_CORE_PHASE, - expected_status=test_status.TEST_FAIL_STATUS) - output = self._ts.phase_statuses_dump(xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - fail_phase, - self._TESTNAME, - xfail='no') - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=0) - - def test_psdump_oneCorePhaseFailsInXFails(self): - """One phase fails. 
That phase is in the expected fails list.""" - fail_phase = self._set_last_core_phase_to_fail() - xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=fail_phase, - expected_status=test_status.TEST_FAIL_STATUS) - output = self._ts.phase_statuses_dump(xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - fail_phase, - self._TESTNAME, - xfail='expected') - self.assert_num_expected_unexpected_fails(output, - num_expected=1, - num_unexpected=0) - - def test_psdump_oneCorePhasePassesInXFails(self): - """One phase passes despite being in the expected fails list.""" - xfail_phase = test_status.CORE_PHASES[-1] - xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=xfail_phase, - expected_status=test_status.TEST_FAIL_STATUS) - output = self._ts.phase_statuses_dump(xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_PASS_STATUS, - xfail_phase, - self._TESTNAME, - xfail='unexpected') - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=1) - - def test_psdump_skipPasses(self): - """With the skip_passes argument, only non-passes should appear""" - fail_phase = self._set_last_core_phase_to_fail() - output = self._ts.phase_statuses_dump(skip_passes=True) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - fail_phase, - self._TESTNAME, - xfail='no') - for phase in test_status.CORE_PHASES: - if phase != fail_phase: - self.assert_phase_absent(output, phase, self._TESTNAME) - - def test_psdump_unexpectedPass_shouldBePresent(self): - """Even with the skip_passes argument, an unexpected PASS should be present""" - xfail_phase = test_status.CORE_PHASES[-1] - xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=xfail_phase, - expected_status=test_status.TEST_FAIL_STATUS) - output = self._ts.phase_statuses_dump(skip_passes=True, xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_PASS_STATUS, - xfail_phase, - self._TESTNAME, - xfail='unexpected') - for phase in test_status.CORE_PHASES: - if phase != xfail_phase: - self.assert_phase_absent(output, phase, self._TESTNAME) - -if __name__ == '__main__': - unittest.main() diff --git a/scripts/lib/CIME/tests/test_user_mod_support.py b/scripts/lib/CIME/tests/test_user_mod_support.py deleted file mode 100644 index fe946b33af7..00000000000 --- a/scripts/lib/CIME/tests/test_user_mod_support.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python - -import unittest -import shutil -import tempfile -import os -from CIME.user_mod_support import apply_user_mods -from CIME.utils import CIMEError -import six - -# ======================================================================== -# Define some parameters -# ======================================================================== - -_SOURCEMODS = os.path.join("SourceMods", "src.drv") - -class TestUserModSupport(unittest.TestCase): - - # ======================================================================== - # Test helper functions - # ======================================================================== - - def setUp(self): - self._caseroot = tempfile.mkdtemp() - self._caseroot_sourcemods = os.path.join(self._caseroot, _SOURCEMODS) - os.makedirs(self._caseroot_sourcemods) - self._user_mods_parent_dir = tempfile.mkdtemp() - - def tearDown(self): - shutil.rmtree(self._caseroot, ignore_errors=True) - shutil.rmtree(self._user_mods_parent_dir, ignore_errors=True) - - def createUserMod(self, name, include_dirs=None): - """Create a user_mods directory with 
the given name. - - This directory is created within self._user_mods_parent_dir - - For name='foo', it will contain: - - - A user_nl_cpl file with contents: - foo - - - A shell_commands file with contents: - echo foo >> /PATH/TO/CASEROOT/shell_commands_result - - - A file in _SOURCEMODS named myfile.F90 with contents: - foo - - If include_dirs is given, it should be a list of strings, giving names - of other user_mods directories to include. e.g., if include_dirs is - ['foo1', 'foo2'], then this will create a file 'include_user_mods' that - contains paths to the 'foo1' and 'foo2' user_mods directories, one per - line. - """ - - mod_dir = os.path.join(self._user_mods_parent_dir, name) - os.makedirs(mod_dir) - mod_dir_sourcemods = os.path.join(mod_dir, _SOURCEMODS) - os.makedirs(mod_dir_sourcemods) - - with open(os.path.join(mod_dir, "user_nl_cpl"), "w") as user_nl_cpl: - user_nl_cpl.write(name + "\n") - with open(os.path.join(mod_dir, "shell_commands"), "w") as shell_commands: - command = "echo {} >> {}/shell_commands_result\n".format(name, self._caseroot) - shell_commands.write(command) - with open(os.path.join(mod_dir_sourcemods, "myfile.F90"), "w") as f90_file: - f90_file.write(name + "\n") - - if include_dirs: - with open(os.path.join(mod_dir, "include_user_mods"), "w") as include_user_mods: - for one_include in include_dirs: - include_user_mods.write(os.path.join(self._user_mods_parent_dir, one_include) + "\n") - - def assertResults(self, expected_user_nl_cpl, - expected_shell_commands_result, - expected_sourcemod, - msg = ""): - """Asserts that the contents of the files in self._caseroot match expectations - - If msg is provided, it is printed for some failing assertions - """ - - path_to_user_nl_cpl = os.path.join(self._caseroot, "user_nl_cpl") - self.assertTrue(os.path.isfile(path_to_user_nl_cpl), - msg = msg + ": user_nl_cpl does not exist") - with open(path_to_user_nl_cpl, "r") as user_nl_cpl: - contents = user_nl_cpl.read() - self.assertEqual(expected_user_nl_cpl, contents) - - path_to_shell_commands_result = os.path.join(self._caseroot, "shell_commands_result") - self.assertTrue(os.path.isfile(path_to_shell_commands_result), - msg = msg + ": shell_commands_result does not exist") - with open(path_to_shell_commands_result, "r") as shell_commands_result: - contents = shell_commands_result.read() - self.assertEqual(expected_shell_commands_result, contents) - - path_to_sourcemod = os.path.join(self._caseroot_sourcemods, "myfile.F90") - self.assertTrue(os.path.isfile(path_to_sourcemod), - msg = msg + ": sourcemod file does not exist") - with open(path_to_sourcemod, "r") as sourcemod: - contents = sourcemod.read() - self.assertEqual(expected_sourcemod, contents) - - # ======================================================================== - # Begin actual tests - # ======================================================================== - - def test_basic(self): - self.createUserMod("foo") - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo")) - self.assertResults(expected_user_nl_cpl = "foo\n", - expected_shell_commands_result = "foo\n", - expected_sourcemod = "foo\n", - msg = "test_basic") - - def test_keepexe(self): - self.createUserMod("foo") - with six.assertRaisesRegex(self, CIMEError, "cannot have any source mods"): - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo"), keepexe=True) - - def test_two_applications(self): - """If apply_user_mods is called twice, the second should appear after the first so that it takes 
precedence.""" - - self.createUserMod("foo1") - self.createUserMod("foo2") - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo1")) - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo2")) - self.assertResults(expected_user_nl_cpl = "foo1\nfoo2\n", - expected_shell_commands_result = "foo1\nfoo2\n", - expected_sourcemod = "foo2\n", - msg = "test_two_applications") - - def test_include(self): - """If there is an included mod, the main one should appear after the included one so that it takes precedence.""" - - self.createUserMod("base") - self.createUserMod("derived", include_dirs=["base"]) - - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "derived")) - - self.assertResults(expected_user_nl_cpl = "base\nderived\n", - expected_shell_commands_result = "base\nderived\n", - expected_sourcemod = "derived\n", - msg = "test_include") - - def test_duplicate_includes(self): - """Test multiple includes, where both include the same base mod. - - The base mod should only be included once. - """ - - self.createUserMod("base") - self.createUserMod("derived1", include_dirs=["base"]) - self.createUserMod("derived2", include_dirs=["base"]) - self.createUserMod("derived_combo", - include_dirs = ["derived1", "derived2"]) - - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "derived_combo")) - - # NOTE(wjs, 2017-04-15) The ordering of derived1 vs. derived2 is not - # critical here: If this aspect of the behavior changes, the - # expected_contents can be changed to match the new behavior in this - # respect. - expected_contents = """base -derived2 -derived1 -derived_combo -""" - self.assertResults(expected_user_nl_cpl = expected_contents, - expected_shell_commands_result = expected_contents, - expected_sourcemod = "derived_combo\n", - msg = "test_duplicate_includes") diff --git a/scripts/lib/CIME/tests/test_utils.py b/scripts/lib/CIME/tests/test_utils.py deleted file mode 100644 index 586b127c4d9..00000000000 --- a/scripts/lib/CIME/tests/test_utils.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python - -import unittest -from CIME.utils import indent_string - -class TestIndentStr(unittest.TestCase): - """Test the indent_string function. 
- - """ - - def test_indent_string_singleline(self): - """Test the indent_string function with a single-line string - - """ - mystr = 'foo' - result = indent_string(mystr, 4) - expected = ' foo' - self.assertEqual(expected, result) - - def test_indent_string_multiline(self): - """Test the indent_string function with a multi-line string - - """ - mystr = """hello -hi -goodbye -""" - result = indent_string(mystr, 2) - expected = """ hello - hi - goodbye -""" - self.assertEqual(expected, result) - -if __name__ == '__main__': - unittest.main() - diff --git a/scripts/lib/CIME/user_mod_support.py b/scripts/lib/CIME/user_mod_support.py deleted file mode 100644 index 667526d2ec9..00000000000 --- a/scripts/lib/CIME/user_mod_support.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -user_mod_support.py -""" - -from CIME.XML.standard_module_setup import * -from CIME.utils import expect, run_cmd_no_fail, safe_copy -import glob - -logger = logging.getLogger(__name__) - -def apply_user_mods(caseroot, user_mods_path, keepexe=None): - ''' - Recursivlely apply user_mods to caseroot - this includes updating user_nl_xxx, - updating SourceMods and creating case shell_commands and xmlchange_cmds files - - First remove case shell_commands files if any already exist - - If this function is called multiple times, settings from later calls will - take precedence over earlier calls, if there are conflicts. - - keepexe is an optional argument that is needed for cases where apply_user_mods is - called from create_clone - ''' - case_shell_command_files = [os.path.join(caseroot,"shell_commands"), - os.path.join(caseroot,"xmlchange_cmnds")] - for shell_command_file in case_shell_command_files: - if os.path.isfile(shell_command_file): - os.remove(shell_command_file) - - include_dirs = build_include_dirs_list(user_mods_path) - # If a user_mods dir 'foo' includes 'bar', the include_dirs list returned - # from build_include_dirs has 'foo' before 'bar'. But with the below code, - # directories that occur later in the list take precedence over the earlier - # ones, and we want 'foo' to take precedence over 'bar' in this case (in - # general: we want a given user_mods directory to take precedence over any - # mods that it includes). So we reverse include_dirs to accomplish this. - include_dirs.reverse() - logger.debug("include_dirs are {}".format(include_dirs)) - for include_dir in include_dirs: - # write user_nl_xxx file in caseroot - for user_nl in glob.iglob(os.path.join(include_dir,"user_nl_*")): - with open(os.path.join(include_dir, user_nl), "r") as fd: - newcontents = fd.read() - if len(newcontents) == 0: - continue - case_user_nl = user_nl.replace(include_dir, caseroot) - # If the same variable is set twice in a user_nl file, the later one - # takes precedence. So by appending the new contents, later entries - # in the include_dirs list take precedence over earlier entries. 
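As a toy illustration of that append-based precedence (the paths, fragment contents, and variable name here are invented): with include_dirs already reversed so the including directory comes last, its user_nl fragment is appended last, and the last assignment wins when the namelist is read.

include_dirs = ["/mods/base", "/mods/derived"]          # already reversed: includer last
fragments = {"/mods/base": "my_var = 'from_base'\n",
             "/mods/derived": "my_var = 'from_derived'\n"}

case_user_nl = ""
for include_dir in include_dirs:
    case_user_nl += fragments[include_dir]               # same append as the real loop
print(case_user_nl)
# my_var = 'from_base'
# my_var = 'from_derived'   <- last assignment takes precedence in the namelist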
- with open(case_user_nl, "a") as fd: - fd.write(newcontents) - - # update SourceMods in caseroot - for root, _, files in os.walk(include_dir,followlinks=True,topdown=False): - if "src" in os.path.basename(root): - if keepexe is not None: - expect(False, - "cannot have any source mods in {} if keepexe is an option".format(user_mods_path)) - for sfile in files: - source_mods = os.path.join(root,sfile) - case_source_mods = source_mods.replace(include_dir, caseroot) - # We overwrite any existing SourceMods file so that later - # include_dirs take precedence over earlier ones - if os.path.isfile(case_source_mods): - logger.warning("WARNING: Overwriting existing SourceMods in {}".format(case_source_mods)) - else: - logger.info("Adding SourceMod to case {}".format(case_source_mods)) - try: - safe_copy(source_mods, case_source_mods) - except Exception: - expect(False, "Could not write file {} in caseroot {}".format(case_source_mods,caseroot)) - - # create xmlchange_cmnds and shell_commands in caseroot - shell_command_files = glob.glob(os.path.join(include_dir,"shell_commands")) +\ - glob.glob(os.path.join(include_dir,"xmlchange_cmnds")) - for shell_commands_file in shell_command_files: - case_shell_commands = shell_commands_file.replace(include_dir, caseroot) - # add commands from both shell_commands and xmlchange_cmnds to - # the same file (caseroot/shell_commands) - case_shell_commands = case_shell_commands.replace("xmlchange_cmnds","shell_commands") - # Note that use of xmlchange_cmnds has been deprecated and will soon - # be removed altogether, so new tests should rely on shell_commands - if shell_commands_file.endswith("xmlchange_cmnds"): - logger.warning("xmlchange_cmnds is deprecated and will be removed " +\ - "in a future release; please rename {} shell_commands".format(shell_commands_file)) - with open(shell_commands_file,"r") as fd: - new_shell_commands = fd.read().replace("xmlchange","xmlchange --force") - # By appending the new commands to the end, settings from later - # include_dirs take precedence over earlier ones - with open(case_shell_commands, "a") as fd: - fd.write(new_shell_commands) - - for shell_command_file in case_shell_command_files: - if os.path.isfile(shell_command_file): - os.chmod(shell_command_file, 0o777) - run_cmd_no_fail(shell_command_file,verbose=True) - - -def build_include_dirs_list(user_mods_path, include_dirs=None): - ''' - If user_mods_path has a file "include_user_mods" read that - file and add directories to the include_dirs, recursively check - each of those directories for further directories. 
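Read in isolation, the include_user_mods file format described here is simple: one directory per line, absolute or relative to the including directory, with '#'-comment lines skipped. A minimal reader sketch for just that format; the real build_include_dirs_list below also normalizes paths, de-duplicates, and recurses into each included directory.

import os

def read_include_user_mods(mods_dir):
    """Return the directories listed in mods_dir/include_user_mods (a sketch)."""
    include_file = os.path.join(mods_dir, "include_user_mods")
    dirs = []
    if os.path.isfile(include_file):
        with open(include_file) as fd:
            for line in fd:
                line = line.rstrip()
                if line and not line.startswith("#"):
                    # relative entries are taken relative to the including directory
                    dirs.append(line if os.path.isabs(line)
                                else os.path.join(mods_dir, line))
    return dirs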
- The file may also include comments deleneated with # in the first column - ''' - include_dirs = [] if include_dirs is None else include_dirs - if user_mods_path is None or user_mods_path == 'UNSET': - return include_dirs - expect(os.path.isabs(user_mods_path), - "Expected full directory path, got '{}'".format(user_mods_path)) - expect(os.path.isdir(user_mods_path), - "Directory not found {}".format(user_mods_path)) - norm_path = os.path.normpath(user_mods_path) - - for dir_ in include_dirs: - if norm_path == dir_: - include_dirs.remove(norm_path) - break - - logger.info("Adding user mods directory {}".format(norm_path)) - include_dirs.append(norm_path) - include_file = os.path.join(norm_path,"include_user_mods") - if os.path.isfile(include_file): - with open(include_file, "r") as fd: - for newpath in fd: - newpath = newpath.rstrip() - if len(newpath) > 0 and not newpath.startswith("#"): - if not os.path.isabs(newpath): - newpath = os.path.join(user_mods_path, newpath) - if os.path.isabs(newpath): - build_include_dirs_list(newpath, include_dirs) - else: - logger.warning("Could not resolve path '{}' in file '{}'".format(newpath, include_file)) - - return include_dirs diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py deleted file mode 100644 index cf46d39ef03..00000000000 --- a/scripts/lib/CIME/utils.py +++ /dev/null @@ -1,1818 +0,0 @@ -""" -Common functions used by cime python scripts -Warning: you cannot use CIME Classes in this module as it causes circular dependencies -""" -import io, logging, gzip, sys, os, time, re, shutil, glob, string, random, imp, fnmatch -import errno, signal, warnings, filecmp -import stat as statlib -import six -from contextlib import contextmanager -#pylint: disable=import-error -from six.moves import configparser -from distutils import file_util - -# Return this error code if the scripts worked but tests failed -TESTS_FAILED_ERR_CODE = 100 -logger = logging.getLogger(__name__) - -@contextmanager -def redirect_stdout(new_target): - old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout - try: - yield new_target # run some code with the replaced stdout - finally: - sys.stdout = old_target # restore to the previous value - -@contextmanager -def redirect_stderr(new_target): - old_target, sys.stderr = sys.stderr, new_target # replace sys.stdout - try: - yield new_target # run some code with the replaced stdout - finally: - sys.stderr = old_target # restore to the previous value - -@contextmanager -def redirect_stdout_stderr(new_target): - old_stdout, old_stderr = sys.stdout, sys.stderr - sys.stdout, sys.stderr = new_target, new_target - try: - yield new_target - finally: - sys.stdout, sys.stderr = old_stdout, old_stderr - -@contextmanager -def redirect_logger(new_target, logger_name): - ch = logging.StreamHandler(stream=new_target) - ch.setLevel(logging.DEBUG) - log = logging.getLogger(logger_name) - root_log = logging.getLogger() - orig_handlers = log.handlers - orig_root_loggers = root_log.handlers - - try: - root_log.handlers = [] - log.handlers = [ch] - yield log - finally: - root_log.handlers = orig_root_loggers - log.handlers = orig_handlers - -class IndentFormatter(logging.Formatter): - def __init__(self, indent, fmt=None, datefmt=None): - logging.Formatter.__init__(self, fmt, datefmt) - self._indent = indent - - def format(self, record): - record.msg = "{}{}".format(self._indent, record.msg) - out = logging.Formatter.format(self, record) - return out - -def set_logger_indent(indent): - root_log = logging.getLogger() - 
root_log.handlers = [] - formatter = IndentFormatter(indent) - - handler = logging.StreamHandler() - handler.setFormatter(formatter) - root_log.addHandler(handler) - -class EnvironmentContext(object): - """ - Context manager for environment variables - Usage: - os.environ['MYVAR'] = 'oldvalue' - with EnvironmentContex(MYVAR='myvalue', MYVAR2='myvalue2'): - print os.getenv('MYVAR') # Should print myvalue. - print os.getenv('MYVAR2') # Should print myvalue2. - print os.getenv('MYVAR') # Should print oldvalue. - print os.getenv('MYVAR2') # Should print None. - - CREDIT: https://github.com/sakurai-youhei/envcontext - """ - - def __init__(self, **kwargs): - self.envs = kwargs - self.old_envs = {} - - def __enter__(self): - self.old_envs = {} - for k, v in self.envs.items(): - self.old_envs[k] = os.environ.get(k) - os.environ[k] = v - - def __exit__(self, *args): - for k, v in self.old_envs.items(): - if v: - os.environ[k] = v - else: - del os.environ[k] - -# This should be the go-to exception for CIME use. It's a subclass -# of SystemExit in order suppress tracebacks, which users generally -# hate seeing. It's a subclass of Exception because we want it to be -# "catchable". If you are debugging CIME and want to see the stacktrace, -# run your CIME command with the --debug flag. -class CIMEError(SystemExit, Exception): - pass - -def expect(condition, error_msg, exc_type=CIMEError, error_prefix="ERROR:"): - """ - Similar to assert except doesn't generate an ugly stacktrace. Useful for - checking user error, not programming error. - - >>> expect(True, "error1") - >>> expect(False, "error2") # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: error2 - """ - # Without this line we get a futurewarning on the use of condition below - warnings.filterwarnings("ignore") - if not condition: - if logger.isEnabledFor(logging.DEBUG): - import pdb - pdb.set_trace() - try: - msg = str(error_prefix + " " + error_msg) - except UnicodeEncodeError: - msg = (error_prefix + " " + error_msg).encode('utf-8') - raise exc_type(msg) - -def id_generator(size=6, chars=string.ascii_lowercase + string.digits): - return ''.join(random.choice(chars) for _ in range(size)) - -def check_name(fullname, additional_chars=None, fullpath=False): - """ - check for unallowed characters in name, this routine only - checks the final name and does not check if path exists or is - writable - - >>> check_name("test.id", additional_chars=".") - False - >>> check_name("case.name", fullpath=False) - True - >>> check_name("/some/file/path/case.name", fullpath=True) - True - >>> check_name("mycase+mods") - False - >>> check_name("mycase?mods") - False - >>> check_name("mycase*mods") - False - >>> check_name("/some/full/path/name/") - False - """ - - chars = '+*?<>/{}[\]~`@:' # pylint: disable=anomalous-backslash-in-string - if additional_chars is not None: - chars += additional_chars - if fullname.endswith('/'): - return False - if fullpath: - _, name = os.path.split(fullname) - else: - name = fullname - match = re.search(r"["+re.escape(chars)+"]", name) - if match is not None: - logger.warning("Illegal character {} found in name {}".format(match.group(0), name)) - return False - return True - -# Should only be called from get_cime_config() -def _read_cime_config_file(): - """ - READ the config file in ~/.cime, this file may contain - [main] - CIME_MODEL=e3sm,cesm - PROJECT=someprojectnumber - """ - allowed_sections = ("main", "create_test") - - allowed_in_main = ("cime_model", "project", 
"charge_account", "srcroot", "mail_type", - "mail_user", "machine", "mpilib", "compiler", "input_dir", "cime_driver") - allowed_in_create_test = ("mail_type", "mail_user", "save_timing", "single_submit", - "test_root", "output_root", "baseline_root", "clean", - "machine", "mpilib", "compiler", "parallel_jobs", "proc_pool", - "walltime", "job_queue", "allow_baseline_overwrite", "wait", - "force_procs", "force_threads", "input_dir", "pesfile", "retry", - "walltime") - - cime_config_file = os.path.abspath(os.path.join(os.path.expanduser("~"), - ".cime","config")) - cime_config = configparser.SafeConfigParser() - if(os.path.isfile(cime_config_file)): - cime_config.read(cime_config_file) - for section in cime_config.sections(): - expect(section in allowed_sections,"Unknown section {} in .cime/config\nallowed sections are {}".format(section, allowed_sections)) - if cime_config.has_section('main'): - for item,_ in cime_config.items('main'): - expect(item in allowed_in_main,"Unknown option in config section \"main\": \"{}\"\nallowed options are {}".format(item, allowed_in_main)) - if cime_config.has_section('create_test'): - for item,_ in cime_config.items('create_test'): - expect(item in allowed_in_create_test,"Unknown option in config section \"test\": \"{}\"\nallowed options are {}".format(item, allowed_in_create_test)) - else: - logger.debug("File {} not found".format(cime_config_file)) - cime_config.add_section('main') - - return cime_config - -_CIMECONFIG = None -def get_cime_config(): - global _CIMECONFIG - if (not _CIMECONFIG): - _CIMECONFIG = _read_cime_config_file() - - return _CIMECONFIG - -def reset_cime_config(): - """ - Useful to keep unit tests from interfering with each other - """ - global _CIMECONFIG - _CIMECONFIG = None - -def get_python_libs_location_within_cime(): - """ - From within CIME, return subdirectory of python libraries - """ - return os.path.join("scripts", "lib") - -def get_cime_root(case=None): - """ - Return the absolute path to the root of CIME that contains this script - - >>> os.path.isdir(os.path.join(get_cime_root(), get_scripts_location_within_cime())) - True - """ - script_absdir = os.path.abspath(os.path.join(os.path.dirname(__file__),"..")) - assert script_absdir.endswith(get_python_libs_location_within_cime()), script_absdir - cimeroot = os.path.abspath(os.path.join(script_absdir,"..","..")) - - if case is not None: - case_cimeroot = os.path.abspath(case.get_value("CIMEROOT")) - cimeroot = os.path.abspath(cimeroot) - expect(cimeroot == case_cimeroot, "Inconsistent CIMEROOT variable: case -> '{}', file location -> '{}'".format(case_cimeroot, cimeroot)) - - logger.debug( "CIMEROOT is " + cimeroot) - return cimeroot - -def get_cime_default_driver(): - driver = os.environ.get("CIME_DRIVER") - if driver: - logger.debug("Setting CIME_DRIVER={} from environment".format(driver)) - else: - cime_config = get_cime_config() - if (cime_config.has_option('main','CIME_DRIVER')): - driver = cime_config.get('main','CIME_DRIVER') - if driver: - logger.debug("Setting CIME_driver={} from ~/.cime/config".format(driver)) - if not driver: - driver = "mct" - expect(driver in ("mct", "nuopc", "moab"),"Attempt to set invalid driver {}".format(driver)) - return driver - -def set_model(model): - """ - Set the model to be used in this session - """ - cime_config = get_cime_config() - if not cime_config.has_section('main'): - cime_config.add_section('main') - expect(model == 'cesm' or model == 'e3sm',"model {} not recognized".format(model)) - 
cime_config.set('main','CIME_MODEL',model) - -def get_model(): - """ - Get the currently configured model value - The CIME_MODEL env variable may or may not be set - - >>> os.environ["CIME_MODEL"] = "garbage" - >>> get_model() # doctest:+ELLIPSIS +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: model garbage not recognized - >>> del os.environ["CIME_MODEL"] - >>> set_model('rocky') # doctest:+ELLIPSIS +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: model rocky not recognized - >>> set_model('e3sm') - >>> get_model() - 'e3sm' - >>> reset_cime_config() - """ - model = os.environ.get("CIME_MODEL") - if (model == 'cesm' or model == 'e3sm'): - logger.debug("Setting CIME_MODEL={} from environment".format(model)) - else: - expect(model is None,"model {} not recognized".format(model)) - cime_config = get_cime_config() - if (cime_config.has_option('main','CIME_MODEL')): - model = cime_config.get('main','CIME_MODEL') - if model is not None: - logger.debug("Setting CIME_MODEL={} from ~/.cime/config".format(model)) - - # One last try - if (model is None): - srcroot = None - if cime_config.has_section('main') and cime_config.has_option('main', 'SRCROOT'): - srcroot = cime_config.get('main','SRCROOT') - if srcroot is None: - srcroot = os.path.dirname(os.path.abspath(get_cime_root())) - if os.path.isfile(os.path.join(srcroot, "SVN_EXTERNAL_DIRECTORIES")) \ - or os.path.isdir(os.path.join(srcroot, "manage_externals")): - model = 'cesm' - else: - model = 'e3sm' - # This message interfers with the correct operation of xmlquery - # logger.debug("Guessing CIME_MODEL={}, set environment variable if this is incorrect".format(model)) - - if model is not None: - set_model(model) - return model - - modelroot = os.path.join(get_cime_root(), "config") - models = os.listdir(modelroot) - msg = ".cime/config or environment variable CIME_MODEL must be set to one of: " - msg += ", ".join([model for model in models - if os.path.isdir(os.path.join(modelroot,model)) - and model != "xml_schemas"]) - expect(False, msg) - -def _get_path(filearg, from_dir): - if not filearg.startswith("/") and from_dir is not None: - filearg = os.path.join(from_dir, filearg) - - return filearg - -def _convert_to_fd(filearg, from_dir, mode="a"): - filearg = _get_path(filearg, from_dir) - - return open(filearg, mode) - -_hack=object() - -def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None, from_dir=None): - """ - This code will try to import and run each cmd as a subroutine - if that fails it will run it as a program in a seperate shell - - Raises exception on failure. - """ - do_run_cmd = True - - # Before attempting to load the script make sure it contains the subroutine - # we are expecting - with open(cmd, 'r') as fd: - for line in fd.readlines(): - if re.search(r"^def {}\(".format(subname), line): - do_run_cmd = False - break - - if not do_run_cmd: - try: - mod = imp.load_source(subname, cmd) - logger.info(" Calling {}".format(cmd)) - # Careful: logfile code is not thread safe! 
- if logfile: - with open(logfile,"w") as log_fd: - with redirect_logger(log_fd, subname): - with redirect_stdout_stderr(log_fd): - getattr(mod, subname)(*subargs) - else: - getattr(mod, subname)(*subargs) - - except (SyntaxError, AttributeError) as _: - pass # Need to try to run as shell command - - except Exception: - if logfile: - with open(logfile, "a") as log_fd: - log_fd.write(str(sys.exc_info()[1])) - - expect(False, "{} FAILED, cat {}".format(cmd, logfile)) - else: - raise - - else: - return # Running as python function worked, we're done - - logger.info(" Running {} ".format(cmd)) - if case is not None: - case.flush() - - fullcmd = cmd - if isinstance(cmdargs, list): - for arg in cmdargs: - fullcmd += " " + str(arg) - else: - fullcmd += " " + cmdargs - - if logfile: - fullcmd += " >& {} ".format(logfile) - - stat, output, _ = run_cmd("{}".format(fullcmd), combine_output=True, from_dir=from_dir) - if output: # Will be empty if logfile - logger.info(output) - - if stat != 0: - if logfile: - expect(False, "{} FAILED, cat {}".format(fullcmd, logfile)) - else: - expect(False, "{} FAILED, see above".format(fullcmd)) - - # refresh case xml object from file - if case is not None: - case.read_xml() - -def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, - arg_stdout=_hack, arg_stderr=_hack, env=None, combine_output=False): - """ - Wrapper around subprocess to make it much more convenient to run shell commands - - >>> run_cmd('ls file_i_hope_doesnt_exist')[0] != 0 - True - """ - import subprocess # Not safe to do globally, module not available in older pythons - - # Real defaults for these value should be subprocess.PIPE - if arg_stdout is _hack: - arg_stdout = subprocess.PIPE - elif isinstance(arg_stdout, six.string_types): - arg_stdout = _convert_to_fd(arg_stdout, from_dir) - - if arg_stderr is _hack: - arg_stderr = subprocess.STDOUT if combine_output else subprocess.PIPE - elif isinstance(arg_stderr, six.string_types): - arg_stderr = _convert_to_fd(arg_stdout, from_dir) - - if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): - logger.info("RUN: {}\nFROM: {}".format(cmd, os.getcwd() if from_dir is None else from_dir)) - - if (input_str is not None): - stdin = subprocess.PIPE - else: - stdin = None - - proc = subprocess.Popen(cmd, - shell=True, - stdout=arg_stdout, - stderr=arg_stderr, - stdin=stdin, - cwd=from_dir, - env=env) - - output, errput = proc.communicate(input_str) - if output is not None: - try: - output = output.decode('utf-8', errors='ignore').strip() - except AttributeError: - pass - if errput is not None: - try: - errput = errput.decode('utf-8', errors='ignore').strip() - except AttributeError: - pass - - stat = proc.wait() - if six.PY2: - if isinstance(arg_stdout, file): # pylint: disable=undefined-variable - arg_stdout.close() # pylint: disable=no-member - if isinstance(arg_stderr, file) and arg_stderr is not arg_stdout: # pylint: disable=undefined-variable - arg_stderr.close() # pylint: disable=no-member - else: - if isinstance(arg_stdout, io.IOBase): - arg_stdout.close() # pylint: disable=no-member - if isinstance(arg_stderr, io.IOBase) and arg_stderr is not arg_stdout: - arg_stderr.close() # pylint: disable=no-member - - - if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): - if stat != 0: - logger.info(" stat: {:d}\n".format(stat)) - if output: - logger.info(" output: {}\n".format(output)) - if errput: - logger.info(" errput: {}\n".format(errput)) - - return stat, output, errput - -def run_cmd_no_fail(cmd, 
input_str=None, from_dir=None, verbose=None, - arg_stdout=_hack, arg_stderr=_hack, env=None, combine_output=False): - """ - Wrapper around subprocess to make it much more convenient to run shell commands. - Expects command to work. Just returns output string. - - >>> run_cmd_no_fail('echo foo') == 'foo' - True - >>> run_cmd_no_fail('echo THE ERROR >&2; false') # doctest:+ELLIPSIS +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: Command: 'echo THE ERROR >&2; false' failed with error ... - - >>> run_cmd_no_fail('grep foo', input_str=b'foo') == 'foo' - True - >>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) == 'THE ERROR' - True - """ - stat, output, errput = run_cmd(cmd, input_str, from_dir, verbose, arg_stdout, arg_stderr, env, combine_output) - if stat != 0: - # If command produced no errput, put output in the exception since we - # have nothing else to go on. - errput = output if not errput else errput - if errput is None: - if combine_output: - if isinstance(arg_stdout, six.string_types): - errput = "See {}".format(_get_path(arg_stdout, from_dir)) - else: - errput = "" - elif isinstance(arg_stderr, six.string_types): - errput = "See {}".format(_get_path(arg_stderr, from_dir)) - else: - errput = "" - - expect(False, "Command: '{}' failed with error '{}' from dir '{}'".format(cmd, errput.encode('utf-8'), os.getcwd() if from_dir is None else from_dir)) - - return output - -def check_minimum_python_version(major, minor): - """ - Check your python version. - - >>> check_minimum_python_version(sys.version_info[0], sys.version_info[1]) - >>> - """ - msg = "Python " + str(major) + ", minor version " + str(minor) + " is required, you have " + str(sys.version_info[0]) + "." + str(sys.version_info[1]) - expect(sys.version_info[0] > major or - (sys.version_info[0] == major and sys.version_info[1] >= minor), msg) - -def normalize_case_id(case_id): - """ - Given a case_id, return it in form TESTCASE.GRID.COMPSET.PLATFORM - - >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel') - 'ERT.ne16_g37.B1850C5.sandiatoss3_intel' - >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod') - 'ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod' - >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel.G.20151121') - 'ERT.ne16_g37.B1850C5.sandiatoss3_intel' - >>> normalize_case_id('ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod.G.20151121') - 'ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod' - """ - sep_count = case_id.count(".") - expect(sep_count >= 3 and sep_count <= 6, - "Case '{}' needs to be in form: TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD] or TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD].GC.TESTID".format(case_id)) - if (sep_count in [5, 6]): - return ".".join(case_id.split(".")[:-2]) - else: - return case_id - -def parse_test_name(test_name): - """ - Given a CIME test name TESTCASE[_CASEOPTS].GRID.COMPSET[.MACHINE_COMPILER[.TESTMODS]], - return each component of the testname with machine and compiler split. - Do not error if a partial testname is provided (TESTCASE or TESTCASE.GRID) instead - parse and return the partial results. 
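For reference, the typical call pattern for the two subprocess wrappers above, assuming CIME's scripts/lib directory is on sys.path (the command strings are placeholders): run_cmd returns a (status, stdout, stderr) tuple and never raises, while run_cmd_no_fail returns stdout and aborts through expect() on a non-zero status.

from CIME.utils import run_cmd, run_cmd_no_fail

stat, out, err = run_cmd("ls /some/dir")   # inspect stat and err yourself
if stat != 0:
    print("command failed: {}".format(err))

out = run_cmd_no_fail("echo ok")           # raises CIMEError if the command fails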
- - >>> parse_test_name('ERS') - ['ERS', None, None, None, None, None, None] - >>> parse_test_name('ERS.fe12_123') - ['ERS', None, 'fe12_123', None, None, None, None] - >>> parse_test_name('ERS.fe12_123.JGF') - ['ERS', None, 'fe12_123', 'JGF', None, None, None] - >>> parse_test_name('ERS_D.fe12_123.JGF') - ['ERS', ['D'], 'fe12_123', 'JGF', None, None, None] - >>> parse_test_name('ERS_D_P1.fe12_123.JGF') - ['ERS', ['D', 'P1'], 'fe12_123', 'JGF', None, None, None] - >>> parse_test_name('SMS_D_Ln9_Mmpi-serial.f19_g16_rx1.A') - ['SMS', ['D', 'Ln9', 'Mmpi-serial'], 'f19_g16_rx1', 'A', None, None, None] - >>> parse_test_name('ERS.fe12_123.JGF.machine_compiler') - ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', None] - >>> parse_test_name('ERS.fe12_123.JGF.machine_compiler.test-mods') - ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', 'test/mods'] - >>> parse_test_name('SMS.f19_g16.2000_DATM%QI.A_XLND_SICE_SOCN_XROF_XGLC_SWAV.mach-ine_compiler.test-mods') # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: Expected 4th item of 'SMS.f19_g16.2000_DATM%QI.A_XLND_SICE_SOCN_XROF_XGLC_SWAV.mach-ine_compiler.test-mods' ('A_XLND_SICE_SOCN_XROF_XGLC_SWAV') to be in form machine_compiler - >>> parse_test_name('SMS.f19_g16.2000_DATM%QI/A_XLND_SICE_SOCN_XROF_XGLC_SWAV.mach-ine_compiler.test-mods') # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - CIMEError: ERROR: Invalid compset name 2000_DATM%QI/A_XLND_SICE_SOCN_XROF_XGLC_SWAV - """ - rv = [None] * 7 - num_dots = test_name.count(".") - - rv[0:num_dots+1] = test_name.split(".") - testcase_field_underscores = rv[0].count("_") - rv.insert(1, None) # Make room for caseopts - rv.pop() - if (testcase_field_underscores > 0): - full_str = rv[0] - rv[0] = full_str.split("_")[0] - rv[1] = full_str.split("_")[1:] - - if (num_dots >= 3): - expect(check_name( rv[3] ), "Invalid compset name {}".format(rv[3])) - - expect(rv[4].count("_") == 1, - "Expected 4th item of '{}' ('{}') to be in form machine_compiler".format(test_name, rv[4])) - rv[4:5] = rv[4].split("_") - rv.pop() - - if (rv[-1] is not None): - rv[-1] = rv[-1].replace("-", "/") - - expect(num_dots <= 4, - "'{}' does not look like a CIME test name, expect TESTCASE.GRID.COMPSET[.MACHINE_COMPILER[.TESTMODS]]".format(test_name)) - - return rv - -def get_full_test_name(partial_test, caseopts=None, grid=None, compset=None, machine=None, compiler=None, testmod=None): - """ - Given a partial CIME test name, return in form TESTCASE.GRID.COMPSET.MACHINE_COMPILER[.TESTMODS] - Use the additional args to fill out the name if needed - - >>> get_full_test_name("ERS", grid="ne16_fe16", compset="JGF", machine="melvin", compiler="gnu") - 'ERS.ne16_fe16.JGF.melvin_gnu' - >>> get_full_test_name("ERS", caseopts=["D", "P16"], grid="ne16_fe16", compset="JGF", machine="melvin", compiler="gnu") - 'ERS_D_P16.ne16_fe16.JGF.melvin_gnu' - >>> get_full_test_name("ERS.ne16_fe16", compset="JGF", machine="melvin", compiler="gnu") - 'ERS.ne16_fe16.JGF.melvin_gnu' - >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu") - 'ERS.ne16_fe16.JGF.melvin_gnu' - >>> get_full_test_name("ERS.ne16_fe16.JGF.melvin_gnu.mods", machine="melvin", compiler="gnu") - 'ERS.ne16_fe16.JGF.melvin_gnu.mods' - >>> get_full_test_name("ERS.ne16_fe16.JGF", machine="melvin", compiler="gnu", testmod="mods/test") - 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test' - """ - partial_testcase, partial_caseopts, partial_grid, partial_compset, partial_machine, 
partial_compiler, partial_testmod = parse_test_name(partial_test) - - required_fields = [ - (partial_grid, grid, "grid"), - (partial_compset, compset, "compset"), - (partial_machine, machine, "machine"), - (partial_compiler, compiler, "compiler"), - ] - - result = partial_test - - for partial_val, arg_val, name in required_fields: - if (partial_val is None): - # Add to result based on args - expect(arg_val is not None, - "Could not fill-out test name, partial string '{}' had no {} information and you did not provide any".format(partial_test, name)) - result = "{}{}{}".format(result, "_" if name == "compiler" else ".", arg_val) - elif (arg_val is not None and partial_val != partial_compiler): - expect(arg_val == partial_val, - "Mismatch in field {}, partial string '{}' indicated it should be '{}' but you provided '{}'".format(name, partial_test, partial_val, arg_val)) - - if (partial_testmod is None): - if (testmod is None): - # No testmod for this test and that's OK - pass - else: - result += ".{}".format(testmod.replace("/", "-")) - elif (testmod is not None): - expect(testmod == partial_testmod, - "Mismatch in field testmod, partial string '{}' indicated it should be '{}' but you provided '{}'".format(partial_test, partial_testmod, testmod)) - - if (partial_caseopts is None): - if caseopts is None: - # No casemods for this test and that's OK - pass - else: - result = result.replace(partial_testcase, "{}_{}".format(partial_testcase, "_".join(caseopts)), 1) - elif caseopts is not None: - expect(caseopts == partial_caseopts, - "Mismatch in field caseopts, partial string '{}' indicated it should be '{}' but you provided '{}'".format(partial_test, partial_caseopts, caseopts)) - - return result - -def get_current_branch(repo=None): - """ - Return the name of the current branch for a repository - - >>> if "GIT_BRANCH" in os.environ: - ... get_current_branch() is not None - ... else: - ... os.environ["GIT_BRANCH"] = "foo" - ... get_current_branch() == "foo" - True - """ - if ("GIT_BRANCH" in os.environ): - # This approach works better for Jenkins jobs because the Jenkins - # git plugin does not use local tracking branches, it just checks out - # to a commit - branch = os.environ["GIT_BRANCH"] - if (branch.startswith("origin/")): - branch = branch.replace("origin/", "", 1) - return branch - else: - stat, output, _ = run_cmd("git symbolic-ref HEAD", from_dir=repo) - if (stat != 0): - return None - else: - return output.replace("refs/heads/", "") - -def get_current_commit(short=False, repo=None, tag=False): - """ - Return the sha1 of the current HEAD commit - - >>> get_current_commit() is not None - True - """ - if tag: - rc, output, _ = run_cmd("git describe --tags $(git log -n1 --pretty='%h')", from_dir=repo) - else: - rc, output, _ = run_cmd("git rev-parse {} HEAD".format("--short" if short else ""), from_dir=repo) - - return output if rc == 0 else "unknown" - -def get_scripts_location_within_cime(): - """ - From within CIME, return subdirectory where scripts live. - """ - return "scripts" - -def get_cime_location_within_e3sm(): - """ - From within e3sm, return subdirectory where CIME lives. 
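A short usage sketch for the git helpers above, assuming CIME's scripts/lib directory is importable and the working directory is a git checkout:

from CIME.utils import get_current_branch, get_current_commit

branch = get_current_branch()          # honors $GIT_BRANCH (Jenkins) before asking git
sha = get_current_commit(short=True)   # falls back to "unknown" outside a git checkout
print(branch, sha)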
- """ - return "cime" - -def get_model_config_location_within_cime(model=None): - model = get_model() if model is None else model - return os.path.join("config", model) - -def get_e3sm_root(): - """ - Return the absolute path to the root of E3SM that contains this script - """ - cime_absdir = get_cime_root() - assert cime_absdir.endswith(get_cime_location_within_e3sm()), cime_absdir - return os.path.normpath(cime_absdir[:len(cime_absdir)-len(get_cime_location_within_e3sm())]) - -def get_scripts_root(): - """ - Get absolute path to scripts - - >>> os.path.isdir(get_scripts_root()) - True - """ - return os.path.join(get_cime_root(), get_scripts_location_within_cime()) - -def get_python_libs_root(): - """ - Get absolute path to scripts - - >>> os.path.isdir(get_python_libs_root()) - True - """ - return os.path.join(get_cime_root(), get_python_libs_location_within_cime()) - -def get_model_config_root(model=None): - """ - Get absolute path to model config area" - - >>> os.path.isdir(get_model_config_root()) - True - """ - model = get_model() if model is None else model - return os.path.join(get_cime_root(), get_model_config_location_within_cime(model)) - -def stop_buffering_output(): - """ - All stdout, stderr will not be buffered after this is called. - """ - os.environ['PYTHONUNBUFFERED'] = '1' - -def start_buffering_output(): - """ - All stdout, stderr will be buffered after this is called. This is python's - default behavior. - """ - sys.stdout.flush() - sys.stdout = os.fdopen(sys.stdout.fileno(), 'w') - -def match_any(item, re_list): - """ - Return true if item matches any regex in re_list - """ - for regex_str in re_list: - regex = re.compile(regex_str) - if (regex.match(item)): - return True - - return False - -def safe_copy(src_path, tgt_path, preserve_meta=True): - """ - A flexbile and safe copy routine. Will try to copy file and metadata, but this - can fail if the current user doesn't own the tgt file. A fallback data-only copy is - attempted in this case. Works even if overwriting a read-only file. - - tgt_path can be a directory, src_path must be a file - - most of the complexity here is handling the case where the tgt_path file already - exists. This problem does not exist for the tree operations so we don't need to wrap those. - - preserve_meta toggles if file meta-data, like permissions, should be preserved. If you are - copying baseline files, you should be within a SharedArea context manager and preserve_meta - should be false so that the umask set up by SharedArea can take affect regardless of the - permissions of the src files. - """ - - tgt_path = os.path.join(tgt_path, os.path.basename(src_path)) if os.path.isdir(tgt_path) else tgt_path - - # Handle pre-existing file - if os.path.isfile(tgt_path): - st = os.stat(tgt_path) - owner_uid = st.st_uid - - # Handle read-only files if possible - if not os.access(tgt_path, os.W_OK): - if owner_uid == os.getuid(): - # I am the owner, make writeable - os.chmod(st.st_mode | statlib.S_IWRITE) - else: - # I won't be able to copy this file - raise OSError("Cannot copy over file {}, it is readonly and you are not the owner".format(tgt_path)) - - if owner_uid == os.getuid(): - # I am the owner, copy file contents, permissions, and metadata - file_util.copy_file(src_path, tgt_path, preserve_mode=preserve_meta, preserve_times=preserve_meta) - else: - # I am not the owner, just copy file contents - shutil.copyfile(src_path, tgt_path) - - else: - # We are making a new file, copy file contents, permissions, and metadata. 
- # This can fail if the underlying directory is not writable by current user. - file_util.copy_file(src_path, tgt_path, preserve_mode=preserve_meta, preserve_times=preserve_meta) - - # If src file was executable, then the tgt file should be too - st = os.stat(tgt_path) - if os.access(src_path, os.X_OK) and st.st_uid == os.getuid(): - os.chmod(tgt_path, st.st_mode | statlib.S_IXUSR | statlib.S_IXGRP | statlib.S_IXOTH) - -def safe_recursive_copy(src_dir, tgt_dir, file_map): - """ - Copies a set of files from one dir to another. Works even if overwriting a - read-only file. Files can be relative paths and the relative path will be - matched on the tgt side. - """ - for src_file, tgt_file in file_map: - full_tgt = os.path.join(tgt_dir, tgt_file) - full_src = src_file if os.path.isabs(src_file) else os.path.join(src_dir, src_file) - expect(os.path.isfile(full_src), "Source dir '{}' missing file '{}'".format(src_dir, src_file)) - safe_copy(full_src, full_tgt) - -def symlink_force(target, link_name): - """ - Makes a symlink from link_name to target. Unlike the standard - os.symlink, this will work even if link_name already exists (in - which case link_name will be overwritten). - """ - try: - os.symlink(target, link_name) - except OSError as e: - if e.errno == errno.EEXIST: - os.remove(link_name) - os.symlink(target, link_name) - else: - raise e - -def find_proc_id(proc_name=None, - children_only=False, - of_parent=None): - """ - Children implies recursive. - """ - expect(proc_name is not None or children_only, - "Must provide proc_name if not searching for children") - expect(not (of_parent is not None and not children_only), - "of_parent only used with children_only") - - parent = of_parent if of_parent is not None else os.getpid() - - pgrep_cmd = "pgrep {} {}".format(proc_name if proc_name is not None else "", - "-P {:d}".format(parent if children_only else "")) - stat, output, errput = run_cmd(pgrep_cmd) - expect(stat in [0, 1], "pgrep failed with error: '{}'".format(errput)) - - rv = set([int(item.strip()) for item in output.splitlines()]) - if (children_only): - pgrep_cmd = "pgrep -P {}".format(parent) - stat, output, errput = run_cmd(pgrep_cmd) - expect(stat in [0, 1], "pgrep failed with error: '{}'".format(errput)) - - for child in output.splitlines(): - rv = rv.union(set(find_proc_id(proc_name, children_only, int(child.strip())))) - - return list(rv) - -def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False): - """ - Get a string representing the current UTC time in format: YYYYMMDD_HHMMSS - - The format can be changed if needed. - """ - if utc_time: - time_tuple = time.gmtime() - else: - time_tuple = time.localtime() - return time.strftime(timestamp_format, time_tuple) - -def get_project(machobj=None): - """ - Hierarchy for choosing PROJECT: - 0. Command line flag to create_newcase or create_test - 1. Environment variable PROJECT - 2 Environment variable ACCOUNT (this is for backward compatibility) - 3. 
File $HOME/.cime/config (this is new) - 4 File $HOME/.cesm_proj (this is for backward compatibility) - 5 config_machines.xml (if machobj provided) - """ - project = os.environ.get("PROJECT") - if (project is not None): - logger.info("Using project from env PROJECT: " + project) - return project - project = os.environ.get("ACCOUNT") - if (project is not None): - logger.info("Using project from env ACCOUNT: " + project) - return project - - cime_config = get_cime_config() - if (cime_config.has_option('main','PROJECT')): - project = cime_config.get('main','PROJECT') - if (project is not None): - logger.info("Using project from .cime/config: " + project) - return project - - projectfile = os.path.abspath(os.path.join(os.path.expanduser("~"), ".cesm_proj")) - if (os.path.isfile(projectfile)): - with open(projectfile,'r') as myfile: - for line in myfile: - project = line.rstrip() - if not project.startswith("#"): - break - logger.info("Using project from .cesm_proj: " + project) - cime_config.set('main','PROJECT',project) - return project - - if machobj is not None: - project = machobj.get_value("PROJECT") - if project is not None: - logger.info("Using project from config_machines.xml: " + project) - return project - - logger.info("No project info available") - return None - -def get_charge_account(machobj=None, project=None): - """ - Hierarchy for choosing CHARGE_ACCOUNT: - 1. Environment variable CHARGE_ACCOUNT - 2. File $HOME/.cime/config - 3. config_machines.xml (if machobj provided) - 4. default to same value as PROJECT - - >>> import CIME - >>> import CIME.XML.machines - >>> machobj = CIME.XML.machines.Machines(machine="theta") - >>> project = get_project(machobj) - >>> charge_account = get_charge_account(machobj, project) - >>> project == charge_account - True - >>> os.environ["CHARGE_ACCOUNT"] = "ChargeAccount" - >>> get_charge_account(machobj, project) - 'ChargeAccount' - >>> del os.environ["CHARGE_ACCOUNT"] - """ - charge_account = os.environ.get("CHARGE_ACCOUNT") - if (charge_account is not None): - logger.info("Using charge_account from env CHARGE_ACCOUNT: " + charge_account) - return charge_account - - cime_config = get_cime_config() - if (cime_config.has_option('main','CHARGE_ACCOUNT')): - charge_account = cime_config.get('main','CHARGE_ACCOUNT') - if (charge_account is not None): - logger.info("Using charge_account from .cime/config: " + charge_account) - return charge_account - - if machobj is not None: - charge_account = machobj.get_value("CHARGE_ACCOUNT") - if charge_account is not None: - logger.info("Using charge_account from config_machines.xml: " + charge_account) - return charge_account - - logger.info("No charge_account info available, using value from PROJECT") - return project - -def find_files(rootdir, pattern): - """ - recursively find all files matching a pattern - """ - result = [] - for root, _, files in os.walk(rootdir): - for filename in files: - if (fnmatch.fnmatch(filename, pattern)): - result.append(os.path.join(root, filename)) - - return result - - -def setup_standard_logging_options(parser): - helpfile = "{}.log".format(sys.argv[0]) - helpfile = os.path.join(os.getcwd(),os.path.basename(helpfile)) - parser.add_argument("-d", "--debug", action="store_true", - help="Print debug information (very verbose) to file {}".format(helpfile)) - parser.add_argument("-v", "--verbose", action="store_true", - help="Add additional context (time and file) to log messages") - parser.add_argument("-s", "--silent", action="store_true", - help="Print only warnings and error 
messages") - -class _LessThanFilter(logging.Filter): - def __init__(self, exclusive_maximum, name=""): - super(_LessThanFilter, self).__init__(name) - self.max_level = exclusive_maximum - - def filter(self, record): - #non-zero return means we log this message - return 1 if record.levelno < self.max_level else 0 - -def parse_args_and_handle_standard_logging_options(args, parser=None): - """ - Guide to logging in CIME. - - logger.debug -> Verbose/detailed output, use for debugging, off by default. Goes to a .log file - logger.info -> Goes to stdout (and log if --debug). Use for normal program output - logger.warning -> Goes to stderr (and log if --debug). Use for minor problems - logger.error -> Goes to stderr (and log if --debug) - """ - root_logger = logging.getLogger() - - verbose_formatter = logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', - datefmt='%m-%d %H:%M') - - # Change info to go to stdout. This handle applies to INFO exclusively - stdout_stream_handler = logging.StreamHandler(stream=sys.stdout) - stdout_stream_handler.setLevel(logging.INFO) - stdout_stream_handler.addFilter(_LessThanFilter(logging.WARNING)) - - # Change warnings and above to go to stderr - stderr_stream_handler = logging.StreamHandler(stream=sys.stderr) - stderr_stream_handler.setLevel(logging.WARNING) - - # scripts_regression_tests is the only thing that should pass a None argument in parser - if parser is not None: - if "--help" not in args[1:]: - _check_for_invalid_args(args[1:]) - args = parser.parse_args(args[1:]) - - # --verbose adds to the message format but does not impact the log level - if args.verbose: - stdout_stream_handler.setFormatter(verbose_formatter) - stderr_stream_handler.setFormatter(verbose_formatter) - - root_logger.addHandler(stdout_stream_handler) - root_logger.addHandler(stderr_stream_handler) - - if args.debug: - # Set up log file to catch ALL logging records - log_file = "{}.log".format(os.path.basename(sys.argv[0])) - - debug_log_handler = logging.FileHandler(log_file, mode='w') - debug_log_handler.setFormatter(verbose_formatter) - debug_log_handler.setLevel(logging.DEBUG) - root_logger.addHandler(debug_log_handler) - - root_logger.setLevel(logging.DEBUG) - elif args.silent: - root_logger.setLevel(logging.WARN) - else: - root_logger.setLevel(logging.INFO) - return args - -def get_logging_options(): - """ - Use to pass same logging options as was used for current - executable to subprocesses. - """ - root_logger = logging.getLogger() - - if (root_logger.level == logging.DEBUG): - return "--debug" - elif (root_logger.level == logging.WARN): - return "--silent" - else: - return "" - -def convert_to_type(value, type_str, vid=""): - """ - Convert value from string to another type. - vid is only for generating better error messages. 
- """ - if value is not None: - - if type_str == "char": - pass - - elif type_str == "integer": - try: - value = int(eval(value)) - except Exception: - expect(False, "Entry {} was listed as type int but value '{}' is not valid int".format(vid, value)) - - elif type_str == "logical": - expect(value.upper() in ["TRUE", "FALSE"], - "Entry {} was listed as type logical but had val '{}' instead of TRUE or FALSE".format(vid, value)) - value = value.upper() == "TRUE" - - elif type_str == "real": - try: - value = float(value) - except Exception: - expect(False, "Entry {} was listed as type real but value '{}' is not valid real".format(vid, value)) - - else: - expect(False, "Unknown type '{}'".format(type_str)) - - return value - -def convert_to_unknown_type(value): - """ - Convert value to it's real type by probing conversions. - """ - if value is not None: - - # Attempt to convert to logical - if value.upper() in ["TRUE", "FALSE"]: - return value.upper() == "TRUE" - - # Attempt to convert to integer - try: - value = int(eval(value)) - except Exception: - pass - else: - return value - - # Attempt to convert to float - try: - value = float(value) - except Exception: - pass - else: - return value - - # Just treat as string - - return value - -def convert_to_string(value, type_str=None, vid=""): - """ - Convert value back to string. - vid is only for generating better error messages. - >>> convert_to_string(6, type_str="integer") == '6' - True - >>> convert_to_string('6', type_str="integer") == '6' - True - >>> convert_to_string('6.0', type_str="real") == '6.0' - True - >>> convert_to_string(6.01, type_str="real") == '6.01' - True - """ - if value is not None and not isinstance(value, six.string_types): - if type_str == "char": - expect(isinstance(value, six.string_types), "Wrong type for entry id '{}'".format(vid)) - elif type_str == "integer": - expect(isinstance(value, six.integer_types), "Wrong type for entry id '{}'".format(vid)) - value = str(value) - elif type_str == "logical": - expect(type(value) is bool, "Wrong type for entry id '{}'".format(vid)) - value = "TRUE" if value else "FALSE" - elif type_str == "real": - expect(type(value) is float, "Wrong type for entry id '{}'".format(vid)) - value = str(value) - else: - expect(False, "Unknown type '{}'".format(type_str)) - if value is None: - value = "" - logger.debug("Attempt to convert None value for vid {} {}".format(vid,value)) - - return value - -def convert_to_seconds(time_str): - """ - Convert time value in [[HH:]MM:]SS to seconds - - >>> convert_to_seconds("42") - 42 - >>> convert_to_seconds("01:01:01") - 3661 - """ - components = time_str.split(":") - expect(len(components) < 4, "Unusual time string: '{}'".format(time_str)) - - components.reverse() - result = 0 - for idx, component in enumerate(components): - result += int(component) * pow(60, idx) - - return result - -def convert_to_babylonian_time(seconds): - """ - Convert time value to seconds to HH:MM:SS - - >>> convert_to_babylonian_time(3661) - '01:01:01' - """ - hours = int(seconds / 3600) - seconds %= 3600 - minutes = int(seconds / 60) - seconds %= 60 - - return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds) - -def get_time_in_seconds(timeval, unit): - """ - Convert a time from 'unit' to seconds - """ - if 'nyear' in unit: - dmult = 365 * 24 * 3600 - elif 'nmonth' in unit: - dmult = 30 * 24 * 3600 - elif 'nday' in unit: - dmult = 24 * 3600 - elif 'nhour' in unit: - dmult = 3600 - elif 'nminute' in unit: - dmult = 60 - else: - dmult = 1 - - return dmult * timeval - -def 
compute_total_time(job_cost_map, proc_pool): - """ - Given a map: jobname -> (procs, est-time), return a total time - estimate for a given processor pool size - - >>> job_cost_map = {"A" : (4, 3000), "B" : (2, 1000), "C" : (8, 2000), "D" : (1, 800)} - >>> compute_total_time(job_cost_map, 8) - 5160 - >>> compute_total_time(job_cost_map, 12) - 3180 - >>> compute_total_time(job_cost_map, 16) - 3060 - """ - current_time = 0 - waiting_jobs = dict(job_cost_map) - running_jobs = {} # name -> (procs, est-time, start-time) - while len(waiting_jobs) > 0 or len(running_jobs) > 0: - launched_jobs = [] - for jobname, data in waiting_jobs.items(): - procs_for_job, time_for_job = data - if procs_for_job <= proc_pool: - proc_pool -= procs_for_job - launched_jobs.append(jobname) - running_jobs[jobname] = (procs_for_job, time_for_job, current_time) - - for launched_job in launched_jobs: - del waiting_jobs[launched_job] - - completed_jobs = [] - for jobname, data in running_jobs.items(): - procs_for_job, time_for_job, time_started = data - if (current_time - time_started) >= time_for_job: - proc_pool += procs_for_job - completed_jobs.append(jobname) - - for completed_job in completed_jobs: - del running_jobs[completed_job] - - current_time += 60 # minute time step - - return current_time - -def format_time(time_format, input_format, input_time): - """ - Converts the string input_time from input_format to time_format - Valid format specifiers are "%H", "%M", and "%S" - % signs must be followed by an H, M, or S and then a separator - Separators can be any string without digits or a % sign - Each specifier can occur more than once in the input_format, - but only the first occurence will be used. - An example of a valid format: "%H:%M:%S" - Unlike strptime, this does support %H >= 24 - - >>> format_time("%H:%M:%S", "%H", "43") - '43:00:00' - >>> format_time("%H %M", "%M,%S", "59,59") - '0 59' - >>> format_time("%H, %S", "%H:%M:%S", "2:43:9") - '2, 09' - """ - input_fields = input_format.split("%") - expect(input_fields[0] == input_time[:len(input_fields[0])], - "Failed to parse the input time; does not match the header string") - input_time = input_time[len(input_fields[0]):] - timespec = {"H": None, "M": None, "S": None} - maxvals = {"M": 60, "S": 60} - DIGIT_CHECK = re.compile('[^0-9]*') - # Loop invariants given input follows the specs: - # field starts with H, M, or S - # input_time starts with a number corresponding with the start of field - for field in input_fields[1:]: - # Find all of the digits at the start of the string - spec = field[0] - value_re = re.match(r'\d*', input_time) - expect(value_re is not None, - "Failed to parse the input time for the '{}' specifier, expected an integer".format(spec)) - value = value_re.group(0) - expect(spec in timespec, "Unknown time specifier '" + spec + "'") - # Don't do anything if the time field is already specified - if timespec[spec] is None: - # Verify we aren't exceeding the maximum value - if spec in maxvals: - expect(int(value) < maxvals[spec], - "Failed to parse the '{}' specifier: A value less than {:d} is expected".format(spec, maxvals[spec])) - timespec[spec] = value - input_time = input_time[len(value):] - # Check for the separator string - expect(len(re.match(DIGIT_CHECK, field).group(0)) == len(field), - "Numbers are not permissible in separator strings") - expect(input_time[:len(field) - 1] == field[1:], - "The separator string ({}) doesn't match '{}'".format(field[1:], input_time)) - input_time = input_time[len(field) - 1:] - output_fields = 
time_format.split("%") - output_time = output_fields[0] - # Used when a value isn't given - min_len_spec = {"H": 1, "M": 2, "S": 2} - # Loop invariants given input follows the specs: - # field starts with H, M, or S - # output_time - for field in output_fields[1:]: - expect(field == output_fields[-1] or len(field) > 1, - "Separator strings are required to properly parse times") - spec = field[0] - expect(spec in timespec, "Unknown time specifier '" + spec + "'") - if timespec[spec] is not None: - output_time += "0" * (min_len_spec[spec] - len(timespec[spec])) - output_time += timespec[spec] - else: - output_time += "0" * min_len_spec[spec] - output_time += field[1:] - return output_time - -def append_status(msg, sfile, caseroot='.'): - """ - Append msg to sfile in caseroot - """ - ctime = time.strftime("%Y-%m-%d %H:%M:%S: ") - - # Reduce empty lines in CaseStatus. It's a very concise file - # and does not need extra newlines for readability - line_ending = "\n" - - with open(os.path.join(caseroot, sfile), "a") as fd: - fd.write(ctime + msg + line_ending) - fd.write(" ---------------------------------------------------" + line_ending) - -def append_testlog(msg, caseroot='.'): - """ - Add to TestStatus.log file - """ - append_status(msg, "TestStatus.log", caseroot) - -def append_case_status(phase, status, msg=None, caseroot='.'): - """ - Update CaseStatus file - """ - append_status("{} {}{}".format(phase, status, " {}".format(msg if msg else "")), "CaseStatus", caseroot) - -def does_file_have_string(filepath, text): - """ - Does the text string appear in the filepath file - """ - return os.path.isfile(filepath) and text in open(filepath).read() - -def is_last_process_complete(filepath, expect_text, fail_text): - """ - Search the filepath in reverse order looking for expect_text - before finding fail_text. This utility is used by archive_metadata. - - """ - complete = False - fh = open(filepath, 'r') - fb = fh.readlines() - - rfb = ''.join(reversed(fb)) - - findex = re.search(fail_text, rfb) - if findex is None: - findex = 0 - else: - findex = findex.start() - - eindex = re.search(expect_text, rfb) - if eindex is None: - eindex = 0 - else: - eindex = eindex.start() - - if findex > eindex: - complete = True - - return complete - -def transform_vars(text, case=None, subgroup=None, overrides=None, default=None): - """ - Do the variable substitution for any variables that need transforms - recursively. - - >>> transform_vars("{{ cesm_stdout }}", default="cesm.stdout") - 'cesm.stdout' - >>> member_store = lambda : None - >>> member_store.foo = "hi" - >>> transform_vars("I say {{ foo }}", overrides={"foo":"hi"}) - 'I say hi' - """ - directive_re = re.compile(r"{{ (\w+) }}", flags=re.M) - # loop through directive text, replacing each string enclosed with - # template characters with the necessary values. 
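# The loop below resolves each "{{ name }}" directive using the first source
# that yields a non-None value, in this order:
#   1. the overrides dict (keyed by the lower-case name),
#   2. an attribute of the case object,
#   3. case.get_value() on the upper-case name (optionally within subgroup),
#   4. the supplied default.
# If nothing matches, the directive is removed with a warning, except that an
# unresolvable "-q {{ queue }}" blanks the whole text (no queue configured).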
- while directive_re.search(text): - m = directive_re.search(text) - variable = m.groups()[0] - whole_match = m.group() - if overrides is not None and variable.lower() in overrides and overrides[variable.lower()] is not None: - repl = overrides[variable.lower()] - logger.debug("from overrides: in {}, replacing {} with {}".format(text, whole_match, str(repl))) - text = text.replace(whole_match, str(repl)) - - elif case is not None and hasattr(case, variable.lower()) and getattr(case, variable.lower()) is not None: - repl = getattr(case, variable.lower()) - logger.debug("from case members: in {}, replacing {} with {}".format(text, whole_match, str(repl))) - text = text.replace(whole_match, str(repl)) - - elif case is not None and case.get_value(variable.upper(), subgroup=subgroup) is not None: - repl = case.get_value(variable.upper(), subgroup=subgroup) - logger.debug("from case: in {}, replacing {} with {}".format(text, whole_match, str(repl))) - text = text.replace(whole_match, str(repl)) - - elif default is not None: - logger.debug("from default: in {}, replacing {} with {}".format(text, whole_match, str(default))) - text = text.replace(whole_match, default) - - else: - # If no queue exists, then the directive '-q' by itself will cause an error - if "-q {{ queue }}" in text: - text = "" - else: - logger.warning("Could not replace variable '{}'".format(variable)) - text = text.replace(whole_match, "") - - return text - -def wait_for_unlocked(filepath): - locked = True - file_object = None - while locked: - try: - buffer_size = 8 - # Opening file in append mode and read the first 8 characters. - file_object = open(filepath, 'a', buffer_size) - if file_object: - locked = False - except IOError: - locked = True - time.sleep(1) - finally: - if file_object: - file_object.close() - -def gunzip_existing_file(filepath): - with gzip.open(filepath, "rb") as fd: - return fd.read() - -def gzip_existing_file(filepath): - """ - Gzips an existing file, removes the unzipped version, returns path to zip file. - Note the that the timestamp of the original file will be maintained in - the zipped file. - - >>> import tempfile - >>> fd, filename = tempfile.mkstemp(text=True) - >>> _ = os.write(fd, b"Hello World") - >>> os.close(fd) - >>> gzfile = gzip_existing_file(filename) - >>> gunzip_existing_file(gzfile) == b'Hello World' - True - >>> os.remove(gzfile) - """ - expect(os.path.exists(filepath), "{} does not exists".format(filepath)) - - st = os.stat(filepath) - orig_atime, orig_mtime = st[statlib.ST_ATIME], st[statlib.ST_MTIME] - - gzpath = '{}.gz'.format(filepath) - with open(filepath, "rb") as f_in: - with gzip.open(gzpath, "wb") as f_out: - shutil.copyfileobj(f_in, f_out) - - os.remove(filepath) - - os.utime(gzpath, (orig_atime, orig_mtime)) - - return gzpath - -def touch(fname): - if os.path.exists(fname): - os.utime(fname, None) - else: - open(fname, 'a').close() - -def find_system_test(testname, case): - """ - Find and import the test matching testname - Look through the paths set in config_files.xml variable SYSTEM_TESTS_DIR - for components used in this case to find a test matching testname. Add the - path to that directory to sys.path if its not there and return the test object - Fail if the test is not found in any of the paths. 
- """ - from importlib import import_module - system_test_path = None - if testname.startswith("TEST"): - system_test_path = "CIME.SystemTests.system_tests_common.{}".format(testname) - else: - components = ["any"] - components.extend( case.get_compset_components()) - fdir = [] - for component in components: - tdir = case.get_value("SYSTEM_TESTS_DIR", - attribute={"component":component}) - if tdir is not None: - tdir = os.path.abspath(tdir) - system_test_file = os.path.join(tdir ,"{}.py".format(testname.lower())) - if os.path.isfile(system_test_file): - fdir.append(tdir) - logger.debug( "found "+system_test_file) - if component == "any": - system_test_path = "CIME.SystemTests.{}.{}".format(testname.lower(), testname) - else: - system_test_dir = os.path.dirname(system_test_file) - if system_test_dir not in sys.path: - sys.path.append(system_test_dir) - system_test_path = "{}.{}".format(testname.lower(), testname) - expect(len(fdir) > 0, "Test {} not found, aborting".format(testname)) - expect(len(fdir) == 1, "Test {} found in multiple locations {}, aborting".format(testname, fdir)) - expect(system_test_path is not None, "No test {} found".format(testname)) - - path, m = system_test_path.rsplit('.',1) - mod = import_module(path) - return getattr(mod, m) - -def _get_most_recent_lid_impl(files): - """ - >>> files = ['/foo/bar/e3sm.log.20160905_111212', '/foo/bar/e3sm.log.20160906_111212.gz'] - >>> _get_most_recent_lid_impl(files) - ['20160905_111212', '20160906_111212'] - """ - results = [] - for item in files: - basename = os.path.basename(item) - components = basename.split(".") - if len(components) > 2: - results.append(components[2]) - else: - logger.warning("Apparent model log file '{}' did not conform to expected name format".format(item)) - - return sorted(results) - -def ls_sorted_by_mtime(path): - ''' return list of path sorted by timestamp oldest first''' - mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime - return list(sorted(os.listdir(path), key=mtime)) - -def get_lids(case): - model = case.get_value("MODEL") - rundir = case.get_value("RUNDIR") - return _get_most_recent_lid_impl(glob.glob("{}/{}.log*".format(rundir, model))) - -def new_lid(): - lid = time.strftime("%y%m%d-%H%M%S") - jobid = batch_jobid() - if jobid is not None: - lid = jobid+'.'+lid - os.environ["LID"] = lid - return lid - -def batch_jobid(): - jobid = os.environ.get("PBS_JOBID") - if jobid is None: - jobid = os.environ.get("SLURM_JOB_ID") - if jobid is None: - jobid = os.environ.get("LSB_JOBID") - if jobid is None: - jobid = os.environ.get("COBALT_JOBID") - return jobid - -def analyze_build_log(comp, log, compiler): - """ - Capture and report warning count, - capture and report errors and undefined references. 
- """ - warncnt = 0 - if "intel" in compiler: - warn_re = re.compile(r" warning #") - error_re = re.compile(r" error #") - undefined_re = re.compile(r" undefined reference to ") - elif "gnu" in compiler or "nag" in compiler: - warn_re = re.compile(r"^Warning: ") - error_re = re.compile(r"^Error: ") - undefined_re = re.compile(r" undefined reference to ") - else: - # don't know enough about this compiler - return - - with open(log,"r") as fd: - for line in fd: - if re.search(warn_re, line): - warncnt += 1 - if re.search(error_re, line): - logger.warning(line) - if re.search(undefined_re, line): - logger.warning(line) - - if warncnt > 0: - logger.info("Component {} build complete with {} warnings".format(comp, warncnt)) - -def is_python_executable(filepath): - first_line = None - if os.path.isfile(filepath): - with open(filepath, "rt") as f: - try: - first_line = f.readline() - except Exception: - pass - - return first_line is not None and first_line.startswith("#!") and "python" in first_line - return False - -def get_umask(): - current_umask = os.umask(0) - os.umask(current_umask) - - return current_umask - -def stringify_bool(val): - val = False if val is None else val - expect(type(val) is bool, "Wrong type for val '{}'".format(repr(val))) - return "TRUE" if val else "FALSE" - -def indent_string(the_string, indent_level): - """Indents the given string by a given number of spaces - - Args: - the_string: str - indent_level: int - - Returns a new string that is the same as the_string, except that - each line is indented by 'indent_level' spaces. - - In python3, this can be done with textwrap.indent. - """ - - lines = the_string.splitlines(True) - padding = ' ' * indent_level - lines_indented = [padding + line for line in lines] - return ''.join(lines_indented) - -def verbatim_success_msg(return_val): - return return_val - -CASE_SUCCESS = "success" -CASE_FAILURE = "error" -def run_and_log_case_status(func, phase, caseroot='.', custom_success_msg_functor=None): - append_case_status(phase, "starting", caseroot=caseroot) - rv = None - try: - rv = func() - except BaseException: - e = sys.exc_info()[1] - append_case_status(phase, CASE_FAILURE, msg=("\n{}".format(e)), caseroot=caseroot) - raise - else: - custom_success_msg = custom_success_msg_functor(rv) if custom_success_msg_functor else None - append_case_status(phase, CASE_SUCCESS, msg=custom_success_msg, caseroot=caseroot) - - return rv - -def _check_for_invalid_args(args): - if get_model() != "e3sm": - for arg in args: - # if arg contains a space then it was originally quoted and we can ignore it here. - if " " in arg or arg.startswith("--"): - continue - if arg.startswith("-") and len(arg) > 2: - sys.stderr.write( "WARNING: The {} argument is deprecated. Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options\n".format(arg)) - -def add_mail_type_args(parser): - parser.add_argument("--mail-user", help="Email to be used for batch notification.") - - parser.add_argument("-M", "--mail-type", action="append", - help="When to send user email. 
Options are: never, all, begin, end, fail.\n"
-                        "You can specify multiple types with either comma-separated args or multiple -M flags.")
-
-def resolve_mail_type_args(args):
-    if args.mail_type is not None:
-        resolved_mail_types = []
-        for mail_type in args.mail_type:
-            resolved_mail_types.extend(mail_type.split(","))
-
-        for mail_type in resolved_mail_types:
-            expect(mail_type in ("never", "all", "begin", "end", "fail"),
-                   "Unsupported mail-type '{}'".format(mail_type))
-
-        args.mail_type = resolved_mail_types
-
-def copyifnewer(src, dest):
-    """ if dest does not exist or is older than src, copy src to dest """
-    if not os.path.isfile(dest) or not filecmp.cmp(src, dest):
-        safe_copy(src, dest)
-
-class SharedArea(object):
-    """
-    Enable 0002 umask within this manager
-    """
-
-    def __init__(self, new_perms=0o002):
-        self._orig_umask = None
-        self._new_perms = new_perms
-
-    def __enter__(self):
-        self._orig_umask = os.umask(self._new_perms)
-
-    def __exit__(self, *_):
-        os.umask(self._orig_umask)
-
-class Timeout(object):
-    """
-    A context manager that implements a timeout. By default, it
-    will raise an exception, but a custom function call can be provided.
-    Providing None as seconds makes this class a no-op.
-    """
-    def __init__(self, seconds, action=None):
-        self._seconds = seconds
-        self._action = action if action is not None else self._handle_timeout
-
-    def _handle_timeout(self, *_):
-        raise RuntimeError("Timeout expired")
-
-    def __enter__(self):
-        if self._seconds is not None:
-            signal.signal(signal.SIGALRM, self._action)
-            signal.alarm(self._seconds)
-
-    def __exit__(self, *_):
-        if self._seconds is not None:
-            signal.alarm(0)
-
-def filter_unicode(unistr):
-    """
-    Sometimes unicode chars can cause problems
-    """
-    return "".join([i if ord(i) < 128 else ' ' for i in unistr])
-
-def run_bld_cmd_ensure_logging(cmd, arg_logger, from_dir=None):
-    arg_logger.info(cmd)
-    stat, output, errput = run_cmd(cmd, from_dir=from_dir)
-    arg_logger.info(output)
-    arg_logger.info(errput)
-    expect(stat == 0, filter_unicode(errput))
-
-def get_batch_script_for_job(job):
-    return job if "st_archive" in job else "." 
+ job - -def string_in_list(_string, _list): - """Case insensitive search for string in list - returns the matching list value - >>> string_in_list("Brack",["bar", "bracK", "foo"]) - 'bracK' - >>> string_in_list("foo", ["FFO", "FOO", "foo2", "foo3"]) - 'FOO' - >>> string_in_list("foo", ["FFO", "foo2", "foo3"]) - """ - for x in _list: - if _string.lower() == x.lower(): - return x - return None - -def model_log(model, arg_logger, msg, debug_others=True): - if get_model() == model: - arg_logger.info(msg) - elif debug_others: - arg_logger.debug(msg) diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py deleted file mode 100644 index 22c59dae2f5..00000000000 --- a/scripts/lib/CIME/wait_for_tests.py +++ /dev/null @@ -1,458 +0,0 @@ -#pylint: disable=import-error -from six.moves import queue -import os, time, threading, socket, signal, shutil, glob -#pylint: disable=import-error -from distutils.spawn import find_executable -import logging -import xml.etree.ElementTree as xmlet - -import CIME.utils -from CIME.utils import expect, Timeout, run_cmd_no_fail, safe_copy, CIMEError -from CIME.XML.machines import Machines -from CIME.test_status import * - -SIGNAL_RECEIVED = False -E3SM_MAIN_CDASH = "ACME_Climate" -CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest" -SLEEP_INTERVAL_SEC = .1 - -############################################################################### -def signal_handler(*_): -############################################################################### - global SIGNAL_RECEIVED - SIGNAL_RECEIVED = True - -############################################################################### -def set_up_signal_handlers(): -############################################################################### - signal.signal(signal.SIGTERM, signal_handler) - signal.signal(signal.SIGINT, signal_handler) - -############################################################################### -def get_test_time(test_path): -############################################################################### - ts = TestStatus(test_dir=test_path) - comment = ts.get_comment(RUN_PHASE) - if comment is None or "time=" not in comment: - logging.warning("No run-phase time data found in {}".format(test_path)) - return 0 - else: - time_data = [token for token in comment.split() if token.startswith("time=")][0] - return int(time_data.split("=")[1]) - -############################################################################### -def get_test_output(test_path): -############################################################################### - output_file = os.path.join(test_path, "TestStatus.log") - if (os.path.exists(output_file)): - return open(output_file, 'r').read() - else: - logging.warning("File '{}' not found".format(output_file)) - return "" - -############################################################################### -def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit): -############################################################################### - site_elem = xmlet.Element("Site") - - if ("JENKINS_START_TIME" in os.environ): - time_info_str = "Total testing time: {:d} seconds".format(int(current_time) - int(os.environ["JENKINS_START_TIME"])) - else: - time_info_str = "" - - site_elem.attrib["BuildName"] = cdash_build_name - site_elem.attrib["BuildStamp"] = "{}-{}".format(utc_time, cdash_build_group) - site_elem.attrib["Name"] = hostname - site_elem.attrib["OSName"] = "Linux" - site_elem.attrib["Hostname"] = hostname - 
site_elem.attrib["OSVersion"] = "Commit: {}{}".format(git_commit, time_info_str) - - phase_elem = xmlet.SubElement(site_elem, phase) - - xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time) - xmlet.SubElement(phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase)).text = str(int(current_time)) - - return site_elem, phase_elem - -############################################################################### -def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): -############################################################################### - site_elem, config_elem = create_cdash_xml_boiler("Configure", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) - - xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists" - - config_results = [] - for test_name in sorted(results): - test_status = results[test_name][1] - config_results.append("{} {} Config {}".format("" if test_status != NAMELIST_FAIL_STATUS else "CMake Warning:\n", test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF")) - - xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results) - - xmlet.SubElement(config_elem, "ConfigureStatus").text = "0" - xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now - - etree = xmlet.ElementTree(site_elem) - etree.write(os.path.join(data_rel_path, "Configure.xml")) - -############################################################################### -def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): -############################################################################### - site_elem, build_elem = create_cdash_xml_boiler("Build", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) - - xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build" - - build_results = [] - for test_name in sorted(results): - build_results.append(test_name) - - xmlet.SubElement(build_elem, "Log").text = "\n".join(build_results) - - for idx, test_name in enumerate(sorted(results)): - test_path = results[test_name][0] - test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path) - if get_test_time(test_norm_path) == 0: - error_elem = xmlet.SubElement(build_elem, "Error") - xmlet.SubElement(error_elem, "Text").text = test_name - xmlet.SubElement(error_elem, "BuildLogLine").text = str(idx) - xmlet.SubElement(error_elem, "PreContext").text = test_name - xmlet.SubElement(error_elem, "PostContext").text = "" - xmlet.SubElement(error_elem, "RepeatCount").text = "0" - - xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now - - etree = xmlet.ElementTree(site_elem) - etree.write(os.path.join(data_rel_path, "Build.xml")) - -############################################################################### -def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): -############################################################################### - site_elem, testing_elem = create_cdash_xml_boiler("Testing", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) - - test_list_elem = xmlet.SubElement(testing_elem, "TestList") - for test_name in sorted(results): - xmlet.SubElement(test_list_elem, "Test").text = test_name - - for test_name in 
sorted(results): - test_path, test_status = results[test_name] - test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] - test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path) - - full_test_elem = xmlet.SubElement(testing_elem, "Test") - if test_passed: - full_test_elem.attrib["Status"] = "passed" - elif (test_status == TEST_PEND_STATUS): - full_test_elem.attrib["Status"] = "notrun" - else: - full_test_elem.attrib["Status"] = "failed" - - xmlet.SubElement(full_test_elem, "Name").text = test_name - - xmlet.SubElement(full_test_elem, "Path").text = test_norm_path - - xmlet.SubElement(full_test_elem, "FullName").text = test_name - - xmlet.SubElement(full_test_elem, "FullCommandLine") - # text ? - - results_elem = xmlet.SubElement(full_test_elem, "Results") - - named_measurements = ( - ("text/string", "Exit Code", test_status), - ("text/string", "Exit Value", "0" if test_passed else "1"), - ("numeric_double", "Execution Time", str(get_test_time(test_norm_path))), - ("text/string", "Completion Status", "Not Completed" if test_status == TEST_PEND_STATUS else "Completed"), - ("text/string", "Command line", "create_test") - ) - - for type_attr, name_attr, value in named_measurements: - named_measurement_elem = xmlet.SubElement(results_elem, "NamedMeasurement") - named_measurement_elem.attrib["type"] = type_attr - named_measurement_elem.attrib["name"] = name_attr - - xmlet.SubElement(named_measurement_elem, "Value").text = value - - measurement_elem = xmlet.SubElement(results_elem, "Measurement") - - value_elem = xmlet.SubElement(measurement_elem, "Value") - value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128]) - - xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now - - etree = xmlet.ElementTree(site_elem) - - etree.write(os.path.join(data_rel_path, "Test.xml")) - -############################################################################### -def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname): -############################################################################### - # We assume all cases were created from the same code repo - first_result_case = os.path.dirname(list(results.items())[0][1][0]) - try: - srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case) - except CIMEError: - # Use repo containing this script as last resort - srcroot = CIME.utils.get_cime_root() - - git_commit = CIME.utils.get_current_commit(repo=srcroot) - - data_rel_path = os.path.join("Testing", utc_time) - - create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) - - create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) - - create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) - -############################################################################### -def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload): -############################################################################### - - data_rel_path = os.path.join("Testing", utc_time) - - try: - log_dir = "{}_logs".format(cdash_build_name) - - need_to_upload = False - - for test_name, test_data in results.items(): - test_path, test_status = test_data - - if test_status not in 
[TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] or force_log_upload: - test_case_dir = os.path.dirname(test_path) - ts = TestStatus(test_case_dir) - - build_status = ts.get_status(SHAREDLIB_BUILD_PHASE) - build_status = TEST_FAIL_STATUS if build_status == TEST_FAIL_STATUS else ts.get_status(MODEL_BUILD_PHASE) - run_status = ts.get_status(RUN_PHASE) - baseline_status = ts.get_status(BASELINE_PHASE) - - if build_status == TEST_FAIL_STATUS or run_status == TEST_FAIL_STATUS or baseline_status == TEST_FAIL_STATUS or force_log_upload: - case_dirs = [test_case_dir] - case_base = os.path.basename(test_case_dir) - test_case2_dir = os.path.join(test_case_dir, "case2", case_base) - if os.path.exists(test_case2_dir): - case_dirs.append(test_case2_dir) - - for case_dir in case_dirs: - param = "EXEROOT" if build_status == TEST_FAIL_STATUS else "RUNDIR" - log_src_dir = run_cmd_no_fail("./xmlquery {} --value".format(param), from_dir=case_dir) - - log_dst_dir = os.path.join(log_dir, "{}{}_{}_logs".format(test_name, "" if case_dir == test_case_dir else ".case2", param)) - os.makedirs(log_dst_dir) - for log_file in glob.glob(os.path.join(log_src_dir, "*log*")): - safe_copy(log_file, log_dst_dir) - for log_file in glob.glob(os.path.join(log_src_dir, "*.cprnc.out*")): - safe_copy(log_file, log_dst_dir) - - need_to_upload = True - - if (need_to_upload): - - tarball = "{}.tar.gz".format(log_dir) - if (os.path.exists(tarball)): - os.remove(tarball) - - run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball) - base64 = run_cmd_no_fail("base64 {}".format(tarball)) - - xml_text = \ -r""" - "?> - - - - -{} - - - - -""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64) - - with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd: - fd.write(xml_text) - - finally: - if (os.path.isdir(log_dir)): - shutil.rmtree(log_dir) - -############################################################################### -def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False): -############################################################################### - - # - # Create dart config file - # - - current_time = time.time() - - utc_time_tuple = time.gmtime(current_time) - cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple) - - hostname = Machines().get_machine_name() - if (hostname is None): - hostname = socket.gethostname().split(".")[0] - logging.warning("Could not convert hostname '{}' into an E3SM machine name".format(hostname)) - - dart_config = \ -""" -SourceDirectory: {0} -BuildDirectory: {0} - -# Site is something like machine.domain, i.e. pragmatic.crd -Site: {1} - -# Build name is osname-revision-compiler, i.e. 
Linux-2.4.2-2smp-c++ -BuildName: {2} - -# Submission information -IsCDash: TRUE -CDashVersion: -QueryCDashVersion: -DropSite: my.cdash.org -DropLocation: /submit.php?project={3} -DropSiteUser: -DropSitePassword: -DropSiteMode: -DropMethod: http -TriggerSite: -ScpCommand: {4} - -# Dashboard start time -NightlyStartTime: {5} UTC -""".format(os.getcwd(), hostname, cdash_build_name, cdash_project, - find_executable("scp"), cdash_timestamp) - - with open("DartConfiguration.tcl", "w") as dart_fd: - dart_fd.write(dart_config) - - utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple) - os.makedirs(os.path.join("Testing", utc_time)) - - # Make tag file - with open("Testing/TAG", "w") as tag_fd: - tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group)) - - create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname) - - create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload) - - run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True) - -############################################################################### -def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run): -############################################################################### - if (os.path.isdir(test_path)): - test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME) - else: - test_status_filepath = test_path - - logging.debug("Watching file: '{}'".format(test_status_filepath)) - test_log_path = os.path.join(os.path.dirname(test_status_filepath), ".internal_test_status.log") - - # We don't want to make it a requirement that wait_for_tests has write access - # to all case directories - try: - fd = open(test_log_path, "w") - fd.close() - except (IOError, OSError): - test_log_path = "/dev/null" - - prior_ts = None - with open(test_log_path, "w") as log_fd: - while (True): - if (os.path.exists(test_status_filepath)): - ts = TestStatus(test_dir=os.path.dirname(test_status_filepath)) - test_name = ts.get_name() - test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important - no_run=no_run, - check_throughput=check_throughput, - check_memory=check_memory, ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak) - - if prior_ts is not None and prior_ts != ts: - log_fd.write(ts.phase_statuses_dump()) - log_fd.write("OVERALL: {}\n\n".format(test_status)) - - prior_ts = ts - - if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)): - time.sleep(SLEEP_INTERVAL_SEC) - logging.debug("Waiting for test to finish") - else: - results.put( (test_name, test_path, test_status) ) - break - - else: - if (wait and not SIGNAL_RECEIVED): - logging.debug("File '{}' does not yet exist".format(test_status_filepath)) - time.sleep(SLEEP_INTERVAL_SEC) - else: - test_name = os.path.abspath(test_status_filepath).split("/")[-2] - results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) ) - break - -############################################################################### -def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): -############################################################################### - results = queue.Queue() - - for test_path in test_paths: - t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, 
ignore_memleak, no_run)) - t.daemon = True - t.start() - - while threading.active_count() > 1: - time.sleep(1) - - test_results = {} - completed_test_paths = [] - while (not results.empty()): - test_name, test_path, test_status = results.get() - if (test_name in test_results): - prior_path, prior_status = test_results[test_name] - if (test_status == prior_status): - logging.warning("Test name '{}' was found in both '{}' and '{}'".format(test_name, test_path, prior_path)) - else: - raise CIMEError("Test name '{}' was found in both '{}' and '{}' with different results".format(test_name, test_path, prior_path)) - - test_results[test_name] = (test_path, test_status) - completed_test_paths.append(test_path) - - expect(set(test_paths) == set(completed_test_paths), - "Missing results for test paths: {}".format(set(test_paths) - set(completed_test_paths))) - - return test_results - -############################################################################### -def wait_for_tests(test_paths, - no_wait=False, - check_throughput=False, - check_memory=False, - ignore_namelists=False, - ignore_memleak=False, - cdash_build_name=None, - cdash_project=E3SM_MAIN_CDASH, - cdash_build_group=CDASH_DEFAULT_BUILD_GROUP, - timeout=None, - force_log_upload=False, - no_run=False): -############################################################################### - # Set up signal handling, we want to print results before the program - # is terminated - set_up_signal_handlers() - - with Timeout(timeout, action=signal_handler): - test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run) - - all_pass = True - for test_name, test_data in sorted(test_results.items()): - test_path, test_status = test_data - logging.info("Test '{}' finished with status '{}'".format(test_name, test_status)) - logging.info(" Path: {}".format(test_path)) - all_pass &= test_status == TEST_PASS_STATUS - - if cdash_build_name: - create_cdash_xml(test_results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload) - - return all_pass diff --git a/scripts/lib/e3sm_cime_mgmt.py b/scripts/lib/e3sm_cime_mgmt.py deleted file mode 100644 index a1c8d026c37..00000000000 --- a/scripts/lib/e3sm_cime_mgmt.py +++ /dev/null @@ -1,313 +0,0 @@ -from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_timestamp, CIMEError - -import getpass, logging, os -import stat as osstat - -# Constants -ESMCI_REMOTE_NAME = "esmci_remote_for_split" -ESMCI_URL = "git@github.com:ESMCI/CIME.git" -SPLIT_TAG_PREFIX = "acme-split-" -MERGE_TAG_PREFIX = "to-acme-" - -############################################################################### -def setup(): -############################################################################### - run_cmd_no_fail("git config merge.renameLimit 999999") - run_cmd_no_fail("git checkout master && git pull", verbose=True) - - remotes = run_cmd_no_fail("git remote") - if ESMCI_REMOTE_NAME not in remotes: - run_cmd_no_fail("git remote add {} {}".format(ESMCI_REMOTE_NAME, ESMCI_URL), verbose=True) - - for origin in ["origin", ESMCI_REMOTE_NAME]: - run_cmd_no_fail("git fetch --prune {}".format(origin), verbose=True) - run_cmd_no_fail("git fetch --prune {} --tags".format(origin), verbose=True) - - run_cmd_no_fail("git clean -fd", verbose=True) - -############################################################################### -def get_tag(prefix, expected_num=1): -############################################################################### - tags = 
run_cmd_no_fail("git tag").split() - tags = [tag for tag in tags if tag.startswith(prefix)] - - expect(len(tags) >= expected_num, "Did not see enough {} tags".format(prefix)) - - if expected_num == 1: - return tags[-1] - else: - return tags[-expected_num:] - -############################################################################### -def get_split_tag(expected_num=1): -############################################################################### - return get_tag(SPLIT_TAG_PREFIX, expected_num=expected_num) - -############################################################################### -def get_merge_tag(expected_num=1): -############################################################################### - return get_tag(MERGE_TAG_PREFIX, expected_num=expected_num) - -############################################################################### -def make_new_tag(prefix, old_tag, remote="origin", commit="HEAD"): -############################################################################### - new_tag = "{}{}".format(prefix, get_timestamp(timestamp_format="%Y-%m-%d")) - expect(old_tag != new_tag, "New tag must have different name than old tag") - - run_cmd_no_fail("git tag {} {}".format(new_tag, commit), verbose=True) - run_cmd_no_fail("git push {} {}".format(remote, new_tag), verbose=True) - - return new_tag - -############################################################################### -def make_new_split_tag(old_split_tag): -############################################################################### - return make_new_tag(SPLIT_TAG_PREFIX, old_split_tag) - -############################################################################### -def make_new_merge_tag(old_merge_tag): -############################################################################### - return make_new_tag(MERGE_TAG_PREFIX, old_merge_tag, - remote=ESMCI_REMOTE_NAME, commit="{}/master".format(ESMCI_REMOTE_NAME)) - -############################################################################### -def get_branch_from_tag(tag): -############################################################################### - branch = "{}/branch-for-{}".format(getpass.getuser(), tag) - return branch - -############################################################################### -def do_subtree_split(new_split_tag, merge_tag): -############################################################################### - subtree_branch = get_branch_from_tag(new_split_tag) - run_cmd_no_fail("git subtree split --prefix=cime --onto={} -b {}".\ - format(merge_tag, subtree_branch), verbose=True) - return subtree_branch - -############################################################################### -def touches_file(start_range, end_range, filepath, title, skip=None): -############################################################################### - skip_str = "--grep={} --invert-grep".format(skip) if skip is not None else "" - result = run_cmd_no_fail("git log {} {}..{} -- {}".format(skip_str, start_range, end_range, filepath)) - - if result: - logging.debug(" touched by {} within range {}..{} by commits\n{}".format(title, start_range, end_range, result)) - - return result != "" - -############################################################################### -def reset_file(version, srcpath, dstpath): -############################################################################### - is_exe = os.access(dstpath, os.X_OK) - os.remove(dstpath) - try: - run_cmd_no_fail("git show {}:{} > {}".format(version, srcpath, dstpath)) - except 
CIMEError:
-        # If the above fails, then the file was deleted
-        run_cmd_no_fail("git rm -f {}".format(dstpath))
-    else:
-        if is_exe:
-            os.chmod(dstpath, os.stat(dstpath).st_mode | osstat.S_IXUSR | osstat.S_IXGRP | osstat.S_IXOTH)
-
-        run_cmd_no_fail("git add {}".format(dstpath))
-
-###############################################################################
-def get_last_instance_of(branch_name, head):
-###############################################################################
-    return run_cmd_no_fail("git log --first-parent {} --grep='{}' -1 --oneline".format(head, branch_name)).split()[0]
-
-###############################################################################
-def handle_easy_conflict(dst_filepath, is_merge):
-###############################################################################
-    """
-    src = repo we are coming from
-    dst = repo we are merging something into
-
-    for a "split"
-      src = e3sm
-      dst = cime
-
-    for a "merge"
-      src = cime
-      dst = e3sm
-    """
-    src_tag_prefix = MERGE_TAG_PREFIX if is_merge else SPLIT_TAG_PREFIX
-    dst_tag_prefix = SPLIT_TAG_PREFIX if is_merge else MERGE_TAG_PREFIX
-
-    # For split, src_tag_prefix = acme-split-, dst_tag_prefix = to-acme-
-
-    src_branch_prefix = "branch-for-{}".format(dst_tag_prefix) # NOTE: opposite of tag
-    dst_branch_prefix = "branch-for-{}".format(src_tag_prefix) # NOTE: opposite of tag
-
-    # For split, dst_branch_prefix = branch-for-acme-split
-
-    # we can't use ORIG_HEAD for src_head for splits
-    # because ORIG_HEAD has been re-written by subtree, making the tag
-    # we want to use useless in a range operation
-    src_head = "MERGE_HEAD" if is_merge else "origin/master"
-    dst_head = "ORIG_HEAD" if is_merge else "MERGE_HEAD"
-
-    # For split, src_head = origin/master, dst_head = MERGE_HEAD
-
-    src_filepath = dst_filepath.replace("cime/", "", 1) if is_merge else os.path.join("cime", dst_filepath)
-
-    # For split, src_filepath = cime/xxx/yyy, dst_filepath = xxx/yyy
-
-    # There's no tag for last dst operation
-    try:
-        last_dst_operation = get_last_instance_of(dst_branch_prefix, dst_head)
-    except Exception as e:
-        logging.warning("Could not get most recent merge for branch {}, {}".format(dst_branch_prefix, e))
-        return False
-
-    # Use tag for last src operation
-    last_src_operation = get_tag(src_tag_prefix, expected_num=2)[0]
-
-    logging.info("Examining file {} ...".format(dst_filepath))
-
-    if not touches_file(last_dst_operation, dst_head, dst_filepath, "dst"):
-        logging.info("  File '{}' appears to have had no recent dst mods, setting to src".format(dst_filepath))
-        reset_file(src_head, src_filepath, dst_filepath)
-        return True
-    # We don't want to pick up the last dst->src operation as a src modification of this file
-    elif not touches_file(last_src_operation, src_head, src_filepath, "src", skip=src_branch_prefix):
-        logging.info("  File '{}' appears to have had no recent src mods, setting to dst".format(src_filepath))
-        reset_file(dst_head, dst_filepath, dst_filepath)
-        return True
-    else:
-        logging.info("  File '{}' appears to have real conflicts".format(dst_filepath))
-        return False
-
-###############################################################################
-def handle_easy_conflicts(is_merge):
-###############################################################################
-    conflicting_files = run_cmd_no_fail("git diff --name-only --diff-filter=U").splitlines()
-    if not conflicting_files:
-        expect(False, "Merge appears to have failed for reasons other than merge conflicts")
-
-    rv = []
-    for conflicting_file in conflicting_files:
-        able_to_handle = handle_easy_conflict(conflicting_file, is_merge)
-        if not able_to_handle:
-            rv.append(conflicting_file)
-
-    return rv
-
-###############################################################################
-def handle_conflicts(is_merge=False, auto_conf=False):
-###############################################################################
-    logging.info("There are conflicts, analyzing...")
-    remaining_conflicts = handle_easy_conflicts(is_merge) if auto_conf else True
-    if remaining_conflicts:
-        expect(False, "There are merge conflicts. Please fix, commit, and re-run this tool with --resume")
-    else:
-        logging.info("All conflicts were automatically resolved, continuing")
-        run_cmd_no_fail("git commit --no-edit")
-
-###############################################################################
-def do_subtree_pull(squash=False, auto_conf=False):
-###############################################################################
-    stat = run_cmd("git subtree pull {} --prefix=cime {} master".format("--squash" if squash else "", ESMCI_REMOTE_NAME),
-                   verbose=True)[0]
-    if stat != 0:
-        handle_conflicts(is_merge=True, auto_conf=auto_conf)
-
-###############################################################################
-def make_pr_branch(branch, branch_head):
-###############################################################################
-    run_cmd_no_fail("git checkout --no-track -b {} {}".format(branch, branch_head), verbose=True)
-
-    return branch
-
-###############################################################################
-def merge_branch(branch, squash=False, auto_conf=False):
-###############################################################################
-    stat = run_cmd("git merge {} -m 'Merge {branch}' -X rename-threshold=25 {branch}".format("--squash" if squash else "", branch=branch),
-                   verbose=True)[0]
-    if stat != 0:
-        handle_conflicts(auto_conf=auto_conf)
-
-###############################################################################
-def delete_tag(tag, remote="origin"):
-###############################################################################
-    run_cmd_no_fail("git tag -d {}".format(tag), verbose=True)
-    run_cmd_no_fail("git push {} :refs/tags/{}".format(remote, tag), verbose=True)
-
-###############################################################################
-def e3sm_cime_split(resume, squash=False, auto_conf=False):
-###############################################################################
-    if not resume:
-        setup()
-
-        old_split_tag = get_split_tag()
-
-        try:
-            new_split_tag = make_new_split_tag(old_split_tag)
-
-            merge_tag = get_merge_tag()
-
-            pr_branch = do_subtree_split(new_split_tag, merge_tag)
-
-            run_cmd_no_fail("git checkout {}".format(pr_branch), verbose=True)
-        except BaseException:
-            # If unexpected failure happens, delete new split tag
-            logging.info("Abandoning split due to unexpected failure")
-            delete_tag(new_split_tag)
-            raise
-
-        # upstream merge, potential conflicts
-        merge_branch("{}/master".format(ESMCI_REMOTE_NAME), squash=squash, auto_conf=auto_conf)
-
-    else:
-        old_split_tag, new_split_tag = get_split_tag(expected_num=2)
-        logging.info("Resuming split with old tag {} and new tag {}".format(old_split_tag, new_split_tag))
-        pr_branch = get_branch_from_tag(new_split_tag)
-
-    run_cmd_no_fail("git push -u {} {}".format(ESMCI_REMOTE_NAME, pr_branch), verbose=True)
-
-###############################################################################
-def e3sm_cime_merge(resume, squash=False, 
auto_conf=False): -############################################################################### - if not resume: - setup() - - old_merge_tag = get_merge_tag() - - try: - new_merge_tag = make_new_merge_tag(old_merge_tag) - - pr_branch = make_pr_branch(get_branch_from_tag(new_merge_tag), "origin/master") - except BaseException: - logging.info("Abandoning merge due to unexpected failure") - delete_tag(new_merge_tag, remote=ESMCI_REMOTE_NAME) - raise - - # potential conflicts - do_subtree_pull(squash=squash, auto_conf=auto_conf) - - else: - old_merge_tag, new_merge_tag = get_merge_tag(expected_num=2) - logging.info("Resuming merge with old tag {} and new tag {}".format(old_merge_tag, new_merge_tag)) - pr_branch = get_branch_from_tag(new_merge_tag) - - run_cmd_no_fail("git push -u origin {}".format(pr_branch), verbose=True) - -############################################################################### -def abort_split(): -############################################################################### - new_split_tag = get_split_tag() - pr_branch = get_branch_from_tag(new_split_tag) - delete_tag(new_split_tag) - run_cmd_no_fail("git reset --hard origin/master", verbose=True) - run_cmd_no_fail("git checkout master", verbose=True) - run_cmd("git branch -D {}".format(pr_branch), verbose=True) - -############################################################################### -def abort_merge(): -############################################################################### - new_merge_tag = get_merge_tag() - pr_branch = get_branch_from_tag(new_merge_tag) - delete_tag(new_merge_tag, remote=ESMCI_REMOTE_NAME) - run_cmd_no_fail("git reset --hard origin/master", verbose=True) - run_cmd_no_fail("git checkout master", verbose=True) - run_cmd("git branch -D {}".format(pr_branch), verbose=True) diff --git a/scripts/lib/get_tests.py b/scripts/lib/get_tests.py deleted file mode 100644 index 755165387db..00000000000 --- a/scripts/lib/get_tests.py +++ /dev/null @@ -1,394 +0,0 @@ -import CIME.utils -from CIME.utils import expect, convert_to_seconds, parse_test_name, get_cime_root, get_model -from CIME.XML.machines import Machines -import six, sys, os - -# Expect that, if a model wants to use python-based test lists, they will have a file -# config/$model/tests.py , containing a test dictionary called _TESTS - -sys.path.insert(0, os.path.join(get_cime_root(), "config", get_model())) -_ALL_TESTS = {} -try: - from tests import _TESTS # pylint: disable=import-error - _ALL_TESTS.update(_TESTS) -except ImportError: - pass - -# Here are the tests belonging to cime suites. Format for individual tests is -# ..[.] -# -# suite_name : { -# "inherit" : (suite1, suite2, ...), # Optional. Suites to inherit tests from. Default is None. Tuple, list, or str. -# "time" : "HH:MM:SS", # Optional. Recommended upper-limit on test time. -# "share" : True|False, # Optional. If True, all tests in this suite share a build. Default is False. -# "tests" : (test1, test2, ...) # Optional. The list of tests for this suite. See above for format. Tuple, list, or str. This is the ONLY inheritable attribute. 
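# (For example, a hypothetical suite {"inherit" : "cime_tiny", "tests" : ("SMS.f19_g16_rx1.A",)}
#  would expand to its own test plus ERS.f19_g16_rx1.A and NCK.f19_g16_rx1.A inherited from cime_tiny.)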
-# } - -_CIME_TESTS = { - - "cime_tiny" : { - "time" : "0:10:00", - "tests" : ( - "ERS.f19_g16_rx1.A", - "NCK.f19_g16_rx1.A", - ) - }, - - "cime_test_only_pass" : { - "time" : "0:10:00", - "tests" : ( - "TESTRUNPASS_P1.f19_g16_rx1.A", - "TESTRUNPASS_P1.ne30_g16_rx1.A", - "TESTRUNPASS_P1.f45_g37_rx1.A", - ) - }, - - "cime_test_only_slow_pass" : { - "time" : "0:10:00", - "tests" : ( - "TESTRUNSLOWPASS_P1.f19_g16_rx1.A", - "TESTRUNSLOWPASS_P1.ne30_g16_rx1.A", - "TESTRUNSLOWPASS_P1.f45_g37_rx1.A", - ) - }, - - "cime_test_only" : { - "time" : "0:10:00", - "tests" : ( - "TESTBUILDFAIL_P1.f19_g16_rx1.A", - "TESTBUILDFAILEXC_P1.f19_g16_rx1.A", - "TESTRUNFAIL_P1.f19_g16_rx1.A", - "TESTRUNSTARCFAIL_P1.f19_g16_rx1.A", - "TESTRUNFAILEXC_P1.f19_g16_rx1.A", - "TESTRUNPASS_P1.f19_g16_rx1.A", - "TESTTESTDIFF_P1.f19_g16_rx1.A", - "TESTMEMLEAKFAIL_P1.f09_g16.X", - "TESTMEMLEAKPASS_P1.f09_g16.X", - ) - }, - - "cime_test_all" : { - "inherit" : "cime_test_only", - "time" : "0:10:00", - "tests" : "TESTRUNDIFF_P1.f19_g16_rx1.A" - }, - - "cime_test_share" : { - "time" : "0:10:00", - "share" : True, - "tests" : ( - "SMS_P2.f19_g16_rx1.A", - "SMS_P4.f19_g16_rx1.A", - "SMS_P8.f19_g16_rx1.A", - "SMS_P16.f19_g16_rx1.A", - ) - }, - - "cime_test_share2" : { - "time" : "0:10:00", - "share" : True, - "tests" : ( - "SMS_P2.f19_g16_rx1.X", - "SMS_P4.f19_g16_rx1.X", - "SMS_P8.f19_g16_rx1.X", - "SMS_P16.f19_g16_rx1.X", - ) - }, - - "cime_test_repeat" : { - "tests" : ( - "TESTRUNPASS_P1.f19_g16_rx1.A", - "TESTRUNPASS_P2.ne30_g16_rx1.A", - "TESTRUNPASS_P4.f45_g37_rx1.A", - ) - }, - - "cime_test_time" : { - "time" : "0:13:00", - "tests" : ( - "TESTRUNPASS_P69.f19_g16_rx1.A.testmod", - ) - }, - - "cime_test_multi_inherit" : { - "inherit" : ("cime_test_repeat", "cime_test_only_pass", "cime_test_all") - }, - - "cime_developer" : { - "time" : "0:15:00", - "tests" : ( - "NCK_Ld3.f45_g37_rx1.A", - "ERI.f09_g16.X", - "ERIO.f09_g16.X", - "SEQ_Ln9.f19_g16_rx1.A", - "ERS.ne30_g16_rx1.A.drv-y100k", - "IRT_N2.f19_g16_rx1.A", - "ERR.f45_g37_rx1.A", - "ERP.f45_g37_rx1.A", - "SMS_D_Ln9_Mmpi-serial.f19_g16_rx1.A", - "DAE.ww3a.ADWAV", - "PET_P4.f19_f19.A", - "PEM_P4.f19_f19.A", - "SMS.T42_T42.S", - "PRE.f19_f19.ADESP", - "PRE.f19_f19.ADESP_TEST", - "MCC_P1.f19_g16_rx1.A", - "LDSTA.f45_g37_rx1.A", - ) - }, - -} - -_ALL_TESTS.update(_CIME_TESTS) - -############################################################################### -def _get_key_data(raw_dict, key, the_type): -############################################################################### - if key not in raw_dict: - if the_type is tuple: - return () - elif the_type is str: - return None - elif the_type is bool: - return False - else: - expect(False, "Unsupported type {}".format(the_type)) - else: - val = raw_dict[key] - if the_type is tuple and isinstance(val, six.string_types): - val = (val, ) - - expect(isinstance(val, the_type), - "Wrong type for {}, {} is a {} but expected {}".format(key, val, type(val), the_type)) - - return val - -############################################################################### -def get_test_data(suite): -############################################################################### - """ - For a given suite, returns (inherit, time, share, tests) - """ - raw_dict = _ALL_TESTS[suite] - for key in raw_dict.keys(): - expect(key in ["inherit", "time", "share", "tests"], "Unexpected test key '{}'".format(key)) - - return _get_key_data(raw_dict, "inherit", tuple), _get_key_data(raw_dict, "time", str), _get_key_data(raw_dict, "share", bool), 
_get_key_data(raw_dict, "tests", tuple) - -############################################################################### -def get_test_suites(): -############################################################################### - return list(_ALL_TESTS.keys()) - -############################################################################### -def get_test_suite(suite, machine=None, compiler=None, skip_inherit=False): -############################################################################### - """ - Return a list of FULL test names for a suite. - """ - expect(suite in get_test_suites(), "Unknown test suite: '{}'".format(suite)) - machobj = Machines(machine=machine) - machine = machobj.get_machine_name() - - if(compiler is None): - compiler = machobj.get_default_compiler() - expect(machobj.is_valid_compiler(compiler),"Compiler {} not valid for machine {}".format(compiler,machine)) - - inherits_from, _, _, tests_raw = get_test_data(suite) - tests = [] - for item in tests_raw: - expect(isinstance(item, six.string_types), "Bad type of test {}, expected string".format(item)) - - test_mod = None - test_components = item.split(".") - expect(len(test_components) in [3, 4], "Bad test name {}".format(item)) - - if (len(test_components) == 4): - test_name = ".".join(test_components[:-1]) - test_mod = test_components[-1] - else: - test_name = item - - tests.append(CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler, testmod=test_mod)) - - if not skip_inherit: - for inherits in inherits_from: - inherited_tests = get_test_suite(inherits, machine, compiler) - - for inherited_test in inherited_tests: - if inherited_test not in tests: - tests.append(inherited_test) - - return tests - -############################################################################### -def suite_has_test(suite, test_full_name, skip_inherit=False): -############################################################################### - _, _, _, _, machine, compiler, _ = CIME.utils.parse_test_name(test_full_name) - expect(machine is not None, "{} is not a full test name".format(test_full_name)) - - tests = get_test_suite(suite, machine=machine, compiler=compiler, skip_inherit=skip_inherit) - return test_full_name in tests - -############################################################################### -def get_build_groups(tests): -############################################################################### - """ - Given a list of tests, return a list of lists, with each list representing - a group of tests that can share executables. - - >>> tests = ["SMS_P2.f19_g16_rx1.A.melvin_gnu", "SMS_P4.f19_g16_rx1.A.melvin_gnu", "SMS_P2.f19_g16_rx1.X.melvin_gnu", "SMS_P4.f19_g16_rx1.X.melvin_gnu", "TESTRUNSLOWPASS_P1.f19_g16_rx1.A.melvin_gnu", "TESTRUNSLOWPASS_P1.ne30_g16_rx1.A.melvin_gnu"] - >>> get_build_groups(tests) - [('SMS_P2.f19_g16_rx1.A.melvin_gnu', 'SMS_P4.f19_g16_rx1.A.melvin_gnu'), ('SMS_P2.f19_g16_rx1.X.melvin_gnu', 'SMS_P4.f19_g16_rx1.X.melvin_gnu'), ('TESTRUNSLOWPASS_P1.f19_g16_rx1.A.melvin_gnu',), ('TESTRUNSLOWPASS_P1.ne30_g16_rx1.A.melvin_gnu',)] - """ - build_groups = [] # list of tuples ([tests], set(suites)) - - # Get a list of suites that share exes - suites = get_test_suites() - share_suites = [] - for suite in suites: - share = get_test_data(suite)[2] - if share: - share_suites.append(suite) - - # Divide tests up into build groups. 
Assumes that build-compatibility is transitive - for test in tests: - matched = False - - my_share_suites = set() - for suite in share_suites: - if suite_has_test(suite, test, skip_inherit=True): - my_share_suites.add(suite) - - # Try to match this test with an existing build group - if my_share_suites: - for build_group_tests, build_group_suites in build_groups: - overlap = build_group_suites & my_share_suites - if overlap: - matched = True - build_group_tests.append(test) - build_group_suites.update(my_share_suites) - break - - # Nothing matched, this test is in a build group of its own - if not matched: - build_groups.append(([test], my_share_suites)) - - return [tuple(item[0]) for item in build_groups] - -############################################################################### -def infer_machine_name_from_tests(testargs): -############################################################################### - """ - >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A.melvin_gnu"]) - 'melvin' - >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A"]) - >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A", "NCK.f19_g16_rx1.A.melvin_gnu"]) - 'melvin' - >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A.melvin_gnu", "NCK.f19_g16_rx1.A.melvin_gnu"]) - 'melvin' - """ - e3sm_test_suites = get_test_suites() - - machine = None - for testarg in testargs: - testarg = testarg.strip() - if testarg.startswith("^"): - testarg = testarg[1:] - - if testarg not in e3sm_test_suites: - machine_for_this_test = parse_test_name(testarg)[4] - if machine_for_this_test is not None: - if machine is None: - machine = machine_for_this_test - else: - expect(machine == machine_for_this_test, "Must have consistent machine '%s' != '%s'" % (machine, machine_for_this_test)) - - return machine - -############################################################################### -def get_full_test_names(testargs, machine, compiler): -############################################################################### - """ - Return full test names in the form: - TESTCASE.GRID.COMPSET.MACHINE_COMPILER.TESTMODS - Testmods are optional - - Testargs can be categories or test names and support the NOT symbol '^' - - >>> get_full_test_names(["cime_tiny"], "melvin", "gnu") - ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu'] - - >>> get_full_test_names(["cime_tiny", "PEA_P1_M.f45_g37_rx1.A"], "melvin", "gnu") - ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu'] - - >>> get_full_test_names(['ERS.f19_g16_rx1.A', 'NCK.f19_g16_rx1.A', 'PEA_P1_M.f45_g37_rx1.A'], "melvin", "gnu") - ['ERS.f19_g16_rx1.A.melvin_gnu', 'NCK.f19_g16_rx1.A.melvin_gnu', 'PEA_P1_M.f45_g37_rx1.A.melvin_gnu'] - - >>> get_full_test_names(["cime_tiny", "^NCK.f19_g16_rx1.A"], "melvin", "gnu") - ['ERS.f19_g16_rx1.A.melvin_gnu'] - - >>> get_full_test_names(["cime_test_multi_inherit"], "melvin", "gnu") - ['TESTBUILDFAILEXC_P1.f19_g16_rx1.A.melvin_gnu', 'TESTBUILDFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTMEMLEAKFAIL_P1.f09_g16.X.melvin_gnu', 'TESTMEMLEAKPASS_P1.f09_g16.X.melvin_gnu', 'TESTRUNDIFF_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNFAILEXC_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNFAIL_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.f19_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.f45_g37_rx1.A.melvin_gnu', 'TESTRUNPASS_P1.ne30_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P2.ne30_g16_rx1.A.melvin_gnu', 'TESTRUNPASS_P4.f45_g37_rx1.A.melvin_gnu', 'TESTRUNSTARCFAIL_P1.f19_g16_rx1.A.melvin_gnu', 
'TESTTESTDIFF_P1.f19_g16_rx1.A.melvin_gnu'] - """ - expect(machine is not None, "Must define a machine") - expect(compiler is not None, "Must define a compiler") - e3sm_test_suites = get_test_suites() - - tests_to_run = set() - negations = set() - - for testarg in testargs: - # remove any whitespace in name - testarg = testarg.strip() - if (testarg.startswith("^")): - negations.add(testarg[1:]) - elif (testarg in e3sm_test_suites): - tests_to_run.update(get_test_suite(testarg, machine, compiler)) - else: - try: - tests_to_run.add(CIME.utils.get_full_test_name(testarg, machine=machine, compiler=compiler)) - except Exception: - if "." not in testarg: - expect(False, "Unrecognized test suite '{}'".format(testarg)) - else: - raise - - for negation in negations: - if (negation in e3sm_test_suites): - tests_to_run -= set(get_test_suite(negation, machine, compiler)) - else: - fullname = CIME.utils.get_full_test_name(negation, machine=machine, compiler=compiler) - if (fullname in tests_to_run): - tests_to_run.remove(fullname) - - return list(sorted(tests_to_run)) - -############################################################################### -def get_recommended_test_time(test_full_name): -############################################################################### - """ - >>> get_recommended_test_time("ERS.f19_g16_rx1.A.melvin_gnu") - '0:10:00' - - >>> get_recommended_test_time("TESTRUNPASS_P69.f19_g16_rx1.A.melvin_gnu.testmod") - '0:13:00' - - >>> get_recommended_test_time("PET_Ln20.ne30_ne30.FC5.sandiatoss3_intel.cam-outfrq9s") - >>> - """ - best_time = None - suites = get_test_suites() - for suite in suites: - rec_time = get_test_data(suite)[1] - if suite_has_test(suite, test_full_name, skip_inherit=True) and rec_time is not None and \ - (best_time is None or convert_to_seconds(rec_time) < convert_to_seconds(best_time)): - best_time = rec_time - - return best_time - -############################################################################### -def key_test_time(test_full_name): -############################################################################### - result = get_recommended_test_time(test_full_name) - return 99999999 if result is None else convert_to_seconds(result) diff --git a/scripts/lib/jenkins_generic_job.py b/scripts/lib/jenkins_generic_job.py deleted file mode 100644 index 381450d3de2..00000000000 --- a/scripts/lib/jenkins_generic_job.py +++ /dev/null @@ -1,245 +0,0 @@ -import CIME.wait_for_tests -from CIME.utils import expect, run_cmd_no_fail -from CIME.case import Case - -import os, shutil, glob, signal, logging, threading, sys, re, tarfile - -############################################################################## -def cleanup_queue(test_root, test_id): -############################################################################### - """ - Delete all jobs left in the queue - """ - for teststatus_file in glob.iglob("{}/*{}*/TestStatus".format(test_root, test_id)): - case_dir = os.path.dirname(teststatus_file) - with Case(case_dir, read_only=True) as case: - jobmap = case.get_job_info() - jobkills = [] - for jobname, jobid in jobmap.items(): - logging.warning("Found leftover batch job {} ({}) that need to be deleted".format(jobid, jobname)) - jobkills.append(jobid) - - case.cancel_batch_jobs(jobkills) - -############################################################################### -def delete_old_test_data(mach_comp, test_id_root, scratch_root, test_root, run_area, build_area, archive_area, avoid_test_id): 
-############################################################################### - # Remove old dirs - for clutter_area in [scratch_root, test_root, run_area, build_area, archive_area]: - for old_file in glob.glob("{}/*{}*{}*".format(clutter_area, mach_comp, test_id_root)): - if avoid_test_id not in old_file: - logging.info("TEST ARCHIVER: Removing {}".format(old_file)) - if (os.path.isdir(old_file)): - shutil.rmtree(old_file) - else: - os.remove(old_file) - -############################################################################### -def scan_for_test_ids(old_test_archive, mach_comp, test_id_root): -############################################################################### - results = set([]) - test_id_re = re.compile(".+[.]([^.]+)") - for item in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, "old_cases", mach_comp, test_id_root)): - filename = os.path.basename(item) - the_match = test_id_re.match(filename) - if the_match: - test_id = the_match.groups()[0] - results.add(test_id) - - return list(results) - -############################################################################### -def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive, avoid_test_id): -############################################################################### - - # Remove old cs.status, cs.submit. I don't think there's any value to leaving these around - # or archiving them - for old_cs_file in glob.glob("{}/cs.*".format(scratch_root)): - if avoid_test_id not in old_cs_file: - logging.info("TEST ARCHIVER: Removing {}".format(old_cs_file)) - os.remove(old_cs_file) - - # Remove the old CTest XML, same reason as above - if (os.path.isdir("Testing")): - logging.info("TEST ARCHIVER: Removing {}".format(os.path.join(os.getcwd(), "Testing"))) - shutil.rmtree("Testing") - - if not os.path.exists(old_test_archive): - os.mkdir(old_test_archive) - - # Archive old data by looking at old test cases - for old_case in glob.glob("{}/*{}*{}*".format(test_root, mach_comp, test_id_root)): - if avoid_test_id not in old_case: - logging.info("TEST ARCHIVER: archiving case {}".format(old_case)) - exeroot, rundir, archdir = run_cmd_no_fail("./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case).split(",") - - for the_dir, target_area in [(exeroot, "old_builds"), (rundir, "old_runs"), (archdir, "old_archives"), (old_case, "old_cases")]: - if os.path.exists(the_dir): - logging.info("TEST ARCHIVER: archiving {} to {}".format(the_dir, os.path.join(old_test_archive, target_area))) - if not os.path.exists(os.path.join(old_test_archive, target_area)): - os.mkdir(os.path.join(old_test_archive, target_area)) - - old_case_name = os.path.basename(old_case) - with tarfile.open(os.path.join(old_test_archive, target_area, "{}.tar.gz".format(old_case_name)), "w:gz") as tfd: - tfd.add(the_dir, arcname=old_case_name) - - shutil.rmtree(the_dir) - - # Remove parent dir if it's empty - parent_dir = os.path.dirname(the_dir) - if not os.listdir(parent_dir) or os.listdir(parent_dir) == ["case2_output_root"]: - shutil.rmtree(parent_dir) - - # Check size of archive - bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]) - bytes_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA") * 1000000000 - if bytes_of_old_test_data > bytes_allowed: - logging.info("TEST ARCHIVER: Too much test data, {}GB (actual) > {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) - old_test_ids = scan_for_test_ids(old_test_archive, 
mach_comp, test_id_root) - for old_test_id in sorted(old_test_ids): - logging.info("TEST ARCHIVER: Removing old data for test {}".format(old_test_id)) - for item in ["old_cases", "old_builds", "old_runs", "old_archives"]: - for dir_to_rm in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, item, mach_comp, old_test_id)): - logging.info("TEST ARCHIVER: Removing {}".format(dir_to_rm)) - if (os.path.isdir(dir_to_rm)): - shutil.rmtree(dir_to_rm) - else: - os.remove(dir_to_rm) - - bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]) - if bytes_of_old_test_data < bytes_allowed: - break - - else: - logging.info("TEST ARCHIVER: Test data is within accepted bounds, {}GB (actual) < {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) - -############################################################################### -def handle_old_test_data(machine, compiler, test_id_root, scratch_root, test_root, avoid_test_id): -############################################################################### - run_area = os.path.dirname(os.path.dirname(machine.get_value("RUNDIR"))) # Assumes XXX/$CASE/run - build_area = os.path.dirname(os.path.dirname(machine.get_value("EXEROOT"))) # Assumes XXX/$CASE/build - archive_area = os.path.dirname(machine.get_value("DOUT_S_ROOT")) # Assumes XXX/archive/$CASE - old_test_archive = os.path.join(scratch_root, "old_test_archive") - - mach_comp = "{}_{}".format(machine.get_machine_name(), compiler) - - try: - archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive, avoid_test_id) - except Exception: - logging.warning("TEST ARCHIVER: Archiving of old test data FAILED: {}\nDeleting data instead".format(sys.exc_info()[1])) - delete_old_test_data(mach_comp, test_id_root, scratch_root, test_root, run_area, build_area, archive_area, avoid_test_id) - -############################################################################### -def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, - baseline_name, - arg_cdash_build_name, cdash_project, - arg_test_suite, - cdash_build_group, baseline_compare, - scratch_root, parallel_jobs, walltime, - machine, compiler, real_baseline_name, baseline_root): -############################################################################### - """ - Return True if all tests passed - """ - use_batch = machine.has_batch_system() and not no_batch - test_suite = machine.get_value("TESTS") - proxy = machine.get_value("PROXY") - test_suite = test_suite if arg_test_suite is None else arg_test_suite - test_root = os.path.join(scratch_root, "J") - - if (use_batch): - batch_system = machine.get_value("BATCH_SYSTEM") - expect(batch_system is not None, "Bad XML. Batch machine has no batch_system configuration.") - - # - # Env changes - # - - if (submit_to_cdash and proxy is not None): - os.environ["http_proxy"] = proxy - - if (not os.path.isdir(scratch_root)): - os.makedirs(scratch_root) - - # Important, need to set up signal handlers before we officially - # kick off tests. We don't want this process getting killed outright - # since it's critical that the cleanup in the finally block gets run - CIME.wait_for_tests.set_up_signal_handlers() - - # - # Clean up leftovers from previous run of jenkins_generic_job. This will - # break the previous run of jenkins_generic_job if it's still running. Set up - # the Jenkins jobs with timeouts to avoid this. 
- # - - test_id_root = "J{}{}".format(baseline_name.capitalize(), test_suite.replace("e3sm_", "").capitalize()) - test_id = "%s%s" % (test_id_root, CIME.utils.get_timestamp()) - archiver_thread = threading.Thread(target=handle_old_test_data, args=(machine, compiler, test_id_root, scratch_root, test_root, test_id)) - archiver_thread.start() - - # - # Set up create_test command and run it - # - - create_test_args = [test_suite, "--test-root %s" % test_root, "-t %s" % test_id, "--machine %s" % machine.get_machine_name(), "--compiler %s" % compiler] - if (generate_baselines): - create_test_args.append("-g -b " + real_baseline_name) - elif (baseline_compare): - create_test_args.append("-c -b " + real_baseline_name) - - if scratch_root != machine.get_value("CIME_OUTPUT_ROOT"): - create_test_args.append("--output-root=" + scratch_root) - - if no_batch: - create_test_args.append("--no-batch") - - if parallel_jobs is not None: - create_test_args.append("-j {:d}".format(parallel_jobs)) - - if walltime is not None: - create_test_args.append(" --walltime " + walltime) - - if baseline_root is not None: - create_test_args.append(" --baseline-root " + baseline_root) - - create_test_cmd = "./create_test " + " ".join(create_test_args) - - if (not CIME.wait_for_tests.SIGNAL_RECEIVED): - create_test_stat = CIME.utils.run_cmd(create_test_cmd, from_dir=CIME.utils.get_scripts_root(), - verbose=True, arg_stdout=None, arg_stderr=None)[0] - # Create_test should have either passed, detected failing tests, or timed out - expect(create_test_stat in [0, CIME.utils.TESTS_FAILED_ERR_CODE, -signal.SIGTERM], - "Create_test script FAILED with error code '{:d}'!".format(create_test_stat)) - - # - # Wait for tests - # - - if (submit_to_cdash): - cdash_build_name = "_".join([test_suite, baseline_name, compiler]) if arg_cdash_build_name is None else arg_cdash_build_name - else: - cdash_build_name = None - - os.environ["CIME_MACHINE"] = machine.get_machine_name() - - if submit_to_cdash: - logging.info("To resubmit to dashboard: wait_for_tests {}/*{}/TestStatus --no-wait -b {}".format(test_root, test_id, cdash_build_name)) - - tests_passed = CIME.wait_for_tests.wait_for_tests(glob.glob("{}/*{}/TestStatus".format(test_root, test_id)), - no_wait=not use_batch, # wait if using queue - check_throughput=False, # don't check throughput - check_memory=False, # don't check memory - ignore_namelists=False, # don't ignore namelist diffs - cdash_build_name=cdash_build_name, - cdash_project=cdash_project, - cdash_build_group=cdash_build_group) - - logging.info("TEST ARCHIVER: Waiting for archiver thread") - archiver_thread.join() - logging.info("TEST ARCHIVER: Waiting for archiver finished") - - if use_batch and CIME.wait_for_tests.SIGNAL_RECEIVED: - # Cleanup - cleanup_queue(test_root, test_id) - - return tests_passed diff --git a/scripts/lib/six.py b/scripts/lib/six.py deleted file mode 100644 index a0297d7113d..00000000000 --- a/scripts/lib/six.py +++ /dev/null @@ -1,890 +0,0 @@ -# Copyright (c) 2010-2017 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be 
included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Utilities for writing code that runs on Python 2 and 3""" - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson " -__version__ = "1.11.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. 
- delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. 
- - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("getoutput", "commands", "subprocess"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - 
MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("splitvalue", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - 
"moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), - MovedAttribute("parse_http_list", "urllib2", "urllib.request"), - MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - 
-class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - 
callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - try: - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - finally: - value = None - tb = None - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, 
_locs_""") - - exec_("""def reraise(tp, value, tb=None): - try: - raise tp, value, tb - finally: - tb = None -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - try: - if from_value is None: - raise value - raise value from from_value - finally: - value = None -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - try: - raise value from from_value - finally: - value = None -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. 
- class metaclass(type): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/scripts/lib/six_additions.py b/scripts/lib/six_additions.py deleted file mode 100644 index 00743992563..00000000000 --- a/scripts/lib/six_additions.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Additions to the six library needed for python 2/3 compatibility""" - -import six - -if six.PY3: - # This is only available in python3.2 and later, so this code won't - # run with python3 versions prior to 3.2 - _assertNotRegex = "assertNotRegex" -else: - _assertNotRegex = "assertNotRegexpMatches" - -def assertNotRegex(self, *args, **kwargs): - return getattr(self, _assertNotRegex)(*args, **kwargs) diff --git a/scripts/query_config b/scripts/query_config index 5c62f3d2bdb..8438430d5e8 100755 --- a/scripts/query_config +++ b/scripts/query_config @@ -1,370 +1,19 @@ -#!/usr/bin/env python -""" -Displays information about available compsets, component settings, grids and/or -machines. Typically run with one of the arguments --compsets, --settings, ---grids or --machines; if you specify more than one of these arguments, -information will be listed for each. 
-""" +#!/usr/bin/env python3 -from Tools.standard_script_setup import * -import re -from CIME.utils import expect -from CIME.XML.files import Files -from CIME.XML.component import Component -from CIME.XML.compsets import Compsets -from CIME.XML.grids import Grids -#from CIME.XML.machines import Machines -import CIME.XML.machines -from argparse import RawTextHelpFormatter +import os +import sys -logger = logging.getLogger(__name__) -supported_comp_interfaces = ["mct", "nuopc", "moab"] +real_file_dir = os.path.dirname(os.path.realpath(__file__)) +cimeroot = os.path.abspath(os.path.join(real_file_dir, "..")) +# Set tools path as well so external scripts can import either +# CIME.Tools.standard_script_setup or just standard_script_setup +tools_path = os.path.join(cimeroot, "CIME", "Tools") -def query_grids(files, long_output): - """ - query all grids. - """ - config_file = files.get_value("GRIDS_SPEC_FILE") - expect(os.path.isfile(config_file), - "Cannot find config_file {} on disk".format(config_file)) +sys.path.insert(0, cimeroot) +sys.path.insert(1, tools_path) - grids = Grids(config_file) - if long_output: - grids.print_values(long_output=long_output) - else: - grids.print_values() +from CIME.scripts.query_config import _main_func -def query_machines(files, machine_name='all'): - """ - query machines. Defaule: all - """ - config_file = files.get_value("MACHINES_SPEC_FILE") - expect(os.path.isfile(config_file), - "Cannot find config_file {} on disk".format(config_file)) - # Provide a special machine name indicating no need for a machine name - machines = Machines(config_file, machine="Query") - machines.print_values(machine_name=machine_name) -def query_compsets(files, name): - """ - query compset definition give a compset name - """ - # Determine valid component values by checking the value attributes for COMPSETS_SPEC_FILE - components = get_compsets(files) - match_found = None - all_components = False - if re.search("^all$", name): # print all compsets - match_found = name - all_components = True - else: - for component in components: - if component == name: - match_found = name - break - - # If name is not a valid argument - exit with error - expect(match_found is not None, - "Invalid input argument {}, valid input arguments are {}".format(name, components)) - - if all_components: # print all compsets - for component in components: - # the all_components flag will only print available components - print_compset(component, files, all_components=all_components) - else: - print_compset(name, files) - -def print_compset(name, files, all_components=False): - """ - print compsets associated with the component name, but if all_components is true only - print the details if the associated component is available - """ - - # Determine the config_file for the target component - config_file = files.get_value("COMPSETS_SPEC_FILE", attribute={"component":name}) - # only error out if we aren't printing all otherwise exit quitely - if not all_components: - expect((config_file), - "Cannot find any config_component.xml file for {}".format(name)) - - # Check that file exists on disk - expect(os.path.isfile(config_file), - "Cannot find config_file {} on disk".format(config_file)) - elif config_file is None or not os.path.isfile(config_file): - return - - print("\nActive component: {}".format(name)) - # Now parse the compsets file and write out the compset alias and longname as well as the help text - # determine component xml content - compsets = Compsets(config_file) - # print compsets associated with 
component without help text - compsets.print_values(arg_help=False) - -def query_all_components(files): - """ - query all components - """ - components = get_components(files) - # Loop through the elements for each component class (in config_files.xml) - for comp in components: - string = "CONFIG_{}_FILE".format(comp) - - # determine all components in string - components = files.get_components(string) - for item in components: - query_component(item, files, all_components=True) - -def query_component(name, files, all_components=False): - """ - query a component by name - """ - # Determine the valid component classes (e.g. atm) for the driver/cpl - # These are then stored in comps_array - components = get_components(files) - - # Loop through the elements for each component class (in config_files.xml) - # and see if there is a match for the the target component in the component attribute - match_found = False - valid_components = [] - config_exists = False - for comp in components: - string = "CONFIG_{}_FILE".format(comp) - config_file = None - # determine all components in string - root_dir_node_name = "COMP_ROOT_DIR_{}".format(comp) - components = files.get_components(root_dir_node_name) - if components is None: - components = files.get_components(string) - for item in components: - valid_components.append(item) - logger.debug ("{}: valid_components {}".format(comp, valid_components)) - # determine if config_file is on disk - if name is None: - config_file = files.get_value(string) - elif name in valid_components: - config_file = files.get_value(string, attribute={"component":name}) - logger.debug("query {}".format(config_file)) - if config_file is not None: - match_found = True - config_exists = os.path.isfile(config_file) - break - - if not all_components and not config_exists: - expect(config_exists, - "Cannot find config_file {} on disk".format(config_file)) - elif all_components and not config_exists: - print("WARNING: Couldn't find config_file {} on disk".format(config_file)) - return - # If name is not a valid argument - exit with error - expect(match_found, - "Invalid input argument {}, valid input arguments are {}".format(name, valid_components)) - - # Check that file exists on disk, if not exit with error - expect((config_file), - "Cannot find any config_component.xml file for {}".format(name)) - - # determine component xml content - component = Component(config_file, "CPL") - component.print_values() - -def parse_command_line(args, description): - """ - parse command line arguments - """ - cime_model = CIME.utils.get_model() - - parser = ArgumentParser(description=description, - formatter_class=RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - valid_components = ['all'] - files = {} - for comp_interface in supported_comp_interfaces: - files[comp_interface] = Files(comp_interface=comp_interface) - components = files[comp_interface].get_components("COMPSETS_SPEC_FILE") - for item in components: - valid_components.append(item) - - parser.add_argument("--compsets", nargs='?', const='all', choices=valid_components, - help="Query compsets corresponding to the target component for the {} model." 
- " If no component is given, lists compsets defined by all components".format(cime_model)) - - # Loop through the elements for each component class (in config_files.xml) - valid_components = ['all'] - tmp_comp_interfaces = supported_comp_interfaces - for comp_interface in tmp_comp_interfaces: - try: - components = get_components(files[comp_interface]) - except Exception: - supported_comp_interfaces.remove(comp_interface) - - for comp in components: - if cime_model == "cesm": - string = "COMP_ROOT_DIR_{}".format(comp) - else: - string = "CONFIG_{}_FILE".format(comp) - - # determine all components in string - components = files[comp_interface].get_components(string) - if components: - for item in components: - valid_components.append(item) - - parser.add_argument("--components", nargs='?', const='all', choices=valid_components, - help="Query component settings corresponding to the target component for {} model." - "\nIf the option is empty, then the lists settings defined by all components is output".format(cime_model)) - - parser.add_argument("--grids", action="store_true", - help="Query supported model grids for {} model.".format(cime_model)) - # same for all comp_interfaces - config_file = files['mct'].get_value("MACHINES_SPEC_FILE") - expect(os.path.isfile(config_file), - "Cannot find config_file {} on disk".format(config_file)) - machines = Machines(config_file, machine="Query") - machine_names = ['all', 'current'] - machine_names.extend(machines.list_available_machines()) - - parser.add_argument("--machines", nargs='?', const='all', choices=machine_names, - help="Query supported machines for {} model." - "\nIf option is left empty then all machines are listed," - "\nIf the option is 'current' then only the current machine details are listed.".format(cime_model)) - - parser.add_argument("--long", action="store_true", - help="Provide long output for queries") - - parser.add_argument("--comp_interface", choices=supported_comp_interfaces, - default='mct', - help="Coupler/Driver interface") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - # make sure at least one argument has been passed - if not (args.grids or args.compsets or args.components or args.machines): - parser.print_help(sys.stderr) - - return args.grids, args.compsets, args.components, args.machines, \ - args.long, files[args.comp_interface] - -def get_compsets(files): - """ - Determine valid component values by checking the value attributes for COMPSETS_SPEC_FILE - """ - return files.get_components("COMPSETS_SPEC_FILE") - -def get_components(files): - """ - Determine the valid component classes (e.g. 
atm) for the driver/cpl - These are then stored in comps_array - """ - infile = files.get_value("CONFIG_CPL_FILE") - config_drv = Component(infile, "CPL") - return config_drv.get_valid_model_components() - -class ArgumentParser(argparse.ArgumentParser): - """ - we override the error message from ArgumentParser to have a more helpful - message in the case of missing arguments - """ - def error(self, message): - self.print_usage(sys.stderr) - # missing argument - # TODO: assumes comp_interface='mct' - if "expected one argument" in message: - if "compset" in message: - components = get_compsets(Files(comp_interface='mct')) - self.exit(2, '{}: error: {}\nValid input arguments are {}\n' - .format(self.prog, message, components)) - elif "component" in message: - files = Files(comp_interface='mct') - components = get_components(files) - # Loop through the elements for each component class (in config_files.xml) - valid_components = [] - for comp in components: - string = "CONFIG_{}_FILE".format(comp) - - # determine all components in string - components = files.get_components(string) - for item in components: - valid_components.append(item) - self.exit(2, '{}: error: {}\nValid input arguments are {}\n' - .format(self.prog, message, valid_components)) - # for all other errors - self.exit(2, '{}: error: {}\n'.format(self.prog, message)) - -class Machines(CIME.XML.machines.Machines): - """ - we overide print_values from Machines to add current in machine description - """ - def print_values(self, machine_name='all'): # pylint: disable=arguments-differ - # set flag to look for single machine - if 'all' not in machine_name: - single_machine = True - if machine_name == 'current': - machine_name = self.probe_machine_name(warn=False) - else: - single_machine = False - - # if we can't find the specified machine - if single_machine and machine_name is None: - files = Files() - config_file = files.get_value("MACHINES_SPEC_FILE") - print("Machine is not listed in config file: {}".format(config_file)) - else: # write out machines - if single_machine: - machine_names = [machine_name] - else: - machine_names = self.list_available_machines() - print("Machine(s)\n") - for name in machine_names: - self.set_machine(name) - desc = self.text(self.get_child("DESC")) - os_ = self.text(self.get_child("OS")) - compilers = self.text(self.get_child("COMPILERS")) - mpilibnodes = self.get_children("MPILIBS", root=self.machine_node) - mpilibs = [] - for node in mpilibnodes: - mpilibs.extend(self.text(node).split(',')) - # This does not include the possible depedancy of mpilib on compiler - # it simply provides a list of mpilibs available on the machine - mpilibs = list(set(mpilibs)) - max_tasks_per_node = self.text(self.get_child("MAX_TASKS_PER_NODE")) - mpitasks_node = self.get_optional_child("MAX_MPITASKS_PER_NODE") - max_mpitasks_per_node = self.text(mpitasks_node) if mpitasks_node else max_tasks_per_node - - current_machine = self.probe_machine_name(warn=False) - name += " (current)" if current_machine and current_machine in name else "" - print(" {} : {} ".format(name, desc)) - print(" os ", os_) - print(" compilers ",compilers) - print(" mpilibs ",mpilibs) - if max_mpitasks_per_node is not None: - print(" pes/node ",max_mpitasks_per_node) - if max_tasks_per_node is not None: - print(" max_tasks/node ",max_tasks_per_node) - print('') - -def _main_func(description): - """ - main function - """ - grids, compsets, components, machines, long_output, files = parse_command_line(sys.argv, description) - - - if grids: - 
query_grids(files, long_output) - - if compsets is not None: - query_compsets(files, name=compsets) - - if components is not None: - if re.search("^all$", components): # print all compsets - query_all_components(files) - else: - query_component(components, files) - - if machines is not None: - query_machines(files, machine_name=machines) - -# main entry point if __name__ == "__main__": - _main_func(__doc__) + _main_func() diff --git a/scripts/query_testlists b/scripts/query_testlists index cb1b67496f0..6ea16e3cada 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -1,220 +1,19 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -""" -Script to query xml test lists, displaying all tests in human-readable form. +import os +import sys -Usage: - ./query_testlists [--show-options] [--define-testtypes] - Display a list of tests - ./query_testlists --count - Count tests by category/machine/compiler - ./query_testlists --list {category,categories,machine,machines,compiler,compilers} - List the available options for --xml-category, --xml-machine, or --xml-compiler +real_file_dir = os.path.dirname(os.path.realpath(__file__)) +cimeroot = os.path.abspath(os.path.join(real_file_dir, "..")) +# Set tools path as well so external scripts can import either +# CIME.Tools.standard_script_setup or just standard_script_setup +tools_path = os.path.join(cimeroot, "CIME", "Tools") - All of the above support the various --xml-* arguments for subsetting which tests are included. -""" +sys.path.insert(0, cimeroot) +sys.path.insert(1, tools_path) -from Tools.standard_script_setup import * -from CIME.test_utils import get_tests_from_xml, test_to_string -from CIME.XML.tests import Tests -from CIME.utils import expect +from CIME.scripts.query_testlists import _main_func -logger = logging.getLogger(__name__) - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--count", action="store_true", - help="Rather than listing tests, just give counts by category/machine/compiler.") - - parser.add_argument("--list", dest='list_type', - choices = ['category', 'categories', - 'machine', 'machines', - 'compiler', 'compilers'], - help="Rather than listing tests, list the available options for\n" - "--xml-category, --xml-machine, or --xml-compiler.\n" - "(The singular and plural forms are equivalent - so '--list category'\n" - "is equivalent to '--list categories', etc.)") - - parser.add_argument("--show-options", action="store_true", - help="For each test, also show options for that test\n" - "(wallclock time, memory leak tolerance, etc.).\n" - "(Has no effect with --list or --count options.)") - - parser.add_argument("--define-testtypes", action="store_true", - help="At the top of the list of tests, define all of the possible test types.\n" - "(Has no effect with --list or --count options.)") - - parser.add_argument("--xml-category", - help="Only include tests in this category; default is all categories.") - - parser.add_argument("--xml-machine", - help="Only include tests for this machine; default is all machines.") - - parser.add_argument("--xml-compiler", - help="Only include tests for this compiler; default is all compilers.") - - parser.add_argument("--xml-testlist", - 
help="Path to testlist file from which tests are gathered;\n" - "default is all files specified in config_files.xml.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - _check_argument_compatibility(args) - - if args.list_type: - _process_list_type(args) - - return args - -############################################################################### -def _check_argument_compatibility(args): -############################################################################### - """Ensures there are no incompatible arguments - - If incompatible arguments are found, aborts with a helpful error - message. - """ - - expect(not(args.count and args.list_type), - "Cannot specify both --count and --list arguments.") - - if args.count: - expect(not args.show_options, - "--show-options is incompatible with --count") - expect(not args.define_testtypes, - "--define-testtypes is incompatible with --count") - - if args.list_type: - expect(not args.show_options, - "--show-options is incompatible with --list") - expect(not args.define_testtypes, - "--define-testtypes is incompatible with --list") - -############################################################################### -def _process_list_type(args): -############################################################################### - """Convert args.list_type into a name that matches one of the keys of the - test data dictionaries - - Args: - args: object containing list_type string attribute - """ - - if args.list_type == 'categories': - args.list_type = 'category' - elif args.list_type == 'machines': - args.list_type = 'machine' - elif args.list_type == 'compilers': - args.list_type = 'compiler' - -############################################################################### -def print_test_data(test_data, show_options, define_testtypes): -############################################################################### - """ - Args: - test_data (dict): dictionary of test data, containing at least these keys: - - name: full test name - - category: test category - """ - - if define_testtypes: - print("#"*72) - print("Test types") - print("----------") - test_definitions = Tests() - test_definitions.print_values(skip_infrastructure_tests=True) - print("#"*72) - - categories = sorted(set([item['category'] for item in test_data])) - max_category_len = max([len(category) for category in categories]) - max_test_len = max([len(item['name']) for item in test_data]) - for category in categories: - test_subset = [one_test for one_test in test_data if - one_test['category'] == category] - for one_test in test_subset: - print(test_to_string( - test = one_test, - category_field_width = max_category_len, - test_field_width = max_test_len, - show_options = show_options)) - -############################################################################### -def count_test_data(test_data): -############################################################################### - """ - Args: - test_data (dict): dictionary of test data, containing at least these keys: - - name: full test name - - category: test category - - machine - - compiler - """ - - tab_stop = ' '*4 - - categories = sorted(set([item['category'] for item in test_data])) - for category in categories: - tests_this_category = [one_test for one_test in test_data if - one_test['category'] == category] - print("%s: %d"%(category, len(tests_this_category))) - - machines = sorted(set([item['machine'] for item in tests_this_category])) - for machine in machines: - tests_this_machine 
= [one_test for one_test in tests_this_category if - one_test['machine'] == machine] - print("%s%s: %d"%(tab_stop, machine, len(tests_this_machine))) - - compilers = sorted(set([item['compiler'] for item in tests_this_machine])) - for compiler in compilers: - tests_this_compiler = [one_test for one_test in tests_this_machine if - one_test['compiler'] == compiler] - print("%s%s: %d"%(tab_stop*2, compiler, len(tests_this_compiler))) - -############################################################################### -def list_test_data(test_data, list_type): -############################################################################### - """List categories, machines or compilers - - Args: - test_data (dict): dictionary of test data, containing at least these keys: - - category - - machine - - compiler - list_type (str): one of 'category', 'machine' or 'compiler' - """ - - items = sorted(set([one_test[list_type] for one_test in test_data])) - for item in items: - print(item) - -############################################################################### -def _main_func(description): -############################################################################### - args = parse_command_line(sys.argv, description) - - test_data = get_tests_from_xml( - xml_machine = args.xml_machine, - xml_category = args.xml_category, - xml_compiler = args.xml_compiler, - xml_testlist = args.xml_testlist) - - expect(test_data, "No tests found with the following options (where 'None' means no subsetting on that attribute):\n" - "\tMachine = %s\n\tCategory = %s\n\tCompiler = %s\n\tTestlist = %s"% - (args.xml_machine, args.xml_category, args.xml_compiler, args.xml_testlist)) - - if args.count: - count_test_data(test_data) - elif args.list_type: - list_test_data(test_data, args.list_type) - else: - print_test_data(test_data, args.show_options, args.define_testtypes) if __name__ == "__main__": - _main_func(__doc__) + _main_func() diff --git a/scripts/tests/CMakeLists.txt b/scripts/tests/CMakeLists.txt index 235e29c71dd..06b01082d87 100644 --- a/scripts/tests/CMakeLists.txt +++ b/scripts/tests/CMakeLists.txt @@ -2,21 +2,21 @@ # Environment variables CIME_COMPILER and CIME_MPILIB # can be used to send --compiler and --mpilib settings to scripts_regression_tests.py # - +PROJECT(cime) cmake_minimum_required(VERSION 2.8) include(CTest) if (DEFINED ENV{PYTHON}) set(PYTHON $ENV{PYTHON}) else() - set(PYTHON "python") + set(PYTHON "python3") endif() execute_process(COMMAND ${PYTHON} "--version" OUTPUT_VARIABLE PY_VER OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE) -MESSAGE("Python version is " ${PY_VER}) +MESSAGE("Python version is " ${PY_VER}) execute_process(COMMAND ${PYTHON} "list_tests" OUTPUT_VARIABLE STR_TESTS WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE @@ -35,6 +35,6 @@ separate_arguments(ARG_LIST UNIX_COMMAND ${args}) foreach(ATEST ${TEST_LIST}) # this assignment prevents quotes being added to testname in add_test set(fulltest "${ATEST}") - add_test(NAME ${ATEST} COMMAND ./scripts_regression_tests.py -v ${fulltest} ${ARG_LIST} + add_test(NAME ${ATEST} COMMAND ../../CIME/tests/scripts_regression_tests.py -v ${fulltest} ${ARG_LIST} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endforeach(ATEST) diff --git a/scripts/tests/CTestConfig.cmake b/scripts/tests/CTestConfig.cmake index b84eabd2132..fe305ffb492 100644 --- a/scripts/tests/CTestConfig.cmake +++ b/scripts/tests/CTestConfig.cmake @@ -2,12 +2,17 @@ # Environment variables CIME_COMPILER and CIME_MPILIB # can be 
used to send --compiler and --mpilib settings to scripts_regression_tests.py # - +set(CTEST_SITE "$CIME_MACHINE $CIME_COMPILER") set(CTEST_PROJECT_NAME "CIME") string(TIMESTAMP CURRTIME "%H:%M:%S" UTC) set(CTEST_NIGHTLY_START_TIME "${CURRTIME} UTC") -set(CTEST_DROP_METHOD "http") +set(CTEST_DROP_METHOD "https") +set ( + CTEST_CURL_OPTIONS + "CURLOPT_SSL_VERIFYPEER_OFF" + "CURLOPT_SSL_VERIFYHOST_OFF" + ) set(CTEST_DROP_SITE "my.cdash.org") set(CTEST_DROP_LOCATION "/submit.php?project=CIME") set(CTEST_DROP_SITE_CDASH TRUE) @@ -28,4 +33,10 @@ else() set(mpilib "default") endif() -set(BUILDNAME "scripts_regression_${shell}_${compiler}_${mpilib}") +if (DEFINED ENV{CIME_DRIVER}) + set(driver $ENV{CIME_DRIVER}) +else() + set(driver "mct") +endif() + +set(BUILDNAME "scripts_regression_${shell}_${compiler}_${mpilib}_${driver}") diff --git a/scripts/tests/list_tests b/scripts/tests/list_tests index 01b08116753..7db4576e733 100755 --- a/scripts/tests/list_tests +++ b/scripts/tests/list_tests @@ -1,29 +1,16 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # This script will print the list of test classes in # scripts_regression_tests.py # + import unittest -DEBUG = False -#pylint: disable=protected-access +# pylint: disable=protected-access def list_tests_from(): - loader = unittest.TestLoader() - suite = loader.discover(".", pattern="scripts_regression_tests.py") - test_classes = [] - for alltests in suite: - tests = alltests._tests - if len(tests): - for atest in tests: - if DEBUG: - print(atest) - for btest in atest._tests: - btestname = btest.__str__().split() - test_classes.append(btestname[1][1:-1].split('.')[1]) - # add this explicitly, not captured by the above - test_classes.append("B_CheckCode") - for ctest in sorted(list(set(test_classes))): - print(ctest) + suite = unittest.defaultTestLoader.discover("../../CIME/tests") + for test in suite: + print(test._tests[0]._testMethodName) if __name__ == "__main__": # Include the directories diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py deleted file mode 100755 index 4a0a7e4b82d..00000000000 --- a/scripts/tests/scripts_regression_tests.py +++ /dev/null @@ -1,3413 +0,0 @@ -#!/usr/bin/env python - -""" -Script containing CIME python regression test suite. This suite should be run -to confirm overall CIME correctness. -""" - -import glob, os, re, shutil, signal, sys, tempfile, \ - threading, time, logging, unittest, getpass, \ - filecmp, time - -from xml.etree.ElementTree import ParseError - -LIB_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","lib") -sys.path.append(LIB_DIR) -# Remove all pyc files to ensure we're testing the right things -import subprocess, argparse -subprocess.call('/bin/rm -f $(find . 
-name "*.pyc")', shell=True, cwd=LIB_DIR) -import six -from six import assertRaisesRegex -import stat as osstat - -import collections - -from CIME.utils import run_cmd, run_cmd_no_fail, get_lids, get_current_commit, safe_copy, CIMEError, get_cime_root -import get_tests -import CIME.test_scheduler, CIME.wait_for_tests -from CIME.test_scheduler import TestScheduler -from CIME.XML.compilers import Compilers -from CIME.XML.env_run import EnvRun -from CIME.XML.machines import Machines -from CIME.XML.files import Files -from CIME.case import Case -from CIME.code_checker import check_code, get_all_checkable_files -from CIME.test_status import * - -SCRIPT_DIR = CIME.utils.get_scripts_root() -TOOLS_DIR = os.path.join(SCRIPT_DIR,"Tools") -TEST_COMPILER = None -GLOBAL_TIMEOUT = None -TEST_MPILIB = None -MACHINE = None -FAST_ONLY = False -NO_BATCH = False -NO_CMAKE = False -TEST_ROOT = None -NO_TEARDOWN = False - -os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" - -# pragma pylint: disable=protected-access -############################################################################### -def run_cmd_assert_result(test_obj, cmd, from_dir=None, expected_stat=0, env=None, verbose=False): -############################################################################### - from_dir = os.getcwd() if from_dir is None else from_dir - stat, output, errput = run_cmd(cmd, from_dir=from_dir, env=env, verbose=verbose) - if expected_stat == 0: - expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat - else: - expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % (expected_stat, stat) - msg = \ -""" -COMMAND: %s -FROM_DIR: %s -%s -OUTPUT: %s -ERRPUT: %s -""" % (cmd, from_dir, expectation, output, errput) - test_obj.assertEqual(stat, expected_stat, msg=msg) - - return output - -############################################################################### -def assert_test_status(test_obj, test_name, test_status_obj, test_phase, expected_stat): -############################################################################### - test_status = test_status_obj.get_status(test_phase) - test_obj.assertEqual(test_status, expected_stat, msg="Problem with {}: for phase '{}': has status '{}', expected '{}'".format(test_name, test_phase, test_status, expected_stat)) - -############################################################################### -def verify_perms(test_obj, root_dir): -############################################################################### - for root, dirs, files in os.walk(root_dir): - - for filename in files: - full_path = os.path.join(root, filename) - st = os.stat(full_path) - test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="file {} is not group writeable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="file {} is not group readable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="file {} is not world readable".format(full_path)) - - for dirname in dirs: - full_path = os.path.join(root, dirname) - st = os.stat(full_path) - - test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="dir {} is not group writable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="dir {} is not group readable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IXGRP, msg="dir {} is not group executable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="dir {} is not world readable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IXOTH, msg="dir {} is not world 
executable".format(full_path)) - -############################################################################### -class A_RunUnitTests(unittest.TestCase): -############################################################################### - - def test_resolve_variable_name(self): - files = Files() - machinefile = files.get_value("MACHINES_SPEC_FILE") - self.assertTrue(os.path.isfile(machinefile), - msg="Path did not resolve to existing file %s" % machinefile) - - def test_unittests(self): - # Finds all files contained in CIME/tests or its subdirectories that - # match the pattern 'test*.py', and runs the unit tests found there - # (i.e., tests defined using python's unittest module). - # - # This is analogous to running: - # python -m unittest discover -s CIME/tests -t . - # from cime/scripts/lib - # - # Yes, that means we have a bunch of unit tests run from this one unit - # test. - - testsuite = unittest.defaultTestLoader.discover( - start_dir = os.path.join(LIB_DIR,"CIME","tests"), - pattern = 'test*.py', - top_level_dir = LIB_DIR) - - testrunner = unittest.TextTestRunner(buffer=False) - - # Disable logging; otherwise log messages written by code under test - # clutter the unit test output - log_lvl = logging.getLogger().getEffectiveLevel() - logging.disable(logging.CRITICAL) - try: - results = testrunner.run(testsuite) - finally: - logging.getLogger().setLevel(log_lvl) - - self.assertTrue(results.wasSuccessful()) - - def test_lib_doctests(self): - # Find and run all the doctests in the lib directory tree - skip_list = ["six.py", "CIME/SystemTests/mvk.py", "CIME/SystemTests/pgn.py"] - for root, _, files in os.walk(LIB_DIR): - for file_ in files: - filepath = os.path.join(root, file_)[len(LIB_DIR)+1:] - if filepath.endswith(".py") and filepath not in skip_list: - with open(os.path.join(root, file_)) as fd: - content = fd.read() - if '>>>' in content: - print("Running doctests for {}".format(filepath)) - run_cmd_assert_result(self, 'PYTHONPATH={}:$PYTHONPATH python -m doctest {} 2>&1'.format(LIB_DIR, filepath), from_dir=LIB_DIR) - else: - print("{} has no doctests".format(filepath)) - -############################################################################### -def make_fake_teststatus(path, testname, status, phase): -############################################################################### - expect(phase in CORE_PHASES, "Bad phase '%s'" % phase) - with TestStatus(test_dir=path, test_name=testname) as ts: - for core_phase in CORE_PHASES: - if core_phase == phase: - ts.set_status(core_phase, status, comments=("time=42" if phase == RUN_PHASE else "")) - break - else: - ts.set_status(core_phase, TEST_PASS_STATUS, comments=("time=42" if phase == RUN_PHASE else "")) - -############################################################################### -def parse_test_status(line): -############################################################################### - regex = re.compile(r"Test '(\w+)' finished with status '(\w+)'") - m = regex.match(line) - return m.groups() - -############################################################################### -def kill_subprocesses(name=None, sig=signal.SIGKILL, expected_num_killed=None, tester=None): -############################################################################### - # Kill all subprocesses - proc_ids = CIME.utils.find_proc_id(proc_name=name, children_only=True) - if (expected_num_killed is not None): - tester.assertEqual(len(proc_ids), expected_num_killed, - msg="Expected to find %d processes to kill, found %d" % 
(expected_num_killed, len(proc_ids))) - for proc_id in proc_ids: - try: - os.kill(proc_id, sig) - except OSError: - pass - -############################################################################### -def kill_python_subprocesses(sig=signal.SIGKILL, expected_num_killed=None, tester=None): -############################################################################### - kill_subprocesses("[Pp]ython", sig, expected_num_killed, tester) - -########################################################################### -def assert_dashboard_has_build(tester, build_name, expected_count=1): -########################################################################### - # Do not test E3SM dashboard if model is CESM - if CIME.utils.get_model() == "e3sm": - time.sleep(10) # Give chance for cdash to update - - wget_file = tempfile.mktemp() - - run_cmd_no_fail("wget https://my.cdash.org/api/v1/index.php?project=ACME_test --no-check-certificate -O %s" % wget_file) - - raw_text = open(wget_file, "r").read() - os.remove(wget_file) - - num_found = raw_text.count(build_name) - tester.assertEqual(num_found, expected_count, - msg="Dashboard did not have expected num occurrences of build name '%s'. Expected %s, found %s" % (build_name, expected_count, num_found)) - -############################################################################### -def setup_proxy(): -############################################################################### - if ("http_proxy" not in os.environ): - proxy = MACHINE.get_value("PROXY") - if (proxy is not None): - os.environ["http_proxy"] = proxy - return True - - return False - -############################################################################### -class N_TestUnitTest(unittest.TestCase): -############################################################################### - @classmethod - def setUpClass(cls): - cls._do_teardown = [] - cls._testroot = os.path.join(TEST_ROOT, 'TestUnitTests') - cls._testdirs = [] - - def _has_unit_test_support(self): - if TEST_COMPILER is None: - default_compiler = MACHINE.get_default_compiler() - compiler = Compilers(MACHINE, compiler=default_compiler) - else: - compiler = Compilers(MACHINE, compiler=TEST_COMPILER) - attrs = {'MPILIB': 'mpi-serial', 'compile_threaded': 'FALSE'} - pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH", - attributes=attrs) - if pfunit_path is None: - return False - else: - return True - - def test_a_unit_test(self): - cls = self.__class__ - if not self._has_unit_test_support(): - self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine") - test_dir = os.path.join(cls._testroot,"unit_tester_test") - cls._testdirs.append(test_dir) - os.makedirs(test_dir) - unit_test_tool = os.path.abspath(os.path.join(get_cime_root(),"scripts","fortran_unit_testing","run_tests.py")) - test_spec_dir = os.path.join(os.path.dirname(unit_test_tool),"Examples", "interpolate_1d", "tests") - args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) - args += " --machine {}".format(MACHINE.get_machine_name()) - run_cmd_no_fail("{} {}".format(unit_test_tool, args)) - cls._do_teardown.append(test_dir) - - def test_b_cime_f90_unit_tests(self): - cls = self.__class__ - if (FAST_ONLY): - self.skipTest("Skipping slow test") - - if not self._has_unit_test_support(): - self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine") - - test_dir = os.path.join(cls._testroot,"driver_f90_tests") - cls._testdirs.append(test_dir) - 
os.makedirs(test_dir) - test_spec_dir = get_cime_root() - unit_test_tool = os.path.abspath(os.path.join(test_spec_dir,"scripts","fortran_unit_testing","run_tests.py")) - args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) - args += " --machine {}".format(MACHINE.get_machine_name()) - run_cmd_no_fail("{} {}".format(unit_test_tool, args)) - cls._do_teardown.append(test_dir) - - @classmethod - def tearDownClass(cls): - do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN - - teardown_root = True - for tfile in cls._testdirs: - if tfile not in cls._do_teardown: - print("Detected failed test or user request no teardown") - print("Leaving case directory : %s"%tfile) - teardown_root = False - elif do_teardown: - shutil.rmtree(tfile) - - if teardown_root and do_teardown: - shutil.rmtree(cls._testroot) - -############################################################################### -class J_TestCreateNewcase(unittest.TestCase): -############################################################################### - @classmethod - def setUpClass(cls): - cls._testdirs = [] - cls._do_teardown = [] - cls._testroot = os.path.join(TEST_ROOT, 'TestCreateNewcase') - - def test_a_createnewcase(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testcreatenewcase') - if os.path.exists(testdir): - shutil.rmtree(testdir) - args = " --case %s --compset X --output-root %s --handle-preexisting-dirs=r " % (testdir, cls._testroot) - if TEST_COMPILER is not None: - args = args + " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - if CIME.utils.get_cime_default_driver() == "nuopc": - args += " --res f19_g17 " - else: - args += " --res f19_g16 " - - cls._testdirs.append(testdir) - run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR) - self.assertTrue(os.path.exists(testdir)) - self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) - - run_cmd_assert_result(self, "./case.setup", from_dir=testdir) - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - - with Case(testdir, read_only=False) as case: - ntasks = case.get_value("NTASKS_ATM") - case.set_value("NTASKS_ATM", ntasks+1) - - # this should fail with a locked file issue - run_cmd_assert_result(self, "./case.build", - from_dir=testdir, expected_stat=1) - - run_cmd_assert_result(self, "./case.setup --reset", from_dir=testdir) - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - with Case(testdir, read_only=False) as case: - case.set_value("CHARGE_ACCOUNT", "fred") - - # this should not fail with a locked file issue - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - - run_cmd_assert_result(self, "./case.st_archive --test-all", from_dir=testdir) - - # Trying to set values outside of context manager should fail - case = Case(testdir, read_only=False) - with self.assertRaises(CIMEError): - case.set_value("NTASKS_ATM", 42) - - # Trying to read_xml with pending changes should fail - with self.assertRaises(CIMEError): - with Case(testdir, read_only=False) as case: - case.set_value("CHARGE_ACCOUNT", "fouc") - case.read_xml() - - cls._do_teardown.append(testdir) - - def test_aa_no_flush_on_instantiate(self): - testdir = os.path.join(self.__class__._testroot, 'testcreatenewcase') - with Case(testdir, read_only=False) as case: - for env_file in case._files: - self.assertFalse(env_file.needsrewrite, msg="Instantiating a case should not trigger a flush call") - - 
with Case(testdir, read_only=False) as case: - case.set_value("HIST_OPTION","nyears") - runfile = case.get_env('run') - self.assertTrue(runfile.needsrewrite, msg="Expected flush call not triggered") - for env_file in case._files: - if env_file != runfile: - self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}" - .format(env_file.filename)) - # Flush the file - runfile.write() - # set it again to the same value - case.set_value("HIST_OPTION","nyears") - # now the file should not need to be flushed - for env_file in case._files: - self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}" - .format(env_file.filename)) - - # Check once more with a new instance - with Case(testdir, read_only=False) as case: - case.set_value("HIST_OPTION","nyears") - for env_file in case._files: - self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}" - .format(env_file.filename)) - - def test_b_user_mods(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testusermods') - if os.path.exists(testdir): - shutil.rmtree(testdir) - - cls._testdirs.append(testdir) - - user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test1") - args = " --case %s --compset X --user-mods-dir %s --output-root %s --handle-preexisting-dirs=r"% (testdir, user_mods_dir, cls._testroot) - if TEST_COMPILER is not None: - args = args + " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - if CIME.utils.get_cime_default_driver() == "nuopc": - args += " --res f19_g17 " - else: - args += " --res f19_g16 " - - run_cmd_assert_result(self, "%s/create_newcase %s " - % (SCRIPT_DIR, args),from_dir=SCRIPT_DIR) - - self.assertTrue(os.path.isfile(os.path.join(testdir,"SourceMods","src.drv","somefile.F90")), msg="User_mods SourceMod missing") - with open(os.path.join(testdir,"user_nl_cpl"),"r") as fd: - contents = fd.read() - self.assertTrue("a different cpl test option" in contents, msg="User_mods contents of user_nl_cpl missing") - self.assertTrue("a cpl namelist option" in contents, msg="User_mods contents of user_nl_cpl missing") - cls._do_teardown.append(testdir) - - def test_c_create_clone_keepexe(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'test_create_clone_keepexe') - if os.path.exists(testdir): - shutil.rmtree(testdir) - prevtestdir = cls._testdirs[0] - user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test3") - - cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" \ - % (SCRIPT_DIR, prevtestdir, testdir, user_mods_dir) - run_cmd_assert_result(self, cmd, from_dir=SCRIPT_DIR, expected_stat=1) - - def test_d_create_clone_new_user(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'test_create_clone_new_user') - if os.path.exists(testdir): - shutil.rmtree(testdir) - prevtestdir = cls._testdirs[0] - cls._testdirs.append(testdir) - # change the USER and CIME_OUTPUT_ROOT to nonsense values - # this is intended as a test of whether create_clone is independent of user - run_cmd_assert_result(self, "./xmlchange USER=this_is_not_a_user", - from_dir=prevtestdir) - - fakeoutputroot = cls._testroot.replace(os.environ.get("USER"), "this_is_not_a_user") - run_cmd_assert_result(self, "./xmlchange CIME_OUTPUT_ROOT=%s"%fakeoutputroot, - from_dir=prevtestdir) - - # this test should pass (user name is replaced) - run_cmd_assert_result(self, "%s/create_clone 
--clone %s --case %s " % - (SCRIPT_DIR, prevtestdir, testdir),from_dir=SCRIPT_DIR) - - shutil.rmtree(testdir) - # this test should pass - run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s --cime-output-root %s" % - (SCRIPT_DIR, prevtestdir, testdir, cls._testroot),from_dir=SCRIPT_DIR) - - cls._do_teardown.append(testdir) - - def test_dd_create_clone_not_writable(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'test_create_clone_not_writable') - if os.path.exists(testdir): - shutil.rmtree(testdir) - prevtestdir = cls._testdirs[0] - cls._testdirs.append(testdir) - - with Case(prevtestdir, read_only=False) as case1: - case2 = case1.create_clone(testdir) - with self.assertRaises(CIMEError): - case2.set_value("CHARGE_ACCOUNT", "fouc") - - def test_e_xmlquery(self): - # Set script and script path - xmlquery = "./xmlquery" - cls = self.__class__ - casedir = cls._testdirs[0] - - # Check for environment - self.assertTrue(os.path.isdir(SCRIPT_DIR)) - self.assertTrue(os.path.isdir(TOOLS_DIR)) - self.assertTrue(os.path.isfile(os.path.join(casedir,xmlquery))) - - # Test command line options - with Case(casedir, read_only=True) as case: - STOP_N = case.get_value("STOP_N") - COMP_CLASSES = case.get_values("COMP_CLASSES") - BUILD_COMPLETE = case.get_value("BUILD_COMPLETE") - cmd = xmlquery + " STOP_N --value" - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == str(STOP_N), msg="%s != %s"%(output, STOP_N)) - cmd = xmlquery + " BUILD_COMPLETE --value" - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == "TRUE", msg="%s != %s"%(output, BUILD_COMPLETE)) - # we expect DOCN_MODE to be undefined in this X compset - # this test assures that we do not try to resolve this as a compvar - cmd = xmlquery + " DOCN_MODE --value" - _, output, error = run_cmd(cmd, from_dir=casedir) - self.assertTrue(error == "ERROR: No results found for variable DOCN_MODE", - msg="unexpected result for DOCN_MODE, output {}, error {}". 
- format(output, error)) - - for comp in COMP_CLASSES: - caseresult = case.get_value("NTASKS_%s"%comp) - cmd = xmlquery + " NTASKS_%s --value"%comp - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult)) - cmd = xmlquery + " NTASKS --subgroup %s --value"%comp - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult)) - if MACHINE.has_batch_system(): - JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run") - cmd = xmlquery + " JOB_QUEUE --subgroup case.run --value" - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == JOB_QUEUE, msg="%s != %s"%(output, JOB_QUEUE)) - - cmd = xmlquery + " --listall" - run_cmd_no_fail(cmd, from_dir=casedir) - - cls._do_teardown.append(cls._testroot) - - def test_f_createnewcase_with_user_compset(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset') - if os.path.exists(testdir): - shutil.rmtree(testdir) - - cls._testdirs.append(testdir) - - pesfile = os.path.join("..","src","drivers","mct","cime_config","config_pes.xml") - args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot) - if CIME.utils.get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args += " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - - run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "./case.setup", from_dir=testdir) - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - - cls._do_teardown.append(testdir) - - def test_g_createnewcase_with_user_compset_and_env_mach_pes(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset_and_env_mach_pes') - if os.path.exists(testdir): - shutil.rmtree(testdir) - previous_testdir = cls._testdirs[-1] - cls._testdirs.append(testdir) - - pesfile = os.path.join(previous_testdir,"env_mach_pes.xml") - args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot) - if CIME.utils.get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args += " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args += " --mpilib %s"%TEST_MPILIB - - run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir) - # this line should cause the diff to fail (I assume no machine is going to default to 17 tasks) - run_cmd_assert_result(self, "./xmlchange NTASKS=17", from_dir=testdir) - run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir, - expected_stat=1) - - cls._do_teardown.append(testdir) - - def test_h_primary_component(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testprimarycomponent') - if os.path.exists(testdir): - shutil.rmtree(testdir) - - cls._testdirs.append(testdir) - args = " --case CreateNewcaseTest --script-root %s --compset X --output-root %s --handle-preexisting-dirs u" % (testdir, cls._testroot) - if TEST_COMPILER is not None: - args += " --compiler 
%s"%TEST_COMPILER - if TEST_MPILIB is not None: - args += " --mpilib %s"%TEST_MPILIB - if CIME.utils.get_cime_default_driver() == "nuopc": - args += " --res f19_g17 " - else: - args += " --res f19_g16 " - - run_cmd_assert_result(self, "%s/create_newcase %s" % (SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - self.assertTrue(os.path.exists(testdir)) - self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) - - with Case(testdir, read_only=False) as case: - case._compsetname = case.get_value("COMPSET") - case.set_comp_classes(case.get_values("COMP_CLASSES")) - primary = case._find_primary_component() - self.assertEqual(primary, "drv", msg="primary component test expected drv but got %s"%primary) - # now we are going to corrupt the case so that we can do more primary_component testing - case.set_valid_values("COMP_GLC","%s,fred"%case.get_value("COMP_GLC")) - case.set_value("COMP_GLC","fred") - primary = case._find_primary_component() - self.assertEqual(primary, "fred", msg="primary component test expected fred but got %s"%primary) - case.set_valid_values("COMP_ICE","%s,wilma"%case.get_value("COMP_ICE")) - case.set_value("COMP_ICE","wilma") - primary = case._find_primary_component() - self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary) - - case.set_valid_values("COMP_OCN","%s,bambam,docn"%case.get_value("COMP_OCN")) - case.set_value("COMP_OCN","bambam") - primary = case._find_primary_component() - self.assertEqual(primary, "bambam", msg="primary component test expected bambam but got %s"%primary) - - case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND")) - case.set_value("COMP_LND","barney") - primary = case._find_primary_component() - # This is a "J" compset - self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary) - case.set_value("COMP_OCN","docn") - case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND")) - case.set_value("COMP_LND","barney") - primary = case._find_primary_component() - self.assertEqual(primary, "barney", msg="primary component test expected barney but got %s"%primary) - case.set_valid_values("COMP_ATM","%s,wilma"%case.get_value("COMP_ATM")) - case.set_value("COMP_ATM","wilma") - primary = case._find_primary_component() - self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary) - # this is a "E" compset - case._compsetname = case._compsetname.replace("XOCN","DOCN%SOM") - primary = case._find_primary_component() - self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary) - # finally a "B" compset - case.set_value("COMP_OCN","bambam") - primary = case._find_primary_component() - self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary) - - cls._do_teardown.append(testdir) - - def test_j_createnewcase_user_compset_vs_alias(self): - """ - Create a compset using the alias and another compset using the full compset name - and make sure they are the same by comparing the namelist files in CaseDocs. - Ignore the modelio files and clean the directory names out first. 
- """ - - cls = self.__class__ - - testdir1 = os.path.join(cls._testroot, 'testcreatenewcase_user_compset') - if os.path.exists(testdir1): - shutil.rmtree(testdir1) - cls._testdirs.append(testdir1) - args = ' --case CreateNewcaseTest --script-root {} --compset 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV --res f19_g16 --output-root {} --handle-preexisting-dirs u' .format(testdir1, cls._testroot) - if CIME.utils.get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args += " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args += " --mpilib %s"%TEST_MPILIB - - run_cmd_assert_result(self, "{}/create_newcase {}" .format (SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "./case.setup ", from_dir=testdir1) - run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir1) - - dir1 = os.path.join(testdir1,"CaseDocs") - dir2 = os.path.join(testdir1,"CleanCaseDocs") - os.mkdir(dir2) - for _file in os.listdir(dir1): - if "modelio" in _file: - continue - with open(os.path.join(dir1,_file),"r") as fi: - file_text = fi.read() - file_text = file_text.replace(os.path.basename(testdir1),"PATH") - file_text = re.sub(r"logfile =.*","",file_text) - with open(os.path.join(dir2,_file), "w") as fo: - fo.write(file_text) - cleancasedocs1 = dir2 - - testdir2 = os.path.join(cls._testroot, 'testcreatenewcase_alias_compset') - if os.path.exists(testdir2): - shutil.rmtree(testdir2) - cls._testdirs.append(testdir2) - args = ' --case CreateNewcaseTest --script-root {} --compset ADSOMAQP --res f19_g16 --output-root {} --handle-preexisting-dirs u'.format(testdir2, cls._testroot) - if CIME.utils.get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args += " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args += " --mpilib %s"%TEST_MPILIB - - run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "./case.setup ", from_dir=testdir2) - run_cmd_assert_result(self, "./preview_namelists ", from_dir=testdir2) - - dir1 = os.path.join(testdir2,"CaseDocs") - dir2 = os.path.join(testdir2,"CleanCaseDocs") - os.mkdir(dir2) - for _file in os.listdir(dir1): - if "modelio" in _file: - continue - with open(os.path.join(dir1,_file),"r") as fi: - file_text = fi.read() - file_text = file_text.replace(os.path.basename(testdir2),"PATH") - file_text = re.sub(r"logfile =.*","",file_text) - with open(os.path.join(dir2,_file), "w") as fo: - fo.write(file_text) - - cleancasedocs2 = dir2 - dcmp = filecmp.dircmp(cleancasedocs1, cleancasedocs2) - self.assertTrue(len(dcmp.diff_files) == 0, "CaseDocs differ {}".format(dcmp.diff_files)) - - cls._do_teardown.append(testdir1) - cls._do_teardown.append(testdir2) - - - def test_k_append_config(self): - machlist_before = MACHINE.list_available_machines() - self.assertEqual(len(machlist_before)>1, True, msg="Problem reading machine list") - - newmachfile = os.path.join(get_cime_root(),"config", - "xml_schemas","config_machines_template.xml") - MACHINE.read(newmachfile) - machlist_after = MACHINE.list_available_machines() - - self.assertEqual(len(machlist_after)-len(machlist_before), 1, msg="Not able to append config_machines.xml {} {}".format(len(machlist_after), len(machlist_before))) - self.assertEqual("mymachine" in machlist_after, True, msg="Not able to append config_machines.xml") - - - def test_m_createnewcase_alternate_drivers(self): - # Test that case.setup runs for nuopc and moab drivers - cls = 
self.__class__ - model = CIME.utils.get_model() - for driver in ("nuopc", "moab"): - if not os.path.exists(os.path.join(get_cime_root(),"src","drivers",driver)): - self.skipTest("Skipping driver test for {}, driver not found".format(driver)) - if ((model == 'cesm' and driver == 'moab') or - (model == 'e3sm' and driver == 'nuopc')): - continue - - testdir = os.path.join(cls._testroot, 'testcreatenewcase.{}'.format( driver)) - if os.path.exists(testdir): - shutil.rmtree(testdir) - args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} --handle-preexisting-dirs=r".format(driver, testdir, cls._testroot) - if model == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args = args + " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - - cls._testdirs.append(testdir) - run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR) - self.assertTrue(os.path.exists(testdir)) - self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) - - run_cmd_assert_result(self, "./case.setup", from_dir=testdir) - with Case(testdir, read_only=False) as case: - comp_interface = case.get_value("COMP_INTERFACE") - self.assertTrue(driver == comp_interface, msg="%s != %s"%(driver, comp_interface)) - - cls._do_teardown.append(testdir) - - def test_n_createnewcase_bad_compset(self): - cls = self.__class__ - model = CIME.utils.get_model() - - testdir = os.path.join(cls._testroot, 'testcreatenewcase_bad_compset') - if os.path.exists(testdir): - shutil.rmtree(testdir) - args = " --case %s --compset InvalidCompsetName --output-root %s --handle-preexisting-dirs=r " % (testdir, cls._testroot) - if model == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args = args + " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - if CIME.utils.get_cime_default_driver() == "nuopc": - args += " --res f19_g17 " - else: - args += " --res f19_g16 " - - cls._testdirs.append(testdir) - run_cmd_assert_result(self, "./create_newcase %s"%(args), - from_dir=SCRIPT_DIR, expected_stat=1) - self.assertFalse(os.path.exists(testdir)) - - @classmethod - def tearDownClass(cls): - do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN - - for tfile in cls._testdirs: - if tfile not in cls._do_teardown: - print("Detected failed test or user request no teardown") - print("Leaving case directory : %s"%tfile) - elif do_teardown: - try: - print ("Attempt to remove directory {}".format(tfile)) - shutil.rmtree(tfile) - except BaseException: - print("Could not remove directory {}".format(tfile)) - -############################################################################### -class M_TestWaitForTests(unittest.TestCase): -############################################################################### - - ########################################################################### - def setUp(self): - ########################################################################### - self._testroot = os.path.join(TEST_ROOT,"TestWaitForTests") - self._timestamp = CIME.utils.get_timestamp() - - # basic tests - self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass') - self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail') - self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished') - 
self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2') - - # live tests - self._testdir_teststatus1 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus1') - self._testdir_teststatus2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus2') - - self._testdirs = [self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2, - self._testdir_teststatus1, self._testdir_teststatus2] - basic_tests = self._testdirs[:self._testdirs.index(self._testdir_teststatus1)] - - for testdir in self._testdirs: - if os.path.exists(testdir): - shutil.rmtree(testdir) - os.makedirs(testdir) - - for r in range(10): - for testdir in basic_tests: - os.makedirs(os.path.join(testdir, str(r))) - make_fake_teststatus(os.path.join(testdir, str(r)), "Test_%d" % r, TEST_PASS_STATUS, RUN_PHASE) - - make_fake_teststatus(os.path.join(self._testdir_with_fail, "5"), "Test_5", TEST_FAIL_STATUS, RUN_PHASE) - make_fake_teststatus(os.path.join(self._testdir_unfinished, "5"), "Test_5", TEST_PEND_STATUS, RUN_PHASE) - make_fake_teststatus(os.path.join(self._testdir_unfinished2, "5"), "Test_5", TEST_PASS_STATUS, SUBMIT_PHASE) - - integration_tests = self._testdirs[len(basic_tests):] - for integration_test in integration_tests: - os.makedirs(os.path.join(integration_test, "0")) - make_fake_teststatus(os.path.join(integration_test, "0"), "Test_0", TEST_PASS_STATUS, CORE_PHASES[0]) - - # Set up proxy if possible - self._unset_proxy = setup_proxy() - - self._thread_error = None - - ########################################################################### - def tearDown(self): - ########################################################################### - do_teardown = sys.exc_info() == (None, None, None) and not NO_TEARDOWN - - if do_teardown: - for testdir in self._testdirs: - shutil.rmtree(testdir) - - kill_subprocesses() - - if (self._unset_proxy): - del os.environ["http_proxy"] - - ########################################################################### - def simple_test(self, testdir, expected_results, extra_args="", build_name=None): - ########################################################################### - # Need these flags to test dashboard if e3sm - if CIME.utils.get_model() == "e3sm" and build_name is not None: - extra_args += " -b %s" % build_name - - expected_stat = 0 if expected_results == ["PASS"]*len(expected_results) else CIME.utils.TESTS_FAILED_ERR_CODE - output = run_cmd_assert_result(self, "%s/wait_for_tests -p ACME_test */TestStatus %s" % (TOOLS_DIR, extra_args), - from_dir=testdir, expected_stat=expected_stat) - - lines = [line for line in output.splitlines() if line.startswith("Test '")] - self.assertEqual(len(lines), len(expected_results)) - for idx, line in enumerate(lines): - testname, status = parse_test_status(line) - self.assertEqual(status, expected_results[idx]) - self.assertEqual(testname, "Test_%d" % idx) - - ########################################################################### - def threaded_test(self, testdir, expected_results, extra_args="", build_name=None): - ########################################################################### - try: - self.simple_test(testdir, expected_results, extra_args, build_name) - except AssertionError as e: - self._thread_error = str(e) - - ########################################################################### - def test_wait_for_test_all_pass(self): - 
########################################################################### - self.simple_test(self._testdir_all_pass, ["PASS"] * 10) - - ########################################################################### - def test_wait_for_test_with_fail(self): - ########################################################################### - expected_results = ["FAIL" if item == 5 else "PASS" for item in range(10)] - self.simple_test(self._testdir_with_fail, expected_results) - - ########################################################################### - def test_wait_for_test_no_wait(self): - ########################################################################### - expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] - self.simple_test(self._testdir_unfinished, expected_results, "-n") - - ########################################################################### - def test_wait_for_test_timeout(self): - ########################################################################### - expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] - self.simple_test(self._testdir_unfinished, expected_results, "--timeout=3") - - ########################################################################### - def test_wait_for_test_wait_for_pend(self): - ########################################################################### - run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, ["PASS"] * 10)) - run_thread.daemon = True - run_thread.start() - - time.sleep(5) # Kinda hacky - - self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited") - - with TestStatus(test_dir=os.path.join(self._testdir_unfinished, "5")) as ts: - ts.set_status(RUN_PHASE, TEST_PASS_STATUS) - - run_thread.join(timeout=10) - - self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished") - - self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) - - ########################################################################### - def test_wait_for_test_wait_for_missing_run_phase(self): - ########################################################################### - run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished2, ["PASS"] * 10)) - run_thread.daemon = True - run_thread.start() - - time.sleep(5) # Kinda hacky - - self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited") - - with TestStatus(test_dir=os.path.join(self._testdir_unfinished2, "5")) as ts: - ts.set_status(RUN_PHASE, TEST_PASS_STATUS) - - run_thread.join(timeout=10) - - self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished") - - self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) - - ########################################################################### - def test_wait_for_test_wait_kill(self): - ########################################################################### - expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] - run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, expected_results)) - run_thread.daemon = True - run_thread.start() - - time.sleep(5) - - self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited") - - kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self) - - run_thread.join(timeout=10) - - self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished") 
- - self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) - - ########################################################################### - def test_wait_for_test_cdash_pass(self): - ########################################################################### - expected_results = ["PASS"] * 10 - build_name = "regression_test_pass_" + self._timestamp - run_thread = threading.Thread(target=self.threaded_test, - args=(self._testdir_all_pass, expected_results, "", build_name)) - run_thread.daemon = True - run_thread.start() - - run_thread.join(timeout=10) - - self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished") - - self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) - - assert_dashboard_has_build(self, build_name) - - ########################################################################### - def test_wait_for_test_cdash_kill(self): - ########################################################################### - expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] - build_name = "regression_test_kill_" + self._timestamp - run_thread = threading.Thread(target=self.threaded_test, - args=(self._testdir_unfinished, expected_results, "", build_name)) - run_thread.daemon = True - run_thread.start() - - time.sleep(5) - - self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited") - - kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self) - - run_thread.join(timeout=10) - - self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished") - self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) - - assert_dashboard_has_build(self, build_name) - - if CIME.utils.get_model() == "e3sm": - cdash_result_dir = os.path.join(self._testdir_unfinished, "Testing") - tag_file = os.path.join(cdash_result_dir, "TAG") - self.assertTrue(os.path.isdir(cdash_result_dir)) - self.assertTrue(os.path.isfile(tag_file)) - - tag = open(tag_file, "r").readlines()[0].strip() - xml_file = os.path.join(cdash_result_dir, tag, "Test.xml") - self.assertTrue(os.path.isfile(xml_file)) - - xml_contents = open(xml_file, "r").read() - self.assertTrue(r'Test_0Test_1Test_2Test_3Test_4Test_5Test_6Test_7Test_8Test_9' - in xml_contents) - self.assertTrue(r'Test_5' in xml_contents) - - # TODO: Any further checking of xml output worth doing? 
- - ########################################################################### - def live_test_impl(self, testdir, expected_results, last_phase, last_status): - ########################################################################### - run_thread = threading.Thread(target=self.threaded_test, args=(testdir, expected_results)) - run_thread.daemon = True - run_thread.start() - - time.sleep(5) - - self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited") - - for core_phase in CORE_PHASES[1:]: - with TestStatus(test_dir=os.path.join(self._testdir_teststatus1, "0")) as ts: - ts.set_status(core_phase, last_status if core_phase == last_phase else TEST_PASS_STATUS) - - time.sleep(5) - - if core_phase != last_phase: - self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited after passing phase {}".format(core_phase)) - else: - run_thread.join(timeout=10) - self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished after phase {}".format(core_phase)) - break - - self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) - - ########################################################################### - def test_wait_for_test_test_status_integration_pass(self): - ########################################################################### - self.live_test_impl(self._testdir_teststatus1, ["PASS"], RUN_PHASE, TEST_PASS_STATUS) - - ########################################################################### - def test_wait_for_test_test_status_integration_submit_fail(self): - ########################################################################### - self.live_test_impl(self._testdir_teststatus1, ["FAIL"], SUBMIT_PHASE, TEST_FAIL_STATUS) - -############################################################################### -class TestCreateTestCommon(unittest.TestCase): -############################################################################### - - ########################################################################### - def setUp(self): - ########################################################################### - self._thread_error = None - self._unset_proxy = setup_proxy() - self._machine = MACHINE.get_machine_name() - self._compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER - self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_timestamp() - self._baseline_area = os.path.join(TEST_ROOT, "baselines") - self._testroot = TEST_ROOT - self._hasbatch = MACHINE.has_batch_system() and not NO_BATCH - self._do_teardown = not NO_TEARDOWN - - ########################################################################### - def tearDown(self): - ########################################################################### - kill_subprocesses() - - if (self._unset_proxy): - del os.environ["http_proxy"] - - files_to_clean = [] - - baselines = os.path.join(self._baseline_area, self._baseline_name) - if (os.path.isdir(baselines)): - files_to_clean.append(baselines) - - for test_id in ["master", self._baseline_name]: - for leftover in glob.glob(os.path.join(self._testroot, "*%s*" % test_id)): - files_to_clean.append(leftover) - - do_teardown = self._do_teardown and sys.exc_info() == (None, None, None) - if (not do_teardown): - print("Detected failed test or user request no teardown") - print("Leaving files:") - for file_to_clean in files_to_clean: - print(" " + file_to_clean) - else: - # For batch machines need to avoid race condition as batch system - # finishes I/O for the 
case. - if self._hasbatch: - time.sleep(5) - - for file_to_clean in files_to_clean: - if (os.path.isdir(file_to_clean)): - shutil.rmtree(file_to_clean) - else: - os.remove(file_to_clean) - - ########################################################################### - def _create_test(self, extra_args, test_id=None, pre_run_errors=False, run_errors=False, env_changes=""): - ########################################################################### - # All stub model not supported in nuopc driver - driver = CIME.utils.get_cime_default_driver() - if driver == 'nuopc': - extra_args.append(" ^SMS.T42_T42.S") - - test_id = CIME.utils.get_timestamp() if test_id is None else test_id - extra_args.append("-t {}".format(test_id)) - extra_args.append("--baseline-root {}".format(self._baseline_area)) - if NO_BATCH: - extra_args.append("--no-batch") - if TEST_COMPILER and ([extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == []): - extra_args.append("--compiler={}".format(TEST_COMPILER)) - if TEST_MPILIB and ([extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == []): - extra_args.append("--mpilib={}".format(TEST_MPILIB)) - extra_args.append("--test-root={0} --output-root={0}".format(TEST_ROOT)) - - full_run = (set(extra_args) & set(["-n", "--namelist-only", "--no-setup", "--no-build"])) == set() - - if self._hasbatch: - expected_stat = 0 if not pre_run_errors else CIME.utils.TESTS_FAILED_ERR_CODE - else: - expected_stat = 0 if not pre_run_errors and not run_errors else CIME.utils.TESTS_FAILED_ERR_CODE - - run_cmd_assert_result(self, "{} {}/create_test {}".format(env_changes, SCRIPT_DIR, " ".join(extra_args)), - expected_stat=expected_stat) - - if full_run: - self._wait_for_tests(test_id, expect_works=(not pre_run_errors and not run_errors)) - - ########################################################################### - def _wait_for_tests(self, test_id, expect_works=True): - ########################################################################### - if self._hasbatch: - timeout_arg = "--timeout={}".format(GLOBAL_TIMEOUT) if GLOBAL_TIMEOUT is not None else "" - expected_stat = 0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE - run_cmd_assert_result(self, "{}/wait_for_tests {} *{}/TestStatus".format(TOOLS_DIR, timeout_arg, test_id), - from_dir=self._testroot, expected_stat=expected_stat) - -############################################################################### -class O_TestTestScheduler(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_a_phases(self): - ########################################################################### - # exclude the MEMLEAK tests here. 
- tests = get_tests.get_full_test_names(["cime_test_only", - "^TESTMEMLEAKFAIL_P1.f09_g16.X", - "^TESTMEMLEAKPASS_P1.f09_g16.X", - "^TESTRUNSTARCFAIL_P1.f19_g16_rx1.A", - "^TESTTESTDIFF_P1.f19_g16_rx1.A", - "^TESTBUILDFAILEXC_P1.f19_g16_rx1.A", - "^TESTRUNFAILEXC_P1.f19_g16_rx1.A"], - self._machine, self._compiler) - self.assertEqual(len(tests), 3) - ct = TestScheduler(tests, test_root=TEST_ROOT, output_root=TEST_ROOT, - compiler=self._compiler, mpilib=TEST_MPILIB) - - build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0] - run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0] - pass_test = [item for item in tests if "TESTRUNPASS" in item][0] - - self.assertTrue("BUILDFAIL" in build_fail_test, msg="Wrong test '%s'" % build_fail_test) - self.assertTrue("RUNFAIL" in run_fail_test, msg="Wrong test '%s'" % run_fail_test) - self.assertTrue("RUNPASS" in pass_test, msg="Wrong test '%s'" % pass_test) - - for idx, phase in enumerate(ct._phases): - for test in ct._tests: - if (phase == CIME.test_scheduler.TEST_START): - continue - elif (phase == MODEL_BUILD_PHASE): - ct._update_test_status(test, phase, TEST_PEND_STATUS) - - if (test == build_fail_test): - ct._update_test_status(test, phase, TEST_FAIL_STATUS) - self.assertTrue(ct._is_broken(test)) - self.assertFalse(ct._work_remains(test)) - else: - ct._update_test_status(test, phase, TEST_PASS_STATUS) - self.assertFalse(ct._is_broken(test)) - self.assertTrue(ct._work_remains(test)) - - elif (phase == RUN_PHASE): - if (test == build_fail_test): - with self.assertRaises(CIMEError): - ct._update_test_status(test, phase, TEST_PEND_STATUS) - else: - ct._update_test_status(test, phase, TEST_PEND_STATUS) - self.assertFalse(ct._work_remains(test)) - - if (test == run_fail_test): - ct._update_test_status(test, phase, TEST_FAIL_STATUS) - self.assertTrue(ct._is_broken(test)) - else: - ct._update_test_status(test, phase, TEST_PASS_STATUS) - self.assertFalse(ct._is_broken(test)) - - self.assertFalse(ct._work_remains(test)) - - else: - with self.assertRaises(CIMEError): - ct._update_test_status(test, ct._phases[idx+1], TEST_PEND_STATUS) - - with self.assertRaises(CIMEError): - ct._update_test_status(test, phase, TEST_PASS_STATUS) - - ct._update_test_status(test, phase, TEST_PEND_STATUS) - self.assertFalse(ct._is_broken(test)) - self.assertTrue(ct._work_remains(test)) - - with self.assertRaises(CIMEError): - ct._update_test_status(test, phase, TEST_PEND_STATUS) - - ct._update_test_status(test, phase, TEST_PASS_STATUS) - - with self.assertRaises(CIMEError): - ct._update_test_status(test, phase, TEST_FAIL_STATUS) - - self.assertFalse(ct._is_broken(test)) - self.assertTrue(ct._work_remains(test)) - - ########################################################################### - def test_b_full(self): - ########################################################################### - tests = get_tests.get_full_test_names(["cime_test_only"], self._machine, self._compiler) - test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp()) - ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT, - output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB) - - build_fail_test = [item for item in tests if "TESTBUILDFAIL_" in item][0] - build_fail_exc_test = [item for item in tests if "TESTBUILDFAILEXC" in item][0] - run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0] - run_fail_exc_test = [item for item in tests if "TESTRUNFAILEXC" in item][0] - pass_test = [item for item in tests if 
"TESTRUNPASS" in item][0] - test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0] - mem_fail_test = [item for item in tests if "TESTMEMLEAKFAIL" in item][0] - mem_pass_test = [item for item in tests if "TESTMEMLEAKPASS" in item][0] - st_arch_fail_test = [item for item in tests if "TESTRUNSTARCFAIL" in item][0] - - log_lvl = logging.getLogger().getEffectiveLevel() - logging.disable(logging.CRITICAL) - try: - ct.run_tests() - finally: - logging.getLogger().setLevel(log_lvl) - - self._wait_for_tests(test_id, expect_works=False) - - test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) - self.assertEqual(len(tests), len(test_statuses)) - - for test_status in test_statuses: - ts = TestStatus(test_dir=os.path.dirname(test_status)) - test_name = ts.get_name() - log_files = glob.glob("%s/%s*%s/TestStatus.log" % (self._testroot, test_name, test_id)) - self.assertEqual(len(log_files), 1, "Expected exactly one TestStatus.log file, found %d" % len(log_files)) - log_file = log_files[0] - if (test_name == build_fail_test): - - - assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS) - data = open(log_file, "r").read() - self.assertTrue("Intentional fail for testing infrastructure" in data, - "Broken test did not report build error:\n%s" % data) - elif (test_name == build_fail_exc_test): - data = open(log_file, "r").read() - assert_test_status(self, test_name, ts, SHAREDLIB_BUILD_PHASE, TEST_FAIL_STATUS) - self.assertTrue("Exception from init" in data, - "Broken test did not report build error:\n%s" % data) - elif (test_name == run_fail_test): - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS) - elif (test_name == run_fail_exc_test): - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS) - data = open(log_file, "r").read() - self.assertTrue("Exception from run_phase" in data, - "Broken test did not report run error:\n%s" % data) - elif (test_name == mem_fail_test): - assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_FAIL_STATUS) - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS) - elif (test_name == test_diff_test): - assert_test_status(self, test_name, ts, "COMPARE_base_rest", TEST_FAIL_STATUS) - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS) - elif test_name == st_arch_fail_test: - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS) - assert_test_status(self, test_name, ts, STARCHIVE_PHASE, TEST_FAIL_STATUS) - else: - self.assertTrue(test_name in [pass_test, mem_pass_test]) - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS) - if (test_name == mem_pass_test): - assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_PASS_STATUS) - - ########################################################################### - def test_c_use_existing(self): - ########################################################################### - tests = get_tests.get_full_test_names(["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A"], - self._machine, self._compiler) - test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp()) - ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=TEST_ROOT, - output_root=TEST_ROOT,compiler=self._compiler, mpilib=TEST_MPILIB) - - build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0] - run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0] - pass_test = [item for item in tests if "TESTRUNPASS" in item][0] - 
- log_lvl = logging.getLogger().getEffectiveLevel() - logging.disable(logging.CRITICAL) - try: - ct.run_tests() - finally: - logging.getLogger().setLevel(log_lvl) - - test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) - self.assertEqual(len(tests), len(test_statuses)) - - self._wait_for_tests(test_id, expect_works=False) - - for test_status in test_statuses: - casedir = os.path.dirname(test_status) - ts = TestStatus(test_dir=casedir) - test_name = ts.get_name() - if test_name == build_fail_test: - assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS) - with TestStatus(test_dir=casedir) as ts: - ts.set_status(MODEL_BUILD_PHASE, TEST_PEND_STATUS) - elif test_name == run_fail_test: - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS) - with TestStatus(test_dir=casedir) as ts: - ts.set_status(SUBMIT_PHASE, TEST_PEND_STATUS) - else: - self.assertTrue(test_name == pass_test) - assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS) - assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS) - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS) - - os.environ["TESTBUILDFAIL_PASS"] = "True" - os.environ["TESTRUNFAIL_PASS"] = "True" - ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True, - test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler, - mpilib=TEST_MPILIB) - - log_lvl = logging.getLogger().getEffectiveLevel() - logging.disable(logging.CRITICAL) - try: - ct2.run_tests() - finally: - logging.getLogger().setLevel(log_lvl) - - self._wait_for_tests(test_id) - - for test_status in test_statuses: - ts = TestStatus(test_dir=os.path.dirname(test_status)) - test_name = ts.get_name() - assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS) - assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS) - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS) - - del os.environ["TESTBUILDFAIL_PASS"] - del os.environ["TESTRUNFAIL_PASS"] - - # test that passed tests are not re-run - - ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True, - test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler, - mpilib=TEST_MPILIB) - - log_lvl = logging.getLogger().getEffectiveLevel() - logging.disable(logging.CRITICAL) - try: - ct2.run_tests() - finally: - logging.getLogger().setLevel(log_lvl) - - self._wait_for_tests(test_id) - - for test_status in test_statuses: - ts = TestStatus(test_dir=os.path.dirname(test_status)) - test_name = ts.get_name() - assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS) - assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS) - assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS) - - ########################################################################### - def test_d_retry(self): - ########################################################################### - args = ["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A", "--retry=1"] - - self._create_test(args) - -############################################################################### -class P_TestJenkinsGenericJob(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def setUp(self): - ########################################################################### 
- if CIME.utils.get_model() != "e3sm": - self.skipTest("Skipping Jenkins tests. E3SM feature") - TestCreateTestCommon.setUp(self) - - # Need to run in a subdir in order to not have CTest clash. Name it - # such that it should be cleaned up by the parent tearDown - self._testdir = os.path.join(self._testroot, "jenkins_test_%s" % self._baseline_name) - os.makedirs(self._testdir) - - # Change root to avoid clashing with other jenkins_generic_jobs - self._jenkins_root = os.path.join(self._testdir, "J") - - ########################################################################### - def tearDown(self): - ########################################################################### - TestCreateTestCommon.tearDown(self) - - if "TESTRUNDIFF_ALTERNATE" in os.environ: - del os.environ["TESTRUNDIFF_ALTERNATE"] - - ########################################################################### - def simple_test(self, expect_works, extra_args, build_name=None): - ########################################################################### - if NO_BATCH: - extra_args += " --no-batch" - - # Need these flags to test dashboard if e3sm - if CIME.utils.get_model() == "e3sm" and build_name is not None: - extra_args += " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" % build_name - - run_cmd_assert_result(self, "%s/jenkins_generic_job -r %s %s -B %s" % (TOOLS_DIR, self._testdir, extra_args, self._baseline_area), - from_dir=self._testdir, expected_stat=(0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE)) - - ########################################################################### - def threaded_test(self, expect_works, extra_args, build_name=None): - ########################################################################### - try: - self.simple_test(expect_works, extra_args, build_name) - except AssertionError as e: - self._thread_error = str(e) - - ########################################################################### - def assert_num_leftovers(self, suite): - ########################################################################### - num_tests_in_tiny = len(get_tests.get_test_suite(suite)) - - jenkins_dirs = glob.glob("%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize())) # case dirs - # scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs - - self.assertEqual(num_tests_in_tiny, len(jenkins_dirs), - msg="Wrong number of leftover directories in %s, expected %d, see %s" % \ - (self._jenkins_root, num_tests_in_tiny, jenkins_dirs)) - - # JGF: Can't test this at the moment due to root change flag given to jenkins_generic_job - # self.assertEqual(num_tests_in_tiny + 1, len(scratch_dirs), - # msg="Wrong number of leftover directories in %s, expected %d, see %s" % \ - # (self._testroot, num_tests_in_tiny, scratch_dirs)) - - ########################################################################### - def test_jenkins_generic_job(self): - ########################################################################### - # Generate fresh baselines so that this test is not impacted by - # unresolved diffs - self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name) - self.assert_num_leftovers("cime_test_only_pass") - - build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_timestamp() - self.simple_test(True, "-t cime_test_only_pass -b %s" % self._baseline_name, build_name=build_name) - self.assert_num_leftovers("cime_test_only_pass") # jenkins_generic_job should have automatically cleaned up leftovers from prior run - 
assert_dashboard_has_build(self, build_name) - - ########################################################################### - def test_jenkins_generic_job_kill(self): - ########################################################################### - build_name = "jenkins_generic_job_kill_%s" % CIME.utils.get_timestamp() - run_thread = threading.Thread(target=self.threaded_test, args=(False, " -t cime_test_only_slow_pass -b master --baseline-compare=no", build_name)) - run_thread.daemon = True - run_thread.start() - - time.sleep(120) - - kill_subprocesses(sig=signal.SIGTERM) - - run_thread.join(timeout=30) - - self.assertFalse(run_thread.isAlive(), msg="jenkins_generic_job should have finished") - self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) - assert_dashboard_has_build(self, build_name) - - ########################################################################### - def test_jenkins_generic_job_realistic_dash(self): - ########################################################################### - # The actual quality of the cdash results for this test can only - # be inspected manually - - # Generate fresh baselines so that this test is not impacted by - # unresolved diffs - self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name) - self.assert_num_leftovers("cime_test_all") - - # Should create a diff - os.environ["TESTRUNDIFF_ALTERNATE"] = "True" - - # Should create a nml diff - # Modify namelist - fake_nl = """ - &fake_nml - fake_item = 'fake' - fake = .true. -/""" - baseline_glob = glob.glob(os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*")) - self.assertEqual(len(baseline_glob), 1, msg="Expected one match, got:\n%s" % "\n".join(baseline_glob)) - - for baseline_dir in baseline_glob: - nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in") - self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path) - - os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR) - with open(nl_path, "a") as nl_file: - nl_file.write(fake_nl) - - build_name = "jenkins_generic_job_mixed_%s" % CIME.utils.get_timestamp() - self.simple_test(False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name) - self.assert_num_leftovers("cime_test_all") # jenkins_generic_job should have automatically cleaned up leftovers from prior run - assert_dashboard_has_build(self, build_name) - -############################################################################### -class M_TestCimePerformance(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_cime_case_ctrl_performance(self): - ########################################################################### - - ts = time.time() - - num_repeat = 5 - for _ in range(num_repeat): - self._create_test(["cime_tiny","--no-build"]) - - elapsed = time.time() - ts - - print("Perf test result: {:0.2f}".format(elapsed)) - -############################################################################### -class T_TestRunRestart(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_run_restart(self): - ########################################################################### - driver = CIME.utils.get_cime_default_driver() - if driver == "mct": - walltime="00:15:00" - else: - walltime="00:30:00" - - 
self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name)) - rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) - fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL") - self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel) - - self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 3) - - ########################################################################### - def test_run_restart_too_many_fails(self): - ########################################################################### - driver = CIME.utils.get_cime_default_driver() - if driver == "mct": - walltime="00:15:00" - else: - walltime="00:30:00" - - self._create_test(["--walltime "+walltime,"NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name, env_changes="NODEFAIL_NUM_FAILS=5", run_errors=True) - - casedir = os.path.join(self._testroot, - "{}.{}".format(CIME.utils.get_full_test_name("NODEFAIL_P1.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name)) - rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) - fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL") - self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel) - - self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 4) - -############################################################################### -class Q_TestBlessTestResults(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def setUp(self): - ########################################################################### - TestCreateTestCommon.setUp(self) - - # Set a restrictive umask so we can test that SharedAreas used for - # recording baselines are working - restrictive_mask = 0o027 - self._orig_umask = os.umask(restrictive_mask) - - ########################################################################### - def tearDown(self): - ########################################################################### - TestCreateTestCommon.tearDown(self) - - if "TESTRUNDIFF_ALTERNATE" in os.environ: - del os.environ["TESTRUNDIFF_ALTERNATE"] - - os.umask(self._orig_umask) - - ############################################################################### - def test_bless_test_results(self): - ############################################################################### - # Generate some baselines - test_name = "TESTRUNDIFF_P1.f19_g16_rx1.A" - - if CIME.utils.get_model() == "e3sm": - genargs = ["-g", "-o", "-b", self._baseline_name, test_name] - compargs = ["-c", "-b", self._baseline_name, test_name] - else: - genargs = ["-g", self._baseline_name, "-o", test_name, - "--baseline-root ", self._baseline_area] - compargs = ["-c", self._baseline_name, test_name, - "--baseline-root ", self._baseline_area] - self._create_test(genargs) - - # Hist compare should pass - self._create_test(compargs) - - # Change behavior - os.environ["TESTRUNDIFF_ALTERNATE"] = "True" - - # Hist compare should now fail - test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp()) - self._create_test(compargs, test_id=test_id, run_errors=True) - - # compare_test_results should detect the fail - cpr_cmd = "{}/compare_test_results --test-root {} -t {} 2>&1" \ - 
.format(TOOLS_DIR, TEST_ROOT, test_id) - output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE) - - # use regex - expected_pattern = re.compile(r'FAIL %s[^\s]* BASELINE' % test_name) - the_match = expected_pattern.search(output) - self.assertNotEqual(the_match, None, - msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output)) - - # Bless - run_cmd_no_fail("{}/bless_test_results --test-root {} --hist-only --force -t {}" - .format(TOOLS_DIR, TEST_ROOT, test_id)) - - # Hist compare should now pass again - self._create_test(compargs) - - verify_perms(self, self._baseline_area) - - ############################################################################### - def test_rebless_namelist(self): - ############################################################################### - # Generate some namelist baselines - test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A" - if CIME.utils.get_model() == "e3sm": - genargs = ["-n", "-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"] - compargs = ["-n", "-c", "-b", self._baseline_name, "cime_test_only_pass"] - else: - genargs = ["-n", "-g", self._baseline_name, "-o", "cime_test_only_pass"] - compargs = ["-n", "-c", self._baseline_name, "cime_test_only_pass"] - - self._create_test(genargs) - - # Basic namelist compare - test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp()) - self._create_test(compargs, test_id=test_id) - - # Check standalone case.cmpgen_namelists - casedir = os.path.join(self._testroot, - "%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id)) - run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir) - - # compare_test_results should pass - cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \ - .format(TOOLS_DIR, TEST_ROOT, test_id) - output = run_cmd_assert_result(self, cpr_cmd) - - # use regex - expected_pattern = re.compile(r'PASS %s[^\s]* NLCOMP' % test_to_change) - the_match = expected_pattern.search(output) - self.assertNotEqual(the_match, None, - msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output)) - - - # Modify namelist - fake_nl = """ - &fake_nml - fake_item = 'fake' - fake = .true. 
-/""" - baseline_area = self._baseline_area - baseline_glob = glob.glob(os.path.join(baseline_area, self._baseline_name, "TEST*")) - self.assertEqual(len(baseline_glob), 3, msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob)) - - for baseline_dir in baseline_glob: - nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in") - self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path) - - os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR) - with open(nl_path, "a") as nl_file: - nl_file.write(fake_nl) - - # Basic namelist compare should now fail - test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp()) - self._create_test(compargs, test_id=test_id, pre_run_errors=True) - casedir = os.path.join(self._testroot, - "%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id)) - run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100) - - # preview namelists should work - run_cmd_assert_result(self, "./preview_namelists", from_dir=casedir) - - # This should still fail - run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100) - - # compare_test_results should fail - cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \ - .format(TOOLS_DIR, TEST_ROOT, test_id) - output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE) - - # use regex - expected_pattern = re.compile(r'FAIL %s[^\s]* NLCOMP' % test_to_change) - the_match = expected_pattern.search(output) - self.assertNotEqual(the_match, None, - msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output)) - - # Bless - run_cmd_no_fail("{}/bless_test_results --test-root {} -n --force -t {}" - .format(TOOLS_DIR, TEST_ROOT, test_id)) - - # Basic namelist compare should now pass again - self._create_test(compargs) - - verify_perms(self, self._baseline_area) - -class X_TestQueryConfig(unittest.TestCase): - def test_query_compsets(self): - run_cmd_no_fail("{}/query_config --compsets".format(SCRIPT_DIR)) - - def test_query_components(self): - run_cmd_no_fail("{}/query_config --components".format(SCRIPT_DIR)) - - def test_query_grids(self): - run_cmd_no_fail("{}/query_config --grids".format(SCRIPT_DIR)) - - def test_query_machines(self): - run_cmd_no_fail("{}/query_config --machines".format(SCRIPT_DIR)) - -############################################################################### -class Z_FullSystemTest(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_full_system(self): - ########################################################################### - # Put this inside any test that's slow - if (FAST_ONLY): - self.skipTest("Skipping slow test") - - self._create_test(["--walltime=0:15:00", "cime_developer"], test_id=self._baseline_name) - - run_cmd_assert_result(self, "%s/cs.status.%s" % (self._testroot, self._baseline_name), - from_dir=self._testroot) - - # Ensure that we can get test times - test_statuses = glob.glob(os.path.join(self._testroot, "*%s" % self._baseline_name, "TestStatus")) - for test_status in test_statuses: - test_time = CIME.wait_for_tests.get_test_time(os.path.dirname(test_status)) - self.assertIs(type(test_time), int, msg="get time did not return int for %s" % test_status) - self.assertTrue(test_time > 0, msg="test time was zero for %s" % test_status) - - # 
Test that re-running works - tests = get_tests.get_test_suite("cime_developer", machine=self._machine, compiler=self._compiler) - for test in tests: - casedir = os.path.join(TEST_ROOT, "%s.%s" % (test, self._baseline_name)) - - # Subtle issue: The run phases of these tests will be in the PASS state until - # the submitted case.test script is run, which could take a while if the system is - # busy. This potentially leaves a window where the wait_for_tests command below will - # not wait for the re-submitted jobs to run because it sees the original PASS. - # The code below forces things back to PEND to avoid this race condition. Note - # that we must use the MEMLEAK phase, not the RUN phase, because RUN being in a non-PEND - # state is how system tests know they are being re-run and must reset certain - # case settings. - if self._hasbatch: - with TestStatus(test_dir=casedir) as ts: - ts.set_status(MEMLEAK_PHASE, TEST_PEND_STATUS) - - run_cmd_assert_result(self, "./case.submit --skip-preview-namelist", from_dir=casedir) - - self._wait_for_tests(self._baseline_name) - -############################################################################### -class K_TestCimeCase(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_cime_case(self): - ########################################################################### - self._create_test(["--no-build", "TESTRUNPASS_P1.f19_g16_rx1.A"], test_id=self._baseline_name) - - self.assertEqual(type(MACHINE.get_value("MAX_TASKS_PER_NODE")), int) - self.assertTrue(type(MACHINE.get_value("PROJECT_REQUIRED")) in [type(None) , bool]) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - with Case(casedir, read_only=False) as case: - build_complete = case.get_value("BUILD_COMPLETE") - self.assertFalse(build_complete, - msg="Build complete had wrong value '%s'" % - build_complete) - - case.set_value("BUILD_COMPLETE", True) - build_complete = case.get_value("BUILD_COMPLETE") - self.assertTrue(build_complete, - msg="Build complete had wrong value '%s'" % - build_complete) - - case.flush() - - build_complete = run_cmd_no_fail("./xmlquery BUILD_COMPLETE --value", - from_dir=casedir) - self.assertEqual(build_complete, "TRUE", - msg="Build complete had wrong value '%s'" % - build_complete) - - # Test some test properties - self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS") - - def _batch_test_fixture(self, testcase_name): - if not MACHINE.has_batch_system() or NO_BATCH: - self.skipTest("Skipping testing user prerequisites without batch systems") - testdir = os.path.join(TEST_ROOT, testcase_name) - if os.path.exists(testdir): - shutil.rmtree(testdir) - args = "--case {name} --script-root {testdir} --compset X --res f19_g16 --handle-preexisting-dirs=r --output-root {testdir}".format(name=testcase_name, testdir=testdir) - if CIME.utils.get_cime_default_driver() == 'nuopc': - args += " --run-unsupported" - - run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), - from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "./case.setup", from_dir=testdir) - - return testdir - - ########################################################################### - def test_cime_case_prereq(self): - 
########################################################################### - testcase_name = 'prereq_test' - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - if case.get_value("depend_string") is None: - self.skipTest("Skipping prereq test, depend_string was not provided for this batch system") - job_name = "case.run" - prereq_name = 'prereq_test' - batch_commands = case.submit_jobs(prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True) - self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run") - self.assertTrue(len(batch_commands) > 0, "case.submit_jobs did not return any job submission string") - # The first element in the internal sequence should just be the job name - # The second one (batch_cmd_index) should be the actual batch submission command - batch_cmd_index = 1 - # The prerequisite should be applied to all jobs, though we're only expecting one - for batch_cmd in batch_commands: - self.assertTrue(isinstance(batch_cmd, collections.Sequence), "case.submit_jobs did not return a sequence of sequences") - self.assertTrue(len(batch_cmd) > batch_cmd_index, "case.submit_jobs returned internal sequences with length <= {}".format(batch_cmd_index)) - self.assertTrue(isinstance(batch_cmd[1], six.string_types), "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format(batch_cmd[1])) - batch_cmd_args = batch_cmd[1] - - jobid_ident = "jobid" - dep_str_fmt = case.get_env('batch').get_value('depend_string', subgroup=None) - self.assertTrue(jobid_ident in dep_str_fmt, "dependency string doesn't include the jobid identifier {}".format(jobid_ident)) - dep_str = dep_str_fmt[:dep_str_fmt.index(jobid_ident)] - - prereq_substr = None - while dep_str in batch_cmd_args: - dep_id_pos = batch_cmd_args.find(dep_str) + len(dep_str) - batch_cmd_args = batch_cmd_args[dep_id_pos:] - prereq_substr = batch_cmd_args[:len(prereq_name)] - if prereq_substr == prereq_name: - break - - self.assertTrue(prereq_name in prereq_substr, "Dependencies added, but not the user specified one") - - ########################################################################### - def test_cime_case_allow_failed_prereq(self): - ########################################################################### - testcase_name = 'allow_failed_prereq_test' - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - depend_allow = case.get_value("depend_allow_string") - if depend_allow is None: - self.skipTest("Skipping allow_failed_prereq test, depend_allow_string was not provided for this batch system") - job_name = "case.run" - prereq_name = "prereq_allow_fail_test" - depend_allow = depend_allow.replace("jobid", prereq_name) - batch_commands = case.submit_jobs(prereq=prereq_name, allow_fail=True, job=job_name, skip_pnl=True, dry_run=True) - self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run") - num_submissions = 1 - if case.get_value("DOUT_S"): - num_submissions = 2 - self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return any job submission strings") - self.assertTrue(depend_allow in batch_commands[0][1]) - - ########################################################################### - def test_cime_case_resubmit_immediate(self): - ########################################################################### - 
testcase_name = 'resubmit_immediate_test' - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - depend_string = case.get_value("depend_string") - if depend_string is None: - self.skipTest("Skipping resubmit_immediate test, depend_string was not provided for this batch system") - depend_string = re.sub('jobid.*$','',depend_string) - job_name = "case.run" - num_submissions = 6 - case.set_value("RESUBMIT", num_submissions - 1) - batch_commands = case.submit_jobs(job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True) - self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run") - if case.get_value("DOUT_S"): - num_submissions = 12 - self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return {} submitted jobs".format(num_submissions)) - for i, cmd in enumerate(batch_commands): - if i > 0: - self.assertTrue(depend_string in cmd[1]) - - ########################################################################### - def test_cime_case_st_archive_resubmit(self): - ########################################################################### - testcase_name = "st_archive_resubmit_test" - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - case.case_setup(clean=False, test_mode=False, reset=True) - orig_resubmit = 2 - case.set_value("RESUBMIT", orig_resubmit) - case.case_st_archive(resubmit=False) - new_resubmit = case.get_value("RESUBMIT") - self.assertTrue(orig_resubmit == new_resubmit, "st_archive resubmitted when told not to") - case.case_st_archive(resubmit=True) - new_resubmit = case.get_value("RESUBMIT") - self.assertTrue((orig_resubmit - 1) == new_resubmit, "st_archive did not resubmit when told to") - - ########################################################################### - def test_cime_case_build_threaded_1(self): - ########################################################################### - self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - with Case(casedir, read_only=False) as case: - build_threaded = case.get_value("SMP_PRESENT") - self.assertFalse(build_threaded) - - build_threaded = case.get_build_threaded() - self.assertFalse(build_threaded) - - case.set_value("FORCE_BUILD_SMP", True) - - build_threaded = case.get_build_threaded() - self.assertTrue(build_threaded) - - ########################################################################### - def test_cime_case_build_threaded_2(self): - ########################################################################### - self._create_test(["--no-build", "TESTRUNPASS_P1x2.f19_g16_rx1.A"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x2.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - with Case(casedir, read_only=False) as case: - build_threaded = case.get_value("SMP_PRESENT") - self.assertTrue(build_threaded) - - build_threaded = case.get_build_threaded() - self.assertTrue(build_threaded) - - 
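The prereq, allow-failed-prereq, and resubmit-immediate tests above all depend on Case.submit_jobs(..., dry_run=True) returning a sequence of (job name, batch command) pairs, and on the machine's depend_string template containing a "jobid" placeholder that gets replaced by the prerequisite job id. The stand-alone sketch below is not CIME code; the Slurm-style template and the job id are illustrative assumptions only, used to show the substitution those tests check for.

    # Stand-alone sketch of the dependency-string check performed by the tests above.
    def build_dependency_flag(depend_string_template, prereq_jobid):
        """Substitute a prerequisite job id into a depend_string template."""
        return depend_string_template.replace("jobid", prereq_jobid)


    if __name__ == "__main__":
        template = "--dependency=afterok:jobid"  # hypothetical Slurm-style depend_string
        # Hypothetical dry-run result: one (job name, batch command) pair.
        dry_run_result = [("case.run", "sbatch --dependency=afterok:42 .case.run")]

        expected = build_dependency_flag(template, "42")
        for job_name, batch_cmd in dry_run_result:
            assert expected in batch_cmd, "prerequisite not applied to %s" % job_name
        print("dependency flag applied:", expected)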
########################################################################### - def test_cime_case_mpi_serial(self): - ########################################################################### - self._create_test(["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - with Case(casedir, read_only=True) as case: - - # Serial cases should not be using pnetcdf - self.assertEqual(case.get_value("CPL_PIO_TYPENAME"), "netcdf") - - # Serial cases should be using 1 task - self.assertEqual(case.get_value("TOTALPES"), 1) - - self.assertEqual(case.get_value("NTASKS_CPL"), 1) - - ########################################################################### - def test_cime_case_force_pecount(self): - ########################################################################### - self._create_test(["--no-build", "--force-procs=16", "--force-threads=8", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P16x8.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - with Case(casedir, read_only=True) as case: - self.assertEqual(case.get_value("NTASKS_CPL"), 16) - - self.assertEqual(case.get_value("NTHRDS_CPL"), 8) - - ########################################################################### - def test_cime_case_xmlchange_append(self): - ########################################################################### - self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P1x1.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir) - result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir) - self.assertEqual(result, "-opt1") - - run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir) - result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir) - self.assertEqual(result, "-opt1 -opt2") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_1(self): - ########################################################################### - if CIME.utils.get_model() != "e3sm": - self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") - - test_name = "ERS.f19_g16_rx1.A" - machine, compiler = "blues", "gnu" - self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "0:10:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "batch") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_2(self): - ########################################################################### - if CIME.utils.get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "ERS_P64.f19_g16_rx1.A" - machine, compiler = "blues", "gnu" - self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "03:00:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "batch") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_3(self): - ########################################################################### - if CIME.utils.get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "ERS_P64.f19_g16_rx1.A" - machine, compiler = "blues", "gnu" - self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=0:10:00", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "0:10:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "batch") # Not smart enough to select faster queue - - ########################################################################### - def test_cime_case_test_walltime_mgmt_4(self): - ########################################################################### - if CIME.utils.get_model() != "e3sm": - self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") - - test_name = "ERS_P1.f19_g16_rx1.A" - machine, compiler = "blues", "gnu" - self._create_test(["--no-setup", "--machine={}".format(machine), "--walltime=2:00:00", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "2:00:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "batch") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_5(self): - ########################################################################### - if CIME.utils.get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "ERS_P1.f19_g16_rx1.A" - machine, compiler = "blues", "gnu" - self._create_test(["--no-setup", "--machine={}".format(machine), test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --subgroup=case.test", from_dir=casedir, expected_stat=1) - - run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --force --subgroup=case.test", from_dir=casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "03:00:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "slartibartfast") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_6(self): - ########################################################################### - if not self._hasbatch: - self.skipTest("Skipping walltime test. 
Depends on batch system") - - test_name = "ERS_P1.f19_g16_rx1.A" - self._create_test(["--no-build", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir) - - run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - with Case(casedir) as case: - walltime_format = case.get_value("walltime_format", subgroup=None) - if walltime_format is not None and walltime_format.count(":") == 1: - self.assertEqual(result, "421:32") - else: - self.assertEqual(result, "421:32:11") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_7(self): - ########################################################################### - if not self._hasbatch: - self.skipTest("Skipping walltime test. Depends on batch system") - - test_name = "ERS_P1.f19_g16_rx1.A" - self._create_test(["--no-build", "--walltime=01:00:00", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir) - - run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - with Case(casedir) as case: - walltime_format = case.get_value("walltime_format", subgroup=None) - if walltime_format is not None and walltime_format.count(":") == 1: - self.assertEqual(result, "421:32") - else: - self.assertEqual(result, "421:32:11") - - ########################################################################### - def test_cime_case_test_custom_project(self): - ########################################################################### - test_name = "ERS_P1.f19_g16_rx1.A" - machine, compiler = "melvin", "gnu" # have to use a machine both models know and one that doesn't put PROJECT in any key paths - self._create_test(["--no-setup", "--machine={}".format(machine), "--compiler={}".format(compiler), "--project=testproj", test_name], - test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - result = run_cmd_assert_result(self, "./xmlquery --value PROJECT --subgroup=case.test", from_dir=casedir) - self.assertEqual(result, "testproj") - - ########################################################################### - def test_create_test_longname(self): - ########################################################################### - self._create_test(["SMS.f19_g16.2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV", "--no-build"]) 
- - ########################################################################### - def test_env_loading(self): - ########################################################################### - if self._machine != "melvin": - self.skipTest("Skipping env load test - Only works on melvin") - - self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - with Case(casedir, read_only=True) as case: - env_mach = case.get_env("mach_specific") - orig_env = dict(os.environ) - - env_mach.load_env(case) - module_env = dict(os.environ) - - os.environ.clear() - os.environ.update(orig_env) - - env_mach.load_env(case, force_method="generic") - generic_env = dict(os.environ) - - os.environ.clear() - os.environ.update(orig_env) - - problems = "" - for mkey, mval in module_env.items(): - if mkey not in generic_env: - if not mkey.startswith("PS") and mkey != "OLDPWD": - problems += "Generic missing key: {}\n".format(mkey) - elif mval != generic_env[mkey] and mkey not in ["_", "SHLVL", "PWD"] and not mkey.endswith("()"): - problems += "Value mismatch for key {}: {} != {}\n".format(mkey, repr(mval), repr(generic_env[mkey])) - - for gkey in generic_env.keys(): - if gkey not in module_env: - problems += "Modules missing key: {}\n".format(gkey) - - self.assertEqual(problems, "", msg=problems) - - ########################################################################### - def test_case_submit_interface(self): - ########################################################################### - try: - import imp - except ImportError: - print("imp not found, skipping case.submit interface test") - return - sys.path.append(TOOLS_DIR) - case_submit_path = os.path.join(TOOLS_DIR, "case.submit") - submit_interface = imp.load_source("case_submit_interface", case_submit_path) - sys.argv = ["case.submit", "--batch-args", "'random_arguments_here.%j'", - "--mail-type", "fail", "--mail-user", "'random_arguments_here.%j'"] - submit_interface._main_func(None, True) - - ########################################################################### - def test_xml_caching(self): - ########################################################################### - self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name)) - self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir) - - active = os.path.join(casedir, "env_run.xml") - backup = os.path.join(casedir, "env_run.xml.bak") - - safe_copy(active, backup) - - with Case(casedir, read_only=False) as case: - env_run = EnvRun(casedir, read_only=True) - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - self.assertEqual(env_run.get_value("RUN_TYPE"), "branch") - - with Case(casedir) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - - time.sleep(0.2) - safe_copy(backup, active) - - with Case(casedir, read_only=False) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - - with Case(casedir, 
read_only=False) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - time.sleep(0.2) - safe_copy(backup, active) - case.read_xml() # Manual re-sync - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - - with Case(casedir) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - time.sleep(0.2) - safe_copy(backup, active) - env_run = EnvRun(casedir, read_only=True) - self.assertEqual(env_run.get_value("RUN_TYPE"), "startup") - - with Case(casedir, read_only=False) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - - # behind the back detection - with self.assertRaises(CIMEError): - with Case(casedir, read_only=False) as case: - time.sleep(0.2) - safe_copy(backup, active) - - with Case(casedir, read_only=False) as case: - case.set_value("RUN_TYPE", "branch") - - with self.assertRaises(CIMEError): - with Case(casedir) as case: - time.sleep(0.2) - safe_copy(backup, active) - - ########################################################################### - def test_configure(self): - ########################################################################### - self._create_test(["SMS.f09_g16.X", "--no-build"], test_id=self._baseline_name) - - casedir = os.path.join(self._testroot, - "{}.{}".format(CIME.utils.get_full_test_name("SMS.f09_g16.X", machine=self._machine, compiler=self._compiler), self._baseline_name)) - - manual_config_dir = os.path.join(casedir, "manual_config") - os.mkdir(manual_config_dir) - - run_cmd_no_fail("{} --machine={} --compiler={}".format(os.path.join(get_cime_root(), "tools", "configure"), self._machine, self._compiler), from_dir=manual_config_dir) - - with open(os.path.join(casedir, "env_mach_specific.xml"), "r") as fd: - case_env_contents = fd.read() - - with open(os.path.join(manual_config_dir, "env_mach_specific.xml"), "r") as fd: - man_env_contents = fd.read() - - self.assertEqual(case_env_contents, man_env_contents) - -############################################################################### -class X_TestSingleSubmit(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_single_submit(self): - ########################################################################### - # Skip unless on a batch system and users did not select no-batch - if (not self._hasbatch): - self.skipTest("Skipping single submit. Not valid without batch") - if CIME.utils.get_model() != "e3sm": - self.skipTest("Skipping single submit. E3SM experimental feature") - if self._machine not in ["sandiatoss3"]: - self.skipTest("Skipping single submit. 
Only works on sandiatoss3") - - # Keep small enough for now that we don't have to worry about load balancing - self._create_test(["--single-submit", "SMS_Ln9_P8.f45_g37_rx1.A", "SMS_Ln9_P8.f19_g16_rx1.A"], - env_changes="unset CIME_GLOBAL_WALLTIME &&") - -############################################################################### -class L_TestSaveTimings(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def simple_test(self, manual_timing=False): - ########################################################################### - timing_flag = "" if manual_timing else "--save-timing" - driver = CIME.utils.get_cime_default_driver() - if driver == "mct": - walltime="00:15:00" - else: - walltime="00:30:00" - self._create_test(["SMS_Ln9_P1.f19_g16_rx1.A", timing_flag, "--walltime="+walltime], test_id=self._baseline_name) - - statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, self._baseline_name)) - self.assertEqual(len(statuses), 1, msg="Should have had exactly one match, found %s" % statuses) - casedir = os.path.dirname(statuses[0]) - - with Case(casedir, read_only=True) as case: - lids = get_lids(case) - timing_dir = case.get_value("SAVE_TIMING_DIR") - casename = case.get_value("CASE") - - self.assertEqual(len(lids), 1, msg="Expected one LID, found %s" % lids) - - if manual_timing: - run_cmd_assert_result(self, "cd %s && %s/save_provenance postrun" % (casedir, TOOLS_DIR)) - - if CIME.utils.get_model() == "e3sm": - provenance_dirs = glob.glob(os.path.join(timing_dir, "performance_archive", getpass.getuser(), casename, lids[0] + "*")) - self.assertEqual(len(provenance_dirs), 1, msg="provenance dirs were missing") - verify_perms(self, timing_dir) - - ########################################################################### - def test_save_timings(self): - ########################################################################### - self.simple_test() - - ########################################################################### - def test_save_timings_manual(self): - ########################################################################### - self.simple_test(manual_timing=True) - -# Machinery for Macros generation tests. - -class MockMachines(object): - - """A mock version of the Machines object to simplify testing.""" - - def __init__(self, name, os_): - """Store the name.""" - self.name = name - self.os = os_ - - def get_machine_name(self): - """Return the name we were given.""" - return self.name - - def get_value(self, var_name): - """Allow the operating system to be queried.""" - assert var_name == "OS", "Build asked for a value not " \ - "implemented in the testing infrastructure." - return self.os - - def is_valid_compiler(self, _): # pylint:disable=no-self-use - """Assume all compilers are valid.""" - return True - - def is_valid_MPIlib(self, _): - """Assume all MPILIB settings are valid.""" - return True - -# pragma pylint: disable=unused-argument - def get_default_MPIlib(self, attributes=None): - return "mpich2" - - def get_default_compiler(self): - return "intel" - - -def get_macros(macro_maker, build_xml, build_system): - """Generate build system ("Macros" file) output from config_compilers XML. - - Arguments: - macro_maker - The underlying Build object. - build_xml - A string containing the XML to operate on. - build_system - Either "Makefile" or "CMake", depending on desired output. 
- - The return value is a string containing the build system output. - """ - # Build.write_macros expects file-like objects as input, so - # we need to wrap the strings in StringIO objects. - xml = six.StringIO(str(build_xml)) - output = six.StringIO() - output_format = None - if build_system == "Makefile": - output_format = "make" - elif build_system == "CMake": - output_format = "cmake" - else: - output_format = build_system - - macro_maker.write_macros_file(macros_file=output, - output_format=output_format, xml=xml) - return str(output.getvalue()) - - -def _wrap_config_compilers_xml(inner_string): - """Utility function to create a config_compilers XML string. - - Pass this function a string containing elements, and it will add - the necessary header/footer to the file. - """ - _xml_template = """ - -{} - -""" - - return _xml_template.format(inner_string) - - -class MakefileTester(object): - - """Helper class for checking Makefile output. - - Public methods: - __init__ - query_var - assert_variable_equals - assert_variable_matches - """ -# Note that the following is a Makefile and the echo line must begin with a tab - _makefile_template = """ -include Macros -query: -\techo '$({})' > query.out -""" - - def __init__(self, parent, make_string): - """Constructor for Makefile test helper class. - - Arguments: - parent - The TestCase object that is using this item. - make_string - Makefile contents to test. - """ - self.parent = parent - self.make_string = make_string - - def query_var(self, var_name, env, var): - """Request the value of a variable in the Makefile, as a string. - - Arguments: - var_name - Name of the variable to query. - env - A dict containing extra environment variables to set when calling - make. - var - A dict containing extra make variables to set when calling make. - (The distinction between env and var actually matters only for - CMake, though.) - """ - if env is None: - env = dict() - if var is None: - var = dict() - - # Write the Makefile strings to temporary files. - temp_dir = tempfile.mkdtemp() - macros_file_name = os.path.join(temp_dir, "Macros") - makefile_name = os.path.join(temp_dir, "Makefile") - output_name = os.path.join(temp_dir, "query.out") - - with open(macros_file_name, "w") as macros_file: - macros_file.write(self.make_string) - with open(makefile_name, "w") as makefile: - makefile.write(self._makefile_template.format(var_name)) - - environment = os.environ.copy() - environment.update(env) - environment.update(var) - gmake_exe = MACHINE.get_value("GMAKE") - if gmake_exe is None: - gmake_exe = "gmake" - run_cmd_assert_result(self.parent, "%s query --directory=%s 2>&1" % (gmake_exe, temp_dir), env=environment) - - with open(output_name, "r") as output: - query_result = output.read().strip() - - # Clean up the Makefiles. - shutil.rmtree(temp_dir) - - return query_result - - def assert_variable_equals(self, var_name, value, env=None, var=None): - """Assert that a variable in the Makefile has a given value. - - Arguments: - var_name - Name of variable to check. - value - The string that the variable value should be equal to. - env - Optional. Dict of environment variables to set when calling make. - var - Optional. Dict of make variables to set when calling make. - """ - self.parent.assertEqual(self.query_var(var_name, env, var), value) - - def assert_variable_matches(self, var_name, regex, env=None, var=None): - """Assert that a variable in the Makefile matches a regex. - - Arguments: - var_name - Name of variable to check. - regex - The regex to match. 
- env - Optional. Dict of environment variables to set when calling make. - var - Optional. Dict of make variables to set when calling make. - """ - self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex) - - -class CMakeTester(object): - - """Helper class for checking CMake output. - - Public methods: - __init__ - query_var - assert_variable_equals - assert_variable_matches - """ - - _cmakelists_template = """ -include(./Macros.cmake) -file(WRITE query.out "${{{}}}") -""" - - def __init__(self, parent, cmake_string): - """Constructor for CMake test helper class. - - Arguments: - parent - The TestCase object that is using this item. - cmake_string - CMake contents to test. - """ - self.parent = parent - self.cmake_string = cmake_string - - def query_var(self, var_name, env, var): - """Request the value of a variable in Macros.cmake, as a string. - - Arguments: - var_name - Name of the variable to query. - env - A dict containing extra environment variables to set when calling - cmake. - var - A dict containing extra CMake variables to set when calling cmake. - """ - if env is None: - env = dict() - if var is None: - var = dict() - - # Write the CMake strings to temporary files. - temp_dir = tempfile.mkdtemp() - macros_file_name = os.path.join(temp_dir, "Macros.cmake") - cmakelists_name = os.path.join(temp_dir, "CMakeLists.txt") - output_name = os.path.join(temp_dir, "query.out") - - with open(macros_file_name, "w") as macros_file: - for key in var: - macros_file.write("set({} {})\n".format(key, var[key])) - macros_file.write(self.cmake_string) - with open(cmakelists_name, "w") as cmakelists: - cmakelists.write(self._cmakelists_template.format(var_name)) - - environment = os.environ.copy() - environment.update(env) - os_ = MACHINE.get_value("OS") - # cmake will not work on cray systems without this flag - if os_ == "CNL": - cmake_args = "-DCMAKE_SYSTEM_NAME=Catamount" - else: - cmake_args = "" - - run_cmd_assert_result(self.parent, "cmake %s . 2>&1" % cmake_args, from_dir=temp_dir, env=environment) - - with open(output_name, "r") as output: - query_result = output.read().strip() - - # Clean up the CMake files. - shutil.rmtree(temp_dir) - - return query_result - - def assert_variable_equals(self, var_name, value, env=None, var=None): - """Assert that a variable in the CMakeLists has a given value. - - Arguments: - var_name - Name of variable to check. - value - The string that the variable value should be equal to. - env - Optional. Dict of environment variables to set when calling cmake. - var - Optional. Dict of CMake variables to set when calling cmake. - """ - self.parent.assertEqual(self.query_var(var_name, env, var), value) - - def assert_variable_matches(self, var_name, regex, env=None, var=None): - """Assert that a variable in the CMkeLists matches a regex. - - Arguments: - var_name - Name of variable to check. - regex - The regex to match. - env - Optional. Dict of environment variables to set when calling cmake. - var - Optional. Dict of CMake variables to set when calling cmake. - """ - self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex) - - -############################################################################### -class G_TestMacrosBasic(unittest.TestCase): -############################################################################### - - """Basic infrastructure tests. - - This class contains tests that do not actually depend on the output of the - macro file conversion. 
This includes basic smoke testing and tests of - error-handling in the routine. - """ - - def test_script_is_callable(self): - """The test script can be called on valid output without dying.""" - # This is really more a smoke test of this script than anything else. - maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) - test_xml = _wrap_config_compilers_xml("FALSE") - get_macros(maker, test_xml, "Makefile") - - def test_script_rejects_bad_xml(self): - """The macro writer rejects input that's not valid XML.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) - with self.assertRaises(ParseError): - get_macros(maker, "This is not valid XML.", "Makefile") - - def test_script_rejects_bad_build_system(self): - """The macro writer rejects a bad build system string.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) - bad_string = "argle-bargle." - with assertRaisesRegex(self, - CIMEError, - "Unrecognized build system provided to write_macros: " + bad_string): - get_macros(maker, "This string is irrelevant.", bad_string) - - -############################################################################### -class H_TestMakeMacros(unittest.TestCase): -############################################################################### - - """Makefile macros tests. - - This class contains tests of the Makefile output of Build. - - Aside from the usual setUp and test methods, this class has a utility method - (xml_to_tester) that converts XML input directly to a MakefileTester object. - """ - def setUp(self): - self.test_os = "SomeOS" - self.test_machine = "mymachine" - self.test_compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER - self.test_mpilib = MACHINE.get_default_MPIlib(attributes={"compiler":self.test_compiler}) if TEST_MPILIB is None else TEST_MPILIB - - self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version=2.0) - - def xml_to_tester(self, xml_string): - """Helper that directly converts an XML string to a MakefileTester.""" - test_xml = _wrap_config_compilers_xml(xml_string) - return MakefileTester(self, get_macros(self._maker, test_xml, "Makefile")) - - def test_generic_item(self): - """The macro writer can write out a single generic item.""" - xml_string = "FALSE" - tester = self.xml_to_tester(xml_string) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - - def test_machine_specific_item(self): - """The macro writer can pick out a machine-specific item.""" - xml1 = """TRUE""".format(self.test_machine) - xml2 = """FALSE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - # Do this a second time, but with elements in the reverse order, to - # ensure that the code is not "cheating" by taking the first match. - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_ignore_non_match(self): - """The macro writer ignores an entry with the wrong machine name.""" - xml1 = """TRUE""" - xml2 = """FALSE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - # Again, double-check that we don't just get lucky with the order. 
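        # (Annotation, not original code.) The XML literals in these macro-writer
        # tests appear to have lost their element markup when this patch was
        # rendered; only their text content survived. Assuming the config_compilers
        # version-2 layout (a MACH attribute on the <compiler> element), the
        # machine-specific / generic pair exercised here would read roughly like:
        #
        #     xml_machine = """<compiler MACH="mymachine"><SUPPORTS_CXX>TRUE</SUPPORTS_CXX></compiler>"""
        #     xml_generic = """<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>"""
        #     tester = self.xml_to_tester(xml_machine + xml_generic)
        #     tester.assert_variable_equals("SUPPORTS_CXX", "TRUE")  # machine-specific entry wins
        #
        # This is an illustrative reconstruction only, not the original literals.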
- tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - - def test_os_specific_item(self): - """The macro writer can pick out an OS-specific item.""" - xml1 = """TRUE""".format(self.test_os) - xml2 = """FALSE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_mach_other_compiler(self): - """The macro writer compiler-specific logic works as expected.""" - xml1 = """a b c""".format(self.test_compiler) - xml2 = """x y z""".format(self.test_machine) - xml3 = """x y z""".format(self.test_machine,self.test_compiler) - xml4 = """x y z""".format(self.test_machine,self.test_compiler) - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml1+xml3) - tester.assert_variable_equals("CFLAGS", "a b c x y z",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml1+xml4) - tester.assert_variable_equals("CFLAGS", "x y z",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml4+xml1) - tester.assert_variable_equals("CFLAGS", "x y z",var={"COMPILER":self.test_compiler}) - - def test_mach_beats_os(self): - """The macro writer chooses machine-specific over os-specific matches.""" - xml1 = """FALSE""".format(self.test_os) - xml2 = """TRUE""".format(self.test_machine) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_mach_and_os_beats_mach(self): - """The macro writer chooses the most-specific match possible.""" - xml1 = """FALSE""".format(self.test_machine) - xml2 = """TRUE""" - xml2 = xml2.format(self.test_machine, self.test_os) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_build_time_attribute(self): - """The macro writer writes conditionals for build-time choices.""" - xml1 = """/path/to/mpich""" - xml2 = """/path/to/openmpi""" - xml3 = """/path/to/default""" - tester = self.xml_to_tester(xml1+xml2+xml3) - tester.assert_variable_equals("MPI_PATH", "/path/to/default") - tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"}) - tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"}) - tester = self.xml_to_tester(xml3+xml2+xml1) - tester.assert_variable_equals("MPI_PATH", "/path/to/default") - tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"}) - tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"}) - - def test_reject_duplicate_defaults(self): - """The macro writer dies if given many defaults.""" - xml1 = """/path/to/default""" - xml2 = """/path/to/other_default""" - with assertRaisesRegex(self, - CIMEError, - "Variable MPI_PATH is set ambiguously in config_compilers.xml."): - self.xml_to_tester(xml1+xml2) - - def test_reject_duplicates(self): - """The macro writer dies if given many matches 
for a given configuration.""" - xml1 = """/path/to/mpich""" - xml2 = """/path/to/mpich2""" - with assertRaisesRegex(self, - CIMEError, - "Variable MPI_PATH is set ambiguously in config_compilers.xml."): - self.xml_to_tester(xml1+xml2) - - def test_reject_ambiguous(self): - """The macro writer dies if given an ambiguous set of matches.""" - xml1 = """/path/to/mpich""" - xml2 = """/path/to/mpi-debug""" - with assertRaisesRegex(self, - CIMEError, - "Variable MPI_PATH is set ambiguously in config_compilers.xml."): - self.xml_to_tester(xml1+xml2) - - def test_compiler_changeable_at_build_time(self): - """The macro writer writes information for multiple compilers.""" - xml1 = """FALSE""" - xml2 = """TRUE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE", var={"COMPILER": "gnu"}) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - - def test_base_flags(self): - """Test that we get "base" compiler flags.""" - xml1 = """-O2""" - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("FFLAGS", "-O2") - - def test_machine_specific_base_flags(self): - """Test selection among base compiler flag sets based on machine.""" - xml1 = """-O2""" - xml2 = """-O3""".format(self.test_machine) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-O3") - - def test_build_time_base_flags(self): - """Test selection of base flags based on build-time attributes.""" - xml1 = """-O2""" - xml2 = """-O3""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-O2") - tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) - - def test_build_time_base_flags_same_parent(self): - """Test selection of base flags in the same parent element.""" - xml1 = """-O2""" - xml2 = """-O3""" - tester = self.xml_to_tester(""+xml1+xml2+"") - tester.assert_variable_equals("FFLAGS", "-O2") - tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) - # Check for order independence here, too. - tester = self.xml_to_tester(""+xml2+xml1+"") - tester.assert_variable_equals("FFLAGS", "-O2") - tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) - - def test_append_flags(self): - """Test appending flags to a list.""" - xml1 = """-delicious""" - xml2 = """-cake""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-delicious -cake") - # Order independence, as usual. 
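        # (Annotation, not original code.) Under the same assumed version-2 layout,
        # the base-versus-append behaviour exercised by the FFLAGS tests in this
        # class corresponds to input of roughly this shape:
        #
        #     <compiler>
        #       <FFLAGS>
        #         <base>-O2</base>
        #         <append DEBUG="TRUE">-g</append>
        #       </FFLAGS>
        #     </compiler>
        #
        # which would be expected to yield FFLAGS == "-O2" by default and
        # "-O2 -g" when DEBUG is TRUE.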
-        tester = self.xml_to_tester(xml2+xml1)
-        tester.assert_variable_equals("FFLAGS", "-delicious -cake")
-
-    def test_machine_specific_append_flags(self):
-        """Test appending flags that are either more or less machine-specific."""
-        xml1 = """-delicious"""
-        xml2 = """-cake""".format(self.test_machine)
-        tester = self.xml_to_tester(xml1+xml2)
-        tester.assert_variable_matches("FFLAGS", "^(-delicious -cake|-cake -delicious)$")
-
-    def test_machine_specific_base_keeps_append_flags(self):
-        """Test that machine-specific base flags don't override default append flags."""
-        xml1 = """-delicious"""
-        xml2 = """-cake""".format(self.test_machine)
-        tester = self.xml_to_tester(xml1+xml2)
-        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
-        tester = self.xml_to_tester(xml2+xml1)
-        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
-
-    def test_machine_specific_base_and_append_flags(self):
-        """Test that machine-specific base flags coexist with machine-specific append flags."""
-        xml1 = """-delicious""".format(self.test_machine)
-        xml2 = """-cake""".format(self.test_machine)
-        tester = self.xml_to_tester(xml1+xml2)
-        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
-        tester = self.xml_to_tester(xml2+xml1)
-        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
-
-    def test_append_flags_without_base(self):
-        """Test appending flags to a value set before Macros is included."""
-        xml1 = """-cake"""
-        tester = self.xml_to_tester(xml1)
-        tester.assert_variable_equals("FFLAGS", "-delicious -cake", var={"FFLAGS": "-delicious"})
-
-    def test_build_time_append_flags(self):
-        """Test build-time selection of compiler flags."""
-        xml1 = """-cake"""
-        xml2 = """-and-pie"""
-        tester = self.xml_to_tester(xml1+xml2)
-        tester.assert_variable_equals("FFLAGS", "-cake")
-        tester.assert_variable_matches("FFLAGS", "^(-cake -and-pie|-and-pie -cake)$", var={"DEBUG": "TRUE"})
-
-    def test_environment_variable_insertion(self):
-        """Test that ENV{..} inserts environment variables."""
-        # Do it again with $ENV{} style
-        xml1 = """-L$ENV{NETCDF} -lnetcdf"""
-        tester = self.xml_to_tester(xml1)
-        tester.assert_variable_equals("LDFLAGS", "-L/path/to/netcdf -lnetcdf",
-                                      env={"NETCDF": "/path/to/netcdf"})
-
-    def test_shell_command_insertion(self):
-        """Test that $SHELL inserts shell command output."""
-        xml1 = """-O$SHELL{echo 2} -fast"""
-        tester = self.xml_to_tester(xml1)
-        tester.assert_variable_equals("FFLAGS", "-O2 -fast")
-
-    def test_multiple_shell_commands(self):
-        """Test that more than one $SHELL element can be used."""
-        xml1 = """-O$SHELL{echo 2} -$SHELL{echo fast}"""
-        tester = self.xml_to_tester(xml1)
-        tester.assert_variable_equals("FFLAGS", "-O2 -fast")
-
-    def test_env_and_shell_command(self):
-        """Test that $ENV works inside $SHELL elements."""
-        xml1 = """-O$SHELL{echo $ENV{OPT_LEVEL}} -fast"""
-        tester = self.xml_to_tester(xml1)
-        tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"})
-
-    def test_config_variable_insertion(self):
-        """Test that $VAR inserts variables from config_compilers."""
-        # Construct an absurd chain of references just to be sure that we don't
-        # pass by accident, e.g. outputting things in the right order just due
-        # to good luck in a hash somewhere.
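        # (Annotation, not original code.) The preceding tests exercise three
        # substitution markers inside flag values: $ENV{...} (environment
        # variables), $SHELL{...} (shell command output) and ${VAR} (other
        # config_compilers variables). With the assumed element layout, input
        # combining them would look roughly like:
        #
        #     <compiler>
        #       <LDFLAGS>
        #         <append>-L$ENV{NETCDF} -lnetcdf</append>
        #       </LDFLAGS>
        #       <FFLAGS>
        #         <base>-O$SHELL{echo 2} -fast</base>
        #       </FFLAGS>
        #       <MPI_LIB_NAME>stuff-${MPI_PATH}-stuff</MPI_LIB_NAME>
        #     </compiler>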
- xml1 = """stuff-${MPI_PATH}-stuff""" - xml2 = """${MPICC}""" - xml3 = """${MPICXX}""" - xml4 = """${MPIFC}""" - xml5 = """mpicc""" - tester = self.xml_to_tester(""+xml1+xml2+xml3+xml4+xml5+"") - tester.assert_variable_equals("MPI_LIB_NAME", "stuff-mpicc-stuff") - - def test_config_reject_self_references(self): - """Test that $VAR self-references are rejected.""" - # This is a special case of the next test, which also checks circular - # references. - xml1 = """${MPI_LIB_NAME}""" - err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." - with assertRaisesRegex(self,CIMEError, err_msg): - self.xml_to_tester(""+xml1+"") - - def test_config_reject_cyclical_references(self): - """Test that cyclical $VAR references are rejected.""" - xml1 = """${MPI_PATH}""" - xml2 = """${MPI_LIB_NAME}""" - err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." - with assertRaisesRegex(self,CIMEError, err_msg): - self.xml_to_tester(""+xml1+xml2+"") - - def test_variable_insertion_with_machine_specific_setting(self): - """Test that machine-specific $VAR dependencies are correct.""" - xml1 = """something""" - xml2 = """$MPI_PATH""".format(self.test_machine) - xml3 = """${MPI_LIB_NAME}""" - err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." - with assertRaisesRegex(self,CIMEError, err_msg): - self.xml_to_tester(xml1+xml2+xml3) - - def test_override_with_machine_and_new_attributes(self): - """Test that overrides with machine-specific settings with added attributes work correctly.""" - xml1 = """ - - icc - mpicxx - mpif90 - mpicc -""".format(self.test_compiler) - xml2 = """ - - mpifoo - mpiffoo - mpifouc - -""".format(self.test_compiler, self.test_machine, self.test_mpilib) - - tester = self.xml_to_tester(xml1+xml2) - - tester.assert_variable_equals("SCC", "icc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICXX", "mpifoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPIFC", "mpiffoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICC", "mpicc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - - tester = self.xml_to_tester(xml2+xml1) - - tester.assert_variable_equals("SCC", "icc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICXX", "mpifoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPIFC", "mpiffoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICC", "mpicc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - - def test_override_with_machine_and_same_attributes(self): - """Test that machine-specific conditional overrides with the same attribute work correctly.""" - xml1 = """ - - mpifc -""".format(self.test_compiler, self.test_mpilib) - xml2 = """ - - mpif90 - -""".format(self.test_machine, self.test_compiler, self.test_mpilib) - - tester = self.xml_to_tester(xml1+xml2) - - tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - - tester = self.xml_to_tester(xml2+xml1) - - tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER":self.test_compiler, 
"MPILIB":self.test_mpilib}) - - def test_appends_not_overriden(self): - """Test that machine-specific base value changes don't interfere with appends.""" - xml1=""" - - - -base1 - -debug1 - -""".format(self.test_compiler) - - xml2=""" - - - -base2 - -debug2 - -""".format(self.test_machine, self.test_compiler) - - tester = self.xml_to_tester(xml1+xml2) - - tester.assert_variable_equals("FFLAGS", "-base2", var={"COMPILER": self.test_compiler}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug2", var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug1", var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}) - - tester = self.xml_to_tester(xml2+xml1) - - tester.assert_variable_equals("FFLAGS", "-base2", var={"COMPILER": self.test_compiler}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug2", var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug1", var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}) - - def test_multilevel_specificity(self): - """Check that settings with multiple levels of machine-specificity can be resolved.""" - xml1=""" - - mpifc -""" - - xml2=""" - - mpif03 -""".format(self.test_os, self.test_mpilib) - - xml3=""" - - mpif90 -""".format(self.test_machine) - - # To verify order-independence, test every possible ordering of blocks. - testers = [] - testers.append(self.xml_to_tester(xml1+xml2+xml3)) - testers.append(self.xml_to_tester(xml1+xml3+xml2)) - testers.append(self.xml_to_tester(xml2+xml1+xml3)) - testers.append(self.xml_to_tester(xml2+xml3+xml1)) - testers.append(self.xml_to_tester(xml3+xml1+xml2)) - testers.append(self.xml_to_tester(xml3+xml2+xml1)) - - for tester in testers: - tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "TRUE"}) - tester.assert_variable_equals("MPIFC", "mpif03", var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "FALSE"}) - tester.assert_variable_equals("MPIFC", "mpifc", var={"COMPILER": self.test_compiler, "MPILIB": "NON_MATCHING_MPI", "DEBUG": "FALSE"}) - - def test_remove_dependency_issues(self): - """Check that overridden settings don't cause inter-variable dependencies.""" - xml1=""" - - ${SFC} -""" - - xml2=""" -""".format(self.test_machine) + """ - ${MPIFC} - mpif90 -""" - - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SFC", "mpif90") - tester.assert_variable_equals("MPIFC", "mpif90") - - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SFC", "mpif90") - tester.assert_variable_equals("MPIFC", "mpif90") - - -############################################################################### -class I_TestCMakeMacros(H_TestMakeMacros): -############################################################################### - - """CMake macros tests. - - This class contains tests of the CMake output of Build. - - This class simply inherits all of the methods of TestMakeOutput, but changes - the definition of xml_to_tester to create a CMakeTester instead. 
- """ - - def xml_to_tester(self, xml_string): - """Helper that directly converts an XML string to a MakefileTester.""" - test_xml = _wrap_config_compilers_xml(xml_string) - if (NO_CMAKE): - self.skipTest("Skipping cmake test") - else: - return CMakeTester(self, get_macros(self._maker, test_xml, "CMake")) - -############################################################################### -class S_TestManageAndQuery(unittest.TestCase): - """Tests various scripts to manage and query xml files""" - - def _run_and_assert_query_testlist(self, extra_args=""): - """Ensure that query_testlist runs successfully with the given extra arguments""" - files = Files() - testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component":"drv"}) - - run_cmd_assert_result(self, "{}/query_testlists --xml-testlist {} {}".format( - SCRIPT_DIR, testlist_drv, extra_args)) - - def test_query_testlists_runs(self): - """Make sure that query_testlists runs successfully - - This simply makes sure that query_testlists doesn't generate any errors - when it runs. This helps ensure that changes in other utilities don't - break query_testlists. - """ - self._run_and_assert_query_testlist(extra_args="--show-options") - - def test_query_testlists_define_testtypes_runs(self): - """Make sure that query_testlists runs successfully with the --define-testtypes argument""" - self._run_and_assert_query_testlist(extra_args="--define-testtypes") - - def test_query_testlists_count_runs(self): - """Make sure that query_testlists runs successfully with the --count argument""" - self._run_and_assert_query_testlist(extra_args="--count") - - def test_query_testlists_list_runs(self): - """Make sure that query_testlists runs successfully with the --list argument""" - self._run_and_assert_query_testlist(extra_args="--list categories") - -############################################################################### -class B_CheckCode(unittest.TestCase): -############################################################################### - # Tests are generated in the main loop below - longMessage = True - - all_results = None - -def make_pylint_test(pyfile, all_files): - def test(self): - if B_CheckCode.all_results is None: - B_CheckCode.all_results = check_code(all_files) - #pylint: disable=unsubscriptable-object - result = B_CheckCode.all_results[pyfile] - self.assertTrue(result == "", msg=result) - - return test - -def check_for_pylint(): - #pylint: disable=import-error - from distutils.spawn import find_executable - pylint = find_executable("pylint") - if pylint is not None: - output = run_cmd_no_fail("pylint --version") - pylintver = re.search(r"pylint\s+(\d+)[.](\d+)[.](\d+)", output) - major = int(pylintver.group(1)) - minor = int(pylintver.group(2)) - if pylint is None or major < 1 or (major == 1 and minor < 5): - print("pylint version 1.5 or newer not found, pylint tests skipped") - return False - return True - -def write_provenance_info(): - curr_commit = get_current_commit(repo=LIB_DIR) - logging.info("\nTesting commit %s" % curr_commit) - cime_model = CIME.utils.get_model() - logging.info("Using cime_model = %s" % cime_model) - logging.info("Testing machine = %s" % MACHINE.get_machine_name()) - if TEST_COMPILER is not None: - logging.info("Testing compiler = %s"% TEST_COMPILER) - if TEST_MPILIB is not None: - logging.info("Testing mpilib = %s"% TEST_MPILIB) - logging.info("Test root: %s" % TEST_ROOT) - logging.info("Test driver: %s\n" % CIME.utils.get_cime_default_driver()) - -def _main_func(description): - global MACHINE - global 
NO_CMAKE - global FAST_ONLY - global NO_BATCH - global TEST_COMPILER - global TEST_MPILIB - global TEST_ROOT - global GLOBAL_TIMEOUT - global NO_TEARDOWN - config = CIME.utils.get_cime_config() - - help_str = \ -""" -{0} [TEST] [TEST] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run the full suite \033[0m - > {0} - - \033[1;32m# Run all code checker tests \033[0m - > {0} B_CheckCode - - \033[1;32m# Run test test_wait_for_test_all_pass from class M_TestWaitForTests \033[0m - > {0} M_TestWaitForTests.test_wait_for_test_all_pass -""".format(os.path.basename(sys.argv[0])) - - parser = argparse.ArgumentParser(usage=help_str, - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - parser.add_argument("--fast", action="store_true", - help="Skip full system tests, which saves a lot of time") - - parser.add_argument("--no-batch", action="store_true", - help="Do not submit jobs to batch system, run locally." - " If false, will default to machine setting.") - - parser.add_argument("--no-cmake", action="store_true", - help="Do not run cmake tests") - - parser.add_argument("--no-teardown", action="store_true", - help="Do not delete directories left behind by testing") - - parser.add_argument("--machine", - help="Select a specific machine setting for cime") - - parser.add_argument("--compiler", - help="Select a specific compiler setting for cime") - - parser.add_argument( "--mpilib", - help="Select a specific compiler setting for cime") - - parser.add_argument( "--test-root", - help="Select a specific test root for all cases created by the testing") - - parser.add_argument("--timeout", type=int, - help="Select a specific timeout for all tests") - - ns, args = parser.parse_known_args() - - # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone) - sys.argv[1:] = args - - FAST_ONLY = ns.fast - NO_BATCH = ns.no_batch - NO_CMAKE = ns.no_cmake - GLOBAL_TIMEOUT = ns.timeout - NO_TEARDOWN = ns.no_teardown - - if ns.machine is not None: - MACHINE = Machines(machine=ns.machine) - os.environ["CIME_MACHINE"] = ns.machine - elif "CIME_MACHINE" in os.environ: - mach_name = os.environ["CIME_MACHINE"] - MACHINE = Machines(machine=mach_name) - elif config.has_option("create_test", "MACHINE"): - MACHINE = Machines(machine=config.get("create_test", "MACHINE")) - elif config.has_option("main", "MACHINE"): - MACHINE = Machines(machine=config.get("main", "MACHINE")) - else: - MACHINE = Machines() - - if ns.compiler is not None: - TEST_COMPILER = ns.compiler - elif config.has_option("create_test", "COMPILER"): - TEST_COMPILER = config.get("create_test", "COMPILER") - elif config.has_option("main", "COMPILER"): - TEST_COMPILER = config.get("main", "COMPILER") - - if ns.mpilib is not None: - TEST_MPILIB = ns.mpilib - elif config.has_option("create_test", "MPILIB"): - TEST_MPILIB = config.get("create_test", "MPILIB") - elif config.has_option("main", "MPILIB"): - TEST_MPILIB = config.get("main", "MPILIB") - - if ns.test_root is not None: - TEST_ROOT = ns.test_root - elif config.has_option("create_test", "TEST_ROOT"): - TEST_ROOT = config.get("create_test", "TEST_ROOT") - else: - TEST_ROOT = os.path.join(MACHINE.get_value("CIME_OUTPUT_ROOT"), - "scripts_regression_test.%s"% CIME.utils.get_timestamp()) - - args = lambda: None # just something to set attrs on - for log_param in ["debug", "silent", "verbose"]: - flag = "--%s" % log_param - if flag in sys.argv: - sys.argv.remove(flag) - setattr(args, log_param, True) - else: - setattr(args, log_param, False) - - args = 
CIME.utils.parse_args_and_handle_standard_logging_options(args, None) - - write_provenance_info() - - # Find all python files in repo and create a pylint test for each - if check_for_pylint(): - files_to_test = get_all_checkable_files() - - for file_to_test in files_to_test: - pylint_test = make_pylint_test(file_to_test, files_to_test) - testname = "test_pylint_%s" % file_to_test.replace("/", "_").replace(".", "_") - expect(not hasattr(B_CheckCode, testname), "Repeat %s" % testname) - setattr(B_CheckCode, testname, pylint_test) - - try: - unittest.main(verbosity=2, catchbreak=True) - except CIMEError as e: - if e.__str__() != "False": - print("Detected failures, leaving directory:", TEST_ROOT) - else: - print("All pass, removing directory:", TEST_ROOT) - if os.path.exists(TEST_ROOT) and not NO_TEARDOWN: - shutil.rmtree(TEST_ROOT) - - raise - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/tests/user_mods_test3/shell_commands b/scripts/tests/user_mods_test3/shell_commands deleted file mode 100755 index 8b25d8d32bc..00000000000 --- a/scripts/tests/user_mods_test3/shell_commands +++ /dev/null @@ -1,7 +0,0 @@ -pioversion=`./xmlquery --value PIO_VERSION` -if [[ "$pioversion" -eq "1" ]] -then - ./xmlchange PIO_VERSION=2 -else - ./xmlchange PIO_VERSION=1 -fi \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000000..1c4058ebd85 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,25 @@ +[metadata] +name = CIME +version = 0.0.1 + +[options] +packages = find: +include_package_data = True + +[options.entry_points] +console_scripts = + create_clone = CIME.scripts.create_clone:_main_func + create_newcase = CIME.scripts.create_newcase:_main_func + create_test = CIME.scripts.create_test:_main_func + query_config = CIME.scripts.query_config:_main_func + query_testlists = CIME.scripts.query_testlists:_main_func + +[tool:pytest] +junit_family=xunit2 +python_files = test_*.py +testpaths = + CIME/tests + +[coverage:report] +omit = + CIME/tests/* diff --git a/setup.py b/setup.py new file mode 100644 index 00000000000..ed77ad02286 --- /dev/null +++ b/setup.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +from setuptools import find_packages, setup + +with open("README.md") as f: + readme = f.read() + +setup( + author="CIME developers", + python_requires=">=3.5", + description=" Common Infrastructure for Modeling the Earth", + long_description=readme, + include_package_data=True, + name="CIME", + packages=find_packages(), + test_suite="CIME.tests", + tests_requires=["pytest"], + url="https://github.com/ESMCI/cime", + version="0.1.0" +) diff --git a/src/CMake/FindpFUnit.cmake b/src/CMake/FindpFUnit.cmake deleted file mode 100644 index a46c0f73eee..00000000000 --- a/src/CMake/FindpFUnit.cmake +++ /dev/null @@ -1,52 +0,0 @@ -# Find module for pFUnit -# -# For this module to work, either the pFUnit parser must be discoverable -# (e.g. in the user's PATH), or else the environment variable "PFUNIT" must -# be defined, and point to the root directory for the PFUNIT installation. -# -# This module sets some typical variables: -# PFUNIT_FOUND -# PFUNIT_LIBRARY(/LIBRARIES) -# PFUNIT_INCLUDE_DIR(/DIRS) -# -# The module also sets: -# PFUNIT_DRIVER - Path to the pFUnit driver source. -# PFUNIT_MODULE_DIR - Directory containing pFUnit's module files. -# PFUNIT_PARSER - Path to pFUnitParser.py (the preprocessor). 
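# (Annotation, not part of the original module.) A consuming CMakeLists.txt
# would typically use this find module along these lines; the target and
# source-list names here are hypothetical:
#
#     find_package(pFUnit REQUIRED)
#     include_directories(${PFUNIT_INCLUDE_DIRS})
#     add_executable(my_pfunit_tests ${my_test_sources})
#     target_link_libraries(my_pfunit_tests ${PFUNIT_LIBRARIES})
#
# with the PFUNIT environment variable pointing at the installation root when
# pFUnitParser.py is not already on the PATH.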
- -#========================================================================== -# Copyright (c) 2013-2014, University Corporation for Atmospheric Research -# -# This software is distributed under a two-clause BSD license, with no -# warranties, express or implied. See the accompanying LICENSE file for -# details. -#========================================================================== - -include(FindPackageHandleStandardArgs) - -find_program(PFUNIT_PARSER pFUnitParser.py - HINTS ${PFUNIT_PATH}/bin $ENV{PFUNIT}/bin) - -string(REGEX REPLACE "bin/pFUnitParser\\.py\$" "" - pfunit_directory ${PFUNIT_PARSER}) - -find_library(PFUNIT_LIBRARY pfunit - HINTS ${PFUNIT_PATH}/lib ${pfunit_directory}/lib) - -find_path(PFUNIT_INCLUDE_DIR driver.F90 - HINTS ${pfunit_directory}/include) - -set(PFUNIT_DRIVER ${PFUNIT_INCLUDE_DIR}/driver.F90) - -find_path(PFUNIT_MODULE_DIR NAMES pfunit.mod PFUNIT.MOD - HINTS ${pfunit_directory}/include ${pfunit_directory}/mod) - -set(PFUNIT_LIBRARIES ${PFUNIT_LIBRARY}) -set(PFUNIT_INCLUDE_DIRS ${PFUNIT_INCLUDE_DIR} ${PFUNIT_MODULE_DIR}) - -# Handle QUIETLY and REQUIRED. -find_package_handle_standard_args(pFUnit DEFAULT_MSG - PFUNIT_LIBRARY PFUNIT_INCLUDE_DIR PFUNIT_MODULE_DIR PFUNIT_PARSER) - -mark_as_advanced(PFUNIT_INCLUDE_DIR PFUNIT_LIBRARY PFUNIT_MODULE_DIR - PFUNIT_PARSER) diff --git a/src/CMake/README.md b/src/CMake/README.md deleted file mode 100644 index ae583206b0e..00000000000 --- a/src/CMake/README.md +++ /dev/null @@ -1,39 +0,0 @@ -CMake_Fortran_utils -=================== - -CMake modules dealing with Fortran-specific issues and Fortran libraries - -Currently, these modules should work with CMake version 2.8.8 and later -versions. Earlier CMake versions may work but are untested. - -Below is a brief listing of modules. More detailed information on the -purpose and use of these modules can be found in comments at the top of -each file. - -Find modules for specific libraries: - -FindNETCDF - -FindpFUnit - -FindPnetcdf - -Utility modules: - -genf90_utils - Generate Fortran code from genf90.pl templates. - -pFUnit_utils - Create executables using the pFUnit parser and driver. - -Sourcelist_utils - Use source file lists defined over multiple directories. - -Modules that are CESM-specific and/or incomplete: - -CIME\_initial\_setup - Handles setup that must be done before the 'project' -line. This must be included before the 'project' line in the main CMakeLists.txt -file. - -CIME_utils - Handles a few options, and includes several other modules. This -must be included after the 'project' line in the main CMakeLists.txt file, and -after the inclusion of CIME\_initial\_setup. - -Compilers - Specify compiler-specific behavior, add build types for CESM. diff --git a/src/CMake/pFUnit_utils.cmake b/src/CMake/pFUnit_utils.cmake deleted file mode 100644 index 2accabe20e1..00000000000 --- a/src/CMake/pFUnit_utils.cmake +++ /dev/null @@ -1,227 +0,0 @@ -# Utilities for using pFUnit's preprocessor and provided driver file. -# -# This module relies upon the variables defined by the FindpFUnit module. -# -#========================================================================== -# -# add_pFUnit_executable -# -# Arguments: -# name - Name of the executable to add. -# pf_file_list - List of .pf files to process. -# output_directory - Directory where generated sources will be placed. -# fortran_source_list - List of Fortran files to include. -# -# Preprocesses the input .pf files to create test suites, then creates an -# executable that drives those suites with the pFUnit driver. 
-# -# Limitations: -# add_pFUnit_executable cannot currently handle cases where the user -# choses to do certain things "manually", such as: -# -# - Test suites written in normal Fortran (not .pf) files. -# - User-specified testSuites.inc -# - User-specified driver file in fortran_source_list. -# -#========================================================================== -# -# define_pFUnit_failure -# -# Arguments: -# test_name - Name of a CTest test. -# -# Defines FAIL_REGULAR_EXPRESSION and PASS_REGULAR_EXPRESSION for the given -# test, so that pFUnit's overall pass/fail status can be detected. -# -#========================================================================== -# -# create_pFUnit_test -# -# Required arguments: -# test_name - Name of a CTest test. -# executable_name - Name of the executable associated with this test. -# pf_file_list - List of .pf files to process. -# fortran_source_list - List of Fortran files to include. -# -# Optional arguments, specified via keyword: -# GEN_OUTPUT_DIRECTORY - directory for generated source files, relative to CMAKE_CURRENT_BINARY_DIR -# - Defaults to CMAKE_CURRENT_BINARY_DIR -# - Needs to be given if you have multiple separate pFUnit tests defined in the same directory -# COMMAND - Command to run the pFUnit test -# - Defaults to ./executable_name -# - Needs to be given if you need more on the command line than just the executable -# name, such as setting the number of threads -# - A multi-part command should NOT be enclosed in quotes (see example below) -# - COMMAND should NOT contain the mpirun command: this is specified -# separately, via the PFUNIT_MPIRUN CMake variable -# - The name of the executable should be prefixed with ./ for this to work -# when dot is not in your path (e.g., ./foo_exe rather than simply foo_exe) -# -# Non-standard CMake variables used: -# PFUNIT_MPIRUN - If executables need to be prefixed with an mpirun command, -# PFUNIT_MPIRUN gives this prefix (e.g., "mpirun") -# -# Does everything needed to create a pFUnit-based test, wrapping -# add_pFUnit_executable, add_test, and define_pFUnit_failure. -# -# Example, using defaults for the optional arguments: -# create_pFUnit_test(mytest mytest_exe "${pfunit_sources}" "${test_sources}") -# -# Example, specifying values for the optional arguments: -# create_pFUnit_test(mytest mytest_exe "${pfunit_sources}" "${test_sources}" -# GEN_OUTPUT_DIRECTORY mytest_dir -# COMMAND env OMP_NUM_THREADS=3 ./mytest_exe) -# -#========================================================================== - -#========================================================================== -# Copyright (c) 2013-2014, University Corporation for Atmospheric Research -# -# This software is distributed under a two-clause BSD license, with no -# warranties, express or implied. See the accompanying LICENSE file for -# details. -#========================================================================== - -include(CMakeParseArguments) - -# Notify CMake that a given Fortran file can be produced by preprocessing a -# pFUnit file. -function(preprocess_pf_suite pf_file fortran_file) - - add_custom_command(OUTPUT ${fortran_file} - COMMAND python ${PFUNIT_PARSER} ${pf_file} ${fortran_file} - MAIN_DEPENDENCY ${pf_file}) - -endfunction(preprocess_pf_suite) - -# This function manages most of the work involved in preprocessing pFUnit -# files. You provide every *.pf file for a given executable, an output -# directory where generated sources should be output, and a list name. 
It -# will generate the sources, and append them and the pFUnit driver to the -# named list. -function(process_pFUnit_source_list pf_file_list output_directory - fortran_list_name) - - foreach(pf_file IN LISTS pf_file_list) - - # If a file is a relative path, expand it (relative to current source - # directory. - get_filename_component(pf_file "${pf_file}" ABSOLUTE) - - # Get extensionless base name from input. - get_filename_component(pf_file_stripped "${pf_file}" NAME_WE) - - # Add the generated Fortran files to the source list. - set(fortran_file ${output_directory}/${pf_file_stripped}.F90) - preprocess_pf_suite(${pf_file} ${fortran_file}) - list(APPEND ${fortran_list_name} ${fortran_file}) - - # Add the file to testSuites.inc - set(testSuites_contents - "${testSuites_contents}ADD_TEST_SUITE(${pf_file_stripped}_suite)\n") - endforeach() - - # Regenerate testSuites.inc if and only if necessary. - if(EXISTS ${output_directory}/testSuites.inc) - file(READ ${output_directory}/testSuites.inc old_testSuites_contents) - endif() - - if(NOT testSuites_contents STREQUAL old_testSuites_contents) - file(WRITE ${output_directory}/testSuites.inc ${testSuites_contents}) - endif() - - # Export ${fortran_list_name} to the caller, and add ${PFUNIT_DRIVER} - # to it. - set(${fortran_list_name} "${${fortran_list_name}}" "${PFUNIT_DRIVER}" - PARENT_SCOPE) - -endfunction(process_pFUnit_source_list) - -# Creates an executable of the given name using the pFUnit driver. Input -# variables are the executable name, a list of .pf files, the output -# directory for generated sources, and a list of regular Fortran files. -function(add_pFUnit_executable name pf_file_list output_directory - fortran_source_list) - - # Handle source code generation, add to list of sources. - process_pFUnit_source_list("${pf_file_list}" ${output_directory} - fortran_source_list) - - # Create the executable itself. - add_executable(${name} ${fortran_source_list}) - - # Handle pFUnit linking. - target_link_libraries(${name} "${PFUNIT_LIBRARIES}") - - # Necessary to include testSuites.inc - get_target_property(includes ${name} INCLUDE_DIRECTORIES) - if(NOT includes) - unset(includes) - endif() - list(APPEND includes ${output_directory} "${PFUNIT_INCLUDE_DIRS}") - set_target_properties(${name} PROPERTIES - INCLUDE_DIRECTORIES "${includes}") - - # The above lines are equivalent to: - # target_include_directories(${name} PRIVATE ${output_directory}) - # However, target_include_directories was not added until 2.8.11, and at - # the time of this writing, we can't depend on having such a recent - # version of CMake available on HPC systems. - -endfunction(add_pFUnit_executable) - -# Tells CTest what regular expressions are used to signal pass/fail from -# pFUnit output. -function(define_pFUnit_failure test_name) - # Set both pass and fail regular expressions to minimize the chance that - # the system under test will interfere with output and cause a false - # negative. - set_tests_properties(${test_name} PROPERTIES - FAIL_REGULAR_EXPRESSION "FAILURES!!!") - set_tests_properties(${test_name} PROPERTIES - PASS_REGULAR_EXPRESSION "OK") -endfunction(define_pFUnit_failure) - -# Does everything needed to create a pFUnit-based test, wrapping add_pFUnit_executable, -# add_test, and define_pFUnit_failure. -# -# Required input variables are the test name, the executable name, a list of .pf files, -# and a list of regular Fortran files. 
-# -# Optional input variables are GEN_OUTPUT_DIRECTORY and COMMAND (see usage notes at the -# top of this file for details). -# -# If executables need to be prefixed with an mpirun command, this prefix (e.g., -# "mpirun") should be given in the CMAKE variable PFUNIT_MPIRUN. -function(create_pFUnit_test test_name executable_name pf_file_list fortran_source_list) - - # Parse optional arguments - set(options "") - set(oneValueArgs GEN_OUTPUT_DIRECTORY) - set(multiValueArgs COMMAND) - cmake_parse_arguments(MY "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - if (MY_UNPARSED_ARGUMENTS) - message(FATAL_ERROR "Unknown keywords given to create_pFUnit_test(): \"${MY_UNPARSED_ARGUMENTS}\"") - endif() - - # Change GEN_OUTPUT_DIRECTORY to an absolute path, relative to CMAKE_CURRENT_BINARY_DIR - # Note that, if GEN_OUTPUT_DIRECTORY isn't given, this logic will make the output - # directory default to CMAKE_CURRENT_BINARY_DIR - set(MY_GEN_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${MY_GEN_OUTPUT_DIRECTORY}) - - # Give default values to optional arguments that aren't present - if (NOT MY_COMMAND) - set(MY_COMMAND ./${executable_name}) - endif() - - # Prefix command with an mpirun command - separate_arguments(PFUNIT_MPIRUN_LIST UNIX_COMMAND ${PFUNIT_MPIRUN}) - set (MY_COMMAND ${PFUNIT_MPIRUN_LIST} ${MY_COMMAND}) - - # Do the work - add_pFUnit_executable(${executable_name} "${pf_file_list}" - ${MY_GEN_OUTPUT_DIRECTORY} "${fortran_source_list}") - add_test(NAME ${test_name} COMMAND ${MY_COMMAND}) - define_pFUnit_failure(${test_name}) - -endfunction(create_pFUnit_test) diff --git a/src/build_scripts/buildlib.csm_share b/src/build_scripts/buildlib.csm_share deleted file mode 100755 index ab0fea86b4f..00000000000 --- a/src/build_scripts/buildlib.csm_share +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python -from standard_script_setup import * -from CIME.utils import copyifnewer, run_bld_cmd_ensure_logging, expect -from CIME.case import Case -from CIME.build import get_standard_makefile_args -import glob - -logger = logging.getLogger(__name__) - -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--debug] -OR -{0} --verbose -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run \033[0m - > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("buildroot", - help="build path root") - - parser.add_argument("installpath", - help="install path ") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.buildroot, args.installpath, args.caseroot - - -def buildlib(bldroot, installpath, case): -############################################################################### - gmake_args = get_standard_makefile_args(case, shared_lib=True) - comp_interface = case.get_value("COMP_INTERFACE") - cimeroot = case.get_value("CIMEROOT") - caseroot = case.get_value("CASEROOT") - - filepath = [os.path.join(caseroot,"SourceMods","src.share"), - os.path.join(cimeroot,"src","share","streams"), - os.path.join(cimeroot,"src","share","util"), - os.path.join(cimeroot,"src","share","RandNum","src"), - os.path.join(cimeroot,"src","share","RandNum","src","dsfmt_f03"), - 
os.path.join(cimeroot,"src","share","RandNum","src","kissvec"), - os.path.join(cimeroot,"src","share","RandNum","src","mt19937")] - - # Append path for driver - currently only values of 'mct' and 'nuopc' are accepted - - if comp_interface == "mct": - filepath.append(os.path.join(cimeroot,"src","drivers","mct","shr")) - elif comp_interface == "nuopc": - filepath.append(os.path.join(cimeroot,"src","share","nuopc")) - else: - expect(False, "driver value of {} not supported".format(comp_interface)) - - if case.get_value("USE_ESMF_LIB"): - use_esmf = "esmf" - else: - use_esmf = "noesmf" - filepath.append(os.path.join(cimeroot, "src", "share", "esmf_wrf_timemgr")) - - comp_interface = case.get_value("COMP_INTERFACE") - ninst_value = case.get_value("NINST_VALUE") - libdir = os.path.join(bldroot,comp_interface,use_esmf, ninst_value,"csm_share") - if not os.path.isdir(libdir): - os.makedirs(libdir) - - filepathfile = os.path.join(libdir, "Filepath") - # if the filepathfile has a different number of lines than filepath, replace it - file_len = 0 - if os.path.isfile(filepathfile): - file_len = len(open(filepathfile).readlines()) - - if len(filepath) != file_len: - with open(filepathfile, "w") as fd: - for path in filepath: - fd.write("{}\n".format(path)) - - components = case.get_values("COMP_CLASSES") - multiinst_cppdefs = "" - multi_driver = case.get_value("MULTI_DRIVER") - for comp in components: - if comp == "CPL": - continue - if multi_driver: - ninst_comp = 1 - else: - ninst_comp = case.get_value("NINST_{}".format(comp)) - multiinst_cppdefs += " -DNUM_COMP_INST_{}={}".format(comp, ninst_comp) - - if case.get_value("COMP_OCN") == "nemo": - multiinst_cppdefs += " -DNEMO_IN_CCSM " - - installdir = os.path.join(installpath, comp_interface, - use_esmf, ninst_value) - for ndir in ("lib", "include"): - if not os.path.isdir(os.path.join(installdir,ndir)): - os.makedirs(os.path.join(installdir,ndir)) - # copy some header files - for _file in glob.iglob(os.path.join(cimeroot,"src","share","include","*")): - copyifnewer(_file, os.path.join(installdir, "include", os.path.basename(_file))) - for _file in glob.iglob(os.path.join(cimeroot,"src","share","RandNum","include","*")): - copyifnewer(_file, os.path.join(installdir, "include", os.path.basename(_file))) - - # This runs the make command - gmake_opts = "-f {}/Makefile complib MODEL=csm_share ".format(os.path.join(caseroot,"Tools")) - gmake_opts += "-j {} ".format(case.get_value("GMAKE_J")) - gmake_opts += " COMPLIB=libcsm_share.a" - gmake_opts += ' USER_CPPDEFS="{} -DTIMING" '.format(multiinst_cppdefs) - gmake_opts += "INCLUDE_DIR={} ".format(os.path.join(installdir, "include")) - gmake_opts += gmake_args - gmake_opts += " -C {}".format(libdir) - - gmake_cmd = case.get_value("GMAKE") - - cmd = "{} {}".format(gmake_cmd, gmake_opts) - run_bld_cmd_ensure_logging(cmd, logger) - -def _main(argv, documentation): - bldroot, installpath, caseroot = parse_command_line(argv, documentation) - with Case(caseroot) as case: - buildlib(bldroot, installpath, case) - -if (__name__ == "__main__"): - _main(sys.argv, __doc__) diff --git a/src/build_scripts/buildlib.gptl b/src/build_scripts/buildlib.gptl deleted file mode 100755 index 21bf035e5c4..00000000000 --- a/src/build_scripts/buildlib.gptl +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -from standard_script_setup import * -from CIME.utils import run_bld_cmd_ensure_logging -from CIME.case import Case -from CIME.build import get_standard_makefile_args - -logger = logging.getLogger(__name__) - -def 
parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--debug] -OR -{0} --verbose -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run \033[0m - > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("buildroot", - help="build path root") - - parser.add_argument("installpath", - help="install path ") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.buildroot, args.installpath, args.caseroot - -def buildlib(bldroot, installpath, case): -############################################################################### - caseroot = case.get_value("CASEROOT") - gptl_dir = os.path.join(case.get_value("CIMEROOT"), "src", "share", "timing") - gmake_opts = "-f {gptl}/Makefile install -C {bldroot} MACFILE={macfile} MODEL=gptl GPTL_DIR={gptl} GPTL_LIBDIR={bldroot}"\ - " SHAREDPATH={install} {stdargs} "\ - .format(gptl=gptl_dir, bldroot=bldroot, macfile=os.path.join(caseroot,"Macros.make"), - install=installpath, stdargs=get_standard_makefile_args(case, shared_lib=True)) - - gmake_cmd = case.get_value("GMAKE") - - # This runs the gptl make command - cmd = "{} {}".format(gmake_cmd, gmake_opts) - run_bld_cmd_ensure_logging(cmd, logger) - -def _main(argv, documentation): - bldroot, installpath, caseroot = parse_command_line(argv, documentation) - with Case(caseroot) as case: - buildlib(bldroot, installpath, case) - -if (__name__ == "__main__"): - _main(sys.argv, __doc__) diff --git a/src/build_scripts/buildlib.internal_components b/src/build_scripts/buildlib.internal_components deleted file mode 100755 index e33ea7ddb2a..00000000000 --- a/src/build_scripts/buildlib.internal_components +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python - -""" -build cime component model library. This buildlib script is used by all cime internal -components. 
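For orientation, buildlib.internal_components is symlinked into each internal component's cime_config directory (the datm symlink is removed later in this same diff), and it infers the component name from its own location before calling build_cime_component_lib. A minimal sketch of that lookup under those assumptions; the helper name is illustrative and not part of CIME:

import os

def infer_compname(buildlib_path):
    # The symlinked script lives at .../<compname>/cime_config/buildlib,
    # so the parent of the cime_config directory is the component name.
    thisdir = os.path.dirname(os.path.abspath(buildlib_path))
    path, dir1 = os.path.split(thisdir)
    _, dir2 = os.path.split(path)
    if dir1 == "cime_config":
        return dir2  # e.g. "datm"
    # Fallback mirroring the original script, for layouts where the
    # containing directory name itself encodes the component after a dot.
    return dir1.split(".")[1]

# infer_compname(".../data_comps/datm/cime_config/buildlib") -> "datm"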
-""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..", "..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildlib import build_cime_component_lib, parse_input -from CIME.case import Case - -def buildlib(case, libroot, bldroot, compname=None): - if compname is None: - thisdir = os.path.dirname(os.path.abspath(__file__)) - path, dir1 = os.path.split(thisdir) - _, dir2 = os.path.split(path) - if dir1 == "cime_config": - compname = dir2 - else: - compname = dir1.split('.')[1] - build_cime_component_lib(case, compname, libroot, bldroot) - -def _main_func(args): - caseroot, libroot, bldroot = parse_input(args) - with Case(caseroot) as case: - buildlib(case, libroot, bldroot) - -if __name__ == "__main__": - _main_func(sys.argv) diff --git a/src/build_scripts/buildlib.kokkos b/src/build_scripts/buildlib.kokkos deleted file mode 100755 index e0c93c61486..00000000000 --- a/src/build_scripts/buildlib.kokkos +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -from standard_script_setup import * -from CIME.utils import expect, run_bld_cmd_ensure_logging, run_cmd_no_fail, run_cmd -from CIME.case import Case -from CIME.build import get_standard_makefile_args - -logger = logging.getLogger(__name__) - -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--debug] -OR -{0} --verbose -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run \033[0m - > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("buildroot", - help="build path root") - - parser.add_argument("installpath", - help="install path ") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.buildroot, args.installpath, args.caseroot - -############################################################################### -def buildlib(bldroot, installpath, case): -############################################################################### - srcroot = case.get_value("SRCROOT") - kokkos_dir = os.path.join(srcroot, "externals", "kokkos") - expect(os.path.isdir(kokkos_dir), "Missing kokkos submodule") - - # We want to get the compiler and kokkos_options from Macros.make - # (generated from config_compilers.xml), but we want to otherwise - # let kokkos control flags - make_args = get_standard_makefile_args(case, shared_lib=True) - stat, output, _ = run_cmd("make -f Macros.make {} -p | grep KOKKOS_OPTIONS".format(make_args)) - if stat == 0: - kokkos_options = output.split(":=")[-1].strip() - else: - # This is the default setup. - kokkos_options = "--with-serial" - build_threaded = case.get_build_threaded() - if build_threaded: - kokkos_options += " --with-openmp" - logger.warning("Failed to find custom kokkos options, using default: {:s}.". 
- format(kokkos_options)) - - cxx = run_cmd_no_fail("make -f Macros.make {} -p | grep SCXX".format(make_args)).split(":=")[-1].strip() - - gmake_cmd = case.get_value("GMAKE") - gmake_j = case.get_value("GMAKE_J") - - gen_makefile_cmd = "{kokkos_dir}/generate_makefile.bash {kokkos_options} --compiler={cxx} --prefix={installpath}"\ - .format(kokkos_dir=kokkos_dir, kokkos_options=kokkos_options, cxx=cxx, installpath=installpath) - - run_bld_cmd_ensure_logging(gen_makefile_cmd, logger, from_dir=bldroot) - run_bld_cmd_ensure_logging("{} -j {}".format(gmake_cmd, gmake_j), logger, from_dir=bldroot) - run_bld_cmd_ensure_logging("{} install".format(gmake_cmd), logger, from_dir=bldroot) - -def _main(argv, documentation): - bldroot, installpath, caseroot = parse_command_line(argv, documentation) - with Case(caseroot, read_only=False) as case: - buildlib(bldroot, installpath, case) - -if (__name__ == "__main__"): - _main(sys.argv, __doc__) diff --git a/src/build_scripts/buildlib.mct b/src/build_scripts/buildlib.mct deleted file mode 100755 index ba1cd34b4a5..00000000000 --- a/src/build_scripts/buildlib.mct +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -from standard_script_setup import * -from CIME.utils import copyifnewer, run_bld_cmd_ensure_logging -from CIME.case import Case -from CIME.build import get_standard_makefile_args -import glob - -logger = logging.getLogger(__name__) - -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--debug] -OR -{0} --verbose -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run \033[0m - > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("buildroot", - help="build path root") - - parser.add_argument("installpath", - help="install path ") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.buildroot, args.installpath, args.caseroot - -def buildlib(bldroot, installpath, case): -############################################################################### - caseroot = case.get_value("CASEROOT") - cimeroot = case.get_value("CIMEROOT") - mct_dir = os.path.join(cimeroot,"src","externals","mct") - for _dir in ("mct", "mpeu"): - if not os.path.isdir(os.path.join(bldroot,_dir)): - os.makedirs(os.path.join(bldroot,_dir)) - copyifnewer(os.path.join(mct_dir,_dir,"Makefile"), - os.path.join(bldroot,_dir,"Makefile")) - - gmake_opts = "-f {} ".format(os.path.join(caseroot,"Tools","Makefile")) - gmake_opts += " -C {} ".format(bldroot) - gmake_opts += get_standard_makefile_args(case, shared_lib=True) - gmake_opts += "MODEL=mct {}".format(os.path.join(bldroot,"Makefile.conf")) - - gmake_cmd = case.get_value("GMAKE") - - # This runs the mpi-serial configure command - cmd = "{} {}".format(gmake_cmd, gmake_opts) - run_bld_cmd_ensure_logging(cmd, logger) - - # Now we run the mct make command - gmake_opts = "-f {} ".format(os.path.join(mct_dir,"Makefile")) - gmake_opts += " -C {} ".format(bldroot) - gmake_opts += " -j {} ".format(case.get_value("GMAKE_J")) - gmake_opts += " SRCDIR={} ".format(os.path.join(mct_dir)) - - cmd = "{} {}".format(gmake_cmd, gmake_opts) - run_bld_cmd_ensure_logging(cmd, logger) - - for _dir in ("mct", "mpeu"): - 
for _file in glob.iglob(os.path.join(bldroot,_dir,"*.a")): - logger.info("Installing {} to {}".format(_file,installpath)) - copyifnewer(_file, os.path.join(installpath, "lib", os.path.basename(_file))) - for _file in glob.iglob(os.path.join(bldroot,_dir,"*.mod")): - logger.info("Installing {} to {}".format(_file,installpath)) - copyifnewer(_file, os.path.join(installpath, "include", os.path.basename(_file))) - -def _main(argv, documentation): - bldroot, installpath, caseroot = parse_command_line(argv, documentation) - with Case(caseroot) as case: - buildlib(bldroot, installpath, case) - -if (__name__ == "__main__"): - _main(sys.argv, __doc__) diff --git a/src/build_scripts/buildlib.mpi-serial b/src/build_scripts/buildlib.mpi-serial deleted file mode 100755 index 7b3214e8f6c..00000000000 --- a/src/build_scripts/buildlib.mpi-serial +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python -from standard_script_setup import * -from CIME.utils import copyifnewer, run_bld_cmd_ensure_logging -from CIME.case import Case -from CIME.build import get_standard_makefile_args -import glob - -logger = logging.getLogger(__name__) - -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--debug] -OR -{0} --verbose -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run \033[0m - > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("buildroot", - help="build path root") - - parser.add_argument("installpath", - help="install path ") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.buildroot, args.installpath, args.caseroot - -def buildlib(bldroot, installpath, case): -############################################################################### - caseroot = case.get_value("CASEROOT") - cimeroot = case.get_value("CIMEROOT") - mct_dir = os.path.join(cimeroot,"src","externals","mct") - for _file in glob.iglob(os.path.join(mct_dir, "mpi-serial","*.h")): - copyifnewer(_file, os.path.join(bldroot,os.path.basename(_file))) - - gmake_opts = "-f {} ".format(os.path.join(caseroot,"Tools","Makefile")) - gmake_opts += " -C {} ".format(bldroot) - gmake_opts += " {} ".format(get_standard_makefile_args(case, shared_lib=True)) - gmake_opts += "MODEL=mpi-serial {}".format(os.path.join(bldroot,"Makefile.conf")) - - gmake_cmd = case.get_value("GMAKE") - - # This runs the mpi-serial configure command - cmd = "{} {}".format(gmake_cmd, gmake_opts) - run_bld_cmd_ensure_logging(cmd, logger) - - # Now we run the mpi-serial make command - gmake_opts = "-f {} ".format(os.path.join(mct_dir,"mpi-serial","Makefile")) - gmake_opts += " -C {} ".format(bldroot) - gmake_opts += " -j {} ".format(case.get_value("GMAKE_J")) - gmake_opts += " SRCDIR={} ".format(os.path.join(mct_dir)) - - cmd = "{} {}".format(gmake_cmd, gmake_opts) - run_bld_cmd_ensure_logging(cmd, logger) - - copyifnewer(os.path.join(bldroot, "libmpi-serial.a"), os.path.join(installpath, "lib", "libmpi-serial.a")) - for _file in ("mpi.h", "mpif.h", "mpi.mod", "MPI.mod"): - if os.path.isfile(os.path.join(bldroot,_file)): - copyifnewer(os.path.join(bldroot, _file), os.path.join(installpath, "include", _file)) - -def _main(argv, documentation): - 
bldroot, installpath, caseroot = parse_command_line(argv, documentation) - with Case(caseroot) as case: - buildlib(bldroot, installpath, case) - -if (__name__ == "__main__"): - _main(sys.argv, __doc__) diff --git a/src/build_scripts/buildlib.pio b/src/build_scripts/buildlib.pio deleted file mode 100755 index bab9a9f71c6..00000000000 --- a/src/build_scripts/buildlib.pio +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python - -import glob, re -from standard_script_setup import * -from CIME.utils import expect, run_bld_cmd_ensure_logging, safe_copy -from CIME.build import get_standard_makefile_args -from CIME.case import Case - -logger = logging.getLogger(__name__) - -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser( - usage="""\n{0} [--debug] -OR -{0} --verbose -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run \033[0m - > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("buildroot", - help="build path root") - - parser.add_argument("installpath", - help="install path ") - - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - return args.buildroot, args.installpath, args.caseroot - -############################################################################### -def buildlib(bldroot, installpath, case): -############################################################################### - caseroot = case.get_value("CASEROOT") - pio_version = case.get_value("PIO_VERSION") - # If variable PIO_VERSION_MAJOR is defined in the environment then - # we assume that PIO is installed on the system - # and expect to find - # PIO_LIBDIR, PIO_INCDIR, PIO_TYPENAME_VALID_VALUES - # also defined in the environment. In this case we - # will use the installed pio and not build it here. 
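The comment block above (in the removed buildlib.pio) describes how a pre-installed PIO is detected so the build can be skipped. As a rough standalone sketch of that check, assuming the same environment-variable convention; the helper name is hypothetical:

import os

def use_installed_pio(case_pio_version):
    # PIO_VERSION_MAJOR is expected to be set by the machine environment when
    # a system PIO is available (along with PIO_LIBDIR, PIO_INCDIR and
    # PIO_TYPENAME_VALID_VALUES, per the comment above).
    installed_major = os.environ.get("PIO_VERSION_MAJOR")
    if installed_major is None:
        return False
    return int(installed_major) == case_pio_version

# use_installed_pio(case.get_value("PIO_VERSION")) decides whether the
# script can return early instead of building PIO from source.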
- installed_pio_version = os.environ.get("PIO_VERSION_MAJOR") - logger.info("pio_version_major = {} pio_version = {}".format(installed_pio_version, pio_version)) - if installed_pio_version is not None and int(installed_pio_version) == pio_version: - logger.info("Using installed PIO library") - _set_pio_valid_values(case, os.environ.get("PIO_TYPENAME_VALID_VALUES")) - return - - pio_model = "pio{}".format(pio_version) - pio_dir = os.path.join(bldroot, pio_model) - if not os.path.isdir(pio_dir): - os.makedirs(pio_dir) - casetools = case.get_value("CASETOOLS") - cmake_opts = "\"-D GENF90_PATH=$CIMEROOT/src/externals/genf90 \"" - stdargs = get_standard_makefile_args(case, shared_lib=True) - - gmake_opts = "{pio_dir}/Makefile -C {pio_dir} CASEROOT={caseroot} MODEL={pio_model} USER_CMAKE_OPTS={cmake_opts} "\ - "PIO_LIBDIR={pio_dir} CASETOOLS={casetools} "\ - " USER_CPPDEFS=-DTIMING {stdargs} -f {casetools}/Makefile"\ - .format(pio_dir=pio_dir, caseroot=caseroot, pio_model=pio_model, cmake_opts=cmake_opts, - casetools=casetools, stdargs=stdargs) - - gmake_cmd = case.get_value("GMAKE") - - # This runs the pio cmake command from the cime case Makefile - cmd = "{} {}".format(gmake_cmd, gmake_opts) - run_bld_cmd_ensure_logging(cmd, logger, from_dir=pio_dir) - - # This runs the pio make command from the cmake generated Makefile - run_bld_cmd_ensure_logging("{} -j {}".format(gmake_cmd, case.get_value("GMAKE_J")), logger, from_dir=pio_dir) - - if pio_version == 1: - installed_lib = os.path.join(installpath,"lib","libpio.a") - installed_lib_time = 0 - if os.path.isfile(installed_lib): - installed_lib_time = os.path.getmtime(installed_lib) - newlib = os.path.join(pio_dir,"pio","libpio.a") - newlib_time = os.path.getmtime(newlib) - if newlib_time > installed_lib_time: - logger.info("Installing pio version 1") - safe_copy(newlib, installed_lib) - for glob_to_copy in ("*.h", "*.mod"): - for item in glob.glob(os.path.join(pio_dir,"pio",glob_to_copy)): - safe_copy(item, "{}/include".format(installpath)) - expect_string = "D_NETCDF;" - pnetcdf_string = "D_PNETCDF" - netcdf4_string = "D_NETCDF4" - else: - globs_to_copy = (os.path.join("src","clib","libpioc.*"), - os.path.join("src","flib","libpiof.*"), - os.path.join("src","clib","*.h"), - os.path.join("src","flib","*.mod")) - for glob_to_copy in globs_to_copy: - installed_file_time = 0 - for item in glob.glob(os.path.join(pio_dir,glob_to_copy)): - if item.endswith(".a") or item.endswith(".so"): - installdir = "lib" - else: - installdir = "include" - installed_file = os.path.join(installpath,installdir,os.path.basename(item)) - item_time = os.path.getmtime(item) - if os.path.isfile(installed_file): - installed_file_time = os.path.getmtime(installed_file) - if item_time > installed_file_time: - safe_copy(item, installed_file) - expect_string = "NetCDF_C_LIBRARY-ADVANCED" - pnetcdf_string = "PnetCDF_C_LIBRARY-ADVANCED" - netcdf4_string = "NetCDF_C_HAS_PARALLEL:BOOL=TRUE" - - - # make sure case pio_typename valid_values is set correctly - expect_string_found = False - pnetcdf_found = False - netcdf4_parallel_found = False - - cache_file = open(os.path.join(pio_dir,"CMakeCache.txt"), "r") - for line in cache_file: - if re.search(expect_string, line): - expect_string_found = True - if re.search(pnetcdf_string, line): - pnetcdf_found = True - if re.search(netcdf4_string, line): - netcdf4_parallel_found = True - - expect(expect_string_found, "CIME models require NETCDF in PIO build") - valid_values = "netcdf" - if pnetcdf_found: - valid_values += ",pnetcdf" - if 
netcdf4_parallel_found: - valid_values += ",netcdf4p,netcdf4c" - _set_pio_valid_values(case, valid_values) - - -def _set_pio_valid_values(case, valid_values): - # nothing means use the general default - valid_values += ",nothing" - logger.warning("Updating valid_values for PIO_TYPENAME: {}".format(valid_values)) - env_run = case.get_env("run") - env_run.set_valid_values("PIO_TYPENAME",valid_values) - - for comp in case.get_values("COMP_CLASSES"): - comp_pio_typename = "{}_PIO_TYPENAME".format(comp) - current_value = case.get_value(comp_pio_typename) - if current_value not in valid_values: - logger.warning("Resetting PIO_TYPENAME to netcdf for component {}".format(comp)) - env_run.set_value(comp_pio_typename,"netcdf") - -def _main(argv, documentation): - bldroot, installpath, caseroot = parse_command_line(argv, documentation) - with Case(caseroot, read_only=False) as case: - buildlib(bldroot, installpath, case) - -if (__name__ == "__main__"): - _main(sys.argv, __doc__) diff --git a/src/components/data_comps/datm/cime_config/buildlib b/src/components/data_comps/datm/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/data_comps/datm/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/data_comps/datm/cime_config/buildnml b/src/components/data_comps/datm/cime_config/buildnml deleted file mode 100755 index a59e3335436..00000000000 --- a/src/components/data_comps/datm/cime_config/buildnml +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python - -"""Namelist creator for CIME's data atmosphere model. -""" - -# Typically ignore this. -# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position - -import os, sys, glob - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, get_model, safe_copy -from CIME.buildnml import create_namelist_infile, parse_input -from CIME.XML.files import Files - -logger = logging.getLogger(__name__) - -# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements -#################################################################################### -def _create_namelists(case, confdir, inst_string, infile, nmlgen, data_list_path): -#################################################################################### - """Write out the namelist for this component. - - Most arguments are the same as those for `NamelistGenerator`. The - `inst_string` argument is used as a suffix to distinguish files for - different instances. The `confdir` argument is used to specify the directory - in which output files will be placed. - """ - - #---------------------------------------------------- - # Get a bunch of information from the case. 
- #---------------------------------------------------- - datm_mode = case.get_value("DATM_MODE") - datm_topo = case.get_value("DATM_TOPO") - datm_presaero = case.get_value("DATM_PRESAERO") - datm_co2_tseries = case.get_value("DATM_CO2_TSERIES") - atm_grid = case.get_value("ATM_GRID") - grid = case.get_value("GRID") - clm_usrdat_name = case.get_value("CLM_USRDAT_NAME") - - #---------------------------------------------------- - # Check for incompatible options. - #---------------------------------------------------- - if "CLM" in datm_mode: - expect(datm_presaero != "none", - "A DATM_MODE for CLM is incompatible with DATM_PRESAERO=none.") - expect(datm_topo != "none", - "A DATM_MODE for CLM is incompatible with DATM_TOPO=none.") - expect(grid != "CLM_USRDAT" or clm_usrdat_name in ("", "UNSET"), - "GRID=CLM_USRDAT and CLM_USRDAT_NAME is NOT set.") - - #---------------------------------------------------- - # Log some settings. - #---------------------------------------------------- - logger.debug("DATM mode is {}".format(datm_mode)) - logger.debug("DATM grid is {}".format(atm_grid)) - logger.debug("DATM presaero mode is {}".format(datm_presaero)) - logger.debug("DATM topo mode is {}".format(datm_topo)) - - #---------------------------------------------------- - # Create configuration information. - #---------------------------------------------------- - config = {} - config['grid'] = grid - config['atm_grid'] = atm_grid - config['datm_mode'] = datm_mode - config['datm_co2_tseries'] = datm_co2_tseries - config['datm_presaero'] = datm_presaero - config['cime_model'] = get_model() - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - #---------------------------------------------------- - # Construct the list of streams. - #---------------------------------------------------- - streams = nmlgen.get_streams() - # - # This disable is required because nmlgen.get_streams - # may return a string or a list. See issue #877 in ESMCI/cime - # - #pylint: disable=no-member - if datm_presaero != "none": - streams.append("presaero.{}".format(datm_presaero)) - - if datm_topo != "none": - streams.append("topo.{}".format(datm_topo)) - - if datm_co2_tseries != "none": - streams.append("co2tseries.{}".format(datm_co2_tseries)) - - # Add bias correction stream if given in namelist. - bias_correct = nmlgen.get_value("bias_correct") - streams.append(bias_correct) - - # Add all anomaly forcing streams given in namelist. - anomaly_forcing = nmlgen.get_value("anomaly_forcing") - streams += anomaly_forcing - - #---------------------------------------------------- - # For each stream, create stream text file and update - # shr_strdata_nml group and input data list. - #---------------------------------------------------- - for stream in streams: - - # Ignore null values. - if stream is None or stream in ("NULL", ""): - continue - - inst_stream = stream + inst_string - logger.debug("DATM stream is {}".format(inst_stream)) - stream_path = os.path.join(confdir, "datm.streams.txt." + inst_stream) - user_stream_path = os.path.join(case.get_case_root(), "user_datm.streams.txt." + inst_stream) - - # Use the user's stream file, or create one if necessary. 
- if os.path.exists(user_stream_path): - safe_copy(user_stream_path, stream_path) - config['stream'] = stream - nmlgen.update_shr_strdata_nml(config, stream, stream_path) - else: - nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) - - #---------------------------------------------------- - # Create `shr_strdata_nml` namelist group. - #---------------------------------------------------- - # set per-stream variables - nmlgen.create_shr_strdata_nml() - - # Determine model domain filename (in datm_in) - if "CPLHIST" in datm_mode: - datm_cplhist_domain_file = case.get_value("DATM_CPLHIST_DOMAIN_FILE") - if datm_cplhist_domain_file == 'null': - logger.info(" .... Obtaining DATM model domain info from first stream file: {}".format(streams[0])) - else: - logger.info(" .... Obtaining DATM model domain info from stream {}".format(streams[0])) - nmlgen.add_default("domainfile", value=datm_cplhist_domain_file) - else: - atm_domain_file = case.get_value("ATM_DOMAIN_FILE") - atm_domain_path = case.get_value("ATM_DOMAIN_PATH") - if atm_domain_file != "UNSET": - full_domain_path = os.path.join(atm_domain_path, atm_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) - else: - nmlgen.add_default("domainfile", value='null') - - #---------------------------------------------------- - # Finally, write out all the namelists. - #---------------------------------------------------- - namelist_file = os.path.join(confdir, "datm_in") - nmlgen.write_output_file(namelist_file, data_list_path, groups=['datm_nml','shr_strdata_nml']) - -############################################################################### -def buildnml(case, caseroot, compname): -############################################################################### - - # Build the component namelist and required stream txt files - if compname != "datm": - raise AttributeError - - rundir = case.get_value("RUNDIR") - ninst = case.get_value("NINST_ATM") - if ninst is None: - ninst = case.get_value("NINST") - - # Determine configuration directory - confdir = os.path.join(caseroot,"Buildconf",compname + "conf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - #---------------------------------------------------- - # Construct the namelist generator - #---------------------------------------------------- - # Determine directory for user modified namelist_definitions.xml and namelist_defaults.xml - user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) - expect (os.path.isdir(user_xml_dir), - "user_xml_dir {} does not exist ".format(user_xml_dir)) - - # NOTE: User definition *replaces* existing definition. - files = Files() - definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component":"datm"})] - - user_definition = os.path.join(user_xml_dir, "namelist_definition_datm.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file, files=files) - - #---------------------------------------------------- - # Clear out old data. 
- #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", - "datm.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - - #---------------------------------------------------- - # Loop over instances - #---------------------------------------------------- - for inst_counter in range(1, ninst+1): - # determine instance string - inst_string = "" - if ninst > 1: - inst_string = '_' + '{:04d}'.format(inst_counter) - - # If multi-instance case does not have restart file, use - # single-case restart for each instance - rpointer = "rpointer." + compname - if (os.path.isfile(os.path.join(rundir,rpointer)) and - (not os.path.isfile(os.path.join(rundir,rpointer + inst_string)))): - safe_copy(os.path.join(rundir, rpointer), - os.path.join(rundir, rpointer + inst_string)) - - inst_string_label = inst_string - if not inst_string_label: - inst_string_label = "\"\"" - - # create namelist output infile using user_nl_file as input - user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) - expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file {} ".format(user_nl_file)) - infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, infile) - namelist_infile = [infile] - - # create namelist and stream file(s) data component - _create_namelists(case, confdir, inst_string, namelist_infile, nmlgen, data_list_path) - - # copy namelist files and stream text files, to rundir - if os.path.isdir(rundir): - filename = compname + "_in" - file_src = os.path.join(confdir, filename) - file_dest = os.path.join(rundir, filename) - if inst_string: - file_dest += inst_string - safe_copy(file_src,file_dest) - - for txtfile in glob.glob(os.path.join(confdir, "*txt*")): - safe_copy(txtfile, rundir) - -############################################################################### -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "datm") - - -if __name__ == "__main__": - _main_func() diff --git a/src/components/data_comps/datm/cime_config/config_archive.xml b/src/components/data_comps/datm/cime_config/config_archive.xml deleted file mode 100644 index f3a12a840a9..00000000000 --- a/src/components/data_comps/datm/cime_config/config_archive.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - r - unset - - rpointer.atm$NINST_STRING - $CASE.datm$NINST_STRING.r.$DATENAME.nc,$CASE.datm$NINST_STRING.rs1.$DATENAME.bin - - - diff --git a/src/components/data_comps/datm/cime_config/config_component.xml b/src/components/data_comps/datm/cime_config/config_component.xml deleted file mode 100644 index f4dfd271445..00000000000 --- a/src/components/data_comps/datm/cime_config/config_component.xml +++ /dev/null @@ -1,316 +0,0 @@ - - - - - - Data driven ATM - QIAN data set - QIAN with water isotopes - CRUNCEP data set - CLM CRU NCEP v7 data set - GSWP3v1 data set - NLDAS2 regional 0.125 degree data set over the U.S. (25-53N, 235-293E). WARNING: Garbage data will be produced for runs extending beyond this regional domain. 
- Coupler hist data set (in this mode, it is strongly recommended that the model domain and the coupler history forcing are on the same domain) - single point tower site data set - COREv2 normal year forcing - COREv2 interannual forcing - interannual JRA55 forcing - - - - char - datm - datm - case_comp - env_case.xml - Name of atmospheric component - - - - char - CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEPv7,CLMGSWP3v1,CLMNLDAS2,CPLHIST,CORE_IAF_JRA - CORE2_NYF - run_component_datm - env_run.xml - Mode for data atmosphere component. - CORE2_NYF (CORE2 normal year forcing) are modes used in forcing prognostic ocean/sea-ice components. - CLM_QIAN, CLMCRUNCEP, CLMCRUNCEPv7, CLMGSWP3v1, CLMNLDAS2 and CLM1PT are modes using observational data for forcing prognostic land components. - WARNING for CLMNLDAS2: This is a regional forcing dataset over the U.S. (25-53N, 235-293E). Garbage data will be produced for runs extending beyond this regional domain. - - CORE2_NYF - CORE2_IAF - CORE_IAF_JRA - CLM_QIAN - CLM_QIAN_WISO - CLMCRUNCEP - CLMCRUNCEPv7 - CLMGSWP3v1 - CLMNLDAS2 - CLM1PT - CPLHIST - - - - - char - none,clim_1850,clim_2000,clim_2010,trans_1850-2000,SSP1-1.9,SSP1-2.6,SSP2-4.5,SSP3-7.0,SSP4-3.4,SSP4-6.0,SSP5-3.4,SSP5-8.5,cplhist - clim_2000 - - clim_1850 - clim_2000 - clim_2010 - SSP1-1.9 - SSP1-2.6 - SSP2-4.5 - SSP3-7.0 - SSP4-3.4 - SSP4-6.0 - SSP5-3.4 - SSP5-8.5 - trans_1850-2000 - trans_1850-2000 - cplhist - none - - run_component_datm - env_run.xml - DATM prescribed aerosol forcing - - - - char - none,observed,cplhist - observed - - - none - none - cplhist - - run_component_datm - env_run.xml - DATM surface topography forcing - - - - char - none,20tr,20tr.latbnd,SSP1-1.9,SSP1-2.6,SSP2-4.5,SSP3-7.0,SSP4-3.4,SSP4-6.0,SSP5-3.4,SSP5-8.5,SSP1-1.9.latbnd,SSP1-2.6.latbnd,SSP2-4.5.latbnd,SSP3-7.0.latbnd,SSP4-3.4.latbnd,SSP4-6.0.latbnd,SSP5-3.4.latbnd,SSP5-8.5.latbnd - none - - SSP1-1.9 - SSP1-2.6 - SSP2-4.5 - SSP3-7.0 - SSP4-3.4 - SSP4-6.0 - SSP5-3.4 - SSP5-8.5 - 20tr - 20tr - - run_component_datm - env_run.xml - DATM CO2 time series - - - - char - - null - run_component_datm - env_run.xml - - Full pathname for domain file for datm when DATM_MODE is - CPLHIST, NOTE: if this is set to 'null' (the default), then - domain information is read in from the first coupler history - file in the target stream and it is assumed that the first - coupler stream file that is pointed to contains the domain - information for that stream. - - - - - char - - UNSET - run_component_datm - env_run.xml - directory for coupler history data mode (only used for when DATM_MODE is CPLHIST) - - - - char - - UNSET - run_component_datm - env_run.xml - case name used to determine stream filenames when DATM_MODE is CPLHIST - - - - integer - - -999 - run_component_datm - env_run.xml - - Simulation year corresponding to DATM_CPLHIST_YR_START (only used - when DATM_MODE is CPLHIST). A common usage is to set this to - RUN_STARTDATE. With this setting, the forcing in the first year of - the run will be the forcing of year DATM_CPLHIST_YR_START. Another - use case is to align the calendar of transient forcing with the - model calendar. For example, setting - DATM_CPLHIST_YR_ALIGN=DATM_CPLHIST_YR_START will lead to the - forcing calendar being the same as the model calendar. The forcing - for a given model year would be the forcing of the same year. This - would be appropriate in transient runs where the model calendar is - setup to span the same year range as the forcing data. 
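The DATM_CPLHIST_YR_ALIGN description above amounts to a mapping from model years onto a looped forcing record. A minimal sketch of one common reading of that rule; the function is illustrative only, and the authoritative behavior lives in the data-model stream handling:

def forcing_year(model_year, yr_align, yr_start, yr_end):
    # Model year yr_align uses forcing year yr_start; subsequent model years
    # advance through [yr_start, yr_end] and then wrap around.
    nyears = yr_end - yr_start + 1
    return yr_start + (model_year - yr_align) % nyears

# Example: yr_align=1, yr_start=1948, yr_end=1972 -> model year 1 uses 1948,
# and model year 26 wraps back to 1948.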
- - - - - integer - - -999 - run_component_datm - env_run.xml - starting year to loop data over (only used when DATM_MODE is CPLHIST) - - - - integer - - -999 - run_component_datm - env_run.xml - ending year to loop data over (only used when DATM_MODE is CPLHIST) - - - - integer - - 1 - - 1 - 1 - 1 - 1895 - 1901 - 1901 - $DATM_CLMNCEP_YR_START - 1895 - 1901 - 1901 - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - 1 - 1 - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - - run_component_datm - env_run.xml - year align - - - - integer - - 2004 - - 1972 - 1948 - 1901 - 1901 - 0 - 2000 - 1972 - 1948 - 1901 - 1901 - 0 - 1948 - 1901 - 1901 - 0 - 1948 - 1995 - 2001 - 2001 - 2002 - 1901 - 1991 - 2002 - 2005 - 1901 - 1991 - 2005 - 2002 - 1980 - 2005 - 2002 - - run_component_datm - env_run.xml - starting year to loop data over - - - - integer - - 2004 - - 2004 - 1972 - 1920 - 1920 - -1 - 2004 - 2004 - 1972 - 1920 - 1920 - -1 - 1972 - 1920 - 1920 - -1 - 2004 - 2004 - 2016 - 2014 - 2003 - 1920 - 2010 - 2003 - 2014 - 1920 - 2010 - 2014 - 2003 - 2018 - 2014 - 2003 - - run_component_datm - env_run.xml - ending year to loop data over - - - - ========================================= - DATM naming conventions in compset name - ========================================= - - - diff --git a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml deleted file mode 100644 index 2a1389bd2db..00000000000 --- a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +++ /dev/null @@ -1,2808 +0,0 @@ - - - - - - - - - - - - - - char(100) - streams - streams_file - List of streams used for the given datm_mode - - CLM_QIAN.Solar,CLM_QIAN.Precip,CLM_QIAN.TPQW - CLM_QIAN_WISO.Solar,CLM_QIAN_WISO.Precip,CLM_QIAN_WISO.TPQW - CLM1PT.$ATM_GRID - CLMCRUNCEP.Solar,CLMCRUNCEP.Precip,CLMCRUNCEP.TPQW - CLMCRUNCEPv7.Solar,CLMCRUNCEPv7.Precip,CLMCRUNCEPv7.TPQW - CLMGSWP3v1.Solar,CLMGSWP3v1.Precip,CLMGSWP3v1.TPQW - CLMNLDAS2.Solar,CLMNLDAS2.Precip,CLMNLDAS2.TPQW - CORE2_NYF.GISS,CORE2_NYF.GXGXS,CORE2_NYF.NCEP - CORE2_IAF.GCGCS.PREC,CORE2_IAF.GISS.LWDN,CORE2_IAF.GISS.SWDN,CORE2_IAF.GISS.SWUP,CORE2_IAF.NCEP.DN10,CORE2_IAF.NCEP.Q_10,CORE2_IAF.NCEP.SLP_,CORE2_IAF.NCEP.T_10,CORE2_IAF.NCEP.U_10,CORE2_IAF.NCEP.V_10,CORE2_IAF.CORE2.ArcFactor - CORE_IAF_JRA.PREC,CORE_IAF_JRA.LWDN,CORE_IAF_JRA.SWDN,CORE_IAF_JRA.Q_10,CORE_IAF_JRA.SLP_,CORE_IAF_JRA.T_10,CORE_IAF_JRA.U_10,CORE_IAF_JRA.V_10,CORE_IAF_JRA.CORE2.ArcFactor - CORE2_IAF.NCEP.DENS.SOFS,CORE2_IAF.NCEP.PSLV.SOFS,CORE2_IAF.PREC.SOFS.DAILY,CORE2_IAF.LWDN.SOFS.DAILY,CORE2_IAF.SWDN.SOFS.DAILY,CORE2_IAF.SWUP.SOFS.DAILY,CORE2_IAF.SHUM.SOFS.6HOUR,CORE2_IAF.TBOT.SOFS.6HOUR,CORE2_IAF.U.SOFS.6HOUR,CORE2_IAF.V.SOFS.6HOUR,CORE2_IAF.CORE2.ArcFactor - CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux,CPLHISTForcing.State3hr,CPLHISTForcing.State1hr - - - - - char - streams - streams_file - Stream domain file directory. 
- - null - $DIN_LOC_ROOT/atm/datm7/domain.clm - $ATM_DOMAIN_PATH - null - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.v7.c160715 - $DIN_LOC_ROOT/share/domains/domain.clm - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516 - $DIN_LOC_ROOT/share/domains/domain.clm - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V5.c140715 - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516 - $DIN_LOC_ROOT/share/domains/domain.clm - $DIN_LOC_ROOT/atm/datm7/NYF - $DIN_LOC_ROOT/atm/datm7/CORE2 - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/share/domains - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/JRA55 - $DIN_LOC_ROOT/atm/datm7/CORE2 - $DIN_LOC_ROOT/atm/datm7/CO2 - $DIN_LOC_ROOT/atm/datm7/bias_correction/precip/gpcp/qian - $DIN_LOC_ROOT/atm/datm7/clm_output/cruncep_precip_1deg/gpcp_1deg_bias_correction - $DIN_LOC_ROOT/atm/datm7/bias_correction/precip/cmap/cruncep - $DIN_LOC_ROOT/atm/datm7/anomaly_forcing - null - $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero - $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero - $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero - $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero - null - $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero - $DIN_LOC_ROOT/atm/datm7/topo_forcing - null - - - - - char - streams - streams_file - Stream domain file path(s). 
- - null - domain.lnd.1x1pt-mexicocityMEX_navy.090715.nc - domain.lnd.1x1pt-vancouverCAN_navy.090715.nc - domain.lnd.1x1pt-urbanc_alpha_navy.090715.nc - $ATM_DOMAIN_FILE - null - domain.T62.050609.nc - domain.T62.050609.nc - domain.lnd.360x720_cruncep.130305.nc - domain.lnd.360x720.130305.nc - domain.lnd.360x720_gswp3.0v1.c170606.nc - domain.lnd.360x720_gswp3.0v1.c170606.nc - domain.lnd.0.125nldas2_0.125nldas2.190410.nc - nyf.giss.T62.051007.nc - nyf.gxgxs.T62.051007.nc - nyf.ncep.T62.050923.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.ocn.01col.SOFS.20150828.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - domain.T62.050609.nc - CORE2.t_10.ArcFactor.T62.1997-2004.nc - domain.atm.TL319.151007.nc - domain.atm.TL319.151007.nc - domain.atm.TL319.151007.nc - domain.atm.TL319.151007.nc - domain.atm.TL319.151007.nc - domain.atm.TL319.151007.nc - domain.atm.TL319.151007.nc - domain.atm.TL319.151007.nc - CORE2.t_10.ArcFactor.T62.1997-2004.nc - fco2_datm_1765-2007_c100614.nc - fco2_datm_global_simyr_1750-2014_CMIP6_c180929.nc - fco2_datm_lat-bands_simyr_1750-2015_CMIP6_c180929.nc - fco2_datm_lat-bandsSSP1-1.9_simyr_2014-2500_CMIP6_c190514.nc - fco2_datm_lat-bandsSSP1-2.6__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP2-4.5__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP3-7.0__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP4-3.4_simyr_2014-2500_CMIP6_c190514.nc - fco2_datm_lat-bandsSSP4-6.0__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP5-3.4__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP5-8.5__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_globalSSP1-1.9_simyr_2014-2501_CMIP6_c190514.nc - fco2_datm_globalSSP1-2.6__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP2-4.5__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP3-7.0__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP4-3.4_simyr_2014-2501_CMIP6_c190514.nc - fco2_datm_globalSSP4-6.0__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP5-3.4__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP5-8.5__simyr_2014-2501_CMIP6_c190506.nc - qian.bc.domain.c130531.nc - cruncep.bc.domain.0.9x1.25.nc - cruncep.bc.domain.c100429.nc - domain.permafrostRCN_P2.c2013.nc - null - aerosoldep_monthly_1849-2006_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP1-1.9_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP1-2.6_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP2-4.5_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP3-7.0_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP4-3.4_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP4-6.0_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP5-3.4_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP5-8.5_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_monthly_1850_mean_1.9x2.5_c090421.nc - aerosoldep_monthly_2000_mean_1.9x2.5_c090421.nc - aerosoldep_monthly_1849-2006_1.9x2.5_c090803.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - 
aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - - aerodep_clm_SSP245_b.e21.BWSSP245cmip6.f09_g17.CMIP6-SSP2-4.5-WACCM.001_2014-2101_monthly_0.9x1.25_c190401.nc - aerodep_clm_SSP370_b.e21.BWSSP370cmip6.f09_g17.CMIP6-SSP3-7.0-WACCM.001_2014-2101_monthly_0.9x1.25_c190402.nc - null - aerodep_clm_SSP585_b.e21.BSSP585cmip6.f09_g17.CMIP6-SSP5-8.5.001_2014-2101_monthly_0.9x1.25_c190419.nc - topodata_0.9x1.25_USGS_070110_stream_c151201.nc - null - - - - - char - streams - streams_file - Stream domain variable name(s). - - - time time - xc lon - yc lat - area area - mask mask - - - time time - doma_lon lon - doma_lat lat - doma_aream area - doma_mask mask - - - time time - lon lon - lat lat - area area - mask mask - - - time time - lon lon - lat lat - area area - mask mask - - - time time - LONGXY lon - LATIXY lat - area area - mask mask - - - time time - lonc lon - latc lat - area area - mask mask - - - - - - char - streams - streams_file - Stream data file directory. - - null - $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/mexicocityMEX.c080124 - $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/vancouverCAN.c080124 - $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/urbanc_alpha.c080416 - $DIN_LOC_ROOT_CLMFORC/$CLM_USRDAT_NAME/CLM1PT_data - $DATM_CPLHIST_DIR - $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.Qian.T62.c080727/Solar6Hrly - $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.Qian.T62.c080727/Precip6Hrly - $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.Qian.T62.c080727/TmpPrsHumWnd3Hrly - $DIN_LOC_ROOT/atm/datm7/atm_forcing_iso.datm7.Qian.T62.c080727/Solar6Hrly - $DIN_LOC_ROOT/atm/datm7/atm_forcing_iso.datm7.Qian.T62.c080727/Precip6Hrly - $DIN_LOC_ROOT/atm/datm7/atm_forcing_iso.datm7.Qian.T62.c080727/TmpPrsHumWnd3Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V4.c130305/Solar6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V4.c130305/Precip6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V4.c130305/TPHWL6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.v7.c160715/Solar6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.v7.c160715/Precip6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.v7.c160715/TPHWL6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516/Solar - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516/Precip - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516/TPHWL - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V5.c140715/Solar6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V5.c140715/Precip6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.cruncep_qianFill.0.5d.V5.c140715/TPHWL6Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516/Solar3Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516/Precip3Hrly - $DIN_LOC_ROOT_CLMFORC/atm_forcing.datm7.GSWP3.0.5d.v1.c170516/TPHWL3Hrly - $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.NLDAS2.0.125d.v1/Solar - $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.NLDAS2.0.125d.v1/Precip - $DIN_LOC_ROOT/atm/datm7/atm_forcing.datm7.NLDAS2.0.125d.v1/TPQWL - $DIN_LOC_ROOT/atm/datm7/NYF - $DIN_LOC_ROOT/atm/datm7/CORE2 - $DIN_LOC_ROOT/ocn/iaf - $DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - $DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - $DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - $DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - $DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - $DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - 
$DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - $DIN_LOC_ROOT/ocn/jra55/v1.3_noleap - $DIN_LOC_ROOT/atm/datm7/CORE2 - $DIN_LOC_ROOT/atm/datm7/CO2 - $DIN_LOC_ROOT/atm/datm7/bias_correction/precip/gpcp/qian - $DIN_LOC_ROOT/atm/datm7/clm_output/cruncep_precip_1deg/gpcp_1deg_bias_correction - $DIN_LOC_ROOT/atm/datm7/bias_correction/precip/cmap/cruncep - $DIN_LOC_ROOT/atm/datm7/anomaly_forcing - $DATM_CPLHIST_DIR - null - $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero - $DIN_LOC_ROOT/atm/datm7/topo_forcing - $DATM_CPLHIST_DIR - - - - - char - streams - streams_file - Stream data file path(s). - - null - clm1pt-1993-12.nc - clm1pt-1992-08.nc - - clm1pt-0001-08.nc - clm1pt-0001-09.nc - clm1pt-0001-10.nc - clm1pt-0001-11.nc - clm1pt-0001-12.nc - clm1pt-0002-01.nc - clm1pt-0002-02.nc - clm1pt-0002-03.nc - clm1pt-0002-04.nc - clm1pt-0002-05.nc - clm1pt-0002-06.nc - clm1pt-0002-07.nc - clm1pt-0002-08.nc - clm1pt-0002-09.nc - clm1pt-0002-10.nc - clm1pt-0002-11.nc - - %ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x1hi.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x1h.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x1d.%ym.nc - $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc - clmforc.Qian.c2006.T62.Solr.%ym.nc - clmforc.Qian.c2006.T62.Prec.%ym.nc - clmforc.Qian.c2006.T62.TPQW.%ym.nc - clmforc.Qian.c2006.T62.Solr.%ym.nc - clmforc.Qian.c2006.T62.Prec.%ym.nc - clmforc.Qian.c2006.T62.TPQW.%ym.nc - clmforc.cruncep.V4.c2011.0.5d.Solr.%ym.nc - clmforc.cruncep.V4.c2011.0.5d.Prec.%ym.nc - clmforc.cruncep.V4.c2011.0.5d.TPQWL.%ym.nc - clmforc.cruncep.V7.c2016.0.5d.Solr.%ym.nc - clmforc.cruncep.V7.c2016.0.5d.Prec.%ym.nc - clmforc.cruncep.V7.c2016.0.5d.TPQWL.%ym.nc - clmforc.GSWP3.c2011.0.5x0.5.Solr.%ym.nc - clmforc.GSWP3.c2011.0.5x0.5.Prec.%ym.nc - clmforc.GSWP3.c2011.0.5x0.5.TPQWL.%ym.nc - ctsmforc.NLDAS2.0.125d.v1.Solr.%ym.nc - ctsmforc.NLDAS2.0.125d.v1.Prec.%ym.nc - ctsmforc.NLDAS2.0.125d.v1.TPQWL.%ym.nc - nyf.giss.T62.051007.nc - nyf.gxgxs.T62.051007.nc - nyf.ncep.T62.050923.nc - ncep.SOFS.2010.nc - ncep.SOFS.2010.nc - SOFS.daily.03-09.2010.nc - SOFS.daily.03-09.2010.nc - SOFS.daily.03-09.2010.nc - SOFS.daily.03-09.2010.nc - SOFS.6hour.03-09.2010.nc - SOFS.6hour.03-09.2010.nc - SOFS.6hour.03-09.2010.nc - SOFS.6hour.03-09.2010.nc - - - - gcgcs.prec.T62.1948.nc - gcgcs.prec.T62.1949.nc - gcgcs.prec.T62.1950.nc - gcgcs.prec.T62.1951.nc - gcgcs.prec.T62.1952.nc - gcgcs.prec.T62.1953.nc - gcgcs.prec.T62.1954.nc - gcgcs.prec.T62.1955.nc - gcgcs.prec.T62.1956.nc - gcgcs.prec.T62.1957.nc - gcgcs.prec.T62.1958.nc - gcgcs.prec.T62.1959.nc - gcgcs.prec.T62.1960.nc - gcgcs.prec.T62.1961.nc - gcgcs.prec.T62.1962.nc - gcgcs.prec.T62.1963.nc - gcgcs.prec.T62.1964.nc - gcgcs.prec.T62.1965.nc - gcgcs.prec.T62.1966.nc - gcgcs.prec.T62.1967.nc - gcgcs.prec.T62.1968.nc - gcgcs.prec.T62.1969.nc - gcgcs.prec.T62.1970.nc - gcgcs.prec.T62.1971.nc - gcgcs.prec.T62.1972.nc - gcgcs.prec.T62.1973.nc - gcgcs.prec.T62.1974.nc - gcgcs.prec.T62.1975.nc - gcgcs.prec.T62.1976.nc - gcgcs.prec.T62.1977.nc - gcgcs.prec.T62.1978.nc - gcgcs.prec.T62.1979.nc - gcgcs.prec.T62.1980.nc - gcgcs.prec.T62.1981.nc - gcgcs.prec.T62.1982.nc - gcgcs.prec.T62.1983.nc - gcgcs.prec.T62.1984.nc - gcgcs.prec.T62.1985.nc - gcgcs.prec.T62.1986.nc - gcgcs.prec.T62.1987.nc - gcgcs.prec.T62.1988.nc - gcgcs.prec.T62.1989.nc - gcgcs.prec.T62.1990.nc - gcgcs.prec.T62.1991.nc - gcgcs.prec.T62.1992.nc - gcgcs.prec.T62.1993.nc - gcgcs.prec.T62.1994.nc - gcgcs.prec.T62.1995.nc - gcgcs.prec.T62.1996.nc - gcgcs.prec.T62.1997.nc - gcgcs.prec.T62.1998.nc - 
gcgcs.prec.T62.1999.nc - gcgcs.prec.T62.2000.nc - gcgcs.prec.T62.2001.nc - gcgcs.prec.T62.2002.nc - gcgcs.prec.T62.2003.nc - gcgcs.prec.T62.2004.nc - gcgcs.prec.T62.2005.nc - gcgcs.prec.T62.2006.nc - gcgcs.prec.T62.2007.nc - gcgcs.prec.T62.2008.20120412.nc - gcgcs.prec.T62.2009.20120412.nc - - - giss.lwdn.T62.1948.nc - giss.lwdn.T62.1949.nc - giss.lwdn.T62.1950.nc - giss.lwdn.T62.1951.nc - giss.lwdn.T62.1952.nc - giss.lwdn.T62.1953.nc - giss.lwdn.T62.1954.nc - giss.lwdn.T62.1955.nc - giss.lwdn.T62.1956.nc - giss.lwdn.T62.1957.nc - giss.lwdn.T62.1958.nc - giss.lwdn.T62.1959.nc - giss.lwdn.T62.1960.nc - giss.lwdn.T62.1961.nc - giss.lwdn.T62.1962.nc - giss.lwdn.T62.1963.nc - giss.lwdn.T62.1964.nc - giss.lwdn.T62.1965.nc - giss.lwdn.T62.1966.nc - giss.lwdn.T62.1967.nc - giss.lwdn.T62.1968.nc - giss.lwdn.T62.1969.nc - giss.lwdn.T62.1970.nc - giss.lwdn.T62.1971.nc - giss.lwdn.T62.1972.nc - giss.lwdn.T62.1973.nc - giss.lwdn.T62.1974.nc - giss.lwdn.T62.1975.nc - giss.lwdn.T62.1976.nc - giss.lwdn.T62.1977.nc - giss.lwdn.T62.1978.nc - giss.lwdn.T62.1979.nc - giss.lwdn.T62.1980.nc - giss.lwdn.T62.1981.nc - giss.lwdn.T62.1982.nc - giss.lwdn.T62.1983.nc - giss.lwdn.T62.1984.nc - giss.lwdn.T62.1985.nc - giss.lwdn.T62.1986.nc - giss.lwdn.T62.1987.nc - giss.lwdn.T62.1988.nc - giss.lwdn.T62.1989.nc - giss.lwdn.T62.1990.nc - giss.lwdn.T62.1991.nc - giss.lwdn.T62.1992.nc - giss.lwdn.T62.1993.nc - giss.lwdn.T62.1994.nc - giss.lwdn.T62.1995.nc - giss.lwdn.T62.1996.nc - giss.lwdn.T62.1997.nc - giss.lwdn.T62.1998.nc - giss.lwdn.T62.1999.nc - giss.lwdn.T62.2000.nc - giss.lwdn.T62.2001.nc - giss.lwdn.T62.2002.nc - giss.lwdn.T62.2003.nc - giss.lwdn.T62.2004.nc - giss.lwdn.T62.2005.nc - giss.lwdn.T62.2006.nc - giss.lwdn.T62.2007.nc - giss.lwdn.T62.2008.20120412.nc - giss.lwdn.T62.2009.20120412.nc - - - giss.swdn.T62.1948.nc - giss.swdn.T62.1949.nc - giss.swdn.T62.1950.nc - giss.swdn.T62.1951.nc - giss.swdn.T62.1952.nc - giss.swdn.T62.1953.nc - giss.swdn.T62.1954.nc - giss.swdn.T62.1955.nc - giss.swdn.T62.1956.nc - giss.swdn.T62.1957.nc - giss.swdn.T62.1958.nc - giss.swdn.T62.1959.nc - giss.swdn.T62.1960.nc - giss.swdn.T62.1961.nc - giss.swdn.T62.1962.nc - giss.swdn.T62.1963.nc - giss.swdn.T62.1964.nc - giss.swdn.T62.1965.nc - giss.swdn.T62.1966.nc - giss.swdn.T62.1967.nc - giss.swdn.T62.1968.nc - giss.swdn.T62.1969.nc - giss.swdn.T62.1970.nc - giss.swdn.T62.1971.nc - giss.swdn.T62.1972.nc - giss.swdn.T62.1973.nc - giss.swdn.T62.1974.nc - giss.swdn.T62.1975.nc - giss.swdn.T62.1976.nc - giss.swdn.T62.1977.nc - giss.swdn.T62.1978.nc - giss.swdn.T62.1979.nc - giss.swdn.T62.1980.nc - giss.swdn.T62.1981.nc - giss.swdn.T62.1982.nc - giss.swdn.T62.1983.nc - giss.swdn.T62.1984.nc - giss.swdn.T62.1985.nc - giss.swdn.T62.1986.nc - giss.swdn.T62.1987.nc - giss.swdn.T62.1988.nc - giss.swdn.T62.1989.nc - giss.swdn.T62.1990.nc - giss.swdn.T62.1991.nc - giss.swdn.T62.1992.nc - giss.swdn.T62.1993.nc - giss.swdn.T62.1994.nc - giss.swdn.T62.1995.nc - giss.swdn.T62.1996.nc - giss.swdn.T62.1997.nc - giss.swdn.T62.1998.nc - giss.swdn.T62.1999.nc - giss.swdn.T62.2000.nc - giss.swdn.T62.2001.nc - giss.swdn.T62.2002.nc - giss.swdn.T62.2003.nc - giss.swdn.T62.2004.nc - giss.swdn.T62.2005.nc - giss.swdn.T62.2006.nc - giss.swdn.T62.2007.nc - giss.swdn.T62.2008.20120412.nc - giss.swdn.T62.2009.20120412.nc - - - giss.swup.T62.1948.nc - giss.swup.T62.1949.nc - giss.swup.T62.1950.nc - giss.swup.T62.1951.nc - giss.swup.T62.1952.nc - giss.swup.T62.1953.nc - giss.swup.T62.1954.nc - giss.swup.T62.1955.nc - giss.swup.T62.1956.nc - giss.swup.T62.1957.nc 
- giss.swup.T62.1958.nc - giss.swup.T62.1959.nc - giss.swup.T62.1960.nc - giss.swup.T62.1961.nc - giss.swup.T62.1962.nc - giss.swup.T62.1963.nc - giss.swup.T62.1964.nc - giss.swup.T62.1965.nc - giss.swup.T62.1966.nc - giss.swup.T62.1967.nc - giss.swup.T62.1968.nc - giss.swup.T62.1969.nc - giss.swup.T62.1970.nc - giss.swup.T62.1971.nc - giss.swup.T62.1972.nc - giss.swup.T62.1973.nc - giss.swup.T62.1974.nc - giss.swup.T62.1975.nc - giss.swup.T62.1976.nc - giss.swup.T62.1977.nc - giss.swup.T62.1978.nc - giss.swup.T62.1979.nc - giss.swup.T62.1980.nc - giss.swup.T62.1981.nc - giss.swup.T62.1982.nc - giss.swup.T62.1983.nc - giss.swup.T62.1984.nc - giss.swup.T62.1985.nc - giss.swup.T62.1986.nc - giss.swup.T62.1987.nc - giss.swup.T62.1988.nc - giss.swup.T62.1989.nc - giss.swup.T62.1990.nc - giss.swup.T62.1991.nc - giss.swup.T62.1992.nc - giss.swup.T62.1993.nc - giss.swup.T62.1994.nc - giss.swup.T62.1995.nc - giss.swup.T62.1996.nc - giss.swup.T62.1997.nc - giss.swup.T62.1998.nc - giss.swup.T62.1999.nc - giss.swup.T62.2000.nc - giss.swup.T62.2001.nc - giss.swup.T62.2002.nc - giss.swup.T62.2003.nc - giss.swup.T62.2004.nc - giss.swup.T62.2005.nc - giss.swup.T62.2006.nc - giss.swup.T62.2007.nc - giss.swup.T62.2008.20120412.nc - giss.swup.T62.2009.20120412.nc - - - ncep.dn10.T62.1948.nc - ncep.dn10.T62.1949.nc - ncep.dn10.T62.1950.nc - ncep.dn10.T62.1951.nc - ncep.dn10.T62.1952.nc - ncep.dn10.T62.1953.nc - ncep.dn10.T62.1954.nc - ncep.dn10.T62.1955.nc - ncep.dn10.T62.1956.nc - ncep.dn10.T62.1957.nc - ncep.dn10.T62.1958.nc - ncep.dn10.T62.1959.nc - ncep.dn10.T62.1960.nc - ncep.dn10.T62.1961.nc - ncep.dn10.T62.1962.nc - ncep.dn10.T62.1963.nc - ncep.dn10.T62.1964.nc - ncep.dn10.T62.1965.nc - ncep.dn10.T62.1966.nc - ncep.dn10.T62.1967.nc - ncep.dn10.T62.1968.nc - ncep.dn10.T62.1969.nc - ncep.dn10.T62.1970.nc - ncep.dn10.T62.1971.nc - ncep.dn10.T62.1972.nc - ncep.dn10.T62.1973.nc - ncep.dn10.T62.1974.nc - ncep.dn10.T62.1975.nc - ncep.dn10.T62.1976.nc - ncep.dn10.T62.1977.nc - ncep.dn10.T62.1978.nc - ncep.dn10.T62.1979.nc - ncep.dn10.T62.1980.nc - ncep.dn10.T62.1981.nc - ncep.dn10.T62.1982.nc - ncep.dn10.T62.1983.nc - ncep.dn10.T62.1984.nc - ncep.dn10.T62.1985.nc - ncep.dn10.T62.1986.nc - ncep.dn10.T62.1987.nc - ncep.dn10.T62.1988.nc - ncep.dn10.T62.1989.nc - ncep.dn10.T62.1990.nc - ncep.dn10.T62.1991.nc - ncep.dn10.T62.1992.nc - ncep.dn10.T62.1993.nc - ncep.dn10.T62.1994.nc - ncep.dn10.T62.1995.nc - ncep.dn10.T62.1996.nc - ncep.dn10.T62.1997.nc - ncep.dn10.T62.1998.nc - ncep.dn10.T62.1999.nc - ncep.dn10.T62.2000.nc - ncep.dn10.T62.2001.nc - ncep.dn10.T62.2002.nc - ncep.dn10.T62.2003.nc - ncep.dn10.T62.2004.nc - ncep.dn10.T62.2005.20120414.nc - ncep.dn10.T62.2006.nc - ncep.dn10.T62.2007.nc - ncep.dn10.T62.2008.20120412.nc - ncep.dn10.T62.2009.20120412.nc - - - ncep.q_10.T62.1948.nc - ncep.q_10.T62.1949.nc - ncep.q_10.T62.1950.nc - ncep.q_10.T62.1951.nc - ncep.q_10.T62.1952.nc - ncep.q_10.T62.1953.nc - ncep.q_10.T62.1954.nc - ncep.q_10.T62.1955.nc - ncep.q_10.T62.1956.nc - ncep.q_10.T62.1957.nc - ncep.q_10.T62.1958.nc - ncep.q_10.T62.1959.nc - ncep.q_10.T62.1960.nc - ncep.q_10.T62.1961.nc - ncep.q_10.T62.1962.nc - ncep.q_10.T62.1963.nc - ncep.q_10.T62.1964.nc - ncep.q_10.T62.1965.nc - ncep.q_10.T62.1966.nc - ncep.q_10.T62.1967.nc - ncep.q_10.T62.1968.nc - ncep.q_10.T62.1969.nc - ncep.q_10.T62.1970.nc - ncep.q_10.T62.1971.nc - ncep.q_10.T62.1972.nc - ncep.q_10.T62.1973.nc - ncep.q_10.T62.1974.nc - ncep.q_10.T62.1975.nc - ncep.q_10.T62.1976.nc - ncep.q_10.T62.1977.nc - ncep.q_10.T62.1978.nc - 
ncep.q_10.T62.1979.nc - ncep.q_10.T62.1980.nc - ncep.q_10.T62.1981.nc - ncep.q_10.T62.1982.nc - ncep.q_10.T62.1983.nc - ncep.q_10.T62.1984.nc - ncep.q_10.T62.1985.nc - ncep.q_10.T62.1986.nc - ncep.q_10.T62.1987.nc - ncep.q_10.T62.1988.nc - ncep.q_10.T62.1989.nc - ncep.q_10.T62.1990.nc - ncep.q_10.T62.1991.nc - ncep.q_10.T62.1992.nc - ncep.q_10.T62.1993.nc - ncep.q_10.T62.1994.nc - ncep.q_10.T62.1995.nc - ncep.q_10.T62.1996.nc - ncep.q_10.T62.1997.nc - ncep.q_10.T62.1998.nc - ncep.q_10.T62.1999.nc - ncep.q_10.T62.2000.nc - ncep.q_10.T62.2001.nc - ncep.q_10.T62.2002.nc - ncep.q_10.T62.2003.nc - ncep.q_10.T62.2004.nc - ncep.q_10.T62.2005.20120414.nc - ncep.q_10.T62.2006.nc - ncep.q_10.T62.2007.nc - ncep.q_10.T62.2008.20120412.nc - ncep.q_10.T62.2009.20120412.nc - - - ncep.slp_.T62.1948.nc - ncep.slp_.T62.1949.nc - ncep.slp_.T62.1950.nc - ncep.slp_.T62.1951.nc - ncep.slp_.T62.1952.nc - ncep.slp_.T62.1953.nc - ncep.slp_.T62.1954.nc - ncep.slp_.T62.1955.nc - ncep.slp_.T62.1956.nc - ncep.slp_.T62.1957.nc - ncep.slp_.T62.1958.nc - ncep.slp_.T62.1959.nc - ncep.slp_.T62.1960.nc - ncep.slp_.T62.1961.nc - ncep.slp_.T62.1962.nc - ncep.slp_.T62.1963.nc - ncep.slp_.T62.1964.nc - ncep.slp_.T62.1965.nc - ncep.slp_.T62.1966.nc - ncep.slp_.T62.1967.nc - ncep.slp_.T62.1968.nc - ncep.slp_.T62.1969.nc - ncep.slp_.T62.1970.nc - ncep.slp_.T62.1971.nc - ncep.slp_.T62.1972.nc - ncep.slp_.T62.1973.nc - ncep.slp_.T62.1974.nc - ncep.slp_.T62.1975.nc - ncep.slp_.T62.1976.nc - ncep.slp_.T62.1977.nc - ncep.slp_.T62.1978.nc - ncep.slp_.T62.1979.nc - ncep.slp_.T62.1980.nc - ncep.slp_.T62.1981.nc - ncep.slp_.T62.1982.nc - ncep.slp_.T62.1983.nc - ncep.slp_.T62.1984.nc - ncep.slp_.T62.1985.nc - ncep.slp_.T62.1986.nc - ncep.slp_.T62.1987.nc - ncep.slp_.T62.1988.nc - ncep.slp_.T62.1989.nc - ncep.slp_.T62.1990.nc - ncep.slp_.T62.1991.nc - ncep.slp_.T62.1992.nc - ncep.slp_.T62.1993.nc - ncep.slp_.T62.1994.nc - ncep.slp_.T62.1995.nc - ncep.slp_.T62.1996.nc - ncep.slp_.T62.1997.nc - ncep.slp_.T62.1998.nc - ncep.slp_.T62.1999.nc - ncep.slp_.T62.2000.nc - ncep.slp_.T62.2001.nc - ncep.slp_.T62.2002.nc - ncep.slp_.T62.2003.nc - ncep.slp_.T62.2004.nc - ncep.slp_.T62.2005.20120414.nc - ncep.slp_.T62.2006.nc - ncep.slp_.T62.2007.nc - ncep.slp_.T62.2008.20120412.nc - ncep.slp_.T62.2009.20120412.nc - - - ncep.t_10.T62.1948.nc - ncep.t_10.T62.1949.nc - ncep.t_10.T62.1950.nc - ncep.t_10.T62.1951.nc - ncep.t_10.T62.1952.nc - ncep.t_10.T62.1953.nc - ncep.t_10.T62.1954.nc - ncep.t_10.T62.1955.nc - ncep.t_10.T62.1956.nc - ncep.t_10.T62.1957.nc - ncep.t_10.T62.1958.nc - ncep.t_10.T62.1959.nc - ncep.t_10.T62.1960.nc - ncep.t_10.T62.1961.nc - ncep.t_10.T62.1962.nc - ncep.t_10.T62.1963.nc - ncep.t_10.T62.1964.nc - ncep.t_10.T62.1965.nc - ncep.t_10.T62.1966.nc - ncep.t_10.T62.1967.nc - ncep.t_10.T62.1968.nc - ncep.t_10.T62.1969.nc - ncep.t_10.T62.1970.nc - ncep.t_10.T62.1971.nc - ncep.t_10.T62.1972.nc - ncep.t_10.T62.1973.nc - ncep.t_10.T62.1974.nc - ncep.t_10.T62.1975.nc - ncep.t_10.T62.1976.nc - ncep.t_10.T62.1977.nc - ncep.t_10.T62.1978.nc - ncep.t_10.T62.1979.nc - ncep.t_10.T62.1980.nc - ncep.t_10.T62.1981.nc - ncep.t_10.T62.1982.nc - ncep.t_10.T62.1983.nc - ncep.t_10.T62.1984.nc - ncep.t_10.T62.1985.nc - ncep.t_10.T62.1986.nc - ncep.t_10.T62.1987.nc - ncep.t_10.T62.1988.nc - ncep.t_10.T62.1989.nc - ncep.t_10.T62.1990.nc - ncep.t_10.T62.1991.nc - ncep.t_10.T62.1992.nc - ncep.t_10.T62.1993.nc - ncep.t_10.T62.1994.nc - ncep.t_10.T62.1995.nc - ncep.t_10.T62.1996.nc - ncep.t_10.T62.1997.nc - ncep.t_10.T62.1998.nc - ncep.t_10.T62.1999.nc - 
ncep.t_10.T62.2000.nc - ncep.t_10.T62.2001.nc - ncep.t_10.T62.2002.nc - ncep.t_10.T62.2003.nc - ncep.t_10.T62.2004.nc - ncep.t_10.T62.2005.20120414.nc - ncep.t_10.T62.2006.nc - ncep.t_10.T62.2007.nc - ncep.t_10.T62.2008.20120412.nc - ncep.t_10.T62.2009.20120412.nc - - - ncep.u_10.T62.1948.nc - ncep.u_10.T62.1949.nc - ncep.u_10.T62.1950.nc - ncep.u_10.T62.1951.nc - ncep.u_10.T62.1952.nc - ncep.u_10.T62.1953.nc - ncep.u_10.T62.1954.nc - ncep.u_10.T62.1955.nc - ncep.u_10.T62.1956.nc - ncep.u_10.T62.1957.nc - ncep.u_10.T62.1958.nc - ncep.u_10.T62.1959.nc - ncep.u_10.T62.1960.nc - ncep.u_10.T62.1961.nc - ncep.u_10.T62.1962.nc - ncep.u_10.T62.1963.nc - ncep.u_10.T62.1964.nc - ncep.u_10.T62.1965.nc - ncep.u_10.T62.1966.nc - ncep.u_10.T62.1967.nc - ncep.u_10.T62.1968.nc - ncep.u_10.T62.1969.nc - ncep.u_10.T62.1970.nc - ncep.u_10.T62.1971.nc - ncep.u_10.T62.1972.nc - ncep.u_10.T62.1973.nc - ncep.u_10.T62.1974.nc - ncep.u_10.T62.1975.nc - ncep.u_10.T62.1976.nc - ncep.u_10.T62.1977.nc - ncep.u_10.T62.1978.nc - ncep.u_10.T62.1979.nc - ncep.u_10.T62.1980.nc - ncep.u_10.T62.1981.nc - ncep.u_10.T62.1982.nc - ncep.u_10.T62.1983.nc - ncep.u_10.T62.1984.nc - ncep.u_10.T62.1985.nc - ncep.u_10.T62.1986.nc - ncep.u_10.T62.1987.nc - ncep.u_10.T62.1988.nc - ncep.u_10.T62.1989.nc - ncep.u_10.T62.1990.nc - ncep.u_10.T62.1991.nc - ncep.u_10.T62.1992.nc - ncep.u_10.T62.1993.nc - ncep.u_10.T62.1994.nc - ncep.u_10.T62.1995.nc - ncep.u_10.T62.1996.nc - ncep.u_10.T62.1997.nc - ncep.u_10.T62.1998.nc - ncep.u_10.T62.1999.nc - ncep.u_10.T62.2000.nc - ncep.u_10.T62.2001.nc - ncep.u_10.T62.2002.nc - ncep.u_10.T62.2003.nc - ncep.u_10.T62.2004.nc - ncep.u_10.T62.2005.20120414.nc - ncep.u_10.T62.2006.nc - ncep.u_10.T62.2007.nc - ncep.u_10.T62.2008.20120412.nc - ncep.u_10.T62.2009.20120412.nc - - - ncep.v_10.T62.1948.nc - ncep.v_10.T62.1949.nc - ncep.v_10.T62.1950.nc - ncep.v_10.T62.1951.nc - ncep.v_10.T62.1952.nc - ncep.v_10.T62.1953.nc - ncep.v_10.T62.1954.nc - ncep.v_10.T62.1955.nc - ncep.v_10.T62.1956.nc - ncep.v_10.T62.1957.nc - ncep.v_10.T62.1958.nc - ncep.v_10.T62.1959.nc - ncep.v_10.T62.1960.nc - ncep.v_10.T62.1961.nc - ncep.v_10.T62.1962.nc - ncep.v_10.T62.1963.nc - ncep.v_10.T62.1964.nc - ncep.v_10.T62.1965.nc - ncep.v_10.T62.1966.nc - ncep.v_10.T62.1967.nc - ncep.v_10.T62.1968.nc - ncep.v_10.T62.1969.nc - ncep.v_10.T62.1970.nc - ncep.v_10.T62.1971.nc - ncep.v_10.T62.1972.nc - ncep.v_10.T62.1973.nc - ncep.v_10.T62.1974.nc - ncep.v_10.T62.1975.nc - ncep.v_10.T62.1976.nc - ncep.v_10.T62.1977.nc - ncep.v_10.T62.1978.nc - ncep.v_10.T62.1979.nc - ncep.v_10.T62.1980.nc - ncep.v_10.T62.1981.nc - ncep.v_10.T62.1982.nc - ncep.v_10.T62.1983.nc - ncep.v_10.T62.1984.nc - ncep.v_10.T62.1985.nc - ncep.v_10.T62.1986.nc - ncep.v_10.T62.1987.nc - ncep.v_10.T62.1988.nc - ncep.v_10.T62.1989.nc - ncep.v_10.T62.1990.nc - ncep.v_10.T62.1991.nc - ncep.v_10.T62.1992.nc - ncep.v_10.T62.1993.nc - ncep.v_10.T62.1994.nc - ncep.v_10.T62.1995.nc - ncep.v_10.T62.1996.nc - ncep.v_10.T62.1997.nc - ncep.v_10.T62.1998.nc - ncep.v_10.T62.1999.nc - ncep.v_10.T62.2000.nc - ncep.v_10.T62.2001.nc - ncep.v_10.T62.2002.nc - ncep.v_10.T62.2003.nc - ncep.v_10.T62.2004.nc - ncep.v_10.T62.2005.20120414.nc - ncep.v_10.T62.2006.nc - ncep.v_10.T62.2007.nc - ncep.v_10.T62.2008.20120412.nc - ncep.v_10.T62.2009.20120412.nc - - CORE2.t_10.ArcFactor.T62.1997-2004.nc - - JRA.v1.3.prec.TL319.1958.171019.nc - JRA.v1.3.prec.TL319.1959.171019.nc - JRA.v1.3.prec.TL319.1960.171019.nc - JRA.v1.3.prec.TL319.1961.171019.nc - JRA.v1.3.prec.TL319.1962.171019.nc - 
JRA.v1.3.prec.TL319.1963.171019.nc - JRA.v1.3.prec.TL319.1964.171019.nc - JRA.v1.3.prec.TL319.1965.171019.nc - JRA.v1.3.prec.TL319.1966.171019.nc - JRA.v1.3.prec.TL319.1967.171019.nc - JRA.v1.3.prec.TL319.1968.171019.nc - JRA.v1.3.prec.TL319.1969.171019.nc - JRA.v1.3.prec.TL319.1970.171019.nc - JRA.v1.3.prec.TL319.1971.171019.nc - JRA.v1.3.prec.TL319.1972.171019.nc - JRA.v1.3.prec.TL319.1973.171019.nc - JRA.v1.3.prec.TL319.1974.171019.nc - JRA.v1.3.prec.TL319.1975.171019.nc - JRA.v1.3.prec.TL319.1976.171019.nc - JRA.v1.3.prec.TL319.1977.171019.nc - JRA.v1.3.prec.TL319.1978.171019.nc - JRA.v1.3.prec.TL319.1979.171019.nc - JRA.v1.3.prec.TL319.1980.171019.nc - JRA.v1.3.prec.TL319.1981.171019.nc - JRA.v1.3.prec.TL319.1982.171019.nc - JRA.v1.3.prec.TL319.1983.171019.nc - JRA.v1.3.prec.TL319.1984.171019.nc - JRA.v1.3.prec.TL319.1985.171019.nc - JRA.v1.3.prec.TL319.1986.171019.nc - JRA.v1.3.prec.TL319.1987.171019.nc - JRA.v1.3.prec.TL319.1988.171019.nc - JRA.v1.3.prec.TL319.1989.171019.nc - JRA.v1.3.prec.TL319.1990.171019.nc - JRA.v1.3.prec.TL319.1991.171019.nc - JRA.v1.3.prec.TL319.1992.171019.nc - JRA.v1.3.prec.TL319.1993.171019.nc - JRA.v1.3.prec.TL319.1994.171019.nc - JRA.v1.3.prec.TL319.1995.171019.nc - JRA.v1.3.prec.TL319.1996.171019.nc - JRA.v1.3.prec.TL319.1997.171019.nc - JRA.v1.3.prec.TL319.1998.171019.nc - JRA.v1.3.prec.TL319.1999.171019.nc - JRA.v1.3.prec.TL319.2000.171019.nc - JRA.v1.3.prec.TL319.2001.171019.nc - JRA.v1.3.prec.TL319.2002.171019.nc - JRA.v1.3.prec.TL319.2003.171019.nc - JRA.v1.3.prec.TL319.2004.171019.nc - JRA.v1.3.prec.TL319.2005.171019.nc - JRA.v1.3.prec.TL319.2006.171019.nc - JRA.v1.3.prec.TL319.2007.171019.nc - JRA.v1.3.prec.TL319.2008.171019.nc - JRA.v1.3.prec.TL319.2009.171019.nc - JRA.v1.3.prec.TL319.2010.171019.nc - JRA.v1.3.prec.TL319.2011.171019.nc - JRA.v1.3.prec.TL319.2012.171019.nc - JRA.v1.3.prec.TL319.2013.171019.nc - JRA.v1.3.prec.TL319.2014.171019.nc - JRA.v1.3.prec.TL319.2015.171019.nc - JRA.v1.3.prec.TL319.2016.171019.nc - - - JRA.v1.3.lwdn.TL319.1958.171019.nc - JRA.v1.3.lwdn.TL319.1959.171019.nc - JRA.v1.3.lwdn.TL319.1960.171019.nc - JRA.v1.3.lwdn.TL319.1961.171019.nc - JRA.v1.3.lwdn.TL319.1962.171019.nc - JRA.v1.3.lwdn.TL319.1963.171019.nc - JRA.v1.3.lwdn.TL319.1964.171019.nc - JRA.v1.3.lwdn.TL319.1965.171019.nc - JRA.v1.3.lwdn.TL319.1966.171019.nc - JRA.v1.3.lwdn.TL319.1967.171019.nc - JRA.v1.3.lwdn.TL319.1968.171019.nc - JRA.v1.3.lwdn.TL319.1969.171019.nc - JRA.v1.3.lwdn.TL319.1970.171019.nc - JRA.v1.3.lwdn.TL319.1971.171019.nc - JRA.v1.3.lwdn.TL319.1972.171019.nc - JRA.v1.3.lwdn.TL319.1973.171019.nc - JRA.v1.3.lwdn.TL319.1974.171019.nc - JRA.v1.3.lwdn.TL319.1975.171019.nc - JRA.v1.3.lwdn.TL319.1976.171019.nc - JRA.v1.3.lwdn.TL319.1977.171019.nc - JRA.v1.3.lwdn.TL319.1978.171019.nc - JRA.v1.3.lwdn.TL319.1979.171019.nc - JRA.v1.3.lwdn.TL319.1980.171019.nc - JRA.v1.3.lwdn.TL319.1981.171019.nc - JRA.v1.3.lwdn.TL319.1982.171019.nc - JRA.v1.3.lwdn.TL319.1983.171019.nc - JRA.v1.3.lwdn.TL319.1984.171019.nc - JRA.v1.3.lwdn.TL319.1985.171019.nc - JRA.v1.3.lwdn.TL319.1986.171019.nc - JRA.v1.3.lwdn.TL319.1987.171019.nc - JRA.v1.3.lwdn.TL319.1988.171019.nc - JRA.v1.3.lwdn.TL319.1989.171019.nc - JRA.v1.3.lwdn.TL319.1990.171019.nc - JRA.v1.3.lwdn.TL319.1991.171019.nc - JRA.v1.3.lwdn.TL319.1992.171019.nc - JRA.v1.3.lwdn.TL319.1993.171019.nc - JRA.v1.3.lwdn.TL319.1994.171019.nc - JRA.v1.3.lwdn.TL319.1995.171019.nc - JRA.v1.3.lwdn.TL319.1996.171019.nc - JRA.v1.3.lwdn.TL319.1997.171019.nc - JRA.v1.3.lwdn.TL319.1998.171019.nc - JRA.v1.3.lwdn.TL319.1999.171019.nc 
- JRA.v1.3.lwdn.TL319.2000.171019.nc - JRA.v1.3.lwdn.TL319.2001.171019.nc - JRA.v1.3.lwdn.TL319.2002.171019.nc - JRA.v1.3.lwdn.TL319.2003.171019.nc - JRA.v1.3.lwdn.TL319.2004.171019.nc - JRA.v1.3.lwdn.TL319.2005.171019.nc - JRA.v1.3.lwdn.TL319.2006.171019.nc - JRA.v1.3.lwdn.TL319.2007.171019.nc - JRA.v1.3.lwdn.TL319.2008.171019.nc - JRA.v1.3.lwdn.TL319.2009.171019.nc - JRA.v1.3.lwdn.TL319.2010.171019.nc - JRA.v1.3.lwdn.TL319.2011.171019.nc - JRA.v1.3.lwdn.TL319.2012.171019.nc - JRA.v1.3.lwdn.TL319.2013.171019.nc - JRA.v1.3.lwdn.TL319.2014.171019.nc - JRA.v1.3.lwdn.TL319.2015.171019.nc - JRA.v1.3.lwdn.TL319.2016.171019.nc - - - JRA.v1.3.swdn.TL319.1958.171019.nc - JRA.v1.3.swdn.TL319.1959.171019.nc - JRA.v1.3.swdn.TL319.1960.171019.nc - JRA.v1.3.swdn.TL319.1961.171019.nc - JRA.v1.3.swdn.TL319.1962.171019.nc - JRA.v1.3.swdn.TL319.1963.171019.nc - JRA.v1.3.swdn.TL319.1964.171019.nc - JRA.v1.3.swdn.TL319.1965.171019.nc - JRA.v1.3.swdn.TL319.1966.171019.nc - JRA.v1.3.swdn.TL319.1967.171019.nc - JRA.v1.3.swdn.TL319.1968.171019.nc - JRA.v1.3.swdn.TL319.1969.171019.nc - JRA.v1.3.swdn.TL319.1970.171019.nc - JRA.v1.3.swdn.TL319.1971.171019.nc - JRA.v1.3.swdn.TL319.1972.171019.nc - JRA.v1.3.swdn.TL319.1973.171019.nc - JRA.v1.3.swdn.TL319.1974.171019.nc - JRA.v1.3.swdn.TL319.1975.171019.nc - JRA.v1.3.swdn.TL319.1976.171019.nc - JRA.v1.3.swdn.TL319.1977.171019.nc - JRA.v1.3.swdn.TL319.1978.171019.nc - JRA.v1.3.swdn.TL319.1979.171019.nc - JRA.v1.3.swdn.TL319.1980.171019.nc - JRA.v1.3.swdn.TL319.1981.171019.nc - JRA.v1.3.swdn.TL319.1982.171019.nc - JRA.v1.3.swdn.TL319.1983.171019.nc - JRA.v1.3.swdn.TL319.1984.171019.nc - JRA.v1.3.swdn.TL319.1985.171019.nc - JRA.v1.3.swdn.TL319.1986.171019.nc - JRA.v1.3.swdn.TL319.1987.171019.nc - JRA.v1.3.swdn.TL319.1988.171019.nc - JRA.v1.3.swdn.TL319.1989.171019.nc - JRA.v1.3.swdn.TL319.1990.171019.nc - JRA.v1.3.swdn.TL319.1991.171019.nc - JRA.v1.3.swdn.TL319.1992.171019.nc - JRA.v1.3.swdn.TL319.1993.171019.nc - JRA.v1.3.swdn.TL319.1994.171019.nc - JRA.v1.3.swdn.TL319.1995.171019.nc - JRA.v1.3.swdn.TL319.1996.171019.nc - JRA.v1.3.swdn.TL319.1997.171019.nc - JRA.v1.3.swdn.TL319.1998.171019.nc - JRA.v1.3.swdn.TL319.1999.171019.nc - JRA.v1.3.swdn.TL319.2000.171019.nc - JRA.v1.3.swdn.TL319.2001.171019.nc - JRA.v1.3.swdn.TL319.2002.171019.nc - JRA.v1.3.swdn.TL319.2003.171019.nc - JRA.v1.3.swdn.TL319.2004.171019.nc - JRA.v1.3.swdn.TL319.2005.171019.nc - JRA.v1.3.swdn.TL319.2006.171019.nc - JRA.v1.3.swdn.TL319.2007.171019.nc - JRA.v1.3.swdn.TL319.2008.171019.nc - JRA.v1.3.swdn.TL319.2009.171019.nc - JRA.v1.3.swdn.TL319.2010.171019.nc - JRA.v1.3.swdn.TL319.2011.171019.nc - JRA.v1.3.swdn.TL319.2012.171019.nc - JRA.v1.3.swdn.TL319.2013.171019.nc - JRA.v1.3.swdn.TL319.2014.171019.nc - JRA.v1.3.swdn.TL319.2015.171019.nc - JRA.v1.3.swdn.TL319.2016.171019.nc - - - JRA.v1.3.q_10.TL319.1958.171019.nc - JRA.v1.3.q_10.TL319.1959.171019.nc - JRA.v1.3.q_10.TL319.1960.171019.nc - JRA.v1.3.q_10.TL319.1961.171019.nc - JRA.v1.3.q_10.TL319.1962.171019.nc - JRA.v1.3.q_10.TL319.1963.171019.nc - JRA.v1.3.q_10.TL319.1964.171019.nc - JRA.v1.3.q_10.TL319.1965.171019.nc - JRA.v1.3.q_10.TL319.1966.171019.nc - JRA.v1.3.q_10.TL319.1967.171019.nc - JRA.v1.3.q_10.TL319.1968.171019.nc - JRA.v1.3.q_10.TL319.1969.171019.nc - JRA.v1.3.q_10.TL319.1970.171019.nc - JRA.v1.3.q_10.TL319.1971.171019.nc - JRA.v1.3.q_10.TL319.1972.171019.nc - JRA.v1.3.q_10.TL319.1973.171019.nc - JRA.v1.3.q_10.TL319.1974.171019.nc - JRA.v1.3.q_10.TL319.1975.171019.nc - JRA.v1.3.q_10.TL319.1976.171019.nc - 
JRA.v1.3.q_10.TL319.1977.171019.nc - JRA.v1.3.q_10.TL319.1978.171019.nc - JRA.v1.3.q_10.TL319.1979.171019.nc - JRA.v1.3.q_10.TL319.1980.171019.nc - JRA.v1.3.q_10.TL319.1981.171019.nc - JRA.v1.3.q_10.TL319.1982.171019.nc - JRA.v1.3.q_10.TL319.1983.171019.nc - JRA.v1.3.q_10.TL319.1984.171019.nc - JRA.v1.3.q_10.TL319.1985.171019.nc - JRA.v1.3.q_10.TL319.1986.171019.nc - JRA.v1.3.q_10.TL319.1987.171019.nc - JRA.v1.3.q_10.TL319.1988.171019.nc - JRA.v1.3.q_10.TL319.1989.171019.nc - JRA.v1.3.q_10.TL319.1990.171019.nc - JRA.v1.3.q_10.TL319.1991.171019.nc - JRA.v1.3.q_10.TL319.1992.171019.nc - JRA.v1.3.q_10.TL319.1993.171019.nc - JRA.v1.3.q_10.TL319.1994.171019.nc - JRA.v1.3.q_10.TL319.1995.171019.nc - JRA.v1.3.q_10.TL319.1996.171019.nc - JRA.v1.3.q_10.TL319.1997.171019.nc - JRA.v1.3.q_10.TL319.1998.171019.nc - JRA.v1.3.q_10.TL319.1999.171019.nc - JRA.v1.3.q_10.TL319.2000.171019.nc - JRA.v1.3.q_10.TL319.2001.171019.nc - JRA.v1.3.q_10.TL319.2002.171019.nc - JRA.v1.3.q_10.TL319.2003.171019.nc - JRA.v1.3.q_10.TL319.2004.171019.nc - JRA.v1.3.q_10.TL319.2005.171019.nc - JRA.v1.3.q_10.TL319.2006.171019.nc - JRA.v1.3.q_10.TL319.2007.171019.nc - JRA.v1.3.q_10.TL319.2008.171019.nc - JRA.v1.3.q_10.TL319.2009.171019.nc - JRA.v1.3.q_10.TL319.2010.171019.nc - JRA.v1.3.q_10.TL319.2011.171019.nc - JRA.v1.3.q_10.TL319.2012.171019.nc - JRA.v1.3.q_10.TL319.2013.171019.nc - JRA.v1.3.q_10.TL319.2014.171019.nc - JRA.v1.3.q_10.TL319.2015.171019.nc - JRA.v1.3.q_10.TL319.2016.171019.nc - - - JRA.v1.3.slp.TL319.1958.171019.nc - JRA.v1.3.slp.TL319.1959.171019.nc - JRA.v1.3.slp.TL319.1960.171019.nc - JRA.v1.3.slp.TL319.1961.171019.nc - JRA.v1.3.slp.TL319.1962.171019.nc - JRA.v1.3.slp.TL319.1963.171019.nc - JRA.v1.3.slp.TL319.1964.171019.nc - JRA.v1.3.slp.TL319.1965.171019.nc - JRA.v1.3.slp.TL319.1966.171019.nc - JRA.v1.3.slp.TL319.1967.171019.nc - JRA.v1.3.slp.TL319.1968.171019.nc - JRA.v1.3.slp.TL319.1969.171019.nc - JRA.v1.3.slp.TL319.1970.171019.nc - JRA.v1.3.slp.TL319.1971.171019.nc - JRA.v1.3.slp.TL319.1972.171019.nc - JRA.v1.3.slp.TL319.1973.171019.nc - JRA.v1.3.slp.TL319.1974.171019.nc - JRA.v1.3.slp.TL319.1975.171019.nc - JRA.v1.3.slp.TL319.1976.171019.nc - JRA.v1.3.slp.TL319.1977.171019.nc - JRA.v1.3.slp.TL319.1978.171019.nc - JRA.v1.3.slp.TL319.1979.171019.nc - JRA.v1.3.slp.TL319.1980.171019.nc - JRA.v1.3.slp.TL319.1981.171019.nc - JRA.v1.3.slp.TL319.1982.171019.nc - JRA.v1.3.slp.TL319.1983.171019.nc - JRA.v1.3.slp.TL319.1984.171019.nc - JRA.v1.3.slp.TL319.1985.171019.nc - JRA.v1.3.slp.TL319.1986.171019.nc - JRA.v1.3.slp.TL319.1987.171019.nc - JRA.v1.3.slp.TL319.1988.171019.nc - JRA.v1.3.slp.TL319.1989.171019.nc - JRA.v1.3.slp.TL319.1990.171019.nc - JRA.v1.3.slp.TL319.1991.171019.nc - JRA.v1.3.slp.TL319.1992.171019.nc - JRA.v1.3.slp.TL319.1993.171019.nc - JRA.v1.3.slp.TL319.1994.171019.nc - JRA.v1.3.slp.TL319.1995.171019.nc - JRA.v1.3.slp.TL319.1996.171019.nc - JRA.v1.3.slp.TL319.1997.171019.nc - JRA.v1.3.slp.TL319.1998.171019.nc - JRA.v1.3.slp.TL319.1999.171019.nc - JRA.v1.3.slp.TL319.2000.171019.nc - JRA.v1.3.slp.TL319.2001.171019.nc - JRA.v1.3.slp.TL319.2002.171019.nc - JRA.v1.3.slp.TL319.2003.171019.nc - JRA.v1.3.slp.TL319.2004.171019.nc - JRA.v1.3.slp.TL319.2005.171019.nc - JRA.v1.3.slp.TL319.2006.171019.nc - JRA.v1.3.slp.TL319.2007.171019.nc - JRA.v1.3.slp.TL319.2008.171019.nc - JRA.v1.3.slp.TL319.2009.171019.nc - JRA.v1.3.slp.TL319.2010.171019.nc - JRA.v1.3.slp.TL319.2011.171019.nc - JRA.v1.3.slp.TL319.2012.171019.nc - JRA.v1.3.slp.TL319.2013.171019.nc - JRA.v1.3.slp.TL319.2014.171019.nc - 
JRA.v1.3.slp.TL319.2015.171019.nc - JRA.v1.3.slp.TL319.2016.171019.nc - - - JRA.v1.3.t_10.TL319.1958.171019.nc - JRA.v1.3.t_10.TL319.1959.171019.nc - JRA.v1.3.t_10.TL319.1960.171019.nc - JRA.v1.3.t_10.TL319.1961.171019.nc - JRA.v1.3.t_10.TL319.1962.171019.nc - JRA.v1.3.t_10.TL319.1963.171019.nc - JRA.v1.3.t_10.TL319.1964.171019.nc - JRA.v1.3.t_10.TL319.1965.171019.nc - JRA.v1.3.t_10.TL319.1966.171019.nc - JRA.v1.3.t_10.TL319.1967.171019.nc - JRA.v1.3.t_10.TL319.1968.171019.nc - JRA.v1.3.t_10.TL319.1969.171019.nc - JRA.v1.3.t_10.TL319.1970.171019.nc - JRA.v1.3.t_10.TL319.1971.171019.nc - JRA.v1.3.t_10.TL319.1972.171019.nc - JRA.v1.3.t_10.TL319.1973.171019.nc - JRA.v1.3.t_10.TL319.1974.171019.nc - JRA.v1.3.t_10.TL319.1975.171019.nc - JRA.v1.3.t_10.TL319.1976.171019.nc - JRA.v1.3.t_10.TL319.1977.171019.nc - JRA.v1.3.t_10.TL319.1978.171019.nc - JRA.v1.3.t_10.TL319.1979.171019.nc - JRA.v1.3.t_10.TL319.1980.171019.nc - JRA.v1.3.t_10.TL319.1981.171019.nc - JRA.v1.3.t_10.TL319.1982.171019.nc - JRA.v1.3.t_10.TL319.1983.171019.nc - JRA.v1.3.t_10.TL319.1984.171019.nc - JRA.v1.3.t_10.TL319.1985.171019.nc - JRA.v1.3.t_10.TL319.1986.171019.nc - JRA.v1.3.t_10.TL319.1987.171019.nc - JRA.v1.3.t_10.TL319.1988.171019.nc - JRA.v1.3.t_10.TL319.1989.171019.nc - JRA.v1.3.t_10.TL319.1990.171019.nc - JRA.v1.3.t_10.TL319.1991.171019.nc - JRA.v1.3.t_10.TL319.1992.171019.nc - JRA.v1.3.t_10.TL319.1993.171019.nc - JRA.v1.3.t_10.TL319.1994.171019.nc - JRA.v1.3.t_10.TL319.1995.171019.nc - JRA.v1.3.t_10.TL319.1996.171019.nc - JRA.v1.3.t_10.TL319.1997.171019.nc - JRA.v1.3.t_10.TL319.1998.171019.nc - JRA.v1.3.t_10.TL319.1999.171019.nc - JRA.v1.3.t_10.TL319.2000.171019.nc - JRA.v1.3.t_10.TL319.2001.171019.nc - JRA.v1.3.t_10.TL319.2002.171019.nc - JRA.v1.3.t_10.TL319.2003.171019.nc - JRA.v1.3.t_10.TL319.2004.171019.nc - JRA.v1.3.t_10.TL319.2005.171019.nc - JRA.v1.3.t_10.TL319.2006.171019.nc - JRA.v1.3.t_10.TL319.2007.171019.nc - JRA.v1.3.t_10.TL319.2008.171019.nc - JRA.v1.3.t_10.TL319.2009.171019.nc - JRA.v1.3.t_10.TL319.2010.171019.nc - JRA.v1.3.t_10.TL319.2011.171019.nc - JRA.v1.3.t_10.TL319.2012.171019.nc - JRA.v1.3.t_10.TL319.2013.171019.nc - JRA.v1.3.t_10.TL319.2014.171019.nc - JRA.v1.3.t_10.TL319.2015.171019.nc - JRA.v1.3.t_10.TL319.2016.171019.nc - - - JRA.v1.3.u_10.TL319.1958.171019.nc - JRA.v1.3.u_10.TL319.1959.171019.nc - JRA.v1.3.u_10.TL319.1960.171019.nc - JRA.v1.3.u_10.TL319.1961.171019.nc - JRA.v1.3.u_10.TL319.1962.171019.nc - JRA.v1.3.u_10.TL319.1963.171019.nc - JRA.v1.3.u_10.TL319.1964.171019.nc - JRA.v1.3.u_10.TL319.1965.171019.nc - JRA.v1.3.u_10.TL319.1966.171019.nc - JRA.v1.3.u_10.TL319.1967.171019.nc - JRA.v1.3.u_10.TL319.1968.171019.nc - JRA.v1.3.u_10.TL319.1969.171019.nc - JRA.v1.3.u_10.TL319.1970.171019.nc - JRA.v1.3.u_10.TL319.1971.171019.nc - JRA.v1.3.u_10.TL319.1972.171019.nc - JRA.v1.3.u_10.TL319.1973.171019.nc - JRA.v1.3.u_10.TL319.1974.171019.nc - JRA.v1.3.u_10.TL319.1975.171019.nc - JRA.v1.3.u_10.TL319.1976.171019.nc - JRA.v1.3.u_10.TL319.1977.171019.nc - JRA.v1.3.u_10.TL319.1978.171019.nc - JRA.v1.3.u_10.TL319.1979.171019.nc - JRA.v1.3.u_10.TL319.1980.171019.nc - JRA.v1.3.u_10.TL319.1981.171019.nc - JRA.v1.3.u_10.TL319.1982.171019.nc - JRA.v1.3.u_10.TL319.1983.171019.nc - JRA.v1.3.u_10.TL319.1984.171019.nc - JRA.v1.3.u_10.TL319.1985.171019.nc - JRA.v1.3.u_10.TL319.1986.171019.nc - JRA.v1.3.u_10.TL319.1987.171019.nc - JRA.v1.3.u_10.TL319.1988.171019.nc - JRA.v1.3.u_10.TL319.1989.171019.nc - JRA.v1.3.u_10.TL319.1990.171019.nc - JRA.v1.3.u_10.TL319.1991.171019.nc - 
JRA.v1.3.u_10.TL319.1992.171019.nc - JRA.v1.3.u_10.TL319.1993.171019.nc - JRA.v1.3.u_10.TL319.1994.171019.nc - JRA.v1.3.u_10.TL319.1995.171019.nc - JRA.v1.3.u_10.TL319.1996.171019.nc - JRA.v1.3.u_10.TL319.1997.171019.nc - JRA.v1.3.u_10.TL319.1998.171019.nc - JRA.v1.3.u_10.TL319.1999.171019.nc - JRA.v1.3.u_10.TL319.2000.171019.nc - JRA.v1.3.u_10.TL319.2001.171019.nc - JRA.v1.3.u_10.TL319.2002.171019.nc - JRA.v1.3.u_10.TL319.2003.171019.nc - JRA.v1.3.u_10.TL319.2004.171019.nc - JRA.v1.3.u_10.TL319.2005.171019.nc - JRA.v1.3.u_10.TL319.2006.171019.nc - JRA.v1.3.u_10.TL319.2007.171019.nc - JRA.v1.3.u_10.TL319.2008.171019.nc - JRA.v1.3.u_10.TL319.2009.171019.nc - JRA.v1.3.u_10.TL319.2010.171019.nc - JRA.v1.3.u_10.TL319.2011.171019.nc - JRA.v1.3.u_10.TL319.2012.171019.nc - JRA.v1.3.u_10.TL319.2013.171019.nc - JRA.v1.3.u_10.TL319.2014.171019.nc - JRA.v1.3.u_10.TL319.2015.171019.nc - JRA.v1.3.u_10.TL319.2016.171019.nc - - - JRA.v1.3.v_10.TL319.1958.171019.nc - JRA.v1.3.v_10.TL319.1959.171019.nc - JRA.v1.3.v_10.TL319.1960.171019.nc - JRA.v1.3.v_10.TL319.1961.171019.nc - JRA.v1.3.v_10.TL319.1962.171019.nc - JRA.v1.3.v_10.TL319.1963.171019.nc - JRA.v1.3.v_10.TL319.1964.171019.nc - JRA.v1.3.v_10.TL319.1965.171019.nc - JRA.v1.3.v_10.TL319.1966.171019.nc - JRA.v1.3.v_10.TL319.1967.171019.nc - JRA.v1.3.v_10.TL319.1968.171019.nc - JRA.v1.3.v_10.TL319.1969.171019.nc - JRA.v1.3.v_10.TL319.1970.171019.nc - JRA.v1.3.v_10.TL319.1971.171019.nc - JRA.v1.3.v_10.TL319.1972.171019.nc - JRA.v1.3.v_10.TL319.1973.171019.nc - JRA.v1.3.v_10.TL319.1974.171019.nc - JRA.v1.3.v_10.TL319.1975.171019.nc - JRA.v1.3.v_10.TL319.1976.171019.nc - JRA.v1.3.v_10.TL319.1977.171019.nc - JRA.v1.3.v_10.TL319.1978.171019.nc - JRA.v1.3.v_10.TL319.1979.171019.nc - JRA.v1.3.v_10.TL319.1980.171019.nc - JRA.v1.3.v_10.TL319.1981.171019.nc - JRA.v1.3.v_10.TL319.1982.171019.nc - JRA.v1.3.v_10.TL319.1983.171019.nc - JRA.v1.3.v_10.TL319.1984.171019.nc - JRA.v1.3.v_10.TL319.1985.171019.nc - JRA.v1.3.v_10.TL319.1986.171019.nc - JRA.v1.3.v_10.TL319.1987.171019.nc - JRA.v1.3.v_10.TL319.1988.171019.nc - JRA.v1.3.v_10.TL319.1989.171019.nc - JRA.v1.3.v_10.TL319.1990.171019.nc - JRA.v1.3.v_10.TL319.1991.171019.nc - JRA.v1.3.v_10.TL319.1992.171019.nc - JRA.v1.3.v_10.TL319.1993.171019.nc - JRA.v1.3.v_10.TL319.1994.171019.nc - JRA.v1.3.v_10.TL319.1995.171019.nc - JRA.v1.3.v_10.TL319.1996.171019.nc - JRA.v1.3.v_10.TL319.1997.171019.nc - JRA.v1.3.v_10.TL319.1998.171019.nc - JRA.v1.3.v_10.TL319.1999.171019.nc - JRA.v1.3.v_10.TL319.2000.171019.nc - JRA.v1.3.v_10.TL319.2001.171019.nc - JRA.v1.3.v_10.TL319.2002.171019.nc - JRA.v1.3.v_10.TL319.2003.171019.nc - JRA.v1.3.v_10.TL319.2004.171019.nc - JRA.v1.3.v_10.TL319.2005.171019.nc - JRA.v1.3.v_10.TL319.2006.171019.nc - JRA.v1.3.v_10.TL319.2007.171019.nc - JRA.v1.3.v_10.TL319.2008.171019.nc - JRA.v1.3.v_10.TL319.2009.171019.nc - JRA.v1.3.v_10.TL319.2010.171019.nc - JRA.v1.3.v_10.TL319.2011.171019.nc - JRA.v1.3.v_10.TL319.2012.171019.nc - JRA.v1.3.v_10.TL319.2013.171019.nc - JRA.v1.3.v_10.TL319.2014.171019.nc - JRA.v1.3.v_10.TL319.2015.171019.nc - JRA.v1.3.v_10.TL319.2016.171019.nc - - CORE2.t_10.ArcFactor.T62.1997-2004.nc - fco2_datm_lat-bands_simyr_1750-2015_CMIP6_c180929.nc - fco2_datm_global_simyr_1750-2014_CMIP6_c180929.nc - fco2_datm_1765-2007_c100614.nc - fco2_datm_lat-bandsSSP1-1.9_simyr_2014-2500_CMIP6_c190514.nc - fco2_datm_lat-bandsSSP1-2.6__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP2-4.5__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP3-7.0__simyr_2014-2500_CMIP6_c190506.nc - 
fco2_datm_lat-bandsSSP4-3.4_simyr_2014-2500_CMIP6_c190514.nc - fco2_datm_lat-bandsSSP4-6.0__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP5-3.4__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_lat-bandsSSP5-8.5__simyr_2014-2500_CMIP6_c190506.nc - fco2_datm_globalSSP1-1.9_simyr_2014-2501_CMIP6_c190514.nc - fco2_datm_globalSSP1-2.6__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP2-4.5__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP3-7.0__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP4-3.4_simyr_2014-2501_CMIP6_c190514.nc - fco2_datm_globalSSP4-6.0__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP5-3.4__simyr_2014-2501_CMIP6_c190506.nc - fco2_datm_globalSSP5-8.5__simyr_2014-2501_CMIP6_c190506.nc - bias_correction.Prec.%y.nc - bias_correction.Prec.%y.nc - bias_correction.Prec.%y.nc - af.pr.ccsm4.rcp45.2006-2300.nc - af.tas.ccsm4.rcp45.2006-2300.nc - af.ps.ccsm4.rcp45.2006-2300.nc - af.huss.ccsm4.rcp45.2006-2300.nc - af.uas.ccsm4.rcp45.2006-2300.nc - af.vas.ccsm4.rcp45.2006-2300.nc - af.rsds.ccsm4.rcp45.2006-2300.nc - af.rlds.ccsm4.rcp45.2006-2300.nc - - aerosoldep_monthly_1850_mean_1.9x2.5_c090421.nc - aerosoldep_monthly_2000_mean_1.9x2.5_c090421.nc - aerosoldep_monthly_1849-2006_1.9x2.5_c090803.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - aerosoldep_WACCM.ensmean_monthly_hist_1849-2015_0.9x1.25_CMIP6_c180926.nc - - aerosoldep_SSP1-1.9_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP1-2.6_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP2-4.5_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP3-7.0_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP4-3.4_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP4-6.0_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP5-3.4_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - aerosoldep_SSP5-8.5_monthly_1849-2101_${CLM_USRDAT_NAME}.nc - - aerodep_clm_SSP126_b.e21.BSSP126cmip6.f09_g17.CMIP6-SSP1-2.6.001_2014-2101_monthly_0.9x1.25_c190523.nc - aerodep_clm_SSP245_b.e21.BWSSP245cmip6.f09_g17.CMIP6-SSP2-4.5-WACCM.001_2014-2101_monthly_0.9x1.25_c190401.nc - aerodep_clm_SSP370_b.e21.BWSSP370cmip6.f09_g17.CMIP6-SSP3-7.0-WACCM.001_2014-2101_monthly_0.9x1.25_c190402.nc - null - aerodep_clm_SSP585_b.e21.BSSP585cmip6.f09_g17.CMIP6-SSP5-8.5.001_2014-2101_monthly_0.9x1.25_c190419.nc - topodata_0.9x1.25_USGS_070110_stream_c151201.nc - - - - - char - streams - streams_file - Stream data variable name(s). 
- - null - - ZBOT z - TBOT tbot - RH rh - WIND wind - PRECTmms precn - FSDS swdn - FSDSdir swdndr - FSDSdif swdndf - PSRF pbot - FLDS lwdn - - - ZBOT z - TBOT tbot - RH rh - WIND wind - PRECTmms precn - FSDS swdn - FSDSdir swdndr - FSDSdif swdndf - PSRF pbot - FLDS lwdn - - - ZBOT z - TBOT tbot - QBOT shum - WIND wind - PRECTmms precn - FSDS swdn - FSDSdir swdndr - FSDSdif swdndf - PSRF pbot - FLDS lwdn - - - ZBOT z - TBOT tbot - RH rh - WIND wind - PRECTmms precn - FSDS swdn - PSRF pbot - FLDS lwdn - - - a2x1hi_Faxa_swndr swndr - a2x1hi_Faxa_swvdr swvdr - a2x1hi_Faxa_swndf swndf - a2x1hi_Faxa_swvdf swvdf - - - a2x3h_Faxa_rainc rainc - a2x3h_Faxa_rainl rainl - a2x3h_Faxa_snowc snowc - a2x3h_Faxa_snowl snowl - a2x3h_Faxa_lwdn lwdn - - - a2x3h_Sa_z z - a2x3h_Sa_tbot tbot - a2x3h_Sa_ptem ptem - a2x3h_Sa_shum shum - a2x3h_Sa_pbot pbot - a2x3h_Sa_dens dens - a2x3h_Sa_pslv pslv - a2x3h_Sa_co2diag co2diag - a2x3h_Sa_co2prog co2prog - - - a2x1h_Sa_u u - a2x1h_Sa_v v - - - FSDS swdn - - - PRECTmms precn - - - TBOT tbot - WIND wind - QBOT shum - PSRF pbot - - - FSDS swdn - - - PRECTmms precn - - - TBOT tbot - WIND wind - QBOT shum - PSRF pbot - - - FSDS swdn - - - PRECTmms precn - - - TBOT tbot - WIND wind - QBOT shum - PSRF pbot - - - FSDS swdn - - - PRECTmms precn - - - TBOT tbot - WIND wind - QBOT shum - PSRF pbot - - - FSDS swdn - - - PRECTmms precn - - - TBOT tbot - WIND wind - QBOT shum - PSRF pbot - FLDS lwdn - - - FSDS swdn - - - PRECTmms precn - - - TBOT tbot - WIND wind - QBOT shum - PSRF pbot - FLDS lwdn - - - lwdn lwdn - swdn swdn - swup swup - - - prc prec - - - dn10 dens - slp_ pslv - q_10 shum - t_10 tbot - u_10 u - v_10 v - - - dn10 dens - - - slp_ pslv - - - prec prec - - - lwdn lwdn - - - swdn swdn - - - swup swup - - - q3_5 shum - - - t3_5 tbot - - - u3_5 u - - - v3_5 v - - - prc prec - - - lwdn lwdn - - - swdn swdn - - - swup swup - - - dn10 dens - - - q_10 shum - - - slp_ pslv - - - t_10 tbot - - - u_10 u - - - v_10 v - - - TarcFactor tarcf - - - prec prec - - - lwdn lwdn - - - swdn swdn - - - q_10 shum - - - slp pslv - - - t_10 tbot - - - u_10 u - - - v_10 v - - - TarcFactor tarcf - - - CO2 co2diag - - - CO2 co2diag - - - BC_PREC precsf - - - BC_PREC precsf - - - BC_PREC precsf - - - pr prec_af - - - tas tbot_af - - - ps pbot_af - - - huss shum_af - - - uas u_af - - - vas v_af - - - rsds swdn_af - - - rlds lwdn_af - - - a2x1d_Faxa_bcphiwet bcphiwet - a2x1d_Faxa_bcphodry bcphodry - a2x1d_Faxa_bcphidry bcphidry - a2x1d_Faxa_ocphiwet ocphiwet - a2x1d_Faxa_ocphidry ocphidry - a2x1d_Faxa_ocphodry ocphodry - a2x1d_Faxa_dstwet1 dstwet1 - a2x1d_Faxa_dstdry1 dstdry1 - a2x1d_Faxa_dstwet2 dstwet2 - a2x1d_Faxa_dstdry2 dstdry2 - a2x1d_Faxa_dstwet3 dstwet3 - a2x1d_Faxa_dstdry3 dstdry3 - a2x1d_Faxa_dstwet4 dstwet4 - a2x1d_Faxa_dstdry4 dstdry4 - - - BCDEPWET bcphiwet - BCPHODRY bcphodry - BCPHIDRY bcphidry - OCDEPWET ocphiwet - OCPHIDRY ocphidry - OCPHODRY ocphodry - DSTX01WD dstwet1 - DSTX01DD dstdry1 - DSTX02WD dstwet2 - DSTX02DD dstdry2 - DSTX03WD dstwet3 - DSTX03DD dstdry3 - DSTX04WD dstwet4 - DSTX04DD dstdry4 - - - a2x3h_Sa_topo topo - - - TOPO topo - - - - - - integer - streams - streams_file - Simulation year to align stream to. 
- - -999 - $DATM_CLMNCEP_YR_ALIGN - $DATM_CPLHIST_YR_ALIGN - $DATM_CLMNCEP_YR_ALIGN - $DATM_CLMNCEP_YR_ALIGN - $DATM_CLMNCEP_YR_ALIGN - $DATM_CLMNCEP_YR_ALIGN - 1 - 1 - 1 - 1 - 1850 - 2015 - 1979 - 1979 - 1979 - 2006 - $DATM_CPLHIST_YR_ALIGN - - 1 - 1 - 1 - 1 - - 1849 - 2015 - 1 - $DATM_CPLHIST_YR_ALIGN - - - - - integer - streams - streams_file - First year of stream. - - -999 - 1993 - 1992 - 0001 - $DATM_CLMNCEP_YR_START - $DATM_CPLHIST_YR_START - 2000 - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - $DATM_CLMNCEP_YR_START - 1 - 2010 - 2010 - 2010 - 2010 - 2010 - 2010 - 2010 - 2010 - 2010 - 2010 - 1948 - 1948 - 1948 - 1948 - 1948 - 1948 - 1948 - 1948 - 1948 - 1948 - 1948 - 1958 - 1958 - 1958 - 1958 - 1958 - 1958 - 1958 - 1958 - 1958 - 1850 - 2015 - 1979 - 1979 - 2006 - $DATM_CPLHIST_YR_START - - 1 - 1850 - 2000 - 2010 - - 1849 - 2015 - 1 - $DATM_CPLHIST_YR_START - - - - - integer - streams - streams_file - Last year of stream. - - -999 - 1993 - 1992 - 0002 - $DATM_CLMNCEP_YR_END - $DATM_CPLHIST_YR_END - 2004 - $DATM_CLMNCEP_YR_END - $DATM_CLMNCEP_YR_END - $DATM_CLMNCEP_YR_END - $DATM_CLMNCEP_YR_END - 1 - 2011 - 2011 - 2011 - 2011 - 2011 - 2011 - 2011 - 2011 - 2011 - 2011 - 2009 - 2009 - 2009 - 2009 - 2009 - 2009 - 2009 - 2009 - 2009 - 2009 - 2009 - 2016 - 2016 - 2016 - 2016 - 2016 - 2016 - 2016 - 2016 - 2016 - 2007 - 2014 - 2500 - 2004 - 2012 - 2010 - 2300 - $DATM_CPLHIST_YR_END - - 1 - 2006 - 1850 - 2000 - 2010 - 2014 - - 2101 - 1 - $DATM_CPLHIST_YR_END - - - - - integer - streams - streams_file - Stream offset. - - 0 - -5400 - 2700 - 900 - 900 - 900 - 0 - 0 - - - - - - - - - - - - - char - streams - shr_strdata_nml - CLMNCEP,COPYALL,CORE2_NYF,CORE2_IAF,CORE_IAF_JRA,NULL - - general method that operates on the data. this is generally - implemented in the data models but is set in the strdata method for - convenience. valid options are dependent on the data model and will - be described elsewhere. NULL is always a valid option and means no - data will be generated. default='NULL' - - datamode = "NULL" - turns off the data model as a provider of data to the coupler. - The atm_present flag will be set to false - and the coupler will assume no exchange of data to or from the data model. - datamode = "COPYALL" - The default science mode of the data model is the COPYALL mode. - COPYALL mode will examine the fields found in all input data streams, - if any input field names match the field names used internally, they - are copied into the export array and passed directly to the coupler - without any special user code. Any required fields not found on an - input stream will be set to zero except for aerosol deposition fields - which will be set to a special value. There are several other - scientific modes supported by the model, they are listed below. The - mode is selected by a character string set in the strdata namelist - variable dataMode. - datamode = "CORE2_NYF" - Coordinated Ocean-ice Reference Experiments (CORE) Version 2 Normal Year Forcing. - datamode = "CORE2_IAF" - In conjunction with with CORE Version 2 atmospheric forcing data, - provides the atmosphere forcing favored by the Ocean Model Working - Group when coupling an active ocean model with observed atmospheric - forcing. This mode and associated data sets implement the CORE-IAF - Version 2 forcing data, as developed by Large and Yeager (2008) at - NCAR. 
See the documentation for CORE version 2 datasets at - http://data1.gfdl.noaa.gov/nomads/forms/mom4/COREv2.html Also see - W.G.Large, S.G.Yeager (2008), The global climatology of an - interannually varying air-sea flux data set. - Clm Dyn doi 10.1007/s00382-008-0441-3. - datamode = "CORE_IAF_JRA" - JRA55 intra-annual year forcing - datamode = "CLMNCEP" - In conjunction with NCEP climatological atmosphere data, provides the - atmosphere forcing favored by the Land Model Working Group when - coupling an active land model with observed atmospheric forcing. This - mode replicates code previously found in CLM (circa 2005), before the - LMWG started using the CCSM flux coupler and data models to do - active-land-only simulations. - - - NULL - CLMNCEP - CORE2_NYF - CORE2_IAF - CORE_IAF_JRA - COPYALL - - - - - char - streams - abs - shr_strdata_nml - - model spatial gridfile associated with the strdata. grid information will - be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - - - - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are just copy (ie. no fill), special value, - nearest neighbor, nearest neighbor in "i" direction, or nearest - neighbor in "j" direction. - valid values: 'copy','spval','nn','nnoni','nnonj' - - - nn - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - plays no role is fill algorithm at the present time. - valid values: "nomask,srcmask,dstmask,bothmask" - - - nomask - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read in instead of computing the - weights on the fly for the fill operation. if this is set, fillalgo - and fillmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the fill operation. this allows a user to - save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - array (up to 30 elements) of masking algorithms for mapping input data - associated with the array of streams. valid options are map only from - valid src points, map only to valid destination points, ignore all - masks, map only from valid src points to valid destination points. - valid values: srcmask, dstmask, nomask,bothmask - - - nomask - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are copy by index, set to special value, - nearest neighbor, nearest neighbor in "i" direction, nearest neighbor - in "j" direction, or bilinear. - valid values: copy,spval,nn,nnoni,nnonj,bilinear - - - bilinear - nn - copy - copy - copy - nn - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read instead of computing - weights on the fly for the mapping (interpolation) operation. if this - is set, mapalgo and mapmask are ignored. 
- - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the mapping (interpolation) operation. this - allows a user to save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - coszen,nearest,linear,lower,upper - - array (up to 30 elements) of time interpolation options associated with the array of - streams. - valid values: lower,upper,nearest,linear,coszen - lower = Use lower time-value - upper = Use upper time-value - nearest = Use the nearest time-value - linear = Linearly interpolate between the two time-values - coszen = Scale according to the cosine of the solar zenith angle (for solar) - - - linear - nearest - nearest - nearest - nearest - nearest - nearest - linear - linear - nearest - nearest - coszen - nearest - coszen - nearest - coszen - nearest - coszen - nearest - coszen - nearest - coszen - nearest - nearest - nearest - nearest - nearest - nearest - nearest - nearest - nearest - nearest - nearest - - lower - coszen - coszen - - - - - char(30) - streams - shr_strdata_nml - extend,cycle,limit - - array of time axis modes associated with the array of streams for - handling data outside the specified stream time axis. - valid options are to cycle the data based on the first, last, and - align settings associated with the stream dataset, to extend the first - and last valid value indefinitely, or to limit the interpolated data - to fall only between the least and greatest valid value of the time array. - valid values: cycle,extend,limit - - - cycle - extend - extend - extend - cycle - - - - - char(30) - streams - shr_strdata_nml - single,full_file - - array (up to 30 elements) of reading mode associated with the array of - streams. specifies the mode of reading temporal stream dataset. - valid options are "single" (read temporal dataset one at a time) or - "full_file" (read all entires of temporal dataset in a given netcdf file) - valid values: single,full_file - - - single - - - - - real(30) - streams - shr_strdata_nml - - array (up to 30 elements) of delta time ratio limits placed on the - time interpolation associated with the array of streams. this real - value causes the model to stop if the ratio of the running maximum - delta time divided by the minimum delta time is greater than the - dtlimit for that stream. for instance, with daily data, the delta - time should be exactly one day throughout the dataset and the computed - maximum divided by minimum delta time should always be 1.0. for - monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. the running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. - - - 1.5 - 1.e30 - 1.e30 - 3.0 - 3.0 - 3.0 - 3.0 - 3.0 - 3.0 - - - - - char - streams - shr_strdata_nml - - list of paired colon delimited field names that should be treated as - vectors when carrying out spatial interpolation. unlike other - character arrays in this namelist, this array is completely decoupled - from the list of streams. this is a list of vector pairs that span - all input streams where different fields of the vector pair could - appear in different streams. - for example, vectors = 'u:v','taux:tauy'. 
- - - null - u:v - u:v - u:v - - - - - char(30) - streams - shr_strdata_nml - - character array (up to 30 elements) of stream input files. this - string is actually parsed by a stream method and so the format is - specified by the stream module. this string consists of a - "stream_input_filename year_align year_first year_last". the - stream_input_filename is a stream text input file and the format and - options are described elsewhere. year_align, year_first, and - year_last provide information about the time axis of the file and how - to relate the input time axis to the model time axis. - - - - - - - - - - - - - char - datm - datm_nml - BC.QIAN.CMAP.Precip,BC.QIAN.GPCP.Precip,BC.CRUNCEP.CMAP.Precip,BC.CRUNCEP.GPCP.Precip - - If set, include bias correction streams in namelist. - - - - - - - - char(10) - datm - datm_nml - Anomaly.Forcing.Precip,Anomaly.Forcing.Temperature,Anomaly.Forcing.Pressure,Anomaly.Forcing.Humidity,Anomaly.Forcing.Uwind,Anomaly.Forcing.Vwind,Anomaly.Forcing.Shortwave,Anomaly.Forcing.Longwave - - If set, include anomaly forcing streams in namelist. - - - - - - - - - char - datm - datm_nml - 1d,root - - Set the decomposition option for the data model. valid options are - placing the global array on the root task or a simple stride-one - load balanced one-dimensional decomposition. other decompositions - may be added in the future. valid values are ['root','1d']. - 1d = Vector decomposition, root = run only on the master task - - - 1d - - - - - char - datm - datm_nml - abs - - filename containing correction factors for use only with CORE2 modes (CORE2_IAF and CORE2_NYF). - - - null - $DIN_LOC_ROOT/atm/datm7/CORE2/COREv2.correction_factors.T62.121007.nc - $DIN_LOC_ROOT/atm/datm7/CORE2/COREv2.correction_factors.T62.121007.nc - null - - - - - logical - datm - datm_nml - - If true, prescribed aerosols are sent from datm (must be true for running with CLM). - - - .true. - .false. - - - - - logical - datm - datm_nml - - If true, prognostic is forced to true. (default=false) - - - .false. - - - - - integer - datm - datm_nml - - Frequency to update radiation in number of steps (or hours if negative) - irdasw is the radiation setting used to compute the next shortwave - Julian date. values greater than 1 set the next radiation to the - present time plus 2 timesteps every iradsw. values less than 0 turn - set the next radiation to the present time plus two timesteps every - -iradsw hours. if iradsw is zero, the next radiation time is the - present time plus 1 timestep. (default=0.) - - - 1 - -1 - - - - - char - datm - datm_nml - - Model restart filename for the data atmosphere model data. This is - optional. If both restfils and restfilm are undefined, the restart - filename will be read from the DATM restart pointer file (or files for multiple instances). - - - undefined - - - - - char - datm - datm_nml - - stream restart filename for the data atmosphere stream data. This is - optional. If both restfils and restfilm are undefined, the restart - filename will be read from the DATM restart pointer file (or files for multiple instances). - - - undefined - - - - - logical - datm - datm_nml - - flag which if true, says to turn on water isotopes - - - .false. - .true. 
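The stream-independent variables described above are resolved into the "datm_nml" group of the datm_in namelist file. As a point of reference, a minimal, hypothetical fragment is sketched below; the variable names are inferred from the descriptions above (the XML entry ids are not shown here) and the values are simply the documented defaults, not a recommendation for any particular case.

  &datm_nml
    decomp                = "1d"        ! strided one-dimensional decomposition across tasks
    iradsw                = 1           ! radiation update frequency in steps (negative means hours)
    presaero              = .true.      ! send prescribed aerosol deposition (required when running with CLM)
    force_prognostic_true = .false.     ! do not force prognostic mode
    restfilm              = "undefined" ! restart filename taken from the DATM restart pointer file
  /

In practice these values are normally changed through user_nl_datm (described below) rather than by editing datm_in directly.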
- - - - diff --git a/src/components/data_comps/datm/cime_config/user_nl_datm b/src/components/data_comps/datm/cime_config/user_nl_datm deleted file mode 100644 index fb8503100b1..00000000000 --- a/src/components/data_comps/datm/cime_config/user_nl_datm +++ /dev/null @@ -1,13 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_datm to change namelist variables -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! Note that any namelist variable from shr_strdata_nml and datm_nml can -! be modified below using the above syntax -! Use preview_namelists to view (not modify) the output namelist in the -! directory $CASEROOT/CaseDocs -! To modify the contents of a stream txt file, first use preview_namelists -! to obtain the contents of the stream txt files in CaseDocs, and then -! place a copy of the modified stream txt file in $CASEROOT with the string -! user_ prepended. -!------------------------------------------------------------------------ diff --git a/src/components/data_comps/datm/doc/SOM.pdf b/src/components/data_comps/datm/doc/SOM.pdf deleted file mode 100644 index 92d15e00483..00000000000 Binary files a/src/components/data_comps/datm/doc/SOM.pdf and /dev/null differ diff --git a/src/components/data_comps/datm/doc/bookinfo.xml b/src/components/data_comps/datm/doc/bookinfo.xml deleted file mode 100644 index e9ef4b53583..00000000000 --- a/src/components/data_comps/datm/doc/bookinfo.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -CESM1.2 Data Model v8: User's Guide - - - - - Mariana - Vertenstein - - NCAR - - - - - Tony - Craig - - NCAR - - - - - Brian - Kauffman - - NCAR - - - - - -2013-06-07 - - diff --git a/src/components/data_comps/datm/doc/datacomps.xml b/src/components/data_comps/datm/doc/datacomps.xml deleted file mode 100644 index cebf5221f52..00000000000 --- a/src/components/data_comps/datm/doc/datacomps.xml +++ /dev/null @@ -1,436 +0,0 @@ - - - - - -Data Model Science Modes - - - -When a given data model runs, the user must specify which -science mode it will run in. Each data model has -a fixed set of fields that it must send to the coupler, but it is the -choice of mode that specifies how that set of fields is to be -computed. Each mode activates various assumptions about what input -fields are available from the input data streams, what input fields -are available from the coupler, and how to use this input data to -compute the output fields sent to the coupler.
When editing the run scripts to use non-standard stream -data or modes, users must be careful that the input data is consistent -with the science mode and should verify that the data model is -providing data to the coupler as expected. - - - -The data model mode is a character string that is set in the namelist -variable "datamode" in the namelist group "shr_strdata_nml". -Although each data model, -datm, -dlnd, -drof, -docn, and -dicn, -has its own set of valid datamode values, two modes are common to all -data models: COPYALL and NULL. - - - - - -The default mode is COPYALL -- the model will assume all -the data that must be sent to the coupler will be found in the input data streams, -and that this data can be sent to the coupler, unaltered, except for spatial and -temporal interpolation. - - - - - NULL mode turns off the data model as a -provider of data to the coupler. The model_present flag -(eg. atm_present) will be set to false and the coupler will assume no -exchange of data to or from the data model. - - - - - - - - - -Data Atmosphere Model - - - -Namelists - - DATM namelists can be separated into two groups, stream-independent namelist -variables that are specific to the DATM model and stream-specific namelist -variables that are contained in share code and whose names are -common to all the data models. - -For stream-independent input, the namelist input filename is -hardwired in the data model code to "datm_in" (or datm_in_NNNN for -multiple instances) and the namelist group is called "datm_nml". The -variable formats are character string (char), integer (int), double -precision real (r8), or logical (log) or one dimensional arrays of any -of those things (array of ...). - - For stream-dependent input, the namelist input file is -datm_atm_in (or datm_atm_in_NNNN for multiple instances) and the -namelist group is "shr_strdata_nml". One of the variables in -shr_strdata_nml is the datamode value. The mode is selected by a -character string set in the strdata namelist variable dataMode. Each -data model has a unique set of datamode values that it supports. -Those for DATM are listed in detail in the datamode definition. - - - - - -Fields - - -The pre-defined internal field names in the data atmosphere model are as follows. -In general, the stream input file should translate the input variable names into -these names for use within the data atmosphere model. - - - - (/"z ","u ","v ","tbot ", & - "ptem ","shum ","dens ","pbot ", & - "pslv ","lwdn ","rainc ","rainl ", & - "snowc ","snowl ","swndr ","swvdr ", & - "swndf ","swvdf ","swnet ","co2prog ", & - "co2diag ","bcphidry ","bcphodry ","bcphiwet ", & - "ocphidry ","ocphodry ","ocphiwet ","dstwet1 ", & - "dstwet2 ","dstwet3 ","dstwet4 ","dstdry1 ", & - "dstdry2 ","dstdry3 ","dstdry4 ", & - "tref ","qref ","avsdr ","anidr ", & - "avsdf ","anidf ","ts ","to ", & - "snowhl ","lfrac ","ifrac ","ofrac ", & - "taux ","tauy ","lat ","sen ", & - "lwup ","evap ","co2lnd ","co2ocn ", & - "dms " /) - - - - - - - -Data Land Model - - - -Namelists - - -The land model is unique because it supports land data and snow data -(lnd and sno) almost as if -they were two separate components, but they are in fact running in one -component model through one interface. The lnd (land) data consist of -fields sent to the atmosphere. This set of data is used when running -dlnd with an active atmosphere. In general this is not a mode that is -used or supported in CESM1.1. The sno (snow) data consist of fields -sent to the glacier model. 
This set of data is used when running dlnd -with an active glacier model (TG compsets). Both sets of data are -assumed to be on the same grid. - - - -DLND namelists can be separated into two groups, stream-independent -namelist variables that are specific to the DLND model and -stream-specific -namelist variables that are contained in share code and whose -names are common to all the data models. - - - -For stream-independent input, the namelist input filename is hardwired -in the data model code to "dlnd_in" (or dlnd_in_NNNN for multiple -instances) and the namelist group is called "dlnd_nml". The variable -formats are character string (char), integer (int), double precision -real (r8), or logical (log) or one dimensional arrays of any of those -things (array of ...). - - - For stream-dependent input, the namelist input file is -dlnd_lnd_in and - -dlnd_sno_in (or dlnd_lnd_in_NNNN and -dlnd_sno_in_NNNN for NNNN multiple instances) and the namelist group -is "shr_strdata_nml". One of the variables in shr_strdata_nml is the -datamode value. The mode is selected by a character string set in the -strdata namelist variable dataMode. Each data model has a unique set -of datamode values that it supports. Those for DLND are listed in -detail in the datamode -definition. - - - -If you want to change the namelist settings in -dlnd_lnd_in or dlnd_in you -should edit the file user_nl_dlnd. If you want -to change the namelist settings in dsno_lnd_in or -dsno_in you should edit the file -user_nl_dsno. - - - - - -Fields - -The pre-defined internal field names in the data land model are as follows. -In general, the stream input file should translate the input variable names into -these names for use within the data land model. - - - (/ "t ","tref ","qref ","avsdr ","anidr ", & - "avsdf ","anidf ","snowh ","taux ","tauy ", & - "lat ","sen ","lwup ","evap ","swnet ", & - "lfrac ","fv ","ram1 ", & - "flddst1 ","flxdst2 ","flxdst3 ","flxdst4 ", & - "tsrf01 ","topo01 ","tsrf02 ","topo02 ","tsrf03 ", & - "topo03 ","tsrf04 ","topo04 ","tsrf05 ","topo05 ", & - "tsrf06 ","topo06 ","tsrf07 ","topo07 ","tsrf08 ", & - "topo08 ","tsrf09 ","topo09 ","tsrf10 ","topo10 ", & - "qice01 ","qice02 ","qice03 ","qice04 ","qice05 ", & - "qice06 ","qice07 ","qice08 ","qice09 ","qice10 " /) - - - - - - - -Data River Runoff Model - - - -Namelists - - - -The data river runoff model is new and is effectively the runoff part of the dlnd model -in CESM1.0 that has been made its own top level component. - - - - -DROF namelists can be separated into two groups, stream-independent -namelist variables that are specific to the DROF model and -stream-specific -namelist variables that are contained in share code and whose -names are common to all the data models. - - -For stream-independent input, the namelist input filename is -hardwired in the data model code to "drof_in" (or drof_in_NNNN for -multiple instances) and the namelist group is called "drof_nml". The -variable formats are character string (char), integer (int), double -precision real (r8), or logical (log) or one dimensional arrays of any -of those things (array of ...). - - For stream-dependent input, the namelist input file is -"drof_lnd_in" (or drof_rof_in_NNNN for NNNN multiple instances) and -the namelist group is "shr_strdata_nml". One of the variables in -shr_strdata_nml is the datamode value. The mode is selected by a -character string set in the strdata namelist variable dataMode. Each -data model has a unique set of datamode values that it supports. 
- Those for DROF are listed in detail in -the datamode -definition. - - - - - -Fields - - -The pre-defined internal field names in the data river runoff model are as follows. -In general, the stream input file should translate the input variable names into -these names for use within the data river runoff model. - - - - (/ "roff ","ioff "/) - - - - - - - - -Data Ocean Model - - - -Namelists - - - DOCN namelists can be separated into two groups, stream-independent namelist -variables that are specific to the DOCN model and stream-specific namelist -variables that are contained in share code and whose names are -common to all the data models. - - For stream-independent input, the namelist input filename is -hardwired in the data model code to "docn_in" (or docn_in_NNNN for -multiple instances) and the namelist group is called "docn_nml". The -variable formats are character string (char), integer (int), double -precision real (r8), or logical (log) or one dimensional arrays of any -of those things (array of ...). - - For stream-dependent input, the namelist input file is -docn_ocn_in (or docn_ocn_in_NNNN for multiple instances) and the -namelist group is "shr_strdata_nml". One of the variables in -shr_strdata_nml is the datamode value. The mode is selected by a -character string set in the strdata namelist variable dataMode. Each -data model has a unique set of datamode values that it supports. -Those for DOCN are listed in detail in the datamode -definition. As part of the stream independent namelist input, DOCN -supports two science modes, "SSTDATA" and "SOM". SOM ("slab ocean -model") mode is a prognostic mode. This mode computes a prognostic -sea surface temperature and a freeze/melt potential (surface Q-flux) -used by the sea ice model. This calculation requires an external SOM -forcing data file that includes ocean mixed layer depths and -bottom-of-the-slab Q-fluxes. Scientifically appropriate -bottom-of-the-slab Q-fluxes are normally ocean resolution dependent -and are derived from the ocean model output of a fully coupled CCSM -run. Note that this mode no longer runs out of the box; the default testing SOM -forcing file is not scientifically appropriate and is provided for -testing and development purposes only. Users must create -scientifically appropriate data for their particular application or use one -of the standard SOM forcing files from the CESM control runs. Some of these -are available in the inputdata repository. The user -then edits the DOCN_SOM_FILENAME variable in env_run.xml to point to the -appropriate SOM forcing dataset. A tool is available to derive valid SOM -forcing. More information on creating the SOM -forcing is also available. - - - - - -Fields - - The pre-defined internal field names in the data ocean model are as follows. -In general, the stream input file should translate the input variable names into -these names for use within the data ocean model. - - - (/ "ifrac ","pslv ","duu10n ","taux ","tauy ", & - "swnet ","lat ","sen ","lwup ","lwdn ", & - "melth ","salt ","prec ","snow ","rain ", & - "evap ","meltw ","roff ","ioff ", & - "t ","u ","v ","dhdx ","dhdy ", & - "s ","q ","h ","qbot " /) - - - - - - - -Data Ice Model - - - -Namelists - - DICE namelists can be separated into two groups, stream-independent namelist -variables that are specific to the DICE model and stream-specific namelist -variables that are contained in share code and whose names are -common to all the data models.
- - For stream-independent input, the namelist input filename is -hardwired in the data model code to "dice_in" (or dice_in_NNNN for -multiple instances) and the namelist group is called "dice_nml". - - - Its important to point out that the only currently supported -datamode that is not "NULL" or "COPYALL" is "SSTDATA", which is a -prognostic mode and therefore requires data be sent to the ice model. -Ice fraction (extent) data is read from an input stream, atmosphere -state variables are received from the coupler, and then an -atmosphere-ice surface flux is computed and sent to the coupler. It -is called "SSTDATA" mode because normally the ice fraction data is -found in the same data files that provide SST data to the data ocean -model. They are normally found in the same file because the SST and -ice fraction data are derived from the same observational data sets -and are consistent with each other. - - - - -Fields - - The pre-defined internal field names in the data ice model are as follows. -In general, the stream input file should translate the input variable names into -these names for use within the data ocean model. - - - (/"to ","s ","uo ","vo ", & - "dhdx ","dhdy ","q ","z ", & - "ua ","va ","ptem ","tbot ", & - "shum ","dens ","swndr ","swvdr ", & - "swndf ","swvdf ","lwdn ","rain ", & - "snow ","t ","tref ","qref ", & - "ifrac ","avsdr ","anidr ","avsdf ", & - "anidf ","tauxa ","tauya ","lat ", & - "sen ","lwup ","evap ","swnet ", & - "swpen ","melth ","meltw ","salt ", & - "tauxo ","tauyo " /) - - - - - - - -Data Land-Ice Model - - - -This model does not yet exist. - - - - - - diff --git a/src/components/data_comps/datm/doc/intro.xml b/src/components/data_comps/datm/doc/intro.xml deleted file mode 100644 index a69a96443fa..00000000000 --- a/src/components/data_comps/datm/doc/intro.xml +++ /dev/null @@ -1,370 +0,0 @@ - - - - -Introduction - - -Overview - - - -The CESM1.2 data models continue to perform the basic function of -reading external data files, modifying that data, and then sending it -to the coupler via standard CESM coupling interfaces. The coupler and -other models have no fundamental knowledge of whether another -component is fully active or just a data model. In some cases, data -models are prognostic, that is, they also receive and use data sent by -the coupler to the data model. But in most cases, the data models are -not running prognostically and have no need to receive any data from -the coupler. - - - -The CESM data models have parallel capability and share -significant amounts of source code. Methods for reading -and interpolating data have been established and can -easily be reused. There is a natural hierarchy in the -system. The data model calls strdata -("stream data") methods which then call stream methods. -There are inputs associated with the -data model, strdata, and -streams to configure the setup. -The stream methods are responsible for managing lists of input data files -and their time axis. -The information is then passed up to the strdata methods where the -data is read and interpolated in space and time. The interpolated -data is passed up to the data model where final fields are -derived, packed, and returned to the coupler. - - - - - -Design - - -The strdata implementation is hardwired to execute a set of specific -operations associated with reading and interpolating data in space and -time. The text box below shows the sequencing of the computation of -model fields using the strdata methods. 
-
-
-
-
-STRDATA Implementation:
- for the current model time
- determine nearest lower and upper bound data from the input dataset
- if that is new data then
- read lower and upper bound data
- fill lower and upper bound data
- spatially map lower and upper bound data to model grid
- endif
- time interpolate lower and upper bound data to model time
- return fields to data model
-
-
-
-
-
-IO Through Data Models
-
-
-
-The two timestamps of input data that bracket the present model time
-are read first. These are called the lower and upper bounds of data
-and will change as the model advances. Those two sets of input data
-are first filled based on the user setting of the namelist variables
-str_fillalgo
-and str_fillmask. That operation occurs on the input data grid.
-The lower and upper bound data are then spatially mapped to the model
-grid based upon the user setting of the namelist variables str_mapalgo and
-str_mapmask. Spatial interpolation only occurs if the input
-data grid and model grid are not identical, and this is determined
-in the strdata module automatically. Time interpolation is the final
-step and is done using a time interpolation method specified by the
-user in the namelist (via the shr_strdata_nml namelist variable "tintalgo").
-A final set of fields is then available to the data model on the model
-grid and for the current model time.
-
-
-
-There are two primary costs associated with strdata, reading data and
-spatially mapping data. Time interpolation is relatively cheap in the
-current implementation. As much as possible, redundant operations are
-minimized. Fill and mapping weights are generated at initialization
-and saved. The upper and lower bound mapped input data is saved
-between time steps to reduce mapping costs in cases where data is time
-interpolated more often than new data is read. If the input data
-timestep is relatively small (for example, hourly data as opposed to
-daily or monthly data), the cost of reading input data can be quite
-large. Also, there can be significant variation in the cost of the data
-model over the course of the run, for instance, when new input data
-must be read and interpolated, although it's relatively predictable.
-The present implementation doesn't support changing the order of
-operations, for instance, time interpolating the data before spatial
-mapping. Because the present computations are always linear,
-changing the order of operations will not fundamentally change the
-results. The present order of operations generally minimizes the
-mapping cost for typical data model use cases.
-
-
-
-
-There are several limitations in both options and usage within the
-data models at the present time. Spatial interpolation can
-only be performed from a two-dimensional latitude-longitude
-input grid. The target grid can be arbitrary but the source
-grid must be able to be described by simple one-dimensional lists of longitudes
-and latitudes, although they don't have to be equally spaced.
-
-
-
-
-At the present time, data models can only read netcdf data, and
-IO is handled through either standard netcdf interfaces or through
-the pio library using either netcdf or pnetcdf. If standard netcdf
-is used, global fields are read and then scattered one field at a
-time. If pio is used, then data will be
-read either serially or in parallel in chunks that are approximately
-the global field size divided by the number of io tasks. If
-pnetcdf is used through pio, then the pnetcdf library must be
-included during the build of the model.
The pnetcdf path and option -is hardwired into the Macros file for the specific machine. -To turn on pnetcdf in the build, make sure the Macros variables -PNETCDF_PATH, INC_PNETCDF, and LIB_PNETCDF are set and that the -pio CONFIG_ARGS sets the PNETCDF_PATH argument. See the CESM1.2 users -guide for more information. - - - -Beyond just the option of selecting IO with pio, -several namelist are available to help optimize pio IO performance. -Those are TODO - list these. -The total mpi tasks that can be used for IO is limited -to the total number of tasks used by the data model. Often though, -fewer io tasks result in improved performance. In -general, [io_root + (num_iotasks-1)*io_stride + 1] has to be less -than the total number of data model tasks. In practice, pio seems -to perform optimally somewhere between the extremes of 1 task and -all tasks, and is highly machine and problem dependent. - - - - - -Restart Files - - -Restart files are generated automatically by the data models based -upon a flag sent from the coupler. The restart files must meet the -naming convention specified by the CESM project and an rpointer file -is generated at the same time. An rpointer file is a -restart pointer file which contains the name of -the most recently created restart file. Normally, if restart files -are read, the restart filenames are specified in the rpointer file. -Optionally though, there are namelist variables such as restfilm -to specify the restart filenames via namelist. If those namelist are -set, the rpointer file will be ignored. The standard procedure in -is to use the rpointer files to specify the restart filenames. -In many cases, no model restart is required for the data models to -restart exactly. This is because there is no memory between timesteps -in many of the data model science modes. If a model restart is -required, it will be written automatically and then must be used to -continue the previous run. - - - -There are separate stream restart files that only exist for -performance reasons. A stream restart file contains information about -the time axis of the input streams. This information helps reduce the -start costs associated with reading the input dataset time axis -information. If a stream restart file is missing, the code will -restart without it but may need to reread data from the input data -files that would have been stored in the stream restart file. This -will take extra time but will not impact the results. - - - - - - -Hierarchy - - -The hierarchy of data models, strdata, and streams also -compartmentalize grids and fields. In CESM1.2, data models communicate with -the coupler with fields on only the data model model grid (in CESM1.0 the -data land model communicated with the coupler on two different grids, -a land grid and a runoff grid). -Although for each strdata namelist, data is -interpolated to a single model grid, each strdata namelist input -can have multiple stream description files and each stream input file -can contain data on a different grid. The strdata module will -gracefully read the different streams of input data and interpolate -both spatially and temporally to the appropriate final model grid and -model time. 
The text box below provides a schematic of the hierarchy - - - - driver : call data land model - data model : data land model - data model : land_data - data model : grid - data model : strdata - strdata : interpa interpb interpc - strdata : streama streamb streamc - stream : grida gridb gridc - stream : filea_01 fileb_01 filec_01 - stream : ... ... - stream : filea_04 filec_96 - - - -Users will primarily setup different data model configurations through -existing namelist settings. The strdata and stream input -options and format are identical for all data models. The -data model specific namelist has significant overlap between data -models, but each data model has a slightly different set of input -namelist variables and each model reads that namelist from a unique -filename. The detailed namelist options for each data model will be -described later, but each model will specify a filename or filenames -for strdata namelist input and each strdata namelist will specify a -set of stream input files. - - - -To continue with the above example, the following inputs would be -consistent with the above figure. -The data model namelist input file is hardwired to "dlnd_in" and in this case, -the namelist would look something like - - - -file="dlnd_in": -&dlnd_nml - lnd_in = 'dlnd_lnd_in' - decomp = '1d' -/ - - - -The lnd_in specifies the filenames associated with the -strdata namelist input for the land and runoff data separately. -The land and runoff strdata namelist would then look like - - - -file="dlnd_lnd_in": -&shr_strdata_nml - dataMode = 'CPLHIST' - domainFile = 'grid.nc' - streams = 'streama', - 'streamb', - 'streamc' - mapalgo = 'interpa', - 'interpb', - 'interpc' -/ - - - -Three stream description files are then expected to be available, -streama, streamb and streamc. Those files specify the input data -filenames, input data grids, and input fields that are expected among -other things. For instance, one of the stream description files might -look like - - - -<stream> - <dataSource> - GENERIC - </dataSource> - <fieldInfo> - <variableNames> - dn10 dens - slp_ pslv - q_10 shum - t_10 tbot - u_10 u - v_10 v - </variableNames> - <filePath> - /glade/proj3/cseg/inputdata/atm/datm7/NYF - </filePath> - <offset> - 0 - </offset> - <fileNames> - nyf.ncep.T62.050923.nc - </fileNames> - </fieldInfo> - <domainInfo> - <variableNames> - time time - lon lon - lat lat - area area - mask mask - </variableNames> - <filePath> - /glade/proj3/cseg/inputdata/atm/datm7/NYF - </filePath> - <fileNames> - nyf.ncep.T62.050923.nc - </fileNames> - </domainInfo> -</stream> - - - -The stream files are not Fortran namelist format. Their format and -options will be described later. In general, these -examples of input files are not complete, but they do show the general -hierarchy and feel of the data model input. - - - - - -Summary - - -In summary, for each data model a top level namelist will be set -that will point to a file that contains the strdata namelist. That -namelist will specify the data model mode, stream description text files, and -interpolation options. The stream description files will be provided as -separate input files and contain the files and fields that need to -be read. - - - -From a user perspective, for any data model, it's important to know -what modes are supported and the internal field names in the data model. -That information will be used in the strdata namelist and stream input files. 
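
To make this concrete, and continuing the dlnd example above, a user who wanted different spatial or time interpolation choices would not edit dlnd_lnd_in directly; instead, the equivalent keyword/value pairs would go into user_nl_dlnd. A minimal sketch (the values below are illustrative only, one entry per stream) might be:

file="user_nl_dlnd":
 mapalgo  = 'bilinear', 'bilinear', 'bilinear'
 tintalgo = 'linear', 'linear', 'linear'
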
- - - - -Next Sections - - -In the next sections, more details will be presented including a full -description of the science modes and namelist settings for the data -atmosphere, data land, data runoff, data ocean, and data ice models; -namelist settings for the strdata namelist input; a description of the -format and options for the stream description input files; and a list -of internal field names for each of the data components. The internal -data model field names are important because they are used to setup -the stream description files and to map the input data fields to the -internal data model field names. - - - - - - diff --git a/src/components/data_comps/datm/doc/rundocbook.csh b/src/components/data_comps/datm/doc/rundocbook.csh deleted file mode 100755 index c83fc1626ed..00000000000 --- a/src/components/data_comps/datm/doc/rundocbook.csh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/csh - -if (-e ./ug.pdf) then - if (-e ./ug.pdf.prev) rm -f ./ug.pdf.prev - mv ./ug.pdf ./ug.pdf.prev -endif - -mv index.html index.html.hold -rm *.html -mv index.html.hold index.html -docbook2html ug.xml -docbook2pdf ug.xml -chgrp cgdcsmweb * -chmod g+w * diff --git a/src/components/data_comps/datm/doc/streams.xml b/src/components/data_comps/datm/doc/streams.xml deleted file mode 100644 index 6d3ad3066bf..00000000000 --- a/src/components/data_comps/datm/doc/streams.xml +++ /dev/null @@ -1,498 +0,0 @@ - - - - - - - - Input Data Streams - - - - - Overview - - - - - An input data stream is a time-series of input data files - where all the fields in the stream are located in the same data file - and all share the same spatial and temporal coordinates - (ie. are all on the same grid and share the same time axis). - Normally a time axis has a uniform dt, but this is not a requirement. - - - - The data models can have multiple input streams. - - - - The data for one stream may be all in one file or may be spread over several files. - For example, 50 years of monthly average data might be contained all in one data file - or it might be spread over 50 files, each containing one year of data. - - - - The data models can loop over stream data -- - repeatedly cycle over some subset of an input stream's time axis. - When looping, the models can only loop over whole years. - For example, an input stream might have SST data for years 1950 through 2000, - but a model could loop over the data for years 1960 through 1980. - A model cannot loop over partial years, for example, - from 1950-Feb-10 through 1980-Mar-15. - - - - The input data must be in a netcdf file and the time axis in that file - must be CF-1.0 compliant. - - - There are two main categories of information that the data models need - to know about a stream: - - - data that describes what a user wants -- - what streams to use and how to use them -- things that can be changed by a user. - - - data that describes the stream data -- meta-data about the - inherent properties of the data itself -- things that cannot be - changed by a user. - - - - Generally, information about what streams a user wants to use and how - to use them is input via the strdata ("stream data") Fortran namelist, - while meta-data that describes the stream data itself is found in an - xml-like text file called a "stream description file." - - - - - - Stream Data - - - - The strdata (short for "stream data") input is - set via a fortran namelist called shr_strdata_nml. 
That namelist, the - strdata datatype, and the methods are contained in the share source - code file, share/util/shr_strdata_mod.F90. In general, - strdata input defines an array of input streams and operations to - perform on those streams. Therefore, many namelist inputs are arrays - of character strings. Different variable of the same index are - associated. For instance, mapalgo(1) spatial interpolation will be - performed between streams(1) and the target domain. - - - - The following namelist are available with the strdata namelist. - - - dataMode - component specific mode - domainFile- final domain - streams - input files - vectors - paired vector field names - fillalgo - fill algorithm - fillmask - fill mask - fillread - fill mapping file to read - fillwrite - fill mapping file to write - mapalgo - spatial interpolation algorithm - mapmask - spatial interpolation mask - mapread - spatial interpolation mapping file to read - mapwrite - spatial interpolation mapping file to write - tintalgo - time interpolation algorithm - taxMode - time interpolation mode - dtlimit - delta time axis limit - - - The set of shr_strdata_nml namelist keywords are the same for all data - models. As a result, any of the data model namelist documentation can - be used to view a full description. For example, see stream specific namelist - settings . - - - - - - - Specifying What Streams to Use - - - The data models have a namelist variable that specifies which input - streams to use and, for each input stream, the name of the - corresponding stream description file, what years of data to use, and - how to align the input stream time axis with the model run time axis. - This input is set in the strdata namelist input. - - - General format: - - - &shr_strdata_nml - streams = 'stream1.txt year_align year_first year_last ', - 'stream2.txt year_align year_first year_last ', - ... - 'streamN.txt year_align year_first year_last ' - / - - - - where: - - - - - - the stream description file, a plain text file containing details about the input stream (see below) - - - - the first year of data that will be used - - - - the last year of data that will be used - - - - a model year that will be aligned with data for year_first - - - - - - The stream text files for a given data model mode are automatically - generated by the corresponding data model - build-namelist with present names. 
As an example - we refer to the following datm_atm_in example - file (that would appear in both - $CASEROOT/CaseDocs and - $RUNDIR): - - - - datamode = 'CLMNCEP' - domainfile = '/glade/proj3/cseg/inputdata/share/domains/domain.lnd.fv1.9x2.5_gx1v6.090206.nc' - dtlimit = 1.5,1.5,1.5,1.5 - fillalgo = 'nn','nn','nn','nn' - fillmask = 'nomask','nomask','nomask','nomask' - mapalgo = 'bilinear','bilinear','bilinear','bilinear' - mapmask = 'nomask','nomask','nomask','nomask' - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1972 ", - "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1972 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1972 ", - "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" - taxmode = 'cycle','cycle','cycle','cycle' - tintalgo = 'coszen','nearest','linear','linear' - vectors = 'null' - - - - As is discussed in the CESM1.2 User's Guide, to change the contents of - datm_atm_in, you can edit - $CASEROOT/user_nl_datm to change any of the above - settings EXCEPT FOR THE NAMES - datm.streams.txt.CLM_QIAN.Solar, - datm.streams.txt.CLM_QIAN.Precip, - datm.streams.txt.CLM_QIAN.TPQW and - datm.streams.txt.presaero.trans_1850-2000. Note - that any namelist variable from shr_strdata_nml and datm_nml can be - modified by adding the appropriate keyword/value pairs to - user_nl_datm. As an example, the following could - be the contents of $CASEROOT/user_nl_datm: - - - - !------------------------------------------------------------------------ - ! Users should ONLY USE user_nl_datm to change namelists variables - ! Users should add all user specific namelist changes below in the form of - ! namelist_var = new_namelist_value - ! Note that any namelist variable from shr_strdata_nml and datm_nml can - ! be modified below using the above syntax - ! User preview_namelists to view (not modify) the output namelist in the - ! directory $CASEROOT/CaseDocs - ! To modify the contents of a stream txt file, first use preview_namelists - ! to obtain the contents of the stream txt files in CaseDocs, and then - ! place a copy of the modified stream txt file in $CASEROOT with the string - ! user_ prepended. - !------------------------------------------------------------------------ - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", - "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" - - - - and the contents of shr_strdata_nml (in both $CASEROOT/CaseDocs and $RUNDIR) - would be - - - - datamode = 'CLMNCEP' - domainfile = '/glade/proj3/cseg/inputdata/share/domains/domain.lnd.fv1.9x2.5_gx1v6.090206.nc' - dtlimit = 1.5,1.5,1.5,1.5 - fillalgo = 'nn','nn','nn','nn' - fillmask = 'nomask','nomask','nomask','nomask' - mapalgo = 'bilinear','bilinear','bilinear','bilinear' - mapmask = 'nomask','nomask','nomask','nomask' - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", - "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" - taxmode = 'cycle','cycle','cycle','cycle' - tintalgo = 'coszen','nearest','linear','linear' - vectors = 'null' - - - As is discussed in the User's Guide, you should use - preview_namelists to view (not modify) the output - namelist in CaseDocs. - - - - - - - Stream Description File - - - - The stream description file is not a Fortran - namelist, but a locally built xml-like parsing implementation. 
- Sometimes it is called a "stream dot-text file" because it has a - ".txt." in the filename. Stream description files contain data that - specifies the names of the fields in the stream, the names of the - input data files, and the file system directory where the data files - are located. In addition, a few other options are available such as - the time axis offset parameter. - - - - In CESM1.2, each data model's build-namelist - utility (e.g. models/atm/datm/bld/build-namelist) automatically - generates these stream description files. The directory contents of - each data model will look like the following (using DATM as an - example) - - - - models/atm/datm/bld/build-namelist - models/atm/datm/bld/namelist_files/namelist_definition_datm.xml - models/atm/datm/bld/namelist_files/namelist_defaults_datm.xml - - - - The namelist_definition_datm.xml file defines all - the namelist variables and associated groups. The - namelist_defaults_datm.xml provides the out of - the box settings for the target data model and target stream. - build-namelist utilizes these two files to - construct the stream files for the given compset settings. You can - modify the generated stream files for your particular needs by doing - the following: - - - - Call setup OR - preview_namelists. - - Copy the relevant description file from - $CASEROOT/CaseDocs to - $CASEROOT and pre-pend a "user_" string to the - filename. Change the permission of the file to write. For example, - assuming you are in $CASEROOT - - cp CaseDocs/datm.streams.txt.CLM_QIAN.Solar user_datm.streams.txt.CLM_QIAN.Solar - chmod u+w user_datm.streams.txt.CLM_QIAN.Solar - - - - - - - - Edit user_datm.streams.txt.CLM_QIAN.Solar - with your desired changes. - - - - Be sure not to put any tab characters in the file: use spaces instead. - - - In contrast to other user_nl_xxx files, be sure to set all relevant data model settings in the xml files, issue the preview_namelist - command and THEN edit the user_datm.streams.txt.CLM_QIAN.Solar file. - - - - Once you have created a user_xxx.streams.txt.* file, further modifications to the relevant data model settings in the xml files will be ignored. - - - If you later realize that you need to change some settings in an xml file, you should remove the user_xxx.streams.txt.* file(s), make the modifications in the xml file, rerun preview_namelists, - and then reintroduce your modifications into a new user_xxx.streams.txt.* stream file(s). - - - - - - - Call preview_namelists - - - Verify that your changes do indeed appear in the - resultant stream description file appear in - CaseDocs/datm.streams.txt.CLM_QIAN.Solar. These - changes will also appear in - $RUNDIR/datm.streams.txt.CLM_QIAN.Solar. - - - - - - - The data elements found in the stream description file are: - - - - - - A comment about the source of the data -- always set to GENERIC in - CESM1.2 and not used by the model. This is there only for backwards - compatibility. - - - - Information about the field data for this stream... - - - A list of the field variable names. This is a paired list - with the name of the variable in the netCDF file on the left and the - name of the corresponding model variable on the right. - This is the list of fields to read in from the data file, there may be - other fields in the file which are not read in (ie. they won't be used). - - - The file system directory where the data files are located. - - - The list of data files to use. 
If there is more than one file,
- the files must be in chronological order, that is, the dates in the time axis
- of the first file are before the dates in the time axis of the second file.
-
-
- The option is obsolete and no longer performs a function. Control
- of the time interpolation algorithm is in the strdata namelists,
- tintalgo and taxMode.
-
-
-
-
- The offset allows a user to shift the time axis of a data stream by a fixed and constant number of seconds. For instance, if a data set contains daily average data with timestamps for the data at the end of the day, it might be appropriate to shift the time axis by 12 hours so the data is taken to be at the middle of the day instead of the end of the day. This feature supports only simple shifts in seconds as a way of correcting input data time axes without having to modify the input data time axis manually. This feature does not support more complex shifts such as end of month to mid-month. But in conjunction with the time interpolation methods in the strdata input, hopefully most user needs can be accommodated with these two settings. Note that a positive offset advances the input data time axis forward by that number of seconds.
-
- The data models advance in time discretely. At a given time, they read/derive fields from input files. Those input files have data on a discrete time axis as well. Each data point in the input files is associated with a discrete time (as opposed to a time interval). Depending on whether you pick lower, upper, nearest, linear, or coszen, the data in the input file will be "interpolated" to the time in the model.
-
- The offset shifts the time axis of the input data by the given number of seconds. So if the input data is at 0, 3600, 7200, 10800 seconds (hourly) and you set an offset of 1800, then the input data will be set at times 1800, 5400, 9000, and 12600. So a model at time 3600 using linear interpolation would use the data at "n=2" with an offset of 0, and the data at "n=(2+3)/2" with an offset of 1800. n=2 is the 2nd data point in the time list 0, 3600, 7200, 10800 in this example. n=(2+3)/2 is the average of the 2nd and 3rd data points in the time list 0, 3600, 7200, 10800. The offset can be positive or negative.
-
-
-
-
-
-
- Information about the domain data for this stream...
-
-
-
-
- A list of the domain variable names. This is a paired list
- with the name of the variable in the netCDF file on the left and the
- name of the corresponding model variable on the right.
- The data models require five variables in this list.
- The names of the model's variables (names on the right) must be:
- "time," "lon," "lat," "area," and "mask."
-
-
-
-
-
-
- The file system directory where the domain data file is located.
-
-
-
-
-
-
- The name of the domain data file.
- Often the domain data is located in the same file as the field data (above),
- in which case the name of the domain file could simply be the name of the
- first field data file. Sometimes the field data files don't contain the
- domain data required by the data models; in this case, a new file can
- be created that contains the required data.
- - - - - - - - - - - - - - Actual example: - - - - - <stream> - <dataSource> - GENERIC - </dataSource> - <domainInfo> - <variableNames> - time time - lon lon - lat lat - area area - mask mask - </variableNames> - <filePath> - /glade/proj3/cseg/inputdata/atm/datm7/NYF - </filePath> - <fileNames> - nyf.ncep.T62.050923.nc - </fileNames> - </domainInfo> - <fieldInfo> - <variableNames> - dn10 dens - slp_ pslv - q10 shnum - t_10 tbot - u_10 u - v_10 v - </variableNames> - <filePath> - /glade/proj3/cseg/inputdata/atm/datm7/NYF - </filePath> - <offset> - 0 - </offset> - <fileNames> - nyf.ncep.T62.050923.nc - </fileNames> - </fieldInfo> - </stream> - - - - - - - - diff --git a/src/components/data_comps/datm/doc/ug.xml b/src/components/data_comps/datm/doc/ug.xml deleted file mode 100644 index 68837e7a54b..00000000000 --- a/src/components/data_comps/datm/doc/ug.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - build-namelist"> - CAM"> - CAM_ROOT"> - CCSM"> - CICE"> - configure"> - Filepath"> - ESMF"> - env_build.xml"> - env_case.xml"> - env_conf.xml"> - env_mach_pes.xml"> - env_run.xml"> - env_mach_specific"> - Filepath"> - Fortran"> - gmake"> - GNU"> - LAPACK"> - make"> - Makefile"> - MPI"> - NetCDF"> - OpenMP"> - Perl"> - perl"> - PnetCDF"> - XML"> - - CSMDATA"> - DIN_LOC_ROOT"> - CASEROOT"> - CASE"> - CIMEROOT"> - CLONEROOT"> - MACH"> - - <dir>"> - <dir1>[,<dir2>[,<dir3>[...]]]"> - <list>"> - <name>"> - <n>"> - <string>"> - -]> - - - - &bookinfo; - &chap1; - &chap2; - &chap3; - - diff --git a/src/components/data_comps/datm/mct/atm_comp_mct.F90 b/src/components/data_comps/datm/mct/atm_comp_mct.F90 deleted file mode 100644 index a5f2855ae63..00000000000 --- a/src/components/data_comps/datm/mct/atm_comp_mct.F90 +++ /dev/null @@ -1,296 +0,0 @@ -module atm_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use datm_comp_mod , only: datm_comp_init, datm_comp_run, datm_comp_final - use datm_shr_mod , only: datm_shr_read_namelists - use datm_shr_mod , only: presaero - use seq_flds_mod , only: seq_flds_a2x_fields, seq_flds_x2a_fields - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: atm_init_mct - public :: atm_run_mct - public :: atm_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - type(shr_strdata_type) :: SDATM - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! 
logging unit number - integer(IN) :: compid ! mct comp id - integer(IN),parameter :: master_task=0 ! task number of master task - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine atm_init_mct( EClock, cdata, x2a, a2x, NLFilename ) - - implicit none - - ! !DESCRIPTION: initialize data atm model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2a, a2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer :: phase ! phase of method - logical :: atm_present ! flag - logical :: atm_prognostic ! flag - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - logical :: read_restart ! start from restart - integer(IN) :: ierr ! error code - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_SPVAL ! single column lat - real(R8) :: scmLon = shr_const_SPVAL ! single column lon - real(R8) :: orbEccen ! orb eccentricity (unit-less) - real(R8) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) :: orbObliqr ! orb obliquity (radians) - real(R8) :: nextsw_cday ! calendar of next atm sw - - !--- formats --- - character(*), parameter :: F00 = "('(datm_comp_init) ',8a)" - integer(IN) , parameter :: master_task=0 ! task number of master task - character(*), parameter :: subName = "(atm_init_mct) " - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata,& - atm_phase=phase, & - single_column=scmMode, & - scmlat=scmlat, & - scmlon=scmLon, & - orb_eccen=orbEccen,& - orb_mvelpp=orbMvelpp, & - orb_lambm0=orbLambm0,& - orb_obliqr=orbObliqr, & - read_restart=read_restart) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('atm_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! 
Read input namelists and set present and prognostic flags - !---------------------------------------------------------------------------- - - if (phase == 1) then - call t_startf('datm_readnml') - call datm_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDATM, atm_present, atm_prognostic) - - call seq_infodata_PutData(infodata, & - atm_present=atm_present, & - atm_prognostic=atm_prognostic) - call t_stopf('datm_readnml') - end if - - !---------------------------------------------------------------------------- - ! RETURN if present flag is false - !---------------------------------------------------------------------------- - - if (phase == 1) then - if (.not. atm_present) then - RETURN - end if - end if - - ! NOTE: the following will never be called if atm_present is .false. - - !---------------------------------------------------------------------------- - ! Initialize datm - !---------------------------------------------------------------------------- - - call datm_comp_init(Eclock, x2a, a2x, & - seq_flds_x2a_fields, seq_flds_a2x_fields, & - SDATM, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon, & - orbEccen, orbMvelpp, orbLambm0, orbObliqr, phase, nextsw_cday) - - !---------------------------------------------------------------------------- - ! Fill infodata that needs to be returned from datm - !---------------------------------------------------------------------------- - - if (phase == 1) then - call seq_infodata_PutData(infodata, & - atm_nx=SDATM%nxg, & - atm_ny=SDATM%nyg, & - atm_aero=presaero, & - nextsw_cday=nextsw_cday ) - else - call seq_infodata_PutData(infodata, & - nextsw_cday=nextsw_cday ) - end if - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'datm_comp_init done' - call shr_sys_flush(logunit) - call shr_sys_flush(shrlogunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine atm_init_mct - - !=============================================================================== - subroutine atm_run_mct( EClock, cdata, x2a, a2x) - - ! !DESCRIPTION: run method for datm model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2a ! driver -> dead - type(mct_aVect) ,intent(inout) :: a2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(CL) :: case_name ! case name - real(R8) :: orbEccen ! orb eccentricity (unit-less) - real(R8) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) :: orbObliqr ! orb obliquity (radians) - real(R8) :: nextsw_cday ! calendar of next atm sw - character(*), parameter :: subName = "(atm_run_mct) " - !------------------------------------------------------------------------------- - - ! 
Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call seq_infodata_GetData(infodata, & - case_name=case_name, & - orb_eccen=orbEccen, & - orb_mvelpp=orbMvelpp, & - orb_lambm0=orbLambm0, & - orb_obliqr=orbObliqr) - - call datm_comp_run( & - EClock = EClock, & - x2a = x2a, & - a2x = a2x, & - SDATM = SDATM, & - gsmap = gsmap, & - ggrid = ggrid, & - mpicom = mpicom, & - compid = compid, & - my_task = my_task, & - master_task = master_task, & - inst_suffix = inst_suffix, & - logunit = logunit, & - orbEccen = orbEccen, & - orbMvelpp = orbMvelpp, & - orbLambm0 = orbLambm0, & - orbObliqr = orbObliqr, & - nextsw_cday = nextsw_cday, & - case_name = case_name) - - call seq_infodata_PutData(infodata, nextsw_cday=nextsw_cday ) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine atm_run_mct - - !=============================================================================== - subroutine atm_final_mct(EClock, cdata, x2a, a2x) - - ! !DESCRIPTION: finalize method for dead atm model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2a - type(mct_aVect) ,intent(inout) :: a2x - - !--- formats --- - character(*), parameter :: subName = "(atm_final_mct) " - !------------------------------------------------------------------------------- - - call datm_comp_final(my_task, master_task, logunit) - - end subroutine atm_final_mct - !=============================================================================== - -end module atm_comp_mct diff --git a/src/components/data_comps/datm/mct/datm_comp_mod.F90 b/src/components/data_comps/datm/mct/datm_comp_mod.F90 deleted file mode 100644 index def030c6798..00000000000 --- a/src/components/data_comps/datm/mct/datm_comp_mod.F90 +++ /dev/null @@ -1,1197 +0,0 @@ -#ifdef AIX - @PROCESS ALIAS_SIZE(805306368) -#endif -module datm_comp_mod - - ! !USES: - - use mct_mod - use esmf - use perf_mod - use shr_const_mod - use shr_sys_mod - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only: shr_file_getunit, shr_file_freeunit - use shr_cal_mod , only: shr_cal_date2julian, shr_cal_ymdtod2string - use shr_mpi_mod , only: shr_mpi_bcast - use shr_precip_mod , only: shr_precip_partition_rain_snow_ramp - use shr_strdata_mod, only: shr_strdata_type, shr_strdata_pioinit, shr_strdata_init - use shr_strdata_mod, only: shr_strdata_setOrbs, shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod, only: shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only: shr_dmodel_gsmapcreate, shr_dmodel_rearrGGrid - use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list - use seq_timemgr_mod, only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - - use datm_shr_mod , only: datm_shr_getNextRadCDay, datm_shr_esat, datm_shr_CORE2getFactors - use datm_shr_mod , only: datamode ! namelist input - use datm_shr_mod , only: decomp ! namelist input - use datm_shr_mod , only: wiso_datm ! namelist input - use datm_shr_mod , only: rest_file ! namelist input - use datm_shr_mod , only: rest_file_strm ! namelist input - use datm_shr_mod , only: factorfn ! namelist input - use datm_shr_mod , only: iradsw ! 
namelist input - use datm_shr_mod , only: nullstr - - ! !PUBLIC TYPES: - - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: datm_comp_init - public :: datm_comp_run - public :: datm_comp_final - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(CS) :: myModelName = 'atm' ! user defined model name - logical :: firstcall = .true. ! first call logical - real(R8) :: tbotmax ! units detector - real(R8) :: tdewmax ! units detector - real(R8) :: anidrmax ! existance detector - - character(len=*),parameter :: rpfile = 'rpointer.atm' - - real(R8),parameter :: tKFrz = SHR_CONST_TKFRZ - real(R8),parameter :: degtorad = SHR_CONST_PI/180.0_R8 - real(R8),parameter :: pstd = SHR_CONST_PSTD ! standard pressure ~ Pa - real(R8),parameter :: stebol = SHR_CONST_STEBOL ! Stefan-Boltzmann constant ~ W/m^2/K^4 - real(R8),parameter :: rdair = SHR_CONST_RDAIR ! dry air gas constant ~ J/K/kg - real(R8),parameter :: avg_c0 = 61.846_R8 - real(R8),parameter :: avg_c1 = 1.107_R8 - real(R8),parameter :: amp_c0 = -21.841_R8 - real(R8),parameter :: amp_c1 = -0.447_R8 - real(R8),parameter :: phs_c0 = 0.298_R8 - real(R8),parameter :: dLWarc = -5.000_R8 - - real(R8) :: dTarc(12) - data dTarc / 0.49_R8, 0.06_R8,-0.73_R8, -0.89_R8,-0.77_R8,-1.02_R8, & - -1.99_R8,-0.91_R8, 1.72_R8, 2.30_R8, 1.81_R8, 1.06_R8/ - - integer(IN) :: kz,ktopo,ku,kv,ktbot,kptem,kshum,kdens,kpbot,kpslv,klwdn - integer(IN) :: krc,krl,ksc,ksl,kswndr,kswndf,kswvdr,kswvdf,kswnet - integer(IN) :: kanidr,kanidf,kavsdr,kavsdf - integer(IN) :: stbot,swind,sz,spbot,sshum,stdew,srh,slwdn,sswdn,sswdndf,sswdndr - integer(IN) :: sprecc,sprecl,sprecn,sco2p,sco2d,sswup,sprec,starcf - - ! water isotopes / tracer input - integer(IN) :: kshum_16O, kshum_18O, kshum_HDO - integer(IN) :: krc_18O, krc_HDO - integer(IN) :: krl_18O, krl_HDO - integer(IN) :: ksc_18O, ksc_HDO - integer(IN) :: ksl_18O, ksl_HDO - integer(IN) :: srh_16O, srh_18O, srh_HDO - integer(IN) :: sprecn_16O, sprecn_18O, sprecn_HDO - - ! anomaly forcing - integer(IN) :: sprecsf - integer(IN) :: sprec_af,su_af,sv_af,stbot_af,sshum_af,spbot_af,slwdn_af,sswdn_af - - type(mct_rearr) :: rearr - type(mct_avect) :: avstrm ! 
av of data from stream - integer(IN), pointer :: imask(:) - real(R8), pointer :: yc(:) - real(R8), pointer :: windFactor(:) - real(R8), pointer :: winddFactor(:) - real(R8), pointer :: qsatFactor(:) - - integer(IN),parameter :: ktrans = 77 - - character(16),parameter :: avofld(1:ktrans) = & - (/"Sa_z ","Sa_topo ", & - "Sa_u ","Sa_v ","Sa_tbot ", & - "Sa_ptem ","Sa_shum ","Sa_dens ","Sa_pbot ", & - "Sa_pslv ","Faxa_lwdn ","Faxa_rainc ","Faxa_rainl ", & - "Faxa_snowc ","Faxa_snowl ","Faxa_swndr ","Faxa_swvdr ", & - "Faxa_swndf ","Faxa_swvdf ","Faxa_swnet ","Sa_co2prog ", & - "Sa_co2diag ","Faxa_bcphidry ","Faxa_bcphodry ","Faxa_bcphiwet ", & - "Faxa_ocphidry ","Faxa_ocphodry ","Faxa_ocphiwet ","Faxa_dstwet1 ", & - "Faxa_dstwet2 ","Faxa_dstwet3 ","Faxa_dstwet4 ","Faxa_dstdry1 ", & - "Faxa_dstdry2 ","Faxa_dstdry3 ","Faxa_dstdry4 ", & - "Sx_tref ","Sx_qref ","Sx_avsdr ","Sx_anidr ", & - "Sx_avsdf ","Sx_anidf ","Sx_t ","So_t ", & - "Sl_snowh ","Sf_lfrac ","Sf_ifrac ","Sf_ofrac ", & - "Faxx_taux ","Faxx_tauy ","Faxx_lat ","Faxx_sen ", & - "Faxx_lwup ","Faxx_evap ","Fall_fco2_lnd ","Faoo_fco2_ocn ", & - "Faoo_fdms_ocn ", & - ! add values for bias correction / anomaly forcing - "Sa_precsf ", & - "Sa_prec_af ","Sa_u_af ","Sa_v_af ","Sa_tbot_af ",& - "Sa_pbot_af ","Sa_shum_af ","Sa_swdn_af ","Sa_lwdn_af ",& - ! isotopic forcing - "Faxa_rainc_18O ","Faxa_rainc_HDO ","Faxa_rainl_18O ","Faxa_rainl_HDO ",& - "Faxa_snowc_18O ","Faxa_snowc_HDO ","Faxa_snowl_18O ","Faxa_snowl_HDO ",& - "Sa_shum_16O ","Sa_shum_18O ","Sa_shum_HDO " & - /) - - character(16),parameter :: avifld(1:ktrans) = & - (/"z ","topo ", & - "u ","v ","tbot ", & - "ptem ","shum ","dens ","pbot ", & - "pslv ","lwdn ","rainc ","rainl ", & - "snowc ","snowl ","swndr ","swvdr ", & - "swndf ","swvdf ","swnet ","co2prog ", & - "co2diag ","bcphidry ","bcphodry ","bcphiwet ", & - "ocphidry ","ocphodry ","ocphiwet ","dstwet1 ", & - "dstwet2 ","dstwet3 ","dstwet4 ","dstdry1 ", & - "dstdry2 ","dstdry3 ","dstdry4 ", & - "tref ","qref ","avsdr ","anidr ", & - "avsdf ","anidf ","ts ","to ", & - "snowhl ","lfrac ","ifrac ","ofrac ", & - "taux ","tauy ","lat ","sen ", & - "lwup ","evap ","co2lnd ","co2ocn ", & - "dms ", & - ! add values for bias correction / anomaly forcing (add Sa_precsf for precip scale factor) - "precsf ", & - "prec_af ","u_af ","v_af ","tbot_af ", & - "pbot_af ","shum_af ","swdn_af ","lwdn_af ", & - ! isotopic forcing - "rainc_18O ","rainc_HDO ","rainl_18O ","rainl_HDO ", & - "snowc_18O ","snowc_HDO ","snowl_18O ","snowl_HDO ", & - "shum_16O ","shum_18O ","shum_HDO " & - /) - - ! The stofld and stifld lists are used for fields that are read but not passed to the - ! coupler (e.g., they are used to compute fields that are passed to the coupler), and - ! other fields used in calculations. Fields that are simply read and passed directly to - ! the coupler do not need to be in these lists. - - integer(IN),parameter :: ktranss = 33 - - character(16),parameter :: stofld(1:ktranss) = & - (/"strm_tbot ","strm_wind ","strm_z ","strm_pbot ", & - "strm_shum ","strm_tdew ","strm_rh ","strm_lwdn ", & - "strm_swdn ","strm_swdndf ","strm_swdndr ","strm_precc ", & - "strm_precl ","strm_precn ","strm_co2prog ","strm_co2diag ", & - "strm_swup ","strm_prec ","strm_tarcf ", & - ! 
add bias correction / anomaly forcing streams - "strm_precsf ", & - "strm_prec_af ","strm_u_af ","strm_v_af ","strm_tbot_af ", & - "strm_pbot_af ","strm_shum_af ","strm_swdn_af ","strm_lwdn_af ", & - "strm_rh_18O ","strm_rh_HDO ", & - "strm_precn_16O ","strm_precn_18O ","strm_precn_HDO " & - /) - - character(16),parameter :: stifld(1:ktranss) = & - (/"tbot ","wind ","z ","pbot ", & - "shum ","tdew ","rh ","lwdn ", & - "swdn ","swdndf ","swdndr ","precc ", & - "precl ","precn ","co2prog ","co2diag ", & - ! add precsf - "swup ","prec ","tarcf ","precsf ", & - ! add anomaly forcing streams - "prec_af ","u_af ","v_af ","tbot_af ", & - "pbot_af ","shum_af ","swdn_af ","lwdn_af ", & - ! isotopic forcing - "rh_18O ","rh_HDO ", & - "precn_16O ","precn_18O ","precn_HDO " & - /) - - character(CL), pointer :: ilist_av(:) ! input list for translation - character(CL), pointer :: olist_av(:) ! output list for translation - character(CL), pointer :: ilist_st(:) ! input list for translation - character(CL), pointer :: olist_st(:) ! output list for translation - integer(IN) , pointer :: count_av(:) - integer(IN) , pointer :: count_st(:) - - save - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - !=============================================================================== - subroutine datm_comp_init(Eclock, x2a, a2x, & - seq_flds_x2a_fields, seq_flds_a2x_fields, & - SDATM, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon, & - orbEccen, orbMvelpp, orbLambm0, orbObliqr, phase, nextsw_cday) - - ! !DESCRIPTION: initialize data atm model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2a, a2x - character(len=*) , intent(in) :: seq_flds_x2a_fields ! fields from mediator - character(len=*) , intent(in) :: seq_flds_a2x_fields ! fields to mediator - type(shr_strdata_type) , intent(inout) :: SDATM ! model shr_strdata instance (output) - type(mct_gsMap) , pointer :: gsMap ! model global sep map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - real(R8) , intent(in) :: orbEccen ! orb eccentricity (unit-less) - real(R8) , intent(in) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) , intent(in) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) , intent(in) :: orbObliqr ! orb obliquity (radians) - integer , intent(in) :: phase ! initialization phase index - real(R8) , intent(out) :: nextsw_cday ! calendar of next atm sw - - !--- local variables --- - integer(IN) :: n,k ! generic counters - integer(IN) :: lsize ! local size - integer(IN) :: kmask ! field reference - integer(IN) :: klat ! 
field reference - integer(IN) :: kfld ! fld index - integer(IN) :: cnt ! counter - integer(IN) :: idt ! integer timestep - - logical :: exists ! filename existance - integer(IN) :: nu ! unit number - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: stepno ! step number - character(CL) :: calendar ! calendar type - character(CL) :: flds_strm - - !--- formats --- - character(*), parameter :: F00 = "('(datm_comp_init) ',8a)" - character(*), parameter :: F0L = "('(datm_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(datm_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(datm_comp_init) ',a,4es13.6)" - character(*), parameter :: F03 = "('(datm_comp_init) ',a,i8,a)" - character(*), parameter :: F04 = "('(datm_comp_init) ',2a,2i8,'s')" - character(*), parameter :: F05 = "('(datm_comp_init) ',a,2f10.4)" - character(*), parameter :: F90 = "('(datm_comp_init) ',73('='))" - character(*), parameter :: F91 = "('(datm_comp_init) ',73('-'))" - character(*), parameter :: subName = "(datm_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DATM_INIT') - - if (phase == 1) then - call t_startf('datm_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize PIO - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDATM, COMPID) - - !---------------------------------------------------------------------------- - ! Initialize SDATM - !---------------------------------------------------------------------------- - - call seq_timemgr_EClockGetData( EClock, dtime=idt, calendar=calendar ) - - ! NOTE: shr_strdata_init calls shr_dmodel_readgrid which reads the data model - ! grid and from that computes SDATM%gsmap and SDATM%ggrid. DATM%gsmap is created - ! using the decomp '2d1d' (1d decomp of 2d grid) - if (scmmode) then - if (my_task == master_task) then - write(logunit,F05) ' scm lon lat = ',scmlon,scmlat - end if - call shr_strdata_init(SDATM,& - mpicom, compid, name='atm', & - scmmode=scmmode,scmlon=scmlon,scmlat=scmlat, & - calendar=calendar) - else - call shr_strdata_init(SDATM,& - mpicom, compid, name='atm', & - calendar=calendar) - endif - - !--- overwrite mask and frac --- - k = mct_aVect_indexRA(SDATM%grid%data,'mask') - SDATM%grid%data%rAttr(k,:) = 1.0_R8 - - k = mct_aVect_indexRA(SDATM%grid%data,'frac') - SDATM%grid%data%rAttr(k,:) = 1.0_R8 - - if (my_task == master_task) then - call shr_strdata_print(SDATM,'ATM data') - endif - - call t_stopf('datm_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize MCT global seg map, 1d decomp, gsmap - !---------------------------------------------------------------------------- - - call t_startf('datm_initgsmaps') - if (my_task == master_task) write(logunit,F00) ' initialize gsmaps' - call shr_sys_flush(logunit) - - ! create a data model global seqmap (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the datm_in namelist - ! (which by default is "1d") - call shr_dmodel_gsmapcreate(gsmap, SDATM%nxg*SDATM%nyg, compid, mpicom, decomp) - lsize = mct_gsmap_lsize(gsmap,mpicom) - - ! 
create a rearranger from the data model SDATM%gsmap to gsmap - call mct_rearr_init(SDATM%gsmap, gsmap, mpicom, rearr) - call t_stopf('datm_initgsmaps') - - !---------------------------------------------------------------------------- - ! Initialize MCT domain - !---------------------------------------------------------------------------- - - call t_startf('datm_initmctdom') - if (my_task == master_task) write(logunit,F00) 'copy domains' - call shr_sys_flush(logunit) - - call shr_dmodel_rearrGGrid(SDATM%grid, ggrid, gsmap, rearr, mpicom) - call t_stopf('datm_initmctdom') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('datm_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - call shr_sys_flush(logunit) - - call mct_aVect_init(a2x, rList=seq_flds_a2x_fields, lsize=lsize) - call mct_aVect_zero(a2x) - - kz = mct_aVect_indexRA(a2x,'Sa_z') - ktopo = mct_aVect_indexRA(a2x,'Sa_topo') - ku = mct_aVect_indexRA(a2x,'Sa_u') - kv = mct_aVect_indexRA(a2x,'Sa_v') - ktbot = mct_aVect_indexRA(a2x,'Sa_tbot') - kptem = mct_aVect_indexRA(a2x,'Sa_ptem') - kshum = mct_aVect_indexRA(a2x,'Sa_shum') - kdens = mct_aVect_indexRA(a2x,'Sa_dens') - kpbot = mct_aVect_indexRA(a2x,'Sa_pbot') - kpslv = mct_aVect_indexRA(a2x,'Sa_pslv') - klwdn = mct_aVect_indexRA(a2x,'Faxa_lwdn') - krc = mct_aVect_indexRA(a2x,'Faxa_rainc') - krl = mct_aVect_indexRA(a2x,'Faxa_rainl') - ksc = mct_aVect_indexRA(a2x,'Faxa_snowc') - ksl = mct_aVect_indexRA(a2x,'Faxa_snowl') - kswndr= mct_aVect_indexRA(a2x,'Faxa_swndr') - kswndf= mct_aVect_indexRA(a2x,'Faxa_swndf') - kswvdr= mct_aVect_indexRA(a2x,'Faxa_swvdr') - kswvdf= mct_aVect_indexRA(a2x,'Faxa_swvdf') - kswnet= mct_aVect_indexRA(a2x,'Faxa_swnet') - - if (wiso_datm) then ! 
water isotopic forcing - kshum_16O = mct_aVect_indexRA(a2x,'Sa_shum_16O') - kshum_18O = mct_aVect_indexRA(a2x,'Sa_shum_18O') - kshum_HDO = mct_aVect_indexRA(a2x,'Sa_shum_HDO') - krc_18O = mct_aVect_indexRA(a2x,'Faxa_rainc_18O') - krc_HDO = mct_aVect_indexRA(a2x,'Faxa_rainc_HDO') - krl_18O = mct_aVect_indexRA(a2x,'Faxa_rainl_18O') - krl_HDO = mct_aVect_indexRA(a2x,'Faxa_rainl_HDO') - ksc_18O = mct_aVect_indexRA(a2x,'Faxa_snowc_18O') - ksc_HDO = mct_aVect_indexRA(a2x,'Faxa_snowc_HDO') - ksl_18O = mct_aVect_indexRA(a2x,'Faxa_snowl_18O') - ksl_HDO = mct_aVect_indexRA(a2x,'Faxa_snowl_HDO') - end if - - call mct_aVect_init(x2a, rList=seq_flds_x2a_fields, lsize=lsize) - call mct_aVect_zero(x2a) - - kanidr = mct_aVect_indexRA(x2a,'Sx_anidr') - kanidf = mct_aVect_indexRA(x2a,'Sx_anidf') - kavsdr = mct_aVect_indexRA(x2a,'Sx_avsdr') - kavsdf = mct_aVect_indexRA(x2a,'Sx_avsdf') - - !--- figure out what's on the standard streams --- - cnt = 0 - flds_strm = '' - do n = 1,SDATM%nstreams - do k = 1,ktranss - kfld = mct_aVect_indexRA(SDATM%avs(n),trim(stifld(k)),perrWith='quiet') - if (kfld > 0) then - cnt = cnt + 1 - if (cnt == 1) then - flds_strm = trim(stofld(k)) - else - flds_strm = trim(flds_strm)//':'//trim(stofld(k)) - endif - endif - enddo - enddo - - if (my_task == master_task) write(logunit,F00) ' flds_strm = ',trim(flds_strm) - call shr_sys_flush(logunit) - - call mct_aVect_init(avstrm, rList=flds_strm, lsize=lsize) - call mct_aVect_zero(avstrm) - - stbot = mct_aVect_indexRA(avstrm,'strm_tbot' ,perrWith='quiet') - swind = mct_aVect_indexRA(avstrm,'strm_wind' ,perrWith='quiet') - sz = mct_aVect_indexRA(avstrm,'strm_z' ,perrWith='quiet') - spbot = mct_aVect_indexRA(avstrm,'strm_pbot' ,perrWith='quiet') - sshum = mct_aVect_indexRA(avstrm,'strm_shum' ,perrWith='quiet') - stdew = mct_aVect_indexRA(avstrm,'strm_tdew' ,perrWith='quiet') - srh = mct_aVect_indexRA(avstrm,'strm_rh' ,perrWith='quiet') - slwdn = mct_aVect_indexRA(avstrm,'strm_lwdn' ,perrWith='quiet') - sswdn = mct_aVect_indexRA(avstrm,'strm_swdn' ,perrWith='quiet') - sswdndf= mct_aVect_indexRA(avstrm,'strm_swdndf' ,perrWith='quiet') - sswdndr= mct_aVect_indexRA(avstrm,'strm_swdndr' ,perrWith='quiet') - sprecc = mct_aVect_indexRA(avstrm,'strm_precc' ,perrWith='quiet') - sprecl = mct_aVect_indexRA(avstrm,'strm_precl' ,perrWith='quiet') - sprecn = mct_aVect_indexRA(avstrm,'strm_precn' ,perrWith='quiet') - sco2p = mct_aVect_indexRA(avstrm,'strm_co2p' ,perrWith='quiet') - sco2d = mct_aVect_indexRA(avstrm,'strm_co2d' ,perrWith='quiet') - sswup = mct_aVect_indexRA(avstrm,'strm_swup' ,perrWith='quiet') - sprec = mct_aVect_indexRA(avstrm,'strm_prec' ,perrWith='quiet') - starcf = mct_aVect_indexRA(avstrm,'strm_tarcf' ,perrWith='quiet') - - ! anomaly forcing - sprecsf = mct_aVect_indexRA(avstrm,'strm_precsf' ,perrWith='quiet') - sprec_af = mct_aVect_indexRA(avstrm,'strm_prec_af' ,perrWith='quiet') - su_af = mct_aVect_indexRA(avstrm,'strm_u_af' ,perrWith='quiet') - sv_af = mct_aVect_indexRA(avstrm,'strm_v_af' ,perrWith='quiet') - stbot_af = mct_aVect_indexRA(avstrm,'strm_tbot_af' ,perrWith='quiet') - spbot_af = mct_aVect_indexRA(avstrm,'strm_pbot_af' ,perrWith='quiet') - sshum_af = mct_aVect_indexRA(avstrm,'strm_shum_af' ,perrWith='quiet') - sswdn_af = mct_aVect_indexRA(avstrm,'strm_swdn_af' ,perrWith='quiet') - slwdn_af = mct_aVect_indexRA(avstrm,'strm_lwdn_af' ,perrWith='quiet') - - if(wiso_datm) then - ! 
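For reference, the flds_strm loop above builds a colon-delimited list of output field names by probing each stream's attribute vector for every candidate input field. A minimal Python sketch of that bookkeeping, with hypothetical field-name tables standing in for the real stifld/stofld arrays:

```python
# Illustrative sketch only: append the output name for every
# (stream, candidate field) pair found in the streams, mirroring the
# flds_strm loop above. Field names here are hypothetical stand-ins.
def build_strm_field_list(stream_fields, stifld, stofld):
    parts = []
    for fields in stream_fields:                    # one set of field names per stream
        for in_name, out_name in zip(stifld, stofld):
            if in_name in fields:
                parts.append(out_name)
    return ":".join(parts)

print(build_strm_field_list([{"tbot", "wind"}, {"prec"}],
                            ["tbot", "wind", "prec", "tdew"],
                            ["strm_tbot", "strm_wind", "strm_prec", "strm_tdew"]))
# -> strm_tbot:strm_wind:strm_prec
```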
isotopic forcing - sprecn_16O = mct_aVect_indexRA(avstrm,'strm_precn_16O',perrWith='quiet') - sprecn_18O = mct_aVect_indexRA(avstrm,'strm_precn_18O',perrWith='quiet') - sprecn_HDO = mct_aVect_indexRA(avstrm,'strm_precn_HDO',perrWith='quiet') - ! Okay here to just use srh_18O and srh_HDO, because the forcing is (should) - ! just be deltas, applied in lnd_comp_mct to the base tracer - srh_16O = mct_aVect_indexRA(avstrm,'strm_rh_16O',perrWith='quiet') - srh_18O = mct_aVect_indexRA(avstrm,'strm_rh_18O',perrWith='quiet') - srh_HDO = mct_aVect_indexRA(avstrm,'strm_rh_HDO',perrWith='quiet') - end if - - allocate(imask(lsize)) - allocate(yc(lsize)) - allocate(windFactor(lsize)) - allocate(winddFactor(lsize)) - allocate(qsatFactor(lsize)) - - kmask = mct_aVect_indexRA(ggrid%data,'mask') - imask(:) = nint(ggrid%data%rAttr(kmask,:)) - klat = mct_aVect_indexRA(ggrid%data,'lat') - yc(:) = ggrid%data%rAttr(klat,:) - - call t_stopf('datm_initmctavs') - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - if (trim(rest_file) == trim(nullstr) .and. & - trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer' - call shr_sys_flush(logunit) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (.not.exists) then - write(logunit,F00) ' ERROR: rpointer file does not exist' - call shr_sys_abort(trim(subname)//' ERROR: rpointer file missing') - endif - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - call shr_sys_flush(logunit) - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - - call shr_mpi_bcast(exists,mpicom,'exists') - - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDATM,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - call shr_sys_flush(logunit) - endif - - if (read_restart) then - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, stepno=stepno, dtime=idt ) - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - nextsw_cday = datm_shr_getNextRadCDay( CurrentYMD, CurrentTOD, stepno, idt, iradsw, calendar ) - else - call seq_timemgr_EClockGetData( EClock, curr_cday=nextsw_cday, stepno=stepno ) - endif - - else ! phase = 2 - - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, stepno=stepno, dtime=idt) - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - nextsw_cday = datm_shr_getNextRadCDay( CurrentYMD, CurrentTOD, stepno, idt, iradsw, calendar ) - - endif - - !---------------------------------------------------------------------------- - ! 
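On a restart with no filenames supplied in the namelist, the block above recovers the model and stream restart filenames from the rpointer file on the master task and broadcasts them. A minimal Python sketch of that two-line rpointer convention, with error handling simplified:

```python
import os

def read_rpointer(rpfile, inst_suffix=""):
    """First line: model restart file; second line: stream restart file."""
    path = rpfile + inst_suffix
    if not os.path.exists(path):
        raise FileNotFoundError(f"rpointer file does not exist: {path}")
    with open(path) as fh:
        rest_file = fh.readline().strip()
        rest_file_strm = fh.readline().strip()
    return rest_file, rest_file_strm
```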
Set initial atm state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - call datm_comp_run( & - EClock = EClock, & - x2a = x2a, & - a2x = a2x, & - SDATM = SDATM, & - gsmap = gsmap, & - ggrid = ggrid, & - mpicom = mpicom, & - compid = compid, & - my_task = my_task, & - master_task = master_task, & - inst_suffix = inst_suffix, & - logunit = logunit, & - orbEccen = orbEccen, & - orbMvelpp = orbMvelpp, & - orbLambm0 = orbLambm0, & - orbObliqr = orbObliqr, & - nextsw_cday = nextsw_cday) - call t_adj_detailf(-2) - - call t_stopf('DATM_INIT') - - end subroutine datm_comp_init - - !=============================================================================== - subroutine datm_comp_run(EClock, x2a, a2x, & - SDATM, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, & - orbEccen, orbMvelpp, orbLambm0, orbObliqr, & - nextsw_cday, case_name) - - ! !DESCRIPTION: run method for datm model - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2a - type(mct_aVect) , intent(inout) :: a2x - type(shr_strdata_type) , intent(inout) :: SDATM - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer(IN) , intent(in) :: logunit ! logging unit number - real(R8) , intent(in) :: orbEccen ! orb eccentricity (unit-less) - real(R8) , intent(in) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) , intent(in) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) , intent(in) :: orbObliqr ! orb obliquity (radians) - real(R8) , intent(out) :: nextsw_cday ! calendar of next atm sw - character(CL) , intent(in), optional :: case_name ! case name - - !--- local --- - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: yy,mm,dd ! year month day - integer(IN) :: n ! indices - integer(IN) :: lsize ! size of attr vect - integer(IN) :: idt ! integer timestep - real(R8) :: dt ! timestep - logical :: write_restart ! restart now - character(CL) :: rest_file ! restart_file - character(CL) :: rest_file_strm ! restart_file - integer(IN) :: nu ! unit number - integer(IN) :: stepno ! step number - real(R8) :: rday ! elapsed day - real(R8) :: cosFactor ! cosine factor - real(R8) :: factor ! generic/temporary correction factor - real(R8) :: avg_alb ! average albedo - real(R8) :: tMin ! minimum temperature - character(CL) :: calendar ! 
calendar type - - character(len=18) :: date_str - !--- temporaries - real(R8) :: uprime,vprime,swndr,swndf,swvdr,swvdf,ratio_rvrf - real(R8) :: tbot,pbot,rtmp,vp,ea,e,qsat,frac - - character(*), parameter :: F00 = "('(datm_comp_run) ',8a)" - character(*), parameter :: F04 = "('(datm_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(datm_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DATM_RUN') - - call t_startf('datm_run1') - - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, curr_yr=yy, curr_mon=mm, curr_day=dd) - call seq_timemgr_EClockGetData( EClock, stepno=stepno, dtime=idt) - call seq_timemgr_EClockGetData( EClock, calendar=calendar) - dt = idt * 1.0_r8 - write_restart = seq_timemgr_RestartAlarmIsOn(EClock) - - call t_stopf('datm_run1') - - !-------------------- - ! ADVANCE ATM - !-------------------- - - call t_barrierf('datm_BARRIER',mpicom) - call t_startf('datm') - - nextsw_cday = datm_shr_getNextRadCDay( CurrentYMD, CurrentTOD, stepno, idt, iradsw, calendar ) - - !--- set data needed for cosz t-interp method --- - call shr_strdata_setOrbs(SDATM,orbEccen,orbMvelpp,orbLambm0,orbObliqr,idt) - - !--- copy all fields from streams to a2x as default --- - - call t_startf('datm_strdata_advance') - call shr_strdata_advance(SDATM,currentYMD,currentTOD,mpicom,'datm') - call t_stopf('datm_strdata_advance') - - call t_barrierf('datm_scatter_BARRIER',mpicom) - - call t_startf('datm_scatter') - if (firstcall) then - allocate(ilist_av(SDATM%nstreams)) - allocate(olist_av(SDATM%nstreams)) - allocate(ilist_st(SDATM%nstreams)) - allocate(olist_st(SDATM%nstreams)) - allocate(count_av(SDATM%nstreams)) - allocate(count_st(SDATM%nstreams)) - end if - do n = 1,SDATM%nstreams - if (firstcall) then - call shr_dmodel_translate_list(SDATM%avs(n),a2x,& - avifld(1:ktrans),avofld,ilist_av(n),olist_av(n),count_av(n)) - end if - if (count_av(n) > 0) then - call shr_dmodel_translateAV_list(SDATM%avs(n),a2x,& - ilist_av(n),olist_av(n),rearr) - end if - enddo - do n = 1,SDATM%nstreams - if (firstcall) then - call shr_dmodel_translate_list(SDATM%avs(n),avstrm,& - stifld(1:ktranss),stofld,ilist_st(n),olist_st(n),count_st(n)) - end if - if (count_st(n) > 0) then - call shr_dmodel_translateAV_list(SDATM%avs(n),avstrm,& - ilist_st(n),olist_st(n),rearr) - end if - enddo - call t_stopf('datm_scatter') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('datm_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - case('CORE2_NYF','CORE2_IAF') - if (firstcall) then - if (sprec < 1 .or. 
sswdn < 1) then - write(logunit,F00) 'ERROR: prec and swdn must be in streams for CORE2' - call shr_sys_abort(trim(subname)//'ERROR: prec and swdn must be in streams for CORE2') - endif - if (trim(datamode) == 'CORE2_IAF' ) then - if (starcf < 1 ) then - write(logunit,F00) 'ERROR: tarcf must be in an input stream for CORE2_IAF' - call shr_sys_abort(trim(subname)//'tarcf must be in an input stream for CORE2_IAF') - endif - endif - call datm_shr_CORE2getFactors(factorFn,windFactor,winddFactor,qsatFactor, & - mpicom,compid,gsmap,ggrid,SDATM%nxg,SDATM%nyg) - endif - call shr_cal_date2julian(currentYMD,currentTOD,rday,calendar) - rday = mod((rday - 1.0_R8),365.0_R8) - cosfactor = cos((2.0_R8*SHR_CONST_PI*rday)/365 - phs_c0) - - lsize = mct_avect_lsize(a2x) - do n = 1,lsize - a2x%rAttr(kz,n) = 10.0_R8 - - !--- correction to NCEP winds based on QSCAT --- - uprime = a2x%rAttr(ku,n)*windFactor(n) - vprime = a2x%rAttr(kv,n)*windFactor(n) - a2x%rAttr(ku,n) = uprime*cos(winddFactor(n)*degtorad)- & - vprime*sin(winddFactor(n)*degtorad) - a2x%rAttr(kv,n) = uprime*sin(winddFactor(n)*degtorad)+ & - vprime*cos(winddFactor(n)*degtorad) - - !--- density, tbot, & pslv taken directly from input stream, set pbot --- - a2x%rAttr(kpbot,n) = a2x%rAttr(kpslv,n) - - !--- correction to NCEP Arctic & Antarctic air T & potential T --- - if ( yc(n) < -60.0_R8 ) then - tMin = (avg_c0 + avg_c1*yc(n)) + (amp_c0 + amp_c1*yc(n))*cosFactor + tKFrz - a2x%rAttr(ktbot,n) = max(a2x%rAttr(ktbot,n), tMin) - else if ( yc(n) > 60.0_R8 ) then - factor = MIN(1.0_R8, 0.1_R8*(yc(n)-60.0_R8) ) - a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + factor * dTarc(mm) - endif - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - - !--- correction to NCEP relative humidity for heat budget balance --- - a2x%rAttr(kshum,n) = a2x%rAttr(kshum,n) + qsatFactor(n) - - !--- Dupont correction to NCEP Arctic air T --- - !--- don't correct during summer months (July-September) - !--- ONLY correct when forcing year is 1997->2004 - if (trim(datamode) == 'CORE2_IAF' ) then - a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + avstrm%rAttr(starcf,n) - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - end if - - !------------------------------------------------------------------------- - ! PRECIPITATION DATA - !------------------------------------------------------------------------- - - avstrm%rAttr(sprec,n) = avstrm%rAttr(sprec,n)/86400.0_R8 ! convert mm/day to kg/m^2/s - - ! only correct satellite products, do not correct Serreze Arctic data - if ( yc(n) < 58. ) then - avstrm%rAttr(sprec,n) = avstrm%rAttr(sprec,n)*1.14168_R8 - endif - if ( yc(n) >= 58. .and. yc(n) < 68. ) then - factor = MAX(0.0_R8, 1.0_R8 - 0.1_R8*(yc(n)-58.0_R8) ) - avstrm%rAttr(sprec,n) = avstrm%rAttr(sprec,n)*(factor*(1.14168_R8 - 1.0_R8) + 1.0_R8) - endif - - a2x%rAttr(krc,n) = 0.0_R8 ! default zero - a2x%rAttr(ksc,n) = 0.0_R8 - if (a2x%rAttr(ktbot,n) < tKFrz ) then ! assign precip to rain/snow components - a2x%rAttr(krl,n) = 0.0_R8 - a2x%rAttr(ksl,n) = avstrm%rAttr(sprec,n) - else - a2x%rAttr(krl,n) = avstrm%rAttr(sprec,n) - a2x%rAttr(ksl,n) = 0.0_R8 - endif - - !------------------------------------------------------------------------- - ! 
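Two of the CORE2 corrections above are simple closed-form adjustments: the QSCAT-based wind fix scales the wind vector by windFactor and rotates it by winddFactor degrees, and precipitation is converted from mm/day to kg/m^2/s with a 1.14168 satellite-product scaling that is ramped off between 58N and 68N. A Python sketch of just those two pieces (function names are illustrative; degtorad is assumed to be pi/180):

```python
import math

DEGTORAD = math.pi / 180.0   # assumed value of the degtorad constant

def correct_wind(u, v, wind_factor, windd_factor):
    """Scale wind speed by wind_factor and rotate the vector by windd_factor degrees."""
    up = u * wind_factor
    vp = v * wind_factor
    ang = windd_factor * DEGTORAD
    return (up * math.cos(ang) - vp * math.sin(ang),
            up * math.sin(ang) + vp * math.cos(ang))

def correct_core2_precip(prec_mm_per_day, lat_deg):
    """Convert mm/day to kg/m^2/s and apply the 1.14168 scaling,
    ramped back to 1.0 between 58 and 68 degrees north."""
    prec = prec_mm_per_day / 86400.0
    if lat_deg < 58.0:
        prec *= 1.14168
    elif lat_deg < 68.0:
        factor = max(0.0, 1.0 - 0.1 * (lat_deg - 58.0))
        prec *= factor * (1.14168 - 1.0) + 1.0
    return prec
```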
RADIATION DATA - !------------------------------------------------------------------------- - - !--- fabricate required swdn components from net swdn --- - a2x%rAttr(kswvdr,n) = avstrm%rAttr(sswdn,n)*(0.28_R8) - a2x%rAttr(kswndr,n) = avstrm%rAttr(sswdn,n)*(0.31_R8) - a2x%rAttr(kswvdf,n) = avstrm%rAttr(sswdn,n)*(0.24_R8) - a2x%rAttr(kswndf,n) = avstrm%rAttr(sswdn,n)*(0.17_R8) - - !--- compute net short-wave based on LY08 latitudinally-varying albedo --- - avg_alb = ( 0.069 - 0.011*cos(2.0_R8*yc(n)*degtorad ) ) - a2x%rAttr(kswnet,n) = avstrm%rAttr(sswdn,n)*(1.0_R8 - avg_alb) - - !--- corrections to GISS sswdn for heat budget balancing --- - factor = 1.0_R8 - if ( -60.0_R8 < yc(n) .and. yc(n) < -50.0_R8 ) then - factor = 1.0_R8 - (yc(n) + 60.0_R8)*(0.05_R8/10.0_R8) - else if ( -50.0_R8 < yc(n) .and. yc(n) < 30.0_R8 ) then - factor = 0.95_R8 - else if ( 30.0_R8 < yc(n) .and. yc(n) < 40._R8 ) then - factor = 1.0_R8 - (40.0_R8 - yc(n))*(0.05_R8/10.0_R8) - endif - a2x%rAttr(kswnet,n) = a2x%rAttr(kswnet,n)*factor - a2x%rAttr(kswvdr,n) = a2x%rAttr(kswvdr,n)*factor - a2x%rAttr(kswndr,n) = a2x%rAttr(kswndr,n)*factor - a2x%rAttr(kswvdf,n) = a2x%rAttr(kswvdf,n)*factor - a2x%rAttr(kswndf,n) = a2x%rAttr(kswndf,n)*factor - - !--- correction to GISS lwdn in Arctic --- - if ( yc(n) > 60._R8 ) then - factor = MIN(1.0_R8, 0.1_R8*(yc(n)-60.0_R8) ) - a2x%rAttr(klwdn,n) = a2x%rAttr(klwdn,n) + factor * dLWarc - endif - - enddo ! lsize - - case('CORE_IAF_JRA') - if (firstcall) then - if (sprec < 1 .or. sswdn < 1) then - write(logunit,F00) 'ERROR: prec and swdn must be in streams for CORE_IAF_JRA' - call shr_sys_abort(trim(subname)//'ERROR: prec and swdn must be in streams for CORE_IAF_JRA') - endif - if (trim(datamode) == 'CORE_IAF_JRA' ) then - if (starcf < 1 ) then - write(logunit,F00) 'ERROR: tarcf must be in an input stream for CORE_IAF_JRA' - call shr_sys_abort(trim(subname)//'tarcf must be in an input stream for CORE_IAF_JRA') - endif - endif - if (trim(factorFn) == 'null') then - windFactor = 1.0_R8 - winddFactor = 1.0_R8 - qsatFactor = 1.0_R8 - else - call datm_shr_CORE2getFactors(factorFn,windFactor,winddFactor,qsatFactor, & - mpicom,compid,gsmap,ggrid,SDATM%nxg,SDATM%nyg) - endif - endif - call shr_cal_date2julian(currentYMD,currentTOD,rday,calendar) - rday = mod((rday - 1.0_R8),365.0_R8) - cosfactor = cos((2.0_R8*SHR_CONST_PI*rday)/365 - phs_c0) - - lsize = mct_avect_lsize(a2x) - do n = 1,lsize - a2x%rAttr(kz,n) = 10.0_R8 - - !--- density, tbot, & pslv taken directly from input stream, set pbot --- - a2x%rAttr(kpbot,n) = a2x%rAttr(kpslv,n) - - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - - !--- density computation for JRA55 forcing --- - a2x%rAttr(kdens,n) = a2x%rAttr(kpbot,n)/(rdair*a2x%rAttr(ktbot,n) & - *(1+0.608* a2x%rAttr(kshum,n))) - - !------------------------------------------------------------------------- - ! PRECIPITATION DATA - !------------------------------------------------------------------------- - - a2x%rAttr(krc,n) = 0.0_R8 ! default zero - a2x%rAttr(ksc,n) = 0.0_R8 - if (a2x%rAttr(ktbot,n) < tKFrz ) then ! assign precip to rain/snow components - a2x%rAttr(krl,n) = 0.0_R8 - a2x%rAttr(ksl,n) = avstrm%rAttr(sprec,n) - else - a2x%rAttr(krl,n) = avstrm%rAttr(sprec,n) - a2x%rAttr(ksl,n) = 0.0_R8 - endif - - !------------------------------------------------------------------------- - ! 
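The CORE2 radiation code above fabricates the four shortwave components from total downward shortwave with fixed fractions and computes net shortwave from the LY08 latitude-dependent albedo (the CORE_IAF_JRA branch that follows reuses the same formulas), and the JRA branch derives air density from a virtual-temperature form of the ideal gas law. A sketch of those formulas; r_dry is an assumed dry-air gas constant standing in for the rdair parameter, which is not shown here:

```python
import math

def split_swdn(swdn):
    """Fixed-fraction partition of total swdn into vis/near-IR, direct/diffuse."""
    return {"swvdr": 0.28 * swdn, "swndr": 0.31 * swdn,
            "swvdf": 0.24 * swdn, "swndf": 0.17 * swdn}

def swnet_ly08(swdn, lat_deg):
    """Net shortwave from the LY08 latitudinally-varying albedo."""
    avg_alb = 0.069 - 0.011 * math.cos(2.0 * math.radians(lat_deg))
    return swdn * (1.0 - avg_alb)

def air_density_jra(q, p_pa, t_k, r_dry=287.04):
    """Moist-air density for the JRA55 branch (1 + 0.608*q virtual temperature)."""
    return p_pa / (r_dry * t_k * (1.0 + 0.608 * q))
```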
RADIATION DATA - !------------------------------------------------------------------------- - - !--- fabricate required swdn components from net swdn --- - a2x%rAttr(kswvdr,n) = avstrm%rAttr(sswdn,n)*(0.28_R8) - a2x%rAttr(kswndr,n) = avstrm%rAttr(sswdn,n)*(0.31_R8) - a2x%rAttr(kswvdf,n) = avstrm%rAttr(sswdn,n)*(0.24_R8) - a2x%rAttr(kswndf,n) = avstrm%rAttr(sswdn,n)*(0.17_R8) - - !--- compute net short-wave based on LY08 latitudinally-varying albedo --- - avg_alb = ( 0.069 - 0.011*cos(2.0_R8*yc(n)*degtorad ) ) - a2x%rAttr(kswnet,n) = avstrm%rAttr(sswdn,n)*(1.0_R8 - avg_alb) - - enddo ! lsize - - case('CLMNCEP') - if (firstcall) then - if (swind < 1 .or. stbot < 1) then - write(logunit,F00) ' ERROR: wind and tbot must be in streams for CLMNCEP' - call shr_sys_abort(trim(subname)//' ERROR: wind and tbot must be in streams for CLMNCEP') - endif - rtmp = maxval(a2x%rAttr(ktbot,:)) - call shr_mpi_max(rtmp,tbotmax,mpicom,'datm_tbot',all=.true.) - rtmp = maxval(x2a%rAttr(kanidr,:)) - call shr_mpi_max(rtmp,anidrmax,mpicom,'datm_ani',all=.true.) - if (stdew > 0) then - rtmp = maxval(avstrm%rAttr(stdew,:)) - call shr_mpi_max(rtmp,tdewmax,mpicom,'datm_tdew',all=.true.) - endif - if (my_task == master_task) & - write(logunit,*) trim(subname),' max values = ',tbotmax,tdewmax,anidrmax - endif - lsize = mct_avect_lsize(a2x) - do n = 1,lsize - !--- bottom layer height --- - if (sz < 1) a2x%rAttr(kz,n) = 30.0_R8 - - !--- temperature --- - if (tbotmax < 50.0_R8) a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + tkFrz - ! Limit very cold forcing to 180K - a2x%rAttr(ktbot,n) = max(180._r8, a2x%rAttr(ktbot,n)) - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - - !--- pressure --- - if (spbot < 1) a2x%rAttr(kpbot,n) = pstd - a2x%rAttr(kpslv,n) = a2x%rAttr(kpbot,n) - - !--- u, v wind velocity --- - a2x%rAttr(ku,n) = avstrm%rAttr(swind,n)/sqrt(2.0_R8) - a2x%rAttr(kv,n) = a2x%rAttr(ku,n) - - !--- specific humidity --- - tbot = a2x%rAttr(ktbot,n) - pbot = a2x%rAttr(kpbot,n) - if (sshum > 0) then - e = datm_shr_esat(tbot,tbot) - qsat = (0.622_R8 * e)/(pbot - 0.378_R8 * e) - if (qsat < a2x%rAttr(kshum,n)) then - a2x%rAttr(kshum,n) = qsat - endif - else if (srh > 0) then - e = avstrm%rAttr(srh,n) * 0.01_R8 * datm_shr_esat(tbot,tbot) - qsat = (0.622_R8 * e)/(pbot - 0.378_R8 * e) - a2x%rAttr(kshum,n) = qsat - if(wiso_datm) then - ! isotopic forcing - ! For tracer specific humidity, lnd_import_mct expects a delta, so - ! just keep the delta from the input file - TW - a2x%rAttr(kshum_16O,n) = avstrm%rAttr(srh_16O,n) - a2x%rAttr(kshum_18O,n) = avstrm%rAttr(srh_18O,n) - a2x%rAttr(kshum_HDO,n) = avstrm%rAttr(srh_HDO,n) - end if - else if (stdew > 0) then - if (tdewmax < 50.0_R8) avstrm%rAttr(stdew,n) = avstrm%rAttr(stdew,n) + tkFrz - e = datm_shr_esat(avstrm%rAttr(stdew,n),tbot) - qsat = (0.622_R8 * e)/(pbot - 0.378_R8 * e) - a2x%rAttr(kshum,n) = qsat - else - call shr_sys_abort(subname//'ERROR: cannot compute shum') - endif - - !--- density --- - vp = (a2x%rAttr(kshum,n)*pbot) / (0.622_R8 + 0.378_R8 * a2x%rAttr(kshum,n)) - a2x%rAttr(kdens,n) = (pbot - 0.378_R8 * vp) / (tbot*rdair) - - !--- downward longwave --- - if (slwdn < 1) then - e = a2x%rAttr(kpslv,n) * a2x%rAttr(kshum,n) / (0.622_R8 + 0.378_R8 * a2x%rAttr(kshum,n)) - ea = 0.70_R8 + 5.95e-05_R8 * 0.01_R8 * e * exp(1500.0_R8/tbot) - a2x%rAttr(klwdn,n) = ea * stebol * tbot**4 - endif - - !--- shortwave radiation --- - if (sswdndf > 0 .and. 
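The CLMNCEP branch above derives specific humidity from saturation vapor pressure (computed from the humidity, relative-humidity, or dewpoint stream), then air density, and, when no longwave stream is present, an empirical downward longwave flux. A Python sketch of those three formulas; R_DRY and STEBOL are assumed values for the rdair and stebol constants, which are not shown in this file:

```python
import math

R_DRY  = 287.04    # assumed dry-air gas constant (J/kg/K), 'rdair' in the code
STEBOL = 5.67e-8   # assumed Stefan-Boltzmann constant (W/m^2/K^4), 'stebol' in the code

def qsat_from_vapor_pressure(e_pa, p_pa):
    """Saturation specific humidity from vapor pressure e and pressure p."""
    return 0.622 * e_pa / (p_pa - 0.378 * e_pa)

def air_density_clmncep(q, p_pa, t_k):
    """Moist-air density as computed in the CLMNCEP branch."""
    vp = q * p_pa / (0.622 + 0.378 * q)
    return (p_pa - 0.378 * vp) / (t_k * R_DRY)

def lwdn_estimate(q, pslv_pa, t_k):
    """Downward longwave estimate used when no lwdn stream is available."""
    e = pslv_pa * q / (0.622 + 0.378 * q)
    ea = 0.70 + 5.95e-5 * 0.01 * e * math.exp(1500.0 / t_k)
    return ea * STEBOL * t_k ** 4
```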
sswdndr > 0) then - a2x%rAttr(kswndr,n) = avstrm%rAttr(sswdndr,n) * 0.50_R8 - a2x%rAttr(kswvdr,n) = avstrm%rAttr(sswdndr,n) * 0.50_R8 - a2x%rAttr(kswndf,n) = avstrm%rAttr(sswdndf,n) * 0.50_R8 - a2x%rAttr(kswvdf,n) = avstrm%rAttr(sswdndf,n) * 0.50_R8 - elseif (sswdn > 0) then - ! relationship between incoming NIR or VIS radiation and ratio of - ! direct to diffuse radiation calculated based on one year's worth of - ! hourly CAM output from CAM version cam3_5_55 - swndr = avstrm%rAttr(sswdn,n) * 0.50_R8 - ratio_rvrf = min(0.99_R8,max(0.29548_R8 + 0.00504_R8*swndr & - -1.4957e-05_R8*swndr**2 + 1.4881e-08_R8*swndr**3,0.01_R8)) - a2x%rAttr(kswndr,n) = ratio_rvrf*swndr - swndf = avstrm%rAttr(sswdn,n) * 0.50_R8 - a2x%rAttr(kswndf,n) = (1._R8 - ratio_rvrf)*swndf - - swvdr = avstrm%rAttr(sswdn,n) * 0.50_R8 - ratio_rvrf = min(0.99_R8,max(0.17639_R8 + 0.00380_R8*swvdr & - -9.0039e-06_R8*swvdr**2 + 8.1351e-09_R8*swvdr**3,0.01_R8)) - a2x%rAttr(kswvdr,n) = ratio_rvrf*swvdr - swvdf = avstrm%rAttr(sswdn,n) * 0.50_R8 - a2x%rAttr(kswvdf,n) = (1._R8 - ratio_rvrf)*swvdf - else - call shr_sys_abort(subName//'ERROR: cannot compute short-wave down') - endif - - !--- swnet: a diagnostic quantity --- - if (anidrmax < 1.0e-8 .or. anidrmax > SHR_CONST_SPVAL * 0.9_R8) then - a2x%rAttr(kswnet,n) = 0.0_R8 - else - a2x%rAttr(kswnet,n) = (1.0_R8-x2a%rAttr(kanidr,n))*a2x%rAttr(kswndr,n) + & - (1.0_R8-x2a%rAttr(kavsdr,n))*a2x%rAttr(kswvdr,n) + & - (1.0_R8-x2a%rAttr(kanidf,n))*a2x%rAttr(kswndf,n) + & - (1.0_R8-x2a%rAttr(kavsdf,n))*a2x%rAttr(kswvdf,n) - endif - - !--- rain and snow --- - if (sprecc > 0 .and. sprecl > 0) then - a2x%rAttr(krc,n) = avstrm%rAttr(sprecc,n) - a2x%rAttr(krl,n) = avstrm%rAttr(sprecl,n) - elseif (sprecn > 0) then - a2x%rAttr(krc,n) = avstrm%rAttr(sprecn,n)*0.1_R8 - a2x%rAttr(krl,n) = avstrm%rAttr(sprecn,n)*0.9_R8 - else - call shr_sys_abort(subName//'ERROR: cannot compute rain and snow') - endif - - !--- split precip between rain & snow --- - call shr_precip_partition_rain_snow_ramp(tbot, frac) - a2x%rAttr(ksc,n) = max(0.0_R8, a2x%rAttr(krc,n)*(1.0_R8 - frac) ) - a2x%rAttr(ksl,n) = max(0.0_R8, a2x%rAttr(krl,n)*(1.0_R8 - frac) ) - a2x%rAttr(krc,n) = max(0.0_R8, a2x%rAttr(krc,n)*( frac) ) - a2x%rAttr(krl,n) = max(0.0_R8, a2x%rAttr(krl,n)*( frac) ) - - enddo - - end select - - call t_stopf('datm_datamode') - - !---------------------------------------------------------- - ! bias correction / anomaly forcing ( start block ) - !---------------------------------------------------------- - - ! modify atmospheric input fields if streams exist - - lsize = mct_avect_lsize(avstrm) - - ! bias correct precipitation relative to observed - ! (via bias_correct nameslist option) - if (sprecsf > 0) then - do n = 1,lsize - a2x%rAttr(ksc,n) = a2x%rAttr(ksc,n)*min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - a2x%rAttr(ksl,n) = a2x%rAttr(ksl,n)*min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - a2x%rAttr(krc,n) = a2x%rAttr(krc,n)*min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - a2x%rAttr(krl,n) = a2x%rAttr(krl,n)*min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - - end do - endif - - ! adjust atmospheric input fields if anomaly forcing streams exist - ! (via anomaly_forcing namelist option) - - ! wind - if (su_af > 0 .and. sv_af > 0) then - do n = 1,lsize - a2x%rAttr(ku,n) = a2x%rAttr(ku,n) + avstrm%rAttr(su_af,n) - a2x%rAttr(kv,n) = a2x%rAttr(kv,n) + avstrm%rAttr(sv_af,n) - end do - endif - - ! specific humidity - if (sshum_af > 0) then - do n = 1,lsize - a2x%rAttr(kshum,n) = a2x%rAttr(kshum,n) + avstrm%rAttr(sshum_af,n) - - ! 
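When only total downward shortwave is available, the branch above splits it 50/50 into near-IR and visible bands and estimates the direct fraction of each from cubic fits to hourly CAM (cam3_5_55) output. A Python sketch of that calculation, with an illustrative function name:

```python
def split_direct_diffuse(swdn):
    """Direct/diffuse NIR and VIS components from total downward shortwave."""
    def ratio(s, c0, c1, c2, c3):
        # cubic fit for the direct-to-total ratio, clipped to [0.01, 0.99]
        return min(0.99, max(c0 + c1 * s + c2 * s ** 2 + c3 * s ** 3, 0.01))

    s = 0.5 * swdn                       # half near-IR, half visible
    r_nir = ratio(s, 0.29548, 0.00504, -1.4957e-5, 1.4881e-8)
    r_vis = ratio(s, 0.17639, 0.00380, -9.0039e-6, 8.1351e-9)
    return {"swndr": r_nir * s, "swndf": (1.0 - r_nir) * s,
            "swvdr": r_vis * s, "swvdf": (1.0 - r_vis) * s}
```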
avoid possible negative q values - if(a2x%rAttr(kshum,n) < 0._r8) then - a2x%rAttr(kshum,n) = 1.e-6_r8 - endif - - end do - endif - - ! pressure - if (spbot_af > 0) then - do n = 1,lsize - a2x%rAttr(kpbot,n) = a2x%rAttr(kpbot,n) + avstrm%rAttr(spbot_af,n) - end do - endif - - ! temperature - if (stbot_af > 0) then - do n = 1,lsize - a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + avstrm%rAttr(stbot_af,n) - end do - endif - - ! longwave - if (slwdn_af > 0) then - do n = 1,lsize - a2x%rAttr(klwdn,n) = a2x%rAttr(klwdn,n)*avstrm%rAttr(slwdn_af,n) - end do - endif - - ! precipitation - if (sprec_af > 0) then - do n = 1,lsize - a2x%rAttr(ksc,n) = a2x%rAttr(ksc,n)*avstrm%rAttr(sprec_af,n) - a2x%rAttr(ksl,n) = a2x%rAttr(ksl,n)*avstrm%rAttr(sprec_af,n) - a2x%rAttr(krc,n) = a2x%rAttr(krc,n)*avstrm%rAttr(sprec_af,n) - a2x%rAttr(krl,n) = a2x%rAttr(krl,n)*avstrm%rAttr(sprec_af,n) - enddo - endif - ! shortwave - if (sswdn_af > 0) then - do n = 1,lsize - a2x%rAttr(kswndr,n) = a2x%rAttr(kswndr,n)*avstrm%rAttr(sswdn_af,n) - a2x%rAttr(kswvdr,n) = a2x%rAttr(kswvdr,n)*avstrm%rAttr(sswdn_af,n) - a2x%rAttr(kswndf,n) = a2x%rAttr(kswndf,n)*avstrm%rAttr(sswdn_af,n) - a2x%rAttr(kswvdf,n) = a2x%rAttr(kswvdf,n)*avstrm%rAttr(sswdn_af,n) - enddo - endif - !---------------------------------------------------------- - ! bias correction / anomaly forcing ( end block ) - !---------------------------------------------------------- - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('datm_restart') - call shr_cal_ymdtod2string(date_str, yy,mm,dd,currentTOD) - - write(rest_file,"(6a)") & - trim(case_name), '.datm',trim(inst_suffix),'.r.', trim(date_str), '.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.datm',trim(inst_suffix),'.rs1.', trim(date_str), '.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),currentYMD,currentTOD - call shr_strdata_restWrite(trim(rest_file_strm),SDATM,mpicom,trim(case_name),'SDATM strdata') - call shr_sys_flush(logunit) - call t_stopf('datm_restart') - endif - - call t_stopf('datm') - - !---------------------------------------------------------------------------- - ! Log output for model date - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call t_startf('datm_run2') - if (my_task == master_task) then - write(logunit,F04) trim(myModelName),': model date ', CurrentYMD,CurrentTOD - call shr_sys_flush(logunit) - end if - - firstcall = .false. - call t_stopf('datm_run2') - - call t_stopf('DATM_RUN') - - end subroutine datm_comp_run - - !=============================================================================== - subroutine datm_comp_final(my_task, master_task, logunit) - - ! !DESCRIPTION: finalize method for datm model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! 
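The anomaly-forcing block above applies additive anomalies to wind, temperature, pressure, and specific humidity (resetting a negative humidity to 1e-6) and multiplicative factors to longwave, precipitation, and shortwave. A compact sketch of that pattern, with hypothetical field names standing in for the a2x attribute indices:

```python
def apply_anomaly_forcing(fields, add_anom, mult_anom):
    """Additive anomalies for u, v, tbot, pbot, shum; multiplicative factors
    for lwdn and the precipitation/shortwave components."""
    out = dict(fields)
    for name, delta in add_anom.items():        # e.g. {"u": 0.5, "tbot": 1.2}
        out[name] = out[name] + delta
    if out.get("shum", 1.0) < 0.0:               # avoid negative specific humidity
        out["shum"] = 1.0e-6
    for name, factor in mult_anom.items():       # e.g. {"lwdn": 1.02, "rainl": 0.9}
        out[name] = out[name] * factor
    return out
```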
logging unit number - - !--- formats --- - character(*), parameter :: F00 = "('(datm_comp_final) ',8a)" - character(*), parameter :: F91 = "('(datm_comp_final) ',73('-'))" - character(*), parameter :: subName = "(datm_comp_final) " - !------------------------------------------------------------------------------- - - call t_startf('DATM_FINAL') - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) - end if - - call t_stopf('DATM_FINAL') - - end subroutine datm_comp_final - !=============================================================================== - -end module datm_comp_mod diff --git a/src/components/data_comps/datm/mct/datm_shr_mod.F90 b/src/components/data_comps/datm/mct/datm_shr_mod.F90 deleted file mode 100644 index 720a2840a98..00000000000 --- a/src/components/data_comps/datm/mct/datm_shr_mod.F90 +++ /dev/null @@ -1,614 +0,0 @@ -module datm_shr_mod - - ! !USES: - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_const_mod , only : SHR_CONST_CDAY,SHR_CONST_TKFRZ,SHR_CONST_SPVAL - use shr_file_mod , only : shr_file_getlogunit, shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_readnml - use shr_dmodel_mod , only : shr_dmodel_mapset - use shr_cal_mod , only : shr_cal_date2julian - use shr_ncread_mod , only : shr_ncread_varExists, shr_ncread_varDimSizes, shr_ncread_field4dG - use shr_strdata_mod, only : shr_strdata_type - use mct_mod - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: datm_shr_getNextRadCDay - public :: datm_shr_CORE2getFactors - public :: datm_shr_TN460getFactors - public :: datm_shr_eSat - public :: datm_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! input namelist variables - character(CL) , public :: decomp ! decomp strategy - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - character(CL) , public :: bias_correct ! true => send bias correction fields to coupler - character(CL) , public :: anomaly_forcing(8) ! true => send anomaly forcing fields to coupler - logical , public :: force_prognostic_true ! if true set prognostic true - logical , public :: wiso_datm = .false. ! expect isotopic forcing from file? - integer(IN) , public :: iradsw ! radiation interval - character(CL) , public :: factorFn ! file containing correction factors - logical , public :: presaero ! true => send valid prescribe aero fields to coupler - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! 
mode - character(len=*), public, parameter :: nullstr = 'undefined' - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine datm_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDATM, atm_present, atm_prognostic) - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer , intent(in) :: inst_index ! number of current instance (ie. 1) - character(len=16) , intent(in) :: inst_suffix ! char string associated with instance - character(len=16) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(in) :: shrlogunit ! original log unit and level - type(shr_strdata_type) , intent(inout) :: SDATM - logical , intent(out) :: atm_present ! flag - logical , intent(out) :: atm_prognostic ! flag - - !--- local variables --- - character(CL) :: fileName ! generic file name - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - - !--- formats --- - character(*), parameter :: F00 = "('(datm_comp_init) ',8a)" - character(*), parameter :: F0L = "('(datm_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(datm_comp_init) ',a,5i8)" - character(*), parameter :: subName = "(shr_datm_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / datm_nml / & - decomp, iradsw, factorFn, restfilm, restfils, presaero, bias_correct, & - anomaly_forcing, force_prognostic_true, wiso_datm - - !---------------------------------------------------------------------------- - ! Determine input filenamname - !---------------------------------------------------------------------------- - - filename = "datm_in"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! Read datm_in - !---------------------------------------------------------------------------- - - decomp = "1d" - iradsw = 0 - factorFn = 'null' - restfilm = trim(nullstr) - restfils = trim(nullstr) - presaero = .false. - force_prognostic_true = .false. - if (my_task == master_task) then - nunit = shr_file_getUnit() ! 
get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=datm_nml,iostat=ierr) - close(nunit) - - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' decomp = ',trim(decomp) - write(logunit,F01)' iradsw = ',iradsw - write(logunit,F00)' factorFn = ',trim(factorFn) - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' presaero = ',presaero - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - write(logunit,F0L)' wiso_datm = ', wiso_datm - write(logunit,F01) 'inst_index = ',inst_index - write(logunit,F00) 'inst_name = ',trim(inst_name) - write(logunit,F00) 'inst_suffix = ',trim(inst_suffix) - call shr_sys_flush(logunit) - endif - call shr_mpi_bcast(decomp,mpicom,'decomp') - call shr_mpi_bcast(iradsw,mpicom,'iradsw') - call shr_mpi_bcast(factorFn,mpicom,'factorFn') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(presaero,mpicom,'presaero') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - call shr_mpi_bcast(wiso_datm, mpicom, 'wiso_datm') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDATM, trim(filename), mpicom=mpicom) - call shr_sys_flush(shrlogunit) - - ! Validate mode - - datamode = trim(SDATM%dataMode) - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'CORE2_NYF' .or. & - trim(datamode) == 'CORE2_IAF' .or. & - trim(datamode) == 'CORE_IAF_JRA' .or. & - trim(datamode) == 'CLMNCEP' .or. & - trim(datamode) == 'COPYALL' ) then - if (my_task == master_task) then - write(logunit,F00) ' datm datamode = ',trim(datamode) - call shr_sys_flush(logunit) - end if - else - write(logunit,F00) ' ERROR illegal datm datamode = ',trim(datamode) - call shr_sys_abort() - endif - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flag - !---------------------------------------------------------------------------- - - atm_present = .false. - atm_prognostic = .false. - if (force_prognostic_true) then - atm_present = .true. - atm_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - atm_present = .true. - end if - - end subroutine datm_shr_read_namelists - - !=============================================================================== - real(R8) function datm_shr_getNextRadCDay( ymd, tod, stepno, dtime, iradsw, calendar ) - - ! !DESCRIPTION: - ! Return the calendar day of the next radiation time-step. - ! General Usage: nextswday = datm_shr_getNextRadCDay(curr_date) - implicit none - - ! 
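datm_shr_read_namelists above accepts six data modes and derives the present/prognostic flags from the mode and force_prognostic_true. That logic reduces to a few lines, sketched here in Python with an illustrative function name:

```python
VALID_DATAMODES = {"NULL", "CORE2_NYF", "CORE2_IAF", "CORE_IAF_JRA", "CLMNCEP", "COPYALL"}

def presence_flags(datamode, force_prognostic_true):
    """atm is 'present' unless datamode is NULL (or prognostic is forced);
    it is 'prognostic' only when force_prognostic_true is set."""
    if datamode not in VALID_DATAMODES:
        raise ValueError(f"illegal datm datamode: {datamode}")
    atm_present = force_prognostic_true or datamode != "NULL"
    atm_prognostic = force_prognostic_true
    return atm_present, atm_prognostic
```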
!INPUT/OUTPUT PARAMETERS: - integer(IN), intent(IN) :: ymd - integer(IN), intent(IN) :: tod - integer(IN), intent(IN) :: stepno - integer(IN), intent(IN) :: dtime - integer(IN), intent(IN) :: iradsw - character(*),intent(in) :: calendar - - !----- local ----- - real(R8) :: nextsw_cday - real(R8) :: julday - integer :: liradsw - character(*),parameter :: subName = '(datm_shr_getNextRadCDay) ' - !------------------------------------------------------------------------------- - - liradsw = iradsw - if (liradsw < 0) liradsw = nint((-liradsw *3600._r8)/dtime) - - call shr_cal_date2julian(ymd,tod,julday,calendar) - - if (liradsw > 1) then - if (mod(stepno+1,liradsw) == 0 .and. stepno > 0) then - nextsw_cday = julday + 2*dtime/SHR_CONST_CDAY - else - nextsw_cday = -1._r8 - end if - else - nextsw_cday = julday + dtime/SHR_CONST_CDAY - end if - datm_shr_getNextRadCDay = nextsw_cday - - end function datm_shr_getNextRadCDay - - !=============================================================================== - subroutine datm_shr_CORE2getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - implicit none - - !--- arguments --- - character(*) ,intent(in) :: fileName ! file name string - real(R8) ,intent(inout) :: windF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: winddF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: qsatF(:) ! rel humidty adjustment factors - integer(IN) ,intent(in) :: mpicom ! mpi comm - integer(IN) ,intent(in) :: compid ! mct compid - type(mct_gsmap) ,intent(in) :: gsmap ! decomp of wind,windd,qsat - type(mct_ggrid) ,intent(in) :: ggrid ! ggrid of grid info - integer(IN) ,intent(in) :: nxg ! size of input grid - integer(IN) ,intent(in) :: nyg ! size of input grid - - !--- local --- - integer(IN) :: my_task,logunit,ier - - !--- formats --- - character(*),parameter :: subName = '(datm_shr_CORE2getFactors) ' - character(*),parameter :: F00 = "('(datm_shr_CORE2getFactors) ',4a) " - !------------------------------------------------------------------------------- - - call MPI_COMM_RANK(mpicom,my_task,ier) - call shr_file_getLogUnit(logunit) - - if (my_task == 0) then - - !--- verify necessary data is in input file --- - if ( .not. shr_ncread_varExists(fileName,'lat' ) & - .or. .not. shr_ncread_varExists(fileName,'lon' ) & - .or. .not. shr_ncread_varExists(fileName,'mask' ) & - .or. .not. shr_ncread_varExists(fileName,'windFactor') & - .or. .not. shr_ncread_varExists(fileName,'winddFactor') & - .or. .not. shr_ncread_varExists(fileName,'qsatFactor') ) then - write(logunit,F00) "ERROR: invalid correction factor data file" - call shr_sys_abort(subName//"invalid correction factor data file") - end if - endif - - call datm_shr_getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - end subroutine datm_shr_CORE2getFactors - - !=============================================================================== - subroutine datm_shr_TN460getFactors(fileName,windF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - implicit none - - !--- arguments --- - character(*) ,intent(in) :: fileName ! file name string - real(R8) ,intent(inout) :: windF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: qsatF(:) ! rel humidty adjustment factors - integer(IN) ,intent(in) :: mpicom ! mpi comm - integer(IN) ,intent(in) :: compid ! mct compid - type(mct_gsmap) ,intent(in) :: gsmap ! decomp of wind,windd,qsat - type(mct_ggrid) ,intent(in) :: ggrid ! ggrid of grid info - integer(IN) ,intent(in) :: nxg ! 
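For reference, the radiation-day logic of datm_shr_getNextRadCDay above: a negative iradsw is interpreted as hours and converted to a step count, and the function returns either the Julian day of the next shortwave calculation or -1 when the current step does not trigger one. A Python sketch (86400 is the assumed value of SHR_CONST_CDAY; the Julian day is taken as an input rather than computed from the date):

```python
def next_rad_cday(julday, stepno, dtime, iradsw, seconds_per_day=86400.0):
    """Calendar day of the next radiation computation, or -1 if not a radiation step."""
    liradsw = iradsw
    if liradsw < 0:
        liradsw = round((-liradsw * 3600.0) / dtime)   # hours -> number of steps
    if liradsw > 1:
        if (stepno + 1) % liradsw == 0 and stepno > 0:
            return julday + 2.0 * dtime / seconds_per_day
        return -1.0
    return julday + dtime / seconds_per_day
```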
size of input grid - integer(IN) ,intent(in) :: nyg ! size of input grid - - !--- local --- - integer(IN) :: my_task,logunit,ier - real(R8),pointer :: winddF(:) ! wind adjustment factor - - !--- formats --- - character(*),parameter :: subName = '(datm_shr_TN460getFactors) ' - character(*),parameter :: F00 = "('(datm_shr_TN460getFactors) ',4a) " - - !------------------------------------------------------------------------------- - - call MPI_COMM_RANK(mpicom,my_task,ier) - call shr_file_getLogUnit(logunit) - - if (my_task == 0) then - - !--- verify necessary data is in input file --- - if ( .not. shr_ncread_varExists(fileName,'lat' ) & - .or. .not. shr_ncread_varExists(fileName,'lon' ) & - .or. .not. shr_ncread_varExists(fileName,'mask' ) & - .or. .not. shr_ncread_varExists(fileName,'windFactor') & - .or. .not. shr_ncread_varExists(fileName,'qsatFactor') ) then - write(logunit,F00) "ERROR: invalid correction factor data file" - call shr_sys_abort(subName//"invalid correction factor data file") - end if - endif - - call datm_shr_getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - end subroutine datm_shr_TN460getFactors - - !=============================================================================== - subroutine datm_shr_getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmapo,ggrido,nxgo,nygo) - - use shr_map_mod - - implicit none - - !--- arguments --- - character(*) ,intent(in) :: fileName ! file name string - real(R8) ,intent(inout) :: windF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: winddF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: qsatF(:) ! rel humidty adjustment factors - integer(IN) ,intent(in) :: mpicom ! mpi comm - integer(IN) ,intent(in) :: compid ! mct compid - type(mct_gsmap) ,intent(in) :: gsmapo ! decomp of wind,windd,qsat - type(mct_ggrid) ,intent(in) :: ggrido ! ggrid of grid info - integer(IN) ,intent(in) :: nxgo ! size of input grid - integer(IN) ,intent(in) :: nygo ! size of input grid - - !--- data that describes the local model domain --- - integer(IN) :: ni0,nj0 ! dimensions of global bundle0 - integer(IN) :: i,j,n ! generic indicies - integer(IN) :: my_task ! local pe number - integer(IN) :: ier ! error code - integer(IN) :: logunit ! logging unit - type(mct_ggrid) :: ggridi ! input file grid - type(mct_ggrid) :: ggridoG ! output grid gathered - type(mct_gsmap) :: gsmapi ! input file gsmap - type(mct_sMatp) :: smatp ! sparse matrix weights - type(mct_avect) :: avi ! input attr vect - type(mct_avect) :: avo ! output attr vect - integer(IN) :: lsizei ! local size of input - integer(IN) :: lsizeo ! local size of output - integer(IN),pointer :: start(:) ! start list - integer(IN),pointer :: length(:) ! length list - integer(IN) :: gsizei ! input global size - integer(IN) :: numel ! number of elements in start list - real(R8) :: dadd ! lon correction - logical :: domap ! map or not - integer(IN) :: klon,klat ! lon lat fld index - - !--- temp arrays for data input --- - real(R8) ,allocatable :: tempR4D(:,:,:,:) ! 4D data array - real(R8) ,pointer :: tempR1D(:) ! 1D data array - integer(IN),allocatable :: tempI4D(:,:,:,:) ! 
4D data array - - !--- formats --- - character(*),parameter :: subName = '(datm_shr_getFactors) ' - character(*),parameter :: F00 = "('(datm_shr_getFactors) ',4a) " - character(*),parameter :: F01 = "('(datm_shr_getFactors) ',a,2i5)" - character(*),parameter :: F02 = "('(datm_shr_getFactors) ',a,6e12.3)" - - !------------------------------------------------------------------------------- - ! Note: gsmapi is all gridcells on root pe - !------------------------------------------------------------------------------- - - call MPI_COMM_RANK(mpicom,my_task,ier) - call shr_file_getLogUnit(logunit) - - ni0 = 0 - nj0 = 0 - allocate(start(1),length(1)) - start = 0 - length = 0 - numel = 0 - - !---------------------------------------------------------------------------- - ! read in and map global correction factors - !---------------------------------------------------------------------------- - if (my_task == 0) then - - !--- verify necessary data is in input file --- - if ( .not. shr_ncread_varExists(fileName,'lat' ) & - .or. .not. shr_ncread_varExists(fileName,'lon' ) & - .or. .not. shr_ncread_varExists(fileName,'mask' ) & - .or. .not. shr_ncread_varExists(fileName,'windFactor') ) then - write(logunit,F00) "ERROR: invalid correction factor data file" - call shr_sys_abort(subName//"invalid correction factor data file") - end if - call shr_ncread_varDimSizes(fileName,"windFactor",ni0,nj0) - start = 1 - length = ni0*nj0 - numel = 1 - endif - call shr_mpi_bcast(ni0,mpicom,subname//' ni0') - call shr_mpi_bcast(nj0,mpicom,subname//' nj0') - gsizei = ni0*nj0 - - !--- allocate datatypes for input data --- - call mct_gsmap_init(gsmapi,start,length,0,mpicom,compid,gsize=gsizei,numel=numel) - deallocate(start,length) - lsizei = mct_gsmap_lsize(gsmapi,mpicom) - lsizeo = mct_gsmap_lsize(gsmapo,mpicom) - call mct_gGrid_init(GGrid=gGridi, & - CoordChars='lat:lon:hgt', OtherChars='area:aream:mask:frac', lsize=lsizei ) - call mct_aVect_init(avi,rList="wind:windd:qsat",lsize=lsizei) - avi%rAttr = SHR_CONST_SPVAL - - !--- gather output grid for map logic --- - call mct_ggrid_gather(ggrido, ggridoG, gsmapo, 0, mpicom) - - if (my_task == 0) then - allocate(tempR1D(ni0*nj0)) - - !--- read domain data: lon --- - allocate(tempR4D(ni0,1,1,1)) - call shr_ncread_field4dG(fileName,'lon' ,rfld=tempR4D) - !--- needs to be monotonically increasing, add 360 at wraparound+ --- - dadd = 0.0_R8 - do i = 2,ni0 - if (tempR4D(i-1,1,1,1) > tempR4D(i,1,1,1)) dadd = 360.0_R8 - tempR4D(i,1,1,1) = tempR4D(i,1,1,1) + dadd - enddo - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(i,1,1,1) - end do - end do - deallocate(tempR4D) - call mct_gGrid_importRattr(gGridi,'lon',tempR1D,lsizei) - - !--- read domain data: lat --- - allocate(tempR4D(nj0,1,1,1)) - call shr_ncread_field4dG(fileName,'lat' ,rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(j,1,1,1) - end do - end do - deallocate(tempR4D) - call mct_gGrid_importRattr(gGridi,'lat',tempR1D,lsizei) - - !--- read domain mask--- - allocate(tempI4D(ni0,nj0,1,1)) - call shr_ncread_field4dG(fileName,'mask',ifld=tempI4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = real(tempI4D(i,j,1,1),R8) - end do - end do - deallocate(tempI4D) - call mct_gGrid_importRattr(gGridi,'mask',tempR1D,lsizei) - - !--- read bundle data: wind factor --- - allocate(tempR4D(ni0,nj0,1,1)) - if (shr_ncread_varExists(fileName,'windFactor') ) then - call shr_ncread_field4dG(fileName,'windFactor',rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - 
tempR1D(n) = tempR4D(i,j,1,1) - end do - end do - call mct_aVect_importRattr(avi,'wind',tempR1D,lsizei) - endif - - !--- read bundle data: windd factor --- - if (shr_ncread_varExists(fileName,'winddFactor') ) then - call shr_ncread_field4dG(fileName,'winddFactor',rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(i,j,1,1) - end do - end do - call mct_aVect_importRattr(avi,'windd',tempR1D,lsizei) - endif - - !--- read bundle data: qsat factor --- - if (shr_ncread_varExists(fileName,'qsatFactor') ) then - call shr_ncread_field4dG(fileName,'qsatFactor',rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(i,j,1,1) - end do - end do - call mct_aVect_importRattr(avi,'qsat',tempR1D,lsizei) - endif - - deallocate(tempR4D) - deallocate(tempR1D) - - domap = .false. - if (ni0 /= nxgo .or. nj0 /= nygo) then - domap = .true. - else - klon = mct_aVect_indexRA(ggridi%data,'lon') - klat = mct_aVect_indexRA(ggrido%data,'lat') - do n = 1,lsizei - if (abs(ggridi%data%rAttr(klon,n)-ggridoG%data%rAttr(klon,n)) > 0.01_R8) domap=.true. - if (abs(ggridi%data%rAttr(klat,n)-ggridoG%data%rAttr(klat,n)) > 0.01_R8) domap=.true. - enddo - endif - - call mct_gGrid_clean(ggridoG) - - endif - - call shr_mpi_bcast(domap,mpicom,subname//' domap') - - if (domap) then - call shr_dmodel_mapSet(smatp,ggridi,gsmapi,ni0 ,nj0 , & - ggrido,gsmapo,nxgo,nygo, & - 'datmfactor',shr_map_fs_remap,shr_map_fs_bilinear,shr_map_fs_srcmask, & - shr_map_fs_scalar,compid,mpicom,'Xonly') - call mct_aVect_init(avo,avi,lsizeo) - call mct_sMat_avMult(avi,smatp,avo) - call mct_sMatP_clean(smatp) - else - call mct_aVect_scatter(avi,avo,gsmapo,0,mpicom) - endif - - !--- fill the interface arrays, only if they are the right size --- - allocate(tempR1D(lsizeo)) - if (size(windF ) >= lsizeo) then - call mct_aVect_exportRattr(avo,'wind' ,tempR1D,lsizeo) - windF = tempR1D - endif - if (size(winddF) >= lsizeo) then - call mct_aVect_exportRattr(avo,'windd',tempR1D,lsizeo) - winddF = tempR1D - endif - if (size(qsatF ) >= lsizeo) then - call mct_aVect_exportRattr(avo,'qsat' ,tempR1D,lsizeo) - qsatF = tempR1D - endif - deallocate(tempR1D) - - call mct_aVect_clean(avi) - call mct_aVect_clean(avo) - call mct_gGrid_clean(ggridi) - call mct_gsmap_clean(gsmapi) - - end subroutine datm_shr_getFactors - - !=============================================================================== - real(R8) function datm_shr_eSat(tK,tKbot) - - !--- arguments --- - real(R8),intent(in) :: tK ! temp used in polynomial calculation - real(R8),intent(in) :: tKbot ! bottom atm temp - - !--- local --- - real(R8) :: t ! tK converted to Celcius - real(R8),parameter :: tkFrz = SHR_CONST_TKFRZ ! freezing T of fresh water ~ K - - !--- coefficients for esat over water --- - real(R8),parameter :: a0=6.107799961_R8 - real(R8),parameter :: a1=4.436518521e-01_R8 - real(R8),parameter :: a2=1.428945805e-02_R8 - real(R8),parameter :: a3=2.650648471e-04_R8 - real(R8),parameter :: a4=3.031240396e-06_R8 - real(R8),parameter :: a5=2.034080948e-08_R8 - real(R8),parameter :: a6=6.136820929e-11_R8 - - !--- coefficients for esat over ice --- - real(R8),parameter :: b0=6.109177956_R8 - real(R8),parameter :: b1=5.034698970e-01_R8 - real(R8),parameter :: b2=1.886013408e-02_R8 - real(R8),parameter :: b3=4.176223716e-04_R8 - real(R8),parameter :: b4=5.824720280e-06_R8 - real(R8),parameter :: b5=4.838803174e-08_R8 - real(R8),parameter :: b6=1.838826904e-10_R8 - - !---------------------------------------------------------------------------- - ! 
use polynomials to calculate saturation vapor pressure and derivative with - ! respect to temperature: over water when t > 0 c and over ice when t <= 0 c - ! required to convert relative humidity to specific humidity - !---------------------------------------------------------------------------- - - t = min( 50.0_R8, max(-50.0_R8,(tK-tKfrz)) ) - if ( tKbot < tKfrz) then - datm_shr_eSat = 100.0_R8*(b0+t*(b1+t*(b2+t*(b3+t*(b4+t*(b5+t*b6)))))) - else - datm_shr_eSat = 100.0_R8*(a0+t*(a1+t*(a2+t*(a3+t*(a4+t*(a5+t*a6)))))) - end if - - end function datm_shr_eSat - - !=============================================================================== - !=============================================================================== - -end module datm_shr_mod diff --git a/src/components/data_comps/datm/nuopc/atm_comp_nuopc.F90 b/src/components/data_comps/datm/nuopc/atm_comp_nuopc.F90 deleted file mode 100644 index 0917c7b9027..00000000000 --- a/src/components/data_comps/datm/nuopc/atm_comp_nuopc.F90 +++ /dev/null @@ -1,690 +0,0 @@ -module atm_comp_nuopc - - !---------------------------------------------------------------------------- - ! This is the NUOPC cap for DATM - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_cal_mod , only : shr_cal_noleap, shr_cal_gregorian, shr_cal_ymd2date - use shr_const_mod , only : SHR_CONST_SPVAL - use shr_sys_mod , only : shr_sys_abort - use dshr_nuopc_mod , only : fld_list_type, fldsMax, dshr_realize - use dshr_nuopc_mod , only : ModelInitPhase, ModelSetRunClock, ModelSetMetaData - use dshr_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dshr_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use datm_shr_mod , only : datm_shr_read_namelists, iradsw, datm_shr_getNextRadCDay - use datm_comp_mod , only : datm_comp_advertise, datm_comp_init, datm_comp_run - use datm_comp_mod , only : datm_comp_import, datm_comp_export - use perf_mod , only : t_startf, t_stopf, t_barrierf - - implicit none - private ! except - - public :: SetServices - - private :: InitializeAdvertise - private :: InitializeRealize - private :: ModelAdvance - private :: ModelFinalize - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=128) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - integer :: flds_scalar_index_nextsw_cday = 0 - - integer :: fldsToAtm_num = 0 - integer :: fldsFrAtm_num = 0 - type (fld_list_type) :: fldsToAtm(fldsMax) - type (fld_list_type) :: fldsFrAtm(fldsMax) - - integer :: compid ! mct comp id - integer :: mpicom ! mpi communicator - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! 
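datm_shr_eSat above evaluates sixth-order polynomial fits for saturation vapor pressure, switching to the over-ice coefficients when the bottom-layer air temperature is below freezing and clamping the polynomial argument to +/-50 C. A Python sketch of that evaluation (273.15 is the assumed value of SHR_CONST_TKFRZ; the factor of 100 converts the hPa-based fit to Pa):

```python
TKFRZ = 273.15   # assumed freezing point of fresh water (K), SHR_CONST_TKFRZ

# polynomial coefficients over water (A) and over ice (B), from the code above
A = (6.107799961, 4.436518521e-01, 1.428945805e-02, 2.650648471e-04,
     3.031240396e-06, 2.034080948e-08, 6.136820929e-11)
B = (6.109177956, 5.034698970e-01, 1.886013408e-02, 4.176223716e-04,
     5.824720280e-06, 4.838803174e-08, 1.838826904e-10)

def esat(tk, tkbot):
    """Saturation vapor pressure (Pa); over-ice fit when tkbot is below freezing."""
    t = min(50.0, max(-50.0, tk - TKFRZ))        # clamp to the fit's valid range (deg C)
    c = B if tkbot < TKFRZ else A
    p = c[6]
    for coef in reversed(c[:6]):                 # Horner evaluation of the polynomial
        p = coef + t * p
    return 100.0 * p
```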
number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - character(len=CL) :: case_name ! case name - character(len=CS) :: calendar ! calendar name - logical :: atm_prognostic ! data is sent back to datm - logical :: use_esmf_metadata = .false. - character(*),parameter :: modName = "(atm_comp_nuopc)" - integer, parameter :: debug = 0 ! if > 0 will diagnose export fields - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p3"/), userRoutine=InitializeRealize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, & - specRoutine=ModelAdvance, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, & - specRoutine=ModelSetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, & - specRoutine=ModelFinalize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - integer :: lmpicom - character(len=CL) :: cvalue - integer :: n - integer :: ierr ! error code - integer :: shrlogunit ! original log unit - integer :: localPet - logical :: flds_co2a ! use case - logical :: flds_co2b ! use case - logical :: flds_co2c ! use case - logical :: flds_wiso ! use case - character(len=CL) :: fileName ! 
generic file name - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! generate local mpi comm - !---------------------------------------------------------------------------- - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, mpiCommunicator=lmpicom, localPet=localPet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call mpi_comm_dup(lmpicom, mpicom, ierr) - call mpi_comm_rank(mpicom, my_task, ierr) - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - inst_name = "ATM"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Read input namelists and set present and prognostic flags - !---------------------------------------------------------------------------- - - filename = "datm_in"//trim(inst_suffix) - call datm_shr_read_namelists(filename, mpicom, my_task, master_task, logunit, atm_prognostic) - - !-------------------------------- - ! advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. 
isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxNextSwCday", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nextsw_cday - write(logmsg,*) flds_scalar_index_nextsw_cday - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nextsw_cday = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxNextSwCday') - endif - - call NUOPC_CompAttributeGet(gcomp, name='flds_co2a', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) flds_co2a - call ESMF_LogWrite('flds_co2a = '// trim(cvalue), ESMF_LOGMSG_INFO) - - call NUOPC_CompAttributeGet(gcomp, name='flds_co2b', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) flds_co2b - call ESMF_LogWrite('flds_co2b = '// trim(cvalue), ESMF_LOGMSG_INFO) - - call NUOPC_CompAttributeGet(gcomp, name='flds_co2c', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) flds_co2c - call ESMF_LogWrite('flds_co2c = '// trim(cvalue), ESMF_LOGMSG_INFO) - - call NUOPC_CompAttributeGet(gcomp, name='flds_wiso', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) flds_wiso - call ESMF_LogWrite('flds_wiso = '// trim(cvalue), ESMF_LOGMSG_INFO) - - call datm_comp_advertise(importState, exportState, flds_scalar_name, & - atm_prognostic, flds_wiso, flds_co2a, flds_co2b, flds_co2c, & - fldsFrAtm_num, fldsFrAtm, fldsToAtm_num, fldsToAtm, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_Mesh) :: Emesh - type(ESMF_TIME) :: currTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_Calendar) :: esmf_calendar ! esmf calendar - type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - integer :: current_ymd ! model date - integer :: current_year ! model year - integer :: current_mon ! model month - integer :: current_day ! 
model day - integer :: current_tod ! model sec into model date - integer(I8) :: stepno ! step number - integer :: modeldt ! integer timestep - real(R8) :: nextsw_cday ! calendar of next atm sw - character(len=256) :: cvalue ! character string for input config - integer :: shrlogunit ! original log unit - logical :: read_restart ! start from restart - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_spval ! single column lat - real(R8) :: scmLon = shr_const_spval ! single column lon - real(R8) :: orbEccen ! orb eccentricity (unit-less) - real(R8) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) :: orbObliqr ! orb obliquity (radians) - integer :: nxg, nyg - character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - ! TODO: read_restart, scmlat, scmlon, orbeccen, orbmvelpp, orblambm0, orbobliqr needs to be obtained - ! from the config attributes of the gridded component - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! Determine necessary config variables - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='case_name', value=case_name, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name='scmlon', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmlon - - call NUOPC_CompAttributeGet(gcomp, name='scmlat', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmlat - - call NUOPC_CompAttributeGet(gcomp, name='single_column', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmMode - - call NUOPC_CompAttributeGet(gcomp, name='read_restart', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) read_restart - - call NUOPC_CompAttributeGet(gcomp, name='MCTID', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) compid - - ! Determine orbital values (these might change dynamically) - call NUOPC_CompAttributeGet(gcomp, name='orb_eccen', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbEccen - call NUOPC_CompAttributeGet(gcomp, name='orb_obliqr', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbObliqr - call NUOPC_CompAttributeGet(gcomp, name='orb_lambm0', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbLambm0 - call NUOPC_CompAttributeGet(gcomp, name='orb_mvelpp', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbMvelpp - - !---------------------------------------------------------------------------- - ! 
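! --- Editor's note: stand-alone illustration of the attribute-parsing idiom above ---
! NUOPC_CompAttributeGet returns every config attribute as a string; the cap
! converts it with a Fortran internal read.  The names and values below are
! hypothetical; only the read(cvalue,*) idiom is taken from the code above.
program attr_read_demo
  implicit none
  character(len=64) :: cvalue
  integer           :: nx
  logical           :: single_column
  real              :: orb_eccen
  cvalue = '360'      ; read(cvalue,*) nx             ! string -> integer
  cvalue = '.true.'   ; read(cvalue,*) single_column  ! string -> logical
  cvalue = '1.670e-2' ; read(cvalue,*) orb_eccen      ! string -> real
  print *, nx, single_column, orb_eccen
end program attr_read_demo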
Determine calendar info - !---------------------------------------------------------------------------- - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, advanceCount=stepno, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_TimeGet( currTime, yy=current_year, mm=current_mon, dd=current_day, s=current_tod, & - calkindflag=esmf_caltype, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(current_year, current_mon, current_day, current_ymd) - - if (esmf_caltype == ESMF_CALKIND_NOLEAP) then - calendar = shr_cal_noleap - else if (esmf_caltype == ESMF_CALKIND_GREGORIAN) then - calendar = shr_cal_gregorian - else - call ESMF_LogWrite(subname//" ERROR bad ESMF calendar name "//trim(calendar), ESMF_LOGMSG_ERROR) - rc = ESMF_Failure - return - end if - - call ESMF_TimeIntervalGet( timeStep, s=modeldt, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Set nextsw_cday - !---------------------------------------------------------------------------- - - nextsw_cday = datm_shr_getNextRadCDay( current_ymd, current_tod, stepno, modeldt, iradsw, calendar ) - - !-------------------------------- - ! Generate the mesh - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='mesh_atm', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - Emesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (my_task == master_task) then - write(logunit,*) " (datm_comp_nuopc): obtaining datm mesh from " // trim(cvalue) - end if - - !---------------------------------------------------------------------------- - ! Initialize model - !---------------------------------------------------------------------------- - - call datm_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon, & - orbEccen, orbMvelpp, orbLambm0, orbObliqr, & - calendar, modeldt, current_ymd, current_tod, current_mon, & - atm_prognostic, EMesh, nxg, nyg) - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call dshr_realize( & - state=ExportState, & - fldList=fldsFrAtm, & - numflds=fldsFrAtm_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':datmExport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_realize( & - state=importState, & - fldList=fldsToAtm, & - numflds=fldsToAtm_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':datmImport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - ! 
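! --- Editor's note (clarification; assumes the standard CIME coded-date convention) ---
! shr_cal_ymd2date packs year/month/day into one integer coded date,
! current_ymd = yy*10000 + mm*100 + dd (e.g. 15 Jan 2000 -> 20000115), and
! current_tod is the elapsed seconds into that day.  nextsw_cday, computed above
! from that coded date, is the fractional calendar day of the next shortwave
! calculation and is exported as a coupling scalar below.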
Set the coupling scalars - !-------------------------------- - - call datm_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg), flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(nextsw_cday, flds_scalar_index_nextsw_cday, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug > 0) then - call State_diagnose(exportState, subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - if (use_esmf_metadata) then - call ModelSetMetaData(gcomp, name='DATM', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: importState, exportState - type(ESMF_Time) :: time - type(ESMF_Alarm) :: alarm - type(ESMF_Time) :: currTime - type(ESMF_Time) :: nextTime - type(ESMF_TimeInterval) :: timeStep - integer :: shrlogunit ! original log unit - real(r8) :: nextsw_cday - logical :: write_restart ! restart alarm is ringing - integer :: nextymd ! model date - integer :: nexttod ! model sec into model date - integer :: yr ! year - integer :: mon ! month - integer :: day ! day in month - integer(I8) :: stepno ! step number - integer :: modeldt ! model timestep - real(R8) :: orbEccen ! orb eccentricity (unit-less) - real(R8) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) :: orbObliqr ! orb obliquity (radians) - character(len=CL) :: cvalue - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call t_startf(subname) - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - call memcheck(subname, 5, my_task==master_task) - - !-------------------------------- - ! Reset shr logging to my log file - !-------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! query the Component for its clock, importState and exportState - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, exportState=exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Unpack import state - !-------------------------------- - - if (atm_prognostic) then - call datm_comp_import(importState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - !-------------------------------- - ! 
Run model - !-------------------------------- - - call t_startf('datm_get_attributes') - ! Get orbital parameters (these can be changed by the mediator) - ! TODO: need to put in capability for these to be modified for variable orbitals - call NUOPC_CompAttributeGet(gcomp, name='orb_eccen', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbEccen - call NUOPC_CompAttributeGet(gcomp, name='orb_obliqr', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbObliqr - call NUOPC_CompAttributeGet(gcomp, name='orb_lambm0', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbLambm0 - call NUOPC_CompAttributeGet(gcomp, name='orb_mvelpp', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) orbMvelpp - call t_stopf('datm_get_attributes') - - ! Determine if need to write restarts - - call t_startf('datm_get_clockinfo') - - call ESMF_ClockGetAlarm(clock, alarmname='alarm_restart', alarm=alarm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (ESMF_AlarmIsRinging(alarm, rc=rc)) then - if (ChkErr(rc,__LINE__,u_FILE_u)) return - write_restart = .true. - call ESMF_AlarmRingerOff( alarm, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - write_restart = .false. - endif - - ! For nuopc - the component clock is advanced at the end of the time interval - ! For these to match for now - need to advance nuopc one timestep ahead for - ! shr_strdata time interpolation - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, advanceCount=stepno, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - nextTime = currTime + timeStep - call ESMF_TimeGet( nextTime, yy=yr, mm=mon, dd=day, s=nexttod, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(yr, mon, day, nextymd) - - call ESMF_TimeIntervalGet( timeStep, s=modeldt, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call t_stopf('datm_get_clockinfo') - - ! Advance the model - - call t_startf('datm_run') - call datm_comp_run( mpicom, compid, my_task, master_task, & - inst_suffix, logunit, & - orbEccen, orbMvelpp, orbLambm0, orbObliqr, & - write_restart, nextYMD, nextTOD, mon, modeldt, calendar, & - atm_prognostic, case_name) - call t_stopf('datm_run') - - ! Use nextYMD and nextTOD here since since the component - clock is advance at the END of the time interval - nextsw_cday = datm_shr_getNextRadCDay( nextYMD, nextTOD, stepno, modeldt, iradsw, calendar ) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call t_startf('datm_export') - call datm_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(nextsw_cday, flds_scalar_index_nextsw_cday, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call t_stopf('datm_export') - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug > 0) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'DATM', logunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - end if - - !---------------------------------------------------------------------------- - ! 
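! --- Editor's note (worked example of the one-step-ahead clock logic above) ---
! Because the NUOPC component clock is advanced at the end of the coupling
! interval, the stream data are interpolated at currTime + timeStep.  For a
! 1800 s coupling step starting at 2000-01-01 00:00:00 this gives
! nextymd = 20000101 and nexttod = 1800, and those values (not the current
! time) are what get passed to datm_comp_run and datm_shr_getNextRadCDay.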
Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - call t_stopf(subname) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(*), parameter :: F00 = "('(datm_comp_final) ',8a)" - character(*), parameter :: F91 = "('(datm_comp_final) ',73('-'))" - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) 'datm : end of main integration loop' - write(logunit,F91) - end if - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelFinalize - -end module atm_comp_nuopc diff --git a/src/components/data_comps/datm/nuopc/datm_comp_mod.F90 b/src/components/data_comps/datm/nuopc/datm_comp_mod.F90 deleted file mode 100644 index df6d5876ff8..00000000000 --- a/src/components/data_comps/datm/nuopc/datm_comp_mod.F90 +++ /dev/null @@ -1,1643 +0,0 @@ -module datm_comp_mod - - use NUOPC , only : NUOPC_Advertise - use ESMF , only : ESMF_State, ESMF_SUCCESS, ESMF_State - use ESMF , only : ESMF_Mesh, ESMF_DistGrid, ESMF_MeshGet, ESMF_DistGridGet - use perf_mod , only : t_startf, t_stopf, t_adj_detailf, t_barrierf - use mct_mod , only : mct_gsmap_init - use mct_mod , only : mct_avect, mct_avect_indexRA, mct_avect_zero, mct_aVect_nRattr - use mct_mod , only : mct_avect_init, mct_avect_lsize - use shr_kind_mod , only : r8=>shr_kind_r8, cxx=>shr_kind_cxx, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_const_mod , only : SHR_CONST_SPVAL - use shr_const_mod , only : SHR_CONST_TKFRZ - use shr_const_mod , only : SHR_CONST_PI - use shr_const_mod , only : SHR_CONST_PSTD - use shr_const_mod , only : SHR_CONST_STEBOL - use shr_const_mod , only : SHR_CONST_RDAIR - use shr_string_mod , only : shr_string_listGetName - use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_cal_mod , only : shr_cal_calendarname - use shr_cal_mod , only : shr_cal_date2julian, shr_cal_datetod2string - use shr_mpi_mod , only : shr_mpi_bcast, shr_mpi_max - use shr_precip_mod , only : shr_precip_partition_rain_snow_ramp - use shr_strdata_mod , only : shr_strdata_init_model_domain - use shr_strdata_mod , only : shr_strdata_init_streams - use shr_strdata_mod , only : shr_strdata_init_mapping - use shr_strdata_mod , only : shr_strdata_type, shr_strdata_pioinit - use shr_strdata_mod , only : shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only : shr_strdata_advance, shr_strdata_restWrite - use shr_strdata_mod , only : shr_strdata_setorbs - use shr_dmodel_mod , only : shr_dmodel_translate_list, shr_dmodel_translateAV_list - use dshr_methods_mod , only : ChkErr - use dshr_nuopc_mod , only : fld_list_type, dshr_fld_add, dshr_export, dshr_import - use datm_shr_mod , only : datm_shr_esat, datm_shr_CORE2getFactors - use datm_shr_mod , only : datamode ! namelist input - use datm_shr_mod , only : wiso_datm ! namelist input - use datm_shr_mod , only : rest_file ! namelist input - use datm_shr_mod , only : rest_file_strm ! 
namelist input - use datm_shr_mod , only : factorfn ! namelist input - use datm_shr_mod , only : iradsw ! namelist input - use datm_shr_mod , only : nullstr - use datm_shr_mod , only : presaero - use datm_shr_mod , only : SDATM - - ! !PUBLIC TYPES: - - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: datm_comp_advertise - public :: datm_comp_init - public :: datm_comp_run - public :: datm_comp_import - public :: datm_comp_export - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - type(mct_aVect) :: x2a - type(mct_aVect) :: a2x - character(CXX) :: flds_a2x = '' - character(CXX) :: flds_x2a = '' - - integer :: debug_import = 0 ! debug level (if > 0 will print all import fields) - integer :: debug_export = 0 ! debug level (if > 0 will print all export fields) - - real(R8) :: tbotmax ! units detector - real(R8) :: tdewmax ! units detector - real(R8) :: anidrmax ! existance detector - - ! Attribute vectors field indices - integer :: kz,ktopo,ku,kv,ktbot,kptem,kshum,kdens,kpbot,kpslv,klwdn - integer :: krc,krl,ksc,ksl,kswndr,kswndf,kswvdr,kswvdf,kswnet - integer :: kanidr,kanidf,kavsdr,kavsdf - integer :: kshum_16O, kshum_18O, kshum_HDO - integer :: krc_16O, krc_18O, krc_HDO - integer :: krl_16O, krl_18O, krl_HDO - integer :: ksc_16O, ksc_18O, ksc_HDO - integer :: ksl_16O, ksl_18O, ksl_HDO - integer :: stbot,swind,sz,spbot,sshum,stdew,srh,slwdn,sswdn,sswdndf,sswdndr - integer :: sprecc,sprecl,sprecn,sco2p,sco2d,sswup,sprec,starcf - integer :: srh_16O, srh_18O, srh_HDO, sprecn_16O, sprecn_18O, sprecn_HDO - integer :: sprecsf - integer :: sprec_af,su_af,sv_af,stbot_af,sshum_af,spbot_af,slwdn_af,sswdn_af - integer :: kbcphidry, kbcphodry, kbcphiwet - integer :: kocphidry, kocphodry, kocphiwet - integer :: kdstdry1, kdstdry2, kdstdry3, kdstdry4 - integer :: kdstwet1, kdstwet2, kdstwet3, kdstwet4 - - type(mct_avect) :: avstrm ! av of data from stream - character(len=CS), pointer :: avifld(:) ! character array for field names coming from streams - character(len=CS), pointer :: avofld(:) ! character array for field names to be sent/received from mediator - character(len=CS), pointer :: stifld(:) ! character array for field names coming from streams - character(len=CS), pointer :: stofld(:) ! character array for field intermediate avs for calculations - character(len=CL), pointer :: ilist_av(:) ! input character array for translation (avifld->avofld) - character(len=CL), pointer :: olist_av(:) ! output character array for translation (avifld->avofld) - integer , pointer :: count_av(:) ! number of fields in translation (avifld->avofld) - character(len=CL), pointer :: ilist_st(:) ! input character array for translation (stifld->strmofld) - character(len=CL), pointer :: olist_st(:) ! output character array for translation (stifld->strmofld) - integer , pointer :: count_st(:) ! number of fields in translation (stifld->strmofld) - character(len=CXX) :: flds_strm = '' ! colon deliminated string of field names - - real(R8), pointer :: xc(:), yc(:) ! 
arrays of model latitudes and longitudes - real(R8), pointer :: windFactor(:) - real(R8), pointer :: winddFactor(:) - real(R8), pointer :: qsatFactor(:) - - real(R8),parameter :: tKFrz = SHR_CONST_TKFRZ - real(R8),parameter :: degtorad = SHR_CONST_PI/180.0_R8 - real(R8),parameter :: pstd = SHR_CONST_PSTD ! standard pressure ~ Pa - real(R8),parameter :: stebol = SHR_CONST_STEBOL ! Stefan-Boltzmann constant ~ W/m^2/K^4 - real(R8),parameter :: rdair = SHR_CONST_RDAIR ! dry air gas constant ~ J/K/kg - real(R8),parameter :: avg_c0 = 61.846_R8 - real(R8),parameter :: avg_c1 = 1.107_R8 - real(R8),parameter :: amp_c0 = -21.841_R8 - real(R8),parameter :: amp_c1 = -0.447_R8 - real(R8),parameter :: phs_c0 = 0.298_R8 - real(R8),parameter :: dLWarc = -5.000_R8 - - real(R8) :: dTarc(12) - data dTarc / 0.49_R8, 0.06_R8,-0.73_R8, -0.89_R8,-0.77_R8,-1.02_R8, & - -1.99_R8,-0.91_R8, 1.72_R8, 2.30_R8, 1.81_R8, 1.06_R8/ - - logical :: flds_co2a, flds_co2b, flds_co2c, flds_wiso - - character(len=*),parameter :: rpfile = 'rpointer.atm' - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine datm_comp_advertise(importState, exportState, flds_scalar_name, & - atm_prognostic, flds_wiso_in, flds_co2a_in, flds_co2b_in, flds_co2c_in, & - fldsFrAtm_num, fldsFrAtm, fldsToAtm_num, fldsToAtm, rc) - - ! 1. determine export and import fields to advertise to mediator - ! 2. determine translation of fields from streams to export/import fields - ! 3. determine module indices for attribute vectors - - ! input/output arguments - type(ESMF_State) , intent(inout) :: importState - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: flds_scalar_name - logical , intent(in) :: atm_prognostic - logical , intent(in) :: flds_wiso_in ! use case - logical , intent(in) :: flds_co2a_in ! use case - logical , intent(in) :: flds_co2b_in ! use case - logical , intent(in) :: flds_co2c_in ! use case - integer , intent(out) :: fldsFrAtm_num - type (fld_list_type) , intent(out) :: fldsFrAtm(:) - integer , intent(out) :: fldsToAtm_num - type (fld_list_type) , intent(out) :: fldsToAtm(:) - integer , intent(out) :: rc - - ! local variables - integer :: n - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - flds_wiso = flds_wiso_in - flds_co2a = flds_co2a_in - flds_co2b = flds_co2b_in - flds_co2c = flds_co2c_in - - !------------------- - ! export fields - !------------------- - - ! scalar fields that need to be advertised - - fldsFrAtm_num=1 - fldsFrAtm(1)%stdname = trim(flds_scalar_name) - - ! 
export fields that have a corresponding stream field - - call dshr_fld_add(data_fld="topo", data_fld_array=avifld, model_fld="Sa_topo", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=ktopo, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="z", data_fld_array=avifld, model_fld="Sa_z", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kz, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="u", data_fld_array=avifld, model_fld="Sa_u", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=ku, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="v", data_fld_array=avifld, model_fld="Sa_v", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kv, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="ptem", data_fld_array=avifld, model_fld="Sa_ptem", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kptem, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="dens", data_fld_array=avifld, model_fld="Sa_dens", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kdens, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="pslv", data_fld_array=avifld, model_fld="Sa_pslv", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kpslv, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="rainc", data_fld_array=avifld, model_fld="Faxa_rainc", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=krc, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - call dshr_fld_add(data_fld="rainl", data_fld_array=avifld, model_fld="Faxa_rainl", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=krl, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="snowc", data_fld_array=avifld, model_fld="Faxa_snowc", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=ksc, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - call dshr_fld_add(data_fld="snowl", data_fld_array=avifld, model_fld="Faxa_snowl", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=ksl, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="swndr", data_fld_array=avifld, model_fld="Faxa_swndr", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kswndr, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - call dshr_fld_add(data_fld="swvdr", data_fld_array=avifld, model_fld="Faxa_swvdr", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kswvdr, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - call dshr_fld_add(data_fld="swndf", data_fld_array=avifld, model_fld="Faxa_swndf", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kswndf, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - call dshr_fld_add(data_fld="swvdf", data_fld_array=avifld, model_fld="Faxa_swvdf", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kswvdf, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - call dshr_fld_add(data_fld="swnet", data_fld_array=avifld, model_fld="Faxa_swnet", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kswnet, fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - ! 
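! --- Editor's note (summary of the dshr_fld_add pattern above, not new code) ---
! Each dshr_fld_add call pairs a raw stream variable name (data_fld, e.g.
! "tbot") with the coupler field name advertised to the mediator (model_fld,
! e.g. "Sa_tbot"), appends the coupler name to the colon-delimited list
! flds_a2x, and returns the attribute-vector index (ktbot, kpbot, ...) that the
! run phase later uses to fill a2x.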
export fields that have a corresponding stream field AND that have a corresponding internal field - - call dshr_fld_add(data_fld="tbot", data_fld_array=avifld, model_fld="Sa_tbot", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=ktbot , fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="pbot", data_fld_array=avifld, model_fld="Sa_pbot", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kpbot , fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="shum", data_fld_array=avifld, model_fld="Sa_shum", model_fld_array=avofld, & - model_fld_concat=flds_a2x, model_fld_index=kshum , fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="lwdn", data_fld_array=avifld, & - model_fld="Faxa_lwdn", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=klwdn, & - fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - if (flds_co2a .or. flds_co2b .or. flds_co2c) then - call dshr_fld_add(data_fld="co2prog", data_fld_array=avifld, & - model_fld="Sa_co2prog", model_fld_array=avofld, model_fld_concat=flds_x2a, & - fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - - call dshr_fld_add(data_fld="co2diag", data_fld_array=avifld, & - model_fld="Sa_co2diag", model_fld_array=avofld, model_fld_concat=flds_x2a, & - fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm) - end if - - if (presaero) then - - call dshr_fld_add(data_fld="bcphidry", data_fld_array=avifld, & - model_fld="Faxa_bcphidry", model_fld_array=avofld, model_fld_index=kbcphidry, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="bcphodry", data_fld_array=avifld, & - model_fld="Faxa_bcphodry", model_fld_array=avofld, model_fld_index=kbcphodry, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="bcphiwet", data_fld_array=avifld, & - model_fld="Faxa_bcphiwet", model_fld_array=avofld, model_fld_index=kbcphiwet, model_fld_concat=flds_a2x) - - call dshr_fld_add(med_fld='Faxa_bcph', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(data_fld="ocphidry", data_fld_array=avifld, & - model_fld="Faxa_ocphidry", model_fld_array=avofld, model_fld_index=kocphidry, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="ocphodry", data_fld_array=avifld, & - model_fld="Faxa_ocphodry", model_fld_array=avofld, model_fld_index=kocphodry, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="ocphiwet", data_fld_array=avifld, & - model_fld="Faxa_ocphiwet", model_fld_array=avofld, model_fld_index=kocphiwet, model_fld_concat=flds_a2x) - - call dshr_fld_add(med_fld='Faxa_ocph', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(data_fld="dstwet1", data_fld_array=avifld, & - model_fld="Faxa_dstwet1", model_fld_array=avofld, model_fld_index=kdstwet1, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="dstwet2", data_fld_array=avifld, & - model_fld="Faxa_dstwet2", model_fld_array=avofld, model_fld_index=kdstwet2, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="dstwet3", data_fld_array=avifld, & - model_fld="Faxa_dstwet3", model_fld_array=avofld, model_fld_index=kdstwet3, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="dstwet4", data_fld_array=avifld, & - model_fld="Faxa_dstwet4", model_fld_array=avofld, model_fld_index=kdstwet4, model_fld_concat=flds_a2x) - - call dshr_fld_add(med_fld='Faxa_dstwet', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=4) - - call 
dshr_fld_add(data_fld="dstdry1", data_fld_array=avifld, & - model_fld="Faxa_dstdry1", model_fld_array=avofld, model_fld_index=kdstdry1, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="dstdry2", data_fld_array=avifld, & - model_fld="Faxa_dstdry2", model_fld_array=avofld, model_fld_index=kdstdry2, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="dstdry3", data_fld_array=avifld, & - model_fld="Faxa_dstdry3", model_fld_array=avofld, model_fld_index=kdstdry3, model_fld_concat=flds_a2x) - call dshr_fld_add(data_fld="dstdry4", data_fld_array=avifld, & - model_fld="Faxa_dstdry4", model_fld_array=avofld, model_fld_index=kdstdry4, model_fld_concat=flds_a2x) - - call dshr_fld_add(med_fld='Faxa_dstdry', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=4) - - end if - - ! isopic forcing - if (flds_wiso) then - call dshr_fld_add(data_fld="rainc_16O", data_fld_array=avifld,& - model_fld="Faxa_rainc_18O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=krc_16O) - call dshr_fld_add(data_fld="rainc_18O", data_fld_array=avifld,& - model_fld="Faxa_rainc_18O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=krc_18O) - call dshr_fld_add(data_fld="rainc_HDO", data_fld_array=avifld, & - model_fld="Faxa_rainc_HDO", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=krc_HDO) - call dshr_fld_add(med_fld='Faxa_rainc_wiso', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(data_fld="rainl_16O", data_fld_array=avifld, & - model_fld="Faxa_rainl_16O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=krl_16O) - call dshr_fld_add(data_fld="rainl_18O", data_fld_array=avifld, & - model_fld="Faxa_rainl_18O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=krl_18O) - call dshr_fld_add(data_fld="rainl_HDO", data_fld_array=avifld, & - model_fld="Faxa_rainl_HDO", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=krl_HDO) - call dshr_fld_add(med_fld='Faxa_rainl_wiso', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(data_fld="snowc_16O", data_fld_array=avifld, & - model_fld="Faxa_snowc_16O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=ksc_18O) - call dshr_fld_add(data_fld="snowc_18O", data_fld_array=avifld, & - model_fld="Faxa_snowc_18O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=ksc_18O) - call dshr_fld_add(data_fld="snowc_HDO", data_fld_array=avifld, & - model_fld="Faxa_snowc_HDO", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=ksc_HDO) - call dshr_fld_add(med_fld='Faxa_snowc_wiso', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(data_fld="snowl_16O", data_fld_array=avifld, & - model_fld="Faxa_snowl_16O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=ksl_18O) - call dshr_fld_add(data_fld="snowl_18O", data_fld_array=avifld, & - model_fld="Faxa_snowl_18O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=ksl_18O) - call dshr_fld_add(data_fld="snowl_HDO", data_fld_array=avifld, & - model_fld="Faxa_snowl_HDO", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=ksl_HDO) - call dshr_fld_add(med_fld='Faxa_snowl_wiso', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(data_fld="shum_16O", 
data_fld_array=avifld, & - model_fld="Sa_shum_16O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=kshum_16O) - call dshr_fld_add(data_fld="shum_18O", data_fld_array=avifld, & - model_fld="Sa_shum_18O", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=kshum_18O) - call dshr_fld_add(data_fld="shum_HDO", data_fld_array=avifld, & - model_fld="Sa_shum_HDO", model_fld_array=avofld, model_fld_concat=flds_a2x, model_fld_index=kshum_HDO) - call dshr_fld_add(med_fld='Faxa_shum_wiso', fldlist_num=fldsFrAtm_num, fldlist=fldsFrAtm, & - ungridded_lbound=1, ungridded_ubound=3) - end if - - !------------------- - ! import fields (have no corresponding stream fields) - !------------------- - - if (atm_prognostic) then - - fldsToAtm_num=1 - fldsToAtm(1)%stdname = trim(flds_scalar_name) - - ! The module indices set by the model_fld_index argument are used in the run phase - call dshr_fld_add(model_fld="Sx_avsdr", model_fld_concat=flds_x2a, model_fld_index=kavsdr, & - fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Sx_anidr", model_fld_concat=flds_x2a, model_fld_index=kanidr, & - fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Sx_avsdf", model_fld_concat=flds_x2a, model_fld_index=kavsdf, & - fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Sx_anidf", model_fld_concat=flds_x2a, model_fld_index=kanidf, & - fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - - call dshr_fld_add(model_fld="Sx_tref" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Sx_qref" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Sx_t" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="So_t" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Sl_snowh" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Sl_lfrac" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Si_ifrac" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="So_ofrac" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Faxx_taux" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Faxx_tauy" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Faxx_lat" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Faxx_sen" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Faxx_lwup" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - call dshr_fld_add(model_fld="Faxx_evap" , model_fld_concat=flds_x2a, fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - ! call dshr_fld_add(model_fld="Fall_fco2_lnd" , model_fld_concat=flds_x2a ,fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - ! call dshr_fld_add(model_fld="Faoo_fco2_ocn" , model_fld_concat=flds_x2a ,fldlist_num=fldsToAtm_num, fldlist=fldsToAtm) - - end if - - !------------------- - ! 
advertise fields for import and export states - !------------------- - - do n = 1,fldsFrAtm_num - call NUOPC_Advertise(exportState, standardName=fldsFrAtm(n)%stdname, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - enddo - - if (atm_prognostic) then - do n = 1,fldsToAtm_num - call NUOPC_Advertise(importState, standardName=fldsToAtm(n)%stdname, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end do - end if - - !------------------- - ! module character arrays stifld and stofld - !------------------- - - ! - stifld is a character array of stream field names - ! - stofld is a character array of data model field names that have a one-to-one correspondence with names in stifld - ! - flds_strm is a colon delimited string of field names that is created from the field names in stofld for ONLY - ! those field names that are available in the data streams present in SDATM%sdatm - ! - avstrm is an attribute vector created from flds_strm - - call dshr_fld_add(data_fld="wind" , data_fld_array=stifld, model_fld="strm_wind" , model_fld_array=stofld) - call dshr_fld_add(data_fld="tdew" , data_fld_array=stifld, model_fld="strm_tdew" , model_fld_array=stofld) - call dshr_fld_add(data_fld="tbot" , data_fld_array=stifld, model_fld="strm_tbot" , model_fld_array=stofld) - call dshr_fld_add(data_fld="pbot" , data_fld_array=stifld, model_fld="strm_pbot" , model_fld_array=stofld) - call dshr_fld_add(data_fld="shum" , data_fld_array=stifld, model_fld="strm_shum" , model_fld_array=stofld) - call dshr_fld_add(data_fld="lwdn" , data_fld_array=stifld, model_fld="strm_lwdn" , model_fld_array=stofld) - call dshr_fld_add(data_fld="wind" , data_fld_array=stifld, model_fld="strm_wind" , model_fld_array=stofld) - call dshr_fld_add(data_fld="rh" , data_fld_array=stifld, model_fld="strm_rh" , model_fld_array=stofld) - call dshr_fld_add(data_fld="swdn" , data_fld_array=stifld, model_fld="strm_swdn" , model_fld_array=stofld) - call dshr_fld_add(data_fld="swdndf" , data_fld_array=stifld, model_fld="strm_swdndf" , model_fld_array=stofld) - call dshr_fld_add(data_fld="swdndr" , data_fld_array=stifld, model_fld="strm_swdndr" , model_fld_array=stofld) - call dshr_fld_add(data_fld="prec" , data_fld_array=stifld, model_fld="strm_prec" , model_fld_array=stofld) - call dshr_fld_add(data_fld="precc" , data_fld_array=stifld, model_fld="strm_precc" , model_fld_array=stofld) - call dshr_fld_add(data_fld="precl" , data_fld_array=stifld, model_fld="strm_precl" , model_fld_array=stofld) - call dshr_fld_add(data_fld="precn" , data_fld_array=stifld, model_fld="strm_precn" , model_fld_array=stofld) - call dshr_fld_add(data_fld="swup" , data_fld_array=stifld, model_fld="strm_swup" , model_fld_array=stofld) - call dshr_fld_add(data_fld="tarcf" , data_fld_array=stifld, model_fld="strm_tarcf" , model_fld_array=stofld) - - ! water isotopes - call dshr_fld_add(data_fld="rh_16O" , data_fld_array=stifld, model_fld="strm_rh_16O" , model_fld_array=stofld) - call dshr_fld_add(data_fld="rh_18O" , data_fld_array=stifld, model_fld="strm_rh_18O" , model_fld_array=stofld) - call dshr_fld_add(data_fld="rh_HDO" , data_fld_array=stifld, model_fld="strm_rh_HDO" , model_fld_array=stofld) - call dshr_fld_add(data_fld="precn_16O" , data_fld_array=stifld, model_fld="strm_precn_16O" , model_fld_array=stofld) - call dshr_fld_add(data_fld="precn_18O" , data_fld_array=stifld, model_fld="strm_precn_18O" , model_fld_array=stofld) - call dshr_fld_add(data_fld="precn_HDO" , data_fld_array=stifld, model_fld="strm_precn_HDO" , model_fld_array=stofld) - - ! 
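! --- Editor's note (clarification, not original code) ---
! The NUOPC_Advertise loop above only registers standard names on the import
! and export states; the actual ESMF_Fields are created and attached later by
! dshr_realize in InitializeRealize, once the mesh is available.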
values for optional bias correction / anomaly forcing (add Sa_precsf for precip scale factor) - call dshr_fld_add(data_fld="precsf" , data_fld_array=stifld, model_fld="strm_precsf" , model_fld_array=stofld) - call dshr_fld_add(data_fld="prec_af" , data_fld_array=stifld, model_fld="strm_prec_af" , model_fld_array=stofld) - call dshr_fld_add(data_fld="u_af" , data_fld_array=stifld, model_fld="strm_u_af" , model_fld_array=stofld) - call dshr_fld_add(data_fld="v_af" , data_fld_array=stifld, model_fld="strm_v_af" , model_fld_array=stofld) - call dshr_fld_add(data_fld="tbot_af" , data_fld_array=stifld, model_fld="strm_tbot_af" , model_fld_array=stofld) - call dshr_fld_add(data_fld="pbot_af" , data_fld_array=stifld, model_fld="strm_pbot_af" , model_fld_array=stofld) - call dshr_fld_add(data_fld="shum_af" , data_fld_array=stifld, model_fld="strm_shum_af" , model_fld_array=stofld) - call dshr_fld_add(data_fld="swdn_af" , data_fld_array=stifld, model_fld="strm_swdn_af" , model_fld_array=stofld) - call dshr_fld_add(data_fld="lwdn_af" , data_fld_array=stifld, model_fld="strm_lwdn_af" , model_fld_array=stofld) - - if (flds_co2a .or. flds_co2b .or. flds_co2c) then - call dshr_fld_add(data_fld="co2prog", data_fld_array=stifld, model_fld="strm_co2prog" , model_fld_array=stofld) - call dshr_fld_add(data_fld="co2diag", data_fld_array=stifld, model_fld="strm_co2diag" , model_fld_array=stofld) - end if - - end subroutine datm_comp_advertise - - !=============================================================================== - - subroutine datm_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon, & - orbEccen, orbMvelpp, orbLambm0, orbObliqr, & - calendar, modeldt, current_ymd, current_tod, current_mon, & - atm_prognostic, mesh, nxg, nyg) - - use dshr_nuopc_mod, only : dshr_fld_add - - ! !DESCRIPTION: initialize data atm model - - ! !INPUT/OUTPUT PARAMETERS: - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie."lnd_0001") - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - real(R8) , intent(in) :: orbEccen ! orb eccentricity (unit-less) - real(R8) , intent(in) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) , intent(in) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) , intent(in) :: orbObliqr ! orb obliquity (radians) - character(len=*) , intent(in) :: calendar ! calendar type - integer , intent(in) :: modeldt ! model time step - integer , intent(in) :: current_ymd ! model date - integer , intent(in) :: current_tod ! model sec into model date - integer , intent(in) :: current_mon ! model month - logical , intent(in) :: atm_prognostic ! if true, need x2a data - type(ESMF_Mesh) , intent(inout) :: mesh - integer , intent(out) :: nxg, nyg - - !--- local variables --- - integer :: n,k ! generic counters - integer :: lsize ! local size - integer :: kmask ! field reference - integer :: klon,klat ! field reference - integer :: kfld ! 
fld index - integer :: cnt ! counter - logical :: exists,exists1 ! filename existance - integer :: nu ! unit number - integer :: stepno ! step number - type(ESMF_DistGrid) :: distGrid - integer, allocatable, target :: gindex(:) - integer :: rc - integer :: dimCount - integer :: tileCount - integer :: deCount - integer :: gsize - integer, allocatable :: elementCountPTile(:) - integer, allocatable :: indexCountPDE(:,:) - integer :: spatialDim - integer :: numOwnedElements - real(R8), pointer :: ownedElemCoords(:) - character(*), parameter :: F00 ="('(datm_comp_init) ',8a)" - character(*), parameter :: F01 ="('(datm_comp_init) ',a,2f10.4)" - character(*), parameter :: subName ="(datm_comp_init)" - !------------------------------------------------------------------------------- - - call t_startf('DATM_INIT') - - !---------------------------------------------------------------------------- - ! Initialize PIO - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDATM, COMPID) - - !---------------------------------------------------------------------------- - ! Create a data model global seqmap - !---------------------------------------------------------------------------- - - call t_startf('datm_strdata_init') - - if (my_task == master_task) write(logunit,F00) ' initialize SDATM gsmap' - - ! obtain the distgrid from the mesh that was read in - call ESMF_MeshGet(Mesh, elementdistGrid=distGrid, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determin local size on my processor - call ESMF_distGridGet(distGrid, localDe=0, elementCount=lsize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global index space for my processor - allocate(gindex(lsize)) - call ESMF_distGridGet(distGrid, localDe=0, seqIndexList=gindex, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global size of distgrid - call ESMF_distGridGet(distGrid, dimCount=dimCount, deCount=deCount, tileCount=tileCount, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - allocate(elementCountPTile(tileCount)) - call ESMF_distGridGet(distGrid, elementCountPTile=elementCountPTile, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - gsize = 0 - do n = 1,size(elementCountPTile) - gsize = gsize + elementCountPTile(n) - end do - deallocate(elementCountPTile) - - ! create the data model gsmap given the local size, global size and gindex - call mct_gsMap_init( SDATM%gsmap, gindex, mpicom, compid, lsize, gsize) - deallocate(gindex) - - !---------------------------------------------------------------------------- - ! Initialize SDATM - !---------------------------------------------------------------------------- - - ! The call to shr_strdata_init_model_domain creates the SDATM%gsmap which - ! is a '2d1d' decommp (1d decomp of 2d grid) and also create SDATM%grid - - SDATM%calendar = trim(shr_cal_calendarName(trim(calendar))) - - if (scmmode) then - if (my_task == master_task) write(logunit,F01) ' scm lon lat = ',scmlon,scmlat - call shr_strdata_init_model_domain(SDATM, mpicom, compid, my_task, & - scmmode=scmmode, scmlon=scmlon, scmlat=scmlat, gsmap=SDATM%gsmap) - else - call shr_strdata_init_model_domain(SDATM, mpicom, compid, my_task, gsmap=SDATM%gsmap) - end if - - if (my_task == master_task) then - call shr_strdata_print(SDATM,'SDATM data') - endif - - ! 
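! --- Editor's note (clarification of the gsmap construction above) ---
! The ESMF mesh decomposition is reused as the MCT decomposition: gindex holds
! this task's global element indices from the mesh distgrid, gsize is the sum
! of elementCountPTile over all tiles, and both are handed to mct_gsMap_init,
! so SDATM's gsmap matches the mesh layout one-to-one.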
obtain mesh lats and lons - call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - allocate(ownedElemCoords(spatialDim*numOwnedElements)) - allocate(xc(numOwnedElements), yc(numOwnedElements)) - call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (numOwnedElements /= lsize) then - call shr_sys_abort('ERROR: numOwnedElements is not equal to lsize') - end if - do n = 1,lsize - xc(n) = ownedElemCoords(2*n-1) - yc(n) = ownedElemCoords(2*n) - end do - - ! error check that mesh lats and lons correspond to those on the input domain file - klon = mct_aVect_indexRA(SDATM%grid%data,'lon') - do n = 1, lsize - if (abs(mod(SDATM%grid%data%rattr(klon,n) - xc(n),360.0_R8)) > 1.e-5) then - write(6,*)'ERROR: DATM n, lon(domain), lon(mesh) = ',n, SDATM%grid%data%rattr(klon,n),xc(n) - write(6,*)'ERROR: DATM lon diff = ',abs(SDATM%grid%data%rattr(klon,n) - xc(n)),' too large' - call shr_sys_abort() - end if - !SDATM%grid%data%rattr(klon,n) = xc(n) ! overwrite ggrid with mesh data - xc(n) = SDATM%grid%data%rattr(klon,n) - end do - klat = mct_aVect_indexRA(SDATM%grid%data,'lat') - do n = 1, lsize - if (abs( SDATM%grid%data%rattr(klat,n) - yc(n)) > 1.e-5) then - write(6,*)'ERROR: DATM n, lat(domain), lat(mesh) = ',n,SDATM%grid%data%rattr(klat,n),yc(n) - write(6,*)'ERROR: DATM lat diff = ',abs(SDATM%grid%data%rattr(klat,n) - yc(n)),' too large' - call shr_sys_abort() - end if - !SDATM%grid%data%rattr(klat,n) = yc(n) ! overwrite ggrid with mesh data - yc(n) = SDATM%grid%data%rattr(klat,n) - end do - - ! overwrite mask and frac - k = mct_aVect_indexRA(SDATM%grid%data,'mask') - SDATM%grid%data%rAttr(k,:) = 1.0_R8 - - k = mct_aVect_indexRA(SDATM%grid%data,'frac') - SDATM%grid%data%rAttr(k,:) = 1.0_R8 - - if (my_task == master_task) then - call shr_strdata_print(SDATM,'ATM data') - endif - - !---------------------------------------------------------------------------- - ! Initialize SDATM attributes for streams and mapping of streams to model domain - !---------------------------------------------------------------------------- - - call shr_strdata_init_streams(SDATM, compid, mpicom, my_task) - call shr_strdata_init_mapping(SDATM, compid, mpicom, my_task) - - !---------------------------------------------------------------------------- - ! allocate module arrays - !---------------------------------------------------------------------------- - - allocate(windFactor(lsize)) - allocate(winddFactor(lsize)) - allocate(qsatFactor(lsize)) - - call t_stopf('datm_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('datm_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - - call mct_aVect_init(a2x, rList=flds_a2x, lsize=lsize) - call mct_aVect_zero(a2x) - call mct_aVect_init(x2a, rList=flds_x2a, lsize=lsize) - call mct_aVect_zero(x2a) - - ! Initialize internal attribute vectors for optional streams - ! Create the colon deliminted list flds_strm based on mapping the - ! input stream fields from SDATM%avs(n) to with the names in stifld to stofld - - cnt = 0 - flds_strm = '' - do n = 1,SDATM%nstreams - ! Loop over the field names in stifld - do k = 1,size(stifld) - ! 
Search the streams for the field name stifld(k) - kfld = mct_aVect_indexRA(SDATM%avs(n), trim(stifld(k)), perrWith='quiet') - if (kfld > 0) then - cnt = cnt + 1 - ! Append the colon deliminted flds_strm with the mapped field name stofld(k) - if (cnt == 1) then - flds_strm = trim(stofld(k)) - else - flds_strm = trim(flds_strm)//':'//trim(stofld(k)) - endif - endif - enddo - enddo - - ! Initialize avstrm based on the active streams determined above - if (my_task == master_task) write(logunit,F00) ' flds_strm = ',trim(flds_strm) - call mct_aVect_init(avstrm, rList=flds_strm, lsize=lsize) - call mct_aVect_zero(avstrm) - - ! Note: because the following needs to occur AFTER we determine the fields in - ! flds_strm - the indices below CANNOT be set in the datm_comp_advertise phase - - ! Now set indices into these active streams - stbot = mct_aVect_indexRA(avstrm,'strm_tbot' ,perrWith='quiet') - swind = mct_aVect_indexRA(avstrm,'strm_wind' ,perrWith='quiet') - sz = mct_aVect_indexRA(avstrm,'strm_z' ,perrWith='quiet') - spbot = mct_aVect_indexRA(avstrm,'strm_pbot' ,perrWith='quiet') - sshum = mct_aVect_indexRA(avstrm,'strm_shum' ,perrWith='quiet') - stdew = mct_aVect_indexRA(avstrm,'strm_tdew' ,perrWith='quiet') - srh = mct_aVect_indexRA(avstrm,'strm_rh' ,perrWith='quiet') - slwdn = mct_aVect_indexRA(avstrm,'strm_lwdn' ,perrWith='quiet') - sswdn = mct_aVect_indexRA(avstrm,'strm_swdn' ,perrWith='quiet') - sswdndf= mct_aVect_indexRA(avstrm,'strm_swdndf' ,perrWith='quiet') - sswdndr= mct_aVect_indexRA(avstrm,'strm_swdndr' ,perrWith='quiet') - sprecc = mct_aVect_indexRA(avstrm,'strm_precc' ,perrWith='quiet') - sprecl = mct_aVect_indexRA(avstrm,'strm_precl' ,perrWith='quiet') - sprecn = mct_aVect_indexRA(avstrm,'strm_precn' ,perrWith='quiet') - sco2p = mct_aVect_indexRA(avstrm,'strm_co2p' ,perrWith='quiet') - sco2d = mct_aVect_indexRA(avstrm,'strm_co2d' ,perrWith='quiet') - sswup = mct_aVect_indexRA(avstrm,'strm_swup' ,perrWith='quiet') - sprec = mct_aVect_indexRA(avstrm,'strm_prec' ,perrWith='quiet') - starcf = mct_aVect_indexRA(avstrm,'strm_tarcf' ,perrWith='quiet') - - ! anomaly forcing - sprecsf = mct_aVect_indexRA(avstrm,'strm_precsf' ,perrWith='quiet') - sprec_af = mct_aVect_indexRA(avstrm,'strm_prec_af' ,perrWith='quiet') - su_af = mct_aVect_indexRA(avstrm,'strm_u_af' ,perrWith='quiet') - sv_af = mct_aVect_indexRA(avstrm,'strm_v_af' ,perrWith='quiet') - stbot_af = mct_aVect_indexRA(avstrm,'strm_tbot_af' ,perrWith='quiet') - spbot_af = mct_aVect_indexRA(avstrm,'strm_pbot_af' ,perrWith='quiet') - sshum_af = mct_aVect_indexRA(avstrm,'strm_shum_af' ,perrWith='quiet') - sswdn_af = mct_aVect_indexRA(avstrm,'strm_swdn_af' ,perrWith='quiet') - slwdn_af = mct_aVect_indexRA(avstrm,'strm_lwdn_af' ,perrWith='quiet') - - ! isotopic forcing - if (wiso_datm) then - sprecn_16O = mct_aVect_indexRA(avstrm,'strm_precn_16O',perrWith='quiet') - sprecn_18O = mct_aVect_indexRA(avstrm,'strm_precn_18O',perrWith='quiet') - sprecn_HDO = mct_aVect_indexRA(avstrm,'strm_precn_HDO',perrWith='quiet') - ! Okay here to just use srh_18O and srh_HDO, because the forcing is (should) - ! just be deltas, applied in CTSM to the base tracer - srh_16O = mct_aVect_indexRA(avstrm,'strm_rh_16O',perrWith='quiet') - srh_18O = mct_aVect_indexRA(avstrm,'strm_rh_18O',perrWith='quiet') - srh_HDO = mct_aVect_indexRA(avstrm,'strm_rh_HDO',perrWith='quiet') - end if - - call t_stopf('datm_initmctavs') - - nxg = SDATM%nxg - nyg = SDATM%nyg - - !---------------------------------------------------------------------------- - ! 
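Only stream fields that are actually present end up in avstrm, so flds_strm is assembled by scanning every stream for each optional input name and appending the corresponding internal name to a colon-delimited list. A simplified Python sketch of that scan (it records each mapped name at most once, whereas the loops above would append a duplicate if the same field appeared in two streams):

```python
def build_strm_field_list(stream_fields, stifld, stofld):
    """Return the colon-delimited list of internal names (stofld) whose
    counterpart input name (stifld) is found in at least one stream."""
    found = []
    for name_in, name_out in zip(stifld, stofld):
        if any(name_in in fields for fields in stream_fields):
            found.append(name_out)
    return ":".join(found)

# Hypothetical streams and name mapping:
streams = [{"tbot", "wind", "shum"}, {"swdn"}]
print(build_strm_field_list(streams,
                            ["tbot", "wind", "swdn", "tdew"],
                            ["strm_tbot", "strm_wind", "strm_swdn", "strm_tdew"]))
# -> strm_tbot:strm_wind:strm_swdn
```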
Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - exists = .false. - exists1 = .false. - if (trim(rest_file) == trim(nullstr) .and. & - trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer = ',trim(rpfile) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (exists) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - inquire(file=trim(rest_file),exist=exists1) - endif - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - - call shr_mpi_bcast(exists,mpicom,'exists') - call shr_mpi_bcast(exists1,mpicom,'exists1') - - ! if (exists1) then - ! if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - ! call shr_pcdf_readwrite('read',SDATM%pio_subsystem, SDATM%io_type, & - ! trim(rest_file),mpicom,gsmap=SDATM%gsmap,rf1=water,rf1n='water',io_format=SDATM%io_format) - ! else - ! if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file) - ! endif - - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDATM,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - endif - - !---------------------------------------------------------------------------- - ! Set initial atm state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - call datm_comp_run(mpicom=mpicom, compid=compid, my_task=my_task, & - master_task=master_task, inst_suffix=inst_suffix, logunit=logunit, & - orbEccen=orbEccen, orbMvelpp=orbMvelpp, orbLambm0=orbLambm0, orbObliqr=orbObliqr, & - write_restart=.false., target_ymd=current_ymd, target_tod=current_tod, target_mon=current_mon, & - calendar=calendar, modeldt=modeldt, atm_prognostic=atm_prognostic) - call t_adj_detailf(-2) - - call t_stopf('DATM_INIT') - - end subroutine datm_comp_init - - !=============================================================================== - - subroutine datm_comp_run(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, & - orbEccen, orbMvelpp, orbLambm0, orbObliqr, & - write_restart, target_ymd, target_tod, target_mon, modeldt, calendar, & - atm_prognostic, case_name) - - ! !DESCRIPTION: run method for datm model - - ! !INPUT/OUTPUT PARAMETERS: - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - real(R8) , intent(in) :: orbEccen ! orb eccentricity (unit-less) - real(R8) , intent(in) :: orbMvelpp ! orb moving vernal eq (radians) - real(R8) , intent(in) :: orbLambm0 ! orb mean long of perhelion (radians) - real(R8) , intent(in) :: orbObliqr ! 
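On a restart, the filenames come either from the namelist or from the rpointer file, whose first line names the model restart file and whose second line names the stream restart file. A minimal Python sketch of the rpointer path, with the file layout taken from the read statements above:

```python
import os

def read_rpointer(rpfile, inst_suffix=""):
    """Return (rest_file, rest_file_strm) from an rpointer file, or
    (None, None) when the rpointer does not exist."""
    path = rpfile + inst_suffix
    if not os.path.exists(path):
        return None, None
    with open(path) as fh:
        rest_file = fh.readline().strip()
        rest_file_strm = fh.readline().strip()
    return rest_file, rest_file_strm
```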
orb obliquity (radians) - logical , intent(in) :: write_restart ! restart alarm is on - integer , intent(in) :: target_ymd ! model date - integer , intent(in) :: target_tod ! model sec into model date - integer , intent(in) :: target_mon ! model month - character(len=*) , intent(in) :: calendar ! calendar type - Integer , intent(in) :: modeldt ! model time step - logical , intent(in) :: atm_prognostic - character(len=*) , intent(in), optional :: case_name ! case name - - !--- local --- - integer :: n,nfld ! indices - integer :: lsize ! size of attr vect - character(CL) :: rest_file ! restart_file - character(CL) :: rest_file_strm ! restart_file - integer :: nu ! unit number - integer :: eday ! elapsed day - real(R8) :: rday ! elapsed day - real(R8) :: cosFactor ! cosine factor - real(R8) :: factor ! generic/temporary correction factor - real(R8) :: avg_alb ! average albedo - real(R8) :: tMin ! minimum temperature - character(len=18) :: date_str - character(len=CS) :: fldname - real(R8) :: uprime,vprime,swndr,swndf,swvdr,swvdf,ratio_rvrf - real(R8) :: tbot,pbot,rtmp,vp,ea,e,qsat,frac,qsatT - logical :: firstcall = .true. ! first call logical - character(*), parameter :: F00 = "('(datm_comp_run) ',8a)" - character(*), parameter :: F04 = "('(datm_comp_run) ',2a,2i8,'s')" - character(*), parameter :: F0D = "('(datm_comp_run) ',a, i7,2x,i5,2x,i5,2x,d21.14)" - character(*), parameter :: subName = "(datm_comp_run) " - !------------------------------------------------------------------------------- - - !-------------------- - ! Debug output - !-------------------- - - if (debug_import > 0 .and. my_task == master_task .and. atm_prognostic) then - do nfld = 1, mct_aVect_nRAttr(x2a) - call shr_string_listGetName(trim(flds_x2a), nfld, fldname) - do n = 1, mct_aVect_lsize(x2a) - write(logunit,F0D)'import: ymd,tod,n = '// trim(fldname),target_ymd, target_tod, & - n, x2a%rattr(nfld,n) - end do - end do - end if - - !-------------------- - ! ADVANCE ATM - !-------------------- - - call t_startf('DATM_RUN') - call t_barrierf('datm_BARRIER',mpicom) - call t_startf('datm') - - !--- set data needed for cosz t-interp method --- - call shr_strdata_setOrbs(SDATM,orbEccen,orbMvelpp,orbLambm0,orbObliqr,modeldt) - - !--- copy all fields from streams to a2x as default --- - call t_startf('datm_strdata_advance') - call shr_strdata_advance(SDATM,target_ymd,target_tod,mpicom,'datm') - call t_stopf('datm_strdata_advance') - - call t_barrierf('datm_scatter_BARRIER',mpicom) - - call t_startf('datm_scatter') - if (firstcall) then - allocate(ilist_av(SDATM%nstreams)) - allocate(olist_av(SDATM%nstreams)) - allocate(ilist_st(SDATM%nstreams)) - allocate(olist_st(SDATM%nstreams)) - allocate(count_av(SDATM%nstreams)) - allocate(count_st(SDATM%nstreams)) - do n = 1,SDATM%nstreams - ! Obtain a smaller list for translate given the actual fields present in the streams - ! This can only be done once the SDATM has been initialized - call shr_dmodel_translate_list( & - avi=SDATM%avs(n), & ! input av - avo=a2x, & ! output av - avifld=avifld, & ! input field names for translation - avofld=avofld, & ! output field names for translation - ilist=ilist_av(n), & ! input list for translation - olist=olist_av(n), & ! output list for translation - cnt=count_av(n)) ! indices - end do - do n = 1,SDATM%nstreams - call shr_dmodel_translate_list( & - avi=SDATM%avs(n), & ! input av - avo=avstrm, & ! output av - avifld=stifld, & ! input field names for translation - avofld=stofld, & ! output field names for translation - ilist=ilist_st(n), & ! 
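The debug block walks every field of the import attribute vector and prints each cell value together with the model date; the field names come from the colon-delimited flds_x2a list. A small Python sketch of that dump, with the attribute vector replaced by a list of per-field value lists:

```python
def dump_fields(flds, data, ymd, tod, label="import"):
    """Print every value of every field, one line per (field, cell) pair."""
    for name, values in zip(flds.split(":"), data):
        for n, val in enumerate(values, start=1):
            print(f"{label}: ymd,tod,n = {name} {ymd:7d} {tod:5d} {n:5d} {val:21.14e}")

# Hypothetical two-field, three-cell example:
dump_fields("Sx_t:Sx_tref",
            [[280.0, 281.5, 279.9], [279.0, 280.2, 278.8]],
            20000101, 3600)
```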
input list for translation - olist=olist_st(n), & ! output list for translation - cnt=count_st(n)) ! indices - end do - end if - - ! At this point DATM%avs(n) has been interpolated to the model - ! grid and the model time - - ! Fill in a2x from ALL the streams in SDATM%avs(:) - do n = 1,SDATM%nstreams - if (count_av(n) > 0) then - call shr_dmodel_translateAV_list( avi=SDATM%avs(n), avo=a2x, & - ilist=ilist_av(n), olist=olist_av(n)) - end if - enddo - - ! Fill in avstrm from ALL the streams in SDATM%avs(:) - do n = 1,SDATM%nstreams - if (count_st(n) > 0) then - call shr_dmodel_translateAV_list( avi=SDATM%avs(n), avo=avstrm, & - ilist=ilist_st(n), olist=olist_st(n)) - end if - enddo - call t_stopf('datm_scatter') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('datm_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - case('CORE2_NYF','CORE2_IAF') - if (firstcall) then - if (sprec < 1 .or. sswdn < 1) then - write(logunit,F00) 'ERROR: prec and swdn must be in streams for CORE2' - call shr_sys_abort(trim(subname)//'ERROR: prec and swdn must be in streams for CORE2') - endif - if (trim(datamode) == 'CORE2_IAF' ) then - if (starcf < 1 ) then - write(logunit,F00) 'ERROR: tarcf must be in an input stream for CORE2_IAF' - call shr_sys_abort(trim(subname)//'tarcf must be in an input stream for CORE2_IAF') - endif - endif - call datm_shr_CORE2getFactors(factorFn,windFactor,winddFactor,qsatFactor, & - mpicom,compid, SDATM%gsmap, SDATM%grid, SDATM%nxg, SDATM%nyg) - endif - call shr_cal_date2julian(target_ymd,target_tod,rday,calendar) - rday = mod((rday - 1.0_R8),365.0_R8) - cosfactor = cos((2.0_R8*SHR_CONST_PI*rday)/365 - phs_c0) - - lsize = mct_avect_lsize(a2x) - do n = 1,lsize - a2x%rAttr(kz,n) = 10.0_R8 - - !--- correction to NCEP winds based on QSCAT --- - uprime = a2x%rAttr(ku,n)*windFactor(n) - vprime = a2x%rAttr(kv,n)*windFactor(n) - a2x%rAttr(ku,n) = uprime*cos(winddFactor(n)*degtorad)- & - vprime*sin(winddFactor(n)*degtorad) - a2x%rAttr(kv,n) = uprime*sin(winddFactor(n)*degtorad)+ & - vprime*cos(winddFactor(n)*degtorad) - - !--- density, tbot, & pslv taken directly from input stream, set pbot --- - a2x%rAttr(kpbot,n) = a2x%rAttr(kpslv,n) - - !--- correction to NCEP Arctic & Antarctic air T & potential T --- - if ( yc(n) < -60.0_R8 ) then - tMin = (avg_c0 + avg_c1*yc(n)) + (amp_c0 + amp_c1*yc(n))*cosFactor + tKFrz - a2x%rAttr(ktbot,n) = max(a2x%rAttr(ktbot,n), tMin) - else if ( yc(n) > 60.0_R8 ) then - factor = MIN(1.0_R8, 0.1_R8*(yc(n)-60.0_R8) ) - a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + factor * dTarc(target_mon) - endif - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - - !--- correction to NCEP relative humidity for heat budget balance --- - a2x%rAttr(kshum,n) = a2x%rAttr(kshum,n) + qsatFactor(n) - - !--- Dupont correction to NCEP Arctic air T --- - !--- don't correct during summer months (July-September) - !--- ONLY correct when forcing year is 1997->2004 - if (trim(datamode) == 'CORE2_IAF' ) then - a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + avstrm%rAttr(starcf,n) - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - end if - - !------------------------------------------------------------------------- - ! PRECIPITATION DATA - !------------------------------------------------------------------------- - - avstrm%rAttr(sprec,n) = avstrm%rAttr(sprec,n)/86400.0_R8 ! convert mm/day to kg/m^2/s - - ! 
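In the CORE2 modes the NCEP winds are rescaled and rotated with per-cell factors from the correction file, and near-polar air temperatures are nudged: a lower bound south of 60S built from the annual-cycle cosine factor, and a ramped monthly offset north of 60N. A Python sketch of the wind rotation and the northern ramp (the southern bound needs the avg_c*/amp_c* constants, which are defined elsewhere in the module and are not reproduced here); winddFactor is in degrees:

```python
import math

DEG2RAD = math.pi / 180.0

def core2_wind_correction(u, v, wind_factor, windd_factor_deg):
    """Scale wind speed and rotate wind direction by QSCAT-derived factors."""
    up = u * wind_factor
    vp = v * wind_factor
    ang = windd_factor_deg * DEG2RAD
    return (up * math.cos(ang) - vp * math.sin(ang),
            up * math.sin(ang) + vp * math.cos(ang))

def arctic_tbot_correction(tbot, lat_deg, dtarc_month):
    """Ramp the monthly Arctic offset in between 60N and 70N (the dTarc values
    live elsewhere in the module and are passed in here)."""
    if lat_deg > 60.0:
        tbot += min(1.0, 0.1 * (lat_deg - 60.0)) * dtarc_month
    return tbot
```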
only correct satellite products, do not correct Serreze Arctic data - if ( yc(n) < 58. ) then - avstrm%rAttr(sprec,n) = avstrm%rAttr(sprec,n)*1.14168_R8 - endif - if ( yc(n) >= 58. .and. yc(n) < 68. ) then - factor = MAX(0.0_R8, 1.0_R8 - 0.1_R8*(yc(n)-58.0_R8) ) - avstrm%rAttr(sprec,n) = avstrm%rAttr(sprec,n)*(factor*(1.14168_R8 - 1.0_R8) + 1.0_R8) - endif - - a2x%rAttr(krc,n) = 0.0_R8 ! default zero - a2x%rAttr(ksc,n) = 0.0_R8 - if (a2x%rAttr(ktbot,n) < tKFrz ) then ! assign precip to rain/snow components - a2x%rAttr(krl,n) = 0.0_R8 - a2x%rAttr(ksl,n) = avstrm%rAttr(sprec,n) - else - a2x%rAttr(krl,n) = avstrm%rAttr(sprec,n) - a2x%rAttr(ksl,n) = 0.0_R8 - endif - - !------------------------------------------------------------------------- - ! RADIATION DATA - !------------------------------------------------------------------------- - - !--- fabricate required swdn components from net swdn --- - a2x%rAttr(kswvdr,n) = avstrm%rAttr(sswdn,n)*(0.28_R8) - a2x%rAttr(kswndr,n) = avstrm%rAttr(sswdn,n)*(0.31_R8) - a2x%rAttr(kswvdf,n) = avstrm%rAttr(sswdn,n)*(0.24_R8) - a2x%rAttr(kswndf,n) = avstrm%rAttr(sswdn,n)*(0.17_R8) - - !--- compute net short-wave based on LY08 latitudinally-varying albedo --- - avg_alb = ( 0.069 - 0.011*cos(2.0_R8*yc(n)*degtorad ) ) - a2x%rAttr(kswnet,n) = avstrm%rAttr(sswdn,n)*(1.0_R8 - avg_alb) - - !--- corrections to GISS sswdn for heat budget balancing --- - factor = 1.0_R8 - if ( -60.0_R8 < yc(n) .and. yc(n) < -50.0_R8 ) then - factor = 1.0_R8 - (yc(n) + 60.0_R8)*(0.05_R8/10.0_R8) - else if ( -50.0_R8 < yc(n) .and. yc(n) < 30.0_R8 ) then - factor = 0.95_R8 - else if ( 30.0_R8 < yc(n) .and. yc(n) < 40._R8 ) then - factor = 1.0_R8 - (40.0_R8 - yc(n))*(0.05_R8/10.0_R8) - endif - a2x%rAttr(kswnet,n) = a2x%rAttr(kswnet,n)*factor - a2x%rAttr(kswvdr,n) = a2x%rAttr(kswvdr,n)*factor - a2x%rAttr(kswndr,n) = a2x%rAttr(kswndr,n)*factor - a2x%rAttr(kswvdf,n) = a2x%rAttr(kswvdf,n)*factor - a2x%rAttr(kswndf,n) = a2x%rAttr(kswndf,n)*factor - - !--- correction to GISS lwdn in Arctic --- - if ( yc(n) > 60._R8 ) then - factor = MIN(1.0_R8, 0.1_R8*(yc(n)-60.0_R8) ) - a2x%rAttr(klwdn,n) = a2x%rAttr(klwdn,n) + factor * dLWarc - endif - - enddo ! lsize - - case('CORE_IAF_JRA') - if (firstcall) then - if (sprec < 1 .or. 
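The CORE2 radiation handling fabricates the four required shortwave components from the single net-downward stream with fixed fractions and computes net shortwave from the LY08 latitude-dependent albedo. A direct Python transcription of those formulas:

```python
import math

def core2_swdn_components(swdn, lat_deg):
    """Split downward shortwave into visible/near-IR, direct/diffuse parts and
    compute net shortwave with avg_alb = 0.069 - 0.011*cos(2*lat)."""
    swvdr = 0.28 * swdn
    swndr = 0.31 * swdn
    swvdf = 0.24 * swdn
    swndf = 0.17 * swdn
    avg_alb = 0.069 - 0.011 * math.cos(2.0 * lat_deg * math.pi / 180.0)
    swnet = swdn * (1.0 - avg_alb)
    return swvdr, swndr, swvdf, swndf, swnet
```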
sswdn < 1) then - write(logunit,F00) 'ERROR: prec and swdn must be in streams for CORE_IAF_JRA' - call shr_sys_abort(trim(subname)//'ERROR: prec and swdn must be in streams for CORE_IAF_JRA') - endif - if (trim(datamode) == 'CORE_IAF_JRA' ) then - if (starcf < 1 ) then - write(logunit,F00) 'ERROR: tarcf must be in an input stream for CORE_IAF_JRA' - call shr_sys_abort(trim(subname)//'tarcf must be in an input stream for CORE_IAF_JRA') - endif - endif - if (trim(factorFn) == 'null') then - windFactor = 1.0_R8 - winddFactor = 1.0_R8 - qsatFactor = 1.0_R8 - else - call datm_shr_CORE2getFactors(factorFn,windFactor,winddFactor,qsatFactor, & - mpicom, compid, SDATM%gsmap, SDATM%grid, SDATM%nxg, SDATM%nyg) - endif - endif - call shr_cal_date2julian(target_ymd,target_tod,rday,calendar) - rday = mod((rday - 1.0_R8),365.0_R8) - cosfactor = cos((2.0_R8*SHR_CONST_PI*rday)/365 - phs_c0) - - lsize = mct_avect_lsize(a2x) - do n = 1,lsize - a2x%rAttr(kz,n) = 10.0_R8 - - !--- density, tbot, & pslv taken directly from input stream, set pbot --- - a2x%rAttr(kpbot,n) = a2x%rAttr(kpslv,n) - - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - - !--- density computation for JRA55 forcing --- - a2x%rAttr(kdens,n) = a2x%rAttr(kpbot,n)/(rdair*a2x%rAttr(ktbot,n) & - *(1+0.608* a2x%rAttr(kshum,n))) - - !------------------------------------------------------------------------- - ! PRECIPITATION DATA - !------------------------------------------------------------------------- - - a2x%rAttr(krc,n) = 0.0_R8 ! default zero - a2x%rAttr(ksc,n) = 0.0_R8 - if (a2x%rAttr(ktbot,n) < tKFrz ) then ! assign precip to rain/snow components - a2x%rAttr(krl,n) = 0.0_R8 - a2x%rAttr(ksl,n) = avstrm%rAttr(sprec,n) - else - a2x%rAttr(krl,n) = avstrm%rAttr(sprec,n) - a2x%rAttr(ksl,n) = 0.0_R8 - endif - - !------------------------------------------------------------------------- - ! RADIATION DATA - !------------------------------------------------------------------------- - - !--- fabricate required swdn components from net swdn --- - a2x%rAttr(kswvdr,n) = avstrm%rAttr(sswdn,n)*(0.28_R8) - a2x%rAttr(kswndr,n) = avstrm%rAttr(sswdn,n)*(0.31_R8) - a2x%rAttr(kswvdf,n) = avstrm%rAttr(sswdn,n)*(0.24_R8) - a2x%rAttr(kswndf,n) = avstrm%rAttr(sswdn,n)*(0.17_R8) - - !--- compute net short-wave based on LY08 latitudinally-varying albedo --- - avg_alb = ( 0.069 - 0.011*cos(2.0_R8*yc(n)*degtorad ) ) - a2x%rAttr(kswnet,n) = avstrm%rAttr(sswdn,n)*(1.0_R8 - avg_alb) - - enddo ! lsize - - case('CLMNCEP') - if (firstcall) then - if (swind < 1 .or. stbot < 1) then - write(logunit,F00) ' ERROR: wind and tbot must be in streams for CLMNCEP' - call shr_sys_abort(trim(subname)//' ERROR: wind and tbot must be in streams for CLMNCEP') - endif - rtmp = maxval(a2x%rAttr(ktbot,:)) - call shr_mpi_max(rtmp,tbotmax,mpicom,'datm_tbot',all=.true.) - if (atm_prognostic) then - rtmp = maxval(x2a%rAttr(kanidr,:)) - call shr_mpi_max(rtmp,anidrmax,mpicom,'datm_ani',all=.true.) - else - anidrmax = SHR_CONST_SPVAL ! see below for use - end if - if (stdew > 0) then - rtmp = maxval(avstrm%rAttr(stdew,:)) - call shr_mpi_max(rtmp,tdewmax,mpicom,'datm_tdew',all=.true.) - endif - if (my_task == master_task) & - write(logunit,*) trim(subname),' max values = ',tbotmax,tdewmax,anidrmax - endif - lsize = mct_avect_lsize(a2x) - do n = 1,lsize - !--- bottom layer height --- - if (sz < 1) a2x%rAttr(kz,n) = 30.0_R8 - - !--- temperature --- - if (tbotmax < 50.0_R8) a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + tkFrz - ! 
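For the JRA55 forcing the air density is not taken from a stream but computed from the ideal-gas law with a virtual-temperature correction for moisture. A one-line Python equivalent (rdair here defaults to the standard dry-air gas constant in J/(kg K); the Fortran uses the module's own constant):

```python
def jra_air_density(pbot, tbot, shum, rdair=287.058):
    """Moist-air density: rho = p / (Rd * T * (1 + 0.608 q))."""
    return pbot / (rdair * tbot * (1.0 + 0.608 * shum))
```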
Limit very cold forcing to 180K - a2x%rAttr(ktbot,n) = max(180._r8, a2x%rAttr(ktbot,n)) - a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) - - !--- pressure --- - if (spbot < 1) a2x%rAttr(kpbot,n) = pstd - a2x%rAttr(kpslv,n) = a2x%rAttr(kpbot,n) - - !--- u, v wind velocity --- - a2x%rAttr(ku,n) = avstrm%rAttr(swind,n)/sqrt(2.0_R8) - a2x%rAttr(kv,n) = a2x%rAttr(ku,n) - - !--- specific humidity --- - tbot = a2x%rAttr(ktbot,n) - pbot = a2x%rAttr(kpbot,n) - if (sshum > 0) then - e = datm_shr_esat(tbot,tbot) - qsat = (0.622_R8 * e)/(pbot - 0.378_R8 * e) - if (qsat < a2x%rAttr(kshum,n)) then - a2x%rAttr(kshum,n) = qsat - endif - else if (srh > 0) then - e = avstrm%rAttr(srh,n) * 0.01_R8 * datm_shr_esat(tbot,tbot) - qsat = (0.622_R8 * e)/(pbot - 0.378_R8 * e) - a2x%rAttr(kshum,n) = qsat - if(wiso_datm) then - ! isotopic forcing - ! For tracer specific humidity, lnd_import_mct expects a delta, so - ! just keep the delta from the input file - TW - a2x%rAttr(kshum_16O,n) = avstrm%rAttr(srh_16O,n) - a2x%rAttr(kshum_18O,n) = avstrm%rAttr(srh_18O,n) - a2x%rAttr(kshum_HDO,n) = avstrm%rAttr(srh_HDO,n) - end if - else if (stdew > 0) then - if (tdewmax < 50.0_R8) avstrm%rAttr(stdew,n) = avstrm%rAttr(stdew,n) + tkFrz - e = datm_shr_esat(avstrm%rAttr(stdew,n),tbot) - qsat = (0.622_R8 * e)/(pbot - 0.378_R8 * e) - a2x%rAttr(kshum,n) = qsat - else - call shr_sys_abort(subname//'ERROR: cannot compute shum') - endif - - !--- density --- - vp = (a2x%rAttr(kshum,n)*pbot) / (0.622_R8 + 0.378_R8 * a2x%rAttr(kshum,n)) - a2x%rAttr(kdens,n) = (pbot - 0.378_R8 * vp) / (tbot*rdair) - - !--- downward longwave --- - if (slwdn < 1) then - e = a2x%rAttr(kpslv,n) * a2x%rAttr(kshum,n) / (0.622_R8 + 0.378_R8 * a2x%rAttr(kshum,n)) - ea = 0.70_R8 + 5.95e-05_R8 * 0.01_R8 * e * exp(1500.0_R8/tbot) - a2x%rAttr(klwdn,n) = ea * stebol * tbot**4 - endif - - !--- shortwave radiation --- - if (sswdndf > 0 .and. sswdndr > 0) then - a2x%rAttr(kswndr,n) = avstrm%rAttr(sswdndr,n) * 0.50_R8 - a2x%rAttr(kswvdr,n) = avstrm%rAttr(sswdndr,n) * 0.50_R8 - a2x%rAttr(kswndf,n) = avstrm%rAttr(sswdndf,n) * 0.50_R8 - a2x%rAttr(kswvdf,n) = avstrm%rAttr(sswdndf,n) * 0.50_R8 - elseif (sswdn > 0) then - ! relationship between incoming NIR or VIS radiation and ratio of - ! direct to diffuse radiation calculated based on one year's worth of - ! hourly CAM output from CAM version cam3_5_55 - swndr = avstrm%rAttr(sswdn,n) * 0.50_R8 - ratio_rvrf = min(0.99_R8,max(0.29548_R8 + 0.00504_R8*swndr & - -1.4957e-05_R8*swndr**2 + 1.4881e-08_R8*swndr**3,0.01_R8)) - a2x%rAttr(kswndr,n) = ratio_rvrf*swndr - swndf = avstrm%rAttr(sswdn,n) * 0.50_R8 - a2x%rAttr(kswndf,n) = (1._R8 - ratio_rvrf)*swndf - - swvdr = avstrm%rAttr(sswdn,n) * 0.50_R8 - ratio_rvrf = min(0.99_R8,max(0.17639_R8 + 0.00380_R8*swvdr & - -9.0039e-06_R8*swvdr**2 + 8.1351e-09_R8*swvdr**3,0.01_R8)) - a2x%rAttr(kswvdr,n) = ratio_rvrf*swvdr - swvdf = avstrm%rAttr(sswdn,n) * 0.50_R8 - a2x%rAttr(kswvdf,n) = (1._R8 - ratio_rvrf)*swvdf - else - call shr_sys_abort(subName//'ERROR: cannot compute short-wave down') - endif - - !--- swnet: a diagnostic quantity --- - if (anidrmax < 1.0e-8 .or. anidrmax > SHR_CONST_SPVAL * 0.9_R8) then - a2x%rAttr(kswnet,n) = 0.0_R8 - else - a2x%rAttr(kswnet,n) = (1.0_R8-x2a%rAttr(kanidr,n))*a2x%rAttr(kswndr,n) + & - (1.0_R8-x2a%rAttr(kavsdr,n))*a2x%rAttr(kswvdr,n) + & - (1.0_R8-x2a%rAttr(kanidf,n))*a2x%rAttr(kswndf,n) + & - (1.0_R8-x2a%rAttr(kavsdf,n))*a2x%rAttr(kswvdf,n) - endif - - !--- rain and snow --- - if (sprecc > 0 .and. 
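The CLMNCEP branch derives specific humidity from whichever of shum, relative humidity, or dew point is available, synthesizes downward longwave when it is missing, and splits a single shortwave stream into direct and diffuse parts with an empirical cubic fit. A Python sketch of the three formulas (stebol is assumed to be the Stefan-Boltzmann constant; the esat polynomial itself appears further down in datm_shr_mod):

```python
import math

def specific_humidity(e, pbot):
    """q = 0.622 e / (p - 0.378 e), with e the (saturation) vapor pressure."""
    return (0.622 * e) / (pbot - 0.378 * e)

def direct_fraction_nir(swndr):
    """Direct fraction of near-IR radiation from the cubic fit to hourly CAM
    output, clipped to [0.01, 0.99]; swndr is half of the total swdn stream."""
    r = 0.29548 + 0.00504 * swndr - 1.4957e-05 * swndr**2 + 1.4881e-08 * swndr**3
    return min(0.99, max(r, 0.01))

def empirical_lwdn(pslv, shum, tbot, stebol=5.67e-8):
    """Downward longwave when no lwdn stream exists: emissivity estimated from
    vapor pressure and temperature, then ea * sigma * T**4."""
    e = pslv * shum / (0.622 + 0.378 * shum)
    ea = 0.70 + 5.95e-05 * 0.01 * e * math.exp(1500.0 / tbot)
    return ea * stebol * tbot**4
```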
sprecl > 0) then - a2x%rAttr(krc,n) = avstrm%rAttr(sprecc,n) - a2x%rAttr(krl,n) = avstrm%rAttr(sprecl,n) - elseif (sprecn > 0) then - a2x%rAttr(krc,n) = avstrm%rAttr(sprecn,n)*0.1_R8 - a2x%rAttr(krl,n) = avstrm%rAttr(sprecn,n)*0.9_R8 - else - call shr_sys_abort(subName//'ERROR: cannot compute rain and snow') - endif - - !--- split precip between rain & snow --- - call shr_precip_partition_rain_snow_ramp(tbot, frac) - a2x%rAttr(ksc,n) = max(0.0_R8, a2x%rAttr(krc,n)*(1.0_R8 - frac) ) - a2x%rAttr(ksl,n) = max(0.0_R8, a2x%rAttr(krl,n)*(1.0_R8 - frac) ) - a2x%rAttr(krc,n) = max(0.0_R8, a2x%rAttr(krc,n)*( frac) ) - a2x%rAttr(krl,n) = max(0.0_R8, a2x%rAttr(krl,n)*( frac) ) - - enddo - - end select - - !---------------------------------------------------------- - ! bias correction / anomaly forcing ( start block ) - !---------------------------------------------------------- - - ! modify atmospheric input fields if streams exist - lsize = mct_avect_lsize(avstrm) - - ! bias correct precipitation relative to observed - ! (via bias_correct nameslist option) - if (sprecsf > 0) then - do n = 1,lsize - a2x%rAttr(ksc,n) = a2x%rAttr(ksc,n) * min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - a2x%rAttr(ksl,n) = a2x%rAttr(ksl,n) * min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - a2x%rAttr(krc,n) = a2x%rAttr(krc,n) * min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - a2x%rAttr(krl,n) = a2x%rAttr(krl,n) * min(1.e2_r8,avstrm%rAttr(sprecsf,n)) - - end do - endif - - ! adjust atmospheric input fields if anomaly forcing streams exist - ! (via anomaly_forcing namelist option) - - ! wind - if (su_af > 0 .and. sv_af > 0) then - do n = 1,lsize - a2x%rAttr(ku,n) = a2x%rAttr(ku,n) + avstrm%rAttr(su_af,n) - a2x%rAttr(kv,n) = a2x%rAttr(kv,n) + avstrm%rAttr(sv_af,n) - end do - endif - - ! specific humidity - if (sshum_af > 0) then - do n = 1,lsize - a2x%rAttr(kshum,n) = a2x%rAttr(kshum,n) + avstrm%rAttr(sshum_af,n) - - ! avoid possible negative q values - if(a2x%rAttr(kshum,n) < 0._r8) then - a2x%rAttr(kshum,n) = 1.e-6_r8 - endif - - end do - endif - - ! pressure - if (spbot_af > 0) then - do n = 1,lsize - a2x%rAttr(kpbot,n) = a2x%rAttr(kpbot,n) + avstrm%rAttr(spbot_af,n) - end do - endif - - ! temperature - if (stbot_af > 0) then - do n = 1,lsize - a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + avstrm%rAttr(stbot_af,n) - end do - endif - - ! longwave - if (slwdn_af > 0) then - do n = 1,lsize - a2x%rAttr(klwdn,n) = a2x%rAttr(klwdn,n) * avstrm%rAttr(slwdn_af,n) - end do - endif - - ! precipitation - if (sprec_af > 0) then - do n = 1,lsize - a2x%rAttr(ksc,n) = a2x%rAttr(ksc,n) * avstrm%rAttr(sprec_af,n) - a2x%rAttr(ksl,n) = a2x%rAttr(ksl,n) * avstrm%rAttr(sprec_af,n) - a2x%rAttr(krc,n) = a2x%rAttr(krc,n) * avstrm%rAttr(sprec_af,n) - a2x%rAttr(krl,n) = a2x%rAttr(krl,n) * avstrm%rAttr(sprec_af,n) - enddo - endif - - ! shortwave - if (sswdn_af > 0) then - do n = 1,lsize - a2x%rAttr(kswndr,n) = a2x%rAttr(kswndr,n) * avstrm%rAttr(sswdn_af,n) - a2x%rAttr(kswvdr,n) = a2x%rAttr(kswvdr,n) * avstrm%rAttr(sswdn_af,n) - a2x%rAttr(kswndf,n) = a2x%rAttr(kswndf,n) * avstrm%rAttr(sswdn_af,n) - a2x%rAttr(kswvdf,n) = a2x%rAttr(kswvdf,n) * avstrm%rAttr(sswdn_af,n) - enddo - endif - !-------------------- - ! bias correction / anomaly forcing ( end block ) - !-------------------- - - call t_stopf('datm_datamode') - - !-------------------- - ! Debug output - !-------------------- - - if (debug_export > 0 .and. 
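Bias correction and anomaly forcing follow a simple convention: state-like quantities (wind, humidity, pressure, temperature) receive an additive anomaly, flux-like quantities (longwave, shortwave, precipitation) are scaled multiplicatively, and humidity is kept positive. A condensed Python sketch over plain dicts of per-cell lists, a stand-in for the attribute vectors:

```python
def apply_anomaly_forcing(a2x, strm):
    """Additive anomalies for states, multiplicative factors for fluxes."""
    ncells = len(a2x["shum"])
    for i in range(ncells):
        if "u_af" in strm and "v_af" in strm:      # wind: additive
            a2x["u"][i] += strm["u_af"][i]
            a2x["v"][i] += strm["v_af"][i]
        if "shum_af" in strm:                      # humidity: additive, kept positive
            a2x["shum"][i] += strm["shum_af"][i]
            if a2x["shum"][i] < 0.0:
                a2x["shum"][i] = 1.0e-6
        if "tbot_af" in strm:                      # temperature: additive
            a2x["tbot"][i] += strm["tbot_af"][i]
        if "lwdn_af" in strm:                      # longwave: multiplicative
            a2x["lwdn"][i] *= strm["lwdn_af"][i]
        if "prec_af" in strm:                      # precipitation: multiplicative
            for f in ("rainc", "rainl", "snowc", "snowl"):
                a2x[f][i] *= strm["prec_af"][i]
```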
my_task == master_task) then - do nfld = 1, mct_aVect_nRAttr(a2x) - call shr_string_listGetName(trim(flds_a2x), nfld, fldname) - do n = 1, mct_aVect_lsize(a2x) - write(logunit,F0D)'export: ymd,tod,n = '// trim(fldname),target_ymd, target_tod, & - n, a2x%rattr(nfld,n) - end do - end do - end if - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('datm_restart') - call shr_cal_datetod2string(date_str, target_ymd, target_tod) - - write(rest_file,"(6a)") & - trim(case_name), '.datm',trim(inst_suffix),'.r.', trim(date_str), '.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.datm',trim(inst_suffix),'.rs1.', trim(date_str), '.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),target_ymd,target_tod - call shr_strdata_restWrite(trim(rest_file_strm),SDATM,mpicom,trim(case_name),'SDATM strdata') - call t_stopf('datm_restart') - endif - - firstcall = .false. - - call t_stopf('datm') - call t_stopf('DATM_RUN') - - end subroutine datm_comp_run - - !=============================================================================== - - subroutine datm_comp_import(importState, rc) - - ! input/output variables - type(ESMF_State) :: importState - integer, intent(out) :: rc - - ! local variables - integer :: k - !---------------------------------------------------------------- - - k = mct_aVect_indexRA(x2a, 'Sx_avsdr') - call dshr_import(importState, 'Sx_avsdr', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sx_avsdf') - call dshr_import(importState, 'Sx_avsdf', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sx_ansdr') - call dshr_import(importState, 'Sx_anidr', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sx_anidf') - call dshr_import(importState, 'Sx_anidf', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sx_tref') - call dshr_import(importState, 'Sx_tref', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sx_qref') - call dshr_import(importState, 'Sx_qref', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sx_t') - call dshr_import(importState, 'Sx_t', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'So_t') - call dshr_import(importState, 'So_t', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sl_snowh') - call dshr_import(importState, 'Sl_snowh', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Sl_lfrac') - call dshr_import(importState, 'Sl_lfrac', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Si_lfrac') - call dshr_import(importState, 'Si_lfrac', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'So_ofrac') - call dshr_import(importState, 'So_ofrac', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Faxx_taux') - call dshr_import(importState, 'Faxx_taux', x2a%rattr(:,k), rc=rc) - if 
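Restart output writes two files per instance, a netCDF model restart and a binary stream restart, and records both names in the rpointer file so a later run can locate them. A Python sketch of the naming convention used above (the date-string format is an assumption standing in for shr_cal_datetod2string):

```python
def restart_file_names(case_name, inst_suffix, ymd, tod):
    """Build '<case>.datm<inst>.r.<date>.nc' and '<case>.datm<inst>.rs1.<date>.bin'."""
    date_str = f"{ymd // 10000:04d}-{ymd // 100 % 100:02d}-{ymd % 100:02d}-{tod:05d}"
    rest_file = f"{case_name}.datm{inst_suffix}.r.{date_str}.nc"
    rest_file_strm = f"{case_name}.datm{inst_suffix}.rs1.{date_str}.bin"
    return rest_file, rest_file_strm

# e.g. restart_file_names("mycase", "_0001", 20000101, 0)
```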
(ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Faxx_tauy') - call dshr_import(importState, 'Faxx_tauy', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Faxx_lat') - call dshr_import(importState, 'Faxx_lat', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Faxx_sen') - call dshr_import(importState, 'Faxx_sen', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Faxx_lwup') - call dshr_import(importState, 'Faxx_lwup', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(x2a, 'Faxx_evap') - call dshr_import(importState, 'Faxx_evap', x2a%rattr(:,k), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine datm_comp_import - - !=============================================================================== - - subroutine datm_comp_export(exportState, rc) - - ! input/output variables - type(ESMF_State) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: k - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call dshr_export(a2x%rattr(ktopo,:) , exportState, 'Sa_topo', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kz,:) , exportState, 'Sa_z', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ku,:) , exportState, 'Sa_u', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kv,:) , exportState, 'Sa_v', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kptem,:) , exportState, 'Sa_ptem', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kdens,:) , exportState, 'Sa_dens', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kpslv,:) , exportState, 'Sa_pslv', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ktbot,:) , exportState, 'Sa_tbot', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kpbot,:) , exportState, 'Sa_pbot', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kshum,:) , exportState, 'Sa_shum', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(krc,:) , exportState, 'Faxa_rainc', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(krl,:) , exportState, 'Faxa_rainl', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ksc,:) , exportState, 'Faxa_snowc', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ksl,:) , exportState, 'Faxa_snowl', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(kswndr,:), exportState, 'Faxa_swndr', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kswndf,:), exportState, 'Faxa_swndf', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kswvdr,:), exportState, 'Faxa_swvdr', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kswvdf,:), exportState, 'Faxa_swvdf', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kswnet,:), exportState, 'Faxa_swnet', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(klwdn,:) , exportState, 'Faxa_lwdn', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (flds_co2a .or. flds_co2b .or. 
flds_co2c) then - call dshr_export(avstrm%rattr(sco2p,:), exportState, 'Sa_co2prog' , rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(avstrm%rattr(sco2d,:), exportState, 'Sa_co2diag' , rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - if (presaero) then - call dshr_export(a2x%rattr(kbcphidry,:), exportState, 'Faxa_bcph', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kbcphodry,:), exportState, 'Faxa_bcph', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kbcphiwet,:), exportState, 'Faxa_bcph', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(kocphidry,:), exportState, 'Faxa_ocph', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kocphodry,:), exportState, 'Faxa_ocph', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kocphiwet,:), exportState, 'Faxa_ocph', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(kdstwet1,:), exportState, 'Faxa_dstwet', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kdstwet2,:), exportState, 'Faxa_dstwet', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kdstwet3,:), exportState, 'Faxa_dstwet', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kdstwet4,:), exportState, 'Faxa_dstwet', ungridded_index=4, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(kdstdry1,:), exportState, 'Faxa_dstdry', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kdstdry2,:), exportState, 'Faxa_dstdry', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kdstdry3,:), exportState, 'Faxa_dstdry', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kdstdry4,:), exportState, 'Faxa_dstdry', ungridded_index=4, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - if (flds_wiso) then - call dshr_export(a2x%rattr(krc_16O,:), exportState, 'Faxa_rainc_wiso', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(krc_18O,:), exportState, 'Faxa_rainc_wiso', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(krc_HDO,:), exportState, 'Faxa_rainc_wiso', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(krl_16O,:), exportState, 'Faxa_rainl_wiso', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(krl_18O,:), exportState, 'Faxa_rainl_wiso', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(krl_HDO,:), exportState, 'Faxa_rainl_wiso', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(ksc_16O,:), exportState, 'Faxa_snowc_wiso', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ksc_18O,:), exportState, 'Faxa_snowc_wiso', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ksc_HDO,:), exportState, 'Faxa_snowc_wiso', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - 
call dshr_export(a2x%rattr(ksl_16O,:), exportState, 'Faxa_snowl_wiso', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ksl_18O,:), exportState, 'Faxa_snowl_wiso', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(ksl_HDO,:), exportState, 'Faxa_snowl_wiso', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(a2x%rattr(kshum_16O,:), exportState, 'Faxa_shum_wiso', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kshum_18O,:), exportState, 'Faxa_shum_wiso', ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(a2x%rattr(kshum_HDO,:), exportState, 'Faxa_shum_wiso', ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - end subroutine datm_comp_export - -end module datm_comp_mod diff --git a/src/components/data_comps/datm/nuopc/datm_shr_mod.F90 b/src/components/data_comps/datm/nuopc/datm_shr_mod.F90 deleted file mode 100644 index 828a2fb87b4..00000000000 --- a/src/components/data_comps/datm/nuopc/datm_shr_mod.F90 +++ /dev/null @@ -1,625 +0,0 @@ -module datm_shr_mod - - ! !USES: - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, I8=>SHR_KIND_I8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_const_mod , only : SHR_CONST_CDAY,SHR_CONST_TKFRZ,SHR_CONST_SPVAL - use shr_file_mod , only : shr_file_getlogunit, shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_cal_mod , only : shr_cal_date2julian - use shr_dmodel_mod , only : shr_dmodel_mapset - use shr_ncread_mod , only : shr_ncread_varExists, shr_ncread_varDimSizes, shr_ncread_field4dG - use shr_strdata_mod, only : shr_strdata_readnml, shr_strdata_type - use mct_mod - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - interface datm_shr_getNextRadCday - module procedure datm_shr_getNextRadCDay_i8 - module procedure datm_shr_getNextRadCDay_i4 - end interface datm_shr_getNextRadCday - - public :: datm_shr_getNextRadCDay - public :: datm_shr_CORE2getFactors - public :: datm_shr_TN460getFactors - public :: datm_shr_eSat - public :: datm_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! Note that model decomp will now come from reading in the mesh directly - - ! stream data type - type(shr_strdata_type), public :: SDATM - - ! input namelist variables - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - character(CL) , public :: bias_correct ! true => send bias correction fields to coupler - character(CL) , public :: anomaly_forcing(8) ! true => send anomaly forcing fields to coupler - logical , public :: force_prognostic_true ! if true set prognostic true - logical , public :: wiso_datm = .false. ! expect isotopic forcing from file? - integer(IN) , public :: iradsw ! radiation interval - character(CL) , public :: factorFn ! file containing correction factors - logical , public :: presaero ! true => send valid prescribe aero fields to coupler - - ! 
variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - character(len=*), public, parameter :: nullstr = 'undefined' - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine datm_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, atm_prognostic) - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: filename ! input namelist filename - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(out) :: atm_prognostic ! flag - - !--- local variables --- - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - character(CL) :: decomp ! decomp strategy - not used for NUOPC - but still needed in namelist for now - - !--- formats --- - character(*), parameter :: F00 = "('(datm_comp_init) ',8a)" - character(*), parameter :: F0L = "('(datm_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(datm_comp_init) ',a,5i8)" - character(*), parameter :: subName = "(shr_datm_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / datm_nml / decomp, & - iradsw, factorFn, restfilm, restfils, presaero, bias_correct, & - anomaly_forcing, force_prognostic_true, wiso_datm - - !---------------------------------------------------------------------------- - ! Read datm_in - !---------------------------------------------------------------------------- - - iradsw = 0 - factorFn = 'null' - restfilm = trim(nullstr) - restfils = trim(nullstr) - presaero = .false. - force_prognostic_true = .false. - - if (my_task == master_task) then - nunit = shr_file_getUnit() ! get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=datm_nml,iostat=ierr) - close(nunit) - - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F01)' iradsw = ',iradsw - write(logunit,F00)' factorFn = ',trim(factorFn) - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' presaero = ',presaero - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - write(logunit,F0L)' wiso_datm = ',wiso_datm - call shr_sys_flush(logunit) - endif - call shr_mpi_bcast(iradsw ,mpicom, 'iradsw') - call shr_mpi_bcast(factorFn ,mpicom, 'factorFn') - call shr_mpi_bcast(restfilm ,mpicom, 'restfilm') - call shr_mpi_bcast(restfils ,mpicom, 'restfils') - call shr_mpi_bcast(presaero ,mpicom, 'presaero') - call shr_mpi_bcast(force_prognostic_true ,mpicom, 'force_prognostic_true') - call shr_mpi_bcast(wiso_datm ,mpicom, 'wiso_datm') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! 
Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDATM, trim(filename), mpicom=mpicom) - - ! Validate mode - - datamode = trim(SDATM%dataMode) - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'CORE2_NYF' .or. & - trim(datamode) == 'CORE2_IAF' .or. & - trim(datamode) == 'CORE_IAF_JRA' .or. & - trim(datamode) == 'CLMNCEP' .or. & - trim(datamode) == 'COPYALL' ) then - if (my_task == master_task) then - write(logunit,F00) ' datm datamode = ',trim(datamode) - call shr_sys_flush(logunit) - end if - else - write(logunit,F00) ' ERROR illegal datm datamode = ',trim(datamode) - call shr_sys_abort() - endif - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flag - !---------------------------------------------------------------------------- - - atm_prognostic = .false. - if (force_prognostic_true) then - atm_prognostic = .true. - endif - - end subroutine datm_shr_read_namelists - - !=============================================================================== - real(R8) function datm_shr_getNextRadCDay_i8( ymd, tod, stepno, dtime, iradsw, calendar ) - - ! Return the calendar day of the next radiation time-step. - ! General Usage: nextswday = datm_shr_getNextRadCDay(curr_date) - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN), intent(in) :: ymd - integer(IN), intent(in) :: tod - integer(I8), intent(in) :: stepno - integer(IN), intent(in) :: dtime - integer(IN), intent(in) :: iradsw - character(*),intent(in) :: calendar - - !----- local ----- - real(R8) :: nextsw_cday - real(R8) :: julday - integer :: liradsw - integer :: yy,mm,dd - character(*),parameter :: subName = '(datm_shr_getNextRadCDay) ' - !------------------------------------------------------------------------------- - - liradsw = iradsw - if (liradsw < 0) liradsw = nint((-liradsw *3600._r8)/dtime) - - call shr_cal_date2julian(ymd,tod,julday,calendar) - - if (liradsw > 1) then - if (mod(stepno+1,liradsw) == 0 .and. stepno > 0) then - nextsw_cday = julday + 2*dtime/SHR_CONST_CDAY - else - nextsw_cday = -1._r8 - end if - else - nextsw_cday = julday + dtime/SHR_CONST_CDAY - end if - datm_shr_getNextRadCDay_i8 = nextsw_cday - - end function datm_shr_getNextRadCDay_i8 - - real(R8) function datm_shr_getNextRadCDay_i4( ymd, tod, stepno, dtime, iradsw, calendar ) - - ! Return the calendar day of the next radiation time-step. - ! General Usage: nextswday = datm_shr_getNextRadCDay(curr_date) - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN), intent(in) :: ymd - integer(IN), intent(in) :: tod - integer , intent(in) :: stepno - integer(IN), intent(in) :: dtime - integer(IN), intent(in) :: iradsw - character(*),intent(in) :: calendar - - !----- local ----- - real(R8) :: nextsw_cday - real(R8) :: julday - integer :: liradsw - integer :: yy,mm,dd - character(*),parameter :: subName = '(datm_shr_getNextRadCDay) ' - !------------------------------------------------------------------------------- - - liradsw = iradsw - if (liradsw < 0) liradsw = nint((-liradsw *3600._r8)/dtime) - - call shr_cal_date2julian(ymd,tod,julday,calendar) - - if (liradsw > 1) then - if (mod(stepno+1,liradsw) == 0 .and. 
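Only a fixed set of data modes is meaningful to the run method, so the namelist value is validated right after the stream namelist is read. A direct Python counterpart of that check:

```python
VALID_DATAMODES = {"NULL", "CORE2_NYF", "CORE2_IAF", "CORE_IAF_JRA", "CLMNCEP", "COPYALL"}

def validate_datamode(datamode):
    """Abort-style validation of the datm data mode."""
    mode = datamode.strip()
    if mode not in VALID_DATAMODES:
        raise ValueError(f"ERROR illegal datm datamode = {mode}")
    return mode
```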
stepno > 0) then - nextsw_cday = julday + 2*dtime/SHR_CONST_CDAY - else - nextsw_cday = -1._r8 - end if - else - nextsw_cday = julday + dtime/SHR_CONST_CDAY - end if - datm_shr_getNextRadCDay_i4 = nextsw_cday - - end function datm_shr_getNextRadCDay_i4 - - !=============================================================================== - subroutine datm_shr_CORE2getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - !--- arguments --- - character(*) ,intent(in) :: fileName ! file name string - real(R8) ,intent(inout) :: windF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: winddF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: qsatF(:) ! rel humidty adjustment factors - integer(IN) ,intent(in) :: mpicom ! mpi comm - integer(IN) ,intent(in) :: compid ! mct compid - type(mct_gsmap) ,intent(in) :: gsmap ! decomp of wind,windd,qsat - type(mct_ggrid) ,intent(in) :: ggrid ! ggrid of grid info - integer(IN) ,intent(in) :: nxg ! size of input grid - integer(IN) ,intent(in) :: nyg ! size of input grid - - !--- local --- - integer(IN) :: my_task,logunit,ier - character(*),parameter :: subName = '(datm_shr_CORE2getFactors) ' - character(*),parameter :: F00 = "('(datm_shr_CORE2getFactors) ',4a) " - !------------------------------------------------------------------------------- - - call MPI_COMM_RANK(mpicom,my_task,ier) - call shr_file_getLogUnit(logunit) - - if (my_task == 0) then - - !--- verify necessary data is in input file --- - if ( .not. shr_ncread_varExists(fileName,'lat' ) & - .or. .not. shr_ncread_varExists(fileName,'lon' ) & - .or. .not. shr_ncread_varExists(fileName,'mask' ) & - .or. .not. shr_ncread_varExists(fileName,'windFactor') & - .or. .not. shr_ncread_varExists(fileName,'winddFactor') & - .or. .not. shr_ncread_varExists(fileName,'qsatFactor') ) then - write(logunit,F00) "ERROR: invalid correction factor data file" - call shr_sys_abort(subName//"invalid correction factor data file") - end if - endif - - call datm_shr_getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - end subroutine datm_shr_CORE2getFactors - - !=============================================================================== - - subroutine datm_shr_TN460getFactors(fileName,windF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - !--- arguments --- - character(*) ,intent(in) :: fileName ! file name string - real(R8) ,intent(inout) :: windF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: qsatF(:) ! rel humidty adjustment factors - integer(IN) ,intent(in) :: mpicom ! mpi comm - integer(IN) ,intent(in) :: compid ! mct compid - type(mct_gsmap) ,intent(in) :: gsmap ! decomp of wind,windd,qsat - type(mct_ggrid) ,intent(in) :: ggrid ! ggrid of grid info - integer(IN) ,intent(in) :: nxg ! size of input grid - integer(IN) ,intent(in) :: nyg ! size of input grid - - !--- local --- - integer(IN) :: my_task,logunit,ier - real(R8),pointer :: winddF(:) ! wind adjustment factor - character(*),parameter :: subName = '(datm_shr_TN460getFactors) ' - character(*),parameter :: F00 = "('(datm_shr_TN460getFactors) ',4a) " - !------------------------------------------------------------------------------- - - call MPI_COMM_RANK(mpicom,my_task,ier) - call shr_file_getLogUnit(logunit) - - if (my_task == 0) then - - !--- verify necessary data is in input file --- - if ( .not. shr_ncread_varExists(fileName,'lat' ) & - .or. .not. shr_ncread_varExists(fileName,'lon' ) & - .or. .not. shr_ncread_varExists(fileName,'mask' ) & - .or. .not. 
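datm_shr_getNextRadCDay converts a negative iradsw (hours) into a step count and then returns the calendar day of the next shortwave calculation: on intermediate steps it returns -1, and on a radiation step it looks two model steps ahead. A Python sketch of the same logic, taking the Julian day as an input rather than computing it from ymd/tod:

```python
def next_rad_cday(julday, stepno, dtime, iradsw, secs_per_day=86400.0):
    """Calendar day of the next radiation time step, or -1 in between."""
    liradsw = iradsw
    if liradsw < 0:
        liradsw = round((-liradsw * 3600.0) / dtime)
    if liradsw > 1:
        if (stepno + 1) % liradsw == 0 and stepno > 0:
            return julday + 2.0 * dtime / secs_per_day
        return -1.0
    return julday + dtime / secs_per_day
```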
shr_ncread_varExists(fileName,'windFactor') & - .or. .not. shr_ncread_varExists(fileName,'qsatFactor') ) then - write(logunit,F00) "ERROR: invalid correction factor data file" - call shr_sys_abort(subName//"invalid correction factor data file") - end if - endif - - call datm_shr_getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmap,ggrid,nxg,nyg) - - end subroutine datm_shr_TN460getFactors - - !=============================================================================== - - subroutine datm_shr_getFactors(fileName,windF,winddF,qsatF,mpicom,compid, & - gsmapo,ggrido,nxgo,nygo) - - use shr_map_mod - - !--- arguments --- - character(*) ,intent(in) :: fileName ! file name string - real(R8) ,intent(inout) :: windF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: winddF(:) ! wind adjustment factor - real(R8) ,intent(inout) :: qsatF(:) ! rel humidty adjustment factors - integer(IN) ,intent(in) :: mpicom ! mpi comm - integer(IN) ,intent(in) :: compid ! mct compid - type(mct_gsmap) ,intent(in) :: gsmapo ! decomp of wind,windd,qsat - type(mct_ggrid) ,intent(in) :: ggrido ! ggrid of grid info - integer(IN) ,intent(in) :: nxgo ! size of input grid - integer(IN) ,intent(in) :: nygo ! size of input grid - - !--- data that describes the local model domain --- - integer(IN) :: ni0,nj0 ! dimensions of global bundle0 - integer(IN) :: ni1,nj1,nf1 ! dimensions of global bundle1 - integer(IN) :: i,j,n ! generic indicies - integer(IN) :: my_task ! local pe number - integer(IN) :: ier ! error code - integer(IN) :: logunit ! logging unit - type(mct_ggrid) :: ggridi ! input file grid - type(mct_ggrid) :: ggridoG ! output grid gathered - type(mct_gsmap) :: gsmapi ! input file gsmap - type(mct_sMatp) :: smatp ! sparse matrix weights - type(mct_avect) :: avi ! input attr vect - type(mct_avect) :: avo ! output attr vect - integer(IN) :: lsizei ! local size of input - integer(IN) :: lsizeo ! local size of output - integer(IN),pointer :: start(:) ! start list - integer(IN),pointer :: length(:) ! length list - integer(IN) :: gsizei ! input global size - integer(IN) :: numel ! number of elements in start list - real(R8) :: dadd ! lon correction - logical :: domap ! map or not - integer(IN) :: klon,klat ! lon lat fld index - - !--- temp arrays for data input --- - real(R8) ,allocatable :: tempR4D(:,:,:,:) ! 4D data array - real(R8) ,pointer :: tempR1D(:) ! 1D data array - integer(IN),allocatable :: tempI4D(:,:,:,:) ! 4D data array - character(*),parameter :: subName = '(datm_shr_getFactors) ' - character(*),parameter :: F00 = "('(datm_shr_getFactors) ',4a) " - character(*),parameter :: F01 = "('(datm_shr_getFactors) ',a,2i5)" - character(*),parameter :: F02 = "('(datm_shr_getFactors) ',a,6e12.3)" - - !------------------------------------------------------------------------------- - ! Note: gsmapi is all gridcells on root pe - !------------------------------------------------------------------------------- - - call MPI_COMM_RANK(mpicom,my_task,ier) - call shr_file_getLogUnit(logunit) - - ni0 = 0 - nj0 = 0 - allocate(start(1),length(1)) - start = 0 - length = 0 - numel = 0 - - !---------------------------------------------------------------------------- - ! read in and map global correction factors - !---------------------------------------------------------------------------- - if (my_task == 0) then - - !--- verify necessary data is in input file --- - if ( .not. shr_ncread_varExists(fileName,'lat' ) & - .or. .not. shr_ncread_varExists(fileName,'lon' ) & - .or. .not. 
shr_ncread_varExists(fileName,'mask' ) & - .or. .not. shr_ncread_varExists(fileName,'windFactor') ) then - write(logunit,F00) "ERROR: invalid correction factor data file" - call shr_sys_abort(subName//"invalid correction factor data file") - end if - call shr_ncread_varDimSizes(fileName,"windFactor",ni0,nj0) - start = 1 - length = ni0*nj0 - numel = 1 - endif - call shr_mpi_bcast(ni0,mpicom,subname//' ni0') - call shr_mpi_bcast(nj0,mpicom,subname//' nj0') - gsizei = ni0*nj0 - - !--- allocate datatypes for input data --- - call mct_gsmap_init(gsmapi,start,length,0,mpicom,compid,gsize=gsizei,numel=numel) - deallocate(start,length) - lsizei = mct_gsmap_lsize(gsmapi,mpicom) - lsizeo = mct_gsmap_lsize(gsmapo,mpicom) - call mct_gGrid_init(GGrid=gGridi, & - CoordChars='lat:lon:hgt', OtherChars='area:aream:mask:frac', lsize=lsizei ) - call mct_aVect_init(avi,rList="wind:windd:qsat",lsize=lsizei) - avi%rAttr = SHR_CONST_SPVAL - - !--- gather output grid for map logic --- - call mct_ggrid_gather(ggrido, ggridoG, gsmapo, 0, mpicom) - - if (my_task == 0) then - allocate(tempR1D(ni0*nj0)) - - !--- read domain data: lon --- - allocate(tempR4D(ni0,1,1,1)) - call shr_ncread_field4dG(fileName,'lon' ,rfld=tempR4D) - !--- needs to be monotonically increasing, add 360 at wraparound+ --- - dadd = 0.0_R8 - do i = 2,ni0 - if (tempR4D(i-1,1,1,1) > tempR4D(i,1,1,1)) dadd = 360.0_R8 - tempR4D(i,1,1,1) = tempR4D(i,1,1,1) + dadd - enddo - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(i,1,1,1) - end do - end do - deallocate(tempR4D) - call mct_gGrid_importRattr(gGridi,'lon',tempR1D,lsizei) - - !--- read domain data: lat --- - allocate(tempR4D(nj0,1,1,1)) - call shr_ncread_field4dG(fileName,'lat' ,rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(j,1,1,1) - end do - end do - deallocate(tempR4D) - call mct_gGrid_importRattr(gGridi,'lat',tempR1D,lsizei) - - !--- read domain mask--- - allocate(tempI4D(ni0,nj0,1,1)) - call shr_ncread_field4dG(fileName,'mask',ifld=tempI4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = real(tempI4D(i,j,1,1),R8) - end do - end do - deallocate(tempI4D) - call mct_gGrid_importRattr(gGridi,'mask',tempR1D,lsizei) - - !--- read bundle data: wind factor --- - allocate(tempR4D(ni0,nj0,1,1)) - if (shr_ncread_varExists(fileName,'windFactor') ) then - call shr_ncread_field4dG(fileName,'windFactor',rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(i,j,1,1) - end do - end do - call mct_aVect_importRattr(avi,'wind',tempR1D,lsizei) - endif - - !--- read bundle data: windd factor --- - if (shr_ncread_varExists(fileName,'winddFactor') ) then - call shr_ncread_field4dG(fileName,'winddFactor',rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(i,j,1,1) - end do - end do - call mct_aVect_importRattr(avi,'windd',tempR1D,lsizei) - endif - - !--- read bundle data: qsat factor --- - if (shr_ncread_varExists(fileName,'qsatFactor') ) then - call shr_ncread_field4dG(fileName,'qsatFactor',rfld=tempR4D) - n = 0 - do j=1,nj0 - do i=1,ni0 - n = n + 1 - tempR1D(n) = tempR4D(i,j,1,1) - end do - end do - call mct_aVect_importRattr(avi,'qsat',tempR1D,lsizei) - endif - - deallocate(tempR4D) - deallocate(tempR1D) - - domap = .false. - if (ni0 /= nxgo .or. nj0 /= nygo) then - domap = .true. - else - klon = mct_aVect_indexRA(ggridi%data,'lon') - klat = mct_aVect_indexRA(ggrido%data,'lat') - do n = 1,lsizei - if (abs(ggridi%data%rAttr(klon,n)-ggridoG%data%rAttr(klon,n)) > 0.01_R8) domap=.true. 
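Before the factor fields are imported, the file's longitude axis is made monotonically increasing by adding 360 degrees from the wrap-around point onward, and each 2-D field is flattened with the i index varying fastest. A Python transcription of those two steps:

```python
def make_monotonic_lon(lon):
    """Add 360 from the wrap-around point onward, as done for 'lon' above."""
    out = list(lon)
    dadd = 0.0
    for i in range(1, len(out)):
        if out[i - 1] > out[i]:
            dadd = 360.0
        out[i] += dadd
    return out

def flatten_ij(field, ni, nj):
    """Flatten field[i][j] with i varying fastest, matching the j/i loops."""
    return [field[i][j] for j in range(nj) for i in range(ni)]

# make_monotonic_lon([350.0, 355.0, 0.0, 5.0]) -> [350.0, 355.0, 360.0, 365.0]
```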
- if (abs(ggridi%data%rAttr(klat,n)-ggridoG%data%rAttr(klat,n)) > 0.01_R8) domap=.true. - enddo - endif - - call mct_gGrid_clean(ggridoG) - - endif - - call shr_mpi_bcast(domap,mpicom,subname//' domap') - - if (domap) then - call shr_dmodel_mapSet(smatp,ggridi,gsmapi,ni0 ,nj0 , & - ggrido,gsmapo,nxgo,nygo, & - 'datmfactor',shr_map_fs_remap,shr_map_fs_bilinear,shr_map_fs_srcmask, & - shr_map_fs_scalar,compid,mpicom,'Xonly') - call mct_aVect_init(avo,avi,lsizeo) - call mct_sMat_avMult(avi,smatp,avo) - call mct_sMatP_clean(smatp) - else - call mct_aVect_scatter(avi,avo,gsmapo,0,mpicom) - endif - - !--- fill the interface arrays, only if they are the right size --- - allocate(tempR1D(lsizeo)) - if (size(windF ) >= lsizeo) then - call mct_aVect_exportRattr(avo,'wind' ,tempR1D,lsizeo) - windF = tempR1D - endif - if (size(winddF) >= lsizeo) then - call mct_aVect_exportRattr(avo,'windd',tempR1D,lsizeo) - winddF = tempR1D - endif - if (size(qsatF ) >= lsizeo) then - call mct_aVect_exportRattr(avo,'qsat' ,tempR1D,lsizeo) - qsatF = tempR1D - endif - deallocate(tempR1D) - - call mct_aVect_clean(avi) - call mct_aVect_clean(avo) - call mct_gGrid_clean(ggridi) - call mct_gsmap_clean(gsmapi) - - end subroutine datm_shr_getFactors - - !=============================================================================== - - real(R8) function datm_shr_eSat(tK,tKbot) - - !--- arguments --- - real(R8),intent(in) :: tK ! temp used in polynomial calculation - real(R8),intent(in) :: tKbot ! bottom atm temp - - !--- local --- - real(R8) :: t ! tK converted to Celcius - real(R8),parameter :: tkFrz = SHR_CONST_TKFRZ ! freezing T of fresh water ~ K - - !--- coefficients for esat over water --- - real(R8),parameter :: a0=6.107799961_R8 - real(R8),parameter :: a1=4.436518521e-01_R8 - real(R8),parameter :: a2=1.428945805e-02_R8 - real(R8),parameter :: a3=2.650648471e-04_R8 - real(R8),parameter :: a4=3.031240396e-06_R8 - real(R8),parameter :: a5=2.034080948e-08_R8 - real(R8),parameter :: a6=6.136820929e-11_R8 - - !--- coefficients for esat over ice --- - real(R8),parameter :: b0=6.109177956_R8 - real(R8),parameter :: b1=5.034698970e-01_R8 - real(R8),parameter :: b2=1.886013408e-02_R8 - real(R8),parameter :: b3=4.176223716e-04_R8 - real(R8),parameter :: b4=5.824720280e-06_R8 - real(R8),parameter :: b5=4.838803174e-08_R8 - real(R8),parameter :: b6=1.838826904e-10_R8 - - !---------------------------------------------------------------------------- - ! use polynomials to calculate saturation vapor pressure and derivative with - ! respect to temperature: over water when t > 0 c and over ice when t <= 0 c - ! 
required to convert relative humidity to specific humidity - !---------------------------------------------------------------------------- - - t = min( 50.0_R8, max(-50.0_R8,(tK-tKfrz)) ) - if ( tKbot < tKfrz) then - datm_shr_eSat = 100.0_R8*(b0+t*(b1+t*(b2+t*(b3+t*(b4+t*(b5+t*b6)))))) - else - datm_shr_eSat = 100.0_R8*(a0+t*(a1+t*(a2+t*(a3+t*(a4+t*(a5+t*a6)))))) - end if - - end function datm_shr_eSat - -end module datm_shr_mod diff --git a/src/components/data_comps/desp/cime_config/buildlib b/src/components/data_comps/desp/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/data_comps/desp/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/data_comps/desp/cime_config/buildnml b/src/components/data_comps/desp/cime_config/buildnml deleted file mode 100755 index 96cdfee64d8..00000000000 --- a/src/components/data_comps/desp/cime_config/buildnml +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python - -"""Namelist creator for CIME's data external system processing (ESP) model. -""" - -# Typically ignore this. -# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import - -import os, sys, glob - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, safe_copy -from CIME.buildnml import create_namelist_infile, parse_input - -logger = logging.getLogger(__name__) - -# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements -#################################################################################### -def _create_namelists(case, confdir, infile, nmlgen, data_list_path): -#################################################################################### - """Write out the namelist for this component. - - Most arguments are the same as those for `NamelistGenerator`. The - `inst_string` argument is used as a suffix to distinguish files for - different instances. The `confdir` argument is used to specify the directory - in which output files will be placed. - """ - - #---------------------------------------------------- - # Get a bunch of information from the case. - #---------------------------------------------------- - desp_mode = case.get_value("DESP_MODE") - - #---------------------------------------------------- - # Check for incompatible options. - #---------------------------------------------------- - - #---------------------------------------------------- - # Log some settings. - #---------------------------------------------------- - logger.debug("DESP mode is %s", desp_mode) - - #---------------------------------------------------- - # Create configuration information. - #---------------------------------------------------- - config = {} - config['desp_mode'] = desp_mode - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - # - # This disable is required because nmlgen.get_streams - # may return a string or a list. 
See issue #877 in ESMCI/cime - # - #pylint: disable=no-member - - #---------------------------------------------------- - # Finally, write out all the namelists. - #---------------------------------------------------- - namelist_file = os.path.join(confdir, "desp_in") - nmlgen.write_output_file(namelist_file, data_list_path, groups=['desp_nml']) - -############################################################################### -def buildnml(case, caseroot, compname): -############################################################################### - - # Build the component namelist and required stream txt files - if compname != "desp": - raise AttributeError - - cimeroot = case.get_value("CIMEROOT") - rundir = case.get_value("RUNDIR") - ninst = case.get_value("NINST_ESP") - - # Determine configuration directory - confdir = os.path.join(caseroot,"Buildconf",compname + "conf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - #---------------------------------------------------- - # Construct the namelist generator - #---------------------------------------------------- - # Determine directory for user modified namelist_definitions.xml and namelist_defaults.xml - user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) - expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) - - # NOTE: User definition *replaces* existing definition. - namelist_xml_dir = os.path.join(cimeroot, "src", "components", "data_comps", compname, "cime_config") - definition_file = [os.path.join(namelist_xml_dir, "namelist_definition_desp.xml")] - user_definition = os.path.join(user_xml_dir, "namelist_definition_desp.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file %s not found!" % file_) - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file) - - #---------------------------------------------------- - # Clear out old data. - #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", - "desp.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - #---------------------------------------------------- - # Loop over instances - #---------------------------------------------------- - for inst_counter in range(1, ninst+1): - # determine instance string - inst_string = "" - if ninst > 1: - inst_string = '_' + '%04d' % inst_counter - - # If multi-instance case does not have restart file, use - # single-case restart for each instance - rpointer = "rpointer." 
+ compname - if (os.path.isfile(os.path.join(rundir,rpointer)) and - (not os.path.isfile(os.path.join(rundir,rpointer + inst_string)))): - safe_copy(os.path.join(rundir, rpointer), - os.path.join(rundir, rpointer + inst_string)) - - inst_string_label = inst_string - if not inst_string_label: - inst_string_label = "\"\"" - - # create namelist output infile using user_nl_file as input - user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) - expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file %s " %(user_nl_file)) - infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, infile) - namelist_infile = [infile] - - # create namelist and stream file(s) data component - _create_namelists(case, confdir, namelist_infile, nmlgen, data_list_path) - - # copy namelist files and stream text files, to rundir - if os.path.isdir(rundir): - filename = compname + "_in" - file_src = os.path.join(confdir, filename) - file_dest = os.path.join(rundir, filename) - if inst_string: - file_dest += inst_string - safe_copy(file_src,file_dest) - - for txtfile in glob.glob(os.path.join(confdir, "*txt*")): - safe_copy(txtfile, rundir) - -############################################################################### -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "desp") - - -if __name__ == "__main__": - _main_func() diff --git a/src/components/data_comps/desp/cime_config/config_component.xml b/src/components/data_comps/desp/cime_config/config_component.xml deleted file mode 100644 index 29ce7addb17..00000000000 --- a/src/components/data_comps/desp/cime_config/config_component.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - - - - Data External System Processor (DESP) - no modification of any model data - test modification of any model data - - - - char - desp - desp - case_comp - env_case.xml - Name of external system processing component - - - - char - NOCHANGE,DATATEST - NOCHANGE - run_component_desp - env_run.xml - Mode for external system processing component. - The default is NOOP, do not modify any model data. - - NOCHANGE - DATATEST - - - - - ========================================= - DESP naming conventions in compset name - ========================================= - - - diff --git a/src/components/data_comps/desp/cime_config/namelist_definition_desp.xml b/src/components/data_comps/desp/cime_config/namelist_definition_desp.xml deleted file mode 100644 index 9c3c0291c8a..00000000000 --- a/src/components/data_comps/desp/cime_config/namelist_definition_desp.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - - - - - char - desp - desp_nml - NOCHANGE,DATATEST - - The mode of operation for the DESP model. - NOCHANGE: report status of model 'pause' but make no changes - DATATEST: make a roundoff change to the restart files of the - components with their 'PAUSE_ACTIVE_XXX' XML variable - set to TRUE. - Default: NOCHANGE - - - $DESP_MODE - - - - - char - desp - desp_nml - - Model restart filename for the external system (ESP) model data. This is - optional. If restfilm is undefined, the restart filename will be - read from the DESP restart pointer file (or files for multiple instances). 
- - - undefined - - - - diff --git a/src/components/data_comps/desp/cime_config/user_nl_desp b/src/components/data_comps/desp/cime_config/user_nl_desp deleted file mode 100644 index 0b6d4bbb162..00000000000 --- a/src/components/data_comps/desp/cime_config/user_nl_desp +++ /dev/null @@ -1,16 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_desp to change namelists variables -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! Note that any namelist variable from desp_nml can -! be modified below using the above syntax -! User preview_namelists to view (not modify) the output namelist in the -! directory $CASEROOT/CaseDocs -! -! Note that some namelist variables MAY NOT be changed in user_nl_desp - -! they are defined in a $CASEROOT xml file and must be changed with -! xmlchange. -! -! For example, rather than set username to 'foo' in user_nl_desp, call -! ./xmlchange USER=foo -!------------------------------------------------------------------------ diff --git a/src/components/data_comps/desp/desp_comp_mod.F90 b/src/components/data_comps/desp/desp_comp_mod.F90 deleted file mode 100644 index 09a55168950..00000000000 --- a/src/components/data_comps/desp/desp_comp_mod.F90 +++ /dev/null @@ -1,538 +0,0 @@ -module desp_comp_mod - - ! !USES: - - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use shr_kind_mod, only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod, only: CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod, only: shr_file_getunit, shr_file_freeunit, shr_file_setio - use shr_file_mod, only: shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod, only: shr_file_setlogunit, shr_file_setloglevel - use shr_mpi_mod, only: shr_mpi_bcast - use esmf, only: ESMF_Clock - use perf_mod, only: t_startf, t_stopf, t_barrierf - use shr_strdata_mod, only: shr_strdata_type, shr_strdata_advance - use shr_strdata_mod, only: shr_strdata_pioinit - use seq_timemgr_mod, only: seq_timemgr_EClockGetData - use seq_timemgr_mod, only: seq_timemgr_RestartAlarmIsOn - use seq_comm_mct, only: num_inst_cpl => num_inst_driver - - ! Used to link esp components across multiple drivers - use seq_comm_mct, only: global_comm - - implicit none - private - -#include - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: desp_comp_init - public :: desp_comp_run - public :: desp_comp_final - public :: desp_bcast_res_files - - !-------------------------------------------------------------------------- - ! Public module data - !-------------------------------------------------------------------------- - integer, public, parameter :: desp_num_comps = 8 - character(len=3), public, parameter :: comp_names(desp_num_comps) = & - (/ 'atm', 'lnd', 'ice', 'ocn', 'glc', 'rof', 'wav', 'drv' /) - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(len=CS) :: myModelName = 'esp' ! user defined model name - integer(IN) :: mpicom - integer(IN) :: COMPID ! mct comp id - integer(IN) :: my_task ! my task in mpicom - integer(IN) :: npes ! total number of tasks - integer(IN), parameter :: master_task=0 ! task number of master task - integer(IN) :: global_numpes ! #PEs in global_commm - integer(IN) :: global_mype ! 
My rank in global_comm - integer(IN) :: logunit ! logging unit number - integer(IN) :: loglevel ! logging level - integer(IN) :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "esp_0001") - character(len=16) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") - character(len=CL) :: desp_mode ! mode of operation - logical :: bcast_filenames = .false. - character(len=*), parameter :: rpprefix = 'rpointer.' - character(len=*), parameter :: rpfile = rpprefix//'esp' - character(len=*), parameter :: nullstr = 'undefined' - character(len=*), parameter :: null_mode = 'NULL' ! Take no action - character(len=*), parameter :: noop_mode = 'NOCHANGE' ! Do not change data - character(len=*), parameter :: test_mode = 'DATATEST' ! Modify restart data - - integer, parameter :: NOERR = 0 - integer, parameter :: BAD_ID = -1 - integer, parameter :: NO_READ = -5 - integer, parameter :: NO_WRITE = -6 - - type(shr_strdata_type) :: SDESP - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !============================================================================ - subroutine desp_comp_init(EClock, espid, mpicom_in, phase, read_restart, & - inst_name_in, inst_index_in, inst_suffix_in, esp_present, esp_prognostic) - - ! !DESCRIPTION: initialize data esp model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock), intent(in) :: EClock - integer, intent(in) :: espid - integer, intent(in) :: mpicom_in - integer, intent(in) :: phase - logical, intent(in) :: read_restart - integer, intent(in) :: inst_index_in ! (e.g., 1) - character(len=16), intent(in) :: inst_name_in ! (e.g. "exp_0001") - character(len=16), intent(in) :: inst_suffix_in ! (e.g. "_0001" or "") - logical, intent(out) :: esp_present ! flag - logical, intent(out) :: esp_prognostic ! flag - - !--- local variables --- - integer(IN) :: ierr ! error code - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nunit ! unit number - - character(len=CL) :: fileName ! generic file name - - character(len=CL) :: rest_file ! restart filename - character(len=CL) :: restfilm ! rest_file namelist entry - logical :: exists ! filename existance - integer(IN) :: nu ! unit number - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: stepno ! step number - character(len=CL) :: calendar ! calendar type - - !----- define namelist ----- - namelist / desp_nml / & - desp_mode, restfilm - - !--- formats --- - character(len=*), parameter :: subName = "(desp_comp_init) " - character(len=*), parameter :: F00 = "('"//subName//"',8a)" - character(len=*), parameter :: F01 = "('"//subName//"',a,5i8)" - character(len=*), parameter :: F04 = "('"//subName//"',2(a,i0))" - !------------------------------------------------------------------------------- - - call t_startf('DESP_INIT') - - !------------------------------------------------------------------------ - ! Initialize module variables from inputs - !------------------------------------------------------------------------ - COMPID = espid - mpicom = mpicom_in - inst_name = inst_name_in - inst_index = inst_index_in - inst_suffix = inst_suffix_in - - !------------------------------------------------------------------------ - ! 
Initialize output variables - !------------------------------------------------------------------------ - esp_present = .false. - esp_prognostic = .false. - - if (phase == 1) then - ! Determine communicator groups and sizes - call mpi_comm_rank(mpicom, my_task, ierr) - call mpi_comm_size(mpicom, npes, ierr) - call mpi_comm_rank(global_comm, global_mype, ierr) - call mpi_comm_size(global_comm, global_numpes, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('esp_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - end if - - !------------------------------------------------------------------------ - ! Reset shr logging to my log file - !------------------------------------------------------------------------ - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !------------------------------------------------------------------------ - ! Read desp_in - !------------------------------------------------------------------------ - - call t_startf('desp_readnml') - - filename = "desp_in"//trim(inst_suffix) - desp_mode = trim(nullstr) - restfilm = trim(nullstr) - if (my_task == master_task) then - nunit = shr_file_getUnit() ! get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=desp_nml,iostat=ierr) - close(nunit) - - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00) 'restfilm = ',trim(restfilm) - write(logunit,F01) 'inst_index = ',inst_index - write(logunit,F00) 'inst_name = ',trim(inst_name) - write(logunit,F00) 'inst_suffix = ',trim(inst_suffix) - write(logunit,F04) 'root global rank ',global_mype,' of ',global_numpes - call shr_sys_flush(logunit) - end if - call shr_mpi_bcast(desp_mode, mpicom, 'desp_mode') - call shr_mpi_bcast(restfilm, mpicom, 'restfilm') - - rest_file = trim(restfilm) - loglevel = 1 ! could be shrloglev - call shr_file_setLogLevel(loglevel) - - !------------------------------------------------------------------------ - ! Initialize PIO - !------------------------------------------------------------------------ - - call shr_strdata_pioinit(SDESP, COMPID) - - !------------------------------------------------------------------------ - ! Validate mode - !------------------------------------------------------------------------ - - if ( (trim(desp_mode) == null_mode) .or. & - (trim(desp_mode) == noop_mode) .or. & - (trim(desp_mode) == test_mode)) then - - if (my_task == master_task) then - write(logunit,F00) 'desp mode = ',trim(desp_mode) - call shr_sys_flush(logunit) - end if - if (trim(desp_mode) /= null_mode) then - esp_present = .true. - end if - else - write(logunit,F00) ' ERROR illegal esp mode = ',trim(desp_mode) - call shr_sys_abort(subName//' Illegal ESP mode = '//trim(desp_mode)) - end if - - call t_stopf('desp_readnml') - - !------------------------------------------------------------------------ - ! 
Read restart - !------------------------------------------------------------------------ - - if (read_restart) then - if (trim(rest_file) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filename from rpointer = ',trim(rpfile) - call shr_sys_flush(logunit) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (exists) then - nu = shr_file_getUnit() - open(nu, file=trim(rpfile)//trim(inst_suffix), form='formatted') - read(nu,'(a)') rest_file - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file),exist=exists) - endif - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filename from namelist ' - call shr_sys_flush(logunit) - inquire(file=trim(rest_file),exist=exists) - end if - end if - - call shr_mpi_bcast(exists, mpicom, 'exists') - - if (exists) then - if (my_task == master_task) then - write(logunit,F00) ' reading ',trim(rest_file) - end if - ! Read any restart info here? --XXgoldyXX - else - if (my_task == master_task) then - write(logunit,F00) ' file not found, skipping ',trim(rest_file) - end if - end if - call shr_sys_flush(logunit) - end if - - if (read_restart) then - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - else - call seq_timemgr_EClockGetData( EClock, stepno=stepno ) - end if - else - call shr_sys_abort(trim(subname)//' DESP initialization only has phase 1') - end if - - !-------------------------------------------------------------------------- - ! Reset shr logging to original values - !-------------------------------------------------------------------------- - - if (my_task == master_task) then - write(logunit,F00) 'desp_comp_init done' - end if - call shr_sys_flush(logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - call t_stopf('DESP_INIT') - - end subroutine desp_comp_init - - !============================================================================= - subroutine desp_comp_run(EClock, case_name, pause_sig, atm_resume, & - lnd_resume, rof_resume, ocn_resume, ice_resume, glc_resume, & - wav_resume, cpl_resume) - - ! !DESCRIPTION: run method for data esp model - - use seq_comm_mct, only: num_inst_atm, num_inst_lnd, num_inst_rof - use seq_comm_mct, only: num_inst_ocn, num_inst_ice, num_inst_glc - use seq_comm_mct, only: num_inst_wav - use esp_utils, only: esp_pio_modify_variable - use shr_cal_mod, only: shr_cal_ymdtod2string - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(in) :: EClock - character(len=*), intent(in) :: case_name - logical, intent(in) :: pause_sig(desp_num_comps) - character(len=CL), pointer :: atm_resume(:) - character(len=CL), pointer :: lnd_resume(:) - character(len=CL), pointer :: rof_resume(:) - character(len=CL), pointer :: ocn_resume(:) - character(len=CL), pointer :: ice_resume(:) - character(len=CL), pointer :: glc_resume(:) - character(len=CL), pointer :: wav_resume(:) - character(len=CL), pointer :: cpl_resume(:) - - !--- local --- - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: yy,mm,dd ! year month day - integer(IN) :: ind ! loop index - integer(IN) :: inst ! loop index - integer(IN) :: errcode ! error return code - character(len=CL) :: rfilename ! Restart filenames - integer(IN) :: shrlogunit ! 
original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: idt ! integer timestep - real(R8) :: dt ! timestep - logical :: write_restart ! restart now - logical :: var_found ! var on file - character(len=CL) :: rest_file ! restart_file - integer(IN) :: nu ! unit number - integer(IN) :: stepno ! step number - character(len=CL) :: calendar ! calendar type - character(len=CS) :: varname - character(len=18) :: date_str - character(len=*), parameter :: subName = "(desp_comp_run) " - character(len=*), parameter :: F00 = "('"//subName//"',8a)" - character(len=*), parameter :: F04 = "('"//subName//"',2a,2i8,'s')" - !-------------------------------------------------------------------------- - - call t_startf('DESP_RUN') - - call t_startf('desp_run1') - - !-------------------------------------------------------------------------- - ! Reset shr logging to my log file - !-------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - call shr_file_setLogLevel(loglevel) - - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, curr_yr=yy, curr_mon=mm, curr_day=dd) - call seq_timemgr_EClockGetData( EClock, stepno=stepno, dtime=idt) - call seq_timemgr_EClockGetData( EClock, calendar=calendar) - dt = idt * 1.0_r8 - write_restart = seq_timemgr_RestartAlarmIsOn(EClock) - - call t_stopf('desp_run1') - - !-------------------- - ! ADVANCE ESP - !-------------------- - - call t_barrierf('desp_BARRIER',mpicom) - call t_startf('desp') - - if (trim(desp_mode) /= null_mode) then - call t_startf('desp_strdata_advance') - call shr_strdata_advance(SDESP, currentYMD, currentTOD, mpicom, 'desp') - call t_stopf('desp_strdata_advance') - end if - - call t_startf('desp_mode') - - if (.not. ANY(pause_sig)) then - if ( (my_task == master_task) .and. & - ((loglevel > 1) .or. (trim(desp_mode) == test_mode))) then - write(logunit, '(2a,i6.4,"-",i2.2,"-",i2.2,"-",i5.5)') subname, & - 'WARNING: No pause signals found at ',yy,mm,dd,CurrentTOD - end if - end if - - errcode = NOERR - ! Find the active components and their restart files - ! Note hard-coded variable names are just for testing. This could be - ! changed if this feature comes to regular use - do ind = 1, desp_num_comps - if (pause_sig(ind)) then - select case (comp_names(ind)) - case('atm') - rfilename = atm_resume(inst_index) - varname = 'T' - case('lnd') - rfilename = lnd_resume(inst_index) - varname = 'T' - case('ice') - rfilename = ice_resume(inst_index) - varname = 'T' - case('ocn') - rfilename = ocn_resume(inst_index) - varname = 'PSURF_CUR' - case('glc') - rfilename = glc_resume(inst_index) - varname = 'T' - case('rof') - rfilename = rof_resume(inst_index) - varname = 'T' - case('wav') - rfilename = wav_resume(inst_index) - varname = 'T' - case('drv') - ! The driver is special, there may only be one (not multi-driver) - rfilename = cpl_resume(min(inst_index,size(cpl_resume,1))) - varname = 'x2oacc_ox_Foxx_swnet' - case default - call shr_sys_abort(subname//'Unrecognized ind') - end select - ! Just die on errors for now - select case (errcode) - case(NO_READ) - call shr_sys_abort(subname//'No restart read access for '//comp_names(ind)) - case(NO_WRITE) - call shr_sys_abort(subname//'No restart write access for '//comp_names(ind)) - ! No default case needed, just fall through - end select - select case (trim(desp_mode)) - case(noop_mode) - ! 
Find the correct restart files but do not change them. - if ((my_task == master_task) .and. (loglevel > 0)) then - write(logunit, *) subname, 'Found restart file ',trim(rfilename) - end if - case(test_mode) - ! Find the correct restart files and 'tweak' them - if ((my_task == master_task) .and. (loglevel > 0)) then - write(logunit, *) subname, 'Found restart file ',trim(rfilename) - end if - call esp_pio_modify_variable(COMPID, mpicom, rfilename, varname, var_found) - if (.not. var_found) then - call shr_sys_abort(subname//'Variable, '//trim(varname)//', not found on '//rfilename) - end if - case (null_mode) - ! Since DESP is not 'present' for this mode, we should not get here. - call shr_sys_abort(subname//'DESP should not run in "NULL" mode') - end select - end if - end do - - call t_stopf('desp_mode') - - if (write_restart) then - call t_startf('desp_restart') - call shr_cal_ymdtod2string(date_str, yy,mm,dd,currentTOD) - write(rest_file,"(6a)") & - trim(case_name), '.desp',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - close(nu) - call shr_file_freeUnit(nu) - end if - - if (my_task == master_task) then - write(logunit,F04) ' writing ',trim(rest_file), currentYMD, currentTOD - end if - ! Write any restart info here? -- XXgoldyXX - call shr_sys_flush(logunit) - call t_stopf('desp_restart') - - end if - - call t_stopf('desp') - - !-------------------------------------------------------------------------- - ! Log output for model date - ! Reset shr logging to original values - !-------------------------------------------------------------------------- - - call t_startf('desp_run2') - if ((loglevel > 1) .and. (my_task == master_task)) then - write(logunit,F04) trim(myModelName),': model date ', CurrentYMD,CurrentTOD - call shr_sys_flush(logunit) - end if - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - call t_stopf('desp_run2') - - call t_stopf('DESP_RUN') - - end subroutine desp_comp_run - - !============================================================================ - subroutine desp_comp_final() - - ! !DESCRIPTION: finalize method for data esp model - !--- formats --- - character(len=*), parameter :: subName = "(desp_comp_final) " - character(len=*), parameter :: F00 = "('"//subName//"',8a)" - character(len=*), parameter :: F91 = "('"//subName//"',73('-'))" - !-------------------------------------------------------------------------- - - call t_startf('DESP_FINAL') - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) - end if - - call t_stopf('DESP_FINAL') - - end subroutine desp_comp_final - - !============================================================================ - logical function desp_bcast_res_files(oneletterid) - character(len=1), intent(in) :: oneletterid - - desp_bcast_res_files = bcast_filenames - end function desp_bcast_res_files - - !============================================================================ - -end module desp_comp_mod diff --git a/src/components/data_comps/desp/esp_utils.F90 b/src/components/data_comps/desp/esp_utils.F90 deleted file mode 100644 index c25846d7417..00000000000 --- a/src/components/data_comps/desp/esp_utils.F90 +++ /dev/null @@ -1,345 +0,0 @@ -module esp_utils - - ! 
!USES: - - use shr_kind_mod, only: r8=>shr_kind_r8, CL=>SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - - implicit none - private - - public :: esp_pio_modify_variable - -CONTAINS - - subroutine esp_modify_array(array, pertlim, absolute) - real(r8), intent(inout) :: array(:) - real(r8), intent(in) :: pertlim - logical, intent(in) :: absolute - - integer :: ind - integer :: rndm_seed_sz - integer, allocatable :: rndm_seed(:) - real(r8) :: pertval - - call random_seed(size=rndm_seed_sz) - allocate(rndm_seed(rndm_seed_sz)) - rndm_seed = 8.53973422267357_r8 - call random_seed(put=rndm_seed) - do ind = 1, size(array) - call random_number(pertval) - pertval = 2._r8 * pertlim * (0.5_r8 - pertval) - if (absolute) then - array(ind) = pertval - else - array(ind) = array(ind) * (1.0_r8 + pertval) - end if - end do - - end subroutine esp_modify_array - - subroutine esp_pio_handle_error(ierr, errorstr) - use pio, only: pio_noerr - - ! Dummy arguments - integer, intent(in) :: ierr - character(len=*), intent(in) :: errorstr - - ! Local variables - character(len=256) :: errormsg - - if (ierr /= PIO_NOERR) then - write(errormsg, '(a,i6,2a)') '(PIO:', ierr, ') ', trim(errorstr) - call shr_sys_abort(errormsg) - end if - - end subroutine esp_pio_handle_error - - subroutine esp_pio_openfile(file, fname, piosys, iotype, mode, iulog) - use pio, only: pio_openfile, file_desc_t, pio_iotask_rank, iosystem_desc_t - use pio, only: PIO_NOWRITE, PIO_WRITE, PIO_NOERR - - type(file_desc_t), target, intent(inout) :: file - character(len=*), intent(in) :: fname - type(iosystem_desc_t), pointer :: piosys - integer, intent(in) :: iotype - integer, intent(in) :: mode - integer, optional, intent(in) :: iulog - - integer :: ierr - character(len=CL) :: errmsg - character(len=*), parameter :: subname = 'ESP_PIO_OPENFILE: ' - - ierr = pio_openfile(piosys, file, iotype, fname, mode) - - if(ierr/= PIO_NOERR) then - if (mode == PIO_nowrite) then - write(errmsg, '(3a,i0)') 'Failed to open ',trim(fname),' to read, error = ',ierr - call shr_sys_abort(subname//trim(errmsg)) - else if (mode == PIO_write) then - write(errmsg, '(3a,i0)') 'Failed to open ',trim(fname),' to write, error = ',ierr - call shr_sys_abort(subname//trim(errmsg)) - else - write(errmsg, '(3a,i0,a,i0)') 'Failed to open ',trim(fname),' with mode = ',mode,', error = ',ierr - call shr_sys_abort(subname//trim(errmsg)) - end if - else if((pio_iotask_rank(piosys) == 0) .and. present(iulog)) then - write(iulog,'(2a)') 'Opened existing file, ', trim(fname) - call shr_sys_flush(iulog) - end if - - end subroutine esp_pio_openfile - - subroutine esp_pio_closefile(file) - - use pio, only : pio_closefile, file_desc_t - - type(file_desc_t), intent(inout), target :: file - - call pio_closefile(file) - - end subroutine esp_pio_closefile - - logical function esp_pio_fileexists(fname, piosys, iotype) - use pio, only: pio_openfile, file_desc_t, iosystem_desc_t - use pio, only: pio_seterrorhandling, PIO_BCAST_ERROR - use pio, only: pio_closefile, PIO_NOERR, PIO_NOWRITE - - character(len=*), intent(in) :: fname - type(iosystem_desc_t), pointer :: piosys - integer, intent(in) :: iotype - - type(file_desc_t) :: file - integer :: ierr - integer :: err_handling - - ! We will handle errors for this routine - - call pio_seterrorhandling(piosys, PIO_BCAST_ERROR, err_handling) - - ierr = pio_openfile(piosys, file, iotype, fname, PIO_NOWRITE) - esp_pio_fileexists = (ierr == PIO_NOERR) - if (esp_pio_fileexists) then - call pio_closefile(file) - end if - - ! 
Back to whatever error handling was running before this routine - call pio_seterrorhandling(File, err_handling) - - end function esp_pio_fileexists - - !----------------------------------------------------------------------- - ! - ! esp_pio_var_info: Retrieve variable properties - ! - !----------------------------------------------------------------------- - subroutine esp_pio_var_info(ncid, varid, ndims, dimids, dimlens, dimnames, varname, unlimDimID) - use pio, only: file_desc_t, var_desc_t - use pio, only: PIO_inq_varndims, PIO_inq_vardimid, PIO_inq_dimlen - use pio, only: PIO_inquire, PIO_inq_dimname - use pio, only: PIO_seterrorhandling, PIO_BCAST_ERROR - - - ! Dummy arguments - type(file_desc_t), intent(inout) :: ncid - type(var_desc_t), intent(in) :: varid - integer, intent(out) :: ndims - integer, intent(out) :: dimids(:) - integer, intent(out) :: dimlens(:) - character(len=*), optional, intent(out) :: dimnames(:) - integer, optional, intent(out) :: unlimDimID - character(len=*), optional, intent(in) :: varname - - ! Local variables - integer :: ret ! PIO return value - integer :: i - integer :: err_handling - character(len=128) :: errsuff - !----------------------------------------------------------------------- - ! We will handle errors for this routine - - call PIO_seterrorhandling(ncid, PIO_BCAST_ERROR, err_handling) - - dimids = -1 - ndims = 0 - dimlens = 0 - - if (present(varname)) then - errsuff = ' for '//trim(varname) - else - errsuff = '' - end if - ! Check dimensions - ret = PIO_inq_varndims(ncid, varid, ndims) - call esp_pio_handle_error(ret, 'ESP_PIO_VAR_INFO: Error with num dimensions') - if (size(dimids) < ndims) then - call shr_sys_abort('ESP_PIO_VAR_INFO: dimids too small'//trim(errsuff)) - end if - ret = PIO_inq_vardimid(ncid, varid, dimids(1:ndims)) - call esp_pio_handle_error(ret, 'ESP_PIO_VAR_INFO: Error with inq dim ids'//trim(errsuff)) - if (size(dimlens) < ndims) then - call shr_sys_abort('ESP_PIO_VAR_INFO: dimlens too small'//trim(errsuff)) - end if - do i = 1, ndims - ret = PIO_inq_dimlen(ncid, dimids(i), dimlens(i)) - call esp_pio_handle_error(ret, 'ESP_PIO_VAR_INFO: Error with inq dimlens') - if (present(dimnames)) then - ret = PIO_inq_dimname(ncid, dimids(i), dimnames(i)) - call esp_pio_handle_error(ret, 'ESP_PIO_VAR_INFO: Error with inq dimnames') - end if - end do - if (present(unlimDimID)) then - ret = PIO_inquire(ncid, unlimitedDimID=unlimDimID) - call esp_pio_handle_error(ret, 'ESP_PIO_VAR_INFO: Error with inquire') - end if - call PIO_seterrorhandling(ncid, err_handling) - - end subroutine esp_pio_var_info - - subroutine esp_pio_find_var(ncid, varname, varid, found) - use pio, only: file_desc_t, var_desc_t - use pio, only: pio_inq_varid, pio_noerr - use pio, only: PIO_seterrorhandling, PIO_BCAST_ERROR - - ! Dummy arguments - type(file_desc_t), intent(inout) :: ncid - character(len=*), intent(in) :: varname - type(var_desc_t), intent(out) :: varid - logical, intent(out) :: found - - ! Local variables - integer :: ret ! PIO return value - integer :: err_handling - - !----------------------------------------------------------------------- - ! 
We will handle errors for this routine - - call PIO_seterrorhandling(ncid, PIO_BCAST_ERROR, err_handling) - ret = PIO_inq_varid(ncid, trim(varname), varid) - found = (ret == PIO_NOERR) - call PIO_seterrorhandling(ncid, err_handling) - - end subroutine esp_pio_find_var - - subroutine esp_pio_newdecomp(iodesc, piosys, iotype, dims, dof, dtype) - use pio, only: pio_initdecomp, pio_offset_kind, pio_iotype_pnetcdf - use pio, only: io_desc_t, iosystem_desc_t, PIO_REARR_SUBSET, PIO_REARR_BOX - - type(io_desc_t) :: iodesc - type(iosystem_desc_t), pointer :: piosys - integer, intent(in) :: iotype - integer, intent(in) :: dims(:) - integer(kind=PIO_OFFSET_KIND), intent(in) :: dof(:) - integer, intent(in) :: dtype - - integer :: pio_rearranger - - if(iotype == pio_iotype_pnetcdf) then - pio_rearranger = PIO_REARR_SUBSET - else - pio_rearranger = PIO_REARR_BOX - endif - - call pio_initdecomp(piosys, dtype, dims, dof, iodesc, rearr=pio_rearranger) - - end subroutine esp_pio_newdecomp - - subroutine esp_pio_modify_variable(id, comm, filename, varname, found) - use mpi, only: MPI_LOGICAL, MPI_LAND - use shr_mpi_mod, only: shr_mpi_commsize, shr_mpi_commrank - use shr_pio_mod, only: shr_pio_getiosys, shr_pio_getiotype - use pio, only: PIO_write, file_desc_t, pio_offset_kind - use pio, only: io_desc_t, var_desc_t, pio_freedecomp, PIO_DOUBLE - use pio, only: pio_read_darray, pio_write_darray, iosystem_desc_t - - integer, intent(in) :: id - integer, intent(in) :: comm - character(len=*), intent(in) :: filename - character(len=*), intent(in) :: varname - logical, intent(out) :: found - - type(file_desc_t) :: file - integer :: ierr - integer :: mode - integer :: pio_iotype - type(var_desc_t) :: varid - integer :: ndims - integer :: dimids(7) - integer :: dimlens(7) - integer :: totalsize, mysize - integer :: i - integer :: uid - integer :: npes, iam - logical :: equiv, all_equiv - integer :: offset - real(r8), allocatable :: varr(:) - integer(pio_offset_kind), allocatable :: ldof(:) - type(io_desc_t) :: iodesc - type(iosystem_desc_t), pointer :: pio_subsystem - character(len=*), parameter :: subname = 'ESP_PIO_MODIFY_VARIABLE: ' - - pio_subsystem => shr_pio_getiosys(id) - pio_iotype = shr_pio_getiotype(id) - mode = PIO_WRITE - - call esp_pio_openfile(file, filename, pio_subsystem, pio_iotype, mode) - ! Find the variable - call esp_pio_find_var(file, varname, varid, found) - if (found) then - ! Check dimensions - call esp_pio_var_info(file, varid, ndims, dimids, dimlens, & - varname=varname, unlimDimID=uid) - ! Skip the unlimited dimension if it is in varname - ierr = 1 - do i = 1, ndims - if (i > ierr) then - dimids(ierr) = dimids(i) - dimlens(ierr) = dimlens(i) - end if - if (dimids(i) /= uid) then - ierr = ierr + 1 - end if - end do - ndims = ndims - COUNT(dimids(1:ndims) == uid) - ! Calculate global and local array sizes - totalsize = PRODUCT(dimlens(1:ndims)) - call shr_mpi_commsize(comm, npes) - call shr_mpi_commrank(comm, iam) - mysize = totalsize / npes - if (iam < MOD(totalsize, npes)) then - mysize = mysize + 1 - end if - allocate(varr(mysize)) - allocate(ldof(mysize)) - offset = (iam * (totalsize / npes)) + MIN(iam, MOD(totalsize, npes)) - do i = 1, mysize - ldof(i) = i + offset - end do - call esp_pio_newdecomp(iodesc, pio_subsystem, pio_iotype, & - dimlens(1:ndims), ldof, PIO_DOUBLE) - call pio_read_darray(file, varid, iodesc, varr, ierr) - call esp_pio_handle_error(ierr, subname//'Error reading variable '//trim(varname)) - ! 
See if we have a constant zero value - equiv = ALL(varr == 0.0_r8) - call mpi_allreduce(equiv, all_equiv, 1, MPI_LOGICAL, MPI_LAND, comm, ierr) - ! Modify and write back to file - call esp_modify_array(varr, 1.0e-12_r8, all_equiv) - call pio_write_darray(file, varid, iodesc, varr, ierr) - call esp_pio_handle_error(ierr, subname//'Error writing variable '//trim(varname)) - endif - - call esp_pio_closefile(file) - if (found) then - ! Cleanup - call pio_freedecomp(pio_subsystem, iodesc) - end if - if (allocated(varr)) then - deallocate(varr) - end if - if (allocated(ldof)) then - deallocate(ldof) - end if - - end subroutine esp_pio_modify_variable - -end module esp_utils diff --git a/src/components/data_comps/desp/mct/esp_comp_mct.F90 b/src/components/data_comps/desp/mct/esp_comp_mct.F90 deleted file mode 100644 index 16494c054e0..00000000000 --- a/src/components/data_comps/desp/mct/esp_comp_mct.F90 +++ /dev/null @@ -1,203 +0,0 @@ -module esp_comp_mct - - ! !USES: - - use esmf, only: ESMF_Clock - use mct_mod, only: mct_aVect - use seq_cdata_mod, only: seq_cdata - use seq_infodata_mod, only: seq_infodata_type - use desp_comp_mod, only: desp_num_comps - - ! !PUBLIC TYPES: - implicit none - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: esp_init_mct - public :: esp_run_mct - public :: esp_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - integer :: comp_index(desp_num_comps) - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !============================================================================ - !BOP ======================================================================== - ! - ! !IROUTINE: esp_init_mct - ! - ! !DESCRIPTION: - ! initialize data esp model - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: --------------------------------------------------------------- - - subroutine esp_init_mct(EClock, cdata, x2a, a2x, NLFilename) - use desp_comp_mod, only: desp_comp_init, comp_names - use seq_cdata_mod, only: seq_cdata_setptrs - use seq_comm_mct, only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData - use seq_timemgr_mod, only: seq_timemgr_pause_component_index - - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata), intent(inout) :: cdata - type(mct_aVect), intent(inout) :: x2a, a2x ! Not used - character(len=*), optional, intent(in) :: NLFilename ! Not used - - !EOP - - integer :: inst_index ! (e.g., 1) - character(len=16) :: inst_name ! (e.g. "exp_0001") - character(len=16) :: inst_suffix ! (e.g. "_0001" or "") - integer :: ind - integer :: ESPID - integer :: mpicom_esp - integer :: esp_phase - logical :: esp_present - logical :: esp_prognostic - logical :: read_restart - type(seq_infodata_type), pointer :: infodata - character(len=*), parameter :: subName = "(esp_init_mct) " - !-------------------------------------------------------------------------- - - ! 
Retrieve info for init method - call seq_cdata_setptrs(cdata, ID=ESPID, mpicom=mpicom_esp, & - infodata=infodata) - call seq_infodata_getData(infodata, esp_phase=esp_phase, & - read_restart=read_restart) - - inst_name = seq_comm_name(ESPID) - inst_index = seq_comm_inst(ESPID) - inst_suffix = seq_comm_suffix(ESPID) - - call desp_comp_init(EClock, ESPID, mpicom_esp, esp_phase, read_restart, & - inst_name, inst_index, inst_suffix, esp_present, esp_prognostic) - - ! Set the ESP model state - call seq_infodata_PutData(infodata, & - esp_present=esp_present, esp_prognostic=esp_prognostic) - - ! Retrieve component indices from the time manager - do ind = 1, desp_num_comps - comp_index(ind) = seq_timemgr_pause_component_index(comp_names(ind)) - end do - - end subroutine esp_init_mct - - !============================================================================ - !BOP ======================================================================== - ! - ! !IROUTINE: esp_run_mct - ! - ! !DESCRIPTION: - ! run method for data esp model - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine esp_run_mct( EClock, cdata, x2a, a2x) - use shr_kind_mod, only: CL=>SHR_KIND_CL - use seq_comm_mct, only: num_inst_atm, num_inst_lnd, num_inst_rof - use seq_comm_mct, only: num_inst_ocn, num_inst_ice, num_inst_glc - use seq_comm_mct, only: num_inst_wav, num_inst_max, num_inst_driver - use desp_comp_mod, only: desp_comp_run, desp_bcast_res_files - use seq_cdata_mod, only: seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData - use seq_pauseresume_mod, only: seq_resume_get_files, seq_resume_store_comp - use seq_timemgr_mod, only: seq_timemgr_pause_component_active - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata), intent(inout) :: cdata - type(mct_aVect), intent(inout) :: x2a ! Not used - type(mct_aVect), intent(inout) :: a2x ! Not used - - !EOP - - integer :: ind - type(seq_infodata_type), pointer :: infodata - logical :: pause_sig(desp_num_comps) - character(len=CL), pointer :: atm_resume(:) - character(len=CL), pointer :: lnd_resume(:) - character(len=CL), pointer :: rof_resume(:) - character(len=CL), pointer :: ocn_resume(:) - character(len=CL), pointer :: ice_resume(:) - character(len=CL), pointer :: glc_resume(:) - character(len=CL), pointer :: wav_resume(:) - character(len=CL), pointer :: cpl_resume(:) - character(len=CL) :: case_name - character(len=*), parameter :: subName = "(esp_run_mct) " - !-------------------------------------------------------------------------- - - ! Grab infodata and case name - call seq_cdata_setptrs(cdata, infodata=infodata) - call seq_infodata_GetData(infodata, case_name=case_name) - ! Grab any active resume filenames - call seq_resume_get_files('a', atm_resume, bcast=desp_bcast_res_files('a')) - call seq_resume_get_files('l', lnd_resume, bcast=desp_bcast_res_files('l')) - call seq_resume_get_files('o', ocn_resume, bcast=desp_bcast_res_files('o')) - call seq_resume_get_files('i', ice_resume, bcast=desp_bcast_res_files('i')) - call seq_resume_get_files('r', rof_resume, bcast=desp_bcast_res_files('r')) - call seq_resume_get_files('g', glc_resume, bcast=desp_bcast_res_files('g')) - call seq_resume_get_files('w', wav_resume, bcast=desp_bcast_res_files('w')) - call seq_resume_get_files('x', cpl_resume, bcast=desp_bcast_res_files('x')) - ! 
Find out if we should be running - do ind = 1, desp_num_comps - pause_sig(ind) = seq_timemgr_pause_component_active(comp_index(ind)) - end do - call desp_comp_run(EClock, case_name, pause_sig, atm_resume, lnd_resume, & - rof_resume, ocn_resume, ice_resume, glc_resume, wav_resume, & - cpl_resume) - - end subroutine esp_run_mct - - !============================================================================ - !BOP ======================================================================== - ! - ! !IROUTINE: esp_final_mct - ! - ! !DESCRIPTION: - ! finalize method for data esp model - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: --------------------------------------------------------------- - ! - subroutine esp_final_mct(EClock, cdata, x2d, d2x) - use desp_comp_mod, only: desp_comp_final - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> data - type(mct_aVect) ,intent(inout) :: d2x ! data -> driver - - !EOP - - !--- formats --- - character(*), parameter :: subName = "(esp_final_mct) " - !-------------------------------------------------------------------------- - - call desp_comp_final() - - end subroutine esp_final_mct - !============================================================================ - !============================================================================ - - -end module esp_comp_mct diff --git a/src/components/data_comps/dice/cime_config/buildlib b/src/components/data_comps/dice/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/data_comps/dice/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/data_comps/dice/cime_config/buildnml b/src/components/data_comps/dice/cime_config/buildnml deleted file mode 100755 index 3b0b3cfdb01..00000000000 --- a/src/components/data_comps/dice/cime_config/buildnml +++ /dev/null @@ -1,219 +0,0 @@ -#!/usr/bin/env python - -"""Namelist creator for CIME's data ice model. -""" - -# Typically ignore this. -# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position - -import os, sys, glob - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.XML.files import Files -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, safe_copy -from CIME.buildnml import create_namelist_infile, parse_input - -logger = logging.getLogger(__name__) - -# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements -#################################################################################### -def _create_namelists(case, confdir, inst_string, infile, nmlgen, data_list_path): -#################################################################################### - """Write out the namelist for this component. - - Most arguments are the same as those for `NamelistGenerator`. The - `inst_string` argument is used as a suffix to distinguish files for - different instances. The `confdir` argument is used to specify the directory - in which output files will be placed. 
- """ - - #---------------------------------------------------- - # Get a bunch of information from the case. - #---------------------------------------------------- - ice_domain_file = case.get_value("ICE_DOMAIN_FILE") - ice_domain_path = case.get_value("ICE_DOMAIN_PATH") - dice_mode = case.get_value("DICE_MODE") - ice_grid = case.get_value("ICE_GRID") - sstice_stream = case.get_value("SSTICE_STREAM") - - #---------------------------------------------------- - # Check for incompatible options. - #---------------------------------------------------- - expect(ice_grid != "null", - "ICE_GRID cannot be null") - expect(dice_mode != "NULL", - "DICE_MODE cannot be NULL") - - #---------------------------------------------------- - # Log some settings. - #---------------------------------------------------- - logger.debug("DICE mode is {}".format(dice_mode)) - logger.debug("DICE grid is {}".format(ice_grid)) - - #---------------------------------------------------- - # Create configuration information. - #---------------------------------------------------- - config = {} - config['ice_grid'] = ice_grid - config['dice_mode'] = dice_mode - config['sstice_stream'] = sstice_stream - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - #---------------------------------------------------- - # Construct the list of streams. - #---------------------------------------------------- - streams = nmlgen.get_streams() - - #---------------------------------------------------- - # For each stream, create stream text file and update - # shr_strdata_nml group and input data list. - #---------------------------------------------------- - for stream in streams: - - # Ignore null values. - if stream is None or stream in ("NULL", ""): - continue - - inst_stream = stream + inst_string - logger.debug("DICE stream is {}".format(inst_stream)) - stream_path = os.path.join(confdir, "dice.streams.txt." + inst_stream) - user_stream_path = os.path.join(case.get_case_root(), - "user_dice.streams.txt." + inst_stream) - - # Use the user's stream file, or create one if necessary. - if os.path.exists(user_stream_path): - safe_copy(user_stream_path, stream_path) - config['stream'] = stream - nmlgen.update_shr_strdata_nml(config, stream, stream_path) - else: - nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) - - #---------------------------------------------------- - # Create `shr_strdata_nml` namelist group. - #---------------------------------------------------- - # set per-stream variables - nmlgen.create_shr_strdata_nml() - - # set variables that are not per-stream - if ice_domain_file != "UNSET": - full_domain_path = os.path.join(ice_domain_path, ice_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) - else: - nmlgen.add_default("domainfile", value='null') - - #---------------------------------------------------- - # Finally, write out all the namelists. 
- #---------------------------------------------------- - namelist_file = os.path.join(confdir, "dice_in") - nmlgen.write_output_file(namelist_file, data_list_path, groups=['dice_nml','shr_strdata_nml']) - -############################################################################### -def buildnml(case, caseroot, compname): -############################################################################### - if compname != "dice": - raise AttributeError - - rundir = case.get_value("RUNDIR") - ninst = case.get_value("NINST_ICE") - if ninst is None: - ninst = case.get_value("NINST") - - # Determine configuration directory - confdir = os.path.join(caseroot,"Buildconf",compname + "conf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - #---------------------------------------------------- - # Construct the namelist generator - #---------------------------------------------------- - # determine directory for user modified namelist_definitions.xml and namelist_defaults.xml - user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) - expect (os.path.isdir(user_xml_dir), - "user_xml_dir {} does not exist ".format(user_xml_dir)) - - # NOTE: User definition *replaces* existing definition. - files = Files() - definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component":"dice"})] - - user_definition = os.path.join(user_xml_dir, "namelist_definition_dice.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file, files=files) - - #---------------------------------------------------- - # Clear out old data. - #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", - "dice.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - - #---------------------------------------------------- - # Loop over instances - #---------------------------------------------------- - for inst_counter in range(1, ninst+1): - # determine instance string - inst_string = "" - if ninst > 1: - inst_string = '_' + "{:04d}".format(inst_counter) - - # If multi-instance case does not have restart file, use - # single-case restart for each instance - rpointer = "rpointer." 
+ compname - if (os.path.isfile(os.path.join(rundir,rpointer)) and - (not os.path.isfile(os.path.join(rundir,rpointer + inst_string)))): - safe_copy(os.path.join(rundir, rpointer), - os.path.join(rundir, rpointer + inst_string)) - - inst_string_label = inst_string - if not inst_string_label: - inst_string_label = "\"\"" - - # create namelist output infile using user_nl_file as input - user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) - expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file {} ".format(user_nl_file)) - infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, infile) - namelist_infile = [infile] - - # create namelist and stream file(s) data component - _create_namelists(case, confdir, inst_string, namelist_infile, nmlgen, data_list_path) - - # copy namelist files and stream text files, to rundir - if os.path.isdir(rundir): - filename = compname + "_in" - file_src = os.path.join(confdir, filename) - file_dest = os.path.join(rundir, filename) - if inst_string: - file_dest += inst_string - safe_copy(file_src,file_dest) - - for txtfile in glob.glob(os.path.join(confdir, "*txt*")): - safe_copy(txtfile, rundir) - -############################################################################### -def _main_func(): - # Build the component namelist and required stream txt files - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "dice") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/data_comps/dice/cime_config/config_archive.xml b/src/components/data_comps/dice/cime_config/config_archive.xml deleted file mode 100644 index a36739c721a..00000000000 --- a/src/components/data_comps/dice/cime_config/config_archive.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - r - unset - - rpointer.ice$NINST_STRING - $CASE.dice$NINST_STRING.r.$DATENAME.nc,$CASE.dice$NINST_STRING.rs1.$DATENAME.bin - - - casename.dice.r.1976-01-01-00000.nc - rpointer.ice_0001 - rpointer.ice - - - diff --git a/src/components/data_comps/dice/cime_config/config_component.xml b/src/components/data_comps/dice/cime_config/config_component.xml deleted file mode 100644 index 4ccdc1a81e3..00000000000 --- a/src/components/data_comps/dice/cime_config/config_component.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - dice mode - is ssmi - is siaf - is prescribed - is null - - - - char - dice - dice - case_comp - env_case.xml - Name of ice component - - - - char - prescribed,ssmi,ssmi_iaf,copyall,null - ssmi - - ssmi - ssmi_iaf - prescribed - null - - run_component_dice - env_run.xml - DICE mode. DICE is a combination of a data model and a prognostic model. - The data functionality reads in ice coverage. The prognostic functionality - calculates the ice/atmosphere and ice/ocean fluxes. DICE receives the same - atmospheric input from the coupler as the active CICE model (i.e., atmospheric - states, shortwave fluxes, and ocean ice melt flux). DICE acts very similarly - to CICE running in prescribed mode.) Currently, this component is only - used to drive POP in C compsets. - If DICE_MODE is set to ssmi or ssmi_iaf, it is a prognostic mode. - It requires data be sent to the ice model. - Ice fraction (extent) data is read from an input stream, - atmosphere state variables are received from the coupler, and then - an atmosphere-ice surface flux is computed and sent to the - coupler. Normally the ice fraction data is found in the same data files - that provide SST data to the data ocean model. 
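For orientation, the DICE_MODE values listed above map onto the stream-data datamode defaults defined further down in namelist_definition_dice.xml. A small Python summary of that mapping; the "copyall" entry is inferred from the valid-value lists rather than from an explicit default, so treat it as an assumption:

    # DICE_MODE (env_run.xml) -> datamode (shr_strdata_nml) defaults.
    DICE_MODE_TO_DATAMODE = {
        "ssmi":       "SSTDATA",
        "ssmi_iaf":   "SSTDATA",
        "prescribed": "SSTDATA",
        "copyall":    "COPYALL",   # inferred from the valid-value lists
        "null":       "NULL",
    }

    def datamode_for(dice_mode):
        return DICE_MODE_TO_DATAMODE[dice_mode.lower()]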
They are normally found - in the same file because the SST and ice fraction data are derived from the - same observational data sets and are consistent with each other. - - - - ========================================= - DICE naming conventions - ========================================= - - - diff --git a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml b/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml deleted file mode 100644 index 03c3ab2f8a7..00000000000 --- a/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml +++ /dev/null @@ -1,684 +0,0 @@ - - - - - - - - - - - - - - char(100) - streams - streams_file - List of streams used for the given dice_mode. - - NULL - SSMI - SSMI_IAF - prescribed - - - - - char - streams - derived - does not appear in namelist - only used to set domain and data information - - $SSTICE_GRID_FILENAME - - - - - char - streams - derived - does not appear in namelist - only used to set domain and data information - - $SSTICE_GRID_FILENAME - - - - - char - streams - streams_file - Stream domain file directory. - - null - $DIN_LOC_ROOT/ice/dice7 - $DIN_LOC_ROOT/ice/dice7 - - - - - char - streams - streams_file - Stream domain file path(s). - - null - domain.ocn.x0.5.090227.nc - domain.ocn.x0.5.090227.nc - - - - - char - streams - streams_file - Stream domain variable name(s). - - - time time - xc lon - yc lat - area area - mask mask - - - - - - char - streams - streams_file - Stream data file directory. - - null - $DIN_LOC_ROOT/ice/dice7/SSMI - $DIN_LOC_ROOT/ocn/iaf - - - - - char - streams - streams_file - Stream data file path(s). - - null - ssmi_ifrac.clim.x0.5.090319.nc - - ssmi.ifrac.0.5x0.5.1948.nc - ssmi.ifrac.0.5x0.5.1949.nc - ssmi.ifrac.0.5x0.5.1950.nc - ssmi.ifrac.0.5x0.5.1951.nc - ssmi.ifrac.0.5x0.5.1952.nc - ssmi.ifrac.0.5x0.5.1953.nc - ssmi.ifrac.0.5x0.5.1954.nc - ssmi.ifrac.0.5x0.5.1955.nc - ssmi.ifrac.0.5x0.5.1956.nc - ssmi.ifrac.0.5x0.5.1957.nc - ssmi.ifrac.0.5x0.5.1958.nc - ssmi.ifrac.0.5x0.5.1959.nc - ssmi.ifrac.0.5x0.5.1960.nc - ssmi.ifrac.0.5x0.5.1961.nc - ssmi.ifrac.0.5x0.5.1962.nc - ssmi.ifrac.0.5x0.5.1963.nc - ssmi.ifrac.0.5x0.5.1964.nc - ssmi.ifrac.0.5x0.5.1965.nc - ssmi.ifrac.0.5x0.5.1966.nc - ssmi.ifrac.0.5x0.5.1967.nc - ssmi.ifrac.0.5x0.5.1968.nc - ssmi.ifrac.0.5x0.5.1969.nc - ssmi.ifrac.0.5x0.5.1970.nc - ssmi.ifrac.0.5x0.5.1971.nc - ssmi.ifrac.0.5x0.5.1972.nc - ssmi.ifrac.0.5x0.5.1973.nc - ssmi.ifrac.0.5x0.5.1974.nc - ssmi.ifrac.0.5x0.5.1975.nc - ssmi.ifrac.0.5x0.5.1976.nc - ssmi.ifrac.0.5x0.5.1977.nc - ssmi.ifrac.0.5x0.5.1978.nc - ssmi.ifrac.0.5x0.5.1979.nc - ssmi.ifrac.0.5x0.5.1980.nc - ssmi.ifrac.0.5x0.5.1981.nc - ssmi.ifrac.0.5x0.5.1982.nc - ssmi.ifrac.0.5x0.5.1983.nc - ssmi.ifrac.0.5x0.5.1984.nc - ssmi.ifrac.0.5x0.5.1985.nc - ssmi.ifrac.0.5x0.5.1986.nc - ssmi.ifrac.0.5x0.5.1987.nc - ssmi.ifrac.0.5x0.5.1988.nc - ssmi.ifrac.0.5x0.5.1989.nc - ssmi.ifrac.0.5x0.5.1990.nc - ssmi.ifrac.0.5x0.5.1991.nc - ssmi.ifrac.0.5x0.5.1992.nc - ssmi.ifrac.0.5x0.5.1993.nc - ssmi.ifrac.0.5x0.5.1994.nc - ssmi.ifrac.0.5x0.5.1995.nc - ssmi.ifrac.0.5x0.5.1996.nc - ssmi.ifrac.0.5x0.5.1997.nc - ssmi.ifrac.0.5x0.5.1998.nc - ssmi.ifrac.0.5x0.5.1999.nc - ssmi.ifrac.0.5x0.5.2000.nc - ssmi.ifrac.0.5x0.5.2001.nc - ssmi.ifrac.0.5x0.5.2002.nc - ssmi.ifrac.0.5x0.5.2003.nc - ssmi.ifrac.0.5x0.5.2004.nc - ssmi.ifrac.0.5x0.5.2005.nc - ssmi.ifrac.0.5x0.5.2006.nc - ssmi.ifrac.0.5x0.5.2007.nc - ssmi.ifrac.0.5x0.5.2008.20120420.nc - ssmi.ifrac.0.5x0.5.2009.20120420.nc - - - - - - char - streams - streams_file - 
Stream data variable name(s). - - - ifrac ifrac - - - - - - integer - streams - streams_file - Stream offset. - - 0 - - - - - integer - streams - streams_file - Simulation year to align stream to. - - -999 - 1 - 1 - $SSTICE_YEAR_ALIGN - - - - - integer - streams - streams_file - First year of stream. - - -999 - 1 - 1948 - $SSTICE_YEAR_START - - - - - integer - streams - streams_file - Last year of stream. - - -999 - 1 - 2009 - $SSTICE_YEAR_END - - - - - - - - - - - - - char - streams - shr_strdata_nml - NULL,SSTDATA,COPYALL - - general method that operates on the data. this is generally - implemented in the data models but is set in the strdata method for - convenience. valid options are dependent on the data model and will - be described elsewhere. NULL is always a valid option and means no - data will be generated. default='NULL' - datamode = "NULL" - Turns off the data model as a provider of data to the coupler. The - ice_present flag will be set to false and the coupler will assume no - exchange of data to or from the data model. - dataMode = "COPYALL" - Copies all fields directly from the input data streams Any required - fields not found on an input stream will be set to zero. - dataMode = "SSTDATA" - Is a prognostic mode. It requires data be sent to the ice - model. Ice fraction (extent) data is read from an input stream, - atmosphere state variables are received from the coupler, and then - an atmosphere-ice surface flux is computed and sent to the - coupler. It is called "SSTDATA" mode because normally the ice - fraction data is found in the same data files that provide SST - data to the data ocean model. They are normally found in the same - file because the SST and ice fraction data are derived from the - same observational data sets and are consistent with each other. - Set by the xml variable DICE_MODE in env_run.xml - Currently, DICE_MODE can be [ssmi, ssmi_iaf, null] - If DICE_MODE is set to ssmi or ssmi_iaf, datamode will be set to SSTDATA - If DICE_MODE is set to null, datamodel will be set to NULL - - - NULL - SSTDATA - SSTDATA - SSTDATA - - - - - char - streams - abs - shr_strdata_nml - - spatial gridfile associated with the strdata. grid information will - be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - - - $ICE_DOMAIN_PATH/$ICE_DOMAIN_FILE - $SSTICE_GRID_FILENAME - $SSTICE_GRID_FILENAME - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are just copy (ie. no fill), special value, - nearest neighbor, nearest neighbor in "i" direction, or nearest - neighbor in "j" direction. - valid values: 'copy','spval','nn','nnoni','nnonj' - - - nn - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - plays no role is fill algorithm at the present time. - valid values: "nomask,srcmask,dstmask,bothmask" - - - nomask - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read in instead of computing the - weights on the fly for the fill operation. if this is set, fillalgo - and fillmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the fill operation. 
this allows a user to - save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - array (up to 30 elements) of masking algorithms for mapping input data - associated with the array of streams. valid options are map only from - valid src points, map only to valid destination points, ignore all - masks, map only from valid src points to valid destination points. - valid values: srcmask, dstmask, nomask, bothmask - - - dstmask - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are copy by index, set to special value, - nearest neighbor, nearest neighbor in "i" direction, nearest neighbor - in "j" direction, or bilinear. - valid values: copy,spval,nn,nnoni,nnonj,bilinear - - - bilinear - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read instead of computing - weights on the fly for the mapping (interpolation) operation. if this - is set, mapalgo and mapmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the mapping (interpolation) operation. this - allows a user to save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - coszen,nearest,linear,lower,upper - - array (up to 30 elements) of time interpolation options associated with the array of - streams. - valid values: lower,upper,nearest,linear,coszen - lower = Use lower time-value - upper = Use upper time-value - nearest = Use the nearest time-value - linear = Linearly interpolate between the two time-values - coszen = Scale according to the cosine of the solar zenith angle (for solar) - - - linear - - - - - char(30) - streams - shr_strdata_nml - extend,cycle,limit - - array of time axis modes associated with the array of streams for - handling data outside the specified stream time axis. - valid options are to cycle the data based on the first, last, and - align settings associated with the stream dataset, to extend the first - and last valid value indefinitely, or to limit the interpolated data - to fall only between the least and greatest valid value of the time array. - valid values: cycle,extend,limit - extend = extrapolate before and after the period by using the first or last value. - cycle = cycle between the range of data - limit = restrict to the period for which the data is valid - - - cycle - - - - - char(30) - streams - shr_strdata_nml - single,full_file - - array (up to 30 elements) of reading mode associated with the array of - streams. specifies the mode of reading temporal stream dataset. - valid options are "single" (read temporal dataset one at a time) or - "full_file" (read all entires of temporal dataset in a given netcdf file) - valid values: single,full_file - - - single - - - - - real(30) - streams - shr_strdata_nml - - array (up to 30 elements) of delta time ratio limits placed on the - time interpolation associated with the array of streams. this real - value causes the model to stop if the ratio of the running maximum - delta time divided by the minimum delta time is greater than the - dtlimit for that stream. 
for instance, with daily data, the delta - time should be exactly one day throughout the dataset and the computed - maximum divided by minimum delta time should always be 1.0. for - monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. the running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. - - - 1.5e0 - - - - - char - streams - shr_strdata_nml - - list of paired colon delimited field names that should be treated as - vectors when carrying out spatial interpolation. unlike other - character arrays in this namelist, this array is completely decoupled - from the list of streams. this is a list of vector pairs that span - all input streams where different fields of the vector pair could - appear in different streams. - for example, vectors = 'u:v','taux:tauy'. - - - null - - - - - char(30) - streams - shr_strdata_nml - - character array (up to 30 elements) of stream input files. this - string is actually parsed by a stream method and so the format is - specified by the stream module. this string consists of a - "stream_input_filename year_align year_first year_last". the - stream_input_filename is a stream text input file and the format and - options are described elsewhere. year_align, year_first, and - year_last provide information about the time axis of the file and how - to relate the input time axis to the model time axis. - default="null". - - - - - - - - - - - - char - dice - dice_nml - 1d,root - - set the decomposition option for the data model. valid options are - placing the global array on the root task or a simple stride-one - load balanced one-dimensional decomposition. other decompositions - may be added in the future. - valid values are ['root','1d']. - 1d = Vector decomposition - root = run only on the master task - - - 1d - - - - - logical - dice - dice_nml - - activates water accumulation/melt wrt Q - - - .true. - - - - - real - dice - dice_nml - - initial water accumulation value - - - 0. - - - - - real - dice - dice_nml - - bound on melt rate - - - -300.e0 - - - - - real - dice - dice_nml - - short-wave penatration factor - - - 0. - - - - - char - dice - dice_nml - - Model restart filename for the data ice model data. This is optional. - If both restfils and restfilm are undefined, the restart filename will - be read from the ICE restart pointer file (or files for multiple instances). - - - undefined - - - - - char - dice - dice_nml - - Stream restart filename for the data ice stream data. This is - optional. If both restfils and restfilm are undefined, the restart - filename will be read from the DICE restart pointer file (or files for - multiple instances). - - - undefined - - - - diff --git a/src/components/data_comps/dice/cime_config/user_nl_dice b/src/components/data_comps/dice/cime_config/user_nl_dice deleted file mode 100644 index b497393570c..00000000000 --- a/src/components/data_comps/dice/cime_config/user_nl_dice +++ /dev/null @@ -1,13 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_dice to change namelists variables -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! Note that any namelist variable from shr_strdata_nml and dice_nml can -! be modified below using the above syntax -! 
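As a concrete illustration of the override syntax described in this header, a user_nl_dice file might contain entries such as the following; the values shown are illustrative choices, not defaults:

    flux_swpf = 0.05        ! dice_nml: short-wave penetration factor
    flux_qacc = .true.      ! dice_nml: activate water accumulation/melt wrt Q
    taxmode = 'extend'      ! shr_strdata_nml: extend data outside the stream time axis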
User preview_namelists to view (not modify) the output namelist in the -! directory $CASEROOT/CaseDocs -! To modify the contents of a stream txt file, first use preview_namelists -! to obtain the contents of the stream txt files in CaseDocs, and then -! place a copy of the modified stream txt file in $CASEROOT with the string -! user_ prepended. -!------------------------------------------------------------------------ diff --git a/src/components/data_comps/dice/mct/dice_comp_mod.F90 b/src/components/data_comps/dice/mct/dice_comp_mod.F90 deleted file mode 100644 index 8f3afd70bf8..00000000000 --- a/src/components/data_comps/dice/mct/dice_comp_mod.F90 +++ /dev/null @@ -1,720 +0,0 @@ -#ifdef AIX -@PROCESS ALIAS_SIZE(805306368) -#endif -module dice_comp_mod - - ! !USES: - use esmf - use mct_mod - use perf_mod - use shr_pcdf_mod - use shr_const_mod - use shr_sys_mod - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only: shr_file_getunit, shr_file_freeunit - use shr_cal_mod , only: shr_cal_date2julian - use shr_mpi_mod , only: shr_mpi_bcast - use shr_frz_mod , only: shr_frz_freezetemp - use shr_cal_mod , only: shr_cal_ymd2julian - use shr_strdata_mod , only: shr_strdata_type, shr_strdata_pioinit, shr_strdata_init - use shr_strdata_mod , only: shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only: shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only: shr_dmodel_gsmapcreate, shr_dmodel_rearrGGrid - use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV - use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - - use dice_shr_mod , only: datamode ! namelist input - use dice_shr_mod , only: decomp ! namelist input - use dice_shr_mod , only: rest_file ! namelist input - use dice_shr_mod , only: rest_file_strm ! namelist input - use dice_shr_mod , only: flux_swpf ! namelist input -short-wave penatration factor - use dice_shr_mod , only: flux_Qmin ! namelist input -bound on melt rate - use dice_shr_mod , only: flux_Qacc ! namelist input -activates water accumulation/melt wrt Q - use dice_shr_mod , only: flux_Qacc0 ! namelist input -initial water accumulation value - use dice_shr_mod , only: nullstr - use dice_flux_atmice_mod, only: dice_flux_atmice - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dice_comp_init - public :: dice_comp_run - public :: dice_comp_final - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(CS) :: myModelName = 'ice' ! user defined model name - logical :: firstcall = .true. ! first call logical - character(len=*),parameter :: rpfile = 'rpointer.ice' - - real(R8),parameter :: pi = shr_const_pi ! pi - real(R8),parameter :: spval = shr_const_spval ! flags invalid data - real(R8),parameter :: tFrz = shr_const_tkfrz ! temp of freezing - real(R8),parameter :: latice = shr_const_latice ! latent heat of fusion - real(R8),parameter :: cDay = shr_const_cDay ! sec in calendar day - real(R8),parameter :: waterMax = 1000.0_R8 ! wrt iFrac comp & frazil ice (kg/m^2) - - !----- surface albedo constants ------ - real(R8),parameter :: snwfrac = 0.286_R8 ! 
snow cover fraction ~ [0,1] - real(R8),parameter :: as_nidf = 0.950_R8 ! albedo: snow,near-infr,diffuse - real(R8),parameter :: as_vsdf = 0.700_R8 ! albedo: snow,visible ,diffuse - real(R8),parameter :: as_nidr = 0.960_R8 ! albedo: snow,near-infr,direct - real(R8),parameter :: as_vsdr = 0.800_R8 ! albedo: snow,visible ,direct - real(R8),parameter :: ai_nidf = 0.700_R8 ! albedo: ice, near-infr,diffuse - real(R8),parameter :: ai_vsdf = 0.500_R8 ! albedo: ice, visible ,diffuse - real(R8),parameter :: ai_nidr = 0.700_R8 ! albedo: ice, near-infr,direct - real(R8),parameter :: ai_vsdr = 0.500_R8 ! albedo: ice, visible ,direct - real(R8),parameter :: ax_nidf = ai_nidf*(1.0_R8-snwfrac) + as_nidf*snwfrac - real(R8),parameter :: ax_vsdf = ai_vsdf*(1.0_R8-snwfrac) + as_vsdf*snwfrac - real(R8),parameter :: ax_nidr = ai_nidr*(1.0_R8-snwfrac) + as_nidr*snwfrac - real(R8),parameter :: ax_vsdr = ai_vsdr*(1.0_R8-snwfrac) + as_vsdr*snwfrac - - integer(IN) :: kswvdr,kswndr,kswvdf,kswndf,kq,kz,kua,kva,kptem,kshum,kdens,ktbot - integer(IN) :: kiFrac,kt,kavsdr,kanidr,kavsdf,kanidf,kswnet,kmelth,kmeltw - integer(IN) :: ksen,klat,klwup,kevap,ktauxa,ktauya,ktref,kqref,kswpen,ktauxo,ktauyo,ksalt - integer(IN) :: ksalinity - integer(IN) :: kbcpho, kbcphi, kflxdst - integer(IN) :: kbcphidry, kbcphodry, kbcphiwet, kocphidry, kocphodry, kocphiwet - integer(IN) :: kdstdry1, kdstdry2, kdstdry3, kdstdry4, kdstwet1, kdstwet2, kdstwet3, kdstwet4 - - ! optional per thickness category fields - integer(IN) :: kiFrac_01,kswpen_iFrac_01 - - type(mct_rearr) :: rearr - ! type(mct_avect) :: avstrm ! av of data from stream - integer(IN) , pointer :: imask(:) - real(R8) , pointer :: yc(:) - real(R8) , pointer :: water(:) - real(R8) , pointer :: tfreeze(:) - ! real(R8) , pointer :: ifrac0(:) - - !-------------------------------------------------------------------------- - integer(IN),parameter :: ktrans = 1 - character(16),parameter :: avofld(1:ktrans) = (/"Si_ifrac "/) - character(16),parameter :: avifld(1:ktrans) = (/"ifrac "/) - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine dice_comp_init(Eclock, x2i, i2x, & - seq_flds_x2i_fields, seq_flds_i2x_fields, seq_flds_i2o_per_cat, & - SDICE, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon) - - ! !DESCRIPTION: initialize dice model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2i, i2x ! input/output attribute vectors - character(len=*) , intent(in) :: seq_flds_x2i_fields ! fields from mediator - character(len=*) , intent(in) :: seq_flds_i2x_fields ! fields to mediator - logical , intent(in) :: seq_flds_i2o_per_cat ! .true. if select per ice thickness fields from ice - type(shr_strdata_type) , intent(inout) :: SDICE ! dice shr_strdata instance (output) - type(mct_gsMap) , pointer :: gsMap ! model global seg map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! 
char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - - !--- local variables --- - integer(IN) :: lsize ! local size - integer(IN) :: kfld ! field reference - logical :: exists ! file existance logical - integer(IN) :: nu ! unit number - character(CL) :: calendar ! calendar type - - !--- formats --- - character(*), parameter :: F00 = "('(dice_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dice_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dice_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dice_comp_init) ',a,4es13.6)" - character(*), parameter :: F03 = "('(dice_comp_init) ',a,i8,a)" - character(*), parameter :: F04 = "('(dice_comp_init) ',2a,2i8,'s')" - character(*), parameter :: F05 = "('(dice_comp_init) ',a,2f10.4)" - character(*), parameter :: F06 = "('(dice_comp_init) ',a,5l3)" - character(*), parameter :: F90 = "('(dice_comp_init) ',73('='))" - character(*), parameter :: F91 = "('(dice_comp_init) ',73('-'))" - character(*), parameter :: subName = "(dice_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DICE_INIT') - - call shr_strdata_pioinit(SDICE, COMPID) - - !---------------------------------------------------------------------------- - ! Initialize SDICE - !---------------------------------------------------------------------------- - - call t_startf('dice_strdata_init') - - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - - ! NOTE: shr_strdata_init calls shr_dmodel_readgrid which reads the data model - ! grid and from that computes SDICE%gsmap and SDICE%ggrid. DICE%gsmap is created - ! using the decomp '2d1d' (1d decomp of 2d grid) - - if (scmmode) then - if (my_task == master_task) then - write(logunit,F05) ' scm lon lat = ',scmlon,scmlat - end if - call shr_strdata_init(SDICE,mpicom,compid,name='ice', & - scmmode=scmmode,scmlon=scmlon,scmlat=scmlat, & - calendar=calendar) - else - call shr_strdata_init(SDICE,mpicom,compid,name='ice', & - calendar=calendar) - endif - - if (my_task == master_task) then - call shr_strdata_print(SDICE,'SDICE data') - endif - - call t_stopf('dice_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize MCT global seg map, 1d decomp - !---------------------------------------------------------------------------- - - call t_startf('dice_initgsmaps') - if (my_task == master_task) write(logunit,F00) ' initialize gsmaps' - call shr_sys_flush(logunit) - - ! create a data model global seqmap (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the dice_in namelist - ! (which by default is "1d") - call shr_dmodel_gsmapcreate(gsmap, SDICE%nxg*SDICE%nyg, compid, mpicom, decomp) - lsize = mct_gsmap_lsize(gsmap, mpicom) - - ! create a rearranger from the data model SDICE%gsmap to gsmap - call mct_rearr_init(SDICE%gsmap, gsmap, mpicom, rearr) - call t_stopf('dice_initgsmaps') - - !---------------------------------------------------------------------------- - ! 
Initialize MCT domain - !---------------------------------------------------------------------------- - - call t_startf('dice_initmctdom') - if (my_task == master_task) write(logunit,F00) 'copy domains' - call shr_sys_flush(logunit) - - call shr_dmodel_rearrGGrid(SDICE%grid, ggrid, gsmap, rearr, mpicom) - call t_stopf('dice_initmctdom') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('dice_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - call shr_sys_flush(logunit) - - call mct_aVect_init(i2x, rList=seq_flds_i2x_fields, lsize=lsize) - call mct_aVect_zero(i2x) - - kiFrac = mct_aVect_indexRA(i2x,'Si_ifrac') - kt = mct_aVect_indexRA(i2x,'Si_t') - ktref = mct_aVect_indexRA(i2x,'Si_tref') - kqref = mct_aVect_indexRA(i2x,'Si_qref') - kavsdr = mct_aVect_indexRA(i2x,'Si_avsdr') - kanidr = mct_aVect_indexRA(i2x,'Si_anidr') - kavsdf = mct_aVect_indexRA(i2x,'Si_avsdf') - kanidf = mct_aVect_indexRA(i2x,'Si_anidf') - kswnet = mct_aVect_indexRA(i2x,'Faii_swnet') - ksen = mct_aVect_indexRA(i2x,'Faii_sen') - klat = mct_aVect_indexRA(i2x,'Faii_lat') - klwup = mct_aVect_indexRA(i2x,'Faii_lwup') - kevap = mct_aVect_indexRA(i2x,'Faii_evap') - ktauxa = mct_aVect_indexRA(i2x,'Faii_taux') - ktauya = mct_aVect_indexRA(i2x,'Faii_tauy') - kmelth = mct_aVect_indexRA(i2x,'Fioi_melth') - kmeltw = mct_aVect_indexRA(i2x,'Fioi_meltw') - kswpen = mct_aVect_indexRA(i2x,'Fioi_swpen') - ktauxo = mct_aVect_indexRA(i2x,'Fioi_taux') - ktauyo = mct_aVect_indexRA(i2x,'Fioi_tauy') - ksalt = mct_aVect_indexRA(i2x,'Fioi_salt') - kbcpho = mct_aVect_indexRA(i2x,'Fioi_bcpho') - kbcphi = mct_aVect_indexRA(i2x,'Fioi_bcphi') - kflxdst= mct_aVect_indexRA(i2x,'Fioi_flxdst') - - ! optional per thickness category fields - if (seq_flds_i2o_per_cat) then - kiFrac_01 = mct_aVect_indexRA(i2x,'Si_ifrac_01') - kswpen_iFrac_01 = mct_aVect_indexRA(i2x,'PFioi_swpen_ifrac_01') - end if - - call mct_aVect_init(x2i, rList=seq_flds_x2i_fields, lsize=lsize) - call mct_aVect_zero(x2i) - - kswvdr = mct_aVect_indexRA(x2i,'Faxa_swvdr') - kswndr = mct_aVect_indexRA(x2i,'Faxa_swndr') - kswvdf = mct_aVect_indexRA(x2i,'Faxa_swvdf') - kswndf = mct_aVect_indexRA(x2i,'Faxa_swndf') - kq = mct_aVect_indexRA(x2i,'Fioo_q') - kz = mct_aVect_indexRA(x2i,'Sa_z') - kua = mct_aVect_indexRA(x2i,'Sa_u') - kva = mct_aVect_indexRA(x2i,'Sa_v') - kptem = mct_aVect_indexRA(x2i,'Sa_ptem') - kshum = mct_aVect_indexRA(x2i,'Sa_shum') - kdens = mct_aVect_indexRA(x2i,'Sa_dens') - ktbot = mct_aVect_indexRA(x2i,'Sa_tbot') - ksalinity = mct_aVect_indexRA(x2i,'So_s') - kbcphidry = mct_aVect_indexRA(x2i,'Faxa_bcphidry') - kbcphodry = mct_aVect_indexRA(x2i,'Faxa_bcphodry') - kbcphiwet = mct_aVect_indexRA(x2i,'Faxa_bcphiwet') - kocphidry = mct_aVect_indexRA(x2i,'Faxa_ocphidry') - kocphodry = mct_aVect_indexRA(x2i,'Faxa_ocphodry') - kocphiwet = mct_aVect_indexRA(x2i,'Faxa_ocphiwet') - kdstdry1 = mct_aVect_indexRA(x2i,'Faxa_dstdry1') - kdstdry2 = mct_aVect_indexRA(x2i,'Faxa_dstdry2') - kdstdry3 = mct_aVect_indexRA(x2i,'Faxa_dstdry3') - kdstdry4 = mct_aVect_indexRA(x2i,'Faxa_dstdry4') - kdstwet1 = mct_aVect_indexRA(x2i,'Faxa_dstwet1') - kdstwet2 = mct_aVect_indexRA(x2i,'Faxa_dstwet2') - kdstwet3 = mct_aVect_indexRA(x2i,'Faxa_dstwet3') - kdstwet4 = mct_aVect_indexRA(x2i,'Faxa_dstwet4') - - ! call mct_aVect_init(avstrm, rList=flds_strm, lsize=lsize) - ! 
call mct_aVect_zero(avstrm) - - allocate(imask(lsize)) - allocate(yc(lsize)) - allocate(water(lsize)) - allocate(tfreeze(lsize)) - ! allocate(iFrac0(lsize)) - - kfld = mct_aVect_indexRA(ggrid%data,'mask') - imask(:) = nint(ggrid%data%rAttr(kfld,:)) - kfld = mct_aVect_indexRA(ggrid%data,'lat') - yc(:) = ggrid%data%rAttr(kfld,:) - - call t_stopf('dice_initmctavs') - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - if (trim(rest_file) == trim(nullstr) .and. & - trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer' - call shr_sys_flush(logunit) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (.not.exists) then - write(logunit,F00) ' ERROR: rpointer file does not exist' - call shr_sys_abort(trim(subname)//' ERROR: rpointer file missing') - endif - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - call shr_sys_flush(logunit) - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - call shr_mpi_bcast(exists,mpicom,'exists') - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - call shr_pcdf_readwrite('read',SDICE%pio_subsystem, SDICE%io_type, & - trim(rest_file),mpicom,gsmap=gsmap,rf1=water,rf1n='water',io_format=SDICE%io_format) - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDICE,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - call shr_sys_flush(logunit) - endif - - !---------------------------------------------------------------------------- - ! On initial call, x2i is unset, so set for use in run method - ! These values should have no impact on the solution!! - !---------------------------------------------------------------------------- - x2i%rAttr(kz,:) = 10.0_R8 - x2i%rAttr(kua,:) = 5.0_R8 - x2i%rAttr(kva,:) = 5.0_R8 - x2i%rAttr(kptem,:) = 260.0_R8 - x2i%rAttr(ktbot,:) = 260.0_R8 - x2i%rAttr(kshum,:) = 0.0014_R8 - x2i%rAttr(kdens,:) = 1.3_R8 - - !---------------------------------------------------------------------------- - ! Set initial ice state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - call dice_comp_run(EClock, x2i, i2x, & - seq_flds_i2o_per_cat, & - SDICE, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart) - call t_adj_detailf(-2) - - call t_stopf('DICE_INIT') - - end subroutine dice_comp_init - - !=============================================================================== - subroutine dice_comp_run(EClock, x2i, i2x, & - seq_flds_i2o_per_cat, & - SDICE, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, case_name) - use shr_cal_mod, only : shr_cal_ymdtod2string - ! !DESCRIPTION: run method for dice model - implicit none - - ! 
!INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2i - type(mct_aVect) , intent(inout) :: i2x - logical , intent(in) :: seq_flds_i2o_per_cat ! .true. if select per ice thickness fields from ice - type(shr_strdata_type) , intent(inout) :: SDICE - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - character(CL) , intent(in), optional :: case_name ! case name - - !--- local --- - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: yy,mm,dd ! year month day - integer(IN) :: n ! indices - integer(IN) :: lsize ! size of attr vect - integer(IN) :: idt ! integer timestep - real(R8) :: dt ! timestep - integer(IN) :: nu ! unit number - real(R8) :: qmeltall ! q that would melt all accumulated water - real(R8) :: cosarg ! for setting ice temp pattern - real(R8) :: jday, jday0 ! elapsed day counters - character(CS) :: calendar ! calendar type - logical :: write_restart ! restart now - character(len=18) :: date_str - - character(*), parameter :: F00 = "('(dice_comp_run) ',8a)" - character(*), parameter :: F04 = "('(dice_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(dice_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DICE_RUN') - - call t_startf('dice_run1') - - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, curr_yr=yy, curr_mon=mm, curr_day=dd) - call seq_timemgr_EClockGetData( EClock, dtime=idt, calendar=calendar) - dt = idt * 1.0_r8 - write_restart = seq_timemgr_RestartAlarmIsOn(EClock) - - call t_stopf('dice_run1') - - !-------------------- - ! ADVANCE ICE - !-------------------- - - call t_barrierf('dice_BARRIER',mpicom) - call t_startf('dice') - - !--- copy all fields from streams to i2x as default --- - - if (trim(datamode) /= 'NULL') then - call t_startf('dice_strdata_advance') - call shr_strdata_advance(SDICE,currentYMD,currentTOD,mpicom,'dice') - call t_stopf('dice_strdata_advance') - call t_barrierf('dice_scatter_BARRIER',mpicom) - call t_startf('dice_scatter') - do n = 1,SDICE%nstreams - call shr_dmodel_translateAV(SDICE%avs(n),i2x,avifld,avofld,rearr) - enddo - call t_stopf('dice_scatter') - else - call mct_aVect_zero(i2x) - endif - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('dice_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - case('SSTDATA') - if (firstcall .and. .not. read_restart) then - ! iFrac0 = iFrac ! previous step's ice fraction - water = 0.0_R8 ! previous step's water accumulation - where (i2x%rAttr(kiFrac,:) > 0.0_R8) water(:) = flux_Qacc0 - endif - - ! tcraig, feb 10, 2012, ymd2eday no longer exists, use ymd2julian instead - ! this could be improved for use in gregorian calendar - ! call shr_cal_ymd2eday(0,mm,dd,eDay ,calendar) ! model date - ! 
call shr_cal_ymd2eday(0,09,01,eDay0,calendar) ! sept 1st - ! cosArg = 2.0_R8*pi*(real(eDay,R8) + real(currentTOD,R8)/cDay - real(eDay0,R8))/365.0_R8 - call shr_cal_ymd2julian(0,mm,dd,currentTOD,jDay ,calendar) ! julian day for model - call shr_cal_ymd2julian(0, 9, 1,0 ,jDay0,calendar) ! julian day for Sept 1 - cosArg = 2.0_R8*pi*(jday - jday0)/365.0_R8 - - lsize = mct_avect_lsize(i2x) - - tfreeze = shr_frz_freezetemp(x2i%rAttr(ksalinity,:)) + tFrz ! convert to Kelvin - - do n = 1,lsize - - !--- fix erroneous iFrac --- - i2x%rAttr(kiFrac,n) = min(1.0_R8,max(0.0_R8,i2x%rAttr(kiFrac,n))) - - !--- fabricate ice surface T, fix erroneous iFrac --- - if ( yc(n) > 0.0_R8) then - i2x%rAttr(kt,n) = 260.0_R8 + 10.0_R8*cos(cosArg) - else - i2x%rAttr(kt,n) = 260.0_R8 - 10.0_R8*cos(cosArg) - end if - - !--- set albedos (constant) --- - i2x%rAttr(kavsdr,n) = ax_vsdr - i2x%rAttr(kanidr,n) = ax_nidr - i2x%rAttr(kavsdf,n) = ax_vsdf - i2x%rAttr(kanidf,n) = ax_nidf - - !--- swnet is sent to cpl as a diagnostic quantity only --- - !--- newly recv'd swdn goes with previously sent albedo --- - !--- but albedos are (currently) time invariant --- - i2x%rAttr(kswnet,n) = (1.0_R8 - i2x%rAttr(kavsdr,n))*x2i%rAttr(kswvdr,n) & - + (1.0_R8 - i2x%rAttr(kanidr,n))*x2i%rAttr(kswndr,n) & - + (1.0_R8 - i2x%rAttr(kavsdf,n))*x2i%rAttr(kswvdf,n) & - + (1.0_R8 - i2x%rAttr(kanidf,n))*x2i%rAttr(kswndf,n) - - !--- compute melt/freeze water balance, adjust iFrac ------------- - if ( .not. flux_Qacc ) then ! Q accumulation option is OFF - i2x%rAttr(kmelth,n) = min(x2i%rAttr(kq,n),0.0_R8 ) ! q<0 => melt potential - i2x%rAttr(kmelth,n) = max(i2x%rAttr(kmelth,n),Flux_Qmin ) ! limit the melt rate - i2x%rAttr(kmeltw,n) = -i2x%rAttr(kmelth,n)/latice ! corresponding water flux - - else ! Q accumulation option is ON - !-------------------------------------------------------------- - ! 1a) Q<0 & iFrac > 0 => infinite supply of water to melt - ! 1b) Q<0 & iFrac = 0 => melt accumulated water only - ! 2a) Q>0 & iFrac > 0 => zero-out accumulated water - ! 2b) Q>0 & iFrac = 0 => accumulated water - !-------------------------------------------------------------- - if ( x2i%rAttr(kq,n) < 0.0_R8 ) then ! Q<0 => melt - if (i2x%rAttr(kiFrac,n) > 0.0_R8 ) then - i2x%rAttr(kmelth,n) = i2x%rAttr(kiFrac,n)*max(x2i%rAttr(kq,n),Flux_Qmin) - i2x%rAttr(kmeltw,n) = -i2x%rAttr(kmelth,n)/latice - ! water(n) = < don't change this value > - else - Qmeltall = -water(n)*latice/dt - i2x%rAttr(kmelth,n) = max(x2i%rAttr(kq,n), Qmeltall, Flux_Qmin ) - i2x%rAttr(kmeltw,n) = -i2x%rAttr(kmelth,n)/latice - water(n) = water(n) - i2x%rAttr(kmeltw,n)*dt - end if - else ! Q>0 => freeze - if (i2x%rAttr(kiFrac,n) > 0.0_R8 ) then - i2x%rAttr(kmelth,n) = 0.0_R8 - i2x%rAttr(kmeltw,n) = 0.0_R8 - water(n) = 0.0_R8 - else - i2x%rAttr(kmelth,n) = 0.0_R8 - i2x%rAttr(kmeltw,n) = 0.0_R8 - water(n) = water(n) + dt*x2i%rAttr(kq,n)/latice - end if - end if - - if (water(n) < 1.0e-16_R8 ) water(n) = 0.0_R8 - - !--- non-zero water => non-zero iFrac --- - if (i2x%rAttr(kiFrac,n) <= 0.0_R8 .and. water(n) > 0.0_R8) then - i2x%rAttr(kiFrac,n) = min(1.0_R8,water(n)/waterMax) - ! i2x%rAttr(kT,n) = tfreeze(n) ! T can be above freezing?!? - end if - - !--- cpl multiplies melth & meltw by iFrac --- - !--- divide by iFrac here => fixed quantity flux (not per area) --- - if (i2x%rAttr(kiFrac,n) > 0.0_R8) then - i2x%rAttr(kiFrac,n) = max( 0.01_R8, i2x%rAttr(kiFrac,n)) ! 
min iFrac - i2x%rAttr(kmelth,n) = i2x%rAttr(kmelth,n)/i2x%rAttr(kiFrac,n) - i2x%rAttr(kmeltw,n) = i2x%rAttr(kmeltw,n)/i2x%rAttr(kiFrac,n) - else - i2x%rAttr(kmelth,n) = 0.0_R8 - i2x%rAttr(kmeltw,n) = 0.0_R8 - end if - end if - - !--- modify T wrt iFrac: (iFrac -> 0) => (T -> tfreeze) --- - i2x%rAttr(kt,n) = tfreeze(n) + i2x%rAttr(kiFrac,n)*(i2x%rAttr(kt,n)-tfreeze(n)) - - end do - - ! compute atm/ice surface fluxes - call dice_flux_atmice( & - iMask ,x2i%rAttr(kz,:) ,x2i%rAttr(kua,:) ,x2i%rAttr(kva,:) , & - x2i%rAttr(kptem,:) ,x2i%rAttr(kshum,:) ,x2i%rAttr(kdens,:) ,x2i%rAttr(ktbot,:), & - i2x%rAttr(kt,:) ,i2x%rAttr(ksen,:) ,i2x%rAttr(klat,:) ,i2x%rAttr(klwup,:), & - i2x%rAttr(kevap,:) ,i2x%rAttr(ktauxa,:) ,i2x%rAttr(ktauya,:) ,i2x%rAttr(ktref,:), & - i2x%rAttr(kqref,:) ,logunit ) - - ! compute ice/oce surface fluxes (except melth & meltw, see above) - do n=1,lsize - if (iMask(n) == 0) then - i2x%rAttr(kswpen,n) = spval - i2x%rAttr(kmelth,n) = spval - i2x%rAttr(kmeltw,n) = spval - i2x%rAttr(ksalt ,n) = spval - i2x%rAttr(ktauxo,n) = spval - i2x%rAttr(ktauyo,n) = spval - i2x%rAttr(kiFrac,n) = 0.0_R8 - else - !--- penetrating short wave --- - i2x%rAttr(kswpen,n) = max(0.0_R8, flux_swpf*i2x%rAttr(kswnet,n) ) ! must be non-negative - - !--- i/o surface stress ( = atm/ice stress) --- - i2x%rAttr(ktauxo,n) = i2x%rAttr(ktauxa,n) - i2x%rAttr(ktauyo,n) = i2x%rAttr(ktauya,n) - - !--- salt flux --- - i2x%rAttr(ksalt ,n) = 0.0_R8 - end if - - ! !--- save ifrac for next timestep - ! iFrac0(n) = i2x%rAttr(kiFrac,n) - end do - - ! Compute outgoing aerosol fluxes - do n = 1,lsize - i2x%rAttr(kbcpho ,n) = x2i%rAttr(kbcphodry,n) - i2x%rAttr(kbcphi ,n) = x2i%rAttr(kbcphidry,n) + x2i%rAttr(kbcphiwet,n) - i2x%rAttr(kflxdst,n) = x2i%rAttr(kdstdry1,n) + x2i%rAttr(kdstwet1,n) & - + x2i%rAttr(kdstdry2,n) + x2i%rAttr(kdstwet2,n) & - + x2i%rAttr(kdstdry3,n) + x2i%rAttr(kdstwet3,n) & - + x2i%rAttr(kdstdry4,n) + x2i%rAttr(kdstwet4,n) - end do - - end select - - !------------------------------------------------- - ! optional per thickness category fields - !------------------------------------------------- - - if (seq_flds_i2o_per_cat) then - do n=1,lsize - i2x%rAttr(kiFrac_01,n) = i2x%rAttr(kiFrac,n) - i2x%rAttr(kswpen_iFrac_01,n) = i2x%rAttr(kswpen,n) * i2x%rAttr(kiFrac,n) - end do - end if - - call t_stopf('dice_datamode') - - !-------------------- - ! 
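Two pieces of the SSTDATA logic above are compact enough to restate as a hedged Python sketch: the fabricated annual-cycle ice surface temperature and the simple (flux_qacc = .false.) melt branch. The latent-heat value below is assumed to be shr_const_latice = 3.337e5 J/kg:

    import math

    def fabricated_ice_temperature(jday, jday0, northern_hemisphere):
        # Annual cycle used in SSTDATA mode: 260 K +/- 10 K, phase-referenced
        # to the Julian day of Sept 1 (jday0), warm phase opposite in each hemisphere.
        cos_arg = 2.0 * math.pi * (jday - jday0) / 365.0
        sign = 1.0 if northern_hemisphere else -1.0
        return 260.0 + sign * 10.0 * math.cos(cos_arg)

    def melt_fluxes(q_ocn, flux_qmin=-300.0, latice=3.337e5):
        # flux_qacc = .false. branch: q < 0 is melt potential, bounded below by
        # flux_qmin; the fresh-water flux is the melt heat over the latent heat of fusion.
        melth = max(min(q_ocn, 0.0), flux_qmin)
        meltw = -melth / latice
        return melth, meltw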
Write restart - !-------------------- - - if (write_restart) then - call t_startf('dice_restart') - call shr_cal_ymdtod2string(date_str, yy, mm, dd, currentTOD) - write(rest_file,"(6a)") & - trim(case_name), '.dice',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.dice',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file),currentYMD,currentTOD - call shr_pcdf_readwrite('write',SDICE%pio_subsystem, SDICE%io_type, & - trim(rest_file),mpicom,gsmap,clobber=.true.,rf1=water,rf1n='water') - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),currentYMD,currentTOD - call shr_strdata_restWrite(trim(rest_file_strm),SDICE,mpicom,trim(case_name),'SDICE strdata') - call shr_sys_flush(logunit) - call t_stopf('dice_restart') - endif - - call t_stopf('dice') - - !---------------------------------------------------------------------------- - ! Log output for model date - !---------------------------------------------------------------------------- - - call t_startf('dice_run2') - if (my_task == master_task) then - write(logunit,F04) trim(myModelName),': model date ', CurrentYMD,CurrentTOD - call shr_sys_flush(logunit) - end if - - firstcall = .false. - call t_stopf('dice_run2') - - call t_stopf('DICE_RUN') - - end subroutine dice_comp_run - - !=============================================================================== - subroutine dice_comp_final(my_task, master_task, logunit) - - ! !DESCRIPTION: finalize method for dice model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - - !--- formats --- - character(*), parameter :: F00 = "('(dice_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dice_comp_final) ',73('-'))" - character(*), parameter :: subName = "(dice_comp_final) " - !------------------------------------------------------------------------------- - - call t_startf('DICE_FINAL') - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) - end if - - call t_stopf('DICE_FINAL') - - end subroutine dice_comp_final - !=============================================================================== -end module dice_comp_mod diff --git a/src/components/data_comps/dice/mct/dice_flux_atmice_mod.F90 b/src/components/data_comps/dice/mct/dice_flux_atmice_mod.F90 deleted file mode 100644 index 6a044d08b33..00000000000 --- a/src/components/data_comps/dice/mct/dice_flux_atmice_mod.F90 +++ /dev/null @@ -1,277 +0,0 @@ -module dice_flux_atmice_mod - - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_const_mod ! shared constants - use shr_sys_mod ! 
shared system routines - - implicit none - - real(R8) :: loc_zvir = shr_const_zvir - real(R8) :: loc_cpdair = shr_const_cpdair - real(R8) :: loc_cpvir = shr_const_cpvir - real(R8) :: loc_karman = shr_const_karman - real(R8) :: loc_g = shr_const_g - real(R8) :: loc_latvap = shr_const_latvap - real(R8) :: loc_latice = shr_const_latice - real(R8) :: loc_stebol = shr_const_stebol - - integer,parameter :: dbug = 0 ! internal debug level - -!=============================================================================== -contains -!=============================================================================== - - subroutine dice_flux_atmice( & - mask ,zbot ,ubot ,vbot ,thbot, & - qbot ,rbot ,tbot ,ts ,sen, & - lat ,lwup ,evap ,taux ,tauy, & - tref ,qref ,logunit ) - - !------------------------------------------------------------------------------- - ! PURPOSE: - ! using atm & ice state variables, compute atm/ice fluxes - ! and diagnostic 10m air temperature and humidity - ! - ! NOTE: - ! o all fluxes are positive downward - ! o net heat flux = net sw + lw up + lw down + sen + lat - ! o here, tstar = /U*, and qstar = /U*. - ! o wind speeds should all be above a minimum speed (eg. 1.0 m/s) - ! - ! ASSUME: - ! o The saturation humidity of air at T(K): qsat(T) (kg/m^3) - !------------------------------------------------------------------------------- - - !--- input arguments -------------------------------- - integer(IN),intent(in) :: mask (:) ! 0 <=> cell NOT in model domain - real(R8) ,intent(in) :: zbot (:) ! atm level height (m) - real(R8) ,intent(in) :: ubot (:) ! atm u wind (m/s) - real(R8) ,intent(in) :: vbot (:) ! atm v wind (m/s) - real(R8) ,intent(in) :: thbot(:) ! atm potential T (K) - real(R8) ,intent(in) :: qbot (:) ! atm specific humidity (kg/kg) - real(R8) ,intent(in) :: rbot (:) ! atm air density (kg/m^3) - real(R8) ,intent(in) :: tbot (:) ! atm T (K) - real(R8) ,intent(in) :: ts (:) ! surface temperature - integer(IN),intent(in) :: logunit ! logging unit number - - !--- output arguments ------------------------------- - real(R8) ,intent(out) :: sen (:) ! sensible heat flux (W/m^2) - real(R8) ,intent(out) :: lat (:) ! latent heat flux (W/m^2) - real(R8) ,intent(out) :: lwup (:) ! long-wave upward heat flux (W/m^2) - real(R8) ,intent(out) :: evap (:) ! evaporative water flux ((kg/s)/m^2) - real(R8) ,intent(out) :: taux (:) ! x surface stress (N) - real(R8) ,intent(out) :: tauy (:) ! y surface stress (N) - real(R8) ,intent(out) :: tref (:) ! 2m reference height temperature - real(R8) ,intent(out) :: qref (:) ! 2m reference height humidity - - !--- local constants -------------------------------- - real(R8),parameter :: umin = 1.0_R8 ! minimum wind speed (m/s) - real(R8),parameter :: zref = 10.0_R8 ! ref height ~ m - real(R8),parameter :: ztref = 2.0_R8 ! ref height for air T ~ m - real(R8),parameter :: spval = shr_const_spval ! special value - real(R8),parameter :: zzsice = 0.0005_R8 ! ice surface roughness - - !--- local variables -------------------------------- - integer(IN) :: lsize ! array dimensions - integer(IN) :: n ! array indicies - real(R8) :: vmag ! surface wind magnitude (m/s) - real(R8) :: thvbot ! virtual temperature (K) - real(R8) :: ssq ! sea surface humidity (kg/kg) - real(R8) :: dssqdt ! derivative of ssq wrt Ts (kg/kg/K) - real(R8) :: delt ! potential T difference (K) - real(R8) :: delq ! humidity difference (kg/kg) - real(R8) :: stable ! stability factor - real(R8) :: rdn ! sqrt of neutral exchange coefficient (momentum) - real(R8) :: rhn ! 
sqrt of neutral exchange coefficient (heat) - real(R8) :: ren ! sqrt of neutral exchange coefficient (water) - real(R8) :: rd ! sqrt of exchange coefficient (momentum) - real(R8) :: rh ! sqrt of exchange coefficient (heat) - real(R8) :: re ! sqrt of exchange coefficient (water) - real(R8) :: ustar ! ustar - real(R8) :: qstar ! qstar - real(R8) :: tstar ! tstar - real(R8) :: hol ! H (at zbot) over L - real(R8) :: xsq ! temporary variable - real(R8) :: xqq ! temporary variable - real(R8) :: psimh ! stability function at zbot (momentum) - real(R8) :: psixh ! stability function at zbot (heat and water) - real(R8) :: alz ! ln(zbot/z10) - real(R8) :: ltheat ! latent heat for surface - real(R8) :: tau ! stress at zbot - real(R8) :: cp ! specific heat of moist air - - real(R8) :: bn ! exchange coef funct for interpolation - real(R8) :: bh ! exchange coef funct for interpolation - real(R8) :: fac ! interpolation factor - real(R8) :: ln0 ! log factor for interpolation - real(R8) :: ln3 ! log factor for interpolation - - !--- local functions -------------------------------- - real(R8) :: Tk ! temperature (K) - real(R8) :: qsat ! the saturation humidity of air (kg/m^3) - real(R8) :: dqsatdt ! derivative of qsat wrt surface temperature - real(R8) :: xd ! dummy argument - real(R8) :: psimhu ! unstable part of psimh - real(R8) :: psixhu ! unstable part of psimx - - qsat(Tk) = 627572.4_R8 / exp(5107.4_R8/Tk) - dqsatdt(Tk) = (5107.4_R8 / Tk**2) * 627572.4_R8 / exp(5107.4_R8/Tk) - psimhu(xd) = log((1.0_R8+xd*(2.0_R8+xd))*(1.0_R8+xd*xd)/8.0_R8) - 2.0_R8*atan(xd) + 1.571_R8 - psixhu(xd) = 2.0_R8 * log((1.0_R8 + xd*xd)/2.0_R8) - - !--- formats ---------------------------------------- - character(*),parameter :: F01 = "('(dice_flux_atmIce) ',a, i7,2x,d21.14)" - character(*),parameter :: subName = "(dice_flux_atmIce) " - !------------------------------------------------------------------------------- - - lsize = size(tbot) - - do n = 1,lsize - - if (mask(n) == 0) then - sen (n) = spval - lat (n) = spval - lwup (n) = spval - evap (n) = spval - taux (n) = spval - tauy (n) = spval - tref (n) = spval - qref (n) = spval - else - !--- define some needed variables --- - vmag = max(umin, sqrt(ubot(n)**2+vbot(n)**2)) - thvbot = thbot(n)*(1.0_R8 + loc_zvir * qbot(n)) ! virtual pot temp (K) - ssq = qsat (ts(n)) / rbot(n) ! sea surf hum (kg/kg) - dssqdt = dqsatdt(ts(n)) / rbot(n) ! deriv of ssq wrt Ts - delt = thbot(n) - ts(n) ! pot temp diff (K) - delq = qbot(n) - ssq ! spec hum dif (kg/kg) - alz = log(zbot(n)/zref) - cp = loc_cpdair*(1.0_R8 + loc_cpvir*ssq) - ltheat = loc_latvap + loc_latice - - !---------------------------------------------------------- - ! 
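The statement functions defined above amount to a simple saturation-humidity fit and its temperature derivative; the same relations restated in Python for reference (T in Kelvin, qsat in kg/m^3):

    import math

    def qsat(t_kelvin):
        # Saturation humidity of air, same fit as the qsat statement function above.
        return 627572.4 / math.exp(5107.4 / t_kelvin)

    def dqsatdt(t_kelvin):
        # Derivative of qsat with respect to surface temperature.
        return (5107.4 / t_kelvin ** 2) * qsat(t_kelvin)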
first estimate of Z/L and ustar, tstar and qstar - !---------------------------------------------------------- - - !--- neutral coefficients, z/L = 0.0 --- - rdn = loc_karman/log(zref/zzsice) - rhn = rdn - ren = rdn - - !--- ustar,tstar,qstar ---- - ustar = rdn * vmag - tstar = rhn * delt - qstar = ren * delq - - !--- compute stability & evaluate all stability functions --- - hol = loc_karman * loc_g * zbot(n) & - & * (tstar/thvbot+qstar/(1.0_R8/loc_zvir+qbot(n))) / ustar**2 - hol = sign( min(abs(hol),10.0_R8), hol ) - stable = 0.5_R8 + sign(0.5_R8 , hol) - xsq = max(sqrt(abs(1.0_R8 - 16.0_R8*hol)) , 1.0_R8) - xqq = sqrt(xsq) - psimh = -5.0_R8*hol*stable + (1.0_R8-stable)*psimhu(xqq) - psixh = -5.0_R8*hol*stable + (1.0_R8-stable)*psixhu(xqq) - - !--- shift all coeffs to measurement height and stability --- - rd = rdn / (1.0_R8+rdn/loc_karman*(alz-psimh)) - rh = rhn / (1.0_R8+rhn/loc_karman*(alz-psixh)) - re = ren / (1.0_R8+ren/loc_karman*(alz-psixh)) - - !--- update ustar, tstar, qstar w/ updated, shifted coeffs -- - ustar = rd * vmag - tstar = rh * delt - qstar = re * delq - - !---------------------------------------------------------- - ! iterate to converge on Z/L, ustar, tstar and qstar - !---------------------------------------------------------- - - !--- compute stability & evaluate all stability functions --- - hol = loc_karman * loc_g * zbot(n) & - & * (tstar/thvbot+qstar/(1.0_R8/loc_zvir+qbot(n))) / ustar**2 - hol = sign( min(abs(hol),10.0_R8), hol ) - stable = 0.5_R8 + sign(0.5_R8 , hol) - xsq = max(sqrt(abs(1.0_R8 - 16.0_R8*hol)) , 1.0_R8) - xqq = sqrt(xsq) - psimh = -5.0_R8*hol*stable + (1.0_R8-stable)*psimhu(xqq) - psixh = -5.0_R8*hol*stable + (1.0_R8-stable)*psixhu(xqq) - - !--- shift all coeffs to measurement height and stability --- - rd = rdn / (1.0_R8+rdn/loc_karman*(alz-psimh)) - rh = rhn / (1.0_R8+rhn/loc_karman*(alz-psixh)) - re = ren / (1.0_R8+ren/loc_karman*(alz-psixh)) - - !--- update ustar, tstar, qstar w/ updated, shifted coeffs -- - ustar = rd * vmag - tstar = rh * delt - qstar = re * delq - - !---------------------------------------------------------- - ! compute the fluxes - !---------------------------------------------------------- - - tau = rbot(n) * ustar * ustar - - !--- momentum flux --- - taux(n) = tau * ubot(n) / vmag - tauy(n) = tau * vbot(n) / vmag - - !--- heat flux --- - sen (n) = cp * tau * tstar / ustar - lat (n) = ltheat * tau * qstar / ustar - lwup(n) = -loc_stebol * ts(n)**4 - - !--- water flux --- - evap(n) = lat(n)/ltheat - - !---------------------------------------------------------- - ! compute diagnostic: 2m reference height temperature - !---------------------------------------------------------- - - !--- Compute function of exchange coefficients. 
Assume that - !--- cn = rdn*rdn, cm=rd*rd and ch=rh*rd, and therefore - !--- 1/sqrt(cn(n))=1/rdn and sqrt(cm(n))/ch(n)=1/rh - bn = loc_karman/rdn - bh = loc_karman/rh - - !--- Interpolation factor for stable and unstable cases - ln0 = log(1.0_R8 + (ztref/zbot(n))*(exp(bn) - 1.0_R8)) - ln3 = log(1.0_R8 + (ztref/zbot(n))*(exp(bn - bh) - 1.0_R8)) - fac = (ln0 - ztref/zbot(n)*(bn - bh))/bh * stable & - & + (ln0 - ln3)/bh * (1.0_R8-stable) - fac = min(max(fac,0.0_R8),1.0_R8) - - !--- actual interpolation - tref(n) = ts(n) + (tbot(n) - ts(n))*fac - qref(n) = qbot(n) - delq*fac - - endif - enddo - - if (dbug > 0) then - do n = 1,lsize - if (mask(n) /= 0) then - write(logunit, F01)'n,mask = ',n,mask(n) - write(logunit, F01)'n,zbot = ',n,zbot(n) - write(logunit, F01)'n,ubot = ',n,ubot(n) - write(logunit, F01)'n,vbot = ',n,vbot(n) - write(logunit, F01)'n,thbot = ',n,thbot(n) - write(logunit, F01)'n,qbot = ',n,qbot(n) - write(logunit, F01)'n,tbot = ',n,tbot(n) - write(logunit, F01)'n,ts = ',n,ts(n) - write(logunit, F01)'n,lat = ',n,lat(n) - write(logunit, F01)'n,sen = ',n,sen(n) - write(logunit, F01)'n,taux = ',n,taux(n) - write(logunit, F01)'n,taux = ',n,tauy(n) - write(logunit, F01)'n,lwup = ',n,lwup(n) - write(logunit, F01)'n,evap = ',n,evap(n) - write(logunit, F01)'n,tref = ',n,tref(n) - write(logunit, F01)'n,qref = ',n,qref(n) - end if - end do - end if - - end subroutine dice_flux_atmIce - -end module dice_flux_atmice_mod diff --git a/src/components/data_comps/dice/mct/dice_shr_mod.F90 b/src/components/data_comps/dice/mct/dice_shr_mod.F90 deleted file mode 100644 index 8a0ef57fe83..00000000000 --- a/src/components/data_comps/dice/mct/dice_shr_mod.F90 +++ /dev/null @@ -1,173 +0,0 @@ -module dice_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dice_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! input namelist variables - character(CL) , public :: decomp ! decomp strategy - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - real(R8) , public :: flux_swpf ! short-wave penatration factor - real(R8) , public :: flux_Qmin ! bound on melt rate - logical , public :: flux_Qacc ! activates water accumulation/melt wrt Q - real(R8) , public :: flux_Qacc0 ! initial water accumulation value - logical , public :: force_prognostic_true ! if true set prognostic true - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! 
mode - character(len=*), public, parameter :: nullstr = 'undefined' - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine dice_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDICE, ice_present, ice_prognostic) - - ! !DESCRIPTION: Read in dice namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer , intent(in) :: inst_index ! number of current instance (ie. 1) - character(len=16) , intent(in) :: inst_suffix ! char string associated with instance - character(len=16) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(in) :: shrlogunit ! original log unit and level - type(shr_strdata_type) , intent(inout) :: SDICE - logical , intent(out) :: ice_present ! flag - logical , intent(out) :: ice_prognostic ! flag - - !--- local variables --- - character(CL) :: fileName ! generic file name - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - - !--- formats --- - character(*), parameter :: F00 = "('(dice_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dice_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dice_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dice_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(dice_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_dice_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / dice_nml / & - decomp, flux_swpf, flux_Qmin, flux_Qacc, flux_Qacc0, restfilm, restfils, & - force_prognostic_true - - !---------------------------------------------------------------------------- - ! Determine input filenamname - !---------------------------------------------------------------------------- - - filename = "dice_in"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! Read dice_in - !---------------------------------------------------------------------------- - - filename = "dice_in"//trim(inst_suffix) - decomp = "1d" - flux_swpf = 0.0_R8 ! no penetration - flux_Qmin = -300.0_R8 ! kg/s/m^2 - flux_Qacc = .false. ! no accumulation - flux_Qacc0 = 0.0_R8 ! no water - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - if (my_task == master_task) then - nunit = shr_file_getUnit() ! 
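!
! Note (illustrative, not part of the original source): an example dice_in
! fragment for the dice_nml group defined above.  The values simply restate the
! defaults assigned in this routine and are not taken from any actual case; the
! additional shr_strdata stream namelist read from the same file is omitted.
!
!   &dice_nml
!     decomp                = "1d"
!     flux_swpf             = 0.0
!     flux_Qmin             = -300.0
!     flux_Qacc             = .false.
!     flux_Qacc0            = 0.0
!     restfilm              = "undefined"
!     restfils              = "undefined"
!     force_prognostic_true = .false.
!   /
!
! resuming the original code -- shr_file_getUnit, called above, is used to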
get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=dice_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' decomp = ',trim(decomp) - write(logunit,F02)' flux_swpf = ',flux_swpf - write(logunit,F02)' flux_Qmin = ',flux_Qmin - write(logunit,F06)' flux_Qacc = ',flux_Qacc - write(logunit,F02)' flux_Qacc0 = ',flux_Qacc0 - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - endif - call shr_mpi_bcast(decomp ,mpicom,'decomp') - call shr_mpi_bcast(flux_swpf ,mpicom,'flux_swpf') - call shr_mpi_bcast(flux_Qmin ,mpicom,'flux_Qmin') - call shr_mpi_bcast(flux_Qacc ,mpicom,'flux_Qacc') - call shr_mpi_bcast(flux_Qacc0,mpicom,'flux_Qacc0') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDICE,trim(filename),mpicom=mpicom) - - ! Validate mode - - datamode = trim(SDICE%dataMode) - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'SSTDATA' .or. & - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) ' dice datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal dice datamode = ',trim(datamode) - call shr_sys_abort() - endif - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flag - !---------------------------------------------------------------------------- - - ice_present = .false. - ice_prognostic = .false. - if (force_prognostic_true) then - ice_present = .true. - ice_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - ice_present = .true. - end if - if (trim(datamode) == 'SSTDATA' .or. trim(datamode) == 'COPYALL') then - ice_prognostic = .true. - endif - - end subroutine dice_shr_read_namelists - -end module dice_shr_mod diff --git a/src/components/data_comps/dice/mct/ice_comp_mct.F90 b/src/components/data_comps/dice/mct/ice_comp_mct.F90 deleted file mode 100644 index e409e598931..00000000000 --- a/src/components/data_comps/dice/mct/ice_comp_mct.F90 +++ /dev/null @@ -1,236 +0,0 @@ -module ice_comp_mct - - ! 
!USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dice_comp_mod , only: dice_comp_init, dice_comp_run, dice_comp_final - use dice_shr_mod , only: dice_shr_read_namelists - use seq_flds_mod , only: seq_flds_i2x_fields, seq_flds_x2i_fields, seq_flds_i2o_per_cat - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: ice_init_mct - public :: ice_run_mct - public :: ice_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - type(shr_strdata_type) :: SDICE - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - logical :: read_restart ! start from restart - - character(*), parameter :: F00 = "('(dice_comp_init) ',8a)" - integer(IN) , parameter :: master_task=0 ! task number of master task - character(*), parameter :: subName = "(ice_init_mct) " - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine ice_init_mct( EClock, cdata, x2i, i2x, NLFilename ) - - ! !DESCRIPTION: initialize dice model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2i, i2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - logical :: ice_present ! flag - logical :: ice_prognostic ! flag - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: ierr ! error code - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_SPVAL ! single column lat - real(R8) :: scmLon = shr_const_SPVAL ! single column lon - character(*), parameter :: subName = "(ice_init_mct) " - !------------------------------------------------------------------------------- - - ! Set cdata pointers - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! 
Obtain infodata variables - call seq_infodata_getData(infodata, & - single_column=scmMode, & - scmlat=scmlat, scmlon=scmLon, & - read_restart=read_restart) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('ice_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call t_startf('dice_readnml') - - call dice_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDICE, ice_present, ice_prognostic) - - call seq_infodata_PutData(infodata, & - ice_present=ice_present, & - ice_prognostic=ice_prognostic, & - iceberg_prognostic=.false.) - - call t_stopf('dice_readnml') - - !---------------------------------------------------------------------------- - ! RETURN if present flag is false - !---------------------------------------------------------------------------- - - if (.not. ice_present) then - RETURN - end if - - ! NOTE: the following will never be called if ice_present is .false. - - !---------------------------------------------------------------------------- - ! Initialize dice - !---------------------------------------------------------------------------- - - call dice_comp_init(Eclock, x2i, i2x, & - seq_flds_x2i_fields, seq_flds_i2x_fields, seq_flds_i2o_per_cat, & - SDICE, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon) - - !---------------------------------------------------------------------------- - ! Fill infodata that needs to be returned from dice - !---------------------------------------------------------------------------- - - call seq_infodata_PutData(infodata, & - ice_nx=SDICE%nxg, & - ice_ny=SDICE%nyg ) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'dice_comp_init done' - call shr_sys_flush(logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine ice_init_mct - - !=============================================================================== - subroutine ice_run_mct( EClock, cdata, x2i, i2x) - - ! !DESCRIPTION: run method for dice model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2i ! driver -> dead - type(mct_aVect) ,intent(inout) :: i2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(CL) :: case_name ! 
case name - character(*), parameter :: subName = "(ice_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call seq_infodata_GetData(infodata, case_name=case_name) - - call dice_comp_run(EClock, x2i, i2x, & - seq_flds_i2o_per_cat, & - SDICE, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, case_name) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine ice_run_mct - - !=============================================================================== - subroutine ice_final_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: finalize method for dead ice model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(ice_final_mct) " - !------------------------------------------------------------------------------- - - call dice_comp_final(my_task, master_task, logunit) - - end subroutine ice_final_mct - !=============================================================================== - -end module ice_comp_mct diff --git a/src/components/data_comps/dice/nuopc/dice_comp_mod.F90 b/src/components/data_comps/dice/nuopc/dice_comp_mod.F90 deleted file mode 100644 index 308671b8bd8..00000000000 --- a/src/components/data_comps/dice/nuopc/dice_comp_mod.F90 +++ /dev/null @@ -1,1065 +0,0 @@ -module dice_comp_mod - - use NUOPC , only : NUOPC_Advertise - use ESMF , only : ESMF_State, ESMF_SUCCESS, ESMF_State - use ESMF , only : ESMF_Mesh, ESMF_DistGrid, ESMF_MeshGet, ESMF_DistGridGet - use perf_mod , only : t_startf, t_stopf, t_adj_detailf, t_barrierf - use mct_mod , only : mct_gsmap_init - use mct_mod , only : mct_avect, mct_avect_indexRA, mct_avect_zero, mct_aVect_nRattr - use mct_mod , only : mct_avect_init, mct_avect_lsize - use shr_kind_mod , only : r8=>shr_kind_r8, cxx=>shr_kind_cxx, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_const_mod , only : shr_const_pi, shr_const_spval, shr_const_tkfrz, shr_const_latice - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only : shr_mpi_bcast - use shr_frz_mod , only : shr_frz_freezetemp - use shr_cal_mod , only : shr_cal_calendarname - use shr_cal_mod , only : shr_cal_datetod2string - use shr_string_mod , only : shr_string_listGetName - use shr_sys_mod , only : shr_sys_abort - use shr_strdata_mod , only : shr_strdata_init_model_domain - use shr_strdata_mod , only : shr_strdata_init_streams - use shr_strdata_mod , only : shr_strdata_init_mapping - use shr_strdata_mod , only : shr_strdata_type, shr_strdata_pioinit - use shr_strdata_mod , only : shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only : shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only : shr_dmodel_translateAV - use dshr_methods_mod , only : ChkErr - use dshr_nuopc_mod , only : fld_list_type, dshr_fld_add, dshr_import, dshr_export - use dice_shr_mod , only : datamode ! namelist input - use dice_shr_mod , only : rest_file ! namelist input - use dice_shr_mod , only : rest_file_strm ! 
namelist input - use dice_shr_mod , only : flux_swpf ! namelist input -short-wave penatration factor - use dice_shr_mod , only : flux_Qmin ! namelist input -bound on melt rate - use dice_shr_mod , only : flux_Qacc ! namelist input -activates water accumulation/melt wrt Q - use dice_shr_mod , only : flux_Qacc0 ! namelist input -initial water accumulation value - use dice_shr_mod , only : nullstr - use dice_shr_mod , only : SDICE - use dice_flux_atmice_mod , only : dice_flux_atmice - use shr_pcdf_mod - - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dice_comp_advertise - public :: dice_comp_init - public :: dice_comp_run - public :: dice_comp_import - public :: dice_comp_export - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - type(mct_aVect) :: x2i - type(mct_aVect) :: i2x - character(CXX) :: flds_i2x = '' - character(CXX) :: flds_x2i = '' - - integer :: debug_import = 0 ! debug level (if > 0 will print all import fields) - integer :: debug_export = 0 ! debug level (if > 0 will print all export fields) - - real(R8),parameter :: pi = shr_const_pi ! pi - real(R8),parameter :: spval = shr_const_spval ! flags invalid data - real(R8),parameter :: tFrz = shr_const_tkfrz ! temp of freezing - real(R8),parameter :: latice = shr_const_latice ! latent heat of fusion - real(R8),parameter :: waterMax = 1000.0_R8 ! wrt iFrac comp & frazil ice (kg/m^2) - - !----- surface albedo constants ------ - real(R8),parameter :: snwfrac = 0.286_R8 ! snow cover fraction ~ [0,1] - real(R8),parameter :: as_nidf = 0.950_R8 ! albedo: snow,near-infr,diffuse - real(R8),parameter :: as_vsdf = 0.700_R8 ! albedo: snow,visible ,diffuse - real(R8),parameter :: as_nidr = 0.960_R8 ! albedo: snow,near-infr,direct - real(R8),parameter :: as_vsdr = 0.800_R8 ! albedo: snow,visible ,direct - real(R8),parameter :: ai_nidf = 0.700_R8 ! albedo: ice, near-infr,diffuse - real(R8),parameter :: ai_vsdf = 0.500_R8 ! albedo: ice, visible ,diffuse - real(R8),parameter :: ai_nidr = 0.700_R8 ! albedo: ice, near-infr,direct - real(R8),parameter :: ai_vsdr = 0.500_R8 ! albedo: ice, visible ,direct - real(R8),parameter :: ax_nidf = ai_nidf*(1.0_R8-snwfrac) + as_nidf*snwfrac - real(R8),parameter :: ax_vsdf = ai_vsdf*(1.0_R8-snwfrac) + as_vsdf*snwfrac - real(R8),parameter :: ax_nidr = ai_nidr*(1.0_R8-snwfrac) + as_nidr*snwfrac - real(R8),parameter :: ax_vsdr = ai_vsdr*(1.0_R8-snwfrac) + as_vsdr*snwfrac - - integer :: km - integer :: kswvdr,kswndr,kswvdf,kswndf,kq,kz,kua,kva,kptem,kshum,kdens,ktbot - integer :: kiFrac,kt,kavsdr,kanidr,kavsdf,kanidf,kswnet,kmelth,kmeltw - integer :: ksen,klat,klwup,kevap,ktauxa,ktauya,ktref,kqref,kswpen,ktauxo,ktauyo,ksalt - integer :: ksalinity - integer :: kbcpho, kbcphi, kflxdst - integer :: kbcphidry, kbcphodry, kbcphiwet - integer :: kocphidry, kocphodry, kocphiwet - integer :: kdstdry1, kdstdry2, kdstdry3, kdstdry4 - integer :: kdstwet1, kdstwet2, kdstwet3, kdstwet4 - integer :: kiFrac_01,kswpen_iFrac_01 ! optional per thickness category fields - integer :: index_lat, index_lon - - integer , pointer :: imask(:) - real(R8) , pointer :: xc(:), yc(:) ! 
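!
! Note (illustrative, not part of the original source): with snwfrac = 0.286 the
! snow/ice-blended albedos defined above evaluate to
!   ax_vsdr = 0.500*(1 - 0.286) + 0.800*0.286 = 0.5858   (visible,  direct)
!   ax_nidr = 0.700*(1 - 0.286) + 0.960*0.286 = 0.7744   (near-IR,  direct)
!   ax_vsdf = 0.500*(1 - 0.286) + 0.700*0.286 = 0.5572   (visible,  diffuse)
!   ax_nidf = 0.700*(1 - 0.286) + 0.950*0.286 = 0.7715   (near-IR,  diffuse)
!
! xc and yc, declared above, hold the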
arrays of model latitudes and longitudes - real(R8) , pointer :: water(:) - real(R8) , pointer :: tfreeze(:) - !real(R8) , pointer :: ifrac0(:) - - character(len=CS), pointer :: avifld(:) - character(len=CS), pointer :: avofld(:) - character(len=CS), pointer :: strmifld(:) - character(len=CS), pointer :: strmofld(:) - character(len=CXX) :: flds_strm = '' ! colon deliminated string of field names - - logical :: firstcall = .true. ! first call logical - character(len=*),parameter :: rpfile = 'rpointer.ice' - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine dice_comp_advertise(importState, exportState, flds_scalar_name, & - ice_present, ice_prognostic, flds_i2o_per_cat, & - fldsFrIce_num, fldsFrIce, fldsToIce_num, fldsToIce, rc) - - ! input/output arguments - type(ESMF_State) , intent(inout) :: importState - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: flds_scalar_name - logical , intent(in) :: ice_present - logical , intent(in) :: ice_prognostic - logical , intent(in) :: flds_i2o_per_cat - integer , intent(out) :: fldsToIce_num - integer , intent(out) :: fldsFrIce_num - type (fld_list_type) , intent(out) :: fldsToIce(:) - type (fld_list_type) , intent(out) :: fldsFrIce(:) - integer , intent(out) :: rc - - ! local variables - integer :: n - !------------------------------------------------------------------------------- - - if (.not. ice_present) return - - !-------------------------------- - ! export fields - !-------------------------------- - - fldsFrIce_num=1 - fldsFrIce(1)%stdname = trim(flds_scalar_name) - - ! export fields that have a corresponding stream field - ! - model_fld_index sets the module variables kiFrac - ! - model_fld_concat variable sets the output variable flds_i2x - ! - model_fld_array sets the module character array avofld - ! - data_fld_array sets the module character array avifld - - call dshr_fld_add(data_fld='ifrac', data_fld_array=avifld, model_fld='Si_ifrac', model_fld_array=avofld, & - model_fld_concat=flds_i2x, model_fld_index=kiFrac, fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - if (flds_i2o_per_cat) then - call dshr_fld_add(model_fld='Si_ifrac_01' , model_fld_concat=flds_i2x, model_fld_index=kiFrac_01) - call dshr_fld_add(model_fld='Fioi_swpen_ifrac_01', model_fld_concat=flds_i2x, model_fld_index=kswpen_iFrac_01) - call dshr_fld_add(med_fld='Si_ifrac_n', fldlist_num=fldsFrIce_num, fldlist=fldsFrIce, & - ungridded_lbound=1, ungridded_ubound=1) - call dshr_fld_add(med_fld='Fioi_swpen_ifrac_n', fldlist_num=fldsFrIce_num, fldlist=fldsFrIce, & - ungridded_lbound=1, ungridded_ubound=1) - end if - - ! 
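!
! Note (illustrative, not part of the original source): the data_fld/model_fld
! pairs registered here build the avifld/avofld lists; dice_comp_run later passes
! those lists to shr_dmodel_translateAV to copy each stream field (e.g. 'ifrac')
! into the matching i2x attribute (e.g. 'Si_ifrac').
!
! Next come the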
export fields that have no corresponding stream field (computed internally) - - call dshr_fld_add(model_fld='Si_imask', model_fld_concat=flds_i2x, model_fld_index=km, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Si_t', model_fld_concat=flds_i2x, model_fld_index=kt, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Si_tref', model_fld_concat=flds_i2x, model_fld_index=ktref, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Si_qref', model_fld_concat=flds_i2x, model_fld_index=kqref, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Si_avsdr', model_fld_concat=flds_i2x, model_fld_index=kavsdr, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Si_anidr', model_fld_concat=flds_i2x, model_fld_index=kanidr, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Si_avsdf', model_fld_concat=flds_i2x, model_fld_index=kavsdf, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Si_anidf', model_fld_concat=flds_i2x, model_fld_index=kanidf, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Faii_swnet', model_fld_concat=flds_i2x, model_fld_index=kswnet, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Faii_sen', model_fld_concat=flds_i2x, model_fld_index=ksen, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Faii_lat', model_fld_concat=flds_i2x, model_fld_index=klat, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Faii_lwup', model_fld_concat=flds_i2x, model_fld_index=klwup, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Faii_evap', model_fld_concat=flds_i2x, model_fld_index=kevap, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Faii_taux', model_fld_concat=flds_i2x, model_fld_index=ktauxa, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Faii_tauy', model_fld_concat=flds_i2x, model_fld_index=ktauya, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_melth', model_fld_concat=flds_i2x, model_fld_index=kmelth, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_meltw', model_fld_concat=flds_i2x, model_fld_index=kmeltw, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_swpen', model_fld_concat=flds_i2x, model_fld_index=kswpen, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_taux', model_fld_concat=flds_i2x, model_fld_index=ktauxo, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_tauy', model_fld_concat=flds_i2x, model_fld_index=ktauyo, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_salt', model_fld_concat=flds_i2x, model_fld_index=ksalt, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_bcpho', model_fld_concat=flds_i2x, model_fld_index=kbcpho, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_bcphi', model_fld_concat=flds_i2x, model_fld_index=kbcphi, & - fldlist_num=fldsFrIce_num, fldlist=fldsFrIce) - - call dshr_fld_add(model_fld='Fioi_flxdst', model_fld_concat=flds_i2x, model_fld_index=kflxdst, & - fldlist_num=fldsFrIce_num, 
fldlist=fldsFrIce) - - !------------------- - ! import fields (have no corresponding stream fields) - !------------------- - - if (ice_prognostic) then - - fldsToIce_num=1 - fldsToIce(1)%stdname = trim(flds_scalar_name) - - call dshr_fld_add(model_fld='Faxa_swvdr', model_fld_concat=flds_x2i, model_fld_index=kswvdr, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Faxa_swvdf', model_fld_concat=flds_x2i, model_fld_index=kswvdf, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Faxa_swndr', model_fld_concat=flds_x2i, model_fld_index=kswndr, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Faxa_swndf', model_fld_concat=flds_x2i, model_fld_index=kswndf, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Fioo_q', model_fld_concat=flds_x2i, model_fld_index=kq, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Sa_z', model_fld_concat=flds_x2i, model_fld_index=kz, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Sa_u', model_fld_concat=flds_x2i, model_fld_index=kua, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Sa_v', model_fld_concat=flds_x2i, model_fld_index=kva, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Sa_ptem', model_fld_concat=flds_x2i, model_fld_index=kptem, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Sa_shum', model_fld_concat=flds_x2i, model_fld_index=kshum, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Sa_dens', model_fld_concat=flds_x2i, model_fld_index=kdens, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Sa_tbot', model_fld_concat=flds_x2i, model_fld_index=ktbot, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='So_s', model_fld_concat=flds_x2i, model_fld_index=ksalinity, & - fldlist_num=fldsToIce_num, fldlist=fldsToIce) - - call dshr_fld_add(model_fld='Faxa_bcphidry', model_fld_concat=flds_x2i, model_fld_index=kbcphidry) - call dshr_fld_add(model_fld='Faxa_bcphodry', model_fld_concat=flds_x2i, model_fld_index=kbcphodry) - call dshr_fld_add(model_fld='Faxa_bcphiwet', model_fld_concat=flds_x2i, model_fld_index=kbcphiwet) - call dshr_fld_add(med_fld='Faxa_bcph', fldlist_num=fldsToIce_num, fldlist=fldsToIce, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(model_fld='Faxa_ocphidry', model_fld_concat=flds_x2i, model_fld_index=kocphidry) - call dshr_fld_add(model_fld='Faxa_ocphodry', model_fld_concat=flds_x2i, model_fld_index=kocphodry) - call dshr_fld_add(model_fld='Faxa_ocphiwet', model_fld_concat=flds_x2i, model_fld_index=kocphiwet) - call dshr_fld_add(med_fld='Faxa_ocph', fldlist_num=fldsToIce_num, fldlist=fldsToIce, & - ungridded_lbound=1, ungridded_ubound=3) - - call dshr_fld_add(model_fld='Faxa_dstdry1', model_fld_concat=flds_x2i, model_fld_index=kdstdry1) - call dshr_fld_add(model_fld='Faxa_dstdry2', model_fld_concat=flds_x2i, model_fld_index=kdstdry2) - call dshr_fld_add(model_fld='Faxa_dstdry3', model_fld_concat=flds_x2i, model_fld_index=kdstdry3) - call dshr_fld_add(model_fld='Faxa_dstdry4', model_fld_concat=flds_x2i, model_fld_index=kdstdry4) - call dshr_fld_add(med_fld='Faxa_dstdry', fldlist_num=fldsToIce_num, fldlist=fldsToIce, & - ungridded_lbound=1, ungridded_ubound=4) - - call dshr_fld_add(model_fld='Faxa_dstwet1', model_fld_concat=flds_x2i, 
model_fld_index=kdstwet1) - call dshr_fld_add(model_fld='Faxa_dstwet2', model_fld_concat=flds_x2i, model_fld_index=kdstwet2) - call dshr_fld_add(model_fld='Faxa_dstwet3', model_fld_concat=flds_x2i, model_fld_index=kdstwet3) - call dshr_fld_add(model_fld='Faxa_dstwet4', model_fld_concat=flds_x2i, model_fld_index=kdstwet4) - call dshr_fld_add(med_fld='Faxa_dstwet', fldlist_num=fldsToIce_num, fldlist=fldsToIce, & - ungridded_lbound=1, ungridded_ubound=4) - - end if - - do n = 1,fldsFrIce_num - call NUOPC_Advertise(exportState, standardName=fldsFrIce(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - enddo - - if (ice_prognostic) then - do n = 1,fldsToIce_num - call NUOPC_Advertise(importState, standardName=fldsToIce(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - enddo - end if - - end subroutine dice_comp_advertise - - !=============================================================================== - - subroutine dice_comp_init(flds_i2o_per_cat, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon, calendar, mesh, nxg, nyg) - - ! !DESCRIPTION: initialize dice model - - ! input/output parameters: - logical , intent(in) :: flds_i2o_per_cat ! .true. if select per ice thickness fields from ice - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - character(len=*) , intent(in) :: calendar ! calendar type - type(ESMF_Mesh) , intent(in) :: mesh ! ESMF dice mesh - integer , intent(out) :: nxg, nyg - - !--- local variables --- - integer :: n,k ! generic counters - integer :: ierr ! error code - integer :: lsize ! local size - integer :: kfld ! field reference - logical :: exists,exists1 ! file existance logical - integer :: nu ! unit number - type(ESMF_DistGrid) :: distGrid - integer, allocatable, target :: gindex(:) - integer :: rc - integer :: dimCount - integer :: tileCount - integer :: deCount - integer :: gsize - integer, allocatable :: elementCountPTile(:) - integer, allocatable :: indexCountPDE(:,:) - integer :: spatialDim - integer :: numOwnedElements - real(R8), pointer :: ownedElemCoords(:) - character(*), parameter :: F00 = "('(dice_comp_init) ',8a)" - character(*), parameter :: F01 = "('(dice_comp_init) ',a,2f10.4)" - character(*), parameter :: subName = "(dice_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DICE_INIT') - - !---------------------------------------------------------------------------- - ! Initialize PIO - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDICE, compid) - - !---------------------------------------------------------------------------- - ! 
Create a data model global segmap - !---------------------------------------------------------------------------- - - call t_startf('dice_strdata_init') - - if (my_task == master_task) write(logunit,F00) ' initialize SDICE gsmap' - - ! obtain the distgrid from the mesh that was read in - call ESMF_MeshGet(Mesh, elementdistGrid=distGrid, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determin local size on my processor - call ESMF_distGridGet(distGrid, localDe=0, elementCount=lsize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global index space for my processor - allocate(gindex(lsize)) - call ESMF_distGridGet(distGrid, localDe=0, seqIndexList=gindex, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global size of distgrid - call ESMF_distGridGet(distGrid, dimCount=dimCount, deCount=deCount, tileCount=tileCount, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - allocate(elementCountPTile(tileCount)) - call ESMF_distGridGet(distGrid, elementCountPTile=elementCountPTile, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - gsize = 0 - do n = 1,size(elementCountPTile) - gsize = gsize + elementCountPTile(n) - end do - deallocate(elementCountPTile) - - ! create the data model gsmap given the local size, global size and gindex - call mct_gsMap_init( SDICE%gsmap, gindex, mpicom, compid, lsize, gsize) - deallocate(gindex) - - !---------------------------------------------------------------------------- - ! Initialize SDICE - !---------------------------------------------------------------------------- - - ! The call to shr_strdata_init_model_domain creates the SDICE%gsmap which - ! is a '2d1d' decommp (1d decomp of 2d grid) and also create SDICE%grid - - SDICE%calendar = trim(shr_cal_calendarName(trim(calendar))) - - if (scmmode) then - if (my_task == master_task) write(logunit,F01) ' scm lon lat = ',scmlon,scmlat - call shr_strdata_init_model_domain(SDICE, mpicom, compid, my_task, & - scmmode=scmmode, scmlon=scmlon, scmlat=scmlat, gsmap=SDICE%gsmap) - else - call shr_strdata_init_model_domain(SDICE, mpicom, compid, my_task, gsmap=SDICE%gsmap) - end if - - if (my_task == master_task) then - call shr_strdata_print(SDICE,'SDICE data') - endif - - ! obtain mesh lats and lons - call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - allocate(ownedElemCoords(spatialDim*numOwnedElements)) - allocate(xc(numOwnedElements), yc(numOwnedElements)) - call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (numOwnedElements /= lsize) then - call shr_sys_abort('ERROR: numOwnedElements is not equal to lsize') - end if - do n = 1,lsize - xc(n) = ownedElemCoords(2*n-1) - yc(n) = ownedElemCoords(2*n) - end do - - ! error check that mesh lats and lons correspond to those on the input domain file - index_lon = mct_aVect_indexRA(SDICE%grid%data,'lon') - do n = 1, lsize - if (abs(mod(SDICE%grid%data%rattr(index_lon,n) - xc(n),360.0_R8)) > 1.e-4) then - write(6,*)'ERROR: lon diff = ',abs(SDICE%grid%data%rattr(index_lon,n) - xc(n)),' too large' - call shr_sys_abort() - end if - !SDICE%grid%data%rattr(index_lon,n) = xc(n) ! 
overwrite ggrid with mesh data - end do - index_lat = mct_aVect_indexRA(SDICE%grid%data,'lat') - do n = 1, lsize - if (abs( SDICE%grid%data%rattr(index_lat,n) - yc(n)) > 1.e-4) then - write(6,*)'ERROR: lat diff = ',abs(SDICE%grid%data%rattr(index_lat,n) - yc(n)),' too large' - call shr_sys_abort() - end if - !SDICE%grid%data%rattr(index_lat,n) = yc(n) ! overwrite ggrid with mesh data - end do - - ! Note that the module array, imask, does not change after initialization - allocate(imask(lsize)) - kfld = mct_aVect_indexRA(SDICE%grid%data,'mask') - imask(:) = nint(SDICE%grid%data%rAttr(kfld,:)) - - if (my_task == master_task) then - call shr_strdata_print(SDICE,'SDICE data') - endif - - call t_stopf('dice_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize SDICE attributes for streams and mapping of streams to model domain - !---------------------------------------------------------------------------- - - call shr_strdata_init_streams(SDICE, compid, mpicom, my_task) - call shr_strdata_init_mapping(SDICE, compid, mpicom, my_task) - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('dice_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - - call mct_aVect_init(i2x, rList=flds_i2x, lsize=lsize) - call mct_aVect_zero(i2x) - - ! optional per thickness category fields - if (flds_i2o_per_cat) then - kiFrac_01 = mct_aVect_indexRA(i2x,'Si_ifrac_01') - kswpen_iFrac_01 = mct_aVect_indexRA(i2x,'Fioi_swpen_ifrac_01') - end if - - call mct_aVect_init(x2i, rList=flds_x2i, lsize=lsize) - call mct_aVect_zero(x2i) - - allocate(water(lsize)) - allocate(tfreeze(lsize)) - ! allocate(iFrac0(lsize)) - - if (km /= 0) then - i2x%rAttr(km, :) = imask(:) - end if - - call t_stopf('dice_initmctavs') - - nxg = SDICE%nxg - nyg = SDICE%nyg - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - exists = .false. - exists1 = .false. - if (trim(rest_file) == trim(nullstr) .and. & - trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer = ',trim(rpfile) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (exists) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - inquire(file=trim(rest_file),exist=exists1) - endif - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! 
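!
! Note (illustrative, not part of the original source): the rpointer file read
! above (rpointer.ice plus any instance suffix) is a plain two-line text file
! naming the model and stream restart files.  A hypothetical example, following
! the naming pattern written out in dice_comp_run below:
!   casename.dice.r.0001-01-06-00000.nc
!   casename.dice.rs1.0001-01-06-00000.bin
!
! otherwise, the code will simply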
use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - - call shr_mpi_bcast(exists,mpicom,'exists') - call shr_mpi_bcast(exists1,mpicom,'exists1') - - if (exists1) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - call shr_pcdf_readwrite('read',SDICE%pio_subsystem, SDICE%io_type, & - trim(rest_file), mpicom, gsmap=SDICE%gsmap, rf1=water, rf1n='water', io_format=SDICE%io_format) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file) - endif - - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDICE,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - endif - - !---------------------------------------------------------------------------- - ! On initial call, x2i is unset, so set for use in run method - ! These values should have no impact on the solution!! - !---------------------------------------------------------------------------- - - x2i%rAttr(kz,:) = 10.0_R8 - x2i%rAttr(kua,:) = 5.0_R8 - x2i%rAttr(kva,:) = 5.0_R8 - x2i%rAttr(kptem,:) = 260.0_R8 - x2i%rAttr(ktbot,:) = 260.0_R8 - x2i%rAttr(kshum,:) = 0.0014_R8 - x2i%rAttr(kdens,:) = 1.3_R8 - - call t_stopf('DICE_INIT') - - end subroutine dice_comp_init - - !=============================================================================== - - subroutine dice_comp_run(flds_i2o_per_cat, mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - calendar, modeldt, target_ymd, target_tod, cosArg, case_name ) - - ! !DESCRIPTION: run method for dice model - - ! input/output parameters: - logical , intent(in) :: flds_i2o_per_cat ! .true. if select per ice thickness fields from ice - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: write_restart ! restart now - character(len=*) , intent(in) :: calendar - integer , intent(in) :: modeldt - integer , intent(in) :: target_ymd - integer , intent(in) :: target_tod - real(R8) , intent(in) :: cosarg ! for setting ice temp pattern - character(len=*) , intent(in), optional :: case_name ! case name - - !--- local --- - integer :: n,nfld ! indices - integer :: lsize ! size of attr vect - real(R8) :: dt ! timestep - integer :: nu ! unit number - real(R8) :: qmeltall ! q that would melt all accumulated water - character(len=CS) :: fldname - character(len=18) :: date_str - - character(*), parameter :: F00 = "('(dice_comp_run) ',8a)" - character(*), parameter :: F04 = "('(dice_comp_run) ',2a,2i8,'s')" - character(*), parameter :: F0D = "('(dice_comp_run) ',a, i7,2x,i5,2x,i5,2x,d21.14)" - character(*), parameter :: subName = "(dice_comp_run) " - !------------------------------------------------------------------------------- - - !-------------------- - ! Debug output - !-------------------- - - if (debug_import > 1 .and. 
my_task == master_task) then - do nfld = 1, mct_aVect_nRAttr(x2i) - call shr_string_listGetName(trim(flds_x2i), nfld, fldname) - do n = 1, mct_aVect_lsize(x2i) - write(logunit,F0D)'import: ymd,tod,n = '// trim(fldname),target_ymd, target_tod, & - n, x2i%rattr(nfld,n) - end do - end do - end if - - !-------------------- - ! ADVANCE ICE - !-------------------- - - call t_startf('DICE_RUN') - call t_barrierf('dice_BARRIER',mpicom) - call t_startf('dice') - - dt = modeldt * 1.0_r8 - - !--- copy all stream fields to i2x as default (avifld in streams -> avofld in i2x) - - if (trim(datamode) /= 'NULL') then - call t_startf('dice_strdata_advance') - call shr_strdata_advance(SDICE,target_ymd,target_tod,mpicom,'dice') - call t_stopf('dice_strdata_advance') - call t_barrierf('dice_scatter_BARRIER',mpicom) - call t_startf('dice_scatter') - do n = 1,SDICE%nstreams - call shr_dmodel_translateAV(SDICE%avs(n),i2x,avifld,avofld) - enddo - call t_stopf('dice_scatter') - else - call mct_aVect_zero(i2x) - endif - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('dice_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - case('SSTDATA') - if (firstcall .and. .not. read_restart) then - ! iFrac0 = iFrac ! previous step's ice fraction - water = 0.0_R8 ! previous step's water accumulation - where (i2x%rAttr(kiFrac,:) > 0.0_R8) water(:) = flux_Qacc0 - endif - - lsize = mct_avect_lsize(i2x) - - tfreeze = shr_frz_freezetemp(x2i%rAttr(ksalinity,:)) + tFrz ! convert to Kelvin - - do n = 1,lsize - - !--- fix erroneous iFrac --- - i2x%rAttr(kiFrac,n) = min(1.0_R8,max(0.0_R8,i2x%rAttr(kiFrac,n))) - - !--- fabricate ice surface T, fix erroneous iFrac --- - if ( yc(n) > 0.0_R8) then - i2x%rAttr(kt,n) = 260.0_R8 + 10.0_R8*cos(cosArg) - else - i2x%rAttr(kt,n) = 260.0_R8 - 10.0_R8*cos(cosArg) - end if - - !--- set albedos (constant) --- - i2x%rAttr(kavsdr,n) = ax_vsdr - i2x%rAttr(kanidr,n) = ax_nidr - i2x%rAttr(kavsdf,n) = ax_vsdf - i2x%rAttr(kanidf,n) = ax_nidf - - !--- swnet is sent to cpl as a diagnostic quantity only --- - !--- newly recv'd swdn goes with previously sent albedo --- - !--- but albedos are (currently) time invariant --- - i2x%rAttr(kswnet,n) = (1.0_R8 - i2x%rAttr(kavsdr,n))*x2i%rAttr(kswvdr,n) & - + (1.0_R8 - i2x%rAttr(kanidr,n))*x2i%rAttr(kswndr,n) & - + (1.0_R8 - i2x%rAttr(kavsdf,n))*x2i%rAttr(kswvdf,n) & - + (1.0_R8 - i2x%rAttr(kanidf,n))*x2i%rAttr(kswndf,n) - - !--- compute melt/freeze water balance, adjust iFrac ------------- - if ( .not. flux_Qacc ) then ! Q accumulation option is OFF - i2x%rAttr(kmelth,n) = min(x2i%rAttr(kq,n),0.0_R8 ) ! q<0 => melt potential - i2x%rAttr(kmelth,n) = max(i2x%rAttr(kmelth,n),Flux_Qmin ) ! limit the melt rate - i2x%rAttr(kmeltw,n) = -i2x%rAttr(kmelth,n)/latice ! corresponding water flux - - else ! Q accumulation option is ON - !-------------------------------------------------------------- - ! 1a) Q<0 & iFrac > 0 => infinite supply of water to melt - ! 1b) Q<0 & iFrac = 0 => melt accumulated water only - ! 2a) Q>0 & iFrac > 0 => zero-out accumulated water - ! 2b) Q>0 & iFrac = 0 => accumulated water - !-------------------------------------------------------------- - if ( x2i%rAttr(kq,n) < 0.0_R8 ) then ! Q<0 => melt - if (i2x%rAttr(kiFrac,n) > 0.0_R8 ) then - i2x%rAttr(kmelth,n) = i2x%rAttr(kiFrac,n)*max(x2i%rAttr(kq,n),Flux_Qmin) - i2x%rAttr(kmeltw,n) = -i2x%rAttr(kmelth,n)/latice - ! 
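!
! Note (illustrative, not part of the original source): units check for the
! conversion just above.  melth is a heat flux in W/m^2 and latice
! (shr_const_latice, ~3.337e5 J/kg) is the latent heat of fusion, so
! meltw = -melth/latice is a freshwater flux in kg/m^2/s; for example a melt
! potential of melth = -50 W/m^2 gives meltw = 50/3.337e5 ~ 1.5e-4 kg/m^2/s.
!
! in this branch,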
water(n) = < don't change this value > - else - Qmeltall = -water(n)*latice/dt - i2x%rAttr(kmelth,n) = max(x2i%rAttr(kq,n), Qmeltall, Flux_Qmin ) - i2x%rAttr(kmeltw,n) = -i2x%rAttr(kmelth,n)/latice - water(n) = water(n) - i2x%rAttr(kmeltw,n)*dt - end if - else ! Q>0 => freeze - if (i2x%rAttr(kiFrac,n) > 0.0_R8 ) then - i2x%rAttr(kmelth,n) = 0.0_R8 - i2x%rAttr(kmeltw,n) = 0.0_R8 - water(n) = 0.0_R8 - else - i2x%rAttr(kmelth,n) = 0.0_R8 - i2x%rAttr(kmeltw,n) = 0.0_R8 - water(n) = water(n) + dt*x2i%rAttr(kq,n)/latice - end if - end if - - if (water(n) < 1.0e-16_R8 ) water(n) = 0.0_R8 - - !--- non-zero water => non-zero iFrac --- - if (i2x%rAttr(kiFrac,n) <= 0.0_R8 .and. water(n) > 0.0_R8) then - i2x%rAttr(kiFrac,n) = min(1.0_R8,water(n)/waterMax) - ! i2x%rAttr(kT,n) = tfreeze(n) ! T can be above freezing?!? - end if - - !--- cpl multiplies melth & meltw by iFrac --- - !--- divide by iFrac here => fixed quantity flux (not per area) --- - if (i2x%rAttr(kiFrac,n) > 0.0_R8) then - i2x%rAttr(kiFrac,n) = max( 0.01_R8, i2x%rAttr(kiFrac,n)) ! min iFrac - i2x%rAttr(kmelth,n) = i2x%rAttr(kmelth,n)/i2x%rAttr(kiFrac,n) - i2x%rAttr(kmeltw,n) = i2x%rAttr(kmeltw,n)/i2x%rAttr(kiFrac,n) - else - i2x%rAttr(kmelth,n) = 0.0_R8 - i2x%rAttr(kmeltw,n) = 0.0_R8 - end if - end if - - !--- modify T wrt iFrac: (iFrac -> 0) => (T -> tfreeze) --- - i2x%rAttr(kt,n) = tfreeze(n) + i2x%rAttr(kiFrac,n)*(i2x%rAttr(kt,n)-tfreeze(n)) - - end do - - ! compute ice/ice surface fluxes - call dice_flux_atmice( & - iMask ,x2i%rAttr(kz,:) ,x2i%rAttr(kua,:) ,x2i%rAttr(kva,:) , & - x2i%rAttr(kptem,:) ,x2i%rAttr(kshum,:) ,x2i%rAttr(kdens,:) ,x2i%rAttr(ktbot,:), & - i2x%rAttr(kt,:) ,i2x%rAttr(ksen,:) ,i2x%rAttr(klat,:) ,i2x%rAttr(klwup,:), & - i2x%rAttr(kevap,:) ,i2x%rAttr(ktauxa,:) ,i2x%rAttr(ktauya,:) ,i2x%rAttr(ktref,:), & - i2x%rAttr(kqref,:) ,logunit ) - - ! compute ice/oce surface fluxes (except melth & meltw, see above) - do n=1,lsize - if (iMask(n) == 0) then - i2x%rAttr(kswpen,n) = spval - i2x%rAttr(kmelth,n) = spval - i2x%rAttr(kmeltw,n) = spval - i2x%rAttr(ksalt ,n) = spval - i2x%rAttr(ktauxo,n) = spval - i2x%rAttr(ktauyo,n) = spval - i2x%rAttr(kiFrac,n) = 0.0_R8 - else - !--- penetrating short wave --- - i2x%rAttr(kswpen,n) = max(0.0_R8, flux_swpf*i2x%rAttr(kswnet,n) ) ! must be non-negative - - !--- i/o surface stress ( = atm/ice stress) --- - i2x%rAttr(ktauxo,n) = i2x%rAttr(ktauxa,n) - i2x%rAttr(ktauyo,n) = i2x%rAttr(ktauya,n) - - !--- salt flux --- - i2x%rAttr(ksalt ,n) = 0.0_R8 - end if - if (km /= 0) then - i2x%rAttr(km, n) = imask(n) - end if - ! !--- save ifrac for next timestep - ! iFrac0(n) = i2x%rAttr(kiFrac,n) - end do - - ! Compute outgoing aerosol fluxes - do n = 1,lsize - i2x%rAttr(kbcpho ,n) = x2i%rAttr(kbcphodry,n) - i2x%rAttr(kbcphi ,n) = x2i%rAttr(kbcphidry,n) + x2i%rAttr(kbcphiwet,n) - i2x%rAttr(kflxdst,n) = x2i%rAttr(kdstdry1,n) + x2i%rAttr(kdstwet1,n) & - + x2i%rAttr(kdstdry2,n) + x2i%rAttr(kdstwet2,n) & - + x2i%rAttr(kdstdry3,n) + x2i%rAttr(kdstwet3,n) & - + x2i%rAttr(kdstdry4,n) + x2i%rAttr(kdstwet4,n) - end do - - end select - - !------------------------------------------------- - ! optional per thickness category fields - !------------------------------------------------- - - if (flds_i2o_per_cat) then - do n=1,lsize - i2x%rAttr(kiFrac_01,n) = i2x%rAttr(kiFrac,n) - i2x%rAttr(kswpen_iFrac_01,n) = i2x%rAttr(kswpen,n) * i2x%rAttr(kiFrac,n) - end do - end if - - call t_stopf('dice_datamode') - - !-------------------- - ! Debug output - !-------------------- - - if (debug_export > 1 .and. 
my_task == master_task) then - do nfld = 1, mct_aVect_nRAttr(i2x) - call shr_string_listGetName(trim(flds_i2x), nfld, fldname) - do n = 1, mct_aVect_lsize(i2x) - write(logunit,F0D)'export: ymd,tod,n = '// trim(fldname),target_ymd, target_tod, & - n, i2x%rattr(nfld,n) - end do - end do - end if - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('dice_restart') - call shr_cal_datetod2string(date_str, target_ymd, target_tod) - write(rest_file,"(6a)") & - trim(case_name), '.dice',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.dice',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file),target_ymd,target_tod - call shr_pcdf_readwrite('write',SDICE%pio_subsystem, SDICE%io_type, & - trim(rest_file), mpicom, SDICE%gsmap, clobber=.true., rf1=water, rf1n='water') - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),target_ymd,target_tod - call shr_strdata_restWrite(trim(rest_file_strm),SDICE,mpicom,trim(case_name),'SDICE strdata') - call t_stopf('dice_restart') - endif - - call t_stopf('dice') - - firstcall = .false. - - call t_stopf('DICE_RUN') - - end subroutine dice_comp_run - - !=============================================================================== - - subroutine dice_comp_import(importState, rc) - - ! input/output variables - type(ESMF_State) :: importState - integer, intent(out) :: rc - - ! local variables - integer :: k - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call dshr_import(importState, 'Sa_z', x2i%rattr(kz,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Sa_u', x2i%rattr(kua,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Sa_v', x2i%rattr(kva,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Sa_ptem', x2i%rattr(kptem,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Sa_dens', x2i%rattr(kdens,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Sa_tbot', x2i%rattr(ktbot,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Sa_shum', x2i%rattr(kshum,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Faxa_swndr' , x2i%rattr(kswndr,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_swndf' , x2i%rattr(kswndf,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_swvdr' , x2i%rattr(kswvdr,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_swvdf' , x2i%rattr(kswvdf,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Faxa_bcph', x2i%rattr(kbcphidry,:), ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_bcph', x2i%rattr(kbcphodry,:), ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_bcph', x2i%rattr(kbcphiwet,:), ungridded_index=3, rc=rc) - if 
(ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Faxa_ocph', x2i%rattr(kocphidry,:), ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_ocph', x2i%rattr(kocphodry,:), ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_ocph', x2i%rattr(kocphiwet,:), ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Faxa_dstwet', x2i%rattr(kdstwet1,:), ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_dstwet', x2i%rattr(kdstwet2,:), ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_dstwet', x2i%rattr(kdstwet3,:), ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_dstwet', x2i%rattr(kdstwet4,:), ungridded_index=4, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Faxa_dstdry', x2i%rattr(kdstdry1,:), ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_dstdry', x2i%rattr(kdstdry2,:), ungridded_index=2, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_dstdry', x2i%rattr(kdstdry3,:), ungridded_index=3, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'Faxa_dstdry', x2i%rattr(kdstdry4,:), ungridded_index=4, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Fioo_q' , x2i%rattr(kq,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_import(importState, 'So_s' , x2i%rattr(ksalinity,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine dice_comp_import - - !=============================================================================== - - subroutine dice_comp_export(exportState, flds_i2o_per_cat, rc) - - ! input/output variables - type(ESMF_State) :: exportState - logical, intent(in) :: flds_i2o_per_cat - integer, intent(out) :: rc - - ! 
local variables - integer :: k - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call dshr_export(i2x%rattr(kiFrac,:) , exportState, 'Si_ifrac', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (flds_i2o_per_cat) then - call dshr_export(i2x%rattr(kiFrac_01,:), exportState, 'Si_ifrac_n', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(kswpen_iFrac_01,:), exportState, 'Fioi_swpen_ifrac_n', ungridded_index=1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call dshr_export(i2x%rattr(km,:) , exportState, 'Si_imask', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kt,:), exportState, 'Si_t', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(ktref,:), exportState, 'Si_tref' , rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kqref,:), exportState, 'Si_qref', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kavsdr,:), exportState, 'Si_avsdr', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(kanidr,:), exportState, 'Si_anidr', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(kavsdf,:), exportState, 'Si_avsdf', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(kanidf,:), exportState, 'Si_anidf', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kswnet,:), exportState, 'Faii_swnet', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(ksen,:), exportState, 'Faii_sen', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(klat,:), exportState, 'Faii_lat', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(klwup,:), exportState, 'Faii_lwup', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(kevap,:), exportState, 'Faii_evap', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(ktauxa,:), exportState, 'Faii_taux', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(ktauya,:), exportState, 'Faii_tauy', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kmelth,:), exportState, 'Fioi_melth', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(kmeltw,:), exportState, 'Fioi_meltw', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kswpen,:), exportState, 'Fioi_swpen', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(ktauxo,:), exportState, 'Fioi_taux', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(ktauyo,:), exportState, 'Fioi_tauy', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(ksalt,:), exportState, 'Fioi_salt', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kbcpho,:), exportState, 'Fioi_bcpho', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call dshr_export(i2x%rattr(kbcphi,:), exportState, 'Fioi_bcphi', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(i2x%rattr(kflxdst,:), exportState, 'Fioi_flxdst', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine dice_comp_export - -end module dice_comp_mod diff --git a/src/components/data_comps/dice/nuopc/dice_flux_atmice_mod.F90 
b/src/components/data_comps/dice/nuopc/dice_flux_atmice_mod.F90 deleted file mode 100644 index 6a044d08b33..00000000000 --- a/src/components/data_comps/dice/nuopc/dice_flux_atmice_mod.F90 +++ /dev/null @@ -1,277 +0,0 @@ -module dice_flux_atmice_mod - - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_const_mod ! shared constants - use shr_sys_mod ! shared system routines - - implicit none - - real(R8) :: loc_zvir = shr_const_zvir - real(R8) :: loc_cpdair = shr_const_cpdair - real(R8) :: loc_cpvir = shr_const_cpvir - real(R8) :: loc_karman = shr_const_karman - real(R8) :: loc_g = shr_const_g - real(R8) :: loc_latvap = shr_const_latvap - real(R8) :: loc_latice = shr_const_latice - real(R8) :: loc_stebol = shr_const_stebol - - integer,parameter :: dbug = 0 ! internal debug level - -!=============================================================================== -contains -!=============================================================================== - - subroutine dice_flux_atmice( & - mask ,zbot ,ubot ,vbot ,thbot, & - qbot ,rbot ,tbot ,ts ,sen, & - lat ,lwup ,evap ,taux ,tauy, & - tref ,qref ,logunit ) - - !------------------------------------------------------------------------------- - ! PURPOSE: - ! using atm & ice state variables, compute atm/ice fluxes - ! and diagnostic 10m air temperature and humidity - ! - ! NOTE: - ! o all fluxes are positive downward - ! o net heat flux = net sw + lw up + lw down + sen + lat - ! o here, tstar = /U*, and qstar = /U*. - ! o wind speeds should all be above a minimum speed (eg. 1.0 m/s) - ! - ! ASSUME: - ! o The saturation humidity of air at T(K): qsat(T) (kg/m^3) - !------------------------------------------------------------------------------- - - !--- input arguments -------------------------------- - integer(IN),intent(in) :: mask (:) ! 0 <=> cell NOT in model domain - real(R8) ,intent(in) :: zbot (:) ! atm level height (m) - real(R8) ,intent(in) :: ubot (:) ! atm u wind (m/s) - real(R8) ,intent(in) :: vbot (:) ! atm v wind (m/s) - real(R8) ,intent(in) :: thbot(:) ! atm potential T (K) - real(R8) ,intent(in) :: qbot (:) ! atm specific humidity (kg/kg) - real(R8) ,intent(in) :: rbot (:) ! atm air density (kg/m^3) - real(R8) ,intent(in) :: tbot (:) ! atm T (K) - real(R8) ,intent(in) :: ts (:) ! surface temperature - integer(IN),intent(in) :: logunit ! logging unit number - - !--- output arguments ------------------------------- - real(R8) ,intent(out) :: sen (:) ! sensible heat flux (W/m^2) - real(R8) ,intent(out) :: lat (:) ! latent heat flux (W/m^2) - real(R8) ,intent(out) :: lwup (:) ! long-wave upward heat flux (W/m^2) - real(R8) ,intent(out) :: evap (:) ! evaporative water flux ((kg/s)/m^2) - real(R8) ,intent(out) :: taux (:) ! x surface stress (N) - real(R8) ,intent(out) :: tauy (:) ! y surface stress (N) - real(R8) ,intent(out) :: tref (:) ! 2m reference height temperature - real(R8) ,intent(out) :: qref (:) ! 2m reference height humidity - - !--- local constants -------------------------------- - real(R8),parameter :: umin = 1.0_R8 ! minimum wind speed (m/s) - real(R8),parameter :: zref = 10.0_R8 ! ref height ~ m - real(R8),parameter :: ztref = 2.0_R8 ! ref height for air T ~ m - real(R8),parameter :: spval = shr_const_spval ! special value - real(R8),parameter :: zzsice = 0.0005_R8 ! ice surface roughness - - !--- local variables -------------------------------- - integer(IN) :: lsize ! array dimensions - integer(IN) :: n ! array indicies - real(R8) :: vmag ! 
surface wind magnitude (m/s) - real(R8) :: thvbot ! virtual temperature (K) - real(R8) :: ssq ! sea surface humidity (kg/kg) - real(R8) :: dssqdt ! derivative of ssq wrt Ts (kg/kg/K) - real(R8) :: delt ! potential T difference (K) - real(R8) :: delq ! humidity difference (kg/kg) - real(R8) :: stable ! stability factor - real(R8) :: rdn ! sqrt of neutral exchange coefficient (momentum) - real(R8) :: rhn ! sqrt of neutral exchange coefficient (heat) - real(R8) :: ren ! sqrt of neutral exchange coefficient (water) - real(R8) :: rd ! sqrt of exchange coefficient (momentum) - real(R8) :: rh ! sqrt of exchange coefficient (heat) - real(R8) :: re ! sqrt of exchange coefficient (water) - real(R8) :: ustar ! ustar - real(R8) :: qstar ! qstar - real(R8) :: tstar ! tstar - real(R8) :: hol ! H (at zbot) over L - real(R8) :: xsq ! temporary variable - real(R8) :: xqq ! temporary variable - real(R8) :: psimh ! stability function at zbot (momentum) - real(R8) :: psixh ! stability function at zbot (heat and water) - real(R8) :: alz ! ln(zbot/z10) - real(R8) :: ltheat ! latent heat for surface - real(R8) :: tau ! stress at zbot - real(R8) :: cp ! specific heat of moist air - - real(R8) :: bn ! exchange coef funct for interpolation - real(R8) :: bh ! exchange coef funct for interpolation - real(R8) :: fac ! interpolation factor - real(R8) :: ln0 ! log factor for interpolation - real(R8) :: ln3 ! log factor for interpolation - - !--- local functions -------------------------------- - real(R8) :: Tk ! temperature (K) - real(R8) :: qsat ! the saturation humidity of air (kg/m^3) - real(R8) :: dqsatdt ! derivative of qsat wrt surface temperature - real(R8) :: xd ! dummy argument - real(R8) :: psimhu ! unstable part of psimh - real(R8) :: psixhu ! unstable part of psimx - - qsat(Tk) = 627572.4_R8 / exp(5107.4_R8/Tk) - dqsatdt(Tk) = (5107.4_R8 / Tk**2) * 627572.4_R8 / exp(5107.4_R8/Tk) - psimhu(xd) = log((1.0_R8+xd*(2.0_R8+xd))*(1.0_R8+xd*xd)/8.0_R8) - 2.0_R8*atan(xd) + 1.571_R8 - psixhu(xd) = 2.0_R8 * log((1.0_R8 + xd*xd)/2.0_R8) - - !--- formats ---------------------------------------- - character(*),parameter :: F01 = "('(dice_flux_atmIce) ',a, i7,2x,d21.14)" - character(*),parameter :: subName = "(dice_flux_atmIce) " - !------------------------------------------------------------------------------- - - lsize = size(tbot) - - do n = 1,lsize - - if (mask(n) == 0) then - sen (n) = spval - lat (n) = spval - lwup (n) = spval - evap (n) = spval - taux (n) = spval - tauy (n) = spval - tref (n) = spval - qref (n) = spval - else - !--- define some needed variables --- - vmag = max(umin, sqrt(ubot(n)**2+vbot(n)**2)) - thvbot = thbot(n)*(1.0_R8 + loc_zvir * qbot(n)) ! virtual pot temp (K) - ssq = qsat (ts(n)) / rbot(n) ! sea surf hum (kg/kg) - dssqdt = dqsatdt(ts(n)) / rbot(n) ! deriv of ssq wrt Ts - delt = thbot(n) - ts(n) ! pot temp diff (K) - delq = qbot(n) - ssq ! spec hum dif (kg/kg) - alz = log(zbot(n)/zref) - cp = loc_cpdair*(1.0_R8 + loc_cpvir*ssq) - ltheat = loc_latvap + loc_latice - - !---------------------------------------------------------- - ! 
first estimate of Z/L and ustar, tstar and qstar - !---------------------------------------------------------- - - !--- neutral coefficients, z/L = 0.0 --- - rdn = loc_karman/log(zref/zzsice) - rhn = rdn - ren = rdn - - !--- ustar,tstar,qstar ---- - ustar = rdn * vmag - tstar = rhn * delt - qstar = ren * delq - - !--- compute stability & evaluate all stability functions --- - hol = loc_karman * loc_g * zbot(n) & - & * (tstar/thvbot+qstar/(1.0_R8/loc_zvir+qbot(n))) / ustar**2 - hol = sign( min(abs(hol),10.0_R8), hol ) - stable = 0.5_R8 + sign(0.5_R8 , hol) - xsq = max(sqrt(abs(1.0_R8 - 16.0_R8*hol)) , 1.0_R8) - xqq = sqrt(xsq) - psimh = -5.0_R8*hol*stable + (1.0_R8-stable)*psimhu(xqq) - psixh = -5.0_R8*hol*stable + (1.0_R8-stable)*psixhu(xqq) - - !--- shift all coeffs to measurement height and stability --- - rd = rdn / (1.0_R8+rdn/loc_karman*(alz-psimh)) - rh = rhn / (1.0_R8+rhn/loc_karman*(alz-psixh)) - re = ren / (1.0_R8+ren/loc_karman*(alz-psixh)) - - !--- update ustar, tstar, qstar w/ updated, shifted coeffs -- - ustar = rd * vmag - tstar = rh * delt - qstar = re * delq - - !---------------------------------------------------------- - ! iterate to converge on Z/L, ustar, tstar and qstar - !---------------------------------------------------------- - - !--- compute stability & evaluate all stability functions --- - hol = loc_karman * loc_g * zbot(n) & - & * (tstar/thvbot+qstar/(1.0_R8/loc_zvir+qbot(n))) / ustar**2 - hol = sign( min(abs(hol),10.0_R8), hol ) - stable = 0.5_R8 + sign(0.5_R8 , hol) - xsq = max(sqrt(abs(1.0_R8 - 16.0_R8*hol)) , 1.0_R8) - xqq = sqrt(xsq) - psimh = -5.0_R8*hol*stable + (1.0_R8-stable)*psimhu(xqq) - psixh = -5.0_R8*hol*stable + (1.0_R8-stable)*psixhu(xqq) - - !--- shift all coeffs to measurement height and stability --- - rd = rdn / (1.0_R8+rdn/loc_karman*(alz-psimh)) - rh = rhn / (1.0_R8+rhn/loc_karman*(alz-psixh)) - re = ren / (1.0_R8+ren/loc_karman*(alz-psixh)) - - !--- update ustar, tstar, qstar w/ updated, shifted coeffs -- - ustar = rd * vmag - tstar = rh * delt - qstar = re * delq - - !---------------------------------------------------------- - ! compute the fluxes - !---------------------------------------------------------- - - tau = rbot(n) * ustar * ustar - - !--- momentum flux --- - taux(n) = tau * ubot(n) / vmag - tauy(n) = tau * vbot(n) / vmag - - !--- heat flux --- - sen (n) = cp * tau * tstar / ustar - lat (n) = ltheat * tau * qstar / ustar - lwup(n) = -loc_stebol * ts(n)**4 - - !--- water flux --- - evap(n) = lat(n)/ltheat - - !---------------------------------------------------------- - ! compute diagnostic: 2m reference height temperature - !---------------------------------------------------------- - - !--- Compute function of exchange coefficients. 
Assume that - !--- cn = rdn*rdn, cm=rd*rd and ch=rh*rd, and therefore - !--- 1/sqrt(cn(n))=1/rdn and sqrt(cm(n))/ch(n)=1/rh - bn = loc_karman/rdn - bh = loc_karman/rh - - !--- Interpolation factor for stable and unstable cases - ln0 = log(1.0_R8 + (ztref/zbot(n))*(exp(bn) - 1.0_R8)) - ln3 = log(1.0_R8 + (ztref/zbot(n))*(exp(bn - bh) - 1.0_R8)) - fac = (ln0 - ztref/zbot(n)*(bn - bh))/bh * stable & - & + (ln0 - ln3)/bh * (1.0_R8-stable) - fac = min(max(fac,0.0_R8),1.0_R8) - - !--- actual interpolation - tref(n) = ts(n) + (tbot(n) - ts(n))*fac - qref(n) = qbot(n) - delq*fac - - endif - enddo - - if (dbug > 0) then - do n = 1,lsize - if (mask(n) /= 0) then - write(logunit, F01)'n,mask = ',n,mask(n) - write(logunit, F01)'n,zbot = ',n,zbot(n) - write(logunit, F01)'n,ubot = ',n,ubot(n) - write(logunit, F01)'n,vbot = ',n,vbot(n) - write(logunit, F01)'n,thbot = ',n,thbot(n) - write(logunit, F01)'n,qbot = ',n,qbot(n) - write(logunit, F01)'n,tbot = ',n,tbot(n) - write(logunit, F01)'n,ts = ',n,ts(n) - write(logunit, F01)'n,lat = ',n,lat(n) - write(logunit, F01)'n,sen = ',n,sen(n) - write(logunit, F01)'n,taux = ',n,taux(n) - write(logunit, F01)'n,taux = ',n,tauy(n) - write(logunit, F01)'n,lwup = ',n,lwup(n) - write(logunit, F01)'n,evap = ',n,evap(n) - write(logunit, F01)'n,tref = ',n,tref(n) - write(logunit, F01)'n,qref = ',n,qref(n) - end if - end do - end if - - end subroutine dice_flux_atmIce - -end module dice_flux_atmice_mod diff --git a/src/components/data_comps/dice/nuopc/dice_shr_mod.F90 b/src/components/data_comps/dice/nuopc/dice_shr_mod.F90 deleted file mode 100644 index 3074836cee9..00000000000 --- a/src/components/data_comps/dice/nuopc/dice_shr_mod.F90 +++ /dev/null @@ -1,152 +0,0 @@ -module dice_shr_mod - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dice_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! Note that model decomp will now come from reading in the mesh directly - - ! stream data type - type(shr_strdata_type), public :: SDICE - - ! input namelist variables - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - real(R8) , public :: flux_swpf ! short-wave penatration factor - real(R8) , public :: flux_Qmin ! bound on melt rate - logical , public :: flux_Qacc ! activates water accumulation/melt wrt Q - real(R8) , public :: flux_Qacc0 ! initial water accumulation value - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! 
mode - character(len=*), public, parameter :: nullstr = 'undefined' - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine dice_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, ice_present, ice_prognostic) - - ! !DESCRIPTION: Read in dice namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: filename ! input namelist filename - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(out) :: ice_present ! flag - logical , intent(out) :: ice_prognostic ! flag - - !--- local variables --- - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - character(CL) :: decomp ! decomp strategy - not used for NUOPC - but still needed in namelist for now - - !--- formats --- - character(*), parameter :: F00 = "('(dice_comp_init) ',8a)" - character(*), parameter :: F01 = "('(dice_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dice_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(dice_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_dice_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / dice_nml / decomp, & - flux_swpf, flux_Qmin, flux_Qacc, flux_Qacc0, restfilm, restfils - - !---------------------------------------------------------------------------- - ! Read dice_in - !---------------------------------------------------------------------------- - - flux_swpf = 0.0_R8 ! no penetration - flux_Qmin = -300.0_R8 ! kg/s/m^2 - flux_Qacc = .false. ! no accumulation - flux_Qacc0 = 0.0_R8 ! no water - restfilm = trim(nullstr) - restfils = trim(nullstr) - - if (my_task == master_task) then - nunit = shr_file_getUnit() ! get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=dice_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F02)' flux_swpf = ',flux_swpf - write(logunit,F02)' flux_Qmin = ',flux_Qmin - write(logunit,F06)' flux_Qacc = ',flux_Qacc - write(logunit,F02)' flux_Qacc0 = ',flux_Qacc0 - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - endif - - call shr_mpi_bcast(flux_swpf ,mpicom,'flux_swpf') - call shr_mpi_bcast(flux_Qmin ,mpicom,'flux_Qmin') - call shr_mpi_bcast(flux_Qacc ,mpicom,'flux_Qacc') - call shr_mpi_bcast(flux_Qacc0,mpicom,'flux_Qacc0') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDICE,trim(filename),mpicom=mpicom) - - ! Validate mode - - datamode = trim(SDICE%dataMode) - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'SSTDATA' .or. 
& - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) ' dice datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal dice datamode = ',trim(datamode) - call shr_sys_abort() - endif - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flag - !---------------------------------------------------------------------------- - - if (trim(datamode) == 'NULL') then - ice_present = .false. - ice_prognostic = .false. - else - ice_present = .true. - ice_prognostic = .true. - end if - - end subroutine dice_shr_read_namelists - -end module dice_shr_mod diff --git a/src/components/data_comps/dice/nuopc/ice_comp_nuopc.F90 b/src/components/data_comps/dice/nuopc/ice_comp_nuopc.F90 deleted file mode 100644 index 7f54b781504..00000000000 --- a/src/components/data_comps/dice/nuopc/ice_comp_nuopc.F90 +++ /dev/null @@ -1,598 +0,0 @@ -module ice_comp_nuopc - - !---------------------------------------------------------------------------- - ! This is the NUOPC cap for DICE - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_cal_mod , only : shr_cal_noleap, shr_cal_gregorian, shr_cal_ymd2date, shr_cal_ymd2julian - use shr_const_mod , only : SHR_CONST_SPVAL - use shr_sys_mod , only : shr_sys_abort - use shr_const_mod , only : shr_const_spval, shr_const_pi - use dshr_nuopc_mod , only : fld_list_type, fldsMax, dshr_realize - use dshr_nuopc_mod , only : ModelInitPhase, ModelSetRunClock, ModelSetMetaData - use dshr_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dshr_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dice_shr_mod , only : dice_shr_read_namelists - use dice_comp_mod , only : dice_comp_init, dice_comp_run, dice_comp_advertise - use dice_comp_mod , only : dice_comp_import, dice_comp_export - use perf_mod , only : t_startf, t_stopf, t_barrierf - - implicit none - private ! except - - public :: SetServices - - private :: InitializeAdvertise - private :: InitializeRealize - private :: ModelAdvance - private :: ModelFinalize - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CS) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - - integer :: fldsToIce_num = 0 - integer :: fldsFrIce_num = 0 - type (fld_list_type) :: fldsToIce(fldsMax) - type (fld_list_type) :: fldsFrIce(fldsMax) - - integer :: compid ! mct comp id - integer :: mpicom ! mpi communicator - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! 
fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer, parameter :: master_task=0 ! task number of master task - logical :: read_restart ! start from restart - character(len=256) :: case_name ! case name - logical :: flds_i2o_per_cat ! .true. if select per ice thickness - ! category fields are passed from ice to ocean - character(len=80) :: calendar ! calendar name - integer :: modeldt ! integer timestep - logical :: use_esmf_metadata = .false. - real(R8) ,parameter :: pi = shr_const_pi ! pi - character(*),parameter :: modName = "(ice_comp_nuopc)" - integer, parameter :: debug_import = 0 ! if > 0 will diagnose import fields - integer, parameter :: debug_export = 0 ! if > 0 will diagnose export fields - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p3"/), userRoutine=InitializeRealize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, & - specRoutine=ModelAdvance, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, & - specRoutine=ModelSetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, & - specRoutine=ModelFinalize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - logical :: ice_present ! flag - logical :: ice_prognostic ! flag - type(ESMF_VM) :: vm - integer :: lmpicom - character(len=CL) :: cvalue - integer :: n - integer :: ierr ! error code - integer :: shrlogunit ! original log unit - character(len=CL) :: diro - character(len=CL) :: logfile - integer :: localPet - character(len=CL) :: fileName ! 
generic file name - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! generate local mpi comm - !---------------------------------------------------------------------------- - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, mpiCommunicator=lmpicom, localPet=localPet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call mpi_comm_dup(lmpicom, mpicom, ierr) - call mpi_comm_rank(mpicom, my_task, ierr) - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - inst_name = "ICE"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Read input namelists and set present and prognostic flags - !---------------------------------------------------------------------------- - - filename = "dice_in"//trim(inst_suffix) - call dice_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, ice_present, ice_prognostic) - - !-------------------------------- - ! Advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. 
isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name='flds_i2o_per_cat', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) flds_i2o_per_cat - - call dice_comp_advertise(importstate, exportState, flds_scalar_name, & - ice_present, ice_prognostic, flds_i2o_per_cat, & - fldsFrIce_num, fldsFrIce, fldsToIce_num, fldsToIce, rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_Mesh) :: Emesh - type(ESMF_TIME) :: currTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_Calendar) :: esmf_calendar ! esmf calendar - type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - integer :: n ! index - character(len=256) :: cvalue ! tempoaray character string - integer :: shrlogunit ! original log unit - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_SPVAL ! single column lat - real(R8) :: scmLon = shr_const_SPVAL ! single column lon - integer :: current_ymd ! model date - integer :: current_year ! model year - integer :: current_mon ! model month - integer :: current_day ! model day - integer :: current_tod ! model sec into model date - real(R8) :: cosarg ! for setting ice temp pattern - real(R8) :: jday, jday0 ! elapsed day counters - logical :: write_restart - integer :: nxg, nyg - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! 
get config variables - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='case_name', value=case_name, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name='scmlon', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmlon - - call NUOPC_CompAttributeGet(gcomp, name='scmlat', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmlat - - call NUOPC_CompAttributeGet(gcomp, name='single_column', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmMode - - call NUOPC_CompAttributeGet(gcomp, name='read_restart', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) read_restart - - call NUOPC_CompAttributeGet(gcomp, name='flds_i2o_per_cat', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) flds_i2o_per_cat ! module variable - - call NUOPC_CompAttributeGet(gcomp, name='MCTID', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) compid - - !---------------------------------------------------------------------------- - ! Determine calendar info - !---------------------------------------------------------------------------- - - call ESMF_ClockGet( clock, calkindflag=esmf_caltype, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (esmf_caltype == ESMF_CALKIND_NOLEAP) then - calendar = shr_cal_noleap - else if (esmf_caltype == ESMF_CALKIND_GREGORIAN) then - calendar = shr_cal_gregorian - else - call ESMF_LogWrite(subname//" ERROR bad ESMF calendar name "//trim(calendar), & - ESMF_LOGMSG_ERROR) - rc = ESMF_Failure - return - end if - - !-------------------------------- - ! Generate the mesh - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='mesh_ice', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - Emesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (my_task == master_task) then - write(logunit,*) " obtaining dice mesh from " // trim(cvalue) - end if - - !-------------------------------- - ! Initialize model - !-------------------------------- - - call dice_comp_init(flds_i2o_per_cat, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon, calendar, Emesh, nxg, nyg) - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call dshr_realize( & - state=ExportState, & - fldList=fldsFrIce, & - numflds=fldsFrIce_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':diceExport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_realize( & - state=importState, & - fldList=fldsToIce, & - numflds=fldsToIce_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':diceImport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! 
Set initial ice state and pack export state - !---------------------------------------------------------------------------- - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_TimeGet( currTime, yy=current_year, mm=current_mon, dd=current_day, s=current_tod, & - calkindflag=esmf_caltype, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(current_year, current_mon, current_day, current_ymd) - - call ESMF_TimeIntervalGet( timeStep, s=modeldt, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call shr_cal_ymd2julian(0, current_mon, current_day, current_tod, jDay , calendar) ! julian day for model - call shr_cal_ymd2julian(0, 9, 1, 0, jDay0, calendar) ! julian day for Sept 1 - cosArg = 2.0_R8*pi*(jday - jday0)/365.0_R8 - - write_restart = .false. - call dice_comp_run(flds_i2o_per_cat, mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - calendar, modeldt, current_ymd, current_tod, cosArg) - - ! Pack export state - call dice_comp_export(exportState, flds_i2o_per_cat, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug_export > 0) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - if (use_esmf_metadata) then - call ModelSetMetaData(gcomp, name='DICE', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_Alarm) :: alarm - type(ESMF_Time) :: currTime, nextTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_State) :: importState, exportState - integer :: shrlogunit ! original log unit - logical :: write_restart ! restart alarm is ringing - logical :: read_restart ! read restart flag - integer :: next_ymd ! model date - integer :: next_tod ! model sec into model date - integer :: yr ! year - integer :: mon ! month - integer :: day ! day in month - real(R8) :: cosarg ! for setting ice temp pattern - real(R8) :: jday, jday0 ! elapsed day counters - character(len=CL) :: cvalue - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - call memcheck(subname, 5, my_task==master_task) - - !-------------------------------- - ! 
Reset shr logging to my log file - !-------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! query the Component for its clock, importState and exportState - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, exportState=exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Unpack import state - !-------------------------------- - - call dice_comp_import(importState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Run model - !-------------------------------- - - ! Determine if will write restart - - call ESMF_ClockGetAlarm(clock, alarmname='alarm_restart', alarm=alarm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (ESMF_AlarmIsRinging(alarm, rc=rc)) then - if (ChkErr(rc,__LINE__,u_FILE_u)) return - write_restart = .true. - call ESMF_AlarmRingerOff( alarm, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - write_restart = .false. - endif - - ! For nuopc - the component clock is advanced at the end of the time interval - ! For these to match for now - need to advance nuopc one timestep ahead for - ! shr_strdata time interpolation - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - nextTime = currTime + timeStep - call ESMF_TimeGet( nextTime, yy=yr, mm=mon, dd=day, s=next_tod, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(yr, mon, day, next_ymd) - - call shr_cal_ymd2julian(0, mon, day, next_tod, jDay , calendar) ! julian day for model - call shr_cal_ymd2julian(0, 9, 1, 0, jDay0, calendar) ! julian day for Sept 1 - cosArg = 2.0_R8*pi*(jday - jday0)/365.0_R8 - - ! Run dice - - read_restart = .false. - call dice_comp_run(flds_i2o_per_cat, mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - calendar, modeldt, next_ymd, next_tod, cosArg, case_name) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call dice_comp_export(exportState, flds_i2o_per_cat, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug_export > 0) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'DICE', logunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - endif - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! 
local variables - character(*), parameter :: F00 = "('(dice_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dice_comp_final) ',73('-'))" - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) ' dice: end of main integration loop' - write(logunit,F91) - end if - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelFinalize - -end module ice_comp_nuopc diff --git a/src/components/data_comps/dlnd/cime_config/buildlib b/src/components/data_comps/dlnd/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/data_comps/dlnd/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/data_comps/dlnd/cime_config/buildnml b/src/components/data_comps/dlnd/cime_config/buildnml deleted file mode 100755 index 59a593d7daa..00000000000 --- a/src/components/data_comps/dlnd/cime_config/buildnml +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env python - -"""Namelist creator for CIME's data land model. -""" - -# Typically ignore this. -# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position - -import os, sys, glob - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, safe_copy -from CIME.XML.files import Files -from CIME.buildnml import create_namelist_infile, parse_input - -logger = logging.getLogger(__name__) - -# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements -#################################################################################### -def _create_namelists(case, confdir, inst_string, infile, nmlgen, data_list_path): -#################################################################################### - """Write out the namelist for this component. - - Most arguments are the same as those for `NamelistGenerator`. The - `inst_string` argument is used as a suffix to distinguish files for - different instances. The `confdir` argument is used to specify the directory - in which output files will be placed. - """ - - #---------------------------------------------------- - # Get a bunch of information from the case. - #---------------------------------------------------- - dlnd_mode = case.get_value("DLND_MODE") - lnd_grid = case.get_value("LND_GRID") - glc_nec = case.get_value("GLC_NEC") - - #---------------------------------------------------- - # Check for incompatible options. - #---------------------------------------------------- - expect(lnd_grid != "null", - "LND_GRID cannot be null") - expect(dlnd_mode != "NULL", - "DLND_MODE cannot be NULL") - - #---------------------------------------------------- - # Log some settings. - #---------------------------------------------------- - logger.debug("DLND mode is {}".format(dlnd_mode)) - logger.debug("DLND grid is {}".format(lnd_grid)) - logger.debug("DLND glc_nec is {}".format(glc_nec)) - - #---------------------------------------------------- - # Create configuration information. 
- #---------------------------------------------------- - config = {} - config['lnd_grid'] = lnd_grid - config['dlnd_mode'] = dlnd_mode - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - #---------------------------------------------------- - # Construct the list of streams. - #---------------------------------------------------- - streams = nmlgen.get_streams() - - #---------------------------------------------------- - # For each stream, create stream text file and update - # shr_strdata_nml group and input data list. - #---------------------------------------------------- - for stream in streams: - # Ignore null values. - if stream is None or stream in ("NULL", ""): - continue - - inst_stream = stream + inst_string - logger.debug("DLND stream is {}".format(inst_stream)) - stream_path = os.path.join(confdir, "dlnd.streams.txt." + inst_stream) - user_stream_path = os.path.join(case.get_case_root(), - "user_dlnd.streams.txt." + inst_stream) - - # Use the user's stream file, or create one if necessary. - if os.path.exists(user_stream_path): - safe_copy(user_stream_path, stream_path) - config['stream'] = stream - nmlgen.update_shr_strdata_nml(config, stream, stream_path) - else: - nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) - - #---------------------------------------------------- - # Create `shr_strdata_nml` namelist group. - #---------------------------------------------------- - # set per-stream variables - nmlgen.create_shr_strdata_nml() - - # Determine model domain filename (in dlnd_in) - if "CPLHIST" in dlnd_mode: - dlnd_cplhist_domain_file = case.get_value("DLND_CPLHIST_DOMAIN_FILE") - if dlnd_cplhist_domain_file == 'null': - logger.info(" .... Obtaining DLND model domain info from first stream file: {}".format(streams[0])) - else: - logger.info(" .... Obtaining DLND model domain info from stream {}".format(streams[0])) - nmlgen.add_default("domainfile", value=dlnd_cplhist_domain_file) - else: - lnd_domain_file = case.get_value("LND_DOMAIN_FILE") - lnd_domain_path = case.get_value("LND_DOMAIN_PATH") - if lnd_domain_file != "UNSET": - full_domain_path = os.path.join(lnd_domain_path, lnd_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) - else: - nmlgen.add_default("domainfile", "null") - - #---------------------------------------------------- - # Finally, write out all the namelists. 
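The loop above writes one stream text file per stream implied by DLND_MODE, preferring a user-supplied user_dlnd.streams.txt.* file from the case root when one exists. As a rough illustration of where those stream names come from (see the namelist_definition_dlnd.xml entries later in this patch), a toy sketch follows; it is not CIME's NamelistGenerator API, and the helper name is invented.

    # Toy illustration (not CIME code) of the per-mode stream defaults that
    # namelist_definition_dlnd.xml associates with the dlnd streams list.
    STREAMS_BY_MODE = {
        "NULL": [],                      # no streams: dlnd provides no data
        "CPLHIST": ["lnd.cplhist"],      # land forcing read from coupler history files
        "GLC_CPLHIST": ["sno.cplhist"],  # glc/snow coupling fields from coupler history
    }

    def default_streams(dlnd_mode):
        """Hypothetical helper mirroring what nmlgen.get_streams() returns for DLND."""
        return STREAMS_BY_MODE.get(dlnd_mode, [])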
- #---------------------------------------------------- - namelist_file = os.path.join(confdir, "dlnd_in") - nmlgen.write_output_file(namelist_file, data_list_path, groups=['dlnd_nml','shr_strdata_nml']) - -############################################################################### -def buildnml(case, caseroot, compname): -############################################################################### - - # Build the component namelist and required stream txt files - - if compname != "dlnd": - raise AttributeError - - rundir = case.get_value("RUNDIR") - ninst = case.get_value("NINST_LND") - if ninst is None: - ninst = case.get_value("NINST") - confdir = os.path.join(caseroot,"Buildconf",compname + "conf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - #---------------------------------------------------- - # Construct the namelist generator - #---------------------------------------------------- - # determine directory for user modified namelist_definitions.xml - user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) - expect (os.path.isdir(user_xml_dir), - "user_xml_dir {} does not exist ".format(user_xml_dir)) - - # NOTE: User definition *replaces* existing definition. - files = Files() - definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component":"dlnd"})] - - user_definition = os.path.join(user_xml_dir, "namelist_definition_dlnd.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file, files=files) - #---------------------------------------------------- - # Clear out old data. - #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", - "dlnd.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - - #---------------------------------------------------- - # Loop over instances - #---------------------------------------------------- - for inst_counter in range(1, ninst+1): - # determine instance string - inst_string = "" - if ninst > 1: - inst_string = '_' + "{:04d}".format(inst_counter) - - # If multi-instance case does not have restart file, use - # single-case restart for each instance - rpointer = "rpointer." 
+ compname - if (os.path.isfile(os.path.join(rundir,rpointer)) and - (not os.path.isfile(os.path.join(rundir,rpointer + inst_string)))): - safe_copy(os.path.join(rundir, rpointer), - os.path.join(rundir, rpointer + inst_string)) - - inst_string_label = inst_string - if not inst_string_label: - inst_string_label = "\"\"" - - # create namelist output infile using user_nl_file as input - user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) - expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file {} ".format(user_nl_file)) - infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, infile) - namelist_infile = [infile] - - # create namelist and stream file(s) data component - _create_namelists(case, confdir, inst_string, namelist_infile, nmlgen, data_list_path) - - # copy namelist files and stream text files, to rundir - if os.path.isdir(rundir): - filename = compname + "_in" - file_src = os.path.join(confdir, filename) - file_dest = os.path.join(rundir, filename) - if inst_string: - file_dest += inst_string - safe_copy(file_src,file_dest) - - for txtfile in glob.glob(os.path.join(confdir, "*txt*")): - safe_copy(txtfile, rundir) - -############################################################################### -def _main_func(): - # Build the component namelist and required stream txt files - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "dlnd") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/data_comps/dlnd/cime_config/config_archive.xml b/src/components/data_comps/dlnd/cime_config/config_archive.xml deleted file mode 100644 index ae756e7a002..00000000000 --- a/src/components/data_comps/dlnd/cime_config/config_archive.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - r - unset - - rpointer.lnd$NINST_STRING - $CASE.dlnd$NINST_STRING.r.$DATENAME.nc,$CASE.dlnd$NINST_STRING.rs1.$DATENAME.bin - - - diff --git a/src/components/data_comps/dlnd/cime_config/config_component.xml b/src/components/data_comps/dlnd/cime_config/config_component.xml deleted file mode 100644 index 044a52ed9ee..00000000000 --- a/src/components/data_comps/dlnd/cime_config/config_component.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - - Data land model (DLND) - Null mode - snow coupling mode - non-snow coupling - - - - char - dlnd - dlnd - case_comp - env_case.xml - Name of land component - - - - char - CPLHIST,GLC_CPLHIST,NULL - NULL - - NULL - CPLHIST - GLC_CPLHIST - - run_component_dlnd - env_run.xml - DLND mode. In CPLHIST mode, land forcing data (produced by CLM) from a previous - model run is output in coupler history files and read in by the data land model. IN GLC_CPLHIST, - glc coupling fields are read in from a coupler history file. In NULL mode, land forcing is - set to zero and not utilized. The default is NULL. - - - - char - - null - run_component_dlnd - env_run.xml - - Full pathname for domain file for dlnd when DLND_MODE is CPLHIST - or GLC_CPLHIST. NOTE: if this is set to 'null' (the default), - then domain information is read in from the first coupler - history file in the target stream and it is assumed that the - first coupler stream file that is pointed to contains the domain - information for that stream. 
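DLND_MODE and DLND_CPLHIST_DOMAIN_FILE defined above are ordinary case XML variables, so they are read through the same CIME Case API that the buildnml script above already imports. A minimal sketch, with a hypothetical case directory:

    # Sketch only: querying the DLND xml variables defined above via the CIME
    # Case API used by buildnml; the case directory is a placeholder.
    from CIME.case import Case

    with Case("/path/to/caseroot") as case:
        mode = case.get_value("DLND_MODE")                    # "NULL", "CPLHIST" or "GLC_CPLHIST"
        domain = case.get_value("DLND_CPLHIST_DOMAIN_FILE")
        if mode != "NULL" and domain == "null":
            # per the description above, domain information is then taken from
            # the first coupler history file in the target stream
            print("domain will come from the first stream file")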
- - - - - char - UNSET - - $DIN_LOC_ROOT/lnd/dlnd7/CPLHIST_SNO/i.e20.I1850Clm50Sp.f09_g17.001_c180502 - - run_component_dlnd - env_run.xml - directory for coupler history data mode (only used for CPLHIST mode) - - - - char - UNSET - - i.e20.I1850Clm50Sp.f09_g17.001 - - run_component_dlnd - env_run.xml - case name for coupler history data mode (only used for CPLHIST mode) - - - - integer - -999 - - 1 - - run_component_dlnd - env_run.xml - - Simulation year corresponding to DLND_CPLHIST_YR_START (only used - when DLND_MODE is CPLHIST or GLC_CPLHIST). A common usage is to - set this to RUN_STARTDATE. With this setting, the forcing in the - first year of the run will be the forcing of year - DLND_CPLHIST_YR_START. Another use case is to align the calendar - of transient forcing with the model calendar. For example, setting - DLND_CPLHIST_YR_ALIGN=DLND_CPLHIST_YR_START will lead to the - forcing calendar being the same as the model calendar. The forcing - for a given model year would be the forcing of the same year. This - would be appropriate in transient runs where the model calendar is - setup to span the same year range as the forcing data. - - - - - integer - -999 - - 1 - - run_component_dlnd - env_run.xml - starting year to loop data over (only used when DLND_MODE is CPLHIST or GLC_CPLHIST) - - - - integer - -999 - - 30 - - run_component_dlnd - env_run.xml - ending year to loop data over (only used when DLND_MODE is CPLHIST or GLC_CPLHIST) - - - - ========================================= - DLND naming conventions - ========================================= - - - diff --git a/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml b/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml deleted file mode 100644 index 38f9efc11c4..00000000000 --- a/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml +++ /dev/null @@ -1,541 +0,0 @@ - - - - - - - - - - - - - - char(100) - streams - streams_file - List of streams used for the given datm_mode. - - NULL - lnd.cplhist - sno.cplhist - - - - - char - streams - streams_file - Stream domain file directory. - - null - - - - - char - streams - streams_file - Stream domain file path(s). - - null - - - - - char - streams - streams_file - Stream domain variable name(s). - - - time time - doml_lon lon - doml_lat lat - doml_aream area - doml_mask mask - - - - - - char - streams - streams_file - Stream data file directory. - - $DLND_CPLHIST_DIR - - - - - char - streams - streams_file - Stream data file path(s). - - $DLND_CPLHIST_CASE.cpl.hl2x1yr_glc.%y-01-01.nc - - - - - char - streams - streams_file - Stream data variable name(s). - - - l2x1yr_glc_Sl_tsrf%glc tsrf%glc - l2x1yr_glc_Sl_topo%glc topo%glc - l2x1yr_glc_Flgl_qice%glc qice%glc - - - - - - integer - streams - streams_file - Stream offset. - - 0 - - - - - integer - streams - streams_file - Simulation year to align stream to. - - $DLND_CPLHIST_YR_ALIGN - - - - - integer - streams - streams_file - First year of stream. - - $DLND_CPLHIST_YR_START - - - - - integer - streams - streams_file - Last year of stream. - - $DLND_CPLHIST_YR_END - - - - - - - - - - - - - char - streams - shr_strdata_nml - NULL,COPYALL - - datamode = "NULL" - NULL is always a valid option and means no data will be generated. - Turns off the data model as a provider of data to the coupler. The - lnd_present flag will be set to false and the coupler will assume no - exchange of data to or from the data model. 
- dataMode = "COPYALL" - Copies all fields directly from the input data streams Any required - fields not found on an input stream will be set to zero. - Set by the DLND_MODE xml variable in env_run.xml - - - NULL - COPYALL - COPYALL - - - - - char - streams - abs - shr_strdata_nml - - spatial gridfile associated with the strdata. grid information will - be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - - - null - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are just copy (ie. no fill), special value, - nearest neighbor, nearest neighbor in "i" direction, or nearest - neighbor in "j" direction. - valid values: 'copy','spval','nn','nnoni','nnonj' - - - nn - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - plays no role is fill algorithm at the present time. - - - nomask - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read in instead of computing the - weights on the fly for the fill operation. if this is set, fillalgo - and fillmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the fill operation. this allows a user to - save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - array (up to 30 elements) of masking algorithms for mapping input data - associated with the array of streams. valid options are map only from - valid src points, map only to valid destination points, ignore all - masks, map only from valid src points to valid destination points. - - - dstmask - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are copy by index, set to special value, - nearest neighbor, nearest neighbor in "i" direction, nearest neighbor - in "j" direction, or bilinear. - valid values: copy,spval,nn,nnoni,nnonj,bilinear - - - bilinear - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read instead of computing - weights on the fly for the mapping (interpolation) operation. if this - is set, mapalgo and mapmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the mapping (interpolation) operation. this - allows a user to save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - coszen,nearest,linear,lower,upper - - array (up to 30 elements) of time interpolation options associated with the array of - streams. 
- valid values: lower,upper,nearest,linear,coszen - lower = Use lower time-value - upper = Use upper time-value - nearest = Use the nearest time-value - linear = Linearly interpolate between the two time-values - coszen = Scale according to the cosine of the solar zenith angle (for solar) - - - linear - lower - - - - - char(30) - streams - shr_strdata_nml - extend,cycle,limit - - array of time axis modes associated with the array of streams for - handling data outside the specified stream time axis. - valid options are to cycle the data based on the first, last, and - align settings associated with the stream dataset, to extend the first - and last valid value indefinitely, or to limit the interpolated data - to fall only between the least and greatest valid value of the time array. - valid values: cycle,extend,limit - extend = extrapolate before and after the period by using the first or last value. - cycle = cycle between the range of data - limit = restrict to the period for which the data is valid - - - cycle - - - - - char(30) - streams - shr_strdata_nml - single,full_file - - array (up to 30 elements) of reading mode associated with the array of - streams. specifies the mode of reading temporal stream dataset. - valid options are "single" (read temporal dataset one at a time) or - "full_file" (read all entires of temporal dataset in a given netcdf file) - valid values: single,full_file - - - single - - - - - real(30) - streams - shr_strdata_nml - - array (up to 30 elements) of delta time ratio limits placed on the - time interpolation associated with the array of streams. this real - value causes the model to stop if the ratio of the running maximum - delta time divided by the minimum delta time is greater than the - dtlimit for that stream. for instance, with daily data, the delta - time should be exactly one day throughout the dataset and the computed - maximum divided by minimum delta time should always be 1.0. for - monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. the running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. - - - 1.5e0 - - - - - char - streams - shr_strdata_nml - - list of paired colon delimited field names that should be treated as - vectors when carrying out spatial interpolation. unlike other - character arrays in this namelist, this array is completely decoupled - from the list of streams. this is a list of vector pairs that span - all input streams where different fields of the vector pair could - appear in different streams. - for example, vectors = 'u:v','taux:tauy'. - - - null - - - - - char(30) - streams - shr_strdata_nml - - character array (up to 30 elements) of stream input files. this - string is actually parsed by a stream method and so the format is - specified by the stream module. this string consists of a - "stream_input_filename year_align year_first year_last". the - stream_input_filename is a stream text input file and the format and - options are described elsewhere. year_align, year_first, and - year_last provide information about the time axis of the file and how - to relate the input time axis to the model time axis. 
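As a concrete illustration of the year_align / year_first / year_last description above (and of the DLND_CPLHIST_YR_ALIGN / _YR_START / _YR_END behaviour described earlier), the short sketch below maps a model year onto a forcing year for a cycled stream. It is a minimal stand-alone example, not the shr_strdata implementation; the modulo-based formula is an assumption that matches the documented behaviour, namely that setting year_align equal to year_first makes the forcing calendar follow the model calendar, and that the data repeat every year_last - year_first + 1 years.

program cycle_year_sketch
  ! Illustrative mapping of a model year to a forcing year for a cycled stream
  ! (taxmode = 'cycle'); values below are placeholders, not defaults.
  implicit none
  integer, parameter :: year_align = 1   ! model year corresponding to year_first
  integer, parameter :: year_first = 1   ! first year of forcing data
  integer, parameter :: year_last  = 30  ! last year of forcing data
  integer :: model_year

  do model_year = 1, 35
     print '(a,i4,a,i4)', 'model year ', model_year, &
          ' -> forcing year ', forcing_year(model_year)
  end do

contains

  integer function forcing_year(my)
    integer, intent(in) :: my
    integer :: nyears
    nyears = year_last - year_first + 1
    ! modulo() keeps the offset in [0, nyears-1], even for model years before year_align
    forcing_year = year_first + modulo(my - year_align, nyears)
  end function forcing_year

end program cycle_year_sketch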
- - - - - - - - - - - - char - dlnd - dlnd_nml - 1d,root - - DLND Decomposition strategy - 1d = Vector decomposition - root = run only on the master task - - - 1d - - - - - char - dlnd - dlnd_nml - - Master restart file name for dlnd model - - - undefined - - - - - char - dlnd - dlnd_nml - - Stream restart file name for dlnd model, needed for branch simulations - - - undefined - - - - - logical - dlnd - dlnd_nml - If TRUE, prognostic is forced to true. - - .false. - - - - - char - dlnd - dlnd_nml - - Name of the fraction field on the first stream file (i.e., the - file from which domain information is read.) - - - doml_frac - - - - diff --git a/src/components/data_comps/dlnd/cime_config/user_nl_dlnd b/src/components/data_comps/dlnd/cime_config/user_nl_dlnd deleted file mode 100644 index 69b4ba55b0e..00000000000 --- a/src/components/data_comps/dlnd/cime_config/user_nl_dlnd +++ /dev/null @@ -1,13 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_dlnd to change namelists variables -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! Note that any namelist variable from shr_strdata_nml and dlnd_nml can -! be modified below using the above syntax -! User preview_namelists to view (not modify) the output namelist in the -! directory $CASEROOT/CaseDocs -! To modify the contents of a stream txt file, first use preview_namelists -! to obtain the contents of the stream txt files in CaseDocs, and then -! place a copy of the modified stream txt file in $CASEROOT with the string -! user_ prepended. -!------------------------------------------------------------------------ diff --git a/src/components/data_comps/dlnd/mct/dlnd_comp_mod.F90 b/src/components/data_comps/dlnd/mct/dlnd_comp_mod.F90 deleted file mode 100644 index b24bdd96c38..00000000000 --- a/src/components/data_comps/dlnd/mct/dlnd_comp_mod.F90 +++ /dev/null @@ -1,487 +0,0 @@ -#ifdef AIX -@PROCESS ALIAS_SIZE(805306368) -#endif -module dlnd_comp_mod - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use shr_pcdf_mod - use shr_sys_mod - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only: shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only: shr_mpi_bcast - use shr_strdata_mod , only: shr_strdata_type, shr_strdata_pioinit, shr_strdata_init - use shr_strdata_mod , only: shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only: shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only: shr_dmodel_gsmapcreate, shr_dmodel_rearrGGrid - use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV - use shr_cal_mod , only: shr_cal_ymdtod2string - use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - use glc_elevclass_mod , only: glc_get_num_elevation_classes, glc_elevclass_as_string - - use dlnd_shr_mod , only: datamode ! namelist input - use dlnd_shr_mod , only: decomp ! namelist input - use dlnd_shr_mod , only: rest_file ! namelist input - use dlnd_shr_mod , only: rest_file_strm ! namelist input - use dlnd_shr_mod , only: domain_fracname ! namelist input - use dlnd_shr_mod , only: nullstr - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! 
Public interfaces - !-------------------------------------------------------------------------- - - public :: dlnd_comp_init - public :: dlnd_comp_run - public :: dlnd_comp_final - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - !--- other --- - character(CS) :: myModelName = 'lnd' ! user defined model name - character(len=*),parameter :: rpfile = 'rpointer.lnd' - type(mct_rearr) :: rearr - - !-------------------------------------------------------------------------- - !--- names of fields --- - integer(IN),parameter :: fld_len = 12 ! max character length of fields in avofld & avifld - integer(IN),parameter :: nflds_nosnow = 22 - - ! fields other than snow fields: - character(fld_len),parameter :: avofld_nosnow(1:nflds_nosnow) = & - (/ "Sl_t ","Sl_tref ","Sl_qref ","Sl_avsdr ","Sl_anidr ", & - "Sl_avsdf ","Sl_anidf ","Sl_snowh ","Fall_taux ","Fall_tauy ", & - "Fall_lat ","Fall_sen ","Fall_lwup ","Fall_evap ","Fall_swnet ", & - "Sl_landfrac ","Sl_fv ","Sl_ram1 ", & - "Fall_flxdst1","Fall_flxdst2","Fall_flxdst3","Fall_flxdst4" /) - - character(fld_len),parameter :: avifld_nosnow(1:nflds_nosnow) = & - (/ "t ","tref ","qref ","avsdr ","anidr ", & - "avsdf ","anidf ","snowh ","taux ","tauy ", & - "lat ","sen ","lwup ","evap ","swnet ", & - "lfrac ","fv ","ram1 ", & - "flddst1 ","flxdst2 ","flxdst3 ","flxdst4 " /) - - integer(IN), parameter :: nflds_snow = 3 ! number of snow fields in each elevation class - integer(IN), parameter :: nec_len = 2 ! length of elevation class index in field names - ! for these snow fields, the actual field names will have the elevation class index at - ! the end (e.g., Sl_tsrf01, tsrf01) - - character(fld_len-nec_len),parameter :: avofld_snow(nflds_snow) = & - (/"Sl_tsrf ", "Sl_topo ", "Flgl_qice"/) - - character(fld_len-nec_len),parameter :: avifld_snow(nflds_snow) = & - (/"tsrf", "topo", "qice"/) - - ! all fields: - character(fld_len),dimension(:),allocatable :: avofld - character(fld_len),dimension(:),allocatable :: avifld - !-------------------------------------------------------------------------- - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine dlnd_comp_init(Eclock, x2l, l2x, & - seq_flds_x2l_fields, seq_flds_l2x_fields, & - SDLND, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon) - - ! !DESCRIPTION: initialize dlnd model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2l, l2x ! input/output attribute vectors - character(len=*) , intent(in) :: seq_flds_x2l_fields ! fields from mediator - character(len=*) , intent(in) :: seq_flds_l2x_fields ! fields to mediator - type(shr_strdata_type) , intent(inout) :: SDLND ! model shr_strdata instance (output) - type(mct_gsMap) , pointer :: gsMap ! model global seg map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! 
task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - - !--- local variables --- - integer(IN) :: n,k ! generic counters - integer(IN) :: ierr ! error code - integer(IN) :: lsize ! local size - logical :: exists ! file existance - integer(IN) :: nu ! unit number - character(CL) :: calendar ! model calendar - integer(IN) :: glc_nec ! number of elevation classes - integer(IN) :: nflds_glc_nec ! number of snow fields separated by elev class - integer(IN) :: field_num ! field number - character(nec_len) :: nec_str ! elevation class, as character string - - !--- formats --- - character(*), parameter :: F00 = "('(dlnd_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dlnd_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dlnd_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dlnd_comp_init) ',a,4es13.6)" - character(*), parameter :: F03 = "('(dlnd_comp_init) ',a,i8,a)" - character(*), parameter :: F05 = "('(dlnd_comp_init) ',a,2f10.4)" - character(*), parameter :: F90 = "('(dlnd_comp_init) ',73('='))" - character(*), parameter :: F91 = "('(dlnd_comp_init) ',73('-'))" - character(*), parameter :: subName = "(dlnd_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DLND_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDLND, COMPID) - - !---------------------------------------------------------------------------- - ! Initialize SDLND - !---------------------------------------------------------------------------- - - call t_startf('dlnd_strdata_init') - - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - - ! NOTE: shr_strdata_init calls shr_dmodel_readgrid which reads the data model - ! grid and from that computes SDLND%gsmap and SDLND%ggrid. DLND%gsmap is created - ! using the decomp '2d1d' (1d decomp of 2d grid) - - if (scmmode) then - if (my_task == master_task) & - write(logunit,F05) ' scm lon lat = ',scmlon,scmlat - call shr_strdata_init(SDLND,mpicom,compid,name='lnd', & - scmmode=scmmode,scmlon=scmlon,scmlat=scmlat, calendar=calendar, & - dmodel_domain_fracname_from_stream=domain_fracname) - else - call shr_strdata_init(SDLND,mpicom,compid,name='lnd', calendar=calendar, & - dmodel_domain_fracname_from_stream=domain_fracname) - endif - - if (my_task == master_task) then - call shr_strdata_print(SDLND,'SDLND data') - endif - - call t_stopf('dlnd_strdata_init') - - !---------------------------------------------------------------------------- - ! Build avofld & avifld - !---------------------------------------------------------------------------- - - glc_nec = glc_get_num_elevation_classes() - if (glc_nec == 0) then - nflds_glc_nec = 0 - else - nflds_glc_nec = (glc_nec+1)*nflds_snow - end if - - ! 
Start with non-snow fields - allocate(avofld(nflds_nosnow + nflds_glc_nec)) - allocate(avifld(nflds_nosnow + nflds_glc_nec)) - avofld(1:nflds_nosnow) = avofld_nosnow - avifld(1:nflds_nosnow) = avifld_nosnow - field_num = nflds_nosnow - - ! Append each snow field - if (glc_nec > 0) then - do k = 1, nflds_snow - do n = 0, glc_nec - ! nec_str will be something like '02' or '10' - nec_str = glc_elevclass_as_string(n) - - field_num = field_num + 1 - avofld(field_num) = trim(avofld_snow(k))//nec_str - avifld(field_num) = trim(avifld_snow(k))//nec_str - end do - end do - end if - - !---------------------------------------------------------------------------- - ! Initialize MCT global seg map, 1d decomp - !---------------------------------------------------------------------------- - - call t_startf('dlnd_initgsmaps') - if (my_task == master_task) write(logunit,F00) ' initialize gsmaps' - call shr_sys_flush(logunit) - - ! create a data model global seqmap (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the dlnd_in namelist - ! (which by default is "1d") - call shr_dmodel_gsmapcreate(gsmap,SDLND%nxg*SDLND%nyg,compid,mpicom,decomp) - lsize = mct_gsmap_lsize(gsmap,mpicom) - - ! create a rearranger from the data model DLND%gsmap to gsmap - call mct_rearr_init(SDLND%gsmap, gsmap, mpicom, rearr) - - call t_stopf('dlnd_initgsmaps') - - !---------------------------------------------------------------------------- - ! Initialize MCT domain - !---------------------------------------------------------------------------- - - call t_startf('dlnd_initmctdom') - if (my_task == master_task) write(logunit,F00) 'copy domains' - call shr_sys_flush(logunit) - - call shr_dmodel_rearrGGrid(SDLND%grid, ggrid, gsmap, rearr, mpicom) - - call t_stopf('dlnd_initmctdom') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('dlnd_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - call shr_sys_flush(logunit) - - call mct_aVect_init(l2x, rList=seq_flds_l2x_fields, lsize=lsize) - call mct_aVect_zero(l2x) - - call mct_aVect_init(x2l, rList=seq_flds_x2l_fields, lsize=lsize) - call mct_aVect_zero(x2l) - - call t_stopf('dlnd_initmctavs') - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - if (trim(rest_file) == trim(nullstr) .and. trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer' - call shr_sys_flush(logunit) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (.not.exists) then - write(logunit,F00) ' ERROR: rpointer file does not exist' - call shr_sys_abort(trim(subname)//' ERROR: rpointer file missing') - endif - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! 
use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - call shr_sys_flush(logunit) - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - call shr_mpi_bcast(exists,mpicom,'exists') - !if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - !call shr_pcdf_readwrite('read',trim(rest_file),mpicom,gsmap,rf1=somtp,rf1n='somtp') - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDLND,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - call shr_sys_flush(logunit) - endif - - !---------------------------------------------------------------------------- - ! Set initial lnd state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - call dlnd_comp_run(EClock, x2l, l2x, & - SDLND, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit) - call t_adj_detailf(-2) - - if (my_task == master_task) write(logunit,F00) 'dlnd_comp_init done' - call shr_sys_flush(logunit) - - call t_stopf('DLND_INIT') - - end subroutine dlnd_comp_init - - !=============================================================================== - subroutine dlnd_comp_run(EClock, x2l, l2x, & - SDLND, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, case_name) - - ! !DESCRIPTION: run method for dlnd model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2l - type(mct_aVect) , intent(inout) :: l2x - type(shr_strdata_type) , intent(inout) :: SDLND - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer(IN) , intent(in) :: logunit ! logging unit number - character(CL) , intent(in), optional :: case_name ! case name - - !--- local --- - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: yy,mm,dd ! year month day - integer(IN) :: n ! indices - integer(IN) :: idt ! integer timestep - real(R8) :: dt ! timestep - integer(IN) :: nu ! unit number - logical :: write_restart ! restart now - character(len=18) :: date_str - - character(*), parameter :: F00 = "('(dlnd_comp_run) ',8a)" - character(*), parameter :: F04 = "('(dlnd_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(dlnd_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DLND_RUN') - - call t_startf('dlnd_run1') - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, curr_yr=yy, curr_mon=mm, curr_day=dd) - write_restart = seq_timemgr_RestartAlarmIsOn(EClock) - call t_stopf('dlnd_run1') - - !-------------------- - ! UNPACK - !-------------------- - - call t_startf('dlnd_unpack') - ! Nothing to be done for now - call t_stopf('dlnd_unpack') - - !-------------------- - ! 
ADVANCE LAND - !-------------------- - - call t_barrierf('dlnd_BARRIER',mpicom) - call t_startf('dlnd') - - call t_startf('dlnd_strdata_advance') - call shr_strdata_advance(SDLND,currentYMD,currentTOD,mpicom,'dlnd') - call t_stopf('dlnd_strdata_advance') - - call t_barrierf('dlnd_scatter_BARRIER',mpicom) - call t_startf('dlnd_scatter') - do n = 1,SDLND%nstreams - call shr_dmodel_translateAV(SDLND%avs(n), l2x, avifld, avofld, rearr) - enddo - call t_stopf('dlnd_scatter') - - call t_stopf('dlnd') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('dlnd_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - end select - call t_stopf('dlnd_datamode') - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('dlnd_restart') - call shr_cal_ymdtod2string(date_str, yy,mm,dd,currentTOD) - write(rest_file,"(6a)") & - trim(case_name), '.dlnd',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.dlnd',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - !if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file),currentYMD,currentTOD - !call shr_pcdf_readwrite('write',trim(rest_file),mpicom,gsmap,clobber=.true., & - ! rf1=somtp,rf1n='somtp') - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),currentYMD,currentTOD - call shr_strdata_restWrite(trim(rest_file_strm),SDLND,mpicom,trim(case_name),'SDLND strdata') - call shr_sys_flush(logunit) - call t_stopf('dlnd_restart') - endif - - !---------------------------------------------------------------------------- - ! Log output for model date - !---------------------------------------------------------------------------- - - call t_startf('dlnd_run2') - if (my_task == master_task) then - write(logunit,F04) trim(myModelName),': model date ', CurrentYMD,CurrentTOD - call shr_sys_flush(logunit) - end if - call t_stopf('dlnd_run2') - - call t_stopf('DLND_RUN') - - end subroutine dlnd_comp_run - - !=============================================================================== - subroutine dlnd_comp_final(my_task, master_task, logunit) - - ! !DESCRIPTION: finalize method for dlnd model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! 
logging unit number - - !--- formats --- - character(*), parameter :: F00 = "('(dlnd_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dlnd_comp_final) ',73('-'))" - character(*), parameter :: subName = "(dlnd_comp_final) " - !------------------------------------------------------------------------------- - - call t_startf('DLND_FINAL') - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) - end if - - call t_stopf('DLND_FINAL') - - end subroutine dlnd_comp_final - !=============================================================================== - -end module dlnd_comp_mod diff --git a/src/components/data_comps/dlnd/mct/dlnd_shr_mod.F90 b/src/components/data_comps/dlnd/mct/dlnd_shr_mod.F90 deleted file mode 100644 index 18f794c0a1d..00000000000 --- a/src/components/data_comps/dlnd/mct/dlnd_shr_mod.F90 +++ /dev/null @@ -1,159 +0,0 @@ -module dlnd_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dlnd_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! input namelist variables - character(CL) , public :: decomp ! decomp strategy - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - character(CL) , public :: domain_fracname ! name of fraction field on first stream file - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - character(len=*), public, parameter :: nullstr = 'undefined' - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine dlnd_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDLND, lnd_present, lnd_prognostic) - - ! !DESCRIPTION: Read in dlnd namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer , intent(in) :: inst_index ! number of current instance (ie. 1) - character(len=16) , intent(in) :: inst_suffix ! char string associated with instance - character(len=16) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(in) :: shrlogunit ! 
original log unit and level - type(shr_strdata_type) , intent(inout) :: SDLND - logical , intent(out) :: lnd_present ! flag - logical , intent(out) :: lnd_prognostic ! flag - - !--- local variables --- - character(CL) :: fileName ! generic file name - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - - !--- formats --- - character(*), parameter :: F00 = "('(dlnd_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dlnd_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dlnd_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dlnd_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(dlnd_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_dlnd_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / dlnd_nml / & - decomp, restfilm, restfils, force_prognostic_true, domain_fracname - - !---------------------------------------------------------------------------- - ! Determine input filenamname - !---------------------------------------------------------------------------- - - filename = "dlnd_in"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! Read dlnd_in - !---------------------------------------------------------------------------- - - filename = "dlnd_in"//trim(inst_suffix) - decomp = "1d" - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - domain_fracname = trim(nullstr) - if (my_task == master_task) then - nunit = shr_file_getUnit() ! get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=dlnd_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' decomp = ',trim(decomp) - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - write(logunit,F00)' domain_fracname = ',trim(domain_fracname) - endif - call shr_mpi_bcast(decomp ,mpicom,'decomp') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - call shr_mpi_bcast(domain_fracname,mpicom,'domain_fracname') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDLND,trim(filename),mpicom=mpicom) - - !---------------------------------------------------------------------------- - ! Determine and validate datamode - !---------------------------------------------------------------------------- - - datamode = trim(SDLND%dataMode) - - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) 'dlnd datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal dlnd datamode = ',trim(datamode) - call shr_sys_abort() - end if - - !---------------------------------------------------------------------------- - ! 
Determine present and prognostic flags - !---------------------------------------------------------------------------- - - lnd_present = .false. - lnd_prognostic = .false. - if (force_prognostic_true) then - lnd_present = .true. - lnd_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - lnd_present = .true. - end if - - end subroutine dlnd_shr_read_namelists - -end module dlnd_shr_mod diff --git a/src/components/data_comps/dlnd/mct/lnd_comp_mct.F90 b/src/components/data_comps/dlnd/mct/lnd_comp_mct.F90 deleted file mode 100644 index f5193ca8458..00000000000 --- a/src/components/data_comps/dlnd/mct/lnd_comp_mct.F90 +++ /dev/null @@ -1,242 +0,0 @@ -module lnd_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dlnd_comp_mod , only: dlnd_comp_init, dlnd_comp_run, dlnd_comp_final - use dlnd_shr_mod , only: dlnd_shr_read_namelists - use seq_flds_mod , only: seq_flds_x2l_fields, seq_flds_l2x_fields - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: lnd_init_mct - public :: lnd_run_mct - public :: lnd_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - type(shr_strdata_type) :: SDLND - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - - character(*), parameter :: F00 = "('(dlnd_comp_init) ',8a)" - integer(IN) , parameter :: master_task=0 ! task number of master task - character(*), parameter :: subName = "(lnd_init_mct) " - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine lnd_init_mct( EClock, cdata, x2l, l2x, NLFilename ) - - ! !DESCRIPTION: initialize dlnd model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2l, l2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer :: phase ! phase of method - logical :: lnd_present ! flag - logical :: lnd_prognostic ! 
flag - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - logical :: read_restart ! start from restart - integer(IN) :: ierr ! error code - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_SPVAL ! single column lat - real(R8) :: scmLon = shr_const_SPVAL ! single column lon - character(*), parameter :: subName = "(lnd_init_mct) " - !------------------------------------------------------------------------------- - - ! Set cdata pointers - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, & - single_column=scmMode, & - scmlat=scmlat, scmlon=scmLon, & - read_restart=read_restart) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('lnd_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Read dlnd namelists and set prognostic, present flags in infodata - !---------------------------------------------------------------------------- - - call t_startf('dlnd_readnml') - - call dlnd_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDLND, lnd_present, lnd_prognostic) - - call seq_infodata_PutData(infodata, & - lnd_present=lnd_present, & - lnd_prognostic=lnd_prognostic) - - call t_stopf('dlnd_readnml') - - !---------------------------------------------------------------------------- - ! RETURN if present flag is false - !---------------------------------------------------------------------------- - - if (.not. lnd_present) then - RETURN - end if - - ! NOTE: the following will never be called if lnd_present is .false. - - !---------------------------------------------------------------------------- - ! Initialize dlnd - !---------------------------------------------------------------------------- - - call dlnd_comp_init(Eclock, x2l, l2x, & - seq_flds_x2l_fields, seq_flds_l2x_fields, & - SDLND, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon) - - !---------------------------------------------------------------------------- - ! Fill infodata that needs to be returned from dlnd - !---------------------------------------------------------------------------- - - call seq_infodata_PutData(infodata, & - lnd_nx=SDLND%nxg, & - lnd_ny=SDLND%nyg ) - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to original values - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'dlnd_comp_init done' - call shr_sys_flush(logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine lnd_init_mct - - !=============================================================================== - subroutine lnd_run_mct( EClock, cdata, x2l, l2x) - - ! !DESCRIPTION: run method for dlnd model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2l - type(mct_aVect) ,intent(inout) :: l2x - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(CL) :: case_name ! case name - character(*), parameter :: subName = "(lnd_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call seq_infodata_GetData(infodata, case_name=case_name) - - call dlnd_comp_run(EClock, x2l, l2x, & - SDLND, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, case_name) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine lnd_run_mct - - !=============================================================================== - subroutine lnd_final_mct(EClock, cdata, x2l, l2x) - - ! !DESCRIPTION: finalize method for dlnd model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! 
clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2l - type(mct_aVect) ,intent(inout) :: l2x - - !--- formats --- - character(*), parameter :: subName = "(lnd_final_mct) " - !------------------------------------------------------------------------------- - - call dlnd_comp_final(my_task, master_task, logunit) - - end subroutine lnd_final_mct - !=============================================================================== - -end module lnd_comp_mct diff --git a/src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90 b/src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90 deleted file mode 100644 index 388280d7081..00000000000 --- a/src/components/data_comps/dlnd/nuopc/dlnd_comp_mod.F90 +++ /dev/null @@ -1,556 +0,0 @@ -module dlnd_comp_mod - - use NUOPC , only : NUOPC_Advertise - use ESMF , only : ESMF_State, ESMF_SUCCESS, ESMF_STATE - use ESMF , only : ESMF_Mesh, ESMF_DistGrid, ESMF_MeshGet, ESMF_DistGridGet - use perf_mod , only : t_startf, t_stopf, t_adj_detailf, t_barrierf - use mct_mod , only : mct_gsmap_init - use mct_mod , only : mct_avect, mct_avect_indexRA, mct_avect_zero, mct_aVect_nRattr - use mct_mod , only : mct_avect_init, mct_avect_lsize - use shr_kind_mod , only : r8=>shr_kind_r8, cxx=>shr_kind_cxx, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only : shr_mpi_bcast - use shr_strdata_mod , only : shr_strdata_init_model_domain - use shr_strdata_mod , only : shr_strdata_init_streams - use shr_strdata_mod , only : shr_strdata_init_mapping - use shr_strdata_mod , only : shr_strdata_type, shr_strdata_pioinit - use shr_strdata_mod , only : shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only : shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only : shr_dmodel_translateAV - use shr_cal_mod , only : shr_cal_calendarname - use shr_cal_mod , only : shr_cal_datetod2string - use dshr_methods_mod , only : ChkErr - use dshr_nuopc_mod , only : fld_list_type, dshr_fld_add, dshr_import, dshr_export - use glc_elevclass_mod , only : glc_elevclass_as_string, glc_elevclass_init - use dlnd_shr_mod , only : datamode ! namelist input - use dlnd_shr_mod , only : rest_file ! namelist input - use dlnd_shr_mod , only : rest_file_strm ! namelist input - use dlnd_shr_mod , only : domain_fracname ! namelist input - use dlnd_shr_mod , only : nullstr - use dlnd_shr_mod , only : SDLND - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dlnd_comp_advertise - public :: dlnd_comp_init - public :: dlnd_comp_run - public :: dlnd_comp_export - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - type(mct_aVect) :: x2l - type(mct_aVect) :: l2x - character(len=CXX) :: flds_l2x = '' - character(len=CXX) :: flds_x2l = '' - character(len=CS), pointer :: avifld(:) ! char array field names coming from streams - character(len=CS), pointer :: avofld(:) ! char array field names to be sent/recd from med - integer :: kf ! index for frac in AV - integer :: glc_nec - real(R8), pointer :: lfrac(:) ! land frac - character(len=*), parameter :: rpfile = 'rpointer.lnd' - integer , parameter :: nec_len = 2 ! 
length of elevation class index in field names - character(*) , parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine dlnd_comp_advertise(importState, exportState, flds_scalar_name, & - lnd_present, lnd_prognostic, glc_nec_in, & - fldsFrLnd_num, fldsFrLnd, fldsToLnd_num, fldsToLnd, rc) - - ! 1. determine export and import fields to advertise to mediator - ! 2. determine translation of fields from streams to export/import fields - - ! input/output arguments - type(ESMF_State) :: importState - type(ESMF_State) :: exportState - character(len=*) , intent(in) :: flds_scalar_name - integer , intent(in) :: glc_nec_in - logical , intent(in) :: lnd_present - logical , intent(in) :: lnd_prognostic - integer , intent(out) :: fldsFrLnd_num - type (fld_list_type) , intent(out) :: fldsFrLnd(:) - integer , intent(inout) :: fldsToLnd_num - type (fld_list_type) , intent(inout) :: fldsToLnd(:) - integer , intent(out) :: rc - - ! local variables - integer :: n - character(nec_len) :: nec_str ! elevation class, as character string - character(len=CS) :: data_fld_name - character(len=CS) :: model_fld_name - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - if (.not. lnd_present) return - - glc_nec = glc_nec_in - - call glc_elevclass_init(glc_nec) - - !------------------- - ! export fields - !------------------- - - ! scalar fields that need to be advertised - - fldsFrLnd_num=1 - fldsFrLnd(1)%stdname = trim(flds_scalar_name) - - call dshr_fld_add(model_fld="Sl_lfrin", model_fld_concat=flds_l2x, model_fld_index=kf) - - ! The actual snow field names will have the elevation class index at the end (e.g., Sl_tsrf01, tsrf01) - if (glc_nec > 0) then - do n = 0, glc_nec - nec_str = glc_elevclass_as_string(n) - - data_fld_name = "tsrf" // nec_str - model_fld_name = "Sl_tsrf" // nec_str - call dshr_fld_add(data_fld=trim(data_fld_name), data_fld_array=avifld, & - model_fld=trim(model_fld_name), model_fld_array=avofld, model_fld_concat=flds_l2x) - - data_fld_name = "topo" // nec_str - model_fld_name = "Sl_topo" // nec_str - call dshr_fld_add(data_fld=trim(data_fld_name), data_fld_array=avifld, & - model_fld=trim(model_fld_name), model_fld_array=avofld, model_fld_concat=flds_l2x) - - data_fld_name = "qice" // nec_str - model_fld_name = "Flgl_qice" // nec_str - call dshr_fld_add(data_fld=trim(data_fld_name), data_fld_array=avifld, & - model_fld=trim(model_fld_name), model_fld_array=avofld, model_fld_concat=flds_l2x) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end do - - ! The following puts all of the elevation class fields as an - ! undidstributed dimension in the export state field - index1 is bare land - and the total number of - ! elevation classes not equal to bare land go from index2 -> glc_nec+1 - - call dshr_fld_add(med_fld="Sl_lfrin", fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd) - call dshr_fld_add(med_fld='Sl_tsrf_elev', fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd, & - ungridded_lbound=1, ungridded_ubound=glc_nec+1) - call dshr_fld_add(med_fld='Sl_topo_elev', fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd, & - ungridded_lbound=1, ungridded_ubound=glc_nec+1) - call dshr_fld_add(med_fld='Flgl_qice_elev', fldlist_num=fldsFrLnd_num, fldlist=fldsFrLnd, & - ungridded_lbound=1, ungridded_ubound=glc_nec+1) - - end if - - ! Non snow fields that nead to be added if dlnd is in cplhist mode - ! 
"Sl_t " "Sl_tref " "Sl_qref " "Sl_avsdr " - ! "Sl_anidr " "Sl_avsdf " "Sl_anidf " "Sl_snowh " - ! "Fall_taux " "Fall_tauy " "Fall_lat " "Fall_sen " - ! "Fall_lwup " "Fall_evap " "Fall_swnet " "Sl_landfrac " - ! "Sl_fv " "Sl_ram1 " - ! "Fall_flxdst1" "Fall_flxdst2" "Fall_flxdst3" "Fall_flxdst4" - - do n = 1,fldsFrLnd_num - call NUOPC_Advertise(exportState, standardName=fldsFrLnd(n)%stdname, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - enddo - - end subroutine dlnd_comp_advertise - - !=============================================================================== - - subroutine dlnd_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - scmMode, scmlat, scmlon, calendar, current_ymd, current_tod, mesh, nxg, nyg) - - ! !DESCRIPTION: initialize dlnd model - - ! input/output variables - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - character(len=*) , intent(in) :: calendar ! calendar name - integer , intent(in) :: current_ymd ! model date - integer , intent(in) :: current_tod ! model sec into model date - type(ESMF_Mesh) , intent(in) :: mesh ! ESMF docn mesh - integer , intent(out) :: nxg, nyg ! global size of model grid - - ! local variables - integer :: n,k ! generic counters - integer :: lsize ! local size - logical :: exists ! file existance - logical :: write_restart - integer :: nu ! unit number - type(ESMF_DistGrid) :: distGrid - integer, allocatable, target :: gindex(:) - integer :: dimCount - integer :: tileCount - integer :: deCount - integer :: gsize - integer, allocatable :: elementCountPTile(:) - integer, allocatable :: indexCountPDE(:,:) - integer :: spatialDim - integer :: numOwnedElements - real(R8), pointer :: ownedElemCoords(:) - integer :: klat, klon, kfrac ! AV indices - real(R8) :: domlon,domlat ! domain lats and lots - real(R8), pointer :: xc(:), yc(:) ! mesh lats and lons - integer :: rc - character(*), parameter :: F00 = "('(dlnd_comp_init) ',8a)" - character(*), parameter :: F01 = "('(dice_comp_init) ',a,2f10.4)" - character(*), parameter :: subName = "(dlnd_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DLND_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDLND, compid) - - !---------------------------------------------------------------------------- - ! Create a data model global segmap - !---------------------------------------------------------------------------- - - call t_startf('dlnd_strdata_init') - - if (my_task == master_task) write(logunit,F00) ' initialize SDLND gsmap' - - ! obtain the distgrid from the mesh that was read in - call ESMF_MeshGet(Mesh, elementdistGrid=distGrid, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! 
determin local size on my processor - call ESMF_distGridGet(distGrid, localDe=0, elementCount=lsize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global index space for my processor - allocate(gindex(lsize)) - call ESMF_distGridGet(distGrid, localDe=0, seqIndexList=gindex, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global size of distgrid - call ESMF_distGridGet(distGrid, dimCount=dimCount, deCount=deCount, tileCount=tileCount, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - allocate(elementCountPTile(tileCount)) - call ESMF_distGridGet(distGrid, elementCountPTile=elementCountPTile, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - gsize = 0 - do n = 1,size(elementCountPTile) - gsize = gsize + elementCountPTile(n) - end do - deallocate(elementCountPTile) - - ! create the data model gsmap given the local size, global size and gindex - call mct_gsMap_init( SDLND%gsmap, gindex, mpicom, compid, lsize, gsize) - deallocate(gindex) - - !---------------------------------------------------------------------------- - ! Initialize SDLND - !---------------------------------------------------------------------------- - - ! The call to shr_strdata_init_model_domain creates the SDLND%gsmap which - ! is a '2d1d' decommp (1d decomp of 2d grid) and also create SDLND%grid - - SDLND%calendar = trim(shr_cal_calendarName(trim(calendar))) - - if (scmmode) then - if (my_task == master_task) write(logunit,F01) ' scm lon lat = ',scmlon,scmlat - call shr_strdata_init_model_domain(SDLND, mpicom, compid, my_task, & - scmmode=scmmode, scmlon=scmlon, scmlat=scmlat, gsmap=SDLND%gsmap, & - dmodel_domain_fracname_from_stream=domain_fracname) - else - call shr_strdata_init_model_domain(SDLND, mpicom, compid, my_task, gsmap=SDLND%gsmap, & - dmodel_domain_fracname_from_stream=domain_fracname) - end if - - if (my_task == master_task) then - call shr_strdata_print(SDLND,'SDLND data') - endif - - ! obtain mesh lats and lons - call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - allocate(ownedElemCoords(spatialDim*numOwnedElements)) - allocate(xc(numOwnedElements), yc(numOwnedElements)) - call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (numOwnedElements /= lsize) then - call shr_sys_abort('ERROR: numOwnedElements is not equal to lsize') - end if - do n = 1,lsize - xc(n) = ownedElemCoords(2*n-1) - yc(n) = ownedElemCoords(2*n) - end do - - ! error check that mesh lats and lons correspond to those on the input domain file - klon = mct_aVect_indexRA(SDLND%grid%data,'lon') - do n = 1, lsize - domlon = SDLND%grid%data%rattr(klon,n) - if (abs( domlon - xc(n)) > 1.e-10 .and. domlon /= 0.0_r8) then - write(6,100) n, domlon, xc(n), abs(xc(n)-domlon) -100 format('ERROR: DLND n, dom_lon, mesh_lon, diff_lon = ',i6,2(f21.13,3x),d21.5) - call shr_sys_abort() - end if - !SDLND%grid%data%rattr(klon,n) = xc(n) - end do - klat = mct_aVect_indexRA(SDLND%grid%data,'lat') - do n = 1, lsize - domlat = SDLND%grid%data%rattr(klat,n) - if (abs( domlat - yc(n)) > 1.e-10 .and. 
domlat /= 0.0_r8) then - write(6,101) n, domlat,yc(n), abs(yc(n)-domlat) -101 format('ERROR: DLND n, dom_lat, mesh_lat, diff_lat = ',i6,2(f21.13,3x),d21.5) - call shr_sys_abort() - end if - !SDLND%grid%data%rattr(klat,n) = yc(n) - end do - - allocate(lfrac(lsize)) - kfrac = mct_aVect_indexRA(SDLND%grid%data,'frac') - lfrac(:) = SDLND%grid%data%rAttr(kfrac,:) - - !---------------------------------------------------------------------------- - ! Initialize SDLND attributes for streams and mapping of streams to model domain - !---------------------------------------------------------------------------- - - call shr_strdata_init_streams(SDLND, compid, mpicom, my_task) - call shr_strdata_init_mapping(SDLND, compid, mpicom, my_task) - - call t_stopf('dlnd_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - call mct_aVect_init(l2x, rList=flds_l2x, lsize=lsize) - call mct_aVect_zero(l2x) - call mct_aVect_init(x2l, rList=flds_x2l, lsize=lsize) - call mct_aVect_zero(x2l) - - nxg = SDLND%nxg - nyg = SDLND%nyg - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - if (trim(rest_file) == trim(nullstr) .and. trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer' - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (.not.exists) then - write(logunit,F00) ' ERROR: rpointer file does not exist' - call shr_sys_abort(trim(subname)//' ERROR: rpointer file missing') - endif - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - call shr_mpi_bcast(exists,mpicom,'exists') - !if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - !call shr_pcdf_readwrite('read',trim(rest_file),mpicom,SDLND%gsmap,rf1=somtp,rf1n='somtp') - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDLND,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - endif - - !---------------------------------------------------------------------------- - ! Set initial lnd state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - - write_restart = .false. 
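! As an aside to the conventions noted above (snow fields carry a two-digit
! elevation-class suffix, e.g. Sl_tsrf01, and the export state packs bare land
! at ungridded index 1 with the remaining classes at indices 2..glc_nec+1),
! the stand-alone sketch below spells that mapping out. It is illustrative
! only and not part of the original dlnd source: the i2.2 formatting stands in
! for glc_elevclass_as_string, whose exact output format is assumed here.
program elevclass_fields_sketch
  implicit none
  integer, parameter :: glc_nec = 10
  character(len=9), parameter :: base(3) = &
       [character(len=9) :: 'Sl_tsrf', 'Sl_topo', 'Flgl_qice']
  character(len=2) :: nec_str
  integer :: k, n

  do k = 1, size(base)
     do n = 0, glc_nec                   ! class 0 is bare land
        write(nec_str,'(i2.2)') n        ! assumed two-digit suffix: '00', '01', ..., '10'
        print '(a,i3)', trim(base(k))//nec_str//'  -> ungridded index ', n + 1
     end do
  end do
end program elevclass_fields_sketch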
- call dlnd_comp_run(mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - current_ymd, current_tod) - - call t_adj_detailf(-2) - - if (my_task == master_task) then - write(logunit,F00) 'dlnd_comp_init done' - end if - - call t_stopf('DLND_INIT') - - end subroutine dlnd_comp_init - - !=============================================================================== - - subroutine dlnd_comp_run(mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - target_ymd, target_tod, case_name) - - ! !DESCRIPTION: run method for dlnd model - - ! input/output variables: - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: write_restart ! write restart - integer , intent(in) :: target_ymd ! model date - integer , intent(in) :: target_tod ! model sec into model date - character(len=*) , intent(in), optional :: case_name ! case name - - ! local variables - integer :: n ! indices - integer :: nu ! unit number - integer :: lsize ! local size - character(len=18) :: date_str - character(*), parameter :: F00 = "('(dlnd_comp_run) ',8a)" - character(*), parameter :: F01 = "('(dlnd_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(dlnd_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DLND_RUN') - - !-------------------- - ! UNPACK - !-------------------- - - call t_startf('dlnd_unpack') - ! Nothing to be done for now - call t_stopf('dlnd_unpack') - - !-------------------- - ! ADVANCE LAND - !-------------------- - - call t_barrierf('dlnd_BARRIER',mpicom) - call t_startf('dlnd') - - call t_startf('dlnd_strdata_advance') - lsize = mct_avect_lsize(l2x) - do n = 1,lsize - l2x%rAttr(kf,n) = lfrac(n) - enddo - call shr_strdata_advance(SDLND,target_ymd,target_tod,mpicom,'dlnd') - call t_stopf('dlnd_strdata_advance') - - call t_barrierf('dlnd_scatter_BARRIER',mpicom) - call t_startf('dlnd_scatter') - do n = 1,SDLND%nstreams - call shr_dmodel_translateAV(SDLND%avs(n), l2x, avifld, avofld) - enddo - call t_stopf('dlnd_scatter') - - call t_stopf('dlnd') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('dlnd_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - end select - call t_stopf('dlnd_datamode') - - !-------------------- - ! 
Write restart - !-------------------- - - if (write_restart) then - call t_startf('dlnd_restart') - call shr_cal_datetod2string(date_str, target_ymd, target_tod) - write(rest_file,"(6a)") & - trim(case_name), '.dlnd',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.dlnd',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (my_task == master_task) then - write(logunit,F01) ' writing ',trim(rest_file_strm),target_ymd,target_tod - end if - call shr_strdata_restWrite(trim(rest_file_strm),SDLND,mpicom,trim(case_name),'SDLND strdata') - call t_stopf('dlnd_restart') - endif - - call t_stopf('DLND_RUN') - - end subroutine dlnd_comp_run - - !=============================================================================== - - subroutine dlnd_comp_export(exportState, rc) - - ! input/output variables - type(ESMF_State) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: k,n - character(nec_len) :: nec_str ! elevation class, as character string - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - k = mct_aVect_indexRA(l2x, "Sl_lfrin") - call dshr_export(l2x%rattr(k,:), exportState, "Sl_lfrin", rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - do n = 0,glc_nec - nec_str = glc_elevclass_as_string(n) - - k = mct_aVect_indexRA(l2x, "Sl_tsrf" // nec_str) - call dshr_export(l2x%rattr(k,:), exportState, "Sl_tsrf_elev", ungridded_index=n+1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(l2x, "Sl_topo" // nec_str) - call dshr_export(l2x%rattr(k,:), exportState, "Sl_topo_elev", ungridded_index=n+1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(l2x, "Flgl_qice" // nec_str) - call dshr_export(l2x%rattr(k,:), exportState, "Flgl_qice_elev", ungridded_index=n+1, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end do - - end subroutine dlnd_comp_export - -end module dlnd_comp_mod diff --git a/src/components/data_comps/dlnd/nuopc/dlnd_shr_mod.F90 b/src/components/data_comps/dlnd/nuopc/dlnd_shr_mod.F90 deleted file mode 100644 index 175d7e59aa0..00000000000 --- a/src/components/data_comps/dlnd/nuopc/dlnd_shr_mod.F90 +++ /dev/null @@ -1,146 +0,0 @@ -module dlnd_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dlnd_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! stream data type - type(shr_strdata_type), public :: SDLND - - ! input namelist variables - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! 
stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - character(CL) , public :: domain_fracname ! name of fraction field on first stream file - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - character(len=*), public, parameter :: nullstr = 'undefined' - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine dlnd_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, lnd_present, lnd_prognostic) - - ! !DESCRIPTION: Read in dlnd namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: filename ! input namelist filename - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(out) :: lnd_present ! flag - logical , intent(out) :: lnd_prognostic ! flag - - !--- local variables --- - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - character(CL) :: decomp ! decomp strategy - not used for NUOPC - but still needed in namelist for now - - character(*), parameter :: F00 = "('(dlnd_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dlnd_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dlnd_comp_init) ',a,5i8)" - character(*), parameter :: subName = "(shr_dlnd_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / dlnd_nml / decomp, & - restfilm, restfils, force_prognostic_true, domain_fracname - - !---------------------------------------------------------------------------- - ! Read dlnd_in - !---------------------------------------------------------------------------- - - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - domain_fracname = trim(nullstr) - - if (my_task == master_task) then - nunit = shr_file_getUnit() ! get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=dlnd_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - write(logunit,F00)' domain_fracname = ',trim(domain_fracname) - endif - - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - call shr_mpi_bcast(domain_fracname,mpicom,'domain_fracname') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! 
Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDLND,trim(filename),mpicom=mpicom) - - !---------------------------------------------------------------------------- - ! Determine and validate datamode - !---------------------------------------------------------------------------- - - datamode = trim(SDLND%dataMode) - - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) 'dlnd datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal dlnd datamode = ',trim(datamode) - call shr_sys_abort() - end if - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flags - !---------------------------------------------------------------------------- - - lnd_present = .false. - lnd_prognostic = .false. - if (force_prognostic_true) then - lnd_present = .true. - lnd_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - lnd_present = .true. - end if - - end subroutine dlnd_shr_read_namelists - -end module dlnd_shr_mod diff --git a/src/components/data_comps/dlnd/nuopc/lnd_comp_nuopc.F90 b/src/components/data_comps/dlnd/nuopc/lnd_comp_nuopc.F90 deleted file mode 100644 index 3c12f6a3307..00000000000 --- a/src/components/data_comps/dlnd/nuopc/lnd_comp_nuopc.F90 +++ /dev/null @@ -1,521 +0,0 @@ -module lnd_comp_nuopc - - !---------------------------------------------------------------------------- - ! This is the NUOPC cap for DLND - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_cal_mod , only : shr_cal_noleap, shr_cal_gregorian, shr_cal_ymd2date - use shr_const_mod , only : SHR_CONST_SPVAL - use shr_sys_mod , only : shr_sys_abort - use dshr_nuopc_mod , only : fld_list_type, fldsMax, dshr_realize - use dshr_nuopc_mod , only : ModelInitPhase, ModelSetRunClock, ModelSetMetaData - use dshr_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dshr_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dlnd_shr_mod , only : dlnd_shr_read_namelists - use dlnd_comp_mod , only : dlnd_comp_init, dlnd_comp_run, dlnd_comp_advertise - use dlnd_comp_mod , only : dlnd_comp_export - - implicit none - private ! except - - public :: SetServices - - private :: InitializeAdvertise - private :: InitializeRealize - private :: ModelAdvance - private :: ModelFinalize - - !-------------------------------------------------------------------------- - ! 
Private module data - !-------------------------------------------------------------------------- - - character(len=CS) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - - integer :: fldsToLnd_num = 0 - integer :: fldsFrLnd_num = 0 - type (fld_list_type) :: fldsToLnd(fldsMax) - type (fld_list_type) :: fldsFrLnd(fldsMax) - - integer :: compid ! mct comp id - integer :: mpicom ! mpi communicator - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - character(CL) :: case_name ! case name - logical :: lnd_prognostic ! data is sent back to dlnd - character(len=80) :: calendar ! calendar name - logical :: use_esmf_metadata = .false. - character(*),parameter :: modName = "(lnd_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p3"/), userRoutine=InitializeRealize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - logical :: lnd_present ! 
flag - logical :: lnd_prognostic ! flag - type(ESMF_VM) :: vm - integer :: lmpicom - character(len=CL) :: cvalue - integer :: n - integer :: ierr ! error code - integer :: shrlogunit ! original log unit - character(len=CL) :: diro - character(len=CL) :: logfile - integer :: glc_nec ! number of elevation classes - integer :: localPet - character(len=CL) :: fileName ! generic file name - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! generate local mpi comm - !---------------------------------------------------------------------------- - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, mpiCommunicator=lmpicom, localPet=localPet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call mpi_comm_dup(lmpicom, mpicom, ierr) - call mpi_comm_rank(mpicom, my_task, ierr) - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - inst_name = "LND"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Read input namelists and set present and prognostic flags - !---------------------------------------------------------------------------- - - filename = "dlnd_in"//trim(inst_suffix) - call dlnd_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, lnd_present, lnd_prognostic) - - !-------------------------------- - ! advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. 
isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name='glc_nec', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) glc_nec - call ESMF_LogWrite('glc_nec = '// trim(cvalue), ESMF_LOGMSG_INFO) - - call dlnd_comp_advertise(importState, exportState, flds_scalar_name, & - lnd_present, lnd_prognostic, glc_nec, & - fldsFrLnd_num, fldsFrLnd, fldsToLnd_num, fldsToLnd, rc) - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_Mesh) :: Emesh - type(ESMF_TIME) :: currTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_Calendar) :: esmf_calendar ! esmf calendar - type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - integer :: current_ymd ! model date - integer :: current_year ! model year - integer :: current_mon ! model month - integer :: current_day ! model day - integer :: current_tod ! model sec into model date - character(CL) :: cvalue - integer :: shrlogunit ! original log unit - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_SPVAL ! single column lat - real(R8) :: scmLon = shr_const_SPVAL ! single column lon - logical :: read_restart ! start from restart - integer :: nxg, nyg - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! 
Determine necessary config variables - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='case_name', value=case_name, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name='read_restart', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) read_restart - - call NUOPC_CompAttributeGet(gcomp, name='MCTID', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) compid - - !---------------------------------------------------------------------------- - ! Determine calendar info - !---------------------------------------------------------------------------- - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_TimeGet( currTime, yy=current_year, mm=current_mon, dd=current_day, s=current_tod, & - calkindflag=esmf_caltype, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(current_year, current_mon, current_day, current_ymd) - - if (esmf_caltype == ESMF_CALKIND_NOLEAP) then - calendar = shr_cal_noleap - else if (esmf_caltype == ESMF_CALKIND_GREGORIAN) then - calendar = shr_cal_gregorian - else - call ESMF_LogWrite(subname//" ERROR bad ESMF calendar name "//trim(calendar), ESMF_LOGMSG_ERROR) - rc = ESMF_Failure - return - end if - - !-------------------------------- - ! Generate the mesh - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='mesh_lnd', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (my_task == master_task) then - write(logunit,*) " obtaining dlnd mesh from " // trim(cvalue) - end if - - Emesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize model - !---------------------------------------------------------------------------- - - call dlnd_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - scmMode, scmlat, scmlon, calendar, current_ymd, current_tod, Emesh, nxg, nyg) - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call dshr_realize( & - state=ExportState, & - fldList=fldsFrLnd, & - numflds=fldsFrLnd_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dlndExport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! No import send for now - only export snow fields - - !-------------------------------- - ! Pack export state - ! Copy from l2x to exportState - ! Set the coupling scalars - !-------------------------------- - - call dlnd_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
diagnostics - !-------------------------------- - - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call shr_file_setLogUnit (shrlogunit) - - if (use_esmf_metadata) then - call ModelSetMetaData(gcomp, name='DLND', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_Alarm) :: alarm - type(ESMF_TimeInterval) :: timeStep - type(ESMF_Time) :: currTime, nextTime - type(ESMF_State) :: importState, exportState - integer :: shrlogunit ! original log unit - logical :: write_restart ! write restart - integer :: nextYMD ! model date - integer :: nextTOD ! model sec into model date - integer :: yr ! year - integer :: mon ! month - integer :: day ! day in month - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - call memcheck(subname, 5, my_task==master_task) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! query the Component for its clock, importState and exportState - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, exportState=exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Unpack import state - !-------------------------------- - - if (lnd_prognostic) then - ! No import state for now - only snow fields - !call dlnd_comp_import(importState, rc=rc) - !if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - !-------------------------------- - ! Run model - !-------------------------------- - - call ESMF_ClockGetAlarm(clock, alarmname='alarm_restart', alarm=alarm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (ESMF_AlarmIsRinging(alarm, rc=rc)) then - if (ChkErr(rc,__LINE__,u_FILE_u)) return - write_restart = .true. - call ESMF_AlarmRingerOff( alarm, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - write_restart = .false. - endif - - ! For nuopc - the component clock is advanced at the end of the time interval - ! For these to match for now - need to advance nuopc one timestep ahead for - ! shr_strdata time interpolation - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - nextTime = currTime + timeStep - call ESMF_TimeGet( nextTime, yy=yr, mm=mon, dd=day, s=nexttod, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(yr, mon, day, nextymd) - - call dlnd_comp_run(mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart=.false., write_restart=write_restart, & - target_ymd=nextYMD, target_tod=nextTOD, case_name=case_name) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call dlnd_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
diagnostics - !-------------------------------- - - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'DATM', logunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - call shr_file_setLogUnit (shrlogunit) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(*), parameter :: F00 = "('(dlnd_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dlnd_comp_final) ',73('-'))" - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) ' dlnd : end of main integration loop' - write(logunit,F91) - end if - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelFinalize - -end module lnd_comp_nuopc diff --git a/src/components/data_comps/docn/cime_config/buildlib b/src/components/data_comps/docn/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/data_comps/docn/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/data_comps/docn/cime_config/buildnml b/src/components/data_comps/docn/cime_config/buildnml deleted file mode 100755 index a3004328565..00000000000 --- a/src/components/data_comps/docn/cime_config/buildnml +++ /dev/null @@ -1,232 +0,0 @@ -#!/usr/bin/env python - -"""Namelist creator for CIME's data ocn model. -""" - -# Typically ignore this. -# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position - -import os, sys, glob, re - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.XML.files import Files -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, safe_copy -from CIME.buildnml import create_namelist_infile, parse_input - -logger = logging.getLogger(__name__) - -# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements -#################################################################################### -def _create_namelists(case, confdir, inst_string, infile, nmlgen, data_list_path): -#################################################################################### - """Write out the namelist for this component. - - Most arguments are the same as those for `NamelistGenerator`. The - `inst_string` argument is used as a suffix to distinguish files for - different instances. The `confdir` argument is used to specify the directory - in which output files will be placed. - """ - - #---------------------------------------------------- - # Get a bunch of information from the case. 
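
As the docstring notes, this routine is called once per component instance, with inst_string distinguishing the files each instance writes into confdir. A short sketch of how that suffix is formed and used, mirroring the instance loop later in this script (the ninst value and filenames are illustrative only):

    ninst = 2
    for inst_counter in range(1, ninst + 1):
        inst_string = "_{:04d}".format(inst_counter) if ninst > 1 else ""
        # Per-instance stream file and namelist, e.g. for instance 1:
        #   docn.streams.txt.prescribed_0001 and docn_in_0001
        print("docn.streams.txt.prescribed" + inst_string, "docn_in" + inst_string)
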
- #---------------------------------------------------- - ocn_domain_file = case.get_value("OCN_DOMAIN_FILE") - ocn_domain_path = case.get_value("OCN_DOMAIN_PATH") - docn_mode = case.get_value("DOCN_MODE") - ocn_grid = case.get_value("OCN_GRID") - sstice_stream = case.get_value("SSTICE_STREAM") - - #---------------------------------------------------- - # Check for incompatible options. - #---------------------------------------------------- - expect(ocn_grid != "null", - "OCN_GRID cannot be null") - expect(docn_mode != "NULL", - "DOCN_MODE cannot be NULL") - - #---------------------------------------------------- - # Log some settings. - #---------------------------------------------------- - logger.debug("DOCN mode is {}".format(docn_mode)) - logger.debug("DOCN grid is {}".format(ocn_grid)) - - #---------------------------------------------------- - # Create configuration information. - #---------------------------------------------------- - config = {} - config['ocn_grid'] = ocn_grid - config['docn_mode'] = docn_mode - config['sstice_stream'] = sstice_stream - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - #---------------------------------------------------- - # Construct the list of streams. - #---------------------------------------------------- - streams = nmlgen.get_streams() - - #---------------------------------------------------- - # For each stream, create stream text file and update - # shr_strdata_nml group and input data list. - #---------------------------------------------------- - for stream in streams: - - # Ignore null values. - if stream is None or stream in ("NULL", ""): - continue - - inst_stream = stream + inst_string - logger.debug("DOCN stream is {}".format(inst_stream)) - stream_path = os.path.join(confdir, "docn.streams.txt." + inst_stream) - user_stream_path = os.path.join(case.get_case_root(), - "user_docn.streams.txt." + inst_stream) - - # Use the user's stream file, or create one if necessary. - if os.path.exists(user_stream_path): - safe_copy(user_stream_path, stream_path) - config['stream'] = stream - nmlgen.update_shr_strdata_nml(config, stream, stream_path) - else: - nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) - - # For aquaplanet prescribed have no streams - match = re.match(r'^sst_aquap\d+',docn_mode) - if match is not None: - if match.group(0): - value = ['null'] - nmlgen.set_value("streams",value) - - #---------------------------------------------------- - # Create `shr_strdata_nml` namelist group. - #---------------------------------------------------- - # set per-stream variables - nmlgen.create_shr_strdata_nml() - - # set variables that are not per-stream - if ocn_domain_file != "UNSET": - full_domain_path = os.path.join(ocn_domain_path, ocn_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) - else: - if "aqua" in docn_mode: - expect( ocn_domain_file != "UNSET", - "in aquaplanet mode the xml variable OCN_DOMAIN_FILE cannot be equal to UNSET ") - else: - nmlgen.add_default("domainfile", value='null') - - #---------------------------------------------------- - # Finally, write out all the namelists. 
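
The aquaplanet special case above hinges on a regular expression: any docn_mode that starts with sst_aquap followed by digits is an analytic-SST mode and gets its stream list reset to 'null', while sst_aquapfile (no digits after the prefix) keeps its file-based stream. A quick check of that pattern's behaviour:

    import re

    pattern = r'^sst_aquap\d+'
    for mode in ("sst_aquap3", "sst_aquap10", "sst_aquapfile", "prescribed"):
        print(mode, bool(re.match(pattern, mode)))
    # sst_aquap3 True, sst_aquap10 True, sst_aquapfile False, prescribed False
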
- #---------------------------------------------------- - namelist_file = os.path.join(confdir, "docn_in") - nmlgen.write_output_file(namelist_file, data_list_path, groups=['docn_nml','shr_strdata_nml']) - -############################################################################### -def buildnml(case, caseroot, compname): -############################################################################### - - # Build the component namelist and required stream txt files - if compname != "docn": - raise AttributeError - - rundir = case.get_value("RUNDIR") - ninst = case.get_value("NINST_OCN") - if ninst is None: - ninst = case.get_value("NINST") - - # Determine configuration directory - confdir = os.path.join(caseroot,"Buildconf",compname + "conf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - #---------------------------------------------------- - # Construct the namelist generator - #---------------------------------------------------- - # determine directory for user modified namelist_definitions.xml - user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) - expect (os.path.isdir(user_xml_dir), - "user_xml_dir {} does not exist ".format(user_xml_dir)) - - # NOTE: User definition *replaces* existing definition. - files = Files() - definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component":"docn"})] - - user_definition = os.path.join(user_xml_dir, "namelist_definition_docn.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file, files=files) - - #---------------------------------------------------- - # Clear out old data. - #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", - "docn.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - - #---------------------------------------------------- - # Loop over instances - #---------------------------------------------------- - for inst_counter in range(1, ninst+1): - # determine instance string - inst_string = "" - if ninst > 1: - inst_string = '_' + "{:04d}".format(inst_counter) - - # If multi-instance case does not have restart file, use - # single-case restart for each instance - rpointer = "rpointer." 
+ compname - if (os.path.isfile(os.path.join(rundir,rpointer)) and - (not os.path.isfile(os.path.join(rundir,rpointer + inst_string)))): - safe_copy(os.path.join(rundir, rpointer), - os.path.join(rundir, rpointer + inst_string)) - - inst_string_label = inst_string - if not inst_string_label: - inst_string_label = "\"\"" - - # create namelist output infile using user_nl_file as input - user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) - expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file {} ".format(user_nl_file)) - infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, infile) - namelist_infile = [infile] - - # create namelist and stream file(s) data component - _create_namelists(case, confdir, inst_string, namelist_infile, nmlgen, data_list_path) - - # copy namelist files and stream text files, to rundir - if os.path.isdir(rundir): - filename = compname + "_in" - file_src = os.path.join(confdir, filename) - file_dest = os.path.join(rundir, filename) - if inst_string: - file_dest += inst_string - safe_copy(file_src,file_dest) - - for txtfile in glob.glob(os.path.join(confdir, "*txt*")): - safe_copy(txtfile, rundir) - -############################################################################### -def _main_func(): - # Build the component namelist and required stream txt files - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "docn") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/data_comps/docn/cime_config/config_archive.xml b/src/components/data_comps/docn/cime_config/config_archive.xml deleted file mode 100644 index f156ce499b9..00000000000 --- a/src/components/data_comps/docn/cime_config/config_archive.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - rs*\d* - unset - - rpointer.ocn$NINST_STRING - $CASE.docn$NINST_STRING.r.$DATENAME.nc,$CASE.docn$NINST_STRING.rs1.$DATENAME.bin - - - - diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml deleted file mode 100644 index 1fedb4c126e..00000000000 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ /dev/null @@ -1,280 +0,0 @@ - - - - - - - - - DOCN - null mode - prescribed ocean mode - slab ocean mode - aquaplanet slab ocean mode - interannual mode - aquaplanet mode: - analytic aquaplanet sst - option 1 - analytic aquaplanet sst - option 2 - analytic aquaplanet sst - option 3 - analytic aquaplanet sst - option 4 - analytic aquaplanet sst - option 5 - analytic aquaplanet sst - option 6 - analytic aquaplanet sst - option 7 - analytic aquaplanet sst - option 8 - analytic aquaplanet sst - option 9 - analytic aquaplanet sst - option 10 - file input aquaplanet sst - - - - char - docn - docn - case_comp - env_case.xml - Name of ocn component - - - - char - prescribed,sst_aquap1,sst_aquap2,sst_aquap3,sst_aquap4,sst_aquap5,sst_aquap6,sst_aquap7,sst_aquap8,sst_aquap9,sst_aquap10,sst_aquapfile,som,som_aquap,interannual,null - prescribed - - null - prescribed - som - som_aquap - interannual - sst_aquap1 - sst_aquap2 - sst_aquap3 - sst_aquap4 - sst_aquap5 - sst_aquap6 - sst_aquap7 - sst_aquap8 - sst_aquap9 - sst_aquap10 - sst_aquapfile - - run_component_docn - env_run.xml - DOCN mode. The data ocean component (DOCN) always returns SSTs to the - driver. The atmosphere/ocean fluxes are computed in the - coupler. Therefore, the data ocean model does not compute fluxes like - the data ice model. 
DOCN has two distinct modes of operation. It can - run as a pure data model, reading in ocean SSTs (normally - climatological) from input datasets, performing time/spatial - interpolations, and passing these to the coupler. Alternatively, - DOCN can compute updated SSTs by running as a slab ocean model where - bottom ocean heat flux convergence and boundary layer depths are read - in and used with the atmosphere/ocean and ice/ocean fluxes obtained - from the driver. - --- A setting of prescribed assumes the only field in the input stream is SST. - It also assumes the SST is in Celsius and must be converted to Kelvin. - All other fields are set to zero except for ocean salinity, which is set to a - constant reference salinity value. Normally the ice fraction data is found in - the same data files that provide SST data to the data ocean model. They are - normally found in the same file because the SST and ice fraction data are derived - from the same observational data sets and are consistent with each other. - --- Settings of som (slab ocean model) or som_aquap (aquaplanet slab ocean) are - prognostic modes which compute a prognostic sea surface temperature and a - freeze/melt potential (surface Q-flux) used by the sea ice model. This - calculation requires an external SOM forcing data file that includes - ocean mixed layer depths and bottom-of-the-slab Q-fluxes. - Scientifically appropriate bottom-of-the-slab Q-fluxes are normally - ocean resolution dependent and are derived from the ocean model output - of a fully coupled CCSM run. Note that while this mode runs out of - the box, the default SOM forcing file is not scientifically - appropriate and is provided for testing and development purposes only. - Users must create scientifically appropriate data for their particular - application. A tool is available to derive valid SOM forcing. - --- A setting of sst_aquapN (where "N" is an integer index value) is a - type of prescribed SST mode used specifically for an aquaplanet setup in - which global SSTs correspond to an analytic form set by the index value. - Currently, indices for 10 SST profiles are supported [e.g., index 3 corresponds - to the "QOBS" profile of Neale and Hoskins (2001, Atmos. Sci. Lett.)]. - With source code modifications, it is possible for users to create their own - analytic SST distributions and match them to indices 11 or greater. - - - - - char - - UNSET - - pop_frc.1x1d.090130.nc - default.som.forcing.aquaplanet.Qflux0_h30_sstQOBS.2degFV_c20170421.nc - default.som.forcing.aquaplanet.Qflux0_h30_sstQOBS.1degFV_c20170421.nc - - run_component_docn - env_run.xml - Sets SOM forcing filename. - This is only used when DOCN_MODE=som. - - - - char - - UNSET - - sst_c4aquasom_0.9x1.25_clim.c170512.nc - - run_component_docn - env_run.xml - Sets aquaplanet forcing filename instead of using an analytic form. - This is only used when DOCN_MODE=sst_aquapfile. - - - - char - - CAMDATA - run_component_docn - env_run.xml - Prescribed SST and ice coverage stream name. - Sets SST and ice coverage stream name. - This is only used when DOCN_MODE=prescribed. 
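
These DOCN settings live in env_run.xml, so they are normally changed per case (typically via xmlchange) rather than by editing this definition file. A hedged sketch using the same CIME Case API imported by the buildnml script above, assuming the Case.set_value setter alongside the get_value calls used there; the case root and the chosen values are examples only:

    from CIME.case import Case

    # Point an existing case at the slab-ocean mode and the default SOM
    # forcing file listed above, then let buildnml regenerate docn_in.
    with Case("/path/to/caseroot", read_only=False) as case:
        case.set_value("DOCN_MODE", "som")
        case.set_value("DOCN_SOM_FILENAME", "pop_frc.1x1d.090130.nc")
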
- - - - char - - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1x1_clim_c101029.nc - - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_48x96_clim_c050526.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1.9x2.5_clim_c061031.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_clim_c040926.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.47x0.63_clim_c061106.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.23x0.31_clim_c110526.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1x1_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_48x96_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1.9x2.5_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.47x0.63_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.23x0.31_1850_2012_c130411.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1x1_clim_pi_c101029.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_48x96_clim_pi_c101028.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_1.9x2.5_clim_pi_c101028.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_clim_pi_c101028.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.47x0.63_clim_pi_c101028.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.23x0.31_clim_pi_c101028.nc - $DIN_LOC_ROOT/ocn/docn7/SSTDATA/sst_ice_CMIP6_DECK_E3SM_1x1_1950_clim_c20180910.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_clim_c040926.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_0.9x1.25_clim_c040926.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_4x5_clim_c110526.nc - $DIN_LOC_ROOT/atm/cam/sst/sst_HadOIBl_bc_4x5_clim_c110526.nc - - run_component_docn - env_run.xml - Prescribed SST and ice coverage data file name. - Sets SST and ice coverage data file name. - This is only used when DOCN_MODE=prescribed. 
- - - - char - - $DIN_LOC_ROOT/ocn/docn7/domain.ocn.1x1.111007.nc - - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.48x96_gx3v7_100114.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.1.9x2.5_gx1v6_090403.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.9x1.25_gx1v6_090403.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.fv0.9x1.25_gx1v7.151020.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.47x0.63_gx1v6_090408.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.23x0.31_gx1v6_101108.nc - $DIN_LOC_ROOT/ocn/docn7/domain.ocn.1x1.111007.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.48x96_gx3v7_100114.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.1.9x2.5_gx1v6_090403.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.9x1.25_gx1v6_090403.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.fv0.9x1.25_gx1v7.151020.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.47x0.63_gx1v6_090408.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.23x0.31_gx1v6_101108.nc - $DIN_LOC_ROOT/ocn/docn7/domain.ocn.1x1.111007.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.48x96_gx3v7_100114.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.1.9x2.5_gx1v6_090403.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.9x1.25_gx1v6_090403.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.fv0.9x1.25_gx1v7.151020.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.47x0.63_gx1v6_090408.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.23x0.31_gx1v6_101108.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.9x1.25_gx1v6_090403.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.0.9x1.25_gx1v6_090403.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.4x5_gx3v7_100120.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.4x5_gx3v7_100120.nc - - run_component_cam_sstice - env_run.xml - Prescribed SST and ice coverage grid file name. - Sets SST and ice coverage grid file name for prescribed runs. - This is only used when DOCN_MODE=prescribed. - - - - integer - - 1 - - 1850 - - run_component_cam_sstice - env_run.xml - The model year that corresponds to SSTICE_YEAR_START on the data file. - Prescribed SST and ice coverage data will be aligned so that the first year of - data corresponds to SSTICE_YEAR_ALIGN in the model. For instance, if the first - year of prescribed data is the same as the first year of the model run, this - should be set to the year given in RUN_STARTDATE. - If SSTICE_YEAR_ALIGN is later than the model's starting year, or if the model is - run after the prescribed data ends (as determined by SSTICE_YEAR_END), the - default behavior is to assume that the data from SSTICE_YEAR_START to - SSTICE_YEAR_END cyclically repeats. This behavior is controlled by the - "taxmode" stream option; see the data model documentation for more details. - This is only used when DOCN_MODE=prescribed. - - - - integer - - 0 - - 1850 - - run_component_cam_sstice - env_run.xml - The first year of data to use from SSTICE_DATA_FILENAME. - This is the first year of prescribed SST and ice coverage data to use. For - example, if a data file has data for years 0-99, and SSTICE_YEAR_START is 10, - years 0-9 in the file will not be used. - This is only used when DOCN_MODE=prescribed. - - - - integer - - 0 - - 2012 - - run_component_cam_sstice - env_run.xml - The last year of data to use from SSTICE_DATA_FILENAME. - This is the last year of prescribed SST and ice coverage data to use. For - example, if a data file has data for years 0-99, and value is 49, - years 50-99 in the file will not be used. - This is only used when DOCN_MODE=prescribed. 
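
Taken together, the three SSTICE_YEAR_* settings define a simple offset from model years onto data years, with behaviour outside the data period governed by the stream's taxmode. A rough sketch of that mapping follows; the modulo wrap-around is only an approximation of the default "cycle" taxmode, and the 1850-2012 values match the historical SST files listed above:

    def data_year(model_year, align=1, start=1850, end=2012):
        # 'align' is the model year that lines up with 'start' on the data file.
        span = end - start + 1
        return start + (model_year - align) % span

    # model year 1 -> 1850, model year 163 -> 2012, model year 164 wraps to 1850
    print(data_year(1), data_year(163), data_year(164))
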
- - - - ========================================= - DOCN naming conventions - ========================================= - - - diff --git a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml deleted file mode 100644 index 98d544af65e..00000000000 --- a/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +++ /dev/null @@ -1,645 +0,0 @@ - - - - - - - - - - - - - - char(100) - streams - streams_file - List of streams used for the given docn_mode. - - prescribed - '' - '' - '' - '' - '' - '' - '' - '' - '' - '' - aquapfile - som - som - interannual - - - - - char - streams - derived - does not appear in namelist - only used to set domain and data information - - $SSTICE_GRID_FILENAME - - - - - char - streams - derived - does not appear in namelist - only used to set domain and data information - - $SSTICE_DATA_FILENAME - - - - - char - streams - streams_file - Stream domain file directory. - - null - $DIN_LOC_ROOT/ocn/docn7/AQUAPLANET/ - $DIN_LOC_ROOT/ocn/docn7/SOM - $DIN_LOC_ROOT/atm/cam/sst - - - - - char - streams - streams_file - Stream domain file path(s). - - null - $DOCN_AQP_FILENAME - $DOCN_SOM_FILENAME - sst_HadOIBl_bc_1x1_1850_2014_c150416.nc - - - - - char - streams - streams_file - Stream domain variable name(s). - - - time time - xc lon - yc lat - area area - mask mask - - - time time - lon lon - lat lat - - - time time - lon lon - lat lat - - - - - - char - streams - streams_file - Stream data file directory. - - null - $DIN_LOC_ROOT/ocn/docn7/AQUAPLANET - $DIN_LOC_ROOT/ocn/docn7/SOM - $DIN_LOC_ROOT/atm/cam/sst - - - - - char - streams - streams_file - Stream data file path(s). - - null - $DOCN_AQP_FILENAME - $DOCN_SOM_FILENAME - sst_HadOIBl_bc_1x1_1850_2014_c150416.nc - - - - - char - streams - streams_file - Stream data variable name(s). - - - sst t - - - T t - S s - U u - V v - dhdx dhdx - dhdy dhdy - hblt h - qdp qbot - - - SST_cpl t - - - SST_cpl t - - - SST_cpl t - - - - - - integer - streams - streams_file - Stream offset. - - 0 - - - - - integer - streams - streams_file - Simulation year to align stream to. - - -999 - $SSTICE_YEAR_ALIGN - 0 - 1 - 1 - - - - - integer - streams - streams_file - First year of stream. - - -999 - $SSTICE_YEAR_START - 0 - 1 - 1850 - - - - - integer - streams - streams_file - Last year of stream. - - -999 - $SSTICE_YEAR_END - 0 - 1 - 2014 - - - - - - - - - - - - - char - streams - shr_strdata_nml - SSTDATA,SST_AQUAP1,SST_AQUAP2,SST_AQUAP3,SST_AQUAP4,SST_AQUAP5,SST_AQUAP6,SST_AQUAP7,SST_AQUAP8,SST_AQUAP9,SST_AQUAP10,SST_AQUAPFILE,SOM,SOM_AQUAP,IAF,NULL,COPYALL - - General method that operates on the data. This is generally - implemented in the data models but is set in the strdata method for - convenience. - - datamode = "NULL" - NULL is always a valid option and means no data will be generated. - Turns off the data model as a provider of data to the coupler. The - ice_present flag will be set to false and the coupler will assume no - exchange of data to or from the data model. - dataMode = "COPYALL" - Copies all fields directly from the input data streams Any required - fields not found on an input stream will be set to zero. - dataMode = "SSTDATA" - SSTDATA mode assumes the only field in the input stream is SST. - It also assumes the SST is in Celsius and must be converted to Kelvin. - All other fields are set to zero except for ocean salinity, which - is set to a constant reference salinity value. 
- Normally the ice fraction data is found in the same data files that - provide SST data to the data ocean model. They are normally found in - the same file because the SST and ice fraction data are derived from - the same observational data sets and are consistent with each other. - to the data ocean model. They are normally found in the same file - because the SST and ice fraction data are derived from the same - observational data sets and are consistent with each other. - dataMode = "IAF" - IAF is the interannually varying version of SSTDATA - dataMode = "SOM" - SOM ("slab ocean model") mode is a prognostic mode. This mode - computes a prognostic sea surface temperature and a freeze/melt - potential (surface Q-flux) used by the sea ice model. This - calculation requires an external SOM forcing data file that includes - ocean mixed layer depths and bottom-of-the-slab Q-fluxes. - Scientifically appropriate bottom-of-the-slab Q-fluxes are normally - ocean resolution dependent and are derived from the ocean model output - of a fully coupled CCSM run. Note that while this mode runs out of - the box, the default SOM forcing file is not scientifically - appropriate and is provided for testing and development purposes only. - Users must create scientifically appropriate data for their particular - application. A tool is available to derive valid SOM forcing. - - Set by the xml variable DOCN_MODE in env_run.xml - Currently, DOCN_MODE can be [prescribed,som,interannual,null] - If DOCN_MODE is prescribed, datamode will be set to SSTDATA - If DOCN_MODE is interannual, datamode will be set to IAF - If DOCN_MODE is som , datamode will be set to SOM - If DOCN_MODE is sst_aqup[n], datamode will be set to SST_AQUAP - If DOCN_MODE is som_aqup[n], datamode will be set to SOM_AQUAP - If DOCN_MODE is null, datamode will be set to NULL - - default: SSTDATA (prescribed setting for DOCN_MODE)' - - - NULL - SSTDATA - SST_AQUAP1 - SST_AQUAP2 - SST_AQUAP3 - SST_AQUAP4 - SST_AQUAP5 - SST_AQUAP6 - SST_AQUAP7 - SST_AQUAP8 - SST_AQUAP9 - SST_AQUAP10 - SST_AQUAPFILE - SOM - SOM_AQUAP - IAF - - - - - char - streams - abs - shr_strdata_nml - - spatial gridfile associated with the strdata. grid information will - be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - - - null - - - - - char(30) - streams - shr_strdata_nml - copy,none,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are just copy (ie. no fill), special value, - nearest neighbor, nearest neighbor in "i" direction, or nearest - neighbor in "j" direction. - valid values: 'copy','none','spval','nn','nnoni','nnonj' - - - nn - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - plays no role is fill algorithm at the present time. - valid values: "nomask,srcmask,dstmask,bothmask" - - - nomask - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read in instead of computing the - weights on the fly for the fill operation. if this is set, fillalgo - and fillmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the fill operation. this allows a user to - save and reuse a set of weights later. 
- - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - array (up to 30 elements) of masking algorithms for mapping input data - associated with the array of streams. valid options are map only from - valid src points, map only to valid destination points, ignore all - masks, map only from valid src points to valid destination points. - valid values: srcmask, dstmask, nomask,bothmask - - - dstmask - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are copy by index, set to special value, - nearest neighbor, nearest neighbor in "i" direction, nearest neighbor - in "j" direction, or bilinear. - valid values: copy,spval,nn,nnoni,nnonj,bilinear - - - bilinear - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read instead of computing - weights on the fly for the mapping (interpolation) operation. if this - is set, mapalgo and mapmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the mapping (interpolation) operation. this - allows a user to save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - coszen,nearest,linear,lower,upper - - array (up to 30 elements) of time interpolation options associated with the array of - streams. - valid values: lower,upper,nearest,linear,coszen - lower = Use lower time-value - upper = Use upper time-value - nearest = Use the nearest time-value - linear = Linearly interpolate between the two time-values - coszen = Scale according to the cosine of the solar zenith angle (for solar) - - - linear - - - - - char(30) - streams - shr_strdata_nml - extend,cycle,limit - - array of time axis modes associated with the array of streams for - handling data outside the specified stream time axis. - valid options are to cycle the data based on the first, last, and - align settings associated with the stream dataset, to extend the first - and last valid value indefinitely, or to limit the interpolated data - to fall only between the least and greatest valid value of the time array. - valid values: cycle,extend,limit - extend = extrapolate before and after the period by using the first or last value. - cycle = cycle between the range of data - limit = restrict to the period for which the data is valid - - - cycle - limit - - - - - char(30) - streams - shr_strdata_nml - single,full_file - - array (up to 30 elements) of reading mode associated with the array of - streams. specifies the mode of reading temporal stream dataset. - valid options are "single" (read temporal dataset one at a time) or - "full_file" (read all entires of temporal dataset in a given netcdf file) - valid values: single,full_file - - - single - - - - - real(30) - streams - shr_strdata_nml - - array (up to 30 elements) of delta time ratio limits placed on the - time interpolation associated with the array of streams. this real - value causes the model to stop if the ratio of the running maximum - delta time divided by the minimum delta time is greater than the - dtlimit for that stream. 
for instance, with daily data, the delta - time should be exactly one day throughout the dataset and the computed - maximum divided by minimum delta time should always be 1.0. for - monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. the running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. - - - 1.5e0 - - - - - char - streams - shr_strdata_nml - - list of paired colon delimited field names that should be treated as - vectors when carrying out spatial interpolation. unlike other - character arrays in this namelist, this array is completely decoupled - from the list of streams. this is a list of vector pairs that span - all input streams where different fields of the vector pair could - appear in different streams. - for example, vectors = 'u:v','taux:tauy'. - - - null - - - - - char(30) - streams - shr_strdata_nml - - character array (up to 30 elements) of stream input files. this - string is actually parsed by a stream method and so the format is - specified by the stream module. this string consists of a - "stream_input_filename year_align year_first year_last". the - stream_input_filename is a stream text input file and the format and - options are described elsewhere. year_align, year_first, and - year_last provide information about the time axis of the file and how - to relate the input time axis to the model time axis. - - - - - - - - - - - - char - docn - docn_nml - 1d,root - - DOCN Decomposition strategy - 1d = Vector decomposition - root = run only on the master task - - - 1d - - - - - char - docn - docn_nml - - Master restart file name for docn model - - - undefined - - - - - char - docn - docn_nml - - Stream restart file name for docn model, needed for branch simulations - - - undefined - - - - - logical - docn - docn_nml - If TRUE, prognostic is forced to true. (default=false) - - .false. - - - - diff --git a/src/components/data_comps/docn/cime_config/user_nl_docn b/src/components/data_comps/docn/cime_config/user_nl_docn deleted file mode 100644 index be007da82f0..00000000000 --- a/src/components/data_comps/docn/cime_config/user_nl_docn +++ /dev/null @@ -1,15 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_docn to change namelists variables -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! Note that any namelist variable from shr_strdata_nml and docn_nml can -! be modified below using the above syntax -! User preview_namelists to view (not modify) the output namelist in the -! directory $CASEROOT/CaseDocs -! To modify the contents of a stream txt file, first use preview_namelists -! to obtain the contents of the stream txt files in CaseDocs, and then -! place a copy of the modified stream txt file in $CASEROOT with the string -! user_ prepended. -! As an example, to modify docn.streams.txt.prescribed, place the modified -! 
version in $CASEROOT with the name user_docn.streams.txt.prescribed -!------------------------------------------------------------------------ diff --git a/src/components/data_comps/docn/mct/docn_comp_mod.F90 b/src/components/data_comps/docn/mct/docn_comp_mod.F90 deleted file mode 100644 index 924c6ab262d..00000000000 --- a/src/components/data_comps/docn/mct/docn_comp_mod.F90 +++ /dev/null @@ -1,926 +0,0 @@ -#ifdef AIX -@PROCESS ALIAS_SIZE(805306368) -#endif -module docn_comp_mod - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use shr_pcdf_mod - use shr_const_mod - use shr_sys_mod - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only: shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only: shr_mpi_bcast - use shr_frz_mod , only: shr_frz_freezetemp - use shr_strdata_mod , only: shr_strdata_type, shr_strdata_pioinit, shr_strdata_init - use shr_strdata_mod , only: shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only: shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only: shr_dmodel_gsmapcreate, shr_dmodel_rearrGGrid - use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV - use shr_cal_mod , only: shr_cal_datetod2string - use seq_timemgr_mod , only: seq_timemgr_EClockGetData - - use docn_shr_mod , only: datamode ! namelist input - use docn_shr_mod , only: aquap_option ! derived from datamode namelist input - use docn_shr_mod , only: decomp ! namelist input - use docn_shr_mod , only: rest_file ! namelist input - use docn_shr_mod , only: rest_file_strm ! namelist input - use docn_shr_mod , only: nullstr - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: docn_comp_init - public :: docn_comp_run - public :: docn_comp_final - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(CS) :: myModelName = 'ocn' ! user defined model name - logical :: firstcall = .true. ! first call logical - - character(len=*),parameter :: rpfile = 'rpointer.ocn' - integer(IN) :: dbug = 0 ! debug level (higher is more) - - real(R8),parameter :: cpsw = shr_const_cpsw ! specific heat of sea h2o ~ J/kg/K - real(R8),parameter :: rhosw = shr_const_rhosw ! density of sea water ~ kg/m^3 - real(R8),parameter :: TkFrz = shr_const_TkFrz ! freezing point, fresh water (Kelvin) - real(R8),parameter :: TkFrzSw = shr_const_TkFrzSw ! freezing point, sea water (Kelvin) - real(R8),parameter :: latice = shr_const_latice ! latent heat of fusion - real(R8),parameter :: ocnsalt = shr_const_ocn_ref_sal ! ocean reference salinity - - integer(IN) :: kt,ks,ku,kv,kdhdx,kdhdy,kq,kswp ! field indices - integer(IN) :: kswnet,klwup,klwdn,ksen,klat,kmelth,ksnow,krofi - integer(IN) :: kh,kqbot - integer(IN) :: index_lat, index_lon - integer(IN) :: kmask, kfrac ! frac and mask field indices of docn domain - integer(IN) :: ksomask ! So_omask field index - - type(mct_rearr) :: rearr - type(mct_avect) :: avstrm ! av of data from stream - real(R8), pointer :: somtp(:) - real(R8), pointer :: tfreeze(:) - integer(IN), pointer :: imask(:) - real(R8), pointer :: xc(:), yc(:) ! 
arryas of model latitudes and longitudes - - !-------------------------------------------------------------------------- - integer(IN) , parameter :: ktrans = 8 - character(12) , parameter :: avifld(1:ktrans) = & - (/ "t ","u ","v ","dhdx ",& - "dhdy ","s ","h ","qbot "/) - character(12) , parameter :: avofld(1:ktrans) = & - (/ "So_t ","So_u ","So_v ","So_dhdx ",& - "So_dhdy ","So_s ","strm_h ","strm_qbot "/) - character(len=*), parameter :: flds_strm = 'strm_h:strm_qbot' - !-------------------------------------------------------------------------- - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine docn_comp_init(Eclock, x2o, o2x, & - seq_flds_x2o_fields, seq_flds_o2x_fields, & - SDOCN, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon) - - ! !DESCRIPTION: initialize docn model - use pio , only : iosystem_desc_t - use shr_pio_mod, only : shr_pio_getiosys, shr_pio_getiotype - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2o, o2x ! input/output attribute vectors - character(len=*) , intent(in) :: seq_flds_x2o_fields ! fields from mediator - character(len=*) , intent(in) :: seq_flds_o2x_fields ! fields to mediator - type(shr_strdata_type) , intent(inout) :: SDOCN ! model shr_strdata instance (output) - type(mct_gsMap) , pointer :: gsMap ! model global seg map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - - !--- local variables --- - integer(IN) :: n,k ! generic counters - integer(IN) :: ierr ! error code - integer(IN) :: lsize ! local size - logical :: exists, exists1 ! file existance - integer(IN) :: nu ! unit number - character(CL) :: calendar ! model calendar - integer(IN) :: currentYMD ! model date - integer(IN) :: currentTOD ! model sec into model date - logical :: write_restart=.false. 
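The avifld/avofld tables above amount to a one-to-one renaming of stream fields to coupling fields. The dictionary below is only an illustration of that pairing; the actual copy is performed by shr_dmodel_translateAV on MCT attribute vectors.

    # Stream field name -> docn export/stream field name, as listed in avifld/avofld.
    STREAM_TO_MODEL = {
        "t": "So_t",
        "u": "So_u",
        "v": "So_v",
        "dhdx": "So_dhdx",
        "dhdy": "So_dhdy",
        "s": "So_s",
        "h": "strm_h",
        "qbot": "strm_qbot",
    }

    def translate(stream_fields):
        """Rename a dict of stream arrays to their model-side names (sketch only)."""
        return {STREAM_TO_MODEL[name]: data
                for name, data in stream_fields.items() if name in STREAM_TO_MODEL}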
- type(iosystem_desc_t), pointer :: ocn_pio_subsystem - - !--- formats --- - character(*), parameter :: F00 = "('(docn_comp_init) ',8a)" - character(*), parameter :: F0L = "('(docn_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(docn_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(docn_comp_init) ',a,4es13.6)" - character(*), parameter :: F03 = "('(docn_comp_init) ',a,i8,a)" - character(*), parameter :: F04 = "('(docn_comp_init) ',2a,2i8,'s')" - character(*), parameter :: F05 = "('(docn_comp_init) ',a,2f10.4)" - character(*), parameter :: F06 = "('(docn_comp_init) ',a,f10.4)" - character(*), parameter :: F90 = "('(docn_comp_init) ',73('='))" - character(*), parameter :: F91 = "('(docn_comp_init) ',73('-'))" - character(*), parameter :: subName = "(docn_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DOCN_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDOCN, COMPID) - - !---------------------------------------------------------------------------- - ! Initialize SDOCN - !---------------------------------------------------------------------------- - - call t_startf('docn_strdata_init') - - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - - ! NOTE: shr_strdata_init calls shr_dmodel_readgrid which reads the data model - ! grid and from that computes SDOCN%gsmap and SDOCN%ggrid. DOCN%gsmap is created - ! using the decomp '2d1d' (1d decomp of 2d grid) - - if (scmmode) then - if (my_task == master_task) & - write(logunit,F05) ' scm lon lat = ',scmlon,scmlat - call shr_strdata_init(SDOCN,mpicom,compid,name='ocn', & - scmmode=scmmode,scmlon=scmlon,scmlat=scmlat, calendar=calendar) - else - if (datamode == 'SST_AQUAPANAL' .or. datamode == 'SST_AQUAPFILE' .or. datamode == 'SOM_AQUAP') then - ! Special logic for either prescribed or som aquaplanet - overwrite and - call shr_strdata_init(SDOCN,mpicom,compid,name='ocn', calendar=calendar, reset_domain_mask=.true.) - else - call shr_strdata_init(SDOCN,mpicom,compid,name='ocn', calendar=calendar) - end if - endif - - if (my_task == master_task) then - call shr_strdata_print(SDOCN,'SDOCN data') - endif - - call t_stopf('docn_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize data model MCT global seg map, 1d decomp - !---------------------------------------------------------------------------- - - call t_startf('docn_initgsmaps') - if (my_task == master_task) write(logunit,F00) ' initialize gsmaps' - call shr_sys_flush(logunit) - - ! create a data model global seqmap (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the docn_in namelist - ! (which by default is "1d") - call shr_dmodel_gsmapcreate(gsmap, SDOCN%nxg*SDOCN%nyg, compid, mpicom, decomp) - lsize = mct_gsmap_lsize(gsmap, mpicom) - - ! create a rearranger from the data model SDOCN%gsmap to gsmap - call mct_rearr_init(SDOCN%gsmap, gsmap, mpicom, rearr) - call t_stopf('docn_initgsmaps') - - !---------------------------------------------------------------------------- - ! 
Initialize data model MCT domain - !---------------------------------------------------------------------------- - - call t_startf('docn_initmctdom') - if (my_task == master_task) write(logunit,F00) 'copy domains' - call shr_sys_flush(logunit) - - call shr_dmodel_rearrGGrid(SDOCN%grid, ggrid, gsmap, rearr, mpicom) - call t_stopf('docn_initmctdom') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('docn_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - call shr_sys_flush(logunit) - - call mct_aVect_init(o2x, rList=seq_flds_o2x_fields, lsize=lsize) - call mct_aVect_zero(o2x) - - kt = mct_aVect_indexRA(o2x,'So_t') - ks = mct_aVect_indexRA(o2x,'So_s') - ku = mct_aVect_indexRA(o2x,'So_u') - kv = mct_aVect_indexRA(o2x,'So_v') - kdhdx = mct_aVect_indexRA(o2x,'So_dhdx') - kdhdy = mct_aVect_indexRA(o2x,'So_dhdy') - kswp = mct_aVect_indexRA(o2x,'So_fswpen', perrwith='quiet') - kq = mct_aVect_indexRA(o2x,'Fioo_q') - - call mct_aVect_init(x2o, rList=seq_flds_x2o_fields, lsize=lsize) - call mct_aVect_zero(x2o) - - kswnet = mct_aVect_indexRA(x2o,'Foxx_swnet') - klwup = mct_aVect_indexRA(x2o,'Foxx_lwup') - ksen = mct_aVect_indexRA(x2o,'Foxx_sen') - klat = mct_aVect_indexRA(x2o,'Foxx_lat') - krofi = mct_aVect_indexRA(x2o,'Foxx_rofi') - klwdn = mct_aVect_indexRA(x2o,'Faxa_lwdn') - ksnow = mct_aVect_indexRA(x2o,'Faxa_snow') - kmelth = mct_aVect_indexRA(x2o,'Fioi_melth') - - call mct_aVect_init(avstrm, rList=flds_strm, lsize=lsize) - call mct_aVect_zero(avstrm) - - kh = mct_aVect_indexRA(avstrm,'strm_h') - kqbot = mct_aVect_indexRA(avstrm,'strm_qbot') - - allocate(somtp(lsize)) - allocate(tfreeze(lsize)) - allocate(imask(lsize)) - allocate(xc(lsize)) - allocate(yc(lsize)) - - kmask = mct_aVect_indexRA(ggrid%data,'mask') - imask(:) = nint(ggrid%data%rAttr(kmask,:)) - - kfrac = mct_aVect_indexRA(ggrid%data,'frac') - - ksomask = mct_aVect_indexRA(o2x,'So_omask', perrwith='quiet') - if (ksomask /= 0) then - o2x%rAttr(ksomask, :) = ggrid%data%rAttr(kfrac,:) - end if - - index_lon = mct_aVect_indexRA(ggrid%data,'lon') - xc(:) = ggrid%data%rAttr(index_lon,:) - - index_lat = mct_aVect_indexRA(ggrid%data,'lat') - yc(:) = ggrid%data%rAttr(index_lat,:) - - call t_stopf('docn_initmctavs') - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - exists = .false. - exists1 = .false. - if (trim(rest_file) == trim(nullstr) .and. & - trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer = ',trim(rpfile) - call shr_sys_flush(logunit) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (exists) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - inquire(file=trim(rest_file),exist=exists1) - endif - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! 
use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - call shr_sys_flush(logunit) - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - - call shr_mpi_bcast(exists,mpicom,'exists') - call shr_mpi_bcast(exists1,mpicom,'exists1') - - if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then - if (exists1) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - call shr_pcdf_readwrite('read',SDOCN%pio_subsystem, SDOCN%io_type, & - trim(rest_file), mpicom, gsmap=gsmap, rf1=somtp, rf1n='somtp', io_format=SDOCN%io_format) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file) - endif - endif - - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDOCN,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - call shr_sys_flush(logunit) - endif - - !---------------------------------------------------------------------------- - ! Set initial ocn state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - - call docn_comp_run(EClock, x2o, o2x, & - SDOCN, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - currentYMD, currentTOD) - - if (my_task == master_task) write(logunit,F00) 'docn_comp_init done' - call shr_sys_flush(logunit) - - call t_adj_detailf(-2) - - if (dbug > 0 .and. my_task == master_task) then - do n = 1,lsize - write(logunit,F06)'n,ofrac = ',mct_aVect_indexRA(ggrid%data,'frac') - end do - end if - - call t_stopf('DOCN_INIT') - - end subroutine docn_comp_init - - !=============================================================================== - - subroutine docn_comp_run(EClock, x2o, o2x, & - SDOCN, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - target_ymd, target_tod, case_name) - - ! !DESCRIPTION: run method for docn model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2o - type(mct_aVect) , intent(inout) :: o2x - type(shr_strdata_type) , intent(inout) :: SDOCN - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: write_restart ! restart alarm is on - integer(IN) , intent(in) :: target_ymd ! model date - integer(IN) , intent(in) :: target_tod ! model sec into model date - character(len=*) , intent(in),optional :: case_name ! case name - - !--- local --- - integer(IN) :: yy,mm,dd,tod ! year month day time-of-day - integer(IN) :: n ! indices - integer(IN) :: nf ! fields loop index - integer(IN) :: nl ! ocn frac index - integer(IN) :: lsize ! size of attr vect - integer(IN) :: idt ! integer timestep - real(R8) :: dt ! 
timestep - integer(IN) :: nu ! unit number - real(R8) :: hn ! h field - character(len=18) :: date_str - character(len=CL) :: local_case_name - real(R8), parameter :: & - swp = 0.67_R8*(exp((-1._R8*shr_const_zsrflyr) /1.0_R8)) + 0.33_R8*exp((-1._R8*shr_const_zsrflyr)/17.0_R8) - - character(*), parameter :: F00 = "('(docn_comp_run) ',8a)" - character(*), parameter :: F01 = "('(docn_comp_run) ',a, i7,2x,i5,2x,i5,2x,d21.14)" - character(*), parameter :: F04 = "('(docn_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(docn_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DOCN_RUN') - - call t_startf('docn_run1') - call seq_timemgr_EClockGetData( EClock, dtime=idt) - dt = idt * 1.0_R8 - call t_stopf('docn_run1') - if(present(case_name)) then - local_case_name = case_name - else - local_case_name = "" - endif - - !-------------------- - ! ADVANCE OCN - !-------------------- - - call t_barrierf('docn_BARRIER',mpicom) - call t_startf('docn') - - !--- defaults, copy all fields from streams to o2x --- - - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - if (ksomask /= 0) then - o2x%rAttr(ksomask, n) = ggrid%data%rAttr(kfrac,n) - end if - o2x%rAttr(kt ,n) = TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - ! NOTE: for SST_AQUAPANAL, the docn buildnml sets the stream to "null" - ! and thereby shr_strdata_advance does nothing - - call t_startf('docn_strdata_advance') - call shr_strdata_advance(SDOCN, target_ymd, target_tod, mpicom, 'docn') - call t_stopf('docn_strdata_advance') - - !--- copy streams to o2x --- - call t_barrierf('docn_scatter_BARRIER', mpicom) - call t_startf('docn_scatter') - do n = 1, SDOCN%nstreams - call shr_dmodel_translateAV(SDOCN%avs(n), o2x, avifld, avofld, rearr) - enddo - call t_stopf('docn_scatter') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('docn_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - case('SSTDATA') - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - o2x%rAttr(kt ,n) = o2x%rAttr(kt,n) + TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - case('SST_AQUAPANAL') - lsize = mct_avect_lsize(o2x) - ! Zero out the attribute vector before calling the prescribed_sst - ! function - so this also zeroes out the So_omask if it is needed - ! 
so need to re-introduce it - do n = 1,lsize - o2x%rAttr(:,n) = 0.0_r8 - end do - call prescribed_sst(xc, yc, lsize, aquap_option, o2x%rAttr(kt,:)) - do n = 1,lsize - o2x%rAttr(kt,n) = o2x%rAttr(kt,n) + TkFrz - if (ksomask /= 0) then - o2x%rAttr(ksomask, n) = ggrid%data%rAttr(kfrac,n) - end if - enddo - - case('SST_AQUAPFILE') - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - o2x%rAttr(kt ,n) = o2x%rAttr(kt,n) + TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - case('IAF') - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - o2x%rAttr(kt ,n) = o2x%rAttr(kt,n) + TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - case('SOM') - lsize = mct_avect_lsize(o2x) - do n = 1,SDOCN%nstreams - call shr_dmodel_translateAV(SDOCN%avs(n),avstrm,avifld,avofld,rearr) - enddo - if (firstcall) then - do n = 1,lsize - if (.not. read_restart) then - somtp(n) = o2x%rAttr(kt,n) + TkFrz - endif - o2x%rAttr(kt,n) = somtp(n) - o2x%rAttr(kq,n) = 0.0_R8 - enddo - else ! firstcall - tfreeze = shr_frz_freezetemp(o2x%rAttr(ks,:)) + TkFrz - do n = 1,lsize - if (imask(n) /= 0) then - !--- pull out h from av for resuse below --- - hn = avstrm%rAttr(kh,n) - !--- compute new temp --- - o2x%rAttr(kt,n) = somtp(n) + & - (x2o%rAttr(kswnet,n) + & ! shortwave - x2o%rAttr(klwup ,n) + & ! longwave - x2o%rAttr(klwdn ,n) + & ! longwave - x2o%rAttr(ksen ,n) + & ! sensible - x2o%rAttr(klat ,n) + & ! latent - x2o%rAttr(kmelth,n) - & ! ice melt - avstrm%rAttr(kqbot ,n) - & ! flux at bottom - (x2o%rAttr(ksnow,n)+x2o%rAttr(krofi,n))*latice) * & ! latent by prec and roff - dt/(cpsw*rhosw*hn) - !--- compute ice formed or melt potential --- - o2x%rAttr(kq,n) = (tfreeze(n) - o2x%rAttr(kt,n))*(cpsw*rhosw*hn)/dt ! ice formed q>0 - o2x%rAttr(kt,n) = max(tfreeze(n),o2x%rAttr(kt,n)) ! reset temp - somtp(n) = o2x%rAttr(kt,n) ! save temp - endif - enddo - endif ! firstcall - - case('SOM_AQUAP') - lsize = mct_avect_lsize(o2x) - do n = 1,SDOCN%nstreams - call shr_dmodel_translateAV(SDOCN%avs(n),avstrm,avifld,avofld,rearr) - enddo - if (firstcall) then - do n = 1,lsize - if (.not. read_restart) then - somtp(n) = o2x%rAttr(kt,n) + TkFrz - endif - o2x%rAttr(kt,n) = somtp(n) - o2x%rAttr(kq,n) = 0.0_R8 - enddo - else ! firstcall - tfreeze = shr_frz_freezetemp(o2x%rAttr(ks,:)) + TkFrz - do n = 1,lsize - !--- pull out h from av for resuse below --- - hn = avstrm%rAttr(kh,n) - !--- compute new temp --- - o2x%rAttr(kt,n) = somtp(n) + & - (x2o%rAttr(kswnet,n) + & ! shortwave - x2o%rAttr(klwup ,n) + & ! longwave - x2o%rAttr(klwdn ,n) + & ! longwave - x2o%rAttr(ksen ,n) + & ! sensible - x2o%rAttr(klat ,n) + & ! latent - x2o%rAttr(kmelth,n) - & ! ice melt - avstrm%rAttr(kqbot ,n) - & ! flux at bottom - (x2o%rAttr(ksnow,n)+x2o%rAttr(krofi,n))*latice) * & ! latent by prec and roff - dt/(cpsw*rhosw*hn) - !--- compute ice formed or melt potential --- - o2x%rAttr(kq,n) = (tfreeze(n) - o2x%rAttr(kt,n))*(cpsw*rhosw*hn)/dt ! ice formed q>0 - somtp(n) = o2x%rAttr(kt,n) ! save temp - enddo - endif ! firstcall - - end select - - call t_stopf('docn_datamode') - - !---------------------------------------------------------- - ! 
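For clarity, the SOM/SOM_AQUAP heat-budget update applied above can be transcribed into a small stand-alone function; the default constant values are nominal stand-ins for the shr_const_* parameters and the function name is hypothetical.

    def som_update(t_old, swnet, lwup, lwdn, sen, lat, melth, qbot, snow, rofi,
                   h, dt, tfreeze, cpsw=3.996e3, rhosw=1.026e3, latice=3.337e5):
        """Advance the slab ocean temperature and return (t_new, Fioo_q).

        Fluxes carry the same signs as in the sum above (W/m2), h is the mixed
        layer depth in m, dt the time step in s, temperatures in Kelvin."""
        net = (swnet + lwup + lwdn + sen + lat + melth
               - qbot - (snow + rofi) * latice)
        t_new = t_old + net * dt / (cpsw * rhosw * h)
        q = (tfreeze - t_new) * (cpsw * rhosw * h) / dt  # q > 0 means ice formed
        t_new = max(tfreeze, t_new)                      # never cool below freezing
        return t_new, q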
Debug output - !---------------------------------------------------------- - - if (dbug > 0 .and. my_task == master_task) then - do n = 1,lsize - write(logunit,F01)'import: ymd,tod,n,Foxx_swnet = ', target_ymd, target_tod, n, x2o%rattr(kswnet,n) - write(logunit,F01)'import: ymd,tod,n,Foxx_lwup = ', target_ymd, target_tod, n, x2o%rattr(klwup,n) - write(logunit,F01)'import: ymd,tod,n,Foxx_lwdn = ', target_ymd, target_tod, n, x2o%rattr(klwdn,n) - write(logunit,F01)'import: ymd,tod,n,Fioi_melth = ', target_ymd, target_tod, n, x2o%rattr(kmelth,n) - write(logunit,F01)'import: ymd,tod,n,Foxx_sen = ', target_ymd, target_tod, n, x2o%rattr(ksen,n) - write(logunit,F01)'import: ymd,tod,n,Foxx_lat = ', target_ymd, target_tod, n, x2o%rattr(klat,n) - write(logunit,F01)'import: ymd,tod,n,Foxx_rofi = ', target_ymd, target_tod, n, x2o%rattr(krofi,n) - - write(logunit,F01)'export: ymd,tod,n,So_t = ', target_ymd, target_tod, n, o2x%rattr(kt,n) - write(logunit,F01)'export: ymd,tod,n,So_s = ', target_ymd, target_tod, n, o2x%rattr(ks,n) - write(logunit,F01)'export: ymd,tod,n,So_u = ', target_ymd, target_tod, n, o2x%rattr(ku,n) - write(logunit,F01)'export: ymd,tod,n,So_v = ', target_ymd, target_tod, n, o2x%rattr(kv,n) - write(logunit,F01)'export: ymd,tod,n,So_dhdx = ', target_ymd, target_tod, n, o2x%rattr(kdhdx,n) - write(logunit,F01)'export: ymd,tod,n,So_dhdy = ', target_ymd, target_tod, n, o2x%rattr(kdhdy,n) - write(logunit,F01)'export: ymd,tod,n,Fioo_q = ', target_ymd, target_tod, n, o2x%rattr(kq,n) - end do - end if - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('docn_restart') - call shr_cal_datetod2string(date_str, target_ymd, target_tod) - write(rest_file,"(6a)") & - trim(local_case_name), '.docn',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(local_case_name), '.docn',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file),target_ymd,target_tod - call shr_pcdf_readwrite('write', SDOCN%pio_subsystem, SDOCN%io_type,& - trim(rest_file), mpicom, gsmap, clobber=.true., rf1=somtp,rf1n='somtp') - endif - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),target_ymd,target_tod - call shr_strdata_restWrite(trim(rest_file_strm), SDOCN, mpicom, trim(local_case_name), 'SDOCN strdata') - call shr_sys_flush(logunit) - call t_stopf('docn_restart') - endif - - call t_stopf('docn') - - !---------------------------------------------------------------------------- - ! Log output for model date - !---------------------------------------------------------------------------- - - call t_startf('docn_run2') - if (my_task == master_task) then - write(logunit,F04) trim(myModelName),': model date ', target_ymd,target_tod - call shr_sys_flush(logunit) - end if - - firstcall = .false. - call t_stopf('docn_run2') - - call t_stopf('DOCN_RUN') - - end subroutine docn_comp_run - - !=============================================================================== - subroutine docn_comp_final(my_task, master_task, logunit) - - ! !DESCRIPTION: finalize method for docn model - implicit none - - ! 
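The restart bookkeeping above reduces to two per-instance file names that are also recorded in rpointer.ocn. The sketch below reproduces that naming under the assumption that shr_cal_datetod2string yields a YYYY-MM-DD-SSSSS string; the helper name is invented.

    def docn_restart_names(case_name, inst_suffix, ymd, tod):
        """Build the docn restart file names written at a restart alarm (sketch)."""
        # Assumed date string layout: YYYY-MM-DD-SSSSS
        date_str = "%04d-%02d-%02d-%05d" % (ymd // 10000, (ymd // 100) % 100, ymd % 100, tod)
        rest_file = "%s.docn%s.r.%s.nc" % (case_name, inst_suffix, date_str)         # SOM state (somtp)
        rest_file_strm = "%s.docn%s.rs1.%s.bin" % (case_name, inst_suffix, date_str)  # stream restart
        return rest_file, rest_file_strm

    print(docn_restart_names("mycase", "_0001", 20000101, 0))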
!INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - - !--- formats --- - character(*), parameter :: F00 = "('(docn_comp_final) ',8a)" - character(*), parameter :: F91 = "('(docn_comp_final) ',73('-'))" - character(*), parameter :: subName = "(docn_comp_final) " - !------------------------------------------------------------------------------- - - call t_startf('DOCN_FINAL') - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) - end if - - call t_stopf('DOCN_FINAL') - - end subroutine docn_comp_final - - !=============================================================================== - subroutine prescribed_sst(xc, yc, lsize, sst_option, sst) - - real(R8) , intent(in) :: xc(:) !degrees - real(R8) , intent(in) :: yc(:) !degrees - integer(IN) , intent(in) :: lsize - integer(IN) , intent(in) :: sst_option - real(R8) , intent(inout) :: sst(:) - - ! local - integer :: i - real(r8) :: tmp, tmp1, pi - real(r8) :: rlon(lsize), rlat(lsize) - - real(r8), parameter :: pio180 = SHR_CONST_PI/180._r8 - - ! Parameters for zonally symmetric experiments - real(r8), parameter :: t0_max = 27._r8 - real(r8), parameter :: t0_min = 0._r8 - real(r8), parameter :: maxlat = 60._r8*pio180 - real(r8), parameter :: shift = 5._r8*pio180 - real(r8), parameter :: shift9 = 10._r8*pio180 - real(r8), parameter :: shift10 = 15._r8*pio180 - - ! Parameters for zonally asymmetric experiments - real(r8), parameter :: t0_max6 = 1._r8 - real(r8), parameter :: t0_max7 = 3._r8 - real(r8), parameter :: latcen = 0._r8*pio180 - real(r8), parameter :: loncen = 0._r8*pio180 - real(r8), parameter :: latrad6 = 15._r8*pio180 - real(r8), parameter :: latrad8 = 30._r8*pio180 - real(r8), parameter :: lonrad = 30._r8*pio180 - !------------------------------------------------------------------------------- - - pi = SHR_CONST_PI - - ! convert xc and yc from degrees to radians - - rlon(:) = xc(:) * pio180 - rlat(:) = yc(:) * pio180 - - ! Control - - if (sst_option < 1 .or. sst_option > 10) then - call shr_sys_abort ('prescribed_sst: ERROR: sst_option must be between 1 and 10') - end if - - if (sst_option == 1 .or. sst_option == 6 .or. sst_option == 7 .or. sst_option == 8) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = sin(rlat(i)*pi*0.5_r8/maxlat) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Flat - - if (sst_option == 2) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = sin(rlat(i)*pi*0.5_r8/maxlat) - tmp = 1._r8 - tmp*tmp*tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Qobs - - if (sst_option == 3) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = sin(rlat(i)*pi*0.5_r8/maxlat) - tmp = (2._r8 - tmp*tmp*tmp*tmp - tmp*tmp)*0.5_r8 - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Peaked - - if (sst_option == 4) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = (maxlat - abs(rlat(i)))/maxlat - tmp1 = 1._r8 - tmp - sst(i) = t0_max*tmp + t0_min*tmp1 - end if - end do - end if - - ! 
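As a worked example of the analytic profiles computed by prescribed_sst, the Control case (sst_option 1, which also serves as the base state for options 6, 7, and 8) reduces to the expression below; this is an illustrative Python version with latitude in degrees and SST in degrees C.

    import math

    def control_sst(lat_deg, t0_max=27.0, t0_min=0.0, maxlat_deg=60.0):
        """APE 'Control' profile: 27 C at the equator tapering to 0 C at 60N/S."""
        maxlat = math.radians(maxlat_deg)
        rlat = math.radians(lat_deg)
        if abs(rlat) > maxlat:
            return t0_min
        s = math.sin(rlat * math.pi * 0.5 / maxlat)
        return (1.0 - s * s) * (t0_max - t0_min) + t0_min

    print(control_sst(0.0))    # 27.0
    print(control_sst(30.0))   # 13.5
    print(control_sst(70.0))   # 0.0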
Control-5N - - if (sst_option == 5) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else if (rlat(i) > shift) then - tmp = sin((rlat(i)-shift)*pi*0.5_r8/(maxlat-shift)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - else - tmp = sin((rlat(i)-shift)*pi*0.5_r8/(maxlat+shift)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! 1KEQ - - if (sst_option == 6) then - do i = 1,lsize - if (abs(rlat(i)-latcen) <= latrad6) then - tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad6) - tmp1 = tmp1*tmp1 - tmp = abs(rlon(i)-loncen) - tmp = min(tmp , 2._r8*pi-tmp) - if(tmp <= lonrad) then - tmp = cos(tmp*pi*0.5_r8/lonrad) - tmp = tmp*tmp - sst(i) = sst(i) + t0_max6*tmp*tmp1 - end if - end if - end do - end if - - ! 3KEQ - - if (sst_option == 7) then - do i = 1, lsize - if (abs(rlat(i)-latcen) <= latrad6) then - tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad6) - tmp1 = tmp1*tmp1 - tmp = abs(rlon(i)-loncen) - tmp = min(tmp , 2._r8*pi-tmp) - if (tmp <= lonrad) then - tmp = cos(tmp*pi*0.5_r8/lonrad) - tmp = tmp*tmp - sst(i) = sst(i) + t0_max7*tmp*tmp1 - end if - end if - end do - end if - - ! 3KW1 - - if (sst_option == 8) then - do i = 1, lsize - if (abs(rlat(i)-latcen) <= latrad8) then - tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad8) - tmp1 = tmp1*tmp1 - tmp = cos(rlon(i)-loncen) - sst(i) = sst(i) + t0_max7*tmp*tmp1 - end if - end do - end if - - ! Control-10N - - if (sst_option == 9) then - do i = 1, lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else if (rlat(i) > shift9) then - tmp = sin((rlat(i)-shift9)*pi*0.5_r8/(maxlat-shift9)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - else - tmp = sin((rlat(i)-shift9)*pi*0.5_r8/(maxlat+shift9)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Control-15N - - if (sst_option == 10) then - do i = 1, lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else if(rlat(i) > shift10) then - tmp = sin((rlat(i)-shift10)*pi*0.5_r8/(maxlat-shift10)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - else - tmp = sin((rlat(i)-shift10)*pi*0.5_r8/(maxlat+shift10)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - end subroutine prescribed_sst - -end module docn_comp_mod diff --git a/src/components/data_comps/docn/mct/docn_shr_mod.F90 b/src/components/data_comps/docn/mct/docn_shr_mod.F90 deleted file mode 100644 index ead9bf45c47..00000000000 --- a/src/components/data_comps/docn/mct/docn_shr_mod.F90 +++ /dev/null @@ -1,184 +0,0 @@ -module docn_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: docn_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! input namelist variables - character(CL) , public :: decomp ! 
decomp strategy - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - integer(IN) , public :: aquap_option - character(len=*), public, parameter :: nullstr = 'undefined' - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine docn_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDOCN, ocn_present, ocn_prognostic, ocnrof_prognostic) - - ! !DESCRIPTION: Read in docn namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer , intent(in) :: inst_index ! number of current instance (ie. 1) - character(len=16) , intent(in) :: inst_suffix ! char string associated with instance - character(len=16) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(in) :: shrlogunit ! original log unit and level - type(shr_strdata_type) , intent(inout) :: SDOCN - logical , intent(out) :: ocn_present ! flag - logical , intent(out) :: ocn_prognostic ! flag - logical , intent(out) :: ocnrof_prognostic ! flag - - !--- local variables --- - character(CL) :: fileName ! generic file name - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - - !--- formats --- - character(*), parameter :: F00 = "('(docn_comp_init) ',8a)" - character(*), parameter :: F0L = "('(docn_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(docn_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(docn_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(docn_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_docn_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / docn_nml / & - decomp, restfilm, restfils, force_prognostic_true - - !---------------------------------------------------------------------------- - ! Determine input filenamname - !---------------------------------------------------------------------------- - - filename = "docn_in"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! Read docn_in - !---------------------------------------------------------------------------- - - filename = "docn_in"//trim(inst_suffix) - decomp = "1d" - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - if (my_task == master_task) then - nunit = shr_file_getUnit() ! 
get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=docn_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' decomp = ',trim(decomp) - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - endif - call shr_mpi_bcast(decomp ,mpicom,'decomp') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDOCN,trim(filename),mpicom=mpicom) - - datamode = trim(SDOCN%dataMode) - - ! Special logic for prescribed aquaplanet - - if (datamode(1:9) == 'SST_AQUAP' .and. trim(datamode) /= 'SST_AQUAPFILE') then - ! First determine the prescribed aquaplanet option - if (len_trim(datamode) == 10) then - read(datamode(10:10),'(i1)') aquap_option - else if (len_trim(datamode) == 11) then - read(datamode(10:11),'(i2)') aquap_option - end if - ! Now remove the index from the datamode value, to have a generic setting - ! for use below - datamode = "SST_AQUAPANAL" - end if - - ! Validate mode - - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'SSTDATA' .or. & - trim(datamode) == 'SST_AQUAPANAL' .or. & - trim(datamode) == 'SST_AQUAPFILE' .or. & - trim(datamode) == 'COPYALL' .or. & - trim(datamode) == 'IAF' .or. & - trim(datamode) == 'SOM' .or. & - trim(datamode) == 'SOM_AQUAP') then - if (my_task == master_task) then - write(logunit,F00) ' docn datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal docn datamode = ',trim(datamode) - call shr_sys_abort() - endif - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flag - !---------------------------------------------------------------------------- - - ocn_present = .false. - ocn_prognostic = .false. - ocnrof_prognostic = .false. - if (force_prognostic_true) then - ocn_present = .true. - ocn_prognostic = .true. - ocnrof_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - ocn_present = .true. - end if - if (trim(datamode) == 'IAF') then - ocn_prognostic = .true. - ocnrof_prognostic = .true. - endif - if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then - ocn_prognostic = .true. - endif - - end subroutine docn_shr_read_namelists - -end module docn_shr_mod diff --git a/src/components/data_comps/docn/mct/ocn_comp_mct.F90 b/src/components/data_comps/docn/mct/ocn_comp_mct.F90 deleted file mode 100644 index 19e393ced48..00000000000 --- a/src/components/data_comps/docn/mct/ocn_comp_mct.F90 +++ /dev/null @@ -1,271 +0,0 @@ -module ocn_comp_mct - - ! 
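The present/prognostic flags set at the end of docn_shr_read_namelists above follow a small decision table, sketched here in Python for reference; the function name is hypothetical.

    def docn_flags(datamode, force_prognostic_true=False):
        """Return (ocn_present, ocn_prognostic, ocnrof_prognostic) as derived above."""
        present = force_prognostic_true or datamode != "NULL"
        prognostic = force_prognostic_true or datamode in ("IAF", "SOM", "SOM_AQUAP")
        rof_prognostic = force_prognostic_true or datamode == "IAF"
        return present, prognostic, rof_prognostic

    assert docn_flags("NULL") == (False, False, False)
    assert docn_flags("SSTDATA") == (True, False, False)
    assert docn_flags("IAF") == (True, True, True)
    assert docn_flags("SOM") == (True, True, False)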
!USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use seq_timemgr_mod , only: seq_timemgr_RestartAlarmIsOn, seq_timemgr_EClockGetData - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use docn_comp_mod , only: docn_comp_init, docn_comp_run, docn_comp_final - use docn_shr_mod , only: docn_shr_read_namelists - use seq_flds_mod , only: seq_flds_x2o_fields, seq_flds_o2x_fields - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: ocn_init_mct - public :: ocn_run_mct - public :: ocn_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - type(shr_strdata_type) :: SDOCN - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - logical :: read_restart ! start from restart - integer(IN) , parameter :: master_task=0 ! task number of master task - integer , parameter :: dbug = 10 - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine ocn_init_mct( EClock, cdata, x2o, o2x, NLFilename ) - - ! !DESCRIPTION: initialize docn model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2o, o2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer :: phase ! phase of method - logical :: ocn_present ! flag - logical :: ocn_prognostic ! flag - logical :: ocnrof_prognostic ! flag - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: ierr ! error code - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_SPVAL ! single column lat - real(R8) :: scmLon = shr_const_SPVAL ! single column lon - character(*), parameter :: F00 = "('(docn_comp_init) ',8a)" - character(*), parameter :: subName = "(ocn_init_mct) " - !------------------------------------------------------------------------------- - - ! 
Set cdata pointers - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, & - single_column=scmMode, & - scmlat=scmlat, scmlon=scmLon, & - read_restart=read_restart) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('ocn_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogLevel(max(shrloglev,1)) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Read docn namelists and set prognostic, present flags in infodata - !---------------------------------------------------------------------------- - - call t_startf('docn_readnml') - - call docn_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDOCN, ocn_present, ocn_prognostic, ocnrof_prognostic) - - call seq_infodata_PutData(infodata, & - ocn_present=ocn_present, & - ocn_prognostic=ocn_prognostic, & - ocnrof_prognostic=ocnrof_prognostic) - - call t_stopf('docn_readnml') - - !---------------------------------------------------------------------------- - ! RETURN if present flag is false - !---------------------------------------------------------------------------- - - if (.not. ocn_present) then - RETURN - end if - - ! NOTE: the following will never be called if ocn_present is .false. - - !---------------------------------------------------------------------------- - ! Initialize docn - !---------------------------------------------------------------------------- - - call docn_comp_init(Eclock, x2o, o2x, & - seq_flds_x2o_fields, seq_flds_o2x_fields, & - SDOCN, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - scmMode, scmlat, scmlon) - - !---------------------------------------------------------------------------- - ! Fill infodata that needs to be returned from docn - !---------------------------------------------------------------------------- - - call seq_infodata_PutData(infodata, & - ocn_nx=SDOCN%nxg, & - ocn_ny=SDOCN%nyg ) - - !---------------------------------------------------------------------------- - ! diagnostics - !---------------------------------------------------------------------------- - - if (dbug > 1) then - if (my_task == master_task) then - call mct_aVect_info(2, o2x, istr="initial diag"//':AV') - end if - endif - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to original values - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'docn_comp_init done' - call shr_sys_flush(logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine ocn_init_mct - - !=============================================================================== - - subroutine ocn_run_mct( EClock, cdata, x2o, o2x) - - ! !DESCRIPTION: run method for docn model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2o ! driver -> docn - type(mct_aVect) ,intent(inout) :: o2x ! docn -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(CL) :: case_name ! case name - logical :: write_restart ! restart alarm is ringing - integer(IN) :: currentYMD ! model date - integer(IN) :: currentTOD ! model sec into model date - character(*), parameter :: subName = "(ocn_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogLevel(max(shrloglev,1)) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call seq_infodata_GetData(infodata, case_name=case_name) - - write_restart = seq_timemgr_RestartAlarmIsOn(EClock) - - ! For mct - the component clock is advance at the beginning of the time interval - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - - call docn_comp_run(EClock, x2o, o2x, & - SDOCN, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - currentYMD, currentTOD, case_name=case_name) - - if (dbug > 1) then - if (my_task == master_task) then - call mct_aVect_info(2, o2x, istr="run diag"//':AV') - end if - endif - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - - end subroutine ocn_run_mct - - !=============================================================================== - subroutine ocn_final_mct(EClock, cdata, x2o, o2x) - - ! !DESCRIPTION: finalize method for docn model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! 
clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2o - type(mct_aVect) ,intent(inout) :: o2x - - !--- formats --- - character(*), parameter :: subName = "(ocn_final_mct) " - !------------------------------------------------------------------------------- - - call docn_comp_final(my_task, master_task, logunit) - - end subroutine ocn_final_mct - !=============================================================================== - -end module ocn_comp_mct diff --git a/src/components/data_comps/docn/nuopc/docn_comp_mod.F90 b/src/components/data_comps/docn/nuopc/docn_comp_mod.F90 deleted file mode 100644 index 1b0104be91e..00000000000 --- a/src/components/data_comps/docn/nuopc/docn_comp_mod.F90 +++ /dev/null @@ -1,1125 +0,0 @@ -module docn_comp_mod - - use NUOPC , only : NUOPC_Advertise - use ESMF , only : ESMF_State, ESMF_SUCCESS, ESMF_State - use ESMF , only : ESMF_Mesh, ESMF_DistGrid, ESMF_MeshGet, ESMF_DistGridGet - use ESMF , only : ESMF_State, ESMF_LOGMSG_INFO, ESMF_LogWrite - use perf_mod , only : t_startf, t_stopf, t_adj_detailf, t_barrierf - use mct_mod , only : mct_gsmap, mct_gsmap_init, mct_gsmap_lsize - use mct_mod , only : mct_avect, mct_avect_indexRA, mct_avect_zero, mct_aVect_nRattr - use mct_mod , only : mct_avect_init, mct_avect_lsize, mct_avect_clean - use shr_kind_mod , only : r8=>shr_kind_r8, cxx=>shr_kind_cxx, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_const_mod , only : shr_const_cpsw, shr_const_rhosw, shr_const_TkFrz - use shr_const_mod , only : shr_const_TkFrzSw, shr_const_latice, shr_const_ocn_ref_sal - use shr_const_mod , only : shr_const_zsrflyr, shr_const_pi - use shr_string_mod , only : shr_string_listGetName - use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only : shr_mpi_bcast - use shr_frz_mod , only : shr_frz_freezetemp - use shr_cal_mod , only : shr_cal_calendarname - use shr_cal_mod , only : shr_cal_datetod2string - use shr_strdata_mod , only : shr_strdata_init_model_domain - use shr_strdata_mod , only : shr_strdata_init_streams - use shr_strdata_mod , only : shr_strdata_init_mapping - use shr_strdata_mod , only : shr_strdata_type, shr_strdata_pioinit - use shr_strdata_mod , only : shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only : shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only : shr_dmodel_translateAV - use shr_pcdf_mod , only : shr_pcdf_readwrite - use dshr_methods_mod , only : ChkErr - use dshr_nuopc_mod , only : fld_list_type, dshr_fld_add, dshr_import, dshr_export - use docn_shr_mod , only : datamode ! namelist input - use docn_shr_mod , only : aquap_option ! derived from datamode namelist input - use docn_shr_mod , only : rest_file ! namelist input - use docn_shr_mod , only : rest_file_strm ! namelist input - use docn_shr_mod , only : nullstr - use docn_shr_mod , only : SDOCN - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public/Private interfaces - !-------------------------------------------------------------------------- - - public :: docn_comp_advertise - public :: docn_comp_init - public :: docn_comp_run - public :: docn_comp_import - public :: docn_comp_export - - private :: prescribed_sst - - !-------------------------------------------------------------------------- - ! 
Private data - !-------------------------------------------------------------------------- - - type(mct_aVect) :: x2o - type(mct_aVect) :: o2x - character(CXX) :: flds_o2x = '' - character(CXX) :: flds_x2o = '' - - integer :: debug_import = 0 ! debug level (if > 0 will print all import fields) - integer :: debug_export = 0 ! debug level (if > 0 will print all export fields) - - real(R8),parameter :: cpsw = shr_const_cpsw ! specific heat of sea h2o ~ J/kg/K - real(R8),parameter :: rhosw = shr_const_rhosw ! density of sea water ~ kg/m^3 - real(R8),parameter :: TkFrz = shr_const_TkFrz ! freezing point, fresh water (Kelvin) - real(R8),parameter :: TkFrzSw = shr_const_TkFrzSw ! freezing point, sea water (Kelvin) - real(R8),parameter :: latice = shr_const_latice ! latent heat of fusion - real(R8),parameter :: ocnsalt = shr_const_ocn_ref_sal ! ocean reference salinity - - integer :: kt,ks,ku,kv,kdhdx,kdhdy,kq,kswp ! field indices - integer :: kswnet,klwup,klwdn - integer :: ksen,klat,kmelth,ksnow,krofi - integer :: kh,kqbot - integer :: kmask, kfrac ! frac and mask field indices of docn domain - integer :: ksomask ! So_omask field index - - type(mct_avect) :: avstrm ! av of data created from all stream input - character(len=CS), pointer :: avifld(:) ! names of fields in input streams - character(len=CS), pointer :: avofld(:) ! local names of fields in input streams for import/export - character(len=CS), pointer :: stifld(:) ! names of fields in input streams - character(len=CS), pointer :: stofld(:) ! local names of fields in input streams for calculations - character(CXX) :: flds_strm = '' ! set in docn_comp_init - logical :: ocn_prognostic_mod ! set in docn_comp_advertise - - integer , pointer :: imask(:) ! integer ocean mask - real(R8), pointer :: xc(:), yc(:) ! arrays of model latitudes and longitudes - real(R8), pointer :: somtp(:) ! SOM ocean temperature - real(R8), pointer :: tfreeze(:) ! SOM ocean freezing temperature - - logical :: firstcall = .true. ! first call logical - character(len=*),parameter :: rpfile = 'rpointer.ocn' ! name of ocean ropinter file - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine docn_comp_advertise(importState, exportState, flds_scalar_name, & - ocn_prognostic, fldsFrOcn_num, fldsFrOcn, fldsToOcn_num, fldsToOcn, rc) - - ! input/output arguments - type(ESMF_State) , intent(inout) :: importState - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: flds_scalar_name - logical , intent(in) :: ocn_prognostic - integer , intent(out) :: fldsToOcn_num - integer , intent(out) :: fldsFrOcn_num - type (fld_list_type) , intent(out) :: fldsToOcn(:) - type (fld_list_type) , intent(out) :: fldsFrOcn(:) - integer , intent(out) :: rc - - ! local variables - integer :: n - !------------------------------------------------------------------------------- - - !-------------------------------- - ! export fields - !-------------------------------- - - fldsFrOcn_num=1 - fldsFrOcn(1)%stdname = trim(flds_scalar_name) - - ! 
export fields that have no corresponding stream field (computed internally) - - call dshr_fld_add(model_fld='So_omask', model_fld_concat=flds_o2x, model_fld_index=ksomask, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - call dshr_fld_add(model_fld='Fioo_q', model_fld_concat=flds_o2x, model_fld_index=kq, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - - ! export fields that have a corresponding stream field - - call dshr_fld_add(data_fld='t', data_fld_array=avifld, & - model_fld='So_t', model_fld_array=avofld, model_fld_concat=flds_o2x, model_fld_index=kt, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - call dshr_fld_add(data_fld='s', data_fld_array=avifld, & - model_fld='So_s', model_fld_array=avofld, model_fld_concat=flds_o2x, model_fld_index=ks, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - call dshr_fld_add(data_fld='u', data_fld_array=avifld, & - model_fld='So_u', model_fld_array=avofld, model_fld_concat=flds_o2x, model_fld_index=ku, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - call dshr_fld_add(data_fld='v', data_fld_array=avifld, & - model_fld='So_v', model_fld_array=avofld, model_fld_concat=flds_o2x, model_fld_index=kv, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - call dshr_fld_add(data_fld='dhdx', data_fld_array=avifld, & - model_fld='So_dhdx', model_fld_array=avofld, model_fld_concat=flds_o2x, model_fld_index=kdhdx, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - call dshr_fld_add(data_fld='dhdy', data_fld_array=avifld, & - model_fld='So_dhdy', model_fld_array=avofld, model_fld_concat=flds_o2x, model_fld_index=kdhdy, & - fldlist_num=fldsFrOcn_num, fldlist=fldsFrOcn) - - !------------------- - ! import fields (have no corresponding stream fields) - !------------------- - - if (ocn_prognostic) then - - fldsToOcn_num=1 - fldsToOcn(1)%stdname = trim(flds_scalar_name) - - call dshr_fld_add(model_fld='Foxx_swnet', model_fld_concat=flds_x2o, model_fld_index=kswnet, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - call dshr_fld_add(model_fld='Foxx_lwup', model_fld_concat=flds_x2o, model_fld_index=klwup, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - call dshr_fld_add(model_fld='Foxx_sen', model_fld_concat=flds_x2o, model_fld_index=ksen, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - call dshr_fld_add(model_fld='Foxx_lat', model_fld_concat=flds_x2o, model_fld_index=klat, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - call dshr_fld_add(model_fld='Faxa_lwdn', model_fld_concat=flds_x2o, model_fld_index=klwdn, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - call dshr_fld_add(model_fld='Faxa_snow', model_fld_concat=flds_x2o, model_fld_index=ksnow, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - call dshr_fld_add(model_fld='Fioi_melth', model_fld_concat=flds_x2o, model_fld_index=kmelth, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - call dshr_fld_add(model_fld='Foxx_rofi', model_fld_concat=flds_x2o, model_fld_index=krofi, & - fldlist_num=fldsToOcn_num, fldlist=fldsToOcn) - - end if - - !------------------- - ! 
Advertise fields for import and export states - !------------------- - - do n = 1,fldsFrOcn_num - call NUOPC_Advertise(exportState, standardName=fldsFrOcn(n)%stdname, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call ESMF_LogWrite('(ocn_comp_nuopc):(InitializeAdvertise):Fr_ocn'//trim(fldsFrOcn(n)%stdname), & - ESMF_LOGMSG_INFO) - enddo - - if (ocn_prognostic) then - do n = 1,fldsToOcn_num - call NUOPC_Advertise(importState, standardName=fldsToOcn(n)%stdname, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call ESMF_LogWrite('(ocn_comp_nuopc):(InitializeAdvertise):To_ocn'//trim(fldsToOcn(n)%stdname), & - ESMF_LOGMSG_INFO) - end do - end if - - !------------------- - ! Save as module variables for use in debugging - !------------------- - - ocn_prognostic_mod = ocn_prognostic - - !------------------- - ! module character arrays stifld and stofld - !------------------- - - ! - stifld is a character array of stream field names - ! - stofld is a character array of data model field names that have a one-to-one correspondence with names in stifld - ! - flds_strm is a colon delimited string of field names that is created from the field names in stofld for ONLY - ! those field names that are available in the data streams present in SDOCN%sdatm - ! - avstrm is an attribute vector created from flds_strm - - if (ocn_prognostic_mod) then - call dshr_fld_add(data_fld="h" , data_fld_array=stifld, model_fld="strm_h" , model_fld_array=stofld) - call dshr_fld_add(data_fld="qbot", data_fld_array=stifld, model_fld="strm_qbot", model_fld_array=stofld) - end if - - end subroutine docn_comp_advertise - - !=============================================================================== - - subroutine docn_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - scmMode, scmlat, scmlon, calendar, current_ymd, current_tod, modeldt, mesh, nxg, nyg) - - ! !DESCRIPTION: initialize docn model - use pio , only : iosystem_desc_t - use shr_pio_mod, only : shr_pio_getiosys, shr_pio_getiotype - - ! --- input/output arguments --- - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: scmMode ! single column mode - real(R8) , intent(in) :: scmLat ! single column lat - real(R8) , intent(in) :: scmLon ! single column lon - character(len=*) , intent(in) :: calendar ! model calendar type - integer , intent(in) :: current_ymd ! model date - integer , intent(in) :: current_tod ! model sec into model date - integer , intent(in) :: modeldt ! model time step - type(ESMF_Mesh) , intent(in) :: mesh ! ESMF docn mesh - integer , intent(out) :: nxg, nyg - - !--- local variables --- - integer :: n,k ! generic counters - integer :: lsize ! local size - integer :: kfld ! fld index - integer :: cnt ! counter - logical :: exists ! file existance - logical :: exists1 ! file existance - integer :: nu ! 
unit number - type(ESMF_DistGrid) :: distGrid - integer, allocatable, target :: gindex(:) - integer :: rc - type(iosystem_desc_t), pointer :: ocn_pio_subsystem - integer :: dimCount - integer :: tileCount - integer :: deCount - integer :: gsize - integer, allocatable :: elementCountPTile(:) - integer, allocatable :: indexCountPDE(:,:) - integer :: spatialDim - integer :: numOwnedElements - real(R8), pointer :: ownedElemCoords(:) - integer :: klat, klon - character(*), parameter :: F00 = "('(docn_comp_init) ',8a)" - character(*), parameter :: F05 = "('(docn_comp_init) ',a,2f10.4)" - character(*), parameter :: F06 = "('(docn_comp_init) ',a,f10.4)" - character(*), parameter :: subName = "(docn_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DOCN_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDOCN, COMPID) - - !---------------------------------------------------------------------------- - ! Create a data model global seqmap - !---------------------------------------------------------------------------- - - call t_startf('docn_strdata_init') - - if (my_task == master_task) write(logunit,F00) ' initialize DOCN gsmap' - - ! obtain the distgrid from the mesh that was read in - call ESMF_MeshGet(Mesh, elementdistGrid=distGrid, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determin local size on my processor - call ESMF_distGridGet(distGrid, localDe=0, elementCount=lsize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global index space for my processor - allocate(gindex(lsize)) - call ESMF_distGridGet(distGrid, localDe=0, seqIndexList=gindex, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global size of distgrid - call ESMF_distGridGet(distGrid, dimCount=dimCount, deCount=deCount, tileCount=tileCount, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - allocate(elementCountPTile(tileCount)) - call ESMF_distGridGet(distGrid, elementCountPTile=elementCountPTile, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - gsize = 0 - do n = 1,size(elementCountPTile) - gsize = gsize + elementCountPTile(n) - end do - deallocate(elementCountPTile) - - ! create the data model gsmap given the local size, global size and gindex - call mct_gsMap_init( SDOCN%gsmap, gindex, mpicom, compid, lsize, gsize) - deallocate(gindex) - - !---------------------------------------------------------------------------- - ! Initialize SDOCN model domain attributes - !---------------------------------------------------------------------------- - - ! The call to shr_strdata_init_model_domain creates the SDOCN%gsmap which - ! is a '2d1d' decommp (1d decomp of 2d grid) and also create SDOCN%grid - - SDOCN%calendar = trim(shr_cal_calendarName(trim(calendar))) - - if (scmmode) then - if (my_task == master_task) write(logunit,F05) ' scm lon lat = ',scmlon,scmlat - call shr_strdata_init_model_domain(SDOCN, mpicom, compid, my_task, & - scmmode=scmmode, scmlon=scmlon, scmlat=scmlat, gsmap=SDOCN%gsmap) - else if (datamode == 'SST_AQUAPANAL' .or. datamode == 'SST_AQUAPFILE' .or. 
datamode == 'SOM_AQUAP') then - call shr_strdata_init_model_domain(SDOCN, mpicom, compid, my_task, & - reset_domain_mask=.true., gsmap=SDOCN%gsmap) - else - call shr_strdata_init_model_domain(SDOCN, mpicom, compid, my_task, gsmap=SDOCN%gsmap) - end if - - if (my_task == master_task) then - call shr_strdata_print(SDOCN,'SDOCN data') - endif - - ! obtain mesh lats and lons - call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - allocate(ownedElemCoords(spatialDim*numOwnedElements)) - allocate(xc(numOwnedElements), yc(numOwnedElements)) - call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (numOwnedElements /= lsize) then - call shr_sys_abort('ERROR: numOwnedElements is not equal to lsize') - end if - do n = 1,lsize - xc(n) = ownedElemCoords(2*n-1) - yc(n) = ownedElemCoords(2*n) - end do - - ! error check that mesh lats and lons correspond to those on the input domain file - klon = mct_aVect_indexRA(SDOCN%grid%data,'lon') - do n = 1, lsize - if (abs( SDOCN%grid%data%rattr(klon,n) - xc(n)) > 1.e-4) then - write(6,*)'ERROR: DOCN lon diff = ',abs(SDOCN%grid%data%rattr(klon,n) - xc(n)),' too large' - call shr_sys_abort() - end if - !SDOCN%grid%data%rattr(klon,n) = xc(n) ! overwrite ggrid with mesh data - xc(n) = SDOCN%grid%data%rattr(klon,n) ! overwrite mesh data with ggrid data - end do - klat = mct_aVect_indexRA(SDOCN%grid%data,'lat') - do n = 1, lsize - if (abs( SDOCN%grid%data%rattr(klat,n) - yc(n)) > 1.e-4) then - write(6,*)'ERROR: DOCN lat diff = ',abs(SDOCN%grid%data%rattr(klat,n) - yc(n)),' too large' - call shr_sys_abort() - end if - !SDOCN%grid%data%rattr(klat,n) = yc(n) - yc(n) = SDOCN%grid%data%rattr(klat,n) - end do - - ! determine module mask array (imask) - allocate(imask(lsize)) - kmask = mct_aVect_indexRA(SDOCN%grid%data,'mask') - imask(:) = nint(SDOCN%grid%data%rAttr(kmask,:)) - - !---------------------------------------------------------------------------- - ! Initialize the SDOCN streams and mapping of streams to model domain - !---------------------------------------------------------------------------- - - call shr_strdata_init_streams(SDOCN, compid, mpicom, my_task) - call shr_strdata_init_mapping(SDOCN, compid, mpicom, my_task) - - !---------------------------------------------------------------------------- - ! Allocate module arrays - !---------------------------------------------------------------------------- - - allocate(somtp(lsize)) - allocate(tfreeze(lsize)) - - call t_stopf('docn_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('docn_initavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - - call mct_aVect_init(o2x, rList=flds_o2x, lsize=lsize) - call mct_aVect_zero(o2x) - - kfrac = mct_aVect_indexRA(SDOCN%grid%data,'frac') - o2x%rAttr(ksomask,:) = SDOCN%grid%data%rAttr(kfrac,:) - - if (ocn_prognostic_mod) then - call mct_aVect_init(x2o, rList=flds_x2o, lsize=lsize) - call mct_aVect_zero(x2o) - - ! Initialize internal attribute vectors for optional streams - ! Create the colon deliminted list flds_strm based on mapping the - ! input stream fields from SDOCN%avs(n) with names in stifld to flds_strm with the names in stofld - - cnt = 0 - flds_strm = '' - do n = 1,SDOCN%nstreams - ! 
Loop over the field names in stifld - do k = 1,size(stifld) - ! Search input stream n for the field name stifld(k) - kfld = mct_aVect_indexRA(SDOCN%avs(n), trim(stifld(k)), perrWith='quiet') - if (kfld > 0) then - cnt = cnt + 1 - ! Append the colon deliminted flds_strm with the mapped field name stofld(k) - if (cnt == 1) then - flds_strm = trim(stofld(k)) - else - flds_strm = trim(flds_strm)//':'//trim(stofld(k)) - endif - endif - enddo - enddo - - ! Initialize avstrm based on the active streams determined above - if (my_task == master_task) write(logunit,F00) ' flds_strm = ',trim(flds_strm) - call mct_aVect_init(avstrm, rList=flds_strm, lsize=lsize) - call mct_aVect_zero(avstrm) - - ! Note: because the following needs to occur AFTER we determine the fields in - ! flds_strm - the indices below CANNOT be set in the docn_comp_advertise phase - - ! Now set indices into these active streams - kh = mct_aVect_indexRA(avstrm,'strm_h' , perrWith='quiet') - kqbot = mct_aVect_indexRA(avstrm,'strm_qbot', perrWith='quiet') - end if - - call t_stopf('docn_initavs') - - nxg = SDOCN%nxg - nyg = SDOCN%nyg - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - exists = .false. - exists1 = .false. - if (trim(rest_file) == trim(nullstr) .and. & - trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer = ',trim(rpfile) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (exists) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - inquire(file=trim(rest_file),exist=exists1) - endif - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - - call shr_mpi_bcast(exists,mpicom,'exists') - call shr_mpi_bcast(exists1,mpicom,'exists1') - - if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then - if (exists1) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - call shr_pcdf_readwrite('read',SDOCN%pio_subsystem, SDOCN%io_type, & - trim(rest_file), mpicom, gsmap=SDOCN%gsmap, rf1=somtp, rf1n='somtp', & - io_format=SDOCN%io_format) - else - if (my_task == master_task) then - write(logunit,F00) ' file not found, skipping ',trim(rest_file) - end if - endif - endif - - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDOCN,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - endif - - !---------------------------------------------------------------------------- - ! 
Set initial ocn state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - - call docn_comp_run(mpicom=mpicom, compid=compid, my_task=my_task, & - master_task=master_task, inst_suffix=inst_suffix, logunit=logunit, & - read_restart=read_restart, write_restart=.false., & - target_ymd=current_ymd, target_tod=current_tod, modeldt=modeldt) - - if (my_task == master_task) then - write(logunit,F00) 'docn_comp_init done' - end if - - call t_adj_detailf(-2) - - call t_stopf('DOCN_INIT') - - end subroutine docn_comp_init - - !=============================================================================== - - subroutine docn_comp_run(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - target_ymd, target_tod, modeldt, case_name) - - ! !DESCRIPTION: run method for docn model - - ! !INPUT/OUTPUT PARAMETERS: - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: write_restart ! restart alarm is on - integer , intent(in) :: target_ymd ! model date - integer , intent(in) :: target_tod ! model sec into model date - integer , intent(in) :: modeldt - character(len=*) , intent(in), optional :: case_name ! case name - - !--- local --- - integer :: n,nfld ! indices - integer :: lsize ! size of attr vect - real(R8) :: dt ! timestep - integer :: nu ! unit number - character(len=18) :: date_str - character(len=CS) :: fldname - character(len=CL) :: local_case_name - character(*), parameter :: F00 = "('(docn_comp_run) ',8a)" - character(*), parameter :: F01 = "('(docn_comp_run) ',a, i7,2x,i5,2x,i5,2x,d21.14)" - character(*), parameter :: F04 = "('(docn_comp_run) ',2a,2i8,'s')" - character(*), parameter :: F0D = "('(docn_comp_run) ',a, i7,2x,i5,2x,i5,2x,d21.14)" - character(*), parameter :: subName = "(docn_comp_run) " - real(R8), parameter :: & - swp = 0.67_R8*(exp((-1._R8*shr_const_zsrflyr) /1.0_R8)) + 0.33_R8*exp((-1._R8*shr_const_zsrflyr)/17.0_R8) - !------------------------------------------------------------------------------- - - !-------------------- - ! Debug input - !-------------------- - - if (debug_import > 0 .and. my_task == master_task .and. ocn_prognostic_mod) then - do nfld = 1, mct_aVect_nRAttr(x2o) - call shr_string_listGetName(trim(flds_x2o), nfld, fldname) - do n = 1, mct_aVect_lsize(x2o) - write(logunit,F0D)'import: ymd,tod,n = '// trim(fldname),target_ymd, target_tod, & - n, x2o%rattr(nfld,n) - end do - end do - end if - - call t_startf('DOCN_RUN') - call t_barrierf('docn_BARRIER',mpicom) - if(present(case_name)) then - local_case_name = case_name - else - local_case_name = " " - endif - - !-------------------- - ! 
ADVANCE OCN - !-------------------- - - call t_startf('docn') - - !--- defaults, copy all fields from streams to o2x --- - - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - if (ksomask /= 0) then - o2x%rAttr(ksomask, n) = SDOCN%grid%data%rAttr(kfrac,n) - end if - o2x%rAttr(kt ,n) = TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - ! NOTE: for SST_AQUAPANAL, the docn buildnml sets the stream to "null" - ! and thereby shr_strdata_advance does nothing - - call t_startf('docn_strdata_advance') - call shr_strdata_advance(SDOCN, target_ymd, target_tod, mpicom, 'docn') - call t_stopf('docn_strdata_advance') - - !--- copy streams to o2x --- - call t_barrierf('docn_scatter_BARRIER', mpicom) - call t_startf('docn_scatter') - do n = 1, SDOCN%nstreams - call shr_dmodel_translateAV(SDOCN%avs(n), o2x, avifld, avofld) - enddo - call t_stopf('docn_scatter') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('docn_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - case('SSTDATA') - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - o2x%rAttr(kt ,n) = o2x%rAttr(kt,n) + TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - case('SST_AQUAPANAL') - lsize = mct_avect_lsize(o2x) - ! Zero out the attribute vector before calling the prescribed_sst - ! function - so this also zeroes out the So_omask if it is needed - ! so need to re-introduce it - do n = 1,lsize - o2x%rAttr(:,n) = 0.0_r8 - end do - call prescribed_sst(xc, yc, lsize, aquap_option, o2x%rAttr(kt,:)) - do n = 1,lsize - o2x%rAttr(kt,n) = o2x%rAttr(kt,n) + TkFrz - if (ksomask /= 0) then - o2x%rAttr(ksomask, n) = SDOCN%grid%data%rAttr(kfrac,n) - end if - enddo - - case('SST_AQUAPFILE') - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - o2x%rAttr(kt ,n) = o2x%rAttr(kt,n) + TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - case('IAF') - lsize = mct_avect_lsize(o2x) - do n = 1,lsize - o2x%rAttr(kt ,n) = o2x%rAttr(kt,n) + TkFrz - o2x%rAttr(ks ,n) = ocnsalt - o2x%rAttr(ku ,n) = 0.0_R8 - o2x%rAttr(kv ,n) = 0.0_R8 - o2x%rAttr(kdhdx,n) = 0.0_R8 - o2x%rAttr(kdhdy,n) = 0.0_R8 - o2x%rAttr(kq ,n) = 0.0_R8 - if (kswp /= 0) then - o2x%rAttr(kswp ,n) = swp - end if - enddo - - case('SOM') - lsize = mct_avect_lsize(o2x) - do n = 1,SDOCN%nstreams - call shr_dmodel_translateAV(SDOCN%avs(n),avstrm,stifld,stofld) - enddo - if (firstcall) then - do n = 1,lsize - if (.not. read_restart) then - somtp(n) = o2x%rAttr(kt,n) + TkFrz - endif - o2x%rAttr(kt,n) = somtp(n) - o2x%rAttr(kq,n) = 0.0_R8 - enddo - else ! firstcall - tfreeze = shr_frz_freezetemp(o2x%rAttr(ks,:)) + TkFrz - dt = modeldt * 1.0_R8 - do n = 1,lsize - if (imask(n) /= 0) then - !--- compute new temp --- - o2x%rAttr(kt,n) = somtp(n) + & - (x2o%rAttr(kswnet,n) + & ! shortwave - x2o%rAttr(klwup ,n) + & ! longwave - x2o%rAttr(klwdn ,n) + & ! longwave - x2o%rAttr(ksen ,n) + & ! 
sensible - x2o%rAttr(klat ,n) + & ! latent - x2o%rAttr(kmelth,n) - & ! ice melt - avstrm%rAttr(kqbot ,n) - & ! flux at bottom - (x2o%rAttr(ksnow,n)+x2o%rAttr(krofi,n))*latice) * & ! latent by prec and roff - dt/(cpsw*rhosw* avstrm%rAttr(kh,n)) - !--- compute ice formed or melt potential --- - o2x%rAttr(kq,n) = (tfreeze(n) - o2x%rAttr(kt,n))*(cpsw*rhosw*avstrm%rAttr(kh,n))/dt ! ice formed q>0 - o2x%rAttr(kt,n) = max(tfreeze(n),o2x%rAttr(kt,n)) ! reset temp - somtp(n) = o2x%rAttr(kt,n) ! save temp - endif - end do - endif ! firstcall - - case('SOM_AQUAP') - lsize = mct_avect_lsize(o2x) - do n = 1,SDOCN%nstreams - call shr_dmodel_translateAV(SDOCN%avs(n),avstrm,stifld,stofld) - enddo - if (firstcall) then - do n = 1,lsize - if (.not. read_restart) then - somtp(n) = o2x%rAttr(kt,n) + TkFrz - endif - o2x%rAttr(kt,n) = somtp(n) - o2x%rAttr(kq,n) = 0.0_R8 - enddo - else ! firstcall - tfreeze = shr_frz_freezetemp(o2x%rAttr(ks,:)) + TkFrz - do n = 1,lsize - !--- compute new temp --- - o2x%rAttr(kt,n) = somtp(n) + & - (x2o%rAttr(kswnet,n) + & ! shortwave - x2o%rAttr(klwup ,n) + & ! longwave - x2o%rAttr(klwdn ,n) + & ! longwave - x2o%rAttr(ksen ,n) + & ! sensible - x2o%rAttr(klat ,n) + & ! latent - x2o%rAttr(kmelth,n) - & ! ice melt - avstrm%rAttr(kqbot ,n) - & ! flux at bottom - (x2o%rAttr(ksnow,n)+x2o%rAttr(krofi,n))*latice) * & ! latent by prec and roff - dt/(cpsw*rhosw*avstrm%rAttr(kh,n)) - !--- compute ice formed or melt potential --- - o2x%rAttr(kq,n) = (tfreeze(n) - o2x%rAttr(kt,n))*(cpsw*rhosw*avstrm%rAttr(kh,n))/dt ! ice formed q>0 - somtp(n) = o2x%rAttr(kt,n) ! save temp - enddo - endif ! firstcall - - end select - - call t_stopf('docn_datamode') - - !-------------------- - ! Debug output - !-------------------- - - if (debug_export > 1 .and. my_task == master_task) then - do nfld = 1, mct_aVect_nRAttr(o2x) - call shr_string_listGetName(trim(flds_o2x), nfld, fldname) - do n = 1, mct_aVect_lsize(o2x) - write(logunit,F0D)'export: ymd,tod,n = '// trim(fldname),target_ymd, target_tod, & - n, o2x%rattr(nfld,n) - end do - end do - end if - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('docn_restart') - call shr_cal_datetod2string(date_str, target_ymd, target_tod) - write(rest_file,"(6a)") & - trim(case_name), '.docn',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.docn',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then - if (my_task == master_task) then - write(logunit,F04) ' writing ',trim(rest_file),target_ymd,target_tod - end if - call shr_pcdf_readwrite('write', SDOCN%pio_subsystem, SDOCN%io_type,& - trim(rest_file), mpicom, SDOCN%gsmap, clobber=.true., rf1=somtp,rf1n='somtp') - endif - if (my_task == master_task) then - write(logunit,F04) ' writing ',trim(rest_file_strm),target_ymd,target_tod - end if - call shr_strdata_restWrite(trim(rest_file_strm), SDOCN, mpicom, trim(case_name), 'SDOCN strdata') - call t_stopf('docn_restart') - endif - - firstcall = .false. 
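
The SOM and SOM_AQUAP branches above advance the slab mixed-layer temperature with a simple heat budget and return an ice-formation potential (Fioo_q). A minimal Python sketch of that update follows; the constant values only approximate the shared constants from shr_const_mod, and every argument name is illustrative rather than the component's actual interface.

    # Minimal sketch (not the component code) of the slab-ocean update above.
    CPSW   = 3.996e3    # specific heat of sea water  [J/kg/K]  (approx. shr_const_cpsw)
    RHOSW  = 1.026e3    # density of sea water        [kg/m^3]  (approx. shr_const_rhosw)
    LATICE = 3.337e5    # latent heat of fusion       [J/kg]    (approx. shr_const_latice)

    def som_step(t_old, tfreeze, h, dt,
                 swnet, lwup, lwdn, sen, lat, melth, qbot, snow, rofi):
        """Return (t_new, q): updated mixed-layer temperature and the
        ice-formation potential q (q > 0 means heat must be extracted,
        i.e. ice can form)."""
        net = (swnet + lwup + lwdn + sen + lat + melth
               - qbot                      # heat flux through the slab bottom
               - (snow + rofi) * LATICE)   # latent sink from frozen precip / ice runoff
        t_new = t_old + net * dt / (CPSW * RHOSW * h)
        q = (tfreeze - t_new) * (CPSW * RHOSW * h) / dt
        t_new = max(tfreeze, t_new)        # temperature is held at or above freezing
        return t_new, q
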
- - call t_stopf('docn') - call t_stopf('DOCN_RUN') - - end subroutine docn_comp_run - - !=============================================================================== - - subroutine docn_comp_import(importState, rc) - - ! input/output variables - type(ESMF_State) :: importState - integer, intent(out) :: rc - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call dshr_import(importState, 'Foxx_swnet', x2o%rattr(kswnet,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Foxx_lwup', x2o%rattr(klwup,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Foxx_sen', x2o%rattr(ksen,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Foxx_lat', x2o%rattr(klat,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Faxa_lwdn', x2o%rattr(klwdn,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Faxa_snow', x2o%rattr(ksnow,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_import(importState, 'Fioi_melth', x2o%rattr(kmelth,:), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine docn_comp_import - - !=============================================================================== - - subroutine docn_comp_export(exportState, rc) - - ! input/output variables - type(ESMF_State) :: exportState - integer, intent(out) :: rc - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call dshr_export(o2x%rattr(ksomask,:), exportState, 'So_omask', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(o2x%rattr(kt,:), exportState, 'So_t', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(o2x%rattr(ks,:), exportState, 'So_s', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(o2x%rattr(ku,:), exportState, 'So_u', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(o2x%rattr(kv,:), exportState, 'So_v', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(o2x%rattr(kdhdx,:), exportState, 'So_dhdx', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(o2x%rattr(kdhdy,:), exportState, 'So_dhdy', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call dshr_export(o2x%rattr(kq,:), exportState, 'Fioo_q', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine docn_comp_export - - !=============================================================================== - - subroutine prescribed_sst(xc, yc, lsize, sst_option, sst) - - real(R8) , intent(in) :: xc(:) !degrees - real(R8) , intent(in) :: yc(:) !degrees - integer , intent(in) :: lsize - integer , intent(in) :: sst_option - real(R8) , intent(inout) :: sst(:) - - ! local - integer :: i - real(r8) :: tmp, tmp1, pi - real(r8) :: rlon(lsize), rlat(lsize) - - real(r8), parameter :: pio180 = SHR_CONST_PI/180._r8 - - ! Parameters for zonally symmetric experiments - real(r8), parameter :: t0_max = 27._r8 - real(r8), parameter :: t0_min = 0._r8 - real(r8), parameter :: maxlat = 60._r8*pio180 - real(r8), parameter :: shift = 5._r8*pio180 - real(r8), parameter :: shift9 = 10._r8*pio180 - real(r8), parameter :: shift10 = 15._r8*pio180 - - ! 
Parameters for zonally asymmetric experiments - real(r8), parameter :: t0_max6 = 1._r8 - real(r8), parameter :: t0_max7 = 3._r8 - real(r8), parameter :: latcen = 0._r8*pio180 - real(r8), parameter :: loncen = 0._r8*pio180 - real(r8), parameter :: latrad6 = 15._r8*pio180 - real(r8), parameter :: latrad8 = 30._r8*pio180 - real(r8), parameter :: lonrad = 30._r8*pio180 - !------------------------------------------------------------------------------- - - pi = SHR_CONST_PI - - ! convert xc and yc from degrees to radians - - rlon(:) = xc(:) * pio180 - rlat(:) = yc(:) * pio180 - - ! Control - - if (sst_option < 1 .or. sst_option > 10) then - call shr_sys_abort ('prescribed_sst: ERROR: sst_option must be between 1 and 10') - end if - - if (sst_option == 1 .or. sst_option == 6 .or. sst_option == 7 .or. sst_option == 8) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = sin(rlat(i)*pi*0.5_r8/maxlat) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Flat - - if (sst_option == 2) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = sin(rlat(i)*pi*0.5_r8/maxlat) - tmp = 1._r8 - tmp*tmp*tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Qobs - - if (sst_option == 3) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = sin(rlat(i)*pi*0.5_r8/maxlat) - tmp = (2._r8 - tmp*tmp*tmp*tmp - tmp*tmp)*0.5_r8 - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Peaked - - if (sst_option == 4) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else - tmp = (maxlat - abs(rlat(i)))/maxlat - tmp1 = 1._r8 - tmp - sst(i) = t0_max*tmp + t0_min*tmp1 - end if - end do - end if - - ! Control-5N - - if (sst_option == 5) then - do i = 1,lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else if (rlat(i) > shift) then - tmp = sin((rlat(i)-shift)*pi*0.5_r8/(maxlat-shift)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - else - tmp = sin((rlat(i)-shift)*pi*0.5_r8/(maxlat+shift)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! 1KEQ - - if (sst_option == 6) then - do i = 1,lsize - if (abs(rlat(i)-latcen) <= latrad6) then - tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad6) - tmp1 = tmp1*tmp1 - tmp = abs(rlon(i)-loncen) - tmp = min(tmp , 2._r8*pi-tmp) - if(tmp <= lonrad) then - tmp = cos(tmp*pi*0.5_r8/lonrad) - tmp = tmp*tmp - sst(i) = sst(i) + t0_max6*tmp*tmp1 - end if - end if - end do - end if - - ! 3KEQ - - if (sst_option == 7) then - do i = 1, lsize - if (abs(rlat(i)-latcen) <= latrad6) then - tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad6) - tmp1 = tmp1*tmp1 - tmp = abs(rlon(i)-loncen) - tmp = min(tmp , 2._r8*pi-tmp) - if (tmp <= lonrad) then - tmp = cos(tmp*pi*0.5_r8/lonrad) - tmp = tmp*tmp - sst(i) = sst(i) + t0_max7*tmp*tmp1 - end if - end if - end do - end if - - ! 3KW1 - - if (sst_option == 8) then - do i = 1, lsize - if (abs(rlat(i)-latcen) <= latrad8) then - tmp1 = cos((rlat(i)-latcen)*pi*0.5_r8/latrad8) - tmp1 = tmp1*tmp1 - tmp = cos(rlon(i)-loncen) - sst(i) = sst(i) + t0_max7*tmp*tmp1 - end if - end do - end if - - ! 
Control-10N - - if (sst_option == 9) then - do i = 1, lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else if (rlat(i) > shift9) then - tmp = sin((rlat(i)-shift9)*pi*0.5_r8/(maxlat-shift9)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - else - tmp = sin((rlat(i)-shift9)*pi*0.5_r8/(maxlat+shift9)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - ! Control-15N - - if (sst_option == 10) then - do i = 1, lsize - if (abs(rlat(i)) > maxlat) then - sst(i) = t0_min - else if(rlat(i) > shift10) then - tmp = sin((rlat(i)-shift10)*pi*0.5_r8/(maxlat-shift10)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - else - tmp = sin((rlat(i)-shift10)*pi*0.5_r8/(maxlat+shift10)) - tmp = 1._r8 - tmp*tmp - sst(i) = tmp*(t0_max - t0_min) + t0_min - end if - end do - end if - - end subroutine prescribed_sst - -end module docn_comp_mod diff --git a/src/components/data_comps/docn/nuopc/docn_shr_mod.F90 b/src/components/data_comps/docn/nuopc/docn_shr_mod.F90 deleted file mode 100644 index 8fdc128fc84..00000000000 --- a/src/components/data_comps/docn/nuopc/docn_shr_mod.F90 +++ /dev/null @@ -1,164 +0,0 @@ -module docn_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: docn_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! Note that model decomp will now come from reading in the mesh directly - - ! stream data type - type(shr_strdata_type), public :: SDOCN - - ! input namelist variables - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - integer(IN) , public :: aquap_option - character(len=*), public, parameter :: nullstr = 'undefined' - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine docn_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, ocn_prognostic) - - ! !DESCRIPTION: Read in docn namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: filename ! input namelist filename - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(out) :: ocn_prognostic ! flag - - !--- local variables --- - integer(IN) :: nunit ! 
unit number - integer(IN) :: ierr ! error code - character(CL) :: decomp ! decomp strategy - not used for NUOPC - but still needed in namelist for now - - !--- formats --- - character(*), parameter :: F00 = "('(docn_comp_init) ',8a)" - character(*), parameter :: F0L = "('(docn_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(docn_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(docn_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(docn_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_docn_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / docn_nml / decomp, & - restfilm, restfils, force_prognostic_true - - !---------------------------------------------------------------------------- - ! Read docn_in - !---------------------------------------------------------------------------- - - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - if (my_task == master_task) then - nunit = shr_file_getUnit() ! get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=docn_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - endif - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDOCN,trim(filename),mpicom=mpicom) - - datamode = trim(SDOCN%dataMode) - - ! Special logic for prescribed aquaplanet - - if (datamode(1:9) == 'SST_AQUAP' .and. trim(datamode) /= 'SST_AQUAPFILE') then - ! First determine the prescribed aquaplanet option - if (len_trim(datamode) == 10) then - read(datamode(10:10),'(i1)') aquap_option - else if (len_trim(datamode) == 11) then - read(datamode(10:11),'(i2)') aquap_option - end if - ! Now remove the index from the datamode value, to have a generic setting - ! for use below - datamode = "SST_AQUAPANAL" - end if - - ! Validate mode - - if ( trim(datamode) == 'NULL' .or. & - trim(datamode) == 'SSTDATA' .or. & - trim(datamode) == 'SST_AQUAPANAL' .or. & - trim(datamode) == 'SST_AQUAPFILE' .or. & - trim(datamode) == 'COPYALL' .or. & - trim(datamode) == 'IAF' .or. & - trim(datamode) == 'SOM' .or. & - trim(datamode) == 'SOM_AQUAP') then - if (my_task == master_task) then - write(logunit,F00) ' docn datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal docn datamode = ',trim(datamode) - call shr_sys_abort() - endif - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flag - !---------------------------------------------------------------------------- - - ocn_prognostic = .false. - if (force_prognostic_true) then - ocn_prognostic = .true. - end if - if (trim(datamode) == 'IAF') then - ocn_prognostic = .true. 
- end if - if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then - ocn_prognostic = .true. - endif - - end subroutine docn_shr_read_namelists - -end module docn_shr_mod diff --git a/src/components/data_comps/docn/nuopc/ocn_comp_nuopc.F90 b/src/components/data_comps/docn/nuopc/ocn_comp_nuopc.F90 deleted file mode 100644 index a60e2ce5eb0..00000000000 --- a/src/components/data_comps/docn/nuopc/ocn_comp_nuopc.F90 +++ /dev/null @@ -1,564 +0,0 @@ -module ocn_comp_nuopc - - !---------------------------------------------------------------------------- - ! This is the NUOPC cap for DOCN - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_cal_mod , only : shr_cal_noleap, shr_cal_gregorian, shr_cal_ymd2date - use shr_const_mod , only : SHR_CONST_SPVAL - use shr_sys_mod , only : shr_sys_abort - use shr_const_mod , only : shr_const_spval, shr_const_pi - use dshr_nuopc_mod , only : fld_list_type, fldsMax, dshr_realize - use dshr_nuopc_mod , only : ModelInitPhase, ModelSetRunClock, ModelSetMetaData - use dshr_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dshr_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use docn_shr_mod , only : docn_shr_read_namelists - use docn_comp_mod , only : docn_comp_init, docn_comp_run, docn_comp_advertise - use docn_comp_mod , only : docn_comp_import, docn_comp_export - - implicit none - private ! except - - public :: SetServices - - private :: InitializeAdvertise - private :: InitializeRealize - private :: ModelAdvance - private :: ModelFinalize - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CS) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - - integer :: fldsToOcn_num = 0 - integer :: fldsFrOcn_num = 0 - type (fld_list_type) :: fldsToOcn(fldsMax) - type (fld_list_type) :: fldsFrOcn(fldsMax) - - integer :: compid ! mct comp id - integer :: mpicom ! mpi communicator - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer, parameter :: master_task=0 ! task number of master task - logical :: read_restart ! start from restart - character(CL) :: case_name ! case name - character(len=80) :: calendar ! calendar name - logical :: ocn_present ! flag - logical :: ocn_prognostic ! flag - integer :: logunit ! logging unit number - logical :: use_esmf_metadata = .false. 
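
The namelist logic in docn_shr_read_namelists above folds the analytic aquaplanet variants into a single mode: a trailing index on 'SST_AQUAP<n>' is read into aquap_option and the mode is then reset to the generic 'SST_AQUAPANAL', while IAF, SOM, SOM_AQUAP, or force_prognostic_true mark the data ocean as prognostic. A rough Python equivalent is sketched below; the function name and return shape are hypothetical, chosen only for illustration.

    # Hypothetical helper; mirrors the Fortran logic above, not a CIME API.
    def parse_docn_datamode(datamode, force_prognostic_true=False):
        aquap_option = None
        if datamode.startswith("SST_AQUAP") and datamode != "SST_AQUAPFILE":
            suffix = datamode[len("SST_AQUAP"):]
            if suffix.isdigit():                 # e.g. 'SST_AQUAP3' -> option 3
                aquap_option = int(suffix)
            datamode = "SST_AQUAPANAL"           # generic analytic-SST mode
        ocn_prognostic = force_prognostic_true or \
            datamode in ("IAF", "SOM", "SOM_AQUAP")
        return datamode, aquap_option, ocn_prognostic

    assert parse_docn_datamode("SST_AQUAP3") == ("SST_AQUAPANAL", 3, False)
    assert parse_docn_datamode("SOM") == ("SOM", None, True)
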
- character(*),parameter :: modName = "(ocn_comp_nuopc)" - integer, parameter :: debug_import = 0 ! if > 0 will diagnose import fields - integer, parameter :: debug_export = 0 ! if > 0 will diagnose export fields - character(*),parameter :: u_FILE_u = & - __FILE__ - - !=============================================================================== - contains - !=============================================================================== - - subroutine SetServices(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p3"/), userRoutine=InitializeRealize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, & - specRoutine=ModelAdvance, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, & - specRoutine=ModelSetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, & - specRoutine=ModelFinalize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - integer :: shrlogunit ! original log unit - character(len=CL) :: fileName ! generic file name - character(len=CL) :: cvalue - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! 
get mpi data - !---------------------------------------------------------------------------- - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, mpiCommunicator=mpicom, localPet=my_task, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - inst_name = "OCN"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Read input namelists and set present and prognostic flags - !---------------------------------------------------------------------------- - - filename = "docn_in"//trim(inst_suffix) - call docn_shr_read_namelists(filename, mpicom, my_task, master_task, logunit, ocn_prognostic) - - !-------------------------------- - ! Advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call docn_comp_advertise(importstate, exportState, flds_scalar_name, & - ocn_prognostic, fldsFrOcn_num, fldsFrOcn, fldsToOcn_num, fldsToOcn, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - use shr_const_mod, only : shr_const_spval - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - integer :: n - integer :: nxg, nyg - character(CL) :: cvalue - type(ESMF_Mesh) :: Emesh - type(ESMF_Time) :: currTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_Calendar) :: esmf_calendar ! esmf calendar - type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - integer :: current_ymd ! model date - integer :: current_year ! model year - integer :: current_mon ! model month - integer :: current_day ! model day - integer :: current_tod ! model sec into model date - integer :: modeldt ! model timestep - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_spval ! single column lat - real(R8) :: scmLon = shr_const_spval ! single column lon - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! Determine necessary config variables - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='case_name', value=case_name, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name='scmlon', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmlon - - call NUOPC_CompAttributeGet(gcomp, name='scmlat', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmlat - - call NUOPC_CompAttributeGet(gcomp, name='single_column', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scmMode - - call NUOPC_CompAttributeGet(gcomp, name='read_restart', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) read_restart - - call NUOPC_CompAttributeGet(gcomp, name='MCTID', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) compid - - !---------------------------------------------------------------------------- - ! 
Determine calendar info - !---------------------------------------------------------------------------- - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_TimeGet( currTime, yy=current_year, mm=current_mon, dd=current_day, s=current_tod, & - calkindflag=esmf_caltype, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(current_year, current_mon, current_day, current_ymd) - - if (esmf_caltype == ESMF_CALKIND_NOLEAP) then - calendar = shr_cal_noleap - else if (esmf_caltype == ESMF_CALKIND_GREGORIAN) then - calendar = shr_cal_gregorian - else - call ESMF_LogWrite(subname//" ERROR bad ESMF calendar name "//trim(calendar), ESMF_LOGMSG_ERROR) - rc = ESMF_Failure - return - end if - - call ESMF_TimeIntervalGet( timeStep, s=modeldt, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Generate the mesh - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='mesh_ocn', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (my_task == master_task) then - write(logunit,*) " obtaining docn mesh from " // trim(cvalue) - end if - - Emesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize model - !---------------------------------------------------------------------------- - - call docn_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - scmMode, scmlat, scmlon, calendar, current_ymd, current_tod, modeldt, Emesh, nxg, nyg) - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - ! export fields - call dshr_realize( & - state=ExportState, & - fldList=fldsFrOcn, & - numflds=fldsFrOcn_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':docnExport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! import fields - call dshr_realize( & - state=importState, & - fldList=fldsToOcn, & - numflds=fldsToOcn_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':docnImport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - ! Set the coupling scalars - !-------------------------------- - - call docn_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug_export > 0) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - if (use_esmf_metadata) then - call ModelSetMetaData(gcomp, name='DOCN', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: importState, exportState - type(ESMF_Time) :: time - type(ESMF_Alarm) :: alarm - type(ESMF_Time) :: currTime - type(ESMF_Time) :: nextTime - type(ESMF_TimeInterval) :: timeStep - logical :: write_restart ! restart alarm is ringing - integer :: currentYMD ! model date - integer :: currentTOD ! model sec into model date - integer :: nextYMD ! model date - integer :: nextTOD ! model sec into model date - integer :: yr ! year - integer :: mon ! month - integer :: day ! day in month - integer :: modeldt ! model timestep - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - call memcheck(subname, 5, my_task==master_task) - - !-------------------------------- - ! Reset shr logging to my log file - !-------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! query the Component for its clock, importState and exportState - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, exportState=exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Unpack import state - !-------------------------------- - - if (ocn_prognostic) then - call docn_comp_import(importState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - !-------------------------------- - ! Run model - !-------------------------------- - - ! Determine if need to write restarts - - call ESMF_ClockGetAlarm(clock, alarmname='alarm_restart', alarm=alarm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (ESMF_AlarmIsRinging(alarm, rc=rc)) then - if (ChkErr(rc,__LINE__,u_FILE_u)) return - write_restart = .true. - call ESMF_AlarmRingerOff( alarm, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - write_restart = .false. - endif - - ! For nuopc - the component clock is advanced at the end of the time interval - ! For these to match for now - need to advance nuopc one timestep ahead for - ! shr_strdata time interpolation - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - nextTime = currTime + timeStep - call ESMF_TimeGet( nextTime, yy=yr, mm=mon, dd=day, s=nexttod, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(yr, mon, day, nextymd) - - call ESMF_TimeIntervalGet( timeStep, s=modeldt, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! 
Advance the model - - call docn_comp_run(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - nextYMD, nextTOD, modeldt, case_name=case_name) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call docn_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug_export > 0) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'DICE', logunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - endif - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - call shr_file_setLogUnit (shrlogunit) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(*), parameter :: F00 = "('(docn_comp_final) ',8a)" - character(*), parameter :: F91 = "('(docn_comp_final) ',73('-'))" - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) 'docn : end of main integration loop' - write(logunit,F91) - end if - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelFinalize - -end module ocn_comp_nuopc diff --git a/src/components/data_comps/docn/tools/pop_som_frc/README b/src/components/data_comps/docn/tools/pop_som_frc/README deleted file mode 100644 index f1be314c244..00000000000 --- a/src/components/data_comps/docn/tools/pop_som_frc/README +++ /dev/null @@ -1,65 +0,0 @@ - -Slab Ocean Model (SOM) forcing files. - -The two main scripts in this directory are: - -pop_frc.csh - -and - -pop_frc*.ncl - -Step 1: - -First, one needs to edit the 'pop_frc.csh' script to select a particular case -name from which to make a monthly-mean annual climatology. - -1. Change CASE to the case name you are interested in. -2. Set BEGYR and ENDYR to the appropriate years for your climatology. -3. If there are coupler history files available, these can be used - for the velocities and sea surface tilt terms. Set CPLHIST to TRUE. -3. The script assumes the 'pop_frc' directory sits in your home directory, - and your work directory is /ptmp/${USER}/${CASE}. These can be changed - as needed. -4. Specify the path name to the mass store directory where the case resides. - -Execute the script by typing, say: - -./pop_frc.csh > pop_frc.out & - -For a twenty year climatology, the script takes about a day of wall-clock time -on gale, gust, breeze, etc. - -Step 2: - -Once the two climatogical files have been created, you can run one of -the NCL scripts, 'pop_frc*.ncl'. You will need to edit the script and -point to the POP and coupler (popmac and cplmac) climatological files. 
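Conceptually, the popmac and cplmac inputs are 12-record monthly-mean annual climatologies: every January in the BEGYR..ENDYR window is averaged into one record, every February into the next, and so on. A minimal numpy sketch of that reduction, using synthetic data and toy grid dimensions rather than real POP or coupler history files (the actual work is done by the per-month ncra calls and the final ncrcat in pop_frc.csh):

import numpy as np

# Purely illustrative sizes; a real gx1 run would be closer to 384 x 320.
nyears, nlat, nlon = 20, 16, 32                     # e.g. BEGYR..ENDYR = 481..500
monthly = np.random.rand(nyears * 12, nlat, nlon)   # one history record per month per year

# Average all Januaries together, all Februaries together, etc., keeping the
# twelve monthly means as a single 12-record climatology.
climatology = monthly.reshape(nyears, 12, nlat, nlon).mean(axis=0)
assert climatology.shape == (12, nlat, nlon)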
- -pop_frc_mlt.ncl* -> uses mixed-layer temperature to derive the Q-flux - -pop_frc_mlann.ncl -> uses mixed-layer temperature to derive the Q-flux - also produces velocities and salinities averaged - over the annual mean ml depth - -pop_frc_interp.ncl -> Interpolates the data onto a standard 1x1d grid - -* recommended script - -Execute the script by typing: - -ncl pop_frc_mlt.ncl - -A file called 'oceanmixed_ice.nc' should be created with the appropriate SOM -forcing fields. To make this file usable by the DOCN7 SOM, have a look in: - -/fs/cgd/csm/inputdata/ocn/docn7/SOM - -The file 'oceanmixed_ice.nc' should be renamed with the convention: - -pop_frc.${GRID}.yymmdd.nc - -A text stream file also needs to accompany the netcdf file. See the examples -in the above directory for the conventions. - - diff --git a/src/components/data_comps/docn/tools/pop_som_frc/SOM.doc b/src/components/data_comps/docn/tools/pop_som_frc/SOM.doc deleted file mode 100644 index b1902964485..00000000000 Binary files a/src/components/data_comps/docn/tools/pop_som_frc/SOM.doc and /dev/null differ diff --git a/src/components/data_comps/docn/tools/pop_som_frc/SOM.pdf b/src/components/data_comps/docn/tools/pop_som_frc/SOM.pdf deleted file mode 100644 index 92d15e00483..00000000000 Binary files a/src/components/data_comps/docn/tools/pop_som_frc/SOM.pdf and /dev/null differ diff --git a/src/components/data_comps/docn/tools/pop_som_frc/pop_frc.csh b/src/components/data_comps/docn/tools/pop_som_frc/pop_frc.csh deleted file mode 100755 index 851d97129e7..00000000000 --- a/src/components/data_comps/docn/tools/pop_som_frc/pop_frc.csh +++ /dev/null @@ -1,98 +0,0 @@ -#/bin/csh -f - -# This shell script creates a seasonal cycle climatology over a specified -# number of years. The pop history files are quite large, so each month is -# read for all years from the mass store, the climatology for that month -# is created and the history files are deleted. The coupler history files -# are much smaller and can all be read off the mass store at once. - -set echo on - -# Need to set the CASE name for the run to extract POP and CPL history -# files from and the start and end years of the period for the climatology. - -setenv CASE b40.1850.track1.2deg.003 - -set BEGYR = 481 -set ENDYR = 500 - -# Set the mass store userid where the files are located. If the run did -# not save the CPL history files, set CPLFILES to FALSE. If you have plenty -# of local space for the POP history files, set TONS_O_SPACE to TRUE. - -set CCSMUSER = CCSM -set CPLFILES = FALSE -set TONS_O_SPACE = TRUE - -set SCRIPT_HOME = ${HOME}/pop_frc -setenv PATH_MSS /${CCSMUSER}/csm/${CASE}/ocn/hist/ - -set DATE_FORMAT = 'yyyy-mm' - -setenv WKDIR /biptmp/${USER}/${CASE} -setenv FILE_HEADER ${CASE}.pop.h. - -mkdir -p $WKDIR -cd $WKDIR - -if !( -e ${CASE}.pop.h.${BEGYR}-${ENDYR}.MAC.nc ) then - -# If you have plenty of local disk space for the POP history files. -if ( $TONS_O_SPACE == 'TRUE' ) then - ${SCRIPT_HOME}/read_from_mss.csh $DATE_FORMAT $CASE $BEGYR $ENDYR -endif - -foreach MONTH ( 01 02 03 04 05 06 07 08 09 10 11 12 ) - -# Otherwise if you have limited local disk space, just read a single -# month from each year and delete. 
-if !( $TONS_O_SPACE == 'TRUE' ) then - ${SCRIPT_HOME}/read_from_mss_month.csh $DATE_FORMAT $CASE $BEGYR $ENDYR $MONTH -endif - - ncra -O -vTEMP,SALT,UVEL,VVEL,SHF,QFLUX,MELTH_F,RESID_T,HBLT,REGION_MASK,TAREA,TLONG,TLAT,ANGLET *-${MONTH}.nc ${MONTH}.nc - -if !( $TONS_O_SPACE == 'TRUE' ) then - /bin/rm -f *-??.nc -endif - -end - -ncrcat -O ??.nc ${CASE}.pop.h.${BEGYR}-${ENDYR}.MAC.nc - -/bin/rm -f ??.nc - -endif - -if ( $CPLFILES == TRUE ) then - -setenv PATH_MSS /${CCSMUSER}/csm/${CASE}/cpl/hist/ -setenv FILE_HEADER ${CASE}.cpl6.ha. - -mkdir -p $WKDIR -cd $WKDIR - -if !( -e ${CASE}.cpl6.ha.${BEGYR}-${ENDYR}.MAC.nc ) then - -${SCRIPT_HOME}/read_from_mss.csh $DATE_FORMAT $CASE $BEGYR $ENDYR - -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-01.nc 01.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-02.nc 02.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-03.nc 03.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-04.nc 04.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-05.nc 05.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-06.nc 06.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-07.nc 07.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-08.nc 08.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-09.nc 09.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-10.nc 10.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-11.nc 11.nc -ncra -O -v avXc2i_i_So_u,avXc2i_i_So_v,avXc2i_i_So_dhdx,avXc2i_i_So_dhdy *-12.nc 12.nc - -ncrcat -O ??.nc ${CASE}.cpl6.ha.${BEGYR}-${ENDYR}.MAC.nc -/bin/rm -f *-??.nc ??.nc - -endif -endif - -end diff --git a/src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlann.ncl b/src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlann.ncl deleted file mode 100644 index bdf0ef470b3..00000000000 --- a/src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlann.ncl +++ /dev/null @@ -1,296 +0,0 @@ - -; This NCL script takes the two climatological files created by pop_frc.csh -; and creates a slab ocean model (SOM) forcing file. This script does the -; same steps as the pop_frc.m matlab script used by C. Bitz. - -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl" -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl" -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl" - -case = "b40.999" - -begin - -; Need to point to climatologies created by pop_frc.csh. - - popmac = "/biptmp/dbailey/"+case+"/"+case+".pop.h.481-500.MAC.nc" -;cplmac = "/biptmp/dbailey/"+case+"/"+case+".cpl6.h.241-260.MAC.nc" - - f1 = addfile(popmac,"r") -;f2 = addfile(cplmac,"r") - f3 = addfile("/fis/cgd/cseg/csm/inputdata/ocn/docn7/domain.ocn.gx1v5.061230.nc","r") - - time = (/14., 46., 74., 105., 135., 166., 196., 227., 258., 288., 319., 349./) - maskr = f1->REGION_MASK - delete(maskr@coordinates) - area = f3->area - delete(area@coordinates) - tarea = (/f1->TAREA/) - tlon = f1->TLONG - tlat = f1->TLAT - xc = flt2dble(tlon) - yc = flt2dble(tlat) - dims = dimsizes(xc) - nlat = dims(0) - nlon = dims(1) - ntime = 12 - -; Use the annual mean mixed layer depth - hbltin = f1->HBLT - delete(hbltin@coordinates) - delete(hbltin@cell_methods) - - hblt_avg = dim_avg(hbltin(nlat|:,nlon|:,time|:)) - hblttmp = conform(hbltin,hblt_avg,(/1,2/)) / 100. 
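The dim_avg / conform / divide-by-100 sequence above collapses the twelve monthly HBLT records to one annual-mean depth per grid cell, replicates that mean for every month, and converts POP's centimeters to meters, so the slab ocean effectively sees a seasonally constant mixed-layer depth. A hedged numpy equivalent, with made-up shapes:

import numpy as np

hbltin = np.random.rand(12, 16, 32) * 5000.0               # (time, nlat, nlon) monthly HBLT, cm
hblt_avg = hbltin.mean(axis=0)                             # annual-mean depth at each cell
hblttmp = np.broadcast_to(hblt_avg, hbltin.shape) / 100.0  # 12 identical records, now in meters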
- - z_t = f1->z_t - print(z_t) - nz = dimsizes(z_t) - print(nz) - zint = fspan(1,nz+1,nz+1) - zint(0) = 0 - do n=1,nz-1 - zint(n) = 0.5*(z_t(n)+z_t(n-1)) - end do - zint(nz) = 2.0*z_t(nz-1)-zint(nz-1) - print(zint) - dz = fspan(1,nz,nz) - do n=0,nz-1 - dz(n) = zint(n+1)-zint(n) - end do - print(dz) - wgt = dz / sum(dz(0:9)) - - Ttmp = f1->TEMP(:,0:9,:,:) - Stmp = f1->SALT(:,0:9,:,:) - Tin = dim_avg_wgt_Wrap(Ttmp(time|:,nlat|:,nlon|:,z_t|:),wgt(0:9),0) - Sin = dim_avg_wgt_Wrap(Stmp(time|:,nlat|:,nlon|:,z_t|:),wgt(0:9),0) - -; This version uses the velocities from the POP history file, so -; has to do interpolation and rotation. If the CPL history files were -; available we can use the surface ocean currents there. -;Uin = f2->avXc2i_i_So_u -;Vin = f2->avXc2i_i_So_v - Utmp = f1->UVEL(:,0,:,:) - Vtmp = f1->VVEL(:,0,:,:) - ANGLET1 = f1->ANGLET - ANGLET = doubletofloat(ANGLET1) - Utmp2 = Utmp*0. - Vtmp2 = Vtmp*0. - Uin = Utmp*0. - Vin = Vtmp*0. - do j=1,nlat-1 - do i=1,nlon-1 - Utmp2(:,j,i) = 0.25*(Utmp(:,j,i)+Utmp(:,j-1,i)+Utmp(:,j,i-1)+Utmp(:,j-1,i-1)) - Vtmp2(:,j,i) = 0.25*(Vtmp(:,j,i)+Vtmp(:,j-1,i)+Vtmp(:,j,i-1)+Vtmp(:,j-1,i-1)) - end do - end do - do nt=0,ntime-1 - Uin(nt,:,:) = (Utmp2(nt,:,:)*cos(ANGLET(:,:))+Vtmp2(nt,:,:)*sin(-ANGLET(:,:)))*0.01 - Vin(nt,:,:) = (Vtmp2(nt,:,:)*cos(ANGLET(:,:))-Utmp2(nt,:,:)*sin(-ANGLET(:,:)))*0.01 - end do - -; We do not have sea surface tilt terms in the POP history files. These are -; only in the CPL history files. -;dhdxin = f2->avXc2i_i_So_dhdx -;dhdyin = f2->avXc2i_i_So_dhdy - dhdxin = Tin*0. - dhdyin = Tin*0. - -; Need to weight the monthly means - - daysinmo = (/31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31./) - xnp = daysinmo - xnm = daysinmo - xnm(1:11) = daysinmo(1:11)+daysinmo(0:10) - xnm(0) = daysinmo(0)+daysinmo(11) - xnp(0:10) = daysinmo(0:10)+daysinmo(1:11) - xnp(11) = daysinmo(11)+daysinmo(0) - aa = 2.*daysinmo / xnm - cc = 2.*daysinmo / xnp - a = aa / 8. - c = cc / 8. - b = 1. - a - c - M = (/(/b(0),c(0),0,0,0,0,0,0,0,0,0,a(0)/), \ - (/a(1),b(1),c(1),0,0,0,0,0,0,0,0,0/), \ - (/0,a(2),b(2),c(2),0,0,0,0,0,0,0,0/), \ - (/0,0,a(3),b(3),c(3),0,0,0,0,0,0,0/), \ - (/0,0,0,a(4),b(4),c(4),0,0,0,0,0,0/), \ - (/0,0,0,0,a(5),b(5),c(5),0,0,0,0,0/), \ - (/0,0,0,0,0,a(6),b(6),c(6),0,0,0,0/), \ - (/0,0,0,0,0,0,a(7),b(7),c(7),0,0,0/), \ - (/0,0,0,0,0,0,0,a(8),b(8),c(8),0,0/), \ - (/0,0,0,0,0,0,0,0,a(9),b(9),c(9),0/), \ - (/0,0,0,0,0,0,0,0,0,a(10),b(10),c(10)/), \ - (/c(11),0,0,0,0,0,0,0,0,0,a(11),b(11)/)/) - invM = inverse_matrix(M) - - shf = f1->SHF - qflux = f1->QFLUX - melth_f = f1->MELTH_F - resid_t = f1->RESID_T -;tfw_t = f1->TWF_T - - rcp_sw = 1026.*3996. - surf = shf+qflux - T1 = Tin - T1(0:10,:,:) = Tin(1:11,:,:) - T1(11,:,:) = Tin(0,:,:) - T2 = Tin - T2(0,:,:) = Tin(11,:,:) - T2(1:11,:,:) = Tin(0:10,:,:) - dT = T1-T2 - release = rcp_sw*dT*hblttmp / (86400.*365./6.) - - ocnheat = surf-release - maskt = new((/nlat,nlon/),double) - maskt = 1 - maskt = mask(maskt,ismissing(ocnheat(0,:,:)),False) - err = new(12,double) - do n=0,ntime-1 - tmp = flt2dble(ndtooned(ocnheat(n,:,:))) - tmp(ind(ismissing(tmp))) = 0. 
- err(n) = tmp # ndtooned(tarea) / sum(tarea * maskt) - end do - print(err) - glob = avg(err) - print(glob) - ocnheat = ocnheat - dble2flt(glob) - - T = new(dimsizes(Tin),typeof(Tin)) - S = new(dimsizes(Sin),typeof(Sin)) - U = new(dimsizes(Uin),typeof(Uin)) - V = new(dimsizes(Vin),typeof(Vin)) - dhdx = new(dimsizes(dhdxin),typeof(dhdxin)) - dhdy = new(dimsizes(dhdyin),typeof(dhdyin)) - hblt = new(dimsizes(hbltin),typeof(hbltin)) - qdp = new(dimsizes(shf),typeof(shf)) - - T = 0. - S = 0. - U = 0. - V = 0. - dhdx = 0. - dhdy = 0. - hblt = 0. - qdp = 0. - - do j=0,ntime-1 - do i=0,ntime-1 - T(j,:,:) = T(j,:,:) + invM(j,i)*Tin(i,:,:) - S(j,:,:) = S(j,:,:) + invM(j,i)*Sin(i,:,:) - U(j,:,:) = U(j,:,:) + invM(j,i)*Uin(i,:,:) - V(j,:,:) = V(j,:,:) + invM(j,i)*Vin(i,:,:) - dhdx(j,:,:) = dhdx(j,:,:) + invM(j,i)*dhdxin(i,:,:) - dhdy(j,:,:) = dhdy(j,:,:) + invM(j,i)*dhdyin(i,:,:) - hblt(j,:,:) = hblt(j,:,:) + invM(j,i)*hblttmp(i,:,:) - qdp(j,:,:) = qdp(j,:,:) + invM(j,i)*ocnheat(i,:,:) - end do - end do - - time@units = "days since 0001-01-01 00:00:00" - time@long_name = "observation time" - time@calendar = "noleap" - - area@units = "area" - area@long_name = "area of grid cell in radians squared" - - maskr@long_name = "domain maskr" - maskr@units = "unitless" - - xc@long_name = "longitude of grid cell center" - xc@units = "degrees east" - - yc@long_name = "latitude of grid cell center" - yc@units = "degrees north" - - S@long_name = "salinity" - S@units = "ppt" - - T@long_name = "temperature" - T@units = "degC" - - U@long_name = "u ocean current" - U@units = "m/s" - - V@long_name = "v ocean current" - V@units = "m/s" - - dhdx@long_name = "ocean surface slope: zonal" - dhdx@units = "m/m" - - dhdy@long_name = "ocean surface slope: meridional" - dhdy@units = "m/m" - - hblt@long_name = "boundary layer depth" - hblt@units = "m" - - qdp@long_name = "ocean heat flux convergence" - qdp@units = "W/m^2" - - fout = addfile("oceanmixed_ice.nc","c") - setfileoption(fout,"DefineMode",True) - - fileAtt = True - fileAtt@title = "Monthly averaged ocean forcing from POP output" - fileAtt@conventions = "CCSM data model domain description" - fileAtt@source = "pop_frc.ncl" - fileAtt@description = "Input data for DOCN7 mixed layer model from " + case - fileAtt@note1 = "fields computed from 20-yr monthly means from pop" - fileAtt@note2 = "all fields interpolated to T-grid" - fileAtt@note3 = "qdp is computed from depth summed ocean column" - fileAtt@author = "D. 
Bailey" - fileAtt@calendar = "standard" - fileAtt@comment = "This data is on the displaced pole grid gx1v5" - fileAtt@creation_date = systemfunc("date") - fileattdef(fout,fileAtt) - - dimNames = (/"time","nj","ni"/) - dimSizes = (/ntime,nlat,nlon/) - dimUnlim = (/False,False,False/) - filedimdef(fout,dimNames,dimSizes,dimUnlim) - - filevardef(fout,"area",typeof(area),(/"nj","ni"/)) - filevarattdef(fout,"area",area) - filevardef(fout,"mask",typeof(maskr),(/"nj","ni"/)) - filevarattdef(fout,"mask",maskr) - filevardef(fout,"xc",typeof(xc),(/"nj","ni"/)) - filevarattdef(fout,"xc",xc) - filevardef(fout,"yc",typeof(yc),(/"nj","ni"/)) - filevarattdef(fout,"yc",yc) - - filevardef(fout,"time",typeof(time),"time") - filevarattdef(fout,"time",time) - - filevardef(fout,"S",typeof(S),dimNames) - filevarattdef(fout,"S",S) - filevardef(fout,"T",typeof(T),dimNames) - filevarattdef(fout,"T",T) - filevardef(fout,"U",typeof(U),dimNames) - filevarattdef(fout,"U",U) - filevardef(fout,"V",typeof(V),dimNames) - filevarattdef(fout,"V",V) - filevardef(fout,"dhdx",typeof(dhdx),dimNames) - filevarattdef(fout,"dhdx",dhdx) - filevardef(fout,"dhdy",typeof(dhdy),dimNames) - filevarattdef(fout,"dhdy",dhdy) - filevardef(fout,"hblt",typeof(hblt),dimNames) - filevarattdef(fout,"hblt",hblt) - filevardef(fout,"qdp",typeof(qdp),dimNames) - filevarattdef(fout,"qdp",qdp) - - fout->area = (/area/) - fout->mask = (/maskr/) - fout->xc = (/xc/) - fout->yc = (/yc/) - fout->time = (/time/) - fout->S = (/S/) - fout->T = (/T/) - fout->U = (/U/) - fout->V = (/V/) - fout->dhdx = (/dhdx/) - fout->dhdy = (/dhdy/) - fout->hblt = (/hblt/) - fout->qdp = (/qdp/) - -end diff --git a/src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlt.ncl b/src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlt.ncl deleted file mode 100644 index a5b62931615..00000000000 --- a/src/components/data_comps/docn/tools/pop_som_frc/pop_frc_mlt.ncl +++ /dev/null @@ -1,320 +0,0 @@ - -; This NCL script takes the two climatological files created by pop_frc.csh -; and creates a slab ocean model (SOM) forcing file. This script does the -; same steps as the pop_frc.m matlab script used by C. Bitz. -; This version uses the annual mean mixed-layer depth and averages all -; quantities over the mixed-layer including temperature, salinity, and -; the ocean currents. - -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl" -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl" -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl" - -case = "b.e11.B20TRC5CNBDRD.f09_g16" - -begin - - popmac = "/glade/scratch/dbailey/"+case+"/"+case+".ensmean.pop.h.1980-2005.MAC.nc" -;cplmac = "/glade/scratch/dbailey/"+case+"/"+case+".cpl6.ha.126-165.MAC.nc" - - f1 = addfile(popmac,"r") -;f2 = addfile(cplmac,"r") - f3 = addfile("/glade/p/cesm/cseg/inputdata/ocn/docn7/domain.ocn.gx1v6.090206.nc","r") - - time = (/14., 46., 74., 105., 135., 166., 196., 227., 258., 288., 319., 349./) - maskr = f1->REGION_MASK - delete(maskr@coordinates) - area = f3->area - delete(area@coordinates) - tarea = (/f1->TAREA/) - tlon = f1->TLONG - tlat = f1->TLAT -;xc = flt2dble(tlon) -;yc = flt2dble(tlat) - xc = tlon - yc = tlat - dims = dimsizes(xc) - nlat = dims(0) - nlon = dims(1) - ntime = 12 - -; Use the annual mean mixed layer depth - hbltin = f1->HBLT - delete(hbltin@coordinates) - delete(hbltin@cell_methods) - - hblt_avg = dim_avg(hbltin(nlat|:,nlon|:,time|:)) - hblttmp = conform(hbltin,hblt_avg,(/1,2/)) / 100. 
- hblttmp = where(hblttmp.eq.0.,-999.0,hblttmp) - hblttmp@missing_value = -999.0 - hblttmp@_FillValue = -999.0 - -;hblttmp = hbltin / 100. -;hblttmp = 50. - - z_t = f1->z_t - print(z_t) - nz = dimsizes(z_t) - print(nz) - zint = fspan(1,nz+1,nz+1) - zint(0) = 0 - do n=1,nz-1 - zint(n) = 0.5*(z_t(n)+z_t(n-1))*0.01 - end do - zint(nz) = 2.0*z_t(nz-1)*0.01-zint(nz-1) - print(zint) - dz = fspan(1,nz,nz) - wgt = new((/ntime,nlat,nlon,nz/),"float") - wgt@_FillValue = -999.0 - do n=0,nz-1 - dz(n) = zint(n+1)-zint(n) - wgt(:,:,:,n) = hblttmp(:,:,:)-zint(n) - wgt(:,:,:,n) = where (wgt(:,:,:,n).lt.0.,wgt@_FillValue,wgt(:,:,:,n)) - wgt(:,:,:,n) = where (wgt(:,:,:,n).gt.dz(n),dz(n),wgt(:,:,:,n)) - wgt(:,:,:,n) = wgt(:,:,:,n) / hblttmp(:,:,:) - end do - print(dz) - print(wgt(11,360,175,:)) - -; Use mixed-layer temperature - Ttmp = f1->TEMP(:,:,:,:) - Stmp = f1->SALT(:,:,:,:) - Ttmp2 = Ttmp(time|:,nlat|:,nlon|:,z_t|:)*wgt - Stmp2 = Stmp(time|:,nlat|:,nlon|:,z_t|:)*wgt - print(Ttmp2(11,360,175,:)) - print(Stmp2(11,360,175,:)) - Tin = dim_sum_Wrap(Ttmp2) - Sin = dim_sum_Wrap(Stmp2) -; Use SST -;Tin = f1->TEMP(:,0,:,:) -;Sin = f1->SALT(:,0,:,:) - -; Use velocities and sea surface tilt terms from coupler if available. -;Uin = f2->avXc2i_i_So_u -;Vin = f2->avXc2i_i_So_v -;dhdxin = f2->avXc2i_i_So_dhdx -;dhdyin = f2->avXc2i_i_So_dhdy - -; Otherwise uses velocities from POP history file and do the appropriate -; rotations. Assume sea surface tilt terms are zero. - Uin = Tin*0. - Vin = Tin*0. - Utmp = f1->UVEL(:,0,:,:) - Vtmp = f1->VVEL(:,0,:,:) - ANGLET1 = f1->ANGLET - ANGLET = doubletofloat(ANGLET1) - Utmp2 = Utmp*0. - Vtmp2 = Vtmp*0. - Uin = Utmp*0. - Vin = Vtmp*0. - do j=1,nlat-1 - do i=1,nlon-1 - Utmp2(:,j,i) = 0.25*(Utmp(:,j,i)+Utmp(:,j-1,i)+Utmp(:,j,i-1)+Utmp(:,j-1,i-1)) - Vtmp2(:,j,i) = 0.25*(Vtmp(:,j,i)+Vtmp(:,j-1,i)+Vtmp(:,j,i-1)+Vtmp(:,j-1,i-1)) - end do - end do - do nt=0,ntime-1 - Uin(nt,:,:) = (Utmp2(nt,:,:)*cos(ANGLET(:,:))+Vtmp2(nt,:,:)*sin(-ANGLET(:,:)))*0.01 - Vin(nt,:,:) = (Vtmp2(nt,:,:)*cos(ANGLET(:,:))-Utmp2(nt,:,:)*sin(-ANGLET(:,:)))*0.01 - end do - - dhdxin = Tin*0. - dhdyin = Tin*0. - -; Need to weight the monthly means - - daysinmo = (/31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31./) - xnp = daysinmo - xnm = daysinmo - xnm(1:11) = daysinmo(1:11)+daysinmo(0:10) - xnm(0) = daysinmo(0)+daysinmo(11) - xnp(0:10) = daysinmo(0:10)+daysinmo(1:11) - xnp(11) = daysinmo(11)+daysinmo(0) - aa = 2.*daysinmo / xnm - cc = 2.*daysinmo / xnp - a = aa / 8. - c = cc / 8. - b = 1. - a - c - M = (/(/b(0),c(0),0,0,0,0,0,0,0,0,0,a(0)/), \ - (/a(1),b(1),c(1),0,0,0,0,0,0,0,0,0/), \ - (/0,a(2),b(2),c(2),0,0,0,0,0,0,0,0/), \ - (/0,0,a(3),b(3),c(3),0,0,0,0,0,0,0/), \ - (/0,0,0,a(4),b(4),c(4),0,0,0,0,0,0/), \ - (/0,0,0,0,a(5),b(5),c(5),0,0,0,0,0/), \ - (/0,0,0,0,0,a(6),b(6),c(6),0,0,0,0/), \ - (/0,0,0,0,0,0,a(7),b(7),c(7),0,0,0/), \ - (/0,0,0,0,0,0,0,a(8),b(8),c(8),0,0/), \ - (/0,0,0,0,0,0,0,0,a(9),b(9),c(9),0/), \ - (/0,0,0,0,0,0,0,0,0,a(10),b(10),c(10)/), \ - (/c(11),0,0,0,0,0,0,0,0,0,a(11),b(11)/)/) - invM = inverse_matrix(M) - - shf = f1->SHF - qflux = f1->QFLUX - melth_f = f1->MELTH_F - resid_t = f1->RESID_T -;tfw_t = f1->TWF_T - - rcp_sw = 1026.*3996. - surf = shf+qflux - T1 = Tin - T1(0:10,:,:) = Tin(1:11,:,:) - T1(11,:,:) = Tin(0,:,:) - T2 = Tin - T2(0,:,:) = Tin(11,:,:) - T2(1:11,:,:) = Tin(0:10,:,:) - dT = T1-T2 - release = rcp_sw*dT*hblttmp / (86400.*365./6.) 
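Two numerical steps in this script are easy to miss: the days-in-month matrix M, whose inverse converts twelve monthly means into mid-month values that linearly interpolate back to those means, and the mixed-layer heat-storage ("release") term that is subtracted from SHF + QFLUX in the next step to form the Q-flux. A self-contained numpy sketch of both, with synthetic inputs and an assumed constant 50 m mixed-layer depth (illustrative only):

import numpy as np

days = np.array([31., 28., 31., 30., 31., 30., 31., 31., 30., 31., 30., 31.])
xnm = days + np.roll(days, 1)          # this month plus previous month
xnp = days + np.roll(days, -1)         # this month plus next month
a = (2.0 * days / xnm) / 8.0
c = (2.0 * days / xnp) / 8.0
b = 1.0 - a - c
M = np.zeros((12, 12))
for i in range(12):
    M[i, (i - 1) % 12] = a[i]          # wrap-around tridiagonal, as in the NCL matrix M
    M[i, i] = b[i]
    M[i, (i + 1) % 12] = c[i]
invM = np.linalg.inv(M)

Tin = 10.0 + 5.0 * np.sin(2.0 * np.pi * np.arange(12) / 12.0)   # fake monthly-mean mixed-layer T, degC
T = invM @ Tin                          # deconvolved mid-month values

# Heat stored or released by the mixed layer, from the centered difference of
# adjacent monthly temperatures over roughly a two-month span (365/6 days):
rcp_sw = 1026.0 * 3996.0                # rho * c_p of seawater, J m-3 K-1
hblt = 50.0                             # mixed-layer depth, m (annual mean in the script)
dT = np.roll(Tin, -1) - np.roll(Tin, 1) # next month minus previous month
release = rcp_sw * dT * hblt / (86400.0 * 365.0 / 6.0)   # W m-2
# In the script this release term is subtracted from SHF + QFLUX to get the
# ocean heat-flux convergence (qdp), before its area-weighted global mean is removed.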
- - ocnheat = surf-release - maskt = new((/nlat,nlon/),double) - maskt = 1 - maskt = mask(maskt,ismissing(ocnheat(0,:,:)),False) - err = new(12,double) - do n=0,ntime-1 - tmp = flt2dble(ndtooned(ocnheat(n,:,:))) - tmp(ind(ismissing(tmp))) = 0. - err(n) = tmp # ndtooned(tarea) / sum(tarea * maskt) - end do - print(err) - glob = avg(err) - print(glob) - ocnheat = ocnheat - dble2flt(glob) - - T = new(dimsizes(Tin),typeof(Tin)) - S = new(dimsizes(Sin),typeof(Sin)) - U = new(dimsizes(Uin),typeof(Uin)) - V = new(dimsizes(Vin),typeof(Vin)) - dhdx = new(dimsizes(dhdxin),typeof(dhdxin)) - dhdy = new(dimsizes(dhdyin),typeof(dhdyin)) - hblt = new(dimsizes(hbltin),typeof(hbltin)) - qdp = new(dimsizes(shf),typeof(shf)) - - T = 0. - S = 0. - U = 0. - V = 0. - dhdx = 0. - dhdy = 0. - hblt = 0. - qdp = 0. - - do j=0,ntime-1 - do i=0,ntime-1 - T(j,:,:) = T(j,:,:) + invM(j,i)*Tin(i,:,:) - S(j,:,:) = S(j,:,:) + invM(j,i)*Sin(i,:,:) - U(j,:,:) = U(j,:,:) + invM(j,i)*Uin(i,:,:) - V(j,:,:) = V(j,:,:) + invM(j,i)*Vin(i,:,:) - dhdx(j,:,:) = dhdx(j,:,:) + invM(j,i)*dhdxin(i,:,:) - dhdy(j,:,:) = dhdy(j,:,:) + invM(j,i)*dhdyin(i,:,:) - hblt(j,:,:) = hblt(j,:,:) + invM(j,i)*hblttmp(i,:,:) - qdp(j,:,:) = qdp(j,:,:) + invM(j,i)*ocnheat(i,:,:) - end do - end do - - time@units = "days since 0001-01-01 00:00:00" - time@long_name = "observation time" - time@calendar = "noleap" - - area@units = "area" - area@long_name = "area of grid cell in radians squared" - - maskr@long_name = "domain maskr" - maskr@units = "unitless" - - xc@long_name = "longitude of grid cell center" - xc@units = "degrees east" - - yc@long_name = "latitude of grid cell center" - yc@units = "degrees north" - - S@long_name = "salinity" - S@units = "ppt" - - T@long_name = "temperature" - T@units = "degC" - - U@long_name = "u ocean current" - U@units = "m/s" - - V@long_name = "v ocean current" - V@units = "m/s" - - dhdx@long_name = "ocean surface slope: zonal" - dhdx@units = "m/m" - - dhdy@long_name = "ocean surface slope: meridional" - dhdy@units = "m/m" - - hblt@long_name = "boundary layer depth" - hblt@units = "m" - - qdp@long_name = "ocean heat flux convergence" - qdp@units = "W/m^2" - - fout = addfile("oceanmixed_ice.nc","c") - setfileoption(fout,"DefineMode",True) - - fileAtt = True - fileAtt@title = "Monthly averaged ocean forcing from POP output" - fileAtt@conventions = "CCSM data model domain description" - fileAtt@source = "pop_frc.ncl" - fileAtt@description = "Input data for DOCN7 mixed layer model from " + case - fileAtt@note1 = "fields computed from years 402 to 1510 monthly means from pop" - fileAtt@note2 = "all fields interpolated to T-grid" - fileAtt@note3 = "qdp is computed from depth summed ocean column" - fileAtt@author = "D. 
Bailey" - fileAtt@calendar = "standard" - fileAtt@comment = "This data is on the displaced pole grid gx1v5" - fileAtt@creation_date = systemfunc("date") - fileattdef(fout,fileAtt) - - dimNames = (/"time","nj","ni"/) - dimSizes = (/ntime,nlat,nlon/) - dimUnlim = (/False,False,False/) - filedimdef(fout,dimNames,dimSizes,dimUnlim) - - filevardef(fout,"area",typeof(area),(/"nj","ni"/)) - filevarattdef(fout,"area",area) - filevardef(fout,"mask",typeof(maskr),(/"nj","ni"/)) - filevarattdef(fout,"mask",maskr) - filevardef(fout,"xc",typeof(xc),(/"nj","ni"/)) - filevarattdef(fout,"xc",xc) - filevardef(fout,"yc",typeof(yc),(/"nj","ni"/)) - filevarattdef(fout,"yc",yc) - - filevardef(fout,"time",typeof(time),"time") - filevarattdef(fout,"time",time) - - filevardef(fout,"S",typeof(S),dimNames) - filevarattdef(fout,"S",S) - filevardef(fout,"T",typeof(T),dimNames) - filevarattdef(fout,"T",T) - filevardef(fout,"U",typeof(U),dimNames) - filevarattdef(fout,"U",U) - filevardef(fout,"V",typeof(V),dimNames) - filevarattdef(fout,"V",V) - filevardef(fout,"dhdx",typeof(dhdx),dimNames) - filevarattdef(fout,"dhdx",dhdx) - filevardef(fout,"dhdy",typeof(dhdy),dimNames) - filevarattdef(fout,"dhdy",dhdy) - filevardef(fout,"hblt",typeof(hblt),dimNames) - filevarattdef(fout,"hblt",hblt) - filevardef(fout,"qdp",typeof(qdp),dimNames) - filevarattdef(fout,"qdp",qdp) - - fout->area = (/area/) - fout->mask = (/maskr/) - fout->xc = (/xc/) - fout->yc = (/yc/) - fout->time = (/time/) - fout->S = (/S/) - fout->T = (/T/) - fout->U = (/U/) - fout->V = (/V/) - fout->dhdx = (/dhdx/) - fout->dhdy = (/dhdy/) - fout->hblt = (/hblt/) - fout->qdp = (/qdp/) - -end diff --git a/src/components/data_comps/docn/tools/pop_som_frc/pop_interp.ncl b/src/components/data_comps/docn/tools/pop_som_frc/pop_interp.ncl deleted file mode 100644 index 1e3e1fd759c..00000000000 --- a/src/components/data_comps/docn/tools/pop_som_frc/pop_interp.ncl +++ /dev/null @@ -1,233 +0,0 @@ - -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl" -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl" -load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl" - -begin - - oceanmixed = "oceanmixed_ice.nc" - - f1 = addfile(oceanmixed,"r") - f3 = addfile("/fis/cgd/cseg/csm/inputdata/ocn/docn7/domain.ocn.gx1v3.051111.nc","r") - - time = (/14., 46., 74., 105., 135., 166., 196., 227., 258., 288., 319., 349./) - maskr = f1->REGION_MASK - delete(maskr@coordinates) - area = f3->area - delete(area@coordinates) - tarea = f1->TAREA - tlon = f1->TLONG - tlat = f1->TLAT - xc = flt2dble(tlon) - yc = flt2dble(tlat) - dims = dimsizes(xc) - nj = dims(0) - ni = dims(1) - ntime = 12 - -; Use the annual mean mixed layer depth - hbltin = f1->HBLT - Tin = f1->TEMP(:,0,:,:) - Sin = f1->SALT(:,0,:,:) - Uin = f2->avXc2i_i_So_u - Vin = f2->avXc2i_i_So_v - dhdxin = f2->avXc2i_i_So_dhdx - dhdyin = f2->avXc2i_i_So_dhdy - -; Need to weight the monthly means - - daysinmo = (/31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31./) - xnp = daysinmo - xnm = daysinmo - xnm(1:11) = daysinmo(1:11)+daysinmo(0:10) - xnm(0) = daysinmo(0)+daysinmo(11) - xnp(0:10) = daysinmo(0:10)+daysinmo(1:11) - xnp(11) = daysinmo(11)+daysinmo(0) - aa = 2.*daysinmo / xnm - cc = 2.*daysinmo / xnp - a = aa / 8. - c = cc / 8. - b = 1. 
- a - c - M = (/(/b(0),c(0),0,0,0,0,0,0,0,0,0,a(0)/), \ - (/a(1),b(1),c(1),0,0,0,0,0,0,0,0,0/), \ - (/0,a(2),b(2),c(2),0,0,0,0,0,0,0,0/), \ - (/0,0,a(3),b(3),c(3),0,0,0,0,0,0,0/), \ - (/0,0,0,a(4),b(4),c(4),0,0,0,0,0,0/), \ - (/0,0,0,0,a(5),b(5),c(5),0,0,0,0,0/), \ - (/0,0,0,0,0,a(6),b(6),c(6),0,0,0,0/), \ - (/0,0,0,0,0,0,a(7),b(7),c(7),0,0,0/), \ - (/0,0,0,0,0,0,0,a(8),b(8),c(8),0,0/), \ - (/0,0,0,0,0,0,0,0,a(9),b(9),c(9),0/), \ - (/0,0,0,0,0,0,0,0,0,a(10),b(10),c(10)/), \ - (/c(11),0,0,0,0,0,0,0,0,0,a(11),b(11)/)/) - invM = inverse_matrix(M) - - shf = f1->SHF - qflux = f1->QFLUX - melth_f = f1->MELTH_F - resid_t = f1->RESID_T -;tfw_t = f1->TWF_T - - rcp_sw = 1026.*3996. - surf = shf+qflux - T1 = Tin - T1(0:10,:,:) = Tin(1:11,:,:) - T1(11,:,:) = Tin(0,:,:) - T2 = Tin - T2(0,:,:) = Tin(11,:,:) - T2(1:11,:,:) = Tin(0:10,:,:) - dT = T1-T2 - release = rcp_sw*dT*hblttmp / (86400.*365./6.) - - ocnheat = surf-release - maskt = new((/nj,ni/),typeof(ocnheat)) - maskt = 1 - maskt = mask(maskt,ismissing(ocnheat(0,:,:)),False) - err = new(12,typeof(ocnheat)) - do n=0,ntime-1 - tmp = ndtooned(ocnheat(n,:,:)) - tmp(ind(ismissing(tmp))) = 0. - err(n) = tmp # ndtooned(tarea) / sum(tarea * maskt) - end do - print(err) - glob = avg(err) - print(glob) - ocnheat = ocnheat - glob - - T = new(dimsizes(Tin),typeof(Tin)) - S = new(dimsizes(Sin),typeof(Sin)) - U = new(dimsizes(Uin),typeof(Uin)) - V = new(dimsizes(Vin),typeof(Vin)) - dhdx = new(dimsizes(dhdxin),typeof(dhdxin)) - dhdy = new(dimsizes(dhdyin),typeof(dhdyin)) - hblt = new(dimsizes(hbltin),typeof(hbltin)) - qdp = new(dimsizes(shf),typeof(shf)) - - T = 0. - S = 0. - U = 0. - V = 0. - dhdx = 0. - dhdy = 0. - hblt = 0. - qdp = 0. - - do j=0,ntime-1 - do i=0,ntime-1 - T(j,:,:) = T(j,:,:) + invM(j,i)*Tin(i,:,:) - S(j,:,:) = S(j,:,:) + invM(j,i)*Sin(i,:,:) - U(j,:,:) = U(j,:,:) + invM(j,i)*Uin(i,:,:) - V(j,:,:) = V(j,:,:) + invM(j,i)*Vin(i,:,:) - dhdx(j,:,:) = dhdx(j,:,:) + invM(j,i)*dhdxin(i,:,:) - dhdy(j,:,:) = dhdy(j,:,:) + invM(j,i)*dhdyin(i,:,:) - hblt(j,:,:) = hblt(j,:,:) + invM(j,i)*hblttmp(i,:,:) - qdp(j,:,:) = qdp(j,:,:) + invM(j,i)*ocnheat(i,:,:) - end do - end do - - time@units = "days since 0001-01-01 00:00:00" - time@long_name = "observation time" - time@calendar = "noleap" - - area@units = "area" - area@long_name = "area of grid cell in radians squared" - - maskr@long_name = "domain maskr" - maskr@units = "unitless" - - xc@long_name = "longitude of grid cell center" - xc@units = "degrees east" - - yc@long_name = "latitude of grid cell center" - yc@units = "degrees north" - - S@long_name = "salinity" - S@units = "ppt" - - T@long_name = "temperature" - T@units = "degC" - - U@long_name = "u ocean current" - U@units = "m/s" - - V@long_name = "v ocean current" - V@units = "m/s" - - dhdx@long_name = "ocean surface slope: zonal" - dhdx@units = "m/m" - - dhdy@long_name = "ocean surface slope: meridional" - dhdy@units = "m/m" - - hblt@long_name = "boundary layer depth" - hblt@units = "m" - - qdp@long_name = "ocean heat flux convergence" - qdp@units = "W/m^2" - - fout = addfile("oceanmixed_ice.nc","c") - setfileoption(fout,"DefineMode",True) - - fileAtt = True - fileAtt@title = "Monthly averaged ocean forcing from POP output" - fileAtt@conventions = "CCSM2.0 data model domain description" - fileAtt@source = "pop_frc.ncl" - fileAtt@description = "Input data for CSIM4 mixed layer model from b30.004" - fileAtt@note1 = "fields computed from 20-yr monthly means from pop" - fileAtt@note2 = "all fields interpolated to T-grid" - fileAtt@note3 = "qdp is computed 
from depth summed ocean column" - fileAtt@author = "D. Bailey" - fileAtt@calendar = "standard" - fileAtt@comment = "This data is on the displaced pole grid gx1v3" - fileAtt@creation_date = systemfunc("date") - fileattdef(fout,fileAtt) - - dimNames = (/"time","nj","ni"/) - dimSizes = (/ntime,nj,ni/) - dimUnlim = (/False,False,False/) - filedimdef(fout,dimNames,dimSizes,dimUnlim) - - filevardef(fout,"area",typeof(area),(/"nj","ni"/)) - filevarattdef(fout,"area",area) - filevardef(fout,"mask",typeof(maskr),(/"nj","ni"/)) - filevarattdef(fout,"mask",maskr) - filevardef(fout,"xc",typeof(xc),(/"nj","ni"/)) - filevarattdef(fout,"xc",xc) - filevardef(fout,"yc",typeof(yc),(/"nj","ni"/)) - filevarattdef(fout,"yc",yc) - - filevardef(fout,"time",typeof(time),"time") - filevarattdef(fout,"time",time) - - filevardef(fout,"S",typeof(S),dimNames) - filevarattdef(fout,"S",S) - filevardef(fout,"T",typeof(T),dimNames) - filevarattdef(fout,"T",T) - filevardef(fout,"U",typeof(U),dimNames) - filevarattdef(fout,"U",U) - filevardef(fout,"V",typeof(V),dimNames) - filevarattdef(fout,"V",V) - filevardef(fout,"dhdx",typeof(dhdx),dimNames) - filevarattdef(fout,"dhdx",dhdx) - filevardef(fout,"dhdy",typeof(dhdy),dimNames) - filevarattdef(fout,"dhdy",dhdy) - filevardef(fout,"hblt",typeof(hblt),dimNames) - filevarattdef(fout,"hblt",hblt) - filevardef(fout,"qdp",typeof(qdp),dimNames) - filevarattdef(fout,"qdp",qdp) - - fout->area = (/area/) - fout->mask = (/maskr/) - fout->xc = (/xc/) - fout->yc = (/yc/) - fout->time = (/time/) - fout->S = (/S/) - fout->T = (/T/) - fout->U = (/U/) - fout->V = (/V/) - fout->dhdx = (/dhdx/) - fout->dhdy = (/dhdy/) - fout->hblt = (/hblt/) - fout->qdp = (/qdp/) - -end diff --git a/src/components/data_comps/docn/tools/pop_som_frc/read_from_mss.csh b/src/components/data_comps/docn/tools/pop_som_frc/read_from_mss.csh deleted file mode 100755 index 287798a71e6..00000000000 --- a/src/components/data_comps/docn/tools/pop_som_frc/read_from_mss.csh +++ /dev/null @@ -1,117 +0,0 @@ -#!/bin/csh -f - -set echo on -#***************************************************************** -# Check to see of 12 months of a year already exist, then -# get the monthly files from Mass Storage System if needed -#***************************************************************** - -# This file reads in files from MSS -# $DATE_FORMAT form of date in history file name (eg. 
yyyy-mm), input -# $read_dir case name of file to read , input -# $BEG_READ first year of data to read , input -# $END_READ last year of data to read , input -# $FILE_HEADER beginning of filename -# $PATH_MSS directory on MSS where data resides -# $WKDIR directory on dataproc where data will be put - -if ($#argv != 4) then - echo "usage: read_from_mss.csh $DATE_FORMAT $read_dir $BEG_READ $END_READ" - exit -endif - -set DATE_FORMAT = $1 -set read_dir = $2 -@ BEG_READ = $3 -@ END_READ = $4 - -# Set msread password for b20.003 case -if ( $read_dir == b20.003 ) then - set msspwd = '-rpwd ccsm1330' -else - set msspwd = ' ' -endif - -echo GETTING MONTHLY FILES FROM THE MSS -echo THIS MIGHT TAKE SOME TIME -echo ' ' - -if ($BEG_READ < 1) then # so we don't get a negative number - echo ERROR: FIRST YEAR OF TEST DATA $BEG_READ MUST BE GT ZERO - exit -endif - -@ IYEAR = $BEG_READ -#------------------------------------------------------- -# Loop through years -#------------------------------------------------------- - -while ($IYEAR <= $END_READ) - -@ ICOUNT = 0 # Count number of months of data for each year that - # are already on $WKDIR -#------------------------------------------------------------- - @ IMONTH = 1 - while ($IMONTH <= 12) - - set four_digit_year = `printf "%04d" {$IYEAR}` - set date_string = `echo $DATE_FORMAT | sed s/yyyy/$four_digit_year/` - set two_digit_month = `printf "%02d" {$IMONTH}` - set date_string = `echo $date_string | sed s/mm/${two_digit_month}/` - set date_string = `echo $date_string | sed s/dd/01/` - set filename = ${FILE_HEADER}${date_string} - - if (-e ${WKDIR}/${filename}.nc) then - echo " File ${filename}.nc already exists on $WKDIR" - @ ICOUNT++ - endif - @ IMONTH++ - end # End of IMONTH <=12 -#------------------------------------------------------------- -# Read a year of data from MSS -#------------------------------------------------------------- - if ($ICOUNT < 12) then - - echo 'GETTING '{$PATH_MSS}{$FILE_HEADER}${four_digit_year}'*.nc' - if (`which msrcp | wc -w` == 1) then - msls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}'-01.nc' - if ( $status == 0 ) setenv TAR 0 - else - hsi -q 'ls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}-01.nc' - if ( $status == 0 ) setenv TAR 0 - endif - if (`which msrcp | wc -w` == 1) then - msls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}'.tar' - if ( $status == 0 ) setenv TAR 1 - else - hsi -q 'ls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}.tar' - if ( $status == 0 ) setenv TAR 1 - endif - if ( $TAR == 0 ) then - if (`which msrcp | wc -w` == 1) then - msrcp $msspwd 'mss:'{$PATH_MSS}{$FILE_HEADER}${four_digit_year}'-{01,02,03,04,05,06,07,08,09,10,11,12}.nc' $WKDIR - else - pushd $WKDIR - hsi -q 'cd {$PATH_MSS}; prompt; mget {$FILE_HEADER}${four_digit_year}-{01,02,03,04,05,06,07,08,09,10,11,12}.nc' - popd - endif - else - pushd $WKDIR - if (`which msrcp | wc -w` == 1) then - msrcp $msspwd 'mss:'{$PATH_MSS}{$FILE_HEADER}${four_digit_year}'.tar' $WKDIR - else - hsi -q 'cd {$PATH_MSS}; get {$FILE_HEADER}${four_digit_year}.tar' - endif - tar -xvf {$FILE_HEADER}${four_digit_year}.tar - /bin/rm {$FILE_HEADER}${four_digit_year}.tar - popd - endif - - endif # End of if ICOUNT < 12 - @ IYEAR++ # advance year -end # End of IYEAR <= END_READ - -echo MONTHLY FILES COPIED FROM THE MSS TO {$WKDIR} -echo ' ' - -end diff --git a/src/components/data_comps/docn/tools/pop_som_frc/read_from_mss_month.csh b/src/components/data_comps/docn/tools/pop_som_frc/read_from_mss_month.csh deleted file mode 100755 index 592321f0f91..00000000000 --- 
a/src/components/data_comps/docn/tools/pop_som_frc/read_from_mss_month.csh +++ /dev/null @@ -1,98 +0,0 @@ -#!/bin/csh -f - -#***************************************************************** -# Check to see of 12 months of a year already exist, then -# get the monthly files from Mass Storage System if needed -#***************************************************************** - -# This file reads in files from MSS -# $DATE_FORMAT form of date in history file name (eg. yyyy-mm), input -# $read_dir case name of file to read , input -# $BEG_READ first year of data to read , input -# $END_READ last year of data to read , input -# $FILE_HEADER beginning of filename -# $PATH_MSS directory on MSS where data resides -# $WKDIR directory on dataproc where data will be put - -if ($#argv != 5) then - echo "usage: read_from_mss.csh $DATE_FORMAT $read_dir $BEG_READ $END_READ $month" - exit -endif - -set DATE_FORMAT = $1 -set read_dir = $2 -@ BEG_READ = $3 -@ END_READ = $4 -set month = $5 - -# Set msread password for b20.003 case -if ( $read_dir == b20.003 ) then - set msspwd = '-rpwd ccsm1330' -else - set msspwd = ' ' -endif - -echo GETTING MONTHLY FILES FROM THE MSS -echo THIS MIGHT TAKE SOME TIME -echo ' ' - -if ($BEG_READ < 1) then # so we don't get a negative number - echo ERROR: FIRST YEAR OF TEST DATA $BEG_READ MUST BE GT ZERO - exit -endif - -@ IYEAR = $BEG_READ -#------------------------------------------------------- -# Loop through years -#------------------------------------------------------- - -while ($IYEAR <= $END_READ) - - set four_digit_year = `printf "%04d" {$IYEAR}` - -#------------------------------------------------------------- -# Read a month of data from MSS -#------------------------------------------------------------- - - echo 'GETTING '{$PATH_MSS}{$FILE_HEADER}${four_digit_year}'*.nc' - if (`which msrcp | wc -w` == 1) then - msls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}'-01.nc' - if ( $status == 0 ) setenv TAR 0 - else - hsi -q 'ls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}-01.nc' - if ( $status == 0 ) setenv TAR 0 - endif - if (`which msrcp | wc -w` == 1) then - msls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}'.tar' - if ( $status == 0 ) setenv TAR 1 - else - hsi -q 'ls {$PATH_MSS}{$FILE_HEADER}${four_digit_year}.tar' - if ( $status == 0 ) setenv TAR 1 - endif - if ( $TAR == 0 ) then - if (`which msrcp | wc -w` == 1) then - msrcp $msspwd 'mss:'{$PATH_MSS}{$FILE_HEADER}${four_digit_year}'-'${month}'.nc' $WKDIR - else - pushd $WKDIR - hsi -q 'cd {$PATH_MSS}; get {$FILE_HEADER}${four_digit_year}-${month}.nc' - popd - endif - else - pushd $WKDIR - if (`which msrcp | wc -w` == 1) then - msrcp $msspwd 'mss:'{$PATH_MSS}{$FILE_HEADER}${four_digit_year}'.tar' $WKDIR - else - hsi -q 'cd {$PATH_MSS}; get {$FILE_HEADER}${four_digit_year}.tar' - endif - tar -xvf {$FILE_HEADER}${four_digit_year}.tar {$FILE_HEADER}${four_digit_year}-${month}.nc - /bin/rm {$FILE_HEADER}${four_digit_year}.tar - popd - endif - - @ IYEAR++ # advance year -end # End of IYEAR <= END_READ - -echo MONTHLY FILES COPIED FROM THE MSS TO {$WKDIR} -echo ' ' - -end diff --git a/src/components/data_comps/drof/cime_config/buildlib b/src/components/data_comps/drof/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/data_comps/drof/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/data_comps/drof/cime_config/buildnml 
b/src/components/data_comps/drof/cime_config/buildnml deleted file mode 100755 index 73770f1315f..00000000000 --- a/src/components/data_comps/drof/cime_config/buildnml +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env python - -"""Namelist creator for CIME's data river model. -""" - -# Typically ignore this. -# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position - -import os, sys, glob - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.XML.files import Files -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, safe_copy -from CIME.buildnml import create_namelist_infile, parse_input - -logger = logging.getLogger(__name__) - -# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements -#################################################################################### -def _create_namelists(case, confdir, inst_string, infile, nmlgen, data_list_path): -#################################################################################### - """Write out the namelist for this component. - - Most arguments are the same as those for `NamelistGenerator`. The - `inst_string` argument is used as a suffix to distinguish files for - different instances. The `confdir` argument is used to specify the directory - in which output files will be placed. - """ - - #---------------------------------------------------- - # Get a bunch of information from the case. - #---------------------------------------------------- - rof_domain_file = case.get_value("ROF_DOMAIN_FILE") - rof_domain_path = case.get_value("ROF_DOMAIN_PATH") - drof_mode = case.get_value("DROF_MODE") - rof_grid = case.get_value("ROF_GRID") - - #---------------------------------------------------- - # Check for incompatible options. - #---------------------------------------------------- - expect(rof_grid != "null", - "ROF_GRID cannot be null") - expect(drof_mode != "NULL", - "DROF_MODE cannot be NULL") - - #---------------------------------------------------- - # Log some settings. - #---------------------------------------------------- - logger.debug("DROF mode is {}".format(drof_mode)) - logger.debug("DROF grid is {}".format(rof_grid)) - - #---------------------------------------------------- - # Create configuration information. - #---------------------------------------------------- - config = {} - config['rof_grid'] = rof_grid - config['drof_mode'] = drof_mode - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - #---------------------------------------------------- - # Construct the list of streams. - #---------------------------------------------------- - streams = nmlgen.get_streams() - - #---------------------------------------------------- - # For each stream, create stream text file and update - # shr_strdata_nml group and input data list. - #---------------------------------------------------- - for stream in streams: - - # Ignore null values. - if stream is None or stream in ("NULL", ""): - continue - - inst_stream = stream + inst_string - logger.debug("DROF stream is {}".format(inst_stream)) - stream_path = os.path.join(confdir, "drof.streams.txt." 
+ inst_stream) - user_stream_path = os.path.join(case.get_case_root(), - "user_drof.streams.txt." + inst_stream) - - # Use the user's stream file, or create one if necessary. - if os.path.exists(user_stream_path): - safe_copy(user_stream_path, stream_path) - config['stream'] = stream - nmlgen.update_shr_strdata_nml(config, stream, stream_path) - else: - nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) - - #---------------------------------------------------- - # Create `shr_strdata_nml` namelist group. - #---------------------------------------------------- - # set per-stream variables - nmlgen.create_shr_strdata_nml() - - # Determine model domain filename (in drof_in) - if "CPLHIST" in drof_mode: - drof_cplhist_domain_file = case.get_value("DROF_CPLHIST_DOMAIN_FILE") - if drof_cplhist_domain_file == 'null': - logger.info(" .... Obtaining DROF model domain info from first stream file: {}".format(streams[0])) - else: - logger.info(" .... Obtaining DROF model domain info from stream {}".format(streams[0])) - nmlgen.add_default("domainfile", value=drof_cplhist_domain_file) - else: - if rof_domain_file != "UNSET": - full_domain_path = os.path.join(rof_domain_path, rof_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) - else: - nmlgen.add_default("domainfile", value="null") - - #---------------------------------------------------- - # Finally, write out all the namelists. - #---------------------------------------------------- - namelist_file = os.path.join(confdir, "drof_in") - nmlgen.write_output_file(namelist_file, data_list_path, groups=['drof_nml','shr_strdata_nml']) - -############################################################################### -def buildnml(case, caseroot, compname): -############################################################################### - - # Build the component namelist and required stream txt files - if compname != "drof": - raise AttributeError - - rundir = case.get_value("RUNDIR") - ninst = case.get_value("NINST_ROF") - if ninst is None: - ninst = case.get_value("NINST") - # Determine configuration directory - confdir = os.path.join(caseroot,"Buildconf",compname + "conf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - #---------------------------------------------------- - # Construct the namelist generator - #---------------------------------------------------- - # determine directory for user modified namelist_definitions.xml - user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) - expect (os.path.isdir(user_xml_dir), - "user_xml_dir {} does not exist ".format(user_xml_dir)) - - # NOTE: User definition *replaces* existing definition. - files = Files() - definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component":"drof"})] - - user_definition = os.path.join(user_xml_dir, "namelist_definition_drof.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file) - - #---------------------------------------------------- - # Clear out old data. 
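Two user override hooks are visible in the code above: a hand-edited user_drof.streams.txt.<stream><inst> placed in the case root is copied over the generated stream file, and a namelist_definition_drof.xml placed in SourceMods/src.drof replaces the shipped namelist definition. A small sketch of the stream-file resolution order, with hypothetical paths (resolve_stream_file is not part of buildnml; it only mirrors the check made above):

import os

def resolve_stream_file(caseroot, confdir, stream, inst_string=""):
    """Prefer a hand-edited user_drof.streams.txt.<stream><inst> from the case
    root; otherwise fall back to the file the namelist generator creates under
    the component's Buildconf directory."""
    user = os.path.join(caseroot, "user_drof.streams.txt." + stream + inst_string)
    generated = os.path.join(confdir, "drof.streams.txt." + stream + inst_string)
    return user if os.path.exists(user) else generated

# e.g. resolve_stream_file("/scratch/case1", "/scratch/case1/Buildconf/drofconf",
#                          "rof.diatren_ann_rx1", "_0001")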
- #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", - "drof.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - - #---------------------------------------------------- - # Loop over instances - #---------------------------------------------------- - for inst_counter in range(1, ninst+1): - # determine instance string - inst_string = "" - if ninst > 1: - inst_string = '_' + "{:04d}".format(inst_counter) - - # If multi-instance case does not have restart file, use - # single-case restart for each instance - rpointer = "rpointer." + compname - if (os.path.isfile(os.path.join(rundir,rpointer)) and - (not os.path.isfile(os.path.join(rundir,rpointer + inst_string)))): - safe_copy(os.path.join(rundir, rpointer), - os.path.join(rundir, rpointer + inst_string)) - - inst_string_label = inst_string - if not inst_string_label: - inst_string_label = "\"\"" - - # create namelist output infile using user_nl_file as input - user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) - expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file {} ".format(user_nl_file)) - infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, infile) - namelist_infile = [infile] - - # create namelist and stream file(s) data component - _create_namelists(case, confdir, inst_string, namelist_infile, nmlgen, data_list_path) - - # copy namelist files and stream text files, to rundir - if os.path.isdir(rundir): - filename = compname + "_in" - file_src = os.path.join(confdir, filename) - file_dest = os.path.join(rundir, filename) - if inst_string: - file_dest += inst_string - safe_copy(file_src,file_dest) - - for txtfile in glob.glob(os.path.join(confdir, "*txt*")): - safe_copy(txtfile, rundir) - -############################################################################### -def _main_func(): - # Build the component namelist and required stream txt files - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "drof") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/data_comps/drof/cime_config/config_archive.xml b/src/components/data_comps/drof/cime_config/config_archive.xml deleted file mode 100644 index b5ef72df655..00000000000 --- a/src/components/data_comps/drof/cime_config/config_archive.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - r - unset - - rpointer.rof$NINST_STRING - $CASE.drof$NINST_STRING.r.$DATENAME.nc,$CASE.drof$NINST_STRING.rs1.$DATENAME.bin - - - diff --git a/src/components/data_comps/drof/cime_config/config_component.xml b/src/components/data_comps/drof/cime_config/config_component.xml deleted file mode 100644 index 86a28d4d0f6..00000000000 --- a/src/components/data_comps/drof/cime_config/config_component.xml +++ /dev/null @@ -1,145 +0,0 @@ - - - - - - - - - Data runoff model - NULL mode - COREv2 normal year forcing: - COREv2 interannual year forcing: - COREv2 interannual year forcing: - COREv2 interannual year forcing: - COREv2 interannual year forcing: - CPLHIST mode: - JRA55 interannual forcing - - - - char - drof - drof - case_comp - env_case.xml - Name of rof component - - - - char - CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1,DIATREN_IAF_AIS00_RX1,DIATREN_IAF_AIS45_RX1,DIATREN_IAF_AIS55_RX1,IAF_JRA,NULL - DIATREN_ANN_RX1 - - NULL - DIATREN_ANN_RX1 - DIATREN_ANN_AIS00_RX1 - DIATREN_ANN_AIS45_RX1 - DIATREN_ANN_AIS55_RX1 - DIATREN_IAF_RX1 - DIATREN_IAF_AIS00_RX1 - DIATREN_IAF_AIS45_RX1 - 
DIATREN_IAF_AIS55_RX1 - CPLHIST - IAF_JRA - NULL - - run_component_drof - env_run.xml - DROF mode. Values are CPLHIST, *_RX1, and NULL. In *_RX1 mode, - observational climatological or interannual - 1-degree runoff data is used. In CPLHIST mode, runoff - data from a previous model run is read in. In NULL mode, the runoff - data is set to zero. In CPLHIST mode, runoff forcing data from a previous - model run is output by the coupler and read in by the data rof - model. In NULL mode, runoff forcing is set to zero and not used. The - default is DIATREN_ANN_RX1. - - - - char - - null - run_component_drof - env_run.xml - - Full pathname for domain file for drof when DROF_MODE is - CPLHIST. NOTE: if this is set to 'null' (the default), then - domain information is read in from the first coupler history - file in the target stream and it is assumed that the first - coupler stream file that is pointed to contains the domain - information for that stream. - - - - - char - - UNSET - run_component_drof - env_run.xml - directory for coupler history data mode (only used when DROF_MODE is CPLHIST mode) - - - - char - - UNSET - run_component_drof - env_run.xml - case name for coupler history data mode (only used when DROF_MODE is CPLHIST mode) - - - - integer - - -999 - run_component_drof - env_run.xml - - Simulation year corresponding to DROF_CPLHIST_YR_START (only used - when DROF_MODE is CPLHIST). A common usage is to set this to - RUN_STARTDATE. With this setting, the forcing in the first year of - the run will be the forcing of year DROF_CPLHIST_YR_START. Another - use case is to align the calendar of transient forcing with the - model calendar. For example, setting - DROF_CPLHIST_YR_ALIGN=DROF_CPLHIST_YR_START will lead to the - forcing calendar being the same as the model calendar. The forcing - for a given model year would be the forcing of the same year. This - would be appropriate in transient runs where the model calendar is - setup to span the same year range as the forcing data. - - - - - integer - - -999 - run_component_drof - env_run.xml - starting year to loop data over (only used when DROF_MODE is CPLHIST) - - - - integer - - -999 - run_component_drof - env_run.xml - ending year to loop data over (only used when DROF_MODE is CPLHIST) - - - - ========================================= - DROF naming conventions - ========================================= - - - diff --git a/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml b/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml deleted file mode 100644 index b49602f4ee2..00000000000 --- a/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml +++ /dev/null @@ -1,690 +0,0 @@ - - - - - - - - - - - - - - char(100) - streams - streams_file - List of streams used for the given drof_mode. - - NULL - rof.cplhist - rof.diatren_ann_rx1 - rof.diatren_ann_ais00_rx1 - rof.diatren_ann_ais45_rx1 - rof.diatren_ann_ais55_rx1 - rof.diatren_iaf_rx1 - rof.diatren_iaf_ais00_rx1 - rof.diatren_iaf_ais45_rx1 - rof.diatren_iaf_ais55_rx1 - rof.iaf_jra - - - - - char - streams - streams_file - Stream domain file directory. - - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/JRA55 - null - - - - - char - streams - streams_file - Stream domain file path(s). 
- - runoff.daitren.annual.20190226.nc - runoff.daitren.annual.20190226.nc - runoff.daitren.annual.20190226.nc - runoff.daitren.annual.20190226.nc - runoff.daitren.iaf.20120419.nc - runoff.daitren.iaf-AISx00.20120419.nc - runoff.daitren.iaf-AISx45.20120419.nc - runoff.daitren.iaf-AISx55.20120419.nc - domain.roff.JRA025.170111.nc - null - - - - - char - streams - streams_file - Stream domain variable name(s). - - - time time - xc lon - yc lat - area area - mask mask - - - time time - xc lon - yc lat - arear area - mask mask - - - time time - xc lon - yc lat - arear area - mask mask - - - time time - xc lon - yc lat - arear area - mask mask - - - time time - xc lon - yc lat - arear area - mask mask - - - time time - domrb_lon lon - domrb_lat lat - domrb_aream area - domrb_mask mask - - - - - - char - streams - streams_file - Stream data file directory. - - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/RX1 - $DIN_LOC_ROOT/lnd/dlnd7/JRA55 - $DROF_CPLHIST_DIR - - - - - char - streams - streams_file - Stream data file path(s). - - runoff.daitren.annual.20190226.nc - runoff.daitren.annual.20190226.nc - runoff.daitren.annual.20190226.nc - runoff.daitren.annual.20190226.nc - runoff.daitren.iaf.20120419.nc - runoff.daitren.iaf-AISx00.20120419.nc - runoff.daitren.iaf-AISx45.20120419.nc - runoff.daitren.iaf-AISx55.20120419.nc - - JRA.v1.1.runoff.1958.170807.nc - JRA.v1.1.runoff.1959.170807.nc - JRA.v1.1.runoff.1960.170807.nc - JRA.v1.1.runoff.1961.170807.nc - JRA.v1.1.runoff.1962.170807.nc - JRA.v1.1.runoff.1963.170807.nc - JRA.v1.1.runoff.1964.170807.nc - JRA.v1.1.runoff.1965.170807.nc - JRA.v1.1.runoff.1966.170807.nc - JRA.v1.1.runoff.1967.170807.nc - JRA.v1.1.runoff.1968.170807.nc - JRA.v1.1.runoff.1969.170807.nc - JRA.v1.1.runoff.1970.170807.nc - JRA.v1.1.runoff.1971.170807.nc - JRA.v1.1.runoff.1972.170807.nc - JRA.v1.1.runoff.1973.170807.nc - JRA.v1.1.runoff.1974.170807.nc - JRA.v1.1.runoff.1975.170807.nc - JRA.v1.1.runoff.1976.170807.nc - JRA.v1.1.runoff.1977.170807.nc - JRA.v1.1.runoff.1978.170807.nc - JRA.v1.1.runoff.1979.170807.nc - JRA.v1.1.runoff.1980.170807.nc - JRA.v1.1.runoff.1981.170807.nc - JRA.v1.1.runoff.1982.170807.nc - JRA.v1.1.runoff.1983.170807.nc - JRA.v1.1.runoff.1984.170807.nc - JRA.v1.1.runoff.1985.170807.nc - JRA.v1.1.runoff.1986.170807.nc - JRA.v1.1.runoff.1987.170807.nc - JRA.v1.1.runoff.1988.170807.nc - JRA.v1.1.runoff.1989.170807.nc - JRA.v1.1.runoff.1990.170807.nc - JRA.v1.1.runoff.1991.170807.nc - JRA.v1.1.runoff.1992.170807.nc - JRA.v1.1.runoff.1993.170807.nc - JRA.v1.1.runoff.1994.170807.nc - JRA.v1.1.runoff.1995.170807.nc - JRA.v1.1.runoff.1996.170807.nc - JRA.v1.1.runoff.1997.170807.nc - JRA.v1.1.runoff.1998.170807.nc - JRA.v1.1.runoff.1999.170807.nc - JRA.v1.1.runoff.2000.170807.nc - JRA.v1.1.runoff.2001.170807.nc - JRA.v1.1.runoff.2002.170807.nc - JRA.v1.1.runoff.2003.170807.nc - JRA.v1.1.runoff.2004.170807.nc - JRA.v1.1.runoff.2005.170807.nc - JRA.v1.1.runoff.2006.170807.nc - JRA.v1.1.runoff.2007.170807.nc - JRA.v1.1.runoff.2008.170807.nc - JRA.v1.1.runoff.2009.170807.nc - JRA.v1.1.runoff.2010.170807.nc - JRA.v1.1.runoff.2011.170807.nc - JRA.v1.1.runoff.2012.170807.nc - JRA.v1.1.runoff.2013.170807.nc - JRA.v1.1.runoff.2014.170807.nc - JRA.v1.1.runoff.2015.170807.nc - JRA.v1.1.runoff.2016.170807.nc - - $DROF_CPLHIST_CASE.cpl.hr2x.%ym.nc - - - - - 
char - streams - streams_file - Stream data variable name(s). - - - runoff rofl - - - runoff rofl - - - runoff rofl - - - runoff rofl - - - rofl rofl - rofi rofi - - - r2x_Forr_rofl rofl - r2x_Forr_rofi rofi - - - - - - integer - streams - streams_file - Stream offset. - - 0 - 0 - - - - - integer - streams - streams_file - Simulation year to align stream to. - - 1 - 1 - 1 - 1 - 1 - $DROF_CPLHIST_YR_ALIGN - - - - - integer - streams - streams_file - First year of stream. - - 1 - 1 - 1 - 1 - 1948 - 1948 - 1948 - 1948 - 1958 - $DROF_CPLHIST_YR_START - - - - - integer - streams - streams_file - Last year of stream. - - 1 - 1 - 1 - 1 - 2009 - 2009 - 2009 - 2009 - 2016 - $DROF_CPLHIST_YR_END - - - - - - - - - - - - - char - streams - shr_strdata_nml - NULL,COPYALL - - The runoff data is associated with the river model. - datamode = "NULL" - NULL is always a valid option and means no data will be generated. - Turns off the data model as a provider of data to the coupler. The - rof_present flag will be set to false and the coupler will assume no - exchange of data to or from the data model. - dataMode = "COPYALL" - Copies all fields directly from the input data streams Any required - fields not found on an input stream will be set to zero. - - - COPYALL - NULL - - - - - char - streams - abs - shr_strdata_nml - - spatial gridfile associated with the strdata. grid information will - be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - - - null - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are just copy (ie. no fill), special value, - nearest neighbor, nearest neighbor in "i" direction, or nearest - neighbor in "j" direction. - valid values: 'copy','spval','nn','nnoni','nnonj' - - - nn - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - plays no role is fill algorithm at the present time. - valid values: "nomask,srcmask,dstmask,bothmask" - - - nomask - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read in instead of computing the - weights on the fly for the fill operation. if this is set, fillalgo - and fillmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the fill operation. this allows a user to - save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - array (up to 30 elements) of masking algorithms for mapping input data - associated with the array of streams. valid options are map only from - valid src points, map only to valid destination points, ignore all - masks, map only from valid src points to valid destination points. - valid values: srcmask, dstmask, nomask,bothmask - - - dstmask - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are copy by index, set to special value, - nearest neighbor, nearest neighbor in "i" direction, nearest neighbor - in "j" direction, or bilinear. 
- valid values: copy,spval,nn,nnoni,nnonj,bilinear - - - bilinear - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read instead of computing - weights on the fly for the mapping (interpolation) operation. if this - is set, mapalgo and mapmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the mapping (interpolation) operation. this - allows a user to save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - coszen,nearest,linear,lower,upper - - array (up to 30 elements) of time interpolation options associated with the array of - streams. - valid values: lower,upper,nearest,linear,coszen - lower = Use lower time-value - upper = Use upper time-value - nearest = Use the nearest time-value - linear = Linearly interpolate between the two time-values - coszen = Scale according to the cosine of the solar zenith angle (for solar) - - - linear - upper - upper - upper - nearest - - - - - char(30) - streams - shr_strdata_nml - extend,cycle,limit - - array of time axis modes associated with the array of streams for - handling data outside the specified stream time axis. - valid options are to cycle the data based on the first, last, and - align settings associated with the stream dataset, to extend the first - and last valid value indefinitely, or to limit the interpolated data - to fall only between the least and greatest valid value of the time array. - valid values: cycle,extend,limit - extend = extrapolate before and after the period by using the first or last value. - cycle = cycle between the range of data - limit = restrict to the period for which the data is valid - - - cycle - - - - - char(30) - streams - shr_strdata_nml - single,full_file - - array (up to 30 elements) of reading mode associated with the array of - streams. specifies the mode of reading temporal stream dataset. - valid options are "single" (read temporal dataset one at a time) or - "full_file" (read all entires of temporal dataset in a given netcdf file) - valid values: single,full_file - - - single - - - - - real(30) - streams - shr_strdata_nml - - array (up to 30 elements) of delta time ratio limits placed on the - time interpolation associated with the array of streams. this real - value causes the model to stop if the ratio of the running maximum - delta time divided by the minimum delta time is greater than the - dtlimit for that stream. for instance, with daily data, the delta - time should be exactly one day throughout the dataset and the computed - maximum divided by minimum delta time should always be 1.0. for - monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. the running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. - - - 3.0 - - - - - char - streams - shr_strdata_nml - - list of paired colon delimited field names that should be treated as - vectors when carrying out spatial interpolation. unlike other - character arrays in this namelist, this array is completely decoupled - from the list of streams. 
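The dtlimit entry above amounts to a simple sanity check on the spacing of a
stream's time axis. A minimal sketch of that check (an illustration only, not
the shr_strdata implementation):

    # Sketch of the dtlimit check described above: flag a stream whose time
    # spacing varies by more than the allowed ratio (e.g. missing records in
    # otherwise-daily data).
    def check_dtlimit(times, dtlimit=3.0):
        deltas = [t1 - t0 for t0, t1 in zip(times, times[1:])]
        ratio = max(deltas) / min(deltas)
        if ratio > dtlimit:
            raise ValueError(f"delta-time ratio {ratio:.2f} exceeds dtlimit {dtlimit}")
        return ratio

    check_dtlimit([0, 1, 2, 3, 4])        # daily spacing -> ratio 1.0
    check_dtlimit([0, 31, 59, 90, 120])   # monthly spacing -> ratio ~1.11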
this is a list of vector pairs that span - all input streams where different fields of the vector pair could - appear in different streams. - for example, vectors = 'u:v','taux:tauy'. - - - null - - - - - char(30) - streams - shr_strdata_nml - - character array (up to 30 elements) of stream input files. this - string is actually parsed by a stream method and so the format is - specified by the stream module. this string consists of a - "stream_input_filename year_align year_first year_last". the - stream_input_filename is a stream text input file and the format and - options are described elsewhere. year_align, year_first, and - year_last provide information about the time axis of the file and how - to relate the input time axis to the model time axis. - - - - - - - - - - - - char - drof - drof_nml - 1d,root - - DROF Decomposition strategy - 1d = Vector decomposition - root = run only on the master task - - - 1d - - - - - logical - drof - drof_nml - If true, prognostic is forced to true. - - .false. - - - - - char - drof - drof_nml - Master restart file name for drof model - - undefined - - - - - char - drof - drof_nml - Stream restart file name for drof model, needed for branch simulations - - undefined - - - - diff --git a/src/components/data_comps/drof/cime_config/user_nl_drof b/src/components/data_comps/drof/cime_config/user_nl_drof deleted file mode 100644 index ffb71d1124a..00000000000 --- a/src/components/data_comps/drof/cime_config/user_nl_drof +++ /dev/null @@ -1,13 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_drof to change namelists variables -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! Note that any namelist variable from shr_strdata_nml and drof_nml can -! be modified below using the above syntax -! User preview_namelists to view (not modify) the output namelist in the -! directory $CASEROOT/CaseDocs -! To modify the contents of a stream txt file, first use preview_namelists -! to obtain the contents of the stream txt files in CaseDocs, and then -! place a copy of the modified stream txt file in $CASEROOT with the string -! user_ prepended. -!------------------------------------------------------------------------ diff --git a/src/components/data_comps/drof/mct/drof_comp_mod.F90 b/src/components/data_comps/drof/mct/drof_comp_mod.F90 deleted file mode 100644 index cb060e02ff8..00000000000 --- a/src/components/data_comps/drof/mct/drof_comp_mod.F90 +++ /dev/null @@ -1,429 +0,0 @@ -#ifdef AIX -@PROCESS ALIAS_SIZE(805306368) -#endif -module drof_comp_mod - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use shr_sys_mod , only: shr_sys_flush, shr_sys_abort - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only: shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only: shr_mpi_bcast - use shr_strdata_mod , only: shr_strdata_type, shr_strdata_pioinit, shr_strdata_init - use shr_strdata_mod , only: shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only: shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only: shr_dmodel_gsmapcreate, shr_dmodel_rearrGGrid - use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV - use shr_cal_mod , only: shr_cal_ymdtod2string - use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - - use drof_shr_mod , only: datamode ! 
namelist input - use drof_shr_mod , only: decomp ! namelist input - use drof_shr_mod , only: rest_file ! namelist input - use drof_shr_mod , only: rest_file_strm ! namelist input - use drof_shr_mod , only: nullstr - - ! - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: drof_comp_init - public :: drof_comp_run - public :: drof_comp_final - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(CS) :: myModelName = 'rof' ! user defined model name - character(len=*), parameter :: rpfile = 'rpointer.rof' - type(mct_rearr) :: rearr - - !-------------------------------------------------------------------------- - integer(IN),parameter :: ktrans = 2 - - character(12),parameter :: avofld(1:ktrans) = & - (/ "Forr_rofl ","Forr_rofi "/) - - character(12),parameter :: avifld(1:ktrans) = & - (/ "rofl ","rofi "/) - !-------------------------------------------------------------------------- - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine drof_comp_init(Eclock, x2r, r2x, & - seq_flds_x2r_fields, seq_flds_r2x_fields, & - SDROF, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart) - - ! !DESCRIPTION: initialize drof model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2r, r2x ! input/output attribute vectors - character(len=*) , intent(in) :: seq_flds_x2r_fields ! fields from mediator - character(len=*) , intent(in) :: seq_flds_r2x_fields ! fields to mediator - type(shr_strdata_type) , intent(inout) :: SDROF ! model shr_strdata instance (output) - type(mct_gsMap) , pointer :: gsMap ! model global seg map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - - !--- local variables --- - integer(IN) :: lsize ! local size - logical :: exists ! file existance logical - integer(IN) :: nu ! unit number - character(CL) :: calendar ! 
model calendar - - !--- formats --- - character(*), parameter :: F00 = "('(drof_comp_init) ',8a)" - character(*), parameter :: F0L = "('(drof_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(drof_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(drof_comp_init) ',a,4es13.6)" - character(*), parameter :: F03 = "('(drof_comp_init) ',a,i8,a)" - character(*), parameter :: F05 = "('(drof_comp_init) ',a,2f10.4)" - character(*), parameter :: F90 = "('(drof_comp_init) ',73('='))" - character(*), parameter :: F91 = "('(drof_comp_init) ',73('-'))" - character(*), parameter :: subName = "(drof_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DROF_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDROF, COMPID) - - !---------------------------------------------------------------------------- - ! Initialize SDROF - !---------------------------------------------------------------------------- - - call t_startf('drof_strdata_init') - - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - - ! NOTE: shr_strdata_init calls shr_dmodel_readgrid which reads the data model - ! grid and from that computes SDROF%gsmap and SDROF%ggrid. DROF%gsmap is created - ! using the decomp '2d1d' (1d decomp of 2d grid) - - call shr_strdata_init(SDROF, mpicom, compid, name='rof', calendar=calendar) - - if (my_task == master_task) then - call shr_strdata_print(SDROF,'SDROF data') - endif - - call t_stopf('drof_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize MCT global seg map, 1d decomp - !---------------------------------------------------------------------------- - - call t_startf('drof_initgsmaps') - if (my_task == master_task) write(logunit,F00) ' initialize gsmaps' - call shr_sys_flush(logunit) - - ! create a data model global seqmap (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the docn_in namelist - ! (which by default is "1d") - call shr_dmodel_gsmapcreate(gsmap, SDROF%nxg*SDROF%nyg, compid, mpicom, decomp) - lsize = mct_gsmap_lsize(gsmap,mpicom) - - ! create a rearranger from the data model SDOCN%gsmap to gsmap - call mct_rearr_init(SDROF%gsmap, gsmap, mpicom, rearr) - - call t_stopf('drof_initgsmaps') - - !---------------------------------------------------------------------------- - ! Initialize MCT domain - !---------------------------------------------------------------------------- - - call t_startf('drof_initmctdom') - if (my_task == master_task) write(logunit,F00) 'copy domains' - call shr_sys_flush(logunit) - - call shr_dmodel_rearrGGrid(SDROF%grid, ggrid, gsmap, rearr, mpicom) - - call t_stopf('drof_initmctdom') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('drof_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - call shr_sys_flush(logunit) - - call mct_aVect_init(x2r, rList=seq_flds_x2r_fields, lsize=lsize) - call mct_aVect_zero(x2r) - - call mct_aVect_init(r2x, rList=seq_flds_r2x_fields, lsize=lsize) - call mct_aVect_zero(r2x) - call t_stopf('drof_initmctavs') - - !------------------------------------------------- - ! 
Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('drof_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - end select - call t_stopf('drof_datamode') - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - if (trim(rest_file) == trim(nullstr) .and. trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer' - call shr_sys_flush(logunit) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (.not.exists) then - write(logunit,F00) ' ERROR: rpointer file does not exist' - call shr_sys_abort(trim(subname)//' ERROR: rpointer file missing') - endif - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - call shr_sys_flush(logunit) - inquire(file=trim(rest_file_strm),exist=exists) - endif - end if - call shr_mpi_bcast(exists,mpicom,'exists') - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDROF,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - call shr_sys_flush(logunit) - end if - - !---------------------------------------------------------------------------- - ! Set initial rof state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - call drof_comp_run(EClock, x2r, r2x, & - SDROF, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit) - call t_adj_detailf(-2) - - if (my_task == master_task) write(logunit,F00) 'drof_comp_init done' - call shr_sys_flush(logunit) - - call t_stopf('DROF_INIT') - - end subroutine drof_comp_init - - !=============================================================================== - subroutine drof_comp_run(EClock, x2r, r2x, & - SDROF, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, case_name) - - ! !DESCRIPTION: run method for drof model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2r - type(mct_aVect) , intent(inout) :: r2x - type(shr_strdata_type) , intent(inout) :: SDROF - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer(IN) , intent(in) :: logunit ! logging unit number - character(CL) , intent(in), optional :: case_name ! case name - - !--- local --- - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! 
model sec into model date - integer(IN) :: yy,mm,dd ! year month day - integer(IN) :: n ! indices - integer(IN) :: nf ! fields loop index - integer(IN) :: lsize ! size of attr vect - logical :: write_restart ! restart now - integer(IN) :: nu ! unit number - integer(IN) :: nflds_r2x - character(len=18) :: date_str - - character(*), parameter :: F00 = "('(drof_comp_run) ',8a)" - character(*), parameter :: F04 = "('(drof_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(drof_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DROF_RUN') - - call t_startf('drof_run1') - - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, curr_yr=yy, curr_mon=mm, curr_day=dd) - write_restart = seq_timemgr_RestartAlarmIsOn(EClock) - - call t_stopf('drof_run1') - - !-------------------- - ! UNPACK - !-------------------- - - ! do nothing currently - - !-------------------- - ! ADVANCE ROF - !-------------------- - - call t_barrierf('drof_r_BARRIER',mpicom) - call t_startf('drof_r') - - call t_startf('drof_r_strdata_advance') - - call shr_strdata_advance(SDROF, currentYMD, currentTOD, mpicom, 'drof_r') - call t_stopf('drof_r_strdata_advance') - - !--- copy streams to r2x --- - call t_barrierf('drof_r_scatter_BARRIER', mpicom) - call t_startf('drof_r_scatter') - do n = 1,SDROF%nstreams - call shr_dmodel_translateAV(SDROF%avs(n), r2x, avifld, avofld, rearr) - enddo - call t_stopf('drof_r_scatter') - - ! zero out "special values" - lsize = mct_avect_lsize(r2x) - nflds_r2x = mct_avect_nRattr(r2x) - do nf=1,nflds_r2x - do n=1,lsize - if (abs(r2x%rAttr(nf,n)) > 1.0e28) then - r2x%rAttr(nf,n) = 0.0_r8 - end if - ! write(6,*)'crrentymd, currenttod, nf,n,r2x= ',currentymd, currenttod, nf,n,r2x%rattr(nf,n) - enddo - enddo - - call t_stopf('drof_r') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('drof_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - end select - call t_stopf('drof_datamode') - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('drof_restart') - call shr_cal_ymdtod2string(date_str, yy, mm, dd, currentTOD) - write(rest_file,"(6a)") & - trim(case_name), '.drof',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.drof',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),currentYMD,currentTOD - call shr_strdata_restWrite(trim(rest_file_strm), SDROF, mpicom, trim(case_name), 'SDROF strdata') - call shr_sys_flush(logunit) - call t_stopf('drof_restart') - end if - - !---------------------------------------------------------------------------- - ! 
Log output for model date - !---------------------------------------------------------------------------- - - call t_startf('drof_run2') - if (my_task == master_task) then - write(logunit,F04) trim(myModelName),': model date ', CurrentYMD,CurrentTOD - call shr_sys_flush(logunit) - end if - call t_stopf('drof_run2') - - call t_stopf('DROF_RUN') - - end subroutine drof_comp_run - - !=============================================================================== - subroutine drof_comp_final(my_task, master_task, logunit) - - ! !DESCRIPTION: finalize method for drof model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - - !--- formats --- - character(*), parameter :: F00 = "('(drof_comp_final) ',8a)" - character(*), parameter :: F91 = "('(drof_comp_final) ',73('-'))" - character(*), parameter :: subName = "(drof_comp_final) " - !------------------------------------------------------------------------------- - - call t_startf('DROF_FINAL') - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) - end if - - call t_stopf('DROF_FINAL') - - end subroutine drof_comp_final - !=============================================================================== - -end module drof_comp_mod diff --git a/src/components/data_comps/drof/mct/drof_shr_mod.F90 b/src/components/data_comps/drof/mct/drof_shr_mod.F90 deleted file mode 100644 index b519984e6bd..00000000000 --- a/src/components/data_comps/drof/mct/drof_shr_mod.F90 +++ /dev/null @@ -1,158 +0,0 @@ -module drof_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: drof_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! input namelist variables - character(CL) , public :: decomp ! decomp strategy - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - character(len=*), public, parameter :: nullstr = 'undefined' - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine drof_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDROF, rof_present, rof_prognostic, rofice_present, flood_present) - - ! 
!DESCRIPTION: Read in drof namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer , intent(in) :: inst_index ! number of current instance (ie. 1) - character(len=16) , intent(in) :: inst_suffix ! char string associated with instance - character(len=16) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(in) :: shrlogunit ! original log unit and level - type(shr_strdata_type) , intent(inout) :: SDROF - logical , intent(out) :: rof_present ! flag - logical , intent(out) :: rof_prognostic ! flag - logical , intent(out) :: rofice_present ! flag - logical , intent(out) :: flood_present ! flag - - !--- local variables --- - character(CL) :: fileName ! generic file name - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - - !--- formats --- - character(*), parameter :: F00 = "('(drof_comp_init) ',8a)" - character(*), parameter :: F0L = "('(drof_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(drof_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(drof_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(drof_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_drof_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / drof_nml / & - decomp, restfilm, restfils, force_prognostic_true - - !---------------------------------------------------------------------------- - ! Determine input filenamname - !---------------------------------------------------------------------------- - - filename = "drof_in"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! Read drof_in - !---------------------------------------------------------------------------- - - filename = "drof_in"//trim(inst_suffix) - decomp = "1d" - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - if (my_task == master_task) then - nunit = shr_file_getUnit() ! get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=drof_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' decomp = ',trim(decomp) - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - endif - call shr_mpi_bcast(decomp ,mpicom,'decomp') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDROF,trim(filename),mpicom=mpicom) - - datamode = trim(SDROF%dataMode) - - ! Validate mode - - if (trim(datamode) == 'NULL' .or. 
& - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) 'drof datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal drof datamode = ',trim(datamode) - call shr_sys_abort() - end if - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flags - !---------------------------------------------------------------------------- - - rof_present = .false. - rofice_present = .false. - rof_prognostic = .false. - flood_present = .false. - if (force_prognostic_true) then - rof_present = .true. - rof_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - rof_present = .true. - rofice_present = .true. - end if - - end subroutine drof_shr_read_namelists - -end module drof_shr_mod diff --git a/src/components/data_comps/drof/mct/rof_comp_mct.F90 b/src/components/data_comps/drof/mct/rof_comp_mct.F90 deleted file mode 100644 index bafdc6d3f98..00000000000 --- a/src/components/data_comps/drof/mct/rof_comp_mct.F90 +++ /dev/null @@ -1,234 +0,0 @@ -module rof_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use drof_comp_mod , only: drof_comp_init, drof_comp_run, drof_comp_final - use drof_shr_mod , only: drof_shr_read_namelists - use seq_flds_mod , only: seq_flds_x2r_fields, seq_flds_r2x_fields - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: rof_init_mct - public :: rof_run_mct - public :: rof_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - type(shr_strdata_type) :: SDROF - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - - character(*), parameter :: F00 = "('(drof_comp_init) ',8a)" - integer(IN) , parameter :: master_task=0 ! task number of master task - character(*), parameter :: subName = "(rof_init_mct) " - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine rof_init_mct( EClock, cdata, x2r, r2x, NLFilename ) - - ! !DESCRIPTION: initialize drof model - implicit none - - ! 
!INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2r, r2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - logical :: rof_present ! flag - logical :: rof_prognostic ! flag - logical :: rofice_present ! flag - logical :: flood_present ! flag - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - logical :: read_restart ! start from restart - integer(IN) :: ierr ! error code - character(*), parameter :: subName = "(rof_init_mct) " - !------------------------------------------------------------------------------- - - ! Set cdata pointers - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, & - read_restart=read_restart) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('rof_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call t_startf('drof_readnml') - - call drof_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDROF, rof_present, rof_prognostic, rofice_present, flood_present) - - call seq_infodata_PutData(infodata, & - rof_present=rof_present, & - rof_prognostic=rof_prognostic, & - rofice_present=rofice_present, & - flood_present=flood_present) - - call t_stopf('drof_readnml') - - !---------------------------------------------------------------------------- - ! RETURN if present flag is false - !---------------------------------------------------------------------------- - - if (.not. rof_present) then - RETURN - end if - - ! NOTE: the following will never be called if rof_present is .false. - - !---------------------------------------------------------------------------- - ! Initialize drof - !---------------------------------------------------------------------------- - - call drof_comp_init(Eclock, x2r, r2x, & - seq_flds_x2r_fields, seq_flds_r2x_fields, & - SDROF, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart) - - !---------------------------------------------------------------------------- - ! Fill infodata that needs to be returned from drof - !---------------------------------------------------------------------------- - - call seq_infodata_PutData(infodata, & - rof_nx=SDROF%nxg, & - rof_ny=SDROF%nyg ) - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to original values - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'drof_comp_init done' - call shr_sys_flush(logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine rof_init_mct - - !=============================================================================== - subroutine rof_run_mct( EClock, cdata, x2r, r2x) - - ! !DESCRIPTION: run method for drof model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2r - type(mct_aVect) ,intent(inout) :: r2x - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(CL) :: case_name ! case name - character(*), parameter :: subName = "(rof_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call seq_infodata_GetData(infodata, case_name=case_name) - - call drof_comp_run(EClock, x2r, r2x, & - SDROF, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, case_name=case_name) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine rof_run_mct - - !=============================================================================== - subroutine rof_final_mct(EClock, cdata, x2r, r2x) - - ! !DESCRIPTION: finalize method for drof model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! 
clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2r - type(mct_aVect) ,intent(inout) :: r2x - - !--- formats --- - character(*), parameter :: subName = "(rof_final_mct) " - !------------------------------------------------------------------------------- - - call drof_comp_final(my_task, master_task, logunit) - - end subroutine rof_final_mct - !=============================================================================== - -end module rof_comp_mct diff --git a/src/components/data_comps/drof/nuopc/drof_comp_mod.F90 b/src/components/data_comps/drof/nuopc/drof_comp_mod.F90 deleted file mode 100644 index 4bd43b3d010..00000000000 --- a/src/components/data_comps/drof/nuopc/drof_comp_mod.F90 +++ /dev/null @@ -1,509 +0,0 @@ -module drof_comp_mod - - use NUOPC , only : NUOPC_Advertise - use ESMF , only : ESMF_State, ESMF_SUCCESS, ESMF_State - use ESMF , only : ESMF_Mesh, ESMF_DistGrid, ESMF_MeshGet, ESMF_DistGridGet - use perf_mod , only : t_startf, t_stopf, t_adj_detailf, t_barrierf - use mct_mod , only : mct_gsmap_init - use mct_mod , only : mct_avect, mct_avect_indexRA, mct_avect_zero, mct_aVect_nRattr - use mct_mod , only : mct_avect_init, mct_avect_lsize - use shr_kind_mod , only : r8=>shr_kind_r8, cxx=>shr_kind_cxx, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_sys_mod , only : shr_sys_abort - use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only : shr_mpi_bcast - use shr_cal_mod , only : shr_cal_calendarname, shr_cal_datetod2string - use shr_strdata_mod , only : shr_strdata_init_model_domain - use shr_strdata_mod , only : shr_strdata_init_streams - use shr_strdata_mod , only : shr_strdata_init_mapping - use shr_strdata_mod , only : shr_strdata_type, shr_strdata_pioinit - use shr_strdata_mod , only : shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only : shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only : shr_dmodel_translateAV - use dshr_methods_mod , only : ChkErr - use dshr_nuopc_mod , only : fld_list_type, dshr_fld_add, dshr_export - use drof_shr_mod , only : datamode ! namelist input - use drof_shr_mod , only : rest_file ! namelist input - use drof_shr_mod , only : rest_file_strm ! namelist input - use drof_shr_mod , only : nullstr - use drof_shr_mod , only : SDROF - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: drof_comp_advertise - public :: drof_comp_init - public :: drof_comp_run - public :: drof_comp_export - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - type(mct_aVect) :: x2r - type(mct_aVect) :: r2x - character(CXX) :: flds_r2x = '' - character(CXX) :: flds_x2r = '' - - character(len=CS), pointer :: avifld(:) ! character array for field names coming from streams - character(len=CS), pointer :: avofld(:) ! 
character array for field names to be sent/received from mediator - - character(len=*), parameter :: rpfile = 'rpointer.rof' - character(*) , parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine drof_comp_advertise(importState, exportState, flds_scalar_name, & - rof_present, rof_prognostic, & - fldsFrRof_num, fldsFrRof, fldsToRof_num, fldsToRof, rc) - - - ! 1. determine export and import fields to advertise to mediator - ! 2. determine translation of fields from streams to export/import fields - - ! input/output arguments - type(ESMF_State) :: importState - type(ESMF_State) :: exportState - character(len=*) , intent(in) :: flds_scalar_name - logical , intent(in) :: rof_present - logical , intent(in) :: rof_prognostic - integer , intent(out) :: fldsFrRof_num - type (fld_list_type) , intent(out) :: fldsFrRof(:) - integer , intent(out) :: fldsToRof_num - type (fld_list_type) , intent(out) :: fldsToRof(:) - integer , intent(out) :: rc - - ! local variables - integer :: n - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - if (.not. rof_present) return - - !------------------- - ! export fields - !------------------- - - ! scalar fields that need to be advertised - - fldsFrRof_num=1 - fldsFrRof(1)%stdname = trim(flds_scalar_name) - - ! export fields that have a corresponding stream field - - call dshr_fld_add(data_fld="rofl", data_fld_array=avifld, model_fld="Forr_rofl", model_fld_array=avofld, & - model_fld_concat=flds_r2x, fldlist_num=fldsFrRof_num, fldlist=fldsFrRof) - - call dshr_fld_add(data_fld="rofi", data_fld_array=avifld, model_fld="Forr_rofi", model_fld_array=avofld, & - model_fld_concat=flds_r2x, fldlist_num=fldsFrRof_num, fldlist=fldsFrRof) - - !------------------- - ! advertise export state - !------------------- - - do n = 1,fldsFrRof_num - call NUOPC_Advertise(exportState, standardName=fldsFrRof(n)%stdname, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - enddo - - end subroutine drof_comp_advertise - - !=============================================================================== - - subroutine drof_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - target_ymd, target_tod, calendar, mesh, nxg, nyg) - - ! !DESCRIPTION: initialize drof model - - ! input/output arguments - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - integer , intent(in) :: target_ymd ! model date - integer , intent(in) :: target_tod ! model sec into model date - character(len=*) , intent(in) :: calendar ! model calendar - type(ESMF_Mesh) , intent(in) :: mesh ! ESMF docn mesh - integer , intent(out) :: nxg, nyg - - ! local variables - integer :: n,k ! generic counters - integer :: ierr ! error code - integer :: lsize ! local size - logical :: exists ! file existance logical - logical :: exists1 ! file existance logical - integer :: nu ! 
unit number - type(ESMF_DistGrid) :: distGrid - integer, allocatable, target :: gindex(:) - integer :: rc - logical :: write_restart - integer :: dimCount - integer :: tileCount - integer :: deCount - integer :: gsize - integer, allocatable :: elementCountPTile(:) - integer, allocatable :: indexCountPDE(:,:) - integer :: spatialDim - integer :: numOwnedElements - real(R8), pointer :: ownedElemCoords(:) - integer :: index_lat, index_lon - real(R8), pointer :: xc(:), yc(:) ! arrays of model latitudes and longitudes - character(*), parameter :: F00 = "('(drof_comp_init) ',8a)" - character(*), parameter :: subName = "(drof_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DROF_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDROF, COMPID) - - !---------------------------------------------------------------------------- - ! Create a data model global segmap - !---------------------------------------------------------------------------- - - call t_startf('drof_strdata_init') - - if (my_task == master_task) write(logunit,F00) ' initialize SDROF gsmap' - - ! obtain the distgrid from the mesh that was read in - call ESMF_MeshGet(Mesh, elementdistGrid=distGrid, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determin local size on my processor - call ESMF_distGridGet(distGrid, localDe=0, elementCount=lsize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global index space for my processor - allocate(gindex(lsize)) - call ESMF_distGridGet(distGrid, localDe=0, seqIndexList=gindex, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global size of distgrid - call ESMF_distGridGet(distGrid, dimCount=dimCount, deCount=deCount, tileCount=tileCount, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - allocate(elementCountPTile(tileCount)) - call ESMF_distGridGet(distGrid, elementCountPTile=elementCountPTile, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - gsize = 0 - do n = 1,size(elementCountPTile) - gsize = gsize + elementCountPTile(n) - end do - deallocate(elementCountPTile) - - ! create the data model gsmap given the local size, global size and gindex - call mct_gsMap_init( SDROF%gsmap, gindex, mpicom, compid, lsize, gsize) - deallocate(gindex) - - !---------------------------------------------------------------------------- - ! Initialize SDROF - !---------------------------------------------------------------------------- - - ! The call to shr_strdata_init_model_domain creates the SDROF%gsmap which - ! is a '2d1d' decommp (1d decomp of 2d grid) and also create SDROF%grid - - SDROF%calendar = trim(shr_cal_calendarName(trim(calendar))) - - call shr_strdata_init_model_domain(SDROF, mpicom, compid, my_task, gsmap=SDROF%gsmap) - - if (my_task == master_task) then - call shr_strdata_print(SDROF,'SDROF data') - endif - - ! 
obtain mesh lats and lons - call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - allocate(ownedElemCoords(spatialDim*numOwnedElements)) - allocate(xc(numOwnedElements), yc(numOwnedElements)) - call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (numOwnedElements /= lsize) then - call shr_sys_abort('ERROR: numOwnedElements is not equal to lsize') - end if - do n = 1,lsize - xc(n) = ownedElemCoords(2*n-1) - yc(n) = ownedElemCoords(2*n) - end do - - ! error check that mesh lats and lons correspond to those on the input domain file - index_lon = mct_aVect_indexRA(SDROF%grid%data,'lon') - do n = 1, lsize - if (abs( SDROF%grid%data%rattr(index_lon,n) - xc(n)) > 1.e-10) then - write(6,*)'ERROR: lon diff = ',abs(SDROF%grid%data%rattr(index_lon,n) - xc(n)),' too large' - call shr_sys_abort() - end if - !SDROF%grid%data%rattr(index_lon,n) = xc(n) ! overwrite ggrid with mesh data - end do - index_lat = mct_aVect_indexRA(SDROF%grid%data,'lat') - do n = 1, lsize - if (abs( SDROF%grid%data%rattr(index_lat,n) - yc(n)) > 1.e-10) then - write(6,*)'ERROR: lat diff = ',abs(SDROF%grid%data%rattr(index_lat,n) - yc(n)),' too large' - call shr_sys_abort() - end if - !SDROF%grid%data%rattr(index_lat,n) = yc(n) ! overwrite ggrid with mesh data - end do - - !---------------------------------------------------------------------------- - ! Initialize SDROF attributes for streams and mapping of streams to model domain - !---------------------------------------------------------------------------- - - call shr_strdata_init_streams(SDROF, compid, mpicom, my_task) - call shr_strdata_init_mapping(SDROF, compid, mpicom, my_task) - - call t_stopf('drof_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('drof_initmctavs') - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - - call mct_aVect_init(x2r, rList=flds_x2r, lsize=lsize) - call mct_aVect_zero(x2r) - call mct_aVect_init(r2x, rList=flds_r2x, lsize=lsize) - call mct_aVect_zero(r2x) - call t_stopf('drof_initmctavs') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('drof_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - end select - call t_stopf('drof_datamode') - - nxg = SDROF%nxg - nyg = SDROF%nyg - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - exists = .false. - exists1 = .false. - if (trim(rest_file) == trim(nullstr) .and. 
& - trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer = ',trim(rpfile) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (exists) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - inquire(file=trim(rest_file),exist=exists1) - endif - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - inquire(file=trim(rest_file_strm),exist=exists) - endif - end if - - call shr_mpi_bcast(exists,mpicom,'exists') - call shr_mpi_bcast(exists1,mpicom,'exists1') - - ! if (exists1) then - ! if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) - ! call shr_pcdf_readwrite('read',SDROF%pio_subsystem, SDROF%io_type, & - ! trim(rest_file),mpicom,gsmap=SDROF%gsmap,rf1=water,rf1n='water',io_format=SDROF%io_format) - ! else - ! if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file) - ! endif - - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDROF,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - end if - - !---------------------------------------------------------------------------- - ! Set initial rof state - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - - write_restart=.false. - call drof_comp_run(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - target_ymd, target_tod) - - if (my_task == master_task) write(logunit,F00) 'drof_comp_init done' - - call t_adj_detailf(-2) - - call t_stopf('DROF_INIT') - - end subroutine drof_comp_init - - !=============================================================================== - - subroutine drof_comp_run(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - target_ymd, target_tod, case_name) - - ! ------------------------------- - ! run method for drof model - ! ------------------------------- - - ! input/output variables - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: write_restart ! write restart - integer , intent(in) :: target_ymd ! model date - integer , intent(in) :: target_tod ! model sec into model date - character(len=*) , intent(in), optional :: case_name ! case name - - ! local variables - integer :: n ! indices - integer :: nf ! fields loop index - integer :: nu ! 
unit number - character(len=18) :: date_str - character(*), parameter :: F00 = "('(drof_comp_run) ',8a)" - character(*), parameter :: F04 = "('(drof_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(drof_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DROF_RUN') - - !-------------------- - ! UNPACK - !-------------------- - - ! do nothing currently - - !-------------------- - ! ADVANCE ROF - !-------------------- - - call t_barrierf('drof_r_BARRIER',mpicom) - call t_startf('drof_r') - - call t_startf('drof_r_strdata_advance') - call shr_strdata_advance(SDROF, target_ymd, target_tod, mpicom, 'drof_r') - call t_stopf('drof_r_strdata_advance') - - !--- copy streams to r2x --- - call t_barrierf('drof_r_scatter_BARRIER', mpicom) - call t_startf('drof_r_scatter') - do n = 1,SDROF%nstreams - call shr_dmodel_translateAV(SDROF%avs(n), r2x, avifld, avofld) - enddo - call t_stopf('drof_r_scatter') - - ! zero out "special values" - do nf = 1, mct_avect_nRattr(r2x) - do n = 1, mct_avect_lsize(r2x) - if (abs(r2x%rAttr(nf,n)) > 1.0e28) then - r2x%rAttr(nf,n) = 0.0_r8 - end if - enddo - enddo - - call t_stopf('drof_r') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('drof_datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - end select - call t_stopf('drof_datamode') - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('drof_restart') - call shr_cal_datetod2string(date_str, target_ymd, target_tod) - write(rest_file,"(6a)") & - trim(case_name), '.drof',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.drof',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),target_ymd,target_tod - call shr_strdata_restWrite(trim(rest_file_strm), SDROF, mpicom, trim(case_name), 'SDROF strdata') - call t_stopf('drof_restart') - end if - - call t_stopf('DROF_RUN') - - end subroutine drof_comp_run - - !=============================================================================== - - subroutine drof_comp_export(exportState, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer , intent(out) :: rc - - ! local variables - integer :: k - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - k = mct_aVect_indexRA(r2x, 'Forr_rofl') - call dshr_export(r2x%rattr(k,:), exportState, 'Forr_rofl', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(r2x, 'Forr_rofi') - call dshr_export(r2x%rattr(k,:), exportState, 'Forr_rofi', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine drof_comp_export - -end module drof_comp_mod diff --git a/src/components/data_comps/drof/nuopc/drof_shr_mod.F90 b/src/components/data_comps/drof/nuopc/drof_shr_mod.F90 deleted file mode 100644 index 92a56d61b34..00000000000 --- a/src/components/data_comps/drof/nuopc/drof_shr_mod.F90 +++ /dev/null @@ -1,141 +0,0 @@ -module drof_shr_mod - - ! 
!USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: drof_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! stream data type - type(shr_strdata_type), public :: SDROF - - ! input namelist variables - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - character(len=*), public, parameter :: nullstr = 'undefined' - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine drof_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, rof_present, rof_prognostic) - - ! !DESCRIPTION: Read in drof namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: filename ! input namelist filename - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(out) :: rof_present ! flag - logical , intent(out) :: rof_prognostic ! flag - - !--- local variables --- - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - character(CL) :: decomp ! decomp strategy - not used for NUOPC - but still needed in namelist for now - - !--- formats --- - character(*), parameter :: F00 = "('(drof_comp_init) ',8a)" - character(*), parameter :: F0L = "('(drof_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(drof_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(drof_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(drof_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_drof_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / drof_nml / decomp, & - restfilm, restfils, force_prognostic_true - - !---------------------------------------------------------------------------- - ! Read drof_in - !---------------------------------------------------------------------------- - - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - if (my_task == master_task) then - nunit = shr_file_getUnit() ! 
get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=drof_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - endif - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDROF,trim(filename),mpicom=mpicom) - - datamode = trim(SDROF%dataMode) - - ! Validate mode - - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) 'drof datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal drof datamode = ',trim(datamode) - call shr_sys_abort() - end if - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flags - !---------------------------------------------------------------------------- - - rof_present = .true. - if (trim(datamode) == 'NULL') then - rof_present = .false. - end if - - rof_prognostic = .false. - if (force_prognostic_true) then - rof_prognostic = .true. - end if - - end subroutine drof_shr_read_namelists - -end module drof_shr_mod diff --git a/src/components/data_comps/drof/nuopc/rof_comp_nuopc.F90 b/src/components/data_comps/drof/nuopc/rof_comp_nuopc.F90 deleted file mode 100644 index db27db4a0a8..00000000000 --- a/src/components/data_comps/drof/nuopc/rof_comp_nuopc.F90 +++ /dev/null @@ -1,531 +0,0 @@ -module rof_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
This is the NUOPC cap for DROF - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_cal_mod , only : shr_cal_noleap, shr_cal_gregorian, shr_cal_ymd2date - use dshr_nuopc_mod , only : fld_list_type, fldsMax, dshr_realize - use dshr_nuopc_mod , only : ModelInitPhase, ModelSetRunClock, ModelSetMetaData - use dshr_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dshr_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use drof_shr_mod , only : drof_shr_read_namelists - use drof_comp_mod , only : drof_comp_init, drof_comp_run, drof_comp_advertise - use drof_comp_mod , only : drof_comp_export - - implicit none - private ! except - - public :: SetServices - - private :: InitializeAdvertise - private :: InitializeRealize - private :: ModelAdvance - private :: ModelFinalize - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CS) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - - integer :: fldsToRof_num = 0 - integer :: fldsFrRof_num = 0 - type (fld_list_type) :: fldsToRof(fldsMax) - type (fld_list_type) :: fldsFrRof(fldsMax) - - integer :: compid ! mct comp id - integer :: mpicom ! mpi communicator - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - logical :: rof_prognostic ! flag - character(CL) :: case_name ! case name - character(CL) :: tmpstr ! tmp string - character(len=80) :: calendar ! calendar name - logical :: use_esmf_metadata = .false. - character(*),parameter :: modName = "(rof_comp_nuopc)" - integer, parameter :: debug_import = 0 ! if > 0 will diagnose import fields - integer, parameter :: debug_export = 0 ! if > 0 will diagnose export fields - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! Local varaibles - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - !-------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! 
switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p3"/), userRoutine=InitializeRealize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - logical :: rof_present ! flag - type(ESMF_VM) :: vm - integer :: lmpicom - character(len=CL) :: cvalue - integer :: n - integer :: ierr ! error code - integer :: shrlogunit ! original log unit - character(len=CL) :: diro - character(len=CL) :: logfile - integer :: localPet - character(len=CL) :: fileName ! generic file name - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! generate local mpi comm - !---------------------------------------------------------------------------- - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, mpiCommunicator=lmpicom, localPet=localPet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call mpi_comm_dup(lmpicom, mpicom, ierr) - call mpi_comm_rank(mpicom, my_task, ierr) - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! 
Read input namelists and set present and prognostic flags - !---------------------------------------------------------------------------- - - filename = "drof_in"//trim(inst_suffix) - call drof_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, rof_present, rof_prognostic) - - !-------------------------------- - ! advertise export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call drof_comp_advertise(importState, exportState, flds_scalar_name, & - rof_present, rof_prognostic, & - fldsFrRof_num, fldsFrRof, fldsToRof_num, fldsToRof, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_Mesh) :: Emesh - type(ESMF_TIME) :: currTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_Calendar) :: esmf_calendar ! esmf calendar - type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - character(CL) :: cvalue - integer :: shrlogunit ! original log unit - integer :: ierr ! error code - integer :: current_ymd ! model date - integer :: current_year ! model year - integer :: current_mon ! model month - integer :: current_day ! model day - integer :: current_tod ! model sec into model date - logical :: read_restart ! 
start from restart - integer :: nxg, nyg - character(len=*), parameter :: F00 = "('rof_comp_nuopc: ')',8a)" - character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! Determine necessary config variables - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='case_name', value=case_name, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name='read_restart', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) read_restart - - call NUOPC_CompAttributeGet(gcomp, name='MCTID', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) compid - - !---------------------------------------------------------------------------- - ! Determine calendar info - !---------------------------------------------------------------------------- - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_TimeGet( currTime, yy=current_year, mm=current_mon, dd=current_day, s=current_tod, & - calkindflag=esmf_caltype, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(current_year, current_mon, current_day, current_ymd) - - if (esmf_caltype == ESMF_CALKIND_NOLEAP) then - calendar = shr_cal_noleap - else if (esmf_caltype == ESMF_CALKIND_GREGORIAN) then - calendar = shr_cal_gregorian - else - call ESMF_LogWrite(subname//" ERROR bad ESMF calendar name "//trim(calendar), ESMF_LOGMSG_ERROR) - rc = ESMF_Failure - return - end if - - !-------------------------------- - ! Generate the mesh - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='mesh_rof', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (my_task == master_task) then - write(logunit,*) " obtaining drof mesh from " // trim(cvalue) - end if - - Emesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize model - !---------------------------------------------------------------------------- - - call drof_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - current_ymd, current_tod, calendar, Emesh, nxg, nyg) - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call dshr_realize( & - state=ExportState, & - fldList=fldsFrRof, & - numflds=fldsFrRof_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':drofExport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! No import state for now - - !-------------------------------- - ! Pack export state - ! 
Copy from r2x to exportState - ! Set the coupling scalars - !-------------------------------- - - call drof_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug_export > 0) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - if (use_esmf_metadata) then - call ModelSetMetaData(gcomp, name='DROF', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_Alarm) :: alarm - type(ESMF_Time) :: currTime, nextTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_State) :: importState, exportState - integer :: shrlogunit ! original log unit - logical :: write_restart ! write restart - logical :: read_restart ! read restart - integer :: yr ! year - integer :: mon ! month - integer :: day ! day in month - integer :: next_ymd ! model date - integer :: next_tod ! model sec into model date - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - call memcheck(subname, 5, my_task == master_task) - - !-------------------------------- - ! Reset shr logging to my log file - !-------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! query the Component for its clock, importState and exportState - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, exportState=exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Unpack import state - !-------------------------------- - - if (rof_prognostic) then - ! Do nothing for now - end if - - !-------------------------------- - ! Run model - !-------------------------------- - - ! Determine if need to write restarts - - call ESMF_ClockGetAlarm(clock, alarmname='alarm_restart', alarm=alarm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (ESMF_AlarmIsRinging(alarm, rc=rc)) then - if (ChkErr(rc,__LINE__,u_FILE_u)) return - write_restart = .true. - call ESMF_AlarmRingerOff( alarm, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - write_restart = .false. - endif - - ! For nuopc - the component clock is advanced at the end of the time interval - ! For these to match for now - need to advance nuopc one timestep ahead for - ! 
shr_strdata time interpolation - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - nextTime = currTime + timeStep - call ESMF_TimeGet( nextTime, yy=yr, mm=mon, dd=day, s=next_tod, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(yr, mon, day, next_ymd) - - ! Run the model - - read_restart = .false. - call drof_comp_run(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - next_ymd, next_tod, case_name) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call drof_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (debug_export > 0) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'DROF', logunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - endif - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(*), parameter :: F00 = "('(drof_comp_final) ',8a)" - character(*), parameter :: F91 = "('(drof_comp_final) ',73('-'))" - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) 'drof : end of main integration loop' - write(logunit,F91) - end if - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelFinalize - -end module rof_comp_nuopc diff --git a/src/components/data_comps/dshr_nuopc/dshr_methods_mod.F90 b/src/components/data_comps/dshr_nuopc/dshr_methods_mod.F90 deleted file mode 100644 index 0cf4da07fcc..00000000000 --- a/src/components/data_comps/dshr_nuopc/dshr_methods_mod.F90 +++ /dev/null @@ -1,840 +0,0 @@ -module dshr_methods_mod - - use ESMF , only : operator(<), operator(/=), operator(+) - use ESMF , only : operator(-), operator(*) , operator(>=) - use ESMF , only : operator(<=), operator(>), operator(==) - use ESMF , only : ESMF_LOGERR_PASSTHRU, ESMF_LogFoundError, ESMF_LOGMSG_ERROR, ESMF_MAXSTR - use ESMF , only : ESMF_SUCCESS, ESMF_LogWrite, ESMF_LOGMSG_INFO, ESMF_FAILURE - use ESMF , only : ESMF_State, ESMF_StateGet - use ESMF , only : ESMF_Field, ESMF_FieldGet - use ESMF , only : ESMF_GridComp, ESMF_GridCompGet, ESMF_GridCompSet - use ESMF , only : ESMF_GeomType_Flag, ESMF_FieldStatus_Flag - use ESMF , only : ESMF_Mesh, ESMF_MeshGet - use ESMF , only : ESMF_GEOMTYPE_MESH, ESMF_GEOMTYPE_GRID, ESMF_FIELDSTATUS_COMPLETE - use ESMF , only : ESMF_Clock, ESMF_ClockCreate, ESMF_ClockGet, ESMF_ClockSet - use ESMF , only : ESMF_ClockPrint, ESMF_ClockAdvance - use ESMF , only : ESMF_Alarm, ESMF_AlarmCreate, ESMF_AlarmGet, ESMF_AlarmSet - use ESMF , only : ESMF_Calendar, 
ESMF_CALKIND_NOLEAP, ESMF_CALKIND_GREGORIAN - use ESMF , only : ESMF_Time, ESMF_TimeGet, ESMF_TimeSet - use ESMF , only : ESMF_TimeInterval, ESMF_TimeIntervalSet, ESMF_TimeIntervalGet - use ESMF , only : ESMF_VM, ESMF_VMGet, ESMF_VMBroadcast, ESMF_VMGetCurrent - use NUOPC , only : NUOPC_CompAttributeGet - use NUOPC_Model , only : NUOPC_ModelGet - use shr_kind_mod , only : r8 => shr_kind_r8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_setlogunit, shr_file_getLogUnit - - implicit none - private - - public :: memcheck - public :: get_component_instance - public :: set_component_logging - public :: log_clock_advance - public :: state_getscalar - public :: state_setscalar - public :: state_diagnose - public :: alarmInit - public :: chkerr - - private :: timeInit - private :: field_getfldptr - - ! Clock and alarm options - character(len=*), private, parameter :: & - optNONE = "none" , & - optNever = "never" , & - optNSteps = "nsteps" , & - optNStep = "nstep" , & - optNSeconds = "nseconds" , & - optNSecond = "nsecond" , & - optNMinutes = "nminutes" , & - optNMinute = "nminute" , & - optNHours = "nhours" , & - optNHour = "nhour" , & - optNDays = "ndays" , & - optNDay = "nday" , & - optNMonths = "nmonths" , & - optNMonth = "nmonth" , & - optNYears = "nyears" , & - optNYear = "nyear" , & - optMonthly = "monthly" , & - optYearly = "yearly" , & - optDate = "date" , & - optIfdays0 = "ifdays0" - - ! Module data - integer, parameter :: SecPerDay = 86400 ! Seconds per day - integer, parameter :: memdebug_level=1 - character(len=1024) :: msgString - character(len=*), parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine memcheck(string, level, mastertask) - - ! input/output variables - character(len=*) , intent(in) :: string - integer , intent(in) :: level - logical , intent(in) :: mastertask - - ! local variables - integer :: ierr - integer, external :: GPTLprint_memusage - !----------------------------------------------------------------------- - - if ((mastertask .and. memdebug_level > level) .or. memdebug_level > level+1) then - ierr = GPTLprint_memusage(string) - endif - - end subroutine memcheck - -!=============================================================================== - - subroutine get_component_instance(gcomp, inst_suffix, inst_index, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - character(len=*) , intent(out) :: inst_suffix - integer , intent(out) :: inst_index - integer , intent(out) :: rc - - ! local variables - logical :: isPresent - character(len=4) :: cvalue - !----------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call NUOPC_CompAttributeGet(gcomp, name="inst_suffix", isPresent=isPresent, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (isPresent) then - call NUOPC_CompAttributeGet(gcomp, name="inst_suffix", value=inst_suffix, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - cvalue = inst_suffix(2:) - read(cvalue, *) inst_index - else - inst_suffix = "" - inst_index=1 - endif - - end subroutine get_component_instance - -!=============================================================================== - - subroutine set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) - - ! 
input/output variables - type(ESMF_GridComp) :: gcomp - logical, intent(in) :: mastertask - integer, intent(out) :: logunit - integer, intent(out) :: shrlogunit - integer, intent(out) :: rc - - ! local variables - character(len=CL) :: diro - character(len=CL) :: logfile - !----------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - shrlogunit = 6 - - if (mastertask) then - call NUOPC_CompAttributeGet(gcomp, name="diro", value=diro, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompAttributeGet(gcomp, name="logfile", value=logfile, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - open(newunit=logunit,file=trim(diro)//"/"//trim(logfile)) - else - logUnit = 6 - endif - - call shr_file_setLogUnit (logunit) - - end subroutine set_component_logging - -!=============================================================================== - - subroutine log_clock_advance(clock, component, logunit, rc) - - ! input/output variables - type(ESMF_Clock) :: clock - character(len=*) , intent(in) :: component - integer , intent(in) :: logunit - integer , intent(out) :: rc - - ! local variables - character(len=CL) :: cvalue, prestring - !----------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - write(prestring, *) "------>Advancing ",trim(component)," from: " - call ESMF_ClockPrint(clock, options="currTime", unit=cvalue, preString=trim(prestring), rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - write(logunit, *) trim(cvalue) - - call ESMF_ClockPrint(clock, options="stopTime", unit=cvalue, & - preString="--------------------------------> to: ", rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - write(logunit, *) trim(cvalue) - - end subroutine log_clock_advance - -!=============================================================================== - - subroutine state_getscalar(state, scalar_id, scalar_value, flds_scalar_name, flds_scalar_num, rc) - - ! ---------------------------------------------- - ! Get scalar data from State for a particular name and broadcast it to all other pets - ! ---------------------------------------------- - - ! input/output variables - type(ESMF_State), intent(in) :: state - integer, intent(in) :: scalar_id - real(r8), intent(out) :: scalar_value - character(len=*), intent(in) :: flds_scalar_name - integer, intent(in) :: flds_scalar_num - integer, intent(inout) :: rc - - ! local variables - integer :: mytask, ierr, len - type(ESMF_VM) :: vm - type(ESMF_Field) :: field - real(r8), pointer :: farrayptr(:,:) - real(r8) :: tmp(1) - character(len=*), parameter :: subname='(state_getscalar)' - ! ---------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_VMGetCurrent(vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localPet=mytask, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_StateGet(State, itemName=trim(flds_scalar_name), field=field, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (mytask == 0) then - call ESMF_FieldGet(field, farrayPtr = farrayptr, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (scalar_id < 0 .or. 
scalar_id > flds_scalar_num) then - call ESMF_LogWrite(trim(subname)//": ERROR in scalar_id", ESMF_LOGMSG_INFO, line=__LINE__, file=u_FILE_u) - rc = ESMF_FAILURE - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - endif - tmp(:) = farrayptr(scalar_id,:) - endif - call ESMF_VMBroadCast(vm, tmp, 1, 0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - scalar_value = tmp(1) - - end subroutine state_getscalar - -!================================================================================ - - subroutine state_setscalar(scalar_value, scalar_id, State, flds_scalar_name, flds_scalar_num, rc) - - ! ---------------------------------------------- - ! Set scalar data from State for a particular name - ! ---------------------------------------------- - - ! input/output arguments - real(r8), intent(in) :: scalar_value - integer, intent(in) :: scalar_id - type(ESMF_State), intent(inout) :: State - character(len=*), intent(in) :: flds_scalar_name - integer, intent(in) :: flds_scalar_num - integer, intent(inout) :: rc - - ! local variables - integer :: mytask - type(ESMF_Field) :: lfield - type(ESMF_VM) :: vm - real(r8), pointer :: farrayptr(:,:) - character(len=*), parameter :: subname='(state_setscalar)' - ! ---------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_VMGetCurrent(vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localPet=mytask, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_StateGet(State, itemName=trim(flds_scalar_name), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (mytask == 0) then - call ESMF_FieldGet(lfield, farrayPtr = farrayptr, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (scalar_id < 0 .or. scalar_id > flds_scalar_num) then - call ESMF_LogWrite(trim(subname)//": ERROR in scalar_id", ESMF_LOGMSG_INFO) - rc = ESMF_FAILURE - return - endif - farrayptr(scalar_id,1) = scalar_value - endif - - end subroutine state_setscalar - -!=============================================================================== - - subroutine state_diagnose(State, string, rc) - - ! ---------------------------------------------- - ! Diagnose status of State - ! ---------------------------------------------- - - type(ESMF_State), intent(in) :: state - character(len=*), intent(in) :: string - integer , intent(out) :: rc - - ! local variables - integer :: i,j,n - type(ESMf_Field) :: lfield - integer :: fieldCount, lrank - character(ESMF_MAXSTR) ,pointer :: lfieldnamelist(:) - real(r8), pointer :: dataPtr1d(:) - real(r8), pointer :: dataPtr2d(:,:) - character(len=*),parameter :: subname='(state_diagnose)' - ! ---------------------------------------------- - - call ESMF_StateGet(state, itemCount=fieldCount, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - allocate(lfieldnamelist(fieldCount)) - - call ESMF_StateGet(state, itemNameList=lfieldnamelist, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - do n = 1, fieldCount - - call ESMF_StateGet(state, itemName=lfieldnamelist(n), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call field_getfldptr(lfield, fldptr1=dataPtr1d, fldptr2=dataPtr2d, rank=lrank, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (lrank == 0) then - ! 
no local data - elseif (lrank == 1) then - if (size(dataPtr1d) > 0) then - write(msgString,'(A,3g14.7,i8)') trim(string)//': '//trim(lfieldnamelist(n)), & - minval(dataPtr1d), maxval(dataPtr1d), sum(dataPtr1d), size(dataPtr1d) - else - write(msgString,'(A,a)') trim(string)//': '//trim(lfieldnamelist(n))," no data" - endif - elseif (lrank == 2) then - if (size(dataPtr2d) > 0) then - write(msgString,'(A,3g14.7,i8)') trim(string)//': '//trim(lfieldnamelist(n)), & - minval(dataPtr2d), maxval(dataPtr2d), sum(dataPtr2d), size(dataPtr2d) - else - write(msgString,'(A,a)') trim(string)//': '//trim(lfieldnamelist(n))," no data" - endif - else - call ESMF_LogWrite(trim(subname)//": ERROR rank not supported ", ESMF_LOGMSG_ERROR) - rc = ESMF_FAILURE - return - endif - call ESMF_LogWrite(trim(msgString), ESMF_LOGMSG_INFO) - enddo - - deallocate(lfieldnamelist) - - end subroutine state_diagnose - -!=============================================================================== - - subroutine field_getfldptr(field, fldptr1, fldptr2, rank, abort, rc) - - ! ---------------------------------------------- - ! for a field, determine rank and return fldptr1 or fldptr2 - ! abort is true by default and will abort if fldptr is not yet allocated in field - ! rank returns 0, 1, or 2. 0 means fldptr not allocated and abort=false - ! ---------------------------------------------- - - ! input/output variables - type(ESMF_Field) , intent(in) :: field - real(r8), pointer , intent(inout), optional :: fldptr1(:) - real(r8), pointer , intent(inout), optional :: fldptr2(:,:) - integer , intent(out) , optional :: rank - logical , intent(in) , optional :: abort - integer , intent(out) , optional :: rc - - ! local variables - type(ESMF_GeomType_Flag) :: geomtype - type(ESMF_FieldStatus_Flag) :: status - type(ESMF_Mesh) :: lmesh - integer :: lrank, nnodes, nelements - logical :: labort - character(len=*), parameter :: subname='(field_getfldptr)' - ! ---------------------------------------------- - - if (.not.present(rc)) then - call ESMF_LogWrite(trim(subname)//": ERROR rc not present ", & - ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) - rc = ESMF_FAILURE - return - endif - - rc = ESMF_SUCCESS - - labort = .true. - if (present(abort)) then - labort = abort - endif - lrank = -99 - - call ESMF_FieldGet(field, status=status, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (status /= ESMF_FIELDSTATUS_COMPLETE) then - lrank = 0 - if (labort) then - call ESMF_LogWrite(trim(subname)//": ERROR data not allocated ", ESMF_LOGMSG_INFO, rc=rc) - rc = ESMF_FAILURE - return - else - call ESMF_LogWrite(trim(subname)//": WARNING data not allocated ", ESMF_LOGMSG_INFO, rc=rc) - endif - else - - call ESMF_FieldGet(field, geomtype=geomtype, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (geomtype == ESMF_GEOMTYPE_GRID) then - call ESMF_FieldGet(field, rank=lrank, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - elseif (geomtype == ESMF_GEOMTYPE_MESH) then - call ESMF_FieldGet(field, rank=lrank, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_FieldGet(field, mesh=lmesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_MeshGet(lmesh, numOwnedNodes=nnodes, numOwnedElements=nelements, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (nnodes == 0 .and. nelements == 0) lrank = 0 - else - call ESMF_LogWrite(trim(subname)//": ERROR geomtype not supported ", & - ESMF_LOGMSG_INFO, rc=rc) - rc = ESMF_FAILURE - return - endif ! 
geomtype - - if (lrank == 0) then - call ESMF_LogWrite(trim(subname)//": no local nodes or elements ", & - ESMF_LOGMSG_INFO) - elseif (lrank == 1) then - if (.not.present(fldptr1)) then - call ESMF_LogWrite(trim(subname)//": ERROR missing rank=1 array ", & - ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) - rc = ESMF_FAILURE - return - endif - call ESMF_FieldGet(field, farrayPtr=fldptr1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - elseif (lrank == 2) then - if (.not.present(fldptr2)) then - call ESMF_LogWrite(trim(subname)//": ERROR missing rank=2 array ", & - ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) - rc = ESMF_FAILURE - return - endif - call ESMF_FieldGet(field, farrayPtr=fldptr2, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - call ESMF_LogWrite(trim(subname)//": ERROR in rank ", & - ESMF_LOGMSG_ERROR, line=__LINE__, file=u_FILE_u) - rc = ESMF_FAILURE - return - endif - - endif ! status - - if (present(rank)) then - rank = lrank - endif - - end subroutine field_getfldptr - -!=============================================================================== - - subroutine alarmInit( clock, alarm, option, & - opt_n, opt_ymd, opt_tod, RefTime, alarmname, rc) - - ! Setup an alarm in a clock - ! Notes: The ringtime sent to AlarmCreate MUST be the next alarm - ! time. If you send an arbitrary but proper ringtime from the - ! past and the ring interval, the alarm will always go off on the - ! next clock advance and this will cause serious problems. Even - ! if it makes sense to initialize an alarm with some reference - ! time and the alarm interval, that reference time has to be - ! advance forward to be >= the current time. In the logic below - ! we set an appropriate "NextAlarm" and then we make sure to - ! advance it properly based on the ring interval. - - ! input/output variables - type(ESMF_Clock) , intent(inout) :: clock ! clock - type(ESMF_Alarm) , intent(inout) :: alarm ! alarm - character(len=*) , intent(in) :: option ! alarm option - integer , optional , intent(in) :: opt_n ! alarm freq - integer , optional , intent(in) :: opt_ymd ! alarm ymd - integer , optional , intent(in) :: opt_tod ! alarm tod (sec) - type(ESMF_Time) , optional , intent(in) :: RefTime ! ref time - character(len=*) , optional , intent(in) :: alarmname ! alarm name - integer , intent(inout) :: rc ! Return code - - ! local variables - type(ESMF_Calendar) :: cal ! calendar - integer :: lymd ! local ymd - integer :: ltod ! local tod - integer :: cyy,cmm,cdd,csec ! time info - character(len=64) :: lalarmname ! local alarm name - logical :: update_nextalarm ! update next alarm - type(ESMF_Time) :: CurrTime ! Current Time - type(ESMF_Time) :: NextAlarm ! Next restart alarm time - type(ESMF_TimeInterval) :: AlarmInterval ! Alarm interval - integer :: sec - character(len=*), parameter :: subname = '(set_alarmInit): ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - lalarmname = 'alarm_unknown' - if (present(alarmname)) lalarmname = trim(alarmname) - ltod = 0 - if (present(opt_tod)) ltod = opt_tod - lymd = -1 - if (present(opt_ymd)) lymd = opt_ymd - - call ESMF_ClockGet(clock, CurrTime=CurrTime, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_TimeGet(CurrTime, yy=cyy, mm=cmm, dd=cdd, s=csec, rc=rc ) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! initial guess of next alarm, this will be updated below - if (present(RefTime)) then - NextAlarm = RefTime - else - NextAlarm = CurrTime - endif - - ! 
Determine calendar - call ESMF_ClockGet(clock, calendar=cal) - - ! Determine inputs for call to create alarm - selectcase (trim(option)) - - case (optNONE) - call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_TimeSet( NextAlarm, yy=9999, mm=12, dd=1, s=0, calendar=cal, rc=rc ) - if (chkerr(rc,__LINE__,u_FILE_u)) return - update_nextalarm = .false. - - case (optNever) - call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_TimeSet( NextAlarm, yy=9999, mm=12, dd=1, s=0, calendar=cal, rc=rc ) - if (chkerr(rc,__LINE__,u_FILE_u)) return - update_nextalarm = .false. - - case (optDate) - if (.not. present(opt_ymd)) then - call shr_sys_abort(subname//trim(option)//' requires opt_ymd') - end if - if (lymd < 0 .or. ltod < 0) then - call shr_sys_abort(subname//trim(option)//'opt_ymd, opt_tod invalid') - end if - call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call timeInit(NextAlarm, lymd, cal, ltod, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - update_nextalarm = .false. - - case (optIfdays0) - if (.not. present(opt_ymd)) then - call shr_sys_abort(subname//trim(option)//' requires opt_ymd') - end if - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=opt_n, s=0, calendar=cal, rc=rc ) - if (chkerr(rc,__LINE__,u_FILE_u)) return - update_nextalarm = .true. - - case (optNSteps) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_ClockGet(clock, TimeStep=AlarmInterval, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNStep) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - call ESMF_ClockGet(clock, TimeStep=AlarmInterval, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNSeconds) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, s=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNSecond) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, s=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. 
- - case (optNMinutes) - call ESMF_TimeIntervalSet(AlarmInterval, s=60, rc=rc) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNMinute) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, s=60, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNHours) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, s=3600, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNHour) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, s=3600, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNDays) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, d=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNDay) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, d=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNMonths) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNMonth) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optMonthly) - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=1, s=0, calendar=cal, rc=rc ) - if (chkerr(rc,__LINE__,u_FILE_u)) return - update_nextalarm = .true. 
- - case (optNYears) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optNYear) - if (.not.present(opt_n)) then - call shr_sys_abort(subname//trim(option)//' requires opt_n') - end if - if (opt_n <= 0) then - call shr_sys_abort(subname//trim(option)//' invalid opt_n') - end if - call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - AlarmInterval = AlarmInterval * opt_n - update_nextalarm = .true. - - case (optYearly) - call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_TimeSet( NextAlarm, yy=cyy, mm=1, dd=1, s=0, calendar=cal, rc=rc ) - if (chkerr(rc,__LINE__,u_FILE_u)) return - update_nextalarm = .true. - - case default - call shr_sys_abort(subname//'unknown option '//trim(option)) - - end select - - ! -------------------------------------------------------------------------------- - ! --- AlarmInterval and NextAlarm should be set --- - ! -------------------------------------------------------------------------------- - - ! --- advance Next Alarm so it won't ring on first timestep for - ! --- most options above. go back one alarminterval just to be careful - - if (update_nextalarm) then - NextAlarm = NextAlarm - AlarmInterval - do while (NextAlarm <= CurrTime) - NextAlarm = NextAlarm + AlarmInterval - enddo - endif - - alarm = ESMF_AlarmCreate( name=lalarmname, clock=clock, ringTime=NextAlarm, & - ringInterval=AlarmInterval, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine alarmInit - -!=============================================================================== - - subroutine timeInit( Time, ymd, cal, tod, rc) - - ! Create the ESMF_Time object corresponding to the given input time, - ! given in YMD (Year Month Day) and TOD (Time-of-day) format. - ! Set the time by an integer as YYYYMMDD and integer seconds in the day - - ! input/output parameters: - type(ESMF_Time) , intent(inout) :: Time ! ESMF time - integer , intent(in) :: ymd ! year, month, day YYYYMMDD - type(ESMF_Calendar) , intent(in) :: cal ! ESMF calendar - integer , intent(in) :: tod ! time of day in seconds - integer , intent(out) :: rc - - ! local variables - integer :: year, mon, day ! year, month, day as integers - integer :: tdate ! temporary date - integer :: date ! coded-date (yyyymmdd) - character(len=*), parameter :: subname='(timeInit)' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - if ( (ymd < 0) .or. (tod < 0) .or. (tod > SecPerDay) )then - call shr_sys_abort( subname//'ERROR yymmdd is a negative number or time-of-day out of bounds' ) - end if - - tdate = abs(date) - year = int(tdate/10000) - if (date < 0) year = -year - mon = int( mod(tdate,10000)/ 100) - day = mod(tdate, 100) - - call ESMF_TimeSet( Time, yy=year, mm=mon, dd=day, s=tod, calendar=cal, rc=rc ) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine timeInit - -!=============================================================================== - - logical function chkerr(rc, line, file) - - integer, intent(in) :: rc - integer, intent(in) :: line - character(len=*), intent(in) :: file - - integer :: lrc - - chkerr = .false. 
- lrc = rc - if (ESMF_LogFoundError(rcToCheck=lrc, msg=ESMF_LOGERR_PASSTHRU, line=line, file=file)) then - chkerr = .true. - endif - end function chkerr - -!=============================================================================== - -end module dshr_methods_mod diff --git a/src/components/data_comps/dshr_nuopc/dshr_nuopc_mod.F90 b/src/components/data_comps/dshr_nuopc/dshr_nuopc_mod.F90 deleted file mode 100644 index fea0711884f..00000000000 --- a/src/components/data_comps/dshr_nuopc/dshr_nuopc_mod.F90 +++ /dev/null @@ -1,606 +0,0 @@ -module dshr_nuopc_mod - - use NUOPC - use NUOPC_Model - use ESMF - use dshr_methods_mod , only : alarmInit, chkerr - use shr_kind_mod , only : r8=>shr_kind_r8, cs=>shr_kind_cs, cxx=>shr_kind_cxx - use shr_string_mod , only : shr_string_listGetIndex - use shr_sys_mod , only : shr_sys_abort - - implicit none - public - - public :: dshr_fld_add - public :: dshr_import - public :: dshr_export - public :: dshr_realize - public :: ModelInitPhase ! TODO: rename to dshr_modelinit - public :: ModelSetRunClock ! TODO: rename to dshr_setrunclock - public :: ModelSetMetaData ! TODO rename to dshr_setmetadata - - type fld_list_type - character(len=128) :: stdname - integer :: ungridded_lbound = 0 - integer :: ungridded_ubound = 0 - end type fld_list_type - - interface dshr_fld_add ; module procedure & - dshr_fld_add, & - dshr_fld_add_model, & - dshr_fld_add_model_and_data - end interface dshr_fld_add - - integer :: gridTofieldMap = 2 ! ungridded dimension is innermost - integer , parameter :: fldsMax = 100 - integer , parameter :: dbug = 10 - character(*), parameter :: modName = "(dhsr_nuopc_mod)" - character(*), parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine dshr_fld_add(med_fld, fldlist_num, fldlist, ungridded_lbound, ungridded_ubound) - - ! input/output variables - character(len=*) , intent(in) :: med_fld - integer , intent(inout) :: fldlist_num - type(fld_list_type) , intent(inout) :: fldlist(:) - integer , optional , intent(in) :: ungridded_lbound - integer , optional , intent(in) :: ungridded_ubound - - ! local variables - integer :: rc - character(len=*), parameter :: subname='(dshr_nuopc_mod:dshr_fld_add)' - ! ---------------------------------------------- - - call dshr_fld_list_add(fldlist_num, fldlist, med_fld, ungridded_lbound, ungridded_ubound) - - end subroutine dshr_fld_add - -!=============================================================================== - - subroutine dshr_fld_add_model(model_fld, model_fld_concat, model_fld_index, & - fldlist_num, fldlist, ungridded_lbound, ungridded_ubound) - - ! input/output variables - character(len=*) , intent(in) :: model_fld - character(len=*) , intent(inout) :: model_fld_concat - integer , optional , intent(out) :: model_fld_index - integer , optional , intent(inout) :: fldlist_num - type(fld_list_type) , optional , intent(inout) :: fldlist(:) - integer , optional , intent(in) :: ungridded_lbound - integer , optional , intent(in) :: ungridded_ubound - - ! local variables - integer :: rc - character(len=*), parameter :: subname='(dshr_nuopc_mod:dshr_fld_add_model)' - ! 
---------------------------------------------- - - if (len_trim(model_fld_concat) + len_trim(model_fld) + 1 >= len(model_fld_concat)) then - call ESMF_LogWrite(subname//': ERROR: max len of model_fld_concat has been exceeded', ESMF_LOGMSG_INFO) - rc = ESMF_FAILURE - return - end if - - if (trim(model_fld_concat) == '') then - model_fld_concat = trim(model_fld) - else - model_fld_concat = trim(model_fld_concat)//':'//trim(model_fld) - end if - - if (present(model_fld_index)) then - call shr_string_listGetIndex(trim(model_fld_concat), trim(model_fld), model_fld_index) - end if - - !---------------------------------- - ! Update fldlist array if appropriate - !---------------------------------- - - if (present(fldlist_num) .and. present(fldlist)) then - call dshr_fld_list_add(fldlist_num, fldlist, model_fld, ungridded_lbound, ungridded_ubound) - end if - - end subroutine dshr_fld_add_model - - !=============================================================================== - - subroutine dshr_fld_add_model_and_data( data_fld, data_fld_array, & - model_fld, model_fld_array, model_fld_concat, model_fld_index, & - fldlist_num, fldlist, ungridded_lbound, ungridded_ubound) - - ! input/output variables - character(len=*) , intent(in) :: data_fld - character(len=*) , pointer :: data_fld_array(:) - character(len=*) , intent(in) :: model_fld - character(len=*) , pointer :: model_fld_array(:) - character(len=*) , optional , intent(inout) :: model_fld_concat - integer , optional , intent(out) :: model_fld_index - integer , optional , intent(inout) :: fldlist_num - type(fld_list_type) , optional , intent(inout) :: fldlist(:) - integer , optional , intent(in) :: ungridded_lbound - integer , optional , intent(in) :: ungridded_ubound - - ! local variables - integer :: rc - integer :: n, oldsize, id - character(len=CS), pointer :: new_data_fld_array(:) - character(len=CS), pointer :: new_model_fld_array(:) - character(len=*), parameter :: subname='(dshr_nuopc_mod:dshr_fld_add_model_and_data) ' - ! ---------------------------------------------- - - !---------------------------------- - ! Create new data_fld_array and model_fld_array - ! Model is what the data model sends and receives from the mediator - ! Data is what the data model obtains from the various streams - !---------------------------------- - - ! 1) determine new index - if (associated(data_fld_array)) then - oldsize = size(data_fld_array) - else - oldsize = 0 - end if - id = oldsize + 1 - - ! 2) allocate new_data_fld_array and oldavi to one element larger than input - allocate(new_data_fld_array(id)) - allocate(new_model_fld_array(id)) - - ! 3) copy data_fld_array and model_fld_array into first N-1 elements of data_fld_arrays and model_fld_array - do n = 1,oldsize - new_data_fld_array(n) = data_fld_array(n) - new_model_fld_array(n) = model_fld_array(n) - end do - - ! 4) deallocate / nullify data_fld_array and model_fld_array - if (oldsize > 0) then - deallocate(data_fld_array) - deallocate(model_fld_array) - nullify(data_fld_array) - nullify(model_fld_array) - end if - - ! 5) point data_fld_array => new_data_fld_array and - ! model_fld_array => new_model_fld_array and update info for new entry - data_fld_array => new_data_fld_array - model_fld_array => new_model_fld_array - data_fld_array(id) = trim(data_fld) - model_fld_array(id) = trim(model_fld) - - !---------------------------------- - ! 
Update flds_concat colon delimited string if appropriate - !---------------------------------- - - if (present(model_fld_concat)) then - if (len_trim(model_fld_concat) + len_trim(model_fld) + 1 >= cxx) then - call ESMF_LogWrite(subname//': ERROR: max len of model_fld_concat has been exceeded', ESMF_LOGMSG_INFO) - call shr_sys_abort() - end if - if (trim(model_fld_concat) == '') then - model_fld_concat = trim(model_fld) - else - model_fld_concat = trim(model_fld_concat)//':'//trim(model_fld) - end if - - ! Get model field index if appropriated - if (present(model_fld_index)) then - call shr_string_listGetIndex(trim(model_fld_concat), trim(model_fld), model_fld_index) - end if - end if - - !---------------------------------- - ! Update fldlist array if appropriate - !---------------------------------- - if (present(fldlist_num) .and. present(fldlist)) then - call dshr_fld_list_add(fldlist_num, fldlist, model_fld, ungridded_lbound, ungridded_ubound) - end if - - end subroutine dshr_fld_add_model_and_data - - !=============================================================================== - - subroutine dshr_fld_list_add(num, fldlist, stdname, ungridded_lbound, ungridded_ubound) - - ! input/output variables - integer, intent(inout) :: num - type(fld_list_type), intent(inout) :: fldlist(:) - character(len=*), intent(in) :: stdname - integer, optional, intent(in) :: ungridded_lbound - integer, optional, intent(in) :: ungridded_ubound - - ! local variables - integer :: rc - character(len=*), parameter :: subname='(dshr_nuopc_mod:fld_list_add)' - !---------------------------------------------------------------------- - - ! Set up a list of field information - - num = num + 1 - if (num > fldsMax) then - call ESMF_LogWrite(trim(subname)//": ERROR num > fldsMax "//trim(stdname), ESMF_LOGMSG_INFO) - rc = ESMF_FAILURE - return - endif - fldlist(num)%stdname = trim(stdname) - - if (present(ungridded_lbound) .and. present(ungridded_ubound)) then - fldlist(num)%ungridded_lbound = ungridded_lbound - fldlist(num)%ungridded_ubound = ungridded_ubound - end if - - end subroutine dshr_fld_list_add - - !=============================================================================== - - subroutine dshr_realize(state, fldList, numflds, flds_scalar_name, flds_scalar_num, mesh, tag, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: state - type(fld_list_type) , intent(in) :: fldList(:) - integer , intent(in) :: numflds - character(len=*) , intent(in) :: flds_scalar_name - integer , intent(in) :: flds_scalar_num - character(len=*) , intent(in) :: tag - type(ESMF_Mesh) , intent(in) :: mesh - integer , intent(inout) :: rc - - ! local variables - integer :: n - type(ESMF_Field) :: field - character(len=80) :: stdname - character(len=*),parameter :: subname='(dshr_nuopc_mod:fld_list_realize)' - ! ---------------------------------------------- - - rc = ESMF_SUCCESS - - do n = 1, numflds - stdname = fldList(n)%stdname - if (NUOPC_IsConnected(state, fieldName=stdname)) then - if (stdname == trim(flds_scalar_name)) then - call ESMF_LogWrite(trim(subname)//trim(tag)//" Field = "//trim(stdname)//" is connected on root pe", & - ESMF_LOGMSG_INFO) - ! Create the scalar field - call SetScalarField(field, flds_scalar_name, flds_scalar_num, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - else - ! Create the field - if (fldlist(n)%ungridded_lbound > 0 .and. 
fldlist(n)%ungridded_ubound > 0) then - field = ESMF_FieldCreate(mesh, ESMF_TYPEKIND_R8, name=stdname, meshloc=ESMF_MESHLOC_ELEMENT, & - ungriddedLbound=(/fldlist(n)%ungridded_lbound/), & - ungriddedUbound=(/fldlist(n)%ungridded_ubound/), gridToFieldMap=(/gridToFieldMap/), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - field = ESMF_FieldCreate(mesh, ESMF_TYPEKIND_R8, name=stdname, meshloc=ESMF_MESHLOC_ELEMENT, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - end if - call ESMF_LogWrite(trim(subname)//trim(tag)//" Field = "//trim(stdname)//" is connected using mesh", & - ESMF_LOGMSG_INFO) - endif - - ! NOW call NUOPC_Realize - call NUOPC_Realize(state, field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - else - if (stdname /= trim(flds_scalar_name)) then - call ESMF_LogWrite(subname // trim(tag) // " Field = "// trim(stdname) // " is not connected.", & - ESMF_LOGMSG_INFO) - call ESMF_StateRemove(state, (/stdname/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - end if - end if - end do - - contains !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - subroutine SetScalarField(field, flds_scalar_name, flds_scalar_num, rc) - ! ---------------------------------------------- - ! create a field with scalar data on the root pe - ! ---------------------------------------------- - - type(ESMF_Field) , intent(inout) :: field - character(len=*) , intent(in) :: flds_scalar_name - integer , intent(in) :: flds_scalar_num - integer , intent(inout) :: rc - - ! local variables - type(ESMF_Distgrid) :: distgrid - type(ESMF_Grid) :: grid - character(len=*), parameter :: subname='(dshr_nuopc_mod:SetScalarField)' - ! ---------------------------------------------- - - rc = ESMF_SUCCESS - - ! create a DistGrid with a single index space element, which gets mapped onto DE 0. - distgrid = ESMF_DistGridCreate(minIndex=(/1/), maxIndex=(/1/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - - grid = ESMF_GridCreate(distgrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - - field = ESMF_FieldCreate(name=trim(flds_scalar_name), grid=grid, typekind=ESMF_TYPEKIND_R8, & - ungriddedLBound=(/1/), ungriddedUBound=(/flds_scalar_num/), gridToFieldMap=(/2/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - - end subroutine SetScalarField - - end subroutine dshr_realize - - !=============================================================================== - - subroutine ModelInitPhase(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - ! Switch to IPDv01 by filtering all other phaseMap entries - call NUOPC_CompFilterPhaseMap(gcomp, ESMF_METHOD_INITIALIZE, acceptStringList=(/"IPDv01p"/), rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine ModelInitPhase - - !=============================================================================== - - subroutine ModelSetRunClock(gcomp, rc) - - ! 
input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: mclock, dclock - type(ESMF_Time) :: mcurrtime, dcurrtime - type(ESMF_Time) :: mstoptime - type(ESMF_TimeInterval) :: mtimestep, dtimestep - character(len=256) :: cvalue - character(len=256) :: restart_option ! Restart option units - integer :: restart_n ! Number until restart interval - integer :: restart_ymd ! Restart date (YYYYMMDD) - type(ESMF_ALARM) :: restart_alarm - character(len=128) :: name - integer :: alarmcount - character(len=*),parameter :: subname='dshr_nuopc_mod:(ModelSetRunClock) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! query the Component for its clocks - call NUOPC_ModelGet(gcomp, driverClock=dclock, modelClock=mclock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_ClockGet(dclock, currTime=dcurrtime, timeStep=dtimestep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_ClockGet(mclock, currTime=mcurrtime, timeStep=mtimestep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! force model clock currtime and timestep to match driver and set stoptime - !-------------------------------- - - mstoptime = mcurrtime + dtimestep - call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! set restart alarm - !-------------------------------- - - call ESMF_ClockGetAlarmList(mclock, alarmlistflag=ESMF_ALARMLIST_ALL, alarmCount=alarmCount, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (alarmCount == 0) then - - call ESMF_GridCompGet(gcomp, name=name, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call ESMF_LogWrite(subname//'setting alarms for' // trim(name), ESMF_LOGMSG_INFO) - - call NUOPC_CompAttributeGet(gcomp, name="restart_option", value=restart_option, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name="restart_n", value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) restart_n - - call NUOPC_CompAttributeGet(gcomp, name="restart_ymd", value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) restart_ymd - - call alarmInit(mclock, restart_alarm, restart_option, & - opt_n = restart_n, & - opt_ymd = restart_ymd, & - RefTime = mcurrTime, & - alarmname = 'alarm_restart', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AlarmSet(restart_alarm, clock=mclock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end if - - !-------------------------------- - ! Advance model clock to trigger alarms then reset model clock back to currtime - !-------------------------------- - - call ESMF_ClockAdvance(mclock,rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelSetRunClock - - !=============================================================================== - - subroutine ModelSetMetadata(gcomp, name, rc) - - type(ESMF_GridComp) :: gcomp - character(len=*) , intent(in) :: name - integer , intent(out) :: rc - - ! 
local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - - rc = ESMF_SUCCESS - - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(gcomp, convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "ShortName", trim(name), convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "LongName", "Climatological SeaIce Data Model", convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "Description", & - "The CIME data models perform the basic function of " // & - "reading external data, modifying that data, and then " // & - "sending it to the driver via coupling " // & - "interfaces. The driver and other models have no " // & - "fundamental knowledge of whether another component " // & - "is fully active or just a data model. In some cases, " // & - "data models are prognostic and also receive and use " // & - "some data sent by the driver to the data model. But " // & - "in most cases, the data models are not running " // & - "prognostically and have no need to receive any data " // & - "from the driver.", & - convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "ReleaseDate", "2010", convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "ModelType", "SeaIce", convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "Name", "TBD", convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "EmailAddress", "TBD", convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AttributeSet(gcomp, "ResponsiblePartyRole", "contact", convention=convCIM, purpose=purpComp, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine ModelSetMetadata - - !----------------------------------------------------------------------------- - - subroutine dshr_export(array, state, fldname, ungridded_index, rc) - - ! ---------------------------------- - ! copy array data to state fields - ! ---------------------------------- - - ! input/otuput variables - real(r8) , intent(inout) :: array(:) - type(ESMF_State) , intent(inout) :: state - character(len=*) , intent(in) :: fldname - integer, optional, intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: lsize, n - type(ESMF_Field) :: lfield - real(R8), pointer :: farray1d(:) - real(R8), pointer :: farray2d(:,:) - character(*),parameter :: subName = "(dshr_nuopc_mod: dshr_export)" - !---------------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(state, itemName=trim(fldname), field=lfield, rc=rc) - if (.not. 
ChkErr(rc,__LINE__,u_FILE_u)) then - call ESMF_LogWrite(trim(subname)//": fldname = "//trim(fldname)//" copy", ESMF_LOGMSG_INFO) - - lsize = size(array) - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=farray2d, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do n = 1,lsize - farray2d(n,ungridded_index) = array(n) - enddo - else if (gridToFieldMap == 2) then - do n = 1,lsize - farray2d(ungridded_index,n) = array(n) - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=farray1d, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - do n = 1,lsize - farray1d(n) = array(n) - enddo - end if - end if - - end subroutine dshr_export - - !----------------------------------------------------------------------------- - - subroutine dshr_import(state, fldname, array, ungridded_index, rc) - - ! ---------------------------------- - ! copy state field to array data - ! ---------------------------------- - - ! input/output variables - type(ESMF_State) , intent(in) :: state - character(len=*) , intent(in) :: fldname - real(r8) , intent(inout) :: array(:) - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: lsize, n - type(ESMF_Field) :: lfield - real(R8), pointer :: farray1d(:) - real(R8), pointer :: farray2d(:,:) - character(*),parameter :: subName = "(dshr_nuopc_mod: dshr_import)" - !---------------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(state, itemName=trim(fldname), field=lfield, rc=rc) - if (.not. ChkErr(rc,__LINE__,u_FILE_u)) then - call ESMF_LogWrite(trim(subname)//": fldname = "//trim(fldname)//" copy", ESMF_LOGMSG_INFO) - - lsize = size(array) - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=farray2d, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do n = 1,lsize - array(n) = farray2d(n,ungridded_index) - enddo - else if (gridToFieldMap == 2) then - do n = 1,lsize - array(n) = farray2d(ungridded_index,n) - enddo - end if - else - call ESMF_FieldGet(lfield, farrayPtr=farray1d, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - do n = 1,lsize - array(n) = farray1d(n) - enddo - end if - end if - - end subroutine dshr_import - -end module dshr_nuopc_mod diff --git a/src/components/data_comps/dwav/README b/src/components/data_comps/dwav/README deleted file mode 100644 index 164509d523a..00000000000 --- a/src/components/data_comps/dwav/README +++ /dev/null @@ -1,4 +0,0 @@ -QL, 150527 -read climatological enhancement factor for -Langmuir mixing parameterization (vr12-ma) -and Surface Stokes drift diff --git a/src/components/data_comps/dwav/bld/README b/src/components/data_comps/dwav/bld/README deleted file mode 100644 index e852adf36c7..00000000000 --- a/src/components/data_comps/dwav/bld/README +++ /dev/null @@ -1,2 +0,0 @@ -QL, 150527, all files adapted from drof/bld and docn/bld - 150612, now read enhancement factor, ustokes and vstokes diff --git a/src/components/data_comps/dwav/cime_config/buildlib b/src/components/data_comps/dwav/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/data_comps/dwav/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/data_comps/dwav/cime_config/buildnml b/src/components/data_comps/dwav/cime_config/buildnml deleted file mode 100755 index c61f5c49ed6..00000000000 --- 
a/src/components/data_comps/dwav/cime_config/buildnml +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/env python - -"""Namelist creator for CIME's data wave model. -""" -# Typically ignore this. -# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position - -import os, sys, glob - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.XML.files import Files -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, safe_copy -from CIME.buildnml import create_namelist_infile, parse_input - -logger = logging.getLogger(__name__) - -# pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements -#################################################################################### -def _create_namelists(case, confdir, inst_string, infile, nmlgen, data_list_path): -#################################################################################### - """Write out the namelist for this component. - - Most arguments are the same as those for `NamelistGenerator`. The - `inst_string` argument is used as a suffix to distinguish files for - different instances. The `confdir` argument is used to specify the directory - in which output files will be placed. - """ - - #---------------------------------------------------- - # Get a bunch of information from the case. - #---------------------------------------------------- - wav_domain_file = case.get_value("WAV_DOMAIN_FILE") - wav_domain_path = case.get_value("WAV_DOMAIN_PATH") - dwav_mode = case.get_value("DWAV_MODE") - wav_grid = case.get_value("WAV_GRID") - - #---------------------------------------------------- - # Check for incompatible options. - #---------------------------------------------------- - expect(wav_grid != "null", - "WAV_GRID cannot be null") - expect(dwav_mode != "NULL", - "DWAV_MODE cannot be NULL") - - #---------------------------------------------------- - # Log some settings. - #---------------------------------------------------- - logger.debug("DWAV mode is {}".format(dwav_mode)) - logger.debug("DWAV grid is {}".format(wav_grid)) - - #---------------------------------------------------- - # Create configuration information. - #---------------------------------------------------- - config = {} - config['wav_grid'] = wav_grid - config['dwav_mode'] = dwav_mode - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - #---------------------------------------------------- - # Construct the list of streams. - #---------------------------------------------------- - streams = nmlgen.get_streams() - - #---------------------------------------------------- - # For each stream, create stream text file and update - # shr_strdata_nml group and input data list. - #---------------------------------------------------- - for stream in streams: - - # Ignore null values. - if stream is None or stream in ("NULL", ""): - continue - - inst_stream = stream + inst_string - logger.debug("DWAV stream is {}".format(inst_stream)) - stream_path = os.path.join(confdir, "dwav.streams.txt." + inst_stream) - user_stream_path = os.path.join(case.get_case_root(), - "user_dwav.streams.txt." 
+ inst_stream) - - # Use the user's stream file, or create one if necessary. - if os.path.exists(user_stream_path): - safe_copy(user_stream_path, stream_path) - config['stream'] = stream - nmlgen.update_shr_strdata_nml(config, stream, stream_path) - else: - nmlgen.create_stream_file_and_update_shr_strdata_nml(config, stream, stream_path, data_list_path) - - #---------------------------------------------------- - # Create dwav_nml namelists group - #---------------------------------------------------- - # set per-stream variables - nmlgen.create_shr_strdata_nml() - - # set variables that are not per-stream - if wav_domain_file != "UNSET": - full_domain_path = os.path.join(wav_domain_path, wav_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) - else: - nmlgen.add_default("domainfile", value="null") - - #---------------------------------------------------- - # Finally, write out all the namelists. - #---------------------------------------------------- - namelist_file = os.path.join(confdir, "dwav_in") - nmlgen.write_output_file(namelist_file, data_list_path, groups=['dwav_nml','shr_strdata_nml']) - -############################################################################### -def buildnml(case, caseroot, compname): -############################################################################### - - # Build the component namelist and required stream txt files - - if compname != "dwav": - raise AttributeError - - rundir = case.get_value("RUNDIR") - ninst = case.get_value("NINST_WAV") - if ninst is None: - ninst = case.get_value("NINST") - - # Determine configuration directory - confdir = os.path.join(caseroot,"Buildconf",compname + "conf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - #---------------------------------------------------- - # Construct the namelist generator - #---------------------------------------------------- - # determine directory for user modified namelist_definitions.xml - user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) - expect (os.path.isdir(user_xml_dir), - "user_xml_dir {} does not exist ".format(user_xml_dir)) - - # NOTE: User definition *replaces* existing definition. - files = Files() - definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component":"dwav"})] - - user_definition = os.path.join(user_xml_dir, "namelist_definition_dwav.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file) - - #---------------------------------------------------- - # Clear out old data. - #---------------------------------------------------- - data_list_path = os.path.join(case.get_case_root(), "Buildconf", "dwav.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - - #---------------------------------------------------- - # Loop over instances - #---------------------------------------------------- - for inst_counter in range(1, ninst+1): - # determine instance string - inst_string = "" - if ninst > 1: - inst_string = '_' + "{:04d}".format(inst_counter) - - # If multi-instance case does not have restart file, use - # single-case restart for each instance - rpointer = "rpointer." 
+ compname - if (os.path.isfile(os.path.join(rundir,rpointer)) and - (not os.path.isfile(os.path.join(rundir,rpointer + inst_string)))): - safe_copy(os.path.join(rundir, rpointer), - os.path.join(rundir, rpointer + inst_string)) - - inst_string_label = inst_string - if not inst_string_label: - inst_string_label = "\"\"" - - # create namelist output infile using user_nl_file as input - user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) - expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file {} ".format(user_nl_file)) - infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, infile) - namelist_infile = [infile] - - # create namelist and stream file(s) data component - _create_namelists(case, confdir, inst_string, namelist_infile, nmlgen, data_list_path) - - # copy namelist files and stream text files, to rundir - if os.path.isdir(rundir): - filename = compname + "_in" - file_src = os.path.join(confdir, filename) - file_dest = os.path.join(rundir, filename) - if inst_string: - file_dest += inst_string - safe_copy(file_src,file_dest) - - for txtfile in glob.glob(os.path.join(confdir, "*txt*")): - safe_copy(txtfile, rundir) - -############################################################################### -def _main_func(): - # Build the component namelist and required stream txt files - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "dwav") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/data_comps/dwav/cime_config/config_archive.xml b/src/components/data_comps/dwav/cime_config/config_archive.xml deleted file mode 100644 index 2c832d5b6ad..00000000000 --- a/src/components/data_comps/dwav/cime_config/config_archive.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - r - unset - - rpointer.wav$NINST_STRING - $CASE.dwav$NINST_STRING.r.$DATENAME.nc,$CASE.dwav$NINST_STRING.rs1.$DATENAME.bin - - - diff --git a/src/components/data_comps/dwav/cime_config/config_component.xml b/src/components/data_comps/dwav/cime_config/config_component.xml deleted file mode 100644 index 79d4f7a2e80..00000000000 --- a/src/components/data_comps/dwav/cime_config/config_component.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - Data wave model (DWAV) - null mode - climatological mode - - - - char - dwav - dwav - case_comp - env_case.xml - Name of wave component - - - - char - NULL,CLIMO - NULL - - NULL - CLIMO - - run_component_dwav - env_run.xml - DWAV mode. Values are null and copyall. - In null mode, land forcing is set to zero and not used. - default is copyall - - - - ========================================= - DWAV naming conventions - ========================================= - - - diff --git a/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml b/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml deleted file mode 100644 index 1f19a45ce21..00000000000 --- a/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml +++ /dev/null @@ -1,510 +0,0 @@ - - - - - - - - - - - - - - char(100) - streams - streams_file - List of streams used for the given dwav_mode. - - null - climo - - - - - char - streams - streams_file - Stream domain file directory. - - $DIN_LOC_ROOT/wav/dwav - - - - - char - streams - streams_file - Stream domain file path(s). - - waveclim.mon.ww3a.150612.nc - - - - - char - streams - streams_file - Stream domain variable name(s). 
- - - time time - xc lon - yc lat - area area - mask mask - - - - - - char - streams - streams_file - Stream data file directory. - - $DIN_LOC_ROOT/wav/dwav - - - - - char - streams - streams_file - Stream data file path(s). - - waveclim.mon.ww3a.150612.nc - - - - - char - streams - streams_file - Stream data variable name(s). - - - lamult lamult - ustokes ustokes - vstokes vstokes - - - - - - integer - streams - streams_file - Stream offset. - - 0 - - - - - integer - streams - streams_file - Simulation year to align stream to. - - 1 - - - - - integer - streams - streams_file - First year of stream. - - 1 - - - - - integer - streams - streams_file - Last year of stream. - - 1 - - - - - - - - - - - - - char - streams - shr_strdata_nml - NULL,COPYALL - - The wave data is associated with the wave model - and is normally on a different grid than the ocean data. - datamode = "NULL" - Is always a valid option and means no data will be generated. - Turns off the data model as a provider of data to the coupler. - datamode = "COPYALL" - Copies all fields directly from the input data streams Any required - fields not found on an input stream will be set to zero. - Set by the following xml variables in env_run.xml DWAV_MODE - specifies values for wav mode: copyall,null - - - COPYALL - NULL - - - - - char - streams - abs - shr_strdata_nml - - spatial gridfile associated with the strdata. grid information will - be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - - - null - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are just copy (ie. no fill), special value, - nearest neighbor, nearest neighbor in "i" direction, or nearest - neighbor in "j" direction. - valid values: 'copy','spval','nn','nnoni','nnonj' - - - nn - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - plays no role is fill algorithm at the present time. - valid values: "nomask,srcmask,dstmask,bothmask" - - - nomask - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read in instead of computing the - weights on the fly for the fill operation. if this is set, fillalgo - and fillmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the fill operation. this allows a user to - save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - nomask,srcmask,dstmask,bothmask - - array (up to 30 elements) of masking algorithms for mapping input data - associated with the array of streams. valid options are map only from - valid src points, map only to valid destination points, ignore all - masks, map only from valid src points to valid destination points. - valid values: srcmask, dstmask, nomask,bothmask - - - dstmask - - - - - char(30) - streams - shr_strdata_nml - copy,bilinear,nn,nnoni,nnonj,spval - - array (up to 30 elements) of fill algorithms associated with the array - of streams. valid options are copy by index, set to special value, - nearest neighbor, nearest neighbor in "i" direction, nearest neighbor - in "j" direction, or bilinear. 
- valid values: copy,spval,nn,nnoni,nnonj,bilinear - - - bilinear - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to read instead of computing - weights on the fly for the mapping (interpolation) operation. if this - is set, mapalgo and mapmask are ignored. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - - array (up to 30 elements) of filenames associated with the array of - streams. specifies the weights file to generate after weights are - computed on the fly for the mapping (interpolation) operation. this - allows a user to save and reuse a set of weights later. - - - NOT_SET - - - - - char(30) - streams - shr_strdata_nml - coszen,nearest,linear,lower,upper - - array (up to 30 elements) of time interpolation options associated with the array of - streams. - valid values: lower,upper,nearest,linear,coszen - lower = Use lower time-value - upper = Use upper time-value - nearest = Use the nearest time-value - linear = Linearly interpolate between the two time-values - coszen = Scale according to the cosine of the solar zenith angle (for solar) - - - linear - - - - - char(30) - streams - shr_strdata_nml - extend,cycle,limit - - array of time axis modes associated with the array of streams for - handling data outside the specified stream time axis. - valid options are to cycle the data based on the first, last, and - align settings associated with the stream dataset, to extend the first - and last valid value indefinitely, or to limit the interpolated data - to fall only between the least and greatest valid value of the time array. - valid values: cycle,extend,limit - extend = extrapolate before and after the period by using the first or last value. - cycle = cycle between the range of data - limit = restrict to the period for which the data is valid - - - cycle - - - - - char(30) - streams - shr_strdata_nml - single,full_file - - array (up to 30 elements) of reading mode associated with the array of - streams. specifies the mode of reading temporal stream dataset. - valid options are "single" (read temporal dataset one at a time) or - "full_file" (read all entires of temporal dataset in a given netcdf file) - valid values: single,full_file - - - single - - - - - real(30) - streams - shr_strdata_nml - - array (up to 30 elements) of delta time ratio limits placed on the - time interpolation associated with the array of streams. this real - value causes the model to stop if the ratio of the running maximum - delta time divided by the minimum delta time is greater than the - dtlimit for that stream. for instance, with daily data, the delta - time should be exactly one day throughout the dataset and the computed - maximum divided by minimum delta time should always be 1.0. for - monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. the running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. - - - 1.5e0 - - - - - char - streams - shr_strdata_nml - - list of paired colon delimited field names that should be treated as - vectors when carrying out spatial interpolation. unlike other - character arrays in this namelist, this array is completely decoupled - from the list of streams. 
this is a list of vector pairs that span - all input streams where different fields of the vector pair could - appear in different streams. - for example, vectors = 'u:v','taux:tauy'. - - - null - - - - - char(30) - streams - shr_strdata_nml - - character array (up to 30 elements) of stream input files. this - string is actually parsed by a stream method and so the format is - specified by the stream module. this string consists of a - "stream_input_filename year_align year_first year_last". the - stream_input_filename is a stream text input file and the format and - options are described elsewhere. year_align, year_first, and - year_last provide information about the time axis of the file and how - to relate the input time axis to the model time axis. - - - - - - - - - - - - char - dwav - dwav_nml - 1d,root - - DWAV Decomposition strategy - 1d = Vector decomposition - root = run only on the master task - - - 1d - - - - - char - dwav - dwav_nml - - Master restart file name for dwav model - - - undefined - - - - - char - dwav - dwav_nml - - Stream restart file name for dwav model, needed for branch simulations - - - undefined - - - - - logical - dwav - dwav_nml - If TRUE, prognostic is forced to true. - - .false. - - - - diff --git a/src/components/data_comps/dwav/cime_config/user_nl_dwav b/src/components/data_comps/dwav/cime_config/user_nl_dwav deleted file mode 100644 index 51ba47f53de..00000000000 --- a/src/components/data_comps/dwav/cime_config/user_nl_dwav +++ /dev/null @@ -1,15 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_dwav to change namelists variables -! Users should add all user specific namelist changes below in the form of -! namelist_var = new_namelist_value -! Note that any namelist variable from shr_strdata_nml and dwav_nml can -! be modified below using the above syntax -! User preview_namelists to view (not modify) the output namelist in the -! directory $CASEROOT/CaseDocs -! To modify the contents of a stream txt file, first use preview_namelists -! to obtain the contents of the stream txt files in CaseDocs, and then -! place a copy of the modified stream txt file in $CASEROOT with the string -! user_ prepended. -! As an example, to modify dwav.streams.txt.prescribed, place the modified -! version in $CASEROOT with the name user_dwav.streams.txt.prescribed -!------------------------------------------------------------------------ diff --git a/src/components/data_comps/dwav/mct/dwav_comp_mod.F90 b/src/components/data_comps/dwav/mct/dwav_comp_mod.F90 deleted file mode 100644 index 4cf1fe68461..00000000000 --- a/src/components/data_comps/dwav/mct/dwav_comp_mod.F90 +++ /dev/null @@ -1,419 +0,0 @@ -#ifdef AIX -@PROCESS ALIAS_SIZE(805306368) -#endif -module dwav_comp_mod - - ! 
!USES: - - use esmf - use mct_mod - use perf_mod - use shr_pcdf_mod - use shr_sys_mod - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only: shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only: shr_mpi_bcast - use shr_strdata_mod , only: shr_strdata_type, shr_strdata_pioinit, shr_strdata_init - use shr_strdata_mod , only: shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only: shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only: shr_dmodel_gsmapcreate, shr_dmodel_rearrGGrid - use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV - use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - - use dwav_shr_mod , only: datamode ! namelist input - use dwav_shr_mod , only: decomp ! namelist input - use dwav_shr_mod , only: rest_file ! namelist input - use dwav_shr_mod , only: rest_file_strm ! namelist input - use dwav_shr_mod , only: nullstr - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dwav_comp_init - public :: dwav_comp_run - public :: dwav_comp_final - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(CS) :: myModelName = 'wav' ! user defined model name - character(len=*),parameter :: rpfile = 'rpointer.wav' - type(mct_rearr) :: rearr - - !-------------------------------------------------------------------------- - integer(IN),parameter :: ktrans = 3 - character(12),parameter :: avofld(1:ktrans) = (/"Sw_lamult ","Sw_ustokes ","Sw_vstokes "/) - character(12),parameter :: avifld(1:ktrans) = (/"lamult ","ustokes ","vstokes "/) - !-------------------------------------------------------------------------- - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine dwav_comp_init(Eclock, x2w, w2x, & - SDWAV, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - seq_flds_w2x_fields, seq_flds_x2w_fields) - - ! !DESCRIPTION: initialize dwav model - use pio , only : iosystem_desc_t - use shr_pio_mod, only : shr_pio_getiosys, shr_pio_getiotype - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2w, w2x ! input/output attribute vectors - type(shr_strdata_type) , intent(inout) :: SDWAV ! model - type(mct_gsMap) , pointer :: gsMap ! model global seg map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "wav_0001") - integer(IN) , intent(in) :: logunit ! 
logging unit number - logical , intent(in) :: read_restart ! start from restart - character(len=*) , intent(in) :: seq_flds_x2w_fields ! fields from mediator - character(len=*) , intent(in) :: seq_flds_w2x_fields ! fields to mediator - - !--- local variables --- - integer(IN) :: n,k ! generic counters - integer(IN) :: ierr ! error code - integer(IN) :: lsize ! local size - logical :: exists ! file existence - integer(IN) :: nu ! unit number - character(CL) :: calendar ! model calendar - type(iosystem_desc_t), pointer :: wav_pio_subsystem - - !--- formats --- - character(*), parameter :: F00 = "('(dwav_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dwav_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dwav_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dwav_comp_init) ',a,4es13.6)" - character(*), parameter :: F03 = "('(dwav_comp_init) ',a,i8,a)" - character(*), parameter :: F04 = "('(dwav_comp_init) ',2a,2i8,'s')" - character(*), parameter :: F05 = "('(dwav_comp_init) ',a,2f10.4)" - character(*), parameter :: F90 = "('(dwav_comp_init) ',73('='))" - character(*), parameter :: F91 = "('(dwav_comp_init) ',73('-'))" - character(*), parameter :: subName = "(dwav_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DWAV_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - wav_pio_subsystem => shr_pio_getiosys(trim(inst_name)) - call shr_strdata_pioinit(SDWAV, wav_pio_subsystem, shr_pio_getiotype(trim(inst_name))) - - !---------------------------------------------------------------------------- - ! Initialize SDWAV - !---------------------------------------------------------------------------- - - call t_startf('dwav_strdata_init') - - call seq_timemgr_EClockGetData( EClock, calendar=calendar ) - - ! NOTE: shr_strdata_init calls shr_dmodel_readgrid which reads the data model - ! grid and from that computes SDWAV%gsmap and SDWAV%ggrid. SDWAV%gsmap is created - ! using the decomp '2d1d' (1d decomp of 2d grid) - - call shr_strdata_init(SDWAV,mpicom,compid,name='wav', calendar=calendar) - - if (my_task == master_task) then - call shr_strdata_print(SDWAV,'SDWAV data') - endif - - call t_stopf('dwav_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize MCT global seg map, 1d decomp - !---------------------------------------------------------------------------- - - call t_startf('dwav_initgsmaps') - - if (my_task == master_task) write(logunit,F00) ' initialize gsmaps' - call shr_sys_flush(logunit) - - ! create a data model global seg map (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the dwav_in namelist - ! (which by default is "1d") - call shr_dmodel_gsmapcreate(gsmap,SDWAV%nxg*SDWAV%nyg,compid,mpicom,decomp) - lsize = mct_gsmap_lsize(gsmap,mpicom) - - ! create a rearranger from the data model SDWAV%gsmap to gsmap - call mct_rearr_init(SDWAV%gsmap,gsmap,mpicom,rearr) - - write(logunit,*)'lsize= ',lsize - call shr_sys_flush(logunit) - - call t_stopf('dwav_initgsmaps') - - !---------------------------------------------------------------------------- - ! Initialize MCT domain - !---------------------------------------------------------------------------- - - call t_startf('dwav_initmctdom') - !write(logunit,F00)' dwav_initmctdom...'
- - if (my_task == master_task) write(logunit,F00) 'copy domains' - call shr_sys_flush(logunit) - - call shr_dmodel_rearrGGrid(SDWAV%grid, ggrid, gsmap, rearr, mpicom) - - call t_stopf('dwav_initmctdom') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - call t_startf('dwav_initmctavs') - !write(logunit,F00)' dwav_initmctavs...' - - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - call shr_sys_flush(logunit) - - call mct_avect_init(w2x, rlist=seq_flds_w2x_fields, lsize=lsize) - call mct_avect_zero(w2x) - - call mct_avect_init(x2w, rlist=seq_flds_x2w_fields, lsize=lsize) - call mct_avect_zero(x2w) - - call t_stopf('dwav_initmctavs') - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - if (trim(rest_file) == trim(nullstr) .and. trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer' - call shr_sys_flush(logunit) - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (.not.exists) then - write(logunit,F00) ' ERROR: rpointer file does not exist' - call shr_sys_abort(trim(subname)//' ERROR: rpointer file missing') - endif - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - call shr_sys_flush(logunit) - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - call shr_mpi_bcast(exists,mpicom,'exists') - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDWAV,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - call shr_sys_flush(logunit) - endif - - !---------------------------------------------------------------------------- - ! Set initial wav state, needed for CCSM atm initialization - !---------------------------------------------------------------------------- - - call t_adj_detailf(+2) - call dwav_comp_run(EClock, x2w, w2x, & - SDWAV, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit) - call t_adj_detailf(-2) - - if (my_task == master_task) write(logunit, F00) 'dwav_comp_init done' - call shr_sys_flush(logunit) - - call t_stopf('DWAV_INIT') - - end subroutine dwav_comp_init - - !=============================================================================== - subroutine dwav_comp_run(EClock, x2w, w2x, & - SDWAV, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, case_name) - - use shr_cal_mod, only : shr_cal_ymdtod2string - ! !DESCRIPTION: run method for dwav model - implicit none - - ! 
!INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(in) :: EClock - type(mct_aVect) , intent(inout) :: x2w - type(mct_aVect) , intent(inout) :: w2x - type(shr_strdata_type) , intent(inout) :: SDWAV - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer(IN) , intent(in) :: logunit ! logging unit number - character(CL) , intent(in), optional :: case_name ! case name - - !--- local --- - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: yy,mm,dd ! year month day - integer(IN) :: n ! indices - integer(IN) :: idt ! integer timestep - real(R8) :: dt ! timestep - integer(IN) :: nu ! unit number - logical :: write_restart ! restart now - character(len=18) :: date_str - - character(*), parameter :: F00 = "('(dwav_comp_run) ',8a)" - character(*), parameter :: F04 = "('(dwav_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(dwav_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DWAV_RUN') - - call t_startf('dwav_run1') - call seq_timemgr_EClockGetData( EClock, curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - call seq_timemgr_EClockGetData( EClock, curr_yr=yy, curr_mon=mm, curr_day=dd) - call seq_timemgr_EClockGetData( EClock, dtime=idt) - dt = idt * 1.0_r8 - write_restart = seq_timemgr_RestartAlarmIsOn(EClock) - call t_stopf('dwav_run1') - - !-------------------- - ! UNPACK - !-------------------- - - call t_startf('dwav_unpack') - ! Nothing to be done for now - call t_stopf('dwav_unpack') - - !-------------------- - ! ADVANCE WAV - !-------------------- - - call t_barrierf('dwav_BARRIER',mpicom) - call t_startf('dwav') - - call t_startf('dwav_strdata_advance') - call shr_strdata_advance(SDWAV,currentYMD,currentTOD,mpicom,'dwav') - call t_stopf('dwav_strdata_advance') - - !--- copy all fields from streams to w2x as default --- - call t_barrierf('dwav_scatter_BARRIER',mpicom) - call t_startf('dwav_scatter') - do n = 1,SDWAV%nstreams - call shr_dmodel_translateAV(SDWAV%avs(n),w2x,avifld,avofld,rearr) - enddo - call t_stopf('dwav_scatter') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - - end select - - call t_stopf('datamode') - - !-------------------- - ! 
Write restart - !-------------------- - - if (write_restart) then - call t_startf('dwav_restart') - call shr_cal_ymdtod2string(date_str, yy,mm,dd,currentTOD) - write(rest_file,"(6a)") & - trim(case_name), '.dwav',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.dwav',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file_strm),currentYMD,currentTOD - call shr_strdata_restWrite(trim(rest_file_strm),SDWAV,mpicom,trim(case_name),'SDWAV strdata') - call shr_sys_flush(logunit) - call t_stopf('dwav_restart') - endif - - call t_stopf('dwav') - - !---------------------------------------------------------------------------- - ! Log output for model date - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call t_startf('dwav_run2') - if (my_task == master_task) then - write(logunit,F04) trim(myModelName),': model date ', CurrentYMD,CurrentTOD - call shr_sys_flush(logunit) - end if - call t_stopf('dwav_run2') - - call t_stopf('DWAV_RUN') - - end subroutine dwav_comp_run - - !=============================================================================== - subroutine dwav_comp_final(my_task, master_task, logunit) - - ! !DESCRIPTION: finalize method for dwav model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - - !--- formats --- - character(*), parameter :: F00 = "('(dwav_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dwav_comp_final) ',73('-'))" - character(*), parameter :: subName = "(dwav_comp_final) " - !------------------------------------------------------------------------------- - - call t_startf('DWAV_FINAL') - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) - end if - - call t_stopf('DWAV_FINAL') - - end subroutine dwav_comp_final - !=============================================================================== - -end module dwav_comp_mod diff --git a/src/components/data_comps/dwav/mct/dwav_shr_mod.F90 b/src/components/data_comps/dwav/mct/dwav_shr_mod.F90 deleted file mode 100644 index 6f650f3e761..00000000000 --- a/src/components/data_comps/dwav/mct/dwav_shr_mod.F90 +++ /dev/null @@ -1,155 +0,0 @@ -module dwav_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: dwav_shr_read_namelists - - !-------------------------------------------------------------------------- - ! 
Public data - !-------------------------------------------------------------------------- - - ! input namelist variables - character(CL) , public :: decomp ! decomp strategy - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - character(len=*), public, parameter :: nullstr = 'undefined' - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine dwav_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDWAV, wav_present, wav_prognostic) - - ! !DESCRIPTION: Read in dwav namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer , intent(in) :: inst_index ! number of current instance (ie. 1) - character(len=16) , intent(in) :: inst_suffix ! char string associated with instance - character(len=16) , intent(in) :: inst_name ! fullname of current instance (ie. "wav_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(in) :: shrlogunit ! original log unit and level - type(shr_strdata_type) , intent(inout) :: SDWAV - logical , intent(out) :: wav_present ! flag - logical , intent(out) :: wav_prognostic ! flag - - !--- local variables --- - character(CL) :: fileName ! generic file name - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - - !--- formats --- - character(*), parameter :: F00 = "('(dwav_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dwav_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dwav_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dwav_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(dwav_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_dwav_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / dwav_nml / & - decomp, restfilm, restfils, force_prognostic_true - - !---------------------------------------------------------------------------- - ! Determine input filenamname - !---------------------------------------------------------------------------- - - filename = "dwav_in"//trim(inst_suffix) - - !---------------------------------------------------------------------------- - ! Read dwav_in - !---------------------------------------------------------------------------- - - filename = "dwav_in"//trim(inst_suffix) - decomp = "1d" - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - if (my_task == master_task) then - nunit = shr_file_getUnit() ! 
get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=dwav_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' decomp = ',trim(decomp) - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - endif - call shr_mpi_bcast(decomp ,mpicom,'decomp') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDWAV,trim(filename),mpicom=mpicom) - - !---------------------------------------------------------------------------- - ! Determine and validate datamode - !---------------------------------------------------------------------------- - - datamode = trim(SDWAV%dataMode) - - if (trim(datamode) == 'NULL' .or. & - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) 'dwav datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal dwav datamode = ',trim(datamode) - call shr_sys_abort() - end if - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flags - !---------------------------------------------------------------------------- - - wav_present = .false. - wav_prognostic = .false. - if (force_prognostic_true) then - wav_present = .true. - wav_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - wav_present = .true. - end if - - end subroutine dwav_shr_read_namelists - -end module dwav_shr_mod diff --git a/src/components/data_comps/dwav/mct/wav_comp_mct.F90 b/src/components/data_comps/dwav/mct/wav_comp_mct.F90 deleted file mode 100644 index f16b7b3bbac..00000000000 --- a/src/components/data_comps/dwav/mct/wav_comp_mct.F90 +++ /dev/null @@ -1,257 +0,0 @@ -module wav_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dwav_comp_mod , only: dwav_comp_init, dwav_comp_run, dwav_comp_final - use dwav_shr_mod , only: dwav_shr_read_namelists - use seq_flds_mod , only: seq_flds_w2x_fields, seq_flds_x2w_fields - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! 
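
The namelist logic in dwav_shr_read_namelists above reduces to a small decision: datamode must be either NULL or COPYALL, wav_present is true whenever datamode is not NULL, and force_prognostic_true promotes both flags. A short Python sketch of just that flag logic (the function name and return style are illustrative):

def dwav_flags(datamode, force_prognostic_true=False):
    """Validate the dwav datamode and derive the present/prognostic flags."""
    if datamode not in ("NULL", "COPYALL"):
        raise ValueError("illegal dwav datamode = %s" % datamode)
    wav_present = force_prognostic_true or datamode != "NULL"
    wav_prognostic = force_prognostic_true
    return wav_present, wav_prognostic

# dwav_flags("COPYALL")     -> (True, False)
# dwav_flags("NULL")        -> (False, False)
# dwav_flags("NULL", True)  -> (True, True)
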
Public interfaces - !-------------------------------------------------------------------------- - - public :: wav_init_mct - public :: wav_run_mct - public :: wav_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - type(shr_strdata_type) :: SDWAV - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "wav_0001") - character(len=16) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - - character(*), parameter :: F00 = "('(dwav_comp_init) ',8a)" - integer(IN) , parameter :: master_task=0 ! task number of master task - character(*), parameter :: subName = "(wav_init_mct) " - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine wav_init_mct( EClock, cdata, x2w, w2x, NLFilename ) - - ! !DESCRIPTION: initialize dwav model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2w, w2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer :: phase ! phase of method - logical :: wav_present ! flag - logical :: wav_prognostic ! flag - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - logical :: read_restart ! start from restart - integer(IN) :: ierr ! error code - logical :: scmMode = .false. ! single column mode - real(R8) :: scmLat = shr_const_SPVAL ! single column lat - real(R8) :: scmLon = shr_const_SPVAL ! single column lon - logical :: post_assim = .false. ! Run is post-DA - character(*), parameter :: subName = "(wav_init_mct) " - !------------------------------------------------------------------------------- - - ! Set cdata pointers - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata, & - post_assimilation=post_assim) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, & - single_column=scmMode, & - scmlat=scmlat, scmlon=scmLon, & - read_restart=read_restart) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('wav_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Read dwav namelists and set prognostic, present flags in infodata - !---------------------------------------------------------------------------- - - call t_startf('dwav_readnml') - - call dwav_shr_read_namelists(mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, & - logunit, shrlogunit, SDWAV, wav_present, wav_prognostic) - - call seq_infodata_PutData(infodata, & - wav_present=wav_present, & - wav_prognostic=wav_prognostic) - - call t_stopf('dwav_readnml') - - !---------------------------------------------------------------------------- - ! RETURN if present flag is false - !---------------------------------------------------------------------------- - - if (.not. wav_present) then - RETURN - end if - - ! NOTE: the following will never be called if wav_present is .false. - - ! Diagnostic print statement to test DATA_ASSIMILATION_WAV XML variable - ! usage (and therefore a proxy for other component types). - if (my_task == master_task) then - if (post_assim) then - write(logunit, *) subName//': Post data assimilation signal' - else if (read_restart) then - write(logunit, *) subName//': Restart run' - else - write(logunit, *) subName//': Initial run' - end if - call shr_sys_flush(logunit) - end if - - !---------------------------------------------------------------------------- - ! Initialize dwav - !---------------------------------------------------------------------------- - - call dwav_comp_init(Eclock, x2w, w2x, & - SDWAV, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, inst_name, logunit, read_restart, & - seq_flds_w2x_fields, seq_flds_x2w_fields) - - !---------------------------------------------------------------------------- - ! Fill infodata that needs to be returned from dwav - !---------------------------------------------------------------------------- - - call seq_infodata_PutData(infodata, & - wav_nx=SDWAV%nxg, & - wav_ny=SDWAV%nyg ) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'dwav_comp_init done' - call shr_sys_flush(logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine wav_init_mct - - !=============================================================================== - subroutine wav_run_mct( EClock, cdata, x2w, w2x) - - ! !DESCRIPTION: run method for dwav model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2w - type(mct_aVect) ,intent(inout) :: w2x - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - logical :: read_restart ! start from restart - character(CL) :: case_name ! case name - character(*), parameter :: subName = "(wav_run_mct) " - !------------------------------------------------------------------------------- - - ! 
Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call seq_infodata_GetData(infodata, case_name=case_name) - - call dwav_comp_run(EClock, x2w, w2x, & - SDWAV, gsmap, ggrid, mpicom, compid, my_task, master_task, & - inst_suffix, logunit, case_name) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine wav_run_mct - - !=============================================================================== - subroutine wav_final_mct(EClock, cdata, x2w, w2x) - - ! !DESCRIPTION: finalize method for dwav model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2w - type(mct_aVect) ,intent(inout) :: w2x - - !--- formats --- - character(*), parameter :: subName = "(wav_final_mct) " - !------------------------------------------------------------------------------- - - call dwav_comp_final(my_task, master_task, logunit) - - end subroutine wav_final_mct - !=============================================================================== - -end module wav_comp_mct diff --git a/src/components/data_comps/dwav/nuopc/dwav_comp_mod.F90 b/src/components/data_comps/dwav/nuopc/dwav_comp_mod.F90 deleted file mode 100644 index 286d83f36c4..00000000000 --- a/src/components/data_comps/dwav/nuopc/dwav_comp_mod.F90 +++ /dev/null @@ -1,508 +0,0 @@ -module dwav_comp_mod - - use NUOPC , only : NUOPC_Advertise - use ESMF , only : ESMF_State, ESMF_SUCCESS, ESMF_STATE - use ESMF , only : ESMF_Mesh, ESMF_DistGrid, ESMF_MeshGet, ESMF_DistGridGet - use perf_mod , only : t_startf, t_stopf, t_adj_detailf, t_barrierf - use mct_mod , only : mct_gsmap_init - use mct_mod , only : mct_avect, mct_avect_indexRA, mct_avect_zero, mct_aVect_nRattr - use mct_mod , only : mct_avect_init, mct_avect_lsize - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, cxx=>shr_kind_cxx, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only : shr_mpi_bcast - use shr_strdata_mod , only : shr_strdata_init_model_domain - use shr_strdata_mod , only : shr_strdata_init_streams - use shr_strdata_mod , only : shr_strdata_init_mapping - use shr_strdata_mod , only : shr_strdata_type, shr_strdata_pioinit - use shr_strdata_mod , only : shr_strdata_print, shr_strdata_restRead - use shr_strdata_mod , only : shr_strdata_advance, shr_strdata_restWrite - use shr_dmodel_mod , only : shr_dmodel_translateAV - use shr_cal_mod , only : shr_cal_calendarname, shr_cal_datetod2string - use dshr_methods_mod , only : ChkErr - use dshr_nuopc_mod , only : fld_list_type, dshr_fld_add, dshr_export - use dwav_shr_mod , only : datamode ! namelist input - use dwav_shr_mod , only : rest_file ! namelist input - use dwav_shr_mod , only : rest_file_strm ! namelist input - use dwav_shr_mod , only : nullstr - use dwav_shr_mod , only : SDWAV - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! 
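
Both wav_init_mct and wav_run_mct above bracket their work with the same pattern: save the shared log unit, point logging at the component's own file, and restore the original before returning. In Python the analogous idea is a context manager; this is only an analogy to the shr_file_getLogUnit/shr_file_setLogUnit calls, not a CIME API:

import contextlib

@contextlib.contextmanager
def redirect_log(state, my_unit):
    """Temporarily point a shared 'current log unit' at this component's unit."""
    saved = state["logunit"]      # shr_file_getLogUnit
    state["logunit"] = my_unit    # shr_file_setLogUnit(logUnit)
    try:
        yield
    finally:
        state["logunit"] = saved  # restore the original on the way out

shared = {"logunit": 6}
with redirect_log(shared, 101):
    assert shared["logunit"] == 101  # component writes to its own log here
assert shared["logunit"] == 6
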
Public interfaces - !-------------------------------------------------------------------------- - - public :: dwav_comp_advertise - public :: dwav_comp_init - public :: dwav_comp_run - public :: dwav_comp_export - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - type(mct_aVect) :: x2w - type(mct_aVect) :: w2x - character(len=CS), pointer :: avifld(:) ! character array for field names coming from streams - character(len=CS), pointer :: avofld(:) ! character array for field names to be sent/received from mediator - character(CXX) :: flds_w2x = '' - character(CXX) :: flds_x2w = '' - character(len=*), parameter :: rpfile = 'rpointer.wav' - character(*) , parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine dwav_comp_advertise(importState, exportState, flds_scalar_name, & - wav_present, wav_prognostic, & - fldsFrWav_num, fldsFrWav, fldsToWav_num, fldsToWav, rc) - - ! 1. determine export and import fields to advertise to mediator - ! 2. determine translation of fields from streams to export/import fields - - ! input/output arguments - type(ESMF_State) :: importState - type(ESMF_State) :: exportState - character(len=*) , intent(in) :: flds_scalar_name - logical , intent(in) :: wav_present - logical , intent(in) :: wav_prognostic - integer , intent(out) :: fldsFrWav_num - type (fld_list_type) , intent(out) :: fldsFrWav(:) - integer , intent(out) :: fldsToWav_num - type (fld_list_type) , intent(out) :: fldsToWav(:) - integer , intent(out) :: rc - - ! local variables - integer :: n - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - if (.not. wav_present) return - - !------------------- - ! export fields - !------------------- - - ! scalar fields that need to be advertised - - fldsFrWav_num=1 - fldsFrWav(1)%stdname = trim(flds_scalar_name) - - ! export fields that have a corresponding stream field - - call dshr_fld_add(data_fld="lamult", data_fld_array=avifld, model_fld="Sw_lamult", model_fld_array=avofld, & - model_fld_concat=flds_w2x, fldlist_num=fldsFrWav_num, fldlist=fldsFrWav) - - call dshr_fld_add(data_fld="ustokes", data_fld_array=avifld, model_fld="Sw_ustokes", model_fld_array=avofld, & - model_fld_concat=flds_w2x, fldlist_num=fldsFrWav_num, fldlist=fldsFrWav) - - call dshr_fld_add(data_fld="vstokes", data_fld_array=avifld, model_fld="Sw_vstokes", model_fld_array=avofld, & - model_fld_concat=flds_w2x, fldlist_num=fldsFrWav_num, fldlist=fldsFrWav) - - !------------------- - ! advertise export state - !------------------- - - do n = 1,fldsFrWav_num - call NUOPC_Advertise(exportState, standardName=fldsFrWav(n)%stdname, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - enddo - - end subroutine dwav_comp_advertise - - !=============================================================================== - - subroutine dwav_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - target_ymd, target_tod, calendar, mesh, nxg, nyg) - - ! !DESCRIPTION: initialize dwav model - - ! !INPUT/OUTPUT PARAMETERS: - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: compid ! mct comp id - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! 
task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - integer , intent(in) :: target_ymd ! model date - integer , intent(in) :: target_tod ! model sec into model date - character(len=*) , intent(in) :: calendar ! calendar type - type(ESMF_Mesh) , intent(in) :: mesh ! ESMF docn mesh - integer , intent(out) :: nxg, nyg - - !--- local variables --- - integer :: n,k ! generic counters - integer :: lsize ! local size - logical :: exists ! file existance - integer :: nu ! unit number - logical :: write_restart - type(ESMF_DistGrid) :: distGrid - integer, allocatable, target :: gindex(:) - integer :: dimCount - integer :: tileCount - integer :: deCount - integer :: gsize - integer, allocatable :: elementCountPTile(:) - integer, allocatable :: indexCountPDE(:,:) - integer :: spatialDim - integer :: numOwnedElements - real(R8), pointer :: ownedElemCoords(:) - integer :: klat, klon, kfrac ! AV indices - real(R8), pointer :: domlon(:),domlat(:) ! ggrid domain lats and lots - real(R8), pointer :: xc(:), yc(:) ! mesh lats and lons - real(r8) :: maxerr, err - integer :: maxn - real(r8) :: tolerance = 1.e-4 - integer :: rc - character(*), parameter :: F00 = "('(dwav_comp_init) ',8a)" - character(*), parameter :: subName = "(dwav_comp_init) " - !------------------------------------------------------------------------------- - - call t_startf('DWAV_INIT') - - !---------------------------------------------------------------------------- - ! Initialize pio - !---------------------------------------------------------------------------- - - call shr_strdata_pioinit(SDWAV, compid) - - !---------------------------------------------------------------------------- - ! Create a data model global segmap - !---------------------------------------------------------------------------- - - call t_startf('dwav_strdata_init') - - if (my_task == master_task) write(logunit,F00) ' initialize SDWAV gsmap' - - ! obtain the distgrid from the mesh that was read in - call ESMF_MeshGet(Mesh, elementdistGrid=distGrid, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determin local size on my processor - call ESMF_distGridGet(distGrid, localDe=0, elementCount=lsize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global index space for my processor - allocate(gindex(lsize)) - call ESMF_distGridGet(distGrid, localDe=0, seqIndexList=gindex, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! determine global size of distgrid - call ESMF_distGridGet(distGrid, dimCount=dimCount, deCount=deCount, tileCount=tileCount, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - allocate(elementCountPTile(tileCount)) - call ESMF_distGridGet(distGrid, elementCountPTile=elementCountPTile, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - gsize = 0 - do n = 1,size(elementCountPTile) - gsize = gsize + elementCountPTile(n) - end do - deallocate(elementCountPTile) - - ! create the data model gsmap given the local size, global size and gindex - call mct_gsMap_init( SDWAV%gsmap, gindex, mpicom, compid, lsize, gsize) - deallocate(gindex) - - !---------------------------------------------------------------------------- - ! Initialize SDWAV - !---------------------------------------------------------------------------- - - ! The call to shr_strdata_init_model_domain creates the SDWAV%gsmap which - ! 
is a '2d1d' decommp (1d decomp of 2d grid) and also create SDWAV%grid - - SDWAV%calendar = trim(shr_cal_calendarName(trim(calendar))) - - call shr_strdata_init_model_domain(SDWAV, mpicom, compid, my_task, gsmap=SDWAV%gsmap) - - if (my_task == master_task) then - call shr_strdata_print(SDWAV,'SDWAV data') - endif - - ! obtain mesh lats and lons - call ESMF_MeshGet(mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - allocate(ownedElemCoords(spatialDim*numOwnedElements)) - allocate(xc(numOwnedElements), yc(numOwnedElements)) - call ESMF_MeshGet(mesh, ownedElemCoords=ownedElemCoords) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (numOwnedElements /= lsize) then - call shr_sys_abort('ERROR: numOwnedElements is not equal to lsize') - end if - do n = 1,lsize - xc(n) = ownedElemCoords(2*n-1) - yc(n) = ownedElemCoords(2*n) - end do - - ! error check that mesh lats and lons correspond to those on the input domain file - allocate(domlon(lsize), domlat(lsize)) - - maxerr = 0._r8 - maxn = 0 - klon = mct_aVect_indexRA(SDWAV%grid%data,'lon') - do n = 1, lsize - domlon(n) = SDWAV%grid%data%rattr(klon,n) - err = abs(domlon(n) - xc(n)) - if (err > maxerr) then - maxerr = err - maxn = n - end if - !SDWAV%grid%data%rattr(klon,n) = xc(n) - end do - if (maxerr > 0._r8) then - write(6,100) maxn, domlon(maxn), xc(maxn), maxerr -100 format('WARNING: DWAV n, dom_lon, mesh_lon, max_diff_lon = ',i6,2(f21.13,3x),d21.5) - end if - if (maxerr > tolerance) then - write(6,*)'ERROR: diff of dom_lon and mesh_lon is greater than tolerance of ',tolerance - call shr_sys_abort() - end if - - maxerr = 0._r8 - maxn = 0 - klat = mct_aVect_indexRA(SDWAV%grid%data,'lat') - do n = 1, lsize - domlat(n) = SDWAV%grid%data%rattr(klat,n) - err = abs(domlat(n) - yc(n)) - if ( err > maxerr ) then - maxerr = err - maxn = n - end if - !SDWAV%grid%data%rattr(klat,n) = yc(n) - end do - if (maxerr > 0._r8) then - write(6,101) maxn, domlat(maxn), yc(maxn), maxerr -101 format('WARNING: DWAV n, dom_lat, mesh_lat, max_diff_lat = ',i6,2(f21.13,3x),d21.5) - end if - if (maxerr > tolerance) then - write(6,*)'ERROR: diff of dom_lat and mesh_lat is greater than tolerance of ',tolerance - call shr_sys_abort() - end if - - deallocate(domlon, domlat) - - !---------------------------------------------------------------------------- - ! Initialize SDWAV attributes for streams and mapping of streams to model domain - !---------------------------------------------------------------------------- - - call shr_strdata_init_streams(SDWAV, compid, mpicom, my_task) - call shr_strdata_init_mapping(SDWAV, compid, mpicom, my_task) - - call t_stopf('dwav_strdata_init') - - !---------------------------------------------------------------------------- - ! Initialize MCT attribute vectors - !---------------------------------------------------------------------------- - - if (my_task == master_task) write(logunit,F00) 'allocate AVs' - - call mct_avect_init(w2x, rlist=flds_w2x, lsize=lsize) - call mct_avect_zero(w2x) - - ! no import state for now - ! call mct_avect_init(x2w, rlist=flds_x2w, lsize=lsize) - ! call mct_avect_zero(x2w) - - nxg = SDWAV%nxg - nyg = SDWAV%nyg - - !---------------------------------------------------------------------------- - ! Read restart - !---------------------------------------------------------------------------- - - if (read_restart) then - if (trim(rest_file) == trim(nullstr) .and. 
trim(rest_file_strm) == trim(nullstr)) then - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from rpointer' - inquire(file=trim(rpfile)//trim(inst_suffix),exist=exists) - if (.not.exists) then - write(logunit,F00) ' ERROR: rpointer file does not exist' - call shr_sys_abort(trim(subname)//' ERROR: rpointer file missing') - endif - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - read(nu,'(a)') rest_file - read(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - inquire(file=trim(rest_file_strm),exist=exists) - endif - call shr_mpi_bcast(rest_file,mpicom,'rest_file') - call shr_mpi_bcast(rest_file_strm,mpicom,'rest_file_strm') - else - ! use namelist already read - if (my_task == master_task) then - write(logunit,F00) ' restart filenames from namelist ' - inquire(file=trim(rest_file_strm),exist=exists) - endif - endif - call shr_mpi_bcast(exists,mpicom,'exists') - if (exists) then - if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file_strm) - call shr_strdata_restRead(trim(rest_file_strm),SDWAV,mpicom) - else - if (my_task == master_task) write(logunit,F00) ' file not found, skipping ',trim(rest_file_strm) - endif - endif - - !---------------------------------------------------------------------------- - ! Set initial wav state - !---------------------------------------------------------------------------- - - write_restart = .false. - call dwav_comp_run(mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - target_ymd, target_tod) - - if (my_task == master_task) then - write(logunit,F00) 'dwav_comp_init done' - end if - - call t_stopf('DWAV_INIT') - - end subroutine dwav_comp_init - - !=============================================================================== - - subroutine dwav_comp_run(mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - target_ymd, target_tod, case_name) - - ! ---------------------------- - ! run method for dwav model - ! ---------------------------- - - ! input/output parameters: - integer , intent(in) :: mpicom ! mpi communicator - integer , intent(in) :: my_task ! my task in mpi communicator mpicom - integer , intent(in) :: master_task ! task number of master task - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - logical , intent(in) :: read_restart ! start from restart - logical , intent(in) :: write_restart ! write restart - integer , intent(in) :: target_ymd - integer , intent(in) :: target_tod - character(CL) , intent(in), optional :: case_name ! case name - - ! local variables - integer :: n ! indices - integer :: idt ! integer timestep - integer :: nu ! unit number - character(len=18) :: date_str - character(*), parameter :: F00 = "('(dwav_comp_run) ',8a)" - character(*), parameter :: F04 = "('(dwav_comp_run) ',2a,2i8,'s')" - character(*), parameter :: subName = "(dwav_comp_run) " - !------------------------------------------------------------------------------- - - call t_startf('DWAV_RUN') - - !-------------------- - ! UNPACK - !-------------------- - - call t_startf('dwav_unpack') - ! Nothing to be done for now - call t_stopf('dwav_unpack') - - !-------------------- - ! 
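
The mesh/domain consistency check in the NUOPC dwav_comp_init above boils down to: for each local point, compare the longitude (and likewise the latitude) from the domain file against the ESMF mesh coordinate, track the largest difference, print a warning if it is nonzero, and abort if it exceeds the 1.0e-4 tolerance. A NumPy sketch of that check (the function name is illustrative):

import numpy as np

def check_coords(dom, mesh, name, tolerance=1.0e-4):
    """Compare domain-file coordinates against mesh coordinates, as in dwav_comp_init."""
    dom = np.asarray(dom, dtype=float)
    mesh = np.asarray(mesh, dtype=float)
    err = np.abs(dom - mesh)
    n = int(err.argmax())
    maxerr = float(err[n])
    if maxerr > 0.0:
        print("WARNING: DWAV n, dom_%s, mesh_%s, max_diff = %d %f %f %e"
              % (name, name, n, dom[n], mesh[n], maxerr))
    if maxerr > tolerance:
        raise RuntimeError("diff of dom_%s and mesh_%s exceeds tolerance %g"
                           % (name, name, tolerance))
    return maxerr

# check_coords([10.0, 20.0], [10.0, 20.00005], "lon")  -> warns, returns 5e-05
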
ADVANCE WAV - !-------------------- - - call t_barrierf('dwav_BARRIER',mpicom) - call t_startf('dwav') - - call t_startf('dwav_strdata_advance') - call shr_strdata_advance(SDWAV,target_ymd,target_tod,mpicom,'dwav') - call t_stopf('dwav_strdata_advance') - - !--- copy all fields from streams to w2x as default --- - call t_barrierf('dwav_scatter_BARRIER',mpicom) - call t_startf('dwav_scatter') - do n = 1,SDWAV%nstreams - call shr_dmodel_translateAV(SDWAV%avs(n), w2x, avifld, avofld) - enddo - call t_stopf('dwav_scatter') - - !------------------------------------------------- - ! Determine data model behavior based on the mode - !------------------------------------------------- - - call t_startf('datamode') - select case (trim(datamode)) - - case('COPYALL') - ! do nothing extra - end select - - call t_stopf('datamode') - - !-------------------- - ! Write restart - !-------------------- - - if (write_restart) then - call t_startf('dwav_restart') - call shr_cal_datetod2string(date_str, target_ymd, target_tod) - write(rest_file, "(6a)") & - trim(case_name), '.dwav',trim(inst_suffix),'.r.', & - trim(date_str),'.nc' - write(rest_file_strm,"(6a)") & - trim(case_name), '.dwav',trim(inst_suffix),'.rs1.', & - trim(date_str),'.bin' - if (my_task == master_task) then - nu = shr_file_getUnit() - open(nu,file=trim(rpfile)//trim(inst_suffix),form='formatted') - write(nu,'(a)') rest_file - write(nu,'(a)') rest_file_strm - close(nu) - call shr_file_freeUnit(nu) - endif - if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file),target_ymd,target_tod - call shr_strdata_restWrite(trim(rest_file_strm),SDWAV,mpicom,trim(case_name),'SDWAV strdata') - call t_stopf('dwav_restart') - endif - - call t_stopf('dwav') - - call t_stopf('DWAV_RUN') - - end subroutine dwav_comp_run - - !=============================================================================== - - subroutine dwav_comp_export(exportState, rc) - - ! input/output variables - type(ESMF_State) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: k - !---------------------------------------------------------------- - - rc = ESMF_SUCCESS - - k = mct_aVect_indexRA(w2x, "Sw_lamult") - call dshr_export(w2x%rattr(k,:), exportState, "Sw_lamult", rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(w2x, "Sw_ustokes") - call dshr_export(w2x%rattr(k,:), exportState, "Sw_ustokes", rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - k = mct_aVect_indexRA(w2x, "Sw_vstokes") - call dshr_export(w2x%rattr(k,:), exportState, "Sw_vstokes", rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - end subroutine dwav_comp_export - -end module dwav_comp_mod diff --git a/src/components/data_comps/dwav/nuopc/dwav_shr_mod.F90 b/src/components/data_comps/dwav/nuopc/dwav_shr_mod.F90 deleted file mode 100644 index 8723b99d98c..00000000000 --- a/src/components/data_comps/dwav/nuopc/dwav_shr_mod.F90 +++ /dev/null @@ -1,144 +0,0 @@ -module dwav_shr_mod - - ! !USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 - use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_sys_mod , only : shr_sys_flush, shr_sys_abort - use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast - - ! !PUBLIC TYPES: - implicit none - private ! except - - !-------------------------------------------------------------------------- - ! 
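
The shr_dmodel_translateAV call in the run loop above copies each stream field into the export attribute vector by pairing names in avifld (stream side, e.g. "lamult") with names in avofld (coupler side, e.g. "Sw_lamult"), the pairing built by the dshr_fld_add calls. With plain dictionaries of arrays standing in for MCT attribute vectors, the translation is just (the helper is illustrative):

avifld = ["lamult", "ustokes", "vstokes"]           # stream field names
avofld = ["Sw_lamult", "Sw_ustokes", "Sw_vstokes"]  # export (w2x) field names

def translate_av(stream_av, w2x):
    """Copy stream fields into the export 'attribute vector' using the name pairing."""
    for sname, mname in zip(avifld, avofld):
        if sname in stream_av and mname in w2x:
            w2x[mname][:] = stream_av[sname]

stream_av = {"lamult": [1.0, 1.1], "ustokes": [0.1, 0.2], "vstokes": [0.0, 0.0]}
w2x = {name: [0.0, 0.0] for name in avofld}
translate_av(stream_av, w2x)
# w2x["Sw_lamult"] == [1.0, 1.1]
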
Public interfaces - !-------------------------------------------------------------------------- - - public :: dwav_shr_read_namelists - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! stream data type - type(shr_strdata_type), public :: SDWAV - - ! input namelist variables - character(CL) , public :: restfilm ! model restart file namelist - character(CL) , public :: restfils ! stream restart file namelist - logical , public :: force_prognostic_true ! if true set prognostic true - - ! variables obtained from namelist read - character(CL) , public :: rest_file ! restart filename - character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: datamode ! mode - character(len=*), public, parameter :: nullstr = 'undefined' - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - subroutine dwav_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, wav_present, wav_prognostic) - - ! !DESCRIPTION: Read in dwav namelists - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: filename ! input namelist filename - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! logging unit number - logical , intent(out) :: wav_present ! flag - logical , intent(out) :: wav_prognostic ! flag - - !--- local variables --- - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - character(CL) :: decomp ! decomp strategy - not used for NUOPC - but still needed in namelist for now - - !--- formats --- - character(*), parameter :: F00 = "('(dwav_comp_init) ',8a)" - character(*), parameter :: F0L = "('(dwav_comp_init) ',a, l2)" - character(*), parameter :: F01 = "('(dwav_comp_init) ',a,5i8)" - character(*), parameter :: F02 = "('(dwav_comp_init) ',a,4es13.6)" - character(*), parameter :: F06 = "('(dwav_comp_init) ',a,5l3)" - character(*), parameter :: subName = "(shr_dwav_read_namelists) " - !------------------------------------------------------------------------------- - - !----- define namelist ----- - namelist / dwav_nml / decomp, & - restfilm, restfils, force_prognostic_true - - decomp = "1d" - restfilm = trim(nullstr) - restfils = trim(nullstr) - force_prognostic_true = .false. - - if (my_task == master_task) then - nunit = shr_file_getUnit() ! 
get unused unit number - open (nunit,file=trim(filename),status="old",action="read") - read (nunit,nml=dwav_nml,iostat=ierr) - close(nunit) - call shr_file_freeUnit(nunit) - if (ierr > 0) then - write(logunit,F01) 'ERROR: reading input namelist, '//trim(filename)//' iostat=',ierr - call shr_sys_abort(subName//': namelist read error '//trim(filename)) - end if - write(logunit,F00)' decomp = ',trim(decomp) - write(logunit,F00)' restfilm = ',trim(restfilm) - write(logunit,F00)' restfils = ',trim(restfils) - write(logunit,F0L)' force_prognostic_true = ',force_prognostic_true - endif - - call shr_mpi_bcast(decomp ,mpicom,'decomp') - call shr_mpi_bcast(restfilm,mpicom,'restfilm') - call shr_mpi_bcast(restfils,mpicom,'restfils') - call shr_mpi_bcast(force_prognostic_true,mpicom,'force_prognostic_true') - - rest_file = trim(restfilm) - rest_file_strm = trim(restfils) - - !---------------------------------------------------------------------------- - ! Read dshr namelist - !---------------------------------------------------------------------------- - - call shr_strdata_readnml(SDWAV, trim(filename), mpicom=mpicom) - - !---------------------------------------------------------------------------- - ! Determine and validate datamode - !---------------------------------------------------------------------------- - - datamode = trim(SDWAV%dataMode) - - if ( trim(datamode) == 'NULL' .or. & - trim(datamode) == 'COPYALL') then - if (my_task == master_task) then - write(logunit,F00) 'dwav datamode = ',trim(datamode) - end if - else - write(logunit,F00) ' ERROR illegal dwav datamode = ',trim(datamode) - call shr_sys_abort() - end if - - !---------------------------------------------------------------------------- - ! Determine present and prognostic flags - !---------------------------------------------------------------------------- - - wav_present = .false. - wav_prognostic = .false. - if (force_prognostic_true) then - wav_present = .true. - wav_prognostic = .true. - endif - if (trim(datamode) /= 'NULL') then - wav_present = .true. - end if - - end subroutine dwav_shr_read_namelists - -end module dwav_shr_mod diff --git a/src/components/data_comps/dwav/nuopc/wav_comp_nuopc.F90 b/src/components/data_comps/dwav/nuopc/wav_comp_nuopc.F90 deleted file mode 100644 index 033da13b471..00000000000 --- a/src/components/data_comps/dwav/nuopc/wav_comp_nuopc.F90 +++ /dev/null @@ -1,510 +0,0 @@ -module wav_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
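
Both dwav_shr_mod variants read dwav_in on the master task only and then broadcast each value with shr_mpi_bcast so every rank ends up with the same settings. The same read-on-master-then-broadcast pattern in Python, using mpi4py purely as a stand-in for shr_mpi_bcast (mpi4py is an assumption for illustration, not something CIME uses):

from mpi4py import MPI

comm = MPI.COMM_WORLD
master_task = 0

settings = None
if comm.Get_rank() == master_task:
    # stand-in for reading the dwav_nml namelist from dwav_in<inst_suffix>
    settings = {"decomp": "1d", "restfilm": "undefined",
                "restfils": "undefined", "force_prognostic_true": False}

# every rank receives the same dictionary after the broadcast
settings = comm.bcast(settings, root=master_task)
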
This is the NUOPC cap for DWAV - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_cal_mod , only : shr_cal_noleap, shr_cal_gregorian, shr_cal_ymd2date - use shr_const_mod , only : SHR_CONST_SPVAL - use shr_sys_mod , only : shr_sys_abort - use dshr_nuopc_mod , only : fld_list_type, fldsMax, dshr_realize - use dshr_nuopc_mod , only : ModelInitPhase, ModelSetRunClock, ModelSetMetaData - use dshr_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dshr_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dwav_shr_mod , only : dwav_shr_read_namelists - use dwav_comp_mod , only : dwav_comp_init, dwav_comp_run, dwav_comp_advertise - use dwav_comp_mod , only : dwav_comp_export - - implicit none - private ! except - - public :: SetServices - - private :: InitializeAdvertise - private :: InitializeRealize - private :: ModelAdvance - private :: ModelFinalize - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CS) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - - integer :: fldsToWav_num = 0 - integer :: fldsFrWav_num = 0 - type (fld_list_type) :: fldsToWav(fldsMax) - type (fld_list_type) :: fldsFrWav(fldsMax) - - integer :: compid ! mct comp id - integer :: mpicom ! mpi communicator - integer :: my_task ! my task in mpi communicator mpicom - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - logical :: read_restart ! start from restart - character(len=256) :: case_name ! case name - character(len=80) :: calendar ! calendar name - logical :: wav_prognostic ! flag - logical :: use_esmf_metadata = .false. - character(*), parameter :: modName = "(wav_comp_nuopc)" - character(*), parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! 
switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p1"/), userRoutine=InitializeAdvertise, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - phaseLabelList=(/"IPDv01p3"/), userRoutine=InitializeRealize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - logical :: wav_present ! flag - type(ESMF_VM) :: vm - integer :: lmpicom - character(CL) :: cvalue - integer :: n - integer :: ierr ! error code - integer :: shrlogunit ! original log unit - integer :: localPet - integer :: inst_index ! number of current instance (ie. 1) - character(len=CL) :: fileName ! generic file name - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! generate local mpi comm - !---------------------------------------------------------------------------- - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, mpiCommunicator=lmpicom, localpet=my_task, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call mpi_comm_dup(lmpicom, mpicom, ierr) - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! 
Read input namelists and set present and prognostic flags - !---------------------------------------------------------------------------- - - filename = "dwav_in"//trim(inst_suffix) - call dwav_shr_read_namelists(filename, mpicom, my_task, master_task, & - logunit, wav_present, wav_prognostic) - - !-------------------------------- - ! advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - endif - - write(6,*)'DEBUG: wav index nx = ', flds_scalar_index_nx - write(6,*)'DEBUG: wav index ny = ', flds_scalar_index_ny - - call dwav_comp_advertise(importState, exportState, flds_scalar_name, & - wav_present, wav_prognostic, & - fldsFrWav_num, fldsFrWav, fldsToWav_num, fldsToWav, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_Mesh) :: Emesh - type(ESMF_TIME) :: currTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_Calendar) :: esmf_calendar ! esmf calendar - type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - integer :: current_ymd ! model date - integer :: current_year ! model year - integer :: current_mon ! model month - integer :: current_day ! model day - integer :: current_tod ! model sec into model date - character(CL) :: cvalue - integer :: shrlogunit ! 
original log unit - integer :: nxg, nyg - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! get config variables - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='case_name', value=case_name, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name='read_restart', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) read_restart - - call NUOPC_CompAttributeGet(gcomp, name='MCTID', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) compid - - !---------------------------------------------------------------------------- - ! Determine calendar info - !---------------------------------------------------------------------------- - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call ESMF_TimeGet( currTime, yy=current_year, mm=current_mon, dd=current_day, s=current_tod, & - calkindflag=esmf_caltype, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(current_year, current_mon, current_day, current_ymd) - - if (esmf_caltype == ESMF_CALKIND_NOLEAP) then - calendar = shr_cal_noleap - else if (esmf_caltype == ESMF_CALKIND_GREGORIAN) then - calendar = shr_cal_gregorian - else - call ESMF_LogWrite(subname//" ERROR bad ESMF calendar name "//trim(calendar), ESMF_LOGMSG_ERROR) - rc = ESMF_Failure - return - end if - - !-------------------------------- - ! Generate the mesh - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name='mesh_wav', value=cvalue, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (my_task == master_task) then - write(logunit,*) " obtaining dwav mesh from " // trim(cvalue) - end if - - Emesh = ESMF_MeshCreate(filename=trim(cvalue), fileformat=ESMF_FILEFORMAT_ESMFMESH, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Initialize model - !-------------------------------- - - call dwav_comp_init(mpicom, compid, my_task, master_task, & - inst_suffix, logunit, read_restart, & - current_ymd, current_tod, calendar, EMesh, nxg, nyg) - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call dshr_realize( & - state=ExportState, & - fldList=fldsFrWav, & - numflds=fldsFrWav_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dwavExport',& - mesh=Emesh, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
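
InitializeRealize above converts the ESMF clock's year/month/day into the single integer date the data model uses; shr_cal_ymd2date packs it as yyyymmdd, with the seconds of day carried separately as tod. A one-line sketch of that packing:

def ymd2date(year, month, day):
    """Pack a calendar date into the yyyymmdd integer used by shr_strdata."""
    return year * 10000 + month * 100 + day

assert ymd2date(2000, 1, 15) == 20000115
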
Pack export state - !-------------------------------- - - call dwav_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call State_diagnose(exportState, subname//':ES', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call shr_file_setLogUnit (shrlogunit) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - if (use_esmf_metadata) then - call ModelSetMetaData(gcomp, name='DWAV', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_Alarm) :: alarm - type(ESMF_Time) :: currTime, nextTime - type(ESMF_TimeInterval) :: timeStep - type(ESMF_State) :: importState, exportState - integer :: shrlogunit ! original log unit - logical :: write_restart ! write a restart - integer :: yr ! year - integer :: mon ! month - integer :: day ! day in month - integer :: next_ymd ! model date - integer :: next_tod ! model sec into model date - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - call memcheck(subname, 3, my_task==master_task) - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! query the Component for its clock, importState and exportState - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, exportState=exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Unpack import state - !-------------------------------- - - if (wav_prognostic) then - ! no import data for now - end if - - !-------------------------------- - ! Run model - !-------------------------------- - - ! Determine if will write restart - - call ESMF_ClockGetAlarm(clock, alarmname='alarm_restart', alarm=alarm, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (ESMF_AlarmIsRinging(alarm, rc=rc)) then - if (ChkErr(rc,__LINE__,u_FILE_u)) return - write_restart = .true. - call ESMF_AlarmRingerOff( alarm, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - write_restart = .false. - endif - - ! For nuopc - the component clock is advanced at the end of the time interval - ! For these to match for now - need to advance nuopc one timestep ahead for - ! 
shr_strdata time interpolation - - call ESMF_ClockGet( clock, currTime=currTime, timeStep=timeStep, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - nextTime = currTime + timeStep - call ESMF_TimeGet( nextTime, yy=yr, mm=mon, dd=day, s=next_tod, rc=rc ) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call shr_cal_ymd2date(yr, mon, day, next_ymd) - - call dwav_comp_run(mpicom, my_task, master_task, & - inst_suffix, logunit, read_restart, write_restart, & - next_ymd, next_tod, case_name=case_name) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call dwav_comp_export(exportState, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - call State_diagnose(exportState, subname//':ES', rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - if (my_task == master_task) then - call log_clock_advance(clock, 'DATM', logunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - call shr_file_setLogUnit (shrlogunit) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(*), parameter :: F00 = "('(dwav_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dwav_comp_final) ',73('-'))" - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) ' dwav : end of main integration loop' - write(logunit,F91) - end if - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelFinalize - -end module wav_comp_nuopc diff --git a/src/components/stub_comps/satm/cime_config/buildlib b/src/components/stub_comps/satm/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/satm/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/satm/cime_config/buildnml b/src/components/stub_comps/satm/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/satm/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/satm/mct/atm_comp_mct.F90 b/src/components/stub_comps/satm/mct/atm_comp_mct.F90 deleted file mode 100644 index e23fd770676..00000000000 --- a/src/components/stub_comps/satm/mct/atm_comp_mct.F90 +++ /dev/null @@ -1,115 +0,0 @@ -module atm_comp_mct - - ! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! 
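
The comment in ModelAdvance above is the key subtlety of this cap: NUOPC hands the component a clock whose interval ends at currTime + timeStep, so the target date passed to dwav_comp_run (and hence to the stream time interpolation) is the next time, not the current one. A sketch of that next-date calculation with Python's datetime; the yyyymmdd/seconds-of-day split mirrors next_ymd/next_tod above, and note that datetime is Gregorian, whereas the model may be running a no-leap calendar:

from datetime import datetime, timedelta

def next_model_date(curr_ymd, curr_tod, dtime_seconds):
    """Advance one coupling interval and return (next_ymd, next_tod)."""
    curr = datetime.strptime("%08d" % curr_ymd, "%Y%m%d") + timedelta(seconds=curr_tod)
    nxt = curr + timedelta(seconds=dtime_seconds)
    next_ymd = nxt.year * 10000 + nxt.month * 100 + nxt.day
    next_tod = nxt.hour * 3600 + nxt.minute * 60 + nxt.second
    return next_ymd, next_tod

# next_model_date(20000101, 84600, 3600) -> (20000102, 1800)
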
Public interfaces - !-------------------------------------------------------------------------- - - public :: atm_init_mct - public :: atm_run_mct - public :: atm_final_mct - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: atm_init_mct - ! - ! !DESCRIPTION: - ! stub atm model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine atm_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData( cdata%infodata, atm_present=.false.) - call seq_infodata_PutData( cdata%infodata, atm_prognostic=.false.) - - end subroutine atm_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: atm_run_mct - ! - ! !DESCRIPTION: - ! stub atm model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine atm_run_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine atm_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: atm_final_mct - ! - ! !DESCRIPTION: - ! stub atm model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine atm_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! 
dead -> driver - - !EOP - !------------------------------------------------------------------------------- - - end subroutine atm_final_mct - - !=============================================================================== - -end module atm_comp_mct diff --git a/src/components/stub_comps/sesp/cime_config/buildlib b/src/components/stub_comps/sesp/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/sesp/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/sesp/cime_config/buildnml b/src/components/stub_comps/sesp/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/sesp/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/sesp/mct/esp_comp_mct.F90 b/src/components/stub_comps/sesp/mct/esp_comp_mct.F90 deleted file mode 100644 index 88bd012f3ef..00000000000 --- a/src/components/stub_comps/sesp/mct/esp_comp_mct.F90 +++ /dev/null @@ -1,113 +0,0 @@ -module esp_comp_mct - - ! !USES: - - use mct_mod, only: mct_aVect - use esmf, only: ESMF_Clock - use seq_cdata_mod, only: seq_cdata - use seq_infodata_mod, only: seq_infodata_PutData - - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: esp_init_mct - public :: esp_run_mct - public :: esp_final_mct - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: esp_init_mct - ! - ! !DESCRIPTION: - ! stub esp model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine esp_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, esp_present=.false., & - esp_prognostic=.false., esp_phase=1) - - end subroutine esp_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: esp_run_mct - ! - ! !DESCRIPTION: - ! stub esp model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine esp_run_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! 
!INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine esp_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: esp_final_mct - ! - ! !DESCRIPTION: - ! stub esp model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine esp_final_mct( EClock, cdata, x2d, d2x) - - implicit none - - !----- arguments ----- - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine esp_final_mct - - !=============================================================================== - -end module esp_comp_mct diff --git a/src/components/stub_comps/sglc/cime_config/buildlib b/src/components/stub_comps/sglc/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/sglc/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/sglc/cime_config/buildnml b/src/components/stub_comps/sglc/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/sglc/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/sglc/mct/glc_comp_mct.F90 b/src/components/stub_comps/sglc/mct/glc_comp_mct.F90 deleted file mode 100644 index d1613ba1988..00000000000 --- a/src/components/stub_comps/sglc/mct/glc_comp_mct.F90 +++ /dev/null @@ -1,113 +0,0 @@ -module glc_comp_mct - - ! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: glc_init_mct - public :: glc_run_mct - public :: glc_final_mct - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: glc_init_mct - ! - ! !DESCRIPTION: - ! stub glc model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine glc_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! 
!INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, glc_present=.false., & - glclnd_present=.false., glcocn_present=.false., glcice_present=.false., & - glc_prognostic=.false.) - - end subroutine glc_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: glc_run_mct - ! - ! !DESCRIPTION: - ! stub glc model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine glc_run_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine glc_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: glc_final_mct - ! - ! !DESCRIPTION: - ! stub glc model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine glc_final_mct( EClock, cdata, x2d, d2x) - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine glc_final_mct - - !=============================================================================== - -end module glc_comp_mct diff --git a/src/components/stub_comps/siac/cime_config/buildlib b/src/components/stub_comps/siac/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/siac/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/siac/cime_config/buildnml b/src/components/stub_comps/siac/cime_config/buildnml deleted file mode 100755 index 6ddff93c44d..00000000000 --- a/src/components/stub_comps/siac/cime_config/buildnml +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" - -# DO NOTHING diff --git a/src/components/stub_comps/siac/mct/iac_comp_mct.F90 b/src/components/stub_comps/siac/mct/iac_comp_mct.F90 deleted file mode 100644 index 2c87fecb488..00000000000 --- a/src/components/stub_comps/siac/mct/iac_comp_mct.F90 +++ /dev/null @@ -1,114 +0,0 @@ -module iac_comp_mct - -! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - -! -! !PUBLIC TYPES: - implicit none - save - private ! except - -!-------------------------------------------------------------------------- -! 
Public interfaces -!-------------------------------------------------------------------------- - - public :: iac_init_mct - public :: iac_run_mct - public :: iac_final_mct -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -!=============================================================================== -!BOP =========================================================================== -! -! !IROUTINE: iac_init_mct -! -! !DESCRIPTION: -! stub iac model init -! -! !REVISION HISTORY: -! -! !INTERFACE: ------------------------------------------------------------------ - - subroutine iac_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - -! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - -!EOP -!------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, & - iac_present=.false., iac_prognostic=.false.) - -end subroutine iac_init_mct - -!=============================================================================== -!BOP =========================================================================== -! -! !IROUTINE: iac_run_mct -! -! !DESCRIPTION: -! stub iac model run -! -! !REVISION HISTORY: -! -! !INTERFACE: ------------------------------------------------------------------ - -subroutine iac_run_mct( EClock, cdata, x2d, d2x) - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - -!EOP -!------------------------------------------------------------------------------- - -end subroutine iac_run_mct - -!=============================================================================== -!BOP =========================================================================== -! -! !IROUTINE: iac_final_mct -! -! !DESCRIPTION: -! stub iac model finalize -! -! !REVISION HISTORY: -! -! !INTERFACE: ------------------------------------------------------------------ -! -subroutine iac_final_mct( EClock, cdata, x2d, d2x) - - implicit none - -! 
!INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - -!EOP -!------------------------------------------------------------------------------- - - end subroutine iac_final_mct - -!=============================================================================== - -end module iac_comp_mct diff --git a/src/components/stub_comps/sice/cime_config/buildlib b/src/components/stub_comps/sice/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/sice/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/sice/cime_config/buildnml b/src/components/stub_comps/sice/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/sice/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/sice/mct/ice_comp_mct.F90 b/src/components/stub_comps/sice/mct/ice_comp_mct.F90 deleted file mode 100644 index 324b14983c3..00000000000 --- a/src/components/stub_comps/sice/mct/ice_comp_mct.F90 +++ /dev/null @@ -1,113 +0,0 @@ -module ice_comp_mct - - ! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: ice_init_mct - public :: ice_run_mct - public :: ice_final_mct - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: ice_init_mct - ! - ! !DESCRIPTION: - ! stub ice model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine ice_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, ice_present=.false., & - ice_prognostic=.false., iceberg_prognostic=.false.) - - end subroutine ice_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: ice_run_mct - ! - ! !DESCRIPTION: - ! stub ice model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine ice_run_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! 
!INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine ice_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: ice_final_mct - ! - ! !DESCRIPTION: - ! stub ice model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine ice_final_mct( EClock, cdata, x2d, d2x) - - implicit none - - !----- arguments ----- - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine ice_final_mct - - !=============================================================================== - -end module ice_comp_mct diff --git a/src/components/stub_comps/slnd/cime_config/buildlib b/src/components/stub_comps/slnd/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/slnd/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/slnd/cime_config/buildnml b/src/components/stub_comps/slnd/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/slnd/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/slnd/mct/lnd_comp_mct.F90 b/src/components/stub_comps/slnd/mct/lnd_comp_mct.F90 deleted file mode 100644 index 939895bf88c..00000000000 --- a/src/components/stub_comps/slnd/mct/lnd_comp_mct.F90 +++ /dev/null @@ -1,114 +0,0 @@ -module lnd_comp_mct - - ! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: lnd_init_mct - public :: lnd_run_mct - public :: lnd_final_mct - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: lnd_init_mct - ! - ! !DESCRIPTION: - ! stub lnd model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine lnd_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! 
!INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, & - lnd_present=.false., lnd_prognostic=.false.) - - end subroutine lnd_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: lnd_run_mct - ! - ! !DESCRIPTION: - ! stub lnd model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine lnd_run_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine lnd_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: lnd_final_mct - ! - ! !DESCRIPTION: - ! stub lnd model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine lnd_final_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine lnd_final_mct - - !=============================================================================== - -end module lnd_comp_mct diff --git a/src/components/stub_comps/socn/cime_config/buildlib b/src/components/stub_comps/socn/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/socn/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/socn/cime_config/buildnml b/src/components/stub_comps/socn/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/socn/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/socn/mct/ocn_comp_mct.F90 b/src/components/stub_comps/socn/mct/ocn_comp_mct.F90 deleted file mode 100644 index 06974d5f959..00000000000 --- a/src/components/stub_comps/socn/mct/ocn_comp_mct.F90 +++ /dev/null @@ -1,114 +0,0 @@ -module ocn_comp_mct - - ! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! 
Public interfaces - !-------------------------------------------------------------------------- - - public :: ocn_init_mct - public :: ocn_run_mct - public :: ocn_final_mct - - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: ocn_init_mct - ! - ! !DESCRIPTION: - ! stub ocn model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine ocn_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, ocn_present=.false., & - ocn_prognostic=.false., ocnrof_prognostic=.false.) - - end subroutine ocn_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: ocn_run_mct - ! - ! !DESCRIPTION: - ! stub ocn model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine ocn_run_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine ocn_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: ocn_final_mct - ! - ! !DESCRIPTION: - ! stub ocn model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine ocn_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - !----- arguments ----- - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! 
dead -> driver - - !EOP - !------------------------------------------------------------------------------- - - end subroutine ocn_final_mct - - !=============================================================================== - -end module ocn_comp_mct diff --git a/src/components/stub_comps/srof/cime_config/buildlib b/src/components/stub_comps/srof/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/srof/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/srof/cime_config/buildnml b/src/components/stub_comps/srof/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/srof/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/srof/mct/rof_comp_mct.F90 b/src/components/stub_comps/srof/mct/rof_comp_mct.F90 deleted file mode 100644 index 64197a0c051..00000000000 --- a/src/components/stub_comps/srof/mct/rof_comp_mct.F90 +++ /dev/null @@ -1,113 +0,0 @@ -module rof_comp_mct - - ! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: rof_init_mct - public :: rof_run_mct - public :: rof_final_mct - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: rof_init_mct - ! - ! !DESCRIPTION: - ! stub rof model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine rof_init_mct( EClock, cdata, x2r, r2x, NLFilename ) - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2r, r2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, rof_present=.false., & - rofice_present=.false., rof_prognostic=.false.) - call seq_infodata_PutData(cdata%infodata, flood_present=.false.) - - end subroutine rof_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: rof_run_mct - ! - ! !DESCRIPTION: - ! stub rof model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine rof_run_mct( EClock, cdata, x2r, r2x ) - - implicit none - - ! 
!INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2r, r2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine rof_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: rof_final_mct - ! - ! !DESCRIPTION: - ! stub rof model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine rof_final_mct( EClock, cdata, x2r, r2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2r, r2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine rof_final_mct - - !=============================================================================== - -end module rof_comp_mct diff --git a/src/components/stub_comps/swav/cime_config/buildlib b/src/components/stub_comps/swav/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/stub_comps/swav/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/stub_comps/swav/cime_config/buildnml b/src/components/stub_comps/swav/cime_config/buildnml deleted file mode 100755 index 70eb7f07d82..00000000000 --- a/src/components/stub_comps/swav/cime_config/buildnml +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python - -""" -build stub model namelist -""" -# DO NOTHING -#pylint: disable=unused-argument -def buildnml(case, caseroot, compname): - pass diff --git a/src/components/stub_comps/swav/mct/wav_comp_mct.F90 b/src/components/stub_comps/swav/mct/wav_comp_mct.F90 deleted file mode 100644 index 08140ac17b1..00000000000 --- a/src/components/stub_comps/swav/mct/wav_comp_mct.F90 +++ /dev/null @@ -1,113 +0,0 @@ -module wav_comp_mct - - ! !USES: - - use mct_mod - use esmf - use seq_cdata_mod - use seq_infodata_mod - - ! - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: wav_init_mct - public :: wav_run_mct - public :: wav_final_mct - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS - !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: wav_init_mct - ! - ! !DESCRIPTION: - ! stub wav model init - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine wav_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! 
!INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename - - !EOP - !------------------------------------------------------------------------------- - - call seq_infodata_PutData(cdata%infodata, wav_present=.false., & - wav_prognostic=.false.) - - end subroutine wav_init_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: wav_run_mct - ! - ! !DESCRIPTION: - ! stub wav model run - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine wav_run_mct( EClock, cdata, x2d, d2x) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine wav_run_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: wav_final_mct - ! - ! !DESCRIPTION: - ! stub wav model finalize - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - ! - subroutine wav_final_mct( EClock, cdata, x2d, d2x) - - implicit none - - !----- arguments ----- - type(ESMF_Clock) ,intent(inout) :: EClock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d - type(mct_aVect) ,intent(inout) :: d2x - - !EOP - !------------------------------------------------------------------------------- - - end subroutine wav_final_mct - - !=============================================================================== - -end module wav_comp_mct diff --git a/src/components/xcpl_comps/xatm/cime_config/buildlib b/src/components/xcpl_comps/xatm/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/xcpl_comps/xatm/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/xcpl_comps/xatm/cime_config/buildnml b/src/components/xcpl_comps/xatm/cime_config/buildnml deleted file mode 100755 index 50a814c79d3..00000000000 --- a/src/components/xcpl_comps/xatm/cime_config/buildnml +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -""" -build data model library -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildnml import build_xcpl_nml, parse_input -from CIME.case import Case - -def buildnml(case, caseroot, compname): - if compname != "xatm": - raise AttributeError - build_xcpl_nml(case, caseroot, compname) - -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "xatm") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/xcpl_comps/xatm/mct/atm_comp_mct.F90 b/src/components/xcpl_comps/xatm/mct/atm_comp_mct.F90 deleted file mode 100644 index fb603eb8b95..00000000000 --- 
a/src/components/xcpl_comps/xatm/mct/atm_comp_mct.F90 +++ /dev/null @@ -1,212 +0,0 @@ -module atm_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dead_mct_mod , only: dead_init_mct, dead_run_mct, dead_final_mct - use seq_flds_mod , only: seq_flds_a2x_fields, seq_flds_x2a_fields - use seq_timemgr_mod , only: seq_timemgr_EClockGetData - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: atm_init_mct - public :: atm_run_mct - public :: atm_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN),parameter :: master_task=0 ! task number of master task - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine atm_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !DESCRIPTION: initialize dead atm model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nxg ! global dim i-direction - integer(IN) :: nyg ! global dim j-direction - integer(IN) :: phase ! initialization phase - integer(IN) :: ierr ! error code - logical :: atm_present ! if true, component is present - logical :: atm_prognostic ! if true, component is prognostic - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, atm_phase=phase) - if (phase > 1) RETURN - - ! 
Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('atm_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Initialize xatm - !---------------------------------------------------------------------------- - - call dead_init_mct('atm', Eclock, x2d, d2x, & - seq_flds_x2a_fields, seq_flds_a2x_fields, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - if (nxg == 0 .and. nyg == 0) then - atm_present = .false. - atm_prognostic = .false. - else - atm_present = .true. - atm_prognostic = .true. - end if - - call seq_infodata_PutData( infodata, dead_comps=.true., & - atm_present=atm_present, & - atm_prognostic=atm_prognostic, & - atm_nx=nxg, atm_ny=nyg) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logunit) - - end subroutine atm_init_mct - - !=============================================================================== - subroutine atm_run_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: run method for dead atm model - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - real(R8) :: nextsw_cday ! calendar of next atm sw - character(*), parameter :: subName = "(atm_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call dead_run_mct('atm', EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - ! Set time of next radiadtion computation - call seq_timemgr_EClockGetData (EClock, next_cday=nextsw_cday) - call seq_infodata_PutData(infodata, nextsw_cday=nextsw_cday) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine atm_run_mct - - !=============================================================================== - subroutine atm_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! 
!DESCRIPTION: finalize method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(atm_final_mct) " - !------------------------------------------------------------------------------- - - call dead_final_mct('atm', my_task, master_task, logunit) - - end subroutine atm_final_mct - !=============================================================================== - -end module atm_comp_mct diff --git a/src/components/xcpl_comps/xatm/nuopc/atm_comp_nuopc.F90 b/src/components/xcpl_comps/xatm/nuopc/atm_comp_nuopc.F90 deleted file mode 100644 index 7a6ef4f0b97..00000000000 --- a/src/components/xcpl_comps/xatm/nuopc/atm_comp_nuopc.F90 +++ /dev/null @@ -1,593 +0,0 @@ -module atm_comp_nuopc - - !---------------------------------------------------------------------------- - ! This is the NUOPC cap for XATM - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dead_nuopc_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_index - use dead_nuopc_mod , only : dead_init_nuopc, dead_final_nuopc, dead_meshinit - use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type - use dead_nuopc_mod , only : ModelInitPhase, ModelSetRunClock - - implicit none - private ! except - - public :: SetServices - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CL) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - integer :: flds_scalar_index_nextsw_cday = 0 - - integer :: fldsToAtm_num = 0 - integer :: fldsFrAtm_num = 0 - type (fld_list_type) :: fldsToAtm(fldsMax) - type (fld_list_type) :: fldsFrAtm(fldsMax) - integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost - - real(r8), pointer :: gbuf(:,:) ! model info - real(r8), pointer :: lat(:) - real(r8), pointer :: lon(:) - integer , allocatable :: gindex(:) - integer :: nxg ! global dim i-direction - integer :: nyg ! global dim j-direction - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=5) :: inst_suffix ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! 
logging unit number - logical :: mastertask - integer :: dbug = 1 - character(*),parameter :: modName = "(xatm_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & - userRoutine=InitializeAdvertise, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & - userRoutine=InitializeRealize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - character(CS) :: stdname - integer :: n - integer :: lsize ! local array size - integer :: shrlogunit ! original log unit - character(CL) :: cvalue - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localpet=my_task, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - mastertask = (my_task==0) - - !---------------------------------------------------------------------------- - ! 
determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize xatm - !---------------------------------------------------------------------------- - - call dead_init_nuopc('atm', inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - allocate(gindex(lsize)) - allocate(lon(lsize)) - allocate(lat(lsize)) - - gindex(:) = gbuf(:,dead_grid_index) - lat(:) = gbuf(:,dead_grid_lat) - lon(:) = gbuf(:,dead_grid_lon) - - !-------------------------------- - ! advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxNextSwCday", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. 
isSet) then - read(cvalue,*) flds_scalar_index_nextsw_cday - write(logmsg,*) flds_scalar_index_nextsw_cday - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nextsw_cday = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxNextSwCday') - endif - - if (nxg /= 0 .and. nyg /= 0) then - - call fld_list_add(fldsFrAtm_num, fldsFrAtm, trim(flds_scalar_name)) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_topo' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_z' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_u' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_v' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_tbot' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_ptem' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_shum' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_pbot' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_dens' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Sa_pslv' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_rainc' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_rainl' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_snowc' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_snowl' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_lwdn' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swndr' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swvdr' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swndf' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swvdf' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_swnet' ) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_bcph' , ungridded_lbound=1, ungridded_ubound=3) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_ocph' , ungridded_lbound=1, ungridded_ubound=3) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_dstwet', ungridded_lbound=1, ungridded_ubound=4) - call fld_list_add(fldsFrAtm_num, fldsFrAtm, 'Faxa_dstdry', ungridded_lbound=1, ungridded_ubound=4) - - call fld_list_add(fldsToAtm_num, fldsToAtm, trim(flds_scalar_name)) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_anidr' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_avsdf' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_anidf' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_avsdr' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_lfrac' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Si_ifrac' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_ofrac' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_tref' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_qref' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_t' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_t' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_fv' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_ram1' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sl_snowh' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Si_snowh' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_ssq' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'So_re' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Sx_u10' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_taux' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_tauy' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_lat' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_sen' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_lwup' ) - call fld_list_add(fldsToAtm_num, fldsToAtm, 'Faxx_evap' ) - - do n = 1,fldsFrAtm_num - 
if(mastertask) write(logunit,*)'Advertising From Xatm ',trim(fldsFrAtm(n)%stdname) - call NUOPC_Advertise(exportState, standardName=fldsFrAtm(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - - do n = 1,fldsToAtm_num - if(mastertask) write(logunit,*)'Advertising To Xatm',trim(fldsToAtm(n)%stdname) - call NUOPC_Advertise(importState, standardName=fldsToAtm(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - end if - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output arguments - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - type(ESMF_Mesh) :: Emesh - type(ESMF_Time) :: nextTime - real(r8) :: nextsw_cday - integer :: n - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize: xatm) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! generate the mesh - !-------------------------------- - - call dead_meshinit(gcomp, nxg, nyg, gindex, lon, lat, Emesh, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call fld_list_realize( & - state=exportState, & - fldList=fldsFrAtm, & - numflds=fldsFrAtm_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':datmExport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call fld_list_realize( & - state=importState, & - fldList=fldsToAtm, & - numflds=fldsToAtm_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':datmImport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call state_setexport(exportState, rc=rc) - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! 
Set time of next radiation computation - - call ESMF_ClockGetNextTime(clock, nextTime) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_TimeGet(nextTime, dayOfYear_r8=nextsw_cday) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(nextsw_cday, flds_scalar_index_nextsw_cday, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - -#ifdef USE_ESMF_METADATA - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(comp, convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ShortName", "XATM", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "LongName", "Atmosphere Dead Model", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "Description", & - "The dead models stand in as test model for active components." // & - "Coupling data is artificially generated ", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ReleaseDate", "2017", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ModelType", "Sea Ice", convention=convCIM, purpose=purpComp, rc=rc) -#endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: exportState - real(r8) :: nextsw_cday - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - if (dbug > 1) then - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - end if - call memcheck(subname, 3, mastertask) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(nextsw_cday, flds_scalar_index_nextsw_cday, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call state_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (mastertask) then - call log_clock_advance(clock, 'XATM', logunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) then - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - end if - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine state_setexport(exportState, rc) - - ! 
input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: nf, nind - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - ! Start from index 2 in order to Skip the scalar field here - do nf = 2,fldsFrAtm_num - if (fldsFrAtm(nf)%ungridded_ubound == 0) then - call field_setexport(exportState, trim(fldsFrAtm(nf)%stdname), lon, lat, nf=nf, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - do nind = 1,fldsFrAtm(nf)%ungridded_ubound - call field_setexport(exportState, trim(fldsFrAtm(nf)%stdname), lon, lat, nf=nf+nind-1, & - ungridded_index=nind, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - end do - - end subroutine state_setexport - - !=============================================================================== - - subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) - - use shr_const_mod , only : pi=>shr_const_pi - - ! intput/otuput variables - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: fldname - real(r8) , intent(in) :: lon(:) - real(r8) , intent(in) :: lat(:) - integer , intent(in) :: nf - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: i, ncomp - type(ESMF_Field) :: lfield - real(r8), pointer :: data1d(:) - real(r8), pointer :: data2d(:,:) - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ncomp = 1 - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do i = 1,size(data2d, dim=1) - data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - else if (gridToFieldMap == 2) then - do i = 1,size(data2d, dim=2) - data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - do i = 1,size(data1d) - data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - - end subroutine field_setexport - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - !-------------------------------- - ! 
Finalize routine - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call dead_final_nuopc('atm', logunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelFinalize - -end module atm_comp_nuopc diff --git a/src/components/xcpl_comps/xglc/cime_config/buildlib b/src/components/xcpl_comps/xglc/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/xcpl_comps/xglc/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/xcpl_comps/xglc/cime_config/buildnml b/src/components/xcpl_comps/xglc/cime_config/buildnml deleted file mode 100755 index 64077a469c3..00000000000 --- a/src/components/xcpl_comps/xglc/cime_config/buildnml +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -""" -build data model library -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildnml import build_xcpl_nml, parse_input -from CIME.case import Case - -def buildnml(case, caseroot, compname): - if compname != "xglc": - raise AttributeError - build_xcpl_nml(case, caseroot, compname) - -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "xglc") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/xcpl_comps/xglc/mct/glc_comp_mct.F90 b/src/components/xcpl_comps/xglc/mct/glc_comp_mct.F90 deleted file mode 100644 index 312de1dc220..00000000000 --- a/src/components/xcpl_comps/xglc/mct/glc_comp_mct.F90 +++ /dev/null @@ -1,216 +0,0 @@ -module glc_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dead_mct_mod , only: dead_init_mct, dead_run_mct, dead_final_mct - use seq_flds_mod , only: seq_flds_g2x_fields, seq_flds_x2g_fields - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: glc_init_mct - public :: glc_run_mct - public :: glc_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "glc_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! 
logging unit number - integer(IN) :: compid ! mct comp id - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN),parameter :: master_task=0 ! task number of master task - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine glc_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !DESCRIPTION: initialize dead glc model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nxg ! global dim i-direction - integer(IN) :: nyg ! global dim j-direction - integer(IN) :: phase ! initialization phase - integer(IN) :: ierr ! error code - logical :: glc_present ! if true, component is present - logical :: glc_prognostic ! if true, component is prognostic - logical :: glcocn_present - logical :: glcice_present - logical :: glclnd_present - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, glc_phase=phase) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('glc_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Initialize xglc - !---------------------------------------------------------------------------- - - call dead_init_mct('glc', Eclock, x2d, d2x, & - seq_flds_x2g_fields, seq_flds_g2x_fields, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - if (nxg == 0 .and. nyg == 0) then - glc_present = .false. - glc_prognostic = .false. - glclnd_present = .false. - glcocn_present = .false. - glcice_present = .false. - else - glc_present = .true. - glc_prognostic = .true. - glclnd_present = .true. - glcocn_present = .false. - glcice_present = .false. 
- end if - - call seq_infodata_PutData( infodata, dead_comps=.true., & - glc_present=glc_present, & - glc_prognostic=glc_prognostic, & - glclnd_present=glclnd_present, & - glcocn_present=glcocn_present, & - glcice_present=glcice_present, & - glc_nx=nxg, glc_ny=nyg) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logunit) - - end subroutine glc_init_mct - - !=============================================================================== - subroutine glc_run_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: run method for dead glc model - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(*), parameter :: subName = "(glc_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call dead_run_mct('glc', EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine glc_run_mct - - !=============================================================================== - subroutine glc_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! !DESCRIPTION: finalize method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(glc_final_mct) " - !------------------------------------------------------------------------------- - - call dead_final_mct('glc', my_task, master_task, logunit) - - end subroutine glc_final_mct - !=============================================================================== - -end module glc_comp_mct diff --git a/src/components/xcpl_comps/xglc/nuopc/glc_comp_nuopc.F90 b/src/components/xcpl_comps/xglc/nuopc/glc_comp_nuopc.F90 deleted file mode 100644 index 6da7c9f24cf..00000000000 --- a/src/components/xcpl_comps/xglc/nuopc/glc_comp_nuopc.F90 +++ /dev/null @@ -1,536 +0,0 @@ -module glc_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
This is the NUOPC cap for XGLC - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dead_nuopc_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_index - use dead_nuopc_mod , only : dead_init_nuopc, dead_final_nuopc, dead_meshinit - use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type - use dead_nuopc_mod , only : ModelInitPhase, ModelSetRunClock - - implicit none - private ! except - - public :: SetServices - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CL) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - - integer :: fldsToGlc_num = 0 - integer :: fldsFrGlc_num = 0 - type (fld_list_type) :: fldsToGlc(fldsMax) - type (fld_list_type) :: fldsFrGlc(fldsMax) - integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost - - real(r8), pointer :: gbuf(:,:) ! model info - real(r8), pointer :: lat(:) - real(r8), pointer :: lon(:) - integer , allocatable :: gindex(:) - integer :: nxg ! global dim i-direction - integer :: nyg ! global dim j-direction - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - logical :: mastertask - integer :: dbug = 1 - character(*),parameter :: modName = "(xglc_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! 
set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & - userRoutine=InitializeAdvertise, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & - userRoutine=InitializeRealize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - character(CS) :: stdname - integer :: n - integer :: lsize ! local array size - integer :: shrlogunit ! original log unit - character(CL) :: cvalue - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localpet=my_task, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - mastertask = my_task == master_task - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize xglc - !---------------------------------------------------------------------------- - - call dead_init_nuopc('glc', inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - allocate(gindex(lsize)) - allocate(lon(lsize)) - allocate(lat(lsize)) - - gindex(:) = gbuf(:,dead_grid_index) - lat(:) = gbuf(:,dead_grid_lat) - lon(:) = gbuf(:,dead_grid_lon) - - !-------------------------------- - ! 
advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - endif - - if (nxg /= 0 .and. nyg /= 0) then - - call fld_list_add(fldsFrGlc_num, fldsFrGlc, trim(flds_scalar_name)) - call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_icemask' ) - call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_icemask_coupled_fluxes' ) - call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_ice_covered' ) - call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Sg_topo' ) - call fld_list_add(fldsFrGlc_num, fldsFrGlc, 'Flgg_hflx' ) - - call fld_list_add(fldsToGlc_num, fldsToGlc, trim(flds_scalar_name)) - call fld_list_add(fldsToGlc_num, fldsToGlc, 'Sl_tsrf') - call fld_list_add(fldsToGlc_num, fldsToGlc, 'Sl_topo') - call fld_list_add(fldsToGlc_num, fldsToGlc, 'Flgg_hflx') - - do n = 1,fldsFrGlc_num - if (mastertask) write(logunit,*)'Advertising From Xglc ',trim(fldsFrGlc(n)%stdname) - call NUOPC_Advertise(exportState, standardName=fldsFrglc(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - - do n = 1,fldsToGlc_num - if (mastertask) write(logunit,*)'Advertising To Xglc ',trim(fldsToGlc(n)%stdname) - call NUOPC_Advertise(importState, standardName=fldsToglc(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - end if - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - type(ESMF_Mesh) :: Emesh - integer :: shrlogunit ! original log unit - integer :: n - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! generate the mesh - ! grid_option specifies grid or mesh - !-------------------------------- - - call dead_meshinit(gcomp, nxg, nyg, gindex, lon, lat, Emesh, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call fld_list_realize( & - state=ExportState, & - fldList=fldsFrGlc, & - numflds=fldsFrGlc_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dglcExport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call fld_list_realize( & - state=importState, & - fldList=fldsToGlc, & - numflds=fldsToGlc_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dglcImport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - ! Copy from d2x to exportState - ! Set the coupling scalars - !-------------------------------- - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setscalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setscalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
diagnostics - !-------------------------------- - - if (dbug > 1) then - call state_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - -#ifdef USE_ESMF_METADATA - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(comp, convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ShortName", "XGLC", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "LongName", "Land-Ice Dead Model", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "Description", & - "The dead models stand in as test model for active components." // & - "Coupling data is artificially generated ", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ReleaseDate", "2017", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ModelType", "Land-Ice", convention=convCIM, purpose=purpComp, rc=rc) -#endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: exportState - integer :: n - integer :: shrlogunit ! original log unit - real(r8), pointer :: dataptr(:) - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - call memcheck(subname, 3, mastertask) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call state_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'XGLC', logunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine state_setexport(exportState, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: nf, nind - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - ! 
Start from index 2 in order to skip the scalar field - do nf = 2,fldsFrGlc_num - if (fldsFrGlc(nf)%ungridded_ubound == 0) then - call field_setexport(exportState, trim(fldsFrGlc(nf)%stdname), lon, lat, nf=nf, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - do nind = 1,fldsFrGlc(nf)%ungridded_ubound - call field_setexport(exportState, trim(fldsFrGlc(nf)%stdname), lon, lat, nf=nf+nind-1, & - ungridded_index=nind, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - end do - - end subroutine state_setexport - - !=============================================================================== - - subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) - - use shr_const_mod , only : pi=>shr_const_pi - - ! intput/otuput variables - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: fldname - real(r8) , intent(in) :: lon(:) - real(r8) , intent(in) :: lat(:) - integer , intent(in) :: nf - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: i, ncomp - type(ESMF_Field) :: lfield - real(r8), pointer :: data1d(:) - real(r8), pointer :: data2d(:,:) - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ncomp = 5 - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do i = 1,size(data2d, dim=1) - data2d(i,ungridded_index) = (nf*100) & - * cos (pi*lat(i)/180.0_R8) * cos (pi*lat(i)/180.0_R8) & - * sin (pi*lon(i)/180.0_R8) * sin (pi*lon(i)/180.0_R8) & - + (ncomp*10.0_R8) - enddo - else if (gridToFieldMap == 2) then - do i = 1,size(data2d, dim=2) - data2d(ungridded_index,i) = (nf*100) & - * cos (pi*lat(i)/180.0_R8) * cos (pi*lat(i)/180.0_R8) & - * sin (pi*lon(i)/180.0_R8) * sin (pi*lon(i)/180.0_R8) & - + (ncomp*10.0_R8) - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (fldname == 'Sg_icemask' .or. fldname == 'Sg_icemask_coupled_fluxes' .or. fldname == 'Sg_ice_covered') then - data1d(:) = 1._r8 - else - do i = 1,size(data1d) - data1d(i) = (nf*100) & - * cos (pi*lat(i)/180.0_R8) * cos (pi*lat(i)/180.0_R8) & - * sin (pi*lon(i)/180.0_R8) * sin (pi*lon(i)/180.0_R8) & - + (ncomp*10.0_R8) - end do - end if - end if - - end subroutine field_setexport - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - !-------------------------------- - ! 
Finalize routine - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call dead_final_nuopc('glc', logunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelFinalize - - !=============================================================================== - -end module glc_comp_nuopc diff --git a/src/components/xcpl_comps/xice/cime_config/buildlib b/src/components/xcpl_comps/xice/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/xcpl_comps/xice/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/xcpl_comps/xice/cime_config/buildnml b/src/components/xcpl_comps/xice/cime_config/buildnml deleted file mode 100755 index e023dc7c6d2..00000000000 --- a/src/components/xcpl_comps/xice/cime_config/buildnml +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -""" -build data model library -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildnml import build_xcpl_nml, parse_input -from CIME.case import Case - -def buildnml(case, caseroot, compname): - if compname != "xice": - raise AttributeError - build_xcpl_nml(case, caseroot, compname) - -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "xice") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/xcpl_comps/xice/mct/ice_comp_mct.F90 b/src/components/xcpl_comps/xice/mct/ice_comp_mct.F90 deleted file mode 100644 index 974b246502c..00000000000 --- a/src/components/xcpl_comps/xice/mct/ice_comp_mct.F90 +++ /dev/null @@ -1,209 +0,0 @@ -module ice_comp_mct - - ! !USES: - - use esmf , only: ESMF_Clock - use mct_mod - - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dead_mct_mod , only: dead_init_mct, dead_run_mct, dead_final_mct - use seq_flds_mod , only: seq_flds_i2x_fields, seq_flds_x2i_fields - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: ice_init_mct - public :: ice_run_mct - public :: ice_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! 
char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN),parameter :: master_task=0 ! task number of master task - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine ice_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !DESCRIPTION: initialize dead ice model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nxg ! global dim i-direction - integer(IN) :: nyg ! global dim j-direction - integer(IN) :: phase ! initialization phase - integer(IN) :: ierr ! error code - logical :: ice_present ! if true, component is present - logical :: ice_prognostic ! if true, component is prognostic - logical :: iceberg_prognostic ! if true, component is iceberg_prognostic - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, ice_phase=phase) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('ice_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Initialize xice - !---------------------------------------------------------------------------- - - call dead_init_mct('ice', Eclock, x2d, d2x, & - seq_flds_x2i_fields, seq_flds_i2x_fields, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - if (nxg == 0 .and. nyg == 0) then - ice_present = .false. - ice_prognostic = .false. - iceberg_prognostic = .false. - else - ice_present = .true. - ice_prognostic = .true. - iceberg_prognostic = .true. - end if - - call seq_infodata_PutData(infodata, dead_comps=.true., & - ice_present=ice_present, & - ice_prognostic=ice_prognostic, & - iceberg_prognostic=iceberg_prognostic, & - ice_nx=nxg, ice_ny=nyg) - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logunit) - - end subroutine ice_init_mct - - !=============================================================================== - subroutine ice_run_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: run method for dead ice model - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(*), parameter :: subName = "(ice_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call dead_run_mct('ice', EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine ice_run_mct - - !=============================================================================== - subroutine ice_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! !DESCRIPTION: finalize method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(ice_final_mct) " - !------------------------------------------------------------------------------- - - call dead_final_mct('ice', my_task, master_task, logunit) - - end subroutine ice_final_mct - !=============================================================================== - -end module ice_comp_mct diff --git a/src/components/xcpl_comps/xice/nuopc/ice_comp_nuopc.F90 b/src/components/xcpl_comps/xice/nuopc/ice_comp_nuopc.F90 deleted file mode 100644 index 60b5e3ff37f..00000000000 --- a/src/components/xcpl_comps/xice/nuopc/ice_comp_nuopc.F90 +++ /dev/null @@ -1,571 +0,0 @@ -module ice_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
This is the NUOPC cap for XICE - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dead_nuopc_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_index - use dead_nuopc_mod , only : dead_init_nuopc, dead_final_nuopc, dead_meshinit - use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type - use dead_nuopc_mod , only : ModelInitPhase, ModelSetRunClock - - implicit none - private ! except - - public :: SetServices - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CL) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - - integer :: fldsToIce_num = 0 - integer :: fldsFrIce_num = 0 - type (fld_list_type) :: fldsToIce(fldsMax) - type (fld_list_type) :: fldsFrIce(fldsMax) - integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost - - real(r8), pointer :: gbuf(:,:) ! model info - real(r8), pointer :: lat(:) - real(r8), pointer :: lon(:) - integer , allocatable :: gindex(:) - integer :: nxg ! global dim i-direction - integer :: nyg ! global dim j-direction - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - logical :: mastertask - integer :: dbug = 1 - character(*),parameter :: modName = "(xice_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! 
set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & - userRoutine=InitializeAdvertise, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & - userRoutine=InitializeRealize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - character(CL) :: cvalue - character(CS) :: stdname - integer :: n - integer :: lsize ! local array size - integer :: shrlogunit ! original log unit - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localpet=my_task, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - mastertask = my_task == master_task - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize xice - !---------------------------------------------------------------------------- - - call dead_init_nuopc('ice', inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - allocate(gindex(lsize)) - allocate(lon(lsize)) - allocate(lat(lsize)) - - gindex(:) = gbuf(:,dead_grid_index) - lat(:) = gbuf(:,dead_grid_lat) - lon(:) = gbuf(:,dead_grid_lon) - - !-------------------------------- - ! 
advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - endif - - if (nxg /= 0 .and. 
nyg /= 0) then - - call fld_list_add(fldsFrIce_num, fldsFrIce, trim(flds_scalar_name)) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_imask' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_ifrac' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_t' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_tref' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_qref' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_snowh' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_u10' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_avsdr' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_anidr' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_avsdf' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Si_anidf' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_taux' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_tauy' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_lat' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_sen' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_lwup' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_evap' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Faii_swnet' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_melth' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_swpen' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_meltw' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_salt' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_taux' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_tauy' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_bcpho' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_bcphi' ) - call fld_list_add(fldsFrIce_num, fldsFrIce, 'Fioi_flxdst' ) - - call fld_list_add(fldsToIce_num, fldsToIce, trim(flds_scalar_name)) - call fld_list_add(fldsToIce_num, fldsToIce, 'So_dhdx' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'So_dhdy' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'So_t' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'So_s' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'So_u' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'So_v' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Fioo_q' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_z' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_u' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_v' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_ptem' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_shum' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_dens' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Sa_tbot' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swvdr' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swndr' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swvdf' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_swndf' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_lwdn' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_rain' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_snow' ) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_bcph' , ungridded_lbound=1, ungridded_ubound=3) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_ocph' , ungridded_lbound=1, ungridded_ubound=3) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_dstwet', ungridded_lbound=1, ungridded_ubound=4) - call fld_list_add(fldsToIce_num, fldsToIce, 'Faxa_dstdry', ungridded_lbound=1, ungridded_ubound=4) - - do n = 1,fldsFrIce_num - if(mastertask) write(logunit,*)'Advertising From Xice ',trim(fldsFrIce(n)%stdname) - call NUOPC_Advertise(exportState, 
standardName=fldsFrIce(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - - do n = 1,fldsToIce_num - if(mastertask) write(logunit,*)'Advertising To Xice ',trim(fldsToIce(n)%stdname) - call NUOPC_Advertise(importState, standardName=fldsToIce(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - type(ESMF_Mesh) :: Emesh - integer :: shrlogunit ! original log unit - integer :: n - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! generate the mesh - !-------------------------------- - - call dead_meshinit(gcomp, nxg, nyg, gindex, lon, lat, Emesh, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call fld_list_realize( & - state=ExportState, & - fldlist=fldsFrIce, & - numflds=fldsFrIce_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':diceExport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call fld_list_realize( & - state=importState, & - fldList=fldsToIce, & - numflds=fldsToIce_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':diceImport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call State_SetExport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
diagnostics - !-------------------------------- - - if (dbug > 1) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - -#ifdef USE_ESMF_METADATA - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(comp, convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ShortName", "XICE", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "LongName", "Sea Ice Dead Model", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "Description", & - "The dead models stand in as test model for active components." // & - "Coupling data is artificially generated ", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ReleaseDate", "2017", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ModelType", "Sea Ice", convention=convCIM, purpose=purpComp, rc=rc) -#endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: exportState - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - call memcheck(subname, 3, mastertask) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'XICE', logunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - endif - - call shr_file_setLogUnit (shrlogunit) - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine state_setexport(exportState, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: nf, nind - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - ! 
Start from index 2 in order to skip the scalar field - do nf = 2,fldsFrIce_num - if (fldsFrIce(nf)%ungridded_ubound == 0) then - call field_setexport(exportState, trim(fldsFrIce(nf)%stdname), lon, lat, nf=nf, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - do nind = 1,fldsFrIce(nf)%ungridded_ubound - call field_setexport(exportState, trim(fldsFrIce(nf)%stdname), lon, lat, nf=nf+nind-1, & - ungridded_index=nind, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - end do - - end subroutine state_setexport - - !=============================================================================== - - subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) - - use shr_const_mod , only : pi=>shr_const_pi - - ! intput/otuput variables - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: fldname - real(r8) , intent(in) :: lon(:) - real(r8) , intent(in) :: lat(:) - integer , intent(in) :: nf - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: i, ncomp - type(ESMF_Field) :: lfield - real(r8), pointer :: data1d(:) - real(r8), pointer :: data2d(:,:) - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ncomp = 3 - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do i = 1,size(data2d, dim=1) - data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - else if (gridToFieldMap == 2) then - do i = 1,size(data2d, dim=2) - data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - do i = 1,size(data1d) - data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - ! Reset some fields - if (fldname == 'Si_ifrac') then - do i = 1,size(data1d) - data1d(i) = min(1.0_R8,max(0.0_R8,data1d(i))) - end do - else if (fldname == 'Si_imask') then - do i = 1,size(data1d) - data1d(i) = float(nint(min(1.0_R8,max(0.0_R8,data1d(i))))) - end do - end if - end if - - end subroutine field_setexport - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - !-------------------------------- - ! 
Finalize routine - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call dead_final_nuopc('ice', logunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelFinalize - -end module ice_comp_nuopc diff --git a/src/components/xcpl_comps/xlnd/cime_config/buildlib b/src/components/xcpl_comps/xlnd/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/xcpl_comps/xlnd/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/xcpl_comps/xlnd/cime_config/buildnml b/src/components/xcpl_comps/xlnd/cime_config/buildnml deleted file mode 100755 index 760d75e5c06..00000000000 --- a/src/components/xcpl_comps/xlnd/cime_config/buildnml +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -""" -build data model library -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildnml import build_xcpl_nml, parse_input -from CIME.case import Case - -def buildnml(case, caseroot, compname): - if compname != "xlnd": - raise AttributeError - build_xcpl_nml(case, caseroot, compname) - -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "xlnd") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/xcpl_comps/xlnd/mct/lnd_comp_mct.F90 b/src/components/xcpl_comps/xlnd/mct/lnd_comp_mct.F90 deleted file mode 100644 index 47e9072be02..00000000000 --- a/src/components/xcpl_comps/xlnd/mct/lnd_comp_mct.F90 +++ /dev/null @@ -1,204 +0,0 @@ -module lnd_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dead_mct_mod , only: dead_init_mct, dead_run_mct, dead_final_mct - use seq_flds_mod , only: seq_flds_l2x_fields, seq_flds_x2l_fields - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: lnd_init_mct - public :: lnd_run_mct - public :: lnd_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! 
logging unit number - integer(IN) :: compid ! mct comp id - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN),parameter :: master_task=0 ! task number of master task - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine lnd_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !DESCRIPTION: initialize dead lnd model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nxg ! global dim i-direction - integer(IN) :: nyg ! global dim j-direction - integer(IN) :: phase ! initialization phase - integer(IN) :: ierr ! error code - logical :: lnd_present ! if true, component is present - logical :: lnd_prognostic ! if true, component is prognostic - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, lnd_phase=phase) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('lnd_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Initialize xlnd - !---------------------------------------------------------------------------- - call dead_init_mct('lnd', Eclock, x2d, d2x, & - seq_flds_x2l_fields, seq_flds_l2x_fields, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - if (nxg == 0 .and. nyg == 0) then - lnd_present = .false. - lnd_prognostic = .false. - else - lnd_present = .true. - lnd_prognostic = .true. - end if - - call seq_infodata_PutData( infodata, dead_comps=.true., & - lnd_present=lnd_present, & - lnd_prognostic=lnd_prognostic, & - lnd_nx=nxg, lnd_ny=nyg) - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logunit) - - end subroutine lnd_init_mct - - !=============================================================================== - subroutine lnd_run_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: run method for dead lnd model - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(*), parameter :: subName = "(lnd_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call dead_run_mct('lnd', EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine lnd_run_mct - - !=============================================================================== - subroutine lnd_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! !DESCRIPTION: finalize method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(lnd_final_mct) " - !------------------------------------------------------------------------------- - - call dead_final_mct('lnd', my_task, master_task, logunit) - - end subroutine lnd_final_mct - !=============================================================================== - -end module lnd_comp_mct diff --git a/src/components/xcpl_comps/xlnd/nuopc/lnd_comp_nuopc.F90 b/src/components/xcpl_comps/xlnd/nuopc/lnd_comp_nuopc.F90 deleted file mode 100644 index 229e7150789..00000000000 --- a/src/components/xcpl_comps/xlnd/nuopc/lnd_comp_nuopc.F90 +++ /dev/null @@ -1,565 +0,0 @@ -module lnd_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
This is the NUOPC cap for XLND - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dead_nuopc_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_index - use dead_nuopc_mod , only : dead_init_nuopc, dead_final_nuopc, dead_meshinit - use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type - use dead_nuopc_mod , only : ModelInitPhase, ModelSetRunClock - - implicit none - private ! except - - public :: SetServices - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CL) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - integer :: flds_scalar_index_nextsw_cday = 0._r8 - - integer :: fldsToLnd_num = 0 - integer :: fldsFrLnd_num = 0 - type (fld_list_type) :: fldsToLnd(fldsMax) - type (fld_list_type) :: fldsFrLnd(fldsMax) - integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost - - real(r8), pointer :: gbuf(:,:) ! model info - real(r8), pointer :: lat(:) - real(r8), pointer :: lon(:) - integer , allocatable :: gindex(:) - integer :: nxg ! global dim i-direction - integer :: nyg ! global dim j-direction - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - logical :: mastertask - integer :: dbug = 1 - character(*),parameter :: modName = "(xlnd_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! 
set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & - userRoutine=InitializeAdvertise, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & - userRoutine=InitializeRealize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - character(CS) :: stdname - integer :: n - integer :: lsize ! local array size - integer :: shrlogunit ! original log unit - character(CL) :: cvalue - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localpet=my_task, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - mastertask = my_task == master_task - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize xlnd - !---------------------------------------------------------------------------- - - call dead_init_nuopc('lnd', inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - allocate(gindex(lsize)) - allocate(lon(lsize)) - allocate(lat(lsize)) - - gindex(:) = gbuf(:,dead_grid_index) - lat(:) = gbuf(:,dead_grid_lat) - lon(:) = gbuf(:,dead_grid_lon) - - !-------------------------------- - ! 
advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - end if - - if (nxg /= 0 .and. 
nyg /= 0) then - - call fld_list_add(fldsFrLnd_num, fldsFrlnd, trim(flds_scalar_name)) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_lfrin' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_t' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_tref' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_qref' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_avsdr' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_anidr' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_avsdf' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_anidf' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_snowh' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_u10' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_fv' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Sl_ram1' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofsur' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofgwl' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofsub' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_rofi' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Flrl_irrig' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_taux' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_tauy' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_lat' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_sen' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_lwup' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_evap' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_swnet' ) - call fld_list_add(fldsFrLnd_num, fldsFrlnd, 'Fall_flxdst', ungridded_lbound=1, ungridded_ubound=4) - - call fld_list_add(fldsToLnd_num, fldsToLnd, trim(flds_scalar_name)) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_z' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_topo' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_u' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_v' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_ptem' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_pbot' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_tbot' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Sa_shum' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Flrr_volr' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Flrr_volrmch' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_lwdn' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_rainc' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_rainl' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_snowc' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_snowl' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swndr' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swvdr' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swndf' ) - call fld_list_add(fldsToLnd_num, fldsToLnd, 'Faxa_swvdf' ) - call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_bcph' , ungridded_lbound=1, ungridded_ubound=3) - call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_ocph' , ungridded_lbound=1, ungridded_ubound=3) - call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_dstwet', ungridded_lbound=1, ungridded_ubound=4) - call fld_list_add(fldsTolnd_num, fldsTolnd, 'Faxa_dstdry', ungridded_lbound=1, ungridded_ubound=4) - - do n = 1,fldsFrLnd_num - if (mastertask) write(logunit,*)'Advertising From Xlnd ',trim(fldsFrLnd(n)%stdname) - call NUOPC_Advertise(exportState, standardName=fldsFrLnd(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - - do n = 1,fldsToLnd_num - if(mastertask) 
write(logunit,*)'Advertising To Xlnd',trim(fldsToLnd(n)%stdname) - call NUOPC_Advertise(importState, standardName=fldsToLnd(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - - end if - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! intput/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - type(ESMF_Mesh) :: Emesh - integer :: shrlogunit ! original log unit - type(ESMF_VM) :: vm - integer :: n - logical :: connected ! is field connected? - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! generate the mesh - !-------------------------------- - - call dead_meshinit(gcomp, nxg, nyg, gindex, lon, lat, Emesh, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call fld_list_realize( & - state=ExportState, & - fldlist=fldsFrLnd, & - numflds=fldsFrLnd_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dlndExport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call fld_list_realize( & - state=importState, & - fldList=fldsToLnd, & - numflds=fldsToLnd_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dlndImport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
diagnostics - !-------------------------------- - - if (dbug > 1) then - call state_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - -#ifdef USE_ESMF_METADATA - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(comp, convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ShortName", "XLND", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "LongName", "Land Dead Model", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "Description", & - "The dead models stand in as test model for active components." // & - "Coupling data is artificially generated ", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ReleaseDate", "2017", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ModelType", "Land", convention=convCIM, purpose=purpComp, rc=rc) -#endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: exportState - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - call memcheck(subname, 3, mastertask) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call state_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (mastertask) then - call log_clock_advance(clock, 'LND', logunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - endif - - call shr_file_setLogUnit (shrlogunit) - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine state_setexport(exportState, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: nf, nind - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - ! 
Start from index 2 in order to Skip the scalar field here - do nf = 2,fldsFrLnd_num - if (fldsFrLnd(nf)%ungridded_ubound == 0) then - call field_setexport(exportState, trim(fldsFrLnd(nf)%stdname), lon, lat, nf=nf, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - do nind = 1,fldsFrLnd(nf)%ungridded_ubound - call field_setexport(exportState, trim(fldsFrLnd(nf)%stdname), lon, lat, nf=nf+nind-1, & - ungridded_index=nind, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - end do - - end subroutine state_setexport - - !=============================================================================== - - subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) - - use shr_const_mod , only : pi=>shr_const_pi - - ! intput/otuput variables - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: fldname - real(r8) , intent(in) :: lon(:) - real(r8) , intent(in) :: lat(:) - integer , intent(in) :: nf - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: i, ncomp - type(ESMF_Field) :: lfield - real(r8), pointer :: data1d(:) - real(r8), pointer :: data2d(:,:) - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ncomp = 2 - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do i = 1,size(data2d, dim=1) - data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - else if (gridToFieldMap == 2) then - do i = 1,size(data2d, dim=2) - data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (fldname == 'Sl_lfrin') then - data1d(:) = 1._r8 - else - do i = 1,size(data1d) - data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - end if - - end subroutine field_setexport - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - !-------------------------------- - ! 
Finalize routine - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call dead_final_nuopc('lnd', logunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelFinalize - -end module lnd_comp_nuopc diff --git a/src/components/xcpl_comps/xocn/cime_config/buildlib b/src/components/xcpl_comps/xocn/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/xcpl_comps/xocn/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/xcpl_comps/xocn/cime_config/buildnml b/src/components/xcpl_comps/xocn/cime_config/buildnml deleted file mode 100755 index c6f9453766e..00000000000 --- a/src/components/xcpl_comps/xocn/cime_config/buildnml +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -""" -build data model library -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildnml import build_xcpl_nml, parse_input -from CIME.case import Case - -def buildnml(case, caseroot, compname): - if compname != "xocn": - raise AttributeError - build_xcpl_nml(case, caseroot, compname) - -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "xocn") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/xcpl_comps/xocn/mct/ocn_comp_mct.F90 b/src/components/xcpl_comps/xocn/mct/ocn_comp_mct.F90 deleted file mode 100644 index b51eddb35f5..00000000000 --- a/src/components/xcpl_comps/xocn/mct/ocn_comp_mct.F90 +++ /dev/null @@ -1,208 +0,0 @@ -module ocn_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dead_mct_mod , only: dead_init_mct, dead_run_mct, dead_final_mct - use seq_flds_mod , only: seq_flds_o2x_fields, seq_flds_x2o_fields - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: ocn_init_mct - public :: ocn_run_mct - public :: ocn_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! 
logging unit number - integer(IN) :: compid ! mct comp id - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN),parameter :: master_task=0 ! task number of master task - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine ocn_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !DESCRIPTION: initialize dead ocn model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nxg ! global dim i-direction - integer(IN) :: nyg ! global dim j-direction - integer(IN) :: phase ! initialization phase - integer(IN) :: ierr ! error code - logical :: ocn_present ! if true, component is present - logical :: ocn_prognostic ! if true, component is prognostic - logical :: ocnrof_prognostic - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, ocn_phase=phase) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('ocn_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Initialize xocn - !---------------------------------------------------------------------------- - - call dead_init_mct('ocn', Eclock, x2d, d2x, & - seq_flds_x2o_fields, seq_flds_o2x_fields, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - if (nxg == 0 .and. nyg == 0) then - ocn_present = .false. - ocn_prognostic = .false. - ocnrof_prognostic = .false. - else - ocn_present = .true. - ocn_prognostic = .true. - ocnrof_prognostic = .true. - end if - - call seq_infodata_PutData( infodata, dead_comps=.true., & - ocn_present=ocn_present, & - ocn_prognostic=ocn_prognostic, & - ocnrof_prognostic=ocnrof_prognostic, & - ocn_nx=nxg, ocn_ny=nyg) - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to my log file - !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logunit) - - end subroutine ocn_init_mct - - !=============================================================================== - subroutine ocn_run_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: run method for dead ocn model - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(*), parameter :: subName = "(ocn_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call dead_run_mct('ocn', EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine ocn_run_mct - - !=============================================================================== - subroutine ocn_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! !DESCRIPTION: finalize method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(ocn_final_mct) " - !------------------------------------------------------------------------------- - - call dead_final_mct('ocn', my_task, master_task, logunit) - - end subroutine ocn_final_mct - !=============================================================================== - -end module ocn_comp_mct diff --git a/src/components/xcpl_comps/xocn/nuopc/ocn_comp_nuopc.F90 b/src/components/xcpl_comps/xocn/nuopc/ocn_comp_nuopc.F90 deleted file mode 100644 index e9786868dba..00000000000 --- a/src/components/xcpl_comps/xocn/nuopc/ocn_comp_nuopc.F90 +++ /dev/null @@ -1,551 +0,0 @@ -module ocn_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
This is the NUOPC cap for XOCN - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dead_nuopc_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_index - use dead_nuopc_mod , only : dead_init_nuopc, dead_final_nuopc, dead_meshinit - use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type - use dead_nuopc_mod , only : ModelInitPhase, ModelSetRunClock - - implicit none - private ! except - - public :: SetServices - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CL) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - integer :: flds_scalar_index_nextsw_cday = 0._r8 - - integer :: fldsToOcn_num = 0 - integer :: fldsFrOcn_num = 0 - type (fld_list_type) :: fldsToOcn(fldsMax) - type (fld_list_type) :: fldsFrOcn(fldsMax) - integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost - - real(r8), pointer :: gbuf(:,:) ! model info - real(r8), pointer :: lat(:) - real(r8), pointer :: lon(:) - integer , allocatable :: gindex(:) - integer :: nxg ! global dim i-direction - integer :: nyg ! global dim j-direction - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "ocn_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - logical :: mastertask - integer :: dbug = 1 - character(*),parameter :: modName = "(xocn_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! 
switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & - userRoutine=InitializeAdvertise, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & - userRoutine=InitializeRealize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine SetServices - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - character(CS) :: stdname - integer :: n - integer :: lsize ! local array size - integer :: shrlogunit ! original log unit - character(CL) :: cvalue - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! generate local mpi comm - !---------------------------------------------------------------------------- - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localpet=my_task, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - mastertask = my_task == master_task - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, my_task==master_task, logunit, shrlogunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! 
Initialize xocn - !---------------------------------------------------------------------------- - - call dead_init_nuopc('ocn', inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - allocate(gindex(lsize)) - allocate(lon(lsize)) - allocate(lat(lsize)) - - gindex(:) = gbuf(:,dead_grid_index) - lat(:) = gbuf(:,dead_grid_lat) - lon(:) = gbuf(:,dead_grid_lon) - - !-------------------------------- - ! advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - endif - - if (nxg /= 0 .and. 
nyg /= 0) then - - call fld_list_add(fldsFrOcn_num, fldsFrOcn, trim(flds_scalar_name)) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_omask" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_t" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_s" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_u" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_v" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_dhdx" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_dhdy" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "So_bldepth" ) - call fld_list_add(fldsFrOcn_num, fldsFrOcn, "Fioo_q" ) - - call fld_list_add(fldsToOcn_num, fldsToOcn, trim(flds_scalar_name)) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_rain" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_snow" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_lwdn" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swndr" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swvdr" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swndf" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Faxa_swvdf" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_taux" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_tauy" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_sen" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_lat" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_lwup" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_evap" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Fioi_salt" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_rofl" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Foxx_rofi" ) - call fld_list_add(fldsToOcn_num, fldsToOcn, "Sa_pslv" ) - - do n = 1,fldsFrOcn_num - if(mastertask) write(logunit,*)'Advertising From Xocn ',trim(fldsFrOcn(n)%stdname) - call NUOPC_Advertise(exportState, standardName=fldsFrOcn(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - - do n = 1,fldsToOcn_num - if(mastertask) write(logunit,*)'Advertising To Xocn',trim(fldsToOcn(n)%stdname) - call NUOPC_Advertise(importState, standardName=fldsToOcn(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - type(ESMF_Mesh) :: Emesh - integer :: shrlogunit ! original log unit - integer :: n - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize: xocn) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! 
Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! generate the mesh - !-------------------------------- - - call dead_meshinit(gcomp, nxg, nyg, gindex, lon, lat, Emesh, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. - !-------------------------------- - - call fld_list_realize( & - state=ExportState, & - fldlist=fldsFrOcn, & - numflds=fldsFrOcn_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':docnExport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call fld_list_realize( & - state=importState, & - fldList=fldsToOcn, & - numflds=fldsToOcn_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':docnImport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call state_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - -#ifdef USE_ESMF_METADATA - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(comp, convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ShortName", "XOCN", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "LongName", "Ocean Dead Model", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "Description", & - "The dead models stand in as test model for active components." // & - "Coupling data is artificially generated ", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ReleaseDate", "2017", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ModelType", "Ocean", convention=convCIM, purpose=purpComp, rc=rc) -#endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! intput/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: exportState - integer :: shrlogunit ! 
original log unit - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - call memcheck(subname, 3, mastertask) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call state_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (my_task == master_task) then - call log_clock_advance(clock, 'OCN', logunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - endif - - call shr_file_setLogUnit (shrlogunit) - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine state_setexport(exportState, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: nf, nind - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - ! Start from index 2 in order to Skip the scalar field here - do nf = 2,fldsFrOcn_num - if (fldsFrOcn(nf)%ungridded_ubound == 0) then - call field_setexport(exportState, trim(fldsFrOcn(nf)%stdname), lon, lat, nf=nf, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - do nind = 1,fldsFrOcn(nf)%ungridded_ubound - call field_setexport(exportState, trim(fldsFrOcn(nf)%stdname), lon, lat, nf=nf, & - ungridded_index=nind, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - end do - - end subroutine state_setexport - - !=============================================================================== - - subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) - - use shr_const_mod , only : pi=>shr_const_pi - - ! intput/otuput variables - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: fldname - real(r8) , intent(in) :: lon(:) - real(r8) , intent(in) :: lat(:) - integer , intent(in) :: nf - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! 
local variables - integer :: i, ncomp - type(ESMF_Field) :: lfield - real(r8), pointer :: data1d(:) - real(r8), pointer :: data2d(:,:) - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ncomp = 4 - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do i = 1,size(data2d, dim=1) - data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - else if (gridToFieldMap == 2) then - do i = 1,size(data2d, dim=2) - data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - do i = 1,size(data1d) - data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - - if (fldname == 'So_omask') then - do i = 1,size(data1d) - !data1d(i) = float(nint(min(1.0_R8,max(0.0_R8,data1d(i))))) - data1d(i) = 0._r8 - end do - end if - - end subroutine field_setexport - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - !-------------------------------- - ! 
Finalize routine - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call dead_final_nuopc('ocn', logunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelFinalize - - !=============================================================================== - -end module ocn_comp_nuopc diff --git a/src/components/xcpl_comps/xrof/cime_config/buildlib b/src/components/xcpl_comps/xrof/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/xcpl_comps/xrof/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/xcpl_comps/xrof/cime_config/buildnml b/src/components/xcpl_comps/xrof/cime_config/buildnml deleted file mode 100755 index 182cf39efa5..00000000000 --- a/src/components/xcpl_comps/xrof/cime_config/buildnml +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -""" -build data model library -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildnml import build_xcpl_nml, parse_input -from CIME.case import Case - -def buildnml(case, caseroot, compname): - if compname != "xrof": - raise AttributeError - build_xcpl_nml(case, caseroot, compname) - -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "xrof") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/xcpl_comps/xrof/mct/rof_comp_mct.F90 b/src/components/xcpl_comps/xrof/mct/rof_comp_mct.F90 deleted file mode 100644 index f3eecbb534e..00000000000 --- a/src/components/xcpl_comps/xrof/mct/rof_comp_mct.F90 +++ /dev/null @@ -1,213 +0,0 @@ -module rof_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dead_mct_mod , only: dead_init_mct, dead_run_mct, dead_final_mct - use seq_flds_mod , only: seq_flds_r2x_fields, seq_flds_x2r_fields - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: rof_init_mct - public :: rof_run_mct - public :: rof_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - character(CS) :: myModelName = 'rof' ! user defined model name - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! 
fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN),parameter :: master_task=0 ! task number of master task - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine rof_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !DESCRIPTION: initialize dead rof model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nxg ! global dim i-direction - integer(IN) :: nyg ! global dim j-direction - integer(IN) :: phase ! initialization phase - integer(IN) :: ierr ! error code - logical :: rof_present ! if true, component is present - logical :: rof_prognostic ! if true, component is prognostic - logical :: rofice_present - logical :: flood_present - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - ! Obtain infodata variables - call seq_infodata_getData(infodata, rof_phase=phase) - - ! Determine instance information - inst_name = seq_comm_name(compid) - inst_index = seq_comm_inst(compid) - inst_suffix = seq_comm_suffix(compid) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('rof_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Initialize xrof - !---------------------------------------------------------------------------- - - call dead_init_mct('rof', Eclock, x2d, d2x, & - seq_flds_x2r_fields, seq_flds_r2x_fields, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - if (nxg == 0 .and. nyg == 0) then - rof_present = .false. - rof_prognostic = .false. - rofice_present = .false. - flood_present = .false. - else - rof_present = .true. - rof_prognostic = .true. - rofice_present = .false. - flood_present = .true. 
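    ! Clarifying note (a summary of the branch above, not lines from the original
    ! file): the dead components advertise their capabilities to the driver only
    ! through these logicals. A zero-sized grid (nxg == 0 .and. nyg == 0) marks
    ! the component as absent; otherwise xrof reports itself present and
    ! prognostic, switches the flood coupling path on, and leaves river-ice
    ! coupling off. The seq_infodata_PutData call just below is what hands these
    ! flags, together with rof_nx/rof_ny, to the coupler.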
- end if - - call seq_infodata_PutData( infodata, dead_comps=.true., & - rof_present=rof_present, & - rof_prognostic=rof_prognostic, & - rofice_present=rofice_present, & - flood_present=flood_present, & - rof_nx=nxg, rof_ny=nyg) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logunit) - - end subroutine rof_init_mct - - !=============================================================================== - subroutine rof_run_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: run method for dead rof model - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(*), parameter :: subName = "(rof_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call dead_run_mct('rof', EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine rof_run_mct - - !=============================================================================== - subroutine rof_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! !DESCRIPTION: finalize method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(rof_final_mct) " - !------------------------------------------------------------------------------- - - call dead_final_mct('rof', my_task, master_task, logunit) - - end subroutine rof_final_mct - !=============================================================================== - -end module rof_comp_mct diff --git a/src/components/xcpl_comps/xrof/nuopc/rof_comp_nuopc.F90 b/src/components/xcpl_comps/xrof/nuopc/rof_comp_nuopc.F90 deleted file mode 100644 index 18713967f18..00000000000 --- a/src/components/xcpl_comps/xrof/nuopc/rof_comp_nuopc.F90 +++ /dev/null @@ -1,525 +0,0 @@ -module rof_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
This is the NUOPC cap for XROF - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dead_nuopc_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_index - use dead_nuopc_mod , only : dead_init_nuopc, dead_final_nuopc, dead_meshinit - use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type - use dead_nuopc_mod , only : ModelInitPhase, ModelSetRunClock - - implicit none - private ! except - - public :: SetServices - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CL) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - integer :: flds_scalar_index_nextsw_cday = 0 - - integer :: fldsToRof_num = 0 - integer :: fldsFrRof_num = 0 - type (fld_list_type) :: fldsToRof(fldsMax) - type (fld_list_type) :: fldsFrRof(fldsMax) - integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost - - real(r8), pointer :: gbuf(:,:) ! model info - real(r8), pointer :: lat(:) - real(r8), pointer :: lon(:) - integer , allocatable :: gindex(:) - integer :: nxg ! global dim i-direction - integer :: nyg ! global dim j-direction - integer :: my_task ! my task in mpi - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - integer ,parameter :: master_task=0 ! task number of master task - logical :: mastertask - integer :: dbug = 1 - character(*),parameter :: modName = "(xrof_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! 
set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & - userRoutine=InitializeAdvertise, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & - userRoutine=InitializeRealize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine SetServices - -!=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - character(CS) :: stdname - integer :: n - integer :: lsize ! local array size - integer :: shrlogunit ! original log unit - character(CL) :: cvalue - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localpet=my_task, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - mastertask = my_task == master_task - - !---------------------------------------------------------------------------- - ! determine instance information - !---------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize xrof - !---------------------------------------------------------------------------- - - call dead_init_nuopc('rof', inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - allocate(gindex(lsize)) - allocate(lon(lsize)) - allocate(lat(lsize)) - - gindex(:) = gbuf(:,dead_grid_index) - lat(:) = gbuf(:,dead_grid_lat) - lon(:) = gbuf(:,dead_grid_lon) - - !-------------------------------- - ! 
advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - endif - - if (nxg /= 0 .and. 
nyg /= 0) then - - call fld_list_add(fldsFrRof_num, fldsFrRof, trim(flds_scalar_name)) - call fld_list_add(fldsFrRof_num, fldsFrRof, 'Forr_rofl') - call fld_list_add(fldsFrRof_num, fldsFrRof, 'Forr_rofi') - call fld_list_add(fldsFrRof_num, fldsFrRof, 'Flrr_flood') - call fld_list_add(fldsFrRof_num, fldsFrRof, 'Flrr_volr') - call fld_list_add(fldsFrRof_num, fldsFrRof, 'Flrr_volrmch') - - call fld_list_add(fldsToRof_num, fldsToRof, trim(flds_scalar_name)) - call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofsur') - call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofgwl') - call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofsub') - call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofdto') - call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_rofi') - call fld_list_add(fldsToRof_num, fldsToRof, 'Flrl_irrig') - - do n = 1,fldsFrRof_num - if(mastertask) write(logunit,*)'Advertising From Xrof ',trim(fldsFrRof(n)%stdname) - call NUOPC_Advertise(exportState, standardName=fldsFrRof(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - - do n = 1,fldsToRof_num - if(mastertask) write(logunit,*)'Advertising To Xrof',trim(fldsToRof(n)%stdname) - call NUOPC_Advertise(importState, standardName=fldsToRof(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - end if - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output arguments - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - type(ESMF_Mesh) :: Emesh - integer :: n - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) - - !-------------------------------- - ! generate the mesh - !-------------------------------- - - call dead_meshinit(gcomp, nxg, nyg, gindex, lon, lat, Emesh, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. 
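    ! Illustrative sketch (not from the original cap; kept commented so the
    ! deleted routine is reproduced unchanged): per advertised field,
    ! fld_list_realize boils down to creating an ESMF_Field on the component
    ! mesh and handing it back to NUOPC under the advertised standard name.
    ! The field name 'Forr_rofl' and the exact ESMF/NUOPC calls below are
    ! assumptions based on typical NUOPC caps, shown for orientation only.
    !
    !   lfield = ESMF_FieldCreate(Emesh, ESMF_TYPEKIND_R8, &
    !        meshloc=ESMF_MESHLOC_ELEMENT, name='Forr_rofl', rc=rc)
    !   if (chkerr(rc,__LINE__,u_FILE_u)) return
    !   call NUOPC_Realize(exportState, field=lfield, rc=rc)
    !   if (chkerr(rc,__LINE__,u_FILE_u)) return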
- !-------------------------------- - - call fld_list_realize( & - state=ExportState, & - fldlist=fldsFrRof, & - numflds=fldsFrRof_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':drofExport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call fld_list_realize( & - state=importState, & - fldList=fldsToRof, & - numflds=fldsToRof_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':drofImport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - -#ifdef USE_ESMF_METADATA - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(comp, convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ShortName", "XROF", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "LongName", "River Dead Model", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "Description", & - "The dead models stand in as test model for active components." // & - "Coupling data is artificially generated ", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ReleaseDate", "2017", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ModelType", "River", convention=convCIM, purpose=purpComp, rc=rc) -#endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: exportState - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - if (dbug > 5) then - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - end if - call memcheck(subname, 3, mastertask) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetExport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
diagnostics - !-------------------------------- - - if (dbug > 1) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (mastertask) then - call log_clock_advance(clock, 'XROF', logunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) then - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - end if - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine state_setexport(exportState, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer, intent(out) :: rc - - ! local variables - integer :: nf, nind - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - ! Start from index 2 in order to skip the scalar field - do nf = 2,fldsFrRof_num - if (fldsFrRof(nf)%ungridded_ubound == 0) then - call field_setexport(exportState, trim(fldsFrRof(nf)%stdname), lon, lat, nf=nf, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - do nind = 1,fldsFrRof(nf)%ungridded_ubound - call field_setexport(exportState, trim(fldsFrRof(nf)%stdname), lon, lat, nf=nf+nind-1, & - ungridded_index=nind, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - end do - - end subroutine state_setexport - - !=============================================================================== - - subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) - - use shr_const_mod , only : pi=>shr_const_pi - - ! intput/otuput variables - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: fldname - real(r8) , intent(in) :: lon(:) - real(r8) , intent(in) :: lat(:) - integer , intent(in) :: nf - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: i, ncomp - type(ESMF_Field) :: lfield - real(r8), pointer :: data1d(:) - real(r8), pointer :: data2d(:,:) - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ncomp = 6 - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do i = 1,size(data2d, dim=1) - data2d(i,ungridded_index) = (nf+1) * 1.0_r8 - end do - else if (gridToFieldMap == 2) then - do i = 1,size(data2d, dim=2) - data2d(ungridded_index,i) = (nf+1) * 1.0_r8 - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - do i = 1,size(data1d) - data1d(i) = (nf+1) * 1.0_r8 - end do - end if - - end subroutine field_setexport - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - !-------------------------------- - ! 
Finalize routine - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call dead_final_nuopc('rof', logunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelFinalize - -end module rof_comp_nuopc diff --git a/src/components/xcpl_comps/xshare/mct/dead_data_mod.F90 b/src/components/xcpl_comps/xshare/mct/dead_data_mod.F90 deleted file mode 100644 index 5de917f895c..00000000000 --- a/src/components/xcpl_comps/xshare/mct/dead_data_mod.F90 +++ /dev/null @@ -1,26 +0,0 @@ -MODULE dead_data_mod - - ! !DESCRIPTION: - ! Declares data that's shared across the init, run, and finalize methods - - ! !USES: - implicit none - - ! !PUBLIC TYPES: - ! no public types - - ! !PUBLIC MEMBER FUNCTIONS: - ! no public functions - - ! !PUBLIC DATA MEMBERS: - integer :: dead_grid_lat = 1 ! lat from component - integer :: dead_grid_lon = 2 ! lon from component - integer :: dead_grid_area = 3 ! area from component - integer :: dead_grid_mask = 4 ! mask, 0 = inactive cell - integer :: dead_grid_frac = 5 ! fractional area coverage - integer :: dead_grid_aream = 6 ! area from mapping file - integer :: dead_grid_index = 7 ! global index - integer :: dead_grid_pid = 8 ! proc id number - integer :: dead_grid_total = 8 - -END MODULE dead_data_mod diff --git a/src/components/xcpl_comps/xshare/mct/dead_mct_mod.F90 b/src/components/xcpl_comps/xshare/mct/dead_mct_mod.F90 deleted file mode 100644 index a2c0c007b6f..00000000000 --- a/src/components/xcpl_comps/xshare/mct/dead_mct_mod.F90 +++ /dev/null @@ -1,353 +0,0 @@ -module dead_mct_mod - - use esmf , only : esmf_clock - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_sys_mod , only : shr_sys_abort, shr_sys_flush - use shr_const_mod , only : shr_const_pi - use mct_mod , only : mct_gsmap, mct_ggrid, mct_avect, mct_ggrid_init, mct_gsmap_lsize, mct_ggrid_lsize - use mct_mod , only : mct_avect_lsize, MCT_AVECT_NRATTR, mct_avect_indexra,mct_avect_zero - use mct_mod , only : mct_ggrid_importiattr, mct_ggrid_importrattr, mct_gsmap_init, mct_aVect_init - use mct_mod , only : mct_gsmap_orderedpoints - use dead_data_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_area, dead_grid_mask, dead_grid_frac, dead_grid_index - use dead_mod , only : dead_setnewgrid, dead_read_inparms - use seq_flds_mod , only : seq_flds_dom_coord, seq_flds_dom_other - use seq_timemgr_mod , only : seq_timemgr_EClockGetData - - implicit none - private - save - - public :: dead_init_mct, dead_run_mct, dead_final_mct - private :: dead_domain_mct - -!=============================================================================== -contains -!=============================================================================== - - subroutine dead_init_mct(model, Eclock, x2d, d2x, & - flds_x2d, flds_d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: model - type(ESMF_Clock) , intent(inout) :: EClock - type(mct_aVect) , intent(inout) :: x2d ! driver -> dead - type(mct_aVect) , intent(inout) :: d2x ! dead -> driver - character(len=*) , intent(in) :: flds_x2d - character(len=*) , intent(in) :: flds_d2x - type(mct_gsMap) , pointer :: gsMap ! model global sep map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - real(r8) , pointer :: gbuf(:,:) ! 
model grid (output) - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: inst_index ! instance number - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - character(len=*) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(out) :: nxg ! global dim i-direction - integer(IN) , intent(out) :: nyg ! global dim j-direction - - !--- local variables --- - integer(IN) :: ierr ! error code - integer(IN) :: local_comm ! local communicator - integer(IN) :: mype ! pe info - integer(IN) :: totpe ! total number of pes - integer(IN), allocatable :: gindex(:) ! global index - integer(IN) :: lsize - integer(IN) :: nproc_x - integer(IN) :: seg_len - integer(IN) :: decomp_type - logical :: flood=.false. ! rof flood flag - - !--- formats --- - character(*), parameter :: F00 = "('(',a,'_init_mct) ',8a)" - character(*), parameter :: F01 = "('(',a,'_init_mct) ',a,4i8)" - character(*), parameter :: F02 = "('(',a,'_init_mct) ',a,4es13.6)" - character(*), parameter :: F90 = "('(',a,'_init_mct) ',73('='))" - character(*), parameter :: F91 = "('(',a,'_init_mct) ',73('-'))" - character(*), parameter :: subName = "(dead_init_mct) " - !------------------------------------------------------------------------------- - - ! Determine communicator groups and sizes - - local_comm = mpicom - call MPI_COMM_RANK(local_comm,mype ,ierr) - call MPI_COMM_SIZE(local_comm,totpe,ierr) - - ! Read input parms - - call dead_read_inparms(model, mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, & - nxg, nyg, decomp_type, nproc_x, seg_len, flood) - - ! Initialize grid - - call dead_setNewGrid(decomp_type, nxg, nyg, totpe, mype, logunit, & - lsize, gbuf, seg_len, nproc_x) - - ! Initialize MCT global seg map - - allocate(gindex(lsize)) - gindex(:) = gbuf(:,dead_grid_index) - call mct_gsMap_init( gsMap, gindex, mpicom, compid, lsize, nxg*nyg ) - deallocate(gindex) - - ! Initialize MCT domain - - call dead_domain_mct(mpicom, gbuf, gsMap, logunit, ggrid) - - ! Initialize MCT attribute vectors - - call mct_aVect_init(d2x, rList=flds_d2x, lsize=lsize) - call mct_aVect_zero(d2x) - - call mct_aVect_init(x2d, rList=flds_x2d, lsize=lsize) - call mct_aVect_zero(x2d) - - end subroutine dead_init_mct - - !=============================================================================== - subroutine dead_run_mct(model, EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - implicit none - - ! !DESCRIPTION: run method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: model - type(ESMF_Clock) , intent(inout) :: EClock - type(mct_aVect) , intent(inout) :: x2d ! driver -> dead - type(mct_aVect) , intent(inout) :: d2x ! dead -> driver - type(mct_gsMap) , pointer :: gsMap ! model global sep map (output) - type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: compid ! mct comp id - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! 
logging unit number - - !--- local --- - integer(IN) :: CurrentYMD ! model date - integer(IN) :: CurrentTOD ! model sec into model date - integer(IN) :: n ! index - integer(IN) :: nf ! fields loop index - integer(IN) :: ki ! index of ifrac - integer(IN) :: lsize ! size of AttrVect - real(R8) :: lat ! latitude - real(R8) :: lon ! longitude - integer :: nflds_d2x, nflds_x2d - integer :: ncomp - character(*), parameter :: F04 = "('(',a,'_run_mct) ',2a,2i8,'s')" - character(*), parameter :: subName = "(dead_run_mct) " - !------------------------------------------------------------------------------- - - ! PACK (currently no unpacking) - - selectcase(model) - case('atm') - ncomp = 1 - case('lnd') - ncomp = 2 - case('ice') - ncomp = 3 - case('ocn') - ncomp = 4 - case('glc') - ncomp = 5 - case('rof') - ncomp = 6 - case('wav') - ncomp = 7 - end select - - lsize = mct_avect_lsize(x2d) - nflds_d2x = mct_avect_nRattr(d2x) - nflds_x2d = mct_avect_nRattr(x2d) - - if (model.eq.'rof') then - - do nf=1,nflds_d2x - do n=1,lsize - d2x%rAttr(nf,n) = (nf+1) * 1.0_r8 - enddo - enddo - - else if (model.eq.'glc') then - - do nf=1,nflds_d2x - do n=1,lsize - lon = gbuf(n,dead_grid_lon) - lat = gbuf(n,dead_grid_lat) - d2x%rAttr(nf,n) = (nf*100) & - * cos (SHR_CONST_PI*lat/180.0_R8) & - * cos (SHR_CONST_PI*lat/180.0_R8) & - * sin (SHR_CONST_PI*lon/180.0_R8) & - * sin (SHR_CONST_PI*lon/180.0_R8) & - + (ncomp*10.0_R8) - enddo - enddo - - else - - do nf=1,nflds_d2x - do n=1,lsize - lon = gbuf(n,dead_grid_lon) - lat = gbuf(n,dead_grid_lat) - d2x%rAttr(nf,n) = (nf*100) & - * cos (SHR_CONST_PI*lat/180.0_R8) & - * sin((SHR_CONST_PI*lon/180.0_R8) & - - (ncomp-1)*(SHR_CONST_PI/3.0_R8) ) & - + (ncomp*10.0_R8) - enddo - enddo - - endif - - selectcase(model) - case('ice') - - ki = mct_aVect_indexRA(d2x,"Si_ifrac",perrWith=subname) - d2x%rAttr(ki,:) = min(1.0_R8,max(0.0_R8,d2x%rAttr(ki,:))) - - case('glc') - - ki = mct_aVect_indexRA(d2x,"Sg_icemask",perrWith=subname) - d2x%rAttr(ki,:) = 1.0_R8 - - ki = mct_aVect_indexRA(d2x,"Sg_icemask_coupled_fluxes",perrWith=subname) - d2x%rAttr(ki,:) = 1.0_R8 - - ki = mct_aVect_indexRA(d2x,"Sg_ice_covered",perrWith=subname) - d2x%rAttr(ki,:) = 1.0_R8 - - end select - - ! log output for model date - - if (my_task == master_task) then - call seq_timemgr_EClockGetData(EClock,curr_ymd=CurrentYMD, curr_tod=CurrentTOD) - write(logunit,F04) model,trim(model),': model date ', CurrentYMD,CurrentTOD - call shr_sys_flush(logunit) - end if - - end subroutine dead_run_mct - - !=============================================================================== - subroutine dead_final_mct(model, my_task, master_task, logunit) - - ! !DESCRIPTION: finalize method for datm model - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: model - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer(IN) , intent(in) :: logunit ! 
logging unit number - - !--- formats --- - character(*), parameter :: F00 = "('(dead_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dead_comp_final) ',73('-'))" - character(*), parameter :: subName = "(dead_comp_final) " - !------------------------------------------------------------------------------- - - if (my_task == master_task) then - write(logunit,F91) - write(logunit,F00) trim(model),': end of main integration loop' - write(logunit,F91) - end if - - end subroutine dead_final_mct - - !=============================================================================== - subroutine dead_domain_mct( mpicom, gbuf, gsMap, logunit, domain ) - - !------------------------------------------------------------------- - !---arguments--- - integer(IN) , intent(in) :: mpicom - real(R8) , intent(in) :: gbuf(:,:) - type(mct_gsMap), intent(in) :: gsMap - integer(IN) , intent(in) :: logunit - type(mct_ggrid), intent(out) :: domain - - !---local variables--- - integer(IN) :: my_task ! mpi task within communicator - integer(IN) :: lsize ! temporary - integer(IN) :: n ,j,i ! indices - integer(IN) :: ier ! error status - real(R8), pointer :: data(:) ! temporary - integer(IN), pointer :: idata(:) ! temporary - !------------------------------------------------------------------- - ! - ! Initialize mct dead domain - ! - call mct_gGrid_init( GGrid=domain, CoordChars=trim(seq_flds_dom_coord), & - OtherChars=trim(seq_flds_dom_other), & - lsize=mct_gsMap_lsize(gsMap, mpicom) ) - call mct_aVect_zero(domain%data) - ! - ! Allocate memory - ! - lsize = mct_gGrid_lsize(domain) - if (size(gbuf,dim=1) /= lsize) then - call shr_sys_abort('mct_dead_domain size error') - endif - allocate(data(lsize)) - allocate(idata(lsize)) - ! - ! Initialize attribute vector with special value - ! - call mpi_comm_rank(mpicom, my_task, ier) - call mct_gsMap_orderedPoints(gsMap, my_task, idata) - call mct_gGrid_importIAttr(domain,'GlobGridNum',idata,lsize) - ! - call mct_aVect_zero(domain%data) - data(:) = -9999.0_R8 ! generic special value - call mct_gGrid_importRAttr(domain,"lat" ,data,lsize) - call mct_gGrid_importRAttr(domain,"lon" ,data,lsize) - call mct_gGrid_importRAttr(domain,"area",data,lsize) - call mct_gGrid_importRAttr(domain,"frac",data,lsize) - - data(:) = 0.0_R8 ! generic special value - call mct_gGrid_importRAttr(domain,"mask" ,data,lsize) - call mct_gGrid_importRAttr(domain,"aream",data,lsize) - ! - ! Fill in correct values for domain components - ! 
- do n = 1,lsize - data(n) = gbuf(n,dead_grid_lat) - enddo - call mct_gGrid_importRAttr(domain,"lat",data,lsize) - - do n = 1,lsize - data(n) = gbuf(n,dead_grid_lon) - enddo - call mct_gGrid_importRAttr(domain,"lon",data,lsize) - - do n = 1,lsize - data(n) = gbuf(n,dead_grid_area) - enddo - call mct_gGrid_importRAttr(domain,"area",data,lsize) - call mct_gGrid_importRAttr(domain,"aream",data,lsize) - - do n = 1,lsize - data(n) = gbuf(n,dead_grid_mask) - enddo - call mct_gGrid_importRAttr(domain,"mask" ,data,lsize) - - do n = 1,lsize - data(n) = gbuf(n,dead_grid_frac) - enddo - call mct_gGrid_importRAttr(domain,"frac" ,data,lsize) - - deallocate(data) - deallocate(idata) - - end subroutine dead_domain_mct - !=============================================================================== - -end module dead_mct_mod diff --git a/src/components/xcpl_comps/xshare/mct/dead_mod.F90 b/src/components/xcpl_comps/xshare/mct/dead_mod.F90 deleted file mode 100644 index a0724fd2783..00000000000 --- a/src/components/xcpl_comps/xshare/mct/dead_mod.F90 +++ /dev/null @@ -1,338 +0,0 @@ -module dead_mod - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_const_mod , only : shr_const_pi, shr_const_rearth - use shr_file_mod , only : shr_file_getunit, shr_file_freeunit - use shr_mpi_mod , only : shr_mpi_bcast - use shr_sys_mod , only : shr_sys_abort, shr_sys_flush - use dead_data_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_area, dead_grid_mask, dead_grid_frac, dead_grid_index - use dead_data_mod , only : dead_grid_total - - implicit none - private - - public :: dead_setNewGrid - public :: dead_read_inparms - -!=============================================================================== -contains -!=============================================================================== - - subroutine dead_read_inparms(model, mpicom, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, & - nxg, nyg, decomp_type, nproc_x, seg_len, flood) - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*) , intent(in) :: model - integer(IN) , intent(in) :: mpicom ! mpi communicator - integer(IN) , intent(in) :: my_task ! my task in mpi communicator mpicom - integer(IN) , intent(in) :: master_task ! task number of master task - integer , intent(in) :: inst_index ! number of current instance (ie. 1) - character(len=16) , intent(in) :: inst_suffix ! char string associated with instance - character(len=16) , intent(in) :: inst_name ! fullname of current instance (ie. "lnd_0001") - integer(IN) , intent(in) :: logunit ! logging unit number - integer(IN) , intent(out) :: nproc_x - integer(IN) , intent(out) :: seg_len - integer(IN) , intent(out) :: nxg ! global dim i-direction - integer(IN) , intent(out) :: nyg ! global dim j-direction - integer(IN) , intent(out) :: decomp_type ! decomposition type - logical , intent(out) :: flood ! rof flood flag - - !--- local variables --- - character(CL) :: fileName ! generic file name - integer(IN) :: nunit ! unit number - integer(IN) :: ierr ! error code - integer(IN) :: unitn ! 
Unit for namelist file - - !--- formats --- - character(*), parameter :: F00 = "('(dead_read_inparms) ',8a)" - character(*), parameter :: F01 = "('(dead_read_inparms) ',a,a,4i8)" - character(*), parameter :: F02 = "('(dead_read_inparms) ',a,L2)" - character(*), parameter :: F03 = "('(dead_read_inparms) ',a,a,i8,a)" - character(*), parameter :: subName = "(dead_read_inpamrs) " - !------------------------------------------------------------------------------- - - ! read the input parms (used to configure model) - nxg = -9999 - nyg = -9999 - nproc_x = -9999 - seg_len = -9999 - decomp_type = -9999 - - if (my_task == master_task) then - unitn = shr_file_getUnit() - open(unitn, file='x'//model//'_in'//trim(inst_suffix), status='old' ) - read(unitn,*) nxg - read(unitn,*) nyg - read(unitn,*) decomp_type - read(unitn,*) nproc_x - read(unitn,*) seg_len - if (model.eq.'rof') then - read(unitn,*) flood - end if - close (unitn) - call shr_file_freeunit(unitn) - endif - - call shr_mpi_bcast(nxg , mpicom,'x'//model//' nxg') - call shr_mpi_bcast(nyg , mpicom,'x'//model//' nyg') - call shr_mpi_bcast(decomp_type, mpicom,'x'//model//' decomp_type') - call shr_mpi_bcast(nproc_x , mpicom,'x'//model//' nproc_x') - call shr_mpi_bcast(seg_len , mpicom,'x'//model//' seg_len') - if (model.eq.'rof') then - call shr_mpi_bcast(flood , mpicom,'xrof flood') - end if - - if (my_task == master_task) then - write(logunit,*)' Read in X'//model//' input from file= x'//model//'_in' - write(logunit,F00) model - write(logunit,F00) model,' Model : ',model - write(logunit,F01) model,' NGX : ',nxg - write(logunit,F01) model,' NGY : ',nyg - write(logunit,F01) model,' Decomposition : ',decomp_type - write(logunit,F03) model,' Num pes in X : ',nproc_x,' (type 3 only)' - write(logunit,F03) model,' Segment Length : ',seg_len,' (type 11 only)' - write(logunit,F01) model,' inst_index : ',inst_index - write(logunit,F00) model,' inst_name : ',trim(inst_name) - write(logunit,F00) model,' inst_suffix : ',trim(inst_suffix) - if (model.eq.'rof') then - write(logunit,F02) ' Flood mode : ',flood - endif - write(logunit,F00) model - call shr_sys_flush(logunit) - end if - end subroutine dead_read_inparms - - !=============================================================================== - subroutine dead_setNewGrid(decomp_type, nxg, nyg, totpe, mype, logunit, lsize, & - gbuf, seg_len, nproc_x) - - implicit none - - ! !DESCRIPTION: - ! This sets up some defaults. The user may want to overwrite some - ! of these fields in the main program after initialization in complete. - - ! !INPUT/OUTPUT PARAMETERS: - - integer(IN) ,intent(in) :: decomp_type ! - integer(IN) ,intent(in) :: nxg,nyg ! global grid sizes - integer(IN) ,intent(in) :: totpe ! total number of pes - integer(IN) ,intent(in) :: mype ! local pe number - integer(IN) ,intent(in) :: logunit ! output logunit - integer(IN) ,intent(out) :: lsize ! local grid sizes - real(R8) ,pointer :: gbuf(:,:) ! output data - integer(IN) ,intent(in),optional :: seg_len ! seg len decomp setting - integer(IN) ,intent(in),optional :: nproc_x ! 2d decomp setting - - !--- local --- - integer(IN) :: ierr ! error code - logical :: found - integer(IN) :: i,j,ig,jg - integer(IN) :: n,ng,is,ie,js,je,nx,ny ! 
indices - integer(IN) :: npesx,npesy,mypex,mypey,nxp,nyp - real (R8) :: hscore,bscore - real (R8) :: dx,dy,deg2rad,ys,yc,yn,area,re - integer(IN),allocatable :: gindex(:) - - !--- formats --- - character(*), parameter :: F00 = "('(dead_setNewGrid) ',8a)" - character(*), parameter :: F01 = "('(dead_setNewGrid) ',a,4i8)" - character(*), parameter :: F02 = "('(dead_setNewGrid) ',a,4es13.6)" - character(*), parameter :: subName = "(dead_setNewGrid) " - !------------------------------------------------------------------------------- - - if (decomp_type == 1 .or. & - decomp_type == 2 .or. & - decomp_type == 3 .or. & - decomp_type == 4 .or. & - decomp_type == 11) then - ! valid - else - !------------------------------------------------------------------------- - ! invalid decomposition type - !------------------------------------------------------------------------- - if (mype == 0) then - write(logunit,F01) 'ERROR: invalid decomp_type = ',decomp_type - end if - call shr_sys_abort(subName//'invalid decomp_type') - endif - - if (nxg*nyg == 0) then - lsize = 0 - allocate(gbuf(lsize,dead_grid_total)) - ! gbuf = -888.0_R8 - if (mype == 0) then - write(logunit,*) subname,' grid size is zero, lsize = ',lsize - end if - return - endif - - found = .false. - - if (decomp_type == 1) then ! 1d decomp by lat - npesx = 1 - npesy = totpe - found = .true. - elseif (decomp_type == 2) then ! 1d decomp by lon - npesx = totpe - npesy = 1 - found = .true. - elseif (decomp_type == 3) then ! 2d decomp - if (present(nproc_x)) then - if ( nproc_x > 0 ) then - npesx=nproc_x - npesy=totpe/npesx - if ( npesx*npesy /= totpe) then - write(logunit,F00) 'ERROR: uneven decomposition' - call shr_sys_abort(subName//'uneven decomp') - end if - found = .true. - endif - endif - if (.not.found) then ! narrow blocks - do nx = 1,totpe - ny = totpe/nx - if (nx*ny == totpe) then - npesx = nx - npesy = ny - found = .true. - endif - enddo - endif - elseif (decomp_type == 4) then ! 2d evenly divisible square block decomp - hscore = nxg*nyg - do nx = 1,totpe - ny = totpe/nx - if (nx*ny == totpe .and. mod(nxg,nx) == 0 .and. mod(nyg,ny) == 0) then - bscore = ((nxg*ny*1.0_r8) / (nyg*nx*1.0_r8)) - 1.0_r8 - bscore = bscore * bscore - if (bscore < hscore .or. .not.found) then - hscore = bscore - npesx = nx - npesy = ny - found = .true. - endif - endif - enddo - endif - - if (found) then - nx = nxg/npesx - mypex = mod(mype,npesx) - mypey = mype/npesx - is = (mypex ) * (nx) + 1 - ie = (mypex + 1) * (nx) - - ny = nyg/npesy - js = (mypey ) * (ny) + 1 - je = (mypey + 1) * (ny) - - nxp = nxg - (nx*npesx) ! extra lons not accounted for yet - nyp = nyg - (ny*npesy) ! extra lats not accounted for yet - - is = is + min(mypex,nxp) ! add them to first few pes and shift everything - ie = ie + min(mypex+1,nxp) - js = js + min(mypey,nyp) ! add them to first few pes and shift everything - je = je + min(mypey+1,nyp) - - lsize = (ie - is + 1) * (je - js + 1) - - allocate(gindex(lsize)) - n = 0 - do j = js,je - do i = is,ie - n = n + 1 - gindex(n) = (j-1)*nxg + i - enddo - enddo - endif - - if (.not.found) then - !------------------------------------------------------------------------- - ! type 11 general segment decomp - !------------------------------------------------------------------------- - nx = nxg*nyg / (totpe*13) + 1 ! 13 segments per pe (arbitrary) - ! 
nx override with seg_len - if (present(seg_len)) then - if (seg_len > 0) nx = seg_len - endif - - n = 0 - i = 0 - lsize = 0 - do while (n < nxg*nyg) - ny = min(nx,nxg*nyg-n) - do j = 1,ny - n = n + 1 - if (mype == mod(i,totpe)) then - lsize = lsize + 1 - endif - enddo - i = i + 1 - enddo - - allocate(gindex(lsize)) - - n = 0 - i = 0 - lsize = 0 - do while (n < nxg*nyg) - ny = min(nx,nxg*nyg-n) - do j = 1,ny - n = n + 1 - if (mype == mod(i,totpe)) then - lsize = lsize + 1 - gindex(lsize) = n - endif - enddo - i = i + 1 - enddo - - if (mype == 0) then - write(logunit,*) 'dead_setNewGrid decomp seg ',mype,lsize,nx - end if - - found = .true. - - endif - - if ( .not.found ) then - write(logunit,F01) 'ERROR: with decomp nxg,nyg,totpe=',nxg,nyg,totpe - call shr_sys_abort(subName//'decomp') - endif - - deg2rad = shr_const_pi / 180.0_R8 - re = shr_const_rearth - - allocate(gbuf(lsize,dead_grid_total)) - gbuf = -888.0_R8 - if (mype == 0) then - write(logunit,*) subname,' Decomp is ',decomp_type,' lsize = ',lsize - end if - - n=0 - dx = 360.0_R8/nxg * deg2rad - do n = 1,lsize - ig = mod((gindex(n)-1),nxg) + 1 - jg = (gindex(n)-1)/nxg + 1 - - ys = -90.0_R8 + (jg-1.0_R8)*180.0_R8/(nyg) - yc = -90.0_R8 + (jg-0.5_R8)*180.0_R8/(nyg) - yn = -90.0_R8 + (jg-0.0_R8)*180.0_R8/(nyg) - dy = sin(yn*deg2rad) - sin(ys*deg2rad) - area = dx*dy*re*re - - gbuf(n,dead_grid_lon ) = (ig-1.0_R8)*360.0_R8/(nxg) - gbuf(n,dead_grid_lat ) = yc - gbuf(n,dead_grid_index) = gindex(n) - gbuf(n,dead_grid_area ) = area - gbuf(n,dead_grid_mask ) = 1 - gbuf(n,dead_grid_frac ) = 1.0_R8 - enddo - - deallocate(gindex) - - end subroutine dead_setNewGrid - -end module dead_mod diff --git a/src/components/xcpl_comps/xshare/nuopc/dead_nuopc_mod.F90 b/src/components/xcpl_comps/xshare/nuopc/dead_nuopc_mod.F90 deleted file mode 100644 index 98111376008..00000000000 --- a/src/components/xcpl_comps/xshare/nuopc/dead_nuopc_mod.F90 +++ /dev/null @@ -1,907 +0,0 @@ -module dead_nuopc_mod - - use ESMF , only : ESMF_Gridcomp, ESMF_State, ESMF_StateGet - use ESMF , only : ESMF_Clock, ESMF_Time, ESMF_TimeInterval, ESMF_Alarm - use ESMF , only : ESMF_GridCompGet, ESMF_ClockGet, ESMF_ClockSet, ESMF_ClockAdvance, ESMF_AlarmSet - use ESMF , only : ESMF_SUCCESS, ESMF_LogWrite, ESMF_LOGMSG_INFO, ESMF_METHOD_INITIALIZE - use ESMF , only : ESMF_FAILURE, ESMF_LOGMSG_ERROR - use ESMF , only : ESMF_VMGetCurrent, ESMF_VM, ESMF_VMBroadcast, ESMF_VMGet - use ESMF , only : ESMF_VM, ESMF_VMGetCurrent, ESMF_VmGet - use ESMF , only : operator(/=), operator(==), operator(+) - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_sys_mod , only : shr_sys_abort - use dead_methods_mod , only : chkerr, alarmInit - - implicit none - private - - public :: dead_init_nuopc - public :: dead_final_nuopc - public :: dead_meshinit - public :: ModelInitPhase - public :: ModelSetRunClock - public :: fld_list_add - public :: fld_list_realize - - ! !PUBLIC DATA MEMBERS: - integer, public :: dead_grid_lat = 1 ! lat from component - integer, public :: dead_grid_lon = 2 ! lon from component - integer, public :: dead_grid_area = 3 ! area from component - integer, public :: dead_grid_mask = 4 ! mask, 0 = inactive cell - integer, public :: dead_grid_frac = 5 ! fractional area coverage - integer, public :: dead_grid_index = 6 ! 
global index - integer, public :: dead_grid_total = 6 - - type fld_list_type - character(len=128) :: stdname - integer :: ungridded_lbound = 0 - integer :: ungridded_ubound = 0 - end type fld_list_type - public :: fld_list_type - - integer, parameter, public :: fldsMax = 100 - integer :: dbug_flag = 0 - character(*), parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine dead_read_inparms(model, inst_suffix, logunit, & - nxg, nyg, decomp_type, nproc_x, seg_len) - - ! input/output variables - character(len=*) , intent(in) :: model - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - integer , intent(out) :: nproc_x - integer , intent(out) :: seg_len - integer , intent(out) :: nxg ! global dim i-direction - integer , intent(out) :: nyg ! global dim j-direction - integer , intent(out) :: decomp_type ! decomposition type - - ! local variables - type(ESMF_VM) :: vm - character(CL) :: fileName ! generic file name - integer :: nunit ! unit number - integer :: unitn ! Unit for namelist file - integer :: tmp(5) ! array for broadcast - integer :: localPet ! mpi id of current task in current context - integer :: rc ! return code - character(*), parameter :: F00 = "('(dead_read_inparms) ',8a)" - character(*), parameter :: F01 = "('(dead_read_inparms) ',a,a,4i8)" - character(*), parameter :: F03 = "('(dead_read_inparms) ',a,a,i8,a)" - character(*), parameter :: subName = "(dead_read_inpamrs) " - !------------------------------------------------------------------------------- - - ! read the input parms (used to configure model) - nxg = -9999 - nyg = -9999 - nproc_x = -9999 - seg_len = -9999 - decomp_type = -9999 - - call ESMF_VMGetCurrent(vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_VMGet(vm, localPet=localPet, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (localPet==0) then - open(newunit=unitn, file='x'//model//'_in'//trim(inst_suffix), status='old' ) - read(unitn,*) nxg - read(unitn,*) nyg - read(unitn,*) decomp_type - read(unitn,*) nproc_x - read(unitn,*) seg_len - close (unitn) - endif - - tmp(1) = nxg - tmp(2) = nyg - tmp(3) = decomp_type - tmp(4) = nproc_x - tmp(5) = seg_len - - call ESMF_VMBroadcast(vm, tmp, 6, 0, rc=rc) - - nxg = tmp(1) - nyg = tmp(2) - decomp_type = tmp(3) - nproc_x = tmp(4) - seg_len = tmp(5) - - if (localPet==0) then - write(logunit,*)' Read in X'//model//' input from file= x'//model//'_in' - write(logunit,F00) model - write(logunit,F00) model,' Model : ',model - write(logunit,F01) model,' NGX : ',nxg - write(logunit,F01) model,' NGY : ',nyg - write(logunit,F01) model,' Decomposition : ',decomp_type - write(logunit,F03) model,' Num pes in X : ',nproc_x,' (type 3 only)' - write(logunit,F03) model,' Segment Length : ',seg_len,' (type 11 only)' - write(logunit,F00) model,' inst_suffix : ',trim(inst_suffix) - write(logunit,F00) model - end if - - end subroutine dead_read_inparms - - !=============================================================================== - - subroutine dead_setNewGrid(decomp_type, nxg, nyg, logunit, lsize, gbuf, seg_len, nproc_x) - - ! This sets up some defaults. The user may want to overwrite some - ! of these fields in the main program after initialization in complete. - - use shr_const_mod , only : shr_const_pi, shr_const_rearth - - ! 
input/output parameters: - integer , intent(in) :: decomp_type ! - integer , intent(in) :: nxg,nyg ! global grid sizes - integer , intent(in) :: logunit ! output logunit - integer , intent(out) :: lsize ! local grid sizes - real(R8), pointer :: gbuf(:,:) ! output data - integer , intent(in),optional :: seg_len ! seg len decomp setting - integer , intent(in),optional :: nproc_x ! 2d decomp setting - - ! local - type(ESMF_VM) :: vm - integer :: rc - integer :: mype - integer :: totpe ! total number of pes - logical :: found - integer :: i,j,ig,jg - integer :: n,ng,is,ie,js,je,nx,ny - integer :: npesx,npesy,mypex,mypey,nxp,nyp - real(R8) :: hscore,bscore - real(R8) :: dx,dy,deg2rad,ys,yc,yn,area,re - integer, allocatable :: gindex(:) - character(*), parameter :: F00 = "('(dead_setNewGrid) ',8a)" - character(*), parameter :: F01 = "('(dead_setNewGrid) ',a,4i8)" - character(*), parameter :: subName = "(dead_setNewGrid) " - !------------------------------------------------------------------------------- - - call ESMF_VMGetCurrent(vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_VMGet(vm, localPet=mype, peCount=totpe, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if ( decomp_type == 1 .or. & - decomp_type == 2 .or. & - decomp_type == 3 .or. & - decomp_type == 4 .or. & - decomp_type == 11) then - else - ! invalid decomposition type - if (mype == 0) then - write(logunit,F01) 'ERROR: invalid decomp_type = ',decomp_type - end if - call shr_sys_abort(subName//'invalid decomp_type') - endif - - if (nxg*nyg == 0) then - lsize = 0 - allocate(gbuf(lsize,dead_grid_total)) - if (mype == 0) then - write(logunit,*) subname,' grid size is zero, lsize = ',lsize - end if - return - endif - - found = .false. - - if (decomp_type == 1) then ! 1d decomp by lat - npesx = 1 - npesy = totpe - found = .true. - elseif (decomp_type == 2) then ! 1d decomp by lon - npesx = totpe - npesy = 1 - found = .true. - elseif (decomp_type == 3) then ! 2d decomp - if (present(nproc_x)) then - if ( nproc_x > 0 ) then - npesx=nproc_x - npesy=totpe/npesx - if ( npesx*npesy /= totpe) then - write(logunit,F00) 'ERROR: uneven decomposition' - call shr_sys_abort(subName//'uneven decomp') - end if - found = .true. - endif - endif - if (.not.found) then ! narrow blocks - do nx = 1,totpe - ny = totpe/nx - if (nx*ny == totpe) then - npesx = nx - npesy = ny - found = .true. - endif - enddo - endif - elseif (decomp_type == 4) then ! 2d evenly divisible square block decomp - hscore = nxg*nyg - do nx = 1,totpe - ny = totpe/nx - if (nx*ny == totpe .and. mod(nxg,nx) == 0 .and. mod(nyg,ny) == 0) then - bscore = ((nxg*ny*1.0_r8) / (nyg*nx*1.0_r8)) - 1.0_r8 - bscore = bscore * bscore - if (bscore < hscore .or. .not.found) then - hscore = bscore - npesx = nx - npesy = ny - found = .true. - endif - endif - enddo - endif - - if (found) then - nx = nxg/npesx - mypex = mod(mype,npesx) - mypey = mype/npesx - is = (mypex ) * (nx) + 1 - ie = (mypex + 1) * (nx) - - ny = nyg/npesy - js = (mypey ) * (ny) + 1 - je = (mypey + 1) * (ny) - - nxp = nxg - (nx*npesx) ! extra lons not accounted for yet - nyp = nyg - (ny*npesy) ! extra lats not accounted for yet - - is = is + min(mypex,nxp) ! add them to first few pes and shift everything - ie = ie + min(mypex+1,nxp) - js = js + min(mypey,nyp) ! 
add them to first few pes and shift everything - je = je + min(mypey+1,nyp) - - lsize = (ie - is + 1) * (je - js + 1) - - allocate(gindex(lsize)) - n = 0 - do j = js,je - do i = is,ie - n = n + 1 - gindex(n) = (j-1)*nxg + i - enddo - enddo - endif - - if (.not.found) then - !------------------------------------------------------------------------- - ! type 11 general segment decomp - !------------------------------------------------------------------------- - nx = nxg*nyg / (totpe*13) + 1 ! 13 segments per pe (arbitrary) - ! nx override with seg_len - if (present(seg_len)) then - if (seg_len > 0) nx = seg_len - endif - - n = 0 - i = 0 - lsize = 0 - do while (n < nxg*nyg) - ny = min(nx,nxg*nyg-n) - do j = 1,ny - n = n + 1 - if (mype == mod(i,totpe)) then - lsize = lsize + 1 - endif - enddo - i = i + 1 - enddo - - allocate(gindex(lsize)) - - n = 0 - i = 0 - lsize = 0 - do while (n < nxg*nyg) - ny = min(nx,nxg*nyg-n) - do j = 1,ny - n = n + 1 - if (mype == mod(i,totpe)) then - lsize = lsize + 1 - gindex(lsize) = n - endif - enddo - i = i + 1 - enddo - - if (mype == 0) then - write(logunit,*) 'dead_setNewGrid decomp seg ',mype,lsize,nx - end if - - found = .true. - - endif - - if ( .not.found ) then - write(logunit,F01) 'ERROR: with decomp nxg,nyg,totpe=',nxg,nyg,totpe - call shr_sys_abort(subName//'decomp') - endif - - deg2rad = shr_const_pi / 180.0_R8 - re = shr_const_rearth - - allocate(gbuf(lsize,dead_grid_total)) - gbuf = -888.0_R8 - if (mype == 0) then - write(logunit,*) subname,' Decomp is ',decomp_type,' lsize = ',lsize - end if - - n=0 - dx = 360.0_R8/nxg * deg2rad - do n = 1,lsize - ig = mod((gindex(n)-1),nxg) + 1 - jg = (gindex(n)-1)/nxg + 1 - - ys = -90.0_R8 + (jg-1.0_R8)*180.0_R8/(nyg) - yc = -90.0_R8 + (jg-0.5_R8)*180.0_R8/(nyg) - yn = -90.0_R8 + (jg-0.0_R8)*180.0_R8/(nyg) - dy = sin(yn*deg2rad) - sin(ys*deg2rad) - area = dx*dy*re*re - - gbuf(n,dead_grid_lon ) = (ig-1.0_R8)*360.0_R8/(nxg) - gbuf(n,dead_grid_lat ) = yc - gbuf(n,dead_grid_index) = gindex(n) - gbuf(n,dead_grid_area ) = area - gbuf(n,dead_grid_mask ) = 0 - gbuf(n,dead_grid_frac ) = 1.0_R8 - enddo - - deallocate(gindex) - - end subroutine dead_setNewGrid - - !=============================================================================== - - subroutine dead_init_nuopc(model, inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - ! input/output parameters: - character(len=*) , intent(in) :: model - character(len=*) , intent(in) :: inst_suffix ! char string associated with instance - integer , intent(in) :: logunit ! logging unit number - integer , intent(out) :: lsize ! logging unit number - real(r8) , pointer :: gbuf(:,:) ! model grid - integer , pointer :: gindex(:) ! global index space - integer , intent(out) :: nxg ! global dim i-direction - integer , intent(out) :: nyg ! global dim j-direction - - !--- local variables --- - integer :: local_comm ! local communicator - integer :: nproc_x - integer :: seg_len - integer :: decomp_type - character(*), parameter :: subName = "(dead_init_nuopc) " - !------------------------------------------------------------------------------- - - ! Read input parms - call dead_read_inparms(model, inst_suffix, logunit, nxg, nyg, decomp_type, nproc_x, seg_len) - - ! Initialize grid - call dead_setNewGrid(decomp_type, nxg, nyg, logunit, lsize, gbuf, seg_len, nproc_x) - - end subroutine dead_init_nuopc - - !=============================================================================== - - subroutine dead_final_nuopc(model, logunit) - - ! finalize method for xcpl component - - ! 
input/output parameters: - character(len=*) , intent(in) :: model - integer , intent(in) :: logunit ! logging unit number - - ! local variables - type(ESMF_VM) :: vm - integer :: rc - integer :: localPet - character(*), parameter :: F00 = "('(dead_comp_final) ',8a)" - character(*), parameter :: F91 = "('(dead_comp_final) ',73('-'))" - character(*), parameter :: subName = "(dead_comp_final) " - !------------------------------------------------------------------------------- - - call ESMF_VMGetCurrent(vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_VMGet(vm, localPet=localPet, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (localPet==0) then - write(logunit,F91) - write(logunit,F00) trim(model),': end of main integration loop' - write(logunit,F91) - end if - - end subroutine dead_final_nuopc - - !=============================================================================== - - subroutine fld_list_add(num, fldlist, stdname, ungridded_lbound, ungridded_ubound) - - ! input/output variables - integer , intent(inout) :: num - type(fld_list_type) , intent(inout) :: fldlist(:) - character(len=*) , intent(in) :: stdname - integer, optional , intent(in) :: ungridded_lbound - integer, optional , intent(in) :: ungridded_ubound - - ! local variables - character(len=*), parameter :: subname='(dead_nuopc_mod:fld_list_add)' - !------------------------------------------------------------------------------- - - ! Set up a list of field information - num = num + 1 - if (num > fldsMax) then - call ESMF_LogWrite(trim(subname)//": ERROR num > fldsMax "//trim(stdname), & - ESMF_LOGMSG_ERROR, line=__LINE__, file=__FILE__) - return - endif - fldlist(num)%stdname = trim(stdname) - - if (present(ungridded_lbound) .and. present(ungridded_ubound)) then - fldlist(num)%ungridded_lbound = ungridded_lbound - fldlist(num)%ungridded_ubound = ungridded_ubound - end if - - end subroutine fld_list_add - - !=============================================================================== - - subroutine fld_list_realize(state, fldList, numflds, flds_scalar_name, flds_scalar_num, mesh, tag, rc) - - use NUOPC , only : NUOPC_IsConnected, NUOPC_Realize - use ESMF , only : ESMF_MeshLoc_Element, ESMF_FieldCreate, ESMF_TYPEKIND_R8 - use ESMF , only : ESMF_MAXSTR, ESMF_Field, ESMF_State, ESMF_Mesh, ESMF_StateRemove - use ESMF , only : ESMF_LogFoundError, ESMF_LOGMSG_INFO, ESMF_SUCCESS - use ESMF , only : ESMF_LogWrite, ESMF_LOGMSG_ERROR, ESMF_LOGERR_PASSTHRU - - type(ESMF_State) , intent(inout) :: state - type(fld_list_type) , intent(in) :: fldList(:) - integer , intent(in) :: numflds - character(len=*) , intent(in) :: flds_scalar_name - integer , intent(in) :: flds_scalar_num - character(len=*) , intent(in) :: tag - type(ESMF_Mesh) , intent(in) :: mesh - integer , intent(inout) :: rc - - ! local variables - integer :: n - type(ESMF_Field) :: field - character(len=80) :: stdname - integer :: gridtoFieldMap=2 - character(len=*),parameter :: subname='(dead_nuopc_mod:fld_list_realize)' - ! ---------------------------------------------- - - rc = ESMF_SUCCESS - - do n = 1, numflds - stdname = fldList(n)%stdname - if (NUOPC_IsConnected(state, fieldName=stdname)) then - if (stdname == trim(flds_scalar_name)) then - call ESMF_LogWrite(trim(subname)//trim(tag)//" Field = "//trim(stdname)//" is connected on root pe", & - ESMF_LOGMSG_INFO) - ! 
Create the scalar field - call SetScalarField(field, flds_scalar_name, flds_scalar_num, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - else - call ESMF_LogWrite(trim(subname)//trim(tag)//" Field = "//trim(stdname)//" is connected using mesh", & - ESMF_LOGMSG_INFO) - ! Create the field - if (fldlist(n)%ungridded_lbound > 0 .and. fldlist(n)%ungridded_ubound > 0) then - field = ESMF_FieldCreate(mesh, ESMF_TYPEKIND_R8, name=stdname, meshloc=ESMF_MESHLOC_ELEMENT, & - ungriddedLbound=(/fldlist(n)%ungridded_lbound/), & - ungriddedUbound=(/fldlist(n)%ungridded_ubound/), & - gridToFieldMap=(/gridToFieldMap/), rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - field = ESMF_FieldCreate(mesh, ESMF_TYPEKIND_R8, name=stdname, meshloc=ESMF_MESHLOC_ELEMENT, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - end if - endif - - ! NOW call NUOPC_Realize - call NUOPC_Realize(state, field=field, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - else - if (stdname /= trim(flds_scalar_name)) then - call ESMF_LogWrite(subname // trim(tag) // " Field = "// trim(stdname) // " is not connected.", & - ESMF_LOGMSG_INFO) - call ESMF_StateRemove(state, (/stdname/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - end if - end if - end do - - contains !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - subroutine SetScalarField(field, flds_scalar_name, flds_scalar_num, rc) - ! ---------------------------------------------- - ! create a field with scalar data on the root pe - ! ---------------------------------------------- - - use ESMF, only : ESMF_Field, ESMF_DistGrid, ESMF_Grid - use ESMF, only : ESMF_DistGridCreate, ESMF_GridCreate, ESMF_LogFoundError, ESMF_LOGERR_PASSTHRU - use ESMF, only : ESMF_FieldCreate, ESMF_GridCreate, ESMF_TYPEKIND_R8 - - type(ESMF_Field) , intent(inout) :: field - character(len=*) , intent(in) :: flds_scalar_name - integer , intent(in) :: flds_scalar_num - integer , intent(inout) :: rc - - ! local variables - type(ESMF_Distgrid) :: distgrid - type(ESMF_Grid) :: grid - character(len=*), parameter :: subname='(dead_nuopc_mod:SetScalarField)' - ! ---------------------------------------------- - - rc = ESMF_SUCCESS - - ! create a DistGrid with a single index space element, which gets mapped onto DE 0. 
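The SetScalarField helper above exists so that a handful of per-component scalars (the global grid dimensions, addressed through the ScalarFieldIdxGridNX/NY attributes) can travel in a single field that lives only on the root PET, with flds_scalar_num slots in an ungridded dimension; the values are filled in elsewhere through state_setscalar from dead_methods_mod. A minimal Python sketch of that bookkeeping, using hypothetical names and values, might look like:

    # Sketch only: a fixed-length scalar payload on the root task, addressed by
    # 1-based attribute indices (mirroring ScalarFieldCount / ScalarFieldIdxGridNX / NY).
    flds_scalar_num = 2
    flds_scalar_index_nx = 1
    flds_scalar_index_ny = 2

    scalar_data = [0.0] * flds_scalar_num      # payload carried by the one-element field

    def set_scalar(index, value):
        scalar_data[index - 1] = value         # 1-based attribute index -> 0-based list slot

    set_scalar(flds_scalar_index_nx, 360)      # global i-dimension, e.g. nxg
    set_scalar(flds_scalar_index_ny, 180)      # global j-dimension, e.g. nyg
    print(scalar_data)                         # [360, 180]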
- distgrid = ESMF_DistGridCreate(minIndex=(/1/), maxIndex=(/1/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - - grid = ESMF_GridCreate(distgrid, rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - - field = ESMF_FieldCreate(name=trim(flds_scalar_name), grid=grid, typekind=ESMF_TYPEKIND_R8, & - ungriddedLBound=(/1/), ungriddedUBound=(/flds_scalar_num/), gridToFieldMap=(/2/), rc=rc) - if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return - - end subroutine SetScalarField - - end subroutine fld_list_realize - - !=============================================================================== - - subroutine ModelInitPhase(gcomp, importState, exportState, clock, rc) - - use NUOPC, only : NUOPC_CompFilterPhaseMap - - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - ! Switch to IPDv01 by filtering all other phaseMap entries - call NUOPC_CompFilterPhaseMap(gcomp, ESMF_METHOD_INITIALIZE, acceptStringList=(/"IPDv01p"/), rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine ModelInitPhase - - !=============================================================================== - - subroutine ModelSetRunClock(gcomp, rc) - - use ESMF , only : ESMF_ClockGetAlarmList, ESMF_ALARMLIST_ALL - use NUOPC_Model , only : NUOPC_ModelGet - use NUOPC , only : NUOPC_CompAttributeGet - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: mclock, dclock - type(ESMF_Time) :: mcurrtime, dcurrtime - type(ESMF_Time) :: mstoptime - type(ESMF_TimeInterval) :: mtimestep, dtimestep - character(len=256) :: cvalue - character(len=256) :: restart_option ! Restart option units - integer :: restart_n ! Number until restart interval - integer :: restart_ymd ! Restart date (YYYYMMDD) - type(ESMF_ALARM) :: restart_alarm - character(len=128) :: name - integer :: alarmcount - character(len=*),parameter :: subname='dead_nuopc_mod:(ModelSetRunClock) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - if (dbug_flag > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - - ! query the Component for its clocks - call NUOPC_ModelGet(gcomp, driverClock=dclock, modelClock=mclock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_ClockGet(dclock, currTime=dcurrtime, timeStep=dtimestep, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_ClockGet(mclock, currTime=mcurrtime, timeStep=mtimestep, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! force model clock currtime and timestep to match driver and set stoptime - !-------------------------------- - - mstoptime = mcurrtime + dtimestep - call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
set restart alarm - !-------------------------------- - - call ESMF_ClockGetAlarmList(mclock, alarmlistflag=ESMF_ALARMLIST_ALL, alarmCount=alarmCount, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (alarmCount == 0) then - - call ESMF_GridCompGet(gcomp, name=name, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_LogWrite(subname//'setting alarms for' // trim(name), ESMF_LOGMSG_INFO) - - call NUOPC_CompAttributeGet(gcomp, name="restart_option", value=restart_option, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompAttributeGet(gcomp, name="restart_n", value=cvalue, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) restart_n - - call NUOPC_CompAttributeGet(gcomp, name="restart_ymd", value=cvalue, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) restart_ymd - - call alarmInit(mclock, restart_alarm, restart_option, & - opt_n = restart_n, & - opt_ymd = restart_ymd, & - RefTime = mcurrTime, & - alarmname = 'alarm_restart', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_AlarmSet(restart_alarm, clock=mclock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end if - - !-------------------------------- - ! Advance model clock to trigger alarms then reset model clock back to currtime - !-------------------------------- - - call ESMF_ClockAdvance(mclock,rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_ClockSet(mclock, currTime=dcurrtime, timeStep=dtimestep, stopTime=mstoptime, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - if (dbug_flag > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO) - - end subroutine ModelSetRunClock - - !=============================================================================== - - subroutine dead_meshinit(gcomp, nx_global, ny_global, gindex, lon, lat, Emesh, rc) - - !----------------------------------------- - ! create an Emesh object for Fields - !----------------------------------------- - - use ESMF , only : ESMF_GridComp, ESMF_VM, ESMF_Mesh - use ESMF , only : ESMF_VMGet, ESMF_GridCompGet, ESMF_VMBroadCast, ESMF_VMAllGatherV - use ESMF , only : ESMF_SUCCESS, ESMF_LOGMSG_INFO, ESMF_LogWrite - use ESMF , only : ESMF_VMGather, ESMF_LogFoundError, ESMF_LOGERR_PASSTHRU - use ESMF , only : ESMF_MeshCreate, ESMF_COORDSYS_SPH_DEG, ESMF_REDUCE_SUM - use ESMF , only : ESMF_VMAllReduce, ESMF_MESHELEMTYPE_QUAD - - ! input/output arguments - type(ESMF_GridComp) :: gcomp - integer , intent(in) :: nx_global - integer , intent(in) :: ny_global - integer , intent(in) :: gindex(:) - real(r8), pointer , intent(in) :: lon(:) - real(r8), pointer , intent(in) :: lat(:) - type(ESMF_Mesh) , intent(inout) :: Emesh - integer , intent(inout) :: rc - - ! 
local variables - integer :: n,n1,n2,de - integer :: iam - integer :: lsize - integer :: numTotElems, numNodes, numConn, nodeindx - integer :: iur,iul,ill,ilr - integer :: xid, yid, xid0, yid0 - real(r8) :: lonur, lonul, lonll, lonlr - integer, pointer :: iurpts(:) - integer, pointer :: elemIds(:) - integer, pointer :: elemTypes(:) - integer, pointer :: elemConn(:) - real(r8),pointer :: elemCoords(:) - integer, pointer :: nodeIds(:) - integer, pointer :: nodeOwners(:) - real(r8),pointer :: nodeCoords(:) - real(r8),pointer :: latG(:) - real(r8),pointer :: lonG(:) - integer ,pointer :: pes_local(:) - integer ,pointer :: pes_global(:) - integer, pointer :: recvOffsets(:) - integer, pointer :: recvCounts(:) - integer :: sendData(1) - type(ESMF_VM) :: vm - integer :: petCount - character(len=*),parameter :: subname='(dead_MeshInit)' - !-------------------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_LogWrite(subname, ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - lsize = size(gindex) - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, petCount=petCount, localpet=iam, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - allocate(latG(nx_global*ny_global)) - allocate(lonG(nx_global*ny_global)) - - allocate(recvoffsets(petCount)) - allocate(recvCounts(petCount)) - - sendData(1) = lsize - call ESMF_VMGather(vm, sendData=sendData, recvData=recvCounts, count=1, rootPet=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMBroadCast(vm, bcstData=recvCounts, count=petCount, rootPet=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - recvoffsets(1) = 0 - do n = 2,petCount - recvoffsets(n) = recvoffsets(n-1) + recvCounts(n-1) - end do - - call ESMF_VMAllGatherV(vm, lat, lsize, latG, recvCounts, recvOffsets, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMAllGatherV(vm, lon, lsize, lonG, recvCounts, recvOffsets, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - deallocate(recvoffsets) - deallocate(recvCounts) - - ! assumes quadrilaterals for each gridcell (element) - ! element index matches gsmap index value - ! nodeid at lower left of each gridcell matches gsmap index value - ! assumes wrap around in x direction but no wrap in y direction - ! node ids need to be described in counter clockwise direction - ! node id associated with lower left cell is assigned to local PET - ! node ids at top of y boundary assigned to the element to the right - - numTotElems = lsize - - allocate(elemIds(numTotElems)) - allocate(elemTypes(numTotElems)) - elemTypes=(/ESMF_MESHELEMTYPE_QUAD/) - allocate(elemConn(4*numTotElems)) - allocate(elemCoords(2*numTotElems)) - - allocate(nodeIds(numTotElems*4)) - nodeIds = -99 - - elemIds(:) = gindex(:) - numNodes = 0 - numConn = 0 - - do n = 1,numTotElems - elemTypes(n) = ESMF_MESHELEMTYPE_QUAD - elemCoords(2*n-1) = lon(n) - elemCoords(2*n) = lat(n) - - do n1 = 1,4 - - numNodes = numNodes + 1 - nodeindx = numNodes - if (n1 == 1 .or. n1 == 3) xid = mod(elemIds(n)-1,nx_global) + 1 - if (n1 == 2 .or. n1 == 4) xid = mod(elemIds(n) ,nx_global) + 1 - if (n1 == 1 .or. n1 == 2) yid = (elemIds(n)-1)/nx_global + 1 - if (n1 == 3 .or. n1 == 4) yid = (elemIds(n)-1)/nx_global + 2 - nodeIds(numNodes) = (yid-1) * nx_global + xid - n2 = 0 - do while (n2 < numNodes - 1 .and. 
nodeindx == numNodes) - n2 = n2 + 1 - if (nodeIds(numNodes) == nodeIds(n2)) nodeindx = n2 - enddo - if (nodeindx /= numNodes) then - numNodes = numNodes - 1 - endif - - numConn = numConn + 1 - elemConn(numConn) = nodeindx - enddo - enddo - - - allocate(nodeCoords(2*numNodes)) - allocate(nodeOwners(numNodes)) - allocate(iurpts(numNodes)) - - do n = 1,numNodes - - xid0 = mod(nodeIds(n)-1, nx_global) + 1 - yid0 = (nodeIds(n)-1) / nx_global + 1 - - xid = xid0 - yid = max(min(yid0,ny_global),1) - iur = (yid-1) * nx_global + xid - iurpts(n) = iur - - xid = mod(xid0 - 2 + nx_global, nx_global) + 1 - yid = max(min(yid0,ny_global),1) - iul = (yid-1) * nx_global + xid - - xid = mod(xid0 - 2 + nx_global, nx_global) + 1 - yid = max(min(yid0-1,ny_global),1) - ill = (yid-1) * nx_global + xid - - xid = xid0 - yid = max(min(yid0-1,ny_global),1) - ilr = (yid-1) * nx_global + xid - - ! write(tmpstr,'(2a,8i6)') subname,' nodecoord = ',n,nodeIds(n),xid0,yid0,iur,iul,ill,ilr - ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO) - - ! need to normalize lon values to same 360 degree setting, use lonur as reference value - lonur = lonG(iur) - lonul = lonG(iul) - lonll = lonG(ill) - lonlr = lonG(ilr) - - if (abs(lonul + 360._r8 - lonur) < abs(lonul - lonur)) lonul = lonul + 360._r8 - if (abs(lonul - 360._r8 - lonur) < abs(lonul - lonur)) lonul = lonul - 360._r8 - if (abs(lonll + 360._r8 - lonur) < abs(lonll - lonur)) lonll = lonll + 360._r8 - if (abs(lonll - 360._r8 - lonur) < abs(lonll - lonur)) lonll = lonll - 360._r8 - if (abs(lonlr + 360._r8 - lonur) < abs(lonlr - lonur)) lonlr = lonlr + 360._r8 - if (abs(lonlr - 360._r8 - lonur) < abs(lonlr - lonur)) lonlr = lonlr - 360._r8 - - nodeCoords(2*n-1) = 0.25_r8 * (lonur + lonul + lonll + lonlr) - nodeCoords(2*n) = 0.25_r8 * (latG(iur) + latG(iul) + latG(ill) + latG(ilr)) - enddo - - deallocate(lonG) - deallocate(latG) - - ! 
Determine the pes that own each index of iurpts (nodeOwners) - - allocate(pes_local(nx_global*ny_global)) - allocate(pes_global(nx_global*ny_global)) - pes_local(:) = 0 - do n = 1,lsize - pes_local(gindex(n)) = iam - end do - - call ESMF_VMAllReduce(vm, sendData=pes_local, recvData=pes_global, count=nx_global*ny_global, & - reduceflag=ESMF_REDUCE_SUM, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - do n = 1,numNodes - nodeOwners(n) = pes_global(iurpts(n)) - end do - deallocate(pes_local) - deallocate(pes_global) - - Emesh = ESMF_MeshCreate(parametricDim=2, & - spatialDim=2, & - coordSys=ESMF_COORDSYS_SPH_DEG, & - nodeIds=nodeIds(1:numNodes), & - nodeCoords=nodeCoords, & - nodeOwners=nodeOwners, & - elementIds=elemIds,& - elementTypes=elemTypes, & - elementConn=elemConn, & - elementCoords=elemCoords, & - rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - deallocate(iurpts) - deallocate(nodeIds, nodeCoords, nodeOwners) - deallocate(elemIds, elemTypes, elemConn, elemCoords) - - end subroutine dead_meshinit - -end module dead_nuopc_mod diff --git a/src/components/xcpl_comps/xwav/cime_config/buildlib b/src/components/xcpl_comps/xwav/cime_config/buildlib deleted file mode 120000 index 9601a6fa7cc..00000000000 --- a/src/components/xcpl_comps/xwav/cime_config/buildlib +++ /dev/null @@ -1 +0,0 @@ -../../../../build_scripts/buildlib.internal_components \ No newline at end of file diff --git a/src/components/xcpl_comps/xwav/cime_config/buildnml b/src/components/xcpl_comps/xwav/cime_config/buildnml deleted file mode 100755 index b7b8576babd..00000000000 --- a/src/components/xcpl_comps/xwav/cime_config/buildnml +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -""" -build data model library -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildnml import build_xcpl_nml, parse_input -from CIME.case import Case - -def buildnml(case, caseroot, compname): - if compname != "xwav": - raise AttributeError - build_xcpl_nml(case, caseroot, compname) - -def _main_func(): - caseroot = parse_input(sys.argv) - with Case(caseroot) as case: - buildnml(case, caseroot, "xwav") - -if __name__ == "__main__": - _main_func() diff --git a/src/components/xcpl_comps/xwav/mct/wav_comp_mct.F90 b/src/components/xcpl_comps/xwav/mct/wav_comp_mct.F90 deleted file mode 100644 index bd8e62e250c..00000000000 --- a/src/components/xcpl_comps/xwav/mct/wav_comp_mct.F90 +++ /dev/null @@ -1,196 +0,0 @@ -module wav_comp_mct - - ! !USES: - - use esmf - use mct_mod - use perf_mod - use seq_cdata_mod , only: seq_cdata, seq_cdata_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_putdata, seq_infodata_getdata - use seq_comm_mct , only: seq_comm_inst, seq_comm_name, seq_comm_suffix - use shr_kind_mod , only: IN=>SHR_KIND_IN, R8=>SHR_KIND_R8, CS=>SHR_KIND_CS, CL=>SHR_KIND_CL - use shr_strdata_mod , only: shr_strdata_type - use shr_file_mod , only: shr_file_getunit, shr_file_getlogunit, shr_file_getloglevel - use shr_file_mod , only: shr_file_setlogunit, shr_file_setloglevel, shr_file_setio - use shr_file_mod , only: shr_file_freeunit - use dead_mct_mod , only: dead_init_mct, dead_run_mct, dead_final_mct - use seq_flds_mod , only: seq_flds_w2x_fields, seq_flds_x2w_fields - - ! !PUBLIC TYPES: - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! 
Public interfaces - !-------------------------------------------------------------------------- - - public :: wav_init_mct - public :: wav_run_mct - public :: wav_final_mct - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - integer(IN) :: mpicom ! mpi communicator - integer(IN) :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_name ! fullname of current instance (ie. "lnd_0001") - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer(IN) :: logunit ! logging unit number - integer(IN) :: compid ! mct comp id - real(r8) , pointer :: gbuf(:,:) ! model grid - integer(IN),parameter :: master_task=0 ! task number of master task - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -CONTAINS -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - !=============================================================================== - subroutine wav_init_mct( EClock, cdata, x2d, d2x, NLFilename ) - - ! !DESCRIPTION: initialize dead wav model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) , intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2d, d2x - character(len=*), optional , intent(in) :: NLFilename ! Namelist filename - - !--- local variables --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - integer(IN) :: nxg ! global dim i-direction - integer(IN) :: nyg ! global dim j-direction - integer(IN) :: phase ! initialization phase - integer(IN) :: ierr ! error code - logical :: wav_present ! if true, component is present - logical :: wav_prognostic ! if true, component is prognostic - !------------------------------------------------------------------------------- - - ! Set cdata pointers to derived types (in coupler) - call seq_cdata_setptrs(cdata, & - id=compid, & - mpicom=mpicom, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call seq_infodata_getData(infodata, wav_phase=phase) - - if (phase == 1) then - ! Determine communicator group - call mpi_comm_rank(mpicom, my_task, ierr) - - !--- open log file --- - if (my_task == master_task) then - logUnit = shr_file_getUnit() - call shr_file_setIO('wav_modelio.nml'//trim(inst_suffix),logUnit) - else - logUnit = 6 - endif - endif - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - !---------------------------------------------------------------------------- - ! Initialize xwav - !---------------------------------------------------------------------------- - - call dead_init_mct('wav', Eclock, x2d, d2x, & - seq_flds_x2w_fields, seq_flds_w2x_fields, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, & - inst_index, inst_suffix, inst_name, logunit, nxg, nyg) - - if (nxg == 0 .and. nyg == 0) then - wav_present = .false. - wav_prognostic = .false. - else - wav_present = .true. - wav_prognostic = .true. 
- end if - - call seq_infodata_PutData( infodata, dead_comps=.true., wav_present=wav_present, & - wav_prognostic=wav_prognostic, wav_nx=nxg, wav_ny=nyg) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logunit) - - end subroutine wav_init_mct - - !=============================================================================== - subroutine wav_run_mct(EClock, cdata, x2d, d2x) - - ! !DESCRIPTION: run method for dead wav model - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- local --- - type(seq_infodata_type), pointer :: infodata - type(mct_gsMap) , pointer :: gsMap - type(mct_gGrid) , pointer :: ggrid - integer(IN) :: shrlogunit ! original log unit - integer(IN) :: shrloglev ! original log level - character(*), parameter :: subName = "(wav_run_mct) " - !------------------------------------------------------------------------------- - - ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit (logUnit) - - call seq_cdata_setptrs(cdata, & - gsMap=gsmap, & - dom=ggrid, & - infodata=infodata) - - call dead_run_mct('wav', EClock, x2d, d2x, & - gsmap, ggrid, gbuf, mpicom, compid, my_task, master_task, logunit) - - call shr_file_setLogUnit (shrlogunit) - call shr_file_setLogLevel(shrloglev) - call shr_sys_flush(logunit) - - end subroutine wav_run_mct - - !=============================================================================== - subroutine wav_final_mct(EClock, cdata, x2d, d2x) - - implicit none - - ! !DESCRIPTION: finalize method for dead model - - ! !INPUT/OUTPUT PARAMETERS: - type(ESMF_Clock) ,intent(inout) :: EClock ! clock - type(seq_cdata) ,intent(inout) :: cdata - type(mct_aVect) ,intent(inout) :: x2d ! driver -> dead - type(mct_aVect) ,intent(inout) :: d2x ! dead -> driver - - !--- formats --- - character(*), parameter :: subName = "(wav_final_mct) " - !------------------------------------------------------------------------------- - - call dead_final_mct('wav', my_task, master_task, logunit) - - end subroutine wav_final_mct - !=============================================================================== - -end module wav_comp_mct diff --git a/src/components/xcpl_comps/xwav/nuopc/wav_comp_nuopc.F90 b/src/components/xcpl_comps/xwav/nuopc/wav_comp_nuopc.F90 deleted file mode 100644 index 5e59effb785..00000000000 --- a/src/components/xcpl_comps/xwav/nuopc/wav_comp_nuopc.F90 +++ /dev/null @@ -1,532 +0,0 @@ -module wav_comp_nuopc - - !---------------------------------------------------------------------------- - ! 
This is the NUOPC cap for XWAV - !---------------------------------------------------------------------------- - - use ESMF - use NUOPC , only : NUOPC_CompDerive, NUOPC_CompSetEntryPoint, NUOPC_CompSpecialize - use NUOPC , only : NUOPC_CompAttributeGet, NUOPC_Advertise - use NUOPC_Model , only : model_routine_SS => SetServices - use NUOPC_Model , only : model_label_Advance => label_Advance - use NUOPC_Model , only : model_label_SetRunClock => label_SetRunClock - use NUOPC_Model , only : model_label_Finalize => label_Finalize - use NUOPC_Model , only : NUOPC_ModelGet - use shr_sys_mod , only : shr_sys_abort - use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit - use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck - use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance - use dead_nuopc_mod , only : dead_grid_lat, dead_grid_lon, dead_grid_index - use dead_nuopc_mod , only : dead_init_nuopc, dead_final_nuopc, dead_meshinit - use dead_nuopc_mod , only : fld_list_add, fld_list_realize, fldsMax, fld_list_type - use dead_nuopc_mod , only : ModelInitPhase, ModelSetRunClock - - implicit none - private ! except - - public :: SetServices - - !-------------------------------------------------------------------------- - ! Private module data - !-------------------------------------------------------------------------- - - character(len=CL) :: flds_scalar_name = '' - integer :: flds_scalar_num = 0 - integer :: flds_scalar_index_nx = 0 - integer :: flds_scalar_index_ny = 0 - integer :: flds_scalar_index_nextsw_cday = 0 - - integer :: fldsToWav_num = 0 - integer :: fldsFrWav_num = 0 - type (fld_list_type) :: fldsToWav(fldsMax) - type (fld_list_type) :: fldsFrWav(fldsMax) - integer, parameter :: gridTofieldMap = 2 ! ungridded dimension is innermost - - real(r8), pointer :: gbuf(:,:) ! model info - real(r8), pointer :: lat(:) - real(r8), pointer :: lon(:) - integer , allocatable :: gindex(:) - integer :: nxg ! global dim i-direction - integer :: nyg ! global dim j-direction - integer :: my_task ! my task in mpi communicator mpicom - integer :: inst_index ! number of current instance (ie. 1) - character(len=16) :: inst_suffix = "" ! char string associated with instance (ie. "_0001" or "") - integer :: logunit ! logging unit number - logical :: mastertask - integer :: dbug = 1 - character(*),parameter :: modName = "(xwav_comp_nuopc)" - character(*),parameter :: u_FILE_u = & - __FILE__ - -!=============================================================================== -contains -!=============================================================================== - - subroutine SetServices(gcomp, rc) - - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - character(len=*),parameter :: subname=trim(modName)//':(SetServices) ' - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! the NUOPC gcomp component will register the generic methods - call NUOPC_CompDerive(gcomp, model_routine_SS, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! switching to IPD versions - call ESMF_GridCompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, & - userRoutine=ModelInitPhase, phase=0, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! 
set entry point for methods that require specific implementation - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p1"/), & - userRoutine=InitializeAdvertise, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSetEntryPoint(gcomp, ESMF_METHOD_INITIALIZE, phaseLabelList=(/"IPDv01p3"/), & - userRoutine=InitializeRealize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ! attach specializing method(s) - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Advance, specRoutine=ModelAdvance, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_MethodRemove(gcomp, label=model_label_SetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_SetRunClock, specRoutine=ModelSetRunClock, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call NUOPC_CompSpecialize(gcomp, specLabel=model_label_Finalize, specRoutine=ModelFinalize, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - end subroutine SetServices - - - !=============================================================================== - - subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - type(ESMF_VM) :: vm - character(CS) :: stdname - integer :: n - integer :: lsize ! local array size - integer :: shrlogunit ! original log unit - character(CL) :: cvalue - character(len=CL) :: logmsg - logical :: isPresent, isSet - character(len=*),parameter :: subname=trim(modName)//':(InitializeAdvertise) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_GridCompGet(gcomp, vm=vm, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call ESMF_VMGet(vm, localpet=my_task, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - mastertask = (my_task == 0) - - !---------------------------------------------------------------------------- - ! determine instance information - !--------------------------------------------------------------------------- - - call get_component_instance(gcomp, inst_suffix, inst_index, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! set logunit and set shr logging to my log file - !---------------------------------------------------------------------------- - - call set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Initialize xwav - !---------------------------------------------------------------------------- - - call dead_init_nuopc('wav', inst_suffix, logunit, lsize, gbuf, nxg, nyg) - - allocate(gindex(lsize)) - allocate(lon(lsize)) - allocate(lat(lsize)) - - gindex(:) = gbuf(:,dead_grid_index) - lat(:) = gbuf(:,dead_grid_lat) - lon(:) = gbuf(:,dead_grid_lon) - - !-------------------------------- - ! 
advertise import and export fields - !-------------------------------- - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldName", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - flds_scalar_name = trim(cvalue) - call ESMF_LogWrite(trim(subname)//' flds_scalar_name = '//trim(flds_scalar_name), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldName') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldCount", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue, *) flds_scalar_num - write(logmsg,*) flds_scalar_num - call ESMF_LogWrite(trim(subname)//' flds_scalar_num = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldCount') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNX", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_nx - write(logmsg,*) flds_scalar_index_nx - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_nx = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNX') - endif - - call NUOPC_CompAttributeGet(gcomp, name="ScalarFieldIdxGridNY", value=cvalue, isPresent=isPresent, isSet=isSet, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (isPresent .and. isSet) then - read(cvalue,*) flds_scalar_index_ny - write(logmsg,*) flds_scalar_index_ny - call ESMF_LogWrite(trim(subname)//' : flds_scalar_index_ny = '//trim(logmsg), ESMF_LOGMSG_INFO) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - else - call shr_sys_abort(subname//'Need to set attribute ScalarFieldIdxGridNY') - endif - - if (nxg /= 0 .and. 
nyg /= 0) then - - call fld_list_add(fldsFrWav_num, fldsFrWav, trim(flds_scalar_name)) - call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_lamult' ) - call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_ustokes' ) - call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_vstokes' ) - call fld_list_add(fldsFrWav_num, fldsFrWav, 'Sw_hstokes' ) - - call fld_list_add(fldsToWav_num, fldsToWav, trim(flds_scalar_name)) - call fld_list_add(fldsToWav_num, fldsToWav, 'Sa_u' ) - call fld_list_add(fldsToWav_num, fldsToWav, 'Sa_v' ) - call fld_list_add(fldsToWav_num, fldsToWav, 'Sa_tbot' ) - call fld_list_add(fldsToWav_num, fldsToWav, 'Si_ifrac' ) - call fld_list_add(fldsToWav_num, fldsToWav, 'So_t' ) - call fld_list_add(fldsToWav_num, fldsToWav, 'So_u' ) - call fld_list_add(fldsToWav_num, fldsToWav, 'So_v' ) - call fld_list_add(fldsToWav_num, fldsToWav, 'So_bldepth' ) - - do n = 1,fldsFrWav_num - if (mastertask) write(logunit,*)'Advertising From Xwav ',trim(fldsFrWav(n)%stdname) - call NUOPC_Advertise(exportState, standardName=fldsFrWav(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - - do n = 1,fldsToWav_num - if(mastertask) write(logunit,*)'Advertising To Xwav ',trim(fldsToWav(n)%stdname) - call NUOPC_Advertise(importState, standardName=fldsToWav(n)%stdname, & - TransferOfferGeomObject='will provide', rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - enddo - end if - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !---------------------------------------------------------------------------- - ! Reset shr logging to original values - !---------------------------------------------------------------------------- - - call shr_file_setLogUnit (shrlogunit) - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState, exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! local variables - character(ESMF_MAXSTR) :: convCIM, purpComp - type(ESMF_Mesh) :: Emesh - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(InitializeRealize) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - !---------------------------------------------------------------------------- - ! Reset shr logging to my log file - !---------------------------------------------------------------------------- - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! generate the mesh - !-------------------------------- - - call dead_meshinit(gcomp, nxg, nyg, gindex, lon, lat, Emesh, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! realize the actively coupled fields, now that a mesh is established - ! NUOPC_Realize "realizes" a previously advertised field in the importState and exportState - ! by replacing the advertised fields with the newly created fields of the same name. 
- !-------------------------------- - - call fld_list_realize( & - state=ExportState, & - fldlist=fldsFrWav, & - numflds=fldsFrWav_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dwavExport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call fld_list_realize( & - state=importState, & - fldList=fldsToWav, & - numflds=fldsToWav_num, & - flds_scalar_name=flds_scalar_name, & - flds_scalar_num=flds_scalar_num, & - tag=subname//':dwavImport',& - mesh=Emesh, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call State_SetExport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nxg),flds_scalar_index_nx, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call State_SetScalar(dble(nyg),flds_scalar_index_ny, exportState, & - flds_scalar_name, flds_scalar_num, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! diagnostics - !-------------------------------- - - if (dbug > 1) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - -#ifdef USE_ESMF_METADATA - convCIM = "CIM" - purpComp = "Model Component Simulation Description" - call ESMF_AttributeAdd(comp, convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ShortName", "XWAV", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "LongName", "Wave Dead Model", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "Description", & - "The dead models stand in as test model for active components." // & - "Coupling data is artificially generated ", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ReleaseDate", "2017", convention=convCIM, purpose=purpComp, rc=rc) - call ESMF_AttributeSet(comp, "ModelType", "Wave", convention=convCIM, purpose=purpComp, rc=rc) -#endif - - call shr_file_setLogUnit (shrlogunit) - - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine InitializeRealize - - !=============================================================================== - - subroutine ModelAdvance(gcomp, rc) - - ! input/output variables - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! local variables - type(ESMF_Clock) :: clock - type(ESMF_State) :: exportState - integer :: shrlogunit ! original log unit - character(len=*),parameter :: subname=trim(modName)//':(ModelAdvance) ' - !------------------------------------------------------------------------------- - - rc = ESMF_SUCCESS - - if (dbug > 5) then - call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - end if - call memcheck(subname, 3, mastertask) - - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) - - !-------------------------------- - ! Pack export state - !-------------------------------- - - call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - call state_setexport(exportState, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - !-------------------------------- - ! 
diagnostics - !-------------------------------- - - if (dbug > 1) then - call State_diagnose(exportState,subname//':ES',rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if ( mastertask) then - call log_clock_advance(clock, 'XWAV', logunit, rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - endif - endif - - call shr_file_setLogUnit (shrlogunit) - - if (dbug > 5) then - call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - end if - - end subroutine ModelAdvance - - !=============================================================================== - - subroutine state_setexport(exportState, rc) - - ! input/output variables - type(ESMF_State) , intent(inout) :: exportState - integer , intent(out) :: rc - - ! local variables - integer :: nf, nind, nfstart, ubound - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - nfstart = 0 ! for fields that have ubound > 0 - do nf = 2,fldsFrWav_num ! Start from index 2 in order to skip the scalar field - ubound = fldsFrWav(nf)%ungridded_ubound - if (ubound == 0) then - call field_setexport(exportState, trim(fldsFrWav(nf)%stdname), lon, lat, nf=nf, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - else - nfstart = nfstart + nf + ubound - 1 - do nind = 1,ubound - call field_setexport(exportState, trim(fldsFrWav(nf)%stdname), lon, lat, nf=nfstart+nind-1, & - ungridded_index=nind, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - end do - end if - end do - - end subroutine state_setexport - - !=============================================================================== - - subroutine field_setexport(exportState, fldname, lon, lat, nf, ungridded_index, rc) - - use shr_const_mod , only : pi=>shr_const_pi - - ! intput/otuput variables - type(ESMF_State) , intent(inout) :: exportState - character(len=*) , intent(in) :: fldname - real(r8) , intent(in) :: lon(:) - real(r8) , intent(in) :: lat(:) - integer , intent(in) :: nf - integer, optional , intent(in) :: ungridded_index - integer , intent(out) :: rc - - ! local variables - integer :: i, ncomp - type(ESMF_Field) :: lfield - real(r8), pointer :: data1d(:) - real(r8), pointer :: data2d(:,:) - !-------------------------------------------------- - - rc = ESMF_SUCCESS - - call ESMF_StateGet(exportState, itemName=trim(fldname), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - - ncomp = 7 - if (present(ungridded_index)) then - call ESMF_FieldGet(lfield, farrayPtr=data2d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (gridToFieldMap == 1) then - do i = 1,size(data2d, dim=1) - data2d(i,ungridded_index) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - else if (gridToFieldMap == 2) then - do i = 1,size(data2d, dim=2) - data2d(ungridded_index,i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - else - call ESMF_FieldGet(lfield, farrayPtr=data1d, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - do i = 1,size(data1d) - data1d(i) = (nf*100) * cos(pi*lat(i)/180.0_R8) * & - sin((pi*lon(i)/180.0_R8) - (ncomp-1)*(pi/3.0_R8) ) + (ncomp*10.0_R8) - end do - end if - - end subroutine field_setexport - - !=============================================================================== - - subroutine ModelFinalize(gcomp, rc) - type(ESMF_GridComp) :: gcomp - integer, intent(out) :: rc - - ! 
local variables - character(len=*),parameter :: subname=trim(modName)//':(ModelFinalize) ' - !------------------------------------------------------------------------------- - - !-------------------------------- - ! Finalize routine - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) - - call dead_final_nuopc('wav', logunit) - - if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) - - end subroutine ModelFinalize - -end module wav_comp_nuopc diff --git a/src/drivers/mct/cime_config/buildexe b/src/drivers/mct/cime_config/buildexe deleted file mode 100755 index 288f93f4d21..00000000000 --- a/src/drivers/mct/cime_config/buildexe +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python - -""" -build model executable -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildlib import parse_input -from CIME.case import Case -from CIME.utils import expect, run_cmd -from CIME.build import get_standard_makefile_args - -logger = logging.getLogger(__name__) - -############################################################################### -def _main_func(): -############################################################################### - - caseroot, _, _ = parse_input(sys.argv) - - logger.info("Building a single executable version of target coupled model") - - with Case(caseroot) as case: - casetools = case.get_value("CASETOOLS") - cimeroot = case.get_value("CIMEROOT") - gmake = case.get_value("GMAKE") - gmake_j = case.get_value("GMAKE_J") - num_esp = case.get_value("NUM_COMP_INST_ESP") - ocn_model = case.get_value("COMP_OCN") - atm_model = case.get_value("COMP_ATM") - gmake_opts = get_standard_makefile_args(case) - - if ocn_model == 'mom' or atm_model == "fv3gfs": - gmake_opts += "USE_FMS=TRUE" - - - expect((num_esp is None) or (int(num_esp) == 1), "ESP component restricted to one instance") - - - with open('Filepath', 'w') as out: - out.write(os.path.join(caseroot, "SourceMods", "src.drv") + "\n") - out.write(os.path.join(cimeroot, "src", "drivers", "mct", "main") + "\n") - - # build model executable - - makefile = os.path.join(casetools, "Makefile") - exename = os.path.join(case.get_value("EXEROOT"), case.get_value("MODEL") + ".exe") - - cmd = "{gmake} exec_se -j {gmake_j} EXEC_SE={exename} MODEL=driver {gmake_opts} -f {makefile} ".format(gmake=gmake, gmake_j=gmake_j, exename=exename, - gmake_opts=gmake_opts, makefile=makefile) - - rc, out, _ = run_cmd(cmd, combine_output=True) - expect(rc==0,"Command %s failed rc=%d\nout=%s"%(cmd,rc,out)) - logger.info(out) - -############################################################################### - -if __name__ == "__main__": - _main_func() diff --git a/src/drivers/mct/cime_config/buildnml b/src/drivers/mct/cime_config/buildnml deleted file mode 100755 index 5606438fabe..00000000000 --- a/src/drivers/mct/cime_config/buildnml +++ /dev/null @@ -1,411 +0,0 @@ -#!/usr/bin/env python -"""Namelist creator for CIME's driver. -""" -# Typically ignore this. 
-# pylint: disable=invalid-name - -# Disable these because this is our standard setup -# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position - -import os, sys, glob, itertools, re - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.case import Case -from CIME.nmlgen import NamelistGenerator -from CIME.utils import expect, safe_copy -from CIME.utils import get_model, get_time_in_seconds, get_timestamp -from CIME.buildnml import create_namelist_infile, parse_input -from CIME.XML.files import Files -from CIME.XML.grids import Grids - -logger = logging.getLogger(__name__) - -############################################################################### -def _create_drv_namelists(case, infile, confdir, nmlgen, files): -############################################################################### - - #-------------------------------- - # Set up config dictionary - #-------------------------------- - config = {} - config['cime_model'] = get_model() - config['iyear'] = case.get_value('COMPSET').split('_')[0] - config['BGC_MODE'] = case.get_value("CCSM_BGC") - config['CPL_I2O_PER_CAT'] = case.get_value('CPL_I2O_PER_CAT') - config['COMP_RUN_BARRIERS'] = case.get_value('COMP_RUN_BARRIERS') - config['DRV_THREADING'] = case.get_value('DRV_THREADING') - config['CPL_ALBAV'] = case.get_value('CPL_ALBAV') - config['CPL_EPBAL'] = case.get_value('CPL_EPBAL') - config['FLDS_WISO'] = case.get_value('FLDS_WISO') - config['BUDGETS'] = case.get_value('BUDGETS') - config['MACH'] = case.get_value('MACH') - config['MPILIB'] = case.get_value('MPILIB') - config['MULTI_DRIVER'] = '.true.' if case.get_value('MULTI_DRIVER') else '.false.' - config['OS'] = case.get_value('OS') - config['glc_nec'] = 0 if case.get_value('GLC_NEC') == 0 else case.get_value('GLC_NEC') - config['single_column'] = 'true' if case.get_value('PTS_MODE') else 'false' - config['timer_level'] = 'pos' if case.get_value('TIMER_LEVEL') >= 1 else 'neg' - config['bfbflag'] = 'on' if case.get_value('BFBFLAG') else 'off' - config['continue_run'] = '.true.' if case.get_value('CONTINUE_RUN') else '.false.' 
- config['atm_grid'] = case.get_value('ATM_GRID') - config['compocn'] = case.get_value('COMP_OCN') - - if case.get_value('RUN_TYPE') == 'startup': - config['run_type'] = 'startup' - elif case.get_value('RUN_TYPE') == 'hybrid': - config['run_type'] = 'startup' - elif case.get_value('RUN_TYPE') == 'branch': - config['run_type'] = 'branch' - - #---------------------------------------------------- - # Initialize namelist defaults - #---------------------------------------------------- - nmlgen.init_defaults(infile, config) - - #-------------------------------- - # Overwrite: set brnch_retain_casename - #-------------------------------- - start_type = nmlgen.get_value('start_type') - if start_type != 'startup': - if case.get_value('CASE') == case.get_value('RUN_REFCASE'): - nmlgen.set_value('brnch_retain_casename' , value='.true.') - - #-------------------------------- - # Overwrite: set component coupling frequencies - #-------------------------------- - ncpl_base_period = case.get_value('NCPL_BASE_PERIOD') - if ncpl_base_period == 'hour': - basedt = 3600 - elif ncpl_base_period == 'day': - basedt = 3600 * 24 - elif ncpl_base_period == 'year': - if case.get_value('CALENDAR') == 'NO_LEAP': - basedt = 3600 * 24 * 365 - else: - expect(False, "Invalid CALENDAR for NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) - elif ncpl_base_period == 'decade': - if case.get_value('CALENDAR') == 'NO_LEAP': - basedt = 3600 * 24 * 365 * 10 - else: - expect(False, "invalid NCPL_BASE_PERIOD NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) - else: - expect(False, "invalid NCPL_BASE_PERIOD NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) - - if basedt < 0: - expect(False, "basedt invalid overflow for NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) - - comps = case.get_values("COMP_CLASSES") - mindt = basedt - for comp in comps: - ncpl = case.get_value(comp.upper() + '_NCPL') - if ncpl is not None: - cpl_dt = int(basedt / int(ncpl)) - totaldt = cpl_dt * int(ncpl) - if totaldt != basedt: - expect(False, " {} ncpl doesn't divide base dt evenly".format(comp)) - nmlgen.add_default(comp.lower() + '_cpl_dt', value=cpl_dt) - mindt = min(mindt, cpl_dt) - - # sanity check - comp_atm = case.get_value("COMP_ATM") - if comp_atm is not None and comp_atm not in('datm', 'xatm', 'satm'): - atmdt = int(basedt / case.get_value('ATM_NCPL')) - expect(atmdt == mindt, 'Active atm should match shortest model timestep atmdt={} mindt={}' - .format(atmdt, mindt)) - - #-------------------------------- - # Overwrite: set start_ymd - #-------------------------------- - run_startdate = "".join(str(x) for x in case.get_value('RUN_STARTDATE').split('-')) - nmlgen.set_value('start_ymd', value=run_startdate) - - #-------------------------------- - # Overwrite: set tprof_option and tprof_n - if tprof_total is > 0 - #-------------------------------- - # This would be better handled inside the alarm logic in the driver routines. - # Here supporting only nday(s), nmonth(s), and nyear(s). 
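# Worked example with hypothetical values: with STOP_OPTION='nmonths', STOP_N=12,
# TPROF_TOTAL=4, and STOP_DATE left unset (< 0), the block below converts the run
# length to days (12 * 30 = 360) and requests a timing profile every
# int(360 / 4) = 90 days, i.e. tprof_option='ndays', tprof_n=90.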
- - stop_option = case.get_value('STOP_OPTION') - if 'nyear' in stop_option: - tprofoption = 'ndays' - tprofmult = 365 - elif 'nmonth' in stop_option: - tprofoption = 'ndays' - tprofmult = 30 - elif 'nday' in stop_option: - tprofoption = 'ndays' - tprofmult = 1 - else: - tprofmult = 1 - tprofoption = 'never' - - tprof_total = case.get_value('TPROF_TOTAL') - if ((tprof_total > 0) and (case.get_value('STOP_DATE') < 0) and ('ndays' in tprofoption)): - stop_n = case.get_value('STOP_N') - stopn = tprofmult * stop_n - tprofn = int(stopn / tprof_total) - if tprofn < 1: - tprofn = 1 - nmlgen.set_value('tprof_option', value=tprofoption) - nmlgen.set_value('tprof_n' , value=tprofn) - - # Set up the pause_component_list if pause is active - pauseo = case.get_value('PAUSE_OPTION') - if pauseo is not None and pauseo != 'never' and pauseo != 'none': - pausen = case.get_value('PAUSE_N') - # Set esp interval - if 'nstep' in pauseo: - esp_time = mindt - else: - esp_time = get_time_in_seconds(pausen, pauseo) - - nmlgen.set_value('esp_cpl_dt', value=esp_time) - # End if pause is active - - #-------------------------------- - # (1) Write output namelist file drv_in and input dataset list. - #-------------------------------- - write_drv_in_file(case, nmlgen, confdir) - - #-------------------------------- - # (2) Write out seq_map.rc file - #-------------------------------- - write_seq_maps_file(case, nmlgen, confdir) - - #-------------------------------- - # (3) Construct and write out drv_flds_in - #-------------------------------- - write_drv_flds_in_file(case, nmlgen, files) - -############################################################################### -def write_drv_in_file(case, nmlgen, confdir): -############################################################################### - data_list_path = os.path.join(case.get_case_root(), "Buildconf", "cpl.input_data_list") - if os.path.exists(data_list_path): - os.remove(data_list_path) - namelist_file = os.path.join(confdir, "drv_in") - nmlgen.write_output_file(namelist_file, data_list_path ) - -############################################################################### -def write_seq_maps_file(case, nmlgen, confdir): -############################################################################### - # first determine if there are invalid idmap settings - # if source and destination grid are different, mapping file must not be "idmap" - gridvalue = {} - ignore_component = {} - exclude_list = ["CPL","ESP"] - for comp_class in case.get_values("COMP_CLASSES"): - if comp_class not in exclude_list: - gridvalue[comp_class.lower()] = case.get_value(comp_class + "_GRID" ) - if case.get_value(comp_class + "_GRID" ) == 'null': - ignore_component[comp_class.lower()] = True - else: - ignore_component[comp_class.lower()] = False - - # Currently, hard-wire values of mapping file names to ignore - # TODO: for rof2ocn_fmapname -needs to be resolved since this is currently - # used in prep_ocn_mod.F90 if flood_present is True - this is in issue #1908. 
- # The following is only appropriate for config_grids.xml version 2.0 or later
- grid_version = Grids().get_version()
- if grid_version >= 2.0:
- group_variables = nmlgen.get_group_variables("seq_maps")
- for name in group_variables:
- value = group_variables[name]
- if "mapname" in name:
- value = re.sub('\"', '', value)
- if 'idmap' == value:
- component1 = name[0:3]
- component2 = name[4:7]
- if not ignore_component[component1] and not ignore_component[component2]:
- if "rof2ocn_" in name:
- if case.get_value("COMP_OCN") == 'docn':
- logger.warning(" NOTE: ignoring setting of {}=idmap in seq_maps.rc".format(name))
- else:
- expect(gridvalue[component1] == gridvalue[component2],
- "Need to provide valid mapping file between {} and {} in xml variable {} ".\
- format(component1, component2, name))
-
- # now write out the file
- seq_maps_file = os.path.join(confdir, "seq_maps.rc")
- nmlgen.write_seq_maps(seq_maps_file)
-
-###############################################################################
-def write_drv_flds_in_file(case, nmlgen, files):
-###############################################################################
- # In the following, all values come simply from the infiles - no default values need to be added
- # FIXME - we may want to add the possibility of using a user definition file for drv_flds_in
-
- caseroot = case.get_value('CASEROOT')
-
- nmlgen.add_default('drv_flds_in_files')
- drvflds_files = nmlgen.get_default('drv_flds_in_files')
- infiles = []
- for drvflds_file in drvflds_files:
- infile = os.path.join(caseroot, drvflds_file)
- if os.path.isfile(infile):
- infiles.append(infile)
-
- if len(infiles) != 0:
- # First read the drv_flds_in files and make sure that
- # for any key there are not two conflicting values
- dicts = {}
- for infile in infiles:
- dict_ = {}
- with open(infile) as myfile:
- for line in myfile:
- if "=" in line and '!' 
not in line: - name, var = line.partition("=")[::2] - name = name.strip() - var = var.strip() - dict_[name] = var - dicts[infile] = dict_ - - for first,second in itertools.combinations(dicts.keys(),2): - compare_drv_flds_in(dicts[first], dicts[second], first, second) - - # Now create drv_flds_in - config = {} - definition_dir = os.path.dirname(files.get_value("NAMELIST_DEFINITION_FILE", attribute={"component":"drv"})) - definition_file = [os.path.join(definition_dir, "namelist_definition_drv_flds.xml")] - nmlgen = NamelistGenerator(case, definition_file, files=files) - skip_entry_loop = True - nmlgen.init_defaults(infiles, config, skip_entry_loop=skip_entry_loop) - drv_flds_in = os.path.join(caseroot, "CaseDocs", "drv_flds_in") - nmlgen.write_output_file(drv_flds_in) - -############################################################################### -def compare_drv_flds_in(first, second, infile1, infile2): -############################################################################### - sharedKeys = set(first.keys()).intersection(second.keys()) - for key in sharedKeys: - if first[key] != second[key]: - print('Key: {}, \n Value 1: {}, \n Value 2: {}'.format(key, first[key], second[key])) - expect(False, "incompatible settings in drv_flds_in from \n {} \n and \n {}".format(infile1, infile2)) - -############################################################################### -def _create_component_modelio_namelists(case, files): -############################################################################### - - # will need to create a new namelist generator - infiles = [] - definition_dir = os.path.dirname(files.get_value("NAMELIST_DEFINITION_FILE", attribute={"component":"drv"})) - definition_file = [os.path.join(definition_dir, "namelist_definition_modelio.xml")] - - confdir = os.path.join(case.get_value("CASEBUILD"), "cplconf") - lid = os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S") - - #if we are in multi-coupler mode the number of instances of cpl will be the max - # of any NINST_* value - maxinst = 1 - if case.get_value("MULTI_DRIVER"): - maxinst = case.get_value("NINST_MAX") - - for model in case.get_values("COMP_CLASSES"): - model = model.lower() - with NamelistGenerator(case, definition_file) as nmlgen: - config = {} - config['component'] = model - entries = nmlgen.init_defaults(infiles, config, skip_entry_loop=True) - if maxinst == 1 and model != 'cpl': - inst_count = case.get_value("NINST_" + model.upper()) - else: - inst_count = maxinst - - inst_string = "" - inst_index = 1 - while inst_index <= inst_count: - # determine instance string - if inst_count > 1: - inst_string = '_{:04d}'.format(inst_index) - - # set default values - for entry in entries: - nmlgen.add_default(entry) - - # overwrite defaults - moddiri = case.get_value('EXEROOT') + "/" + model - nmlgen.set_value('diri', moddiri) - - moddiro = case.get_value('RUNDIR') - nmlgen.set_value('diro', moddiro) - - logfile = model + inst_string + ".log." 
+ str(lid) - nmlgen.set_value('logfile', logfile) - - # Write output file - modelio_file = model + "_modelio.nml" + inst_string - nmlgen.write_modelio_file(os.path.join(confdir, modelio_file)) - - inst_index = inst_index + 1 - -############################################################################### -def buildnml(case, caseroot, component): -############################################################################### - if component != "drv": - raise AttributeError - - confdir = os.path.join(case.get_value("CASEBUILD"), "cplconf") - if not os.path.isdir(confdir): - os.makedirs(confdir) - - # NOTE: User definition *replaces* existing definition. - # TODO: Append instead of replace? - user_xml_dir = os.path.join(caseroot, "SourceMods", "src.drv") - - expect (os.path.isdir(user_xml_dir), - "user_xml_dir {} does not exist ".format(user_xml_dir)) - - files = Files(comp_interface="mct") - definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component": "drv"})] - - user_definition = os.path.join(user_xml_dir, "namelist_definition_drv.xml") - if os.path.isfile(user_definition): - definition_file = [user_definition] - - # Create the namelist generator object - independent of instance - nmlgen = NamelistGenerator(case, definition_file) - - # create cplconf/namelist - infile_text = "" - if case.get_value('COMP_ATM') == 'cam': - # cam is actually changing the driver namelist settings - cam_config_opts = case.get_value("CAM_CONFIG_OPTS") - if "aquaplanet" in cam_config_opts: - infile_text = "aqua_planet = .true. \n aqua_planet_sst = 1" - - user_nl_file = os.path.join(caseroot, "user_nl_cpl") - namelist_infile = os.path.join(confdir, "namelist_infile") - create_namelist_infile(case, user_nl_file, namelist_infile, infile_text) - infile = [namelist_infile] - - # create the files drv_in, drv_flds_in and seq_maps.rc - _create_drv_namelists(case, infile, confdir, nmlgen, files) - - # create the files comp_modelio.nml where comp = [atm, lnd...] 
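# Worked example with a hypothetical case: if MULTI_DRIVER is false and NINST_ATM=2,
# _create_component_modelio_namelists (defined above) formats inst_string as '_0001'
# and '_0002', writing atm_modelio.nml_0001 and atm_modelio.nml_0002 to the cplconf
# directory, with log file names atm_0001.log.<lid> and atm_0002.log.<lid>.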
- _create_component_modelio_namelists(case, files) - - # copy drv_in, drv_flds_in, seq_maps.rc and all *modio* fiels to rundir - rundir = case.get_value("RUNDIR") - - safe_copy(os.path.join(confdir,"drv_in"), rundir) - drv_flds_in = os.path.join(caseroot, "CaseDocs", "drv_flds_in") - if os.path.isfile(drv_flds_in): - safe_copy(drv_flds_in, rundir) - - safe_copy(os.path.join(confdir,"seq_maps.rc"), rundir) - - for filename in glob.glob(os.path.join(confdir, "*modelio*")): - safe_copy(filename, rundir) - -############################################################################### -def _main_func(): - caseroot = parse_input(sys.argv) - - with Case(caseroot) as case: - buildnml(case, caseroot, "drv") - -if __name__ == "__main__": - _main_func() diff --git a/src/drivers/mct/cime_config/config_archive.xml b/src/drivers/mct/cime_config/config_archive.xml deleted file mode 100644 index cd22504cbcd..00000000000 --- a/src/drivers/mct/cime_config/config_archive.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - r - hi\..*\.nc$ - ha\..*\.nc$ - h\w+\..*\.nc$ - unset - - rpointer$NINST_STRING.drv - $CASE.cpl$NINST_STRING.r.$DATENAME.nc - - - cpl_0001.log.5548574.chadmin1.180228-124723.gz - casename.cpl.r.1976-01-01-00000.nc - casename.cpl.hi.1976-01-01-00000.nc - casename.cpl.ha.1976-01-01-00000.nc - casename.cpl.ha2x1d.1976-01-01-00000.nc - casename.cpl.ha2x1h.1976-01-01-00000.nc - casename.cpl.hl2x1yr_glc.1976-01-01-00000.nc - rpointer.drv_0001 - rpointer.drv - casenamenot.cpl.r.1976-01-01-00000.nc - - - - diff --git a/src/drivers/mct/cime_config/config_component.xml b/src/drivers/mct/cime_config/config_component.xml deleted file mode 100644 index 24f33af6b52..00000000000 --- a/src/drivers/mct/cime_config/config_component.xml +++ /dev/null @@ -1,2951 +0,0 @@ - - - - - - - - - - char - CPL,ATM,LND,ICE,OCN,ROF,GLC,WAV,IAC,ESP - env_case.xml - case_comp - List of component classes supported by this driver - - - - char - cpl - cpl - case_comp - env_case.xml - Name of coupling component - - - - - - - - - char - $CIMEROOT/config_files.xml - case_def - env_case.xml - master configuration file that specifies all relevant filenames - and directories to configure a case - - - - - - - - char - UNSET - case_def - env_case.xml - full pathname of case - - - - logical - TRUE,FALSE - FALSE - case_def - env_case.xml - user is not on the requested machine - - - - char - $CASEROOT/Tools - case_der - env_case.xml - Case Tools directory location (derived variable, not in namelists - - - - char - $CASEROOT/Buildconf - case_der - env_case.xml - Buildconf directory location (derived variable not in namelist) - - - - char - $CIMEROOT/scripts - case_der - env_case.xml - Scripts root directory location (setup automatically to $CIMEROOT/scripts- DO NOT EDIT) - - - - char - UNSET - case_def - env_case.xml - full pathname of CIME source root directory - - - - char - $CIMEROOT/.. 
- case_def - env_case.xml - full pathname of source root directory - - - - char - $CIMEROOT/scripts/Tools - case_der - env_case.xml - Scripts root utils directory location (setup automatically to $CIMEROOT/scripts/Tools - DO NOT EDIT) - - - - - - - - char - UNSET - case_def - env_case.xml - case name - - - - char - UNSET - run_desc - env_run.xml - case description - - - - char - UNSET - case_last - env_case.xml - Component set long name (for documentation only - DO NOT EDIT) - - - - char - UNSET - build_grid - env_build.xml - Model grid - DO NOT EDIT (for experts only) - - - - char - UNSET - case_def - env_case.xml - current machine name support contact - - - - char - $ENV{USER} - case_desc - env_case.xml - case user name - - - - char - $ENV{USER} - case_desc - env_case.xml - username of user who created case - - - - - - - - char - startup,hybrid,branch - startup - run_begin_stop_restart - env_run.xml - - Determines the model run initialization type. - This setting is only important for the initial run of a production run when the - CONTINUE_RUN variable is set to FALSE. After the initial run, the CONTINUE_RUN - variable is set to TRUE, and the model restarts exactly using input - files in a case, date, and bit-for-bit continuous fashion. - Default: startup. - -- In a startup run (the default), all components are initialized - using baseline states. These baseline states are set independently by - each component and can include the use of restart files, initial - files, external observed data files, or internal initialization (i.e., - a cold start). In a startup run, the coupler sends the start date to - the components at initialization. In addition, the coupler does not - need an input data file. In a startup initialization, the ocean model - does not start until the second ocean coupling (normally the second - day). - -- In a branch run, all components are initialized using a consistent - set of restart files from a previous run (determined by the - RUN_REFCASE and RUN_REFDATE variables in env_run.xml). The case name - is generally changed for a branch run, although it does not have to - be. In a branch run, setting RUN_STARTDATE is ignored because the - model components obtain the start date from their restart datasets. - Therefore, the start date cannot be changed for a branch run. This is - the same mechanism that is used for performing a restart run (where - CONTINUE_RUN is set to TRUE in the env_run.xml) Branch runs are - typically used when sensitivity or parameter studies are required, or - when settings for history file output streams need to be modified - while still maintaining bit-for-bit reproducibility. Under this - scenario, the new case is able to produce an exact bit-for-bit restart - in the same manner as a continuation run IF no source code or - component namelist inputs are modified. All models use restart files - to perform this type of run. RUN_REFCASE and RUN_REFDATE are required - for branch runs. - To set up a branch run, locate the restart tar file or restart - directory for RUN_REFCASE and RUN_REFDATE from a previous run, then - place those files in the RUNDIR directory. - --- In a hybrid run the model is initialized as a startup, BUT uses - initialization datasets FROM A PREVIOUS case. This - is somewhat analogous to a branch run with relaxed restart - constraints. A hybrid run allows users to bring together combinations - of initial/restart files from a previous case (specified by - RUN_REFCASE) at a given model output date (specified by - RUN_REFDATE). 
Unlike a branch run, the starting date of a hybrid run - (specified by RUN_STARTDATE) can be modified relative to the reference - case. In a hybrid run, the model does not continue in a bit-for-bit - fashion with respect to the reference case. The resulting climate, - however, should be continuous provided that no model source code or - namelists are changed in the hybrid run. In a hybrid initialization, - the ocean model does not start until the second ocean coupling - (normally the second day), and the coupler does a cold start without - a restart file. - - - - - char - cesm2_init - run_begin_stop_restart - env_run.xml - - Reference directory containing RUN_REFCASE data - used for hybrid or branch runs - - - - - char - case.std - run_begin_stop_restart - env_run.xml - - Reference case for hybrid or branch runs - - - - - char - 0001-01-01 - run_begin_stop_restart - env_run.xml - - Reference date for hybrid or branch runs (yyyy-mm-dd) - - - - - char - 00000 - run_begin_stop_restart - env_run.xml - - Reference time of day (seconds) for hybrid or branch runs (sssss) - - - - - logical - TRUE,FALSE - FALSE - run_begin_stop_restart - env_run.xml - - Flag for automatically prestaging the refcase restart dataset. - If TRUE, then the refcase data is prestaged into the executable directory - - - - - char - 0001-01-01 - run_begin_stop_restart - env_run.xml - - Run start date (yyyy-mm-dd). Only used for startup or hybrid runs. - - - - - integer - 0 - run_begin_stop_restart - env_run.xml - - Run start time-of-day - - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear,date,ifdays0,end - ndays - run_begin_stop_restart - env_run.xml - - Sets the run length along with STOP_N and STOP_DATE - - - - - integer - 5 - run_begin_stop_restart - env_run.xml - - Provides a numerical count for $STOP_OPTION. - - - - - integer - -999 - run_begin_stop_restart - env_run.xml - - Alternative date yyyymmdd date option, sets the run length with STOP_OPTION and STOP_N - negative value implies off - - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear,date,ifdays0,end - $STOP_OPTION - run_begin_stop_restart - env_run.xml - - sets frequency of model restart writes (same options as STOP_OPTION) - - - - - integer - $STOP_N - run_begin_stop_restart - env_run.xml - - sets model restart writes with REST_OPTION and REST_DATE - - - - - char - $STOP_DATE - run_begin_stop_restart - env_run.xml - - Alternative date in yyyymmdd format - sets model restart write date with REST_OPTION and REST_N - - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear - never - run_begin_stop_restart - env_run.xml - - Sets the pause frequency along with PAUSE_N - - - - - integer - 0 - run_begin_stop_restart - env_run.xml - - Provides a numerical count for $PAUSE_OPTION. - - - - - logical - TRUE,FALSE - run_begin_stop_restart - env_run.xml - - Pause the model at times specified by PAUSE_OPTION and PAUSE_N. - Components 'pause' by writing a restart file. 
- - - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - - - - - char - 1 - run_begin_stop_restart - env_run.xml - - Sets periodic model barriers with BARRIER_OPTION and BARRIER_DATE for synchronization - - - - - char - -999 - run_begin_stop_restart - env_run.xml - - Alternative date in yyyymmdd format - sets periodic model barriers with BARRIER_OPTION and BARRIER_N for synchronization - - - - - logical - TRUE,FALSE - FALSE - run_begin_stop_restart - env_run.xml - - ESP component runs after driver 'pause cycle' If any component - 'pauses' (see PAUSE_OPTION, - PAUSE_N and PAUSE_ACTIVE_XXX XML variables), - the ESP component (if present) will be run to process the - component 'pause' (restart) files and set any required 'resume' - signals. If true, esp_cpl_dt and esp_cpl_offset settings are - ignored. default: false - - - - - logical - TRUE,FALSE - FALSE - run_begin_stop_restart - env_run.xml - - A setting of TRUE implies a continuation run - When you first begin a branch, hybrid or startup run, CONTINUE_RUN - must be set to FALSE. When you successfully run and get a restart - file, you will need to change CONTINUE_RUN to TRUE for the remainder - of your run. This variable determines if the run is a restart run. - Set to FALSE when initializing a startup, branch or hybrid case. - Set to TRUE when continuing a run. - - - - - integer - 0 - run_begin_stop_restart - env_run.xml - If RESUBMIT is greater than 0, then case will automatically resubmit - Enables the model to automatically resubmit a new run. To get - multiple runs, set RESUBMIT greater than 0, then RESUBMIT will be - decremented and the case will be resubmitted. The case will stop automatically - resubmitting when the RESUBMIT value reaches 0. - Long runs can easily outstrip supercomputer queue time limits. For - this reason, a case is usually run as a series of jobs, each - restarting where the previous finished. - - - - - logical - TRUE - run_begin_stop_restart - env_run.xml - This flag controls whether the RESUBMIT flag causes - CONTINUE_RUN to toggle from FALSE to TRUE. The default is - TRUE. This flag might be used in conjunction with COMP_RUN_BARRIERS for - timing tests. - - - - - char - - run_begin_stop_restart - env_run.xml - List of job ids for most recent case.submit - - - - - - - - logical - TRUE,FALSE - FALSE - run_data_archive - env_run.xml - Logical to turn on short term archiving. - If TRUE, short term archiving will be turned on. - - - - integer - 900 - run_data_archive - env_run.xml - system workload snapshot frequency (in seconds, if greater than 0; disabled otherwise) - - - - - - - - char - UNSET - config_batch - env_mach_specific.xml - The environment variables that will be loaded for this machine - - - - char - none - nersc_slurm,lc_slurm,moab,pbs,lsf,slurm,cobalt,cobalt_theta,none - config_batch - env_batch.xml - The batch system type to use for this machine. - - - - char - UNSET - config_batch - env_mach_specific.xml - The individual environment variable entry for config_machines - - - - char - UNSET - config_batch - env_mach_specific.xml - The limits tag - - - - char - UNSET - config_batch - env_mach_specific.xml - The individual limit variable - - - - - - - - char - - build_derived - env_build.xml - Perl 5 library directory - - - - - - - - - - char - - UNSET - build_def - env_build.xml - Output root directory for each machine. - Base directory for build and run directories. - - - - - char - - $CIME_OUTPUT_ROOT/$CASE/bld - build_def - env_build.xml - Case executable root directory. 
- (executable is $EXEROOT/$MODEL.exe, component libraries are in $EXEROOT/lib) - This is where the model builds its executable and by default runs the executable. - Note that EXEROOT needs to have enough disk space for the experimental configuration - requirements. As an example, a model run can produce more than a terabyte of - data during a 100-year run, so you should set EXEROOT to scratch or - tmp space and frequently back up the data to a long term archiving storage device - For a supported machine, EXEROOT is set in $CIMEROOT/machines/config_machines.xml. - For a userdefined machine, EXEROOT must explicitly be set it in env_build.xml. - - - - char - - USERDEFINED_required_macros - build_macros - env_build.xml - Operating system - DO NOT EDIT UNLESS for userdefined machine - ignored once Macros has been created. - - - - char - - - build_macros - env_build.xml - Machine compiler (must match one the supported compilers) - Set in $CIMEROOT/machines/config_machines.xml for each supported machine. - Must be explicitly set in env_build.xml for userdefined machine. - - - - char - - USERDEFINED_required_macros - build_macros - env_build.xml - mpi library (must match one of the supported libraries) - - ignored once Macros has been created - Set in $CIMEROOT/machines/config_machines.xml for each supported machine. - Must be explicitly set in env_build.xml for userdefined machine. - - - - char - NO_LEAP,GREGORIAN - NO_LEAP - build_def - env_build.xml - calendar type - - - - char - mct,nuopc,moab - mct - build_def - env_build.xml - use MCT component interface - - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies using the ESMF library specified by ESMF_LIBDIR or ESMFMKFILE - - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies turning on run and compile time debugging - Flag to turn on debugging for run time and compile time. - If TRUE, compile-time debugging flags are activated that you can use to verify - software robustness, such as bounds checking. - Important:: On IBM machines, floating point trapping is not activated for production - runs (i.e., non-DEBUG), due to performance penalties associated with turning on these flags. - - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies always build model for openmp capability - If FALSE, component libraries are built with OpenMP capability only if - the NTHREADS_ setting for that component is greater than 1 in env_mach_pes.xml. - If TRUE, the component libraries are always built with OpenMP capability. - - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies that at least one of the components is built threaded (DO NOT EDIT) - - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies linking to the PETSc library - set - automatically by XXX_USE_PETSC options (do not edit). Flag to turn - on linking to the PETSc library. Currently this is used by - CLM. This is currently only supported for certain machines. - - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies linking to the Albany library - set - automatically by XXX_USE_ALBANY options (do not edit). Flag to - turn on linking to the Albany library. Currently this is used by - MALI. Note that Albany is a C++ library, so setting this - variable to TRUE will involve the inclusion of C++ code in the - MALI executable. This is currently only supported for certain - machines. 
- - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies linking to the MOAB library - - - - logical - TRUE,FALSE - FALSE - build_def - env_build.xml - TRUE implies linking to the trilinos library - set automatically by XXX_USE_TRILINOS options (do not edit) - Flag to turn on linking to the trilinos library. Currently this is - used by CISM. Note that trilinos is a C++ library, so setting this - variable to TRUE will involve the inclusion of C++ code in the model - executable. This is currently only supported for certain machines. - - - - char - - gmake - build_def - env_run.xml - GNU make command - - - - integer - - 1 - build_def - env_run.xml - Number of processors for gmake - - - - logical - TRUE,FALSE - FALSE - build_status - env_build.xml - Status output: if TRUE, models have been built successfully. (DO NOT EDIT)> - - - - char - - 0 - build_status - env_build.xml - Status: smp status of previous build, coded string. (DO NOT EDIT) - - - - char - - 0 - build_status - env_build.xml - Status: smp status of current case, coded string (DO NOT EDIT) - - - - char - - 0 - build_status - env_build.xml - Status: ninst status of previous build, coded string. (DO NOT EDIT)> - - - - char - - 0 - build_status - env_build.xml - Status: ninst status of current case, coded string (DO NOT EDIT) - - - - integer - 0,1,2 - 0 - build_status - env_build.xml - Status: of prior build. (DO NOT EDIT) - - - - char - - $EXEROOT - build_derived - env_build.xml - case build directory (set automatically to $EXEROOT, - DO NOT EDIT) - - - - char - - $EXEROOT/lib - build_derived - env_build.xml - case lib directory (set automatically to $EXEROOT/lib - DO NOT EDIT) - - - - char - - $EXEROOT/lib/include - build_derived - env_build.xml - case lib include directory (set automatically to $EXEROOT/lib/include - DO NOT EDIT) - - - - char - - $EXEROOT - build_derived - env_build.xml - Shared library root, (set automatically to $EXEROOT - DO NOT EDIT) - - - - - - - - logical - TRUE,FALSE - TRUE - run_flags - env_run.xml - logical to diagnose model timing at the end of the run - - - - logical - TRUE,FALSE - FALSE - run_flags - env_run.xml - Enables the papi hardware counters in gptl - The papi library must be included in the build step for - this to work. - - - - char - ESMF_LOGKIND_SINGLE,ESMF_LOGKIND_MULTI,ESMF_LOGKIND_MULTI_ON_ERROR,ESMF_LOGKIND_NONE - ESMF_LOGKIND_NONE - run_flags - env_run.xml - - Determines what ESMF log files (if any) are generated when - USE_ESMF_LIB is TRUE. - ESMF_LOGKIND_SINGLE: Use a single log file, combining messages from - all of the PETs. Not supported on some platforms. - ESMF_LOGKIND_MULTI: Use multiple log files -- one per PET. - ESMF_LOGKIND_NONE: Do not issue messages to a log file. - By default, no ESMF log files are generated. - - - - - logical - TRUE,FALSE - FALSE - run_flags - env_run.xml - Turns on component barriers for component timing. - This variable is for testing and debugging only and should never - be set for a production run. 
- - - - - integer - 0 - mach_pes_last - env_mach_pes.xml - pes or cores used relative to MAX_MPITASKS_PER_NODE for accounting (0 means TOTALPES is valid) - - - - - - - - char - UNSET - build_grid - env_build.xml - atmosphere grid - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of atmosphere cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of atmosphere cells in j direction - DO NOT EDIT (for experts only) - - - - char - UNSET - build_grid - env_build.xml - land grid - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of land cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of land cells in j direction - DO NOT EDIT (for experts only) - - - - char - UNSET - build_grid - env_build.xml - ocn grid - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of ocn cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of ocn cells in j direction - DO NOT EDIT (for experts only) - - - - char - UNSET - build_grid - env_build.xml - ice grid (must equal ocn grid) - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of ice cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of ice cells in j direction - DO NOT EDIT (for experts only) - - - - integer - 1 - build_grid - env_build.xml - number of ice thickness categories - DO NOT EDIT (set by CICE configure) - - - - char - UNSET - build_grid - env_build.xml - river runoff (rof) grid - - - - integer - 0 - build_grid - env_build.xml - number of rof cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of rof cells in j direction - DO NOT EDIT (for experts only) - - - - char - gland20,gland10,gland5,gland5UM,gland4,mpas.aisgis20km,mpas.gis20km,mpas.ais20km,null - gland5UM - build_grid - env_build.xml - glacier (glc) grid - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of glc cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of glc cells in j direction - DO NOT EDIT (for experts only) - - - - - char - UNSET - build_grid - env_build.xml - wave model (wav) grid - - - - integer - 0 - build_grid - env_build.xml - number of wav cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of wav cells in j direction - DO NOT EDIT (for experts only) - - - - char - UNSET - build_grid - env_build.xml - iac model (iac) grid - - - - integer - 0 - build_grid - env_build.xml - number of iac cells in i direction - DO NOT EDIT (for experts only) - - - - integer - 0 - build_grid - env_build.xml - number of iac cells in j direction - DO NOT EDIT (for experts only) - - - - char - UNSET - build_grid - env_build.xml - grid mask - DO NOT EDIT (for experts only) - - - - logical - TRUE,FALSE - FALSE - run_domain - env_run.xml - Operate on only a single point of the global grid - DO NOT EDIT (for experts only) - - - - real - -999.99 - run_domain - env_run.xml - Latitude to find nearest points for points mode (only used if PTS_MODE is TRUE) - - - - real - -999.99 - run_domain - env_run.xml - Longitude to find nearest points for points mode (only used if PTS_MODE is TRUE) - - - - - - - 
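The single-point entries above (PTS_MODE and its associated nearest-point latitude/longitude) select the one grid cell closest to a requested location. A minimal sketch of that nearest-point idea, with hypothetical coordinates and no claim to match CIME's actual implementation:

import numpy as np

# Hypothetical 1-D arrays of cell-center coordinates for a global grid.
grid_lat = np.array([-60.0, 0.0, 40.0, 75.0])
grid_lon = np.array([10.0, 90.0, 180.0, 270.0])

pts_lat, pts_lon = 42.0, 185.0  # stand-ins for the requested point

# Simple planar distance in degrees (ignores longitude wraparound); pick the
# cell with the smallest squared offset from the requested point.
dist2 = (grid_lat - pts_lat) ** 2 + (grid_lon - pts_lon) ** 2
nearest = int(np.argmin(dist2))
print("nearest cell index:", nearest)  # -> 2 for these toy values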
- char - UNSET - run_domain - env_run.xml - atm domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of atm domain file - - - - char - UNSET - run_domain - env_run.xml - lnd domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of lnd domain file - - - - char - UNSET - run_domain - env_run.xml - rof domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of rof domain file - - - - char - UNSET - run_domain - env_run.xml - wav domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of wav domain file - - - - char - UNSET - run_domain - env_run.xml - iac domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of iac domain file - - - - char - UNSET - run_domain - env_run.xml - ice domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of ice domain file - - - - char - UNSET - run_domain - env_run.xml - ocn domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of ocn domain file - - - - char - UNSET - run_domain - env_run.xml - glc domain file - - - - char - $DIN_LOC_ROOT/share/domains - run_domain - env_run.xml - path of glc domain file - - - - - - - char - idmap - run_domain - env_run.xml - atm2ocn flux mapping file - - - - char - X,Y - X - run_domain - env_run.xml - atm2ocn flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - atm2ocn state mapping file - - - - char - X,Y - X - run_domain - env_run.xml - atm2ocn state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - atm2ocn vector mapping file - - - - char - X,Y - X - run_domain - env_run.xml - atm2ocn vector mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - atm2lnd flux mapping file - - - - char - X,Y - X - run_domain - env_run.xml - atm2lnd flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - atm2lnd state mapping file - - - - char - X,Y - X - run_domain - env_run.xml - atm2lnd state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - atm2wav state mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - atm2wav state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - ocn2atm flux mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - ocn2atm flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - ocn2atm state mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - ocn2atm state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - lnd2atm flux mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - lnd2atm flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - lnd2atm state mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - lnd2atm state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - lnd2glc flux mapping file - - - - char - X,Y - X - run_domain - env_run.xml - lnd2glc flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - lnd2glc state mapping file - - - - char - X,Y - X - run_domain - env_run.xml - lnd2glc state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - lnd2rof flux mapping file - - - - char - X,Y - X - run_domain - env_run.xml - lnd2rof flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - rof2lnd flux mapping file - - - - 
char - X,Y - Y - run_domain - env_run.xml - rof2lnd flux mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - rof2ocn flux mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - rof2ocn flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - rof2ocn runoff mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - rof2ocn runoff mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - rof2ocn runoff mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - rof2ocn runoff mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - glc2lnd flux mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - glc2lnd flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - glc2lnd state mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - glc2lnd state mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - glc2ice flux mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - X - run_domain - env_run.xml - glc2ice flux mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - glc2ice state mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - X - run_domain - env_run.xml - glc2ice state mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - glc2ice runoff mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - glc2ice runoff mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - glc2ocn flux mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - glc2ocn flux mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - glc2ocn state mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - glc2ocn state mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - glc2ocn runoff mapping file for liquid runoff - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - glc2ocn runoff mapping file decomp type for liquid runoff - - - - char - idmap_ignore - run_domain - env_run.xml - glc2ocn runoff mapping file for ice runoff - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - glc2ocn runoff mapping file decomp type for ice runoff - - - - char - idmap_ignore - run_domain - env_run.xml - ocn2glc 
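Several of the mapping-file entries above default to idmap_ignore, which buildnml skips but which becomes a runtime error if the compset actually requires a mapping file. A small sketch of that rule, using hypothetical names rather than the real buildnml logic:

def check_mapping(name, value, file_required):
    # 'idmap' selects the identity map; 'idmap_ignore' is skipped by buildnml
    # but must not be left in place when a real mapping file is required.
    if value == "idmap_ignore":
        if file_required:
            raise RuntimeError(f"{name}: a mapping file is required for this compset")
        return None
    return value  # 'idmap' or an actual mapping file path

check_mapping("rof2ocn_fmapname", "idmap_ignore", file_required=False)  # ignored, no error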
flux mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - ocn2glc flux mapping file decomp type - - - - char - idmap_ignore - run_domain - env_run.xml - ocn2glc state mapping file - the default value idmap_ignore, if set, will be ignored by buildnml and - will generate a runtime error if in fact a file is required for the given compset - - - - char - X,Y - Y - run_domain - env_run.xml - ocn2glc state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - ocn2wav state mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - ocn2wav state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - ice2wav state mapping file - - - - char - X,Y - Y - run_domain - env_run.xml - ice2wav state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - wav2ocn state mapping file - - - - char - X,Y - X - run_domain - env_run.xml - wav2ocn state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - iac2atm flux mapping file - - - - char - X,Y - X - run_domain - env_run.xml - iac2atm flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - iac2atm state mapping file - - - - char - X,Y - X - run_domain - env_run.xml - iac2atm state mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - iac2lnd flux mapping file - - - - char - X,Y - X - run_domain - env_run.xml - iac2lnd flux mapping file decomp type - - - - char - idmap - run_domain - env_run.xml - iac2lnd state mapping file - - - - char - X,Y - X - run_domain - env_run.xml - iac2lnd state mapping file decomp type - - - - char - none,npfix,cart3d,cart3d_diag,cart3d_uvw,cart3d_uvw_diag - cart3d - run_domain - env_run.xml - vector mapping option - - - - char - 1.0e-02 - run_domain - env_run.xml - Error tolerance for differences in fractions in domain checking - - - - real - 9.0e-07 - run_domain - env_run.xml - Error tolerance for differences in atm/land areas in domain checking - - - - real - 1.0e-13 - run_domain - env_run.xml - Error tolerance for differences in atm/land masks in domain checking - - - - real - 1.0e-12 - run_domain - env_run.xml - Error tolerance for differences in atm/land lat/lon in domain checking - - - - real - 1.0e-01 - run_domain - env_run.xml - Error tolerance for differences in ocean/ice lon/lat in domain checking - - - - real - 1.0e-06 - run_domain - env_run.xml - Error tolerance for differences in ocean/ice lon/lat in domain checking - - - - real - 1.0e-01 - run_domain - env_run.xml - Error tolerance for differences in ocean/ice lon/lat in domain checking - - - - - - - - char - UNSET - case_def - env_case.xml - Machine name - - - - char - - case_def - env_case.xml - Machines directory location - - - - char - $CIME_OUTPUT_ROOT/$CASE/run - run_desc - env_run.xml - - The directory where the executable will be run. - By default this is set to EXEROOT/../run. - RUNDIR allows you to keep the run directory separate from the build directory - - - - - char - UNSET - run_din - env_run.xml - - A regular expression to match machine node names to ACME machine. - - - - - char - run_din - env_run.xml - - A regular expression to search for an indication that a run failure - was caused by a node failure and should therefore be re-attempted. 
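The run_domain entries above also define the per-quantity error tolerances used in domain checking (for example 1.0e-02 for fractions and 9.0e-07 for atm/land areas). A toy illustration of how such a tolerance is applied when two components' domain fields are compared; this is not the actual domain-check code:

def within_tolerance(name, values_a, values_b, eps):
    # Report the largest absolute difference and whether it stays below the
    # configured tolerance, in the spirit of the domain-checking entries above.
    diff = max(abs(a - b) for a, b in zip(values_a, values_b))
    print(f"{name}: max diff {diff:.3e} vs tolerance {eps:.0e}")
    return diff <= eps

# e.g. atm vs lnd cell fractions checked against the 1.0e-02 tolerance
within_tolerance("frac", [0.25, 1.0, 0.5], [0.251, 1.0, 0.502], 1.0e-02)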
- - - - - char - run_din - env_run.xml - - A regular expression to search for an indication that a run failure - was caused by a known event such as a timeout and should therefore - be re-attempted. - - - - - integer - 0 - run_din - env_run.xml - The number of times to reattempt the mpirun command if - MPIRUN_RETRY_REGEX is matched but ALLOCATE_SPARE_NODES is False - - - - - char - UNSET - run_din - env_run.xml - - Proxy (if any) setting for http_proxy to allow web access on this machine. - - - - - logical - FALSE - run_din - env_run.xml - - Indicates to case.submit that this is a test case. - - - - - char - UNSET - run_din - env_run.xml - - The root directory of all CIME and component input data for the selected machine. - This is usually a shared disk area. - Default values for the target machine are in the - $CIMEROOT/machines/config_machines.xml - - - - - char - UNSET - run_din - env_run.xml - CLM-specific root directory for CLM type input forcing data - This directory will only be used for I (CLM/DATM) compsets and only - for datm forcing data that is NOT checked into the svn repository - (datasets other than the Qian or single-point forcing). - This is usually a shared disk area. - Default values for the target machine are in the - $CIMEROOT/machines/config_machines.xml - - - - char - UNSET - run_dout - env_run.xml - Root directory for short term archiving. This directory must be visible to compute nodes. - - - - char - UNSET - run_mpi - env_run.xml - override the mpi run command, do not include model executable - - - - - - - - logical - TRUE,FALSE - FALSE - mach_pes - env_mach_pes.xml - Allocate some spare nodes to handle node failures. The system will pick a reasonable number - - - - integer - -999 - mach_pes - env_mach_pes.xml - Force this exact number of spare nodes to be allocated - - - - integer - - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - $MAX_MPITASKS_PER_NODE - - mach_pes - env_mach_pes.xml - number of tasks for each component - - - - integer - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - mach_pes - env_mach_pes.xml - Number of tasks per instance for each component. DO NOT EDIT: Set automatically by case.setup based on NTASKS, NINST and MULTI_DRIVER - - - - integer - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - mach_pes - env_mach_pes.xml - number of threads for each task in each component - - - - integer - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - mach_pes - env_mach_pes.xml - ROOTPE (mpi task in MPI_COMM_WORLD) for each component - - - - logical - FALSE - TRUE,FALSE - mach_pes - env_mach_pes.xml - MULTI_DRIVER mode provides a separate driver/coupler component for each - ensemble member. All components must have an equal number of members. If - MULTI_DRIVER mode is False prognostic components must have the same number - of members but data or stub components may also have 1 member. - - - - integer - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - mach_pes - env_mach_pes.xml - Number of instances for each component. If MULTI_DRIVER is True - the NINST_MAX value will be used. 
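The mach_pes entries above (NTASKS, NTHRDS, ROOTPE, and the MAX_MPITASKS_PER_NODE defaults) together determine a layout's processor footprint, which case.setup records in the derived variables further below. A back-of-the-envelope sketch of that arithmetic, assuming a stride of 1 and ignoring spare nodes; this is not CIME's exact accounting:

import math

def layout_footprint(rootpe, ntasks, nthrds, tasks_per_node):
    # Highest MPI rank used across components, plus a rough node estimate.
    total_tasks = max(r + n for r, n in zip(rootpe, ntasks))
    max_threads = max(nthrds)
    nodes = math.ceil(total_tasks * max_threads / tasks_per_node)
    return total_tasks, nodes

# Hypothetical three-component layout on 128-way nodes
print(layout_footprint(rootpe=[0, 0, 64], ntasks=[64, 64, 64], nthrds=[2, 2, 1], tasks_per_node=128))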
- - - - - char - sequential,concurrent - - concurrent - concurrent - concurrent - concurrent - concurrent - concurrent - concurrent - concurrent - concurrent - - mach_pes - env_mach_pes.xml - Layout of component instances for each component - - - - integer - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - mach_pes - env_mach_pes.xml - The mpi global processors stride associated with the mpi tasks for the a component - - - - - integer - 0 - mach_pes_last - env_mach_pes.xml - total number of physical cores used (setup automatically - DO NOT EDIT) - - - - integer - 0 - mach_pes_last - env_mach_pes.xml - maximum number of tasks/ threads allowed per node - - - - integer - 0 - mach_pes_last - env_mach_pes.xml - pes or cores per node for mpitasks - - - - integer - $MAX_MPITASKS_PER_NODE - mach_pes_last - env_mach_pes.xml - pes or cores per node for accounting purposes - - - - - - - - integer - 1 - 1,2 - build_macros - env_build.xml - PIO library version - - - - char - - build_macros - env_build.xml - PIO configure options, see PIO configure utility for details - - - - logical - TRUE,FALSE - FALSE - run_pio - env_run.xml - TRUE implies perform asynchronous i/o - - - - char - p2p,coll,default - p2p - run_pio - env_run.xml - pio rearranger communication type - - - - char - 2denable,io2comp,comp2io,disable,default - 2denable - run_pio - env_run.xml - pio rearranger communication flow control direction - - - - integer - - 0 - run_pio - env_run.xml - pio rearranger communication max pending requests (comp2io) : 0 implies that CIME internally calculates the value ( = max(64, 2 * PIO_NUMTASKS) ), -1 implies no bound on max pending requests - - - - logical - TRUE,FALSE - TRUE - run_pio - env_run.xml - pio rearranger communiation options (comp2io) : TRUE implies enable handshake - - - - logical - TRUE,FALSE - FALSE - run_pio - env_run.xml - pio rearranger communiation options (comp2io) : TRUE implies enable isend - - - - integer - - 64 - run_pio - env_run.xml - pio rearranger communication max pending requests (io2comp) : -1 implies no bound on max pending requests - - - - - - logical - TRUE,FALSE - FALSE - run_pio - env_run.xml - pio rearranger communiation options (io2comp) : TRUE implies enable handshake - - - - logical - TRUE,FALSE - TRUE - run_pio - env_run.xml - pio rearranger communiation options (io2comp) : TRUE implies enable isend - - - - - integer - 0 - run_pio - env_run.xml - pio debug level - - - - integer - -1 - run_pio - env_run.xml - pio blocksize for box decompositions - - - - integer - -1 - run_pio - env_run.xml - pio buffer size limit for pnetcdf output - - - - char - netcdf,pnetcdf,netcdf4p,netcdf4c,default - run_pio - env_run.xml - pio io type - - default - default - default - default - default - default - default - default - default - default - - - - - char - classic,64bit_offset,64bit_data - run_pio - env_run.xml - pio netcdf format (ignored for netcdf4p and netcdf4c) - https://www.unidata.ucar.edu/software/netcdf/docs/data_type.html - - - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - - - - - integer - run_pio - env_run.xml - - stride in compute comm of io tasks for each component, if this value is -99 it will - be computed based on PIO_NUMTASKS and number of compute tasks - - - - - - - - - - - - - - - - - integer - 1,2 - run_pio - env_run.xml - pio rearranger choice box=1, subset=2 - - $PIO_VERSION - - - - - - - - - - - - - - - integer - run_pio - env_run.xml - pio root processor 
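Two of the PIO entries above use sentinel values: PIO_NUMTASKS and PIO_STRIDE are computed when set to -99, and a comp2io max-pending-requests value of 0 means CIME picks max(64, 2 * PIO_NUMTASKS), while -1 means no bound. A simplified sketch of resolving those sentinels; the fallback stride of 4 is an assumption of this sketch, not a documented default:

def resolve_pio_numtasks(pio_numtasks, pio_stride, comp_ntasks):
    # -99 means "derive from the other setting and the number of compute tasks".
    if pio_numtasks == -99:
        stride = pio_stride if pio_stride > 0 else 4  # assumed fallback
        pio_numtasks = max(1, comp_ntasks // stride)
    return pio_numtasks

def comp2io_max_pend_req(setting, pio_numtasks):
    # 0 -> CIME-computed bound, -1 -> no bound, anything else -> explicit bound.
    if setting == 0:
        return max(64, 2 * pio_numtasks)
    return None if setting == -1 else setting

ntasks = resolve_pio_numtasks(-99, pio_stride=4, comp_ntasks=512)  # -> 128
print(ntasks, comp2io_max_pend_req(0, ntasks))                     # -> 128 256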
relative to component root - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - - - integer - run_pio - env_run.xml - - pio number of io tasks, if this value is -99 it will be computed based on PIO_STRIDE and - number of tasks - - - -99 - -99 - -99 - -99 - -99 - -99 - -99 - -99 - -99 - -99 - - - - - - - - - char - UNSET - test - env_test.xml - Test type name - - - - char - UNSET - test - env_test.xml - Test type descriptor - - - - char - UNSET - test - env_test.xml - Testcase short name - - - - char - UNSET - test - env_test.xml - Case base ID - - - - logical - TRUE,FALSE - TRUE - test - env_test.xml - Is first run of test - - - - char - UNSET - test - env_test.xml - Arguments supplied to create_test - - - - char - UNSET - test - env_test.xml - supplied or computed test id - - - - real - 0.10 - test - env_test.xml - Expected relative memory usage growth for test - - - - real - 0.25 - test - env_test.xml - Expected throughput deviation - - - - logical - TRUE,FALSE - FALSE - test - env_test.xml - Whether to generate a baseline - - - - logical - TRUE,FALSE - FALSE - test - env_test.xml - Whether to compare the baseline - - - - char - UNSET - test - env_test.xml - The tagname we are comparing baselines against - - - - char - UNSET - test - env_test.xml - The tagname we are comparing baselines against - - - - char - /UNSET - test - env_test.xml - The directory where baselines are stored - - - - char - UNSET - test - env_test.xml - The tagname we are generating baselines for - - - - char - UNSET - test - env_test.xml - The tagname we are comparing baselines against - - - - logical - TRUE,FALSE - FALSE - test - env_test.xml - Whether to clean the test after it is built/run - - - - char - UNSET - test - env_test.xml - standard full pathname of the cprnc executable - - - - - - - - logical - TRUE,FALSE - FALSE - run_coupling - env_run.xml - determine if per ice thickness category fields are passed from ice to ocean - DO NOT EDIT (set by POP build-namelist) - - - - - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear,date,ifdays0,end - never - run_drv_history - env_run.xml - Sets driver snapshot history file frequency (like REST_OPTION) - - - - integer - - -999 - run_drv_history - env_run.xml - Sets driver snapshot history file frequency (like REST_N) - - - - - integer - - -999 - run_drv_history - env_run.xml - yyyymmdd format, sets coupler snapshot history date (like REST_DATE) - - - - integer - 0,1,2,3,4,5,6 - 0 - run_flags - env_run.xml - Coupler decomposition option. - - - - integer - 0,1,2,3 - 1 - run_flags - env_run.xml - level of debug output, 0=minimum, 1=normal, 2=more, 3=too much - - - - logical - TRUE,FALSE - FALSE - build_component_clm - env_build.xml - TRUE implies CLM is built with support for the PETSc - library. The Variably Saturated Flow Model (VSFM) solver in CLM - uses the PETSc library. In order to use the VSFM solver, CLM - must be built with PETSc support and linking to PETSc must occur - when building the ACME executable. This occurs if this variable - is set to TRUE. Note that is only available on a limited set of - machines/compilers. 
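The test entries above include relative tolerances for baseline comparisons: an expected memory-usage growth of 0.10 and an expected throughput deviation of 0.25. A toy check of a run metric against its baseline using such a fraction; illustrative only, not the CIME test harness:

def outside_tolerance(baseline, current, allowed_fraction):
    # True when the relative change from the baseline exceeds the allowed fraction.
    return baseline > 0 and abs(current - baseline) / baseline > allowed_fraction

print(outside_tolerance(100.0, 108.0, 0.10))  # 8% memory growth -> within tolerance
print(outside_tolerance(50.0, 35.0, 0.25))    # 30% throughput drop -> flagged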
- - - - - - char - - external_tools - env_run.xml - External script to be run before model completion - - - char - - external_tools - env_run.xml - External script to be run after model completion - - - - - - logical - TRUE,FALSE - external_tools - env_run.xml - Run the external tool pointed to by DATA_ASSIMILATION_SCRIPT after the model run completes - - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - FALSE - - - - - integer - - 1 - external_tools - env_run.xml - Number of model run - data assimilation steps to complete - - - char - - - external_tools - env_run.xml - External script to be run after model completion - - - - logical - TRUE,FALSE - FALSE - external_tools - env_run.xml - whether the case uses an external workflow driver - - - - - char - job_submission - env_workflow.xml - Store user override for queue - - - - char - job_submission - env_workflow.xml - Store user override for walltime - - - - char - - - job_submission - env_workflow.xml - The machine queue in which to submit the job. Default determined in config_machines.xml can be overwritten by testing - - - - char - - - job_submission - env_workflow.xml - The machine wallclock setting. Default determined in config_machines.xml can be overwritten by testing - - - - char - - - job_submission - env_workflow.xml - Override the batch submit command this job. Do not include executable or dependencies - - - - char - - job_submission - env_workflow.xml - project for project-sensitive build and run paths, and job scripts - - - - char - - job_submission - env_workflow.xml - project to charge in scripts if different from PROJECT - - - - char - unknown - case_der - env_case.xml - Apparent version of the model used for this case - - - - logical - TRUE,FALSE - FALSE - job_submission - env_batch.xml - whether the PROJECT value is required on this machine - - - - logical - TRUE,FALSE - FALSE - comparison_to_nuopc - env_build.xml - TRUE=>turn on CPP variable COMPARE_TO_NUOPC - - - - ========================================= - Notes: - (1) Time period is first four characters of - compset name - ========================================= - - - diff --git a/src/drivers/mct/cime_config/config_component_cesm.xml b/src/drivers/mct/cime_config/config_component_cesm.xml deleted file mode 100644 index 3bae22bba37..00000000000 --- a/src/drivers/mct/cime_config/config_component_cesm.xml +++ /dev/null @@ -1,587 +0,0 @@ - - - - - - - - - 1972-2004 - 2002-2003 - Historic transient - Twentieth century transient - - CMIP6 SSP1-1.9 forcing - CMIP6 SSP1-2.6 forcing - CMIP6 SSP2-4.5 forcing - CMIP6 SSP3-7.0 forcing - CMIP6 SSP4-3.4 forcing - CMIP6 SSP4-6.0 forcing - CMIP6 SSP5-3.4 forcing - CMIP6 SSP5-8.5 forcing - Biogeochemistry intercomponent - with diagnostic CO2 - with prognostic CO2 - - - - char - https://doi.org/10.5065/D67H1H0V - run_metadata - env_case.xml - run DOI - - - - logical - TRUE,FALSE - FALSE - run_flags - env_run.xml - Turns on component varying thread control in the driver. - Used to set the driver namelist variable "drv_threading". - - - - integer - 0,1,2 - 0 - run_flags - env_run.xml - Sets level of task-to-node mapping output for the whole model - (0: no output; 1: compact; 2: verbose). - - - - integer - 0,1,2 - 0 - run_flags - env_run.xml - Sets level of task-to-node mapping output for supported component models - (0: no output; 1: compact; 2: verbose). 
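The external_tools entries above describe a run/data-assimilation cycle: the model runs, the script named by DATA_ASSIMILATION_SCRIPT runs after it completes, and the pair repeats for the configured number of cycles. A sketch of that loop with a stand-in run function; passing the cycle index to the script is an assumption of this sketch, not something the entries specify:

import subprocess

def run_case_with_da(run_model, da_script, cycles, da_enabled):
    # Alternate model runs with the external data-assimilation script.
    for cycle in range(cycles):
        run_model(cycle)
        if da_enabled and da_script:
            subprocess.run([da_script, str(cycle)], check=True)

run_case_with_da(lambda c: print(f"model run, cycle {c}"), da_script=None, cycles=2, da_enabled=False)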
- - - - logical - TRUE,FALSE - FALSE - run_flags - env_run.xml - logical to save timing files in rundir - - - - integer - 0 - run_flags - env_run.xml - Determines number of times profiler is called over the model run period. - This sets values for tprof_option and tprof_n that determine the timing output file frequency - - - - - integer - 2 - run_flags - env_run.xml - - integer indicating maximum detail level to profile. This xml - variable is used to set the namelist variable - timing_detail_limit. This namelist variable is used by perf_mod - (in $CIMEROOT/src/share/timing/perf_mod.F90) to turn timers off - and on depending on calls to the routine t_adj_detailf. If in the - code a statement appears like t_adj_detailf(+1), then the current - timer detail level is incremented by 1 and compared to the - time_detail_limit obtained from the namelist. If the limit is - exceeded then the timer is turned off. - - - - - integer - 12 - run_flags - env_run.xml - Maximum code stack depth of enabled timers. - - - - logical - TRUE,FALSE - FALSE - run_data_archive - env_run.xml - Logical to archive all interim restart files, not just those at eor - If TRUE, perform short term archiving on all interim restart files, - not just those at the end of the run. By default, this value is TRUE. - The restart files are saved under the specific component directory - ($DOUT_S_ROOT/$CASE/$COMPONENT/rest rather than the top-level $DOUT_S_ROOT/$CASE/rest directory). - Interim restart files are created using the REST_N and REST_OPTION variables. - This is for expert users ONLY and requires expert knowledge. - We will not document this further in this guide. - - - - logical - TRUE,FALSE - FALSE - run_flags - env_run.xml - turns on coupler bit-for-bit reproducibility with varying pe counts - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear,date,ifdays0,end - never - - ndays - - run_begin_stop_restart - env_run.xml - - sets frequency of full model barrier (same options as STOP_OPTION) for synchronization with BARRIER_N and BARRIER_DATE - - - - - char - none,CO2A,CO2B,CO2C - none - - CO2A - none - CO2A - CO2A - CO2A - CO2C - CO2C - - run_coupling - env_run.xml - Activates additional CO2-related fields to be exchanged between components. Possible values are: - - CO2A: sets the driver namelist variable flds_co2a = .true.; this adds - prognostic CO2 and diagnostic CO2 at the lowest model level to be sent from - the atmosphere to the land and ocean. - - CO2B: sets the driver namelist variable flds_co2b = .true.; this adds - prognostic CO2 and diagnostic CO2 at the lowest model level to be sent from - the atmosphere just to the land, and the surface upward flux of CO2 to be - sent from the land back to the atmosphere - - CO2C: sets the driver namelist variable flds_co2c = .true.; this adds - prognostic CO2 and diagnostic CO2 at the lowest model level to be sent from - the atmosphere to the land and ocean, and the surface upward flux of CO2 - to be sent from the land and the open ocean back to the atmosphere. - - The namelist variables flds_co2a, flds_co2b and flds_co2c are in the - namelist group cpl_flds_inparm. - - - - - char - - - - - - run_component_cpl - env_case.xml - User mods to apply to specific compset matches. - - - - char - hour,day,year,decade - run_coupling - env_run.xml - day - - year - hour - - Base period associated with NCPL coupling frequency. 
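The CCSM_BGC description above states which driver namelist flag each setting turns on (flds_co2a, flds_co2b, or flds_co2c in the cpl_flds_inparm group). A minimal sketch of that mapping, ignoring the E3SM-only *_OI and DMSA variants described later in this file:

def bgc_to_driver_flags(ccsm_bgc):
    # Map the CCSM_BGC xml value to the cpl_flds_inparm flag it enables.
    flags = {"flds_co2a": False, "flds_co2b": False, "flds_co2c": False}
    if ccsm_bgc == "CO2A":
        flags["flds_co2a"] = True
    elif ccsm_bgc == "CO2B":
        flags["flds_co2b"] = True
    elif ccsm_bgc == "CO2C":
        flags["flds_co2c"] = True
    return flags

print(bgc_to_driver_flags("CO2C"))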
- This xml variable is only used to set the driver namelist variables, - atm_cpl_dt, lnd_cpl_dt, ocn_cpl_dt, ice_cpl_dt, glc_cpl_dt, rof_cpl_dt, wav_cpl_dt, and esp_dt. - - - - integer - 48 - - 144 - 288 - 288 - 72 - 48 - 4 - 24 - 24 - 24 - 24 - 24 - 48 - 48 - 1 - 96 - 96 - 96 - 96 - 192 - 192 - 192 - 192 - 384 - 384 - 384 - 144 - 72 - 144 - 288 - 48 - 48 - 24 - 24 - 1 - 4 - 4 - 4 - 4 - - run_coupling - env_run.xml - Number of atm coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist atm_cpl_dt, equal to basedt/ATM_NCPL, - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - - 1 - - run_coupling - env_run.xml - Number of land coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist atm_cpl_dt, equal to basedt/LND_NCPL, - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - - 1 - - run_coupling - env_run.xml - Number of ice coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist ice_cpl_dt, equal to basedt/ICE_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - - 1 - 24 - 4 - 24 - 24 - 1 - 1 - 1 - $ATM_NCPL - $ATM_NCPL - 24 - - run_coupling - env_run.xml - Number of ocn coupling intervals per NCPL_BASE_PERIOD. - Thisn is used to set the driver namelist ocn_cpl_dt, equal to basedt/OCN_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - 1 - - 1 - $ATM_NCPL - $ATM_NCPL - 1 - - run_coupling - env_run.xml - Number of glc coupling intervals per NCPL_BASE_PERIOD. - - - - char - glc_coupling_period,yearly - yearly - run_coupling - env_run.xml - Period at which coupler averages fields sent to GLC. - This supports doing the averaging to GLC less frequently than GLC is called - (i.e., separating the averaging frequency from the calling frequency). - This is useful because there are benefits to only averaging the GLC inputs - as frequently as they are really needed (yearly for CISM), but GLC needs to - still be called more frequently than that in order to support mid-year restarts. - - Setting GLC_AVG_PERIOD to 'glc_coupling_period' means that the averaging is - done exactly when the GLC is called (governed by GLC_NCPL). - - IMPORTANT: In order to restart mid-year when running with CISM, you MUST specify GLC_AVG_PERIOD = 'yearly'. - If using GLC_AVG_PERIOD = 'glc_coupling_period' with CISM, you can only restart on year boundaries. - - - - - integer - 8 - - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - 8 - 8 - $ATM_NCPL - 1 - $ATM_NCPL - $ATM_NCPL - - run_coupling - env_run.xml - Number of rof coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist rof_cpl_dt, equal to basedt/ROF_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - run_coupling - env_run.xml - Number of wav coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist wav_cpl_dt, equal to basedt/WAV_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - - - logical - TRUE,FALSE - FALSE - - TRUE - TRUE - TRUE - FALSE - FALSE - - run_component_cpl - env_run.xml - - Only used for compsets with DATM and POP (currently C, G and J): - If true, compute albedos to work with daily avg SW down - If false (default), albedos are computed with the assumption that downward - solar radiation from the atm component has a diurnal cycle and zenith-angle - dependence. 
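The *_NCPL entries above all reduce to the same arithmetic: the coupling interval is NCPL_BASE_PERIOD expressed in seconds divided by the NCPL value. A worked example; the year and decade lengths assume 365-day years, which is an assumption of this sketch:

def coupling_dt_seconds(ncpl_base_period, ncpl):
    # basedt / NCPL, with basedt = NCPL_BASE_PERIOD in seconds.
    seconds = {"hour": 3600, "day": 86400, "year": 365 * 86400, "decade": 10 * 365 * 86400}
    return seconds[ncpl_base_period] // ncpl

print(coupling_dt_seconds("day", 48))  # ATM_NCPL=48 with a daily base period -> 1800 s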
This is normally the case when using an active atm component - If true, albedos are computed with the assumption that downward - solar radiation from the atm component is a daily average quantity and - does not have a zenith-angle dependence. This is often the case when - using a data atm component. Only used for compsets with DATM and POP (currently C, G and J). - NOTE: This should really depend on the datm forcing and not the compset per se. - So, for example, whether it is set in a J compset should depend on - what datm forcing is used. - - - - - char - off,ocn - off - - ocn - off - - run_component_cpl - env_run.xml - - Only used for compsets with DATM and POP (currently C, G and J): - If ocn, ocn provides EP balance factor for precipitation. - Provides EP balance factor for precip for POP. A factor computed by - POP is applied to precipitation so that precipitation balances - evaporation and ocn global salinity does not drift. This is intended - for use when coupling POP to a DATM. Only used for C, G and J compsets. - Default is off - - - - - char - CESM1_MOD,CESM1_MOD_TIGHT,RASM_OPTION1,RASM_OPTION2,NUOPC,NUOPC_TIGHT - CESM1_MOD_TIGHT - - CESM1_MOD - CESM1_MOD - RASM_OPTION1 - RASM_OPTION1 - RASM_OPTION1 - CESM1_MOD - CESM1_MOD - CESM1_MOD - CESM1_MOD - - run_coupling - env_run.xml - - Coupler sequencing option. This is used to set the driver namelist variable cpl_seq_option. - CESM1_MOD includes a cesm1.3 mod that swaps ocean merging and atm/ocn flux - computation. - RASM_OPTION1 runs prep ocean before the ocean coupling reducing - most of the lags and field inconsistency but still allowing the ocean to run - concurrently with the ice and atmosphere. - RASM_OPTION2 is similar to RASM_OPTION1 - but sequences the ice model, prep ocean and ocean model in that order. The - ocean model loses some of the concurrency with the ice model. - CESM1_MOD_TIGHT are consistent with the old variables ocean_tight_coupling = true in the driver. - - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear,date,ifdays0,end - never - - nmonths - - run_drv_history - env_run.xml - Sets driver average history file frequency (like REST_OPTION) - - - - char - - -999 - - 1 - - run_drv_history - env_run.xml - Sets driver average history file frequency (like REST_N) - - - - integer - - -999 - run_drv_history - env_run.xml - yyyymmdd format, sets driver average history date (like REST_DATE) - - - - logical - TRUE,FALSE - FALSE - - TRUE - TRUE - TRUE - - run_budgets - env_run.xml - logical that turns on diagnostic budgets for driver - - - - real - - 284.7 - - 367.0 - 284.7 - - run_co2 - env_run.xml - - Mechanism for setting the CO2 value in ppmv for - CLM if CLM_CO2_TYPE is constant or for - POP if OCN_CO2_TYPE is constant. - - - - - logical - TRUE,FALSE - FALSE - - TRUE - TRUE - - run_flags - env_run.xml - Turn on the passing of water isotope fields through the coupler - - - - integer - 1,3,5,10,36 - 10 - run_glc - env_run.xml - Number of glacier elevation classes used in CLM. - Used by both CLM and the coupler (even if CISM is not running, and only SGLC is used). 
- - - - logical - TRUE,FALSE - FALSE - - TRUE - - TRUE - - run_glc - env_run.xml - Whether the glacier component feeds back to the rest of the system - This affects: - (1) Whether CLM updates its areas based on glacier areas sent from GLC - (2) Whether GLC sends fluxes (e.g., calving fluxes) to the coupler - Note that this is set to TRUE by default for TG compsets - even though there are - no feedbacks for TG compsets, this enables extra coupler diagnostics for these - compsets. - - - - char - minus1p8,linear_salt,mushy - mushy - run_physics - env_run.xml - Freezing point calculation for salt water. - - - - diff --git a/src/drivers/mct/cime_config/config_component_e3sm.xml b/src/drivers/mct/cime_config/config_component_e3sm.xml deleted file mode 100644 index 07db0278cef..00000000000 --- a/src/drivers/mct/cime_config/config_component_e3sm.xml +++ /dev/null @@ -1,796 +0,0 @@ - - - - - - char - https://doi.org/10.11578/E3SM/dc.20180418.36 - run_metadata - env_case.xml - run DOI - - - - logical - TRUE,FALSE - TRUE - run_flags - env_run.xml - Turns on component varying thread control in the driver. - Used to set the driver namelist variable "drv_threading". - - - - integer - 0,1,2 - 2 - run_flags - env_run.xml - Sets level of task-to-node mapping output for the whole model - (0: no output; 1: compact; 2: verbose). - - - - integer - 0,1,2 - 0 - run_flags - env_run.xml - Sets level of task-to-node mapping output for supported component models - (0: no output; 1: compact; 2: verbose). - - - - logical - TRUE,FALSE - TRUE - run_flags - env_run.xml - logical to save timing files in rundir - - - - char - - UNSET - run_flags - env_run.xml - Where to auto archive timing data - - - - char - - - run_flags - env_run.xml - A comma-separated list of projects that are allowed to auto archive timing data in SAVE_TIMING_DIR - - - - integer - 12 - run_flags - env_run.xml - timer output depth - - - - integer - 20 - run_flags - env_run.xml - timer output depth - - - - integer - 12 - run_flags - env_run.xml - Sets maximum number of run loop timing data checkpoints. - This sets values for tprof_option and tprof_n that determine - the timing output file frequency. If less than one, uses - defaults for tprof_option and tprof_n and also allows them - to be set in user_nl_cpl. - - - - logical - TRUE,FALSE - TRUE - run_data_archive - env_run.xml - Logical to archive all interim restart files, not just those at eor - If TRUE, perform short term archiving on all interim restart files, - not just those at the end of the run. By default, this value is TRUE. - The restart files are saved under the specific component directory - ($DOUT_S_ROOT/$CASE/$COMPONENT/rest rather than the top-level $DOUT_S_ROOT/$CASE/rest directory). - Interim restart files are created using the REST_N and REST_OPTION variables. - This is for expert users ONLY and requires expert knowledge. - We will not document this further in this guide. 
- - - - logical - TRUE,FALSE - TRUE - run_flags - env_run.xml - turns on coupler bit-for-bit reproducibility with varying pe counts - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear,date,ifdays0,end - never - - ndays - - run_begin_stop_restart - env_run.xml - - sets frequency of full model barrier (same options as STOP_OPTION) for synchronization with BARRIER_N and BARRIER_DATE - - - - - logical - TRUE,FALSE - FALSE - - TRUE - TRUE - - run_flags - env_run.xml - Turn on the passing of water isotope fields through the coupler - - - - char - minus1p8,linear_salt,mushy - minus1p8 - run_physics - env_run.xml - Freezing point calculation for salt water. - - - - - - - - char - CESM1_ORIG,CESM1_ORIG_TIGHT,CESM1_MOD,CESM1_MOD_TIGHT,RASM_OPTION1,RASM_OPTION2 - CESM1_MOD_TIGHT - - CESM1_MOD - CESM1_MOD - CESM1_MOD - CESM1_MOD - CESM1_MOD - CESM1_MOD - CESM1_MOD - RASM_OPTION1 - - run_coupling - env_run.xml - - Coupler sequencing option. This is used to set the driver namelist variable cpl_seq_option. - CESM1_ORIG is the cesm1.1 implementation. - CESM1_MOD includes a cesm1.3 mod that swaps ocean merging and atm/ocn flux - computation. - RASM_OPTION1 runs prep ocean before the ocean coupling reducing - most of the lags and field inconsistency but still allowing the ocean to run - concurrently with the ice and atmosphere. - RASM_OPTION2 is similar to RASM_OPTION1 - but sequences the ice model, prep ocean and ocean model in that order. The - ocean model loses some of the concurrency with the ice model. - CESM1_ORIG_TIGHT and CESM1_MOD_TIGHT are consistent with the old variables - ocean_tight_coupling = true in the driver. That namelist is gone and the - cpl_seq_option flags take it's place. - TIGHT coupling makes no sense with the OPTION5 and OPTION6 flags. - - - - - char - none,CO2A,CO2A_OI,CO2B,CO2C,CO2C_OI,CO2_DMSA - none - - CO2A - none - CO2C - CO2A - CO2A - CO2A - CO2A - CO2A_OI - CO2A_OI - CO2C - CO2C - CO2C - CO2C - CO2C_OI - CO2C_OI - CO2C_OI - CO2C_OI - - run_coupling - env_run.xml - Activates additional CO2-related fields to be exchanged between components. Possible values are: - - CO2A: sets the driver namelist variable flds_co2a = .true.; this adds - prognostic CO2 and diagnostic CO2 at the lowest model level to be sent from - the atmosphere to the land and ocean. - - CO2A_OI: sets the driver namelist variable flds_co2a = .true.; this adds - prognostic CO2 and diagnostic CO2 at the lowest model level to be sent from - the atmosphere to the land and ocean. Also sets the driver namelist variable - flds_bgc_oi = .true.; this turns on the transfer of bgc fields between the - ocean and seaice components via the coupler. - - CO2B: sets the driver namelist variable flds_co2b = .true.; this adds - prognostic CO2 and diagnostic CO2 at the lowest model level to be sent from - the atmosphere just to the land, and the surface upward flux of CO2 to be - sent from the land back to the atmosphere - - CO2C: sets the driver namelist variable flds_co2c = .true.; this adds - prognostic CO2 and diagnostic CO2 at the lowest model level to be sent from - the atmosphere to the land and ocean, and the surface upward flux of CO2 - to be sent from the land and the open ocean back to the atmosphere. - - CO2_DMSA: sets the driver namelist variable flds_co2_dmsa = .true. - - The namelist variables flds_co2a, flds_co2b, flds_co2c and flds_co2_dmsa are - in the namelist group cpl_flds_inparm. 
- - - - - char - - - - - run_component_cpl - env_case.xml - User mods to apply to specific compset matches. - - - - char - hour,day,year,decade - run_coupling - env_run.xml - day - - year - year - day - day - day - day - day - day - - Base period associated with NCPL coupling frequency. - This xml variable is only used to set the driver namelist variables, - atm_cpl_dt, lnd_cpl_dt, ocn_cpl_dt, ice_cpl_dt, glc_cpl_dt, rof_cpl_dt, wav_cpl_dt, and esp_dt. - - - - integer - 48 - - 48 - 48 - 48 - 48 - 144 - 288 - 288 - 48 - 48 - 72 - 48 - 4 - 24 - 24 - 24 - 1 - 1 - 1 - 24 - 12 - 12 - 12 - 24 - 48 - 48 - 48 - 96 - 96 - 96 - 96 - 96 - 96 - 48 - 12 - 96 - 96 - 12 - 24 - 12 - 96 - 96 - 144 - 144 - 96 - 144 - 144 - 96 - 96 - 72 - 144 - 288 - 48 - 48 - 24 - 24 - 1 - 4 - 4 - - run_coupling - env_run.xml - Number of atm coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist atm_cpl_dt, equal to basedt/ATM_NCPL, - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - - 1 - 1 - 1 - $ATM_NCPL - 48 - $ATM_NCPL - 12 - 96 - - run_coupling - env_run.xml - Number of land coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist atm_cpl_dt, equal to basedt/LND_NCPL, - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - - 1 - 1 - 1 - $ATM_NCPL - $ATM_NCPL - - run_coupling - env_run.xml - Number of ice coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist ice_cpl_dt, equal to basedt/ICE_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - - 1 - 4 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 24 - 6 - 12 - 12 - 24 - 48 - 48 - 48 - 48 - 48 - 48 - 96 - 48 - 48 - - - run_coupling - env_run.xml - Number of ocn coupling intervals per NCPL_BASE_PERIOD. - Thisn is used to set the driver namelist ocn_cpl_dt, equal to basedt/OCN_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - 1 - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - run_coupling - env_run.xml - Number of glc coupling intervals per NCPL_BASE_PERIOD. - - - - char - glc_coupling_period,yearly - glc_coupling_period - run_coupling - env_run.xml - Period at which coupler averages fields sent to GLC. - This supports doing the averaging to GLC less frequently than GLC is called - (i.e., separating the averaging frequency from the calling frequency). - This is useful because there are benefits to only averaging the GLC inputs - as frequently as they are really needed (yearly for CISM), but GLC needs to - still be called more frequently than that in order to support mid-year restarts. - - Setting GLC_AVG_PERIOD to 'glc_coupling_period' means that the averaging is - done exactly when the GLC is called (governed by GLC_NCPL). - - - - - integer - 8 - - 6 - 6 - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - $ATM_NCPL - 1 - 1 - 1 - 24 - 8 - 6 - 4 - 8 - - run_coupling - env_run.xml - Number of rof coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist rof_cpl_dt, equal to basedt/ROF_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - run_coupling - env_run.xml - Number of wav coupling intervals per NCPL_BASE_PERIOD. - This is used to set the driver namelist wav_cpl_dt, equal to basedt/WAV_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - integer - $ATM_NCPL - run_coupling - env_run.xml - Number of iac coupling intervals per NCPL_BASE_PERIOD. 
- This is used to set the driver namelist iac_cpl_dt, equal to basedt/IAC_NCPL - where basedt is equal to NCPL_BASE_PERIOD in seconds. - - - - - logical - TRUE,FALSE - FALSE - - TRUE - TRUE - - run_component_cpl - env_run.xml - - Only used for compsets with DATM and POP (currently C, G and J): - If true, compute albedos to work with daily avg SW down - If false (default), albedos are computed with the assumption that downward - solar radiation from the atm component has a diurnal cycle and zenith-angle - dependence. This is normally the case when using an active atm component - If true, albedos are computed with the assumption that downward - solar radiation from the atm component is a daily average quantity and - does not have a zenith-angle dependence. This is often the case when - using a data atm component. Only used for compsets with DATM and POP (currently C, G and J). - NOTE: This should really depend on the datm forcing and not the compset per se. - So, for example, whether it is set in a J compset should depend on - what datm forcing is used. - - - - - char - off,ocn - off - - ocn - - run_component_cpl - env_run.xml - - Only used for compsets with DATM and POP (currently C, G and J): - If ocn, ocn provides EP balance factor for precipitation. - Provides EP balance factor for precip for POP. A factor computed by - POP is applied to precipitation so that precipitation balances - evaporation and ocn global salinity does not drift. This is intended - for use when coupling POP to a DATM. Only used for C, G and J compsets. - Default is off - - - - - char - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,nmonth,nyears,nyear,date,ifdays0,end - never - - nmonths - - run_drv_history - env_run.xml - Sets driver average history file frequency (like REST_OPTION) - - - - char - - -999 - - 1 - - run_drv_history - env_run.xml - Sets driver average history file frequency (like REST_N) - - - - integer - - -999 - run_drv_history - env_run.xml - yyyymmdd format, sets driver average history date (like REST_DATE) - - - - logical - TRUE,FALSE - FALSE - - TRUE - TRUE - TRUE - - run_budgets - env_run.xml - logical that turns on diagnostic budgets for driver - - - - real - - 379.000 - - 284.7 - 284.7 - 284.7 - 0.000001 - 0.000001 - 367.0 - 379.000 - 284.7 - 379.000 - 379.000 - 367.0 - 367.0 - 367.0 - 367.0 - 367.0 - 367.0 - 367.0 - 379.000 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 368.9 - 368.9 - 367.0 - 284.725 - 284.725 - 368.865 - 368.865 - 368.865 - 368.865 - 368.865 - 368.865 - 368.865 - 368.865 - 284.317 - 312.821 - - 0.000001 - 0.000001 - - 284.7 - 368.9 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - 0.000001 - - run_co2 - env_run.xml - This set the namelist values of CO2 ppmv for CAM and CLM. This variables is - introduced to coordinate this value among multiple components. - - - - char - on, off - off - case_def - env_case.xml - If set to off, this component set/ grid specification is not scientifically supported. - If set to on, this component set/ grid specification is scientifically supported - - - - integer - 500 - case_def - env_case.xml - How much old test to allow - - - - integer - 0,1,3,5,10,36 - 10 - - 0 - - run_glc - env_run.xml - Glacier model number of elevation classes, 0 implies no glacier land unit in clm - Used by both CLM and CISM (even if CISM is not running, and only SGLC is used). 
- - - - logical - TRUE,FALSE - FALSE - - TRUE - TRUE - TRUE - - TRUE - - run_glc - env_run.xml - Whether the glacier component feeds back to the rest of the system - This affects: - (1) Whether CLM updates its areas based on glacier areas sent from GLC - (2) Whether GLC sends fluxes (e.g., calving fluxes) to the coupler - Note that this is set to TRUE by default for TG compsets - even though there are - no feedbacks for TG compsets, this enables extra coupler diagnostics for these - compsets. - - - - integer - - 0 - case_cost - env_case.xml - 2**n relative cost of grid where f19_g15 is 1 (DO NOT EDIT) - - - - integer - - 0 - case_cost - env_case.xml - 2**n relative cost of machine (DO NOT EDIT) - - - - BGC CO2=prog, rad CO2=prog: - BGC CO2=diag, rad CO2=diag: - BGC CO2=cons, rad CO2=diag: - BGC CO2=diag, rad CO2=cons: - BGC CO2=cons, rad CO2=cons: - ECO in POP: - --DO NOT USE FOR LONG SIMULATIONS: - pre-industrial: - present day: - Historical 1850 to 2000 transient: - Historical 1850 to 2000 transient: - AMIP for stand-alone cam: - CCMI REFC2 1950 to 2100 transient: - CCMI REFC2 2004 to 2100 transient: - 1948 to 2004 transient: - CCMI REFC1 Free running, 1950 to 2010 transient: - CCMI REFC1 Specified dynamics, 1975 to 2010 transient: - RCP8.5 future scenario: - RCP6.0 future scenario: - RCP4.5 future scenario: - 1955 to 2005 transient: - RCP8.5 future scenario: - RCP6.0 future scenario: - RCP4.5 future scenario: - RCP2.6 future scenario: - RCP4.5 based scenario from 2013 (control for WACCM/CARMA nuclear winter study): - 1992 to 2005 transient: - prescribed meteorology: for stand-alone cam - ARM95 IOP: for stand-alone cam - ARM97 IOP: for stand-alone cam - CLM transient land use: - CLM transient land use: - - pre-industrial (1850) to present day: - -----------------------------WARNING ------------------------------------------------ - "PIPD" compsets use complete forcing data from observed sources - up to the year 2005. Following this period they are a combination of observed sources - (land-use, SST, sea ice, CO2, CH4, N2O) to present day and IPCC RCP4.5 scenario data. - ------------------------------------------------------------------------------------- - - - -----------------------------WARNING ------------------------------------------------ - This compset is not spun-up! In later versions of the model, spun-up initial - conditions will be provided and this warning will be removed. - ------------------------------------------------------------------------------------- - - - diff --git a/src/drivers/mct/cime_config/config_compsets.xml b/src/drivers/mct/cime_config/config_compsets.xml deleted file mode 100644 index 2c91672f401..00000000000 --- a/src/drivers/mct/cime_config/config_compsets.xml +++ /dev/null @@ -1,109 +0,0 @@ - - - - - - ========================================= - compset naming convention - ========================================= - The compset longname below has the specified order - atm, lnd, ice, ocn, river, glc wave esp cesm-options - - The notation for the compset longname below is - TIME_ATM[%phys]_LND[%phys]_ICE[%phys]_OCN[%phys]_ROF[%phys]_GLC[%phys]_WAV[%phys][_ESP][_BGC%phys] - - The following compsets are those that can be tested in CIME stand-alone configurations - without any prognostic components. - For the compsets below the following are the only allowable values of the components. - - TIME = Time period (e.g. 2000, HIST, SSP585...) 
- ATM = [DATM, SATM, XATM] - LND = [DLND, SLND, XLND] - ICE = [DICE, SICE, XICE] - OCN = [DOCN, SOCN, XOCN] - ROF = [DROF, SROF, XROF] - GLC = [ SGLC ] - WAV = [DWAV, SWAV ] - IAC = [ SIAC ] - ESP = [DESP, SESP ] - - The OPTIONAL %phys attributes specify submodes of the given system - For example DOCN%DOM is the data ocean model for DOCN - ALL data models must have a %phys option that corresponds to the data model mode - - Each compset node is associated with the following elements - - lname - - alias - - support (optional description of the support level for this compset) - Each compset node can also have the following attributes - - grid (optional regular expression match for grid to work with the compset) - - - - A - 2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SIAC - - - - ADSOM - 2000_DATM%NYF_SLND_DICE%SSMI_DOCN%SOM_DROF%NYF_SGLC_SWAV_TEST - - - - ADSOMAQP - 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV - - - - ADAQP3 - 2000_DATM%NYF_SLND_SICE_DOCN%AQP3_SROF_SGLC_SWAV - - - - ADAQPFILE - 2000_DATM%NYF_SLND_SICE_DOCN%AQPFILE_SROF_SGLC_SWAV - - - - A1850DLND - 1850_SATM_DLND%SCPL_SICE_SOCN_SROF_SGLC_SWAV - - - - ADWAV - 2000_SATM_SLND_SICE_SOCN_SROF_SGLC_DWAV%CLIMO - - - - ADESP - 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV_DESP%NOOP - - - - ADESP_TEST - 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV_DESP%TEST - - - - AIAF - 2000_DATM%IAF_SLND_DICE%IAF_DOCN%IAF_DROF%IAF_SGLC_SWAV - - - - S - 2000_SATM_SLND_SICE_SOCN_SROF_SGLC_SWAV_SESP - - - - X - 2000_XATM_XLND_XICE_XOCN_XROF_XGLC_XWAV - - - - - - TRUE - - - - diff --git a/src/drivers/mct/cime_config/config_pes.xml b/src/drivers/mct/cime_config/config_pes.xml deleted file mode 100644 index db4bebb81a7..00000000000 --- a/src/drivers/mct/cime_config/config_pes.xml +++ /dev/null @@ -1,213 +0,0 @@ - - - - - - - - none - - -1 - -1 - -1 - -1 - -1 - -1 - -1 - -1 - -1 - -1 - - - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - - none - - 60 - 60 - 60 - 60 - 60 - 60 - 60 - 60 - 60 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - - none - - -8 - -8 - -8 - -8 - -8 - -8 - -8 - -8 - -8 - - - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 2 - 1 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - - none - - -1 - -1 - -1 - -1 - -1 - -1 - -1 - -1 - -1 - - - 8 - 8 - 8 - 8 - 8 - 8 - 8 - 8 - 8 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - - - - PE layout for tests - - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - 64 - - - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - 16 - - - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - - - - - - diff --git a/src/drivers/mct/cime_config/namelist_definition_drv.xml b/src/drivers/mct/cime_config/namelist_definition_drv.xml deleted file mode 100644 index 1336ffd3b2e..00000000000 --- a/src/drivers/mct/cime_config/namelist_definition_drv.xml +++ /dev/null @@ -1,4727 +0,0 @@ - - - - - - - - - - - - - - integer - cime_driver_inst - cime_driver_inst - - Number of CESM driver instances. Only used if MULTI_DRIVER is TRUE. - - - 1 - $NINST_MAX - - - - - - - - - logical - seq_flds - seq_cplflds_inparm - - If set to .true., adds prognostic CO2 and diagnostic CO2 at the lowest - model level to be sent from the atmosphere to the land and ocean. - If CCSM_BGC is set to 'CO2A', then flds_co2a will be set to .true. by default - - - .false. - .true. 
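The compset naming convention above fixes the component ordering in a longname (TIME, ATM, LND, ICE, OCN, ROF, GLC, WAV, then optional pieces such as ESP, IAC, or BGC). A minimal parser for that layout, assuming the fixed ordering shown and no underscores inside the optional %phys modifiers:

def parse_compset_longname(lname):
    # Split a longname like TIME_ATM[%phys]_LND[%phys]_... into labeled pieces.
    parts = lname.split("_")
    classes = ["TIME", "ATM", "LND", "ICE", "OCN", "ROF", "GLC", "WAV"]
    parsed = dict(zip(classes, parts))
    parsed["EXTRA"] = parts[len(classes):]  # optional ESP / IAC / BGC entries
    return parsed

# The longname of the 'A' compset listed above
print(parse_compset_longname("2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SIAC"))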
- - - - - logical - seq_flds - seq_cplflds_inparm - - If set to .true., adds prognostic CO2 and diagnostic CO2 at the lowest - model level to be sent from the atmosphere just to the land, and the - surface upward flux of CO2 to be sent from the land back to the - atmosphere. - If CCSM_BGC is set to 'CO2B', then flds_co2b will be set to .true. by default. - - - .false. - .true. - - - - - logical - seq_flds - seq_cplflds_inparm - - If set to .true., adds prognostic CO2 and diagnostic CO2 at the lowest - model level to be sent from the atmosphere to the land and ocean, and the - surface upward flux of CO2 to be sent from the land and the open ocean - back to the atmosphere. - If CCSM_BGC is set to 'CO2C', then flds_co2c will be set to .true. by default. - - - .false. - .true. - - - - - logical - seq_flds - seq_cplflds_inparm - - If CCSM_BGC is set to 'CO2_DMSA', then flds_co2_dmsa will be set to .true. by default. - - - .false. - .true. - - - - - logical - seq_flds - seq_cplflds_inparm - - If set to .true. BGC fields will be passed back and forth between the ocean and seaice - via the coupler. - - - .false. - .true. - - - - - logical - seq_flds - seq_cplflds_inparm - - Pass water isotopes between components - - - $FLDS_WISO - - - - - integer - seq_flds - seq_cplflds_inparm - - Number of cism elevation classes. Set by the xml variable GLC_NEC in env_run.xml - - - $GLC_NEC - - - - - integer - seq_flds - seq_cplflds_inparm - - Number of sea ice thickness categories. Set by the xml variable ICE_NCAT in env_build.xml - - - $ICE_NCAT - - - - - logical - seq_flds - seq_cplflds_inparm - - .true. if select per ice thickness category fields are passed to the ocean. - Set by the xml variable CPL_I2O_PER_CAT in env_run.xml - - - $CPL_I2O_PER_CAT - - - - - logical - seq_flds - seq_cplflds_inparm - - .true. means that all fields passed to coupler are checked for NaN values - - - .false. - .true. - - - - - - - - - char(200) - seq_flds - seq_cplflds_userspec - - New fields that are user specidied can be added as namelist variables - by the user in the cpl namelist seq_flds_user using the namelist variable - array cplflds_customs. The user specified new fields must follow the - above naming convention. - As an example, say you want to add a new state 'foo' that is passed - from the land to the atm - you would do this as follows - &seq_flds_user - cplflds_custom = 'Sa_foo->a2x', 'Sa_foo->x2a' - / - This would add the field 'Sa_foo' to the character strings defining the - attribute vectors a2x and x2a. It is assumed that code would need to be - introduced in the atm and land components to deal with this new attribute - vector field. - Modify user_nl_cpl to edit this. - - - '' - - - - - - - - - char - expdef - seq_infodata_inparm - e3sm,cesm - cime model - - cesm - e3sm - - - - - logical - expdef - seq_infodata_inparm - - true => turn on aquaplanet mode in cam - - - .false. - - - - - integer - expdef - seq_infodata_inparm - - 1 => default sst mode for aquaplanet in cam - - - 1 - - - - - char - expdef - seq_infodata_inparm - - case name. - - - $CASE - - - - - char - expdef - seq_infodata_inparm - - case description. 
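The cplflds_custom description above uses specs of the form 'Sa_foo->a2x' to attach a user-defined field to an attribute vector. A small sketch of splitting such a spec into its field name and target vector; illustrative only:

def parse_custom_fld(spec):
    # Separate the new field name from the attribute vector it extends.
    field, _, avect = spec.partition("->")
    return field.strip(), avect.strip()

for spec in ("Sa_foo->a2x", "Sa_foo->x2a"):
    print(parse_custom_fld(spec))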
- - - $CASESTR - - - - - char - expdef - seq_infodata_inparm - - model version documentation, - - - $MODEL_VERSION - - - - - char - expdef - seq_infodata_inparm - - model doi url - - - $MODEL_DOI_URL - - - - - char - expdef - seq_infodata_inparm - - username documentation - - - $USER - - - - - char - expdef - seq_infodata_inparm - - hostname information, - - - $MACH - - - - - char - expdef - seq_infodata_inparm - - location of timing output. - - - ./timing - - - - - char - expdef - seq_infodata_inparm - - location of timing checkpoint output. - - - ./timing/checkpoints - - - - - char - expdef - seq_infodata_inparm - startup,branch,continue - - mode to start the run up, [startup,branch,continue], - automatically derived from RUN_TYPE in env_run.xml - - - startup - startup - branch - continue - continue - continue - - - - - logical - expdef - seq_infodata_inparm - - Allow same branch casename as reference casename. If $CASE and $REFCASE are the same and the start_type is - not startup, then the value of brnch_retain_casename is set to .true. - - - .false. - - - - - integer - expdef - seq_infodata_inparm - - Level of debug output, 0=minimum, 1=normal, 2=more, 3=too much (default: 1) - - - $INFO_DBUG - - - - - logical - expdef - seq_infodata_inparm - - turns on bfb option in coupler which produce bfb results in the - coupler on different processor counts. (default: .false.) - - - $BFBFLAG - - - - - char - orbital - seq_infodata_inparm - fixed_year,variable_year,fixed_parameters - - orbital model setting. this sets how the orbital mode will be - configured. - "fixed_year" uses the orb_iyear and other orb inputs are ignored. In - this mode, the orbital parameters are constant and based on the year. - "variable_year" uses the orb_iyear and orb_iyear_align. In this mode, - the orbital parameters vary as the model year advances and the model - year orb_iyear_align has the equivalent orbital year of orb_iyear. - "fixed_parameters" uses the orb_eccen, orb_mvelp, and orb_obliq to set - the orbital parameters which then remain constant through the model - integration. [fixed_year, variable_year, fixed_parameters] (default: 'fixed_year'.) - - - fixed_year - variable_year - - - - - integer - orbital - seq_infodata_inparm - - model year associated with orb_iyear when orb_mode is variable_year. (default: 1990) - - - 1990 - 1850 - 2000 - 1850 - - - - - integer - orbital - seq_infodata_inparm - - year of orbit, used when orb_mode is fixed_year or variable_year. (default: 1990) - - - 1990 - 1850 - 2000 - 1850 - - - - - real - orbital - seq_infodata_inparm - - eccentricity of orbit, used when orb_mode is fixed_parameters. - default: SHR_ORB_UNDEF_REAL (1.e36) (Not currently used in build-namelist) - - - 1.e36 - - - - - real - orbital - seq_infodata_inparm - - location of vernal equinox in longitude degrees, used when orb_mode is fixed_parameters. - default: SHR_ORB_UNDEF_REAL (1.e36)(Not currently used in build-namelist) - - - 1.e36 - - - - - real - orbital - seq_infodata_inparm - - obliquity of orbit in degrees, used when orb_mode is fixed_parameters. - default: SHR_ORB_UNDEF_REAL (1.e36) (Not currently used in build-namelist) - - - 1.e36 - - - - - char - wv_sat - seq_infodata_inparm - GoffGratch,MurphyKoop,Bolton,Flatau - - Type of water vapor saturation vapor pressure scheme employed. 'GoffGratch' for - Goff and Gratch (1946); 'MurphyKoop' for Murphy and Koop (2005); 'Bolton' for - Bolton (1980); 'Flatau' for Flatau, Walko, and Cotton (1992). 
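The orbital entries above spell out which inputs each orb_mode relies on: fixed_year uses orb_iyear, variable_year additionally needs orb_iyear_align, and fixed_parameters needs orb_eccen, orb_obliq, and orb_mvelp (which otherwise default to the 1.e36 SHR_ORB_UNDEF_REAL sentinel). A sketch of a consistency check over those combinations; this is not the driver's own validation:

SHR_ORB_UNDEF_REAL = 1.0e36  # 'undefined' sentinel noted in the descriptions above

def validate_orbital_settings(orb_mode, orb_iyear=None, orb_iyear_align=None,
                              orb_eccen=SHR_ORB_UNDEF_REAL,
                              orb_obliq=SHR_ORB_UNDEF_REAL,
                              orb_mvelp=SHR_ORB_UNDEF_REAL):
    # Confirm the inputs required by the chosen orb_mode are actually set.
    if orb_mode == "fixed_year":
        assert orb_iyear is not None, "fixed_year needs orb_iyear"
    elif orb_mode == "variable_year":
        assert None not in (orb_iyear, orb_iyear_align), \
            "variable_year needs orb_iyear and orb_iyear_align"
    elif orb_mode == "fixed_parameters":
        assert SHR_ORB_UNDEF_REAL not in (orb_eccen, orb_obliq, orb_mvelp), \
            "fixed_parameters needs orb_eccen, orb_obliq and orb_mvelp"
    else:
        raise ValueError(f"unknown orb_mode {orb_mode!r}")

validate_orbital_settings("variable_year", orb_iyear=1850, orb_iyear_align=1850)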
- Default: GoffGratch - - - GoffGratch - - - - - real - wv_sat - seq_infodata_inparm - - Width of the liquid-ice transition range in mixed-phase water saturation vapor - pressure calculations. The range always ends at 0 degrees Celsius, so this - variable only affects the start of the transition. - Default: 20K - WARNING: CAM is tuned to the default value of this variable. Because it affects - so many different parameterizations, changes to this variable may require a - significant retuning of CAM's cloud physics to give reasonable results. - - - 20.0D0 - - - - - logical - wv_sat - seq_infodata_inparm - - Whether or not to produce lookup tables at init time to use as a cache for - saturation vapor pressure. - Default: .false. - - - .false. - - - - - real - wv_sat - seq_infodata_inparm - - Temperature resolution of saturation vapor pressure lookup tables in Kelvin. - (This is only used if wv_sat_use_tables is .true.) - Default: 1.0 - - - 1.0D0 - - - - - char - control - seq_infodata_inparm - Freezing point calculation for salt water. - - $TFREEZE_SALTWATER_OPTION - - - - - char - control - seq_infodata_inparm - off,ocn - - Only used for C,G compsets: if ocn, ocn provides EP balance factor for precip - - - $CPL_EPBAL - - - - - logical - control - seq_infodata_inparm - - Only used for C,G compsets: if true, compute albedos to work with daily avg SW down - - - $CPL_ALBAV - - - - - char - control - seq_infodata_inparm - on,off,on_if_glc_coupled_fluxes - - Whether to renormalize the surface mass balance (smb) sent from lnd to glc so that the - global integral on the glc grid agrees with the global integral on the lnd grid. - - Unlike most fluxes, smb is remapped with bilinear rather than conservative mapping weights, - so this option is needed for conservation. However, conservation is not required in many - cases, since we often run glc as a diagnostic (one-way-coupled) component. - - Allowable values are: - 'on': always do this renormalization - 'off': never do this renormalization (see WARNING below) - 'on_if_glc_coupled_fluxes': Determine at runtime whether to do this renormalization. - Does the renormalization if we're running a two-way-coupled glc that sends fluxes - to other components (which is the case where we need conservation). - Does NOT do the renormalization if we're running a one-way-coupled glc, or if - we're running a glc-only compset (T compsets). - (In these cases, conservation is not important.) - - Only used if running with a prognostic GLC component. - - WARNING: Setting this to 'off' will break conservation when running with an - evolving, two-way-coupled glc. - - - on_if_glc_coupled_fluxes - - - - - real - control - seq_infodata_inparm - - Wall time limit for run - default: -1.0 - - - -1.0 - - - - - char - control - seq_infodata_inparm - day,month,year - - Force stop at the next month, day, etc when wall_time_limit is hit - default: month - - - month - - - - - logical - control - seq_infodata_inparm - - if true use Mahrt and Sun 1995,MWR modification to surface flux calculation - - - .false. - .true. - .false. - .false. - - - - - logical - control - seq_infodata_inparm - - If true, turn on diurnal cycle in computing atm/ocn fluxes - default: false - - - .false. 
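A minimal sketch of the orbital settings documented a little earlier in this file (orb_mode, orb_iyear, orb_iyear_align); the group name seq_infodata_inparm and the year value are taken from those entries:

    &seq_infodata_inparm
      orb_mode  = 'fixed_year'
      orb_iyear = 1990
    /

For the variable_year mode described there, orb_mode = 'variable_year' would instead be paired with orb_iyear and orb_iyear_align (for example, both 1850), so that model year orb_iyear_align corresponds to orbital year orb_iyear.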
- - - - - integer - control - seq_infodata_inparm - - 0: standard surface flux calculation as in E3SMv1 - 1: COAREv3.0 flux computation (Fairall et al., 2003) - 2: University of Arizona algorithm (Zeng et al., 1998) - default: 0 - - - 0 - - - - - real - control - seq_infodata_inparm - - Iterate atmocn flux calculation to this % difference - Setting this to zero will always do flux_max_iteration - - - - 0.01 - 0.0 - 0.0 - - - - - integer - control - seq_infodata_inparm - - Iterate atmocn flux calculation a max of this value - - - 5 - 2 - - - - - char - mapping - seq_infodata_inparm - - ATM_GRID values passed into driver. - - - $ATM_GRID - - - - - char - mapping - seq_infodata_inparm - - LND_GRID values passed into driver. - - - $LND_GRID - - - - - char - mapping - seq_infodata_inparm - - OCN_GRID values passed into driver. - - - $OCN_GRID - - - - - char - mapping - seq_infodata_inparm - - ICE_GRID values passed into driver. - - - $ICE_GRID - - - - - char - mapping - seq_infodata_inparm - - ROF_GRID values passed into driver. - - - $ROF_GRID - - - - - char - mapping - seq_infodata_inparm - - GLC_GRID values passed into driver. - - - $GLC_GRID - - - - - char - mapping - seq_infodata_inparm - - WAV_GRID values passed into driver. - - - $WAV_GRID - - - - - char - mapping - seq_infodata_inparm - - IAC_GRID values passed into driver. - - - $IAC_GRID - - - - - logical - mapping - seq_infodata_inparm - - invoke pole averaging corrections in shr_map_mod weights generation (default: true) - - - .true. - - - - - char - mapping - seq_infodata_inparm - none,npfix,cart3d,cart3d_diag,cart3d_uvw,cart3d_uvw_diag - - vect_map - turns on the vector mapping option for u and v vector mapping between - atm and ocean grids in the coupler. the options are none, npfix, - cart3d, cart3d_diag, cart3d_uvw, and cart3d_uvw_diag. the none option - results in scalar mapping independently for the u and v field which - tends to generate large errors near the poles. npfix is the - traditional option where the vectors are corrected on the ocean grid - north of the last latitude line of the atmosphere grid. the cart3d - options convert the east (u) and north (v) vectors to 3d (x,y,z) - triplets, and maps those fields before converting back to the east (u) - and north (v) directions. the cart3d ignores the resuling "w" - velocity. the cart3d_uvw calculates the resulting u and v vectors by - preserving the total "u,v,w" speed and the angle of the (u,v) vector. - the _diag options just add diagnotics to the log file about the vector - mapping. - - - $VECT_MAP - - - - - char - mapping - seq_infodata_inparm - ocn,atm,exch - - Grid for atm ocn flux calc (untested) - default: ocn - - - ocn - - - - - logical - mapping - seq_infodata_inparm - - mct alltoall mapping flag - default: false - - - .false. - - - - - logical - mapping - seq_infodata_inparm - - mct vector flag - default: false - - - .false. - - - - - integer - expdef - seq_infodata_inparm - 0,1,2,3,4,5,6 - - cpl decomp option (0=default, 1=comp decomp, 2=rearr comp decomp, 3=new single 1d seg - default: 0 - - - $CPL_DECOMP - - - - - char - expdef - seq_infodata_inparm - CESM1_MOD,CESM1_MOD_TIGHT,RASM_OPTION1,RASM_OPTION2,NUOPC,NUOPC_TIGHT - - Set the coupler sequencing. - - - $CPL_SEQ_OPTION - - - - - logical - budget - seq_infodata_inparm - - logical that turns on diagnostic budgets, false means budgets will never be written - - - $BUDGETS - - - - - logical - history - seq_infodata_inparm - - logical to write an extra initial coupler history file - - - .false. 
- - - - - integer - budget - seq_infodata_inparm - 0,1,2,3 - - sets the diagnotics level of the instantaneous budgets. [0,1,2,3], - written only if BUDGETS variable is true - 0=none, - 1=+net summary budgets, - 2=+detailed lnd/ocn/ice component budgets, - 3=+detailed atm budgets - default: 0 - - - 0 - - - - - integer - budget - seq_infodata_inparm - 0,1,2,3 - - sets the diagnotics level of the daily budgets. [0,1,2,3], - written only if do_budgets variable is .true., - 0=none, - 1=+net summary budgets, - 2=+detailed lnd/ocn/ice component budgets, - 3=+detailed atm budgets - default: 0 - - - 0 - - - - - integer - expdef - seq_infodata_inparm - 0,1,2,3 - - sets the diagnotics level of the monthy budgets. [0,1,2,3], - written only if do_budgets variable is .true., - 0=none, - 1=+net summary budgets, - 2=+detailed lnd/ocn/ice component budgets, - 3=+detailed atm budgets - default: 1 - - - 1 - - - - - integer - budget - seq_infodata_inparm - 0,1,2,3 - - sets the diagnotics level of the annual budgets. [0,1,2,3], - written only if do_budgets variable is .true., - 0=none, - 1=+net summary budgets, - 2=+detailed lnd/ocn/ice component budgets, - 3=+detailed atm budgets - default: 1 - - - 1 - - - - - integer - budget - seq_infodata_inparm - 0,1,2,3 - - sets the diagnotics level of the longterm budgets written at the end - of the year. [0,1,2,3], - written only if do_budgets variable is .true., - 0=none, - 1=+net summary budgets, - 2=+detailed lnd/ocn/ice component budgets, - 3=+detailed atm budgets, - default: 1 - - - 1 - - - - - integer - budget - seq_infodata_inparm - 0,1,2,3 - - sets the diagnotics level of the longterm budgets written at the end - of each run. [0,1,2,3], - written only if do_budgets variable is .true., - 0=none, - 1=+net summary budgets, - 2=+detailed lnd/ocn/ice component budgets, - 3=+detailed atm budgets, - default: 0 - - - 0 - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for instantaneous atm to coupler fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for 1-hour average atm to coupler fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for 1-hour instantaneous atm to coupler fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for 3-hour average atm to coupler fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for 3-hour average atm to coupler precip fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for daily average atm to coupler fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for instantaneous land to coupler fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for average* runoff to coupler fields - (*despite the lack of an averaging time span in the name). - Files are written at time-of-day = 00000, and at the end of the run interval, - even if that time is not 00000. - Run length less than 24 hours; averaging period is the run length, - Otherwise; averaging period is 24 hours for files before the last (partial) day, - averaging period is the last (partial) day for the last file. - default: false - - - .false. 
- - - - - logical - history - seq_infodata_inparm - - turns on coupler history stream for annual lnd to coupler glc forcing fields. - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - if true, use double-precision rather than single-precision for - coupler auxiliary history files - default: false - - - .false. - - - - - logical - history - seq_infodata_inparm - - writes atm fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes lnd fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes ocn fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes ice fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes rof fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes glc fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes wav fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes iac fields in coupler average history files. - default: true - - - .true. - - - - - logical - history - seq_infodata_inparm - - writes xao fields in coupler average history files. - default: true - - - .true. - - - - - logical - performance - seq_infodata_inparm - - turn on run time control of threading per pe per component by the driver - default: false - - - $DRV_THREADING - - - - - logical - performance - seq_infodata_inparm - - default: .false. - - - $COMP_RUN_BARRIERS - - - - - real - domain_check - seq_infodata_inparm - - Error tolerance for differences in fractions in domain checking - default: 1.0e-02 - - - $EPS_FRAC - - - - - real - domain_check - seq_infodata_inparm - - Error tolerance for differences in atm/land masks in domain checking - default: 1.0e-13 - - - $EPS_AMASK - - - - - real - domain_check - seq_infodata_inparm - - Error tolerance for differences in atm/land lat/lon in domain checking - default: 1.0e-12 - - - $EPS_AGRID - - - - - real - domain_check - seq_infodata_inparm - - Error tolerance for differences in atm/land areas in domain checking - default: 1.0e-07 - - - $EPS_AAREA - - - - - real - domain_check - seq_infodata_inparm - - Error tolerance for differences in ocean/ice masks in domain checking - default: 1.0e-06 - - - $EPS_OMASK - - - - - real - domain_check - seq_infodata_inparm - - Error tolerance for differences in ocean/ice lon/lat in domain checking - default: 1.0e-2 - - - $EPS_OGRID - - - - - real - domain_check - seq_infodata_inparm - - Error tolerance for differences in ocean/ice lon/lat in domain checking - default: 1.0e-1 - - - $EPS_OAREA - - - - - logical - seq_infodata_inparm - seq_infodata_inparm - - turns on single column mode. set by PTS_MODE in env_case.xml, default: false - - - .false. - .true. - - - - - real - seq_infodata_inparm - seq_infodata_inparm - - grid point latitude associated with single column mode. - if set to -999, ignore this value - - - -999. - $PTS_LAT - - - - - real - seq_infodata_inparm - seq_infodata_inparm - - grid point longitude associated with single column mode. - set by PTS_LON in env_run.xml. - - - -999. 
- $PTS_LON - - - - - logical - reprosum - seq_infodata_inparm - - Use faster method for reprosum, but one where reproducibility is not always guaranteed. - default: .false. - - - .false. - - - - - logical - reprosum - seq_infodata_inparm - - Allow INF and NaN in summands - default: .false. - - - .false. - - - - - real - reprosum - seq_infodata_inparm - - Tolerance for relative error - default: -1.0e-8 - - - -1.0e-8 - - - - - logical - reprosum - seq_infodata_inparm - - Recompute with non-scalable algorithm if reprosum_diffmax is exceeded. - default: .false. - - - .false. - - - - - - - - - integer - time - seq_timemgr_inparm - - atm coupling interval in seconds - set via ATM_NCPL in env_run.xml. - ATM_NCPL is the number of times the atm is coupled per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, and has valid values: hour,day,year,decade - - - - - integer - time - seq_timemgr_inparm - - lnd coupling interval in seconds - set via LND_NCPL in env_run.xml. - LND_NCPL is the number of times the lnd is coupled per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - - - - integer - time - seq_timemgr_inparm - - river runoff coupling interval in seconds - currently set by default to 10800 seconds. - default: 10800 - - - - - integer - time - seq_timemgr_inparm - - ice coupling interval in seconds - set via ICE_NCPL in env_run.xml. - ICE_NCPL is the number of times the ice is coupled per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - - - - integer - time - seq_timemgr_inparm - - ocn coupling interval in seconds - set via OCN_NCPL in env_run.xml. - OCN_NCPL is the number of times the ocn is coupled per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - - - - integer - time - seq_timemgr_inparm - - glc coupling interval in seconds - set via GLC_NCPL in env_run.xml. - GLC_NCPL is the number of times the glc is coupled per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - - - - char - time - seq_timemgr_inparm - glc_coupling_period,yearly - - $GLC_AVG_PERIOD - - - Period at which coupler averages fields sent to GLC. - This supports doing the averaging to GLC less frequently than GLC is called - (i.e., separating the averaging frequency from the calling frequency). - This is useful because there are benefits to only averaging the GLC inputs - as frequently as they are really needed (yearly for CISM), but GLC needs to - still be called more frequently than that in order to support mid-year restarts. - - Setting glc_avg_period to 'glc_coupling_period' means that the averaging is - done exactly when the GLC is called (governed by GLC_NCPL). - - - - - integer - time - seq_timemgr_inparm - - wav coupling interval in seconds - set via WAV_NCPL in env_run.xml. 
- WAV_NCPL is the number of times the wav is coupled per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - - - - integer - time - seq_timemgr_inparm - - iac coupling interval in seconds - set via IAC_NCPL in env_run.xml. - IAC_NCPL is the number of times the iac is coupled per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is also set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - - - - - integer - time - seq_timemgr_inparm - - esp run interval in seconds - esp_cpl_dt is the number of times the esp is run per NCPL_BASE_PERIOD - NCPL_BASE_PERIOD is set in env_run.xml and is the base period - associated with NCPL coupling frequency, nad has valid values: hour,day,year,decade - default value set by buildnml to be the pause interval if pause is active - otherwise, it is set to the shortest component coupling time - - - -999 - - - - - integer - time - seq_timemgr_inparm - - atm coupling interval offset in seconds default: 0 - - - 0 - - - - - integer - time - seq_timemgr_inparm - - lnd coupling interval offset in seconds default: 0 - - - 0 - - - - - integer - time - seq_timemgr_inparm - - ice coupling interval offset in seconds default: 0 - - - 0 - - - - - integer - time - seq_timemgr_inparm - - ocn coupling interval offset in seconds default: 0 - - - 0 - - - - - integer - time - seq_timemgr_inparm - - glc coupling interval offset in seconds default: 0 - - - 0 - - - - - integer - time - seq_timemgr_inparm - - wav coupling interval offset in seconds default: 0 - - - 0 - - - - - integer - time - seq_timemgr_inparm - - iac coupling interval offset in seconds default: 0 - - - 0 - - - - - integer - time - seq_timemgr_inparm - - esp coupling interval offset in seconds default: 0 - - - 0 - - - - - logical - time - seq_timemgr_inparm - - true => ESP component runs after driver 'pause cycle' If any - component 'pauses' (see PAUSE_OPTION, - PAUSE_N and DATA_ASSIMILATION_XXX XML - variables), the ESP component (if present) will be run to - process the component 'pause' (restart) files and set any - required 'resume' signals. If true, esp_cpl_dt and - esp_cpl_offset settings are ignored. default: true - - - .true. - - - - - char - time - seq_timemgr_inparm - NO_LEAP,GREGORIAN - - calendar in use. [NO_LEAP, GREOGORIAN]. - set by CALENDAR in env_build.xml - - - $CALENDAR - - - - - integer - time - seq_timemgr_inparm - - Run start date in yyyymmdd format, only used for startup and hybrid runs. 
- default: 00010101 - - - 00010101 - - - - - integer - time - seq_timemgr_inparm - - Start time-of-day in universal time (seconds), should be between zero and 86400 - default: 0 - - - $START_TOD - - - - - char - time - seq_timemgr_inparm - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,monthly,nmonths,nmonth,nyears,nyear,date,ifdays0,end - - sets the run length with stop_n and stop_ymd - stop_option alarms are: - [none/never], turns option off - [nstep/s] , stops every stop_n nsteps , relative to current run start time - [nsecond/s] , stops every stop_n nseconds, relative to current run start time - [nminute/s] , stops every stop_n nminutes, relative to current run start time - [nhour/s] , stops every stop_n nhours , relative to current run start time - [nday/s] , stops every stop_n ndays , relative to current run start time - [nmonth/s] , stops every stop_n nmonths , relative to current run start time - [monthly/s] , stops every month , relative to current run start time - [nyear/s] , stops every stop_n nyears , relative to current run start time - [date] , stops at stop_ymd value - [ifdays0] , stops at stop_n calendar day value and seconds equal 0 - [end] , stops at end - - - $STOP_OPTION - - - - - integer - time - seq_timemgr_inparm - - Sets the run length with stop_option and stop_ymd - - - $STOP_N - - - - - integer - time - seq_timemgr_inparm - - date in yyyymmdd format, sets the run length with stop_option and stop_n, - can be in addition to stop_option and stop_n, negative value implies off - - - $STOP_DATE - - - - - char - expdef - seq_infodata_inparm - - - Driver restart filename. - (NOTE: Normally THIS IS NOT USED -- Set with RUN_REFCASE and RUN_REFDATE) - - - str_undefined - - - - - char - time - seq_timemgr_inparm - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,monthly,nmonth,nyears,nyear,date,ifdays0,end - - sets the restart frequency with restart_n and restart_ymd - restart_option alarms are: - [none/never], turns option off - [nstep/s] , restarts every restart_n nsteps , relative to current run start time - [nsecond/s] , restarts every restart_n nseconds, relative to current run start time - [nminute/s] , restarts every restart_n nminutes, relative to current run start time - [nhour/s] , restarts every restart_n nhours , relative to current run start time - [nday/s] , restarts every restart_n ndays , relative to current run start time - [monthly/s] , restarts every month , relative to current run start time - [nmonth/s] , restarts every restart_n nmonths , relative to current run start time - [nyear/s] , restarts every restart_n nyears , relative to current run start time - [date] , restarts at restart_ymd value - [ifdays0] , restarts at restart_n calendar day value and seconds equal 0 - [end] , restarts at end - - - $REST_OPTION - - - - - integer - time - seq_timemgr_inparm - - Sets model restart writes with restart_option and restart_ymd (same options as stop_n) - - - $REST_N - - - - - integer - time - seq_timemgr_inparm - - Date in yyyymmdd format, sets model restart write date with rest_option and restart_n - default: STOP_N - - - $REST_DATE - - - - - logical - time - seq_timemgr_inparm - - true => write restarts at end of run - forces a restart write at the end of the run in addition to any - setting associated with rest_option. default=true. this setting - will be set to false if restart_option is none or never. - default: false - - - .false. 
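The stop_option/stop_n and restart_option/restart_n alarms documented just above are normally filled from the STOP_OPTION, STOP_N, REST_OPTION, and REST_N xml variables; purely as an illustrative sketch, the resolved seq_timemgr_inparm fragment might contain:

    &seq_timemgr_inparm
      stop_option    = 'ndays'
      stop_n         = 5
      restart_option = 'monthly'
    /

with the run ending after 5 model days and coupler restart files written every model month, both relative to the current run start time.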
- - - - - char - time - seq_timemgr_inparm - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,monthly,nmonths,nmonth,nyears,nyear,date,ifdays0,end - - coupler history snapshot option (used with history_n and history_ymd) - set by HIST_OPTION in env_run.xml. - history_option alarms are: - [none/never], turns option off - [nstep/s] , history snapshot every history_n nsteps , relative to current run start time - [nsecond/s] , history snapshot every history_n nseconds, relative to current run start time - [nminute/s] , history snapshot every history_n nminutes, relative to current run start time - [nhour/s] , history snapshot every history_n nhours , relative to current run start time - [nday/s] , history snapshot every history_n ndays , relative to current run start time - [monthly/s] , history snapshot every month , relative to current run start time - [nmonth/s] , history snapshot every history_n nmonths , relative to current run start time - [nyear/s] , history snapshot every history_n nyears , relative to current run start time - [date] , history snapshot at history_ymd value - [ifdays0] , history snapshot at history_n calendar day value and seconds equal 0 - [end] , history snapshot at end - - - $HIST_OPTION - - - - - integer - time - seq_timemgr_inparm - - sets coupler snapshot history file frequency (like restart_n) - set by HIST_N in env_run.xml. - - - $HIST_N - - - - - integer - time - seq_timemgr_inparm - - date associated with history_option date. yyyymmdd format. - set by HIST_DATE in env_run.xml. - - - $HIST_DATE - - - - - char - time - seq_timemgr_inparm - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,monthly,nmonths,nmonth,nyears,nyear,date,ifdays0,end - - coupler time average history option (used with histavg_n and histavg_ymd) - set by AVGHIST_OPTION in env_run.xml. - histavg_option alarms are: - [none/never], turns option off - [nstep/s] , history snapshot every histavg_n nsteps , relative to current run start time - [nsecond/s] , history snapshot every histavg_n nseconds, relative to current run start time - [nminute/s] , history snapshot every histavg_n nminutes, relative to current run start time - [nhour/s] , history snapshot every histavg_n nhours , relative to current run start time - [nday/s] , history snapshot every histavg_n ndays , relative to current run start time - [monthly/s] , history snapshot every month , relative to current run start time - [nmonth/s] , history snapshot every histavg_n nmonths , relative to current run start time - [nyear/s] , history snapshot every histavg_n nyears , relative to current run start time - [date] , history snapshot at histavg_ymd value - [ifdays0] , history snapshot at histavg_n calendar day value and seconds equal 0 - [end] , history snapshot at end - - - $AVGHIST_OPTION - - - - - integer - time - seq_timemgr_inparm - - Sets coupler time-average history file frequency (like restart_option) - set by AVGHIST_N in env_run.xml. - - - $AVGHIST_N - - - - - integer - time - seq_timemgr_inparm - - date associated with histavg_option date. yyyymmdd format. - set by AVGHIST_DATE in env_run.xml. 
- - - $AVGHIST_DATE - - - - - char - time - seq_timemgr_inparm - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,nmonths,monthly,nmonth,nyears,nyear,date,ifdays0,end - - sets the driver barrier frequency to sync models across all tasks with barrier_n and barrier_ymd - barrier_option alarms are like restart_option - default: never - - - $BARRIER_OPTION - - - - - integer - time - seq_timemgr_inparm - - Sets model barriers with barrier_option and barrier_ymd (same options as stop_n) - default: 1 - - - $BARRIER_N - - - - - integer - time - seq_timemgr_inparm - - Date in yyyymmdd format, sets model barriers date with barrier_option and barrier_n - - - $BARRIER_DATE - - - - - char - time - seq_timemgr_inparm - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,monthly,nmonths,nmonth,nyears,nyear,date,ifdays0,end - - Sets timing output file frequency (like rest_option but relative to run start date) - tprof_option alarms are: - [none/never], turns option off - [nstep/s] , every tprof_n nsteps , relative to current run start time - [nsecond/s] , every tprof_n nseconds, relative to current run start time - [nminute/s] , every tprof_n nminutes, relative to current run start time - [nhour/s] , every tprof_n nhours , relative to current run start time - [nday/s] , every tprof_n ndays , relative to current run start time - [monthly/s] , every month , relative to current run start time - [nmonth/s] , every tprof_n nmonths , relative to current run start time - [nyear/s] , every tprof_n nyears , relative to current run start time - [date] , at tprof_ymd value - [ifdays0] , at tprof_n calendar day value and seconds equal 0 - [end] , at end - - - never - - - - - integer - time - seq_timemgr_inparm - - Sets timing output file frequency (like restart_n) - - - -999 - - - - - integer - time - seq_timemgr_inparm - - yyyymmdd format, sets timing output file date (like restart_date) - - - -999 - - - - - char - time - seq_timemgr_inparm - none,never,nsteps,nstep,nseconds,nsecond,nminutes,nminute,nhours,nhour,ndays,nday,monthly,nmonths,nmonth,nyears,nyear - - sets the pause frequency with pause_n - pause_option alarms are: - [none/never], turns option off - [nstep/s] , pauses every pause_n nsteps , relative to start or last pause time - [nsecond/s] , pauses every pause_n nseconds, relative to start or last pause time - [nminute/s] , pauses every pause_n nminutes, relative to start or last pause time - [nhour/s] , pauses every pause_n nhours , relative to start or last pause time - [nday/s] , pauses every pause_n ndays , relative to start or last pause time - [nmonth/s] , pauses every pause_n nmonths , relative to start or last pause time - [monthly/s] , pauses every month , relative to start or last pause time - [nyear/s] , pauses every pause_n nyears , relative to start or last pause time - - - $PAUSE_OPTION - - - - - integer - time - seq_timemgr_inparm - - Sets the pause frequency with pause_option - - - $PAUSE_N - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component atm - - - $PAUSE_ACTIVE_ATM - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component CPL - - - $PAUSE_ACTIVE_CPL - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component ocn - - - $PAUSE_ACTIVE_OCN - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component wav - - - $PAUSE_ACTIVE_WAV - - - - - logical - time - seq_timemgr_inparm 
- - Whether Pause signals are active for component iac - - - $PAUSE_ACTIVE_IAC - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component glc - - - $PAUSE_ACTIVE_GLC - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component rof - - - $PAUSE_ACTIVE_ROF - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component ice - - - $PAUSE_ACTIVE_ICE - - - - - logical - time - seq_timemgr_inparm - - Whether Pause signals are active for component lnd - - - $PAUSE_ACTIVE_LND - - - - - char - driver - seq_infodata_inparm - - - Ending suffix "postfix" for output log files. - - - .log - - - - - char - drv_history - seq_infodata_inparm - - - Root directory for driver output files - - - ./ - - - - - real - driver - seq_infodata_inparm - - Abort model if coupler timestep wallclock time exceeds this value, ignored if 0, - if < 0 then use abs(max_cplstep_time)*cktime as the threshold. - default: 0 - - - 0.0 - - - - - - - - - char - driver - esmf_inparm - ESMF_LOGKIND_SINGLE,ESMF_LOGKIND_MULTI,ESMF_LOGKIND_MULTI_ON_ERROR,ESMF_LOGKIND_NONE - - Specify type of ESMF logging - - - $ESMF_LOGFILE_KIND - - - - - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the atm components. - set by NTASKS_ATM in env_configure.xml. - - - $NTASKS_ATM - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the atm component. - set by NTHRDS_ATM in env_configure.xml. - - - $NTHRDS_ATM - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the atm component. - set by ROOTPE_ATM in env_configure.xml. - - - $ROOTPE_ATM - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the atm component. - set by PSTRID_ATM in env_configure.xml. - - - $PSTRID_ATM - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance atms (if there are more than 1) - - - $NINST_ATM_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the lnd components. - set by NTASKS_LND in env_configure.xml. - - - $NTASKS_LND - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the lnd component. - set by NTHRDS_LND in env_configure.xml. - - - $NTHRDS_LND - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the lnd component. - set by ROOTPE_LND in env_configure.xml. - - - $ROOTPE_LND - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the lnd component. - set by PSTRID_LND in env_configure.xml. - - - $PSTRID_LND - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance lnds (if there are more than 1) - - - $NINST_LND_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the ice components. - set by NTASKS_ICE in env_configure.xml. - - - $NTASKS_ICE - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the ice component. - set by NTHRDS_ICE in env_configure.xml. - - - $NTHRDS_ICE - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the ice component. - set by ROOTPE_ICE in env_configure.xml. - - - $ROOTPE_ICE - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the ice component. 
- set by PSTRID_ICE in env_configure.xml. - - - $PSTRID_ICE - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance ices (if there are more than 1) - - - $NINST_ICE_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the ocn components. - set by NTASKS_OCN in env_configure.xml. - - - $NTASKS_OCN - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the ocn component. - set by NTHRDS_OCN in env_configure.xml. - - - $NTHRDS_OCN - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the ocn component. - set by ROOTPE_OCN in env_configure.xml. - - - $ROOTPE_OCN - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the ocn component. - set by PSTRID_OCN in env_configure.xml. default: 1 - - - $PSTRID_OCN - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance ocns (if there are more than 1) - - - $NINST_OCN_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the glc components. - set by NTASKS_GLC in env_configure.xml. - - - $NTASKS_GLC - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the glc component. - set by NTHRDS_GLC in env_configure.xml. - - - $NTHRDS_GLC - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the glc component. - set by ROOTPE_GLC in env_configure.xml. - - - $ROOTPE_GLC - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the glc component. - set by PSTRID_GLC in env_configure.xml. - - - $PSTRID_GLC - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance glcs (if there are more than 1) - - - $NINST_GLC_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the wav components. - set by NTASKS_WAV in env_configure.xml. - - - $NTASKS_WAV - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the wav component. - set by NTHRDS_WAV in env_configure.xml. - - - $NTHRDS_WAV - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the wav component. - set by ROOTPE_WAV in env_configure.xml. - - - $ROOTPE_WAV - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the wav component. - set by PSTRID_WAV in env_configure.xml. - - - $PSTRID_WAV - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance wavs (if there are more than 1) - - - $NINST_WAV_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the iac components. - set by NTASKS_IAC in env_configure.xml. - - - $NTASKS_IAC - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the iac component. - set by NTHRDS_IAC in env_configure.xml. - - - $NTHRDS_IAC - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the iac component. - set by ROOTPE_IAC in env_configure.xml. - - - $ROOTPE_IAC - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the iac component. - set by PSTRID_IAC in env_configure.xml. 
- - - $PSTRID_IAC - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance iacs (if there are more than 1) - - - $NINST_IAC_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the lnd components. - set by NTASKS_LND in env_configure.xml. - - - $NTASKS_ROF - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the lnd component. - set by NTHRDS_ROF in env_configure.xml. - - - $NTHRDS_ROF - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the lnd component. - set by ROOTPE_LND in env_configure.xml. - - - $ROOTPE_ROF - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the lnd component. - set by PSTRID_LND in env_configure.xml. - - - $PSTRID_ROF - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance lnds (if there are more than 1) - - - $NINST_ROF_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the esp components. - set by NTASKS_ESP in env_configure.xml. - - - $NTASKS_ESP - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the esp component. - set by NTHRDS_ESP in env_configure.xml. - - - $NTHRDS_ESP - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the esp component. - set by ROOTPE_ESP in env_configure.xml. - - - $ROOTPE_ESP - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the esp component. - set by PSTRID_ESP in env_configure.xml. - - - $PSTRID_ESP - - - - - char - cime_pes - cime_pes - concurrent,sequential - - Layout of multi-instance external system processor (if there are more than 1) - - - $NINST_ESP_LAYOUT - - - - - integer - cime_pes - cime_pes - - the number of mpi tasks assigned to the cpl components. - set by NTASKS_CPL in env_configure.xml. - - - $NTASKS_CPL - - - - - integer - cime_pes - cime_pes - - the number of threads per mpi task for the cpl component. - set by NTHRDS_CPL in env_configure.xml. - - - $NTHRDS_CPL - - - - - integer - cime_pes - cime_pes - - the global mpi task rank of the root processor assigned to the cpl component. - set by ROOTPE_CPL in env_configure.xml. - - - $ROOTPE_CPL - - - - - integer - cime_pes - cime_pes - - the mpi global processors stride associated with the mpi tasks for the cpl component. - set by PSTRID_CPL in env_configure.xml. - - - $PSTRID_CPL - - - - - integer - cime_pes - cime_pes - - level of task-to-node mapping output for the whole model - (0: no output; 1: compact; 2: verbose) - - - $INFO_TASKMAP_MODEL - - - - - integer - cime_pes - cime_pes - - level of task-to-node mapping output for individual component models - (0: no output; 1: compact; 2: verbose) - - - $INFO_TASKMAP_COMP - - - - - - - - - - logical - performance - prof_inparm - - - - .true. - - - - - logical - performance - prof_inparm - - - - .false. - - - - - logical - performance - prof_inparm - - - - .false. - .true. - - - - - logical - performance - prof_inparm - - - - .false. - - - - - integer - performance - prof_inparm - - - - $TIMER_LEVEL - - - - - integer - performance - prof_inparm - - - - 0 - - - - - integer - performance - prof_inparm - - - - $TIMER_DETAIL - - - - - integer - performance - prof_inparm - - - - 4 - 2 - 1 - 3 - - - - - logical - performance - prof_inparm - - default: .false. - - - .false. 
- - - - - logical - performance - prof_inparm - - default: .false. - - - .false. - - - - - integer - performance - prof_inparm - - default: 1 - - - 1 - - - - - logical - performance - prof_inparm - - default: .false. - - - $PROFILE_PAPI_ENABLE - - - - - - - - - - char - performance - papi_inparm - - See gptl_papi.c for the list of valid values - - - PAPI_FP_OPS - - - - - char - performance - papi_inparm - - See gptl_papi.c for the list of valid values - - - PAPI_NO_CTR - - - - - char - performance - papi_inparm - - See gptl_papi.c for the list of valid values - - - PAPI_NO_CTR - - - - - char - performance - papi_inparm - - See gptl_papi.c for the list of valid values - - - PAPI_NO_CTR - - - - - - - - - logical - pio - pio_default_inparm - - future asynchronous IO capability (not currently supported). - If pio_async_interface is .true. or {component}_PIO_* variable is not set or set to -99 - the component variable will be set using the pio_* value. - default: .false. - - - $PIO_ASYNC_INTERFACE - - - - - integer - pio - pio_default_inparm - 0,1,2,3,4,5,6 - - pio debug level - valid values: 0,1,2,3,4,5,6 - - - $PIO_DEBUG_LEVEL - - - - - integer - pio - pio_default_inparm - - blocksize for pio box rearranger - - - $PIO_BLOCKSIZE - - - - - integer - pio - pio_default_inparm - - pio buffer size limit - - - $PIO_BUFFER_SIZE_LIMIT - - - - - char - pio - pio_default_inparm - p2p,coll,default - - pio rearranger communication type. - valid values: p2p, coll, default - - - $PIO_REARR_COMM_TYPE - - - - - char - pio - pio_default_inparm - 2denable,io2comp,comp2io,disable,default - - pio rearranger communication flow control direction. - - - $PIO_REARR_COMM_FCD - - - - - integer - pio - pio_default_inparm - - pio rearranger communication max pending req (comp2io) - - - $PIO_REARR_COMM_MAX_PEND_REQ_COMP2IO - - - - - logical - pio - pio_default_inparm - - pio rearranger communication option: Enable handshake (comp2io) - - - $PIO_REARR_COMM_ENABLE_HS_COMP2IO - - - - - logical - pio - pio_default_inparm - - pio rearranger communication option: Enable isends (comp2io) - - - $PIO_REARR_COMM_ENABLE_ISEND_COMP2IO - - - - - integer - pio - pio_default_inparm - - pio rearranger communication max pending req (io2comp) - - - $PIO_REARR_COMM_MAX_PEND_REQ_IO2COMP - - - - - logical - pio - pio_default_inparm - - pio rearranger communication option: Enable handshake (io2comp) - - - $PIO_REARR_COMM_ENABLE_HS_IO2COMP - - - - - logical - pio - pio_default_inparm - - pio rearranger communication option: Enable isends (io2comp) - default: .false. - - - $PIO_REARR_COMM_ENABLE_ISEND_IO2COMP - - - - - - - - - char - mapping - abs - seq_maps - - atm to ocn flux mapping file for fluxes - - - $ATM2OCN_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2OCN_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to ocn state mapping file for states - - - $ATM2OCN_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. 
- - - $ATM2OCN_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to ocn state mapping file for velocity - - - $ATM2OCN_VMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2OCN_VMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ocn to atm mapping file for fluxes - - - $OCN2ATM_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $OCN2ATM_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ocn to atm mapping file for states - - - $OCN2ATM_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $OCN2ATM_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to ice flux mapping file for fluxes - - - $ATM2OCN_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2OCN_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to ice state mapping file for states - - - $ATM2OCN_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2OCN_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to ice state mapping file for velocity - - - $ATM2OCN_VMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2OCN_VMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ice to atm mapping file for fluxes - - - $OCN2ATM_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $OCN2ATM_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ice to atm mapping file for states - - - $OCN2ATM_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. 
- X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $OCN2ATM_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to land mapping file for fluxes - - - $ATM2LND_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2LND_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to land mapping file for states - - - $ATM2LND_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2LND_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - land to atm mapping file for fluxes - - - $LND2ATM_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $LND2ATM_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - land to atm mapping file for states - - - $LND2ATM_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $LND2ATM_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - lnd to runoff conservative mapping file - - - $LND2ROF_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $LND2ROF_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - runoff to lnd conservative mapping file - - - $ROF2LND_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ROF2LND_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - runoff to ocn area overlap conservative mapping file - - - $ROF2OCN_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. 
- - - $ROF2OCN_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to ocn flux mapping file for fluxes - - - $GLC2OCN_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $GLC2OCN_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to ocn state mapping file for states - - - $GLC2OCN_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $GLC2OCN_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to ocn runoff conservative mapping file for liquid runoff - - - $GLC2OCN_LIQ_RMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $GLC2OCN_LIQ_RMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to ocn runoff conservative mapping file for ice runoff - - - $GLC2OCN_ICE_RMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $GLC2OCN_ICE_RMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ocn to glc flux mapping file for fluxes - - - $OCN2GLC_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $OCN2GLC_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ocn to glc state mapping file for states - - - $OCN2GLC_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $OCN2GLC_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to ice runoff conservative mapping file - - - $GLC2ICE_RMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. 
- - - $GLC2ICE_RMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - runoff to ocn nearest neighbor plus smoothing conservative mapping file - - - $ROF2OCN_LIQ_RMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ROF2OCN_LIQ_RMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - runoff to ocn nearest neighbor plus smoothing conservative mapping file - - - $ROF2OCN_ICE_RMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ROF2OCN_ICE_RMAPTYPE - X - - - - - - char - mapping - abs - seq_maps - - land to glc mapping file for fluxes - - - $LND2GLC_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $LND2GLC_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - land to glc mapping file for states - - - $LND2GLC_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $LND2GLC_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to land mapping file for fluxes - - - $GLC2LND_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $GLC2LND_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to land mapping file for states - - - $GLC2LND_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $GLC2LND_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to ice flux mapping file for fluxes - - - $GLC2ICE_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. 
- - - $GLC2ICE_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - glc to ice state mapping file for states - - - $GLC2ICE_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $GLC2ICE_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - atm to wav state mapping file for states - - - $ATM2WAV_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ATM2WAV_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ocn to wav state mapping file for states - - - $OCN2WAV_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $OCN2WAV_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - ice to wav state mapping file for states - - - $ICE2WAV_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $ICE2WAV_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - wav to ocn state mapping file for states - - - $WAV2OCN_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $WAV2OCN_SMAPTYPE - X - - - - - char(10) - drv_physics - default_settings - - List of files to merge together that contains drv_flds_in namelists - The paths are relative to the case directory. drv_flds_in include the namelists that - the driver reads and gives information on additional fields to be passed to different - components that need to look at the same data. - - - Buildconf/camconf/drv_flds_in,Buildconf/clmconf/drv_flds_in - - - - - char - mapping - abs - seq_maps - - iac to atm mapping file for fluxes - - - $IAC2ATM_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $IAC2ATM_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - iac to atm mapping file for states - - - $IAC2ATM_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. 
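The drv_flds_in file list shown a little earlier is a single comma-separated string of paths relative to the case directory. A minimal sketch of expanding that string follows; the caseroot path is a placeholder, and the actual merging of the namelist groups is handled by CIME's namelist tooling, not by this snippet.

# Sketch only: expand the comma-separated drv_flds_in file list
# relative to a (hypothetical) case directory.
import os

drv_flds_in_files = "Buildconf/camconf/drv_flds_in,Buildconf/clmconf/drv_flds_in"
caseroot = "/path/to/case"  # placeholder

paths = [os.path.join(caseroot, p) for p in drv_flds_in_files.split(",") if p]
for p in paths:
    print(p)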
Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $IAC2ATM_SMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - iac to lnd mapping file for fluxes - - - $IAC2LND_FMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $IAC2LND_FMAPTYPE - X - - - - - char - mapping - abs - seq_maps - - iac to lnd mapping file for states - - - $IAC2LND_SMAPNAME - - - - - char - mapping - seq_maps - - The type of mapping desired, either "source" or "destination" mapping. - X is associated with rearrangement of the source grid to the - destination grid and then local mapping. Y is associated with mapping - on the source grid and then rearrangement and sum to the destination - grid. - - - $IAC2LND_SMAPTYPE - X - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component atm - - - $DATA_ASSIMILATION_ATM - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component CPL - - - $DATA_ASSIMILATION_CPL - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component ocn - - - $DATA_ASSIMILATION_OCN - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component wav - - - $DATA_ASSIMILATION_WAV - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component iac - - - $DATA_ASSIMILATION_IAC - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component glc - - - $DATA_ASSIMILATION_GLC - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component rof - - - $DATA_ASSIMILATION_ROF - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component ice - - - $DATA_ASSIMILATION_ICE - - - - - logical - data_assimilation - seq_timemgr_inparm - - Whether Data Assimilation is on for component lnd - - - $DATA_ASSIMILATION_LND - - - - diff --git a/src/drivers/mct/cime_config/namelist_definition_drv_flds.xml b/src/drivers/mct/cime_config/namelist_definition_drv_flds.xml deleted file mode 100644 index 08847103afd..00000000000 --- a/src/drivers/mct/cime_config/namelist_definition_drv_flds.xml +++ /dev/null @@ -1,148 +0,0 @@ - - - - - - - - - - - - - - char - abs - drv_flds_in - megan_emis_nl - - File containing MEGAN emissions factors. Includes the list of MEGAN compounds that can be - used in the Comp_Name variable on the file. - - - - - char(100) - drv_flds_in - megan_emis_nl - - MEGAN specifier. This is in the form of: Chem-compound = megan_compound(s) - where megan_compound(s) can be the sum of megan compounds with a "+" between them. - In each equation, the item to the left of the equal sign is a CAM chemistry compound, the - items to the right are compounds known to the MEGAN model (single or combinations). - For example: megan_specifier = 'ISOP = isoprene', 'C10H16 = pinene_a + carene_3 + thujene_a' - - - - - logical - drv_flds_in - megan_emis_nl - - MEGAN mapped isoprene emissions factors switch - If TRUE then use mapped MEGAN emissions factors for isoprene. 
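The megan_specifier format documented above ("CAM compound = MEGAN compound(s)", with "+" between summed MEGAN compounds) is simple enough to show as a small parser sketch. This only illustrates the documented string format; it is not code from CIME.

# Sketch: split a megan_specifier entry into the CAM chemistry compound
# and the list of MEGAN compounds summed on the right-hand side.
def parse_megan_specifier(entry):
    cam_compound, rhs = entry.split("=", 1)
    megan_compounds = [c.strip() for c in rhs.split("+")]
    return cam_compound.strip(), megan_compounds

print(parse_megan_specifier("ISOP = isoprene"))
# ('ISOP', ['isoprene'])
print(parse_megan_specifier("C10H16 = pinene_a + carene_3 + thujene_a"))
# ('C10H16', ['pinene_a', 'carene_3', 'thujene_a'])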
- - - - - char(150) - drv_flds_in - drv_physics - - List of possible MEGAN compounds to use - (the list used by the simulation is on the megan_factors_file as the Comp_Name) - - - - - - - - - char - dry-deposition - drydep_inparm - xactive_lnd,xactive_atm,table - - Where dry deposition is calculated (from land, atmosphere, or from a table) - This specifies the method used to calculate dry - deposition velocities of gas-phase chemical species. The available methods are: - 'table' - prescribed method in CAM - 'xactive_atm' - interactive method in CAM - 'xactive_lnd' - interactive method in CLM - - - - - char(300) - dry-deposition - drydep_inparm - - List of species that undergo dry deposition. - - - - - - - - - char(2) - nitrogen deposition - ndep_inparm - - List of nitrogen deposition fluxes to be sent from CAM to surfae models. - - - - - - - - - char - abs - Fire_emissions - fire_emis_nl - - File containing fire emissions factors. - - - - - char(100) - Fire_emissions - fire_emis_nl - - Fire emissions specifier. - - - - - logical - Fire_emissions - fire_emis_nl - - If ture fire emissions are input into atmosphere as elevated forcings. - Otherwise they are treated as surface emissions. - - - - - - - - - char - carma - carma_inparm - - List of fluxes needed by the CARMA model, from CLM to CAM. - - - - diff --git a/src/drivers/mct/cime_config/namelist_definition_modelio.xml b/src/drivers/mct/cime_config/namelist_definition_modelio.xml deleted file mode 100644 index 660bc93dee3..00000000000 --- a/src/drivers/mct/cime_config/namelist_definition_modelio.xml +++ /dev/null @@ -1,212 +0,0 @@ - - - - - - - - - - - - - - integer - pio - pio_inparm - - stride of tasks in pio used generically, component based value takes precedent. - - - $CPL_PIO_STRIDE - $ATM_PIO_STRIDE - $LND_PIO_STRIDE - $OCN_PIO_STRIDE - $ICE_PIO_STRIDE - $ROF_PIO_STRIDE - $GLC_PIO_STRIDE - $WAV_PIO_STRIDE - $IAC_PIO_STRIDE - -99 - - - - - integer - pio - pio_inparm - - io task root in pio used generically, component based value takes precedent. - - - $CPL_PIO_ROOT - $ATM_PIO_ROOT - $LND_PIO_ROOT - $OCN_PIO_ROOT - $ICE_PIO_ROOT - $ROF_PIO_ROOT - $GLC_PIO_ROOT - $WAV_PIO_ROOT - $IAC_PIO_ROOT - -99 - - - - - integer - pio - pio_inparm - -99,1,2 - - Rearranger method for pio 1=box, 2=subset. - - - $CPL_PIO_REARRANGER - $ATM_PIO_REARRANGER - $LND_PIO_REARRANGER - $OCN_PIO_REARRANGER - $ICE_PIO_REARRANGER - $ROF_PIO_REARRANGER - $GLC_PIO_REARRANGER - $WAV_PIO_REARRANGER - $IAC_PIO_REARRANGER - -99 - - - - - integer - pio - pio_inparm - - number of io tasks in pio used generically, component based value takes precedent. - - - $CPL_PIO_NUMTASKS - $ATM_PIO_NUMTASKS - $LND_PIO_NUMTASKS - $OCN_PIO_NUMTASKS - $ICE_PIO_NUMTASKS - $ROF_PIO_NUMTASKS - $GLC_PIO_NUMTASKS - $WAV_PIO_NUMTASKS - $IAC_PIO_NUMTASKS - -99 - - - - - char*64 - pio - pio_inparm - netcdf,pnetcdf,netcdf4p,netcdf4c,default - - io type in pio used generically, component based value takes precedent. - valid values: netcdf, pnetcdf, netcdf4p, netcdf4c, default - - - $CPL_PIO_TYPENAME - $ATM_PIO_TYPENAME - $LND_PIO_TYPENAME - $OCN_PIO_TYPENAME - $ICE_PIO_TYPENAME - $ROF_PIO_TYPENAME - $GLC_PIO_TYPENAME - $WAV_PIO_TYPENAME - $IAC_PIO_TYPENAME - nothing - - - - - char*64 - pio - pio_inparm - classic,64bit_offset,64bit_data - - format of netcdf files created by pio, ignored if - PIO_TYPENAME is netcdf4p or netcdf4c. 
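The PIO_ROOT, PIO_STRIDE and PIO_NUMTASKS settings above together pick which MPI ranks act as I/O tasks for a component. A rough sketch of the usual interpretation (I/O ranks at root, root+stride, root+2*stride, ...); this layout is an assumption for illustration, not PIO source code.

# Rough sketch: which MPI ranks serve as I/O tasks for a given
# PIO_ROOT / PIO_STRIDE / PIO_NUMTASKS combination.
def pio_io_ranks(total_ranks, pio_root, pio_stride, pio_numtasks):
    ranks = [pio_root + i * pio_stride for i in range(pio_numtasks)]
    return [r for r in ranks if r < total_ranks]

# e.g. 128 MPI tasks, I/O on every 16th rank starting at rank 1:
print(pio_io_ranks(128, pio_root=1, pio_stride=16, pio_numtasks=8))
# [1, 17, 33, 49, 65, 81, 97, 113]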
64bit_data only - supported in netcdf 4.4.0 or newer - - - $CPL_PIO_NETCDF_FORMAT - $ATM_PIO_NETCDF_FORMAT - $LND_PIO_NETCDF_FORMAT - $OCN_PIO_NETCDF_FORMAT - $ICE_PIO_NETCDF_FORMAT - $ROF_PIO_NETCDF_FORMAT - $GLC_PIO_NETCDF_FORMAT - $WAV_PIO_NETCDF_FORMAT - $IAC_PIO_NETCDF_FORMAT - - - - - - - - - char*256 - modelio - modelio - input directory (no longer needed) - - UNSET - - - - - char*256 - modelio - modelio - directory for output log files - - UNSET - - - - - char*256 - modelio - modelio - name of component output log file - - UNSET - - - - diff --git a/src/drivers/mct/cime_config/testdefs/testlist_drv.xml b/src/drivers/mct/cime_config/testdefs/testlist_drv.xml deleted file mode 100644 index 78ce6e4fed6..00000000000 --- a/src/drivers/mct/cime_config/testdefs/testlist_drv.xml +++ /dev/null @@ -1,661 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/5steps/shell_commands b/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/5steps/shell_commands deleted file mode 100644 index c72c4cfb820..00000000000 --- a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/5steps/shell_commands +++ /dev/null @@ -1,2 +0,0 @@ -./xmlchange STOP_OPTION="nsteps" - diff --git a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/default/shell_commands b/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/default/shell_commands deleted file mode 100755 index 180e38db21d..00000000000 --- a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/default/shell_commands +++ /dev/null @@ -1,2 +0,0 @@ -./xmlchange HIST_OPTION=ndays -./xmlchange HIST_N=1 diff --git a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/som/shell_commands b/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/som/shell_commands deleted file mode 100644 index f3a70e7e662..00000000000 --- a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/som/shell_commands +++ /dev/null @@ -1,2 +0,0 @@ -./xmlchange DOCN_SOM_FILENAME="pop_frc.1x1d.090130.nc" - diff --git a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/README b/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/README deleted file mode 100644 index 4b028c20694..00000000000 --- 
a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/README +++ /dev/null @@ -1,5 +0,0 @@ -This tests the ability to use 6-digit years. - -As of the time this test was created, the max year is about 214747 - -otherwise we exceed the limit of 4-byte integers when storing dates as -integers (yyyyyymmdd). diff --git a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/shell_commands b/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/shell_commands deleted file mode 100644 index 1f1324a745a..00000000000 --- a/src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/y100k/shell_commands +++ /dev/null @@ -1 +0,0 @@ -./xmlchange RUN_STARTDATE=99999-12-28 diff --git a/src/drivers/mct/cime_config/user_nl_cpl b/src/drivers/mct/cime_config/user_nl_cpl deleted file mode 100644 index a2095360793..00000000000 --- a/src/drivers/mct/cime_config/user_nl_cpl +++ /dev/null @@ -1,19 +0,0 @@ -!------------------------------------------------------------------------ -! Users should ONLY USE user_nl_cpl to change namelists variables -! for namelist variables in drv_in (except for the ones below) and -! any keyword/values in seq_maps.rc -! Users should add ALL user specific namelist and seq_maps.rc changes below -! using the following syntax -! namelist_var = new_namelist_value -! or -! mapname = new_map_name -! For example to change the default value of ocn2atm_fmapname to 'foo' use -! ocn2atm_fmapname = 'foo' -! -! Note that some namelist variables MAY NOT be changed in user_nl_cpl - -! they are defined in a $CASEROOT xml file and must be changed with -! xmlchange. -! -! For example, rather than set username to 'foo' in user_nl_cpl, call -! ./xmlchange USER=foo -!------------------------------------------------------------------------ diff --git a/src/drivers/mct/main/CMakeLists.txt b/src/drivers/mct/main/CMakeLists.txt deleted file mode 100644 index d18de153ec8..00000000000 --- a/src/drivers/mct/main/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -list(APPEND drv_sources - component_type_mod.F90 - map_glc2lnd_mod.F90 - map_lnd2rof_irrig_mod.F90 - seq_map_mod.F90 - seq_map_type_mod.F90 - ) - -sourcelist_to_parent(drv_sources) diff --git a/src/drivers/mct/main/cime_comp_mod.F90 b/src/drivers/mct/main/cime_comp_mod.F90 deleted file mode 100644 index f109d1e799b..00000000000 --- a/src/drivers/mct/main/cime_comp_mod.F90 +++ /dev/null @@ -1,4595 +0,0 @@ -module cime_comp_mod - - !------------------------------------------------------------------------------- - ! - ! Purpose: Main program for CIME cpl7. Can have different - ! land, sea-ice, and ocean models plugged in at compile-time. - ! These models can be either: stub, dead, data, or active - ! components or some combination of the above. - ! - ! stub -------- Do nothing. - ! dead -------- Send analytic data back. - ! data -------- Send data back interpolated from input files. - ! prognostic -- Prognostically simulate the given component. - ! - ! Method: Call appropriate initialization, run (time-stepping), and - ! finalization routines. - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! 
share code & libs - !---------------------------------------------------------------------------- - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use shr_const_mod, only: shr_const_cday - use shr_file_mod, only: shr_file_setLogLevel, shr_file_setLogUnit - use shr_file_mod, only: shr_file_setIO, shr_file_getUnit, shr_file_freeUnit - use shr_scam_mod, only: shr_scam_checkSurface - use shr_map_mod, only: shr_map_setDopole - use shr_mpi_mod, only: shr_mpi_min, shr_mpi_max - use shr_mpi_mod, only: shr_mpi_bcast, shr_mpi_commrank, shr_mpi_commsize - use shr_mem_mod, only: shr_mem_init, shr_mem_getusage - use shr_cal_mod, only: shr_cal_date2ymd, shr_cal_ymd2date, shr_cal_advdateInt - use shr_cal_mod, only: shr_cal_ymds2rday_offset - use shr_orb_mod, only: shr_orb_params - use shr_frz_mod, only: shr_frz_freezetemp_init - use shr_reprosum_mod, only: shr_reprosum_setopts - use shr_taskmap_mod, only: shr_taskmap_write - use mct_mod ! mct_ wrappers for mct lib - use perf_mod - use ESMF - - !---------------------------------------------------------------------------- - ! component model interfaces (init, run, final methods) - !---------------------------------------------------------------------------- - - use atm_comp_mct , only: atm_init=>atm_init_mct, atm_run=>atm_run_mct, atm_final=>atm_final_mct - use lnd_comp_mct , only: lnd_init=>lnd_init_mct, lnd_run=>lnd_run_mct, lnd_final=>lnd_final_mct - use ocn_comp_mct , only: ocn_init=>ocn_init_mct, ocn_run=>ocn_run_mct, ocn_final=>ocn_final_mct - use ice_comp_mct , only: ice_init=>ice_init_mct, ice_run=>ice_run_mct, ice_final=>ice_final_mct - use glc_comp_mct , only: glc_init=>glc_init_mct, glc_run=>glc_run_mct, glc_final=>glc_final_mct - use wav_comp_mct , only: wav_init=>wav_init_mct, wav_run=>wav_run_mct, wav_final=>wav_final_mct - use rof_comp_mct , only: rof_init=>rof_init_mct, rof_run=>rof_run_mct, rof_final=>rof_final_mct - use esp_comp_mct , only: esp_init=>esp_init_mct, esp_run=>esp_run_mct, esp_final=>esp_final_mct - use iac_comp_mct , only: iac_init=>iac_init_mct, iac_run=>iac_run_mct, iac_final=>iac_final_mct - - !---------------------------------------------------------------------------- - ! cpl7 modules - !---------------------------------------------------------------------------- - - ! 
mpi comm data & routines, plus logunit and loglevel - use seq_comm_mct, only: CPLID, GLOID, logunit, loglevel, info_taskmap_comp - use seq_comm_mct, only: ATMID, LNDID, OCNID, ICEID, GLCID, ROFID, WAVID, ESPID - use seq_comm_mct, only: ALLATMID,ALLLNDID,ALLOCNID,ALLICEID,ALLGLCID,ALLROFID,ALLWAVID,ALLESPID - use seq_comm_mct, only: CPLALLATMID,CPLALLLNDID,CPLALLOCNID,CPLALLICEID - use seq_comm_mct, only: CPLALLGLCID,CPLALLROFID,CPLALLWAVID,CPLALLESPID - use seq_comm_mct, only: CPLATMID,CPLLNDID,CPLOCNID,CPLICEID,CPLGLCID,CPLROFID,CPLWAVID,CPLESPID - use seq_comm_mct, only: IACID, ALLIACID, CPLALLIACID, CPLIACID - use seq_comm_mct, only: num_inst_atm, num_inst_lnd, num_inst_rof - use seq_comm_mct, only: num_inst_ocn, num_inst_ice, num_inst_glc - use seq_comm_mct, only: num_inst_wav, num_inst_esp - use seq_comm_mct, only: num_inst_iac - use seq_comm_mct, only: num_inst_xao, num_inst_frc, num_inst_phys - use seq_comm_mct, only: num_inst_total, num_inst_max - use seq_comm_mct, only: seq_comm_iamin, seq_comm_name, seq_comm_namelen - use seq_comm_mct, only: seq_comm_init, seq_comm_setnthreads, seq_comm_getnthreads - use seq_comm_mct, only: seq_comm_getinfo => seq_comm_setptrs - use seq_comm_mct, only: cpl_inst_tag - - ! clock & alarm routines and variables - use seq_timemgr_mod, only: seq_timemgr_type - use seq_timemgr_mod, only: seq_timemgr_clockInit - use seq_timemgr_mod, only: seq_timemgr_clockAdvance - use seq_timemgr_mod, only: seq_timemgr_clockPrint - use seq_timemgr_mod, only: seq_timemgr_EClockGetData - use seq_timemgr_mod, only: seq_timemgr_alarmIsOn - use seq_timemgr_mod, only: seq_timemgr_histavg_type - use seq_timemgr_mod, only: seq_timemgr_type_never - use seq_timemgr_mod, only: seq_timemgr_alarm_restart - use seq_timemgr_mod, only: seq_timemgr_alarm_stop - use seq_timemgr_mod, only: seq_timemgr_alarm_datestop - use seq_timemgr_mod, only: seq_timemgr_alarm_history - use seq_timemgr_mod, only: seq_timemgr_alarm_atmrun - use seq_timemgr_mod, only: seq_timemgr_alarm_lndrun - use seq_timemgr_mod, only: seq_timemgr_alarm_ocnrun - use seq_timemgr_mod, only: seq_timemgr_alarm_icerun - use seq_timemgr_mod, only: seq_timemgr_alarm_glcrun - use seq_timemgr_mod, only: seq_timemgr_alarm_glcrun_avg - use seq_timemgr_mod, only: seq_timemgr_alarm_ocnnext - use seq_timemgr_mod, only: seq_timemgr_alarm_tprof - use seq_timemgr_mod, only: seq_timemgr_alarm_histavg - use seq_timemgr_mod, only: seq_timemgr_alarm_rofrun - use seq_timemgr_mod, only: seq_timemgr_alarm_wavrun - use seq_timemgr_mod, only: seq_timemgr_alarm_esprun - use seq_timemgr_mod, only: seq_timemgr_alarm_iacrun - use seq_timemgr_mod, only: seq_timemgr_alarm_barrier - use seq_timemgr_mod, only: seq_timemgr_alarm_pause - use seq_timemgr_mod, only: seq_timemgr_pause_active - use seq_timemgr_mod, only: seq_timemgr_pause_component_active - use seq_timemgr_mod, only: seq_timemgr_pause_component_index - - ! "infodata" gathers various control flags into one datatype - use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData - use seq_infodata_mod, only: seq_infodata_init, seq_infodata_exchange - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_orb_variable_year - use seq_infodata_mod, only: seq_infodata_print, seq_infodata_init2 - - ! domain related routines - use seq_domain_mct, only : seq_domain_check - - ! history file routines - use seq_hist_mod, only : seq_hist_write, seq_hist_writeavg, seq_hist_writeaux - - ! restart file routines - use seq_rest_mod, only : seq_rest_read, seq_rest_write - - ! 
flux calc routines - use seq_flux_mct, only: seq_flux_init_mct, seq_flux_initexch_mct, seq_flux_ocnalb_mct - use seq_flux_mct, only: seq_flux_atmocn_mct, seq_flux_atmocnexch_mct - - ! domain fraction routines - use seq_frac_mct, only : seq_frac_init, seq_frac_set - - ! i/o subroutines - use seq_io_mod, only : seq_io_cpl_init - - ! rearrange type routines - use cplcomp_exchange_mod, only: seq_mctext_decomp - - ! diagnostic routines - use seq_diag_mct, only : seq_diag_zero_mct , seq_diag_avect_mct, seq_diag_lnd_mct - use seq_diag_mct, only : seq_diag_rof_mct , seq_diag_ocn_mct , seq_diag_atm_mct - use seq_diag_mct, only : seq_diag_ice_mct , seq_diag_accum_mct, seq_diag_print_mct - - ! list of fields transferred between components - use seq_flds_mod, only : seq_flds_a2x_fluxes, seq_flds_x2a_fluxes - use seq_flds_mod, only : seq_flds_i2x_fluxes, seq_flds_x2i_fluxes - use seq_flds_mod, only : seq_flds_l2x_fluxes, seq_flds_x2l_fluxes - use seq_flds_mod, only : seq_flds_o2x_fluxes, seq_flds_x2o_fluxes - use seq_flds_mod, only : seq_flds_g2x_fluxes, seq_flds_x2g_fluxes - use seq_flds_mod, only : seq_flds_w2x_fluxes, seq_flds_x2w_fluxes - use seq_flds_mod, only : seq_flds_r2x_fluxes, seq_flds_x2r_fluxes - use seq_flds_mod, only : seq_flds_set - use seq_flds_mod, only : seq_flds_z2x_fluxes, seq_flds_x2z_fluxes - - ! component type and accessor functions - use component_type_mod, only: component_get_iamin_compid, component_get_suffix - use component_type_mod, only: component_get_iamroot_compid - use component_type_mod, only: component_get_name, component_get_c2x_cx - use component_type_mod, only: atm, lnd, ice, ocn, rof, glc, wav, esp, iac - use component_mod, only: component_init_pre - use component_mod, only: component_init_cc, component_init_cx - use component_mod, only: component_run, component_final - use component_mod, only: component_init_areacor, component_init_aream - use component_mod, only: component_exch, component_diag - - ! prep routines (includes mapping routines between components and merging routines) - use prep_lnd_mod - use prep_ice_mod - use prep_wav_mod - use prep_rof_mod - use prep_glc_mod - use prep_ocn_mod - use prep_atm_mod - use prep_aoflux_mod - use prep_iac_mod - - !--- mapping routines --- - use seq_map_type_mod - use seq_map_mod ! generic mapping - - ! --- timing routines --- - use t_drv_timers_mod - - implicit none - - private - - ! public data - public :: timing_dir, mpicom_GLOID - - ! public routines - public :: cime_pre_init1 - public :: cime_pre_init2 - public :: cime_init - public :: cime_run - public :: cime_final - - ! 
private routines - private :: cime_esmf_readnl - private :: cime_printlogheader - private :: cime_comp_barriers - private :: cime_cpl_init - private :: cime_run_atmocn_fluxes - private :: cime_run_ocn_albedos - private :: cime_run_atm_setup_send - private :: cime_run_atm_recv_post - private :: cime_run_ocn_setup_send - private :: cime_run_ocn_recv_post - private :: cime_run_atmocn_setup - private :: cime_run_lnd_setup_send - private :: cime_run_lnd_recv_post - private :: cime_run_glc_setup_send - private :: cime_run_glc_recv_post - private :: cime_run_rof_setup_send - private :: cime_run_rof_recv_post - private :: cime_run_ice_setup_send - private :: cime_run_ice_recv_post - private :: cime_run_wav_setup_send - private :: cime_run_wav_recv_post - private :: cime_run_iac_setup_send - private :: cime_run_iac_recv_post - private :: cime_run_update_fractions - private :: cime_run_calc_budgets1 - private :: cime_run_calc_budgets2 - private :: cime_run_calc_budgets3 - private :: cime_run_write_history - private :: cime_run_write_restart - private :: cime_write_performance_checkpoint - -#include - - !---------------------------------------------------------------------------- - ! temporary variables - !---------------------------------------------------------------------------- - - !- from prep routines (arrays of instances) - type(mct_aVect) , pointer :: a2x_ox(:) => null() - type(mct_aVect) , pointer :: o2x_ax(:) => null() - type(mct_aVect) , pointer :: xao_ox(:) => null() - type(mct_aVect) , pointer :: xao_ax(:) => null() - - !- from component type (single instance inside array of components) - type(mct_aVect) , pointer :: o2x_ox => null() - type(mct_aVect) , pointer :: a2x_ax => null() - - character(len=CL) :: inst_suffix - logical :: iamin_id - character(len=seq_comm_namelen) :: compname - - !---------------------------------------------------------------------------- - ! domains & related - !---------------------------------------------------------------------------- - - !--- domain fractions (only defined on cpl pes) --- - type(mct_aVect) , pointer :: fractions_ax(:) ! Fractions on atm grid, cpl processes - type(mct_aVect) , pointer :: fractions_lx(:) ! Fractions on lnd grid, cpl processes - type(mct_aVect) , pointer :: fractions_ix(:) ! Fractions on ice grid, cpl processes - type(mct_aVect) , pointer :: fractions_ox(:) ! Fractions on ocn grid, cpl processes - type(mct_aVect) , pointer :: fractions_gx(:) ! Fractions on glc grid, cpl processes - type(mct_aVect) , pointer :: fractions_rx(:) ! Fractions on rof grid, cpl processes - type(mct_aVect) , pointer :: fractions_wx(:) ! Fractions on wav grid, cpl processes - type(mct_aVect) , pointer :: fractions_zx(:) ! Fractions on iac grid, cpl processes - - !--- domain equivalent 2d grid size --- - integer :: atm_nx, atm_ny ! nx, ny of 2d grid, if known - integer :: lnd_nx, lnd_ny - integer :: ice_nx, ice_ny - integer :: ocn_nx, ocn_ny - integer :: rof_nx, rof_ny - integer :: glc_nx, glc_ny - integer :: wav_nx, wav_ny - integer :: iac_nx, iac_ny - - !---------------------------------------------------------------------------- - ! Infodata: inter-model control flags, domain info - !---------------------------------------------------------------------------- - - type (seq_infodata_type), target :: infodata ! single instance for cpl and all comps - - !---------------------------------------------------------------------------- - ! 
time management - !---------------------------------------------------------------------------- - - type (seq_timemgr_type), SAVE :: seq_SyncClock ! array of all clocks & alarm - type (ESMF_Clock), target :: EClock_d ! driver clock - type (ESMF_Clock), target :: EClock_a ! atmosphere clock - type (ESMF_Clock), target :: EClock_l ! land clock - type (ESMF_Clock), target :: EClock_o ! ocean clock - type (ESMF_Clock), target :: EClock_i ! ice clock - type (ESMF_Clock), target :: EClock_g ! glc clock - type (ESMF_Clock), target :: EClock_r ! rof clock - type (ESMF_Clock), target :: EClock_w ! wav clock - type (ESMF_Clock), target :: EClock_e ! esp clock - type (ESMF_Clock), target :: EClock_z ! iac clock - - logical :: restart_alarm ! restart alarm - logical :: history_alarm ! history alarm - logical :: histavg_alarm ! history alarm - logical :: stop_alarm ! stop alarm - logical :: atmrun_alarm ! atm run alarm - logical :: lndrun_alarm ! lnd run alarm - logical :: icerun_alarm ! ice run alarm - logical :: ocnrun_alarm ! ocn run alarm - logical :: ocnnext_alarm ! ocn run alarm on next timestep - logical :: glcrun_alarm ! glc run alarm - logical :: glcrun_avg_alarm ! glc run averaging alarm - logical :: rofrun_alarm ! rof run alarm - logical :: wavrun_alarm ! wav run alarm - logical :: esprun_alarm ! esp run alarm - logical :: iacrun_alarm ! iac run alarm - logical :: tprof_alarm ! timing profile alarm - logical :: barrier_alarm ! barrier alarm - logical :: t1hr_alarm ! alarm every hour - logical :: t2hr_alarm ! alarm every two hours - logical :: t3hr_alarm ! alarm every three hours - logical :: t6hr_alarm ! alarm every six hours - logical :: t12hr_alarm ! alarm every twelve hours - logical :: t24hr_alarm ! alarm every twentyfour hours - logical :: t1yr_alarm ! alarm every year, at start of year - logical :: pause_alarm ! pause alarm - logical :: write_hist_alarm ! alarm to write a history file under multiple conditions - integer :: drv_index ! seq_timemgr index for driver - - real(r8) :: days_per_year = 365.0 ! days per year - - integer :: dtime ! dt of one coupling interval - integer :: ncpl ! number of coupling intervals per day - integer :: ymd ! Current date (YYYYMMDD) - integer :: year ! Current date (YYYY) - integer :: month ! Current date (MM) - integer :: day ! Current date (DD) - integer :: tod ! Current time of day (seconds) - integer :: ymdtmp ! temporary date (YYYYMMDD) - integer :: todtmp ! temporary time of day (seconds) - character(CL) :: orb_mode ! orbital mode - character(CS) :: tfreeze_option ! Freezing point calculation - integer :: orb_iyear ! orbital year - integer :: orb_iyear_align ! associated with model year - integer :: orb_cyear ! orbital year for current orbital computation - integer :: orb_nyear ! orbital year associated with currrent model year - real(r8) :: orb_eccen ! orbital eccentricity - real(r8) :: orb_obliq ! obliquity in degrees - real(r8) :: orb_mvelp ! moving vernal equinox long - real(r8) :: orb_obliqr ! Earths obliquity in rad - real(r8) :: orb_lambm0 ! Mean long of perihelion at vernal equinox (radians) - real(r8) :: orb_mvelpp ! moving vernal equinox long - real(r8) :: wall_time_limit ! wall time limit in hours - real(r8) :: wall_time ! current wall time used - character(CS) :: force_stop_at ! force stop at next (month, day, etc) - logical :: force_stop ! force the model to stop - integer :: force_stop_ymd ! force stop ymd - integer :: force_stop_tod ! force stop tod - - !--- for documenting speed of the model --- - character(8) :: dstr ! 
date string - character(10) :: tstr ! time string - integer :: begStep, endStep ! Begining and ending step number - character(CL) :: calendar ! calendar name - real(r8) :: simDays ! Number of simulated days - real(r8) :: SYPD ! Simulated years per day - real(r8) :: Time_begin ! Start time - real(r8) :: Time_end ! Ending time - real(r8) :: Time_bstep ! Start time - real(r8) :: Time_estep ! Ending time - real(r8) :: time_brun ! Start time - real(r8) :: time_erun ! Ending time - real(r8) :: cktime ! delta time - real(r8) :: cktime_acc(10) ! cktime accumulator array 1 = all, 2 = atm, etc - integer :: cktime_cnt(10) ! cktime counter array - real(r8) :: max_cplstep_time - character(CL) :: timing_file ! Local path to tprof filename - character(CL) :: timing_dir ! timing directory - character(CL) :: tchkpt_dir ! timing checkpoint directory - - !---------------------------------------------------------------------------- - ! control flags - !---------------------------------------------------------------------------- - - logical :: atm_present ! .true. => atm is present - logical :: lnd_present ! .true. => land is present - logical :: ice_present ! .true. => ice is present - logical :: ocn_present ! .true. => ocn is present - logical :: glc_present ! .true. => glc is present - logical :: glclnd_present ! .true. => glc is computing land coupling - logical :: glcocn_present ! .true. => glc is computing ocean runoff - logical :: glcice_present ! .true. => glc is computing icebergs - logical :: rofice_present ! .true. => rof is computing icebergs - logical :: rof_present ! .true. => rof is present - logical :: flood_present ! .true. => rof is computing flood - logical :: wav_present ! .true. => wav is present - logical :: esp_present ! .true. => esp is present - logical :: iac_present ! .true. => iac is present - - logical :: atm_prognostic ! .true. => atm comp expects input - logical :: lnd_prognostic ! .true. => lnd comp expects input - logical :: ice_prognostic ! .true. => ice comp expects input - logical :: iceberg_prognostic ! .true. => ice comp can handle iceberg input - logical :: ocn_prognostic ! .true. => ocn comp expects input - logical :: ocnrof_prognostic ! .true. => ocn comp expects runoff input - logical :: glc_prognostic ! .true. => glc comp expects input - logical :: rof_prognostic ! .true. => rof comp expects input - logical :: wav_prognostic ! .true. => wav comp expects input - logical :: esp_prognostic ! .true. => esp comp expects input - logical :: iac_prognostic ! .true. => iac comp expects input - - logical :: atm_c2_lnd ! .true. => atm to lnd coupling on - logical :: atm_c2_ocn ! .true. => atm to ocn coupling on - logical :: atm_c2_ice ! .true. => atm to ice coupling on - logical :: atm_c2_wav ! .true. => atm to wav coupling on - logical :: lnd_c2_atm ! .true. => lnd to atm coupling on - logical :: lnd_c2_rof ! .true. => lnd to rof coupling on - logical :: lnd_c2_glc ! .true. => lnd to glc coupling on - logical :: ocn_c2_atm ! .true. => ocn to atm coupling on - logical :: ocn_c2_ice ! .true. => ocn to ice coupling on - logical :: ocn_c2_glcshelf ! .true. => ocn to glc ice shelf coupling on - logical :: ocn_c2_wav ! .true. => ocn to wav coupling on - logical :: ice_c2_atm ! .true. => ice to atm coupling on - logical :: ice_c2_ocn ! .true. => ice to ocn coupling on - logical :: ice_c2_wav ! .true. => ice to wav coupling on - logical :: rof_c2_lnd ! .true. => rof to lnd coupling on - logical :: rof_c2_ocn ! .true. => rof to ocn coupling on - logical :: rof_c2_ice ! .true. 
=> rof to ice coupling on - logical :: glc_c2_lnd ! .true. => glc to lnd coupling on - logical :: glc_c2_ocn ! .true. => glc to ocn coupling on - logical :: glc_c2_ice ! .true. => glc to ice coupling on - logical :: glcshelf_c2_ocn ! .true. => glc ice shelf to ocn coupling on - logical :: glcshelf_c2_ice ! .true. => glc ice shelf to ice coupling on - logical :: wav_c2_ocn ! .true. => wav to ocn coupling on - - logical :: iac_c2_lnd ! .true. => iac to lnd coupling on - logical :: iac_c2_atm ! .true. => iac to atm coupling on - logical :: lnd_c2_iac ! .true. => lnd to iac coupling on - - logical :: dead_comps ! .true. => dead components - logical :: esmf_map_flag ! .true. => use esmf for mapping - - logical :: areafact_samegrid ! areafact samegrid flag - logical :: single_column ! scm mode logical - real(r8) :: scmlon ! single column lon - real(r8) :: scmlat ! single column lat - logical :: aqua_planet ! aqua planet mode - real(r8) :: nextsw_cday ! radiation control - logical :: atm_aero ! atm provides aerosol data - - character(CL) :: cpl_seq_option ! coupler sequencing option - logical :: skip_ocean_run ! skip the ocean model first pass - logical :: cpl2ocn_first ! use to call initial cpl2ocn timer - logical :: run_barriers ! barrier the component run calls - - character(CS) :: aoflux_grid ! grid for a/o flux calc: atm xor ocn - character(CS) :: vect_map ! vector mapping type - - character(CL) :: atm_gnam ! atm grid - character(CL) :: lnd_gnam ! lnd grid - character(CL) :: ocn_gnam ! ocn grid - character(CL) :: ice_gnam ! ice grid - character(CL) :: rof_gnam ! rof grid - character(CL) :: glc_gnam ! glc grid - character(CL) :: wav_gnam ! wav grid - character(CL) :: iac_gnam ! iac grid - - logical :: samegrid_ao ! samegrid atm and ocean - logical :: samegrid_al ! samegrid atm and land - logical :: samegrid_lr ! samegrid land and rof - logical :: samegrid_oi ! samegrid ocean and ice - logical :: samegrid_ro ! samegrid runoff and ocean - logical :: samegrid_aw ! samegrid atm and wave - logical :: samegrid_ow ! samegrid ocean and wave - logical :: samegrid_lg ! samegrid glc and land - logical :: samegrid_og ! samegrid glc and ocean - logical :: samegrid_ig ! samegrid glc and ice - logical :: samegrid_alo ! samegrid atm, lnd, ocean - logical :: samegrid_zl ! samegrid iac and land - - logical :: read_restart ! local read restart flag - character(CL) :: rest_file ! restart file path + filename - - logical :: shr_map_dopole ! logical for dopole in shr_map_mod - logical :: domain_check ! .true. => check consistency of domains - logical :: reprosum_use_ddpdd ! setup reprosum, use ddpdd - logical :: reprosum_allow_infnan ! setup reprosum, allow INF and NaN in summands - real(r8) :: reprosum_diffmax ! setup reprosum, set rel_diff_max - logical :: reprosum_recompute ! setup reprosum, recompute if tolerance exceeded - - logical :: output_perf = .false. ! require timing data output for this pe - logical :: in_first_day = .true. ! currently simulating first day - - !--- history & budgets --- - logical :: do_budgets ! heat/water budgets on - logical :: do_histinit ! initial hist file - logical :: do_histavg ! histavg on or off - logical :: do_hist_r2x ! create aux files: r2x - logical :: do_hist_l2x ! create aux files: l2x - logical :: do_hist_a2x24hr ! create aux files: a2x - logical :: do_hist_l2x1yrg ! create aux files: l2x 1yr glc forcings - logical :: do_hist_a2x ! create aux files: a2x - logical :: do_hist_a2x3hrp ! create aux files: a2x 3hr precip - logical :: do_hist_a2x3hr ! 
create aux files: a2x 3hr states - logical :: do_hist_a2x1hri ! create aux files: a2x 1hr instantaneous - logical :: do_hist_a2x1hr ! create aux files: a2x 1hr - integer :: budget_inst ! instantaneous budget flag - integer :: budget_daily ! daily budget flag - integer :: budget_month ! monthly budget flag - integer :: budget_ann ! annual budget flag - integer :: budget_ltann ! long term budget flag for end of year writing - integer :: budget_ltend ! long term budget flag for end of run writing - - character(CL) :: hist_a2x_flds = & - 'Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf' - - character(CL) :: hist_a2x3hrp_flds = & - 'Faxa_rainc:Faxa_rainl:Faxa_snowc:Faxa_snowl' - - character(CL) :: hist_a2x24hr_flds = & - 'Faxa_bcphiwet:Faxa_bcphodry:Faxa_bcphidry:Faxa_ocphiwet:Faxa_ocphidry:& - &Faxa_ocphodry:Faxa_dstwet1:Faxa_dstdry1:Faxa_dstwet2:Faxa_dstdry2:Faxa_dstwet3:& - &Faxa_dstdry3:Faxa_dstwet4:Faxa_dstdry4:Sa_co2prog:Sa_co2diag' - - character(CL) :: hist_a2x1hri_flds = & - 'Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf' - - character(CL) :: hist_a2x1hr_flds = & - 'Sa_u:Sa_v' - - character(CL) :: hist_a2x3hr_flds = & - 'Sa_z:Sa_topo:Sa_u:Sa_v:Sa_tbot:Sa_ptem:Sa_shum:Sa_dens:Sa_pbot:Sa_pslv:Faxa_lwdn:& - &Faxa_rainc:Faxa_rainl:Faxa_snowc:Faxa_snowl:& - &Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf:& - &Sa_co2diag:Sa_co2prog' - - ! --- other --- - - integer :: driver_id ! ID for multi-driver setup - integer :: ocnrun_count ! number of times ocn run alarm went on - logical :: exists ! true if file exists - integer :: ierr ! MPI error return - - character(*), parameter :: NLFileName = "drv_in" ! input namelist filename - - integer :: info_debug = 0 ! local info_debug level - - !---------------------------------------------------------------------------- - ! memory monitoring - !---------------------------------------------------------------------------- - real(r8) :: msize,msize0,msize1 ! memory size (high water) - real(r8) :: mrss ,mrss0 ,mrss1 ! resident size (current memory use) - - !---------------------------------------------------------------------------- - ! threading control - !---------------------------------------------------------------------------- - integer :: nthreads_GLOID ! OMP global number of threads - integer :: nthreads_CPLID ! OMP cpl number of threads - integer :: nthreads_ATMID ! OMP atm number of threads - integer :: nthreads_LNDID ! OMP lnd number of threads - integer :: nthreads_ICEID ! OMP ice number of threads - integer :: nthreads_OCNID ! OMP ocn number of threads - integer :: nthreads_GLCID ! OMP glc number of threads - integer :: nthreads_ROFID ! OMP glc number of threads - integer :: nthreads_WAVID ! OMP wav number of threads - integer :: nthreads_ESPID ! OMP esp number of threads - integer :: nthreads_IACID ! OMP iac number of threads - - integer :: pethreads_GLOID ! OMP number of threads per task - - logical :: drv_threading ! driver threading control - - !---------------------------------------------------------------------------- - ! communicator groups and related - !---------------------------------------------------------------------------- - integer :: global_comm - integer :: mpicom_GLOID ! MPI global communicator - integer :: mpicom_CPLID ! MPI cpl communicator - integer :: mpicom_OCNID ! MPI ocn communicator for ensemble member 1 - - integer :: mpicom_CPLALLATMID ! MPI comm for CPLALLATMID - integer :: mpicom_CPLALLLNDID ! MPI comm for CPLALLLNDID - integer :: mpicom_CPLALLICEID ! MPI comm for CPLALLICEID - integer :: mpicom_CPLALLOCNID ! 
MPI comm for CPLALLOCNID - integer :: mpicom_CPLALLGLCID ! MPI comm for CPLALLGLCID - integer :: mpicom_CPLALLROFID ! MPI comm for CPLALLROFID - integer :: mpicom_CPLALLWAVID ! MPI comm for CPLALLWAVID - integer :: mpicom_CPLALLIACID ! MPI comm for CPLALLIACID - - integer :: iam_GLOID ! pe number in global id - logical :: iamin_CPLID ! pe associated with CPLID - logical :: iamroot_GLOID ! GLOID masterproc - logical :: iamroot_CPLID ! CPLID masterproc - - logical :: iamin_CPLALLATMID ! pe associated with CPLALLATMID - logical :: iamin_CPLALLLNDID ! pe associated with CPLALLLNDID - logical :: iamin_CPLALLICEID ! pe associated with CPLALLICEID - logical :: iamin_CPLALLOCNID ! pe associated with CPLALLOCNID - logical :: iamin_CPLALLGLCID ! pe associated with CPLALLGLCID - logical :: iamin_CPLALLROFID ! pe associated with CPLALLROFID - logical :: iamin_CPLALLWAVID ! pe associated with CPLALLWAVID - logical :: iamin_CPLALLIACID ! pe associated with CPLALLIACID - - - !---------------------------------------------------------------------------- - ! complist: list of comps on this pe - !---------------------------------------------------------------------------- - - ! allow enough room for names of all physical components + coupler, - ! where each string can be up to (max_inst_name_len+1) characters - ! long (+1 allows for a space before each name) - character(len=(seq_comm_namelen+1)*(num_inst_phys+1)) :: complist - - !---------------------------------------------------------------------------- - ! comp_num_: unique component number for each component type - !---------------------------------------------------------------------------- - integer, parameter :: comp_num_atm = 1 - integer, parameter :: comp_num_lnd = 2 - integer, parameter :: comp_num_ice = 3 - integer, parameter :: comp_num_ocn = 4 - integer, parameter :: comp_num_glc = 5 - integer, parameter :: comp_num_rof = 6 - integer, parameter :: comp_num_wav = 7 - integer, parameter :: comp_num_esp = 8 - integer, parameter :: comp_num_iac = 9 - - !---------------------------------------------------------------------------- - ! misc - !---------------------------------------------------------------------------- - - integer, parameter :: ens1=1 ! use first instance of ensemble only - integer, parameter :: fix1=1 ! temporary hard-coding to first ensemble, needs to be fixed - integer :: eai, eli, eoi, eii, egi, eri, ewi, eei, exi, efi, ezi ! component instance counters - - !---------------------------------------------------------------------------- - ! 
formats - !---------------------------------------------------------------------------- - character(*), parameter :: subname = '(seq_mct_drv)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - character(*), parameter :: F0L = "('"//subname//" : ', A, L6 )" - character(*), parameter :: F01 = "('"//subname//" : ', A, 2i8, 3x, A )" - character(*), parameter :: F0R = "('"//subname//" : ', A, 2g23.15 )" - character(*), parameter :: FormatA = '(A,": =============== ", A44, " ===============")' - character(*), parameter :: FormatD = '(A,": =============== ", A20,I10.8,I8,8x, " ===============")' - character(*), parameter :: FormatR = '(A,": =============== ", A31,F12.3,1x, " ===============")' - character(*), parameter :: FormatQ = '(A,": =============== ", A20,2F10.2,4x," ===============")' - !=============================================================================== -contains - !=============================================================================== - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_pre_init1(esmf_log_option) - use shr_pio_mod, only : shr_pio_init1, shr_pio_init2 - use seq_comm_mct, only: num_inst_driver - !---------------------------------------------------------- - !| Initialize MCT and MPI communicators and IO - !---------------------------------------------------------- - - character(CS), intent(out) :: esmf_log_option ! For esmf_logfile_kind - - integer, dimension(num_inst_total) :: comp_id, comp_comm, comp_comm_iam - logical :: comp_iamin(num_inst_total) - character(len=seq_comm_namelen) :: comp_name(num_inst_total) - integer :: it - integer :: driver_comm - integer :: npes_CPLID - logical :: verbose_taskmap_output - character(len=8) :: c_cpl_inst ! coupler instance number - character(len=8) :: c_cpl_npes ! number of pes in coupler - - call mpi_init(ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_init') - call mpi_comm_dup(MPI_COMM_WORLD, global_comm, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_dup') - - comp_comm = MPI_COMM_NULL - time_brun = mpi_wtime() - - !--- Initialize multiple driver instances, if requested --- - call cime_cpl_init(global_comm, driver_comm, num_inst_driver, driver_id) - - call shr_pio_init1(num_inst_total,NLFileName, driver_comm) - ! - ! If pio_async_interface is true Global_comm is MPI_COMM_NULL on the servernodes - ! and server nodes do not return from shr_pio_init2 - ! - ! if (Global_comm /= MPI_COMM_NULL) then - - if (num_inst_driver > 1) then - call seq_comm_init(global_comm, driver_comm, NLFileName, drv_comm_ID=driver_id) - write(cpl_inst_tag,'("_",i4.4)') driver_id - else - call seq_comm_init(global_comm, driver_comm, NLFileName) - cpl_inst_tag = '' - end if - - !--- set task based threading counts --- - call seq_comm_getinfo(GLOID,pethreads=pethreads_GLOID,iam=iam_GLOID) - call seq_comm_setnthreads(pethreads_GLOID) - - !--- get some general data --- - it=1 - call seq_comm_getinfo(GLOID,mpicom=mpicom_GLOID,& - iamroot=iamroot_GLOID,nthreads=nthreads_GLOID) - if (iamroot_GLOID) output_perf = .true. - - call seq_comm_getinfo(CPLID,mpicom=mpicom_CPLID,& - iamroot=iamroot_CPLID,npes=npes_CPLID, & - nthreads=nthreads_CPLID,iam=comp_comm_iam(it)) - if (iamroot_CPLID) output_perf = .true. 
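The multi-driver instance tag written above with the Fortran format '("_",i4.4)' zero-pads the driver id to four digits and is appended to per-instance file names such as cpl_modelio.nml; with a single driver instance the tag is empty. A short Python equivalent for reference (illustration only):

# Sketch of the instance-tag convention used by the driver.
def cpl_inst_tag(num_inst_driver, driver_id):
    return f"_{driver_id:04d}" if num_inst_driver > 1 else ""

print("cpl_modelio.nml" + cpl_inst_tag(1, 1))   # cpl_modelio.nml
print("cpl_modelio.nml" + cpl_inst_tag(4, 2))   # cpl_modelio.nml_0002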
- - comp_id(it) = CPLID - comp_comm(it) = mpicom_CPLID - iamin_CPLID = seq_comm_iamin(CPLID) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - - do eai = 1,num_inst_atm - it=it+1 - comp_id(it) = ATMID(eai) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ATMID(eai), mpicom=comp_comm(it), & - nthreads=nthreads_ATMID, iam=comp_comm_iam(it)) - if (seq_comm_iamroot(ATMID(eai))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLATMID, mpicom=mpicom_CPLALLATMID) - iamin_CPLALLATMID = seq_comm_iamin(CPLALLATMID) - - do eli = 1,num_inst_lnd - it=it+1 - comp_id(it) = LNDID(eli) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(LNDID(eli), mpicom=comp_comm(it), & - nthreads=nthreads_LNDID, iam=comp_comm_iam(it)) - if (seq_comm_iamroot(LNDID(eli))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLLNDID, mpicom=mpicom_CPLALLLNDID) - iamin_CPLALLLNDID = seq_comm_iamin(CPLALLLNDID) - - do eoi = 1,num_inst_ocn - it=it+1 - comp_id(it) = OCNID(eoi) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(OCNID(eoi), mpicom=comp_comm(it), & - nthreads=nthreads_OCNID, iam=comp_comm_iam(it)) - if (seq_comm_iamroot(OCNID(eoi))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLOCNID, mpicom=mpicom_CPLALLOCNID) - iamin_CPLALLOCNID = seq_comm_iamin(CPLALLOCNID) - - do eii = 1,num_inst_ice - it=it+1 - comp_id(it) = ICEID(eii) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ICEID(eii), mpicom=comp_comm(it), & - nthreads=nthreads_ICEID, iam=comp_comm_iam(it)) - if (seq_comm_iamroot(ICEID(eii))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLICEID, mpicom=mpicom_CPLALLICEID) - iamin_CPLALLICEID = seq_comm_iamin(CPLALLICEID) - - do egi = 1,num_inst_glc - it=it+1 - comp_id(it) = GLCID(egi) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(GLCID(egi), mpicom=comp_comm(it), nthreads=nthreads_GLCID, iam=comp_comm_iam(it)) - if (seq_comm_iamroot(GLCID(egi))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLGLCID, mpicom=mpicom_CPLALLGLCID) - iamin_CPLALLGLCID = seq_comm_iamin(CPLALLGLCID) - - do eri = 1,num_inst_rof - it=it+1 - comp_id(it) = ROFID(eri) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ROFID(eri), mpicom=comp_comm(it), & - nthreads=nthreads_ROFID, iam=comp_comm_iam(it)) - if (seq_comm_iamroot(ROFID(eri))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLROFID, mpicom=mpicom_CPLALLROFID) - iamin_CPLALLROFID = seq_comm_iamin(CPLALLROFID) - - do ewi = 1,num_inst_wav - it=it+1 - comp_id(it) = WAVID(ewi) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(WAVID(ewi), mpicom=comp_comm(it), & - nthreads=nthreads_WAVID, iam=comp_comm_iam(it)) - if (seq_comm_iamroot(WAVID(ewi))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLWAVID, mpicom=mpicom_CPLALLWAVID) - iamin_CPLALLWAVID = seq_comm_iamin(CPLALLWAVID) - - ! 
IAC mods - do ezi = 1,num_inst_iac - it=it+1 - comp_id(it) = IACID(ezi) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(IACID(ezi), mpicom=comp_comm(it), & - nthreads=nthreads_IACID, iam=comp_comm_iam(it)) - if (seq_comm_iamin(IACID(ezi))) then - complist = trim(complist)//' '//trim(seq_comm_name(IACID(ezi))) - endif - if (seq_comm_iamroot(IACID(ezi))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLIACID, mpicom=mpicom_CPLALLIACID) - iamin_CPLALLIACID = seq_comm_iamin(CPLALLIACID) - - do eei = 1,num_inst_esp - it=it+1 - comp_id(it) = ESPID(eei) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ESPID(eei), mpicom=comp_comm(it), & - nthreads=nthreads_ESPID, iam=comp_comm_iam(it)) - enddo - ! ESP components do not use the coupler (they are 'external') - - !---------------------------------------------------------- - !| Set logging parameters both for shr code and locally - !---------------------------------------------------------- - - if (iamroot_CPLID) then - inquire(file='cpl_modelio.nml'//trim(cpl_inst_tag),exist=exists) - if (exists) then - logunit = shr_file_getUnit() - call shr_file_setIO('cpl_modelio.nml'//trim(cpl_inst_tag),logunit) - call shr_file_setLogUnit(logunit) - loglevel = 1 - call shr_file_setLogLevel(loglevel) - endif - else - loglevel = 0 - call shr_file_setLogLevel(loglevel) - endif - - !---------------------------------------------------------- - !| Output task-to-node mapping data for coupler - !---------------------------------------------------------- - - if (info_taskmap_comp > 0) then - ! Identify SMP nodes and process/SMP mapping for the coupler. - ! (Assume that processor names are SMP node names on SMP clusters.) - - if (iamin_CPLID) then - - if (info_taskmap_comp == 1) then - verbose_taskmap_output = .false. - else - verbose_taskmap_output = .true. - endif - - write(c_cpl_inst,'(i8)') num_inst_driver - - if (iamroot_CPLID) then - write(c_cpl_npes,'(i8)') npes_CPLID - write(logunit,'(3A)') trim(adjustl(c_cpl_npes)), & - ' pes participating in computation of CPL instance #', & - trim(adjustl(c_cpl_inst)) - call shr_sys_flush(logunit) - endif - - call t_startf("shr_taskmap_write") - call shr_taskmap_write(logunit, mpicom_CPLID, & - 'CPL #'//trim(adjustl(c_cpl_inst)), & - verbose=verbose_taskmap_output ) - call t_stopf("shr_taskmap_write") - - endif - - endif - - !---------------------------------------------------------- - ! Log info about the environment settings - !---------------------------------------------------------- - - if (iamroot_CPLID) then -#ifdef USE_ESMF_LIB - write(logunit,'(2A)') subname,' USE_ESMF_LIB is set' -#else - write(logunit,'(2A)') subname,' USE_ESMF_LIB is NOT set, using esmf_wrf_timemgr' -#endif - write(logunit,'(2A)') subname,' MCT_INTERFACE is set' - if (num_inst_driver > 1) & - write(logunit,'(2A,I0,A)') subname,' Driver is running with',num_inst_driver,'instances' - endif - - !---------------------------------------------------------- - ! Read ESMF namelist settings - !---------------------------------------------------------- - call cime_esmf_readnl(NLFileName, mpicom_GLOID, esmf_log_option) - - ! - ! When using io servers (pio_async_interface=.true.) the server tasks do not return from - ! shr_pio_init2 - ! 
- call shr_pio_init2(comp_id,comp_name,comp_iamin,comp_comm,comp_comm_iam) - - end subroutine cime_pre_init1 - - !=============================================================================== - - subroutine cime_esmf_readnl(NLFileName, mpicom, esmf_logfile_kind) - use shr_file_mod, only: shr_file_getUnit, shr_file_freeUnit - - character(len=*), intent(in) :: NLFileName - integer, intent(in) :: mpicom - character(len=CS), intent(out) :: esmf_logfile_kind - - integer :: ierr ! I/O error code - integer :: unitn ! Namelist unit number to read - integer :: rank - character(len=*), parameter :: subname = '(esmf_readnl) ' - - namelist /esmf_inparm/ esmf_logfile_kind - - esmf_logfile_kind = 'ESMF_LOGKIND_NONE' - call mpi_comm_rank(mpicom, rank, ierr) - - !------------------------------------------------------------------------- - ! Read in namelist - !------------------------------------------------------------------------- - if (rank == 0) then - unitn = shr_file_getUnit() - write(logunit,"(A)") subname,' read esmf_inparm namelist from: '//trim(NLFileName) - open(unitn, file=trim(NLFileName), status='old') - ierr = 1 - do while( ierr /= 0 ) - read(unitn, nml=esmf_inparm, iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( subname//':: namelist read returns an'// & - ' end of file or end of record condition' ) - end if - end do - close(unitn) - call shr_file_freeUnit(unitn) - end if - - call mpi_bcast(esmf_logfile_kind, CS, MPI_CHARACTER, 0, mpicom, ierr) - - end subroutine cime_esmf_readnl - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_pre_init2() - use pio, only : file_desc_t, pio_closefile, pio_file_is_open - use shr_const_mod, only: shr_const_tkfrz, shr_const_tktrip, & - shr_const_mwwv, shr_const_mwdair - use shr_wv_sat_mod, only: shr_wv_sat_set_default, shr_wv_sat_init, & - ShrWVSatTableSpec, shr_wv_sat_make_tables - - type(file_desc_t) :: pioid - integer :: maxthreads - - character(CS) :: wv_sat_scheme - real(r8) :: wv_sat_transition_start - logical :: wv_sat_use_tables - real(r8) :: wv_sat_table_spacing - character(CL) :: errstring - - type(ShrWVSatTableSpec) :: liquid_spec, ice_spec, mixed_spec - - real(r8), parameter :: epsilo = shr_const_mwwv/shr_const_mwdair - - !---------------------------------------------------------- - !| Timer initialization (has to be after mpi init) - !---------------------------------------------------------- - - maxthreads = max(nthreads_GLOID,nthreads_CPLID,nthreads_ATMID, & - nthreads_LNDID,nthreads_ICEID,nthreads_OCNID,nthreads_GLCID, & - nthreads_ROFID, nthreads_WAVID, nthreads_ESPID, nthreads_IACID, & - pethreads_GLOID ) - call t_initf(NLFileName, LogPrint=.true., mpicom=mpicom_GLOID, & - MasterTask=iamroot_GLOID,MaxThreads=maxthreads) - - if (iamin_CPLID) then - call seq_io_cpl_init() - endif - - !---------------------------------------------------------- - !| Memory test - !---------------------------------------------------------- - - !mt call shr_mem_init(prt=.true.) 
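cime_esmf_readnl above follows a common MPI idiom: only rank 0 opens and reads the esmf_inparm namelist, and the resulting esmf_logfile_kind is then broadcast so every task ends up with the same value. A sketch of the same pattern in Python with mpi4py (illustration only, not CIME code; the namelist parsing step is a placeholder):

# Sketch: root rank reads a setting, then broadcasts it to all ranks.
from mpi4py import MPI

comm = MPI.COMM_WORLD
esmf_logfile_kind = None
if comm.rank == 0:
    # placeholder for parsing the esmf_inparm group out of drv_in
    esmf_logfile_kind = "ESMF_LOGKIND_NONE"
esmf_logfile_kind = comm.bcast(esmf_logfile_kind, root=0)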
- call shr_mem_init(prt=iamroot_CPLID) - - !---------------------------------------------------------- - !| Initialize infodata - !---------------------------------------------------------- - - if (len_trim(cpl_inst_tag) > 0) then - call seq_infodata_init(infodata,nlfilename, GLOID, pioid, & - cpl_tag=cpl_inst_tag) - else - call seq_infodata_init(infodata,nlfilename, GLOID, pioid) - end if - - !---------------------------------------------------------- - ! Print Model heading and copyright message - !---------------------------------------------------------- - - if (iamroot_CPLID) call cime_printlogheader() - - !---------------------------------------------------------- - !| Initialize coupled fields (depends on infodata) - !---------------------------------------------------------- - - call seq_flds_set(nlfilename, GLOID, infodata) - - !---------------------------------------------------------- - !| Obtain infodata info - !---------------------------------------------------------- - - call seq_infodata_GetData(infodata, & - info_debug=info_debug) - - if (info_debug > 1 .and. iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,'(2A)') 'Status of infodata after seq_infodata_init' - call seq_infodata_print( infodata ) - write(logunit,*) ' ' - endif - - call seq_infodata_GetData(infodata , & - read_restart=read_restart , & - restart_file=rest_file , & - timing_dir=timing_dir , & - tchkpt_dir=tchkpt_dir , & - info_debug=info_debug , & - atm_present=atm_present , & - lnd_present=lnd_present , & - ice_present=ice_present , & - ocn_present=ocn_present , & - glc_present=glc_present , & - rof_present=rof_present , & - wav_present=wav_present , & - esp_present=esp_present , & - iac_present=iac_present , & - single_column=single_column , & - aqua_planet=aqua_planet , & - cpl_seq_option=cpl_seq_option , & - drv_threading=drv_threading , & - do_histinit=do_histinit , & - do_budgets=do_budgets , & - budget_inst=budget_inst , & - budget_daily=budget_daily , & - budget_month=budget_month , & - budget_ann=budget_ann , & - budget_ltann=budget_ltann , & - budget_ltend=budget_ltend , & - histaux_a2x=do_hist_a2x , & - histaux_a2x1hri=do_hist_a2x1hri , & - histaux_a2x1hr=do_hist_a2x1hr , & - histaux_a2x3hr =do_hist_a2x3hr , & - histaux_a2x3hrp=do_hist_a2x3hrp , & - histaux_a2x24hr=do_hist_a2x24hr , & - histaux_l2x=do_hist_l2x , & - histaux_l2x1yrg=do_hist_l2x1yrg , & - histaux_r2x=do_hist_r2x , & - run_barriers=run_barriers , & - mct_usealltoall=mct_usealltoall , & - mct_usevector=mct_usevector , & - aoflux_grid=aoflux_grid , & - vect_map=vect_map , & - atm_gnam=atm_gnam , & - lnd_gnam=lnd_gnam , & - ocn_gnam=ocn_gnam , & - ice_gnam=ice_gnam , & - rof_gnam=rof_gnam , & - glc_gnam=glc_gnam , & - wav_gnam=wav_gnam , & - iac_gnam=iac_gnam , & - tfreeze_option = tfreeze_option , & - cpl_decomp=seq_mctext_decomp , & - shr_map_dopole=shr_map_dopole , & - wall_time_limit=wall_time_limit , & - force_stop_at=force_stop_at , & - reprosum_use_ddpdd=reprosum_use_ddpdd , & - reprosum_allow_infnan=reprosum_allow_infnan, & - reprosum_diffmax=reprosum_diffmax , & - reprosum_recompute=reprosum_recompute , & - max_cplstep_time=max_cplstep_time) - - ! above - cpl_decomp is set to pass the cpl_decomp value to seq_mctext_decomp - ! 
(via a use statement) - - call shr_map_setDopole(shr_map_dopole) - - call shr_reprosum_setopts(& - repro_sum_use_ddpdd_in = reprosum_use_ddpdd, & - repro_sum_allow_infnan_in = reprosum_allow_infnan, & - repro_sum_rel_diff_max_in = reprosum_diffmax, & - repro_sum_recompute_in = reprosum_recompute) - - ! Check cpl_seq_option - - if (trim(cpl_seq_option) /= 'CESM1_MOD' .and. & - trim(cpl_seq_option) /= 'CESM1_MOD_TIGHT' .and. & - trim(cpl_seq_option) /= 'RASM_OPTION1' .and. & - trim(cpl_seq_option) /= 'RASM_OPTION2' .and. & - trim(cpl_seq_option) /= 'NUOPC' .and. & - trim(cpl_seq_option) /= 'NUOPC_TIGHT' ) then - call shr_sys_abort(subname//' invalid cpl_seq_option = '//trim(cpl_seq_option)) - endif - - !---------------------------------------------------------- - !| Test Threading Setup in driver - ! happens to be valid on all pes for all IDs - !---------------------------------------------------------- - - if (drv_threading) then - if (iamroot_GLOID) write(logunit,*) ' ' - if (iamroot_GLOID) write(logunit,'(2A) ') subname,' Test Threading in driver' - call seq_comm_setnthreads(nthreads_GLOID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_GLOID = ',& - nthreads_GLOID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_CPLID = ',& - nthreads_CPLID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ATMID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ATMID = ',& - nthreads_ATMID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_LNDID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_LNDID = ',& - nthreads_LNDID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_OCNID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_OCNID = ',& - nthreads_OCNID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ICEID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ICEID = ',& - nthreads_ICEID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_GLCID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_GLCID = ',& - nthreads_GLCID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ROFID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ROFID = ',& - nthreads_ROFID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_WAVID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_WAVID = ',& - nthreads_WAVID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ESPID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ESPID = ',& - nthreads_ESPID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_IACID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_IACID = ',& - nthreads_IACID,seq_comm_getnthreads() - if (iamroot_GLOID) write(logunit,*) ' ' - - call seq_comm_setnthreads(nthreads_GLOID) - endif - - !---------------------------------------------------------- - !| Initialize time manager - !---------------------------------------------------------- - - call seq_timemgr_clockInit(seq_SyncClock, nlfilename, & - read_restart, rest_file, pioid, mpicom_gloid, & - EClock_d, EClock_a, EClock_l, EClock_o, & - EClock_i, Eclock_g, Eclock_r, Eclock_w, Eclock_e, & - EClock_z) - - if (iamroot_CPLID) then - call seq_timemgr_clockPrint(seq_SyncClock) - endif - - !---------------------------------------------------------- - !| Initialize infodata items which need the clocks - 
!---------------------------------------------------------- - call seq_infodata_init2(infodata, GLOID) - - call seq_infodata_getData(infodata, & - orb_iyear=orb_iyear, & - orb_iyear_align=orb_iyear_align, & - orb_mode=orb_mode) - - !---------------------------------------------------------- - ! Initialize freezing point calculation for all components - !---------------------------------------------------------- - - call shr_frz_freezetemp_init(tfreeze_option, iamroot_GLOID) - - if (trim(orb_mode) == trim(seq_infodata_orb_variable_year)) then - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd) - - call shr_cal_date2ymd(ymd,year,month,day) - orb_cyear = orb_iyear + (year - orb_iyear_align) - - call shr_orb_params(orb_cyear, orb_eccen, orb_obliq, orb_mvelp, & - orb_obliqr, orb_lambm0, orb_mvelpp, iamroot_CPLID) - - call seq_infodata_putData(infodata, & - orb_eccen=orb_eccen, & - orb_obliqr=orb_obliqr, & - orb_lambm0=orb_lambm0, & - orb_mvelpp=orb_mvelpp) - endif - - call seq_infodata_getData(infodata, & - wv_sat_scheme=wv_sat_scheme, & - wv_sat_transition_start=wv_sat_transition_start, & - wv_sat_use_tables=wv_sat_use_tables, & - wv_sat_table_spacing=wv_sat_table_spacing) - - if (.not. shr_wv_sat_set_default(wv_sat_scheme)) then - call shr_sys_abort('Invalid wv_sat_scheme.') - end if - - call shr_wv_sat_init(shr_const_tkfrz, shr_const_tktrip, & - wv_sat_transition_start, epsilo, errstring) - - if (errstring /= "") then - call shr_sys_abort('shr_wv_sat_init: '//trim(errstring)) - end if - - ! The below produces internal lookup tables in the range 175-374K for - ! liquid water, and 125-274K for ice, with a resolution set by the - ! option wv_sat_table_spacing. - ! In theory these ranges could be specified in the namelist, but in - ! practice users will want to change them *very* rarely if ever, which - ! is why only the spacing is in the namelist. - if (wv_sat_use_tables) then - liquid_spec = ShrWVSatTableSpec(ceiling(200._r8/wv_sat_table_spacing), & - 175._r8, wv_sat_table_spacing) - ice_spec = ShrWVSatTableSpec(ceiling(150._r8/wv_sat_table_spacing), & - 125._r8, wv_sat_table_spacing) - mixed_spec = ShrWVSatTableSpec(ceiling(250._r8/wv_sat_table_spacing), & - 125._r8, wv_sat_table_spacing) - call shr_wv_sat_make_tables(liquid_spec, ice_spec, mixed_spec) - end if - - call seq_infodata_putData(infodata, & - atm_phase=1, & - lnd_phase=1, & - ocn_phase=1, & - ice_phase=1, & - glc_phase=1, & - wav_phase=1, & - iac_phase=1, & - esp_phase=1) - - !---------------------------------------------------------- - !| Set aqua_planet and single_column flags - ! If in single column mode, overwrite flags according to focndomain file - ! in ocn_in namelist. SCAM can reset the "present" flags for lnd, - ! ocn, ice, rof, and flood. - !---------------------------------------------------------- - - if (.not.aqua_planet .and. 
single_column) then - call seq_infodata_getData( infodata, & - scmlon=scmlon, scmlat=scmlat) - - call seq_comm_getinfo(OCNID(ens1), mpicom=mpicom_OCNID) - - call shr_scam_checkSurface(scmlon, scmlat, & - OCNID(ens1), mpicom_OCNID, & - lnd_present=lnd_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - rof_present=rof_present, & - flood_present=flood_present, & - rofice_present=rofice_present) - - call seq_infodata_putData(infodata, & - lnd_present=lnd_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - rof_present=rof_present, & - flood_present=flood_present, & - rofice_present=rofice_present) - endif - if(PIO_FILE_IS_OPEN(pioid)) then - call pio_closefile(pioid) - endif - - end subroutine cime_pre_init2 - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_init() - -104 format( A, i10.8, i8) - - !----------------------------------------------------------------------------- - !| Component Initialization - ! Note that within each component initialization, the relevant x_present flag - ! part of CIMEInit can be modified - ! By default, all these flags are set to true - ! The atm can reset the lnd_present, ice_present and ocn_present flags based - ! on aqua_planet, ideal_phys and adiabatic modes - ! The stub components will reset the present flags to false, all other - ! components will set them to true for the purposes of symmetry - !----------------------------------------------------------------------------- - - call t_startf('CPL:cime_init') - call t_adj_detailf(+1) - - call t_startf('CPL:init_comps') - if (iamroot_CPLID )then - write(logunit,*) ' ' - write(logunit,F00) 'Initialize each component: atm, lnd, rof, ocn, ice, glc, wav, esp, iac' - call shr_sys_flush(logunit) - endif - - call t_startf('CPL:comp_init_pre_all') - call component_init_pre(atm, ATMID, CPLATMID, CPLALLATMID, infodata, ntype='atm') - call component_init_pre(lnd, LNDID, CPLLNDID, CPLALLLNDID, infodata, ntype='lnd') - call component_init_pre(rof, ROFID, CPLROFID, CPLALLROFID, infodata, ntype='rof') - call component_init_pre(ocn, OCNID, CPLOCNID, CPLALLOCNID, infodata, ntype='ocn') - call component_init_pre(ice, ICEID, CPLICEID, CPLALLICEID, infodata, ntype='ice') - call component_init_pre(glc, GLCID, CPLGLCID, CPLALLGLCID, infodata, ntype='glc') - call component_init_pre(wav, WAVID, CPLWAVID, CPLALLWAVID, infodata, ntype='wav') - call component_init_pre(esp, ESPID, CPLESPID, CPLALLESPID, infodata, ntype='esp') - call component_init_pre(iac, IACID, CPLIACID, CPLALLIACID, infodata, ntype='iac') - - call t_stopf('CPL:comp_init_pre_all') - - call t_startf('CPL:comp_init_cc_atm') - call t_adj_detailf(+2) - - call component_init_cc(Eclock_a, atm, atm_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_atm') - - call t_startf('CPL:comp_init_cc_lnd') - call t_adj_detailf(+2) - call component_init_cc(Eclock_l, lnd, lnd_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_lnd') - - call t_startf('CPL:comp_init_cc_rof') - call t_adj_detailf(+2) - call component_init_cc(Eclock_r, rof, rof_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_rof') - - call t_startf('CPL:comp_init_cc_ocn') - call t_adj_detailf(+2) - call component_init_cc(Eclock_o, ocn, ocn_init, infodata, NLFilename) 
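Referring back to the saturation vapor pressure lookup tables set up in cime_pre_init2 above, the table extents follow directly from the (count, start, spacing) arguments passed to ShrWVSatTableSpec. Below is a small standalone sketch of that arithmetic, assuming the entries are evenly spaced upward from the start temperature (which is what the three arguments suggest); the 1 K spacing is only an example value for the namelist option wv_sat_table_spacing.

program wv_sat_table_sketch
   implicit none
   integer, parameter :: r8 = selected_real_kind(12)
   real(r8) :: spacing
   integer  :: n_liq, n_ice, n_mix

   spacing = 1.0_r8   ! example resolution

   ! ceiling(range/spacing) entries, mirroring the driver code above.
   n_liq = ceiling(200._r8/spacing)   ! liquid table starts at 175 K
   n_ice = ceiling(150._r8/spacing)   ! ice table starts at 125 K
   n_mix = ceiling(250._r8/spacing)   ! mixed table starts at 125 K

   write(*,'(a,i6,a,f8.2,a)') 'liquid: ', n_liq, ' entries, top ~', 175._r8 + (n_liq-1)*spacing, ' K'
   write(*,'(a,i6,a,f8.2,a)') 'ice:    ', n_ice, ' entries, top ~', 125._r8 + (n_ice-1)*spacing, ' K'
   write(*,'(a,i6,a,f8.2,a)') 'mixed:  ', n_mix, ' entries, top ~', 125._r8 + (n_mix-1)*spacing, ' K'
   ! With spacing = 1 K this reproduces the 175-374 K liquid and 125-274 K ice
   ! ranges quoted in the comment above; only the spacing is namelist-controlled.
end program wv_sat_table_sketch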
- call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_ocn') - - call t_startf('CPL:comp_init_cc_ice') - call t_adj_detailf(+2) - call component_init_cc(Eclock_i, ice, ice_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_ice') - - call t_startf('CPL:comp_init_cc_glc') - call t_adj_detailf(+2) - call component_init_cc(Eclock_g, glc, glc_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_glc') - - call t_startf('CPL:comp_init_cc_wav') - call t_adj_detailf(+2) - call component_init_cc(Eclock_w, wav, wav_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_wav') - - call t_startf('CPL:comp_init_cc_esp') - call t_adj_detailf(+2) - call component_init_cc(Eclock_e, esp, esp_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_esp') - - call t_startf('comp_init_cc_iac') - call t_adj_detailf(+2) - call component_init_cc(Eclock_z, iac, iac_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('comp_init_cc_iac') - - call t_startf('CPL:comp_init_cx_all') - call t_adj_detailf(+2) - call component_init_cx(atm, infodata) - call component_init_cx(lnd, infodata) - call component_init_cx(rof, infodata) - call component_init_cx(ocn, infodata) - call component_init_cx(ice, infodata) - call component_init_cx(glc, infodata) - call component_init_cx(wav, infodata) - call component_init_cx(iac, infodata) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cx_all') - - ! Determine complist (list of comps for each id) - - call t_startf('CPL:comp_list_all') - call t_adj_detailf(+2) - complist = " " - if (iamin_CPLID) complist = trim(complist)//' cpl' - - do eai = 1,num_inst_atm - iamin_ID = component_get_iamin_compid(atm(eai)) - if (iamin_ID) then - compname = component_get_name(atm(eai)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do eli = 1,num_inst_lnd - iamin_ID = component_get_iamin_compid(lnd(eli)) - if (iamin_ID) then - compname = component_get_name(lnd(eli)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do eii = 1,num_inst_ice - iamin_ID = component_get_iamin_compid(ice(eii)) - if (iamin_ID) then - compname = component_get_name(ice(eii)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do eoi = 1,num_inst_ocn - iamin_ID = component_get_iamin_compid(ocn(eoi)) - if (iamin_ID) then - compname = component_get_name(ocn(eoi)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do egi = 1,num_inst_glc - iamin_ID = component_get_iamin_compid(glc(egi)) - if (iamin_ID) then - compname = component_get_name(glc(egi)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do ewi = 1,num_inst_wav - iamin_ID = component_get_iamin_compid(wav(ewi)) - if (iamin_ID) then - compname = component_get_name(wav(ewi)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - - do ezi = 1,num_inst_iac - iamin_ID = component_get_iamin_compid(iac(ezi)) - if (iamin_ID) then - compname = component_get_name(iac(ezi)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - - do eei = 1,num_inst_esp - iamin_ID = component_get_iamin_compid(esp(eei)) - if (iamin_ID) then - compname = component_get_name(esp(eei)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - - call t_adj_detailf(-2) - call t_stopf('CPL:comp_list_all') - - call t_stopf('CPL:init_comps') - - !---------------------------------------------------------- - !| Determine coupling interactions based on 
present and prognostic flags - !---------------------------------------------------------- - - if (iamin_CPLALLATMID) call seq_infodata_exchange(infodata,CPLALLATMID,'cpl2atm_init') - if (iamin_CPLALLLNDID) call seq_infodata_exchange(infodata,CPLALLLNDID,'cpl2lnd_init') - if (iamin_CPLALLOCNID) call seq_infodata_exchange(infodata,CPLALLOCNID,'cpl2ocn_init') - if (iamin_CPLALLICEID) call seq_infodata_exchange(infodata,CPLALLICEID,'cpl2ice_init') - if (iamin_CPLALLGLCID) call seq_infodata_exchange(infodata,CPLALLGLCID,'cpl2glc_init') - if (iamin_CPLALLROFID) call seq_infodata_exchange(infodata,CPLALLROFID,'cpl2rof_init') - if (iamin_CPLALLWAVID) call seq_infodata_exchange(infodata,CPLALLWAVID,'cpl2wav_init') - if (iamin_CPLALLIACID) call seq_infodata_exchange(infodata,CPLALLIACID,'cpl2iac_init') - - if (iamroot_CPLID) then - write(logunit,F00) 'Determine final settings for presence of surface components' - call shr_sys_flush(logunit) - endif - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - lnd_present=lnd_present, & - ice_present=ice_present, & - ocn_present=ocn_present, & - glc_present=glc_present, & - glclnd_present=glclnd_present, & - glcocn_present=glcocn_present, & - glcice_present=glcice_present, & - rof_present=rof_present, & - rofice_present=rofice_present, & - wav_present=wav_present, & - iac_present=iac_present, & - esp_present=esp_present, & - flood_present=flood_present, & - atm_prognostic=atm_prognostic, & - lnd_prognostic=lnd_prognostic, & - ice_prognostic=ice_prognostic, & - iceberg_prognostic=iceberg_prognostic, & - ocn_prognostic=ocn_prognostic, & - ocnrof_prognostic=ocnrof_prognostic, & - ocn_c2_glcshelf=ocn_c2_glcshelf, & - glc_prognostic=glc_prognostic, & - rof_prognostic=rof_prognostic, & - wav_prognostic=wav_prognostic, & - iac_prognostic=iac_prognostic, & - esp_prognostic=esp_prognostic, & - dead_comps=dead_comps, & - esmf_map_flag=esmf_map_flag, & - atm_nx=atm_nx, atm_ny=atm_ny, & - lnd_nx=lnd_nx, lnd_ny=lnd_ny, & - rof_nx=rof_nx, rof_ny=rof_ny, & - ice_nx=ice_nx, ice_ny=ice_ny, & - glc_nx=glc_nx, glc_ny=glc_ny, & - ocn_nx=ocn_nx, ocn_ny=ocn_ny, & - wav_nx=wav_nx, wav_ny=wav_ny, & - iac_nx=iac_nx, iac_ny=iac_ny, & - atm_aero=atm_aero ) - - ! derive samegrid flags - - samegrid_ao = .true. - samegrid_al = .true. - samegrid_lr = .true. - samegrid_oi = .true. - samegrid_ro = .true. - samegrid_aw = .true. - samegrid_ow = .true. - samegrid_lg = .true. - samegrid_og = .true. - samegrid_ig = .true. - samegrid_alo = .true. - - ! set samegrid to true for single column - if (.not. single_column) then - if (trim(atm_gnam) /= trim(ocn_gnam)) samegrid_ao = .false. - if (trim(atm_gnam) /= trim(lnd_gnam)) samegrid_al = .false. - if (trim(lnd_gnam) /= trim(rof_gnam)) samegrid_lr = .false. - if (trim(rof_gnam) /= trim(ocn_gnam)) samegrid_ro = .false. - if (trim(ocn_gnam) /= trim(ice_gnam)) samegrid_oi = .false. - if (trim(atm_gnam) /= trim(wav_gnam)) samegrid_aw = .false. - if (trim(ocn_gnam) /= trim(wav_gnam)) samegrid_ow = .false. - if (trim(lnd_gnam) /= trim(glc_gnam)) samegrid_lg = .false. - if (trim(ocn_gnam) /= trim(glc_gnam)) samegrid_og = .false. - if (trim(ice_gnam) /= trim(glc_gnam)) samegrid_ig = .false. - samegrid_alo = (samegrid_al .and. samegrid_ao) - endif - - ! derive coupling connection flags - - atm_c2_lnd = .false. - atm_c2_ocn = .false. - atm_c2_ice = .false. - atm_c2_wav = .false. - lnd_c2_atm = .false. - lnd_c2_rof = .false. - lnd_c2_glc = .false. - ocn_c2_atm = .false. - ocn_c2_ice = .false. - ocn_c2_wav = .false. - ice_c2_atm = .false. 
- ice_c2_ocn = .false. - ice_c2_wav = .false. - rof_c2_lnd = .false. - rof_c2_ocn = .false. - rof_c2_ice = .false. - glc_c2_lnd = .false. - glc_c2_ocn = .false. - glc_c2_ice = .false. - glcshelf_c2_ocn = .false. - glcshelf_c2_ice = .false. - wav_c2_ocn = .false. - iac_c2_atm = .false. - iac_c2_lnd = .false. - lnd_c2_iac = .false. - - if (atm_present) then - if (lnd_prognostic) atm_c2_lnd = .true. - if (ocn_prognostic) atm_c2_ocn = .true. - if (ocn_present ) atm_c2_ocn = .true. ! needed for aoflux calc if aoflux=ocn - if (ice_prognostic) atm_c2_ice = .true. - if (wav_prognostic) atm_c2_wav = .true. - endif - if (lnd_present) then - if (atm_prognostic) lnd_c2_atm = .true. - if (rof_prognostic) lnd_c2_rof = .true. - if (glc_prognostic) lnd_c2_glc = .true. - if (iac_prognostic) lnd_c2_iac = .true. - endif - if (ocn_present) then - if (atm_prognostic) ocn_c2_atm = .true. - if (atm_present ) ocn_c2_atm = .true. ! needed for aoflux calc if aoflux=atm - if (ice_prognostic) ocn_c2_ice = .true. - if (wav_prognostic) ocn_c2_wav = .true. - - endif - if (ice_present) then - if (atm_prognostic) ice_c2_atm = .true. - if (ocn_prognostic) ice_c2_ocn = .true. - if (wav_prognostic) ice_c2_wav = .true. - endif - if (rof_present) then - if (lnd_prognostic ) rof_c2_lnd = .true. - if (ocnrof_prognostic) rof_c2_ocn = .true. - if (rofice_present .and. iceberg_prognostic) rof_c2_ice = .true. - endif - if (glc_present) then - if (glclnd_present .and. lnd_prognostic) glc_c2_lnd = .true. - if (glcocn_present .and. ocn_prognostic) glc_c2_ocn = .true. - ! For now, glcshelf->ocn only activated if the ocean has activated ocn->glcshelf - if (ocn_c2_glcshelf .and. glcocn_present .and. ocn_prognostic) glcshelf_c2_ocn = .true. - ! For now, glacshelf->ice also controlled by ocean's ocn_c2_glcshelf flag - ! Note that ice also has to be prognostic for glcshelf_c2_ice to be true. - ! It is not expected that glc and ice would ever be run without ocn prognostic. - if (ocn_c2_glcshelf .and. glcice_present .and. ice_prognostic) glcshelf_c2_ice = .true. - if (glcice_present .and. iceberg_prognostic) glc_c2_ice = .true. - endif - if (wav_present) then - if (ocn_prognostic) wav_c2_ocn = .true. - endif - if (iac_present) then - if (lnd_prognostic) iac_c2_lnd = .true. - if (atm_prognostic) iac_c2_atm = .true. - endif - - !---------------------------------------------------------- - ! Set domain check and other flag - !---------------------------------------------------------- - - domain_check = .true. - if (single_column ) domain_check = .false. - if (dead_comps ) domain_check = .false. - - ! set skip_ocean_run flag, used primarily for ocn run on first timestep - ! use reading a restart as a surrogate from whether this is a startup run - - skip_ocean_run = .true. - if ( read_restart) skip_ocean_run = .false. - ocnrun_count = 0 - cpl2ocn_first = .true. - - do_histavg = .true. - if (seq_timemgr_histavg_type == seq_timemgr_type_never) then - do_histavg = .false. 
- endif - - !---------------------------------------------------------- - !| Write component and coupler setup information - !---------------------------------------------------------- - - if (iamroot_CPLID) then - write(logunit,* )' ' - write(logunit,F00)'After component initialization:' - write(logunit,F0L)'atm model present = ',atm_present - write(logunit,F0L)'lnd model present = ',lnd_present - write(logunit,F0L)'ocn model present = ',ocn_present - write(logunit,F0L)'ice model present = ',ice_present - write(logunit,F0L)'glc model present = ',glc_present - write(logunit,F0L)'glc/lnd present = ',glclnd_present - write(logunit,F0L)'glc/ocn present = ',glcocn_present - write(logunit,F0L)'glc/ice present = ',glcice_present - write(logunit,F0L)'rof model present = ',rof_present - write(logunit,F0L)'rof/ice present = ',rofice_present - write(logunit,F0L)'rof/flood present = ',flood_present - write(logunit,F0L)'wav model present = ',wav_present - write(logunit,F0L)'iac model present = ',iac_present - write(logunit,F0L)'esp model present = ',esp_present - - write(logunit,F0L)'atm model prognostic = ',atm_prognostic - write(logunit,F0L)'lnd model prognostic = ',lnd_prognostic - write(logunit,F0L)'ocn model prognostic = ',ocn_prognostic - write(logunit,F0L)'ice model prognostic = ',ice_prognostic - write(logunit,F0L)'iceberg prognostic = ',iceberg_prognostic - write(logunit,F0L)'glc model prognostic = ',glc_prognostic - write(logunit,F0L)'rof model prognostic = ',rof_prognostic - write(logunit,F0L)'ocn rof prognostic = ',ocnrof_prognostic - write(logunit,F0L)'wav model prognostic = ',wav_prognostic - write(logunit,F0L)'iac model prognostic = ',iac_prognostic - write(logunit,F0L)'esp model prognostic = ',esp_prognostic - - write(logunit,F0L)'atm_c2_lnd = ',atm_c2_lnd - write(logunit,F0L)'atm_c2_ocn = ',atm_c2_ocn - write(logunit,F0L)'atm_c2_ice = ',atm_c2_ice - write(logunit,F0L)'atm_c2_wav = ',atm_c2_wav - write(logunit,F0L)'lnd_c2_atm = ',lnd_c2_atm - write(logunit,F0L)'lnd_c2_rof = ',lnd_c2_rof - write(logunit,F0L)'lnd_c2_glc = ',lnd_c2_glc - write(logunit,F0L)'ocn_c2_atm = ',ocn_c2_atm - write(logunit,F0L)'ocn_c2_ice = ',ocn_c2_ice - write(logunit,F0L)'ocn_c2_glcshelf = ',ocn_c2_glcshelf - write(logunit,F0L)'ocn_c2_wav = ',ocn_c2_wav - write(logunit,F0L)'ice_c2_atm = ',ice_c2_atm - write(logunit,F0L)'ice_c2_ocn = ',ice_c2_ocn - write(logunit,F0L)'ice_c2_wav = ',ice_c2_wav - write(logunit,F0L)'rof_c2_lnd = ',rof_c2_lnd - write(logunit,F0L)'rof_c2_ocn = ',rof_c2_ocn - write(logunit,F0L)'rof_c2_ice = ',rof_c2_ice - write(logunit,F0L)'glc_c2_lnd = ',glc_c2_lnd - write(logunit,F0L)'glc_c2_ocn = ',glc_c2_ocn - write(logunit,F0L)'glc_c2_ice = ',glc_c2_ice - write(logunit,F0L)'glcshelf_c2_ocn = ',glcshelf_c2_ocn - write(logunit,F0L)'glcshelf_c2_ice = ',glcshelf_c2_ice - write(logunit,F0L)'wav_c2_ocn = ',wav_c2_ocn - write(logunit,F0L)'iac_c2_lnd = ',iac_c2_lnd - write(logunit,F0L)'iac_c2_atm = ',iac_c2_atm - - write(logunit,F0L)'dead components = ',dead_comps - write(logunit,F0L)'domain_check = ',domain_check - write(logunit,F01)'atm_nx,atm_ny = ',atm_nx,atm_ny,trim(atm_gnam) - write(logunit,F01)'lnd_nx,lnd_ny = ',lnd_nx,lnd_ny,trim(lnd_gnam) - write(logunit,F01)'rof_nx,rof_ny = ',rof_nx,rof_ny,trim(rof_gnam) - write(logunit,F01)'ice_nx,ice_ny = ',ice_nx,ice_ny,trim(ice_gnam) - write(logunit,F01)'ocn_nx,ocn_ny = ',ocn_nx,ocn_ny,trim(ocn_gnam) - write(logunit,F01)'glc_nx,glc_ny = ',glc_nx,glc_ny,trim(glc_gnam) - write(logunit,F01)'wav_nx,wav_ny = ',wav_nx,wav_ny,trim(wav_gnam) - 
write(logunit,F01)'iac_nx,iac_ny = ',iac_nx,iac_ny,trim(iac_gnam) - write(logunit,F0L)'samegrid_ao = ',samegrid_ao - write(logunit,F0L)'samegrid_al = ',samegrid_al - write(logunit,F0L)'samegrid_ro = ',samegrid_ro - write(logunit,F0L)'samegrid_aw = ',samegrid_aw - write(logunit,F0L)'samegrid_ow = ',samegrid_ow - write(logunit,F0L)'skip init ocean run = ',skip_ocean_run - write(logunit,F00)'cpl sequence option = ',trim(cpl_seq_option) - write(logunit,F0L)'do_histavg = ',do_histavg - write(logunit,F0L)'atm_aero = ',atm_aero - write(logunit,* )' ' - call shr_sys_flush(logunit) - endif - - !---------------------------------------------------------- - !| Present and prognostic consistency checks - !---------------------------------------------------------- - - if (atm_prognostic .and. .not.atm_present) then - call shr_sys_abort(subname//' ERROR: if prognostic atm must also have atm present') - endif - if (ocn_prognostic .and. .not.ocn_present) then - call shr_sys_abort(subname//' ERROR: if prognostic ocn must also have ocn present') - endif - if (lnd_prognostic .and. .not.lnd_present) then - call shr_sys_abort(subname//' ERROR: if prognostic lnd must also have lnd present') - endif - if (ice_prognostic .and. .not.ice_present) then - call shr_sys_abort(subname//' ERROR: if prognostic ice must also have ice present') - endif - if (iceberg_prognostic .and. .not.ice_prognostic) then - call shr_sys_abort(subname//' ERROR: if prognostic iceberg must also have ice prognostic') - endif - if (glc_prognostic .and. .not.glc_present) then - call shr_sys_abort(subname//' ERROR: if prognostic glc must also have glc present') - endif - if (rof_prognostic .and. .not.rof_present) then - call shr_sys_abort(subname//' ERROR: if prognostic rof must also have rof present') - endif - if (wav_prognostic .and. .not.wav_present) then - call shr_sys_abort(subname//' ERROR: if prognostic wav must also have wav present') - endif - if (esp_prognostic .and. .not.esp_present) then - call shr_sys_abort(subname//' ERROR: if prognostic esp must also have esp present') - endif - if (iac_prognostic .and. .not.iac_present) then - call shr_sys_abort(subname//' ERROR: if prognostic iac must also have iac present') - endif -#ifndef CPL_BYPASS - if ((ice_prognostic .or. ocn_prognostic .or. lnd_prognostic) .and. .not. atm_present) then - call shr_sys_abort(subname//' ERROR: if prognostic surface model must also have atm present') - endif -#endif - if ((glclnd_present .or. glcocn_present .or. glcice_present) .and. .not.glc_present) then - call shr_sys_abort(subname//' ERROR: if glcxxx present must also have glc present') - endif - if ((ocn_c2_glcshelf .and. .not. glcshelf_c2_ocn) .or. (glcshelf_c2_ocn .and. .not. ocn_c2_glcshelf)) then - ! Current logic will not allow this to be true, but future changes could make it so, which may be nonsensical - call shr_sys_abort(subname//' ERROR: if glc_c2_ocn must also have ocn_c2_glc and vice versa. '//& - 'Boundary layer fluxes calculated in coupler require input from both components.') - endif - if (rofice_present .and. .not.rof_present) then - call shr_sys_abort(subname//' ERROR: if rofice present must also have rof present') - endif - if (ocnrof_prognostic .and. .not.rof_present) then - if (iamroot_CPLID) then - write(logunit,F00) 'WARNING: ocnrof_prognostic is TRUE but rof_present is FALSE' - call shr_sys_flush(logunit) - endif - endif - - !---------------------------------------------------------- - !| Samegrid checks - !---------------------------------------------------------- - - if (.not. 
samegrid_oi) then - call shr_sys_abort(subname//' ERROR: samegrid_oi is false') - endif - - !---------------------------------------------------------- - !| Check instances of prognostic components - !---------------------------------------------------------- - - if (atm_prognostic .and. num_inst_atm /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: atm_prognostic but num_inst_atm not num_inst_max') - if (lnd_prognostic .and. num_inst_lnd /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: lnd_prognostic but num_inst_lnd not num_inst_max') - if (ocn_prognostic .and. (num_inst_ocn /= num_inst_max .and. num_inst_ocn /= 1)) & - call shr_sys_abort(subname//' ERROR: ocn_prognostic but num_inst_ocn not 1 or num_inst_max') - if (ice_prognostic .and. num_inst_ice /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: ice_prognostic but num_inst_ice not num_inst_max') - if (glc_prognostic .and. num_inst_glc /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: glc_prognostic but num_inst_glc not num_inst_max') - if (rof_prognostic .and. num_inst_rof /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: rof_prognostic but num_inst_rof not num_inst_max') - if (wav_prognostic .and. num_inst_wav /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: wav_prognostic but num_inst_wav not num_inst_max') - if (iac_prognostic .and. num_inst_iac /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: iac_prognostic but num_inst_iac not num_inst_max') - - !---------------------------------------------------------- - !| Initialize attribute vectors for prep_c2C_init_avs routines and fractions - !| Initialize mapping between components - !---------------------------------------------------------- - - if (iamin_CPLID) then - - call t_startf('CPL:init_maps') - call t_adj_detailf(+2) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call prep_atm_init(infodata, ocn_c2_atm, ice_c2_atm, lnd_c2_atm, iac_c2_lnd) - - call prep_lnd_init(infodata, atm_c2_lnd, rof_c2_lnd, glc_c2_lnd, iac_c2_lnd) - - call prep_ocn_init(infodata, atm_c2_ocn, atm_c2_ice, ice_c2_ocn, rof_c2_ocn, wav_c2_ocn, glc_c2_ocn, glcshelf_c2_ocn) - - call prep_ice_init(infodata, ocn_c2_ice, glc_c2_ice, glcshelf_c2_ice, rof_c2_ice ) - - call prep_rof_init(infodata, lnd_c2_rof) - - call prep_glc_init(infodata, lnd_c2_glc, ocn_c2_glcshelf) - - call prep_wav_init(infodata, atm_c2_wav, ocn_c2_wav, ice_c2_wav) - - call prep_iac_init(infodata, lnd_c2_iac) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_adj_detailf(-2) - call t_stopf('CPL:init_maps') - - endif - - !---------------------------------------------------------- - !| Update aream in domains where appropriate - !---------------------------------------------------------- - - if (iamin_CPLID) then - call t_startf ('CPL:init_aream') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_init_aream(infodata, rof_c2_ocn, samegrid_ao, samegrid_al, & - samegrid_ro, samegrid_lg) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_aream') - endif ! iamin_CPLID - - !---------------------------------------------------------- - !| Check domains - ! This must be done after the mappers are initialized since - ! 
checking is done on each processor and not with a global gather - !---------------------------------------------------------- - - if (iamin_CPLID) then - call t_startf ('CPL:init_domain_check') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (domain_check) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Performing domain checking' - call shr_sys_flush(logunit) - endif - - call seq_domain_check( infodata, & - atm(ens1), ice(ens1), lnd(ens1), ocn(ens1), rof(ens1), glc(ens1), & - samegrid_al, samegrid_ao, samegrid_ro, samegrid_lg) - - endif - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_domain_check') - endif ! iamin_CPLID - - !---------------------------------------------------------- - !| Initialize area corrections based on aream (read in map_init) and area - !| Area correct component initialization output fields - !| Map initial component AVs from component to coupler pes - !---------------------------------------------------------- - - areafact_samegrid = .false. -#if (defined E3SM_SCM_REPLAY ) - if (.not.samegrid_alo) then - call shr_sys_abort(subname//' ERROR: samegrid_alo is false - Must run with same atm/ocn/lnd grids when configured for scam iop') - else - areafact_samegrid = .true. - endif -#endif - if (single_column) areafact_samegrid = .true. - -#ifdef COMPARE_TO_NUOPC - areafact_samegrid = .true. -#endif - - call t_startf ('CPL:init_areacor') - call t_adj_detailf(+2) - - call mpi_barrier(mpicom_GLOID,ierr) - if (atm_present) call component_init_areacor(atm, areafact_samegrid, seq_flds_a2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (lnd_present) call component_init_areacor(lnd, areafact_samegrid, seq_flds_l2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (rof_present) call component_init_areacor(rof, areafact_samegrid, seq_flds_r2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (ocn_present) call component_init_areacor(ocn, areafact_samegrid, seq_flds_o2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (ice_present) call component_init_areacor(ice, areafact_samegrid, seq_flds_i2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (glc_present) call component_init_areacor(glc, areafact_samegrid, seq_flds_g2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (wav_present) call component_init_areacor(wav, areafact_samegrid, seq_flds_w2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (iac_present) call component_init_areacor(iac, areafact_samegrid, seq_flds_z2x_fluxes) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_areacor') - - !---------------------------------------------------------- - !| global sum diagnostics for IC data - !---------------------------------------------------------- - - if (iamin_CPLID .and. 
info_debug > 1) then - call t_startf ('CPL:init_diag') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (atm_present) then - call component_diag(infodata, atm, flow='c2x', comment='recv IC atm', & - info_debug=info_debug) - endif - if (ice_present) then - call component_diag(infodata, ice, flow='c2x', comment='recv IC ice', & - info_debug=info_debug) - endif - if (lnd_present) then - call component_diag(infodata, lnd, flow='c2x', comment='recv IC lnd', & - info_debug=info_debug) - endif - if (rof_present) then - call component_diag(infodata, rof, flow='c2x', comment='recv IC rof', & - info_debug=info_debug) - endif - if (ocn_present) then - call component_diag(infodata, ocn, flow='c2x', comment='recv IC ocn', & - info_debug=info_debug) - endif - if (glc_present) then - call component_diag(infodata, glc, flow='c2x', comment='recv IC glc', & - info_debug=info_debug) - endif - if (wav_present) then - call component_diag(infodata, wav, flow='c2x', comment='recv IC wav', & - info_debug=info_debug) - endif - if (iac_present) then - call component_diag(infodata, iac, flow='c2x', comment='recv IC iac', & - info_debug=info_debug) - endif - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_diag') - endif - - !---------------------------------------------------------- - !| Initialize fractions - !---------------------------------------------------------- - - if (iamin_CPLID) then - call t_startf ('CPL:init_fracs') - call t_adj_detailf(+2) - - allocate(fractions_ax(num_inst_frc)) - allocate(fractions_lx(num_inst_frc)) - allocate(fractions_ox(num_inst_frc)) - allocate(fractions_ix(num_inst_frc)) - allocate(fractions_gx(num_inst_frc)) - allocate(fractions_rx(num_inst_frc)) - allocate(fractions_wx(num_inst_frc)) - allocate(fractions_zx(num_inst_frc)) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - do efi = 1,num_inst_frc - eii = mod((efi-1),num_inst_ice) + 1 - - if (iamroot_CPLID) then - write(logunit,*) ' ' - if (efi == 1) write(logunit,F00) 'Initializing fractions' - endif - - call seq_frac_init(infodata, & - atm(ens1), ice(ens1), lnd(ens1), & - ocn(ens1), glc(ens1), rof(ens1), & - wav(ens1), iac(ens1), & - fractions_ax(efi), fractions_ix(efi), fractions_lx(efi), & - fractions_ox(efi), fractions_gx(efi), fractions_rx(efi), & - fractions_wx(efi), fractions_zx(efi)) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - if (efi == 1) write(logunit,F00) 'Setting fractions' - endif - - call seq_frac_set(infodata, ice(eii), & - fractions_ax(efi), fractions_ix(efi), fractions_ox(efi)) - - enddo - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_fracs') - endif - - !---------------------------------------------------------- - !| Initialize prep_aoflux_mod module variables - !---------------------------------------------------------- - - if (iamin_CPLID) then - call prep_aoflux_init(infodata, fractions_ox, fractions_ax) - endif - - !---------------------------------------------------------- - !| Initialize atm/ocn flux component and compute ocean albedos - !---------------------------------------------------------- - - if (iamin_CPLID) then - if (ocn_present) then - call t_startf ('CPL:init_aoflux') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing atm/ocn flux component' - endif - - if 
(trim(aoflux_grid) == 'ocn') then - - call seq_flux_init_mct(ocn(ens1), fractions_ox(ens1)) - - elseif (trim(aoflux_grid) == 'atm') then - - call seq_flux_init_mct(atm(ens1), fractions_ax(ens1)) - - elseif (trim(aoflux_grid) == 'exch') then - - call shr_sys_abort(subname//' aoflux_grid = exch not validated') - call seq_flux_initexch_mct(atm(ens1), ocn(ens1), mpicom_cplid, cplid) - - else - call shr_sys_abort(subname//' aoflux_grid = '//trim(aoflux_grid)//' not available') - - endif - - do exi = 1,num_inst_xao - !tcx is this correct? relation between xao and frc for ifrad and ofrad - efi = mod((exi-1),num_inst_frc) + 1 - eai = mod((exi-1),num_inst_atm) + 1 - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - a2x_ox => prep_ocn_get_a2x_ox() - call seq_flux_ocnalb_mct(infodata, ocn(1), a2x_ox(eai), fractions_ox(efi), xao_ox(exi)) - enddo - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_aoflux') - endif - endif - - !---------------------------------------------------------- - !| ATM PREP for recalculation of initial solar - ! Note that ocean albedos are ALWAYS CALCULATED on the ocean grid - ! If aoflux_grid = 'ocn' , xao_ox is input for atm/ocn fluxes and xao_ax is output - ! If aoflux_grid = 'atm' , xao_ax is input for atm/ocn fluxes and xao_ox is not used - ! If aoflux_grid = 'exch', xao_ax is input for atm/ocn /fluxes and xao_ox is not used - ! Merge atmosphere input state and run atmospheric radiation - !---------------------------------------------------------- - - if (atm_prognostic) then - if (iamin_CPLID) then - - if (lnd_present) then - ! Get lnd output on atm grid - call prep_atm_calc_l2x_ax(fractions_lx, timer='CPL:init_atminit') - endif - - if (ice_present) then - ! Get ice output on atm grid - call prep_atm_calc_i2x_ax(fractions_ix, timer='CPL:init_atminit') - endif - - if (ocn_present) then - ! Get ocn output on atm grid - call prep_atm_calc_o2x_ax(fractions_ox, timer='CPL:init_atminit') - endif - - if (ocn_present) then - ! Get albedos on atm grid - call prep_aoflux_calc_xao_ax(fractions_ox, flds='albedos', timer='CPL:init_atminit') - - ! Get atm/ocn fluxes on atm grid - if (trim(aoflux_grid) == 'ocn') then - call prep_aoflux_calc_xao_ax(fractions_ox, flds='states_and_fluxes', & - timer='CPL:init_atminit') - endif - endif - - if (lnd_present .or. ocn_present) then - ! Merge input to atmosphere on coupler pes - xao_ax => prep_aoflux_get_xao_ax() - if (associated(xao_ax)) then - call prep_atm_mrg(infodata, & - fractions_ax=fractions_ax, xao_ax=xao_ax, timer_mrg='CPL:init_atminit') - endif - endif - - call component_diag(infodata, atm, flow='x2c', comment='send atm', info_debug=info_debug) - - endif - - endif ! atm_prognostic - - !---------------------------------------------------------- - !| Second phase of atmosphere component initialization - ! Recalculate solar based on input albedo's from surface components. - ! Data or dead atmosphere may just return on this phase. - !---------------------------------------------------------- - - if (atm_prognostic) then - - call t_startf('CPL:comp_init_cc_atm2') - call t_adj_detailf(+2) - - if (iamroot_CPLID) then - write(logunit,F00) 'Calling atm_init_mct phase 2' - endif - - ! Send atm input data from coupler pes to atm pes - if (atm_prognostic) then - call component_exch(atm, flow='x2c', infodata=infodata, & - infodata_string='cpl2atm_init') - endif - - ! 
Set atm init phase to 2 for all atm instances on component instance pes - do eai = 1,num_inst_atm - if (component_get_iamin_compid(atm(eai))) then - call seq_infodata_putData(infodata, atm_phase=2) - endif - enddo - - ! Run atm_init_mct with init phase of 2 - call component_init_cc(Eclock_a, atm, atm_init, & - infodata, NLFilename, & - seq_flds_x2c_fluxes=seq_flds_x2a_fluxes, & - seq_flds_c2x_fluxes=seq_flds_a2x_fluxes) - - ! Map atm output data from atm pes to cpl pes - call component_exch(atm, flow='c2x', infodata=infodata, & - infodata_string='atm2cpl_init') - - if (iamin_CPLID) then - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - call component_diag(infodata, atm, flow='c2x', comment= 'recv IC2 atm', & - info_debug=info_debug) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_atm2') - endif ! atm present - - !---------------------------------------------------------- - !| Get time manager's index for driver - !---------------------------------------------------------- - drv_index = seq_timemgr_pause_component_index('drv') - - !---------------------------------------------------------- - !| Read driver restart file, overwrite anything previously sent or computed - !---------------------------------------------------------- - - call t_startf('CPL:init_readrestart') - call t_adj_detailf(+2) - - call seq_diag_zero_mct(mode='all') - if (read_restart .and. iamin_CPLID) then - call seq_rest_read(rest_file, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx) - endif - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_readrestart') - - !---------------------------------------------------------- - !| Map initial r2x_rx and g2x_gx to _ox, _ix and _lx - !---------------------------------------------------------- - - if (iamin_CPLID ) then - if (rof_c2_ocn) then - call prep_ocn_calc_r2x_ox(timer='CPL:init_rof2ocn') - endif - if (glc_c2_ocn) then - call prep_ocn_calc_g2x_ox(timer='CPL:init_glc2ocn') - endif - - if (glcshelf_c2_ocn) then - call prep_ocn_shelf_calc_g2x_ox(timer='CPL:init_glc2ocn_shelf') - endif - - if (rof_c2_ice) then - call prep_ice_calc_r2x_ix(timer='CPL:init_rof2ice') - endif - if (glc_c2_ice) then - call prep_ice_calc_g2x_ix(timer='CPL:init_glc2ice') - endif - - if (glcshelf_c2_ice) then - call prep_ice_shelf_calc_g2x_ix(timer='CPL:init_glc2ice_shelf') - endif - - if (rof_c2_lnd) then - call prep_lnd_calc_r2x_lx(timer='CPL:init_rof2lnd') - endif - if (glc_c2_lnd) then - call prep_lnd_calc_g2x_lx(timer='CPL:init_gllndnd') - endif - endif - - !---------------------------------------------------------- - !| Write histinit output file - !---------------------------------------------------------- - - if (do_histinit) then - if (iamin_CPLID) then - call t_startf('CPL:init_histinit') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod ) - write(logunit,104) ' Write history file at ',ymd,tod - call shr_sys_flush(logunit) - endif - call seq_hist_write(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx, trim(cpl_inst_tag)) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call 
t_adj_detailf(-2) - call t_stopf('CPL:init_histinit') - endif - endif - - if (iamroot_CPLID )then - write(logunit,*) ' ' - write(logunit,F00) 'Model initialization complete ' - write(logunit,*) ' ' - call shr_sys_flush(logunit) - endif - - call t_adj_detailf(-1) - call t_stopf('CPL:cime_init') - - end subroutine cime_init - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_run() - use shr_string_mod, only: shr_string_listGetIndexF - use seq_comm_mct, only: atm_layout, lnd_layout, ice_layout - use seq_comm_mct, only: glc_layout, rof_layout, ocn_layout - use seq_comm_mct, only: wav_layout, esp_layout, iac_layout, num_inst_driver - use seq_comm_mct, only: seq_comm_inst - use seq_pauseresume_mod, only: seq_resume_store_comp, seq_resume_get_files - use seq_pauseresume_mod, only: seq_resume_free - - ! gptl timer lookup variables - integer, parameter :: hashcnt=7 - integer :: hashint(hashcnt) - ! Driver pause/resume - logical :: drv_pause ! Driver writes pause restart file - character(len=CL) :: drv_resume ! Driver resets state from restart file - character(len=CL), pointer :: resume_files(:) ! Component resume files - - type(ESMF_Time) :: etime_curr ! Current model time - real(r8) :: tbnds1_offset ! Time offset for call to seq_hist_writeaux - logical :: lnd2glc_averaged_now ! Whether lnd2glc averages were taken this timestep - -101 format( A, i10.8, i8, 12A, A, F8.2, A, F8.2 ) -102 format( A, i10.8, i8, A, 8L3 ) -103 format( 5A ) -104 format( A, i10.8, i8) -105 format( A, i10.8, i8, A, f10.2, A, f10.2, A, A, i5, A, A) -108 format( A, f10.2, A, i8.8) -109 format( A, 2f10.3) - - hashint = 0 - - call seq_infodata_putData(infodata,atm_phase=1,lnd_phase=1,ocn_phase=1,ice_phase=1) - call seq_timemgr_EClockGetData( EClock_d, stepno=begstep) - call seq_timemgr_EClockGetData( EClock_d, dtime=dtime) - call seq_timemgr_EClockGetData( EClock_d, calendar=calendar) - ncpl = 86400/dtime - cktime_acc = 0._r8 - cktime_cnt = 0 - stop_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_stop) - if (seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_datestop)) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,103) subname,' NOTE: Stopping from alarm STOP DATE' - write(logunit,*) ' ' - endif - stop_alarm = .true. - endif - force_stop = .false. - force_stop_ymd = -1 - force_stop_tod = -1 - - ! --- Write out performance data for initialization - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod) - write(timing_file,'(a,i8.8,a1,i5.5)') & - trim(tchkpt_dir)//"/model_timing"//trim(cpl_inst_tag)//"_",ymd,"_",tod - - call t_set_prefixf("CPL:INIT_") - call cime_write_performance_checkpoint(output_perf,timing_file,mpicom_GLOID) - call t_unset_prefixf() - - !|---------------------------------------------------------- - !| Beginning of driver time step loop - !|---------------------------------------------------------- - - call t_startf ('CPL:RUN_LOOP_BSTART') - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf ('CPL:RUN_LOOP_BSTART') - Time_begin = mpi_wtime() - Time_bstep = mpi_wtime() - do while ( .not. stop_alarm) - - call t_startf('CPL:RUN_LOOP', hashint(1)) - call t_startf('CPL:CLOCK_ADVANCE') - - !---------------------------------------------------------- - !| Advance Clock - ! (this is time that models should have before they return - ! to the driver). 
Write timestamp and run alarm status - !---------------------------------------------------------- - - call seq_timemgr_clockAdvance( seq_SyncClock, force_stop, force_stop_ymd, force_stop_tod) - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod) - call shr_cal_date2ymd(ymd,year,month,day) - stop_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_stop) - atmrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_atmrun) - lndrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_lndrun) - rofrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_rofrun) - icerun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_icerun) - glcrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_glcrun) - wavrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_wavrun) - esprun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_esprun) - ocnrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_ocnrun) - ocnnext_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_ocnnext) - iacrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_iacrun) - restart_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_restart) - history_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_history) - histavg_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_histavg) - tprof_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_tprof) - barrier_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_barrier) - pause_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_pause) - - ! Does the driver need to pause? - drv_pause = pause_alarm .and. seq_timemgr_pause_component_active(drv_index) - - if (glc_prognostic) then - ! Is it time to average fields to pass to glc? - ! - ! Note that the glcrun_avg_alarm just controls what is passed to glc in terms - ! of averaged fields - it does NOT control when glc is called currently - - ! glc will be called on the glcrun_alarm setting - but it might not be passed relevant - ! info if the time averaging period to accumulate information passed to glc is greater - ! than the glcrun interval - glcrun_avg_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_glcrun_avg) - if (glcrun_avg_alarm .and. .not. glcrun_alarm) then - write(logunit,*) 'ERROR: glcrun_avg_alarm is true, but glcrun_alarm is false' - write(logunit,*) 'Make sure that NCPL_BASE_PERIOD, GLC_NCPL and GLC_AVG_PERIOD' - write(logunit,*) 'are set so that glc averaging only happens at glc coupling times.' - write(logunit,*) '(It is allowable for glc coupling to be more frequent than glc averaging,' - write(logunit,*) 'but not for glc averaging to be more frequent than glc coupling.)' - call shr_sys_abort(subname//' glcrun_avg_alarm is true, but glcrun_alarm is false') - end if - else - ! glcrun_avg_alarm shouldn't matter in this case - glcrun_avg_alarm = .false. - end if - - ! this probably belongs in seq_timemgr somewhere using proper clocks - t1hr_alarm = .false. - t2hr_alarm = .false. - t3hr_alarm = .false. - t6hr_alarm = .false. - t12hr_alarm = .false. - t24hr_alarm = .false. - t1yr_alarm = .false. - if (mod(tod, 3600) == 0) t1hr_alarm = .true. - if (mod(tod, 7200) == 0) t2hr_alarm = .true. - if (mod(tod,10800) == 0) t3hr_alarm = .true. - if (mod(tod,21600) == 0) t6hr_alarm = .true. - if (mod(tod,43200) == 0) t12hr_alarm = .true. - if (tod == 0) t24hr_alarm = .true. - if (month==1 .and. day==1 .and. tod==0) t1yr_alarm = .true. - - lnd2glc_averaged_now = .false. 
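The hand-rolled sub-daily alarms just above reduce to modular arithmetic on tod, the elapsed seconds in the current model day. A standalone sketch of that logic follows; the program name and the sample times of day are illustrative only.

program subdaily_alarm_sketch
   implicit none
   integer :: tod, k
   integer, parameter :: tods(4) = (/ 0, 10800, 43200, 45000 /)  ! example seconds-of-day
   logical :: t1hr, t3hr, t12hr, t24hr

   do k = 1, size(tods)
      tod   = tods(k)
      t1hr  = (mod(tod, 3600) == 0)    ! on the hour
      t3hr  = (mod(tod, 10800) == 0)   ! every 3 hours
      t12hr = (mod(tod, 43200) == 0)   ! every 12 hours
      t24hr = (tod == 0)               ! start of the day
      write(*,'(a,i6,4(a,l2))') 'tod=', tod, '  1hr=', t1hr, '  3hr=', t3hr, &
           '  12hr=', t12hr, '  24hr=', t24hr
   end do
   ! The yearly alarm additionally requires month==1, day==1 and tod==0,
   ! exactly as in the driver loop above.
end program subdaily_alarm_sketch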
- - if (seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_datestop)) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,103) subname,' NOTE: Stopping from alarm STOP DATE' - write(logunit,*) ' ' - endif - stop_alarm = .true. - endif - - ! update the orbital data as needed - if (trim(orb_mode) == trim(seq_infodata_orb_variable_year)) then - orb_nyear = orb_iyear + (year - orb_iyear_align) - if (orb_nyear /= orb_cyear) then - orb_cyear = orb_nyear - call shr_orb_params(orb_cyear, orb_eccen, orb_obliq, orb_mvelp, & - orb_obliqr, orb_lambm0, orb_mvelpp, iamroot_CPLID) - call seq_infodata_putData(infodata,orb_eccen=orb_eccen,orb_obliqr=orb_obliqr, & - orb_lambm0=orb_lambm0,orb_mvelpp=orb_mvelpp) - endif - endif - - ! override ocnrun_alarm and ocnnext_alarm for first ocn run - ! skip_ocean_run is initialized above to true if it's a startup - ! if it's not a startup, ignore all of this - ! stop the overide on the second ocnrun_alarm - - if (ocnrun_alarm) ocnrun_count = ocnrun_count + 1 - if (ocnrun_count > 1) skip_ocean_run = .false. - if (skip_ocean_run) then - ocnrun_alarm = .false. - ocnnext_alarm = .false. - endif - - if (iamroot_CPLID) then - if (loglevel > 1) then - write(logunit,102) ' Alarm_state: model date = ',ymd,tod, & - ' aliogrw run alarms = ', atmrun_alarm, lndrun_alarm, & - icerun_alarm, ocnrun_alarm, glcrun_alarm, & - rofrun_alarm, wavrun_alarm, esprun_alarm, iacrun_alarm - write(logunit,102) ' Alarm_state: model date = ',ymd,tod, & - ' 1.2.3.6.12.24 run alarms = ', t1hr_alarm, t2hr_alarm, & - t3hr_alarm, t6hr_alarm, t12hr_alarm, t24hr_alarm - call shr_sys_flush(logunit) - endif - endif - - call t_stopf ('CPL:CLOCK_ADVANCE') - - !---------------------------------------------------------- - !| IAC SETUP-SEND - !---------------------------------------------------------- - if (iac_present .and. iacrun_alarm) then - call cime_run_iac_setup_send() - endif - - !---------------------------------------------------------- - !| MAP ATM to OCN - ! Set a2x_ox as a module variable in prep_ocn_mod - ! This will be used later in the ice prep and in the - ! atm/ocn flux calculation - !---------------------------------------------------------- - if (iamin_CPLID .and. (atm_c2_ocn .or. atm_c2_ice)) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPRE1_BARRIER') - call t_drvstartf ('CPL:OCNPRE1',cplrun=.true.,barrier=mpicom_CPLID,hashint=hashint(3)) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call prep_ocn_calc_a2x_ox(timer='CPL:ocnpre1_atm2ocn') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPRE1',cplrun=.true.,hashint=hashint(3)) - endif - - !---------------------------------------------------------- - !| ATM/OCN SETUP (rasm_option1) - !---------------------------------------------------------- - ! The following maps to the ocean, computes atm/ocn fluxes, merges to the ocean, - ! accumulates ocn input and computes ocean albedos - if (ocn_present) then - if (trim(cpl_seq_option) == 'RASM_OPTION1') then - call cime_run_atmocn_setup(hashint) - end if - endif - - !---------------------------------------------------------- - !| OCN SETUP-SEND (cesm1_mod, cesm1_mod_tight, or rasm_option1) - !---------------------------------------------------------- - if (ocn_present .and. ocnrun_alarm) then - if (trim(cpl_seq_option) == 'CESM1_MOD' .or. & - trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' .or. & - trim(cpl_seq_option) == 'NUOPC_TIGHT' .or. 
& - trim(cpl_seq_option) == 'RASM_OPTION1') then - call cime_run_ocn_setup_send() - end if - endif - - !---------------------------------------------------------- - !| LND SETUP-SEND - !---------------------------------------------------------- - if (lnd_present .and. lndrun_alarm) then - call cime_run_lnd_setup_send() - endif - - !---------------------------------------------------------- - !| ICE SETUP-SEND - !---------------------------------------------------------- - if (ice_present .and. icerun_alarm) then - call cime_run_ice_setup_send() - endif - - !---------------------------------------------------------- - !| WAV SETUP-SEND - !---------------------------------------------------------- - if (wav_present .and. wavrun_alarm) then - call cime_run_wav_setup_send() - endif - - !---------------------------------------------------------- - !| ROF SETUP-SEND - !---------------------------------------------------------- - if (rof_present .and. rofrun_alarm) then - call cime_run_rof_setup_send() - endif - - !---------------------------------------------------------- - !| RUN IAC MODEL - !---------------------------------------------------------- - if (iac_present .and. iacrun_alarm) then - call component_run(Eclock_z, iac, iac_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2z_fluxes, & - seq_flds_c2x_fluxes=seq_flds_z2x_fluxes, & - comp_prognostic=iac_prognostic, comp_num=comp_num_iac, & - timer_barrier= 'CPL:IAC_RUN_BARRIER', timer_comp_run='CPL:IAC_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=iac_layout) - endif - - !---------------------------------------------------------- - !| RUN ICE MODEL - !---------------------------------------------------------- - if (ice_present .and. icerun_alarm) then - call component_run(Eclock_i, ice, ice_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2i_fluxes, & - seq_flds_c2x_fluxes=seq_flds_i2x_fluxes, & - comp_prognostic=ice_prognostic, comp_num=comp_num_ice, & - timer_barrier= 'CPL:ICE_RUN_BARRIER', timer_comp_run='CPL:ICE_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=ice_layout) - endif - - !---------------------------------------------------------- - !| RUN LND MODEL - !---------------------------------------------------------- - if (lnd_present .and. lndrun_alarm) then - call component_run(Eclock_l, lnd, lnd_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2l_fluxes, & - seq_flds_c2x_fluxes=seq_flds_l2x_fluxes, & - comp_prognostic=lnd_prognostic, comp_num=comp_num_lnd, & - timer_barrier= 'CPL:LND_RUN_BARRIER', timer_comp_run='CPL:LND_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=lnd_layout) - endif - - !---------------------------------------------------------- - !| RUN ROF MODEL - !---------------------------------------------------------- - if (rof_present .and. rofrun_alarm) then - call component_run(Eclock_r, rof, rof_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2r_fluxes, & - seq_flds_c2x_fluxes=seq_flds_r2x_fluxes, & - comp_prognostic=rof_prognostic, comp_num=comp_num_rof, & - timer_barrier= 'CPL:ROF_RUN_BARRIER', timer_comp_run='CPL:ROF_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=rof_layout) - endif - - !---------------------------------------------------------- - !| RUN WAV MODEL - !---------------------------------------------------------- - if (wav_present .and. 
wavrun_alarm) then - call component_run(Eclock_w, wav, wav_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2w_fluxes, & - seq_flds_c2x_fluxes=seq_flds_w2x_fluxes, & - comp_prognostic=wav_prognostic, comp_num=comp_num_wav, & - timer_barrier= 'CPL:WAV_RUN_BARRIER', timer_comp_run='CPL:WAV_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=wav_layout) - endif - - !---------------------------------------------------------- - !| RUN OCN MODEL (cesm1_mod_tight, nuopc_tight) - !---------------------------------------------------------- - if (ocn_present .and. ocnrun_alarm) then - if (trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' .or. trim(cpl_seq_option) == 'NUOPC_TIGHT') then - call component_run(Eclock_o, ocn, ocn_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2o_fluxes, & - seq_flds_c2x_fluxes=seq_flds_o2x_fluxes, & - comp_prognostic=ocn_prognostic, comp_num=comp_num_ocn, & - timer_barrier= 'CPL:OCNT_RUN_BARRIER', timer_comp_run='CPL:OCNT_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=ocn_layout) - endif - end if - - !---------------------------------------------------------- - !| IAC RECV-POST - !---------------------------------------------------------- - if (iac_present .and. iacrun_alarm) then - call cime_run_iac_recv_post() - endif - - !---------------------------------------------------------- - !| OCN RECV-POST (cesm1_mod_tight, nuopc_tight) - !---------------------------------------------------------- - if (ocn_present .and. ocnnext_alarm) then - if (trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' .or. trim(cpl_seq_option) == 'NUOPC_TIGHT') then - call cime_run_ocn_recv_post() - endif - end if - - !---------------------------------------------------------- - !| ATM/OCN SETUP (cesm1_mod or cesm1_mod_tight) - !---------------------------------------------------------- - ! The following maps to the ocean, computes atm/ocn fluxes, merges to the ocean, - ! accumulates ocn input and computes ocean albedos - if (ocn_present) then - if (trim(cpl_seq_option) == 'CESM1_MOD' .or. & - trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' .or. & - trim(cpl_seq_option) == 'NUOPC' .or. & - trim(cpl_seq_option) == 'NUOPC_TIGHT' ) then - call cime_run_atmocn_setup(hashint) - end if - endif - - !---------------------------------------------------------- - !| LND RECV-POST - !---------------------------------------------------------- - if (lnd_present .and. lndrun_alarm) then - call cime_run_lnd_recv_post() - endif - - !---------------------------------------------------------- - !| GLC SETUP-SEND - !---------------------------------------------------------- - if (glc_present .and. glcrun_alarm) then - call cime_run_glc_setup_send(lnd2glc_averaged_now) - endif - - !---------------------------------------------------------- - !| ROF RECV-POST - !---------------------------------------------------------- - if (rof_present .and. rofrun_alarm) then - call cime_run_rof_recv_post() - endif - if (rof_present) then - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='DRIVER_ROFPOST_BARRIER') - call t_drvstartf ('DRIVER_ROFPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (do_hist_r2x) then - call t_drvstartf ('driver_rofpost_histaux', barrier=mpicom_CPLID) - ! Write coupler's hr2x file at 24 hour marks, - ! and at the end of the run interval, even if that's not at a 24 hour mark. - write_hist_alarm = t24hr_alarm .or. 
stop_alarm - do eri = 1,num_inst_rof - inst_suffix = component_get_suffix(rof(eri)) - call seq_hist_writeaux(infodata, EClock_d, rof(eri), flow='c2x', & - aname='r2x',dname='domrb',inst_suffix=trim(inst_suffix), & - nx=rof_nx, ny=rof_ny, nt=1, write_now=write_hist_alarm) - enddo - call t_drvstopf ('driver_rofpost_histaux') - endif - call t_drvstopf ('DRIVER_ROFPOST', cplrun=.true.) - endif - endif - !---------------------------------------------------------- - !| Budget with old fractions - !---------------------------------------------------------- - if (do_budgets) then - call cime_run_calc_budgets1() - endif - - !---------------------------------------------------------- - !| ICE RECV-POST - !---------------------------------------------------------- - if (ice_present .and. icerun_alarm) then - call cime_run_ice_recv_post() - endif - - !---------------------------------------------------------- - !| Update fractions based on new ice fractions - !---------------------------------------------------------- - call cime_run_update_fractions() - - !---------------------------------------------------------- - !| ATM/OCN SETUP (rasm_option2) - !---------------------------------------------------------- - ! The following maps to the ocean, computes atm/ocn fluxes, merges to the ocean, - ! accumulates ocn input and computes ocean albedos - if (ocn_present) then - if (trim(cpl_seq_option) == 'RASM_OPTION2') then - call cime_run_atmocn_setup(hashint) - end if - endif - - !---------------------------------------------------------- - !| OCN SETUP-SEND (rasm_option2) - !---------------------------------------------------------- - if (ocn_present .and. ocnrun_alarm) then - if (trim(cpl_seq_option) == 'RASM_OPTION2') then - call cime_run_ocn_setup_send() - end if - endif - - !---------------------------------------------------------- - !| ATM SETUP-SEND - !---------------------------------------------------------- - if (atm_present .and. atmrun_alarm) then - call cime_run_atm_setup_send() - endif - - !---------------------------------------------------------- - !| RUN OCN MODEL (NOT cesm1_mod_tight or nuopc_tight) - !---------------------------------------------------------- - if (ocn_present .and. ocnrun_alarm) then - if (trim(cpl_seq_option) == 'CESM1_MOD' .or. & - trim(cpl_seq_option) == 'RASM_OPTION1' .or. & - trim(cpl_seq_option) == 'RASM_OPTION2' .or. & - trim(cpl_seq_option) == 'NUOPC') then - call component_run(Eclock_o, ocn, ocn_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2o_fluxes, & - seq_flds_c2x_fluxes=seq_flds_o2x_fluxes, & - comp_prognostic=ocn_prognostic, comp_num=comp_num_ocn, & - timer_barrier= 'CPL:OCN_RUN_BARRIER', timer_comp_run='CPL:OCN_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=ocn_layout) - endif - end if - - !---------------------------------------------------------- - !| RUN ATM MODEL - !---------------------------------------------------------- - if (atm_present .and. atmrun_alarm) then - call component_run(Eclock_a, atm, atm_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2a_fluxes, & - seq_flds_c2x_fluxes=seq_flds_a2x_fluxes, & - comp_prognostic=atm_prognostic, comp_num=comp_num_atm, & - timer_barrier= 'CPL:ATM_RUN_BARRIER', timer_comp_run='CPL:ATM_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod, comp_layout=atm_layout) - endif - - !---------------------------------------------------------- - !| RUN GLC MODEL - !---------------------------------------------------------- - if (glc_present .and. 
glcrun_alarm) then - call component_run(Eclock_g, glc, glc_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2g_fluxes, & - seq_flds_c2x_fluxes=seq_flds_g2x_fluxes, & - comp_prognostic=glc_prognostic, comp_num=comp_num_glc, & - timer_barrier= 'CPL:GLC_RUN_BARRIER', timer_comp_run='CPL:GLC_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=glc_layout) - endif - - !---------------------------------------------------------- - !| WAV RECV-POST - !---------------------------------------------------------- - if (wav_present .and. wavrun_alarm) then - call cime_run_wav_recv_post() - endif - - !---------------------------------------------------------- - !| GLC RECV-POST - !---------------------------------------------------------- - if (glc_present .and. glcrun_alarm) then - call cime_run_glc_recv_post() - endif - - !---------------------------------------------------------- - !| ATM RECV-POST - !---------------------------------------------------------- - if (atm_present .and. atmrun_alarm) then - call cime_run_atm_recv_post - endif - - !---------------------------------------------------------- - !| Budget with new fractions - !---------------------------------------------------------- - if (do_budgets) then - call cime_run_calc_budgets2() - endif - - !---------------------------------------------------------- - !| OCN RECV-POST (NOT cesm1_mod_tight or nuopc_tight) - !---------------------------------------------------------- - if (ocn_present .and. ocnnext_alarm) then - if (trim(cpl_seq_option) == 'CESM1_MOD' .or. & - trim(cpl_seq_option) == 'RASM_OPTION1' .or. & - trim(cpl_seq_option) == 'RASM_OPTION2' .or. & - trim(cpl_seq_option) == 'NUOPC') then - call cime_run_ocn_recv_post() - end if - end if - - !---------------------------------------------------------- - !| Write driver restart file - !---------------------------------------------------------- - call cime_run_write_restart(drv_pause, restart_alarm, drv_resume) - - !---------------------------------------------------------- - !| Write history file, only AVs on CPLID - !---------------------------------------------------------- - call cime_run_write_history() - - if (iamin_CPLID) then - - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:HISTORY_BARRIER') - call t_drvstartf ('CPL:HISTORY',cplrun=.true.,barrier=mpicom_CPLID) - if ( history_alarm) then - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - write(logunit,104) ' Write history file at ',ymd,tod - call shr_sys_flush(logunit) - endif - - call seq_hist_write(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx, trim(cpl_inst_tag)) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - if (do_histavg) then - call seq_hist_writeavg(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, iac, histavg_alarm, & - trim(cpl_inst_tag)) - endif - - if (do_hist_a2x) then - do eai = 1,num_inst_atm - inst_suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x',dname='doma', inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=ncpl) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x',dname='doma', inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=ncpl, flds=hist_a2x_flds) - endif - enddo - endif - - if (do_hist_a2x1hri 
.and. t1hr_alarm) then - do eai = 1,num_inst_atm - inst_suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x1hri_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1hi',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=24) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1hi',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=24, flds=hist_a2x1hri_flds) - endif - enddo - endif - - if (do_hist_a2x1hr) then - do eai = 1,num_inst_atm - inst_suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x1hr_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1h',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=24, write_now=t1hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1h',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=24, write_now=t1hr_alarm, flds=hist_a2x1hr_flds) - endif - enddo - endif - - if (do_hist_a2x3hr) then - do eai = 1,num_inst_atm - inst_suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x3hr_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm, flds=hist_a2x3hr_flds) - endif - enddo - endif - - if (do_hist_a2x3hrp) then - do eai = 1,num_inst_atm - inst_suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x3hrp_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h_prec',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h_prec',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm, flds=hist_a2x3hrp_flds) - endif - enddo - endif - - if (do_hist_a2x24hr) then - do eai = 1,num_inst_atm - inst_suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x24hr_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1d',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=1, write_now=t24hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1d',dname='doma',inst_suffix=trim(inst_suffix), & - nx=atm_nx, ny=atm_ny, nt=1, write_now=t24hr_alarm, flds=hist_a2x24hr_flds) - endif - enddo - endif - - if (do_hist_l2x1yrg) then - ! We use a different approach here than for other aux hist files: For other - ! files, we let seq_hist_writeaux accumulate fields in time. However, if we - ! stop in the middle of an accumulation period, these accumulated fields get - ! reset (because they aren't written to the cpl restart file); this is - ! potentially a problem for this year-long accumulation. Thus, here, we use - ! the existing accumulated fields from prep_glc_mod, because those *do* - ! continue properly through a restart. - - ! The logic here assumes that we average the lnd2glc fields exactly at the - ! year boundary - no more and no less. If that's not the case, we're likely - ! 
to be writing the wrong thing to these aux files, so we check that - ! assumption here. - if (t1yr_alarm .and. .not. lnd2glc_averaged_now) then - write(logunit,*) 'ERROR: histaux_l2x1yrg requested;' - write(logunit,*) 'it is the year boundary, but lnd2glc fields were not averaged this time step.' - write(logunit,*) 'One possible reason is that you are running with a stub glc model.' - write(logunit,*) '(It only works to request histaux_l2x1yrg if running with a prognostic glc model.)' - call shr_sys_abort(subname// & - ' do_hist_l2x1yrg and t1yr_alarm are true, but lnd2glc_averaged_now is false') - end if - if (lnd2glc_averaged_now .and. .not. t1yr_alarm) then - ! If we're averaging more frequently than yearly, then just writing the - ! current values of the averaged fields once per year won't give the true - ! annual averages. - write(logunit,*) 'ERROR: histaux_l2x1yrg requested;' - write(logunit,*) 'lnd2glc fields were averaged this time step, but it is not the year boundary.' - write(logunit,*) '(It only works to request histaux_l2x1yrg if GLC_AVG_PERIOD is yearly.)' - call shr_sys_abort(subname// & - ' do_hist_l2x1yrg and lnd2glc_averaged_now are true, but t1yr_alarm is false') - end if - - if (t1yr_alarm) then - call seq_timemgr_EClockGetData( EClock_d, ECurrTime = etime_curr) - ! We need to pass in tbnds1_offset because (unlike with most - ! seq_hist_writeaux calls) here we don't call seq_hist_writeaux every time - ! step, so the automatically determined lower time bound can be wrong. For - ! typical runs with a noleap calendar, we want tbnds1_offset = - ! -365. However, to determine this more generally, based on the calendar - ! we're using, we call this shr_cal routine. - call shr_cal_ymds2rday_offset(etime=etime_curr, & - rdays_offset = tbnds1_offset, & - years_offset = -1) - do eli = 1,num_inst_lnd - inst_suffix = component_get_suffix(lnd(eli)) - ! Use yr_offset=-1 so the file with fields from year 1 has time stamp - ! 0001-01-01 rather than 0002-01-01, etc. - call seq_hist_writeaux(infodata, EClock_d, lnd(eli), flow='c2x', & - aname='l2x1yr_glc',dname='doml',inst_suffix=trim(inst_suffix), & - nx=lnd_nx, ny=lnd_ny, nt=1, write_now=.true., & - tbnds1_offset = tbnds1_offset, yr_offset=-1, & - av_to_write=prep_glc_get_l2gacc_lx_one_instance(eli)) - enddo - endif - endif - - if (do_hist_l2x) then - do eli = 1,num_inst_lnd - inst_suffix = component_get_suffix(lnd(eli)) - call seq_hist_writeaux(infodata, EClock_d, lnd(eli), flow='c2x', & - aname='l2x',dname='doml',inst_suffix=trim(inst_suffix), & - nx=lnd_nx, ny=lnd_ny, nt=ncpl) - enddo - endif - call t_drvstopf ('CPL:HISTORY',cplrun=.true.) - - endif - !---------------------------------------------------------- - !| RUN ESP MODEL - !---------------------------------------------------------- - if (esp_present .and. esprun_alarm) then - ! Make sure that all couplers are here in multicoupler mode before running ESP component - if (num_inst_driver > 1) then - call mpi_barrier(global_comm, ierr) - end if - ! 
Gather up each instance's 'resume' files (written before 'pause') - do eai = 1, num_inst_atm - call seq_resume_store_comp(atm(eai)%oneletterid, & - atm(eai)%cdata_cc%resume_filename, num_inst_atm, & - ATMID(eai), component_get_iamroot_compid(atm(eai))) - end do - do eli = 1, num_inst_lnd - call seq_resume_store_comp(lnd(eli)%oneletterid, & - lnd(eli)%cdata_cc%resume_filename, num_inst_lnd, & - LNDID(eli), component_get_iamroot_compid(lnd(eli))) - end do - do eoi = 1, num_inst_ocn - call seq_resume_store_comp(ocn(eoi)%oneletterid, & - ocn(eoi)%cdata_cc%resume_filename, num_inst_ocn, & - OCNID(eoi), component_get_iamroot_compid(ocn(eoi))) - end do - do eii = 1, num_inst_ice - call seq_resume_store_comp(ice(eii)%oneletterid, & - ice(eii)%cdata_cc%resume_filename, num_inst_ice, & - ICEID(eii), component_get_iamroot_compid(ice(eii))) - end do - do eri = 1, num_inst_rof - call seq_resume_store_comp(rof(eri)%oneletterid, & - rof(eri)%cdata_cc%resume_filename, num_inst_rof, & - ROFID(eri), component_get_iamroot_compid(rof(eri))) - end do - do egi = 1, num_inst_glc - call seq_resume_store_comp(glc(egi)%oneletterid, & - glc(egi)%cdata_cc%resume_filename, num_inst_glc, & - GLCID(egi), component_get_iamroot_compid(glc(egi))) - end do - do ewi = 1, num_inst_wav - call seq_resume_store_comp(wav(ewi)%oneletterid, & - wav(ewi)%cdata_cc%resume_filename, num_inst_wav, & - WAVID(ewi), component_get_iamroot_compid(wav(ewi))) - end do - ! Here we pass 1 as num_inst_driver as num_inst_driver is used inside - call seq_resume_store_comp('x', drv_resume, 1, & - driver_id, iamroot_CPLID) - call component_run(Eclock_e, esp, esp_run, infodata, & - comp_prognostic=esp_prognostic, comp_num=comp_num_esp, & - timer_barrier= 'CPL:ESP_RUN_BARRIER', timer_comp_run='CPL:ESP_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=esp_layout) - - !--------------------------------------------------------------------- - !| ESP computes resume options for other components -- update everyone - !--------------------------------------------------------------------- - call seq_resume_get_files('a', resume_files) - if (associated(resume_files)) then - do eai = 1, num_inst_atm - atm(eai)%cdata_cc%resume_filename = resume_files(ATMID(eai)) - end do - end if - call seq_resume_get_files('l', resume_files) - if (associated(resume_files)) then - do eli = 1, num_inst_lnd - lnd(eli)%cdata_cc%resume_filename = resume_files(LNDID(eli)) - end do - end if - call seq_resume_get_files('o', resume_files) - if (associated(resume_files)) then - do eoi = 1, num_inst_ocn - ocn(eoi)%cdata_cc%resume_filename = resume_files(OCNID(eoi)) - end do - end if - call seq_resume_get_files('i', resume_files) - if (associated(resume_files)) then - do eii = 1, num_inst_ice - ice(eii)%cdata_cc%resume_filename = resume_files(ICEID(eii)) - end do - end if - call seq_resume_get_files('r', resume_files) - if (associated(resume_files)) then - do eri = 1, num_inst_rof - rof(eri)%cdata_cc%resume_filename = resume_files(ROFID(eri)) - end do - end if - call seq_resume_get_files('g', resume_files) - if (associated(resume_files)) then - do egi = 1, num_inst_glc - glc(egi)%cdata_cc%resume_filename = resume_files(GLCID(egi)) - end do - end if - call seq_resume_get_files('w', resume_files) - if (associated(resume_files)) then - do ewi = 1, num_inst_wav - wav(ewi)%cdata_cc%resume_filename = resume_files(WAVID(ewi)) - end do - end if - call seq_resume_get_files('x', resume_files) - if (associated(resume_files)) then - drv_resume = resume_files(driver_id) - end if - end if - 
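Around an ESP run the driver stores every component instance's 'resume' filename, lets ESP potentially rewrite them, and then copies the (possibly updated) names back into each instance, with the driver's own name going to drv_resume. The following is only a sketch of that store-run-fetch round trip using a plain array of names; the seq_resume_store_comp/seq_resume_get_files routines are CIME-specific and are not reproduced here.

    program resume_roundtrip
      implicit none
      integer, parameter :: num_inst = 3
      character(len=64)  :: resume_files(num_inst)
      integer :: i

      ! "store": record the file each instance wrote before pausing
      do i = 1, num_inst
         write(resume_files(i), '(a,i2.2,a)') 'comp_', i, '.resume.nc'
      end do

      ! an ESP-like step could rewrite any of these names here

      ! "get": hand the (possibly updated) names back to the instances
      do i = 1, num_inst
         print *, 'instance ', i, ' resumes from ', trim(resume_files(i))
      end do
    end program resume_roundtrip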
- !---------------------------------------------------------- - !| RESUME (read restart) if signaled - !---------------------------------------------------------- - if (len_trim(drv_resume) > 0) then - if (iamroot_CPLID) then - write(logunit,103) subname,' Reading restart (resume) file ',trim(drv_resume) - call shr_sys_flush(logunit) - end if - if (iamin_CPLID) then - call seq_rest_read(drv_resume, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx) - end if - ! Clear the resume file so we don't try to read it again - drv_resume = ' ' - end if - - !---------------------------------------------------------- - !| Timing and memory diagnostics - !---------------------------------------------------------- - - call t_drvstartf ('CPL:TSTAMP_WRITE',cplrun=.true.) - if (tod == 0 .or. info_debug > 1) then - if (iamroot_CPLID) then - call date_and_time(dstr,tstr) - Time_estep = mpi_wtime() - cktime = time_estep-time_bstep - cktime_acc(1) = cktime_acc(1) + cktime - cktime_cnt(1) = cktime_cnt(1) + 1 -#ifndef CPL_BYPASS - write(logunit,101) ' tStamp_write: model date = ',ymd,tod, & - ' wall clock = ',dstr(1:4),'-',dstr(5:6),'-',dstr(7:8),' ',& - tstr(1:2),':',tstr(3:4),':',tstr(5:6), & - ' avg dt = ',cktime_acc(1)/cktime_cnt(1),' dt = ',cktime -#endif - Time_bstep = mpi_wtime() - call shr_sys_flush(logunit) - if(cktime > max_cplstep_time .and. max_cplstep_time > 0.0) then - call shr_sys_abort(subname//'Wall clock time exceeds max_cplstep_time') - else if(max_cplstep_time < -0.05) then - ! if max_cplstep_time is < 0 we use abs(max_cplstep_time) - ! times the initial cktime value as a threshhold - max_cplstep_time = -(max_cplstep_time)*cktime - endif - endif - end if - if (tod == 0 .and. wall_time_limit > 0.0_r8 .and. .not. force_stop) then - time_erun = mpi_wtime() - ! time_*run is seconds, wall_time_limit is hours - wall_time = (time_erun - time_brun) / 3600._r8 ! convert secs to hrs - write(logunit,109) subname//' check wall_time_limit: ',wall_time, wall_time_limit - if (wall_time > wall_time_limit) then - force_stop = .true. - force_stop_tod = 0 - if (trim(force_stop_at) == 'month') then - call shr_cal_date2ymd(ymd,year,month,day) - month = month + 1 - do while (month > 12) - month = month - 12 - year = year + 1 - enddo - call shr_cal_ymd2date(year,month,1,force_stop_ymd) - elseif (trim(force_stop_at) == 'year') then ! next year - call shr_cal_date2ymd(ymd,year,month,day) - call shr_cal_ymd2date(year+1,1,1,force_stop_ymd) - elseif (trim(force_stop_at) == 'day') then ! next day - ymdtmp = ymd - call shr_cal_advDateInt(1,'days' ,ymdtmp,0,force_stop_ymd,todtmp,calendar) - else ! day is default - ymdtmp = ymd - call shr_cal_advDateInt(1,'days' ,ymdtmp,0,force_stop_ymd,todtmp,calendar) - endif - write(logunit,108) subname//' reached wall_time_limit (hours) =',wall_time_limit, & - ' :stop at ',force_stop_ymd - endif - endif -#ifndef CPL_BYPASS - if (tod == 0 .or. info_debug > 1) then - !! Report on memory usage - !! For now, just look at the first instance of each component - if ( iamroot_CPLID .or. & - ocn(ens1)%iamroot_compid .or. & - atm(ens1)%iamroot_compid .or. & - lnd(ens1)%iamroot_compid .or. & - ice(ens1)%iamroot_compid .or. & - glc(ens1)%iamroot_compid .or. & - wav(ens1)%iamroot_compid .or. & - iac(ens1)%iamroot_compid) then - call shr_mem_getusage(msize,mrss,.true.) 
- - write(logunit,105) ' memory_write: model date = ',ymd,tod, & - ' memory = ',msize,' MB (highwater) ',mrss,' MB (usage)', & - ' (pe=',iam_GLOID,' comps=',trim(complist)//')' - endif - endif -#endif - if (info_debug > 1) then - if (iamroot_CPLID) then - call seq_infodata_GetData(infodata,nextsw_cday=nextsw_cday) - ! write(logunit,106) ' nextsw_cday = ',nextsw_cday - write(logunit,*) ' nextsw_cday = ',nextsw_cday - endif - endif - call t_drvstopf ('CPL:TSTAMP_WRITE',cplrun=.true.) - - call t_stopf ('CPL:RUN_LOOP', hashint(1)) - - ! --- Write out performance data - call t_startf ('CPL:TPROF_WRITE') - if ((tprof_alarm) .or. ((tod == 0) .and. in_first_day)) then - - if ((tod == 0) .and. in_first_day) then - in_first_day = .false. - endif - - write(timing_file,'(a,i8.8,a1,i5.5)') & - trim(tchkpt_dir)//"/model_timing"//trim(cpl_inst_tag)//"_",ymd,"_",tod - - call t_set_prefixf("CPL:RUN_LOOP_") - call cime_write_performance_checkpoint(output_perf,timing_file,mpicom_GLOID) - call t_unset_prefixf() - - endif - call t_stopf ('CPL:TPROF_WRITE') - - if (barrier_alarm) then - call t_drvstartf ('CPL:BARRIERALARM',cplrun=.true.) - call mpi_barrier(mpicom_GLOID,ierr) - call t_drvstopf ('CPL:BARRIERALARM',cplrun=.true.) - endif - - enddo ! driver run loop - - !|---------------------------------------------------------- - !| End of driver time step loop - !|--------------------------------------------------------- - - call t_startf ('CPL:RUN_LOOP_BSTOP') - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf ('CPL:RUN_LOOP_BSTOP') - - call seq_resume_free() - Time_end = mpi_wtime() - - end subroutine cime_run - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_final() - - use shr_pio_mod, only : shr_pio_finalize - use shr_wv_sat_mod, only: shr_wv_sat_final - character(len=cs) :: cime_model - - !------------------------------------------------------------------------ - ! Finalization of all models - !------------------------------------------------------------------------ - - call t_barrierf ('CPL:FINAL_BARRIER', mpicom_GLOID) - call t_startf ('CPL:FINAL') - call t_adj_detailf(+1) - - call t_startf('CPL:cime_final') - call t_adj_detailf(+1) - - call seq_timemgr_EClockGetData( EClock_d, stepno=endstep) - call shr_mem_getusage(msize,mrss) - - call component_final(EClock_a, atm, atm_final) - call component_final(EClock_l, lnd, lnd_final) - call component_final(EClock_r, rof, rof_final) - call component_final(EClock_i, ice, ice_final) - call component_final(EClock_o, ocn, ocn_final) - call component_final(EClock_g, glc, glc_final) - call component_final(EClock_w, wav, wav_final) - call component_final(EClock_w, iac, iac_final) - - !------------------------------------------------------------------------ - ! End the run cleanly - !------------------------------------------------------------------------ - - call shr_wv_sat_final() - call seq_infodata_GetData(infodata, cime_model=cime_model) - call shr_pio_finalize( ) - - call shr_mpi_min(msize ,msize0,mpicom_GLOID,' driver msize0', all=.true.) - call shr_mpi_max(msize ,msize1,mpicom_GLOID,' driver msize1', all=.true.) - call shr_mpi_min(mrss ,mrss0,mpicom_GLOID,' driver mrss0', all=.true.) - call shr_mpi_max(mrss ,mrss1,mpicom_GLOID,' driver mrss1', all=.true.) 
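In the finalize path each task's memory figures are reduced to global minima and maxima before the root task writes the termination summary; the shr_mpi_min/shr_mpi_max calls with all=.true. behave like an allreduce over the global communicator. A standalone sketch of that reduction in plain MPI follows, under the assumption that the shr_mpi wrappers reduce this way; the program and its values are illustrative only.

    program mem_minmax
      use mpi
      implicit none
      integer :: ierr, rank
      real(8) :: msize, msize0, msize1

      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world, rank, ierr)
      msize = 100.0d0 + rank       ! stand-in for a per-task memory highwater (MB)

      ! global min and max across all tasks, available on every rank
      call mpi_allreduce(msize, msize0, 1, mpi_real8, mpi_min, mpi_comm_world, ierr)
      call mpi_allreduce(msize, msize1, 1, mpi_real8, mpi_max, mpi_comm_world, ierr)

      if (rank == 0) print *, 'min/max memory highwater (MB):', msize0, msize1
      call mpi_finalize(ierr)
    end program mem_minmax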
- - if (iamroot_CPLID )then - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod, dtime=dtime) - simDays = (endStep-begStep)*dtime/(24._r8*3600._r8) - write(logunit,'(//)') - write(logunit,FormatA) subname, 'SUCCESSFUL TERMINATION OF CPL7-'//trim(cime_model) - write(logunit,FormatD) subname, ' at YMD,TOD = ',ymd,tod - write(logunit,FormatR) subname, '# simulated days (this run) = ', simDays - write(logunit,FormatR) subname, 'compute time (hrs) = ', (Time_end-Time_begin)/3600._r8 - if ( (Time_end /= Time_begin) .and. (simDays /= 0.0_r8) )then - SYPD = shr_const_cday*simDays/(days_per_year*(Time_end-Time_begin)) - write(logunit,FormatR) subname, '# simulated years / cmp-day = ', SYPD - endif - write(logunit,FormatR) subname,' pes min memory highwater (MB) = ',msize0 - write(logunit,FormatR) subname,' pes max memory highwater (MB) = ',msize1 - write(logunit,FormatR) subname,' pes min memory last usage (MB) = ',mrss0 - write(logunit,FormatR) subname,' pes max memory last usage (MB) = ',mrss1 - write(logunit,'(//)') - close(logunit) - endif - - call t_adj_detailf(-1) - call t_stopf('CPL:cime_final') - - call t_adj_detailf(-1) - call t_stopf ('CPL:FINAL') - - call t_set_prefixf("CPL:FINAL_") - - call t_startf("sync1_tprf") - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf("sync1_tprf") - - if (output_perf) then - call t_prf(trim(timing_dir)//'/model_timing'//trim(cpl_inst_tag), & - mpicom=mpicom_GLOID, output_thispe=output_perf) - else - call t_prf(trim(timing_dir)//'/model_timing'//trim(cpl_inst_tag), & - mpicom=mpicom_GLOID) - endif - - call t_unset_prefixf() - - call t_finalizef() - - end subroutine cime_final - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_printlogheader() - - !----------------------------------------------------------------------- - ! - ! Purpose: Print basic information on what this driver program is - ! to the logfile. - ! - !----------------------------------------------------------------------- - ! - ! Local variables - ! - - character(len=8) :: cdate ! System date - character(len=8) :: ctime ! 
System time - integer :: values(8) - character :: date*8, time*10, zone*5 - character(len=cs) :: cime_model - - !------------------------------------------------------------------------------- - - call date_and_time (date, time, zone, values) - call seq_infodata_GetData(infodata, cime_model=cime_model) - cdate(1:2) = date(5:6) - cdate(3:3) = '/' - cdate(4:5) = date(7:8) - cdate(6:6) = '/' - cdate(7:8) = date(3:4) - ctime(1:2) = time(1:2) - ctime(3:3) = ':' - ctime(4:5) = time(3:4) - ctime(6:6) = ':' - ctime(7:8) = time(5:6) - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' Common Infrastructure for Modeling the Earth (CIME) CPL7 ' - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' (Online documentation is available on the CIME ' - write(logunit,F00) ' github: http://esmci.github.io/cime/) ' - write(logunit,F00) ' License information is available as a link from above ' - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' MODEL ',cime_model - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' DATE ',cdate, ' TIME ', ctime - write(logunit,F00) '------------------------------------------------------------' - write(logunit,*)' ' - write(logunit,*)' ' - - end subroutine cime_printlogheader - - !=============================================================================== - - subroutine cime_comp_barriers(mpicom, timer) - integer , intent(in) :: mpicom - character(len=*), intent(in) :: timer - integer :: ierr - - if (run_barriers) then - call t_drvstartf (trim(timer)) - call mpi_barrier(mpicom,ierr) - call t_drvstopf (trim(timer)) - endif - end subroutine cime_comp_barriers - - !=============================================================================== - - subroutine cime_cpl_init(comm_in, comm_out, num_inst_driver, id) - !----------------------------------------------------------------------- - ! - ! Initialize multiple coupler instances, if requested - ! - !----------------------------------------------------------------------- - - implicit none - - integer , intent(in) :: comm_in - integer , intent(out) :: comm_out - integer , intent(out) :: num_inst_driver - integer , intent(out) :: id ! instance ID, starts from 1 - ! - ! Local variables - ! - integer :: ierr, mype, nu, numpes !, pes - integer :: ninst_driver, drvpes - character(len=*), parameter :: subname = '(cime_cpl_init) ' - - namelist /cime_driver_inst/ ninst_driver - - call shr_mpi_commrank(comm_in, mype , ' cime_cpl_init') - call shr_mpi_commsize(comm_in, numpes, ' cime_cpl_init') - - num_inst_driver = 1 - id = 1 ! For compatiblity with component instance numbering - - if (mype == 0) then - ! 
Read coupler namelist if it exists - ninst_driver = 1 - nu = shr_file_getUnit() - open(unit = nu, file = NLFileName, status = 'old', iostat = ierr) - rewind(unit = nu) - ierr = 1 - do while ( ierr /= 0 ) - read(unit = nu, nml = cime_driver_inst, iostat = ierr) - if (ierr < 0) then - call shr_sys_abort( subname//':: namelist read returns an'// & - ' end of file or end of record condition' ) - endif - enddo - close(unit = nu) - call shr_file_freeUnit(nu) - num_inst_driver = max(ninst_driver, 1) - end if - - call shr_mpi_bcast(num_inst_driver, comm_in, 'ninst_driver') - - if (mod(numpes, num_inst_driver) /= 0) then - call shr_sys_abort(subname // & - ' : Total PE number must be a multiple of coupler instance number') - end if - - if (num_inst_driver == 1) then - call mpi_comm_dup(comm_in, comm_out, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_dup') - else - id = mype * num_inst_driver / numpes + 1 - call mpi_comm_split(comm_in, id, 0, comm_out, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_split') - end if - call shr_mpi_commsize(comm_out, drvpes, ' cime_cpl_init') - - end subroutine cime_cpl_init - - !=============================================================================== - - subroutine cime_run_atmocn_fluxes(hashint) - integer, intent(inout) :: hashint(:) - - !---------------------------------------------------------- - !| atm/ocn flux on atm grid - !---------------------------------------------------------- - if (trim(aoflux_grid) == 'atm') then - ! compute o2x_ax for flux_atmocn, will be updated before atm merge - ! do not use fractions because fractions here are NOT consistent with fractions in atm_mrg - if (ocn_c2_atm) call prep_atm_calc_o2x_ax(timer='CPL:atmoca_ocn2atm') - - call t_drvstartf ('CPL:atmocna_fluxa',barrier=mpicom_CPLID, hashint=hashint(6)) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ax => component_get_c2x_cx(atm(eai)) - o2x_ax => prep_atm_get_o2x_ax() ! array over all instances - xao_ax => prep_aoflux_get_xao_ax() ! array over all instances - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ax, o2x_ax(eoi), xao_ax(exi)) - enddo - call t_drvstopf ('CPL:atmocna_fluxa',hashint=hashint(6)) - - if (atm_c2_ocn) call prep_aoflux_calc_xao_ox(timer='CPL:atmocna_atm2ocn') - endif ! aoflux_grid - - !---------------------------------------------------------- - !| atm/ocn flux on ocn grid - !---------------------------------------------------------- - if (trim(aoflux_grid) == 'ocn') then - call t_drvstartf ('CPL:atmocnp_fluxo',barrier=mpicom_CPLID, hashint=hashint(6)) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ox => prep_ocn_get_a2x_ox() - o2x_ox => component_get_c2x_cx(ocn(eoi)) - xao_ox => prep_aoflux_get_xao_ox() - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ox(eai), o2x_ox, xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_fluxo',hashint=hashint(6)) - endif ! aoflux_grid - - end subroutine cime_run_atmocn_fluxes - -!---------------------------------------------------------------------------------- - - subroutine cime_run_ocn_albedos(hashint) - integer, intent(inout) :: hashint(:) - - call t_drvstartf ('CPL:atmocnp_ocnalb', barrier=mpicom_CPLID, hashint=hashint(5)) - do exi = 1,num_inst_xao - efi = mod((exi-1),num_inst_frc) + 1 - eai = mod((exi-1),num_inst_atm) + 1 - xao_ox => prep_aoflux_get_xao_ox() ! 
array over all instances - a2x_ox => prep_ocn_get_a2x_ox() - call seq_flux_ocnalb_mct(infodata, ocn(1), a2x_ox(eai), fractions_ox(efi), xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_ocnalb', hashint=hashint(5)) - - end subroutine cime_run_ocn_albedos - -!---------------------------------------------------------------------------------- - - subroutine cime_run_atm_setup_send() - - !---------------------------------------------------------- - !| atm prep-merge - !---------------------------------------------------------- - - if (iamin_CPLID .and. atm_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMPREP_BARRIER') - call t_drvstartf ('CPL:ATMPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (ocn_c2_atm) then - if (trim(aoflux_grid) == 'ocn') then - ! map xao_ox states and fluxes to xao_ax if fluxes were computed on ocn grid - call prep_aoflux_calc_xao_ax(fractions_ox, flds='states_and_fluxes', & - timer='CPL:atmprep_xao2atm') - endif - - ! recompute o2x_ax now for the merge with fractions associated with merge - call prep_atm_calc_o2x_ax(fractions_ox, timer='CPL:atmprep_ocn2atm') - - ! map xao_ox albedos to the atm grid, these are always computed on the ocean grid - call prep_aoflux_calc_xao_ax(fractions_ox, flds='albedos', timer='CPL:atmprep_alb2atm') - endif - if (ice_c2_atm) then - call prep_atm_calc_i2x_ax(fractions_ix, timer='CPL:atmprep_ice2atm') - endif - if (lnd_c2_atm) then - call prep_atm_calc_l2x_ax(fractions_lx, timer='CPL:atmprep_lnd2atm') - endif - if (iac_c2_atm) then - call prep_atm_calc_z2x_ax(fractions_zx, timer='CPL:atmprep_iac2atm') - endif - if (associated(xao_ax)) then - call prep_atm_mrg(infodata, fractions_ax, xao_ax=xao_ax, timer_mrg='CPL:atmprep_mrgx2a') - endif - - call component_diag(infodata, atm, flow='x2c', comment= 'send atm', info_debug=info_debug, & - timer_diag='CPL:atmprep_diagav') - - call t_drvstopf ('CPL:ATMPREP',cplrun=.true.) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - !---------------------------------------------------------- - !| cpl -> atm - !---------------------------------------------------------- - - if (iamin_CPLALLATMID .and. 
atm_prognostic) then - call component_exch(atm, flow='x2c', infodata=infodata, infodata_string='cpl2atm_run', & - mpicom_barrier=mpicom_CPLALLATMID, run_barriers=run_barriers, & - timer_barrier='CPL:C2A_BARRIER', timer_comp_exch='CPL:C2A', & - timer_map_exch='CPL:c2a_atmx2atmg', timer_infodata_exch='CPL:c2a_infoexch') - endif - - end subroutine cime_run_atm_setup_send - -!---------------------------------------------------------------------------------- - - subroutine cime_run_atm_recv_post() - - !---------------------------------------------------------- - !| atm -> cpl - !---------------------------------------------------------- - if (iamin_CPLALLATMID) then - call component_exch(atm, flow='c2x', infodata=infodata, infodata_string='atm2cpl_run', & - mpicom_barrier=mpicom_CPLALLATMID, run_barriers=run_barriers, & - timer_barrier='CPL:A2C_BARRIER', timer_comp_exch='CPL:A2C', & - timer_map_exch='CPL:a2c_atma2atmx', timer_infodata_exch='CPL:a2c_infoexch') - endif - - !---------------------------------------------------------- - !| atm post - !---------------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMPOST_BARRIER') - call t_drvstartf ('CPL:ATMPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, atm, flow='c2x', comment= 'recv atm', & - info_debug=info_debug, timer_diag='CPL:atmpost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ATMPOST',cplrun=.true.) - endif - - end subroutine cime_run_atm_recv_post - -!---------------------------------------------------------------------------------- - - subroutine cime_run_ocn_setup_send() - - !---------------------------------------------------- - ! "startup" wait - !---------------------------------------------------- - if (iamin_CPLALLOCNID) then - ! want to know the time the ocean pes waited for the cpl pes - ! at the first ocnrun_alarm, min ocean wait is wait time - ! do not use t_barrierf here since it can be "off", use mpi_barrier - do eoi = 1,num_inst_ocn - if (ocn(eoi)%iamin_compid) call t_drvstartf ('CPL:C2O_INITWAIT') - enddo - call mpi_barrier(mpicom_CPLALLOCNID,ierr) - do eoi = 1,num_inst_ocn - if (ocn(eoi)%iamin_compid) call t_drvstopf ('CPL:C2O_INITWAIT') - enddo - cpl2ocn_first = .false. - endif - - !---------------------------------------------------- - ! ocn average - !---------------------------------------------------- - if (iamin_CPLID .and. ocn_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPREP_BARRIER') - call t_drvstartf ('CPL:OCNPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - ! finish accumulating ocean inputs - ! reset the value of x2o_ox with the value in x2oacc_ox (module variable in prep_ocn_mod) - call prep_ocn_accum_avg(timer_accum='CPL:ocnprep_avg') - - call component_diag(infodata, ocn, flow='x2c', comment= 'send ocn', & - info_debug=info_debug, timer_diag='CPL:ocnprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - ! cpl -> ocn - !---------------------------------------------------- - if (iamin_CPLALLOCNID .and. 
ocn_prognostic) then - call component_exch(ocn, flow='x2c', & - infodata=infodata, infodata_string='cpl2ocn_run', & - mpicom_barrier=mpicom_CPLALLOCNID, run_barriers=run_barriers, & - timer_barrier='CPL:C2O_BARRIER', timer_comp_exch='CPL:C2O', & - timer_map_exch='CPL:c2o_ocnx2ocno', timer_infodata_exch='CPL:c2o_infoexch') - endif - - end subroutine cime_run_ocn_setup_send - - !---------------------------------------------------------------------------------- - - subroutine cime_run_ocn_recv_post() - - !---------------------------------------------------------- - ! ocn -> cpl - !---------------------------------------------------------- - if (iamin_CPLALLOCNID) then - call component_exch(ocn, flow='c2x', & - infodata=infodata, infodata_string='ocn2cpl_run', & - mpicom_barrier=mpicom_CPLALLOCNID, run_barriers=run_barriers, & - timer_barrier='CPL:O2CT_BARRIER', timer_comp_exch='CPL:O2CT', & - timer_map_exch='CPL:o2c_ocno2ocnx', timer_infodata_exch='CPL:o2c_infoexch') - endif - - !---------------------------------------------------------- - ! ocn post - !---------------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPOSTT_BARRIER') - call t_drvstartf ('CPL:OCNPOSTT',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, ocn, flow='c2x', comment= 'recv ocn', & - info_debug=info_debug, timer_diag='CPL:ocnpost_diagav') - - call cime_run_ocnglc_coupling() - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPOSTT',cplrun=.true.) - endif - - end subroutine cime_run_ocn_recv_post - - !---------------------------------------------------------------------------------- - subroutine cime_run_iac_setup_send() - - !------------------------------------------------------- - ! | iac prep-merge - !------------------------------------------------------- - - if (iamin_CPLID .and. iac_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:IACPREP_BARRIER') - - call t_drvstartf ('CPL:IACPREP', cplrun=.true., barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - ! Average our accumulators - call prep_iac_accum_avg(timer='CPL:iacprep_l2xavg') - - ! Setup lnd inputs on iac grid. Right now I think they will be the same - ! thing, but I'm trying to code for the general case - if (lnd_c2_iac) then - call prep_iac_calc_l2x_zx(timer='CPL:iacprep_lnd2iac') - endif - - - call prep_iac_mrg(infodata, fractions_zx, timer_mrg='CPL:iacprep_mrgx2z') - - call component_diag(infodata, iac, flow='x2c', comment= 'send iac', & - info_debug=info_debug, timer_diag='CPL:iacprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:IACPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - !| cpl -> iac - !---------------------------------------------------- - - if (iamin_CPLALLIACID .and. 
iac_prognostic) then - call component_exch(iac, flow='x2c', & - infodata=infodata, infodata_string='cpl2iac_run', & - mpicom_barrier=mpicom_CPLALLLNDID, run_barriers=run_barriers, & - timer_barrier='CPL:C2Z_BARRIER', timer_comp_exch='CPL:C2Z', & - timer_map_exch='CPL:c2z_iacx2iacr', timer_infodata_exch='CPL:c2z_infoexch') - endif - - end subroutine cime_run_iac_setup_send - - !---------------------------------------------------------------------------------- - subroutine cime_run_iac_recv_post() - - !---------------------------------------------------------- - !| iac -> cpl - !---------------------------------------------------------- - - if (iamin_CPLALLIACID) then - call component_exch(rof, flow='c2x', & - infodata=infodata, infodata_string='iac2cpl_run', & - mpicom_barrier=mpicom_CPLALLIACID, run_barriers=run_barriers, & - timer_barrier='CPL:Z2C_BARRIER', timer_comp_exch='CPL:Z2C', & - timer_map_exch='CPL:z2c_iacr2iacx', timer_infodata_exch='CPL:z2c_infoexch') - endif - - !---------------------------------------------------------- - !| iac post - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:IACPOST_BARRIER') - call t_drvstartf ('CPL:IACPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, iac, flow='c2x', comment= 'recv iac', & - info_debug=info_debug, timer_diag='CPL:iacpost_diagav') - - ! TRS I think this is wrong - review these prep functions. I think it's more likely - if (iac_c2_lnd) then - call prep_lnd_calc_z2x_lx(timer='CPL:iacpost_iac2lnd') - endif - - if (iac_c2_atm) then - call prep_atm_calc_z2x_ax(fractions_zx, timer='CPL:iacpost_iac2atm') - endif - - call t_drvstopf ('CPL:IACPOST', cplrun=.true.) - endif - - end subroutine cime_run_iac_recv_post - - !---------------------------------------------------------------------------------- - - subroutine cime_run_atmocn_setup(hashint) - integer, intent(inout) :: hashint(:) - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMOCNP_BARRIER') - call t_drvstartf ('CPL:ATMOCNP',cplrun=.true.,barrier=mpicom_CPLID,hashint=hashint(7)) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (trim(cpl_seq_option(1:5)) == 'NUOPC') then - if (atm_c2_ocn) call prep_ocn_calc_a2x_ox(timer='CPL:atmocnp_atm2ocn') - end if - - if (ocn_prognostic) then - ! Map to ocn - if (ice_c2_ocn) call prep_ocn_calc_i2x_ox(timer='CPL:atmocnp_ice2ocn') - if (wav_c2_ocn) call prep_ocn_calc_w2x_ox(timer='CPL:atmocnp_wav2ocn') - if (trim(cpl_seq_option(1:5)) == 'NUOPC') then - if (rof_c2_ocn) call prep_ocn_calc_r2x_ox(timer='CPL:atmocnp_rof2ocn') - if (glc_c2_ocn) call prep_ocn_calc_g2x_ox(timer='CPL:atmocnp_glc2ocn') - end if - end if - - ! atm/ocn flux on either atm or ocean grid - call cime_run_atmocn_fluxes(hashint) - - ! ocn prep-merge (cesm1_mod or cesm1_mod_tight) - if (ocn_prognostic) then -#if COMPARE_TO_NUOPC - !This is need to compare to nuopc - if (.not. skip_ocean_run) then - ! ocn prep-merge - xao_ox => prep_aoflux_get_xao_ox() - call prep_ocn_mrg(infodata, fractions_ox, xao_ox=xao_ox, timer_mrg='CPL:atmocnp_mrgx2o') - - ! Accumulate ocn inputs - form partial sum of tavg ocn inputs (virtual "send" to ocn) - call prep_ocn_accum(timer='CPL:atmocnp_accum') - end if -#else - ! ocn prep-merge - xao_ox => prep_aoflux_get_xao_ox() - call prep_ocn_mrg(infodata, fractions_ox, xao_ox=xao_ox, timer_mrg='CPL:atmocnp_mrgx2o') - - ! 
Accumulate ocn inputs - form partial sum of tavg ocn inputs (virtual "send" to ocn) - call prep_ocn_accum(timer='CPL:atmocnp_accum') -#endif - end if - - !---------------------------------------------------------- - ! ocn albedos - ! (MUST BE AFTER prep_ocn_mrg for swnet to ocn to be computed properly - !---------------------------------------------------------- - call cime_run_ocn_albedos(hashint) - - !---------------------------------------------------------- - ! ocn budget - !---------------------------------------------------------- - if (do_budgets) then - call cime_run_calc_budgets3() - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ATMOCNP',cplrun=.true.,hashint=hashint(7)) - end if - - end subroutine cime_run_atmocn_setup - -!---------------------------------------------------------------------------------- - - subroutine cime_run_ocnglc_coupling() - !--------------------------------------- - ! Description: Run calculation of coupling fluxes between OCN and GLC - ! Note: this happens in the coupler to allow it be calculated on the - ! ocean time step but the GLC grid. - !--------------------------------------- - - if (glc_present) then - - if (ocn_c2_glcshelf .and. glcshelf_c2_ocn) then - ! the boundary flux calculations done in the coupler require inputs from both GLC and OCN, - ! so they will only be valid if both OCN->GLC and GLC->OCN - - call prep_glc_calc_o2x_gx(timer='CPL:glcprep_ocn2glc') !remap ocean fields to o2x_g at ocean couping interval - - call prep_glc_calculate_subshelf_boundary_fluxes ! this is actual boundary layer flux calculation - !this outputs - !x2g_g/g2x_g, where latter is going - !to ocean, so should get remapped to - !ocean grid in prep_ocn_shelf_calc_g2x_ox - call prep_ocn_shelf_calc_g2x_ox(timer='CPL:glcpost_glcshelf2ocn') - !Map g2x_gx shelf fields that were updated above, to g2x_ox. - !Do this at intrinsic coupling - !frequency - call prep_glc_accum_ocn(timer='CPL:glcprep_accum_ocn') !accum x2g_g fields here into x2g_gacc - endif - - if (glcshelf_c2_ice) then - call prep_ice_shelf_calc_g2x_ix(timer='CPL:glcpost_glcshelf2ice') - !Map g2x_gx shelf fields to g2x_ix. - !Do this at intrinsic coupling - !frequency. This is perhaps an - !unnecessary place to put this - !call, since these fields aren't - !changing on the intrinsic - !timestep. But I don't think it's - !unsafe to do it here. - endif - - endif - - end subroutine cime_run_ocnglc_coupling - -!---------------------------------------------------------------------------------- - - subroutine cime_run_lnd_setup_send() - - !---------------------------------------------------- - !| lnd prep-merge - !---------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:LNDPREP_BARRIER') - call t_drvstartf ('CPL:LNDPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (atm_c2_lnd) call prep_lnd_calc_a2x_lx(timer='CPL:lndprep_atm2lnd') - if (trim(cpl_seq_option(1:5)) == 'NUOPC') then - if (glc_c2_lnd) call prep_lnd_calc_g2x_lx(timer='CPL:glcpost_glc2lnd') - end if - - ! 
IAC export onto lnd grid - if (iac_c2_lnd) then - call prep_lnd_calc_z2x_lx(timer='CPL:lndprep_iac2lnd') - endif - - if (lnd_prognostic) then - call prep_lnd_mrg(infodata, timer_mrg='CPL:lndprep_mrgx2l') - - call component_diag(infodata, lnd, flow='x2c', comment= 'send lnd', & - info_debug=info_debug, timer_diag='CPL:lndprep_diagav') - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:LNDPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - !| cpl -> lnd - !---------------------------------------------------- - if (iamin_CPLALLLNDID) then - call component_exch(lnd, flow='x2c', & - infodata=infodata, infodata_string='cpl2lnd_run', & - mpicom_barrier=mpicom_CPLALLLNDID, run_barriers=run_barriers, & - timer_barrier='CPL:C2L_BARRIER', timer_comp_exch='CPL:C2L', & - timer_map_exch='CPL:c2l_lndx2lndl', timer_infodata_exch='CPL:c2l_infoexch') - endif - - end subroutine cime_run_lnd_setup_send - -!---------------------------------------------------------------------------------- - - subroutine cime_run_lnd_recv_post() - - !---------------------------------------------------------- - !| lnd -> cpl - !---------------------------------------------------------- - if (iamin_CPLALLLNDID) then - call component_exch(lnd, flow='c2x', infodata=infodata, infodata_string='lnd2cpl_run', & - mpicom_barrier=mpicom_CPLALLLNDID, run_barriers=run_barriers, & - timer_barrier='CPL:L2C_BARRIER', timer_comp_exch='CPL:L2C', & - timer_map_exch='CPL:l2c_lndl2lndx', timer_infodata_exch='lnd2cpl_run') - endif - - !---------------------------------------------------------- - !| lnd post - !---------------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:LNDPOST_BARRIER') - call t_drvstartf ('CPL:LNDPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, lnd, flow='c2x', comment='recv lnd', & - info_debug=info_debug, timer_diag='CPL:lndpost_diagav') - - ! Accumulate rof and glc inputs (module variables in prep_rof_mod and prep_glc_mod) - if (lnd_c2_rof) call prep_rof_accum(timer='CPL:lndpost_accl2r') - if (lnd_c2_glc) call prep_glc_accum_lnd(timer='CPL:lndpost_accl2g' ) - if (lnd_c2_iac) call prep_iac_accum(timer='CPL:lndpost_accl2z') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:LNDPOST',cplrun=.true.) - endif - - end subroutine cime_run_lnd_recv_post - -!---------------------------------------------------------------------------------- - - subroutine cime_run_glc_setup_send(lnd2glc_averaged_now) - - logical, intent(inout) :: lnd2glc_averaged_now ! Set to .true. if lnd2glc averages were taken this timestep (otherwise left unchanged) - - !---------------------------------------------------- - !| glc prep-merge - !---------------------------------------------------- - if (iamin_CPLID .and. glc_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') - call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - ! NOTE - only create appropriate input to glc if the avg_alarm is on - if (lnd_c2_glc .or. ocn_c2_glcshelf) then - if (glcrun_avg_alarm) then - call prep_glc_accum_avg(timer='CPL:glcprep_avg') - - if (lnd_c2_glc) then - lnd2glc_averaged_now = .true. - ! 
Note that l2x_gx is obtained from mapping the module variable l2gacc_lx - call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') - - call prep_glc_mrg_lnd(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') - endif - - call component_diag(infodata, glc, flow='x2c', comment='send glc', & - info_debug=info_debug, timer_diag='CPL:glcprep_diagav') - - else - call prep_glc_zero_fields() - endif ! glcrun_avg_alarm - end if ! lnd_c2_glc or ocn_c2_glcshelf - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) - - end if ! iamin_CPLID .and. glc_prognostic - - ! Set the infodata field on all tasks (not just those with iamin_CPLID). - if (glc_prognostic) then - if (glcrun_avg_alarm) then - call seq_infodata_PutData(infodata, glc_valid_input=.true.) - else - call seq_infodata_PutData(infodata, glc_valid_input=.false.) - end if - end if - - !---------------------------------------------------- - !| cpl -> glc - !---------------------------------------------------- - if (iamin_CPLALLGLCID .and. glc_prognostic) then - call component_exch(glc, flow='x2c', & - infodata=infodata, infodata_string='cpl2glc_run', & - mpicom_barrier=mpicom_CPLALLGLCID, run_barriers=run_barriers, & - timer_barrier='CPL:C2G_BARRIER', timer_comp_exch='CPL:C2G', & - timer_map_exch='CPL:c2g_glcx2glcg', timer_infodata_exch='CPL:c2g_infoexch') - endif - - end subroutine cime_run_glc_setup_send - -!---------------------------------------------------------------------------------- - - subroutine cime_run_glc_recv_post() - - !---------------------------------------------------------- - ! glc -> cpl - !---------------------------------------------------------- - if (iamin_CPLALLGLCID) then - call component_exch(glc, flow='c2x', infodata=infodata, infodata_string='glc2cpl_run', & - mpicom_barrier=mpicom_CPLALLGLCID, run_barriers=run_barriers, & - timer_barrier='CPL:G2C_BARRIER', timer_comp_exch='CPL:G2C', & - timer_map_exch='CPL:g2c_glcg2glcx', timer_infodata_exch='CPL:g2c_infoexch') - endif - - !---------------------------------------------------------- - ! glc post - !---------------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPOST_BARRIER') - call t_drvstartf ('CPL:GLCPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, glc, flow='c2x', comment= 'recv glc', & - info_debug=info_debug, timer_diag='CPL:glcpost_diagav') - - if (trim(cpl_seq_option(1:5)) /= 'NUOPC') then - if (glc_c2_lnd) call prep_lnd_calc_g2x_lx(timer='CPL:glcpost_glc2lnd') - if (glc_c2_ocn) call prep_ocn_calc_g2x_ox(timer='CPL:glcpost_glc2ocn') - if (glc_c2_ice) call prep_ice_calc_g2x_ix(timer='CPL:glcpost_glc2ice') - end if - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:GLCPOST',cplrun=.true.) - endif - - end subroutine cime_run_glc_recv_post - -!---------------------------------------------------------------------------------- - - subroutine cime_run_rof_setup_send() - - !---------------------------------------------------- - ! rof prep-merge - !---------------------------------------------------- - if (iamin_CPLID .and. 
rof_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ROFPREP_BARRIER') - - call t_drvstartf ('CPL:ROFPREP', cplrun=.true., barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call prep_rof_accum_avg(timer='CPL:rofprep_l2xavg') - - if (lnd_c2_rof) call prep_rof_calc_l2r_rx(fractions_lx, timer='CPL:rofprep_lnd2rof') - - call prep_rof_mrg(infodata, fractions_rx, timer_mrg='CPL:rofprep_mrgx2r') - - call component_diag(infodata, rof, flow='x2c', comment= 'send rof', & - info_debug=info_debug, timer_diag='CPL:rofprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ROFPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - ! cpl -> rof - !---------------------------------------------------- - if (iamin_CPLALLROFID .and. rof_prognostic) then - call component_exch(rof, flow='x2c', & - infodata=infodata, infodata_string='cpl2rof_run', & - mpicom_barrier=mpicom_CPLALLLNDID, run_barriers=run_barriers, & - timer_barrier='CPL:C2R_BARRIER', timer_comp_exch='CPL:C2R', & - timer_map_exch='CPL:c2r_rofx2rofr', timer_infodata_exch='CPL:c2r_infoexch') - endif - - end subroutine cime_run_rof_setup_send - -!---------------------------------------------------------------------------------- - - subroutine cime_run_rof_recv_post() - - !---------------------------------------------------------- - ! rof -> cpl - !---------------------------------------------------------- - if (iamin_CPLALLROFID) then - call component_exch(rof, flow='c2x', & - infodata=infodata, infodata_string='rof2cpl_run', & - mpicom_barrier=mpicom_CPLALLROFID, run_barriers=run_barriers, & - timer_barrier='CPL:R2C_BARRIER', timer_comp_exch='CPL:R2C', & - timer_map_exch='CPL:r2c_rofr2rofx', timer_infodata_exch='CPL:r2c_infoexch') - endif - - !---------------------------------------------------------- - ! rof post - !---------------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ROFPOST_BARRIER') - call t_drvstartf ('CPL:ROFPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, rof, flow='c2x', comment= 'recv rof', & - info_debug=info_debug, timer_diag='CPL:rofpost_diagav') - - if (trim(cpl_seq_option(1:5)) /= 'NUOPC') then - if (rof_c2_lnd) call prep_lnd_calc_r2x_lx(timer='CPL:rofpost_rof2lnd') - if (rof_c2_ice) call prep_ice_calc_r2x_ix(timer='CPL:rofpost_rof2ice') - if (rof_c2_ocn) call prep_ocn_calc_r2x_ox(timer='CPL:rofpost_rof2ocn') - end if - call t_drvstopf ('CPL:ROFPOST', cplrun=.true.) - endif - - end subroutine cime_run_rof_recv_post - -!---------------------------------------------------------------------------------- - - subroutine cime_run_ice_setup_send() - - ! Note that for atm->ice mapping below will leverage the assumption that the - ! ice and ocn are on the same grid and that mapping of atm to ocean is - ! done already for use by atmocn flux and ice model prep - - !---------------------------------------------------- - ! ice prep-merge - !---------------------------------------------------- - if (iamin_CPLID .and. 
ice_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ICEPREP_BARRIER') - - call t_drvstartf ('CPL:ICEPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (ocn_c2_ice) call prep_ice_calc_o2x_ix(timer='CPL:iceprep_ocn2ice') - if (trim(cpl_seq_option(1:5)) == 'NUOPC') then - if (rof_c2_ice) call prep_ice_calc_r2x_ix(timer='CPL:rofpost_rof2ice') - if (glc_c2_ice) call prep_ice_calc_g2x_ix(timer='CPL:glcpost_glc2ice') - end if - - if (atm_c2_ice) then - ! This is special to avoid remapping atm to ocn - ! Note it is constrained that different prep modules cannot use or call each other - a2x_ox => prep_ocn_get_a2x_ox() ! array - call prep_ice_calc_a2x_ix(a2x_ox, timer='CPL:iceprep_atm2ice') - endif - - call prep_ice_mrg(infodata, timer_mrg='CPL:iceprep_mrgx2i') - - call component_diag(infodata, ice, flow='x2c', comment= 'send ice', & - info_debug=info_debug, timer_diag='CPL:iceprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ICEPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - ! cpl -> ice - !---------------------------------------------------- - if (iamin_CPLALLICEID .and. ice_prognostic) then - call component_exch(ice, flow='x2c', & - infodata=infodata, infodata_string='cpl2ice_run', & - mpicom_barrier=mpicom_CPLALLICEID, run_barriers=run_barriers, & - timer_barrier='CPL:C2I_BARRIER', timer_comp_exch='CPL:C2I', & - timer_map_exch='CPL:c2i_icex2icei', timer_infodata_exch='CPL:ice_infoexch') - endif - - end subroutine cime_run_ice_setup_send - -!---------------------------------------------------------------------------------- - - subroutine cime_run_ice_recv_post() - - !---------------------------------------------------------- - ! ice -> cpl - !---------------------------------------------------------- - if (iamin_CPLALLICEID) then - call component_exch(ice, flow='c2x', & - infodata=infodata, infodata_string='ice2cpl_run', & - mpicom_barrier=mpicom_CPLALLICEID, run_barriers=run_barriers, & - timer_barrier='CPL:I2C_BARRIER', timer_comp_exch='CPL:I2C', & - timer_map_exch='CPL:i2c_icei2icex', timer_infodata_exch='CPL:i2c_infoexch') - endif - - !---------------------------------------------------------- - ! ice post - !---------------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ICEPOST_BARRIER') - call t_drvstartf ('CPL:ICEPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, ice, flow='c2x', comment= 'recv ice', & - info_debug=info_debug, timer_diag='CPL:icepost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ICEPOST',cplrun=.true.) - endif - - end subroutine cime_run_ice_recv_post - -!---------------------------------------------------------------------------------- - - subroutine cime_run_wav_setup_send() - - !---------------------------------------------------------- - ! wav prep-merge - !---------------------------------------------------------- - if (iamin_CPLID .and. 
wav_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:WAVPREP_BARRIER') - - call t_drvstartf ('CPL:WAVPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (atm_c2_wav) call prep_wav_calc_a2x_wx(timer='CPL:wavprep_atm2wav') - if (ocn_c2_wav) call prep_wav_calc_o2x_wx(timer='CPL:wavprep_ocn2wav') - if (ice_c2_wav) call prep_wav_calc_i2x_wx(timer='CPL:wavprep_ice2wav') - - call prep_wav_mrg(infodata, fractions_wx, timer_mrg='CPL:wavprep_mrgx2w') - - call component_diag(infodata, wav, flow='x2c', comment= 'send wav', & - info_debug=info_debug, timer_diag='CPL:wavprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:WAVPREP',cplrun=.true.) - endif - - !---------------------------------------------------------- - ! cpl -> wav - !---------------------------------------------------------- - if (iamin_CPLALLWAVID .and. wav_prognostic) then - call component_exch(wav, flow='x2c', & - infodata=infodata, infodata_string='cpl2wav_run', & - mpicom_barrier=mpicom_CPLALLWAVID, run_barriers=run_barriers, & - timer_barrier='CPL:C2W_BARRIER', timer_comp_exch='CPL:C2W', & - timer_map_exch='CPL:c2w_wavx2wavw', timer_infodata_exch='CPL:c2w_infoexch') - endif - - end subroutine cime_run_wav_setup_send - -!---------------------------------------------------------------------------------- - - subroutine cime_run_wav_recv_post() - - !---------------------------------------------------------- - ! wav -> cpl - !---------------------------------------------------------- - if (iamin_CPLALLWAVID) then - call component_exch(wav, flow='c2x', infodata=infodata, infodata_string='wav2cpl_run', & - mpicom_barrier=mpicom_CPLALLWAVID, run_barriers=run_barriers, & - timer_barrier='CPL:W2C_BARRIER', timer_comp_exch='CPL:W2C', & - timer_map_exch='CPL:w2c_wavw2wavx', timer_infodata_exch='CPL:w2c_infoexch') - endif - - !---------------------------------------------------------- - ! wav post - !---------------------------------------------------------- - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:WAVPOST_BARRIER') - call t_drvstartf ('CPL:WAVPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, wav, flow='c2x', comment= 'recv wav', & - info_debug=info_debug, timer_diag='CPL:wavpost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:WAVPOST',cplrun=.true.) - endif - - end subroutine cime_run_wav_recv_post - -!---------------------------------------------------------------------------------- - - subroutine cime_run_update_fractions() - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:FRACSET_BARRIER') - call t_drvstartf ('CPL:FRACSET',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - call t_drvstartf ('CPL:fracset_fracset',barrier=mpicom_CPLID) - - do efi = 1,num_inst_frc - eii = mod((efi-1),num_inst_ice) + 1 - call seq_frac_set(infodata, ice(eii), fractions_ax(efi), fractions_ix(efi), fractions_ox(efi)) - enddo - call t_drvstopf ('CPL:fracset_fracset') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:FRACSET',cplrun=.true.) 
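
The instance loop just above pairs each fraction instance efi with an ice instance via eii = mod((efi-1),num_inst_ice) + 1, so fraction instances cycle round-robin over however many ice instances exist. A minimal standalone sketch of that index mapping, with made-up ensemble sizes (illustrative only, not CIME code), is:

    program instance_map_demo
       ! Illustrative sketch only -- reproduces the round-robin index used in
       ! cime_run_update_fractions:  eii = mod(efi-1, num_inst_ice) + 1,
       ! which cycles fraction instances over the available ice instances.
       implicit none
       integer, parameter :: num_inst_frc = 5   ! hypothetical ensemble sizes
       integer, parameter :: num_inst_ice = 2
       integer :: efi, eii

       do efi = 1, num_inst_frc
          eii = mod(efi - 1, num_inst_ice) + 1
          write(*,'(a,i0,a,i0)') 'fraction instance ', efi, ' -> ice instance ', eii
       end do
    end program instance_map_demo
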
- endif - - end subroutine cime_run_update_fractions - -!---------------------------------------------------------------------------------- - - subroutine cime_run_calc_budgets1() - - !---------------------------------------------------------- - ! Budget with old fractions - !---------------------------------------------------------- - - ! WJS (2-17-11): I am just using the first instance for the budgets because we - ! don't expect budgets to be conserved for our case (I case). Also note that we - ! don't expect budgets to be conserved for the interactive ensemble use case either. - ! tcraig (aug 2012): put this after rof->cpl so the budget sees the new r2x_rx. - ! it will also use the current r2x_ox here which is the value from the last timestep - ! consistent with the ocean coupling - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET1_BARRIER') - call t_drvstartf ('CPL:BUDGET1',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - if (lnd_present) then - call seq_diag_lnd_mct(lnd(ens1), fractions_lx(ens1), infodata, do_l2x=.true., do_x2l=.true.) - endif - if (rof_present) then - call seq_diag_rof_mct(rof(ens1), fractions_rx(ens1), infodata) - endif - if (ice_present) then - call seq_diag_ice_mct(ice(ens1), fractions_ix(ens1), infodata, do_x2i=.true.) - endif - call t_drvstopf ('CPL:BUDGET1',cplrun=.true.,budget=.true.) - end if - end subroutine cime_run_calc_budgets1 - -!---------------------------------------------------------------------------------- - - subroutine cime_run_calc_budgets2() - - !---------------------------------------------------------- - ! Budget with new fractions - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET2_BARRIER') - - call t_drvstartf ('CPL:BUDGET2',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - if (atm_present) then - call seq_diag_atm_mct(atm(ens1), fractions_ax(ens1), infodata, do_a2x=.true., do_x2a=.true.) - endif - if (ice_present) then - call seq_diag_ice_mct(ice(ens1), fractions_ix(ens1), infodata, do_i2x=.true.) - endif - call t_drvstopf ('CPL:BUDGET2',cplrun=.true.,budget=.true.) - - call t_drvstartf ('CPL:BUDGET3',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - call seq_diag_accum_mct() - call t_drvstopf ('CPL:BUDGET3',cplrun=.true.,budget=.true.) - - call t_drvstartf ('CPL:BUDGETF',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - if (.not. dead_comps) then - call seq_diag_print_mct(EClock_d,stop_alarm,budget_inst, & - budget_daily, budget_month, budget_ann, budget_ltann, & - budget_ltend, infodata) - endif - call seq_diag_zero_mct(EClock=EClock_d) - - call t_drvstopf ('CPL:BUDGETF',cplrun=.true.,budget=.true.) - end if - end subroutine cime_run_calc_budgets2 - -!---------------------------------------------------------------------------------- - - subroutine cime_run_calc_budgets3() - - !---------------------------------------------------------- - ! ocn budget (rasm_option2) - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET0_BARRIER') - call t_drvstartf ('CPL:BUDGET0',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - call seq_diag_ocn_mct(ocn(ens1), xao_ox(1), fractions_ox(ens1), infodata, & - do_o2x=.true., do_x2o=.true., do_xao=.true.) - call t_drvstopf ('CPL:BUDGET0',cplrun=.true.,budget=.true.) 
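
The budget routine above reaches the atmosphere-ocean flux state through prep_aoflux_get_xao_ox(), an accessor that returns a pointer to storage owned by the prep module, so the driver reads the data without copying it. A minimal sketch of that accessor pattern, using hypothetical names and a plain real array in place of an MCT attribute vector, is:

    module prep_demo_mod
       ! Hypothetical module (not a CIME prep module): shows the accessor
       ! pattern in which module-owned storage is exposed by pointer, as
       ! prep_aoflux_get_xao_ox() and prep_ocn_get_a2x_ox() do.
       implicit none
       private
       real, allocatable, target :: xao_ox(:)   ! module-owned field storage
       public :: prep_demo_init, prep_demo_get_xao_ox
    contains
       subroutine prep_demo_init(n)
          integer, intent(in) :: n
          allocate(xao_ox(n))
          xao_ox = 0.0
       end subroutine prep_demo_init

       function prep_demo_get_xao_ox() result(ptr)
          real, pointer :: ptr(:)
          ptr => xao_ox                          ! hand out the storage, no copy
       end function prep_demo_get_xao_ox
    end module prep_demo_mod

    program prep_demo
       use prep_demo_mod
       implicit none
       real, pointer :: p(:)
       call prep_demo_init(3)
       p => prep_demo_get_xao_ox()
       p(:) = 1.0                                ! caller writes through the pointer
       print *, p
    end program prep_demo
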
- end if - end subroutine cime_run_calc_budgets3 - -!---------------------------------------------------------------------------------- - - subroutine cime_run_write_history() - - !---------------------------------------------------------- - ! Write history file, only AVs on CPLID - !---------------------------------------------------------- - - if (iamin_CPLID) then - - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:HISTORY_BARRIER') - call t_drvstartf ('CPL:HISTORY',cplrun=.true.,barrier=mpicom_CPLID) - if ( history_alarm) then - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - write(logunit,104) ' Write history file at ',ymd,tod - call shr_sys_flush(logunit) - endif - - call seq_hist_write(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx, trim(cpl_inst_tag)) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - if (do_histavg) then - call seq_hist_writeavg(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, iac, histavg_alarm, & - trim(cpl_inst_tag)) - endif - - call t_drvstopf ('CPL:HISTORY',cplrun=.true.) - - end if - -104 format( A, i10.8, i8) - end subroutine cime_run_write_history - -!---------------------------------------------------------------------------------- - - subroutine cime_run_write_restart(drv_pause, write_restart, drv_resume) - - !---------------------------------------------------------- - ! Write driver restart file - !---------------------------------------------------------- - - logical , intent(in) :: drv_pause - logical , intent(in) :: write_restart - character(len=*), intent(inout) :: drv_resume ! Driver resets state from restart file - -103 format( 5A ) -104 format( A, i10.8, i8) - - if (iamin_CPLID) then - if ( (restart_alarm .or. drv_pause)) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:RESTART_BARRIER') - call t_drvstartf ('CPL:RESTART',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - write(logunit,104) ' Write restart file at ',ymd,tod - call shr_sys_flush(logunit) - endif - - call seq_rest_write(EClock_d, seq_SyncClock, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx, & - trim(cpl_inst_tag), drv_resume) - - if (iamroot_CPLID) then - write(logunit,103) ' Restart filename: ',trim(drv_resume) - call shr_sys_flush(logunit) - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:RESTART',cplrun=.true.) - else - drv_resume = '' - endif - end if - - end subroutine cime_run_write_restart - -!---------------------------------------------------------------------------------- - - subroutine cime_write_performance_checkpoint(output_ckpt, ckpt_filename, & - ckpt_mpicom) - - !---------------------------------------------------------- - ! 
Checkpoint performance data - !---------------------------------------------------------- - - logical, intent(in) :: output_ckpt - character(len=*), intent(in) :: ckpt_filename - integer, intent(in) :: ckpt_mpicom - -103 format( 5A ) -104 format( A, i10.8, i8) - - call t_adj_detailf(+1) - - call t_startf("sync1_tprf") - call mpi_barrier(ckpt_mpicom,ierr) - call t_stopf("sync1_tprf") - - if (output_ckpt) then - call t_prf(filename=trim(ckpt_filename), mpicom=ckpt_mpicom, & - num_outpe=0, output_thispe=output_ckpt) - else - call t_prf(filename=trim(ckpt_filename), mpicom=ckpt_mpicom, & - num_outpe=0) - endif - - call t_startf("sync2_tprf") - call mpi_barrier(ckpt_mpicom,ierr) - call t_stopf("sync2_tprf") - - call t_adj_detailf(-1) - - end subroutine cime_write_performance_checkpoint - -end module cime_comp_mod diff --git a/src/drivers/mct/main/cime_driver.F90 b/src/drivers/mct/main/cime_driver.F90 deleted file mode 100644 index c8cd51e48cd..00000000000 --- a/src/drivers/mct/main/cime_driver.F90 +++ /dev/null @@ -1,141 +0,0 @@ -program cime_driver - - !------------------------------------------------------------------------------- - ! - ! Purpose: Main program for a CIME-driven model. Can have different - ! land, sea-ice, and ocean models plugged in at compile-time. - ! These models can be either: stub, dead, data, or active - ! components or some combination of the above. - ! - ! stub -------- Do nothing. - ! dead -------- Send analytic data back. - ! data -------- Send data back interpolated from input files. - ! active ------ Prognostically simulate the given component. - ! - ! Method: Call appropriate initialization, run (time-stepping), and - ! finalization routines. - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! share code & libs - !---------------------------------------------------------------------------- - use shr_kind_mod, only : r8 => SHR_KIND_R8 - use shr_kind_mod, only : i8 => SHR_KIND_I8 - use shr_kind_mod, only : CS => SHR_KIND_CS - use shr_sys_mod, only : shr_sys_irtc, shr_sys_abort - use perf_mod, only : t_startf, t_adj_detailf, t_stopf, t_startstop_valsf - use ESMF, only : ESMF_Initialize, ESMF_Finalize - use ESMF, only : ESMF_LogKind_Flag, ESMF_LOGKIND_NONE - use ESMF, only : ESMF_LOGKIND_SINGLE, ESMF_LOGKIND_MULTI -#if (! defined(USE_ESMF_LIB) ) || (ESMF_VERSION_MAJOR > 7) - use ESMF, only : ESMF_LOGKIND_MULTI_ON_ERROR -#endif - use cime_comp_mod, only : cime_pre_init1 - use cime_comp_mod, only : cime_pre_init2 - use cime_comp_mod, only : cime_init - use cime_comp_mod, only : cime_run - use cime_comp_mod, only : cime_final - use seq_comm_mct, only : logunit - - implicit none - - !-------------------------------------------------------------------------- - ! timing variables - !-------------------------------------------------------------------------- - integer(i8) :: beg_count, end_count, irtc_rate - real(r8) :: cime_pre_init1_time, ESMF_Initialize_time, & - cime_pre_init2_time, cime_init_time_adjustment - - !-------------------------------------------------------------------------- - ! For ESMF logging - !-------------------------------------------------------------------------- - character(len=CS) :: esmf_logfile_option - type(ESMF_LogKind_Flag) :: esmf_logfile_kind - - !-------------------------------------------------------------------------- - ! Setup and initialize the communications and logging. 
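
The driver times cime_pre_init1 and the later setup phases by bracketing each one with raw clock counts from shr_sys_irtc and converting the tick difference to seconds with the tick rate. A self-contained sketch of the same pattern, with the standard system_clock intrinsic standing in for the shr_sys_irtc wrapper (illustrative only), is:

    program irtc_timing_demo
       ! Illustrative sketch: bracket a phase with raw clock counts and
       ! divide the tick difference by the tick rate to get wall seconds.
       use iso_fortran_env, only : int64, real64
       implicit none
       integer(int64) :: beg_count, end_count, count_rate
       real(real64)   :: elapsed

       call system_clock(beg_count, count_rate)
       call do_work()
       call system_clock(end_count)

       elapsed = real(end_count - beg_count, real64) / real(count_rate, real64)
       write(*,'(a,f10.6,a)') 'phase took ', elapsed, ' seconds'

    contains

       subroutine do_work()
          integer :: i
          real(real64) :: s
          s = 0.0_real64
          do i = 1, 1000000
             s = s + sqrt(real(i, real64))
          end do
          if (s < 0.0_real64) print *, s   ! keeps the loop from being optimized away
       end subroutine do_work
    end program irtc_timing_demo
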
- !-------------------------------------------------------------------------- - beg_count = shr_sys_irtc(irtc_rate) - - call cime_pre_init1(esmf_logfile_option) - - end_count = shr_sys_irtc(irtc_rate) - cime_pre_init1_time = real( (end_count-beg_count), r8)/real(irtc_rate, r8) - - !-------------------------------------------------------------------------- - ! Initialize ESMF. This is done outside of the ESMF_INTERFACE ifdef - ! because it is needed for the time manager, even if the ESMF_INTERFACE - ! is not used. - !-------------------------------------------------------------------------- - beg_count = shr_sys_irtc(irtc_rate) - - select case(esmf_logfile_option) - case('ESMF_LOGKIND_SINGLE') - esmf_logfile_kind = ESMF_LOGKIND_SINGLE - case('ESMF_LOGKIND_MULTI') - esmf_logfile_kind = ESMF_LOGKIND_MULTI - case('ESMF_LOGKIND_MULTI_ON_ERROR') -#if (! defined(USE_ESMF_LIB) ) || (ESMF_VERSION_MAJOR > 7) - esmf_logfile_kind = ESMF_LOGKIND_MULTI_ON_ERROR -#else - write(logunit,*) 'ESMF library version being used: ', ESMF_VERSION_MAJOR - call shr_sys_abort('CIME ERROR: invalid ESMF logfile kind for this ESMF library version: ' & - //trim(esmf_logfile_option) ) -#endif - case('ESMF_LOGKIND_NONE') - esmf_logfile_kind = ESMF_LOGKIND_NONE - case default - call shr_sys_abort('CIME ERROR: invalid ESMF logfile kind '//trim(esmf_logfile_option)) - end select - call ESMF_Initialize(logkindflag=esmf_logfile_kind) - - end_count = shr_sys_irtc(irtc_rate) - ESMF_Initialize_time = real( (end_count-beg_count), r8)/real(irtc_rate, r8) - - !-------------------------------------------------------------------------- - ! Read in the configuration information and initialize the time manager. - !-------------------------------------------------------------------------- - ! Timer initialization has to be after determination of the maximum number - ! of threads used across all components, so called inside of - ! cime_pre_init2, as are t_startf and t_stopf for CPL:INIT and - ! cime_pre_init2. - !-------------------------------------------------------------------------- - beg_count = shr_sys_irtc(irtc_rate) - - call cime_pre_init2() - - end_count = shr_sys_irtc(irtc_rate) - cime_pre_init2_time = real( (end_count-beg_count), r8)/real(irtc_rate, r8) - - !-------------------------------------------------------------------------- - ! Call the initialize, run and finalize routines. - !-------------------------------------------------------------------------- - - call t_startf('CPL:INIT') - call t_adj_detailf(+1) - - call t_startstop_valsf('CPL:cime_pre_init1', walltime=cime_pre_init1_time) - call t_startstop_valsf('CPL:ESMF_Initialize', walltime=ESMF_Initialize_time) - call t_startstop_valsf('CPL:cime_pre_init2', walltime=cime_pre_init2_time) - - call cime_init() - - call t_adj_detailf(-1) - call t_stopf('CPL:INIT') - - cime_init_time_adjustment = cime_pre_init1_time & - + ESMF_Initialize_time & - + cime_pre_init2_time - call t_startstop_valsf('CPL:INIT', walltime=cime_init_time_adjustment, & - callcount=0) - - call cime_run() - call cime_final() - - !-------------------------------------------------------------------------- - ! 
Clean-up - !-------------------------------------------------------------------------- - call ESMF_Finalize( ) - -end program cime_driver diff --git a/src/drivers/mct/main/component_mod.F90 b/src/drivers/mct/main/component_mod.F90 deleted file mode 100644 index 3c9e6da2c33..00000000000 --- a/src/drivers/mct/main/component_mod.F90 +++ /dev/null @@ -1,965 +0,0 @@ -module component_mod - - !---------------------------------------------------------------------------- - ! share code & libs - !---------------------------------------------------------------------------- - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use shr_const_mod, only: shr_const_cday - use shr_file_mod, only: shr_file_setLogLevel, shr_file_setLogUnit - use shr_file_mod, only: shr_file_setIO, shr_file_getUnit - use shr_scam_mod, only: shr_scam_checkSurface - use shr_mpi_mod, only: shr_mpi_min, shr_mpi_max - use shr_mem_mod, only: shr_mem_init, shr_mem_getusage - use shr_cal_mod, only: shr_cal_date2ymd - use shr_orb_mod, only: shr_orb_params - use shr_reprosum_mod, only: shr_reprosum_setopts - use seq_comm_mct, only: GLOID, CPLID, logunit - use seq_comm_mct, only: seq_comm_iamin, seq_comm_namelen, num_inst_frc - use seq_comm_mct, only: seq_comm_suffix, seq_comm_name, seq_comm_setnthreads - use seq_comm_mct, only: seq_comm_getinfo => seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData - use seq_infodata_mod, only: seq_infodata_exchange, seq_infodata_type - use seq_diag_mct, only: seq_diag_avect_mct - use seq_map_type_mod - use seq_map_mod - use t_drv_timers_mod - use component_type_mod - use seq_cdata_mod, only : seq_cdata, seq_cdata_init - use mct_mod ! mct_ wrappers for mct lib - use perf_mod - use ESMF - use seq_flds_mod, only: nan_check_component_fields - implicit none - -#include - - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: component_init_pre - public :: component_init_cc ! mct and esmf versions - public :: component_init_cx - public :: component_init_aream - public :: component_init_areacor - public :: component_run ! mct and esmf versions - public :: component_final ! mct and esmf versions - public :: component_exch - public :: component_diag - - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - logical :: iamroot_GLOID, iamroot_CPLID ! GLOID, CPLID masterproc - logical :: iamin_CPLID ! true => pe associated with CPLID - integer :: mpicom_GLOID, mpicom_CPLID ! GLOID, CPLID mpi communicator - integer :: nthreads_GLOID, nthreads_CPLID - logical :: drv_threading - - !=============================================================================== - -contains - - !=============================================================================== - - subroutine component_init_pre(comp, compid, cplcompid, cplallcompid, & - infodata, ntype) - use seq_timemgr_mod, only: seq_timemgr_data_assimilation_active - - !--------------------------------------------------------------- - ! Initialize driver rearrangers and AVs on driver - ! Initialize cdata_*x data - ! Zero out x2*_** in case it never gets used then it'll produce zeros in diags - ! 
For ensembles, create only a single dom_*x for the coupler based on the - ! first ensemble member. otherwise, just extend the dom_** and dom_*x to - ! other ensemble members. - ! - ! Arguments - type(component_type) , intent(inout) :: comp(:) - integer , intent(in) :: compid(:) - integer , intent(in) :: cplcompid(:) - integer , intent(in) :: cplallcompid - type (seq_infodata_type) , intent(inout), target :: infodata - character(len=3) , intent(in) :: ntype - ! - ! Local Variables - logical :: flag - integer :: ierr - integer :: eci ! index - character(*), parameter :: subname = '(component_init_pre)' - !--------------------------------------------------------------- - - ! initialize module variables (this is repetitive here- but does not require a different routine) - - call seq_infodata_getdata(infodata, drv_threading=drv_threading) - call seq_comm_getinfo(GLOID, mpicom=mpicom_GLOID, iamroot=iamroot_GLOID, nthreads=nthreads_GLOID) - call seq_comm_getinfo(CPLID, mpicom=mpicom_CPLID, iamroot=iamroot_CPLID, nthreads=nthreads_CPLID) - iamin_CPLID = seq_comm_iamin(CPLID) - - ! Initialize component type variables - do eci = 1,size(comp) - - comp(eci)%compid = compid(eci) - comp(eci)%cplcompid = cplcompid(eci) - comp(eci)%cplallcompid = cplallcompid - - call seq_comm_getinfo(comp(eci)%cplallcompid, mpicom=comp(eci)%mpicom_cplallcompid) - call seq_comm_getinfo(comp(eci)%cplcompid , mpicom=comp(eci)%mpicom_cplcompid) - call seq_comm_getinfo(comp(eci)%compid , mpicom=comp(eci)%mpicom_compid) - call seq_comm_getinfo(comp(eci)%compid , iamroot=comp(eci)%iamroot_compid) - call seq_comm_getinfo(comp(eci)%compid , nthreads=comp(eci)%nthreads_compid) - - comp(eci)%iamin_compid = seq_comm_iamin (comp(eci)%compid) - comp(eci)%iamin_cplcompid = seq_comm_iamin (comp(eci)%cplcompid) - comp(eci)%iamin_cplallcompid = seq_comm_iamin (comp(eci)%cplallcompid) - comp(eci)%suffix = seq_comm_suffix(comp(eci)%compid) - comp(eci)%name = seq_comm_name (comp(eci)%compid) - comp(eci)%ntype = ntype(1:3) - - select case(ntype) - case ('atm','cpl','ocn','wav','glc','ice','rof','lnd','esp') - comp(eci)%oneletterid = ntype(1:1) - case ('iac') - comp(eci)%oneletterid = 'z' - case default - call shr_sys_abort(subname//': ntype, "'//ntype//'" not recognized"') - end select - - if (eci == 1) then - allocate(comp(1)%dom_cx) - allocate(comp(1)%gsmap_cx) - else - comp(eci)%dom_cx => comp(1)%dom_cx - comp(eci)%gsmap_cx => comp(1)%gsmap_cx - end if - - ! Set cdata_cc - unique for each instance - allocate(comp(eci)%dom_cc) - allocate(comp(eci)%gsmap_cc) - allocate(comp(eci)%cdata_cc) - call seq_cdata_init(comp(eci)%cdata_cc, comp(eci)%compid, & - 'cdata_'//ntype(1:1)//ntype(1:1), comp(eci)%dom_cc, & - comp(eci)%gsmap_cc, infodata, seq_timemgr_data_assimilation_active(ntype(1:3))) - - ! 
Determine initial value of comp_present in infodata - to do - add this to component -#ifdef CPRPGI - if (comp(1)%oneletterid == 'a') then - call seq_infodata_getData(infodata, atm_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'l') then - call seq_infodata_getData(infodata, lnd_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'i') then - call seq_infodata_getData(infodata, ice_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'o') then - call seq_infodata_getData(infodata, ocn_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'r') then - call seq_infodata_getData(infodata, rof_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'g') then - call seq_infodata_getData(infodata, glc_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'w') then - call seq_infodata_getData(infodata, wav_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'e') then - call seq_infodata_getData(infodata, esp_present=comp(eci)%present) - end if - if (comp(1)%oneletterid == 'z') then - call seq_infodata_getData(infodata, iac_present=comp(eci)%present) - end if -#else - call seq_infodata_getData(comp(1)%oneletterid, infodata, comp_present=comp(eci)%present) -#endif - end do - - end subroutine component_init_pre - - !=============================================================================== - - subroutine component_init_cc(Eclock, comp, comp_init, infodata, NLFilename, & - seq_flds_x2c_fluxes, seq_flds_c2x_fluxes) - - !--------------------------------------------------------------- - ! - ! Arguments - type(ESMF_Clock) , intent(inout) :: EClock - type(component_type) , intent(inout) :: comp(:) - interface - subroutine comp_init( Eclock, cdata, x2c, c2x, nlfilename) - use ESMF , only: ESMF_Clock - use seq_cdata_mod, only: seq_cdata - use mct_mod , only: mct_avect - implicit none - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2c - type(mct_aVect) , intent(inout) :: c2x - character(len=*), optional, intent(IN) :: NLFilename ! Namelist filename - end subroutine comp_init - end interface - type (seq_infodata_type) , intent(inout) :: infodata - character(len=*) , intent(in) :: NLFilename - character(len=*) , intent(in), optional :: seq_flds_x2c_fluxes - character(len=*) , intent(in), optional :: seq_flds_c2x_fluxes - ! - ! Local Variables - integer :: k1, k2 - integer :: eci - character(*), parameter :: subname = '(component_init_cc:mct)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - ! **** Initialize component - this initializes x2c_cc and c2x_cc *** - ! the following will call the appropriate comp_init_mct routine - - call t_set_prefixf(comp(1)%oneletterid//"_i:") - - if (comp(1)%iamin_cplallcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplallcompid, & - 'cpl2'//comp(1)%ntype(1:3)//'_init') - end if - - ! The following initializes the component instance cdata_cc (gsmap and dom), - ! x2c_cc and c2x_cc - - do eci = 1,size(comp) - if (iamroot_CPLID .and. comp(eci)%present) then - write(logunit,F00) 'Initialize component '//trim(comp(eci)%ntype) - call shr_sys_flush(logunit) - endif - - if (.not. associated(comp(eci)%x2c_cc)) allocate(comp(eci)%x2c_cc) - if (.not. associated(comp(eci)%c2x_cc)) then - allocate(comp(eci)%c2x_cc) - ! this is needed for check_fields - nullify(comp(eci)%c2x_cc%rattr) - endif - if (comp(eci)%iamin_compid .and. 
comp(eci)%present) then - if (drv_threading) call seq_comm_setnthreads(comp(eci)%nthreads_compid) - call shr_sys_flush(logunit) - - if (present(seq_flds_x2c_fluxes)) then - call mct_avect_vecmult(comp(eci)%x2c_cc, comp(eci)%drv2mdl, seq_flds_x2c_fluxes, mask_spval=.true.) - end if - - call t_startf('comp_init') - call comp_init( EClock, comp(eci)%cdata_cc, comp(eci)%x2c_cc, comp(eci)%c2x_cc, & - NLFilename=NLFilename ) - call t_stopf('comp_init') - if(nan_check_component_fields) then - call t_drvstartf ('check_fields') - call check_fields(comp(eci), eci) - call t_drvstopf ('check_fields') - end If - - if (present(seq_flds_c2x_fluxes)) then - call mct_avect_vecmult(comp(eci)%c2x_cc, comp(eci)%mdl2drv, seq_flds_c2x_fluxes, mask_spval=.true.) - end if - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - end if - end do - - if (comp(1)%iamin_cplcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplcompid, & - comp(1)%ntype(1:3)//'2cpl_init') - endif - - ! Determine final value of comp_present in infodata (after component initialization) - - do eci = 1,size(comp) -#ifdef CPRPGI - if (comp(1)%oneletterid == 'a') call seq_infodata_getData(infodata, atm_present=comp(eci)%present) - if (comp(1)%oneletterid == 'l') call seq_infodata_getData(infodata, lnd_present=comp(eci)%present) - if (comp(1)%oneletterid == 'i') call seq_infodata_getData(infodata, ice_present=comp(eci)%present) - if (comp(1)%oneletterid == 'o') call seq_infodata_getData(infodata, ocn_present=comp(eci)%present) - if (comp(1)%oneletterid == 'r') call seq_infodata_getData(infodata, rof_present=comp(eci)%present) - if (comp(1)%oneletterid == 'g') call seq_infodata_getData(infodata, glc_present=comp(eci)%present) - if (comp(1)%oneletterid == 'w') call seq_infodata_getData(infodata, wav_present=comp(eci)%present) - if (comp(1)%oneletterid == 'e') call seq_infodata_getData(infodata, esp_present=comp(eci)%present) - if (comp(1)%oneletterid == 'z') call seq_infodata_getData(infodata, iac_present=comp(eci)%present) -#else - call seq_infodata_getData(comp(1)%oneletterid, infodata, comp_present=comp(eci)%present) -#endif - end do - - - ! Initialize aream, set it to area for now until maps are read - ! in some cases, maps are not read at all !! - ! Entire domain must have reasonable values before calling xxx2xxx init - - do eci = 1,size(comp) - if (comp(eci)%iamin_compid .and. comp(eci)%present .and. & - (comp(1)%oneletterid /= 'e')) then - if (drv_threading) call seq_comm_setnthreads(comp(eci)%nthreads_compid) - k1 = mct_aVect_indexRa(comp(eci)%cdata_cc%dom%data, "area" ,perrWith='aa area ') - k2 = mct_aVect_indexRa(comp(eci)%cdata_cc%dom%data, "aream" ,perrWith='aa aream') - - comp(eci)%cdata_cc%dom%data%rAttr(k2,:) = comp(eci)%cdata_cc%dom%data%rAttr(k1,:) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - end do - - call t_unset_prefixf() - - end subroutine component_init_cc - - !=============================================================================== - - subroutine component_init_cx(comp, infodata) - - !--------------------------------------------------------------- - ! Uses - use cplcomp_exchange_mod, only: seq_mctext_gsmapinit, seq_mctext_avInit - use cplcomp_exchange_mod, only: seq_mctext_avExtend, seq_mctext_gGridInit - use cplcomp_exchange_mod, only: seq_map_init_exchange, seq_map_map_exchange - use seq_domain_mct, only: seq_domain_compare - use mct_mod, only: mct_ggrid_clean - ! - ! 
Arguments - type(component_type) , intent(inout) :: comp(:) - type (seq_infodata_type) , intent(inout) :: infodata - ! - ! Local Variables - integer :: eci - integer :: rc ! return code - integer :: mpi_tag - type(mct_gGrid) :: dom_tmp ! temporary - character(*), parameter :: subname = '(component_init_cx)' - character(*), parameter :: F0I = "('"//subname//" : ', A, 2i8 )" - !--------------------------------------------------------------- - - ! Initialize driver rearrangers and AVs on driver - ! Initialize cdata_*x data - ! Zero out x2*_** in case it never gets used then it'll produce zeros in diags - ! For ensembles, create only a single dom_*x for the coupler based on the - ! first ensemble member. otherwise, just extend the dom_** and dom_*x to - ! other ensemble members. - - do eci = 1,size(comp) - if (comp(eci)%present) then - - if (iamroot_CPLID) then - write(logunit,*) ' ' - call shr_sys_flush(logunit) - end if - - if (comp(eci)%iamin_cplcompid) then - - ! Create gsmap_cx (note that comp(eci)%gsmap_cx all point to comp(1)%gsmap_cx - ! This will only be valid on the coupler pes - if (eci == 1) then - if (iamroot_CPLID) then - write(logunit,F0I) 'creating gsmap_cx for '//comp(eci)%ntype(1:3) - call shr_sys_flush(logunit) - end if - call seq_mctext_gsmapInit(comp(1)) - endif - - ! Create mapper_Cc2x and mapper_Cx2c - allocate(comp(eci)%mapper_Cc2x, comp(eci)%mapper_Cx2c) - if (iamroot_CPLID) then - write(logunit,F0I) 'Initializing mapper_C'//comp(eci)%ntype(1:1)//'2x',eci - call shr_sys_flush(logunit) - end if - call seq_map_init_exchange(comp(eci), flow='c2x', mapper=comp(eci)%mapper_Cc2x) - if (iamroot_CPLID) then - write(logunit,F0I) 'Initializing mapper_Cx2'//comp(eci)%ntype(1:1),eci - call shr_sys_flush(logunit) - end if - call seq_map_init_exchange(comp(eci), flow='x2c', mapper=comp(eci)%mapper_Cx2c) - - ! Create x2c_cx and c2x_cx - allocate(comp(eci)%x2c_cx, comp(eci)%c2x_cx) - call seq_mctext_avinit(comp(eci), flow='x2c') - call seq_mctext_avinit(comp(eci), flow='c2x') - - ! Create dom_cx (note that comp(eci)%dom_cx all point to comp(1)%dom_cx - ! Then verify other ensembles have same domain by comparing to dom_cx - if (eci == 1) then ! create dom_cx - if (iamroot_CPLID) then - write(logunit,F0I) 'creating dom_cx' - call shr_sys_flush(logunit) - end if - call seq_mctext_gGridInit(comp(1)) - - if (size(comp) > 1) then - mpi_tag = comp(eci)%cplcompid*100+eci*10+1 - else - mpi_tag = comp(eci)%cplcompid*10000+eci*10+1 - end if - call seq_map_map_exchange(comp(1), flow='c2x', dom_flag=.true., msgtag=mpi_tag) - - else if (eci > 1) then - if (iamroot_CPLID) then - write(logunit,F0I) 'comparing comp domain ensemble number ',eci - call shr_sys_flush(logunit) - end if - call seq_mctext_avExtend(comp(eci)%dom_cx%data, cplid, comp(eci)%cplcompid) - call seq_mctext_gGridInit(comp(eci), dom_tmp) - call seq_map_map_exchange(comp(eci), flow='c2x', dom_flag=.true., dom_tmp=dom_tmp) - if (iamin_CPLID) then - call seq_domain_compare(comp(eci)%dom_cx, dom_tmp, mpicom_CPLID) - end if - call mct_ggrid_clean(dom_tmp,rc) - endif - - call mct_avect_zero(comp(eci)%x2c_cc) - call mct_avect_zero(comp(eci)%x2c_cx) - - end if ! if comp(eci)%iamin_cplcompid - end if ! if comp(eci)%present - end do ! 
end of eci loop - - end subroutine component_init_cx - - !=============================================================================== - - subroutine component_init_aream(infodata, rof_c2_ocn, samegrid_ao, samegrid_al, & - samegrid_ro, samegrid_lg) - - !--------------------------------------------------------------- - ! Description - ! Update (read) aream in domains where appropriate - ON cpl pes - ! - ! Uses - use prep_ocn_mod, only : prep_ocn_get_mapper_Fa2o - use prep_lnd_mod, only : prep_lnd_get_mapper_Sa2l - use prep_ice_mod, only : prep_ice_get_mapper_SFo2i - use prep_glc_mod, only : prep_glc_get_mapper_Sl2g - use component_type_mod, only : atm, lnd, ice, ocn, rof, glc - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - logical , intent(in) :: rof_c2_ocn - logical , intent(in) :: samegrid_ao - logical , intent(in) :: samegrid_al - logical , intent(in) :: samegrid_ro - logical , intent(in) :: samegrid_lg ! lnd & glc on same grid - ! - ! Local variables - type(mct_gsmap), pointer :: gsmap_s, gsmap_d - type(mct_ggrid), pointer :: dom_s, dom_d - type(seq_map) , pointer :: mapper_Fa2o - type(seq_map) , pointer :: mapper_Sa2l - type(seq_map) , pointer :: mapper_SFo2i - type(seq_map) , pointer :: mapper_Sl2g - logical :: atm_present ! atm present flag - logical :: lnd_present ! lnd present flag - logical :: ocn_present ! ocn present flag - logical :: ice_present ! ice present flag - logical :: glc_present ! glc present flag - integer :: ka,km - character(*), parameter :: subname = '(component_init_aream)' - !--------------------------------------------------------------- - - ! Note that the following is assumed to hold - all gsmaps_cx for a given - ! instance of a component (e.g. atm(i)) are identical on the coupler processes - - mapper_Fa2o => prep_ocn_get_mapper_Fa2o() - mapper_Sa2l => prep_lnd_get_mapper_Sa2l() - mapper_SFo2i => prep_ice_get_mapper_SFo2i() - mapper_Sl2g => prep_glc_get_mapper_Sl2g() - - call seq_infodata_GetData( infodata, & - atm_present=atm_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - lnd_present=lnd_present, & - glc_present=glc_present) - - if (atm_present .and. ocn_present) then - if (samegrid_ao) then - dom_s => component_get_dom_cx(atm(1)) !dom_ax - dom_d => component_get_dom_cx(ocn(1)) !dom_ox - ka = mct_aVect_indexRa(dom_s%data, "area" ) - km = mct_aVect_indexRa(dom_s%data, "aream" ) - dom_s%data%rAttr(km,:) = dom_s%data%rAttr(ka,:) - - call seq_map_map(mapper_Fa2o, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - else - gsmap_s => component_get_gsmap_cx(ocn(1)) ! gsmap_ox - gsmap_d => component_get_gsmap_cx(atm(1)) ! gsmap_ax - dom_s => component_get_dom_cx(ocn(1)) ! dom_ox - dom_d => component_get_dom_cx(atm(1)) ! dom_ax - - call seq_map_readdata('seq_maps.rc','ocn2atm_fmapname:', mpicom_CPLID, CPLID, & - gsmap_s=gsmap_s, av_s=dom_s%data, avfld_s='aream', filefld_s='area_a', & - gsmap_d=gsmap_d, av_d=dom_d%data, avfld_d='aream', filefld_d='area_b', & - string='ocn2atm aream initialization') - endif - end if - - if (ice_present .and. ocn_present) then - dom_s => component_get_dom_cx(ocn(1)) !dom_ox - dom_d => component_get_dom_cx(ice(1)) !dom_ix - - call seq_map_map(mapper_SFo2i, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - endif - - if (rof_c2_ocn) then - if (.not.samegrid_ro) then - gsmap_s => component_get_gsmap_cx(rof(1)) ! gsmap_rx - dom_s => component_get_dom_cx(rof(1)) ! 
dom_rx - - call seq_map_readdata('seq_maps.rc', 'rof2ocn_liq_rmapname:',mpicom_CPLID, CPLID, & - gsmap_s=gsmap_s, av_s=dom_s%data, avfld_s='aream', filefld_s='area_a', & - string='rof2ocn liq aream initialization') - - call seq_map_readdata('seq_maps.rc', 'rof2ocn_ice_rmapname:',mpicom_CPLID, CPLID, & - gsmap_s=gsmap_s, av_s=dom_s%data, avfld_s='aream', filefld_s='area_a', & - string='rof2ocn ice aream initialization') - endif - end if - - if (lnd_present .and. atm_present) then - if (samegrid_al) then - dom_s => component_get_dom_cx(atm(1)) !dom_ax - dom_d => component_get_dom_cx(lnd(1)) !dom_lx - - call seq_map_map(mapper_Sa2l, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - else - gsmap_d => component_get_gsmap_cx(lnd(1)) ! gsmap_lx - dom_d => component_get_dom_cx(lnd(1)) ! dom_lx - - call seq_map_readdata('seq_maps.rc','atm2lnd_fmapname:',mpicom_CPLID, CPLID, & - gsmap_d=gsmap_d, av_d=dom_d%data, avfld_d='aream', filefld_d='area_b', & - string='atm2lnd aream initialization') - endif - end if - - if (lnd_present .and. glc_present) then - if (samegrid_lg) then - dom_s => component_get_dom_cx(lnd(1)) !dom_lx - dom_d => component_get_dom_cx(glc(1)) !dom_gx - - call seq_map_map(mapper_Sl2g, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - else - gsmap_d => component_get_gsmap_cx(glc(1)) ! gsmap_gx - dom_d => component_get_dom_cx(glc(1)) ! dom_gx - - call seq_map_readdata('seq_maps.rc','lnd2glc_fmapname:',mpicom_CPLID, CPLID, & - gsmap_d=gsmap_d, av_d=dom_d%data, avfld_d='aream', filefld_d='area_b', & - string='lnd2glc aream initialization') - endif - endif - - end subroutine component_init_aream - - !=============================================================================== - - subroutine component_init_areacor(comp, samegrid, seq_flds_c2x_fluxes) - !--------------------------------------------------------------- - ! COMPONENT PES and CPL/COMPONENT (for exchange only) - ! - ! Uses - use seq_domain_mct, only : seq_domain_areafactinit - ! - ! Arguments - type(component_type) , intent(inout) :: comp(:) - logical , intent(in) :: samegrid - character(len=*) , intent(in) :: seq_flds_c2x_fluxes - ! - ! Local Variables - integer :: eci, num_inst - integer :: mpi_tag - character(*), parameter :: subname = '(component_init_areacor)' - !--------------------------------------------------------------- - - num_inst = size(comp) - do eci = 1,num_inst - - ! For joint cpl-component pes - if (comp(eci)%iamin_cplcompid) then - - ! Map component domain from coupler to component processes - if ( num_inst > 1) then - mpi_tag = comp(eci)%cplcompid*100+eci*10+5 - else - mpi_tag = comp(eci)%cplcompid*10000+eci*10+5 - end if - call seq_map_map(comp(eci)%mapper_Cx2c, comp(eci)%dom_cx%data, comp(eci)%dom_cc%data, msgtag=mpi_tag) - - ! For only component pes - if (comp(eci)%iamin_compid) then - - ! Allocate and initialize area correction factors on component processes - ! Note that the following call allocates comp(eci)%mld2drv(:) and comp(eci)%drv2mdl(:) - call seq_domain_areafactinit(comp(eci)%dom_cc, & - comp(eci)%mdl2drv, comp(eci)%drv2mdl, samegrid, & - comp(eci)%mpicom_compid, comp(eci)%iamroot_compid, & - 'areafact_'//comp(eci)%oneletterid//'_'//trim(comp(eci)%name)) - - ! Area correct component initialization output fields - call mct_avect_vecmult(comp(eci)%c2x_cc, comp(eci)%mdl2drv, seq_flds_c2x_fluxes, mask_spval=.true.) - - endif - - ! 
Map corrected initial component AVs from component to coupler pes - if (num_inst > 1) then - mpi_tag = comp(eci)%cplcompid*100+eci*10+7 - else - mpi_tag = comp(eci)%cplcompid*10000+eci*10+7 - end if - call seq_map_map(comp(eci)%mapper_cc2x, comp(eci)%c2x_cc, comp(eci)%c2x_cx, msgtag=mpi_tag) - - endif - enddo - - end subroutine component_init_areacor - - !=============================================================================== - - subroutine component_run(Eclock, comp, comp_run, infodata, & - seq_flds_x2c_fluxes, seq_flds_c2x_fluxes, & - comp_prognostic, comp_num, timer_barrier, timer_comp_run, & - run_barriers, ymd, tod, comp_layout) - - !--------------------------------------------------------------- - ! Description - ! Run component model - ! Note that the optional arguments, seq_flds_x2c_fluxes and - ! seq_flds_c2x_fluxes, are not passed for external models (ESP) - ! since these type of models do not interact through the coupler. - ! The absence of these inputs should be used to avoid coupler- - ! based actions in component_run - ! - ! Arguments - type(ESMF_Clock) , intent(inout) :: EClock - type(component_type) , intent(inout) :: comp(:) - interface - subroutine comp_run( Eclock, cdata, x2c, c2x) - use ESMF, only : ESMF_Clock - use seq_cdata_mod, only : seq_cdata - use mct_mod, only : mct_avect - implicit none - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2c - type(mct_aVect) , intent(inout) :: c2x - end subroutine comp_run - end interface - type (seq_infodata_type) , intent(inout) :: infodata - character(len=*) , intent(in), optional :: seq_flds_x2c_fluxes - character(len=*) , intent(in), optional :: seq_flds_c2x_fluxes - logical , intent(in) :: comp_prognostic - integer , intent(in), optional :: comp_num - character(len=*) , intent(in), optional :: timer_barrier - character(len=*) , intent(in), optional :: timer_comp_run - logical , intent(in), optional :: run_barriers - integer , intent(in), optional :: ymd ! Current date (YYYYMMDD) - integer , intent(in), optional :: tod ! Current time of day (seconds) - character(len=*) , intent(in), optional :: comp_layout - ! - ! Local Variables - integer :: eci - integer :: ierr - integer :: num_inst - real(r8) :: time_brun ! Start time - real(r8) :: time_erun ! Ending time - real(r8) :: cktime ! delta time - real(r8) :: cktime_acc(10) ! cktime accumulator array 1 = all, 2 = atm, etc - integer :: cktime_cnt(10) ! cktime counter array - logical :: seq_multi_inst ! a special case of running multiinstances on the same pes. - integer :: phase, phasemin, phasemax ! phase support - logical :: firstloop ! first time around phase loop - character(*), parameter :: subname = '(component_run:mct)' - !--------------------------------------------------------------- - - num_inst = size(comp) - seq_multi_inst = .false. - phasemin = 1 - phasemax = 1 - - if(present(comp_layout)) then - if(comp_layout .eq. "sequential" .and. num_inst > 1) then - seq_multi_inst=.true. - phasemin = 0 - endif - endif - - do phase = phasemin,phasemax - if (phase == phasemin) then - firstloop = .true. - else - firstloop = .false. 
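
The drv2mdl and mdl2drv factors applied through mct_avect_vecmult (in component_init_areacor above, and again on the x2c/c2x flux fields inside the instance loop below) are per-cell area-correction factors allocated by seq_domain_areafactinit. A sketch of that cell-by-cell rescaling on plain arrays, with made-up factor values (mct_avect_vecmult itself acts on MCT attribute vectors), is:

    program areacor_demo
       ! Illustrative sketch: flux fields exchanged between driver and
       ! component are rescaled cell-by-cell by area-correction factors.
       ! Factor values below are made up.
       implicit none
       integer, parameter :: n = 4
       real :: flux(n)    = [1.0, 2.0, 3.0, 4.0]   ! flux field on the component grid
       real :: drv2mdl(n) = [0.9, 1.0, 1.1, 1.0]   ! per-cell correction factors (hypothetical)
       real :: mdl2drv(n)

       mdl2drv = 1.0 / drv2mdl       ! inverse factors for the return trip

       flux = flux * drv2mdl         ! driver -> model: apply area correction
       print *, 'corrected flux:', flux

       flux = flux * mdl2drv         ! model -> driver: undo it
       print *, 'restored flux: ', flux
    end program areacor_demo
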
- endif -#ifdef CPRPGI - if (comp(1)%oneletterid == 'a') call seq_infodata_putData(infodata, atm_phase=phase) - if (comp(1)%oneletterid == 'l') call seq_infodata_putData(infodata, lnd_phase=phase) - if (comp(1)%oneletterid == 'i') call seq_infodata_putData(infodata, ice_phase=phase) - if (comp(1)%oneletterid == 'o') call seq_infodata_putData(infodata, ocn_phase=phase) - if (comp(1)%oneletterid == 'r') call seq_infodata_putData(infodata, rof_phase=phase) - if (comp(1)%oneletterid == 'g') call seq_infodata_putData(infodata, glc_phase=phase) - if (comp(1)%oneletterid == 'w') call seq_infodata_putData(infodata, wav_phase=phase) - if (comp(1)%oneletterid == 'e') call seq_infodata_putData(infodata, esp_phase=phase) - if (comp(1)%oneletterid == 'z') call seq_infodata_putData(infodata, iac_phase=phase) -#else - call seq_infodata_putData(comp(1)%oneletterid, infodata, comp_phase=phase) -#endif - - do eci = 1,num_inst - if (comp(eci)%iamin_compid) then - - if (present(timer_barrier)) then - if (present(run_barriers)) then - if (run_barriers) then - call t_drvstartf (trim(timer_barrier)) - call mpi_barrier(comp(eci)%mpicom_compid, ierr) - call t_drvstopf (trim(timer_barrier)) - time_brun = mpi_wtime() - endif - end if - end if - - if (present(timer_comp_run)) then - call t_drvstartf (trim(timer_comp_run), barrier=comp(eci)%mpicom_compid) - end if - if (drv_threading) call seq_comm_setnthreads(comp(1)%nthreads_compid) - - if (comp_prognostic .and. firstloop .and. present(seq_flds_x2c_fluxes)) then - call mct_avect_vecmult(comp(eci)%x2c_cc, comp(eci)%drv2mdl, seq_flds_x2c_fluxes, mask_spval=.true.) - end if - - call t_set_prefixf(comp(1)%oneletterid//":") - call comp_run(EClock, comp(eci)%cdata_cc, comp(eci)%x2c_cc, comp(eci)%c2x_cc) - if(nan_check_component_fields) then - call t_drvstartf ('check_fields') - call check_fields(comp(eci), eci) - call t_drvstopf ('check_fields') - endif - call t_unset_prefixf() - - if ((phase == 1) .and. present(seq_flds_c2x_fluxes)) then - call mct_avect_vecmult(comp(eci)%c2x_cc, comp(eci)%mdl2drv, seq_flds_c2x_fluxes, mask_spval=.true.) - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - if (present(timer_comp_run)) then - call t_drvstopf (trim(timer_comp_run)) - end if - - if (present(comp_num)) then - if (present(run_barriers)) then - if (run_barriers) then - time_erun = mpi_wtime() - cktime = time_erun - time_brun - cktime_acc(comp_num) = cktime_acc(comp_num) + cktime - cktime_cnt(comp_num) = cktime_cnt(comp_num) + 1 - if (present(ymd) .and. present(tod)) then - write(logunit,107) ' rstamp ',trim(comp(eci)%name), & - '_run_time: model date = ',ymd,tod, & - ' avg dt = ',cktime_acc(comp_num)/cktime_cnt(comp_num), & - ' dt = ',cktime, ' phase = ',phase - end if - endif - end if - end if - - endif - enddo ! eci - - enddo ! phase - -107 format( 3A, 2i8, A, f12.4, A, f12.4 ) - - end subroutine component_run - - !=============================================================================== - - subroutine component_final(Eclock, comp, comp_final) - - !--------------------------------------------------------------- - ! Description - ! Run component model - ! - ! 
Arguments - type(ESMF_Clock) , intent(inout) :: EClock - type(component_type) , intent(inout) :: comp(:) - interface - subroutine comp_final( Eclock, cdata, x2c, c2x) - use ESMF, only : ESMF_Clock - use seq_cdata_mod, only : seq_cdata - use mct_mod, only : mct_avect - implicit none - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2c - type(mct_aVect) , intent(inout) :: c2x - end subroutine comp_final - end interface - ! - ! Local Variables - integer :: eci - integer :: num_inst - character(*), parameter :: subname = '(component_final:mct)' - !--------------------------------------------------------------- - - num_inst = size(comp) - do eci = 1,num_inst - if (comp(eci)%iamin_compid) then - if (drv_threading) call seq_comm_setnthreads(comp(1)%nthreads_compid) - call t_set_prefixf(comp(1)%oneletterid//"_f:") - call comp_final(EClock, comp(eci)%cdata_cc, comp(eci)%x2c_cc, comp(eci)%c2x_cc) - call t_unset_prefixf() - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - end if - end do - - end subroutine component_final - - !=============================================================================== - - subroutine component_exch(comp, flow, infodata, infodata_string, & - mpicom_barrier, run_barriers, & - timer_barrier, timer_comp_exch, timer_map_exch, timer_infodata_exch) - - !--------------------------------------------------------------- - ! Description - ! Map x2m_mx to x2m_mm (component input av from - ! coupler processes to component model processes) - ! - ! Arguments - implicit none - type(component_type) , intent(inout) :: comp(:) - character(len=3) , intent(in) :: flow - type(seq_infodata_type) , intent(inout) :: infodata - character(len=*) , intent(in) :: infodata_string - integer , intent(in), optional :: mpicom_barrier ! mpicom for barrier call - logical , intent(in), optional :: run_barriers - character(len=*) , intent(in), optional :: timer_barrier ! timer - character(len=*) , intent(in), optional :: timer_comp_exch - character(len=*) , intent(in), optional :: timer_map_exch - character(len=*) , intent(in), optional :: timer_infodata_exch - ! - ! Local Variables - integer :: eci - integer :: ierr - integer :: mpi_tag - character(*), parameter :: subname = '(component_exch)' - !--------------------------------------------------------------- - - if (present(timer_barrier)) then - if (run_barriers) then - call t_drvstartf (trim(timer_barrier)) - call mpi_barrier(comp(1)%mpicom_cplallcompid,ierr) - call t_drvstopf (trim(timer_barrier)) - endif - end if - - if (present(timer_comp_exch)) then - if (present(mpicom_barrier)) then - call t_drvstartf (trim(timer_comp_exch), cplcom=.true., barrier=mpicom_barrier) - end if - end if - - do eci = 1,size(comp) - if (comp(eci)%iamin_cplcompid) then - if (present(timer_map_exch)) then - call t_drvstartf (trim(timer_map_exch), barrier=comp(eci)%mpicom_cplcompid) - end if - - if (flow == 'x2c') then ! coupler to component - if ( size(comp) > 1) then - mpi_tag = comp(eci)%cplcompid*100+eci*10+2 - else - mpi_tag = comp(eci)%cplcompid*10000+eci*10+2 - end if - call seq_map_map(comp(eci)%mapper_Cx2c, comp(eci)%x2c_cx, comp(eci)%x2c_cc, msgtag=mpi_tag) - else if (flow == 'c2x') then ! 
component to coupler - if ( size(comp) > 1) then - mpi_tag = comp(eci)%cplcompid*100+eci*10+4 - else - mpi_tag = comp(eci)%cplcompid*10000+eci*10+4 - end if - call seq_map_map(comp(eci)%mapper_Cc2x, comp(eci)%c2x_cc, comp(eci)%c2x_cx, msgtag=mpi_tag) - end if - - if (present(timer_map_exch)) then - call t_drvstopf (trim(timer_map_exch)) - end if - endif - enddo - - if (present(timer_infodata_exch)) then - call t_drvstartf (trim(timer_infodata_exch), barrier=mpicom_barrier) - end if - if (flow == 'c2x') then - if (comp(1)%iamin_cplcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplcompid, trim(infodata_string)) - end if - else if (flow == 'x2c') then - if (comp(1)%iamin_cplallcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplallcompid, trim(infodata_string)) - end if - endif - if (present(timer_infodata_exch)) then - call t_drvstopf (trim(timer_infodata_exch)) - end if - - if (present(timer_comp_exch)) then - if (present(mpicom_barrier)) then - call t_drvstopf (trim(timer_comp_exch), cplcom=.true.) - end if - end if - - end subroutine component_exch - - !=============================================================================== - - subroutine component_diag(infodata, comp, flow, comment, info_debug, timer_diag ) - - !--------------------------------------------------------------- - ! Description - ! Component diagnostics for send/recv to coupler - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - type(component_type) , intent(in) :: comp(:) - character(len=3) , intent(in) :: flow - character(len=*) , intent(in) :: comment - integer , intent(in) :: info_debug - character(len=*) , intent(in), optional :: timer_diag - ! - ! Local Variables - integer :: eci - character(*), parameter :: subname = '(component_diag)' - !--------------------------------------------------------------- - - if (info_debug > 1) then - if (present(timer_diag)) then - call t_drvstartf (trim(timer_diag), barrier=mpicom_CPLID) - end if - - do eci = 1,size(comp) - if (flow == 'x2c') then ! coupler to component - call seq_diag_avect_mct(infodata, CPLID, comp(eci)%x2c_cx, & - comp(eci)%dom_cx, comp(eci)%gsmap_cx, trim(comment)//comp(eci)%suffix) - end if - if (flow == 'c2x') then ! component to coupler - call seq_diag_avect_mct(infodata, CPLID, comp(eci)%c2x_cx, & - comp(eci)%dom_cx, comp(eci)%gsmap_cx, trim(comment)//comp(eci)%suffix) - end if - enddo - - if (present(timer_diag)) then - call t_drvstopf (trim(timer_diag)) - end if - endif - - end subroutine component_diag - -end module component_mod diff --git a/src/drivers/mct/main/component_type_mod.F90 b/src/drivers/mct/main/component_type_mod.F90 deleted file mode 100644 index 6d222c8a1d5..00000000000 --- a/src/drivers/mct/main/component_type_mod.F90 +++ /dev/null @@ -1,265 +0,0 @@ -module component_type_mod - - !---------------------------------------------------------------------------- - ! 
share code & libs - !---------------------------------------------------------------------------- - use shr_kind_mod , only: r8 => SHR_KIND_R8 - use shr_kind_mod , only: cs => SHR_KIND_CS - use shr_kind_mod , only: cl => SHR_KIND_CL - use shr_kind_mod , only: IN => SHR_KIND_IN - use seq_cdata_mod , only: seq_cdata - use seq_map_type_mod , only: seq_map - use seq_comm_mct , only: seq_comm_namelen - use seq_comm_mct , only: num_inst_atm, num_inst_lnd, num_inst_rof - use seq_comm_mct , only: num_inst_ocn, num_inst_ice, num_inst_glc - use seq_comm_mct , only: num_inst_wav, num_inst_esp, num_inst_iac - use mct_mod - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - ! - ! on component pes - public :: component_get_c2x_cc - public :: component_get_x2c_cc - public :: component_get_dom_cc - public :: component_get_gsmap_cc - public :: component_get_cdata_cc - public :: component_get_iamroot_compid - public :: check_fields - ! - ! on cpl pes - public :: component_get_x2c_cx - public :: component_get_c2x_cx - public :: component_get_dom_cx - public :: component_get_gsmap_cx - public :: component_get_drv2mdl - public :: component_get_mdl2drv - ! - ! on union coupler/component pes - public :: component_get_mapper_Cc2x - public :: component_get_mapper_Cx2c - ! - ! on driver pes (all pes) - public :: component_get_name - public :: component_get_suffix - public :: component_get_iamin_compid - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - type component_type - ! - ! Coupler pes - ! used by prep_xxx and all other coupler based routines - ! - type(mct_ggrid) , pointer :: dom_cx => null() ! component domain (same for all instances) - type(mct_gsMap) , pointer :: gsMap_cx => null() ! decomposition on coupler pes (same for all instances) - type(mct_aVect) , pointer :: x2c_cx => null() ! - type(mct_aVect) , pointer :: c2x_cx => null() - ! - ! Component pes - ! - type(seq_cdata) , pointer :: cdata_cc => null() - type(mct_ggrid) , pointer :: dom_cc => null() - type(mct_gsMap) , pointer :: gsMap_cc => null() ! decomposition on component pes - type(mct_aVect) , pointer :: x2c_cc => null() - type(mct_aVect) , pointer :: c2x_cc => null() - real(r8) , pointer :: drv2mdl(:) => null() ! area correction factors - real(r8) , pointer :: mdl2drv(:) => null() ! area correction factors - ! - ! Union of coupler/component pes - used by exchange routines - ! - type(seq_map) , pointer :: mapper_Cc2x => null() ! coupler -> component rearranging - type(seq_map) , pointer :: mapper_Cx2c => null() ! component -> coupler rearranging - ! - ! Driver pes (all pes) - ! - integer :: compid - integer :: cplcompid - integer :: cplallcompid - integer :: mpicom_compid - integer :: mpicom_cplcompid - integer :: mpicom_cplallcompid - logical :: iamin_compid - logical :: iamin_cplcompid - logical :: iamin_cplallcompid - logical :: iamroot_compid - logical :: present ! true => component is present and not stub - integer :: nthreads_compid - character(len=CL) :: suffix - character(len=1) :: oneletterid - character(len=3) :: ntype - character(len=seq_comm_namelen) :: name - end type component_type - - public :: component_type - - !---------------------------------------------------------------------------- - ! 
Component type instances - !---------------------------------------------------------------------------- - - type(component_type), target :: atm(num_inst_atm) - type(component_type), target :: lnd(num_inst_lnd) - type(component_type), target :: rof(num_inst_rof) - type(component_type), target :: ocn(num_inst_ocn) - type(component_type), target :: ice(num_inst_ice) - type(component_type), target :: glc(num_inst_glc) - type(component_type), target :: wav(num_inst_wav) - type(component_type), target :: esp(num_inst_esp) - type(component_type), target :: iac(num_inst_iac) - - public :: atm, lnd, rof, ocn, ice, glc, wav, esp, iac - - !=============================================================================== - -contains - - !=============================================================================== - ! Accessor functions into component instance - !=============================================================================== - - function component_get_c2x_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_c2x_cc - component_get_c2x_cc => comp%c2x_cc - end function component_get_c2x_cc - - function component_get_c2x_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_c2x_cx - component_get_c2x_cx => comp%c2x_cx - end function component_get_c2x_cx - - function component_get_x2c_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_x2c_cc - component_get_x2c_cc => comp%x2c_cc - end function component_get_x2c_cc - - function component_get_x2c_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_x2c_cx - component_get_x2c_cx => comp%x2c_cx - end function component_get_x2c_cx - - function component_get_name(comp) - type(component_type), intent(in), target :: comp - character(len=seq_comm_namelen) :: component_get_name - component_get_name = comp%name - end function component_get_name - - function component_get_iamin_compid(comp) - type(component_type), intent(in), target :: comp - logical :: component_get_iamin_compid - component_get_iamin_compid = comp%iamin_compid - end function component_get_iamin_compid - - function component_get_iamroot_compid(comp) - type(component_type), intent(in), target :: comp - logical :: component_get_iamroot_compid - component_get_iamroot_compid = comp%iamroot_compid - end function component_get_iamroot_compid - - function component_get_suffix(comp) - type(component_type), intent(in), target :: comp - character(len=CL) :: component_get_suffix - component_get_suffix = comp%suffix - end function component_get_suffix - - function component_get_dom_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_ggrid), pointer :: component_get_dom_cx - component_get_dom_cx => comp%dom_cx - end function component_get_dom_cx - - function component_get_dom_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_ggrid), pointer :: component_get_dom_cc - component_get_dom_cc => comp%dom_cc - end function component_get_dom_cc - - function component_get_gsmap_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_gsmap), pointer :: component_get_gsmap_cx - component_get_gsmap_cx => comp%gsmap_cx - end function component_get_gsmap_cx - - function component_get_gsmap_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_gsmap), pointer :: component_get_gsmap_cc - component_get_gsmap_cc => comp%gsmap_cc - end 
function component_get_gsmap_cc - - function component_get_cdata_cc(comp) - type(component_type), intent(in), target :: comp - type(seq_cdata), pointer :: component_get_cdata_cc - component_get_cdata_cc => comp%cdata_cc - end function component_get_cdata_cc - - function component_get_drv2mdl(comp) - type(component_type), intent(in), target :: comp - real(r8), pointer :: component_get_drv2mdl(:) - component_get_drv2mdl => comp%drv2mdl - end function component_get_drv2mdl - - function component_get_mdl2drv(comp) - type(component_type), intent(in), target :: comp - real(r8), pointer :: component_get_mdl2drv(:) - component_get_mdl2drv => comp%mdl2drv - end function component_get_mdl2drv - - function component_get_mapper_Cc2x(comp) - type(component_type), intent(in), target :: comp - type(seq_map), pointer :: component_get_mapper_Cc2x - component_get_mapper_Cc2x => comp%mapper_Cc2x - end function component_get_mapper_Cc2x - - function component_get_mapper_Cx2c(comp) - type(component_type), intent(in), target :: comp - type(seq_map), pointer :: component_get_mapper_Cx2c - component_get_mapper_Cx2c => comp%mapper_Cx2c - end function component_get_mapper_Cx2c - - subroutine check_fields(comp, comp_index) - use shr_infnan_mod, only: shr_infnan_isnan - use mct_mod, only: mct_avect_getrlist2c, mct_gsMap_orderedPoints - type(component_type), intent(in) :: comp - integer(in), intent(in) :: comp_index - - integer(IN) :: lsize ! size of attr vect - integer(IN) :: nflds ! number of attr vects - integer(in) :: fld, n ! iterators - integer(IN) :: rank - integer(IN) :: ierr - integer(IN), pointer :: gpts(:) - character(len=CL) :: msg - - if(associated(comp%c2x_cc) .and. associated(comp%c2x_cc%rattr)) then - lsize = mct_avect_lsize(comp%c2x_cc) - nflds = size(comp%c2x_cc%rattr,1) - ! c2x_cc is allocated even if not used such as in stub models - ! do not test this case. - if(lsize <= 1 .and. nflds <= 1) return - if(any(shr_infnan_isnan(comp%c2x_cc%rattr))) then - do fld=1,nflds - do n=1,lsize - if(shr_infnan_isnan(comp%c2x_cc%rattr(fld,n))) then - call mpi_comm_rank(comp%mpicom_compid, rank, ierr) - call mct_gsMap_orderedPoints(comp%gsmap_cc, rank, gpts) - write(msg,'(a,a,a,i4,a,a,a,i8)')'component_mod:check_fields NaN found in ',trim(comp%name),' instance: ',& - comp_index,' field ',trim(mct_avect_getRList2c(fld, comp%c2x_cc)), ' 1d global index: ',gpts(n) - call shr_sys_abort(msg) - endif - enddo - enddo - endif - endif - end subroutine check_fields - -end module component_type_mod diff --git a/src/drivers/mct/main/cplcomp_exchange_mod.F90 b/src/drivers/mct/main/cplcomp_exchange_mod.F90 deleted file mode 100644 index d82d19ac888..00000000000 --- a/src/drivers/mct/main/cplcomp_exchange_mod.F90 +++ /dev/null @@ -1,963 +0,0 @@ -module cplcomp_exchange_mod - - use shr_kind_mod, only: R8 => SHR_KIND_R8, IN=>SHR_KIND_IN - use shr_kind_mod, only: CL => SHR_KIND_CL, CX => SHR_KIND_CX, CXX => SHR_KIND_CXX - use shr_sys_mod - use shr_const_mod - use shr_mct_mod, only: shr_mct_sMatPInitnc, shr_mct_queryConfigFile - use mct_mod - use seq_map_type_mod - use component_type_mod - use seq_flds_mod, only: seq_flds_dom_coord, seq_flds_dom_other - use seq_comm_mct, only: cplid, logunit - use seq_comm_mct, only: seq_comm_getinfo => seq_comm_setptrs, seq_comm_iamin - use seq_diag_mct - - implicit none - private ! except -#include - save - - !-------------------------------------------------------------------------- - ! 
Public interfaces - !-------------------------------------------------------------------------- - - public :: seq_map_init_exchange ! union of cpl/component pes - public :: seq_map_map_exchange ! union of cpl/component pes - public :: seq_mctext_gsmapInit - public :: seq_mctext_avInit - public :: seq_mctext_gGridInit - public :: seq_mctext_avExtend - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - ! Shared routines for extension and computation of gsmaps, avs, and ggrids - private :: seq_mctext_gsmapIdentical - private :: seq_mctext_gsmapExtend - private :: seq_mctext_gsmapCreate - private :: seq_mctext_avCreate - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - integer,public :: seq_mctext_decomp - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(*),parameter :: subName = '(seq_mctext_mct)' - real(r8),parameter :: c1 = 1.0_r8 - - !======================================================================= -contains - !======================================================================= - - subroutine seq_map_init_exchange( comp, mapper, flow, string) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - type(seq_map) , intent(inout), pointer :: mapper - character(len=3), intent(in) :: flow - character(len=*), intent(in),optional :: string - ! - ! Local Variables - ! - integer(IN) :: ID_s - integer(IN) :: ID_d - integer(IN) :: ID_join - integer(IN) :: mapid - integer(IN) :: mpicom_s, mpicom_d, mpicom_join - type(mct_gsmap) , pointer :: gsmap_s - type(mct_gsmap) , pointer :: gsmap_d - type(mct_gsmap) :: gsmap_s_join - type(mct_gsmap) :: gsmap_d_join - character(len=*),parameter :: subname = "(seq_map_init_rearrsplit) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - endif - - id_join = comp%cplcompid - call seq_comm_getinfo(ID_join, mpicom=mpicom_join) - - if (flow == 'c2x') then - gsmap_s => component_get_gsmap_cc(comp) - gsmap_d => component_get_gsmap_cx(comp) - end if - if (flow == 'x2c') then - gsmap_s => component_get_gsmap_cx(comp) - gsmap_d => component_get_gsmap_cc(comp) - end if - - if (mct_gsmap_Identical(gsmap_s,gsmap_d)) then - - call seq_map_mapmatch(mapid, gsmap_s=gsmap_s, gsmap_d=gsmap_d, strategy="copy") - - if (mapid > 0) then - call seq_map_mappoint(mapid, mapper) - else - call seq_map_mapinit(mapper, mpicom_join) - mapper%copy_only = .true. 
- mapper%strategy = "copy" - if (flow == 'c2x') then - mapper%gsmap_s => component_get_gsmap_cc(comp) - mapper%gsmap_d => component_get_gsmap_cx(comp) - end if - if (flow == 'x2c') then - mapper%gsmap_s => component_get_gsmap_cx(comp) - mapper%gsmap_d => component_get_gsmap_cc(comp) - end if - endif - - if (seq_comm_iamroot(ID_join)) then - write(logunit,'(2A,L2)') subname,' gsmaps ARE IDENTICAL, copyoption = ',mapper%copy_only - endif - - else - - if (seq_comm_iamroot(ID_join)) write(logunit,'(2A)') subname,' gsmaps are not identical' - - if (flow == 'c2x') then - id_s = comp%compid - id_d = cplid - end if - if (flow == 'x2c') then - id_s = cplid - id_d = comp%compid - end if - call seq_comm_getinfo(ID_s , mpicom=mpicom_s) - call seq_comm_getinfo(ID_d , mpicom=mpicom_d) - call seq_comm_getinfo(ID_join, mpicom=mpicom_join) - - ! --- Extend gsmaps to join group of pes - - call seq_mctext_gsmapExtend(gsmap_s, mpicom_s, gsmap_s_join, mpicom_join, ID_join) - call seq_mctext_gsmapExtend(gsmap_d, mpicom_d, gsmap_d_join, mpicom_join, ID_join) - - ! --- Initialize rearranger based on join gsmaps - ! --- test for the gsmaps instead of the gsmap joins because the gsmap joins are temporary - - ! ------------------------------- - ! tcx tcraig mapmatch is a problem here because we're comparing gsmaps that may not be defined - ! on some pes. first issue is whether gsmap_identical in underlying routine will abort. - ! second issue is whether different pes return different values. use mapidmin, mapidmax to - ! confirm all mapids returned are the same. if not, then just set mapid to -1 and compute - ! a new rearranger. - ! tcx not clear this works all the time, so just do not do map matching here for time being - ! Sept 2013. - ! ------------------------------- - ! mapid = -1 - ! call seq_map_mapmatch(mapid,gsmap_s=gsmap_s,gsmap_d=gsmap_d,strategy="rearrange") - ! call shr_mpi_min(mapid,mapidmin,mpicom_join,subname//' min') - ! call shr_mpi_max(mapid,mapidmax,mpicom_join,subname//' max') - ! if (mapidmin /= mapidmax) mapid = -1 - ! ------------------------------- - - ! --- Initialize rearranger - ! --- the gsmap joins are temporary so store the regular gsmaps in the mapper - call seq_map_mapinit(mapper, mpicom_join) - mapper%rearrange_only = .true. - mapper%strategy = "rearrange" - if (flow == 'c2x') then - mapper%gsmap_s => component_get_gsmap_cc(comp) - mapper%gsmap_d => component_get_gsmap_cx(comp) - end if - if (flow == 'x2c') then - mapper%gsmap_s => component_get_gsmap_cx(comp) - mapper%gsmap_d => component_get_gsmap_cc(comp) - end if - call seq_map_gsmapcheck(gsmap_s_join, gsmap_d_join) - call mct_rearr_init(gsmap_s_join, gsmap_d_join, mpicom_join, mapper%rearr) - - ! --- Clean up temporary gsmaps - - call mct_gsMap_clean(gsmap_s_join) - call mct_gsMap_clean(gsmap_d_join) - - endif - - if (seq_comm_iamroot(CPLID)) then - write(logunit,'(2A,I6,4A)') subname,' mapper counter, strategy, mapfile = ', & - mapper%counter,' ',trim(mapper%strategy),' ',trim(mapper%mapfile) - call shr_sys_flush(logunit) - endif - - end subroutine seq_map_init_exchange - - !=============================================================================== - - subroutine seq_map_map_exchange( comp, flow, dom_flag, dom_tmp, string, msgtag ) - - !----------------------------------------------------- - ! - ! Arguments - ! 
- type(component_type) , intent(inout) :: comp - character(len=3) , intent(in) :: flow - logical , intent(in),optional :: dom_flag - type(mct_gGrid) , intent(in),optional, target :: dom_tmp - character(len=*) , intent(in),optional :: string - integer(IN) , intent(in),optional :: msgtag - ! - ! Local Variables - ! - type(seq_map) , pointer :: mapper - type(mct_aVect), pointer :: av_s - type(mct_aVect), pointer :: av_d - type(mct_gGrid), pointer :: dom_s - type(mct_gGrid), pointer :: dom_d - integer(IN),save :: ltag ! message tag for rearrange - character(len=*),parameter :: subname = "(seq_map_map) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - endif - - if (flow == 'c2x') then - if (present(dom_flag)) then - dom_s => component_get_dom_cc(comp) - dom_d => component_get_dom_cx(comp) - ! Overwrite dom_d pointer if dom_tmp is present - ! Needed for backwards compatibility with domain checker in component_init_cx - if (present(dom_tmp)) then - dom_d => dom_tmp - end if - else - av_s => component_get_c2x_cc(comp) - av_d => component_get_c2x_cx(comp) - end if - mapper => component_get_mapper_Cc2x(comp) - end if - if (flow == 'x2c') then - if (present(dom_flag)) then - dom_s => component_get_dom_cx(comp) - dom_d => component_get_dom_cc(comp) - else - av_s => component_get_x2c_cx(comp) - av_d => component_get_x2c_cc(comp) - end if - mapper => component_get_mapper_Cx2c(comp) - end if - - if (present(msgtag)) then - ltag = msgtag - else - ltag = 2000 - endif - - if (mapper%copy_only) then - !------------------------------------------- - ! COPY data - !------------------------------------------- - if (present(dom_flag)) then - call mct_aVect_copy(aVin=dom_s%data, aVout=dom_d%data, vector=mct_usevector) - else - call mct_aVect_copy(aVin=av_s, aVout=av_d, vector=mct_usevector) - end if - - else if (mapper%rearrange_only) then - !------------------------------------------- - ! REARRANGE data - !------------------------------------------- - if (present(dom_flag)) then - call mct_rearr_rearrange(dom_s%data, dom_d%data, mapper%rearr, tag=ltag, VECTOR=mct_usevector, & - ALLTOALL=mct_usealltoall) - else - call mct_rearr_rearrange(av_s, av_d, mapper%rearr, tag=ltag, VECTOR=mct_usevector, & - ALLTOALL=mct_usealltoall) - end if - end if - - end subroutine seq_map_map_exchange - - !======================================================================= - - subroutine seq_mctext_gsmapInit(comp) - - ! This routine initializes a gsmap based on another gsmap potentially - ! on other pes. It addresses non-overlap of pes. - - !----------------------------------------------------- - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - ! - ! Local Variables - ! - integer :: mpicom_cplid - integer :: mpicom_old - integer :: mpicom_new - integer :: mpicom_join - integer :: ID_old - integer :: ID_new - integer :: ID_join - type(mct_gsMap), pointer :: gsmap_old - type(mct_gsMap), pointer :: gsmap_new - type(mct_gsMap) :: gsmap_old_join ! 
gsmap_old on joined id, temporary - character(len=*),parameter :: subname = "(seq_mctext_gsmapInit) " - !----------------------------------------------------- - - call seq_comm_getinfo(CPLID, mpicom=mpicom_CPLID) - - id_new = cplid - id_old = comp%compid - id_join = comp%cplcompid - - mpicom_new = mpicom_cplid - mpicom_old = comp%mpicom_compid - mpicom_join = comp%mpicom_cplcompid - - gsmap_new => component_get_gsmap_cx(comp) - gsmap_old => component_get_gsmap_cc(comp) - - call seq_comm_getinfo(ID_old ,mpicom=mpicom_old) - call seq_comm_getinfo(ID_new ,mpicom=mpicom_new) - call seq_comm_getinfo(ID_join,mpicom=mpicom_join) - - ! --- Set gsmaps - ! --- Extend the old one to now span all pes on ID_join - ! --- Create a new gsmap on pes associated with ID_new using info from the old one - - call seq_mctext_gsmapExtend(gsmap_old , mpicom_old , gsmap_old_join, mpicom_join, ID_join) - call seq_mctext_gsmapCreate(gsmap_old_join, mpicom_join , gsmap_new , mpicom_new , ID_new ) - - call mct_gsMap_clean(gsmap_old_join) - - end subroutine seq_mctext_gsmapInit - - !======================================================================= - - subroutine seq_mctext_avInit( comp, flow ) - - !----------------------------------------------------- - ! This routine initializes Avs that may need to be extended - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - character(len=3) , intent(in) :: flow - ! - ! Local Variables - ! - integer :: lsize - integer :: mpicom_cplid - integer :: mpicom_new - integer :: ID_old - integer :: ID_new - integer :: ID_join - type(mct_aVect), pointer :: AV1_old - type(mct_aVect), pointer :: AV1_new - type(mct_gsmap), pointer :: gsmap_new - character(len=*),parameter :: subname = "(seq_mctext_avInit) " - !----------------------------------------------------- - - ! --- Setup data for use and make sure the old ID is ok - - call seq_comm_getinfo(CPLID ,mpicom=mpicom_CPLID) - - id_new = cplid - id_old = comp%compid - id_join = comp%cplcompid - - mpicom_new = mpicom_cplid - - gsmap_new => component_get_gsmap_cx(comp) - - if (flow == 'c2x') then - av1_old => component_get_c2x_cc(comp) - av1_new => component_get_c2x_cx(comp) - end if - if (flow == 'x2c') then - av1_old => component_get_x2c_cc(comp) - av1_new => component_get_x2c_cx(comp) - end if - - ! --- Extend old avs and initialize new avs for use in the future - - lsize = 0 - if (seq_comm_iamin(ID_new)) then - lsize = mct_gsMap_lsize(gsMap_new, mpicom_new) - endif - call seq_mctext_avExtend(AV1_old, ID_old, ID_join) - call seq_mctext_avCreate(AV1_old, ID_old, AV1_new, ID_join, lsize) - - end subroutine seq_mctext_avInit - - !======================================================================= - - subroutine seq_mctext_gGridInit(comp, ggrid_new) - - !----------------------------------------------------- - ! This routine initializes gGrids that may need to be extended - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - type(mct_gGrid), optional, target, intent(inout) :: ggrid_new - ! - ! Local Variables - ! - integer :: mpicom_cplid - integer :: lsize - integer :: mpicom_new - integer :: ID_old - integer :: ID_new - integer :: ID_join - type(mct_gGrid), pointer :: GG1_old - type(mct_gGrid), pointer :: GG1_new - type(mct_gsmap), pointer :: gsmap_new - character(len=*),parameter :: subname = "(seq_mctext_gGridInit) " - !----------------------------------------------------- - - ! 
--- Setup data for use and make sure the old ID is ok - - call seq_comm_getinfo(CPLID, mpicom=mpicom_CPLID) - - id_new = cplid - id_old = comp%compid - id_join = comp%cplcompid - - mpicom_new = mpicom_cplid - - gsmap_new => component_get_gsmap_cx(comp) - - gg1_old => component_get_dom_cc(comp) - gg1_new => component_get_dom_cx(comp) - - ! --- Extend old ggrids and initialize new ggrids for use in the future - - lsize = 0 - if (seq_comm_iamin(ID_new)) then - lsize = mct_gsMap_lsize(gsMap_new,mpicom_new) - endif - call seq_mctext_avExtend(GG1_old%data, ID_old, ID_join) - - if (present(ggrid_new)) then - call mct_gGrid_init(GGrid=ggrid_new, CoordChars=seq_flds_dom_coord, OtherChars=seq_flds_dom_other, lsize=lsize ) - call mct_avect_zero(ggrid_new%data) - else - call mct_gGrid_init(GGrid=GG1_new, CoordChars=seq_flds_dom_coord, OtherChars=seq_flds_dom_other, lsize=lsize ) - call mct_avect_zero(GG1_new%data) - end if - - end subroutine seq_mctext_gGridInit - - !======================================================================= - - subroutine seq_mctext_gsmapExtend(gsmapi, mpicomi, gsmapo, mpicomo, compido) - - !---------------------------------------------------------------- - ! Extend/Convert a gsmap from one mpicom to another mpicom that contains - ! at least all the pes that gsmap uses, but with different ranks - !---------------------------------------------------------------- - - implicit none - type(mct_gsMap), intent(IN) :: gsmapi - integer , intent(IN) :: mpicomi - type(mct_gsMap), intent(OUT):: gsmapo - integer , intent(IN) :: mpicomo - integer , intent(IN) :: compido - - character(len=*),parameter :: subname = "(seq_mctext_gsmapExtend) " - integer :: n - integer :: ngseg - integer :: gsize - integer :: msizei,msizeo - integer :: mrank,mranko,mrankog ! sets pe rank of root mpicomi pe in mpicomo - integer :: mpigrpi,mpigrpo - integer :: ierr - integer, pointer :: pei(:),peo(:) - integer, pointer :: start(:),length(:),peloc(:) - - mranko = -1 - - ! --- create the new gsmap on the mpicomi root only - - if (mpicomi /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomi,mrank,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_rank i') - if (mrank == 0) then - call mpi_comm_group(mpicomi,mpigrpi,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_group i') - call mpi_comm_group(mpicomo,mpigrpo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_group o') - call mpi_comm_size(mpicomi,msizei,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_size i') - call mpi_comm_size(mpicomo,msizeo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_size o') - - ! --- setup the translation of pe numbers from the old gsmap(mpicom) - ! --- to the new one, pei -> peo - - allocate(pei(0:msizei-1),peo(0:msizei-1)) - do n = 0,msizei-1 - pei(n) = n - enddo - - peo = -1 - call mpi_group_translate_ranks(mpigrpi,msizei,pei,mpigrpo,peo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_group_translate_ranks') - - do n = 0,msizei-1 - if (peo(n) < 0 .or. peo(n) > msizeo-1) then - write(logunit,*) subname,' peo out of bounds ',peo(n),msizeo - call shr_sys_abort() - endif - enddo - - mranko = peo(0) - - ! --- compute the new gsmap which has the same start and length values - ! --- but peloc is now the mapping of pei to peo - - ngseg = gsmapi%ngseg - gsize = gsmapi%gsize - allocate(start(ngseg),length(ngseg),peloc(ngseg)) - do n = 1,ngseg - start(n) = gsmapi%start(n) - length(n) = gsmapi%length(n) - peloc(n) = peo(gsmapi%pe_loc(n)) - enddo - - ! 
--- initialize the gsmap on the root pe - - call mct_gsmap_init(gsmapo,compido,ngseg,gsize,start,length,peloc) - - deallocate(pei,peo,start,length,peloc) - endif - endif - - ! --- broadcast via allreduce the mpicomi root pe in mpicomo space - ! --- mranko is -1 except on the root pe where is it peo of that pe - - call mpi_allreduce(mranko,mrankog,1,MPI_INTEGER,MPI_MAX,mpicomo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_allreduce max') - - ! --- broadcast the gsmap to all pes in mpicomo from mrankog - - call mct_gsmap_bcast(gsmapo, mrankog, mpicomo) - - ! tcx summarize decomp info -#if (1 == 0) - write(logunit,*) trim(subname),'tcxa ',mpicomi,mpicomo - call shr_sys_flush(logunit) - call mpi_barrier(mpicomo,ierr) - - if (mpicomi /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomi,mrank,ierr) - write(logunit,*) 'tcxbi ',mrank - if (mrank == 0) then - write(logunit,*) 'tcxci ',gsmapi%ngseg,size(gsmapi%start),gsmapi%gsize,gsmapi%comp_id - do n = 1,gsmapi%ngseg - write(logunit,*) 'tcx gsmti ',n,gsmapi%start(n),gsmapi%length(n),gsmapi%pe_loc(n) - enddo - call shr_sys_flush(logunit) - endif - endif - - if (mpicomo /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomo,mrank,ierr) - write(logunit,*) 'tcxbo ',mrank - if (mrank == 0) then - write(logunit,*) 'tcxco ',gsmapo%ngseg,size(gsmapo%start),gsmapo%gsize,gsmapo%comp_id - do n = 1,gsmapo%ngseg - write(logunit,*) 'tcx gsmto ',n,gsmapo%start(n),gsmapo%length(n),gsmapo%pe_loc(n) - enddo - call shr_sys_flush(logunit) - endif - endif - - call shr_sys_flush(logunit) - call mpi_barrier(mpicomo,ierr) -#endif - - - end subroutine seq_mctext_gsmapExtend - - !======================================================================= - - subroutine seq_mctext_gsmapCreate(gsmapi, mpicomi, gsmapo, mpicomo, compido) - - !--------------------------------------------------------------------- - ! creates a new gsmap on a subset of pes, requires setting a new decomp - !--------------------------------------------------------------------- - - implicit none - type(mct_gsMap), intent(IN) :: gsmapi - integer , intent(IN) :: mpicomi - type(mct_gsMap), intent(OUT):: gsmapo - integer , intent(IN) :: mpicomo - integer , intent(IN) :: compido - - character(len=*),parameter :: subname = "(seq_mctext_gsmapCreate) " - integer :: n,m,k - integer :: ktot ! number of active cells in gsmap - integer :: apesi, apeso ! number of active pes in gsmap - integer :: lsizeo ! local size for lindex - integer :: ngsegi,ngsego ! ngseg of mpicomi, mpicomo - integer :: gsizei,gsizeo ! gsize of mpicomi, mpicomo - integer :: msizei,msizeo ! size of mpicomi, mpicomo - integer :: mranki,mranko ! rank in mpicomi, mpicomo - integer :: ierr - integer :: decomp_type - integer, pointer :: start(:),length(:),peloc(:),perm(:),gindex(:),lindex(:) - real(r8):: rpeloc - logical :: gsmap_bfbflag = .false. ! normally this should be set to false - - ! --- create a new gsmap on new pes based on the old gsmap - ! --- gsmapi must be known on all mpicomo pes, compute the same - ! 
--- thing on all pes in parallel - - if (mpicomo /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomi,mranki,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank i') - call mpi_comm_size(mpicomi,msizei,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size i') - call mpi_comm_rank(mpicomo,mranko,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank o') - call mpi_comm_size(mpicomo,msizeo,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size o') - - ngsegi = gsmapi%ngseg - gsizei = gsmapi%gsize - gsizeo = gsizei - call mct_gsMap_activepes(gsmapi,apesi) - - decomp_type = 0 - - if (seq_mctext_decomp == 0) then - if (msizeo == apesi) then ! preserve segments and decomp - ! For testing - set decomp_type to 1 - to have gsmapi and gsmapo identical - if (gsmap_bfbflag) then - decomp_type = 1 ! better in cpl to have all decomps "same-ish" - else - decomp_type = 2 - end if - elseif (ngsegi >= msizeo) then ! preserve segments, new decomp - decomp_type = 2 - else ! new segments - decomp_type = 3 - endif - else - decomp_type = seq_mctext_decomp - endif - - !tcx decomp_type = 3 ! over ride setting above for testing - ! if (mranko == 0) write(logunit,'(2A,4I)') trim(subname),' decomp_type =',decomp_type,ngsegi,msizeo,apesi - - select case (decomp_type) - - case(1) ! --- preserve segments and decomp --------------------- - - ! -- copy the gsmap and translate the pes - call mct_gsMap_copy(gsmapi,gsmapo) - ngsego = ngsegi - do n = 1,ngsego - gsmapo%pe_loc(n) = mod(gsmapo%pe_loc(n),msizeo) ! translate pes 1:1 from old to new - enddo - - case(2) ! --- preserve segments, new decomp -------------------- - - ! --- preserve segments, sort the start and length, assign a new pe list - ngsego = ngsegi - allocate(start(ngsego),length(ngsego),peloc(ngsego),perm(ngsego)) - do n = 1,ngsego - start(n) = gsmapi%start(n) - length(n) = gsmapi%length(n) - enddo - ! --- sort gsmap to minimize permute cost in mct - call mct_indexset(perm) - call mct_indexsort(ngsego,perm,start) - call mct_permute(start,perm,ngsego) - call mct_permute(length,perm,ngsego) - ! --- give each pe "equal" number of segments, use reals to avoid integer overflow - do n = 1,ngsego - rpeloc = (((msizeo*c1)*((n-1)*c1))/(ngsego*c1)) ! give each pe "equal" number of segments, use reals to avoid integer overflow - peloc(n) = int(rpeloc) - enddo - call mct_gsmap_init(gsmapo,ngsego,start,length,peloc,0,mpicomo,compido,gsizeo) - deallocate(start,length,peloc,perm) - - case(3) ! --- new segments, new decomp ------------------------- - - ! --- new segments, compute gindex, then parse the gridcells out evenly - - k = 0 - do n = 1,ngsegi - do m = 1,gsmapi%length(n) - k = k + 1 - if (k > gsizei) then - write(logunit,*) trim(subname),' ERROR in gindex ',k,gsizei - call shr_sys_abort() - endif - enddo - enddo - ktot = k - - allocate(gindex(ktot),perm(ktot)) - - k = 0 - do n = 1,ngsegi - do m = 1,gsmapi%length(n) - k = k + 1 - gindex(k) = gsmapi%start(n) + m - 1 - enddo - enddo - call mct_indexset(perm) - call mct_indexsort(ktot,perm,gindex) - call mct_permute(gindex,perm,ktot) - - k = 0 - do m = 0,msizeo-1 - lsizeo = ktot/msizeo - if (m < (ktot - lsizeo*msizeo)) lsizeo = lsizeo + 1 - if (mranko == m) then - allocate(lindex(lsizeo)) - if (k+lsizeo > ktot) then - write(logunit,*) trim(subname),' ERROR: decomp out of bounds ',mranko,k,lsizeo,ktot - call shr_sys_abort() - endif - lindex(1:lsizeo) = gindex(k+1:k+lsizeo) - ! 
write(logunit,*) trim(subname),' decomp is ',mranko,lsizeo,k+1,k+lsizeo - endif - k = k + lsizeo - enddo - if (k /= ktot) then - write(logunit,*) trim(subname),' ERROR: decomp incomplete ',k,ktot - call shr_sys_abort() - endif - - call mct_gsmap_init(gsmapo,lindex,mpicomo,compido,size(lindex),gsizeo) - deallocate(gindex,perm,lindex) - - case default ! --- unknown --- - write(logunit,*) trim(subname),' ERROR decomp_type unknown ',decomp_type - call shr_sys_abort(trim(subname)//' ERROR decomp_type unknown') - - end select - - if (mranko == 0) then - write(logunit,102) trim(subname),' created new gsmap decomp_type =',decomp_type - write(logunit,102) trim(subname),' ngseg/gsize = ', & - mct_gsmap_ngseg(gsmapo),mct_gsmap_gsize(gsmapo) - call mct_gsmap_activepes(gsmapo,apeso) - write(logunit,102) trim(subname),' mpisize/active_pes = ', & - msizeo,apeso - write(logunit,102) trim(subname),' avg seg per pe/ape = ', & - mct_gsmap_ngseg(gsmapo)/msizeo,mct_gsmap_ngseg(gsmapo)/apeso - write(logunit,102) trim(subname),' nlseg/maxnlsegs = ', & - mct_gsmap_nlseg(gsmapo,0),mct_gsmap_maxnlseg(gsmapo) -102 format(2A,2I8) - endif - - ! if (.not. mct_gsmap_increasing(gsmapo) ) then - ! write(logunit,*) trim(subname),' ERROR: gsmapo not increasing' - ! call shr_sys_abort() - ! endif - - endif - - end subroutine seq_mctext_gsmapCreate - - !======================================================================= - - subroutine seq_mctext_avExtend(AVin,IDin,ID) - - !----------------------------------------------------------------------- - ! Extend an AV to a larger set of pes or - ! Initialize an AV on another set of pes - ! - ! Arguments - ! - type(mct_aVect), intent(INOUT):: AVin - integer ,intent(IN) :: IDin ! ID associated with AVin - integer , intent(IN) :: ID ! ID to initialize over - ! - ! Local variables - ! - character(len=*),parameter :: subname = "(seq_mctext_avExtend) " - integer :: mpicom - integer :: rank - integer :: lsizei, lsizen - integer :: srank,srankg - integer :: ierr - character(len=CXX) :: iList,rList - !----------------------------------------------------------------------- - - call seq_comm_getinfo(ID,mpicom=mpicom,iam=rank) - - ! --- lsizen is the size of the newly initialized AV, zero is valid - ! --- lsizei is -1 on any peszero on any pes where AV is not yet initialized - - lsizei = -1 - if (seq_comm_iamin(IDin)) lsizei = mct_aVect_lsize(AVin) - lsizen = 0 - - ! --- find a pe that already has AVin allocated, use MPI_MAX to do so - ! --- set the pe and broadcast it to all other pes using mpi_allreduce - - srank = -1 - srankg = -1 - if (lsizei > 0) srank = rank - - call mpi_allreduce(srank,srankg,1,MPI_INTEGER,MPI_MAX,mpicom,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_allreduce max') - - if (srankg < 0) then - write(logunit,*) subname,' WARNING AVin empty ' - return - endif - - ! --- set the iList and rList from the broadcast pe (srankg) and - ! --- broadcast the lists - - iList = " " - rList = " " - if (rank == srankg) then - if (mct_aVect_nIAttr(AVin) /= 0) iList = mct_aVect_ExportIList2c(AVin) - if (mct_aVect_nRattr(AVin) /= 0) rList = mct_aVect_ExportRList2c(AVin) - endif - - call mpi_bcast(iList,len(iList),MPI_CHARACTER,srankg,mpicom,ierr) - call mpi_bcast(rList,len(rList),MPI_CHARACTER,srankg,mpicom,ierr) - - ! --- now allocate the AV on any pes where the orig size is zero. those - ! --- should be pes that either have no data and may have been allocated - ! 
--- before (no harm in doing it again) or have never been allocated - - if (lsizei <= 0) then - if(len_trim(iList) > 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVin,iList=iList,rList=rList,lsize=lsizen) - elseif (len_trim(iList) > 0 .and. len_trim(rList) == 0) then - call mct_aVect_init(AVin,iList=iList,lsize=lsizen) - elseif (len_trim(iList) == 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVin,rList=rList,lsize=lsizen) - endif - endif - - end subroutine seq_mctext_avExtend - - !======================================================================= - - subroutine seq_mctext_avCreate(AVin,IDin,AVout,ID,lsize) - - !----------------------------------------------------------------------- - ! Extend an AV to a larger set of pes or - ! Initialize an AV on another set of pes - !----------------------------------------------------------------------- - - implicit none - type(mct_aVect), intent(INOUT):: AVin - integer ,intent(IN) :: IDin ! ID associated with AVin - type(mct_aVect), intent(INOUT):: AVout - integer , intent(IN) :: ID ! ID to initialize over - integer , intent(IN) :: lsize - - ! Local variables - - character(len=*),parameter :: subname = "(seq_mctext_avCreate) " - integer :: mpicom - integer :: rank - integer :: lsizei, lsizen - integer :: srank,srankg - integer :: ierr - character(len=CXX) :: iList,rList - - call seq_comm_getinfo(ID,mpicom=mpicom,iam=rank) - - ! --- lsizen is the size of the newly initialized AV, zero is valid - - lsizei = -1 - if (seq_comm_iamin(IDin)) lsizei = mct_aVect_lsize(AVin) - lsizen = lsize - - ! --- find a pe that already has AVin allocated, use MPI_MAX to do so - ! --- set the pe and broadcast it to all other pes - - srank = -1 - srankg = -1 - if (lsizei > 0) srank = rank - - call mpi_allreduce(srank,srankg,1,MPI_INTEGER,MPI_MAX,mpicom,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_allreduce max') - - if (srankg < 0) then - write(logunit,*) subname,' ERROR AVin not initialized ' - call shr_sys_abort() - endif - - ! --- set the iList and rList from the broadcast pe (srankg) and - ! --- broadcast the lists - - iList = " " - rList = " " - if (rank == srankg) then - if (mct_aVect_nIAttr(AVin) /= 0) iList = mct_aVect_ExportIList2c(AVin) - if (mct_aVect_nRattr(AVin) /= 0) rList = mct_aVect_ExportRList2c(AVin) - endif - - call mpi_bcast(iList,len(iList),MPI_CHARACTER,srankg,mpicom,ierr) - call mpi_bcast(rList,len(rList),MPI_CHARACTER,srankg,mpicom,ierr) - - ! --- now allocate the AV on all pes. the AV should not exist before. - ! --- If it does, mct should die. - - if(len_trim(iList) > 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVout,iList=iList,rList=rList,lsize=lsizen) - elseif (len_trim(iList) > 0 .and. len_trim(rList) == 0) then - call mct_aVect_init(AVout,iList=iList,lsize=lsizen) - elseif (len_trim(iList) == 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVout,rList=rList,lsize=lsizen) - endif - - end subroutine seq_mctext_avCreate - - !======================================================================= - - logical function seq_mctext_gsmapIdentical(gsmap1,gsmap2) - - implicit none - type(mct_gsMap), intent(IN):: gsmap1 - type(mct_gsMap), intent(IN):: gsmap2 - - ! Local variables - - character(len=*),parameter :: subname = "(seq_mctext_gsmapIdentical) " - integer :: n - logical :: identical - - !----------------------- - - identical = .true. - - ! --- continue compare --- - if (identical) then - if (mct_gsMap_gsize(gsmap1) /= mct_gsMap_gsize(gsmap2)) identical = .false. 
- if (mct_gsMap_ngseg(gsmap1) /= mct_gsMap_ngseg(gsmap2)) identical = .false. - endif - - ! --- continue compare --- - if (identical) then - do n = 1,mct_gsMap_ngseg(gsmap1) - if (gsmap1%start(n) /= gsmap2%start(n) ) identical = .false. - if (gsmap1%length(n) /= gsmap2%length(n)) identical = .false. - if (gsmap1%pe_loc(n) /= gsmap2%pe_loc(n)) identical = .false. - enddo - endif - - seq_mctext_gsmapIdentical = identical - - end function seq_mctext_gsmapIdentical - -end module cplcomp_exchange_mod diff --git a/src/drivers/mct/main/map_glc2lnd_mod.F90 b/src/drivers/mct/main/map_glc2lnd_mod.F90 deleted file mode 100644 index 4ae638ed71c..00000000000 --- a/src/drivers/mct/main/map_glc2lnd_mod.F90 +++ /dev/null @@ -1,399 +0,0 @@ -module map_glc2lnd_mod - - !--------------------------------------------------------------------- - ! - ! Purpose: - ! - ! This module contains routines for mapping fields from the GLC grid onto the LND grid - ! (separated by GLC elevation class) - ! - ! For high-level design, see: - ! https://docs.google.com/document/d/1sjsaiPYsPJ9A7dVGJIHGg4rVIY2qF5aRXbNzSXVAafU/edit?usp=sharing - -#include "shr_assert.h" - use seq_comm_mct, only : logunit - use shr_kind_mod, only : r8 => shr_kind_r8 - use glc_elevclass_mod, only : glc_get_num_elevation_classes, glc_get_elevation_class, & - glc_mean_elevation_virtual, glc_elevclass_as_string, & - GLC_ELEVCLASS_ERR_NONE, GLC_ELEVCLASS_ERR_TOO_LOW, & - GLC_ELEVCLASS_ERR_TOO_HIGH, glc_errcode_to_string - use mct_mod - use seq_map_type_mod, only : seq_map - use seq_map_mod, only : seq_map_map - use shr_log_mod, only : errMsg => shr_log_errMsg - use shr_sys_mod, only : shr_sys_abort - - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: map_glc2lnd_ec ! map all fields from GLC -> LND grid that need to be separated by elevation class - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: get_glc_elevation_classes ! get elevation class of each glc cell - private :: get_frac_this_ec ! get fraction in a given elevation class - private :: set_topo_in_virtual_columns - private :: make_aVect_frac_times_icemask - - character(len=*), parameter :: frac_times_icemask_field = 'Sg_frac_times_icemask' - -contains - - !----------------------------------------------------------------------- - subroutine map_glc2lnd_ec(g2x_g, & - frac_field, topo_field, icemask_field, extra_fields, & - mapper, g2x_l) - ! - ! !DESCRIPTION: - ! Maps fields from the GLC grid to the LND grid that need to be separated by - ! elevation class. - ! - ! Maps frac_field, topo_field, plus all fields defined in extra_fields. extra_fields - ! should be a colon-delimited list of fields, giving the field name in the g2x_g - ! attribute vector (i.e., without the elevation class suffixes). - ! - ! Assumes that g2x_g contains: - ! - frac_field - ! - topo_field - ! - icemask_field (Note: this is NOT mapped here, but is needed as an input to the mapping) - ! - each field in extra_fields - ! - ! Assumes that g2x_l contains: - ! - 00, 01, 02, ... - ! - 00, 01, 02, ... - ! - And similarly for each field in extra_fields - ! - ! Currently assumes that all fields are mapped using the same mapper, which should be - ! a conservative mapper (i.e., a flux mapper). - ! - ! !USES: - ! - ! 
!ARGUMENTS: - type(mct_aVect), intent(in) :: g2x_g - character(len=*), intent(in) :: frac_field ! name of field in g2x_g containing glc ice fraction - character(len=*), intent(in) :: topo_field ! name of field in g2x_g containing glc topo - character(len=*), intent(in) :: icemask_field ! name of field in g2x_g containing ice mask - character(len=*), intent(in) :: extra_fields - type(seq_map), intent(inout) :: mapper - type(mct_aVect), intent(inout) :: g2x_l - - ! - ! !LOCAL VARIABLES: - integer :: lsize_g - integer :: lsize_l - - ! The following need to be pointers to satisfy the MCT interface: - real(r8), pointer :: glc_frac(:) ! total ice fraction in each glc cell - real(r8), pointer :: glc_topo(:) ! topographic height of each glc cell - real(r8), pointer :: glc_frac_this_ec(:) ! ice fraction in this elevation class, for eachglc cell - - integer , allocatable :: glc_elevclass(:) ! elevation class of each glc cell (assuming cell is ice-covered) - integer :: n - character(len=:), allocatable :: elevclass_as_string - character(len=:), allocatable :: frac_field_ec ! field name: frac_field with elev class suffix - character(len=len(extra_fields)+100) :: fields_to_map - character(len=2*len(extra_fields)+100) :: fields_to_map_ec ! fields_to_map with elev class suffixes - - ! attribute vector holding glc fraction in one elev class, on the glc grid - type(mct_aVect) :: glc_frac_this_ec_g - - ! attribute vector holding glc fraction in one elev class, on the land grid - type(mct_aVect) :: glc_frac_this_ec_l - - ! attribute vector holding the product of (glc fraction in one elev class) x - ! (icemask), on the glc grid - type(mct_aVect) :: glc_frac_this_ec_times_icemask_g - - ! attribute vector holding fields to map (other than fraction) in one elevation - ! class, on the land grid - type(mct_aVect) :: glc_fields_this_ec_l - - character(len=*), parameter :: subname = 'map_glc2lnd_ec' - !----------------------------------------------------------------------- - - ! ------------------------------------------------------------------------ - ! Determine attribute vector sizes - ! ------------------------------------------------------------------------ - - lsize_g = mct_aVect_lsize(g2x_g) - lsize_l = mct_aVect_lsize(g2x_l) - - ! ------------------------------------------------------------------------ - ! Extract special fields from g2x_g - ! ------------------------------------------------------------------------ - - allocate(glc_frac(lsize_g)) - allocate(glc_topo(lsize_g)) - call mct_aVect_exportRattr(g2x_g, frac_field, glc_frac) - call mct_aVect_exportRattr(g2x_g, topo_field, glc_topo) - - ! ------------------------------------------------------------------------ - ! Determine elevation class of each glc point - ! ------------------------------------------------------------------------ - - allocate(glc_elevclass(lsize_g)) - allocate(glc_frac_this_ec(lsize_g)) - call get_glc_elevation_classes(glc_topo, glc_elevclass) - - ! ------------------------------------------------------------------------ - ! Map each elevation class - ! ------------------------------------------------------------------------ - - call shr_string_listMerge(extra_fields, topo_field, fields_to_map) - - do n = 0, glc_get_num_elevation_classes() - - ! ------------------------------------------------------------------------ - ! Put fraction in this elevation class into an attribute vector - ! 
------------------------------------------------------------------------ - - call get_frac_this_ec(glc_frac, glc_elevclass, n, glc_frac_this_ec) - call mct_aVect_init(glc_frac_this_ec_g, rList = frac_field, lsize = lsize_g) - call mct_aVect_importRattr(glc_frac_this_ec_g, frac_field, glc_frac_this_ec) - - ! ------------------------------------------------------------------------ - ! Map fraction to the land grid - ! ------------------------------------------------------------------------ - - call mct_aVect_init(glc_frac_this_ec_l, rList = frac_field, lsize = lsize_l) - - call seq_map_map(mapper = mapper, av_s = glc_frac_this_ec_g, av_d = glc_frac_this_ec_l, & - norm = .true., avwts_s = g2x_g, avwtsfld_s = icemask_field) - - elevclass_as_string = glc_elevclass_as_string(n) - frac_field_ec = frac_field // elevclass_as_string - call mct_aVect_copy(glc_frac_this_ec_l, g2x_l, & - rList = frac_field, TrList = frac_field_ec) - - ! ------------------------------------------------------------------------ - ! Map other fields to the land grid - ! - ! Note that bare land values are mapped in the same way as ice-covered values - ! ------------------------------------------------------------------------ - - ! Create a mask that is (fraction in this elevation class) x (icemask). So, only - ! grid cells that are both (a) within the icemask and (b) in this elevation class - ! will be included in the following mapping. - call make_aVect_frac_times_icemask(frac_av = glc_frac_this_ec_g, & - mask_av = g2x_g, & - frac_field = frac_field, & - icemask_field = icemask_field, & - frac_times_icemask_av = glc_frac_this_ec_times_icemask_g) - - call mct_aVect_init(glc_fields_this_ec_l, rList = fields_to_map, lsize = lsize_l) - call seq_map_map(mapper = mapper, av_s = g2x_g, av_d = glc_fields_this_ec_l, & - fldlist = fields_to_map, & - norm = .true., & - avwts_s = glc_frac_this_ec_times_icemask_g, & - avwtsfld_s = frac_times_icemask_field) - - call set_topo_in_virtual_columns(n, glc_frac_this_ec_l, & - frac_field, topo_field, & - glc_fields_this_ec_l) - - call shr_string_listAddSuffix(fields_to_map, glc_elevclass_as_string(n), fields_to_map_ec) - call mct_aVect_copy(glc_fields_this_ec_l, g2x_l, & - rList = fields_to_map, TrList = fields_to_map_ec) - - ! ------------------------------------------------------------------------ - ! Clean up - ! ------------------------------------------------------------------------ - - call mct_aVect_clean(glc_frac_this_ec_l) - call mct_aVect_clean(glc_frac_this_ec_g) - call mct_aVect_clean(glc_frac_this_ec_times_icemask_g) - call mct_aVect_clean(glc_fields_this_ec_l) - - end do - - deallocate(glc_frac) - deallocate(glc_topo) - deallocate(glc_frac_this_ec) - - end subroutine map_glc2lnd_ec - - - !----------------------------------------------------------------------- - subroutine get_glc_elevation_classes(glc_topo, glc_elevclass) - ! - ! !DESCRIPTION: - ! Get elevation class of each grid cell on the glc grid. - ! - ! This does not consider glc_frac: it simply gives the elevation class that the grid - ! cell would be in if it were ice-covered. So it never returns an elevation class of - ! 0 (bare land). (This design would allow us, in the future, to have glc grid cells - ! that are part ice-covered, part ice-free.) - ! - ! !USES: - ! - ! !ARGUMENTS: - real(r8), intent(in) :: glc_topo(:) ! topographic height - integer , intent(out) :: glc_elevclass(:) ! elevation class - ! - ! 
!LOCAL VARIABLES: - integer :: npts - integer :: glc_pt - integer :: err_code - - character(len=*), parameter :: subname = 'get_glc_elevation_classes' - !----------------------------------------------------------------------- - - npts = size(glc_elevclass) - SHR_ASSERT_FL((size(glc_topo) == npts), __FILE__, __LINE__) - - do glc_pt = 1, npts - call glc_get_elevation_class(glc_topo(glc_pt), glc_elevclass(glc_pt), err_code) - select case (err_code) - case (GLC_ELEVCLASS_ERR_NONE) - ! Do nothing - case (GLC_ELEVCLASS_ERR_TOO_LOW, GLC_ELEVCLASS_ERR_TOO_HIGH) - write(logunit,*) subname, ': WARNING, for glc_pt, topo = ', glc_pt, glc_topo(glc_pt) - write(logunit,*) glc_errcode_to_string(err_code) - case default - write(logunit,*) subname, ': ERROR getting elevation class for glc_pt = ', glc_pt - write(logunit,*) glc_errcode_to_string(err_code) - call shr_sys_abort(subname//': ERROR getting elevation class') - end select - end do - - end subroutine get_glc_elevation_classes - - !----------------------------------------------------------------------- - subroutine get_frac_this_ec(glc_frac, glc_elevclass, this_elevclass, glc_frac_this_ec) - ! - ! !DESCRIPTION: - ! Get fractional ice coverage in a given elevation class. - ! - ! The input glc_elevclass gives the elevation class of each glc grid cell, assuming - ! that the grid cell is ice-covered. - ! - ! !USES: - ! - ! !ARGUMENTS: - real(r8), intent(in) :: glc_frac(:) ! total ice sheet fraction in each glc grid cell - integer , intent(in) :: glc_elevclass(:) ! elevation class of each glc grid cell - integer , intent(in) :: this_elevclass ! elevation class index of interest - real(r8), intent(out) :: glc_frac_this_ec(:) ! ice fraction in this elevation class - ! - ! !LOCAL VARIABLES: - integer :: npts - - character(len=*), parameter :: subname = 'get_frac_this_ec' - !----------------------------------------------------------------------- - - npts = size(glc_frac_this_ec) - SHR_ASSERT_FL((size(glc_frac) == npts), __FILE__, __LINE__) - SHR_ASSERT_FL((size(glc_elevclass) == npts), __FILE__, __LINE__) - - if (this_elevclass == 0) then - glc_frac_this_ec(:) = 1._r8 - glc_frac(:) - else - where (glc_elevclass == this_elevclass) - glc_frac_this_ec = glc_frac - elsewhere - glc_frac_this_ec = 0._r8 - end where - end if - - end subroutine get_frac_this_ec - - !----------------------------------------------------------------------- - subroutine set_topo_in_virtual_columns(elev_class, glc_frac_this_ec_l, & - frac_field, topo_field, & - glc_topo_this_ec_l) - ! - ! !DESCRIPTION: - ! Sets the topo field for virtual columns, in a given elevation class. - ! - ! This is needed because virtual columns (i.e., elevation classes that have no - ! contributing glc grid cells) won't have any topographic information mapped onto - ! them, so would otherwise end up with an elevation of 0. - ! - ! !USES: - ! - ! !ARGUMENTS: - integer, intent(in) :: elev_class - type(mct_aVect), intent(in) :: glc_frac_this_ec_l ! attr vect containing frac_field - character(len=*), intent(in) :: frac_field - character(len=*), intent(in) :: topo_field - type(mct_aVect), intent(inout) :: glc_topo_this_ec_l ! attr vect containing topo_field - ! - ! !LOCAL VARIABLES: - integer :: lsize - real(r8) :: topo_virtual - - ! The following need to be pointers to satisfy the MCT interface: - real(r8), pointer :: frac_l(:) ! ice fraction in this elev class, land grid - real(r8), pointer :: topo_l(:) ! 
topographic height in this elev class, land grid - - character(len=*), parameter :: subname = 'set_virtual_elevation_classes' - !----------------------------------------------------------------------- - - ! Extract fields from attribute vectors - lsize = mct_aVect_lsize(glc_frac_this_ec_l) - SHR_ASSERT_FL(mct_aVect_lsize(glc_topo_this_ec_l) == lsize, __FILE__, __LINE__) - allocate(frac_l(lsize)) - allocate(topo_l(lsize)) - call mct_aVect_exportRattr(glc_frac_this_ec_l, frac_field, frac_l) - call mct_aVect_exportRattr(glc_topo_this_ec_l, topo_field, topo_l) - - ! Set topo field for virtual columns - topo_virtual = glc_mean_elevation_virtual(elev_class) - where (frac_l <= 0) - topo_l = topo_virtual - end where - - ! Put updated field back in attribute vector - call mct_aVect_importRattr(glc_topo_this_ec_l, topo_field, topo_l) - - deallocate(frac_l) - deallocate(topo_l) - - end subroutine set_topo_in_virtual_columns - - !----------------------------------------------------------------------- - subroutine make_aVect_frac_times_icemask(frac_av, mask_av, frac_field, icemask_field, & - frac_times_icemask_av) - ! - ! !DESCRIPTION: - ! Create an attribute vector that is the product of frac_field and icemask_field - ! - ! The resulting frac_times_icemask_av will have a field frac_times_icemask_field which - ! contains this product. This attribute vector is initialized here; it is expected to - ! come in in an uninitialized/cleaned state. (So it needs to be cleaned with a call to - ! mct_aVect_clean later - including before the next call to this routine.) - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect), intent(in) :: frac_av ! attr vect containing frac_field - type(mct_aVect), intent(in) :: mask_av ! attr vect containing icemask_field - character(len=*), intent(in) :: frac_field - character(len=*), intent(in) :: icemask_field - type(mct_aVect), intent(out) :: frac_times_icemask_av ! attr vect that will contain frac_times_icemask_field - ! - ! !LOCAL VARIABLES: - integer :: lsize - - character(len=*), parameter :: subname = 'make_aVect_frac_times_icemask' - !----------------------------------------------------------------------- - - lsize = mct_aVect_lsize(frac_av) - SHR_ASSERT_FL(mct_aVect_lsize(mask_av) == lsize, __FILE__, __LINE__) - - call mct_aVect_init(frac_times_icemask_av, rList = frac_times_icemask_field, lsize = lsize) - call mct_aVect_copy(aVin = frac_av, aVout = frac_times_icemask_av, & - rList = frac_field, TrList = frac_times_icemask_field) - call mct_aVect_mult(frac_times_icemask_av, mask_av, icemask_field) - - end subroutine make_aVect_frac_times_icemask - -end module map_glc2lnd_mod diff --git a/src/drivers/mct/main/map_lnd2glc_mod.F90 b/src/drivers/mct/main/map_lnd2glc_mod.F90 deleted file mode 100644 index 1b418515e91..00000000000 --- a/src/drivers/mct/main/map_lnd2glc_mod.F90 +++ /dev/null @@ -1,481 +0,0 @@ -module map_lnd2glc_mod - - !--------------------------------------------------------------------- - ! - ! Purpose: - ! - ! This module contains routines for mapping fields from the LND grid (separated by GLC - ! elevation class) onto the GLC grid - ! - ! For high-level design, see: - ! 
https://docs.google.com/document/d/1H_SuK6SfCv1x6dK91q80dFInPbLYcOkUj_iAa6WRnqQ/edit - -#include "shr_assert.h" - use seq_comm_mct, only: CPLID, GLCID, logunit - use shr_kind_mod, only : r8 => shr_kind_r8 - use shr_kind_mod, only : cxx => SHR_KIND_CXX - use glc_elevclass_mod, only : glc_get_num_elevation_classes, glc_get_elevation_class, & - glc_elevclass_as_string, glc_all_elevclass_strings, GLC_ELEVCLASS_STRLEN, & - GLC_ELEVCLASS_ERR_NONE, GLC_ELEVCLASS_ERR_TOO_LOW, & - GLC_ELEVCLASS_ERR_TOO_HIGH, glc_errcode_to_string - use mct_mod - use seq_map_type_mod, only : seq_map - use seq_map_mod, only : seq_map_map - use shr_sys_mod, only : shr_sys_abort - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: map_lnd2glc ! map one field from LND -> GLC grid - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: get_glc_elevation_classes ! get the elevation class of each point on the glc grid - private :: map_bare_land ! remap the field of interest for the bare land "elevation class" - private :: map_ice_covered ! remap the field of interest for all elevation classes (excluding bare land) - -contains - - !----------------------------------------------------------------------- - subroutine map_lnd2glc(l2x_l, landfrac_l, g2x_g, fieldname, & - mapper, l2x_g) - ! - ! !DESCRIPTION: - ! Maps one field from the LND grid to the GLC grid. - ! - ! Mapping is done with a multiplication by landfrac on the source grid, with - ! normalization. - ! - ! Sets the given field within l2x_g, leaving the rest of l2x_g untouched. - ! - ! Assumes that l2x_l contains fields like: - ! - fieldname00 - ! - fieldname01 - ! - fieldname02 - ! - etc. - ! - ! and also: - ! - Sl_topo00 - ! - Sl_topo01 - ! - Sl_topo02 - ! - etc. - ! - ! and l2x_g contains a field named 'fieldname' - ! - ! Assumes that landfrac_l contains the field: - ! - lfrac: land fraction on the land grid - ! - ! Assumes that g2x_g contains the following fields: - ! - Sg_ice_covered: whether each glc grid cell is ice-covered (0 or 1) - ! - Sg_topo: ice topographic height on the glc grid - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect) , intent(in) :: l2x_l ! lnd -> cpl fields on the land grid - type(mct_aVect) , intent(in) :: landfrac_l ! lfrac field on the land grid - type(mct_aVect) , intent(in) :: g2x_g ! glc -> cpl fields on the glc grid - character(len=*) , intent(in) :: fieldname ! name of the field to map - type(seq_map) , intent(inout) :: mapper - type(mct_aVect) , intent(inout) :: l2x_g ! lnd -> cpl fields on the glc grid - ! - ! !LOCAL VARIABLES: - - ! fieldname with trailing blanks removed - character(len=:), allocatable :: fieldname_trimmed - - ! number of points on the GLC grid - integer :: lsize_g - - ! data for bare land on the GLC grid - ! needs to be a pointer to satisfy the MCT interface - real(r8), pointer :: data_g_bareland(:) - - ! data for ice-covered regions on the GLC grid - ! needs to be a pointer to satisfy the MCT interface - real(r8), pointer :: data_g_ice_covered(:) - - ! final data on the GLC grid - ! needs to be a pointer to satisfy the MCT interface - real(r8), pointer :: data_g(:) - - ! whether each point on the glc grid is ice-covered (1) or ice-free (0) - ! 
needs to be a pointer to satisfy the MCT interface - real(r8), pointer :: glc_ice_covered(:) - - ! ice topographic height on the glc grid - ! needs to be a pointer to satisfy the MCT interface - real(r8), pointer :: glc_topo(:) - - ! elevation class on the glc grid - ! 0 implies bare ground (no ice) - integer, allocatable :: glc_elevclass(:) - - character(len=*), parameter :: subname = 'map_lnd2glc' - !----------------------------------------------------------------------- - - ! ------------------------------------------------------------------------ - ! Initialize temporary arrays and other local variables - ! ------------------------------------------------------------------------ - - lsize_g = mct_aVect_lsize(l2x_g) - - allocate(data_g_ice_covered(lsize_g)) - allocate(data_g_bareland(lsize_g)) - allocate(data_g(lsize_g)) - - fieldname_trimmed = trim(fieldname) - - ! ------------------------------------------------------------------------ - ! Extract necessary fields from g2x_g - ! ------------------------------------------------------------------------ - - allocate(glc_ice_covered(lsize_g)) - allocate(glc_topo(lsize_g)) - call mct_aVect_exportRattr(g2x_g, 'Sg_ice_covered', glc_ice_covered) - call mct_aVect_exportRattr(g2x_g, 'Sg_topo', glc_topo) - - ! ------------------------------------------------------------------------ - ! Determine elevation class of each glc point - ! ------------------------------------------------------------------------ - - allocate(glc_elevclass(lsize_g)) - call get_glc_elevation_classes(glc_ice_covered, glc_topo, glc_elevclass) - - ! ------------------------------------------------------------------------ - ! Map elevation class 0 (bare land) and ice elevation classes - ! ------------------------------------------------------------------------ - - call map_bare_land(l2x_l, landfrac_l, fieldname_trimmed, mapper, data_g_bareland) - - ! Start by setting the output data equal to the bare land value everywhere; this will - ! later get overwritten in places where we have ice - ! - ! TODO(wjs, 2015-01-20) This implies that we pass data to CISM even in places that - ! CISM says is ocean (so CISM will ignore the incoming value). This differs from the - ! current glint implementation, which sets acab and artm to 0 over ocean (although - ! notes that this could lead to a loss of conservation). Figure out how to handle - ! this case. - data_g(:) = data_g_bareland(:) - - ! Map the SMB to ice-covered cells - call map_ice_covered(l2x_l, landfrac_l, fieldname_trimmed, & - glc_topo, mapper, data_g_ice_covered) - - where (glc_elevclass /= 0) - data_g = data_g_ice_covered - end where - - ! ------------------------------------------------------------------------ - ! Set field in output attribute vector - ! ------------------------------------------------------------------------ - - call mct_aVect_importRattr(l2x_g, fieldname_trimmed, data_g) - - ! ------------------------------------------------------------------------ - ! Clean up - ! ------------------------------------------------------------------------ - - deallocate(data_g_ice_covered) - deallocate(data_g_bareland) - deallocate(data_g) - deallocate(glc_ice_covered) - deallocate(glc_topo) - deallocate(glc_elevclass) - - end subroutine map_lnd2glc - - !----------------------------------------------------------------------- - subroutine get_glc_elevation_classes(glc_ice_covered, glc_topo, glc_elevclass) - ! - ! !DESCRIPTION: - ! Get the elevation class of each point on the glc grid. - ! - ! 
For grid cells that are ice-free, the elevation class is set to 0. - ! - ! All arguments (glc_ice_covered, glc_topo and glc_elevclass) must be the same size. - ! - ! !USES: - ! - ! !ARGUMENTS: - real(r8), intent(in) :: glc_ice_covered(:) ! ice-covered (1) vs. ice-free (0) - real(r8), intent(in) :: glc_topo(:) ! ice topographic height - integer , intent(out) :: glc_elevclass(:) ! elevation class - ! - ! !LOCAL VARIABLES: - integer :: npts - integer :: glc_pt - integer :: err_code - - ! Tolerance for checking whether ice_covered is 0 or 1 - real(r8), parameter :: ice_covered_tol = 1.e-13 - - character(len=*), parameter :: subname = 'get_glc_elevation_classes' - !----------------------------------------------------------------------- - - npts = size(glc_elevclass) - SHR_ASSERT_FL((size(glc_ice_covered) == npts), __FILE__, __LINE__) - SHR_ASSERT_FL((size(glc_topo) == npts), __FILE__, __LINE__) - - do glc_pt = 1, npts - if (abs(glc_ice_covered(glc_pt) - 1._r8) < ice_covered_tol) then - ! This is an ice-covered point - - call glc_get_elevation_class(glc_topo(glc_pt), glc_elevclass(glc_pt), err_code) - if ( err_code == GLC_ELEVCLASS_ERR_NONE .or. & - err_code == GLC_ELEVCLASS_ERR_TOO_LOW .or. & - err_code == GLC_ELEVCLASS_ERR_TOO_HIGH) then - ! These are all acceptable "errors" - it is even okay for these purposes if - ! the elevation is lower than the lower bound of elevation class 1, or - ! higher than the upper bound of the top elevation class. - - ! Do nothing - else - write(logunit,*) subname, ': ERROR getting elevation class for ', glc_pt - write(logunit,*) glc_errcode_to_string(err_code) - call shr_sys_abort(subname//': ERROR getting elevation class') - end if - else if (abs(glc_ice_covered(glc_pt) - 0._r8) < ice_covered_tol) then - ! This is a bare land point (no ice) - glc_elevclass(glc_pt) = 0 - else - ! glc_ice_covered is some value other than 0 or 1 - ! The lnd -> glc downscaling code would need to be reworked if we wanted to - ! handle a continuous fraction between 0 and 1. - write(logunit,*) subname, ': ERROR: glc_ice_covered must be 0 or 1' - write(logunit,*) 'glc_pt, glc_ice_covered = ', glc_pt, glc_ice_covered(glc_pt) - call shr_sys_abort(subname//': ERROR: glc_ice_covered must be 0 or 1') - end if - end do - - end subroutine get_glc_elevation_classes - - !----------------------------------------------------------------------- - subroutine map_bare_land(l2x_l, landfrac_l, fieldname, mapper, data_g_bare_land) - ! - ! !DESCRIPTION: - ! Remaps the field of interest for the bare land "elevation class". - ! - ! Puts the output in data_g_bare_land, which should already be allocated to have size - ! equal to the number of GLC points that this processor is responsible for. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect) , intent(in) :: l2x_l ! lnd -> cpl fields on the land grid - type(mct_aVect) , intent(in) :: landfrac_l ! lfrac field on the land grid - character(len=*) , intent(in) :: fieldname ! name of the field to map (should have NO trailing blanks) - type(seq_map) , intent(inout) :: mapper - real(r8), pointer, intent(inout) :: data_g_bare_land(:) - ! - ! !LOCAL VARIABLES: - character(len=:), allocatable :: elevclass_as_string - character(len=:), allocatable :: fieldname_bare_land - integer :: lsize_g ! number of points for attribute vectors on the glc grid - type(mct_aVect) :: l2x_g_bare_land ! 
temporary attribute vector holding the remapped field for bare land - - character(len=*), parameter :: subname = 'map_bare_land' - !----------------------------------------------------------------------- - - SHR_ASSERT_FL(associated(data_g_bare_land), __FILE__, __LINE__) - - lsize_g = size(data_g_bare_land) - elevclass_as_string = glc_elevclass_as_string(0) - fieldname_bare_land = fieldname // elevclass_as_string - call mct_aVect_init(l2x_g_bare_land, rList = fieldname_bare_land, lsize = lsize_g) - - call seq_map_map(mapper = mapper, av_s = l2x_l, av_d = l2x_g_bare_land, & - fldlist = fieldname_bare_land, & - norm = .true., & - avwts_s = landfrac_l, & - avwtsfld_s = 'lfrac') - call mct_aVect_exportRattr(l2x_g_bare_land, fieldname_bare_land, data_g_bare_land) - - call mct_aVect_clean(l2x_g_bare_land) - - end subroutine map_bare_land - - !----------------------------------------------------------------------- - subroutine map_ice_covered(l2x_l, landfrac_l, fieldname, & - topo_g, mapper, data_g_ice_covered) - - ! - ! !DESCRIPTION: - ! Remaps the field of interest from the land grid (in multiple elevation classes) - ! to the glc grid - ! - ! Puts the output in data_g_ice_covered, which should already be allocated to have size - ! equal to the number of GLC points that this processor is responsible for. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect) , intent(in) :: l2x_l ! lnd -> cpl fields on the land grid - type(mct_aVect) , intent(in) :: landfrac_l ! lfrac field on the land grid - character(len=*) , intent(in) :: fieldname ! name of the field to map (should have NO trailing blanks) - real(r8) , intent(in) :: topo_g(:) ! topographic height for each point on the glc grid - type(seq_map) , intent(inout) :: mapper - real(r8) , intent(out) :: data_g_ice_covered(:) ! field remapped to glc grid - - ! !LOCAL VARIABLES: - - character(len=*), parameter :: toponame = 'Sl_topo' ! base name for topo fields in l2x_l; - ! actual names will have elevation class suffix - - character(len=GLC_ELEVCLASS_STRLEN), allocatable :: all_elevclass_strings(:) - character(len=:), allocatable :: elevclass_as_string - character(len=:), allocatable :: fieldname_ec - character(len=:), allocatable :: toponame_ec - character(len=:), allocatable :: fieldnamelist - character(len=:), allocatable :: toponamelist - character(len=:), allocatable :: totalfieldlist - - integer :: nEC ! number of elevation classes - integer :: lsize_g ! number of cells on glc grid - integer :: n, ec - integer :: strlen - - real(r8) :: elev_l, elev_u ! lower and upper elevations in interpolation range - real(r8) :: d_elev ! elev_u - elev_l - - type(mct_aVect) :: l2x_g_temp ! temporary attribute vector holding the remapped fields for this elevation class - - real(r8), pointer :: tmp_field_g(:) ! must be a pointer to satisfy the MCT interface - real, pointer :: data_g_EC(:,:) ! remapped field in each glc cell, in each EC - real, pointer :: topo_g_EC(:,:) ! remapped topo in each glc cell, in each EC - - ! 1 is probably enough, but use 10 to be safe, in case the length of the delimiter - ! changes - integer, parameter :: extra_len_for_list_merge = 10 - - character(len=*), parameter :: subname = 'map_ice_covered' - !----------------------------------------------------------------------- - - lsize_g = size(data_g_ice_covered) - nEC = glc_get_num_elevation_classes() - SHR_ASSERT_FL((size(topo_g) == lsize_g), __FILE__, __LINE__) - - ! ------------------------------------------------------------------------ - ! Create temporary vectors - ! 
------------------------------------------------------------------------ - - allocate(tmp_field_g(lsize_g)) - allocate(data_g_EC (lsize_g,nEC)) - allocate(topo_g_EC (lsize_g,nEC)) - - ! ------------------------------------------------------------------------ - ! Make a string that concatenates all EC levels of field, as well as the topo - ! The resulting list will look something like this: - ! 'Flgl_qice01:Flgl_qice02: ... :Flgl_qice10:Sl_topo01:Sl_topo02: ... :Sltopo10' - ! ------------------------------------------------------------------------ - - allocate(all_elevclass_strings(1:glc_get_num_elevation_classes())) - all_elevclass_strings = glc_all_elevclass_strings(include_zero = .false.) - fieldnamelist = shr_string_listFromSuffixes( & - suffixes = all_elevclass_strings, & - strBase = fieldname) - toponamelist = shr_string_listFromSuffixes( & - suffixes = all_elevclass_strings, & - strBase = toponame) - strlen = len_trim(fieldnamelist) + len_trim(toponamelist) + extra_len_for_list_merge - allocate(character(len=strlen) :: totalfieldlist) - call shr_string_listMerge(fieldnamelist, toponamelist, totalfieldlist ) - - ! ------------------------------------------------------------------------ - ! Make a temporary attribute vector. - ! For each grid cell on the land grid, this attribute vector contains the field and - ! topo values for all ECs. - ! ------------------------------------------------------------------------ - call mct_aVect_init(l2x_g_temp, rList = totalfieldlist, lsize = lsize_g) - - ! ------------------------------------------------------------------------ - ! Remap all these fields from the land (source) grid to the glc (destination) grid. - ! ------------------------------------------------------------------------ - - call seq_map_map(mapper = mapper, & - av_s = l2x_l, & - av_d = l2x_g_temp, & - fldlist = totalfieldlist, & - norm = .true., & - avwts_s = landfrac_l, & - avwtsfld_s = 'lfrac') - - ! ------------------------------------------------------------------------ - ! Export all elevation classes out of attribute vector and into local 2D arrays (xy,z) - ! ------------------------------------------------------------------------ - - do ec = 1, nEC - elevclass_as_string = glc_elevclass_as_string(ec) - fieldname_ec = fieldname // elevclass_as_string - toponame_ec = toponame // elevclass_as_string - call mct_aVect_exportRattr(l2x_g_temp, fieldname_ec, tmp_field_g) - data_g_EC(:,ec) = real(tmp_field_g) - call mct_aVect_exportRattr(l2x_g_temp, toponame_ec, tmp_field_g) - topo_g_EC(:,ec) = real(tmp_field_g) - enddo - - ! ------------------------------------------------------------------------ - ! Perform vertical interpolation of data onto ice sheet topography - ! ------------------------------------------------------------------------ - - data_g_ice_covered(:) = 0._r8 - - do n = 1, lsize_g - - ! For each ice sheet point, find bounding EC values... - if (topo_g(n) < topo_g_EC(n,1)) then - ! lower than lowest mean EC elevation value - data_g_ice_covered(n) = data_g_EC(n,1) - - else if (topo_g(n) >= topo_g_EC(n,nEC)) then - ! higher than highest mean EC elevation value - data_g_ice_covered(n) = data_g_EC(n,nEC) - - else - ! do linear interpolation of data in the vertical - do ec = 2, nEC - if (topo_g(n) < topo_g_EC(n, ec)) then - elev_l = topo_g_EC(n, ec-1) - elev_u = topo_g_EC(n, ec) - d_elev = elev_u - elev_l - if (d_elev <= 0) then - ! This shouldn't happen, but handle it in case it does. In this case, - ! let's arbitrarily use the mean of the two elevation classes, rather - ! 
than the weighted mean. - write(logunit,*) subname//' WARNING: topo diff between elevation classes <= 0' - write(logunit,*) 'n, ec, elev_l, elev_u = ', n, ec, elev_l, elev_u - write(logunit,*) 'Simply using mean of the two elevation classes,' - write(logunit,*) 'rather than the weighted mean.' - data_g_ice_covered(n) = data_g_EC(n,ec-1) * 0.5_r8 & - + data_g_EC(n,ec) * 0.5_r8 - else - data_g_ice_covered(n) = data_g_EC(n,ec-1) * (elev_u - topo_g(n)) / d_elev & - + data_g_EC(n,ec) * (topo_g(n) - elev_l) / d_elev - end if - - exit - end if - end do - end if ! topo_g(n) - end do ! lsize_g - - ! ------------------------------------------------------------------------ - ! Clean up - ! ------------------------------------------------------------------------ - - deallocate(tmp_field_g) - deallocate(data_g_EC) - deallocate(topo_g_EC) - - call mct_aVect_clean(l2x_g_temp) - - end subroutine map_ice_covered - -end module map_lnd2glc_mod diff --git a/src/drivers/mct/main/map_lnd2rof_irrig_mod.F90 b/src/drivers/mct/main/map_lnd2rof_irrig_mod.F90 deleted file mode 100644 index b7be99281fa..00000000000 --- a/src/drivers/mct/main/map_lnd2rof_irrig_mod.F90 +++ /dev/null @@ -1,287 +0,0 @@ -module map_lnd2rof_irrig_mod - - !--------------------------------------------------------------------- - ! - ! Purpose: - ! - ! This module contains routines for mapping the irrigation field from the LND grid onto - ! the ROF grid. - ! - ! These routines could go in prep_rof_mod, but are separated into their own module for - ! the sake of (1) testability: this module has fewer dependencies than prep_rof_mod; - ! and (2) symmetry with the lnd2glc and glc2lnd custom mapping routines, which also - ! have their own modules. - -#include "shr_assert.h" - use shr_kind_mod, only : r8 => shr_kind_r8 - use mct_mod - use seq_map_type_mod, only : seq_map - use seq_map_mod, only : seq_map_map - use shr_log_mod, only : errMsg => shr_log_errMsg - - implicit none - private - - ! ------------------------------------------------------------------------ - ! Public interfaces - ! ------------------------------------------------------------------------ - - public :: map_lnd2rof_irrig ! map irrigation from lnd -> rof grid - - ! ------------------------------------------------------------------------ - ! Private interfaces - ! ------------------------------------------------------------------------ - - private :: map_rof2lnd_volr ! map volr from rof -> lnd grid - - character(len=*), parameter, private :: sourcefile = & - __FILE__ - -contains - - subroutine map_lnd2rof_irrig(l2r_l, r2x_r, irrig_flux_field, & - avwts_s, avwtsfld_s, mapper_Fl2r, mapper_Fr2l, l2r_r) - !--------------------------------------------------------------- - ! Description - ! Do custom mapping for the irrigation flux, from land -> rof. - ! - ! The basic idea is that we want to pull irrigation out of ROF cells proportionally to - ! the river volume (volr) in each cell. This is important in cases where the various - ! ROF cells overlapping a CLM cell have very different volr: If we didn't do this - ! volr-normalized remapping, we'd try to extract the same amount of water from each - ! of the ROF cells, which would be more likely to have withdrawals exceeding - ! available volr. - ! - ! (Both RTM and MOSART have code to handle excess withdrawals, by pulling the excess - ! directly out of the ocean, but we'd like to avoid resorting to this as much as - ! possible.) - ! - ! This mapping works by: - ! - ! (1) Normalizing the land's irrigation flux by volr - ! - ! 
(2) Mapping this volr-normalized flux to the rof grid - ! - ! (3) Converting the mapped, volr-normalized flux back to a normal - ! (non-volr-normalized) flux on the rof grid. - ! - ! This assumes that the following fields are contained in the attribute vector - ! arguments: - ! - ! - l2r_l: field given by irrig_flux_field (read) - ! - l2r_r: field given by irrig_flux_field (set) - ! - r2x_r: 'Flrr_volrmch' (read) - ! - ! Arguments - type(mct_aVect) , intent(in) :: l2r_l ! lnd -> rof fields on the land grid - type(mct_aVect) , intent(in) :: r2x_r ! rof -> cpl fields on the rof grid - character(len=*) , intent(in) :: irrig_flux_field ! name of irrigation field to remap - type(mct_aVect) , intent(in) :: avwts_s ! attr vect for source weighting - character(len=*) , intent(in) :: avwtsfld_s ! field in avwts_s to use - type(seq_map) , intent(inout) :: mapper_Fl2r ! flux mapper for mapping lnd -> rof - type(seq_map) , intent(inout) :: mapper_Fr2l ! flux mapper for mapping rof -> lnd - type(mct_aVect) , intent(inout) :: l2r_r ! lnd -> rof fields on the rof grid - ! - ! Local variables - integer :: r, l - integer :: lsize_l ! number of land points - integer :: lsize_r ! number of rof points - type(mct_avect) :: irrig_l_av ! temporary attribute vector holding irrigation fluxes on the land grid - type(mct_avect) :: irrig_r_av ! temporary attribute vector holding irrigation fluxes on the rof grid - - ! The following need to be pointers to satisfy the MCT interface: - real(r8), pointer :: volr_r(:) ! river volume on the rof grid - real(r8), pointer :: volr_l(:) ! river volume on the land grid - real(r8), pointer :: irrig_flux_l(:) ! irrigation flux on the land grid [kg m-2 s-1] - real(r8), pointer :: irrig_flux_r(:) ! irrigation flux on the rof grid [kg m-2 s-1] - real(r8), pointer :: irrig_normalized_l(:) ! irrigation normalized by volr, land grid - real(r8), pointer :: irrig_normalized_r(:) ! irrigation normalized by volr, rof grid - real(r8), pointer :: irrig_volr0_l(:) ! irrigation where volr <= 0, land grid - real(r8), pointer :: irrig_volr0_r(:) ! irrigation where volr <= 0, rof grid - - character(len=*), parameter :: volr_field = 'Flrr_volrmch' - character(len=*), parameter :: irrig_normalized_field = 'Flrl_irrig_normalized' - character(len=*), parameter :: irrig_volr0_field = 'Flrl_irrig_volr0' - character(len=*), parameter :: fields_to_remap = & - irrig_normalized_field // ':' // irrig_volr0_field - !--------------------------------------------------------------- - - ! ------------------------------------------------------------------------ - ! Determine attribute vector sizes - ! ------------------------------------------------------------------------ - - lsize_l = mct_aVect_lsize(l2r_l) - lsize_r = mct_aVect_lsize(l2r_r) - - ! ------------------------------------------------------------------------ - ! Extract the necessary fields from attribute vectors - ! ------------------------------------------------------------------------ - - allocate(irrig_flux_l(lsize_l)) - call mct_aVect_exportRattr(l2r_l, irrig_flux_field, irrig_flux_l) - - allocate(volr_r(lsize_r)) - call mct_aVect_exportRattr(r2x_r, volr_field, volr_r) - - ! ------------------------------------------------------------------------ - ! Adjust volr_r, and map it to the land grid - ! ------------------------------------------------------------------------ - - ! Treat any rof point with volr < 0 as if it had volr = 0. Negative volr values can - ! arise in RTM. This fix is needed to avoid mapping negative irrigation to those - ! 
cells: while conservative, this would be unphysical (it would mean that irrigation - ! actually adds water to those cells). - do r = 1, lsize_r - if (volr_r(r) < 0._r8) then - volr_r(r) = 0._r8 - end if - end do - - allocate(volr_l(lsize_l)) - call map_rof2lnd_volr(volr_r, mapper_Fr2l, volr_l) - - ! ------------------------------------------------------------------------ - ! Determine irrigation normalized by volr - ! - ! In order to avoid possible divide by 0, as well as to handle non-sensical negative - ! volr on the land grid, we divide the land's irrigation flux into two separate flux - ! components: a component where we have positive volr on the land grid (put in - ! irrig_normalized_l, which is mapped using volr-normalization) and a component where - ! we have zero or negative volr on the land grid (put in irrig_volr0_l, which is - ! mapped as a standard flux). We then remap both of these components to the rof grid, - ! and then finally add the two components to determine the total irrigation flux on - ! the rof grid. - ! ------------------------------------------------------------------------ - - allocate(irrig_normalized_l(lsize_l)) - allocate(irrig_volr0_l(lsize_l)) - do l = 1, lsize_l - if (volr_l(l) > 0._r8) then - irrig_normalized_l(l) = irrig_flux_l(l) / volr_l(l) - irrig_volr0_l(l) = 0._r8 - else - irrig_normalized_l(l) = 0._r8 - irrig_volr0_l(l) = irrig_flux_l(l) - end if - end do - - ! ------------------------------------------------------------------------ - ! Map irrigation - ! ------------------------------------------------------------------------ - - call mct_aVect_init(irrig_l_av, rList = fields_to_remap, lsize = lsize_l) - call mct_aVect_importRattr(irrig_l_av, irrig_normalized_field, irrig_normalized_l) - call mct_aVect_importRattr(irrig_l_av, irrig_volr0_field, irrig_volr0_l) - call mct_aVect_init(irrig_r_av, rList = fields_to_remap, lsize = lsize_r) - - ! This mapping uses the same options (such as avwts) as is used for mapping all other - ! fields in prep_rof_calc_l2r_rx - call seq_map_map(mapper = mapper_Fl2r, & - av_s = irrig_l_av, & - av_d = irrig_r_av, & - fldlist = fields_to_remap, & - norm = .true., & - avwts_s = avwts_s, & - avwtsfld_s = avwtsfld_s) - - allocate(irrig_normalized_r(lsize_r)) - allocate(irrig_volr0_r(lsize_r)) - call mct_aVect_exportRattr(irrig_r_av, irrig_normalized_field, irrig_normalized_r) - call mct_aVect_exportRattr(irrig_r_av, irrig_volr0_field, irrig_volr0_r) - - ! ------------------------------------------------------------------------ - ! Convert to a total irrigation flux on the ROF grid, and put this in the l2r_rx - ! attribute vector - ! ------------------------------------------------------------------------ - - allocate(irrig_flux_r(lsize_r)) - do r = 1, lsize_r - irrig_flux_r(r) = (irrig_normalized_r(r) * volr_r(r)) + irrig_volr0_r(r) - end do - - call mct_aVect_importRattr(l2r_r, irrig_flux_field, irrig_flux_r) - - ! ------------------------------------------------------------------------ - ! Clean up - ! 
------------------------------------------------------------------------ - - deallocate(volr_r) - deallocate(volr_l) - deallocate(irrig_flux_l) - deallocate(irrig_flux_r) - deallocate(irrig_normalized_l) - deallocate(irrig_normalized_r) - deallocate(irrig_volr0_l) - deallocate(irrig_volr0_r) - call mct_aVect_clean(irrig_l_av) - call mct_aVect_clean(irrig_r_av) - - end subroutine map_lnd2rof_irrig - - subroutine map_rof2lnd_volr(volr_r, mapper_Fr2l, volr_l) - !--------------------------------------------------------------- - ! Description - ! Map volr from the rof grid to the lnd grid. - ! - ! This is needed for the volr-normalization that is done in map_lnd2rof_irrig. - ! - ! Note that this mapping is also done in the course of mapping all rof -> lnd fields - ! in prep_lnd_calc_r2x_lx. However, we do this mapping ourselves here for two reasons: - ! - ! (1) For the sake of this normalization, we change all volr < 0 values to 0; this is - ! not done for the standard rof -> lnd mapping. - ! - ! (2) It's possible that the driver sequencing would be changed such that this rof -> - ! lnd mapping happens before the lnd -> rof mapping. If that happened, then volr_l - ! (i.e., volr that has been mapped to the land grid by prep_lnd_calc_r2x_lx) would - ! be inconsistent with volr_r, which would be a Bad Thing for the - ! volr-normalizated mapping (this mapping would no longer be conservative). So we - ! do the rof -> lnd remapping here to ensure we have a volr_l that is consistent - ! with volr_r. - ! - ! The pointer arguments to this routine should already be allocated to be the - ! appropriate size. - ! - ! Arguments - real(r8), pointer, intent(in) :: volr_r(:) ! river volume on the rof grid (input) - type(seq_map) , intent(inout) :: mapper_Fr2l ! flux mapper for mapping rof -> lnd - real(r8), pointer, intent(inout) :: volr_l(:) ! river volume on the lnd grid (output) (technically intent(in) since intent gives the association status of a pointer, but given as intent(inout) to avoid confusion, since its data are modified) - ! - ! Local variables - integer :: lsize_r ! number of rof points - integer :: lsize_l ! number of lnd points - type(mct_avect) :: volr_r_av ! temporary attribute vector holding volr on the rof grid - type(mct_avect) :: volr_l_av ! temporary attribute vector holding volr on the land grid - - ! This volr field name does not need to agree with the volr field name used in the - ! 'real' attribute vectors - character(len=*), parameter :: volr_field = 'volr' - !--------------------------------------------------------------- - - SHR_ASSERT_FL(associated(volr_r), sourcefile, __LINE__) - SHR_ASSERT_FL(associated(volr_l), sourcefile, __LINE__) - - lsize_r = size(volr_r) - lsize_l = size(volr_l) - - call mct_aVect_init(volr_r_av, rList = volr_field, lsize = lsize_r) - call mct_aVect_importRattr(volr_r_av, volr_field, volr_r) - call mct_aVect_init(volr_l_av, rList = volr_field, lsize = lsize_l) - - ! This mapping uses the same options as the standard rof -> lnd mapping done in - ! prep_lnd_calc_r2x_lx. If that mapping ever changed (e.g., introducing an avwts_s - ! argument), then it's *possible* that we'd want this mapping to change, too. - call seq_map_map(mapper = mapper_Fr2l, & - av_s = volr_r_av, & - av_d = volr_l_av, & - fldlist = volr_field, & - norm = .true.) 
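Illustrative sketch (not part of the removed module): the volr-normalized remapping described above can be reduced to a tiny stand-alone example. The variable names echo the removed code (irrig_flux_l, volr_r, irrig_normalized_l, irrig_volr0_l), but the numbers, the one-land-cell / two-rof-cell setup, and the 50/50 overlap weights are hypothetical, and no MCT attribute vectors or mapping files are involved.

program irrig_volr_norm_sketch
  ! Hypothetical setup: one land cell overlapping two rof cells that have
  ! very different river volumes (volr).
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: irrig_flux_l          ! irrigation flux on the single land cell
  real(r8) :: volr_l                ! river volume mapped rof -> lnd
  real(r8) :: volr_r(2)             ! river volume in the two rof cells
  real(r8) :: wts(2)                ! overlap weights of the land cell on the rof cells
  real(r8) :: irrig_normalized_l    ! flux / volr where volr > 0
  real(r8) :: irrig_volr0_l         ! flux where volr <= 0
  real(r8) :: irrig_flux_r(2)       ! reconstructed flux on the rof grid
  integer  :: r

  irrig_flux_l = 4.0_r8
  volr_r       = [3.0_r8, 1.0_r8]   ! rof cells with very different volr
  wts          = [0.5_r8, 0.5_r8]   ! hypothetical equal-area overlaps

  ! Map volr to the land grid with the same weights (role of map_rof2lnd_volr)
  volr_l = sum(wts * volr_r)

  ! Split the land flux into a volr-normalized part and a volr<=0 part
  if (volr_l > 0.0_r8) then
     irrig_normalized_l = irrig_flux_l / volr_l
     irrig_volr0_l      = 0.0_r8
  else
     irrig_normalized_l = 0.0_r8
     irrig_volr0_l      = irrig_flux_l
  end if

  ! "Remap" both parts (a plain copy here, since there is one land cell) and
  ! rebuild the total, non-normalized flux on each rof cell
  do r = 1, 2
     irrig_flux_r(r) = irrig_normalized_l * volr_r(r) + irrig_volr0_l
  end do

  ! The cell with the larger river volume supplies more of the irrigation:
  ! irrig_flux_r = [6.0, 2.0]; the mean over the two equal-area rof halves
  ! is 4.0, the original land flux, so the remapping is conservative.
  print *, 'irrig_flux_r = ', irrig_flux_r
end program irrig_volr_norm_sketch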
- - call mct_aVect_exportRattr(volr_l_av, volr_field, volr_l) - - call mct_aVect_clean(volr_r_av) - call mct_aVect_clean(volr_l_av) - - end subroutine map_rof2lnd_volr - -end module map_lnd2rof_irrig_mod diff --git a/src/drivers/mct/main/mrg_mod.F90 b/src/drivers/mct/main/mrg_mod.F90 deleted file mode 100644 index 225552a1712..00000000000 --- a/src/drivers/mct/main/mrg_mod.F90 +++ /dev/null @@ -1,942 +0,0 @@ -module mrg_mod - - use shr_kind_mod, only: r8 => shr_kind_r8, cl => shr_kind_cl - use mct_mod - use seq_cdata_mod - use seq_comm_mct - use seq_infodata_mod - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! TODO - write summary of naming convention here as well - !-------------------------------------------------------------------------- - - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: mrg_x2a_run_mct - public :: mrg_x2i_run_mct - public :: mrg_x2l_run_mct - public :: mrg_x2r_run_mct - public :: mrg_x2o_run_mct - public :: mrg_x2g_run_mct - public :: mrg_x2s_run_mct - public :: mrg_x2w_run_mct - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: getfld - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - !=========================================================================================== -contains - !=========================================================================================== - - subroutine mrg_x2a_run_mct( cdata_a, l2x_a, o2x_a, xao_a, i2x_a, fractions_a, x2a_a ) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(seq_cdata), intent(in) :: cdata_a - type(mct_aVect), intent(in) :: l2x_a - type(mct_aVect), intent(in) :: o2x_a - type(mct_aVect), intent(in) :: xao_a - type(mct_aVect), intent(in) :: i2x_a - type(mct_aVect), intent(in) :: fractions_a - type(mct_aVect), intent(inout) :: x2a_a - !----------------------------------------------------------------------- - ! - ! Local workspace - ! - real(r8) :: fracl, fraci, fraco - integer :: n,ka,ki,kl,ko,kx,kof,kif,klf - integer :: lsize - integer :: index_x2a_Sf_lfrac - integer :: index_x2a_Sf_ifrac - integer :: index_x2a_Sf_ofrac - character(CL) :: field_atm ! string converted to char - character(CL) :: field_lnd ! string converted to char - character(CL) :: field_ice ! string converted to char - character(CL) :: field_xao ! string converted to char - character(CL) :: field_ocn ! string converted to char - character(CL) :: itemc_atm ! string converted to char - character(CL) :: itemc_lnd ! string converted to char - character(CL) :: itemc_ice ! string converted to char - character(CL) :: itemc_xao ! string converted to char - character(CL) :: itemc_ocn ! string converted to char - logical :: iamroot - logical :: first_time = .true. - logical, pointer, save :: lmerge(:),imerge(:),xmerge(:),omerge(:) - integer, pointer, save :: lindx(:), iindx(:), oindx(:),xindx(:) - integer, save :: naflds, klflds,niflds,noflds,nxflds - !----------------------------------------------------------------------- - ! 
- call seq_comm_setptrs(CPLID, iamroot=iamroot) - - if (first_time) then - - naflds = mct_aVect_nRattr(x2a_a) - klflds = mct_aVect_nRattr(l2x_a) - niflds = mct_aVect_nRattr(i2x_a) - noflds = mct_aVect_nRattr(o2x_a) - nxflds = mct_aVect_nRattr(xao_a) - - allocate(lindx(naflds), lmerge(naflds)) - allocate(iindx(naflds), imerge(naflds)) - allocate(xindx(naflds), xmerge(naflds)) - allocate(oindx(naflds), omerge(naflds)) - - lindx(:) = 0 - iindx(:) = 0 - xindx(:) = 0 - oindx(:) = 0 - lmerge(:) = .true. - imerge(:) = .true. - xmerge(:) = .true. - omerge(:) = .true. - - ! Field naming rules - ! Only atm states that are Sx_... will be merged - ! Only fluxes that are F??x_... will be merged - ! All fluxes will be multiplied by corresponding component fraction - - do ka = 1,naflds - call getfld(ka, x2a_a, field_atm, itemc_atm) - if (field_atm(1:2) == 'PF') then - cycle ! if flux has first character as P, pass straight through - end if - if (field_atm(1:1) == 'S' .and. field_atm(2:2) /= 'x') then - cycle ! any state fields that are not Sx_ will just be copied - end if - - do kl = 1,klflds - call getfld(kl, l2x_a, field_lnd, itemc_lnd) - if (trim(itemc_atm) == trim(itemc_lnd)) then - if ((trim(field_atm) == trim(field_lnd))) then - if (field_lnd(1:1) == 'F') lmerge(ka) = .false. - end if - lindx(ka) = kl - exit - end if - end do - do ki = 1,niflds - call getfld(ki, i2x_a, field_ice, itemc_ice) - if (field_ice(1:1) == 'F' .and. field_ice(2:4) == 'ioi') then - cycle ! ignore all fluxes that are ice/ocn fluxes - end if - if (trim(itemc_atm) == trim(itemc_ice)) then - if ((trim(field_atm) == trim(field_ice))) then - if (field_ice(1:1) == 'F') imerge(ka) = .false. - end if - iindx(ka) = ki - exit - end if - end do - do kx = 1,nxflds - call getfld(kx, xao_a, field_xao, itemc_xao) - if (trim(itemc_atm) == trim(itemc_xao)) then - if ((trim(field_atm) == trim(field_xao))) then - if (field_xao(1:1) == 'F') xmerge(ka) = .false. - end if - xindx(ka) = kx - exit - end if - end do - do ko = 1,noflds - call getfld(ko, o2x_a, field_ocn, itemc_ocn) - if (trim(itemc_atm) == trim(itemc_ocn)) then - if ((trim(field_atm) == trim(field_ocn))) then - if (field_ocn(1:1) == 'F') omerge(ka) = .false. - end if - oindx(ka) = ko - exit - end if - end do - if (lindx(ka) == 0) itemc_lnd = 'unset' - if (iindx(ka) == 0) itemc_ice = 'unset' - if (xindx(ka) == 0) itemc_xao = 'unset' - if (oindx(ka) == 0) itemc_ocn = 'unset' - - if (iamroot) then - write(logunit,10)trim(itemc_atm),trim(itemc_lnd),& - trim(itemc_ice),trim(itemc_xao),trim(itemc_ocn) -10 format(' ',' atm field: ',a15,', lnd merge: ',a15, & - ', ice merge: ',a15,', xao merge: ',a15,', ocn merge: ',a15) - write(logunit, *)'field_atm,lmerge, imerge, xmerge, omerge= ',& - trim(field_atm),lmerge(ka),imerge(ka),xmerge(ka),omerge(ka) - end if - end do - first_time = .false. - end if - - ! Zero attribute vector - - call mct_avect_zero(x2a_a) - - ! Update surface fractions - - kif=mct_aVect_indexRA(fractions_a,"ifrac") - klf=mct_aVect_indexRA(fractions_a,"lfrac") - kof=mct_aVect_indexRA(fractions_a,"ofrac") - lsize = mct_avect_lsize(x2a_a) - - index_x2a_Sf_lfrac = mct_aVect_indexRA(x2a_a,'Sf_lfrac') - index_x2a_Sf_ifrac = mct_aVect_indexRA(x2a_a,'Sf_ifrac') - index_x2a_Sf_ofrac = mct_aVect_indexRA(x2a_a,'Sf_ofrac') - do n = 1,lsize - x2a_a%rAttr(index_x2a_Sf_lfrac,n) = fractions_a%Rattr(klf,n) - x2a_a%rAttr(index_x2a_Sf_ifrac,n) = fractions_a%Rattr(kif,n) - x2a_a%rAttr(index_x2a_Sf_ofrac,n) = fractions_a%Rattr(kof,n) - end do - - ! Copy attributes that do not need to be merged - ! 
These are assumed to have the same name in - ! (o2x_a and x2a_a) and in (l2x_a and x2a_a), etc. - - call mct_aVect_copy(aVin=l2x_a, aVout=x2a_a, vector=mct_usevector) - call mct_aVect_copy(aVin=o2x_a, aVout=x2a_a, vector=mct_usevector) - call mct_aVect_copy(aVin=i2x_a, aVout=x2a_a, vector=mct_usevector) - call mct_aVect_copy(aVin=xao_a, aVout=x2a_a, vector=mct_usevector) - - ! If flux to atm is coming only from the ocean (based on field being in o2x_a) - - ! -- then scale by both ocean and ice fraction - ! If flux to atm is coming only from the land or ice or coupler - ! -- then do scale by fraction above - - do ka = 1,naflds - do n = 1,lsize - fracl = fractions_a%Rattr(klf,n) - fraci = fractions_a%Rattr(kif,n) - fraco = fractions_a%Rattr(kof,n) - if (lindx(ka) > 0 .and. fracl > 0._r8) then - if (lmerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + l2x_a%rAttr(lindx(ka),n) * fracl - else - x2a_a%rAttr(ka,n) = l2x_a%rAttr(lindx(ka),n) * fracl - end if - end if - if (iindx(ka) > 0 .and. fraci > 0._r8) then - if (imerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + i2x_a%rAttr(iindx(ka),n) * fraci - else - x2a_a%rAttr(ka,n) = i2x_a%rAttr(iindx(ka),n) * fraci - end if - end if - if (xindx(ka) > 0 .and. fraco > 0._r8) then - if (xmerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + xao_a%rAttr(xindx(ka),n) * fraco - else - x2a_a%rAttr(ka,n) = xao_a%rAttr(xindx(ka),n) * fraco - end if - end if - if (oindx(ka) > 0) then - if (omerge(ka) .and. fraco > 0._r8) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + o2x_a%rAttr(oindx(ka),n) * fraco - end if - if (.not. omerge(ka)) then - !--- NOTE: This IS using the ocean fields and ice fraction !! --- - x2a_a%rAttr(ka,n) = o2x_a%rAttr(oindx(ka),n) * fraci - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + o2x_a%rAttr(oindx(ka),n) * fraco - end if - end if - end do - end do - - end subroutine mrg_x2a_run_mct - - !-------------------------------------------------------------------------- - - subroutine mrg_x2i_run_mct( cdata_i, a2x_i, o2x_i, x2i_i ) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(seq_cdata),intent(in) :: cdata_i - type(mct_aVect),intent(in) :: a2x_i - type(mct_aVect),intent(in) :: o2x_i - type(mct_aVect),intent(inout):: x2i_i - ! - ! Local variables - ! - integer :: i - real(r8):: flux_epbalfact - type(seq_infodata_type),pointer :: infodata - integer, save :: index_a2x_Faxa_rainc - integer, save :: index_a2x_Faxa_rainl - integer, save :: index_a2x_Faxa_snowc - integer, save :: index_a2x_Faxa_snowl - integer, save :: index_x2i_Faxa_rain - integer, save :: index_x2i_Faxa_snow - logical, save :: first_time = .true. - logical, save :: flds_wiso = .false. 
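Illustrative sketch (not part of the removed module): the fraction-weighted accumulation performed in mrg_x2a_run_mct above amounts to scaling each contributing flux by its surface fraction and summing the scaled contributions. The fraction and flux names mirror the removed code; the values below are hypothetical.

program frac_weighted_merge_sketch
  ! One grid cell: 20% land, 30% sea ice, 50% open ocean (hypothetical).
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: fracl, fraci, fraco          ! land / ice / open-ocean fractions
  real(r8) :: flux_lnd, flux_ice, flux_ocn ! contributions from each component
  real(r8) :: flux_atm                     ! merged value seen by the atmosphere

  fracl = 0.2_r8;  fraci = 0.3_r8;  fraco = 0.5_r8
  flux_lnd = 10.0_r8;  flux_ice = 5.0_r8;  flux_ocn = 2.0_r8

  ! Accumulate only where the corresponding fraction is nonzero
  flux_atm = 0.0_r8
  if (fracl > 0.0_r8) flux_atm = flux_atm + flux_lnd * fracl
  if (fraci > 0.0_r8) flux_atm = flux_atm + flux_ice * fraci
  if (fraco > 0.0_r8) flux_atm = flux_atm + flux_ocn * fraco

  print *, 'merged flux = ', flux_atm   ! 2.0 + 1.5 + 1.0 = 4.5
end program frac_weighted_merge_sketch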
- - !wiso fields: - integer, save :: index_a2x_Faxa_rainc_16O - integer, save :: index_a2x_Faxa_rainl_16O - integer, save :: index_a2x_Faxa_snowc_16O - integer, save :: index_a2x_Faxa_snowl_16O - integer, save :: index_x2i_Faxa_rain_16O - integer, save :: index_x2i_Faxa_snow_16O - integer, save :: index_a2x_Faxa_rainc_18O - integer, save :: index_a2x_Faxa_rainl_18O - integer, save :: index_a2x_Faxa_snowc_18O - integer, save :: index_a2x_Faxa_snowl_18O - integer, save :: index_x2i_Faxa_rain_18O - integer, save :: index_x2i_Faxa_snow_18O - integer, save :: index_a2x_Faxa_rainc_HDO - integer, save :: index_a2x_Faxa_rainl_HDO - integer, save :: index_a2x_Faxa_snowc_HDO - integer, save :: index_a2x_Faxa_snowl_HDO - integer, save :: index_x2i_Faxa_rain_HDO - integer, save :: index_x2i_Faxa_snow_HDO - - !----------------------------------------------------------------------- - - if (first_time) then - index_a2x_Faxa_snowc = mct_aVect_indexRA(a2x_i,'Faxa_snowc') - index_a2x_Faxa_snowl = mct_aVect_indexRA(a2x_i,'Faxa_snowl') - index_a2x_Faxa_rainc = mct_aVect_indexRA(a2x_i,'Faxa_rainc') - index_a2x_Faxa_rainl = mct_aVect_indexRA(a2x_i,'Faxa_rainl') - index_x2i_Faxa_rain = mct_aVect_indexRA(x2i_i,'Faxa_rain' ) - index_x2i_Faxa_snow = mct_aVect_indexRA(x2i_i,'Faxa_snow' ) - - ! H2_16O - index_a2x_Faxa_snowc_16O = mct_aVect_indexRA(a2x_i,'Faxa_snowc_16O', perrWith='quiet') - index_a2x_Faxa_snowl_16O = mct_aVect_indexRA(a2x_i,'Faxa_snowl_16O', perrWith='quiet') - index_a2x_Faxa_rainc_16O = mct_aVect_indexRA(a2x_i,'Faxa_rainc_16O', perrWith='quiet') - index_a2x_Faxa_rainl_16O = mct_aVect_indexRA(a2x_i,'Faxa_rainl_16O', perrWith='quiet') - index_x2i_Faxa_rain_16O = mct_aVect_indexRA(x2i_i,'Faxa_rain_16O' , perrWith='quiet') - index_x2i_Faxa_snow_16O = mct_aVect_indexRA(x2i_i,'Faxa_snow_16O' , perrWith='quiet') - if ( index_x2i_Faxa_rain_16O /= 0 ) flds_wiso = .true. - ! H2_18O - index_a2x_Faxa_snowc_18O = mct_aVect_indexRA(a2x_i,'Faxa_snowc_18O', perrWith='quiet') - index_a2x_Faxa_snowl_18O = mct_aVect_indexRA(a2x_i,'Faxa_snowl_18O', perrWith='quiet') - index_a2x_Faxa_rainc_18O = mct_aVect_indexRA(a2x_i,'Faxa_rainc_18O', perrWith='quiet') - index_a2x_Faxa_rainl_18O = mct_aVect_indexRA(a2x_i,'Faxa_rainl_18O', perrWith='quiet') - index_x2i_Faxa_rain_18O = mct_aVect_indexRA(x2i_i,'Faxa_rain_18O' , perrWith='quiet') - index_x2i_Faxa_snow_18O = mct_aVect_indexRA(x2i_i,'Faxa_snow_18O' , perrWith='quiet') - if ( index_x2i_Faxa_rain_18O /= 0 ) flds_wiso = .true. - ! HDO - index_a2x_Faxa_snowc_HDO = mct_aVect_indexRA(a2x_i,'Faxa_snowc_HDO', perrWith='quiet') - index_a2x_Faxa_snowl_HDO = mct_aVect_indexRA(a2x_i,'Faxa_snowl_HDO', perrWith='quiet') - index_a2x_Faxa_rainc_HDO = mct_aVect_indexRA(a2x_i,'Faxa_rainc_HDO', perrWith='quiet') - index_a2x_Faxa_rainl_HDO = mct_aVect_indexRA(a2x_i,'Faxa_rainl_HDO', perrWith='quiet') - index_x2i_Faxa_rain_HDO = mct_aVect_indexRA(x2i_i,'Faxa_rain_HDO' , perrWith='quiet') - index_x2i_Faxa_snow_HDO = mct_aVect_indexRA(x2i_i,'Faxa_snow_HDO' , perrWith='quiet') - if ( index_x2i_Faxa_rain_HDO /= 0 ) flds_wiso = .true. - - first_time = .false. - end if - - ! Apply correction to precipitation of requested driver namelist - call seq_cdata_setptrs(cdata_i,infodata=infodata) - call seq_infodata_GetData(infodata, flux_epbalfact = flux_epbalfact) - - call mct_aVect_copy(aVin=o2x_i, aVout=x2i_i, vector=mct_usevector) - call mct_aVect_copy(aVin=a2x_i, aVout=x2i_i, vector=mct_usevector) - - ! Merge total snow and precip for ice input - ! 
Scale total precip and runoff by flux_epbalfact - - do i = 1,mct_aVect_lsize(x2i_i) - x2i_i%rAttr(index_x2i_Faxa_rain,i) = a2x_i%rAttr(index_a2x_Faxa_rainc,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl,i) - x2i_i%rAttr(index_x2i_Faxa_snow,i) = a2x_i%rAttr(index_a2x_Faxa_snowc,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl,i) - - x2i_i%rAttr(index_x2i_Faxa_rain,i) = x2i_i%rAttr(index_x2i_Faxa_rain,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow,i) = x2i_i%rAttr(index_x2i_Faxa_snow,i) * flux_epbalfact - - end do - if ( flds_wiso )then - do i = 1,mct_aVect_lsize(x2i_i) - !H2_16O - x2i_i%rAttr(index_x2i_Faxa_rain_16O,i) = a2x_i%rAttr(index_a2x_Faxa_rainc_16O,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl_16O,i) - x2i_i%rAttr(index_x2i_Faxa_snow_16O,i) = a2x_i%rAttr(index_a2x_Faxa_snowc_16O,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl_16O,i) - !H2_18O - x2i_i%rAttr(index_x2i_Faxa_rain_18O,i) = a2x_i%rAttr(index_a2x_Faxa_rainc_18O,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl_18O,i) - x2i_i%rAttr(index_x2i_Faxa_snow_18O,i) = a2x_i%rAttr(index_a2x_Faxa_snowc_18O,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl_18O,i) - !HDO - x2i_i%rAttr(index_x2i_Faxa_rain_HDO,i) = a2x_i%rAttr(index_a2x_Faxa_rainc_HDO,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl_HDO,i) - x2i_i%rAttr(index_x2i_Faxa_snow_HDO,i) = a2x_i%rAttr(index_a2x_Faxa_snowc_HDO,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl_HDO,i) - - x2i_i%rAttr(index_x2i_Faxa_rain_16O,i) = x2i_i%rAttr(index_x2i_Faxa_rain_16O,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow_16O,i) = x2i_i%rAttr(index_x2i_Faxa_snow_16O,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_rain_18O,i) = x2i_i%rAttr(index_x2i_Faxa_rain_18O,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow_18O,i) = x2i_i%rAttr(index_x2i_Faxa_snow_18O,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_rain_HDO,i) = x2i_i%rAttr(index_x2i_Faxa_rain_HDO,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow_HDO,i) = x2i_i%rAttr(index_x2i_Faxa_snow_HDO,i) * flux_epbalfact - - end do - end if - - end subroutine mrg_x2i_run_mct - - !-------------------------------------------------------------------------- - - subroutine mrg_x2r_run_mct( cdata_r, l2x_r, fractions_r, x2r_r) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(seq_cdata),intent(in) :: cdata_r - type(mct_aVect),intent(in) :: l2x_r - type(mct_aVect),intent(in) :: fractions_r - type(mct_aVect),intent(inout):: x2r_r - ! - ! Local variables - ! - integer :: i - integer, save :: index_l2x_Flrl_rofliq - integer, save :: index_l2x_Flrl_rofice - integer, save :: index_x2r_Flrl_rofliq - integer, save :: index_x2r_Flrl_rofice - integer, save :: index_l2x_Flrl_rofliq_16O - integer, save :: index_l2x_Flrl_rofice_16O - integer, save :: index_x2r_Flrl_rofliq_16O - integer, save :: index_x2r_Flrl_rofice_16O - integer, save :: index_l2x_Flrl_rofliq_18O - integer, save :: index_l2x_Flrl_rofice_18O - integer, save :: index_x2r_Flrl_rofliq_18O - integer, save :: index_x2r_Flrl_rofice_18O - integer, save :: index_l2x_Flrl_rofliq_HDO - integer, save :: index_l2x_Flrl_rofice_HDO - integer, save :: index_x2r_Flrl_rofliq_HDO - integer, save :: index_x2r_Flrl_rofice_HDO - integer, save :: index_lfrac - logical, save :: first_time = .true. - logical, save :: flds_wiso = .false. 
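Illustrative sketch (not part of the removed module): the ice-input merge above sums the convective and large-scale precipitation components and scales the totals by flux_epbalfact. The component names mirror the removed code; all values, including the correction factor, are hypothetical.

program prec_merge_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: rainc, rainl, snowc, snowl    ! a2x precipitation components
  real(r8) :: rain, snow                    ! merged x2i totals
  real(r8) :: flux_epbalfact                ! precipitation balance correction factor

  rainc = 1.0e-5_r8;  rainl = 3.0e-5_r8     ! kg m-2 s-1 (hypothetical)
  snowc = 2.0e-6_r8;  snowl = 8.0e-6_r8
  flux_epbalfact = 1.02_r8                  ! hypothetical correction factor

  ! Sum convective + large-scale, then apply the correction factor
  rain = (rainc + rainl) * flux_epbalfact
  snow = (snowc + snowl) * flux_epbalfact

  print *, 'Faxa_rain, Faxa_snow = ', rain, snow
end program prec_merge_sketch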
- real(r8) :: lfrac - !----------------------------------------------------------------------- - - if (first_time) then - index_l2x_Flrl_rofliq = mct_aVect_indexRA(l2x_r,'Flrl_rofliq' ) - index_l2x_Flrl_rofice = mct_aVect_indexRA(l2x_r,'Flrl_rofice' ) - index_x2r_Flrl_rofliq = mct_aVect_indexRA(x2r_r,'Flrl_rofliq' ) - index_x2r_Flrl_rofice = mct_aVect_indexRA(x2r_r,'Flrl_rofice' ) - index_l2x_Flrl_rofliq_16O = mct_aVect_indexRA(l2x_r,'Flrl_rofliq_16O', perrWith='quiet' ) - index_l2x_Flrl_rofice_16O = mct_aVect_indexRA(l2x_r,'Flrl_rofice_16O', perrWith='quiet' ) - index_x2r_Flrl_rofliq_16O = mct_aVect_indexRA(x2r_r,'Flrl_rofliq_16O', perrWith='quiet' ) - index_x2r_Flrl_rofice_16O = mct_aVect_indexRA(x2r_r,'Flrl_rofice_16O', perrWith='quiet' ) - if ( index_l2x_Flrl_rofliq_16O /= 0 ) flds_wiso = .true. - index_l2x_Flrl_rofliq_18O = mct_aVect_indexRA(l2x_r,'Flrl_rofliq_18O', perrWith='quiet' ) - index_l2x_Flrl_rofice_18O = mct_aVect_indexRA(l2x_r,'Flrl_rofice_18O', perrWith='quiet' ) - index_x2r_Flrl_rofliq_18O = mct_aVect_indexRA(x2r_r,'Flrl_rofliq_18O', perrWith='quiet' ) - index_x2r_Flrl_rofice_18O = mct_aVect_indexRA(x2r_r,'Flrl_rofice_18O', perrWith='quiet' ) - if ( index_l2x_Flrl_rofliq_18O /= 0 ) flds_wiso = .true. - index_l2x_Flrl_rofliq_HDO = mct_aVect_indexRA(l2x_r,'Flrl_rofliq_HDO', perrWith='quiet' ) - index_l2x_Flrl_rofice_HDO = mct_aVect_indexRA(l2x_r,'Flrl_rofice_HDO', perrWith='quiet' ) - index_x2r_Flrl_rofliq_HDO = mct_aVect_indexRA(x2r_r,'Flrl_rofliq_HDO', perrWith='quiet' ) - index_x2r_Flrl_rofice_HDO = mct_aVect_indexRA(x2r_r,'Flrl_rofice_HDO', perrWith='quiet' ) - if ( index_l2x_Flrl_rofliq_HDO /= 0 ) flds_wiso = .true. - index_lfrac = mct_aVect_indexRA(fractions_r,"lfrac") - first_time = .false. - end if - - ! Merge land rof and ice forcing for rof input - - do i = 1,mct_aVect_lsize(x2r_r) - lfrac = fractions_r%rAttr(index_lfrac,i) - x2r_r%rAttr(index_x2r_Flrl_rofliq,i) = l2x_r%rAttr(index_l2x_Flrl_rofliq,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofice,i) = l2x_r%rAttr(index_l2x_Flrl_rofice,i) * lfrac - end do - if ( flds_wiso ) then - do i = 1,mct_aVect_lsize(x2r_r) - lfrac = fractions_r%rAttr(index_lfrac,i) - x2r_r%rAttr(index_x2r_Flrl_rofliq_16O,i) = l2x_r%rAttr(index_l2x_Flrl_rofliq_16O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofice_16O,i) = l2x_r%rAttr(index_l2x_Flrl_rofice_16O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofliq_18O,i) = l2x_r%rAttr(index_l2x_Flrl_rofliq_18O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofice_18O,i) = l2x_r%rAttr(index_l2x_Flrl_rofice_18O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofliq_HDO,i) = l2x_r%rAttr(index_l2x_Flrl_rofliq_HDO,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofice_HDO,i) = l2x_r%rAttr(index_l2x_Flrl_rofice_HDO,i) * lfrac - end do - end if - - end subroutine mrg_x2r_run_mct - - !-------------------------------------------------------------------------- - - subroutine mrg_x2l_run_mct( cdata_l, a2x_l, r2l_l, x2l_l ) - - !----------------------------------------------------------------------- - ! Arguments - ! - type(seq_cdata), intent(in) :: cdata_l - type(mct_aVect), intent(in) :: a2x_l ! input - type(mct_aVect), intent(in) :: r2l_l ! input - type(mct_aVect), intent(inout) :: x2l_l ! output - !----------------------------------------------------------------------- - - ! 
Create input land state directly from atm and runoff outputs - call mct_aVect_copy(aVin=a2x_l, aVout=x2l_l, vector=mct_usevector) - call mct_aVect_copy(aVin=r2l_l, aVout=x2l_l, vector=mct_usevector) - - end subroutine mrg_x2l_run_mct - - !-------------------------------------------------------------------------- - - subroutine mrg_x2o_run_mct( cdata_o, a2x_o, i2x_o, w2x_o, xao_o, fractions_o, x2o_o ) - - !----------------------------------------------------------------------- - ! Arguments - type(seq_cdata), intent(in) :: cdata_o - type(mct_aVect), intent(in) :: a2x_o - type(mct_aVect), intent(in) :: i2x_o - type(mct_aVect), intent(in) :: w2x_o - type(mct_aVect), intent(in) :: xao_o - type(mct_aVect), intent(in) :: fractions_o - type(mct_aVect), intent(inout) :: x2o_o - ! - ! Local variables - ! - integer :: n,ki,ko,kir,kor - integer :: lsize - real(r8) :: ifrac,ifracr - real(r8) :: afrac,afracr - real(r8) :: flux_epbalfact - real(r8) :: frac_sum - real(r8) :: avsdr, anidr, avsdf, anidf ! albedos - real(r8) :: fswabsv, fswabsi ! sw - integer :: noflds,naflds,niflds,nxflds - integer :: kof,kaf,kif,kxf - character(CL) :: field_ocn ! string converted to char - character(CL) :: field_atm ! string converted to char - character(CL) :: field_ice ! string converted to char - character(CL) :: field_xao ! string converted to char - character(CL) :: itemc_ocn ! string converted to char - character(CL) :: itemc_atm ! string converted to char - character(CL) :: itemc_ice ! string converted to char - character(CL) :: itemc_xao ! string converted to char - logical :: iamroot - type(seq_infodata_type),pointer :: infodata - integer, save :: index_a2x_Faxa_swvdr - integer, save :: index_a2x_Faxa_swvdf - integer, save :: index_a2x_Faxa_swndr - integer, save :: index_a2x_Faxa_swndf - integer, save :: index_i2x_Fioi_swpen - integer, save :: index_xao_So_avsdr - integer, save :: index_xao_So_anidr - integer, save :: index_xao_So_avsdf - integer, save :: index_xao_So_anidf - integer, save :: index_a2x_Faxa_snowc - integer, save :: index_a2x_Faxa_snowl - integer, save :: index_a2x_Faxa_rainc - integer, save :: index_a2x_Faxa_rainl - integer, save :: index_x2o_Foxx_swnet - integer, save :: index_x2o_Faxa_snow - integer, save :: index_x2o_Faxa_rain - integer, save :: index_x2o_Faxa_prec - - !wiso fields: - integer, save :: index_a2x_Faxa_rainc_16O - integer, save :: index_a2x_Faxa_rainl_16O - integer, save :: index_a2x_Faxa_snowc_16O - integer, save :: index_a2x_Faxa_snowl_16O - integer, save :: index_x2o_Faxa_rain_16O - integer, save :: index_x2o_Faxa_snow_16O - integer, save :: index_x2o_Faxa_prec_16O - integer, save :: index_a2x_Faxa_rainc_18O - integer, save :: index_a2x_Faxa_rainl_18O - integer, save :: index_a2x_Faxa_snowc_18O - integer, save :: index_a2x_Faxa_snowl_18O - integer, save :: index_x2o_Faxa_rain_18O - integer, save :: index_x2o_Faxa_snow_18O - integer, save :: index_x2o_Faxa_prec_18O - integer, save :: index_a2x_Faxa_rainc_HDO - integer, save :: index_a2x_Faxa_rainl_HDO - integer, save :: index_a2x_Faxa_snowc_HDO - integer, save :: index_a2x_Faxa_snowl_HDO - integer, save :: index_x2o_Faxa_rain_HDO - integer, save :: index_x2o_Faxa_snow_HDO - integer, save :: index_x2o_Faxa_prec_HDO - - logical, save, pointer :: amerge(:),imerge(:),xmerge(:) - integer, save, pointer :: aindx(:), iindx(:), xindx(:) - logical, save :: first_time = .true. - logical, save :: flds_wiso = .false. 
- character(*),parameter :: subName = '(mrg_x2o_run_mct) ' - !----------------------------------------------------------------------- - - call seq_comm_setptrs(CPLID, iamroot=iamroot) - - noflds = mct_aVect_nRattr(x2o_o) - naflds = mct_aVect_nRattr(a2x_o) - niflds = mct_aVect_nRattr(i2x_o) - nxflds = mct_aVect_nRattr(xao_o) - - if (first_time) then - index_a2x_Faxa_swvdr = mct_aVect_indexRA(a2x_o,'Faxa_swvdr') - index_a2x_Faxa_swvdf = mct_aVect_indexRA(a2x_o,'Faxa_swvdf') - index_a2x_Faxa_swndr = mct_aVect_indexRA(a2x_o,'Faxa_swndr') - index_a2x_Faxa_swndf = mct_aVect_indexRA(a2x_o,'Faxa_swndf') - index_i2x_Fioi_swpen = mct_aVect_indexRA(i2x_o,'Fioi_swpen') - index_xao_So_avsdr = mct_aVect_indexRA(xao_o,'So_avsdr') - index_xao_So_anidr = mct_aVect_indexRA(xao_o,'So_anidr') - index_xao_So_avsdf = mct_aVect_indexRA(xao_o,'So_avsdf') - index_xao_So_anidf = mct_aVect_indexRA(xao_o,'So_anidf') - index_x2o_Foxx_swnet = mct_aVect_indexRA(x2o_o,'Foxx_swnet') - - index_a2x_Faxa_snowc = mct_aVect_indexRA(a2x_o,'Faxa_snowc') - index_a2x_Faxa_snowl = mct_aVect_indexRA(a2x_o,'Faxa_snowl') - index_a2x_Faxa_rainc = mct_aVect_indexRA(a2x_o,'Faxa_rainc') - index_a2x_Faxa_rainl = mct_aVect_indexRA(a2x_o,'Faxa_rainl') - index_x2o_Faxa_snow = mct_aVect_indexRA(x2o_o,'Faxa_snow') - index_x2o_Faxa_rain = mct_aVect_indexRA(x2o_o,'Faxa_rain') - index_x2o_Faxa_prec = mct_aVect_indexRA(x2o_o,'Faxa_prec') - - !wiso: - ! H2_16O - index_a2x_Faxa_snowc_16O = mct_aVect_indexRA(a2x_o,'Faxa_snowc_16O', perrWith='quiet') - index_a2x_Faxa_snowl_16O = mct_aVect_indexRA(a2x_o,'Faxa_snowl_16O', perrWith='quiet') - index_a2x_Faxa_rainc_16O = mct_aVect_indexRA(a2x_o,'Faxa_rainc_16O', perrWith='quiet') - index_a2x_Faxa_rainl_16O = mct_aVect_indexRA(a2x_o,'Faxa_rainl_16O', perrWith='quiet') - index_x2o_Faxa_rain_16O = mct_aVect_indexRA(x2o_o,'Faxa_rain_16O' , perrWith='quiet') - index_x2o_Faxa_snow_16O = mct_aVect_indexRA(x2o_o,'Faxa_snow_16O' , perrWith='quiet') - index_x2o_Faxa_prec_16O = mct_aVect_indexRA(x2o_o,'Faxa_prec_16O' , perrWith='quiet') - if ( index_x2o_Faxa_rain_16O /= 0 ) flds_wiso = .true. - ! H2_18O - index_a2x_Faxa_snowc_18O = mct_aVect_indexRA(a2x_o,'Faxa_snowc_18O', perrWith='quiet') - index_a2x_Faxa_snowl_18O = mct_aVect_indexRA(a2x_o,'Faxa_snowl_18O', perrWith='quiet') - index_a2x_Faxa_rainc_18O = mct_aVect_indexRA(a2x_o,'Faxa_rainc_18O', perrWith='quiet') - index_a2x_Faxa_rainl_18O = mct_aVect_indexRA(a2x_o,'Faxa_rainl_18O', perrWith='quiet') - index_x2o_Faxa_rain_18O = mct_aVect_indexRA(x2o_o,'Faxa_rain_18O' , perrWith='quiet') - index_x2o_Faxa_snow_18O = mct_aVect_indexRA(x2o_o,'Faxa_snow_18O' , perrWith='quiet') - index_x2o_Faxa_prec_18O = mct_aVect_indexRA(x2o_o,'Faxa_prec_18O' , perrWith='quiet') - if ( index_x2o_Faxa_rain_18O /= 0 ) flds_wiso = .true. - ! HDO - index_a2x_Faxa_snowc_HDO = mct_aVect_indexRA(a2x_o,'Faxa_snowc_HDO', perrWith='quiet') - index_a2x_Faxa_snowl_HDO = mct_aVect_indexRA(a2x_o,'Faxa_snowl_HDO', perrWith='quiet') - index_a2x_Faxa_rainc_HDO = mct_aVect_indexRA(a2x_o,'Faxa_rainc_HDO', perrWith='quiet') - index_a2x_Faxa_rainl_HDO = mct_aVect_indexRA(a2x_o,'Faxa_rainl_HDO', perrWith='quiet') - index_x2o_Faxa_rain_HDO = mct_aVect_indexRA(x2o_o,'Faxa_rain_HDO' , perrWith='quiet') - index_x2o_Faxa_snow_HDO = mct_aVect_indexRA(x2o_o,'Faxa_snow_HDO' , perrWith='quiet') - index_x2o_Faxa_prec_HDO = mct_aVect_indexRA(x2o_o,'Faxa_prec_HDO' , perrWith='quiet') - if ( index_x2o_Faxa_rain_HDO /= 0 ) flds_wiso = .true. - - - - ! 
Compute all other quantities based on standardized naming convention (see below) - ! Only ocn field states that have the name-prefix Sx_ will be merged - ! Only field names have the same name-suffix (after the "_") will be merged - ! (e.g. Si_fldname, Sa_fldname => merged to => Sx_fldname) - ! All fluxes will be scaled by the corresponding afrac or ifrac - ! EXCEPT for - ! -- Faxa_snnet, Faxa_snow, Faxa_rain, Faxa_prec (derived) - ! -- Forr_* (treated in ccsm_comp_mod) - ! All i2x_o fluxes that have the name-suffix "Faii" (atm/ice fluxes) will be ignored - ! - only ice fluxes that are Fioi_... will be used in the ocean merges - - allocate(aindx(noflds), amerge(noflds)) - allocate(iindx(noflds), imerge(noflds)) - allocate(xindx(noflds), xmerge(noflds)) - aindx(:) = 0 - iindx(:) = 0 - xindx(:) = 0 - amerge(:) = .true. - imerge(:) = .true. - xmerge(:) = .true. - - do kof = 1,noflds - call getfld(kof, x2o_o, field_ocn, itemc_ocn) - if (field_ocn(1:2) == 'PF') then - cycle ! if flux has first character as P, pass straight through - end if - if (field_ocn(1:1) == 'S' .and. field_ocn(2:2) /= 'x') then - cycle ! ignore all ocn states that do not have a Sx_ prefix - end if - if (trim(field_ocn) == 'Foxx_swnet'.or. & - trim(field_ocn) == 'Faxa_snow' .or. & - trim(field_ocn) == 'Faxa_rain' .or. & - trim(field_ocn) == 'Faxa_prec') then - cycle ! ignore swnet, snow, rain, prec - treated explicitly above - end if - !wiso - if (trim(field_ocn) == 'Faxa_snow_16O' .or. & - trim(field_ocn) == 'Faxa_rain_16O' .or. & - trim(field_ocn) == 'Faxa_prec_16O' .or. & - trim(field_ocn) == 'Faxa_snow_18O' .or. & - trim(field_ocn) == 'Faxa_rain_18O' .or. & - trim(field_ocn) == 'Faxa_prec_18O' .or. & - trim(field_ocn) == 'Faxa_snow_HDO' .or. & - trim(field_ocn) == 'Faxa_rain_HDO' .or. & - trim(field_ocn) == 'Faxa_prec_HDO') then - cycle ! ignore iso snow, rain, prec - treated explicitly above - end if - if (trim(field_ocn(1:5)) == 'Forr_') then - cycle ! ignore runoff fields from land - treated in coupler - end if - - do kaf = 1,naflds - call getfld(kaf, a2x_o, field_atm, itemc_atm) - if (trim(itemc_ocn) == trim(itemc_atm)) then - if ((trim(field_ocn) == trim(field_atm))) then - if (field_atm(1:1) == 'F') amerge(kof) = .false. - end if - aindx(kof) = kaf - exit - end if - end do - do kif = 1,niflds - call getfld(kif, i2x_o, field_ice, itemc_ice) - if (field_ice(1:1) == 'F' .and. field_ice(2:4) == 'aii') then - cycle ! ignore all i2x_o fluxes that are ice/atm fluxes - end if - if (trim(itemc_ocn) == trim(itemc_ice)) then - if ((trim(field_ocn) == trim(field_ice))) then - if (field_ice(1:1) == 'F') imerge(kof) = .false. - end if - iindx(kof) = kif - exit - end if - end do - do kxf = 1,nxflds - call getfld(kxf, xao_o, field_xao, itemc_xao) - if (trim(itemc_ocn) == trim(itemc_xao)) then - if ((trim(field_ocn) == trim(field_xao))) then - if (field_xao(1:1) == 'F') xmerge(kof) = .false. - end if - xindx(kof) = kxf - exit - end if - end do - if (aindx(kof) == 0) itemc_atm = 'unset' - if (iindx(kof) == 0) itemc_ice = 'unset' - if (xindx(kof) == 0) itemc_xao = 'unset' - - if (iamroot) then - write(logunit,10)trim(itemc_ocn),& - trim(itemc_xao),trim(itemc_ice),trim(itemc_atm) -10 format(' ',' ocn field: ',a15,', xao merge: ',a15, & - ', ice merge: ',a15,', atm merge: ',a15) - write(logunit, *)'field_ocn,kof,imerge,amerge,xmerge= ',& - trim(field_ocn),kof,imerge(kof),xmerge(kof),amerge(kof) - end if - end do - - first_time = .false. 
- end if - - call seq_cdata_setptrs(cdata_o, infodata=infodata) - call seq_infodata_GetData(infodata, flux_epbalfact = flux_epbalfact) - - call mct_aVect_zero(x2o_o) - - call mct_aVect_copy(aVin=a2x_o, aVout=x2o_o, vector=mct_usevector) - call mct_aVect_copy(aVin=i2x_o, aVout=x2o_o, vector=mct_usevector) - call mct_aVect_copy(aVin=w2x_o, aVout=x2o_o, vector=mct_usevector) - call mct_aVect_copy(aVin=xao_o, aVout=x2o_o, vector=mct_usevector) - - ! Compute input ocn state (note that this only applies to non-land portion of gridcell) - - ki = mct_aVect_indexRa(fractions_o,"ifrac",perrWith=subName) - ko = mct_aVect_indexRa(fractions_o,"ofrac",perrWith=subName) - kir = mct_aVect_indexRa(fractions_o,"ifrad",perrWith=subName) - kor = mct_aVect_indexRa(fractions_o,"ofrad",perrWith=subName) - lsize = mct_aVect_lsize(x2o_o) - do n = 1,lsize - - ifrac = fractions_o%rAttr(ki,n) - afrac = fractions_o%rAttr(ko,n) - frac_sum = ifrac + afrac - if ((frac_sum) /= 0._r8) then - ifrac = ifrac / (frac_sum) - afrac = afrac / (frac_sum) - endif - - ifracr = fractions_o%rAttr(kir,n) - afracr = fractions_o%rAttr(kor,n) - frac_sum = ifracr + afracr - if ((frac_sum) /= 0._r8) then - ifracr = ifracr / (frac_sum) - afracr = afracr / (frac_sum) - endif - - ! Derived: compute net short-wave - avsdr = xao_o%rAttr(index_xao_So_avsdr,n) - anidr = xao_o%rAttr(index_xao_So_anidr,n) - avsdf = xao_o%rAttr(index_xao_So_avsdf,n) - anidf = xao_o%rAttr(index_xao_So_anidf,n) - fswabsv = a2x_o%rAttr(index_a2x_Faxa_swvdr,n) * (1.0_R8 - avsdr) & - + a2x_o%rAttr(index_a2x_Faxa_swvdf,n) * (1.0_R8 - avsdf) - fswabsi = a2x_o%rAttr(index_a2x_Faxa_swndr,n) * (1.0_R8 - anidr) & - + a2x_o%rAttr(index_a2x_Faxa_swndf,n) * (1.0_R8 - anidf) - x2o_o%rAttr(index_x2o_Foxx_swnet,n) = (fswabsv + fswabsi) * afracr + & - i2x_o%rAttr(index_i2x_Fioi_swpen,n) * ifrac - - ! Derived: compute total precipitation - scale total precip - ! 
Note that runoff is scaled by flux_epbalfact in ccsm_comp_mod - x2o_o%rAttr(index_x2o_Faxa_snow ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl,n) * afrac - - x2o_o%rAttr(index_x2o_Faxa_snow ,n) = x2o_o%rAttr(index_x2o_Faxa_snow ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain ,n) = x2o_o%rAttr(index_x2o_Faxa_rain ,n) * flux_epbalfact - - x2o_o%rAttr(index_x2o_Faxa_prec ,n) = x2o_o%rAttr(index_x2o_Faxa_rain ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow ,n) - - !wiso - if ( flds_wiso )then - x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc_16O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl_16O,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc_16O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl_16O,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc_18O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl_18O,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc_18O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl_18O,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc_HDO,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl_HDO,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc_HDO,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl_HDO,n) * afrac - - x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) = x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) = x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) = x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) * flux_epbalfact - - x2o_o%rAttr(index_x2o_Faxa_prec_16O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) - x2o_o%rAttr(index_x2o_Faxa_prec_18O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) - x2o_o%rAttr(index_x2o_Faxa_prec_HDO ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) - end if - - end do - - do kof = 1,noflds - do n = 1,lsize - ifrac = fractions_o%rAttr(ki,n) - afrac = fractions_o%rAttr(ko,n) - frac_sum = ifrac + afrac - if ((frac_sum) /= 0._r8) then - ifrac = ifrac / (frac_sum) - afrac = afrac / (frac_sum) - endif - if (iindx(kof) > 0) then - if (imerge(kof)) then - x2o_o%rAttr(kof,n) = x2o_o%rAttr(kof,n) + i2x_o%rAttr(iindx(kof),n) * ifrac - else - x2o_o%rAttr(kof,n) = i2x_o%rAttr(iindx(kof),n) * ifrac - end if - end if - if (aindx(kof) > 0) then - if (amerge(kof)) then - x2o_o%rAttr(kof,n) = x2o_o%rAttr(kof,n) + a2x_o%rAttr(aindx(kof),n) * afrac - else - x2o_o%rAttr(kof,n) = a2x_o%rAttr(aindx(kof),n) * afrac - end if - end if - if (xindx(kof) > 0) then - if (xmerge(kof)) then - x2o_o%rAttr(kof,n) = x2o_o%rAttr(kof,n) + xao_o%rAttr(xindx(kof),n) * afrac - else - x2o_o%rAttr(kof,n) = xao_o%rAttr(xindx(kof),n) * afrac - end if - end if - end do - end do - - end subroutine mrg_x2o_run_mct - - 
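! --- Illustrative sketch (editor-added, not part of the driver code above or of this patch) ---
! A minimal standalone program showing the per-cell arithmetic that mrg_x2o_run_mct performs
! for one ocean point: the ice and open-ocean fractions are renormalized to sum to one, net
! shortwave is built from the four radiation bands and the ocean albedos plus the penetrating
! shortwave under ice, and rain/snow are scaled by the ocean fraction and flux_epbalfact before
! being summed into total precipitation. All numeric values are made up; the real driver also
! uses separately normalized "radiation" fractions (ifracr/afracr) for the shortwave term,
! which this sketch collapses into ifrac/afrac for brevity.
program x2o_merge_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: ifrac, afrac, frac_sum
  real(r8) :: swvdr, swvdf, swndr, swndf, avsdr, avsdf, anidr, anidf
  real(r8) :: swpen, fswabsv, fswabsi, swnet
  real(r8) :: rain, snow, prec, flux_epbalfact

  ifrac = 0.3_r8 ; afrac = 0.6_r8            ! fractions need not sum to 1 on input
  frac_sum = ifrac + afrac
  if (frac_sum /= 0._r8) then
     ifrac = ifrac / frac_sum
     afrac = afrac / frac_sum
  end if

  swvdr = 100._r8 ; swvdf = 80._r8 ; swndr = 60._r8 ; swndf = 40._r8
  avsdr = 0.07_r8 ; avsdf = 0.06_r8 ; anidr = 0.07_r8 ; anidf = 0.06_r8
  swpen = 5._r8
  fswabsv = swvdr*(1._r8 - avsdr) + swvdf*(1._r8 - avsdf)   ! absorbed visible
  fswabsi = swndr*(1._r8 - anidr) + swndf*(1._r8 - anidf)   ! absorbed near-infrared
  swnet   = (fswabsv + fswabsi)*afrac + swpen*ifrac          ! open-ocean + under-ice parts

  rain = 2.0e-5_r8 ; snow = 1.0e-5_r8 ; flux_epbalfact = 1.0_r8
  rain = rain*afrac*flux_epbalfact
  snow = snow*afrac*flux_epbalfact
  prec = rain + snow

  print *, 'swnet =', swnet, '  prec =', prec
end program x2o_merge_sketch
! --- end sketch ---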
!-------------------------------------------------------------------------- - - subroutine mrg_x2g_run_mct( cdata_g, s2x_g, x2g_g ) - - !----------------------------------------------------------------------- - ! Arguments - ! - type(seq_cdata), intent(in) :: cdata_g - type(mct_aVect), intent(inout) :: s2x_g ! input - type(mct_aVect), intent(inout) :: x2g_g ! output - !----------------------------------------------------------------------- - - ! Create input glc state directly from land snow output state - call mct_aVect_copy(aVin=s2x_g, aVout=x2g_g, vector=mct_usevector) - - end subroutine mrg_x2g_run_mct - - !-------------------------------------------------------------------------- - - subroutine mrg_x2s_run_mct( cdata_s, g2x_s, x2s_s ) - - !----------------------------------------------------------------------- - ! Arguments - ! - type(seq_cdata), intent(in) :: cdata_s - type(mct_aVect), intent(inout) :: g2x_s ! input - type(mct_aVect), intent(inout) :: x2s_s ! output - !----------------------------------------------------------------------- - - ! Create input land state directly from glc output state - call mct_aVect_copy(aVin=g2x_s, aVout=x2s_s, vector=mct_usevector) - - end subroutine mrg_x2s_run_mct - - !-------------------------------------------------------------------------- - - subroutine mrg_x2w_run_mct( cdata_w, a2x_w, o2x_w, i2x_w, frac_w, x2w_w) - - !----------------------------------------------------------------------- - ! Arguments - ! - type(seq_cdata), intent(in) :: cdata_w - type(mct_aVect), intent(inout) :: a2x_w ! input - type(mct_aVect), intent(inout) :: o2x_w ! input - type(mct_aVect), intent(inout) :: i2x_w ! input - type(mct_aVect), intent(inout) :: frac_w ! input - type(mct_aVect), intent(inout) :: x2w_w ! output - !----------------------------------------------------------------------- - - ! Create input wave state directly from atm, ocn, ice output state - - call mct_aVect_copy(aVin=a2x_w, aVout=x2w_w, vector=mct_usevector) - call mct_aVect_copy(aVin=o2x_w, aVout=x2w_w, vector=mct_usevector) - call mct_aVect_copy(aVin=i2x_w, aVout=x2w_w, vector=mct_usevector) - - end subroutine mrg_x2w_run_mct - - !-------------------------------------------------------------------------- - - subroutine getfld(n, av, field, suffix) - integer , intent(in) :: n - type(mct_aVect) , intent(in) :: av - character(len=*), intent(out) :: field - character(len=*), intent(out) :: suffix - - type(mct_string) :: mstring ! mct char type - - call mct_aVect_getRList(mstring,n,av) - field = mct_string_toChar(mstring) - suffix = trim(field(scan(field,'_'):)) - call mct_string_clean(mstring) - - if (field(1:1) /= 'S' .and. field(1:1) /= 'F' .and. 
field(1:2) /= 'PF') then - write(6,*)'field attribute',trim(field),' must start with S or F or PF' - call shr_sys_abort() - end if - end subroutine getfld - -end module mrg_mod diff --git a/src/drivers/mct/main/prep_aoflux_mod.F90 b/src/drivers/mct/main/prep_aoflux_mod.F90 deleted file mode 100644 index ccf232df286..00000000000 --- a/src/drivers/mct/main/prep_aoflux_mod.F90 +++ /dev/null @@ -1,213 +0,0 @@ -module prep_aoflux_mod - - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use seq_comm_mct, only: num_inst_xao, num_inst_frc, num_inst_ocn - use seq_comm_mct, only: CPLID, logunit - use seq_comm_mct, only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_getdata, seq_infodata_type - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: atm, ocn - - implicit none - private ! except - save - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_aoflux_init - - public :: prep_aoflux_calc_xao_ox - public :: prep_aoflux_calc_xao_ax - - public :: prep_aoflux_get_xao_ox - public :: prep_aoflux_get_xao_ax - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! attribute vectors - type(mct_aVect), pointer :: xao_ox(:) ! Atm-ocn fluxes, ocn grid, cpl pes - type(mct_aVect), pointer :: xao_ax(:) ! Atm-ocn fluxes, atm grid, cpl pes - - ! seq_comm_getData variables - logical :: iamroot_CPLID ! .true. => CPLID masterproc - integer :: mpicom_CPLID ! MPI cpl communicator - - ! seq_infodata_getData variables - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_aoflux_init (infodata, fractions_ox, fractions_ax) - - !--------------------------------------------------------------- - ! Description - ! Initialize atm/ocn flux component and compute ocean albedos - ! module variables - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - type(mct_aVect) , intent(in) :: fractions_ox(:) - type(mct_aVect) , intent(in) :: fractions_ax(:) - ! - ! Local Variables - integer :: exi - integer :: lsize_o - integer :: lsize_a - character(SHR_KIND_CS) :: aoflux_grid ! 
grid for atm ocn flux calc - type(mct_avect) , pointer :: a2x_ax - type(mct_avect) , pointer :: o2x_ox - character(*) , parameter :: subname = '(prep_aoflux_init)' - !--------------------------------------------------------------- - - call seq_infodata_getdata(infodata, & - aoflux_grid=aoflux_grid) - - call seq_comm_getdata(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - a2x_ax => component_get_c2x_cx(atm(1)) - if (associated(a2x_ax)) then - lsize_a = mct_aVect_lsize(a2x_ax) - else - lsize_a = 0 - end if - - o2x_ox => component_get_c2x_cx(ocn(1)) - if (associated(o2x_ox)) then - lsize_o = mct_aVect_lsize(o2x_ox) - else - lsize_o = 0 - end if - - allocate(xao_ax(num_inst_xao)) - do exi = 1,num_inst_xao - call mct_aVect_init(xao_ax(exi), rList=seq_flds_xao_fields, lsize=lsize_a) - call mct_aVect_zero(xao_ax(exi)) - end do - allocate(xao_ox(num_inst_xao)) - do exi = 1,num_inst_xao - call mct_aVect_init(xao_ox(exi), rList=seq_flds_xao_fields, lsize=lsize_o) - call mct_aVect_zero(xao_ox(exi)) - enddo - - end subroutine prep_aoflux_init - - !================================================================================================ - - subroutine prep_aoflux_calc_xao_ax(fractions_ox, flds, timer) - !--------------------------------------------------------------- - ! Description - ! Create xao_ox - ! - ! Uses - use prep_atm_mod, only: prep_atm_get_mapper_So2a - use prep_atm_mod, only: prep_atm_get_mapper_Fo2a - ! - ! Arguments - type(mct_aVect) , intent(in) :: fractions_ox(:) - character(len=*), intent(in) :: flds - character(len=*), intent(in) :: timer - ! - ! Local Variables - type(seq_map) , pointer :: mapper_So2a - type(seq_map) , pointer :: mapper_Fo2a - integer :: exi, efi - character(*), parameter :: subname = '(prep_aoflux_calc_xao_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - if (trim(flds) == 'albedos') then - do exi = 1,num_inst_xao - efi = mod((exi-1),num_inst_frc) + 1 - - mapper_So2a => prep_atm_get_mapper_So2a() - call seq_map_map(mapper_So2a, xao_ox(exi), xao_ax(exi), & - fldlist=seq_flds_xao_albedo, norm=.true., & - avwts_s=fractions_ox(efi),avwtsfld_s='ofrac') - enddo - end if - - if (trim(flds) == 'states_and_fluxes') then - do exi = 1,num_inst_xao - efi = mod((exi-1),num_inst_frc) + 1 - - mapper_So2a => prep_atm_get_mapper_So2a() - call seq_map_map(mapper_So2a, xao_ox(exi), xao_ax(exi), & - fldlist=seq_flds_xao_states, norm=.true., & - avwts_s=fractions_ox(efi),avwtsfld_s='ofrac') - - mapper_Fo2a => prep_atm_get_mapper_Fo2a() - call seq_map_map(mapper_Fo2a, xao_ox(exi), xao_ax(exi),& - fldlist=seq_flds_xao_fluxes, norm=.true., & - avwts_s=fractions_ox(efi),avwtsfld_s='ofrac') - enddo - end if - call t_drvstopf (trim(timer)) - - end subroutine prep_aoflux_calc_xao_ax - - !================================================================================================ - - subroutine prep_aoflux_calc_xao_ox(timer) - !--------------------------------------------------------------- - ! Description - ! Create xao_ox - ! - ! Uses - use prep_ocn_mod, only: prep_ocn_get_mapper_Fa2o - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - type(seq_map), pointer :: mapper_Fa2o - integer :: exi - character(*), parameter :: subname = '(prep_aoflux_calc_xao_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - ! 
this mapping has to be done with area overlap mapping for all fields - ! due to the masking of the xao_ax data and the fact that a2oS is bilinear - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - ! if (iamroot_CPLID .and. exi == 1) then - ! write(logunit,F00) 'Calling map_atm2ocn_mct for mapping xao_ax to xao_ox' - ! end if - - mapper_Fa2o => prep_ocn_get_mapper_Fa2o() - call seq_map_map(mapper_Fa2o, xao_ax(exi), xao_ox(exi), norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_aoflux_calc_xao_ox - - !================================================================================================ - - function prep_aoflux_get_xao_ox() - type(mct_aVect), pointer :: prep_aoflux_get_xao_ox(:) - prep_aoflux_get_xao_ox => xao_ox(:) - end function prep_aoflux_get_xao_ox - - function prep_aoflux_get_xao_ax() - type(mct_aVect), pointer :: prep_aoflux_get_xao_ax(:) - prep_aoflux_get_xao_ax => xao_ax(:) - end function prep_aoflux_get_xao_ax - -end module prep_aoflux_mod diff --git a/src/drivers/mct/main/prep_atm_mod.F90 b/src/drivers/mct/main/prep_atm_mod.F90 deleted file mode 100644 index 4fcf079b785..00000000000 --- a/src/drivers/mct/main/prep_atm_mod.F90 +++ /dev/null @@ -1,813 +0,0 @@ -module prep_atm_mod - - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use seq_comm_mct, only: num_inst_atm, num_inst_ocn, num_inst_ice, num_inst_lnd, num_inst_xao, & - num_inst_frc, num_inst_max, CPLID, ATMID, logunit - use seq_comm_mct, only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: atm, lnd, ocn, ice - - implicit none - save - PRIVATE - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_atm_init - public :: prep_atm_mrg - - public :: prep_atm_get_l2x_ax - public :: prep_atm_get_i2x_ax - public :: prep_atm_get_o2x_ax - public :: prep_atm_get_z2x_ax - - public :: prep_atm_calc_l2x_ax - public :: prep_atm_calc_i2x_ax - public :: prep_atm_calc_o2x_ax - public :: prep_atm_calc_z2x_ax - - public :: prep_atm_get_mapper_So2a - public :: prep_atm_get_mapper_Fo2a - public :: prep_atm_get_mapper_Sl2a - public :: prep_atm_get_mapper_Fl2a - public :: prep_atm_get_mapper_Si2a - public :: prep_atm_get_mapper_Fi2a - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_atm_merge - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_So2a - type(seq_map), pointer :: mapper_Sl2a - type(seq_map), pointer :: mapper_Si2a - type(seq_map), pointer :: mapper_Fo2a ! needed for seq_frac_init - type(seq_map), pointer :: mapper_Fl2a ! needed for seq_frac_init - type(seq_map), pointer :: mapper_Fi2a ! needed for seq_frac_init - - ! attribute vectors - type(mct_aVect), pointer :: l2x_ax(:) ! 
Lnd export, atm grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: i2x_ax(:) ! Ice export, atm grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: o2x_ax(:) ! Ocn export, atm grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: z2x_ax(:) ! Iac export, atm grid, cpl pes - allocated in driver - - ! other module variables - integer :: mpicom_CPLID ! MPI cpl communicator - logical :: iamroot_CPLID ! .true. => CPLID masterproc - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_atm_init(infodata, ocn_c2_atm, ice_c2_atm, lnd_c2_atm, iac_c2_atm) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and mappers - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - logical , intent(in) :: ocn_c2_atm ! .true. => ocn to atm coupling on - logical , intent(in) :: ice_c2_atm ! .true. => ice to atm coupling on - logical , intent(in) :: lnd_c2_atm ! .true. => lnd to atm coupling on - logical , intent(in) :: iac_c2_atm ! .true. => iac to atm coupling on - ! - ! Local Variables - integer :: lsize_a - integer :: eli, eii, emi - logical :: samegrid_ao ! samegrid atm and ocean - logical :: samegrid_al ! samegrid atm and land - logical :: esmf_map_flag ! .true. => use esmf for mapping - logical :: atm_present ! .true. => atm is present - logical :: ocn_present ! .true. => ocn is present - logical :: ice_present ! .true. => ice is present - logical :: lnd_present ! .true. => lnd is prsent - character(CL) :: ocn_gnam ! ocn grid - character(CL) :: atm_gnam ! atm grid - character(CL) :: lnd_gnam ! lnd grid - type(mct_avect), pointer :: a2x_ax - character(*), parameter :: subname = '(prep_atm_init)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - lnd_present=lnd_present, & - atm_gnam=atm_gnam, & - ocn_gnam=ocn_gnam, & - lnd_gnam=lnd_gnam, & - esmf_map_flag=esmf_map_flag) - - allocate(mapper_So2a) - allocate(mapper_Sl2a) - allocate(mapper_Si2a) - allocate(mapper_Fo2a) - allocate(mapper_Fl2a) - allocate(mapper_Fi2a) - - if (atm_present) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - a2x_ax => component_get_c2x_cx(atm(1)) - lsize_a = mct_aVect_lsize(a2x_ax) - - allocate(l2x_ax(num_inst_lnd)) - do eli = 1,num_inst_lnd - call mct_aVect_init(l2x_ax(eli), rList=seq_flds_l2x_fields, lsize=lsize_a) - call mct_aVect_zero(l2x_ax(eli)) - end do - allocate(o2x_ax(num_inst_max)) - do emi = 1,num_inst_max - call mct_aVect_init(o2x_ax(emi), rList=seq_flds_o2x_fields, lsize=lsize_a) - call mct_aVect_zero(o2x_ax(emi)) - enddo - allocate(i2x_ax(num_inst_ice)) - do eii = 1,num_inst_ice - call mct_aVect_init(i2x_ax(eii), rList=seq_flds_i2x_fields, lsize=lsize_a) - call mct_aVect_zero(i2x_ax(eii)) - enddo - - samegrid_al = .true. - samegrid_ao = .true. - if (trim(atm_gnam) /= trim(lnd_gnam)) samegrid_al = .false. - if (trim(atm_gnam) /= trim(ocn_gnam)) samegrid_ao = .false. 
- - if (ocn_c2_atm) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_So2a' - end if - call seq_map_init_rcfile(mapper_So2a, ocn(1), atm(1), & - 'seq_maps.rc','ocn2atm_smapname:','ocn2atm_smaptype:',samegrid_ao, & - 'mapper_So2a initialization',esmf_map_flag) - end if - - ! needed for domain checking - if (ocn_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fo2a' - end if - call seq_map_init_rcfile(mapper_Fo2a, ocn(1), atm(1), & - 'seq_maps.rc','ocn2atm_fmapname:','ocn2atm_fmaptype:',samegrid_ao, & - 'mapper_Fo2a initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - if (ice_c2_atm) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Si2a' - end if - call seq_map_init_rcfile(mapper_Si2a, ice(1), atm(1), & - 'seq_maps.rc','ice2atm_smapname:','ice2atm_smaptype:',samegrid_ao, & - 'mapper_Si2a initialization',esmf_map_flag) - end if - - ! needed for domain checking - if (ice_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fi2a' - end if - call seq_map_init_rcfile(mapper_Fi2a, ice(1), atm(1), & - 'seq_maps.rc','ice2atm_fmapname:','ice2atm_fmaptype:',samegrid_ao, & - 'mapper_Fi2a initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - ! needed for domain checking - if (lnd_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fl2a' - end if - call seq_map_init_rcfile(mapper_Fl2a, lnd(1), atm(1), & - 'seq_maps.rc','lnd2atm_fmapname:','lnd2atm_fmaptype:',samegrid_al, & - 'mapper_Fl2a initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - if (lnd_c2_atm) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sl2a' - end if - call seq_map_init_rcfile(mapper_Sl2a, lnd(1), atm(1), & - 'seq_maps.rc','lnd2atm_smapname:','lnd2atm_smaptype:',samegrid_al, & - 'mapper_Sl2a initialization',esmf_map_flag) - end if - - - end if - - end subroutine prep_atm_init - - !================================================================================================ - - subroutine prep_atm_mrg(infodata, fractions_ax, xao_ax, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Prepare run phase, including running the merge - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type(mct_aVect) , intent(in) :: fractions_ax(:) - type(mct_aVect) , intent(in) :: xao_ax(:) - character(len=*) , intent(in) :: timer_mrg - ! - ! Local Variables - integer :: eli, eoi, eii, exi, efi, eai, emi - type(mct_avect), pointer :: x2a_ax - character(*), parameter :: subname = '(prep_atm_mrg)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer_mrg),barrier=mpicom_CPLID) - do eai = 1,num_inst_atm - ! Use fortran mod to address ensembles in merge - eli = mod((eai-1),num_inst_lnd) + 1 - eoi = mod((eai-1),num_inst_ocn) + 1 - eii = mod((eai-1),num_inst_ice) + 1 - exi = mod((eai-1),num_inst_xao) + 1 - efi = mod((eai-1),num_inst_frc) + 1 - emi = mod((eai-1),num_inst_max) + 1 - - x2a_ax => component_get_x2c_cx(atm(eai)) ! 
This is actually modifying x2a_ax - call prep_atm_merge(l2x_ax(eli), o2x_ax(emi), xao_ax(exi), i2x_ax(eii), & - fractions_ax(efi), x2a_ax) - enddo - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_atm_mrg - - !================================================================================================ - - subroutine prep_atm_merge( l2x_a, o2x_a, xao_a, i2x_a, fractions_a, x2a_a ) - - !----------------------------------------------------------------------- - ! - ! Arguments - type(mct_aVect), intent(in) :: l2x_a - type(mct_aVect), intent(in) :: o2x_a - type(mct_aVect), intent(in) :: xao_a - type(mct_aVect), intent(in) :: i2x_a - type(mct_aVect), intent(in) :: fractions_a - type(mct_aVect), intent(inout) :: x2a_a - ! - ! Local workspace - real(r8) :: fracl, fraci, fraco - integer :: n,ka,ki,kl,ko,kx,kof,kif,klf,i,i1,o1 - integer :: lsize - integer :: index_x2a_Sf_lfrac - integer :: index_x2a_Sf_ifrac - integer :: index_x2a_Sf_ofrac - character(CL),allocatable :: field_atm(:) ! string converted to char - character(CL),allocatable :: field_lnd(:) ! string converted to char - character(CL),allocatable :: field_ice(:) ! string converted to char - character(CL),allocatable :: field_xao(:) ! string converted to char - character(CL),allocatable :: field_ocn(:) ! string converted to char - character(CL),allocatable :: itemc_atm(:) ! string converted to char - character(CL),allocatable :: itemc_lnd(:) ! string converted to char - character(CL),allocatable :: itemc_ice(:) ! string converted to char - character(CL),allocatable :: itemc_xao(:) ! string converted to char - character(CL),allocatable :: itemc_ocn(:) ! string converted to char - logical :: iamroot - character(CL),allocatable :: mrgstr(:) ! temporary string - logical, save :: first_time = .true. - type(mct_aVect_sharedindices),save :: l2x_sharedindices - type(mct_aVect_sharedindices),save :: o2x_sharedindices - type(mct_aVect_sharedindices),save :: i2x_sharedindices - type(mct_aVect_sharedindices),save :: xao_sharedindices - logical, pointer, save :: lmerge(:),imerge(:),xmerge(:),omerge(:) - integer, pointer, save :: lindx(:), iindx(:), oindx(:),xindx(:) - integer, save :: naflds, nlflds,niflds,noflds,nxflds - character(*), parameter :: subname = '(prep_atm_merge) ' - !----------------------------------------------------------------------- - ! - call seq_comm_getdata(CPLID, iamroot=iamroot) - - if (first_time) then - - naflds = mct_aVect_nRattr(x2a_a) - nlflds = mct_aVect_nRattr(l2x_a) - niflds = mct_aVect_nRattr(i2x_a) - noflds = mct_aVect_nRattr(o2x_a) - nxflds = mct_aVect_nRattr(xao_a) - - allocate(lindx(naflds), lmerge(naflds)) - allocate(iindx(naflds), imerge(naflds)) - allocate(xindx(naflds), xmerge(naflds)) - allocate(oindx(naflds), omerge(naflds)) - allocate(field_atm(naflds), itemc_atm(naflds)) - allocate(field_lnd(nlflds), itemc_lnd(nlflds)) - allocate(field_ice(niflds), itemc_ice(niflds)) - allocate(field_ocn(noflds), itemc_ocn(noflds)) - allocate(field_xao(nxflds), itemc_xao(nxflds)) - allocate(mrgstr(naflds)) - - lindx(:) = 0 - iindx(:) = 0 - xindx(:) = 0 - oindx(:) = 0 - lmerge(:) = .true. - imerge(:) = .true. - xmerge(:) = .true. - omerge(:) = .true. 
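! --- Illustrative sketch (editor-added, not part of the driver code above or of this patch) ---
! The loop in prep_atm_mrg above pairs each atm ensemble member with a lnd/ocn/ice/xao/frc
! instance using mod-based round-robin indexing, so components with fewer instances than the
! atm are reused cyclically. The instance counts below are arbitrary example values.
program instance_pairing_sketch
  implicit none
  integer, parameter :: num_inst_atm = 4, num_inst_lnd = 2, num_inst_ocn = 1
  integer :: eai, eli, eoi
  do eai = 1, num_inst_atm
     eli = mod(eai-1, num_inst_lnd) + 1    ! cycles 1,2,1,2
     eoi = mod(eai-1, num_inst_ocn) + 1    ! always 1
     print '(a,i0,a,i0,a,i0)', 'atm ', eai, ' -> lnd ', eli, ', ocn ', eoi
  end do
end program instance_pairing_sketch
! --- end sketch ---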
- - do ka = 1,naflds - field_atm(ka) = mct_aVect_getRList2c(ka, x2a_a) - itemc_atm(ka) = trim(field_atm(ka)(scan(field_atm(ka),'_'):)) - enddo - do kl = 1,nlflds - field_lnd(kl) = mct_aVect_getRList2c(kl, l2x_a) - itemc_lnd(kl) = trim(field_lnd(kl)(scan(field_lnd(kl),'_'):)) - enddo - do ki = 1,niflds - field_ice(ki) = mct_aVect_getRList2c(ki, i2x_a) - itemc_ice(ki) = trim(field_ice(ki)(scan(field_ice(ki),'_'):)) - enddo - do ko = 1,noflds - field_ocn(ko) = mct_aVect_getRList2c(ko, o2x_a) - itemc_ocn(ko) = trim(field_ocn(ko)(scan(field_ocn(ko),'_'):)) - enddo - do kx = 1,nxflds - field_xao(kx) = mct_aVect_getRList2c(kx, xao_a) - itemc_xao(kx) = trim(field_xao(kx)(scan(field_xao(kx),'_'):)) - enddo - - call mct_aVect_setSharedIndices(l2x_a, x2a_a, l2x_SharedIndices) - call mct_aVect_setSharedIndices(o2x_a, x2a_a, o2x_SharedIndices) - call mct_aVect_setSharedIndices(i2x_a, x2a_a, i2x_SharedIndices) - call mct_aVect_setSharedIndices(xao_a, x2a_a, xao_SharedIndices) - - ! Field naming rules - ! Only atm states that are Sx_... will be merged - ! Only fluxes that are F??x_... will be merged - ! All fluxes will be multiplied by corresponding component fraction - - do ka = 1,naflds - !--- document merge --- - mrgstr(ka) = subname//'x2a%'//trim(field_atm(ka))//' =' - if (field_atm(ka)(1:2) == 'PF') then - cycle ! if flux has first character as P, pass straight through - end if - if (field_atm(ka)(1:1) == 'S' .and. field_atm(ka)(2:2) /= 'x') then - cycle ! any state fields that are not Sx_ will just be copied - end if - - do kl = 1,nlflds - if (trim(itemc_atm(ka)) == trim(itemc_lnd(kl))) then - if ((trim(field_atm(ka)) == trim(field_lnd(kl)))) then - if (field_lnd(kl)(1:1) == 'F') lmerge(ka) = .false. - end if - ! --- make sure only one field matches --- - if (lindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple kl field matches for ',trim(itemc_lnd(kl)) - call shr_sys_abort(subname//' ERROR multiple kl field matches') - endif - lindx(ka) = kl - end if - end do - do ki = 1,niflds - if (field_ice(ki)(1:1) == 'F' .and. field_ice(ki)(2:4) == 'ioi') then - cycle ! ignore all fluxes that are ice/ocn fluxes - end if - if (trim(itemc_atm(ka)) == trim(itemc_ice(ki))) then - if ((trim(field_atm(ka)) == trim(field_ice(ki)))) then - if (field_ice(ki)(1:1) == 'F') imerge(ka) = .false. - end if - ! --- make sure only one field matches --- - if (iindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple ki field matches for ',trim(itemc_ice(ki)) - call shr_sys_abort(subname//' ERROR multiple ki field matches') - endif - iindx(ka) = ki - end if - end do - do kx = 1,nxflds - if (trim(itemc_atm(ka)) == trim(itemc_xao(kx))) then - if ((trim(field_atm(ka)) == trim(field_xao(kx)))) then - if (field_xao(kx)(1:1) == 'F') xmerge(ka) = .false. - end if - ! --- make sure only one field matches --- - if (xindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple kx field matches for ',trim(itemc_xao(kx)) - call shr_sys_abort(subname//' ERROR multiple kx field matches') - endif - xindx(ka) = kx - end if - end do - do ko = 1,noflds - if (trim(itemc_atm(ka)) == trim(itemc_ocn(ko))) then - if ((trim(field_atm(ka)) == trim(field_ocn(ko)))) then - if (field_ocn(ko)(1:1) == 'F') omerge(ka) = .false. - end if - ! --- make sure only one field matches --- - if (oindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple ko field matches for ',trim(itemc_ocn(ko)) - call shr_sys_abort(subname//' ERROR multiple ko field matches') - endif - oindx(ka) = ko - end if - end do - - ! 
--- add some checks --- - - ! --- make sure all terms agree on merge or non-merge aspect --- - if (oindx(ka) > 0 .and. xindx(ka) > 0) then - write(logunit,*) subname,' ERROR: oindx and xindx both non-zero, not allowed ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR oindx and xindx both non-zero') - endif - - ! --- make sure all terms agree on merge or non-merge aspect --- - if (lindx(ka) > 0 .and. iindx(ka) > 0 .and. (lmerge(ka) .neqv. imerge(ka))) then - write(logunit,*) subname,' ERROR: lindx and iindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR lindx and iindx merge logic error') - endif - if (lindx(ka) > 0 .and. xindx(ka) > 0 .and. (lmerge(ka) .neqv. xmerge(ka))) then - write(logunit,*) subname,' ERROR: lindx and xindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR lindx and xindx merge logic error') - endif - if (lindx(ka) > 0 .and. oindx(ka) > 0 .and. (lmerge(ka) .neqv. omerge(ka))) then - write(logunit,*) subname,' ERROR: lindx and oindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR lindx and oindx merge logic error') - endif - if (xindx(ka) > 0 .and. iindx(ka) > 0 .and. (xmerge(ka) .neqv. imerge(ka))) then - write(logunit,*) subname,' ERROR: xindx and iindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR xindx and iindx merge logic error') - endif - if (xindx(ka) > 0 .and. oindx(ka) > 0 .and. (xmerge(ka) .neqv. omerge(ka))) then - write(logunit,*) subname,' ERROR: xindx and oindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR xindx and oindx merge logic error') - endif - if (iindx(ka) > 0 .and. oindx(ka) > 0 .and. (imerge(ka) .neqv. omerge(ka))) then - write(logunit,*) subname,' ERROR: iindx and oindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR iindx and oindx merge logic error') - endif - - end do - end if - - ! Zero attribute vector - - call mct_avect_zero(x2a_a) - - ! Update surface fractions - - kif=mct_aVect_indexRA(fractions_a,"ifrac") - klf=mct_aVect_indexRA(fractions_a,"lfrac") - kof=mct_aVect_indexRA(fractions_a,"ofrac") - lsize = mct_avect_lsize(x2a_a) - - index_x2a_Sf_lfrac = mct_aVect_indexRA(x2a_a,'Sf_lfrac') - index_x2a_Sf_ifrac = mct_aVect_indexRA(x2a_a,'Sf_ifrac') - index_x2a_Sf_ofrac = mct_aVect_indexRA(x2a_a,'Sf_ofrac') - do n = 1,lsize - x2a_a%rAttr(index_x2a_Sf_lfrac,n) = fractions_a%Rattr(klf,n) - x2a_a%rAttr(index_x2a_Sf_ifrac,n) = fractions_a%Rattr(kif,n) - x2a_a%rAttr(index_x2a_Sf_ofrac,n) = fractions_a%Rattr(kof,n) - end do - - !--- document fraction operations --- - if (first_time) then - mrgstr(index_x2a_sf_lfrac) = trim(mrgstr(index_x2a_sf_lfrac))//' = fractions_a%lfrac' - mrgstr(index_x2a_sf_ifrac) = trim(mrgstr(index_x2a_sf_ifrac))//' = fractions_a%ifrac' - mrgstr(index_x2a_sf_ofrac) = trim(mrgstr(index_x2a_sf_ofrac))//' = fractions_a%ofrac' - endif - - ! Copy attributes that do not need to be merged - ! These are assumed to have the same name in - ! (o2x_a and x2a_a) and in (l2x_a and x2a_a), etc. 
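! --- Illustrative sketch (editor-added, not part of the driver code above or of this patch) ---
! The field-naming rules described above match fields across components by the suffix that
! follows the first underscore: only 'Sx_' states and 'F...' fluxes take part in the merge,
! while names beginning with 'PF' are copied straight through. This tiny program shows the
! suffix extraction and classification; the field names are examples, not a definitive list.
program field_match_sketch
  implicit none
  character(len=16) :: fa, fb
  character(len=16) :: suffa, suffb

  fa = 'Sx_tbot'                        ! merged atm input state
  fb = 'Sa_tbot'                        ! hypothetical matching atm export state
  suffa = fa(scan(fa,'_'):)             ! yields '_tbot'
  suffb = fb(scan(fb,'_'):)
  if (trim(suffa) == trim(suffb)) then
     print *, trim(fb), ' merges into ', trim(fa)
  end if

  fa = 'PFzz_example'                   ! hypothetical pass-through field
  if (fa(1:2) == 'PF') print *, trim(fa), ' passes through unmerged'
end program field_match_sketch
! --- end sketch ---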
- - !--- document copy operations --- - if (first_time) then - !--- document merge --- - do i=1,l2x_SharedIndices%shared_real%num_indices - i1=l2x_SharedIndices%shared_real%aVindices1(i) - o1=l2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = l2x%'//trim(field_lnd(i1)) - enddo - do i=1,o2x_SharedIndices%shared_real%num_indices - i1=o2x_SharedIndices%shared_real%aVindices1(i) - o1=o2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = o2x%'//trim(field_ocn(i1)) - enddo - do i=1,i2x_SharedIndices%shared_real%num_indices - i1=i2x_SharedIndices%shared_real%aVindices1(i) - o1=i2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = i2x%'//trim(field_ice(i1)) - enddo - do i=1,xao_SharedIndices%shared_real%num_indices - i1=xao_SharedIndices%shared_real%aVindices1(i) - o1=xao_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = xao%'//trim(field_xao(i1)) - enddo - endif - - ! call mct_aVect_copy(aVin=l2x_a, aVout=x2a_a, vector=mct_usevector) - ! call mct_aVect_copy(aVin=o2x_a, aVout=x2a_a, vector=mct_usevector) - ! call mct_aVect_copy(aVin=i2x_a, aVout=x2a_a, vector=mct_usevector) - ! call mct_aVect_copy(aVin=xao_a, aVout=x2a_a, vector=mct_usevector) - call mct_aVect_copy(aVin=l2x_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=l2x_SharedIndices) - call mct_aVect_copy(aVin=o2x_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=o2x_SharedIndices) - call mct_aVect_copy(aVin=i2x_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=i2x_SharedIndices) - call mct_aVect_copy(aVin=xao_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=xao_SharedIndices) - - ! If flux to atm is coming only from the ocean (based on field being in o2x_a) - - ! -- then scale by both ocean and ice fraction - ! If flux to atm is coming only from the land or ice or coupler - ! -- then do scale by fraction above - - do ka = 1,naflds - !--- document merge --- - if (first_time) then - if (lindx(ka) > 0) then - if (lmerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + lfrac*l2x%'//trim(field_lnd(lindx(ka))) - else - mrgstr(ka) = trim(mrgstr(ka))//' = lfrac*l2x%'//trim(field_lnd(lindx(ka))) - end if - end if - if (iindx(ka) > 0) then - if (imerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + ifrac*i2x%'//trim(field_ice(iindx(ka))) - else - mrgstr(ka) = trim(mrgstr(ka))//' = ifrac*i2x%'//trim(field_ice(iindx(ka))) - end if - end if - if (xindx(ka) > 0) then - if (xmerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + ofrac*xao%'//trim(field_xao(xindx(ka))) - else - mrgstr(ka) = trim(mrgstr(ka))//' = ofrac*xao%'//trim(field_xao(xindx(ka))) - end if - end if - if (oindx(ka) > 0) then - if (omerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + ofrac*o2x%'//trim(field_ocn(oindx(ka))) - end if - if (.not. omerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + (ifrac+ofrac)*o2x%'//trim(field_ocn(oindx(ka))) - end if - end if - endif - - do n = 1,lsize - fracl = fractions_a%Rattr(klf,n) - fraci = fractions_a%Rattr(kif,n) - fraco = fractions_a%Rattr(kof,n) - if (lindx(ka) > 0 .and. fracl > 0._r8) then - if (lmerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + l2x_a%rAttr(lindx(ka),n) * fracl - else - x2a_a%rAttr(ka,n) = l2x_a%rAttr(lindx(ka),n) * fracl - end if - end if - if (iindx(ka) > 0 .and. fraci > 0._r8) then - if (imerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + i2x_a%rAttr(iindx(ka),n) * fraci - else - x2a_a%rAttr(ka,n) = i2x_a%rAttr(iindx(ka),n) * fraci - end if - end if - if (xindx(ka) > 0 .and. 
fraco > 0._r8) then - if (xmerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + xao_a%rAttr(xindx(ka),n) * fraco - else - x2a_a%rAttr(ka,n) = xao_a%rAttr(xindx(ka),n) * fraco - end if - end if - if (oindx(ka) > 0) then - if (omerge(ka) .and. fraco > 0._r8) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + o2x_a%rAttr(oindx(ka),n) * fraco - end if - if (.not. omerge(ka)) then - !--- NOTE: This IS using the ocean fields and ice fraction !! --- - x2a_a%rAttr(ka,n) = o2x_a%rAttr(oindx(ka),n) * fraci - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + o2x_a%rAttr(oindx(ka),n) * fraco - end if - end if - end do - end do - - if (first_time) then - if (iamroot) then - write(logunit,'(A)') subname//' Summary:' - do ka = 1,naflds - write(logunit,'(A)') trim(mrgstr(ka)) - enddo - endif - deallocate(mrgstr) - deallocate(field_atm,itemc_atm) - deallocate(field_lnd,itemc_lnd) - deallocate(field_ice,itemc_ice) - deallocate(field_ocn,itemc_ocn) - deallocate(field_xao,itemc_xao) - endif - - first_time = .false. - - end subroutine prep_atm_merge - - !================================================================================================ - - subroutine prep_atm_calc_o2x_ax(fractions_ox, timer) - !--------------------------------------------------------------- - ! Description - ! Create o2x_ax (note that o2x_ax is a local module variable) - ! - ! Arguments - type(mct_aVect) , optional, intent(in) :: fractions_ox(:) - character(len=*), optional, intent(in) :: timer - ! - ! Local Variables - integer :: eoi, efi, emi - type(mct_aVect) , pointer :: o2x_ox - character(*), parameter :: subname = '(prep_atm_calc_o2x_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do emi = 1,num_inst_max - eoi = mod((emi-1),num_inst_ocn) + 1 - efi = mod((emi-1),num_inst_frc) + 1 - - o2x_ox => component_get_c2x_cx(ocn(eoi)) - if (present(fractions_ox)) then - call seq_map_map(mapper_So2a, o2x_ox, o2x_ax(emi),& - fldlist=seq_flds_o2x_states,norm=.true., & - avwts_s=fractions_ox(efi),avwtsfld_s='ofrac') - else - call seq_map_map(mapper_So2a, o2x_ox, o2x_ax(emi),& - fldlist=seq_flds_o2x_states,norm=.true.) - endif - call seq_map_map(mapper_Fo2a, o2x_ox, o2x_ax(emi),& - fldlist=seq_flds_o2x_fluxes,norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_atm_calc_o2x_ax - - !================================================================================================ - - subroutine prep_atm_calc_i2x_ax(fractions_ix, timer) - !--------------------------------------------------------------- - ! Description - ! Create i2x_ax (note that i2x_ax is a local module variable) - ! - ! Arguments - type(mct_aVect) , intent(in) :: fractions_ix(:) - character(len=*), intent(in) :: timer - ! - ! 
Local Variables - integer :: eii, efi - type(mct_aVect) , pointer :: i2x_ix - character(*), parameter :: subname = '(prep_atm_calc_i2x_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eii = 1,num_inst_ice - efi = mod((eii-1),num_inst_frc) + 1 - - i2x_ix => component_get_c2x_cx(ice(eii)) - call seq_map_map(mapper_Si2a, i2x_ix, i2x_ax(eii), & - fldlist=seq_flds_i2x_states, & - avwts_s=fractions_ix(eii), avwtsfld_s='ifrac') - call seq_map_map(mapper_Fi2a, i2x_ix, i2x_ax(eii), & - fldlist=seq_flds_i2x_fluxes, & - avwts_s=fractions_ix(eii), avwtsfld_s='ifrac') - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_atm_calc_i2x_ax - - !================================================================================================ - - subroutine prep_atm_calc_l2x_ax(fractions_lx, timer) - !--------------------------------------------------------------- - ! Description - ! Create l2x_ax (note that l2x_ax is a local module variable) - ! - ! Arguments - type(mct_aVect) , intent(in) :: fractions_lx(:) - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eli, efi - type(mct_avect), pointer :: l2x_lx - character(*), parameter :: subname = '(prep_atm_calc_l2x_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eli = 1,num_inst_lnd - efi = mod((eli-1),num_inst_frc) + 1 - - l2x_lx => component_get_c2x_cx(lnd(eli)) - call seq_map_map(mapper_Sl2a, l2x_lx, l2x_ax(eli), & - fldlist=seq_flds_l2x_states, norm=.true., & - avwts_s=fractions_lx(efi), avwtsfld_s='lfrin') - call seq_map_map(mapper_Fl2a, l2x_lx, l2x_ax(eli), & - fldlist=seq_flds_l2x_fluxes, norm=.true., & - avwts_s=fractions_lx(efi), avwtsfld_s='lfrin') - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_atm_calc_l2x_ax - - !================================================================================================ - - subroutine prep_atm_calc_z2x_ax(fractions_zx, timer) - !--------------------------------------------------------------- - ! Description - ! Create z2x_ax (note that z2x_ax is a local module variable) - ! - ! Arguments - type(mct_aVect) , intent(in) :: fractions_zx(:) - character(len=*), intent(in) :: timer - ! - ! 
Local Variables - - end subroutine prep_atm_calc_z2x_ax - - !================================================================================================ - - function prep_atm_get_l2x_ax() - type(mct_aVect), pointer :: prep_atm_get_l2x_ax(:) - prep_atm_get_l2x_ax => l2x_ax(:) - end function prep_atm_get_l2x_ax - - function prep_atm_get_i2x_ax() - type(mct_aVect), pointer :: prep_atm_get_i2x_ax(:) - prep_atm_get_i2x_ax => i2x_ax(:) - end function prep_atm_get_i2x_ax - - function prep_atm_get_o2x_ax() - type(mct_aVect), pointer :: prep_atm_get_o2x_ax(:) - prep_atm_get_o2x_ax => o2x_ax(:) - end function prep_atm_get_o2x_ax - - function prep_atm_get_z2x_ax() - type(mct_aVect), pointer :: prep_atm_get_z2x_ax(:) - prep_atm_get_z2x_ax => z2x_ax(:) - end function prep_atm_get_z2x_ax - - function prep_atm_get_mapper_So2a() - type(seq_map), pointer :: prep_atm_get_mapper_So2a - prep_atm_get_mapper_So2a => mapper_So2a - end function prep_atm_get_mapper_So2a - - function prep_atm_get_mapper_Fo2a() - type(seq_map), pointer :: prep_atm_get_mapper_Fo2a - prep_atm_get_mapper_Fo2a => mapper_Fo2a - end function prep_atm_get_mapper_Fo2a - - function prep_atm_get_mapper_Sl2a() - type(seq_map), pointer :: prep_atm_get_mapper_Sl2a - prep_atm_get_mapper_Sl2a => mapper_Sl2a - end function prep_atm_get_mapper_Sl2a - - function prep_atm_get_mapper_Fl2a() - type(seq_map), pointer :: prep_atm_get_mapper_Fl2a - prep_atm_get_mapper_Fl2a => mapper_Fl2a - end function prep_atm_get_mapper_Fl2a - - function prep_atm_get_mapper_Si2a() - type(seq_map), pointer :: prep_atm_get_mapper_Si2a - prep_atm_get_mapper_Si2a => mapper_Si2a - end function prep_atm_get_mapper_Si2a - - function prep_atm_get_mapper_Fi2a() - type(seq_map), pointer :: prep_atm_get_mapper_Fi2a - prep_atm_get_mapper_Fi2a => mapper_Fi2a - end function prep_atm_get_mapper_Fi2a - - !================================================================================================ - -end module prep_atm_mod diff --git a/src/drivers/mct/main/prep_glc_mod.F90 b/src/drivers/mct/main/prep_glc_mod.F90 deleted file mode 100644 index 93e5368340d..00000000000 --- a/src/drivers/mct/main/prep_glc_mod.F90 +++ /dev/null @@ -1,1612 +0,0 @@ -module prep_glc_mod - -#include "shr_assert.h" - use shr_kind_mod , only: r8 => SHR_KIND_R8 - use shr_kind_mod , only: cl => SHR_KIND_CL - use shr_sys_mod , only: shr_sys_abort, shr_sys_flush - use seq_comm_mct , only: num_inst_glc, num_inst_lnd, num_inst_frc, & - num_inst_ocn - use seq_comm_mct , only: CPLID, GLCID, logunit - use seq_comm_mct , only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: component_get_dom_cx - use component_type_mod, only: glc, lnd, ocn - use glc_elevclass_mod, only : glc_get_num_elevation_classes, glc_elevclass_as_string - use glc_elevclass_mod, only : glc_all_elevclass_strings, GLC_ELEVCLASS_STRLEN - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! 
Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_glc_init - public :: prep_glc_mrg_lnd - - public :: prep_glc_accum_lnd - public :: prep_glc_accum_ocn - public :: prep_glc_accum_avg - - public :: prep_glc_calc_l2x_gx - public :: prep_glc_calc_o2x_gx - - public :: prep_glc_zero_fields - - public :: prep_glc_get_l2x_gx - public :: prep_glc_get_l2gacc_lx - public :: prep_glc_get_l2gacc_lx_one_instance - public :: prep_glc_get_l2gacc_lx_cnt - - public :: prep_glc_get_o2x_gx - public :: prep_glc_get_x2gacc_gx - public :: prep_glc_get_x2gacc_gx_cnt - - public :: prep_glc_get_mapper_Sl2g - public :: prep_glc_get_mapper_Fl2g - - public :: prep_glc_get_mapper_So2g - public :: prep_glc_get_mapper_Fo2g - - public :: prep_glc_calculate_subshelf_boundary_fluxes - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_glc_do_renormalize_smb - private :: prep_glc_set_g2x_lx_fields - private :: prep_glc_merge_lnd_forcing - private :: prep_glc_map_one_state_field_lnd2glc - private :: prep_glc_map_qice_conservative_lnd2glc - private :: prep_glc_renormalize_smb - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_Sl2g - type(seq_map), pointer :: mapper_Fl2g - type(seq_map), pointer :: mapper_So2g - type(seq_map), pointer :: mapper_Fo2g - type(seq_map), pointer :: mapper_Fg2l - - ! attribute vectors - type(mct_aVect), pointer :: l2x_gx(:) ! Lnd export, glc grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: o2x_gx(:) ! Ocn export, glc grid, cpl pes - allocated in driver - - ! accumulation variables - - type(mct_aVect), pointer :: x2gacc_gx(:) ! Glc export, glc grid, cpl pes - allocated in driver - integer , target :: x2gacc_gx_cnt ! x2gacc_gx: number of time samples accumulated - - type(mct_aVect), pointer :: l2gacc_lx(:) ! Lnd export, lnd grid, cpl pes - allocated in driver - integer , target :: l2gacc_lx_cnt ! l2gacc_lx: number of time samples accumulated - - ! other module variables - integer :: mpicom_CPLID ! MPI cpl communicator - - ! Whether to renormalize the SMB for conservation. - ! Should be set to true for 2-way coupled runs with evolving ice sheets. - ! Does not need to be true for 1-way coupling. - logical :: smb_renormalize - logical :: glc_present ! .true. => glc is present - - ! Name of flux field giving surface mass balance - character(len=*), parameter :: qice_fieldname = 'Flgl_qice' - - ! Names of some other fields - character(len=*), parameter :: Sg_frac_field = 'Sg_ice_covered' - character(len=*), parameter :: Sg_topo_field = 'Sg_topo' - character(len=*), parameter :: Sg_icemask_field = 'Sg_icemask' - - ! Fields needed in the g2x_lx attribute vector used as part of mapping qice from lnd to glc - character(len=:), allocatable :: g2x_lx_fields - - type(mct_aVect), pointer :: o2gacc_ox(:) ! Ocn export, lnd grid, cpl pes - allocated in driver - integer , target :: o2gacc_ox_cnt ! 
number of time samples accumulated - - real(r8), allocatable :: oceanTemperature(:) - real(r8), allocatable :: oceanSalinity(:) - real(r8), allocatable :: oceanHeatTransferVelocity(:) - real(r8), allocatable :: oceanSaltTransferVelocity(:) - real(r8), allocatable :: interfacePressure(:) - real(r8), allocatable :: iceTemperature(:) - real(r8), allocatable :: iceTemperatureDistance(:) - integer, allocatable :: iceFloatingMask(:) - real(r8), allocatable :: outInterfaceSalinity(:) - real(r8), allocatable :: outInterfaceTemperature(:) - real(r8), allocatable :: outFreshwaterFlux(:) - real(r8), allocatable :: outOceanHeatFlux(:) - real(r8), allocatable :: outIceHeatFlux(:) - - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_glc_init(infodata, lnd_c2_glc, ocn_c2_glcshelf) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and mapping variables - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - logical , intent(in) :: lnd_c2_glc ! .true. => lnd to glc coupling on - logical , intent(in) :: ocn_c2_glcshelf ! .true. => ocn to glc coupling on - ! - ! Local Variables - integer :: eli, egi, eoi - integer :: lsize_l - integer :: lsize_g - integer :: lsize_o - logical :: samegrid_lg ! samegrid land and glc - logical :: samegrid_go ! .true. => samegrid ocean and glc - logical :: esmf_map_flag ! .true. => use esmf for mapping - logical :: iamroot_CPLID ! .true. => CPLID masterproc - character(CL) :: lnd_gnam ! lnd grid - character(CL) :: glc_gnam ! glc grid - character(CL) :: ocn_gnam ! ocn grid - - type(mct_avect), pointer :: l2x_lx - type(mct_avect), pointer :: x2g_gx - type(mct_avect), pointer :: o2x_ox - - character(*), parameter :: subname = '(prep_glc_init)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call seq_infodata_getData(infodata , & - esmf_map_flag=esmf_map_flag , & - glc_present=glc_present , & - lnd_gnam=lnd_gnam , & - glc_gnam=glc_gnam , & - ocn_gnam=ocn_gnam) - - allocate(mapper_Sl2g) - allocate(mapper_Fl2g) - allocate(mapper_So2g) - allocate(mapper_Fo2g) - allocate(mapper_Fg2l) - - smb_renormalize = prep_glc_do_renormalize_smb(infodata) - - if (glc_present .and. lnd_c2_glc) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - l2x_lx => component_get_c2x_cx(lnd(1)) - lsize_l = mct_aVect_lsize(l2x_lx) - - x2g_gx => component_get_x2c_cx(glc(1)) - lsize_g = mct_aVect_lsize(x2g_gx) - - allocate(l2x_gx(num_inst_lnd)) - allocate(l2gacc_lx(num_inst_lnd)) - do eli = 1,num_inst_lnd - call mct_aVect_init(l2x_gx(eli), rList=seq_flds_x2g_fields, lsize=lsize_g) - call mct_aVect_zero(l2x_gx(eli)) - - call mct_aVect_init(l2gacc_lx(eli), rList=seq_flds_l2x_fields_to_glc, lsize=lsize_l) - call mct_aVect_zero(l2gacc_lx(eli)) - enddo - l2gacc_lx_cnt = 0 - - if (lnd_c2_glc) then - - samegrid_lg = .true. - if (trim(lnd_gnam) /= trim(glc_gnam)) samegrid_lg = .false. 
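! --- Illustrative sketch (editor-added, not part of the driver code above or of this patch) ---
! A hedged sketch of the accumulate-then-average pattern behind the *_cnt counters declared
! above: forcing fields are summed every coupling interval, the counter tracks how many samples
! were taken, and a time mean is formed before the averaged forcing is mapped to the glc grid.
! Array size, sample values, and the number of intervals are illustrative only.
program accum_avg_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: acc(3), sample(3)
  integer :: cnt, t

  acc = 0._r8
  cnt = 0
  do t = 1, 4                           ! pretend 4 coupling intervals in the period
     sample = real(t, r8)               ! stand-in for one interval of forcing data
     if (cnt == 0) then
        acc = sample                    ! first sample overwrites
     else
        acc = acc + sample              ! later samples accumulate
     end if
     cnt = cnt + 1
  end do
  if (cnt > 1) acc = acc / real(cnt, r8)
  print *, 'time-averaged forcing:', acc
  cnt = 0                               ! reset for the next accumulation period
end program accum_avg_sketch
! --- end sketch ---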
- - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sl2g' - end if - call seq_map_init_rcfile(mapper_Sl2g, lnd(1), glc(1), & - 'seq_maps.rc', 'lnd2glc_smapname:', 'lnd2glc_smaptype:', samegrid_lg, & - 'mapper_Sl2g initialization', esmf_map_flag) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fl2g' - end if - call seq_map_init_rcfile(mapper_Fl2g, lnd(1), glc(1), & - 'seq_maps.rc', 'lnd2glc_fmapname:', 'lnd2glc_fmaptype:', samegrid_lg, & - 'mapper_Fl2g initialization', esmf_map_flag) - - ! We need to initialize our own Fg2l mapper because in some cases (particularly - ! TG compsets - dlnd forcing CISM) the system doesn't otherwise create a Fg2l - ! mapper. - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fg2l' - end if - call seq_map_init_rcfile(mapper_Fg2l, glc(1), lnd(1), & - 'seq_maps.rc', 'glc2lnd_fmapname:', 'glc2lnd_fmaptype:', samegrid_lg, & - 'mapper_Fg2l initialization', esmf_map_flag) - - call prep_glc_set_g2x_lx_fields() - end if - call shr_sys_flush(logunit) - - end if - - if (glc_present .and. ocn_c2_glcshelf) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - o2x_ox => component_get_c2x_cx(ocn(1)) - lsize_o = mct_aVect_lsize(o2x_ox) - - x2g_gx => component_get_x2c_cx(glc(1)) - lsize_g = mct_aVect_lsize(x2g_gx) - - allocate(o2x_gx(num_inst_ocn)) - do eoi = 1,num_inst_ocn - call mct_aVect_init(o2x_gx(eoi), rList=seq_flds_o2x_fields, lsize=lsize_g) - call mct_aVect_zero(o2x_gx(eoi)) - enddo - - allocate(x2gacc_gx(num_inst_glc)) - do egi = 1,num_inst_glc - call mct_aVect_init(x2gacc_gx(egi), x2g_gx, lsize_g) - call mct_aVect_zero(x2gacc_gx(egi)) - end do - - x2gacc_gx_cnt = 0 - samegrid_go = .true. - if (trim(ocn_gnam) /= trim(glc_gnam)) samegrid_go = .false. - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_So2g' - end if - call seq_map_init_rcfile(mapper_So2g, ocn(1), glc(1), & - 'seq_maps.rc','ocn2glc_smapname:','ocn2glc_smaptype:',samegrid_go, & - 'mapper_So2g initialization',esmf_map_flag) - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fo2g' - end if - call seq_map_init_rcfile(mapper_Fo2g, ocn(1), glc(1), & - 'seq_maps.rc','ocn2glc_fmapname:','ocn2glc_fmaptype:',samegrid_go, & - 'mapper_Fo2g initialization',esmf_map_flag) - - !Initialize module-level arrays associated with compute_melt_fluxes - allocate(oceanTemperature(lsize_g)) - allocate(oceanSalinity(lsize_g)) - allocate(oceanHeatTransferVelocity(lsize_g)) - allocate(oceanSaltTransferVelocity(lsize_g)) - allocate(interfacePressure(lsize_g)) - allocate(iceTemperature(lsize_g)) - allocate(iceTemperatureDistance(lsize_g)) - allocate(iceFloatingMask(lsize_g)) - allocate(outInterfaceSalinity(lsize_g)) - allocate(outInterfaceTemperature(lsize_g)) - allocate(outFreshwaterFlux(lsize_g)) - allocate(outOceanHeatFlux(lsize_g)) - allocate(outIceHeatFlux(lsize_g)) - ! TODO: Can we allocate these only while used or are we worried about performance hit? - ! TODO: add deallocates! - - call shr_sys_flush(logunit) - - end if - - - end subroutine prep_glc_init - - !================================================================================================ - - function prep_glc_do_renormalize_smb(infodata) result(do_renormalize_smb) - ! Returns a logical saying whether we should do the smb renormalization - logical :: do_renormalize_smb ! function return value - ! - ! 
Arguments - type (seq_infodata_type) , intent(in) :: infodata - - ! Local variables - character(len=cl) :: glc_renormalize_smb ! namelist option saying whether to do smb renormalization - logical :: glc_coupled_fluxes ! does glc send fluxes to other components? - logical :: lnd_prognostic ! is lnd a prognostic component? - - character(len=*), parameter :: subname = '(prep_glc_do_renormalize_smb)' - !--------------------------------------------------------------- - - call seq_infodata_getdata(infodata, & - glc_renormalize_smb = glc_renormalize_smb, & - glc_coupled_fluxes = glc_coupled_fluxes, & - lnd_prognostic = lnd_prognostic) - - select case (glc_renormalize_smb) - case ('on') - do_renormalize_smb = .true. - case ('off') - do_renormalize_smb = .false. - case ('on_if_glc_coupled_fluxes') - if (.not. lnd_prognostic) then - ! Do not renormalize if we're running glc with dlnd (T compsets): In this case - ! there is no feedback from glc to lnd, and conservation is not important - do_renormalize_smb = .false. - else if (.not. glc_coupled_fluxes) then - ! Do not renormalize if glc isn't sending fluxes to other components: In this - ! case conservation is not important - do_renormalize_smb = .false. - else - ! lnd_prognostic is true and glc_coupled_fluxes is true - do_renormalize_smb = .true. - end if - case default - write(logunit,*) subname,' ERROR: unknown value for glc_renormalize_smb: ', & - trim(glc_renormalize_smb) - call shr_sys_abort(subname//' ERROR: unknown value for glc_renormalize_smb') - end select - end function prep_glc_do_renormalize_smb - - !================================================================================================ - - subroutine prep_glc_set_g2x_lx_fields() - - !--------------------------------------------------------------- - ! Description - ! Sets the module-level g2x_lx_fields variable. - ! - ! This gives the fields needed in the g2x_lx attribute vector used as part of mapping - ! qice from lnd to glc. - ! - ! Local Variables - character(len=GLC_ELEVCLASS_STRLEN), allocatable :: all_elevclass_strings(:) - character(len=:), allocatable :: frac_fields - character(len=:), allocatable :: topo_fields - integer :: strlen - - ! 1 is probably enough, but use 10 to be safe, in case the length of the delimiter - ! changes - integer, parameter :: extra_len_for_list_merge = 10 - - character(len=*), parameter :: subname = '(prep_glc_set_g2x_lx_fields)' - !--------------------------------------------------------------- - - allocate(all_elevclass_strings(0:glc_get_num_elevation_classes())) - all_elevclass_strings = glc_all_elevclass_strings(include_zero = .true.) - frac_fields = shr_string_listFromSuffixes( & - suffixes = all_elevclass_strings, & - strBase = Sg_frac_field) - ! Sg_topo is not actually needed on the land grid in - ! prep_glc_map_qice_conservative_lnd2glc, but it is required by the current interface - ! for map_glc2lnd_ec. - topo_fields = shr_string_listFromSuffixes( & - suffixes = all_elevclass_strings, & - strBase = Sg_topo_field) - - strlen = len_trim(frac_fields) + len_trim(topo_fields) + extra_len_for_list_merge - allocate(character(len=strlen) :: g2x_lx_fields) - call shr_string_listMerge(frac_fields, topo_fields, g2x_lx_fields) - - end subroutine prep_glc_set_g2x_lx_fields - - - !================================================================================================ - - subroutine prep_glc_accum_lnd(timer) - - !--------------------------------------------------------------- - ! Description - ! Accumulate glc inputs from lnd - ! - ! 
Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eli - type(mct_avect), pointer :: l2x_lx - - character(*), parameter :: subname = '(prep_glc_accum_lnd)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eli = 1,num_inst_lnd - l2x_lx => component_get_c2x_cx(lnd(eli)) - if (l2gacc_lx_cnt == 0) then - call mct_avect_copy(l2x_lx, l2gacc_lx(eli)) - else - call mct_avect_accum(l2x_lx, l2gacc_lx(eli)) - endif - end do - l2gacc_lx_cnt = l2gacc_lx_cnt + 1 - call t_drvstopf (trim(timer)) - - end subroutine prep_glc_accum_lnd - - !================================================================================================ - - subroutine prep_glc_accum_ocn(timer) - - !--------------------------------------------------------------- - ! Description - ! Accumulate glc inputs from ocn - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: egi - type(mct_avect), pointer :: x2g_gx - - character(*), parameter :: subname = '(prep_glc_accum_ocn)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do egi = 1,num_inst_glc - x2g_gx => component_get_x2c_cx(glc(egi)) - if (x2gacc_gx_cnt == 0) then - call mct_avect_copy(x2g_gx, x2gacc_gx(egi)) - else - call mct_avect_accum(x2g_gx, x2gacc_gx(egi)) - endif - end do - x2gacc_gx_cnt = x2gacc_gx_cnt + 1 - call t_drvstopf (trim(timer)) - - end subroutine prep_glc_accum_ocn - - !================================================================================================ - - - subroutine prep_glc_accum_avg(timer) - - !--------------------------------------------------------------- - ! Description - ! Finalize accumulation of glc inputs - ! Note: There could be separate accum_avg routines for forcing coming - ! from each component (LND and OCN), but they can be combined here - ! by taking advantage of l2gacc_lx_cnt and x2gacc_gx_cnt variables - ! that will only be greater than 0 if corresponding coupling is enabled. - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eli, egi - type(mct_avect), pointer :: x2g_gx - - character(*), parameter :: subname = '(prep_glc_accum_avg)' - !--------------------------------------------------------------- - - ! Accumulation for LND - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - if (l2gacc_lx_cnt > 1) then - do eli = 1,num_inst_lnd - call mct_avect_avg(l2gacc_lx(eli), l2gacc_lx_cnt) - end do - end if - l2gacc_lx_cnt = 0 - - ! Accumulation for OCN - if (x2gacc_gx_cnt > 1) then - do egi = 1,num_inst_glc - ! temporary formation of average - call mct_avect_avg(x2gacc_gx(egi), x2gacc_gx_cnt) - - ! ***NOTE***THE FOLLOWING ACTUALLY MODIFIES x2g_gx - x2g_gx => component_get_x2c_cx(glc(egi)) - call mct_avect_copy(x2gacc_gx(egi), x2g_gx) - enddo - end if - x2gacc_gx_cnt = 0 - - call t_drvstopf (trim(timer)) - - end subroutine prep_glc_accum_avg - - !================================================================================================ - - subroutine prep_glc_mrg_lnd(infodata, fractions_gx, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Merge glc inputs - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type(mct_aVect) , intent(in) :: fractions_gx(:) - character(len=*) , intent(in) :: timer_mrg - ! - ! 
Local Variables - integer :: egi, eli, efi - type(mct_avect), pointer :: x2g_gx - character(*), parameter :: subname = '(prep_glc_mrg_lnd)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer_mrg),barrier=mpicom_CPLID) - do egi = 1,num_inst_glc - ! Use fortran mod to address ensembles in merge - eli = mod((egi-1),num_inst_lnd) + 1 - efi = mod((egi-1),num_inst_frc) + 1 - - x2g_gx => component_get_x2c_cx(glc(egi)) - call prep_glc_merge_lnd_forcing(l2x_gx(eli), fractions_gx(efi), x2g_gx) - enddo - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_glc_mrg_lnd - - !================================================================================================ - - subroutine prep_glc_merge_lnd_forcing( l2x_g, fractions_g, x2g_g ) - - !----------------------------------------------------------------------- - ! Description - ! "Merge" land forcing for glc input. - ! - ! State fields are copied directly, meaning that averages are taken just over the - ! land-covered portion of the glc domain. - ! - ! Flux fields are downweighted by landfrac, which effectively sends a 0 flux from the - ! non-land-covered portion of the glc domain. - ! - ! Arguments - type(mct_aVect), intent(inout) :: l2x_g ! input - type(mct_aVect), intent(in) :: fractions_g - type(mct_aVect), intent(inout) :: x2g_g ! output - !----------------------------------------------------------------------- - - integer :: num_flux_fields - integer :: num_state_fields - integer :: nflds - integer :: i,n - integer :: mrgstr_index - integer :: index_l2x - integer :: index_x2g - integer :: index_lfrac - integer :: lsize - logical :: iamroot - logical, save :: first_time = .true. - character(CL),allocatable :: mrgstr(:) ! temporary string - character(CL) :: field ! string converted to char - character(*), parameter :: subname = '(prep_glc_merge_lnd_forcing) ' - - !----------------------------------------------------------------------- - - call seq_comm_getdata(CPLID, iamroot=iamroot) - lsize = mct_aVect_lsize(x2g_g) - - num_flux_fields = shr_string_listGetNum(trim(seq_flds_x2g_fluxes_from_lnd)) - num_state_fields = shr_string_listGetNum(trim(seq_flds_x2g_states_from_lnd)) - - if (first_time) then - nflds = num_flux_fields + num_state_fields - allocate(mrgstr(nflds)) - end if - - mrgstr_index = 1 - - do i = 1, num_state_fields - call seq_flds_getField(field, i, seq_flds_x2g_states) - index_l2x = mct_aVect_indexRA(l2x_g, trim(field)) - index_x2g = mct_aVect_indexRA(x2g_g, trim(field)) - - if (first_time) then - mrgstr(mrgstr_index) = subname//'x2g%'//trim(field)//' =' // & - ' = l2x%'//trim(field) - end if - - do n = 1, lsize - x2g_g%rAttr(index_x2g,n) = l2x_g%rAttr(index_l2x,n) - end do - - mrgstr_index = mrgstr_index + 1 - enddo - - index_lfrac = mct_aVect_indexRA(fractions_g,"lfrac") - do i = 1, num_flux_fields - - call seq_flds_getField(field, i, seq_flds_x2g_fluxes_from_lnd) - index_l2x = mct_aVect_indexRA(l2x_g, trim(field)) - index_x2g = mct_aVect_indexRA(x2g_g, trim(field)) - - if (trim(field) == qice_fieldname) then - - if (first_time) then - mrgstr(mrgstr_index) = subname//'x2g%'//trim(field)//' =' // & - ' = l2x%'//trim(field) - end if - - ! treat qice as if it were a state variable, with a simple copy. - do n = 1, lsize - x2g_g%rAttr(index_x2g,n) = l2x_g%rAttr(index_l2x,n) - end do - - else - write(logunit,*) subname,' ERROR: Flux fields other than ', & - qice_fieldname, ' currently are not handled in lnd2glc remapping.' 
- write(logunit,*) '(Attempt to handle flux field <', trim(field), '>.)' - write(logunit,*) 'Substantial thought is needed to determine how to remap other fluxes' - write(logunit,*) 'in a smooth, conservative manner.' - call shr_sys_abort(subname//& - ' ERROR: Flux fields other than qice currently are not handled in lnd2glc remapping.') - endif ! qice_fieldname - - mrgstr_index = mrgstr_index + 1 - - end do - - if (first_time) then - if (iamroot) then - write(logunit,'(A)') subname//' Summary:' - do i = 1,nflds - write(logunit,'(A)') trim(mrgstr(i)) - enddo - endif - deallocate(mrgstr) - endif - - first_time = .false. - - end subroutine prep_glc_merge_lnd_forcing - - - subroutine prep_glc_calc_o2x_gx(timer) - !--------------------------------------------------------------- - ! Description - ! Create o2x_gx - - ! Arguments - character(len=*), intent(in) :: timer - - character(*), parameter :: subname = '(prep_glc_calc_o2x_gx)' - ! Local Variables - integer eoi - type(mct_avect), pointer :: o2x_ox - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eoi = 1,num_inst_ocn - o2x_ox => component_get_c2x_cx(ocn(eoi)) - call seq_map_map(mapper_So2g, o2x_ox, o2x_gx(eoi), & - fldlist=seq_flds_x2g_states_from_ocn,norm=.true.) - enddo - - call t_drvstopf (trim(timer)) - end subroutine prep_glc_calc_o2x_gx - - !================================================================================================ - - - !================================================================================================ - - subroutine prep_glc_calc_l2x_gx(fractions_lx, timer) - !--------------------------------------------------------------- - ! Description - ! Create l2x_gx (note that l2x_gx is a local module variable) - ! Also l2x_gx is really the accumulated l2xacc_lx mapped to l2x_gx - ! - use shr_string_mod, only : shr_string_listGetNum - ! Arguments - type(mct_aVect) , intent(in) :: fractions_lx(:) - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: egi, eli, efi - integer :: num_flux_fields - integer :: num_state_fields - integer :: field_num - character(len=cl) :: fieldname - character(*), parameter :: subname = '(prep_glc_calc_l2x_gx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - - num_flux_fields = shr_string_listGetNum(trim(seq_flds_x2g_fluxes_from_lnd)) - num_state_fields = shr_string_listGetNum(trim(seq_flds_x2g_states_from_lnd)) - - do egi = 1,num_inst_glc - ! Use fortran mod to address ensembles in merge - eli = mod((egi-1),num_inst_lnd) + 1 - efi = mod((egi-1),num_inst_frc) + 1 - - do field_num = 1, num_flux_fields - call seq_flds_getField(fieldname, field_num, seq_flds_x2g_fluxes_from_lnd) - - if (trim(fieldname) == qice_fieldname) then - - ! Use a bilinear (Sl2g) mapper, as for states. - ! The Fg2l mapper is needed to map some glc fields to the land grid - ! for purposes of conservation. - call prep_glc_map_qice_conservative_lnd2glc(egi=egi, eli=eli, & - fractions_lx = fractions_lx(efi), & - mapper_Sl2g = mapper_Sl2g, & - mapper_Fg2l = mapper_Fg2l) - - else - write(logunit,*) subname,' ERROR: Flux fields other than ', & - qice_fieldname, ' currently are not handled in lnd2glc remapping.' - write(logunit,*) '(Attempt to handle flux field <', trim(fieldname), '>.)' - write(logunit,*) 'Substantial thought is needed to determine how to remap other fluxes' - write(logunit,*) 'in a smooth, conservative manner.' 
- call shr_sys_abort(subname//& - ' ERROR: Flux fields other than qice currently are not handled in lnd2glc remapping.') - endif ! qice_fieldname - - end do - - do field_num = 1, num_state_fields - call seq_flds_getField(fieldname, field_num, seq_flds_x2g_states_from_lnd) - call prep_glc_map_one_state_field_lnd2glc(egi=egi, eli=eli, & - fieldname = fieldname, & - fractions_lx = fractions_lx(efi), & - mapper = mapper_Sl2g) - end do - - enddo ! egi - - call t_drvstopf (trim(timer)) - - end subroutine prep_glc_calc_l2x_gx - - !================================================================================================ - - subroutine prep_glc_map_one_state_field_lnd2glc(egi, eli, fieldname, fractions_lx, mapper) - ! Maps a single field from the land grid to the glc grid. - ! - ! This mapping is not conservative, so should only be used for state fields. - ! - ! NOTE(wjs, 2017-05-10) We used to map each field separately because each field needed - ! its own vertical gradient calculator. Now that we don't need vertical gradient - ! calculators, we may be able to change this to map multiple fields at once, at least - ! for part of map_lnd2glc. - - use map_lnd2glc_mod, only : map_lnd2glc - - ! Arguments - integer, intent(in) :: egi ! glc instance index - integer, intent(in) :: eli ! lnd instance index - character(len=*), intent(in) :: fieldname ! base name of field to map (without elevation class suffix) - type(mct_aVect) , intent(in) :: fractions_lx ! fractions on the land grid, for this frac instance - type(seq_map), intent(inout) :: mapper - ! - ! Local Variables - type(mct_avect), pointer :: g2x_gx ! glc export, glc grid, cpl pes - allocated in driver - !--------------------------------------------------------------- - - g2x_gx => component_get_c2x_cx(glc(egi)) - - call map_lnd2glc(l2x_l = l2gacc_lx(eli), & - landfrac_l = fractions_lx, & - g2x_g = g2x_gx, & - fieldname = fieldname, & - mapper = mapper, & - l2x_g = l2x_gx(eli)) - - end subroutine prep_glc_map_one_state_field_lnd2glc - - !================================================================================================ - - subroutine prep_glc_calculate_subshelf_boundary_fluxes - - !--------------------------------------------------------------- - ! Description - ! On the ice sheet grid, calculate shelf boundary fluxes - - use shr_const_mod , only: SHR_CONST_KAPPA_LAND_ICE - - ! Local Variables - - integer :: gsize, n, egi - type(mct_aVect), pointer :: o2x_ox ! Ocn export, ocn grid, cpl pes - type(mct_aVect), pointer :: x2g_gx ! Glc import, glc grid, cpl pes - type(mct_aVect), pointer :: g2x_gx ! Glc import, glc grid, cpl pes - - integer :: index_x2g_So_blt - integer :: index_x2g_So_bls - integer :: index_x2g_So_htv - integer :: index_x2g_So_stv - integer :: index_x2g_So_rhoeff - integer :: index_g2x_Sg_tbot - integer :: index_g2x_Sg_dztbot - integer :: index_g2x_Sg_lithop - integer :: index_g2x_Sg_icemask_floating - - integer :: index_g2x_Sg_blis - integer :: index_g2x_Sg_blit - integer :: index_g2x_Fogx_qiceho - integer :: index_g2x_Fogx_qicelo - integer :: index_x2g_Fogx_qiceli - integer :: index_x2g_Fogx_qicehi - - character(*), parameter :: subname = '(prep_glc_calculate_subshelf_boundary_fluxes)' - !--------------------------------------------------------------- - - if (.not.(glc_present)) return - - do egi = 1,num_inst_glc - - o2x_ox => component_get_c2x_cx(ocn(egi)) - g2x_gx => component_get_c2x_cx(glc(egi)) - x2g_gx => component_get_x2c_cx(glc(egi)) - - !Remap relevant ocean variables to ice sheet grid. 
- !Done here instead of in glc-frequency mapping so it happens within ocean coupling interval. - ! Also could map o2x_ox->o2x_gx(1) but using x2g_gx as destination allows us to see - ! these fields on the GLC grid of the coupler history file, which helps with debugging. - call seq_map_map(mapper_So2g, o2x_ox, x2g_gx, & - fldlist=seq_flds_x2g_states_from_ocn,norm=.true.) - - ! inputs to melt flux calculation - index_x2g_So_blt = mct_avect_indexra(x2g_gx,'So_blt',perrwith='quiet') - index_x2g_So_bls = mct_avect_indexra(x2g_gx,'So_bls',perrwith='quiet') - index_x2g_So_htv = mct_avect_indexra(x2g_gx,'So_htv',perrwith='quiet') - index_x2g_So_stv = mct_avect_indexra(x2g_gx,'So_stv',perrwith='quiet') - index_x2g_So_rhoeff = mct_avect_indexra(x2g_gx,'So_rhoeff',perrwith='quiet') - - index_g2x_Sg_tbot = mct_avect_indexra(g2x_gx,'Sg_tbot',perrwith='quiet') - index_g2x_Sg_dztbot = mct_avect_indexra(g2x_gx,'Sg_dztbot',perrwith='quiet') - index_g2x_Sg_lithop = mct_avect_indexra(g2x_gx,'Sg_lithop',perrwith='quiet') - index_g2x_Sg_icemask_floating = mct_avect_indexra(g2x_gx,'Sg_icemask_floating',perrwith='quiet') - - ! outputs to melt flux calculation - index_g2x_Sg_blis = mct_avect_indexra(g2x_gx,'Sg_blis',perrwith='quiet') - index_g2x_Sg_blit = mct_avect_indexra(g2x_gx,'Sg_blit',perrwith='quiet') - index_g2x_Fogx_qiceho = mct_avect_indexra(g2x_gx,'Fogx_qiceho',perrwith='quiet') - index_g2x_Fogx_qicelo = mct_avect_indexra(g2x_gx,'Fogx_qicelo',perrwith='quiet') - index_x2g_Fogx_qiceli = mct_avect_indexra(x2g_gx,'Fogx_qiceli',perrwith='quiet') - index_x2g_Fogx_qicehi = mct_avect_indexra(x2g_gx,'Fogx_qicehi',perrwith='quiet') - - gsize = mct_aVect_lsize(g2x_gx) - - do n=1,gsize - !Extract glc and ocn-sourced coupler fields used as input to compute_melt_fluxes to local arrays... - - ! Fields from the ocean, now on the GLC grid - oceanTemperature(n) = x2g_gx%rAttr(index_x2g_So_blt,n) - oceanSalinity(n) = x2g_gx%rAttr(index_x2g_So_bls,n) - oceanHeatTransferVelocity(n) = x2g_gx%rAttr(index_x2g_So_htv,n) - oceanSaltTransferVelocity(n) = x2g_gx%rAttr(index_x2g_So_stv,n) - - ! Fields from the ice sheet model (still on the GLC grid) - iceTemperature(n) = g2x_gx%rAttr(index_g2x_Sg_tbot,n) - iceTemperatureDistance(n) = g2x_gx%rAttr(index_g2x_Sg_dztbot,n) - interfacePressure(n) = g2x_gx%rAttr(index_g2x_Sg_lithop,n) - iceFloatingMask(n) = g2x_gx%rAttr(index_g2x_Sg_icemask_floating,n) - - !... initialize local compute_melt_fluxes output arrays... - outInterfaceSalinity(n) = 0.0_r8 - outInterfaceTemperature(n) = 0.0_r8 - outFreshwaterFlux(n) = 0.0_r8 - outOceanHeatFlux(n) = 0.0_r8 - outIceHeatFlux(n) = 0.0_r8 - end do - - !...calculate fluxes... 
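The arrays filled above are handed to compute_melt_fluxes in the call that follows. As a rough guide to what that routine solves for each floating cell: combining the linearized freezing relation Tb = T0 + dTf_dS*Sb with the interface heat and salt balances yields a quadratic a*Sb**2 + b*Sb + c = 0 in the interface salinity, which is evaluated as -2c/(b + sqrt(b**2 - 4ac)) so that a ~ 0 (purely insulating ice, SHR_CONST_KAPPA_LAND_ICE = 0) cannot produce a divide-by-zero. A self-contained sketch for a single melting cell, using illustrative stand-in constants rather than the shr_const_mod values:

    program melt_flux_sketch
      implicit none
      integer, parameter :: r8 = selected_real_kind(12)
      ! Illustrative stand-ins (NOT the shr_const_mod values or real coupler inputs)
      real(r8), parameter :: Tlatent    = 3.34e5_r8/3.996e3_r8 ! latent heat / seawater cp [K]
      real(r8), parameter :: T0         = 0.0_r8      ! fresh-water freezing point at interface pressure [C]
      real(r8), parameter :: dTf_dS     = -5.73e-2_r8 ! freezing-point change per salinity unit [C/psu]
      real(r8), parameter :: gammaRatio = 2.4e-2_r8   ! salt-to-heat transfer velocity ratio
      real(r8), parameter :: oceanT     = -1.0_r8     ! top-layer ocean temperature [C]
      real(r8), parameter :: oceanS     = 34.5_r8     ! top-layer ocean salinity [psu]
      real(r8) :: a, b, c, Sb, Tb
      ! Quadratic coefficients with nu = 0 (insulating ice), matching the module's special case
      a = -dTf_dS
      b = gammaRatio*Tlatent + oceanT - T0    ! ~1.0 here; gammaRatio*Tlatent ~ 2 C
      c = -gammaRatio*Tlatent*max(oceanS, 0.0_r8)
      ! Robust root of a*Sb**2 + b*Sb + c = 0: never divides by a, which may be ~0
      Sb = -(2.0_r8*c)/(b + sqrt(b*b - 4.0_r8*a*c))
      Tb = T0 + dTf_dS*Sb
      print *, 'interface salinity    [psu] =', Sb        ! ~27 for these inputs
      print *, 'interface temperature [C]   =', Tb        ! ~-1.55
      print *, 'melting? ', (oceanS/Sb - 1.0_r8) > 0.0_r8 ! melt-rate factor is positive
    end program melt_flux_sketch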
- call compute_melt_fluxes(oceanTemperature=oceanTemperature,& - oceanSalinity=oceanSalinity,& - oceanHeatTransferVelocity=oceanHeatTransferVelocity,& - oceanSaltTransferVelocity=oceanSaltTransferVelocity,& - interfacePressure=interfacePressure,& - iceTemperature=iceTemperature,& - iceTemperatureDistance=iceTemperatureDistance, & - iceFloatingMask=iceFloatingMask, & - outInterfaceSalinity=outInterfaceSalinity,& - outInterfaceTemperature=outInterfaceTemperature,& - outFreshwaterFlux=outFreshwaterFlux,& - outOceanHeatFlux=outOceanHeatFlux,& - outIceHeatFlux=outIceHeatFlux,& - gsize=gsize) - - !...and assign fluxes to glc and ocn-directed coupler fields - do n=1,gsize - !Assign outputs from compute_melt_fluxes back into coupler attributes - g2x_gx%rAttr(index_g2x_Sg_blis,n) = outInterfaceSalinity(n) !to ocean - g2x_gx%rAttr(index_g2x_Sg_blit,n) = outInterfaceTemperature(n) !to ocean - g2x_gx%rAttr(index_g2x_Fogx_qiceho,n) = outOceanHeatFlux(n) !to ocean - g2x_gx%rAttr(index_g2x_Fogx_qicelo,n)= outFreshwaterFlux(n) !to ocean - x2g_gx%rAttr(index_x2g_Fogx_qicehi,n) = outIceHeatFlux(n) !to ice sheet - x2g_gx%rAttr(index_x2g_Fogx_qiceli,n) = -1.0_r8 * outFreshwaterFlux(n) !to ice sheet - end do - - !Note: remap ocean-side outputs back onto ocean grid done in call to prep_ocn_shelf_calc_g2x_ox - - end do ! loop over GLC instances - - end subroutine prep_glc_calculate_subshelf_boundary_fluxes - - !================================================================================================ - - subroutine prep_glc_zero_fields() - - !--------------------------------------------------------------- - ! Description - ! Set glc inputs to zero - ! - ! This is appropriate during time intervals when we're not sending valid data to glc. - ! In principle we shouldn't need to zero the fields at these times (instead, glc - ! should just ignore the fields at these times). However, some tests (like an ERS or - ! ERI test that stops the final run segment mid-year) can fail if we don't explicitly - ! zero the fields, because these x2g fields can then differ upon restart. - - ! Local Variables - integer :: egi - type(mct_avect), pointer :: x2g_gx - !--------------------------------------------------------------- - - do egi = 1,num_inst_glc - x2g_gx => component_get_x2c_cx(glc(egi)) - call mct_aVect_zero(x2g_gx) - end do - end subroutine prep_glc_zero_fields - - !================================================================================================ - - subroutine prep_glc_map_qice_conservative_lnd2glc(egi, eli, fractions_lx, & - mapper_Sl2g, mapper_Fg2l) - - ! Maps the surface mass balance field (qice) from the land grid to the glc grid. - ! - ! Use a smooth, non-conservative (bilinear) mapping, followed by a correction for - ! conservation. - ! - ! For high-level design, see: - ! https://docs.google.com/document/d/1H_SuK6SfCv1x6dK91q80dFInPbLYcOkUj_iAa6WRnqQ/edit - - use map_lnd2glc_mod, only : map_lnd2glc - - ! Arguments - integer, intent(in) :: egi ! glc instance index - integer, intent(in) :: eli ! lnd instance index - type(mct_aVect) , intent(in) :: fractions_lx ! fractions on the land grid, for this frac instance - type(seq_map), intent(inout) :: mapper_Sl2g ! state mapper from land to glc grid; non-conservative - type(seq_map), intent(inout) :: mapper_Fg2l ! flux mapper from glc to land grid; conservative - ! - ! Local Variables - type(mct_aVect), pointer :: g2x_gx ! glc export, glc grid - - logical :: iamroot - - !Note: The sums in this subroutine use the coupler areas aream_l and aream_g. - ! 
The coupler areas can differ from the native areas area_l and area_g. - ! (For CISM with a polar stereographic projection, area_g can differ from aream_g - ! by up to ~10%.) - ! If so, then the calls to subroutine mct_avect_vecmult in component_mod.F90 - ! (just before and after the call to comp_run) should adjust the SMB fluxes - ! such that in each grid cell, the native value of area*flux is equal to the - ! coupler value of aream*flux. This assumes that the SMB field is contained in - ! seq_fields l2x_fluxes and seq_fields_x2g_fluxes. - - real(r8), dimension(:), allocatable :: aream_g ! cell areas on glc grid, for mapping - real(r8), dimension(:), allocatable :: area_g ! cell areas on glc grid, according to glc model - - type(mct_ggrid), pointer :: dom_g ! glc grid info - - integer :: lsize_g ! number of points on glc grid - - integer :: n - integer :: km, ka - - real(r8), pointer :: qice_g(:) ! qice data on glc grid - - !--------------------------------------------------------------- - - call seq_comm_getdata(CPLID, iamroot=iamroot) - - if (iamroot) then - write(logunit,*) ' ' - write(logunit,*) 'In prep_glc_map_qice_conservative_lnd2glc' - write(logunit,*) 'smb_renormalize = ', smb_renormalize - endif - - ! Get attribute vector needed for mapping and conservation - g2x_gx => component_get_c2x_cx(glc(egi)) - - ! get grid size - lsize_g = mct_aVect_lsize(l2x_gx(eli)) - - ! allocate and fill area arrays on the glc grid - ! (Note that we get domain information from instance 1, following what's done in - ! other parts of the coupler.) - dom_g => component_get_dom_cx(glc(1)) - - allocate(aream_g(lsize_g)) - km = mct_aVect_indexRa(dom_g%data, "aream" ) - aream_g(:) = dom_g%data%rAttr(km,:) - - allocate(area_g(lsize_g)) - ka = mct_aVect_indexRa(dom_g%data, "area" ) - area_g(:) = dom_g%data%rAttr(ka,:) - - ! Map the SMB from the land grid to the glc grid, using a non-conservative state mapper. - call map_lnd2glc(l2x_l = l2gacc_lx(eli), & - landfrac_l = fractions_lx, & - g2x_g = g2x_gx, & - fieldname = qice_fieldname, & - mapper = mapper_Sl2g, & - l2x_g = l2x_gx(eli)) - - ! Export the remapped SMB to a local array - allocate(qice_g(lsize_g)) - call mct_aVect_exportRattr(l2x_gx(eli), trim(qice_fieldname), qice_g) - - ! Make a preemptive adjustment to qice_g to account for area differences between CISM and the coupler. - ! In component_mod.F90, there is a call to mct_avect_vecmult, which multiplies the fluxes - ! by aream_g/area_g for conservation purposes. Where CISM areas are larger (area_g > aream_g), - ! the fluxes are reduced, and where CISM areas are smaller, the fluxes are increased. - ! As a result, an SMB of 1 m/yr in CLM would be converted to an SMB ranging from - ! ~0.9 to 1.05 m/yr in CISM (with smaller values where CISM areas are larger, and larger - ! values where CISM areas are smaller). - ! Here, to keep CISM values close to the CLM values in the corresponding locations, - ! we anticipate the later correction and multiply qice_g by area_g/aream_g. - ! Then the later call to mct_avect_vecmult will bring qice back to the original values - ! obtained from bilinear remapping. - ! If Flgl_qice were changed to a state (and not included in seq_flds_x2g_fluxes), - ! then we could skip this adjustment. - ! - ! Note that we are free to do this or any other adjustments we want to qice at this - ! point in the remapping, because the conservation correction will ensure that we - ! still conserve globally despite these adjustments (and smb_renormalize = .false. - ! 
should only be used in cases where conservation doesn't matter anyway). - - do n = 1, lsize_g - if (aream_g(n) > 0.0_r8) then - qice_g(n) = qice_g(n) * area_g(n)/aream_g(n) - else - qice_g(n) = 0.0_r8 - endif - enddo - - if (smb_renormalize) then - call prep_glc_renormalize_smb( & - eli = eli, & - fractions_lx = fractions_lx, & - g2x_gx = g2x_gx, & - mapper_Fg2l = mapper_Fg2l, & - aream_g = aream_g, & - qice_g = qice_g) - end if - - ! Put the adjusted SMB back into l2x_gx. - ! - ! If we are doing renormalization, then this is the renormalized SMB. Whether or not - ! we are doing renormalization, this captures the preemptive adjustment to qice_g to - ! account for area differences between CISM and the coupler. - call mct_aVect_importRattr(l2x_gx(eli), qice_fieldname, qice_g) - - ! clean up - - deallocate(aream_g) - deallocate(area_g) - deallocate(qice_g) - - end subroutine prep_glc_map_qice_conservative_lnd2glc - - !================================================================================================ - - subroutine prep_glc_renormalize_smb(eli, fractions_lx, g2x_gx, mapper_Fg2l, aream_g, qice_g) - - ! Renormalizes surface mass balance (smb, here named qice_g) so that the global - ! integral on the glc grid is equal to the global integral on the land grid. - ! - ! This is required for conservation - although conservation is only necessary if we - ! are running with a fully-interactive, two-way-coupled glc. - ! - ! For high-level design, see: - ! https://docs.google.com/document/d/1H_SuK6SfCv1x6dK91q80dFInPbLYcOkUj_iAa6WRnqQ/edit - - use map_glc2lnd_mod, only : map_glc2lnd_ec - - ! Arguments - integer , intent(in) :: eli ! lnd instance index - type(mct_aVect) , intent(in) :: fractions_lx ! fractions on the land grid, for this frac instance - type(mct_aVect) , intent(in) :: g2x_gx ! glc export, glc grid - type(seq_map) , intent(inout) :: mapper_Fg2l ! flux mapper from glc to land grid; conservative - real(r8) , intent(in) :: aream_g(:) ! cell areas on glc grid, for mapping - real(r8) , intent(inout) :: qice_g(:) ! qice data on glc grid - - ! - ! Local Variables - integer :: mpicom - logical :: iamroot - - type(mct_ggrid), pointer :: dom_l ! land grid info - - integer :: lsize_l ! number of points on land grid - integer :: lsize_g ! number of points on glc grid - - real(r8), dimension(:), allocatable :: aream_l ! cell areas on land grid, for mapping - - real(r8), pointer :: qice_l(:,:) ! SMB (Flgl_qice) on land grid - real(r8), pointer :: frac_l(:,:) ! EC fractions (Sg_ice_covered) on land grid - real(r8), pointer :: tmp_field_l(:) ! temporary field on land grid - - ! The following need to be pointers to satisfy the MCT interface - ! Note: Sg_icemask defines where the ice sheet model can receive a nonzero SMB from the land model. - real(r8), pointer :: Sg_icemask_g(:) ! icemask on glc grid - real(r8), pointer :: Sg_icemask_l(:) ! icemask on land grid - real(r8), pointer :: lfrac(:) ! land fraction on land grid - - type(mct_aVect) :: g2x_lx ! glc export, lnd grid (not a pointer: created locally) - type(mct_avect) :: Sg_icemask_l_av ! temporary attribute vector holding Sg_icemask on the land grid - - integer :: nEC ! number of elevation classes - integer :: n - integer :: ec - integer :: km - - ! various strings for building field names - character(len=:), allocatable :: elevclass_as_string - character(len=:), allocatable :: qice_field - character(len=:), allocatable :: frac_field - - ! 
local and global sums of accumulation and ablation; used to compute renormalization factors - - real(r8) :: local_accum_on_land_grid - real(r8) :: global_accum_on_land_grid - real(r8) :: local_accum_on_glc_grid - real(r8) :: global_accum_on_glc_grid - - real(r8) :: local_ablat_on_land_grid - real(r8) :: global_ablat_on_land_grid - real(r8) :: local_ablat_on_glc_grid - real(r8) :: global_ablat_on_glc_grid - - ! renormalization factors (should be close to 1, e.g. in range 0.95 to 1.05) - real(r8) :: accum_renorm_factor ! ratio between global accumulation on the two grids - real(r8) :: ablat_renorm_factor ! ratio between global ablation on the two grids - - real(r8) :: effective_area ! grid cell area multiplied by min(lfrac,Sg_icemask_l). - ! This is the area that can contribute SMB to the ice sheet model. - - - !--------------------------------------------------------------- - - lsize_g = size(qice_g) - SHR_ASSERT_FL((size(aream_g) == lsize_g), __FILE__, __LINE__) - - call seq_comm_setptrs(CPLID, mpicom=mpicom) - call seq_comm_getdata(CPLID, iamroot=iamroot) - lsize_l = mct_aVect_lsize(l2gacc_lx(eli)) - - ! allocate and fill area arrays on the land grid - ! (Note that we get domain information from instance 1, following what's done in - ! other parts of the coupler.) - dom_l => component_get_dom_cx(lnd(1)) - - allocate(aream_l(lsize_l)) - km = mct_aVect_indexRa(dom_l%data, "aream" ) - aream_l(:) = dom_l%data%rAttr(km,:) - - ! Export land fractions from fractions_lx to a local array - allocate(lfrac(lsize_l)) - call mct_aVect_exportRattr(fractions_lx, "lfrac", lfrac) - - ! Map Sg_icemask from the glc grid to the land grid. - ! This may not be necessary, if Sg_icemask_l has already been mapped from Sg_icemask_g. - ! It is done here for two reasons: - ! (1) The mapping will *not* have been done if we are running with dlnd (e.g., a TG case). - ! (2) Because of coupler lags, the current Sg_icemask_l might not be up to date with - ! Sg_icemask_g. This probably isn't a problem in practice, but doing the mapping - ! here ensures the mask is up to date. - ! - ! This mapping uses the same options as the standard glc -> lnd mapping done in - ! prep_lnd_calc_g2x_lx. If that mapping ever changed (e.g., changing norm to - ! .false.), then we should change this mapping, too. - ! - ! BUG(wjs, 2017-05-11, #1516) I think we actually want norm = .false. here, but this - ! requires some more thought - call mct_aVect_init(Sg_icemask_l_av, rList = Sg_icemask_field, lsize = lsize_l) - call seq_map_map(mapper = mapper_Fg2l, & - av_s = g2x_gx, & - av_d = Sg_icemask_l_av, & - fldlist = Sg_icemask_field, & - norm = .true.) - - ! Export Sg_icemask_l from the temporary attribute vector to a local array - allocate(Sg_icemask_l(lsize_l)) - call mct_aVect_exportRattr(Sg_icemask_l_av, Sg_icemask_field, Sg_icemask_l) - - ! Clean the temporary attribute vector - call mct_aVect_clean(Sg_icemask_l_av) - - ! Map Sg_ice_covered from the glc grid to the land grid. - ! This gives the fields Sg_ice_covered00, Sg_ice_covered01, etc. on the land grid. - ! These fields are needed to integrate the total SMB on the land grid, for conservation purposes. - ! As above, the mapping may not be necessary, because Sg_ice_covered might already have been mapped. - ! However, the mapping will not have been done in a TG case with dlnd, and it might not - ! be up to date because of coupler lags (though the latter probably isn't a problem - ! in practice). - ! - ! Note that, for a case with full two-way coupling, we will only conserve if the - ! 
actual land cover used over the course of the year matches these currently-remapped - ! values. This should generally be the case with the current coupling setup. - ! - ! One could argue that it would be safer (for conservation purposes) if LND sent its - ! grid cell average SMB values, or if it sent its own notion of the area in each - ! elevation class for the purpose of creating grid cell average SMB values here. But - ! these options cause problems if we're not doing full two-way coupling (e.g., in a TG - ! case with dlnd, or in the common case where GLC is a diagnostic component that - ! doesn't cause updates in the glacier areas in LND). In these cases without full - ! two-way coupling, if we use the LND's notion of the area in each elevation class, - ! then the conservation corrections would end up correcting for discrepancies in - ! elevation class areas between LND and GLC, rather than just correcting for - ! discrepancies arising from the remapping of SMB. (And before you get worried: It - ! doesn't matter that we are not conserving in these cases without full two-way - ! coupling, because GLC isn't connected with the rest of the system in terms of energy - ! and mass in these cases. So in these cases, it's okay that the LND integral computed - ! here differs from the integral that LND itself would compute.) - - ! Create an attribute vector g2x_lx to hold the mapped fields - call mct_aVect_init(g2x_lx, rList=g2x_lx_fields, lsize=lsize_l) - - ! Map Sg_ice_covered and Sg_topo from glc to land - call map_glc2lnd_ec( & - g2x_g = g2x_gx, & - frac_field = Sg_frac_field, & - topo_field = Sg_topo_field, & - icemask_field = Sg_icemask_field, & - extra_fields = ' ', & ! no extra fields - mapper = mapper_Fg2l, & - g2x_l = g2x_lx) - - ! Export qice and Sg_ice_covered in each elevation class to local arrays. - ! Note: qice comes from l2gacc_lx; frac comes from g2x_lx. - - nEC = glc_get_num_elevation_classes() - - allocate(qice_l(lsize_l,0:nEC)) - allocate(frac_l(lsize_l,0:nEC)) - allocate(tmp_field_l(lsize_l)) - - do ec = 0, nEC - elevclass_as_string = glc_elevclass_as_string(ec) - - frac_field = Sg_frac_field // elevclass_as_string ! Sg_ice_covered01, etc. - call mct_aVect_exportRattr(g2x_lx, trim(frac_field), tmp_field_l) - frac_l(:,ec) = tmp_field_l(:) - - qice_field = qice_fieldname // elevclass_as_string ! Flgl_qice01, etc. - call mct_aVect_exportRattr(l2gacc_lx(eli), trim(qice_field), tmp_field_l) - qice_l(:,ec) = tmp_field_l(:) - - enddo - - ! clean the temporary attribute vector g2x_lx - call mct_aVect_clean(g2x_lx) - - ! Sum qice over local land grid cells - - ! initialize qice sum - local_accum_on_land_grid = 0.0_r8 - local_ablat_on_land_grid = 0.0_r8 - - do n = 1, lsize_l - - effective_area = min(lfrac(n),Sg_icemask_l(n)) * aream_l(n) - - do ec = 0, nEC - - if (qice_l(n,ec) >= 0.0_r8) then - local_accum_on_land_grid = local_accum_on_land_grid & - + effective_area * frac_l(n,ec) * qice_l(n,ec) - else - local_ablat_on_land_grid = local_ablat_on_land_grid & - + effective_area * frac_l(n,ec) * qice_l(n,ec) - endif - - enddo ! ec - - enddo ! n - - call shr_mpi_sum(local_accum_on_land_grid, & - global_accum_on_land_grid, & - mpicom, 'accum_l') - - call shr_mpi_sum(local_ablat_on_land_grid, & - global_ablat_on_land_grid, & - mpicom, 'ablat_l') - - call shr_mpi_bcast(global_accum_on_land_grid, mpicom) - call shr_mpi_bcast(global_ablat_on_land_grid, mpicom) - - ! Sum qice_g over local glc grid cells. - ! 
Note: This sum uses the coupler areas (aream_g), which differ from the native CISM areas. - ! But since the original qice_g (from bilinear remapping) has been multiplied by - ! area_g/aream_g above, this calculation is equivalent to multiplying the original qice_g - ! by the native CISM areas (area_g). - ! If Flgl_qice were changed to a state (and not included in seq_flds_x2g_fluxes), - ! then it would be appropriate to use the native CISM areas in this sum. - - ! Export Sg_icemask from g2x_gx to a local array - allocate(Sg_icemask_g(lsize_g)) - call mct_aVect_exportRattr(g2x_gx, Sg_icemask_field, Sg_icemask_g) - - local_accum_on_glc_grid = 0.0_r8 - local_ablat_on_glc_grid = 0.0_r8 - - do n = 1, lsize_g - - if (qice_g(n) >= 0.0_r8) then - local_accum_on_glc_grid = local_accum_on_glc_grid & - + Sg_icemask_g(n) * aream_g(n) * qice_g(n) - else - local_ablat_on_glc_grid = local_ablat_on_glc_grid & - + Sg_icemask_g(n) * aream_g(n) * qice_g(n) - endif - - enddo ! n - - call shr_mpi_sum(local_accum_on_glc_grid, & - global_accum_on_glc_grid, & - mpicom, 'accum_g') - - call shr_mpi_sum(local_ablat_on_glc_grid, & - global_ablat_on_glc_grid, & - mpicom, 'ablat_g') - - call shr_mpi_bcast(global_accum_on_glc_grid, mpicom) - call shr_mpi_bcast(global_ablat_on_glc_grid, mpicom) - - ! Renormalize - - if (global_accum_on_glc_grid > 0.0_r8) then - accum_renorm_factor = global_accum_on_land_grid / global_accum_on_glc_grid - else - accum_renorm_factor = 0.0_r8 - endif - - if (global_ablat_on_glc_grid < 0.0_r8) then ! negative by definition - ablat_renorm_factor = global_ablat_on_land_grid / global_ablat_on_glc_grid - else - ablat_renorm_factor = 0.0_r8 - endif - - if (iamroot) then - write(logunit,*) 'accum_renorm_factor = ', accum_renorm_factor - write(logunit,*) 'ablat_renorm_factor = ', ablat_renorm_factor - endif - - do n = 1, lsize_g - if (qice_g(n) >= 0.0_r8) then - qice_g(n) = qice_g(n) * accum_renorm_factor - else - qice_g(n) = qice_g(n) * ablat_renorm_factor - endif - enddo - - deallocate(aream_l) - deallocate(lfrac) - deallocate(Sg_icemask_l) - deallocate(Sg_icemask_g) - deallocate(tmp_field_l) - deallocate(qice_l) - deallocate(frac_l) - - end subroutine prep_glc_renormalize_smb - - !================================================================================================ - - function prep_glc_get_l2x_gx() - type(mct_aVect), pointer :: prep_glc_get_l2x_gx(:) - prep_glc_get_l2x_gx => l2x_gx(:) - end function prep_glc_get_l2x_gx - - function prep_glc_get_l2gacc_lx() - type(mct_aVect), pointer :: prep_glc_get_l2gacc_lx(:) - prep_glc_get_l2gacc_lx => l2gacc_lx(:) - end function prep_glc_get_l2gacc_lx - - function prep_glc_get_l2gacc_lx_one_instance(lnd_inst) - integer, intent(in) :: lnd_inst - type(mct_aVect), pointer :: prep_glc_get_l2gacc_lx_one_instance - prep_glc_get_l2gacc_lx_one_instance => l2gacc_lx(lnd_inst) - end function prep_glc_get_l2gacc_lx_one_instance - - function prep_glc_get_l2gacc_lx_cnt() - integer, pointer :: prep_glc_get_l2gacc_lx_cnt - prep_glc_get_l2gacc_lx_cnt => l2gacc_lx_cnt - end function prep_glc_get_l2gacc_lx_cnt - - function prep_glc_get_o2x_gx() - type(mct_aVect), pointer :: prep_glc_get_o2x_gx(:) - prep_glc_get_o2x_gx => o2x_gx(:) - end function prep_glc_get_o2x_gx - - function prep_glc_get_x2gacc_gx() - type(mct_aVect), pointer :: prep_glc_get_x2gacc_gx(:) - prep_glc_get_x2gacc_gx => x2gacc_gx(:) - end function prep_glc_get_x2gacc_gx - - function prep_glc_get_x2gacc_gx_cnt() - integer, pointer :: prep_glc_get_x2gacc_gx_cnt - prep_glc_get_x2gacc_gx_cnt => 
x2gacc_gx_cnt - end function prep_glc_get_x2gacc_gx_cnt - - function prep_glc_get_mapper_Sl2g() - type(seq_map), pointer :: prep_glc_get_mapper_Sl2g - prep_glc_get_mapper_Sl2g => mapper_Sl2g - end function prep_glc_get_mapper_Sl2g - - function prep_glc_get_mapper_Fl2g() - type(seq_map), pointer :: prep_glc_get_mapper_Fl2g - prep_glc_get_mapper_Fl2g => mapper_Fl2g - end function prep_glc_get_mapper_Fl2g - - function prep_glc_get_mapper_So2g() - type(seq_map), pointer :: prep_glc_get_mapper_So2g - prep_glc_get_mapper_So2g=> mapper_So2g - end function prep_glc_get_mapper_So2g - - function prep_glc_get_mapper_Fo2g() - type(seq_map), pointer :: prep_glc_get_mapper_Fo2g - prep_glc_get_mapper_Fo2g=> mapper_Fo2g - end function prep_glc_get_mapper_Fo2g - -!*********************************************************************** -! -! routine compute_melt_fluxes -! -!> \brief Computes ocean and ice melt fluxes, etc. -!> \author Xylar Asay-Davis -!> \date 3/27/2015 -!> This routine computes melt fluxes (melt rate, temperature fluxes -!> into the ice and the ocean, and salt flux) as well as the interface -!> temperature and salinity. This routine expects an ice temperature -!> in the bottom layer of ice and ocean temperature and salinity in -!> the top ocean layer as well as the pressure at the ice/ocean interface. -!> -!> The ocean heat and salt transfer velocities are determined based on -!> observations of turbulent mixing rates in the under-ice boundary layer. -!> They should be the product of the friction velocity and a (possibly -!> spatially variable) non-dimenional transfer coefficient. -!> -!> The iceTemperatureDistance is the distance between the location -!> where the iceTemperature is supplied and the ice-ocean interface, -!> used to compute a temperature gradient. The ice thermal conductivity, -!> SHR_CONST_KAPPA_LAND_ICE, is zero for the freezing solution from Holland and Jenkins -!> (1999) in which the ice is purely insulating. -! -!----------------------------------------------------------------------- - - subroutine compute_melt_fluxes( & - oceanTemperature, & - oceanSalinity, & - oceanHeatTransferVelocity, & - oceanSaltTransferVelocity, & - interfacePressure, & - iceTemperature, & - iceTemperatureDistance, & - iceFloatingMask, & - outInterfaceSalinity, & - outInterfaceTemperature, & - outFreshwaterFlux, & - outOceanHeatFlux, & - outIceHeatFlux, & - gsize) - - use shr_const_mod, only: SHR_CONST_CPICE, & - SHR_CONST_CPSW, & - SHR_CONST_LATICE, & - SHR_CONST_RHOICE, & - SHR_CONST_RHOSW, & - SHR_CONST_DTF_DP, & - SHR_CONST_DTF_DS, & - SHR_CONST_DTF_DPDS, & - SHR_CONST_TF0, & - SHR_CONST_KAPPA_LAND_ICE - - !----------------------------------------------------------------- - ! - ! input variables - ! 
- !----------------------------------------------------------------- - - real (kind=r8), dimension(:), intent(in) :: & - oceanTemperature, & !< Input: ocean temperature in top layer - oceanSalinity, & !< Input: ocean salinity in top layer - oceanHeatTransferVelocity, & !< Input: ocean heat transfer velocity - oceanSaltTransferVelocity, & !< Input: ocean salt transfer velocity - interfacePressure, & !< Input: pressure at the ice-ocean interface - iceTemperature, & !< Input: ice temperature in bottom layer - iceTemperatureDistance !< Input: distance to ice temperature from ice-ocean interface - integer, dimension(:), intent(in) :: & - iceFloatingMask !< Input: mask of cells that contain floating ice - - integer, intent(in) :: gsize !< Input: number of values in each array - - !----------------------------------------------------------------- - ! - ! output variables - ! - !----------------------------------------------------------------- - - real (kind=r8), dimension(:), intent(out) :: & - outInterfaceSalinity, & !< Output: ocean salinity at the interface - outInterfaceTemperature, & !< Output: ice/ocean temperature at the interface - outFreshwaterFlux, & !< Output: ocean thickness flux (melt rate) - outOceanHeatFlux, & !< Output: the temperature flux into the ocean - outIceHeatFlux !< Output: the temperature flux into the ice - - !----------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------- - - real (kind=r8) :: T0, transferVelocityRatio, Tlatent, nu, a, b, c, eta, & - iceHeatFluxCoeff, iceDeltaT, dTf_dS - integer :: n - character(*), parameter :: subname = '(compute_melt_fluxes)' - - real (kind=r8), parameter :: minInterfaceSalinity = 0.001_r8 - - real (kind=r8), parameter :: referencePressure = 0.0_r8 ! Using reference pressure of 0 - - real (kind=r8) :: pressureOffset - - Tlatent = SHR_CONST_LATICE/SHR_CONST_CPSW - do n = 1, gsize - if (iceFloatingMask(n) == 0) cycle ! Only calculate on floating cells - - if (oceanHeatTransferVelocity(n) == 0.0_r8) then - write(logunit,*) 'compute_melt_fluxes ERROR: oceanHeatTransferVelocity value of 0 causes divide by 0 at index ', n - call shr_sys_abort('compute_melt_fluxes ERROR: oceanHeatTransferVelocity value of 0 causes divide by 0') - end if - - iceHeatFluxCoeff = SHR_CONST_RHOICE*SHR_CONST_CPICE*SHR_CONST_KAPPA_LAND_ICE/iceTemperatureDistance(n) - nu = iceHeatFluxCoeff/(SHR_CONST_RHOSW*SHR_CONST_CPSW*oceanHeatTransferVelocity(n)) - pressureOffset = max(interfacePressure(n) - referencePressure, 0.0_r8) - T0 = SHR_CONST_TF0 + SHR_CONST_DTF_DP * pressureOffset - !Note: These two terms for T0 are not needed because we are evaluating at salinity=0: - !+ SHR_CONST_DTF_DS * oceanSalinity(n) + SHR_CONST_DTF_DPDS * pressureOffset * oceanSalinity(n) - iceDeltaT = T0 - iceTemperature(n) - dTf_dS = SHR_CONST_DTF_DS + SHR_CONST_DTF_DPDS * pressureOffset - - transferVelocityRatio = oceanSaltTransferVelocity(n)/oceanHeatTransferVelocity(n) - - a = -1.0_r8 * dTf_dS * (1.0_r8 + nu) - b = transferVelocityRatio*Tlatent - nu*iceDeltaT + oceanTemperature(n) - T0 - c = -transferVelocityRatio*Tlatent*max(oceanSalinity(n), 0.0_r8) - ! a is non-negative; c is strictly non-positive so we never get imaginary roots. - ! Since a can be zero, we need a solution of the quadratic equation for 1/Si instead of Si. - ! Following: https://people.csail.mit.edu/bkph/articles/Quadratics.pdf - ! Since a and -c are are non-negative, the term in the square root is also always >= |b|. - ! 
In all reasonable cases, b will be strictly positive, since transferVelocityRatio*Tlatent ~ 2 C, - ! T0 ~ -1.8 C and oceanTemperature should never be able to get below about -3 C - ! As long as either b or both a and c are greater than zero, the strictly non-negative root is - outInterfaceSalinity(n) = max(-(2.0_r8*c)/(b + sqrt(b**2 - 4.0_r8*a*c)), minInterfaceSalinity) - - outInterfaceTemperature(n) = dTf_dS*outInterfaceSalinity(n)+T0 - - outFreshwaterFlux(n) = SHR_CONST_RHOSW*oceanSaltTransferVelocity(n) & - * (oceanSalinity(n)/outInterfaceSalinity(n) - 1.0_r8) - - ! According to Jenkins et al. (2001), the temperature fluxes into the ocean are: - ! 1. the advection of meltwater into the top layer (or removal for freezing) - ! 2. the turbulent transfer of heat across the boundary layer, based on the termal driving - outOceanHeatFlux(n) = SHR_CONST_CPSW*(outFreshwaterFlux(n)*outInterfaceTemperature(n) & - - SHR_CONST_RHOSW*oceanHeatTransferVelocity(n)*(oceanTemperature(n)-outInterfaceTemperature(n))) - - ! the temperature fluxes into the ice are: - ! 1. the advection of ice at the interface temperature out of the domain due to melting - ! (or in due to freezing) - ! 2. the diffusion (if any) of heat into the ice, based on temperature difference between - ! the reference point in the ice (either the surface or the middle of the bottom layer) - ! and the interface - outIceHeatFlux(n) = -SHR_CONST_CPICE*outFreshwaterFlux(n)*outInterfaceTemperature(n) - - outIceHeatFlux(n) = outIceHeatFlux(n) & - - iceHeatFluxCoeff*(iceTemperature(n) - outInterfaceTemperature(n)) - - end do - - !-------------------------------------------------------------------- - end subroutine compute_melt_fluxes - -end module prep_glc_mod diff --git a/src/drivers/mct/main/prep_iac_mod.F90 b/src/drivers/mct/main/prep_iac_mod.F90 deleted file mode 100644 index 1ab5f6d0284..00000000000 --- a/src/drivers/mct/main/prep_iac_mod.F90 +++ /dev/null @@ -1,168 +0,0 @@ -module prep_iac_mod - -#include "shr_assert.h" - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_kind_mod, only: cxx => SHR_KIND_CXX - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use seq_comm_mct, only: num_inst_lnd, num_inst_iac, num_inst_frc - use seq_comm_mct, only: CPLID, ROFID, logunit - use seq_comm_mct, only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use shr_log_mod , only: errMsg => shr_log_errMsg - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: iac, lnd - use prep_lnd_mod, only: prep_lnd_get_mapper_Fr2l - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_iac_init - public :: prep_iac_mrg - - public :: prep_iac_accum - public :: prep_iac_accum_avg - - public :: prep_iac_calc_l2x_zx - - public :: prep_iac_get_l2zacc_lx - public :: prep_iac_get_l2zacc_lx_cnt - public :: prep_iac_get_mapper_Fl2z - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - !-------------------------------------------------------------------------- - ! 
Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_Fl2z - - ! attribute vectors - type(mct_aVect), pointer :: l2x_zx(:) - - ! accumulation variables - type(mct_aVect), pointer :: l2zacc_lx(:) ! lnd export, lnd grid, cpl pes - integer , target :: l2zacc_lx_cnt ! l2racc_lx: number of time samples accumulated - - ! other module variables - integer :: mpicom_CPLID ! MPI cpl communicator - - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_iac_init(infodata, lnd_c2_iac) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and all other non-mapping - ! module variables - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in) :: lnd_c2_iac ! .true. => lnd to iac coupling on - ! - ! Local Variables - - end subroutine prep_iac_init - - !================================================================================================ - - subroutine prep_iac_accum(timer) - - !--------------------------------------------------------------- - ! Description - ! Accumulate land input to iac - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - - end subroutine prep_iac_accum - - !================================================================================================ - - subroutine prep_iac_accum_avg(timer) - - !--------------------------------------------------------------- - ! Description - ! Finalize accumulation of land input to river component - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - - end subroutine prep_iac_accum_avg - - !================================================================================================ - - subroutine prep_iac_mrg(infodata, fractions_zx, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Merge iac inputs - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type(mct_aVect) , intent(in) :: fractions_zx(:) - character(len=*) , intent(in) :: timer_mrg - ! - ! Local Variables - - end subroutine prep_iac_mrg - - !================================================================================================ - - !================================================================================================ - - subroutine prep_iac_calc_l2x_zx(timer) - !--------------------------------------------------------------- - ! Description - ! Create l2x_zx (note that l2x_zx is a local module variable) - ! - ! Arguments - ! Don't know if we need these fractions just yet - ! type(mct_aVect) , intent(in) :: fractions_lx(:) - character(len=*), intent(in) :: timer - ! - ! 
Local Variables - - end subroutine prep_iac_calc_l2x_zx - - !================================================================================================ - - function prep_iac_get_l2zacc_lx() - type(mct_aVect), pointer :: prep_iac_get_l2zacc_lx(:) - prep_iac_get_l2zacc_lx => l2zacc_lx(:) - end function prep_iac_get_l2zacc_lx - - function prep_iac_get_l2zacc_lx_cnt() - integer, pointer :: prep_iac_get_l2zacc_lx_cnt - prep_iac_get_l2zacc_lx_cnt => l2zacc_lx_cnt - end function prep_iac_get_l2zacc_lx_cnt - - function prep_iac_get_mapper_Fl2z() - type(seq_map), pointer :: prep_iac_get_mapper_Fl2z - prep_iac_get_mapper_Fl2z => mapper_Fl2z - end function prep_iac_get_mapper_Fl2z - -end module prep_iac_mod diff --git a/src/drivers/mct/main/prep_ice_mod.F90 b/src/drivers/mct/main/prep_ice_mod.F90 deleted file mode 100644 index 36c478f6fc2..00000000000 --- a/src/drivers/mct/main/prep_ice_mod.F90 +++ /dev/null @@ -1,638 +0,0 @@ -module prep_ice_mod - - use shr_kind_mod , only: r8 => SHR_KIND_R8 - use shr_kind_mod , only: cs => SHR_KIND_CS - use shr_kind_mod , only: cl => SHR_KIND_CL - use shr_sys_mod , only: shr_sys_abort, shr_sys_flush - use seq_comm_mct , only: num_inst_atm, num_inst_ocn, num_inst_glc - use seq_comm_mct , only: num_inst_ice, num_inst_frc, num_inst_rof - use seq_comm_mct , only: CPLID, ICEID, logunit - use seq_comm_mct , only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: ice, atm, ocn, glc, rof - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_ice_init - public :: prep_ice_mrg - - public :: prep_ice_calc_a2x_ix - public :: prep_ice_calc_o2x_ix - public :: prep_ice_calc_r2x_ix - public :: prep_ice_calc_g2x_ix - public :: prep_ice_shelf_calc_g2x_ix - - public :: prep_ice_get_a2x_ix - public :: prep_ice_get_o2x_ix - public :: prep_ice_get_g2x_ix - public :: prep_ice_get_r2x_ix - - public :: prep_ice_get_mapper_SFo2i - public :: prep_ice_get_mapper_Rg2i - public :: prep_ice_get_mapper_Sg2i - public :: prep_ice_get_mapper_Fg2i - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_ice_merge - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_SFo2i - type(seq_map), pointer :: mapper_Rg2i - type(seq_map), pointer :: mapper_Sg2i - type(seq_map), pointer :: mapper_Fg2i - type(seq_map), pointer :: mapper_Rr2i - - ! attribute vectors - type(mct_aVect), pointer :: a2x_ix(:) ! Atm export, ice grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: o2x_ix(:) ! Ocn export, ice grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: g2x_ix(:) ! Glc export, ice grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: r2x_ix(:) ! Rof export, ice grid, cpl pes - allocated in driver - - ! seq_comm_getData variables - integer :: mpicom_CPLID ! 
MPI cpl communicator - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_ice_init(infodata, ocn_c2_ice, glc_c2_ice, glcshelf_c2_ice, rof_c2_ice) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and all other non-mapping - ! module variables - ! - ! Arguments - type (seq_infodata_type) , intent(in) :: infodata - logical, intent(in) :: ocn_c2_ice ! .true. => ocn to ice coupling on - logical, intent(in) :: glc_c2_ice ! .true. => glc to ice coupling on - logical, intent(in) :: glcshelf_c2_ice ! .true. => glc ice shelf to ice coupling on - logical, intent(in) :: rof_c2_ice ! .true. => rof to ice coupling on - ! - ! Local Variables - integer :: lsize_i - integer :: eai, eoi, egi, eri - logical :: iamroot_CPLID ! .true. => CPLID masterproc - logical :: samegrid_ig ! samegrid glc and ice - logical :: samegrid_ro ! samegrid rof and ice/ocn - logical :: ice_present ! .true. => ice is present - logical :: esmf_map_flag ! .true. => use esmf for mapping - character(CL) :: ice_gnam ! ice grid - character(CL) :: ocn_gnam ! ocn grid - character(CL) :: glc_gnam ! glc grid - character(CL) :: rof_gnam ! rof grid - type(mct_avect), pointer :: i2x_ix - character(*), parameter :: subname = '(prep_ice_init)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call seq_infodata_getData(infodata, & - esmf_map_flag=esmf_map_flag , & - ice_present=ice_present , & - ice_gnam=ice_gnam , & - ocn_gnam=ocn_gnam , & - rof_gnam=rof_gnam , & - glc_gnam=glc_gnam) - - allocate(mapper_SFo2i) - allocate(mapper_Rg2i) - allocate(mapper_Sg2i) - allocate(mapper_Fg2i) - allocate(mapper_Rr2i) - - if (ice_present) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - i2x_ix => component_get_c2x_cx(ice(1)) - lsize_i = mct_aVect_lsize(i2x_ix) - - allocate(a2x_ix(num_inst_atm)) - do eai = 1,num_inst_atm - call mct_aVect_init(a2x_ix(eai), rList=seq_flds_a2x_fields, lsize=lsize_i) - call mct_aVect_zero(a2x_ix(eai)) - end do - allocate(o2x_ix(num_inst_ocn)) - do eoi = 1,num_inst_ocn - call mct_aVect_init(o2x_ix(eoi), rList=seq_flds_o2x_fields, lsize=lsize_i) - call mct_aVect_zero(o2x_ix(eoi)) - enddo - allocate(g2x_ix(num_inst_glc)) - do egi = 1,num_inst_glc - call mct_aVect_init(g2x_ix(egi), rList=seq_flds_g2x_fields, lsize=lsize_i) - call mct_aVect_zero(g2x_ix(egi)) - enddo - allocate(r2x_ix(num_inst_rof)) - do eri = 1,num_inst_rof - call mct_aVect_init(r2x_ix(eri), rList=seq_flds_r2x_fields, lsize=lsize_i) - call mct_aVect_zero(r2x_ix(eri)) - end do - - samegrid_ig = .true. - samegrid_ro = .true. - if (trim(ice_gnam) /= trim(glc_gnam)) samegrid_ig = .false. - if (trim(rof_gnam) /= trim(ocn_gnam)) samegrid_ro = .false. 
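Aside (not part of the patch): the samegrid_* flags set just above are nothing more than trimmed string comparisons of the grid names obtained from seq_infodata; the result is handed to the mapper initializations that follow so they know whether the two components share a grid or whether an offline mapping file is required. A minimal standalone sketch of that check, assuming gfortran and purely hypothetical grid names:

```fortran
! Illustrative sketch only; the grid names below are hypothetical examples,
! not values taken from the original source.
program samegrid_demo
  implicit none
  character(len=64) :: ice_gnam, glc_gnam, rof_gnam, ocn_gnam
  logical :: samegrid_ig, samegrid_ro

  ice_gnam = 'gx1v7'      ! hypothetical ice grid name
  ocn_gnam = 'gx1v7'      ! hypothetical ocn grid name (same as ice)
  glc_gnam = 'gland4km'   ! hypothetical glc grid name
  rof_gnam = 'r05'        ! hypothetical rof grid name

  samegrid_ig = trim(ice_gnam) == trim(glc_gnam)
  samegrid_ro = trim(rof_gnam) == trim(ocn_gnam)

  ! A .false. result signals that the glc->ice (or rof->ice/ocn) mapping
  ! cannot be a simple same-grid rearrange and needs a real map.
  print *, 'samegrid_ig =', samegrid_ig
  print *, 'samegrid_ro =', samegrid_ro
end program samegrid_demo
```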
- - if (ocn_c2_ice) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_SFo2i' - end if - call seq_map_init_rearrolap(mapper_SFo2i, ocn(1), ice(1), 'mapper_SFo2i') - endif - - if (glc_c2_ice) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Rg2i' - end if - call seq_map_init_rcfile(mapper_Rg2i, glc(1), ice(1), & - 'seq_maps.rc','glc2ice_rmapname:','glc2ice_rmaptype:',samegrid_ig, & - 'mapper_Rg2i initialization', esmf_map_flag) - endif - - if (glcshelf_c2_ice) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sg2i' - end if - call seq_map_init_rcfile(mapper_Sg2i, glc(1), ice(1), & - 'seq_maps.rc','glc2ice_smapname:','glc2ice_smaptype:',samegrid_ig, & - 'mapper_Sg2i initialization', esmf_map_flag) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fg2i' - end if - call seq_map_init_rcfile(mapper_Fg2i, glc(1), ice(1), & - 'seq_maps.rc','glc2ice_fmapname:','glc2ice_fmaptype:',samegrid_ig, & - 'mapper_Fg2i initialization', esmf_map_flag) - endif - - if (rof_c2_ice) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Rr2i' - end if - call seq_map_init_rcfile(mapper_Rr2i, rof(1), ice(1), & - 'seq_maps.rc','rof2ice_rmapname:','rof2ice_rmaptype:',samegrid_ro, & - 'mapper_Rr2i initialization', esmf_map_flag) - endif - call shr_sys_flush(logunit) - - end if - - end subroutine prep_ice_init - - !================================================================================================ - - subroutine prep_ice_mrg(infodata, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Prepare run phase, including running the merge - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - character(len=*) , intent(in) :: timer_mrg - ! - ! Local Variables - integer :: eoi, eai, egi, eii, eri - real(r8) :: flux_epbalfact ! adjusted precip factor - type(mct_avect), pointer :: x2i_ix - character(*), parameter :: subname = '(prep_ice_mrg)' - !--------------------------------------------------------------- - - call seq_infodata_GetData(infodata, & - flux_epbalfact=flux_epbalfact) - - call t_drvstartf (trim(timer_mrg),barrier=mpicom_CPLID) - do eii = 1,num_inst_ice - ! Use fortran mod to address ensembles in merge - eai = mod((eii-1),num_inst_atm) + 1 - eoi = mod((eii-1),num_inst_ocn) + 1 - eri = mod((eii-1),num_inst_rof) + 1 - egi = mod((eii-1),num_inst_glc) + 1 - - ! Apply correction to precipitation of requested driver namelist - x2i_ix => component_get_x2c_cx(ice(eii)) ! This is actually modifying x2i_ix - call prep_ice_merge(flux_epbalfact, a2x_ix(eai), o2x_ix(eoi), r2x_ix(eri), g2x_ix(egi), & - x2i_ix) - enddo - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_ice_mrg - - !================================================================================================ - - subroutine prep_ice_merge(flux_epbalfact, a2x_i, o2x_i, r2x_i, g2x_i, x2i_i ) - - !----------------------------------------------------------------------- - ! - ! Arguments - real(r8) , intent(inout) :: flux_epbalfact - type(mct_aVect) , intent(in) :: a2x_i - type(mct_aVect) , intent(in) :: o2x_i - type(mct_aVect) , intent(in) :: r2x_i - type(mct_aVect) , intent(in) :: g2x_i - type(mct_aVect) , intent(inout) :: x2i_i - ! - ! 
Local variables - integer :: i,i1,o1,lsize - integer :: niflds - integer, save :: index_a2x_Faxa_rainc - integer, save :: index_a2x_Faxa_rainl - integer, save :: index_a2x_Faxa_snowc - integer, save :: index_a2x_Faxa_snowl - integer, save :: index_g2x_Figg_rofi - integer, save :: index_r2x_Firr_rofi - integer, save :: index_x2i_Faxa_rain - integer, save :: index_x2i_Faxa_snow - integer, save :: index_x2i_Fixx_rofi - !wiso fields: - integer, save :: index_a2x_Faxa_rainc_16O - integer, save :: index_a2x_Faxa_rainl_16O - integer, save :: index_a2x_Faxa_snowc_16O - integer, save :: index_a2x_Faxa_snowl_16O - integer, save :: index_x2i_Faxa_rain_16O - integer, save :: index_x2i_Faxa_snow_16O - integer, save :: index_a2x_Faxa_rainc_18O - integer, save :: index_a2x_Faxa_rainl_18O - integer, save :: index_a2x_Faxa_snowc_18O - integer, save :: index_a2x_Faxa_snowl_18O - integer, save :: index_x2i_Faxa_rain_18O - integer, save :: index_x2i_Faxa_snow_18O - integer, save :: index_a2x_Faxa_rainc_HDO - integer, save :: index_a2x_Faxa_rainl_HDO - integer, save :: index_a2x_Faxa_snowc_HDO - integer, save :: index_a2x_Faxa_snowl_HDO - integer, save :: index_x2i_Faxa_rain_HDO - integer, save :: index_x2i_Faxa_snow_HDO - logical, save :: first_time = .true. - logical :: iamroot - character(CL),allocatable :: mrgstr(:) ! temporary string - character(CL) :: field ! string converted to char - type(mct_aVect_sharedindices),save :: o2x_sharedindices - type(mct_aVect_sharedindices),save :: a2x_sharedindices - type(mct_aVect_sharedindices),save :: g2x_sharedindices - character(*), parameter :: subname = '(prep_ice_merge) ' - !----------------------------------------------------------------------- - - call seq_comm_getdata(CPLID, iamroot=iamroot) - lsize = mct_aVect_lsize(x2i_i) - - if (first_time) then - niflds = mct_aVect_nRattr(x2i_i) - - allocate(mrgstr(niflds)) - index_a2x_Faxa_snowc = mct_aVect_indexRA(a2x_i,'Faxa_snowc') - index_a2x_Faxa_snowl = mct_aVect_indexRA(a2x_i,'Faxa_snowl') - index_a2x_Faxa_rainc = mct_aVect_indexRA(a2x_i,'Faxa_rainc') - index_a2x_Faxa_rainl = mct_aVect_indexRA(a2x_i,'Faxa_rainl') - index_g2x_Figg_rofi = mct_aVect_indexRA(g2x_i,'Figg_rofi') - index_r2x_Firr_rofi = mct_aVect_indexRA(r2x_i,'Firr_rofi') - index_x2i_Faxa_rain = mct_aVect_indexRA(x2i_i,'Faxa_rain' ) - index_x2i_Faxa_snow = mct_aVect_indexRA(x2i_i,'Faxa_snow' ) - index_x2i_Fixx_rofi = mct_aVect_indexRA(x2i_i,'Fixx_rofi') - - ! 
Water isotope fields - index_a2x_Faxa_snowc_16O = mct_aVect_indexRA(a2x_i,'Faxa_snowc_16O', perrWith='quiet') - index_a2x_Faxa_snowl_16O = mct_aVect_indexRA(a2x_i,'Faxa_snowl_16O', perrWith='quiet') - index_a2x_Faxa_rainc_16O = mct_aVect_indexRA(a2x_i,'Faxa_rainc_16O', perrWith='quiet') - index_a2x_Faxa_rainl_16O = mct_aVect_indexRA(a2x_i,'Faxa_rainl_16O', perrWith='quiet') - index_x2i_Faxa_rain_16O = mct_aVect_indexRA(x2i_i,'Faxa_rain_16O', perrWith='quiet' ) - index_x2i_Faxa_snow_16O = mct_aVect_indexRA(x2i_i,'Faxa_snow_16O', perrWith='quiet' ) - - index_a2x_Faxa_snowc_18O = mct_aVect_indexRA(a2x_i,'Faxa_snowc_18O', perrWith='quiet') - index_a2x_Faxa_snowl_18O = mct_aVect_indexRA(a2x_i,'Faxa_snowl_18O', perrWith='quiet') - index_a2x_Faxa_rainc_18O = mct_aVect_indexRA(a2x_i,'Faxa_rainc_18O', perrWith='quiet') - index_a2x_Faxa_rainl_18O = mct_aVect_indexRA(a2x_i,'Faxa_rainl_18O', perrWith='quiet') - index_x2i_Faxa_rain_18O = mct_aVect_indexRA(x2i_i,'Faxa_rain_18O', perrWith='quiet' ) - index_x2i_Faxa_snow_18O = mct_aVect_indexRA(x2i_i,'Faxa_snow_18O', perrWith='quiet' ) - - index_a2x_Faxa_snowc_HDO = mct_aVect_indexRA(a2x_i,'Faxa_snowc_HDO', perrWith='quiet') - index_a2x_Faxa_snowl_HDO = mct_aVect_indexRA(a2x_i,'Faxa_snowl_HDO', perrWith='quiet') - index_a2x_Faxa_rainc_HDO = mct_aVect_indexRA(a2x_i,'Faxa_rainc_HDO', perrWith='quiet') - index_a2x_Faxa_rainl_HDO = mct_aVect_indexRA(a2x_i,'Faxa_rainl_HDO', perrWith='quiet') - index_x2i_Faxa_rain_HDO = mct_aVect_indexRA(x2i_i,'Faxa_rain_HDO', perrWith='quiet' ) - index_x2i_Faxa_snow_HDO = mct_aVect_indexRA(x2i_i,'Faxa_snow_HDO', perrWith='quiet' ) - - do i = 1,niflds - field = mct_aVect_getRList2c(i, x2i_i) - mrgstr(i) = subname//'x2i%'//trim(field)//' =' - enddo - - call mct_aVect_setSharedIndices(o2x_i, x2i_i, o2x_SharedIndices) - call mct_aVect_setSharedIndices(a2x_i, x2i_i, a2x_SharedIndices) - call mct_aVect_setSharedIndices(g2x_i, x2i_i, g2x_SharedIndices) - - !--- document copy operations --- - do i=1,o2x_SharedIndices%shared_real%num_indices - i1=o2x_SharedIndices%shared_real%aVindices1(i) - o1=o2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, o2x_i) - mrgstr(o1) = trim(mrgstr(o1))//' = o2x%'//trim(field) - enddo - do i=1,a2x_SharedIndices%shared_real%num_indices - i1=a2x_SharedIndices%shared_real%aVindices1(i) - o1=a2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, a2x_i) - mrgstr(o1) = trim(mrgstr(o1))//' = a2x%'//trim(field) - enddo - do i=1,g2x_SharedIndices%shared_real%num_indices - i1=g2x_SharedIndices%shared_real%aVindices1(i) - o1=g2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, g2x_i) - mrgstr(o1) = trim(mrgstr(o1))//' = g2x%'//trim(field) - enddo - - !--- document manual merges --- - mrgstr(index_x2i_Faxa_rain) = trim(mrgstr(index_x2i_Faxa_rain))//' = '// & - '(a2x%Faxa_rainc + a2x%Faxa_rainl)*flux_epbalfact' - mrgstr(index_x2i_Faxa_snow) = trim(mrgstr(index_x2i_Faxa_snow))//' = '// & - '(a2x%Faxa_snowc + a2x%Faxa_snowl)*flux_epbalfact' - mrgstr(index_x2i_Fixx_rofi) = trim(mrgstr(index_x2i_Fixx_rofi))//' = '// & - '(g2x%Figg_rofi + r2x%Firr_rofi)*flux_epbalfact' - - !--- water isotope document manual merges --- - if ( index_x2i_Faxa_rain_16O /= 0 ) then - mrgstr(index_x2i_Faxa_rain_16O) = trim(mrgstr(index_x2i_Faxa_rain_16O))//' = '// & - '(a2x%Faxa_rainc_16O + a2x%Faxa_rainl_16O)*flux_epbalfact' - mrgstr(index_x2i_Faxa_snow_16O) = trim(mrgstr(index_x2i_Faxa_snow_16O))//' = '// & - '(a2x%Faxa_snowc_16O + a2x%Faxa_snowl_16O)*flux_epbalfact' 
- end if - if ( index_x2i_Faxa_rain_18O /= 0 ) then - mrgstr(index_x2i_Faxa_rain_18O) = trim(mrgstr(index_x2i_Faxa_rain_18O))//' = '// & - '(a2x%Faxa_rainc_18O + a2x%Faxa_rainl_18O)*flux_epbalfact' - mrgstr(index_x2i_Faxa_snow_18O) = trim(mrgstr(index_x2i_Faxa_snow_18O))//' = '// & - '(a2x%Faxa_snowc_18O + a2x%Faxa_snowl_18O)*flux_epbalfact' - end if - if ( index_x2i_Faxa_rain_HDO /= 0 ) then - mrgstr(index_x2i_Faxa_rain_HDO) = trim(mrgstr(index_x2i_Faxa_rain_HDO))//' = '// & - '(a2x%Faxa_rainc_HDO + a2x%Faxa_rainl_HDO)*flux_epbalfact' - mrgstr(index_x2i_Faxa_snow_HDO) = trim(mrgstr(index_x2i_Faxa_snow_HDO))//' = '// & - '(a2x%Faxa_snowc_HDO + a2x%Faxa_snowl_HDO)*flux_epbalfact' - end if - - endif - - ! call mct_aVect_copy(aVin=o2x_i, aVout=x2i_i, vector=mct_usevector) - ! call mct_aVect_copy(aVin=a2x_i, aVout=x2i_i, vector=mct_usevector) - ! call mct_aVect_copy(aVin=g2x_i, aVout=x2i_i, vector=mct_usevector) - call mct_aVect_copy(aVin=o2x_i, aVout=x2i_i, vector=mct_usevector, sharedIndices=o2x_SharedIndices) - call mct_aVect_copy(aVin=a2x_i, aVout=x2i_i, vector=mct_usevector, sharedIndices=a2x_SharedIndices) - call mct_aVect_copy(aVin=g2x_i, aVout=x2i_i, vector=mct_usevector, sharedIndices=g2x_SharedIndices) - - ! Merge total snow and precip for ice input - ! Scale total precip and runoff by flux_epbalfact - - do i = 1,lsize - x2i_i%rAttr(index_x2i_Faxa_rain,i) = a2x_i%rAttr(index_a2x_Faxa_rainc,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl,i) - x2i_i%rAttr(index_x2i_Faxa_snow,i) = a2x_i%rAttr(index_a2x_Faxa_snowc,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl,i) - x2i_i%rAttr(index_x2i_Fixx_rofi,i) = g2x_i%rAttr(index_g2x_Figg_rofi,i) + & - r2x_i%rAttr(index_r2x_Firr_rofi,i) - - x2i_i%rAttr(index_x2i_Faxa_rain,i) = x2i_i%rAttr(index_x2i_Faxa_rain,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow,i) = x2i_i%rAttr(index_x2i_Faxa_snow,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Fixx_rofi,i) = x2i_i%rAttr(index_x2i_Fixx_rofi,i) * flux_epbalfact - - ! 
For water isotopes - if ( index_x2i_Faxa_rain_16O /= 0 ) then - x2i_i%rAttr(index_x2i_Faxa_rain_16O,i) = a2x_i%rAttr(index_a2x_Faxa_rainc_16O,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl_16O,i) - x2i_i%rAttr(index_x2i_Faxa_snow_16O,i) = a2x_i%rAttr(index_a2x_Faxa_snowc_16O,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl_16O,i) - - x2i_i%rAttr(index_x2i_Faxa_rain_16O,i) = x2i_i%rAttr(index_x2i_Faxa_rain_16O,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow_16O,i) = x2i_i%rAttr(index_x2i_Faxa_snow_16O,i) * flux_epbalfact - end if - if ( index_x2i_Faxa_rain_18O /= 0 ) then - x2i_i%rAttr(index_x2i_Faxa_rain_18O,i) = a2x_i%rAttr(index_a2x_Faxa_rainc_18O,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl_18O,i) - x2i_i%rAttr(index_x2i_Faxa_snow_18O,i) = a2x_i%rAttr(index_a2x_Faxa_snowc_18O,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl_18O,i) - - x2i_i%rAttr(index_x2i_Faxa_rain_18O,i) = x2i_i%rAttr(index_x2i_Faxa_rain_18O,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow_18O,i) = x2i_i%rAttr(index_x2i_Faxa_snow_18O,i) * flux_epbalfact - end if - if ( index_x2i_Faxa_rain_HDO /= 0 ) then - x2i_i%rAttr(index_x2i_Faxa_rain_HDO,i) = a2x_i%rAttr(index_a2x_Faxa_rainc_HDO,i) + & - a2x_i%rAttr(index_a2x_Faxa_rainl_HDO,i) - x2i_i%rAttr(index_x2i_Faxa_snow_HDO,i) = a2x_i%rAttr(index_a2x_Faxa_snowc_HDO,i) + & - a2x_i%rAttr(index_a2x_Faxa_snowl_HDO,i) - - x2i_i%rAttr(index_x2i_Faxa_rain_HDO,i) = x2i_i%rAttr(index_x2i_Faxa_rain_HDO,i) * flux_epbalfact - x2i_i%rAttr(index_x2i_Faxa_snow_HDO,i) = x2i_i%rAttr(index_x2i_Faxa_snow_HDO,i) * flux_epbalfact - end if - - end do - - if (first_time) then - if (iamroot) then - write(logunit,'(A)') subname//' Summary:' - do i = 1,niflds - write(logunit,'(A)') trim(mrgstr(i)) - enddo - endif - deallocate(mrgstr) - endif - - first_time = .false. - - end subroutine prep_ice_merge - - !================================================================================================ - - subroutine prep_ice_calc_a2x_ix(a2x_ox, timer) - !--------------------------------------------------------------- - ! Description - ! Create a2x_ix (note that a2x_ix is a local module variable) - ! - ! Arguments - type(mct_aVect) , intent(in) :: a2x_ox(:) - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eai - character(*), parameter :: subname = '(prep_ice_calc_a2x_ix)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eai = 1,num_inst_atm - call seq_map_map(mapper_SFo2i, a2x_ox(eai), a2x_ix(eai), norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ice_calc_a2x_ix - - !================================================================================================ - - subroutine prep_ice_calc_o2x_ix(timer) - !--------------------------------------------------------------- - ! Description - ! Create o2x_ix (note that o2x_ix is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eoi - type(mct_aVect) , pointer :: o2x_ox - character(*), parameter :: subname = '(prep_ice_calc_o2x_ix)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eoi = 1,num_inst_ocn - o2x_ox => component_get_c2x_cx(ocn(eoi)) - call seq_map_map(mapper_SFo2i, o2x_ox, o2x_ix(eoi), norm=.true.) 
- enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ice_calc_o2x_ix - - !================================================================================================ - - subroutine prep_ice_calc_r2x_ix(timer) - !--------------------------------------------------------------- - ! Description - ! Create r2x_ix (note that r2x_ix is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eri - type(mct_aVect), pointer :: r2x_rx - character(*), parameter :: subname = '(prep_ice_calc_r2x_ix)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eri = 1,num_inst_rof - r2x_rx => component_get_c2x_cx(rof(eri)) - - call seq_map_map(mapper_Rr2i, r2x_rx, r2x_ix(eri), & - fldlist='Firr_rofi', norm=.false.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ice_calc_r2x_ix - - !================================================================================================ - - subroutine prep_ice_calc_g2x_ix(timer) - !--------------------------------------------------------------- - ! Description - ! Create g2x_ix (note that g2x_ix is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: egi - type(mct_aVect), pointer :: g2x_gx - character(*), parameter :: subname = '(prep_ice_calc_g2x_ix)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do egi = 1,num_inst_glc - g2x_gx => component_get_c2x_cx(glc(egi)) - call seq_map_map(mapper_Rg2i, g2x_gx, g2x_ix(egi), & - fldlist='Fixx_rofi', norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ice_calc_g2x_ix - - !================================================================================================ - - subroutine prep_ice_shelf_calc_g2x_ix(timer) - !--------------------------------------------------------------- - ! Description - ! Create g2x_ix (note that g2x_ix is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: egi - type(mct_aVect), pointer :: g2x_gx - character(*), parameter :: subname = '(prep_ice_calc_g2x_ix)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do egi = 1,num_inst_rof - g2x_gx => component_get_c2x_cx(glc(egi)) - call seq_map_map(mapper_Sg2i, g2x_gx, g2x_ix(egi), & - fldlist='Sg_icemask_coupled_fluxes', norm=.true.) 
- enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ice_shelf_calc_g2x_ix - - !================================================================================================ - - function prep_ice_get_a2x_ix() - type(mct_aVect), pointer :: prep_ice_get_a2x_ix(:) - prep_ice_get_a2x_ix => a2x_ix(:) - end function prep_ice_get_a2x_ix - - function prep_ice_get_o2x_ix() - type(mct_aVect), pointer :: prep_ice_get_o2x_ix(:) - prep_ice_get_o2x_ix => o2x_ix(:) - end function prep_ice_get_o2x_ix - - function prep_ice_get_g2x_ix() - type(mct_aVect), pointer :: prep_ice_get_g2x_ix(:) - prep_ice_get_g2x_ix => g2x_ix(:) - end function prep_ice_get_g2x_ix - - function prep_ice_get_r2x_ix() - type(mct_aVect), pointer :: prep_ice_get_r2x_ix(:) - prep_ice_get_r2x_ix => r2x_ix(:) - end function prep_ice_get_r2x_ix - - function prep_ice_get_mapper_SFo2i() - type(seq_map), pointer :: prep_ice_get_mapper_SFo2i - prep_ice_get_mapper_SFo2i => mapper_SFo2i - end function prep_ice_get_mapper_SFo2i - - function prep_ice_get_mapper_Rg2i() - type(seq_map), pointer :: prep_ice_get_mapper_Rg2i - prep_ice_get_mapper_Rg2i => mapper_Rg2i - end function prep_ice_get_mapper_Rg2i - - function prep_ice_get_mapper_Sg2i() - type(seq_map), pointer :: prep_ice_get_mapper_Sg2i - prep_ice_get_mapper_Sg2i => mapper_Sg2i - end function prep_ice_get_mapper_Sg2i - - function prep_ice_get_mapper_Fg2i() - type(seq_map), pointer :: prep_ice_get_mapper_Fg2i - prep_ice_get_mapper_Fg2i => mapper_Fg2i - end function prep_ice_get_mapper_Fg2i - -end module prep_ice_mod diff --git a/src/drivers/mct/main/prep_lnd_mod.F90 b/src/drivers/mct/main/prep_lnd_mod.F90 deleted file mode 100644 index 344637f3fdc..00000000000 --- a/src/drivers/mct/main/prep_lnd_mod.F90 +++ /dev/null @@ -1,549 +0,0 @@ -module prep_lnd_mod - - use shr_kind_mod , only: r8 => SHR_KIND_R8 - use shr_kind_mod , only: cs => SHR_KIND_CS - use shr_kind_mod , only: cl => SHR_KIND_CL - use shr_kind_mod , only: cxx => SHR_KIND_CXX - use shr_sys_mod , only: shr_sys_abort, shr_sys_flush - use seq_comm_mct , only: num_inst_atm, num_inst_rof, num_inst_glc - use seq_comm_mct , only: num_inst_lnd, num_inst_frc - use seq_comm_mct , only: CPLID, LNDID, logunit - use seq_comm_mct , only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: lnd, atm, rof, glc - use map_glc2lnd_mod , only: map_glc2lnd_ec - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_lnd_init - public :: prep_lnd_mrg - - public :: prep_lnd_calc_a2x_lx - public :: prep_lnd_calc_r2x_lx - public :: prep_lnd_calc_g2x_lx - public :: prep_lnd_calc_z2x_lx - - public :: prep_lnd_get_a2x_lx - public :: prep_lnd_get_r2x_lx - public :: prep_lnd_get_g2x_lx - public :: prep_lnd_get_z2x_lx - - public :: prep_lnd_get_mapper_Sa2l - public :: prep_lnd_get_mapper_Fa2l - public :: prep_lnd_get_mapper_Fr2l - public :: prep_lnd_get_mapper_Sg2l - public :: prep_lnd_get_mapper_Fg2l - - !-------------------------------------------------------------------------- - ! 
Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_lnd_merge - private :: prep_lnd_set_glc2lnd_fields - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_Sa2l ! needed in ccsm_comp_mod.F90 (setting of aream) - type(seq_map), pointer :: mapper_Fa2l ! needed in ccsm_comp_mod.F90 (seq_domain_check) - type(seq_map), pointer :: mapper_Fr2l ! needed in seq_frac_mct.F90 - type(seq_map), pointer :: mapper_Sg2l ! currently unused (all g2l mappings use the flux mapper) - type(seq_map), pointer :: mapper_Fg2l - - ! attribute vectors - type(mct_aVect), pointer :: a2x_lx(:) ! Atm export, lnd grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: r2x_lx(:) ! Rof export, lnd grid, lnd pes - allocated in lnd gc - type(mct_aVect), pointer :: g2x_lx(:) ! Glc export, lnd grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: z2x_lx(:) ! Iac export, lnd grid, cpl pes - allocated in driver - - ! seq_comm_getData variables - integer :: mpicom_CPLID ! MPI cpl communicator - - ! field names and lists, for fields that need to be treated specially - character(len=*), parameter :: glc_frac_field = 'Sg_ice_covered' - character(len=*), parameter :: glc_topo_field = 'Sg_topo' - character(len=*), parameter :: glc_icemask_field = 'Sg_icemask' - ! fields mapped from glc to lnd, NOT separated by elevation class - character(CXX) :: glc2lnd_non_ec_fields - ! other fields (besides frac_field and topo_field) that are mapped from glc to lnd, - ! separated by elevation class - character(CXX) :: glc2lnd_ec_extra_fields - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_lnd_init(infodata, atm_c2_lnd, rof_c2_lnd, glc_c2_lnd, iac_c2_lnd) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and all other non-mapping - ! module variables - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in) :: atm_c2_lnd ! .true. => atm to lnd coupling on - logical , intent(in) :: rof_c2_lnd ! .true. => rof to lnd coupling on - logical , intent(in) :: glc_c2_lnd ! .true. => glc to lnd coupling on - logical , intent(in) :: iac_c2_lnd ! .true. => iac to lnd coupling on - ! - ! Local Variables - integer :: lsize_l - integer :: eai, eri, egi - logical :: samegrid_al ! samegrid atm and land - logical :: samegrid_lr ! samegrid land and rof - logical :: samegrid_lg ! samegrid land and glc - logical :: esmf_map_flag ! .true. => use esmf for mapping - logical :: lnd_present ! .true. => land is present - logical :: iamroot_CPLID ! .true. => CPLID masterproc - character(CL) :: atm_gnam ! atm grid - character(CL) :: lnd_gnam ! lnd grid - character(CL) :: rof_gnam ! rof grid - character(CL) :: glc_gnam ! 
glc grid - type(mct_avect), pointer :: l2x_lx - character(*), parameter :: subname = '(prep_lnd_init)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call seq_infodata_getData(infodata, & - esmf_map_flag=esmf_map_flag, & - lnd_present=lnd_present, & - atm_gnam=atm_gnam, & - lnd_gnam=lnd_gnam, & - rof_gnam=rof_gnam, & - glc_gnam=glc_gnam) - - allocate(mapper_Sa2l) - allocate(mapper_Fa2l) - allocate(mapper_Fr2l) - allocate(mapper_Sg2l) - allocate(mapper_Fg2l) - - if (lnd_present) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - l2x_lx => component_get_c2x_cx(lnd(1)) - lsize_l = mct_aVect_lsize(l2x_lx) - - allocate(a2x_lx(num_inst_atm)) - do eai = 1,num_inst_atm - call mct_aVect_init(a2x_lx(eai), rList=seq_flds_a2x_fields, lsize=lsize_l) - call mct_aVect_zero(a2x_lx(eai)) - enddo - allocate(r2x_lx(num_inst_rof)) - do eri = 1,num_inst_rof - call mct_aVect_init(r2x_lx(eri), rlist=seq_flds_r2x_fields, lsize=lsize_l) - call mct_aVect_zero(r2x_lx(eri)) - end do - allocate(g2x_lx(num_inst_glc)) - do egi = 1,num_inst_glc - call mct_aVect_init(g2x_lx(egi), rList=seq_flds_x2l_fields_from_glc, lsize=lsize_l) - call mct_aVect_zero(g2x_lx(egi)) - end do - - samegrid_al = .true. - samegrid_lr = .true. - samegrid_lg = .true. - if (trim(atm_gnam) /= trim(lnd_gnam)) samegrid_al = .false. - if (trim(lnd_gnam) /= trim(rof_gnam)) samegrid_lr = .false. - if (trim(lnd_gnam) /= trim(glc_gnam)) samegrid_lg = .false. - - if (rof_c2_lnd) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fr2l' - end if - call seq_map_init_rcfile(mapper_Fr2l, rof(1), lnd(1), & - 'seq_maps.rc','rof2lnd_fmapname:','rof2lnd_fmaptype:',samegrid_lr, & - string='mapper_Fr2l initialization',esmf_map=esmf_map_flag) - end if - call shr_sys_flush(logunit) - - if (atm_c2_lnd) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sa2l' - end if - call seq_map_init_rcfile(mapper_Sa2l, atm(1), lnd(1), & - 'seq_maps.rc','atm2lnd_smapname:','atm2lnd_smaptype:',samegrid_al, & - 'mapper_Sa2l initialization',esmf_map_flag) - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fa2l' - end if - call seq_map_init_rcfile(mapper_Fa2l, atm(1), lnd(1), & - 'seq_maps.rc','atm2lnd_fmapname:','atm2lnd_fmaptype:',samegrid_al, & - 'mapper_Fa2l initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - if (glc_c2_lnd) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sg2l' - end if - call seq_map_init_rcfile(mapper_Sg2l, glc(1), lnd(1), & - 'seq_maps.rc','glc2lnd_smapname:','glc2lnd_smaptype:',samegrid_lg, & - 'mapper_Sg2l initialization',esmf_map_flag) - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fg2l' - end if - call seq_map_init_rcfile(mapper_Fg2l, glc(1), lnd(1), & - 'seq_maps.rc','glc2lnd_fmapname:','glc2lnd_fmaptype:',samegrid_lg, & - 'mapper_Fg2l initialization',esmf_map_flag) - - call prep_lnd_set_glc2lnd_fields() - endif - call shr_sys_flush(logunit) - - end if - - end subroutine prep_lnd_init - - !================================================================================================ - - subroutine prep_lnd_set_glc2lnd_fields() - - !--------------------------------------------------------------- - ! Description - ! Sets the module-level glc2lnd_non_ec_fields and glc2lnd_ec_extra_fields variables. 
- ! - ! Local Variables - character(len=CXX) :: temp_list - - character(*), parameter :: subname = '(prep_lnd_set_glc2lnd_fields)' - !--------------------------------------------------------------- - - ! glc2lnd fields not separated by elevation class can be determined by finding fields - ! that exist in both the g2x_to_lnd list and the x2l_from_glc list - call shr_string_listIntersect(seq_flds_g2x_fields_to_lnd, & - seq_flds_x2l_fields_from_glc, & - glc2lnd_non_ec_fields) - - ! glc2lnd fields separated by elevation class are all fields not determined above. - ! However, we also need to remove glc_frac_field and glc_topo_field from this list, - ! because those are handled specially, so are not expected to be in this - ! "extra_fields" list. - ! - ! NOTE(wjs, 2015-04-24) I am going to the trouble of building this field list - ! dynamically, rather than simply hard-coding the necessary fields (currently just - ! 'Flgg_hflx'), so that new fields can be added in seq_flds_mod without needing to - ! change any other code. - call shr_string_listDiff(seq_flds_g2x_fields_to_lnd, & - glc2lnd_non_ec_fields, & - glc2lnd_ec_extra_fields) - temp_list = glc2lnd_ec_extra_fields - call shr_string_listDiff(temp_list, & - glc_frac_field, & - glc2lnd_ec_extra_fields) - temp_list = glc2lnd_ec_extra_fields - call shr_string_listDiff(temp_list, & - glc_topo_field, & - glc2lnd_ec_extra_fields) - - end subroutine prep_lnd_set_glc2lnd_fields - - !================================================================================================ - - subroutine prep_lnd_mrg(infodata, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Prepare run phase, including running the merge - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - character(len=*) , intent(in) :: timer_mrg - ! - ! Local Variables - integer :: eai, eri, egi, eli - type(mct_aVect), pointer :: x2l_lx - character(*), parameter :: subname = '(prep_lnd_mrg)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer_mrg),barrier=mpicom_CPLID) - do eli = 1,num_inst_lnd - ! Use fortran mod to address ensembles in merge - eai = mod((eli-1),num_inst_atm) + 1 - eri = mod((eli-1),num_inst_rof) + 1 - egi = mod((eli-1),num_inst_glc) + 1 - - x2l_lx => component_get_x2c_cx(lnd(eli)) ! This is actually modifying x2l_lx - call prep_lnd_merge( a2x_lx(eai), r2x_lx(eri), g2x_lx(egi), x2l_lx ) - enddo - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_lnd_mrg - - !================================================================================================ - - subroutine prep_lnd_merge( a2x_l, r2x_l, g2x_l, x2l_l ) - !--------------------------------------------------------------- - ! Description - ! Create input land state directly from atm, runoff and glc outputs - ! - ! Arguments - type(mct_aVect), intent(in) :: a2x_l - type(mct_aVect), intent(in) :: r2x_l - type(mct_aVect), intent(in) :: g2x_l - type(mct_aVect), intent(inout) :: x2l_l - !----------------------------------------------------------------------- - integer :: nflds,i,i1,o1 - logical :: iamroot - logical, save :: first_time = .true. - character(CL),allocatable :: mrgstr(:) ! temporary string - character(CL) :: field ! 
string converted to char - type(mct_aVect_sharedindices),save :: a2x_sharedindices - type(mct_aVect_sharedindices),save :: r2x_sharedindices - type(mct_aVect_sharedindices),save :: g2x_sharedindices - character(*), parameter :: subname = '(prep_lnd_merge) ' - - !----------------------------------------------------------------------- - - call seq_comm_getdata(CPLID, iamroot=iamroot) - - if (first_time) then - nflds = mct_aVect_nRattr(x2l_l) - - allocate(mrgstr(nflds)) - do i = 1,nflds - field = mct_aVect_getRList2c(i, x2l_l) - mrgstr(i) = subname//'x2l%'//trim(field)//' =' - enddo - - call mct_aVect_setSharedIndices(a2x_l, x2l_l, a2x_SharedIndices) - call mct_aVect_setSharedIndices(r2x_l, x2l_l, r2x_SharedIndices) - call mct_aVect_setSharedIndices(g2x_l, x2l_l, g2x_SharedIndices) - - !--- document copy operations --- - do i=1,a2x_SharedIndices%shared_real%num_indices - i1=a2x_SharedIndices%shared_real%aVindices1(i) - o1=a2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, a2x_l) - mrgstr(o1) = trim(mrgstr(o1))//' = a2x%'//trim(field) - enddo - do i=1,r2x_SharedIndices%shared_real%num_indices - i1=r2x_SharedIndices%shared_real%aVindices1(i) - o1=r2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, r2x_l) - mrgstr(o1) = trim(mrgstr(o1))//' = r2x%'//trim(field) - enddo - do i=1,g2x_SharedIndices%shared_real%num_indices - i1=g2x_SharedIndices%shared_real%aVindices1(i) - o1=g2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, g2x_l) - mrgstr(o1) = trim(mrgstr(o1))//' = g2x%'//trim(field) - enddo - endif - - call mct_aVect_copy(aVin=a2x_l, aVout=x2l_l, vector=mct_usevector, sharedIndices=a2x_SharedIndices) - call mct_aVect_copy(aVin=r2x_l, aVout=x2l_l, vector=mct_usevector, sharedIndices=r2x_SharedIndices) - call mct_aVect_copy(aVin=g2x_l, aVout=x2l_l, vector=mct_usevector, sharedIndices=g2x_SharedIndices) - - if (first_time) then - if (iamroot) then - write(logunit,'(A)') subname//' Summary:' - do i = 1,nflds - write(logunit,'(A)') trim(mrgstr(i)) - enddo - endif - deallocate(mrgstr) - endif - - first_time = .false. - - end subroutine prep_lnd_merge - - !================================================================================================ - - subroutine prep_lnd_calc_a2x_lx(timer) - !--------------------------------------------------------------- - ! Description - ! Create a2x_lx (note that a2x_lx is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eai - type(mct_aVect), pointer :: a2x_ax - character(*), parameter :: subname = '(prep_lnd_calc_a2x_lx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eai = 1,num_inst_atm - a2x_ax => component_get_c2x_cx(atm(eai)) - call seq_map_map(mapper_Fa2l, a2x_ax, a2x_lx(eai), norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_lnd_calc_a2x_lx - - !================================================================================================ - - subroutine prep_lnd_calc_r2x_lx(timer) - !--------------------------------------------------------------- - ! Description - ! Create r2x_lx (note that r2x_lx is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! 
Local Variables - integer :: eri - type(mct_aVect) , pointer :: r2x_rx - character(*), parameter :: subname = '(prep_lnd_calc_r2x_lx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eri = 1,num_inst_rof - r2x_rx => component_get_c2x_cx(rof(eri)) - - ! Note that one of these fields (a volr field) is remapped from rof -> lnd in - ! map_lnd2rof_irrig_mod, because it is needed as a normalization term. So, if the - ! details of this mapping call are changed in the future, it's possible that the - ! equivalent r2l mapping in map_lnd2rof_irrig_mod should be changed to keep the two - ! equivalent. - call seq_map_map(mapper_Fr2l, r2x_rx, r2x_lx(eri), & - fldlist=seq_flds_r2x_fluxes, norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_lnd_calc_r2x_lx - - !================================================================================================ - - subroutine prep_lnd_calc_g2x_lx(timer) - !--------------------------------------------------------------- - ! Description - ! Create g2x_lx (note that g2x_lx is a local module variable) - ! - ! Arguments - character(len=*) , intent(in) :: timer - ! - ! Local Variables - integer :: egi - type(mct_aVect), pointer :: g2x_gx - character(*), parameter :: subname = '(prep_lnd_calc_g2x_lx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do egi = 1,num_inst_glc - g2x_gx => component_get_c2x_cx(glc(egi)) - - ! Map fields that are NOT separated by elevation class on the land grid - ! - ! These are mapped using a simple area-conservative remapping. (Note that we use - ! the flux mapper even though these contain states, because we need these icemask - ! fields to be mapped conservatively.) - ! - ! Note that this mapping is redone for Sg_icemask in prep_glc_mod: - ! prep_glc_map_qice_conservative_lnd2glc. If we ever change this mapping (e.g., - ! changing norm to .false.), then we should change the mapping there, too. - ! - ! BUG(wjs, 2017-05-11, #1516) I think we actually want norm = .false. here, but - ! this requires some more thought - call seq_map_map(mapper_Fg2l, g2x_gx, g2x_lx(egi), & - fldlist = glc2lnd_non_ec_fields, norm=.true.) - - ! Map fields that are separated by elevation class on the land grid - call map_glc2lnd_ec( & - g2x_g = g2x_gx, & - frac_field = glc_frac_field, & - topo_field = glc_topo_field, & - icemask_field = glc_icemask_field, & - extra_fields = glc2lnd_ec_extra_fields, & - mapper = mapper_Fg2l, & - g2x_l = g2x_lx(egi)) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_lnd_calc_g2x_lx - - !================================================================================================ - - subroutine prep_lnd_calc_z2x_lx(timer) - !--------------------------------------------------------------- - ! Description - ! Create z2x_lx (note that z2x_lx is a local module variable) - ! - ! Arguments - character(len=*) , intent(in) :: timer - ! - ! Local Variables - integer :: egi - type(mct_aVect), pointer :: z2x_gx - character(*), parameter :: subname = '(prep_lnd_calc_z2x_lx)' - !--------------------------------------------------------------- - - ! 
Stub - - end subroutine prep_lnd_calc_z2x_lx - - !================================================================================================ - - function prep_lnd_get_a2x_lx() - type(mct_aVect), pointer :: prep_lnd_get_a2x_lx(:) - prep_lnd_get_a2x_lx => a2x_lx(:) - end function prep_lnd_get_a2x_lx - - function prep_lnd_get_r2x_lx() - type(mct_aVect), pointer :: prep_lnd_get_r2x_lx(:) - prep_lnd_get_r2x_lx => r2x_lx(:) - end function prep_lnd_get_r2x_lx - - function prep_lnd_get_g2x_lx() - type(mct_aVect), pointer :: prep_lnd_get_g2x_lx(:) - prep_lnd_get_g2x_lx => g2x_lx(:) - end function prep_lnd_get_g2x_lx - - function prep_lnd_get_z2x_lx() - type(mct_aVect), pointer :: prep_lnd_get_z2x_lx(:) - prep_lnd_get_z2x_lx => z2x_lx(:) - end function prep_lnd_get_z2x_lx - - function prep_lnd_get_mapper_Sa2l() - type(seq_map), pointer :: prep_lnd_get_mapper_Sa2l - prep_lnd_get_mapper_Sa2l => mapper_Sa2l - end function prep_lnd_get_mapper_Sa2l - - function prep_lnd_get_mapper_Fa2l() - type(seq_map), pointer :: prep_lnd_get_mapper_Fa2l - prep_lnd_get_mapper_Fa2l => mapper_Fa2l - end function prep_lnd_get_mapper_Fa2l - - function prep_lnd_get_mapper_Fr2l() - type(seq_map), pointer :: prep_lnd_get_mapper_Fr2l - prep_lnd_get_mapper_Fr2l => mapper_Fr2l - end function prep_lnd_get_mapper_Fr2l - - function prep_lnd_get_mapper_Sg2l() - type(seq_map), pointer :: prep_lnd_get_mapper_Sg2l - prep_lnd_get_mapper_Sg2l => mapper_Sg2l - end function prep_lnd_get_mapper_Sg2l - - function prep_lnd_get_mapper_Fg2l() - type(seq_map), pointer :: prep_lnd_get_mapper_Fg2l - prep_lnd_get_mapper_Fg2l => mapper_Fg2l - end function prep_lnd_get_mapper_Fg2l - -end module prep_lnd_mod diff --git a/src/drivers/mct/main/prep_ocn_mod.F90 b/src/drivers/mct/main/prep_ocn_mod.F90 deleted file mode 100644 index e4f36146ea9..00000000000 --- a/src/drivers/mct/main/prep_ocn_mod.F90 +++ /dev/null @@ -1,1463 +0,0 @@ -module prep_ocn_mod - - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use seq_comm_mct, only: num_inst_atm, num_inst_rof, num_inst_ice - use seq_comm_mct, only: num_inst_glc, num_inst_wav, num_inst_ocn - use seq_comm_mct, only: num_inst_xao, num_inst_frc - use seq_comm_mct, only: num_inst_max - use seq_comm_mct, only: CPLID, OCNID, logunit - use seq_comm_mct, only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: ocn, atm, ice, rof, wav, glc - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! 
Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_ocn_init - public :: prep_ocn_mrg - - public :: prep_ocn_accum - public :: prep_ocn_accum_avg - - public :: prep_ocn_calc_a2x_ox - public :: prep_ocn_calc_i2x_ox - public :: prep_ocn_calc_r2x_ox - public :: prep_ocn_calc_g2x_ox - public :: prep_ocn_shelf_calc_g2x_ox - public :: prep_ocn_calc_w2x_ox - - public :: prep_ocn_get_a2x_ox - public :: prep_ocn_get_r2x_ox - public :: prep_ocn_get_i2x_ox - public :: prep_ocn_get_g2x_ox - public :: prep_ocn_get_w2x_ox - - public :: prep_ocn_get_x2oacc_ox - public :: prep_ocn_get_x2oacc_ox_cnt -#ifdef SUMMITDEV_PGI - ! Sarat: Dummy variable added to workaround PGI compiler bug (PGI 17.9) as of Oct 23, 2017 - public :: dummy_pgibugfix -#endif - - public :: prep_ocn_get_mapper_Sa2o - public :: prep_ocn_get_mapper_Va2o - public :: prep_ocn_get_mapper_Fa2o - public :: prep_ocn_get_mapper_Fr2o - public :: prep_ocn_get_mapper_Rr2o_liq - public :: prep_ocn_get_mapper_Rr2o_ice - public :: prep_ocn_get_mapper_SFi2o - public :: prep_ocn_get_mapper_Rg2o_liq - public :: prep_ocn_get_mapper_Rg2o_ice - public :: prep_ocn_get_mapper_Sg2o - public :: prep_ocn_get_mapper_Fg2o - public :: prep_ocn_get_mapper_Sw2o - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_ocn_merge - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_Sa2o - type(seq_map), pointer :: mapper_Va2o - type(seq_map), pointer :: mapper_Fa2o - type(seq_map), pointer :: mapper_Fr2o - type(seq_map), pointer :: mapper_Rr2o_liq - type(seq_map), pointer :: mapper_Rr2o_ice - type(seq_map), pointer :: mapper_SFi2o - type(seq_map), pointer :: mapper_Rg2o_liq - type(seq_map), pointer :: mapper_Rg2o_ice - type(seq_map), pointer :: mapper_Fg2o - type(seq_map), pointer :: mapper_Sg2o - type(seq_map), pointer :: mapper_Sw2o - - ! attribute vectors - type(mct_aVect), pointer :: a2x_ox(:) ! Atm export, ocn grid, cpl pes - type(mct_aVect), pointer :: r2x_ox(:) ! Rof export, ocn grid, cpl pes - type(mct_aVect), pointer :: i2x_ox(:) ! Ice export, ocn grid, cpl pes - type(mct_aVect), pointer :: g2x_ox(:) ! Glc export, ocn grid, cpl pes - type(mct_aVect), pointer :: w2x_ox(:) ! Wav export, ocn grid, cpl pes - - type(mct_aVect), target :: x2o_ox_inst ! multi instance for averaging - - ! accumulation variables - type(mct_aVect), pointer :: x2oacc_ox(:) ! Ocn import, ocn grid, cpl pes - integer , target :: x2oacc_ox_cnt ! x2oacc_ox: number of time samples accumulated - - ! other module variables - integer :: mpicom_CPLID ! MPI cpl communicator - logical :: flood_present ! .true. => rof is computing flood - character(CS) :: vect_map ! vector mapping type - logical :: x2o_average ! logical for x2o averaging to 1 ocean instance from multi instances -#ifdef SUMMITDEV_PGI - ! 
Sarat: Dummy variable added to workaround PGI compiler bug (PGI 17.9) as of Oct 23, 2017 - logical :: dummy_pgibugfix -#endif - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_ocn_init(infodata, atm_c2_ocn, atm_c2_ice, ice_c2_ocn, rof_c2_ocn, & - wav_c2_ocn, glc_c2_ocn, glcshelf_c2_ocn) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and all other non-mapping - ! module variables except for accumulators - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in) :: atm_c2_ocn ! .true.=>atm to ocn coupling on - logical , intent(in) :: atm_c2_ice ! .true.=>atm to ice coupling on - logical , intent(in) :: ice_c2_ocn ! .true.=>ice to ocn coupling on - logical , intent(in) :: rof_c2_ocn ! .true.=>rof to ocn coupling on - logical , intent(in) :: wav_c2_ocn ! .true.=>wav to ocn coupling on - logical , intent(in) :: glc_c2_ocn ! .true.=>glc to ocn coupling on - logical , intent(in) :: glcshelf_c2_ocn ! .true.=>glc ice shelf to ocn coupling on - ! - ! Local Variables - logical :: esmf_map_flag ! .true. => use esmf for mapping - logical :: ocn_present ! .true. => ocn is present - logical :: atm_present ! .true. => atm is present - logical :: ice_present ! .true. => ice is present - logical :: iamroot_CPLID ! .true. => CPLID masterproc - logical :: samegrid_ao ! samegrid atm and ocean - logical :: samegrid_og ! samegrid glc and ocean - logical :: samegrid_ow ! samegrid ocean and wave - logical :: samegrid_ro ! samegrid runoff and ocean - integer :: atm_nx, atm_ny - integer :: lsize_o - integer :: egi, eri - integer :: ewi, eai, eii, eoi - character(CL) :: ocn_gnam ! ocn grid - character(CL) :: atm_gnam ! atm grid - character(CL) :: rof_gnam ! rof grid - character(CL) :: wav_gnam ! wav grid - character(CL) :: glc_gnam ! glc grid - type(mct_avect), pointer :: o2x_ox - type(mct_avect), pointer :: x2o_ox - character(*), parameter :: subname = '(prep_ocn_init)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - character(*), parameter :: F01 = "('"//subname//" : ', A, I8 )" - !--------------------------------------------------------------- - - call seq_infodata_getData(infodata , & - ocn_present=ocn_present , & - atm_present=atm_present , & - ice_present=ice_present , & - flood_present=flood_present , & - vect_map=vect_map , & - atm_gnam=atm_gnam , & - ocn_gnam=ocn_gnam , & - rof_gnam=rof_gnam , & - wav_gnam=wav_gnam , & - atm_nx=atm_nx , & - atm_ny=atm_ny , & - glc_gnam=glc_gnam , & - esmf_map_flag=esmf_map_flag ) - - allocate(mapper_Sa2o) - allocate(mapper_Va2o) - allocate(mapper_Fa2o) - allocate(mapper_Fr2o) - allocate(mapper_Rr2o_liq) - allocate(mapper_Rr2o_ice) - allocate(mapper_SFi2o) - allocate(mapper_Rg2o_liq) - allocate(mapper_Rg2o_ice) - allocate(mapper_Sg2o) - allocate(mapper_Fg2o) - allocate(mapper_Sw2o) - - if (ocn_present) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - o2x_ox => component_get_c2x_cx(ocn(1)) - x2o_ox => component_get_x2c_cx(ocn(1)) - lsize_o = mct_aVect_lsize(o2x_ox) - - ! x2o_average setup logic - if (num_inst_max == num_inst_ocn) then - ! standard multi-instance merge - x2o_average = .false. - elseif (num_inst_max > 1 .and. num_inst_ocn == 1) then - ! averaging ocean merge - x2o_average = .true. 
- if (iamroot_CPLID) then - write(logunit,F01) 'x2o averaging on over instances =',num_inst_max - end if - call mct_aVect_init(x2o_ox_inst, x2o_ox, lsize_o) - call mct_aVect_zero(x2o_ox_inst) - else - ! not allowed - write(logunit,F00) ' ERROR in x2o_average setup logic ' - call shr_sys_abort(subname//' ERROR in x2o_average setup logic') - endif - - allocate(a2x_ox(num_inst_atm)) - do eai = 1,num_inst_atm - call mct_aVect_init(a2x_ox(eai), rList=seq_flds_a2x_fields, lsize=lsize_o) - call mct_aVect_zero(a2x_ox(eai)) - enddo - allocate(r2x_ox(num_inst_rof)) - do eri = 1,num_inst_rof - call mct_aVect_init(r2x_ox(eri), rList=seq_flds_r2x_fields, lsize=lsize_o) - call mct_aVect_zero(r2x_ox(eri)) - enddo - allocate(g2x_ox(num_inst_glc)) - do egi = 1,num_inst_glc - call mct_aVect_init(g2x_ox(egi), rList=seq_flds_g2x_fields, lsize=lsize_o) - call mct_aVect_zero(g2x_ox(egi)) - end do - allocate(w2x_ox(num_inst_wav)) - do ewi = 1,num_inst_wav - call mct_aVect_init(w2x_ox(ewi), rList=seq_flds_w2x_fields, lsize=lsize_o) - call mct_aVect_zero(w2x_ox(ewi)) - enddo - allocate(i2x_ox(num_inst_ice)) - do eii = 1,num_inst_ice - call mct_aVect_init(i2x_ox(eii), rList=seq_flds_i2x_fields, lsize=lsize_o) - call mct_aVect_zero(i2x_ox(eii)) - enddo - - allocate(x2oacc_ox(num_inst_ocn)) - do eoi = 1,num_inst_ocn - call mct_avect_init(x2oacc_ox(eoi), x2o_ox, lsize_o) - call mct_aVect_zero(x2oacc_ox(eoi)) - end do - x2oacc_ox_cnt = 0 - - samegrid_ao = .true. - samegrid_ro = .true. - samegrid_ow = .true. - samegrid_og = .true. - if (trim(atm_gnam) /= trim(ocn_gnam)) samegrid_ao = .false. - if (trim(rof_gnam) /= trim(ocn_gnam)) samegrid_ro = .false. - if (trim(ocn_gnam) /= trim(wav_gnam)) samegrid_ow = .false. - if (trim(ocn_gnam) /= trim(glc_gnam)) samegrid_og = .false. - - if (atm_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fa2o' - end if - call seq_map_init_rcfile(mapper_Fa2o, atm(1), ocn(1), & - 'seq_maps.rc','atm2ocn_fmapname:','atm2ocn_fmaptype:',samegrid_ao, & - 'mapper_Fa2o initialization',esmf_map_flag) - call shr_sys_flush(logunit) - end if - - ! atm_c2_ice flag is here because ice and ocn are constrained to be on the same - ! grid so the atm->ice mapping is set to the atm->ocn mapping to improve performance - if (atm_c2_ocn .or. atm_c2_ice) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sa2o' - end if - call seq_map_init_rcfile(mapper_Sa2o, atm(1), ocn(1), & - 'seq_maps.rc','atm2ocn_smapname:','atm2ocn_smaptype:',samegrid_ao, & - 'mapper_Sa2o initialization',esmf_map_flag) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Va2o' - end if - call seq_map_init_rcfile(mapper_Va2o, atm(1), ocn(1), & - 'seq_maps.rc','atm2ocn_vmapname:','atm2ocn_vmaptype:',samegrid_ao, & - 'mapper_Va2o initialization',esmf_map_flag) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Va2o vect with vect_map = ',trim(vect_map) - end if - call seq_map_initvect(mapper_Va2o, vect_map, atm(1), ocn(1), string='mapper_Va2o initvect') - endif - call shr_sys_flush(logunit) - - ! 
needed for domain checking - if (ice_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_SFi2o' - end if - call seq_map_init_rearrolap(mapper_SFi2o, ice(1), ocn(1), 'mapper_SFi2o') - endif - call shr_sys_flush(logunit) - - if (rof_c2_ocn) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Rr2o_liq' - end if - call seq_map_init_rcfile(mapper_Rr2o_liq, rof(1), ocn(1), & - 'seq_maps.rc', 'rof2ocn_liq_rmapname:', 'rof2ocn_liq_rmaptype:',samegrid_ro, & - 'mapper_Rr2o_liq initialization',esmf_map_flag) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Rr2o_ice' - end if - call seq_map_init_rcfile(mapper_Rr2o_ice, rof(1), ocn(1), & - 'seq_maps.rc', 'rof2ocn_ice_rmapname:', 'rof2ocn_ice_rmaptype:',samegrid_ro, & - 'mapper_Rr2o_ice initialization',esmf_map_flag) - - if (flood_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fr2o' - end if - call seq_map_init_rcfile( mapper_Fr2o, rof(1), ocn(1), & - 'seq_maps.rc', 'rof2ocn_fmapname:', 'rof2ocn_fmaptype:',samegrid_ro, & - string='mapper_Fr2o initialization', esmf_map=esmf_map_flag) - endif - endif - call shr_sys_flush(logunit) - - if (glc_c2_ocn) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Rg2o_liq' - end if - call seq_map_init_rcfile(mapper_Rg2o_liq, glc(1), ocn(1), & - 'seq_maps.rc', 'glc2ocn_liq_rmapname:', 'glc2ocn_liq_rmaptype:',samegrid_og, & - 'mapper_Rg2o_liq initialization',esmf_map_flag) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Rg2o_ice' - end if - call seq_map_init_rcfile(mapper_Rg2o_ice, glc(1), ocn(1), & - 'seq_maps.rc', 'glc2ocn_ice_rmapname:', 'glc2ocn_ice_rmaptype:',samegrid_og, & - 'mapper_Rg2o_ice initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - if (glcshelf_c2_ocn) then !ice shelf coupled properties - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sg2o' - end if - call seq_map_init_rcfile(mapper_Sg2o, glc(1), ocn(1), & - 'seq_maps.rc', 'glc2ocn_smapname:', 'glc2ocn_smaptype:',samegrid_og, & - 'mapper_Sg2o initialization',esmf_map_flag) - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fg2o' - end if - call seq_map_init_rcfile(mapper_Fg2o, glc(1), ocn(1), & - 'seq_maps.rc', 'glc2ocn_fmapname:', 'glc2ocn_fmaptype:',samegrid_og, & - 'mapper_Fg2o initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - if (wav_c2_ocn) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sw2o' - end if - call seq_map_init_rcfile(mapper_Sw2o, wav(1), ocn(1), & - 'seq_maps.rc', 'wav2ocn_smapname:', 'wav2ocn_smaptype:',samegrid_ow, & - 'mapper_Sw2o initialization') - endif - call shr_sys_flush(logunit) - - end if - - end subroutine prep_ocn_init - - !================================================================================================ - - subroutine prep_ocn_accum(timer) - !--------------------------------------------------------------- - ! Description - ! Accumulate ocn inputs - ! Form partial sum of tavg ocn inputs (virtual "send" to ocn) - ! NOTE: this is done AFTER the call to the merge in prep_ocn_mrg - ! - ! Arguments - character(len=*) , intent(in) :: timer - ! - ! 
Local Variables - integer :: eoi - type(mct_avect) , pointer :: x2o_ox - character(*) , parameter :: subname = '(prep_ocn_accum)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer), barrier=mpicom_CPLID) - do eoi = 1,num_inst_ocn - x2o_ox => component_get_x2c_cx(ocn(eoi)) - - if (x2oacc_ox_cnt == 0) then - call mct_avect_copy(x2o_ox, x2oacc_ox(eoi)) - else - call mct_avect_accum(x2o_ox, x2oacc_ox(eoi)) - endif - enddo - x2oacc_ox_cnt = x2oacc_ox_cnt + 1 - call t_drvstopf (trim(timer)) - - end subroutine prep_ocn_accum - - !================================================================================================ - - subroutine prep_ocn_accum_avg(timer_accum) - !--------------------------------------------------------------- - ! Description - ! Finish accumulation ocn inputs - ! - ! Arguments - character(len=*), intent(in) :: timer_accum - ! - ! Local Variables - integer :: eoi - type(mct_avect), pointer :: x2o_ox - character(*), parameter :: subname = '(prep_ocn_accum_avg)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer_accum), barrier=mpicom_CPLID) - do eoi = 1,num_inst_ocn - ! temporary formation of average - if (x2oacc_ox_cnt > 1) then - call mct_avect_avg(x2oacc_ox(eoi), x2oacc_ox_cnt) - end if - - ! ***NOTE***THE FOLLOWING ACTUALLY MODIFIES x2o_ox - x2o_ox => component_get_x2c_cx(ocn(eoi)) - call mct_avect_copy(x2oacc_ox(eoi), x2o_ox) - enddo - x2oacc_ox_cnt = 0 - call t_drvstopf (trim(timer_accum)) - - end subroutine prep_ocn_accum_avg - - !================================================================================================ - - subroutine prep_ocn_mrg(infodata, fractions_ox, xao_ox, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Merge all ocn inputs - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type(mct_aVect) , intent(in) :: fractions_ox(:) - type(mct_aVect) , intent(in) :: xao_ox(:) ! Atm-ocn fluxes, ocn grid, cpl pes - character(len=*) , intent(in) :: timer_mrg - ! - ! Local Variables - integer :: eii, ewi, egi, eoi, eai, eri, exi, efi, emi - real(r8) :: flux_epbalfact ! adjusted precip factor - type(mct_avect), pointer :: x2o_ox - integer :: cnt - character(*), parameter :: subname = '(prep_ocn_mrg)' - !--------------------------------------------------------------- - - call seq_infodata_GetData(infodata, & - flux_epbalfact=flux_epbalfact) - - call t_drvstartf (trim(timer_mrg), barrier=mpicom_CPLID) - - ! Use emi here for instance averaging capability, num_inst_max = num_inst_ocn normally - ! if NOT x2o_average, just fill each instance of component_get_x2c_cx(ocn(eoi)) - ! if x2o_average, then computer merge into x2o_ox_inst and accumulate that to - ! component_get_x2c_cx(ocn(1)) and then average it at the end - - if (x2o_average) then - x2o_ox => component_get_x2c_cx(ocn(1)) - call mct_aVect_zero(x2o_ox) - endif - - cnt = 0 - do emi = 1,num_inst_max - ! 
Use fortran mod to address ensembles in merge - eoi = mod((emi-1),num_inst_ocn) + 1 - eai = mod((emi-1),num_inst_atm) + 1 - eii = mod((emi-1),num_inst_ice) + 1 - eri = mod((emi-1),num_inst_rof) + 1 - ewi = mod((emi-1),num_inst_wav) + 1 - egi = mod((emi-1),num_inst_glc) + 1 - exi = mod((emi-1),num_inst_xao) + 1 - efi = mod((emi-1),num_inst_frc) + 1 - - if (x2o_average) then - x2o_ox => x2o_ox_inst - else - x2o_ox => component_get_x2c_cx(ocn(eoi)) - endif - - call prep_ocn_merge( flux_epbalfact, a2x_ox(eai), i2x_ox(eii), r2x_ox(eri), & - w2x_ox(ewi), g2x_ox(egi), xao_ox(exi), fractions_ox(efi), x2o_ox ) - - if (x2o_average) then - x2o_ox => component_get_x2c_cx(ocn(1)) - call mct_aVect_accum(x2o_ox_inst, x2o_ox) - cnt = cnt + 1 - endif - enddo - - if (x2o_average) then - x2o_ox => component_get_x2c_cx(ocn(1)) - call mct_avect_avg(x2o_ox,cnt) - endif - - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_ocn_mrg - - !================================================================================================ - - subroutine prep_ocn_merge( flux_epbalfact, a2x_o, i2x_o, r2x_o, w2x_o, g2x_o, xao_o, & - fractions_o, x2o_o ) - - use prep_glc_mod, only: prep_glc_calculate_subshelf_boundary_fluxes - - !----------------------------------------------------------------------- - ! - ! Arguments - real(r8) , intent(in) :: flux_epbalfact - type(mct_aVect), intent(in) :: a2x_o - type(mct_aVect), intent(in) :: i2x_o - type(mct_aVect), intent(in) :: r2x_o - type(mct_aVect), intent(in) :: w2x_o - type(mct_aVect), intent(in) :: g2x_o - type(mct_aVect), intent(in) :: xao_o - type(mct_aVect), intent(in) :: fractions_o - type(mct_aVect), intent(inout) :: x2o_o - ! - ! Local variables - integer :: n,ka,ki,ko,kr,kw,kx,kir,kor,i,i1,o1 - integer :: kof,kif - integer :: lsize - integer :: noflds,naflds,niflds,nrflds,nwflds,nxflds,ngflds - real(r8) :: ifrac,ifracr - real(r8) :: afrac,afracr - real(r8) :: frac_sum - real(r8) :: avsdr, anidr, avsdf, anidf ! albedos - real(r8) :: fswabsv, fswabsi ! sw - character(CL),allocatable :: field_ocn(:) ! string converted to char - character(CL),allocatable :: field_atm(:) ! string converted to char - character(CL),allocatable :: field_ice(:) ! string converted to char - character(CL),allocatable :: field_rof(:) ! string converted to char - character(CL),allocatable :: field_wav(:) ! string converted to char - character(CL),allocatable :: field_xao(:) ! string converted to char - character(CL),allocatable :: field_glc(:) ! string converted to char - character(CL),allocatable :: itemc_ocn(:) ! string converted to char - character(CL),allocatable :: itemc_atm(:) ! string converted to char - character(CL),allocatable :: itemc_ice(:) ! string converted to char - character(CL),allocatable :: itemc_rof(:) ! string converted to char - character(CL),allocatable :: itemc_wav(:) ! string converted to char - character(CL),allocatable :: itemc_xao(:) ! string converted to char - character(CL),allocatable :: itemc_g2x(:) ! 
string converted to char - integer, save :: index_a2x_Faxa_swvdr - integer, save :: index_a2x_Faxa_swvdf - integer, save :: index_a2x_Faxa_swndr - integer, save :: index_a2x_Faxa_swndf - integer, save :: index_i2x_Fioi_swpen - integer, save :: index_xao_So_avsdr - integer, save :: index_xao_So_anidr - integer, save :: index_xao_So_avsdf - integer, save :: index_xao_So_anidf - integer, save :: index_a2x_Faxa_snowc - integer, save :: index_a2x_Faxa_snowl - integer, save :: index_a2x_Faxa_rainc - integer, save :: index_a2x_Faxa_rainl - integer, save :: index_r2x_Forr_rofl - integer, save :: index_r2x_Forr_rofi - integer, save :: index_r2x_Forr_rofl_16O - integer, save :: index_r2x_Forr_rofi_16O - integer, save :: index_r2x_Forr_rofl_18O - integer, save :: index_r2x_Forr_rofi_18O - integer, save :: index_r2x_Forr_rofl_HDO - integer, save :: index_r2x_Forr_rofi_HDO - integer, save :: index_r2x_Flrr_flood - integer, save :: index_g2x_Fogg_rofl - integer, save :: index_g2x_Fogg_rofi - integer, save :: index_x2o_Foxx_swnet - integer, save :: index_x2o_Faxa_snow - integer, save :: index_x2o_Faxa_rain - integer, save :: index_x2o_Faxa_prec - integer, save :: index_x2o_Foxx_rofl - integer, save :: index_x2o_Foxx_rofi - integer, save :: index_x2o_Sf_afrac - integer, save :: index_x2o_Sf_afracr - integer, save :: index_x2o_Foxx_swnet_afracr - integer, save :: index_x2o_Foxx_rofl_16O - integer, save :: index_x2o_Foxx_rofi_16O - integer, save :: index_x2o_Foxx_rofl_18O - integer, save :: index_x2o_Foxx_rofi_18O - integer, save :: index_x2o_Foxx_rofl_HDO - integer, save :: index_x2o_Foxx_rofi_HDO - integer, save :: index_a2x_Faxa_snowc_16O - integer, save :: index_a2x_Faxa_snowl_16O - integer, save :: index_a2x_Faxa_rainc_16O - integer, save :: index_a2x_Faxa_rainl_16O - integer, save :: index_x2o_Faxa_rain_16O - integer, save :: index_x2o_Faxa_snow_16O - integer, save :: index_x2o_Faxa_prec_16O - integer, save :: index_a2x_Faxa_snowc_18O - integer, save :: index_a2x_Faxa_snowl_18O - integer, save :: index_a2x_Faxa_rainc_18O - integer, save :: index_a2x_Faxa_rainl_18O - integer, save :: index_x2o_Faxa_rain_18O - integer, save :: index_x2o_Faxa_snow_18O - integer, save :: index_x2o_Faxa_prec_18O - integer, save :: index_a2x_Faxa_snowc_HDO - integer, save :: index_a2x_Faxa_snowl_HDO - integer, save :: index_a2x_Faxa_rainc_HDO - integer, save :: index_a2x_Faxa_rainl_HDO - integer, save :: index_x2o_Faxa_rain_HDO - integer, save :: index_x2o_Faxa_snow_HDO - integer, save :: index_x2o_Faxa_prec_HDO - logical :: iamroot - logical, save, pointer :: amerge(:),imerge(:),xmerge(:) - integer, save, pointer :: aindx(:), iindx(:), xindx(:) - character(CL),allocatable :: mrgstr(:) ! temporary string - type(mct_aVect_sharedindices),save :: a2x_sharedindices - type(mct_aVect_sharedindices),save :: i2x_sharedindices - type(mct_aVect_sharedindices),save :: r2x_sharedindices - type(mct_aVect_sharedindices),save :: w2x_sharedindices - type(mct_aVect_sharedindices),save :: xao_sharedindices - type(mct_aVect_sharedindices),save :: g2x_sharedindices - logical, save :: first_time = .true. 
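! ------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the original
!  prep_ocn_mod.F90.]  The long block of "integer, save :: index_..."
! declarations above, together with the first_time flag declared just
! above, caches the positions returned by mct_aVect_indexRA so the
! string lookups are performed only on the first merge call and reused
! afterwards.  A minimal standalone sketch of the same save/first_time
! idiom, with a dummy lookup standing in for mct_aVect_indexRA:
program first_time_idiom
  implicit none
  integer :: k
  do k = 1, 3
     call do_merge()           ! lookup cost is paid only on call 1
  end do
contains
  subroutine do_merge()
    integer, save :: index_swnet = 0      ! cached field position
    logical, save :: first_time = .true.
    if (first_time) then
       index_swnet = expensive_lookup('Foxx_swnet')
       first_time  = .false.              ! lookup happens once
    end if
    ! ... index_swnet is then used on every call ...
  end subroutine do_merge
  integer function expensive_lookup(name)
    character(len=*), intent(in) :: name
    expensive_lookup = len_trim(name)     ! stand-in for a real search
  end function expensive_lookup
end program first_time_idiom
! ------------------------------------------------------------------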
- character(*),parameter :: subName = '(prep_ocn_merge) ' - !----------------------------------------------------------------------- - - call seq_comm_setptrs(CPLID, iamroot=iamroot) - - noflds = mct_aVect_nRattr(x2o_o) - naflds = mct_aVect_nRattr(a2x_o) - niflds = mct_aVect_nRattr(i2x_o) - nrflds = mct_aVect_nRattr(r2x_o) - nwflds = mct_aVect_nRattr(w2x_o) - nxflds = mct_aVect_nRattr(xao_o) - ngflds = mct_aVect_nRattr(g2x_o) - - if (first_time) then - index_a2x_Faxa_swvdr = mct_aVect_indexRA(a2x_o,'Faxa_swvdr') - index_a2x_Faxa_swvdf = mct_aVect_indexRA(a2x_o,'Faxa_swvdf') - index_a2x_Faxa_swndr = mct_aVect_indexRA(a2x_o,'Faxa_swndr') - index_a2x_Faxa_swndf = mct_aVect_indexRA(a2x_o,'Faxa_swndf') - index_i2x_Fioi_swpen = mct_aVect_indexRA(i2x_o,'Fioi_swpen') - index_xao_So_avsdr = mct_aVect_indexRA(xao_o,'So_avsdr') - index_xao_So_anidr = mct_aVect_indexRA(xao_o,'So_anidr') - index_xao_So_avsdf = mct_aVect_indexRA(xao_o,'So_avsdf') - index_xao_So_anidf = mct_aVect_indexRA(xao_o,'So_anidf') - index_x2o_Foxx_swnet = mct_aVect_indexRA(x2o_o,'Foxx_swnet') - - index_a2x_Faxa_snowc = mct_aVect_indexRA(a2x_o,'Faxa_snowc') - index_a2x_Faxa_snowl = mct_aVect_indexRA(a2x_o,'Faxa_snowl') - index_a2x_Faxa_rainc = mct_aVect_indexRA(a2x_o,'Faxa_rainc') - index_a2x_Faxa_rainl = mct_aVect_indexRA(a2x_o,'Faxa_rainl') - index_r2x_Forr_rofl = mct_aVect_indexRA(r2x_o,'Forr_rofl') - index_r2x_Forr_rofi = mct_aVect_indexRA(r2x_o,'Forr_rofi') - index_r2x_Flrr_flood = mct_aVect_indexRA(r2x_o,'Flrr_flood') - index_g2x_Fogg_rofl = mct_aVect_indexRA(g2x_o,'Fogg_rofl') - index_g2x_Fogg_rofi = mct_aVect_indexRA(g2x_o,'Fogg_rofi') - index_x2o_Faxa_snow = mct_aVect_indexRA(x2o_o,'Faxa_snow') - index_x2o_Faxa_rain = mct_aVect_indexRA(x2o_o,'Faxa_rain') - index_x2o_Faxa_prec = mct_aVect_indexRA(x2o_o,'Faxa_prec') - index_x2o_Foxx_rofl = mct_aVect_indexRA(x2o_o,'Foxx_rofl') - index_x2o_Foxx_rofi = mct_aVect_indexRA(x2o_o,'Foxx_rofi') - - if (seq_flds_i2o_per_cat) then - index_x2o_Sf_afrac = mct_aVect_indexRA(x2o_o,'Sf_afrac') - index_x2o_Sf_afracr = mct_aVect_indexRA(x2o_o,'Sf_afracr') - index_x2o_Foxx_swnet_afracr = mct_aVect_indexRA(x2o_o,'Foxx_swnet_afracr') - endif - - !wiso: - ! H2_16O - index_a2x_Faxa_snowc_16O = mct_aVect_indexRA(a2x_o,'Faxa_snowc_16O', perrWith='quiet') - index_a2x_Faxa_snowl_16O = mct_aVect_indexRA(a2x_o,'Faxa_snowl_16O', perrWith='quiet') - index_a2x_Faxa_rainc_16O = mct_aVect_indexRA(a2x_o,'Faxa_rainc_16O', perrWith='quiet') - index_a2x_Faxa_rainl_16O = mct_aVect_indexRA(a2x_o,'Faxa_rainl_16O', perrWith='quiet') - index_r2x_Forr_rofl_16O = mct_aVect_indexRA(r2x_o,'Forr_rofl_16O' , perrWith='quiet') - index_r2x_Forr_rofi_16O = mct_aVect_indexRA(r2x_o,'Forr_rofi_16O' , perrWith='quiet') - index_x2o_Faxa_rain_16O = mct_aVect_indexRA(x2o_o,'Faxa_rain_16O' , perrWith='quiet') - index_x2o_Faxa_snow_16O = mct_aVect_indexRA(x2o_o,'Faxa_snow_16O' , perrWith='quiet') - index_x2o_Faxa_prec_16O = mct_aVect_indexRA(x2o_o,'Faxa_prec_16O' , perrWith='quiet') - index_x2o_Foxx_rofl_16O = mct_aVect_indexRA(x2o_o,'Foxx_rofl_16O' , perrWith='quiet') - index_x2o_Foxx_rofi_16O = mct_aVect_indexRA(x2o_o,'Foxx_rofi_16O' , perrWith='quiet') - ! 
H2_18O - index_a2x_Faxa_snowc_18O = mct_aVect_indexRA(a2x_o,'Faxa_snowc_18O', perrWith='quiet') - index_a2x_Faxa_snowl_18O = mct_aVect_indexRA(a2x_o,'Faxa_snowl_18O', perrWith='quiet') - index_a2x_Faxa_rainc_18O = mct_aVect_indexRA(a2x_o,'Faxa_rainc_18O', perrWith='quiet') - index_a2x_Faxa_rainl_18O = mct_aVect_indexRA(a2x_o,'Faxa_rainl_18O', perrWith='quiet') - index_r2x_Forr_rofl_18O = mct_aVect_indexRA(r2x_o,'Forr_rofl_18O' , perrWith='quiet') - index_r2x_Forr_rofi_18O = mct_aVect_indexRA(r2x_o,'Forr_rofi_18O' , perrWith='quiet') - index_x2o_Faxa_rain_18O = mct_aVect_indexRA(x2o_o,'Faxa_rain_18O' , perrWith='quiet') - index_x2o_Faxa_snow_18O = mct_aVect_indexRA(x2o_o,'Faxa_snow_18O' , perrWith='quiet') - index_x2o_Faxa_prec_18O = mct_aVect_indexRA(x2o_o,'Faxa_prec_18O' , perrWith='quiet') - index_x2o_Foxx_rofl_18O = mct_aVect_indexRA(x2o_o,'Foxx_rofl_18O' , perrWith='quiet') - index_x2o_Foxx_rofi_18O = mct_aVect_indexRA(x2o_o,'Foxx_rofi_18O' , perrWith='quiet') - ! HDO - index_a2x_Faxa_snowc_HDO = mct_aVect_indexRA(a2x_o,'Faxa_snowc_HDO', perrWith='quiet') - index_a2x_Faxa_snowl_HDO = mct_aVect_indexRA(a2x_o,'Faxa_snowl_HDO', perrWith='quiet') - index_a2x_Faxa_rainc_HDO = mct_aVect_indexRA(a2x_o,'Faxa_rainc_HDO', perrWith='quiet') - index_a2x_Faxa_rainl_HDO = mct_aVect_indexRA(a2x_o,'Faxa_rainl_HDO', perrWith='quiet') - index_r2x_Forr_rofl_HDO = mct_aVect_indexRA(r2x_o,'Forr_rofl_HDO' , perrWith='quiet') - index_r2x_Forr_rofi_HDO = mct_aVect_indexRA(r2x_o,'Forr_rofi_HDO' , perrWith='quiet') - index_x2o_Faxa_rain_HDO = mct_aVect_indexRA(x2o_o,'Faxa_rain_HDO' , perrWith='quiet') - index_x2o_Faxa_snow_HDO = mct_aVect_indexRA(x2o_o,'Faxa_snow_HDO' , perrWith='quiet') - index_x2o_Faxa_prec_HDO = mct_aVect_indexRA(x2o_o,'Faxa_prec_HDO' , perrWith='quiet') - index_x2o_Foxx_rofl_HDO = mct_aVect_indexRA(x2o_o,'Foxx_rofl_HDO' , perrWith='quiet') - index_x2o_Foxx_rofi_HDO = mct_aVect_indexRA(x2o_o,'Foxx_rofi_HDO' , perrWith='quiet') - - ! Compute all other quantities based on standardized naming convention (see below) - ! Only ocn field states that have the name-prefix Sx_ will be merged - ! Only field names have the same name-suffix (after the "_") will be merged - ! (e.g. Si_fldname, Sa_fldname => merged to => Sx_fldname) - ! All fluxes will be scaled by the corresponding afrac or ifrac - ! EXCEPT for - ! -- Faxa_snnet, Faxa_snow, Faxa_rain, Faxa_prec (derived) - ! All i2x_o fluxes that have the name-suffix "Faii" (atm/ice fluxes) will be ignored - ! - only ice fluxes that are Fioi_... will be used in the ocean merges - - allocate(aindx(noflds), amerge(noflds)) - allocate(iindx(noflds), imerge(noflds)) - allocate(xindx(noflds), xmerge(noflds)) - allocate(field_atm(naflds), itemc_atm(naflds)) - allocate(field_ice(niflds), itemc_ice(niflds)) - allocate(field_ocn(noflds), itemc_ocn(noflds)) - allocate(field_rof(nrflds), itemc_rof(nrflds)) - allocate(field_wav(nwflds), itemc_wav(nwflds)) - allocate(field_xao(nxflds), itemc_xao(nxflds)) - allocate(field_glc(ngflds), itemc_g2x(ngflds)) - allocate(mrgstr(noflds)) - aindx(:) = 0 - iindx(:) = 0 - xindx(:) = 0 - amerge(:) = .true. - imerge(:) = .true. - xmerge(:) = .true. 
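! ------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the original
!  prep_ocn_mod.F90.]  The naming convention described above pairs
! fields by the suffix that follows the first underscore: a state
! exported by the atm as Sa_<suffix> (or by the ice as Si_<suffix>)
! is matched to the merged ocean input Sx_<suffix>.  The loops that
! follow extract that suffix with scan(...,'_').  A tiny standalone
! sketch using hypothetical field names:
program suffix_match_sketch
  implicit none
  character(len=32) :: fld_atm, fld_ocn, suf_atm, suf_ocn
  fld_atm = 'Sa_tbot'                     ! hypothetical atm export name
  fld_ocn = 'Sx_tbot'                     ! hypothetical merged ocn input name
  suf_atm = fld_atm(scan(fld_atm,'_'):)   ! -> '_tbot'
  suf_ocn = fld_ocn(scan(fld_ocn,'_'):)   ! -> '_tbot'
  if (trim(suf_atm) == trim(suf_ocn)) then
     print *, trim(fld_atm)//' would be merged into '//trim(fld_ocn)
  end if
end program suffix_match_sketch
! ------------------------------------------------------------------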
- - do ko = 1,noflds - field_ocn(ko) = mct_aVect_getRList2c(ko, x2o_o) - itemc_ocn(ko) = trim(field_ocn(ko)(scan(field_ocn(ko),'_'):)) - enddo - do ka = 1,naflds - field_atm(ka) = mct_aVect_getRList2c(ka, a2x_o) - itemc_atm(ka) = trim(field_atm(ka)(scan(field_atm(ka),'_'):)) - enddo - do ki = 1,niflds - field_ice(ki) = mct_aVect_getRList2c(ki, i2x_o) - itemc_ice(ki) = trim(field_ice(ki)(scan(field_ice(ki),'_'):)) - enddo - do kr = 1,nrflds - field_rof(kr) = mct_aVect_getRList2c(kr, r2x_o) - itemc_rof(kr) = trim(field_rof(kr)(scan(field_rof(kr),'_'):)) - enddo - do kw = 1,nwflds - field_wav(kw) = mct_aVect_getRList2c(kw, w2x_o) - itemc_wav(kw) = trim(field_wav(kw)(scan(field_wav(kw),'_'):)) - enddo - do kx = 1,nxflds - field_xao(kx) = mct_aVect_getRList2c(kx, xao_o) - itemc_xao(kx) = trim(field_xao(kx)(scan(field_xao(kx),'_'):)) - enddo - do kx = 1,ngflds - field_glc(kx) = mct_aVect_getRList2c(kx, g2x_o) - itemc_g2x(kx) = trim(field_glc(kx)(scan(field_glc(kx),'_'):)) - enddo - - call mct_aVect_setSharedIndices(a2x_o, x2o_o, a2x_SharedIndices) - call mct_aVect_setSharedIndices(i2x_o, x2o_o, i2x_SharedIndices) - call mct_aVect_setSharedIndices(r2x_o, x2o_o, r2x_SharedIndices) - call mct_aVect_setSharedIndices(w2x_o, x2o_o, w2x_SharedIndices) - call mct_aVect_setSharedIndices(xao_o, x2o_o, xao_SharedIndices) - call mct_aVect_setSharedIndices(g2x_o, x2o_o, g2x_SharedIndices) - - do ko = 1,noflds - !--- document merge --- - mrgstr(ko) = subname//'x2o%'//trim(field_ocn(ko))//' =' - if (field_ocn(ko)(1:2) == 'PF') then - cycle ! if flux has first character as P, pass straight through - end if - if (field_ocn(ko)(1:1) == 'S' .and. field_ocn(ko)(2:2) /= 'x') then - cycle ! ignore all ocn states that do not have a Sx_ prefix - end if - if (trim(field_ocn(ko)) == 'Foxx_swnet' .or. & - trim(field_ocn(ko)) == 'Faxa_snow' .or. & - trim(field_ocn(ko)) == 'Faxa_rain' .or. & - trim(field_ocn(ko)) == 'Faxa_prec' )then - cycle ! ignore swnet, snow, rain, prec - treated explicitly above - end if - if (index(field_ocn(ko), 'Faxa_snow_' ) == 1 .or. & - index(field_ocn(ko), 'Faxa_rain_' ) == 1 .or. & - index(field_ocn(ko), 'Faxa_prec_' ) == 1 )then - cycle ! ignore isotope snow, rain, prec - treated explicitly above - end if - ! if (trim(field_ocn(ko)(1:5)) == 'Foxx_') then - ! cycle ! ignore runoff fields from land - treated in coupler - ! end if - - do ka = 1,naflds - if (trim(itemc_ocn(ko)) == trim(itemc_atm(ka))) then - if ((trim(field_ocn(ko)) == trim(field_atm(ka)))) then - if (field_atm(ka)(1:1) == 'F') amerge(ko) = .false. - end if - ! --- make sure only one field matches --- - if (aindx(ko) /= 0) then - write(logunit,*) subname,' ERROR: found multiple ka field matches for ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR multiple ka field matches') - endif - aindx(ko) = ka - end if - end do - do ki = 1,niflds - if (field_ice(ki)(1:1) == 'F' .and. field_ice(ki)(2:4) == 'aii') then - cycle ! ignore all i2x_o fluxes that are ice/atm fluxes - end if - if (trim(itemc_ocn(ko)) == trim(itemc_ice(ki))) then - if ((trim(field_ocn(ko)) == trim(field_ice(ki)))) then - if (field_ice(ki)(1:1) == 'F') imerge(ko) = .false. - end if - ! 
--- make sure only one field matches --- - if (iindx(ko) /= 0) then - write(logunit,*) subname,' ERROR: found multiple ki field matches for ',trim(itemc_ice(ki)) - call shr_sys_abort(subname//' ERROR multiple ki field matches') - endif - iindx(ko) = ki - end if - end do - do kx = 1,nxflds - if (trim(itemc_ocn(ko)) == trim(itemc_xao(kx))) then - if ((trim(field_ocn(ko)) == trim(field_xao(kx)))) then - if (field_xao(kx)(1:1) == 'F') xmerge(ko) = .false. - end if - ! --- make sure only one field matches --- - if (xindx(ko) /= 0) then - write(logunit,*) subname,' ERROR: found multiple kx field matches for ',trim(itemc_xao(kx)) - call shr_sys_abort(subname//' ERROR multiple kx field matches') - endif - xindx(ko) = kx - end if - end do - - ! --- add some checks --- - - ! --- make sure no merge of BOTH atm and xao --- - if (aindx(ko) > 0 .and. xindx(ko) > 0) then - write(logunit,*) subname,' ERROR: aindx and xindx both non-zero, not allowed' - call shr_sys_abort(subname//' ERROR aindx and xindx both non-zero') - endif - - ! --- make sure all terms agree on merge or non-merge aspect --- - if (aindx(ko) > 0 .and. iindx(ko) > 0 .and. (amerge(ko) .neqv. imerge(ko))) then - write(logunit,*) subname,' ERROR: aindx and iindx merge logic error' - call shr_sys_abort(subname//' ERROR aindx and iindx merge logic error') - endif - if (aindx(ko) > 0 .and. xindx(ko) > 0 .and. (amerge(ko) .neqv. xmerge(ko))) then - write(logunit,*) subname,' ERROR: aindx and xindx merge logic error' - call shr_sys_abort(subname//' ERROR aindx and xindx merge logic error') - endif - if (xindx(ko) > 0 .and. iindx(ko) > 0 .and. (xmerge(ko) .neqv. imerge(ko))) then - write(logunit,*) subname,' ERROR: xindx and iindx merge logic error' - call shr_sys_abort(subname//' ERROR xindx and iindx merge logic error') - endif - - end do - - end if - - call mct_aVect_zero(x2o_o) - - !--- document copy operations --- - if (first_time) then - !--- document merge --- - do i=1,a2x_SharedIndices%shared_real%num_indices - i1=a2x_SharedIndices%shared_real%aVindices1(i) - o1=a2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = a2x%'//trim(field_atm(i1)) - enddo - do i=1,i2x_SharedIndices%shared_real%num_indices - i1=i2x_SharedIndices%shared_real%aVindices1(i) - o1=i2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = i2x%'//trim(field_ice(i1)) - enddo - do i=1,r2x_SharedIndices%shared_real%num_indices - i1=r2x_SharedIndices%shared_real%aVindices1(i) - o1=r2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = r2x%'//trim(field_rof(i1)) - enddo - do i=1,w2x_SharedIndices%shared_real%num_indices - i1=w2x_SharedIndices%shared_real%aVindices1(i) - o1=w2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = w2x%'//trim(field_wav(i1)) - enddo - do i=1,xao_SharedIndices%shared_real%num_indices - i1=xao_SharedIndices%shared_real%aVindices1(i) - o1=xao_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = xao%'//trim(field_xao(i1)) - enddo - do i=1,g2x_SharedIndices%shared_real%num_indices - i1=g2x_SharedIndices%shared_real%aVindices1(i) - o1=g2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = g2x%'//trim(field_glc(i1)) - enddo - endif - - ! call mct_aVect_copy(aVin=a2x_o, aVout=x2o_o, vector=mct_usevector) - ! call mct_aVect_copy(aVin=i2x_o, aVout=x2o_o, vector=mct_usevector) - ! call mct_aVect_copy(aVin=r2x_o, aVout=x2o_o, vector=mct_usevector) - ! 
call mct_aVect_copy(aVin=w2x_o, aVout=x2o_o, vector=mct_usevector) - ! call mct_aVect_copy(aVin=xao_o, aVout=x2o_o, vector=mct_usevector) - call mct_aVect_copy(aVin=a2x_o, aVout=x2o_o, vector=mct_usevector, sharedIndices=a2x_SharedIndices) - call mct_aVect_copy(aVin=i2x_o, aVout=x2o_o, vector=mct_usevector, sharedIndices=i2x_SharedIndices) - call mct_aVect_copy(aVin=r2x_o, aVout=x2o_o, vector=mct_usevector, sharedIndices=r2x_SharedIndices) - call mct_aVect_copy(aVin=w2x_o, aVout=x2o_o, vector=mct_usevector, sharedIndices=w2x_SharedIndices) - call mct_aVect_copy(aVin=xao_o, aVout=x2o_o, vector=mct_usevector, sharedIndices=xao_SharedIndices) - call mct_aVect_copy(aVin=g2x_o, aVout=x2o_o, vector=mct_usevector, sharedIndices=g2x_SharedIndices) - - !--- document manual merges --- - if (first_time) then - mrgstr(index_x2o_Foxx_swnet) = trim(mrgstr(index_x2o_Foxx_swnet))//' = '// & - 'afracr*(a2x%Faxa_swvdr*(1.0-xao%So_avsdr) + '// & - 'a2x%Faxa_swvdf*(1.0-xao%So_avsdf) + '// & - 'a2x%Faxa_swndr*(1.0-xao%So_anidr) + '// & - 'a2x%Faxa_swndf*(1.0-xao%So_anidf)) + '// & - 'ifrac*i2x%Fioi_swpen' - if (seq_flds_i2o_per_cat) then - mrgstr(index_x2o_Foxx_swnet_afracr) = trim(mrgstr(index_x2o_Foxx_swnet_afracr))//' = '// & - 'afracr*(a2x%Faxa_swvdr*(1.0-xao%So_avsdr) + '// & - 'a2x%Faxa_swvdf*(1.0-xao%So_avsdf) + '// & - 'a2x%Faxa_swndr*(1.0-xao%So_anidr) + '// & - 'a2x%Faxa_swndf*(1.0-xao%So_anidf))' - end if - mrgstr(index_x2o_Faxa_snow) = trim(mrgstr(index_x2o_Faxa_snow))//' = '// & - 'afrac*(a2x%Faxa_snowc + a2x%Faxa_snowl)*flux_epbalfact' - mrgstr(index_x2o_Faxa_rain) = trim(mrgstr(index_x2o_Faxa_rain))//' = '// & - 'afrac*(a2x%Faxa_rainc + a2x%Faxa_rainl)*flux_epbalfact' - mrgstr(index_x2o_Faxa_prec) = trim(mrgstr(index_x2o_Faxa_prec))//' = '// & - 'afrac*(a2x%Faxa_snowc + a2x%Faxa_snowl + a2x%Faxa_rainc + a2x%Faxa_rainl)*flux_epbalfact' - mrgstr(index_x2o_Foxx_rofl) = trim(mrgstr(index_x2o_Foxx_rofl))//' = '// & - '(r2x%Forr_rofl + r2x%Flrr_flood + g2x%Fogg_rofl)*flux_epbalfact' - mrgstr(index_x2o_Foxx_rofi) = trim(mrgstr(index_x2o_Foxx_rofi))//' = '// & - '(r2x%Forr_rofi + g2x%Fogg_rofi)*flux_epbalfact' - ! 
water isotope snow, rain prec - if ( index_x2o_Faxa_snow_16O /= 0 )then - mrgstr(index_x2o_Faxa_snow_16O) = trim(mrgstr(index_x2o_Faxa_snow_16O))//' = '// & - 'afrac*(a2x%Faxa_snowc_16O + a2x%Faxa_snowl_16O)*flux_epbalfact' - mrgstr(index_x2o_Faxa_rain_16O) = trim(mrgstr(index_x2o_Faxa_rain_16O))//' = '// & - 'afrac*(a2x%Faxa_rainc_16O + a2x%Faxa_rainl_16O)*flux_epbalfact' - mrgstr(index_x2o_Faxa_prec_16O) = trim(mrgstr(index_x2o_Faxa_prec_16O))//' = '// & - 'afrac*(a2x%Faxa_snowc_16O + a2x%Faxa_snowl_16O + a2x%Faxa_rainc_16O + '// & - 'a2x%Faxa_rainl_16O)*flux_epbalfact' - end if - if ( index_x2o_Faxa_snow_18O /= 0 )then - mrgstr(index_x2o_Faxa_snow_18O) = trim(mrgstr(index_x2o_Faxa_snow_18O))//' = '// & - 'afrac*(a2x%Faxa_snowc_18O + a2x%Faxa_snowl_18O)*flux_epbalfact' - mrgstr(index_x2o_Faxa_rain_18O) = trim(mrgstr(index_x2o_Faxa_rain_18O))//' = '// & - 'afrac*(a2x%Faxa_rainc_18O + a2x%Faxa_rainl_18O)*flux_epbalfact' - mrgstr(index_x2o_Faxa_prec_18O) = trim(mrgstr(index_x2o_Faxa_prec_18O))//' = '// & - 'afrac*(a2x%Faxa_snowc_18O + a2x%Faxa_snowl_18O + a2x%Faxa_rainc_18O + '// & - 'a2x%Faxa_rainl_18O)*flux_epbalfact' - end if - if ( index_x2o_Faxa_snow_HDO /= 0 )then - mrgstr(index_x2o_Faxa_snow_HDO) = trim(mrgstr(index_x2o_Faxa_snow_HDO))//' = '// & - 'afrac*(a2x%Faxa_snowc_HDO + a2x%Faxa_snowl_HDO)*flux_epbalfact' - mrgstr(index_x2o_Faxa_rain_HDO) = trim(mrgstr(index_x2o_Faxa_rain_HDO))//' = '// & - 'afrac*(a2x%Faxa_rainc_HDO + a2x%Faxa_rainl_HDO)*flux_epbalfact' - mrgstr(index_x2o_Faxa_prec_HDO) = trim(mrgstr(index_x2o_Faxa_prec_HDO))//' = '// & - 'afrac*(a2x%Faxa_snowc_HDO + a2x%Faxa_snowl_HDO + a2x%Faxa_rainc_HDO + '// & - 'a2x%Faxa_rainl_HDO)*flux_epbalfact' - end if - endif - - ! Compute input ocn state (note that this only applies to non-land portion of gridcell) - - kif = mct_aVect_indexRa(fractions_o,"ifrac",perrWith=subName) - kof = mct_aVect_indexRa(fractions_o,"ofrac",perrWith=subName) - kir = mct_aVect_indexRa(fractions_o,"ifrad",perrWith=subName) - kor = mct_aVect_indexRa(fractions_o,"ofrad",perrWith=subName) - lsize = mct_aVect_lsize(x2o_o) - do n = 1,lsize - - ifrac = fractions_o%rAttr(kif,n) - afrac = fractions_o%rAttr(kof,n) - frac_sum = ifrac + afrac - if ((frac_sum) /= 0._r8) then - ifrac = ifrac / (frac_sum) - afrac = afrac / (frac_sum) - endif - - ifracr = fractions_o%rAttr(kir,n) - afracr = fractions_o%rAttr(kor,n) - frac_sum = ifracr + afracr - if ((frac_sum) /= 0._r8) then - ifracr = ifracr / (frac_sum) - afracr = afracr / (frac_sum) - endif - - ! Derived: compute net short-wave - avsdr = xao_o%rAttr(index_xao_So_avsdr,n) - anidr = xao_o%rAttr(index_xao_So_anidr,n) - avsdf = xao_o%rAttr(index_xao_So_avsdf,n) - anidf = xao_o%rAttr(index_xao_So_anidf,n) - fswabsv = a2x_o%rAttr(index_a2x_Faxa_swvdr,n) * (1.0_R8 - avsdr) & - + a2x_o%rAttr(index_a2x_Faxa_swvdf,n) * (1.0_R8 - avsdf) - fswabsi = a2x_o%rAttr(index_a2x_Faxa_swndr,n) * (1.0_R8 - anidr) & - + a2x_o%rAttr(index_a2x_Faxa_swndf,n) * (1.0_R8 - anidf) - x2o_o%rAttr(index_x2o_Foxx_swnet,n) = (fswabsv + fswabsi) * afracr + & - i2x_o%rAttr(index_i2x_Fioi_swpen,n) * ifrac - - if (seq_flds_i2o_per_cat) then - x2o_o%rAttr(index_x2o_Sf_afrac,n) = afrac - x2o_o%rAttr(index_x2o_Sf_afracr,n) = afracr - x2o_o%rAttr(index_x2o_Foxx_swnet_afracr,n) = (fswabsv + fswabsi) * afracr - end if - - ! 
Derived: compute total precipitation - scale total precip and runoff - - x2o_o%rAttr(index_x2o_Faxa_snow ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl,n) * afrac - - x2o_o%rAttr(index_x2o_Faxa_snow ,n) = x2o_o%rAttr(index_x2o_Faxa_snow ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain ,n) = x2o_o%rAttr(index_x2o_Faxa_rain ,n) * flux_epbalfact - - x2o_o%rAttr(index_x2o_Faxa_prec ,n) = x2o_o%rAttr(index_x2o_Faxa_rain ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow ,n) - - x2o_o%rAttr(index_x2o_Foxx_rofl, n) = (r2x_o%rAttr(index_r2x_Forr_rofl , n) + & - r2x_o%rAttr(index_r2x_Flrr_flood, n) + & - g2x_o%rAttr(index_g2x_Fogg_rofl , n)) * flux_epbalfact - x2o_o%rAttr(index_x2o_Foxx_rofi, n) = (r2x_o%rAttr(index_r2x_Forr_rofi , n) + & - g2x_o%rAttr(index_g2x_Fogg_rofi , n)) * flux_epbalfact - - - if ( index_x2o_Foxx_rofl_16O /= 0 ) then - x2o_o%rAttr(index_x2o_Foxx_rofl_16O, n) = (r2x_o%rAttr(index_r2x_Forr_rofl_16O, n) + & - r2x_o%rAttr(index_r2x_Flrr_flood, n) + & - g2x_o%rAttr(index_g2x_Fogg_rofl , n)) * flux_epbalfact - x2o_o%rAttr(index_x2o_Foxx_rofi_16O, n) = (r2x_o%rAttr(index_r2x_Forr_rofi_16O , n) + & - g2x_o%rAttr(index_g2x_Fogg_rofi , n)) * flux_epbalfact - x2o_o%rAttr(index_x2o_Foxx_rofl_18O, n) = (r2x_o%rAttr(index_r2x_Forr_rofl_18O, n) + & - r2x_o%rAttr(index_r2x_Flrr_flood, n) + & - g2x_o%rAttr(index_g2x_Fogg_rofl , n)) * flux_epbalfact - x2o_o%rAttr(index_x2o_Foxx_rofi_18O, n) = (r2x_o%rAttr(index_r2x_Forr_rofi_18O , n) + & - g2x_o%rAttr(index_g2x_Fogg_rofi , n)) * flux_epbalfact - x2o_o%rAttr(index_x2o_Foxx_rofl_HDO, n) = (r2x_o%rAttr(index_r2x_Forr_rofl_HDO, n) + & - r2x_o%rAttr(index_r2x_Flrr_flood, n) + & - g2x_o%rAttr(index_g2x_Fogg_rofl , n)) * flux_epbalfact - x2o_o%rAttr(index_x2o_Foxx_rofi_HDO, n) = (r2x_o%rAttr(index_r2x_Forr_rofi_HDO , n) + & - g2x_o%rAttr(index_g2x_Fogg_rofi , n)) * flux_epbalfact - end if - - ! 
Derived: water isotopes total preciptiation and scaling - - if ( index_x2o_Faxa_snow_16O /= 0 )then - x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc_16O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl_16O,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc_16O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl_16O,n) * afrac - - x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) = x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) * flux_epbalfact - - x2o_o%rAttr(index_x2o_Faxa_prec_16O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_16O ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow_16O ,n) - end if - - if ( index_x2o_Faxa_snow_18O /= 0 )then - x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc_18O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl_18O,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc_18O,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl_18O,n) * afrac - - x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) = x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) * flux_epbalfact - - x2o_o%rAttr(index_x2o_Faxa_prec_18O ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_18O ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow_18O ,n) - end if - - if ( index_x2o_Faxa_snow_HDO /= 0 )then - x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) = a2x_o%rAttr(index_a2x_Faxa_snowc_HDO,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_snowl_HDO,n) * afrac - x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) = a2x_o%rAttr(index_a2x_Faxa_rainc_HDO,n) * afrac + & - a2x_o%rAttr(index_a2x_Faxa_rainl_HDO,n) * afrac - - x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) = x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) * flux_epbalfact - x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) * flux_epbalfact - - x2o_o%rAttr(index_x2o_Faxa_prec_HDO ,n) = x2o_o%rAttr(index_x2o_Faxa_rain_HDO ,n) + & - x2o_o%rAttr(index_x2o_Faxa_snow_HDO ,n) - end if - end do - - do ko = 1,noflds - !--- document merge --- - if (first_time) then - if (iindx(ko) > 0) then - if (imerge(ko)) then - mrgstr(ko) = trim(mrgstr(ko))//' + ifrac*i2x%'//trim(field_ice(iindx(ko))) - else - mrgstr(ko) = trim(mrgstr(ko))//' = ifrac*i2x%'//trim(field_ice(iindx(ko))) - end if - end if - if (aindx(ko) > 0) then - if (amerge(ko)) then - mrgstr(ko) = trim(mrgstr(ko))//' + afrac*a2x%'//trim(field_atm(aindx(ko))) - else - mrgstr(ko) = trim(mrgstr(ko))//' = afrac*a2x%'//trim(field_atm(aindx(ko))) - end if - end if - if (xindx(ko) > 0) then - if (xmerge(ko)) then - mrgstr(ko) = trim(mrgstr(ko))//' + afrac*xao%'//trim(field_xao(xindx(ko))) - else - mrgstr(ko) = trim(mrgstr(ko))//' = afrac*xao%'//trim(field_xao(xindx(ko))) - end if - end if - endif - - do n = 1,lsize - ifrac = fractions_o%rAttr(kif,n) - afrac = fractions_o%rAttr(kof,n) - frac_sum = ifrac + afrac - if ((frac_sum) /= 0._r8) then - ifrac = ifrac / (frac_sum) - afrac = afrac / (frac_sum) - endif - if (iindx(ko) > 0) then - if (imerge(ko)) then - x2o_o%rAttr(ko,n) = x2o_o%rAttr(ko,n) + i2x_o%rAttr(iindx(ko),n) * ifrac - else - x2o_o%rAttr(ko,n) = i2x_o%rAttr(iindx(ko),n) * ifrac - end if - end if - if (aindx(ko) > 0) then - if (amerge(ko)) then - x2o_o%rAttr(ko,n) = x2o_o%rAttr(ko,n) + a2x_o%rAttr(aindx(ko),n) * afrac - else - x2o_o%rAttr(ko,n) = a2x_o%rAttr(aindx(ko),n) * afrac - end if - end if - if (xindx(ko) > 0) then - if (xmerge(ko)) then - 
x2o_o%rAttr(ko,n) = x2o_o%rAttr(ko,n) + xao_o%rAttr(xindx(ko),n) * afrac - else - x2o_o%rAttr(ko,n) = xao_o%rAttr(xindx(ko),n) * afrac - end if - end if - end do - end do - - if (first_time) then - if (iamroot) then - write(logunit,'(A)') subname//' Summary:' - do ko = 1,noflds - write(logunit,'(A)') trim(mrgstr(ko)) - enddo - endif - deallocate(mrgstr) - deallocate(field_atm,itemc_atm) - deallocate(field_ocn,itemc_ocn) - deallocate(field_ice,itemc_ice) - deallocate(field_rof,itemc_rof) - deallocate(field_wav,itemc_wav) - deallocate(field_xao,itemc_xao) - endif - - first_time = .false. - - end subroutine prep_ocn_merge - - !================================================================================================ - - subroutine prep_ocn_calc_a2x_ox(timer) - !--------------------------------------------------------------- - ! - ! Arguments - character(len=*) , intent(in) :: timer - ! - ! Local Variables - integer :: eai - type(mct_avect), pointer :: a2x_ax - character(*), parameter :: subname = '(prep_ocn_calc_a2x_ox)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eai = 1,num_inst_atm - a2x_ax => component_get_c2x_cx(atm(eai)) - - call seq_map_map(mapper_Sa2o, a2x_ax, a2x_ox(eai), fldlist=seq_flds_a2x_states, norm=.true.) - - call seq_map_map(mapper_Fa2o, a2x_ax, a2x_ox(eai), fldlist=seq_flds_a2x_fluxes, norm=.true.) - -#ifdef COMPARE_TO_NUOPC - call seq_map_mapvect(mapper_Va2o, vect_map, a2x_ax, a2x_ox(eai), 'Sa_u', 'Sa_v', norm=.true.) -#else - !--- tcx the norm should be true below, it's false for bfb backwards compatability - call seq_map_mapvect(mapper_Va2o, vect_map, a2x_ax, a2x_ox(eai), 'Sa_u', 'Sa_v', norm=.false.) -#endif - - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ocn_calc_a2x_ox - - !================================================================================================ - - subroutine prep_ocn_calc_i2x_ox(timer) - !--------------------------------------------------------------- - ! Description - ! Create i2x_ox (note that i2x_ox is a local module variable) - ! - ! Arguments - character(len=*) , intent(in) :: timer - ! - ! Local Variables - integer :: eii - type(mct_avect), pointer :: i2x_ix - character(*), parameter :: subname = '(prep_ocn_calc_i2x_ox)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eii = 1,num_inst_ice - i2x_ix => component_get_c2x_cx(ice(eii)) - call seq_map_map(mapper_SFi2o, i2x_ix, i2x_ox(eii), norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ocn_calc_i2x_ox - - !================================================================================================ - - subroutine prep_ocn_calc_r2x_ox(timer) - !--------------------------------------------------------------- - ! Description - ! Create r2x_ox (note that r2x_ox is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eri - type(mct_avect), pointer :: r2x_rx - character(*), parameter :: subname = '(prep_ocn_calc_r2x_ox)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eri = 1,num_inst_rof - r2x_rx => component_get_c2x_cx(rof(eri)) - call seq_map_map(mapper_Rr2o_liq, r2x_rx, r2x_ox(eri), & - fldlist=seq_flds_r2o_liq_fluxes, norm=.false.) 
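! ------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the original
!  prep_ocn_mod.F90.]  Throughout these prep routines, ensemble
! instances of different components are paired with the Fortran mod()
! intrinsic (e.g. eoi = mod((emi-1),num_inst_ocn) + 1 in prep_ocn_mrg
! above), so a component with fewer instances is reused round-robin.
! A small standalone sketch of that pairing with hypothetical
! instance counts:
program instance_pairing_sketch
  implicit none
  integer, parameter :: num_inst_max = 4  ! e.g. 4 merge (atm) instances
  integer, parameter :: num_inst_ocn = 2  ! e.g. 2 ocn instances
  integer :: emi, eoi
  do emi = 1, num_inst_max
     eoi = mod((emi-1), num_inst_ocn) + 1
     print '(a,i0,a,i0)', 'merge instance ', emi, ' uses ocn instance ', eoi
  end do
  ! prints the pairs (1,1), (2,2), (3,1), (4,2)
end program instance_pairing_sketch
! ------------------------------------------------------------------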
- - call seq_map_map(mapper_Rr2o_ice, r2x_rx, r2x_ox(eri), & - fldlist=seq_flds_r2o_ice_fluxes, norm=.false.) - - if (flood_present) then - call seq_map_map(mapper_Fr2o, r2x_rx, r2x_ox(eri), & - fldlist='Flrr_flood', norm=.true.) - endif - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_ocn_calc_r2x_ox - - !================================================================================================ - - subroutine prep_ocn_calc_g2x_ox(timer) - !--------------------------------------------------------------- - ! Description - ! Create g2x_ox (note that g2x_ox is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: egi - type(mct_avect), pointer :: g2x_gx - character(*), parameter :: subname = '(prep_ocn_calc_g2x_ox)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do egi = 1,num_inst_glc - g2x_gx => component_get_c2x_cx(glc(egi)) - call seq_map_map(mapper_Rg2o_liq, g2x_gx, g2x_ox(egi), & - fldlist=seq_flds_g2o_liq_fluxes, norm=.false.) - - call seq_map_map(mapper_Rg2o_ice, g2x_gx, g2x_ox(egi), & - fldlist=seq_flds_g2o_ice_fluxes, norm=.false.) - enddo - call t_drvstopf (trim(timer)) - end subroutine prep_ocn_calc_g2x_ox - - !================================================================================================ - - subroutine prep_ocn_shelf_calc_g2x_ox(timer) - !--------------------------------------------------------------- - ! Description - ! Create g2x_ox (note that g2x_ox is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: egi - type(mct_avect), pointer :: g2x_gx - character(*), parameter :: subname = '(prep_ocn_calc_g2x_ox)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do egi = 1,num_inst_glc - g2x_gx => component_get_c2x_cx(glc(egi)) - - call seq_map_map(mapper_Sg2o, g2x_gx, g2x_ox(egi), norm=.true.) - - call seq_map_map(mapper_Fg2o, g2x_gx, g2x_ox(egi),norm=.true.) - - - enddo - call t_drvstopf (trim(timer)) - end subroutine prep_ocn_shelf_calc_g2x_ox - - !================================================================================================ - - subroutine prep_ocn_calc_w2x_ox(timer) - !--------------------------------------------------------------- - ! Description - ! Create w2x_ox (note that w2x_ox is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: ewi - type(mct_avect), pointer :: w2x_wx - character(*), parameter :: subname = '(prep_ocn_calc_w2x_ox)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do ewi = 1,num_inst_wav - w2x_wx => component_get_c2x_cx(wav(ewi)) - call seq_map_map(mapper_Sw2o, w2x_wx, w2x_ox(ewi), norm=.true.) 
- enddo - call t_drvstopf (trim(timer)) - end subroutine prep_ocn_calc_w2x_ox - - !================================================================================================ - - function prep_ocn_get_a2x_ox() - type(mct_aVect), pointer :: prep_ocn_get_a2x_ox(:) - prep_ocn_get_a2x_ox => a2x_ox(:) - end function prep_ocn_get_a2x_ox - - function prep_ocn_get_r2x_ox() - type(mct_aVect), pointer :: prep_ocn_get_r2x_ox(:) - prep_ocn_get_r2x_ox => r2x_ox(:) - end function prep_ocn_get_r2x_ox - - function prep_ocn_get_i2x_ox() - type(mct_aVect), pointer :: prep_ocn_get_i2x_ox(:) - prep_ocn_get_i2x_ox => i2x_ox(:) - end function prep_ocn_get_i2x_ox - - function prep_ocn_get_g2x_ox() - type(mct_aVect), pointer :: prep_ocn_get_g2x_ox(:) - prep_ocn_get_g2x_ox => g2x_ox(:) - end function prep_ocn_get_g2x_ox - - function prep_ocn_get_w2x_ox() - type(mct_aVect), pointer :: prep_ocn_get_w2x_ox(:) - prep_ocn_get_w2x_ox => w2x_ox(:) - end function prep_ocn_get_w2x_ox - - function prep_ocn_get_x2oacc_ox() - type(mct_aVect), pointer :: prep_ocn_get_x2oacc_ox(:) - prep_ocn_get_x2oacc_ox => x2oacc_ox(:) - end function prep_ocn_get_x2oacc_ox - - function prep_ocn_get_x2oacc_ox_cnt() - integer, pointer :: prep_ocn_get_x2oacc_ox_cnt - prep_ocn_get_x2oacc_ox_cnt => x2oacc_ox_cnt - end function prep_ocn_get_x2oacc_ox_cnt - - function prep_ocn_get_mapper_Sa2o() - type(seq_map), pointer :: prep_ocn_get_mapper_Sa2o - prep_ocn_get_mapper_Sa2o => mapper_Sa2o - end function prep_ocn_get_mapper_Sa2o - - function prep_ocn_get_mapper_Va2o() - type(seq_map), pointer :: prep_ocn_get_mapper_Va2o - prep_ocn_get_mapper_Va2o => mapper_Va2o - end function prep_ocn_get_mapper_Va2o - - function prep_ocn_get_mapper_Fa2o() - type(seq_map), pointer :: prep_ocn_get_mapper_Fa2o - prep_ocn_get_mapper_Fa2o => mapper_Fa2o - end function prep_ocn_get_mapper_Fa2o - - function prep_ocn_get_mapper_Fr2o() - type(seq_map), pointer :: prep_ocn_get_mapper_Fr2o - prep_ocn_get_mapper_Fr2o => mapper_Fr2o - end function prep_ocn_get_mapper_Fr2o - - function prep_ocn_get_mapper_Rr2o_liq() - type(seq_map), pointer :: prep_ocn_get_mapper_Rr2o_liq - prep_ocn_get_mapper_Rr2o_liq => mapper_Rr2o_liq - end function prep_ocn_get_mapper_Rr2o_liq - - function prep_ocn_get_mapper_Rr2o_ice() - type(seq_map), pointer :: prep_ocn_get_mapper_Rr2o_ice - prep_ocn_get_mapper_Rr2o_ice => mapper_Rr2o_ice - end function prep_ocn_get_mapper_Rr2o_ice - - function prep_ocn_get_mapper_SFi2o() - type(seq_map), pointer :: prep_ocn_get_mapper_SFi2o - prep_ocn_get_mapper_SFi2o => mapper_SFi2o - end function prep_ocn_get_mapper_SFi2o - - function prep_ocn_get_mapper_Rg2o_liq() - type(seq_map), pointer :: prep_ocn_get_mapper_Rg2o_liq - prep_ocn_get_mapper_Rg2o_liq => mapper_Rg2o_liq - end function prep_ocn_get_mapper_Rg2o_liq - - function prep_ocn_get_mapper_Rg2o_ice() - type(seq_map), pointer :: prep_ocn_get_mapper_Rg2o_ice - prep_ocn_get_mapper_Rg2o_ice => mapper_Rg2o_ice - end function prep_ocn_get_mapper_Rg2o_ice - - function prep_ocn_get_mapper_Sg2o() - type(seq_map), pointer :: prep_ocn_get_mapper_Sg2o - prep_ocn_get_mapper_Sg2o => mapper_Sg2o - end function prep_ocn_get_mapper_Sg2o - - function prep_ocn_get_mapper_Fg2o() - type(seq_map), pointer :: prep_ocn_get_mapper_Fg2o - prep_ocn_get_mapper_Fg2o => mapper_Fg2o - end function prep_ocn_get_mapper_Fg2o - - function prep_ocn_get_mapper_Sw2o() - type(seq_map), pointer :: prep_ocn_get_mapper_Sw2o - prep_ocn_get_mapper_Sw2o => mapper_Sw2o - end function prep_ocn_get_mapper_Sw2o - -end module prep_ocn_mod diff --git 
a/src/drivers/mct/main/prep_rof_mod.F90 b/src/drivers/mct/main/prep_rof_mod.F90 deleted file mode 100644 index acd116c9709..00000000000 --- a/src/drivers/mct/main/prep_rof_mod.F90 +++ /dev/null @@ -1,499 +0,0 @@ -module prep_rof_mod - -#include "shr_assert.h" - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_kind_mod, only: cxx => SHR_KIND_CXX - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use seq_comm_mct, only: num_inst_lnd, num_inst_rof, num_inst_frc - use seq_comm_mct, only: CPLID, ROFID, logunit - use seq_comm_mct, only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use shr_log_mod , only: errMsg => shr_log_errMsg - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: rof, lnd - use prep_lnd_mod, only: prep_lnd_get_mapper_Fr2l - use map_lnd2rof_irrig_mod, only: map_lnd2rof_irrig - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_rof_init - public :: prep_rof_mrg - - public :: prep_rof_accum - public :: prep_rof_accum_avg - - public :: prep_rof_calc_l2r_rx - - public :: prep_rof_get_l2racc_lx - public :: prep_rof_get_l2racc_lx_cnt - public :: prep_rof_get_mapper_Fl2r - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_rof_merge - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_Fl2r - - ! attribute vectors - type(mct_aVect), pointer :: l2r_rx(:) - - ! accumulation variables - type(mct_aVect), pointer :: l2racc_lx(:) ! lnd export, lnd grid, cpl pes - integer , target :: l2racc_lx_cnt ! l2racc_lx: number of time samples accumulated - - ! other module variables - integer :: mpicom_CPLID ! MPI cpl communicator - - ! field names and lists, for fields that need to be treated specially - character(len=*), parameter :: irrig_flux_field = 'Flrl_irrig' - ! fluxes mapped from lnd to rof that don't need any special handling - character(CXX) :: lnd2rof_normal_fluxes - ! whether the model is being run with a separate irrigation field - logical :: have_irrig_field - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_rof_init(infodata, lnd_c2_rof) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and all other non-mapping - ! module variables - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in) :: lnd_c2_rof ! .true. => lnd to rof coupling on - ! - ! Local Variables - integer :: lsize_r - integer :: lsize_l - integer :: eli, eri - logical :: samegrid_lr ! samegrid land and rof - logical :: esmf_map_flag ! .true. => use esmf for mapping - logical :: rof_present ! .true. 
=> rof is present - logical :: lnd_present ! .true. => lnd is present - logical :: iamroot_CPLID ! .true. => CPLID masterproc - character(CL) :: lnd_gnam ! lnd grid - character(CL) :: rof_gnam ! rof grid - type(mct_aVect) , pointer :: l2x_lx - type(mct_aVect) , pointer :: x2r_rx - integer :: index_irrig - character(*) , parameter :: subname = '(prep_rof_init)' - character(*) , parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call seq_infodata_getData(infodata , & - esmf_map_flag=esmf_map_flag , & - rof_present=rof_present , & - lnd_present=lnd_present , & - lnd_gnam=lnd_gnam , & - rof_gnam=rof_gnam ) - - allocate(mapper_Fl2r) - - if (rof_present) then - x2r_rx => component_get_x2c_cx(rof(1)) - index_irrig = mct_aVect_indexRA(x2r_rx, irrig_flux_field, perrWith='quiet') - if (index_irrig == 0) then - have_irrig_field = .false. - else - have_irrig_field = .true. - end if - else - ! If rof_present is false, have_irrig_field should be irrelevant; we arbitrarily - ! set it to false in this case. - have_irrig_field = .false. - end if - - if (rof_present .and. lnd_present) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - lsize_r = mct_aVect_lsize(x2r_rx) - - l2x_lx => component_get_c2x_cx(lnd(1)) - lsize_l = mct_aVect_lsize(l2x_lx) - - allocate(l2racc_lx(num_inst_lnd)) - do eli = 1,num_inst_lnd - call mct_aVect_initSharedFields(l2x_lx, x2r_rx, l2racc_lx(eli), lsize=lsize_l) - call mct_aVect_zero(l2racc_lx(eli)) - end do - l2racc_lx_cnt = 0 - - allocate(l2r_rx(num_inst_rof)) - do eri = 1,num_inst_rof - call mct_avect_init(l2r_rx(eri), rList=seq_flds_x2r_fields, lsize=lsize_r) - call mct_avect_zero(l2r_rx(eri)) - end do - - samegrid_lr = .true. - if (trim(lnd_gnam) /= trim(rof_gnam)) samegrid_lr = .false. - - if (lnd_c2_rof) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fl2r' - end if - call seq_map_init_rcfile(mapper_Fl2r, lnd(1), rof(1), & - 'seq_maps.rc','lnd2rof_fmapname:','lnd2rof_fmaptype:',samegrid_lr, & - string='mapper_Fl2r initialization', esmf_map=esmf_map_flag) - - ! We'll map irrigation specially, so exclude this from the list of l2r fields - ! that are mapped "normally". Note that the following assumes that all - ! x2r_fluxes are lnd2rof (as opposed to coming from some other component). - ! - ! (This listDiff works even if have_irrig_field is false.) - call shr_string_listDiff( & - list1 = seq_flds_x2r_fluxes, & - list2 = irrig_flux_field, & - listout = lnd2rof_normal_fluxes) - endif - call shr_sys_flush(logunit) - - end if - - end subroutine prep_rof_init - - !================================================================================================ - - subroutine prep_rof_accum(timer) - - !--------------------------------------------------------------- - ! Description - ! Accumulate land input to river component - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! 
Local Variables - integer :: eli - type(mct_aVect), pointer :: l2x_lx - character(*), parameter :: subname = '(prep_rof_accum)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eli = 1,num_inst_lnd - l2x_lx => component_get_c2x_cx(lnd(eli)) - if (l2racc_lx_cnt == 0) then - call mct_avect_copy(l2x_lx, l2racc_lx(eli)) - else - call mct_avect_accum(l2x_lx, l2racc_lx(eli)) - endif - end do - l2racc_lx_cnt = l2racc_lx_cnt + 1 - call t_drvstopf (trim(timer)) - - end subroutine prep_rof_accum - - !================================================================================================ - - subroutine prep_rof_accum_avg(timer) - - !--------------------------------------------------------------- - ! Description - ! Finalize accumulation of land input to river component - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eri, eli - character(*), parameter :: subname = '(prep_rof_accum_avg)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eri = 1,num_inst_rof - eli = mod((eri-1),num_inst_lnd) + 1 - call mct_avect_avg(l2racc_lx(eli),l2racc_lx_cnt) - end do - l2racc_lx_cnt = 0 - call t_drvstopf (trim(timer)) - - end subroutine prep_rof_accum_avg - - !================================================================================================ - - subroutine prep_rof_mrg(infodata, fractions_rx, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Merge rof inputs - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type(mct_aVect) , intent(in) :: fractions_rx(:) - character(len=*) , intent(in) :: timer_mrg - ! - ! Local Variables - integer :: eri, efi - type(mct_aVect), pointer :: x2r_rx - character(*), parameter :: subname = '(prep_rof_mrg)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer_mrg), barrier=mpicom_CPLID) - do eri = 1,num_inst_rof - efi = mod((eri-1),num_inst_frc) + 1 - - x2r_rx => component_get_x2c_cx(rof(eri)) ! This is actually modifying x2r_rx - call prep_rof_merge(l2r_rx(eri), fractions_rx(efi), x2r_rx) - end do - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_rof_mrg - - !================================================================================================ - - subroutine prep_rof_merge(l2x_r, fractions_r, x2r_r) - - !----------------------------------------------------------------------- - ! Description - ! Merge land rof and ice forcing for rof input - ! - ! Arguments - type(mct_aVect),intent(in) :: l2x_r - type(mct_aVect),intent(in) :: fractions_r - type(mct_aVect),intent(inout) :: x2r_r - ! - ! 
Local variables - integer :: i - integer, save :: index_l2x_Flrl_rofsur - integer, save :: index_l2x_Flrl_rofgwl - integer, save :: index_l2x_Flrl_rofsub - integer, save :: index_l2x_Flrl_rofdto - integer, save :: index_l2x_Flrl_rofi - integer, save :: index_l2x_Flrl_irrig - integer, save :: index_x2r_Flrl_rofsur - integer, save :: index_x2r_Flrl_rofgwl - integer, save :: index_x2r_Flrl_rofsub - integer, save :: index_x2r_Flrl_rofdto - integer, save :: index_x2r_Flrl_rofi - integer, save :: index_x2r_Flrl_irrig - integer, save :: index_l2x_Flrl_rofl_16O - integer, save :: index_l2x_Flrl_rofi_16O - integer, save :: index_x2r_Flrl_rofl_16O - integer, save :: index_x2r_Flrl_rofi_16O - integer, save :: index_l2x_Flrl_rofl_18O - integer, save :: index_l2x_Flrl_rofi_18O - integer, save :: index_x2r_Flrl_rofl_18O - integer, save :: index_x2r_Flrl_rofi_18O - integer, save :: index_l2x_Flrl_rofl_HDO - integer, save :: index_l2x_Flrl_rofi_HDO - integer, save :: index_x2r_Flrl_rofl_HDO - integer, save :: index_x2r_Flrl_rofi_HDO - integer, save :: index_lfrac - logical, save :: first_time = .true. - logical, save :: flds_wiso_rof = .false. - real(r8) :: lfrac - integer :: nflds,lsize - logical :: iamroot - character(CL) :: field ! field string - character(CL),allocatable :: mrgstr(:) ! temporary string - character(*), parameter :: subname = '(prep_rof_merge) ' - - !----------------------------------------------------------------------- - - call seq_comm_getdata(CPLID, iamroot=iamroot) - lsize = mct_aVect_lsize(x2r_r) - - if (first_time) then - nflds = mct_aVect_nRattr(x2r_r) - - allocate(mrgstr(nflds)) - do i = 1,nflds - field = mct_aVect_getRList2c(i, x2r_r) - mrgstr(i) = subname//'x2r%'//trim(field)//' =' - enddo - - index_l2x_Flrl_rofsur = mct_aVect_indexRA(l2x_r,'Flrl_rofsur' ) - index_l2x_Flrl_rofgwl = mct_aVect_indexRA(l2x_r,'Flrl_rofgwl' ) - index_l2x_Flrl_rofsub = mct_aVect_indexRA(l2x_r,'Flrl_rofsub' ) - index_l2x_Flrl_rofdto = mct_aVect_indexRA(l2x_r,'Flrl_rofdto' ) - if (have_irrig_field) then - index_l2x_Flrl_irrig = mct_aVect_indexRA(l2x_r,'Flrl_irrig' ) - end if - index_l2x_Flrl_rofi = mct_aVect_indexRA(l2x_r,'Flrl_rofi' ) - index_x2r_Flrl_rofsur = mct_aVect_indexRA(x2r_r,'Flrl_rofsur' ) - index_x2r_Flrl_rofgwl = mct_aVect_indexRA(x2r_r,'Flrl_rofgwl' ) - index_x2r_Flrl_rofsub = mct_aVect_indexRA(x2r_r,'Flrl_rofsub' ) - index_x2r_Flrl_rofdto = mct_aVect_indexRA(x2r_r,'Flrl_rofdto' ) - index_x2r_Flrl_rofi = mct_aVect_indexRA(x2r_r,'Flrl_rofi' ) - if (have_irrig_field) then - index_x2r_Flrl_irrig = mct_aVect_indexRA(x2r_r,'Flrl_irrig' ) - end if - index_l2x_Flrl_rofl_16O = mct_aVect_indexRA(l2x_r,'Flrl_rofl_16O', perrWith='quiet' ) - - if ( index_l2x_Flrl_rofl_16O /= 0 ) flds_wiso_rof = .true. 
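! ------------------------------------------------------------------
! [Editor's note - illustrative sketch, not part of the original
!  prep_rof_mod.F90.]  The line above probes for an optional water
! isotope field: with perrWith='quiet', mct_aVect_indexRA returns 0
! when the field is not present in the attribute vector (the same
! pattern guards Flrl_irrig in prep_rof_init), and the zero/nonzero
! result is then used as an on/off switch.  A standalone sketch of
! the pattern, with a stand-in for the quiet lookup:
program optional_field_sketch
  implicit none
  integer :: index_rofl_16O
  logical :: flds_wiso_rof
  index_rofl_16O = quiet_lookup('Flrl_rofl_16O')
  flds_wiso_rof  = (index_rofl_16O /= 0)  ! field present => isotopes on
  print *, 'water isotope handling enabled: ', flds_wiso_rof
contains
  integer function quiet_lookup(name)
    ! stand-in for mct_aVect_indexRA(..., perrWith='quiet'):
    ! returns 0 when the requested field is absent
    character(len=*), intent(in) :: name
    if (name == 'Flrl_rofl_16O') then
       quiet_lookup = 0                   ! pretend this run has no isotope fields
    else
       quiet_lookup = 1
    end if
  end function quiet_lookup
end program optional_field_sketch
! ------------------------------------------------------------------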
- if ( flds_wiso_rof ) then - index_l2x_Flrl_rofi_16O = mct_aVect_indexRA(l2x_r,'Flrl_rofi_16O' ) - index_x2r_Flrl_rofl_16O = mct_aVect_indexRA(x2r_r,'Flrl_rofl_16O' ) - index_x2r_Flrl_rofi_16O = mct_aVect_indexRA(x2r_r,'Flrl_rofi_16O' ) - - index_l2x_Flrl_rofl_18O = mct_aVect_indexRA(l2x_r,'Flrl_rofl_18O' ) - index_l2x_Flrl_rofi_18O = mct_aVect_indexRA(l2x_r,'Flrl_rofi_18O' ) - index_x2r_Flrl_rofl_18O = mct_aVect_indexRA(x2r_r,'Flrl_rofl_18O' ) - index_x2r_Flrl_rofi_18O = mct_aVect_indexRA(x2r_r,'Flrl_rofi_18O' ) - - index_l2x_Flrl_rofl_HDO = mct_aVect_indexRA(l2x_r,'Flrl_rofl_HDO' ) - index_l2x_Flrl_rofi_HDO = mct_aVect_indexRA(l2x_r,'Flrl_rofi_HDO' ) - index_x2r_Flrl_rofl_HDO = mct_aVect_indexRA(x2r_r,'Flrl_rofl_HDO' ) - index_x2r_Flrl_rofi_HDO = mct_aVect_indexRA(x2r_r,'Flrl_rofi_HDO' ) - end if - index_lfrac = mct_aVect_indexRA(fractions_r,"lfrac") - - index_lfrac = mct_aVect_indexRA(fractions_r,"lfrac") - - mrgstr(index_x2r_Flrl_rofsur) = trim(mrgstr(index_x2r_Flrl_rofsur))//' = '// & - 'lfrac*l2x%Flrl_rofsur' - mrgstr(index_x2r_Flrl_rofgwl) = trim(mrgstr(index_x2r_Flrl_rofgwl))//' = '// & - 'lfrac*l2x%Flrl_rofgwl' - mrgstr(index_x2r_Flrl_rofsub) = trim(mrgstr(index_x2r_Flrl_rofsub))//' = '// & - 'lfrac*l2x%Flrl_rofsub' - mrgstr(index_x2r_Flrl_rofdto) = trim(mrgstr(index_x2r_Flrl_rofdto))//' = '// & - 'lfrac*l2x%Flrl_rofdto' - mrgstr(index_x2r_Flrl_rofi) = trim(mrgstr(index_x2r_Flrl_rofi))//' = '// & - 'lfrac*l2x%Flrl_rofi' - if (have_irrig_field) then - mrgstr(index_x2r_Flrl_irrig) = trim(mrgstr(index_x2r_Flrl_irrig))//' = '// & - 'lfrac*l2x%Flrl_irrig' - end if - if ( flds_wiso_rof ) then - mrgstr(index_x2r_Flrl_rofl_16O) = trim(mrgstr(index_x2r_Flrl_rofl_16O))//' = '// & - 'lfrac*l2x%Flrl_rofl_16O' - mrgstr(index_x2r_Flrl_rofi_16O) = trim(mrgstr(index_x2r_Flrl_rofi_16O))//' = '// & - 'lfrac*l2x%Flrl_rofi_16O' - mrgstr(index_x2r_Flrl_rofl_18O) = trim(mrgstr(index_x2r_Flrl_rofl_18O))//' = '// & - 'lfrac*l2x%Flrl_rofl_18O' - mrgstr(index_x2r_Flrl_rofi_18O) = trim(mrgstr(index_x2r_Flrl_rofi_18O))//' = '// & - 'lfrac*l2x%Flrl_rofi_18O' - mrgstr(index_x2r_Flrl_rofl_HDO) = trim(mrgstr(index_x2r_Flrl_rofl_HDO))//' = '// & - 'lfrac*l2x%Flrl_rofl_HDO' - mrgstr(index_x2r_Flrl_rofi_HDO) = trim(mrgstr(index_x2r_Flrl_rofi_HDO))//' = '// & - 'lfrac*l2x%Flrl_rofi_HDO' - end if - end if - - do i = 1,lsize - lfrac = fractions_r%rAttr(index_lfrac,i) - x2r_r%rAttr(index_x2r_Flrl_rofsur,i) = l2x_r%rAttr(index_l2x_Flrl_rofsur,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofgwl,i) = l2x_r%rAttr(index_l2x_Flrl_rofgwl,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofsub,i) = l2x_r%rAttr(index_l2x_Flrl_rofsub,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofdto,i) = l2x_r%rAttr(index_l2x_Flrl_rofdto,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofi,i) = l2x_r%rAttr(index_l2x_Flrl_rofi,i) * lfrac - if (have_irrig_field) then - x2r_r%rAttr(index_x2r_Flrl_irrig,i) = l2x_r%rAttr(index_l2x_Flrl_irrig,i) * lfrac - end if - if ( flds_wiso_rof ) then - x2r_r%rAttr(index_x2r_Flrl_rofl_16O,i) = l2x_r%rAttr(index_l2x_Flrl_rofl_16O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofi_16O,i) = l2x_r%rAttr(index_l2x_Flrl_rofi_16O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofl_18O,i) = l2x_r%rAttr(index_l2x_Flrl_rofl_18O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofi_18O,i) = l2x_r%rAttr(index_l2x_Flrl_rofi_18O,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofl_HDO,i) = l2x_r%rAttr(index_l2x_Flrl_rofl_HDO,i) * lfrac - x2r_r%rAttr(index_x2r_Flrl_rofi_HDO,i) = l2x_r%rAttr(index_l2x_Flrl_rofi_HDO,i) * lfrac - end if - end do - - if (first_time) then - if 
(iamroot) then - write(logunit,'(A)') subname//' Summary:' - do i = 1,nflds - write(logunit,'(A)') trim(mrgstr(i)) - enddo - endif - deallocate(mrgstr) - endif - - first_time = .false. - - end subroutine prep_rof_merge - - !================================================================================================ - - subroutine prep_rof_calc_l2r_rx(fractions_lx, timer) - !--------------------------------------------------------------- - ! Description - ! Create l2r_rx (note that l2r_rx is a local module variable) - ! - ! Arguments - type(mct_aVect) , intent(in) :: fractions_lx(:) - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eri, eli, efi - type(mct_avect), pointer :: r2x_rx - type(seq_map) , pointer :: mapper_Fr2l ! flux mapper for mapping rof -> lnd - character(*), parameter :: subname = '(prep_rof_calc_l2r_rx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eri = 1,num_inst_rof - eli = mod((eri-1),num_inst_lnd) + 1 - efi = mod((eri-1),num_inst_frc) + 1 - - ! If the options to this seq_map_map call change (e.g., the use of avwts), similar - ! changes should be made in map_lnd2rof_irrig. - call seq_map_map(mapper_Fl2r, l2racc_lx(eli), l2r_rx(eri), & - fldlist=lnd2rof_normal_fluxes, norm=.true., & - avwts_s=fractions_lx(efi), avwtsfld_s='lfrin') - - if (have_irrig_field) then - r2x_rx => component_get_c2x_cx(rof(eri)) - mapper_Fr2l => prep_lnd_get_mapper_Fr2l() - call map_lnd2rof_irrig( & - l2r_l = l2racc_lx(eli), & - r2x_r = r2x_rx, & - irrig_flux_field = irrig_flux_field, & - avwts_s = fractions_lx(efi), & - avwtsfld_s = 'lfrin', & - mapper_Fl2r = mapper_Fl2r, & - mapper_Fr2l = mapper_Fr2l, & - l2r_r = l2r_rx(eri)) - end if - end do - call t_drvstopf (trim(timer)) - - end subroutine prep_rof_calc_l2r_rx - - !================================================================================================ - - function prep_rof_get_l2racc_lx() - type(mct_aVect), pointer :: prep_rof_get_l2racc_lx(:) - prep_rof_get_l2racc_lx => l2racc_lx(:) - end function prep_rof_get_l2racc_lx - - function prep_rof_get_l2racc_lx_cnt() - integer, pointer :: prep_rof_get_l2racc_lx_cnt - prep_rof_get_l2racc_lx_cnt => l2racc_lx_cnt - end function prep_rof_get_l2racc_lx_cnt - - function prep_rof_get_mapper_Fl2r() - type(seq_map), pointer :: prep_rof_get_mapper_Fl2r - prep_rof_get_mapper_Fl2r => mapper_Fl2r - end function prep_rof_get_mapper_Fl2r - -end module prep_rof_mod diff --git a/src/drivers/mct/main/prep_wav_mod.F90 b/src/drivers/mct/main/prep_wav_mod.F90 deleted file mode 100644 index c929c8fddac..00000000000 --- a/src/drivers/mct/main/prep_wav_mod.F90 +++ /dev/null @@ -1,361 +0,0 @@ -module prep_wav_mod - - use shr_kind_mod , only: r8 => SHR_KIND_R8 - use shr_kind_mod , only: cs => SHR_KIND_CS - use shr_kind_mod , only: cl => SHR_KIND_CL - use shr_sys_mod , only: shr_sys_abort, shr_sys_flush - use seq_comm_mct , only: num_inst_atm, num_inst_ice, num_inst_ocn - use seq_comm_mct , only: num_inst_wav, num_inst_frc - use seq_comm_mct , only: CPLID, WAVID, logunit - use seq_comm_mct , only: seq_comm_getdata=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_getdata, seq_infodata_type - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: wav, ocn, ice, atm - - implicit none - save - private - - 
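prep_rof_calc_l2r_rx above (and prep_wav_mrg further below) pairs each ensemble instance of one component with an instance of another via a modulo round-robin, e.g. eli = mod((eri-1),num_inst_lnd) + 1. A tiny standalone check of that indexing, with hypothetical instance counts:

program ensemble_index_sketch
  ! Round-robin pairing of ensemble instances, as used when looping over rof
  ! instances and selecting a lnd (or frc) instance. Instance counts are hypothetical.
  implicit none
  integer, parameter :: num_inst_rof = 5, num_inst_lnd = 2
  integer :: eri, eli

  do eri = 1, num_inst_rof
     eli = mod(eri-1, num_inst_lnd) + 1
     print '(a,i0,a,i0)', 'rof instance ', eri, ' -> lnd instance ', eli
  end do
end program ensemble_index_sketch

With fewer land instances than rof instances, the land instances are simply reused in order, which is what the mod expression above produces.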
!-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_wav_init - public :: prep_wav_mrg - - public :: prep_wav_calc_a2x_wx - public :: prep_wav_calc_o2x_wx - public :: prep_wav_calc_i2x_wx - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_wav_merge - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_sa2w - type(seq_map), pointer :: mapper_so2w - type(seq_map), pointer :: mapper_si2w - - ! attribute vectors - type(mct_aVect), pointer :: o2x_wx(:) ! Ocn export, wav grid, cpl pes - type(mct_aVect), pointer :: i2x_wx(:) ! Ice export, wav grid, cpl pes - type(mct_aVect), pointer :: a2x_wx(:) ! Atm export, wav grid, cpl pes - - ! accumulation variables - ! none at this time - - ! seq_comm_getData variables - integer :: mpicom_CPLID ! MPI cpl communicator - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_wav_init(infodata, atm_c2_wav, ocn_c2_wav, ice_c2_wav) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and all other non-mapping - ! module variables - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in) :: atm_c2_wav ! .true. => atm to wav coupling on - logical , intent(in) :: ocn_c2_wav ! .true. => ocn to wav coupling on - logical , intent(in) :: ice_c2_wav ! .true. => ocn to wav coupling on - ! - ! Local Variables - integer :: eai , eoi, eii - integer :: lsize_w - logical :: samegrid_ow ! samegrid ocean and wave - logical :: samegrid_aw ! samegrid atm and wave - logical :: iamroot_CPLID ! .true. => CPLID masterproc - logical :: esmf_map_flag ! .true. => use esmf for mapping - logical :: wav_present ! .true. => wav is present - character(CL) :: atm_gnam ! atm grid - character(CL) :: ocn_gnam ! ocn grid - character(CL) :: wav_gnam ! 
wav grid - type(mct_avect) , pointer :: w2x_wx - character(*) , parameter :: subname = '(prep_wav_init)' - character(*) , parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call seq_infodata_getData(infodata, & - wav_present=wav_present , & - ocn_gnam=ocn_gnam , & - wav_gnam=wav_gnam , & - atm_gnam=atm_gnam , & - esmf_map_flag=esmf_map_flag ) - - allocate(mapper_sa2w) - allocate(mapper_so2w) - allocate(mapper_si2w) - - if (wav_present) then - - call seq_comm_getData(CPLID, mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - w2x_wx => component_get_c2x_cx(wav(1)) - lsize_w = mct_aVect_lsize(w2x_wx) - - allocate(a2x_wx(num_inst_atm)) - do eai = 1,num_inst_atm - call mct_aVect_init(a2x_wx(eai), rList=seq_flds_a2x_fields, lsize=lsize_w) - call mct_aVect_zero(a2x_wx(eai)) - enddo - allocate(o2x_wx(num_inst_ocn)) - do eoi = 1,num_inst_ocn - call mct_aVect_init(o2x_wx(eoi), rList=seq_flds_o2x_fields, lsize=lsize_w) - call mct_aVect_zero(o2x_wx(eoi)) - enddo - allocate(i2x_wx(num_inst_ice)) - do eii = 1,num_inst_ice - call mct_aVect_init(i2x_wx(eii), rList=seq_flds_i2x_fields, lsize=lsize_w) - call mct_aVect_zero(i2x_wx(eii)) - enddo - - samegrid_ow = .true. - samegrid_aw = .true. - if (trim(ocn_gnam) /= trim(wav_gnam)) samegrid_ow = .false. - if (trim(atm_gnam) /= trim(wav_gnam)) samegrid_aw = .false. - - if (atm_c2_wav) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sa2w' - end if - call seq_map_init_rcfile(mapper_Sa2w, atm(1), wav(1), & - 'seq_maps.rc','atm2wav_smapname:','atm2wav_smaptype:',samegrid_aw, & - 'mapper_Sa2w initialization') - endif - call shr_sys_flush(logunit) - if (ocn_c2_wav) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_So2w' - end if - call seq_map_init_rcfile(mapper_So2w, ocn(1), wav(1), & - 'seq_maps.rc','ocn2wav_smapname:','ocn2wav_smaptype:',samegrid_ow, & - 'mapper_So2w initialization') - endif - call shr_sys_flush(logunit) !TODO ??? is this in Tony's code - if (ice_c2_wav) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Si2w' - end if - call seq_map_init_rcfile(mapper_Si2w, ice(1), wav(1), & - 'seq_maps.rc','ice2wav_smapname:','ice2wav_smaptype:',samegrid_ow, & - 'mapper_Si2w initialization') - endif - call shr_sys_flush(logunit) - - end if - - end subroutine prep_wav_init - - !================================================================================================ - - subroutine prep_wav_mrg(infodata, fractions_wx, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Merge all wav inputs - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type(mct_aVect) , intent(in) :: fractions_wx(:) - character(len=*) , intent(in) :: timer_mrg - ! - ! Local Variables - integer :: eai, eoi, eii, ewi, efi - type(mct_avect), pointer :: x2w_wx - character(*), parameter :: subname = '(prep_wav_mrg)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer_mrg),barrier=mpicom_CPLID) - do ewi = 1,num_inst_wav - ! 
Use fortran mod to address ensembles in merge - eai = mod((ewi-1),num_inst_atm) + 1 - eoi = mod((ewi-1),num_inst_ocn) + 1 - eii = mod((ewi-1),num_inst_ice) + 1 - efi = mod((ewi-1),num_inst_frc) + 1 - - x2w_wx => component_get_x2c_cx(wav(ewi)) - - call prep_wav_merge(a2x_wx(eai), o2x_wx(eoi), i2x_wx(eii), fractions_wx(efi), x2w_wx) - enddo - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_wav_mrg - - !================================================================================================ - - subroutine prep_wav_merge(a2x_w, o2x_w, i2x_w, frac_w, x2w_w) - - !----------------------------------------------------------------------- - ! Arguments - type(mct_aVect), intent(in) :: a2x_w ! input - type(mct_aVect), intent(in) :: o2x_w ! input - type(mct_aVect), intent(in) :: i2x_w ! input - type(mct_aVect), intent(in) :: frac_w ! input - type(mct_aVect), intent(inout) :: x2w_w ! output - !----------------------------------------------------------------------- - integer :: nflds,i,i1,o1 - logical :: iamroot - logical, save :: first_time = .true. - character(CL),allocatable :: mrgstr(:) ! temporary string - character(CL) :: field ! string converted to char - type(mct_aVect_sharedindices),save :: a2x_sharedindices - type(mct_aVect_sharedindices),save :: o2x_sharedindices - type(mct_aVect_sharedindices),save :: i2x_sharedindices - character(*), parameter :: subname = '(prep_wav_merge) ' - - !----------------------------------------------------------------------- - - call seq_comm_getdata(CPLID, iamroot=iamroot) - - if (first_time) then - nflds = mct_aVect_nRattr(x2w_w) - - allocate(mrgstr(nflds)) - do i = 1,nflds - field = mct_aVect_getRList2c(i, x2w_w) - mrgstr(i) = subname//'x2w%'//trim(field)//' =' - enddo - - call mct_aVect_setSharedIndices(a2x_w, x2w_w, a2x_SharedIndices) - ! QL, 150625, bug? - ! a2x_SharedIndices -> o2x_SharedIndices - ! a2x_SharedIndices -> i2x_SharedIndices - call mct_aVect_setSharedIndices(o2x_w, x2w_w, o2x_SharedIndices) - call mct_aVect_setSharedIndices(i2x_w, x2w_w, i2x_SharedIndices) - - !--- document copy operations --- - do i=1,a2x_SharedIndices%shared_real%num_indices - i1=a2x_SharedIndices%shared_real%aVindices1(i) - o1=a2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, a2x_w) - mrgstr(o1) = trim(mrgstr(o1))//' = a2x%'//trim(field) - enddo - do i=1,o2x_SharedIndices%shared_real%num_indices - i1=o2x_SharedIndices%shared_real%aVindices1(i) - o1=o2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, o2x_w) - mrgstr(o1) = trim(mrgstr(o1))//' = o2x%'//trim(field) - enddo - do i=1,i2x_SharedIndices%shared_real%num_indices - i1=i2x_SharedIndices%shared_real%aVindices1(i) - o1=i2x_SharedIndices%shared_real%aVindices2(i) - field = mct_aVect_getRList2c(i1, i2x_w) - mrgstr(o1) = trim(mrgstr(o1))//' = i2x%'//trim(field) - enddo - endif - - ! Create input wave state directly from atm, ocn, ice output state - - call mct_avect_zero(x2w_w) - call mct_aVect_copy(aVin=a2x_w, aVout=x2w_w, vector=mct_usevector, sharedIndices=a2x_SharedIndices) - call mct_aVect_copy(aVin=o2x_w, aVout=x2w_w, vector=mct_usevector, sharedIndices=o2x_SharedIndices) - call mct_aVect_copy(aVin=i2x_w, aVout=x2w_w, vector=mct_usevector, sharedIndices=i2x_SharedIndices) - - if (first_time) then - if (iamroot) then - write(logunit,'(A)') subname//' Summary:' - do i = 1,nflds - write(logunit,'(A)') trim(mrgstr(i)) - enddo - endif - deallocate(mrgstr) - endif - - first_time = .false. 
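The merge above builds x2w directly from the atm, ocn and ice export states by locating fields shared by name between each source attribute vector and x2w (mct_aVect_setSharedIndices) and then copying them (mct_aVect_copy). A toy version of that by-name copy, with hypothetical field lists and values standing in for the aVects:

program shared_copy_sketch
  ! Fields present in both the source list (e.g. a2x) and the destination list
  ! (x2w) are copied by name; everything else in the destination is left zeroed.
  implicit none
  character(len=8) :: src_flds(3), dst_flds(2)
  real :: src(3), dst(2)
  integer :: i, j

  src_flds(1) = 'Sa_u';  src_flds(2) = 'Sa_v';  src_flds(3) = 'Sa_tbot'
  dst_flds(1) = 'Sa_u';  dst_flds(2) = 'Sa_tbot'
  src = (/10.0, 20.0, 30.0/)
  dst = 0.0
  do j = 1, 2
     do i = 1, 3
        if (src_flds(i) == dst_flds(j)) dst(j) = src(i)
     end do
  end do
  print *, dst   ! expect 10.0 30.0
end program shared_copy_sketch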
- - end subroutine prep_wav_merge - - !================================================================================================ - - subroutine prep_wav_calc_a2x_wx(timer) - !--------------------------------------------------------------- - ! Description - ! Create a2x_wx (note that a2x_wx is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eai - type(mct_aVect), pointer :: a2x_ax - character(*), parameter :: subname = '(prep_wav_calc_a2x_wx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eai = 1,num_inst_atm - a2x_ax => component_get_c2x_cx(atm(eai)) - call seq_map_map(mapper_Sa2w, a2x_ax, a2x_wx(eai), norm=.true.) - enddo - call t_drvstopf (trim(timer)) - end subroutine prep_wav_calc_a2x_wx - - !================================================================================================ - - subroutine prep_wav_calc_o2x_wx(timer) - !--------------------------------------------------------------- - ! Description - ! Create o2x_wx (note that o2x_wx is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eoi - type(mct_aVect), pointer :: o2x_ox - character(*), parameter :: subname = '(prep_wav_calc_o2x_wx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eoi = 1,num_inst_ocn - o2x_ox => component_get_c2x_cx(ocn(eoi)) - call seq_map_map(mapper_So2w, o2x_ox, o2x_wx(eoi), norm=.true.) - enddo - call t_drvstopf (trim(timer)) - end subroutine prep_wav_calc_o2x_wx - - !================================================================================================ - - subroutine prep_wav_calc_i2x_wx(timer) - !--------------------------------------------------------------- - ! Description - ! Create i2x_wx (note that i2x_wx is a local module variable) - ! - ! Arguments - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eii - type(mct_aVect), pointer :: i2x_ix - character(*), parameter :: subname = '(prep_wav_calc_i2x_wx)' - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eii = 1,num_inst_ice - i2x_ix => component_get_c2x_cx(ice(eii)) - call seq_map_map(mapper_Si2w, i2x_ix, i2x_wx(eii), norm=.true.) - enddo - call t_drvstopf (trim(timer)) - end subroutine prep_wav_calc_i2x_wx - -end module prep_wav_mod diff --git a/src/drivers/mct/main/seq_diag_mct.F90 b/src/drivers/mct/main/seq_diag_mct.F90 deleted file mode 100644 index 0665acafd05..00000000000 --- a/src/drivers/mct/main/seq_diag_mct.F90 +++ /dev/null @@ -1,2537 +0,0 @@ -!=============================================================================== -! -! !MODULE: seq_diag_mod -- computes spatial \& time averages of fluxed quatities -! -! !DESCRIPTION: -! The coupler is required to do certain diagnostics, those calculations are -! located in this module. -! -! !REMARKS: -! CESM sign convention for fluxes is positive downward with hierarchy being -! atm/glc/lnd/rof/ice/ocn -! Sign convention: -! positive value <=> the model is gaining water, heat, momentum, etc. -! Unit convention: -! heat flux ~ W/m^2 -! momentum flux ~ N/m^2 -! water flux ~ (kg/s)/m^2 -! salt flux ~ (kg/s)/m^2 -! -! !REVISION HISTORY: -! 2012-aug-20 - T. Craig - add rof component -! 2008-jul-10 - T. Craig - updated budget implementation -! 2007-may-07 - B. 
Kauffman - initial port to cpl7. -! 2002-nov-21 - R. Jacob - initial port to cpl6. -! 199x-mmm-dd - B. Kauffman - original version in cpl4. -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_diag_mct - ! !USES: - - use shr_kind_mod, only: r8 => shr_kind_r8, in=>shr_kind_in - use shr_kind_mod, only: i8 => shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_sys_mod, only : shr_sys_abort, shr_sys_flush - use shr_mpi_mod, only : shr_mpi_max, shr_mpi_sum - use shr_const_mod, only: shr_const_rearth, shr_const_pi, shr_const_latice, & - shr_const_ice_ref_sal, shr_const_ocn_ref_sal, shr_const_isspval - use mct_mod, only: mct_ggrid, mct_avect, mct_avect_lsize, mct_string, & - mct_string_tochar, mct_gsmap, mct_aVect_indexRA, MCT_AVECT_NRATTR, & - mct_string_clean, mct_avect_getrlist - use esmf, only : esmf_clock - use shr_log_mod, only: s_logunit=>shr_log_unit - use seq_comm_mct, only: logunit, cplid, seq_comm_setptrs, seq_comm_clean - use seq_timemgr_mod, only : seq_timemgr_EClockGetData - use component_type_mod, only : COMPONENT_GET_DOM_CX, COMPONENT_GET_C2X_CX, & - COMPONENT_GET_X2C_CX, COMPONENT_TYPE - use seq_infodata_mod, only : seq_infodata_type, seq_infodata_getdata - use shr_reprosum_mod, only: shr_reprosum_calc - - implicit none - save - private - - ! !PUBLIC TYPES: - - ! none - - !PUBLIC MEMBER FUNCTIONS: - - public seq_diag_zero_mct - public seq_diag_atm_mct - public seq_diag_lnd_mct - public seq_diag_rof_mct - public seq_diag_glc_mct - public seq_diag_ocn_mct - public seq_diag_ice_mct - public seq_diag_accum_mct - public seq_diag_sum0_mct - public seq_diag_print_mct - public seq_diag_avect_mct - public seq_diag_avloc_mct - public seq_diag_avdiff_mct - - !EOP - - !---------------------------------------------------------------------------- - ! Local data - !---------------------------------------------------------------------------- - - !----- local constants ----- - real(r8),parameter :: HFLXtoWFLX = & ! water flux implied by latent heat of fusion - & - (shr_const_ocn_ref_sal-shr_const_ice_ref_sal) / & - & (shr_const_ocn_ref_sal*shr_const_latice) - - real(r8),parameter :: SFLXtoWFLX = & ! water flux implied by salt flux - ! WFLX (kg/m^2s) = -SFLX (kg/m^2s) - ! / ocn_ref_sal (psu) (34.7g/kg) - ! / 1.e-3 kg/g - -1._r8/(shr_const_ocn_ref_sal*1.e-3_r8) - - - !--- C for component --- - !--- "r" is recieve in the coupler, "s" is send from the coupler - - integer(in),parameter :: c_size = 22 - - integer(in),parameter :: c_atm_as = 1 ! model index: atm - integer(in),parameter :: c_atm_ar = 2 ! model index: atm - integer(in),parameter :: c_inh_is = 3 ! model index: ice, northern - integer(in),parameter :: c_inh_ir = 4 ! model index: ice, northern - integer(in),parameter :: c_ish_is = 5 ! model index: ice, southern - integer(in),parameter :: c_ish_ir = 6 ! model index: ice, southern - integer(in),parameter :: c_lnd_ls = 7 ! model index: lnd - integer(in),parameter :: c_lnd_lr = 8 ! model index: lnd - integer(in),parameter :: c_ocn_os = 9 ! model index: ocn - integer(in),parameter :: c_ocn_or =10 ! model index: ocn - integer(in),parameter :: c_rof_rs =11 ! model index: rof - integer(in),parameter :: c_rof_rr =12 ! model index: rof - integer(in),parameter :: c_glc_gs =13 ! model index: glc - integer(in),parameter :: c_glc_gr =14 ! model index: glc - ! --- on atm grid --- - integer(in),parameter :: c_inh_as =15 ! model index: ice, northern - integer(in),parameter :: c_inh_ar =16 ! model index: ice, northern - integer(in),parameter :: c_ish_as =17 ! 
model index: ice, southern - integer(in),parameter :: c_ish_ar =18 ! model index: ice, southern - integer(in),parameter :: c_lnd_as =19 ! model index: lnd - integer(in),parameter :: c_lnd_ar =20 ! model index: lnd - integer(in),parameter :: c_ocn_as =21 ! model index: ocn - integer(in),parameter :: c_ocn_ar =22 ! model index: ocn - - character(len=8),parameter :: cname(c_size) = & - (/' c2a_atm',' a2c_atm',' c2i_inh',' i2c_inh',' c2i_ish',' i2c_ish', & - ' c2l_lnd',' l2c_lnd',' c2o_ocn',' o2c_ocn',' c2r_rof',' r2c_rof', & - ' c2g_glc',' g2c_glc', & - ' c2a_inh',' a2c_inh',' c2a_ish',' a2c_ish', & - ' c2a_lnd',' a2c_lnd',' c2a_ocn',' a2c_ocn' /) - - !--- F for field --- - - integer(in),parameter :: f_area = 1 ! area (wrt to unit sphere) - integer(in),parameter :: f_hfrz = 2 ! heat : latent, freezing - integer(in),parameter :: f_hmelt = 3 ! heat : latent, melting - integer(in),parameter :: f_hswnet = 4 ! heat : short wave, net - integer(in),parameter :: f_hlwdn = 5 ! heat : longwave down - integer(in),parameter :: f_hlwup = 6 ! heat : longwave up - integer(in),parameter :: f_hlatv = 7 ! heat : latent, vaporization - integer(in),parameter :: f_hlatf = 8 ! heat : latent, fusion, snow - integer(in),parameter :: f_hioff = 9 ! heat : latent, fusion, frozen runoff - integer(in),parameter :: f_hsen =10 ! heat : sensible - integer(in),parameter :: f_wfrz =11 ! water: freezing - integer(in),parameter :: f_wmelt =12 ! water: melting - integer(in),parameter :: f_wrain =13 ! water: precip, liquid - integer(in),parameter :: f_wsnow =14 ! water: precip, frozen - integer(in),parameter :: f_wevap =15 ! water: evaporation - integer(in),parameter :: f_wsalt =16 ! water: water equivalent of salt flux - integer(in),parameter :: f_wroff =17 ! water: runoff/flood - integer(in),parameter :: f_wioff =18 ! water: frozen runoff - integer(in),parameter :: f_wfrz_16O =19 ! water: freezing - integer(in),parameter :: f_wmelt_16O =20 ! water: melting - integer(in),parameter :: f_wrain_16O =21 ! water: precip, liquid - integer(in),parameter :: f_wsnow_16O =22 ! water: precip, frozen - integer(in),parameter :: f_wevap_16O =23 ! water: evaporation - integer(in),parameter :: f_wroff_16O =24 ! water: runoff/flood - integer(in),parameter :: f_wioff_16O =25 ! water: frozen runoff - integer(in),parameter :: f_wfrz_18O =26 ! water: freezing - integer(in),parameter :: f_wmelt_18O =27 ! water: melting - integer(in),parameter :: f_wrain_18O =28 ! water: precip, liquid - integer(in),parameter :: f_wsnow_18O =29 ! water: precip, frozen - integer(in),parameter :: f_wevap_18O =30 ! water: evaporation - integer(in),parameter :: f_wroff_18O =31 ! water: runoff/flood - integer(in),parameter :: f_wioff_18O =32 ! water: frozen runoff - integer(in),parameter :: f_wfrz_HDO =33 ! water: freezing - integer(in),parameter :: f_wmelt_HDO =34 ! water: melting - integer(in),parameter :: f_wrain_HDO =35 ! water: precip, liquid - integer(in),parameter :: f_wsnow_HDO =36 ! water: precip, frozen - integer(in),parameter :: f_wevap_HDO =37 ! water: evaporation - integer(in),parameter :: f_wroff_HDO =38 ! water: runoff/flood - integer(in),parameter :: f_wioff_HDO =39 ! water: frozen runoff - - integer(in),parameter :: f_size = f_wioff_HDO ! Total array size of all elements - integer(in),parameter :: f_a = f_area ! 1st index for area - integer(in),parameter :: f_a_end = f_area ! last index for area - integer(in),parameter :: f_h = f_hfrz ! 1st index for heat - integer(in),parameter :: f_h_end = f_hsen ! 
Last index for heat - integer(in),parameter :: f_w = f_wfrz ! 1st index for water - integer(in),parameter :: f_w_end = f_wioff ! Last index for water - integer(in),parameter :: f_16O = f_wfrz_16O ! 1st index for 16O water isotope - integer(in),parameter :: f_18O = f_wfrz_18O ! 1st index for 18O water isotope - integer(in),parameter :: f_HDO = f_wfrz_HDO ! 1st index for HDO water isotope - integer(in),parameter :: f_16O_end = f_wioff_16O ! Last index for 16O water isotope - integer(in),parameter :: f_18O_end = f_wioff_18O ! Last index for 18O water isotope - integer(in),parameter :: f_HDO_end = f_wioff_HDO ! Last index for HDO water isotope - - character(len=12),parameter :: fname(f_size) = & - - (/' area',' hfreeze',' hmelt',' hnetsw',' hlwdn', & - ' hlwup',' hlatvap',' hlatfus',' hiroff',' hsen', & - ' wfreeze',' wmelt',' wrain',' wsnow', & - ' wevap',' weqsaltf',' wrunoff',' wfrzrof', & - ' wfreeze_16O',' wmelt_16O',' wrain_16O',' wsnow_16O', & - ' wevap_16O',' wrunoff_16O',' wfrzrof_16O', & - ' wfreeze_18O',' wmelt_18O',' wrain_18O',' wsnow_18O', & - ' wevap_18O',' wrunoff_18O',' wfrzrof_18O', & - ' wfreeze_HDO',' wmelt_HDO',' wrain_HDO',' wsnow_HDO', & - ' wevap_HDO',' wrunoff_HDO',' wfrzrof_HDO'/) - - !--- P for period --- - - integer(in),parameter :: p_size = 5 - - integer(in),parameter :: p_inst = 1 - integer(in),parameter :: p_day = 2 - integer(in),parameter :: p_mon = 3 - integer(in),parameter :: p_ann = 4 - integer(in),parameter :: p_inf = 5 - - character(len=8),parameter :: pname(p_size) = & - (/' inst',' daily',' monthly',' annual','all_time' /) - - logical :: flds_wiso ! If water isotope fields are active - - ! !PUBLIC DATA MEMBERS - - !--- time-averaged (annual?) global budge diagnostics --- - !--- note: call sum0 then save budg_dataG and budg_ns on restart from/to root pe --- - real(r8),public :: budg_dataL(f_size,c_size,p_size) ! local sum, valid on all pes - real(r8),public :: budg_dataG(f_size,c_size,p_size) ! global sum, valid only on root pe - real(r8),public :: budg_ns (f_size,c_size,p_size) ! counter, valid only on root pe - - character(len=*),parameter :: afldname = 'aream' - character(len=*),parameter :: latname = 'lat' - character(len=*),parameter :: afracname = 'afrac' - character(len=*),parameter :: lfracname = 'lfrac' - character(len=*),parameter :: ofracname = 'ofrac' - character(len=*),parameter :: ifracname = 'ifrac' - - character(*),parameter :: modName = "(seq_diag_mct) " - - integer(in),parameter :: debug = 0 ! internal debug level - - ! 
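The budg_dataL, budg_dataG and budg_ns arrays above are dimensioned (field, component, period), and seq_diag_accum_mct further below folds each instantaneous pass into the daily, monthly, annual and all-time slices. A small sketch of that layout and accumulation step, with hypothetical sizes:

program budget_accum_sketch
  ! Sketch of the (field, component, period) budget layout: every period beyond
  ! the instantaneous one accumulates the instantaneous sums, as in
  ! seq_diag_accum_mct. Sizes are hypothetical; the real arrays are
  ! dimensioned (f_size, c_size, p_size).
  implicit none
  integer, parameter :: nf = 2, nc = 2, np = 5, p_inst = 1
  real :: budg(nf, nc, np)
  integer :: ip

  budg = 0.0
  budg(:, :, p_inst) = 1.5          ! pretend result of one instantaneous pass
  do ip = p_inst + 1, np
     budg(:, :, ip) = budg(:, :, ip) + budg(:, :, p_inst)
  end do
  print *, budg(1, 1, :)            ! every period now holds 1.5
end program budget_accum_sketch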
!PRIVATE DATA MEMBERS - - integer :: index_a2x_Faxa_swnet - integer :: index_a2x_Faxa_lwdn - integer :: index_a2x_Faxa_rainc - integer :: index_a2x_Faxa_rainl - integer :: index_a2x_Faxa_snowc - integer :: index_a2x_Faxa_snowl - - integer :: index_x2a_Faxx_lwup - integer :: index_x2a_Faxx_lat - integer :: index_x2a_Faxx_sen - integer :: index_x2a_Faxx_evap - - integer :: index_l2x_Fall_swnet - integer :: index_l2x_Fall_lwup - integer :: index_l2x_Fall_lat - integer :: index_l2x_Fall_sen - integer :: index_l2x_Fall_evap - integer :: index_l2x_Flrl_rofsur - integer :: index_l2x_Flrl_rofgwl - integer :: index_l2x_Flrl_rofsub - integer :: index_l2x_Flrl_rofdto - integer :: index_l2x_Flrl_rofi - integer :: index_l2x_Flrl_irrig - - integer :: index_x2l_Faxa_lwdn - integer :: index_x2l_Faxa_rainc - integer :: index_x2l_Faxa_rainl - integer :: index_x2l_Faxa_snowc - integer :: index_x2l_Faxa_snowl - integer :: index_x2l_Flrr_flood - - integer :: index_r2x_Forr_rofl - integer :: index_r2x_Forr_rofi - integer :: index_r2x_Firr_rofi - integer :: index_r2x_Flrr_flood - - integer :: index_x2r_Flrl_rofsur - integer :: index_x2r_Flrl_rofgwl - integer :: index_x2r_Flrl_rofsub - integer :: index_x2r_Flrl_rofdto - integer :: index_x2r_Flrl_rofi - integer :: index_x2r_Flrl_irrig - - integer :: index_o2x_Fioo_frazil ! currently used by e3sm - integer :: index_o2x_Fioo_q ! currently used by cesm - - integer :: index_xao_Faox_lwup - integer :: index_xao_Faox_lat - integer :: index_xao_Faox_sen - integer :: index_xao_Faox_evap - - integer :: index_x2o_Foxx_lwup - integer :: index_x2o_Foxx_lat - integer :: index_x2o_Foxx_sen - integer :: index_x2o_Foxx_evap - integer :: index_x2o_Foxx_swnet - integer :: index_x2o_Foxx_rofl - integer :: index_x2o_Foxx_rofi - integer :: index_x2o_Faxa_lwdn - integer :: index_x2o_Faxa_rain - integer :: index_x2o_Faxa_snow - integer :: index_x2o_Fioi_melth - integer :: index_x2o_Fioi_meltw - integer :: index_x2o_Fioi_bergh - integer :: index_x2o_Fioi_bergw - integer :: index_x2o_Fioi_salt - - integer :: index_i2x_Fioi_melth - integer :: index_i2x_Fioi_meltw - integer :: index_i2x_Fioi_salt - integer :: index_i2x_Faii_swnet - integer :: index_i2x_Fioi_swpen - integer :: index_i2x_Faii_lwup - integer :: index_i2x_Faii_lat - integer :: index_i2x_Faii_sen - integer :: index_i2x_Faii_evap - - integer :: index_x2i_Faxa_lwdn - integer :: index_x2i_Faxa_rain - integer :: index_x2i_Faxa_snow - integer :: index_x2i_Fioo_frazil !currently used by e3sm - integer :: index_x2i_Fioo_q !currently used by cesm - integer :: index_x2i_Fixx_rofi - - integer :: index_g2x_Fogg_rofl - integer :: index_g2x_Fogg_rofi - integer :: index_g2x_Figg_rofi - - integer :: index_x2o_Foxx_rofl_16O - integer :: index_x2o_Foxx_rofi_16O - integer :: index_x2o_Foxx_rofl_18O - integer :: index_x2o_Foxx_rofi_18O - integer :: index_x2o_Foxx_rofl_HDO - integer :: index_x2o_Foxx_rofi_HDO - - integer :: index_a2x_Faxa_rainc_16O - integer :: index_a2x_Faxa_rainc_18O - integer :: index_a2x_Faxa_rainc_HDO - integer :: index_a2x_Faxa_rainl_16O - integer :: index_a2x_Faxa_rainl_18O - integer :: index_a2x_Faxa_rainl_HDO - integer :: index_a2x_Faxa_snowc_16O - integer :: index_a2x_Faxa_snowc_18O - integer :: index_a2x_Faxa_snowc_HDO - integer :: index_a2x_Faxa_snowl_16O - integer :: index_a2x_Faxa_snowl_18O - integer :: index_a2x_Faxa_snowl_HDO - - integer :: index_x2a_Faxx_evap_16O - integer :: index_x2a_Faxx_evap_18O - integer :: index_x2a_Faxx_evap_HDO - - integer :: index_l2x_Fall_evap_16O - integer :: index_l2x_Fall_evap_18O - 
integer :: index_l2x_Fall_evap_HDO - - integer :: index_l2x_Flrl_rofl_16O - integer :: index_l2x_Flrl_rofl_18O - integer :: index_l2x_Flrl_rofl_HDO - integer :: index_l2x_Flrl_rofi_16O - integer :: index_l2x_Flrl_rofi_18O - integer :: index_l2x_Flrl_rofi_HDO - - integer :: index_x2l_Faxa_rainc_16O - integer :: index_x2l_Faxa_rainc_18O - integer :: index_x2l_Faxa_rainc_HDO - integer :: index_x2l_Faxa_rainl_16O - integer :: index_x2l_Faxa_rainl_18O - integer :: index_x2l_Faxa_rainl_HDO - integer :: index_x2l_Faxa_snowc_16O - integer :: index_x2l_Faxa_snowc_18O - integer :: index_x2l_Faxa_snowc_HDO - integer :: index_x2l_Faxa_snowl_16O - integer :: index_x2l_Faxa_snowl_18O - integer :: index_x2l_Faxa_snowl_HDO - integer :: index_x2l_Flrr_flood_16O - integer :: index_x2l_Flrr_flood_18O - integer :: index_x2l_Flrr_flood_HDO - - integer :: index_r2x_Forr_rofl_16O - integer :: index_r2x_Forr_rofl_18O - integer :: index_r2x_Forr_rofl_HDO - integer :: index_r2x_Forr_rofi_16O - integer :: index_r2x_Forr_rofi_18O - integer :: index_r2x_Forr_rofi_HDO - integer :: index_r2x_Flrr_flood_16O - integer :: index_r2x_Flrr_flood_18O - integer :: index_r2x_Flrr_flood_HDO - - integer :: index_x2r_Flrl_rofl_16O - integer :: index_x2r_Flrl_rofl_18O - integer :: index_x2r_Flrl_rofl_HDO - integer :: index_x2r_Flrl_rofi_16O - integer :: index_x2r_Flrl_rofi_18O - integer :: index_x2r_Flrl_rofi_HDO - - integer :: index_xao_Faox_evap_16O - integer :: index_xao_Faox_evap_18O - integer :: index_xao_Faox_evap_HDO - - integer :: index_x2o_Fioi_meltw_16O - integer :: index_x2o_Fioi_meltw_18O - integer :: index_x2o_Fioi_meltw_HDO - integer :: index_x2o_Faxa_rain_16O - integer :: index_x2o_Faxa_rain_18O - integer :: index_x2o_Faxa_rain_HDO - integer :: index_x2o_Faxa_snow_16O - integer :: index_x2o_Faxa_snow_18O - integer :: index_x2o_Faxa_snow_HDO - - integer :: index_i2x_Fioi_meltw_16O - integer :: index_i2x_Fioi_meltw_18O - integer :: index_i2x_Fioi_meltw_HDO - integer :: index_i2x_Faii_evap_16O - integer :: index_i2x_Faii_evap_18O - integer :: index_i2x_Faii_evap_HDO - - integer :: index_x2i_Faxa_rain_16O - integer :: index_x2i_Faxa_rain_18O - integer :: index_x2i_Faxa_rain_HDO - integer :: index_x2i_Faxa_snow_16O - integer :: index_x2i_Faxa_snow_18O - integer :: index_x2i_Faxa_snow_HDO - - !=============================================================================== -contains - !=============================================================================== - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_zero_mct - zero out global budget diagnostic data. - ! - ! !DESCRIPTION: - ! Zero out global budget diagnostic data. - ! - ! !REVISION HISTORY: - ! 2008-jul-11 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_zero_mct(EClock,mode) - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(in),optional :: EClock - character(len=*), intent(in),optional :: mode - - !EOP - - integer(IN) :: ip,yr,mon,day,sec - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_zero_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - if (.not. present(EClock) .and. .not. 
present(mode)) then - call shr_sys_abort(subName//' ERROR EClock or mode should be present') - endif - - if (present(EClock)) then - call seq_timemgr_EClockGetData(EClock,curr_yr=yr, & - curr_mon=mon,curr_day=day,curr_tod=sec) - - do ip = 1,p_size - if (ip == p_inst) then - budg_dataL(:,:,ip) = 0.0_r8 - budg_dataG(:,:,ip) = 0.0_r8 - budg_ns(:,:,ip) = 0.0_r8 - endif - if (ip==p_day .and. sec==0) then - budg_dataL(:,:,ip) = 0.0_r8 - budg_dataG(:,:,ip) = 0.0_r8 - budg_ns(:,:,ip) = 0.0_r8 - endif - if (ip==p_mon .and. day==1 .and. sec==0) then - budg_dataL(:,:,ip) = 0.0_r8 - budg_dataG(:,:,ip) = 0.0_r8 - budg_ns(:,:,ip) = 0.0_r8 - endif - if (ip==p_ann .and. mon==1 .and. day==1 .and. sec==0) then - budg_dataL(:,:,ip) = 0.0_r8 - budg_dataG(:,:,ip) = 0.0_r8 - budg_ns(:,:,ip) = 0.0_r8 - endif - enddo - endif - - if (present(mode)) then - if (trim(mode) == 'inst') then - budg_dataL(:,:,p_inst) = 0.0_r8 - budg_dataG(:,:,p_inst) = 0.0_r8 - budg_ns(:,:,p_inst) = 0.0_r8 - elseif (trim(mode) == 'day') then - budg_dataL(:,:,p_day) = 0.0_r8 - budg_dataG(:,:,p_day) = 0.0_r8 - budg_ns(:,:,p_day) = 0.0_r8 - elseif (trim(mode) == 'mon') then - budg_dataL(:,:,p_mon) = 0.0_r8 - budg_dataG(:,:,p_mon) = 0.0_r8 - budg_ns(:,:,p_mon) = 0.0_r8 - elseif (trim(mode) == 'ann') then - budg_dataL(:,:,p_ann) = 0.0_r8 - budg_dataG(:,:,p_ann) = 0.0_r8 - budg_ns(:,:,p_ann) = 0.0_r8 - elseif (trim(mode) == 'inf') then - budg_dataL(:,:,p_inf) = 0.0_r8 - budg_dataG(:,:,p_inf) = 0.0_r8 - budg_ns(:,:,p_inf) = 0.0_r8 - elseif (trim(mode) == 'all') then - budg_dataL(:,:,:) = 0.0_r8 - budg_dataG(:,:,:) = 0.0_r8 - budg_ns(:,:,:) = 0.0_r8 - else - call shr_sys_abort(subname//' ERROR in mode '//trim(mode)) - endif - endif - - end subroutine seq_diag_zero_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_accum_mct - accum out global budget diagnostic data. - ! - ! !DESCRIPTION: - ! Accum out global budget diagnostic data. - ! - ! !REVISION HISTORY: - ! 2008-jul-11 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_accum_mct() - - ! !INPUT/OUTPUT PARAMETERS: - - !EOP - - integer(in) :: ip - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_accum_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - do ip = p_inst+1,p_size - budg_dataL(:,:,ip) = budg_dataL(:,:,ip) + budg_dataL(:,:,p_inst) - enddo - budg_ns(:,:,:) = budg_ns(:,:,:) + 1.0_r8 - - end subroutine seq_diag_accum_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_sum0_mct - sum local to global on root - ! - ! !DESCRIPTION: - ! Sum local values to global on root - ! - ! !REVISION HISTORY: - ! 2008-jul-19 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_sum0_mct() - - ! !INPUT/OUTPUT PARAMETERS: - - !EOP - - real(r8) :: budg_dataGtmp(f_size,c_size,p_size) ! temporary sum - integer(in) :: mpicom ! mpi comm - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_sum0_mct) ' - - !------------------------------------------------------------------------------- - ! 
- !------------------------------------------------------------------------------- - - call seq_comm_setptrs(CPLID,mpicom=mpicom) - budg_dataGtmp = 0.0_r8 - call shr_mpi_sum(budg_dataL,budg_dataGtmp,mpicom,subName) - budg_dataG = budg_dataG + budg_dataGtmp - budg_dataL = 0.0_r8 - - end subroutine seq_diag_sum0_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_atm_mct - compute global atm input/output flux diagnostics - ! - ! !DESCRIPTION: - ! Compute global atm input/output flux diagnostics - ! - ! !REVISION HISTORY: - ! 2008-jul-10 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_atm_mct( atm, frac_a, infodata, do_a2x, do_x2a) - - ! !INPUT/OUTPUT PARAMETERS: - - type(component_type) , intent(in) :: atm ! component type for instance1 - type(mct_aVect) , intent(in) :: frac_a ! frac bundle - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in), optional :: do_a2x - logical , intent(in), optional :: do_x2a - - !EOP - - !----- local ----- - type(mct_aVect), pointer :: a2x_a ! model to drv bundle - type(mct_aVect), pointer :: x2a_a ! drv to model bundle - type(mct_ggrid), pointer :: dom_a - integer(in) :: k,n,ic,nf,ip ! generic index - integer(in) :: kArea ! index of area field in aVect - integer(in) :: kLat ! index of lat field in aVect - integer(in) :: kl,ka,ko,ki ! fraction indices - integer(in) :: lSize ! size of aVect - real(r8) :: ca_a ! area of a grid cell - logical,save :: first_time = .true. - logical,save :: flds_wiso_atm = .false. - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_atm_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - dom_a => component_get_dom_cx(atm) - a2x_a => component_get_c2x_cx(atm) - x2a_a => component_get_x2c_cx(atm) - - kArea = mct_aVect_indexRA(dom_a%data,afldname) - kLat = mct_aVect_indexRA(dom_a%data,latname) - ka = mct_aVect_indexRA(frac_a,afracname) - kl = mct_aVect_indexRA(frac_a,lfracname) - ko = mct_aVect_indexRA(frac_a,ofracname) - ki = mct_aVect_indexRA(frac_a,ifracname) - - !--------------------------------------------------------------------------- - ! add values found in this bundle to the budget table - !--------------------------------------------------------------------------- - - ip = p_inst - - if (present(do_a2x)) then - if (first_time) then - index_a2x_Faxa_swnet = mct_aVect_indexRA(a2x_a,'Faxa_swnet') - index_a2x_Faxa_lwdn = mct_aVect_indexRA(a2x_a,'Faxa_lwdn') - index_a2x_Faxa_rainc = mct_aVect_indexRA(a2x_a,'Faxa_rainc') - index_a2x_Faxa_rainl = mct_aVect_indexRA(a2x_a,'Faxa_rainl') - index_a2x_Faxa_snowc = mct_aVect_indexRA(a2x_a,'Faxa_snowc') - index_a2x_Faxa_snowl = mct_aVect_indexRA(a2x_a,'Faxa_snowl') - - index_a2x_Faxa_rainc_16O = mct_aVect_indexRA(a2x_a,'Faxa_rainc_16O',perrWith='quiet') - if ( index_a2x_Faxa_rainc_16O /= 0 ) flds_wiso_atm = .true. - if ( flds_wiso_atm )then - flds_wiso = .true. 
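In the loop that follows, each atm cell contributes to the budget rows with a weight of cell area times the relevant surface fraction (afrac, lfrac, ofrac, ifrac), and the atm row itself carries a negative sign. A toy version of that weighting, with made-up numbers:

program area_fraction_sketch
  ! Toy version of the weighting in the atm budget loop: each cell contributes
  ! area * fraction, and the atm row takes the negative of area * afrac.
  ! Numbers are hypothetical.
  implicit none
  real :: area, fa, fl, fo, fi
  real :: ca_atm, ca_lnd, ca_ocn, ca_ice

  area = 2.0e9                             ! cell area, hypothetical
  fa = 1.0;  fl = 0.3;  fo = 0.6;  fi = 0.1   ! afrac, lfrac, ofrac, ifrac

  ca_atm = -area * fa
  ca_lnd =  area * fl
  ca_ocn =  area * fo
  ca_ice =  area * fi
  print *, ca_atm + ca_lnd + ca_ocn + ca_ice  ! zero when lfrac+ofrac+ifrac = afrac
end program area_fraction_sketch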
- index_a2x_Faxa_rainc_18O = mct_aVect_indexRA(a2x_a,'Faxa_rainc_18O') - index_a2x_Faxa_rainc_HDO = mct_aVect_indexRA(a2x_a,'Faxa_rainc_HDO') - index_a2x_Faxa_rainl_16O = mct_aVect_indexRA(a2x_a,'Faxa_rainl_16O') - index_a2x_Faxa_rainl_18O = mct_aVect_indexRA(a2x_a,'Faxa_rainl_18O') - index_a2x_Faxa_rainl_HDO = mct_aVect_indexRA(a2x_a,'Faxa_rainl_HDO') - index_a2x_Faxa_snowc_16O = mct_aVect_indexRA(a2x_a,'Faxa_snowc_16O') - index_a2x_Faxa_snowc_18O = mct_aVect_indexRA(a2x_a,'Faxa_snowc_18O') - index_a2x_Faxa_snowc_HDO = mct_aVect_indexRA(a2x_a,'Faxa_snowc_HDO') - index_a2x_Faxa_snowl_16O = mct_aVect_indexRA(a2x_a,'Faxa_snowl_16O') - index_a2x_Faxa_snowl_18O = mct_aVect_indexRA(a2x_a,'Faxa_snowl_18O') - index_a2x_Faxa_snowl_HDO = mct_aVect_indexRA(a2x_a,'Faxa_snowl_HDO') - end if - - end if - - lSize = mct_avect_lSize(a2x_a) - do n=1,lSize - do k=1,4 - - if (k == 1) then - ic = c_atm_ar - ca_a = -dom_a%data%rAttr(kArea,n) * frac_a%rAttr(ka,n) - elseif (k == 2) then - ic = c_lnd_ar - ca_a = dom_a%data%rAttr(kArea,n) * frac_a%rAttr(kl,n) - elseif (k == 3) then - ic = c_ocn_ar - ca_a = dom_a%data%rAttr(kArea,n) * frac_a%rAttr(ko,n) - elseif (k == 4) then - if (dom_a%data%rAttr(kLat,n) > 0.0_r8) then - ic = c_inh_ar - else - ic = c_ish_ar - endif - ca_a = dom_a%data%rAttr(kArea,n) * frac_a%rAttr(ki,n) - endif - - nf = f_area ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a - nf = f_hswnet; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*a2x_a%rAttr(index_a2x_Faxa_swnet,n) - nf = f_hlwdn ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*a2x_a%rAttr(index_a2x_Faxa_lwdn,n) - nf = f_wrain ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*a2x_a%rAttr(index_a2x_Faxa_rainc,n) & - + ca_a*a2x_a%rAttr(index_a2x_Faxa_rainl,n) - nf = f_wsnow ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*a2x_a%rAttr(index_a2x_Faxa_snowc,n) & - + ca_a*a2x_a%rAttr(index_a2x_Faxa_snowl,n) - if ( flds_wiso_atm )then - nf = f_wrain_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_rainc_16O,n) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_rainl_16O,n) - nf = f_wrain_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_rainc_18O,n) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_rainl_18O,n) - nf = f_wrain_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_rainc_HDO,n) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_rainl_HDO,n) - nf = f_wsnow_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_snowc_16O,n) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_snowl_16O,n) - nf = f_wsnow_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_snowc_18O,n) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_snowl_18O,n) - nf = f_wsnow_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_snowc_HDO,n) + & - ca_a*a2x_a%rAttr(index_a2x_Faxa_snowl_HDO,n) - end if - enddo - enddo - ! 
--- heat implied by snow flux --- - ic = c_atm_ar; budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - ic = c_lnd_ar; budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - ic = c_ocn_ar; budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - ic = c_inh_ar; budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - ic = c_ish_ar; budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - end if - - if (present(do_x2a)) then - if (first_time) then - index_x2a_Faxx_lwup = mct_aVect_indexRA(x2a_a,'Faxx_lwup') - index_x2a_Faxx_lat = mct_aVect_indexRA(x2a_a,'Faxx_lat') - index_x2a_Faxx_sen = mct_aVect_indexRA(x2a_a,'Faxx_sen') - index_x2a_Faxx_evap = mct_aVect_indexRA(x2a_a,'Faxx_evap') - - if ( flds_wiso_atm )then - index_x2a_Faxx_evap_16O = mct_aVect_indexRA(x2a_a,'Faxx_evap_16O') - index_x2a_Faxx_evap_18O = mct_aVect_indexRA(x2a_a,'Faxx_evap_18O') - index_x2a_Faxx_evap_HDO = mct_aVect_indexRA(x2a_a,'Faxx_evap_HDO') - end if - end if - - lSize = mct_avect_lSize(x2a_a) - do n=1,lSize - do k=1,4 - - if (k == 1) then - ic = c_atm_as - ca_a = -dom_a%data%rAttr(kArea,n) * frac_a%rAttr(ka,n) - elseif (k == 2) then - ic = c_lnd_as - ca_a = dom_a%data%rAttr(kArea,n) * frac_a%rAttr(kl,n) - elseif (k == 3) then - ic = c_ocn_as - ca_a = dom_a%data%rAttr(kArea,n) * frac_a%rAttr(ko,n) - elseif (k == 4) then - if (dom_a%data%rAttr(kLat,n) > 0.0_r8) then - ic = c_inh_as - else - ic = c_ish_as - endif - ca_a = dom_a%data%rAttr(kArea,n) * frac_a%rAttr(ki,n) - endif - - nf = f_area ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a - nf = f_hlwup; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*x2a_a%rAttr(index_x2a_Faxx_lwup,n) - nf = f_hlatv; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*x2a_a%rAttr(index_x2a_Faxx_lat,n) - nf = f_hsen ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*x2a_a%rAttr(index_x2a_Faxx_sen,n) - nf = f_wevap; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_a*x2a_a%rAttr(index_x2a_Faxx_evap,n) - - if ( flds_wiso_atm )then - nf = f_wevap_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*x2a_a%rAttr(index_x2a_Faxx_evap_16O,n) - nf = f_wevap_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*x2a_a%rAttr(index_x2a_Faxx_evap_18O,n) - nf = f_wevap_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_a*x2a_a%rAttr(index_x2a_Faxx_evap_HDO,n) - end if - - enddo - enddo - end if - - first_time = .false. - - end subroutine seq_diag_atm_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_lnd_mct - compute global lnd input/output flux diagnostics - ! - ! !DESCRIPTION: - ! Compute global lnd input/output flux diagnostics - ! - ! !REVISION HISTORY: - ! 2008-jul-10 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_lnd_mct( lnd, frac_l, infodata, do_l2x, do_x2l) - - type(component_type) , intent(in) :: lnd ! component type for instance1 - type(mct_aVect) , intent(in) :: frac_l ! frac bundle - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in), optional :: do_l2x - logical , intent(in), optional :: do_x2l - - !EOP - - !----- local ----- - type(mct_aVect), pointer :: l2x_l ! model to drv bundle - type(mct_aVect), pointer :: x2l_l ! drv to model bundle - type(mct_ggrid), pointer :: dom_l - integer(in) :: n,ic,nf,ip ! 
generic index - integer(in) :: kArea ! index of area field in aVect - integer(in) :: kl ! fraction indices - integer(in) :: lSize ! size of aVect - real(r8) :: ca_l ! area of a grid cell - logical,save :: first_time = .true. - logical,save :: flds_wiso_lnd = .false. - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_lnd_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !--------------------------------------------------------------------------- - ! add values found in this bundle to the budget table - !--------------------------------------------------------------------------- - - dom_l => component_get_dom_cx(lnd) - l2x_l => component_get_c2x_cx(lnd) - x2l_l => component_get_x2c_cx(lnd) - - ip = p_inst - - kArea = mct_aVect_indexRA(dom_l%data,afldname) - kl = mct_aVect_indexRA(frac_l,lfracname) - - if (present(do_l2x)) then - if (first_time) then - index_l2x_Fall_swnet = mct_aVect_indexRA(l2x_l,'Fall_swnet') - index_l2x_Fall_lwup = mct_aVect_indexRA(l2x_l,'Fall_lwup') - index_l2x_Fall_lat = mct_aVect_indexRA(l2x_l,'Fall_lat') - index_l2x_Fall_sen = mct_aVect_indexRA(l2x_l,'Fall_sen') - index_l2x_Fall_evap = mct_aVect_indexRA(l2x_l,'Fall_evap') - index_l2x_Flrl_rofsur = mct_aVect_indexRA(l2x_l,'Flrl_rofsur') - index_l2x_Flrl_rofgwl = mct_aVect_indexRA(l2x_l,'Flrl_rofgwl') - index_l2x_Flrl_rofsub = mct_aVect_indexRA(l2x_l,'Flrl_rofsub') - index_l2x_Flrl_rofdto = mct_aVect_indexRA(l2x_l,'Flrl_rofdto') - index_l2x_Flrl_rofi = mct_aVect_indexRA(l2x_l,'Flrl_rofi') - index_l2x_Flrl_irrig = mct_aVect_indexRA(l2x_l,'Flrl_irrig', perrWith='quiet') - - index_l2x_Fall_evap_16O = mct_aVect_indexRA(l2x_l,'Fall_evap_16O',perrWith='quiet') - if ( index_l2x_Fall_evap_16O /= 0 ) flds_wiso_lnd = .true. - if ( flds_wiso_lnd )then - flds_wiso = .true. 
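As in the lookups just above, optional fields such as Flrl_irrig and the water-isotope fluxes are detected by calling mct_aVect_indexRA with perrWith='quiet', which returns 0 when the field is absent; a saved logical then gates all later references. A self-contained sketch of that pattern, with a toy lookup standing in for mct_aVect_indexRA:

program optional_field_sketch
  ! An index lookup that returns 0 for a missing field, plus a logical flag that
  ! records whether optional (e.g. water-isotope) fields are active. find_field
  ! is a hypothetical stand-in for mct_aVect_indexRA with perrWith='quiet'.
  implicit none
  character(len=16) :: flds(3)
  logical :: flds_wiso_lnd
  integer :: idx

  flds(1) = 'Fall_evap'
  flds(2) = 'Fall_lwup'
  flds(3) = 'Fall_sen'

  idx = find_field(flds, 'Fall_evap_16O')
  flds_wiso_lnd = (idx /= 0)
  print *, 'water isotope fields active: ', flds_wiso_lnd

contains

  integer function find_field(list, name)
    character(len=*), intent(in) :: list(:), name
    integer :: i
    find_field = 0
    do i = 1, size(list)
       if (trim(list(i)) == trim(name)) find_field = i
    end do
  end function find_field
end program optional_field_sketch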
- index_l2x_Fall_evap_18O = mct_aVect_indexRA(l2x_l,'Fall_evap_18O') - index_l2x_Fall_evap_HDO = mct_aVect_indexRA(l2x_l,'Fall_evap_HDO') - index_l2x_Flrl_rofl_16O = mct_aVect_indexRA(l2x_l,'Flrl_rofl_16O') - index_l2x_Flrl_rofl_18O = mct_aVect_indexRA(l2x_l,'Flrl_rofl_18O') - index_l2x_Flrl_rofl_HDO = mct_aVect_indexRA(l2x_l,'Flrl_rofl_HDO') - index_l2x_Flrl_rofi_16O = mct_aVect_indexRA(l2x_l,'Flrl_rofi_16O') - index_l2x_Flrl_rofi_18O = mct_aVect_indexRA(l2x_l,'Flrl_rofi_18O') - index_l2x_Flrl_rofi_HDO = mct_aVect_indexRA(l2x_l,'Flrl_rofi_HDO') - end if - end if - - lSize = mct_avect_lSize(l2x_l) - ic = c_lnd_lr - do n=1,lSize - ca_l = dom_l%data%rAttr(kArea,n) * frac_l%rAttr(kl,n) - nf = f_area ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l - nf = f_hswnet; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*l2x_l%rAttr(index_l2x_Fall_swnet,n) - nf = f_hlwup ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*l2x_l%rAttr(index_l2x_Fall_lwup,n) - nf = f_hlatv ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*l2x_l%rAttr(index_l2x_Fall_lat,n) - nf = f_hsen ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*l2x_l%rAttr(index_l2x_Fall_sen,n) - nf = f_wevap ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*l2x_l%rAttr(index_l2x_Fall_evap,n) - nf = f_wroff ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofsur,n) & - - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofgwl,n) & - - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofsub,n) & - - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofdto,n) - if (index_l2x_Flrl_irrig /= 0) then - nf = f_wroff ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_l*l2x_l%rAttr(index_l2x_Flrl_irrig,n) - end if - nf = f_wioff ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofi,n) - - if ( flds_wiso_lnd )then - nf = f_wevap_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*l2x_l%rAttr(index_l2x_Fall_evap_16O,n) - nf = f_wevap_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*l2x_l%rAttr(index_l2x_Fall_evap_18O,n) - nf = f_wevap_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*l2x_l%rAttr(index_l2x_Fall_evap_HDO,n) - - nf = f_wroff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofl_16O,n) - nf = f_wroff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofl_18O,n) - nf = f_wroff_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofl_HDO,n) - - nf = f_wioff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofi_16O,n) - nf = f_wioff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofi_18O,n) - nf = f_wioff_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*l2x_l%rAttr(index_l2x_Flrl_rofi_HDO,n) - end if - end do - budg_dataL(f_hioff,ic,ip) = -budg_dataL(f_wioff,ic,ip)*shr_const_latice - end if - - if (present(do_x2l)) then - if (first_time) then - index_x2l_Faxa_lwdn = mct_aVect_indexRA(x2l_l,'Faxa_lwdn') - index_x2l_Faxa_rainc = mct_aVect_indexRA(x2l_l,'Faxa_rainc') - index_x2l_Faxa_rainl = mct_aVect_indexRA(x2l_l,'Faxa_rainl') - index_x2l_Faxa_snowc = mct_aVect_indexRA(x2l_l,'Faxa_snowc') - index_x2l_Faxa_snowl = mct_aVect_indexRA(x2l_l,'Faxa_snowl') - index_x2l_Flrr_flood = mct_aVect_indexRA(x2l_l,'Flrr_flood') - - if ( flds_wiso_lnd )then - index_x2l_Faxa_rainc_16O = mct_aVect_indexRA(x2l_l,'Faxa_rainc_16O') - index_x2l_Faxa_rainc_18O = mct_aVect_indexRA(x2l_l,'Faxa_rainc_18O') - 
index_x2l_Faxa_rainc_HDO = mct_aVect_indexRA(x2l_l,'Faxa_rainc_HDO') - index_x2l_Faxa_rainl_16O = mct_aVect_indexRA(x2l_l,'Faxa_rainl_16O') - index_x2l_Faxa_rainl_18O = mct_aVect_indexRA(x2l_l,'Faxa_rainl_18O') - index_x2l_Faxa_rainl_HDO = mct_aVect_indexRA(x2l_l,'Faxa_rainl_HDO') - index_x2l_Faxa_snowc_16O = mct_aVect_indexRA(x2l_l,'Faxa_snowc_16O') - index_x2l_Faxa_snowc_18O = mct_aVect_indexRA(x2l_l,'Faxa_snowc_18O') - index_x2l_Faxa_snowc_HDO = mct_aVect_indexRA(x2l_l,'Faxa_snowc_HDO') - index_x2l_Faxa_snowl_16O = mct_aVect_indexRA(x2l_l,'Faxa_snowl_16O') - index_x2l_Faxa_snowl_18O = mct_aVect_indexRA(x2l_l,'Faxa_snowl_18O') - index_x2l_Faxa_snowl_HDO = mct_aVect_indexRA(x2l_l,'Faxa_snowl_HDO') - index_x2l_Flrr_flood_16O = mct_aVect_indexRA(x2l_l,'Flrr_flood_16O') - index_x2l_Flrr_flood_18O = mct_aVect_indexRA(x2l_l,'Flrr_flood_18O') - index_x2l_Flrr_flood_HDO = mct_aVect_indexRA(x2l_l,'Flrr_flood_HDO') - end if - end if - - lSize = mct_avect_lSize(x2l_l) - ic = c_lnd_ls - do n=1,lSize - ca_l = dom_l%data%rAttr(kArea,n) * frac_l%rAttr(kl,n) - nf = f_area ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l - nf = f_hlwdn; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*x2l_l%rAttr(index_x2l_Faxa_lwdn,n) - nf = f_wrain; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*x2l_l%rAttr(index_x2l_Faxa_rainc,n) & - + ca_l*x2l_l%rAttr(index_x2l_Faxa_rainl,n) - nf = f_wsnow; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_l*x2l_l%rAttr(index_x2l_Faxa_snowc,n) & - + ca_l*x2l_l%rAttr(index_x2l_Faxa_snowl,n) - nf = f_wroff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_l*x2l_l%rAttr(index_x2l_Flrr_flood,n) - - if ( flds_wiso_lnd )then - nf = f_wrain_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_rainc_16O,n) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_rainl_16O,n) - nf = f_wrain_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_rainc_18O,n) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_rainl_18O,n) - nf = f_wrain_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_rainc_HDO,n) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_rainl_HDO,n) - - nf = f_wsnow_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_snowc_16O,n) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_snowl_16O,n) - nf = f_wsnow_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_snowc_18O,n) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_snowl_18O,n) - nf = f_wsnow_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_snowc_HDO,n) + & - ca_l*x2l_l%rAttr(index_x2l_Faxa_snowl_HDO,n) - - nf = f_wroff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*x2l_l%rAttr(index_x2l_Flrr_flood_16O,n) - nf = f_wroff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*x2l_l%rAttr(index_x2l_Flrr_flood_18O,n) - nf = f_wroff_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_l*x2l_l%rAttr(index_x2l_Flrr_flood_HDO,n) - end if - end do - budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - end if - - first_time = .false. - - end subroutine seq_diag_lnd_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_rof_mct - compute global rof input/output flux diagnostics - ! - ! !DESCRIPTION: - ! Compute global rof input/output flux diagnostics - ! - ! !REVISION HISTORY: - ! 
2008-jul-10 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_rof_mct( rof, frac_r, infodata) - - type(component_type) , intent(in) :: rof ! component type for instance1 - type(mct_aVect) , intent(in) :: frac_r ! frac bundle - type(seq_infodata_type) , intent(in) :: infodata - - !EOP - - !----- local ----- - type(mct_aVect), pointer :: r2x_r - type(mct_aVect), pointer :: x2r_r - type(mct_ggrid), pointer :: dom_r - integer(in) :: n,ic,nf,ip ! generic index - integer(in) :: kArea ! index of area field in aVect - integer(in) :: lSize ! size of aVect - real(r8) :: ca_r ! area of a grid cell - logical,save :: first_time = .true. - logical,save :: flds_wiso_rof = .false. - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_rof_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !--------------------------------------------------------------------------- - ! add values found in this bundle to the budget table - !--------------------------------------------------------------------------- - - dom_r => component_get_dom_cx(rof) - r2x_r => component_get_c2x_cx(rof) - x2r_r => component_get_x2c_cx(rof) - - if (first_time) then - index_x2r_Flrl_rofsur = mct_aVect_indexRA(x2r_r,'Flrl_rofsur') - index_x2r_Flrl_rofgwl = mct_aVect_indexRA(x2r_r,'Flrl_rofgwl') - index_x2r_Flrl_rofsub = mct_aVect_indexRA(x2r_r,'Flrl_rofsub') - index_x2r_Flrl_rofdto = mct_aVect_indexRA(x2r_r,'Flrl_rofdto') - index_x2r_Flrl_irrig = mct_aVect_indexRA(x2r_r,'Flrl_irrig', perrWith='quiet') - index_x2r_Flrl_rofi = mct_aVect_indexRA(x2r_r,'Flrl_rofi') - - index_x2r_Flrl_rofl_16O = mct_aVect_indexRA(x2r_r,'Flrl_rofl_16O', perrWith='quiet') - if ( index_x2r_Flrl_rofl_16O /= 0 ) flds_wiso_rof = .true. - if ( flds_wiso_rof )then - flds_wiso = .true. 
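Just below, the area-summed frozen runoff received by the runoff model is converted into an implied latent-heat entry, hioff = -wioff*shr_const_latice. A minimal sketch of that conversion; the latent-heat value used here is an assumed stand-in for shr_const_latice:

program frozen_runoff_heat_sketch
  ! Frozen runoff (area-weighted, kg/s) implies a latent-heat term when it melts:
  ! hioff = -wioff * latice. The constant below is an assumption standing in for
  ! shr_const_latice; the flux value is hypothetical.
  implicit none
  real, parameter :: latice = 3.337e5    ! J/kg, assumed latent heat of fusion
  real :: wioff, hioff

  wioff = 0.02                           ! area-weighted frozen runoff, kg/s
  hioff = -wioff * latice
  print *, 'implied heat of frozen runoff (W): ', hioff
end program frozen_runoff_heat_sketch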
- index_x2r_Flrl_rofl_18O = mct_aVect_indexRA(x2r_r,'Flrl_rofl_18O') - index_x2r_Flrl_rofl_HDO = mct_aVect_indexRA(x2r_r,'Flrl_rofl_HDO') - index_x2r_Flrl_rofi_16O = mct_aVect_indexRA(x2r_r,'Flrl_rofi_16O') - index_x2r_Flrl_rofi_18O = mct_aVect_indexRA(x2r_r,'Flrl_rofi_18O') - index_x2r_Flrl_rofi_HDO = mct_aVect_indexRA(x2r_r,'Flrl_rofi_HDO') - end if - end if - - ip = p_inst - ic = c_rof_rr - kArea = mct_aVect_indexRA(dom_r%data,afldname) - lSize = mct_avect_lSize(x2r_r) - do n=1,lSize - ca_r = dom_r%data%rAttr(kArea,n) - nf = f_wroff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_r*x2r_r%rAttr(index_x2r_Flrl_rofsur,n) & - + ca_r*x2r_r%rAttr(index_x2r_Flrl_rofgwl,n) & - + ca_r*x2r_r%rAttr(index_x2r_Flrl_rofsub,n) & - + ca_r*x2r_r%rAttr(index_x2r_Flrl_rofdto,n) - if (index_x2r_Flrl_irrig /= 0) then - nf = f_wroff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_r*x2r_r%rAttr(index_x2r_Flrl_irrig,n) - end if - - nf = f_wioff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_r*x2r_r%rAttr(index_x2r_Flrl_rofi,n) - - if ( flds_wiso_rof )then - nf = f_wroff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*x2r_r%rAttr(index_x2r_Flrl_rofl_16O,n) - nf = f_wroff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*x2r_r%rAttr(index_x2r_Flrl_rofl_18O,n) - nf = f_wroff_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*x2r_r%rAttr(index_x2r_Flrl_rofl_HDO,n) - - nf = f_wioff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*x2r_r%rAttr(index_x2r_Flrl_rofi_16O,n) - nf = f_wioff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*x2r_r%rAttr(index_x2r_Flrl_rofi_18O,n) - nf = f_wioff_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*x2r_r%rAttr(index_x2r_Flrl_rofi_HDO,n) - end if - end do - budg_dataL(f_hioff,ic,ip) = -budg_dataL(f_wioff,ic,ip)*shr_const_latice - - if (first_time) then - index_r2x_Forr_rofl = mct_aVect_indexRA(r2x_r,'Forr_rofl') - index_r2x_Forr_rofi = mct_aVect_indexRA(r2x_r,'Forr_rofi') - index_r2x_Firr_rofi = mct_aVect_indexRA(r2x_r,'Firr_rofi') - index_r2x_Flrr_flood = mct_aVect_indexRA(r2x_r,'Flrr_flood') - - if ( flds_wiso_rof )then - index_r2x_Forr_rofl_16O = mct_aVect_indexRA(r2x_r,'Forr_rofl_16O') - index_r2x_Forr_rofl_18O = mct_aVect_indexRA(r2x_r,'Forr_rofl_18O') - index_r2x_Forr_rofl_HDO = mct_aVect_indexRA(r2x_r,'Forr_rofl_HDO') - index_r2x_Forr_rofi_16O = mct_aVect_indexRA(r2x_r,'Forr_rofi_16O') - index_r2x_Forr_rofi_18O = mct_aVect_indexRA(r2x_r,'Forr_rofi_18O') - index_r2x_Forr_rofi_HDO = mct_aVect_indexRA(r2x_r,'Forr_rofi_HDO') - index_r2x_Flrr_flood_16O = mct_aVect_indexRA(r2x_r,'Flrr_flood_16O') - index_r2x_Flrr_flood_18O = mct_aVect_indexRA(r2x_r,'Flrr_flood_18O') - index_r2x_Flrr_flood_HDO = mct_aVect_indexRA(r2x_r,'Flrr_flood_HDO') - end if - end if - - ip = p_inst - ic = c_rof_rs - kArea = mct_aVect_indexRA(dom_r%data,afldname) - lSize = mct_avect_lSize(r2x_r) - do n=1,lSize - ca_r = dom_r%data%rAttr(kArea,n) - nf = f_wroff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_r*r2x_r%rAttr(index_r2x_Forr_rofl,n) & - + ca_r*r2x_r%rAttr(index_r2x_Flrr_flood,n) - nf = f_wioff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_r*r2x_r%rAttr(index_r2x_Forr_rofi,n) & - - ca_r*r2x_r%rAttr(index_r2x_Firr_rofi,n) - - if ( flds_wiso_rof )then - nf = f_wroff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_r*r2x_r%rAttr(index_r2x_Forr_rofl_16O,n) - nf = f_wroff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_r*r2x_r%rAttr(index_r2x_Forr_rofl_18O,n) - nf = f_wroff_HDO; - 
budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_r*r2x_r%rAttr(index_r2x_Forr_rofl_HDO,n) - - nf = f_wioff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_r*r2x_r%rAttr(index_r2x_Forr_rofi_16O,n) - nf = f_wioff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_r*r2x_r%rAttr(index_r2x_Forr_rofi_18O,n) - nf = f_wioff_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_r*r2x_r%rAttr(index_r2x_Forr_rofi_HDO,n) - - nf = f_wroff_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*r2x_r%rAttr(index_r2x_Flrr_flood_16O,n) - nf = f_wroff_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*r2x_r%rAttr(index_r2x_Flrr_flood_18O,n) - nf = f_wroff_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_r*r2x_r%rAttr(index_r2x_Flrr_flood_HDO,n) - end if - end do - budg_dataL(f_hioff,ic,ip) = -budg_dataL(f_wioff,ic,ip)*shr_const_latice - - first_time = .false. - - end subroutine seq_diag_rof_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_glc_mct - compute global glc input/output flux diagnostics - ! - ! !DESCRIPTION: - ! Compute global glc input/output flux diagnostics - ! - ! !REVISION HISTORY: - ! 2008-jul-10 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_glc_mct( glc, frac_g, infodata) - - type(component_type) , intent(in) :: glc ! component type for instance1 - type(mct_aVect) , intent(in) :: frac_g ! frac bundle - type(seq_infodata_type) , intent(in) :: infodata - - !EOP - - !----- local ----- - type(mct_aVect), pointer :: g2x_g - type(mct_aVect), pointer :: x2g_g - type(mct_ggrid), pointer :: dom_g - integer(in) :: n,ic,nf,ip ! generic index - integer(in) :: kArea ! index of area field in aVect - integer(in) :: lSize ! size of aVect - real(r8) :: ca_g ! area of a grid cell - logical,save :: first_time = .true. - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_glc_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !--------------------------------------------------------------------------- - ! add values found in this bundle to the budget table - !--------------------------------------------------------------------------- - - dom_g => component_get_dom_cx(glc) - g2x_g => component_get_c2x_cx(glc) - x2g_g => component_get_x2c_cx(glc) - - if (first_time) then - index_g2x_Fogg_rofl = mct_aVect_indexRA(g2x_g,'Fogg_rofl') - index_g2x_Fogg_rofi = mct_aVect_indexRA(g2x_g,'Fogg_rofi') - index_g2x_Figg_rofi = mct_aVect_indexRA(g2x_g,'Figg_rofi') - end if - - ip = p_inst - ic = c_glc_gs - kArea = mct_aVect_indexRA(dom_g%data,afldname) - lSize = mct_avect_lSize(g2x_g) - do n=1,lSize - ca_g = dom_g%data%rAttr(kArea,n) - nf = f_wroff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_g*g2x_g%rAttr(index_g2x_Fogg_rofl,n) - nf = f_wioff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_g*g2x_g%rAttr(index_g2x_Fogg_rofi,n) & - - ca_g*g2x_g%rAttr(index_g2x_Figg_rofi,n) - end do - budg_dataL(f_hioff,ic,ip) = -budg_dataL(f_wioff,ic,ip)*shr_const_latice - - first_time = .false. - - end subroutine seq_diag_glc_mct - - !BOP =========================================================================== - ! - ! 
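The glc budget above closes the same way the land and runoff budgets do: the accumulated frozen-water flux is converted into an implied heat flux, budg_dataL(f_hioff) = -budg_dataL(f_wioff)*shr_const_latice. Below is a minimal, self-contained sketch of that water-to-heat conversion, not driver code; the local latice parameter is an illustrative stand-in for shr_const_latice (taken from shr_const_mod in the real driver), and the sample flux value is arbitrary.

      program latice_sketch
         implicit none
         integer, parameter :: r8 = selected_real_kind(12)
         ! stand-in for shr_const_latice (latent heat of fusion, J/kg)
         real(r8), parameter :: latice = 3.337e5_r8
         real(r8) :: wioff, hioff
         wioff = 2.0e-6_r8          ! frozen runoff water flux, kg/m2/s (illustrative)
         hioff = -wioff*latice      ! implied heat flux carried by that ice, W/m2
         write(*,*) 'wioff =', wioff, ' kg/m2/s  ->  hioff =', hioff, ' W/m2'
      end program latice_sketch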
!IROUTINE: seq_diag_ocn_mct - compute global ocn input/output flux diagnostics - ! - ! !DESCRIPTION: - ! Compute global ocn input/output flux diagnostics - ! - ! !REVISION HISTORY: - ! 2008-jul-10 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_ocn_mct( ocn, xao_o, frac_o, infodata, do_o2x, do_x2o, do_xao) - - type(component_type) , intent(in) :: ocn ! component type for instance1 - type(mct_aVect) , intent(in) :: frac_o ! frac bundle - type(mct_aVect) , intent(in) :: xao_o - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in),optional :: do_o2x - logical , intent(in),optional :: do_x2o - logical , intent(in),optional :: do_xao - - !EOP - - !----- local ----- - type(mct_aVect), pointer :: o2x_o ! model to drv bundle - type(mct_aVect), pointer :: x2o_o ! drv to model bundle - type(mct_ggrid), pointer :: dom_o - integer(in) :: n,nf,ic,ip ! generic index - integer(in) :: kArea ! index of area field in aVect - integer(in) :: ko,ki ! fraction indices - integer(in) :: lSize ! size of aVect - real(r8) :: ca_i,ca_o ! area of a grid cell - logical,save :: first_time = .true. - logical,save :: flds_wiso_ocn = .false. - character(len=cs) :: cime_model - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_ocn_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - if (.not. present(do_o2x) .and. & - .not. present(do_x2o) .and. & - .not. present(do_xao)) then - call shr_sys_abort(subName//"ERROR: must input a bundle") - end if - - !--------------------------------------------------------------------------- - ! add values found in this bundle to the budget table - !--------------------------------------------------------------------------- - - dom_o => component_get_dom_cx(ocn) - o2x_o => component_get_c2x_cx(ocn) - x2o_o => component_get_x2c_cx(ocn) - - ip = p_inst - - kArea = mct_aVect_indexRA(dom_o%data,afldname) - ko = mct_aVect_indexRA(frac_o,ofracname) - ki = mct_aVect_indexRA(frac_o,ifracname) - - call seq_infodata_GetData(infodata, cime_model=cime_model) - - if (present(do_o2x)) then - if (first_time) then - if (trim(cime_model) == 'e3sm') then - index_o2x_Fioo_frazil = mct_aVect_indexRA(o2x_o,'Fioo_frazil') - else if (trim(cime_model) == 'cesm') then - index_o2x_Fioo_q = mct_aVect_indexRA(o2x_o,'Fioo_q') - end if - end if - - lSize = mct_avect_lSize(o2x_o) - ic = c_ocn_or - do n=1,lSize - ca_o = dom_o%data%rAttr(kArea,n) * frac_o%rAttr(ko,n) - ca_i = dom_o%data%rAttr(kArea,n) * frac_o%rAttr(ki,n) - nf = f_area; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_o - if (trim(cime_model) == 'e3sm') then - nf = f_wfrz; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - (ca_o+ca_i)*max(0.0_r8,o2x_o%rAttr(index_o2x_Fioo_frazil,n)) - else if (trim(cime_model) == 'cesm') then - nf = f_hfrz; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*max(0.0_r8,o2x_o%rAttr(index_o2x_Fioo_q,n)) - end if - end do - if (trim(cime_model) == 'e3sm') then - budg_dataL(f_hfrz,ic,ip) = -budg_dataL(f_wfrz,ic,ip) * shr_const_latice - else if (trim(cime_model) == 'cesm') then - budg_dataL(f_wfrz,ic,ip) = budg_dataL(f_hfrz,ic,ip) * HFLXtoWFLX - end if - end if - - if (present(do_xao)) then - if (first_time) then - index_xao_Faox_lwup = mct_aVect_indexRA(xao_o,'Faox_lwup') - index_xao_Faox_lat = mct_aVect_indexRA(xao_o,'Faox_lat') - index_xao_Faox_sen = 
mct_aVect_indexRA(xao_o,'Faox_sen') - index_xao_Faox_evap = mct_aVect_indexRA(xao_o,'Faox_evap') - - index_xao_Faox_evap_16O = mct_aVect_indexRA(xao_o,'Faox_evap_16O',perrWith='quiet') - if ( index_xao_Faox_evap_16O /= 0 ) flds_wiso_ocn = .true. - if ( flds_wiso_ocn )then - flds_wiso = .true. - index_xao_Faox_evap_18O = mct_aVect_indexRA(xao_o,'Faox_evap_18O') - index_xao_Faox_evap_HDO = mct_aVect_indexRA(xao_o,'Faox_evap_HDO') - end if - end if - - lSize = mct_avect_lSize(xao_o) - ic = c_ocn_or - do n=1,lSize - ca_o = dom_o%data%rAttr(kArea,n) * frac_o%rAttr(ko,n) - nf = f_hlwup; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_o*xao_o%rAttr(index_xao_Faox_lwup,n) - nf = f_hlatv; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_o*xao_o%rAttr(index_xao_Faox_lat,n) - nf = f_hsen ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_o*xao_o%rAttr(index_xao_Faox_sen,n) - nf = f_wevap; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_o*xao_o%rAttr(index_xao_Faox_evap,n) - - if ( flds_wiso_ocn )then - nf = f_wevap_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_o*xao_o%rAttr(index_xao_Faox_evap_16O,n) - nf = f_wevap_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_o*xao_o%rAttr(index_xao_Faox_evap_18O,n) - nf = f_wevap_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_o*xao_o%rAttr(index_xao_Faox_evap_HDO,n) - end if - - end do - end if - - if (present(do_x2o)) then - if (first_time) then - index_x2o_Fioi_melth = mct_aVect_indexRA(x2o_o,'Fioi_melth') - index_x2o_Fioi_meltw = mct_aVect_indexRA(x2o_o,'Fioi_meltw') - index_x2o_Fioi_bergh = mct_aVect_indexRA(x2o_o,'PFioi_bergh', perrWith='quiet') - index_x2o_Fioi_bergw = mct_aVect_indexRA(x2o_o,'PFioi_bergw', perrWith='quiet') - index_x2o_Fioi_salt = mct_aVect_indexRA(x2o_o,'Fioi_salt') - index_x2o_Foxx_swnet = mct_aVect_indexRA(x2o_o,'Foxx_swnet') - index_x2o_Faxa_lwdn = mct_aVect_indexRA(x2o_o,'Faxa_lwdn') - index_x2o_Faxa_rain = mct_aVect_indexRA(x2o_o,'Faxa_rain') - index_x2o_Faxa_snow = mct_aVect_indexRA(x2o_o,'Faxa_snow') - index_x2o_Foxx_lwup = mct_aVect_indexRA(x2o_o,'Foxx_lwup') - index_x2o_Foxx_lat = mct_aVect_indexRA(x2o_o,'Foxx_lat') - index_x2o_Foxx_sen = mct_aVect_indexRA(x2o_o,'Foxx_sen') - index_x2o_Foxx_evap = mct_aVect_indexRA(x2o_o,'Foxx_evap') - index_x2o_Foxx_rofl = mct_aVect_indexRA(x2o_o,'Foxx_rofl') - index_x2o_Foxx_rofi = mct_aVect_indexRA(x2o_o,'Foxx_rofi') - - if ( flds_wiso_ocn )then - index_x2o_Fioi_meltw_16O = mct_aVect_indexRA(x2o_o,'Fioi_meltw_16O') - index_x2o_Fioi_meltw_18O = mct_aVect_indexRA(x2o_o,'Fioi_meltw_18O') - index_x2o_Fioi_meltw_HDO = mct_aVect_indexRA(x2o_o,'Fioi_meltw_HDO') - index_x2o_Faxa_rain_16O = mct_aVect_indexRA(x2o_o,'Faxa_rain_16O') - index_x2o_Faxa_rain_18O = mct_aVect_indexRA(x2o_o,'Faxa_rain_18O') - index_x2o_Faxa_rain_HDO = mct_aVect_indexRA(x2o_o,'Faxa_rain_HDO') - index_x2o_Faxa_snow_16O = mct_aVect_indexRA(x2o_o,'Faxa_snow_16O') - index_x2o_Faxa_snow_18O = mct_aVect_indexRA(x2o_o,'Faxa_snow_18O') - index_x2o_Faxa_snow_HDO = mct_aVect_indexRA(x2o_o,'Faxa_snow_HDO') - - index_x2o_Foxx_rofl_16O = mct_aVect_indexRA(x2o_o,'Foxx_rofl_16O') - index_x2o_Foxx_rofi_16O = mct_aVect_indexRA(x2o_o,'Foxx_rofi_16O') - index_x2o_Foxx_rofl_18O = mct_aVect_indexRA(x2o_o,'Foxx_rofl_18O') - index_x2o_Foxx_rofi_18O = mct_aVect_indexRA(x2o_o,'Foxx_rofi_18O') - index_x2o_Foxx_rofl_HDO = mct_aVect_indexRA(x2o_o,'Foxx_rofl_HDO') - index_x2o_Foxx_rofi_HDO = mct_aVect_indexRA(x2o_o,'Foxx_rofi_HDO') - end if - end if - - if (.not. present(do_xao)) then - ! 
these are in x2o but they really are the atm/ocean flux - ! computed in the coupler and are "like" an o2x - lSize = mct_avect_lSize(x2o_o) - ic = c_ocn_or - do n=1,lSize - ca_o = dom_o%data%rAttr(kArea,n) * frac_o%rAttr(ko,n) - ca_i = dom_o%data%rAttr(kArea,n) * frac_o%rAttr(ki,n) - nf = f_hlwup; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_lwup,n) - nf = f_hlatv; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_lat,n) - nf = f_hsen ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_sen,n) - nf = f_wevap; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_evap,n) - end do - endif - - lSize = mct_avect_lSize(x2o_o) - ic = c_ocn_os - do n=1,lSize - ca_o = dom_o%data%rAttr(kArea,n) * frac_o%rAttr(ko,n) - ca_i = dom_o%data%rAttr(kArea,n) * frac_o%rAttr(ki,n) - nf = f_area ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_o - - if (index_x2o_Fioi_bergw == 0) then - nf = f_wmelt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Fioi_meltw,n) - else - nf = f_wmelt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*(x2o_o%rAttr(index_x2o_Fioi_meltw,n)+x2o_o%rAttr(index_x2o_Fioi_bergw,n)) - endif - - if (index_x2o_Fioi_bergh == 0) then - nf = f_hmelt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Fioi_melth,n) - else - nf = f_hmelt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*(x2o_o%rAttr(index_x2o_Fioi_melth,n)+x2o_o%rAttr(index_x2o_Fioi_bergh,n)) - endif - - if (trim(cime_model) == 'cesm') then - nf = f_wsalt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Fioi_salt,n) * SFLXtoWFLX - endif - nf = f_hswnet; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_swnet,n) - nf = f_hlwdn ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_lwdn,n) - nf = f_wrain ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_rain,n) - nf = f_wsnow ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_snow,n) - nf = f_wroff ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofl,n) - nf = f_wioff ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofi,n) - - if ( flds_wiso_ocn )then - nf = f_wmelt_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Fioi_meltw_16O,n) - nf = f_wmelt_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Fioi_meltw_18O,n) - nf = f_wmelt_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Fioi_meltw_HDO,n) - - nf = f_wrain_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_rain_16O,n) - nf = f_wrain_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_rain_18O,n) - nf = f_wrain_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_rain_HDO,n) - - nf = f_wsnow_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_snow_16O,n) - nf = f_wsnow_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_snow_18O,n) - nf = f_wsnow_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - 
(ca_o+ca_i)*x2o_o%rAttr(index_x2o_Faxa_snow_HDO,n) - nf = f_wroff_16O ; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofl_16O,n) - nf = f_wioff_16O ; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofi_16O,n) - nf = f_wroff_18O ; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofl_18O,n) - nf = f_wioff_18O ; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofi_18O,n) - nf = f_wroff_HDO ; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofl_HDO,n) - nf = f_wioff_HDO ; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + (ca_o+ca_i)*x2o_o%rAttr(index_x2o_Foxx_rofi_HDO,n) - end if - end do - budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - budg_dataL(f_hioff,ic,ip) = -budg_dataL(f_wioff,ic,ip)*shr_const_latice - end if - - ! EBK -- isotope r2x_Forr_rofl/i? - - first_time = .false. - - end subroutine seq_diag_ocn_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_ice_mct - compute global ice input/output flux diagnostics - ! - ! !DESCRIPTION: - ! Compute global ice input/output flux diagnostics - ! - ! !REVISION HISTORY: - ! 2008-jul-10 - T. Craig - update - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_diag_ice_mct( ice, frac_i, infodata, do_i2x, do_x2i) - - type(component_type) , intent(in) :: ice ! component type for instance1 - type(mct_aVect) , intent(in) :: frac_i ! frac bundle - type(seq_infodata_type) , intent(in) :: infodata - logical , intent(in), optional :: do_i2x - logical , intent(in), optional :: do_x2i - - !EOP - - !----- local ----- - type(mct_aVect), pointer :: i2x_i ! model to drv bundle - type(mct_aVect), pointer :: x2i_i ! drv to model bundle - type(mct_ggrid), pointer :: dom_i - integer(in) :: n,ic,nf,ip ! generic index - integer(in) :: kArea ! index of area field in aVect - integer(in) :: kLat ! index of lat field in aVect - integer(in) :: ko,ki ! fraction indices - integer(in) :: lSize ! size of aVect - real(r8) :: ca_i,ca_o ! area of a grid cell - logical,save :: first_time = .true. - logical,save :: flds_wiso_ice = .false. - logical,save :: flds_wiso_ice_x2i = .false. - character(len=cs) :: cime_model - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_ice_mct) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_infodata_GetData(infodata, cime_model=cime_model) - - !--------------------------------------------------------------------------- - ! 
add values found in this bundle to the budget table - !--------------------------------------------------------------------------- - - dom_i => component_get_dom_cx(ice) - i2x_i => component_get_c2x_cx(ice) - x2i_i => component_get_x2c_cx(ice) - - ip = p_inst - - kArea = mct_aVect_indexRA(dom_i%data,afldname) - kLat = mct_aVect_indexRA(dom_i%data,latname) - ki = mct_aVect_indexRA(frac_i,ifracname) - ko = mct_aVect_indexRA(frac_i,ofracname) - - if (present(do_i2x)) then - index_i2x_Fioi_melth = mct_aVect_indexRA(i2x_i,'Fioi_melth') - index_i2x_Fioi_meltw = mct_aVect_indexRA(i2x_i,'Fioi_meltw') - index_i2x_Fioi_swpen = mct_aVect_indexRA(i2x_i,'Fioi_swpen') - index_i2x_Faii_swnet = mct_aVect_indexRA(i2x_i,'Faii_swnet') - index_i2x_Faii_lwup = mct_aVect_indexRA(i2x_i,'Faii_lwup') - index_i2x_Faii_lat = mct_aVect_indexRA(i2x_i,'Faii_lat') - index_i2x_Faii_sen = mct_aVect_indexRA(i2x_i,'Faii_sen') - index_i2x_Faii_evap = mct_aVect_indexRA(i2x_i,'Faii_evap') - index_i2x_Fioi_salt = mct_aVect_indexRA(i2x_i,'Fioi_salt') - - index_i2x_Fioi_meltw_16O = mct_aVect_indexRA(i2x_i,'Fioi_meltw_16O',perrWith='quiet') - if ( index_i2x_Fioi_meltw_16O /= 0 ) flds_wiso_ice = .true. - if ( flds_wiso_ice )then - flds_wiso = .true. - index_i2x_Fioi_meltw_18O = mct_aVect_indexRA(i2x_i,'Fioi_meltw_18O') - index_i2x_Fioi_meltw_HDO = mct_aVect_indexRA(i2x_i,'Fioi_meltw_HDO') - index_i2x_Faii_evap_16O = mct_aVect_indexRA(i2x_i,'Faii_evap_16O') - index_i2x_Faii_evap_18O = mct_aVect_indexRA(i2x_i,'Faii_evap_18O') - index_i2x_Faii_evap_HDO = mct_aVect_indexRA(i2x_i,'Faii_evap_HDO') - end if - - lSize = mct_avect_lSize(i2x_i) - do n=1,lSize - if (dom_i%data%rAttr(kLat,n) > 0.0_r8) then - ic = c_inh_ir - else - ic = c_ish_ir - endif - ca_o = dom_i%data%rAttr(kArea,n) * frac_i%rAttr(ko,n) - ca_i = dom_i%data%rAttr(kArea,n) * frac_i%rAttr(ki,n) - nf = f_area ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i - nf = f_hmelt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_i*i2x_i%rAttr(index_i2x_Fioi_melth,n) - nf = f_wmelt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_i*i2x_i%rAttr(index_i2x_Fioi_meltw,n) - if (trim(cime_model) == 'cesm') then - nf = f_wsalt ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - ca_i*i2x_i%rAttr(index_i2x_Fioi_salt,n) * SFLXtoWFLX - endif - nf = f_hswnet; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*i2x_i%rAttr(index_i2x_Faii_swnet,n) & - - ca_i*i2x_i%rAttr(index_i2x_Fioi_swpen,n) - nf = f_hlwup ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*i2x_i%rAttr(index_i2x_Faii_lwup,n) - nf = f_hlatv ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*i2x_i%rAttr(index_i2x_Faii_lat,n) - nf = f_hsen ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*i2x_i%rAttr(index_i2x_Faii_sen,n) - nf = f_wevap ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*i2x_i%rAttr(index_i2x_Faii_evap,n) - - if ( flds_wiso_ice )then - nf = f_wmelt_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_i*i2x_i%rAttr(index_i2x_Fioi_meltw_16O,n) - nf = f_wmelt_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_i*i2x_i%rAttr(index_i2x_Fioi_meltw_18O,n) - nf = f_wmelt_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - ca_i*i2x_i%rAttr(index_i2x_Fioi_meltw_HDO,n) - - nf = f_wevap_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*i2x_i%rAttr(index_i2x_Faii_evap_16O,n) - nf = f_wevap_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*i2x_i%rAttr(index_i2x_Faii_evap_18O,n) - nf = f_wevap_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - 
ca_i*i2x_i%rAttr(index_i2x_Faii_evap_HDO,n) - end if - end do - end if - - if (present(do_x2i)) then - if (first_time) then - index_x2i_Faxa_lwdn = mct_aVect_indexRA(x2i_i,'Faxa_lwdn') - index_x2i_Faxa_rain = mct_aVect_indexRA(x2i_i,'Faxa_rain') - index_x2i_Faxa_snow = mct_aVect_indexRA(x2i_i,'Faxa_snow') - if (trim(cime_model) == 'e3sm') then - index_x2i_Fioo_frazil = mct_aVect_indexRA(x2i_i,'Fioo_frazil') - else if (trim(cime_model) == 'cesm') then - index_x2i_Fioo_q = mct_aVect_indexRA(x2i_i,'Fioo_q') - end if - index_x2i_Fixx_rofi = mct_aVect_indexRA(x2i_i,'Fixx_rofi') - - index_x2i_Faxa_rain_16O = mct_aVect_indexRA(x2i_i,'Faxa_rain_16O', perrWith='quiet') - if ( index_x2i_Faxa_rain_16O /= 0 ) flds_wiso_ice_x2i = .true. - if ( flds_wiso_ice_x2i )then - flds_wiso = .true. - index_x2i_Faxa_rain_18O = mct_aVect_indexRA(x2i_i,'Faxa_rain_18O') - index_x2i_Faxa_rain_HDO = mct_aVect_indexRA(x2i_i,'Faxa_rain_HDO') - index_x2i_Faxa_snow_16O = mct_aVect_indexRA(x2i_i,'Faxa_snow_16O') - index_x2i_Faxa_snow_18O = mct_aVect_indexRA(x2i_i,'Faxa_snow_18O') - index_x2i_Faxa_snow_HDO = mct_aVect_indexRA(x2i_i,'Faxa_snow_HDO') - end if - end if - - lSize = mct_avect_lSize(x2i_i) - do n=1,lSize - if (dom_i%data%rAttr(kLat,n) > 0.0_r8) then - ic = c_inh_is - else - ic = c_ish_is - endif - ca_o = dom_i%data%rAttr(kArea,n) * frac_i%rAttr(ko,n) - ca_i = dom_i%data%rAttr(kArea,n) * frac_i%rAttr(ki,n) - nf = f_area ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i - nf = f_hlwdn; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*x2i_i%rAttr(index_x2i_Faxa_lwdn,n) - nf = f_wrain; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*x2i_i%rAttr(index_x2i_Faxa_rain,n) - nf = f_wsnow; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*x2i_i%rAttr(index_x2i_Faxa_snow,n) - nf = f_wioff; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + ca_i*x2i_i%rAttr(index_x2i_Fixx_rofi,n) - - if (trim(cime_model) == 'e3sm') then - nf = f_wfrz ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - (ca_o+ca_i)*max(0.0_r8,x2i_i%rAttr(index_x2i_Fioo_frazil,n)) - else if (trim(cime_model) == 'cesm') then - nf = f_hfrz ; budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) - & - (ca_o+ca_i)*max(0.0_r8,x2i_i%rAttr(index_x2i_Fioo_q,n)) - end if - if ( flds_wiso_ice_x2i )then - nf = f_wrain_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*x2i_i%rAttr(index_x2i_Faxa_rain_16O,n) - nf = f_wrain_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*x2i_i%rAttr(index_x2i_Faxa_rain_18O,n) - nf = f_wrain_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*x2i_i%rAttr(index_x2i_Faxa_rain_HDO,n) - - nf = f_wsnow_16O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*x2i_i%rAttr(index_x2i_Faxa_snow_16O,n) - nf = f_wsnow_18O; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*x2i_i%rAttr(index_x2i_Faxa_snow_18O,n) - nf = f_wsnow_HDO; - budg_dataL(nf,ic,ip) = budg_dataL(nf,ic,ip) + & - ca_i*x2i_i%rAttr(index_x2i_Faxa_snow_HDO,n) - end if - end do - ic = c_inh_is - budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - budg_dataL(f_hioff,ic,ip) = -budg_dataL(f_wioff,ic,ip)*shr_const_latice - if (trim(cime_model) == 'e3sm') then - budg_dataL(f_hfrz ,ic,ip) = -budg_dataL(f_wfrz ,ic,ip)*shr_const_latice - else if (trim(cime_model) == 'cesm') then - budg_dataL(f_wfrz ,ic,ip) = budg_dataL(f_hfrz ,ic,ip)*HFLXtoWFLX - end if - - ic = c_ish_is - budg_dataL(f_hlatf,ic,ip) = -budg_dataL(f_wsnow,ic,ip)*shr_const_latice - budg_dataL(f_hioff,ic,ip) = -budg_dataL(f_wioff,ic,ip)*shr_const_latice - if (trim(cime_model) 
== 'e3sm') then - budg_dataL(f_hfrz ,ic,ip) = -budg_dataL(f_wfrz ,ic,ip)*shr_const_latice - else if (trim(cime_model) == 'cesm') then - budg_dataL(f_wfrz ,ic,ip) = budg_dataL(f_hfrz ,ic,ip)*HFLXtoWFLX - end if - end if - - first_time = .false. - - end subroutine seq_diag_ice_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_print_mct - print global budget diagnostics - ! - ! !DESCRIPTION: - ! Print global budget diagnostics. - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_diag_print_mct(EClock, stop_alarm, & - budg_print_inst, budg_print_daily, budg_print_month, & - budg_print_ann, budg_print_ltann, budg_print_ltend, infodata) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(in) :: EClock - logical , intent(in) :: stop_alarm - integer , intent(in) :: budg_print_inst - integer , intent(in) :: budg_print_daily - integer , intent(in) :: budg_print_month - integer , intent(in) :: budg_print_ann - integer , intent(in) :: budg_print_ltann - integer , intent(in) :: budg_print_ltend - type(seq_infodata_type) , intent(in) :: infodata - - !EOP - - !--- local --- - integer(in) :: ic,nf,ip,is ! data array indicies - integer(in) :: ica,icl,icn,ics,ico - integer(in) :: icar,icxs,icxr,icas - integer(in) :: cdate,sec ! coded date, seconds - integer(in) :: yr,mon,day ! date - integer(in) :: iam ! pe number - integer(in) :: plev ! print level - logical :: sumdone ! has a sum been computed yet - character(len=40):: str ! string - character(len=cs):: cime_model - real(r8) :: dataGpr (f_size,c_size,p_size) ! values to print, scaled and such - integer, parameter :: nisotopes = 3 - character(len=5), parameter :: isoname(nisotopes) = (/ 'H216O', 'H218O', ' HDO' /) - integer, parameter :: iso0(nisotopes) = (/ f_16O, f_18O, f_hdO /) - integer, parameter :: isof(nisotopes) = (/ f_16O_end, f_18O_end, f_hdO_end /) - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_print_mct) ' - character(*),parameter :: F00 = "('(seq_diag_print_mct) ',4a)" - - !----- formats ----- - character(*),parameter :: FAH="(4a,i9,i6)" - character(*),parameter :: FA0= "(' ',12x,6(6x,a8,1x))" - character(*),parameter :: FA1= "(' ',a12,6f15.8)" - character(*),parameter :: FA0r="(' ',12x,8(6x,a8,1x))" - character(*),parameter :: FA1r="(' ',a12,8f15.8)" - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_infodata_GetData(infodata, cime_model=cime_model) - - !------------------------------------------------------------------------------- - ! print instantaneous budget data - !------------------------------------------------------------------------------- - - sumdone = .false. - call seq_comm_setptrs(CPLID,iam=iam) - call seq_timemgr_EClockGetData(EClock,curr_yr=yr, & - curr_mon=mon,curr_day=day,curr_tod=sec) - cdate = yr*10000+mon*100+day - - do ip = 1,p_size - plev = 0 - if (ip == p_inst) then - plev = max(plev,budg_print_inst) - endif - if (ip==p_day .and. sec==0) then - plev = max(plev,budg_print_daily) - endif - if (ip==p_mon .and. day==1 .and. sec==0) then - plev = max(plev,budg_print_month) - endif - if (ip==p_ann .and. mon==1 .and. day==1 .and. sec==0) then - plev = max(plev,budg_print_ann) - endif - if (ip==p_inf .and. 
mon==1 .and. day==1 .and. sec==0) then - plev = max(plev,budg_print_ltann) - endif - if (ip==p_inf .and. stop_alarm) then - plev = max(plev,budg_print_ltend) - endif - - if (plev > 0) then - ! ---- doprint ---- doprint ---- doprint ---- - - if (.not.sumdone) then - call seq_diag_sum0_mct() - dataGpr = budg_dataG - sumdone = .true. - - ! old budget normalizations (global area and 1e6 for water) - dataGpr = dataGpr/(4.0_r8*shr_const_pi) - dataGpr(f_w:f_w_end,:,:) = dataGpr(f_w:f_w_end,:,:) * 1.0e6_r8 - if ( flds_wiso )then - dataGpr(iso0(1):isof(nisotopes),:,:) = dataGpr(iso0(1):isof(nisotopes),:,:) * 1.0e6_r8 - end if - dataGpr = dataGpr/budg_ns - - if (iam /= 0) return - endif - - ! --------------------------------------------------------- - ! ---- detail atm budgets and breakdown into components --- - ! --------------------------------------------------------- - - if (plev >= 3) then - do ic = 1,2 - if (ic == 1) then - ica = c_atm_ar - icl = c_lnd_ar - icn = c_inh_ar - ics = c_ish_ar - ico = c_ocn_ar - str = "ATM_to_CPL" - elseif (ic == 2) then - ica = c_atm_as - icl = c_lnd_as - icn = c_inh_as - ics = c_ish_as - ico = c_ocn_as - str = "CPL_TO_ATM" - else - call shr_sys_abort(subname//' ERROR in ic index code 411') - endif - - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//' AREA BUDGET (m2/m2): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0) cname(ica),cname(icl),cname(icn),cname(ics),cname(ico),' *SUM* ' - do nf = f_a, f_a_end - write(logunit,FA1) fname(nf),dataGpr(nf,ica,ip),dataGpr(nf,icl,ip), & - dataGpr(nf,icn,ip),dataGpr(nf,ics,ip),dataGpr(nf,ico,ip), & - dataGpr(nf,ica,ip)+dataGpr(nf,icl,ip)+ & - dataGpr(nf,icn,ip)+dataGpr(nf,ics,ip)+dataGpr(nf,ico,ip) - enddo - - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//' HEAT BUDGET (W/m2): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0) cname(ica),cname(icl),cname(icn),cname(ics),cname(ico),' *SUM* ' - do nf = f_h, f_h_end - write(logunit,FA1) fname(nf),dataGpr(nf,ica,ip),dataGpr(nf,icl,ip), & - dataGpr(nf,icn,ip),dataGpr(nf,ics,ip),dataGpr(nf,ico,ip), & - dataGpr(nf,ica,ip)+dataGpr(nf,icl,ip)+ & - dataGpr(nf,icn,ip)+dataGpr(nf,ics,ip)+dataGpr(nf,ico,ip) - enddo - write(logunit,FA1) ' *SUM*' ,sum(dataGpr(f_h:f_h_end,ica,ip)),sum(dataGpr(f_h:f_h_end,icl,ip)), & - sum(dataGpr(f_h:f_h_end,icn,ip)),sum(dataGpr(f_h:f_h_end,ics,ip)),sum(dataGpr(f_h:f_h_end,ico,ip)), & - sum(dataGpr(f_h:f_h_end,ica,ip))+sum(dataGpr(f_h:f_h_end,icl,ip))+ & - sum(dataGpr(f_h:f_h_end,icn,ip))+sum(dataGpr(f_h:f_h_end,ics,ip))+sum(dataGpr(f_h:f_h_end,ico,ip)) - - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//' WATER BUDGET (kg/m2s*1e6): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0) cname(ica),cname(icl),cname(icn),cname(ics),cname(ico),' *SUM* ' - do nf = f_w, f_w_end - if (nf == f_wsalt .and. 
trim(cime_model) == 'e3sm') cycle - write(logunit,FA1) fname(nf),dataGpr(nf,ica,ip),dataGpr(nf,icl,ip), & - dataGpr(nf,icn,ip),dataGpr(nf,ics,ip),dataGpr(nf,ico,ip), & - dataGpr(nf,ica,ip)+dataGpr(nf,icl,ip)+ & - dataGpr(nf,icn,ip)+dataGpr(nf,ics,ip)+dataGpr(nf,ico,ip) - enddo - write(logunit,FA1) ' *SUM*' ,sum(dataGpr(f_w:f_w_end,ica,ip)),sum(dataGpr(f_w:f_w_end,icl,ip)), & - sum(dataGpr(f_w:f_w_end,icn,ip)),sum(dataGpr(f_w:f_w_end,ics,ip)),sum(dataGpr(f_w:f_w_end,ico,ip)), & - sum(dataGpr(f_w:f_w_end,ica,ip))+sum(dataGpr(f_w:f_w_end,icl,ip))+ & - sum(dataGpr(f_w:f_w_end,icn,ip))+sum(dataGpr(f_w:f_w_end,ics,ip))+sum(dataGpr(f_w:f_w_end,ico,ip)) - - if ( flds_wiso )then - do is = 1, nisotopes - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//' '//isoname(is)//' WATER BUDGET (kg/m2s*1e6): period = ', & - trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0) cname(ica),cname(icl),cname(icn),cname(ics),cname(ico),' *SUM* ' - do nf = iso0(is), isof(is) - write(logunit,FA1) fname(nf),dataGpr(nf,ica,ip),dataGpr(nf,icl,ip), & - dataGpr(nf,icn,ip),dataGpr(nf,ics,ip),dataGpr(nf,ico,ip), & - dataGpr(nf,ica,ip)+dataGpr(nf,icl,ip)+ & - dataGpr(nf,icn,ip)+dataGpr(nf,ics,ip)+dataGpr(nf,ico,ip) - enddo - write(logunit,FA1) ' *SUM*', sum(dataGpr(iso0(is):isof(is),ica,ip)),& - sum(dataGpr(iso0(is):isof(is),icl,ip)), & - sum(dataGpr(iso0(is):isof(is),icn,ip)),& - sum(dataGpr(iso0(is):isof(is),ics,ip)), & - sum(dataGpr(iso0(is):isof(is),ico,ip)), & - sum(dataGpr(iso0(is):isof(is),ica,ip))+& - sum(dataGpr(iso0(is):isof(is),icl,ip))+ & - sum(dataGpr(iso0(is):isof(is),icn,ip))+& - sum(dataGpr(iso0(is):isof(is),ics,ip))+ & - sum(dataGpr(iso0(is):isof(is),ico,ip)) - end do - end if - - enddo - endif ! plev - - ! --------------------------------------------------------- - ! ---- detail lnd/ocn/ice component budgets ---- - ! --------------------------------------------------------- - - if (plev >= 2) then - do ic = 1,4 - if (ic == 1) then - icar = c_lnd_ar - icxs = c_lnd_ls - icxr = c_lnd_lr - icas = c_lnd_as - str = "LND" - elseif (ic == 2) then - icar = c_ocn_ar - icxs = c_ocn_os - icxr = c_ocn_or - icas = c_ocn_as - str = "OCN" - elseif (ic == 3) then - icar = c_inh_ar - icxs = c_inh_is - icxr = c_inh_ir - icas = c_inh_as - str = "ICE_NH" - elseif (ic == 4) then - icar = c_ish_ar - icxs = c_ish_is - icxr = c_ish_ir - icas = c_ish_as - str = "ICE_SH" - else - call shr_sys_abort(subname//' ERROR in ic index code 412') - endif - - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//' HEAT BUDGET (W/m2): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0) cname(icar),cname(icxs),cname(icxr),cname(icas),' *SUM* ' - do nf = f_h, f_h_end - write(logunit,FA1) fname(nf),-dataGpr(nf,icar,ip),dataGpr(nf,icxs,ip), & - dataGpr(nf,icxr,ip),-dataGpr(nf,icas,ip), & - -dataGpr(nf,icar,ip)+dataGpr(nf,icxs,ip)+ & - dataGpr(nf,icxr,ip)-dataGpr(nf,icas,ip) - enddo - write(logunit,FA1) ' *SUM*',-sum(dataGpr(f_h:f_h_end,icar,ip)),sum(dataGpr(f_h:f_h_end,icxs,ip)), & - sum(dataGpr(f_h:f_h_end,icxr,ip)),-sum(dataGpr(f_h:f_h_end,icas,ip)), & - -sum(dataGpr(f_h:f_h_end,icar,ip))+sum(dataGpr(f_h:f_h_end,icxs,ip))+ & - sum(dataGpr(f_h:f_h_end,icxr,ip))-sum(dataGpr(f_h:f_h_end,icas,ip)) - - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//' WATER BUDGET (kg/m2s*1e6): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0) cname(icar),cname(icxs),cname(icxr),cname(icas),' *SUM* ' - do nf = f_w, f_w_end - if (nf == f_wsalt .and. 
trim(cime_model) == 'e3sm') cycle - write(logunit,FA1) fname(nf),-dataGpr(nf,icar,ip),dataGpr(nf,icxs,ip), & - dataGpr(nf,icxr,ip),-dataGpr(nf,icas,ip), & - -dataGpr(nf,icar,ip)+dataGpr(nf,icxs,ip)+ & - dataGpr(nf,icxr,ip)-dataGpr(nf,icas,ip) - enddo - write(logunit,FA1) ' *SUM*',-sum(dataGpr(f_w:f_w_end,icar,ip)),sum(dataGpr(f_w:f_w_end,icxs,ip)), & - sum(dataGpr(f_w:f_w_end,icxr,ip)),-sum(dataGpr(f_w:f_w_end,icas,ip)), & - -sum(dataGpr(f_w:f_w_end,icar,ip))+sum(dataGpr(f_w:f_w_end,icxs,ip))+ & - sum(dataGpr(f_w:f_w_end,icxr,ip))-sum(dataGpr(f_w:f_w_end,icas,ip)) - - if ( flds_wiso ) then - do is = 1, nisotopes - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//isoname(is)//' WATER BUDGET (kg/m2s*1e6): period = ',trim(pname(ip)), & - ': date = ',cdate,sec - write(logunit,FA0) cname(icar),cname(icxs),cname(icxr),cname(icas),' *SUM* ' - do nf = iso0(is), isof(is) - write(logunit,FA1) fname(nf),-dataGpr(nf,icar,ip),dataGpr(nf,icxs,ip), & - dataGpr(nf,icxr,ip),-dataGpr(nf,icas,ip), & - -dataGpr(nf,icar,ip)+dataGpr(nf,icxs,ip)+ & - dataGpr(nf,icxr,ip)-dataGpr(nf,icas,ip) - enddo - write(logunit,FA1) ' *SUM*',-sum(dataGpr(iso0(is):isof(is),icar,ip)),& - sum(dataGpr(iso0(is):isof(is),icxs,ip)), & - sum(dataGpr(iso0(is):isof(is),icxr,ip)), & - -sum(dataGpr(iso0(is):isof(is),icas,ip)), & - -sum(dataGpr(iso0(is):isof(is),icar,ip)) & - +sum(dataGpr(iso0(is):isof(is),icxs,ip))+ & - sum(dataGpr(iso0(is):isof(is),icxr,ip)) & - -sum(dataGpr(iso0(is):isof(is),icas,ip)) - write(logunit,*) ' ' - write(logunit,FAH) subname,trim(str)//isoname(is)//' WATER BUDGET (kg/m2s*1e6): period = ',trim(pname(ip)),& - ': date = ',cdate,sec - write(logunit,FA0) cname(icar),cname(icxs),cname(icxr),cname(icas),' *SUM* ' - do nf = iso0(is), isof(is) - write(logunit,FA1) fname(nf),-dataGpr(nf,icar,ip),dataGpr(nf,icxs,ip), & - dataGpr(nf,icxr,ip),-dataGpr(nf,icas,ip), & - -dataGpr(nf,icar,ip)+dataGpr(nf,icxs,ip)+ & - dataGpr(nf,icxr,ip)-dataGpr(nf,icas,ip) - enddo - write(logunit,FA1) ' *SUM*',-sum(dataGpr(iso0(is):isof(is),icar,ip)),& - sum(dataGpr(iso0(is):isof(is),icxs,ip)), & - sum(dataGpr(iso0(is):isof(is),icxr,ip)), & - -sum(dataGpr(iso0(is):isof(is),icas,ip)), & - -sum(dataGpr(iso0(is):isof(is),icar,ip)) & - +sum(dataGpr(iso0(is):isof(is),icxs,ip))+ & - sum(dataGpr(iso0(is):isof(is),icxr,ip)) & - -sum(dataGpr(iso0(is):isof(is),icas,ip)) - end do - end if - enddo - endif ! plev - - ! --------------------------------------------------------- - ! ---- net summary budgets ---- - ! 
--------------------------------------------------------- - - if (plev >= 1) then - - write(logunit,*) ' ' - write(logunit,FAH) subname,'NET AREA BUDGET (m2/m2): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0) ' atm',' lnd',' ocn',' ice nh',' ice sh',' *SUM* ' - do nf = f_a,f_a_end - write(logunit,FA1) fname(nf),dataGpr(nf,c_atm_ar,ip), & - dataGpr(nf,c_lnd_lr,ip), & - dataGpr(nf,c_ocn_or,ip), & - dataGpr(nf,c_inh_ir,ip), & - dataGpr(nf,c_ish_ir,ip), & - dataGpr(nf,c_atm_ar,ip)+ & - dataGpr(nf,c_lnd_lr,ip)+ & - dataGpr(nf,c_ocn_or,ip)+ & - dataGpr(nf,c_inh_ir,ip)+ & - dataGpr(nf,c_ish_ir,ip) - enddo - - write(logunit,*) ' ' - write(logunit,FAH) subname,'NET HEAT BUDGET (W/m2): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0r) ' atm',' lnd',' rof',' ocn',' ice nh',' ice sh',' glc',' *SUM* ' - do nf = f_h, f_h_end - write(logunit,FA1r) fname(nf),dataGpr(nf,c_atm_ar,ip)+dataGpr(nf,c_atm_as,ip), & - dataGpr(nf,c_lnd_lr,ip)+dataGpr(nf,c_lnd_ls,ip), & - dataGpr(nf,c_rof_rr,ip)+dataGpr(nf,c_rof_rs,ip), & - dataGpr(nf,c_ocn_or,ip)+dataGpr(nf,c_ocn_os,ip), & - dataGpr(nf,c_inh_ir,ip)+dataGpr(nf,c_inh_is,ip), & - dataGpr(nf,c_ish_ir,ip)+dataGpr(nf,c_ish_is,ip), & - dataGpr(nf,c_glc_gr,ip)+dataGpr(nf,c_glc_gs,ip), & - dataGpr(nf,c_atm_ar,ip)+dataGpr(nf,c_atm_as,ip)+ & - dataGpr(nf,c_lnd_lr,ip)+dataGpr(nf,c_lnd_ls,ip)+ & - dataGpr(nf,c_rof_rr,ip)+dataGpr(nf,c_rof_rs,ip)+ & - dataGpr(nf,c_ocn_or,ip)+dataGpr(nf,c_ocn_os,ip)+ & - dataGpr(nf,c_inh_ir,ip)+dataGpr(nf,c_inh_is,ip)+ & - dataGpr(nf,c_ish_ir,ip)+dataGpr(nf,c_ish_is,ip)+ & - dataGpr(nf,c_glc_gr,ip)+dataGpr(nf,c_glc_gs,ip) - enddo - write(logunit,FA1r)' *SUM*',sum(dataGpr(f_h:f_h_end,c_atm_ar,ip))+sum(dataGpr(f_h:f_h_end,c_atm_as,ip)), & - sum(dataGpr(f_h:f_h_end,c_lnd_lr,ip))+sum(dataGpr(f_h:f_h_end,c_lnd_ls,ip)), & - sum(dataGpr(f_h:f_h_end,c_rof_rr,ip))+sum(dataGpr(f_h:f_h_end,c_rof_rs,ip)), & - sum(dataGpr(f_h:f_h_end,c_ocn_or,ip))+sum(dataGpr(f_h:f_h_end,c_ocn_os,ip)), & - sum(dataGpr(f_h:f_h_end,c_inh_ir,ip))+sum(dataGpr(f_h:f_h_end,c_inh_is,ip)), & - sum(dataGpr(f_h:f_h_end,c_ish_ir,ip))+sum(dataGpr(f_h:f_h_end,c_ish_is,ip)), & - sum(dataGpr(f_h:f_h_end,c_glc_gr,ip))+sum(dataGpr(f_h:f_h_end,c_glc_gs,ip)), & - sum(dataGpr(f_h:f_h_end,c_atm_ar,ip))+sum(dataGpr(f_h:f_h_end,c_atm_as,ip))+ & - sum(dataGpr(f_h:f_h_end,c_lnd_lr,ip))+sum(dataGpr(f_h:f_h_end,c_lnd_ls,ip))+ & - sum(dataGpr(f_h:f_h_end,c_rof_rr,ip))+sum(dataGpr(f_h:f_h_end,c_rof_rs,ip))+ & - sum(dataGpr(f_h:f_h_end,c_ocn_or,ip))+sum(dataGpr(f_h:f_h_end,c_ocn_os,ip))+ & - sum(dataGpr(f_h:f_h_end,c_inh_ir,ip))+sum(dataGpr(f_h:f_h_end,c_inh_is,ip))+ & - sum(dataGpr(f_h:f_h_end,c_ish_ir,ip))+sum(dataGpr(f_h:f_h_end,c_ish_is,ip))+ & - sum(dataGpr(f_h:f_h_end,c_glc_gr,ip))+sum(dataGpr(f_h:f_h_end,c_glc_gs,ip)) - - write(logunit,*) ' ' - write(logunit,FAH) subname,'NET WATER BUDGET (kg/m2s*1e6): period = ',trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0r) ' atm',' lnd',' rof',' ocn',' ice nh',' ice sh',' glc',' *SUM* ' - do nf = f_w, f_w_end - if (nf == f_wsalt .and. 
trim(cime_model) == 'e3sm') cycle - write(logunit,FA1r) fname(nf),dataGpr(nf,c_atm_ar,ip)+dataGpr(nf,c_atm_as,ip), & - dataGpr(nf,c_lnd_lr,ip)+dataGpr(nf,c_lnd_ls,ip), & - dataGpr(nf,c_rof_rr,ip)+dataGpr(nf,c_rof_rs,ip), & - dataGpr(nf,c_ocn_or,ip)+dataGpr(nf,c_ocn_os,ip), & - dataGpr(nf,c_inh_ir,ip)+dataGpr(nf,c_inh_is,ip), & - dataGpr(nf,c_ish_ir,ip)+dataGpr(nf,c_ish_is,ip), & - dataGpr(nf,c_glc_gr,ip)+dataGpr(nf,c_glc_gs,ip), & - dataGpr(nf,c_atm_ar,ip)+dataGpr(nf,c_atm_as,ip)+ & - dataGpr(nf,c_lnd_lr,ip)+dataGpr(nf,c_lnd_ls,ip)+ & - dataGpr(nf,c_rof_rr,ip)+dataGpr(nf,c_rof_rs,ip)+ & - dataGpr(nf,c_ocn_or,ip)+dataGpr(nf,c_ocn_os,ip)+ & - dataGpr(nf,c_inh_ir,ip)+dataGpr(nf,c_inh_is,ip)+ & - dataGpr(nf,c_ish_ir,ip)+dataGpr(nf,c_ish_is,ip)+ & - dataGpr(nf,c_glc_gr,ip)+dataGpr(nf,c_glc_gs,ip) - enddo - write(logunit,FA1r)' *SUM*',sum(dataGpr(f_w:f_w_end,c_atm_ar,ip))+sum(dataGpr(f_w:f_w_end,c_atm_as,ip)), & - sum(dataGpr(f_w:f_w_end,c_lnd_lr,ip))+sum(dataGpr(f_w:f_w_end,c_lnd_ls,ip)), & - sum(dataGpr(f_w:f_w_end,c_rof_rr,ip))+sum(dataGpr(f_w:f_w_end,c_rof_rs,ip)), & - sum(dataGpr(f_w:f_w_end,c_ocn_or,ip))+sum(dataGpr(f_w:f_w_end,c_ocn_os,ip)), & - sum(dataGpr(f_w:f_w_end,c_inh_ir,ip))+sum(dataGpr(f_w:f_w_end,c_inh_is,ip)), & - sum(dataGpr(f_w:f_w_end,c_ish_ir,ip))+sum(dataGpr(f_w:f_w_end,c_ish_is,ip)), & - sum(dataGpr(f_w:f_w_end,c_glc_gr,ip))+sum(dataGpr(f_w:f_w_end,c_glc_gs,ip)), & - sum(dataGpr(f_w:f_w_end,c_atm_ar,ip))+sum(dataGpr(f_w:f_w_end,c_atm_as,ip))+ & - sum(dataGpr(f_w:f_w_end,c_lnd_lr,ip))+sum(dataGpr(f_w:f_w_end,c_lnd_ls,ip))+ & - sum(dataGpr(f_w:f_w_end,c_rof_rr,ip))+sum(dataGpr(f_w:f_w_end,c_rof_rs,ip))+ & - sum(dataGpr(f_w:f_w_end,c_ocn_or,ip))+sum(dataGpr(f_w:f_w_end,c_ocn_os,ip))+ & - sum(dataGpr(f_w:f_w_end,c_inh_ir,ip))+sum(dataGpr(f_w:f_w_end,c_inh_is,ip))+ & - sum(dataGpr(f_w:f_w_end,c_ish_ir,ip))+sum(dataGpr(f_w:f_w_end,c_ish_is,ip))+ & - sum(dataGpr(f_w:f_w_end,c_glc_gr,ip))+sum(dataGpr(f_w:f_w_end,c_glc_gs,ip)) - - if ( flds_wiso ) then - - do is = 1, nisotopes - write(logunit,*) ' ' - write(logunit,FAH) subname,'NET '//isoname(is)//' WATER BUDGET (kg/m2s*1e6): period = ', & - trim(pname(ip)),': date = ',cdate,sec - write(logunit,FA0r) ' atm',' lnd',' rof',' ocn',' ice nh',' ice sh',' glc',' *SUM* ' - do nf = iso0(is), isof(is) - write(logunit,FA1r) fname(nf),dataGpr(nf,c_atm_ar,ip)+dataGpr(nf,c_atm_as,ip), & - dataGpr(nf,c_lnd_lr,ip)+dataGpr(nf,c_lnd_ls,ip), & - dataGpr(nf,c_rof_rr,ip)+dataGpr(nf,c_rof_rs,ip), & - dataGpr(nf,c_ocn_or,ip)+dataGpr(nf,c_ocn_os,ip), & - dataGpr(nf,c_inh_ir,ip)+dataGpr(nf,c_inh_is,ip), & - dataGpr(nf,c_ish_ir,ip)+dataGpr(nf,c_ish_is,ip), & - dataGpr(nf,c_glc_gr,ip)+dataGpr(nf,c_glc_gs,ip), & - dataGpr(nf,c_atm_ar,ip)+dataGpr(nf,c_atm_as,ip)+ & - dataGpr(nf,c_lnd_lr,ip)+dataGpr(nf,c_lnd_ls,ip)+ & - dataGpr(nf,c_rof_rr,ip)+dataGpr(nf,c_rof_rs,ip)+ & - dataGpr(nf,c_ocn_or,ip)+dataGpr(nf,c_ocn_os,ip)+ & - dataGpr(nf,c_inh_ir,ip)+dataGpr(nf,c_inh_is,ip)+ & - dataGpr(nf,c_ish_ir,ip)+dataGpr(nf,c_ish_is,ip)+ & - dataGpr(nf,c_glc_gr,ip)+dataGpr(nf,c_glc_gs,ip) - enddo - write(logunit,FA1r)' *SUM*',& - sum(dataGpr(iso0(is):isof(is),c_atm_ar,ip))+ & - sum(dataGpr(iso0(is):isof(is),c_atm_as,ip)),& - sum(dataGpr(iso0(is):isof(is),c_lnd_lr,ip))+ & - sum(dataGpr(iso0(is):isof(is),c_lnd_ls,ip)),& - sum(dataGpr(iso0(is):isof(is),c_rof_rr,ip))+& - sum(dataGpr(iso0(is):isof(is),c_rof_rs,ip)),& - sum(dataGpr(iso0(is):isof(is),c_ocn_or,ip))+& - sum(dataGpr(iso0(is):isof(is),c_ocn_os,ip)),& - sum(dataGpr(iso0(is):isof(is),c_inh_ir,ip))+& - 
sum(dataGpr(iso0(is):isof(is),c_inh_is,ip)),& - sum(dataGpr(iso0(is):isof(is),c_ish_ir,ip))+& - sum(dataGpr(iso0(is):isof(is),c_ish_is,ip)),& - sum(dataGpr(iso0(is):isof(is),c_glc_gr,ip))+ & - sum(dataGpr(iso0(is):isof(is),c_glc_gs,ip)),& - sum(dataGpr(iso0(is):isof(is),c_atm_ar,ip))+& - sum(dataGpr(iso0(is):isof(is),c_atm_as,ip))+& - sum(dataGpr(iso0(is):isof(is),c_lnd_lr,ip))+& - sum(dataGpr(iso0(is):isof(is),c_lnd_ls,ip))+& - sum(dataGpr(iso0(is):isof(is),c_rof_rr,ip))+& - sum(dataGpr(iso0(is):isof(is),c_rof_rs,ip))+& - sum(dataGpr(iso0(is):isof(is),c_ocn_or,ip))+& - sum(dataGpr(iso0(is):isof(is),c_ocn_os,ip))+& - sum(dataGpr(iso0(is):isof(is),c_inh_ir,ip))+& - sum(dataGpr(iso0(is):isof(is),c_inh_is,ip))+& - sum(dataGpr(iso0(is):isof(is),c_ish_ir,ip))+& - sum(dataGpr(iso0(is):isof(is),c_ish_is,ip))+& - sum(dataGpr(iso0(is):isof(is),c_glc_gr,ip))+& - sum(dataGpr(iso0(is):isof(is),c_glc_gs,ip)) - end do - end if - - endif - - write(logunit,*) ' ' - ! ---- doprint ---- doprint ---- doprint ---- - endif ! plev > 0 - enddo ! ip = 1,p_size - - end subroutine seq_diag_print_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_avect_mct - print global budget diagnostics - ! - ! !DESCRIPTION: - ! Print global diagnostics for AV/ID. - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type) , intent(in) :: infodata - integer(in) , intent(in) :: ID - type(mct_aVect) , intent(in) :: av - type(mct_gGrid) , pointer :: dom - type(mct_gsMap) , pointer :: gsmap - character(len=*) , intent(in), optional :: comment - - !EOP - - !--- local --- - logical :: bfbflag - integer(in) :: n,k ! counters - integer(in) :: npts,nptsg ! number of local/global pts in AV - integer(in) :: kflds ! number of fields in AV - real(r8), pointer :: sumbuf (:) ! sum buffer - real(r8), pointer :: maxbuf (:) ! max buffer - real(r8), pointer :: sumbufg(:) ! sum buffer reduced - real(r8), pointer :: maxbufg(:) ! max buffer reduced - integer(i8), pointer :: isumbuf (:) ! integer local sum - integer(i8), pointer :: isumbufg(:) ! integer global sum - integer(i8) :: ihuge ! huge - integer(in) :: mpicom ! mpi comm - integer(in) :: iam ! pe number - integer(in) :: km,ka ! field indices - integer(in) :: ns ! size of local AV - integer(in) :: rcode ! allocate return code - real(r8), pointer :: weight(:) ! weight - real(r8), allocatable :: weighted_data(:,:) ! weighted data - type(mct_string) :: mstring ! mct char type - character(CL) :: lcomment ! should be long enough - character(CL) :: itemc ! string converted to char - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_avect_mct) ' - character(*),parameter :: F00 = "('(seq_diag_avect_mct) ',4a)" - - !------------------------------------------------------------------------------- - ! 
print instantaneous budget data - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(ID, mpicom=mpicom, iam=iam) - call seq_infodata_GetData(infodata, bfbflag=bfbflag) - - lcomment = '' - if (present(comment)) then - lcomment=trim(comment) - endif - - ns = mct_aVect_lsize(AV) - npts = mct_aVect_lsize(dom%data) - if (ns /= npts) call shr_sys_abort(trim(subname)//' ERROR: size of AV,dom') - km = mct_aVect_indexRA(dom%data,'mask') - ka = mct_aVect_indexRA(dom%data,afldname) - kflds = mct_aVect_nRattr(AV) - allocate(sumbufg(kflds),stat=rcode) - if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate sumbufg') - - npts = mct_aVect_lsize(AV) - allocate(weight(npts),stat=rcode) - if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weight') - - weight(:) = 1.0_r8 - do n = 1,npts - if (dom%data%rAttr(km,n) <= 1.0e-06_R8) then - weight(n) = 0.0_r8 - else - weight(n) = dom%data%rAttr(ka,n)*shr_const_rearth*shr_const_rearth - endif - enddo - - if (bfbflag) then - allocate(weighted_data(npts,kflds),stat=rcode) - if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weighted_data') - - weighted_data = 0.0_r8 - do n = 1,npts - do k = 1,kflds - if (.not. shr_const_isspval(AV%rAttr(k,n))) then - weighted_data(n,k) = AV%rAttr(k,n)*weight(n) - endif - enddo - enddo - - call shr_reprosum_calc (weighted_data, sumbufg, npts, npts, kflds, & - commid=mpicom) - - deallocate(weighted_data) - - else - allocate(sumbuf(kflds),stat=rcode) - if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate sumbuf') - sumbuf = 0.0_r8 - - do n = 1,npts - do k = 1,kflds - if (.not. shr_const_isspval(AV%rAttr(k,n))) then - sumbuf(k) = sumbuf(k) + AV%rAttr(k,n)*weight(n) - endif - enddo - enddo - - !--- global reduction --- - call shr_mpi_sum(sumbuf,sumbufg,mpicom,subname) - - deallocate(sumbuf) - - endif - deallocate(weight) - - if (iam == 0) then - ! write(logunit,*) 'sdAV: *** writing ',trim(lcomment),': k fld min/max/sum ***' - do k = 1,kflds - call mct_aVect_getRList(mstring,k,AV) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - if (len_trim(lcomment) > 0) then - write(logunit,100) 'xxx','sorr',k,sumbufg(k),trim(lcomment),trim(itemc) - else - write(logunit,101) 'xxx','sorr',k,sumbufg(k),trim(itemc) - endif - enddo - call shr_sys_flush(logunit) - endif - - deallocate(sumbufg) - -100 format('comm_diag ',a3,1x,a4,1x,i3,es26.19,1x,a,1x,a) -101 format('comm_diag ',a3,1x,a4,1x,i3,es26.19,1x,a) - - end subroutine seq_diag_avect_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_avloc_mct - print local budget diagnostics - ! - ! !DESCRIPTION: - ! Print local diagnostics for AV/ID. - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_diag_avloc_mct(av, comment) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(mct_aVect) , intent(in) :: av - character(len=*), intent(in), optional :: comment - - !EOP - - !--- local --- - integer(in) :: n,k ! counters - integer(in) :: npts ! number of local/global pts in AV - integer(in) :: kflds ! number of fields in AV - real(r8), pointer :: sumbuf (:) ! sum buffer - type(mct_string) :: mstring ! mct char type - character(CL) :: lcomment ! should be long enough - character(CL) :: itemc ! 
string converted to char - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_avloc_mct) ' - character(*),parameter :: F00 = "('(seq_diag_avloc_mct) ',4a)" - - !------------------------------------------------------------------------------- - ! print instantaneous budget data - !------------------------------------------------------------------------------- - - lcomment = '' - if (present(comment)) then - lcomment=trim(comment) - endif - - npts = mct_aVect_lsize(AV) - kflds = mct_aVect_nRattr(AV) - allocate(sumbuf(kflds)) - - sumbuf = 0.0_r8 - do n = 1,npts - do k = 1,kflds - ! if (.not. shr_const_isspval(AV%rAttr(k,n))) then - sumbuf(k) = sumbuf(k) + AV%rAttr(k,n) - ! endif - enddo - enddo - - do k = 1,kflds - call mct_aVect_getRList(mstring,k,AV) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - if (len_trim(lcomment) > 0) then - write(logunit,100) 'xxx','sorr',k,sumbuf(k),trim(lcomment),trim(itemc) - else - write(logunit,101) 'xxx','sorr',k,sumbuf(k),trim(itemc) - endif - enddo - call shr_sys_flush(logunit) - - deallocate(sumbuf) - -100 format('avloc_diag ',a3,1x,a4,1x,i3,es26.19,1x,a,1x,a) -101 format('avloc_diag ',a3,1x,a4,1x,i3,es26.19,1x,a) - - end subroutine seq_diag_avloc_mct - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_diag_avdiff_mct - print global budget diagnostics - ! - ! !DESCRIPTION: - ! Print global diagnostics for AV/ID. - ! - ! !REVISION HISTORY: - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_diag_avdiff_mct(AV1,AV2,ID,comment) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(mct_aVect) , intent(in) :: AV1 - type(mct_aVect) , intent(in) :: AV2 - integer , intent(in) :: ID - character(len=*), intent(in), optional :: comment - - !EOP - - !--- local --- - integer(in) :: n,k,n1,k1,n2,k2 ! counters - integer(in) :: iam ! pe number - integer(in) :: cnt ! counter - real(r8) :: adiff,rdiff ! diff values - type(mct_string) :: mstring ! mct char type - character(len=64):: lcomment ! should be long enough - - !----- formats ----- - character(*),parameter :: subName = '(seq_diag_avdiff_mct) ' - character(*),parameter :: F00 = "('(seq_diag_avdiff_mct) ',4a)" - - !------------------------------------------------------------------------------- - ! print instantaneous budget data - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(ID,iam=iam) - - lcomment = '' - if (present(comment)) then - lcomment=trim(comment) - endif - - n1 = mct_aVect_lsize(AV1) - k1 = mct_aVect_nRattr(AV1) - n2 = mct_aVect_lsize(AV2) - k2 = mct_aVect_nRattr(AV2) - - if (n1 /= n2 .or. k1 /= k2) then - write(s_logunit,*) subname,trim(lcomment),' AV sizes different ',n1,n2,k1,k2 - return - endif - - do k = 1,k1 - cnt = 0 - adiff = 0. - rdiff = 0. 
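         ! For each field k, the loop below records how many points differ (cnt),
         ! the largest absolute difference adiff = max|AV1-AV2|, and the largest
         ! normalized difference rdiff = max |AV1-AV2|/(|AV1|+|AV2|); a summary
         ! line is written only when at least one point differs.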
- do n = 1,n1 - if (AV1%rAttr(k,n) /= AV2%rAttr(k,n)) then - cnt = cnt + 1 - adiff = max(adiff, abs(AV1%rAttr(k,n)-AV2%rAttr(k,n))) - rdiff = max(rdiff, abs(AV1%rAttr(k,n)-AV2%rAttr(k,n))/(abs(AV1%rAttr(k,n))+abs(AV2%rAttr(k,n)))) - endif - enddo - if (cnt > 0) then - call mct_aVect_getRList(mstring,k,AV1) - write(s_logunit,*) subname,trim(lcomment),' AVs fld k diff ', & - iam,mct_string_toChar(mstring),cnt,adiff,rdiff, & - minval(AV1%rAttr(k,:)),minval(AV1%rAttr(k,:)), & - maxval(AV1%rAttr(k,:)),maxval(AV2%rAttr(k,:)) - call mct_string_clean(mstring) - endif - enddo - - end subroutine seq_diag_avdiff_mct - -end module seq_diag_mct diff --git a/src/drivers/mct/main/seq_domain_mct.F90 b/src/drivers/mct/main/seq_domain_mct.F90 deleted file mode 100644 index 7437e566fd3..00000000000 --- a/src/drivers/mct/main/seq_domain_mct.F90 +++ /dev/null @@ -1,791 +0,0 @@ -module seq_domain_mct - - use shr_kind_mod, only: R8=>shr_kind_r8, IN=>shr_kind_in - use shr_kind_mod, only: CL=>shr_kind_cl - use shr_sys_mod, only: shr_sys_flush, shr_sys_abort - use shr_mpi_mod, only: shr_mpi_min, shr_mpi_max - - use mct_mod - use seq_comm_mct - use seq_infodata_mod - use seq_map_mod , only: seq_map_map - use seq_map_type_mod, only: seq_map - - use component_type_mod - - implicit none - private ! except -#include - save - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: seq_domain_check - public :: seq_domain_compare - public :: seq_domain_areafactinit - - !-------------------------------------------------------------------------- - ! Public variables - !-------------------------------------------------------------------------- - - real(R8), parameter :: eps_tiny = 1.0e-16_R8 ! roundoff eps - real(R8), parameter :: eps_big = 1.0e+02_R8 ! big eps - real(R8), parameter :: eps_frac_samegrid = 1.0e-9_R8 ! epsilon for fractions for samegrid - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: seq_domain_check_grid - - !================================================================================ -contains - !================================================================================ - - !================================================================================ - - subroutine seq_domain_check( infodata, & - atm, ice, lnd, ocn, rof, glc, & - samegrid_al, samegrid_ao, samegrid_ro, samegrid_lg) - - !----------------------------------------------------------- - ! Uses - ! - use prep_atm_mod, only: prep_atm_get_mapper_Fi2a - use prep_atm_mod, only: prep_atm_get_mapper_Fl2a - use prep_atm_mod, only: prep_atm_get_mapper_Fo2a - use prep_lnd_mod, only: prep_lnd_get_mapper_Fa2l - use prep_ocn_mod, only: prep_ocn_get_mapper_SFi2o - use prep_glc_mod, only: prep_glc_get_mapper_Fl2g - ! - ! Arguments - ! - type (seq_infodata_type) , intent(inout) :: infodata - type(component_type) , intent(in) :: atm - type(component_type) , intent(in) :: ice - type(component_type) , intent(in) :: lnd - type(component_type) , intent(in) :: ocn - type(component_type) , intent(in) :: rof - type(component_type) , intent(in) :: glc - logical , intent(in) :: samegrid_al ! atm lnd grid same - logical , intent(in) :: samegrid_ao ! atm ocn grid same - logical , intent(in) :: samegrid_ro ! rof ocn grid same - logical , intent(in) :: samegrid_lg ! 
lnd glc grid same - ! - ! Local variables - ! - type(seq_map) , pointer :: mapper_i2a ! inout needed for lower methods - type(seq_map) , pointer :: mapper_i2o ! inout needed for lower methods - type(seq_map) , pointer :: mapper_o2a ! - type(seq_map) , pointer :: mapper_l2g ! - type(seq_map) , pointer :: mapper_a2l ! - type(seq_map) , pointer :: mapper_l2a ! - ! - type(mct_gGrid) , pointer :: atmdom_a ! atm domain - type(mct_gGrid) , pointer :: icedom_i ! ice domain - type(mct_gGrid) , pointer :: lnddom_l ! lnd domain - type(mct_gGrid) , pointer :: ocndom_o ! ocn domain - type(mct_gGrid) , pointer :: glcdom_g ! glc domain - ! - type(mct_gsMap) , pointer :: gsMap_a ! atm global seg map - type(mct_gsMap) , pointer :: gsMap_i ! ice global seg map - type(mct_gsMap) , pointer :: gsMap_l ! lnd global seg map - type(mct_gsMap) , pointer :: gsMap_o ! ocn global seg map - type(mct_gsMap) , pointer :: gsMap_r ! ocn global seg map - type(mct_gsMap) , pointer :: gsMap_g ! glc global seg map - ! - type(mct_gGrid) :: lnddom_a ! lnd domain info on atm decomp - type(mct_gGrid) :: lnddom_g ! lnd domain info on glc decomp - type(mct_gGrid) :: icedom_a ! ice domain info on atm decomp (all grids same) - type(mct_gGrid) :: ocndom_a ! ocn domain info on atm decomp (all grids same) - type(mct_gGrid) :: icedom_o ! ocn domain info on ocn decomp (atm/ocn grid different) - ! - real(R8), pointer :: fracl(:) ! land fraction on atm decomp - real(R8), pointer :: fraco(:) ! ocn fraction on atm decomp - real(R8), pointer :: fraci(:) ! ice fraction on atm decomp - real(R8), pointer :: maskl(:) ! land mask on atm decomp (all grids same) - real(R8), pointer :: maski(:) ! ice mask on atm decomp (all grids same) - real(R8), pointer :: masko(:) ! ocn mask on atm decomp (all grids same) - ! - integer(IN) :: n ! indicies - ! - integer(IN) :: mpicom_cplid - ! - logical :: atm_present ! atm present flag - logical :: lnd_present ! lnd present flag - logical :: ocn_present ! ocn present flag - logical :: ice_present ! ice present flag - logical :: glc_present ! glc present flag - logical :: rof_present ! rof present flag - logical :: ocnrof_prognostic ! ocn rof prognostic flag - integer(IN) :: rcode ! error status - integer(IN) :: atmsize ! local size of atm grid - integer(IN) :: lndsize ! local size of land grid - integer(IN) :: ocnsize ! local size of ocn grid - integer(IN) :: icesize ! local size of ice grid - integer(IN) :: glcsize ! local size of glc grid - integer(IN) :: gatmsize ! global size of atm grid - integer(IN) :: glndsize ! global size of land grid - integer(IN) :: gocnsize ! global size of ocn grid - integer(IN) :: grofsize ! global size of ocn grid - integer(IN) :: gicesize ! global size of ice grid - integer(IN) :: gglcsize ! global size of glc grid - integer(IN) :: npts ! local size temporary - real(R8) :: diff,dmaxo,dmaxi ! difference tracker - logical :: iamroot ! local masterproc - real(R8) :: eps_frac ! epsilon for fractions - real(R8) :: eps_axmask ! epsilon for masks, atm/lnd - real(R8) :: eps_axgrid ! epsilon for grid coords, atm/lnd - real(R8) :: eps_axarea ! epsilon for areas, atm/lnd - real(R8) :: eps_oimask ! epsilon for masks, ocn/ice - real(R8) :: eps_oigrid ! epsilon for grid coords, ocn/ice - real(R8) :: eps_oiarea ! epsilon for areas, ocn/ice - real(R8) :: my_eps_frac ! local eps_frac value - ! - real(R8),allocatable :: mask (:) ! temporary real vector, domain mask - ! 
- character(*),parameter :: F00 = "('(seq_domain_check) ',4a)" - character(*),parameter :: F01 = "('(seq_domain_check) ',a,i6,a)" - character(*),parameter :: F02 = "('(seq_domain_check) ',a,g23.15)" - character(*),parameter :: F0R = "('(seq_domain_check) ',2A,2g23.15,A )" - character(*),parameter :: subName = '(seq_domain_check) ' - !----------------------------------------------------------- - - mapper_i2a => prep_atm_get_mapper_Fi2a() - mapper_i2o => prep_ocn_get_mapper_SFi2o() - mapper_o2a => prep_atm_get_mapper_Fo2a() - mapper_l2g => prep_glc_get_mapper_Fl2g() - mapper_a2l => prep_lnd_get_mapper_Fa2l() - mapper_l2a => prep_atm_get_mapper_Fl2a() - - call seq_comm_setptrs(CPLID,iamroot=iamroot, mpicom=mpicom_cplid) - - call seq_infodata_GetData( infodata, & - lnd_present=lnd_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - glc_present=glc_present, & - atm_present=atm_present, & - rof_present=rof_present, & - ocnrof_prognostic=ocnrof_prognostic, & - eps_frac=eps_frac, & - eps_amask=eps_axmask, & - eps_agrid=eps_axgrid, & - eps_aarea=eps_axarea, & - eps_omask=eps_oimask, & - eps_ogrid=eps_oigrid, & - eps_oarea=eps_oiarea ) - - ! Get info - - if (atm_present) then - gsmap_a => component_get_gsmap_cx(atm) ! gsmap_ax - atmdom_a => component_get_dom_cx(atm) ! dom_ax - atmsize = mct_avect_lsize(atmdom_a%data) - gatmsize = mct_gsMap_gsize(gsMap_a) - end if - - if (atm_present .and. lnd_present) then - gsmap_l => component_get_gsmap_cx(lnd) ! gsmap_lx - lnddom_l => component_get_dom_cx(lnd) ! dom_lx - lndsize = mct_avect_lsize(lnddom_l%data) - glndsize = mct_gsMap_gsize(gsMap_l) - - if (samegrid_al .and. gatmsize /= glndsize) then - write(logunit,*) subname,' error: global atmsize = ',& - gatmsize,' global lndsize= ',glndsize - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' atm and lnd grid must have the same global size') - end if - if (iamroot) write(logunit,F00) ' --- checking land maskfrac ---' - call seq_domain_check_fracmask(lnddom_l%data) - call mct_gGrid_init(oGGrid=lnddom_a, iGGrid=lnddom_l, lsize=atmsize) - call mct_aVect_zero(lnddom_a%data) - call seq_map_map(mapper_l2a, lnddom_l%data, lnddom_a%data, norm=.false.) - allocate(maskl(atmsize),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate maskl') - allocate(fracl(atmsize),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate fracl') - call mct_aVect_exportRAttr(lnddom_a%data, 'mask', maskl, atmsize) - call mct_aVect_exportRAttr(lnddom_a%data, 'frac', fracl, atmsize) - endif - - if (atm_present .and. ocn_present) then - gsmap_o => component_get_gsmap_cx(ocn) ! gsmap_ox - ocndom_o => component_get_dom_cx(ocn) ! dom_ox - ocnsize = mct_avect_lsize(ocndom_o%data) - gocnsize = mct_gsMap_gsize(gsMap_o) - - if (samegrid_ao .and. gatmsize /= gocnsize) then - write(logunit,*) subname,' error: global atmsize = ',gatmsize,' global ocnsize= ',gocnsize - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' atm and ocn grid must have the same global size') - end if - if (iamroot) write(logunit,F00) ' --- checking ocean maskfrac ---' - call seq_domain_check_fracmask(ocndom_o%data) - call mct_gGrid_init(oGGrid=ocndom_a, iGGrid=ocndom_o, lsize=atmsize) - call mct_aVect_zero(ocndom_a%data) - call seq_map_map(mapper_o2a, ocndom_o%data, ocndom_a%data, norm=.false.) 
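Once the land, ocean, and ice domains have been interpolated onto the atmosphere decomposition, seq_domain_check requires their fractions to be mutually consistent: at every atmosphere point the land fraction plus the ice (or ocean) fraction must equal one within a tolerance that is tighter when the grids are identical. A minimal sketch of that test, assuming NumPy arrays and hypothetical names (not the CIME API):

import numpy as np

def check_fraction_sum(fracl, fraco, eps_frac):
    """Require land + ocean (or ice) fractions to sum to 1 within eps_frac."""
    diff = np.abs(1.0 - fracl - fraco)
    bad = np.flatnonzero(diff > eps_frac)
    if bad.size:
        n = bad[0]
        raise ValueError(f"n={n} fracl={fracl[n]} fraco={fraco[n]} sum={fracl[n] + fraco[n]}")
    return float(diff.max())  # analogous to the dmaxo/dmaxi diagnostics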
- allocate(masko(atmsize),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate masko') - allocate(fraco(atmsize),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate fraco') - call mct_aVect_exportRAttr(ocndom_a%data, 'mask', masko, atmsize) - if (samegrid_ao) then - call mct_aVect_exportRattr(ocndom_a%data, 'frac', fraco, atmsize) - else - call mct_aVect_exportRattr(ocndom_a%data, 'mask', fraco, atmsize) - endif - endif - - if (atm_present .and. ice_present) then - gsmap_i => component_get_gsmap_cx(ice) ! gsmap_ix - icedom_i => component_get_dom_cx(ice) ! dom_ix - icesize = mct_avect_lsize(icedom_i%data) - gicesize = mct_gsMap_gsize(gsMap_i) - - if (samegrid_ao .and. gatmsize /= gicesize) then - write(logunit,*) subname,' error: global atmsize = ',& - gatmsize,' global icesize= ',gicesize - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' atm and ice grid must have the same global size') - end if - if (iamroot) write(logunit,F00) ' --- checking ice maskfrac ---' - call seq_domain_check_fracmask(icedom_i%data) - call mct_gGrid_init(oGGrid=icedom_a, iGGrid=icedom_i, lsize=atmsize) - call mct_aVect_zero(icedom_a%data) - call seq_map_map(mapper_i2a, icedom_i%data, icedom_a%data, norm=.false.) - allocate(maski(atmsize),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate maski') - allocate(fraci(atmsize),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate fraci') - call mct_aVect_exportRAttr(icedom_a%data, 'mask', maski, atmsize) - if (samegrid_ao) then - call mct_aVect_exportRattr(icedom_a%data, 'frac', fraci, atmsize) - else - call mct_aVect_exportRattr(icedom_a%data, 'mask', fraci, atmsize) - endif - endif - - if (lnd_present .and. glc_present) then - gsmap_l => component_get_gsmap_cx(lnd) ! gsmap_lx - lnddom_l => component_get_dom_cx(lnd) ! dom_lx - lndsize = mct_avect_lsize(lnddom_l%data) - glndsize = mct_gsMap_gsize(gsMap_l) - - gsmap_g => component_get_gsmap_cx(glc) ! gsmap_gx - glcdom_g => component_get_dom_cx(glc) ! dom_gx - glcsize = mct_avect_lsize(glcdom_g%data) - gglcsize = mct_gsMap_gsize(gsMap_g) - - if (samegrid_lg .and. gglcsize /= glndsize) then - write(logunit,*) subname,' error: global glcsize = ',gglcsize,' global lndsize= ',glndsize - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' glc and lnd grid must have the same global size') - end if - - if (iamroot) write(logunit,F00) ' --- checking glc maskfrac ---' - call seq_domain_check_fracmask(glcdom_g%data) - if (iamroot) write(logunit,F00) ' --- checking lnd maskfrac ---' - call seq_domain_check_fracmask(lnddom_l%data) - - if (samegrid_lg) then - call mct_gGrid_init(oGGrid=lnddom_g, iGGrid=lnddom_l, lsize=glcsize) - call mct_aVect_zero(lnddom_g%data) - call seq_map_map(mapper_l2g, lnddom_l%data, lnddom_g%data, norm=.false.) 
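The domain comparisons below (glc/lnd, ocn/ice, atm/lnd, atm/ocn, atm/ice) all go through seq_domain_check_grid, which compares one attribute of two domains point by point, skips points whose mask is zero, folds longitudes onto a common 360-degree branch before comparing them, and aborts if any retained point differs by more than the attribute's epsilon. A compact sketch of that comparison, assuming NumPy and illustrative names:

import numpy as np

def check_grid_attr(data1, data2, eps, mask=None, is_lon=False):
    """Return the max masked difference; raise if any masked point exceeds eps."""
    d1 = np.asarray(data1, float)
    d2 = np.asarray(data2, float)
    if is_lon:
        # fold periodic longitudes (degrees) onto the branch nearest d1
        d2 = d1 + (d2 - d1 + 180.0) % 360.0 - 180.0
    if mask is None:
        mask = np.ones_like(d1)
    diff = np.where(mask > 0.0, np.abs(d1 - d2), 0.0)
    if (diff > eps).any():
        raise ValueError("incompatible domain grid coordinates")
    return float(diff.max())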
- if (iamroot) write(logunit,F00) ' --- checking glc/lnd domains ---' - npts = glcsize - allocate(mask(npts),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate mask') - call mct_aVect_getRAttr(lnddom_g%data,"mask",mask,rcode) - where (mask < eps_axmask) mask = 0.0_R8 - call seq_domain_check_grid(glcdom_g%data, lnddom_g%data, 'mask', eps=eps_axmask, mpicom=mpicom_cplid, mask=mask) - call seq_domain_check_grid(glcdom_g%data, lnddom_g%data, 'lat' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=mask) - call seq_domain_check_grid(glcdom_g%data, lnddom_g%data, 'lon' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=mask) - call seq_domain_check_grid(glcdom_g%data, lnddom_g%data, 'area', eps=eps_axarea, mpicom=mpicom_cplid, mask=mask) - deallocate(mask,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate mask') - end if - - endif - - if (ice_present .and. ocn_present) then - gsmap_i => component_get_gsmap_cx(ice) ! gsmap_ix - icedom_i => component_get_dom_cx(ice) ! dom_ix - icesize = mct_avect_lsize(icedom_i%data) - gicesize = mct_gsMap_gsize(gsMap_i) - - gsmap_o => component_get_gsmap_cx(ocn) ! gsmap_ox - ocndom_o => component_get_dom_cx(ocn) ! dom_ox - ocnsize = mct_avect_lsize(ocndom_o%data) - gocnsize = mct_gsMap_gsize(gsMap_o) - - if (gocnsize /= gicesize) then - write(logunit,*) subname,' error: global ocnsize = ',gocnsize,' global icesize= ',gicesize - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' ocean and ice grid must have the same global size') - endif - call mct_gGrid_init(oGGrid=icedom_o, iGGrid=icedom_i, lsize=ocnsize) - call mct_aVect_zero(icedom_o%data) - call seq_map_map(mapper_i2o, icedom_i%data, icedom_o%data, norm=.false.) - end if - - if (rof_present .and. ocnrof_prognostic .and. samegrid_ro) then - gsmap_r => component_get_gsmap_cx(glc) ! gsmap_gx - grofsize = mct_gsMap_gsize(gsMap_r) - - if (gocnsize /= grofsize) then - write(logunit,*) subname,' error: global ocnsize = ',gocnsize,' global rofsize= ',grofsize - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' ocean and rof grid must have the same global size') - endif - end if - - !------------------------------------------------------------------------------ - ! Check ice/ocean grid consistency - !------------------------------------------------------------------------------ - - if (ocn_present .and. ice_present) then - ! if (samegrid_oi) then ! doesn't yet exist - - npts = ocnsize - allocate(mask(npts),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate mask') - - if (iamroot) write(logunit,F00) ' --- checking ocn/ice domains ---' - call seq_domain_check_grid(ocndom_o%data, icedom_o%data,'mask', eps=eps_oigrid, mpicom=mpicom_cplid) - call mct_aVect_getRAttr(ocndom_o%data,"mask",mask,rcode) - where (mask < eps_oimask) mask = 0.0_R8 - - call seq_domain_check_grid(ocndom_o%data, icedom_o%data,'lat' , eps=eps_oigrid, mpicom=mpicom_cplid, mask=mask) - call seq_domain_check_grid(ocndom_o%data, icedom_o%data,'lon' , eps=eps_oigrid, mpicom=mpicom_cplid, mask=mask) - call seq_domain_check_grid(ocndom_o%data, icedom_o%data,'area', eps=eps_oiarea, mpicom=mpicom_cplid, mask=mask) - - deallocate(mask,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate mask') - - ! endif - endif - - !------------------------------------------------------------------------------ - ! Check atm/lnd grid consistency - !------------------------------------------------------------------------------ - - if (atm_present .and. lnd_present .and. 
samegrid_al) then - if (iamroot) write(logunit,F00) ' --- checking atm/land domains ---' - call seq_domain_check_grid(atmdom_a%data, lnddom_a%data, 'lat' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=maskl) - call seq_domain_check_grid(atmdom_a%data, lnddom_a%data, 'lon' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=maskl) - call seq_domain_check_grid(atmdom_a%data, lnddom_a%data, 'area', eps=eps_axarea, mpicom=mpicom_cplid, mask=maskl) - endif - - !------------------------------------------------------------------------------ - ! Check atm/ocn and atm/ice grid consistency (if samegrid) - !------------------------------------------------------------------------------ - - if (atm_present .and. ice_present .and. samegrid_ao) then - if (iamroot) write(logunit,F00) ' --- checking atm/ice domains ---' - call seq_domain_check_grid(atmdom_a%data, icedom_a%data, 'lat' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=maski) - call seq_domain_check_grid(atmdom_a%data, icedom_a%data, 'lon' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=maski) - call seq_domain_check_grid(atmdom_a%data, icedom_a%data, 'area', eps=eps_axarea, mpicom=mpicom_cplid, mask=maski) - endif - - if (atm_present .and. ocn_present .and. samegrid_ao) then - if (iamroot) write(logunit,F00) ' --- checking atm/ocn domains ---' - call seq_domain_check_grid(atmdom_a%data, ocndom_a%data, 'lat' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=masko) - call seq_domain_check_grid(atmdom_a%data, ocndom_a%data, 'lon' , eps=eps_axgrid, mpicom=mpicom_cplid, mask=masko) - call seq_domain_check_grid(atmdom_a%data, ocndom_a%data, 'area', eps=eps_axarea, mpicom=mpicom_cplid, mask=masko) - endif - - !------------------------------------------------------------------------------ - ! Check consistency of land fraction with ocean mask on grid - !------------------------------------------------------------------------------ - - if (atm_present) then - my_eps_frac = eps_frac - if (samegrid_ao) my_eps_frac = eps_frac_samegrid - if (.not. samegrid_al) my_eps_frac = eps_big - - if (iamroot) write(logunit,F00) ' --- checking fractions in domains ---' - dmaxi = 0.0_R8 - dmaxo = 0.0_R8 - do n = 1,atmsize - if (lnd_present .and. ice_present) then - diff = abs(1._R8 - fracl(n) - fraci(n)) - dmaxi = max(diff,dmaxi) - if (diff > my_eps_frac) then - write(logunit,*)'inconsistency between land fraction and sea ice fraction' - write(logunit,*)'n= ',n,' fracl= ',fracl(n),' fraci= ',fraci(n),' sum= ',fracl(n)+fraci(n) - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' inconsistency between land fraction and sea ice fraction') - end if - if ((1._R8-fraci(n)) > eps_frac .and. fracl(n) < eps_tiny) then - write(logunit,*)'inconsistency between land mask and sea ice mask' - write(logunit,*)'n= ',n,' fracl= ',fracl(n),' fraci= ',fraci(n) - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' inconsistency between land mask and sea ice mask') - end if - endif - if (lnd_present .and. ocn_present) then - diff = abs(1._R8 - fracl(n) - fraco(n)) - dmaxo = max(diff,dmaxo) - if (diff > my_eps_frac) then - write(logunit,*)'inconsistency between land fraction and ocn land fraction' - write(logunit,*)'n= ',n,' fracl= ',fracl(n),' fraco= ',fraco(n),' sum= ',fracl(n)+fraco(n) - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' inconsistency between land fraction and ocn land fraction') - end if - if ((1._R8-fraco(n)) > eps_frac .and. 
fracl(n) < eps_tiny) then - write(logunit,*)'inconsistency between land mask and ocn land mask' - write(logunit,*)'n= ',n,' fracl= ',fracl(n),' fraco= ',fraco(n) - call shr_sys_flush(logunit) - call shr_sys_abort(subname//' inconsistency between land mask and ocn land mask') - end if - endif - end do - if (iamroot) then - write(logunit,F02) ' maximum difference for ofrac sum ',dmaxo - write(logunit,F02) ' maximum difference for ifrac sum ',dmaxi - write(logunit,F02) ' maximum allowable difference for frac sum ',my_eps_frac - write(logunit,F02) ' maximum allowable tolerance for valid frac ',eps_frac - call shr_sys_flush(logunit) - end if - end if - - !------------------------------------------------------------------------------ - ! Clean up allocated memory - !------------------------------------------------------------------------------ - - if (atm_present .and. lnd_present) then - deallocate(fracl,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate fracl') - deallocate(maskl,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate maskl') - call mct_gGrid_clean(lnddom_a, rcode) - if(rcode /= 0) call shr_sys_abort(subname//' clean lnddom_a') - endif - - if (atm_present .and. ocn_present) then - deallocate(fraco,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate fraco') - deallocate(masko,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate masko') - call mct_gGrid_clean(ocndom_a, rcode) - if(rcode /= 0) call shr_sys_abort(subname//' clean ocndom_a') - endif - - if (atm_present .and. ice_present) then - deallocate(fraci,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate fraci') - deallocate(maski,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate maski') - call mct_gGrid_clean(icedom_a, rcode) - if(rcode /= 0) call shr_sys_abort(subname//' clean icedom_o') - endif - - if (ocn_present .and. ice_present) then - call mct_gGrid_clean(icedom_o, rcode) - if(rcode /= 0) call shr_sys_abort(subname//' clean icedom_o') - endif - - call shr_sys_flush(logunit) - - end subroutine seq_domain_check - - !=============================================================================== - - subroutine seq_domain_compare(dom1, dom2, mpicom, eps) - - !----------------------------------------------------------- - - ! Arguments - - type(mct_gGrid) , intent(in) :: dom1 - type(mct_gGrid) , intent(in) :: dom2 - integer(IN) , intent(in) :: mpicom - real(R8),optional, intent(in) :: eps ! error condition for compare - - ! 
Local variables - real(R8) :: leps - character(*),parameter :: F00 = "('(seq_domain_compare) ',4a)" - character(*),parameter :: F01 = "('(seq_domain_compare) ',a,i12,a)" - character(*),parameter :: F02 = "('(seq_domain_compare) ',2a,g23.15)" - character(*),parameter :: F0R = "('(seq_domain_compare) ',2A,2g23.15,A )" - character(*),parameter :: subName = '(seq_domain_compare) ' - - leps = eps_tiny - if (present(eps)) then - leps = eps - endif - - call seq_domain_check_grid(dom1%data, dom2%data, 'mask', eps=leps, mpicom=mpicom) - call seq_domain_check_grid(dom1%data, dom2%data, 'lat' , eps=leps, mpicom=mpicom) - call seq_domain_check_grid(dom1%data, dom2%data, 'lon' , eps=leps, mpicom=mpicom) - call seq_domain_check_grid(dom1%data, dom2%data, 'area', eps=leps, mpicom=mpicom) - - end subroutine seq_domain_compare - - !=============================================================================== - - subroutine seq_domain_check_fracmask(dom1) - - !----------------------------------------------------------- - - ! Arguments - - type(mct_aVect) , intent(in) :: dom1 - - ! Local variables - integer(in) :: n,npts,ndiff - integer(in) :: rcode - real(R8), pointer :: dmask(:) ! temporaries - real(R8), pointer :: dfrac(:) ! temporaries - - character(*),parameter :: F00 = "('(seq_domain_check_fracmask) ',4a)" - character(*),parameter :: F01 = "('(seq_domain_check_fracmask) ',a,i12,a)" - character(*),parameter :: F02 = "('(seq_domain_check_fracmask) ',2a,g23.15)" - character(*),parameter :: F0R = "('(seq_domain_check_fracmask) ',2A,2g23.15,A )" - character(*),parameter :: subName = '(seq_domain_check_fracmask) ' - !----------------------------------------------------------- - - npts = mct_aVect_lsize(dom1) - - allocate(dmask(npts),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate dmask') - allocate(dfrac(npts),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate dfrac') - - call mct_aVect_exportRAttr(dom1, 'mask', dmask, npts) - call mct_aVect_exportRAttr(dom1, 'frac', dfrac, npts) - - ndiff = 0 - do n = 1,npts - if (abs(dfrac(n)) > eps_tiny .and. abs(dmask(n)) < eps_tiny) then - !debug write(logunit,*)'n= ',n,' dfrac= ',dfrac(n),' dmask= ',dmask(n) - ndiff = ndiff + 1 - endif - enddo - - if (ndiff > 0) then - write(logunit,*) trim(subname)," ERROR: incompatible domain mask and frac values" - call shr_sys_flush(logunit) - call shr_sys_abort(subName//" incompatible domain mask and frac values") - endif - - deallocate(dmask,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate dmask') - deallocate(dfrac,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate dfrac') - - end subroutine seq_domain_check_fracmask - - !=============================================================================== - - subroutine seq_domain_check_grid(dom1, dom2, attr, eps, mpicom, mask) - - !----------------------------------------------------------- - - ! Arguments - - type(mct_aVect) , intent(in) :: dom1 - type(mct_aVect) , intent(in) :: dom2 - character(len=*), intent(in) :: attr ! grid attribute to compare - real(R8) , intent(in) :: eps ! error condition for compare - integer(IN) , intent(in) :: mpicom - real(R8) , intent(in), optional :: mask(:) - - ! Local variables - - integer(in) :: n,ndiff ! indices - integer(in) :: npts1,npts2,npts ! counters - integer(in) :: rcode ! error code - real(R8) :: diff,max_diff ! temporaries - real(R8) :: tot_diff ! maximum diff across all pes - integer(IN) :: ier ! error code - real(R8), pointer :: data1(:) ! 
temporaries - real(R8), pointer :: data2(:) ! temporaries - real(R8), pointer :: lmask(:) ! temporaries - logical :: iamroot ! local masterproc - - character(*),parameter :: F00 = "('(seq_domain_check_grid) ',4a)" - character(*),parameter :: F01 = "('(seq_domain_check_grid) ',a,i12,a)" - character(*),parameter :: F02 = "('(seq_domain_check_grid) ',2a,g23.15)" - character(*),parameter :: F0R = "('(seq_domain_check_grid) ',2A,2g23.15,A )" - character(*),parameter :: subName = '(seq_domain_check_grid) ' - !----------------------------------------------------------- - - call seq_comm_setptrs(CPLID,iamroot=iamroot) - - npts1 = mct_aVect_lsize(dom1) - npts2 = mct_aVect_lsize(dom2) - npts = npts1 - - if (npts1 == npts2) then - if (iamroot) write(logunit,F01) " the domain size is = ", npts - else - write(logunit,*) trim(subname)," domain size #1 = ", npts1 - write(logunit,*) trim(subname)," domain size #2 = ", npts2 - write(logunit,*) trim(subname)," ERROR: domain size mis-match" - call shr_sys_abort(subName//" ERROR: domain size mis-match") - end if - - allocate(data1(npts),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate data1') - allocate(data2(npts),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate data2') - allocate(lmask(npts),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate lmask') - - call mct_aVect_exportRAttr(dom1, trim(attr), data1, npts) - call mct_aVect_exportRAttr(dom2, trim(attr), data2, npts) - lmask = 1.0_R8 - if (present(mask)) then - if (size(mask) /= npts) then - call shr_sys_abort(subName//" ERROR: mask size mis-match") - endif - lmask = mask - endif - - ! --- adjust lons to address wraparound issues, we're assuming degree here! --- - - if (trim(attr) == "lon") then - do n = 1,npts - if (data2(n) > data1(n)) then - do while ( (data1(n)+360.0_R8) < (data2(n)+180.0_R8) ) ! longitude is periodic - data1(n) = data1(n) + 360.0_R8 - end do - else - do while ( (data2(n)+360.0_R8) < (data1(n)+180.0_R8) ) ! longitude is periodic - data2(n) = data2(n) + 360.0_R8 - end do - endif - enddo - endif - - ! 
Only check consistency where mask is greater than zero, if mask is present - - max_diff = 0.0_R8 - ndiff = 0 - do n=1,npts - if (lmask(n) > eps_tiny) then - diff = abs(data1(n)-data2(n)) - max_diff = max(max_diff,diff) - if (diff > eps) then - write(logunit,150) n,data1(n),data2(n),diff,eps - ndiff = ndiff + 1 - endif - end if - end do -150 format('seq_domain_check_grid - n:',I3,' d1:',F12.6,' d2:',F12.6,' diff:',F18.14,' eps:',F18.14) - - call mpi_reduce(max_diff,tot_diff,1,MPI_REAL8,MPI_MAX,0,mpicom,ier) - if (iamroot) then - write(logunit,F02) " maximum difference for ",trim(attr),tot_diff - write(logunit,F02) " maximum allowable difference for ",trim(attr),eps - call shr_sys_flush(logunit) - endif - call mpi_barrier(mpicom,ier) - - if (ndiff > 0) then - write(logunit,*) trim(subname)," ERROR: incompatible domain grid coordinates" - call shr_sys_flush(logunit) - call shr_sys_abort(subName//" incompatible domain grid coordinates") - endif - - deallocate(data1,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate data1') - deallocate(data2,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate data2') - deallocate(lmask,stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' deallocate lmask') - - end subroutine seq_domain_check_grid - - !=============================================================================== - - subroutine seq_domain_areafactinit(domain, mdl2drv, drv2mdl, & - samegrid, mpicom, iamroot, comment) - !----------------------------------------------------------- - ! - ! Arguments - ! - type(mct_gGrid) , pointer :: domain ! component domain on component pes - real(R8) , pointer :: mdl2drv(:) ! comp->cpl factor on component pes - real(R8) , pointer :: drv2mdl(:) ! cpl->comp factor on component pes - logical , intent(in) :: samegrid ! true => two grids are same - integer , intent(in) :: mpicom ! mpi communicator on component pes - logical , intent(in) :: iamroot - character(len=*) , optional,intent(in) :: comment - ! - ! Local variables - ! - integer :: j1,j2,m1,n,rcode - integer :: gridsize - real(R8) :: rmin1,rmax1,rmin,rmax - real(R8) :: rmask,rarea,raream - character(cl) :: lcomment - character(len=*),parameter :: subName = '(seq_domain_areafactinit) ' - character(len=*),parameter :: F0R = "(2A,2g23.15,A )" - ! - !----------------------------------------------------------- - - lcomment = '' - if (present(comment)) lcomment = comment - - ! get sizes - - gridsize = mct_gGrid_lsize(domain) - allocate(drv2mdl(gridsize),mdl2drv(gridsize),stat=rcode) - if(rcode /= 0) call shr_sys_abort(subname//' allocate area correction factors') - - j1 = mct_gGrid_indexRA(domain,"area" ,dieWith=subName) - j2 = mct_gGrid_indexRA(domain,"aream" ,dieWith=subName) - m1 = mct_gGrid_indexRA(domain,"mask" ,dieWith=subName) - - mdl2drv(:)=1.0_R8 - drv2mdl(:)=1.0_R8 - - if (samegrid) then - ! default 1.0 - else - do n=1,gridsize - rmask = domain%data%rAttr(m1,n) - rarea = domain%data%rAttr(j1,n) - raream = domain%data%rAttr(j2,n) - if ( abs(rmask) >= 1.0e-06) then - if (rarea * raream /= 0.0_R8) then - mdl2drv(n) = rarea/raream - drv2mdl(n) = 1.0_R8/mdl2drv(n) - !if (mdl2drv(n) > 10.0 .or. mdl2drv(n) < 0.1) then - ! write(logunit,*) trim(subname),' WARNING area,aream= ', & - ! 
domain%data%rAttr(j1,n),domain%data%rAttr(j2,n),' in ',n,gridsize - !endif - else - write(logunit,*) trim(subname),' ERROR area,aream= ', & - rarea,raream,' in ',n,gridsize - call shr_sys_flush(logunit) - call shr_sys_abort() - endif - endif - enddo - end if - - rmin1 = minval(mdl2drv) - rmax1 = maxval(mdl2drv) - call shr_mpi_min(rmin1,rmin,mpicom) - call shr_mpi_max(rmax1,rmax,mpicom) - if (iamroot) write(logunit,F0R) trim(subname),' : min/max mdl2drv ',rmin,rmax,trim(lcomment) - - rmin1 = minval(drv2mdl) - rmax1 = maxval(drv2mdl) - call shr_mpi_min(rmin1,rmin,mpicom) - call shr_mpi_max(rmax1,rmax,mpicom) - if (iamroot) write(logunit,F0R) trim(subname),' : min/max drv2mdl ',rmin,rmax,trim(lcomment) - if (iamroot) call shr_sys_flush(logunit) - - end subroutine seq_domain_areafactinit - - !=============================================================================== - -end module seq_domain_mct diff --git a/src/drivers/mct/main/seq_flux_mct.F90 b/src/drivers/mct/main/seq_flux_mct.F90 deleted file mode 100644 index 14487c29f5c..00000000000 --- a/src/drivers/mct/main/seq_flux_mct.F90 +++ /dev/null @@ -1,1527 +0,0 @@ -module seq_flux_mct - - use shr_kind_mod, only: r8 => shr_kind_r8, in=>shr_kind_in - use shr_sys_mod, only: shr_sys_abort - use shr_flux_mod, only: shr_flux_atmocn, shr_flux_atmocn_ua, shr_flux_atmocn_diurnal, shr_flux_adjust_constants - use shr_orb_mod, only: shr_orb_params, shr_orb_cosz, shr_orb_decl - use shr_mct_mod, only: shr_mct_queryConfigFile, shr_mct_sMatReaddnc - - use mct_mod - use seq_flds_mod - use seq_comm_mct - use seq_infodata_mod - - use component_type_mod - - implicit none - private - save - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public seq_flux_init_mct - public seq_flux_initexch_mct - - public seq_flux_ocnalb_mct - - public seq_flux_atmocn_mct - public seq_flux_atmocnexch_mct - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - real(r8), pointer :: lats(:) ! latitudes (degrees) - real(r8), pointer :: lons(:) ! longitudes (degrees) - integer(in),allocatable :: mask(:) ! ocn domain mask: 0 <=> inactive cell - integer(in),allocatable :: emask(:) ! ocn mask on exchange grid decomp - - real(r8), allocatable :: uocn (:) ! ocn velocity, zonal - real(r8), allocatable :: vocn (:) ! ocn velocity, meridional - real(r8), allocatable :: tocn (:) ! ocean temperature - real(r8), allocatable :: zbot (:) ! atm level height - real(r8), allocatable :: ubot (:) ! atm velocity, zonal - real(r8), allocatable :: vbot (:) ! atm velocity, meridional - real(r8), allocatable :: thbot(:) ! atm potential T - real(r8), allocatable :: shum (:) ! atm specific humidity - real(r8), allocatable :: shum_16O (:) ! atm H2O tracer - real(r8), allocatable :: shum_HDO (:) ! atm HDO tracer - real(r8), allocatable :: shum_18O (:) ! atm H218O tracer - real(r8), allocatable :: roce_16O (:) ! ocn H2O ratio - real(r8), allocatable :: roce_HDO (:) ! ocn HDO ratio - real(r8), allocatable :: roce_18O (:) ! ocn H218O ratio - real(r8), allocatable :: dens (:) ! atm density - real(r8), allocatable :: tbot (:) ! atm bottom surface T - real(r8), allocatable :: pslv (:) ! sea level pressure (Pa) - real(r8), allocatable :: sen (:) ! heat flux: sensible - real(r8), allocatable :: lat (:) ! heat flux: latent - real(r8), allocatable :: lwup (:) ! 
lwup over ocean - real(r8), allocatable :: evap (:) ! water flux: evaporation - real(r8), allocatable :: evap_16O (:) !H2O flux: evaporation - real(r8), allocatable :: evap_HDO (:) !HDO flux: evaporation - real(r8), allocatable :: evap_18O (:) !H218O flux: evaporation - real(r8), allocatable :: taux (:) ! wind stress, zonal - real(r8), allocatable :: tauy (:) ! wind stress, meridional - real(r8), allocatable :: tref (:) ! diagnostic: 2m ref T - real(r8), allocatable :: qref (:) ! diagnostic: 2m ref Q - real(r8), allocatable :: duu10n(:) ! diagnostic: 10m wind speed squared - - real(r8), allocatable :: fswpen (:) ! fraction of sw penetrating ocn surface layer - real(r8), allocatable :: ocnsal (:) ! ocean salinity - real(r8), allocatable :: uGust (:) ! wind gust - real(r8), allocatable :: lwdn (:) ! long wave, downward - real(r8), allocatable :: swdn (:) ! short wave, downward - real(r8), allocatable :: swup (:) ! short wave, upward - real(r8), allocatable :: prec (:) ! precip - - ! Diurnal cycle variables wrt flux - - real(r8), allocatable :: tbulk (:) ! diagnostic: ocn bulk T - real(r8), allocatable :: tskin (:) ! diagnostic: ocn skin T - real(r8), allocatable :: tskin_night(:) ! diagnostic: ocn skin T - real(r8), allocatable :: tskin_day (:) ! diagnostic: ocn skin T - real(r8), allocatable :: cSkin (:) ! diagnostic: ocn cool skin - real(r8), allocatable :: cSkin_night(:) ! diagnostic: ocn cool skin - real(r8), allocatable :: warm (:) ! diagnostic: ocn warming - real(r8), allocatable :: salt (:) ! diagnostic: ocn salting - real(r8), allocatable :: speed (:) ! diagnostic: ocn speed - real(r8), allocatable :: regime (:) ! diagnostic: ocn regime - real(r8), allocatable :: warmMax (:) ! diagnostic: ocn warming, max daily value - real(r8), allocatable :: windMax (:) ! diagnostic: ocn wind , max daily value - real(r8), allocatable :: QsolAvg (:) ! diagnostic: ocn Qsol , daily avg - real(r8), allocatable :: windAvg (:) ! diagnostic: ocn wind , daily avg - real(r8), allocatable :: warmMaxInc (:) ! diagnostic: ocn warming, max daily value, increment - real(r8), allocatable :: windMaxInc (:) ! diagnostic: ocn wind , max daily value, increment - real(r8), allocatable :: qSolInc (:) ! diagnostic: ocn Qsol , daily avg, increment - real(r8), allocatable :: windInc (:) ! diagnostic: ocn wind , daily avg, increment - real(r8), allocatable :: nInc (:) ! diagnostic: a/o flux , increment - - real(r8), allocatable :: ustar(:) ! saved ustar - real(r8), allocatable :: re (:) ! saved re - real(r8), allocatable :: ssq (:) ! saved sq - - ! Conversion from degrees to radians - - real(r8),parameter :: const_pi = SHR_CONST_PI ! pi - real(r8),parameter :: const_deg2rad = const_pi/180.0_r8 ! deg to rads - - ! 
Coupler field indices - - integer :: index_a2x_Sa_z - integer :: index_a2x_Sa_u - integer :: index_a2x_Sa_v - integer :: index_a2x_Sa_tbot - integer :: index_a2x_Sa_ptem - integer :: index_a2x_Sa_shum - integer :: index_a2x_Sa_shum_16O - integer :: index_a2x_Sa_shum_HDO - integer :: index_a2x_Sa_shum_18O - integer :: index_a2x_Sa_dens - integer :: index_a2x_Sa_pslv - integer :: index_a2x_Faxa_swndr - integer :: index_a2x_Faxa_swndf - integer :: index_a2x_Faxa_swvdr - integer :: index_a2x_Faxa_swvdf - integer :: index_a2x_Faxa_lwdn - integer :: index_a2x_Faxa_rainc - integer :: index_a2x_Faxa_rainl - integer :: index_a2x_Faxa_snowc - integer :: index_a2x_Faxa_snowl - integer :: index_o2x_So_t - integer :: index_o2x_So_u - integer :: index_o2x_So_v - integer :: index_o2x_So_fswpen - integer :: index_o2x_So_s - integer :: index_o2x_So_roce_16O - integer :: index_o2x_So_roce_HDO - integer :: index_o2x_So_roce_18O - integer :: index_xao_So_tref - integer :: index_xao_So_qref - integer :: index_xao_So_avsdr - integer :: index_xao_So_avsdf - integer :: index_xao_So_anidr - integer :: index_xao_So_anidf - integer :: index_xao_Faox_taux - integer :: index_xao_Faox_tauy - integer :: index_xao_Faox_lat - integer :: index_xao_Faox_sen - integer :: index_xao_Faox_evap - integer :: index_xao_Faox_evap_16O - integer :: index_xao_Faox_evap_HDO - integer :: index_xao_Faox_evap_18O - integer :: index_xao_Faox_lwup - integer :: index_xao_Faox_swdn - integer :: index_xao_Faox_swup - integer :: index_xao_So_ustar - integer :: index_xao_So_re - integer :: index_xao_So_ssq - integer :: index_xao_So_duu10n - integer :: index_xao_So_u10 - integer :: index_xao_So_fswpen - integer :: index_xao_So_warm_diurn - integer :: index_xao_So_salt_diurn - integer :: index_xao_So_speed_diurn - integer :: index_xao_So_regime_diurn - integer :: index_xao_So_tskin_diurn - integer :: index_xao_So_tskin_day_diurn - integer :: index_xao_So_tskin_night_diurn - integer :: index_xao_So_cskin_diurn - integer :: index_xao_So_cskin_night_diurn - integer :: index_xao_So_tbulk_diurn - integer :: index_xao_So_warmmax_diurn - integer :: index_xao_So_windmax_diurn - integer :: index_xao_So_qsolavg_diurn - integer :: index_xao_So_windavg_diurn - integer :: index_xao_So_warmmaxinc_diurn - integer :: index_xao_So_windmaxinc_diurn - integer :: index_xao_So_qsolinc_diurn - integer :: index_xao_So_windinc_diurn - integer :: index_xao_So_ninc_diurn - - character(len=16) :: fluxsetting = 'unknown' - character(len=*),parameter :: fluxsetting_atmocn = 'atmocn' - character(len=*),parameter :: fluxsetting_exchange = 'exchange' - - !--- for exchange grid --- - type(mct_rearr) :: Re_a2e, Re_e2a, Re_o2e, Re_e2o ! atm/ocn/exch rearrangers - type(mct_sMat ) :: sMata2o, sMato2a ! decomp sMat - type(mct_gsMap) :: gsmap_ae, gsmap_oe ! gsmaps for atm/ocn on exch grid - integer(in) :: nloc_a2o,nloc_o2a,nloc_o,nloc_a,nloc_ae,nloc_oe - - !=============================================================================== -contains - !=============================================================================== - - subroutine seq_flux_init_mct(comp, fractions) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(component_type), intent(in) :: comp - type(mct_aVect), intent(in) :: fractions - ! - ! Local variables - ! - type(mct_gsMap), pointer :: gsMap - type(mct_gGrid), pointer :: dom - integer(in) :: nloc - integer :: ko,ki ! fractions indices - integer :: ier - real(r8), pointer :: rmask(:) ! 
ocn domain mask - character(*),parameter :: subName = '(seq_flux_init_mct) ' - !----------------------------------------------------------------------- - - gsmap => component_get_gsmap_cx(comp) - dom => component_get_dom_cx(comp) - - nloc = mct_avect_lsize(dom%data) - - ! Input fields atm - allocate( zbot(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate zbot',ier) - zbot = 0.0_r8 - allocate( ubot(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate ubot',ier) - ubot = 0.0_r8 - allocate( vbot(nloc)) - if(ier/=0) call mct_die(subName,'allocate vbot',ier) - vbot = 0.0_r8 - allocate(thbot(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate thbot',ier) - thbot = 0.0_r8 - allocate(shum(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum',ier) - shum = 0.0_r8 - allocate(shum_16O(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum_16O',ier) - shum_16O = 0.0_r8 - allocate(shum_HDO(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum_HDO',ier) - shum_HDO = 0.0_r8 - allocate(shum_18O(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum_18O',ier) - shum_18O = 0.0_r8 - allocate(dens(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate dens',ier) - dens = 0.0_r8 - allocate(tbot(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tbot',ier) - tbot = 0.0_r8 - allocate(pslv(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate pslv',ier) - pslv = 0.0_r8 - allocate(ustar(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate ustar',ier) - ustar = 0.0_r8 - allocate(re(nloc), stat=ier) - if(ier/=0) call mct_die(subName,'allocate re',ier) - re = 0.0_r8 - allocate(ssq(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate ssq',ier) - ssq = 0.0_r8 - allocate( uocn(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate uocn',ier) - uocn = 0.0_r8 - allocate( vocn(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate vocn',ier) - vocn = 0.0_r8 - allocate( tocn(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tocn',ier) - tocn = 0.0_r8 - allocate(roce_16O(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate roce_16O',ier) - roce_16O = 0.0_r8 - allocate(roce_HDO(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate roce_HDO',ier) - roce_HDO = 0.0_r8 - allocate(roce_18O(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate roce_18O',ier) - roce_18O = 0.0_r8 - - ! 
Output fields - allocate(sen (nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate sen',ier) - sen = 0.0_r8 - allocate(lat (nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate lat',ier) - lat = 0.0_r8 - allocate(evap(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap',ier) - evap = 0.0_r8 - allocate(evap_16O(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap_16O',ier) - evap_16O = 0.0_r8 - allocate(evap_HDO(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap_HDO',ier) - evap_HDO = 0.0_r8 - allocate(evap_18O(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap_18O',ier) - evap_18O = 0.0_r8 - allocate(lwup(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate lwup',ier) - lwup = 0.0_r8 - allocate(taux(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate taux',ier) - taux = 0.0_r8 - allocate(tauy(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tauy',ier) - tauy = 0.0_r8 - allocate(tref(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tref',ier) - tref = 0.0_r8 - allocate(qref(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate qref',ier) - qref = 0.0_r8 - allocate(duu10n(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate duu10n',ier) - duu10n = 0.0_r8 - - !--- flux_diurnal cycle flux fields --- - allocate(uGust(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate uGust',ier) - uGust = 0.0_r8 - allocate(lwdn(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate lwdn',ier) - lwdn = 0.0_r8 - allocate(swdn(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate swdn',ier) - swdn = 0.0_r8 - allocate(swup(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate swup',ier) - swup = 0.0_r8 - allocate(prec(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate prec',ier) - prec = 0.0_r8 - allocate(fswpen(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate fswpen',ier) - fswpen = 0.0_r8 - allocate(ocnsal(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate ocnsal',ier) - ocnsal = 0.0_r8 - - allocate(tbulk(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tbulk',ier) - tbulk = 0.0_r8 - allocate(tskin(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tskin',ier) - tskin = 0.0_r8 - allocate(tskin_day(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tskin_day',ier) - tskin_day = 0.0_r8 - allocate(tskin_night(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tskin_night',ier) - tskin_night = 0.0_r8 - allocate(cskin(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate cskin',ier) - cskin = 0.0_r8 - allocate(cskin_night(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate cskin_night',ier) - cskin_night = 0.0_r8 - - allocate(warm(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate warm',ier) - warm = 0.0_r8 - allocate(salt(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate salt',ier) - salt = 0.0_r8 - allocate(speed(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate speed',ier) - speed = 0.0_r8 - allocate(regime(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate regime',ier) - regime = 0.0_r8 - allocate(warmMax(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate warmMax',ier) - warmMax = 0.0_r8 - allocate(windMax(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate windMax',ier) - windMax = 0.0_r8 - allocate(qSolAvg(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate qSolAvg',ier) - qSolAvg = 0.0_r8 - allocate(windAvg(nloc),stat=ier) - 
if(ier/=0) call mct_die(subName,'allocate windAvg',ier) - windAvg = 0.0_r8 - - allocate(warmMaxInc(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate warmMaxInc',ier) - warmMaxInc = 0.0_r8 - allocate(windMaxInc(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate windMaxInc',ier) - windMaxInc = 0.0_r8 - allocate(qSolInc(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate qSolInc',ier) - qSolInc = 0.0_r8 - allocate(windInc(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate windInc',ier) - windInc = 0.0_r8 - allocate(nInc (nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate nInc',ier) - nInc = 0.0_r8 - - ! Grid fields - allocate( lats(nloc),stat=ier ) - if(ier/=0) call mct_die(subName,'allocate lats',ier) - lats = 0.0_r8 - allocate( lons(nloc),stat=ier ) - if(ier/=0) call mct_die(subName,'allocate lons',ier) - lons = 0.0_r8 - allocate( emask(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate emask',ier) - emask = 0.0_r8 - allocate(mask(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate mask',ier) - mask = 0.0_r8 - - ! Get lat, lon, mask, which is time-invariant - allocate(rmask(nloc),stat=ier) - if(ier/=0) call mct_die(subName,'allocate rmask',ier) - call mct_gGrid_exportRAttr(dom, 'lat' , lats , nloc) - call mct_gGrid_exportRAttr(dom, 'lon' , lons , nloc) - - ! setup the compute mask. - ! prefer to compute just where ocean exists, so setup a mask here. - ! this could be run with either the ocean or atm grid so need to be careful. - ! really want the ocean mask on ocean grid or ocean mask mapped to atm grid, - ! but do not have access to the ocean mask mapped to the atm grid. - ! the dom mask is a good place to start, on ocean grid, it should be what we want, - ! on the atm grid, it's just all 1's so not very useful. - ! next look at ofrac+ifrac in fractions. want to compute on all non-land points. - ! using ofrac alone will exclude points that are currently all sea ice but that later - ! could be less that 100% covered in ice. - - ! default compute everywhere, then "turn off" gridcells - mask = 1 - - ! use domain mask first - call mct_gGrid_exportRAttr(dom, 'mask', rmask, nloc) - where (rmask < 0.5_r8) mask = 0 ! like nint - deallocate(rmask) - - ! then check ofrac + ifrac - ko = mct_aVect_indexRA(fractions,"ofrac") - ki = mct_aVect_indexRA(fractions,"ifrac") - where (fractions%rAttr(ko,:)+fractions%rAttr(ki,:) <= 0.0_r8) mask(:) = 0 - - emask = mask - - fluxsetting = trim(fluxsetting_atmocn) - - end subroutine seq_flux_init_mct - - !=============================================================================== - - subroutine seq_flux_initexch_mct(atm, ocn, mpicom_cplid, cplid) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(component_type), intent(in) :: atm - type(component_type), intent(in) :: ocn - integer(in) , intent(in) :: mpicom_cplid - integer(in) , intent(in) :: cplid - ! - ! Local variables - ! - type(mct_gsMap), pointer :: gsmap_a - type(mct_gGrid), pointer :: dom_a - type(mct_gsMap), pointer :: gsmap_o - type(mct_gGrid), pointer :: dom_o - integer(in) :: ka,ko,ia,io,n - integer :: ier - integer :: mytask - integer(in) :: kmsk ! field indices - character(len=128) :: ConfigFileName ! config file to read - character(len=128) :: MapLabel ! map name - character(len=128) :: MapTypeLabel ! 
map type - character(len=256) :: fileName - character(len=1) :: maptype - character(len=3) :: Smaptype - type(mct_aVect) :: avdom_oe - type(mct_list) :: sort_keys - character(*),parameter :: subName = '(seq_flux_initexch_mct) ' - !----------------------------------------------------------------------- - - gsmap_a => component_get_gsmap_cx(atm) ! gsmap_ax - gsmap_o => component_get_gsmap_cx(ocn) ! gsmap_ox - dom_a => component_get_dom_cx(atm) ! dom_ax - dom_o => component_get_dom_cx(ocn) ! dom_ox - - call shr_mpi_commrank(mpicom_cplid, mytask) - - !--- Get mapping file info - do n = 1,2 - ConfigFileName = "seq_maps.rc" - if (n == 1) then - MapLabel = "atm2ocn_fmapname:" - MapTypeLabel = "atm2ocn_fmaptype:" - elseif (n == 2) then - MapLabel = "ocn2atm_fmapname:" - MapTypeLabel = "ocn2atm_fmaptype:" - else - call shr_sys_abort(trim(subname)//' do error1') - endif - - call shr_mct_queryConfigFile(mpicom_cplid, ConfigFilename, & - trim(MapLabel),fileName,trim(MapTypeLabel),maptype) - - !--- hardwire decomposition to gsmap_o - if (n == 1) then - Smaptype = "src" - call shr_mct_sMatReaddnc(sMata2o, gsmap_a, gsmap_o, Smaptype, & - filename=fileName, mytask=mytask, mpicom=mpicom_cplid) - elseif (n == 2) then - Smaptype = "dst" - call shr_mct_sMatReaddnc(sMato2a, gsmap_o, gsmap_a, Smaptype, & - filename=fileName, mytask=mytask, mpicom=mpicom_cplid) - else - call shr_sys_abort(trim(subname)//' do error2') - endif - - enddo - - !--- the two mapping files must have their local indices in identical order - !--- sort the global indices as a starting point - - call mct_list_init(sort_keys,'grow:gcol') - call mct_sMat_SortPermute(sMata2o,sort_keys) - call mct_list_clean(sort_keys) - call mct_list_init(sort_keys,'gcol:grow') - call mct_sMat_SortPermute(sMato2a,sort_keys) - call mct_list_clean(sort_keys) - - !--- now check that they are sorted properly - - nloc_a2o= mct_sMat_lsize(sMata2o) - nloc_o2a= mct_sMat_lsize(sMato2a) - - if (nloc_a2o /= nloc_o2a) then - write(logunit,*) trim(subname),' ERROR: sMat sizes',nloc_a2o,nloc_o2a - call shr_sys_abort(trim(subname)//' ERROR in sMat sizes') - endif - ko = mct_sMat_indexIA(sMata2o,'grow') ! local row (dst) index - ka = mct_sMat_indexIA(sMato2a,'gcol') ! local column (src) index - do n = 1,nloc_a2o - io = sMata2o%data%iAttr(ko,n) - ia = sMato2a%data%iAttr(ka,n) - if (io /= ia) then - write(logunit,*) trim(subname),' ERROR: sMat indices1 ',io,ia - call shr_sys_abort(trim(subname)//' ERROR in sMat indices1') - endif - enddo - ko = mct_sMat_indexIA(sMata2o,'gcol') ! local column (src) index - ka = mct_sMat_indexIA(sMato2a,'grow') ! 
local row (dst) index - do n = 1,nloc_a2o - io = sMata2o%data%iAttr(ko,n) - ia = sMato2a%data%iAttr(ka,n) - if (io /= ia) then - write(logunit,*) trim(subname),' ERROR: sMat indices2 ',io,ia - call shr_sys_abort(trim(subname)//' ERROR in sMat indices2') - endif - enddo - - !--- instantiate/create/compute various datatypes - - call mct_sMat_2XgsMap(sMata2o , gsmap_ae, 0, mpicom_cplid, cplid) - call mct_sMat_2YgsMap(sMata2o , gsmap_oe, 0, mpicom_cplid, cplid) - - call mct_rearr_init(gsmap_a , gsmap_ae, mpicom_cplid, Re_a2e) - call mct_rearr_init(gsmap_ae , gsmap_a, mpicom_cplid, Re_e2a) - call mct_rearr_init(gsmap_o , gsmap_oe, mpicom_cplid, Re_o2e) - call mct_rearr_init(gsmap_oe , gsmap_o, mpicom_cplid, Re_e2o) - - call mct_sMat_g2lMat(sMata2o , gsmap_ae, 'column',mpicom_cplid) - call mct_sMat_g2lMat(sMata2o , gsmap_oe, 'row', mpicom_cplid) - call mct_sMat_g2lMat(sMato2a , gsmap_ae, 'row', mpicom_cplid) - call mct_sMat_g2lMat(sMato2a , gsmap_oe, 'column',mpicom_cplid) - - nloc_a = mct_gsmap_lsize(gsmap_a , mpicom_cplid) - nloc_o = mct_gsmap_lsize(gsmap_o , mpicom_cplid) - nloc_ae = mct_gsmap_lsize(gsmap_ae , mpicom_cplid) - nloc_oe = mct_gsmap_lsize(gsmap_oe , mpicom_cplid) - - call mct_gsmap_clean(gsmap_ae) - call mct_gsmap_clean(gsmap_oe) - - ! Input fields atm - allocate( emask(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate emask',ier) - allocate( zbot(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate zbot',ier) - allocate( ubot(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate ubot',ier) - allocate( vbot(nloc_a2o)) - if(ier/=0) call mct_die(subName,'allocate vbot',ier) - allocate(thbot(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate thbot',ier) - allocate(shum(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum',ier) - allocate(shum_16O(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum_16O',ier) - allocate(shum_HDO(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum_HDO',ier) - allocate(shum_18O(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate shum_18O',ier) - allocate(dens(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate dens',ier) - allocate(tbot(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tbot',ier) - allocate(pslv(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate pslv',ier) - allocate(ustar(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate ustar',ier) - allocate(re(nloc_a2o), stat=ier) - if(ier/=0) call mct_die(subName,'allocate re',ier) - allocate(ssq(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate ssq',ier) - allocate( uocn(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate uocn',ier) - allocate( vocn(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate vocn',ier) - allocate( tocn(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tocn',ier) - - ! 
Output fields - allocate(sen (nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate sen',ier) - allocate(lat (nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate lat',ier) - allocate(evap(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap',ier) - allocate(evap_16O(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap_16O',ier) - allocate(evap_HDO(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap_HDO',ier) - allocate(evap_18O(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate evap_18O',ier) - allocate(lwup(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate lwup',ier) - allocate(taux(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate taux',ier) - allocate(tauy(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tauy',ier) - allocate(tref(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate tref',ier) - allocate(qref(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate qref',ier) - allocate(duu10n(nloc_a2o),stat=ier) - if(ier/=0) call mct_die(subName,'allocate duu10n',ier) - - ! set emask - - call mct_avect_init(avdom_oe,dom_o%data,lsize=nloc_oe) - call mct_rearr_rearrange(dom_o%data, avdom_oe, Re_o2e, VECTOR=mct_usevector, ALLTOALL=mct_usealltoall) - ko = mct_sMat_indexIA(sMata2o,'lrow') ! local dst index - kmsk = mct_aVect_indexRA(avdom_oe,"mask",dieWith=subName) - do n = 1,nloc_a2o - io = sMata2o%data%iAttr(ko,n) - emask(n) = nint(avdom_oe%rAttr(kmsk,io)) - if (emask(n) == 0) then - write(logunit,*) trim(subname),' ERROR: weights use masked ocean value' - call shr_sys_abort(trim(subname)//' ERROR: weights use masked ocean value') - endif - enddo - - call mct_aVect_clean(avdom_oe) - - fluxsetting = trim(fluxsetting_exchange) - - end subroutine seq_flux_initexch_mct - - !=============================================================================== - - subroutine seq_flux_ocnalb_mct( infodata, ocn, a2x_o, fractions_o, xao_o ) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(seq_infodata_type) , intent(in) :: infodata - type(component_type) , intent(in) :: ocn - type(mct_aVect) , intent(in) :: a2x_o - type(mct_aVect) , intent(inout) :: fractions_o - type(mct_aVect) , intent(inout) :: xao_o - ! - ! Local variables - ! - type(mct_gGrid), pointer :: dom_o - logical :: flux_albav ! flux avg option - integer(in) :: n ! indices - real(r8) :: rlat ! gridcell latitude in radians - real(r8) :: rlon ! gridcell longitude in radians - real(r8) :: cosz ! Cosine of solar zenith angle - real(r8) :: eccen ! Earth orbit eccentricity - real(r8) :: mvelpp ! Earth orbit - real(r8) :: lambm0 ! Earth orbit - real(r8) :: obliqr ! Earth orbit - real(r8) :: delta ! Solar declination angle in radians - real(r8) :: eccf ! Earth orbit eccentricity factor - real(r8) :: calday ! calendar day including fraction, at 0e - real(r8) :: nextsw_cday ! calendar day of next atm shortwave - real(r8) :: anidr ! albedo: near infrared, direct - real(r8) :: avsdr ! albedo: visible , direct - real(r8) :: anidf ! albedo: near infrared, diffuse - real(r8) :: avsdf ! albedo: visible , diffuse - real(r8) :: swdnc ! temporary swdn - real(r8) :: swupc ! temporary swup - integer(in) :: ier ! error code - integer(in) :: kx,kr ! fractions indices - integer(in) :: klat,klon ! field indices - logical :: update_alb ! was albedo updated - logical,save :: first_call = .true. - ! - real(r8),parameter :: albdif = 0.06_r8 ! 
60 deg reference albedo, diffuse - real(r8),parameter :: albdir = 0.07_r8 ! 60 deg reference albedo, direct - character(*),parameter :: subName = '(seq_flux_ocnalb_mct) ' - ! - !----------------------------------------------------------------------- - - dom_o => component_get_dom_cx(ocn) ! dom_ox - - call seq_infodata_getData(infodata , & - flux_albav=flux_albav) - - ! Determine indices - - update_alb = .false. - - if (first_call) then - index_xao_So_anidr = mct_aVect_indexRA(xao_o,'So_anidr') - index_xao_So_anidf = mct_aVect_indexRA(xao_o,'So_anidf') - index_xao_So_avsdr = mct_aVect_indexRA(xao_o,'So_avsdr') - index_xao_So_avsdf = mct_aVect_indexRA(xao_o,'So_avsdf') - index_xao_Faox_swdn = mct_aVect_indexRA(xao_o,'Faox_swdn') - index_xao_Faox_swup = mct_aVect_indexRA(xao_o,'Faox_swup') - - index_a2x_Faxa_swndr = mct_aVect_indexRA(a2x_o,'Faxa_swndr') - index_a2x_Faxa_swndf = mct_aVect_indexRA(a2x_o,'Faxa_swndf') - index_a2x_Faxa_swvdr = mct_aVect_indexRA(a2x_o,'Faxa_swvdr') - index_a2x_Faxa_swvdf = mct_aVect_indexRA(a2x_o,'Faxa_swvdf') - - nloc_o = mct_ggrid_lsize(dom_o) - klat = mct_gGrid_indexRA(dom_o,"lat" ,dieWith=subName) - klon = mct_gGrid_indexRA(dom_o,"lon" ,dieWith=subName) - allocate( lats(nloc_o),stat=ier ) - if(ier/=0) call mct_die(subName,'allocate lats',ier) - allocate( lons(nloc_o),stat=ier ) - if(ier/=0) call mct_die(subName,'allocate lons',ier) - do n = 1,nloc_o - lats(n) = dom_o%data%rAttr(klat,n) - lons(n) = dom_o%data%rAttr(klon,n) - enddo - first_call = .false. - endif - - if (flux_albav) then - - do n=1,nloc_o - anidr = albdir - avsdr = albdir - anidf = albdif - avsdf = albdif - - ! Albedo is now function of latitude (will be new implementation) - !rlat = const_deg2rad * lats(n) - !anidr = 0.069_r8 - 0.011_r8 * cos(2._r8 * rlat) - !avsdr = anidr - !anidf = anidr - !avsdf = anidr - - xao_o%rAttr(index_xao_So_avsdr,n) = avsdr - xao_o%rAttr(index_xao_So_anidr,n) = anidr - xao_o%rAttr(index_xao_So_avsdf,n) = avsdf - xao_o%rAttr(index_xao_So_anidf,n) = anidf - end do - update_alb = .true. - - else - - !--- flux_atmocn needs swdn & swup = swdn*(-albedo) - !--- swdn & albedos are time-aligned BEFORE albedos get updated below --- - do n=1,nloc_o - avsdr = xao_o%rAttr(index_xao_So_avsdr,n) - anidr = xao_o%rAttr(index_xao_So_anidr,n) - avsdf = xao_o%rAttr(index_xao_So_avsdf,n) - anidf = xao_o%rAttr(index_xao_So_anidf,n) - swupc = a2x_o%rAttr(index_a2x_Faxa_swndr,n)*(-anidr) & - & + a2x_o%rAttr(index_a2x_Faxa_swndf,n)*(-anidf) & - & + a2x_o%rAttr(index_a2x_Faxa_swvdr,n)*(-avsdr) & - & + a2x_o%rAttr(index_a2x_Faxa_swvdf,n)*(-avsdf) - swdnc = a2x_o%rAttr(index_a2x_Faxa_swndr,n) & - & + a2x_o%rAttr(index_a2x_Faxa_swndf,n) & - & + a2x_o%rAttr(index_a2x_Faxa_swvdr,n) & - & + a2x_o%rAttr(index_a2x_Faxa_swvdf,n) - if ( anidr == 1.0_r8 ) then ! dark side of earth - swupc = 0.0_r8 - swdnc = 0.0_r8 - end if - xao_o%rAttr(index_xao_Faox_swdn,n) = swdnc - xao_o%rAttr(index_xao_Faox_swup,n) = swupc - end do - - ! Solar declination - ! Will only do albedo calculation if nextsw_cday is not -1. - - call seq_infodata_GetData(infodata,nextsw_cday=nextsw_cday,orb_eccen=eccen, & - orb_mvelpp=mvelpp, orb_lambm0=lambm0, orb_obliqr=obliqr) - if (nextsw_cday >= -0.5_r8) then - calday = nextsw_cday - call shr_orb_decl(calday, eccen, mvelpp,lambm0, obliqr, delta, eccf) - ! 
Compute albedos - do n=1,nloc_o - rlat = const_deg2rad * lats(n) - rlon = const_deg2rad * lons(n) - cosz = shr_orb_cosz( calday, rlat, rlon, delta ) - if (cosz > 0.0_r8) then !--- sun hit -- - anidr = (.026_r8/(cosz**1.7_r8 + 0.065_r8)) + & - (.150_r8*(cosz - 0.100_r8 ) * & - (cosz - 0.500_r8 ) * & - (cosz - 1.000_r8 ) ) - avsdr = anidr - anidf = albdif - avsdf = albdif - else !--- dark side of earth --- - anidr = 1.0_r8 - avsdr = 1.0_r8 - anidf = 1.0_r8 - avsdf = 1.0_r8 - end if - - xao_o%rAttr(index_xao_So_avsdr,n) = avsdr - xao_o%rAttr(index_xao_So_anidr,n) = anidr - xao_o%rAttr(index_xao_So_avsdf,n) = avsdf - xao_o%rAttr(index_xao_So_anidf,n) = anidf - - end do ! nloc_o - update_alb = .true. - endif ! nextsw_cday - end if ! flux_albav - - !--- update current ifrad/ofrad values if albedo was updated - - if (update_alb) then - kx = mct_aVect_indexRA(fractions_o,"ifrac") - kr = mct_aVect_indexRA(fractions_o,"ifrad") - fractions_o%rAttr(kr,:) = fractions_o%rAttr(kx,:) - kx = mct_aVect_indexRA(fractions_o,"ofrac") - kr = mct_aVect_indexRA(fractions_o,"ofrad") - fractions_o%rAttr(kr,:) = fractions_o%rAttr(kx,:) - endif - - end subroutine seq_flux_ocnalb_mct - - !=============================================================================== - - subroutine seq_flux_atmocnexch_mct( infodata, atm, ocn, fractions_a, fractions_o, & - xao_a, xao_o) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(seq_infodata_type) , intent(in) :: infodata - type(component_type) , intent(in) :: atm - type(component_type) , intent(in) :: ocn - type(mct_aVect) , intent(in) :: fractions_a - type(mct_aVect) , intent(in) :: fractions_o - type(mct_aVect) , intent(inout) :: xao_a - type(mct_aVect) , intent(inout) :: xao_o - ! - ! Local variables - ! - type(mct_aVect) , pointer :: a2x - type(mct_aVect) , pointer :: o2x - type(mct_gsmap) , pointer :: gsmap_a - type(mct_gsmap) , pointer :: gsmap_o - - type(mct_aVect) :: a2x_e - type(mct_aVect) :: o2x_e - type(mct_aVect) :: xaop_ae - type(mct_aVect) :: xaop_oe - type(mct_aVect) :: xaop_a - type(mct_aVect) :: xaop_o - type(mct_aVect) :: fractions_oe - - integer(in) :: kw,ka,ko,ia,io,kf - integer(in) :: n ! indices - logical :: dead_comps ! .true. => dead components are used - integer(in) :: index_tref - integer(in) :: index_qref - integer(in) :: index_duu10n - integer(in) :: index_ustar - integer(in) :: index_ssq - integer(in) :: index_re - integer(in) :: index_u10 - integer(in) :: index_taux - integer(in) :: index_tauy - integer(in) :: index_lat - integer(in) :: index_sen - integer(in) :: index_evap - integer(in) :: index_evap_16O - integer(in) :: index_evap_HDO - integer(in) :: index_evap_18O - integer(in) :: index_lwup - integer(in) :: index_sumwt - integer(in) :: atm_nx,atm_ny,ocn_nx,ocn_ny - real(r8) :: wt - integer(in) :: tod, dt - logical,save:: first_call = .true. - logical :: read_restart ! .true. => model starting from restart - logical :: ocn_prognostic ! .true. => ocn is prognostic - logical :: flux_diurnal ! .true. => turn on diurnal cycle in atm/ocn fluxes - integer :: ocn_surface_flux_scheme ! 0: E3SMv1 1: COARE 2: UA - logical :: cold_start ! .true. to initialize internal fields in shr_flux diurnal - character(len=256) :: fldlist ! subset of xao fields - ! - character(*),parameter :: subName = '(seq_flux_atmocnexch_mct) ' - ! 
- !----------------------------------------------------------------------- - - gsmap_a => component_get_gsmap_cx(atm) - gsmap_o => component_get_gsmap_cx(ocn) - a2x => component_get_c2x_cx(atm) ! a2x_ax - o2x => component_get_c2x_cx(ocn) ! o2x_ox - - if (trim(fluxsetting) /= trim(fluxsetting_exchange)) then - call shr_sys_abort(trim(subname)//' ERROR wrong fluxsetting') - endif - - ! Update ocean surface fluxes - ! Must fabricate "reasonable" data (using dead components) - - call seq_infodata_GetData(infodata, & - read_restart=read_restart, & - dead_comps=dead_comps, & - atm_nx=atm_nx, atm_ny=atm_ny, & - ocn_nx=ocn_nx, ocn_ny=ocn_ny, & - ocn_prognostic=ocn_prognostic, & - flux_diurnal=flux_diurnal, & - ocn_surface_flux_scheme=ocn_surface_flux_scheme) - - cold_start = .false. ! use restart data or data from last timestep - - if (first_call) then - if (.not.read_restart) cold_start = .true. - first_call = .false. - endif - - if (dead_comps) then - do n = 1,nloc_a2o - tocn(n) = 290.0_r8 ! ocn temperature ~ Kelvin - uocn(n) = 0.0_r8 ! ocn velocity, zonal ~ m/s - vocn(n) = 0.0_r8 ! ocn velocity, meridional ~ m/s - zbot(n) = 55.0_r8 ! atm height of bottom layer ~ m - ubot(n) = 0.0_r8 ! atm velocity, zonal ~ m/s - vbot(n) = 2.0_r8 ! atm velocity, meridional ~ m/s - thbot(n)= 301.0_r8 ! atm potential temperature ~ Kelvin - shum(n) = 1.e-2_r8 ! atm specific humidity ~ kg/kg - shum_16O(n) = 1.e-2_r8 ! H216O specific humidity ~ kg/kg - shum_HDO(n) = 1.e-2_r8 ! HD16O specificy humidity ~ kg/kg - shum_18O(n) = 1.e-2_r8 ! H218O specific humidity ~ kg/kg - roce_16O(n) = 1.0_r8 ! H216O ratio ~ mol/mol - roce_HDO(n) = 1.0_r8 ! HD16O ratio ~ mol/mol - roce_18O(n) = 1.0_r8 ! H218O ratio ~ mol/mol - dens(n) = 1.0_r8 ! atm density ~ kg/m^3 - tbot(n) = 300.0_r8 ! atm temperature ~ Kelvin - pslv(n) = 101300.0_r8 ! sea level pressure ~ Pa - enddo - else - - !--- instantiate exchange grid aVects - call mct_AVect_init(a2x_e, a2x, nloc_ae) - call mct_AVect_zero(a2x_e) - call mct_AVect_init(o2x_e, o2x, nloc_oe) - call mct_AVect_zero(o2x_e) - - !--- rearrange a2x and o2x into exchange grid - - call mct_rearr_rearrange(a2x, a2x_e, Re_a2e, VECTOR=mct_usevector, ALLTOALL=mct_usealltoall) - call mct_rearr_rearrange(o2x, o2x_e, Re_o2e, VECTOR=mct_usevector, ALLTOALL=mct_usealltoall) - - !--- extract fields from a2x and o2x (_e) into local arrays on exchange grid - - ko = mct_sMat_indexIA(sMata2o,'lrow') ! local row index - ka = mct_sMat_indexIA(sMata2o,'lcol') ! 
local column index - - do n = 1,nloc_a2o - io = sMata2o%data%iAttr(ko,n) - ia = sMata2o%data%iAttr(ka,n) - zbot(n) = a2x_e%rAttr(index_a2x_Sa_z ,ia) - ubot(n) = a2x_e%rAttr(index_a2x_Sa_u ,ia) - vbot(n) = a2x_e%rAttr(index_a2x_Sa_v ,ia) - thbot(n)= a2x_e%rAttr(index_a2x_Sa_ptem,ia) - shum(n) = a2x_e%rAttr(index_a2x_Sa_shum,ia) - shum_16O(n) = a2x_e%rAttr(index_a2x_Sa_shum_16O,ia) - shum_HDO(n) = a2x_e%rAttr(index_a2x_Sa_shum_HDO,ia) - shum_18O(n) = a2x_e%rAttr(index_a2x_Sa_shum_18O,ia) - dens(n) = a2x_e%rAttr(index_a2x_Sa_dens,ia) - tbot(n) = a2x_e%rAttr(index_a2x_Sa_tbot,ia) - pslv(n) = a2x_e%rAttr(index_a2x_Sa_pslv,ia) - tocn(n) = o2x_e%rAttr(index_o2x_So_t ,io) - uocn(n) = o2x_e%rAttr(index_o2x_So_u ,io) - vocn(n) = o2x_e%rAttr(index_o2x_So_v ,io) - roce_16O(n) = o2x_e%rAttr(index_o2x_So_roce_16O, io) - roce_HDO(n) = o2x_e%rAttr(index_o2x_So_roce_HDO, io) - roce_18O(n) = o2x_e%rAttr(index_o2x_So_roce_18O, io) - enddo - call mct_aVect_clean(a2x_e) - call mct_aVect_clean(o2x_e) - end if - - if (flux_diurnal) then - if (ocn_surface_flux_scheme.eq.2) then - call shr_sys_abort(trim(subname)//' ERROR cannot use flux_diurnal with UA flux scheme') - endif - call shr_flux_atmocn_diurnal (nloc_a2o , zbot , ubot, vbot, thbot, & - shum , shum_16O , shum_HDO, shum_18O, dens , tbot, uocn, vocn , & - tocn , emask, sen , lat , lwup , & - roce_16O, roce_HDO, roce_18O, & - evap , evap_16O, evap_HDO, evap_18O, taux , tauy, tref, qref , & - uGust, lwdn , swdn , swup, prec, & - fswpen, ocnsal, ocn_prognostic, flux_diurnal, & - ocn_surface_flux_scheme, & - lats , lons , warm , salt , speed, regime, & - warmMax, windMax, qSolAvg, windAvg, & - warmMaxInc, windMaxInc, qSolInc, windInc, nInc, & - tbulk, tskin, tskin_day, tskin_night, & - cskin, cskin_night, tod, dt, & - duu10n,ustar, re , ssq , missval = 0.0_r8, & - cold_start=cold_start) - else if (ocn_surface_flux_scheme.eq.2) then - call shr_flux_atmOcn_UA(nloc_a2o , zbot , ubot, vbot, thbot, & - shum , shum_16O , shum_HDO, shum_18O, dens , tbot, pslv, & - uocn, vocn , tocn , emask, sen , lat , lwup , & - roce_16O, roce_HDO, roce_18O, & - evap , evap_16O, evap_HDO, evap_18O, taux, tauy, tref, qref , & - duu10n,ustar, re , ssq , missval = 0.0_r8 ) - else - - call shr_flux_atmocn (nloc_a2o , zbot , ubot, vbot, thbot, & - shum , shum_16O , shum_HDO, shum_18O, dens , tbot, uocn, vocn , & - tocn , emask, sen , lat , lwup , & - roce_16O, roce_HDO, roce_18O, & - evap , evap_16O, evap_HDO, evap_18O, taux, tauy, tref, qref , & - ocn_surface_flux_scheme, & - duu10n,ustar, re , ssq , missval = 0.0_r8 ) - endif - - !--- create temporary aVects on exchange, atm, or ocn decomp as needed - - fldlist = trim(seq_flds_xao_states)//":"//trim(seq_flds_xao_fluxes)//":sumwt" - call mct_aVect_init(xaop_ae,rList=trim(fldlist),lsize=nloc_ae) - call mct_aVect_zero(xaop_ae) - call mct_aVect_init(xaop_oe,rList=trim(fldlist),lsize=nloc_oe) - call mct_aVect_zero(xaop_oe) - call mct_aVect_init(xaop_a, rList=trim(fldlist),lsize=nloc_a) - call mct_aVect_zero(xaop_a) - call mct_aVect_init(xaop_o, rList=trim(fldlist),lsize=nloc_o) - call mct_aVect_zero(xaop_o) - - index_tref = mct_aVect_indexRA(xaop_ae,"So_tref") - index_qref = mct_aVect_indexRA(xaop_ae,"So_qref") - index_duu10n = mct_aVect_indexRA(xaop_ae,"So_duu10n") - index_ustar = mct_aVect_indexRA(xaop_ae,"So_ustar") - index_ssq = mct_aVect_indexRA(xaop_ae,"So_ssq") - index_re = mct_aVect_indexRA(xaop_ae,"So_re") - index_u10 = mct_aVect_indexRA(xaop_ae,"So_u10") - index_taux = mct_aVect_indexRA(xaop_ae,"Faox_taux") - index_tauy = 
mct_aVect_indexRA(xaop_ae,"Faox_tauy") - index_lat = mct_aVect_indexRA(xaop_ae,"Faox_lat") - index_sen = mct_aVect_indexRA(xaop_ae,"Faox_sen") - index_evap = mct_aVect_indexRA(xaop_ae,"Faox_evap") - index_evap_16O = mct_aVect_indexRA(xaop_ae,"Faox_evap_16O", perrWith='quiet') - index_evap_HDO = mct_aVect_indexRA(xaop_ae,"Faox_evap_HDO", perrWith='quiet') - index_evap_18O = mct_aVect_indexRA(xaop_ae,"Faox_evap_18O", perrWith='quiet') - index_lwup = mct_aVect_indexRA(xaop_ae,"Faox_lwup") - index_sumwt = mct_aVect_indexRA(xaop_ae,"sumwt") - - !--- aggregate ocean values locally based on exchange grid decomp - - ko = mct_sMat_indexIA(sMata2o,'lrow') ! local row index - ka = mct_sMat_indexIA(sMata2o,'lcol') ! local column index - kw = mct_sMat_indexRA(sMata2o,'weight') ! weight index - - do n = 1,nloc_a2o - io = sMata2o%data%iAttr(ko,n) - ia = sMata2o%data%iAttr(ka,n) - wt = sMata2o%data%rAttr(kw,n) - xaop_oe%rAttr(index_sen ,io) = xaop_oe%rAttr(index_sen ,io) + sen(n) * wt - xaop_oe%rAttr(index_lat ,io) = xaop_oe%rAttr(index_lat ,io) + lat(n) * wt - xaop_oe%rAttr(index_taux ,io) = xaop_oe%rAttr(index_taux ,io) + taux(n)* wt - xaop_oe%rAttr(index_tauy ,io) = xaop_oe%rAttr(index_tauy ,io) + tauy(n)* wt - xaop_oe%rAttr(index_evap ,io) = xaop_oe%rAttr(index_evap ,io) + evap(n)* wt - if ( index_evap_16O /= 0 ) xaop_oe%rAttr(index_evap_16O ,io) = xaop_oe%rAttr(index_evap_16O ,io) + evap_16O(n)* wt - if ( index_evap_HDO /= 0 ) xaop_oe%rAttr(index_evap_HDO ,io) = xaop_oe%rAttr(index_evap_HDO ,io) + evap_HDO(n)* wt - if ( index_evap_18O /= 0 ) xaop_oe%rAttr(index_evap_18O ,io) = xaop_oe%rAttr(index_evap_18O ,io) + evap_18O(n)* wt - xaop_oe%rAttr(index_tref ,io) = xaop_oe%rAttr(index_tref ,io) + tref(n)* wt - xaop_oe%rAttr(index_qref ,io) = xaop_oe%rAttr(index_qref ,io) + qref(n)* wt - xaop_oe%rAttr(index_ustar ,io) = xaop_oe%rAttr(index_ustar ,io) + ustar(n)*wt ! friction velocity - xaop_oe%rAttr(index_re ,io) = xaop_oe%rAttr(index_re ,io) + re(n) * wt ! reynolds number - xaop_oe%rAttr(index_ssq ,io) = xaop_oe%rAttr(index_ssq ,io) + ssq(n) * wt ! s.hum. saturation at Ts - xaop_oe%rAttr(index_lwup ,io) = xaop_oe%rAttr(index_lwup ,io) + lwup(n)* wt - xaop_oe%rAttr(index_duu10n,io) = xaop_oe%rAttr(index_duu10n,io) + duu10n(n)*wt - xaop_oe%rAttr(index_u10 ,io) = xaop_oe%rAttr(index_u10 ,io) + sqrt(duu10n(n))*wt - xaop_oe%rAttr(index_sumwt ,io) = xaop_oe%rAttr(index_sumwt ,io) + wt - enddo - - !--- aggregate atm values locally based on exchange grid decomp - - ko = mct_sMat_indexIA(sMato2a,'lcol') ! local column index - ka = mct_sMat_indexIA(sMato2a,'lrow') ! local row index - kw = mct_sMat_indexRA(sMato2a,'weight') ! 
weight index - kf = mct_aVect_indexRA(fractions_o,"ofrac") - - !--- to apply fraction corrections, the indexing must be correct so rearrange - call mct_avect_init(fractions_oe,fractions_o,lsize=nloc_oe) - call mct_rearr_rearrange(fractions_o, fractions_oe, Re_o2e, VECTOR=mct_usevector, ALLTOALL=mct_usealltoall) - do n = 1,nloc_o2a - io = sMato2a%data%iAttr(ko,n) - ia = sMato2a%data%iAttr(ka,n) - !tcx wt = sMato2a%data%rAttr(kw,n) - wt = sMato2a%data%rAttr(kw,n) * fractions_oe%rAttr(kf,io) - xaop_ae%rAttr(index_sen ,ia) = xaop_ae%rAttr(index_sen ,ia) + sen(n) * wt - xaop_ae%rAttr(index_lat ,ia) = xaop_ae%rAttr(index_lat ,ia) + lat(n) * wt - xaop_ae%rAttr(index_taux ,ia) = xaop_ae%rAttr(index_taux ,ia) + taux(n)* wt - xaop_ae%rAttr(index_tauy ,ia) = xaop_ae%rAttr(index_tauy ,ia) + tauy(n)* wt - xaop_ae%rAttr(index_evap ,ia) = xaop_ae%rAttr(index_evap ,ia) + evap(n)* wt - if ( index_evap_16O /= 0 ) xaop_ae%rAttr(index_evap_16O ,ia) = xaop_ae%rAttr(index_evap_16O ,ia) + evap_16O(n)* wt - if ( index_evap_HDO /= 0 ) xaop_ae%rAttr(index_evap_HDO ,ia) = xaop_ae%rAttr(index_evap_HDO ,ia) + evap_HDO(n)* wt - if ( index_evap_18O /= 0 ) xaop_ae%rAttr(index_evap_18O ,ia) = xaop_ae%rAttr(index_evap_18O ,ia) + evap_18O(n)* wt - xaop_ae%rAttr(index_tref ,ia) = xaop_ae%rAttr(index_tref ,ia) + tref(n)* wt - xaop_ae%rAttr(index_qref ,ia) = xaop_ae%rAttr(index_qref ,ia) + qref(n)* wt - xaop_ae%rAttr(index_ustar ,ia) = xaop_ae%rAttr(index_ustar ,ia) + ustar(n)*wt ! friction velocity - xaop_ae%rAttr(index_re ,ia) = xaop_ae%rAttr(index_re ,ia) + re(n) * wt ! reynolds number - xaop_ae%rAttr(index_ssq ,ia) = xaop_ae%rAttr(index_ssq ,ia) + ssq(n) * wt ! s.hum. saturation at Ts - xaop_ae%rAttr(index_lwup ,ia) = xaop_ae%rAttr(index_lwup ,ia) + lwup(n)* wt - xaop_ae%rAttr(index_duu10n,ia) = xaop_ae%rAttr(index_duu10n,ia) + duu10n(n)*wt - xaop_ae%rAttr(index_u10 ,ia) = xaop_ae%rAttr(index_u10 ,ia) + sqrt(duu10n(n))*wt - xaop_ae%rAttr(index_sumwt ,ia) = xaop_ae%rAttr(index_sumwt ,ia) + wt - enddo - - call mct_aVect_clean(fractions_oe) - - !--- rearrange and sum from exchange grid to gsmap_a and gsmap_o decomps - - call mct_rearr_rearrange(xaop_ae, xaop_a, Re_e2a, sum=.true., & - VECTOR=mct_usevector, ALLTOALL=mct_usealltoall) - call mct_rearr_rearrange(xaop_oe, xaop_o, Re_e2o, sum=.true., & - VECTOR=mct_usevector, ALLTOALL=mct_usealltoall) - - !--- normalize by sum of wts associated with mapping - - do n = 1,nloc_a - wt = xaop_a%rAttr(index_sumwt,n) - if (wt /= 0.0_r8) then - wt = 1.0_r8/wt - else - wt = 1.0_r8 - endif - xaop_a%rAttr(:,n) = xaop_a%rAttr(:,n) * wt - enddo - - do n = 1,nloc_o - wt = xaop_o%rAttr(index_sumwt,n) - if (wt /= 0.0_r8) then - wt = 1.0_r8/wt - else - wt = 1.0_r8 - endif - xaop_o%rAttr(:,n) = xaop_o%rAttr(:,n) * wt - enddo - - !--- copy subset of fields to xao_a and xao_o and clean up - - call mct_avect_clean(xaop_ae) - call mct_avect_clean(xaop_oe) - - call mct_avect_copy(xaop_a, xao_a) - call mct_avect_copy(xaop_o, xao_o) - - call mct_avect_clean(xaop_a) - call mct_avect_clean(xaop_o) - - end subroutine seq_flux_atmocnexch_mct - - !=============================================================================== - - subroutine seq_flux_atmocn_mct(infodata, tod, dt, a2x, o2x, xao) - - !----------------------------------------------------------------------- - ! - ! Arguments - ! - type(seq_infodata_type) , intent(in) :: infodata - integer(in) , intent(in) :: tod,dt ! NEW - type(mct_aVect) , intent(in) :: a2x ! a2x_ax or a2x_ox - type(mct_aVect) , intent(in) :: o2x ! 
o2x_ax or o2x_ox - type(mct_aVect) , intent(inout) :: xao - ! - ! Local variables - ! - logical :: flux_albav ! flux avg option - logical :: dead_comps ! .true. => dead components are used - integer(in) :: n ! indices - integer(in) :: nloc, nloca, nloco ! number of gridcells - logical,save:: first_call = .true. - logical :: cold_start ! .true. to initialize internal fields in shr_flux diurnal - logical :: read_restart ! .true. => continue run - logical :: ocn_prognostic ! .true. => ocn is prognostic - logical :: flux_diurnal ! .true. => turn on diurnal cycle in atm/ocn fluxes - integer :: ocn_surface_flux_scheme ! 0: E3SMv1 1: COARE 2: UA - real(r8) :: flux_convergence ! convergence criteria for imlicit flux computation - integer(in) :: flux_max_iteration ! maximum number of iterations for convergence - logical :: coldair_outbreak_mod ! cold air outbreak adjustment (Mahrt & Sun 1995,MWR) - ! - real(r8),parameter :: albdif = 0.06_r8 ! 60 deg reference albedo, diffuse - real(r8),parameter :: albdir = 0.07_r8 ! 60 deg reference albedo, direct - character(*),parameter :: subName = '(seq_flux_atmocn_mct) ' - ! - !----------------------------------------------------------------------- - - call seq_infodata_getData(infodata , & - read_restart=read_restart, & - flux_albav=flux_albav, & - dead_comps=dead_comps, & - ocn_prognostic=ocn_prognostic, & - flux_diurnal=flux_diurnal, & - ocn_surface_flux_scheme=ocn_surface_flux_scheme) - - cold_start = .false. ! use restart data or data from last timestep - - if (first_call) then - call seq_infodata_getData(infodata , & - coldair_outbreak_mod=coldair_outbreak_mod, & - flux_convergence=flux_convergence, & - flux_max_iteration=flux_max_iteration) - - if (.not.read_restart) cold_start = .true. - index_xao_So_tref = mct_aVect_indexRA(xao,'So_tref') - index_xao_So_qref = mct_aVect_indexRA(xao,'So_qref') - index_xao_So_ustar = mct_aVect_indexRA(xao,'So_ustar') - index_xao_So_re = mct_aVect_indexRA(xao,'So_re') - index_xao_So_ssq = mct_aVect_indexRA(xao,'So_ssq') - index_xao_So_u10 = mct_aVect_indexRA(xao,'So_u10') - index_xao_So_duu10n = mct_aVect_indexRA(xao,'So_duu10n') - index_xao_Faox_taux = mct_aVect_indexRA(xao,'Faox_taux') - index_xao_Faox_tauy = mct_aVect_indexRA(xao,'Faox_tauy') - index_xao_Faox_lat = mct_aVect_indexRA(xao,'Faox_lat') - index_xao_Faox_sen = mct_aVect_indexRA(xao,'Faox_sen') - index_xao_Faox_evap = mct_aVect_indexRA(xao,'Faox_evap') - index_xao_Faox_evap_16O = mct_aVect_indexRA(xao,'Faox_evap_16O', perrWith='quiet') - index_xao_Faox_evap_HDO = mct_aVect_indexRA(xao,'Faox_evap_HDO', perrWith='quiet') - index_xao_Faox_evap_18O = mct_aVect_indexRA(xao,'Faox_evap_18O', perrWith='quiet') - index_xao_Faox_lwup = mct_aVect_indexRA(xao,'Faox_lwup') - index_xao_Faox_swdn = mct_aVect_indexRA(xao,'Faox_swdn') - index_xao_Faox_swup = mct_aVect_indexRA(xao,'Faox_swup') - index_xao_So_fswpen = mct_aVect_indexRA(xao,'So_fswpen') - index_xao_So_warm_diurn = mct_aVect_indexRA(xao,'So_warm_diurn') - index_xao_So_salt_diurn = mct_aVect_indexRA(xao,'So_salt_diurn') - index_xao_So_speed_diurn = mct_aVect_indexRA(xao,'So_speed_diurn') - index_xao_So_regime_diurn = mct_aVect_indexRA(xao,'So_regime_diurn') - index_xao_So_tskin_diurn = mct_aVect_indexRA(xao,'So_tskin_diurn') - index_xao_So_tskin_day_diurn = mct_aVect_indexRA(xao,'So_tskin_day_diurn') - index_xao_So_tskin_night_diurn = mct_aVect_indexRA(xao,'So_tskin_night_diurn') - index_xao_So_cskin_diurn = mct_aVect_indexRA(xao,'So_cskin_diurn') - index_xao_So_cskin_night_diurn = 
mct_aVect_indexRA(xao,'So_cskin_night_diurn') - index_xao_So_tbulk_diurn = mct_aVect_indexRA(xao,'So_tbulk_diurn') - index_xao_So_warmmax_diurn = mct_aVect_indexRA(xao,'So_warmmax_diurn') - index_xao_So_windmax_diurn = mct_aVect_indexRA(xao,'So_windmax_diurn') - index_xao_So_qsolavg_diurn = mct_aVect_indexRA(xao,'So_qsolavg_diurn') - index_xao_So_windavg_diurn = mct_aVect_indexRA(xao,'So_windavg_diurn') - index_xao_So_warmmaxinc_diurn = mct_aVect_indexRA(xao,'So_warmmaxinc_diurn') - index_xao_So_windmaxinc_diurn = mct_aVect_indexRA(xao,'So_windmaxinc_diurn') - index_xao_So_qsolinc_diurn = mct_aVect_indexRA(xao,'So_qsolinc_diurn') - index_xao_So_windinc_diurn = mct_aVect_indexRA(xao,'So_windinc_diurn') - index_xao_So_ninc_diurn = mct_aVect_indexRA(xao,'So_ninc_diurn') - - index_a2x_Sa_z = mct_aVect_indexRA(a2x,'Sa_z') - index_a2x_Sa_u = mct_aVect_indexRA(a2x,'Sa_u') - index_a2x_Sa_v = mct_aVect_indexRA(a2x,'Sa_v') - index_a2x_Sa_tbot = mct_aVect_indexRA(a2x,'Sa_tbot') - index_a2x_Sa_pslv = mct_aVect_indexRA(a2x,'Sa_pslv') - index_a2x_Sa_ptem = mct_aVect_indexRA(a2x,'Sa_ptem') - index_a2x_Sa_shum = mct_aVect_indexRA(a2x,'Sa_shum') - index_a2x_Sa_shum_16O = mct_aVect_indexRA(a2x,'Sa_shum_16O', perrWith='quiet') - index_a2x_Sa_shum_HDO = mct_aVect_indexRA(a2x,'Sa_shum_HDO', perrWith='quiet') - index_a2x_Sa_shum_18O = mct_aVect_indexRA(a2x,'Sa_shum_18O', perrWith='quiet') - index_a2x_Sa_dens = mct_aVect_indexRA(a2x,'Sa_dens') - index_a2x_Faxa_lwdn = mct_aVect_indexRA(a2x,'Faxa_lwdn') - index_a2x_Faxa_rainc= mct_aVect_indexRA(a2x,'Faxa_rainc') - index_a2x_Faxa_rainl= mct_aVect_indexRA(a2x,'Faxa_rainl') - index_a2x_Faxa_snowc= mct_aVect_indexRA(a2x,'Faxa_snowc') - index_a2x_Faxa_snowl= mct_aVect_indexRA(a2x,'Faxa_snowl') - - index_o2x_So_t = mct_aVect_indexRA(o2x,'So_t') - index_o2x_So_u = mct_aVect_indexRA(o2x,'So_u') - index_o2x_So_v = mct_aVect_indexRA(o2x,'So_v') - index_o2x_So_fswpen = mct_aVect_indexRA(o2x,'So_fswpen') - index_o2x_So_s = mct_aVect_indexRA(o2x,'So_s') - index_o2x_So_roce_16O = mct_aVect_indexRA(o2x,'So_roce_16O', perrWith='quiet') - index_o2x_So_roce_HDO = mct_aVect_indexRA(o2x,'So_roce_HDO', perrWith='quiet') - index_o2x_So_roce_18O = mct_aVect_indexRA(o2x,'So_roce_18O', perrWith='quiet') - call shr_flux_adjust_constants(flux_convergence_tolerance=flux_convergence, & - flux_convergence_max_iteration=flux_max_iteration, & - coldair_outbreak_mod=coldair_outbreak_mod) - first_call = .false. - end if - - if (trim(fluxsetting) /= trim(fluxsetting_atmocn)) then - call shr_sys_abort(trim(subname)//' ERROR wrong fluxsetting') - endif - - nloc = mct_aVect_lsize(xao) - nloca = mct_aVect_lsize(a2x) - nloco = mct_aVect_lsize(o2x) - - if (nloc /= nloca .or. nloc /= nloco) then - call shr_sys_abort(trim(subname)//' ERROR nloc sizes do not match') - endif - - ! Update ocean surface fluxes - ! Must fabricate "reasonable" data (when using dead components) - - emask = mask - if (dead_comps) then - do n = 1,nloc - mask(n) = 1 ! ocn domain mask ~ 0 <=> inactive cell - tocn(n) = 290.0_r8 ! ocn temperature ~ Kelvin - uocn(n) = 0.0_r8 ! ocn velocity, zonal ~ m/s - vocn(n) = 0.0_r8 ! ocn velocity, meridional ~ m/s - zbot(n) = 55.0_r8 ! atm height of bottom layer ~ m - ubot(n) = 0.0_r8 ! atm velocity, zonal ~ m/s - vbot(n) = 2.0_r8 ! atm velocity, meridional ~ m/s - thbot(n)= 301.0_r8 ! atm potential temperature ~ Kelvin - shum(n) = 1.e-2_r8 ! atm specific humidity ~ kg/kg - !wiso note: shum_* should be multiplied by Rstd_* here? - shum_16O(n) = 1.e-2_r8 ! 
H216O specific humidity ~ kg/kg - shum_HDO(n) = 1.e-2_r8 ! HD16O specific humidity ~ kg/kg - shum_18O(n) = 1.e-2_r8 ! H218O specific humidity ~ kg/kg - roce_16O(n) = 1.0_r8 ! H216O surface ratio ~ mol/mol - roce_HDO(n) = 1.0_r8 ! HDO surface ratio ~ mol/mol - roce_18O(n) = 1.0_r8 ! H218O surface ratio ~ mol/mol - dens(n) = 1.0_r8 ! atm density ~ kg/m^3 - tbot(n) = 300.0_r8 ! atm temperature ~ Kelvin - pslv(n) = 101300.0_r8 ! sea level pressure ~ Pa - uGust(n)= 0.0_r8 - lwdn(n) = 0.0_r8 - prec(n) = 0.0_r8 - fswpen(n)= 0.0_r8 - ocnsal(n)= 0.0_r8 - - warm (n) = 0.0_r8 - salt (n) = 0.0_r8 - speed (n) = 0.0_r8 - regime (n) = 0.0_r8 - warmMax (n) = 0.0_r8 - windMax (n) = 0.0_r8 - qSolAvg (n) = 0.0_r8 - windAvg (n) = 0.0_r8 - warmMaxInc (n) = 0.0_r8 - windMaxInc (n) = 0.0_r8 - qSolInc (n) = 0.0_r8 - windInc (n) = 0.0_r8 - nInc (n) = 0.0_r8 - tbulk (n) = 0.0_r8 - tskin (n) = 0.0_r8 - tskin_day (n) = 0.0_r8 - tskin_night(n) = 0.0_r8 - cskin (n) = 0.0_r8 - cskin_night(n) = 0.0_r8 - swdn (n) = 0.0_r8 - swup (n) = 0.0_r8 - enddo - else - do n = 1,nloc - nInc(n) = 0._r8 ! needed for minval/maxval calculation - if (mask(n) /= 0) then - zbot(n) = a2x%rAttr(index_a2x_Sa_z ,n) - ubot(n) = a2x%rAttr(index_a2x_Sa_u ,n) - vbot(n) = a2x%rAttr(index_a2x_Sa_v ,n) - thbot(n)= a2x%rAttr(index_a2x_Sa_ptem,n) - shum(n) = a2x%rAttr(index_a2x_Sa_shum,n) - if ( index_a2x_Sa_shum_16O /= 0 ) shum_16O(n) = a2x%rAttr(index_a2x_Sa_shum_16O,n) - if ( index_a2x_Sa_shum_HDO /= 0 ) shum_HDO(n) = a2x%rAttr(index_a2x_Sa_shum_HDO,n) - if ( index_a2x_Sa_shum_18O /= 0 ) shum_18O(n) = a2x%rAttr(index_a2x_Sa_shum_18O,n) - dens(n) = a2x%rAttr(index_a2x_Sa_dens,n) - tbot(n) = a2x%rAttr(index_a2x_Sa_tbot,n) - pslv(n) = a2x%rAttr(index_a2x_Sa_pslv,n) - tocn(n) = o2x%rAttr(index_o2x_So_t ,n) - uocn(n) = o2x%rAttr(index_o2x_So_u ,n) - vocn(n) = o2x%rAttr(index_o2x_So_v ,n) - if ( index_o2x_So_roce_16O /= 0 ) roce_16O(n) = o2x%rAttr(index_o2x_So_roce_16O, n) - if ( index_o2x_So_roce_HDO /= 0 ) roce_HDO(n) = o2x%rAttr(index_o2x_So_roce_HDO, n) - if ( index_o2x_So_roce_18O /= 0 ) roce_18O(n) = o2x%rAttr(index_o2x_So_roce_18O, n) - !--- mask missing atm or ocn data if found - if (dens(n) < 1.0e-12 .or. tocn(n) < 1.0) then - emask(n) = 0 - !write(logunit,*) 'aoflux tcx1',n,dens(n),tocn(n) - endif - ! !!uGust(n) = 1.5_r8*sqrt(uocn(n)**2 + vocn(n)**2) ! 
there is no wind gust data from ocn - uGust(n) = 0.0_r8 - lwdn (n) = a2x%rAttr(index_a2x_Faxa_lwdn ,n) - prec (n) = a2x%rAttr(index_a2x_Faxa_rainc,n) & - & + a2x%rAttr(index_a2x_Faxa_rainl,n) & - & + a2x%rAttr(index_a2x_Faxa_snowc,n) & - & + a2x%rAttr(index_a2x_Faxa_snowl,n) - fswpen(n)= o2x%rAttr(index_o2x_So_fswpen ,n) - ocnsal(n)= o2x%rAttr(index_o2x_So_s ,n) - - warm (n) = xao%rAttr(index_xao_So_warm_diurn ,n) - salt (n) = xao%rAttr(index_xao_So_salt_diurn ,n) - speed (n) = xao%rAttr(index_xao_So_speed_diurn ,n) - regime (n) = xao%rAttr(index_xao_So_regime_diurn ,n) - warmMax (n) = xao%rAttr(index_xao_So_warmMax_diurn ,n) - windMax (n) = xao%rAttr(index_xao_So_windMax_diurn ,n) - qSolAvg (n) = xao%rAttr(index_xao_So_qsolavg_diurn ,n) - windAvg (n) = xao%rAttr(index_xao_So_windavg_diurn ,n) - warmMaxInc (n) = xao%rAttr(index_xao_So_warmMaxInc_diurn,n) - windMaxInc (n) = xao%rAttr(index_xao_So_windMaxInc_diurn,n) - qSolInc (n) = xao%rAttr(index_xao_So_qSolInc_diurn ,n) - windInc (n) = xao%rAttr(index_xao_So_windInc_diurn ,n) - nInc (n) = xao%rAttr(index_xao_So_nInc_diurn ,n) - tbulk (n) = xao%rAttr(index_xao_So_tbulk_diurn ,n) - tskin (n) = xao%rAttr(index_xao_So_tskin_diurn ,n) - tskin_day (n) = xao%rAttr(index_xao_So_tskin_day_diurn ,n) - tskin_night(n) = xao%rAttr(index_xao_So_tskin_night_diurn,n) - cskin (n) = xao%rAttr(index_xao_So_cskin_diurn ,n) - cskin_night(n) = xao%rAttr(index_xao_So_cskin_night_diurn,n) - ! set in flux_ocnalb using data from previous timestep - swdn (n) = xao%rAttr(index_xao_Faox_swdn ,n) - swup (n) = xao%rAttr(index_xao_Faox_swup ,n) - end if - enddo - end if - - if (flux_diurnal) then - if (ocn_surface_flux_scheme.eq.2) then - call shr_sys_abort(trim(subname)//' ERROR cannot use flux_diurnal with UA flux scheme') - endif - - call shr_flux_atmocn_diurnal (nloc , zbot , ubot, vbot, thbot, & - shum , shum_16O , shum_HDO, shum_18O, dens , tbot, uocn, vocn , & - tocn , emask, sen , lat , lwup , & - roce_16O, roce_HDO, roce_18O, & - evap , evap_16O, evap_HDO, evap_18O, taux , tauy, tref, qref , & - uGust, lwdn , swdn , swup, prec, & - fswpen, ocnsal, ocn_prognostic, flux_diurnal, & - ocn_surface_flux_scheme, & - lats, lons , warm , salt , speed, regime, & - warmMax, windMax, qSolAvg, windAvg, & - warmMaxInc, windMaxInc, qSolInc, windInc, nInc, & - tbulk, tskin, tskin_day, tskin_night, & - cskin, cskin_night, tod, dt, & - duu10n,ustar, re , ssq, & - !missval should not be needed if flux calc - !consistent with mrgx2a fraction - !duu10n,ustar, re , ssq, missval = 0.0_r8 ) - cold_start=cold_start) - else if (ocn_surface_flux_scheme.eq.2) then - call shr_flux_atmOcn_UA(nloc , zbot , ubot, vbot, thbot, & - shum , shum_16O , shum_HDO, shum_18O, dens , tbot, pslv, & - uocn, vocn , tocn , emask, sen , lat , lwup , & - roce_16O, roce_HDO, roce_18O, & - evap , evap_16O, evap_HDO, evap_18O, taux , tauy, tref, qref , & - duu10n,ustar, re , ssq) - else - call shr_flux_atmocn (nloc , zbot , ubot, vbot, thbot, & - shum , shum_16O , shum_HDO, shum_18O, dens , tbot, uocn, vocn , & - tocn , emask, sen , lat , lwup , & - roce_16O, roce_HDO, roce_18O, & - evap , evap_16O, evap_HDO, evap_18O, taux , tauy, tref, qref , & - ocn_surface_flux_scheme, & - duu10n,ustar, re , ssq) - !missval should not be needed if flux calc - !consistent with mrgx2a fraction - !duu10n,ustar, re , ssq, missval = 0.0_r8 ) - endif - - do n = 1,nloc - if (mask(n) /= 0) then - xao%rAttr(index_xao_Faox_sen ,n) = sen(n) - xao%rAttr(index_xao_Faox_lat ,n) = lat(n) - xao%rAttr(index_xao_Faox_taux,n) = taux(n) - 
xao%rAttr(index_xao_Faox_tauy,n) = tauy(n) - xao%rAttr(index_xao_Faox_evap,n) = evap(n) - if ( index_xao_Faox_evap_16O /= 0 ) xao%rAttr(index_xao_Faox_evap_16O,n) = evap_16O(n) - if ( index_xao_Faox_evap_HDO /= 0 ) xao%rAttr(index_xao_Faox_evap_HDO,n) = evap_HDO(n) - if ( index_xao_Faox_evap_18O /= 0 ) xao%rAttr(index_xao_Faox_evap_18O,n) = evap_18O(n) - xao%rAttr(index_xao_So_tref ,n) = tref(n) - xao%rAttr(index_xao_So_qref ,n) = qref(n) - xao%rAttr(index_xao_So_ustar ,n) = ustar(n) ! friction velocity - xao%rAttr(index_xao_So_re ,n) = re(n) ! reynolds number - xao%rAttr(index_xao_So_ssq ,n) = ssq(n) ! s.hum. saturation at Ts - xao%rAttr(index_xao_Faox_lwup,n) = lwup(n) - xao%rAttr(index_xao_So_duu10n,n) = duu10n(n) - xao%rAttr(index_xao_So_u10 ,n) = sqrt(duu10n(n)) - xao%rAttr(index_xao_So_warm_diurn ,n) = warm(n) - xao%rAttr(index_xao_So_salt_diurn ,n) = salt(n) - xao%rAttr(index_xao_So_speed_diurn ,n) = speed(n) - xao%rAttr(index_xao_So_regime_diurn ,n) = regime(n) - xao%rAttr(index_xao_So_warmMax_diurn ,n) = warmMax(n) - xao%rAttr(index_xao_So_windMax_diurn ,n) = windMax(n) - xao%rAttr(index_xao_So_qSolAvg_diurn ,n) = qSolAvg(n) - xao%rAttr(index_xao_So_windAvg_diurn ,n) = windAvg(n) - xao%rAttr(index_xao_So_warmMaxInc_diurn ,n) = warmMaxInc(n) - xao%rAttr(index_xao_So_windMaxInc_diurn ,n) = windMaxInc(n) - xao%rAttr(index_xao_So_qSolInc_diurn ,n) = qSolInc(n) - xao%rAttr(index_xao_So_windInc_diurn ,n) = windInc(n) - xao%rAttr(index_xao_So_nInc_diurn ,n) = nInc(n) - xao%rAttr(index_xao_So_tbulk_diurn ,n) = tbulk(n) - xao%rAttr(index_xao_So_tskin_diurn ,n) = tskin(n) - xao%rAttr(index_xao_So_tskin_day_diurn ,n) = tskin_day(n) - xao%rAttr(index_xao_So_tskin_night_diurn,n) = tskin_night(n) - xao%rAttr(index_xao_So_cskin_diurn ,n) = cskin(n) - xao%rAttr(index_xao_So_cskin_night_diurn,n) = cskin_night(n) - xao%rAttr(index_xao_So_fswpen ,n) = fswpen(n) - end if - enddo - - end subroutine seq_flux_atmocn_mct - - !=============================================================================== - -end module seq_flux_mct diff --git a/src/drivers/mct/main/seq_frac_mct.F90 b/src/drivers/mct/main/seq_frac_mct.F90 deleted file mode 100644 index 11985ab9a58..00000000000 --- a/src/drivers/mct/main/seq_frac_mct.F90 +++ /dev/null @@ -1,840 +0,0 @@ -! !MODULE: seq_frac_mct -- handles surface fractions. -! -! Fraction Notes: tcraig, august 2008 -! Assumes is running on CPLID pes -! -! the fractions fields are now afrac, ifrac, ofrac, lfrac, and lfrin. -! afrac = fraction of atm on a grid -! lfrac = fraction of lnd on a grid -! ifrac = fraction of ice on a grid -! ofrac = fraction of ocn on a grid -! lfrin = land fraction defined by the land model -! ifrad = fraction of ocn on a grid at last radiation time -! ofrad = fraction of ice on a grid at last radiation time -! afrac, lfrac, ifrac, and ofrac are the self-consistent values in the -! system. lfrin is the fraction on the land grid and is allowed to -! vary from the self-consistent value as descibed below. ifrad -! and ofrad are needed for the swnet calculation. -! the fractions fields are defined for each grid in the fraction bundles as -! needed as follows. -! character(*),parameter :: fraclist_a = 'afrac:ifrac:ofrac:lfrac:lfrin' -! character(*),parameter :: fraclist_o = 'afrac:ifrac:ofrac:ifrad:ofrad' -! character(*),parameter :: fraclist_i = 'afrac:ifrac:ofrac' -! character(*),parameter :: fraclist_l = 'afrac:lfrac:lfrin' -! character(*),parameter :: fraclist_g = 'gfrac:lfrac' -! character(*),parameter :: fraclist_r = 'lfrac:rfrac' -! -! 
we assume ocean and ice are on the same grids, same masks -! we assume ocn2atm and ice2atm are masked maps -! we assume lnd2atm is a global map -! we assume that the ice fraction evolves in time but that -! the land model fraction does not. the ocean fraction then -! is just the complement of the ice fraction over the region -! of the ocean/ice mask. -! we assume that component domains are filled with the total -! potential mask/fraction on that grid, but that the fractions -! sent at run time are always the relative fraction covered. -! for example, if an ice cell can be up to 50% covered in -! ice and 50% land, then the ice domain should have a fraction -! value of 0.5 at that grid cell. at run time though, the ice -! fraction will be between 0.0 and 1.0 meaning that grid cells -! is covered with between 0.0 and 0.5 by ice. the "relative" fractions -! sent at run-time are corrected by the model to be total fractions -! such that -! in general, on every grid, -! fractions_*(afrac) = 1.0 -! fractions_*(ifrac) + fractions_*(ofrac) + fractions_*(lfrac) = 1.0 -! where fractions_* are a bundle of fractions on a particular grid and -! *frac (ie afrac) is the fraction of a particular component in the bundle. -! -! fraclist_g and fraclist_r don't yet interact with atm, lnd, ice, ocn. -! -! the fractions are computed fundamentally as follows (although the -! detailed implementation might be slightly different) -! initialization (frac_init): -! afrac is set on all grids -! fractions_a(afrac) = 1.0 -! fractions_o(afrac) = mapa2o(fractions_a(afrac)) -! fractions_i(afrac) = mapa2i(fractions_a(afrac)) -! fractions_l(afrac) = mapa2l(fractions_a(afrac)) -! initially assume ifrac on all grids is zero -! fractions_*(ifrac) = 0.0 -! fractions/masks provided by surface components -! fractions_o(ofrac) = dom_o(frac) ! ocean "mask" -! fractions_l(lfrin) = dom_l(frac) ! land model fraction -! then mapped to the atm model -! fractions_a(ofrac) = mapo2a(fractions_o(ofrac)) -! fractions_a(lfrin) = mapl2a(fractions_l(lfrin)) -! and a few things are then derived -! fractions_a(lfrac) = 1.0 - fractions_a(ofrac) -! this is truncated to zero for very small values (< 0.001) -! to attempt to preserve non-land gridcells. -! fractions_l(lfrac) = mapa2l(fractions_a(lfrac)) -! fractions_r(lfrac) = mapl2r(fractions_l(lfrac)) -! fractions_g(lfrac) = mapl2g(fractions_l(lfrac)) -! -! run-time (frac_set): -! update fractions on ice grid -! fractions_i(ifrac) = i2x_i(Si_ifrac) ! ice frac from ice model -! fractions_i(ofrac) = 1.0 - fractions_i(ifrac) -! note: the relative fractions are corrected to total fractions -! fractions_o(ifrac) = mapi2o(fractions_i(ifrac)) -! fractions_o(ofrac) = mapi2o(fractions_i(ofrac)) -! fractions_a(ifrac) = mapi2a(fractions_i(ifrac)) -! fractions_a(ofrac) = mapi2a(fractions_i(ofrac)) -! -! fractions used in merging are as follows -! mrg_x2a uses fractions_a(lfrac,ofrac,ifrac) -! mrg_x2o needs to use fractions_o(ofrac,ifrac) normalized to one -! normalization happens in mrg routine -! -! fraction corrections in mapping are as follows -! mapo2a uses *fractions_o(ofrac) and /fractions_a(ofrac) -! mapi2a uses *fractions_i(ifrac) and /fractions_a(ifrac) -! mapl2a uses *fractions_l(lfrin) and /fractions_a(lfrin) -! mapl2g weights by fractions_l(lfrac) with normalization, and multiplies by -! fractions_g(lfrac) -! mapa2* should use *fractions_a(afrac) and /fractions_*(afrac) but this -! has been defered since the ratio always close to 1.0 -! -! 
budgets use the standard afrac, ofrac, ifrac, and lfrac to compute -! -! fraction and domain checks -! initialization: -! dom_i = mapo2i(dom_o) ! lat, lon, mask, area -! where fractions_a(lfrac) > 0.0, fractions_a(lfrin) is also > 0.0 -! this ensures the land will provide data everywhere the atm needs it -! and allows the land frac to be subtlely different from the -! land fraction specified in the atm. -! dom_a = mapl2a(dom_l) ! if atm/lnd same grids -! dom_a = mapo2a(dom_o) ! if atm/ocn same grids -! dom_a = mapi2a(dom_i) ! if atm/ocn same grids -! 0.0-eps < fractions_*(*) < 1.0+eps -! run time: -! fractions_a(lfrac) + fractions_a(ofrac) + fractions_a(ifrac) ~ 1.0 -! 0.0-eps < fractions_*(*) < 1.0+eps -! -!! -! -! !REVISION HISTORY: -! 2007-may-07 - M. Vertenstein - initial port to cpl7. -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_frac_mct - - ! !USES: - - use shr_kind_mod , only: R8 => SHR_KIND_R8 - use shr_sys_mod - use shr_const_mod - - use mct_mod - use seq_infodata_mod - use seq_comm_mct, only: logunit, loglevel, seq_comm_mpicom, seq_comm_iamroot, CPLID - use seq_map_mod, only: seq_map_map - use seq_map_type_mod, only: seq_map - - use prep_lnd_mod, only: prep_lnd_get_mapper_Fa2l - use prep_ocn_mod, only: prep_ocn_get_mapper_Fa2o - use prep_ocn_mod, only: prep_ocn_get_mapper_SFi2o - use prep_ice_mod, only: prep_ice_get_mapper_SFo2i - use prep_rof_mod, only: prep_rof_get_mapper_Fl2r - use prep_atm_mod, only: prep_atm_get_mapper_Fo2a - use prep_atm_mod, only: prep_atm_get_mapper_Fi2a - use prep_atm_mod, only: prep_atm_get_mapper_Fl2a - use prep_glc_mod, only: prep_glc_get_mapper_Fl2g - - use component_type_mod - - implicit none - private - save - - ! !PUBLIC TYPES: - - ! none - - ! !PUBLIC MEMBER FUNCTIONS: - - public seq_frac_init - public seq_frac_set - - ! !PUBLIC DATA MEMBERS: - - !EOP - - ! !LOCAL DATA - - integer, private :: seq_frac_debug = 1 - logical, private :: seq_frac_abort = .true. - logical, private :: seq_frac_dead - - !--- standard --- - real(r8),parameter :: eps_fracsum = 1.0e-02 ! allowed error in sum of fracs - real(r8),parameter :: eps_fracval = 1.0e-02 ! allowed error in any frac +- 0,1 - real(r8),parameter :: eps_fraclim = 1.0e-03 ! truncation limit in fractions_a(lfrac) - logical ,parameter :: atm_frac_correct = .false. ! turn on frac correction on atm grid - !--- standard plus atm fraction consistency --- - ! real(r8),parameter :: eps_fracsum = 1.0e-12 ! allowed error in sum of fracs - ! real(r8),parameter :: eps_fracval = 1.0e-02 ! allowed error in any frac +- 0,1 - ! real(r8),parameter :: eps_fraclim = 1.0e-03 ! truncation limit in fractions_a(lfrac) - ! logical ,parameter :: atm_frac_correct = .true. ! turn on frac correction on atm grid - !--- unconstrained and area conserving? --- - ! real(r8),parameter :: eps_fracsum = 1.0e-12 ! allowed error in sum of fracs - ! real(r8),parameter :: eps_fracval = 1.0e-02 ! allowed error in any frac +- 0,1 - ! real(r8),parameter :: eps_fraclim = 1.0e-20 ! truncation limit in fractions_a(lfrac) - ! logical ,parameter :: atm_frac_correct = .true. ! 
turn on frac correction on atm grid - - type(seq_map) , pointer :: mapper_o2a - type(seq_map) , pointer :: mapper_i2a - type(seq_map) , pointer :: mapper_l2a - type(seq_map) , pointer :: mapper_o2i - type(seq_map) , pointer :: mapper_a2o - type(seq_map) , pointer :: mapper_i2o - type(seq_map) , pointer :: mapper_a2l - type(seq_map) , pointer :: mapper_l2r - type(seq_map) , pointer :: mapper_l2g - - private seq_frac_check - - !=============================================================================== -contains - !=============================================================================== - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_frac_init - ! - ! !DESCRIPTION: - ! Initialize fraction attribute vectors and necessary ocn/ice domain - ! fraction input if appropriate - ! - ! !REVISION HISTORY: - ! 2007-may-07 - M. Vertenstein - initial cpl7 version. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_frac_init( infodata, & - atm, ice, lnd, ocn, glc, rof, wav, iac,& - fractions_a, fractions_i, fractions_l, & - fractions_o, fractions_g, fractions_r, & - fractions_w, fractions_z) - - ! !INPUT/OUTPUT PARAMETERS: - type(seq_infodata_type) , intent(in) :: infodata - type(component_type) , intent(in) :: atm - type(component_type) , intent(in) :: ice - type(component_type) , intent(in) :: lnd - type(component_type) , intent(in) :: ocn - type(component_type) , intent(in) :: glc - type(component_type) , intent(in) :: rof - type(component_type) , intent(in) :: wav - type(component_type) , intent(in) :: iac - type(mct_aVect) , intent(inout) :: fractions_a ! Fractions on atm grid/decomp - type(mct_aVect) , intent(inout) :: fractions_i ! Fractions on ice grid/decomp - type(mct_aVect) , intent(inout) :: fractions_l ! Fractions on lnd grid/decomp - type(mct_aVect) , intent(inout) :: fractions_o ! Fractions on ocn grid/decomp - type(mct_aVect) , intent(inout) :: fractions_g ! Fractions on glc grid/decomp - type(mct_aVect) , intent(inout) :: fractions_r ! Fractions on rof grid/decomp - type(mct_aVect) , intent(inout) :: fractions_w ! Fractions on wav grid/decomp - type(mct_aVect) , intent(inout) :: fractions_z ! Fractions on iac grid/decomp - !EOP - - !----- local ----- - type(mct_ggrid), pointer :: dom_a - type(mct_ggrid), pointer :: dom_i - type(mct_ggrid), pointer :: dom_l - type(mct_ggrid), pointer :: dom_o - type(mct_ggrid), pointer :: dom_g - type(mct_ggrid), pointer :: dom_r - type(mct_ggrid), pointer :: dom_w - type(mct_ggrid), pointer :: dom_z - - logical :: atm_present ! .true. => atm is present - logical :: ice_present ! .true. => ice is present - logical :: ocn_present ! .true. => ocean is present - logical :: lnd_present ! .true. => land is present - logical :: glc_present ! .true. => glc is present - logical :: rof_present ! .true. => rof is present - logical :: wav_present ! .true. => wav is present - logical :: iac_present ! .true. => iac is present - logical :: dead_comps ! .true. => dead models present - - integer :: n ! indices - integer :: ka, ki, kl, ko ! indices - integer :: kf, kk, kr, kg ! indices - integer :: lsize ! local size of ice av - integer :: debug_old ! 
old debug value - - character(*),parameter :: fraclist_a = 'afrac:ifrac:ofrac:lfrac:lfrin' - character(*),parameter :: fraclist_o = 'afrac:ifrac:ofrac:ifrad:ofrad' - character(*),parameter :: fraclist_i = 'afrac:ifrac:ofrac' - character(*),parameter :: fraclist_l = 'afrac:lfrac:lfrin' - character(*),parameter :: fraclist_g = 'gfrac:lfrac' - character(*),parameter :: fraclist_r = 'lfrac:rfrac' - character(*),parameter :: fraclist_w = 'wfrac' - character(*),parameter :: fraclist_z = 'afrac:lfrac' - - !----- formats ----- - character(*),parameter :: subName = '(seq_frac_init) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - lnd_present=lnd_present, & - rof_present=rof_present, & - ice_present=ice_present, & - ocn_present=ocn_present, & - glc_present=glc_present, & - wav_present=wav_present, & - iac_present=iac_present, & - dead_comps=dead_comps) - - dom_a => component_get_dom_cx(atm) - dom_l => component_get_dom_cx(lnd) - dom_i => component_get_dom_cx(ice) - dom_o => component_get_dom_cx(ocn) - dom_r => component_get_dom_cx(rof) - dom_g => component_get_dom_cx(glc) - dom_w => component_get_dom_cx(wav) - dom_z => component_get_dom_cx(iac) - - debug_old = seq_frac_debug - seq_frac_debug = 2 - - ! Initialize fractions on atm grid/decomp (initialize ice fraction to zero) - - if (atm_present) then - lSize = mct_aVect_lSize(dom_a%data) - call mct_aVect_init(fractions_a,rList=fraclist_a,lsize=lsize) - call mct_aVect_zero(fractions_a) - - ka = mct_aVect_indexRa(fractions_a,"afrac",perrWith=subName) - fractions_a%rAttr(ka,:) = 1.0_r8 - endif - - ! Initialize fractions on glc grid decomp, just an initial "guess", updated later - - if (glc_present) then - lSize = mct_aVect_lSize(dom_g%data) - call mct_aVect_init(fractions_g,rList=fraclist_g,lsize=lsize) - call mct_aVect_zero(fractions_g) - - kg = mct_aVect_indexRA(fractions_g,"gfrac",perrWith=subName) - kf = mct_aVect_indexRA(dom_g%data ,"frac" ,perrWith=subName) - fractions_g%rAttr(kg,:) = dom_g%data%rAttr(kf,:) - end if - - ! Initialize fractions on land grid decomp, just an initial "guess", updated later - - if (lnd_present) then - lSize = mct_aVect_lSize(dom_l%data) - call mct_aVect_init(fractions_l,rList=fraclist_l,lsize=lsize) - call mct_aVect_zero(fractions_l) - - kk = mct_aVect_indexRA(fractions_l,"lfrin",perrWith=subName) - kf = mct_aVect_indexRA(dom_l%data ,"frac" ,perrWith=subName) - fractions_l%rAttr(kk,:) = dom_l%data%rAttr(kf,:) - - if (atm_present) then - mapper_l2a => prep_atm_get_mapper_Fl2a() - mapper_a2l => prep_lnd_get_mapper_Fa2l() - call seq_map_map(mapper_l2a, fractions_l, fractions_a, fldlist='lfrin', norm=.false.) - call seq_map_map(mapper_a2l, fractions_a, fractions_l, fldlist='afrac', norm=.false.) - endif - - end if - - ! Initialize fractions on ice grid/decomp (initialize ice fraction to zero) - - if (rof_present) then - lSize = mct_aVect_lSize(dom_r%data) - call mct_aVect_init(fractions_r,rList=fraclist_r,lsize=lsize) - call mct_aVect_zero(fractions_r) - - kr = mct_aVect_indexRa(fractions_r,"rfrac",perrWith=subName) - kf = mct_aVect_indexRA(dom_r%data ,"frac" ,perrWith=subName) - fractions_r%rAttr(kr,:) = dom_r%data%rAttr(kf,:) - end if - - ! 
Initialize fractions on wav grid decomp, just an initial "guess", updated later - - if (wav_present) then - lSize = mct_aVect_lSize(dom_w%data) - call mct_aVect_init(fractions_w,rList=fraclist_w,lsize=lsize) - call mct_aVect_zero(fractions_w) - fractions_w%rAttr(:,:) = 1.0_r8 - end if - - ! Initialize fractions on iac grid decomp, just an initial "guess", updated later - - if (iac_present) then - lSize = mct_aVect_lSize(dom_z%data) - call mct_aVect_init(fractions_z,rList=fraclist_z,lsize=lsize) - call mct_aVect_zero(fractions_z) - fractions_z%rAttr(:,:) = 1.0_r8 - end if - - ! Initialize fractions on ice grid/decomp (initialize ice fraction to zero) - - if (ice_present) then - lSize = mct_aVect_lSize(dom_i%data) - call mct_aVect_init(fractions_i,rList=fraclist_i,lsize=lsize) - call mct_aVect_zero(fractions_i) - - ko = mct_aVect_indexRa(fractions_i,"ofrac",perrWith=subName) - kf = mct_aVect_indexRA(dom_i%data ,"frac" ,perrWith=subName) - fractions_i%rAttr(ko,:) = dom_i%data%rAttr(kf,:) - - if (atm_present) then - mapper_i2a => prep_atm_get_mapper_Fi2a() - call seq_map_map(mapper_i2a,fractions_i,fractions_a,fldlist='ofrac',norm=.false.) - endif - end if - - ! Initialize fractions on ocean grid/decomp (initialize ice fraction to zero) - ! These are initialize the same as for ice - - if (ocn_present) then - lSize = mct_aVect_lSize(dom_o%data) - call mct_aVect_init(fractions_o,rList=fraclist_o,lsize=lsize) - call mct_aVect_zero(fractions_o) - - if (ice_present) then - mapper_i2o => prep_ocn_get_mapper_SFi2o() - call seq_map_map(mapper_i2o,fractions_i,fractions_o,fldlist='ofrac',norm=.false.) - else - ko = mct_aVect_indexRa(fractions_o,"ofrac",perrWith=subName) - kf = mct_aVect_indexRA(dom_o%data ,"frac" ,perrWith=subName) - fractions_o%rAttr(ko,:) = dom_o%data%rAttr(kf,:) - mapper_o2a => prep_atm_get_mapper_Fo2a() - call seq_map_map(mapper_o2a, fractions_o, fractions_a, fldlist='ofrac',norm=.false.) - endif - - if (atm_present) then - mapper_a2o => prep_ocn_get_mapper_Fa2o() - call seq_map_map(mapper_a2o, fractions_a, fractions_o, fldlist='afrac',norm=.false.) - endif - if (ice_present) then - ! --- this should be an atm2ice call above, but atm2ice doesn't work - mapper_o2i => prep_ice_get_mapper_SFo2i() - call seq_map_map(mapper_o2i,fractions_o,fractions_i,fldlist='afrac',norm=.false.) - endif - end if - - ! --- Set ofrac and lfrac on atm grid. These should actually be mapo2a of - ! ofrac and lfrac but we can't map lfrac from o2a due to masked mapping - ! weights. So we have to settle for a residual calculation that is - ! truncated to zero to try to preserve "all ocean" cells. - - if (atm_present) then - ka = mct_aVect_indexRa(fractions_a,"afrac",perrWith=subName) - ki = mct_aVect_indexRa(fractions_a,"ifrac",perrWith=subName) - kl = mct_aVect_indexRa(fractions_a,"lfrac",perrWith=subName) - ko = mct_aVect_indexRa(fractions_a,"ofrac",perrWith=subName) - kk = mct_aVect_indexRa(fractions_a,"lfrin",perrWith=subName) - lSize = mct_aVect_lSize(fractions_a) - - if (ice_present .or. 
ocn_present) then - do n = 1,lsize - fractions_a%rAttr(kl,n) = 1.0_r8 - fractions_a%rAttr(ko,n) - if (abs(fractions_a%rAttr(kl,n)) < eps_fraclim) then - fractions_a%rAttr(kl,n) = 0.0_r8 - if (atm_frac_correct) fractions_a%rAttr(ko,n) = 1.0_r8 - endif - enddo - else if (lnd_present) then - do n = 1,lsize - fractions_a%rAttr(kl,n) = fractions_a%rAttr(kk,n) - fractions_a%rAttr(ko,n) = 1.0_r8 - fractions_a%rAttr(kl,n) - if (abs(fractions_a%rAttr(ko,n)) < eps_fraclim) then - fractions_a%rAttr(ko,n) = 0.0_r8 - if (atm_frac_correct) fractions_a%rAttr(kl,n) = 1.0_r8 - endif - enddo - endif - endif - - ! --- finally, set fractions_l(lfrac) from fractions_a(lfrac) - ! --- and fractions_r(lfrac) from fractions_l(lfrac) - ! --- and fractions_g(lfrac) from fractions_l(lfrac) - - if (lnd_present) then - if (atm_present) then - mapper_a2l => prep_lnd_get_mapper_Fa2l() - call seq_map_map(mapper_a2l, fractions_a, fractions_l, fldlist='lfrac', norm=.false.) - else - ! If the atmosphere is absent, then simply set fractions_l(lfrac) = fractions_l(lfrin) - kk = mct_aVect_indexRA(fractions_l,"lfrin",perrWith=subName) - kl = mct_aVect_indexRA(fractions_l,"lfrac",perrWith=subName) - fractions_l%rAttr(kl,:) = fractions_l%rAttr(kk,:) - end if - end if - if (lnd_present .and. rof_present) then - mapper_l2r => prep_rof_get_mapper_Fl2r() - call seq_map_map(mapper_l2r, fractions_l, fractions_r, fldlist='lfrac', norm=.false.) - endif - if (lnd_present .and. glc_present) then - mapper_l2g => prep_glc_get_mapper_Fl2g() - call seq_map_map(mapper_l2g, fractions_l, fractions_g, fldlist='lfrac', norm=.false.) - end if - - if (lnd_present) call seq_frac_check(fractions_l,'lnd init') - if (glc_present) call seq_frac_check(fractions_g,'glc init') - if (rof_present) call seq_frac_check(fractions_r,'rof init') - if (wav_present) call seq_frac_check(fractions_w,'wav init') - if (iac_present) call seq_frac_check(fractions_z,'iac init') - if (ice_present) call seq_frac_check(fractions_i,'ice init') - if (ocn_present) call seq_frac_check(fractions_o,'ocn init') - if (atm_present .and. (lnd_present.or.ice_present.or.ocn_present)) & - call seq_frac_check(fractions_a,'atm init') - seq_frac_debug = debug_old - - end subroutine seq_frac_init - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_frac_set - ! - ! !DESCRIPTION: - ! Update surface fractions - ! - ! !REVISION HISTORY: - ! 2007-may-07 - M. Vertenstein - initial cpl7 version. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_frac_set(infodata, ice, fractions_a, fractions_i, fractions_o) - - ! !INPUT/OUTPUT PARAMETERS: - type(seq_infodata_type) , intent(in) :: infodata - type(component_type) , intent(in) :: ice - type(mct_aVect) , intent(inout) :: fractions_a ! Fractions on atm - type(mct_aVect) , intent(inout) :: fractions_i ! Fractions on ice - type(mct_aVect) , intent(inout) :: fractions_o ! Fractions on ocn - !EOP - - !----- local ----- - type(mct_aVect), pointer :: i2x_i - type(mct_ggrid), pointer :: dom_i - logical :: atm_present ! true => atm is present - logical :: ice_present ! true => ice is present - logical :: ocn_present ! 
true => ocn is present - integer :: n - integer :: ki, kl, ko, kf - integer :: lsize - real(r8),allocatable :: fcorr(:) - - !----- formats ----- - character(*),parameter :: subName = '(seq_frac_set) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------- - ! Update fractions - ! - Update ice fraction on ice grid first, normalize to total fraction - ! available for cover - ! - Update ocn fraction on ice grid as residual - ! - Map ice/ocn fractions from ice grid to ocean and atm grids - !---------------------------------------------------------------------- - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - ice_present=ice_present, & - ocn_present=ocn_present) - - dom_i => component_get_dom_cx(ice) - i2x_i => component_get_c2x_cx(ice) - - if (ice_present) then - call mct_aVect_copy(i2x_i, fractions_i, "Si_ifrac", "ifrac") - - ki = mct_aVect_indexRA(fractions_i,"ifrac") - ko = mct_aVect_indexRA(fractions_i,"ofrac") - kf = mct_aVect_indexRA(dom_i%data ,"frac" ,perrWith=subName) - fractions_i%rAttr(ki,:) = fractions_i%rAttr(ki,:) * dom_i%data%rAttr(kf,:) - fractions_i%rAttr(ko,:) = dom_i%data%rAttr(kf,:) - fractions_i%rAttr(ki,:) - - call seq_frac_check(fractions_i,'ice set') - - if (ocn_present) then - mapper_i2o => prep_ocn_get_mapper_SFi2o() - call seq_map_map(mapper_i2o, fractions_i, fractions_o, & - fldlist='ofrac:ifrac',norm=.false.) - call seq_frac_check(fractions_o, 'ocn set') - endif - - if (atm_present) then - mapper_i2a => prep_atm_get_mapper_Fi2a() - call seq_map_map(mapper_i2a, fractions_i, fractions_a, & - fldlist='ofrac:ifrac', norm=.false.) - - !tcx--- fraction correction, this forces the fractions_a to sum to 1.0_r8. - ! --- but it introduces a conservation error in mapping - if (atm_frac_correct) then - ki = mct_aVect_indexRA(fractions_a,"ifrac") - ko = mct_aVect_indexRA(fractions_a,"ofrac") - kl = mct_aVect_indexRA(fractions_a,"lfrac") - lSize = mct_aVect_lSize(fractions_a) - allocate(fcorr(lsize)) - do n = 1,lsize - if ((fractions_a%rAttr(ki,n)+fractions_a%rAttr(ko,n)) > 0.0_r8) then - fcorr(n) = ((1.0_r8-fractions_a%rAttr(kl,n))/ & - (fractions_a%rAttr(ki,n)+fractions_a%rAttr(ko,n))) - else - fcorr(n) = 0.0_r8 - endif - enddo - fractions_a%rAttr(ki,:) = fractions_a%rAttr(ki,:) * fcorr(:) - fractions_a%rAttr(ko,:) = fractions_a%rAttr(ko,:) * fcorr(:) - deallocate(fcorr) - endif - - call seq_frac_check(fractions_a,'atm set') - endif - end if - - end subroutine seq_frac_set - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_frac_check - ! - ! !DESCRIPTION: - ! Check surface fractions - ! - ! !REVISION HISTORY: - ! 2008-jun-11 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_frac_check(fractions,string) - - ! !INPUT/OUTPUT PARAMETERS: - - type(mct_aVect) , intent(in) :: fractions ! Fractions datatype - character(len=*), intent(in), optional :: string ! character string - - !EOP - - !----- local ----- - integer :: n, lsize - integer :: ncnt - integer :: mpicom - logical :: iamroot - real(r8) :: sum,diff,maxerr - real(r8) :: aminval,amaxval ! used for lnd - real(r8) :: lminval,lmaxval ! used for lnd - real(r8) :: ominval,omaxval ! 
used for ocn - real(r8) :: iminval,imaxval ! used for ice - real(r8) :: gminval,gmaxval ! used for glc - real(r8) :: rminval,rmaxval ! used for rof - real(r8) :: wminval,wmaxval ! used for wav - real(r8) :: zminval,zmaxval ! used for iac - real(r8) :: kminval,kmaxval ! used for lnd, lfrin - real(r8) :: sminval,smaxval ! used for sum - real(r8) :: tmpmin, tmpmax ! global tmps - integer :: tmpsum ! global tmp - integer :: ka,kl,ki,ko,kg,kk,kr,kw,kz - character(len=128) :: lstring - logical :: error - - !----- formats ----- - character(*),parameter :: subName = '(seq_frac_check) ' - character(*),parameter :: F01 = "('(seq_frac_check) ',2a,i8,g26.18)" - character(*),parameter :: F02 = "('(seq_frac_check) ',2a,2g26.18)" - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - mpicom = seq_comm_mpicom(CPLID) - iamroot = seq_comm_iamroot(CPLID) - - if (present(string)) then - lstring='['//trim(string)//']' - else - lstring='' - endif - - ka = -1 - kl = -1 - ki = -1 - ko = -1 - kk = -1 - kg = -1 - kr = -1 - kw = -1 - kz = -1 - aminval = 999.0_r8 - amaxval = -999.0_r8 - lminval = 999.0_r8 - lmaxval = -999.0_r8 - iminval = 999.0_r8 - imaxval = -999.0_r8 - ominval = 999.0_r8 - omaxval = -999.0_r8 - gminval = 999.0_r8 - gmaxval = -999.0_r8 - kminval = 999.0_r8 - kmaxval = -999.0_r8 - sminval = 999.0_r8 - smaxval = -999.0_r8 - rminval = 999.0_r8 - rmaxval = -999.0_r8 - wminval = 999.0_r8 - wmaxval = -999.0_r8 - zminval = 999.0_r8 - zmaxval = -999.0_r8 - - lsize = mct_avect_lsize(fractions) - ka = mct_aVect_indexRA(fractions,"afrac",perrWith='quiet') - kl = mct_aVect_indexRA(fractions,"lfrac",perrWith='quiet') - ki = mct_aVect_indexRA(fractions,"ifrac",perrWith='quiet') - ko = mct_aVect_indexRA(fractions,"ofrac",perrWith='quiet') - kg = mct_aVect_indexRA(fractions,"gfrac",perrWith='quiet') - kr = mct_aVect_indexRA(fractions,"rfrac",perrWith='quiet') - kw = mct_aVect_indexRA(fractions,"wfrac",perrWith='quiet') - kz = mct_aVect_indexRA(fractions,"zfrac",perrWith='quiet') - kk = mct_aVect_indexRA(fractions,"lfrin",perrWith='quiet') - - if (ka > 0) then - aminval = minval(fractions%rAttr(ka,:)) - amaxval = maxval(fractions%rAttr(ka,:)) - endif - if (kl > 0) then - lminval = minval(fractions%rAttr(kl,:)) - lmaxval = maxval(fractions%rAttr(kl,:)) - endif - if (ko > 0) then - ominval = minval(fractions%rAttr(ko,:)) - omaxval = maxval(fractions%rAttr(ko,:)) - endif - if (ki > 0) then - iminval = minval(fractions%rAttr(ki,:)) - imaxval = maxval(fractions%rAttr(ki,:)) - endif - if (kg > 0) then - gminval = minval(fractions%rAttr(kg,:)) - gmaxval = maxval(fractions%rAttr(kg,:)) - endif - if (kr > 0) then - rminval = minval(fractions%rAttr(kr,:)) - rmaxval = maxval(fractions%rAttr(kr,:)) - endif - if (kw > 0) then - wminval = minval(fractions%rAttr(kw,:)) - wmaxval = maxval(fractions%rAttr(kw,:)) - endif - if (kz > 0) then - zminval = minval(fractions%rAttr(kz,:)) - zmaxval = maxval(fractions%rAttr(kz,:)) - endif - if (kk > 0) then - kminval = minval(fractions%rAttr(kk,:)) - kmaxval = maxval(fractions%rAttr(kk,:)) - endif - - ncnt = 0 - maxerr = 0.0_r8 - if (kl > 0 .and. ko > 0 .and. 
ki > 0) then - do n = 1,lsize - sum = fractions%rAttr(ko,n) + fractions%rAttr(kl,n) + fractions%rAttr(ki,n) - sminval = min(sum,sminval) - smaxval = max(sum,smaxval) - diff = abs(1.0_r8 - sum) - if (diff > eps_fracsum) then - ncnt = ncnt + 1 - maxerr = max(maxerr, diff) - !tcx debug write(logunit,*) trim(lstring),' err# ',ncnt, n, lsize, & - !fractions%rAttr(ko,n),fractions%rAttr(kl,n),fractions%rAttr(ki,n),sum - endif - enddo - endif - - error = .false. - if (ncnt > 0) error = .true. - if (aminval < 0.0_r8-eps_fracval .or. amaxval > 1.0_r8+eps_fracval) error = .true. - if (lminval < 0.0_r8-eps_fracval .or. lmaxval > 1.0_r8+eps_fracval) error = .true. - if (ominval < 0.0_r8-eps_fracval .or. omaxval > 1.0_r8+eps_fracval) error = .true. - if (iminval < 0.0_r8-eps_fracval .or. imaxval > 1.0_r8+eps_fracval) error = .true. - if (gminval < 0.0_r8-eps_fracval .or. gmaxval > 1.0_r8+eps_fracval) error = .true. - if (rminval < 0.0_r8-eps_fracval .or. rmaxval > 1.0_r8+eps_fracval) error = .true. - if (wminval < 0.0_r8-eps_fracval .or. wmaxval > 1.0_r8+eps_fracval) error = .true. - if (zminval < 0.0_r8-eps_fracval .or. zmaxval > 1.0_r8+eps_fracval) error = .true. - if (kminval < 0.0_r8-eps_fracval .or. kmaxval > 1.0_r8+eps_fracval) error = .true. - - if (error .or. seq_frac_debug > 1) then - if (ka > 0) then - call shr_mpi_min(aminval,tmpmin,mpicom,subname//':afrac',all=.false.) - call shr_mpi_max(amaxval,tmpmax,mpicom,subname//':afrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' afrac min/max = ',tmpmin,tmpmax - endif - if (kl > 0) then - call shr_mpi_min(lminval,tmpmin,mpicom,subname//':lfrac',all=.false.) - call shr_mpi_max(lmaxval,tmpmax,mpicom,subname//':lfrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' lfrac min/max = ',tmpmin,tmpmax - endif - if (kg > 0) then - call shr_mpi_min(gminval,tmpmin,mpicom,subname//':gfrac',all=.false.) - call shr_mpi_max(gmaxval,tmpmax,mpicom,subname//':gfrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' gfrac min/max = ',tmpmin,tmpmax - endif - if (ko > 0) then - call shr_mpi_min(ominval,tmpmin,mpicom,subname//':ofrac',all=.false.) - call shr_mpi_max(omaxval,tmpmax,mpicom,subname//':ofrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' ofrac min/max = ',tmpmin,tmpmax - endif - if (ki > 0) then - call shr_mpi_min(iminval,tmpmin,mpicom,subname//':ifrac',all=.false.) - call shr_mpi_max(imaxval,tmpmax,mpicom,subname//':ifrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' ifrac min/max = ',tmpmin,tmpmax - endif - if (kr > 0) then - call shr_mpi_min(rminval,tmpmin,mpicom,subname//':rfrac',all=.false.) - call shr_mpi_max(rmaxval,tmpmax,mpicom,subname//':rfrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' rfrac min/max = ',tmpmin,tmpmax - endif - if (kw > 0) then - call shr_mpi_min(wminval,tmpmin,mpicom,subname//':wfrac',all=.false.) - call shr_mpi_max(wmaxval,tmpmax,mpicom,subname//':wfrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' wfrac min/max = ',tmpmin,tmpmax - endif - if (kz > 0) then - call shr_mpi_min(kminval,tmpmin,mpicom,subname//':zfrac',all=.false.) - call shr_mpi_max(kmaxval,tmpmax,mpicom,subname//':zfrac',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' zfrac min/max = ',tmpmin,tmpmax - endif - if (kk > 0) then - call shr_mpi_min(kminval,tmpmin,mpicom,subname//':lfrin',all=.false.) - call shr_mpi_max(kmaxval,tmpmax,mpicom,subname//':lfrin',all=.false.) 
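A minimal standalone sketch of the sum-to-one consistency check that seq_frac_check performs above, using plain Fortran arrays in place of the MCT attribute vectors. The array values, sizes, and the eps_fracsum tolerance below are invented for illustration only and are not taken from the driver.

program frac_sum_check_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8), parameter :: eps_fracsum = 1.0e-10_r8   ! assumed tolerance, illustration only
  real(r8) :: lfrac(4), ifrac(4), ofrac(4), sums(4), maxerr
  integer  :: n, ncnt

  lfrac = [1.0_r8, 0.3_r8, 0.0_r8, 0.5_r8]          ! made-up land fractions
  ifrac = [0.0_r8, 0.2_r8, 0.4_r8, 0.1_r8]          ! made-up ice fractions
  ofrac = 1.0_r8 - lfrac - ifrac                    ! ocean as the residual, so sums start exact
  ofrac(4) = ofrac(4) + 1.0e-6_r8                   ! inject one inconsistent point

  ncnt   = 0
  maxerr = 0.0_r8
  do n = 1, size(sums)
     sums(n) = lfrac(n) + ofrac(n) + ifrac(n)
     if (abs(1.0_r8 - sums(n)) > eps_fracsum) then  ! same |1 - sum| test as the check above
        ncnt   = ncnt + 1
        maxerr = max(maxerr, abs(1.0_r8 - sums(n)))
     end if
  end do
  print *, 'points failing |1-sum| check =', ncnt, '  max error =', maxerr
end program frac_sum_check_sketch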
- if (iamroot) write(logunit,F02) trim(lstring),' lfrin min/max = ',tmpmin,tmpmax - endif - if (kl > 0 .and. ko > 0 .and. ki > 0) then - call shr_mpi_min(sminval,tmpmin,mpicom,subname//':sum',all=.false.) - call shr_mpi_max(smaxval,tmpmax,mpicom,subname//':sum',all=.false.) - if (iamroot) write(logunit,F02) trim(lstring),' sum min/max = ',tmpmin,tmpmax - call shr_mpi_sum(ncnt ,tmpsum,mpicom,subname//':sum',all=.false.) - call shr_mpi_max(maxerr,tmpmax,mpicom,subname//':sum',all=.false.) - if (iamroot) write(logunit,F01) trim(lstring),' sum ncnt/maxerr = ',tmpsum,tmpmax - endif - if (error .and. .not. seq_frac_dead .and. seq_frac_abort) then - write(logunit,F02) trim(lstring),' ERROR aborting ' - call shr_sys_abort() - elseif (error) then - if (iamroot) write(logunit,F02) trim(lstring),' ERROR but NOT aborting ' - endif - endif - - end subroutine seq_frac_check - -end module seq_frac_mct diff --git a/src/drivers/mct/main/seq_hist_mod.F90 b/src/drivers/mct/main/seq_hist_mod.F90 deleted file mode 100644 index 43385493200..00000000000 --- a/src/drivers/mct/main/seq_hist_mod.F90 +++ /dev/null @@ -1,1578 +0,0 @@ -! !MODULE: seq_hist_mod -- cpl7 history writing routines -! -! !DESCRIPTION: -! -! Creates cpl7 history files, instantanious, time-avg, and auxilliary -! -! !REVISION HISTORY: -! 2009-Sep-25 - B. Kauffman - move from cpl7 main program into hist module -! 2009-mmm-dd - T. Craig - initial versions -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_hist_mod - - ! !USES: - - use shr_kind_mod, only: R8 => SHR_KIND_R8, IN => SHR_KIND_IN - use shr_kind_mod, only: CL => SHR_KIND_CL, CS => SHR_KIND_CS - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use shr_cal_mod, only: shr_cal_date2ymd, shr_cal_datetod2string, shr_cal_ymdtod2string - use mct_mod ! adds mct_ prefix to mct lib - use ESMF - - use seq_infodata_mod ! "infodata" gathers various control flags into one datatype - use seq_timemgr_mod ! clock & alarm routines - use seq_io_mod ! lower level io routines - - use seq_comm_mct , only: seq_comm_getdata=>seq_comm_setptrs - use seq_comm_mct, only: seq_comm_setnthreads, seq_comm_iamin - use seq_comm_mct, only: CPLID, GLOID, logunit, loglevel - use seq_comm_mct, only: num_inst_atm, num_inst_lnd, num_inst_ocn - use seq_comm_mct, only: num_inst_ice, num_inst_glc, num_inst_wav - use seq_comm_mct, only: num_inst_rof, num_inst_xao, num_inst_iac - - use prep_ocn_mod, only: prep_ocn_get_r2x_ox - use prep_ocn_mod, only: prep_ocn_get_x2oacc_ox - use prep_ocn_mod, only: prep_ocn_get_x2oacc_ox_cnt - use prep_atm_mod, only: prep_atm_get_o2x_ax - use prep_aoflux_mod, only: prep_aoflux_get_xao_ox - use prep_aoflux_mod, only: prep_aoflux_get_xao_ax - - use component_type_mod - - implicit none - - private - - ! !PUBLIC TYPES: - - ! no public types - - ! !PUBLIC MEMBER FUNCTIONS - - public :: seq_hist_write ! write instantaneous hist file - public :: seq_hist_writeavg ! write time-avg hist file - public :: seq_hist_writeaux ! write auxiliary hist files - public :: seq_hist_spewav ! write avs to history file for debugging - - ! !PUBLIC DATA MEMBERS: - - ! no public data - - !EOP - - !---------------------------------------------------------------------------- - ! local/module data - !---------------------------------------------------------------------------- - - logical :: iamin_CPLID ! pe associated with CPLID - integer(IN) :: mpicom_GLOID ! MPI global communicator - integer(IN) :: mpicom_CPLID ! MPI cpl communicator - - integer(IN) :: nthreads_GLOID ! 
OMP global number of threads - integer(IN) :: nthreads_CPLID ! OMP cpl number of threads - logical :: drv_threading ! driver threading control - - logical :: atm_present ! .true. => atm is present - logical :: lnd_present ! .true. => land is present - logical :: ice_present ! .true. => ice is present - logical :: ocn_present ! .true. => ocn is present - logical :: rof_present ! .true. => land runoff is present - logical :: glc_present ! .true. => glc is present - logical :: wav_present ! .true. => wav is present - logical :: iac_present ! .true. => iac is present - - logical :: atm_prognostic ! .true. => atm comp expects input - logical :: lnd_prognostic ! .true. => lnd comp expects input - logical :: ice_prognostic ! .true. => ice comp expects input - logical :: ocn_prognostic ! .true. => ocn comp expects input - logical :: ocnrof_prognostic ! .true. => ocn comp expects runoff input - logical :: rof_prognostic ! .true. => rof comp expects input - logical :: glc_prognostic ! .true. => glc comp expects input - logical :: wav_prognostic ! .true. => wav comp expects input - logical :: iac_prognostic ! .true. => iac comp expects input - - logical :: histavg_atm ! .true. => write atm fields to average history file - logical :: histavg_lnd ! .true. => write lnd fields to average history file - logical :: histavg_ocn ! .true. => write ocn fields to average history file - logical :: histavg_ice ! .true. => write ice fields to average history file - logical :: histavg_rof ! .true. => write rof fields to average history file - logical :: histavg_glc ! .true. => write glc fields to average history file - logical :: histavg_wav ! .true. => write wav fields to average history file - logical :: histavg_iac ! .true. => write iac fields to average history file - logical :: histavg_xao ! .true. => write flux xao fields to average history file - - logical :: single_column - - !--- domain equivalent 2d grid size --- - integer(IN) :: atm_nx, atm_ny ! nx,ny of 2d grid, if known - integer(IN) :: lnd_nx, lnd_ny ! nx,ny of 2d grid, if known - integer(IN) :: ice_nx, ice_ny ! nx,ny of 2d grid, if known - integer(IN) :: ocn_nx, ocn_ny ! nx,ny of 2d grid, if known - integer(IN) :: rof_nx, rof_ny ! nx,ny of 2d grid, if known - integer(IN) :: glc_nx, glc_ny ! nx,ny of 2d grid, if known - integer(IN) :: wav_nx, wav_ny ! nx,ny of 2d grid, if known - integer(IN) :: iac_nx, iac_ny ! nx,ny of 2d grid, if known - - !--- temporary pointers --- - type(mct_aVect), pointer :: r2x_ox(:) - type(mct_aVect), pointer :: x2oacc_ox(:) - integer , pointer :: x2oacc_ox_cnt - type(mct_aVect), pointer :: xao_ox(:) - type(mct_aVect), pointer :: xao_ax(:) - type(mct_aVect), pointer :: o2x_ax(:) - - !=============================================================================== -contains - !=============================================================================== - - subroutine seq_hist_write(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, fractions_rx, & - fractions_gx, fractions_wx, fractions_zx, cpl_inst_tag) - - implicit none - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type (ESMF_Clock) , intent(in) :: EClock_d ! 
driver clock - type (component_type) , intent(inout) :: atm(:) - type (component_type) , intent(inout) :: lnd(:) - type (component_type) , intent(inout) :: ice(:) - type (component_type) , intent(inout) :: ocn(:) - type (component_type) , intent(inout) :: rof(:) - type (component_type) , intent(inout) :: glc(:) - type (component_type) , intent(inout) :: wav(:) - type (component_type) , intent(inout) :: iac(:) - type(mct_aVect) , intent(inout) :: fractions_ax(:) ! Fractions on atm grid/decomp - type(mct_aVect) , intent(inout) :: fractions_lx(:) ! Fractions on lnd grid/decomp - type(mct_aVect) , intent(inout) :: fractions_ix(:) ! Fractions on ice grid/decomp - type(mct_aVect) , intent(inout) :: fractions_ox(:) ! Fractions on ocn grid/decomp - type(mct_aVect) , intent(inout) :: fractions_rx(:) ! Fractions on rof grid/decomp - type(mct_aVect) , intent(inout) :: fractions_gx(:) ! Fractions on glc grid/decomp - type(mct_aVect) , intent(inout) :: fractions_wx(:) ! Fractions on wav grid/decomp - type(mct_aVect) , intent(inout) :: fractions_zx(:) ! Fractions on iac grid/decomp - character(len=*) , intent(in) :: cpl_inst_tag - ! - ! Local Variables - integer(IN) :: curr_ymd ! Current date YYYYMMDD - integer(IN) :: curr_tod ! Current time-of-day (s) - integer(IN) :: start_ymd ! Starting date YYYYMMDD - integer(IN) :: start_tod ! Starting time-of-day (s) - real(r8) :: curr_time ! Time interval since reference time - integer(IN) :: fk ! index - character(CL) :: time_units ! units of time variable - character(CL) :: calendar ! calendar type - character(CL) :: case_name ! case name - character(CL) :: hist_file ! Local path to history filename - real(r8) :: tbnds(2) ! CF1.0 time bounds - logical :: whead,wdata ! for writing restart/history cdf files - character(len=18) :: date_str - type(mct_gsMap), pointer :: gsmap - type(mct_gGrid), pointer :: dom ! comp domain on cpl pes - character(CL) :: model_doi_url - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! 
get required infodata - !---------------------------------------------------------------------------- - iamin_CPLID = seq_comm_iamin(CPLID) - - call seq_comm_getdata(GLOID,mpicom=mpicom_GLOID,nthreads=nthreads_GLOID) - call seq_comm_getdata(CPLID,mpicom=mpicom_CPLID,nthreads=nthreads_CPLID) - - call seq_infodata_getData(infodata, & - drv_threading=drv_threading, & - atm_present=atm_present, & - lnd_present=lnd_present, & - rof_present=rof_present, & - ice_present=ice_present, & - ocn_present=ocn_present, & - glc_present=glc_present, & - wav_present=wav_present, & - iac_present=iac_present, & - atm_prognostic=atm_prognostic, & - lnd_prognostic=lnd_prognostic, & - ice_prognostic=ice_prognostic, & - ocn_prognostic=ocn_prognostic, & - ocnrof_prognostic=ocnrof_prognostic, & - rof_prognostic=rof_prognostic, & - glc_prognostic=glc_prognostic, & - wav_prognostic=wav_prognostic, & - iac_prognostic=iac_prognostic, & - atm_nx=atm_nx, atm_ny=atm_ny, & - lnd_nx=lnd_nx, lnd_ny=lnd_ny, & - rof_nx=rof_nx, rof_ny=rof_ny, & - ice_nx=ice_nx, ice_ny=ice_ny, & - glc_nx=glc_nx, glc_ny=glc_ny, & - wav_nx=wav_nx, wav_ny=wav_ny, & - iac_nx=iac_nx, iac_ny=iac_ny, & - ocn_nx=ocn_nx, ocn_ny=ocn_ny, & - single_column=single_column, & - case_name=case_name, & - model_doi_url=model_doi_url) - - !--- Get current date from clock needed to label the history pointer file --- - - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=curr_ymd, curr_tod=curr_tod, & - start_ymd=start_ymd, start_tod=start_tod, curr_time=curr_time, & - calendar=calendar) - call shr_cal_datetod2string(date_str, curr_ymd, curr_tod) - - write(hist_file,"(6a)") & - trim(case_name), '.cpl',cpl_inst_tag,'.hi.', trim(date_str),'.nc' - - time_units = 'days since ' & - // trim(seq_io_date2yyyymmdd(start_ymd)) // ' ' // seq_io_sec2hms(start_tod) - - if (iamin_CPLID) then - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - call seq_io_wopen(hist_file,clobber=.true., model_doi_url=model_doi_url) - - ! loop twice, first time write header, second time write data for perf - - do fk = 1,2 - if (fk == 1) then - whead = .true. - wdata = .false. - elseif (fk == 2) then - whead = .false. - wdata = .true. - call seq_io_enddef(hist_file) - else - call shr_sys_abort('seq_hist_write fk illegal') - end if - - tbnds = curr_time - !------- tcx nov 2011 tbnds of same values causes problems in ferret - if (tbnds(1) >= tbnds(2)) then - call seq_io_write(hist_file,& - time_units=time_units, time_cal=calendar, time_val=curr_time, & - whead=whead, wdata=wdata) - else - call seq_io_write(hist_file, & - time_units=time_units, time_cal=calendar, time_val=curr_time, & - whead=whead, wdata=wdata, tbnds=tbnds) - endif - - if (atm_present) then - gsmap => component_get_gsmap_cx(atm(1)) - dom => component_get_dom_cx(atm(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='doma', & - scolumn=single_column) - call seq_io_write(hist_file, gsmap, fractions_ax, 'fractions_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='fraca', & - scolumn=single_column) - call seq_io_write(hist_file, atm, 'x2c', 'x2a_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='x2a', & - scolumn=single_column) - call seq_io_write(hist_file, atm, 'c2x', 'a2x_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='a2x', & - scolumn=single_column) - !call seq_io_write(hist_file, gsmap, l2x_ax, 'l2x_ax', & - ! 
nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='l2x_ax') - !call seq_io_write(hist_file, gsmap, o2x_ax, 'o2x_ax', & - ! nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='o2x_ax') - !call seq_io_write(hist_file, gsmap, i2x_ax, 'i2x_ax', & - ! nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='i2x_ax') - endif - - if (lnd_present) then - gsmap => component_get_gsmap_cx(lnd(1)) - dom => component_get_dom_cx(lnd(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_lx', & - nx=lnd_nx, ny=lnd_ny, nt=1, whead=whead, wdata=wdata, pre='doml') - call seq_io_write(hist_file, gsmap, fractions_lx, 'fractions_lx', & - nx=lnd_nx, ny=lnd_ny, nt=1, whead=whead, wdata=wdata, pre='fracl') - call seq_io_write(hist_file, lnd, 'c2x', 'l2x_lx', & - nx=lnd_nx, ny=lnd_ny, nt=1, whead=whead, wdata=wdata, pre='l2x') - call seq_io_write(hist_file, lnd, 'x2c', 'x2l_lx',& - nx=lnd_nx, ny=lnd_ny, nt=1, whead=whead, wdata=wdata, pre='x2l') - endif - - if (rof_present) then - gsmap => component_get_gsmap_cx(rof(1)) - dom => component_get_dom_cx(rof(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_rx', & - nx=rof_nx, ny=rof_ny, nt=1, whead=whead, wdata=wdata, pre='domr') - call seq_io_write(hist_file, gsmap, fractions_rx, 'fractions_rx', & - nx=rof_nx, ny=rof_ny, nt=1, whead=whead, wdata=wdata, pre='fracr') - call seq_io_write(hist_file, rof, 'c2x', 'r2x_rx', & - nx=rof_nx, ny=rof_ny, nt=1, whead=whead, wdata=wdata, pre='r2x') - call seq_io_write(hist_file, rof, 'x2c', 'x2r_rx', & - nx=rof_nx, ny=rof_ny, nt=1, whead=whead, wdata=wdata, pre='x2r') - endif - - if (rof_present .and. ocnrof_prognostic) then - gsmap => component_get_gsmap_cx(ocn(1)) - r2x_ox => prep_ocn_get_r2x_ox() - call seq_io_write(hist_file, gsmap, r2x_ox, 'r2x_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='r2xo') - endif - - if (ocn_present) then - gsmap => component_get_gsmap_cx(ocn(1)) - dom => component_get_dom_cx(ocn(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='domo') - call seq_io_write(hist_file, gsmap, fractions_ox, 'fractions_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='fraco') - call seq_io_write(hist_file, ocn, 'c2x', 'o2x_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='o2x') - !call seq_io_write(hist_file, ocn, 'x2c', 'x2o_ox', & - ! 
nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='x2o') - - gsmap => component_get_gsmap_cx(ocn(1)) - x2oacc_ox => prep_ocn_get_x2oacc_ox() - call seq_io_write(hist_file, gsmap, x2oacc_ox, 'x2oacc_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='x2oacc') - - gsmap => component_get_gsmap_cx(ocn(1)) - x2oacc_ox_cnt => prep_ocn_get_x2oacc_ox_cnt() - call seq_io_write(hist_file, x2oacc_ox_cnt, 'x2oacc_ox_cnt', & - whead=whead, wdata=wdata) - gsmap => component_get_gsmap_cx(ocn(1)) - xao_ox => prep_aoflux_get_xao_ox() - call seq_io_write(hist_file, gsmap, xao_ox, 'xao_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='xaoo') - - gsmap => component_get_gsmap_cx(atm(1)) - o2x_ax => prep_atm_get_o2x_ax() - call seq_io_write(hist_file, gsmap, o2x_ax, 'o2x_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='o2xa') - - gsmap => component_get_gsmap_cx(atm(1)) - xao_ax => prep_aoflux_get_xao_ax() - call seq_io_write(hist_file, gsmap, xao_ax, 'xao_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='xaoa') - endif - - if (ice_present) then - gsmap => component_get_gsmap_cx(ice(1)) - dom => component_get_dom_cx(ice(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_ix', & - nx=ice_nx, ny=ice_ny, nt=1, whead=whead, wdata=wdata, pre='domi') - call seq_io_write(hist_file, gsmap, fractions_ix, 'fractions_ix', & - nx=ice_nx, ny=ice_ny, nt=1, whead=whead, wdata=wdata, pre='fraci') - call seq_io_write(hist_file, ice, 'c2x', 'i2x_ix', & - nx=ice_nx, ny=ice_ny, nt=1, whead=whead, wdata=wdata, pre='i2x') - call seq_io_write(hist_file, ice, 'x2c', 'x2i_ix', & - nx=ice_nx, ny=ice_ny, nt=1, whead=whead, wdata=wdata, pre='x2i') - endif - - if (glc_present) then - gsmap => component_get_gsmap_cx(glc(1)) - dom => component_get_dom_cx(glc(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_gx', & - nx=glc_nx, ny=glc_ny, nt=1, whead=whead, wdata=wdata, pre='domg') - call seq_io_write(hist_file, gsmap, fractions_gx, 'fractions_gx', & - nx=glc_nx, ny=glc_ny, nt=1, whead=whead, wdata=wdata, pre='fracg') - call seq_io_write(hist_file, glc, 'c2x', 'g2x_gx', & - nx=glc_nx, ny=glc_ny, nt=1, whead=whead, wdata=wdata, pre='g2x') - call seq_io_write(hist_file, glc, 'x2c', 'x2g_gx', & - nx=glc_nx, ny=glc_ny, nt=1, whead=whead, wdata=wdata, pre='x2g') - endif - - if (wav_present) then - gsmap => component_get_gsmap_cx(wav(1)) - dom => component_get_dom_cx(wav(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_wx', & - nx=wav_nx, ny=wav_ny, nt=1, whead=whead, wdata=wdata, pre='domw') - call seq_io_write(hist_file, gsmap, fractions_wx, 'fractions_wx', & - nx=wav_nx, ny=wav_ny, nt=1, whead=whead, wdata=wdata, pre='fracw') - call seq_io_write(hist_file, wav, 'c2x', 'w2x_wx', & - nx=wav_nx, ny=wav_ny, nt=1, whead=whead, wdata=wdata, pre='w2x') - call seq_io_write(hist_file, wav, 'x2c', 'x2w_wx', & - nx=wav_nx, ny=wav_ny, nt=1, whead=whead, wdata=wdata, pre='x2w') - endif - - if (iac_present) then - gsmap => component_get_gsmap_cx(iac(1)) - dom => component_get_dom_cx(iac(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_zx', & - nx=iac_nx, ny=iac_ny, nt=1, whead=whead, wdata=wdata, pre='domz') - call seq_io_write(hist_file, gsmap, fractions_zx, 'fractions_zx', & - nx=iac_nx, ny=iac_ny, nt=1, whead=whead, wdata=wdata, pre='fracz') - call seq_io_write(hist_file, iac, 'c2x', 'z2x_zx', & - nx=iac_nx, ny=iac_ny, nt=1, whead=whead, wdata=wdata, pre='w2x') - call seq_io_write(hist_file, iac, 'x2c', 'x2z_zx', & - nx=iac_nx, ny=iac_ny, nt=1, 
whead=whead, wdata=wdata, pre='x2w') - endif - enddo - - call seq_io_close(hist_file) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - end subroutine seq_hist_write - - !=============================================================================== - - subroutine seq_hist_writeavg(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, iac, write_now, cpl_inst_tag) - - implicit none - - type(seq_infodata_type) , intent(in) :: infodata - type (ESMF_Clock) , intent(in) :: EClock_d ! driver clock - type (component_type) , intent(in) :: atm(:) - type (component_type) , intent(in) :: lnd(:) - type (component_type) , intent(in) :: ice(:) - type (component_type) , intent(in) :: ocn(:) - type (component_type) , intent(in) :: rof(:) - type (component_type) , intent(in) :: glc(:) - type (component_type) , intent(in) :: wav(:) - type (component_type) , intent(in) :: iac(:) - logical , intent(in) :: write_now ! write or accumulate - character(len=*) , intent(in) :: cpl_inst_tag - - integer(IN) :: curr_ymd ! Current date YYYYMMDD - integer(IN) :: curr_tod ! Current time-of-day (s) - integer(IN) :: prev_ymd ! Previous date YYYYMMDD - integer(IN) :: prev_tod ! Previous time-of-day (s) - integer(IN) :: start_ymd ! Starting date YYYYMMDD - integer(IN) :: start_tod ! Starting time-of-day (s) - real(r8) :: curr_time ! Time interval since reference time - real(r8) :: prev_time ! Time interval since reference time - real(r8) :: avg_time ! Average time of tavg - integer(IN) :: yy, mm, dd ! year, month, day - integer(IN) :: fk ! index - character(CL) :: time_units ! units of time variable - character(CL) :: calendar ! calendar type - integer(IN) :: lsize ! local size of an aVect - character(CL) :: case_name ! case name - character(CL) :: hist_file ! Local path to history filename - logical :: whead, wdata ! flags write header vs. data - integer(IN) :: iidx ! component instance counter - - type(mct_aVect), save :: a2x_ax_avg(num_inst_atm) ! tavg aVect/bundle - type(mct_aVect), save :: x2a_ax_avg(num_inst_atm) - type(mct_aVect), save :: l2x_lx_avg(num_inst_lnd) - type(mct_aVect), save :: x2l_lx_avg(num_inst_lnd) - type(mct_aVect), save :: r2x_rx_avg(num_inst_rof) - type(mct_aVect), save :: x2r_rx_avg(num_inst_rof) - type(mct_aVect), save :: o2x_ox_avg(num_inst_ocn) - type(mct_aVect), save :: x2o_ox_avg(num_inst_ocn) - type(mct_aVect), save :: i2x_ix_avg(num_inst_ice) - type(mct_aVect), save :: x2i_ix_avg(num_inst_ice) - type(mct_aVect), save :: g2x_gx_avg(num_inst_glc) - type(mct_aVect), save :: x2g_gx_avg(num_inst_glc) - type(mct_aVect), save :: w2x_wx_avg(num_inst_wav) - type(mct_aVect), save :: x2w_wx_avg(num_inst_wav) - type(mct_aVect), save :: z2x_zx_avg(num_inst_iac) - type(mct_aVect), save :: x2z_zx_avg(num_inst_iac) - type(mct_aVect), save, pointer :: xao_ox_avg(:) - type(mct_aVect), save, pointer :: xao_ax_avg(:) - - integer(IN) , save :: cnt ! counts samples in tavg - real(r8) , save :: tbnds(2) ! CF1.0 time bounds - character(len=18) :: date_str - - logical , save :: first_call = .true. ! flags 1st call of this routine - - type(mct_gsMap), pointer :: gsmap ! component decomp on cpl pes - type(mct_gGrid), pointer :: dom ! component domain on cpl pes - type(mct_avect), pointer :: c2x ! component->coupler avs on cpl pes - type(mct_avect), pointer :: x2c ! coupler->component avs on cpl pes - character(CL) :: model_doi_url - !------------------------------------------------------------------------------- - ! 
- !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! get required infodata - !---------------------------------------------------------------------------- - iamin_CPLID = seq_comm_iamin(CPLID) - call seq_comm_getdata(GLOID, & - mpicom=mpicom_GLOID, nthreads=nthreads_GLOID) - call seq_comm_getdata(CPLID, & - mpicom=mpicom_CPLID, nthreads=nthreads_CPLID) - - call seq_infodata_getData(infodata, & - drv_threading=drv_threading, & - atm_present=atm_present, & - lnd_present=lnd_present, & - rof_present=rof_present, & - ice_present=ice_present, & - ocn_present=ocn_present, & - glc_present=glc_present, & - wav_present=wav_present, & - iac_present=iac_present, & - atm_prognostic=atm_prognostic, & - lnd_prognostic=lnd_prognostic, & - ice_prognostic=ice_prognostic, & - ocn_prognostic=ocn_prognostic, & - ocnrof_prognostic=ocnrof_prognostic, & - glc_prognostic=glc_prognostic, & - wav_prognostic=wav_prognostic, & - atm_nx=atm_nx, atm_ny=atm_ny, & - lnd_nx=lnd_nx, lnd_ny=lnd_ny, & - rof_nx=rof_nx, rof_ny=rof_ny, & - ice_nx=ice_nx, ice_ny=ice_ny, & - glc_nx=glc_nx, glc_ny=glc_ny, & - wav_nx=wav_nx, wav_ny=wav_ny, & - iac_nx=iac_nx, iac_ny=iac_ny, & - ocn_nx=ocn_nx, ocn_ny=ocn_ny, & - histavg_atm=histavg_atm, & - histavg_lnd=histavg_lnd, & - histavg_ocn=histavg_ocn, & - histavg_ice=histavg_ice, & - histavg_rof=histavg_rof, & - histavg_glc=histavg_glc, & - histavg_wav=histavg_wav, & - histavg_iac=histavg_iac, & - histavg_xao=histavg_xao, & - model_doi_url=model_doi_url) - - ! Get current date from clock needed to label the histavg pointer file - - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=curr_ymd, curr_tod=curr_tod, & - start_ymd=start_ymd, start_tod=start_tod, curr_time=curr_time, prev_time=prev_time, & - calendar=calendar) - - if (first_call) then - if (atm_present .and. histavg_atm) then - do iidx = 1, num_inst_atm - c2x => component_get_c2x_cx(atm(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(a2x_ax_avg(iidx), c2x, lsize) - call mct_aVect_zero(a2x_ax_avg(iidx)) - - x2c => component_get_x2c_cx(atm(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2a_ax_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2a_ax_avg(iidx)) - enddo - endif - if (lnd_present .and. histavg_lnd) then - do iidx = 1, num_inst_lnd - c2x => component_get_c2x_cx(lnd(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(l2x_lx_avg(iidx), c2x, lsize) - call mct_aVect_zero(l2x_lx_avg(iidx)) - - x2c => component_get_x2c_cx(lnd(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2l_lx_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2l_lx_avg(iidx)) - enddo - endif - if (rof_present .and. histavg_rof) then - do iidx = 1, num_inst_rof - c2x => component_get_c2x_cx(rof(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(r2x_rx_avg(iidx), c2x, lsize) - call mct_aVect_zero(r2x_rx_avg(iidx)) - - x2c => component_get_x2c_cx(rof(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2r_rx_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2r_rx_avg(iidx)) - enddo - endif - if (ocn_present .and. 
histavg_ocn) then - do iidx = 1, num_inst_ocn - c2x => component_get_c2x_cx(ocn(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(o2x_ox_avg(iidx), c2x, lsize) - call mct_aVect_zero(o2x_ox_avg(iidx)) - - x2c => component_get_x2c_cx(ocn(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2o_ox_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2o_ox_avg(iidx)) - enddo - endif - if (ice_present .and. histavg_ice) then - do iidx = 1, num_inst_ice - c2x => component_get_c2x_cx(ice(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(i2x_ix_avg(iidx), c2x, lsize) - call mct_aVect_zero(i2x_ix_avg(iidx)) - - x2c => component_get_x2c_cx(ice(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2i_ix_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2i_ix_avg(iidx)) - enddo - endif - if (glc_present .and. histavg_glc) then - do iidx = 1, num_inst_glc - c2x => component_get_c2x_cx(glc(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(g2x_gx_avg(iidx), c2x, lsize) - call mct_aVect_zero(g2x_gx_avg(iidx)) - - x2c => component_get_x2c_cx(glc(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2g_gx_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2g_gx_avg(iidx)) - enddo - endif - if (wav_present .and. histavg_wav) then - do iidx = 1, num_inst_wav - c2x => component_get_c2x_cx(wav(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(w2x_wx_avg(iidx), c2x, lsize) - call mct_aVect_zero(w2x_wx_avg(iidx)) - - x2c => component_get_x2c_cx(wav(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2w_wx_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2w_wx_avg(iidx)) - enddo - endif - if (iac_present .and. histavg_iac) then - do iidx = 1, num_inst_iac - c2x => component_get_c2x_cx(iac(iidx)) - lsize = mct_aVect_lsize(c2x) - call mct_aVect_init(z2x_zx_avg(iidx), c2x, lsize) - call mct_aVect_zero(z2x_zx_avg(iidx)) - - x2c => component_get_x2c_cx(iac(iidx)) - lsize = mct_aVect_lsize(x2c) - call mct_aVect_init(x2z_zx_avg(iidx), x2c, lsize) - call mct_aVect_zero(x2z_zx_avg(iidx)) - enddo - endif - if (ocn_present .and. histavg_xao) then - allocate(xao_ox_avg(num_inst_xao)) - xao_ox => prep_aoflux_get_xao_ox() - do iidx = 1, num_inst_xao - lsize = mct_aVect_lsize(xao_ox(iidx)) - call mct_aVect_init(xao_ox_avg(iidx), xao_ox(iidx), lsize) - call mct_aVect_zero(xao_ox_avg(iidx)) - enddo - endif - if (atm_present .and. histavg_xao) then - allocate(xao_ax_avg(num_inst_xao)) - xao_ax => prep_aoflux_get_xao_ax() - do iidx = 1, num_inst_xao - lsize = mct_aVect_lsize(xao_ax(iidx)) - call mct_aVect_init(xao_ax_avg(iidx), xao_ax(iidx), lsize) - call mct_aVect_zero(xao_ax_avg(iidx)) - enddo - endif - cnt = 0 - tbnds(1) = prev_time - first_call = .false. - endif - - if (.not.write_now) then - cnt = cnt + 1 - if (atm_present .and. histavg_atm) then - do iidx = 1, num_inst_atm - c2x => component_get_c2x_cx(atm(iidx)) - x2c => component_get_x2c_cx(atm(iidx)) - a2x_ax_avg(iidx)%rAttr = a2x_ax_avg(iidx)%rAttr + c2x%rAttr - x2a_ax_avg(iidx)%rAttr = x2a_ax_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (lnd_present .and. histavg_lnd) then - do iidx = 1, num_inst_lnd - c2x => component_get_c2x_cx(lnd(iidx)) - x2c => component_get_x2c_cx(lnd(iidx)) - l2x_lx_avg(iidx)%rAttr = l2x_lx_avg(iidx)%rAttr + c2x%rAttr - x2l_lx_avg(iidx)%rAttr = x2l_lx_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (rof_present .and. 
histavg_rof) then - do iidx = 1, num_inst_rof - c2x => component_get_c2x_cx(rof(iidx)) - x2c => component_get_x2c_cx(rof(iidx)) - r2x_rx_avg(iidx)%rAttr = r2x_rx_avg(iidx)%rAttr + c2x%rAttr - x2r_rx_avg(iidx)%rAttr = x2r_rx_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (ocn_present .and. histavg_ocn) then - do iidx = 1, num_inst_ocn - c2x => component_get_c2x_cx(ocn(iidx)) - x2c => component_get_x2c_cx(ocn(iidx)) - o2x_ox_avg(iidx)%rAttr = o2x_ox_avg(iidx)%rAttr + c2x%rAttr - x2o_ox_avg(iidx)%rAttr = x2o_ox_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (ice_present .and. histavg_ice) then - do iidx = 1, num_inst_ice - c2x => component_get_c2x_cx(ice(iidx)) - x2c => component_get_x2c_cx(ice(iidx)) - i2x_ix_avg(iidx)%rAttr = i2x_ix_avg(iidx)%rAttr + c2x%rAttr - x2i_ix_avg(iidx)%rAttr = x2i_ix_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (glc_present .and. histavg_glc) then - do iidx = 1, num_inst_glc - c2x => component_get_c2x_cx(glc(iidx)) - x2c => component_get_x2c_cx(glc(iidx)) - g2x_gx_avg(iidx)%rAttr = g2x_gx_avg(iidx)%rAttr + c2x%rAttr - x2g_gx_avg(iidx)%rAttr = x2g_gx_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (wav_present .and. histavg_wav) then - do iidx = 1, num_inst_wav - c2x => component_get_c2x_cx(wav(iidx)) - x2c => component_get_x2c_cx(wav(iidx)) - w2x_wx_avg(iidx)%rAttr = w2x_wx_avg(iidx)%rAttr + c2x%rAttr - x2w_wx_avg(iidx)%rAttr = x2w_wx_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (iac_present .and. histavg_iac) then - do iidx = 1, num_inst_iac - c2x => component_get_c2x_cx(iac(iidx)) - x2c => component_get_x2c_cx(iac(iidx)) - z2x_zx_avg(iidx)%rAttr = z2x_zx_avg(iidx)%rAttr + c2x%rAttr - x2z_zx_avg(iidx)%rAttr = x2z_zx_avg(iidx)%rAttr + x2c%rAttr - enddo - endif - if (ocn_present .and. histavg_xao) then - xao_ox => prep_aoflux_get_xao_ox() - do iidx = 1, num_inst_ocn - xao_ox_avg(iidx)%rAttr = xao_ox_avg(iidx)%rAttr + xao_ox(iidx)%rAttr - enddo - endif - if (atm_present .and. histavg_xao) then - xao_ax => prep_aoflux_get_xao_ax() - do iidx = 1, num_inst_ocn - xao_ax_avg(iidx)%rAttr = xao_ax_avg(iidx)%rAttr + xao_ax(iidx)%rAttr - enddo - endif - - else - - cnt = cnt + 1 - tbnds(2) = curr_time - if (atm_present .and. histavg_atm) then - do iidx = 1, num_inst_atm - c2x => component_get_c2x_cx(atm(iidx)) - x2c => component_get_x2c_cx(atm(iidx)) - a2x_ax_avg(iidx)%rAttr = (a2x_ax_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2a_ax_avg(iidx)%rAttr = (x2a_ax_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (lnd_present .and. histavg_lnd) then - do iidx = 1, num_inst_lnd - c2x => component_get_c2x_cx(lnd(iidx)) - x2c => component_get_x2c_cx(lnd(iidx)) - l2x_lx_avg(iidx)%rAttr = (l2x_lx_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2l_lx_avg(iidx)%rAttr = (x2l_lx_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (rof_present .and. histavg_rof) then - do iidx = 1, num_inst_rof - c2x => component_get_c2x_cx(rof(iidx)) - x2c => component_get_x2c_cx(rof(iidx)) - r2x_rx_avg(iidx)%rAttr = (r2x_rx_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2r_rx_avg(iidx)%rAttr = (x2r_rx_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (ocn_present .and. histavg_ocn) then - do iidx = 1, num_inst_ocn - c2x => component_get_c2x_cx(ocn(iidx)) - x2c => component_get_x2c_cx(ocn(iidx)) - o2x_ox_avg(iidx)%rAttr = (o2x_ox_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2o_ox_avg(iidx)%rAttr = (x2o_ox_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (ice_present .and. 
histavg_ice) then - do iidx = 1, num_inst_ice - c2x => component_get_c2x_cx(ice(iidx)) - x2c => component_get_x2c_cx(ice(iidx)) - i2x_ix_avg(iidx)%rAttr = (i2x_ix_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2i_ix_avg(iidx)%rAttr = (x2i_ix_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (glc_present .and. histavg_glc) then - do iidx = 1, num_inst_glc - c2x => component_get_c2x_cx(glc(iidx)) - x2c => component_get_x2c_cx(glc(iidx)) - g2x_gx_avg(iidx)%rAttr = (g2x_gx_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2g_gx_avg(iidx)%rAttr = (x2g_gx_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (wav_present .and. histavg_wav) then - do iidx = 1, num_inst_wav - c2x => component_get_c2x_cx(wav(iidx)) - x2c => component_get_x2c_cx(wav(iidx)) - w2x_wx_avg(iidx)%rAttr = (w2x_wx_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2w_wx_avg(iidx)%rAttr = (x2w_wx_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (iac_present .and. histavg_iac) then - do iidx = 1, num_inst_iac - c2x => component_get_c2x_cx(iac(iidx)) - x2c => component_get_x2c_cx(iac(iidx)) - z2x_zx_avg(iidx)%rAttr = (z2x_zx_avg(iidx)%rAttr + c2x%rAttr) / (cnt * 1.0_r8) - x2z_zx_avg(iidx)%rAttr = (x2z_zx_avg(iidx)%rAttr + x2c%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (ocn_present .and. histavg_xao) then - xao_ox => prep_aoflux_get_xao_ox() - do iidx = 1, num_inst_ocn - xao_ox_avg(iidx)%rAttr = (xao_ox_avg(iidx)%rAttr + xao_ox(iidx)%rAttr) / (cnt * 1.0_r8) - enddo - endif - if (atm_present .and. histavg_xao) then - xao_ax => prep_aoflux_get_xao_ax() - do iidx = 1, num_inst_ocn - xao_ax_avg(iidx)%rAttr = (xao_ax_avg(iidx)%rAttr + xao_ax(iidx)%rAttr) / (cnt * 1.0_r8) - enddo - endif - - call seq_infodata_GetData( infodata, case_name=case_name) - call seq_timemgr_EClockGetData( EClock_d, prev_ymd=prev_ymd, prev_tod=prev_tod) - call shr_cal_date2ymd(prev_ymd, yy, mm, dd) - if (seq_timemgr_histavg_type == seq_timemgr_type_nyear) then - call shr_cal_ymdtod2string(date_str, yy) - else if (seq_timemgr_histavg_type == seq_timemgr_type_nmonth) then - call shr_cal_ymdtod2string(date_str, yy, mm) - else if (seq_timemgr_histavg_type == seq_timemgr_type_nday) then - call shr_cal_ymdtod2string(date_str, yy, mm, dd) - else - ! Notice that this uses curr_ymd and curr_tod rather than prev_ymd and prev_tod - call shr_cal_datetod2string(date_str, curr_ymd, curr_tod) - end if - write(hist_file, "(6a)") & - trim(case_name), '.cpl',cpl_inst_tag,'.ha.', trim(date_str), '.nc' - - time_units = 'days since ' & - // trim(seq_io_date2yyyymmdd(start_ymd)) // ' ' // seq_io_sec2hms(start_tod) - - if (iamin_CPLID) then - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - call seq_io_wopen(hist_file, clobber=.true., model_doi_url=model_doi_url) - - ! loop twice, first time write header, second time write data for perf - - do fk = 1, 2 - if (fk == 1) then - whead = .true. - wdata = .false. - elseif (fk == 2) then - whead = .false. - wdata = .true. - call seq_io_enddef(hist_file) - else - call shr_sys_abort('seq_hist_writeavg fk illegal') - end if - - avg_time = 0.5_r8 * (tbnds(1) + tbnds(2)) - !---------- tcx nov 2011 tbnds of same values causes problems in ferret - if (tbnds(1) >= tbnds(2)) then - call seq_io_write(hist_file, & - time_units=time_units, time_cal=calendar, time_val=avg_time, & - whead=whead, wdata=wdata) - else - call seq_io_write(hist_file, & - time_units=time_units, time_cal=calendar, time_val=avg_time, & - whead=whead, wdata=wdata, tbnds=tbnds) - endif - if (atm_present .and. 
histavg_atm) then - gsmap => component_get_gsmap_cx(atm(1)) - dom => component_get_dom_cx(atm(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, pre='doma') - - call seq_io_write(hist_file, gsmap, x2a_ax_avg, 'x2a_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2aavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, a2x_ax_avg, 'a2x_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, & - pre='a2xavg', tavg=.true.) - endif - if (lnd_present .and. histavg_lnd) then - gsmap => component_get_gsmap_cx(lnd(1)) - dom => component_get_dom_cx(lnd(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_lx', & - nx=lnd_nx, ny=lnd_ny, nt=1, whead=whead, wdata=wdata, pre='doml') - call seq_io_write(hist_file, gsmap, l2x_lx_avg, 'l2x_lx', & - nx=lnd_nx, ny=lnd_ny, nt=1, whead=whead, wdata=wdata, & - pre='l2xavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, x2l_lx_avg, 'x2l_lx', & - nx=lnd_nx, ny=lnd_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2lavg', tavg=.true.) - endif - - if (rof_present .and. histavg_rof) then - gsmap => component_get_gsmap_cx(rof(1)) - dom => component_get_dom_cx(rof(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_rx', & - nx=rof_nx, ny=rof_ny, nt=1, whead=whead, wdata=wdata, pre='domr') - call seq_io_write(hist_file, gsmap, r2x_rx_avg, 'r2x_rx', & - nx=rof_nx, ny=rof_ny, nt=1, whead=whead, wdata=wdata, & - pre='r2xavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, x2r_rx_avg, 'x2r_rx', & - nx=rof_nx, ny=rof_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2ravg', tavg=.true.) - endif - if (ocn_present .and. histavg_ocn) then - gsmap => component_get_gsmap_cx(ocn(1)) - dom => component_get_dom_cx(ocn(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, pre='domo') - call seq_io_write(hist_file, gsmap, o2x_ox_avg, 'o2x_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, & - pre='o2xavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, x2o_ox_avg, 'x2o_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2oavg', tavg=.true.) - endif - if (ice_present .and. histavg_ice) then - gsmap => component_get_gsmap_cx(ice(1)) - dom => component_get_dom_cx(ice(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_ix', & - nx=ice_nx, ny=ice_ny, nt=1, whead=whead, wdata=wdata, pre='domi') - call seq_io_write(hist_file, gsmap, i2x_ix_avg, 'i2x_ix', & - nx=ice_nx, ny=ice_ny, nt=1, whead=whead, wdata=wdata, & - pre='i2xavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, x2i_ix_avg, 'x2i_ix', & - nx=ice_nx, ny=ice_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2iavg', tavg=.true.) - endif - if (glc_present .and. histavg_glc) then - gsmap => component_get_gsmap_cx(glc(1)) - dom => component_get_dom_cx(glc(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_gx', & - nx=glc_nx, ny=glc_ny, nt=1, whead=whead, wdata=wdata, pre='domg') - call seq_io_write(hist_file, gsmap, g2x_gx_avg, 'g2x_gx', & - nx=glc_nx, ny=glc_ny, nt=1, whead=whead, wdata=wdata, & - pre='g2xavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, x2g_gx_avg, 'x2g_gx', & - nx=glc_nx, ny=glc_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2gavg', tavg=.true.) - endif - if (wav_present .and. 
histavg_wav) then - gsmap => component_get_gsmap_cx(wav(1)) - dom => component_get_dom_cx(wav(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_wx', & - nx=wav_nx, ny=wav_ny, nt=1, whead=whead, wdata=wdata, pre='domw') - call seq_io_write(hist_file, gsmap, w2x_wx_avg, 'w2x_wx', & - nx=wav_nx, ny=wav_ny, nt=1, whead=whead, wdata=wdata, & - pre='w2xavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, x2w_wx_avg, 'x2w_wx', & - nx=wav_nx, ny=wav_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2wavg', tavg=.true.) - endif - if (iac_present .and. histavg_iac) then - gsmap => component_get_gsmap_cx(iac(1)) - dom => component_get_dom_cx(iac(1)) - call seq_io_write(hist_file, gsmap, dom%data, 'dom_zx', & - nx=iac_nx, ny=iac_ny, nt=1, whead=whead, wdata=wdata, pre='domw') - call seq_io_write(hist_file, gsmap, z2x_zx_avg, 'z2x_zx', & - nx=iac_nx, ny=iac_ny, nt=1, whead=whead, wdata=wdata, & - pre='z2xavg', tavg=.true.) - call seq_io_write(hist_file, gsmap, x2z_zx_avg, 'x2z_zx', & - nx=iac_nx, ny=iac_ny, nt=1, whead=whead, wdata=wdata, & - pre='x2zavg', tavg=.true.) - endif - if (ocn_present .and. histavg_xao) then - gsmap => component_get_gsmap_cx(ocn(1)) - call seq_io_write(hist_file, gsmap, xao_ox_avg, 'xao_ox', & - nx=ocn_nx, ny=ocn_ny, nt=1, whead=whead, wdata=wdata, & - pre='xaooavg', tavg=.true.) - endif - if (atm_present .and. histavg_xao) then - gsmap => component_get_gsmap_cx(atm(1)) - call seq_io_write(hist_file, gsmap, xao_ax_avg, 'xao_ax', & - nx=atm_nx, ny=atm_ny, nt=1, whead=whead, wdata=wdata, & - pre='xaoaavg', tavg=.true.) - endif - enddo - - call seq_io_close(hist_file) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - if (atm_present .and. histavg_atm) then - do iidx = 1, num_inst_atm - call mct_aVect_zero(a2x_ax_avg(iidx)) - call mct_aVect_zero(x2a_ax_avg(iidx)) - enddo - endif - if (lnd_present .and. histavg_lnd) then - do iidx = 1, num_inst_lnd - call mct_aVect_zero(l2x_lx_avg(iidx)) - call mct_aVect_zero(x2l_lx_avg(iidx)) - enddo - endif - if (rof_present .and. histavg_rof) then - do iidx = 1, num_inst_rof - call mct_aVect_zero(r2x_rx_avg(iidx)) - call mct_aVect_zero(x2r_rx_avg(iidx)) - enddo - endif - if (ocn_present .and. histavg_ocn) then - do iidx = 1, num_inst_ocn - call mct_aVect_zero(o2x_ox_avg(iidx)) - call mct_aVect_zero(x2o_ox_avg(iidx)) - enddo - endif - if (ice_present .and. histavg_ice) then - do iidx = 1, num_inst_ice - call mct_aVect_zero(i2x_ix_avg(iidx)) - call mct_aVect_zero(x2i_ix_avg(iidx)) - enddo - endif - if (glc_present .and. histavg_glc) then - do iidx = 1, num_inst_glc - call mct_aVect_zero(g2x_gx_avg(iidx)) - call mct_aVect_zero(x2g_gx_avg(iidx)) - enddo - endif - if (wav_present .and. histavg_wav) then - do iidx = 1, num_inst_wav - call mct_aVect_zero(w2x_wx_avg(iidx)) - call mct_aVect_zero(x2w_wx_avg(iidx)) - enddo - endif - if (iac_present .and. histavg_iac) then - do iidx = 1, num_inst_wav - call mct_aVect_zero(z2x_zx_avg(iidx)) - call mct_aVect_zero(x2z_zx_avg(iidx)) - enddo - endif - if (ocn_present .and. histavg_xao) then - do iidx = 1, num_inst_xao - call mct_aVect_zero(xao_ox_avg(iidx)) - enddo - endif - if (atm_present .and. 
histavg_xao) then - do iidx = 1, num_inst_xao - call mct_aVect_zero(xao_ax_avg(iidx)) - enddo - endif - cnt = 0 - tbnds(1) = curr_time - - endif - endif - - end subroutine seq_hist_writeavg - - !=============================================================================== - - subroutine seq_hist_writeaux(infodata, EClock_d, comp, flow, aname, dname, inst_suffix, & - nx, ny, nt, write_now, flds, tbnds1_offset, yr_offset, av_to_write) - - implicit none - - !--- arguments --- - type (seq_infodata_type) , intent(inout) :: infodata - type(ESMF_Clock) , intent(in) :: EClock_d ! driver clock - type(component_type) , intent(in) :: comp ! component instance - character(len=3) , intent(in) :: flow ! 'x2c' or 'c2x' - character(*) , intent(in) :: aname ! avect name for hist file - character(*) , intent(in) :: dname ! domain name for hist file - character(*) , intent(in) :: inst_suffix ! instance number part of file name - integer(IN) , intent(in) :: nx ! 2d global size nx - integer(IN) , intent(in) :: ny ! 2d global size ny - integer(IN) , intent(in) :: nt ! number of time samples per file - logical , optional, intent(in) :: write_now ! write a sample now, if not used, write every call - character(*) , optional, intent(in) :: flds ! list of fields to write - - ! Offset for starting time bound, in fractional days. This should be negative. If - ! tbnds1_offset is provided, then: When it's time to write the file, create the lower - ! time bound as curr_time + tbnds1_offset. - ! - ! If tbnds1_offset is not provided, then the lower bound is either (a) the time from - ! the previous write, or (b) for the first write after restarting the model, the - ! model's prev_time from the first call to seq_hist_writeaux for this file. To achieve - ! accurate time bounds, it is important to provide this argument for (1) files for - ! which we do not call this every time step, but rather only call this when it's time - ! to write (which causes problems for (a)), and/or (2) files that are written - ! infrequently, for which there might be a model restart in the middle of an interval - ! (which causes problems for (b)). - real(r8) , optional, intent(in) :: tbnds1_offset - - ! Offset to apply to current year when generating file name. - ! For example, for a field written once a year, yr_offset=-1 will make it so the file - ! with fields from year 1 has time stamp 0001-01-01 rather than 0002-01-01, which can - ! simplify later reading by a data model. - integer , optional, intent(in) :: yr_offset - - ! If av_to_write is provided, then write fields from this attribute vector. - ! Otherwise, get the attribute vector from 'comp', based on 'flow'. - type(mct_avect), target , optional, intent(in) :: av_to_write - - !--- local --- - type(mct_gGrid), pointer :: dom - type(mct_avect), pointer :: av - type(mct_gsMap), pointer :: gsmap - character(CL) :: case_name ! case name - integer(IN) :: curr_ymd ! Current date YYYYMMDD - integer(IN) :: curr_tod ! Current time-of-day (s) - integer(IN) :: start_ymd ! Starting date YYYYMMDD - integer(IN) :: start_tod ! Starting time-of-day (s) - real(r8) :: curr_time ! Time interval since reference time - real(r8) :: prev_time ! Time interval since reference time - real(r8) :: avg_time ! Average time for time average - integer(IN) :: yy, mm, dd ! year, month, day - integer(IN) :: n, fk, fk1 ! index - character(CL) :: time_units ! units of time variable - character(CL) :: calendar ! calendar type - integer(IN) :: samples_per_file - integer(IN) :: lsize ! 
local size of an aVect - logical :: first_call - integer(IN) :: found = -10 - logical :: useavg - logical :: use_double ! if true, use double-precision - logical :: lwrite_now - logical :: whead, wdata ! for writing restart/history cdf files - real(r8) :: tbnds(2) - character(len=16) :: date_str - - integer(IN), parameter :: maxout = 20 - integer(IN) , save :: ntout = 0 - character(CS) , save :: tname(maxout) = 'x1y2z3' - integer(IN) , save :: ncnt(maxout) = -10 - character(CL) , save :: hist_file(maxout) ! local path to history filename - type(mct_aVect) , save :: avavg(maxout) ! av accumulator if needed - integer(IN) , save :: avcnt(maxout) = 0 ! accumulator counter - logical , save :: fwrite(maxout) = .true. ! first write - real(r8) , save :: tbnds1(maxout) ! first time_bnds - real(r8) , save :: tbnds2(maxout) ! second time_bnds - - type(mct_aVect) :: avflds ! non-avg av for a subset of fields - - real(r8), parameter :: c0 = 0.0_r8 ! zero - character(CL) :: model_doi_url - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! get required infodata - !---------------------------------------------------------------------------- - iamin_CPLID = seq_comm_iamin(CPLID) - - call seq_comm_getdata(GLOID, & - mpicom=mpicom_GLOID, nthreads=nthreads_GLOID) - call seq_comm_getdata(CPLID, & - mpicom=mpicom_CPLID, nthreads=nthreads_CPLID) - - lwrite_now = .true. - useavg = .false. - if (present(write_now)) then - useavg = .true. - lwrite_now = write_now - endif - - call seq_timemgr_EClockGetData( EClock_d, & - curr_ymd=curr_ymd, & - curr_tod=curr_tod, & - start_ymd=start_ymd, & - start_tod=start_tod, & - curr_time=curr_time, & - prev_time=prev_time, & - calendar=calendar) - - first_call = .true. - do n = 1, ntout - if (trim(tname(n)) == trim(aname)) then - first_call = .false. - found = n - endif - enddo - - if (iamin_CPLID) then - if (present(av_to_write)) then - av => av_to_write - else - if (flow == 'c2x') then - av => component_get_c2x_cx(comp) - else if (flow == 'x2c') then - av => component_get_x2c_cx(comp) - end if - end if - dom => component_get_dom_cx(comp) - gsmap => component_get_gsmap_cx(comp) - end if - - if (first_call) then - ntout = ntout + 1 - if (ntout > maxout) then - write(logunit, *) 'write_history_writeaux maxout exceeded', ntout, maxout - call shr_sys_abort() - endif - tname(ntout) = trim(aname) - ncnt(ntout) = -10 - if (iamin_CPLID .and. useavg) then - lsize = mct_aVect_lsize(av) - call mct_aVect_init(avavg(ntout), av, lsize) - call mct_aVect_zero(avavg(ntout)) - avcnt(ntout) = 0 - endif - tbnds1(ntout) = prev_time - found = ntout - endif - - if (iamin_CPLID) then !>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> - - samples_per_file = nt - - if (useavg) then - if (lwrite_now) then - avcnt(found) = avcnt(found) + 1 - avavg(found)%rAttr = (avavg(found)%rAttr + av%rAttr) / (avcnt(found) * 1.0_r8) - else - avcnt(found) = avcnt(found) + 1 - avavg(found)%rAttr = avavg(found)%rAttr + av%rAttr - endif - endif - - if (lwrite_now) then - - call seq_infodata_getData(infodata, & - drv_threading=drv_threading, & - histaux_double_precision = use_double) - - ncnt(found) = ncnt(found) + 1 - if (ncnt(found) < 1 .or. 
ncnt(found) > samples_per_file) ncnt(found) = 1 - - time_units = 'days since ' & - // trim(seq_io_date2yyyymmdd(start_ymd)) // ' ' // seq_io_sec2hms(start_tod) - tbnds2(found) = curr_time - - if (ncnt(found) == 1) then - fk1 = 1 - call seq_infodata_GetData( infodata, case_name=case_name) - call shr_cal_date2ymd(curr_ymd, yy, mm, dd) - - if (present(yr_offset)) then - yy = yy + yr_offset - end if - call shr_cal_ymdtod2string(date_str, yy, mm, dd, curr_tod) - write(hist_file(found), "(8a)") & - trim(case_name),'.cpl',trim(inst_suffix),'.h',trim(aname),'.',trim(date_str), '.nc' - else - fk1 = 2 - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (fk1 == 1) then - call seq_io_wopen(hist_file(found), clobber=.true., file_ind=found, model_doi_url=model_doi_url) - endif - - ! loop twice, first time write header, second time write data for perf - - tbnds(2) = tbnds2(found) - if (present(tbnds1_offset)) then - if (tbnds1_offset >= 0) then - call shr_sys_abort('seq_hist_writeaux: Expect negative tbnds1_offset for '// & - trim(aname)) - end if - tbnds(1) = tbnds(2) + tbnds1_offset - else - tbnds(1) = tbnds1(found) - end if - - do fk = fk1, 2 - if (fk == 1) then - whead = .true. - wdata = .false. - elseif (fk == 2) then - whead = .false. - wdata = .true. - else - call shr_sys_abort('seq_hist_writeaux fk illegal') - end if - - if (present(flds)) then - if (fk == fk1) then - lsize = mct_aVect_lsize(av) - call mct_aVect_init(avflds, rList=flds, lsize=lsize) - call mct_aVect_zero(avflds) - end if - end if - - avg_time = 0.5_r8 * (tbnds(1) + tbnds(2)) - !------- tcx nov 2011 tbnds of same values causes problems in ferret - if (tbnds(1) >= tbnds(2)) then - call seq_io_write(hist_file(found), & - time_units=time_units, time_cal=calendar, time_val=avg_time, & - nt=ncnt(found), whead=whead, wdata=wdata, file_ind=found) - else - call seq_io_write(hist_file(found), & - time_units=time_units, time_cal=calendar, time_val=avg_time, & - nt=ncnt(found), whead=whead, wdata=wdata, tbnds=tbnds, file_ind=found) - endif - - if (fwrite(found)) then - call seq_io_write(hist_file(found), gsmap, dom%data, trim(dname), & - nx=nx, ny=ny, whead=whead, wdata=wdata, fillval=c0, pre=trim(dname), file_ind=found) - endif - - if (useavg) then - if (present(flds)) then - call mct_aVect_copy(aVin=avavg(found), aVout=avflds) - call seq_io_write(hist_file(found), gsmap, avflds, trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, & - pre=trim(aname), tavg=.true., use_float=(.not. use_double), & - file_ind=found) - else - call seq_io_write(hist_file(found), gsmap, avavg(found), trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, & - pre=trim(aname), tavg=.true., use_float=(.not. use_double), & - file_ind=found) - end if - else if (present(flds)) then - call mct_aVect_copy(aVin=av, aVout=avflds) - call seq_io_write(hist_file(found), gsmap, avflds, trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, pre=trim(aname), & - use_float=(.not. use_double), file_ind=found) - else - call seq_io_write(hist_file(found), gsmap, av, trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, pre=trim(aname), & - use_float=(.not. use_double), file_ind=found) - endif - - if (present(flds)) then - if (fk == 2) then - call mct_aVect_clean(avflds) - end if - end if - - if (fk == 1) then - call seq_io_enddef(hist_file(found), file_ind=found) - end if - - if (fk == 2) then - fwrite(found) = .false. 
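A short, self-contained sketch of the accumulate-then-normalize pattern used by the averaging branches in this module: the running sum is incremented every step, divided by the sample count only on the step that writes, and then zeroed for the next interval. Plain Fortran arrays stand in for the MCT attribute vectors; the field values and write interval are made up for illustration.

program tavg_accum_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: field(3), accum(3)
  integer  :: step, cnt
  logical  :: write_now

  accum = 0.0_r8
  cnt   = 0
  do step = 1, 6
     field     = real(step, r8) * [1.0_r8, 10.0_r8, 100.0_r8]  ! fake instantaneous samples
     write_now = (mod(step, 3) == 0)                           ! pretend we write every 3rd step
     cnt   = cnt + 1
     accum = accum + field
     if (write_now) then
        accum = accum / real(cnt, r8)    ! normalize the running sum to a time average
        print *, 'write avg:', accum
        accum = 0.0_r8                   ! reset accumulator and counter for the next interval
        cnt   = 0
     end if
  end do
end program tavg_accum_sketch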
- if (useavg) then - call mct_aVect_zero(avavg(found)) - avcnt(found) = 0 - endif - tbnds1(found) = curr_time - endif - - enddo ! fk=1,2 - - if (ncnt(found) == nt) then - call seq_io_close(hist_file(found), file_ind=found) - end if - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - endif ! lwrite_now - - endif ! iamin_CPLID <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - - end subroutine seq_hist_writeaux - - !=============================================================================== - - subroutine seq_hist_spewav(infodata, aname, inst_suffix, gsmap, av, nx, ny, nt, write_now, flds) - - implicit none - - type(seq_infodata_type) , intent(in) :: infodata - character(*) , intent(in) :: aname ! avect name for hist file - character(*) , intent(in) :: inst_suffix ! instance number part of file name - type(mct_gsmap) , intent(in) :: gsmap ! gsmap - type(mct_aVect) , intent(in) :: av ! avect - integer(IN) , intent(in) :: nx ! 2d global size nx - integer(IN) , intent(in) :: ny ! 2d global size ny - integer(IN) , intent(in) :: nt ! number of time samples per file - logical , intent(in), optional :: write_now ! write a sample now, if not used, write every call - character(*) , intent(in), optional :: flds ! list of fields to write - - !--- local --- - character(CL) :: case_name ! case name - integer(IN) :: n,fk,fk1 ! index - integer(IN) :: samples_per_file - integer(IN) :: lsize ! local size of an aVect - logical :: first_call - integer(IN) :: found = -10 - logical :: useavg - logical :: lwrite_now - logical :: whead,wdata ! for writing restart/history cdf files - real(r8) :: tbnds(2) - - integer(IN),parameter :: maxout = 20 - integer(IN) ,save :: ntout = 0 - character(CS) ,save :: tname(maxout) = 'x1y2z3' - integer(IN) ,save :: ncnt(maxout) = -10 - integer(IN) ,save :: nfiles(maxout) = 0 - character(CL) ,save :: hist_file(maxout) ! local path to history filename - type(mct_aVect) ,save :: avavg(maxout) ! av accumulator if needed - integer(IN) ,save :: avcnt(maxout) = 0 ! accumulator counter - logical ,save :: fwrite(maxout) = .true. ! first write - - type(mct_aVect) :: avflds ! non-avg av for a subset of fields - - real(r8),parameter :: c0 = 0.0_r8 ! zero - character(CL) :: model_doi_url - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! get required infodata - !---------------------------------------------------------------------------- - iamin_CPLID = seq_comm_iamin(CPLID) - - call seq_comm_getdata(GLOID, mpicom=mpicom_GLOID, nthreads=nthreads_GLOID) - call seq_comm_getdata(CPLID, mpicom=mpicom_CPLID, nthreads=nthreads_CPLID) - - call seq_infodata_getData(infodata, & - drv_threading=drv_threading, & - model_doi_url=model_doi_url) - - lwrite_now = .true. - useavg = .false. - if (present(write_now)) then - useavg = .true. - lwrite_now = write_now - endif - - first_call = .true. - do n = 1, ntout - if (trim(tname(n)) == trim(aname)) then - first_call = .false. - found = n - endif - enddo - - if (first_call) then - ntout = ntout + 1 - if (ntout > maxout) then - write(logunit, *) 'write_history_spewAV maxout exceeded', ntout, maxout - call shr_sys_abort() - endif - tname(ntout) = trim(aname) - ncnt(ntout) = -10 - nfiles(ntout) = 0 - if (iamin_CPLID .and. 
useavg) then - lsize = mct_aVect_lsize(av) - call mct_aVect_init(avavg(ntout), av, lsize) - call mct_aVect_zero(avavg(ntout)) - avcnt(ntout) = 0 - endif - found = ntout - endif - - ! if (.not. iamin_CPLID) return - if (iamin_CPLID) then !>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> - - samples_per_file = nt - - if (useavg) then - if (lwrite_now) then - avcnt(found) = avcnt(found) + 1 - avavg(found)%rAttr = (avavg(found)%rAttr + av%rAttr) / (avcnt(found) * 1.0_r8) - else - avcnt(found) = avcnt(found) + 1 - avavg(found)%rAttr = avavg(found)%rAttr + av%rAttr - endif - endif - - if (lwrite_now) then - - ncnt(found) = ncnt(found) + 1 - if (ncnt(found) < 1 .or. ncnt(found) > samples_per_file) then - ncnt(found) = 1 - nfiles(found) = nfiles(found) + 1 - endif - - if (ncnt(found) == 1) then - fk1 = 1 - call seq_infodata_GetData( infodata, case_name=case_name) - write(hist_file(found), "(a, i4.4, a)") & - trim(case_name)//'.cpl'//trim(inst_suffix)//'.h'//trim(aname)//'.', nfiles(found), '.nc' - else - fk1 = 2 - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (fk1 == 1) then - call seq_io_wopen(hist_file(found), clobber=.true. , model_doi_url=model_doi_url) - else - call seq_io_wopen(hist_file(found), clobber=.false., model_doi_url=model_doi_url) - endif - - ! loop twice, first time write header, second time write data for perf - - do fk = fk1, 2 - if (fk == 1) then - whead = .true. - wdata = .false. - elseif (fk == 2) then - whead = .false. - wdata = .true. - else - call shr_sys_abort('seq_hist_spewav fk illegal') - end if - - if (present(flds)) then - if (fk == fk1) then - lsize = mct_aVect_lsize(av) - call mct_aVect_init(avflds, rList=flds, lsize=lsize) - call mct_aVect_zero(avflds) - end if - end if - - tbnds = real(ncnt(found), r8) - !------- tcx nov 2011 tbnds of same values causes problems in ferret - if (tbnds(1) >= tbnds(2)) then - call seq_io_write(hist_file(found), & - time_units='nstep', time_cal='nstep', time_val=real(ncnt(found), r8), & - nt=ncnt(found), whead=whead, wdata=wdata) - else - call seq_io_write(hist_file(found), & - time_units='nstep', time_cal='nstep', time_val=real(ncnt(found), r8), & - nt=ncnt(found), whead=whead, wdata=wdata, tbnds=tbnds) - endif - - if (useavg) then - if (present(flds)) then - call mct_aVect_copy(aVin=avavg(found), aVout=avflds) - call seq_io_write(hist_file(found), gsmap, avflds, trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, & - pre=trim(aname), tavg=.true., use_float=.true.) - else - call seq_io_write(hist_file(found), gsmap, avavg(found), trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, & - pre=trim(aname), tavg=.true., use_float=.true.) - end if - else if (present(flds)) then - call mct_aVect_copy(aVin=av, aVout=avflds) - call seq_io_write(hist_file(found), gsmap, avflds, trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, pre=trim(aname), & - use_float=.true.) - else - call seq_io_write(hist_file(found), gsmap, av, trim(aname), & - nx=nx, ny=ny, nt=ncnt(found), whead=whead, wdata=wdata, pre=trim(aname), & - use_float=.true.) - endif - - if (present(flds)) then - if (fk == 2) then - call mct_aVect_clean(avflds) - end if - end if - - if (fk == 1) call seq_io_enddef(hist_file(found)) - if (fk == 2) then - fwrite(found) = .false. - if (useavg) then - call mct_aVect_zero(avavg(found)) - avcnt(found) = 0 - endif - endif - enddo - - call seq_io_close(hist_file(found)) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - endif ! 
lwrite_now - - endif ! iamin_CPLID <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - - end subroutine seq_hist_spewav - - !=============================================================================== - -end module seq_hist_mod diff --git a/src/drivers/mct/main/seq_io_mod.F90 b/src/drivers/mct/main/seq_io_mod.F90 deleted file mode 100644 index 40d62278f6d..00000000000 --- a/src/drivers/mct/main/seq_io_mod.F90 +++ /dev/null @@ -1,2253 +0,0 @@ -! !MODULE: seq_io_mod -- reads and writes driver files -! -! !DESCRIPTION: -! Writes attribute vectors to netcdf -! -! !REMARKS: -! -! !REVISION HISTORY: -! 2007-Oct-26 - T. Craig first version -! 2007-Dec-06 - T. Craig update and improve -! 2008-Feb-16 - J. Edwards convert to PIO -! 2010-Nov - J. Edwards move PIO init and namelists from components to driver -! Current Problems -! - the original use of seq_io will now ONLY work with the cpl because -! of hardwiring cpl_io_type and cpl_io_iosystem. want the original -! io capabilities to be usable by any component -! - the init1 method depends on seq_comm for name consistency but seq_comm_init -! wants to be called after init1 so the global_comm can be modified for -! async IO. this needs to be reconciled. -! - this routine stores information for all components but most methods are -! hardwired to work only for the coupler. should all the components info -! be stored here or should this be more a general set of methods that are -! reusable as it's original intent. -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_io_mod - - ! !USES: - - use shr_kind_mod, only: r4 => shr_kind_r4, r8 => shr_kind_r8, in => shr_kind_in - use shr_kind_mod, only: cl => shr_kind_cl, cs => shr_kind_cs - use shr_sys_mod, only: shr_sys_abort - use seq_comm_mct, only: logunit, CPLID, seq_comm_setptrs - use seq_comm_mct, only: seq_comm_namelen, seq_comm_name - use seq_flds_mod, only : seq_flds_lookup - use mct_mod ! mct wrappers - use pio - use component_type_mod - use seq_infodata_mod, only: seq_infodata_type - - implicit none - private - - ! !PUBLIC TYPES: - - ! none - - ! !PUBLIC MEMBER FUNCTIONS: - - public seq_io_wopen - public seq_io_close - public seq_io_redef - public seq_io_enddef - public seq_io_date2yyyymmdd - public seq_io_sec2hms - public seq_io_read - public seq_io_write - public seq_io_cpl_init - ! !PUBLIC DATA MEMBERS - - - ! none - - !EOP - - interface seq_io_read - module procedure seq_io_read_av - module procedure seq_io_read_avs - module procedure seq_io_read_avscomp - module procedure seq_io_read_int - module procedure seq_io_read_int1d - module procedure seq_io_read_r8 - module procedure seq_io_read_r81d - module procedure seq_io_read_char - end interface seq_io_read - interface seq_io_write - module procedure seq_io_write_av - module procedure seq_io_write_avs - module procedure seq_io_write_avscomp - module procedure seq_io_write_int - module procedure seq_io_write_int1d - module procedure seq_io_write_r8 - module procedure seq_io_write_r81d - module procedure seq_io_write_char - module procedure seq_io_write_time - end interface seq_io_write - - !------------------------------------------------------------------------------- - ! Local data - !------------------------------------------------------------------------------- - - character(*),parameter :: prefix = "seq_io_" - real(r8) ,parameter :: fillvalue = SHR_CONST_SPVAL - character(*),parameter :: modName = "(seq_io_mod) " - integer(in) ,parameter :: debug = 1 ! 
internal debug level - character(*),parameter :: version ='cpl7v10' - character(*),parameter :: version0='cpl7v00' - integer(in), parameter :: file_desc_t_cnt = 20 ! Note - this is hard-wired for now - - character(CL) :: wfilename = '' - type(file_desc_t), save :: cpl_io_file(0:file_desc_t_cnt) - integer(IN) :: cpl_pio_iotype - integer(IN) :: cpl_pio_ioformat - type(iosystem_desc_t), pointer :: cpl_io_subsystem - - character(CL) :: charvar ! buffer for string read/write - - !================================================================================= -contains - !================================================================================= - - subroutine seq_io_cpl_init() - use shr_pio_mod, only: shr_pio_getiosys, shr_pio_getiotype, shr_pio_getioformat - - cpl_io_subsystem=>shr_pio_getiosys(CPLID) - cpl_pio_iotype = shr_pio_getiotype(CPLID) - cpl_pio_ioformat = shr_pio_getioformat(CPLID) - - end subroutine seq_io_cpl_init - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_wopen - open netcdf file - ! - ! !DESCRIPTION: - ! open netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_wopen(filename,clobber,file_ind, model_doi_url) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(*),intent(in) :: filename - logical,optional,intent(in):: clobber - integer,optional,intent(in):: file_ind - character(CL), optional, intent(in) :: model_doi_url - - !EOP - - logical :: exists - logical :: lclobber - integer :: iam,mpicom - integer :: rcode - integer :: nmode - integer :: lfile_ind - character(CL) :: lversion - character(CL) :: lmodel_doi_url - character(*),parameter :: subName = '(seq_io_wopen) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lversion=trim(version0) - - lclobber = .false. - if (present(clobber)) lclobber=clobber - - lmodel_doi_url = 'unset' - if (present(model_doi_url)) lmodel_doi_url = model_doi_url - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID, iam=iam, mpicom=mpicom) - - if (.not. pio_file_is_open(cpl_io_file(lfile_ind))) then - ! filename not open - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_wopen exists') - if (exists) then - if (lclobber) then - nmode = pio_clobber - - ! only applies to classic NETCDF files. - if(cpl_pio_iotype == PIO_IOTYPE_NETCDF .or. 
& - cpl_pio_iotype == PIO_IOTYPE_PNETCDF) then - nmode = ior(nmode,cpl_pio_ioformat) - endif - - rcode = pio_createfile(cpl_io_subsystem, cpl_io_file(lfile_ind), cpl_pio_iotype, trim(filename), nmode) - if(iam==0) write(logunit,*) subname,' create file ',trim(filename) - rcode = pio_put_att(cpl_io_file(lfile_ind),pio_global,"file_version",version) - rcode = pio_put_att(cpl_io_file(lfile_ind),pio_global,"model_doi_url",lmodel_doi_url) - else - - rcode = pio_openfile(cpl_io_subsystem, cpl_io_file(lfile_ind), cpl_pio_iotype, trim(filename), pio_write) - if(iam==0) write(logunit,*) subname,' open file ',trim(filename) - call pio_seterrorhandling(cpl_io_file(lfile_ind),PIO_BCAST_ERROR) - rcode = pio_get_att(cpl_io_file(lfile_ind),pio_global,"file_version",lversion) - call pio_seterrorhandling(cpl_io_file(lfile_ind),PIO_INTERNAL_ERROR) - if (trim(lversion) /= trim(version)) then - rcode = pio_redef(cpl_io_file(lfile_ind)) - rcode = pio_put_att(cpl_io_file(lfile_ind),pio_global,"file_version",version) - rcode = pio_enddef(cpl_io_file(lfile_ind)) - endif - - endif - else - nmode = pio_noclobber - ! only applies to classic NETCDF files. - if(cpl_pio_iotype == PIO_IOTYPE_NETCDF .or. & - cpl_pio_iotype == PIO_IOTYPE_PNETCDF) then - nmode = ior(nmode,cpl_pio_ioformat) - endif - rcode = pio_createfile(cpl_io_subsystem, cpl_io_file(lfile_ind), cpl_pio_iotype, trim(filename), nmode) - if(iam==0) write(logunit,*) subname,' create file ',trim(filename) - rcode = pio_put_att(cpl_io_file(lfile_ind),pio_global,"file_version",version) - rcode = pio_put_att(cpl_io_file(lfile_ind),pio_global,"model_doi_url",lmodel_doi_url) - endif - elseif (trim(wfilename) /= trim(filename)) then - ! filename is open, better match open filename - if(iam==0) write(logunit,*) subname,' different file currently open ',trim(filename) - call shr_sys_abort(subname//'different file currently open '//trim(filename)) - else - ! filename is already open, just return - endif - - end subroutine seq_io_wopen - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_close - close netcdf file - ! - ! !DESCRIPTION: - ! close netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_close(filename,file_ind) - - use pio, only : pio_closefile - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(*),intent(in) :: filename - integer,optional,intent(in):: file_ind - - !EOP - - integer :: iam - integer :: lfile_ind - character(*),parameter :: subName = '(seq_io_close) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - if (.not. pio_file_is_open(cpl_io_file(lfile_ind))) then - ! filename not open, just return - elseif (trim(wfilename) /= trim(filename)) then - ! filename matches, close it - call pio_closefile(cpl_io_file(lfile_ind)) - else - ! 
different filename is open, abort - if(iam==0) write(logunit,*) subname,' different file currently open, aborting ',trim(filename) - call shr_sys_abort(subname//'different file currently open, aborting '//trim(filename)) - endif - - wfilename = '' - - end subroutine seq_io_close - - !=============================================================================== - - subroutine seq_io_redef(filename,file_ind) - character(len=*), intent(in) :: filename - - integer,optional,intent(in):: file_ind - integer :: lfile_ind - integer :: rcode - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - rcode = pio_redef(cpl_io_file(lfile_ind)) - end subroutine seq_io_redef - - !=============================================================================== - - subroutine seq_io_enddef(filename,file_ind) - character(len=*), intent(in) :: filename - integer,optional,intent(in):: file_ind - integer :: lfile_ind - integer :: rcode - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - rcode = pio_enddef(cpl_io_file(lfile_ind)) - end subroutine seq_io_enddef - - !=============================================================================== - - character(len=24) function seq_io_date2yyyymmdd (date) - - use shr_cal_mod, only : shr_cal_datetod2string - - ! Input arguments - - integer, intent(in) :: date ! date expressed as an integer: yyyymmdd - - !------------------------------------------------------------------------------- - - call shr_cal_datetod2string(date_str = seq_io_date2yyyymmdd, ymd = date) - - end function seq_io_date2yyyymmdd - - !=============================================================================== - - character(len=8) function seq_io_sec2hms (seconds) - - ! Input arguments - - integer, intent(in) :: seconds - - ! Local workspace - - integer :: hours ! hours of hh:mm:ss - integer :: minutes ! minutes of hh:mm:ss - integer :: secs ! seconds of hh:mm:ss - - !------------------------------------------------------------------------------- - - if (seconds < 0 .or. seconds > 86400) then - write(logunit,*)'seq_io_sec2hms: bad input seconds:', seconds - call shr_sys_abort('seq_io_sec2hms: bad input seconds') - end if - - hours = seconds / 3600 - minutes = (seconds - hours*3600) / 60 - secs = (seconds - hours*3600 - minutes*60) - - if (minutes < 0 .or. minutes > 60) then - write(logunit,*)'seq_io_sec2hms: bad minutes = ',minutes - call shr_sys_abort('seq_io_sec2hms: bad minutes') - end if - - if (secs < 0 .or. secs > 60) then - write(logunit,*)'seq_io_sec2hms: bad secs = ',secs - call shr_sys_abort('seq_io_sec2hms: bad secs') - end if - - write(seq_io_sec2hms,80) hours, minutes, secs -80 format(i2.2,':',i2.2,':',i2.2) - - end function seq_io_sec2hms - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_av - write AV to netcdf file - ! - ! !DESCRIPTION: - ! Write AV to netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_av(filename,gsmap,AV,dname,whead,wdata,nx,ny,nt,fillval,pre,tavg,& - use_float, file_ind, scolumn) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(mct_gsMap), intent(in) :: gsmap - type(mct_aVect) ,intent(in) :: AV ! data to be written - character(len=*),intent(in) :: dname ! 
name of data - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! write data - integer(in),optional,intent(in) :: nx ! 2d grid size if available - integer(in),optional,intent(in) :: ny ! 2d grid size if available - integer(in),optional,intent(in) :: nt ! time sample - real(r8),optional,intent(in) :: fillval ! fill value - character(len=*),optional,intent(in) :: pre ! prefix to variable name - logical,optional,intent(in) :: tavg ! is this a tavg - logical,optional,intent(in) :: use_float ! write output as float rather than double - integer,optional,intent(in) :: file_ind - logical,optional,intent(in) :: scolumn ! single column model flag - - !EOP - - integer(in) :: rcode - integer(in) :: iam - integer(in) :: nf,ns,ng - integer(in) :: k - integer(in),target :: dimid2(2) - integer(in),target :: dimid3(3) - integer(in),pointer :: dimid(:) - type(var_desc_t) :: varid - type(io_desc_t) :: iodesc - integer(kind=Pio_Offset_Kind) :: frame - type(mct_string) :: mstring ! mct char type - character(CL) :: itemc ! string converted to char - character(CL) :: name1 ! var name - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! standard name - character(CL) :: lpre ! local prefix - logical :: lwhead, lwdata - logical :: luse_float - integer(in) :: lnx,lny - real(r8) :: lfillvalue - character(*),parameter :: subName = '(seq_io_write_av) ' - integer, pointer :: Dof(:) - integer :: lfile_ind - logical :: lcolumn - - real(r8), allocatable :: tmpdata(:) - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lfillvalue = fillvalue - if (present(fillval)) then - lfillvalue = fillval - endif - - lpre = trim(dname) - if (present(pre)) then - lpre = trim(pre) - endif - - lwhead = .true. - lwdata = .true. - lcolumn = .false. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - if (present(scolumn)) lcolumn = scolumn - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - luse_float = .false. - if (present(use_float)) luse_float = use_float - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - ng = mct_gsmap_gsize(gsmap) - lnx = ng - lny = 1 - - nf = mct_aVect_nRattr(AV) - if (nf < 1) then - write(logunit,*) subname,' ERROR: nf = ',nf,trim(dname) - call shr_sys_abort(subname//'nf error') - endif - frame = -1 - if (present(nt)) then - frame = nt - endif - if (present(nx)) then - if (nx /= 0) lnx = nx - endif - if (present(ny)) then - if (ny /= 0) lny = ny - endif - if (lnx*lny /= ng .and. .not. 
lcolumn) then - if(iam==0) write(logunit,*) subname,' ERROR: grid2d size not consistent ',ng,lnx,lny,trim(dname) - call shr_sys_abort(subname//'ERROR: grid2d size not consistent ') - endif - - if (lwhead) then - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_nx',lnx,dimid2(1)) - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_ny',lny,dimid2(2)) - - if (present(nt)) then - dimid3(1:2) = dimid2 - rcode = pio_inq_dimid(cpl_io_file(lfile_ind),'time',dimid3(3)) - dimid => dimid3 - else - dimid => dimid2 - endif - - do k = 1,nf - call mct_aVect_getRList(mstring,k,AV) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - !-------tcraig, this is a temporary mod to NOT write hgt - if (trim(itemc) /= "hgt") then - name1 = trim(lpre)//'_'//trim(itemc) - call seq_flds_lookup(itemc,longname=lname,stdname=sname,units=cunit) - if (luse_float) then - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(name1),PIO_REAL,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"_FillValue",real(lfillvalue,r4)) - else - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(name1),PIO_DOUBLE,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"_FillValue",lfillvalue) - end if - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"internal_dname",trim(dname)) - if (present(tavg)) then - if (tavg) then - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"cell_methods","time: mean") - endif - endif - !-------tcraig - endif - enddo - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - end if - - if (lwdata) then - call mct_gsmap_OrderedPoints(gsmap, iam, Dof) - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny/), dof, iodesc) - ns = size(dof) - deallocate(dof) - allocate(tmpdata(ns)) - do k = 1,nf - call mct_aVect_getRList(mstring,k,AV) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - !-------tcraig, this is a temporary mod to NOT write hgt - if (trim(itemc) /= "hgt") then - name1 = trim(lpre)//'_'//trim(itemc) - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(name1),varid) - call pio_setframe(cpl_io_file(lfile_ind),varid,frame) - tmpdata = av%rattr(k,:) - call pio_write_darray(cpl_io_file(lfile_ind), varid, iodesc, tmpdata, rcode, fillval=lfillvalue) - !-------tcraig - endif - enddo - deallocate(tmpdata) - call pio_freedecomp(cpl_io_file(lfile_ind), iodesc) - - end if - end subroutine seq_io_write_av - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_avs - write AVS to netcdf file - ! - ! !DESCRIPTION: - ! Write AV to netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_avs(filename,gsmap,AVS,dname,whead,wdata,nx,ny,nt,fillval,pre,tavg,& - use_float,file_ind,scolumn) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(mct_gsMap), intent(in) :: gsmap - type(mct_aVect) ,intent(in) :: AVS(:) ! data to be written - character(len=*),intent(in) :: dname ! name of data - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! 
write data - integer(in),optional,intent(in) :: nx ! 2d grid size if available - integer(in),optional,intent(in) :: ny ! 2d grid size if available - integer(in),optional,intent(in) :: nt ! time sample - real(r8),optional,intent(in) :: fillval ! fill value - character(len=*),optional,intent(in) :: pre ! prefix to variable name - logical,optional,intent(in) :: tavg ! is this a tavg - logical,optional,intent(in) :: use_float ! write output as float rather than double - integer,optional,intent(in) :: file_ind - logical,optional,intent(in) :: scolumn ! single column model flag - - !EOP - - integer(in) :: rcode - integer(in) :: iam - integer(in) :: nf,ns,ng,ni - integer(in) :: k,n,k1 - integer(in),target :: dimid2(2) - integer(in),target :: dimid3(3) - integer(in),target :: dimid4(4) - integer(in),pointer :: dimid(:) - type(var_desc_t) :: varid - type(io_desc_t) :: iodesc - integer(kind=Pio_Offset_Kind) :: frame - type(mct_string) :: mstring ! mct char type - character(CL) :: itemc ! string converted to char - character(CL) :: name1 ! var name - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! standard name - character(CL) :: lpre ! local prefix - logical :: lwhead, lwdata - logical :: luse_float - integer(in) :: lnx,lny - real(r8) :: lfillvalue - real(r8), allocatable :: data(:) - character(*),parameter :: subName = '(seq_io_write_avs) ' - integer, pointer :: Dof(:) - integer, pointer :: Dofn(:) - integer :: lfile_ind - logical :: lcolumn - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lfillvalue = fillvalue - if (present(fillval)) then - lfillvalue = fillval - endif - - lpre = trim(dname) - if (present(pre)) then - lpre = trim(pre) - endif - - lwhead = .true. - lwdata = .true. - lcolumn = .false. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - if (present(scolumn)) lcolumn = scolumn - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - luse_float = .false. - if (present(use_float)) luse_float = use_float - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - ni = size(AVS) - - ns = mct_aVect_lsize(AVS(1)) - ng = mct_gsmap_gsize(gsmap) - lnx = ng - lny = 1 - - nf = mct_aVect_nRattr(AVS(1)) - if (nf < 1) then - write(logunit,*) subname,' ERROR: nf = ',nf,trim(dname) - call shr_sys_abort(subname//'nf error') - endif - frame = -1 - if (present(nt)) then - frame = nt - endif - - if (present(nx)) then - if (nx /= 0) lnx = nx - endif - if (present(ny)) then - if (ny /= 0) lny = ny - endif - if (lnx*lny /= ng .and. .not. 
lcolumn) then - if(iam==0) write(logunit,*) subname,' ERROR: grid2d size not consistent ',ng,lnx,lny,trim(dname) - call shr_sys_abort(subname//' ERROR: grid2d size not consistent ') - endif - - if (lwhead) then - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_nx',lnx,dimid2(1)) - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_ny',lny,dimid2(2)) - - if (ni > 1) then - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_ni',ni,dimid3(3)) - if (present(nt)) then - dimid4(1:2) = dimid2 - dimid4(3) = dimid3(3) - rcode = pio_inq_dimid(cpl_io_file(lfile_ind),'time',dimid4(4)) - dimid => dimid4 - else - dimid3(1:2) = dimid2 - dimid => dimid3 - endif - else - if (present(nt)) then - dimid3(1:2) = dimid2 - rcode = pio_inq_dimid(cpl_io_file(lfile_ind),'time',dimid3(3)) - dimid => dimid3 - else - dimid => dimid2 - endif - endif - - do k = 1,nf - call mct_aVect_getRList(mstring,k,AVS(1)) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - !-------tcraig, this is a temporary mod to NOT write hgt - if (trim(itemc) /= "hgt") then - name1 = trim(lpre)//'_'//trim(itemc) - call seq_flds_lookup(itemc,longname=lname,stdname=sname,units=cunit) - if (luse_float) then - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(name1),PIO_REAL,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"_FillValue",real(lfillvalue,r4)) - else - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(name1),PIO_DOUBLE,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"_FillValue",lfillvalue) - end if - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"internal_dname",trim(dname)) - if (present(tavg)) then - if (tavg) then - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"cell_methods","time: mean") - endif - endif - !-------tcraig - endif - enddo - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - end if - - if (lwdata) then - allocate(data(ns*ni)) - ! note: size of dof is ns - call mct_gsmap_OrderedPoints(gsmap, iam, Dof) - if (ni > 1) then - allocate(dofn(ns*ni)) - n = 0 - do k1 = 1,ni - dofn(n+1:n+ns) = (k1-1)*ng + dof(:) - n = n + ns - enddo - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny,ni/), dofn, iodesc) - deallocate(dofn) - else - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny/), dof, iodesc) - endif - deallocate(dof) - - do k = 1,nf - call mct_aVect_getRList(mstring,k,AVS(1)) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - !-------tcraig, this is a temporary mod to NOT write hgt - if (trim(itemc) /= "hgt") then - name1 = trim(lpre)//'_'//trim(itemc) - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(name1),varid) - call pio_setframe(cpl_io_file(lfile_ind),varid,frame) - n = 0 - do k1 = 1,ni - data(n+1:n+ns) = AVS(k1)%rAttr(k,:) - n = n + ns - enddo - call pio_write_darray(cpl_io_file(lfile_ind), varid, iodesc, data, rcode, fillval=lfillvalue) - call pio_setdebuglevel(0) - !-------tcraig - endif - enddo - - deallocate(data) - call pio_freedecomp(cpl_io_file(lfile_ind), iodesc) - - end if - end subroutine seq_io_write_avs - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_avs - write AVS to netcdf file - ! - ! !DESCRIPTION: - ! 
Write AV to netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_avscomp(filename, comp, flow, dname, & - whead, wdata, nx, ny, nt, fillval, pre, tavg, use_float, file_ind, scolumn) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*) ,intent(in) :: filename ! file - type(component_type) ,intent(in) :: comp(:) ! data to be written - character(len=3) ,intent(in) :: flow ! 'c2x' or 'x2c' - character(len=*) ,intent(in) :: dname ! name of data - logical ,optional,intent(in) :: whead ! write header - logical ,optional,intent(in) :: wdata ! write data - integer(in) ,optional,intent(in) :: nx ! 2d grid size if available - integer(in) ,optional,intent(in) :: ny ! 2d grid size if available - integer(in) ,optional,intent(in) :: nt ! time sample - real(r8) ,optional,intent(in) :: fillval ! fill value - character(len=*) ,optional,intent(in) :: pre ! prefix to variable name - logical ,optional,intent(in) :: tavg ! is this a tavg - logical ,optional,intent(in) :: use_float ! write output as float rather than double - integer ,optional,intent(in) :: file_ind - logical ,optional,intent(in) :: scolumn ! single column model flag - - !EOP - - type(mct_gsMap), pointer :: gsmap ! global seg map on coupler processes - type(mct_avect), pointer :: avcomp1 - type(mct_avect), pointer :: avcomp - integer(in) :: rcode - integer(in) :: iam - integer(in) :: nf,ns,ng,ni - integer(in) :: k,n,k1,k2 - integer(in),target :: dimid2(2) - integer(in),target :: dimid3(3) - integer(in),target :: dimid4(4) - integer(in),pointer :: dimid(:) - type(var_desc_t) :: varid - type(io_desc_t) :: iodesc - integer(kind=Pio_Offset_Kind) :: frame - type(mct_string) :: mstring ! mct char type - character(CL) :: itemc ! string converted to char - character(CL) :: name1 ! var name - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! standard name - character(CL) :: lpre ! local prefix - logical :: lwhead, lwdata - logical :: luse_float - integer(in) :: lnx,lny - real(r8) :: lfillvalue - real(r8), allocatable :: data(:) - character(*),parameter :: subName = '(seq_io_write_avs) ' - integer, pointer :: Dof(:) - integer, pointer :: Dofn(:) - integer :: lfile_ind - logical :: lcolumn - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lfillvalue = fillvalue - if (present(fillval)) then - lfillvalue = fillval - endif - - lpre = trim(dname) - if (present(pre)) then - lpre = trim(pre) - endif - - lwhead = .true. - lwdata = .true. - lcolumn = .false. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - if (present(scolumn)) lcolumn = scolumn - frame = -1 - if (present(nt)) then - frame = nt - endif - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - luse_float = .false. 
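! Editorial sketch (not part of the original module): use_float only changes the
! declared type of the field variables (and the type of their _FillValue attribute)
! during the header pass; it mirrors the def_var branch that appears later in this
! routine:
!
!    if (luse_float) then
!       rcode = pio_def_var(cpl_io_file(lfile_ind), trim(name1), PIO_REAL,   dimid, varid)
!    else
!       rcode = pio_def_var(cpl_io_file(lfile_ind), trim(name1), PIO_DOUBLE, dimid, varid)
!    endif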
- if (present(use_float)) luse_float = use_float - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - ni = size(comp) - if (trim(flow) == 'x2c') avcomp1 => component_get_x2c_cx(comp(1)) - if (trim(flow) == 'c2x') avcomp1 => component_get_c2x_cx(comp(1)) - gsmap => component_get_gsmap_cx(comp(1)) - ns = mct_aVect_lsize(avcomp1) - ng = mct_gsmap_gsize(gsmap) - lnx = ng - lny = 1 - - nf = mct_aVect_nRattr(avcomp1) - if (nf < 1) then - write(logunit,*) subname,' ERROR: nf = ',nf,trim(dname) - call shr_sys_abort(subname//'nf error') - endif - - if (present(nx)) then - if (nx /= 0) lnx = nx - endif - if (present(ny)) then - if (ny /= 0) lny = ny - endif - if (lnx*lny /= ng .and. .not. lcolumn) then - if(iam==0) then - write(logunit,*) subname,' ERROR: grid2d size not consistent ',& - ng,lnx,lny,trim(dname) - end if - call shr_sys_abort(subname//'ERROR: grid2d size not consistent ') - endif - - if (lwhead) then - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_nx',lnx,dimid2(1)) - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_ny',lny,dimid2(2)) - - if (ni > 1) then - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(lpre)//'_ni',ni,dimid3(3)) - if (present(nt)) then - dimid4(1:2) = dimid2 - dimid4(3) = dimid3(3) - rcode = pio_inq_dimid(cpl_io_file(lfile_ind),'time',dimid4(4)) - dimid => dimid4 - else - dimid3(1:2) = dimid2 - dimid => dimid3 - endif - else - if (present(nt)) then - dimid3(1:2) = dimid2 - rcode = pio_inq_dimid(cpl_io_file(lfile_ind),'time',dimid3(3)) - dimid => dimid3 - else - dimid => dimid2 - endif - endif - - do k = 1,nf - call mct_aVect_getRList(mstring,k,avcomp1) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - !-------tcraig, this is a temporary mod to NOT write hgt - if (trim(itemc) /= "hgt") then - name1 = trim(lpre)//'_'//trim(itemc) - call seq_flds_lookup(itemc,longname=lname,stdname=sname,units=cunit) - if (luse_float) then - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(name1),PIO_REAL,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"_FillValue",real(lfillvalue,r4)) - else - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(name1),PIO_DOUBLE,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"_FillValue",lfillvalue) - end if - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"internal_dname",trim(dname)) - if (present(tavg)) then - if (tavg) then - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"cell_methods","time: mean") - endif - endif - !-------tcraig - endif - enddo - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - end if - - if (lwdata) then - allocate(data(ns*ni)) - ! 
note: size of dof is ns - call mct_gsmap_OrderedPoints(gsmap, iam, Dof) - if (ni > 1) then - allocate(dofn(ns*ni)) - n = 0 - do k1 = 1,ni - do k2 = 1,ns - n = n + 1 - dofn(n) = (k1-1)*ng + dof(k2) - enddo - enddo - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny,ni/), dofn, iodesc) - deallocate(dofn) - else - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny/), dof, iodesc) - endif - deallocate(dof) - - do k = 1,nf - call mct_aVect_getRList(mstring,k,avcomp1) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - !-------tcraig, this is a temporary mod to NOT write hgt - if (trim(itemc) /= "hgt") then - name1 = trim(lpre)//'_'//trim(itemc) - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(name1),varid) - call pio_setframe(cpl_io_file(lfile_ind),varid,frame) - n = 0 - do k1 = 1,ni - if (trim(flow) == 'x2c') avcomp => component_get_x2c_cx(comp(k1)) - if (trim(flow) == 'c2x') avcomp => component_get_c2x_cx(comp(k1)) - do k2 = 1,ns - n = n + 1 - data(n) = avcomp%rAttr(k,k2) - enddo - enddo - call pio_write_darray(cpl_io_file(lfile_ind), varid, iodesc, data, rcode, fillval=lfillvalue) - !-------tcraig - endif - enddo - - deallocate(data) - call pio_freedecomp(cpl_io_file(lfile_ind), iodesc) - - end if - end subroutine seq_io_write_avscomp - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_int - write scalar integer to netcdf file - ! - ! !DESCRIPTION: - ! Write scalar integer to netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_int(filename,idata,dname,whead,wdata,file_ind) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - integer(in) ,intent(in) :: idata ! data to be written - character(len=*),intent(in) :: dname ! name of data - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! write data - integer,optional,intent(in) :: file_ind - - !EOP - - integer(in) :: rcode - integer(in) :: iam - type(var_desc_t) :: varid - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! standard name - logical :: lwhead, lwdata - integer :: lfile_ind - character(*),parameter :: subName = '(seq_io_write_int) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lwhead = .true. - lwdata = .true. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - if (lwhead) then - call seq_flds_lookup(trim(dname),longname=lname,stdname=sname,units=cunit) - ! rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(dname)//'_nx',1,dimid(1)) - ! 
rcode = pio_def_var(cpl_io_file(lfile_ind),trim(dname),PIO_INT,dimid,varid) - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(dname),PIO_INT,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - endif - - if (lwdata) then - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(dname),varid) - rcode = pio_put_var(cpl_io_file(lfile_ind),varid,idata) - - ! write(logunit,*) subname,' wrote AV ',trim(dname),lwhead,lwdata - endif - - end subroutine seq_io_write_int - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_int1d - write 1d integer array to netcdf file - ! - ! !DESCRIPTION: - ! Write 1d integer array to netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_int1d(filename,idata,dname,whead,wdata,file_ind) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - integer(in) ,intent(in) :: idata(:) ! data to be written - character(len=*),intent(in) :: dname ! name of data - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! write data - integer,optional,intent(in) :: file_ind - - !EOP - - integer(in) :: rcode - integer(in) :: iam - integer(in) :: dimid(1) - type(var_desc_t) :: varid - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! standard name - integer(in) :: lnx - logical :: lwhead, lwdata - integer :: lfile_ind - character(*),parameter :: subName = '(seq_io_write_int1d) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lwhead = .true. - lwdata = .true. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - if (lwhead) then - call seq_flds_lookup(trim(dname),longname=lname,stdname=sname,units=cunit) - lnx = size(idata) - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(dname)//'_nx',lnx,dimid(1)) - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(dname),PIO_INT,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - endif - - if (lwdata) then - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(dname),varid) - rcode = pio_put_var(cpl_io_file(lfile_ind),varid,idata) - endif - - ! write(logunit,*) subname,' wrote AV ',trim(dname),lwhead,lwdata - - end subroutine seq_io_write_int1d - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_r8 - write scalar double to netcdf file - ! - ! !DESCRIPTION: - ! 
Write scalar double to netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_r8(filename,rdata,dname,whead,wdata,file_ind) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - real(r8) ,intent(in) :: rdata ! data to be written - character(len=*),intent(in) :: dname ! name of data - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! write data - integer,optional,intent(in) :: file_ind - - !EOP - - integer(in) :: rcode - integer(in) :: iam - type(var_desc_t) :: varid - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! standard name - logical :: lwhead, lwdata - integer :: lfile_ind - character(*),parameter :: subName = '(seq_io_write_r8) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lwhead = .true. - lwdata = .true. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - if (lwhead) then - call seq_flds_lookup(trim(dname),longname=lname,stdname=sname,units=cunit) - ! rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(dname)//'_nx',1,dimid(1)) - ! rcode = pio_def_var(cpl_io_file(lfile_ind),trim(dname),PIO_DOUBLE,dimid,varid) - - - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(dname),PIO_DOUBLE,varid) - if(rcode==PIO_NOERR) then - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - end if - endif - - if (lwdata) then - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(dname),varid) - rcode = pio_put_var(cpl_io_file(lfile_ind),varid,rdata) - endif - - - end subroutine seq_io_write_r8 - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_r81d - write 1d double array to netcdf file - ! - ! !DESCRIPTION: - ! Write 1d double array to netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_r81d(filename,rdata,dname,whead,wdata,file_ind) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - real(r8) ,intent(in) :: rdata(:) ! data to be written - character(len=*),intent(in) :: dname ! name of data - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! write data - integer,optional,intent(in) :: file_ind - - !EOP - - integer(in) :: rcode - integer(in) :: iam - integer(in) :: dimid(1) - type(var_desc_t) :: varid - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! 
standard name - integer(in) :: lnx - logical :: lwhead, lwdata - integer :: lfile_ind - character(*),parameter :: subName = '(seq_io_write_r81d) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lwhead = .true. - lwdata = .true. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - call seq_comm_setptrs(CPLID,iam=iam) - - if (lwhead) then - call seq_flds_lookup(trim(dname),longname=lname,stdname=sname,units=cunit) - lnx = size(rdata) - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(dname)//'_nx',lnx,dimid(1)) - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(dname),PIO_DOUBLE,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - endif - - if (lwdata) then - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(dname),varid) - rcode = pio_put_var(cpl_io_file(lfile_ind),varid,rdata) - - ! write(logunit,*) subname,' wrote AV ',trim(dname),lwhead,lwdata - endif - - end subroutine seq_io_write_r81d - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_char - write char string to netcdf file - ! - ! !DESCRIPTION: - ! Write char string to netcdf file - ! - ! !REVISION HISTORY: - ! 2010-July-06 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_char(filename,rdata,dname,whead,wdata,file_ind) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - character(len=*),intent(in) :: rdata ! data to be written - character(len=*),intent(in) :: dname ! name of data - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! write data - integer,optional,intent(in) :: file_ind - - !EOP - - integer(in) :: rcode - integer(in) :: iam - integer(in) :: dimid(1) - type(var_desc_t) :: varid - character(CL) :: cunit ! var units - character(CL) :: lname ! long name - character(CL) :: sname ! standard name - integer(in) :: lnx - logical :: lwhead, lwdata - integer :: lfile_ind - character(*),parameter :: subName = '(seq_io_write_char) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lwhead = .true. - lwdata = .true. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? 
- return - endif - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - if (lwhead) then - call seq_flds_lookup(trim(dname),longname=lname,stdname=sname,units=cunit) - lnx = len(charvar) - rcode = pio_def_dim(cpl_io_file(lfile_ind),trim(dname)//'_len',lnx,dimid(1)) - rcode = pio_def_var(cpl_io_file(lfile_ind),trim(dname),PIO_CHAR,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"units",trim(cunit)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"long_name",trim(lname)) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,"standard_name",trim(sname)) - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - endif - - if (lwdata) then - charvar = '' - charvar = trim(rdata) - rcode = pio_inq_varid(cpl_io_file(lfile_ind),trim(dname),varid) - rcode = pio_put_var(cpl_io_file(lfile_ind),varid,charvar) - endif - - end subroutine seq_io_write_char - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_write_time - write time variable to netcdf file - ! - ! !DESCRIPTION: - ! Write time variable to netcdf file - ! - ! !REVISION HISTORY: - ! 2009-Feb-11 - M. Vertenstein - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_write_time(filename,time_units,time_cal,time_val,nt,whead,wdata,tbnds,file_ind) - - use shr_cal_mod, only : shr_cal_calMaxLen, shr_cal_calendarName, & - shr_cal_noleap, shr_cal_gregorian - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - character(len=*),intent(in) :: time_units ! units of time - character(len=*),intent(in) :: time_cal ! calendar type - real(r8) ,intent(in) :: time_val ! data to be written - integer(in),optional,intent(in) :: nt - logical,optional,intent(in) :: whead ! write header - logical,optional,intent(in) :: wdata ! write data - real(r8),optional,intent(in) :: tbnds(2) ! time bounds - integer,optional,intent(in) :: file_ind - - !EOP - - integer(in) :: rcode - integer(in) :: iam - integer(in) :: dimid(1) - integer(in) :: dimid2(2) - type(var_desc_t) :: varid - logical :: lwhead, lwdata - integer :: start(4),count(4) - character(len=shr_cal_calMaxLen) :: lcalendar - real(r8) :: time_val_1d(1) - integer :: lfile_ind - character(*),parameter :: subName = '(seq_io_write_time) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lwhead = .true. - lwdata = .true. - if (present(whead)) lwhead = whead - if (present(wdata)) lwdata = wdata - - if (.not.lwhead .and. .not.lwdata) then - ! should we write a warning? - return - endif - - lfile_ind = 0 - if (present(file_ind)) lfile_ind=file_ind - - call seq_comm_setptrs(CPLID,iam=iam) - - if (lwhead) then - rcode = pio_def_dim(cpl_io_file(lfile_ind),'time',PIO_UNLIMITED,dimid(1)) - rcode = pio_def_var(cpl_io_file(lfile_ind),'time',PIO_DOUBLE,dimid,varid) - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,'units',trim(time_units)) - lcalendar = shr_cal_calendarName(time_cal,trap=.false.) 
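! Editorial sketch (not part of the original module): the driver history code reaches
! this routine through the generic seq_io_write interface, roughly as in the call made
! by seq_hist_writeaux above ('calendar', 'avg_time' and 'tbnds' are set by the caller):
!
!    call seq_io_write(hist_file(found), &
!         time_units=time_units, time_cal=calendar, time_val=avg_time, &
!         nt=ncnt(found), whead=whead, wdata=wdata, tbnds=tbnds, file_ind=found)
!
! Passing tbnds is what triggers the 'time_bnds' dimension and variable definitions below.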
- if (trim(lcalendar) == trim(shr_cal_noleap)) then - lcalendar = 'noleap' - elseif (trim(lcalendar) == trim(shr_cal_gregorian)) then - lcalendar = 'gregorian' - endif - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,'calendar',trim(lcalendar)) - if (present(tbnds)) then - rcode = pio_put_att(cpl_io_file(lfile_ind),varid,'bounds','time_bnds') - dimid2(2)=dimid(1) - rcode = pio_def_dim(cpl_io_file(lfile_ind),'ntb',2,dimid2(1)) - rcode = pio_def_var(cpl_io_file(lfile_ind),'time_bnds',PIO_DOUBLE,dimid2,varid) - endif - if (lwdata) call seq_io_enddef(filename, file_ind=lfile_ind) - endif - - if (lwdata) then - start = 1 - count = 1 - if (present(nt)) then - start(1) = nt - endif - time_val_1d(1) = time_val - rcode = pio_inq_varid(cpl_io_file(lfile_ind),'time',varid) - rcode = pio_put_var(cpl_io_file(lfile_ind),varid,start,count,time_val_1d) - if (present(tbnds)) then - rcode = pio_inq_varid(cpl_io_file(lfile_ind),'time_bnds',varid) - start = 1 - count = 1 - if (present(nt)) then - start(2) = nt - endif - count(1) = 2 - rcode = pio_put_var(cpl_io_file(lfile_ind),varid,start,count,tbnds) - endif - - ! write(logunit,*) subname,' wrote time ',lwhead,lwdata - endif - - end subroutine seq_io_write_time - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_av - read AV from netcdf file - ! - ! !DESCRIPTION: - ! Read AV from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_av(filename,gsmap,AV,dname,pre) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(mct_gsMap), intent(in) :: gsmap - type(mct_aVect) ,intent(inout):: AV ! data to be read - character(len=*),intent(in) :: dname ! name of data - character(len=*),intent(in),optional :: pre ! prefix name - - !EOP - - integer(in) :: rcode - integer(in) :: iam,mpicom - integer(in) :: nf,ns,ng - integer(in) :: k,n, ndims - logical :: exists - type(file_desc_t) :: pioid - integer(in) :: dimid(2) - type(var_desc_t) :: varid - integer(in) :: lnx,lny - type(mct_string) :: mstring ! mct char type - character(CL) :: itemc ! string converted to char - type(io_desc_t) :: iodesc - integer(in), pointer :: dof(:) - character(CL) :: lversion - character(CL) :: name1 - character(CL) :: lpre - character(*),parameter :: subName = '(seq_io_read_av) ' - !------------------------------------------------------------------------------- - ! 
- !------------------------------------------------------------------------------- - - lversion = trim(version0) - - lpre = trim(dname) - if (present(pre)) then - lpre = trim(pre) - endif - - call seq_comm_setptrs(CPLID,iam=iam,mpicom=mpicom) - call mct_gsmap_OrderedPoints(gsmap, iam, Dof) - - ns = mct_aVect_lsize(AV) - nf = mct_aVect_nRattr(AV) - - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_read_av exists') - if (exists) then - rcode = pio_openfile(cpl_io_subsystem, pioid, cpl_pio_iotype, trim(filename),pio_nowrite) - if(iam==0) write(logunit,*) subname,' open file ',trim(filename) - call pio_seterrorhandling(pioid,PIO_BCAST_ERROR) - rcode = pio_get_att(pioid,pio_global,"file_version",lversion) - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - else - if(iam==0) write(logunit,*) subname,' ERROR: file invalid ',trim(filename),' ',trim(dname) - call shr_sys_abort(subname//'ERROR: file invalid '//trim(filename)//' '//trim(dname)) - endif - - do k = 1,nf - call mct_aVect_getRList(mstring,k,AV) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - if (trim(lversion) == trim(version)) then - name1 = trim(lpre)//'_'//trim(itemc) - else - name1 = trim(prefix)//trim(dname)//'_'//trim(itemc) - endif - call pio_seterrorhandling(pioid, PIO_BCAST_ERROR) - rcode = pio_inq_varid(pioid,trim(name1),varid) - if (rcode == pio_noerr) then - if (k==1) then - rcode = pio_inq_varndims(pioid, varid, ndims) - rcode = pio_inq_vardimid(pioid, varid, dimid(1:ndims)) - rcode = pio_inq_dimlen(pioid, dimid(1), lnx) - if (ndims>=2) then - rcode = pio_inq_dimlen(pioid, dimid(2), lny) - else - lny = 1 - end if - ng = lnx * lny - if (ng /= mct_gsmap_gsize(gsmap)) then - if (iam==0) write(logunit,*) subname,' ERROR: dimensions do not match',& - lnx,lny,mct_gsmap_gsize(gsmap) - call shr_sys_abort(subname//'ERROR: dimensions do not match') - end if - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny/), dof, iodesc) - deallocate(dof) - end if - call pio_read_darray(pioid,varid,iodesc, av%rattr(k,:), rcode) - else - write(logunit,*)'seq_io_readav warning: field ',trim(itemc),' is not on restart file' - write(logunit,*)'for backwards compatibility will set it to 0' - av%rattr(k,:) = 0.0_r8 - end if - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - - enddo - - !--- zero out fill value, this is somewhat arbitrary - do n = 1,ns - do k = 1,nf - if (AV%rAttr(k,n) == fillvalue) then - AV%rAttr(k,n) = 0.0_r8 - endif - enddo - enddo - - call pio_freedecomp(pioid, iodesc) - call pio_closefile(pioid) - - end subroutine seq_io_read_av - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_avs - read AV from netcdf file - ! - ! !DESCRIPTION: - ! Read AV from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_avs(filename,gsmap,AVS,dname,pre) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(mct_gsMap), intent(in) :: gsmap - type(mct_aVect) ,intent(inout):: AVS(:) ! data to be read - character(len=*),intent(in) :: dname ! name of data - character(len=*),intent(in),optional :: pre ! 
prefix name - - !EOP - - integer(in) :: rcode - integer(in) :: iam,mpicom - integer(in) :: nf,ns,ng,ni - integer(in) :: k,n,n1,n2,ndims - type(file_desc_t) :: pioid - integer(in) :: dimid(4) - type(var_desc_t) :: varid - integer(in) :: lnx,lny,lni - type(mct_string) :: mstring ! mct char type - character(CL) :: itemc ! string converted to char - logical :: exists - type(io_desc_t) :: iodesc - integer(in), pointer :: dof(:) - integer(in), pointer :: dofn(:) - real(r8), allocatable :: data(:) - character(CL) :: lversion - character(CL) :: name1 - character(CL) :: lpre - character(*),parameter :: subName = '(seq_io_read_avs) ' - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - lversion = trim(version0) - - lpre = trim(dname) - if (present(pre)) then - lpre = trim(pre) - endif - - call seq_comm_setptrs(CPLID,iam=iam,mpicom=mpicom) - call mct_gsmap_OrderedPoints(gsmap, iam, Dof) - - ni = size(AVS) - ns = mct_aVect_lsize(AVS(1)) - nf = mct_aVect_nRattr(AVS(1)) - ng = mct_gsmap_gsize(gsmap) - - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_read_avs exists') - if (exists) then - rcode = pio_openfile(cpl_io_subsystem, pioid, cpl_pio_iotype, trim(filename),pio_nowrite) - if(iam==0) write(logunit,*) subname,' open file ',trim(filename) - call pio_seterrorhandling(pioid,PIO_BCAST_ERROR) - rcode = pio_get_att(pioid,pio_global,"file_version",lversion) - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - else - if(iam==0) write(logunit,*) subname,' ERROR: file invalid ',trim(filename),' ',trim(dname) - call shr_sys_abort(subname//'ERROR: file invalid '//trim(filename)//' '//trim(dname)) - endif - - allocate(data(ni*ns)) - - do k = 1,nf - call mct_aVect_getRList(mstring,k,AVS(1)) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - if (trim(lversion) == trim(version)) then - name1 = trim(lpre)//'_'//trim(itemc) - else - name1 = trim(prefix)//trim(dname)//'_'//trim(itemc) - endif - call pio_seterrorhandling(pioid, PIO_BCAST_ERROR) - rcode = pio_inq_varid(pioid,trim(name1),varid) - if (rcode == pio_noerr) then - if (k==1) then - rcode = pio_inq_varndims(pioid, varid, ndims) - rcode = pio_inq_vardimid(pioid, varid, dimid(1:ndims)) - rcode = pio_inq_dimlen(pioid, dimid(1), lnx) - if (ndims>=2) then - rcode = pio_inq_dimlen(pioid, dimid(2), lny) - else - lny = 1 - end if - if (lnx*lny /= ng) then - write(logunit,*) subname,' ERROR: dimensions do not match',& - lnx,lny,mct_gsmap_gsize(gsmap) - call shr_sys_abort(subname//'ERROR: dimensions do not match') - end if - if (ndims>=3) then - rcode = pio_inq_dimlen(pioid, dimid(3), lni) - else - lni = 1 - end if - if (ni /= lni) then - write(logunit,*) subname,' ERROR: ni dimensions do not match',ni,lni - call shr_sys_abort(subname//'ERROR: ni dimensions do not match') - end if - if (ni > 1) then - allocate(dofn(ns*ni)) - n = 0 - do n1 = 1,ni - do n2 = 1,ns - n = n + 1 - dofn(n) = (n1-1)*ng + dof(n2) - enddo - enddo - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny,lni/), dofn, iodesc) - deallocate(dofn) - else - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny/), dof, iodesc) - endif - deallocate(dof) - end if - - call pio_read_darray(pioid,varid,iodesc, data, rcode) - n = 0 - do n1 = 1,ni - do n2 = 1,ns - n = n + 1 - avs(n1)%rAttr(k,n2) = data(n) - enddo - enddo - else - write(logunit,*)'seq_io_readav warning: field ',trim(itemc),' is not on restart 
file' - write(logunit,*)'for backwards compatibility will set it to 0' - do n1 = 1,ni - avs(n1)%rattr(k,:) = 0.0_r8 - enddo - end if - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - enddo - - deallocate(data) - - !--- zero out fill value, this is somewhat arbitrary - do n1 = 1,ni - do n2 = 1,ns - do k = 1,nf - if (AVS(n1)%rAttr(k,n2) == fillvalue) then - AVS(n1)%rAttr(k,n2) = 0.0_r8 - endif - enddo - enddo - enddo - - call pio_freedecomp(pioid, iodesc) - call pio_closefile(pioid) - - end subroutine seq_io_read_avs - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_avs - read AV from netcdf file - ! - ! !DESCRIPTION: - ! Read AV from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_avscomp(filename, comp, flow, dname, pre) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*), intent(in) :: filename ! file - type(component_type), intent(inout) :: comp(:) - character(len=3), intent(in) :: flow ! 'c2x' or 'x2c' - character(len=*), intent(in) :: dname ! name of data - character(len=*), intent(in),optional :: pre ! prefix name - - !EOP - - type(mct_gsMap), pointer :: gsmap - type(mct_aVect), pointer :: avcomp - type(mct_aVect), pointer :: avcomp1 - integer(in) :: rcode - integer(in) :: iam,mpicom - integer(in) :: nf,ns,ng,ni - integer(in) :: k,n,n1,n2,ndims - type(file_desc_t) :: pioid - integer(in) :: dimid(4) - type(var_desc_t) :: varid - integer(in) :: lnx,lny,lni - type(mct_string) :: mstring ! mct char type - character(CL) :: itemc ! string converted to char - logical :: exists - type(io_desc_t) :: iodesc - integer(in), pointer :: dof(:) - integer(in), pointer :: dofn(:) - real(r8), allocatable :: data(:) - character(CL) :: lversion - character(CL) :: name1 - character(CL) :: lpre - character(*),parameter :: subName = '(seq_io_read_avs) ' - !------------------------------------------------------------------------------- - ! 
- !------------------------------------------------------------------------------- - - lversion = trim(version0) - - lpre = trim(dname) - if (present(pre)) then - lpre = trim(pre) - endif - - gsmap => component_get_gsmap_cx(comp(1)) - if (trim(flow) == 'x2c') avcomp1 => component_get_x2c_cx(comp(1)) - if (trim(flow) == 'c2x') avcomp1 => component_get_c2x_cx(comp(1)) - - call seq_comm_setptrs(CPLID,iam=iam,mpicom=mpicom) - call mct_gsmap_OrderedPoints(gsmap, iam, Dof) - - ni = size(comp) - ns = mct_aVect_lsize(avcomp1) - nf = mct_aVect_nRattr(avcomp1) - ng = mct_gsmap_gsize(gsmap) - - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_read_avs exists') - if (exists) then - rcode = pio_openfile(cpl_io_subsystem, pioid, cpl_pio_iotype, trim(filename),pio_nowrite) - if(iam==0) write(logunit,*) subname,' open file ',trim(filename) - call pio_seterrorhandling(pioid,PIO_BCAST_ERROR) - rcode = pio_get_att(pioid,pio_global,"file_version",lversion) - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - else - if(iam==0) write(logunit,*) subname,' ERROR: file invalid ',trim(filename),' ',trim(dname) - call shr_sys_abort(subname//'ERROR: file invalid '//trim(filename)//' '//trim(dname)) - endif - - allocate(data(ni*ns)) - - do k = 1,nf - call mct_aVect_getRList(mstring,k,avcomp1) - itemc = mct_string_toChar(mstring) - call mct_string_clean(mstring) - if (trim(lversion) == trim(version)) then - name1 = trim(lpre)//'_'//trim(itemc) - else - name1 = trim(prefix)//trim(dname)//'_'//trim(itemc) - endif - call pio_seterrorhandling(pioid, PIO_BCAST_ERROR) - rcode = pio_inq_varid(pioid,trim(name1),varid) - if (rcode == pio_noerr) then - if (k==1) then - rcode = pio_inq_varndims(pioid, varid, ndims) - rcode = pio_inq_vardimid(pioid, varid, dimid(1:ndims)) - rcode = pio_inq_dimlen(pioid, dimid(1), lnx) - if (ndims>=2) then - rcode = pio_inq_dimlen(pioid, dimid(2), lny) - else - lny = 1 - end if - if (lnx*lny /= ng) then - write(logunit,*) subname,' ERROR: dimensions do not match',& - lnx,lny,mct_gsmap_gsize(gsmap) - call shr_sys_abort(subname//'ERROR: dimensions do not match') - end if - if (ndims>=3) then - rcode = pio_inq_dimlen(pioid, dimid(3), lni) - else - lni = 1 - end if - if (ni /= lni) then - write(logunit,*) subname,' ERROR: ni dimensions do not match',ni,lni - call shr_sys_abort(subname//'ERROR: ni dimensions do not match') - end if - if (ni > 1) then - allocate(dofn(ns*ni)) - n = 0 - do n1 = 1,ni - do n2 = 1,ns - n = n + 1 - dofn(n) = (n1-1)*ng + dof(n2) - enddo - enddo - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny,lni/), dofn, iodesc) - deallocate(dofn) - else - call pio_initdecomp(cpl_io_subsystem, pio_double, (/lnx,lny/), dof, iodesc) - endif - deallocate(dof) - end if - - call pio_read_darray(pioid,varid,iodesc, data, rcode) - n = 0 - do n1 = 1,ni - if (trim(flow) == 'x2c') avcomp => component_get_x2c_cx(comp(n1)) - if (trim(flow) == 'c2x') avcomp => component_get_c2x_cx(comp(n1)) - do n2 = 1,ns - n = n + 1 - avcomp%rAttr(k,n2) = data(n) - enddo - enddo - else - write(logunit,*)'seq_io_readav warning: field ',trim(itemc),' is not on restart file' - write(logunit,*)'for backwards compatibility will set it to 0' - do n1 = 1,ni - if (trim(flow) == 'x2c') avcomp => component_get_x2c_cx(comp(n1)) - if (trim(flow) == 'c2x') avcomp => component_get_c2x_cx(comp(n1)) - avcomp%rattr(k,:) = 0.0_r8 - enddo - end if - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - enddo - - deallocate(data) - - !--- zero out fill value, this is somewhat 
arbitrary - do n1 = 1,ni - if (trim(flow) == 'x2c') avcomp => component_get_x2c_cx(comp(n1)) - if (trim(flow) == 'c2x') avcomp => component_get_c2x_cx(comp(n1)) - do n2 = 1,ns - do k = 1,nf - if (avcomp%rAttr(k,n2) == fillvalue) then - avcomp%rAttr(k,n2) = 0.0_r8 - endif - enddo - enddo - enddo - - call pio_freedecomp(pioid, iodesc) - call pio_closefile(pioid) - - end subroutine seq_io_read_avscomp - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_int - read scalar integer from netcdf file - ! - ! !DESCRIPTION: - ! Read scalar integer from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_int(filename,idata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - integer ,intent(inout):: idata ! integer data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - integer :: i1d(1) - character(*),parameter :: subName = '(seq_io_read_int) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_io_read_int1d(filename,i1d,dname) - idata = i1d(1) - - end subroutine seq_io_read_int - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_int1d - read 1d integer from netcdf file - ! - ! !DESCRIPTION: - ! Read 1d integer array from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_int1d(filename,idata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - integer(in) ,intent(inout):: idata(:) ! integer data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - integer(in) :: rcode - integer(in) :: iam,mpicom - type(file_desc_t) :: pioid - type(var_desc_t) :: varid - logical :: exists - character(CL) :: lversion - character(CL) :: name1 - character(*),parameter :: subName = '(seq_io_read_int1d) ' - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(CPLID,iam=iam,mpicom=mpicom) - lversion=trim(version0) - - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_read_int1d exists') - if (exists) then - rcode = pio_openfile(cpl_io_subsystem, pioid, cpl_pio_iotype, trim(filename),pio_nowrite) - ! 
write(logunit,*) subname,' open file ',trim(filename) - call pio_seterrorhandling(pioid,PIO_BCAST_ERROR) - rcode = pio_get_att(pioid,pio_global,"file_version",lversion) - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - else - if(iam==0) write(logunit,*) subname,' ERROR: file invalid ',trim(filename),' ',trim(dname) - call shr_sys_abort(subname//'ERROR: file invalid '//trim(filename)//' '//trim(dname)) - endif - - if (trim(lversion) == trim(version)) then - name1 = trim(dname) - else - name1 = trim(prefix)//trim(dname) - endif - rcode = pio_inq_varid(pioid,trim(name1),varid) - rcode = pio_get_var(pioid,varid,idata) - - call pio_closefile(pioid) - - ! write(logunit,*) subname,' read int ',trim(dname) - - - end subroutine seq_io_read_int1d - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_r8 - read scalar double from netcdf file - ! - ! !DESCRIPTION: - ! Read scalar double from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_r8(filename,rdata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - real(r8) ,intent(inout):: rdata ! real data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - real(r8) :: r1d(1) - character(*),parameter :: subName = '(seq_io_read_r8) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_io_read_r81d(filename,r1d,dname) - rdata = r1d(1) - - end subroutine seq_io_read_r8 - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_r81d - read 1d double array from netcdf file - ! - ! !DESCRIPTION: - ! Read 1d double array from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_r81d(filename,rdata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - real(r8) ,intent(inout):: rdata(:) ! real data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - integer(in) :: rcode - integer(in) :: iam,mpicom - type(file_desc_T) :: pioid - type(var_desc_t) :: varid - logical :: exists - character(CL) :: lversion - character(CL) :: name1 - character(*),parameter :: subName = '(seq_io_read_r81d) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(CPLID,iam=iam,mpicom=mpicom) - - lversion=trim(version0) - - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_read_r81d exists') - if (exists) then - rcode = pio_openfile(cpl_io_subsystem, pioid, cpl_pio_iotype, trim(filename),pio_nowrite) - ! 
write(logunit,*) subname,' open file ',trim(filename) - call pio_seterrorhandling(pioid,PIO_BCAST_ERROR) - rcode = pio_get_att(pioid,pio_global,"file_version",lversion) - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - else - if(iam==0) write(logunit,*) subname,' ERROR: file invalid ',trim(filename),' ',trim(dname) - call shr_sys_abort(subname//'ERROR: file invalid '//trim(filename)//' '//trim(dname)) - endif - - if (trim(lversion) == trim(version)) then - name1 = trim(dname) - else - name1 = trim(prefix)//trim(dname) - endif - rcode = pio_inq_varid(pioid,trim(name1),varid) - rcode = pio_get_var(pioid,varid,rdata) - - call pio_closefile(pioid) - - ! write(logunit,*) subname,' read int ',trim(dname) - - end subroutine seq_io_read_r81d - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_char - read char string from netcdf file - ! - ! !DESCRIPTION: - ! Read char string from netcdf file - ! - ! !REVISION HISTORY: - ! 2010-July-06 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_char(filename,rdata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - character(len=*),intent(inout):: rdata ! character data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - integer(in) :: rcode - integer(in) :: iam,mpicom - type(file_desc_T) :: pioid - type(var_desc_t) :: varid - logical :: exists - character(CL) :: lversion - character(CL) :: name1 - character(*),parameter :: subName = '(seq_io_read_char) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(CPLID,iam=iam,mpicom=mpicom) - - lversion=trim(version0) - - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_read_char exists') - if (exists) then - rcode = pio_openfile(cpl_io_subsystem, pioid, cpl_pio_iotype, trim(filename),pio_nowrite) - ! write(logunit,*) subname,' open file ',trim(filename) - call pio_seterrorhandling(pioid,PIO_BCAST_ERROR) - rcode = pio_get_att(pioid,pio_global,"file_version",lversion) - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - else - if(iam==0) write(logunit,*) subname,' ERROR: file invalid ',trim(filename),' ',trim(dname) - call shr_sys_abort(subname//'ERROR: file invalid '//trim(filename)//' '//trim(dname)) - endif - - if (trim(lversion) == trim(version)) then - name1 = trim(dname) - else - name1 = trim(prefix)//trim(dname) - endif - rcode = pio_inq_varid(pioid,trim(name1),varid) - rcode = pio_get_var(pioid,varid,charvar) - rdata = trim(charvar) - - call pio_closefile(pioid) - - end subroutine seq_io_read_char - - !=============================================================================== - !=============================================================================== -end module seq_io_mod diff --git a/src/drivers/mct/main/seq_map_mod.F90 b/src/drivers/mct/main/seq_map_mod.F90 deleted file mode 100644 index 5fa449950e2..00000000000 --- a/src/drivers/mct/main/seq_map_mod.F90 +++ /dev/null @@ -1,906 +0,0 @@ -module seq_map_mod - - !--------------------------------------------------------------------- - ! - ! Purpose: - ! - ! General mapping routines - ! 
including self normalizing mapping routine with optional fraction - ! - ! Author: T. Craig, Jan-28-2011 - ! - !--------------------------------------------------------------------- - - use shr_kind_mod ,only: R8 => SHR_KIND_R8, IN=>SHR_KIND_IN - use shr_kind_mod ,only: CL => SHR_KIND_CL, CX => SHR_KIND_CX - use shr_sys_mod - use shr_const_mod - use shr_mct_mod, only: shr_mct_sMatPInitnc, shr_mct_queryConfigFile - use mct_mod - use seq_comm_mct - use component_type_mod - use seq_map_type_mod - - implicit none - save - private ! except - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: seq_map_init_rcfile ! cpl pes - public :: seq_map_init_rearrolap ! cpl pes - public :: seq_map_initvect ! cpl pes - public :: seq_map_map ! cpl pes - public :: seq_map_mapvect ! cpl pes - public :: seq_map_readdata ! cpl pes - - interface seq_map_avNorm - module procedure seq_map_avNormArr - module procedure seq_map_avNormAvF - end interface seq_map_avNorm - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(*),parameter :: seq_map_stroff = 'variable_unset' - character(*),parameter :: seq_map_stron = 'StrinG_is_ON' - real(R8),parameter,private :: deg2rad = shr_const_pi/180.0_R8 ! deg to rads - - !======================================================================= -contains - !======================================================================= - - subroutine seq_map_init_rcfile( mapper, comp_s, comp_d, & - maprcfile, maprcname, maprctype, samegrid, string, esmf_map) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) ,intent(inout),pointer :: mapper - type(component_type) ,intent(inout) :: comp_s - type(component_type) ,intent(inout) :: comp_d - character(len=*) ,intent(in) :: maprcfile - character(len=*) ,intent(in) :: maprcname - character(len=*) ,intent(in) :: maprctype - logical ,intent(in) :: samegrid - character(len=*) ,intent(in),optional :: string - logical ,intent(in),optional :: esmf_map - ! - ! Local Variables - ! - type(mct_gsmap), pointer :: gsmap_s ! temporary pointers - type(mct_gsmap), pointer :: gsmap_d ! temporary pointers - integer(IN) :: mpicom - character(CX) :: mapfile - character(CL) :: maptype - integer(IN) :: mapid - character(len=*),parameter :: subname = "(seq_map_init_rcfile) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - endif - - call seq_comm_setptrs(CPLID, mpicom=mpicom) - - gsmap_s => component_get_gsmap_cx(comp_s) - gsmap_d => component_get_gsmap_cx(comp_d) - - if (mct_gsmap_Identical(gsmap_s,gsmap_d)) then - call seq_map_mapmatch(mapid,gsmap_s=gsmap_s,gsmap_d=gsmap_d,strategy="copy") - - if (mapid > 0) then - call seq_map_mappoint(mapid,mapper) - else - call seq_map_mapinit(mapper,mpicom) - mapper%copy_only = .true. 
- mapper%strategy = "copy" - mapper%gsmap_s => component_get_gsmap_cx(comp_s) - mapper%gsmap_d => component_get_gsmap_cx(comp_d) - endif - - elseif (samegrid) then - call seq_map_mapmatch(mapid,gsmap_s=gsmap_s,gsmap_d=gsmap_d,strategy="rearrange") - - if (mapid > 0) then - call seq_map_mappoint(mapid,mapper) - else - ! --- Initialize rearranger - call seq_map_mapinit(mapper,mpicom) - mapper%rearrange_only = .true. - mapper%strategy = "rearrange" - mapper%gsmap_s => component_get_gsmap_cx(comp_s) - mapper%gsmap_d => component_get_gsmap_cx(comp_d) - call seq_map_gsmapcheck(gsmap_s, gsmap_d) - call mct_rearr_init(gsmap_s, gsmap_d, mpicom, mapper%rearr) - endif - - else - - ! --- Initialize Smatp - call shr_mct_queryConfigFile(mpicom,maprcfile,maprcname,mapfile,maprctype,maptype) - - call seq_map_mapmatch(mapid,gsMap_s=gsMap_s,gsMap_d=gsMap_d,mapfile=mapfile,strategy=maptype) - - if (mapid > 0) then - call seq_map_mappoint(mapid,mapper) - else - call seq_map_mapinit(mapper,mpicom) - mapper%mapfile = trim(mapfile) - mapper%strategy= trim(maptype) - mapper%gsmap_s => component_get_gsmap_cx(comp_s) - mapper%gsmap_d => component_get_gsmap_cx(comp_d) - - call shr_mct_sMatPInitnc(mapper%sMatp, mapper%gsMap_s, mapper%gsMap_d, trim(mapfile),trim(maptype),mpicom) - if (present(esmf_map)) mapper%esmf_map = esmf_map - - if (mapper%esmf_map) then - call shr_sys_abort(subname//' ERROR: esmf SMM not supported') - endif ! esmf_map - - endif ! mapid >= 0 - endif - - if (seq_comm_iamroot(CPLID)) then - write(logunit,'(2A,I6,4A)') subname,' mapper counter, strategy, mapfile = ', & - mapper%counter,' ',trim(mapper%strategy),' ',trim(mapper%mapfile) - call shr_sys_flush(logunit) - endif - - end subroutine seq_map_init_rcfile - - !======================================================================= - - subroutine seq_map_init_rearrolap(mapper, comp_s, comp_d, string) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) ,intent(inout),pointer :: mapper - type(component_type) ,intent(inout) :: comp_s - type(component_type) ,intent(inout) :: comp_d - character(len=*) ,intent(in),optional :: string - ! - ! Local Variables - ! - integer(IN) :: mapid - type(mct_gsmap), pointer :: gsmap_s - type(mct_gsmap), pointer :: gsmap_d - integer(IN) :: mpicom - character(len=*),parameter :: subname = "(seq_map_init_rearrolap) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - endif - - call seq_comm_setptrs(CPLID, mpicom=mpicom) - - gsmap_s => component_get_gsmap_cx(comp_s) - gsmap_d => component_get_gsmap_cx(comp_d) - - if (mct_gsmap_Identical(gsmap_s,gsmap_d)) then - call seq_map_mapmatch(mapid,gsmap_s=gsmap_s,gsmap_d=gsmap_d,strategy="copy") - - if (mapid > 0) then - call seq_map_mappoint(mapid,mapper) - else - call seq_map_mapinit(mapper,mpicom) - mapper%copy_only = .true. - mapper%strategy = "copy" - mapper%gsmap_s => component_get_gsmap_cx(comp_s) - mapper%gsmap_d => component_get_gsmap_cx(comp_d) - endif - - else - call seq_map_mapmatch(mapid,gsmap_s=gsmap_s,gsmap_d=gsmap_d,strategy="rearrange") - - if (mapid > 0) then - call seq_map_mappoint(mapid,mapper) - else - ! --- Initialize rearranger - call seq_map_mapinit(mapper, mpicom) - mapper%rearrange_only = .true. 
- mapper%strategy = "rearrange" - mapper%gsmap_s => component_get_gsmap_cx(comp_s) - mapper%gsmap_d => component_get_gsmap_cx(comp_d) - call seq_map_gsmapcheck(gsmap_s, gsmap_d) - call mct_rearr_init(gsmap_s, gsmap_d, mpicom, mapper%rearr) - endif - - endif - - if (seq_comm_iamroot(CPLID)) then - write(logunit,'(2A,I6,4A)') subname,' mapper counter, strategy, mapfile = ', & - mapper%counter,' ',trim(mapper%strategy),' ',trim(mapper%mapfile) - call shr_sys_flush(logunit) - endif - - end subroutine seq_map_init_rearrolap - - !======================================================================= - - subroutine seq_map_map( mapper, av_s, av_d, fldlist, norm, avwts_s, avwtsfld_s, & - string, msgtag ) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) ,intent(inout) :: mapper - type(mct_aVect) ,intent(in) :: av_s - type(mct_aVect) ,intent(inout) :: av_d - character(len=*),intent(in),optional :: fldlist - logical ,intent(in),optional :: norm - type(mct_aVect) ,intent(in),optional :: avwts_s - character(len=*),intent(in),optional :: avwtsfld_s - character(len=*),intent(in),optional :: string - integer(IN) ,intent(in),optional :: msgtag - ! - ! Local Variables - ! - logical :: lnorm - integer(IN),save :: ltag ! message tag for rearrange - character(len=*),parameter :: subname = "(seq_map_map) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - endif - - lnorm = .true. - if (present(norm)) then - lnorm = norm - endif - - if (present(msgtag)) then - ltag = msgtag - else - ltag = 2000 - endif - - if (present(avwts_s) .and. .not. present(avwtsfld_s)) then - write(logunit,*) subname,' ERROR: avwts_s present but avwtsfld_s not' - call shr_sys_abort(subname//' ERROR: avwts present') - endif - if (.not. present(avwts_s) .and. present(avwtsfld_s)) then - write(logunit,*) subname,' ERROR: avwtsfld_s present but avwts_s not' - call shr_sys_abort(subname//' ERROR: avwtsfld present') - endif - - if (mapper%copy_only) then - !------------------------------------------- - ! COPY data - !------------------------------------------- - if (present(fldlist)) then - call mct_aVect_copy(aVin=av_s,aVout=av_d,rList=fldlist,vector=mct_usevector) - else - call mct_aVect_copy(aVin=av_s,aVout=av_d,vector=mct_usevector) - endif - - else if (mapper%rearrange_only) then - !------------------------------------------- - ! REARRANGE data - !------------------------------------------- - if (present(fldlist)) then - call mct_rearr_rearrange_fldlist(av_s, av_d, mapper%rearr, tag=ltag, VECTOR=mct_usevector, & - ALLTOALL=mct_usealltoall, fldlist=fldlist) - else - call mct_rearr_rearrange(av_s, av_d, mapper%rearr, tag=ltag, VECTOR=mct_usevector, & - ALLTOALL=mct_usealltoall) - endif - - else - !------------------------------------------- - ! 
MAP data - !------------------------------------------- - if (present(avwts_s)) then - if (present(fldlist)) then - call seq_map_avNorm(mapper, av_s, av_d, avwts_s, trim(avwtsfld_s), & - rList=fldlist, norm=lnorm) - else - call seq_map_avNorm(mapper, av_s, av_d, avwts_s, trim(avwtsfld_s), & - norm=lnorm) - endif - else - if (present(fldlist)) then - call seq_map_avNorm(mapper, av_s, av_d, rList=fldlist, norm=lnorm) - else - call seq_map_avNorm(mapper, av_s, av_d, norm=lnorm) - endif - endif - end if - - end subroutine seq_map_map - - !======================================================================= - - subroutine seq_map_initvect(mapper, type, comp_s, comp_d, string) - - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) ,intent(inout) :: mapper - character(len=*) ,intent(in) :: type - type(component_type) ,intent(inout) :: comp_s - type(component_type) ,intent(inout) :: comp_d - character(len=*) ,intent(in),optional :: string - ! - ! Local Variables - ! - type(mct_gGrid), pointer :: dom_s - type(mct_gGrid), pointer :: dom_d - integer(IN) :: klon, klat, lsize, n - character(len=CL) :: lstring - character(len=*),parameter :: subname = "(seq_map_initvect) " - !----------------------------------------------------- - - lstring = ' ' - if (present(string)) then - if (seq_comm_iamroot(CPLID)) write(logunit,'(A)') subname//' called for '//trim(string) - lstring = trim(string) - endif - - dom_s => component_get_dom_cx(comp_s) - dom_d => component_get_dom_cx(comp_d) - - if (trim(type(1:6)) == 'cart3d') then - if (mapper%cart3d_init == trim(seq_map_stron)) return - - !--- compute these up front for vector mapping --- - lsize = mct_aVect_lsize(dom_s%data) - allocate(mapper%slon_s(lsize),mapper%clon_s(lsize), & - mapper%slat_s(lsize),mapper%clat_s(lsize)) - klon = mct_aVect_indexRa(dom_s%data, "lon" ) - klat = mct_aVect_indexRa(dom_s%data, "lat" ) - do n = 1,lsize - mapper%slon_s(n) = sin(dom_s%data%rAttr(klon,n)*deg2rad) - mapper%clon_s(n) = cos(dom_s%data%rAttr(klon,n)*deg2rad) - mapper%slat_s(n) = sin(dom_s%data%rAttr(klat,n)*deg2rad) - mapper%clat_s(n) = cos(dom_s%data%rAttr(klat,n)*deg2rad) - enddo - - lsize = mct_aVect_lsize(dom_d%data) - allocate(mapper%slon_d(lsize),mapper%clon_d(lsize), & - mapper%slat_d(lsize),mapper%clat_d(lsize)) - klon = mct_aVect_indexRa(dom_d%data, "lon" ) - klat = mct_aVect_indexRa(dom_d%data, "lat" ) - do n = 1,lsize - mapper%slon_d(n) = sin(dom_d%data%rAttr(klon,n)*deg2rad) - mapper%clon_d(n) = cos(dom_d%data%rAttr(klon,n)*deg2rad) - mapper%slat_d(n) = sin(dom_d%data%rAttr(klat,n)*deg2rad) - mapper%clat_d(n) = cos(dom_d%data%rAttr(klat,n)*deg2rad) - enddo - mapper%cart3d_init = trim(seq_map_stron) - endif - - end subroutine seq_map_initvect - - !======================================================================= - - subroutine seq_map_mapvect( mapper, type, av_s, av_d, fldu, fldv, norm, string ) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) ,intent(inout) :: mapper - character(len=*),intent(in) :: type - type(mct_aVect) ,intent(in) :: av_s - type(mct_aVect) ,intent(inout) :: av_d - character(len=*),intent(in) :: fldu - character(len=*),intent(in) :: fldv - logical ,intent(in),optional :: norm - character(len=*),intent(in),optional :: string - ! - ! Local Variables - ! 
- logical :: lnorm - character(len=CL) :: lstring - character(len=*),parameter :: subname = "(seq_map_mapvect) " - !----------------------------------------------------- - - lstring = ' ' - if (present(string)) then - if (seq_comm_iamroot(CPLID)) write(logunit,'(A)') subname//' called for '//trim(string) - lstring = trim(string) - endif - - if (mapper%copy_only .or. mapper%rearrange_only) then - return - endif - - lnorm = .true. - if (present(norm)) then - lnorm = norm - endif - - if (trim(type(1:6)) == 'cart3d') then - if (mapper%cart3d_init /= trim(seq_map_stron)) then - call shr_sys_abort(trim(subname)//' ERROR: cart3d not initialized '//trim(lstring)) - endif - call seq_map_cart3d(mapper, type, av_s, av_d, fldu, fldv, norm=lnorm, string=string) - elseif (trim(type) == 'none') then - call seq_map_map(mapper, av_s, av_d, fldlist=trim(fldu)//':'//trim(fldv), norm=lnorm) - else - write(logunit,*) subname,' ERROR: type unsupported ',trim(type) - call shr_sys_abort(trim(subname)//' ERROR in type='//trim(type)) - end if - - end subroutine seq_map_mapvect - - !======================================================================= - - subroutine seq_map_cart3d( mapper, type, av_s, av_d, fldu, fldv, norm, string) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) ,intent(inout) :: mapper - character(len=*),intent(in) :: type - type(mct_aVect) ,intent(in) :: av_s - type(mct_aVect) ,intent(inout) :: av_d - character(len=*),intent(in) :: fldu - character(len=*),intent(in) :: fldv - logical ,intent(in),optional :: norm - character(len=*),intent(in),optional :: string - ! - ! Local Variables - ! - integer :: lsize - logical :: lnorm - integer :: ku,kv,kux,kuy,kuz,n - real(r8) :: ue,un,ur,ux,uy,uz,speed - real(r8) :: urmaxl,urmax,uravgl,uravg,spavgl,spavg - type(mct_aVect) :: av3_s, av3_d - integer(in) :: mpicom,my_task,ierr,urcnt,urcntl - character(len=*),parameter :: subname = "(seq_map_cart3d) " - - lnorm = .true. - if (present(norm)) then - lnorm=norm - endif - - mpicom = mapper%mpicom - - ku = mct_aVect_indexRA(av_s, trim(fldu), perrwith='quiet') - kv = mct_aVect_indexRA(av_s, trim(fldv), perrwith='quiet') - - if (ku /= 0 .and. 
kv /= 0) then - lsize = mct_aVect_lsize(av_s) - call mct_avect_init(av3_s,rList='ux:uy:uz',lsize=lsize) - - lsize = mct_aVect_lsize(av_d) - call mct_avect_init(av3_d,rList='ux:uy:uz',lsize=lsize) - - kux = mct_aVect_indexRA(av3_s,'ux') - kuy = mct_aVect_indexRA(av3_s,'uy') - kuz = mct_aVect_indexRA(av3_s,'uz') - lsize = mct_aVect_lsize(av_s) - do n = 1,lsize - ur = 0.0_r8 - ue = av_s%rAttr(ku,n) - un = av_s%rAttr(kv,n) - ux = mapper%clon_s(n)*mapper%clat_s(n)*ur - & - mapper%clon_s(n)*mapper%slat_s(n)*un - & - mapper%slon_s(n)*ue - uy = mapper%slon_s(n)*mapper%clon_s(n)*ur - & - mapper%slon_s(n)*mapper%slat_s(n)*un + & - mapper%clon_s(n)*ue - uz = mapper%slat_s(n)*ur + & - mapper%clat_s(n)*un - av3_s%rAttr(kux,n) = ux - av3_s%rAttr(kuy,n) = uy - av3_s%rAttr(kuz,n) = uz - enddo - - call seq_map_map(mapper, av3_s, av3_d, norm=lnorm) - - kux = mct_aVect_indexRA(av3_d,'ux') - kuy = mct_aVect_indexRA(av3_d,'uy') - kuz = mct_aVect_indexRA(av3_d,'uz') - lsize = mct_aVect_lsize(av_d) - urmaxl = -1.0_r8 - uravgl = 0.0_r8 - urcntl = 0 - spavgl = 0.0_r8 - do n = 1,lsize - ux = av3_d%rAttr(kux,n) - uy = av3_d%rAttr(kuy,n) - uz = av3_d%rAttr(kuz,n) - ue = -mapper%slon_d(n) *ux + & - mapper%clon_d(n) *uy - un = -mapper%clon_d(n)*mapper%slat_d(n)*ux - & - mapper%slon_d(n)*mapper%slat_d(n)*uy + & - mapper%clat_d(n)*uz - ur = mapper%clon_d(n)*mapper%clat_d(n)*ux + & - mapper%slon_d(n)*mapper%clat_d(n)*uy - & - mapper%slat_d(n)*uz - speed = sqrt(ur*ur + ue*ue + un*un) - if (trim(type) == 'cart3d_diag' .or. trim(type) == 'cart3d_uvw_diag') then - if (speed /= 0.0_r8) then - urmaxl = max(urmaxl,abs(ur)) - uravgl = uravgl + abs(ur) - spavgl = spavgl + speed - urcntl = urcntl + 1 - endif - endif - if (type(1:10) == 'cart3d_uvw') then - !--- this adds ur to ue and un, while preserving u/v angle and total speed --- - if (un == 0.0_R8) then - !--- if ue is also 0.0 then just give speed to ue, this is arbitrary --- - av_d%rAttr(ku,n) = sign(speed,ue) - av_d%rAttr(kv,n) = 0.0_r8 - else if (ue == 0.0_R8) then - av_d%rAttr(ku,n) = 0.0_r8 - av_d%rAttr(kv,n) = sign(speed,un) - else - av_d%rAttr(ku,n) = sign(speed/sqrt(1.0_r8 + ((un*un)/(ue*ue))),ue) - av_d%rAttr(kv,n) = sign(speed/sqrt(1.0_r8 + ((ue*ue)/(un*un))),un) - endif - else - !--- this ignores ur --- - av_d%rAttr(ku,n) = ue - av_d%rAttr(kv,n) = un - endif - enddo - if (trim(type) == 'cart3d_diag' .or. trim(type) == 'cart3d_uvw_diag') then - call mpi_comm_rank(mpicom,my_task,ierr) - call shr_mpi_max(urmaxl,urmax,mpicom,'urmax') - call shr_mpi_sum(uravgl,uravg,mpicom,'uravg') - call shr_mpi_sum(spavgl,spavg,mpicom,'spavg') - call shr_mpi_sum(urcntl,urcnt,mpicom,'urcnt') - if (my_task == 0 .and. urcnt > 0) then - uravg = uravg / urcnt - spavg = spavg / urcnt - write(logunit,*) trim(subname),' cart3d uravg,urmax,spavg = ',uravg,urmax,spavg - endif - endif - - call mct_avect_clean(av3_s) - call mct_avect_clean(av3_d) - - endif ! 
ku,kv - - end subroutine seq_map_cart3d - - !======================================================================= - - subroutine seq_map_readdata(maprcfile, maprcname, mpicom, ID, & - ni_s, nj_s, av_s, gsmap_s, avfld_s, filefld_s, & - ni_d, nj_d, av_d, gsmap_d, avfld_d, filefld_d, string) - - !--- lifted from work by J Edwards, April 2011 - - use shr_pio_mod, only : shr_pio_getiosys, shr_pio_getiotype - use pio, only : pio_openfile, pio_closefile, pio_read_darray, pio_inq_dimid, & - pio_inq_dimlen, pio_inq_varid, file_desc_t, io_desc_t, iosystem_desc_t, & - var_desc_t, pio_int, pio_get_var, pio_double, pio_initdecomp, pio_freedecomp - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - character(len=*),intent(in) :: maprcfile - character(len=*),intent(in) :: maprcname - integer(IN) ,intent(in) :: mpicom - integer(IN) ,intent(in) :: ID - integer(IN) ,intent(out) ,optional :: ni_s - integer(IN) ,intent(out) ,optional :: nj_s - type(mct_avect) ,intent(inout),optional :: av_s - type(mct_gsmap) ,intent(in) ,optional :: gsmap_s - character(len=*),intent(in) ,optional :: avfld_s - character(len=*),intent(in) ,optional :: filefld_s - integer(IN) ,intent(out) ,optional :: ni_d - integer(IN) ,intent(out) ,optional :: nj_d - type(mct_avect) ,intent(inout),optional :: av_d - type(mct_gsmap) ,intent(in) ,optional :: gsmap_d - character(len=*),intent(in) ,optional :: avfld_d - character(len=*),intent(in) ,optional :: filefld_d - character(len=*),intent(in) ,optional :: string - ! - ! Local Variables - ! - type(iosystem_desc_t), pointer :: pio_subsystem - integer(IN) :: pio_iotype - type(file_desc_t) :: File ! PIO file pointer - type(io_desc_t) :: iodesc ! PIO parallel io descriptor - integer(IN) :: rcode ! pio routine return code - type(var_desc_t) :: vid ! pio variable ID - integer(IN) :: did ! pio dimension ID - integer(IN) :: na ! size of source domain - integer(IN) :: nb ! size of destination domain - integer(IN) :: i ! index - integer(IN) :: mytask ! my task - integer(IN), pointer :: dof(:) ! DOF pointers for parallel read - character(len=256):: fileName - character(len=64) :: lfld_s, lfld_d, lfile_s, lfile_d - character(*),parameter :: areaAV_field = 'aream' - character(*),parameter :: areafile_s = 'area_a' - character(*),parameter :: areafile_d = 'area_b' - character(len=*),parameter :: subname = "(seq_map_readdata) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. 
present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - call shr_sys_flush(logunit) - endif - - call MPI_COMM_RANK(mpicom,mytask,rcode) - - lfld_s = trim(areaAV_field) - if (present(avfld_s)) then - lfld_s = trim(avfld_s) - endif - - lfld_d = trim(areaAV_field) - if (present(avfld_d)) then - lfld_s = trim(avfld_d) - endif - - lfile_s = trim(areafile_s) - if (present(filefld_s)) then - lfile_s = trim(filefld_s) - endif - - lfile_d = trim(areafile_d) - if (present(filefld_d)) then - lfile_d = trim(filefld_d) - endif - - call I90_allLoadF(trim(maprcfile),0,mpicom,rcode) - if(rcode /= 0) then - write(logunit,*)"Cant find maprcfile file ",trim(maprcfile) - call shr_sys_abort(trim(subname)//"i90_allLoadF File Not Found") - endif - - call i90_label(trim(maprcname),rcode) - if(rcode /= 0) then - write(logunit,*)"Cant find label ",maprcname - call shr_sys_abort(trim(subname)//"i90_label Not Found") - endif - - call i90_gtoken(filename,rcode) - if(rcode /= 0) then - write(logunit,*)"Error reading token ",filename - call shr_sys_abort(trim(subname)//"i90_gtoken Error on filename read") - endif - - pio_subsystem => shr_pio_getiosys(ID) - pio_iotype = shr_pio_getiotype(ID) - - rcode = pio_openfile(pio_subsystem, File, pio_iotype, filename) - - if (present(ni_s)) then - rcode = pio_inq_dimid (File, 'ni_a', did) ! number of lons in input grid - rcode = pio_inq_dimlen(File, did , ni_s) - end if - if(present(nj_s)) then - rcode = pio_inq_dimid (File, 'nj_a', did) ! number of lats in input grid - rcode = pio_inq_dimlen(File, did , nj_s) - end if - if(present(ni_d)) then - rcode = pio_inq_dimid (File, 'ni_b', did) ! number of lons in output grid - rcode = pio_inq_dimlen(File, did , ni_d) - end if - if(present(nj_d)) then - rcode = pio_inq_dimid (File, 'nj_b', did) ! number of lats in output grid - rcode = pio_inq_dimlen(File, did , nj_d) - endif - - !--- read and load area_a --- - if (present(av_s)) then - if (.not.present(gsmap_s)) then - call shr_sys_abort(trim(subname)//' ERROR av_s must have gsmap_s') - endif - rcode = pio_inq_dimid (File, 'n_a', did) ! size of input vector - rcode = pio_inq_dimlen(File, did , na) - i = mct_avect_indexra(av_s, trim(lfld_s)) - call mct_gsmap_OrderedPoints(gsMap_s, mytask, dof) - call pio_initdecomp(pio_subsystem, pio_double, (/na/), dof, iodesc) - deallocate(dof) - rcode = pio_inq_varid(File,trim(lfile_s),vid) - call pio_read_darray(File, vid, iodesc, av_s%rattr(i,:), rcode) - call pio_freedecomp(File,iodesc) - end if - - !--- read and load area_b --- - if (present(av_d)) then - if (.not.present(gsmap_d)) then - call shr_sys_abort(trim(subname)//' ERROR av_d must have gsmap_d') - endif - rcode = pio_inq_dimid (File, 'n_b', did) ! size of output vector - rcode = pio_inq_dimlen(File, did , nb) - i = mct_avect_indexra(av_d, trim(lfld_d)) - call mct_gsmap_OrderedPoints(gsMap_d, mytask, dof) - call pio_initdecomp(pio_subsystem, pio_double, (/nb/), dof, iodesc) - deallocate(dof) - rcode = pio_inq_varid(File,trim(lfile_d),vid) - call pio_read_darray(File, vid, iodesc, av_d%rattr(i,:), rcode) - call pio_freedecomp(File,iodesc) - endif - - - call pio_closefile(File) - - end subroutine seq_map_readdata - - !======================================================================= - - subroutine seq_map_avNormAvF(mapper, av_i, av_o, avf_i, avfifld, rList, norm) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) , intent(inout) :: mapper ! mapper - type(mct_aVect) , intent(in) :: av_i ! 
input - type(mct_aVect) , intent(inout) :: av_o ! output - type(mct_aVect) , intent(in) :: avf_i ! extra src "weight" - character(len=*), intent(in) :: avfifld ! field name in avf_i - character(len=*), intent(in),optional :: rList ! fields list - logical , intent(in),optional :: norm ! normalize at end - ! - integer(IN) :: lsize_i, lsize_f, kf, j - real(r8),allocatable :: frac_i(:) - logical :: lnorm - character(*),parameter :: subName = '(seq_map_avNormAvF) ' - !----------------------------------------------------- - - lnorm = .true. - if (present(norm)) then - lnorm = norm - endif - - lsize_i = mct_aVect_lsize(av_i) - lsize_f = mct_aVect_lsize(avf_i) - - if (lsize_i /= lsize_f) then - write(logunit,*) subname,' ERROR: lsize_i ne lsize_f ',lsize_i,lsize_f - call shr_sys_abort(subname//' ERROR size_i ne lsize_f') - endif - - !--- extract frac_i field from avf_i to pass to seq_map_avNormArr --- - allocate(frac_i(lsize_i)) - do j = 1,lsize_i - kf = mct_aVect_indexRA(avf_i,trim(avfifld)) - frac_i(j) = avf_i%rAttr(kf,j) - enddo - - if (present(rList)) then - call seq_map_avNormArr(mapper, av_i, av_o, frac_i, rList=rList, norm=lnorm) - else - call seq_map_avNormArr(mapper, av_i, av_o, frac_i, norm=lnorm) - endif - - deallocate(frac_i) - - end subroutine seq_map_avNormAvF - - !======================================================================= - - subroutine seq_map_avNormArr(mapper, av_i, av_o, norm_i, rList, norm) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(seq_map) , intent(inout) :: mapper! mapper - type(mct_aVect) , intent(in) :: av_i ! input - type(mct_aVect) , intent(inout) :: av_o ! output - real(r8) , intent(in), optional :: norm_i(:) ! source "weight" - character(len=*), intent(in), optional :: rList ! fields list - logical , intent(in), optional :: norm ! normalize at end - ! - ! Local variables - ! - type(mct_aVect) :: avp_i , avp_o - integer(IN) :: j,kf - integer(IN) :: lsize_i,lsize_o - real(r8) :: normval - character(CX) :: lrList,appnd - logical :: lnorm - character(*),parameter :: subName = '(seq_map_avNormArr) ' - character(len=*),parameter :: ffld = 'norm8wt' ! want something unique - !----------------------------------------------------- - - lsize_i = mct_aVect_lsize(av_i) - lsize_o = mct_aVect_lsize(av_o) - - lnorm = .true. - if (present(norm)) then - lnorm = norm - endif - - if (present(norm_i)) then - if (.not.lnorm) call shr_sys_abort(subname//' ERROR norm_i and norm = false') - if (size(norm_i) /= lsize_i) call shr_sys_abort(subname//' ERROR size(norm_i) ne lsize_i') - endif - - !--- create temporary avs for mapping --- - - if (lnorm .or. present(norm_i)) then - appnd = ':'//ffld - else - appnd = '' - endif - if (present(rList)) then - call mct_aVect_init(avp_i, rList=trim( rList)//trim(appnd), lsize=lsize_i) - call mct_aVect_init(avp_o, rList=trim( rList)//trim(appnd), lsize=lsize_o) - else - lrList = mct_aVect_exportRList2c(av_i) - call mct_aVect_init(avp_i, rList=trim(lrList)//trim(appnd), lsize=lsize_i) - lrList = mct_aVect_exportRList2c(av_o) - call mct_aVect_init(avp_o, rList=trim(lrList)//trim(appnd), lsize=lsize_o) - endif - - !--- copy av_i to avp_i and set ffld value to 1.0 - !--- then multiply all fields by norm_i if norm_i exists - !--- this will do the right thing for the norm_i normalization - - call mct_aVect_copy(aVin=av_i, aVout=avp_i, VECTOR=mct_usevector) - if (lnorm .or. 
present(norm_i)) then - kf = mct_aVect_indexRA(avp_i,ffld) - do j = 1,lsize_i - avp_i%rAttr(kf,j) = 1.0_r8 - enddo - - if (present(norm_i)) then - !$omp simd - do j = 1,lsize_i - avp_i%rAttr(:,j) = avp_i%rAttr(:,j)*norm_i(j) - enddo - endif - endif - - !--- map --- - - if (mapper%esmf_map) then - call shr_sys_abort(subname//' ERROR: esmf SMM not supported') - else - ! MCT based SMM - call mct_sMat_avMult(avp_i, mapper%sMatp, avp_o, VECTOR=mct_usevector) - endif - - !--- renormalize avp_o by mapped norm_i --- - - if (lnorm) then - kf = mct_aVect_indexRA(avp_o,ffld) - !$omp simd - do j = 1,lsize_o - normval = avp_o%rAttr(kf,j) - if (normval /= 0.0_r8) then - normval = 1.0_r8/normval - endif - avp_o%rAttr(:,j) = avp_o%rAttr(:,j)*normval - enddo - endif - - !--- copy back into av_o and we are done --- - - call mct_aVect_copy(aVin=avp_o, aVout=av_o, VECTOR=mct_usevector) - - call mct_aVect_clean(avp_i) - call mct_aVect_clean(avp_o) - - end subroutine seq_map_avNormArr - -end module seq_map_mod diff --git a/src/drivers/mct/main/seq_map_type_mod.F90 b/src/drivers/mct/main/seq_map_type_mod.F90 deleted file mode 100644 index 303972f4298..00000000000 --- a/src/drivers/mct/main/seq_map_type_mod.F90 +++ /dev/null @@ -1,179 +0,0 @@ -module seq_map_type_mod - - use shr_kind_mod , only: R8 => SHR_KIND_R8, IN=>SHR_KIND_IN - use shr_kind_mod , only: CL => SHR_KIND_CL, CX => SHR_KIND_CX - use shr_mct_mod , only: shr_mct_sMatPInitnc, shr_mct_queryConfigFile - use shr_sys_mod - use shr_const_mod - use seq_comm_mct, only: logunit, CPLID, seq_comm_iamroot - use mct_mod - - type seq_map - logical :: copy_only - logical :: rearrange_only - logical :: esmf_map - type(mct_rearr) :: rearr - type(mct_sMatp) :: sMatp - ! - !---- for comparing - integer(IN) :: counter ! indicates which seq_maps this mapper points to - character(CL) :: strategy ! indicates the strategy for this mapper, (copy, rearrange, X, Y) - character(CX) :: mapfile ! indicates the mapping file used - type(mct_gsMap),pointer :: gsmap_s - type(mct_gsMap),pointer :: gsmap_d - ! - !---- for cart3d - character(CL) :: cart3d_init - real(R8), pointer :: slon_s(:) - real(R8), pointer :: clon_s(:) - real(R8), pointer :: slat_s(:) - real(R8), pointer :: clat_s(:) - real(R8), pointer :: slon_d(:) - real(R8), pointer :: clon_d(:) - real(R8), pointer :: slat_d(:) - real(R8), pointer :: clat_d(:) - integer(IN) :: mpicom ! mpicom - ! - end type seq_map - public seq_map - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! seq_map_maxcnt is the total number of mappings supported - ! seq_map_cnt is the total number of mappings initialized any any time - ! seq_maps are the mappers that have been initialized - - integer(IN),parameter :: seq_map_maxcnt = 5000 - integer(IN) :: seq_map_cnt = 0 - type(seq_map),private,target :: seq_maps(seq_map_maxcnt) - - ! tcraig, work-in-progress - ! type seq_map_node - ! type(seq_map_node), pointer :: next,prev - ! type(seq_map), pointer :: seq_map - ! end type seq_map_node - ! type(seq_map_node), pointer :: seq_map_list, seq_map_curr - - !=============================================================================== -contains - !=============================================================================== - - subroutine seq_map_mapmatch(mapid,gsMap_s,gsMap_d,mapfile,strategy) - - ! This method searches through the current seq_maps to find a - ! 
mapping file that matches the values passed in - - implicit none - integer ,intent(out) :: mapid - type(mct_gsMap) ,intent(in),optional :: gsMap_s - type(mct_gsMap) ,intent(in),optional :: gsMap_d - character(len=*),intent(in),optional :: mapfile - character(len=*),intent(in),optional :: strategy - - integer(IN) :: m - logical :: match - character(*),parameter :: subName = '(seq_map_mapmatch) ' - - mapid = -1 - ! tcraig - this return turns off the mapping reuse - ! RETURN - - do m = 1,seq_map_cnt - match = .true. - - if (match .and. present(mapfile)) then - if (trim(mapfile) /= trim(seq_maps(m)%mapfile)) match = .false. - endif - if (match .and. present(strategy)) then - if (trim(strategy) /= trim(seq_maps(m)%strategy)) match = .false. - endif - if (match .and. present(gsMap_s)) then - if (.not.mct_gsmap_Identical(gsmap_s,seq_maps(m)%gsmap_s)) match = .false. - endif - if (match .and. present(gsMap_d)) then - if (.not.mct_gsmap_Identical(gsmap_d,seq_maps(m)%gsmap_d)) match = .false. - endif - - if (match) then - mapid = m - if (seq_comm_iamroot(CPLID)) then - write(logunit,'(A,i6)') subname//' found match ',mapid - call shr_sys_flush(logunit) - endif - return - endif - enddo - - end subroutine seq_map_mapmatch - - !=============================================================================== - - subroutine seq_map_mapinit(mapper,mpicom) - - ! This method initializes a new seq_maps map datatype and - ! has the mapper passed in point to it - - implicit none - type(seq_map) ,intent(inout),pointer :: mapper - integer(IN) ,intent(in) :: mpicom - - character(*),parameter :: subName = '(seq_map_mapinit) ' - - ! set the seq_map data - seq_map_cnt = seq_map_cnt + 1 - if (seq_map_cnt > seq_map_maxcnt) then - write(logunit,*) trim(subname),'seq_map_cnt too large',seq_map_cnt - call shr_sys_abort(subName // "seq_map_cnt bigger than seq_map_maxcnt") - endif - mapper => seq_maps(seq_map_cnt) - mapper%counter = seq_map_cnt - - mapper%copy_only = .false. - mapper%rearrange_only = .false. - mapper%mpicom = mpicom - mapper%strategy = "undefined" - mapper%mapfile = "undefined" - - end subroutine seq_map_mapinit - - !=============================================================================== - - subroutine seq_map_mappoint(mapid,mapper) - - ! This method searches through the current seq_maps to find a - ! mapping file that matches the values passed in - - implicit none - integer ,intent(in) :: mapid - type(seq_map) ,intent(inout),pointer :: mapper - - mapper => seq_maps(mapid) - - end subroutine seq_map_mappoint - - !=============================================================================== - - subroutine seq_map_gsmapcheck(gsmap1,gsmap2) - - ! 
This method verifies that two gsmaps are of the same global size - - implicit none - type(mct_gsMap),intent(in) :: gsmap1 - type(mct_gsMap),intent(in) :: gsmap2 - - integer(IN) :: s1, s2 - character(*),parameter :: subName = '(seq_map_gsmapcheck) ' - - s1 = mct_gsMap_gsize(gsMap1) - s2 = mct_gsMap_gsize(gsMap2) - if (s1 /= s2) then - write(logunit,*) trim(subname),'gsmap global sizes different ',s1,s2 - call shr_sys_abort(subName // "different gsmap size") - endif - - end subroutine seq_map_gsmapcheck - - -end module seq_map_type_mod diff --git a/src/drivers/mct/main/seq_rest_mod.F90 b/src/drivers/mct/main/seq_rest_mod.F90 deleted file mode 100644 index 12437a4d433..00000000000 --- a/src/drivers/mct/main/seq_rest_mod.F90 +++ /dev/null @@ -1,592 +0,0 @@ -!=============================================================================== -!BOP =========================================================================== -! -! !MODULE: seq_rest_mod -- cpl7 restart reading/writing routines -! -! !DESCRIPTION: -! -! Reads & writes cpl7 restart files -! -! !REMARKS: -! -! aVect, domain, and fraction info accessed via seq_avdata_mod -! to avoid excessively long routine arg lists. -! -! !REVISION HISTORY: -! 2009-Sep-25 - B. Kauffman - move from cpl7 main program into rest module -! 2007-mmm-dd - T. Craig - initial restart functionality -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_rest_mod - - ! !USES: - - use shr_kind_mod, only: R8 => SHR_KIND_R8, IN => SHR_KIND_IN - use shr_kind_mod, only: CL => SHR_KIND_CL, CS => SHR_KIND_CS - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use shr_mpi_mod, only: shr_mpi_bcast - use shr_cal_mod, only: shr_cal_date2ymd - use shr_file_mod, only: shr_file_getunit, shr_file_freeunit - use mct_mod - use ESMF - use component_type_mod - - ! diagnostic routines - use seq_diag_mct, only : budg_dataG, budg_ns - - ! Sets mpi communicators, logunit and loglevel - use seq_comm_mct, only: seq_comm_getdata=>seq_comm_setptrs, seq_comm_setnthreads, & - seq_comm_iamin, CPLID, GLOID, logunit, loglevel - - ! "infodata" gathers various control flags into one datatype - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getData - - ! clock & alarm routines - use seq_timemgr_mod, only: seq_timemgr_type, seq_timemgr_EClockGetData - - ! diagnostic routines - use seq_diag_mct, only: budg_datag - - ! lower level io routines - use seq_io_mod, only: seq_io_read, seq_io_write, seq_io_enddef - use seq_io_mod, only: seq_io_wopen, seq_io_close - - ! prep modules - coupler communication between different components - use prep_ocn_mod, only: prep_ocn_get_x2oacc_ox - use prep_ocn_mod, only: prep_ocn_get_x2oacc_ox_cnt -#ifdef SUMMITDEV_PGI - use prep_ocn_mod, only: dummy_pgibugfix -#endif - use prep_rof_mod, only: prep_rof_get_l2racc_lx - use prep_rof_mod, only: prep_rof_get_l2racc_lx_cnt - use prep_glc_mod, only: prep_glc_get_l2gacc_lx - use prep_glc_mod, only: prep_glc_get_l2gacc_lx_cnt - use prep_glc_mod, only: prep_glc_get_x2gacc_gx - use prep_glc_mod, only: prep_glc_get_x2gacc_gx_cnt - - use prep_aoflux_mod, only: prep_aoflux_get_xao_ox - use prep_aoflux_mod, only: prep_aoflux_get_xao_ax - - implicit none - - private - - ! !PUBLIC TYPES: - - ! no public types - - ! !PUBLIC MEMBER FUNCTIONS - - public :: seq_rest_read ! read cpl7 restart data - public :: seq_rest_write ! write cpl7 restart data - - ! !PUBLIC DATA MEMBERS: - - ! 
no public data - - !EOP - - !---------------------------------------------------------------------------- - ! local data - !---------------------------------------------------------------------------- - - logical :: iamin_CPLID ! pe associated with CPLID - integer(IN) :: mpicom_GLOID ! MPI global communicator - integer(IN) :: mpicom_CPLID ! MPI cpl communicator - - integer(IN) :: nthreads_GLOID ! OMP global number of threads - integer(IN) :: nthreads_CPLID ! OMP cpl number of threads - logical :: drv_threading ! driver threading control - - logical :: atm_present ! .true. => atm is present - logical :: lnd_present ! .true. => land is present - logical :: ice_present ! .true. => ice is present - logical :: ocn_present ! .true. => ocn is present - logical :: rof_present ! .true. => land runoff is present - logical :: rof_prognostic ! .true. => rof comp expects input - logical :: glc_present ! .true. => glc is present - logical :: wav_present ! .true. => wav is present - logical :: esp_present ! .true. => esp is present - logical :: iac_present ! .true. => iac is present - - logical :: atm_prognostic ! .true. => atm comp expects input - logical :: lnd_prognostic ! .true. => lnd comp expects input - logical :: ice_prognostic ! .true. => ice comp expects input - logical :: ocn_prognostic ! .true. => ocn comp expects input - logical :: ocnrof_prognostic ! .true. => ocn comp expects runoff input - logical :: glc_prognostic ! .true. => glc comp expects input - logical :: wav_prognostic ! .true. => wav comp expects input - logical :: esp_prognostic ! .true. => esp comp expects input - logical :: iac_prognostic ! .true. => iac comp expects input - - logical :: ocn_c2_glcshelf ! .true. => ocn to glcshelf coupling on - - !--- temporary pointers --- - type(mct_gsMap), pointer :: gsmap - type(mct_aVect), pointer :: x2oacc_ox(:) - integer , pointer :: x2oacc_ox_cnt - type(mct_aVect), pointer :: l2racc_lx(:) - integer , pointer :: l2racc_lx_cnt - type(mct_aVect), pointer :: l2gacc_lx(:) - integer , pointer :: l2gacc_lx_cnt - type(mct_aVect), pointer :: x2gacc_gx(:) - integer , pointer :: x2gacc_gx_cnt - type(mct_aVect), pointer :: xao_ox(:) - type(mct_aVect), pointer :: xao_ax(:) - - !=============================================================================== -contains - !=============================================================================== - - subroutine seq_rest_read(rest_file, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx) - - implicit none - - character(*) , intent(in) :: rest_file ! restart file path/name - type(seq_infodata_type), intent(in) :: infodata - type (component_type) , intent(inout) :: atm(:) - type (component_type) , intent(inout) :: lnd(:) - type (component_type) , intent(inout) :: ice(:) - type (component_type) , intent(inout) :: ocn(:) - type (component_type) , intent(inout) :: rof(:) - type (component_type) , intent(inout) :: glc(:) - type (component_type) , intent(inout) :: wav(:) - type (component_type) , intent(inout) :: esp(:) - type (component_type) , intent(inout) :: iac(:) - type(mct_aVect) , intent(inout) :: fractions_ax(:) ! Fractions on atm grid/decomp - type(mct_aVect) , intent(inout) :: fractions_lx(:) ! Fractions on lnd grid/decomp - type(mct_aVect) , intent(inout) :: fractions_ix(:) ! Fractions on ice grid/decomp - type(mct_aVect) , intent(inout) :: fractions_ox(:) ! 
Fractions on ocn grid/decomp - type(mct_aVect) , intent(inout) :: fractions_rx(:) ! Fractions on rof grid/decomp - type(mct_aVect) , intent(inout) :: fractions_gx(:) ! Fractions on glc grid/decomp - type(mct_aVect) , intent(inout) :: fractions_wx(:) ! Fractions on wav grid/decomp - type(mct_aVect) , intent(inout) :: fractions_zx(:) ! Fractions on iac grid/decomp - - integer(IN) :: n,n1,n2,n3 - real(r8),allocatable :: ds(:) ! for reshaping diag data for restart file - real(r8),allocatable :: ns(:) ! for reshaping diag data for restart file - character(len=*), parameter :: subname = "(seq_rest_read) " - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! get required infodata - !---------------------------------------------------------------------------- - iamin_CPLID = seq_comm_iamin(CPLID) - - call seq_comm_getdata(GLOID,& - mpicom=mpicom_GLOID, nthreads=nthreads_GLOID) - call seq_comm_getdata(CPLID, & - mpicom=mpicom_CPLID, nthreads=nthreads_CPLID) - - call seq_infodata_getData(infodata, & - drv_threading=drv_threading, & - atm_present=atm_present, & - lnd_present=lnd_present, & - rof_present=rof_present, & - ice_present=ice_present, & - ocn_present=ocn_present, & - glc_present=glc_present, & - wav_present=wav_present, & - esp_present=esp_present, & - iac_present=iac_present, & - atm_prognostic=atm_prognostic, & - lnd_prognostic=lnd_prognostic, & - ice_prognostic=ice_prognostic, & - ocn_prognostic=ocn_prognostic, & - rof_prognostic=rof_prognostic, & - ocnrof_prognostic=ocnrof_prognostic, & - glc_prognostic=glc_prognostic, & - wav_prognostic=wav_prognostic, & - iac_prognostic=iac_prognostic, & - esp_prognostic=esp_prognostic, & - ocn_c2_glcshelf=ocn_c2_glcshelf) - - if (iamin_CPLID) then - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (atm_present) then - gsmap => component_get_gsmap_cx(atm(1)) - xao_ax => prep_aoflux_get_xao_ax() - call seq_io_read(rest_file, gsmap, fractions_ax, 'fractions_ax') - call seq_io_read(rest_file, atm, 'c2x', 'a2x_ax') - call seq_io_read(rest_file, gsmap, xao_ax, 'xao_ax') - endif - if (lnd_present) then - gsmap => component_get_gsmap_cx(lnd(1)) - call seq_io_read(rest_file, gsmap, fractions_lx, 'fractions_lx') - endif - if (lnd_present .and. rof_prognostic) then - gsmap => component_get_gsmap_cx(lnd(1)) - l2racc_lx => prep_rof_get_l2racc_lx() - l2racc_lx_cnt => prep_rof_get_l2racc_lx_cnt() - call seq_io_read(rest_file, gsmap, l2racc_lx, 'l2racc_lx') - call seq_io_read(rest_file, l2racc_lx_cnt ,'l2racc_lx_cnt') - end if - if (lnd_present .and. 
glc_prognostic) then - gsmap => component_get_gsmap_cx(lnd(1)) - l2gacc_lx => prep_glc_get_l2gacc_lx() - l2gacc_lx_cnt => prep_glc_get_l2gacc_lx_cnt() - call seq_io_read(rest_file, gsmap, l2gacc_lx, 'l2gacc_lx') - call seq_io_read(rest_file, l2gacc_lx_cnt ,'l2gacc_lx_cnt') - end if - - if (ocn_c2_glcshelf) then - gsmap => component_get_gsmap_cx(glc(1)) - x2gacc_gx => prep_glc_get_x2gacc_gx() - x2gacc_gx_cnt => prep_glc_get_x2gacc_gx_cnt() - call seq_io_read(rest_file, gsmap, x2gacc_gx, 'x2gacc_gx') - call seq_io_read(rest_file, x2gacc_gx_cnt ,'x2gacc_gx_cnt') - end if - - if (ocn_present) then - gsmap => component_get_gsmap_cx(ocn(1)) - x2oacc_ox => prep_ocn_get_x2oacc_ox() -#ifdef SUMMITDEV_PGI - dummy_pgibugfix = associated(x2oacc_ox) -#endif - x2oacc_ox_cnt => prep_ocn_get_x2oacc_ox_cnt() - xao_ox => prep_aoflux_get_xao_ox() - call seq_io_read(rest_file, gsmap, fractions_ox, 'fractions_ox') - call seq_io_read(rest_file, ocn, 'c2x', 'o2x_ox') ! get o2x_ox - call seq_io_read(rest_file, gsmap, x2oacc_ox, 'x2oacc_ox') - call seq_io_read(rest_file, x2oacc_ox_cnt, 'x2oacc_ox_cnt') - call seq_io_read(rest_file, gsmap, xao_ox, 'xao_ox') - endif - if (ice_present) then - gsmap => component_get_gsmap_cx(ice(1)) - call seq_io_read(rest_file, gsmap, fractions_ix, 'fractions_ix') - call seq_io_read(rest_file, ice, 'c2x', 'i2x_ix') - endif - if (rof_present) then - gsmap => component_get_gsmap_cx(rof(1)) - call seq_io_read(rest_file, gsmap, fractions_rx, 'fractions_rx') - call seq_io_read(rest_file, rof, 'c2x', 'r2x_rx') - endif - if (glc_present) then - gsmap => component_get_gsmap_cx(glc(1)) - call seq_io_read(rest_file, gsmap, fractions_gx, 'fractions_gx') - call seq_io_read(rest_file, glc, 'c2x', 'g2x_gx') - endif - if (wav_present) then - gsmap => component_get_gsmap_cx(wav(1)) - call seq_io_read(rest_file, gsmap, fractions_wx, 'fractions_wx') - call seq_io_read(rest_file, wav, 'c2x', 'w2x_wx') - endif - if (iac_present) then - gsmap => component_get_gsmap_cx(iac(1)) - call seq_io_read(rest_file, gsmap, fractions_zx, 'fractions_zx') - call seq_io_read(rest_file, iac, 'c2x', 'z2x_zx') - endif - ! Add ESP restart read here - - n = size(budg_dataG) - allocate(ds(n),ns(n)) - call seq_io_read(rest_file, ds, 'budg_dataG') - call seq_io_read(rest_file, ns, 'budg_ns') - - n = 0 - do n1 = 1,size(budg_dataG,dim=1) - do n2 = 1,size(budg_dataG,dim=2) - do n3 = 1,size(budg_dataG,dim=3) - n = n + 1 - budg_dataG(n1,n2,n3) = ds(n) - budg_ns (n1,n2,n3) = ns(n) - enddo - enddo - enddo - ! call shr_mpi_bcast(budg_dataG,cpl_io_root) ! not necessary, io lib does bcast - - deallocate(ds,ns) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - endif - - end subroutine seq_rest_read - - !=============================================================================== - - subroutine seq_rest_write(EClock_d, seq_SyncClock, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, iac, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, fractions_zx, & - tag, rest_file) - - implicit none - - type(ESMF_Clock) , intent(in) :: EClock_d ! driver clock - type(seq_timemgr_type) , intent(inout) :: seq_SyncClock ! 
contains ptr to driver clock - type(seq_infodata_type), intent(in) :: infodata - type (component_type) , intent(inout) :: atm(:) - type (component_type) , intent(inout) :: lnd(:) - type (component_type) , intent(inout) :: ice(:) - type (component_type) , intent(inout) :: ocn(:) - type (component_type) , intent(inout) :: rof(:) - type (component_type) , intent(inout) :: glc(:) - type (component_type) , intent(inout) :: wav(:) - type (component_type) , intent(inout) :: esp(:) - type (component_type) , intent(inout) :: iac(:) - type(mct_aVect) , intent(inout) :: fractions_ax(:) ! Fractions on atm grid/decomp - type(mct_aVect) , intent(inout) :: fractions_lx(:) ! Fractions on lnd grid/decomp - type(mct_aVect) , intent(inout) :: fractions_ix(:) ! Fractions on ice grid/decomp - type(mct_aVect) , intent(inout) :: fractions_ox(:) ! Fractions on ocn grid/decomp - type(mct_aVect) , intent(inout) :: fractions_rx(:) ! Fractions on rof grid/decomp - type(mct_aVect) , intent(inout) :: fractions_gx(:) ! Fractions on glc grid/decomp - type(mct_aVect) , intent(inout) :: fractions_wx(:) ! Fractions on wav grid/decomp - type(mct_aVect) , intent(inout) :: fractions_zx(:) ! Fractions on iac grid/decomp - character(len=*) , intent(in) :: tag - character(len=CL) , intent(out) :: rest_file ! Restart filename - - integer(IN) :: n,n1,n2,n3,fk - integer(IN) :: curr_ymd ! Current date YYYYMMDD - integer(IN) :: curr_tod ! Current time-of-day (s) - integer(IN) :: yy,mm,dd ! year, month, day - character(CL) :: case_name ! case name - character(CL) :: cvar ! char variable - integer(IN) :: ivar ! integer variable - real(r8) :: rvar ! real variable - logical :: whead,wdata ! flags header/data writing - logical :: cplroot ! root pe on cpl id - integer(IN) :: iun ! unit number - type(mct_gsMap),pointer :: gsmap - character(len=6) :: year_char - - real(r8),allocatable :: ds(:) ! for reshaping diag data for restart file - real(r8),allocatable :: ns(:) ! for reshaping diag data for restart file - character(CL) :: model_doi_url - character(len=*),parameter :: subname = "(seq_rest_write) " - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! get required infodata - !---------------------------------------------------------------------------- - iamin_CPLID = seq_comm_iamin(CPLID) - - call seq_comm_getdata(GLOID,& - mpicom=mpicom_GLOID, nthreads=nthreads_GLOID) - - call seq_comm_getdata(CPLID,& - mpicom=mpicom_CPLID, nthreads=nthreads_CPLID, iamroot=cplroot) - - call seq_infodata_getData(infodata, & - drv_threading=drv_threading, & - atm_present=atm_present, & - lnd_present=lnd_present, & - rof_present=rof_present, & - ice_present=ice_present, & - ocn_present=ocn_present, & - glc_present=glc_present, & - wav_present=wav_present, & - esp_present=esp_present, & - iac_present=iac_present, & - atm_prognostic=atm_prognostic, & - lnd_prognostic=lnd_prognostic, & - ice_prognostic=ice_prognostic, & - rof_prognostic=rof_prognostic, & - ocn_prognostic=ocn_prognostic, & - ocnrof_prognostic=ocnrof_prognostic, & - glc_prognostic=glc_prognostic, & - wav_prognostic=wav_prognostic, & - esp_prognostic=esp_prognostic, & - iac_prognostic=iac_prognostic, & - ocn_c2_glcshelf=ocn_c2_glcshelf, & - case_name=case_name, & - model_doi_url=model_doi_url) - - ! 
Write out infodata and time manager data to restart file - - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=curr_ymd, curr_tod=curr_tod) - call shr_cal_date2ymd(curr_ymd,yy,mm,dd) - write(year_char,'(i6.4)') yy - write(rest_file,"(4a,i2.2,a,i2.2,a,i5.5,a)") & - trim(case_name), '.cpl'//trim(tag)//'.r.',trim(adjustl(year_char)),'-',mm,'-',dd,'-',curr_tod,'.nc' - - ! Write driver data to restart file - - if (iamin_CPLID) then - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - ! copy budg_dataG into 1d array - n = size(budg_dataG) - allocate(ds(n),ns(n)) - call shr_mpi_bcast(budg_dataG,mpicom_CPLID) ! pio requires data on all pe's? - - n = 0 - do n1 = 1,size(budg_dataG,dim=1) - do n2 = 1,size(budg_dataG,dim=2) - do n3 = 1,size(budg_dataG,dim=3) - n = n + 1 - ds(n) = budg_dataG(n1,n2,n3) - ns(n) = budg_ns(n1,n2,n3) - enddo - enddo - enddo - - if (cplroot) then - iun = shr_file_getUnit() - call seq_infodata_GetData(infodata,restart_pfile=cvar) - if (loglevel > 0) write(logunit,"(3A)") subname," write rpointer file ", & - trim(cvar) - open(iun, file=cvar, form='FORMATTED') - write(iun,'(a)') rest_file - close(iun) - call shr_file_freeUnit( iun ) - endif - - call shr_mpi_bcast(rest_file,mpicom_CPLID) - call seq_io_wopen(rest_file,clobber=.true., model_doi_url=model_doi_url) - - ! loop twice (for perf), first time write header, second time write data - do fk = 1,2 - if (fk == 1) then - whead = .true. - wdata = .false. - elseif (fk == 2) then - whead = .false. - wdata = .true. - call seq_io_enddef(rest_file) - else - call shr_sys_abort('driver_write_rstart fk illegal') - end if - call seq_infodata_GetData(infodata,nextsw_cday=rvar) - call seq_io_write(rest_file,rvar,'seq_infodata_nextsw_cday',whead=whead,wdata=wdata) - call seq_infodata_GetData(infodata,precip_fact=rvar) - call seq_io_write(rest_file,rvar,'seq_infodata_precip_fact',whead=whead,wdata=wdata) - call seq_infodata_GetData(infodata,case_name=cvar) - call seq_io_write(rest_file,trim(cvar),'seq_infodata_case_name',whead=whead,wdata=wdata) - - call seq_timemgr_EClockGetData( EClock_d, start_ymd=ivar) - call seq_io_write(rest_file,ivar,'seq_timemgr_start_ymd',whead=whead,wdata=wdata) - call seq_timemgr_EClockGetData( EClock_d, start_tod=ivar) - call seq_io_write(rest_file,ivar,'seq_timemgr_start_tod',whead=whead,wdata=wdata) - call seq_timemgr_EClockGetData( EClock_d, ref_ymd=ivar) - call seq_io_write(rest_file,ivar,'seq_timemgr_ref_ymd' ,whead=whead,wdata=wdata) - call seq_timemgr_EClockGetData( EClock_d, ref_tod=ivar) - call seq_io_write(rest_file,ivar,'seq_timemgr_ref_tod' ,whead=whead,wdata=wdata) - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ivar) - call seq_io_write(rest_file,ivar,'seq_timemgr_curr_ymd' ,whead=whead,wdata=wdata) - call seq_timemgr_EClockGetData( EClock_d, curr_tod=ivar) - call seq_io_write(rest_file,ivar,'seq_timemgr_curr_tod' ,whead=whead,wdata=wdata) - - call seq_io_write(rest_file,ds,'budg_dataG',whead=whead,wdata=wdata) - call seq_io_write(rest_file,ns,'budg_ns',whead=whead,wdata=wdata) - - if (atm_present) then - gsmap => component_get_gsmap_cx(atm(1)) - xao_ax => prep_aoflux_get_xao_ax() - call seq_io_write(rest_file, gsmap, fractions_ax, 'fractions_ax', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, atm, 'c2x', 'a2x_ax', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, gsmap, xao_ax, 'xao_ax', & - whead=whead, wdata=wdata) - endif - if (lnd_present) then - gsmap => component_get_gsmap_cx(lnd(1)) - call seq_io_write(rest_file, gsmap, fractions_lx, 
'fractions_lx', & - whead=whead, wdata=wdata) - endif - if (lnd_present .and. rof_prognostic) then - gsmap => component_get_gsmap_cx(lnd(1)) - l2racc_lx => prep_rof_get_l2racc_lx() - l2racc_lx_cnt => prep_rof_get_l2racc_lx_cnt() - call seq_io_write(rest_file, gsmap, l2racc_lx, 'l2racc_lx', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, l2racc_lx_cnt, 'l2racc_lx_cnt', & - whead=whead, wdata=wdata) - end if - if (lnd_present .and. glc_prognostic) then - gsmap => component_get_gsmap_cx(lnd(1)) - l2gacc_lx => prep_glc_get_l2gacc_lx() - l2gacc_lx_cnt => prep_glc_get_l2gacc_lx_cnt() - call seq_io_write(rest_file, gsmap, l2gacc_lx, 'l2gacc_lx', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, l2gacc_lx_cnt, 'l2gacc_lx_cnt', & - whead=whead, wdata=wdata) - end if - if (ocn_c2_glcshelf) then - gsmap => component_get_gsmap_cx(glc(1)) - x2gacc_gx => prep_glc_get_x2gacc_gx() - x2gacc_gx_cnt => prep_glc_get_x2gacc_gx_cnt() - call seq_io_write(rest_file, gsmap, x2gacc_gx , 'x2gacc_gx', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, x2gacc_gx_cnt, 'x2gacc_gx_cnt', & - whead=whead, wdata=wdata) - end if - if (ocn_present) then - gsmap => component_get_gsmap_cx(ocn(1)) - x2oacc_ox => prep_ocn_get_x2oacc_ox() -#ifdef SUMMITDEV_PGI - dummy_pgibugfix = associated(x2oacc_ox) -#endif - x2oacc_ox_cnt => prep_ocn_get_x2oacc_ox_cnt() - xao_ox => prep_aoflux_get_xao_ox() - call seq_io_write(rest_file, gsmap, fractions_ox, 'fractions_ox', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, ocn, 'c2x', 'o2x_ox', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, gsmap, x2oacc_ox, 'x2oacc_ox', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, x2oacc_ox_cnt, 'x2oacc_ox_cnt', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, gsmap, xao_ox, 'xao_ox', & - whead=whead, wdata=wdata) - endif - if (ice_present) then - gsmap => component_get_gsmap_cx(ice(1)) - call seq_io_write(rest_file, gsmap, fractions_ix, 'fractions_ix', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, ice, 'c2x', 'i2x_ix', & - whead=whead, wdata=wdata) - endif - if (rof_present) then - gsmap => component_get_gsmap_cx(rof(1)) - call seq_io_write(rest_file, gsmap, fractions_rx, 'fractions_rx', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, rof, 'c2x', 'r2x_rx', & - whead=whead, wdata=wdata) - endif - if (glc_present) then - gsmap => component_get_gsmap_cx(glc(1)) - call seq_io_write(rest_file, gsmap, fractions_gx, 'fractions_gx', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, glc, 'c2x', 'g2x_gx', & - whead=whead, wdata=wdata) - endif - if (wav_present) then - gsmap => component_get_gsmap_cx(wav(1)) - call seq_io_write(rest_file, gsmap, fractions_wx, 'fractions_wx', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, wav, 'c2x', 'w2x_wx', & - whead=whead, wdata=wdata) - endif - if (iac_present) then - gsmap => component_get_gsmap_cx(iac(1)) - call seq_io_write(rest_file, gsmap, fractions_zx, 'fractions_zx', & - whead=whead, wdata=wdata) - call seq_io_write(rest_file, iac, 'c2x', 'z2x_zx', & - whead=whead, wdata=wdata) - endif - ! 
Write ESP restart data here - enddo - - call seq_io_close(rest_file) - deallocate(ds,ns) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - end subroutine seq_rest_write - - !=============================================================================== - -end module seq_rest_mod diff --git a/src/drivers/mct/main/t_driver_timers_mod.F90 b/src/drivers/mct/main/t_driver_timers_mod.F90 deleted file mode 100644 index 9fbb5cb968b..00000000000 --- a/src/drivers/mct/main/t_driver_timers_mod.F90 +++ /dev/null @@ -1,115 +0,0 @@ -module t_drv_timers_mod - - use perf_mod - integer, private :: cpl_run_hash=0, cpl_comm_hash=0, cpl_budget_hash=0 - character(len=*),parameter :: strcpl = 'CPL:RUN' - character(len=*),parameter :: strcom = 'CPL:COMM' - character(len=*),parameter :: strbud = 'CPL:BUDGET' - -contains - - !=============================================================================== - - subroutine t_drvstartf(string,cplrun,cplcom,budget,barrier, hashint) - - implicit none - - character(len=*),intent(in) :: string - logical,intent(in),optional :: cplrun - logical,intent(in),optional :: cplcom - logical,intent(in),optional :: budget - integer,intent(in),optional :: barrier - integer,intent(inout), optional :: hashint - - character(len=128) :: strbar - - logical :: lcplrun,lcplcom,lbudget - !------------------------------------------------------------------------------- - - lcplrun = .false. - lcplcom = .false. - lbudget = .false. - if (present(cplrun)) then - lcplrun = cplrun - endif - if (present(cplcom)) then - lcplcom = cplcom - endif - if (present(budget)) then - lbudget = budget - endif - - if (present(barrier)) then - strbar = trim(string)//'_BARRIER' - call t_barrierf (trim(strbar), barrier) - endif - - if (lcplrun) then - call t_startf (trim(strcpl), cpl_run_hash) - call t_adj_detailf(+1) - endif - - if (lcplcom) then - call t_startf (trim(strcom), cpl_comm_hash) - call t_adj_detailf(+1) - endif - - if (lbudget) then - call t_startf (trim(strbud), cpl_budget_hash) - call t_adj_detailf(+1) - endif - - call t_startf (trim(string),hashint) - call t_adj_detailf(+1) - - end subroutine t_drvstartf - - !=============================================================================== - - subroutine t_drvstopf(string,cplrun,cplcom,budget,hashint) - - implicit none - - character(len=*),intent(in) :: string - logical,intent(in),optional :: cplrun - logical,intent(in),optional :: cplcom - logical,intent(in),optional :: budget - integer, intent(in), optional :: hashint - logical :: lcplrun,lcplcom,lbudget - - !------------------------------------------------------------------------------- - - lcplrun = .false. - lcplcom = .false. - lbudget = .false. 
- if (present(cplrun)) then - lcplrun = cplrun - endif - if (present(cplcom)) then - lcplcom = cplcom - endif - if (present(budget)) then - lbudget = budget - endif - - call t_adj_detailf(-1) - call t_stopf (trim(string), hashint) - - if (lbudget) then - call t_adj_detailf(-1) - call t_stopf (trim(strbud), cpl_budget_hash) - endif - - if (lcplrun) then - call t_adj_detailf(-1) - call t_stopf (trim(strcpl), cpl_run_hash) - endif - - if (lcplcom) then - call t_adj_detailf(-1) - call t_stopf (trim(strcom),cpl_comm_hash) - endif - - end subroutine t_drvstopf - -end module t_drv_timers_mod diff --git a/src/drivers/mct/shr/CMakeLists.txt b/src/drivers/mct/shr/CMakeLists.txt deleted file mode 100644 index 08d47cd358c..00000000000 --- a/src/drivers/mct/shr/CMakeLists.txt +++ /dev/null @@ -1,10 +0,0 @@ -list(APPEND drv_sources - glc_elevclass_mod.F90 - seq_cdata_mod.F90 - seq_comm_mct.F90 - seq_infodata_mod.F90 - seq_io_read_mod.F90 - seq_pauseresume_mod.F90 - ) - -sourcelist_to_parent(drv_sources) diff --git a/src/drivers/mct/shr/glc_elevclass_mod.F90 b/src/drivers/mct/shr/glc_elevclass_mod.F90 deleted file mode 100644 index a34e1d8e324..00000000000 --- a/src/drivers/mct/shr/glc_elevclass_mod.F90 +++ /dev/null @@ -1,423 +0,0 @@ -module glc_elevclass_mod - - !--------------------------------------------------------------------- - ! - ! Purpose: - ! - ! This module contains data and routines for operating on GLC elevation classes. - -#include "shr_assert.h" - use shr_kind_mod, only : r8 => shr_kind_r8 - use shr_sys_mod - use seq_comm_mct, only : logunit - use shr_log_mod, only : errMsg => shr_log_errMsg - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: glc_elevclass_init ! initialize GLC elevation class data - public :: glc_elevclass_clean ! deallocate memory allocated here - public :: glc_get_num_elevation_classes ! get the number of elevation classes - public :: glc_get_elevation_class ! get the elevation class index for a given elevation - public :: glc_get_elevclass_bounds ! get the boundaries of all elevation classes - public :: glc_mean_elevation_virtual ! get the mean elevation of a virtual elevation class - public :: glc_elevclass_as_string ! returns a string corresponding to a given elevation class - public :: glc_all_elevclass_strings ! returns an array of strings for all elevation classes - public :: glc_errcode_to_string ! convert an error code into a string describing the error - - interface glc_elevclass_init - module procedure glc_elevclass_init_default - module procedure glc_elevclass_init_override - end interface glc_elevclass_init - - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - ! Possible error code values - integer, parameter, public :: GLC_ELEVCLASS_ERR_NONE = 0 ! err_code indicating no error - integer, parameter, public :: GLC_ELEVCLASS_ERR_UNDEFINED = 1 ! err_code indicating elevation classes have not been defined - integer, parameter, public :: GLC_ELEVCLASS_ERR_TOO_LOW = 2 ! err_code indicating topo below lowest elevation class - integer, parameter, public :: GLC_ELEVCLASS_ERR_TOO_HIGH = 3 ! err_code indicating topo above highest elevation class - - ! 
String length for glc elevation classes represented as strings - integer, parameter, public :: GLC_ELEVCLASS_STRLEN = 2 - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! number of elevation classes - integer :: glc_nec - - ! upper elevation limit of each class (m) - ! indexing starts at 0, with topomax(0) giving the lower elevation limit of EC 1 - real(r8), allocatable :: topomax(:) - - -contains - - !----------------------------------------------------------------------- - subroutine glc_elevclass_init_default(my_glc_nec) - ! - ! !DESCRIPTION: - ! Initialize GLC elevation class data to default boundaries, based on given glc_nec - ! - ! !USES: - ! - ! !ARGUMENTS: - integer, intent(in) :: my_glc_nec ! number of GLC elevation classes - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'glc_elevclass_init' - !----------------------------------------------------------------------- - - glc_nec = my_glc_nec - allocate(topomax(0:glc_nec)) - - select case (glc_nec) - case(0) - ! do nothing - case(1) - topomax = [0._r8, 10000._r8] - case(3) - topomax = [0._r8, 1000._r8, 2000._r8, 10000._r8] - case(5) - topomax = [0._r8, 500._r8, 1000._r8, 1500._r8, 2000._r8, 10000._r8] - case(10) - topomax = [0._r8, 200._r8, 400._r8, 700._r8, 1000._r8, 1300._r8, & - 1600._r8, 2000._r8, 2500._r8, 3000._r8, 10000._r8] - case(36) - topomax = [ 0._r8, 200._r8, 400._r8, 600._r8, 800._r8, & - 1000._r8, 1200._r8, 1400._r8, 1600._r8, 1800._r8, & - 2000._r8, 2200._r8, 2400._r8, 2600._r8, 2800._r8, & - 3000._r8, 3200._r8, 3400._r8, 3600._r8, 3800._r8, & - 4000._r8, 4200._r8, 4400._r8, 4600._r8, 4800._r8, & - 5000._r8, 5200._r8, 5400._r8, 5600._r8, 5800._r8, & - 6000._r8, 6200._r8, 6400._r8, 6600._r8, 6800._r8, & - 7000._r8, 10000._r8] - case default - write(logunit,*) subname,' ERROR: unknown glc_nec: ', glc_nec - call shr_sys_abort(subname//' ERROR: unknown glc_nec') - end select - - end subroutine glc_elevclass_init_default - - !----------------------------------------------------------------------- - subroutine glc_elevclass_init_override(my_glc_nec, my_topomax) - ! - ! !DESCRIPTION: - ! Initialize GLC elevation class data to the given elevation class boundaries. - ! - ! The input, my_topomax, should have (my_glc_nec + 1) elements. - ! - ! !USES: - ! - ! !ARGUMENTS: - integer, intent(in) :: my_glc_nec ! number of GLC elevation classes - real(r8), intent(in) :: my_topomax(0:) ! elevation class boundaries (m) - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'glc_elevclass_init_override' - !----------------------------------------------------------------------- - - SHR_ASSERT_ALL_FL((ubound(my_topomax) == (/my_glc_nec/)), __FILE__, __LINE__) - - glc_nec = my_glc_nec - allocate(topomax(0:glc_nec)) - topomax = my_topomax - - end subroutine glc_elevclass_init_override - - !----------------------------------------------------------------------- - subroutine glc_elevclass_clean() - ! - ! !DESCRIPTION: - ! Deallocate memory allocated in this module - - character(len=*), parameter :: subname = 'glc_elevclass_clean' - !----------------------------------------------------------------------- - - if (allocated(topomax)) then - deallocate(topomax) - end if - glc_nec = 0 - - end subroutine glc_elevclass_clean - - !----------------------------------------------------------------------- - function glc_get_num_elevation_classes() result(num_elevation_classes) - ! - ! 
!DESCRIPTION: - ! Get the number of GLC elevation classes - ! - ! !ARGUMENTS: - integer :: num_elevation_classes ! function result - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'glc_get_num_elevation_classes' - !----------------------------------------------------------------------- - - num_elevation_classes = glc_nec - - end function glc_get_num_elevation_classes - - !----------------------------------------------------------------------- - subroutine glc_get_elevation_class(topo, elevation_class, err_code) - ! - ! !DESCRIPTION: - ! Get the elevation class index associated with a given topographic height. - ! - ! The returned elevation_class will be between 1 and num_elevation_classes, if this - ! topographic height is contained in an elevation class. In this case, err_code will - ! be GLC_ELEVCLASS_ERR_NONE (no error). - ! - ! If there are no elevation classes defined, the returned value will be 0, and - ! err_code will be GLC_ELEVCLASS_ERR_UNDEFINED - ! - ! If this topographic height is below the lowest elevation class, the returned value - ! will be 1, and err_code will be GLC_ELEVCLASS_ERR_TOO_LOW. - ! - ! If this topographic height is above the highest elevation class, the returned value - ! will be (num_elevation_classes), and err_code will be GLC_ELEVCLASS_ERR_TOO_HIGH. - ! - ! !USES: - ! - ! !ARGUMENTS: - real(r8), intent(in) :: topo ! topographic height (m) - integer, intent(out) :: elevation_class ! elevation class index - integer, intent(out) :: err_code ! error code (see above for possible codes) - ! - ! !LOCAL VARIABLES: - integer :: ec ! temporary elevation class - - character(len=*), parameter :: subname = 'glc_get_elevation_class' - !----------------------------------------------------------------------- - - if (glc_nec < 1) then - elevation_class = 0 - err_code = GLC_ELEVCLASS_ERR_UNDEFINED - else if (topo < topomax(0)) then - elevation_class = 1 - err_code = GLC_ELEVCLASS_ERR_TOO_LOW - else if (topo >= topomax(glc_nec)) then - elevation_class = glc_nec - err_code = GLC_ELEVCLASS_ERR_TOO_HIGH - else - err_code = GLC_ELEVCLASS_ERR_NONE - elevation_class = 0 - do ec = 1, glc_nec - if (topo >= topomax(ec - 1) .and. topo < topomax(ec)) then - elevation_class = ec - exit - end if - end do - - SHR_ASSERT(elevation_class > 0, subname//' elevation class was not assigned') - end if - - end subroutine glc_get_elevation_class - - !----------------------------------------------------------------------- - function glc_get_elevclass_bounds() result(elevclass_bounds) - ! - ! !DESCRIPTION: - ! Get the boundaries of all elevation classes. - ! - ! This returns an array of size glc_nec+1, since it contains both the lower and upper - ! bounds of each elevation class. - ! - ! !USES: - ! - ! !ARGUMENTS: - real(r8) :: elevclass_bounds(0:glc_nec) ! function result - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'glc_get_elevclass_bounds' - !----------------------------------------------------------------------- - - elevclass_bounds(:) = topomax(:) - - end function glc_get_elevclass_bounds - - - !----------------------------------------------------------------------- - function glc_mean_elevation_virtual(elevation_class) result(mean_elevation) - ! - ! !DESCRIPTION: - ! Returns the mean elevation of a virtual elevation class - ! - ! !ARGUMENTS: - real(r8) :: mean_elevation ! function result - integer, intent(in) :: elevation_class - ! - ! 
!LOCAL VARIABLES: - integer :: resulting_elevation_class - integer :: err_code - - character(len=*), parameter :: subname = 'glc_mean_elevation_virtual' - !----------------------------------------------------------------------- - - if (elevation_class == 0) then - ! Bare land "elevation class" - mean_elevation = 0._r8 - else - if (elevation_class < glc_nec) then - ! Normal case - mean_elevation = (topomax(elevation_class - 1) + topomax(elevation_class)) / 2._r8 - else if (elevation_class == glc_nec) then - ! In the top elevation class; in this case, assignment of a "mean" elevation is - ! somewhat arbitrary (because we expect the upper bound of the top elevation - ! class to be very high). - - if (glc_nec > 1) then - mean_elevation = 2._r8 * topomax(elevation_class - 1) - topomax(elevation_class - 2) - else - ! entirely arbitrary - mean_elevation = 1000._r8 - end if - else - write(logunit,*) subname,' ERROR: elevation class out of bounds: ', elevation_class - call shr_sys_abort(subname // ' ERROR: elevation class out of bounds') - end if - end if - - ! Ensure that the resulting elevation is within the given elevation class - if (elevation_class > 0) then - call glc_get_elevation_class(mean_elevation, resulting_elevation_class, err_code) - if (err_code /= GLC_ELEVCLASS_ERR_NONE) then - write(logunit,*) subname, ' ERROR: generated elevation that results in an error' - write(logunit,*) 'when trying to determine the resulting elevation class' - write(logunit,*) glc_errcode_to_string(err_code) - write(logunit,*) 'elevation_class, mean_elevation = ', elevation_class, mean_elevation - call shr_sys_abort(subname // ' ERROR: generated elevation that results in an error') - else if (resulting_elevation_class /= elevation_class) then - write(logunit,*) subname, ' ERROR: generated elevation outside the given elevation class' - write(logunit,*) 'elevation_class, mean_elevation, resulting_elevation_class = ', & - elevation_class, mean_elevation, resulting_elevation_class - call shr_sys_abort(subname // ' ERROR: generated elevation outside the given elevation class') - end if - end if - - end function glc_mean_elevation_virtual - - - !----------------------------------------------------------------------- - function glc_elevclass_as_string(elevation_class) result(ec_string) - ! - ! !DESCRIPTION: - ! Returns a string corresponding to a given elevation class. - ! - ! This string can be used as a suffix for fields in MCT attribute vectors. - ! - ! ! NOTE(wjs, 2015-01-19) This function doesn't fully belong in this module, since it - ! doesn't refer to the data stored in this module. However, I can't think of a more - ! appropriate place for it. - ! - ! !USES: - ! - ! !ARGUMENTS: - character(len=GLC_ELEVCLASS_STRLEN) :: ec_string ! function result - integer, intent(in) :: elevation_class - ! - ! !LOCAL VARIABLES: - character(len=16) :: format_string - - character(len=*), parameter :: subname = 'glc_elevclass_as_string' - !----------------------------------------------------------------------- - - ! e.g., for GLC_ELEVCLASS_STRLEN = 2, format_string will be '(i2.2)' - write(format_string,'(a,i0,a,i0,a)') '(i', GLC_ELEVCLASS_STRLEN, '.', GLC_ELEVCLASS_STRLEN, ')' - - write(ec_string,trim(format_string)) elevation_class - end function glc_elevclass_as_string - - !----------------------------------------------------------------------- - function glc_all_elevclass_strings(include_zero) result(ec_strings) - ! - ! !DESCRIPTION: - ! 
Returns an array of strings corresponding to all elevation classes from 1 to glc_nec - ! - ! If include_zero is present and true, then includes elevation class 0 - so goes from - ! 0 to glc_nec - ! - ! These strings can be used as suffixes for fields in MCT attribute vectors. - ! - ! !USES: - ! - ! !ARGUMENTS: - character(len=GLC_ELEVCLASS_STRLEN), allocatable :: ec_strings(:) ! function result - logical, intent(in), optional :: include_zero ! if present and true, include elevation class 0 (default is false) - ! - ! !LOCAL VARIABLES: - logical :: l_include_zero ! local version of optional include_zero argument - integer :: lower_bound - integer :: i - - character(len=*), parameter :: subname = 'glc_all_elevclass_strings' - !----------------------------------------------------------------------- - - if (present(include_zero)) then - l_include_zero = include_zero - else - l_include_zero = .false. - end if - - if (l_include_zero) then - lower_bound = 0 - else - lower_bound = 1 - end if - - allocate(ec_strings(lower_bound:glc_nec)) - do i = lower_bound, glc_nec - ec_strings(i) = glc_elevclass_as_string(i) - end do - - end function glc_all_elevclass_strings - - - !----------------------------------------------------------------------- - function glc_errcode_to_string(err_code) result(err_string) - ! - ! !DESCRIPTION: - ! - ! - ! !USES: - ! - ! !ARGUMENTS: - character(len=256) :: err_string ! function result - integer, intent(in) :: err_code ! error code (one of the GLC_ELEVCLASS_ERR* values) - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'glc_errcode_to_string' - !----------------------------------------------------------------------- - - select case (err_code) - case (GLC_ELEVCLASS_ERR_NONE) - err_string = '(no error)' - case (GLC_ELEVCLASS_ERR_UNDEFINED) - err_string = 'Elevation classes have not yet been defined' - case (GLC_ELEVCLASS_ERR_TOO_LOW) - err_string = 'Topographic height below the lower bound of the lowest elevation class' - case (GLC_ELEVCLASS_ERR_TOO_HIGH) - err_string = 'Topographic height above the upper bound of the highest elevation class' - case default - err_string = 'UNKNOWN ERROR' - end select - - end function glc_errcode_to_string - - -end module glc_elevclass_mod diff --git a/src/drivers/mct/shr/seq_cdata_mod.F90 b/src/drivers/mct/shr/seq_cdata_mod.F90 deleted file mode 100644 index 735726c5b5c..00000000000 --- a/src/drivers/mct/shr/seq_cdata_mod.F90 +++ /dev/null @@ -1,118 +0,0 @@ -module seq_cdata_mod - - use shr_kind_mod , only: r8=> shr_kind_r8 - use shr_kind_mod , only: CL => SHR_KIND_CL - use shr_sys_mod , only: shr_sys_flush - use shr_sys_mod , only: shr_sys_abort - use seq_infodata_mod , only: seq_infodata_type - use mct_mod - use seq_comm_mct - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - public :: seq_cdata_setptrs - public :: seq_cdata_init - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - ! in general, this type just groups together related data via pointers - - type seq_cdata - character(len=16) :: name ! user defined name - integer :: ID ! component id - integer :: mpicom ! mpi communicator - type(mct_gGrid) ,pointer :: dom => null() ! domain info - type(mct_gsMap) ,pointer :: gsMap => null() ! 
decomp info - type(seq_infodata_type) ,pointer :: infodata => null() ! Input init object - character(len=CL) ,pointer :: resume_filename => null() ! filename used by pause/resume processing - logical :: post_assim ! post assimilation - end type seq_cdata - - public seq_cdata - - !============================================================================== -contains - !============================================================================== - - subroutine seq_cdata_setptrs(cdata, ID, mpicom, dom, gsMap, infodata, name, post_assimilation, resume_filename) - - !----------------------------------------------------------------------- - ! - ! Arguments - type(seq_cdata) ,intent(in) :: cdata ! input - integer ,optional :: ID ! component id - integer ,optional :: mpicom ! mpi comm - type(mct_gGrid) ,optional,pointer :: dom ! domain - type(mct_gsMap) ,optional,pointer :: gsMap ! decomp - type(seq_infodata_type) ,optional,pointer :: infodata ! INIT object - character(len=*) ,optional :: name ! name - logical ,optional :: post_assimilation ! Restart is post assimilation - character(len=*) ,optional,pointer :: resume_filename ! filename used by pause/resume processing - ! - ! Local variables - character(*),parameter :: subName = '(seq_cdata_setptrs) ' - !----------------------------------------------------------------------- - - if (present(name )) name = cdata%name - if (present(ID )) ID = cdata%ID - if (present(mpicom )) mpicom = cdata%mpicom - if (present(post_assimilation )) post_assimilation = cdata%post_assim - if (present(resume_filename )) resume_filename => cdata%resume_filename - if (present(dom )) dom => cdata%dom - if (present(gsMap )) gsMap => cdata%gsMap - if (present(infodata )) infodata => cdata%infodata - - end subroutine seq_cdata_setptrs - - !============================================================================ - - subroutine seq_cdata_init(cdata,ID, name, dom, gsMap, infodata, post_assim) - - !----------------------------------------------------------------------- - ! Description - ! This is here only for backwards compatibility with current data model - ! xxx_comp_esmf.F90 interfaces - ! - ! Arguments - - type(seq_cdata) ,intent(inout) :: cdata ! initialized - integer ,intent(in) :: ID ! component id - character(len=*) ,intent(in),optional :: name ! component name - type(mct_gGrid) ,intent(in),target :: dom ! domain - type(mct_gsMap) ,intent(in),target :: gsMap ! decomp - type(seq_infodata_type) ,intent(in),target :: infodata ! INIT object - logical ,intent(in) :: post_assim - ! - ! Local variables - ! - integer :: mpicom ! mpi communicator - character(*),parameter :: subName = '(seq_cdata_init) ' - logical :: iamroot ! iamroot - !----------------------------------------------------------------------- - - call seq_comm_setptrs(ID, mpicom=mpicom, iamroot=iamroot) - - if (present(name)) then - cdata%name = name - else - cdata%name = 'undefined' - endif - cdata%ID = ID - cdata%mpicom = mpicom - cdata%dom => dom - cdata%gsMap => gsMap - cdata%infodata => infodata - cdata%post_assim = post_assim - - allocate(cdata%resume_filename) - cdata%resume_filename = '' - - end subroutine seq_cdata_init - -end module seq_cdata_mod diff --git a/src/drivers/mct/shr/seq_comm_mct.F90 b/src/drivers/mct/shr/seq_comm_mct.F90 deleted file mode 100644 index 4664143e0be..00000000000 --- a/src/drivers/mct/shr/seq_comm_mct.F90 +++ /dev/null @@ -1,1404 +0,0 @@ -module seq_comm_mct - - !--------------------------------------------------------------------- - ! - ! 
Purpose: Set up necessary communications - ! Note that if no MPI, will call MCTs fake version - ! (including mpif.h) will be utilized - ! - !--------------------------------------------------------------------- - - -!!! NOTE: If all atmospheres are identical in number of processes, -!!! number of threads, and grid layout, we should check that the -!!! user-provided number of processes and threads are consistent -!!! (or else, only accept one entry for these quantities when reading -!!! the namelist). ARE OTHER PROTECTIONS/CHECKS NEEDED??? - - use mct_mod , only : mct_world_init, mct_world_clean, mct_die - use shr_sys_mod , only : shr_sys_abort, shr_sys_flush - use shr_mpi_mod , only : shr_mpi_chkerr, shr_mpi_bcast, shr_mpi_max - use shr_file_mod , only : shr_file_getUnit, shr_file_freeUnit - ! gptl timing library is not built for unit tests but it is on - ! by default for Makefile (model) builds. -#ifdef TIMING - use shr_taskmap_mod, only : shr_taskmap_write - use perf_mod , only : t_startf, t_stopf -#endif - use esmf , only : ESMF_LogKind_Flag, ESMF_LOGKIND_NONE - use esmf , only : ESMF_LOGKIND_SINGLE, ESMF_LOGKIND_MULTI - - implicit none - - private -#include - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public seq_comm_init - public seq_comm_clean - public seq_comm_iamin - public seq_comm_iamroot - public seq_comm_mpicom - public seq_comm_iam - public seq_comm_gloiam - public seq_comm_gloroot - public seq_comm_cplpe - public seq_comm_cmppe - public seq_comm_name - public seq_comm_inst - public seq_comm_suffix - public seq_comm_setptrs - public seq_comm_setnthreads - public seq_comm_getnthreads - public seq_comm_printcomms - public seq_comm_get_ncomps - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - integer, public, parameter :: default_logunit = 6 - integer, public :: logunit = default_logunit ! log unit number - integer, public :: loglevel = 1 ! log level - - integer, public :: global_mype = -1 !! To be initialized - - !!! Note - NUM_COMP_INST_XXX are cpp variables set in buildlib.csm_share - - integer, parameter :: ncomptypes = 9 ! total number of component types - integer, parameter :: ncouplers = 1 ! number of couplers - integer, parameter, public :: num_inst_atm = NUM_COMP_INST_ATM - integer, parameter, public :: num_inst_lnd = NUM_COMP_INST_LND - integer, parameter, public :: num_inst_ocn = NUM_COMP_INST_OCN - integer, parameter, public :: num_inst_ice = NUM_COMP_INST_ICE - integer, parameter, public :: num_inst_glc = NUM_COMP_INST_GLC - integer, parameter, public :: num_inst_wav = NUM_COMP_INST_WAV - integer, parameter, public :: num_inst_rof = NUM_COMP_INST_ROF - integer, parameter, public :: num_inst_iac = NUM_COMP_INST_IAC - integer, parameter, public :: num_inst_esp = NUM_COMP_INST_ESP - - integer, parameter, public :: num_inst_total= num_inst_atm + & - num_inst_lnd + & - num_inst_ocn + & - num_inst_ice + & - num_inst_glc + & - num_inst_wav + & - num_inst_rof + & - num_inst_iac + & - num_inst_esp + 1 - - integer, public :: num_inst_min, num_inst_max - integer, public :: num_inst_xao ! for xao flux - integer, public :: num_inst_frc ! for fractions - integer, public :: num_inst_driver = 1 - -!!! Each component instance needs two communicators: one internal to the -!!! 
instance, and one for communicating with the coupler. -!!! Additionally, one communicator is needed for the coupler's -!!! internal communications, and one is needed for the global space. -!!! All instances of a component type also share a separate communicator -!!! All instances of a component type share a communicator with the coupler - - integer, parameter, public :: num_inst_phys = num_inst_atm + num_inst_lnd + & - num_inst_ocn + num_inst_ice + & - num_inst_glc + num_inst_rof + & - num_inst_wav + num_inst_esp + & - num_inst_iac - integer, parameter, public :: num_cpl_phys = num_inst_atm + num_inst_lnd + & - num_inst_ocn + num_inst_ice + & - num_inst_glc + num_inst_rof + & - num_inst_wav + num_inst_esp + & - num_inst_iac - integer, parameter :: ncomps = (1 + ncouplers + 2*ncomptypes + num_inst_phys + num_cpl_phys) - - integer, public :: GLOID - integer, public :: CPLID - - integer, public :: ALLATMID - integer, public :: ALLLNDID - integer, public :: ALLOCNID - integer, public :: ALLICEID - integer, public :: ALLGLCID - integer, public :: ALLROFID - integer, public :: ALLWAVID - integer, public :: ALLIACID - integer, public :: ALLESPID - - integer, public :: CPLALLATMID - integer, public :: CPLALLLNDID - integer, public :: CPLALLOCNID - integer, public :: CPLALLICEID - integer, public :: CPLALLGLCID - integer, public :: CPLALLROFID - integer, public :: CPLALLWAVID - integer, public :: CPLALLIACID - integer, public :: CPLALLESPID - - integer, public :: ATMID(num_inst_atm) - integer, public :: LNDID(num_inst_lnd) - integer, public :: OCNID(num_inst_ocn) - integer, public :: ICEID(num_inst_ice) - integer, public :: GLCID(num_inst_glc) - integer, public :: ROFID(num_inst_rof) - integer, public :: WAVID(num_inst_wav) - integer, public :: IACID(num_inst_iac) - integer, public :: ESPID(num_inst_esp) - - integer, public :: CPLATMID(num_inst_atm) - integer, public :: CPLLNDID(num_inst_lnd) - integer, public :: CPLOCNID(num_inst_ocn) - integer, public :: CPLICEID(num_inst_ice) - integer, public :: CPLGLCID(num_inst_glc) - integer, public :: CPLROFID(num_inst_rof) - integer, public :: CPLWAVID(num_inst_wav) - integer, public :: CPLIACID(num_inst_iac) - integer, public :: CPLESPID(num_inst_esp) - - integer, parameter, public :: seq_comm_namelen=16 - - ! taskmap output level specifications for components - ! (0:no output, 1:compact, 2:verbose) - integer, public :: info_taskmap_comp - - ! suffix for log and timing files if multi coupler driver - character(len=seq_comm_namelen), public :: cpl_inst_tag - - type seq_comm_type - character(len=seq_comm_namelen) :: name ! my name - character(len=seq_comm_namelen) :: suffix ! recommended suffix - integer :: inst ! my inst index - integer :: ID ! my id number - integer :: mpicom ! mpicom - integer :: mpigrp ! mpigrp - integer :: npes ! number of mpi tasks in comm - integer :: nthreads ! number of omp threads per task - integer :: iam ! my task number in mpicom - logical :: iamroot ! am i the root task in mpicom - - integer :: gloiam ! my task number in global_comm - integer :: gloroot ! the global task number of each comps root on all pes - - integer :: pethreads ! max number of threads on my task - integer :: cplpe ! a common task in mpicom from the cpl group for join mpicoms - ! cplpe is used to broadcast information from the coupler to the component - integer :: cmppe ! a common task in mpicom from the component group for join mpicoms - ! cmppe is used to broadcast information from the component to the coupler - logical :: set ! 
has this datatype been set - - end type seq_comm_type - - type(seq_comm_type) :: seq_comms(ncomps) - - character(*), parameter :: layout_concurrent = 'concurrent' - character(*), parameter :: layout_sequential = 'sequential' - - character(*), parameter :: F11 = "(a,a,'(',i3,' ',a,')',a, 3i6,' (',a,i6,')',' (',a,i3,')','(',a,a,')')" - character(*), parameter :: F12 = "(a,a,'(',i3,' ',a,')',a,2i6,6x,' (',a,i6,')',' (',a,i3,')','(',a,2i6,')')" - character(*), parameter :: F13 = "(a,a,'(',i3,' ',a,')',a,2i6,6x,' (',a,i6,')',' (',a,i3,')')" - character(*), parameter :: F14 = "(a,a,'(',i3,' ',a,')',a, 6x,' (',a,i6,')',' (',a,i3,')')" - - ! Exposed for use in the esp component, please don't use this elsewhere - integer, public :: Global_Comm - integer :: driver_comm - - character(len=32), public :: & - atm_layout, lnd_layout, ice_layout, glc_layout, rof_layout, & - ocn_layout, wav_layout, esp_layout, iac_layout - - logical :: seq_comm_mct_initialized = .false. ! whether this module has been initialized - - !======================================================================= -contains - !====================================================================== - integer function seq_comm_get_ncomps() - seq_comm_get_ncomps = ncomps - end function seq_comm_get_ncomps - - subroutine seq_comm_init(global_comm_in, driver_comm_in, nmlfile, drv_comm_id) - !---------------------------------------------------------- - ! - ! Arguments - implicit none - integer, intent(in) :: global_comm_in - integer, intent(in) :: driver_comm_in - character(len=*), intent(IN) :: nmlfile - integer, intent(in), optional :: drv_comm_id - ! - ! Local variables - ! - logical :: error_state - integer :: ierr, n, count - character(*), parameter :: subName = '(seq_comm_init) ' - integer :: mype,numpes,myncomps,max_threads,gloroot, global_numpes - integer :: pelist(3,1) ! start, stop, stride for group - integer, pointer :: comps(:) ! array with component ids - integer, pointer :: comms(:) ! array with mpicoms - integer :: nu - logical :: verbose_taskmap_output - integer :: drv_inst - character(len=8) :: c_drv_inst ! driver instance number - character(len=8) :: c_driver_numpes ! 
number of pes in driver - character(len=seq_comm_namelen) :: valid_comps(ncomps) - - integer :: & - atm_ntasks, atm_rootpe, atm_pestride, atm_nthreads, & - lnd_ntasks, lnd_rootpe, lnd_pestride, lnd_nthreads, & - ice_ntasks, ice_rootpe, ice_pestride, ice_nthreads, & - glc_ntasks, glc_rootpe, glc_pestride, glc_nthreads, & - wav_ntasks, wav_rootpe, wav_pestride, wav_nthreads, & - rof_ntasks, rof_rootpe, rof_pestride, rof_nthreads, & - ocn_ntasks, ocn_rootpe, ocn_pestride, ocn_nthreads, & - esp_ntasks, esp_rootpe, esp_pestride, esp_nthreads, & - iac_ntasks, iac_rootpe, iac_pestride, iac_nthreads, & - cpl_ntasks, cpl_rootpe, cpl_pestride, cpl_nthreads, & - info_taskmap_model - - namelist /cime_pes/ & - atm_ntasks, atm_rootpe, atm_pestride, atm_nthreads, atm_layout, & - lnd_ntasks, lnd_rootpe, lnd_pestride, lnd_nthreads, lnd_layout, & - ice_ntasks, ice_rootpe, ice_pestride, ice_nthreads, ice_layout, & - glc_ntasks, glc_rootpe, glc_pestride, glc_nthreads, glc_layout, & - wav_ntasks, wav_rootpe, wav_pestride, wav_nthreads, wav_layout, & - rof_ntasks, rof_rootpe, rof_pestride, rof_nthreads, rof_layout, & - ocn_ntasks, ocn_rootpe, ocn_pestride, ocn_nthreads, ocn_layout, & - esp_ntasks, esp_rootpe, esp_pestride, esp_nthreads, esp_layout, & - iac_ntasks, iac_rootpe, iac_pestride, iac_nthreads, iac_layout, & - cpl_ntasks, cpl_rootpe, cpl_pestride, cpl_nthreads, & - info_taskmap_model, info_taskmap_comp - !---------------------------------------------------------- - - ! make sure this is first pass and set comms unset - if (seq_comm_mct_initialized) then - write(logunit,*) trim(subname),' ERROR seq_comm_init already called ' - call shr_sys_abort() - endif - seq_comm_mct_initialized = .true. - global_comm = global_comm_in - driver_comm = driver_comm_in - - !! Initialize seq_comms elements - - do n = 1,ncomps - seq_comms(n)%name = 'unknown' - seq_comms(n)%suffix = ' ' - seq_comms(n)%inst = 0 - seq_comms(n)%set = .false. - seq_comms(n)%mpicom = MPI_COMM_NULL ! do some initialization here - seq_comms(n)%iam = -1 - seq_comms(n)%iamroot = .false. - seq_comms(n)%npes = -1 - seq_comms(n)%nthreads = -1 - seq_comms(n)%gloiam = -1 - seq_comms(n)%gloroot = -1 - seq_comms(n)%pethreads = -1 - seq_comms(n)%cplpe = -1 - seq_comms(n)%cmppe = -1 - enddo - - - ! Initialize MPI - ! Note that if no MPI, will call MCTs fake version - - call mpi_comm_size(GLOBAL_COMM_IN, global_numpes , ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size comm_world') - call mpi_comm_rank(DRIVER_COMM, mype , ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank driver') - call mpi_comm_size(DRIVER_COMM, numpes, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size driver') - - if (mod(global_numpes, numpes) .ne. 0) then - write(logunit,*) trim(subname),' ERROR: numpes driver: ', numpes, ' should divide global_numpes: ',global_numpes - call shr_sys_abort(trim(subname)//' ERROR decomposition error ') - endif - - ! Initialize gloiam on all IDs - - global_mype = mype - - do n = 1,ncomps - seq_comms(n)%gloiam = mype - enddo - - ! Set ntasks, rootpe, pestride, nthreads for all components - - if (mype == 0) then - !! 
Set up default component process parameters - call comp_pelayout_init(numpes, atm_ntasks, atm_rootpe, atm_pestride, atm_nthreads, atm_layout) - call comp_pelayout_init(numpes, lnd_ntasks, lnd_rootpe, lnd_pestride, lnd_nthreads, lnd_layout) - call comp_pelayout_init(numpes, ice_ntasks, ice_rootpe, ice_pestride, ice_nthreads, ice_layout) - call comp_pelayout_init(numpes, ocn_ntasks, ocn_rootpe, ocn_pestride, ocn_nthreads, ocn_layout) - call comp_pelayout_init(numpes, rof_ntasks, rof_rootpe, rof_pestride, rof_nthreads, rof_layout) - call comp_pelayout_init(numpes, wav_ntasks, wav_rootpe, wav_pestride, wav_nthreads, wav_layout) - call comp_pelayout_init(numpes, glc_ntasks, glc_rootpe, glc_pestride, glc_nthreads, glc_layout) - call comp_pelayout_init(numpes, esp_ntasks, esp_rootpe, esp_pestride, esp_nthreads, esp_layout) - call comp_pelayout_init(numpes, iac_ntasks, iac_rootpe, iac_pestride, iac_nthreads, iac_layout) - call comp_pelayout_init(numpes, cpl_ntasks, cpl_rootpe, cpl_pestride, cpl_nthreads) - info_taskmap_model = 0 - info_taskmap_comp = 0 - - ! Read namelist if it exists - - nu = shr_file_getUnit() - open(nu, file=trim(nmlfile), status='old', iostat=ierr) - - if (ierr == 0) then - ierr = 1 - do while( ierr > 0 ) - read(nu, nml=cime_pes, iostat=ierr) - end do - close(nu) - end if - call shr_file_freeUnit(nu) - end if - - call shr_mpi_bcast(atm_nthreads,DRIVER_COMM,'atm_nthreads') - call shr_mpi_bcast(lnd_nthreads,DRIVER_COMM,'lnd_nthreads') - call shr_mpi_bcast(ocn_nthreads,DRIVER_COMM,'ocn_nthreads') - call shr_mpi_bcast(ice_nthreads,DRIVER_COMM,'ice_nthreads') - call shr_mpi_bcast(glc_nthreads,DRIVER_COMM,'glc_nthreads') - call shr_mpi_bcast(wav_nthreads,DRIVER_COMM,'wav_nthreads') - call shr_mpi_bcast(rof_nthreads,DRIVER_COMM,'rof_nthreads') - call shr_mpi_bcast(esp_nthreads,DRIVER_COMM,'esp_nthreads') - call shr_mpi_bcast(iac_nthreads,DRIVER_COMM,'iac_nthreads') - call shr_mpi_bcast(cpl_nthreads,DRIVER_COMM,'cpl_nthreads') - - call shr_mpi_bcast(atm_layout,DRIVER_COMM,'atm_layout') - call shr_mpi_bcast(lnd_layout,DRIVER_COMM,'lnd_layout') - call shr_mpi_bcast(ocn_layout,DRIVER_COMM,'ocn_layout') - call shr_mpi_bcast(ice_layout,DRIVER_COMM,'ice_layout') - call shr_mpi_bcast(glc_layout,DRIVER_COMM,'glc_layout') - call shr_mpi_bcast(wav_layout,DRIVER_COMM,'wav_layout') - call shr_mpi_bcast(rof_layout,DRIVER_COMM,'rof_layout') - call shr_mpi_bcast(iac_layout,DRIVER_COMM,'iac_layout') - call shr_mpi_bcast(esp_layout,DRIVER_COMM,'esp_layout') - - call shr_mpi_bcast(info_taskmap_model,DRIVER_COMM,'info_taskmap_model') - call shr_mpi_bcast(info_taskmap_comp, DRIVER_COMM,'info_taskmap_comp' ) - -#ifdef TIMING - if (info_taskmap_model > 0) then - ! output task-to-node mapping - - if (info_taskmap_model == 1) then - verbose_taskmap_output = .false. - else - verbose_taskmap_output = .true. 
- endif - - if (present(drv_comm_id)) then - drv_inst = drv_comm_id - write(c_drv_inst,'(i8)') drv_inst - else - drv_inst = 0 - endif - - if (mype == 0) then - write(c_driver_numpes,'(i8)') numpes - if (drv_inst == 0) then - write(logunit,'(2A)') trim(adjustl(c_driver_numpes)), & - ' pes participating in computation of coupled model' - else - write(logunit,'(3A)') trim(adjustl(c_driver_numpes)), & - ' pes participating in computation of DRIVER instance #', & - trim(adjustl(c_drv_inst)) - endif - call shr_sys_flush(logunit) - endif - - call t_startf("shr_taskmap_write") - if (drv_inst == 0) then - call shr_taskmap_write(logunit, DRIVER_COMM, & - 'GLOBAL', & - verbose=verbose_taskmap_output) - else - call shr_taskmap_write(logunit, DRIVER_COMM, & - 'DRIVER #'//trim(adjustl(c_drv_inst)), & - verbose=verbose_taskmap_output) - endif - call t_stopf("shr_taskmap_write") - endif -#endif - - !--- compute some other num_inst values - - num_inst_xao = max(num_inst_atm,num_inst_ocn) - num_inst_frc = num_inst_ice - - !--- compute num_inst_min, num_inst_max - !--- instances must be either 1 or a constant across components - !--- checks for prognostic/present consistency in the driver - - error_state = .false. - num_inst_min = min(num_inst_atm, num_inst_lnd, num_inst_ocn,& - num_inst_ice, num_inst_glc, num_inst_wav, num_inst_rof,& - num_inst_esp, num_inst_iac) - num_inst_max = max(num_inst_atm, num_inst_lnd, num_inst_ocn,& - num_inst_ice, num_inst_glc, num_inst_wav, num_inst_rof,& - num_inst_esp, num_inst_iac) - - if (num_inst_min /= num_inst_max .and. num_inst_min /= 1) error_state = .true. - if (num_inst_atm /= num_inst_min .and. num_inst_atm /= num_inst_max) error_state = .true. - if (num_inst_lnd /= num_inst_min .and. num_inst_lnd /= num_inst_max) error_state = .true. - if (num_inst_ocn /= num_inst_min .and. num_inst_ocn /= num_inst_max) error_state = .true. - if (num_inst_ice /= num_inst_min .and. num_inst_ice /= num_inst_max) error_state = .true. - if (num_inst_glc /= num_inst_min .and. num_inst_glc /= num_inst_max) error_state = .true. - if (num_inst_wav /= num_inst_min .and. num_inst_wav /= num_inst_max) error_state = .true. - if (num_inst_rof /= num_inst_min .and. num_inst_rof /= num_inst_max) error_state = .true. - if (num_inst_iac /= num_inst_min .and. num_inst_iac /= num_inst_max) error_state = .true. - if (num_inst_esp /= num_inst_min .and. num_inst_esp /= num_inst_max) error_state = .true. - - if (error_state) then - write(logunit,*) trim(subname),' ERROR: num_inst inconsistent' - write(logunit,*) num_inst_atm, num_inst_lnd, num_inst_ocn,& - num_inst_ice, num_inst_glc, num_inst_wav, num_inst_rof,& - num_inst_esp, num_inst_min, num_inst_max - call shr_sys_abort(trim(subname)//' ERROR: num_inst inconsistent') - endif - - ! 
Initialize IDs - - count = 0 - - count = count + 1 - GLOID = count - count = count + 1 - CPLID = count - - if (mype == 0) then - pelist(1,1) = 0 - pelist(2,1) = numpes-1 - pelist(3,1) = 1 - end if - call mpi_bcast(pelist, size(pelist), MPI_INTEGER, 0, DRIVER_COMM, ierr) - call seq_comm_setcomm(GLOID, pelist,iname='GLOBAL') - - if (mype == 0) then - pelist(1,1) = cpl_rootpe - pelist(2,1) = cpl_rootpe + (cpl_ntasks -1) * cpl_pestride - pelist(3,1) = cpl_pestride - end if - - call mpi_bcast(pelist, size(pelist), MPI_INTEGER, 0, DRIVER_COMM, ierr) - call seq_comm_setcomm(CPLID,pelist,nthreads=cpl_nthreads,iname='CPL') - - call comp_comm_init(driver_comm, atm_rootpe, atm_nthreads, atm_layout, atm_ntasks, atm_pestride, num_inst_atm, & - CPLID, ATMID, CPLATMID, ALLATMID, CPLALLATMID, 'ATM', count, drv_comm_id) - call comp_comm_init(driver_comm, lnd_rootpe, lnd_nthreads, lnd_layout, lnd_ntasks, lnd_pestride, num_inst_lnd, & - CPLID, LNDID, CPLLNDID, ALLLNDID, CPLALLLNDID, 'LND', count, drv_comm_id) - call comp_comm_init(driver_comm, ice_rootpe, ice_nthreads, ice_layout, ice_ntasks, ice_pestride, num_inst_ice, & - CPLID, ICEID, CPLICEID, ALLICEID, CPLALLICEID, 'ICE', count, drv_comm_id) - call comp_comm_init(driver_comm, ocn_rootpe, ocn_nthreads, ocn_layout, ocn_ntasks, ocn_pestride, num_inst_ocn, & - CPLID, OCNID, CPLOCNID, ALLOCNID, CPLALLOCNID, 'OCN', count, drv_comm_id) - call comp_comm_init(driver_comm, rof_rootpe, rof_nthreads, rof_layout, rof_ntasks, rof_pestride, num_inst_rof, & - CPLID, ROFID, CPLROFID, ALLROFID, CPLALLROFID, 'ROF', count, drv_comm_id) - call comp_comm_init(driver_comm, glc_rootpe, glc_nthreads, glc_layout, glc_ntasks, glc_pestride, num_inst_glc, & - CPLID, GLCID, CPLGLCID, ALLGLCID, CPLALLGLCID, 'GLC', count, drv_comm_id) - call comp_comm_init(driver_comm, wav_rootpe, wav_nthreads, wav_layout, wav_ntasks, wav_pestride, num_inst_wav, & - CPLID, WAVID, CPLWAVID, ALLWAVID, CPLALLWAVID, 'WAV', count, drv_comm_id) - call comp_comm_init(driver_comm, esp_rootpe, esp_nthreads, esp_layout, esp_ntasks, esp_pestride, num_inst_esp, & - CPLID, ESPID, CPLESPID, ALLESPID, CPLALLESPID, 'ESP', count, drv_comm_id) - call comp_comm_init(driver_comm, iac_rootpe, iac_nthreads, iac_layout, iac_ntasks, iac_pestride, num_inst_iac, & - CPLID, IACID, CPLIACID, ALLIACID, CPLALLIACID, 'IAC', count, drv_comm_id) - - if (count /= ncomps) then - write(logunit,*) trim(subname),' ERROR in ID count ',count,ncomps - call shr_sys_abort(trim(subname)//' ERROR in ID count') - endif - !! Count the total number of threads - - max_threads = -1 - do n = 1,ncomps - max_threads = max(max_threads,seq_comms(n)%nthreads) - enddo - do n = 1,ncomps - seq_comms(n)%pethreads = max_threads - enddo - - ! compute each components root pe global id and broadcast so all pes have info - - do n = 1,ncomps - gloroot = -999 - if (seq_comms(n)%iamroot) gloroot = seq_comms(n)%gloiam - call shr_mpi_max(gloroot,seq_comms(n)%gloroot,DRIVER_COMM, & - trim(subname)//' gloroot',all=.true.) - enddo - - ! Initialize MCT - - ! ensure that all driver_comm processes initialized their comms - call mpi_barrier(DRIVER_COMM,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_barrier driver pre-mct-init') - - ! add up valid comps on local pe - - valid_comps = '*' - myncomps = 0 - do n = 1,ncomps - if (seq_comms(n)%mpicom /= MPI_COMM_NULL) then - myncomps = myncomps + 1 - valid_comps(n) = seq_comms(n)%name - endif - enddo - - ! 
set comps and comms - - allocate(comps(myncomps),comms(myncomps),stat=ierr) - if(ierr/=0) call mct_die(subName,'allocate comps comms',ierr) - - myncomps = 0 - do n = 1,ncomps - if (seq_comms(n)%mpicom /= MPI_COMM_NULL) then - myncomps = myncomps + 1 - if (myncomps > size(comps)) then - write(logunit,*) trim(subname),' ERROR in myncomps ',myncomps,size(comps) - call shr_sys_abort() - endif - comps(myncomps) = seq_comms(n)%ID - comms(myncomps) = seq_comms(n)%mpicom - endif - enddo - - if (myncomps /= size(comps)) then - write(logunit,*) trim(subname),' ERROR in myncomps ',myncomps,size(comps),comps,valid_comps - call shr_sys_abort() - endif - - call mct_world_init(ncomps, DRIVER_COMM, comms, comps) - - deallocate(comps,comms) - - - call seq_comm_printcomms() - - end subroutine seq_comm_init - - subroutine comp_comm_init(driver_comm, comp_rootpe, comp_nthreads, comp_layout, & - comp_ntasks, comp_pestride, num_inst_comp, & - CPLID, COMPID, CPLCOMPID, ALLCOMPID, CPLALLCOMPID, name, count, drv_comm_id) - integer, intent(in) :: driver_comm - integer, intent(in) :: comp_rootpe - integer, intent(in) :: comp_nthreads - character(len=*), intent(in) :: comp_layout - integer, intent(in) :: comp_ntasks - integer, intent(in) :: comp_pestride - integer, intent(in) :: num_inst_comp - integer, intent(in) :: CPLID - integer, intent(out) :: COMPID(num_inst_comp) - integer, intent(out) :: CPLCOMPID(num_inst_comp) - integer, intent(out) :: ALLCOMPID - integer, intent(out) :: CPLALLCOMPID - integer, intent(inout) :: count - integer, intent(in), optional :: drv_comm_id - character(len=*), intent(in) :: name - - character(len=*), parameter :: subname = "comp_comm_init" - integer :: comp_inst_tasks - integer :: droot - integer :: current_task_rootpe - integer :: cmin(num_inst_comp), cmax(num_inst_comp), cstr(num_inst_comp) - integer :: n - integer :: pelist (3,1) - integer :: ierr - integer :: mype - - call mpi_comm_rank(driver_comm, mype, ierr) - - count = count + 1 - ALLCOMPID = count - count = count + 1 - CPLALLCOMPID = count - do n = 1, num_inst_comp - count = count + 1 - COMPID(n) = count - count = count + 1 - CPLCOMPID(n) = count - enddo - - if (mype == 0) then - !--- validation of inputs --- - ! rootpes >= 0 - !! Determine the process layout - !! - !! We will assign comp_ntasks / num_inst_comp tasks to each component - !! instance. (This may lead to unallocated tasks if comp_ntasks is - !! not an integer multiple of num_inst_comp.) 
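The layout rule sketched in the comment above is what the code that follows implements: a "concurrent" layout gives each of the num_inst_comp instances comp_ntasks / num_inst_comp tasks (integer division, so trailing tasks can be left unallocated), with instance root PEs offset by comp_inst_tasks * comp_pestride, while a "sequential" layout reuses the full task range for every instance. A minimal Python sketch of that arithmetic follows; the helper name, and the task counts in the usage example, are purely illustrative and not part of CIME.

    def instance_pelists(rootpe, ntasks, pestride, num_inst, layout):
        """Return one (start, end, stride) PE triplet per instance, following the
        concurrent/sequential layout rule described above (illustrative only)."""
        if layout == "concurrent":
            inst_tasks = ntasks // num_inst   # leftover tasks stay unallocated
            droot = inst_tasks * pestride     # offset between instance root PEs
        elif layout == "sequential":
            inst_tasks = ntasks               # every instance reuses the same PEs
            droot = 0
        else:
            raise ValueError("invalid layout: %r" % layout)
        pelists = []
        root = rootpe
        for _ in range(num_inst):
            pelists.append((root, root + (inst_tasks - 1) * pestride, pestride))
            root += droot
        return pelists

    # 16 tasks, stride 1, 3 concurrent instances -> (0, 4, 1), (5, 9, 1), (10, 14, 1);
    # task 15 is left idle because 16 is not a multiple of 3.
    print(instance_pelists(0, 16, 1, 3, "concurrent"))
    # sequential layout -> three identical (0, 15, 1) triplets
    print(instance_pelists(0, 16, 1, 3, "sequential"))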
- - if (comp_rootpe < 0) then - call shr_sys_abort(trim(subname)//' ERROR: rootpes must be >= 0 for component '//trim(name)) - endif - - if (trim(comp_layout) == trim(layout_concurrent)) then - comp_inst_tasks = comp_ntasks / num_inst_comp - droot = (comp_inst_tasks * comp_pestride) - elseif (trim(comp_layout) == trim(layout_sequential)) then - comp_inst_tasks = comp_ntasks - droot = 0 - else - call shr_sys_abort(subname//' ERROR invalid comp_layout for component '//trim(name)) - endif - current_task_rootpe = comp_rootpe - do n = 1, num_inst_comp - cmin(n) = current_task_rootpe - cmax(n) = current_task_rootpe & - + ((comp_inst_tasks - 1) * comp_pestride) - cstr(n) = comp_pestride - current_task_rootpe = current_task_rootpe + droot - end do - endif - do n = 1, num_inst_comp - if (mype==0) then - pelist(1,1) = cmin(n) - pelist(2,1) = cmax(n) - pelist(3,1) = cstr(n) - endif - call mpi_bcast(pelist, size(pelist), MPI_INTEGER, 0, DRIVER_COMM, ierr) - if (present(drv_comm_id)) then - call seq_comm_setcomm(COMPID(n),pelist,nthreads=comp_nthreads,iname=name,inst=drv_comm_id) - else - call seq_comm_setcomm(COMPID(n),pelist,nthreads=comp_nthreads,iname=name,inst=n,tinst=num_inst_comp) - endif - call seq_comm_joincomm(CPLID, COMPID(n), CPLCOMPID(n), 'CPL'//name, n, num_inst_comp) - enddo - call seq_comm_jcommarr(COMPID, ALLCOMPID, 'ALL'//name//'ID', 1, 1) - call seq_comm_joincomm(CPLID, ALLCOMPID, CPLALLCOMPID, 'CPLALL'//name//'ID', 1, 1) - - end subroutine comp_comm_init - - subroutine comp_pelayout_init(numpes, ntasks, rootpe, pestride, nthreads, layout) - integer,intent(in) :: numpes - integer,intent(out) :: ntasks, rootpe, pestride, nthreads - character(len=*),optional :: layout - - ntasks = numpes - rootpe = 0 - pestride = 1 - nthreads = 1 - if(present(layout)) then - layout = trim(layout_concurrent) - endif - end subroutine comp_pelayout_init - - !--------------------------------------------------------- - subroutine seq_comm_clean() - ! Resets this module - freeing memory, etc. - ! - ! This potentially allows seq_comm_init can be called again, e.g., from unit tests. - ! - ! Also calls mct_world_clean, to be symmetric with the mct_world_init call from - ! seq_comm_init. - - character(*), parameter :: subName = '(seq_comm_clean) ' - !---------------------------------------------------------- - - if (.not. seq_comm_mct_initialized) then - write(logunit,*) trim(subname),' ERROR seq_comm_init has not been called ' - call shr_sys_abort() - end if - seq_comm_mct_initialized = .false. - - call mct_world_clean() - - end subroutine seq_comm_clean - - !--------------------------------------------------------- - subroutine seq_comm_setcomm(ID,pelist,nthreads,iname,inst,tinst) - - implicit none - integer,intent(IN) :: ID - integer,intent(IN) :: pelist(:,:) - integer,intent(IN),optional :: nthreads - character(len=*),intent(IN),optional :: iname ! name of component - integer,intent(IN),optional :: inst ! instance of component - integer,intent(IN),optional :: tinst ! total number of instances for this component - - integer :: mpigrp_world - integer :: mpigrp - integer :: mpicom - integer :: ntasks - integer :: ierr - character(len=seq_comm_namelen) :: cname - logical :: set_suffix - character(*),parameter :: subName = '(seq_comm_setcomm) ' - - if (ID < 1 .or. 
ID > ncomps) then - write(logunit,*) subname,' ID out of range, abort ',ID - call shr_sys_abort() - endif - - call mpi_comm_group(DRIVER_COMM, mpigrp_world, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_group mpigrp_world') - call mpi_group_range_incl(mpigrp_world, 1, pelist, mpigrp,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_group_range_incl mpigrp') - call mpi_comm_create(DRIVER_COMM, mpigrp, mpicom, ierr) - - call shr_mpi_chkerr(ierr,subname//' mpi_comm_create mpigrp') - - ntasks = ((pelist(2,1) - pelist(1,1)) / pelist(3,1)) + 1 - - seq_comms(ID)%set = .true. - seq_comms(ID)%ID = ID - - if (present(inst)) then - seq_comms(ID)%inst = inst - set_suffix = .true. - else - seq_comms(ID)%inst = 1 - set_suffix = .false. - endif - - if (present(tinst)) then - if (tinst == 1) set_suffix = .false. - endif - - if (present(iname)) then - seq_comms(ID)%name = trim(iname) - if (set_suffix) then - call seq_comm_mkname(cname,iname,seq_comms(ID)%inst) - seq_comms(ID)%name = trim(cname) - endif - endif - - if (set_suffix) then - call seq_comm_mkname(cname,'_',seq_comms(ID)%inst) - seq_comms(ID)%suffix = trim(cname) - else - seq_comms(ID)%suffix = ' ' - endif - - seq_comms(ID)%mpicom = mpicom - seq_comms(ID)%mpigrp = mpigrp - if (present(nthreads)) then - seq_comms(ID)%nthreads = nthreads - else - seq_comms(ID)%nthreads = 1 - endif - - if (mpicom /= MPI_COMM_NULL) then - call mpi_comm_size(mpicom,seq_comms(ID)%npes,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size') - call mpi_comm_rank(mpicom,seq_comms(ID)%iam,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank') - if (seq_comms(ID)%iam == 0) then - seq_comms(ID)%iamroot = .true. - else - seq_comms(ID)%iamroot = .false. - endif - else - seq_comms(ID)%npes = -1 - seq_comms(ID)%iam = -1 - seq_comms(ID)%nthreads = 1 - seq_comms(ID)%iamroot = .false. - endif - - if (seq_comms(ID)%iamroot) then - write(logunit,F11) trim(subname),' init ID ',ID,seq_comms(ID)%name, & - ' pelist =',pelist,' npes =',seq_comms(ID)%npes,' nthreads =',seq_comms(ID)%nthreads,& - ' suffix =',trim(seq_comms(ID)%suffix) - endif - - end subroutine seq_comm_setcomm - - !--------------------------------------------------------- - subroutine seq_comm_joincomm(ID1,ID2,ID,iname,inst,tinst) - - implicit none - integer,intent(IN) :: ID1 ! src id - integer,intent(IN) :: ID2 ! srd id - integer,intent(IN) :: ID ! computed id - character(len=*),intent(IN),optional :: iname ! comm name - integer,intent(IN),optional :: inst - integer,intent(IN),optional :: tinst - - integer :: mpigrp - integer :: mpicom - integer :: ierr - character(len=seq_comm_namelen) :: cname - logical :: set_suffix - integer,allocatable :: pe_t1(:),pe_t2(:) - character(*),parameter :: subName = '(seq_comm_joincomm) ' - - ! check that IDs are in valid range, that ID1 and ID2 have - ! been set, and that ID has not been set - - if (ID1 < 1 .or. ID1 > ncomps) then - write(logunit,*) subname,' ID1 out of range, abort ',ID1 - call shr_sys_abort() - endif - if (ID2 < 1 .or. ID2 > ncomps) then - write(logunit,*) subname,' ID2 out of range, abort ',ID2 - call shr_sys_abort() - endif - if (ID < 1 .or. ID > ncomps) then - write(logunit,*) subname,' ID out of range, abort ',ID - call shr_sys_abort() - endif - if (.not. seq_comms(ID1)%set .or. .not. 
seq_comms(ID2)%set) then - write(logunit,*) subname,' ID1 or ID2 not set ',ID1,ID2 - call shr_sys_abort() - endif - if (seq_comms(ID)%set) then - write(logunit,*) subname,' ID already set ',ID - call shr_sys_abort() - endif - - call mpi_group_union(seq_comms(ID1)%mpigrp,seq_comms(ID2)%mpigrp,mpigrp,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_union mpigrp') - call mpi_comm_create(DRIVER_COMM, mpigrp, mpicom, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_create mpigrp') - - seq_comms(ID)%set = .true. - seq_comms(ID)%ID = ID - - if (present(inst)) then - seq_comms(ID)%inst = inst - else - seq_comms(ID)%inst = 1 - endif - - set_suffix = .true. - if (present(tinst)) then - if (tinst == 1) set_suffix = .false. - endif - - if (present(iname)) then - seq_comms(ID)%name = trim(iname) - if (set_suffix) then - call seq_comm_mkname(cname,iname,seq_comms(ID)%inst) - seq_comms(ID)%name = trim(cname) - endif - endif - - if (set_suffix) then - call seq_comm_mkname(cname,'_',seq_comms(ID)%inst) - seq_comms(ID)%suffix = trim(cname) - else - seq_comms(ID)%suffix = ' ' - endif - - seq_comms(ID)%mpicom = mpicom - seq_comms(ID)%mpigrp = mpigrp - seq_comms(ID)%nthreads = max(seq_comms(ID1)%nthreads,seq_comms(ID2)%nthreads) - seq_comms(ID)%nthreads = max(seq_comms(ID)%nthreads,1) - - if (mpicom /= MPI_COMM_NULL) then - call mpi_comm_size(mpicom,seq_comms(ID)%npes,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size') - call mpi_comm_rank(mpicom,seq_comms(ID)%iam,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank') - if (seq_comms(ID)%iam == 0) then - seq_comms(ID)%iamroot = .true. - else - seq_comms(ID)%iamroot = .false. - endif - else - seq_comms(ID)%npes = -1 - seq_comms(ID)%iam = -1 - seq_comms(ID)%iamroot = .false. - endif - - allocate(pe_t1(1),pe_t2(1)) - pe_t1(1) = 0 - call mpi_group_translate_ranks(seq_comms(ID1)%mpigrp, 1, pe_t1, mpigrp, pe_t2, ierr) - seq_comms(ID)%cplpe = pe_t2(1) - pe_t1(1) = 0 - call mpi_group_translate_ranks(seq_comms(ID2)%mpigrp, 1, pe_t1, mpigrp, pe_t2, ierr) - seq_comms(ID)%cmppe = pe_t2(1) - deallocate(pe_t1,pe_t2) - - if (seq_comms(ID)%iamroot) then - if (loglevel > 1) then - write(logunit,F12) trim(subname),' init ID ',ID,seq_comms(ID)%name, & - ' join IDs =',ID1,ID2,' npes =',seq_comms(ID)%npes, & - ' nthreads =',seq_comms(ID)%nthreads, & - ' cpl/cmp pes =',seq_comms(ID)%cplpe,seq_comms(ID)%cmppe - else - write(logunit,F13) trim(subname),' init ID ',ID,seq_comms(ID)%name, & - ' join IDs =',ID1,ID2,' npes =',seq_comms(ID)%npes, & - ' nthreads =',seq_comms(ID)%nthreads - endif - endif - - end subroutine seq_comm_joincomm - - !--------------------------------------------------------- - subroutine seq_comm_jcommarr(IDs,ID,iname,inst,tinst) - - implicit none - integer,intent(IN) :: IDs(:) ! src id - integer,intent(IN) :: ID ! computed id - character(len=*),intent(IN),optional :: iname ! comm name - integer,intent(IN),optional :: inst - integer,intent(IN),optional :: tinst - - integer :: mpigrp, mpigrpp - integer :: mpicom, nids - integer :: ierr - integer :: n - character(len=seq_comm_namelen) :: cname - logical :: set_suffix - character(*),parameter :: subName = '(seq_comm_jcommarr) ' - - ! check that IDs are in valid range, that IDs have - ! been set, and that ID has not been set - - nids = size(IDs) - do n = 1,nids - if (IDs(n) < 1 .or. IDs(n) > ncomps) then - write(logunit,*) subname,' IDs out of range, abort ',n,IDs(n) - call shr_sys_abort() - endif - if (.not. 
seq_comms(IDs(n))%set) then - write(logunit,*) subname,' IDs not set ',n,IDs(n) - call shr_sys_abort() - endif - enddo - - if (ID < 1 .or. ID > ncomps) then - write(logunit,*) subname,' ID out of range, abort ',ID - call shr_sys_abort() - endif - if (seq_comms(ID)%set) then - write(logunit,*) subname,' ID already set ',ID - call shr_sys_abort() - endif - - mpigrp = seq_comms(IDs(1))%mpigrp - do n = 1,nids - mpigrpp = mpigrp - call mpi_group_union(mpigrpp,seq_comms(IDs(n))%mpigrp,mpigrp,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_union mpigrp') - enddo - ! The allcompid is created across multiple drivers. - call mpi_comm_create(GLOBAL_COMM, mpigrp, mpicom, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_create mpigrp') - - seq_comms(ID)%set = .true. - seq_comms(ID)%ID = ID - - if (present(inst)) then - seq_comms(ID)%inst = inst - else - seq_comms(ID)%inst = 1 - endif - - set_suffix = .true. - if (present(tinst)) then - if (tinst == 1) set_suffix = .false. - endif - - if (present(iname)) then - seq_comms(ID)%name = trim(iname) - if (set_suffix) then - call seq_comm_mkname(cname,iname,seq_comms(ID)%inst) - seq_comms(ID)%name = trim(cname) - endif - endif - - if (set_suffix) then - call seq_comm_mkname(cname,'_',seq_comms(ID)%inst) - seq_comms(ID)%suffix = trim(cname) - else - seq_comms(ID)%suffix = ' ' - endif - - seq_comms(ID)%mpicom = mpicom - seq_comms(ID)%mpigrp = mpigrp - - seq_comms(ID)%nthreads = 1 - do n = 1,nids - seq_comms(ID)%nthreads = max(seq_comms(ID)%nthreads,seq_comms(IDs(n))%nthreads) - enddo - - if (mpicom /= MPI_COMM_NULL) then - call mpi_comm_size(mpicom,seq_comms(ID)%npes,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size') - call mpi_comm_rank(mpicom,seq_comms(ID)%iam,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank') - if (seq_comms(ID)%iam == 0) then - seq_comms(ID)%iamroot = .true. - else - seq_comms(ID)%iamroot = .false. - endif - else - seq_comms(ID)%npes = -1 - seq_comms(ID)%iam = -1 - seq_comms(ID)%iamroot = .false. 
- endif - - seq_comms(ID)%cplpe = -1 - seq_comms(ID)%cmppe = -1 - - if (seq_comms(ID)%iamroot) then - if (loglevel > 1) then - write(logunit,F14) trim(subname),' init ID ',ID,seq_comms(ID)%name, & - ' join multiple comp IDs',' npes =',seq_comms(ID)%npes, & - ' nthreads =',seq_comms(ID)%nthreads - else - write(logunit,F14) trim(subname),' init ID ',ID,seq_comms(ID)%name, & - ' join multiple comp IDs',' npes =',seq_comms(ID)%npes, & - ' nthreads =',seq_comms(ID)%nthreads - endif - endif - - end subroutine seq_comm_jcommarr - - !--------------------------------------------------------- - subroutine seq_comm_printcomms() - - implicit none - character(*),parameter :: subName = '(seq_comm_printcomms) ' - integer :: n,mype,npes,ierr - - call mpi_comm_size(DRIVER_COMM, npes , ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size comm_world') - call mpi_comm_rank(DRIVER_COMM, mype , ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank comm_world') - - call shr_sys_flush(logunit) - call mpi_barrier(DRIVER_COMM,ierr) - if (mype == 0) then - do n = 1,ncomps - write(logunit,'(a,4i6,2x,3a)') trim(subName),n, & - seq_comms(n)%gloroot,seq_comms(n)%npes,seq_comms(n)%nthreads, & - trim(seq_comms(n)%name),':',trim(seq_comms(n)%suffix) - enddo - call shr_sys_flush(logunit) - endif - - end subroutine seq_comm_printcomms - - !--------------------------------------------------------- - subroutine seq_comm_setptrs(ID,mpicom,mpigrp,npes,nthreads,iam,iamroot,gloiam,gloroot, & - cplpe,cmppe,pethreads, name) - - implicit none - integer,intent(in) :: ID - integer,intent(out),optional :: mpicom - integer,intent(out),optional :: mpigrp - integer,intent(out),optional :: npes - integer,intent(out),optional :: nthreads - integer,intent(out),optional :: iam - logical,intent(out),optional :: iamroot - integer,intent(out),optional :: gloiam - integer,intent(out),optional :: gloroot - integer,intent(out),optional :: cplpe - integer,intent(out),optional :: cmppe - integer,intent(out),optional :: pethreads - character(len=seq_comm_namelen) , intent(out), optional :: name - character(*),parameter :: subName = '(seq_comm_setptrs) ' - - ! Negative ID means there is no comm, return default or inactive values - if ((ID == 0) .or. (ID > ncomps)) then - write(logunit,*) subname,' ID out of range, return ',ID - return - endif - - if (present(mpicom)) then - if (ID > 0) then - mpicom = seq_comms(ID)%mpicom - else - mpicom = MPI_COMM_NULL - end if - endif - - if (present(mpigrp)) then - if (ID > 0) then - mpigrp = seq_comms(ID)%mpigrp - else - mpigrp = MPI_GROUP_NULL - end if - endif - - if (present(npes)) then - if (ID > 0) then - npes = seq_comms(ID)%npes - else - npes = 0 - end if - endif - - if (present(nthreads)) then - if (ID > 0) then - nthreads = seq_comms(ID)%nthreads - else - nthreads = 1 - end if - endif - - if (present(iam)) then - if (ID > 0) then - iam = seq_comms(ID)%iam - else - iam = -1 - end if - endif - - if (present(iamroot)) then - if (ID > 0) then - iamroot = seq_comms(ID)%iamroot - else - iamroot = .false. 
- end if - endif - - if (present(gloiam)) then - if (ID > 0) then - gloiam = seq_comms(ID)%gloiam - else - gloiam = -1 - end if - endif - - if (present(gloroot)) then - if (ID > 0) then - gloroot = seq_comms(ID)%gloroot - else - gloroot = -1 - end if - endif - - if (present(cplpe)) then - if (ID > 0) then - cplpe = seq_comms(ID)%cplpe - else - cplpe = -1 - end if - endif - - if (present(cmppe)) then - if (ID > 0) then - cmppe = seq_comms(ID)%cmppe - else - cmppe = -1 - end if - endif - - if (present(pethreads)) then - if (ID > 0) then - pethreads = seq_comms(ID)%pethreads - else - pethreads = 1 - end if - endif - - if(present(name)) then - if (ID > 0) then - name = seq_comms(ID)%name - else - name = '' - end if - end if - - end subroutine seq_comm_setptrs - !--------------------------------------------------------- - subroutine seq_comm_setnthreads(nthreads) - - implicit none - integer,intent(in) :: nthreads - character(*),parameter :: subName = '(seq_comm_setnthreads) ' - -#ifdef _OPENMP - if (nthreads < 1) then - call shr_sys_abort(subname//' ERROR: nthreads less than one') - endif - call omp_set_num_threads(nthreads) -#endif - - end subroutine seq_comm_setnthreads - !--------------------------------------------------------- - integer function seq_comm_getnthreads() - - implicit none - character(*),parameter :: subName = '(seq_comm_getnthreads) ' -#ifdef _OPENMP - integer :: omp_get_num_threads - seq_comm_getnthreads = -1 - - !$OMP PARALLEL - seq_comm_getnthreads = omp_get_num_threads() - !$OMP END PARALLEL -#else - seq_comm_getnthreads = -1 -#endif - - end function seq_comm_getnthreads - !--------------------------------------------------------- - logical function seq_comm_iamin(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_iamin) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_iamin = .false. - else if (seq_comms(ID)%iam >= 0) then - seq_comm_iamin = .true. - else - seq_comm_iamin = .false. - endif - - end function seq_comm_iamin - !--------------------------------------------------------- - logical function seq_comm_iamroot(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_iamroot) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_iamroot = .false. - else - seq_comm_iamroot = seq_comms(ID)%iamroot - end if - - end function seq_comm_iamroot - !--------------------------------------------------------- - integer function seq_comm_mpicom(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_mpicom) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_mpicom = MPI_COMM_NULL - else - seq_comm_mpicom = seq_comms(ID)%mpicom - end if - - end function seq_comm_mpicom - !--------------------------------------------------------- - integer function seq_comm_iam(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_iam) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_iam = -1 - else - seq_comm_iam = seq_comms(ID)%iam - end if - - end function seq_comm_iam - !--------------------------------------------------------- - integer function seq_comm_gloiam(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_gloiam) ' - - if ((ID < 1) .or. 
(ID > ncomps)) then - seq_comm_gloiam = -1 - else - seq_comm_gloiam = seq_comms(ID)%gloiam - end if - - end function seq_comm_gloiam - !--------------------------------------------------------- - integer function seq_comm_gloroot(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_gloroot) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_gloroot = -1 - else - seq_comm_gloroot = seq_comms(ID)%gloroot - end if - - end function seq_comm_gloroot - !--------------------------------------------------------- - integer function seq_comm_cplpe(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_cplpe) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_cplpe = -1 - else - seq_comm_cplpe = seq_comms(ID)%cplpe - end if - - end function seq_comm_cplpe - !--------------------------------------------------------- - integer function seq_comm_cmppe(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_cmppe) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_cmppe = -1 - else - seq_comm_cmppe = seq_comms(ID)%cmppe - end if - - end function seq_comm_cmppe - !--------------------------------------------------------- - character(len=seq_comm_namelen) function seq_comm_name(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_name) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_name = '' - else - seq_comm_name = trim(seq_comms(ID)%name) - end if - - end function seq_comm_name - !--------------------------------------------------------- - character(len=seq_comm_namelen) function seq_comm_suffix(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_suffix) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_suffix = '' - else - seq_comm_suffix = trim(seq_comms(ID)%suffix) - end if - - end function seq_comm_suffix - !--------------------------------------------------------- - !--------------------------------------------------------- - integer function seq_comm_inst(ID) - - implicit none - integer,intent(in) :: ID - character(*),parameter :: subName = '(seq_comm_inst) ' - - if ((ID < 1) .or. (ID > ncomps)) then - seq_comm_inst = 0 - else - seq_comm_inst = seq_comms(ID)%inst - end if - - end function seq_comm_inst - !--------------------------------------------------------- - subroutine seq_comm_mkname(oname,str1,num) - implicit none - character(len=*),intent(out) :: oname - character(len=*),intent(in) :: str1 - integer,intent(in) :: num - character(*),parameter :: subName = '(seq_comm_mkname) ' - - character(len=8) :: cnum - - write(cnum,'(i4.4)') num - if (len_trim(str1) + len_trim(cnum) > len(oname)) then - write(logunit,*) trim(subname),' ERROR in str lens ',len(oname),trim(str1),trim(cnum) - call shr_sys_abort(trim(subname)) - endif - oname = trim(str1)//trim(cnum) - - end subroutine seq_comm_mkname - !--------------------------------------------------------- -end module seq_comm_mct diff --git a/src/drivers/mct/shr/seq_drydep_mod.F90 b/src/drivers/mct/shr/seq_drydep_mod.F90 deleted file mode 100644 index 071c35aafeb..00000000000 --- a/src/drivers/mct/shr/seq_drydep_mod.F90 +++ /dev/null @@ -1,913 +0,0 @@ -module seq_drydep_mod - - !======================================================================== - ! Module for handling dry depostion of tracers. - ! This module is shared by land and atmosphere models for the computations of - ! 
dry deposition of tracers - ! - ! !REVISION HISTORY: - ! 2008-Nov-12 - F. Vitt - creation. - ! 2009-Feb-19 - E. Kluzek - merge shr_drydep_tables module in. - ! 2009-Feb-20 - E. Kluzek - use shr_ coding standards, and check for namelist file. - ! 2009-Feb-20 - E. Kluzek - Put _r8 on all constants, remove namelist read out. - ! 2009-Mar-23 - F. Vitt - Some corrections/cleanup and addition of drydep_method. - ! 2009-Mar-27 - E. Kluzek - Get description and units from J.F. Lamarque. - !======================================================================== - - ! !USES: - - use shr_sys_mod, only : shr_sys_abort - use shr_log_mod, only : s_loglev => shr_log_Level - use shr_kind_mod, only : r8 => shr_kind_r8, CS => SHR_KIND_CS, CX => SHR_KIND_CX - use shr_const_mod, only : SHR_CONST_G, SHR_CONST_RDAIR, & - SHR_CONST_CPDAIR, SHR_CONST_MWWV - - implicit none - save - - private - - ! !PUBLIC MEMBER FUNCTIONS - - public :: seq_drydep_readnl ! Read namelist - public :: seq_drydep_init ! Initialization of drydep data - public :: seq_drydep_setHCoeff ! Calculate Henry's law coefficients - - ! !PRIVATE ARRAY SIZES - - integer, private, parameter :: maxspc = 100 ! Maximum number of species - integer, public, parameter :: n_species_table = 77 ! Number of species to work with - integer, private, parameter :: NSeas = 5 ! Number of seasons - integer, private, parameter :: NLUse = 11 ! Number of land-use types - - ! !PUBLIC DATA MEMBERS: - - ! method specification - character(16),public,parameter :: DD_XATM = 'xactive_atm'! dry-dep atmosphere - character(16),public,parameter :: DD_XLND = 'xactive_lnd'! dry-dep land - character(16),public,parameter :: DD_TABL = 'table' ! dry-dep table (atm and lnd) - character(16),public :: drydep_method = DD_XLND ! Which option choosen - - real(r8), public, parameter :: ph = 1.e-5_r8 ! measure of the acidity (dimensionless) - - logical, public :: lnd_drydep ! If dry-dep fields passed - integer, public :: n_drydep = 0 ! Number in drypdep list - character(len=32), public, dimension(maxspc) :: drydep_list = '' ! List of dry-dep species - - character(len=CS), public :: drydep_fields_token = '' ! First drydep fields token - - real(r8), public, allocatable, dimension(:) :: foxd ! reactivity factor for oxidation (dimensioness) - real(r8), public, allocatable, dimension(:) :: drat ! ratio of molecular diffusivity (D_H2O/D_species; dimensionless) - integer, public, allocatable, dimension(:) :: mapping ! mapping to species table - ! --- Indices for each species --- - integer, public :: h2_ndx, ch4_ndx, co_ndx, pan_ndx, mpan_ndx, so2_ndx, o3_ndx, o3a_ndx, xpan_ndx - - !--------------------------------------------------------------------------- - ! Table 1 from Wesely, Atmos. Environment, 1989, p1293 - ! Table 2 from Sheih, microfiche PB86-218104 and Walcek, Atmos. Environment, 1986, p949 - ! Table 3-5 compiled by P. Hess - ! - ! index #1 : season - ! 1 -> midsummer with lush vegetation - ! 2 -> autumn with unharvested cropland - ! 3 -> late autumn after frost, no snow - ! 4 -> winter, snow on ground, and subfreezing - ! 5 -> transitional spring with partially green short annuals - ! - ! index #2 : landuse type - ! 1 -> urban land - ! 2 -> agricultural land - ! 3 -> range land - ! 4 -> deciduous forest - ! 5 -> coniferous forest - ! 6 -> mixed forest including wetland - ! 7 -> water, both salt and fresh - ! 8 -> barren land, mostly desert - ! 9 -> nonforested wetland - ! 10 -> mixed agricultural and range land - ! 11 -> rocky open areas with low growing shrubs - ! - ! 
JFL August 2000 - !--------------------------------------------------------------------------- - - !--------------------------------------------------------------------------- - ! table to parameterize the impact of soil moisture on the deposition of H2 and - ! CO on soils (from Sanderson et al., J. Atmos. Chem., 46, 15-28, 2003). - !--------------------------------------------------------------------------- - - !--- deposition of h2 and CO on soils --- - real(r8), parameter, public :: h2_a(NLUse) = & - (/ 0.000_r8, 0.000_r8, 0.270_r8, 0.000_r8, 0.000_r8, & - 0.000_r8, 0.000_r8, 0.000_r8, 0.000_r8, 0.000_r8, 0.000_r8/) - !--- deposition of h2 and CO on soils --- - real(r8), parameter, public :: h2_b(NLUse) = & - (/ 0.000_r8,-41.390_r8, -0.472_r8,-41.900_r8,-41.900_r8, & - -41.900_r8, 0.000_r8, 0.000_r8, 0.000_r8,-41.390_r8, 0.000_r8/) - !--- deposition of h2 and CO on soils --- - real(r8), parameter, public :: h2_c(NLUse) = & - (/ 0.000_r8, 16.850_r8, 1.235_r8, 19.700_r8, 19.700_r8, & - 19.700_r8, 0.000_r8, 0.000_r8, 0.000_r8, 17.700_r8, 1.000_r8/) - - !--- deposition of h2 and CO on soils - ! - !--- ri: Richardson number (dimensionless) - !--- rlu: Resistance of leaves in upper canopy (s.m-1) - !--- rac: Aerodynamic resistance to lower canopy (s.m-1) - !--- rgss: Ground surface resistance for SO2 (s.m-1) - !--- rgso: Ground surface resistance for O3 (s.m-1) - !--- rcls: Lower canopy resistance for SO2 (s.m-1) - !--- rclo: Lower canopy resistance for O3 (s.m-1) - ! - real(r8), public, dimension(NSeas,NLUse) :: ri, rlu, rac, rgss, rgso, rcls, rclo - - data ri (1,1:NLUse) & - /1.e36_r8, 60._r8, 120._r8, 70._r8, 130._r8, 100._r8,1.e36_r8,1.e36_r8, 80._r8, 100._r8, 150._r8/ - data rlu (1,1:NLUse) & - /1.e36_r8,2000._r8,2000._r8,2000._r8,2000._r8,2000._r8,1.e36_r8,1.e36_r8,2500._r8,2000._r8,4000._r8/ - data rac (1,1:NLUse) & - / 100._r8, 200._r8, 100._r8,2000._r8,2000._r8,2000._r8, 0._r8, 0._r8, 300._r8, 150._r8, 200._r8/ - data rgss(1,1:NLUse) & - / 400._r8, 150._r8, 350._r8, 500._r8, 500._r8, 100._r8, 0._r8,1000._r8, 0._r8, 220._r8, 400._r8/ - data rgso(1,1:NLUse) & - / 300._r8, 150._r8, 200._r8, 200._r8, 200._r8, 300._r8,2000._r8, 400._r8,1000._r8, 180._r8, 200._r8/ - data rcls(1,1:NLUse) & - /1.e36_r8,2000._r8,2000._r8,2000._r8,2000._r8,2000._r8,1.e36_r8,1.e36_r8,2500._r8,2000._r8,4000._r8/ - data rclo(1,1:NLUse) & - /1.e36_r8,1000._r8,1000._r8,1000._r8,1000._r8,1000._r8,1.e36_r8,1.e36_r8,1000._r8,1000._r8,1000._r8/ - - data ri (2,1:NLUse) & - /1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8, 250._r8, 500._r8,1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8/ - data rlu (2,1:NLUse) & - /1.e36_r8,9000._r8,9000._r8,9000._r8,4000._r8,8000._r8,1.e36_r8,1.e36_r8,9000._r8,9000._r8,9000._r8/ - data rac (2,1:NLUse) & - / 100._r8, 150._r8, 100._r8,1500._r8,2000._r8,1700._r8, 0._r8, 0._r8, 200._r8, 120._r8, 140._r8/ - data rgss(2,1:NLUse) & - / 400._r8, 200._r8, 350._r8, 500._r8, 500._r8, 100._r8, 0._r8,1000._r8, 0._r8, 300._r8, 400._r8/ - data rgso(2,1:NLUse) & - / 300._r8, 150._r8, 200._r8, 200._r8, 200._r8, 300._r8,2000._r8, 400._r8, 800._r8, 180._r8, 200._r8/ - data rcls(2,1:NLUse) & - /1.e36_r8,9000._r8,9000._r8,9000._r8,2000._r8,4000._r8,1.e36_r8,1.e36_r8,9000._r8,9000._r8,9000._r8/ - data rclo(2,1:NLUse) & - /1.e36_r8, 400._r8, 400._r8, 400._r8,1000._r8, 600._r8,1.e36_r8,1.e36_r8, 400._r8, 400._r8, 400._r8/ - - data ri (3,1:NLUse) & - /1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8, 250._r8, 500._r8,1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8/ - data rlu (3,1:NLUse) & - 
/1.e36_r8,1.e36_r8,9000._r8,9000._r8,4000._r8,8000._r8,1.e36_r8,1.e36_r8,9000._r8,9000._r8,9000._r8/ - data rac (3,1:NLUse) & - / 100._r8, 10._r8, 100._r8,1000._r8,2000._r8,1500._r8, 0._r8, 0._r8, 100._r8, 50._r8, 120._r8/ - data rgss(3,1:NLUse) & - / 400._r8, 150._r8, 350._r8, 500._r8, 500._r8, 200._r8, 0._r8,1000._r8, 0._r8, 200._r8, 400._r8/ - data rgso(3,1:NLUse) & - / 300._r8, 150._r8, 200._r8, 200._r8, 200._r8, 300._r8,2000._r8, 400._r8,1000._r8, 180._r8, 200._r8/ - data rcls(3,1:NLUse) & - /1.e36_r8,1.e36_r8,9000._r8,9000._r8,3000._r8,6000._r8,1.e36_r8,1.e36_r8,9000._r8,9000._r8,9000._r8/ - data rclo(3,1:NLUse) & - /1.e36_r8,1000._r8, 400._r8, 400._r8,1000._r8, 600._r8,1.e36_r8,1.e36_r8, 800._r8, 600._r8, 600._r8/ - - data ri (4,1:NLUse) & - /1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8, 400._r8, 800._r8,1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8/ - data rlu (4,1:NLUse) & - /1.e36_r8,1.e36_r8,1.e36_r8,1.e36_r8,6000._r8,9000._r8,1.e36_r8,1.e36_r8,9000._r8,9000._r8,9000._r8/ - data rac (4,1:NLUse) & - / 100._r8, 10._r8, 10._r8,1000._r8,2000._r8,1500._r8, 0._r8, 0._r8, 50._r8, 10._r8, 50._r8/ - data rgss(4,1:NLUse) & - / 100._r8, 100._r8, 100._r8, 100._r8, 100._r8, 100._r8, 0._r8,1000._r8, 100._r8, 100._r8, 50._r8/ - data rgso(4,1:NLUse) & - / 600._r8,3500._r8,3500._r8,3500._r8,3500._r8,3500._r8,2000._r8, 400._r8,3500._r8,3500._r8,3500._r8/ - data rcls(4,1:NLUse) & - /1.e36_r8,1.e36_r8,1.e36_r8,9000._r8, 200._r8, 400._r8,1.e36_r8,1.e36_r8,9000._r8,1.e36_r8,9000._r8/ - data rclo(4,1:NLUse) & - /1.e36_r8,1000._r8,1000._r8, 400._r8,1500._r8, 600._r8,1.e36_r8,1.e36_r8, 800._r8,1000._r8, 800._r8/ - - data ri (5,1:NLUse) & - /1.e36_r8, 120._r8, 240._r8, 140._r8, 250._r8, 190._r8,1.e36_r8,1.e36_r8, 160._r8, 200._r8, 300._r8/ - data rlu (5,1:NLUse) & - /1.e36_r8,4000._r8,4000._r8,4000._r8,2000._r8,3000._r8,1.e36_r8,1.e36_r8,4000._r8,4000._r8,8000._r8/ - data rac (5,1:NLUse) & - / 100._r8, 50._r8, 80._r8,1200._r8,2000._r8,1500._r8, 0._r8, 0._r8, 200._r8, 60._r8, 120._r8/ - data rgss(5,1:NLUse) & - / 500._r8, 150._r8, 350._r8, 500._r8, 500._r8, 200._r8, 0._r8,1000._r8, 0._r8, 250._r8, 400._r8/ - data rgso(5,1:NLUse) & - / 300._r8, 150._r8, 200._r8, 200._r8, 200._r8, 300._r8,2000._r8, 400._r8,1000._r8, 180._r8, 200._r8/ - data rcls(5,1:NLUse) & - /1.e36_r8,4000._r8,4000._r8,4000._r8,2000._r8,3000._r8,1.e36_r8,1.e36_r8,4000._r8,4000._r8,8000._r8/ - data rclo(5,1:NLUse) & - /1.e36_r8,1000._r8, 500._r8, 500._r8,1500._r8, 700._r8,1.e36_r8,1.e36_r8, 600._r8, 800._r8, 800._r8/ - - !--------------------------------------------------------------------------- - ! ... roughness length - !--------------------------------------------------------------------------- - real(r8), public, dimension(NSeas,NLUse) :: z0 - - data z0 (1,1:NLUse) & - /1.000_r8,0.250_r8,0.050_r8,1.000_r8,1.000_r8,1.000_r8,0.0006_r8,0.002_r8,0.150_r8,0.100_r8,0.100_r8/ - data z0 (2,1:NLUse) & - /1.000_r8,0.100_r8,0.050_r8,1.000_r8,1.000_r8,1.000_r8,0.0006_r8,0.002_r8,0.100_r8,0.080_r8,0.080_r8/ - data z0 (3,1:NLUse) & - /1.000_r8,0.005_r8,0.050_r8,1.000_r8,1.000_r8,1.000_r8,0.0006_r8,0.002_r8,0.100_r8,0.020_r8,0.060_r8/ - data z0 (4,1:NLUse) & - /1.000_r8,0.001_r8,0.001_r8,1.000_r8,1.000_r8,1.000_r8,0.0006_r8,0.002_r8,0.001_r8,0.001_r8,0.040_r8/ - data z0 (5,1:NLUse) & - /1.000_r8,0.030_r8,0.020_r8,1.000_r8,1.000_r8,1.000_r8,0.0006_r8,0.002_r8,0.010_r8,0.030_r8,0.060_r8/ - - !real(r8), private, dimension(11,5), parameter :: z0xxx = reshape ( & - ! (/ 1.000,0.250,0.050,1.000,1.000,1.000,0.0006,0.002,0.150,0.100,0.100 , & - ! 
1.000,0.100,0.050,1.000,1.000,1.000,0.0006,0.002,0.100,0.080,0.080 , & - ! 1.000,0.005,0.050,1.000,1.000,1.000,0.0006,0.002,0.100,0.020,0.060 , & - ! 1.000,0.001,0.001,1.000,1.000,1.000,0.0006,0.002,0.001,0.001,0.040 , & - ! 1.000,0.030,0.020,1.000,1.000,1.000,0.0006,0.002,0.010,0.030,0.060 /), (/11,5/) ) - - !--------------------------------------------------------------------------- - ! public chemical data - !--------------------------------------------------------------------------- - - !--- data for foxd (reactivity factor for oxidation) ---- - real(r8), public, parameter :: dfoxd(n_species_table) = & - (/ 1._r8 & - ,1._r8 & - ,1._r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,1._r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,0._r8 & - ,0._r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,1._r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,1._r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,.1_r8 & - ,.1_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,.1_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & - ,.1_r8 & - ,.1_r8 & - ,.1_r8 & - ,1.e-36_r8 & - ,1.e-36_r8 & ! HCN - ,1.e-36_r8 & ! CH3CN - ,1.e-36_r8 & ! SO2 - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - ,0.1_r8 & - /) - - ! PRIVATE DATA: - - Interface seq_drydep_setHCoeff ! overload subroutine - Module Procedure set_hcoeff_scalar - Module Procedure set_hcoeff_vector - End Interface seq_drydep_setHCoeff - - real(r8), private, parameter :: small_value = 1.e-36_r8 !--- smallest value to use --- - - !--------------------------------------------------------------------------- - ! 
private chemical data - !--------------------------------------------------------------------------- - - !--- Names of species that can work with --- - character(len=20), public, parameter :: species_name_table(n_species_table) = & - (/ 'OX ' & - ,'H2O2 ' & - ,'OH ' & - ,'HO2 ' & - ,'CO ' & - ,'CH4 ' & - ,'CH3O2 ' & - ,'CH3OOH ' & - ,'CH2O ' & - ,'CHOOH ' & - ,'NO ' & - ,'NO2 ' & - ,'HNO3 ' & - ,'CO2 ' & - ,'NH3 ' & - ,'N2O5 ' & - ,'NO3 ' & - ,'CH3OH ' & - ,'HO2NO2 ' & - ,'O1D ' & - ,'C2H6 ' & - ,'C2H5O2 ' & - ,'PO2 ' & - ,'MACRO2 ' & - ,'ISOPO2 ' & - ,'C4H10 ' & - ,'CH3CHO ' & - ,'C2H5OOH ' & - ,'C3H6 ' & - ,'POOH ' & - ,'C2H4 ' & - ,'PAN ' & - ,'CH3COOOH' & - ,'C10H16 ' & - ,'CHOCHO ' & - ,'CH3COCHO' & - ,'GLYALD ' & - ,'CH3CO3 ' & - ,'C3H8 ' & - ,'C3H7O2 ' & - ,'CH3COCH3' & - ,'C3H7OOH ' & - ,'RO2 ' & - ,'ROOH ' & - ,'Rn ' & - ,'ISOP ' & - ,'MVK ' & - ,'MACR ' & - ,'C2H5OH ' & - ,'ONITR ' & - ,'ONIT ' & - ,'ISOPNO3 ' & - ,'HYDRALD ' & - ,'HCN ' & - ,'CH3CN ' & - ,'SO2 ' & - ,'SOAGff0 ' & - ,'SOAGff1 ' & - ,'SOAGff2 ' & - ,'SOAGff3 ' & - ,'SOAGff4 ' & - ,'SOAGbg0 ' & - ,'SOAGbg1 ' & - ,'SOAGbg2 ' & - ,'SOAGbg3 ' & - ,'SOAGbg4 ' & - ,'SOAG0 ' & - ,'SOAG1 ' & - ,'SOAG2 ' & - ,'SOAG3 ' & - ,'SOAG4 ' & - ,'IVOC ' & - ,'SVOC ' & - ,'IVOCbb ' & - ,'IVOCff ' & - ,'SVOCbb ' & - ,'SVOCff ' & - /) - - !--- data for effective Henry's Law coefficient --- - real(r8), public, parameter :: dheff(n_species_table*6) = & - (/1.15e-02_r8, 2560._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,8.33e+04_r8, 7379._r8,2.2e-12_r8,-3730._r8,0._r8 , 0._r8 & - ,3.00e+01_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,2.00e+03_r8, 6600._r8,3.5e-05_r8, 0._r8,0._r8 , 0._r8 & - ,1.00e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.47e+00_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.11e+02_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,6.30e+03_r8, 6425._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,5.53e+03_r8, 5700._r8,1.8e-04_r8,-1510._r8,0._r8 , 0._r8 & - ,1.90e-03_r8, 1480._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,6.40e-03_r8, 2500._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,0._r8 , 0._r8,2.6e+06_r8, 8700._r8,0._r8 , 0._r8 & - ,3.40e-02_r8, 2420._r8,4.5e-07_r8,-1000._r8,3.6e-11_r8,-1760._r8 & - ,7.40e+01_r8, 3400._r8,1.7e-05_r8, -450._r8,1.0e-14_r8,-6716._r8 & - ,2.14e+00_r8, 3362._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,0.65e+00_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,2.20e+02_r8, 4934._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,0._r8 , 0._r8,3.2e+01_r8, 0._r8,0._r8 , 0._r8 & - ,1.00e-16_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.47e+00_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.47e+00_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.47e+00_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.47e+00_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.14e+01_r8, 6267._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.36e+02_r8, 5995._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,2.20e+02_r8, 5653._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,5.00e+00_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,8.37e+02_r8, 5308._r8,1.8e-04_r8,-1510._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.00e+05_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.71e+03_r8, 7541._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,4.14e+04_r8, 4630._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.47e+00_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.45e-03_r8, 
2700._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.00e+06_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,2.70e+01_r8, 5300._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.36e+02_r8, 5995._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.47e+00_r8, 5241._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.36e+02_r8, 5995._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,0.00e+00_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.70e-03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,2.00e+02_r8, 6500._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.51e+03_r8, 6485._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.00e+03_r8, 6000._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.00e+01_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.00e+01_r8, 6000._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.20e+01_r8, 5000._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,5.00e+01_r8, 4000._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.23e+00_r8, 3120._r8,1.23e-02_r8,1960._r8,0._r8 , 0._r8 & - ,1.3e+07_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.2e+05_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,4.0e+05_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.3e+05_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.6e+05_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,7.9e+11_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,6.3e+10_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.2e+09_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,6.3e+08_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.2e+07_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,4.0e+11_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.2e+10_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.6e+09_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,3.2e+08_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.6e+07_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.e+03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.e+03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.e+03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.e+03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.e+03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - ,1.e+03_r8, 0._r8,0._r8 , 0._r8,0._r8 , 0._r8 & - /) - - real(r8), private, parameter :: wh2o = SHR_CONST_MWWV - real(r8), private, parameter :: mol_wgts(n_species_table) = & - (/ 47.9981995_r8, 34.0135994_r8, 17.0067997_r8, 33.0061989_r8, 28.0104008_r8, & - 16.0405998_r8, 47.0320015_r8, 48.0393982_r8, 30.0251999_r8, 46.0246010_r8, & - 30.0061398_r8, 46.0055389_r8, 63.0123405_r8, 44.0098000_r8, 17.0289402_r8, & - 108.010483_r8, 62.0049400_r8, 32.0400009_r8, 79.0117416_r8, 15.9994001_r8, & - 30.0664005_r8, 61.0578003_r8, 91.0830002_r8, 119.093399_r8, 117.119797_r8, & - 58.1180000_r8, 44.0509987_r8, 62.0652008_r8, 42.0774002_r8, 92.0904007_r8, & - 28.0515995_r8, 121.047943_r8, 76.0497971_r8, 136.228394_r8, 58.0355988_r8, & - 72.0614014_r8, 60.0503998_r8, 75.0423965_r8, 44.0922012_r8, 75.0836029_r8, & - 58.0768013_r8, 76.0910034_r8, 31.9988003_r8, 33.0061989_r8, 222.000000_r8, & - 68.1141968_r8, 70.0877991_r8, 70.0877991_r8, 46.0657997_r8, 147.125946_r8, & - 119.074341_r8, 162.117935_r8, 100.112999_r8, 27.0256_r8 , 41.0524_r8 , & - 64.064800_r8, 250._r8, 250._r8, 250._r8, 250._r8, & - 250._r8, 250._r8, 250._r8, 250._r8, 250._r8, & - 250._r8, 250._r8, 250._r8, 250._r8, 250._r8, & - 250._r8, 170.3_r8, 170.3_r8, 170.3_r8, 170.3_r8, & - 170.3_r8, 170.3_r8 /) - - - !=============================================================================== -CONTAINS - !=============================================================================== - - !==================================================================================== - - subroutine 
seq_drydep_readnl(NLFilename, ID, seq_drydep_fields) - - !======================================================================== - ! reads drydep_inparm namelist and sets up CCSM driver list of fields for - ! land-atmosphere communications. - ! - ! !REVISION HISTORY: - ! 2009-Feb-20 - E. Kluzek - Separate out as subroutine from previous input_init - !======================================================================== - - use shr_file_mod,only : shr_file_getUnit, shr_file_freeUnit - use shr_log_mod, only : s_logunit => shr_log_Unit - use seq_comm_mct,only : seq_comm_iamroot, seq_comm_setptrs - use shr_mpi_mod, only : shr_mpi_bcast - use shr_nl_mod, only : shr_nl_find_group_name - implicit none - - character(len=*), intent(in) :: NLFilename ! Namelist filename - integer , intent(in) :: ID ! seq_comm ID - character(len=*), intent(out) :: seq_drydep_fields - - !----- local ----- - integer :: i ! Indices - integer :: unitn ! namelist unit number - integer :: ierr ! error code - logical :: exists ! if file exists or not - character(len=8) :: token ! dry dep field name to add - integer :: mpicom ! MPI communicator - - !----- formats ----- - character(*),parameter :: subName = '(seq_drydep_read) ' - character(*),parameter :: F00 = "('(seq_drydep_read) ',8a)" - character(*),parameter :: FI1 = "('(seq_drydep_init) ',a,I2)" - - namelist /drydep_inparm/ drydep_list, drydep_method - - !----------------------------------------------------------------------------- - ! Read namelist and figure out the drydep field list to pass - ! First check if file exists and if not, n_drydep will be zero - !----------------------------------------------------------------------------- - - !--- Open and read namelist --- - if ( len_trim(NLFilename) == 0 )then - call shr_sys_abort( subName//'ERROR: nlfilename not set' ) - end if - call seq_comm_setptrs(ID,mpicom=mpicom) - if (seq_comm_iamroot(ID)) then - inquire( file=trim(NLFileName), exist=exists) - if ( exists ) then - unitn = shr_file_getUnit() - open( unitn, file=trim(NLFilename), status='old' ) - if ( s_loglev > 0 ) write(s_logunit,F00) & - 'Read in drydep_inparm namelist from: ', trim(NLFilename) - call shr_nl_find_group_name(unitn, 'drydep_inparm', ierr) - if (ierr == 0) then - ierr = 1 - do while ( ierr /= 0 ) - read(unitn, drydep_inparm, iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( subName//'ERROR: encountered end-of-file on namelist read' ) - endif - end do - else - write(s_logunit,*) 'seq_drydep_read: no drydep_inparm namelist found in ',NLFilename - endif - close( unitn ) - call shr_file_freeUnit( unitn ) - end if - end if - call shr_mpi_bcast( drydep_list, mpicom ) - call shr_mpi_bcast( drydep_method, mpicom ) - - n_drydep = 0 - - !--- Loop over species to fill list of fields to communicate for drydep --- - seq_drydep_fields = ' ' - do i=1,maxspc - if ( len_trim(drydep_list(i))==0 ) exit - write(token,333) i - seq_drydep_fields = trim(seq_drydep_fields)//':'//trim(token) - if ( i == 1 ) then - seq_drydep_fields = trim(token) - drydep_fields_token = trim(token) - endif - n_drydep = n_drydep+1 - enddo - - !--- Make sure method is valid and determine if land is passing drydep fields --- - lnd_drydep = n_drydep>0 .and. 
drydep_method == DD_XLND - - if ( s_loglev > 0 ) then - write(s_logunit,*) 'seq_drydep_read: drydep_method: ', trim(drydep_method) - if ( n_drydep == 0 )then - write(s_logunit,F00) 'No dry deposition fields will be transfered' - else - write(s_logunit,FI1) 'Number of dry deposition fields transfered is ', & - n_drydep - end if - end if - - if ( trim(drydep_method)/=trim(DD_XATM) .and. & - trim(drydep_method)/=trim(DD_XLND) .and. & - trim(drydep_method)/=trim(DD_TABL) ) then - if ( s_loglev > 0 ) then - write(s_logunit,*) 'seq_drydep_read: drydep_method : ', trim(drydep_method) - write(s_logunit,*) 'seq_drydep_read: drydep_method must be set to : ', & - DD_XATM,', ', DD_XLND,', or ', DD_TABL - end if - call shr_sys_abort('seq_drydep_read: incorrect dry deposition method specification') - endif - - ! Need to explicitly add Sl_ based on naming convention -333 format ('Sl_dd',i3.3) - - end subroutine seq_drydep_readnl - - !==================================================================================== - - subroutine seq_drydep_init( ) - - !======================================================================== - ! Initialization of dry deposition fields - ! reads drydep_inparm namelist and sets up CCSM driver list of fields for - ! land-atmosphere communications. - ! !REVISION HISTORY: - ! 2008-Nov-12 - F. Vitt - first version - ! 2009-Feb-20 - E. Kluzek - Check for existance of file if not return, set n_drydep=0 - ! 2009-Feb-20 - E. Kluzek - Move namelist read to separate subroutine - !======================================================================== - - use shr_log_mod, only : s_logunit => shr_log_Unit - use shr_infnan_mod, only: shr_infnan_posinf, assignment(=) - - implicit none - - !----- local ----- - integer :: i, l ! Indices - character(len=32) :: test_name ! field test name - !----- formats ----- - character(*),parameter :: subName = '(seq_drydep_init) ' - character(*),parameter :: F00 = "('(seq_drydep_init) ',8a)" - - !----------------------------------------------------------------------------- - ! Allocate and fill foxd, drat and mapping as well as species indices - !----------------------------------------------------------------------------- - - if ( n_drydep > 0 ) then - - allocate( foxd(n_drydep) ) - allocate( drat(n_drydep) ) - allocate( mapping(n_drydep) ) - - ! This initializes these variables to infinity. 
- foxd = shr_infnan_posinf - drat = shr_infnan_posinf - - mapping(:) = 0 - - end if - - h2_ndx=-1; ch4_ndx=-1; co_ndx=-1; mpan_ndx = -1; pan_ndx = -1; so2_ndx=-1; o3_ndx=-1; xpan_ndx=-1 - - !--- Loop over drydep species that need to be worked with --- - do i=1,n_drydep - if ( len_trim(drydep_list(i))==0 ) exit - - test_name = drydep_list(i) - - if( trim(test_name) == 'O3' ) then - test_name = 'OX' - end if - - !--- Figure out if species maps to a species in the species table --- - do l = 1,n_species_table - if( trim( test_name ) == trim( species_name_table(l) ) ) then - mapping(i) = l - exit - end if - end do - - !--- If it doesn't map to a species in the species table find species close enough --- - if( mapping(i) < 1 ) then - select case( trim(test_name) ) - case( 'H2' ) - test_name = 'CO' - case( 'HYAC', 'CH3COOH', 'EOOH', 'IEPOX' ) - test_name = 'CH2O' - case( 'O3S', 'O3INERT', 'MPAN' ) - test_name = 'OX' - case( 'ISOPOOH', 'MACROOH', 'Pb', 'XOOH', 'H2SO4' ) - test_name = 'HNO3' - case( 'ALKOOH', 'MEKOOH', 'TOLOOH', 'BENOOH', 'XYLOOH', 'SOGM','SOGI','SOGT','SOGB','SOGX' ) - test_name = 'CH3OOH' - case( 'SOA', 'SO4', 'CB1', 'CB2', 'OC1', 'OC2', 'NH3', 'NH4', 'SA1', 'SA2', 'SA3', 'SA4','HCN','CH3CN','HCOOH' ) - test_name = 'OX' ! this is just a place holder. values are explicitly set below - case( 'SOAM', 'SOAI', 'SOAT', 'SOAB', 'SOAX' ) - test_name = 'OX' ! this is just a place holder. values are explicitly set below - case( 'SOAGbb0' ) - test_name = 'SOAGff0' - case( 'SOAGbb1' ) - test_name = 'SOAGff1' - case( 'SOAGbb2' ) - test_name = 'SOAGff2' - case( 'SOAGbb3' ) - test_name = 'SOAGff3' - case( 'SOAGbb4' ) - test_name = 'SOAGff4' - case( 'NOA', 'ALKNIT', 'ISOPNITA', 'ISOPNITB', 'HONITR', 'ISOPNOOH', 'NC4CHO', 'NC4CH2OH', 'TERPNIT', 'NTERPOOH' ) - test_name = 'H2O2' - case( 'PHENOOH', 'BENZOOH', 'C6H5OOH', 'BZOOH', 'XYLOLOOH', 'XYLENOOH', 'HPALD' ) - test_name = 'CH3OOH' - case( 'TERPOOH', 'TERP2OOH', 'MBOOOH' ) - test_name = 'HNO3' - case( 'TERPROD1', 'TERPROD2' ) - test_name = 'CH2O' - case( 'HMPROP' ) - test_name = 'GLYALD' - case( 'O3A', 'XMPAN' ) - test_name = 'OX' - case( 'XPAN' ) - test_name = 'PAN' - case( 'XNO' ) - test_name = 'NO' - case( 'XNO2' ) - test_name = 'NO2' - case( 'XHNO3' ) - test_name = 'HNO3' - case( 'XONIT' ) - test_name = 'ONIT' - case( 'XONITR' ) - test_name = 'ONITR' - case( 'XHO2NO2') - test_name = 'HO2NO2' - case( 'XNH4NO3' ) - test_name = 'HNO3' - case( 'COhc','COme') - test_name = 'CO' ! this is just a place holder. values are set in drydep_fromlnd - case( 'CO01','CO02','CO03','CO04','CO05','CO06','CO07','CO08','CO09','CO10' ) - test_name = 'CO' ! this is just a place holder. values are set in drydep_fromlnd - case( 'CO11','CO12','CO13','CO14','CO15','CO16','CO17','CO18','CO19','CO20' ) - test_name = 'CO' ! this is just a place holder. values are set in drydep_fromlnd - case( 'CO21','CO22','CO23','CO24','CO25','CO26','CO27','CO28','CO29','CO30' ) - test_name = 'CO' ! this is just a place holder. values are set in drydep_fromlnd - case( 'CO31','CO32','CO33','CO34','CO35','CO36','CO37','CO38','CO39','CO40' ) - test_name = 'CO' ! this is just a place holder. values are set in drydep_fromlnd - case( 'CO41','CO42','CO43','CO44','CO45','CO46','CO47','CO48','CO49','CO50' ) - test_name = 'CO' ! this is just a place holder. 
values are set in drydep_fromlnd - case( 'NH4NO3' ) - test_name = 'HNO3' - case default - test_name = 'blank' - end select - - !--- If found a match check the species table again --- - if( trim(test_name) /= 'blank' ) then - do l = 1,n_species_table - if( trim( test_name ) == trim( species_name_table(l) ) ) then - mapping(i) = l - exit - end if - end do - else - if ( s_loglev > 0 ) write(s_logunit,F00) trim(drydep_list(i)), & - ' not in tables; will have dep vel = 0' - call shr_sys_abort( subName//': '//trim(drydep_list(i))//' is not in tables' ) - end if - end if - - !--- Figure out the specific species indices --- - if ( trim(drydep_list(i)) == 'H2' ) h2_ndx = i - if ( trim(drydep_list(i)) == 'CO' ) co_ndx = i - if ( trim(drydep_list(i)) == 'CH4' ) ch4_ndx = i - if ( trim(drydep_list(i)) == 'MPAN' ) mpan_ndx = i - if ( trim(drydep_list(i)) == 'PAN' ) pan_ndx = i - if ( trim(drydep_list(i)) == 'SO2' ) so2_ndx = i - if ( trim(drydep_list(i)) == 'OX' .or. trim(drydep_list(i)) == 'O3' ) o3_ndx = i - if ( trim(drydep_list(i)) == 'O3A' ) o3a_ndx = i - if ( trim(drydep_list(i)) == 'XPAN' ) xpan_ndx = i - - if( mapping(i) > 0) then - l = mapping(i) - foxd(i) = dfoxd(l) - drat(i) = sqrt(mol_wgts(l)/wh2o) - endif - - enddo - - where( rgss < 1._r8 ) - rgss = 1._r8 - endwhere - - where( rac < small_value) - rac = small_value - endwhere - - end subroutine seq_drydep_init - - !==================================================================================== - - subroutine set_hcoeff_scalar( sfc_temp, heff ) - - !======================================================================== - ! Interface to seq_drydep_setHCoeff when input is scalar - ! wrapper routine used when surface temperature is a scalar (single column) rather - ! than an array (multiple columns). - ! - ! !REVISION HISTORY: - ! 2008-Nov-12 - F. Vitt - first version - !======================================================================== - - implicit none - - real(r8), intent(in) :: sfc_temp ! Input surface temperature - real(r8), intent(out) :: heff(n_drydep) ! Output Henry's law coefficients - - !----- local ----- - real(r8) :: sfc_temp_tmp(1) ! surface temp - - sfc_temp_tmp(:) = sfc_temp - call set_hcoeff_vector( 1, sfc_temp_tmp, heff(:n_drydep) ) - - end subroutine set_hcoeff_scalar - - !==================================================================================== - - subroutine set_hcoeff_vector( ncol, sfc_temp, heff ) - - !======================================================================== - ! Interface to seq_drydep_setHCoeff when input is vector - ! sets dry depositions coefficients -- used by both land and atmosphere models - ! !REVISION HISTORY: - ! 2008-Nov-12 - F. Vitt - first version - !======================================================================== - - use shr_log_mod, only : s_logunit => shr_log_Unit - - implicit none - - integer, intent(in) :: ncol ! Input size of surface-temp vector - real(r8), intent(in) :: sfc_temp(ncol) ! Surface temperature - real(r8), intent(out) :: heff(ncol,n_drydep) ! Henry's law coefficients - - !----- local ----- - real(r8), parameter :: t0 = 298._r8 ! Standard Temperature - real(r8), parameter :: ph_inv = 1._r8/ph ! Inverse of PH - integer :: m, l, id ! indices - real(r8) :: e298 ! Henry's law coefficient @ standard temperature (298K) - real(r8) :: dhr ! temperature dependence of Henry's law coefficient - real(r8) :: dk1s(ncol) ! DK Work array 1 - real(r8) :: dk2s(ncol) ! DK Work array 2 - real(r8) :: wrk(ncol) ! 
Work array - - !----- formats ----- - character(*),parameter :: subName = '(seq_drydep_set_hcoeff) ' - character(*),parameter :: F00 = "('(seq_drydep_set_hcoeff) ',8a)" - - !------------------------------------------------------------------------------- - ! notes: - !------------------------------------------------------------------------------- - - wrk(:) = (t0 - sfc_temp(:))/(t0*sfc_temp(:)) - do m = 1,n_drydep - l = mapping(m) - id = 6*(l - 1) - e298 = dheff(id+1) - dhr = dheff(id+2) - heff(:,m) = e298*exp( dhr*wrk(:) ) - !--- Calculate coefficients based on the drydep tables --- - if( dheff(id+3) /= 0._r8 .and. dheff(id+5) == 0._r8 ) then - e298 = dheff(id+3) - dhr = dheff(id+4) - dk1s(:) = e298*exp( dhr*wrk(:) ) - where( heff(:,m) /= 0._r8 ) - heff(:,m) = heff(:,m)*(1._r8 + dk1s(:)*ph_inv) - elsewhere - heff(:,m) = dk1s(:)*ph_inv - endwhere - end if - !--- For coefficients that are non-zero AND CO2 or NH3 handle things this way --- - if( dheff(id+5) /= 0._r8 ) then - if( trim( drydep_list(m) ) == 'CO2' .or. trim( drydep_list(m) ) == 'NH3' ) then - e298 = dheff(id+3) - dhr = dheff(id+4) - dk1s(:) = e298*exp( dhr*wrk(:) ) - e298 = dheff(id+5) - dhr = dheff(id+6) - dk2s(:) = e298*exp( dhr*wrk(:) ) - !--- For Carbon dioxide --- - if( trim(drydep_list(m)) == 'CO2' ) then - heff(:,m) = heff(:,m)*(1._r8 + dk1s(:)*ph_inv)*(1._r8 + dk2s(:)*ph_inv) - !--- For NH3 --- - else if( trim( drydep_list(m) ) == 'NH3' ) then - heff(:,m) = heff(:,m)*(1._r8 + dk1s(:)*ph/dk2s(:)) - !--- This can't happen --- - else - write(s_logunit,F00) 'Bad species ',drydep_list(m) - call shr_sys_abort( subName//'ERROR: in assigning coefficients' ) - end if - end if - end if - end do - - end subroutine set_hcoeff_vector - - !=============================================================================== - -end module seq_drydep_mod diff --git a/src/drivers/mct/shr/seq_flds_mod.F90 b/src/drivers/mct/shr/seq_flds_mod.F90 deleted file mode 100644 index 02b6cd1851f..00000000000 --- a/src/drivers/mct/shr/seq_flds_mod.F90 +++ /dev/null @@ -1,3806 +0,0 @@ -module seq_flds_mod - - !==================================================================== - ! New standardized naming convention - !==================================================================== - ! - ! --------- - ! definitions: - ! --------- - ! state-prefix - ! first 3 characters: Sx_, Sa_, Si_, Sl_, So_ - ! one letter indices: x,a,l,i,o,g,r - ! x => coupler (mapping, merging, atm/ocn flux calc done on coupler procs) - ! a => atm - ! l => lnd - ! i => ice - ! o => ocn - ! g => glc - ! r => rof - ! w => wav - ! - ! state-name - ! what follows state prefix - ! - ! flux-prefix - ! first 5 characters: Flmn__ - ! lm => between components l and m - ! n => computed by component n - ! example: Fioi => ice/ocn flux computed by ice - ! example: Fall => atm/lnd flux computed by lnd - ! If flux prefix has first letter of P (so first five characters are PFlmn_) - ! then flux is passed straight through without scaling by the corresponding fraction) - ! - ! flux-name - ! what follows flux-prefix - ! - ! --------- - ! rules: - ! --------- - ! 1) states: - ! a) atm attributes fields that HAVE a state-prefix of Sx_ in seq_flds_x2a_states - ! rule: will merge all identical values of the state-names from - ! seq_flds_i2x_states - ! seq_flds_l2x_states - ! seq_flds_o2x_states - ! seq_flds_xao_states - ! to obtain output state-name in seq_flds_x2a_states - ! - ! rule: to merge input states that originate in the - ! lnd (l2x_a) will be scaled by the lndfrac - ! 
ice (i2x_a) will be scaled by the icefrac - ! cpl (xao_a) will be scaled by the ocnfrac - ! ocn (o2x_a) will be scaled by the ocnfrac - ! - ! example: - ! seq_flds_l2x_states = "Sl_t" - ! seq_flds_i2x_states = "Si_t" - ! seq_flds_o2x_states = "So_t" - ! seq_flds_x2a_states = "Sx_t" - ! attribute fields Sl_t, Si_t, So_t, in - ! attribute vectors l2x_a, i2x_a, o2x_a will be - ! merged to obtain attribute Sx_t in attribute vector x2a_a - ! - ! b) atm attribute fields that DO NOT HAVE a state-prefix of Sx_ in seq_flds_x2a_states - ! rule: copy directly all variables that identical state-prefix - ! AND state-name in - ! seq_flds_i2x_states and seq_flds_x2a_states - ! seq_flds_l2x_states and seq_flds_x2a_states - ! seq_flds_o2x_states and seq_flds_x2a_states - ! seq_flds_xao_states and seq_flds_x2a_states - ! - ! example - ! seq_flds_i2x_states = ":Si_snowh" - ! seq_flds_x2a_states = ":Si_snowh" - ! attribute field of Si_snowh in i2x_a will be copied to - ! attribute field Si_snowh in x2a_a - ! - ! 2) fluxes: - ! rule: will merge all identical values of the flux-names from - ! seq_flds_i2x_states - ! seq_flds_l2x_states - ! seq_flds_o2x_states - ! seq_flds_xao_states - ! to obtain output state-name in seq_flds_x2a_states - ! - ! rule: input flux fields that originate in the - ! lnd (l2x_a) will be scaled by the lndfrac - ! ice (i2x_a) will be scaled by the icefrac - ! - ignore all fluxes that are ice/ocn fluxes (e.g. Fioi_) - ! cpl (xao_a) will be scaled by the ocnfrac - ! ocn (o2x_a) will be scaled by the ocnfrac+icefrac - ! - !==================================================================== - ! - ! New user specified fields - ! - !==================================================================== - ! New fields that are user specidied can be added as namelist variables - ! by the user in the cpl namelist seq_flds_user using the namelist variable - ! array cplflds_customs. The user specified new fields must follow the - ! above naming convention. - ! As an example, say you want to add a new state 'foo' that is passed - ! from the land to the atm - you would do this as follows - ! &seq_flds_user - ! cplflds_custom = 'Sa_foo->a2x', 'Sa_foo->x2a' - ! / - ! This would add the field 'Sa_foo' to the character strings defining the - ! attribute vectors a2x and x2a. It is assumed that code would need to be - ! introduced in the atm and land components to deal with this new attribute - ! vector field. - ! Currently, the only way to add this is to edit $CASEROOT/user_nl_cpl - !==================================================================== - ! - ! Coupler fields use cases - ! - !==================================================================== - ! Previously, new fields that were needed to be passed between components - ! for certain compsets were specified by cpp-variables. This has been - ! modified to now be use cases. The use cases are specified in the - ! namelist cpl_flds_inparm and are currently triggered by the xml - ! variables CCSM_VOC, CCSM_BGC and GLC_NEC. 
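As an aside, a minimal, self-contained sketch of how the naming convention documented above can be decoded (illustrative only: the program and the classify routine are hypothetical and not part of CIME, which assembles these field lists through seq_flds_add and metadata_set):

  ! Illustrative sketch only (not CIME code): decodes coupler field names
  ! following the convention documented above, i.e. S<src>_ for states,
  ! F<l><m><n>_ for fluxes, and a leading P for pass-through fluxes.
  program demo_fldname
    implicit none
    call classify('Sa_tbot')     ! atm state
    call classify('Fioi_melth')  ! ice/ocn flux computed by ice
    call classify('PFioi_bergw') ! pass-through flux, not scaled by fractions
  contains
    subroutine classify(fld)
      character(len=*), intent(in) :: fld
      if (fld(1:1) == 'S') then
         write(*,'(a)') trim(fld)//': state from component "'//fld(2:2)//'"'
      else if (fld(1:2) == 'PF') then
         write(*,'(a)') trim(fld)//': pass-through flux between "'//fld(3:3)// &
              '" and "'//fld(4:4)//'", computed by "'//fld(5:5)//'"'
      else if (fld(1:1) == 'F') then
         write(*,'(a)') trim(fld)//': flux between "'//fld(2:2)//'" and "'// &
              fld(3:3)//'", computed by "'//fld(4:4)//'"'
      end if
    end subroutine classify
  end program demo_fldname
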
- !==================================================================== - - use shr_kind_mod , only : CX => shr_kind_CX, CXX => shr_kind_CXX - use shr_sys_mod , only : shr_sys_abort - use seq_comm_mct , only : seq_comm_iamroot, seq_comm_setptrs, logunit - use seq_drydep_mod , only : seq_drydep_init, seq_drydep_readnl, lnd_drydep - use shr_megan_mod , only : shr_megan_readnl, shr_megan_mechcomps_n - use shr_fire_emis_mod , only : shr_fire_emis_readnl, shr_fire_emis_mechcomps_n, shr_fire_emis_ztop_token - use shr_carma_mod , only : shr_carma_readnl - use shr_ndep_mod , only : shr_ndep_readnl - use shr_flds_mod , only : seq_flds_dom_coord=>shr_flds_dom_coord, seq_flds_dom_other=>shr_flds_dom_other - - implicit none - public - - interface seq_flds_lookup; module procedure & - seq_flds_esmf_metadata_get - end interface seq_flds_lookup - - integer, parameter, private :: CSS = 256 ! use longer short character - integer, parameter, private :: CLL = 1024 - character(len=CXX) :: seq_drydep_fields ! List of dry-deposition fields - character(len=CXX) :: megan_voc_fields ! List of MEGAN VOC emission fields - character(len=CXX) :: fire_emis_fields ! List of fire emission fields - character(len=CX) :: carma_fields ! List of CARMA fields from lnd->atm - character(len=CX) :: ndep_fields ! List of nitrogen deposition fields from atm->lnd/ocn - integer :: ice_ncat ! number of sea ice thickness categories - logical :: seq_flds_i2o_per_cat! .true. if select per ice thickness category fields are passed from ice to ocean - logical :: add_ndep_fields ! .true. => add ndep fields - - !---------------------------------------------------------------------------- - ! metadata - !---------------------------------------------------------------------------- - - character(len=*),parameter :: undef = 'undefined' - integer ,parameter :: nmax = 1000 ! maximum number of entries in lookup_entry - integer :: n_entries = 0 ! actual number of entries in lookup_entry - character(len=CSS), dimension(nmax, 4) :: lookup_entry = undef - - !---------------------------------------------------------------------------- - ! 
state + flux fields - !---------------------------------------------------------------------------- - - character(CXX) :: seq_flds_a2x_states - character(CXX) :: seq_flds_a2x_fluxes - character(CXX) :: seq_flds_x2a_states - character(CXX) :: seq_flds_x2a_fluxes - - character(CXX) :: seq_flds_i2x_states - character(CXX) :: seq_flds_i2x_fluxes - character(CXX) :: seq_flds_x2i_states - character(CXX) :: seq_flds_x2i_fluxes - - character(CXX) :: seq_flds_l2x_states - character(CXX) :: seq_flds_l2x_states_to_glc - character(CXX) :: seq_flds_l2x_fluxes - character(CXX) :: seq_flds_l2x_fluxes_to_glc - character(CXX) :: seq_flds_x2l_states - character(CXX) :: seq_flds_x2l_states_from_glc - character(CXX) :: seq_flds_x2l_fluxes - character(CXX) :: seq_flds_x2l_fluxes_from_glc - - character(CXX) :: seq_flds_o2x_states - character(CXX) :: seq_flds_o2x_fluxes - character(CXX) :: seq_flds_x2o_states - character(CXX) :: seq_flds_x2o_fluxes - - character(CXX) :: seq_flds_g2x_states - character(CXX) :: seq_flds_g2x_states_to_lnd - character(CXX) :: seq_flds_g2x_fluxes - character(CXX) :: seq_flds_g2x_fluxes_to_lnd - character(CXX) :: seq_flds_g2o_liq_fluxes - character(CXX) :: seq_flds_g2o_ice_fluxes - character(CXX) :: seq_flds_x2g_states - character(CXX) :: seq_flds_x2g_states_from_lnd - character(CXX) :: seq_flds_x2g_states_from_ocn - character(CXX) :: seq_flds_x2g_fluxes - character(CXX) :: seq_flds_x2g_fluxes_from_lnd - - character(CXX) :: seq_flds_w2x_states - character(CXX) :: seq_flds_w2x_fluxes - character(CXX) :: seq_flds_x2w_states - character(CXX) :: seq_flds_x2w_fluxes - - character(CXX) :: seq_flds_xao_albedo - character(CXX) :: seq_flds_xao_states - character(CXX) :: seq_flds_xao_fluxes - character(CXX) :: seq_flds_xao_diurnl ! for diurnal cycle - - character(CXX) :: seq_flds_r2x_states - character(CXX) :: seq_flds_r2x_fluxes - character(CXX) :: seq_flds_x2r_states - character(CXX) :: seq_flds_x2r_fluxes - character(CXX) :: seq_flds_r2o_liq_fluxes - character(CXX) :: seq_flds_r2o_ice_fluxes - - !character(CXX) :: seq_flds_x2z_states - !character(CXX) :: seq_flds_z2x_states - character(CXX) :: seq_flds_z2x_fluxes - character(CXX) :: seq_flds_x2z_fluxes - - !---------------------------------------------------------------------------- - ! combined state/flux fields - !---------------------------------------------------------------------------- - - character(CXX) :: seq_flds_dom_fields - character(CXX) :: seq_flds_a2x_fields - character(CXX) :: seq_flds_x2a_fields - character(CXX) :: seq_flds_i2x_fields - character(CXX) :: seq_flds_x2i_fields - character(CXX) :: seq_flds_l2x_fields - character(CXX) :: seq_flds_l2x_fields_to_glc - character(CXX) :: seq_flds_x2l_fields - character(CXX) :: seq_flds_x2l_fields_from_glc - character(CXX) :: seq_flds_o2x_fields - character(CXX) :: seq_flds_x2o_fields - character(CXX) :: seq_flds_xao_fields - character(CXX) :: seq_flds_r2x_fields - character(CXX) :: seq_flds_x2r_fields - character(CXX) :: seq_flds_g2x_fields - character(CXX) :: seq_flds_g2x_fields_to_lnd - character(CXX) :: seq_flds_x2g_fields - character(CXX) :: seq_flds_w2x_fields - character(CXX) :: seq_flds_x2w_fields - - !---------------------------------------------------------------------------- - ! 
component names - !---------------------------------------------------------------------------- - - character(32) :: atmname='atm' - character(32) :: ocnname='ocn' - character(32) :: icename='ice' - character(32) :: lndname='lnd' - character(32) :: glcname='glc' - character(32) :: wavname='wav' - character(32) :: rofname='rof' - - ! namelist variables - logical :: nan_check_component_fields - - !---------------------------------------------------------------------------- -contains - !---------------------------------------------------------------------------- - - subroutine seq_flds_set(nmlfile, ID, infodata) - - ! !USES: - use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit - use shr_string_mod, only : shr_string_listIntersect - use shr_mpi_mod, only : shr_mpi_bcast - use glc_elevclass_mod, only : glc_elevclass_init - use seq_infodata_mod, only : seq_infodata_type, seq_infodata_getdata - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*), intent(in) :: nmlfile ! Name-list filename - integer , intent(in) :: ID ! seq_comm ID - type(seq_infodata_type), intent(in) :: infodata - - ! !LOCAL VARIABLES: - integer :: mpicom ! MPI communicator - integer :: ierr ! I/O error code - integer :: unitn ! Namelist unit number to read - - character(len=CSS) :: attname - character(len=CSS) :: units - character(len=CSS) :: longname - character(len=CSS) :: stdname - integer :: num - character(len= 2) :: cnum - character(len=CSS) :: name - character(len=CSS) :: cime_model - - character(CXX) :: dom_coord = '' - character(CXX) :: dom_other = '' - - character(CXX) :: a2x_states = '' - character(CXX) :: a2x_fluxes = '' - character(CXX) :: x2a_states = '' - character(CXX) :: x2a_fluxes = '' - character(CXX) :: i2x_states = '' - character(CXX) :: i2x_fluxes = '' - character(CXX) :: x2i_states = '' - character(CXX) :: x2i_fluxes = '' - character(CXX) :: l2x_states = '' - character(CXX) :: l2x_states_to_glc = '' - character(CXX) :: l2x_fluxes = '' - character(CXX) :: l2x_fluxes_to_glc = '' - character(CXX) :: x2l_states = '' - character(CXX) :: x2l_states_from_glc = '' - character(CXX) :: x2l_fluxes = '' - character(CXX) :: x2l_fluxes_from_glc = '' - character(CXX) :: o2x_states = '' - character(CXX) :: o2x_fluxes = '' - character(CXX) :: x2o_states = '' - character(CXX) :: x2o_fluxes = '' - character(CXX) :: g2x_states = '' - character(CXX) :: g2x_states_to_lnd = '' - character(CXX) :: g2x_fluxes = '' - character(CXX) :: g2x_fluxes_to_lnd = '' - character(CXX) :: g2o_liq_fluxes = '' - character(CXX) :: g2o_ice_fluxes = '' - character(CXX) :: x2g_states = '' - character(CXX) :: x2g_states_from_lnd = '' - character(CXX) :: x2g_states_from_ocn = '' - character(CXX) :: x2g_fluxes = '' - character(CXX) :: x2g_fluxes_from_lnd = '' - character(CXX) :: xao_albedo = '' - character(CXX) :: xao_states = '' - character(CXX) :: xao_fluxes = '' - character(CXX) :: xao_diurnl = '' - character(CXX) :: r2x_states = '' - character(CXX) :: r2x_fluxes = '' - character(CXX) :: x2r_states = '' - character(CXX) :: x2r_fluxes = '' - character(CXX) :: w2x_states = '' - character(CXX) :: w2x_fluxes = '' - character(CXX) :: x2w_states = '' - character(CXX) :: x2w_fluxes = '' - character(CXX) :: r2o_liq_fluxes = '' - character(CXX) :: r2o_ice_fluxes = '' - - character(CXX) :: stringtmp = '' - - !------ namelist ----- - character(len=CSS) :: fldname, fldflow - logical :: is_state, is_flux - integer :: i,n - - ! 
use cases namelists - logical :: flds_co2a - logical :: flds_co2b - logical :: flds_co2c - logical :: flds_co2_dmsa - logical :: flds_bgc_oi - logical :: flds_wiso - integer :: glc_nec - - namelist /seq_cplflds_inparm/ & - flds_co2a, flds_co2b, flds_co2c, flds_co2_dmsa, flds_wiso, glc_nec, & - ice_ncat, seq_flds_i2o_per_cat, flds_bgc_oi, nan_check_component_fields - - ! user specified new fields - integer, parameter :: nfldmax = 200 - character(len=CLL) :: cplflds_custom(nfldmax) = '' - - namelist /seq_cplflds_userspec/ & - cplflds_custom - - character(len=*),parameter :: subname = '(seq_flds_set) ' - - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(ID,mpicom=mpicom) - - call seq_infodata_GetData(infodata, cime_model=cime_model) - - !--------------------------------------------------------------------------- - ! Read in namelist for use cases - !--------------------------------------------------------------------------- - ! TODO: permit duplicates to occur - then check for this in seq_flds_add - ! TODO: add entries for lookup entry table for custom fields - !--------------------------------------------------------------------------- - - if (seq_comm_iamroot(ID)) then - flds_co2a = .false. - flds_co2b = .false. - flds_co2c = .false. - flds_co2_dmsa = .false. - flds_bgc_oi = .false. - flds_wiso = .false. - glc_nec = 0 - ice_ncat = 1 - seq_flds_i2o_per_cat = .false. - nan_check_component_fields = .false. - - unitn = shr_file_getUnit() - write(logunit,"(A)") subname//': read seq_cplflds_inparm namelist from: '& - //trim(nmlfile) - open( unitn, file=trim(nmlfile), status='old' ) - ierr = 1 - do while( ierr /= 0 ) - read(unitn,nml=seq_cplflds_inparm,iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( & - subname//"ERROR: namelist read returns an EOF or EOR condition" ) - end if - end do - close(unitn) - call shr_file_freeUnit( unitn ) - end if - call shr_mpi_bcast(flds_co2a , mpicom) - call shr_mpi_bcast(flds_co2b , mpicom) - call shr_mpi_bcast(flds_co2c , mpicom) - call shr_mpi_bcast(flds_co2_dmsa, mpicom) - call shr_mpi_bcast(flds_bgc_oi , mpicom) - call shr_mpi_bcast(flds_wiso , mpicom) - call shr_mpi_bcast(glc_nec , mpicom) - call shr_mpi_bcast(ice_ncat , mpicom) - call shr_mpi_bcast(seq_flds_i2o_per_cat, mpicom) - call shr_mpi_bcast(nan_check_component_fields, mpicom) - - call glc_elevclass_init(glc_nec) - - !--------------------------------------------------------------------------- - ! Read in namelists for user specified new fields - !--------------------------------------------------------------------------- - ! TODO: permit duplicates to occur - then check for this in seq_flds_add - ! TODO: add entries for lookup entry table for custom fields - !--------------------------------------------------------------------------- - - if (seq_comm_iamroot(ID)) then - cplflds_custom(:) = ' ' - - unitn = shr_file_getUnit() - write(logunit,"(A)") subname//': read seq_cplflds_userspec namelist from: '& - //trim(nmlfile) - open( unitn, file=trim(nmlfile), status='old' ) - ierr = 1 - do while( ierr /= 0 ) - read(unitn,nml=seq_cplflds_userspec,iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( & - subname//"ERROR: namelist read returns an EOF or EOR condition" ) - end if - end do - close(unitn) - call shr_file_freeUnit( unitn ) - end if - do n = 1, nfldmax - call shr_mpi_bcast(cplflds_custom(n), mpicom) - end do - - ! 
add customized fields through coupler - - do n = 1,nfldmax - if (cplflds_custom(n) /= ' ') then - i = scan(cplflds_custom(n),'->') - fldname = trim(adjustl(cplflds_custom(n)(:i-1))) - fldflow = trim(adjustl(cplflds_custom(n)(i+2:))) - - if (fldname(1:1) == 'S') then - is_state = .true. - is_flux = .false. - else if (fldname (1:1) == 'F') then - is_state = .false. - is_flux = .true. - else if (fldname (1:2) == 'PF') then - is_state = .false. - is_flux = .true. - else - write(logunit,*) subname//'ERROR: fldname must start with S,F,P, not ',trim(fldname) - call shr_sys_abort(subname//"ERROR: fldname must start with S, F, or P") - end if - - select case (trim(fldflow)) - case('a2x') - if (is_state) call seq_flds_add(a2x_states,trim(fldname)) - if (is_flux ) call seq_flds_add(a2x_fluxes,trim(fldname)) - case('x2a') - if (is_state) call seq_flds_add(x2a_states,trim(fldname)) - if (is_flux ) call seq_flds_add(x2a_fluxes,trim(fldname)) - case('l2x') - if (is_state) call seq_flds_add(l2x_states,trim(fldname)) - if (is_flux ) call seq_flds_add(l2x_fluxes,trim(fldname)) - case('x2l') - if (is_state) call seq_flds_add(x2l_states,trim(fldname)) - if (is_flux ) call seq_flds_add(x2l_fluxes,trim(fldname)) - case('r2x') - if (is_state) call seq_flds_add(r2x_states,trim(fldname)) - if (is_flux ) call seq_flds_add(r2x_fluxes,trim(fldname)) - case('x2r') - if (is_state) call seq_flds_add(x2r_states,trim(fldname)) - if (is_flux ) call seq_flds_add(x2r_fluxes,trim(fldname)) - case('i2x') - if (is_state) call seq_flds_add(i2x_states,trim(fldname)) - if (is_flux ) call seq_flds_add(i2x_fluxes,trim(fldname)) - case('x2i') - if (is_state) call seq_flds_add(x2i_states,trim(fldname)) - if (is_flux ) call seq_flds_add(x2i_fluxes,trim(fldname)) - case('o2x') - if (is_state) call seq_flds_add(o2x_states,trim(fldname)) - if (is_flux ) call seq_flds_add(o2x_fluxes,trim(fldname)) - case('x2o') - if (is_state) call seq_flds_add(x2o_states,trim(fldname)) - if (is_flux ) call seq_flds_add(x2o_fluxes,trim(fldname)) - case('g2x') - if (is_state) call seq_flds_add(g2x_states,trim(fldname)) - if (is_flux ) call seq_flds_add(g2x_fluxes,trim(fldname)) - case('x2g') - if (is_state) call seq_flds_add(x2g_states,trim(fldname)) - if (is_flux ) call seq_flds_add(x2g_fluxes,trim(fldname)) - case default - write(logunit,*) subname//'ERROR: ',trim(cplflds_custom(n)),& - ' not a recognized value' - call shr_sys_abort() - end select - else - exit - end if - end do - - !---------------------------------------------------------- - ! 
domain coordinates - !---------------------------------------------------------- - - call seq_flds_add(dom_coord,'lat') - longname = 'latitude' - stdname = 'latitude' - units = 'degrees north' - attname = 'lat' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(dom_coord,'lon') - longname = 'longitude' - stdname = 'longitude' - units = 'degrees east' - attname = 'lon' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(dom_coord,'hgt') - longname = 'height' - stdname = 'height, depth, or levels' - units = 'unitless' - attname = 'hgt' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(dom_other,'area') - longname = 'cell_area_model' - stdname = 'cell area from model' - units = 'radian^2' - attname = 'area' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(dom_other,'aream') - longname = 'cell_area_mapping' - stdname = 'cell area from mapping file' - units = 'radian^2' - attname = 'aream' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(dom_other,'mask') - longname = 'mask' - stdname = 'mask' - units = '1' - attname = 'mask' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(dom_other,'frac') - longname = 'area_fraction' - stdname = 'area fraction' - units = '1' - attname = 'frac' - call metadata_set(attname, longname, stdname, units) - - !---------------------------------------------------------- - ! states/fluxes from atm - !---------------------------------------------------------- - - ! height at the lowest model level (m) - call seq_flds_add(a2x_states,"Sa_z") - call seq_flds_add(x2l_states,"Sa_z") - call seq_flds_add(x2i_states,"Sa_z") - longname = 'Height at the lowest model level' - stdname = 'height' - units = 'm' - attname = 'Sa_z' - call metadata_set(attname, longname, stdname, units) - - ! topographic height (m) - call seq_flds_add(a2x_states,"Sa_topo") - call seq_flds_add(x2l_states,"Sa_topo") - longname = 'Surface height' - stdname = 'height' - units = 'm' - attname = 'Sa_topo' - call metadata_set(attname, longname, stdname, units) - - ! zonal wind at the lowest model level (m/s) - call seq_flds_add(a2x_states,"Sa_u") - call seq_flds_add(x2l_states,"Sa_u") - call seq_flds_add(x2i_states,"Sa_u") - call seq_flds_add(x2w_states,"Sa_u") - longname = 'Zonal wind at the lowest model level' - stdname = 'eastward_wind' - units = 'm s-1' - attname = 'Sa_u' - call metadata_set(attname, longname, stdname, units) - - ! meridional wind at the lowest model level (m/s) - call seq_flds_add(a2x_states,"Sa_v") - call seq_flds_add(x2l_states,"Sa_v") - call seq_flds_add(x2i_states,"Sa_v") - call seq_flds_add(x2w_states,"Sa_v") - longname = 'Meridional wind at the lowest model level' - stdname = 'northward_wind' - units = 'm s-1' - attname = 'Sa_v' - call metadata_set(attname, longname, stdname, units) - - ! temperature at the lowest model level (K) - call seq_flds_add(a2x_states,"Sa_tbot") - call seq_flds_add(x2l_states,"Sa_tbot") - call seq_flds_add(x2i_states,"Sa_tbot") - call seq_flds_add(x2w_states,"Sa_tbot") - longname = 'Temperature at the lowest model level' - stdname = 'air_temperature' - units = 'K' - attname = 'Sa_tbot' - call metadata_set(attname, longname, stdname, units) - - ! 
potential temperature at the lowest model level (K) - call seq_flds_add(a2x_states,"Sa_ptem") - call seq_flds_add(x2l_states,"Sa_ptem") - call seq_flds_add(x2i_states,"Sa_ptem") - longname = 'Potential temperature at the lowest model level' - stdname = 'air_potential_temperature' - units = 'K' - attname = 'Sa_ptem' - call metadata_set(attname, longname, stdname, units) - - ! specific humidity at the lowest model level (kg/kg) - call seq_flds_add(a2x_states,"Sa_shum") - call seq_flds_add(x2l_states,"Sa_shum") - call seq_flds_add(x2i_states,"Sa_shum") - longname = 'Specific humidity at the lowest model level' - stdname = 'specific_humidity' - units = 'kg kg-1' - attname = 'Sa_shum' - call metadata_set(attname, longname, stdname, units) - - ! pressure at the lowest model level (Pa) - call seq_flds_add(a2x_states,"Sa_pbot") - call seq_flds_add(x2l_states,"Sa_pbot") - call seq_flds_add(x2i_states,"Sa_pbot") - if (trim(cime_model) == 'e3sm') then - call seq_flds_add(x2o_states,"Sa_pbot") - end if - longname = 'Pressure at the lowest model level' - stdname = 'air_pressure' - units = 'Pa' - attname = 'Sa_pbot' - call metadata_set(attname, longname, stdname, units) - - ! air density at the lowest model level (kg/m**3) - call seq_flds_add(a2x_states,"Sa_dens") - call seq_flds_add(x2i_states,"Sa_dens") - longname = 'Density at the lowest model level' - stdname = 'air_density' - units = 'kg m-3' - attname = 'Sa_dens' - call metadata_set(attname, longname, stdname, units) - - ! convective precipitation rate - ! large-scale (stable) snow rate (water equivalent) - call seq_flds_add(a2x_fluxes,"Faxa_rainc") - call seq_flds_add(a2x_fluxes,"Faxa_rainl") - call seq_flds_add(x2l_fluxes,"Faxa_rainc") - call seq_flds_add(x2l_fluxes,"Faxa_rainl") - call seq_flds_add(x2i_fluxes,"Faxa_rain" ) - call seq_flds_add(x2o_fluxes,"Faxa_rain" ) - units = 'kg m-2 s-1' - longname = 'Convective precipitation rate' - stdname = 'convective_precipitation_flux' - attname = 'Faxa_rainc' - call metadata_set(attname, longname, stdname, units) - longname = 'Large-scale (stable) precipitation rate' - stdname = 'large_scale_precipitation_flux' - attname = 'Faxa_rainl' - call metadata_set(attname, longname, stdname, units) - longname = 'Water flux due to rain' - stdname = 'rainfall_flux' - attname = 'Faxa_rain' - call metadata_set(attname, longname, stdname, units) - - ! convective snow rate (water equivalent) - ! large-scale (stable) snow rate (water equivalent) - call seq_flds_add(a2x_fluxes,"Faxa_snowc") - call seq_flds_add(a2x_fluxes,"Faxa_snowl") - call seq_flds_add(x2l_fluxes,"Faxa_snowc") - call seq_flds_add(x2l_fluxes,"Faxa_snowl") - call seq_flds_add(x2i_fluxes,"Faxa_snow" ) - call seq_flds_add(x2o_fluxes,"Faxa_snow" ) - units = 'kg m-2 s-1' - longname = 'Convective snow rate (water equivalent)' - stdname = 'convective_snowfall_flux' - attname = 'Faxa_snowc' - call metadata_set(attname, longname, stdname, units) - longname = 'Large-scale (stable) snow rate (water equivalent)' - stdname = 'large_scale_snowfall_flux' - attname = 'Faxa_snowl' - call metadata_set(attname, longname, stdname, units) - longname = 'Water flux due to snow' - stdname = 'surface_snow_melt_flux' - attname = 'Faxa_snow' - call metadata_set(attname, longname, stdname, units) - - ! total precipitation to ocean - call seq_flds_add(x2o_fluxes,"Faxa_prec") ! derived rain+snow - longname = 'Water flux (rain+snow)' - stdname = 'precipitation_flux' - units = 'kg m-2 s-1' - attname = 'Faxa_prec' - call metadata_set(attname, longname, stdname, units) - - ! 
downward longwave heat flux (W/m**2) - call seq_flds_add(a2x_fluxes,"Faxa_lwdn") - call seq_flds_add(x2l_fluxes,"Faxa_lwdn") - call seq_flds_add(x2i_fluxes,"Faxa_lwdn") - call seq_flds_add(x2o_fluxes,"Faxa_lwdn") - longname = 'Downward longwave heat flux' - stdname = 'downwelling_longwave_flux' - units = 'W m-2' - attname = 'Faxa_lwdn' - call metadata_set(attname, longname, stdname, units) - - ! direct near-infrared incident solar radiation - call seq_flds_add(a2x_fluxes,"Faxa_swndr") - call seq_flds_add(x2i_fluxes,"Faxa_swndr") - call seq_flds_add(x2l_fluxes,"Faxa_swndr") - longname = 'Direct near-infrared incident solar radiation' - stdname = 'surface_downward_direct_shortwave_flux_due_to_near_infrared_radiation' - units = 'W m-2' - attname = 'Faxa_swndr' - call metadata_set(attname, longname, stdname, units) - - ! direct visible incident solar radiation - call seq_flds_add(a2x_fluxes,"Faxa_swvdr") - call seq_flds_add(x2i_fluxes,"Faxa_swvdr") - call seq_flds_add(x2l_fluxes,"Faxa_swvdr") - longname = 'Direct visible incident solar radiation' - stdname = 'surface_downward_direct_shortwave_flux_due_to_visible_radiation' - units = 'W m-2' - attname = 'Faxa_swvdr' - call metadata_set(attname, longname, stdname, units) - - ! diffuse near-infrared incident solar radiation - call seq_flds_add(a2x_fluxes,"Faxa_swndf") - call seq_flds_add(x2i_fluxes,"Faxa_swndf") - call seq_flds_add(x2l_fluxes,"Faxa_swndf") - longname = 'Diffuse near-infrared incident solar radiation' - stdname = 'surface_downward_diffuse_shortwave_flux_due_to_near_infrared_radiation' - units = 'W m-2' - attname = 'Faxa_swndf' - call metadata_set(attname, longname, stdname, units) - - ! diffuse visible incident solar radiation - call seq_flds_add(a2x_fluxes,"Faxa_swvdf") - call seq_flds_add(x2i_fluxes,"Faxa_swvdf") - call seq_flds_add(x2l_fluxes,"Faxa_swvdf") - longname = 'Diffuse visible incident solar radiation' - stdname = 'surface_downward_diffuse_shortwave_flux_due_to_visible_radiation' - units = 'W m-2' - attname = 'Faxa_swvdf' - call metadata_set(attname, longname, stdname, units) - - ! Net shortwave radiation - call seq_flds_add(a2x_fluxes,"Faxa_swnet") ! diagnostic - call seq_flds_add(l2x_fluxes,"Fall_swnet") ! diagnostic - call seq_flds_add(i2x_fluxes,"Faii_swnet") ! diagnostic - - call seq_flds_add(i2x_fluxes,"Fioi_swpen") ! used for Foxx_swnet below - call seq_flds_add(x2o_fluxes,"Foxx_swnet") ! derived using albedos, Faxa_swxxx and swpen - units = 'W m-2' - longname = 'Net shortwave radiation' - stdname = 'surface_net_shortwave_flux' - attname = 'Faxa_swnet' - call metadata_set(attname, longname, stdname, units) - attname = 'Fall_swnet' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_swnet' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_swnet' - call metadata_set(attname, longname, stdname, units) - longname = 'Net shortwave radiation penetrating into ice and ocean' - stdname = 'net_downward_shortwave_flux_in_sea_ice_due_to_penetration' - attname = 'Fioi_swpen' - call metadata_set(attname, longname, stdname, units) - - ! 
Black Carbon hydrophilic dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_bcphidry" ) - call seq_flds_add(x2i_fluxes,"Faxa_bcphidry" ) - call seq_flds_add(x2l_fluxes,"Faxa_bcphidry" ) - call seq_flds_add(x2o_fluxes,"Faxa_bcphidry" ) - longname = 'Hydrophylic black carbon dry deposition flux' - stdname = 'dry_deposition_flux_of_hydrophylic_black_carbon' - units = 'kg m-2 s-1' - attname = 'Faxa_bcphidry' - call metadata_set(attname, longname, stdname, units) - - ! Black Carbon hydrophobic dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_bcphodry" ) - call seq_flds_add(x2i_fluxes,"Faxa_bcphodry" ) - call seq_flds_add(x2l_fluxes,"Faxa_bcphodry" ) - call seq_flds_add(x2o_fluxes,"Faxa_bcphodry") - longname = 'Hydrophobic black carbon dry deposition flux' - stdname = 'dry_deposition_flux_of_hydrophobic_black_carbon' - units = 'kg m-2 s-1' - attname = 'Faxa_bcphodry' - call metadata_set(attname, longname, stdname, units) - - ! Black Carbon hydrophilic wet deposition - call seq_flds_add(a2x_fluxes,"Faxa_bcphiwet" ) - call seq_flds_add(x2i_fluxes,"Faxa_bcphiwet" ) - call seq_flds_add(x2l_fluxes,"Faxa_bcphiwet" ) - call seq_flds_add(x2o_fluxes,"Faxa_bcphiwet" ) - longname = 'Hydrophylic black carbon wet deposition flux' - stdname = 'wet_deposition_flux_of_hydrophylic_black_carbon' - units = 'kg m-2 s-1' - attname = 'Faxa_bcphiwet' - call metadata_set(attname, longname, stdname, units) - - ! Organic Carbon hydrophilic dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_ocphidry" ) - call seq_flds_add(x2i_fluxes,"Faxa_ocphidry" ) - call seq_flds_add(x2l_fluxes,"Faxa_ocphidry" ) - call seq_flds_add(x2o_fluxes,"Faxa_ocphidry" ) - longname = 'Hydrophylic organic carbon dry deposition flux' - stdname = 'dry_deposition_flux_of_hydrophylic_organic_carbon' - units = 'kg m-2 s-1' - attname = 'Faxa_ocphidry' - call metadata_set(attname, longname, stdname, units) - - ! Organic Carbon hydrophobic dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_ocphodry" ) - call seq_flds_add(x2i_fluxes,"Faxa_ocphodry" ) - call seq_flds_add(x2l_fluxes,"Faxa_ocphodry" ) - call seq_flds_add(x2o_fluxes,"Faxa_ocphodry" ) - longname = 'Hydrophobic organic carbon dry deposition flux' - stdname = 'dry_deposition_flux_of_hydrophobic_organic_carbon' - units = 'kg m-2 s-1' - attname = 'Faxa_ocphodry' - call metadata_set(attname, longname, stdname, units) - - ! Organic Carbon hydrophilic wet deposition - call seq_flds_add(a2x_fluxes,"Faxa_ocphiwet" ) - call seq_flds_add(x2i_fluxes,"Faxa_ocphiwet" ) - call seq_flds_add(x2l_fluxes,"Faxa_ocphiwet" ) - call seq_flds_add(x2o_fluxes,"Faxa_ocphiwet" ) - longname = 'Hydrophylic organic carbon wet deposition flux' - stdname = 'wet_deposition_flux_of_hydrophylic_organic_carbon' - units = 'kg m-2 s-1' - attname = 'Faxa_ocphiwet' - call metadata_set(attname, longname, stdname, units) - - ! Size 1 dust -- wet deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstwet1" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstwet1" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstwet1" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstwet1" ) - longname = 'Dust wet deposition flux (size 1)' - stdname = 'wet_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstwet1' - call metadata_set(attname, longname, stdname, units) - - ! 
Size 2 dust -- wet deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstwet2" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstwet2" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstwet2" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstwet2" ) - longname = 'Dust wet deposition flux (size 2)' - stdname = 'wet_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstwet2' - call metadata_set(attname, longname, stdname, units) - - ! Size 3 dust -- wet deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstwet3" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstwet3" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstwet3" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstwet3" ) - longname = 'Dust wet deposition flux (size 3)' - stdname = 'wet_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstwet3' - call metadata_set(attname, longname, stdname, units) - - ! Size 4 dust -- wet deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstwet4" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstwet4" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstwet4" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstwet4" ) - longname = 'Dust wet deposition flux (size 4)' - stdname = 'wet_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstwet4' - call metadata_set(attname, longname, stdname, units) - - ! Size 1 dust -- dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstdry1" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstdry1" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstdry1" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstdry1" ) - longname = 'Dust dry deposition flux (size 1)' - stdname = 'dry_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstdry1' - call metadata_set(attname, longname, stdname, units) - - ! Size 2 dust -- dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstdry2" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstdry2" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstdry2" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstdry2" ) - longname = 'Dust dry deposition flux (size 2)' - stdname = 'dry_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstdry2' - call metadata_set(attname, longname, stdname, units) - - ! Size 3 dust -- dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstdry3" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstdry3" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstdry3" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstdry3" ) - longname = 'Dust dry deposition flux (size 3)' - stdname = 'dry_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstdry3' - call metadata_set(attname, longname, stdname, units) - - ! Size 4 dust -- dry deposition - call seq_flds_add(a2x_fluxes,"Faxa_dstdry4" ) - call seq_flds_add(x2i_fluxes,"Faxa_dstdry4" ) - call seq_flds_add(x2l_fluxes,"Faxa_dstdry4" ) - call seq_flds_add(x2o_fluxes,"Faxa_dstdry4" ) - longname = 'Dust dry deposition flux (size 4)' - stdname = 'dry_deposition_flux_of_dust' - units = 'kg m-2 s-1' - attname = 'Faxa_dstdry4' - call metadata_set(attname, longname, stdname, units) - - !---------------------------------------------------------- - ! states/fluxes to atm (and ocean) - !---------------------------------------------------------- - - ! 
land/sea-ice/ocean fractions - call seq_flds_add(x2a_states,'Sf_lfrac') - call seq_flds_add(x2a_states,'Sf_ifrac') - call seq_flds_add(x2a_states,'Sf_ofrac') - longname = 'Surface land fraction' - stdname = 'land_area_fraction' - units = '1' - attname = 'Sf_lfrac' - call metadata_set(attname, longname, stdname, units) - longname = 'Surface ice fraction' - stdname = 'sea_ice_area_fraction' - attname = 'Sf_ifrac' - call metadata_set(attname, longname, stdname, units) - longname = 'Surface ocean fraction' - stdname = 'sea_area_fraction' - attname = 'Sf_ofrac' - call metadata_set(attname, longname, stdname, units) - - ! Direct albedo (visible radiation) - call seq_flds_add(i2x_states,"Si_avsdr") - call seq_flds_add(l2x_states,"Sl_avsdr") - call seq_flds_add(xao_albedo,"So_avsdr") - call seq_flds_add(x2a_states,"Sx_avsdr") - longname = 'Direct albedo (visible radiation)' - stdname = 'surface_direct_albedo_due_to_visible_radiation' - units = '1' - attname = 'Si_avsdr' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_avsdr' - call metadata_set(attname, longname, stdname, units) - attname = 'So_avsdr' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_avsdr' - call metadata_set(attname, longname, stdname, units) - - ! Direct albedo (near-infrared radiation) - call seq_flds_add(i2x_states,"Si_anidr") - call seq_flds_add(l2x_states,"Sl_anidr") - call seq_flds_add(xao_albedo,"So_anidr") - call seq_flds_add(x2a_states,"Sx_anidr") - longname = 'Direct albedo (near-infrared radiation)' - stdname = 'surface_direct_albedo_due_to_near_infrared_radiation' - units = '1' - attname = 'Si_anidr' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_anidr' - call metadata_set(attname, longname, stdname, units) - attname = 'So_anidr' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_anidr' - call metadata_set(attname, longname, stdname, units) - - ! Diffuse albedo (visible radiation) - call seq_flds_add(i2x_states,"Si_avsdf") - call seq_flds_add(l2x_states,"Sl_avsdf") - call seq_flds_add(xao_albedo,"So_avsdf") - call seq_flds_add(x2a_states,"Sx_avsdf") - longname = 'Diffuse albedo (visible radiation)' - stdname = 'surface_diffuse_albedo_due_to_visible_radiation' - units = '1' - attname = 'Si_avsdf' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_avsdf' - call metadata_set(attname, longname, stdname, units) - attname = 'So_avsdf' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_avsdf' - call metadata_set(attname, longname, stdname, units) - - ! Diffuse albedo (near-infrared radiation) - call seq_flds_add(i2x_states,"Si_anidf") - call seq_flds_add(l2x_states,"Sl_anidf") - call seq_flds_add(xao_albedo,"So_anidf") - call seq_flds_add(x2a_states,"Sx_anidf") - longname = 'Diffuse albedo (near-infrared radiation)' - stdname = 'surface_diffuse_albedo_due_to_near_infrared_radiation' - units = '1' - attname = 'Si_anidf' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_anidf' - call metadata_set(attname, longname, stdname, units) - attname = 'So_anidf' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_anidf' - call metadata_set(attname, longname, stdname, units) - - ! 
Reference temperature at 2 meters - call seq_flds_add(l2x_states,"Sl_tref") - call seq_flds_add(i2x_states,"Si_tref") - call seq_flds_add(xao_states,"So_tref") - call seq_flds_add(x2a_states,"Sx_tref") - longname = 'Reference temperature at 2 meters' - stdname = 'air_temperature' - units = 'K' - attname = 'Si_tref' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_tref' - call metadata_set(attname, longname, stdname, units) - attname = 'So_tref' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_tref' - call metadata_set(attname, longname, stdname, units) - - ! Reference specific humidity at 2 meters - call seq_flds_add(l2x_states,"Sl_qref") - call seq_flds_add(i2x_states,"Si_qref") - call seq_flds_add(xao_states,"So_qref") - call seq_flds_add(x2a_states,"Sx_qref") - longname = 'Reference specific humidity at 2 meters' - stdname = 'specific_humidity' - units = 'kg kg-1' - attname = 'Si_qref' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_qref' - call metadata_set(attname, longname, stdname, units) - attname = 'So_qref' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_qref' - call metadata_set(attname, longname, stdname, units) - - ! Surface temperature - call seq_flds_add(l2x_states,"Sl_t") - call seq_flds_add(i2x_states,"Si_t") - call seq_flds_add(x2a_states,"So_t") - call seq_flds_add(x2a_states,"Sx_t") - longname = 'Surface temperature' - stdname = 'surface_temperature' - units = 'K' - attname = 'Si_t' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_t' - call metadata_set(attname, longname, stdname, units) - attname = 'So_t' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_t' - call metadata_set(attname, longname, stdname, units) - - ! Surface friction velocity in land (land/atm only) - call seq_flds_add(l2x_states,"Sl_fv") - call seq_flds_add(x2a_states,"Sl_fv") - longname = 'Surface fraction velocity in land' - stdname = 'fraction_velocity' - units = 'm s-1' - attname = 'Sl_fv' - call metadata_set(attname, longname, stdname, units) - - ! Aerodynamical resistance (land/atm only) - call seq_flds_add(l2x_states,"Sl_ram1") - call seq_flds_add(x2a_states,"Sl_ram1") - longname = 'aerodynamic resistance' - stdname = 'aerodynamic_resistance' - attname = 'SI_ram1' - units = 's/m' - call metadata_set(attname, longname, stdname, units) - - - ! Surface snow water equivalent (land/atm only) - call seq_flds_add(l2x_states,"Sl_snowh") - call seq_flds_add(x2a_states,"Sl_snowh") - longname = 'Surface snow water equivalent' - stdname = 'surface_snow_water_equivalent' - units = 'm' - attname = 'Sl_snowh' - call metadata_set(attname, longname, stdname, units) - - ! Surface snow depth (ice/atm only) - call seq_flds_add(i2x_states,"Si_snowh") - call seq_flds_add(x2a_states,"Si_snowh") - longname = 'Surface snow depth' - stdname = 'surface_snow_thickness' - units = 'm' - attname = 'Si_snowh' - call metadata_set(attname, longname, stdname, units) - - ! Surface saturation specific humidity in ocean (ocn/atm only) - call seq_flds_add(xao_states,"So_ssq") - call seq_flds_add(x2a_states,"So_ssq") - longname = 'Surface saturation specific humidity in ocean' - stdname = 'specific_humidity_at_saturation' - units = 'kg kg-1' - attname = 'So_ssq' - call metadata_set(attname, longname, stdname, units) - - ! Square of exch. coeff (tracers) (ocn/atm only) - call seq_flds_add(xao_states,"So_re") - call seq_flds_add(x2a_states,"So_re") - longname = 'Square of exch. 
coeff (tracers)' - stdname = '' - units = '' - attname = 'So_re' - call metadata_set(attname, longname, stdname, units) - - ! 10 meter wind - call seq_flds_add(i2x_states,"Si_u10") - call seq_flds_add(xao_states,"So_u10") - call seq_flds_add(l2x_states,"Sl_u10") - call seq_flds_add(x2a_states,"Sx_u10") - longname = '10m wind' - stdname = '10m_wind' - units = 'm' - attname = 'u10' - call metadata_set(attname, longname, stdname, units) - - ! Zonal surface stress" - call seq_flds_add(l2x_fluxes,"Fall_taux") - call seq_flds_add(xao_fluxes,"Faox_taux") - call seq_flds_add(i2x_fluxes,"Faii_taux") - call seq_flds_add(x2a_fluxes,"Faxx_taux") - call seq_flds_add(i2x_fluxes,"Fioi_taux") - call seq_flds_add(x2o_fluxes,"Foxx_taux") - longname = 'Zonal surface stress' - stdname = 'surface_downward_eastward_stress' - units = 'N m-2' - attname = 'Fall_taux' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_taux' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_taux' - call metadata_set(attname, longname, stdname, units) - attname = 'Fioi_taux' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_taux' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_taux' - call metadata_set(attname, longname, stdname, units) - - ! Meridional surface stress - call seq_flds_add(l2x_fluxes,"Fall_tauy") - call seq_flds_add(xao_fluxes,"Faox_tauy") - call seq_flds_add(i2x_fluxes,"Faii_tauy") - call seq_flds_add(x2a_fluxes,"Faxx_tauy") - call seq_flds_add(i2x_fluxes,"Fioi_tauy") - call seq_flds_add(x2o_fluxes,"Foxx_tauy") - longname = 'Meridional surface stress' - stdname = 'surface_downward_northward_stress' - units = 'N m-2' - attname = 'Fall_tauy' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_tauy' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_tauy' - call metadata_set(attname, longname, stdname, units) - attname = 'Fioi_tauy' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_tauy' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_tauy' - call metadata_set(attname, longname, stdname, units) - - ! Surface latent heat flux - call seq_flds_add(l2x_fluxes,"Fall_lat") - call seq_flds_add(xao_fluxes,"Faox_lat") - call seq_flds_add(i2x_fluxes,"Faii_lat") - call seq_flds_add(x2a_fluxes,"Faxx_lat") - call seq_flds_add(x2o_fluxes,"Foxx_lat") - longname = 'Surface latent heat flux' - stdname = 'surface_upward_latent_heat_flux' - units = 'W m-2' - attname = 'Fall_lat' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_lat' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_lat' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_lat' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_lat' - call metadata_set(attname, longname, stdname, units) - - ! 
Surface sensible heat flux - call seq_flds_add(l2x_fluxes,"Fall_sen") - call seq_flds_add(xao_fluxes,"Faox_sen") - call seq_flds_add(i2x_fluxes,"Faii_sen") - call seq_flds_add(x2a_fluxes,"Faxx_sen") - call seq_flds_add(x2o_fluxes,"Foxx_sen") - longname = 'Sensible heat flux' - stdname = 'surface_upward_sensible_heat_flux' - units = 'W m-2' - attname = 'Fall_sen' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_sen' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_sen' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_sen' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_sen' - call metadata_set(attname, longname, stdname, units) - - ! Surface upward longwave heat flux - call seq_flds_add(l2x_fluxes,"Fall_lwup") - call seq_flds_add(xao_fluxes,"Faox_lwup") - call seq_flds_add(i2x_fluxes,"Faii_lwup") - call seq_flds_add(x2a_fluxes,"Faxx_lwup") - call seq_flds_add(x2o_fluxes,"Foxx_lwup") - longname = 'Surface upward longwave heat flux' - stdname = 'surface_net_upward_longwave_flux' - units = 'W m-2' - attname = 'Fall_lwup' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_lwup' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_lwup' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_lwup' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_lwup' - call metadata_set(attname, longname, stdname, units) - - ! Evaporation water flux - call seq_flds_add(l2x_fluxes,"Fall_evap") - call seq_flds_add(xao_fluxes,"Faox_evap") - call seq_flds_add(i2x_fluxes,"Faii_evap") - call seq_flds_add(x2a_fluxes,"Faxx_evap") - call seq_flds_add(x2o_fluxes,"Foxx_evap") - longname = 'Evaporation water flux' - stdname = 'water_evaporation_flux' - units = 'kg m-2 s-1' - attname = 'Fall_evap' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_evap' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_evap' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_evap' - call metadata_set(attname, longname, stdname, units) - - ! Dust flux (particle bin number 1) - call seq_flds_add(l2x_fluxes,"Fall_flxdst1") - call seq_flds_add(x2a_fluxes,"Fall_flxdst1") - longname = 'Dust flux (particle bin number 1)' - stdname = 'dust_flux' - units = 'kg m-2 s-1' - attname = 'Fall_flxdst1' - call metadata_set(attname, longname, stdname, units) - - ! Dust flux (particle bin number 2) - call seq_flds_add(l2x_fluxes,"Fall_flxdst2") - call seq_flds_add(x2a_fluxes,"Fall_flxdst2") - longname = 'Dust flux (particle bin number 2)' - stdname = 'dust_flux' - units = 'kg m-2 s-1' - attname = 'Fall_flxdst2' - call metadata_set(attname, longname, stdname, units) - - ! Dust flux (particle bin number 3) - call seq_flds_add(l2x_fluxes,"Fall_flxdst3") - call seq_flds_add(x2a_fluxes,"Fall_flxdst3") - longname = 'Dust flux (particle bin number 3)' - stdname = 'dust_flux' - units = 'kg m-2 s-1' - attname = 'Fall_flxdst3' - call metadata_set(attname, longname, stdname, units) - - ! Dust flux (particle bin number 4) - call seq_flds_add(l2x_fluxes,"Fall_flxdst4") - call seq_flds_add(x2a_fluxes,"Fall_flxdst4") - longname = 'Dust flux (particle bin number 4)' - stdname = 'dust_flux' - units = 'kg m-2 s-1' - attname = 'Fall_flxdst4' - call metadata_set(attname, longname, stdname, units) - - !----------------------------- - ! atm<->ocn only exchange - !----------------------------- - - ! 
Sea level pressure (Pa) - call seq_flds_add(a2x_states,"Sa_pslv") - call seq_flds_add(x2o_states,"Sa_pslv") - longname = 'Sea level pressure' - stdname = 'air_pressure_at_sea_level' - units = 'Pa' - attname = 'Sa_pslv' - call metadata_set(attname, longname, stdname, units) - - ! Wind speed squared at 10 meters - call seq_flds_add(xao_states,"So_duu10n") - call seq_flds_add(x2o_states,"So_duu10n") - longname = 'Wind speed squared at 10 meters' - stdname = 'square_of_wind_speed' - units = 'm2 s-2' - attname = 'So_duu10n' - call metadata_set(attname, longname, stdname, units) - - ! Surface friction velocity in ocean - call seq_flds_add(xao_states,"So_ustar") - call seq_flds_add(x2a_states,"So_ustar") - longname = 'Surface fraction velocity in ocean' - stdname = 'fraction_velocity' - units = 'm s-1' - attname = 'So_ustar' - call metadata_set(attname, longname, stdname, units) - - !----------------------------- - ! ice<->ocn only exchange - !----------------------------- - - ! Fractional ice coverage wrt ocean - call seq_flds_add(i2x_states,"Si_ifrac") - call seq_flds_add(x2o_states,"Si_ifrac") - call seq_flds_add(x2w_states,"Si_ifrac") - longname = 'Fractional ice coverage wrt ocean' - stdname = 'sea_ice_area_fraction' - units = '1' - attname = 'Si_ifrac' - call metadata_set(attname, longname, stdname, units) - - if (trim(cime_model) == 'e3sm') then - ! Sea ice basal pressure - call seq_flds_add(i2x_states,"Si_bpress") - call seq_flds_add(x2o_states,"Si_bpress") - longname = 'Sea ice basal pressure' - stdname = 'cice_basal_pressure' - units = 'Pa' - attname = 'Si_bpress' - call metadata_set(attname, longname, stdname, units) - end if - - ! Ocean melt and freeze potential - call seq_flds_add(o2x_fluxes,"Fioo_q") - call seq_flds_add(x2i_fluxes,"Fioo_q") - longname = 'Ocean melt and freeze potential' - stdname = 'surface_snow_and_ice_melt_heat_flux' - units = 'W m-2' - attname = 'Fioo_q' - call metadata_set(attname, longname, stdname, units) - - if (trim(cime_model) == 'e3sm') then - ! Ocean melt (q<0) potential - call seq_flds_add(o2x_fluxes,"Fioo_meltp") - call seq_flds_add(x2i_fluxes,"Fioo_meltp") - longname = 'Ocean melt (q<0) potential' - stdname = 'surface_snow_and_ice_melt_heat_flux' - units = 'W m-2' - attname = 'Fioo_meltp' - call metadata_set(attname, longname, stdname, units) - end if - - if (trim(cime_model) == 'e3sm') then - ! Ocean frazil production - call seq_flds_add(o2x_fluxes,"Fioo_frazil") - call seq_flds_add(x2i_fluxes,"Fioo_frazil") - longname = 'Ocean frazil production' - stdname = 'ocean_frazil_ice_production' - units = 'kg m-2 s-1' - attname = 'Fioo_frazil' - call metadata_set(attname, longname, stdname, units) - end if - - ! Heat flux from melting - call seq_flds_add(i2x_fluxes,"Fioi_melth") - call seq_flds_add(x2o_fluxes,"Fioi_melth") - longname = 'Heat flux from melting' - stdname = 'surface_snow_melt_heat_flux' - units = 'W m-2' - attname = 'Fioi_melth' - call metadata_set(attname, longname, stdname, units) - - ! Water flux from melting - call seq_flds_add(i2x_fluxes,"Fioi_meltw") - call seq_flds_add(x2o_fluxes,"Fioi_meltw") - longname = 'Water flux due to melting' - stdname = 'surface_snow_melt_flux' - units = 'kg m-2 s-1' - attname = 'Fioi_meltw' - call metadata_set(attname, longname, stdname, units) - - ! 
Heat flux from melting icebergs - call seq_flds_add(i2x_fluxes,"PFioi_bergh") - call seq_flds_add(x2o_fluxes,"PFioi_bergh") - longname = 'Heat flux from melting icebergs' - stdname = 'surface_iceberg_melt_heat_flux' - units = 'W m-2' - attname = 'PFioi_bergh' - call metadata_set(attname, longname, stdname, units) - - ! Water flux from melting icebergs - call seq_flds_add(i2x_fluxes,"PFioi_bergw") - call seq_flds_add(x2o_fluxes,"PFioi_bergw") - longname = 'Water flux due to melting icebergs' - stdname = 'surface_iceberg_melt_flux' - units = 'kg m-2 s-1' - attname = 'PFioi_bergw' - call metadata_set(attname, longname, stdname, units) - - ! Salt flux - call seq_flds_add(i2x_fluxes,"Fioi_salt") - call seq_flds_add(x2o_fluxes,"Fioi_salt") - longname = 'Salt flux' - stdname = 'virtual_salt_flux_into_sea_water' - units = 'kg m-2 s-1' - attname = 'Fioi_salt' - call metadata_set(attname, longname, stdname, units) - - ! Black Carbon hydrophilic deposition - call seq_flds_add(i2x_fluxes,"Fioi_bcphi" ) - call seq_flds_add(x2o_fluxes,"Fioi_bcphi" ) - longname = 'Hydrophylic black carbon deposition flux' - stdname = 'deposition_flux_of_hydrophylic_black_carbon' - units = 'kg m-2 s-1' - attname = 'Fioi_bcphi' - call metadata_set(attname, longname, stdname, units) - - ! Black Carbon hydrophobic deposition - call seq_flds_add(i2x_fluxes,"Fioi_bcpho" ) - call seq_flds_add(x2o_fluxes,"Fioi_bcpho" ) - longname = 'Hydrophobic black carbon deposition flux' - stdname = 'deposition_flux_of_hydrophobic_black_carbon' - units = 'kg m-2 s-1' - attname = 'Fioi_bcpho' - call metadata_set(attname, longname, stdname, units) - - ! Dust flux - call seq_flds_add(i2x_fluxes,"Fioi_flxdst") - call seq_flds_add(x2o_fluxes,"Fioi_flxdst") - longname = 'Dust flux' - stdname = 'dust_flux' - units = 'kg m-2 s-1' - attname = 'Fioi_flxdst' - call metadata_set(attname, longname, stdname, units) - - ! Sea surface temperature - call seq_flds_add(o2x_states,"So_t") - call seq_flds_add(x2i_states,"So_t") - call seq_flds_add(x2w_states,"So_t") - - ! Sea surface salinity - call seq_flds_add(o2x_states,"So_s") - call seq_flds_add(x2i_states,"So_s") - longname = 'Sea surface salinity' - stdname = 'sea_surface_salinity' - units = 'g kg-1' - attname = 'So_s' - call metadata_set(attname, longname, stdname, units) - - ! Zonal sea water velocity - call seq_flds_add(o2x_states,"So_u") - call seq_flds_add(x2i_states,"So_u") - call seq_flds_add(x2w_states,"So_u") - longname = 'Zonal sea water velocity' - stdname = 'eastward_sea_water_velocity' - units = 'm s-1' - attname = 'So_u' - call metadata_set(attname, longname, stdname, units) - - ! Meridional sea water velocity - call seq_flds_add(o2x_states,"So_v") - call seq_flds_add(x2i_states,"So_v") - call seq_flds_add(x2w_states,"So_v") - longname = 'Meridional sea water velocity' - stdname = 'northward_sea_water_velocity' - units = 'm s-1' - attname = 'So_v' - - ! Zonal sea surface slope - call seq_flds_add(o2x_states,"So_dhdx") - call seq_flds_add(x2i_states,"So_dhdx") - longname = 'Zonal sea surface slope' - stdname = 'sea_surface_eastward_slope' - units = 'm m-1' - attname = 'So_dhdx' - call metadata_set(attname, longname, stdname, units) - - ! Meridional sea surface slope - call seq_flds_add(o2x_states,"So_dhdy") - call seq_flds_add(x2i_states,"So_dhdy") - longname = 'Meridional sea surface slope' - stdname = 'sea_surface_northward_slope' - units = 'm m-1' - attname = 'So_dhdy' - call metadata_set(attname, longname, stdname, units) - - ! 
Boundary Layer Depth - call seq_flds_add(o2x_states,"So_bldepth") - call seq_flds_add(x2w_states,"So_bldepth") - longname = 'Ocean Boundary Layer Depth' - stdname = 'ocean_boundary_layer_depth' - units = 'm' - attname = 'So_bldepth' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_states,"So_fswpen") - call seq_flds_add(o2x_states,"So_fswpen") - longname = 'Fraction of sw penetrating surface layer for diurnal cycle' - stdname = 'Fraction of sw penetrating surface layer for diurnal cycle' - units = '1' - attname = 'So_fswpen' - call metadata_set(attname, longname, stdname, units) - - !------------------------------ - ! ice<->ocn only exchange - BGC - !------------------------------ - if (trim(cime_model) == 'e3sm' .and. flds_bgc_oi) then - - ! Ocean algae concentration 1 - diatoms? - call seq_flds_add(o2x_states,"So_algae1") - call seq_flds_add(x2i_states,"So_algae1") - longname = 'Ocean algae concentration 1 - diatoms' - stdname = 'ocean_algae_conc_1' - units = 'mmol C m-3' - attname = 'So_algae1' - call metadata_set(attname, longname, stdname, units) - - ! Ocean algae concentration 2 - flagellates? - call seq_flds_add(o2x_states,"So_algae2") - call seq_flds_add(x2i_states,"So_algae2") - longname = 'Ocean algae concentration 2 - flagellates' - stdname = 'ocean_algae_conc_2' - units = 'mmol C m-3' - attname = 'So_algae2' - call metadata_set(attname, longname, stdname, units) - - ! Ocean algae concentration 3 - phaeocyctis? - call seq_flds_add(o2x_states,"So_algae3") - call seq_flds_add(x2i_states,"So_algae3") - longname = 'Ocean algae concentration 3 - phaeocyctis' - stdname = 'ocean_algae_conc_3' - units = 'mmol C m-3' - attname = 'So_algae3' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dissolved organic carbon concentration 1 - saccharides? - call seq_flds_add(o2x_states,"So_doc1") - call seq_flds_add(x2i_states,"So_doc1") - longname = 'Ocean dissolved organic carbon concentration 1 - saccharides' - stdname = 'ocean_dissolved_organic_carbon_conc_1' - units = 'mmol C m-3' - attname = 'So_doc1' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dissolved organic carbon concentration 2 - lipids? - call seq_flds_add(o2x_states,"So_doc2") - call seq_flds_add(x2i_states,"So_doc2") - longname = 'Ocean dissolved organic carbon concentration 2 - lipids' - stdname = 'ocean_dissolved_organic_carbon_conc_2' - units = 'mmol C m-3' - attname = 'So_doc2' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dissolved organic carbon concentration 3 - tbd? - call seq_flds_add(o2x_states,"So_doc3") - call seq_flds_add(x2i_states,"So_doc3") - longname = 'Ocean dissolved organic carbon concentration 3 - tbd' - stdname = 'ocean_dissolved_organic_carbon_conc_3' - units = 'mmol C m-3' - attname = 'So_doc3' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dissolved inorganic carbon concentration 1 - call seq_flds_add(o2x_states,"So_dic1") - call seq_flds_add(x2i_states,"So_dic1") - longname = 'Ocean dissolved inorganic carbon concentration 1' - stdname = 'ocean_dissolved_inorganic_carbon_conc_1' - units = 'mmol C m-3' - attname = 'So_dic1' - call metadata_set(attname, longname, stdname, units) - - ! 
Ocean dissolved organic nitrogen concentration 1 - call seq_flds_add(o2x_states,"So_don1") - call seq_flds_add(x2i_states,"So_don1") - longname = 'Ocean dissolved organic nitrogen concentration 1' - stdname = 'ocean_dissolved_organic_nitrogen_conc_1' - units = 'mmol N m-3' - attname = 'So_don1' - call metadata_set(attname, longname, stdname, units) - - ! Ocean nitrate concentration - call seq_flds_add(o2x_states,"So_no3") - call seq_flds_add(x2i_states,"So_no3") - longname = 'Ocean nitrate concentration' - stdname = 'ocean_nitrate_conc' - units = 'mmol N m-3' - attname = 'So_no3' - call metadata_set(attname, longname, stdname, units) - - ! Ocean silicate concentration - call seq_flds_add(o2x_states,"So_sio3") - call seq_flds_add(x2i_states,"So_sio3") - longname = 'Ocean silicate concentration' - stdname = 'ocean_silicate_conc' - units = 'mmol Si m-3' - attname = 'So_sio3' - call metadata_set(attname, longname, stdname, units) - - ! Ocean ammonium concentration - call seq_flds_add(o2x_states,"So_nh4") - call seq_flds_add(x2i_states,"So_nh4") - longname = 'Ocean ammonium concentration' - stdname = 'ocean_ammonium_conc' - units = 'mmol N m-3' - attname = 'So_nh4' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dimethyl sulfide (DMS) concentration - call seq_flds_add(o2x_states,"So_dms") - call seq_flds_add(x2i_states,"So_dms") - longname = 'Ocean dimethyl sulfide concentration' - stdname = 'ocean_dimethyl_sulfide_conc' - units = 'mmol S m-3' - attname = 'So_dms' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dimethylsulphonio-propionate (DMSP) concentration - call seq_flds_add(o2x_states,"So_dmsp") - call seq_flds_add(x2i_states,"So_dmsp") - longname = 'Ocean dimethylsulphonio-propionate concentration' - stdname = 'ocean_dimethylsulphoniopropionate_conc' - units = 'mmol S m-3' - attname = 'So_dmsp' - call metadata_set(attname, longname, stdname, units) - - ! Ocean DOCr concentration - call seq_flds_add(o2x_states,"So_docr") - call seq_flds_add(x2i_states,"So_docr") - longname = 'Ocean DOCr concentration' - stdname = 'ocean_DOCr_conc' - units = 'mmol C m-3' - attname = 'So_docr' - call metadata_set(attname, longname, stdname, units) - - ! Ocean particulate iron concentration 1 - call seq_flds_add(o2x_states,"So_fep1") - call seq_flds_add(x2i_states,"So_fep1") - longname = 'Ocean particulate iron concentration 1' - stdname = 'ocean_particulate_iron_conc_1' - units = 'umol Fe m-3' - attname = 'So_fep1' - call metadata_set(attname, longname, stdname, units) - - ! Ocean particulate iron concentration 2 - call seq_flds_add(o2x_states,"So_fep2") - call seq_flds_add(x2i_states,"So_fep2") - longname = 'Ocean particulate iron concentration 2' - stdname = 'ocean_particulate_iron_conc_2' - units = 'umol Fe m-3' - attname = 'So_fep2' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dissolved iron concentration 1 - call seq_flds_add(o2x_states,"So_fed1") - call seq_flds_add(x2i_states,"So_fed1") - longname = 'Ocean dissolved iron concentration 1' - stdname = 'ocean_dissolved_iron_conc_1' - units = 'umol Fe m-3' - attname = 'So_fed1' - call metadata_set(attname, longname, stdname, units) - - ! Ocean dissolved iron concentration 2 - call seq_flds_add(o2x_states,"So_fed2") - call seq_flds_add(x2i_states,"So_fed2") - longname = 'Ocean dissolved iron concentration 2' - stdname = 'ocean_dissolved_iron_conc_2' - units = 'umol Fe m-3' - attname = 'So_fed2' - call metadata_set(attname, longname, stdname, units) - - ! 
Ocean z-aerosol concentration 1 - call seq_flds_add(o2x_states,"So_zaer1") - call seq_flds_add(x2i_states,"So_zaer1") - longname = 'Ocean z-aerosol concentration 1' - stdname = 'ocean_z_aerosol_conc_1' - units = 'kg m-3' - attname = 'So_zaer1' - call metadata_set(attname, longname, stdname, units) - - ! Ocean z-aerosol concentration 2 - call seq_flds_add(o2x_states,"So_zaer2") - call seq_flds_add(x2i_states,"So_zaer2") - longname = 'Ocean z-aerosol concentration 2' - stdname = 'ocean_z_aerosol_conc_2' - units = 'kg m-3' - attname = 'So_zaer2' - call metadata_set(attname, longname, stdname, units) - - ! Ocean z-aerosol concentration 3 - call seq_flds_add(o2x_states,"So_zaer3") - call seq_flds_add(x2i_states,"So_zaer3") - longname = 'Ocean z-aerosol concentration 3' - stdname = 'ocean_z_aerosol_conc_3' - units = 'kg m-3' - attname = 'So_zaer3' - call metadata_set(attname, longname, stdname, units) - - ! Ocean z-aerosol concentration 4 - call seq_flds_add(o2x_states,"So_zaer4") - call seq_flds_add(x2i_states,"So_zaer4") - longname = 'Ocean z-aerosol concentration 4' - stdname = 'ocean_z_aerosol_conc_4' - units = 'kg m-3' - attname = 'So_zaer4' - call metadata_set(attname, longname, stdname, units) - - ! Ocean z-aerosol concentration 5 - call seq_flds_add(o2x_states,"So_zaer5") - call seq_flds_add(x2i_states,"So_zaer5") - longname = 'Ocean z-aerosol concentration 5' - stdname = 'ocean_z_aerosol_conc_5' - units = 'kg m-3' - attname = 'So_zaer5' - call metadata_set(attname, longname, stdname, units) - - ! Ocean z-aerosol concentration 6 - call seq_flds_add(o2x_states,"So_zaer6") - call seq_flds_add(x2i_states,"So_zaer6") - longname = 'Ocean z-aerosol concentration 6' - stdname = 'ocean_z_aerosol_conc_6' - units = 'kg m-3' - attname = 'So_zaer6' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice algae flux 1 - diatoms? - call seq_flds_add(i2x_fluxes,"Fioi_algae1") - call seq_flds_add(x2o_fluxes,"Fioi_algae1") - longname = 'Sea ice algae flux 1 - diatoms' - stdname = 'seaice_algae_flux_1' - units = 'mmol C m-2 s-1' - attname = 'Fioi_algae1' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice algae flux 2 - flagellates? - call seq_flds_add(i2x_fluxes,"Fioi_algae2") - call seq_flds_add(x2o_fluxes,"Fioi_algae2") - longname = 'Sea ice algae flux 2 - flagellates' - stdname = 'seaice_algae_flux_2' - units = 'mmol C m-2 s-1' - attname = 'Fioi_algae2' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice algae flux 3 - phaeocyctis? - call seq_flds_add(i2x_fluxes,"Fioi_algae3") - call seq_flds_add(x2o_fluxes,"Fioi_algae3") - longname = 'Sea ice algae flux 3 - phaeocyctis' - stdname = '_algae_flux_3' - units = 'mmol C m-2 s-1' - attname = 'Fioi_algae3' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice dissolved organic carbon flux 1 - saccharides? - call seq_flds_add(i2x_fluxes,"Fioi_doc1") - call seq_flds_add(x2o_fluxes,"Fioi_doc1") - longname = 'Sea ice dissolved organic carbon flux 1 - saccharides' - stdname = 'seaice_dissolved_organic_carbon_flux_1' - units = 'mmol C m-2 s-1' - attname = 'Fioi_doc1' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice dissolved organic carbon flux 2 - lipids? - call seq_flds_add(i2x_fluxes,"Fioi_doc2") - call seq_flds_add(x2o_fluxes,"Fioi_doc2") - longname = 'Sea ice dissolved organic carbon flux 2 - lipids' - stdname = 'seaice_dissolved_organic_carbon_flux_2' - units = 'mmol C m-2 s-1' - attname = 'Fioi_doc2' - call metadata_set(attname, longname, stdname, units) - - ! 
Sea ice dissolved organic carbon flux 3 - tbd? - call seq_flds_add(i2x_fluxes,"Fioi_doc3") - call seq_flds_add(x2o_fluxes,"Fioi_doc3") - longname = 'Sea ice dissolved organic carbon flux 3 - tbd' - stdname = 'seaice_dissolved_organic_carbon_flux_3' - units = 'mmol C m-2 s-1' - attname = 'Fioi_doc3' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice dissolved inorganic carbon flux 1 - call seq_flds_add(i2x_fluxes,"Fioi_dic1") - call seq_flds_add(x2o_fluxes,"Fioi_dic1") - longname = 'Sea ice dissolved inorganic carbon flux 1' - stdname = 'seaice_dissolved_inorganic_carbon_flux_1' - units = 'mmol C m-2 s-1' - attname = 'Fioi_dic1' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice dissolved organic nitrogen flux 1 - call seq_flds_add(i2x_fluxes,"Fioi_don1") - call seq_flds_add(x2o_fluxes,"Fioi_don1") - longname = 'Sea ice dissolved organic nitrogen flux 1' - stdname = 'seaice_dissolved_organic_nitrogen_flux_1' - units = 'mmol N m-2 s-1' - attname = 'Fioi_don1' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice nitrate flux - call seq_flds_add(i2x_fluxes,"Fioi_no3") - call seq_flds_add(x2o_fluxes,"Fioi_no3") - longname = 'Sea ice nitrate flux' - stdname = 'seaice_nitrate_flux' - units = 'mmol N m-2 s-1' - attname = 'Fioi_no3' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice silicate flux - call seq_flds_add(i2x_fluxes,"Fioi_sio3") - call seq_flds_add(x2o_fluxes,"Fioi_sio3") - longname = 'Sea ice silicate flux' - stdname = 'seaice_silicate_flux' - units = 'mmol Si m-2 s-1' - attname = 'Fioi_sio3' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice ammonium flux - call seq_flds_add(i2x_fluxes,"Fioi_nh4") - call seq_flds_add(x2o_fluxes,"Fioi_nh4") - longname = 'Sea ice ammonium flux' - stdname = 'seaice_ammonium_flux' - units = 'mmol N m-2 s-1' - attname = 'Fioi_nh4' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice dimethyl sulfide (DMS) flux - call seq_flds_add(i2x_fluxes,"Fioi_dms") - call seq_flds_add(x2o_fluxes,"Fioi_dms") - longname = 'Sea ice dimethyl sulfide flux' - stdname = 'seaice_dimethyl_sulfide_flux' - units = 'mmol S m-2 s-1' - attname = 'Fioi_dms' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice DMSPp flux - call seq_flds_add(i2x_fluxes,"Fioi_dmspp") - call seq_flds_add(x2o_fluxes,"Fioi_dmspp") - longname = 'Sea ice DMSPp flux' - stdname = 'seaice_DMSPp_flux' - units = 'mmol S m-2 s-1' - attname = 'Fioi_dmspp' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice DMSPd flux - call seq_flds_add(i2x_fluxes,"Fioi_dmspd") - call seq_flds_add(x2o_fluxes,"Fioi_dmspd") - longname = 'Sea ice DMSPd flux' - stdname = 'seaice_DMSPd_flux' - units = 'mmol S m-2 s-1' - attname = 'Fioi_dmspd' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice DOCr flux - call seq_flds_add(i2x_fluxes,"Fioi_docr") - call seq_flds_add(x2o_fluxes,"Fioi_docr") - longname = 'Sea ice DOCr flux' - stdname = 'seaice_DOCr_flux' - units = 'mmol C m-2 s-1' - attname = 'Fioi_docr' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice particulate iron flux 1 - call seq_flds_add(i2x_fluxes,"Fioi_fep1") - call seq_flds_add(x2o_fluxes,"Fioi_fep1") - longname = 'Sea ice particulate iron flux 1' - stdname = 'seaice_particulate_iron_flux_1' - units = 'umol Fe m-2 s-1' - attname = 'Fioi_fep1' - call metadata_set(attname, longname, stdname, units) - - ! 
Sea ice particulate iron flux 2 - call seq_flds_add(i2x_fluxes,"Fioi_fep2") - call seq_flds_add(x2o_fluxes,"Fioi_fep2") - longname = 'Sea ice particulate iron flux 2' - stdname = 'seaice_particulate_iron_flux_2' - units = 'umol Fe m-2 s-1' - attname = 'Fioi_fep2' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice dissolved iron flux 1 - call seq_flds_add(i2x_fluxes,"Fioi_fed1") - call seq_flds_add(x2o_fluxes,"Fioi_fed1") - longname = 'Sea ice dissolved iron flux 1' - stdname = 'seaice_dissolved_iron_flux_1' - units = 'umol Fe m-2 s-1' - attname = 'Fioi_fed1' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice dissolved iron flux 2 - call seq_flds_add(i2x_fluxes,"Fioi_fed2") - call seq_flds_add(x2o_fluxes,"Fioi_fed2") - longname = 'Sea ice dissolved iron flux 2' - stdname = 'seaice_dissolved_iron_flux_2' - units = 'umol Fe m-2 s-1' - attname = 'Fioi_fed2' - call metadata_set(attname, longname, stdname, units) - - ! Sea ice iron dust - call seq_flds_add(i2x_fluxes,"Fioi_dust1") - call seq_flds_add(x2o_fluxes,"Fioi_dust1") - longname = 'Sea ice iron dust 1' - stdname = 'seaice_iron_dust_1' - units = 'kg m-2 s-1' - attname = 'Fioi_dust1' - call metadata_set(attname, longname, stdname, units) - - endif - - - !----------------------------- - ! lnd->rof exchange - ! TODO: put in attributes below - !----------------------------- - - call seq_flds_add(l2x_fluxes,'Flrl_rofsur') - call seq_flds_add(x2r_fluxes,'Flrl_rofsur') - longname = 'Water flux from land (liquid surface)' - stdname = 'water_flux_into_runoff_surface' - units = 'kg m-2 s-1' - attname = 'Flrl_rofsur' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes,'Flrl_rofgwl') - call seq_flds_add(x2r_fluxes,'Flrl_rofgwl') - longname = 'Water flux from land (liquid glacier, wetland, and lake)' - stdname = 'water_flux_into_runoff_from_gwl' - units = 'kg m-2 s-1' - attname = 'Flrl_rofgwl' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes,'Flrl_rofsub') - call seq_flds_add(x2r_fluxes,'Flrl_rofsub') - longname = 'Water flux from land (liquid subsurface)' - stdname = 'water_flux_into_runoff_subsurface' - units = 'kg m-2 s-1' - attname = 'Flrl_rofsub' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes,'Flrl_rofdto') - call seq_flds_add(x2r_fluxes,'Flrl_rofdto') - longname = 'Water flux from land direct to ocean' - stdname = 'water_flux_direct_to_ocean' - units = 'kg m-2 s-1' - attname = 'Flrl_rofdto' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes,'Flrl_rofi') - call seq_flds_add(x2r_fluxes,'Flrl_rofi') - longname = 'Water flux from land (frozen)' - stdname = 'frozen_water_flux_into_runoff' - units = 'kg m-2 s-1' - attname = 'Flrl_rofi' - call metadata_set(attname, longname, stdname, units) - - ! Currently only the CESM land and runoff models treat irrigation as a separate - ! field: in E3SM, this field is folded in to the other runoff fields. Eventually, - ! E3SM may want to update its land and runoff models to map irrigation specially, as - ! CESM does. - ! - ! (Once E3SM is using this irrigation field, all that needs to be done is to remove - ! this conditional: Code in other places in the coupler is written to trigger off of - ! whether Flrl_irrig has been added to the field list, so it should Just Work if this - ! conditional is removed.) - if (trim(cime_model) == 'cesm') then - ! 
Irrigation flux (land/rof only) - call seq_flds_add(l2x_fluxes,"Flrl_irrig") - call seq_flds_add(x2r_fluxes,"Flrl_irrig") - longname = 'Irrigation flux (withdrawal from rivers)' - stdname = 'irrigation' - units = 'kg m-2 s-1' - attname = 'Flrl_irrig' - call metadata_set(attname, longname, stdname, units) - end if - - !----------------------------- - ! rof->ocn (runoff) and rof->lnd (flooding) - !----------------------------- - - call seq_flds_add(r2x_fluxes,'Forr_rofl') - call seq_flds_add(x2o_fluxes,'Foxx_rofl') - call seq_flds_add(r2o_liq_fluxes,'Forr_rofl') - longname = 'Water flux due to runoff (liquid)' - stdname = 'water_flux_into_sea_water' - units = 'kg m-2 s-1' - attname = 'Forr_rofl' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_rofl' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(r2x_fluxes,'Forr_rofi') - call seq_flds_add(x2o_fluxes,'Foxx_rofi') - call seq_flds_add(r2o_ice_fluxes,'Forr_rofi') - longname = 'Water flux due to runoff (frozen)' - stdname = 'frozen_water_flux_into_sea_water' - units = 'kg m-2 s-1' - attname = 'Forr_rofi' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_rofi' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(r2x_fluxes,'Firr_rofi') - call seq_flds_add(x2i_fluxes,'Fixx_rofi') - longname = 'Water flux due to runoff (frozen)' - stdname = 'frozen_water_flux_into_sea_ice' - units = 'kg m-2 s-1' - attname = 'Firr_rofi' - call metadata_set(attname, longname, stdname, units) - attname = 'Fixx_rofi' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(r2x_fluxes,'Flrr_flood') - call seq_flds_add(x2l_fluxes,'Flrr_flood') - longname = 'Waterrflux due to flooding' - stdname = 'flooding_water_flux' - units = 'kg m-2 s-1' - attname = 'Flrr_flood' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(r2x_fluxes,'Flrr_volr') - call seq_flds_add(x2l_fluxes,'Flrr_volr') - longname = 'River channel total water volume' - stdname = 'rtm_volr' - units = 'm' - attname = 'Flrr_volr' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(r2x_fluxes,'Flrr_volrmch') - call seq_flds_add(x2l_fluxes,'Flrr_volrmch') - longname = 'River channel main channel water volume' - stdname = 'rtm_volrmch' - units = 'm' - attname = 'Flrr_volrmch' - call metadata_set(attname, longname, stdname, units) - - !----------------------------- - ! 
wav->ocn and ocn->wav - !----------------------------- - - call seq_flds_add(w2x_states,'Sw_lamult') - call seq_flds_add(x2o_states,'Sw_lamult') - longname = 'Langmuir multiplier' - stdname = 'wave_model_langmuir_multiplier' - units = '' - attname = 'Sw_lamult' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(w2x_states,'Sw_ustokes') - call seq_flds_add(x2o_states,'Sw_ustokes') - longname = 'Stokes drift u component' - stdname = 'wave_model_stokes_drift_eastward_velocity' - units = 'm/s' - attname = 'Sw_ustokes' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(w2x_states,'Sw_vstokes') - call seq_flds_add(x2o_states,'Sw_vstokes') - longname = 'Stokes drift v component' - stdname = 'wave_model_stokes_drift_northward_velocity' - units = 'm/s' - attname = 'Sw_vstokes' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(w2x_states,'Sw_hstokes') - call seq_flds_add(x2o_states,'Sw_hstokes') - longname = 'Stokes drift depth' - stdname = 'wave_model_stokes_drift_depth' - units = 'm' - attname = 'Sw_hstokes' - call metadata_set(attname, longname, stdname, units) - - !----------------------------- - ! New xao_states diagnostic - ! fields for history output only - !----------------------------- - - call seq_flds_add(xao_fluxes,"Faox_swdn") - longname = 'Downward solar radiation' - stdname = 'surface_downward_shortwave_flux' - units = 'W m-2' - attname = 'Faox_swdn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_fluxes,"Faox_swup") - longname = 'Upward solar radiation' - stdname = 'surface_upward_shortwave_flux' - units = 'W m-2' - attname = 'Faox_swup' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_tbulk_diurn") - longname = 'atm/ocn flux temperature bulk' - stdname = 'aoflux_tbulk' - units = 'K' - attname = 'So_tbulk_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_tskin_diurn") - longname = 'atm/ocn flux temperature skin' - stdname = 'aoflux_tskin' - units = 'K' - attname = 'So_tskin_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_tskin_night_diurn") - longname = 'atm/ocn flux temperature skin at night' - stdname = 'aoflux_tskin_night' - units = 'K' - attname = 'So_tskin_night_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_tskin_day_diurn") - longname = 'atm/ocn flux temperature skin at day' - stdname = 'aoflux_tskin_day' - units = 'K' - attname = 'So_tskin_day_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_cskin_diurn") - longname = 'atm/ocn flux cool skin' - stdname = 'aoflux_cskin' - units = 'K' - attname = 'So_cskin_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_cskin_night_diurn") - longname = 'atm/ocn flux cool skin at night' - stdname = 'aoflux_cskin_night' - units = 'K' - attname = 'So_cskin_night_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_warm_diurn") - longname = 'atm/ocn flux warming' - stdname = 'aoflux_warm' - units = 'unitless' - attname = 'So_warm_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_salt_diurn") - longname = 'atm/ocn flux salting' - stdname = 'aoflux_salt' - units = 'unitless' - attname = 'So_salt_diurn' - call metadata_set(attname, longname, stdname, units) 
- - call seq_flds_add(xao_diurnl,"So_speed_diurn") - longname = 'atm/ocn flux speed' - stdname = 'aoflux_speed' - units = 'unitless' - attname = 'So_speed_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_regime_diurn") - longname = 'atm/ocn flux regime' - stdname = 'aoflux_regime' - units = 'unitless' - attname = 'So_regime_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_warmmax_diurn") - longname = 'atm/ocn flux warming dialy max' - stdname = 'aoflux_warmmax' - units = 'unitless' - attname = 'So_warmmax_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_windmax_diurn") - longname = 'atm/ocn flux wind daily max' - stdname = 'aoflux_windmax' - units = 'unitless' - attname = 'So_windmax_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_qsolavg_diurn") - longname = 'atm/ocn flux q-solar daily avg' - stdname = 'aoflux_qsolavg' - units = 'unitless' - attname = 'So_qsolavg_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_windavg_diurn") - longname = 'atm/ocn flux wind daily avg' - stdname = 'aoflux_windavg' - units = 'unitless' - attname = 'So_windavg_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_warmmaxinc_diurn") - longname = 'atm/ocn flux daily max increment' - stdname = 'aoflux_warmmaxinc' - units = 'unitless' - attname = 'So_warmmaxinc_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_windmaxinc_diurn") - longname = 'atm/ocn flux wind daily max increment' - stdname = 'aoflux_windmaxinc' - units = 'unitless' - attname = 'So_windmaxinc_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_qsolinc_diurn") - longname = 'atm/ocn flux q-solar increment' - stdname = 'aoflux_qsolinc' - units = 'unitless' - attname = 'So_qsolinc_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_windinc_diurn") - longname = 'atm/ocn flux wind increment' - stdname = 'aoflux_windinc' - units = 'unitless' - attname = 'So_windinc_diurn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(xao_diurnl,"So_ninc_diurn") - longname = 'atm/ocn flux increment counter' - stdname = 'aoflux_ninc' - units = 'unitless' - attname = 'So_ninc_diurn' - call metadata_set(attname, longname, stdname, units) - - !----------------------------- - ! glc fields - !----------------------------- - - name = 'Fogg_rofl' - call seq_flds_add(g2x_fluxes,trim(name)) - ! Don't need to add this to x2o_fluxes, because Foxx_rofl is already added in the - ! course of adding Forr_rofl - call seq_flds_add(g2o_liq_fluxes,trim(name)) - longname = 'glc liquid runoff flux to ocean' - stdname = 'glacier_liquid_runoff_flux_to_ocean' - units = 'kg m-2 s-1' - attname = 'Fogg_rofl' - call metadata_set(attname, longname, stdname, units) - - name = 'Fogg_rofi' - call seq_flds_add(g2x_fluxes,trim(name)) - ! Don't need to add this to x2o_fluxes, because Foxx_rofi is already added in the - ! 
course of adding Forr_rofi - call seq_flds_add(g2o_ice_fluxes,trim(name)) - longname = 'glc frozen runoff flux to ocean' - stdname = 'glacier_frozen_runoff_flux_to_ocean' - units = 'kg m-2 s-1' - attname = 'Fogg_rofi' - call metadata_set(attname, longname, stdname, units) - - name = 'Figg_rofi' - call seq_flds_add(g2x_fluxes,trim(name)) - ! Don't need to add this to x2i_fluxes, because Fixx_rofi is already added in the - ! course of adding Firr_rofi - longname = 'glc frozen runoff_iceberg flux to ice' - stdname = 'glacier_frozen_runoff_flux_to_seaice' - units = 'kg m-2 s-1' - attname = 'Figg_rofi' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_icemask' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(g2x_states_to_lnd,trim(name)) - call seq_flds_add(x2l_states,trim(name)) - call seq_flds_add(x2l_states_from_glc,trim(name)) - if (trim(cime_model) == 'e3sm') then - call seq_flds_add(x2o_states,trim(name)) - endif - longname = 'Ice sheet grid coverage on global grid' - stdname = 'ice_sheet_grid_mask' - units = '1' - attname = 'Sg_icemask' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_icemask_coupled_fluxes' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(g2x_states_to_lnd,trim(name)) - call seq_flds_add(x2l_states,trim(name)) - call seq_flds_add(x2l_states_from_glc,trim(name)) - longname = 'Ice sheet mask where we are potentially sending non-zero fluxes' - stdname = 'icemask_coupled_fluxes' - units = '1' - attname = 'Sg_icemask_coupled_fluxes' - call metadata_set(attname, longname, stdname, units) - - ! glc fields with multiple elevation classes: lnd->glc - ! - ! Note that these fields are sent in multiple elevation classes from lnd->cpl, but - ! the fields sent from cpl->glc do NOT have elevation classes - ! - ! Also note that we need to keep track of the l2x fields destined for glc in the - ! additional variables, l2x_fluxes_to_glc and l2x_states_to_glc. This is needed so that - ! we can set up an additional attribute vector holding accumulated quantities of just - ! these fields. (We can't determine these field lists with a call to - ! mct_aVect_initSharedFields, because the field names differ between l2x and x2g.) - - name = 'Flgl_qice' - longname = 'New glacier ice flux' - stdname = 'ice_flux_out_of_glacier' - units = 'kg m-2 s-1' - attname = 'Flgl_qice' - call set_glc_elevclass_field(name, attname, longname, stdname, units, l2x_fluxes) - call set_glc_elevclass_field(name, attname, longname, stdname, units, l2x_fluxes_to_glc, & - additional_list = .true.) - call seq_flds_add(x2g_fluxes,trim(name)) - call seq_flds_add(x2g_fluxes_from_lnd,trim(name)) - call metadata_set(attname, longname, stdname, units) - - name = 'Sl_tsrf' - longname = 'Surface temperature of glacier' - stdname = 'surface_temperature' - units = 'deg C' - attname = 'Sl_tsrf' - call set_glc_elevclass_field(name, attname, longname, stdname, units, l2x_states) - call set_glc_elevclass_field(name, attname, longname, stdname, units, l2x_states_to_glc, & - additional_list = .true.) - call seq_flds_add(x2g_states,trim(name)) - call seq_flds_add(x2g_states_from_lnd,trim(name)) - call metadata_set(attname, longname, stdname, units) - - ! Sl_topo is sent from lnd -> cpl, but is NOT sent to glc (it is only used for the - ! 
remapping in the coupler) - name = 'Sl_topo' - longname = 'Surface height' - stdname = 'height' - units = 'm' - attname = 'Sl_topo' - call set_glc_elevclass_field(name, attname, longname, stdname, units, l2x_states) - call set_glc_elevclass_field(name, attname, longname, stdname, units, l2x_states_to_glc, & - additional_list = .true.) - - ! glc fields with multiple elevation classes: glc->lnd - ! - ! Note that the fields sent from glc->cpl do NOT have elevation classes, but the - ! fields from cpl->lnd are broken into multiple elevation classes - - name = 'Sg_ice_covered' - longname = 'Fraction of glacier area' - stdname = 'glacier_area_fraction' - units = '1' - attname = 'Sg_ice_covered' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(g2x_states_to_lnd,trim(name)) - call metadata_set(attname, longname, stdname, units) - call set_glc_elevclass_field(name, attname, longname, stdname, units, x2l_states) - call set_glc_elevclass_field(name, attname, longname, stdname, units, x2l_states_from_glc, & - additional_list = .true.) - - name = 'Sg_topo' - longname = 'Surface height of glacier' - stdname = 'height' - units = 'm' - attname = 'Sg_topo' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(g2x_states_to_lnd,trim(name)) - call metadata_set(attname, longname, stdname, units) - call set_glc_elevclass_field(name, attname, longname, stdname, units, x2l_states) - call set_glc_elevclass_field(name, attname, longname, stdname, units, x2l_states_from_glc, & - additional_list = .true.) - - name = 'Flgg_hflx' - longname = 'Downward heat flux from glacier interior' - stdname = 'downward_heat_flux_in_glacier' - units = 'W m-2' - attname = 'Flgg_hflx' - call seq_flds_add(g2x_fluxes,trim(name)) - call seq_flds_add(g2x_fluxes_to_lnd,trim(name)) - call metadata_set(attname, longname, stdname, units) - call set_glc_elevclass_field(name, attname, longname, stdname, units, x2l_fluxes) - call set_glc_elevclass_field(name, attname, longname, stdname, units, x2l_fluxes_from_glc, & - additional_list = .true.) 
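[Editor's illustrative note, not part of the patch above.] The comments in this hunk explain that lnd->cpl fields such as Flgl_qice, Sl_tsrf, and Sl_topo are carried in multiple glacier elevation classes, while the corresponding cpl->glc fields are not. The following minimal Fortran sketch shows one way a helper like set_glc_elevclass_field is commonly understood to expand a base name into per-class field names (e.g. 'Flgl_qice00', 'Flgl_qice01', ...); the subroutine name, the glc_nec variable, and the two-digit suffix convention are assumptions for illustration only and are not taken from this diff.

  subroutine set_glc_elevclass_field_sketch(name, longname, stdname, units, fldlist)
    ! Illustrative sketch: register one copy of a field per glacier elevation class,
    ! using a two-digit suffix appended to the base field name (class 00 = bare land).
    character(len=*), intent(in)    :: name, longname, stdname, units
    character(len=*), intent(inout) :: fldlist
    integer           :: n
    character(len=2)  :: suffix
    character(len=64) :: fldname

    do n = 0, glc_nec     ! glc_nec assumed to hold the number of elevation classes
       write(suffix,'(i2.2)') n
       fldname = trim(name)//suffix                      ! e.g. 'Flgl_qice01'
       call seq_flds_add(fldlist, trim(fldname))
       call metadata_set(trim(fldname), trim(longname)//' in elevation class '//suffix, &
                         stdname, units)
    end do
  end subroutine set_glc_elevclass_field_sketch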
- - if (trim(cime_model) == 'e3sm') then - name = 'So_blt' - call seq_flds_add(o2x_states,trim(name)) - call seq_flds_add(x2g_states,trim(name)) - call seq_flds_add(x2g_states_from_ocn,trim(name)) - longname = 'Ice shelf boundary layer ocean temperature' - stdname = 'Ice_shelf_boundary_layer_ocean_temperature' - units = 'C' - attname = 'So_blt' - call metadata_set(attname, longname, stdname, units) - - name = 'So_bls' - call seq_flds_add(o2x_states,trim(name)) - call seq_flds_add(x2g_states,trim(name)) - call seq_flds_add(x2g_states_from_ocn,trim(name)) - longname = 'Ice shelf boundary layer ocean salinity' - stdname = 'Ice_shelf_boundary_layer_ocean_salinity' - units = 'psu' - attname = 'So_bls' - call metadata_set(attname, longname, stdname, units) - - name = 'So_htv' - call seq_flds_add(o2x_states,trim(name)) - call seq_flds_add(x2g_states,trim(name)) - call seq_flds_add(x2g_states_from_ocn,trim(name)) - longname = 'Ice shelf ocean heat transfer velocity' - stdname = 'Ice_shelf_ocean_heat_transfer_velocity' - units = 'm/s' - attname = 'So_htv' - call metadata_set(attname, longname, stdname, units) - - name = 'So_stv' - call seq_flds_add(o2x_states,trim(name)) - call seq_flds_add(x2g_states,trim(name)) - call seq_flds_add(x2g_states_from_ocn,trim(name)) - longname = 'Ice shelf ocean salinity transfer velocity' - stdname = 'Ice_shelf_ocean_salinity_transfer_velocity' - units = 'm/s' - attname = 'So_stv' - call metadata_set(attname, longname, stdname, units) - - name = 'So_rhoeff' - call seq_flds_add(o2x_states,trim(name)) - call seq_flds_add(x2g_states,trim(name)) - call seq_flds_add(x2g_states_from_ocn,trim(name)) - longname = 'Ocean effective pressure' - stdname = 'Ocean_effective_pressure' - units = 'Pa' - attname = 'So_rhoeff' - call metadata_set(attname, longname, stdname, units) - - name = 'Fogx_qicelo' - call seq_flds_add(g2x_fluxes,trim(name)) - call seq_flds_add(x2o_fluxes,trim(name)) - longname = 'Subshelf liquid flux for ocean' - stdname = 'Subshelf_liquid_flux_for_ocean' - units = 'kg m-2 s-1' - attname = 'Fogx_qicelo' - call metadata_set(attname, longname, stdname, units) - - name = 'Fogx_qiceho' - call seq_flds_add(g2x_fluxes,trim(name)) - call seq_flds_add(x2o_fluxes,trim(name)) - longname = 'Subshelf heat flux for the ocean' - stdname = 'Subshelf_heat_flux_for_the_ocean' - units = 'W m-2' - attname = 'Fogx_qiceho' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_blit' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(x2o_states,trim(name)) - longname = 'Boundary layer interface temperature for ocean' - stdname = 'Boundary_layer_interface_temperature_for_ocean' - units = 'C' - attname = 'Sg_blit' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_blis' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(x2o_states,trim(name)) - longname = 'Boundary layer interface salinity for ocean' - stdname = 'Boundary_layer_interface_salinity_for_ocean' - units = 'psu' - attname = 'Sg_blis' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_lithop' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(x2o_states,trim(name)) - longname = 'Ice sheet lithostatic pressure' - stdname = 'Ice_sheet_lithostatic_pressure' - units = 'Pa' - attname = 'Sg_lithop' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_icemask_grounded' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(x2o_states,trim(name)) - longname = 'Grounded ice mask' - stdname = 'Grounded_ice_mask' - units = 
'unitless' - attname = 'Sg_icemask_grounded' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_icemask_floating' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(x2o_states,trim(name)) - longname = 'Floating ice mask' - stdname = 'Floating_ice_mask' - units = 'unitless' - attname = 'Sg_icemask_floating' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_tbot' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(x2o_states,trim(name)) - longname = 'Bottom layer ice temperature' - stdname = 'Bottom_layer_ice_temperature' - units = 'C' - attname = 'Sg_tbot' - call metadata_set(attname, longname, stdname, units) - - name = 'Sg_dztbot' - call seq_flds_add(g2x_states,trim(name)) - call seq_flds_add(x2o_states,trim(name)) - longname = 'Bottom layer ice layer half thickness' - stdname = 'Bottom_layer_ice_layer_half_thickness' - units = 'm' - attname = 'Sg_dztbot' - call metadata_set(attname, longname, stdname, units) - - name = 'Fogx_qiceli' - call seq_flds_add(x2g_fluxes,trim(name)) - longname = 'Subshelf mass flux for ice sheet' - stdname = 'Subshelf_mass_flux_for_ice_sheet' - units = 'kg m-2 s-1' - attname = 'Fogx_qiceli' - call metadata_set(attname, longname, stdname, units) - - name = 'Fogx_qicehi' - call seq_flds_add(x2g_fluxes,trim(name)) - longname = 'Subshelf heat flux for ice sheet' - stdname = 'Subshelf_heat_flux_for_ice_sheet' - units = 'W m-2' - attname = 'Fogx_qicehi' - call metadata_set(attname, longname, stdname, units) - endif - - ! Done glc fields - - if (flds_co2a) then - - call seq_flds_add(a2x_states, "Sa_co2prog") - call seq_flds_add(x2l_states, "Sa_co2prog") - call seq_flds_add(x2o_states, "Sa_co2prog") - longname = 'Prognostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2prog' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_states, "Sa_co2diag") - call seq_flds_add(x2l_states, "Sa_co2diag") - call seq_flds_add(x2o_states, "Sa_co2diag") - longname = 'Diagnostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2diag' - call metadata_set(attname, longname, stdname, units) - - else if (flds_co2b) then - - call seq_flds_add(a2x_states, "Sa_co2prog") - call seq_flds_add(x2l_states, "Sa_co2prog") - longname = 'Prognostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2prog' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_states, "Sa_co2diag") - call seq_flds_add(x2l_states, "Sa_co2diag") - longname = 'Diagnostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2diag' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes, "Fall_fco2_lnd") - call seq_flds_add(x2a_fluxes, "Fall_fco2_lnd") - longname = 'Surface flux of CO2 from land' - stdname = 'surface_upward_flux_of_carbon_dioxide_where_land' - units = 'moles m-2 s-1' - attname = 'Fall_fco2_lnd' - call metadata_set(attname, longname, stdname, units) - - else if (flds_co2c) then - - call seq_flds_add(a2x_states, "Sa_co2prog") - call seq_flds_add(x2l_states, "Sa_co2prog") - call seq_flds_add(x2o_states, "Sa_co2prog") - longname = 'Prognostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2prog' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_states, "Sa_co2diag") - call seq_flds_add(x2l_states, "Sa_co2diag") - call 
seq_flds_add(x2o_states, "Sa_co2diag") - longname = 'Diagnostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2diag' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes, "Fall_fco2_lnd") - call seq_flds_add(x2a_fluxes, "Fall_fco2_lnd") - longname = 'Surface flux of CO2 from land' - stdname = 'surface_upward_flux_of_carbon_dioxide_where_land' - units = 'moles m-2 s-1' - attname = 'Fall_fco2_lnd' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(o2x_fluxes, "Faoo_fco2_ocn") - call seq_flds_add(x2a_fluxes, "Faoo_fco2_ocn") - longname = 'Surface flux of CO2 from ocean' - stdname = 'surface_upward_flux_of_carbon_dioxide_where_open_sea' - units = 'moles m-2 s-1' - attname = 'Faoo_fco2_ocn' - call metadata_set(attname, longname, stdname, units) - - else if (flds_co2_dmsa) then - - call seq_flds_add(a2x_states, "Sa_co2prog") - call seq_flds_add(x2l_states, "Sa_co2prog") - longname = 'Prognostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2prog' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_states, "Sa_co2diag") - call seq_flds_add(x2l_states, "Sa_co2diag") - longname = 'Diagnostic CO2 at the lowest model level' - stdname = '' - units = '1e-6 mol/mol' - attname = 'Sa_co2diag' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(o2x_fluxes, "Faoo_fdms_ocn") - call seq_flds_add(x2a_fluxes, "Faoo_fdms_ocn") - longname = 'Surface flux of DMS' - stdname = 'surface_upward_flux_of_dimethyl_sulfide' - units = 'moles m-2 s-1' - attname = 'Faoo_fdms_ocn' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes, "Fall_fco2_lnd") - call seq_flds_add(x2a_fluxes, "Fall_fco2_lnd") - longname = 'Surface flux of CO2 from land' - stdname = 'surface_upward_flux_of_carbon_dioxide_where_land' - units = 'moles m-2 s-1' - attname = 'Fall_fco2_lnd' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(o2x_fluxes, "Faoo_fco2_ocn") - call seq_flds_add(x2a_fluxes, "Faoo_fco2_ocn") - longname = 'Surface flux of CO2 from ocean' - stdname = 'surface_upward_flux_of_carbon_dioxide_where_open_sea' - units = 'moles m-2 s-1' - attname = 'Faoo_fco2_ocn' - call metadata_set(attname, longname, stdname, units) - - endif - - if (flds_wiso) then - call seq_flds_add(o2x_states, "So_roce_16O") - call seq_flds_add(x2i_states, "So_roce_16O") - longname = 'Ratio of ocean surface level abund. H2_16O/H2O/Rstd' - stdname = '' - units = ' ' - attname = 'So_roce_16O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(o2x_states, "So_roce_18O") - call seq_flds_add(x2i_states, "So_roce_18O") - longname = 'Ratio of ocean surface level abund. H2_18O/H2O/Rstd' - attname = 'So_roce_18O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(o2x_states, "So_roce_HDO") - call seq_flds_add(x2i_states, "So_roce_HDO") - longname = 'Ratio of ocean surface level abund. HDO/H2O/Rstd' - attname = 'So_roce_HDO' - call metadata_set(attname, longname, stdname, units) - - !-------------------------------------------- - !Atmospheric specific humidity at lowest level: - !-------------------------------------------- - - ! 
specific humidity of H216O at the lowest model level (kg/kg) - call seq_flds_add(a2x_states,"Sa_shum_16O") - call seq_flds_add(x2l_states,"Sa_shum_16O") - call seq_flds_add(x2i_states,"Sa_shum_16O") - longname = 'Specific humidty of H216O at the lowest model level' - stdname = 'H216OV' - units = 'kg kg-1' - attname = 'Sa_shum_16O' - call metadata_set(attname, longname, stdname, units) - - ! specific humidity of HD16O at the lowest model level (kg/kg) - call seq_flds_add(a2x_states,"Sa_shum_HDO") - call seq_flds_add(x2l_states,"Sa_shum_HDO") - call seq_flds_add(x2i_states,"Sa_shum_HDO") - longname = 'Specific humidty of HD16O at the lowest model level' - stdname = 'HD16OV' - attname = 'Sa_shum_HDO' - call metadata_set(attname, longname, stdname, units) - - ! specific humidity of H218O at the lowest model level (kg/kg) - call seq_flds_add(a2x_states,"Sa_shum_18O") - call seq_flds_add(x2l_states,"Sa_shum_18O") - call seq_flds_add(x2i_states,"Sa_shum_18O") - longname = 'Specific humidty of H218O at the lowest model level' - stdname = 'H218OV' - attname = 'Sa_shum_18O' - call metadata_set(attname, longname, stdname, units) - - ! Surface snow water equivalent (land/atm only) - call seq_flds_add(l2x_states,"Sl_snowh_16O") - call seq_flds_add(l2x_states,"Sl_snowh_18O") - call seq_flds_add(l2x_states,"Sl_snowh_HDO") - call seq_flds_add(x2a_states,"Sl_snowh_16O") - call seq_flds_add(x2a_states,"Sl_snowh_18O") - call seq_flds_add(x2a_states,"Sl_snowh_HDO") - longname = 'Isotopic surface snow water equivalent' - stdname = 'surface_snow_water_equivalent' - units = 'm' - attname = 'Sl_snowh_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_snowh_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_snowh_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_snowh_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_snowh_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_snowh_HDO' - call metadata_set(attname, longname, stdname, units) - - !-------------- - !Isotopic Rain: - !-------------- - - !Isotopic Precipitation Fluxes: - units = 'kg m-2 s-1' - call seq_flds_add(a2x_fluxes,"Faxa_rainc_16O") - call seq_flds_add(a2x_fluxes,"Faxa_rainl_16O") - call seq_flds_add(x2o_fluxes, "Faxa_rain_16O") - call seq_flds_add(x2l_fluxes,"Faxa_rainc_16O") - call seq_flds_add(x2l_fluxes,"Faxa_rainl_16O") - call seq_flds_add(x2i_fluxes, "Faxa_rain_16O") - longname = 'Water flux due to H216O rain' !equiv. 
to bulk - stdname = 'H2_16O_rainfall_flux' - attname = 'Faxa_rain_16O' - call metadata_set(attname, longname, stdname, units) - longname = 'H216O Convective precipitation rate' - stdname = 'H2_16O_convective_precipitation_flux' - attname = 'Faxa_rainc_16O' - call metadata_set(attname, longname, stdname, units) - longname = 'H216O Large-scale (stable) precipitation rate' - stdname = 'H2_16O_large_scale_precipitation_flux' - attname = 'Faxa_rainl_16O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_fluxes,"Faxa_rainc_18O") - call seq_flds_add(a2x_fluxes,"Faxa_rainl_18O") - call seq_flds_add(x2o_fluxes, "Faxa_rain_18O") - call seq_flds_add(x2l_fluxes,"Faxa_rainc_18O") - call seq_flds_add(x2l_fluxes,"Faxa_rainl_18O") - call seq_flds_add(x2i_fluxes, "Faxa_rain_18O") - longname = 'Water flux due to H218O rain' - stdname = 'h2_18o_rainfall_flux' - attname = 'Faxa_rain_18O' - call metadata_set(attname, longname, stdname, units) - longname = 'H218O Convective precipitation rate' - stdname = 'H2_18O_convective_precipitation_flux' - attname = 'Faxa_rainc_18O' - call metadata_set(attname, longname, stdname, units) - longname = 'H218O Large-scale (stable) precipitation rate' - stdname = 'H2_18O_large_scale_precipitation_flux' - attname = 'Faxa_rainl_18O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_fluxes,"Faxa_rainc_HDO") - call seq_flds_add(a2x_fluxes,"Faxa_rainl_HDO") - call seq_flds_add(x2o_fluxes, "Faxa_rain_HDO") - call seq_flds_add(x2l_fluxes,"Faxa_rainc_HDO") - call seq_flds_add(x2l_fluxes,"Faxa_rainl_HDO") - call seq_flds_add(x2i_fluxes, "Faxa_rain_HDO") - longname = 'Water flux due to HDO rain' - stdname = 'hdo_rainfall_flux' - attname = 'Faxa_rain_HDO' - call metadata_set(attname, longname, stdname, units) - longname = 'HDO Convective precipitation rate' - stdname = 'HDO_convective_precipitation_flux' - attname = 'Faxa_rainc_HDO' - call metadata_set(attname, longname, stdname, units) - longname = 'HDO Large-scale (stable) precipitation rate' - stdname = 'HDO_large_scale_precipitation_flux' - attname = 'Faxa_rainl_HDO' - call metadata_set(attname, longname, stdname, units) - - !------------- - !Isotopic snow: - !------------- - - call seq_flds_add(a2x_fluxes,"Faxa_snowc_16O") - call seq_flds_add(a2x_fluxes,"Faxa_snowl_16O") - call seq_flds_add(x2o_fluxes, "Faxa_snow_16O") - call seq_flds_add(x2l_fluxes,"Faxa_snowc_16O") - call seq_flds_add(x2l_fluxes,"Faxa_snowl_16O") - call seq_flds_add(x2i_fluxes, "Faxa_snow_16O") - longname = 'Water equiv. H216O snow flux' - stdname = 'h2_16o_snowfall_flux' - attname = 'Faxa_snow_16O' - call metadata_set(attname, longname, stdname, units) - longname = 'H2_16O Convective snow rate (water equivalent)' - stdname = 'H2_16O_convective_snowfall_flux' - attname = 'Faxa_snowc_16O' - call metadata_set(attname, longname, stdname, units) - longname = 'H2_16O Large-scale (stable) snow rate (water equivalent)' - stdname = 'H2_16O_large_scale_snowfall_flux' - attname = 'Faxa_snowl_16O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_fluxes,"Faxa_snowc_18O") - call seq_flds_add(a2x_fluxes,"Faxa_snowl_18O") - call seq_flds_add(x2o_fluxes, "Faxa_snow_18O") - call seq_flds_add(x2l_fluxes,"Faxa_snowc_18O") - call seq_flds_add(x2l_fluxes,"Faxa_snowl_18O") - call seq_flds_add(x2i_fluxes, "Faxa_snow_18O") - longname = 'Isotopic water equiv. 
snow flux of H218O' - stdname = 'h2_18o_snowfall_flux' - attname = 'Faxa_snow_18O' - call metadata_set(attname, longname, stdname, units) - longname = 'H2_18O Convective snow rate (water equivalent)' - stdname = 'H2_18O_convective_snowfall_flux' - attname = 'Faxa_snowc_18O' - call metadata_set(attname, longname, stdname, units) - longname = 'H2_18O Large-scale (stable) snow rate (water equivalent)' - stdname = 'H2_18O_large_scale_snowfall_flux' - attname = 'Faxa_snowl_18O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(a2x_fluxes,"Faxa_snowc_HDO") - call seq_flds_add(a2x_fluxes,"Faxa_snowl_HDO") - call seq_flds_add(x2o_fluxes, "Faxa_snow_HDO") - call seq_flds_add(x2l_fluxes,"Faxa_snowc_HDO") - call seq_flds_add(x2l_fluxes,"Faxa_snowl_HDO") - call seq_flds_add(x2i_fluxes, "Faxa_snow_HDO") - longname = 'Isotopic water equiv. snow flux of HDO' - stdname = 'hdo_snowfall_flux' - attname = 'Faxa_snow_HDO' - call metadata_set(attname, longname, stdname, units) - longname = 'HDO Convective snow rate (water equivalent)' - stdname = 'HDO_convective_snowfall_flux' - attname = 'Faxa_snowc_HDO' - call metadata_set(attname, longname, stdname, units) - longname = 'HDO Large-scale (stable) snow rate (water equivalent)' - stdname = 'HDO_large_scale_snowfall_flux' - attname = 'Faxa_snowl_HDO' - call metadata_set(attname, longname, stdname, units) - - !---------------------------------- - !Isotopic precipitation (rain+snow): - !---------------------------------- - - call seq_flds_add(x2o_fluxes,"Faxa_prec_16O") ! derived rain+snow - longname = 'Isotopic Water flux (rain+snow) for H2_16O' - stdname = 'h2_16o_precipitation_flux' - attname = 'Faxa_prec_16O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(x2o_fluxes,"Faxa_prec_18O") ! derived rain+snow - longname = 'Isotopic Water flux (rain+snow) for H2_18O' - stdname = 'h2_18o_precipitation_flux' - units = 'kg m-2 s-1' - attname = 'Faxa_prec_18O' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(x2o_fluxes,"Faxa_prec_HDO") ! derived rain+snow - longname = 'Isotopic Water flux (rain+snow) for HDO' - stdname = 'hdo_precipitation_flux' - units = 'kg m-2 s-1' - attname = 'Faxa_prec_HDO' - call metadata_set(attname, longname, stdname, units) - - !------------------------------------- - !Isotopic two meter reference humidity: - !------------------------------------- - - ! H216O Reference specific humidity at 2 meters - call seq_flds_add(l2x_states,"Sl_qref_16O") - call seq_flds_add(i2x_states,"Si_qref_16O") - call seq_flds_add(xao_states,"So_qref_16O") - call seq_flds_add(x2a_states,"Sx_qref_16O") - longname = 'Reference H216O specific humidity at 2 meters' - stdname = 'H216O_specific_humidity' - units = 'kg kg-1' - attname = 'Si_qref_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_qref_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'So_qref_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_qref_16O' - call metadata_set(attname, longname, stdname, units) - - ! 
HD16O Reference specific humidity at 2 meters - call seq_flds_add(l2x_states,"Sl_qref_HDO") - call seq_flds_add(i2x_states,"Si_qref_HDO") - call seq_flds_add(xao_states,"So_qref_HDO") - call seq_flds_add(x2a_states,"Sx_qref_HDO") - longname = 'Reference HD16O specific humidity at 2 meters' - stdname = 'HD16O_specific_humidity' - units = 'kg kg-1' - attname = 'Si_qref_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_qref_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'So_qref_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_qref_HDO' - call metadata_set(attname, longname, stdname, units) - - ! H218O Reference specific humidity at 2 meters - call seq_flds_add(l2x_states,"Sl_qref_18O") - call seq_flds_add(i2x_states,"Si_qref_18O") - call seq_flds_add(xao_states,"So_qref_18O") - call seq_flds_add(x2a_states,"Sx_qref_18O") - longname = 'Reference H218O specific humidity at 2 meters' - stdname = 'H218O_specific_humidity' - units = 'kg kg-1' - attname = 'Si_qref_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sl_qref_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'So_qref_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Sx_qref_18O' - call metadata_set(attname, longname, stdname, units) - - !------------------------- - !Isotopic Evaporation flux: - !------------------------- - - ! H216O Evaporation water flux - call seq_flds_add(l2x_fluxes,"Fall_evap_16O") - call seq_flds_add(i2x_fluxes,"Faii_evap_16O") - call seq_flds_add(xao_fluxes,"Faox_evap_16O") - call seq_flds_add(x2a_fluxes,"Faxx_evap_16O") - call seq_flds_add(x2o_fluxes,"Foxx_evap_16O") - longname = 'Evaporation H216O flux' - stdname = 'H216O_evaporation_flux' - units = 'kg m-2 s-1' - attname = 'Fall_evap_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_evap_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_evap_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_evap_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_evap_16O' - call metadata_set(attname, longname, stdname, units) - - ! HD16O Evaporation water flux - call seq_flds_add(l2x_fluxes,"Fall_evap_HDO") - call seq_flds_add(i2x_fluxes,"Faii_evap_HDO") - call seq_flds_add(xao_fluxes,"Faox_evap_HDO") - call seq_flds_add(x2a_fluxes,"Faxx_evap_HDO") - call seq_flds_add(x2o_fluxes,"Foxx_evap_HDO") - longname = 'Evaporation HD16O flux' - stdname = 'HD16O_evaporation_flux' - units = 'kg m-2 s-1' - attname = 'Fall_evap_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_evap_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_evap_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_evap_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_evap_HDO' - call metadata_set(attname, longname, stdname, units) - - ! 
H218O Evaporation water flux - call seq_flds_add(l2x_fluxes,"Fall_evap_18O") - call seq_flds_add(i2x_fluxes,"Faii_evap_18O") - call seq_flds_add(xao_fluxes,"Faox_evap_18O") - call seq_flds_add(x2a_fluxes,"Faxx_evap_18O") - call seq_flds_add(x2o_fluxes,"Foxx_evap_18O") - longname = 'Evaporation H218O flux' - stdname = 'H218O_evaporation_flux' - units = 'kg m-2 s-1' - attname = 'Fall_evap_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Faii_evap_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Faox_evap_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Faxx_evap_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_evap_18O' - call metadata_set(attname, longname, stdname, units) - - !----------------------------- - !Isotopic sea ice melting flux: - !----------------------------- - - ! H216O Water flux from melting - units = 'kg m-2 s-1' - call seq_flds_add(i2x_fluxes,"Fioi_meltw_16O") - call seq_flds_add(x2o_fluxes,"Fioi_meltw_16O") - longname = 'H2_16O flux due to melting' - stdname = 'h2_16o_surface_snow_melt_flux' - attname = 'Fioi_meltw_16O' - call metadata_set(attname, longname, stdname, units) - - ! H218O Water flux from melting - call seq_flds_add(i2x_fluxes,"Fioi_meltw_18O") - call seq_flds_add(x2o_fluxes,"Fioi_meltw_18O") - longname = 'H2_18O flux due to melting' - stdname = 'h2_18o_surface_snow_melt_flux' - attname = 'Fioi_meltw_18O' - call metadata_set(attname, longname, stdname, units) - - ! HDO Water flux from melting - units = 'kg m-2 s-1' - call seq_flds_add(i2x_fluxes,"Fioi_meltw_HDO") - call seq_flds_add(x2o_fluxes,"Fioi_meltw_HDO") - longname = 'HDO flux due to melting' - stdname = 'hdo_surface_snow_melt_flux' - attname = 'Fioi_meltw_HDO' - call metadata_set(attname, longname, stdname, units) - - !Iso-Runoff - ! 
r2o, l2x, x2r - - units = 'kg m-2 s-1' - call seq_flds_add(l2x_fluxes,'Flrl_rofi_16O') - call seq_flds_add(x2r_fluxes,'Flrl_rofi_16O') - longname = 'H2_16O Water flux from land (frozen)' - stdname = 'H2_16O_frozen_water_flux_into_runoff' - attname = 'Flrl_rofi_16O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(l2x_fluxes,'Flrl_rofi_18O') - call seq_flds_add(x2r_fluxes,'Flrl_rofi_18O') - longname = 'H2_18O Water flux from land (frozen)' - stdname = 'H2_18O_frozen_water_flux_into_runoff' - attname = 'Flrl_rofi_18O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(l2x_fluxes,'Flrl_rofi_HDO') - call seq_flds_add(x2r_fluxes,'Flrl_rofi_HDO') - longname = 'HDO Water flux from land (frozen)' - stdname = 'HDO_frozen_water_flux_into_runoff' - attname = 'Flrl_rofi_HDO' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(l2x_fluxes,'Flrl_rofl_16O') - call seq_flds_add(x2r_fluxes,'Flrl_rofl_16O') - longname = 'H2_16O Water flux from land (liquid)' - stdname = 'H2_16O_liquid_water_flux_into_runoff' - attname = 'Flrl_rofl_16O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(l2x_fluxes,'Flrl_rofl_18O') - call seq_flds_add(x2r_fluxes,'Flrl_rofl_18O') - longname = 'H2_18O Water flux from land (liquid)' - stdname = 'H2_18O_liquid_water_flux_into_runoff' - attname = 'Flrl_rofl_18O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(l2x_fluxes,'Flrl_rofl_HDO') - call seq_flds_add(x2r_fluxes,'Flrl_rofl_HDO') - longname = 'HDO Water flux from land (liquid)' - stdname = 'HDO_liquid_water_flux_into_runoff' - attname = 'Flrl_rofl_HDO' - call metadata_set(attname, longname, stdname, units) - - ! r2x, x2o - call seq_flds_add(r2x_fluxes,'Forr_rofl_16O') - call seq_flds_add(x2o_fluxes,'Foxx_rofl_16O') - call seq_flds_add(r2o_liq_fluxes,'Forr_rofl_16O') - longname = 'H2_16O Water flux due to liq runoff ' - stdname = 'H2_16O_water_flux_into_sea_water' - attname = 'Forr_rofl_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_rofl_16O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_fluxes,'Forr_rofl_18O') - call seq_flds_add(x2o_fluxes,'Foxx_rofl_18O') - call seq_flds_add(r2o_liq_fluxes,'Forr_rofl_18O') - longname = 'H2_18O Water flux due to liq runoff ' - stdname = 'H2_18O_water_flux_into_sea_water' - attname = 'Forr_rofl_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_rofl_18O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_fluxes,'Forr_rofl_HDO') - call seq_flds_add(x2o_fluxes,'Foxx_rofl_HDO') - call seq_flds_add(r2o_liq_fluxes,'Forr_rofl_HDO') - longname = 'HDO Water flux due to liq runoff ' - stdname = 'HDO_water_flux_into_sea_water' - attname = 'Forr_rofl_HDO' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_rofl_HDO' - call metadata_set(attname, longname, stdname, units) - - call seq_flds_add(r2x_fluxes,'Forr_rofi_16O') - call seq_flds_add(x2o_fluxes,'Foxx_rofi_16O') - call seq_flds_add(r2o_ice_fluxes,'Forr_rofi_16O') - longname = 'H2_16O Water flux due to ice runoff ' - stdname = 'H2_16O_water_flux_into_sea_water' - attname = 'Forr_rofi_16O' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_rofi_16O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_fluxes,'Forr_rofi_18O') - call seq_flds_add(x2o_fluxes,'Foxx_rofi_18O') - call seq_flds_add(r2o_ice_fluxes,'Forr_rofi_18O') - longname = 'H2_18O 
Water flux due to ice runoff ' - stdname = 'H2_18O_water_flux_into_sea_water' - attname = 'Forr_rofi_18O' - call metadata_set(attname, longname, stdname, units) - attname = 'Foxx_rofi_18O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_fluxes,'Forr_rofi_HDO') - call seq_flds_add(x2o_fluxes,'Foxx_rofi_HDO') - call seq_flds_add(r2o_ice_fluxes,'Forr_rofi_HDO') - longname = 'HDO Water flux due to ice runoff ' - stdname = 'HDO_water_flux_into_sea_water' - attname = 'Forr_rofi_HDO' - call metadata_set(attname, longname, stdname, units) - - ! r2x, x2l - call seq_flds_add(r2x_fluxes,'Flrr_flood_16O') - call seq_flds_add(x2l_fluxes,'Flrr_flood_16O') - longname = 'H2_16O water flux due to flooding' - stdname = 'H2_16O_flooding_water_flux_back_to_land' - attname = 'Flrr_flood_16O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_fluxes,'Flrr_flood_18O') - call seq_flds_add(x2l_fluxes,'Flrr_flood_18O') - longname = 'H2_18O water flux due to flooding' - stdname = 'H2_18O_flooding_water_flux_back_to_land' - attname = 'Flrr_flood_18O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_fluxes,'Flrr_flood_HDO') - call seq_flds_add(x2l_fluxes,'Flrr_flood_HDO') - longname = 'HDO Water flux due to flooding' - stdname = 'HDO_flooding_water_flux_back_to_land' - attname = 'Flrr_flood_HDO' - call metadata_set(attname, longname, stdname, units) - - units = 'm3' - call seq_flds_add(r2x_states,'Flrr_volr_16O') - call seq_flds_add(x2l_states,'Flrr_volr_16O') - longname = 'H2_16O river channel water volume ' - stdname = 'H2_16O_rtm_volr' - attname = 'Flrr_volr_16O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_states,'Flrr_volr_18O') - call seq_flds_add(x2l_states,'Flrr_volr_18O') - longname = 'H2_18O river channel water volume ' - stdname = 'H2_18O_rtm_volr' - attname = 'Flrr_volr_18O' - call metadata_set(attname, longname, stdname, units) - call seq_flds_add(r2x_states,'Flrr_volr_HDO') - call seq_flds_add(x2l_states,'Flrr_volr_HDO') - longname = 'HDO river channel water volume ' - stdname = 'HDO_rtm_volr' - attname = 'Flrr_volr_HDO' - call metadata_set(attname, longname, stdname, units) - - ! call seq_flds_add(r2x_fluxes,'Flrr_flood_HDO') - ! call seq_flds_add(x2l_fluxes,'Flrr_flood_HDO') - ! longname = 'H2_18O Water flux due to flooding' - ! stdname = 'H2_18O_flooding_water_flux_back_to_land' - ! attname = 'Flrr_flood_18O' - ! call metadata_set(attname, longname, stdname, units) - - !----------------------------- - - endif !Water isotopes - - !----------------------------------------------------------------------------- - ! optional per thickness category fields - !----------------------------------------------------------------------------- - - if (seq_flds_i2o_per_cat) then - do num = 1, ice_ncat - write(cnum,'(i2.2)') num - - ! Fractional ice coverage wrt ocean - - name = 'Si_ifrac_' // cnum - call seq_flds_add(i2x_states,name) - call seq_flds_add(x2o_states,name) - longname = 'fractional ice coverage wrt ocean for thickness category ' // cnum - stdname = 'sea_ice_area_fraction' - units = '1' - attname = name - call metadata_set(attname, longname, stdname, units) - - ! 
Net shortwave radiation - - name = 'PFioi_swpen_ifrac_' // cnum - call seq_flds_add(i2x_fluxes,name) - call seq_flds_add(x2o_fluxes,name) - longname = 'net shortwave radiation penetrating into ice and ocean times ice fraction for thickness category ' // cnum - stdname = 'product_of_net_downward_shortwave_flux_at_sea_water_surface_and_sea_ice_area_fraction' - units = 'W m-2' - attname = name - call metadata_set(attname, longname, stdname, units) - - end do - - ! Fractional atmosphere coverage wrt ocean - - name = 'Sf_afrac' - call seq_flds_add(x2o_states,name) - longname = 'fractional atmosphere coverage wrt ocean' - stdname = 'atmosphere_area_fraction' - units = '1' - attname = name - call metadata_set(attname, longname, stdname, units) - - name = 'Sf_afracr' - call seq_flds_add(x2o_states,name) - longname = 'fractional atmosphere coverage used in radiation computations wrt ocean' - stdname = 'atmosphere_area_fraction' - units = '1' - attname = name - call metadata_set(attname, longname, stdname, units) - - ! Net shortwave radiation - - name = 'Foxx_swnet_afracr' - call seq_flds_add(x2o_fluxes,name) - longname = 'net shortwave radiation times atmosphere fraction' - stdname = 'product_of_net_downward_shortwave_flux_at_sea_water_surface_and_atmosphere_area_fraction' - units = 'W m-2' - attname = name - call metadata_set(attname, longname, stdname, units) - endif - - !----------------------------------------------------------------------------- - ! Read namelist for CARMA - ! if carma_flds are specified then setup fields for CLM to CAM communication - !----------------------------------------------------------------------------- - - call shr_carma_readnl(nlfilename='drv_flds_in', carma_fields=carma_fields) - if (carma_fields /= ' ') then - call seq_flds_add(l2x_fluxes, trim(carma_fields)) - call seq_flds_add(x2a_fluxes, trim(carma_fields)) - longname = 'Volumetric soil water' - stdname = 'soil_water' - units = 'm3/m3' - call metadata_set(carma_fields, longname, stdname, units) - endif - - !----------------------------------------------------------------------------- - ! Read namelist for MEGAN - ! if MEGAN emission are specified then setup fields for CLM to CAM communication - ! (emissions fluxes) - !----------------------------------------------------------------------------- - - call shr_megan_readnl(nlfilename='drv_flds_in', ID=ID, megan_fields=megan_voc_fields) - if (shr_megan_mechcomps_n>0) then - call seq_flds_add(l2x_fluxes, trim(megan_voc_fields)) - call seq_flds_add(x2a_fluxes, trim(megan_voc_fields)) - longname = 'MEGAN emission fluxes' - stdname = 'megan_fluxes' - units = 'molecules/m2/sec' - call metadata_set(megan_voc_fields, longname, stdname, units) - endif - - !----------------------------------------------------------------------------- - ! Read namelist for Fire Emissions - ! if fire emission are specified then setup fields for CLM to CAM communication - ! 
(emissions fluxes) - !----------------------------------------------------------------------------- - - call shr_fire_emis_readnl(nlfilename='drv_flds_in', ID=ID, emis_fields=fire_emis_fields) - if (shr_fire_emis_mechcomps_n>0) then - call seq_flds_add(l2x_fluxes, trim(fire_emis_fields)) - call seq_flds_add(x2a_fluxes, trim(fire_emis_fields)) - longname = 'wild fire emission fluxes' - stdname = 'fire_emis' - units = 'kg/m2/sec' - call metadata_set(fire_emis_fields, longname, stdname, units) - - call seq_flds_add(l2x_states, trim(shr_fire_emis_ztop_token)) - call seq_flds_add(x2a_states, trim(shr_fire_emis_ztop_token)) - longname = 'wild fire plume height' - stdname = 'fire_plume_top' - units = 'm' - - call metadata_set(shr_fire_emis_ztop_token, longname, stdname, units) - endif - - !----------------------------------------------------------------------------- - ! Dry Deposition fields - ! First read namelist and figure out the drydep field list to pass - ! Then check if file exists and if not, n_drydep will be zero - ! Then add dry deposition fields to land export and atmosphere import states - ! Then initialize dry deposition fields - ! Note: CAM and CLM will then call seq_drydep_setHCoeff - !----------------------------------------------------------------------------- - - call seq_drydep_readnl(nlfilename="drv_flds_in", ID=ID, seq_drydep_fields=seq_drydep_fields) - if ( lnd_drydep ) then - call seq_flds_add(l2x_states, seq_drydep_fields) - call seq_flds_add(x2a_states, seq_drydep_fields) - - longname = 'dry deposition velocity' - stdname = 'drydep_vel' - units = 'cm/sec' - - call metadata_set(seq_drydep_fields, longname, stdname, units) - endif - call seq_drydep_init( ) - - !----------------------------------------------------------------------------- - ! Nitrogen Deposition fields - ! First read namelist and figure out the ndep field list to pass - ! Then check if file exists and if not, add_ndep_fields will be false - ! Then add nitrogen deposition fields to atm export, lnd import and ocn import - !----------------------------------------------------------------------------- - - call shr_ndep_readnl(nlfilename="drv_flds_in", ID=ID, ndep_fields=ndep_fields, add_ndep_fields=add_ndep_fields) - if (add_ndep_fields) then - call seq_flds_add(a2x_fluxes, ndep_fields) - call seq_flds_add(x2l_fluxes, ndep_fields) - call seq_flds_add(x2o_fluxes, ndep_fields) - - longname = 'nitrogen deposition flux' - stdname = 'nitrogen_deposition' - units = 'kg(N)/m2/sec' - - call metadata_set(ndep_fields, longname, stdname, units) - end if - - !---------------------------------------------------------------------------- - ! 
state + flux fields - !---------------------------------------------------------------------------- - - seq_flds_dom_coord = trim(dom_coord ) - seq_flds_a2x_states = trim(a2x_states) - seq_flds_x2a_states = trim(x2a_states) - seq_flds_i2x_states = trim(i2x_states) - seq_flds_x2i_states = trim(x2i_states) - seq_flds_l2x_states = trim(l2x_states) - seq_flds_l2x_states_to_glc = trim(l2x_states_to_glc) - seq_flds_x2l_states = trim(x2l_states) - seq_flds_x2l_states_from_glc = trim(x2l_states_from_glc) - seq_flds_o2x_states = trim(o2x_states) - seq_flds_x2o_states = trim(x2o_states) - seq_flds_g2x_states = trim(g2x_states) - seq_flds_g2x_states_to_lnd = trim(g2x_states_to_lnd) - seq_flds_x2g_states = trim(x2g_states) - seq_flds_x2g_states_from_lnd = trim(x2g_states_from_lnd) - seq_flds_x2g_states_from_ocn = trim(x2g_states_from_ocn) - seq_flds_xao_states = trim(xao_states) - seq_flds_xao_albedo = trim(xao_albedo) - seq_flds_xao_diurnl = trim(xao_diurnl) - seq_flds_r2x_states = trim(r2x_states) - seq_flds_x2r_states = trim(x2r_states) - seq_flds_w2x_states = trim(w2x_states) - seq_flds_x2w_states = trim(x2w_states) - - seq_flds_dom_other = trim(dom_other ) - seq_flds_a2x_fluxes = trim(a2x_fluxes) - seq_flds_x2a_fluxes = trim(x2a_fluxes) - seq_flds_i2x_fluxes = trim(i2x_fluxes) - seq_flds_x2i_fluxes = trim(x2i_fluxes) - seq_flds_l2x_fluxes = trim(l2x_fluxes) - seq_flds_l2x_fluxes_to_glc = trim(l2x_fluxes_to_glc) - seq_flds_x2l_fluxes = trim(x2l_fluxes) - seq_flds_x2l_fluxes_from_glc = trim(x2l_fluxes_from_glc) - seq_flds_o2x_fluxes = trim(o2x_fluxes) - seq_flds_x2o_fluxes = trim(x2o_fluxes) - seq_flds_g2x_fluxes = trim(g2x_fluxes) - seq_flds_g2x_fluxes_to_lnd = trim(g2x_fluxes_to_lnd) - seq_flds_g2o_liq_fluxes = trim(g2o_liq_fluxes) - seq_flds_g2o_ice_fluxes = trim(g2o_ice_fluxes) - seq_flds_x2g_fluxes = trim(x2g_fluxes) - seq_flds_x2g_fluxes_from_lnd = trim(x2g_fluxes_from_lnd) - seq_flds_xao_fluxes = trim(xao_fluxes) - seq_flds_r2x_fluxes = trim(r2x_fluxes) - seq_flds_x2r_fluxes = trim(x2r_fluxes) - seq_flds_w2x_fluxes = trim(w2x_fluxes) - seq_flds_x2w_fluxes = trim(x2w_fluxes) - seq_flds_r2o_liq_fluxes = trim(r2o_liq_fluxes) - seq_flds_r2o_ice_fluxes = trim(r2o_ice_fluxes) - - if (seq_comm_iamroot(ID)) then - write(logunit,*) subname//': seq_flds_a2x_states= ',trim(seq_flds_a2x_states) - write(logunit,*) subname//': seq_flds_a2x_fluxes= ',trim(seq_flds_a2x_fluxes) - write(logunit,*) subname//': seq_flds_x2a_states= ',trim(seq_flds_x2a_states) - write(logunit,*) subname//': seq_flds_x2a_fluxes= ',trim(seq_flds_x2a_fluxes) - write(logunit,*) subname//': seq_flds_l2x_states= ',trim(seq_flds_l2x_states) - write(logunit,*) subname//': seq_flds_l2x_fluxes= ',trim(seq_flds_l2x_fluxes) - write(logunit,*) subname//': seq_flds_x2l_states= ',trim(seq_flds_x2l_states) - write(logunit,*) subname//': seq_flds_x2l_fluxes= ',trim(seq_flds_x2l_fluxes) - write(logunit,*) subname//': seq_flds_i2x_states= ',trim(seq_flds_i2x_states) - write(logunit,*) subname//': seq_flds_i2x_fluxes= ',trim(seq_flds_i2x_fluxes) - write(logunit,*) subname//': seq_flds_x2i_states= ',trim(seq_flds_x2i_states) - write(logunit,*) subname//': seq_flds_x2i_fluxes= ',trim(seq_flds_x2i_fluxes) - write(logunit,*) subname//': seq_flds_o2x_states= ',trim(seq_flds_o2x_states) - write(logunit,*) subname//': seq_flds_o2x_fluxes= ',trim(seq_flds_o2x_fluxes) - write(logunit,*) subname//': seq_flds_x2o_states= ',trim(seq_flds_x2o_states) - write(logunit,*) subname//': seq_flds_x2o_fluxes= ',trim(seq_flds_x2o_fluxes) - write(logunit,*) subname//': 
seq_flds_g2x_states= ',trim(seq_flds_g2x_states) - write(logunit,*) subname//': seq_flds_g2x_fluxes= ',trim(seq_flds_g2x_fluxes) - write(logunit,*) subname//': seq_flds_x2g_states= ',trim(seq_flds_x2g_states) - write(logunit,*) subname//': seq_flds_x2g_fluxes= ',trim(seq_flds_x2g_fluxes) - write(logunit,*) subname//': seq_flds_xao_states= ',trim(seq_flds_xao_states) - write(logunit,*) subname//': seq_flds_xao_fluxes= ',trim(seq_flds_xao_fluxes) - write(logunit,*) subname//': seq_flds_xao_albedo= ',trim(seq_flds_xao_albedo) - write(logunit,*) subname//': seq_flds_xao_diurnl= ',trim(seq_flds_xao_diurnl) - write(logunit,*) subname//': seq_flds_r2x_states= ',trim(seq_flds_r2x_states) - write(logunit,*) subname//': seq_flds_r2x_fluxes= ',trim(seq_flds_r2x_fluxes) - write(logunit,*) subname//': seq_flds_x2r_states= ',trim(seq_flds_x2r_states) - write(logunit,*) subname//': seq_flds_x2r_fluxes= ',trim(seq_flds_x2r_fluxes) - write(logunit,*) subname//': seq_flds_w2x_states= ',trim(seq_flds_w2x_states) - write(logunit,*) subname//': seq_flds_w2x_fluxes= ',trim(seq_flds_w2x_fluxes) - write(logunit,*) subname//': seq_flds_x2w_states= ',trim(seq_flds_x2w_states) - write(logunit,*) subname//': seq_flds_x2w_fluxes= ',trim(seq_flds_x2w_fluxes) - end if - - call catFields(seq_flds_dom_fields, seq_flds_dom_coord , seq_flds_dom_other ) - call catFields(seq_flds_a2x_fields, seq_flds_a2x_states, seq_flds_a2x_fluxes) - call catFields(seq_flds_x2a_fields, seq_flds_x2a_states, seq_flds_x2a_fluxes) - call catFields(seq_flds_i2x_fields, seq_flds_i2x_states, seq_flds_i2x_fluxes) - call catFields(seq_flds_x2i_fields, seq_flds_x2i_states, seq_flds_x2i_fluxes) - call catFields(seq_flds_l2x_fields, seq_flds_l2x_states, seq_flds_l2x_fluxes) - call catFields(seq_flds_l2x_fields_to_glc, seq_flds_l2x_states_to_glc, seq_flds_l2x_fluxes_to_glc) - call catFields(seq_flds_x2l_fields, seq_flds_x2l_states, seq_flds_x2l_fluxes) - call catFields(seq_flds_x2l_fields_from_glc, seq_flds_x2l_states_from_glc, seq_flds_x2l_fluxes_from_glc) - call catFields(seq_flds_o2x_fields, seq_flds_o2x_states, seq_flds_o2x_fluxes) - call catFields(seq_flds_x2o_fields, seq_flds_x2o_states, seq_flds_x2o_fluxes) - call catFields(seq_flds_g2x_fields, seq_flds_g2x_states, seq_flds_g2x_fluxes) - call catFields(seq_flds_g2x_fields_to_lnd, seq_flds_g2x_states_to_lnd, seq_flds_g2x_fluxes_to_lnd) - call catFields(seq_flds_x2g_fields, seq_flds_x2g_states, seq_flds_x2g_fluxes) - call catFields(seq_flds_xao_fields, seq_flds_xao_albedo, seq_flds_xao_states) - call catFields(stringtmp , seq_flds_xao_fields, seq_flds_xao_fluxes) - call catFields(seq_flds_xao_fields, stringtmp , seq_flds_xao_diurnl) - call catFields(seq_flds_r2x_fields, seq_flds_r2x_states, seq_flds_r2x_fluxes) - call catFields(seq_flds_x2r_fields, seq_flds_x2r_states, seq_flds_x2r_fluxes) - call catFields(seq_flds_w2x_fields, seq_flds_w2x_states, seq_flds_w2x_fluxes) - call catFields(seq_flds_x2w_fields, seq_flds_x2w_states, seq_flds_x2w_fluxes) - - end subroutine seq_flds_set - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_flds_add - ! - ! !DESCRIPTION: - ! Returns new concatentated field list - ! in the output character string {\tt outfld}. - ! - ! !REVISION HISTORY: - ! 2011-Nov-27 - M. Vertenstein - first version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_flds_add(outfld, str) - - ! 
!USES: - - ! !INPUT/OUTPUT PARAMETERS: - - character(len=*),intent(in) :: str ! string - character(len=*),intent(inout) :: outfld ! output field name - - !EOP - - character(len=*),parameter :: subname = '(seq_flds_add) ' - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - if (trim(outfld) == '') then - outfld = trim(str) - else - outfld = trim(outfld)//':'//trim(str) - end if - if (len_trim(outfld) >= CXX) then - write(logunit,*)'fields are = ',trim(outfld) - write(logunit,*)'fields length = ',len_trim(outfld) - call shr_sys_abort(subname//'ERROR: maximum length of xxx_states or xxx_fluxes has been exceeded') - end if - - end subroutine seq_flds_add - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: catFields - ! - ! !DESCRIPTION: - ! Returns the concatenation of the field lists {\tt str1} and {\tt str2} - ! in the output character string {\tt outfield}. - ! - ! !REVISION HISTORY: - ! 2003-Jan-24 - T. Craig - first version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine catFields(outfield, str1, str2) - - ! !USES: - - ! !INPUT/OUTPUT PARAMETERS: - - character(len=*),intent(inout) :: outfield ! output field name - character(len=*),intent(in) :: str1 ! string1 - character(len=*),intent(in ) :: str2 ! string2 - - !EOP - - character(len=*),parameter :: subname = '(seq_flds_catFields) ' - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - outfield = '' - if (len_trim(str1) > 0 .and. len_trim(str2) > 0) then - if (len_trim(str1) + len_trim(str2) + 1 > len(outfield)) then - call shr_sys_abort(subname//' ERROR: maximum length of string has been exceeded sum') - endif - outfield = trim(str1)//':'//trim(str2) - else - if (len_trim(str1) > 0) then - if (len_trim(str1) > len(outfield)) then - call shr_sys_abort(subname//' ERROR: maximum length of string has been exceeded str1') - endif - outfield = trim(str1) - endif - if (len_trim(str2) > 0) then - if (len_trim(str2) > len(outfield)) then - call shr_sys_abort(subname//' ERROR: maximum length of string has been exceeded str2') - endif - outfield = trim(str2) - endif - endif - - end subroutine catFields - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_flds_getField - ! - ! !DESCRIPTION: - ! Returns element {\tt nfld} of the colon-delimited string {\tt cstring} - ! in the output character string {\tt outfield}. - ! - ! !REVISION HISTORY: - ! 2003-Jan-24 - T. Craig - first version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_flds_getField(outfield, nfld, cstring) - - ! !USES: - use mct_mod - - ! !INPUT/OUTPUT PARAMETERS: - - character(len=*),intent(out) :: outfield ! output field name - integer ,intent(in ) :: nfld ! field number - character(len=*),intent(in ) :: cstring ! colon delimited field string - - !EOP - - type(mct_list) :: mctIstr ! mct list from input cstring - type(mct_string) :: mctOStr ! 
mct string for output outfield - character(len=*),parameter :: subname = '(seq_flds_getField) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - outfield = '' - - call mct_list_init(mctIstr,cstring) - call mct_list_get(mctOStr,nfld,mctIstr) - outfield = mct_string_toChar(mctOStr) - call mct_list_clean(mctIstr) - call mct_string_clean(mctOStr) - - end subroutine seq_flds_getField - - !=============================================================================== - ! If the attname passed in contains colons it is assumed to be a list of fields - ! all of which have the same names and units - subroutine metadata_set(attname , longname, stdname , units ) - - ! !USES: - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*), intent(in) :: attname - character(len=*), intent(in) :: longname - character(len=*), intent(in) :: stdname - character(len=*), intent(in) :: units - - !EOP - character(len=*),parameter :: subname = '(seq_flds_metadata_set) ' - integer :: i, j - - i = index(attname,':') - j=1 - - do while(i>j .and. i<=len_trim(attname)) - n_entries = n_entries + 1 - lookup_entry(n_entries,1) = attname(j:i-1) - lookup_entry(n_entries,2) = trim(longname) - lookup_entry(n_entries,3) = trim(stdname ) - lookup_entry(n_entries,4) = trim(units ) - j=i+1 - i = index(attname(j:),':') + j - 1 - enddo - n_entries = n_entries + 1 - i = len_trim(attname) - lookup_entry(n_entries,1) = attname(j:i) - lookup_entry(n_entries,2) = trim(longname) - lookup_entry(n_entries,3) = trim(stdname ) - lookup_entry(n_entries,4) = trim(units ) - - - - - if (n_entries .ge. nmax) then - write(logunit,*)'n_entries= ',n_entries,' nmax = ',nmax,' attname= ',trim(attname) - call shr_sys_abort(subname//'ERROR: nmax fields in lookup_entry table exceeded') - end if - - end subroutine metadata_set - - !=============================================================================== - - subroutine set_glc_elevclass_field(name, attname, longname, stdname, units, fieldlist, & - additional_list) - - ! Sets a coupling field for all glc elevation classes (1:glc_nec) plus bare land - ! (index 0). - ! - ! Note that, if glc_nec = 0, then we don't create any coupling fields (not even the - ! bare land (0) index) - ! - ! Puts the coupling fields in the given fieldlist, and also does the appropriate - ! metadata_set calls. - ! - ! additional_list should be .false. (or absent) the first time this is called for a - ! given set of coupling fields. However, if this same set of coupling fields is being - ! added to multiple field lists, then additional_list should be set to true for the - ! second and subsequent calls; in this case, the metadata_set calls are not done - ! (because they have already been done). - ! - ! name, attname and longname give the base name of the field; the elevation class - ! index will be appended as a suffix - - ! !USES: - use glc_elevclass_mod, only : glc_get_num_elevation_classes, glc_elevclass_as_string - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*), intent(in) :: name ! base field name to add to fieldlist - character(len=*), intent(in) :: attname ! base field name for metadata - character(len=*), intent(in) :: longname ! base long name for metadata - character(len=*), intent(in) :: stdname ! standard name for metadata - character(len=*), intent(in) :: units ! units for metadata - character(len=*), intent(inout) :: fieldlist ! 
field list into which the fields should be added - - logical, intent(in), optional :: additional_list ! whether this is an additional list for the same set of coupling fields (see above for details; defaults to false) - - !EOP - integer :: num - character(len= 16) :: cnum - logical :: l_additional_list ! local version of the optional additional_list argument - - l_additional_list = .false. - if (present(additional_list)) then - l_additional_list = additional_list - end if - - if (glc_get_num_elevation_classes() > 0) then - do num = 0, glc_get_num_elevation_classes() - cnum = glc_elevclass_as_string(num) - - call seq_flds_add(fieldlist, trim(name) // trim(cnum)) - - if (.not. l_additional_list) then - call metadata_set(attname = trim(attname) // trim(cnum), & - longname = trim(longname) // ' of elevation class ' // trim(cnum), & - stdname = stdname, & - units = units) - end if - end do - end if - end subroutine set_glc_elevclass_field - - !=============================================================================== - - subroutine seq_flds_esmf_metadata_get(shortname, longname, stdname, units) - - ! !USES: - use shr_string_mod, only : shr_string_lastindex - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - character(len=*), intent(in) :: shortname - character(len=*),optional, intent(out) :: longname - character(len=*),optional, intent(out) :: stdname - character(len=*),optional, intent(out) :: units - - !EOP - - !--- local --- - integer :: i,n - character(len=CSS) :: llongname, lstdname, lunits, lshortname ! local copies - character(len=*),parameter :: undef = 'undefined' - character(len=*),parameter :: unknown = 'unknown' - logical :: found - character(len=*),parameter :: subname = '(seq_flds_esmf_metadata_get) ' - - !--- define field metadata (name, long_name, standard_name, units) --- - - llongname = trim(unknown) - lstdname = trim(unknown) - lunits = trim(unknown) - - found = .false. - - if (.not.found) then - i = 1 - do while (i <= n_entries .and. .not.found) - lshortname = trim(shortname) - if (trim(lshortname) == trim(lookup_entry(i,1))) then - llongname = trim(lookup_entry(i,2)) - lstdname = trim(lookup_entry(i,3)) - lunits = trim(lookup_entry(i,4)) - found =.true. - end if - i = i + 1 - end do - endif - - if (.not.found) then - i = 1 - do while (i <= n_entries .and. .not.found) - n = shr_string_lastIndex(shortname,"_") - lshortname = "" - if (n < len_trim(shortname)) lshortname = shortname(n+1:len_trim(shortname)) - if (trim(lshortname) == trim(lookup_entry(i,1))) then - llongname = trim(lookup_entry(i,2)) - lstdname = trim(lookup_entry(i,3)) - lunits = trim(lookup_entry(i,4)) - found = .true. - end if - i = i + 1 - end do - endif - - if (present(longname)) then - longname = trim(llongname) - endif - if (present(stdname)) then - stdname = trim(lstdname) - endif - if (present(units)) then - units = trim(lunits) - endif - - end subroutine seq_flds_esmf_metadata_get - -end module seq_flds_mod diff --git a/src/drivers/mct/shr/seq_infodata_mod.F90 b/src/drivers/mct/shr/seq_infodata_mod.F90 deleted file mode 100644 index 784498c2912..00000000000 --- a/src/drivers/mct/shr/seq_infodata_mod.F90 +++ /dev/null @@ -1,2889 +0,0 @@ -! !MODULE: seq_infodata_mod --- Module for input data shared between CCSM components -! -! !DESCRIPTION: -! -! A module to get, put, and store some standard scalar data -! -! Typical usage: -! -! -! !REMARKS: -! -! !REVISION HISTORY: -! 2005-Nov-11 - E. Kluzek - creation of shr_inputinfo_mod -! 2007-Nov-15 - T. 
Craig - refactor for ccsm4 system and move to seq_infodata_mod -! 2016-Dec-08 - R. Montuoro - updated for multiple driver instances -! -! !INTERFACE: ------------------------------------------------------------------ - -MODULE seq_infodata_mod - - ! !USES: - - use shr_kind_mod, only: SHR_KIND_CS, SHR_KIND_CL, SHR_KIND_IN, & - SHR_KIND_R8, SHR_KIND_I8 - use shr_sys_mod, only: shr_sys_flush, shr_sys_abort, shr_sys_getenv - use seq_comm_mct, only: logunit, loglevel, CPLID, seq_comm_gloroot - use seq_comm_mct, only: seq_comm_setptrs, seq_comm_iamroot, seq_comm_iamin - use seq_comm_mct, only: num_inst_atm, num_inst_lnd, num_inst_rof - use seq_comm_mct, only: num_inst_ocn, num_inst_ice, num_inst_glc - use seq_comm_mct, only: num_inst_wav, num_inst_iac - use shr_orb_mod, only: SHR_ORB_UNDEF_INT, SHR_ORB_UNDEF_REAL, shr_orb_params - - implicit none - - private ! default private - - ! !PUBLIC TYPES: - - public :: seq_infodata_type - - ! !PUBLIC MEMBER FUNCTIONS - - public :: seq_infodata_Init ! Initialize - public :: seq_infodata_Init2 ! Init after clocks initialized - public :: seq_infodata_GetData ! Get values from object - public :: seq_infodata_PutData ! Change values - public :: seq_infodata_Print ! print current info - public :: seq_infodata_Exchange ! exchange data across pes - - ! !PUBLIC DATA MEMBERS: - - !EOP - - ! Strings of valid start_type options - character(len=*), public, parameter :: seq_infodata_start_type_start = "startup" - character(len=*), public, parameter :: seq_infodata_start_type_cont = "continue" - character(len=*), public, parameter :: seq_infodata_start_type_brnch = "branch" - character(len=*), public, parameter :: seq_infodata_orb_fixed_year = 'fixed_year' - character(len=*), public, parameter :: seq_infodata_orb_variable_year = 'variable_year' - character(len=*), public, parameter :: seq_infodata_orb_fixed_parameters = 'fixed_parameters' - - ! InputInfo derived type - - type seq_infodata_type - private ! This type is opaque - - !--- set via namelist and held fixed ---- - character(SHR_KIND_CS) :: cime_model ! e3sm or cesm - character(SHR_KIND_CL) :: start_type ! Type of startup - character(SHR_KIND_CL) :: case_name ! Short case identification - character(SHR_KIND_CL) :: case_desc ! Long description of this case - character(SHR_KIND_CL) :: model_version ! Model version - character(SHR_KIND_CS) :: username ! Current user - character(SHR_KIND_CS) :: hostname ! Current machine - character(SHR_KIND_CL) :: timing_dir ! Dir for timing files - character(SHR_KIND_CL) :: tchkpt_dir ! Dir for timing checkpoint files - logical :: aqua_planet ! No ice/lnd, analytic ocn, perpetual time - integer(SHR_KIND_IN) :: aqua_planet_sst ! aqua planet analytic sst type - logical :: run_barriers ! barrier component run calls - logical :: brnch_retain_casename ! If branch and can use same casename - logical :: read_restart ! read the restart file, based on start_type - character(SHR_KIND_CL) :: restart_pfile ! Restart pointer file - character(SHR_KIND_CL) :: restart_file ! Full archive path to restart file - logical :: single_column ! single column mode - real (SHR_KIND_R8) :: scmlat ! single column lat - real (SHR_KIND_R8) :: scmlon ! single column lon - character(SHR_KIND_CS) :: logFilePostFix ! postfix for output log files - character(SHR_KIND_CL) :: outPathRoot ! root for output log files - logical :: perpetual ! perpetual flag - integer(SHR_KIND_IN) :: perpetual_ymd ! perpetual date - integer(SHR_KIND_IN) :: orb_iyear ! orbital year - integer(SHR_KIND_IN) :: orb_iyear_align ! 
model year associated with orb year - character(SHR_KIND_CL) :: orb_mode ! orbital mode - real(SHR_KIND_R8) :: orb_eccen ! See shr_orb_mod - real(SHR_KIND_R8) :: orb_obliq ! See shr_orb_mod - real(SHR_KIND_R8) :: orb_mvelp ! See shr_orb_mod - real(SHR_KIND_R8) :: orb_obliqr ! See shr_orb_mod - real(SHR_KIND_R8) :: orb_lambm0 ! See shr_orb_mod - real(SHR_KIND_R8) :: orb_mvelpp ! See shr_orb_mod - character(SHR_KIND_CS) :: wv_sat_scheme ! Water vapor saturation pressure scheme - real(SHR_KIND_R8) :: wv_sat_transition_start ! Saturation transition range - logical :: wv_sat_use_tables ! Saturation pressure lookup tables - real(SHR_KIND_R8) :: wv_sat_table_spacing! Saturation pressure table resolution - character(SHR_KIND_CS) :: tfreeze_option ! Freezing point calculation - character(SHR_KIND_CL) :: flux_epbal ! selects E,P,R adjustment technique - logical :: flux_albav ! T => no diurnal cycle in ocn albedos - logical :: flux_diurnal ! T => diurnal cycle in atm/ocn fluxes - integer :: ocn_surface_flux_scheme ! 0: E3SMv1 1: COARE 2: UA - logical :: coldair_outbreak_mod ! (Mahrt & Sun 1995,MWR) - real(SHR_KIND_R8) :: flux_convergence ! atmocn flux calc convergence value - integer :: flux_max_iteration ! max number of iterations of atmocn flux loop - character(SHR_KIND_CL) :: glc_renormalize_smb ! Whether to renormalize smb sent from lnd -> glc - real(SHR_KIND_R8) :: wall_time_limit ! force stop time limit (hours) - character(SHR_KIND_CS) :: force_stop_at ! when to force a stop (month, day, etc) - character(SHR_KIND_CL) :: atm_gnam ! atm grid - character(SHR_KIND_CL) :: lnd_gnam ! lnd grid - character(SHR_KIND_CL) :: ocn_gnam ! ocn grid - character(SHR_KIND_CL) :: ice_gnam ! ice grid - character(SHR_KIND_CL) :: rof_gnam ! rof grid - character(SHR_KIND_CL) :: glc_gnam ! glc grid - character(SHR_KIND_CL) :: wav_gnam ! wav grid - character(SHR_KIND_CL) :: iac_gnam ! iac grid - logical :: shr_map_dopole ! pole corrections in shr_map_mod - character(SHR_KIND_CL) :: vect_map ! vector mapping option, none, cart3d, cart3d_diag, cart3d_uvw, cart3d_uvw_diag - character(SHR_KIND_CS) :: aoflux_grid ! grid for atm ocn flux calc - integer :: cpl_decomp ! coupler decomp - character(SHR_KIND_CL) :: cpl_seq_option ! coupler sequencing option - - logical :: do_budgets ! do heat/water budgets diagnostics - logical :: do_histinit ! write out initial history file - integer :: budget_inst ! instantaneous budget level - integer :: budget_daily ! daily budget level - integer :: budget_month ! monthly budget level - integer :: budget_ann ! annual budget level - integer :: budget_ltann ! long term budget level written at end of year - integer :: budget_ltend ! long term budget level written at end of run - logical :: drv_threading ! is threading control in driver turned on - logical :: histaux_a2x ! cpl writes aux hist files: a2x every c2a comm - logical :: histaux_a2x1hri ! cpl writes aux hist files: a2x 1hr instaneous values - logical :: histaux_a2x1hr ! cpl writes aux hist files: a2x 1hr - logical :: histaux_a2x3hr ! cpl writes aux hist files: a2x 3hr states - logical :: histaux_a2x3hrp ! cpl writes aux hist files: a2x 3hr precip - logical :: histaux_a2x24hr ! cpl writes aux hist files: a2x daily all - logical :: histaux_l2x1yrg ! cpl writes aux hist files: l2x annual glc forcings - logical :: histaux_l2x ! cpl writes aux hist files: l2x every c2l comm - logical :: histaux_r2x ! cpl writes aux hist files: r2x daily - logical :: histaux_double_precision ! 
if true, use double-precision for cpl aux hist files - logical :: histavg_atm ! cpl writes atm fields in average history file - logical :: histavg_lnd ! cpl writes lnd fields in average history file - logical :: histavg_ocn ! cpl writes ocn fields in average history file - logical :: histavg_ice ! cpl writes ice fields in average history file - logical :: histavg_rof ! cpl writes rof fields in average history file - logical :: histavg_glc ! cpl writes glc fields in average history file - logical :: histavg_wav ! cpl writes wav fields in average history file - logical :: histavg_iac ! cpl writes iac fields in average history file - logical :: histavg_xao ! cpl writes flux xao fields in average history file - real(SHR_KIND_R8) :: eps_frac ! fraction error tolerance - real(SHR_KIND_R8) :: eps_amask ! atm mask error tolerance - real(SHR_KIND_R8) :: eps_agrid ! atm grid error tolerance - real(SHR_KIND_R8) :: eps_aarea ! atm area error tolerance - real(SHR_KIND_R8) :: eps_omask ! ocn mask error tolerance - real(SHR_KIND_R8) :: eps_ogrid ! ocn grid error tolerance - real(SHR_KIND_R8) :: eps_oarea ! ocn area error tolerance - logical :: mct_usealltoall ! flag for mct alltoall - logical :: mct_usevector ! flag for mct vector - - logical :: reprosum_use_ddpdd ! use ddpdd algorithm - logical :: reprosum_allow_infnan ! allow INF and NaN summands - real(SHR_KIND_R8) :: reprosum_diffmax ! maximum difference tolerance - logical :: reprosum_recompute ! recompute reprosum with nonscalable algorithm - ! if reprosum_diffmax is exceeded - - !--- set via namelist and may be time varying --- - integer(SHR_KIND_IN) :: info_debug ! debug level - logical :: bfbflag ! turn on bfb option - logical :: esmf_map_flag ! do we use esmf mapping - - !--- set via components and held fixed --- - logical :: atm_present ! does component model exist - logical :: atm_prognostic ! does component model need input data from driver - logical :: lnd_present ! does component model exist - logical :: lnd_prognostic ! does component model need input data from driver - logical :: rof_present ! does rof component exist - logical :: rofice_present ! does rof have iceberg coupling on - logical :: rof_prognostic ! does rof component need input data - logical :: flood_present ! does rof have flooding on - logical :: ocn_present ! does component model exist - logical :: ocn_prognostic ! does component model need input data from driver - logical :: ocnrof_prognostic ! does component need rof data - logical :: ocn_c2_glcshelf ! will ocn component send data for ice shelf fluxes in driver - logical :: ice_present ! does component model exist - logical :: ice_prognostic ! does component model need input data from driver - logical :: iceberg_prognostic ! does the ice model support icebergs - logical :: glc_present ! does component model exist - logical :: glclnd_present ! does glc have land coupling fields on - logical :: glcocn_present ! does glc have ocean runoff on - logical :: glcice_present ! does glc have iceberg coupling on - logical :: glc_prognostic ! does component model need input data from driver - logical :: glc_coupled_fluxes ! does glc send fluxes to other components (only relevant if glc_present is .true.) - logical :: wav_present ! does component model exist - logical :: wav_prognostic ! does component model need input data from driver - logical :: esp_present ! does component model exist - logical :: esp_prognostic ! does component model need input data from driver - logical :: iac_present ! 
does component model exist - logical :: iac_prognostic ! does component model need input data from driver - logical :: dead_comps ! do we have dead models - integer(SHR_KIND_IN) :: atm_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: atm_ny ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: lnd_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: lnd_ny ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: ice_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: ice_ny ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: ocn_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: ocn_ny ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: rof_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: rof_ny ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: glc_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: glc_ny ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: wav_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: wav_ny ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: iac_nx ! nx, ny of "2d" grid - integer(SHR_KIND_IN) :: iac_ny ! nx, ny of "2d" grid - - !--- set via components and may be time varying --- - real(SHR_KIND_R8) :: nextsw_cday ! calendar of next atm shortwave - real(SHR_KIND_R8) :: precip_fact ! precip factor - integer(SHR_KIND_IN) :: atm_phase ! atm phase - integer(SHR_KIND_IN) :: lnd_phase ! lnd phase - integer(SHR_KIND_IN) :: ice_phase ! ice phase - integer(SHR_KIND_IN) :: ocn_phase ! ocn phase - integer(SHR_KIND_IN) :: glc_phase ! glc phase - integer(SHR_KIND_IN) :: rof_phase ! rof phase - integer(SHR_KIND_IN) :: wav_phase ! wav phase - integer(SHR_KIND_IN) :: esp_phase ! esp phase - integer(SHR_KIND_IN) :: iac_phase ! iac phase - logical :: atm_aero ! atmosphere aerosols - logical :: glc_g2lupdate ! update glc2lnd fields in lnd model - real(shr_kind_r8) :: max_cplstep_time ! abort if cplstep time exceeds this value - !--- set from restart file --- - character(SHR_KIND_CL) :: rest_case_name ! Short case identification - !--- set by driver and may be time varying - logical :: glc_valid_input ! is valid accumulated data being sent to prognostic glc - character(SHR_KIND_CL) :: model_doi_url - end type seq_infodata_type - - ! --- public interfaces -------------------------------------------------------- - interface seq_infodata_GetData - module procedure seq_infodata_GetData_explicit -#ifndef CPRPGI - module procedure seq_infodata_GetData_bytype -#endif - ! ^ ifndef CPRPGI - end interface seq_infodata_GetData - - interface seq_infodata_PutData - module procedure seq_infodata_PutData_explicit -#ifndef CPRPGI - module procedure seq_infodata_PutData_bytype -#endif - ! ^ ifndef CPRPGI - end interface seq_infodata_PutData - - ! --- Private local data ------------------------------------------------------- - - character(len=*),parameter :: sp_str = 'str_undefined' - - !=============================================================================== -CONTAINS - !=============================================================================== - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_infodata_Init -- read in CIME shared namelist - ! - ! !DESCRIPTION: - ! - ! Read in input from seq_infodata_inparm namelist, output cime derived type for - ! miscillaneous info. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid, cpl_tag) - - ! 
!USES: - - use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit - use shr_string_mod, only : shr_string_toUpper, shr_string_listAppend - use shr_mpi_mod, only : shr_mpi_bcast - use seq_timemgr_mod, only : seq_timemgr_pause_active - use seq_io_read_mod, only : seq_io_read - use pio, only : file_desc_t - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(INOUT) :: infodata ! infodata object - character(len=*), intent(IN) :: nmlfile ! Name-list filename - integer(SHR_KIND_IN), intent(IN) :: ID ! seq_comm ID - type(file_desc_T) :: pioid - character(len=*), optional, intent(IN) :: cpl_tag ! cpl instance suffix - !EOP - - !----- local ----- - character(len=*), parameter :: subname = '(seq_infodata_Init) ' - integer(SHR_KIND_IN),parameter :: aqua_perpetual_ymd = 321 - - integer :: mpicom ! MPI communicator - integer :: ierr ! I/O error code - integer :: unitn ! Namelist unit number to read - - !------ namelist ----- - character(SHR_KIND_CS) :: cime_model ! e3sm or cesm - character(SHR_KIND_CL) :: case_desc ! Case long description - character(SHR_KIND_CL) :: case_name ! Case short name - character(SHR_KIND_CL) :: model_version ! Model version - character(SHR_KIND_CS) :: username ! Current user - character(SHR_KIND_CS) :: hostname ! Current machine - character(SHR_KIND_CL) :: start_type ! Startup-type: startup, continue, branch - character(SHR_KIND_CL) :: timing_dir ! Dir for timing files - character(SHR_KIND_CL) :: tchkpt_dir ! Dir for timing checkpoint files - logical :: aqua_planet ! Aqua-planet mode (surface is all ocean) - integer(SHR_KIND_IN) :: aqua_planet_sst ! analytic sst field - logical :: run_barriers ! barrier component run calls - logical :: brnch_retain_casename ! If retain casename for branch - integer(SHR_KIND_IN) :: info_debug ! debug flag - logical :: bfbflag ! bit for bit flag - logical :: esmf_map_flag ! esmf mapping flag - character(SHR_KIND_CL) :: restart_pfile ! Restart pointer filename - character(SHR_KIND_CL) :: restart_file ! Restart filename - - logical :: single_column ! single column mode - real (SHR_KIND_R8) :: scmlat ! single column mode latitude - real (SHR_KIND_R8) :: scmlon ! single column mode longitude - character(SHR_KIND_CS) :: logFilePostFix ! postfix for output log files - character(SHR_KIND_CL) :: outPathRoot ! root output files - logical :: perpetual ! perpetual mode - integer(SHR_KIND_IN) :: perpetual_ymd ! perpetual ymd - integer(SHR_KIND_IN) :: orb_iyear ! orbital year - integer(SHR_KIND_IN) :: orb_iyear_align ! model year associated with orb year - character(SHR_KIND_CL) :: orb_mode ! orbital mode - real(SHR_KIND_R8) :: orb_obliq ! Obliquity of orbit - real(SHR_KIND_R8) :: orb_eccen ! Eccentricity of orbit - real(SHR_KIND_R8) :: orb_mvelp ! Location of vernal equinox - real(SHR_KIND_R8) :: orb_obliqr ! Obliquity in radians - real(SHR_KIND_R8) :: orb_lambm0 ! lon of per at vernal equ - real(SHR_KIND_R8) :: orb_mvelpp ! mvelp plus pi - character(SHR_KIND_CS) :: wv_sat_scheme ! Water vapor saturation pressure scheme - real(SHR_KIND_R8) :: wv_sat_transition_start! Saturation transition range - logical :: wv_sat_use_tables ! Saturation pressure lookup tables - real(SHR_KIND_R8) :: wv_sat_table_spacing ! Saturation pressure table resolution - character(SHR_KIND_CS) :: tfreeze_option ! Freezing point calculation - character(SHR_KIND_CL) :: flux_epbal ! selects E,P,R adjustment technique - logical :: flux_albav ! T => no diurnal cycle in ocn albedos - logical :: flux_diurnal ! 
T => diurnal cycle in atm/ocn fluxes - integer :: ocn_surface_flux_scheme ! 0: E3SMv1 1: COARE 2: UA - logical :: coldair_outbreak_mod ! (Mahrt & Sun 1995,MWR) - real(SHR_KIND_R8) :: flux_convergence ! atmocn flux calc convergence value - integer :: flux_max_iteration ! max number of iterations of atmocn flux loop - character(SHR_KIND_CL) :: glc_renormalize_smb ! Whether to renormalize smb sent from lnd -> glc - real(SHR_KIND_R8) :: wall_time_limit ! force stop time limit (hours) - character(SHR_KIND_CS) :: force_stop_at ! when to force a stop (month, day, etc) - character(SHR_KIND_CL) :: atm_gnam ! atm grid - character(SHR_KIND_CL) :: lnd_gnam ! lnd grid - character(SHR_KIND_CL) :: ocn_gnam ! ocn grid - character(SHR_KIND_CL) :: ice_gnam ! ice grid - character(SHR_KIND_CL) :: rof_gnam ! rof grid - character(SHR_KIND_CL) :: glc_gnam ! glc grid - character(SHR_KIND_CL) :: wav_gnam ! wav grid - character(SHR_KIND_CL) :: iac_gnam ! iac grid - logical :: shr_map_dopole ! pole corrections in shr_map_mod - character(SHR_KIND_CL) :: vect_map ! vector mapping option - character(SHR_KIND_CS) :: aoflux_grid ! grid for atm ocn flux calc - integer :: cpl_decomp ! coupler decomp - character(SHR_KIND_CL) :: cpl_seq_option ! coupler sequencing option - - logical :: do_budgets ! do heat/water budgets diagnostics - logical :: do_histinit ! write out initial history file - integer :: budget_inst ! instantaneous budget level - integer :: budget_daily ! daily budget level - integer :: budget_month ! monthly budget level - integer :: budget_ann ! annual budget level - integer :: budget_ltann ! long term budget level written at end of year - integer :: budget_ltend ! long term budget level written at end of run - logical :: histaux_a2x ! cpl writes aux hist files: a2x every c2a comm - logical :: histaux_a2x1hri ! cpl writes aux hist files: a2x 1hr instaneous values - logical :: histaux_a2x1hr ! cpl writes aux hist files: a2x 1hr - logical :: histaux_a2x3hr ! cpl writes aux hist files: a2x 3hr states - logical :: histaux_a2x3hrp ! cpl writes aux hist files: a2x 2hr precip - logical :: histaux_a2x24hr ! cpl writes aux hist files: a2x daily all - logical :: histaux_l2x1yrg ! cpl writes aux hist files: l2x annual glc forcings - logical :: histaux_l2x ! cpl writes aux hist files: l2x every c2l comm - logical :: histaux_r2x ! cpl writes aux hist files: r2x daily - logical :: histaux_double_precision ! if true, use double-precision for cpl aux hist files - logical :: histavg_atm ! cpl writes atm fields in average history file - logical :: histavg_lnd ! cpl writes lnd fields in average history file - logical :: histavg_ocn ! cpl writes ocn fields in average history file - logical :: histavg_ice ! cpl writes ice fields in average history file - logical :: histavg_rof ! cpl writes rof fields in average history file - logical :: histavg_glc ! cpl writes glc fields in average history file - logical :: histavg_wav ! cpl writes wav fields in average history file - logical :: histavg_iac ! cpl writes wav fields in average history file - logical :: histavg_xao ! cpl writes flux xao fields in average history file - logical :: drv_threading ! is threading control in driver turned on - real(SHR_KIND_R8) :: eps_frac ! fraction error tolerance - real(SHR_KIND_R8) :: eps_amask ! atm mask error tolerance - real(SHR_KIND_R8) :: eps_agrid ! atm grid error tolerance - real(SHR_KIND_R8) :: eps_aarea ! atm area error tolerance - real(SHR_KIND_R8) :: eps_omask ! ocn mask error tolerance - real(SHR_KIND_R8) :: eps_ogrid ! 
ocn grid error tolerance - real(SHR_KIND_R8) :: eps_oarea ! ocn area error tolerance - logical :: reprosum_use_ddpdd ! use ddpdd algorithm - logical :: reprosum_allow_infnan ! allow INF and NaN summands - real(SHR_KIND_R8) :: reprosum_diffmax ! maximum difference tolerance - logical :: reprosum_recompute ! recompute reprosum with nonscalable algorithm - ! if reprosum_diffmax is exceeded - logical :: mct_usealltoall ! flag for mct alltoall - logical :: mct_usevector ! flag for mct vector - real(shr_kind_r8) :: max_cplstep_time ! abort if cplstep time exceeds this value - character(SHR_KIND_CL) :: model_doi_url - - namelist /seq_infodata_inparm/ & - cime_model, case_desc, case_name, start_type, tchkpt_dir, & - model_version, username, hostname, timing_dir, & - aqua_planet,aqua_planet_sst, & - brnch_retain_casename, info_debug, bfbflag, & - restart_pfile, restart_file, run_barriers, & - single_column, scmlat, force_stop_at, & - scmlon, logFilePostFix, outPathRoot, flux_diurnal,& - ocn_surface_flux_scheme, & - coldair_outbreak_mod, & - flux_convergence, flux_max_iteration, & - perpetual, perpetual_ymd, flux_epbal, flux_albav, & - orb_iyear_align, orb_mode, wall_time_limit, & - orb_iyear, orb_obliq, orb_eccen, orb_mvelp, & - wv_sat_scheme, wv_sat_transition_start, & - wv_sat_use_tables, wv_sat_table_spacing, & - tfreeze_option, glc_renormalize_smb, & - ice_gnam, rof_gnam, glc_gnam, wav_gnam, & - atm_gnam, lnd_gnam, ocn_gnam, iac_gnam, cpl_decomp, & - shr_map_dopole, vect_map, aoflux_grid, do_histinit, & - do_budgets, drv_threading, & - budget_inst, budget_daily, budget_month, & - budget_ann, budget_ltann, budget_ltend, & - histaux_a2x,histaux_a2x1hri,histaux_a2x1hr, & - histaux_a2x3hr,histaux_a2x3hrp, & - histaux_a2x24hr,histaux_l2x ,histaux_r2x, & - histaux_double_precision, & - histavg_atm, histavg_lnd, histavg_ocn, histavg_ice, & - histavg_rof, histavg_glc, histavg_wav, histavg_xao, & - histavg_iac, & - histaux_l2x1yrg, cpl_seq_option, & - eps_frac, eps_amask, & - eps_agrid, eps_aarea, eps_omask, eps_ogrid, & - eps_oarea, esmf_map_flag, & - reprosum_use_ddpdd, reprosum_allow_infnan, & - reprosum_diffmax, reprosum_recompute, & - mct_usealltoall, mct_usevector, max_cplstep_time, model_doi_url - - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(ID,mpicom=mpicom) - - !--------------------------------------------------------------------------- - ! Set infodata on root pe - !--------------------------------------------------------------------------- - if (seq_comm_iamroot(ID)) then - - !--------------------------------------------------------------------------- - ! Set namelist defaults - !--------------------------------------------------------------------------- - cime_model = 'unknown' - case_desc = ' ' - case_name = ' ' - model_version = 'unknown' - username = 'unknown' - hostname = 'unknown' - timing_dir = '.' - tchkpt_dir = '.' - start_type = ' ' - aqua_planet = .false. - aqua_planet_sst = 1 - run_barriers = .false. - brnch_retain_casename = .false. - info_debug = 1 - bfbflag = .false. - esmf_map_flag = .false. - restart_pfile = 'rpointer.drv' - restart_file = trim(sp_str) - single_column = .false. - scmlat = -999. - scmlon = -999. - logFilePostFix = '.log' - outPathRoot = './' - perpetual = .false. 
- perpetual_ymd = -999 - orb_mode = seq_infodata_orb_fixed_year - orb_iyear = SHR_ORB_UNDEF_INT - orb_iyear_align = SHR_ORB_UNDEF_INT - orb_obliq = SHR_ORB_UNDEF_REAL - orb_eccen = SHR_ORB_UNDEF_REAL - orb_mvelp = SHR_ORB_UNDEF_REAL - wv_sat_scheme = "GoffGratch" - wv_sat_transition_start = 20.0 - wv_sat_use_tables = .false. - wv_sat_table_spacing = 1.0 - tfreeze_option = 'minus1p8' - flux_epbal = 'off' - flux_albav = .false. - flux_diurnal = .false. - ocn_surface_flux_scheme = 0 - coldair_outbreak_mod = .false. - flux_convergence = 0.0_SHR_KIND_R8 - flux_max_iteration = 2 - glc_renormalize_smb = 'on_if_glc_coupled_fluxes' - wall_time_limit = -1.0 - force_stop_at = 'month' - atm_gnam = 'undefined' - lnd_gnam = 'undefined' - ocn_gnam = 'undefined' - ice_gnam = 'undefined' - rof_gnam = 'undefined' - glc_gnam = 'undefined' - wav_gnam = 'undefined' - iac_gnam = 'undefined' - shr_map_dopole = .true. - vect_map = 'cart3d' - aoflux_grid = 'ocn' - cpl_decomp = 0 - cpl_seq_option = 'CESM1_MOD' - do_budgets = .false. - do_histinit = .false. - budget_inst = 0 - budget_daily = 0 - budget_month = 1 - budget_ann = 1 - budget_ltann = 1 - budget_ltend = 0 - histaux_a2x = .false. - histaux_a2x1hri = .false. - histaux_a2x1hr = .false. - histaux_a2x3hr = .false. - histaux_a2x3hrp = .false. - histaux_a2x24hr = .false. - histaux_l2x1yrg = .false. - histaux_l2x = .false. - histaux_r2x = .false. - histaux_double_precision = .false. - histavg_atm = .true. - histavg_lnd = .true. - histavg_ocn = .true. - histavg_ice = .true. - histavg_rof = .true. - histavg_glc = .true. - histavg_wav = .true. - histavg_iac = .true. - histavg_xao = .true. - drv_threading = .false. - eps_frac = 1.0e-02_SHR_KIND_R8 - eps_amask = 1.0e-13_SHR_KIND_R8 - eps_agrid = 1.0e-12_SHR_KIND_R8 - eps_aarea = 9.0e-07_SHR_KIND_R8 - eps_omask = 1.0e-06_SHR_KIND_R8 - eps_ogrid = 1.0e-02_SHR_KIND_R8 - eps_oarea = 1.0e-01_SHR_KIND_R8 - reprosum_use_ddpdd = .false. - reprosum_allow_infnan = .false. - reprosum_diffmax = -1.0e-8 - reprosum_recompute = .false. - mct_usealltoall = .false. - mct_usevector = .false. - max_cplstep_time = 0.0 - model_doi_url = 'unset' - - !--------------------------------------------------------------------------- - ! Read in namelist - !--------------------------------------------------------------------------- - unitn = shr_file_getUnit() - write(logunit,"(A)") subname,' read seq_infodata_inparm namelist from: '//trim(nmlfile) - open( unitn, file=trim(nmlfile), status='old' ) - ierr = 1 - do while( ierr /= 0 ) - read(unitn,nml=seq_infodata_inparm,iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( subname//':: namelist read returns an'// & - ' end of file or end of record condition' ) - end if - end do - close(unitn) - call shr_file_freeUnit( unitn ) - - !--------------------------------------------------------------------------- - ! 
Set infodata on root pe - !--------------------------------------------------------------------------- - infodata%cime_model = cime_model - infodata%case_desc = case_desc - infodata%case_name = case_name - infodata%model_version = model_version - infodata%username = username - infodata%hostname = hostname - infodata%start_type = start_type - infodata%timing_dir = timing_dir - infodata%tchkpt_dir = tchkpt_dir - infodata%aqua_planet = aqua_planet - infodata%aqua_planet_sst = aqua_planet_sst - infodata%run_barriers = run_barriers - infodata%brnch_retain_casename = brnch_retain_casename - infodata%restart_pfile = restart_pfile - infodata%restart_file = restart_file - if (present(cpl_tag)) then - if (len(cpl_tag) > 0) then - if (trim(restart_file) /= trim(sp_str)) then - write(logunit,*) trim(subname),' ERROR: restart_file can '//& - 'only be read from restart pointer files when using multiple couplers ' - call shr_sys_abort(subname//' ERROR: invalid settings for restart_file ') - end if - end if - infodata%restart_file = restart_file - infodata%restart_pfile = trim(restart_pfile) // trim(cpl_tag) - else - infodata%restart_pfile = restart_pfile - infodata%restart_file = restart_file - end if - infodata%single_column = single_column - infodata%scmlat = scmlat - infodata%scmlon = scmlon - infodata%logFilePostFix = logFilePostFix - infodata%outPathRoot = outPathRoot - infodata%perpetual = perpetual - infodata%perpetual_ymd = perpetual_ymd - infodata%wv_sat_scheme = wv_sat_scheme - infodata%wv_sat_transition_start = wv_sat_transition_start - infodata%wv_sat_use_tables = wv_sat_use_tables - infodata%wv_sat_table_spacing = wv_sat_table_spacing - infodata%tfreeze_option = tfreeze_option - infodata%flux_epbal = flux_epbal - infodata%flux_albav = flux_albav - infodata%flux_diurnal = flux_diurnal - infodata%ocn_surface_flux_scheme = ocn_surface_flux_scheme - infodata%flux_convergence = flux_convergence - infodata%coldair_outbreak_mod = coldair_outbreak_mod - infodata%flux_max_iteration = flux_max_iteration - infodata%glc_renormalize_smb = glc_renormalize_smb - infodata%wall_time_limit = wall_time_limit - infodata%force_stop_at = force_stop_at - infodata%atm_gnam = atm_gnam - infodata%lnd_gnam = lnd_gnam - infodata%ocn_gnam = ocn_gnam - infodata%ice_gnam = ice_gnam - infodata%rof_gnam = rof_gnam - infodata%glc_gnam = glc_gnam - infodata%wav_gnam = wav_gnam - infodata%iac_gnam = iac_gnam - infodata%shr_map_dopole = shr_map_dopole -#ifdef COMPARE_TO_NUOPC - infodata%vect_map = 'none' -#else - infodata%vect_map = vect_map -#endif - infodata%aoflux_grid = aoflux_grid - infodata%cpl_decomp = cpl_decomp - infodata%cpl_seq_option = cpl_seq_option - infodata%do_budgets = do_budgets - infodata%do_histinit = do_histinit - infodata%budget_inst = budget_inst - infodata%budget_daily = budget_daily - infodata%budget_month = budget_month - infodata%budget_ann = budget_ann - infodata%budget_ltann = budget_ltann - infodata%budget_ltend = budget_ltend - infodata%histaux_a2x = histaux_a2x - infodata%histaux_a2x1hri = histaux_a2x1hri - infodata%histaux_a2x1hr = histaux_a2x1hr - infodata%histaux_a2x3hr = histaux_a2x3hr - infodata%histaux_a2x3hrp = histaux_a2x3hrp - infodata%histaux_a2x24hr = histaux_a2x24hr - infodata%histaux_l2x1yrg = histaux_l2x1yrg - infodata%histaux_l2x = histaux_l2x - infodata%histaux_r2x = histaux_r2x - infodata%histaux_double_precision = histaux_double_precision - infodata%histavg_atm = histavg_atm - infodata%histavg_lnd = histavg_lnd - infodata%histavg_ocn = histavg_ocn - infodata%histavg_ice = 
histavg_ice - infodata%histavg_rof = histavg_rof - infodata%histavg_glc = histavg_glc - infodata%histavg_wav = histavg_wav - infodata%histavg_iac = histavg_iac - infodata%histavg_xao = histavg_xao - infodata%drv_threading = drv_threading - infodata%eps_frac = eps_frac - infodata%eps_amask = eps_amask - infodata%eps_agrid = eps_agrid - infodata%eps_aarea = eps_aarea - infodata%eps_omask = eps_omask - infodata%eps_ogrid = eps_ogrid - infodata%eps_oarea = eps_oarea - infodata%reprosum_use_ddpdd = reprosum_use_ddpdd - infodata%reprosum_allow_infnan = reprosum_allow_infnan - infodata%reprosum_diffmax = reprosum_diffmax - infodata%reprosum_recompute = reprosum_recompute - infodata%mct_usealltoall = mct_usealltoall - infodata%mct_usevector = mct_usevector - - infodata%info_debug = info_debug - infodata%bfbflag = bfbflag - infodata%esmf_map_flag = esmf_map_flag - - infodata%atm_present = .true. - infodata%lnd_present = .true. - infodata%rof_present = .true. - infodata%rofice_present = .true. - infodata%flood_present = .true. - infodata%ocn_present = .true. - infodata%ice_present = .true. - infodata%glc_present = .true. - infodata%wav_present = .true. - infodata%glclnd_present = .true. - infodata%glcocn_present = .true. - infodata%glcice_present = .true. - infodata%esp_present = .true. - infodata%iac_present = .true. - - infodata%atm_prognostic = .false. - infodata%lnd_prognostic = .false. - infodata%rof_prognostic = .false. - infodata%ocn_prognostic = .false. - infodata%ocnrof_prognostic = .false. - infodata%ocn_c2_glcshelf = .false. - infodata%ice_prognostic = .false. - infodata%glc_prognostic = .false. - ! It's safest to assume glc_coupled_fluxes = .true. if it's not set elsewhere, - ! because this is needed for conservation in some cases. Note that it is ignored - ! if glc_present is .false., so it's okay to just start out assuming it's .true. - ! in all cases. - infodata%glc_coupled_fluxes = .true. - infodata%wav_prognostic = .false. - infodata%iac_prognostic = .false. - infodata%iceberg_prognostic = .false. - infodata%esp_prognostic = .false. - infodata%dead_comps = .false. - - infodata%atm_nx = 0 - infodata%atm_ny = 0 - infodata%lnd_nx = 0 - infodata%lnd_ny = 0 - infodata%rof_nx = 0 - infodata%rof_ny = 0 - infodata%ice_nx = 0 - infodata%ice_ny = 0 - infodata%ocn_nx = 0 - infodata%ocn_ny = 0 - infodata%glc_nx = 0 - infodata%glc_ny = 0 - infodata%wav_nx = 0 - infodata%wav_ny = 0 - infodata%iac_nx = 0 - infodata%iac_ny = 0 - - infodata%nextsw_cday = -1.0_SHR_KIND_R8 - infodata%precip_fact = 1.0_SHR_KIND_R8 - infodata%atm_phase = 1 - infodata%lnd_phase = 1 - infodata%ocn_phase = 1 - infodata%ice_phase = 1 - infodata%glc_phase = 1 - infodata%rof_phase = 1 - infodata%wav_phase = 1 - infodata%iac_phase = 1 - infodata%atm_aero = .false. - infodata%glc_g2lupdate = .false. - infodata%glc_valid_input = .true. - - infodata%max_cplstep_time = max_cplstep_time - infodata%model_doi_url = model_doi_url - !--------------------------------------------------------------- - ! 
check orbital mode, reset unused parameters, validate settings - !--------------------------------------------------------------- - if (trim(orb_mode) == trim(seq_infodata_orb_fixed_year)) then - orb_obliq = SHR_ORB_UNDEF_REAL - orb_eccen = SHR_ORB_UNDEF_REAL - orb_mvelp = SHR_ORB_UNDEF_REAL - if (orb_iyear == SHR_ORB_UNDEF_INT) then - write(logunit,*) trim(subname),' ERROR: invalid settings orb_mode =',trim(orb_mode) - write(logunit,*) trim(subname),' ERROR: fixed_year settings = ',orb_iyear - call shr_sys_abort(subname//' ERROR: invalid settings for orb_mode '//trim(orb_mode)) - endif - elseif (trim(orb_mode) == trim(seq_infodata_orb_variable_year)) then - orb_obliq = SHR_ORB_UNDEF_REAL - orb_eccen = SHR_ORB_UNDEF_REAL - orb_mvelp = SHR_ORB_UNDEF_REAL - if (orb_iyear == SHR_ORB_UNDEF_INT .or. & - orb_iyear_align == SHR_ORB_UNDEF_INT) then - write(logunit,*) trim(subname),' ERROR: invalid settings orb_mode =',trim(orb_mode) - write(logunit,*) trim(subname),' ERROR: variable_year settings = ',orb_iyear,orb_iyear_align - call shr_sys_abort(subname//' ERROR: invalid settings for orb_mode '//trim(orb_mode)) - endif - elseif (trim(orb_mode) == trim(seq_infodata_orb_fixed_parameters)) then - !-- force orb_iyear to undef to make sure shr_orb_params works properly - orb_iyear = SHR_ORB_UNDEF_INT - orb_iyear_align = SHR_ORB_UNDEF_INT - if (orb_eccen == SHR_ORB_UNDEF_REAL .or. & - orb_obliq == SHR_ORB_UNDEF_REAL .or. & - orb_mvelp == SHR_ORB_UNDEF_REAL) then - write(logunit,*) trim(subname),' ERROR: invalid settings orb_mode =',trim(orb_mode) - write(logunit,*) trim(subname),' ERROR: orb_eccen = ',orb_eccen - write(logunit,*) trim(subname),' ERROR: orb_obliq = ',orb_obliq - write(logunit,*) trim(subname),' ERROR: orb_mvelp = ',orb_mvelp - call shr_sys_abort(subname//' ERROR: invalid settings for orb_mode '//trim(orb_mode)) - endif - else - call shr_sys_abort(subname//' ERROR: invalid orb_mode '//trim(orb_mode)) - endif - - call shr_orb_params(orb_iyear, orb_eccen, orb_obliq, orb_mvelp, & - orb_obliqr, orb_lambm0, orb_mvelpp, .true.) - - infodata%orb_mode = orb_mode - infodata%orb_iyear = orb_iyear - infodata%orb_iyear_align = orb_iyear_align - infodata%orb_eccen = orb_eccen - infodata%orb_obliq = orb_obliq - infodata%orb_mvelp = orb_mvelp - infodata%orb_obliqr = orb_obliqr - infodata%orb_lambm0 = orb_lambm0 - infodata%orb_mvelpp = orb_mvelpp - - !--- Derive a few things --- - infodata%rest_case_name = ' ' - infodata%read_restart = .false. - if (trim(start_type) == trim(seq_infodata_start_type_cont) .or. & - trim(start_type) == trim(seq_infodata_start_type_brnch)) then - infodata%read_restart = .true. - endif - - end if - - !----------------------------------------------------- - ! 
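The orbital-mode check above reduces to a simple rule: fixed_year requires orb_iyear, variable_year requires orb_iyear and orb_iyear_align, and fixed_parameters requires explicit orb_eccen/orb_obliq/orb_mvelp. A standalone sketch of that rule, not part of the diff; the mode strings and sentinel constants below are placeholders for the seq_infodata_orb_* parameters and SHR_ORB_UNDEF_* values used by the real module.

module orb_check_sketch
  implicit none
  integer,  parameter :: r8 = selected_real_kind(12)
  integer,  parameter :: undef_int  = -999999      ! placeholder for SHR_ORB_UNDEF_INT
  real(r8), parameter :: undef_real = 1.e36_r8     ! placeholder for SHR_ORB_UNDEF_REAL
contains
  logical function orb_settings_ok(mode, iyear, iyear_align, eccen, obliq, mvelp)
    character(len=*), intent(in) :: mode
    integer,          intent(in) :: iyear, iyear_align
    real(r8),         intent(in) :: eccen, obliq, mvelp
    select case (trim(mode))
    case ('fixed_year')        ! only the orbital year is required
       orb_settings_ok = iyear /= undef_int
    case ('variable_year')     ! orbital year plus the model-year alignment
       orb_settings_ok = iyear /= undef_int .and. iyear_align /= undef_int
    case ('fixed_parameters')  ! explicit eccentricity, obliquity and mvelp
       orb_settings_ok = eccen /= undef_real .and. obliq /= undef_real &
                         .and. mvelp /= undef_real
    case default               ! unknown mode: the real code aborts here
       orb_settings_ok = .false.
    end select
  end function orb_settings_ok
end module orb_check_sketch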
Read Restart (seq_io_read must be called on all pes) - !----------------------------------------------------- - call shr_mpi_bcast(infodata%read_restart,mpicom) - if (infodata%read_restart) then - !--- read rpointer if restart_file is set to sp_str --- - if (seq_comm_iamroot(ID)) then - if (trim(infodata%restart_file) == trim(sp_str)) then - unitn = shr_file_getUnit() - if (loglevel > 0) write(logunit,"(3A)") subname," read rpointer file ", & - trim(infodata%restart_pfile) - open(unitn, file=infodata%restart_pfile, form='FORMATTED', status='old',iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( subname//':: rpointer file open returns an'// & - ' error condition' ) - end if - read(unitn,'(a)', iostat=ierr) infodata%restart_file - if (ierr < 0) then - call shr_sys_abort( subname//':: rpointer file read returns an'// & - ' error condition' ) - end if - close(unitn) - call shr_file_freeUnit( unitn ) - write(logunit,"(3A)") subname,' restart file from rpointer= ', & - trim(infodata%restart_file) - endif - endif - call shr_mpi_bcast(infodata%restart_file,mpicom) - !--- NOTE: use CPLID here because seq_io is only value on CPLID - if (seq_comm_iamin(CPLID)) then - call seq_io_read(infodata%restart_file,pioid,infodata%nextsw_cday ,'seq_infodata_nextsw_cday') - call seq_io_read(infodata%restart_file,pioid,infodata%precip_fact ,'seq_infodata_precip_fact') - call seq_io_read(infodata%restart_file,pioid,infodata%rest_case_name,'seq_infodata_case_name') - endif - !--- Send from CPLID ROOT to GLOBALID ROOT, use bcast as surrogate - call shr_mpi_bcast(infodata%nextsw_cday,mpicom,pebcast=seq_comm_gloroot(CPLID)) - call shr_mpi_bcast(infodata%precip_fact,mpicom,pebcast=seq_comm_gloroot(CPLID)) - call shr_mpi_bcast(infodata%rest_case_name,mpicom,pebcast=seq_comm_gloroot(CPLID)) - endif - - if (seq_comm_iamroot(ID)) then - if (infodata%aqua_planet) then - infodata%atm_present = .true. - infodata%lnd_present = .false. - infodata%rof_present = .false. - infodata%rofice_present = .false. - infodata%flood_present = .false. - infodata%ice_present = .false. - infodata%ocn_present = .true. - infodata%glc_present = .false. - infodata%wav_present = .false. - infodata%iac_present = .false. - infodata%glclnd_present = .false. - infodata%glcocn_present = .false. - infodata%glcice_present = .false. - infodata%esp_present = .false. - end if - - if ( infodata%aqua_planet ) then - infodata%aqua_planet_sst = 1 - infodata%perpetual = .true. - infodata%perpetual_ymd = aqua_perpetual_ymd - endif - - ! --- Error check the input values ------ - call seq_infodata_Check( infodata ) - - end if - - call seq_infodata_bcast(infodata,mpicom) - - END SUBROUTINE seq_infodata_Init - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_infodata_Init2 -- initialize infodata structures - ! - ! !DESCRIPTION: - ! - ! Initialize infodata items that depend on the time manager setup - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_infodata_Init2(infodata, ID) - - ! !USES: - - use seq_timemgr_mod, only : seq_timemgr_pause_active - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(INOUT) :: infodata ! infodata object - integer(SHR_KIND_IN), intent(IN) :: ID ! seq_comm ID - !EOP - - !----- local ----- - integer :: mpicom ! 
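The read-restart branch above has the root task read the restart file name from the rpointer file and then broadcast it (via shr_mpi_bcast) so that every task agrees on it. A bare-MPI sketch of the same read-then-broadcast pattern, not part of the diff; the file name rpointer.drv and the fixed 256-character buffer are assumptions made only for illustration.

program rpointer_sketch
  use mpi
  implicit none
  integer :: ierr, rank, unitn
  character(len=256) :: restart_file   ! fixed length assumed for the sketch

  call mpi_init(ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
  restart_file = ' '
  if (rank == 0) then
     open(newunit=unitn, file='rpointer.drv', status='old', action='read')  ! assumed name
     read(unitn, '(a)') restart_file
     close(unitn)
  end if
  ! every task ends up with the name the root task read
  call mpi_bcast(restart_file, len(restart_file), MPI_CHARACTER, 0, MPI_COMM_WORLD, ierr)
  call mpi_finalize(ierr)
end program rpointer_sketch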
MPI communicator - - call seq_comm_setptrs(ID, mpicom=mpicom) - call seq_infodata_bcast(infodata, mpicom) - - END SUBROUTINE seq_infodata_Init2 - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_infodata_GetData_explicit -- Get values from infodata object - ! - ! !DESCRIPTION: - ! - ! Get values out of the infodata object. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_desc, timing_dir, & - model_version, username, hostname, rest_case_name, tchkpt_dir, & - start_type, restart_pfile, restart_file, perpetual, perpetual_ymd, & - aqua_planet,aqua_planet_sst, brnch_retain_casename, & - single_column, scmlat,scmlon,logFilePostFix, outPathRoot, & - atm_present, atm_prognostic, & - lnd_present, lnd_prognostic, & - rof_present, rof_prognostic, & - ocn_present, ocn_prognostic, ocnrof_prognostic, ocn_c2_glcshelf, & - ice_present, ice_prognostic, & - glc_present, glc_prognostic, & - iac_present, iac_prognostic, & - glc_coupled_fluxes, & - flood_present, wav_present, wav_prognostic, rofice_present, & - glclnd_present, glcocn_present, glcice_present, iceberg_prognostic,& - esp_present, esp_prognostic, & - bfbflag, lnd_gnam, cpl_decomp, cpl_seq_option, & - ice_gnam, rof_gnam, glc_gnam, wav_gnam, iac_gnam, & - atm_gnam, ocn_gnam, info_debug, dead_comps, read_restart, & - shr_map_dopole, vect_map, aoflux_grid, flux_epbalfact, & - nextsw_cday, precip_fact, flux_epbal, flux_albav, & - glc_g2lupdate, atm_aero, run_barriers, esmf_map_flag, & - do_budgets, do_histinit, drv_threading, flux_diurnal, & - ocn_surface_flux_scheme, & - coldair_outbreak_mod, & - flux_convergence, flux_max_iteration, & - budget_inst, budget_daily, budget_month, wall_time_limit, & - budget_ann, budget_ltann, budget_ltend , force_stop_at, & - histaux_a2x , histaux_a2x1hri, histaux_a2x1hr, & - histaux_a2x3hr, histaux_a2x3hrp , histaux_l2x1yrg, & - histaux_a2x24hr, histaux_l2x , histaux_r2x , histaux_double_precision, & - orb_obliq, histavg_atm, histavg_lnd, histavg_ocn, histavg_ice, & - histavg_rof, histavg_glc, histavg_wav, histavg_xao, histavg_iac, & - orb_iyear, orb_iyear_align, orb_mode, orb_mvelp, & - orb_eccen, orb_obliqr, orb_lambm0, orb_mvelpp, wv_sat_scheme, & - wv_sat_transition_start, wv_sat_use_tables, wv_sat_table_spacing, & - tfreeze_option, glc_renormalize_smb, & - glc_phase, rof_phase, atm_phase, lnd_phase, ocn_phase, ice_phase, & - wav_phase, iac_phase, esp_phase, wav_nx, wav_ny, atm_nx, atm_ny, & - lnd_nx, lnd_ny, rof_nx, rof_ny, ice_nx, ice_ny, ocn_nx, ocn_ny, & - iac_nx, iac_ny, glc_nx, glc_ny, eps_frac, eps_amask, & - eps_agrid, eps_aarea, eps_omask, eps_ogrid, eps_oarea, & - reprosum_use_ddpdd, reprosum_allow_infnan, & - reprosum_diffmax, reprosum_recompute, & - mct_usealltoall, mct_usevector, max_cplstep_time, model_doi_url, & - glc_valid_input) - - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(IN) :: infodata ! Input CCSM structure - character(len=*), optional, intent(OUT) :: cime_model ! CIME model (e3sm or cesm) - character(len=*), optional, intent(OUT) :: start_type ! Start type - character(len=*), optional, intent(OUT) :: case_name ! Short case identification - character(len=*), optional, intent(OUT) :: case_desc ! Long case description - character(len=*), optional, intent(OUT) :: model_version ! 
Model version - character(len=*), optional, intent(OUT) :: username ! Username - character(len=*), optional, intent(OUT) :: hostname ! Hostname - character(len=*), optional, intent(OUT) :: rest_case_name ! restart casename - character(len=*), optional, intent(OUT) :: timing_dir ! timing dir name - character(len=*), optional, intent(OUT) :: tchkpt_dir ! timing checkpoint dir name - logical, optional, intent(OUT) :: aqua_planet ! aqua_planet mode - integer(SHR_KIND_IN), optional, intent(OUT) :: aqua_planet_sst ! aqua_planet sst_type - logical, optional, intent(OUT) :: run_barriers ! barrier component run calls - logical, optional, intent(OUT) :: brnch_retain_casename - logical, optional, intent(OUT) :: read_restart ! read restart flag - character(len=*), optional, intent(OUT) :: restart_pfile ! Restart pointer file - character(len=*), optional, intent(OUT) :: restart_file ! Restart file pathname - logical, optional, intent(OUT) :: single_column - real (SHR_KIND_R8), optional, intent(OUT) :: scmlat - real (SHR_KIND_R8), optional, intent(OUT) :: scmlon - character(len=*), optional, intent(OUT) :: logFilePostFix ! output log file postfix - character(len=*), optional, intent(OUT) :: outPathRoot ! output file root - logical, optional, intent(OUT) :: perpetual ! If this is perpetual - integer, optional, intent(OUT) :: perpetual_ymd ! If perpetual, date - character(len=*), optional, intent(OUT) :: orb_mode ! orbital mode - integer, optional, intent(OUT) :: orb_iyear ! orbital year - integer, optional, intent(OUT) :: orb_iyear_align ! orbital year model year align - real(SHR_KIND_R8), optional, intent(OUT) :: orb_eccen ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(OUT) :: orb_obliqr ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(OUT) :: orb_obliq ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(OUT) :: orb_lambm0 ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(OUT) :: orb_mvelpp ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(OUT) :: orb_mvelp ! See shr_orb_mod - character(len=*), optional, intent(OUT) :: wv_sat_scheme ! Water vapor saturation pressure scheme - real(SHR_KIND_R8), optional, intent(OUT) :: wv_sat_transition_start ! Saturation transition range - logical, optional, intent(OUT) :: wv_sat_use_tables ! Saturation pressure lookup tables - real(SHR_KIND_R8), optional, intent(OUT) :: wv_sat_table_spacing ! Saturation pressure table resolution - character(len=*), optional, intent(OUT) :: tfreeze_option ! Freezing point of salt water - character(len=*), optional, intent(OUT) :: flux_epbal ! selects E,P,R adjustment technique - logical, optional, intent(OUT) :: flux_albav ! T => no diurnal cycle in ocn albedos - logical, optional, intent(OUT) :: flux_diurnal ! T => diurnal cycle in atm/ocn flux - integer, optional, intent(OUT) :: ocn_surface_flux_scheme ! 0: E3SMv1 1: COARE 2: UA - real(SHR_KIND_R8), optional, intent(out) :: flux_convergence ! atmocn flux calc convergence value - logical, optional, intent(out) :: coldair_outbreak_mod ! (Mahrt & Sun 1995, MWR) - integer, optional, intent(OUT) :: flux_max_iteration ! max number of iterations of atmocn flux loop - - character(len=*), optional, intent(OUT) :: glc_renormalize_smb ! Whether to renormalize smb sent from lnd -> glc - real(SHR_KIND_R8), optional, intent(OUT) :: wall_time_limit ! force stop wall time (hours) - character(len=*), optional, intent(OUT) :: force_stop_at ! force stop at next (month, day, etc) - character(len=*), optional, intent(OUT) :: atm_gnam ! 
atm grid - character(len=*), optional, intent(OUT) :: lnd_gnam ! lnd grid - character(len=*), optional, intent(OUT) :: ocn_gnam ! ocn grid - character(len=*), optional, intent(OUT) :: ice_gnam ! ice grid - character(len=*), optional, intent(OUT) :: rof_gnam ! rof grid - character(len=*), optional, intent(OUT) :: glc_gnam ! glc grid - character(len=*), optional, intent(OUT) :: wav_gnam ! wav grid - character(len=*), optional, intent(OUT) :: iac_gnam ! iac grid - logical, optional, intent(OUT) :: shr_map_dopole ! pole corrections in shr_map_mod - character(len=*), optional, intent(OUT) :: vect_map ! vector mapping option - character(len=*), optional, intent(OUT) :: aoflux_grid ! grid for atm ocn flux calc - integer, optional, intent(OUT) :: cpl_decomp ! coupler decomp - character(len=*), optional, intent(OUT) :: cpl_seq_option ! coupler sequencing option - logical, optional, intent(OUT) :: do_budgets ! heat/water budgets - logical, optional, intent(OUT) :: do_histinit ! initial history file - integer, optional, intent(OUT) :: budget_inst ! inst budget - integer, optional, intent(OUT) :: budget_daily ! daily budget - integer, optional, intent(OUT) :: budget_month ! month budget - integer, optional, intent(OUT) :: budget_ann ! ann budget - integer, optional, intent(OUT) :: budget_ltann ! ltann budget - integer, optional, intent(OUT) :: budget_ltend ! ltend budget - logical, optional, intent(OUT) :: histaux_a2x - logical, optional, intent(OUT) :: histaux_a2x1hri - logical, optional, intent(OUT) :: histaux_a2x1hr - logical, optional, intent(OUT) :: histaux_a2x3hr - logical, optional, intent(OUT) :: histaux_a2x3hrp - logical, optional, intent(OUT) :: histaux_a2x24hr - logical, optional, intent(OUT) :: histaux_l2x1yrg - logical, optional, intent(OUT) :: histaux_l2x - logical, optional, intent(OUT) :: histaux_r2x - logical, optional, intent(OUT) :: histaux_double_precision - logical, optional, intent(OUT) :: histavg_atm - logical, optional, intent(OUT) :: histavg_lnd - logical, optional, intent(OUT) :: histavg_ocn - logical, optional, intent(OUT) :: histavg_ice - logical, optional, intent(OUT) :: histavg_rof - logical, optional, intent(OUT) :: histavg_glc - logical, optional, intent(OUT) :: histavg_wav - logical, optional, intent(OUT) :: histavg_iac - logical, optional, intent(OUT) :: histavg_xao - logical, optional, intent(OUT) :: drv_threading ! driver threading control flag - real(SHR_KIND_R8), optional, intent(OUT) :: eps_frac ! fraction error tolerance - real(SHR_KIND_R8), optional, intent(OUT) :: eps_amask ! atm mask error tolerance - real(SHR_KIND_R8), optional, intent(OUT) :: eps_agrid ! atm grid error tolerance - real(SHR_KIND_R8), optional, intent(OUT) :: eps_aarea ! atm area error tolerance - real(SHR_KIND_R8), optional, intent(OUT) :: eps_omask ! ocn mask error tolerance - real(SHR_KIND_R8), optional, intent(OUT) :: eps_ogrid ! ocn grid error tolerance - real(SHR_KIND_R8), optional, intent(OUT) :: eps_oarea ! ocn area error tolerance - logical, optional, intent(OUT) :: reprosum_use_ddpdd ! use ddpdd algorithm - logical, optional, intent(OUT) :: reprosum_allow_infnan ! allow INF and NaN summands - real(SHR_KIND_R8), optional, intent(OUT) :: reprosum_diffmax ! maximum difference tolerance - logical, optional, intent(OUT) :: reprosum_recompute ! recompute if tolerance exceeded - logical, optional, intent(OUT) :: mct_usealltoall ! flag for mct alltoall - logical, optional, intent(OUT) :: mct_usevector ! 
flag for mct vector - - integer(SHR_KIND_IN), optional, intent(OUT) :: info_debug - logical, optional, intent(OUT) :: bfbflag - logical, optional, intent(OUT) :: esmf_map_flag - logical, optional, intent(OUT) :: dead_comps ! do we have dead models - - logical, optional, intent(OUT) :: atm_present ! provide data - logical, optional, intent(OUT) :: atm_prognostic ! need data - logical, optional, intent(OUT) :: lnd_present - logical, optional, intent(OUT) :: lnd_prognostic - logical, optional, intent(OUT) :: rof_present - logical, optional, intent(OUT) :: rofice_present - logical, optional, intent(OUT) :: rof_prognostic - logical, optional, intent(OUT) :: flood_present - logical, optional, intent(OUT) :: ocn_present - logical, optional, intent(OUT) :: ocn_prognostic - logical, optional, intent(OUT) :: ocnrof_prognostic - logical, optional, intent(OUT) :: ocn_c2_glcshelf - logical, optional, intent(OUT) :: ice_present - logical, optional, intent(OUT) :: ice_prognostic - logical, optional, intent(OUT) :: iceberg_prognostic - logical, optional, intent(OUT) :: glc_present - logical, optional, intent(OUT) :: glclnd_present - logical, optional, intent(OUT) :: glcocn_present - logical, optional, intent(OUT) :: glcice_present - logical, optional, intent(OUT) :: glc_prognostic - logical, optional, intent(OUT) :: glc_coupled_fluxes - logical, optional, intent(OUT) :: wav_present - logical, optional, intent(OUT) :: wav_prognostic - logical, optional, intent(OUT) :: iac_present - logical, optional, intent(OUT) :: iac_prognostic - logical, optional, intent(OUT) :: esp_present - logical, optional, intent(OUT) :: esp_prognostic - integer(SHR_KIND_IN), optional, intent(OUT) :: atm_nx ! nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(OUT) :: atm_ny ! nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(OUT) :: lnd_nx - integer(SHR_KIND_IN), optional, intent(OUT) :: lnd_ny - integer(SHR_KIND_IN), optional, intent(OUT) :: rof_nx - integer(SHR_KIND_IN), optional, intent(OUT) :: rof_ny - integer(SHR_KIND_IN), optional, intent(OUT) :: ice_nx - integer(SHR_KIND_IN), optional, intent(OUT) :: ice_ny - integer(SHR_KIND_IN), optional, intent(OUT) :: ocn_nx - integer(SHR_KIND_IN), optional, intent(OUT) :: ocn_ny - integer(SHR_KIND_IN), optional, intent(OUT) :: glc_nx - integer(SHR_KIND_IN), optional, intent(OUT) :: glc_ny - integer(SHR_KIND_IN), optional, intent(OUT) :: wav_nx - integer(SHR_KIND_IN), optional, intent(OUT) :: wav_ny - integer(SHR_KIND_IN), optional, intent(OUT) :: iac_nx - integer(SHR_KIND_IN), optional, intent(OUT) :: iac_ny - - real(SHR_KIND_R8), optional, intent(OUT) :: nextsw_cday ! calendar of next atm shortwave - real(SHR_KIND_R8), optional, intent(OUT) :: precip_fact ! precip factor - real(SHR_KIND_R8), optional, intent(OUT) :: flux_epbalfact ! adjusted precip factor - integer(SHR_KIND_IN), optional, intent(OUT) :: atm_phase ! atm phase - integer(SHR_KIND_IN), optional, intent(OUT) :: lnd_phase ! lnd phase - integer(SHR_KIND_IN), optional, intent(OUT) :: ice_phase ! ice phase - integer(SHR_KIND_IN), optional, intent(OUT) :: ocn_phase ! ocn phase - integer(SHR_KIND_IN), optional, intent(OUT) :: glc_phase ! glc phase - integer(SHR_KIND_IN), optional, intent(OUT) :: rof_phase ! rof phase - integer(SHR_KIND_IN), optional, intent(OUT) :: wav_phase ! wav phase - integer(SHR_KIND_IN), optional, intent(OUT) :: iac_phase ! wav phase - integer(SHR_KIND_IN), optional, intent(OUT) :: esp_phase ! wav phase - logical, optional, intent(OUT) :: atm_aero ! 
atmosphere aerosols - logical, optional, intent(OUT) :: glc_g2lupdate ! update glc2lnd fields in lnd model - real(shr_kind_r8), optional, intent(out) :: max_cplstep_time - character(SHR_KIND_CL), optional, intent(OUT) :: model_doi_url - logical, optional, intent(OUT) :: glc_valid_input - - !----- local ----- - character(len=*), parameter :: subname = '(seq_infodata_GetData_explicit) ' - - !------------------------------------------------------------------------------- - - if ( present(cime_model) ) cime_model = infodata%cime_model - if ( present(start_type) ) start_type = infodata%start_type - if ( present(case_name) ) case_name = infodata%case_name - if ( present(case_desc) ) case_desc = infodata%case_desc - if ( present(model_version) ) model_version = infodata%model_version - if ( present(username) ) username = infodata%username - if ( present(hostname) ) hostname = infodata%hostname - if ( present(rest_case_name) ) rest_case_name = infodata%rest_case_name - if ( present(timing_dir) ) timing_dir = infodata%timing_dir - if ( present(tchkpt_dir) ) tchkpt_dir = infodata%tchkpt_dir - if ( present(aqua_planet) ) aqua_planet = infodata%aqua_planet - if ( present(aqua_planet_sst)) aqua_planet_sst= infodata%aqua_planet_sst - if ( present(run_barriers) ) run_barriers = infodata%run_barriers - if ( present(brnch_retain_casename) ) & - brnch_retain_casename = infodata%brnch_retain_casename - if ( present(read_restart) ) read_restart = infodata%read_restart - if ( present(restart_pfile) ) restart_pfile = infodata%restart_pfile - if ( present(restart_file) ) restart_file = infodata%restart_file - if ( present(single_column) ) single_column = infodata%single_column - if ( present(scmlat) ) scmlat = infodata%scmlat - if ( present(scmlon) ) scmlon = infodata%scmlon - if ( present(logFilePostFix) ) logFilePostFix = infodata%logFilePostFix - if ( present(outPathRoot) ) outPathRoot = infodata%outPathRoot - if ( present(perpetual) ) perpetual = infodata%perpetual - if ( present(perpetual_ymd) ) perpetual_ymd = infodata%perpetual_ymd - if ( present(orb_iyear) ) orb_iyear = infodata%orb_iyear - if ( present(orb_iyear_align)) orb_iyear_align= infodata%orb_iyear_align - if ( present(orb_mode) ) orb_mode = infodata%orb_mode - if ( present(orb_eccen) ) orb_eccen = infodata%orb_eccen - if ( present(orb_obliqr) ) orb_obliqr = infodata%orb_obliqr - if ( present(orb_obliq) ) orb_obliq = infodata%orb_obliq - if ( present(orb_lambm0) ) orb_lambm0 = infodata%orb_lambm0 - if ( present(orb_mvelpp) ) orb_mvelpp = infodata%orb_mvelpp - if ( present(orb_mvelp) ) orb_mvelp = infodata%orb_mvelp - if ( present(wv_sat_scheme) ) wv_sat_scheme = infodata%wv_sat_scheme - if ( present(wv_sat_transition_start)) & - wv_sat_transition_start = infodata%wv_sat_transition_start - if ( present(wv_sat_use_tables)) wv_sat_use_tables = infodata%wv_sat_use_tables - if ( present(wv_sat_table_spacing)) wv_sat_table_spacing = infodata%wv_sat_table_spacing - if ( present(tfreeze_option) ) tfreeze_option = infodata%tfreeze_option - if ( present(flux_epbal) ) flux_epbal = infodata%flux_epbal - if ( present(flux_albav) ) flux_albav = infodata%flux_albav - if ( present(flux_diurnal) ) flux_diurnal = infodata%flux_diurnal - if ( present(ocn_surface_flux_scheme) ) ocn_surface_flux_scheme = & - infodata%ocn_surface_flux_scheme - if ( present(coldair_outbreak_mod)) coldair_outbreak_mod = infodata%coldair_outbreak_mod - if ( present(flux_convergence)) flux_convergence = infodata%flux_convergence - if ( present(flux_max_iteration)) flux_max_iteration = 
infodata%flux_max_iteration - if ( present(glc_renormalize_smb)) glc_renormalize_smb = infodata%glc_renormalize_smb - if ( present(wall_time_limit)) wall_time_limit= infodata%wall_time_limit - if ( present(force_stop_at) ) force_stop_at = infodata%force_stop_at - if ( present(atm_gnam) ) atm_gnam = infodata%atm_gnam - if ( present(lnd_gnam) ) lnd_gnam = infodata%lnd_gnam - if ( present(ocn_gnam) ) ocn_gnam = infodata%ocn_gnam - if ( present(ice_gnam) ) ice_gnam = infodata%ice_gnam - if ( present(rof_gnam) ) rof_gnam = infodata%rof_gnam - if ( present(glc_gnam) ) glc_gnam = infodata%glc_gnam - if ( present(wav_gnam) ) wav_gnam = infodata%wav_gnam - if ( present(iac_gnam) ) iac_gnam = infodata%iac_gnam - if ( present(shr_map_dopole) ) shr_map_dopole = infodata%shr_map_dopole - if ( present(vect_map) ) vect_map = infodata%vect_map - if ( present(aoflux_grid) ) aoflux_grid = infodata%aoflux_grid - if ( present(cpl_decomp) ) cpl_decomp = infodata%cpl_decomp - if ( present(cpl_seq_option) ) cpl_seq_option = infodata%cpl_seq_option - if ( present(do_budgets) ) do_budgets = infodata%do_budgets - if ( present(do_histinit) ) do_histinit = infodata%do_histinit - if ( present(budget_inst) ) budget_inst = infodata%budget_inst - if ( present(budget_daily) ) budget_daily = infodata%budget_daily - if ( present(budget_month) ) budget_month = infodata%budget_month - if ( present(budget_ann) ) budget_ann = infodata%budget_ann - if ( present(budget_ltann) ) budget_ltann = infodata%budget_ltann - if ( present(budget_ltend) ) budget_ltend = infodata%budget_ltend - if ( present(histaux_a2x) ) histaux_a2x = infodata%histaux_a2x - if ( present(histaux_a2x1hri)) histaux_a2x1hri= infodata%histaux_a2x1hri - if ( present(histaux_a2x1hr) ) histaux_a2x1hr = infodata%histaux_a2x1hr - if ( present(histaux_a2x3hr) ) histaux_a2x3hr = infodata%histaux_a2x3hr - if ( present(histaux_a2x3hrp)) histaux_a2x3hrp= infodata%histaux_a2x3hrp - if ( present(histaux_a2x24hr)) histaux_a2x24hr= infodata%histaux_a2x24hr - if ( present(histaux_l2x1yrg)) histaux_l2x1yrg= infodata%histaux_l2x1yrg - if ( present(histaux_l2x) ) histaux_l2x = infodata%histaux_l2x - if ( present(histaux_r2x) ) histaux_r2x = infodata%histaux_r2x - if ( present(histaux_double_precision)) histaux_double_precision = infodata%histaux_double_precision - if ( present(histavg_atm) ) histavg_atm = infodata%histavg_atm - if ( present(histavg_lnd) ) histavg_lnd = infodata%histavg_lnd - if ( present(histavg_ocn) ) histavg_ocn = infodata%histavg_ocn - if ( present(histavg_ice) ) histavg_ice = infodata%histavg_ice - if ( present(histavg_rof) ) histavg_rof = infodata%histavg_rof - if ( present(histavg_glc) ) histavg_glc = infodata%histavg_glc - if ( present(histavg_wav) ) histavg_wav = infodata%histavg_wav - if ( present(histavg_iac) ) histavg_iac = infodata%histavg_iac - if ( present(histavg_xao) ) histavg_xao = infodata%histavg_xao - if ( present(drv_threading) ) drv_threading = infodata%drv_threading - if ( present(eps_frac) ) eps_frac = infodata%eps_frac - if ( present(eps_amask) ) eps_amask = infodata%eps_amask - if ( present(eps_agrid) ) eps_agrid = infodata%eps_agrid - if ( present(eps_aarea) ) eps_aarea = infodata%eps_aarea - if ( present(eps_omask) ) eps_omask = infodata%eps_omask - if ( present(eps_ogrid) ) eps_ogrid = infodata%eps_ogrid - if ( present(eps_oarea) ) eps_oarea = infodata%eps_oarea - if ( present(reprosum_use_ddpdd)) reprosum_use_ddpdd = infodata%reprosum_use_ddpdd - if ( present(reprosum_allow_infnan)) reprosum_allow_infnan = 
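The long runs of if ( present(...) ) guards in seq_infodata_GetData_explicit (and in the matching PutData routine further below) are the standard Fortran optional-argument pattern: each output is touched only when the caller actually supplied it. A two-field sketch of that pattern, not part of the diff, with a toy derived type standing in for seq_infodata_type.

module getdata_sketch
  implicit none
  type :: infodata_sketch              ! toy stand-in for seq_infodata_type
     integer :: info_debug = 1
     logical :: bfbflag    = .false.
  end type infodata_sketch
contains
  subroutine get_data(infodata, info_debug, bfbflag)
    type(infodata_sketch), intent(in)  :: infodata
    integer, optional,     intent(out) :: info_debug
    logical, optional,     intent(out) :: bfbflag
    ! each output is filled only if the caller actually passed it
    if ( present(info_debug) ) info_debug = infodata%info_debug
    if ( present(bfbflag)    ) bfbflag    = infodata%bfbflag
  end subroutine get_data
end module getdata_sketch

A caller can then request just the fields it needs, for example: call get_data(info, bfbflag=flag).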
infodata%reprosum_allow_infnan - if ( present(reprosum_diffmax) ) reprosum_diffmax = infodata%reprosum_diffmax - if ( present(reprosum_recompute)) reprosum_recompute = infodata%reprosum_recompute - if ( present(mct_usealltoall)) mct_usealltoall = infodata%mct_usealltoall - if ( present(mct_usevector) ) mct_usevector = infodata%mct_usevector - - if ( present(info_debug) ) info_debug = infodata%info_debug - if ( present(bfbflag) ) bfbflag = infodata%bfbflag - if ( present(esmf_map_flag) ) esmf_map_flag = infodata%esmf_map_flag - if ( present(dead_comps) ) dead_comps = infodata%dead_comps - - if ( present(atm_present) ) atm_present = infodata%atm_present - if ( present(atm_prognostic) ) atm_prognostic = infodata%atm_prognostic - if ( present(lnd_present) ) lnd_present = infodata%lnd_present - if ( present(lnd_prognostic) ) lnd_prognostic = infodata%lnd_prognostic - if ( present(rof_present) ) rof_present = infodata%rof_present - if ( present(rofice_present) ) rofice_present = infodata%rofice_present - if ( present(rof_prognostic) ) rof_prognostic = infodata%rof_prognostic - if ( present(flood_present) ) flood_present = infodata%flood_present - if ( present(ocn_present) ) ocn_present = infodata%ocn_present - if ( present(ocn_prognostic) ) ocn_prognostic = infodata%ocn_prognostic - if ( present(ocnrof_prognostic) ) ocnrof_prognostic = infodata%ocnrof_prognostic - if ( present(ocn_c2_glcshelf) ) ocn_c2_glcshelf = infodata%ocn_c2_glcshelf - if ( present(ice_present) ) ice_present = infodata%ice_present - if ( present(ice_prognostic) ) ice_prognostic = infodata%ice_prognostic - if ( present(iceberg_prognostic)) iceberg_prognostic = infodata%iceberg_prognostic - if ( present(glc_present) ) glc_present = infodata%glc_present - if ( present(glclnd_present) ) glclnd_present = infodata%glclnd_present - if ( present(glcocn_present) ) glcocn_present = infodata%glcocn_present - if ( present(glcice_present) ) glcice_present = infodata%glcice_present - if ( present(glc_prognostic) ) glc_prognostic = infodata%glc_prognostic - if ( present(glc_coupled_fluxes)) glc_coupled_fluxes = infodata%glc_coupled_fluxes - if ( present(wav_present) ) wav_present = infodata%wav_present - if ( present(wav_prognostic) ) wav_prognostic = infodata%wav_prognostic - if ( present(esp_present) ) esp_present = infodata%esp_present - if ( present(esp_prognostic) ) esp_prognostic = infodata%esp_prognostic - if ( present(iac_present) ) iac_present = infodata%iac_present - if ( present(iac_prognostic) ) iac_prognostic = infodata%iac_prognostic - if ( present(atm_nx) ) atm_nx = infodata%atm_nx - if ( present(atm_ny) ) atm_ny = infodata%atm_ny - if ( present(lnd_nx) ) lnd_nx = infodata%lnd_nx - if ( present(lnd_ny) ) lnd_ny = infodata%lnd_ny - if ( present(rof_nx) ) rof_nx = infodata%rof_nx - if ( present(rof_ny) ) rof_ny = infodata%rof_ny - if ( present(ice_nx) ) ice_nx = infodata%ice_nx - if ( present(ice_ny) ) ice_ny = infodata%ice_ny - if ( present(ocn_nx) ) ocn_nx = infodata%ocn_nx - if ( present(ocn_ny) ) ocn_ny = infodata%ocn_ny - if ( present(glc_nx) ) glc_nx = infodata%glc_nx - if ( present(glc_ny) ) glc_ny = infodata%glc_ny - if ( present(wav_nx) ) wav_nx = infodata%wav_nx - if ( present(wav_ny) ) wav_ny = infodata%wav_ny - if ( present(iac_nx) ) iac_nx = infodata%iac_nx - if ( present(iac_ny) ) iac_ny = infodata%iac_ny - - if ( present(nextsw_cday) ) nextsw_cday = infodata%nextsw_cday - if ( present(precip_fact) ) precip_fact = infodata%precip_fact - if ( present(flux_epbalfact) ) then - flux_epbalfact = 1.0_SHR_KIND_R8 - if 
(trim(infodata%flux_epbal) == 'ocn') then - flux_epbalfact = infodata%precip_fact - end if - if (flux_epbalfact <= 0.0_SHR_KIND_R8) then - if (loglevel > 0) write(logunit,'(2a,e16.6)') & - trim(subname),' WARNING: factor from ocn = ',flux_epbalfact - if (loglevel > 0) write(logunit,'(2a)') & - trim(subname),' WARNING: resetting flux_epbalfact to 1.0' - flux_epbalfact = 1.0_SHR_KIND_R8 - end if - endif - if ( present(atm_phase) ) atm_phase = infodata%atm_phase - if ( present(lnd_phase) ) lnd_phase = infodata%lnd_phase - if ( present(ice_phase) ) ice_phase = infodata%ice_phase - if ( present(ocn_phase) ) ocn_phase = infodata%ocn_phase - if ( present(glc_phase) ) glc_phase = infodata%glc_phase - if ( present(rof_phase) ) rof_phase = infodata%rof_phase - if ( present(wav_phase) ) wav_phase = infodata%wav_phase - if ( present(esp_phase) ) esp_phase = infodata%esp_phase - if ( present(iac_phase) ) iac_phase = infodata%iac_phase - if ( present(atm_aero) ) atm_aero = infodata%atm_aero - if ( present(glc_g2lupdate) ) glc_g2lupdate = infodata%glc_g2lupdate - if ( present(max_cplstep_time) ) max_cplstep_time = infodata%max_cplstep_time - if ( present(model_doi_url) ) model_doi_url = infodata%model_doi_url - - if ( present(glc_valid_input)) glc_valid_input = infodata%glc_valid_input - - END SUBROUTINE seq_infodata_GetData_explicit - -#ifndef CPRPGI - !=============================================================================== - ! !IROUTINE: seq_infodata_GetData_bytype -- Get values from infodata object - ! - ! !DESCRIPTION: - ! - ! Get values out of the infodata object. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_infodata_GetData_bytype( component_firstletter, infodata, & - comp_present, comp_prognostic, comp_gnam, histavg_comp, & - comp_phase, comp_nx, comp_ny) - - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - character(len=1), intent(IN) :: component_firstletter - type(seq_infodata_type), intent(IN) :: infodata ! Input CCSM structure - logical, optional, intent(OUT) :: comp_present ! provide data - logical, optional, intent(OUT) :: comp_prognostic ! need data - character(len=*), optional, intent(OUT) :: comp_gnam ! comp grid - integer(SHR_KIND_IN), optional, intent(OUT) :: comp_nx ! nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(OUT) :: comp_ny ! 
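The flux_epbalfact logic just above in seq_infodata_GetData_explicit boils down to: use precip_fact when flux_epbal is 'ocn', and fall back to 1.0 if the resulting factor is not positive. A sketch of that rule, not part of the diff (the original also writes a warning to the log before resetting):

module epbalfact_sketch
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
contains
  function epbalfact(flux_epbal, precip_fact) result(fact)
    character(len=*), intent(in) :: flux_epbal
    real(r8),         intent(in) :: precip_fact
    real(r8) :: fact
    fact = 1.0_r8
    if (trim(flux_epbal) == 'ocn') fact = precip_fact
    if (fact <= 0.0_r8) fact = 1.0_r8   ! the original warns before resetting
  end function epbalfact
end module epbalfact_sketch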
nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(OUT) :: comp_phase - logical, optional, intent(OUT) :: histavg_comp - - !----- local ----- - character(len=*), parameter :: subname = '(seq_infodata_GetData_bytype) ' - - !------------------------------------------------------------------------------- - - if (component_firstletter == 'a') then - call seq_infodata_GetData(infodata, atm_present=comp_present, & - atm_prognostic=comp_prognostic, atm_gnam=comp_gnam, & - atm_phase=comp_phase, atm_nx=comp_nx, atm_ny=comp_ny, & - histavg_atm=histavg_comp) - else if (component_firstletter == 'l') then - call seq_infodata_GetData(infodata, lnd_present=comp_present, & - lnd_prognostic=comp_prognostic, lnd_gnam=comp_gnam, & - lnd_phase=comp_phase, lnd_nx=comp_nx, lnd_ny=comp_ny, & - histavg_lnd=histavg_comp) - else if (component_firstletter == 'i') then - call seq_infodata_GetData(infodata, ice_present=comp_present, & - ice_prognostic=comp_prognostic, ice_gnam=comp_gnam, & - ice_phase=comp_phase, ice_nx=comp_nx, ice_ny=comp_ny, & - histavg_ice=histavg_comp) - else if (component_firstletter == 'o') then - call seq_infodata_GetData(infodata, ocn_present=comp_present, & - ocn_prognostic=comp_prognostic, ocn_gnam=comp_gnam, & - ocn_phase=comp_phase, ocn_nx=comp_nx, ocn_ny=comp_ny, & - histavg_ocn=histavg_comp) - else if (component_firstletter == 'r') then - call seq_infodata_GetData(infodata, rof_present=comp_present, & - rof_prognostic=comp_prognostic, rof_gnam=comp_gnam, & - rof_phase=comp_phase, rof_nx=comp_nx, rof_ny=comp_ny, & - histavg_rof=histavg_comp) - else if (component_firstletter == 'g') then - call seq_infodata_GetData(infodata, glc_present=comp_present, & - glc_prognostic=comp_prognostic, glc_gnam=comp_gnam, & - glc_phase=comp_phase, glc_nx=comp_nx, glc_ny=comp_ny, & - histavg_glc=histavg_comp) - else if (component_firstletter == 'w') then - call seq_infodata_GetData(infodata, wav_present=comp_present, & - wav_prognostic=comp_prognostic, wav_gnam=comp_gnam, & - wav_phase=comp_phase, wav_nx=comp_nx, wav_ny=comp_ny, & - histavg_wav=histavg_comp) - else if (component_firstletter == 'z') then - call seq_infodata_GetData(infodata, iac_present=comp_present, & - iac_prognostic=comp_prognostic, iac_gnam=comp_gnam, & - iac_phase=comp_phase, iac_nx=comp_nx, iac_ny=comp_ny, & - histavg_iac=histavg_comp) - else if (component_firstletter == 'e') then - if (present(comp_gnam)) then - comp_gnam = '' - if ((loglevel > 1) .and. seq_comm_iamroot(1)) then - write(logunit,*) trim(subname),' Note: ESP type has no gnam property' - end if - end if - if (present(comp_nx)) then - comp_nx = 1 - if ((loglevel > 1) .and. seq_comm_iamroot(1)) then - write(logunit,*) trim(subname),' Note: ESP type has no nx property' - end if - end if - if (present(comp_ny)) then - comp_ny = 1 - if ((loglevel > 1) .and. seq_comm_iamroot(1)) then - write(logunit,*) trim(subname),' Note: ESP type has no ny property' - end if - end if - if (present(histavg_comp)) then - histavg_comp = .false. - if ((loglevel > 1) .and. seq_comm_iamroot(1)) then - write(logunit,*) trim(subname),' Note: ESP type has no histavg property' - end if - end if - - call seq_infodata_GetData(infodata, esp_present=comp_present, & - esp_prognostic=comp_prognostic, esp_phase=comp_phase) - else - call shr_sys_abort( subname//": unknown component-type first letter,'"//component_firstletter//"', aborting") - end if - END SUBROUTINE seq_infodata_GetData_bytype -#endif - ! 
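seq_infodata_GetData_bytype above dispatches on the first letter of the component name ('z' for iac, 'e' for esp, the rest by their usual initial). A reduced sketch of that mapping, not part of the diff, returning only a component name:

subroutine comp_name_from_letter(letter, name)
  implicit none
  character(len=1), intent(in)  :: letter
  character(len=*), intent(out) :: name
  select case (letter)
  case ('a'); name = 'atm'
  case ('l'); name = 'lnd'
  case ('i'); name = 'ice'
  case ('o'); name = 'ocn'
  case ('r'); name = 'rof'
  case ('g'); name = 'glc'
  case ('w'); name = 'wav'
  case ('z'); name = 'iac'   ! iac is keyed on 'z', matching the branch above
  case ('e'); name = 'esp'
  case default; name = 'unknown'
  end select
end subroutine comp_name_from_letter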
^ ifndef CPRPGI - - !=============================================================================== - ! !IROUTINE: seq_infodata_PutData_explicit -- Put values into infodata object - ! - ! !DESCRIPTION: - ! - ! Put values into the infodata object. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_desc, timing_dir, & - model_version, username, hostname, rest_case_name, tchkpt_dir, & - start_type, restart_pfile, restart_file, perpetual, perpetual_ymd, & - aqua_planet,aqua_planet_sst, brnch_retain_casename, & - single_column, scmlat,scmlon,logFilePostFix, outPathRoot, & - atm_present, atm_prognostic, & - lnd_present, lnd_prognostic, & - rof_present, rof_prognostic, & - ocn_present, ocn_prognostic, ocnrof_prognostic, ocn_c2_glcshelf, & - ice_present, ice_prognostic, & - glc_present, glc_prognostic, & - glc_coupled_fluxes, & - flood_present, wav_present, wav_prognostic, rofice_present, & - glclnd_present, glcocn_present, glcice_present, iceberg_prognostic,& - esp_present, esp_prognostic, & - iac_present, iac_prognostic, & - bfbflag, lnd_gnam, cpl_decomp, cpl_seq_option, & - ice_gnam, rof_gnam, glc_gnam, wav_gnam, iac_gnam, & - atm_gnam, ocn_gnam, info_debug, dead_comps, read_restart, & - shr_map_dopole, vect_map, aoflux_grid, run_barriers, & - nextsw_cday, precip_fact, flux_epbal, flux_albav, & - glc_g2lupdate, atm_aero, esmf_map_flag, wall_time_limit, & - do_budgets, do_histinit, drv_threading, flux_diurnal, & - ocn_surface_flux_scheme, & - coldair_outbreak_mod, & - flux_convergence, flux_max_iteration, & - budget_inst, budget_daily, budget_month, force_stop_at, & - budget_ann, budget_ltann, budget_ltend , & - histaux_a2x , histaux_a2x1hri, histaux_a2x1hr, & - histaux_a2x3hr, histaux_a2x3hrp , histaux_l2x1yrg, & - histaux_a2x24hr, histaux_l2x , histaux_r2x , histaux_double_precision, & - orb_obliq, histavg_atm, histavg_lnd, histavg_ocn, histavg_ice, & - histavg_rof, histavg_glc, histavg_wav, histavg_xao, histavg_iac, & - orb_iyear, orb_iyear_align, orb_mode, orb_mvelp, & - orb_eccen, orb_obliqr, orb_lambm0, orb_mvelpp, wv_sat_scheme, & - wv_sat_transition_start, wv_sat_use_tables, wv_sat_table_spacing, & - tfreeze_option, glc_renormalize_smb, & - glc_phase, rof_phase, atm_phase, lnd_phase, ocn_phase, ice_phase, & - wav_phase, iac_phase, esp_phase, wav_nx, wav_ny, atm_nx, atm_ny, & - lnd_nx, lnd_ny, rof_nx, rof_ny, ice_nx, ice_ny, ocn_nx, ocn_ny, & - iac_nx, iac_ny, glc_nx, glc_ny, eps_frac, eps_amask, & - eps_agrid, eps_aarea, eps_omask, eps_ogrid, eps_oarea, & - reprosum_use_ddpdd, reprosum_allow_infnan, & - reprosum_diffmax, reprosum_recompute, & - mct_usealltoall, mct_usevector, glc_valid_input) - - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(INOUT) :: infodata ! Input CCSM structure - character(len=*), optional, intent(IN) :: cime_model ! CIME model (e3sm or cesm) - character(len=*), optional, intent(IN) :: start_type ! Start type - character(len=*), optional, intent(IN) :: case_name ! Short case identification - character(len=*), optional, intent(IN) :: case_desc ! Long case description - character(len=*), optional, intent(IN) :: model_version ! Model version - character(len=*), optional, intent(IN) :: username ! Username - character(len=*), optional, intent(IN) :: hostname ! Hostname - character(len=*), optional, intent(IN) :: rest_case_name ! restart casename - character(len=*), optional, intent(IN) :: timing_dir ! 
timing dir name - character(len=*), optional, intent(IN) :: tchkpt_dir ! timing checkpoint dir name - logical, optional, intent(IN) :: aqua_planet ! aqua_planet mode - integer(SHR_KIND_IN), optional, intent(IN) :: aqua_planet_sst ! aqua_planet sst type - logical, optional, intent(IN) :: run_barriers ! barrier component run calls - logical, optional, intent(IN) :: brnch_retain_casename - logical, optional, intent(IN) :: read_restart ! read restart flag - character(len=*), optional, intent(IN) :: restart_pfile ! Restart pointer file - character(len=*), optional, intent(IN) :: restart_file ! Restart file pathname - logical, optional, intent(IN) :: single_column - real (SHR_KIND_R8), optional, intent(IN) :: scmlat - real (SHR_KIND_R8), optional, intent(IN) :: scmlon - character(len=*), optional, intent(IN) :: logFilePostFix ! output log file postfix - character(len=*), optional, intent(IN) :: outPathRoot ! output file root - logical, optional, intent(IN) :: perpetual ! If this is perpetual - integer, optional, intent(IN) :: perpetual_ymd ! If perpetual, date - character(len=*), optional, intent(IN) :: orb_mode ! orbital mode - integer, optional, intent(IN) :: orb_iyear ! orbital year - integer, optional, intent(IN) :: orb_iyear_align ! orbital year model year align - real(SHR_KIND_R8), optional, intent(IN) :: orb_eccen ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(IN) :: orb_obliqr ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(IN) :: orb_obliq ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(IN) :: orb_lambm0 ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(IN) :: orb_mvelpp ! See shr_orb_mod - real(SHR_KIND_R8), optional, intent(IN) :: orb_mvelp ! See shr_orb_mod - character(len=*), optional, intent(IN) :: wv_sat_scheme ! Water vapor saturation pressure scheme - real(SHR_KIND_R8), optional, intent(IN) :: wv_sat_transition_start ! Saturation transition range - logical, optional, intent(IN) :: wv_sat_use_tables ! Saturation pressure lookup tables - real(SHR_KIND_R8), optional, intent(IN) :: wv_sat_table_spacing ! Saturation pressure table resolution - character(len=*), optional, intent(IN) :: tfreeze_option ! Freezing point of salt water - character(len=*), optional, intent(IN) :: flux_epbal ! selects E,P,R adjustment technique - logical, optional, intent(IN) :: flux_albav ! T => no diurnal cycle in ocn albedos - logical, optional, intent(IN) :: flux_diurnal ! T => diurnal cycle in atm/ocn flux - integer, optional, intent(IN) :: ocn_surface_flux_scheme ! 0: E3SMv1 1: COARE 2:UA - logical, optional, intent(in) :: coldair_outbreak_mod - real(SHR_KIND_R8), optional, intent(IN) :: flux_convergence ! atmocn flux calc convergence value - integer, optional, intent(IN) :: flux_max_iteration ! max number of iterations of atmocn flux loop - character(len=*), optional, intent(IN) :: glc_renormalize_smb ! Whether to renormalize smb sent from lnd -> glc - real(SHR_KIND_R8), optional, intent(IN) :: wall_time_limit ! force stop wall time (hours) - character(len=*), optional, intent(IN) :: force_stop_at ! force a stop at next (month, day, etc) - character(len=*), optional, intent(IN) :: atm_gnam ! atm grid - character(len=*), optional, intent(IN) :: lnd_gnam ! lnd grid - character(len=*), optional, intent(IN) :: ocn_gnam ! ocn grid - character(len=*), optional, intent(IN) :: ice_gnam ! ice grid - character(len=*), optional, intent(IN) :: rof_gnam ! rof grid - character(len=*), optional, intent(IN) :: glc_gnam ! glc grid - character(len=*), optional, intent(IN) :: wav_gnam ! 
wav grid - character(len=*), optional, intent(IN) :: iac_gnam ! iac grid - logical, optional, intent(IN) :: shr_map_dopole ! pole corrections in shr_map_mod - character(len=*), optional, intent(IN) :: vect_map ! vector mapping option - character(len=*), optional, intent(IN) :: aoflux_grid ! grid for atm ocn flux calc - integer, optional, intent(IN) :: cpl_decomp ! coupler decomp - character(len=*), optional, intent(IN) :: cpl_seq_option ! coupler sequencing option - logical, optional, intent(IN) :: do_budgets ! heat/water budgets - logical, optional, intent(IN) :: do_histinit ! initial history file - integer, optional, intent(IN) :: budget_inst ! inst budget - integer, optional, intent(IN) :: budget_daily ! daily budget - integer, optional, intent(IN) :: budget_month ! month budget - integer, optional, intent(IN) :: budget_ann ! ann budget - integer, optional, intent(IN) :: budget_ltann ! ltann budget - integer, optional, intent(IN) :: budget_ltend ! ltend budget - logical, optional, intent(IN) :: histaux_a2x - logical, optional, intent(IN) :: histaux_a2x1hri - logical, optional, intent(IN) :: histaux_a2x1hr - logical, optional, intent(IN) :: histaux_a2x3hr - logical, optional, intent(IN) :: histaux_a2x3hrp - logical, optional, intent(IN) :: histaux_a2x24hr - logical, optional, intent(IN) :: histaux_l2x1yrg - logical, optional, intent(IN) :: histaux_double_precision - logical, optional, intent(IN) :: histaux_l2x - logical, optional, intent(IN) :: histaux_r2x - logical, optional, intent(IN) :: histavg_atm - logical, optional, intent(IN) :: histavg_lnd - logical, optional, intent(IN) :: histavg_ocn - logical, optional, intent(IN) :: histavg_ice - logical, optional, intent(IN) :: histavg_rof - logical, optional, intent(IN) :: histavg_glc - logical, optional, intent(IN) :: histavg_wav - logical, optional, intent(IN) :: histavg_xao - logical, optional, intent(IN) :: histavg_iac - logical, optional, intent(IN) :: drv_threading ! driver threading control flag - real(SHR_KIND_R8), optional, intent(IN) :: eps_frac ! fraction error tolerance - real(SHR_KIND_R8), optional, intent(IN) :: eps_amask ! atm mask error tolerance - real(SHR_KIND_R8), optional, intent(IN) :: eps_agrid ! atm grid error tolerance - real(SHR_KIND_R8), optional, intent(IN) :: eps_aarea ! atm area error tolerance - real(SHR_KIND_R8), optional, intent(IN) :: eps_omask ! ocn mask error tolerance - real(SHR_KIND_R8), optional, intent(IN) :: eps_ogrid ! ocn grid error tolerance - real(SHR_KIND_R8), optional, intent(IN) :: eps_oarea ! ocn area error tolerance - logical, optional, intent(IN) :: reprosum_use_ddpdd ! use ddpdd algorithm - logical, optional, intent(IN) :: reprosum_allow_infnan ! allow INF and NaN summands - real(SHR_KIND_R8), optional, intent(IN) :: reprosum_diffmax ! maximum difference tolerance - logical, optional, intent(IN) :: reprosum_recompute ! recompute if tolerance exceeded - logical, optional, intent(IN) :: mct_usealltoall ! flag for mct alltoall - logical, optional, intent(IN) :: mct_usevector ! flag for mct vector - - integer(SHR_KIND_IN), optional, intent(IN) :: info_debug - logical, optional, intent(IN) :: bfbflag - logical, optional, intent(IN) :: esmf_map_flag - logical, optional, intent(IN) :: dead_comps ! do we have dead models - - logical, optional, intent(IN) :: atm_present ! provide data - logical, optional, intent(IN) :: atm_prognostic ! 
need data - logical, optional, intent(IN) :: lnd_present - logical, optional, intent(IN) :: lnd_prognostic - logical, optional, intent(IN) :: rof_present - logical, optional, intent(IN) :: rofice_present - logical, optional, intent(IN) :: rof_prognostic - logical, optional, intent(IN) :: flood_present - logical, optional, intent(IN) :: ocn_present - logical, optional, intent(IN) :: ocn_prognostic - logical, optional, intent(IN) :: ocnrof_prognostic - logical, optional, intent(IN) :: ocn_c2_glcshelf - logical, optional, intent(IN) :: ice_present - logical, optional, intent(IN) :: ice_prognostic - logical, optional, intent(IN) :: iceberg_prognostic - logical, optional, intent(IN) :: glc_present - logical, optional, intent(IN) :: glclnd_present - logical, optional, intent(IN) :: glcocn_present - logical, optional, intent(IN) :: glcice_present - logical, optional, intent(IN) :: glc_prognostic - logical, optional, intent(IN) :: glc_coupled_fluxes - logical, optional, intent(IN) :: wav_present - logical, optional, intent(IN) :: wav_prognostic - logical, optional, intent(IN) :: esp_present - logical, optional, intent(IN) :: esp_prognostic - logical, optional, intent(IN) :: iac_present - logical, optional, intent(IN) :: iac_prognostic - integer(SHR_KIND_IN), optional, intent(IN) :: atm_nx ! nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(IN) :: atm_ny ! nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(IN) :: lnd_nx - integer(SHR_KIND_IN), optional, intent(IN) :: lnd_ny - integer(SHR_KIND_IN), optional, intent(IN) :: rof_nx - integer(SHR_KIND_IN), optional, intent(IN) :: rof_ny - integer(SHR_KIND_IN), optional, intent(IN) :: ice_nx - integer(SHR_KIND_IN), optional, intent(IN) :: ice_ny - integer(SHR_KIND_IN), optional, intent(IN) :: ocn_nx - integer(SHR_KIND_IN), optional, intent(IN) :: ocn_ny - integer(SHR_KIND_IN), optional, intent(IN) :: glc_nx - integer(SHR_KIND_IN), optional, intent(IN) :: glc_ny - integer(SHR_KIND_IN), optional, intent(IN) :: wav_nx - integer(SHR_KIND_IN), optional, intent(IN) :: wav_ny - integer(SHR_KIND_IN), optional, intent(IN) :: iac_nx - integer(SHR_KIND_IN), optional, intent(IN) :: iac_ny - - real(SHR_KIND_R8), optional, intent(IN) :: nextsw_cday ! calendar of next atm shortwave - real(SHR_KIND_R8), optional, intent(IN) :: precip_fact ! precip factor - integer(SHR_KIND_IN), optional, intent(IN) :: atm_phase ! atm phase - integer(SHR_KIND_IN), optional, intent(IN) :: lnd_phase ! lnd phase - integer(SHR_KIND_IN), optional, intent(IN) :: ice_phase ! ice phase - integer(SHR_KIND_IN), optional, intent(IN) :: ocn_phase ! ocn phase - integer(SHR_KIND_IN), optional, intent(IN) :: glc_phase ! glc phase - integer(SHR_KIND_IN), optional, intent(IN) :: rof_phase ! rof phase - integer(SHR_KIND_IN), optional, intent(IN) :: wav_phase ! wav phase - integer(SHR_KIND_IN), optional, intent(IN) :: iac_phase ! iac phase - integer(SHR_KIND_IN), optional, intent(IN) :: esp_phase ! esp phase - logical, optional, intent(IN) :: atm_aero ! atm aerosols - logical, optional, intent(IN) :: glc_g2lupdate ! 
update glc2lnd fields in lnd model - logical, optional, intent(IN) :: glc_valid_input - - !EOP - - !----- local ----- - character(len=*), parameter :: subname = '(seq_infodata_PutData_explicit) ' - - !------------------------------------------------------------------------------- - - if ( present(cime_model) ) infodata%cime_model = cime_model - if ( present(start_type) ) infodata%start_type = start_type - if ( present(case_name) ) infodata%case_name = case_name - if ( present(case_desc) ) infodata%case_desc = case_desc - if ( present(model_version) ) infodata%model_version = model_version - if ( present(username) ) infodata%username = username - if ( present(hostname) ) infodata%hostname = hostname - if ( present(rest_case_name) ) infodata%rest_case_name = rest_case_name - if ( present(timing_dir) ) infodata%timing_dir = timing_dir - if ( present(tchkpt_dir) ) infodata%tchkpt_dir = tchkpt_dir - if ( present(aqua_planet) ) infodata%aqua_planet = aqua_planet - if ( present(aqua_planet_sst)) infodata%aqua_planet_sst= aqua_planet_sst - if ( present(run_barriers) ) infodata%run_barriers = run_barriers - if ( present(brnch_retain_casename)) infodata%brnch_retain_casename = brnch_retain_casename - if ( present(read_restart) ) infodata%read_restart = read_restart - if ( present(restart_pfile) ) infodata%restart_pfile = restart_pfile - if ( present(restart_file) ) infodata%restart_file = restart_file - if ( present(single_column) ) infodata%single_column = single_column - if ( present(scmlat) ) infodata%scmlat = scmlat - if ( present(scmlon) ) infodata%scmlon = scmlon - if ( present(logFilePostFix) ) infodata%logFilePostFix = logFilePostFix - if ( present(outPathRoot) ) infodata%outPathRoot = outPathRoot - if ( present(perpetual) ) infodata%perpetual = perpetual - if ( present(perpetual_ymd) ) infodata%perpetual_ymd = perpetual_ymd - if ( present(orb_iyear) ) infodata%orb_iyear = orb_iyear - if ( present(orb_iyear_align)) infodata%orb_iyear_align= orb_iyear_align - if ( present(orb_mode) ) infodata%orb_mode = orb_mode - if ( present(orb_eccen) ) infodata%orb_eccen = orb_eccen - if ( present(orb_obliqr) ) infodata%orb_obliqr = orb_obliqr - if ( present(orb_obliq) ) infodata%orb_obliq = orb_obliq - if ( present(orb_lambm0) ) infodata%orb_lambm0 = orb_lambm0 - if ( present(orb_mvelpp) ) infodata%orb_mvelpp = orb_mvelpp - if ( present(orb_mvelp) ) infodata%orb_mvelp = orb_mvelp - if ( present(wv_sat_scheme) ) infodata%wv_sat_scheme = wv_sat_scheme - if ( present(wv_sat_transition_start)) & - infodata%wv_sat_transition_start = wv_sat_transition_start - if ( present(wv_sat_use_tables)) infodata%wv_sat_use_tables = wv_sat_use_tables - if ( present(wv_sat_table_spacing)) infodata%wv_sat_table_spacing = wv_sat_table_spacing - if ( present(tfreeze_option) ) infodata%tfreeze_option = tfreeze_option - if ( present(flux_epbal) ) infodata%flux_epbal = flux_epbal - if ( present(flux_albav) ) infodata%flux_albav = flux_albav - if ( present(flux_diurnal) ) infodata%flux_diurnal = flux_diurnal - if ( present(ocn_surface_flux_scheme) ) infodata%ocn_surface_flux_scheme = & - ocn_surface_flux_scheme - if ( present(coldair_outbreak_mod) ) infodata%coldair_outbreak_mod = coldair_outbreak_mod - if ( present(flux_convergence)) infodata%flux_convergence = flux_convergence - if ( present(flux_max_iteration)) infodata%flux_max_iteration = flux_max_iteration - if ( present(glc_renormalize_smb)) infodata%glc_renormalize_smb = glc_renormalize_smb - if ( present(wall_time_limit)) infodata%wall_time_limit= wall_time_limit - if ( 
present(force_stop_at) ) infodata%force_stop_at = force_stop_at - if ( present(atm_gnam) ) infodata%atm_gnam = atm_gnam - if ( present(lnd_gnam) ) infodata%lnd_gnam = lnd_gnam - if ( present(ocn_gnam) ) infodata%ocn_gnam = ocn_gnam - if ( present(ice_gnam) ) infodata%ice_gnam = ice_gnam - if ( present(rof_gnam) ) infodata%rof_gnam = rof_gnam - if ( present(glc_gnam) ) infodata%glc_gnam = glc_gnam - if ( present(wav_gnam) ) infodata%wav_gnam = wav_gnam - if ( present(iac_gnam) ) infodata%iac_gnam = iac_gnam - if ( present(shr_map_dopole) ) infodata%shr_map_dopole = shr_map_dopole - if ( present(vect_map) ) infodata%vect_map = vect_map - if ( present(aoflux_grid) ) infodata%aoflux_grid = aoflux_grid - if ( present(cpl_decomp) ) infodata%cpl_decomp = cpl_decomp - if ( present(cpl_seq_option) ) infodata%cpl_seq_option = cpl_seq_option - if ( present(do_budgets) ) infodata%do_budgets = do_budgets - if ( present(do_histinit) ) infodata%do_histinit = do_histinit - if ( present(budget_inst) ) infodata%budget_inst = budget_inst - if ( present(budget_daily) ) infodata%budget_daily = budget_daily - if ( present(budget_month) ) infodata%budget_month = budget_month - if ( present(budget_ann) ) infodata%budget_ann = budget_ann - if ( present(budget_ltann) ) infodata%budget_ltann = budget_ltann - if ( present(budget_ltend) ) infodata%budget_ltend = budget_ltend - if ( present(histaux_a2x) ) infodata%histaux_a2x = histaux_a2x - if ( present(histaux_a2x1hri)) infodata%histaux_a2x1hri= histaux_a2x1hri - if ( present(histaux_a2x1hr) ) infodata%histaux_a2x1hr = histaux_a2x1hr - if ( present(histaux_a2x3hr) ) infodata%histaux_a2x3hr = histaux_a2x3hr - if ( present(histaux_a2x3hrp)) infodata%histaux_a2x3hrp= histaux_a2x3hrp - if ( present(histaux_a2x24hr)) infodata%histaux_a2x24hr= histaux_a2x24hr - if ( present(histaux_l2x1yrg)) infodata%histaux_l2x1yrg= histaux_l2x1yrg - if ( present(histaux_l2x) ) infodata%histaux_l2x = histaux_l2x - if ( present(histaux_r2x) ) infodata%histaux_r2x = histaux_r2x - if ( present(histaux_double_precision)) infodata%histaux_double_precision = histaux_double_precision - if ( present(histavg_atm) ) infodata%histavg_atm = histavg_atm - if ( present(histavg_lnd) ) infodata%histavg_lnd = histavg_lnd - if ( present(histavg_ocn) ) infodata%histavg_ocn = histavg_ocn - if ( present(histavg_ice) ) infodata%histavg_ice = histavg_ice - if ( present(histavg_rof) ) infodata%histavg_rof = histavg_rof - if ( present(histavg_glc) ) infodata%histavg_glc = histavg_glc - if ( present(histavg_wav) ) infodata%histavg_wav = histavg_wav - if ( present(histavg_iac) ) infodata%histavg_iac = histavg_iac - if ( present(histavg_xao) ) infodata%histavg_xao = histavg_xao - if ( present(drv_threading) ) infodata%drv_threading = drv_threading - if ( present(eps_frac) ) infodata%eps_frac = eps_frac - if ( present(eps_amask) ) infodata%eps_amask = eps_amask - if ( present(eps_agrid) ) infodata%eps_agrid = eps_agrid - if ( present(eps_aarea) ) infodata%eps_aarea = eps_aarea - if ( present(eps_omask) ) infodata%eps_omask = eps_omask - if ( present(eps_ogrid) ) infodata%eps_ogrid = eps_ogrid - if ( present(eps_oarea) ) infodata%eps_oarea = eps_oarea - if ( present(reprosum_use_ddpdd)) infodata%reprosum_use_ddpdd = reprosum_use_ddpdd - if ( present(reprosum_allow_infnan)) infodata%reprosum_allow_infnan = reprosum_allow_infnan - if ( present(reprosum_diffmax) ) infodata%reprosum_diffmax = reprosum_diffmax - if ( present(reprosum_recompute)) infodata%reprosum_recompute = reprosum_recompute - if ( 
present(mct_usealltoall)) infodata%mct_usealltoall = mct_usealltoall - if ( present(mct_usevector) ) infodata%mct_usevector = mct_usevector - - if ( present(info_debug) ) infodata%info_debug = info_debug - if ( present(bfbflag) ) infodata%bfbflag = bfbflag - if ( present(esmf_map_flag) ) infodata%esmf_map_flag = esmf_map_flag - if ( present(dead_comps) ) infodata%dead_comps = dead_comps - - if ( present(atm_present) ) infodata%atm_present = atm_present - if ( present(atm_prognostic) ) infodata%atm_prognostic = atm_prognostic - if ( present(lnd_present) ) infodata%lnd_present = lnd_present - if ( present(lnd_prognostic) ) infodata%lnd_prognostic = lnd_prognostic - if ( present(rof_present) ) infodata%rof_present = rof_present - if ( present(rofice_present) ) infodata%rofice_present = rofice_present - if ( present(rof_prognostic) ) infodata%rof_prognostic = rof_prognostic - if ( present(flood_present) ) infodata%flood_present = flood_present - if ( present(ocn_present) ) infodata%ocn_present = ocn_present - if ( present(ocn_prognostic) ) infodata%ocn_prognostic = ocn_prognostic - if ( present(ocnrof_prognostic)) infodata%ocnrof_prognostic = ocnrof_prognostic - if ( present(ocn_c2_glcshelf)) infodata%ocn_c2_glcshelf = ocn_c2_glcshelf - if ( present(ice_present) ) infodata%ice_present = ice_present - if ( present(ice_prognostic) ) infodata%ice_prognostic = ice_prognostic - if ( present(iceberg_prognostic)) infodata%iceberg_prognostic = iceberg_prognostic - if ( present(glc_present) ) infodata%glc_present = glc_present - if ( present(glclnd_present) ) infodata%glclnd_present = glclnd_present - if ( present(glcocn_present) ) infodata%glcocn_present = glcocn_present - if ( present(glcice_present) ) infodata%glcice_present = glcice_present - if ( present(glc_prognostic) ) infodata%glc_prognostic = glc_prognostic - if ( present(glc_coupled_fluxes)) infodata%glc_coupled_fluxes = glc_coupled_fluxes - if ( present(wav_present) ) infodata%wav_present = wav_present - if ( present(wav_prognostic) ) infodata%wav_prognostic = wav_prognostic - if ( present(iac_present) ) infodata%iac_present = iac_present - if ( present(iac_prognostic) ) infodata%iac_prognostic = iac_prognostic - if ( present(esp_present) ) infodata%esp_present = esp_present - if ( present(esp_prognostic) ) infodata%esp_prognostic = esp_prognostic - if ( present(atm_nx) ) infodata%atm_nx = atm_nx - if ( present(atm_ny) ) infodata%atm_ny = atm_ny - if ( present(lnd_nx) ) infodata%lnd_nx = lnd_nx - if ( present(lnd_ny) ) infodata%lnd_ny = lnd_ny - if ( present(rof_nx) ) infodata%rof_nx = rof_nx - if ( present(rof_ny) ) infodata%rof_ny = rof_ny - if ( present(ice_nx) ) infodata%ice_nx = ice_nx - if ( present(ice_ny) ) infodata%ice_ny = ice_ny - if ( present(ocn_nx) ) infodata%ocn_nx = ocn_nx - if ( present(ocn_ny) ) infodata%ocn_ny = ocn_ny - if ( present(glc_nx) ) infodata%glc_nx = glc_nx - if ( present(glc_ny) ) infodata%glc_ny = glc_ny - if ( present(wav_nx) ) infodata%wav_nx = wav_nx - if ( present(wav_ny) ) infodata%wav_ny = wav_ny - if ( present(iac_nx) ) infodata%iac_nx = iac_nx - if ( present(iac_ny) ) infodata%iac_ny = iac_ny - - if ( present(nextsw_cday) ) infodata%nextsw_cday = nextsw_cday - if ( present(precip_fact) ) infodata%precip_fact = precip_fact - if ( present(atm_phase) ) infodata%atm_phase = atm_phase - if ( present(lnd_phase) ) infodata%lnd_phase = lnd_phase - if ( present(ice_phase) ) infodata%ice_phase = ice_phase - if ( present(ocn_phase) ) infodata%ocn_phase = ocn_phase - if ( present(glc_phase) ) infodata%glc_phase = 
glc_phase - if ( present(rof_phase) ) infodata%rof_phase = rof_phase - if ( present(wav_phase) ) infodata%wav_phase = wav_phase - if ( present(iac_phase) ) infodata%iac_phase = iac_phase - if ( present(esp_phase) ) infodata%esp_phase = esp_phase - if ( present(atm_aero) ) infodata%atm_aero = atm_aero - if ( present(glc_g2lupdate) ) infodata%glc_g2lupdate = glc_g2lupdate - if ( present(glc_valid_input) ) infodata%glc_valid_input = glc_valid_input - - END SUBROUTINE seq_infodata_PutData_explicit - -#ifndef CPRPGI - !=============================================================================== - ! !IROUTINE: seq_infodata_PutData_bytype -- Put values into infodata object - ! - ! !DESCRIPTION: - ! - ! Put values into the infodata object. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_infodata_PutData_bytype( component_firstletter, infodata, & - comp_present, comp_prognostic, comp_gnam, & - histavg_comp, comp_phase, comp_nx, comp_ny) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - character(len=1), intent(IN) :: component_firstletter - type(seq_infodata_type), intent(INOUT) :: infodata ! Input CCSM structure - logical, optional, intent(IN) :: comp_present ! provide data - logical, optional, intent(IN) :: comp_prognostic ! need data - character(len=*), optional, intent(IN) :: comp_gnam ! comp grid - integer(SHR_KIND_IN), optional, intent(IN) :: comp_nx ! nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(IN) :: comp_ny ! nx,ny 2d grid size global - integer(SHR_KIND_IN), optional, intent(IN) :: comp_phase - logical, optional, intent(IN) :: histavg_comp - - !EOP - - !----- local ----- - character(len=*), parameter :: subname = '(seq_infodata_PutData_bytype) ' - - !------------------------------------------------------------------------------- - - if (component_firstletter == 'a') then - call seq_infodata_PutData(infodata, atm_present=comp_present, & - atm_prognostic=comp_prognostic, atm_gnam=comp_gnam, & - atm_phase=comp_phase, atm_nx=comp_nx, atm_ny=comp_ny, & - histavg_atm=histavg_comp) - else if (component_firstletter == 'l') then - call seq_infodata_PutData(infodata, lnd_present=comp_present, & - lnd_prognostic=comp_prognostic, lnd_gnam=comp_gnam, & - lnd_phase=comp_phase, lnd_nx=comp_nx, lnd_ny=comp_ny, & - histavg_lnd=histavg_comp) - else if (component_firstletter == 'i') then - call seq_infodata_PutData(infodata, ice_present=comp_present, & - ice_prognostic=comp_prognostic, ice_gnam=comp_gnam, & - ice_phase=comp_phase, ice_nx=comp_nx, ice_ny=comp_ny, & - histavg_ice=histavg_comp) - else if (component_firstletter == 'o') then - call seq_infodata_PutData(infodata, ocn_present=comp_present, & - ocn_prognostic=comp_prognostic, ocn_gnam=comp_gnam, & - ocn_phase=comp_phase, ocn_nx=comp_nx, ocn_ny=comp_ny, & - histavg_ocn=histavg_comp) - else if (component_firstletter == 'r') then - call seq_infodata_PutData(infodata, rof_present=comp_present, & - rof_prognostic=comp_prognostic, rof_gnam=comp_gnam, & - rof_phase=comp_phase, rof_nx=comp_nx, rof_ny=comp_ny, & - histavg_rof=histavg_comp) - else if (component_firstletter == 'g') then - call seq_infodata_PutData(infodata, glc_present=comp_present, & - glc_prognostic=comp_prognostic, glc_gnam=comp_gnam, & - glc_phase=comp_phase, glc_nx=comp_nx, glc_ny=comp_ny, & - histavg_glc=histavg_comp) - else if (component_firstletter == 'w') then - call seq_infodata_PutData(infodata, wav_present=comp_present, & - wav_prognostic=comp_prognostic, wav_gnam=comp_gnam, & - 
wav_phase=comp_phase, wav_nx=comp_nx, wav_ny=comp_ny, & - histavg_wav=histavg_comp) - else if (component_firstletter == 'z') then - call seq_infodata_PutData(infodata, iac_present=comp_present, & - iac_prognostic=comp_prognostic, iac_gnam=comp_gnam, & - iac_phase=comp_phase, iac_nx=comp_nx, iac_ny=comp_ny, & - histavg_iac=histavg_comp) - else if (component_firstletter == 'e') then - if ((loglevel > 1) .and. seq_comm_iamroot(1)) then - if (present(comp_gnam)) then - write(logunit,*) trim(subname),' Note: ESP type has no gnam property' - end if - if (present(comp_nx)) then - write(logunit,*) trim(subname),' Note: ESP type has no nx property' - end if - if (present(comp_ny)) then - write(logunit,*) trim(subname),' Note: ESP type has no ny property' - end if - if (present(histavg_comp)) then - write(logunit,*) trim(subname),' Note: ESP type has no histavg property' - end if - - end if - - call seq_infodata_PutData(infodata, esp_present=comp_present, & - esp_prognostic=comp_prognostic, esp_phase=comp_phase) - else - call shr_sys_abort( subname//": unknown component-type first letter,'"//component_firstletter//"', aborting") - end if - - END SUBROUTINE seq_infodata_PutData_bytype -#endif - ! ^ ifndef CPRPGI - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_infodata_bcast -- Broadcast an infodata from root pe - ! - ! !DESCRIPTION: - ! - ! Broadcast an infodata across pes - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_infodata_bcast(infodata,mpicom) - - use shr_mpi_mod, only : shr_mpi_bcast - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(INOUT) :: infodata ! assume valid on root pe - integer(SHR_KIND_IN), intent(IN) :: mpicom ! mpi comm - - !EOP - - !----- local ----- - - !------------------------------------------------------------------------------- - ! 
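A minimal usage sketch of the PutData family above (the grid name and sizes are illustrative, not taken from this file): a component can record its presence, grid and size either through the explicit keyword interface or through the one-letter dispatch wrapper, which forwards to the same generic call.

    ! illustrative values only: an active, prognostic atmosphere on an ne30np4-like grid
    call seq_infodata_PutData(infodata, atm_present=.true., atm_prognostic=.true., &
         atm_gnam='ne30np4', atm_nx=48602, atm_ny=1)
    ! the same thing through the by-type wrapper, dispatching on the first letter 'a'
    call seq_infodata_PutData_bytype('a', infodata, comp_present=.true., &
         comp_prognostic=.true., comp_gnam='ne30np4', comp_nx=48602, comp_ny=1)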
Notes: - !------------------------------------------------------------------------------- - - call shr_mpi_bcast(infodata%cime_model, mpicom) - call shr_mpi_bcast(infodata%start_type, mpicom) - call shr_mpi_bcast(infodata%case_desc, mpicom) - call shr_mpi_bcast(infodata%model_version, mpicom) - call shr_mpi_bcast(infodata%username, mpicom) - call shr_mpi_bcast(infodata%hostname, mpicom) - call shr_mpi_bcast(infodata%case_name, mpicom) - call shr_mpi_bcast(infodata%timing_dir, mpicom) - call shr_mpi_bcast(infodata%tchkpt_dir, mpicom) - call shr_mpi_bcast(infodata%aqua_planet, mpicom) - call shr_mpi_bcast(infodata%aqua_planet_sst, mpicom) - call shr_mpi_bcast(infodata%run_barriers, mpicom) - call shr_mpi_bcast(infodata%brnch_retain_casename, mpicom) - call shr_mpi_bcast(infodata%read_restart, mpicom) - call shr_mpi_bcast(infodata%restart_pfile, mpicom) - call shr_mpi_bcast(infodata%restart_file, mpicom) - call shr_mpi_bcast(infodata%single_column, mpicom) - call shr_mpi_bcast(infodata%scmlat, mpicom) - call shr_mpi_bcast(infodata%scmlon, mpicom) - call shr_mpi_bcast(infodata%logFilePostFix, mpicom) - call shr_mpi_bcast(infodata%outPathRoot, mpicom) - call shr_mpi_bcast(infodata%perpetual, mpicom) - call shr_mpi_bcast(infodata%perpetual_ymd, mpicom) - call shr_mpi_bcast(infodata%orb_iyear, mpicom) - call shr_mpi_bcast(infodata%orb_iyear_align, mpicom) - call shr_mpi_bcast(infodata%orb_mode, mpicom) - call shr_mpi_bcast(infodata%orb_eccen, mpicom) - call shr_mpi_bcast(infodata%orb_obliq, mpicom) - call shr_mpi_bcast(infodata%orb_mvelp, mpicom) - call shr_mpi_bcast(infodata%orb_obliqr, mpicom) - call shr_mpi_bcast(infodata%orb_lambm0, mpicom) - call shr_mpi_bcast(infodata%orb_mvelpp, mpicom) - call shr_mpi_bcast(infodata%wv_sat_scheme, mpicom) - call shr_mpi_bcast(infodata%wv_sat_transition_start, mpicom) - call shr_mpi_bcast(infodata%wv_sat_use_tables, mpicom) - call shr_mpi_bcast(infodata%wv_sat_table_spacing, mpicom) - call shr_mpi_bcast(infodata%tfreeze_option, mpicom) - call shr_mpi_bcast(infodata%flux_epbal, mpicom) - call shr_mpi_bcast(infodata%flux_albav, mpicom) - call shr_mpi_bcast(infodata%flux_diurnal, mpicom) - call shr_mpi_bcast(infodata%ocn_surface_flux_scheme, mpicom) - call shr_mpi_bcast(infodata%coldair_outbreak_mod, mpicom) - call shr_mpi_bcast(infodata%flux_convergence, mpicom) - call shr_mpi_bcast(infodata%flux_max_iteration, mpicom) - call shr_mpi_bcast(infodata%glc_renormalize_smb, mpicom) - call shr_mpi_bcast(infodata%wall_time_limit, mpicom) - call shr_mpi_bcast(infodata%force_stop_at, mpicom) - call shr_mpi_bcast(infodata%atm_gnam, mpicom) - call shr_mpi_bcast(infodata%lnd_gnam, mpicom) - call shr_mpi_bcast(infodata%ocn_gnam, mpicom) - call shr_mpi_bcast(infodata%ice_gnam, mpicom) - call shr_mpi_bcast(infodata%rof_gnam, mpicom) - call shr_mpi_bcast(infodata%glc_gnam, mpicom) - call shr_mpi_bcast(infodata%wav_gnam, mpicom) - call shr_mpi_bcast(infodata%iac_gnam, mpicom) - call shr_mpi_bcast(infodata%shr_map_dopole, mpicom) - call shr_mpi_bcast(infodata%vect_map, mpicom) - call shr_mpi_bcast(infodata%aoflux_grid, mpicom) - call shr_mpi_bcast(infodata%cpl_decomp, mpicom) - call shr_mpi_bcast(infodata%cpl_seq_option, mpicom) - call shr_mpi_bcast(infodata%do_budgets, mpicom) - call shr_mpi_bcast(infodata%do_histinit, mpicom) - call shr_mpi_bcast(infodata%budget_inst, mpicom) - call shr_mpi_bcast(infodata%budget_daily, mpicom) - call shr_mpi_bcast(infodata%budget_month, mpicom) - call shr_mpi_bcast(infodata%budget_ann, mpicom) - call shr_mpi_bcast(infodata%budget_ltann, 
mpicom) - call shr_mpi_bcast(infodata%budget_ltend, mpicom) - call shr_mpi_bcast(infodata%histaux_a2x , mpicom) - call shr_mpi_bcast(infodata%histaux_a2x1hri , mpicom) - call shr_mpi_bcast(infodata%histaux_a2x1hr , mpicom) - call shr_mpi_bcast(infodata%histaux_a2x3hr , mpicom) - call shr_mpi_bcast(infodata%histaux_a2x3hrp , mpicom) - call shr_mpi_bcast(infodata%histaux_a2x24hr , mpicom) - call shr_mpi_bcast(infodata%histaux_l2x1yrg , mpicom) - call shr_mpi_bcast(infodata%histaux_l2x , mpicom) - call shr_mpi_bcast(infodata%histaux_r2x , mpicom) - call shr_mpi_bcast(infodata%histaux_double_precision,mpicom) - call shr_mpi_bcast(infodata%histavg_atm , mpicom) - call shr_mpi_bcast(infodata%histavg_lnd , mpicom) - call shr_mpi_bcast(infodata%histavg_ocn , mpicom) - call shr_mpi_bcast(infodata%histavg_ice , mpicom) - call shr_mpi_bcast(infodata%histavg_rof , mpicom) - call shr_mpi_bcast(infodata%histavg_glc , mpicom) - call shr_mpi_bcast(infodata%histavg_wav , mpicom) - call shr_mpi_bcast(infodata%histavg_iac , mpicom) - call shr_mpi_bcast(infodata%histavg_xao , mpicom) - call shr_mpi_bcast(infodata%drv_threading, mpicom) - call shr_mpi_bcast(infodata%eps_frac, mpicom) - call shr_mpi_bcast(infodata%eps_amask, mpicom) - call shr_mpi_bcast(infodata%eps_agrid, mpicom) - call shr_mpi_bcast(infodata%eps_aarea, mpicom) - call shr_mpi_bcast(infodata%eps_omask, mpicom) - call shr_mpi_bcast(infodata%eps_ogrid, mpicom) - call shr_mpi_bcast(infodata%eps_oarea, mpicom) - call shr_mpi_bcast(infodata%reprosum_use_ddpdd, mpicom) - call shr_mpi_bcast(infodata%reprosum_allow_infnan, mpicom) - call shr_mpi_bcast(infodata%reprosum_diffmax, mpicom) - call shr_mpi_bcast(infodata%reprosum_recompute, mpicom) - call shr_mpi_bcast(infodata%mct_usealltoall, mpicom) - call shr_mpi_bcast(infodata%mct_usevector, mpicom) - - call shr_mpi_bcast(infodata%info_debug, mpicom) - call shr_mpi_bcast(infodata%bfbflag, mpicom) - call shr_mpi_bcast(infodata%esmf_map_flag, mpicom) - call shr_mpi_bcast(infodata%dead_comps, mpicom) - - call shr_mpi_bcast(infodata%atm_present, mpicom) - call shr_mpi_bcast(infodata%atm_prognostic, mpicom) - call shr_mpi_bcast(infodata%lnd_present, mpicom) - call shr_mpi_bcast(infodata%lnd_prognostic, mpicom) - call shr_mpi_bcast(infodata%rof_present, mpicom) - call shr_mpi_bcast(infodata%rofice_present, mpicom) - call shr_mpi_bcast(infodata%rof_prognostic, mpicom) - call shr_mpi_bcast(infodata%flood_present, mpicom) - call shr_mpi_bcast(infodata%ocn_present, mpicom) - call shr_mpi_bcast(infodata%ocn_prognostic, mpicom) - call shr_mpi_bcast(infodata%ocnrof_prognostic, mpicom) - call shr_mpi_bcast(infodata%ocn_c2_glcshelf, mpicom) - call shr_mpi_bcast(infodata%ice_present, mpicom) - call shr_mpi_bcast(infodata%ice_prognostic, mpicom) - call shr_mpi_bcast(infodata%iceberg_prognostic, mpicom) - call shr_mpi_bcast(infodata%glc_present, mpicom) - call shr_mpi_bcast(infodata%glclnd_present, mpicom) - call shr_mpi_bcast(infodata%glcocn_present, mpicom) - call shr_mpi_bcast(infodata%glcice_present, mpicom) - call shr_mpi_bcast(infodata%glc_prognostic, mpicom) - call shr_mpi_bcast(infodata%glc_coupled_fluxes, mpicom) - call shr_mpi_bcast(infodata%wav_present, mpicom) - call shr_mpi_bcast(infodata%wav_prognostic, mpicom) - call shr_mpi_bcast(infodata%esp_present, mpicom) - call shr_mpi_bcast(infodata%esp_prognostic, mpicom) - call shr_mpi_bcast(infodata%iac_present, mpicom) - call shr_mpi_bcast(infodata%iac_prognostic, mpicom) - - call shr_mpi_bcast(infodata%atm_nx, mpicom) - call shr_mpi_bcast(infodata%atm_ny, 
mpicom) - call shr_mpi_bcast(infodata%lnd_nx, mpicom) - call shr_mpi_bcast(infodata%lnd_ny, mpicom) - call shr_mpi_bcast(infodata%rof_nx, mpicom) - call shr_mpi_bcast(infodata%rof_ny, mpicom) - call shr_mpi_bcast(infodata%ice_nx, mpicom) - call shr_mpi_bcast(infodata%ice_ny, mpicom) - call shr_mpi_bcast(infodata%ocn_nx, mpicom) - call shr_mpi_bcast(infodata%ocn_ny, mpicom) - call shr_mpi_bcast(infodata%glc_nx, mpicom) - call shr_mpi_bcast(infodata%glc_ny, mpicom) - call shr_mpi_bcast(infodata%wav_nx, mpicom) - call shr_mpi_bcast(infodata%wav_ny, mpicom) - call shr_mpi_bcast(infodata%iac_nx, mpicom) - call shr_mpi_bcast(infodata%iac_ny, mpicom) - - call shr_mpi_bcast(infodata%nextsw_cday, mpicom) - call shr_mpi_bcast(infodata%precip_fact, mpicom) - call shr_mpi_bcast(infodata%atm_phase, mpicom) - call shr_mpi_bcast(infodata%lnd_phase, mpicom) - call shr_mpi_bcast(infodata%ice_phase, mpicom) - call shr_mpi_bcast(infodata%ocn_phase, mpicom) - call shr_mpi_bcast(infodata%glc_phase, mpicom) - call shr_mpi_bcast(infodata%rof_phase, mpicom) - call shr_mpi_bcast(infodata%wav_phase, mpicom) - call shr_mpi_bcast(infodata%iac_phase, mpicom) - call shr_mpi_bcast(infodata%atm_aero, mpicom) - call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom) - call shr_mpi_bcast(infodata%glc_valid_input, mpicom) - call shr_mpi_bcast(infodata%model_doi_url, mpicom) - - end subroutine seq_infodata_bcast - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_infodata_Exchange -- Broadcast a subset of infodata between pes - ! - ! !DESCRIPTION: - ! - ! Broadcast a subset of infodata data between pes to support "exchange" of information - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_infodata_Exchange(infodata,ID,type) - - use shr_mpi_mod, only : shr_mpi_bcast - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(INOUT) :: infodata ! assume valid on root pe - integer(SHR_KIND_IN), intent(IN) :: ID ! mpi comm - character(len=*), intent(IN) :: type ! type - - !EOP - - !----- local ----- - integer(SHR_KIND_IN) :: mpicom ! mpicom - integer(SHR_KIND_IN) :: cmppe ! component 'root' for broadcast - integer(SHR_KIND_IN) :: cplpe ! coupler 'root' for broadcast - logical :: atm2cpli,atm2cplr - logical :: lnd2cpli,lnd2cplr - logical :: rof2cpli,rof2cplr - logical :: ocn2cpli,ocn2cplr - logical :: ice2cpli,ice2cplr - logical :: glc2cpli,glc2cplr - logical :: wav2cpli,wav2cplr - logical :: iac2cpli,iac2cplr - logical :: esp2cpli - logical :: cpl2i,cpl2r - logical :: logset - logical :: deads ! local variable to hold info temporarily - character(len=*), parameter :: subname = '(seq_infodata_Exchange) ' - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - call seq_comm_setptrs(ID, mpicom=mpicom, cmppe=cmppe, cplpe=cplpe) - - logset = .false. - - atm2cpli = .false. - atm2cplr = .false. - lnd2cpli = .false. - lnd2cplr = .false. - rof2cpli = .false. - rof2cplr = .false. - ocn2cpli = .false. - ocn2cplr = .false. - ice2cpli = .false. - ice2cplr = .false. - glc2cpli = .false. - glc2cplr = .false. - wav2cpli = .false. - wav2cplr = .false. - iac2cpli = .false. - iac2cplr = .false. - esp2cpli = .false. - cpl2i = .false. - cpl2r = .false. - - ! 
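seq_infodata_bcast above simply mirrors every field of the root task's infodata to the rest of the communicator. A hedged sketch of the usual call site, where the namelist-read step and the iamroot/mpicom locals are assumptions and only the bcast call is taken from this module:

    if (iamroot) then
       ! ... read the driver namelist into infodata on the root PE only ...
    end if
    call seq_infodata_bcast(infodata, mpicom)   ! every PE now holds identical settings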
--- translate type into logicals --- - - if (trim(type) == 'atm2cpl_init') then - atm2cpli = .true. - atm2cplr = .true. - logset = .true. - endif - if (trim(type) == 'atm2cpl_run') then - atm2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'lnd2cpl_init') then - lnd2cpli = .true. - lnd2cplr = .true. - logset = .true. - endif - if (trim(type) == 'lnd2cpl_run') then - lnd2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'rof2cpl_init') then - rof2cpli = .true. - rof2cplr = .true. - logset = .true. - endif - if (trim(type) == 'rof2cpl_run') then - rof2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'ocn2cpl_init') then - ocn2cpli = .true. - ocn2cplr = .true. - logset = .true. - endif - if (trim(type) == 'ocn2cpl_run') then - ocn2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'ice2cpl_init') then - ice2cpli = .true. - ice2cplr = .true. - logset = .true. - endif - if (trim(type) == 'ice2cpl_run') then - ice2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'glc2cpl_init') then - glc2cpli = .true. - glc2cplr = .true. - logset = .true. - endif - if (trim(type) == 'glc2cpl_run') then - glc2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'wav2cpl_init') then - wav2cpli = .true. - wav2cplr = .true. - logset = .true. - endif - if (trim(type) == 'wav2cpl_run') then - wav2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'iac2cpl_init') then - iac2cpli = .true. - iac2cplr = .true. - logset = .true. - endif - if (trim(type) == 'iac2cpl_run') then - iac2cplr = .true. - logset = .true. - endif - - if (trim(type) == 'esp2cpl_init') then - esp2cpli = .true. - logset = .true. - endif - - if (trim(type) == 'cpl2atm_init' .or. & - trim(type) == 'cpl2lnd_init' .or. & - trim(type) == 'cpl2rof_init' .or. & - trim(type) == 'cpl2ocn_init' .or. & - trim(type) == 'cpl2glc_init' .or. & - trim(type) == 'cpl2wav_init' .or. & - trim(type) == 'cpl2iac_init' .or. & - trim(type) == 'cpl2esp_init' .or. & - trim(type) == 'cpl2ice_init') then - cpl2i = .true. - cpl2r = .true. - logset = .true. - endif - - if (trim(type) == 'cpl2atm_run' .or. & - trim(type) == 'cpl2lnd_run' .or. & - trim(type) == 'cpl2rof_run' .or. & - trim(type) == 'cpl2ocn_run' .or. & - trim(type) == 'cpl2glc_run' .or. & - trim(type) == 'cpl2wav_run' .or. & - trim(type) == 'cpl2iac_run' .or. & - trim(type) == 'cpl2ice_run') then - cpl2r = .true. - logset = .true. - endif - - ! --- make sure the type was valid --- - - if (.not. logset) then - write(logunit,*) trim(subname),' ERROR: type invalid ',trim(type) - call shr_sys_abort() - endif - - ! --- now execute exchange --- - - if (atm2cpli) then - call shr_mpi_bcast(infodata%atm_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%atm_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%atm_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%atm_ny, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%atm_aero, mpicom, pebcast=cmppe) - ! dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (lnd2cpli) then - call shr_mpi_bcast(infodata%lnd_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%lnd_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%lnd_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%lnd_ny, mpicom, pebcast=cmppe) - ! 
dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (rof2cpli) then - call shr_mpi_bcast(infodata%rof_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%rofice_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%rof_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%rof_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%rof_ny, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%flood_present, mpicom, pebcast=cmppe) - ! dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (ocn2cpli) then - call shr_mpi_bcast(infodata%ocn_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ocn_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ocnrof_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ocn_c2_glcshelf, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ocn_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ocn_ny, mpicom, pebcast=cmppe) - ! dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (ice2cpli) then - call shr_mpi_bcast(infodata%ice_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ice_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%iceberg_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ice_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%ice_ny, mpicom, pebcast=cmppe) - ! dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (glc2cpli) then - call shr_mpi_bcast(infodata%glc_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%glclnd_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%glcocn_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%glcice_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%glc_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%glc_coupled_fluxes, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%glc_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%glc_ny, mpicom, pebcast=cmppe) - ! dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (wav2cpli) then - call shr_mpi_bcast(infodata%wav_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%wav_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%wav_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%wav_ny, mpicom, pebcast=cmppe) - ! dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (iac2cpli) then - call shr_mpi_bcast(infodata%iac_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%iac_prognostic, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%iac_nx, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%iac_ny, mpicom, pebcast=cmppe) - ! 
dead_comps is true if it's ever set to true - deads = infodata%dead_comps - call shr_mpi_bcast(deads, mpicom, pebcast=cmppe) - if (deads .or. infodata%dead_comps) infodata%dead_comps = .true. - endif - - if (esp2cpli) then - call shr_mpi_bcast(infodata%esp_present, mpicom, pebcast=cmppe) - call shr_mpi_bcast(infodata%esp_prognostic, mpicom, pebcast=cmppe) - endif - - if (cpl2i) then - call shr_mpi_bcast(infodata%atm_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%atm_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%lnd_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%lnd_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%rof_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%rofice_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%rof_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%flood_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%ocn_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%ocn_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%ocnrof_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%ocn_c2_glcshelf, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%ice_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%ice_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%iceberg_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glc_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glclnd_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glcocn_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glcice_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glc_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glc_coupled_fluxes, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%wav_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%wav_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%iac_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%iac_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%esp_present, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%esp_prognostic, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%dead_comps, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%atm_aero, mpicom, pebcast=cplpe) - endif - - ! Run-time data exchanges - if (atm2cplr) then - call shr_mpi_bcast(infodata%nextsw_cday, mpicom, pebcast=cmppe) - endif - - if (ocn2cplr) then - call shr_mpi_bcast(infodata%precip_fact, mpicom, pebcast=cmppe) - endif - - if (cpl2r) then - call shr_mpi_bcast(infodata%nextsw_cday, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%precip_fact, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glc_g2lupdate, mpicom, pebcast=cplpe) - call shr_mpi_bcast(infodata%glc_valid_input, mpicom, pebcast=cplpe) - endif - - end subroutine seq_infodata_Exchange - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_infodata_Check -- Check that input InputInfo derived type is valid - ! - ! !DESCRIPTION: - ! - ! Check that input infodata object has reasonable values - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_infodata_Check( infodata ) - - ! 
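seq_infodata_Exchange above moves only the handful of fields relevant to one coupling direction, selected by the type string (for example 'atm2cpl_init' or 'cpl2ocn_run'); an unrecognized string aborts. A sketch, where the communicator ID constant name is an assumption for illustration:

    ! after the atmosphere init phase: presence flags, grid size and atm_aero go to the coupler
    call seq_infodata_Exchange(infodata, CPLALLATMID, 'atm2cpl_init')
    ! during the run loop only nextsw_cday crosses in this direction
    call seq_infodata_Exchange(infodata, CPLALLATMID, 'atm2cpl_run')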
!USES: - - use shr_assert_mod, only: shr_assert_in_domain - use shr_string_mod, only: shr_string_listIntersect - use shr_wv_sat_mod, only: shr_wv_sat_get_scheme_idx, shr_wv_sat_valid_idx - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(INOUT) :: infodata ! Output CCSM structure - - !EOP - - !----- local ----- - character(len=*), parameter :: subname = '(seq_infodata_Check) ' - integer :: lastchar ! Last character index - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - ! --- CIME model ------ - if ( trim(infodata%cime_model) /= 'e3sm' .and. trim(infodata%cime_model) /= 'cesm') then - call shr_sys_abort( subname//': cime_model must be set to e3sm or cesm, aborting') - end if - - ! --- Case name ------ - lastchar = len(infodata%case_name) - if ( len_trim(infodata%case_name) == 0) then - call shr_sys_abort( subname//': variable case_name must be set, aborting') - end if - if (infodata%case_name(lastchar:lastchar) /= ' ') then - write(logunit,"(A,I4,A)")'ERROR: case_name must not exceed ', len(infodata%case_name)-1, & - ' characters' - call shr_sys_abort( subname//': variable case_name must be set, aborting') - end if - - ! --- Restart pointer file ----- - if ( len_trim(infodata%restart_pfile) == 0 ) then - call shr_sys_abort( subname//': restart_pfile must be set' ) - end if - - ! --- LogFile ending name ----- - if ( len_trim(infodata%logFilePostFix) == 0 ) then - call shr_sys_abort( subname//': logFilePostFix must be set to something not blank' ) - end if - - ! --- Output path root directory ----- - if ( len_trim(infodata%outPathRoot) == 0 ) then - call shr_sys_abort( subname//': outPathRoot must be set' ) - end if - if ( index(infodata%outPathRoot,"/",back=.true.) /= & - len_trim(infodata%outPathRoot) ) then - call shr_sys_abort( subname//': outPathRoot must end with a slash' ) - end if - - ! --- Start-type ------ - if ((trim(infodata%start_type) /= seq_infodata_start_type_start) .and. & - (trim(infodata%start_type) /= seq_infodata_start_type_cont ) .and. & - (trim(infodata%start_type) /= seq_infodata_start_type_brnch)) then - call shr_sys_abort(subname//': start_type invalid = '//trim(infodata%start_type)) - end if - - if ((trim(infodata%start_type) == seq_infodata_start_type_cont ) .and. & - (trim(infodata%case_name) /= trim(infodata%rest_case_name))) then - write(logunit,'(10a)') subname,' case_name =',trim(infodata%case_name),':', & - ' rest_case_name =',trim(infodata%rest_case_name),':' - call shr_sys_abort(subname//': invalid continue restart case name = '//trim(infodata%rest_case_name)) - endif - - if (infodata%orb_eccen == SHR_ORB_UNDEF_REAL .or. & - infodata%orb_obliqr == SHR_ORB_UNDEF_REAL .or. & - infodata%orb_mvelpp == SHR_ORB_UNDEF_REAL .or. & - infodata%orb_lambm0 == SHR_ORB_UNDEF_REAL) then - call shr_sys_abort(subname//': orb params incorrect') - endif - - if (.not. shr_wv_sat_valid_idx(shr_wv_sat_get_scheme_idx(trim(infodata%wv_sat_scheme)))) then - call shr_sys_abort(subname//': "'//trim(infodata%wv_sat_scheme)//'" & - &is not a recognized saturation vapor pressure scheme name') - end if - - ! A transition range averaging method in CAM is only valid for: - ! - ! -40 deg C <= T <= 0 deg C - ! - ! shr_wv_sat_mod itself checks for values with the wrong sign, but we - ! have to check that the range is no more than 40 deg C here. Even - ! though this is a CAM-specific restriction, it's not really likely - ! 
that any other parameterization will be dealing with mixed-phase - ! water below -40 deg C anyway. - call shr_assert_in_domain(infodata%wv_sat_transition_start, & - ge=0._SHR_KIND_R8, le=40._SHR_KIND_R8, & - varname="wv_sat_transition_start",& - msg="Invalid transition temperature range.") - - if ((trim(infodata%aoflux_grid) /= 'ocn') .and. & - (trim(infodata%aoflux_grid) /= 'atm') .and. & - (trim(infodata%aoflux_grid) /= 'exch')) then - write(logunit,'(2a)') 'ERROR aoflux_grid not supported = ',trim(infodata%aoflux_grid) - call shr_sys_abort(subname//': aoflux_grid invalid = '//trim(infodata%aoflux_grid)) - endif - - if ((trim(infodata%vect_map) /= 'none') .and. & - (trim(infodata%vect_map) /= 'cart3d') .and. & - (trim(infodata%vect_map) /= 'cart3d_diag') .and. & - (trim(infodata%vect_map) /= 'cart3d_uvw') .and. & - (trim(infodata%vect_map) /= 'cart3d_uvw_diag')) then - write(logunit,'(2a)') 'ERROR vect_map not supported = ',trim(infodata%vect_map) - call shr_sys_abort(subname//': vect_map invalid = '//trim(infodata%vect_map)) - endif - - END SUBROUTINE seq_infodata_Check - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_infodata_print -- Print out values to log file - ! - ! !DESCRIPTION: - ! - ! Print derived type out to screen. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE seq_infodata_print( infodata ) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_infodata_type), intent(IN) :: infodata ! Output CCSM structure - - !EOP - - !----- local ----- - integer :: ind - character(len=*), parameter :: subname = '(seq_infodata_print) ' - character(len=*), parameter :: F0A = "(2A,A)" - character(len=*), parameter :: F0L = "(2A,L3)" - character(len=*), parameter :: F0I = "(2A,I10)" - character(len=*), parameter :: FIA = "(2A,I5,2A)" - character(len=*), parameter :: F0S = "(2A,I4)" - character(len=*), parameter :: F0R = "(2A,g22.14)" - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - ! 
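seq_infodata_Check aborts the run on inconsistent settings, so it is normally invoked once the settings are known on every PE, with seq_infodata_print echoing them to the log on the root task. A brief sketch (iamroot is an assumed local):

    call seq_infodata_Check(infodata)                ! abort early on inconsistent settings
    if (iamroot) call seq_infodata_print(infodata)   ! record the settings actually in use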
if (loglevel > 0) then - write(logunit,F0A) subname,'CIME model = ', trim(infodata%cime_model) - write(logunit,F0A) subname,'Start type = ', trim(infodata%start_type) - write(logunit,F0A) subname,'Case name = ', trim(infodata%case_name) - write(logunit,F0A) subname,'Case description = ', trim(infodata%case_desc) - write(logunit,F0A) subname,'Model version = ', trim(infodata%model_version) - write(logunit,F0A) subname,'Username = ', trim(infodata%username) - write(logunit,F0A) subname,'Hostname = ', trim(infodata%hostname) - write(logunit,F0A) subname,'Timing Dir = ', trim(infodata%timing_dir) - write(logunit,F0A) subname,'Timing Checkpoint Dir = ', trim(infodata%tchkpt_dir) - write(logunit,F0A) subname,'Restart case name = ', trim(infodata%rest_case_name) - - write(logunit,F0L) subname,'aqua_planet mode = ', infodata%aqua_planet - write(logunit,F0I) subname,'aqua_planet analytic sst = ', infodata%aqua_planet_sst - write(logunit,F0L) subname,'brnch_retain_casename = ', infodata%brnch_retain_casename - - write(logunit,F0L) subname,'read_restart flag = ', infodata%read_restart - write(logunit,F0A) subname,'Restart pointer file = ', trim(infodata%restart_pfile) - write(logunit,F0A) subname,'Restart file (full path) = ', trim(infodata%restart_file) - - write(logunit,F0L) subname,'single_column = ', infodata%single_column - write(logunit,F0R) subname,'scmlat = ', infodata%scmlat - write(logunit,F0R) subname,'scmlon = ', infodata%scmlon - - write(logunit,F0A) subname,'Log output end name = ', trim(infodata%logFilePostFix) - write(logunit,F0A) subname,'Output path dir = ', trim(infodata%outPathRoot) - - write(logunit,F0L) subname,'perpetual = ', infodata%perpetual - write(logunit,F0I) subname,'perpetual_ymd = ', infodata%perpetual_ymd - - write(logunit,F0A) subname,'orb_mode = ', trim(infodata%orb_mode) - if (trim(infodata%orb_mode) == trim(seq_infodata_orb_fixed_parameters)) then - write(logunit,F0R) subname,'orb_eccen = ', infodata%orb_eccen - write(logunit,F0R) subname,'orb_obliq = ', infodata%orb_obliq - write(logunit,F0R) subname,'orb_mvelp = ', infodata%orb_mvelp - write(logunit,F0R) subname,'orb_obliqr = ', infodata%orb_obliqr - write(logunit,F0R) subname,'orb_mvelpp = ', infodata%orb_mvelpp - write(logunit,F0R) subname,'orb_lambm0 = ', infodata%orb_lambm0 - elseif (trim(infodata%orb_mode) == trim(seq_infodata_orb_fixed_year)) then - write(logunit,F0I) subname,'orb_iyear = ', infodata%orb_iyear - write(logunit,F0R) subname,'orb_eccen = ', infodata%orb_eccen - write(logunit,F0R) subname,'orb_obliq = ', infodata%orb_obliq - write(logunit,F0R) subname,'orb_mvelp = ', infodata%orb_mvelp - write(logunit,F0R) subname,'orb_obliqr = ', infodata%orb_obliqr - write(logunit,F0R) subname,'orb_mvelpp = ', infodata%orb_mvelpp - write(logunit,F0R) subname,'orb_lambm0 = ', infodata%orb_lambm0 - elseif (trim(infodata%orb_mode) == trim(seq_infodata_orb_variable_year)) then - write(logunit,F0I) subname,'orb_iyear = ', infodata%orb_iyear - write(logunit,F0I) subname,'orb_iyear_align = ', infodata%orb_iyear_align - endif - - write(logunit,F0A) subname,'wv_sat_scheme = ', trim(infodata%wv_sat_scheme) - write(logunit,F0R) subname,'wv_sat_transition_start = ', infodata%wv_sat_transition_start - write(logunit,F0L) subname,'wv_sat_use_tables = ', infodata%wv_sat_use_tables - write(logunit,F0R) subname,'wv_sat_table_spacing = ', infodata%wv_sat_table_spacing - - write(logunit,F0A) subname,'tfreeze_option = ', trim(infodata%tfreeze_option) - write(logunit,F0A) subname,'flux_epbal = ', trim(infodata%flux_epbal) - 
write(logunit,F0L) subname,'flux_albav = ', infodata%flux_albav - write(logunit,F0L) subname,'flux_diurnal = ', infodata%flux_diurnal - write(logunit,F0L) subname,'ocn_surface_flux_scheme = ', infodata%ocn_surface_flux_scheme - write(logunit,F0L) subname,'coldair_outbreak_mod = ', infodata%coldair_outbreak_mod - write(logunit,F0R) subname,'flux_convergence = ', infodata%flux_convergence - write(logunit,F0I) subname,'flux_max_iteration = ', infodata%flux_max_iteration - write(logunit,F0A) subname,'glc_renormalize_smb = ', trim(infodata%glc_renormalize_smb) - write(logunit,F0R) subname,'wall_time_limit = ', infodata%wall_time_limit - write(logunit,F0A) subname,'force_stop_at = ', trim(infodata%force_stop_at) - write(logunit,F0A) subname,'atm_gridname = ', trim(infodata%atm_gnam) - write(logunit,F0A) subname,'lnd_gridname = ', trim(infodata%lnd_gnam) - write(logunit,F0A) subname,'ocn_gridname = ', trim(infodata%ocn_gnam) - write(logunit,F0A) subname,'ice_gridname = ', trim(infodata%ice_gnam) - write(logunit,F0A) subname,'rof_gridname = ', trim(infodata%rof_gnam) - write(logunit,F0A) subname,'glc_gridname = ', trim(infodata%glc_gnam) - write(logunit,F0A) subname,'wav_gridname = ', trim(infodata%wav_gnam) - write(logunit,F0A) subname,'iac_gridname = ', trim(infodata%iac_gnam) - write(logunit,F0L) subname,'shr_map_dopole = ', infodata%shr_map_dopole - write(logunit,F0A) subname,'vect_map = ', trim(infodata%vect_map) - write(logunit,F0A) subname,'aoflux_grid = ', trim(infodata%aoflux_grid) - write(logunit,F0A) subname,'cpl_seq_option = ', trim(infodata%cpl_seq_option) - write(logunit,F0S) subname,'cpl_decomp = ', infodata%cpl_decomp - write(logunit,F0L) subname,'do_budgets = ', infodata%do_budgets - write(logunit,F0L) subname,'do_histinit = ', infodata%do_histinit - write(logunit,F0S) subname,'budget_inst = ', infodata%budget_inst - write(logunit,F0S) subname,'budget_daily = ', infodata%budget_daily - write(logunit,F0S) subname,'budget_month = ', infodata%budget_month - write(logunit,F0S) subname,'budget_ann = ', infodata%budget_ann - write(logunit,F0S) subname,'budget_ltann = ', infodata%budget_ltann - write(logunit,F0S) subname,'budget_ltend = ', infodata%budget_ltend - write(logunit,F0L) subname,'histaux_a2x = ', infodata%histaux_a2x - write(logunit,F0L) subname,'histaux_a2x1hri = ', infodata%histaux_a2x1hri - write(logunit,F0L) subname,'histaux_a2x1hr = ', infodata%histaux_a2x1hr - write(logunit,F0L) subname,'histaux_a2x3hr = ', infodata%histaux_a2x3hr - write(logunit,F0L) subname,'histaux_a2x3hrp = ', infodata%histaux_a2x3hrp - write(logunit,F0L) subname,'histaux_a2x24hr = ', infodata%histaux_a2x24hr - write(logunit,F0L) subname,'histaux_l2x1yrg = ', infodata%histaux_l2x1yrg - write(logunit,F0L) subname,'histaux_l2x = ', infodata%histaux_l2x - write(logunit,F0L) subname,'histaux_r2x = ', infodata%histaux_r2x - write(logunit,F0L) subname,'histaux_double_precision = ', infodata%histaux_double_precision - write(logunit,F0L) subname,'histavg_atm = ', infodata%histavg_atm - write(logunit,F0L) subname,'histavg_lnd = ', infodata%histavg_lnd - write(logunit,F0L) subname,'histavg_ocn = ', infodata%histavg_ocn - write(logunit,F0L) subname,'histavg_ice = ', infodata%histavg_ice - write(logunit,F0L) subname,'histavg_rof = ', infodata%histavg_rof - write(logunit,F0L) subname,'histavg_glc = ', infodata%histavg_glc - write(logunit,F0L) subname,'histavg_wav = ', infodata%histavg_wav - write(logunit,F0L) subname,'histavg_iac = ', infodata%histavg_iac - write(logunit,F0L) subname,'histavg_xao = ', 
infodata%histavg_xao - write(logunit,F0L) subname,'drv_threading = ', infodata%drv_threading - - write(logunit,F0R) subname,'eps_frac = ', infodata%eps_frac - write(logunit,F0R) subname,'eps_amask = ', infodata%eps_amask - write(logunit,F0R) subname,'eps_agrid = ', infodata%eps_agrid - write(logunit,F0R) subname,'eps_aarea = ', infodata%eps_aarea - write(logunit,F0R) subname,'eps_omask = ', infodata%eps_omask - write(logunit,F0R) subname,'eps_ogrid = ', infodata%eps_ogrid - write(logunit,F0R) subname,'eps_oarea = ', infodata%eps_oarea - - write(logunit,F0L) subname,'reprosum_use_ddpdd = ', infodata%reprosum_use_ddpdd - write(logunit,F0L) subname,'reprosum_allow_infnan = ', infodata%reprosum_allow_infnan - write(logunit,F0R) subname,'reprosum_diffmax = ', infodata%reprosum_diffmax - write(logunit,F0L) subname,'reprosum_recompute = ', infodata%reprosum_recompute - - write(logunit,F0L) subname,'mct_usealltoall = ', infodata%mct_usealltoall - write(logunit,F0L) subname,'mct_usevector = ', infodata%mct_usevector - - write(logunit,F0S) subname,'info_debug = ', infodata%info_debug - write(logunit,F0L) subname,'bfbflag = ', infodata%bfbflag - write(logunit,F0L) subname,'esmf_map_flag = ', infodata%esmf_map_flag - write(logunit,F0L) subname,'dead_comps = ', infodata%dead_comps - write(logunit,F0L) subname,'run_barriers = ', infodata%run_barriers - - write(logunit,F0L) subname,'atm_present = ', infodata%atm_present - write(logunit,F0L) subname,'atm_prognostic = ', infodata%atm_prognostic - write(logunit,F0L) subname,'lnd_present = ', infodata%lnd_present - write(logunit,F0L) subname,'lnd_prognostic = ', infodata%lnd_prognostic - write(logunit,F0L) subname,'rof_present = ', infodata%rof_present - write(logunit,F0L) subname,'rofice_present = ', infodata%rofice_present - write(logunit,F0L) subname,'rof_prognostic = ', infodata%rof_prognostic - write(logunit,F0L) subname,'flood_present = ', infodata%flood_present - write(logunit,F0L) subname,'ocn_present = ', infodata%ocn_present - write(logunit,F0L) subname,'ocn_prognostic = ', infodata%ocn_prognostic - write(logunit,F0L) subname,'ocnrof_prognostic = ', infodata%ocnrof_prognostic - write(logunit,F0L) subname,'ocn_c2_glcshelf = ', infodata%ocn_c2_glcshelf - write(logunit,F0L) subname,'ice_present = ', infodata%ice_present - write(logunit,F0L) subname,'ice_prognostic = ', infodata%ice_prognostic - write(logunit,F0L) subname,'iceberg_prognostic = ', infodata%iceberg_prognostic - write(logunit,F0L) subname,'glc_present = ', infodata%glc_present - write(logunit,F0L) subname,'glclnd_present = ', infodata%glclnd_present - write(logunit,F0L) subname,'glcocn_present = ', infodata%glcocn_present - write(logunit,F0L) subname,'glcice_present = ', infodata%glcice_present - write(logunit,F0L) subname,'glc_prognostic = ', infodata%glc_prognostic - write(logunit,F0L) subname,'glc_coupled_fluxes = ', infodata%glc_coupled_fluxes - write(logunit,F0L) subname,'wav_present = ', infodata%wav_present - write(logunit,F0L) subname,'wav_prognostic = ', infodata%wav_prognostic - write(logunit,F0L) subname,'iac_present = ', infodata%iac_present - write(logunit,F0L) subname,'iac_prognostic = ', infodata%iac_prognostic - write(logunit,F0L) subname,'esp_present = ', infodata%esp_present - write(logunit,F0L) subname,'esp_prognostic = ', infodata%esp_prognostic - - write(logunit,F0I) subname,'atm_nx = ', infodata%atm_nx - write(logunit,F0I) subname,'atm_ny = ', infodata%atm_ny - write(logunit,F0I) subname,'lnd_nx = ', infodata%lnd_nx - write(logunit,F0I) subname,'lnd_ny = ', 
infodata%lnd_ny - write(logunit,F0I) subname,'rof_nx = ', infodata%rof_nx - write(logunit,F0I) subname,'rof_ny = ', infodata%rof_ny - write(logunit,F0I) subname,'ice_nx = ', infodata%ice_nx - write(logunit,F0I) subname,'ice_ny = ', infodata%ice_ny - write(logunit,F0I) subname,'ocn_nx = ', infodata%ocn_nx - write(logunit,F0I) subname,'ocn_ny = ', infodata%ocn_ny - write(logunit,F0I) subname,'glc_nx = ', infodata%glc_nx - write(logunit,F0I) subname,'glc_ny = ', infodata%glc_ny - write(logunit,F0I) subname,'wav_nx = ', infodata%wav_nx - write(logunit,F0I) subname,'wav_ny = ', infodata%wav_ny - write(logunit,F0I) subname,'iac_nx = ', infodata%iac_nx - write(logunit,F0I) subname,'iac_ny = ', infodata%iac_ny - - write(logunit,F0R) subname,'nextsw_cday = ', infodata%nextsw_cday - write(logunit,F0R) subname,'precip_fact = ', infodata%precip_fact - write(logunit,F0L) subname,'atm_aero = ', infodata%atm_aero - - write(logunit,F0S) subname,'atm_phase = ', infodata%atm_phase - write(logunit,F0S) subname,'lnd_phase = ', infodata%lnd_phase - write(logunit,F0S) subname,'ocn_phase = ', infodata%ocn_phase - write(logunit,F0S) subname,'ice_phase = ', infodata%ice_phase - write(logunit,F0S) subname,'glc_phase = ', infodata%glc_phase - write(logunit,F0S) subname,'rof_phase = ', infodata%rof_phase - write(logunit,F0S) subname,'wav_phase = ', infodata%wav_phase - write(logunit,F0S) subname,'iac_phase = ', infodata%iac_phase - - write(logunit,F0L) subname,'glc_g2lupdate = ', infodata%glc_g2lupdate - ! endif - - END SUBROUTINE seq_infodata_print - - !=============================================================================== - !=============================================================================== - -END MODULE seq_infodata_mod diff --git a/src/drivers/mct/shr/seq_io_read_mod.F90 b/src/drivers/mct/shr/seq_io_read_mod.F90 deleted file mode 100644 index ef0456383a5..00000000000 --- a/src/drivers/mct/shr/seq_io_read_mod.F90 +++ /dev/null @@ -1,325 +0,0 @@ -! !MODULE: seq_io_read_mod -- reads integer, real arrays and chacter of driver files -! -! !REMARKS: -! -! !REVISION HISTORY: -! 2007-Oct-26 - T. Craig first version -! 2007-Dec-06 - T. Craig update and improve -! 2008-Feb-16 - J. Edwards convert to PIO -! 2010-Nov - J. Edwards move PIO init and namelists from components to driver -! Current Problems -! - the original use of seq_io will now ONLY work with the cpl because -! of hardwiring cpl_io_type and cpl_io_iosystem. want the original -! io capabilities to be usable by any component -! - the init1 method depends on seq_comm for name consistency but seq_comm_init -! wants to be called after init1 so the global_comm can be modified for -! async IO. this needs to be reconciled. -! - this routine stores information for all components but most methods are -! hardwired to work only for the coupler. should all the components info -! be stored here or should this be more a general set of methods that are -! reusable as it's original intent. -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_io_read_mod - - ! !USES: - - use shr_kind_mod, only: r8 => shr_kind_r8, in => shr_kind_in - use shr_kind_mod, only: cl => shr_kind_cl, cs => shr_kind_cs - use shr_pio_mod, only: shr_pio_getiosys, shr_pio_getiotype - use shr_sys_mod ! system calls - use seq_comm_mct - use mct_mod ! mct wrappers - use pio - - implicit none - private - - ! !PUBLIC TYPES: - - ! none - - ! !PUBLIC MEMBER FUNCTIONS: - - public seq_io_read - - ! !PUBLIC DATA MEMBERS - - ! 
none - - !EOP - - interface seq_io_read - module procedure seq_io_read_int - module procedure seq_io_read_int1d - module procedure seq_io_read_r8 - module procedure seq_io_read_r81d - module procedure seq_io_read_char - end interface seq_io_read - - !------------------------------------------------------------------------------- - ! Local data - !------------------------------------------------------------------------------- - - character(*) , parameter :: prefix = "seq_io_" - character(*) , parameter :: version ='cpl7v10' - character(*) , parameter :: version0='cpl7v00' - character(CL) :: charvar ! buffer for string read/write - - !================================================================================= -contains - !================================================================================= - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_int - read scalar integer from netcdf file - ! - ! !DESCRIPTION: - ! Read scalar integer from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_int(filename,pioid,idata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(file_desc_t) :: pioid - integer ,intent(inout) :: idata ! integer data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - integer :: i1d(1) - character(*),parameter :: subName = '(seq_io_read_int) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_io_read_int1d(filename,pioid,i1d,dname) - idata = i1d(1) - - end subroutine seq_io_read_int - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_int1d - read 1d integer from netcdf file - ! - ! !DESCRIPTION: - ! Read 1d integer array from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_int1d(filename,pioid,idata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(file_desc_t) :: pioid - integer(in) ,intent(inout):: idata(:) ! integer data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - integer(in) :: rcode - type(var_desc_t) :: varid - character(CL) :: name1 - character(*),parameter :: subName = '(seq_io_read_int1d) ' - logical :: addprefix - !------------------------------------------------------------------------------- - ! 
- !------------------------------------------------------------------------------- - call seq_io_read_openfile(filename,pioid,addprefix) - - if (addprefix) then - name1 = trim(prefix)//trim(dname) - else - name1 = trim(dname) - endif - rcode = pio_inq_varid(pioid,trim(name1),varid) - rcode = pio_get_var(pioid,varid,idata) - - end subroutine seq_io_read_int1d - - subroutine seq_io_read_openfile(filename,pioid,addprefix) - character(len=*), intent(in) :: filename - type(file_desc_t) :: pioid - logical, intent(out) :: addprefix - logical :: exists - integer(in) :: iam,mpicom - type(iosystem_desc_t) , pointer :: cpl_io_subsystem - character(len=seq_comm_namelen) :: cpl_name - integer(in) :: cpl_pio_iotype - logical, save :: laddprefix - integer :: rcode - character(CL) :: lversion - character(*),parameter :: subName = '(seq_io_read_openfile) ' - - if(.not. pio_file_is_open(pioid)) then - cpl_name = seq_comm_name(CPLID) - cpl_io_subsystem => shr_pio_getiosys(cpl_name) - cpl_pio_iotype = shr_pio_getiotype(cpl_name) - - call seq_comm_setptrs(CPLID,iam=iam,mpicom=mpicom) - if (iam==0) inquire(file=trim(filename),exist=exists) - call shr_mpi_bcast(exists,mpicom,'seq_io_read_openfile') - if (exists) then - rcode = pio_openfile(cpl_io_subsystem, pioid, cpl_pio_iotype, trim(filename),pio_nowrite) - call pio_seterrorhandling(pioid,PIO_BCAST_ERROR) - rcode = pio_get_att(pioid,pio_global,"file_version",lversion) - call pio_seterrorhandling(pioid,PIO_INTERNAL_ERROR) - if (trim(lversion) == trim(version)) then - laddprefix=.false. - else - laddprefix=.true. - endif - else - if(iam==0) write(logunit,*) subname,' ERROR: file invalid ',trim(filename) - call shr_sys_abort() - endif - endif - addprefix = laddprefix - - end subroutine seq_io_read_openfile - - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_r8 - read scalar double from netcdf file - ! - ! !DESCRIPTION: - ! Read scalar double from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_r8(filename,pioid,rdata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(file_desc_t) :: pioid - real(r8) ,intent(inout) :: rdata ! real data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - real(r8) :: r1d(1) - character(*),parameter :: subName = '(seq_io_read_r8) ' - - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - - call seq_io_read_r81d(filename,pioid,r1d,dname) - rdata = r1d(1) - - end subroutine seq_io_read_r8 - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_r81d - read 1d double array from netcdf file - ! - ! !DESCRIPTION: - ! Read 1d double array from netcdf file - ! - ! !REVISION HISTORY: - ! 2007-Oct-26 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_r81d(filename,pioid,rdata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! 
file - type(file_desc_t) :: pioid - real(r8) ,intent(inout) :: rdata(:) ! real data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - type(var_desc_t) :: varid - character(CL) :: name1 - character(*),parameter :: subName = '(seq_io_read_r81d) ' - logical :: addprefix - integer :: rcode - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - call seq_io_read_openfile(filename,pioid,addprefix) - - if (addprefix) then - name1 = trim(prefix)//trim(dname) - else - name1 = trim(dname) - endif - - rcode = pio_inq_varid(pioid,trim(name1),varid) - rcode = pio_get_var(pioid,varid,rdata) - - end subroutine seq_io_read_r81d - - !=============================================================================== - !BOP =========================================================================== - ! - ! !IROUTINE: seq_io_read_char - read char string from netcdf file - ! - ! !DESCRIPTION: - ! Read char string from netcdf file - ! - ! !REVISION HISTORY: - ! 2010-July-06 - T. Craig - initial version - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_io_read_char(filename,pioid,rdata,dname) - - ! !INPUT/OUTPUT PARAMETERS: - implicit none - character(len=*),intent(in) :: filename ! file - type(file_desc_t) :: pioid - character(len=*),intent(inout) :: rdata ! character data - character(len=*),intent(in) :: dname ! name of data - - !EOP - - type(var_desc_t) :: varid - character(CL) :: name1 - character(*),parameter :: subName = '(seq_io_read_char) ' - logical :: addprefix - integer :: rcode - !------------------------------------------------------------------------------- - ! - !------------------------------------------------------------------------------- - call seq_io_read_openfile(filename,pioid,addprefix) - - if (addprefix) then - name1 = trim(prefix)//trim(dname) - else - name1 = trim(dname) - endif - - rcode = pio_inq_varid(pioid,trim(name1),varid) - rcode = pio_get_var(pioid,varid,charvar) - rdata = trim(charvar) - - end subroutine seq_io_read_char - - !=============================================================================== - !=============================================================================== -end module seq_io_read_mod diff --git a/src/drivers/mct/shr/seq_pauseresume_mod.F90 b/src/drivers/mct/shr/seq_pauseresume_mod.F90 deleted file mode 100644 index 9d56f0cc613..00000000000 --- a/src/drivers/mct/shr/seq_pauseresume_mod.F90 +++ /dev/null @@ -1,439 +0,0 @@ -! !MODULE: seq_pauseresume_mod --- Module for managing pause/resume data for ESP components -! -! !DESCRIPTION: -! -! A module to collect and distribute pause/resume information -! -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_pauseresume_mod - - ! !USES: - - use shr_kind_mod, only: CL => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_flush, shr_sys_abort - use seq_comm_mct, only: num_inst_driver - - implicit none - - private -#include - - ! !PUBLIC INTERFACES: - public :: seq_resume_store_comp ! Store component resume filenames - public :: seq_resume_get_files ! Retrieve pointer to resume filenames - public :: seq_resume_free ! Free resume filename storage - public :: seq_resume_broadcast ! Broadcast component filenames to all PEs - - ! Type to hold resume filenames - type seq_resume_type - character(len=CL), pointer :: atm_resume(:) => NULL() ! 
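The seq_io_read generic above resolves to the scalar and 1-d integer, real and character readers, and the first call on a given pioid opens the file and checks its file_version attribute to decide whether variable names need the seq_io_ prefix. A sketch, where the restart file name and variable names are illustrative only:

    type(file_desc_t) :: pioid
    real(r8)          :: precip_fact
    character(CL)     :: case_name

    ! the first call opens the file; later calls on the same pioid reuse it
    call seq_io_read('mycase.cpl.r.0001-01-01-00000.nc', pioid, precip_fact, 'seq_infodata_precip_fact')
    call seq_io_read('mycase.cpl.r.0001-01-01-00000.nc', pioid, case_name, 'seq_infodata_case_name')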
atm resume file(s) - character(len=CL), pointer :: lnd_resume(:) => NULL() ! lnd resume file(s) - character(len=CL), pointer :: ice_resume(:) => NULL() ! ice resume file(s) - character(len=CL), pointer :: ocn_resume(:) => NULL() ! ocn resume file(s) - character(len=CL), pointer :: glc_resume(:) => NULL() ! glc resume file(s) - character(len=CL), pointer :: rof_resume(:) => NULL() ! rof resume file(s) - character(len=CL), pointer :: wav_resume(:) => NULL() ! wav resume file(s) - character(len=CL), pointer :: cpl_resume(:) => NULL() ! cpl resume file(s) - end type seq_resume_type - - type(seq_resume_type), pointer :: resume => NULL() ! For storing pause/resume files - - private :: seq_resume_broadcast_array - -CONTAINS - - !=========================================================================== - - !BOP ======================================================================= - ! - ! !IROUTINE: seq_resume_store_comp - Allocate space & store resume filenames - ! - ! !DESCRIPTION: - ! - ! Allocate data for resume filenames from all component instances - ! Store resume filenames for requested component - ! - ! Assumptions about instance numbers: - ! Multi-driver: num_inst_driver = total number of instances of each - ! num_inst_ = 1 - ! Single-driver: num_inst_driver = 1 - ! num_inst_ = total number of instances - ! - ! Assumption about resume names: All PEs should have the same value - ! for %cdata_cc%resume_filename - ! - ! !INTERFACE: -------------------------------------------------------------- - - subroutine seq_resume_store_comp(oid, filename, num_inst_comp, ninst, iamroot) - character(len=1), intent(in) :: oid ! 1 letter comp type - character(len=*), intent(in) :: filename ! resume filename - integer, intent(in) :: num_inst_comp ! # comp instances - integer, intent(in) :: ninst ! comp instance # - logical, intent(in) :: iamroot ! is comp root? - - integer :: num_inst ! # store instances - character(len=CL), pointer :: fname_ptr(:) - character(len=*), parameter :: subname = 'seq_resume_store_comp' - - nullify(fname_ptr) - - if (.not. associated(resume)) then - allocate(resume) - end if - - if (len_trim(filename) > 0) then - num_inst = num_inst_comp * num_inst_driver - else - num_inst = 0 - end if - - ! Make sure each comp field is allocated correctly - select case(oid) - case ('a') - if (associated(resume%atm_resume)) then - if ((num_inst == 0) .or. (size(resume%atm_resume) /= num_inst)) then - deallocate(resume%atm_resume) - nullify(resume%atm_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%atm_resume)) then - allocate(resume%atm_resume(num_inst)) - end if - fname_ptr => resume%atm_resume - end if - case ('l') - if (associated(resume%lnd_resume)) then - if ((num_inst == 0) .or. (size(resume%lnd_resume) /= num_inst)) then - deallocate(resume%lnd_resume) - nullify(resume%lnd_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%lnd_resume)) then - allocate(resume%lnd_resume(num_inst)) - end if - fname_ptr => resume%lnd_resume - end if - case ('o') - if (associated(resume%ocn_resume)) then - if ((num_inst == 0) .or. (size(resume%ocn_resume) /= num_inst)) then - deallocate(resume%ocn_resume) - nullify(resume%ocn_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%ocn_resume)) then - allocate(resume%ocn_resume(num_inst)) - end if - fname_ptr => resume%ocn_resume - end if - case ('i') - if (associated(resume%ice_resume)) then - if ((num_inst == 0) .or. 
(size(resume%ice_resume) /= num_inst)) then - deallocate(resume%ice_resume) - nullify(resume%ice_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%ice_resume)) then - allocate(resume%ice_resume(num_inst)) - end if - fname_ptr => resume%ice_resume - end if - case ('r') - if (associated(resume%rof_resume)) then - if ((num_inst == 0) .or. (size(resume%rof_resume) /= num_inst)) then - deallocate(resume%rof_resume) - nullify(resume%rof_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%rof_resume)) then - allocate(resume%rof_resume(num_inst)) - end if - fname_ptr => resume%rof_resume - end if - case ('g') - if (associated(resume%glc_resume)) then - if ((num_inst == 0) .or. (size(resume%glc_resume) /= num_inst)) then - deallocate(resume%glc_resume) - nullify(resume%glc_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%glc_resume)) then - allocate(resume%glc_resume(num_inst)) - end if - fname_ptr => resume%glc_resume - end if - case ('w') - if (associated(resume%wav_resume)) then - if ((num_inst == 0) .or. (size(resume%wav_resume) /= num_inst)) then - deallocate(resume%wav_resume) - nullify(resume%wav_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%wav_resume)) then - allocate(resume%wav_resume(num_inst)) - end if - fname_ptr => resume%wav_resume - end if - case ('x') - if (associated(resume%cpl_resume)) then - if ((num_inst == 0) .or. (size(resume%cpl_resume) /= num_inst)) then - deallocate(resume%cpl_resume) - nullify(resume%cpl_resume) - end if - end if - if (num_inst > 0) then - if (.not. associated(resume%cpl_resume)) then - allocate(resume%cpl_resume(num_inst)) - end if - fname_ptr => resume%cpl_resume - end if - case default - call shr_sys_abort(subname//': Bad component id, '//oid) - end select - - ! Copy in the resume filename if it exists - if (associated(fname_ptr)) then - fname_ptr(ninst) = filename - end if - - end subroutine seq_resume_store_comp - - !=========================================================================== - !BOP ======================================================================= - ! - ! !IROUTINE: seq_resume_get_files -- Return resume filename info - ! - ! !DESCRIPTION: - ! - ! Return resume filename info - ! - ! !INTERFACE: -------------------------------------------------------------- - subroutine seq_resume_get_files(oneletterid, files, bcast) - character(len=1), intent(in) :: oneletterid - character(len=*), pointer :: files(:) - logical, optional, intent(in) :: bcast - - character(len=*), parameter :: subname = 'seq_resume_get_files' - - nullify(files) - if (present(bcast)) then - if (bcast) then - call seq_resume_broadcast(oneletterid) - end if - ! 
No else: if not present, assume false - end if - select case(oneletterid) - case ('a') - if (associated(resume%atm_resume)) then - files => resume%atm_resume - end if - case ('l') - if (associated(resume%lnd_resume)) then - files => resume%lnd_resume - end if - case ('o') - if (associated(resume%ocn_resume)) then - files => resume%ocn_resume - end if - case ('i') - if (associated(resume%ice_resume)) then - files => resume%ice_resume - end if - case ('r') - if (associated(resume%rof_resume)) then - files => resume%rof_resume - end if - case ('g') - if (associated(resume%glc_resume)) then - files => resume%glc_resume - end if - case ('w') - if (associated(resume%wav_resume)) then - files => resume%wav_resume - end if - case ('x') - if (associated(resume%cpl_resume)) then - files => resume%cpl_resume - end if - case default - call shr_sys_abort(subname//': Bad component id, '//oneletterid) - end select - end subroutine seq_resume_get_files - - !=========================================================================== - !BOP ======================================================================= - ! - ! !IROUTINE: seq_resume_free -- Free space for resume filenames - ! - ! !DESCRIPTION: - ! - ! Free data for resume filenames from all component instances - ! - ! !INTERFACE: -------------------------------------------------------------- - - subroutine seq_resume_free() - - if (associated(resume)) then - if (associated(resume%atm_resume)) then - deallocate(resume%atm_resume) - nullify(resume%atm_resume) - end if - - if (associated(resume%lnd_resume)) then - deallocate(resume%lnd_resume) - nullify(resume%lnd_resume) - end if - - if (associated(resume%ocn_resume)) then - deallocate(resume%ocn_resume) - nullify(resume%ocn_resume) - end if - - if (associated(resume%ice_resume)) then - deallocate(resume%ice_resume) - nullify(resume%ice_resume) - end if - - if (associated(resume%rof_resume)) then - deallocate(resume%rof_resume) - nullify(resume%rof_resume) - end if - - if (associated(resume%glc_resume)) then - deallocate(resume%glc_resume) - nullify(resume%glc_resume) - end if - - if (associated(resume%wav_resume)) then - deallocate(resume%wav_resume) - nullify(resume%wav_resume) - end if - - if (associated(resume%cpl_resume)) then - deallocate(resume%cpl_resume) - nullify(resume%cpl_resume) - end if - end if - end subroutine seq_resume_free - - !=========================================================================== - !BOP ======================================================================= - ! - ! !IROUTINE: seq_resume_broadcast - ! - ! !DESCRIPTION: - ! - ! Broadcast a component type's resume filenames to all PEs - ! - ! !INTERFACE: -------------------------------------------------------------- - - subroutine seq_resume_broadcast(oneletterid) - character(len=1), intent(in) :: oneletterid - - character(len=CL), pointer :: fname_ptr(:) - character(len=*), parameter :: subname = 'seq_resume_broadcast' - - ! This interface does a pointer dance. Because the array - ! passed to seq_resume_broadcast_array may be NULL on input - ! but allocated on output, we need to 'reconnect' it to the - ! 
resume structure - select case(oneletterid) - case ('a') - fname_ptr => resume%atm_resume - call seq_resume_broadcast_array(fname_ptr) - resume%atm_resume => fname_ptr - case ('l') - fname_ptr => resume%lnd_resume - call seq_resume_broadcast_array(fname_ptr) - resume%lnd_resume => fname_ptr - case ('o') - fname_ptr => resume%ocn_resume - call seq_resume_broadcast_array(fname_ptr) - resume%ocn_resume => fname_ptr - case ('i') - fname_ptr => resume%ice_resume - call seq_resume_broadcast_array(fname_ptr) - resume%ice_resume => fname_ptr - case ('r') - fname_ptr => resume%rof_resume - call seq_resume_broadcast_array(fname_ptr) - resume%rof_resume => fname_ptr - case ('g') - fname_ptr => resume%glc_resume - call seq_resume_broadcast_array(fname_ptr) - resume%glc_resume => fname_ptr - case ('w') - fname_ptr => resume%wav_resume - call seq_resume_broadcast_array(fname_ptr) - resume%wav_resume => fname_ptr - case ('x') - fname_ptr => resume%cpl_resume - call seq_resume_broadcast_array(fname_ptr) - resume%cpl_resume => fname_ptr - case default - call shr_sys_abort(subname//': Bad component id, '//oneletterid) - end select - end subroutine seq_resume_broadcast - - subroutine seq_resume_broadcast_array(filename_array) - use shr_mpi_mod, only: shr_mpi_bcast - ! Used to bcast component filenames across multiple drivers - use seq_comm_mct, only: global_comm - - character(len=CL), pointer :: filename_array(:) - - integer, allocatable :: active_entries(:) - integer :: global_numpes - integer :: num_entries - integer :: my_entry - integer :: index - integer :: ierr - character(len=128) :: errmsg - character(len=*), parameter :: subname = "(fill_array_pes)" - - call MPI_comm_rank(global_comm, global_numpes, ierr) - allocate(active_entries(global_numpes)) - - ! Find filled array element (if any) - ! Note, it is an error to find more than one. - active_entries = 0 - my_entry = 0 - if (associated(filename_array)) then - do index = 1, size(filename_array) - if (len_trim(filename_array(index)) > 0) then - if (my_entry > 0) then - write(errmsg, '(2(a,i0))') ': Bad entry, ', index, & - ', already have ',my_entry - call shr_sys_abort(subname//trim(errmsg)) - end if - my_entry = index - end if - end do - end if - ! Share my_entry with other PEs - call MPI_allgather(my_entry, 1, MPI_INTEGER, active_entries, 1, MPI_INTEGER, global_comm, ierr) - ! Allocate our array if needed - num_entries = MAXVAL(active_entries) - if ((num_entries > 0) .and. (.not. associated(filename_array))) then - allocate(filename_array(num_entries)) - end if - do index = 1, global_numpes - my_entry = active_entries(index) - if (my_entry > 0) then - call shr_mpi_bcast(filename_array(my_entry), global_comm, & - subname//': bcast', pebcast=index-1) - end if - end do - - deallocate(active_entries) - end subroutine seq_resume_broadcast_array - -end module seq_pauseresume_mod diff --git a/src/drivers/mct/shr/seq_timemgr_mod.F90 b/src/drivers/mct/shr/seq_timemgr_mod.F90 deleted file mode 100644 index e945f28367d..00000000000 --- a/src/drivers/mct/shr/seq_timemgr_mod.F90 +++ /dev/null @@ -1,2752 +0,0 @@ -!=============================================================================== -! -! !MODULE: seq_timemgr_mod --- Time-manager module -! -! !DESCRIPTION: -! -! A module to create derived types to manage time and clock information -! for use with CIME drivers and models. -! -! !REMARKS: -! -! !REVISION HISTORY: -! 2005-Nov-11 - E. Kluzek - creation as eshr_timemgr_mod -! 2007-Sep-12 - T. Craig - extended -! 2007-Oct-05 - T. 
Craig - refactored to support concurrent models -! 2007-Nov-15 - T. Craig - refactored for ccsm4 and renamed seq_timemgr_mod -! -! !INTERFACE: ------------------------------------------------------------------ - -module seq_timemgr_mod - - ! !USES: - use ESMF - use shr_cal_mod - use SHR_KIND_mod, only: SHR_KIND_IN, SHR_KIND_R8, SHR_KIND_CS, & - SHR_KIND_CL, SHR_KIND_I8 - use seq_comm_mct, only: logunit, loglevel, seq_comm_iamin, CPLID, & - seq_comm_gloroot, seq_comm_iamroot - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - - implicit none - - private ! default private - - ! ! PUBLIC TYPES: - - public :: seq_timemgr_type ! Wrapped clock object - - ! ! PUBLIC MEMBER FUNCTIONS: - - ! --- Clock object methods -------------------------------------------------- - public :: seq_timemgr_clockInit ! Setup the sync clock - public :: seq_timemgr_clockAdvance ! Advance the sync clock - public :: seq_timemgr_clockPrint ! Print sync clock information - - public :: seq_timemgr_EClockGetData ! Get data from an ESMF clock - - public :: seq_timemgr_EClockDateInSync ! compare EClock to ymd/tod - public :: seq_timemgr_alarmSetOn ! Turn an alarm on - public :: seq_timemgr_alarmSetOff ! Turn an alarm off - public :: seq_timemgr_alarmIsOn ! Is an alarm ringing - public :: seq_timemgr_ETimeInit ! Create ESMF_Time object - public :: seq_timemgr_ETimeGet ! Query ESMF_Time object - - ! --- For usability, built on interfaces above --- - public :: seq_timemgr_restartAlarmIsOn ! Is a restart alarm ringing - public :: seq_timemgr_stopAlarmIsOn ! Is a stop alarm ringing - public :: seq_timemgr_historyAlarmIsOn ! Is a history alarm ringing - public :: seq_timemgr_pauseAlarmIsOn ! Is a pause alarm ringing - ! --- ESP components need to know about the state of other components - public :: seq_timemgr_pause_active ! Pause/resume is enabled - public :: seq_timemgr_pause_component_index ! Index of named component - public :: seq_timemgr_pause_component_active ! .true. is comp should pause - - ! --- Alert components if they need to do post-data assimilation processing - public :: seq_timemgr_data_assimilation_active ! .true. do post-DA - - ! ! PUBLIC PARAMETERS: - - integer(SHR_KIND_IN),public :: seq_timemgr_histavg_type - integer(SHR_KIND_IN),public,parameter :: seq_timemgr_type_other = -1 - integer(SHR_KIND_IN),public,parameter :: seq_timemgr_type_never = 1 - integer(SHR_KIND_IN),public,parameter :: seq_timemgr_type_nhour = 2 - integer(SHR_KIND_IN),public,parameter :: seq_timemgr_type_nday = 3 - integer(SHR_KIND_IN),public,parameter :: seq_timemgr_type_nmonth = 4 - integer(SHR_KIND_IN),public,parameter :: seq_timemgr_type_nyear = 5 - - character(SHR_KIND_CS),public,parameter :: seq_timemgr_noleap = shr_cal_noleap - character(SHR_KIND_CS),public,parameter :: seq_timemgr_gregorian = shr_cal_gregorian - - ! These are public but declared in the private area for clarity - - ! clocknames: - ! character(len=*),public,parameter :: & - ! seq_timemgr_clock_drv - ! seq_timemgr_clock_atm - ! seq_timemgr_clock_lnd - ! seq_timemgr_clock_rof - ! seq_timemgr_clock_ocn - ! seq_timemgr_clock_ice - ! seq_timemgr_clock_glc - ! seq_timemgr_clock_wav - ! seq_timemgr_clock_esp - - ! alarmnames: - ! character(len=*),public,parameter :: & - ! seq_timemgr_alarm_restart - ! seq_timemgr_alarm_run - ! seq_timemgr_alarm_stop - ! seq_timemgr_alarm_datestop - ! seq_timemgr_alarm_history - ! seq_timemgr_alarm_atmrun - ! seq_timemgr_alarm_lndrun - ! seq_timemgr_alarm_rofrun - ! seq_timemgr_alarm_ocnrun - ! seq_timemgr_alarm_icerun - ! 
seq_timemgr_alarm_glcrun - ! seq_timemgr_alarm_glcrun_avg - ! seq_timemgr_alarm_wavrun - ! seq_timemgr_alarm_esprun - ! seq_timemgr_alarm_ocnnext - ! seq_timemgr_alarm_tprof - ! seq_timemgr_alarm_histavg - ! seq_timemgr_alarm_pause - ! seq_timemgr_alarm_barrier - - private:: seq_timemgr_alarmGet - private:: seq_timemgr_alarmInit - private:: seq_timemgr_EClockInit - private:: seq_timemgr_ESMFDebug - private:: seq_timemgr_ESMFCodeCheck - - character(len=*), private, parameter :: & - seq_timemgr_optNONE = "none" , & - seq_timemgr_optNever = "never" , & - seq_timemgr_optNSteps = "nsteps" , & - seq_timemgr_optNStep = "nstep" , & - seq_timemgr_optNSeconds = "nseconds" , & - seq_timemgr_optNSecond = "nsecond" , & - seq_timemgr_optNMinutes = "nminutes" , & - seq_timemgr_optNMinute = "nminute" , & - seq_timemgr_optNHours = "nhours" , & - seq_timemgr_optNHour = "nhour" , & - seq_timemgr_optNDays = "ndays" , & - seq_timemgr_optNDay = "nday" , & - seq_timemgr_optNMonths = "nmonths" , & - seq_timemgr_optNMonth = "nmonth" , & - seq_timemgr_optNYears = "nyears" , & - seq_timemgr_optNYear = "nyear" , & - seq_timemgr_optMonthly = "monthly" , & - seq_timemgr_optYearly = "yearly" , & - seq_timemgr_optDate = "date" , & - seq_timemgr_optIfdays0 = "ifdays0" , & - seq_timemgr_optEnd = "end" , & - seq_timemgr_optGLCCouplingPeriod = "glc_coupling_period" - - integer(SHR_KIND_IN),private,parameter :: & - seq_timemgr_nclock_drv = 1, & - seq_timemgr_nclock_atm = 2, & - seq_timemgr_nclock_lnd = 3, & - seq_timemgr_nclock_ocn = 4, & - seq_timemgr_nclock_ice = 5, & - seq_timemgr_nclock_glc = 6, & - seq_timemgr_nclock_wav = 7, & - seq_timemgr_nclock_rof = 8, & - seq_timemgr_nclock_iac = 9, & - seq_timemgr_nclock_esp = 10 - - integer(SHR_KIND_IN),private,parameter :: max_clocks = 10 - character(len=*),public,parameter :: & - seq_timemgr_clock_drv = 'seq_timemgr_clock_drv' , & - seq_timemgr_clock_atm = 'seq_timemgr_clock_atm' , & - seq_timemgr_clock_lnd = 'seq_timemgr_clock_lnd' , & - seq_timemgr_clock_ocn = 'seq_timemgr_clock_ocn' , & - seq_timemgr_clock_ice = 'seq_timemgr_clock_ice' , & - seq_timemgr_clock_glc = 'seq_timemgr_clock_glc' , & - seq_timemgr_clock_wav = 'seq_timemgr_clock_wav' , & - seq_timemgr_clock_rof = 'seq_timemgr_clock_rof' , & - seq_timemgr_clock_esp = 'seq_timemgr_clock_esp' - character(len=8),private,parameter :: seq_timemgr_clocks(max_clocks) = & - (/'drv ','atm ','lnd ','ocn ', & - 'ice ','glc ','wav ','rof ', & - 'iac ','esp '/) - - ! Alarms on both component clocks and driver clock - integer(SHR_KIND_IN),private,parameter :: & - seq_timemgr_nalarm_restart = 1 , & ! driver and component clock alarm - seq_timemgr_nalarm_run = 2 , & ! driver and component clock alarm - seq_timemgr_nalarm_stop = 3 , & ! driver and component clock alarm - seq_timemgr_nalarm_datestop = 4 , & ! driver and component clock alarm - seq_timemgr_nalarm_history = 5 , & ! driver and component clock alarm - seq_timemgr_nalarm_atmrun = 6 , & ! driver only clock alarm - seq_timemgr_nalarm_lndrun = 7 , & ! driver only clock alarm - seq_timemgr_nalarm_ocnrun = 8 , & ! driver only clock alarm - seq_timemgr_nalarm_icerun = 9 , & ! driver only clock alarm - seq_timemgr_nalarm_glcrun =10 , & ! driver only clock alarm - seq_timemgr_nalarm_glcrun_avg =11 , & ! driver only clock alarm - seq_timemgr_nalarm_ocnnext =12 , & ! driver only clock alarm - seq_timemgr_nalarm_tprof =13 , & ! driver and component clock alarm - seq_timemgr_nalarm_histavg =14 , & ! driver and component clock alarm - seq_timemgr_nalarm_rofrun =15 , & ! 
driver only clock alarm - seq_timemgr_nalarm_wavrun =16 , & ! driver only clock alarm - seq_timemgr_nalarm_iacrun =17 , & ! driver only clock alarm - seq_timemgr_nalarm_esprun =18 , & ! driver only clock alarm - seq_timemgr_nalarm_pause =19 , & - seq_timemgr_nalarm_barrier =20 , & ! driver and component clock alarm - max_alarms = seq_timemgr_nalarm_barrier - - character(len=*),public,parameter :: & - seq_timemgr_alarm_restart = 'seq_timemgr_alarm_restart ', & - seq_timemgr_alarm_run = 'seq_timemgr_alarm_run ', & - seq_timemgr_alarm_stop = 'seq_timemgr_alarm_stop ', & - seq_timemgr_alarm_datestop = 'seq_timemgr_alarm_datestop', & - seq_timemgr_alarm_history = 'seq_timemgr_alarm_history ', & - seq_timemgr_alarm_atmrun = 'seq_timemgr_alarm_atmrun ', & - seq_timemgr_alarm_lndrun = 'seq_timemgr_alarm_lndrun ', & - seq_timemgr_alarm_ocnrun = 'seq_timemgr_alarm_ocnrun ', & - seq_timemgr_alarm_icerun = 'seq_timemgr_alarm_icerun ', & - seq_timemgr_alarm_glcrun = 'seq_timemgr_alarm_glcrun ', & - seq_timemgr_alarm_glcrun_avg = 'seq_timemgr_alarm_glcrun_avg' , & - seq_timemgr_alarm_ocnnext = 'seq_timemgr_alarm_ocnnext ', & - seq_timemgr_alarm_tprof = 'seq_timemgr_alarm_tprof ', & - seq_timemgr_alarm_histavg = 'seq_timemgr_alarm_histavg ', & - seq_timemgr_alarm_rofrun = 'seq_timemgr_alarm_rofrun ', & - seq_timemgr_alarm_wavrun = 'seq_timemgr_alarm_wavrun ', & - seq_timemgr_alarm_iacrun = 'seq_timemgr_alarm_iacrun ', & - seq_timemgr_alarm_esprun = 'seq_timemgr_alarm_esprun ', & - seq_timemgr_alarm_pause = 'seq_timemgr_alarm_pause ', & - seq_timemgr_alarm_barrier = 'seq_timemgr_alarm_barrier ' - - ! Active pause - resume components - logical, private :: pause_active(max_clocks) = .false. - - ! Active post-data assimilation components - logical, private :: data_assimilation_active(max_clocks) = .false. - - type EClock_pointer ! needed for array of pointers - type(ESMF_Clock),pointer :: EClock => null() - end type EClock_pointer - - type seq_timemgr_type - private - type(EClock_pointer) :: ECP(max_clocks) ! ESMF clocks, array of pointers - type(ESMF_Alarm) :: EAlarm(max_clocks,max_alarms) ! array of clock alarms - end type seq_timemgr_type - - ! --- Private local data ---------------------------------------------------- - - type(ESMF_Calendar), target, save :: seq_timemgr_cal ! calendar - character(SHR_KIND_CL) ,save :: seq_timemgr_calendar ! calendar string - integer, parameter :: SecPerDay = 86400 ! Seconds per day - - integer :: seq_timemgr_pause_sig_index ! Index of pause comp with smallest dt - logical :: seq_timemgr_esp_run_on_pause ! Run ESP component on pause cycle - logical :: seq_timemgr_end_restart ! write restarts at end of run? - - !=============================================================================== - -contains - - !=============================================================================== - ! !IROUTINE: seq_timemgr_clockInit -- Initializes clocks - ! - ! !DESCRIPTION: - ! - ! Initializes clock - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_clockInit(SyncClock, nmlfile, restart, restart_file, pioid, mpicom, & - EClock_drv, EClock_atm, EClock_lnd, EClock_ocn, EClock_ice, Eclock_glc, & - Eclock_rof, EClock_wav, Eclock_esp, Eclock_iac) - - ! !USES: - use pio, only : file_desc_T - use shr_file_mod, only : shr_file_getunit, shr_file_freeunit - use shr_mpi_mod, only : shr_mpi_bcast - use seq_io_read_mod - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_timemgr_type), intent(INOUT) :: SyncClock ! 
sync clock - character(len=*), intent(IN) :: nmlfile ! namelist file - integer, intent(IN) :: mpicom ! MPI communicator - logical, intent(IN) :: restart ! restart logical - character(len=*), intent(IN) :: restart_file - type(ESMF_clock),target, intent(IN) :: EClock_drv ! drv clock - type(ESMF_clock),target, intent(IN) :: EClock_atm ! atm clock - type(ESMF_clock),target, intent(IN) :: EClock_lnd ! lnd clock - type(ESMF_clock),target, intent(IN) :: EClock_ocn ! ocn clock - type(ESMF_clock),target, intent(IN) :: EClock_ice ! ice clock - type(ESMF_clock),target, intent(IN) :: EClock_glc ! glc clock - type(ESMF_clock),target, intent(IN) :: EClock_rof ! rof clock - type(ESMF_clock),target, intent(IN) :: EClock_wav ! wav clock - type(ESMF_clock),target, intent(IN) :: EClock_iac ! iac clock - type(ESMF_clock),target, intent(IN) :: EClock_esp ! esp clock - type(file_desc_t) :: pioid - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_clockInit) ' - type(ESMF_Time) :: StartTime ! Start time - type(ESMF_Time) :: RefTime ! Reference time - type(ESMF_Time) :: CurrTime ! Current time - type(ESMF_Time) :: OffsetTime ! local computed time - type(ESMF_Time) :: StopTime1 ! Stop time - type(ESMF_Time) :: StopTime2 ! Stop time - type(ESMF_Time) :: StopTime, minStopTime ! Stop time - type(ESMF_TimeInterval) :: TimeStep ! Clock time-step - type(ESMF_TimeInterval) :: cplTimeStep ! Clock time-step - type(ESMF_CalKind_Flag) :: esmf_caltype ! local esmf calendar - integer :: rc ! Return code - integer :: n, i ! index - integer :: min_dt ! smallest time step - integer :: dtime(max_clocks) ! time-step to use - integer :: offset(max_clocks) ! run offset - integer :: unitn ! i/o unit number - integer :: iam ! pe rank - - character(SHR_KIND_CS) :: calendar ! Calendar type - character(SHR_KIND_CS) :: stop_option ! Stop option units - integer(SHR_KIND_IN) :: stop_n ! Number until stop - integer(SHR_KIND_IN) :: stop_ymd ! Stop date (YYYYMMDD) - integer(SHR_KIND_IN) :: stop_tod ! Stop time-of-day - character(SHR_KIND_CS) :: restart_option ! Restart option units - integer(SHR_KIND_IN) :: restart_n ! Number until restart interval - integer(SHR_KIND_IN) :: restart_ymd ! Restart date (YYYYMMDD) - character(SHR_KIND_CS) :: pause_option ! Pause option units - integer(SHR_KIND_IN) :: pause_n ! Number between pause intervals - integer(SHR_KIND_IN) :: RestInterval ! Component Restart Interval - integer(SHR_KIND_IN) :: drvrestinterval ! Driver Restart Interval - - logical :: pause_active_atm - logical :: pause_active_cpl - logical :: pause_active_ocn - logical :: pause_active_wav - logical :: pause_active_glc - logical :: pause_active_ice - logical :: pause_active_rof - logical :: pause_active_lnd - logical :: pause_active_iac - - logical :: data_assimilation_atm - logical :: data_assimilation_cpl - logical :: data_assimilation_ocn - logical :: data_assimilation_wav - logical :: data_assimilation_glc - logical :: data_assimilation_ice - logical :: data_assimilation_rof - logical :: data_assimilation_lnd - logical :: data_assimilation_iac - - character(SHR_KIND_CS) :: history_option ! History option units - integer(SHR_KIND_IN) :: history_n ! Number until history interval - integer(SHR_KIND_IN) :: history_ymd ! History date (YYYYMMDD) - character(SHR_KIND_CS) :: histavg_option ! Histavg option units - integer(SHR_KIND_IN) :: histavg_n ! Number until histavg interval - integer(SHR_KIND_IN) :: histavg_ymd ! Histavg date (YYYYMMDD) - character(SHR_KIND_CS) :: barrier_option ! 
Barrier option units - integer(SHR_KIND_IN) :: barrier_n ! Number until barrier interval - integer(SHR_KIND_IN) :: barrier_ymd ! Barrier date (YYYYMMDD) - character(SHR_KIND_CS) :: tprof_option ! tprof option units - integer(SHR_KIND_IN) :: tprof_n ! Number until tprof interval - integer(SHR_KIND_IN) :: tprof_ymd ! tprof date (YYYYMMDD) - integer(SHR_KIND_IN) :: start_ymd ! Start date ([YY]YYYYMMDD) - integer(SHR_KIND_IN) :: start_tod ! Start time of day (seconds) - integer(SHR_KIND_IN) :: curr_ymd ! Current ymd ([YY]YYYYMMDD) - integer(SHR_KIND_IN) :: curr_tod ! Current tod (seconds) - integer(SHR_KIND_IN) :: ref_ymd ! Reference date (YYYYMMDD) - integer(SHR_KIND_IN) :: ref_tod ! Reference time of day (seconds) - integer(SHR_KIND_IN) :: atm_cpl_dt ! Atmosphere coupling interval - integer(SHR_KIND_IN) :: lnd_cpl_dt ! Land coupling interval - integer(SHR_KIND_IN) :: ice_cpl_dt ! Sea-Ice coupling interval - integer(SHR_KIND_IN) :: ocn_cpl_dt ! Ocean coupling interval - integer(SHR_KIND_IN) :: glc_cpl_dt ! Glc coupling interval - character(SHR_KIND_CS) :: glc_avg_period ! Glc avering coupling period - integer(SHR_KIND_IN) :: rof_cpl_dt ! Runoff coupling interval - integer(SHR_KIND_IN) :: wav_cpl_dt ! Wav coupling interval - integer(SHR_KIND_IN) :: iac_cpl_dt ! Iac coupling interval - integer(SHR_KIND_IN) :: esp_cpl_dt ! Esp coupling interval - integer(SHR_KIND_IN) :: atm_cpl_offset ! Atmosphere coupling interval - integer(SHR_KIND_IN) :: lnd_cpl_offset ! Land coupling interval - integer(SHR_KIND_IN) :: ice_cpl_offset ! Sea-Ice coupling interval - integer(SHR_KIND_IN) :: ocn_cpl_offset ! Ocean coupling interval - integer(SHR_KIND_IN) :: glc_cpl_offset ! Glc coupling interval - integer(SHR_KIND_IN) :: wav_cpl_offset ! Wav coupling interval - integer(SHR_KIND_IN) :: rof_cpl_offset ! Runoff coupling interval - integer(SHR_KIND_IN) :: esp_cpl_offset ! Esp coupling interval - integer(SHR_KIND_IN) :: iac_cpl_offset ! Iac coupling interval - logical :: esp_run_on_pause ! Run ESP on pause cycle - logical :: end_restart ! Write restart at end of run - integer(SHR_KIND_IN) :: ierr ! Return code - - character(len=*), parameter :: F0A = "(2A,A)" - character(len=*), parameter :: F0I = "(2A,I10)" - character(len=*), parameter :: F0L = "(2A,L3)" - - namelist /seq_timemgr_inparm/ calendar, curr_ymd, curr_tod, & - stop_option, stop_n, stop_ymd, stop_tod, & - restart_option, restart_n, restart_ymd, & - pause_option, & - pause_n, & - pause_active_atm, & - pause_active_cpl, & - pause_active_ocn, & - pause_active_wav, & - pause_active_iac, & - pause_active_glc, & - pause_active_ice, & - pause_active_rof, & - pause_active_lnd, & - data_assimilation_atm, & - data_assimilation_cpl, & - data_assimilation_ocn, & - data_assimilation_wav, & - data_assimilation_iac, & - data_assimilation_glc, & - data_assimilation_ice, & - data_assimilation_rof, & - data_assimilation_lnd, & - history_option, history_n, history_ymd, & - histavg_option, histavg_n, histavg_ymd, & - barrier_option, barrier_n, barrier_ymd, & - tprof_option, tprof_n, tprof_ymd, & - start_ymd, start_tod, ref_ymd, ref_tod, & - atm_cpl_dt, ocn_cpl_dt, ice_cpl_dt, lnd_cpl_dt, & - atm_cpl_offset, lnd_cpl_offset, ocn_cpl_offset, & - ice_cpl_offset, glc_cpl_dt, glc_cpl_offset, glc_avg_period, & - wav_cpl_dt, wav_cpl_offset, esp_cpl_dt, esp_cpl_offset, & - iac_cpl_dt, iac_cpl_offset, & - rof_cpl_dt, rof_cpl_offset, esp_run_on_pause, end_restart - !------------------------------------------------------------------------------- - ! 
Notes: - !------------------------------------------------------------------------------- - - SyncClock%ECP(seq_timemgr_nclock_drv)%EClock => EClock_drv - SyncClock%ECP(seq_timemgr_nclock_atm)%EClock => EClock_atm - SyncClock%ECP(seq_timemgr_nclock_lnd)%EClock => EClock_lnd - SyncClock%ECP(seq_timemgr_nclock_ocn)%EClock => EClock_ocn - SyncClock%ECP(seq_timemgr_nclock_ice)%EClock => EClock_ice - SyncClock%ECP(seq_timemgr_nclock_glc)%EClock => EClock_glc - SyncClock%ECP(seq_timemgr_nclock_rof)%EClock => EClock_rof - SyncClock%ECP(seq_timemgr_nclock_wav)%EClock => EClock_wav - SyncClock%ECP(seq_timemgr_nclock_esp)%EClock => EClock_esp - SyncClock%ECP(seq_timemgr_nclock_iac)%EClock => EClock_iac - - call mpi_comm_rank(mpicom,iam,ierr) - - !--------------------------------------------------------------------------- - ! Set syncclock on root pe - !--------------------------------------------------------------------------- - - if (iam == 0) then - - !--------------------------------------------------------------------------- - ! Set namelist defaults - !--------------------------------------------------------------------------- - calendar = seq_timemgr_noleap - stop_option = ' ' - stop_n = -1 - stop_ymd = -1 - stop_tod = 0 - restart_option = seq_timemgr_optYearly - restart_n = -1 - restart_ymd = -1 - pause_option = seq_timemgr_optNever - pause_n = -1 - pause_active_atm = .false. - pause_active_cpl = .false. - pause_active_ocn = .false. - pause_active_wav = .false. - pause_active_glc = .false. - pause_active_ice = .false. - pause_active_rof = .false. - pause_active_lnd = .false. - pause_active_iac = .false. - data_assimilation_atm = .false. - data_assimilation_cpl = .false. - data_assimilation_ocn = .false. - data_assimilation_wav = .false. - data_assimilation_glc = .false. - data_assimilation_ice = .false. - data_assimilation_rof = .false. - data_assimilation_lnd = .false. - data_assimilation_iac = .false. - - history_option = seq_timemgr_optNever - history_n = -1 - history_ymd = -1 - histavg_option = seq_timemgr_optNever - histavg_n = -1 - histavg_ymd = -1 - barrier_option = seq_timemgr_optNever - barrier_n = -1 - barrier_ymd = -1 - tprof_option = seq_timemgr_optNever - tprof_n = -1 - tprof_ymd = -1 - start_ymd = 0 - start_tod = 0 - ref_ymd = 0 - ref_tod = 0 - curr_ymd = 0 - curr_tod = 0 - atm_cpl_dt = 0 - lnd_cpl_dt = 0 - ice_cpl_dt = 0 - ocn_cpl_dt = 0 - glc_cpl_dt = 0 - glc_avg_period = seq_timemgr_optGLCCouplingPeriod - rof_cpl_dt = 0 - wav_cpl_dt = 0 - esp_cpl_dt = 0 - iac_cpl_dt = 0 - atm_cpl_offset = 0 - lnd_cpl_offset = 0 - ice_cpl_offset = 0 - ocn_cpl_offset = 0 - glc_cpl_offset = 0 - rof_cpl_offset = 0 - wav_cpl_offset = 0 - esp_cpl_offset = 0 - iac_cpl_offset = 0 - esp_run_on_pause = .true. - end_restart = .true. - - !--------------------------------------------------------------------------- - ! Read in namelist - !--------------------------------------------------------------------------- - unitn = shr_file_getUnit() - write(logunit,F0A) trim(subname),' Read in seq_timemgr_inparm namelist from: '//trim(nmlfile) - open( unitn, file=trim(nmlfile), status='old' ) - ierr = 1 - do while( ierr /= 0 ) - read(unitn,nml=seq_timemgr_inparm,iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( subname//':: namelist read returns an'// & - ' end of file or end of record condition' ) - end if - end do - close(unitn) - call shr_file_freeUnit( unitn ) - endif - - !--------------------------------------------------------------------------- - ! Read Restart (seq_io is called on all CPLID pes) - ! 
NOTE: slightly messy, seq_io is only valid on CPLID - !--------------------------------------------------------------------------- - if (restart) then - if (seq_comm_iamin(CPLID)) then - call seq_io_read(restart_file,pioid,start_ymd,'seq_timemgr_start_ymd') - call seq_io_read(restart_file,pioid,start_tod,'seq_timemgr_start_tod') - call seq_io_read(restart_file,pioid,ref_ymd ,'seq_timemgr_ref_ymd') - call seq_io_read(restart_file,pioid,ref_tod ,'seq_timemgr_ref_tod') - call seq_io_read(restart_file,pioid,curr_ymd ,'seq_timemgr_curr_ymd') - call seq_io_read(restart_file,pioid,curr_tod ,'seq_timemgr_curr_tod') - endif - !--- Send from CPLID ROOT to GLOBALID ROOT, use bcast as surrogate - call shr_mpi_bcast(start_ymd,mpicom,pebcast=seq_comm_gloroot(CPLID)) - call shr_mpi_bcast(start_tod,mpicom,pebcast=seq_comm_gloroot(CPLID)) - call shr_mpi_bcast( ref_ymd,mpicom,pebcast=seq_comm_gloroot(CPLID)) - call shr_mpi_bcast( ref_tod,mpicom,pebcast=seq_comm_gloroot(CPLID)) - call shr_mpi_bcast( curr_ymd,mpicom,pebcast=seq_comm_gloroot(CPLID)) - call shr_mpi_bcast( curr_tod,mpicom,pebcast=seq_comm_gloroot(CPLID)) - endif - - if (iam == 0) then - !--------------------------------------------------------------------------- - ! Modify namelist as needed - !--------------------------------------------------------------------------- - - if (lnd_cpl_dt == 0) lnd_cpl_dt = atm_cpl_dt ! Copy atm coupling time into lnd - if (rof_cpl_dt == 0) rof_cpl_dt = atm_cpl_dt ! Copy atm coupling time into rof - if (ice_cpl_dt == 0) ice_cpl_dt = atm_cpl_dt ! Copy atm coupling time into ice - if (ocn_cpl_dt == 0) ocn_cpl_dt = atm_cpl_dt ! Copy atm coupling time into ocn - if (glc_cpl_dt == 0) glc_cpl_dt = atm_cpl_dt ! Copy atm coupling time into glc - if (wav_cpl_dt == 0) wav_cpl_dt = atm_cpl_dt ! Copy atm coupling time into wav - if (esp_cpl_dt == 0) esp_cpl_dt = atm_cpl_dt ! Copy atm coupling time into esp - if (iac_cpl_dt == 0) iac_cpl_dt = atm_cpl_dt ! Copy atm coupling time into iac - - if ( ref_ymd == 0 ) then - ref_ymd = start_ymd - ref_tod = start_tod - endif - - if ( curr_ymd == 0 ) then - curr_ymd = start_ymd - curr_tod = start_tod - endif - - if ( stop_ymd < 0) then - ! If we want to go beyond this date we need date vars to be i8 - stop_ymd = 2147480101 - stop_tod = 0 - endif - - if (trim(restart_option) == trim(seq_timemgr_optNone) .or. & - trim(restart_option) == trim(seq_timemgr_optNever)) then - if (end_restart) then - end_restart = .false. - write(logunit,F0A) trim(subname),' WARNING: overriding end_restart to '// & - 'false based on restart_option ' - endif - endif - - if (trim(restart_option) == trim(seq_timemgr_optEnd)) then - restart_option = seq_timemgr_optNone - write(logunit,F0A) trim(subname),' WARNING: overriding restart_option to '// & - 'none and verifying end_restart flag is true ' - if (.not. end_restart) then - end_restart = .true. - write(logunit,F0A) trim(subname),' WARNING: overriding end_restart to '// & - 'true based on restart_option (end) ' - endif - endif - - !--------------------------------------------------------------------------- - ! 
Print out the namelist settings - !--------------------------------------------------------------------------- - - write(logunit,F0A) ' ' - write(logunit,F0A) trim(subname),' Clock Init Settings:' - write(logunit,F0A) trim(subname),' calendar = ',trim(calendar) - write(logunit,F0A) trim(subname),' stop_option = ',trim(stop_option) - write(logunit,F0I) trim(subname),' stop_n = ',stop_n - write(logunit,F0I) trim(subname),' stop_ymd = ',stop_ymd - write(logunit,F0I) trim(subname),' stop_tod = ',stop_tod - write(logunit,F0A) trim(subname),' restart_option = ',trim(restart_option) - write(logunit,F0I) trim(subname),' restart_n = ',restart_n - write(logunit,F0I) trim(subname),' restart_ymd = ',restart_ymd - write(logunit,F0L) trim(subname),' end_restart = ',end_restart - write(logunit,F0A) trim(subname),' pause_option = ',& - trim(pause_option) - write(logunit,F0I) trim(subname),' pause_n = ',& - pause_n - write(logunit,F0L) trim(subname),' esp_run_on_pause = ',esp_run_on_pause - write(logunit,F0A) trim(subname),' history_option = ',trim(history_option) - write(logunit,F0I) trim(subname),' history_n = ',history_n - write(logunit,F0I) trim(subname),' history_ymd = ',history_ymd - write(logunit,F0A) trim(subname),' histavg_option = ',trim(histavg_option) - write(logunit,F0I) trim(subname),' histavg_n = ',histavg_n - write(logunit,F0I) trim(subname),' histavg_ymd = ',histavg_ymd - write(logunit,F0A) trim(subname),' barrier_option = ',trim(barrier_option) - write(logunit,F0I) trim(subname),' barrier_n = ',barrier_n - write(logunit,F0I) trim(subname),' barrier_ymd = ',barrier_ymd - write(logunit,F0A) trim(subname),' tprof_option = ',trim(tprof_option) - write(logunit,F0I) trim(subname),' tprof_n = ',tprof_n - write(logunit,F0I) trim(subname),' tprof_ymd = ',tprof_ymd - write(logunit,F0I) trim(subname),' start_ymd = ',start_ymd - write(logunit,F0I) trim(subname),' start_tod = ',start_tod - write(logunit,F0I) trim(subname),' ref_ymd = ',ref_ymd - write(logunit,F0I) trim(subname),' ref_tod = ',ref_tod - write(logunit,F0I) trim(subname),' atm_cpl_dt = ',atm_cpl_dt - write(logunit,F0I) trim(subname),' lnd_cpl_dt = ',lnd_cpl_dt - write(logunit,F0I) trim(subname),' ice_cpl_dt = ',ice_cpl_dt - write(logunit,F0I) trim(subname),' ocn_cpl_dt = ',ocn_cpl_dt - write(logunit,F0I) trim(subname),' glc_cpl_dt = ',glc_cpl_dt - write(logunit,F0A) trim(subname),' glc_avg_period = ',glc_avg_period - write(logunit,F0I) trim(subname),' rof_cpl_dt = ',rof_cpl_dt - write(logunit,F0I) trim(subname),' wav_cpl_dt = ',wav_cpl_dt - write(logunit,F0I) trim(subname),' esp_cpl_dt = ',esp_cpl_dt - write(logunit,F0I) trim(subname),' iac_cpl_dt = ',iac_cpl_dt - write(logunit,F0I) trim(subname),' atm_cpl_offset = ',atm_cpl_offset - write(logunit,F0I) trim(subname),' lnd_cpl_offset = ',lnd_cpl_offset - write(logunit,F0I) trim(subname),' ice_cpl_offset = ',ice_cpl_offset - write(logunit,F0I) trim(subname),' ocn_cpl_offset = ',ocn_cpl_offset - write(logunit,F0I) trim(subname),' glc_cpl_offset = ',glc_cpl_offset - write(logunit,F0I) trim(subname),' rof_cpl_offset = ',rof_cpl_offset - write(logunit,F0I) trim(subname),' wav_cpl_offset = ',wav_cpl_offset - write(logunit,F0I) trim(subname),' esp_cpl_offset = ',esp_cpl_offset - write(logunit,F0I) trim(subname),' iac_cpl_offset = ',iac_cpl_offset - write(logunit,F0A) ' ' - - !--------------------------------------------------------------------------- - ! Check a few things - !--------------------------------------------------------------------------- - - ! 
--- Coupling intervals ------------------------------------------------ - if ( atm_cpl_dt <= 0 .or. & - lnd_cpl_dt /= atm_cpl_dt .or. & - ice_cpl_dt /= atm_cpl_dt .or. & - ocn_cpl_dt <= 0 .or. glc_cpl_dt <= 0 .or. rof_cpl_dt <=0 .or. & - wav_cpl_dt <=0 .or. esp_cpl_dt <=0 .or. iac_cpl_dt <=0) then - write(logunit,*) trim(subname),' ERROR: aliogrwe _cpl_dt = ', & - atm_cpl_dt, lnd_cpl_dt, ice_cpl_dt, ocn_cpl_dt, glc_cpl_dt, & - rof_cpl_dt, wav_cpl_dt, esp_cpl_dt, iac_cpl_dt - call shr_sys_abort( subname//': ERROR coupling intervals invalid' ) - end if - ! --- Coupling offsets -------------------------------------------------- - if ( abs(atm_cpl_offset) > atm_cpl_dt .or. & - abs(lnd_cpl_offset) > lnd_cpl_dt .or. & - abs(ice_cpl_offset) > ice_cpl_dt .or. & - abs(glc_cpl_offset) > glc_cpl_dt .or. & - abs(rof_cpl_offset) > rof_cpl_dt .or. & - abs(wav_cpl_offset) > wav_cpl_dt .or. & - abs(esp_cpl_offset) > esp_cpl_dt .or. & - abs(iac_cpl_offset) > iac_cpl_dt .or. & - abs(ocn_cpl_offset) > ocn_cpl_dt) then - write(logunit,*) trim(subname),' ERROR: aliogrwe _cpl_offset = ', & - atm_cpl_offset, lnd_cpl_offset, ice_cpl_offset, ocn_cpl_offset, & - glc_cpl_offset, rof_cpl_offset, wav_cpl_offset, esp_cpl_offset, & - iac_cpl_offset - call shr_sys_abort( subname//': ERROR coupling offsets invalid' ) - end if - - ! --- Start time date --------------------------------------------------- - ! If we want to go beyond this date we need date vars to be i8 - if ( (start_ymd < 101) .or. (start_ymd > 2147471231)) then - write(logunit,*) subname,' ERROR: illegal start_ymd',start_ymd - call shr_sys_abort( subname//': ERROR invalid start_ymd') - end if - - endif - - !--------------------------------------------------------------------------- - ! Broadcast namelist data - !--------------------------------------------------------------------------- - call shr_mpi_bcast( calendar, mpicom ) - call shr_mpi_bcast( stop_n, mpicom ) - call shr_mpi_bcast( stop_option, mpicom ) - call shr_mpi_bcast( stop_ymd, mpicom ) - call shr_mpi_bcast( stop_tod, mpicom ) - call shr_mpi_bcast( restart_n, mpicom ) - call shr_mpi_bcast( restart_option, mpicom ) - call shr_mpi_bcast( restart_ymd, mpicom ) - call shr_mpi_bcast( pause_n, mpicom ) - call shr_mpi_bcast( pause_option, mpicom ) - call shr_mpi_bcast(pause_active_atm, mpicom ) - call shr_mpi_bcast(pause_active_cpl, mpicom ) - call shr_mpi_bcast(pause_active_ocn, mpicom ) - call shr_mpi_bcast(pause_active_wav, mpicom ) - call shr_mpi_bcast(pause_active_glc, mpicom ) - call shr_mpi_bcast(pause_active_ice, mpicom ) - call shr_mpi_bcast(pause_active_rof, mpicom ) - call shr_mpi_bcast(pause_active_lnd, mpicom ) - call shr_mpi_bcast(pause_active_iac, mpicom ) - call shr_mpi_bcast(data_assimilation_atm, mpicom ) - call shr_mpi_bcast(data_assimilation_cpl, mpicom ) - call shr_mpi_bcast(data_assimilation_ocn, mpicom ) - call shr_mpi_bcast(data_assimilation_wav, mpicom ) - call shr_mpi_bcast(data_assimilation_glc, mpicom ) - call shr_mpi_bcast(data_assimilation_ice, mpicom ) - call shr_mpi_bcast(data_assimilation_rof, mpicom ) - call shr_mpi_bcast(data_assimilation_lnd, mpicom ) - call shr_mpi_bcast(data_assimilation_iac, mpicom ) - - call shr_mpi_bcast( history_n, mpicom ) - call shr_mpi_bcast( history_option, mpicom ) - call shr_mpi_bcast( history_ymd, mpicom ) - call shr_mpi_bcast( histavg_n, mpicom ) - call shr_mpi_bcast( histavg_option, mpicom ) - call shr_mpi_bcast( histavg_ymd, mpicom ) - call shr_mpi_bcast( tprof_n, mpicom ) - call shr_mpi_bcast( barrier_n, mpicom ) - call 
shr_mpi_bcast( barrier_option, mpicom ) - call shr_mpi_bcast( barrier_ymd, mpicom ) - call shr_mpi_bcast( tprof_option, mpicom ) - call shr_mpi_bcast( tprof_ymd, mpicom ) - call shr_mpi_bcast( start_ymd, mpicom ) - call shr_mpi_bcast( start_tod, mpicom ) - call shr_mpi_bcast( ref_ymd, mpicom ) - call shr_mpi_bcast( ref_tod, mpicom ) - call shr_mpi_bcast( curr_ymd, mpicom ) - call shr_mpi_bcast( curr_tod, mpicom ) - call shr_mpi_bcast( atm_cpl_dt, mpicom ) - call shr_mpi_bcast( lnd_cpl_dt, mpicom ) - call shr_mpi_bcast( ice_cpl_dt, mpicom ) - call shr_mpi_bcast( ocn_cpl_dt, mpicom ) - call shr_mpi_bcast( glc_cpl_dt, mpicom ) - call shr_mpi_bcast( glc_avg_period, mpicom ) - call shr_mpi_bcast( rof_cpl_dt, mpicom ) - call shr_mpi_bcast( wav_cpl_dt, mpicom ) - call shr_mpi_bcast( esp_cpl_dt, mpicom ) - call shr_mpi_bcast( iac_cpl_dt, mpicom ) - call shr_mpi_bcast( atm_cpl_offset, mpicom ) - call shr_mpi_bcast( lnd_cpl_offset, mpicom ) - call shr_mpi_bcast( ice_cpl_offset, mpicom ) - call shr_mpi_bcast( ocn_cpl_offset, mpicom ) - call shr_mpi_bcast( glc_cpl_offset, mpicom ) - call shr_mpi_bcast( rof_cpl_offset, mpicom ) - call shr_mpi_bcast( wav_cpl_offset, mpicom ) - call shr_mpi_bcast( esp_cpl_offset, mpicom ) - call shr_mpi_bcast( iac_cpl_offset, mpicom ) - call shr_mpi_bcast( esp_run_on_pause, mpicom ) - call shr_mpi_bcast( end_restart, mpicom ) - - ! --- derive a couple things --- - if (trim(histavg_option) == trim(seq_timemgr_optNever) .or. & - trim(histavg_option) == trim(seq_timemgr_optNone)) then - seq_timemgr_histavg_type = seq_timemgr_type_never - elseif (trim(histavg_option) == trim(seq_timemgr_optNHours) .or. & - trim(histavg_option) == trim(seq_timemgr_optNHour)) then - seq_timemgr_histavg_type = seq_timemgr_type_nhour - elseif (trim(histavg_option) == trim(seq_timemgr_optNDays) .or. & - trim(histavg_option) == trim(seq_timemgr_optNDay)) then - seq_timemgr_histavg_type = seq_timemgr_type_nday - elseif (trim(histavg_option) == trim(seq_timemgr_optNMonths) .or. & - trim(histavg_option) == trim(seq_timemgr_optNMonth) .or. & - trim(histavg_option) == trim(seq_timemgr_optMonthly)) then - seq_timemgr_histavg_type = seq_timemgr_type_nmonth - elseif (trim(histavg_option) == trim(seq_timemgr_optNYears) .or. & - trim(histavg_option) == trim(seq_timemgr_optNYear) .or. & - trim(histavg_option) == trim(seq_timemgr_optYearly)) then - seq_timemgr_histavg_type = seq_timemgr_type_nyear - else - seq_timemgr_histavg_type = seq_timemgr_type_other - endif - - - ! --- Initialize generic stuff --- - seq_timemgr_calendar = shr_cal_calendarName(calendar) - seq_timemgr_esp_run_on_pause = esp_run_on_pause - seq_timemgr_end_restart = end_restart - rc = 1 - i = 1 - ! --- Figure out which components (if any) are doing pause this run - pause_active(seq_timemgr_nclock_atm) = pause_active_atm - pause_active(seq_timemgr_nclock_drv) = pause_active_cpl - pause_active(seq_timemgr_nclock_ocn) = pause_active_ocn - pause_active(seq_timemgr_nclock_wav) = pause_active_wav - pause_active(seq_timemgr_nclock_glc) = pause_active_glc - pause_active(seq_timemgr_nclock_ice) = pause_active_ice - pause_active(seq_timemgr_nclock_rof) = pause_active_rof - pause_active(seq_timemgr_nclock_lnd) = pause_active_lnd - pause_active(seq_timemgr_nclock_iac) = pause_active_iac - - ! 
Figure out which compoments need to do post-data assimilation processing - data_assimilation_active(seq_timemgr_nclock_atm) = data_assimilation_atm - data_assimilation_active(seq_timemgr_nclock_drv) = data_assimilation_cpl - data_assimilation_active(seq_timemgr_nclock_ocn) = data_assimilation_ocn - data_assimilation_active(seq_timemgr_nclock_wav) = data_assimilation_wav - data_assimilation_active(seq_timemgr_nclock_glc) = data_assimilation_glc - data_assimilation_active(seq_timemgr_nclock_ice) = data_assimilation_ice - data_assimilation_active(seq_timemgr_nclock_rof) = data_assimilation_rof - data_assimilation_active(seq_timemgr_nclock_lnd) = data_assimilation_lnd - data_assimilation_active(seq_timemgr_nclock_iac) = data_assimilation_iac - - if ( ANY(pause_active) .and. & - (trim(pause_option) /= seq_timemgr_optNONE) .and. & - (trim(pause_option) /= seq_timemgr_optNever)) then - do n = 1, max_clocks - if (pause_active(n) .and. (iam == 0)) then - write(logunit, '(4a)') subname, ': Pause active for ', & - trim(seq_timemgr_clocks(n)),' component' - end if - end do - end if - - ! --- Create the new calendar if not already set ------ - if ( trim(seq_timemgr_calendar) == trim(seq_timemgr_noleap)) then - esmf_caltype = ESMF_CALKIND_NOLEAP - else if ( trim(seq_timemgr_calendar) == trim(seq_timemgr_gregorian)) then - esmf_caltype = ESMF_CALKIND_GREGORIAN - else - write(logunit,*) subname//': unrecognized ESMF calendar specified: '// & - trim(seq_timemgr_calendar) - call shr_sys_abort( subname//'ERROR:: bad calendar for ESMF' ) - end if - - seq_timemgr_cal = ESMF_CalendarCreate( name='CIME_'//seq_timemgr_calendar, & - calkindflag=esmf_caltype, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, subname//': error return from ESMF_CalendarCreate' ) - - ! --- Initialize start, ref, and current date --- - - call seq_timemgr_ETimeInit( StartTime, start_ymd, start_tod, "Start date" ) - call seq_timemgr_ETimeInit( RefTime , ref_ymd , ref_tod , "Reference date" ) - call seq_timemgr_ETimeInit( CurrTime , curr_ymd , curr_tod , "Current date") - - ! --- Figure out what time-stepping interval should be. --------------- - - dtime = 0 - dtime(seq_timemgr_nclock_atm ) = atm_cpl_dt - dtime(seq_timemgr_nclock_lnd ) = lnd_cpl_dt - dtime(seq_timemgr_nclock_ocn ) = ocn_cpl_dt - dtime(seq_timemgr_nclock_ice ) = ice_cpl_dt - dtime(seq_timemgr_nclock_glc ) = glc_cpl_dt - dtime(seq_timemgr_nclock_rof ) = rof_cpl_dt - dtime(seq_timemgr_nclock_wav ) = wav_cpl_dt - dtime(seq_timemgr_nclock_esp ) = esp_cpl_dt - dtime(seq_timemgr_nclock_iac ) = iac_cpl_dt - - ! --- this finds the min of dtime excluding the driver value --- - dtime(seq_timemgr_nclock_drv) = maxval(dtime) - dtime(seq_timemgr_nclock_drv) = minval(dtime) - - ! --- For figuring pause cycle - min_dt = maxval(dtime) - seq_timemgr_pause_sig_index = -1 - - do n = 1,max_clocks - if ( mod(dtime(n),dtime(seq_timemgr_nclock_drv)) /= 0) then - write(logunit,*) trim(subname),' ERROR: dtime inconsistent = ',dtime - call shr_sys_abort( subname//' :coupling intervals not compatible' ) - endif - if (pause_active(n) .and. (dtime(n) <= min_dt)) then - min_dt = dtime(n) - seq_timemgr_pause_sig_index = n - end if - enddo - if (ANY(pause_active)) then - if (seq_timemgr_pause_sig_index < 1) then - write(logunit, *) subname,"ERROR: No pause_sig_index even with active pause" - call shr_sys_abort(subname//"ERROR: No pause_sig_index even with active pause") - end if - else - ! Don't try to run ESP on non-existent pauses - seq_timemgr_esp_run_on_pause = .false. - end if - - ! 
--- Initialize component and driver clocks and alarms common to components and driver clocks --- - call ESMF_TimeIntervalSet( CplTimeStep, s=dtime(seq_timemgr_nclock_drv), rc=rc ) - do n = 1,max_clocks - call ESMF_TimeIntervalSet( TimeStep, s=dtime(n), rc=rc ) - - call seq_timemgr_ESMFCodeCheck( rc, subname//': error ESMF_TimeIntervalSet' ) - - call seq_timemgr_EClockInit( TimeStep, StartTime, RefTime, CurrTime, SyncClock%ECP(n)%EClock) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_run), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(n), & - RefTime = CurrTime, & - alarmname = trim(seq_timemgr_alarm_run)) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_stop), & - option = stop_option, & - opt_n = stop_n, & - opt_ymd = stop_ymd, & - opt_tod = stop_tod, & - RefTime = CurrTime, & - cplTimeStep = cplTimeStep, & - alarmname = trim(seq_timemgr_alarm_stop)) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_datestop), & - option = seq_timemgr_optDate, & - opt_ymd = stop_ymd, & - opt_tod = stop_tod, & - RefTime = StartTime, & - alarmname = trim(seq_timemgr_alarm_datestop)) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_restart), & - option = restart_option, & - opt_n = restart_n, & - opt_ymd = restart_ymd, & - RefTime = CurrTime, & - cplTimeStep = cplTimeStep, & - alarmname = trim(seq_timemgr_alarm_restart)) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_history), & - option = history_option, & - opt_n = history_n, & - opt_ymd = history_ymd, & - RefTime = StartTime, & - cplTimeStep = cplTimeStep, & - alarmname = trim(seq_timemgr_alarm_history)) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_histavg), & - option = histavg_option, & - opt_n = histavg_n, & - opt_ymd = histavg_ymd, & - RefTime = StartTime, & - cplTimeStep = cplTimeStep, & - alarmname = trim(seq_timemgr_alarm_histavg)) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_barrier), & - option = barrier_option, & - opt_n = barrier_n, & - opt_ymd = barrier_ymd, & - RefTime = CurrTime, & - cplTimeStep = cplTimeStep, & - alarmname = trim(seq_timemgr_alarm_barrier)) - - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_tprof), & - option = tprof_option, & - opt_n = tprof_n, & - opt_ymd = tprof_ymd, & - RefTime = StartTime, & - cplTimeStep = cplTimeStep, & - alarmname = trim(seq_timemgr_alarm_tprof)) - - call ESMF_AlarmGet(SyncClock%EAlarm(n,seq_timemgr_nalarm_stop), RingTime=StopTime1, rc=rc ) - call ESMF_AlarmGet(SyncClock%EAlarm(n,seq_timemgr_nalarm_datestop), RingTime=StopTime2, rc=rc ) - - if (StopTime2 < StopTime1) then - StopTime = StopTime2 - else - StopTime = StopTime1 - endif - if (n == 1) then - minStopTIme = StopTime - elseif (StopTime < minStopTime) then - minStopTime = StopTime - endif - call ESMF_ClockSet(SyncClock%ECP(n)%EClock, StopTime=StopTime) - - ! 
Set the pause option if pause/resume is active - if (pause_active(n)) then - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_pause), & - option = pause_option, & - opt_n = pause_n, & - RefTime = CurrTime, & - alarmname = trim(seq_timemgr_alarm_pause)) - else - call seq_timemgr_alarmInit(SyncClock%ECP(n)%EClock, & - EAlarm = SyncClock%EAlarm(n,seq_timemgr_nalarm_pause), & - option = seq_timemgr_optNever, & - opt_n = -1, & - RefTime = StartTime, & - alarmname = trim(seq_timemgr_alarm_pause)) - endif - - enddo - - ! -------------------------------------------------------------------- - ! Set the timing run alarms, these alarms are synced to the driver - ! clock and determine when the component clocks are advanced. - ! We need an offset here of the driver timestep because of the - ! implementation. We are advancing the clock first and we want - ! components to run as soon as possible. Without the driver offset - ! the alarms would go off at the last possible timestep, not first. - ! In addition, we allow the user to set other offsets if desired - ! via namelist. tcraig, 10/2007 - ! -------------------------------------------------------------------- - - offset(seq_timemgr_nclock_drv) = 0 - offset(seq_timemgr_nclock_atm) = atm_cpl_offset - offset(seq_timemgr_nclock_lnd) = lnd_cpl_offset - offset(seq_timemgr_nclock_ocn) = ocn_cpl_offset - offset(seq_timemgr_nclock_ice) = ice_cpl_offset - offset(seq_timemgr_nclock_glc) = glc_cpl_offset - offset(seq_timemgr_nclock_rof) = rof_cpl_offset - offset(seq_timemgr_nclock_wav) = wav_cpl_offset - offset(seq_timemgr_nclock_esp) = esp_cpl_offset - offset(seq_timemgr_nclock_iac) = iac_cpl_offset - - call seq_timemgr_alarmGet(SyncClock%EAlarm(seq_timemgr_nclock_drv, & - seq_timemgr_nalarm_restart), IntSec=drvRestInterval) - - do n = 1,max_clocks - if (abs(offset(n)) > dtime(n)) then - write(logunit,*) subname,' ERROR: offset too large',n,dtime(n),offset(n) - call shr_sys_abort() - endif - - !--- this is the required driver timestep offset --- - offset(n) = offset(n) + dtime(seq_timemgr_nclock_drv) - - if (mod(offset(n),dtime(seq_timemgr_nclock_drv)) /= 0) then - write(logunit,*) subname,' ERROR: offset not multiple',n,dtime(seq_timemgr_nclock_drv),offset(n) - call shr_sys_abort() - endif - call ESMF_TimeIntervalSet( TimeStep, s=dtime(n), rc=rc ) - if(CurrTime + TimeStep > minStopTime ) then - write(logunit,*) subname//" WARNING: Stop time too short, not all components will be advanced & - &and restarts won't be written" - endif - if (n /= seq_timemgr_nclock_esp .and. trim(restart_option) .ne. & - trim(seq_timemgr_optNone) .and. & - trim(restart_option) .ne. trim(seq_timemgr_optNever)) then - call seq_timemgr_alarmGet(SyncClock%EAlarm(n,seq_timemgr_nalarm_restart), IntSec=RestInterval) - if (RestInterval .ne. drvRestInterval) then - write(logunit,*) subname, 'RestInterval=',RestInterval,& - ' drvrestinterval=',drvrestinterval - call shr_sys_abort(trim(subname)//"Component RestInterval inconsistant with driver") - endif - endif - enddo - - ! 
Set component run alarms on driver clock - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_atm), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_atmrun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_atm), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_atmrun)) - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_lnd), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_lndrun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_lnd), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_lndrun)) - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_rof), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_rofrun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_rof), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_rofrun)) - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_ice), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_icerun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_ice), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_icerun)) - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_wav), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_wavrun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_wav), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_wavrun)) - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_iac), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_iacrun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_iac), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_iacrun)) - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_glc), rc=rc ) - OffsetTime = CurrTime + TimeStep - call ESMF_TimeIntervalSet( TimeStep, s=-offset(seq_timemgr_nclock_drv), rc=rc ) - OffsetTime = OffsetTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_glc), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_glcrun)) - if (glc_avg_period == seq_timemgr_optGLCCouplingPeriod) then - ! 
Create this alarm identically to the glcrun alarm (which is created above) - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun_avg), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_glc), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_glcrun_avg)) - else if (glc_avg_period == seq_timemgr_optYearly) then - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun_avg), & - option = seq_timemgr_optYearly, & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_glcrun_avg)) - else - call shr_sys_abort(subname//':: glc_avg_period can only be glc_coupling_period or yearly') - end if - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_ocn), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_ocnrun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_ocn), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_ocnrun)) - - ! --- this is the ocnrun alarm (there ^) offset by a -dtime of the driver - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_ocn), rc=rc ) - OffsetTime = CurrTime + TimeStep - call ESMF_TimeIntervalSet( TimeStep, s=-offset(seq_timemgr_nclock_drv), rc=rc ) - OffsetTime = OffsetTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_ocnnext), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_ocn), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_ocnnext)) - - call ESMF_TimeIntervalSet( TimeStep, s=offset(seq_timemgr_nclock_esp), rc=rc ) - OffsetTime = CurrTime + TimeStep - call seq_timemgr_alarmInit(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, & - EAlarm = SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_esprun), & - option = seq_timemgr_optNSeconds, & - opt_n = dtime(seq_timemgr_nclock_esp), & - RefTime = OffsetTime, & - alarmname = trim(seq_timemgr_alarm_esprun)) - - end subroutine seq_timemgr_clockInit - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_EClockGetData -- Get information from the clock - ! - ! !DESCRIPTION: - ! - ! Get various values from the clock. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_EClockGetData( EClock, curr_yr, curr_mon, curr_day, & - curr_ymd, curr_tod, prev_ymd, prev_tod, start_ymd, & - start_tod, StepNo, ref_ymd, ref_tod, & - stop_ymd, stop_tod, dtime, ECurrTime, alarmcount, & - curr_cday, next_cday, curr_time, prev_time, calendar) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(IN) :: EClock ! Input clock object - integer(SHR_KIND_IN), intent(OUT), optional :: curr_yr ! Current year - integer(SHR_KIND_IN), intent(OUT), optional :: curr_mon ! Current month - integer(SHR_KIND_IN), intent(OUT), optional :: curr_day ! Current day in month - integer(SHR_KIND_IN), intent(OUT), optional :: curr_ymd ! Current date YYYYMMDD - integer(SHR_KIND_IN), intent(OUT), optional :: curr_tod ! 
Current time of day (s) - integer(SHR_KIND_IN), intent(OUT), optional :: prev_ymd ! Previous date YYYYMMDD - integer(SHR_KIND_IN), intent(OUT), optional :: prev_tod ! Previous time of day (s) - integer(SHR_KIND_IN), intent(OUT), optional :: start_ymd ! Starting date YYYYMMDD - integer(SHR_KIND_IN), intent(OUT), optional :: start_tod ! Starting time-of-day (s) - integer(SHR_KIND_IN), intent(OUT), optional :: StepNo ! Number of steps taken - integer(SHR_KIND_IN), intent(OUT), optional :: ref_ymd ! Reference date YYYYMMDD - integer(SHR_KIND_IN), intent(OUT), optional :: ref_tod ! Reference time-of-day (s) - integer(SHR_KIND_IN), intent(OUT), optional :: stop_ymd ! Stop date YYYYMMDD - integer(SHR_KIND_IN), intent(OUT), optional :: stop_tod ! Stop time-of-day (s) - integer(SHR_KIND_IN), intent(OUT), optional :: dtime ! Time-step (seconds) - integer(SHR_KIND_IN), intent(OUT), optional :: alarmcount ! Number of Valid Alarms - type(ESMF_Time), intent(OUT), optional :: ECurrTime ! Current ESMF time - real(SHR_KIND_R8) , intent(OUT), optional :: curr_cday ! current calendar day - real(SHR_KIND_R8) , intent(OUT), optional :: next_cday ! current calendar day - real(SHR_KIND_R8) , intent(OUT), optional :: curr_time ! time interval between current time - ! and reference date - real(SHR_KIND_R8) , intent(OUT), optional :: prev_time ! time interval between previous time - ! and reference date - character(len=*) , intent(OUT), optional :: calendar ! calendar type - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_EClockGetData) ' - type(ESMF_Time) :: CurrentTime ! Current time - type(ESMF_Time) :: PreviousTime ! Previous time - type(ESMF_Time) :: StartTime ! Start time - type(ESMF_Time) :: StopTime ! Stop time - type(ESMF_Time) :: RefTime ! Ref time - type(ESMF_TimeInterval) :: timeStep ! Clock, time-step - type(ESMF_TimeInterval) :: timediff ! Used to calculate curr_time - integer(SHR_KIND_IN) :: rc ! Return code - integer(SHR_KIND_I8) :: advSteps ! Number of time-steps that have advanced - integer(SHR_KIND_IN) :: yy, mm, dd, sec ! Return time values - integer(SHR_KIND_IN) :: ymd ! Date (YYYYMMDD) - integer(SHR_KIND_IN) :: tod ! time of day (sec) - integer(SHR_KIND_IN) :: ldtime ! local dtime - integer(SHR_KIND_IN) :: days ! number of whole days in time interval - integer(SHR_KIND_IN) :: seconds ! number of seconds in time interval - integer(SHR_KIND_IN) :: acount ! number of valid alarms - real(SHR_KIND_R8) :: doy, tmpdoy ! day of year - real(SHR_KIND_R8),parameter :: c1 = 1.0_SHR_KIND_R8 - - type(ESMF_Time) :: tmpTime ! tmp time, needed for next_cday - type(ESMF_TimeInterval) :: tmpDTime ! tmp time interval, needed for next_cday - - !------------------------------------------------------------------------------- - ! 
Notes: - !------------------------------------------------------------------------------- - if (present(calendar)) calendar = trim(seq_timemgr_calendar) - - call ESMF_ClockGet( EClock, currTime=CurrentTime, & - advanceCount=advSteps, prevTime=previousTime, TimeStep=timeStep, & - startTime=StartTime, stopTime=stopTime, refTime=RefTime, & - AlarmCount=acount, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_ClockGet" ) - - call ESMF_TimeGet( CurrentTime, yy=yy, mm=mm, dd=dd, s=sec, dayofyear_r8=doy, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_TimeGet" ) - call seq_timemgr_ETimeGet( CurrentTime, ymd=ymd, tod=tod ) - call ESMF_TimeIntervalGet( timeStep, s=ldtime, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_TimeIntervalGet" ) - - if ( present(curr_yr) ) curr_yr = yy - if ( present(curr_mon) ) curr_mon = mm - if ( present(curr_day) ) curr_day = dd - if ( present(curr_tod) ) curr_tod = tod - if ( present(curr_ymd) ) curr_ymd = ymd - if ( present(ECurrTime)) ECurrTime= CurrentTime - if ( present(StepNo) ) StepNo = advSteps - if ( present(dtime) ) dtime = ldtime - if ( present(curr_cday)) curr_cday = doy - if ( present(alarmcount)) alarmcount = acount - if ( present(next_cday)) then - call ESMF_TimeSet(tmpTime, yy=yy, mm=mm, dd=dd, s=tod, calendar=seq_timemgr_cal, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from TimeSet tmpTime") - call ESMF_TimeIntervalSet( tmpDTime, d=1, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from TimeIntSet tmpDTime") - tmpTime = tmpTime + tmpDTime - call ESMF_TimeGet(tmpTime, dayOfYear_r8=tmpdoy, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from TimeGet tmpdoy") - next_cday = tmpdoy - endif - - ! ---Current Time (the time interval between the current date and the reference date) --- - if ( present(curr_time)) then - timediff = CurrentTime - RefTime - call ESMF_TimeIntervalGet(timediff, d=days, s=seconds, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from TimeIntervalGet timediff") - curr_time = days + seconds/real(SecPerDay,SHR_KIND_R8) - end if - - ! ---Previous Time (the time interval between the previous date and the reference date) --- - if ( present(prev_time)) then - timediff = PreviousTime - RefTime - call ESMF_TimeIntervalGet(timediff, d=days, s=seconds, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from TimeIntervalGet timediff") - prev_time = days + seconds/real(SecPerDay,SHR_KIND_R8) - end if - - ! --- Previous time -------------------------------------------------------- - if ( present(prev_ymd) .or. present(prev_tod) )then - call seq_timemgr_ETimeGet( PreviousTime, ymd=ymd, tod=tod ) - if ( present(prev_ymd) ) prev_ymd = ymd - if ( present(prev_tod) ) prev_tod = tod - end if - - ! --- If want start date ----------------------------------------------- - if ( present(start_ymd) .or. present(start_tod) )then - call seq_timemgr_ETimeGet( StartTime, ymd=ymd, tod=tod ) - if ( present(start_ymd) ) start_ymd = ymd - if ( present(start_tod) ) start_tod = tod - end if - - ! --- If want stop date ----------------------------------------------- - if ( present(stop_ymd) .or. present(stop_tod) )then - call seq_timemgr_ETimeGet( stopTime, ymd=ymd, tod=tod ) - if ( present(stop_ymd) ) stop_ymd = ymd - if ( present(stop_tod) ) stop_tod = tod - end if - - ! --- If want ref date ----------------------------------------------- - if ( present(ref_ymd) .or. 
present(ref_tod) )then - call seq_timemgr_ETimeGet( RefTime, ymd=ymd, tod=tod ) - if ( present(ref_ymd) ) ref_ymd = ymd - if ( present(ref_tod) ) ref_tod = tod - end if - - end subroutine seq_timemgr_EClockGetData - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_clockAdvance -- Advance the syncclock - ! - ! !DESCRIPTION: - ! - ! Advance this clock - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_clockAdvance( SyncClock, force_stop, force_stop_ymd, force_stop_tod ) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_timemgr_type), intent(INOUT) :: SyncClock ! Advancing clock - logical, optional, intent(in) :: force_stop ! force stop - integer, optional, intent(in) :: force_stop_ymd ! force stop ymd - integer, optional, intent(in) :: force_stop_tod ! force stop tod - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_clockAdvance) ' - integer :: n - type(ESMF_Time) :: NextAlarm ! Next restart alarm time - integer :: rc ! Return code - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - ! --- set datestop alarm to force_stop alarm --- - - do n = 1,max_clocks - call seq_timemgr_alarmSetOff(SyncClock%ECP(n)%EClock) - if (present(force_stop) .and. present(force_stop_ymd) .and. present(force_stop_tod)) then - if (force_stop) then - if (n == 1 .and. seq_comm_iamroot(CPLID)) then - write(logunit,*) subname,'force stop at ',force_stop_ymd, force_stop_tod - endif - if (force_stop_ymd < 0 .or. force_stop_tod < 0) then - call shr_sys_abort(subname//': force_stop_ymd, force_stop_tod invalid') - endif - seq_timemgr_end_restart = .true. - call seq_timemgr_ETimeInit(NextAlarm, force_stop_ymd, force_stop_tod, "optDate") - CALL ESMF_AlarmSet( SyncClock%EAlarm(n,seq_timemgr_nalarm_datestop), & - name = trim(seq_timemgr_alarm_datestop), & - RingTime=NextAlarm, & - rc=rc ) - endif - endif - enddo - - ! --- advance driver clock and all driver alarms --- - - call ESMF_ClockAdvance( SyncClock%ECP(seq_timemgr_nclock_drv)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from drv ESMF_ClockAdvance") - - ! 
--- advance other clocks if driver component run alarm is ringing --- - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_atmrun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_atm)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from atm ESMF_ClockAdvance") - endif - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_lndrun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_lnd)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from lnd ESMF_ClockAdvance") - endif - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_rofrun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_rof)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from rof ESMF_ClockAdvance") - endif - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_ocnrun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_ocn)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from ocn ESMF_ClockAdvance") - endif - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_icerun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_ice)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from ice ESMF_ClockAdvance") - endif - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_glcrun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_glc)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from glc ESMF_ClockAdvance") - endif - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_wavrun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_wav)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from wav ESMF_ClockAdvance") - endif - - if (ESMF_AlarmIsRinging(SyncClock%EAlarm(seq_timemgr_nclock_drv,seq_timemgr_nalarm_esprun))) then - call ESMF_ClockAdvance(SyncClock%ECP(seq_timemgr_nclock_esp)%EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, msg=subname//"Error from esp ESMF_ClockAdvance") - endif - - ! Special handling of ESP component if linked to pause cycles - if (seq_timemgr_esp_run_on_pause) then - ! We need to figure out if any pause clock is ringing - call seq_timemgr_alarmSetOff(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock,seq_timemgr_alarm_esprun) - if (seq_timemgr_alarmIsOn(SyncClock%ECP(seq_timemgr_pause_sig_index)%EClock,seq_timemgr_alarm_pause)) then - call seq_timemgr_alarmSetOn(SyncClock%ECP(seq_timemgr_nclock_drv)%EClock,seq_timemgr_alarm_esprun) - end if - end if - - ! Special handling of restart alarm if end_restart is .true. - if (seq_timemgr_end_restart) then - do n = 1,max_clocks - if (seq_timemgr_alarmIsOn(SyncClock%ECP(n)%EClock,seq_timemgr_alarm_stop) .or. & - seq_timemgr_alarmIsOn(SyncClock%ECP(n)%EClock,seq_timemgr_alarm_datestop)) then - call seq_timemgr_alarmSetOn(SyncClock%ECP(n)%EClock,seq_timemgr_alarm_restart) - endif - enddo - endif - - end subroutine seq_timemgr_clockAdvance - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_alarmInit -- Set an alarm - ! - ! !DESCRIPTION: - ! - ! Setup an alarm in a clock - ! - ! 
!INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_alarmInit( EClock, EAlarm, option, opt_n, opt_ymd, opt_tod, RefTime, cplTimeStep, alarmname) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(INOUT) :: EClock ! clock - type(ESMF_Alarm) , intent(INOUT) :: EAlarm ! alarm - character(len=*) , intent(IN) :: option ! alarm option - integer(SHR_KIND_IN),optional, intent(IN) :: opt_n ! alarm freq - integer(SHR_KIND_IN),optional, intent(IN) :: opt_ymd ! alarm ymd - integer(SHR_KIND_IN),optional, intent(IN) :: opt_tod ! alarm tod (sec) - type(ESMF_TimeInterval), optional, intent(IN) :: CplTimeStep ! coupler timestep for nstep alarm option - type(ESMF_Time) ,optional, intent(IN) :: RefTime ! ref time - character(len=*) ,optional, intent(IN) :: alarmname ! alarm name - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_alarmInit): ' - integer :: rc ! Return code - integer :: lymd ! local ymd - integer :: ltod ! local tod - integer :: cyy,cmm,cdd,csec ! time info - integer :: nyy,nmm,ndd,nsec ! time info - character(len=64) :: lalarmname ! local alarm name - logical :: update_nextalarm ! update next alarm - type(ESMF_Time) :: CurrTime ! Current Time - type(ESMF_Time) :: NextAlarm ! Next restart alarm time - type(ESMF_TimeInterval) :: AlarmInterval ! Alarm interval - - !------------------------------------------------------------------------------- - ! Notes: This is slightly screwed up because of the way the ESMF alarm - ! initializes. The ringtime sent to AlarmCreate MUST be the next - ! alarm time. If you send an arbitrary but proper ringtime from - ! the past and the ring interval, the alarm will always go off on - ! the next clock advance and this will cause serious problems. - ! So, even if it makes sense to initialize an alarm with some - ! reference time and the alarm interval, that reference time has - ! to be advance forward to be >= the current time. In the logic - ! below, we set an appropriate "NextAlarm" and then we make sure - ! to advance it properly based on the ring interval. - !------------------------------------------------------------------------------- - - lalarmname = 'alarm_unknown' - if (present(alarmname)) then - lalarmname = trim(alarmname) - endif - - ltod = 0 - if (present(opt_tod)) then - ltod = opt_tod - endif - - lymd = -1 - if (present(opt_ymd)) then - lymd = opt_ymd - endif - - call ESMF_ClockGet(EClock, CurrTime=CurrTime, rc=rc) - call ESMF_TimeGet(CurrTime, yy=cyy, mm=cmm, dd=cdd, s=csec, rc=rc ) - - ! --- initial guess of next alarm, this will be updated below --- - if (present(RefTime)) then - NextAlarm = RefTime - else - NextAlarm = CurrTime - endif - call ESMF_TimeGet(CurrTime, yy=nyy, mm=nmm, dd=ndd, s=nsec, rc=rc ) - - update_nextalarm = .true. - - selectcase (trim(option)) - - case (seq_timemgr_optNONE) - !--- tcx seems we need an alarm interval or the alarm create fails, - !--- problem in esmf_wrf_timemgr? - call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - call ESMF_TimeSet( NextAlarm, yy=9999, mm=12, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - update_nextalarm = .false. - - case (seq_timemgr_optNever) - !--- tcx seems we need an alarm interval or the alarm create fails, - !--- problem in esmf_wrf_timemgr? - call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - call ESMF_TimeSet( NextAlarm, yy=9999, mm=12, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - update_nextalarm = .false. 
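The initialization note above (and the `update_nextalarm` loop further down in this routine) comes down to a single rule: the ring time handed to the alarm constructor must already lie in the future of the current clock time, otherwise the alarm rings on the very next clock advance. A minimal Python sketch of that rule, using plain integer seconds in place of ESMF time objects; the helper name `next_ring_time` is illustrative and not part of CIME:

```python
# Minimal sketch (not CIME code) of the "next ring time" rule described in the
# seq_timemgr_alarmInit notes: step back one interval from the reference time,
# then walk forward in whole intervals until the candidate is past "now".

def next_ring_time(ref_time: int, interval: int, curr_time: int) -> int:
    """Return the first ring time strictly after curr_time, on the grid
    defined by ref_time plus whole multiples of interval (all in seconds)."""
    candidate = ref_time - interval
    while candidate <= curr_time:
        candidate += interval
    return candidate

# Example: reference at t=0, hourly interval, current time t=7200 s.
# The alarm should next ring at t=10800 s rather than immediately.
assert next_ring_time(0, 3600, 7200) == 10800
```

The step back by one interval before the forward walk mirrors the Fortran loop exactly, so a reference time equal to the current time still yields a ring time one full interval in the future.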
- - case (seq_timemgr_optDate) - !--- tcx seems we need an alarm interval or the alarm create fails, - !--- problem in esmf_wrf_timemgr? - call ESMF_TimeIntervalSet(AlarmInterval, yy=9999, rc=rc) - if (.not. present(opt_ymd)) call shr_sys_abort(subname//trim(option)//' requires opt_ymd') - if (lymd < 0 .or. ltod < 0) then - call shr_sys_abort(subname//trim(option)//'opt_ymd, opt_tod invalid') - endif - call seq_timemgr_ETimeInit(NextAlarm, lymd, ltod, "optDate") - update_nextalarm = .false. - - case (seq_timemgr_optIfdays0) - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (.not. present(opt_ymd)) call shr_sys_abort(subname//trim(option)//' requires opt_ymd') - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=opt_n, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optNSteps) - if (present(CplTimeStep)) then - AlarmInterval = CplTimeStep - else - call shr_sys_abort(subname//trim(option)//' requires CplTimeStep') - endif - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNStep) - if (present(CplTimeStep)) then - AlarmInterval = CplTimeStep - else - call shr_sys_abort(subname//trim(option)//' requires CplTimeStep') - endif - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNSeconds) - call ESMF_TimeIntervalSet(AlarmInterval, s=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNSecond) - call ESMF_TimeIntervalSet(AlarmInterval, s=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNMinutes) - call ESMF_TimeIntervalSet(AlarmInterval, s=60, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNMinute) - call ESMF_TimeIntervalSet(AlarmInterval, s=60, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNHours) - call ESMF_TimeIntervalSet(AlarmInterval, s=3600, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNHour) - call ESMF_TimeIntervalSet(AlarmInterval, s=3600, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - - case (seq_timemgr_optNDays) - call 
ESMF_TimeIntervalSet(AlarmInterval, d=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - ! call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=cdd, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optNDay) - call ESMF_TimeIntervalSet(AlarmInterval, d=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - ! call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=cdd, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optNMonths) - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - ! call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optNMonth) - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - ! call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optMonthly) - call ESMF_TimeIntervalSet(AlarmInterval, mm=1, rc=rc) - call ESMF_TimeSet( NextAlarm, yy=cyy, mm=cmm, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optNYears) - call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - ! call ESMF_TimeSet( NextAlarm, yy=cyy, mm=1, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optNYear) - call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - if (.not.present(opt_n)) call shr_sys_abort(subname//trim(option)//' requires opt_n') - if (opt_n <= 0) call shr_sys_abort(subname//trim(option)//' invalid opt_n') - AlarmInterval = AlarmInterval * opt_n - ! call ESMF_TimeSet( NextAlarm, yy=cyy, mm=1, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optYearly) - call ESMF_TimeIntervalSet(AlarmInterval, yy=1, rc=rc) - call ESMF_TimeSet( NextAlarm, yy=cyy, mm=1, dd=1, s=0, calendar=seq_timemgr_cal, rc=rc ) - - case (seq_timemgr_optEnd) - call shr_sys_abort(subname//'deprecated option '//trim(option)) - - case default - call shr_sys_abort(subname//'unknown option '//trim(option)) - - end select - - ! -------------------------------------------------------------------------------- - ! --- AlarmInterval and NextAlarm should be set --- - ! -------------------------------------------------------------------------------- - - ! --- advance Next Alarm so it won't ring on first timestep for - ! --- most options above. 
go back one alarminterval just to be careful - - if (update_nextalarm) then - NextAlarm = NextAlarm - AlarmInterval - do while (NextAlarm <= CurrTime) - NextAlarm = NextAlarm + AlarmInterval - enddo - endif - - EAlarm = ESMF_AlarmCreate( name=lalarmname, clock=EClock, ringTime=NextAlarm, & - ringInterval=AlarmInterval, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmCreate" ) - - end subroutine seq_timemgr_AlarmInit - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_alarmGet -- Get information from the alarm - ! - ! !DESCRIPTION: - ! - ! Get various values from the alarm. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_alarmGet( EAlarm, next_ymd, next_tod, prev_ymd, prev_tod, & - IntSec, IntMon, IntYrs, name) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Alarm) , intent(INOUT) :: EAlarm ! Input Alarm object - integer(SHR_KIND_IN), intent(OUT), optional :: next_ymd ! alarm date yyyymmdd - integer(SHR_KIND_IN), intent(OUT), optional :: next_tod ! alarm tod sec - integer(SHR_KIND_IN), intent(OUT), optional :: prev_ymd ! alarm date yyyymmdd - integer(SHR_KIND_IN), intent(OUT), optional :: prev_tod ! alarm tod sec - integer(SHR_KIND_IN), intent(OUT), optional :: IntSec ! alarm int sec - integer(SHR_KIND_IN), intent(OUT), optional :: IntMon ! alarm int mon - integer(SHR_KIND_IN), intent(OUT), optional :: IntYrs ! alarm int yrs - character(len=*) , intent(OUT), optional :: name ! alarm name - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_alarmGet) ' - integer :: yy, mm, dd, sec ! Return time values - integer :: ymd ! Date (YYYYMMDD) - integer :: tod ! time of day (sec) - integer :: rc ! error code - type(ESMF_TimeInterval) :: alarmInterval ! Alarm interval - type(ESMF_Time) :: ringTime ! Next alarm ring time - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - if (present(name)) then - call ESMF_AlarmGet( EAlarm, name=name, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmGet name" ) - endif - - call ESMF_AlarmGet( EAlarm, RingTime=RingTime, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmGet RingTime" ) - call seq_timemgr_ETimeGet( RingTime, ymd=ymd, tod=tod) - if ( present(next_ymd) ) next_ymd = ymd - if ( present(next_tod) ) next_tod = tod - - call ESMF_AlarmGet( EAlarm, PrevRingTime=RingTime, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmGet PrevRingTime") - call seq_timemgr_ETimeGet( RingTime, ymd=ymd, tod=tod) - if ( present(prev_ymd) ) prev_ymd = ymd - if ( present(prev_tod) ) prev_tod = tod - - yy = 0 - mm = 0 - dd = 0 - sec = 0 - call ESMF_AlarmGet( EAlarm, RingInterval=AlarmInterval, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmGet RingInterval") - call ESMF_TimeIntervalGet( alarmInterval, yy=yy, mm=mm, d=dd, s=sec, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_TimeIntervalGet" ) - sec = sec + dd*(SecPerDay) - - ! 
--- If want restart next interval information ------------------------- - if ( present(IntSec) ) IntSec = sec - if ( present(IntMon) ) IntMon = mm - if ( present(IntYrs) ) IntYrs = yy - - end subroutine seq_timemgr_alarmGet - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_alarmSetOn -- turn alarm on - ! - ! !DESCRIPTION: - ! - ! turn alarm on - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_AlarmSetOn( EClock, alarmname) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(INOUT) :: EClock ! clock/alarm - character(len=*), intent(IN), optional :: alarmname ! alarmname - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_alarmSetOn) ' - character(len=*), parameter :: xalarm = 'unset' - integer :: n - integer :: rc - logical :: found - logical :: set - character(len=64) :: name - type(ESMF_Alarm),pointer :: EAlarm_list(:) - integer(SHR_KIND_IN) :: AlarmCount ! Number of valid alarms - - !------------------------------------------------------------------------------- - ! Notes: The Alarm_list is returned and only a subset of the alarms may - ! be initialized. In the esmf_wrf_timemgr, numalarms is not used internally, - ! and the alarm pointer is valid if it's associated. If it's not associated - ! the AlarmGet calls will generally return an error code. What we really - ! want is to ignore the unset alarms. So below, we have to kind of kludge - ! this up. We set name=xalarm, a special value, before the AlarmGet call so - ! if Alarm_list(n) is not associated, the name will remain the value of - ! xalarm. Then we check whether it's a valid alarm by first checking - ! the name vs xalarm. If name is not xalarm, then it must be a valid alarm - ! and we either set found to true if we are setting all alarms or we compare - ! the name returned to the alarm name we're looking for and only set found - ! to true if the names match. - !------------------------------------------------------------------------------- - - set = .false. - - call seq_timemgr_EClockGetData(EClock, AlarmCount=AlarmCount) -#ifdef USE_ESMF_LIB - allocate(EAlarm_list(AlarmCount)) - call ESMF_ClockGetAlarmList(EClock, alarmListFlag=ESMF_ALARMLIST_ALL, & - alarmList=EAlarm_list, alarmCount=AlarmCount, rc=rc) -#else - call ESMF_ClockGetAlarmList(EClock, EAlarm_list, rc=rc) -#endif - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_ClockGetAlarmList" ) - do n = 1,AlarmCount - found = .false. - if (present(alarmname)) then - call ESMF_AlarmGet(EAlarm_list(n), name=name) - if (trim(name) == trim(alarmname)) found = .true. - else - found = .true. - endif - if (found) then - set = .true. - call ESMF_AlarmRingerOn( EAlarm_list(n), rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmRingerOn" ) - endif - enddo - - if (present(alarmname) .and. .not. set) then - write(logunit,*) subname,' ERROR in alarmname ',trim(alarmname) - call shr_sys_abort() - endif -#ifdef USE_ESMF_LIB - deallocate(EAlarm_list) -#endif - - end subroutine seq_timemgr_AlarmSetOn - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_alarmSetOff -- turn alarm off - ! - ! !DESCRIPTION: - ! - ! turn alarm off - ! - ! 
!INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_AlarmSetOff( EClock, alarmname) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(INOUT) :: EClock ! clock/alarm - character(len=*), intent(IN), optional :: alarmname ! alarmname - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_alarmSetOff) ' - character(len=*), parameter :: xalarm = 'unset' - integer :: n - integer :: rc - logical :: found - logical :: set - character(len=64) :: name - type(ESMF_Alarm),pointer :: EAlarm_list(:) - integer(SHR_KIND_IN) :: AlarmCount ! Number of valid alarms - - !------------------------------------------------------------------------------- - ! Notes: The Alarm_list is returned and only a subset of the alarms may - ! be initialized. In the esmf_wrf_timemgr, numalarms is not used internally, - ! and the alarm pointer is valid if it's associated. If it's not associated - ! the AlarmGet calls will generally return an error code. What we really - ! want is to ignore the unset alarms. So below, we have to kind of kludge - ! this up. We set name=xalarm, a special value, before the AlarmGet call so - ! if Alarm_list(n) is not associated, the name will remain the value of - ! xalarm. Then we check whether it's a valid alarm by first checking - ! the name vs xalarm. If name is not xalarm, then it must be a valid alarm - ! and we either set found to true if we are setting all alarms or we compare - ! the name returned to the alarm name we're looking for and only set found - ! to true if the names match. - !------------------------------------------------------------------------------- - - set = .false. - - call seq_timemgr_EClockGetData(EClock, AlarmCount=AlarmCount) -#ifdef USE_ESMF_LIB - allocate(EAlarm_list(AlarmCount)) - call ESMF_ClockGetAlarmList(EClock, alarmListFlag=ESMF_ALARMLIST_ALL, & - alarmList=EAlarm_list, alarmCount=AlarmCount, rc=rc) -#else - call ESMF_ClockGetAlarmList(EClock, EAlarm_list, rc=rc) -#endif - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_ClockGetAlarmList" ) - do n = 1,AlarmCount - found = .false. - if (present(alarmname)) then - call ESMF_AlarmGet(EAlarm_list(n), name=name) - if (trim(name) == trim(alarmname)) found = .true. - else - found = .true. - endif - if (found) then - set = .true. - call ESMF_AlarmRingerOff( EAlarm_list(n), rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmRingerOff" ) - endif - enddo - - if (present(alarmname) .and. .not. set) then - write(logunit,*) subname,' ERROR in alarmname ',trim(alarmname) - call shr_sys_abort() - endif -#ifdef USE_ESMF_LIB - deallocate(EAlarm_list) -#endif - - end subroutine seq_timemgr_AlarmSetOff - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_alarmIsOn -- check if an alarm is ringing - ! - ! !DESCRIPTION: - ! - ! check if an alarm is ringing - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_alarmIsOn( EClock, alarmname) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(IN) :: EClock ! clock/alarm - character(len=*), intent(IN) :: alarmname ! 
which alarm - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_alarmIsOn) ' - character(len=*), parameter :: xalarm = 'unset' - integer :: n - integer :: rc - logical :: found - character(len=64) :: name - type(ESMF_Time) :: ETime1, ETime2 - type(ESMF_Alarm),pointer :: EAlarm_list(:) - integer(SHR_KIND_IN) :: AlarmCount ! Number of valid alarms - - !------------------------------------------------------------------------------- - ! Notes: Because of the esmf_wrf_timemgr implementation with regards to - ! valid alarms in the alarm_list, we initialize name to xalarm before - ! querying the alarm name, and if the alarm is not valid, name will not - ! be updated and we can tell that the alarm is not valid and we should - ! just ignore it. - ! Use found to verify alarm was valid. If not, abort - !------------------------------------------------------------------------------- - - seq_timemgr_alarmIsOn = .false. - found = .false. - - call seq_timemgr_EClockGetData(EClock, AlarmCount=AlarmCount) -#ifdef USE_ESMF_LIB - allocate(EAlarm_list(AlarmCount)) - call ESMF_ClockGetAlarmList(EClock, alarmListFlag=ESMF_ALARMLIST_ALL, & - alarmList=EAlarm_list, alarmCount=AlarmCount, rc=rc) -#else - call ESMF_ClockGetAlarmList(EClock, EAlarm_list, rc=rc) -#endif - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_ClockGetAlarmList" ) - do n = 1,AlarmCount - name = trim(xalarm) - call ESMF_AlarmGet(EAlarm_list(n), name=name) - if (trim(name) == trim(alarmname)) then - found = .true. - seq_timemgr_alarmIsOn = ESMF_AlarmIsRinging(alarm=EAlarm_list(n),rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname// & - "Error from ESMF_AlarmIsRinging" ) - ! --- make sure the datestop will always stop with dates >= stop_date - if (trim(alarmname) == trim(seq_timemgr_alarm_datestop)) then - call ESMF_ClockGet(EClock, CurrTime = ETime1, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_ClockGet CurrTime" ) - call ESMF_AlarmGet(EAlarm_list(n), RingTime = ETime2, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_AlarmGet RingTime" ) - if (ETime1 >= ETime2) seq_timemgr_alarmIsOn = .true. - endif - endif - enddo - - if (.not.found) then - write(logunit,*) subname//': ERROR alarm not valid for EClock '//trim(alarmname) - call shr_sys_abort( subname//'ERROR: alarm invalid '//trim(alarmname) ) - endif -#ifdef USE_ESMF_LIB - deallocate(EAlarm_list) -#endif - - end function seq_timemgr_alarmIsOn - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_restartAlarmIsOn -- check if an alarm is ringing - ! - ! !DESCRIPTION: - ! - ! check if an alarm is ringing - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_restartAlarmIsOn( EClock) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_restartAlarmIsOn) ' - - !------------------------------------------------------------------------------- - ! 
Notes: - !------------------------------------------------------------------------------- - - seq_timemgr_restartAlarmIsOn = & - seq_timemgr_alarmIsOn(EClock, alarmname=seq_timemgr_alarm_restart) - - end function seq_timemgr_restartAlarmIsOn - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_stopAlarmIsOn -- check if an alarm is ringing - ! - ! !DESCRIPTION: - ! - ! check if an alarm is ringing - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_stopAlarmIsOn( EClock) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_stopAlarmIsOn) ' - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - seq_timemgr_stopAlarmIsOn = & - seq_timemgr_alarmIsOn(EClock, alarmname=seq_timemgr_alarm_stop) - - end function seq_timemgr_stopAlarmIsOn - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_historyAlarmIsOn -- check if an alarm is ringing - ! - ! !DESCRIPTION: - ! - ! check if an alarm is ringing - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_historyAlarmIsOn( EClock) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_historyAlarmIsOn) ' - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - seq_timemgr_historyAlarmIsOn = & - seq_timemgr_alarmIsOn(EClock, alarmname=seq_timemgr_alarm_history) - - end function seq_timemgr_historyAlarmIsOn - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_pauseAlarmIsOn -- check if an alarm is ringing - ! - ! !DESCRIPTION: - ! - ! check if an alarm is ringing - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_pauseAlarmIsOn( EClock) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock) , intent(IN) :: EClock ! clock/alarm - - !EOP - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_pauseAlarmIsOn) ' - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - seq_timemgr_pauseAlarmIsOn = & - seq_timemgr_alarmIsOn(EClock, alarmname=seq_timemgr_alarm_pause) - - end function seq_timemgr_pauseAlarmIsOn - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_pause_active -- Is pause/resume active this run? - ! - ! !DESCRIPTION: - ! - ! Return .true. if any component is configured for pause/resume - ! - ! 
!INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_pause_active() - - ! !INPUT/OUTPUT PARAMETERS: - - !EOP - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - seq_timemgr_pause_active = ANY(pause_active) - - end function seq_timemgr_pause_active - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_pause_component_index -- return an index for a component - ! - ! !DESCRIPTION: - ! - ! Look up a component's internal index for faster processing - ! - ! !INTERFACE: ------------------------------------------------------------------ - - integer function seq_timemgr_pause_component_index(component_name) - - ! !INPUT/OUTPUT PARAMETERS: - - character(len=*), intent(IN) :: component_name - - !EOP - - !----- local ----- - integer :: ind - character(len=*), parameter :: subname = '(seq_timemgr_pause_component_index) ' - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - seq_timemgr_pause_component_index = 0 - do ind = 1, max_clocks - if (trim(component_name) == trim(seq_timemgr_clocks(ind))) then - seq_timemgr_pause_component_index = ind - exit - end if - end do - if (seq_timemgr_pause_component_index < 1) then - if (trim(component_name) == 'cpl') then - seq_timemgr_pause_component_index = seq_timemgr_nclock_drv - end if - end if - if (seq_timemgr_pause_component_index < 1) then - call shr_sys_abort(subname//': No index for component '//trim(component_name)) - end if - - end function seq_timemgr_pause_component_index - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_pause_component_active -- Check if component paused - ! - ! !DESCRIPTION: - ! - ! Return .true. if component is active in driver pause - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_pause_component_active(component_index) - - ! !INPUT/OUTPUT PARAMETERS: - - integer, intent(IN) :: component_index - - !EOP - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_pause_component_active) ' - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - if ((component_index < 1) .or. (component_index > max_clocks)) then - call shr_sys_abort(subname//': component_index out of range') - end if - seq_timemgr_pause_component_active = pause_active(component_index) - - end function seq_timemgr_pause_component_active - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_data_assimilation_active -- Check if component paused - ! - ! !DESCRIPTION: - ! - ! Return .true. if component is active in driver pause - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_data_assimilation_active(component_ntype) - - ! 
!INPUT/OUTPUT PARAMETERS: - - character(len=3), intent(IN) :: component_ntype - - !EOP - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_data_assimilation_active) ' - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - select case(component_ntype) - case ('atm') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_atm) - case ('cpl') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_drv) - case ('ocn') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_ocn) - case ('wav') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_wav) - case ('glc') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_glc) - case ('ice') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_ice) - case ('rof') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_rof) - case ('lnd') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_lnd) - case ('iac') - seq_timemgr_data_assimilation_active = data_assimilation_active(seq_timemgr_nclock_iac) - case ('esp') - seq_timemgr_data_assimilation_active = .FALSE. - case default - call shr_sys_abort(subname//': component_ntype, "'//component_ntype//'" not recognized"') - end select - - end function seq_timemgr_data_assimilation_active - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_ETimeInit -- Create ESMF_Time object based on YMD values - ! - ! !DESCRIPTION: - ! - ! Create the ESMF_Time object corresponding to the given input time, given in - ! YMD (Year Month Day) and TOD (Time-of-day) format. - ! Set the time by an integer as YYYYMMDD and integer seconds in the day - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_ETimeInit( ETime, ymd, tod, desc ) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Time) , intent(inout) :: ETime ! Time - integer , intent(in) :: ymd ! Year, month, day YYYYMMDD - integer , intent(in), optional :: tod ! Time of day in seconds - character(len=*), intent(in), optional :: desc ! Description of time to set - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_ETimeInit) ' - integer :: yr, mon, day ! Year, month, day as integers - integer :: ltod ! local tod - character(SHR_KIND_CL) :: ldesc ! local desc - integer :: rc ! return code - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - ltod = 0 - if (present(tod)) then - ltod = tod - endif - - ldesc = '' - if (present(desc)) then - ldesc = desc - endif - - if ( (ymd < 0) .or. (ltod < 0) .or. 
(ltod > SecPerDay) )then - write(logunit,*) subname//': ERROR yymmdd is a negative number or '// & - 'time-of-day out of bounds', ymd, ltod - call shr_sys_abort( subname//'ERROR: Bad input' ) - end if - - call shr_cal_date2ymd(ymd,yr,mon,day) - - call ESMF_TimeSet( ETime, yy=yr, mm=mon, dd=day, s=ltod, & - calendar=seq_timemgr_cal, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, subname//': error return from '// & - 'ESMF_TimeSet: setting '//trim(ldesc)) - - end subroutine seq_timemgr_ETimeInit - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_ETimeGet -- Get the date in YYYYMMDD from from ESMF Time - ! - ! !DESCRIPTION: - ! - ! Get the date in YYYYMMDD format from a ESMF time object. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_ETimeGet( ETime, offset, ymd, tod ) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Time), intent(IN) :: ETime ! Input ESMF time - integer, optional, intent(IN) :: offset ! Offset from input time (sec) - integer, optional, intent(OUT) :: ymd ! date of day - integer, optional, intent(OUT) :: tod ! Time of day - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_ETimeGet) ' - type(ESMF_Time) :: ETimeAdd ! ESMF time + offset - type(ESMF_TimeInterval) :: ETimeOff ! ESMF offset time-interval - integer :: year ! Year - integer :: month ! Month - integer :: day ! Day in month - integer :: sec ! Day in month - integer :: rc ! Return code - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - ETimeAdd = ETime - if ( present(offset) )then - if ( offset > 0 )then - call ESMF_TimeIntervalSet( ETimeOff, s=offset, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname// & - ": Error from ESMF_TimeIntervalSet" ) - ETimeAdd = ETime + ETimeOff - else if ( offset < 0 )then - call ESMF_TimeIntervalSet( ETimeOff, s=-offset, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname// & - ": Error from ESMF_TimeIntervalSet" ) - ETimeAdd = ETime - ETimeOff - end if - end if - - call ESMF_TimeGet( ETimeAdd, yy=year, mm=month, dd=day, s=sec, rc=rc ) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname// & - ": Error from ESMF_TimeGet" ) - - ! shr_cal has restrictions and then "stops", so override that - - if ( present(ymd) ) then - call shr_cal_ymd2date(year,month,day,ymd) - endif - if ( present(tod) ) then - tod = sec - endif - - end subroutine seq_timemgr_ETimeGet - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_EClockInit -- Initialize the ESMF clock in the shared clock - ! - ! !DESCRIPTION: - ! - ! Private method: - ! - ! Setup the ESMF clock inside the wrapped CIME clock - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_EClockInit( TimeStep, StartTime, RefTime, CurrTime, EClock ) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_TimeInterval), intent(IN) :: TimeStep ! Time-step of clock - type(ESMF_Time) , intent(IN) :: StartTime ! Start time - type(ESMF_Time) , intent(IN) :: RefTime ! Reference time - type(ESMF_Time) , intent(IN) :: CurrTime ! 
Current time - type(ESMF_Clock) , intent(OUT) :: EClock ! Output ESMF clock - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_EClockInit) ' - integer :: rc ! ESMF return code - character(len=SHR_KIND_CL) :: description ! Description of this clock - type(ESMF_Time) :: clocktime ! Current time - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - description = 'CIME shared Time-manager clock' - - ! ------ Create ESMF Clock with input characteristics ------------------- - ! --- NOTE: StopTime is required in interface but not used, so use ----- - ! --- something arbitrary. Stop handled via alarm ----- - - call seq_timemgr_ETimeInit(clocktime, 99990101, 0, "artificial stop date") - - EClock = ESMF_ClockCreate(name=trim(description), & - TimeStep=TimeStep, startTime=StartTime, & - refTime=RefTime, stopTime=clocktime, rc=rc) - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//': Error from ESMF_ClockCreate') - - ! ------ Advance clock to the current time (in case of a restart) ------- - call ESMF_ClockGet(EClock, currTime=clocktime, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, subname//': Error from ESMF_ClockGet') - do while( clocktime < CurrTime) - call ESMF_ClockAdvance( EClock, rc=rc ) - call seq_timemgr_ESMFCodeCheck(rc, subname//': Error from ESMF_ClockAdvance') - call ESMF_ClockGet( EClock, currTime=clocktime ) - call seq_timemgr_ESMFCodeCheck(rc, subname//': Error from ESMF_ClockGet') - end do - - if (clocktime /= CurrTime) then - if (loglevel > 0) write(logunit,*) trim(subname), & - ' : WARNING clocktime and currtime inconsistent' - endif - - end subroutine seq_timemgr_EClockInit - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_EClockDateInSync -- Check that input date in sync with clock - ! - ! !DESCRIPTION: - ! - ! Check that the given input date/time is in sync with clock time - ! - ! !INTERFACE: ------------------------------------------------------------------ - - logical function seq_timemgr_EClockDateInSync( EClock, ymd, tod, prev) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), intent(IN) :: Eclock ! Input clock to compare - integer, intent(IN) :: ymd ! Date (YYYYMMDD) - integer, intent(IN) :: tod ! Time of day (sec) - logical, optional,intent(IN) :: prev ! If should get previous time - - !----- local ----- - character(len=*), parameter :: subname = "(seq_timemgr_EClockDateInSync) " - type(ESMF_Time) :: ETime - integer :: ymd1 ! Date (YYYYMMDD) - integer :: tod1 ! Time of day - logical :: previous ! If need to get previous time for comparison - integer :: rc ! error code - - !------------------------------------------------------------------------------- - ! Notes: - !------------------------------------------------------------------------------- - - previous = .false. - if ( present(prev) )then - previous = prev - end if - - if (previous )then - call ESMF_ClockGet( EClock, prevTime=ETime, rc=rc) - else - call ESMF_ClockGet( EClock, currTime=ETime, rc=rc) - end if - call seq_timemgr_ETimeGet( ETime, ymd=ymd1, tod=tod1 ) - - ! --- If current dates agree return true -- else false - - if ( (ymd == ymd1) .and. (tod == tod1) )then - seq_timemgr_EClockDateInSync = .true. - else - seq_timemgr_EClockDateInSync = .false. 
- end if - - end function seq_timemgr_EClockDateInSync - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_clockPrint -- Print clock information out - ! - ! !DESCRIPTION: - ! - ! Print clock information out. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_clockPrint( SyncClock ) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(seq_timemgr_type), intent(in) :: SyncClock ! Input clock to print - - character(len=*), parameter :: subname = "(seq_timemgr_clockPrint) " - integer(SHR_KIND_IN) :: m,n - integer(SHR_KIND_IN) :: curr_ymd ! Current date YYYYMMDD - integer(SHR_KIND_IN) :: curr_tod ! Current time of day (s) - integer(SHR_KIND_IN) :: StepNo ! Number of steps taken - integer(SHR_KIND_IN) :: start_ymd ! Starting date YYYYMMDD - integer(SHR_KIND_IN) :: start_tod ! Starting time-of-day (s) - integer(SHR_KIND_IN) :: stop_ymd ! Stop date YYYYMMDD - integer(SHR_KIND_IN) :: stop_tod ! Stop time-of-day (s) - integer(SHR_KIND_IN) :: ref_ymd ! Reference date YYYYMMDD - integer(SHR_KIND_IN) :: ref_tod ! Reference time-of-day (s) - integer(SHR_KIND_IN) :: DTime ! Time-step (seconds) - integer(SHR_KIND_IN) :: prev_ymd ! Prev restart alarm date (YYYYMMDD) - integer(SHR_KIND_IN) :: prev_tod ! Prev restart alarm time-of-day (sec) - integer(SHR_KIND_IN) :: next_ymd ! Next restart alarm date (YYYYMMDD) - integer(SHR_KIND_IN) :: next_tod ! Next restart alarm time-of-day (sec) - integer(SHR_KIND_IN) :: IntSec ! Alarm interval for seconds - integer(SHR_KIND_IN) :: IntMon ! Alarm interval for months - integer(SHR_KIND_IN) :: IntYrs ! Alarm interval for years - integer(SHR_KIND_IN) :: AlarmCount ! Number of valid alarms - character(len=64) :: alarmname ! Alarm name - character(len=*), parameter :: xalarm = 'unset' - type(ESMF_Alarm),pointer :: EAlarm_list(:) ! EAlarm list associated with EClock - integer(SHR_KIND_IN) :: rc ! error code - - character(len=*), parameter :: F06 = "(2A,L3)" - character(len=*), parameter :: F07 = "(3A)" - character(len=*), parameter :: F08 = "(2A,I12.8,3x,I5.5)" - character(len=*), parameter :: F09 = "(2A,2I8,I12)" - character(len=*), parameter :: F10 = "(2A,I2,2x,A)" - - !------------------------------------------------------------------------------- - ! 
Notes: - !------------------------------------------------------------------------------- - - if (loglevel <= 0) return - - write(logunit,F07) subname,'calendar = ', trim(seq_timemgr_calendar) - write(logunit,F06) subname,'end_restart = ', seq_timemgr_end_restart - write(logunit,F07) '' - - do n = 1,max_clocks - call seq_timemgr_EClockGetData( SyncClock%ECP(n)%EClock, curr_ymd=curr_ymd, & - curr_tod=curr_tod, start_ymd=start_ymd, & - start_tod=start_tod, StepNo=StepNo, & - ref_ymd=ref_ymd, ref_tod=ref_tod, & - stop_ymd=stop_ymd, stop_tod=stop_tod, & - dtime = dtime, alarmcount=AlarmCount) -#ifdef USE_ESMF_LIB - allocate(EAlarm_list(AlarmCount)) - call ESMF_ClockGetAlarmList(SyncClock%ECP(n)%EClock, alarmListFlag=ESMF_ALARMLIST_ALL, & - alarmList=EAlarm_list, alarmCount=AlarmCount, rc=rc) -#else - call ESMF_ClockGetAlarmList(SyncClock%ECP(n)%EClock, EAlarm_list, rc=rc) -#endif - call seq_timemgr_ESMFCodeCheck( rc, msg=subname//"Error from ESMF_ClockGetAlarmList" ) - - write(logunit,F09) subname,"Clock = "//seq_timemgr_clocks(n),n - write(logunit,F08) subname," Start Time = ", start_ymd, start_tod - write(logunit,F08) subname," Curr Time = ", curr_ymd, curr_tod - write(logunit,F08) subname," Ref Time = ", ref_ymd, ref_tod - write(logunit,F08) subname," Stop Time = ", stop_ymd, stop_tod - write(logunit,F09) subname," Step number = ", StepNo - write(logunit,F09) subname," Dtime = ", DTime - - do m = 1,alarmCount - call seq_timemgr_alarmGet( EAlarm_list(m), & - next_ymd=next_ymd, next_tod=next_tod, prev_ymd=prev_ymd, prev_tod=prev_tod, & - IntSec=IntSec, IntMon=IntMon, IntYrs=IntYrs, name=alarmname ) - write(logunit,F10) subname," Alarm = ",m,trim(alarmname) - write(logunit,F08) subname," Prev Time = ", prev_ymd,prev_tod - write(logunit,F08) subname," Next Time = ", next_ymd,next_tod - write(logunit,F09) subname," Intervl yms = ", IntYrs,IntMon,IntSec - enddo - - write(logunit,*) '' -#ifdef USE_ESMF_LIB - deallocate(EAlarm_list) -#endif - enddo - - end subroutine seq_timemgr_clockPrint - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_ESMFDebug -- Print ESMF stuff for debugging - ! - ! !DESCRIPTION: - ! - ! Print ESMF stuff for debugging - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_ESMFDebug( EClock, ETime, ETimeInterval, istring ) - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - type(ESMF_Clock), optional, intent(in) :: EClock ! ESMF Clock - type(ESMF_Time) , optional, intent(inout) :: ETime ! ESMF Time - type(ESMF_TimeInterval), optional, intent(inout) :: ETimeInterval ! ESMF Time Interval - character(len=*), optional, intent(in) :: istring - - !----- local ----- - character(len=*), parameter :: subname = '(seq_timemgr_ESMFDebug) ' - character(len=128) :: timestring - integer :: yy,mm,dd,s ! ymds - type(ESMF_Time) :: LTime - type(ESMF_TimeInterval) :: LTimeInterval - integer(SHR_KIND_I8) :: LStep - integer :: rc ! return code - - !------------------------------------------------------------------------------- - ! 
Notes: - !------------------------------------------------------------------------------- - - if (present(ETime)) then - write(logunit,*) subname,' ETime ',trim(istring) - call ESMF_TimeGet(ETime, yy=yy,mm=mm,dd=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'ymds=',yy,mm,dd,s,trim(timestring) - endif - - if (present(ETimeInterval)) then - write(logunit,*) subname,' ETimeInterval ',trim(istring) - call ESMF_TimeIntervalGet(ETimeInterval, yy=yy,mm=mm,d=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'ymds=',yy,mm,dd,s,trim(timestring) - endif - - if (present(EClock)) then - write(logunit,*) subname,' EClock ',trim(istring) - call ESMF_ClockGet( EClock, StartTime=LTime ) - call ESMF_TimeGet(LTime, yy=yy,mm=mm,dd=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'start ymds=',yy,mm,dd,s,trim(timestring) - call ESMF_ClockGet( EClock, CurrTime=LTime ) - call ESMF_TimeGet(LTime, yy=yy,mm=mm,dd=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'curr ymds=',yy,mm,dd,s,trim(timestring) - call ESMF_ClockGet( EClock, StopTime=LTime ) - call ESMF_TimeGet(LTime, yy=yy,mm=mm,dd=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'stop ymds=',yy,mm,dd,s,trim(timestring) - call ESMF_ClockGet( EClock, PrevTime=LTime ) - call ESMF_TimeGet(LTime, yy=yy,mm=mm,dd=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'prev ymds=',yy,mm,dd,s,trim(timestring) - call ESMF_ClockGet( EClock, RefTime=LTime ) - call ESMF_TimeGet(LTime, yy=yy,mm=mm,dd=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'ref ymds=',yy,mm,dd,s,trim(timestring) - call ESMF_ClockGet( EClock, TimeStep=LTimeInterval ) - call ESMF_TimeIntervalGet(LTimeInterval, yy=yy,mm=mm,d=dd,s=s,timestring=timestring,rc=rc) - write(logunit,*) subname,rc,'tint ymds=',yy,mm,dd,s,trim(timestring) - call ESMF_ClockGet( EClock, AdvanceCount=LStep ) - write(logunit,*) subname,rc,'advcnt =',LStep - endif - - end subroutine seq_timemgr_ESMFDebug - - !=============================================================================== - !=============================================================================== - ! !IROUTINE: seq_timemgr_ESMFCodeCheck -- Check return-code from ESMF -- abort if not - ! - ! !DESCRIPTION: - ! - ! Check ESMF return code and abort if not successful. - ! - ! !INTERFACE: ------------------------------------------------------------------ - - subroutine seq_timemgr_ESMFCodeCheck( rc, msg ) - - ! !USES: - - implicit none - - ! !INPUT/OUTPUT PARAMETERS: - - integer, intent(in) :: rc ! return code from ESMF - character(len=*),optional,intent(in) :: msg ! error message - - character(len=*),parameter :: subname = 'seq_timemgr_ESMFCodeCheck' - !------------------------------------------------------------------------------- - ! 
Notes: - !------------------------------------------------------------------------------- - - if ( rc == ESMF_SUCCESS ) return - if ( present(msg)) then - write(logunit,*) trim(subname),' error= ',rc,trim(msg) - else - write(logunit,*) trim(subname),' error= ',rc - endif - call shr_sys_flush(logunit) - call shr_sys_abort(trim(subname)) - - end subroutine seq_timemgr_ESMFCodeCheck - - !=============================================================================== - !=============================================================================== - -end module seq_timemgr_mod diff --git a/src/drivers/mct/shr/shr_carma_mod.F90 b/src/drivers/mct/shr/shr_carma_mod.F90 deleted file mode 100644 index d06d783d747..00000000000 --- a/src/drivers/mct/shr/shr_carma_mod.F90 +++ /dev/null @@ -1,70 +0,0 @@ -!================================================================================ -! This reads the carma_inparm namelist in drv_flds_in and makes the relavent -! information available to CAM, CLM, and driver. The driver sets up CLM to CAM -! communication for the VOC flux fields. CLM needs to know what specific VOC -! fluxes need to be passed to the coupler and how to assimble the fluxes. -! CAM needs to know what specific VOC fluxes to expect from CLM. -! -! Mariana Vertenstein -- 24 Sep 2012 -!================================================================================ -module shr_carma_mod - - use shr_kind_mod,only : r8 => shr_kind_r8 - use shr_kind_mod,only : CL => SHR_KIND_CL, CX => SHR_KIND_CX, CS => SHR_KIND_CS - use shr_sys_mod, only : shr_sys_abort - use shr_log_mod, only : loglev => shr_log_Level - use shr_log_mod, only : logunit => shr_log_Unit - - implicit none - save - private - - public :: shr_carma_readnl ! reads carma_inparm namelist - -contains - - !------------------------------------------------------------------------- - ! This reads the carma_emis_nl namelist group in drv_flds_in and parses the - ! namelist information for the driver, CLM, and CAM. - !------------------------------------------------------------------------- - subroutine shr_carma_readnl( NLFileName, carma_fields ) - - use shr_nl_mod, only : shr_nl_find_group_name - use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit - - character(len=*), intent(in) :: NLFileName - character(len=CX), intent(out) :: carma_fields - - integer :: unitn ! namelist unit number - integer :: ierr ! error code - logical :: exists ! if file exists or not - character(*),parameter :: F00 = "('(shr_carma_readnl) ',2a)" - - namelist /carma_inparm/ carma_fields - - carma_fields = ' ' - - inquire( file=trim(NLFileName), exist=exists) - if ( exists ) then - unitn = shr_file_getUnit() - open( unitn, file=trim(NLFilename), status='old' ) - if ( loglev > 0 ) write(logunit,F00) & - 'Read in carma_inparm namelist from: ', trim(NLFilename) - - call shr_nl_find_group_name(unitn, 'carma_inparm', status=ierr) - ! If ierr /= 0, no namelist present. 
- - if (ierr == 0) then - read(unitn, carma_inparm, iostat=ierr) - if (ierr > 0) then - call shr_sys_abort( 'problem on read of carma_inparm namelist in shr_carma_readnl' ) - endif - end if - - close( unitn ) - call shr_file_freeUnit( unitn ) - end if - - end subroutine shr_carma_readnl - -endmodule shr_carma_mod diff --git a/src/drivers/mct/shr/shr_expr_parser_mod.F90 b/src/drivers/mct/shr/shr_expr_parser_mod.F90 deleted file mode 100644 index 4a70b88647c..00000000000 --- a/src/drivers/mct/shr/shr_expr_parser_mod.F90 +++ /dev/null @@ -1,185 +0,0 @@ -!============================================================================= -! expression parser utility -- -! for parsing simple linear mathematical expressions of the form -! X = a*Y + b*Z + ... -! -!============================================================================= -module shr_expr_parser_mod - use shr_kind_mod,only : r8 => shr_kind_r8 - use shr_kind_mod,only : cx => shr_kind_cx - - implicit none - private - - public :: shr_exp_parse ! parses simple strings which contain expressions - public :: shr_exp_item_t ! user defined type which contains an expression component - public :: shr_exp_list_destroy ! destroy the linked list returned by shr_exp_parse - - ! contains componets of expression - type shr_exp_item_t - character(len=64) :: name - character(len=64),pointer :: vars(:) => null() - real(r8) ,pointer :: coeffs(:) => null() - integer :: n_terms = 0 - type(shr_exp_item_t), pointer :: next_item => null() - end type shr_exp_item_t - -contains - - ! ----------------------------------------------------------------- - ! parses expressions provided in array of strings - ! ----------------------------------------------------------------- - function shr_exp_parse( exp_array, nitems ) result(exp_items_list) - - character(len=*), intent(in) :: exp_array(:) ! contains a expressions - integer, optional, intent(out) :: nitems ! number of expressions parsed - type(shr_exp_item_t), pointer :: exp_items_list ! linked list of items returned - - integer :: i,j, jj, nmax, nterms, n_exp_items - character(len=cx) :: tmp_str - type(shr_exp_item_t), pointer :: exp_item, list_item - - nullify( exp_items_list ) - nullify( exp_item ) - nullify( list_item ) - - n_exp_items = 0 - nmax = size( exp_array ) - - do i = 1,nmax - if (len_trim(exp_array(i))>0) then - - j = scan( exp_array(i), '=' ) - - if ( j>0 ) then - - n_exp_items = n_exp_items + 1 - - allocate( exp_item ) - exp_item%n_terms = 0 - exp_item%name = trim(adjustl(exp_array(i)(:j-1))) - - tmp_str = trim(adjustl(exp_array(i)(j+1:))) - - nterms = 1 - jj = scan( tmp_str, '+' ) - do while(jj>0) - nterms = nterms + 1 - tmp_str = tmp_str(jj+1:) - jj = scan( tmp_str, '+' ) - enddo - - allocate( exp_item%vars(nterms) ) - allocate( exp_item%coeffs(nterms) ) - - tmp_str = trim(adjustl(exp_array(i)(j+1:))) - - j = scan( tmp_str, '+' ) - - if (j>0) then - call set_coefvar( tmp_str(:j-1), exp_item ) - tmp_str = tmp_str(j-1:) - else - call set_coefvar( tmp_str, exp_item ) - endif - - else - - tmp_str = trim(adjustl(exp_array(i))) ! assumed to begin with '+' - - endif - - ! at this point tmp_str begins with '+' - j = scan( tmp_str, '+' ) - - if (j>0) then - - ! remove the leading + ... 
- tmp_str = tmp_str(j+1:) - j = scan( tmp_str, '+' ) - - do while(j>0) - - call set_coefvar( tmp_str(:j-1), exp_item ) - - tmp_str = tmp_str(j+1:) - j = scan( tmp_str, '+' ) - - enddo - - call set_coefvar( tmp_str, exp_item ) - - endif - - - if (associated(exp_item)) then - if (associated(exp_items_list)) then - list_item => exp_items_list - do while(associated(list_item%next_item)) - list_item => list_item%next_item - enddo - list_item%next_item => exp_item - else - exp_items_list => exp_item - endif - endif - - endif - enddo - - if ( present(nitems) ) then - nitems = n_exp_items - endif - - end function shr_exp_parse - - ! ----------------------------------------------------------------- - ! deallocates memory occupied by linked list - ! ----------------------------------------------------------------- - subroutine shr_exp_list_destroy( list ) - type(shr_exp_item_t), pointer, intent(inout) :: list - - type(shr_exp_item_t), pointer :: item, next - - item => list - do while(associated(item)) - next => item%next_item - if (associated(item%vars)) then - deallocate(item%vars) - nullify(item%vars) - deallocate(item%coeffs) - nullify(item%coeffs) - endif - deallocate(item) - nullify(item) - item => next - enddo - - end subroutine shr_exp_list_destroy - - !========================== - ! Private Methods - - ! ----------------------------------------------------------------- - ! ----------------------------------------------------------------- - subroutine set_coefvar( term, item ) - character(len=*), intent(in) :: term - type(shr_exp_item_t) , intent(inout) :: item - - integer :: k, n - - item%n_terms = item%n_terms + 1 - n = item%n_terms - - k = scan( term, '*' ) - if (k>0) then - item%vars(n) = trim(adjustl(term(k+1:))) - read( term(:k-1), *) item%coeffs(n) - else - item%vars(n) = trim(adjustl(term)) - item%coeffs(n) = 1.0_r8 - endif - - end subroutine set_coefvar - -end module shr_expr_parser_mod diff --git a/src/drivers/mct/shr/shr_fire_emis_mod.F90 b/src/drivers/mct/shr/shr_fire_emis_mod.F90 deleted file mode 100644 index 08cc4270756..00000000000 --- a/src/drivers/mct/shr/shr_fire_emis_mod.F90 +++ /dev/null @@ -1,299 +0,0 @@ -!================================================================================ -! Coordinates carbon emissions fluxes from CLM fires for use as sources of -! chemical constituents in CAM -! -! This module reads fire_emis_nl namelist which specifies the compound fluxes -! that are to be passed through the model coupler. -!================================================================================ -module shr_fire_emis_mod - - use shr_kind_mod,only : r8 => shr_kind_r8 - use shr_kind_mod,only : CL => SHR_KIND_CL, CX => SHR_KIND_CX, CS => SHR_KIND_CS - use shr_sys_mod, only : shr_sys_abort - use shr_log_mod, only : loglev => shr_log_Level - - implicit none - save - private - - public :: shr_fire_emis_readnl ! reads fire_emis_nl namelist - public :: shr_fire_emis_mechcomps ! points to an array of chemical compounds (in CAM-Chem mechanism) than have fire emissions - public :: shr_fire_emis_mechcomps_n ! number of unique compounds in the CAM chemical mechanism that have fire emissions - public :: shr_fire_emis_comps_n ! number of unique emissions components - public :: shr_fire_emis_linkedlist ! points to linked list of shr_fire_emis_comp_t objects - public :: shr_fire_emis_elevated ! elevated emissions in ATM - public :: shr_fire_emis_comp_ptr ! user defined type that points to fire emis data obj (shr_fire_emis_comp_t) - public :: shr_fire_emis_comp_t ! 
emission component data type - public :: shr_fire_emis_mechcomp_t ! data type for chemical compound in CAM mechanism than has fire emissions - - logical :: shr_fire_emis_elevated = .true. - - character(len=CS), public :: shr_fire_emis_fields_token = '' ! emissions fields token - character(len=CL), public :: shr_fire_emis_factors_file = '' ! a table of basic fire emissions compounds - character(len=CS), public :: shr_fire_emis_ztop_token = 'Sl_fztop' ! token for emissions top of vertical distribution - integer, parameter :: name_len=16 - ! fire emissions component data structure (or user defined type) - type shr_fire_emis_comp_t - character(len=name_len) :: name ! emissions component name (in fire emissions input table) - integer :: index - real(r8), pointer :: emis_factors(:) ! function of plant-function-type (PFT) - real(r8) :: coeff ! emissions component coeffecient - real(r8) :: molec_weight ! molecular weight of the fire emissions compound (g/mole) - type(shr_fire_emis_comp_t), pointer :: next_emiscomp ! points to next member in the linked list - endtype shr_fire_emis_comp_t - - type shr_fire_emis_comp_ptr - type(shr_fire_emis_comp_t), pointer :: ptr ! points to fire emis data obj (shr_fire_emis_comp_t) - endtype shr_fire_emis_comp_ptr - - ! chemical compound in CAM mechanism that has fire emissions - type shr_fire_emis_mechcomp_t - character(len=name_len) :: name ! compound name - type(shr_fire_emis_comp_ptr), pointer :: emis_comps(:) ! an array of pointers to fire emis components - integer :: n_emis_comps ! number of fire emis compounds that make up the emissions for this mechanis compound - end type shr_fire_emis_mechcomp_t - - type(shr_fire_emis_mechcomp_t), pointer :: shr_fire_emis_mechcomps(:) ! array of chemical compounds (in CAM mechanism) that have fire emissions - type(shr_fire_emis_comp_t), pointer :: shr_fire_emis_linkedlist ! points to linked list top - - integer :: shr_fire_emis_comps_n = 0 ! number of unique fire components - integer :: shr_fire_emis_mechcomps_n = 0 ! number of unique compounds in the CAM chemical mechanism that have fire emissions - -contains - - !------------------------------------------------------------------------- - ! - ! This reads the fire_emis_nl namelist group in drv_flds_in and parses the - ! namelist information for the driver, CLM, and CAM. - ! - ! Namelist variables: - ! fire_emis_specifier, fire_emis_factors_file, fire_emis_elevated - ! - ! fire_emis_specifier (array of strings) -- Each array element specifies - ! how CAM-Chem constituents are mapped to basic smoke compounds in - ! the fire emissions factors table (fire_emis_factors_file). Each - ! chemistry constituent name (left of '=' sign) is mapped to one or more - ! smoke compound (separated by + sign if more than one), which can be - ! proceeded by a multiplication factor (separated by '*'). - ! Example: - ! fire_emis_specifier = 'bc_a1 = BC','pom_a1 = 1.4*OC','SO2 = SO2' - ! - ! fire_emis_factors_file (string) -- Input file that contains the table - ! of basic compounds that make up the smoke from the CLM fires. This is - ! used in CLM module FireEmisFactorsMod. - ! - ! fire_emis_elevated (locical) -- If true then CAM-Chem treats the fire - ! emission sources as 3-D vertically distributed forcings for the - ! corresponding chemical tracers. - ! 
- !------------------------------------------------------------------------- - subroutine shr_fire_emis_readnl( NLFileName, ID, emis_fields ) - - use shr_nl_mod, only : shr_nl_find_group_name - use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit - use seq_comm_mct, only : seq_comm_iamroot, seq_comm_setptrs, logunit - use shr_mpi_mod, only : shr_mpi_bcast - - character(len=*), intent(in) :: NLFileName ! name of namelist file - integer , intent(in) :: ID ! seq_comm ID - character(len=*), intent(out) :: emis_fields ! emis flux fields - - integer :: unitn ! namelist unit number - integer :: ierr ! error code - logical :: exists ! if file exists or not - integer :: mpicom ! MPI communicator - - integer, parameter :: maxspc = 100 - - character(len=2*CX) :: fire_emis_specifier(maxspc) = ' ' - character(len=CL) :: fire_emis_factors_file = ' ' - - character(*),parameter :: F00 = "('(shr_fire_emis_readnl) ',2a)" - - logical :: fire_emis_elevated = .true. - - namelist /fire_emis_nl/ fire_emis_specifier, fire_emis_factors_file, fire_emis_elevated - - call seq_comm_setptrs(ID,mpicom=mpicom) - if (seq_comm_iamroot(ID)) then - - inquire( file=trim(NLFileName), exist=exists) - - if ( exists ) then - - unitn = shr_file_getUnit() - open( unitn, file=trim(NLFilename), status='old' ) - if ( loglev > 0 ) write(logunit,F00) & - 'Read in fire_emis_readnl namelist from: ', trim(NLFilename) - - call shr_nl_find_group_name(unitn, 'fire_emis_nl', status=ierr) - ! If ierr /= 0, no namelist present. - - if (ierr == 0) then - read(unitn, fire_emis_nl, iostat=ierr) - - if (ierr > 0) then - call shr_sys_abort( 'problem on read of fire_emis_nl namelist in shr_fire_emis_readnl' ) - endif - endif - - close( unitn ) - call shr_file_freeUnit( unitn ) - end if - end if - call shr_mpi_bcast( fire_emis_specifier, mpicom) - call shr_mpi_bcast( fire_emis_factors_file, mpicom) - call shr_mpi_bcast( fire_emis_elevated, mpicom) - - shr_fire_emis_factors_file = fire_emis_factors_file - shr_fire_emis_elevated = fire_emis_elevated - - ! parse the namelist info and initialize the module data - call shr_fire_emis_init( fire_emis_specifier, emis_fields ) - - end subroutine shr_fire_emis_readnl - - !----------------------------------------------------------------------- - ! module data initializer - !------------------------------------------------------------------------ - subroutine shr_fire_emis_init( specifier, emis_fields ) - - use shr_expr_parser_mod, only : shr_exp_parse, shr_exp_item_t, shr_exp_list_destroy - - character(len=*), intent(in) :: specifier(:) - character(len=*), intent(out) :: emis_fields - - integer :: n_entries - integer :: i, j, k - - type(shr_exp_item_t), pointer :: items_list, item - character(len=12) :: token ! fire emis field name to add - - nullify(shr_fire_emis_linkedlist) - - items_list => shr_exp_parse( specifier, nitems=n_entries ) - - allocate(shr_fire_emis_mechcomps(n_entries)) - shr_fire_emis_mechcomps(:)%n_emis_comps = 0 - - emis_fields = '' - - item => items_list - i = 1 - do while(associated(item)) - - do k=1,shr_fire_emis_mechcomps_n - if ( trim(shr_fire_emis_mechcomps(k)%name) == trim(item%name) ) then - call shr_sys_abort( 'shr_fire_emis_init : multiple emissions definitions specified for : '//trim(item%name)) - endif - enddo - if (len_trim(item%name) .le. 
name_len) then - shr_fire_emis_mechcomps(i)%name = item%name(1:name_len) - else - call shr_sys_abort("shr_file_emis_init : name too long for data structure :"//trim(item%name)) - endif - shr_fire_emis_mechcomps(i)%n_emis_comps = item%n_terms - allocate(shr_fire_emis_mechcomps(i)%emis_comps(item%n_terms)) - - do j = 1,item%n_terms - shr_fire_emis_mechcomps(i)%emis_comps(j)%ptr => add_emis_comp( item%vars(j), item%coeffs(j) ) - enddo - shr_fire_emis_mechcomps_n = shr_fire_emis_mechcomps_n+1 - - write(token,333) shr_fire_emis_mechcomps_n - - if ( shr_fire_emis_mechcomps_n == 1 ) then - ! do not prepend ":" to the string for the first token - emis_fields = trim(token) - shr_fire_emis_fields_token = token - else - emis_fields = trim(emis_fields)//':'//trim(token) - endif - - item => item%next_item - i = i+1 - enddo - if (associated(items_list)) call shr_exp_list_destroy(items_list) - - ! Need to explicitly add Fl_ based on naming convention -333 format ('Fall_fire',i3.3) - - end subroutine shr_fire_emis_init - - !------------------------------------------------------------------------- - ! private methods... - - - !------------------------------------------------------------------------- - !------------------------------------------------------------------------- - function add_emis_comp( name, coeff ) result(emis_comp) - - character(len=*), intent(in) :: name - real(r8), intent(in) :: coeff - type(shr_fire_emis_comp_t), pointer :: emis_comp - - emis_comp => get_emis_comp_by_name(shr_fire_emis_linkedlist, name) - if(associated(emis_comp)) then - ! already in the list so return... - return - endif - - ! create new emissions component and add it to the list - allocate(emis_comp) - - ! element%index = lookup_element( name ) - ! element%emis_factors = get_factors( list_elem%index ) - - emis_comp%index = shr_fire_emis_comps_n+1 - - emis_comp%name = trim(name) - emis_comp%coeff = coeff - nullify(emis_comp%next_emiscomp) - - call add_emis_comp_to_list(emis_comp) - - end function add_emis_comp - - !------------------------------------------------------------------------- - !------------------------------------------------------------------------- - recursive function get_emis_comp_by_name(list_comp, name) result(emis_comp) - - type(shr_fire_emis_comp_t), pointer :: list_comp - character(len=*), intent(in) :: name ! variable name - type(shr_fire_emis_comp_t), pointer :: emis_comp ! returned object - - if(associated(list_comp)) then - if(list_comp%name .eq. 
name) then - emis_comp => list_comp - else - emis_comp => get_emis_comp_by_name(list_comp%next_emiscomp, name) - end if - else - nullify(emis_comp) - end if - - end function get_emis_comp_by_name - - !------------------------------------------------------------------------- - !------------------------------------------------------------------------- - subroutine add_emis_comp_to_list( new_emis_comp ) - - type(shr_fire_emis_comp_t), target, intent(in) :: new_emis_comp - - type(shr_fire_emis_comp_t), pointer :: list_comp - - if(associated(shr_fire_emis_linkedlist)) then - list_comp => shr_fire_emis_linkedlist - do while(associated(list_comp%next_emiscomp)) - list_comp => list_comp%next_emiscomp - end do - list_comp%next_emiscomp => new_emis_comp - else - shr_fire_emis_linkedlist => new_emis_comp - end if - - shr_fire_emis_comps_n = shr_fire_emis_comps_n + 1 - - end subroutine add_emis_comp_to_list - -endmodule shr_fire_emis_mod diff --git a/src/drivers/mct/shr/shr_megan_mod.F90 b/src/drivers/mct/shr/shr_megan_mod.F90 deleted file mode 100644 index ccab085b395..00000000000 --- a/src/drivers/mct/shr/shr_megan_mod.F90 +++ /dev/null @@ -1,313 +0,0 @@ -!================================================================================ -! Handles MEGAN VOC emissions metadata for CLM produced chemical emissions -! MEGAN = Model of Emissions of Gases and Aerosols from Nature -! -! This reads the megan_emis_nl namelist in drv_flds_in and makes the relavent -! information available to CAM, CLM, and driver. The driver sets up CLM to CAM -! communication for the VOC flux fields. CLM needs to know what specific VOC -! fluxes need to be passed to the coupler and how to assimble the fluxes. -! CAM needs to know what specific VOC fluxes to expect from CLM. -! -! Francis Vitt -- 26 Oct 2011 -!================================================================================ -module shr_megan_mod - - use shr_kind_mod,only : r8 => shr_kind_r8 - use shr_kind_mod,only : CL => SHR_KIND_CL, CX => SHR_KIND_CX, CS => SHR_KIND_CS - use shr_sys_mod, only : shr_sys_abort - use shr_log_mod, only : loglev => shr_log_Level - use shr_log_mod, only : logunit => shr_log_Unit - - implicit none - save - private - - public :: shr_megan_readnl ! reads megan_emis_nl namelist - public :: shr_megan_mechcomps ! points to an array of chemical compounds (in CAM-Chem mechanism) that have MEGAN emissions - public :: shr_megan_mechcomps_n ! number of unique compounds in the CAM chemical mechanism that have MEGAN emissions - public :: shr_megan_megcomps_n ! number of unique MEGAN compounds - public :: shr_megan_megcomp_t ! MEGAN compound data type - public :: shr_megan_mechcomp_t ! data type for chemical compound in CAM mechanism that has MEGAN emissions - public :: shr_megan_linkedlist ! points to linked list of shr_megan_comp_t objects - public :: shr_megan_mapped_emisfctrs ! switch to use mapped emission factors - public :: shr_megan_comp_ptr - - character(len=CS), public :: shr_megan_fields_token = '' ! First drydep fields token - character(len=CL), public :: shr_megan_factors_file = '' - - ! MEGAN compound data structure (or user defined type) - type shr_megan_megcomp_t - character(len=16) :: name ! MEGAN compound name (in MEGAN input table) - integer :: index - real(r8), pointer :: emis_factors(:) ! function of plant-function-type (PFT) - integer :: class_number ! MEGAN class number - real(r8) :: coeff ! emissions component coeffecient - real(r8) :: molec_weight ! 
molecular weight of the MEGAN compound (g/mole) - type(shr_megan_megcomp_t), pointer :: next_megcomp ! points to next member in the linked list - endtype shr_megan_megcomp_t - - type shr_megan_comp_ptr - type(shr_megan_megcomp_t), pointer :: ptr - endtype shr_megan_comp_ptr - - ! chemical compound in CAM mechanism that has MEGAN emissions - type shr_megan_mechcomp_t - character(len=16) :: name ! compound name - type(shr_megan_comp_ptr), pointer :: megan_comps(:) ! an array of pointers to megan emis compounds - integer :: n_megan_comps ! number of megan emis compounds that make up the emissions for this mechanis compound - end type shr_megan_mechcomp_t - - type(shr_megan_mechcomp_t), pointer :: shr_megan_mechcomps(:) ! array of chemical compounds (in CAM mechanism) that have MEGAN emissions - type(shr_megan_megcomp_t), pointer :: shr_megan_linkedlist ! points to linked list top - - integer :: shr_megan_megcomps_n = 0 ! number of unique megan compounds - integer :: shr_megan_mechcomps_n = 0 ! number of unique compounds in the CAM chemical mechanism that have MEGAN emissions - - ! switch to use mapped emission factors - logical :: shr_megan_mapped_emisfctrs = .false. - -contains - - !------------------------------------------------------------------------- - ! - ! This reads the megan_emis_nl namelist group in drv_flds_in and parses the - ! namelist information for the driver, CLM, and CAM. - ! - ! Namelist variables: - ! megan_specifier, megan_mapped_emisfctrs, megan_factors_file - ! - ! megan_specifier is a series of strings where each string contains one - ! CAM chemistry constituent name (left of = sign) and one or more MEGAN - ! compound (separated by + sign if more than one). Each MEGAN compound - ! can be proceeded by a multiplication factor (separated by *). The - ! specification of the MEGAN compounds to the right of the = signs tells - ! the MEGAN VOC model within CLM how to construct the VOC fluxes using - ! the factors in megan_factors_file and land surface state. - ! - ! megan_factors_file read by CLM contains valid MEGAN compound names, - ! MEGAN class groupings and scalar emission factors - ! - ! megan_mapped_emisfctrs switch is used to tell the MEGAN model to use - ! mapped emission factors read in from the CLM surface data input file - ! rather than the scalar factors from megan_factors_file - ! - ! Example: - ! &megan_emis_nl - ! megan_specifier = 'ISOP = isoprene', - ! 'C10H16 = myrcene + sabinene + limonene + carene_3 + ocimene_t_b + pinene_b + ...', - ! 'CH3OH = methanol', - ! 'C2H5OH = ethanol', - ! 'CH2O = formaldehyde', - ! 'CH3CHO = acetaldehyde', - ! ... - ! megan_factors_file = '$datapath/megan_emis_factors.nc' - ! / - !------------------------------------------------------------------------- - subroutine shr_megan_readnl( NLFileName, ID, megan_fields ) - - use shr_nl_mod, only : shr_nl_find_group_name - use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit - use seq_comm_mct, only : seq_comm_iamroot, seq_comm_setptrs - use shr_mpi_mod, only : shr_mpi_bcast - - character(len=*), intent(in) :: NLFileName - integer , intent(in) :: ID ! seq_comm ID - character(len=*), intent(out) :: megan_fields - - integer :: unitn ! namelist unit number - integer :: ierr ! error code - logical :: exists ! if file exists or not - integer :: mpicom ! MPI communicator - - integer, parameter :: maxspc = 100 - - character(len=2*CX) :: megan_specifier(maxspc) = ' ' - logical :: megan_mapped_emisfctrs = .false. 
- character(len=CL) :: megan_factors_file = ' ' - - character(*),parameter :: F00 = "('(shr_megan_readnl) ',2a)" - - namelist /megan_emis_nl/ megan_specifier, megan_factors_file, megan_mapped_emisfctrs - - call seq_comm_setptrs(ID,mpicom=mpicom) - if (seq_comm_iamroot(ID)) then - inquire( file=trim(NLFileName), exist=exists) - - if ( exists ) then - - unitn = shr_file_getUnit() - open( unitn, file=trim(NLFilename), status='old' ) - if ( loglev > 0 ) write(logunit,F00) & - 'Read in megan_emis_readnl namelist from: ', trim(NLFilename) - - call shr_nl_find_group_name(unitn, 'megan_emis_nl', status=ierr) - ! If ierr /= 0, no namelist present. - - if (ierr == 0) then - read(unitn, megan_emis_nl, iostat=ierr) - - if (ierr > 0) then - call shr_sys_abort( 'problem on read of megan_emis_nl namelist in shr_megan_readnl' ) - endif - endif - - close( unitn ) - call shr_file_freeUnit( unitn ) - - end if - end if - call shr_mpi_bcast( megan_specifier, mpicom ) - call shr_mpi_bcast( megan_factors_file, mpicom ) - call shr_mpi_bcast( megan_mapped_emisfctrs, mpicom ) - - shr_megan_factors_file = megan_factors_file - shr_megan_mapped_emisfctrs = megan_mapped_emisfctrs - - ! parse the namelist info and initialize the module data - call shr_megan_init( megan_specifier, megan_fields ) - - end subroutine shr_megan_readnl - - !------------------------------------------------------------------------- - ! module data initializer - !------------------------------------------------------------------------- - subroutine shr_megan_init( specifier, megan_fields ) - - use shr_expr_parser_mod, only : shr_exp_parse, shr_exp_item_t, shr_exp_list_destroy - - character(len=*), intent(in) :: specifier(:) - character(len=*), intent(out) :: megan_fields - - integer :: n_entries - integer :: i, j, k - - type(shr_exp_item_t), pointer :: items_list, item - character(len=12) :: token ! megan field name to add - - nullify(shr_megan_linkedlist) - - items_list => shr_exp_parse( specifier, nitems=n_entries ) - - allocate(shr_megan_mechcomps(n_entries)) - shr_megan_mechcomps(:)%n_megan_comps = 0 - - megan_fields = '' - - item => items_list - i = 1 - do while(associated(item)) - - do k=1,shr_megan_mechcomps_n - if ( trim(shr_megan_mechcomps(k)%name) == trim(item%name) ) then - call shr_sys_abort( 'shr_megan_init : duplicate compound names : '//trim(item%name)) - endif - enddo - if (len_trim(item%name) .le. len(shr_megan_mechcomps(i)%name)) then - shr_megan_mechcomps(i)%name = item%name(1:len(shr_megan_mechcomps(i)%name)) - else - call shr_sys_abort( 'shr_megan_init : name too long for data structure : '//trim(item%name)) - endif - shr_megan_mechcomps(i)%n_megan_comps = item%n_terms - allocate(shr_megan_mechcomps(i)%megan_comps(item%n_terms)) - - do j = 1,item%n_terms - shr_megan_mechcomps(i)%megan_comps(j)%ptr => add_megan_comp( item%vars(j), item%coeffs(j) ) - enddo - shr_megan_mechcomps_n = shr_megan_mechcomps_n+1 - - write(token,333) shr_megan_mechcomps_n - - if ( shr_megan_mechcomps_n == 1 ) then - ! do not prepend ":" to the string for the first token - megan_fields = trim(token) - shr_megan_fields_token = token - else - megan_fields = trim(megan_fields)//':'//trim(token) - endif - - item => item%next_item - i = i+1 - enddo - if (associated(items_list)) call shr_exp_list_destroy(items_list) - - ! Need to explicitly add Fl_ based on naming convention -333 format ('Fall_voc',i3.3) - - end subroutine shr_megan_init - - !------------------------------------------------------------------------- - ! private methods... 
- - !------------------------------------------------------------------------- - !------------------------------------------------------------------------- - function add_megan_comp( name, coeff ) result(megan_comp) - - character(len=16), intent(in) :: name - real(r8), intent(in) :: coeff - type(shr_megan_megcomp_t), pointer :: megan_comp - - megan_comp => get_megan_comp_by_name(shr_megan_linkedlist, name) - if(associated(megan_comp)) then - ! already in the list so return... - return - endif - - ! create new megan compound and add it to the list - allocate(megan_comp) - - ! element%index = lookup_element( name ) - ! element%emis_factors = get_factors( list_elem%index ) - - megan_comp%index = shr_megan_megcomps_n+1 - - megan_comp%name = trim(name) - megan_comp%coeff = coeff - nullify(megan_comp%next_megcomp) - - call add_megan_comp_to_list(megan_comp) - - end function add_megan_comp - - !------------------------------------------------------------------------- - !------------------------------------------------------------------------- - recursive function get_megan_comp_by_name(list_comp, name) result(megan_comp) - - type(shr_megan_megcomp_t), pointer :: list_comp - character(len=*), intent(in) :: name ! variable name - type(shr_megan_megcomp_t), pointer :: megan_comp ! returned object - - if(associated(list_comp)) then - if(list_comp%name .eq. name) then - megan_comp => list_comp - else - megan_comp => get_megan_comp_by_name(list_comp%next_megcomp, name) - end if - else - nullify(megan_comp) - end if - - end function get_megan_comp_by_name - - !------------------------------------------------------------------------- - !------------------------------------------------------------------------- - subroutine add_megan_comp_to_list( new_megan_comp ) - - type(shr_megan_megcomp_t), target, intent(in) :: new_megan_comp - - type(shr_megan_megcomp_t), pointer :: list_comp - - if(associated(shr_megan_linkedlist)) then - list_comp => shr_megan_linkedlist - do while(associated(list_comp%next_megcomp)) - list_comp => list_comp%next_megcomp - end do - list_comp%next_megcomp => new_megan_comp - else - shr_megan_linkedlist => new_megan_comp - end if - - shr_megan_megcomps_n = shr_megan_megcomps_n + 1 - - end subroutine add_megan_comp_to_list - -endmodule shr_megan_mod diff --git a/src/drivers/mct/shr/shr_ndep_mod.F90 b/src/drivers/mct/shr/shr_ndep_mod.F90 deleted file mode 100644 index eb4e36a5fd1..00000000000 --- a/src/drivers/mct/shr/shr_ndep_mod.F90 +++ /dev/null @@ -1,116 +0,0 @@ -module shr_ndep_mod - - !======================================================================== - ! Module for handling nitrogen depostion of tracers. - ! This module is shared by land and atmosphere models for the computations of - ! dry deposition of tracers - !======================================================================== - - !USES: - use shr_sys_mod, only : shr_sys_abort - use shr_log_mod, only : s_loglev => shr_log_Level - use shr_kind_mod, only : r8 => shr_kind_r8, CS => SHR_KIND_CS, CX => SHR_KIND_CX - - implicit none - save - - private - - ! !PUBLIC MEMBER FUNCTIONS - public :: shr_ndep_readnl ! Read namelist - !==================================================================================== - -CONTAINS - - !==================================================================================== - subroutine shr_ndep_readnl(NLFilename, ID, ndep_fields, add_ndep_fields) - - !======================================================================== - ! 
reads ndep_inparm namelist and sets up driver list of fields for - ! atmosphere -> land and atmosphere -> ocn communications. - !======================================================================== - - use shr_file_mod , only : shr_file_getUnit, shr_file_freeUnit - use shr_log_mod , only : s_logunit => shr_log_Unit - use seq_comm_mct , only : seq_comm_iamroot, seq_comm_setptrs - use shr_mpi_mod , only : shr_mpi_bcast - use shr_nl_mod , only : shr_nl_find_group_name - implicit none - - character(len=*), intent(in) :: NLFilename ! Namelist filename - integer , intent(in) :: ID ! seq_comm ID - character(len=*), intent(out) :: ndep_fields - logical , intent(out) :: add_ndep_fields - - !----- local ----- - integer :: i ! Indices - integer :: unitn ! namelist unit number - integer :: ierr ! error code - logical :: exists ! if file exists or not - integer :: mpicom ! MPI communicator - - integer, parameter :: maxspc = 100 ! Maximum number of species - character(len=32) :: ndep_list(maxspc) = '' ! List of ndep species - - !----- formats ----- - character(*),parameter :: subName = '(shr_ndep_read) ' - character(*),parameter :: F00 = "('(shr_ndep_read) ',8a)" - character(*),parameter :: FI1 = "('(shr_ndep_init) ',a,I2)" - - namelist /ndep_inparm/ ndep_list - - !----------------------------------------------------------------------------- - ! Read namelist and figure out the ndep field list to pass - ! First check if file exists and if not, n_ndep will be zero - !----------------------------------------------------------------------------- - - !--- Open and read namelist --- - if ( len_trim(NLFilename) == 0 ) then - call shr_sys_abort( subName//'ERROR: nlfilename not set' ) - end if - call seq_comm_setptrs(ID,mpicom=mpicom) - if (seq_comm_iamroot(ID)) then - inquire( file=trim(NLFileName), exist=exists) - if ( exists ) then - unitn = shr_file_getUnit() - open( unitn, file=trim(NLFilename), status='old' ) - if ( s_loglev > 0 ) then - write(s_logunit,F00) 'Read in ndep_inparm namelist from: ', trim(NLFilename) - end if - call shr_nl_find_group_name(unitn, 'ndep_inparm', ierr) - if (ierr == 0) then - ierr = 1 - do while ( ierr /= 0 ) - read(unitn, ndep_inparm, iostat=ierr) - if (ierr < 0) then - call shr_sys_abort( subName//'ERROR: encountered end-of-file on namelist read' ) - endif - end do - else - write(s_logunit,*) 'shr_ndep_readnl: no ndep_inparm namelist found in ',NLFilename - endif - close( unitn ) - call shr_file_freeUnit( unitn ) - end if - end if - call shr_mpi_bcast( ndep_list, mpicom ) - - ndep_fields = ' ' - if (len_trim(ndep_list(1)) == 0) then - add_ndep_fields = .false. - else - ! Loop over species to fill list of fields to communicate for ndep - add_ndep_fields = .true. 
- do i=1,maxspc - if ( len_trim(ndep_list(i))==0 ) exit - if ( i == 1 ) then - ndep_fields = 'Faxa_' // trim(ndep_list(i)) - else - ndep_fields = trim(ndep_fields)//':'//'Faxa_' // trim(ndep_list(i)) - endif - enddo - end if - - end subroutine shr_ndep_readnl - -end module shr_ndep_mod diff --git a/src/drivers/mct/unit_test/CMakeLists.txt b/src/drivers/mct/unit_test/CMakeLists.txt deleted file mode 100644 index 289a49c8599..00000000000 --- a/src/drivers/mct/unit_test/CMakeLists.txt +++ /dev/null @@ -1,70 +0,0 @@ -set(DRV_ROOT "${CIME_ROOT}/src/drivers/mct") - -add_definitions( - -DNUM_COMP_INST_ATM=1 - -DNUM_COMP_INST_LND=1 - -DNUM_COMP_INST_OCN=1 - -DNUM_COMP_INST_ICE=1 - -DNUM_COMP_INST_GLC=1 - -DNUM_COMP_INST_WAV=1 - -DNUM_COMP_INST_ROF=1 - -DNUM_COMP_INST_ESP=1 - -DNUM_COMP_INST_IAC=1 - ) - -# The following definitions are needed when building with the mpi-serial library -if (USE_MPI_SERIAL) - add_definitions(-DNO_MPI2 -DNO_MPIMOD) -endif() - -# Add source directories from stubs. This should be done first, so that in the -# case of name collisions, the drv versions take precedence (when there are two -# files with the same name, the one added later wins). -add_subdirectory(${CIME_ROOT}/src/share/unit_test_stubs/pio pio) - -# Add drv source directories -add_subdirectory(${DRV_ROOT}/shr drv_shr) -add_subdirectory(${DRV_ROOT}/main drv_main) - -# Add general unit test directories (stubbed out files, etc.) -add_subdirectory(utils drv_unit_test_utils) -add_subdirectory(stubs drv_unit_test_stubs) - -# Build libraries containing stuff needed for the unit tests. -# -# Eventually, these add_library calls should probably be distributed into the -# correct location, rather than being in this top-level CMakeLists.txt file. -# -# Note that we are including the stub pio in the csm_share library for simplicity. -# -# We include the esmf_wrf_timemgr_sources in the csm_share library -# rather than building it into its own library because of circular -# dependencies: shr_cal_mod (in share_sources) depends on esmf, but esmf -# depends on shr_sys_mod (also in share_sources). -add_library(csm_share ${share_sources} ${share_mct_sources} ${share_pio_sources} - ${pio_sources} ${esmf_wrf_timemgr_sources}) -declare_generated_dependencies(csm_share "${share_genf90_sources}") -declare_generated_dependencies(csm_share "${pio_genf90_sources}") -add_dependencies(csm_share mct_project) - -add_library(drv ${drv_sources}) -add_dependencies(drv csm_share) - -include_directories(${CMAKE_CURRENT_BINARY_DIR}) - -# Set the list of libraries needed for these unit tests. Note that not all unit -# tests need all of these libraries, but it's easiest just to set the same list -# for everyone. 
-set(DRV_UNIT_TEST_LIBS drv;csm_share;mct;mpeu) -if (USE_MPI_SERIAL) - list(APPEND DRV_UNIT_TEST_LIBS mpi-serial) -endif() -list(APPEND DRV_UNIT_TEST_LIBS ${NETCDF_LIBRARIES}) - -# Add the test directories -add_subdirectory(avect_wrapper_test) -add_subdirectory(seq_map_test) -add_subdirectory(glc_elevclass_test) -add_subdirectory(map_glc2lnd_test) -add_subdirectory(map_lnd2rof_irrig_test) -add_subdirectory(check_fields_test) diff --git a/src/drivers/mct/unit_test/avect_wrapper_test/CMakeLists.txt b/src/drivers/mct/unit_test/avect_wrapper_test/CMakeLists.txt deleted file mode 100644 index f22aacc633d..00000000000 --- a/src/drivers/mct/unit_test/avect_wrapper_test/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -create_pFUnit_test(avect_wrapper avect_wrapper_exe - "test_avect_wrapper.pf" "") - -target_link_libraries(avect_wrapper_exe ${DRV_UNIT_TEST_LIBS}) diff --git a/src/drivers/mct/unit_test/avect_wrapper_test/test_avect_wrapper.pf b/src/drivers/mct/unit_test/avect_wrapper_test/test_avect_wrapper.pf deleted file mode 100644 index 4558c20eccb..00000000000 --- a/src/drivers/mct/unit_test/avect_wrapper_test/test_avect_wrapper.pf +++ /dev/null @@ -1,87 +0,0 @@ -module test_avect_wrapper - - ! Tests of avect_wrapper_mod, a module with some unit test utilities - - use pfunit_mod - use avect_wrapper_mod - use mct_mod - use mct_wrapper_mod, only : mct_init, mct_clean - use shr_kind_mod, only : r8 => shr_kind_r8 - - implicit none - - @TestCase - type, extends(TestCase) :: TestCreateAvect - type(mct_aVect) :: av - contains - procedure :: setUp - procedure :: tearDown - end type TestCreateAvect - -contains - - subroutine setUp(this) - class(TestCreateAvect), intent(inout) :: this - - call mct_init() - end subroutine setUp - - subroutine tearDown(this) - class(TestCreateAvect), intent(inout) :: this - - call mct_aVect_clean(this%av) - call mct_clean() - end subroutine tearDown - - @Test - subroutine createAVectWithoutData_1Field_checkField(this) - class(TestCreateAvect), intent(inout) :: this - character(len=*), parameter :: attr_tag = 'foo' - integer, parameter :: lsize = 5 ! not important for this test - character(len=64) :: actual_rlist - - call create_aVect_without_data(this%av, [attr_tag], lsize) - - actual_rlist = mct_aVect_exportRList2c(this%av) - @assertEqual('foo', trim(actual_rlist)) - end subroutine createAVectWithoutData_1Field_checkField - - @Test - subroutine createAVectWithoutData_3Field_checkFields(this) - class(TestCreateAvect), intent(inout) :: this - character(len=*), parameter :: attr_tag1 = 'foo1' - character(len=*), parameter :: attr_tag2 = 'foo2' - character(len=*), parameter :: attr_tag3 = 'bar ' - character(len=*), parameter :: expected_rlist = 'foo1:foo2:bar' - integer, parameter :: lsize = 5 ! not important for this test - character(len=64) :: actual_rlist - - call create_aVect_without_data(this%av, [attr_tag1, attr_tag2, attr_tag3], lsize) - - actual_rlist = mct_aVect_exportRList2c(this%av) - @assertEqual(expected_rlist, actual_rlist) - end subroutine createAVectWithoutData_3Field_checkFields - - @Test - subroutine createAvectWithData_2Fields_checkData(this) - class(TestCreateAvect), intent(inout) :: this - integer, parameter :: lsize = 3 - ! 
note that the two attributes have different trimmed length - character(len=4), parameter :: attr_tag1 = 'foo' - character(len=4), parameter :: attr_tag2 = 'bar2' - real(r8), parameter :: data1(lsize) = [1._r8, 2._r8, 3._r8] - real(r8), parameter :: data2(lsize) = [11._r8, 12._r8, 13._r8] - real(r8), allocatable :: actual_data1(:), actual_data2(:) - - call create_aVect_with_data_rows_are_points(this%av, & - attr_tags = [attr_tag1, attr_tag2], & - data = reshape([data1, data2], [lsize, 2])) - - actual_data1 = aVect_exportRattr(this%av, attr_tag1) - @assertEqual(data1, actual_data1) - actual_data2 = aVect_exportRattr(this%av, attr_tag2) - @assertEqual(data2, actual_data2) - - end subroutine createAvectWithData_2Fields_checkData - -end module test_avect_wrapper diff --git a/src/drivers/mct/unit_test/check_fields_test/CMakeLists.txt b/src/drivers/mct/unit_test/check_fields_test/CMakeLists.txt deleted file mode 100644 index 1c6aeefb79d..00000000000 --- a/src/drivers/mct/unit_test/check_fields_test/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -create_pFUnit_test(check_fields check_fields_exe - "test_check_fields.pf" "") - -target_link_libraries(check_fields_exe ${DRV_UNIT_TEST_LIBS}) diff --git a/src/drivers/mct/unit_test/check_fields_test/test_check_fields.pf b/src/drivers/mct/unit_test/check_fields_test/test_check_fields.pf deleted file mode 100644 index 772d22ec0d5..00000000000 --- a/src/drivers/mct/unit_test/check_fields_test/test_check_fields.pf +++ /dev/null @@ -1,99 +0,0 @@ -module test_check_fields - - ! Tests of check_fields in the component_type_mod, check_fields looks for NaN values - ! in fields passed from components to the coupler - - use pfunit_mod - use component_type_mod - use mct_mod - use mct_wrapper_mod, only : mct_init, mct_clean - use avect_wrapper_mod - use create_mapper_mod - use shr_kind_mod, only : r8 => shr_kind_r8 - use shr_infnan_mod, only : shr_infnan_nan, assignment(=) - implicit none - - @TestCase - type, extends(TestCase) :: TestCheckFields - type(component_type) :: comp - contains - procedure :: setUp - procedure :: tearDown - end type TestCheckFields - -contains - - subroutine setUp(this) - class(TestCheckFields), intent(inout) :: this - - call mct_init() - end subroutine setUp - - subroutine tearDown(this) - class(TestCheckFields), intent(inout) :: this - call mct_aVect_clean(this%comp%c2x_cc) - call mct_clean() - end subroutine tearDown - - @Test - subroutine createAVectWithoutData_1Field_checkField(this) - class(TestCheckFields), intent(inout) :: this - character(len=*), parameter :: attr_tag = 'foo' - integer, parameter :: lsize = 5 ! - character(len=64) :: actual_rlist - real(r8) :: nan - - nan = shr_infnan_nan - if(.not. associated(this%comp%c2x_cc)) allocate(this%comp%c2x_cc) - call create_aVect_without_data(this%comp%c2x_cc, [attr_tag], lsize) - - actual_rlist = mct_aVect_exportRList2c(this%comp%c2x_cc) - @assertEqual('foo', trim(actual_rlist)) - - this%comp%c2x_cc%rattr(1,3) = nan - - this%comp%name = 'pfunittest' - - if(.not. 
associated(this%comp%gsmap_cc)) allocate(this%comp%gsmap_cc) - - call create_gsmap(this%comp%gsmap_cc, lsize) - - call check_fields(this%comp, 1) - @assertExceptionRaised('ABORTED: component_mod:check_fields NaN found in pfunittest instance: 1 field foo 1d global index: 3') - - end subroutine createAVectWithoutData_1Field_checkField - - @Test - subroutine createAVectWithoutData_3Field_checkFields(this) - class(TestCheckFields), intent(inout) :: this - character(len=*), parameter :: attr_tag1 = 'foo1' - character(len=*), parameter :: attr_tag2 = 'foo2' - character(len=*), parameter :: attr_tag3 = 'bar ' - character(len=*), parameter :: expected_rlist = 'foo1:foo2:bar' - integer, parameter :: lsize = 5 ! not important for this test - character(len=64) :: actual_rlist - real(r8) :: nan - - nan = shr_infnan_nan - - this%comp%name = 'pfunittest' - - if(.not. associated(this%comp%c2x_cc)) allocate(this%comp%c2x_cc) - - call create_aVect_without_data(this%comp%c2x_cc, [attr_tag1, attr_tag2, attr_tag3], lsize) - - actual_rlist = mct_aVect_exportRList2c(this%comp%c2x_cc) - @assertEqual(expected_rlist, actual_rlist) - - if(.not. associated(this%comp%gsmap_cc)) allocate(this%comp%gsmap_cc) - - this%comp%c2x_cc%rattr(2,3) = nan - - call create_gsmap(this%comp%gsmap_cc, lsize) - - call check_fields(this%comp, 1) - - @assertExceptionRaised('ABORTED: component_mod:check_fields NaN found in pfunittest instance: 1 field foo2 1d global index: 3') - end subroutine createAVectWithoutData_3Field_checkFields - -end module test_check_fields diff --git a/src/drivers/mct/unit_test/glc_elevclass_test/CMakeLists.txt b/src/drivers/mct/unit_test/glc_elevclass_test/CMakeLists.txt deleted file mode 100644 index 25831e414e0..00000000000 --- a/src/drivers/mct/unit_test/glc_elevclass_test/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -create_pFUnit_test(glc_elevclass glc_elevclass_exe - "test_glc_elevclass.pf" "") - -target_link_libraries(glc_elevclass_exe ${DRV_UNIT_TEST_LIBS}) diff --git a/src/drivers/mct/unit_test/glc_elevclass_test/test_glc_elevclass.pf b/src/drivers/mct/unit_test/glc_elevclass_test/test_glc_elevclass.pf deleted file mode 100644 index f0f8ebed22a..00000000000 --- a/src/drivers/mct/unit_test/glc_elevclass_test/test_glc_elevclass.pf +++ /dev/null @@ -1,286 +0,0 @@ -module test_glc_elevclass - - ! Tests of glc_elevclass_mod - - use pfunit_mod - use glc_elevclass_mod - use shr_kind_mod, only : r8 => shr_kind_r8 - - implicit none - - @TestCase - type, extends(TestCase) :: TestGLCElevclass - contains - procedure :: setUp - procedure :: tearDown - end type TestGLCElevclass - -contains - - subroutine setUp(this) - class(TestGLCElevclass), intent(inout) :: this - end subroutine setUp - - subroutine tearDown(this) - class(TestGLCElevclass), intent(inout) :: this - - call glc_elevclass_clean() - end subroutine tearDown - - ! ------------------------------------------------------------------------ - ! Tests of glc_elevclass_init - ! 
------------------------------------------------------------------------ - - @Test - subroutine test_init_with_0ECs(this) - class(TestGLCElevclass), intent(inout) :: this - integer :: num_elevation_classes - - call glc_elevclass_init(0) - - num_elevation_classes = glc_get_num_elevation_classes() - @assertEqual(0, num_elevation_classes) - end subroutine test_init_with_0ECs - - @Test - subroutine test_init_with_1EC(this) - class(TestGLCElevclass), intent(inout) :: this - integer :: num_elevation_classes - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(1) - - num_elevation_classes = glc_get_num_elevation_classes() - @assertEqual(1, num_elevation_classes) - call glc_get_elevation_class(9999._r8, elevation_class, err_code) - @assertEqual(1, elevation_class) - end subroutine test_init_with_1EC - - @Test - subroutine test_init_with_10ECs(this) - class(TestGLCElevclass), intent(inout) :: this - integer :: num_elevation_classes - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(10) - - num_elevation_classes = glc_get_num_elevation_classes() - @assertEqual(10, num_elevation_classes) - call glc_get_elevation_class(9999._r8, elevation_class, err_code) - @assertEqual(10, elevation_class) - end subroutine test_init_with_10ECs - - ! ------------------------------------------------------------------------ - ! Tests of glc_get_elevation_class - ! ------------------------------------------------------------------------ - - @Test - subroutine test_glc_get_elevation_class_lowest(this) - ! Test an elevation in the lowest elevation class - class(TestGLCElevclass), intent(inout) :: this - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - - call glc_get_elevation_class(1._r8, elevation_class, err_code) - @assertEqual(1, elevation_class) - end subroutine test_glc_get_elevation_class_lowest - - @Test - subroutine test_glc_get_elevation_class_mid(this) - ! Test an elevation in a middle elevation class - class(TestGLCElevclass), intent(inout) :: this - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - - call glc_get_elevation_class(150._r8, elevation_class, err_code) - @assertEqual(2, elevation_class) - end subroutine test_glc_get_elevation_class_mid - - @Test - subroutine test_glc_get_elevation_class_highest(this) - ! Test an elevation in the highest elevation class - class(TestGLCElevclass), intent(inout) :: this - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - - call glc_get_elevation_class(250._r8, elevation_class, err_code) - @assertEqual(3, elevation_class) - end subroutine test_glc_get_elevation_class_highest - - - ! 
Test glc_get_elevation_class error return values (one test for each possibility) - - @Test - subroutine test_glc_get_elevation_class_err_none(this) - class(TestGLCElevclass), intent(inout) :: this - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - - call glc_get_elevation_class(1._r8, elevation_class, err_code) - @assertEqual(GLC_ELEVCLASS_ERR_NONE, err_code) - end subroutine test_glc_get_elevation_class_err_none - - @Test - subroutine test_glc_get_elevation_class_err_low(this) - class(TestGLCElevclass), intent(inout) :: this - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - - call glc_get_elevation_class(-1._r8, elevation_class, err_code) - @assertEqual(GLC_ELEVCLASS_ERR_TOO_LOW, err_code) - @assertEqual(1, elevation_class) - end subroutine test_glc_get_elevation_class_err_low - - @Test - subroutine test_glc_get_elevation_class_err_high(this) - class(TestGLCElevclass), intent(inout) :: this - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - - call glc_get_elevation_class(301._r8, elevation_class, err_code) - @assertEqual(GLC_ELEVCLASS_ERR_TOO_HIGH, err_code) - @assertEqual(3, elevation_class) - end subroutine test_glc_get_elevation_class_err_high - - @Test - subroutine test_glc_get_elevation_class_err_undefined(this) - class(TestGLCElevclass), intent(inout) :: this - integer :: elevation_class - integer :: err_code - - call glc_elevclass_init(0) - - call glc_get_elevation_class(1._r8, elevation_class, err_code) - @assertEqual(GLC_ELEVCLASS_ERR_UNDEFINED, err_code) - @assertEqual(0, elevation_class) - end subroutine test_glc_get_elevation_class_err_undefined - - ! ------------------------------------------------------------------------ - ! Tests of glc_mean_elevation_virtual - ! ------------------------------------------------------------------------ - - @Test - subroutine test_glc_mean_elevation_virtual_EC0(this) - class(TestGLCElevclass), intent(inout) :: this - real(r8) :: mean_elevation - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - mean_elevation = glc_mean_elevation_virtual(0) - @assertEqual(0._r8, mean_elevation) - end subroutine test_glc_mean_elevation_virtual_EC0 - - @Test - subroutine test_glc_mean_elevation_virtual_EC_mid(this) - ! Tests an elevation class in the middle of the range (normal case) - class(TestGLCElevclass), intent(inout) :: this - real(r8) :: mean_elevation - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 300._r8]) - mean_elevation = glc_mean_elevation_virtual(2) - @assertEqual(150._r8, mean_elevation) - end subroutine test_glc_mean_elevation_virtual_EC_mid - - @Test - subroutine test_glc_mean_elevation_virtual_EC_top(this) - ! Tests an elevation class at the top of the range - class(TestGLCElevclass), intent(inout) :: this - real(r8) :: mean_elevation - - call glc_elevclass_init(3, [0._r8, 100._r8, 200._r8, 1000._r8]) - mean_elevation = glc_mean_elevation_virtual(3) - @assertEqual(300._r8, mean_elevation) - end subroutine test_glc_mean_elevation_virtual_EC_top - - @Test - subroutine test_glc_mean_elevation_virtual_EC_oneEC(this) - ! Tests a single elevation class - class(TestGLCElevclass), intent(inout) :: this - real(r8) :: mean_elevation - - call glc_elevclass_init(1) - mean_elevation = glc_mean_elevation_virtual(1) - @assertEqual(1000._r8, mean_elevation) - end subroutine test_glc_mean_elevation_virtual_EC_oneEC - - ! 
------------------------------------------------------------------------ - ! Tests of glc_elevclass_as_string - ! ------------------------------------------------------------------------ - - @Test - subroutine test_glc_elevclass_as_string_0(this) - class(TestGLCElevclass), intent(inout) :: this - character(len=GLC_ELEVCLASS_STRLEN) :: str - - str = glc_elevclass_as_string(0) - @assertEqual('00', trim(str)) - end subroutine test_glc_elevclass_as_string_0 - - @Test - subroutine test_glc_elevclass_as_string_1digit(this) - class(TestGLCElevclass), intent(inout) :: this - character(len=GLC_ELEVCLASS_STRLEN) :: str - - str = glc_elevclass_as_string(2) - @assertEqual('02', trim(str)) - end subroutine test_glc_elevclass_as_string_1digit - - @Test - subroutine test_glc_elevclass_as_string_2digits(this) - class(TestGLCElevclass), intent(inout) :: this - character(len=GLC_ELEVCLASS_STRLEN) :: str - - str = glc_elevclass_as_string(12) - @assertEqual('12', trim(str)) - end subroutine test_glc_elevclass_as_string_2digits - - ! ------------------------------------------------------------------------ - ! Tests of glc_all_elevclass_strings - ! ------------------------------------------------------------------------ - - @Test - subroutine test_glc_all_elevclass_strings(this) - class(TestGLCElevclass), intent(inout) :: this - character(len=GLC_ELEVCLASS_STRLEN), allocatable :: elevclass_strings(:) - - call glc_elevclass_init(3) - elevclass_strings = glc_all_elevclass_strings() - - @assertEqual(3, size(elevclass_strings)) - ! There doesn't seem to be an assertEqual method for an array of strings - @assertEqual('01', elevclass_strings(1)) - @assertEqual('02', elevclass_strings(2)) - @assertEqual('03', elevclass_strings(3)) - end subroutine test_glc_all_elevclass_strings - - @Test - subroutine test_glc_all_elevclass_strings_include_zero(this) - class(TestGLCElevclass), intent(inout) :: this - character(len=GLC_ELEVCLASS_STRLEN), allocatable :: elevclass_strings(:) - - call glc_elevclass_init(3) - elevclass_strings = glc_all_elevclass_strings(include_zero=.true.) - - @assertEqual(4, size(elevclass_strings)) - ! There doesn't seem to be an assertEqual method for an array of strings - @assertEqual('00', elevclass_strings(1)) - @assertEqual('01', elevclass_strings(2)) - @assertEqual('02', elevclass_strings(3)) - @assertEqual('03', elevclass_strings(4)) - end subroutine test_glc_all_elevclass_strings_include_zero - - -end module test_glc_elevclass diff --git a/src/drivers/mct/unit_test/map_glc2lnd_test/CMakeLists.txt b/src/drivers/mct/unit_test/map_glc2lnd_test/CMakeLists.txt deleted file mode 100644 index a35e066321d..00000000000 --- a/src/drivers/mct/unit_test/map_glc2lnd_test/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -set (pfunit_sources - test_map_glc2lnd.pf - ) - -create_pFUnit_test(map_glc2lnd map_glc2lnd_exe - "${pfunit_sources}" "") - -target_link_libraries(map_glc2lnd_exe ${DRV_UNIT_TEST_LIBS}) diff --git a/src/drivers/mct/unit_test/map_glc2lnd_test/test_map_glc2lnd.pf b/src/drivers/mct/unit_test/map_glc2lnd_test/test_map_glc2lnd.pf deleted file mode 100644 index f48cccac7e0..00000000000 --- a/src/drivers/mct/unit_test/map_glc2lnd_test/test_map_glc2lnd.pf +++ /dev/null @@ -1,985 +0,0 @@ -module test_map_glc2lnd - - ! 
Tests of map_glc2lnd_mod - - use pfunit_mod - use map_glc2lnd_mod - use glc_elevclass_mod, only : glc_elevclass_init, glc_elevclass_clean, & - glc_mean_elevation_virtual, glc_elevclass_as_string - use mct_mod, only : mct_aVect, mct_aVect_clean, mct_aVect_lsize - use seq_map_type_mod, only : seq_map - use mct_wrapper_mod, only : mct_init, mct_clean - use avect_wrapper_mod - use simple_map_mod - use create_mapper_mod - use shr_kind_mod, only : r8 => shr_kind_r8 - - implicit none - - real(r8), parameter :: tol = 1.e-11_r8 - - integer, parameter :: n_elev_classes = 3 - - ! Assume 3 elevation classes, with boundaries of: - ! (1) 0 - 100 m - ! (2) 100 - 200 m - ! (3) 200 - 1000 m - real(r8), parameter :: elev_class_boundaries(0:n_elev_classes) = & - [0._r8, 100._r8, 200._r8, 1000._r8] - - ! This type holds data for a single field in a single land grid cell - type :: lnd_field_type - ! Index 0 is bare land - real(r8) :: data(0:n_elev_classes) - end type lnd_field_type - - @TestCase - type, extends(TestCase) :: TestMapGlc2lnd - type(seq_map) :: mapper - type(mct_aVect) :: data_g ! data on the GLC (source) grid - type(mct_aVect) :: data_l ! data on the LND (destination) GRID - contains - procedure :: setUp - procedure :: tearDown - procedure :: setup_inputs - procedure :: setup_inputs_lnd_fully_outside_static_glc_domain - procedure :: setup_inputs_lnd_partially_outside_static_glc_domain - procedure :: setup_inputs_lnd_fully_outside_dynamic_glc_domain - procedure :: setup_inputs_lnd_partially_outside_dynamic_glc_domain - procedure :: run_map_glc2lnd_ec - procedure :: verify_data_l_field - end type TestMapGlc2lnd - -contains - - ! ======================================================================== - ! Utility routines - ! ======================================================================== - - subroutine setUp(this) - class(TestMapGlc2lnd), intent(inout) :: this - - call mct_init() - - end subroutine setUp - - subroutine tearDown(this) - class(TestMapGlc2lnd), intent(inout) :: this - - call clean_mapper(this%mapper) - call mct_aVect_clean(this%data_l) - call mct_aVect_clean(this%data_g) - call glc_elevclass_clean() - call mct_clean() - end subroutine tearDown - - subroutine setup_inputs(this, frac_glc, topo_glc, my_map, data_glc, icemask_glc) - ! This utility function sets up inputs that are needed for the map_glc2lnd_ec call - class(TestMapGlc2lnd), intent(inout) :: this - real(r8), intent(in) :: frac_glc(:) ! ice fraction in each glc cell - real(r8), intent(in) :: topo_glc(:) ! ice topographic height in each glc cell - type(simple_map_type), intent(in) :: my_map ! mapping information from glc to land - - ! Optional extra data field on the glc grid, put in field named 'data' (if not - ! present, the 'data' field is filled with all 0's). - real(r8), intent(in), optional :: data_glc(:) - - ! Optional ice mask on the glc grid. If not present, it is filled with all 1's - real(r8), intent(in), optional :: icemask_glc(:) - - real(r8), allocatable :: l_data_glc(:) ! local version of data_glc - real(r8), allocatable :: l_icemask_glc(:) ! local version of icemask_glc - integer :: npts_glc - integer :: npts_lnd - - ! ------------------------------------------------------------------------ - ! Do some initial error-checking to make sure this routine is being called properly - ! 
------------------------------------------------------------------------ - - npts_glc = size(frac_glc) - @assertEqual(npts_glc, size(topo_glc)) - @assertEqual(npts_glc, my_map%get_n_source_points()) - if (present(data_glc)) then - @assertEqual(npts_glc, size(data_glc)) - end if - - ! ------------------------------------------------------------------------ - ! Set optional variables - ! ------------------------------------------------------------------------ - - if (present(data_glc)) then - l_data_glc = data_glc - else - allocate(l_data_glc(npts_glc)) - l_data_glc(:) = 0._r8 - end if - - if (present(icemask_glc)) then - l_icemask_glc = icemask_glc - else - allocate(l_icemask_glc(npts_glc)) - l_icemask_glc(:) = 1._r8 - end if - - ! ------------------------------------------------------------------------ - ! Setup - ! ------------------------------------------------------------------------ - - call glc_elevclass_init(n_elev_classes, elev_class_boundaries) - - call create_aVect_with_data_rows_are_points(this%data_g, & - attr_tags = ['Sg_ice_covered', 'Sg_topo ', 'Sg_icemask ', 'data '], & - data = reshape([frac_glc, topo_glc, l_icemask_glc, l_data_glc], [npts_glc, 4])) - - npts_lnd = my_map%get_n_dest_points() - ! The following assumes that n_elev_classes is 3: - call create_aVect_without_data(this%data_l, lsize = npts_lnd, & - attr_tags = ['Sg_ice_covered00', 'Sg_ice_covered01', 'Sg_ice_covered02', 'Sg_ice_covered03', & - 'Sg_topo00 ', 'Sg_topo01 ', 'Sg_topo02 ', 'Sg_topo03 ', & - 'data00 ', 'data01 ', 'data02 ', 'data03 ']) - - call create_mapper(this%mapper, my_map) - - end subroutine setup_inputs - - subroutine setup_inputs_lnd_fully_outside_static_glc_domain(this, frac_glc, topo_glc, data_glc) - ! Calls setup_inputs with a domain that has 2 lnd cells, 1 glc cell. - ! - ! The lnd cell of interest (#1) is fully outside the static glc domain (i.e., there is - ! no overlap in the mapping weights). The intention is that lnd cell #2 will be - ! ignored in verification; it is only included so that we can set up a non-null map - ! (since simple_map_type won't let you include mapping weights of 0). (Lnd cell #2 - ! can be ignored by setting first_lnd_index_to_verify=1, last_lnd_index_to_verify=1 - ! in the call to verify_data_l_field.) - class(TestMapGlc2lnd), intent(inout) :: this - real(r8), intent(in) :: frac_glc ! frac in the single glc cell - real(r8), intent(in) :: topo_glc ! topo in the single glc cell - real(r8), intent(in), optional :: data_glc ! data in the single glc cell - - real(r8) :: l_data_glc ! local version of data_glc - type(simple_map_type) :: my_map - - l_data_glc = 0._r8 - if (present(data_glc)) then - l_data_glc = data_glc - end if - - my_map = simple_map_type( & - source_indices = [1], & - dest_indices = [2], & - overlap_weights = [1._r8]) - - call this%setup_inputs([frac_glc], [topo_glc], my_map, data_glc = [l_data_glc]) - end subroutine setup_inputs_lnd_fully_outside_static_glc_domain - - subroutine setup_inputs_lnd_partially_outside_static_glc_domain(this, frac_glc, & - topo_glc, data_glc) - ! Calls setup_inputs with a domain that has 1 lnd cell, 1 glc cell. - ! - ! The lnd cell is partially outside the static glc domain, with an overlap of 0.25. - class(TestMapGlc2lnd), intent(inout) :: this - real(r8), intent(in) :: frac_glc ! frac in the single glc cell - real(r8), intent(in) :: topo_glc ! topo in the single glc cell - real(r8), intent(in), optional :: data_glc ! data in the single glc cell - - real(r8) :: l_data_glc ! 
local version of data_glc - type(simple_map_type) :: my_map - - l_data_glc = 0._r8 - if (present(data_glc)) then - l_data_glc = data_glc - end if - - my_map = simple_map_type( & - source_indices = [1], & - dest_indices = [1], & - overlap_weights = [0.25_r8]) - - call this%setup_inputs([frac_glc], [topo_glc], my_map, data_glc = [l_data_glc]) - end subroutine setup_inputs_lnd_partially_outside_static_glc_domain - - subroutine setup_inputs_lnd_fully_outside_dynamic_glc_domain(this, frac_glc, topo_glc, & - data_glc) - ! Calls setup_inputs with a domain that has 1 lnd cell, 1 glc cell. - ! - ! The lnd cell is entirely within the static glc domain (defined by the mapping - ! file), but entirely outside the dynamic domain (defined by the icemask field). - class(TestMapGlc2lnd), intent(inout) :: this - real(r8), intent(in) :: frac_glc ! frac in the single glc cell - real(r8), intent(in) :: topo_glc ! topo in the single glc cell - real(r8), intent(in), optional :: data_glc ! data in the single glc cell - - real(r8) :: l_data_glc ! local version of data_glc - type(simple_map_type) :: my_map - - l_data_glc = 0._r8 - if (present(data_glc)) then - l_data_glc = data_glc - end if - - my_map = create_simple_map_with_one_source(ndest = 1) - - call this%setup_inputs([frac_glc], [topo_glc], my_map, data_glc = [l_data_glc], & - icemask_glc = [0._r8]) - end subroutine setup_inputs_lnd_fully_outside_dynamic_glc_domain - - subroutine setup_inputs_lnd_partially_outside_dynamic_glc_domain(this, & - frac_glc_in_domain, frac_glc_outside_domain, & - topo_glc_in_domain, topo_glc_outside_domain, & - data_glc_in_domain, data_glc_outside_domain) - ! Calls setup_inputs with a domain that has 1 lnd cell, 2 glc cells. - ! - ! The lnd cell is entirely within the static glc domain (defined by the mapping - ! file), but partially outside the dynamic domain (defined by the icemask field). - ! - ! Specifically: - ! - glc cell 1 (arguments with '_in_domain' suffix) has icemask = 1, overlap = 0.75. - ! - glc cell 2 (arguments with '_outside_domain' suffix) has icemask = 0, overlap = 0.25. - class(TestMapGlc2lnd), intent(inout) :: this - real(r8), intent(in) :: frac_glc_in_domain ! frac in the glc cell with icemask = 1 - real(r8), intent(in) :: frac_glc_outside_domain ! frac in the glc cell with icemask = 0 - real(r8), intent(in) :: topo_glc_in_domain ! topo in the glc cell with icemask = 1 - real(r8), intent(in) :: topo_glc_outside_domain ! topo in the glc cell with icemask = 0 - real(r8), intent(in), optional :: data_glc_in_domain ! data in the glc cell with icemask = 1 - real(r8), intent(in), optional :: data_glc_outside_domain ! data in the glc cell with icemask = 0 - - real(r8) :: l_data_glc_in_domain ! local version of data_glc_in_domain - real(r8) :: l_data_glc_outside_domain ! 
local version of data_glc_outside_domain - type(simple_map_type) :: my_map - - l_data_glc_in_domain = 0._r8 - if (present(data_glc_in_domain)) then - l_data_glc_in_domain = data_glc_in_domain - end if - l_data_glc_outside_domain = 0._r8 - if (present(data_glc_outside_domain)) then - l_data_glc_outside_domain = data_glc_outside_domain - end if - - my_map = simple_map_type( & - source_indices = [1, 2], & - dest_indices = [1, 1], & - overlap_weights = [0.75_r8, 0.25_r8]) - - call this%setup_inputs( & - [frac_glc_in_domain, frac_glc_outside_domain], & - [topo_glc_in_domain, topo_glc_outside_domain], & - my_map, & - data_glc = [l_data_glc_in_domain, l_data_glc_outside_domain], & - icemask_glc = [1._r8, 0._r8]) - end subroutine setup_inputs_lnd_partially_outside_dynamic_glc_domain - - subroutine run_map_glc2lnd_ec(this, extra_fields) - ! This utility function wraps the call to the map_glc2lnd_ec routine - class(TestMapGlc2lnd), intent(inout) :: this - character(len=*), intent(in), optional :: extra_fields ! extra fields to map - - character(len=:), allocatable :: l_extra_fields ! local version of extra_fields - - l_extra_fields = ' ' - if (present(extra_fields)) then - l_extra_fields = extra_fields - end if - - call map_glc2lnd_ec(g2x_g = this%data_g, & - frac_field = 'Sg_ice_covered', topo_field = 'Sg_topo', icemask_field = 'Sg_icemask', & - extra_fields = l_extra_fields, & - mapper = this%mapper, g2x_l = this%data_l) - - end subroutine run_map_glc2lnd_ec - - subroutine verify_data_l_field(this, fieldname, expected_lnd, message, & - first_lnd_index_to_verify, last_lnd_index_to_verify) - ! Verify one field on the land grid - class(TestMapGlc2lnd), intent(in) :: this - character(len=*), intent(in) :: fieldname ! base name of field (no elev class suffix) - type(lnd_field_type), intent(in) :: expected_lnd(:) - character(len=*), intent(in) :: message - - ! Specify the following if you only want to verify a subset of the total land points. - ! You must either specify both or neither of these. - ! If these are specified, then expected_lnd should be of size - ! (last_lnd_index_to_verify - first_lnd_index_to_verify + 1) - integer, intent(in), optional :: first_lnd_index_to_verify - integer, intent(in), optional :: last_lnd_index_to_verify - - integer :: first, last - integer :: n - logical :: args_okay - character(len=:), allocatable :: ec_string - character(len=:), allocatable :: fieldname_ec - character(len=:), allocatable :: full_message - real(r8), allocatable :: actual_lnd_this_ec(:) - - ! Handle optional arguments - - args_okay = .false. - if (.not. present(first_lnd_index_to_verify) .and. & - .not. present(last_lnd_index_to_verify)) then - first = 1 - last = mct_aVect_lsize(this%data_l) - @assertEqual(size(expected_lnd), last, message=message//': number of points') - args_okay = .true. - else if (present(first_lnd_index_to_verify) .and. & - present(last_lnd_index_to_verify)) then - first = first_lnd_index_to_verify - last = last_lnd_index_to_verify - @assertEqual(size(expected_lnd), last-first+1, message=message//': number of points') - args_okay = .true. - end if - @assertTrue(args_okay, message=message//': optional arguments') - - ! 
Do the verification - do n = 0, n_elev_classes - ec_string = glc_elevclass_as_string(n) - fieldname_ec = fieldname // ec_string - actual_lnd_this_ec = aVect_exportRattr(this%data_l, fieldname_ec) - full_message = message//': elevation class ' // ec_string - @assertEqual(expected_lnd(:)%data(n), actual_lnd_this_ec(first:last), message=full_message, tolerance=tol) - end do - end subroutine verify_data_l_field - - subroutine set_topo_to_mean_elevation_virtual(topo_data) - ! Sets topo_data to glc_mean_elevation_virtual for all grid cells and all elevation - ! classes - type(lnd_field_type), intent(out) :: topo_data(:) - - integer :: n - - do n = 0, n_elev_classes - topo_data(:)%data(n) = glc_mean_elevation_virtual(n) - end do - end subroutine set_topo_to_mean_elevation_virtual - - ! ======================================================================== - ! Actual tests - ! ======================================================================== - - ! ------------------------------------------------------------------------ - ! Tests of mapped ice fraction - ! ------------------------------------------------------------------------ - - @Test - subroutine test_mapGlc2lndEC_frac_with_EC0(this) - ! Do a test of the map_glc2lnd_ec routine with only an elevation class 0 source - ! point. Check the mapped frac. - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nglc = 1 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - type(lnd_field_type) :: frac_lnd_expected(nlnd) - type(simple_map_type) :: my_map - real(r8), parameter :: irrelevant_topo = 125._r8 ! irrelevant for this test - - ! Setup - - my_map = create_simple_map_with_one_source(ndest = nlnd) - - frac_glc(1) = 0._r8 - topo_glc(1) = irrelevant_topo - - call this%setup_inputs(frac_glc, topo_glc, my_map) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! Verify - - frac_lnd_expected(1)%data(:) = 0._r8 - frac_lnd_expected(1)%data(0) = 1._r8 - - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_with_EC0: Sg_ice_covered') - end subroutine test_mapGlc2lndEC_frac_with_EC0 - - @Test - subroutine test_mapGlc2lndEC_frac_with_EC2(this) - ! Do a test of the map_glc2lnd_ec routine with only an elevation class 2 source - ! point. Check the mapped frac. - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nglc = 1 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - type(lnd_field_type) :: frac_lnd_expected(nlnd) - type(simple_map_type) :: my_map - - ! Setup - - my_map = create_simple_map_with_one_source(ndest = nlnd) - - frac_glc(1) = 1._r8 - topo_glc(1) = 125._r8 - - call this%setup_inputs(frac_glc, topo_glc, my_map) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! Verify - - frac_lnd_expected(1)%data(:) = 0._r8 - frac_lnd_expected(1)%data(2) = 1._r8 - - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_with_EC2: Sg_ice_covered') - end subroutine test_mapGlc2lndEC_frac_with_EC2 - - @Test - subroutine test_mapGlc2lndEC_frac_with_allECs(this) - ! Do a test of the map_glc2lnd_ec routine with source points from each elevation - ! class. Check the mapped frac. 
- class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nglc = 4 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - type(lnd_field_type) :: frac_lnd_expected(nlnd) - type(simple_map_type) :: my_map - real(r8), parameter :: irrelevant_topo = 125._r8 - - ! Setup - - frac_glc(:) = [1._r8, 1._r8, 1._r8, 0._r8] - topo_glc(:) = [225._r8, 125._r8, 25._r8, irrelevant_topo] - - my_map = simple_map_type( & - source_indices = [1, 2, 3, 4], & - dest_indices = [1, 1, 1, 1], & - overlap_weights = [0.4_r8, 0.3_r8, 0.2_r8, 0.1_r8]) - - call this%setup_inputs(frac_glc, topo_glc, my_map) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! Verify - - frac_lnd_expected(1)%data(:) = [0.1_r8, 0.2_r8, 0.3_r8, 0.4_r8] - - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_with_allECs: Sg_ice_covered') - end subroutine test_mapGlc2lndEC_frac_with_allECs - - @Test - subroutine test_mapGlc2lndEC_frac_fully_outside_static_domain(this) - ! Test mapped fraction with a land cell that is fully outside the static GLC domain - ! (i.e., mapping weight is 0). - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: frac_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_fully_outside_static_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - frac_lnd_expected(1)%data(:) = 0._r8 - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_fully_outside_static_domain: Sg_ice_covered', & - first_lnd_index_to_verify = 1, last_lnd_index_to_verify = 1) - end subroutine test_mapGlc2lndEC_frac_fully_outside_static_domain - - @Test - subroutine test_mapGlc2lndEC_frac_partially_outside_static_domain(this) - ! Test mapped fraction with a land cell that is partially outside the static GLC domain - ! (i.e., mapping weight is 0). - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: frac_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_partially_outside_static_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - ! Note that we expect the land cell to end up fully covered with ice, despite being - ! partially outside the GLC domain. This is achieved by mapping with normalization. - frac_lnd_expected(1)%data(:) = [0._r8, 0._r8, 1._r8, 0._r8] - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_partially_outside_static_domain: Sg_ice_covered') - end subroutine test_mapGlc2lndEC_frac_partially_outside_static_domain - - @Test - subroutine test_mapGlc2lndEC_frac_fully_outside_dynamic_domain(this) - ! Test mapped fraction with a land cell that is fully outside the dynamic GLC domain - ! (i.e., icemask is 0) - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: frac_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_fully_outside_dynamic_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! 
Verify - frac_lnd_expected(1)%data(:) = 0._r8 - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_fully_outside_dynamic_domain: Sg_ice_covered') - end subroutine test_mapGlc2lndEC_frac_fully_outside_dynamic_domain - - @Test - subroutine test_mapGlc2lndEC_frac_partially_outside_dynamic_domain_diffECs(this) - ! Test mapped fraction with a land cell that is partially outside the dynamic GLC - ! domain, with two GLC points in different elevation classes - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: frac_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_partially_outside_dynamic_glc_domain( & - frac_glc_in_domain = 1._r8, frac_glc_outside_domain = 1._r8, & - topo_glc_in_domain = 125._r8, topo_glc_outside_domain = 25._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - frac_lnd_expected(1)%data(:) = [0._r8, 0._r8, 1._r8, 0._r8] - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_partially_outside_dynamic_domain_diffECs: Sg_ice_covered') - end subroutine test_mapGlc2lndEC_frac_partially_outside_dynamic_domain_diffECs - - @Test - subroutine test_mapGlc2lndEC_frac_partially_outside_dynamic_domain_sameEC(this) - ! Test mapped fraction with a land cell that is partially outside the dynamic GLC - ! domain, with two GLC points in the same elevation class - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: frac_lnd_expected(1) - - ! Setup - ! Note that we are specifying fracs that are between 0 and 1. This situation - ! currently won't arise in practice, but using a fraction of 1 for both glc cells - ! doesn't have much testing power for this case: we couldn't tell if the point - ! outside the mask is being ignored or not. - call this%setup_inputs_lnd_partially_outside_dynamic_glc_domain( & - frac_glc_in_domain = 0.6_r8, frac_glc_outside_domain = 0.4_r8, & - topo_glc_in_domain = 125._r8, topo_glc_outside_domain = 125._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - frac_lnd_expected(1)%data(:) = [0.4_r8, 0._r8, 0.6_r8, 0._r8] - call this%verify_data_l_field(fieldname='Sg_ice_covered', expected_lnd=frac_lnd_expected, & - message = 'test_mapGlc2lndEC_frac_partially_outside_dynamic_domain_sameEC: Sg_ice_covered') - end subroutine test_mapGlc2lndEC_frac_partially_outside_dynamic_domain_sameEC - - ! ------------------------------------------------------------------------ - ! Tests of mapped topo - ! ------------------------------------------------------------------------ - - @Test - subroutine test_mapGlc2lndEC_topo_with_EC0(this) - ! Do a test of the map_glc2lnd_ec routine with only an elevation class 0 source - ! point. Check the mapped topo. - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nglc = 1 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - type(simple_map_type) :: my_map - real(r8) :: topo_expected_ec0(nlnd) - real(r8), allocatable :: topo_actual_ec0(:) - - ! Setup - - my_map = create_simple_map_with_one_source(ndest = nlnd) - - frac_glc(1) = 0._r8 - topo_glc(1) = 125._r8 - - call this%setup_inputs(frac_glc, topo_glc, my_map) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! 
Verify - - topo_expected_ec0(1) = topo_glc(1) - topo_actual_ec0 = aVect_exportRattr(this%data_l, 'Sg_topo00') - @assertEqual(topo_expected_ec0, topo_actual_ec0) - end subroutine test_mapGlc2lndEC_topo_with_EC0 - - @Test - subroutine test_mapGlc2lndEC_topo_with_EC2(this) - ! Do a test of the map_glc2lnd_ec routine with only an elevation class 2 source - ! point. Check the mapped topo. - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nglc = 1 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - type(simple_map_type) :: my_map - real(r8) :: topo_expected_ec2(nlnd) - real(r8), allocatable :: topo_actual_ec2(:) - - ! Setup - - my_map = create_simple_map_with_one_source(ndest = nlnd) - - frac_glc(1) = 1._r8 - topo_glc(1) = 125._r8 - - call this%setup_inputs(frac_glc, topo_glc, my_map) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! Verify - - topo_expected_ec2(1) = topo_glc(1) - topo_actual_ec2 = aVect_exportRattr(this%data_l, 'Sg_topo02') - @assertEqual(topo_expected_ec2, topo_actual_ec2) - end subroutine test_mapGlc2lndEC_topo_with_EC2 - - @Test - subroutine test_mapGlc2lndEC_topo_virtual_elevation_class(this) - ! Do a test of the map_glc2lnd_ec routine, checking mapped topo for virtual elevation - ! classes. - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nglc = 1 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - type(simple_map_type) :: my_map - type(lnd_field_type) :: topo_lnd_expected(nlnd) - - ! Setup - - my_map = create_simple_map_with_one_source(ndest = nlnd) - - ! We have a non-virtual elevation class 2; all other elevation classes (including - ! bare land) are virtual. - frac_glc(1) = 1._r8 - topo_glc(1) = 125._r8 - - call this%setup_inputs(frac_glc, topo_glc, my_map) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! Verify - - topo_lnd_expected(1)%data(0) = glc_mean_elevation_virtual(0) - topo_lnd_expected(1)%data(1) = glc_mean_elevation_virtual(1) - topo_lnd_expected(1)%data(2) = topo_glc(1) ! non-virtual - topo_lnd_expected(1)%data(3) = glc_mean_elevation_virtual(3) - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_virtual_elevation_class: Sg_topo') - - end subroutine test_mapGlc2lndEC_topo_virtual_elevation_class - - @Test - subroutine test_mapGlc2lndEC_topo_virtual_elevation_class_multiple_points(this) - ! Do a test of the map_glc2lnd_ec routine, checking mapped topo for virtual elevation - ! classes, with multiple points (to ensure that real points aren't getting assigned - ! the elevation from virtual elevation classes). - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 2 - integer, parameter :: nglc = 2 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - type(simple_map_type) :: my_map - type(lnd_field_type) :: topo_lnd_expected(nlnd) - integer :: n - - ! Setup - - frac_glc(:) = [1._r8, 1._r8] - topo_glc(:) = [125._r8, 225._r8] - - my_map = simple_map_type( & - source_indices = [1, 2], & - dest_indices = [1, 2], & - overlap_weights = [1._r8, 1._r8]) - - call this%setup_inputs(frac_glc, topo_glc, my_map) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! Verify - - call set_topo_to_mean_elevation_virtual(topo_lnd_expected) - ! 
But set non-virtual points: - topo_lnd_expected(1)%data(2) = 125._r8 - topo_lnd_expected(2)%data(3) = 225._r8 - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_virtual_elevation_class_multiple_points: Sg_topo') - - end subroutine test_mapGlc2lndEC_topo_virtual_elevation_class_multiple_points - - @Test - subroutine test_mapGlc2lndEC_topo_fully_outside_static_domain(this) - ! Test mapped topo with a land cell that is fully outside the static GLC domain - ! (i.e., mapping weight is 0). - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: topo_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_fully_outside_static_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - call set_topo_to_mean_elevation_virtual(topo_lnd_expected) - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_fully_outside_static_domain: Sg_topo', & - first_lnd_index_to_verify = 1, last_lnd_index_to_verify = 1) - end subroutine test_mapGlc2lndEC_topo_fully_outside_static_domain - - @Test - subroutine test_mapGlc2lndEC_topo_partially_outside_static_domain(this) - ! Test mapped topo with a land cell that is partially outside the static GLC domain - ! (i.e., mapping weight is 0). - ! - ! Note: the logic is the same in this respect for topo and any other data field, so - ! this can also be considered to be a test for other data fields, when a land cell is - ! partially outside the static GLC domain. - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: topo_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_partially_outside_static_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - call set_topo_to_mean_elevation_virtual(topo_lnd_expected) - topo_lnd_expected(1)%data(2) = 125._r8 - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_partially_outside_static_domain: Sg_topo') - end subroutine test_mapGlc2lndEC_topo_partially_outside_static_domain - - @Test - subroutine test_mapGlc2lndEC_topo_fully_outside_dynamic_domain(this) - ! Test mapped topo with a land cell that is fully outside the dynamic GLC domain - ! (i.e., icemask is 0). - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: topo_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_fully_outside_dynamic_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - call set_topo_to_mean_elevation_virtual(topo_lnd_expected) - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_fully_outside_dynamic_domain: Sg_topo') - end subroutine test_mapGlc2lndEC_topo_fully_outside_dynamic_domain - - @Test - subroutine test_mapGlc2lndEC_topo_partially_outside_dynamic_domain_sameEC(this) - ! Test mapped topo with a land cell that is partially outside the dynamic GLC - ! domain, with two GLC points in the same elevation class - ! - ! Note: the logic is the same in this respect for topo and any other data field, so - ! this can also be considered to be a test for other data fields, when a land cell is - ! partially outside the dynamic GLC domain. - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: topo_lnd_expected(1) - - ! 
Setup - call this%setup_inputs_lnd_partially_outside_dynamic_glc_domain( & - frac_glc_in_domain = 1._r8, frac_glc_outside_domain = 1._r8, & - topo_glc_in_domain = 125._r8, topo_glc_outside_domain = 175._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - call set_topo_to_mean_elevation_virtual(topo_lnd_expected) - topo_lnd_expected(1)%data(2) = 125._r8 - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_partially_outside_dynamic_domain_sameEC: Sg_topo') - end subroutine test_mapGlc2lndEC_topo_partially_outside_dynamic_domain_sameEC - - @Test - subroutine test_mapGlc2lndEC_topo_partially_outside_dynamic_domain_diffECs(this) - ! Test mapped topo with a land cell that is partially outside the dynamic GLC - ! domain, with two GLC points in different elevation classes - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: topo_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_partially_outside_dynamic_glc_domain( & - frac_glc_in_domain = 1._r8, frac_glc_outside_domain = 1._r8, & - topo_glc_in_domain = 125._r8, topo_glc_outside_domain = 25._r8) - - ! Exercise - call this%run_map_glc2lnd_ec() - - ! Verify - call set_topo_to_mean_elevation_virtual(topo_lnd_expected) - topo_lnd_expected(1)%data(2) = 125._r8 - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_partially_outside_dynamic_domain_diffECs: Sg_topo') - end subroutine test_mapGlc2lndEC_topo_partially_outside_dynamic_domain_diffECs - - @Test - subroutine test_mapGlc2lndEC_topo_multiple_sources(this) - ! Test mapped topo with multiple source points, with a variety of ice masks and - ! elevation classes. - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nglc = 6 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - real(r8) :: icemask(nglc) - real(r8) :: overlap(nglc) - type(simple_map_type) :: my_map - type(lnd_field_type) :: topo_lnd_expected(1) - - ! Setup - - ! EC2 , EC2-fractional, EC0 , MASK0-FRAC1, MASK0-FRAC0, EC3 - icemask(:) = [1._r8 , 1._r8 , 1._r8 , 0._r8 , 0._r8 , 1._r8] - frac_glc(:) = [1._r8 , 0.8_r8 , 0._r8 , 1._r8 , 0._r8 , 1._r8] - topo_glc(:) = [110._r8, 120._r8 , 130._r8, 140._r8 , 150._r8 , 210._r8] - overlap(:) = [0.2_r8 , 0.2_r8 , 0.2_r8 , 0.2_r8 , 0.1_r8 , 0.1_r8] - - my_map = simple_map_type( & - source_indices = [1, 2, 3, 4, 5, 6], & - dest_indices = [1, 1, 1, 1, 1, 1], & - overlap_weights = overlap) - - call this%setup_inputs(frac_glc, topo_glc, my_map, icemask_glc = icemask) - - ! Exercise - - call this%run_map_glc2lnd_ec() - - ! Verify - - call set_topo_to_mean_elevation_virtual(topo_lnd_expected) - topo_lnd_expected(1)%data(0) = (130._r8 + 120._r8 * 0.2_r8) / 1.2_r8 - topo_lnd_expected(1)%data(2) = (110._r8 + 120._r8 * 0.8_r8) / 1.8_r8 - topo_lnd_expected(1)%data(3) = 210._r8 - call this%verify_data_l_field(fieldname='Sg_topo', expected_lnd=topo_lnd_expected, & - message = 'test_mapGlc2lndEC_topo_multiple_sources: Sg_topo') - end subroutine test_mapGlc2lndEC_topo_multiple_sources - - ! ------------------------------------------------------------------------ - ! Tests of mapped data field - ! ------------------------------------------------------------------------ - - @Test - subroutine test_mapGlc2lndEC_data(this) - ! Do a test of the map_glc2lnd_ec routine with only an elevation class 2 source - ! point. Check the mapped data field. Point is to make sure that extra fields - ! 
(besides frac and topo) get mapped properly. - class(TestMapGlc2lnd), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nglc = 1 - real(r8) :: frac_glc(nglc) - real(r8) :: topo_glc(nglc) - real(r8) :: data_glc(nglc) - type(simple_map_type) :: my_map - real(r8) :: data_expected_ec2(nlnd) - real(r8), allocatable :: data_actual_ec2(:) - - ! Setup - - my_map = create_simple_map_with_one_source(ndest = nlnd) - - frac_glc(1) = 1._r8 - topo_glc(1) = 125._r8 - data_glc(1) = 12345._r8 - - call this%setup_inputs(frac_glc, topo_glc, my_map, data_glc=data_glc) - - ! Exercise - - call this%run_map_glc2lnd_ec(extra_fields = 'data') - - ! Verify - - data_expected_ec2(1) = data_glc(1) - data_actual_ec2 = aVect_exportRattr(this%data_l, 'data02') - @assertEqual(data_expected_ec2, data_actual_ec2) - end subroutine test_mapGlc2lndEC_data - - @Test - subroutine test_mapGlc2lndEC_data_fully_outside_static_domain(this) - ! Test mapped data with a land cell that is fully outside the static GLC domain - ! (i.e., mapping weight is 0). - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: data_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_fully_outside_static_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8, data_glc = 12345._r8) - - ! Exercise - call this%run_map_glc2lnd_ec(extra_fields = 'data') - - ! Verify - data_lnd_expected(1)%data(:) = 0._r8 - call this%verify_data_l_field(fieldname='data', expected_lnd=data_lnd_expected, & - message = 'test_mapGlc2lndEC_data_fully_outside_static_domain: data', & - first_lnd_index_to_verify = 1, last_lnd_index_to_verify = 1) - end subroutine test_mapGlc2lndEC_data_fully_outside_static_domain - - @Test - subroutine test_mapGlc2lndEC_data_fully_outside_dynamic_domain(this) - ! Test mapped data with a land cell that is fully outside the dynamic GLC domain - ! (i.e., icemask is 0). - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: data_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_fully_outside_dynamic_glc_domain( & - frac_glc = 1._r8, topo_glc = 125._r8, data_glc = 12345._r8) - - ! Exercise - call this%run_map_glc2lnd_ec(extra_fields = 'data') - - ! Verify - data_lnd_expected(1)%data(:) = 0._r8 - call this%verify_data_l_field(fieldname='data', expected_lnd=data_lnd_expected, & - message = 'test_mapGlc2lndEC_data_fully_outside_dynamic_domain: data') - end subroutine test_mapGlc2lndEC_data_fully_outside_dynamic_domain - - @Test - subroutine test_mapGlc2lndEC_data_partially_outside_dynamic_domain_diffECs(this) - ! Test mapped data with a land cell that is partially outside the dynamic GLC - ! domain, with two GLC points in different elevation classes - class(TestMapGlc2lnd), intent(inout) :: this - type(lnd_field_type) :: data_lnd_expected(1) - - ! Setup - call this%setup_inputs_lnd_partially_outside_dynamic_glc_domain( & - frac_glc_in_domain = 1._r8, frac_glc_outside_domain = 1._r8, & - topo_glc_in_domain = 125._r8, topo_glc_outside_domain = 25._r8, & - data_glc_in_domain = 12345._r8, data_glc_outside_domain = 6789._r8) - - ! Exercise - call this%run_map_glc2lnd_ec(extra_fields = 'data') - - ! 
Verify - data_lnd_expected(1)%data(:) = [0._r8, 0._r8, 12345._r8, 0._r8] - call this%verify_data_l_field(fieldname='data', expected_lnd=data_lnd_expected, & - message = 'test_mapGlc2lndEC_data_partially_outside_dynamic_domain_diffECs: data') - end subroutine test_mapGlc2lndEC_data_partially_outside_dynamic_domain_diffECs - -end module test_map_glc2lnd diff --git a/src/drivers/mct/unit_test/map_lnd2rof_irrig_test/CMakeLists.txt b/src/drivers/mct/unit_test/map_lnd2rof_irrig_test/CMakeLists.txt deleted file mode 100644 index 199341061ce..00000000000 --- a/src/drivers/mct/unit_test/map_lnd2rof_irrig_test/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -set (pfunit_sources - test_map_lnd2rof_irrig.pf - ) - -create_pFUnit_test(map_lnd2rof_irrig map_lnd2rof_irrig_exe - "${pfunit_sources}" "") - -target_link_libraries(map_lnd2rof_irrig_exe ${DRV_UNIT_TEST_LIBS}) diff --git a/src/drivers/mct/unit_test/map_lnd2rof_irrig_test/test_map_lnd2rof_irrig.pf b/src/drivers/mct/unit_test/map_lnd2rof_irrig_test/test_map_lnd2rof_irrig.pf deleted file mode 100644 index f50bf8f3b1a..00000000000 --- a/src/drivers/mct/unit_test/map_lnd2rof_irrig_test/test_map_lnd2rof_irrig.pf +++ /dev/null @@ -1,242 +0,0 @@ -module test_map_lnd2rof_irrig - - ! Tests of map_lnd2rof_irrig_mod - -#include "shr_assert.h" - use pfunit_mod - use map_lnd2rof_irrig_mod - use shr_kind_mod , only : r8 => shr_kind_r8 - use shr_log_mod, only : errMsg => shr_log_errMsg - use mct_mod, only : mct_aVect, mct_aVect_clean, mct_aVect_lsize - use seq_map_type_mod, only : seq_map - use mct_wrapper_mod, only : mct_init, mct_clean - use avect_wrapper_mod - use simple_map_mod - use create_mapper_mod - - implicit none - - @TestCase - type, extends(TestCase) :: TestMapL2RIrrig - type(mct_aVect) :: l2r_l ! data on the lnd (source) grid - type(mct_aVect) :: l2r_r ! data on the rof (destination) grid - type(mct_aVect) :: r2x_r ! auxiliary data on the rof grid - type(seq_map) :: mapper_Fl2r - type(seq_map) :: mapper_Fr2l - contains - procedure :: setUp - procedure :: tearDown - procedure :: setup_inputs - procedure :: run_map_lnd2rof_irrig ! wrapper to the SUT - end type TestMapL2RIrrig - - real(r8), parameter :: tol = 1.e-13_r8 - - character(len=*), parameter :: irrig_flux_field = 'irrig' - - character(len=*), parameter, private :: sourcefile = & - __FILE__ -contains - - ! ======================================================================== - ! Utility routines - ! ======================================================================== - - subroutine setUp(this) - class(TestMapL2RIrrig), intent(inout) :: this - - call mct_init() - end subroutine setUp - - subroutine tearDown(this) - class(TestMapL2RIrrig), intent(inout) :: this - - call clean_mapper(this%mapper_Fl2r) - call clean_mapper(this%mapper_Fr2l) - call mct_aVect_clean(this%l2r_l) - call mct_aVect_clean(this%l2r_r) - call mct_aVect_clean(this%r2x_r) - call mct_clean() - end subroutine tearDown - - subroutine setup_inputs(this, irrig_l, volr_r, map_l2r, map_r2l) - ! This utility function sets up inputs that are needed for the map_lnd2rof_irrig call - class(TestMapL2RIrrig), intent(inout) :: this - real(r8), intent(in) :: irrig_l(:) ! irrigation on the land grid - real(r8), intent(in) :: volr_r(:) ! 
river volume on the rof grid - type(simple_map_type), intent(in) :: map_l2r - type(simple_map_type), intent(in) :: map_r2l - - integer :: nlnd - integer :: nrof - character(len=*), parameter :: volr_field = 'Flrr_volrmch' - - nlnd = map_l2r%get_n_source_points() - nrof = map_l2r%get_n_dest_points() - call shr_assert(map_r2l%get_n_dest_points() == nlnd, file=sourcefile, line=__LINE__) - call shr_assert(map_r2l%get_n_source_points() == nrof, file=sourcefile, line=__LINE__) - call shr_assert(size(irrig_l) == nlnd, file=sourcefile, line=__LINE__) - call shr_assert(size(volr_r) == nrof, file=sourcefile, line=__LINE__) - - call create_aVect_with_data_rows_are_points(this%l2r_l, & - attr_tags = [irrig_flux_field], & - data = reshape(irrig_l, [nlnd, 1])) - - call create_aVect_without_data(this%l2r_r, attr_tags = [irrig_flux_field], lsize = nrof) - - call create_aVect_with_data_rows_are_points(this%r2x_r, & - attr_tags = [volr_field], & - data = reshape(volr_r, [nrof, 1])) - - call create_mapper(this%mapper_Fl2r, map_l2r) - call create_mapper(this%mapper_Fr2l, map_r2l) - - end subroutine setup_inputs - - subroutine run_map_lnd2rof_irrig(this) - ! This utility function wraps the call to the map_lnd2rof_irrig routine - ! - ! It uses an avwts_s set to 1 everywhere - class(TestMapL2RIrrig), intent(inout) :: this - - integer :: nlnd - real(r8), allocatable :: avwts(:) - type(mct_aVect) :: avwts_s - character(len=*), parameter :: avwtsfld_s = 'my_avwtsfld' - - ! Set up avwts_s with weights set to 1 everywhere - nlnd = mct_aVect_lsize(this%l2r_l) - allocate(avwts(nlnd)) - avwts(:) = 1._r8 - call create_aVect_with_data_rows_are_points(avwts_s, & - attr_tags = [avwtsfld_s], & - data = reshape(avwts, [nlnd, 1])) - - ! Do the main SUT call - call map_lnd2rof_irrig( & - l2r_l = this%l2r_l, & - r2x_r = this%r2x_r, & - irrig_flux_field = irrig_flux_field, & - avwts_s = avwts_s, & - avwtsfld_s = avwtsfld_s, & - mapper_Fl2r = this%mapper_Fl2r, & - mapper_Fr2l = this%mapper_Fr2l, & - l2r_r = this%l2r_r) - - ! Clean up - deallocate(avwts) - call mct_aVect_clean(avwts_s) - end subroutine run_map_lnd2rof_irrig - - ! ======================================================================== - ! Actual tests - ! ======================================================================== - - @Test - subroutine test_standardCase_oneLand_twoRof(this) - ! Standard case with one land (source) cell and two rof (destination) cells - class(TestMapL2RIrrig), intent(inout) :: this - integer, parameter :: nlnd = 1 - integer, parameter :: nrof = 2 - real(r8), parameter :: irrig_l(nlnd) = [100._r8] - real(r8), parameter :: volr_r(nrof) = [1._r8, 3._r8] - real(r8) :: irrig_r(nrof) - real(r8) :: expected_volr_l - real(r8) :: expected_irrig_r(nrof) - real(r8) :: sum_irrig_r - type(simple_map_type) :: map_l2r - type(simple_map_type) :: map_r2l - - ! Setup - map_l2r = create_simple_map_with_one_source(ndest = nrof) - map_r2l = simple_map_type( & - source_indices = [1, 2], & - dest_indices = [1, 1], & - overlap_weights = [0.4_r8, 0.6_r8]) - call this%setup_inputs( & - irrig_l = irrig_l, & - volr_r = volr_r, & - map_l2r = map_l2r, & - map_r2l = map_r2l) - - ! Exercise - call this%run_map_lnd2rof_irrig() - - ! Verify - irrig_r = aVect_exportRattr(this%l2r_r, irrig_flux_field) - expected_volr_l = 0.4_r8 * 1._r8 + 0.6_r8 * 3._r8 - expected_irrig_r(1) = irrig_l(1) * 1._r8 / expected_volr_l - expected_irrig_r(2) = irrig_l(1) * 3._r8 / expected_volr_l - @assertEqual(expected_irrig_r, irrig_r, tolerance=tol) - ! 
Also make sure this is conservative: - sum_irrig_r = 0.4_r8*irrig_r(1) + 0.6_r8*irrig_r(2) - @assertEqual(irrig_l(1), sum_irrig_r, tolerance=tol) - end subroutine test_standardCase_oneLand_twoRof - - @Test - subroutine test_zero_and_negative(this) - ! This tests the handling of rof cells with 0 or negative volr. It includes two land - ! (source) points, to ensure that each land cell gets its own handling and to ensure - ! that R2 (which overlaps L1 and L2) gets the appropriate value. - ! - ! This has the following setup: - ! - ! L (2 cells): 111222 - ! R (3 cells): 112233 - ! R1 has volr = 0 - ! R2 has volr = -10 - ! R3 has volr = 2 - ! - ! Then Irrig_L(1) should be mapped evenly to R1 and R2 (regular mapping); Irrig_L(2) - ! should be mapped entirely to R3 (because R2 should be reset to 0, then it should use - ! normalized mapping). - class(TestMapL2RIrrig), intent(inout) :: this - integer, parameter :: nlnd = 2 - integer, parameter :: nrof = 3 - real(r8), parameter :: irrig_l(nlnd) = [10._r8, 100._r8] - real(r8), parameter :: volr_r(nrof) = [0._r8, -10._r8, 2._r8] - real(r8) :: irrig_r(nrof) - real(r8) :: expected_irrig_r2 - type(simple_map_type) :: map_l2r - type(simple_map_type) :: map_r2l - real(r8) :: sum_irrig_l - real(r8) :: sum_irrig_r - - ! Setup - map_l2r = simple_map_type( & - source_indices = [1, 1, 2, 2], & - dest_indices = [1, 2, 2, 3], & - overlap_weights = [1._r8, 0.5_r8, 0.5_r8, 1._r8]) - - map_r2l = simple_map_type( & - source_indices = [1, 2, 2, 3], & - dest_indices = [1, 1, 2, 2], & - overlap_weights = [2._r8/3._r8, 1._r8/3._r8, 1._r8/3._r8, 2._r8/3._r8]) - - call this%setup_inputs( & - irrig_l = irrig_l, & - volr_r = volr_r, & - map_l2r = map_l2r, & - map_r2l = map_r2l) - - ! Exercise - call this%run_map_lnd2rof_irrig() - - ! Verify - irrig_r = aVect_exportRattr(this%l2r_r, irrig_flux_field) - ! L1 is mapped without normalization, so the flux in R1 is simply the flux in L1 - @assertEqual(irrig_l(1), irrig_r(1), tolerance=tol) - ! L2 is mapped with normalization; all of its irrigation goes into R3 - @assertEqual(irrig_l(2) * 3._r8/2._r8, irrig_r(3), tolerance=tol) - ! R2 overlaps L1 and L2, with 50% in each. From L1 it gets the irrigation flux mapped - ! directly; from L2 it gets 0 (since L2 is mapped with normalization, and R2's volr < - ! 0). - @assertEqual(irrig_l(1) * 0.5_r8, irrig_r(2), tolerance=tol) - ! Also make sure this is conservative - sum_irrig_l = 3._r8 * irrig_l(1) + 3._r8 * irrig_l(2) - sum_irrig_r = 2._r8 * irrig_r(1) + 2._r8 * irrig_r(2) + 2._r8 * irrig_r(3) - @assertEqual(sum_irrig_l, sum_irrig_r) - - end subroutine test_zero_and_negative - -end module test_map_lnd2rof_irrig diff --git a/src/drivers/mct/unit_test/seq_map_test/CMakeLists.txt b/src/drivers/mct/unit_test/seq_map_test/CMakeLists.txt deleted file mode 100644 index 6e12c40dda0..00000000000 --- a/src/drivers/mct/unit_test/seq_map_test/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -create_pFUnit_test(seq_map seq_map_exe - "test_seq_map.pf" "") - -target_link_libraries(seq_map_exe ${DRV_UNIT_TEST_LIBS}) diff --git a/src/drivers/mct/unit_test/seq_map_test/test_seq_map.pf b/src/drivers/mct/unit_test/seq_map_test/test_seq_map.pf deleted file mode 100644 index 47d05a44f77..00000000000 --- a/src/drivers/mct/unit_test/seq_map_test/test_seq_map.pf +++ /dev/null @@ -1,136 +0,0 @@ -module test_seq_map - - ! 
Tests of seq_map_mod - - use pfunit_mod - use seq_map_mod - use seq_map_type_mod - use mct_mod - use mct_wrapper_mod, only : mct_init, mct_clean - use avect_wrapper_mod - use simple_map_mod - use create_mapper_mod - use shr_kind_mod, only : r8 => shr_kind_r8 - - implicit none - - - real(r8), parameter :: tol = 1.e-13_r8 - - @TestCase - type, extends(TestCase) :: TestSeqMap - type(seq_map) :: mapper - type(mct_aVect) :: av_s ! data on the source grid - type(mct_aVect) :: av_d ! data on the destination grid - contains - procedure :: setUp - procedure :: tearDown - end type TestSeqMap - -contains - - subroutine setUp(this) - class(TestSeqMap), intent(inout) :: this - - call mct_init() - end subroutine setUp - - subroutine tearDown(this) - class(TestSeqMap), intent(inout) :: this - - call clean_mapper(this%mapper) - call mct_aVect_clean(this%av_s) - call mct_aVect_clean(this%av_d) - call mct_clean() - end subroutine tearDown - - @Test - subroutine test_seqMapMap_1DestPt_returnsCorrectAV(this) - class(TestSeqMap), intent(inout) :: this - integer, parameter :: npts_source = 3 - integer, parameter :: npts_dest = 1 - real(r8), parameter :: data_source(npts_source) = [1._r8, 11._r8, 12._r8] - real(r8) :: expected_data(npts_dest) - real(r8), allocatable :: actual_data(:) - type(simple_map_type) :: my_map - - ! Setup - - ! Set up an area-conservative mapping that looks like the following: - ! Source: AABC - ! Dest: 1111 - ! i.e., there is a single destination grid cell, which is overlapped by 3 source grid - ! cells: A (50%), B (25%), C (25%) - - ! Set up attribute vectors - call create_aVect_with_data_rows_are_points(this%av_s, & - attr_tags = ['data'], & - data = reshape(data_source, [npts_source,1])) - call create_aVect_without_data(this%av_d, & - attr_tags = ['data'], & - lsize = npts_dest) - - ! Set up mapper. This is an area-conservative remapping. - my_map = simple_map_type( & - source_indices = [1, 2, 3], & - dest_indices = [1, 1, 1], & - overlap_weights = [0.5_r8, 0.25_r8, 0.25_r8]) - call create_mapper(this%mapper, my_map) - - ! Exercise - call seq_map_map(this%mapper, this%av_s, this%av_d) - - ! Verify - - actual_data = aVect_exportRattr(this%av_d, 'data') - expected_data(1) = & - 0.5_r8 * data_source(1) + & - 0.25_r8 * data_source(2) + & - 0.25_r8 * data_source(3) - @assertEqual(expected_data, actual_data, tolerance=tol) - end subroutine test_seqMapMap_1DestPt_returnsCorrectAV - - @Test - subroutine test_seqMapMap_2DestPt_returnsCorrectAV(this) - class(TestSeqMap), intent(inout) :: this - integer, parameter :: npts_source = 3 - integer, parameter :: npts_dest = 2 - real(r8), parameter :: data_source(npts_source) = [1._r8, 11._r8, 12._r8] - real(r8) :: expected_data(npts_dest) - real(r8), allocatable :: actual_data(:) - type(simple_map_type) :: my_map - - ! Setup - - ! Set up attribute vectors - call create_aVect_with_data_rows_are_points(this%av_s, & - attr_tags = ['data'], & - data = reshape(data_source, [npts_source,1])) - call create_aVect_without_data(this%av_d, & - attr_tags = ['data'], & - lsize = npts_dest) - - ! Set up mapper. This is an area-conservative remapping. - my_map = simple_map_type( & - source_indices = [1, 2, 3, 2, 3], & - dest_indices = [1, 1, 1, 2, 2], & - overlap_weights = [0.5_r8, 0.25_r8, 0.25_r8, 0.5_r8, 0.5_r8]) - call create_mapper(this%mapper, my_map) - - ! Exercise - call seq_map_map(this%mapper, this%av_s, this%av_d) - - ! 
Verify - - actual_data = aVect_exportRattr(this%av_d, 'data') - expected_data(1) = & - 0.5_r8 * data_source(1) + & - 0.25_r8 * data_source(2) + & - 0.25_r8 * data_source(3) - expected_data(2) = & - 0.5_r8 * data_source(2) + & - 0.5_r8 * data_source(3) - @assertEqual(expected_data, actual_data, tolerance=tol) - end subroutine test_seqMapMap_2DestPt_returnsCorrectAV - -end module test_seq_map diff --git a/src/drivers/mct/unit_test/stubs/CMakeLists.txt b/src/drivers/mct/unit_test/stubs/CMakeLists.txt deleted file mode 100644 index 572c3166057..00000000000 --- a/src/drivers/mct/unit_test/stubs/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -list(APPEND drv_sources - seq_timemgr_mod.F90 - ) - -sourcelist_to_parent(drv_sources) \ No newline at end of file diff --git a/src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 b/src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 deleted file mode 100644 index f88a96d2da8..00000000000 --- a/src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 +++ /dev/null @@ -1,19 +0,0 @@ -module seq_timemgr_mod - - ! Stub for routines from seq_timemgr_mod that are needed by other modules built by the - ! unit tests. - - implicit none - private - - public :: seq_timemgr_pause_active - -contains - - logical function seq_timemgr_pause_active() - ! Stub for seq_timemgr_pause_active - always returns .false. - - seq_timemgr_pause_active = .false. - end function seq_timemgr_pause_active - -end module seq_timemgr_mod diff --git a/src/drivers/mct/unit_test/utils/CMakeLists.txt b/src/drivers/mct/unit_test/utils/CMakeLists.txt deleted file mode 100644 index ca74c7f0181..00000000000 --- a/src/drivers/mct/unit_test/utils/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -list(APPEND drv_sources - avect_wrapper_mod.F90 - create_mapper_mod.F90 - mct_wrapper_mod.F90 - simple_map_mod.F90 - ) - -sourcelist_to_parent(drv_sources) \ No newline at end of file diff --git a/src/drivers/mct/unit_test/utils/avect_wrapper_mod.F90 b/src/drivers/mct/unit_test/utils/avect_wrapper_mod.F90 deleted file mode 100644 index e1bc78015a2..00000000000 --- a/src/drivers/mct/unit_test/utils/avect_wrapper_mod.F90 +++ /dev/null @@ -1,180 +0,0 @@ -module avect_wrapper_mod - ! This module supports building attribute vectors for use in unit tests, as well as - ! performing other operations on attribute vectors. - - use shr_kind_mod, only : r8 => shr_kind_r8 - use mct_mod - - implicit none - private - save - - - ! The following two routines are the same, except for the meaning of the two dimensions - ! of the 'data' array - public :: create_aVect_with_data_rows_are_points ! creates an attribute vector with a given set of real-valued fields, and fills it with the given data - public :: create_aVect_with_data_rows_are_fields ! creates an attribute vector with a given set of real-valued fields, and fills it with the given data - - public :: create_aVect_without_data ! creates an attribute vector with a given set of real-valued fields - public :: aVect_importRattr ! wrapper to mct_aVect_importRattr which doesn't require a pointer input - public :: aVect_exportRattr ! wrapper to mct_aVect_exportRattr which doesn't require pointer management for the output - -contains - - !----------------------------------------------------------------------- - subroutine create_aVect_with_data_rows_are_points(av, attr_tags, data) - ! - ! !DESCRIPTION: - ! Creates an attribute vector with a given set of fields, which are all assumed to be - ! real-valued. Then fills it with the given data. - ! - ! 
The data should be given as a 2-d array, [point, field]. So the second dimension - ! should be the same size as the attr_tags array, with data(:,i) being used to fill - ! the attr_tags(i) variable. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect), intent(inout) :: av - character(len=*), intent(in) :: attr_tags(:) - real(r8), intent(in) :: data(:,:) - ! - ! !LOCAL VARIABLES: - integer :: nfields - integer :: npoints - integer :: field_index - - character(len=*), parameter :: subname = 'create_aVect_with_data_rows_are_points' - !----------------------------------------------------------------------- - - npoints = size(data, 1) - nfields = size(data, 2) - - if (size(attr_tags) /= nfields) then - print *, subname, ' ERROR: dimensionality mismatch between attr_tags and data' - stop - end if - - call create_aVect_without_data(av, attr_tags, npoints) - - do field_index = 1, nfields - call aVect_importRattr(av, trim(attr_tags(field_index)), data(:,field_index)) - end do - - end subroutine create_aVect_with_data_rows_are_points - - !----------------------------------------------------------------------- - subroutine create_aVect_with_data_rows_are_fields(av, attr_tags, data) - ! - ! !DESCRIPTION: - ! Creates an attribute vector with a given set of fields, which are all assumed to be - ! real-valued. Then fills it with the given data. - ! - ! The data should be given as a 2-d array, [field, point]. So the first dimension - ! should be the same size as the attr_tags array, with data(i,:) being used to fill - ! the attr_tags(i) variable. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect), intent(inout) :: av - character(len=*), intent(in) :: attr_tags(:) - real(r8), intent(in) :: data(:,:) - !----------------------------------------------------------------------- - - call create_aVect_with_data_rows_are_points(av, attr_tags, transpose(data)) - - end subroutine create_aVect_with_data_rows_are_fields - - !----------------------------------------------------------------------- - subroutine create_aVect_without_data(av, attr_tags, lsize) - ! - ! !DESCRIPTION: - ! Creates an attribute vector with a given set of fields, with space for the given - ! number of points in each field. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect), intent(inout) :: av - character(len=*), intent(in) :: attr_tags(:) - integer, intent(in) :: lsize - ! - ! !LOCAL VARIABLES: - integer :: nfields - integer :: field_index - integer :: list_length - character(len=:), allocatable :: attr_list - - character(len=*), parameter :: subname = 'create_aVect_without_data' - !----------------------------------------------------------------------- - - nfields = size(attr_tags) - list_length = nfields * (len(attr_tags) + 1) - allocate(character(len=list_length) :: attr_list) - - attr_list = trim(attr_tags(1)) - do field_index = 2, nfields - attr_list = trim(attr_list) // ":" // trim(attr_tags(field_index)) - end do - - call mct_aVect_init(av, rList = attr_list, lsize = lsize) - - end subroutine create_aVect_without_data - - !----------------------------------------------------------------------- - subroutine aVect_importRattr(av, attr_tag, data) - ! - ! !DESCRIPTION: - ! This routine is similar to mct_aVect_importRattr, but it doesn't require a pointer - ! input - so it is often more convenient than calling mct_aVect_importRattr directly. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_aVect), intent(inout) :: av - character(len=*), intent(in) :: attr_tag - real(r8), intent(in) :: data(:) - ! - ! 
!LOCAL VARIABLES: - real(r8), pointer :: data_ptr(:) - - character(len=*), parameter :: subname = 'aVect_importRattr' - !----------------------------------------------------------------------- - - allocate(data_ptr(size(data))) - data_ptr(:) = data(:) - call mct_aVect_importRattr(av, trim(attr_tag), data_ptr) - deallocate(data_ptr) - - end subroutine aVect_importRattr - - !----------------------------------------------------------------------- - function aVect_exportRattr(av, attr_tag) result(data) - ! - ! !DESCRIPTION: - ! This function is similar to mct_aVect_exportRattr, but (1) it is a function rather - ! than a subroutine (so that it can be included inline in other statements), and (2) - ! it handles the pointer management for you, so that the caller doesn't have to. - ! - ! !USES: - ! - ! !ARGUMENTS: - real(r8), allocatable :: data(:) ! function result - type(mct_aVect), intent(in) :: av - character(len=*), intent(in) :: attr_tag - ! - ! !LOCAL VARIABLES: - real(r8), pointer :: data_ptr(:) - - character(len=*), parameter :: subname = 'aVect_exportRattr' - !----------------------------------------------------------------------- - - nullify(data_ptr) - call mct_aVect_exportRattr(av, trim(attr_tag), data_ptr) - data = data_ptr - deallocate(data_ptr) - end function aVect_exportRattr - - -end module avect_wrapper_mod diff --git a/src/drivers/mct/unit_test/utils/create_mapper_mod.F90 b/src/drivers/mct/unit_test/utils/create_mapper_mod.F90 deleted file mode 100644 index 7afc11692ec..00000000000 --- a/src/drivers/mct/unit_test/utils/create_mapper_mod.F90 +++ /dev/null @@ -1,176 +0,0 @@ -module create_mapper_mod - ! This module supports building mappers for use in unit tests - - use mct_mod - use mct_wrapper_mod, only : mct_communicator, mct_compid - use shr_kind_mod, only : r8 => shr_kind_r8 - use seq_map_type_mod, only : seq_map - use simple_map_mod, only : simple_map_type - - implicit none - private - save - - public :: create_mapper ! create a simple mapper - public :: clean_mapper ! deallocate memory associated with a mapper - public :: create_gsmap ! used in test_check_fields - -contains - - !----------------------------------------------------------------------- - subroutine create_mapper(mapper, simple_map) - ! - ! !DESCRIPTION: - ! Create a simple mapper - ! - ! !USES: - ! - ! !ARGUMENTS: - type(seq_map), intent(out) :: mapper - class(simple_map_type), intent(in) :: simple_map - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'create_mapper' - !----------------------------------------------------------------------- - - mapper%copy_only = .false. - mapper%rearrange_only = .false. - mapper%esmf_map = .false. - mapper%mapfile = ' ' - - ! The strategy just relates to whether the mapping is done on the source or - ! destination decomposition, which is irrelevant for a single-processor unit test - mapper%strategy = 'X' - - ! May need to make this more sophisticated if it causes problems to use 0 for all - ! mappers - mapper%counter = 0 - - allocate(mapper%gsmap_s) - call create_gsmap(mapper%gsmap_s, simple_map%get_n_source_points()) - allocate(mapper%gsmap_d) - call create_gsmap(mapper%gsmap_d, simple_map%get_n_dest_points()) - - call mct_rearr_init(mapper%gsmap_s, mapper%gsmap_d, mct_communicator, mapper%rearr) - - call create_sMatp(mapper%sMatp, simple_map, mapper%gsmap_s, mapper%gsmap_d) - - end subroutine create_mapper - - - !----------------------------------------------------------------------- - subroutine clean_mapper(mapper) - ! - ! !DESCRIPTION: - ! 
Deallocate memory associated with a mapper. - ! - ! This currently only deallocates the memory used in all mappers, NOT the - ! cart3d-specific memory. - ! - ! This assumes that gsmaps were created specially for this mapper, as is done in the - ! convenience functions in this module (as opposed to having the mapper's gsmap - ! pointers simply pointing to existing gsmaps). - ! - ! !USES: - ! - ! !ARGUMENTS: - type(seq_map), intent(inout) :: mapper - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'clean_mapper' - !----------------------------------------------------------------------- - - call mct_rearr_clean(mapper%rearr) - call mct_sMatP_clean(mapper%sMatp) - - call mct_gsMap_clean(mapper%gsmap_s) - deallocate(mapper%gsmap_s) - call mct_gsMap_clean(mapper%gsmap_d) - deallocate(mapper%gsmap_d) - - end subroutine clean_mapper - - !----------------------------------------------------------------------- - subroutine create_gsmap(gsmap, npts) - ! - ! !DESCRIPTION: - ! Creates a simple, single-processor gsmap - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_gsMap), intent(out) :: gsmap - integer, intent(in) :: npts - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'create_gsmap' - !----------------------------------------------------------------------- - - call mct_gsMap_init(GSMap = gsmap, & - comp_id = mct_compid, & - ngseg = 1, & - gsize = npts, & - start = [1], & - length = [npts], & - pe_loc = [0]) - - end subroutine create_gsmap - - !----------------------------------------------------------------------- - subroutine create_sMatp(sMatp, simple_map, gsmap_s, gsmap_d) - ! - ! !DESCRIPTION: - ! Creates an sMatp object - ! - ! !USES: - ! - ! !ARGUMENTS: - type(mct_sMatp), intent(out) :: sMatp - class(simple_map_type), intent(in) :: simple_map - type(mct_gsMap), intent(in) :: gsmap_s - type(mct_gsMap), intent(in) :: gsmap_d - ! - ! !LOCAL VARIABLES: - integer :: n_elements ! number of elements in the sparse matrix - type(mct_sMat) :: sMati ! non-parallel sparse matrix - - ! 
The following pointers are needed because the MCT routines want pointer inputs - integer, pointer :: source_indices(:) - integer, pointer :: dest_indices(:) - real(r8), pointer :: matrix_elements(:) - - character(len=*), parameter :: subname = 'create_sMatp' - !----------------------------------------------------------------------- - - n_elements = simple_map%get_n_overlaps() - - call mct_sMat_init(sMati, & - nrows = simple_map%get_n_dest_points(), & - ncols = simple_map%get_n_source_points(), & - lsize = n_elements) - - allocate(source_indices(n_elements)) - source_indices = simple_map%get_source_indices() - call mct_sMat_ImpGColI(sMati, source_indices, n_elements) - deallocate(source_indices) - - allocate(dest_indices(n_elements)) - dest_indices = simple_map%get_dest_indices() - call mct_sMat_ImpGRowI(sMati, dest_indices, n_elements) - deallocate(dest_indices) - - allocate(matrix_elements(n_elements)) - matrix_elements = simple_map%get_overlap_weights() - call mct_sMat_ImpMatrix(sMati, matrix_elements, n_elements) - deallocate(matrix_elements) - - call mct_sMatP_Init(sMatP, sMati, gsmap_s, gsmap_d, 0, mct_communicator, gsmap_s%comp_id) - - call mct_sMat_Clean(sMati) - end subroutine create_sMatp - - -end module create_mapper_mod diff --git a/src/drivers/mct/unit_test/utils/mct_wrapper_mod.F90 b/src/drivers/mct/unit_test/utils/mct_wrapper_mod.F90 deleted file mode 100644 index c93d4924f75..00000000000 --- a/src/drivers/mct/unit_test/utils/mct_wrapper_mod.F90 +++ /dev/null @@ -1,66 +0,0 @@ -module mct_wrapper_mod - ! This module provides some variables and convenience functions for the sake of unit - ! tests that use mct. - ! - ! Any test that uses mct should call mct_init in its initialization, and mct_clean in - ! its teardown. - - implicit none - private - -#include - - public :: mct_init ! initialize data structures needed to use mct - public :: mct_clean ! clean up mct data structures that were set up by mct_init - - ! MPI communicator that can be used wherever mct routines expect a communicator - integer, parameter, public :: mct_communicator = MPI_COMM_WORLD - - ! value that can be used wherever mct routines expect a component ID - integer, parameter, public :: mct_compid = 1 - -contains - - !----------------------------------------------------------------------- - subroutine mct_init() - ! - ! !DESCRIPTION: - ! Initializes data structures needed to use mct. - ! - ! Expects that mpi_init has already been called. - ! - ! !USES: - use seq_comm_mct, only : seq_comm_init - ! - ! !ARGUMENTS: - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'mct_init' - !----------------------------------------------------------------------- - - call seq_comm_init(mct_communicator, mct_communicator, nmlfile = ' ') - end subroutine mct_init - - !----------------------------------------------------------------------- - subroutine mct_clean() - ! - ! !DESCRIPTION: - ! Cleans up mct data structures that were set up by mct_init. - ! - ! !USES: - use seq_comm_mct, only : seq_comm_clean - ! - ! !ARGUMENTS: - ! - ! 
!LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'mct_clean' - !----------------------------------------------------------------------- - - call seq_comm_clean() - - end subroutine mct_clean - - -end module mct_wrapper_mod diff --git a/src/drivers/mct/unit_test/utils/simple_map_mod.F90 b/src/drivers/mct/unit_test/utils/simple_map_mod.F90 deleted file mode 100644 index 485dca44481..00000000000 --- a/src/drivers/mct/unit_test/utils/simple_map_mod.F90 +++ /dev/null @@ -1,326 +0,0 @@ -module simple_map_mod - - ! This module defines a class for holding data describing a mapping between two grids - -#include "shr_assert.h" - use shr_kind_mod, only : r8 => shr_kind_r8 - use shr_log_mod, only : errMsg => shr_log_errMsg - - implicit none - private - - - type, public :: simple_map_type - private - integer :: n_overlaps ! number of overlaps between the source & destination grid (size of sparse matrix) - integer, allocatable :: source_indices(:) - integer, allocatable :: dest_indices(:) - real(r8), allocatable :: overlap_weights(:) - contains - procedure, public :: get_n_overlaps ! get number of overlaps (size of sparse matrix) - procedure, public :: get_n_source_points ! get number of source points - procedure, public :: get_n_dest_points ! get number of destination points - procedure, public :: get_source_indices ! get source indices in the sparse matrix - procedure, public :: get_dest_indices ! get dest indices in the sparse matrix - procedure, public :: get_overlap_weights ! get overlap weights (the values in the sparse matrix) - - procedure, private :: check_okay ! check if the data in this object are valid - procedure, private :: check_for_duplicate_overlaps - procedure, private :: check_for_nonpositive_weights - end type simple_map_type - - interface simple_map_type - module procedure constructor - end interface simple_map_type - - ! Note: This could be written as a constructor, but instead is made a module-level - ! routine so that it can be called with a more meaningful name. - public :: create_simple_map_with_one_source ! create a simple_map_type instance with a single source cell - -contains - - ! ======================================================================== - ! Constructors and creation methods - ! ======================================================================== - - !----------------------------------------------------------------------- - function constructor(source_indices, dest_indices, overlap_weights) result(this) - ! - ! !DESCRIPTION: - ! Create a simple_map_type instance. - ! - ! The sizes of source_indices, dest_indices and overlap_weights must all be the same. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(simple_map_type) :: this ! function result - integer, intent(in) :: source_indices(:) - integer, intent(in) :: dest_indices(:) - real(r8), intent(in) :: overlap_weights(:) - ! - ! !LOCAL VARIABLES: - integer :: n_overlaps - - character(len=*), parameter :: subname = 'constructor' - !----------------------------------------------------------------------- - - n_overlaps = size(overlap_weights) - call shr_assert(size(source_indices) == n_overlaps, file=__FILE__, line=__LINE__) - call shr_assert(size(dest_indices) == n_overlaps, file=__FILE__, line=__LINE__) - - this%n_overlaps = n_overlaps - this%source_indices = source_indices - this%dest_indices = dest_indices - this%overlap_weights = overlap_weights - - ! 
Perform some error-checking - call this%check_okay() - end function constructor - - !----------------------------------------------------------------------- - function create_simple_map_with_one_source(ndest) result(simple_map) - ! - ! !DESCRIPTION: - ! Create a simple_map_type instance with a single source cell. - ! - ! Assumes that all destination cells are fully contained within this single source cell. - ! - ! !USES: - ! - ! !ARGUMENTS: - type(simple_map_type) :: simple_map ! function return value - integer, intent(in) :: ndest ! number of destination cells - ! - ! !LOCAL VARIABLES: - integer :: dest_index - integer :: source_indices(ndest) - integer :: dest_indices(ndest) - real(r8) :: overlap_weights(ndest) - - character(len=*), parameter :: subname = 'create_simple_map_with_one_source' - !----------------------------------------------------------------------- - - source_indices(:) = 1 - dest_indices = [(dest_index, dest_index = 1, ndest)] - overlap_weights(:) = 1._r8 - simple_map = simple_map_type(source_indices=source_indices, dest_indices=dest_indices,& - overlap_weights=overlap_weights) - - end function create_simple_map_with_one_source - - - ! ======================================================================== - ! Class methods - ! ======================================================================== - - !----------------------------------------------------------------------- - function get_n_overlaps(this) result(n_overlaps) - ! - ! !DESCRIPTION: - ! Get number of overlaps (size of sparse matrix) - ! - ! !USES: - ! - ! !ARGUMENTS: - integer :: n_overlaps ! function result - class(simple_map_type), intent(in) :: this - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'get_n_overlaps' - !----------------------------------------------------------------------- - - n_overlaps = this%n_overlaps - end function get_n_overlaps - - !----------------------------------------------------------------------- - function get_n_source_points(this) result(n_source_points) - ! - ! !DESCRIPTION: - ! Get number of source points - ! - ! !USES: - ! - ! !ARGUMENTS: - integer :: n_source_points ! function result - class(simple_map_type), intent(in) :: this - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'get_n_source_points' - !----------------------------------------------------------------------- - - n_source_points = maxval(this%source_indices) - end function get_n_source_points - - !----------------------------------------------------------------------- - function get_n_dest_points(this) result(n_dest_points) - ! - ! !DESCRIPTION: - ! Get number of destination points - ! - ! !USES: - ! - ! !ARGUMENTS: - integer :: n_dest_points ! function result - class(simple_map_type), intent(in) :: this - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'get_n_dest_points' - !----------------------------------------------------------------------- - - n_dest_points = maxval(this%dest_indices) - end function get_n_dest_points - - !----------------------------------------------------------------------- - function get_source_indices(this) result(source_indices) - ! - ! !DESCRIPTION: - ! Get source indices in the sparse matrix - ! - ! !USES: - ! - ! !ARGUMENTS: - integer, allocatable, dimension(:) :: source_indices ! function result - class(simple_map_type), intent(in) :: this - ! - ! 
!LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'get_source_indices' - !----------------------------------------------------------------------- - - source_indices = this%source_indices - end function get_source_indices - - !----------------------------------------------------------------------- - function get_dest_indices(this) result(dest_indices) - ! - ! !DESCRIPTION: - ! Get dest indices in the sparse matrix - ! - ! !USES: - ! - ! !ARGUMENTS: - integer, allocatable, dimension(:) :: dest_indices ! function result - class(simple_map_type), intent(in) :: this - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'get_dest_indices' - !----------------------------------------------------------------------- - - dest_indices = this%dest_indices - end function get_dest_indices - - !----------------------------------------------------------------------- - function get_overlap_weights(this) result(overlap_weights) - ! - ! !DESCRIPTION: - ! Get overlap weights (the values in the sparse matrix) - ! - ! !USES: - ! - ! !ARGUMENTS: - real(r8), allocatable, dimension(:) :: overlap_weights ! function result - class(simple_map_type), intent(in) :: this - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'get_overlap_weights' - !----------------------------------------------------------------------- - - overlap_weights = this%overlap_weights - end function get_overlap_weights - - !----------------------------------------------------------------------- - subroutine check_okay(this) - ! - ! !DESCRIPTION: - ! Makes sure that the data in this object are valid - ! - ! !USES: - ! - ! !ARGUMENTS: - class(simple_map_type), intent(in) :: this - ! - ! !LOCAL VARIABLES: - - character(len=*), parameter :: subname = 'check_okay' - !----------------------------------------------------------------------- - - call this%check_for_duplicate_overlaps() - call this%check_for_nonpositive_weights() - end subroutine check_okay - - - - !----------------------------------------------------------------------- - subroutine check_for_duplicate_overlaps(this) - ! - ! !DESCRIPTION: - ! Confirms that there are not multiple overlaps with the same source and destination - ! indices. - ! - ! Aborts if any duplicates are found. - ! - ! !USES: - ! - ! !ARGUMENTS: - class(simple_map_type), intent(in) :: this - ! - ! !LOCAL VARIABLES: - logical, allocatable :: overlap_found(:,:) - integer :: overlap_index - integer :: source_index - integer :: dest_index - - character(len=*), parameter :: subname = 'check_for_duplicate_overlaps' - !----------------------------------------------------------------------- - - allocate(overlap_found(this%get_n_source_points(), this%get_n_dest_points()), source=.false.) - - do overlap_index = 1, this%get_n_overlaps() - source_index = this%source_indices(overlap_index) - dest_index = this%dest_indices(overlap_index) - if (overlap_found(source_index, dest_index)) then - print *, subname, ' ERROR: duplicate found at: ', overlap_index, source_index, & - dest_index - stop - end if - overlap_found(source_index, dest_index) = .true. - end do - - end subroutine check_for_duplicate_overlaps - - !----------------------------------------------------------------------- - subroutine check_for_nonpositive_weights(this) - ! - ! !DESCRIPTION: - ! Confirms that all weights are positive. - ! - ! Aborts if any zero or negative weights are found. - ! - ! !USES: - ! - ! !ARGUMENTS: - class(simple_map_type), intent(in) :: this - ! - ! 
!LOCAL VARIABLES: - integer :: overlap_index - - character(len=*), parameter :: subname = 'check_for_nonpositive_weights' - !----------------------------------------------------------------------- - - do overlap_index = 1, this%get_n_overlaps() - if (this%overlap_weights(overlap_index) <= 0) then - print *, subname, ' ERROR: non-positive weight found at: ', overlap_index, & - this%overlap_weights(overlap_index) - stop - end if - end do - end subroutine check_for_nonpositive_weights - - -end module simple_map_mod diff --git a/src/drivers/moab/cime_config/buildexe b/src/drivers/moab/cime_config/buildexe deleted file mode 100755 index 686f6fb7098..00000000000 --- a/src/drivers/moab/cime_config/buildexe +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python - -""" -build model executable -""" - -import sys, os - -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..") -sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) - -from standard_script_setup import * -from CIME.buildlib import parse_input -from CIME.case import Case -from CIME.utils import expect, run_cmd - -logger = logging.getLogger(__name__) - -############################################################################### -def _main_func(): -############################################################################### - - caseroot, libroot, _ = parse_input(sys.argv) - - logger.info("Building a single executable version of target coupled model") - - with Case(caseroot) as case: - casetools = case.get_value("CASETOOLS") - cimeroot = case.get_value("CIMEROOT") - exeroot = case.get_value("EXEROOT") - gmake = case.get_value("GMAKE") - gmake_j = case.get_value("GMAKE_J") - model = case.get_value("MODEL") - num_esp = case.get_value("NUM_COMP_INST_ESP") - os.environ["PIO_VERSION"] = str(case.get_value("PIO_VERSION")) - - expect((num_esp is None) or (int(num_esp) == 1), "ESP component restricted to one instance") - - - with open('Filepath', 'w') as out: - out.write(os.path.join(caseroot, "SourceMods", "src.drv") + "\n") - out.write(os.path.join(cimeroot, "src", "drivers", "moab", "main") + "\n") - - # build model executable - - makefile = os.path.join(casetools, "Makefile") - exename = os.path.join(exeroot, model + ".exe") - - cmd = "%s exec_se -j %d EXEC_SE=%s MODEL=%s LIBROOT=%s -f %s "\ - % (gmake, gmake_j, exename, "driver", libroot, makefile) - - rc, out, _ = run_cmd(cmd, combine_output=True) - expect(rc==0,"Command %s failed rc=%d\nout=%s"%(cmd,rc,out)) - logger.info(out) - -############################################################################### - -if __name__ == "__main__": - _main_func() diff --git a/src/drivers/moab/cime_config/buildnml b/src/drivers/moab/cime_config/buildnml deleted file mode 120000 index dc822902f96..00000000000 --- a/src/drivers/moab/cime_config/buildnml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/buildnml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/config_archive.xml b/src/drivers/moab/cime_config/config_archive.xml deleted file mode 120000 index d2bd1148044..00000000000 --- a/src/drivers/moab/cime_config/config_archive.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/config_archive.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/config_component.xml b/src/drivers/moab/cime_config/config_component.xml deleted file mode 120000 index cfd45c480d2..00000000000 --- a/src/drivers/moab/cime_config/config_component.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/config_component.xml \ No newline 
at end of file diff --git a/src/drivers/moab/cime_config/config_component_cesm.xml b/src/drivers/moab/cime_config/config_component_cesm.xml deleted file mode 120000 index 0c645819ab0..00000000000 --- a/src/drivers/moab/cime_config/config_component_cesm.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/config_component_cesm.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/config_component_e3sm.xml b/src/drivers/moab/cime_config/config_component_e3sm.xml deleted file mode 120000 index 0ba66ab9375..00000000000 --- a/src/drivers/moab/cime_config/config_component_e3sm.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/config_component_e3sm.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/config_compsets.xml b/src/drivers/moab/cime_config/config_compsets.xml deleted file mode 120000 index 00289a3511a..00000000000 --- a/src/drivers/moab/cime_config/config_compsets.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/config_compsets.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/config_pes.xml b/src/drivers/moab/cime_config/config_pes.xml deleted file mode 120000 index 9ec3e2f9897..00000000000 --- a/src/drivers/moab/cime_config/config_pes.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/config_pes.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/namelist_definition_drv.xml b/src/drivers/moab/cime_config/namelist_definition_drv.xml deleted file mode 120000 index 54663e10980..00000000000 --- a/src/drivers/moab/cime_config/namelist_definition_drv.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/namelist_definition_drv.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/namelist_definition_drv_flds.xml b/src/drivers/moab/cime_config/namelist_definition_drv_flds.xml deleted file mode 120000 index 638e3d69c6a..00000000000 --- a/src/drivers/moab/cime_config/namelist_definition_drv_flds.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/namelist_definition_drv_flds.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/namelist_definition_modelio.xml b/src/drivers/moab/cime_config/namelist_definition_modelio.xml deleted file mode 120000 index ebe7496cf5b..00000000000 --- a/src/drivers/moab/cime_config/namelist_definition_modelio.xml +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/namelist_definition_modelio.xml \ No newline at end of file diff --git a/src/drivers/moab/cime_config/user_nl_cpl b/src/drivers/moab/cime_config/user_nl_cpl deleted file mode 120000 index 0a10fe3a4c3..00000000000 --- a/src/drivers/moab/cime_config/user_nl_cpl +++ /dev/null @@ -1 +0,0 @@ -../../mct/cime_config/user_nl_cpl \ No newline at end of file diff --git a/src/drivers/moab/main/CMakeLists.txt b/src/drivers/moab/main/CMakeLists.txt deleted file mode 120000 index 2027145439b..00000000000 --- a/src/drivers/moab/main/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/CMakeLists.txt \ No newline at end of file diff --git a/src/drivers/moab/main/cime_comp_mod.F90 b/src/drivers/moab/main/cime_comp_mod.F90 deleted file mode 100644 index 8694d418825..00000000000 --- a/src/drivers/moab/main/cime_comp_mod.F90 +++ /dev/null @@ -1,4201 +0,0 @@ -module cime_comp_mod - - !------------------------------------------------------------------------------- - ! - ! Purpose: Main program for CIME cpl7. Can have different - ! land, sea-ice, and ocean models plugged in at compile-time. - ! These models can be either: stub, dead, data, or active - ! 
components or some combination of the above. - ! - ! stub -------- Do nothing. - ! dead -------- Send analytic data back. - ! data -------- Send data back interpolated from input files. - ! prognostic -- Prognostically simulate the given component. - ! - ! Method: Call appropriate initialization, run (time-stepping), and - ! finalization routines. - ! - !------------------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! share code & libs - !---------------------------------------------------------------------------- - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use shr_const_mod, only: shr_const_cday - use shr_file_mod, only: shr_file_setLogLevel, shr_file_setLogUnit - use shr_file_mod, only: shr_file_setIO, shr_file_getUnit, shr_file_freeUnit - use shr_scam_mod, only: shr_scam_checkSurface - use shr_map_mod, only: shr_map_setDopole - use shr_mpi_mod, only: shr_mpi_min, shr_mpi_max - use shr_mpi_mod, only: shr_mpi_bcast, shr_mpi_commrank, shr_mpi_commsize - use shr_mem_mod, only: shr_mem_init, shr_mem_getusage - use shr_cal_mod, only: shr_cal_date2ymd, shr_cal_ymd2date, shr_cal_advdateInt - use shr_cal_mod, only: shr_cal_ymds2rday_offset - use shr_orb_mod, only: shr_orb_params - use shr_frz_mod, only: shr_frz_freezetemp_init - use shr_reprosum_mod, only: shr_reprosum_setopts - use mct_mod ! mct_ wrappers for mct lib - use perf_mod - use ESMF - - !---------------------------------------------------------------------------- - ! component model interfaces (init, run, final methods) - !---------------------------------------------------------------------------- - - use atm_comp_mct , only: atm_init=>atm_init_mct, atm_run=>atm_run_mct, atm_final=>atm_final_mct - use lnd_comp_mct , only: lnd_init=>lnd_init_mct, lnd_run=>lnd_run_mct, lnd_final=>lnd_final_mct - use ocn_comp_mct , only: ocn_init=>ocn_init_mct, ocn_run=>ocn_run_mct, ocn_final=>ocn_final_mct - use ice_comp_mct , only: ice_init=>ice_init_mct, ice_run=>ice_run_mct, ice_final=>ice_final_mct - use glc_comp_mct , only: glc_init=>glc_init_mct, glc_run=>glc_run_mct, glc_final=>glc_final_mct - use wav_comp_mct , only: wav_init=>wav_init_mct, wav_run=>wav_run_mct, wav_final=>wav_final_mct - use rof_comp_mct , only: rof_init=>rof_init_mct, rof_run=>rof_run_mct, rof_final=>rof_final_mct - use esp_comp_mct , only: esp_init=>esp_init_mct, esp_run=>esp_run_mct, esp_final=>esp_final_mct - - !---------------------------------------------------------------------------- - ! cpl7 modules - !---------------------------------------------------------------------------- - - ! 
mpi comm data & routines, plus logunit and loglevel - use seq_comm_mct, only: CPLID, GLOID, logunit, loglevel - use seq_comm_mct, only: ATMID, LNDID, OCNID, ICEID, GLCID, ROFID, WAVID, ESPID - use seq_comm_mct, only: ALLATMID,ALLLNDID,ALLOCNID,ALLICEID,ALLGLCID,ALLROFID,ALLWAVID,ALLESPID - use seq_comm_mct, only: CPLALLATMID,CPLALLLNDID,CPLALLOCNID,CPLALLICEID - use seq_comm_mct, only: CPLALLGLCID,CPLALLROFID,CPLALLWAVID,CPLALLESPID - use seq_comm_mct, only: CPLATMID,CPLLNDID,CPLOCNID,CPLICEID,CPLGLCID,CPLROFID,CPLWAVID,CPLESPID - use seq_comm_mct, only: num_inst_atm, num_inst_lnd, num_inst_rof - use seq_comm_mct, only: num_inst_ocn, num_inst_ice, num_inst_glc - use seq_comm_mct, only: num_inst_wav, num_inst_esp - use seq_comm_mct, only: num_inst_xao, num_inst_frc, num_inst_phys - use seq_comm_mct, only: num_inst_total, num_inst_max - use seq_comm_mct, only: seq_comm_iamin, seq_comm_name, seq_comm_namelen - use seq_comm_mct, only: seq_comm_init, seq_comm_setnthreads, seq_comm_getnthreads - use seq_comm_mct, only: seq_comm_getinfo => seq_comm_setptrs - use seq_comm_mct, only: cpl_inst_tag - - ! clock & alarm routines and variables - use seq_timemgr_mod, only: seq_timemgr_type - use seq_timemgr_mod, only: seq_timemgr_clockInit - use seq_timemgr_mod, only: seq_timemgr_clockAdvance - use seq_timemgr_mod, only: seq_timemgr_clockPrint - use seq_timemgr_mod, only: seq_timemgr_EClockGetData - use seq_timemgr_mod, only: seq_timemgr_alarmIsOn - use seq_timemgr_mod, only: seq_timemgr_histavg_type - use seq_timemgr_mod, only: seq_timemgr_type_never - use seq_timemgr_mod, only: seq_timemgr_alarm_restart - use seq_timemgr_mod, only: seq_timemgr_alarm_stop - use seq_timemgr_mod, only: seq_timemgr_alarm_datestop - use seq_timemgr_mod, only: seq_timemgr_alarm_history - use seq_timemgr_mod, only: seq_timemgr_alarm_atmrun - use seq_timemgr_mod, only: seq_timemgr_alarm_lndrun - use seq_timemgr_mod, only: seq_timemgr_alarm_ocnrun - use seq_timemgr_mod, only: seq_timemgr_alarm_icerun - use seq_timemgr_mod, only: seq_timemgr_alarm_glcrun - use seq_timemgr_mod, only: seq_timemgr_alarm_glcrun_avg - use seq_timemgr_mod, only: seq_timemgr_alarm_ocnnext - use seq_timemgr_mod, only: seq_timemgr_alarm_tprof - use seq_timemgr_mod, only: seq_timemgr_alarm_histavg - use seq_timemgr_mod, only: seq_timemgr_alarm_rofrun - use seq_timemgr_mod, only: seq_timemgr_alarm_wavrun - use seq_timemgr_mod, only: seq_timemgr_alarm_esprun - use seq_timemgr_mod, only: seq_timemgr_alarm_barrier - use seq_timemgr_mod, only: seq_timemgr_alarm_pause - use seq_timemgr_mod, only: seq_timemgr_pause_active - use seq_timemgr_mod, only: seq_timemgr_pause_component_active - use seq_timemgr_mod, only: seq_timemgr_pause_component_index - - ! "infodata" gathers various control flags into one datatype - use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData - use seq_infodata_mod, only: seq_infodata_init, seq_infodata_exchange - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_orb_variable_year - use seq_infodata_mod, only: seq_infodata_print, seq_infodata_init2 - - ! domain related routines - use seq_domain_mct, only : seq_domain_check - - ! history file routines - use seq_hist_mod, only : seq_hist_write, seq_hist_writeavg, seq_hist_writeaux - - ! restart file routines - use seq_rest_mod, only : seq_rest_read, seq_rest_write - - ! flux calc routines - use seq_flux_mct, only: seq_flux_init_mct, seq_flux_initexch_mct, seq_flux_ocnalb_mct - use seq_flux_mct, only: seq_flux_atmocn_mct, seq_flux_atmocnexch_mct - - ! 
domain fraction routines - use seq_frac_mct, only : seq_frac_init, seq_frac_set - - ! i/o subroutines - use seq_io_mod, only : seq_io_cpl_init - - ! rearrange type routines - use cplcomp_exchange_mod, only: seq_mctext_decomp - - ! diagnostic routines - use seq_diag_mct, only : seq_diag_zero_mct , seq_diag_avect_mct, seq_diag_lnd_mct - use seq_diag_mct, only : seq_diag_rof_mct , seq_diag_ocn_mct , seq_diag_atm_mct - use seq_diag_mct, only : seq_diag_ice_mct , seq_diag_accum_mct, seq_diag_print_mct - - ! list of fields transferred between components - use seq_flds_mod, only : seq_flds_a2x_fluxes, seq_flds_x2a_fluxes - use seq_flds_mod, only : seq_flds_i2x_fluxes, seq_flds_x2i_fluxes - use seq_flds_mod, only : seq_flds_l2x_fluxes, seq_flds_x2l_fluxes - use seq_flds_mod, only : seq_flds_o2x_fluxes, seq_flds_x2o_fluxes - use seq_flds_mod, only : seq_flds_g2x_fluxes, seq_flds_x2g_fluxes - use seq_flds_mod, only : seq_flds_w2x_fluxes, seq_flds_x2w_fluxes - use seq_flds_mod, only : seq_flds_r2x_fluxes, seq_flds_x2r_fluxes - use seq_flds_mod, only : seq_flds_set - - ! component type and accessor functions - use component_type_mod , only: component_get_iamin_compid, component_get_suffix - use component_type_mod , only: component_get_name, component_get_c2x_cx - use component_type_mod , only: atm, lnd, ice, ocn, rof, glc, wav, esp - use component_mod , only: component_init_pre - use component_mod , only: component_init_cc, component_init_cx, component_run, component_final - use component_mod , only: component_init_areacor, component_init_aream - use component_mod , only: component_exch, component_diag - - ! prep routines (includes mapping routines between components and merging routines) - use prep_lnd_mod - use prep_ice_mod - use prep_wav_mod - use prep_rof_mod - use prep_glc_mod - use prep_ocn_mod - use prep_atm_mod - use prep_aoflux_mod - - !--- mapping routines --- - use seq_map_type_mod - use seq_map_mod ! generic mapping - - ! --- timing routines --- - use t_drv_timers_mod - - implicit none - - private - - public cime_pre_init1, cime_pre_init2, cime_init, cime_run, cime_final - public timing_dir, mpicom_GLOID - -#include - - !---------------------------------------------------------------------------- - ! temporary variables - !---------------------------------------------------------------------------- - - !- from prep routines (arrays of instances) - type(mct_aVect) , pointer :: a2x_ox(:) => null() - type(mct_aVect) , pointer :: o2x_ax(:) => null() - type(mct_aVect) , pointer :: xao_ox(:) => null() - type(mct_aVect) , pointer :: xao_ax(:) => null() - - !- from component type (single instance inside array of components) - type(mct_aVect) , pointer :: o2x_ox => null() - type(mct_aVect) , pointer :: a2x_ax => null() - - character(len=CL) :: suffix - logical :: iamin_id - character(len=seq_comm_namelen) :: compname - - !---------------------------------------------------------------------------- - ! domains & related - !---------------------------------------------------------------------------- - - !--- domain fractions (only defined on cpl pes) --- - type(mct_aVect) , pointer :: fractions_ax(:) ! Fractions on atm grid, cpl processes - type(mct_aVect) , pointer :: fractions_lx(:) ! Fractions on lnd grid, cpl processes - type(mct_aVect) , pointer :: fractions_ix(:) ! Fractions on ice grid, cpl processes - type(mct_aVect) , pointer :: fractions_ox(:) ! Fractions on ocn grid, cpl processes - type(mct_aVect) , pointer :: fractions_gx(:) ! 
Fractions on glc grid, cpl processes - type(mct_aVect) , pointer :: fractions_rx(:) ! Fractions on rof grid, cpl processes - type(mct_aVect) , pointer :: fractions_wx(:) ! Fractions on wav grid, cpl processes - - !--- domain equivalent 2d grid size --- - integer :: atm_nx, atm_ny ! nx, ny of 2d grid, if known - integer :: lnd_nx, lnd_ny - integer :: ice_nx, ice_ny - integer :: ocn_nx, ocn_ny - integer :: rof_nx, rof_ny - integer :: glc_nx, glc_ny - integer :: wav_nx, wav_ny - - !---------------------------------------------------------------------------- - ! Infodata: inter-model control flags, domain info - !---------------------------------------------------------------------------- - - type (seq_infodata_type), target :: infodata ! single instance for cpl and all comps - - !---------------------------------------------------------------------------- - ! time management - !---------------------------------------------------------------------------- - - type (seq_timemgr_type), SAVE :: seq_SyncClock ! array of all clocks & alarm - type (ESMF_Clock), target :: EClock_d ! driver clock - type (ESMF_Clock), target :: EClock_a ! atmosphere clock - type (ESMF_Clock), target :: EClock_l ! land clock - type (ESMF_Clock), target :: EClock_o ! ocean clock - type (ESMF_Clock), target :: EClock_i ! ice clock - type (ESMF_Clock), target :: EClock_g ! glc clock - type (ESMF_Clock), target :: EClock_r ! rof clock - type (ESMF_Clock), target :: EClock_w ! wav clock - type (ESMF_Clock), target :: EClock_e ! esp clock - - logical :: restart_alarm ! restart alarm - logical :: history_alarm ! history alarm - logical :: histavg_alarm ! history alarm - logical :: stop_alarm ! stop alarm - logical :: atmrun_alarm ! atm run alarm - logical :: lndrun_alarm ! lnd run alarm - logical :: icerun_alarm ! ice run alarm - logical :: ocnrun_alarm ! ocn run alarm - logical :: ocnnext_alarm ! ocn run alarm on next timestep - logical :: glcrun_alarm ! glc run alarm - logical :: glcrun_avg_alarm ! glc run averaging alarm - logical :: rofrun_alarm ! rof run alarm - logical :: wavrun_alarm ! wav run alarm - logical :: esprun_alarm ! esp run alarm - logical :: tprof_alarm ! timing profile alarm - logical :: barrier_alarm ! barrier alarm - logical :: t1hr_alarm ! alarm every hour - logical :: t2hr_alarm ! alarm every two hours - logical :: t3hr_alarm ! alarm every three hours - logical :: t6hr_alarm ! alarm every six hours - logical :: t12hr_alarm ! alarm every twelve hours - logical :: t24hr_alarm ! alarm every twentyfour hours - logical :: t1yr_alarm ! alarm every year, at start of year - logical :: pause_alarm ! pause alarm - integer :: drv_index ! seq_timemgr index for driver - - real(r8) :: days_per_year = 365.0 ! days per year - - integer :: dtime ! dt of one coupling interval - integer :: ncpl ! number of coupling intervals per day - integer :: ymd ! Current date (YYYYMMDD) - integer :: year ! Current date (YYYY) - integer :: month ! Current date (MM) - integer :: day ! Current date (DD) - integer :: tod ! Current time of day (seconds) - integer :: ymdtmp ! temporary date (YYYYMMDD) - integer :: todtmp ! temporary time of day (seconds) - character(CL) :: orb_mode ! orbital mode - character(CS) :: tfreeze_option ! Freezing point calculation - integer :: orb_iyear ! orbital year - integer :: orb_iyear_align ! associated with model year - integer :: orb_cyear ! orbital year for current orbital computation - integer :: orb_nyear ! orbital year associated with currrent model year - real(r8) :: orb_eccen ! 
orbital eccentricity - real(r8) :: orb_obliq ! obliquity in degrees - real(r8) :: orb_mvelp ! moving vernal equinox long - real(r8) :: orb_obliqr ! Earths obliquity in rad - real(r8) :: orb_lambm0 ! Mean long of perihelion at vernal equinox (radians) - real(r8) :: orb_mvelpp ! moving vernal equinox long - real(r8) :: wall_time_limit ! wall time limit in hours - real(r8) :: wall_time ! current wall time used - character(CS) :: force_stop_at ! force stop at next (month, day, etc) - logical :: force_stop ! force the model to stop - integer :: force_stop_ymd ! force stop ymd - integer :: force_stop_tod ! force stop tod - - !--- for documenting speed of the model --- - character(8) :: dstr ! date string - character(10) :: tstr ! time string - integer :: begStep, endStep ! Begining and ending step number - character(CL) :: calendar ! calendar name - real(r8) :: simDays ! Number of simulated days - real(r8) :: SYPD ! Simulated years per day - real(r8) :: Time_begin ! Start time - real(r8) :: Time_end ! Ending time - real(r8) :: Time_bstep ! Start time - real(r8) :: Time_estep ! Ending time - real(r8) :: time_brun ! Start time - real(r8) :: time_erun ! Ending time - real(r8) :: cktime ! delta time - real(r8) :: cktime_acc(10) ! cktime accumulator array 1 = all, 2 = atm, etc - integer :: cktime_cnt(10) ! cktime counter array - real(r8) :: max_cplstep_time - character(CL) :: timing_file ! Local path to tprof filename - character(CL) :: timing_dir ! timing directory - character(CL) :: tchkpt_dir ! timing checkpoint directory - - !---------------------------------------------------------------------------- - ! control flags - !---------------------------------------------------------------------------- - - logical :: atm_present ! .true. => atm is present - logical :: lnd_present ! .true. => land is present - logical :: ice_present ! .true. => ice is present - logical :: ocn_present ! .true. => ocn is present - logical :: glc_present ! .true. => glc is present - logical :: glclnd_present ! .true. => glc is computing land coupling - logical :: glcocn_present ! .true. => glc is computing ocean runoff - logical :: glcice_present ! .true. => glc is computing icebergs - logical :: rofice_present ! .true. => rof is computing icebergs - logical :: rof_present ! .true. => rof is present - logical :: flood_present ! .true. => rof is computing flood - logical :: wav_present ! .true. => wav is present - logical :: esp_present ! .true. => esp is present - - logical :: atm_prognostic ! .true. => atm comp expects input - logical :: lnd_prognostic ! .true. => lnd comp expects input - logical :: ice_prognostic ! .true. => ice comp expects input - logical :: iceberg_prognostic ! .true. => ice comp can handle iceberg input - logical :: ocn_prognostic ! .true. => ocn comp expects input - logical :: ocnrof_prognostic ! .true. => ocn comp expects runoff input - logical :: glc_prognostic ! .true. => glc comp expects input - logical :: rof_prognostic ! .true. => rof comp expects input - logical :: wav_prognostic ! .true. => wav comp expects input - logical :: esp_prognostic ! .true. => esp comp expects input - - logical :: atm_c2_lnd ! .true. => atm to lnd coupling on - logical :: atm_c2_ocn ! .true. => atm to ocn coupling on - logical :: atm_c2_ice ! .true. => atm to ice coupling on - logical :: atm_c2_wav ! .true. => atm to wav coupling on - logical :: lnd_c2_atm ! .true. => lnd to atm coupling on - logical :: lnd_c2_rof ! .true. => lnd to rof coupling on - logical :: lnd_c2_glc ! .true. 
=> lnd to glc coupling on - logical :: ocn_c2_atm ! .true. => ocn to atm coupling on - logical :: ocn_c2_ice ! .true. => ocn to ice coupling on - logical :: ocn_c2_wav ! .true. => ocn to wav coupling on - logical :: ice_c2_atm ! .true. => ice to atm coupling on - logical :: ice_c2_ocn ! .true. => ice to ocn coupling on - logical :: ice_c2_wav ! .true. => ice to wav coupling on - logical :: rof_c2_lnd ! .true. => rof to lnd coupling on - logical :: rof_c2_ocn ! .true. => rof to ocn coupling on - logical :: rof_c2_ice ! .true. => rof to ice coupling on - logical :: glc_c2_lnd ! .true. => glc to lnd coupling on - logical :: glc_c2_ocn ! .true. => glc to ocn coupling on - logical :: glc_c2_ice ! .true. => glc to ice coupling on - logical :: wav_c2_ocn ! .true. => wav to ocn coupling on - - logical :: dead_comps ! .true. => dead components - logical :: esmf_map_flag ! .true. => use esmf for mapping - - logical :: areafact_samegrid ! areafact samegrid flag - logical :: single_column ! scm mode logical - real(r8) :: scmlon ! single column lon - real(r8) :: scmlat ! single column lat - logical :: aqua_planet ! aqua planet mode - real(r8) :: nextsw_cday ! radiation control - logical :: atm_aero ! atm provides aerosol data - - character(CL) :: cpl_seq_option ! coupler sequencing option - logical :: skip_ocean_run ! skip the ocean model first pass - logical :: cpl2ocn_first ! use to call initial cpl2ocn timer - logical :: run_barriers ! barrier the component run calls - - character(CS) :: aoflux_grid ! grid for a/o flux calc: atm xor ocn - character(CS) :: vect_map ! vector mapping type - - character(CL) :: atm_gnam ! atm grid - character(CL) :: lnd_gnam ! lnd grid - character(CL) :: ocn_gnam ! ocn grid - character(CL) :: ice_gnam ! ice grid - character(CL) :: rof_gnam ! rof grid - character(CL) :: glc_gnam ! glc grid - character(CL) :: wav_gnam ! wav grid - - logical :: samegrid_ao ! samegrid atm and ocean - logical :: samegrid_al ! samegrid atm and land - logical :: samegrid_lr ! samegrid land and rof - logical :: samegrid_oi ! samegrid ocean and ice - logical :: samegrid_ro ! samegrid runoff and ocean - logical :: samegrid_aw ! samegrid atm and wave - logical :: samegrid_ow ! samegrid ocean and wave - logical :: samegrid_lg ! samegrid glc and land - logical :: samegrid_og ! samegrid glc and ocean - logical :: samegrid_ig ! samegrid glc and ice - logical :: samegrid_alo ! samegrid atm, lnd, ocean - - logical :: read_restart ! local read restart flag - character(CL) :: rest_file ! restart file path + filename - - logical :: shr_map_dopole ! logical for dopole in shr_map_mod - logical :: domain_check ! .true. => check consistency of domains - logical :: reprosum_use_ddpdd ! setup reprosum, use ddpdd - logical :: reprosum_allow_infnan ! setup reprosum, allow INF and NaN in summands - real(r8) :: reprosum_diffmax ! setup reprosum, set rel_diff_max - logical :: reprosum_recompute ! setup reprosum, recompute if tolerance exceeded - - logical :: output_perf = .false. ! require timing data output for this pe - logical :: in_first_day = .true. ! currently simulating first day - - !--- history & budgets --- - logical :: do_budgets ! heat/water budgets on - logical :: do_histinit ! initial hist file - logical :: do_histavg ! histavg on or off - logical :: do_hist_r2x ! create aux files: r2x - logical :: do_hist_l2x ! create aux files: l2x - logical :: do_hist_a2x24hr ! create aux files: a2x - logical :: do_hist_l2x1yrg ! create aux files: l2x 1yr glc forcings - logical :: do_hist_a2x ! 
create aux files: a2x - logical :: do_hist_a2x3hrp ! create aux files: a2x 3hr precip - logical :: do_hist_a2x3hr ! create aux files: a2x 3hr states - logical :: do_hist_a2x1hri ! create aux files: a2x 1hr instantaneous - logical :: do_hist_a2x1hr ! create aux files: a2x 1hr - integer :: budget_inst ! instantaneous budget flag - integer :: budget_daily ! daily budget flag - integer :: budget_month ! monthly budget flag - integer :: budget_ann ! annual budget flag - integer :: budget_ltann ! long term budget flag for end of year writing - integer :: budget_ltend ! long term budget flag for end of run writing - - character(CL) :: hist_a2x_flds = & - 'Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf' - - character(CL) :: hist_a2x3hrp_flds = & - 'Faxa_rainc:Faxa_rainl:Faxa_snowc:Faxa_snowl' - - character(CL) :: hist_a2x24hr_flds = & - 'Faxa_bcphiwet:Faxa_bcphodry:Faxa_bcphidry:Faxa_ocphiwet:Faxa_ocphidry:& - &Faxa_ocphodry:Faxa_dstwet1:Faxa_dstdry1:Faxa_dstwet2:Faxa_dstdry2:Faxa_dstwet3:& - &Faxa_dstdry3:Faxa_dstwet4:Faxa_dstdry4:Sa_co2prog:Sa_co2diag' - - character(CL) :: hist_a2x1hri_flds = & - 'Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf' - - character(CL) :: hist_a2x1hr_flds = & - 'Sa_u:Sa_v' - - character(CL) :: hist_a2x3hr_flds = & - 'Sa_z:Sa_topo:Sa_u:Sa_v:Sa_tbot:Sa_ptem:Sa_shum:Sa_dens:Sa_pbot:Sa_pslv:Faxa_lwdn:& - &Faxa_rainc:Faxa_rainl:Faxa_snowc:Faxa_snowl:& - &Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf:& - &Sa_co2diag:Sa_co2prog' - - ! --- other --- - - integer :: ocnrun_count ! number of times ocn run alarm went on - logical :: exists ! true if file exists - integer :: ierr ! MPI error return - - character(*), parameter :: NLFileName = "drv_in" ! input namelist filename - - integer :: info_debug = 0 ! local info_debug level - - !---------------------------------------------------------------------------- - ! memory monitoring - !---------------------------------------------------------------------------- - real(r8) :: msize,msize0,msize1 ! memory size (high water) - real(r8) :: mrss ,mrss0 ,mrss1 ! resident size (current memory use) - - !---------------------------------------------------------------------------- - ! threading control - !---------------------------------------------------------------------------- - integer :: nthreads_GLOID ! OMP global number of threads - integer :: nthreads_CPLID ! OMP cpl number of threads - integer :: nthreads_ATMID ! OMP atm number of threads - integer :: nthreads_LNDID ! OMP lnd number of threads - integer :: nthreads_ICEID ! OMP ice number of threads - integer :: nthreads_OCNID ! OMP ocn number of threads - integer :: nthreads_GLCID ! OMP glc number of threads - integer :: nthreads_ROFID ! OMP glc number of threads - integer :: nthreads_WAVID ! OMP wav number of threads - integer :: nthreads_ESPID ! OMP esp number of threads - - integer :: pethreads_GLOID ! OMP number of threads per task - - logical :: drv_threading ! driver threading control - - !---------------------------------------------------------------------------- - ! communicator groups and related - !---------------------------------------------------------------------------- - integer :: global_comm - integer :: mpicom_GLOID ! MPI global communicator - integer :: mpicom_CPLID ! MPI cpl communicator - integer :: mpicom_OCNID ! MPI ocn communicator for ensemble member 1 - - integer :: mpicom_CPLALLATMID ! MPI comm for CPLALLATMID - integer :: mpicom_CPLALLLNDID ! MPI comm for CPLALLLNDID - integer :: mpicom_CPLALLICEID ! MPI comm for CPLALLICEID - integer :: mpicom_CPLALLOCNID ! 
MPI comm for CPLALLOCNID - integer :: mpicom_CPLALLGLCID ! MPI comm for CPLALLGLCID - integer :: mpicom_CPLALLROFID ! MPI comm for CPLALLROFID - integer :: mpicom_CPLALLWAVID ! MPI comm for CPLALLWAVID - - integer :: iam_GLOID ! pe number in global id - logical :: iamin_CPLID ! pe associated with CPLID - logical :: iamroot_GLOID ! GLOID masterproc - logical :: iamroot_CPLID ! CPLID masterproc - - logical :: iamin_CPLALLATMID ! pe associated with CPLALLATMID - logical :: iamin_CPLALLLNDID ! pe associated with CPLALLLNDID - logical :: iamin_CPLALLICEID ! pe associated with CPLALLICEID - logical :: iamin_CPLALLOCNID ! pe associated with CPLALLOCNID - logical :: iamin_CPLALLGLCID ! pe associated with CPLALLGLCID - logical :: iamin_CPLALLROFID ! pe associated with CPLALLROFID - logical :: iamin_CPLALLWAVID ! pe associated with CPLALLWAVID - - - !---------------------------------------------------------------------------- - ! complist: list of comps on this pe - !---------------------------------------------------------------------------- - - ! allow enough room for names of all physical components + coupler, - ! where each string can be up to (max_inst_name_len+1) characters - ! long (+1 allows for a space before each name) - character(len=(seq_comm_namelen+1)*(num_inst_phys+1)) :: complist - - !---------------------------------------------------------------------------- - ! comp_num_: unique component number for each component type - !---------------------------------------------------------------------------- - integer, parameter :: comp_num_atm = 1 - integer, parameter :: comp_num_lnd = 2 - integer, parameter :: comp_num_ice = 3 - integer, parameter :: comp_num_ocn = 4 - integer, parameter :: comp_num_glc = 5 - integer, parameter :: comp_num_rof = 6 - integer, parameter :: comp_num_wav = 7 - integer, parameter :: comp_num_esp = 8 - - !---------------------------------------------------------------------------- - ! misc - !---------------------------------------------------------------------------- - - integer, parameter :: ens1=1 ! use first instance of ensemble only - integer, parameter :: fix1=1 ! temporary hard-coding to first ensemble, needs to be fixed - integer :: eai, eli, eoi, eii, egi, eri, ewi, eei, exi, efi ! component instance counters - - !---------------------------------------------------------------------------- - ! 
formats - !---------------------------------------------------------------------------- - character(*), parameter :: subname = '(seq_mct_drv)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - character(*), parameter :: F0L = "('"//subname//" : ', A, L6 )" - character(*), parameter :: F01 = "('"//subname//" : ', A, 2i8, 3x, A )" - character(*), parameter :: F0R = "('"//subname//" : ', A, 2g23.15 )" - character(*), parameter :: FormatA = '(A,": =============== ", A44, " ===============")' - character(*), parameter :: FormatD = '(A,": =============== ", A20,I10.8,I8,8x, " ===============")' - character(*), parameter :: FormatR = '(A,": =============== ", A31,F12.3,1x, " ===============")' - character(*), parameter :: FormatQ = '(A,": =============== ", A20,2F10.2,4x," ===============")' - !=============================================================================== -contains - !=============================================================================== - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_pre_init1() - use shr_pio_mod, only : shr_pio_init1, shr_pio_init2 - use seq_comm_mct, only: num_inst_driver - !---------------------------------------------------------- - !| Initialize MCT and MPI communicators and IO - !---------------------------------------------------------- - - integer, dimension(num_inst_total) :: comp_id, comp_comm, comp_comm_iam - logical :: comp_iamin(num_inst_total) - character(len=seq_comm_namelen) :: comp_name(num_inst_total) - integer :: it - integer :: driver_id - integer :: driver_comm - - call mpi_init(ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_init') - call mpi_comm_dup(MPI_COMM_WORLD, global_comm, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_dup') - - comp_comm = MPI_COMM_NULL - time_brun = mpi_wtime() - - !--- Initialize multiple driver instances, if requested --- - call cime_cpl_init(global_comm, driver_comm, num_inst_driver, driver_id) - - call shr_pio_init1(num_inst_total,NLFileName, driver_comm) - ! - ! If pio_async_interface is true Global_comm is MPI_COMM_NULL on the servernodes - ! and server nodes do not return from shr_pio_init2 - ! - ! if (Global_comm /= MPI_COMM_NULL) then - - if (num_inst_driver > 1) then - call seq_comm_init(global_comm, driver_comm, NLFileName, drv_comm_ID=driver_id) - write(cpl_inst_tag,'("_",i4.4)') driver_id - else - call seq_comm_init(global_comm, driver_comm, NLFileName) - cpl_inst_tag = '' - end if - - !--- set task based threading counts --- - call seq_comm_getinfo(GLOID,pethreads=pethreads_GLOID,iam=iam_GLOID) - call seq_comm_setnthreads(pethreads_GLOID) - - !--- get some general data --- - it=1 - call seq_comm_getinfo(GLOID,mpicom=mpicom_GLOID,& - iamroot=iamroot_GLOID,nthreads=nthreads_GLOID) - if (iamroot_GLOID) output_perf = .true. - - call seq_comm_getinfo(CPLID,mpicom=mpicom_CPLID,& - iamroot=iamroot_CPLID,nthreads=nthreads_CPLID,& - iam=comp_comm_iam(it)) - if (iamroot_CPLID) output_perf = .true. 
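    ! The bookkeeping below repeats one pattern per component class (cpl, then
    ! atm, lnd, ocn, ice, glc, rof, wav, esp): record each instance's component
    ! ID, name, MPI communicator, and thread count via seq_comm_getinfo, append
    ! the instance name to complist on PEs that belong to it, and set
    ! output_perf on the instance's root PE so timing data is written there.
    ! Each loop except the esp loop is followed by one query of the
    ! corresponding CPLALL*ID communicator (coupler PEs together with that
    ! component's instances, per the seq_comm_mct naming).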
- - if (iamin_CPLID) complist = trim(complist)//' cpl' - - comp_id(it) = CPLID - comp_comm(it) = mpicom_CPLID - iamin_CPLID = seq_comm_iamin(CPLID) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - - do eai = 1,num_inst_atm - it=it+1 - comp_id(it) = ATMID(eai) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ATMID(eai), mpicom=comp_comm(it), & - nthreads=nthreads_ATMID, iam=comp_comm_iam(it)) - if (seq_comm_iamin(ATMID(eai))) then - complist = trim(complist)//' '//trim(seq_comm_name(ATMID(eai))) - endif - if (seq_comm_iamroot(ATMID(eai))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLATMID, mpicom=mpicom_CPLALLATMID) - iamin_CPLALLATMID = seq_comm_iamin(CPLALLATMID) - - do eli = 1,num_inst_lnd - it=it+1 - comp_id(it) = LNDID(eli) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(LNDID(eli), mpicom=comp_comm(it), & - nthreads=nthreads_LNDID, iam=comp_comm_iam(it)) - if (seq_comm_iamin(LNDID(eli))) then - complist = trim(complist)//' '//trim(seq_comm_name(LNDID(eli))) - endif - if (seq_comm_iamroot(LNDID(eli))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLLNDID, mpicom=mpicom_CPLALLLNDID) - iamin_CPLALLLNDID = seq_comm_iamin(CPLALLLNDID) - - do eoi = 1,num_inst_ocn - it=it+1 - comp_id(it) = OCNID(eoi) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(OCNID(eoi), mpicom=comp_comm(it), & - nthreads=nthreads_OCNID, iam=comp_comm_iam(it)) - if (seq_comm_iamin (OCNID(eoi))) then - complist = trim(complist)//' '//trim(seq_comm_name(OCNID(eoi))) - endif - if (seq_comm_iamroot(OCNID(eoi))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLOCNID, mpicom=mpicom_CPLALLOCNID) - iamin_CPLALLOCNID = seq_comm_iamin(CPLALLOCNID) - - do eii = 1,num_inst_ice - it=it+1 - comp_id(it) = ICEID(eii) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ICEID(eii), mpicom=comp_comm(it), & - nthreads=nthreads_ICEID, iam=comp_comm_iam(it)) - if (seq_comm_iamin (ICEID(eii))) then - complist = trim(complist)//' '//trim(seq_comm_name(ICEID(eii))) - endif - if (seq_comm_iamroot(ICEID(eii))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLICEID, mpicom=mpicom_CPLALLICEID) - iamin_CPLALLICEID = seq_comm_iamin(CPLALLICEID) - - do egi = 1,num_inst_glc - it=it+1 - comp_id(it) = GLCID(egi) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(GLCID(egi), mpicom=comp_comm(it), nthreads=nthreads_GLCID, iam=comp_comm_iam(it)) - if (seq_comm_iamin (GLCID(egi))) then - complist = trim(complist)//' '//trim(seq_comm_name(GLCID(egi))) - endif - if (seq_comm_iamroot(GLCID(egi))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLGLCID, mpicom=mpicom_CPLALLGLCID) - iamin_CPLALLGLCID = seq_comm_iamin(CPLALLGLCID) - - do eri = 1,num_inst_rof - it=it+1 - comp_id(it) = ROFID(eri) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ROFID(eri), mpicom=comp_comm(it), & - nthreads=nthreads_ROFID, iam=comp_comm_iam(it)) - if (seq_comm_iamin(ROFID(eri))) then - complist = trim(complist)//' '//trim( seq_comm_name(ROFID(eri))) - endif - if (seq_comm_iamroot(ROFID(eri))) output_perf = .true. 
- enddo - call seq_comm_getinfo(CPLALLROFID, mpicom=mpicom_CPLALLROFID) - iamin_CPLALLROFID = seq_comm_iamin(CPLALLROFID) - - do ewi = 1,num_inst_wav - it=it+1 - comp_id(it) = WAVID(ewi) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(WAVID(ewi), mpicom=comp_comm(it), & - nthreads=nthreads_WAVID, iam=comp_comm_iam(it)) - if (seq_comm_iamin(WAVID(ewi))) then - complist = trim(complist)//' '//trim(seq_comm_name(WAVID(ewi))) - endif - if (seq_comm_iamroot(WAVID(ewi))) output_perf = .true. - enddo - call seq_comm_getinfo(CPLALLWAVID, mpicom=mpicom_CPLALLWAVID) - iamin_CPLALLWAVID = seq_comm_iamin(CPLALLWAVID) - - do eei = 1,num_inst_esp - it=it+1 - comp_id(it) = ESPID(eei) - comp_iamin(it) = seq_comm_iamin(comp_id(it)) - comp_name(it) = seq_comm_name(comp_id(it)) - call seq_comm_getinfo(ESPID(eei), mpicom=comp_comm(it), & - nthreads=nthreads_ESPID, iam=comp_comm_iam(it)) - if (seq_comm_iamin (ESPID(eei))) then - complist = trim(complist)//' '//trim(seq_comm_name(ESPID(eei))) - endif - enddo - ! ESP components do not use the coupler (they are 'external') - - !---------------------------------------------------------- - !| Set logging parameters both for shr code and locally - !---------------------------------------------------------- - - if (iamroot_CPLID) then - inquire(file='cpl_modelio.nml'//trim(cpl_inst_tag),exist=exists) - if (exists) then - logunit = shr_file_getUnit() - call shr_file_setIO('cpl_modelio.nml'//trim(cpl_inst_tag),logunit) - call shr_file_setLogUnit(logunit) - loglevel = 1 - call shr_file_setLogLevel(loglevel) - endif - else - loglevel = 0 - call shr_file_setLogLevel(loglevel) - endif - - !---------------------------------------------------------- - ! Log info about the environment settings - !---------------------------------------------------------- - - if (iamroot_CPLID) then -#ifdef USE_ESMF_LIB - write(logunit,'(2A)') subname,' USE_ESMF_LIB is set' -#else - write(logunit,'(2A)') subname,' USE_ESMF_LIB is NOT set, using esmf_wrf_timemgr' -#endif - write(logunit,'(2A)') subname,' MCT_INTERFACE is set' - if (num_inst_driver > 1) & - write(logunit,'(2A,I0,A)') subname,' Driver is running with',num_inst_driver,'instances' - endif - - ! - ! When using io servers (pio_async_interface=.true.) the server tasks do not return from - ! shr_pio_init2 - ! 
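
The instance loops above walk a flat index (it) across the coupler plus every component instance and append each name this PE belongs to onto complist with trim()//' '//trim(); the same pattern is repeated later in cime_init. A minimal stand-alone sketch of that string accumulation (the component names and membership flags here are made up):

program complist_sketch
  implicit none
  character(len=64) :: complist
  character(len=8)  :: names(4) = [character(len=8) :: 'cpl', 'atm_0001', 'atm_0002', 'lnd_0001']
  logical           :: iamin(4) = [.true., .true., .false., .true.]
  integer :: i

  complist = ' '
  do i = 1, size(names)
     if (iamin(i)) complist = trim(complist)//' '//trim(names(i))
  end do
  print '(a)', 'complist ="'//trim(complist)//'"'   ! -> complist =" cpl atm_0001 lnd_0001"
end program complist_sketch
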
- call shr_pio_init2(comp_id,comp_name,comp_iamin,comp_comm,comp_comm_iam) - - end subroutine cime_pre_init1 - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_pre_init2() - use pio, only : file_desc_t, pio_closefile, pio_file_is_open - use shr_const_mod, only: shr_const_tkfrz, shr_const_tktrip, & - shr_const_mwwv, shr_const_mwdair - use shr_wv_sat_mod, only: shr_wv_sat_set_default, shr_wv_sat_init, & - ShrWVSatTableSpec, shr_wv_sat_make_tables - - type(file_desc_t) :: pioid - integer :: maxthreads - - character(CS) :: wv_sat_scheme - real(r8) :: wv_sat_transition_start - logical :: wv_sat_use_tables - real(r8) :: wv_sat_table_spacing - character(CL) :: errstring - - type(ShrWVSatTableSpec) :: liquid_spec, ice_spec, mixed_spec - - real(r8), parameter :: epsilo = shr_const_mwwv/shr_const_mwdair - - !---------------------------------------------------------- - !| Timer initialization (has to be after mpi init) - !---------------------------------------------------------- - maxthreads = max(nthreads_GLOID,nthreads_CPLID,nthreads_ATMID, & - nthreads_LNDID,nthreads_ICEID,nthreads_OCNID,nthreads_GLCID, & - nthreads_ROFID, nthreads_WAVID, nthreads_ESPID, pethreads_GLOID ) - - call t_initf(NLFileName, LogPrint=.true., mpicom=mpicom_GLOID, & - MasterTask=iamroot_GLOID,MaxThreads=maxthreads) - - if (iamin_CPLID) then - call seq_io_cpl_init() - endif - - !---------------------------------------------------------- - !| Memory test - !---------------------------------------------------------- - - !mt call shr_mem_init(prt=.true.) - call shr_mem_init(prt=iamroot_CPLID) - - !---------------------------------------------------------- - !| Initialize infodata - !---------------------------------------------------------- - - if (len_trim(cpl_inst_tag) > 0) then - call seq_infodata_init(infodata,nlfilename, GLOID, pioid, & - cpl_tag=cpl_inst_tag) - else - call seq_infodata_init(infodata,nlfilename, GLOID, pioid) - end if - - !---------------------------------------------------------- - ! Print Model heading and copyright message - !---------------------------------------------------------- - - if (iamroot_CPLID) call seq_cime_printlogheader() - - !---------------------------------------------------------- - !| Initialize coupled fields (depends on infodata) - !---------------------------------------------------------- - - call seq_flds_set(nlfilename, GLOID, infodata) - - !---------------------------------------------------------- - !| Obtain infodata info - !---------------------------------------------------------- - - call seq_infodata_GetData(infodata, & - info_debug=info_debug) - - if (info_debug > 1 .and. 
iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,'(2A)') 'Status of infodata after seq_infodata_init' - call seq_infodata_print( infodata ) - write(logunit,*) ' ' - endif - - call seq_infodata_GetData(infodata , & - read_restart=read_restart , & - restart_file=rest_file , & - timing_dir=timing_dir , & - tchkpt_dir=tchkpt_dir , & - info_debug=info_debug , & - atm_present=atm_present , & - lnd_present=lnd_present , & - ice_present=ice_present , & - ocn_present=ocn_present , & - glc_present=glc_present , & - rof_present=rof_present , & - wav_present=wav_present , & - esp_present=esp_present , & - single_column=single_column , & - aqua_planet=aqua_planet , & - cpl_seq_option=cpl_seq_option , & - drv_threading=drv_threading , & - do_histinit=do_histinit , & - do_budgets=do_budgets , & - budget_inst=budget_inst , & - budget_daily=budget_daily , & - budget_month=budget_month , & - budget_ann=budget_ann , & - budget_ltann=budget_ltann , & - budget_ltend=budget_ltend , & - histaux_a2x=do_hist_a2x , & - histaux_a2x1hri=do_hist_a2x1hri , & - histaux_a2x1hr=do_hist_a2x1hr , & - histaux_a2x3hr =do_hist_a2x3hr , & - histaux_a2x3hrp=do_hist_a2x3hrp , & - histaux_a2x24hr=do_hist_a2x24hr , & - histaux_l2x=do_hist_l2x , & - histaux_l2x1yrg=do_hist_l2x1yrg , & - histaux_r2x=do_hist_r2x , & - run_barriers=run_barriers , & - mct_usealltoall=mct_usealltoall , & - mct_usevector=mct_usevector , & - aoflux_grid=aoflux_grid , & - vect_map=vect_map , & - atm_gnam=atm_gnam , & - lnd_gnam=lnd_gnam , & - ocn_gnam=ocn_gnam , & - ice_gnam=ice_gnam , & - rof_gnam=rof_gnam , & - glc_gnam=glc_gnam , & - wav_gnam=wav_gnam , & - tfreeze_option = tfreeze_option , & - cpl_decomp=seq_mctext_decomp , & - shr_map_dopole=shr_map_dopole , & - wall_time_limit=wall_time_limit , & - force_stop_at=force_stop_at , & - reprosum_use_ddpdd=reprosum_use_ddpdd , & - reprosum_allow_infnan=reprosum_allow_infnan, & - reprosum_diffmax=reprosum_diffmax , & - reprosum_recompute=reprosum_recompute, & - max_cplstep_time=max_cplstep_time) - - ! above - cpl_decomp is set to pass the cpl_decomp value to seq_mctext_decomp - ! (via a use statement) - - call shr_map_setDopole(shr_map_dopole) - - call shr_reprosum_setopts(& - repro_sum_use_ddpdd_in = reprosum_use_ddpdd, & - repro_sum_allow_infnan_in = reprosum_allow_infnan, & - repro_sum_rel_diff_max_in = reprosum_diffmax, & - repro_sum_recompute_in = reprosum_recompute) - - ! Check cpl_seq_option - - if (trim(cpl_seq_option) /= 'CESM1_ORIG' .and. & - trim(cpl_seq_option) /= 'CESM1_ORIG_TIGHT' .and. & - trim(cpl_seq_option) /= 'CESM1_MOD' .and. & - trim(cpl_seq_option) /= 'CESM1_MOD_TIGHT' .and. & - trim(cpl_seq_option) /= 'RASM_OPTION1' .and. & - trim(cpl_seq_option) /= 'RASM_OPTION2' ) then - call shr_sys_abort(subname//' invalid cpl_seq_option = '//trim(cpl_seq_option)) - endif - - !---------------------------------------------------------- - !| Test Threading Setup in driver - ! 
happens to be valid on all pes for all IDs - !---------------------------------------------------------- - - if (drv_threading) then - if (iamroot_GLOID) write(logunit,*) ' ' - if (iamroot_GLOID) write(logunit,'(2A) ') subname,' Test Threading in driver' - call seq_comm_setnthreads(nthreads_GLOID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_GLOID = ',& - nthreads_GLOID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_CPLID = ',& - nthreads_CPLID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ATMID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ATMID = ',& - nthreads_ATMID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_LNDID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_LNDID = ',& - nthreads_LNDID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_OCNID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_OCNID = ',& - nthreads_OCNID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ICEID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ICEID = ',& - nthreads_ICEID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_GLCID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_GLCID = ',& - nthreads_GLCID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ROFID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ROFID = ',& - nthreads_ROFID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_WAVID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_WAVID = ',& - nthreads_WAVID,seq_comm_getnthreads() - call seq_comm_setnthreads(nthreads_ESPID) - if (iamroot_GLOID) write(logunit,'(2A,2I4)') subname,' nthreads_ESPID = ',& - nthreads_ESPID,seq_comm_getnthreads() - if (iamroot_GLOID) write(logunit,*) ' ' - - call seq_comm_setnthreads(nthreads_GLOID) - endif - - !---------------------------------------------------------- - !| Initialize time manager - !---------------------------------------------------------- - - call seq_timemgr_clockInit(seq_SyncClock, nlfilename, & - read_restart, rest_file, pioid, mpicom_gloid, & - EClock_d, EClock_a, EClock_l, EClock_o, & - EClock_i, Eclock_g, Eclock_r, Eclock_w, Eclock_e) - - if (iamroot_CPLID) then - call seq_timemgr_clockPrint(seq_SyncClock) - endif - - !---------------------------------------------------------- - !| Initialize infodata items which need the clocks - !---------------------------------------------------------- - call seq_infodata_init2(infodata, GLOID) - - call seq_infodata_getData(infodata, & - orb_iyear=orb_iyear, & - orb_iyear_align=orb_iyear_align, & - orb_mode=orb_mode) - - !---------------------------------------------------------- - ! 
Initialize freezing point calculation for all components - !---------------------------------------------------------- - - call shr_frz_freezetemp_init(tfreeze_option) - - if (trim(orb_mode) == trim(seq_infodata_orb_variable_year)) then - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd) - - call shr_cal_date2ymd(ymd,year,month,day) - orb_cyear = orb_iyear + (year - orb_iyear_align) - - call shr_orb_params(orb_cyear, orb_eccen, orb_obliq, orb_mvelp, & - orb_obliqr, orb_lambm0, orb_mvelpp, iamroot_CPLID) - - call seq_infodata_putData(infodata, & - orb_eccen=orb_eccen, & - orb_obliqr=orb_obliqr, & - orb_lambm0=orb_lambm0, & - orb_mvelpp=orb_mvelpp) - endif - - call seq_infodata_getData(infodata, & - wv_sat_scheme=wv_sat_scheme, & - wv_sat_transition_start=wv_sat_transition_start, & - wv_sat_use_tables=wv_sat_use_tables, & - wv_sat_table_spacing=wv_sat_table_spacing) - - if (.not. shr_wv_sat_set_default(wv_sat_scheme)) then - call shr_sys_abort('Invalid wv_sat_scheme.') - end if - - call shr_wv_sat_init(shr_const_tkfrz, shr_const_tktrip, & - wv_sat_transition_start, epsilo, errstring) - - if (errstring /= "") then - call shr_sys_abort('shr_wv_sat_init: '//trim(errstring)) - end if - - ! The below produces internal lookup tables in the range 175-374K for - ! liquid water, and 125-274K for ice, with a resolution set by the - ! option wv_sat_table_spacing. - ! In theory these ranges could be specified in the namelist, but in - ! practice users will want to change them *very* rarely if ever, which - ! is why only the spacing is in the namelist. - if (wv_sat_use_tables) then - liquid_spec = ShrWVSatTableSpec(ceiling(200._r8/wv_sat_table_spacing), & - 175._r8, wv_sat_table_spacing) - ice_spec = ShrWVSatTableSpec(ceiling(150._r8/wv_sat_table_spacing), & - 125._r8, wv_sat_table_spacing) - mixed_spec = ShrWVSatTableSpec(ceiling(250._r8/wv_sat_table_spacing), & - 125._r8, wv_sat_table_spacing) - call shr_wv_sat_make_tables(liquid_spec, ice_spec, mixed_spec) - end if - - call seq_infodata_putData(infodata, & - atm_phase=1, & - lnd_phase=1, & - ocn_phase=1, & - ice_phase=1, & - glc_phase=1, & - wav_phase=1, & - esp_phase=1) - - !---------------------------------------------------------- - !| Set aqua_planet and single_column flags - ! If in single column mode, overwrite flags according to focndomain file - ! in ocn_in namelist. SCAM can reset the "present" flags for lnd, - ! ocn, ice, rof, and flood. - !---------------------------------------------------------- - - if (.not.aqua_planet .and. 
single_column) then - call seq_infodata_getData( infodata, & - scmlon=scmlon, scmlat=scmlat) - - call seq_comm_getinfo(OCNID(ens1), mpicom=mpicom_OCNID) - - call shr_scam_checkSurface(scmlon, scmlat, & - OCNID(ens1), mpicom_OCNID, & - lnd_present=lnd_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - rof_present=rof_present, & - flood_present=flood_present, & - rofice_present=rofice_present) - - call seq_infodata_putData(infodata, & - lnd_present=lnd_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - rof_present=rof_present, & - flood_present=flood_present, & - rofice_present=rofice_present) - endif - if(PIO_FILE_IS_OPEN(pioid)) then - call pio_closefile(pioid) - endif - - end subroutine cime_pre_init2 - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_init() - - character(CL), allocatable :: comp_resume(:) - - -104 format( A, i10.8, i8) - - !----------------------------------------------------------------------------- - !| Component Initialization - ! Note that within each component initialization, the relevant x_present flag - ! part of CIMEInit can be modified - ! By default, all these flags are set to true - ! The atm can reset the lnd_present, ice_present and ocn_present flags based - ! on aqua_planet, ideal_phys and adiabatic modes - ! The stub components will reset the present flags to false, all other - ! components will set them to true for the purposes of symmetry - !----------------------------------------------------------------------------- - - call t_startf('CPL:cime_init') - call t_adj_detailf(+1) - - call t_startf('CPL:init_comps') - if (iamroot_CPLID )then - write(logunit,*) ' ' - write(logunit,F00) 'Initialize each component: atm, lnd, rof, ocn, ice, glc, wav, esp' - call shr_sys_flush(logunit) - endif - - call t_startf('CPL:comp_init_pre_all') - call component_init_pre(atm, ATMID, CPLATMID, CPLALLATMID, infodata, ntype='atm') - call component_init_pre(lnd, LNDID, CPLLNDID, CPLALLLNDID, infodata, ntype='lnd') - call component_init_pre(rof, ROFID, CPLROFID, CPLALLROFID, infodata, ntype='rof') - call component_init_pre(ocn, OCNID, CPLOCNID, CPLALLOCNID, infodata, ntype='ocn') - call component_init_pre(ice, ICEID, CPLICEID, CPLALLICEID, infodata, ntype='ice') - call component_init_pre(glc, GLCID, CPLGLCID, CPLALLGLCID, infodata, ntype='glc') - call component_init_pre(wav, WAVID, CPLWAVID, CPLALLWAVID, infodata, ntype='wav') - call component_init_pre(esp, ESPID, CPLESPID, CPLALLESPID, infodata, ntype='esp') - call t_stopf('CPL:comp_init_pre_all') - - call t_startf('CPL:comp_init_cc_atm') - call t_adj_detailf(+2) - - call component_init_cc(Eclock_a, atm, atm_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_atm') - - call t_startf('CPL:comp_init_cc_lnd') - call t_adj_detailf(+2) - call component_init_cc(Eclock_l, lnd, lnd_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_lnd') - - call t_startf('CPL:comp_init_cc_rof') - call t_adj_detailf(+2) - call component_init_cc(Eclock_r, rof, rof_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_rof') - - call t_startf('CPL:comp_init_cc_ocn') - call t_adj_detailf(+2) - call component_init_cc(Eclock_o, ocn, ocn_init, infodata, NLFilename) - call t_adj_detailf(-2) - call 
t_stopf('CPL:comp_init_cc_ocn') - - call t_startf('CPL:comp_init_cc_ice') - call t_adj_detailf(+2) - call component_init_cc(Eclock_i, ice, ice_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_ice') - - call t_startf('CPL:comp_init_cc_glc') - call t_adj_detailf(+2) - call component_init_cc(Eclock_g, glc, glc_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_glc') - - call t_startf('CPL:comp_init_cc_wav') - call t_adj_detailf(+2) - call component_init_cc(Eclock_w, wav, wav_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_wav') - - call t_startf('CPL:comp_init_cc_esp') - call t_adj_detailf(+2) - call component_init_cc(Eclock_e, esp, esp_init, infodata, NLFilename) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_esp') - - call t_startf('CPL:comp_init_cx_all') - call t_adj_detailf(+2) - call component_init_cx(atm, infodata) - call component_init_cx(lnd, infodata) - call component_init_cx(rof, infodata) - call component_init_cx(ocn, infodata) - call component_init_cx(ice, infodata) - call component_init_cx(glc, infodata) - call component_init_cx(wav, infodata) - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cx_all') - - ! Determine complist (list of comps for each id) - - call t_startf('CPL:comp_list_all') - call t_adj_detailf(+2) - complist = " " - if (iamin_CPLID) complist = trim(complist)//' cpl' - - do eai = 1,num_inst_atm - iamin_ID = component_get_iamin_compid(atm(eai)) - if (iamin_ID) then - compname = component_get_name(atm(eai)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do eli = 1,num_inst_lnd - iamin_ID = component_get_iamin_compid(lnd(eli)) - if (iamin_ID) then - compname = component_get_name(lnd(eli)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do eii = 1,num_inst_ice - iamin_ID = component_get_iamin_compid(ice(eii)) - if (iamin_ID) then - compname = component_get_name(ice(eii)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do eoi = 1,num_inst_ocn - iamin_ID = component_get_iamin_compid(ocn(eoi)) - if (iamin_ID) then - compname = component_get_name(ocn(eoi)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do egi = 1,num_inst_glc - iamin_ID = component_get_iamin_compid(glc(egi)) - if (iamin_ID) then - compname = component_get_name(glc(egi)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - do ewi = 1,num_inst_wav - iamin_ID = component_get_iamin_compid(wav(ewi)) - if (iamin_ID) then - compname = component_get_name(wav(ewi)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - - do eei = 1,num_inst_esp - iamin_ID = component_get_iamin_compid(esp(eei)) - if (iamin_ID) then - compname = component_get_name(esp(eei)) - complist = trim(complist)//' '//trim(compname) - endif - enddo - - call t_adj_detailf(-2) - call t_stopf('CPL:comp_list_all') - - call t_stopf('CPL:init_comps') - !---------------------------------------------------------- - !| Determine coupling interactions based on present and prognostic flags - !---------------------------------------------------------- - - if (iamin_CPLALLATMID) call seq_infodata_exchange(infodata,CPLALLATMID,'cpl2atm_init') - if (iamin_CPLALLLNDID) call seq_infodata_exchange(infodata,CPLALLLNDID,'cpl2lnd_init') - if (iamin_CPLALLOCNID) call seq_infodata_exchange(infodata,CPLALLOCNID,'cpl2ocn_init') - if (iamin_CPLALLICEID) call seq_infodata_exchange(infodata,CPLALLICEID,'cpl2ice_init') - if (iamin_CPLALLGLCID) 
call seq_infodata_exchange(infodata,CPLALLGLCID,'cpl2glc_init') - if (iamin_CPLALLROFID) call seq_infodata_exchange(infodata,CPLALLROFID,'cpl2rof_init') - if (iamin_CPLALLWAVID) call seq_infodata_exchange(infodata,CPLALLWAVID,'cpl2wav_init') - - if (iamroot_CPLID) then - write(logunit,F00) 'Determine final settings for presence of surface components' - call shr_sys_flush(logunit) - endif - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - lnd_present=lnd_present, & - ice_present=ice_present, & - ocn_present=ocn_present, & - glc_present=glc_present, & - glclnd_present=glclnd_present, & - glcocn_present=glcocn_present, & - glcice_present=glcice_present, & - rof_present=rof_present, & - rofice_present=rofice_present, & - wav_present=wav_present, & - esp_present=esp_present, & - flood_present=flood_present, & - atm_prognostic=atm_prognostic, & - lnd_prognostic=lnd_prognostic, & - ice_prognostic=ice_prognostic, & - iceberg_prognostic=iceberg_prognostic, & - ocn_prognostic=ocn_prognostic, & - ocnrof_prognostic=ocnrof_prognostic, & - glc_prognostic=glc_prognostic, & - rof_prognostic=rof_prognostic, & - wav_prognostic=wav_prognostic, & - esp_prognostic=esp_prognostic, & - dead_comps=dead_comps, & - esmf_map_flag=esmf_map_flag, & - atm_nx=atm_nx, atm_ny=atm_ny, & - lnd_nx=lnd_nx, lnd_ny=lnd_ny, & - rof_nx=rof_nx, rof_ny=rof_ny, & - ice_nx=ice_nx, ice_ny=ice_ny, & - glc_nx=glc_nx, glc_ny=glc_ny, & - ocn_nx=ocn_nx, ocn_ny=ocn_ny, & - wav_nx=wav_nx, wav_ny=wav_ny, & - atm_aero=atm_aero ) - - ! derive samegrid flags - - samegrid_ao = .true. - samegrid_al = .true. - samegrid_lr = .true. - samegrid_oi = .true. - samegrid_ro = .true. - samegrid_aw = .true. - samegrid_ow = .true. - samegrid_lg = .true. - samegrid_og = .true. - samegrid_ig = .true. - samegrid_alo = .true. - - ! set samegrid to true for single column - if (.not. single_column) then - if (trim(atm_gnam) /= trim(ocn_gnam)) samegrid_ao = .false. - if (trim(atm_gnam) /= trim(lnd_gnam)) samegrid_al = .false. - if (trim(lnd_gnam) /= trim(rof_gnam)) samegrid_lr = .false. - if (trim(rof_gnam) /= trim(ocn_gnam)) samegrid_ro = .false. - if (trim(ocn_gnam) /= trim(ice_gnam)) samegrid_oi = .false. - if (trim(atm_gnam) /= trim(wav_gnam)) samegrid_aw = .false. - if (trim(ocn_gnam) /= trim(wav_gnam)) samegrid_ow = .false. - if (trim(lnd_gnam) /= trim(glc_gnam)) samegrid_lg = .false. - if (trim(ocn_gnam) /= trim(glc_gnam)) samegrid_og = .false. - if (trim(ice_gnam) /= trim(glc_gnam)) samegrid_ig = .false. - samegrid_alo = (samegrid_al .and. samegrid_ao) - endif - - ! derive coupling connection flags - - atm_c2_lnd = .false. - atm_c2_ocn = .false. - atm_c2_ice = .false. - atm_c2_wav = .false. - lnd_c2_atm = .false. - lnd_c2_rof = .false. - lnd_c2_glc = .false. - ocn_c2_atm = .false. - ocn_c2_ice = .false. - ocn_c2_wav = .false. - ice_c2_atm = .false. - ice_c2_ocn = .false. - ice_c2_wav = .false. - rof_c2_lnd = .false. - rof_c2_ocn = .false. - rof_c2_ice = .false. - glc_c2_lnd = .false. - glc_c2_ocn = .false. - glc_c2_ice = .false. - wav_c2_ocn = .false. - - if (atm_present) then - if (lnd_prognostic) atm_c2_lnd = .true. - if (ocn_prognostic) atm_c2_ocn = .true. - if (ocn_present ) atm_c2_ocn = .true. ! needed for aoflux calc if aoflux=ocn - if (ice_prognostic) atm_c2_ice = .true. - if (wav_prognostic) atm_c2_wav = .true. - endif - if (lnd_present) then - if (atm_prognostic) lnd_c2_atm = .true. - if (rof_prognostic) lnd_c2_rof = .true. - if (glc_prognostic) lnd_c2_glc = .true. 
- endif - if (ocn_present) then - if (atm_prognostic) ocn_c2_atm = .true. - if (atm_present ) ocn_c2_atm = .true. ! needed for aoflux calc if aoflux=atm - if (ice_prognostic) ocn_c2_ice = .true. - if (wav_prognostic) ocn_c2_wav = .true. - endif - if (ice_present) then - if (atm_prognostic) ice_c2_atm = .true. - if (ocn_prognostic) ice_c2_ocn = .true. - if (wav_prognostic) ice_c2_wav = .true. - endif - if (rof_present) then - if (lnd_prognostic ) rof_c2_lnd = .true. - if (ocnrof_prognostic) rof_c2_ocn = .true. - if (rofice_present .and. iceberg_prognostic) rof_c2_ice = .true. - endif - if (glc_present) then - if (glclnd_present .and. lnd_prognostic) glc_c2_lnd = .true. - if (glcocn_present .and. ocn_prognostic) glc_c2_ocn = .true. - if (glcice_present .and. iceberg_prognostic) glc_c2_ice = .true. - endif - if (wav_present) then - if (ocn_prognostic) wav_c2_ocn = .true. - endif - - !---------------------------------------------------------- - ! Set domain check and other flag - !---------------------------------------------------------- - - domain_check = .true. - if (single_column ) domain_check = .false. - if (dead_comps ) domain_check = .false. - - ! set skip_ocean_run flag, used primarily for ocn run on first timestep - ! use reading a restart as a surrogate from whether this is a startup run - - skip_ocean_run = .true. - if ( read_restart) skip_ocean_run = .false. - ocnrun_count = 0 - cpl2ocn_first = .true. - - do_histavg = .true. - if (seq_timemgr_histavg_type == seq_timemgr_type_never) then - do_histavg = .false. - endif - - !---------------------------------------------------------- - !| Write component and coupler setup information - !---------------------------------------------------------- - - if (iamroot_CPLID) then - write(logunit,* )' ' - write(logunit,F00)'After component initialization:' - write(logunit,F0L)'atm model present = ',atm_present - write(logunit,F0L)'lnd model present = ',lnd_present - write(logunit,F0L)'ocn model present = ',ocn_present - write(logunit,F0L)'ice model present = ',ice_present - write(logunit,F0L)'glc model present = ',glc_present - write(logunit,F0L)'glc/lnd present = ',glclnd_present - write(logunit,F0L)'glc/ocn present = ',glcocn_present - write(logunit,F0L)'glc/ice present = ',glcice_present - write(logunit,F0L)'rof model present = ',rof_present - write(logunit,F0L)'rof/ice present = ',rofice_present - write(logunit,F0L)'rof/flood present = ',flood_present - write(logunit,F0L)'wav model present = ',wav_present - write(logunit,F0L)'esp model present = ',esp_present - - write(logunit,F0L)'atm model prognostic = ',atm_prognostic - write(logunit,F0L)'lnd model prognostic = ',lnd_prognostic - write(logunit,F0L)'ocn model prognostic = ',ocn_prognostic - write(logunit,F0L)'ice model prognostic = ',ice_prognostic - write(logunit,F0L)'iceberg prognostic = ',iceberg_prognostic - write(logunit,F0L)'glc model prognostic = ',glc_prognostic - write(logunit,F0L)'rof model prognostic = ',rof_prognostic - write(logunit,F0L)'ocn rof prognostic = ',ocnrof_prognostic - write(logunit,F0L)'wav model prognostic = ',wav_prognostic - write(logunit,F0L)'esp model prognostic = ',esp_prognostic - - write(logunit,F0L)'atm_c2_lnd = ',atm_c2_lnd - write(logunit,F0L)'atm_c2_ocn = ',atm_c2_ocn - write(logunit,F0L)'atm_c2_ice = ',atm_c2_ice - write(logunit,F0L)'atm_c2_wav = ',atm_c2_wav - write(logunit,F0L)'lnd_c2_atm = ',lnd_c2_atm - write(logunit,F0L)'lnd_c2_rof = ',lnd_c2_rof - write(logunit,F0L)'lnd_c2_glc = ',lnd_c2_glc - write(logunit,F0L)'ocn_c2_atm = ',ocn_c2_atm - 
write(logunit,F0L)'ocn_c2_ice = ',ocn_c2_ice - write(logunit,F0L)'ocn_c2_wav = ',ocn_c2_wav - write(logunit,F0L)'ice_c2_atm = ',ice_c2_atm - write(logunit,F0L)'ice_c2_ocn = ',ice_c2_ocn - write(logunit,F0L)'ice_c2_wav = ',ice_c2_wav - write(logunit,F0L)'rof_c2_lnd = ',rof_c2_lnd - write(logunit,F0L)'rof_c2_ocn = ',rof_c2_ocn - write(logunit,F0L)'rof_c2_ice = ',rof_c2_ice - write(logunit,F0L)'glc_c2_lnd = ',glc_c2_lnd - write(logunit,F0L)'glc_c2_ocn = ',glc_c2_ocn - write(logunit,F0L)'glc_c2_ice = ',glc_c2_ice - write(logunit,F0L)'wav_c2_ocn = ',wav_c2_ocn - - write(logunit,F0L)'dead components = ',dead_comps - write(logunit,F0L)'domain_check = ',domain_check - write(logunit,F01)'atm_nx,atm_ny = ',atm_nx,atm_ny,trim(atm_gnam) - write(logunit,F01)'lnd_nx,lnd_ny = ',lnd_nx,lnd_ny,trim(lnd_gnam) - write(logunit,F01)'rof_nx,rof_ny = ',rof_nx,rof_ny,trim(rof_gnam) - write(logunit,F01)'ice_nx,ice_ny = ',ice_nx,ice_ny,trim(ice_gnam) - write(logunit,F01)'ocn_nx,ocn_ny = ',ocn_nx,ocn_ny,trim(ocn_gnam) - write(logunit,F01)'glc_nx,glc_ny = ',glc_nx,glc_ny,trim(glc_gnam) - write(logunit,F01)'wav_nx,wav_ny = ',wav_nx,wav_ny,trim(wav_gnam) - write(logunit,F0L)'samegrid_ao = ',samegrid_ao - write(logunit,F0L)'samegrid_al = ',samegrid_al - write(logunit,F0L)'samegrid_ro = ',samegrid_ro - write(logunit,F0L)'samegrid_aw = ',samegrid_aw - write(logunit,F0L)'samegrid_ow = ',samegrid_ow - write(logunit,F0L)'skip init ocean run = ',skip_ocean_run - write(logunit,F00)'cpl sequence option = ',trim(cpl_seq_option) - write(logunit,F0L)'do_histavg = ',do_histavg - write(logunit,F0L)'atm_aero = ',atm_aero - write(logunit,* )' ' - call shr_sys_flush(logunit) - endif - - !---------------------------------------------------------- - !| Present and prognostic consistency checks - !---------------------------------------------------------- - - if (atm_prognostic .and. .not.atm_present) then - call shr_sys_abort(subname//' ERROR: if prognostic atm must also have atm present') - endif - if (ocn_prognostic .and. .not.ocn_present) then - call shr_sys_abort(subname//' ERROR: if prognostic ocn must also have ocn present') - endif - if (lnd_prognostic .and. .not.lnd_present) then - call shr_sys_abort(subname//' ERROR: if prognostic lnd must also have lnd present') - endif - if (ice_prognostic .and. .not.ice_present) then - call shr_sys_abort(subname//' ERROR: if prognostic ice must also have ice present') - endif - if (iceberg_prognostic .and. .not.ice_prognostic) then - call shr_sys_abort(subname//' ERROR: if prognostic iceberg must also have ice prognostic') - endif - if (glc_prognostic .and. .not.glc_present) then - call shr_sys_abort(subname//' ERROR: if prognostic glc must also have glc present') - endif - if (rof_prognostic .and. .not.rof_present) then - call shr_sys_abort(subname//' ERROR: if prognostic rof must also have rof present') - endif - if (wav_prognostic .and. .not.wav_present) then - call shr_sys_abort(subname//' ERROR: if prognostic wav must also have wav present') - endif - if (esp_prognostic .and. .not.esp_present) then - call shr_sys_abort(subname//' ERROR: if prognostic esp must also have esp present') - endif -#ifndef CPL_BYPASS - if ((ice_prognostic .or. ocn_prognostic .or. lnd_prognostic) .and. .not. atm_present) then - call shr_sys_abort(subname//' ERROR: if prognostic surface model must also have atm present') - endif -#endif - if ((glclnd_present .or. glcocn_present .or. glcice_present) .and. 
.not.glc_present) then - call shr_sys_abort(subname//' ERROR: if glcxxx present must also have glc present') - endif - if (rofice_present .and. .not.rof_present) then - call shr_sys_abort(subname//' ERROR: if rofice present must also have rof present') - endif - if (ocnrof_prognostic .and. .not.rof_present) then - if (iamroot_CPLID) then - write(logunit,F00) 'WARNING: ocnrof_prognostic is TRUE but rof_present is FALSE' - call shr_sys_flush(logunit) - endif - endif - - !---------------------------------------------------------- - !| Samegrid checks - !---------------------------------------------------------- - - if (.not. samegrid_oi) then - call shr_sys_abort(subname//' ERROR: samegrid_oi is false') - endif - - !---------------------------------------------------------- - !| Check instances of prognostic components - !---------------------------------------------------------- - - if (atm_prognostic .and. num_inst_atm /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: atm_prognostic but num_inst_atm not num_inst_max') - if (lnd_prognostic .and. num_inst_lnd /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: lnd_prognostic but num_inst_lnd not num_inst_max') - if (ocn_prognostic .and. (num_inst_ocn /= num_inst_max .and. num_inst_ocn /= 1)) & - call shr_sys_abort(subname//' ERROR: ocn_prognostic but num_inst_ocn not 1 or num_inst_max') - if (ice_prognostic .and. num_inst_ice /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: ice_prognostic but num_inst_ice not num_inst_max') - if (glc_prognostic .and. num_inst_glc /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: glc_prognostic but num_inst_glc not num_inst_max') - if (rof_prognostic .and. num_inst_rof /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: rof_prognostic but num_inst_rof not num_inst_max') - if (wav_prognostic .and. num_inst_wav /= num_inst_max) & - call shr_sys_abort(subname//' ERROR: wav_prognostic but num_inst_wav not num_inst_max') - - !---------------------------------------------------------- - !| Initialize attribute vectors for prep_c2C_init_avs routines and fractions - !| Initialize mapping between components - !---------------------------------------------------------- - - if (iamin_CPLID) then - - call t_startf('CPL:init_maps') - call t_adj_detailf(+2) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call prep_atm_init(infodata, ocn_c2_atm, ice_c2_atm, lnd_c2_atm) - - call prep_lnd_init(infodata, atm_c2_lnd, rof_c2_lnd, glc_c2_lnd) - - call prep_ocn_init(infodata, atm_c2_ocn, atm_c2_ice, ice_c2_ocn, rof_c2_ocn, wav_c2_ocn, glc_c2_ocn) - - call prep_ice_init(infodata, ocn_c2_ice, glc_c2_ice, rof_c2_ice ) - - call prep_rof_init(infodata, lnd_c2_rof) - - call prep_glc_init(infodata, lnd_c2_glc) - - call prep_wav_init(infodata, atm_c2_wav, ocn_c2_wav, ice_c2_wav) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_adj_detailf(-2) - call t_stopf('CPL:init_maps') - - endif - - ! need to finish up the computation of the atm - ocean map (tempest) - if (iamin_CPLALLATMID .and. 
ocn_c2_atm) call prep_atm_ocn_moab(infodata) - - !---------------------------------------------------------- - !| Update aream in domains where appropriate - !---------------------------------------------------------- - - if (iamin_CPLID) then - call t_startf ('CPL:init_aream') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_init_aream(infodata, rof_c2_ocn, samegrid_ao, samegrid_al, & - samegrid_ro, samegrid_lg) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_aream') - endif ! iamin_CPLID - - !---------------------------------------------------------- - !| Check domains - ! This must be done after the mappers are initialized since - ! checking is done on each processor and not with a global gather - !---------------------------------------------------------- - - if (iamin_CPLID) then - call t_startf ('CPL:init_domain_check') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (domain_check) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Performing domain checking' - call shr_sys_flush(logunit) - endif - - call seq_domain_check( infodata, & - atm(ens1), ice(ens1), lnd(ens1), ocn(ens1), rof(ens1), glc(ens1), & - samegrid_al, samegrid_ao, samegrid_ro, samegrid_lg) - - endif - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_domain_check') - endif ! iamin_CPLID - - !---------------------------------------------------------- - !| Initialize area corrections based on aream (read in map_init) and area - !| Area correct component initialization output fields - !| Map initial component AVs from component to coupler pes - !---------------------------------------------------------- - - areafact_samegrid = .false. -#if (defined E3SM_SCM_REPLAY ) - if (.not.samegrid_alo) then - call shr_sys_abort(subname//' ERROR: samegrid_alo is false - Must run with same atm/ocn/lnd grids when configured for scam iop') - else - areafact_samegrid = .true. - endif -#endif - if (single_column) areafact_samegrid = .true. - - call t_startf ('CPL:init_areacor') - call t_adj_detailf(+2) - - call mpi_barrier(mpicom_GLOID,ierr) - if (atm_present) call component_init_areacor(atm, areafact_samegrid, seq_flds_a2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (lnd_present) call component_init_areacor(lnd, areafact_samegrid, seq_flds_l2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (rof_present) call component_init_areacor(rof, areafact_samegrid, seq_flds_r2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (ocn_present) call component_init_areacor(ocn, areafact_samegrid, seq_flds_o2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (ice_present) call component_init_areacor(ice, areafact_samegrid, seq_flds_i2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (glc_present) call component_init_areacor(glc, areafact_samegrid, seq_flds_g2x_fluxes) - - call mpi_barrier(mpicom_GLOID,ierr) - if (wav_present) call component_init_areacor(wav, areafact_samegrid, seq_flds_w2x_fluxes) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_areacor') - - !---------------------------------------------------------- - !| global sum diagnostics for IC data - !---------------------------------------------------------- - - if (iamin_CPLID .and. 
info_debug > 1) then - call t_startf ('CPL:init_diag') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (atm_present) then - call component_diag(infodata, atm, flow='c2x', comment='recv IC atm', & - info_debug=info_debug) - endif - if (ice_present) then - call component_diag(infodata, ice, flow='c2x', comment='recv IC ice', & - info_debug=info_debug) - endif - if (lnd_present) then - call component_diag(infodata, lnd, flow='c2x', comment='recv IC lnd', & - info_debug=info_debug) - endif - if (rof_present) then - call component_diag(infodata, rof, flow='c2x', comment='recv IC rof', & - info_debug=info_debug) - endif - if (ocn_present) then - call component_diag(infodata, ocn, flow='c2x', comment='recv IC ocn', & - info_debug=info_debug) - endif - if (glc_present) then - call component_diag(infodata, glc, flow='c2x', comment='recv IC glc', & - info_debug=info_debug) - endif - if (wav_present) then - call component_diag(infodata, wav, flow='c2x', comment='recv IC wav', & - info_debug=info_debug) - endif - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_diag') - endif - - !---------------------------------------------------------- - !| Initialize fractions - !---------------------------------------------------------- - - if (iamin_CPLID) then - call t_startf ('CPL:init_fracs') - call t_adj_detailf(+2) - - allocate(fractions_ax(num_inst_frc)) - allocate(fractions_lx(num_inst_frc)) - allocate(fractions_ox(num_inst_frc)) - allocate(fractions_ix(num_inst_frc)) - allocate(fractions_gx(num_inst_frc)) - allocate(fractions_rx(num_inst_frc)) - allocate(fractions_wx(num_inst_frc)) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - do efi = 1,num_inst_frc - eii = mod((efi-1),num_inst_ice) + 1 - - if (iamroot_CPLID) then - write(logunit,*) ' ' - if (efi == 1) write(logunit,F00) 'Initializing fractions' - endif - - call seq_frac_init(infodata, & - atm(ens1), ice(ens1), lnd(ens1), & - ocn(ens1), glc(ens1), rof(ens1), & - wav(ens1), & - fractions_ax(efi), fractions_ix(efi), fractions_lx(efi), & - fractions_ox(efi), fractions_gx(efi), fractions_rx(efi), & - fractions_wx(efi)) - - if (iamroot_CPLID) then - write(logunit,*) ' ' - if (efi == 1) write(logunit,F00) 'Setting fractions' - endif - - call seq_frac_set(infodata, ice(eii), & - fractions_ax(efi), fractions_ix(efi), fractions_ox(efi)) - - enddo - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_fracs') - endif - - !---------------------------------------------------------- - !| Initialize prep_aoflux_mod module variables - !---------------------------------------------------------- - - if (iamin_CPLID) then - call prep_aoflux_init(infodata, fractions_ox, fractions_ax) - endif - - !---------------------------------------------------------- - !| Initialize atm/ocn flux component and compute ocean albedos - !---------------------------------------------------------- - - if (iamin_CPLID) then - if (ocn_present) then - call t_startf ('CPL:init_aoflux') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing atm/ocn flux component' - endif - - if (trim(aoflux_grid) == 'ocn') then - - call seq_flux_init_mct(ocn(ens1), fractions_ox(ens1)) - - elseif (trim(aoflux_grid) == 'atm') then - - call seq_flux_init_mct(atm(ens1), fractions_ax(ens1)) - - elseif 
(trim(aoflux_grid) == 'exch') then - - call shr_sys_abort(subname//' aoflux_grid = exch not validated') - call seq_flux_initexch_mct(atm(ens1), ocn(ens1), mpicom_cplid, cplid) - - else - call shr_sys_abort(subname//' aoflux_grid = '//trim(aoflux_grid)//' not available') - - endif - - do exi = 1,num_inst_xao - !tcx is this correct? relation between xao and frc for ifrad and ofrad - efi = mod((exi-1),num_inst_frc) + 1 - eai = mod((exi-1),num_inst_atm) + 1 - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - a2x_ox => prep_ocn_get_a2x_ox() - call seq_flux_ocnalb_mct(infodata, ocn(1), a2x_ox(eai), fractions_ox(efi), xao_ox(exi)) - enddo - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_aoflux') - endif - endif - - !---------------------------------------------------------- - !| ATM PREP for recalculation of initial solar - ! Note that ocean albedos are ALWAYS CALCULATED on the ocean grid - ! If aoflux_grid = 'ocn' , xao_ox is input for atm/ocn fluxes and xao_ax is output - ! If aoflux_grid = 'atm' , xao_ax is input for atm/ocn fluxes and xao_ox is not used - ! If aoflux_grid = 'exch', xao_ax is input for atm/ocn /fluxes and xao_ox is not used - ! Merge atmosphere input state and run atmospheric radiation - !---------------------------------------------------------- - - if (atm_prognostic) then - if (iamin_CPLID) then - - if (lnd_present) then - ! Get lnd output on atm grid - call prep_atm_calc_l2x_ax(fractions_lx, timer='CPL:init_atminit') - endif - - if (ice_present) then - ! Get ice output on atm grid - call prep_atm_calc_i2x_ax(fractions_ix, timer='CPL:init_atminit') - endif - - if (ocn_present) then - ! Get ocn output on atm grid - call prep_atm_calc_o2x_ax(fractions_ox, timer='CPL:init_atminit') - endif - - if (ocn_present) then - ! Get albedos on atm grid - call prep_aoflux_calc_xao_ax(fractions_ox, flds='albedos', timer='CPL:init_atminit') - - ! Get atm/ocn fluxes on atm grid - if (trim(aoflux_grid) == 'ocn') then - call prep_aoflux_calc_xao_ax(fractions_ox, flds='states_and_fluxes', & - timer='CPL:init_atminit') - endif - endif - - if (lnd_present .or. ocn_present) then - ! Merge input to atmosphere on coupler pes - xao_ax => prep_aoflux_get_xao_ax() - if (associated(xao_ax)) then - call prep_atm_mrg(infodata, & - fractions_ax=fractions_ax, xao_ax=xao_ax, timer_mrg='CPL:init_atminit') - endif - endif - - call component_diag(infodata, atm, flow='x2c', comment='send atm', info_debug=info_debug) - - endif - - endif ! atm_prognostic - - !---------------------------------------------------------- - !| Second phase of atmosphere component initialization - ! Recalculate solar based on input albedo's from surface components. - ! Data or dead atmosphere may just return on this phase. - !---------------------------------------------------------- - - if (atm_present) then - call t_startf('CPL:comp_init_cc_atm2') - call t_adj_detailf(+2) - - if (iamroot_CPLID) then - write(logunit,F00) 'Calling atm_init_mct phase 2' - endif - - ! Send atm input data from coupler pes to atm pes - if (atm_prognostic) then - call component_exch(atm, flow='x2c', infodata=infodata, & - infodata_string='cpl2atm_init') - endif - - ! Set atm init phase to 2 for all atm instances on component instance pes - do eai = 1,num_inst_atm - if (component_get_iamin_compid(atm(eai))) then - call seq_infodata_putData(infodata, atm_phase=2) - endif - enddo - - ! 
Run atm_init_mct with init phase of 2 - call component_init_cc(Eclock_a, atm, atm_init, & - infodata, NLFilename, & - seq_flds_x2c_fluxes=seq_flds_x2a_fluxes, & - seq_flds_c2x_fluxes=seq_flds_a2x_fluxes) - - ! Map atm output data from atm pes to cpl pes - call component_exch(atm, flow='c2x', infodata=infodata, & - infodata_string='atm2cpl_init') - - if (iamin_CPLID) then - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - call component_diag(infodata, atm, flow='c2x', comment= 'recv IC2 atm', & - info_debug=info_debug) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - call t_adj_detailf(-2) - call t_stopf('CPL:comp_init_cc_atm2') - endif ! atm present - - !---------------------------------------------------------- - !| Get time manager's index for driver - !---------------------------------------------------------- - drv_index = seq_timemgr_pause_component_index('drv') - - !---------------------------------------------------------- - !| Read driver restart file, overwrite anything previously sent or computed - !---------------------------------------------------------- - - call t_startf('CPL:init_readrestart') - call t_adj_detailf(+2) - - call seq_diag_zero_mct(mode='all') - if (read_restart .and. iamin_CPLID) then - call seq_rest_read(rest_file, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx) - endif - - call t_adj_detailf(-2) - call t_stopf ('CPL:init_readrestart') - - !---------------------------------------------------------- - !| Map initial r2x_rx and g2x_gx to _ox, _ix and _lx - !---------------------------------------------------------- - - if (iamin_CPLID ) then - if (rof_c2_ocn) then - call prep_ocn_calc_r2x_ox(timer='CPL:init_rof2ocn') - endif - if (glc_c2_ocn) then - call prep_ocn_calc_g2x_ox(timer='CPL:init_glc2ocn') - endif - if (rof_c2_ice) then - call prep_ice_calc_r2x_ix(timer='CPL:init_rof2ice') - endif - if (glc_c2_ice) then - call prep_ice_calc_g2x_ix(timer='CPL:init_glc2ice') - endif - if (rof_c2_lnd) then - call prep_lnd_calc_r2x_lx(timer='CPL:init_rof2lnd') - endif - if (glc_c2_lnd) then - call prep_lnd_calc_g2x_lx(timer='CPL:init_gllndnd') - endif - endif - - !---------------------------------------------------------- - !| Clear all resume signals - !---------------------------------------------------------- - allocate(comp_resume(num_inst_max)) - comp_resume = '' - call seq_infodata_putData(infodata, & - atm_resume=comp_resume(1:num_inst_atm), & - lnd_resume=comp_resume(1:num_inst_lnd), & - ocn_resume=comp_resume(1:num_inst_ocn), & - ice_resume=comp_resume(1:num_inst_ice), & - glc_resume=comp_resume(1:num_inst_glc), & - rof_resume=comp_resume(1:num_inst_rof), & - wav_resume=comp_resume(1:num_inst_wav), & - cpl_resume=comp_resume(1)) - deallocate(comp_resume) - - !---------------------------------------------------------- - !| Write histinit output file - !---------------------------------------------------------- - - if (do_histinit) then - if (iamin_CPLID) then - call t_startf('CPL:init_histinit') - call t_adj_detailf(+2) - - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod ) - write(logunit,104) ' Write history file at ',ymd,tod - call shr_sys_flush(logunit) - endif - call seq_hist_write(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - 
fractions_rx, fractions_gx, fractions_wx, trim(cpl_inst_tag)) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - call t_adj_detailf(-2) - call t_stopf('CPL:init_histinit') - endif - endif - - if (iamroot_CPLID )then - write(logunit,*) ' ' - write(logunit,F00) 'Model initialization complete ' - write(logunit,*) ' ' - call shr_sys_flush(logunit) - endif - - call t_adj_detailf(-1) - call t_stopf('CPL:cime_init') - - end subroutine cime_init - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_run() - use seq_comm_mct, only: atm_layout, lnd_layout, ice_layout, glc_layout, & - rof_layout, ocn_layout, wav_layout, esp_layout - use shr_string_mod, only: shr_string_listGetIndexF - use seq_comm_mct, only: num_inst_driver - - ! gptl timer lookup variables - integer, parameter :: hashcnt=7 - integer :: hashint(hashcnt) - ! Driver pause/resume - logical :: drv_pause ! Driver writes pause restart file - character(len=CL) :: drv_resume ! Driver resets state from restart file - - type(ESMF_Time) :: etime_curr ! Current model time - real(r8) :: tbnds1_offset ! Time offset for call to seq_hist_writeaux - logical :: lnd2glc_averaged_now ! Whether lnd2glc averages were taken this timestep - -101 format( A, i10.8, i8, 12A, A, F8.2, A, F8.2 ) -102 format( A, i10.8, i8, A, 8L3 ) -103 format( 5A ) -104 format( A, i10.8, i8) -105 format( A, i10.8, i8, A, f10.2, A, f10.2, A, A, i5, A, A) -108 format( A, f10.2, A, i8.8) -109 format( A, 2f10.3) - - - hashint = 0 - - - call seq_infodata_putData(infodata,atm_phase=1,lnd_phase=1,ocn_phase=1,ice_phase=1) - call seq_timemgr_EClockGetData( EClock_d, stepno=begstep) - call seq_timemgr_EClockGetData( EClock_d, dtime=dtime) - call seq_timemgr_EClockGetData( EClock_d, calendar=calendar) - ncpl = 86400/dtime - cktime_acc = 0._r8 - cktime_cnt = 0 - stop_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_stop) - if (seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_datestop)) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,103) subname,' NOTE: Stopping from alarm STOP DATE' - write(logunit,*) ' ' - endif - stop_alarm = .true. - endif - force_stop = .false. - force_stop_ymd = -1 - force_stop_tod = -1 - - !|---------------------------------------------------------- - !| Beginning of driver time step loop - !|---------------------------------------------------------- - - call t_startf ('CPL:RUN_LOOP_BSTART') - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf ('CPL:RUN_LOOP_BSTART') - Time_begin = mpi_wtime() - Time_bstep = mpi_wtime() - do while ( .not. stop_alarm) - - call t_startf('CPL:RUN_LOOP', hashint(1)) - call t_startf('CPL:CLOCK_ADVANCE') - - !---------------------------------------------------------- - !| Advance Clock - ! (this is time that models should have before they return - ! to the driver). 
Write timestamp and run alarm status - !---------------------------------------------------------- - - call seq_timemgr_clockAdvance( seq_SyncClock, force_stop, force_stop_ymd, force_stop_tod) - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod) - call shr_cal_date2ymd(ymd,year,month,day) - stop_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_stop) - atmrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_atmrun) - lndrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_lndrun) - rofrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_rofrun) - icerun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_icerun) - glcrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_glcrun) - wavrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_wavrun) - esprun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_esprun) - ocnrun_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_ocnrun) - ocnnext_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_ocnnext) - restart_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_restart) - history_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_history) - histavg_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_histavg) - tprof_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_tprof) - barrier_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_barrier) - pause_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_pause) - - ! Does the driver need to pause? - drv_pause = pause_alarm .and. seq_timemgr_pause_component_active(drv_index) - - if (glc_prognostic) then - ! Is it time to average fields to pass to glc? - ! - ! Note that the glcrun_avg_alarm just controls what is passed to glc in terms - ! of averaged fields - it does NOT control when glc is called currently - - ! glc will be called on the glcrun_alarm setting - but it might not be passed relevant - ! info if the time averaging period to accumulate information passed to glc is greater - ! than the glcrun interval - glcrun_avg_alarm = seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_glcrun_avg) - if (glcrun_avg_alarm .and. .not. glcrun_alarm) then - write(logunit,*) 'ERROR: glcrun_avg_alarm is true, but glcrun_alarm is false' - write(logunit,*) 'Make sure that NCPL_BASE_PERIOD, GLC_NCPL and GLC_AVG_PERIOD' - write(logunit,*) 'are set so that glc averaging only happens at glc coupling times.' - write(logunit,*) '(It is allowable for glc coupling to be more frequent than glc averaging,' - write(logunit,*) 'but not for glc averaging to be more frequent than glc coupling.)' - call shr_sys_abort(subname//' glcrun_avg_alarm is true, but glcrun_alarm is false') - end if - else - ! glcrun_avg_alarm shouldn't matter in this case - glcrun_avg_alarm = .false. - end if - - ! this probably belongs in seq_timemgr somewhere using proper clocks - t1hr_alarm = .false. - t2hr_alarm = .false. - t3hr_alarm = .false. - t6hr_alarm = .false. - t12hr_alarm = .false. - t24hr_alarm = .false. - t1yr_alarm = .false. - if (mod(tod, 3600) == 0) t1hr_alarm = .true. - if (mod(tod, 7200) == 0) t2hr_alarm = .true. - if (mod(tod,10800) == 0) t3hr_alarm = .true. - if (mod(tod,21600) == 0) t6hr_alarm = .true. - if (mod(tod,43200) == 0) t12hr_alarm = .true. - if (tod == 0) t24hr_alarm = .true. - if (month==1 .and. day==1 .and. tod==0) t1yr_alarm = .true. - - lnd2glc_averaged_now = .false. 
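
The t*hr_alarm flags above are plain modulus tests on the time of day in seconds (tod): for example, with a coupling step of 1800 s the 3-hourly alarm (mod(tod,10800) == 0) fires on every sixth step. A small sketch of the same arithmetic, using an assumed half-hour coupling step over one day:

program tod_alarm_sketch
  implicit none
  integer :: tod, dtime
  logical :: t1hr, t3hr, t24hr

  dtime = 1800                       ! assumed 30-minute coupling step
  do tod = 0, 86400 - dtime, dtime
     t1hr  = (mod(tod,  3600) == 0)
     t3hr  = (mod(tod, 10800) == 0)
     t24hr = (tod == 0)
     if (t3hr) print '(a,i6,2(a,l2))', ' tod=', tod, '  1hr=', t1hr, '  3hr=', t3hr
  end do
end program tod_alarm_sketch
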
- - if (seq_timemgr_alarmIsOn(EClock_d,seq_timemgr_alarm_datestop)) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,103) subname,' NOTE: Stopping from alarm STOP DATE' - write(logunit,*) ' ' - endif - stop_alarm = .true. - endif - - ! update the orbital data as needed - if (trim(orb_mode) == trim(seq_infodata_orb_variable_year)) then - orb_nyear = orb_iyear + (year - orb_iyear_align) - if (orb_nyear /= orb_cyear) then - orb_cyear = orb_nyear - call shr_orb_params(orb_cyear, orb_eccen, orb_obliq, orb_mvelp, & - orb_obliqr, orb_lambm0, orb_mvelpp, iamroot_CPLID) - call seq_infodata_putData(infodata,orb_eccen=orb_eccen,orb_obliqr=orb_obliqr, & - orb_lambm0=orb_lambm0,orb_mvelpp=orb_mvelpp) - endif - endif - - ! override ocnrun_alarm and ocnnext_alarm for first ocn run - ! skip_ocean_run is initialized above to true if it's a startup - ! if it's not a startup, ignore all of this - ! stop the overide on the second ocnrun_alarm - - if (ocnrun_alarm) ocnrun_count = ocnrun_count + 1 - if (ocnrun_count > 1) skip_ocean_run = .false. - if (skip_ocean_run) then - ocnrun_alarm = .false. - ocnnext_alarm = .false. - endif - - if (iamroot_CPLID) then - if (loglevel > 1) then - write(logunit,102) ' Alarm_state: model date = ',ymd,tod, & - ' aliogrw run alarms = ', atmrun_alarm, lndrun_alarm, & - icerun_alarm, ocnrun_alarm, glcrun_alarm, & - rofrun_alarm, wavrun_alarm, esprun_alarm - write(logunit,102) ' Alarm_state: model date = ',ymd,tod, & - ' 1.2.3.6.12.24 run alarms = ', t1hr_alarm, t2hr_alarm, & - t3hr_alarm, t6hr_alarm, t12hr_alarm, t24hr_alarm - call shr_sys_flush(logunit) - endif - endif - - call t_stopf ('CPL:CLOCK_ADVANCE') - - !---------------------------------------------------------- - !| MAP ATM to OCN - ! Set a2x_ox as a module variable in prep_ocn_mod - ! This will be used later in the ice prep and in the - ! atm/ocn flux calculation - !---------------------------------------------------------- - - if (iamin_CPLID .and. (atm_c2_ocn .or. atm_c2_ice)) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPRE1_BARRIER') - call t_drvstartf ('CPL:OCNPRE1',cplrun=.true.,barrier=mpicom_CPLID,hashint=hashint(3)) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call prep_ocn_calc_a2x_ox(timer='CPL:ocnpre1_atm2ocn') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPRE1',cplrun=.true.,hashint=hashint(3)) - endif - - !---------------------------------------------------------- - !| ATM/OCN SETUP (rasm_option1) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) == 'RASM_OPTION1') .and. & - iamin_CPLID .and. ocn_present) then - - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMOCN1_BARRIER') - call t_drvstartf ('CPL:ATMOCN1',cplrun=.true.,barrier=mpicom_CPLID,hashint=hashint(4)) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (ocn_prognostic) then - ! Map ice to ocn - if (ice_c2_ocn) call prep_ocn_calc_i2x_ox(timer='CPL:atmocnp_ice2ocn') - - ! Map wav to ocn - if (wav_c2_ocn) call prep_ocn_calc_w2x_ox(timer='CPL:atmocnp_wav2ocn') - endif - - !---------------------------------------------------------- - !| atm/ocn flux on atm grid (rasm_option1 and aoflux='atm') - !---------------------------------------------------------- - - if (trim(aoflux_grid) == 'atm') then - ! compute o2x_ax for flux_atmocn, will be updated before atm merge - ! 
do not use fractions because fractions here are NOT consistent with fractions in atm_mrg - if (ocn_c2_atm) call prep_atm_calc_o2x_ax(timer='CPL:atmoca_ocn2atm') - - call t_drvstartf ('CPL:atmocna_fluxa',barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ax => component_get_c2x_cx(atm(eai)) - o2x_ax => prep_atm_get_o2x_ax() ! array over all instances - xao_ax => prep_aoflux_get_xao_ax() ! array over all instances - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ax, o2x_ax(eoi), xao_ax(exi)) - enddo - call t_drvstopf ('CPL:atmocna_fluxa') - - if (atm_c2_ocn) call prep_aoflux_calc_xao_ox(timer='CPL:atmocna_atm2ocn') - endif ! aoflux_grid - - !---------------------------------------------------------- - !| atm/ocn flux on ocn grid (rasm_option1 and aoflux='ocn') - !---------------------------------------------------------- - - if (trim(aoflux_grid) == 'ocn') then - call t_drvstartf ('CPL:atmocnp_fluxo',barrier=mpicom_CPLID,hashint=hashint(6)) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ox => prep_ocn_get_a2x_ox() - o2x_ox => component_get_c2x_cx(ocn(eoi)) - xao_ox => prep_aoflux_get_xao_ox() - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ox(eai), o2x_ox, xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_fluxo',hashint=hashint(6)) - endif - - !---------------------------------------------------------- - !| ocn prep-merge (rasm_option1) - !---------------------------------------------------------- - - xao_ox => prep_aoflux_get_xao_ox() - call prep_ocn_mrg(infodata, fractions_ox, xao_ox=xao_ox, timer_mrg='CPL:atmocnp_mrgx2o') - - ! Accumulate ocn inputs - form partial sum of tavg ocn inputs (virtual "send" to ocn) - call prep_ocn_accum(timer='CPL:atmocnp_accum') - - !---------------------------------------------------------- - !| ocn albedos (rasm_option1) - ! (MUST BE AFTER prep_ocn_mrg for swnet to ocn to be computed properly - !---------------------------------------------------------- - - call t_drvstartf ('CPL:atmocnp_ocnalb', barrier=mpicom_CPLID,hashint=hashint(5)) - do exi = 1,num_inst_xao - efi = mod((exi-1),num_inst_frc) + 1 - eai = mod((exi-1),num_inst_atm) + 1 - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - a2x_ox => prep_ocn_get_a2x_ox() - call seq_flux_ocnalb_mct(infodata, ocn(1), a2x_ox(eai), fractions_ox(efi), xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_ocnalb',hashint=hashint(5)) - - !---------------------------------------------------------- - !| ocn budget (rasm_option1) - !---------------------------------------------------------- - - if (do_budgets) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET0_BARRIER') - call t_drvstartf ('CPL:BUDGET0',budget=.true.,barrier=mpicom_CPLID) - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - call seq_diag_ocn_mct(ocn(ens1), xao_ox(1), fractions_ox(ens1), infodata, & - do_o2x=.true., do_x2o=.true., do_xao=.true.) - call t_drvstopf ('CPL:BUDGET0',budget=.true.) 
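! The atm/ocn flux loop above selects a component instance for each flux instance with
! eai = mod((exi-1),num_inst_atm) + 1 (and similarly for ocn and frc), cycling through the
! smaller ensemble. The sketch below is illustrative only; the ensemble sizes are made up.
program instance_map_sketch
  implicit none
  integer, parameter :: num_inst_xao = 4   ! assumed number of atm/ocn flux instances
  integer, parameter :: num_inst_atm = 2   ! assumed number of atm instances
  integer, parameter :: num_inst_ocn = 1   ! assumed number of ocn instances
  integer :: exi, eai, eoi
  do exi = 1, num_inst_xao
     eai = mod(exi-1, num_inst_atm) + 1    ! cycles 1,2,1,2 across the atm ensemble
     eoi = mod(exi-1, num_inst_ocn) + 1    ! stays 1 when there is a single ocn instance
     write(*,'(a,i2,a,i2,a,i2)') ' xao instance', exi, ' -> atm', eai, ', ocn', eoi
  end do
end program instance_map_sketch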
- endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ATMOCN1',cplrun=.true.,hashint=hashint(4)) - endif - - !---------------------------------------------------------- - !| ATM/OCN SETUP-SEND (cesm1_orig, cesm1_orig_tight, cesm1_mod, cesm1_mod_tight, or rasm_option1) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) == 'CESM1_ORIG' .or. & - trim(cpl_seq_option) == 'CESM1_ORIG_TIGHT' .or. & - trim(cpl_seq_option) == 'CESM1_MOD' .or. & - trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' .or. & - trim(cpl_seq_option) == 'RASM_OPTION1' ) .and. & - ocn_present .and. ocnrun_alarm) then - - !---------------------------------------------------- - ! "startup" wait (cesm1_orig, cesm1_mod, or rasm_option1) - !---------------------------------------------------- - - if (iamin_CPLALLOCNID) then - ! want to know the time the ocean pes waited for the cpl pes - ! at the first ocnrun_alarm, min ocean wait is wait time - ! do not use t_barrierf here since it can be "off", use mpi_barrier - do eoi = 1,num_inst_ocn - if (ocn(eoi)%iamin_compid) call t_drvstartf ('CPL:C2O_INITWAIT') - enddo - call mpi_barrier(mpicom_CPLALLOCNID,ierr) - do eoi = 1,num_inst_ocn - if (ocn(eoi)%iamin_compid) call t_drvstopf ('CPL:C2O_INITWAIT') - enddo - cpl2ocn_first = .false. - endif - - !---------------------------------------------------- - !| ocn average (cesm1_orig, cesm1_orig_tight, cesm1_mod, cesm1_mod_tight, or rasm_option1) - !---------------------------------------------------- - - if (iamin_CPLID .and. ocn_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPREP_BARRIER') - call t_drvstartf ('CPL:OCNPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - ! finish accumulating ocean inputs - ! reset the value of x2o_ox with the value in x2oacc_ox - ! (module variable in prep_ocn_mod) - call prep_ocn_accum_avg(timer_accum='CPL:ocnprep_avg') - - call component_diag(infodata, ocn, flow='x2c', comment= 'send ocn', & - info_debug=info_debug, timer_diag='CPL:ocnprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - !| cpl -> ocn (cesm1_orig, cesm1_orig_tight, cesm1_mod, cesm1_mod_tight, or rasm_option1) - !---------------------------------------------------- - - if (iamin_CPLALLOCNID .and. ocn_prognostic) then - call component_exch(ocn, flow='x2c', & - infodata=infodata, infodata_string='cpl2ocn_run', & - mpicom_barrier=mpicom_CPLALLOCNID, run_barriers=run_barriers, & - timer_barrier='CPL:C2O_BARRIER', timer_comp_exch='CPL:C2O', & - timer_map_exch='CPL:c2o_ocnx2ocno', timer_infodata_exch='CPL:c2o_infoexch') - endif - - endif ! end of OCN SETUP - - !---------------------------------------------------------- - !| LND SETUP-SEND - !---------------------------------------------------------- - - if (lnd_present .and. 
lndrun_alarm) then - - !---------------------------------------------------- - !| lnd prep-merge - !---------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:LNDPREP_BARRIER') - call t_drvstartf ('CPL:LNDPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (atm_c2_lnd) then - call prep_lnd_calc_a2x_lx(timer='CPL:lndprep_atm2lnd') - endif - - if (lnd_prognostic) then - call prep_lnd_mrg(infodata, timer_mrg='CPL:lndprep_mrgx2l') - - call component_diag(infodata, lnd, flow='x2c', comment= 'send lnd', & - info_debug=info_debug, timer_diag='CPL:lndprep_diagav') - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:LNDPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - !| cpl -> lnd - !---------------------------------------------------- - - if (iamin_CPLALLLNDID) then - call component_exch(lnd, flow='x2c', & - infodata=infodata, infodata_string='cpl2lnd_run', & - mpicom_barrier=mpicom_CPLALLLNDID, run_barriers=run_barriers, & - timer_barrier='CPL:C2L_BARRIER', timer_comp_exch='CPL:C2L', & - timer_map_exch='CPL:c2l_lndx2lndl', timer_infodata_exch='CPL:c2l_infoexch') - endif - - endif - - !---------------------------------------------------------- - !| ICE SETUP-SEND - ! Note that for atm->ice mapping below will leverage the assumption that the - ! ice and ocn are on the same grid and that mapping of atm to ocean is - ! done already for use by atmocn flux and ice model prep - !---------------------------------------------------------- - - if (ice_present .and. icerun_alarm) then - - !---------------------------------------------------- - !| ice prep-merge - !---------------------------------------------------- - - if (iamin_CPLID .and. ice_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ICEPREP_BARRIER') - - call t_drvstartf ('CPL:ICEPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - - if (ocn_c2_ice) then - call prep_ice_calc_o2x_ix(timer='CPL:iceprep_ocn2ice') - endif - - if (atm_c2_ice) then - ! This is special to avoid remapping atm to ocn - ! Note it is constrained that different prep modules cannot - ! use or call each other - a2x_ox => prep_ocn_get_a2x_ox() ! array - call prep_ice_calc_a2x_ix(a2x_ox, timer='CPL:iceprep_atm2ice') - endif - - call prep_ice_mrg(infodata, timer_mrg='CPL:iceprep_mrgx2i') - - call component_diag(infodata, ice, flow='x2c', comment= 'send ice', & - info_debug=info_debug, timer_diag='CPL:iceprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ICEPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - !| cpl -> ice - !---------------------------------------------------- - - if (iamin_CPLALLICEID .and. ice_prognostic) then - call component_exch(ice, flow='x2c', & - infodata=infodata, infodata_string='cpl2ice_run', & - mpicom_barrier=mpicom_CPLALLICEID, run_barriers=run_barriers, & - timer_barrier='CPL:C2I_BARRIER', timer_comp_exch='CPL:C2I', & - timer_map_exch='CPL:c2i_icex2icei', timer_infodata_exch='CPL:ice_infoexch') - endif - - endif - - !---------------------------------------------------------- - !| WAV SETUP-SEND - !---------------------------------------------------------- - if (wav_present .and. 
wavrun_alarm) then - - !---------------------------------------------------------- - !| wav prep-merge - !---------------------------------------------------------- - - if (iamin_CPLID .and. wav_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:WAVPREP_BARRIER') - - call t_drvstartf ('CPL:WAVPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (atm_c2_wav) then - call prep_wav_calc_a2x_wx(timer='CPL:wavprep_atm2wav') - endif - - if (ocn_c2_wav) then - call prep_wav_calc_o2x_wx(timer='CPL:wavprep_ocn2wav') - endif - - if (ice_c2_wav) then - call prep_wav_calc_i2x_wx(timer='CPL:wavprep_ice2wav') - endif - - call prep_wav_mrg(infodata, fractions_wx, timer_mrg='CPL:wavprep_mrgx2w') - - call component_diag(infodata, wav, flow='x2c', comment= 'send wav', & - info_debug=info_debug, timer_diag='CPL:wavprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:WAVPREP',cplrun=.true.) - endif - - !---------------------------------------------------------- - !| cpl -> wav - !---------------------------------------------------------- - - if (iamin_CPLALLWAVID .and. wav_prognostic) then - call component_exch(wav, flow='x2c', & - infodata=infodata, infodata_string='cpl2wav_run', & - mpicom_barrier=mpicom_CPLALLWAVID, run_barriers=run_barriers, & - timer_barrier='CPL:C2W_BARRIER', timer_comp_exch='CPL:C2W', & - timer_map_exch='CPL:c2w_wavx2wavw', timer_infodata_exch='CPL:c2w_infoexch') - endif - - endif - - !---------------------------------------------------------- - !| ROF SETUP-SEND - !---------------------------------------------------------- - - if (rof_present .and. rofrun_alarm) then - - !---------------------------------------------------- - !| rof prep-merge - !---------------------------------------------------- - - if (iamin_CPLID .and. rof_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ROFPREP_BARRIER') - - call t_drvstartf ('CPL:ROFPREP', cplrun=.true., barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call prep_rof_accum_avg(timer='CPL:rofprep_l2xavg') - - if (lnd_c2_rof) then - call prep_rof_calc_l2r_rx(fractions_lx, timer='CPL:rofprep_lnd2rof') - endif - - call prep_rof_mrg(infodata, fractions_rx, timer_mrg='CPL:rofprep_mrgx2r') - - call component_diag(infodata, rof, flow='x2c', comment= 'send rof', & - info_debug=info_debug, timer_diag='CPL:rofprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ROFPREP',cplrun=.true.) - endif - - !---------------------------------------------------- - !| cpl -> rof - !---------------------------------------------------- - - if (iamin_CPLALLROFID .and. rof_prognostic) then - call component_exch(rof, flow='x2c', & - infodata=infodata, infodata_string='cpl2rof_run', & - mpicom_barrier=mpicom_CPLALLLNDID, run_barriers=run_barriers, & - timer_barrier='CPL:C2R_BARRIER', timer_comp_exch='CPL:C2R', & - timer_map_exch='CPL:c2r_rofx2rofr', timer_infodata_exch='CPL:c2r_infoexch') - endif - - endif - - !---------------------------------------------------------- - !| RUN ICE MODEL - !---------------------------------------------------------- - - if (ice_present .and. 
icerun_alarm) then - call component_run(Eclock_i, ice, ice_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2i_fluxes, & - seq_flds_c2x_fluxes=seq_flds_i2x_fluxes, & - comp_prognostic=ice_prognostic, comp_num=comp_num_ice, & - timer_barrier= 'CPL:ICE_RUN_BARRIER', timer_comp_run='CPL:ICE_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=ice_layout) - endif - - !---------------------------------------------------------- - !| RUN LND MODEL - !---------------------------------------------------------- - - if (lnd_present .and. lndrun_alarm) then - call component_run(Eclock_l, lnd, lnd_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2l_fluxes, & - seq_flds_c2x_fluxes=seq_flds_l2x_fluxes, & - comp_prognostic=lnd_prognostic, comp_num=comp_num_lnd, & - timer_barrier= 'CPL:LND_RUN_BARRIER', timer_comp_run='CPL:LND_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=lnd_layout) - endif - - !---------------------------------------------------------- - !| RUN ROF MODEL - !---------------------------------------------------------- - - if (rof_present .and. rofrun_alarm) then - call component_run(Eclock_r, rof, rof_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2r_fluxes, & - seq_flds_c2x_fluxes=seq_flds_r2x_fluxes, & - comp_prognostic=rof_prognostic, comp_num=comp_num_rof, & - timer_barrier= 'CPL:ROF_RUN_BARRIER', timer_comp_run='CPL:ROF_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=rof_layout) - endif - - !---------------------------------------------------------- - !| RUN WAV MODEL - !---------------------------------------------------------- - - if (wav_present .and. wavrun_alarm) then - call component_run(Eclock_w, wav, wav_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2w_fluxes, & - seq_flds_c2x_fluxes=seq_flds_w2x_fluxes, & - comp_prognostic=wav_prognostic, comp_num=comp_num_wav, & - timer_barrier= 'CPL:WAV_RUN_BARRIER', timer_comp_run='CPL:WAV_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=wav_layout) - endif - - !---------------------------------------------------------- - !| RUN OCN MODEL (cesm1_orig_tight or cesm1_mod_tight) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) == 'CESM1_ORIG_TIGHT' .or. & - trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' ) .and. & - ocn_present .and. ocnrun_alarm) then - call component_run(Eclock_o, ocn, ocn_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2o_fluxes, & - seq_flds_c2x_fluxes=seq_flds_o2x_fluxes, & - comp_prognostic=ocn_prognostic, comp_num=comp_num_ocn, & - timer_barrier= 'CPL:OCNT_RUN_BARRIER', timer_comp_run='CPL:OCNT_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=ocn_layout) - endif - - !---------------------------------------------------------- - !| OCN RECV-POST (cesm1_orig_tight or cesm1_mod_tight) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) == 'CESM1_ORIG_TIGHT' .or. & - trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' ) .and. & - ocn_present .and. 
ocnnext_alarm) then - - !---------------------------------------------------------- - !| ocn -> cpl (cesm1_orig_tight or cesm1_mod_tight) - !---------------------------------------------------------- - - if (iamin_CPLALLOCNID) then - call component_exch(ocn, flow='c2x', & - infodata=infodata, infodata_string='ocn2cpl_run', & - mpicom_barrier=mpicom_CPLALLOCNID, run_barriers=run_barriers, & - timer_barrier='CPL:O2CT_BARRIER', timer_comp_exch='CPL:O2CT', & - timer_map_exch='CPL:o2c_ocno2ocnx', timer_infodata_exch='CPL:o2c_infoexch') - endif - - !---------------------------------------------------------- - !| ocn post (cesm1_orig_tight or cesm1_mod_tight) - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPOSTT_BARRIER') - call t_drvstartf ('CPL:OCNPOSTT',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, ocn, flow='c2x', comment= 'recv ocn', & - info_debug=info_debug, timer_diag='CPL:ocnpost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPOSTT',cplrun=.true.) - endif - - endif - - !---------------------------------------------------------- - !| ATM/OCN SETUP (cesm1_orig, cesm1_orig_tight, cesm1_mod or cesm1_mod_tight) - !---------------------------------------------------------- - if ((trim(cpl_seq_option) == 'CESM1_ORIG' .or. & - trim(cpl_seq_option) == 'CESM1_ORIG_TIGHT' .or. & - trim(cpl_seq_option) == 'CESM1_MOD' .or. & - trim(cpl_seq_option) == 'CESM1_MOD_TIGHT' ) .and. & - iamin_CPLID .and. ocn_present) then - - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMOCNP_BARRIER') - call t_drvstartf ('CPL:ATMOCNP',cplrun=.true.,barrier=mpicom_CPLID,hashint=hashint(7)) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - !---------------------------------------------------------- - !| ocn prep-merge (cesm1_orig or cesm1_orig_tight) - !---------------------------------------------------------- - - if (ocn_prognostic) then - ! Map ice to ocn - if (ice_c2_ocn) call prep_ocn_calc_i2x_ox(timer='CPL:atmocnp_ice2ocn') - - ! Map wav to ocn - if (wav_c2_ocn) call prep_ocn_calc_w2x_ox(timer='CPL:atmocnp_wav2ocn') - - if (cpl_seq_option == 'CESM1_ORIG' .or. & - cpl_seq_option == 'CESM1_ORIG_TIGHT') then - xao_ox => prep_aoflux_get_xao_ox() - call prep_ocn_mrg(infodata, fractions_ox, xao_ox=xao_ox, timer_mrg='CPL:atmocnp_mrgx2o') - - ! Accumulate ocn inputs - form partial sum of tavg ocn inputs (virtual "send" to ocn) - call prep_ocn_accum(timer='CPL:atmocnp_accum') - endif - endif - - !---------------------------------------------------------- - !| atm/ocn flux on atm grid ((cesm1_orig, cesm1_orig_tight, cesm1_mod or cesm1_mod_tight) and aoflux='atm') - !---------------------------------------------------------- - - if (trim(aoflux_grid) == 'atm') then - ! compute o2x_ax for flux_atmocn, will be updated before atm merge - ! do not use fractions because fractions here are NOT consistent with fractions in atm_mrg - if (ocn_c2_atm) call prep_atm_calc_o2x_ax(timer='CPL:atmoca_ocn2atm') - - call t_drvstartf ('CPL:atmocna_fluxa',barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ax => component_get_c2x_cx(atm(eai)) - o2x_ax => prep_atm_get_o2x_ax() ! array over all instances - xao_ax => prep_aoflux_get_xao_ax() ! 
array over all instances - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ax, o2x_ax(eoi), xao_ax(exi)) - enddo - call t_drvstopf ('CPL:atmocna_fluxa') - - if (atm_c2_ocn) call prep_aoflux_calc_xao_ox(timer='CPL:atmocna_atm2ocn') - endif ! aoflux_grid - - !---------------------------------------------------------- - !| atm/ocn flux on ocn grid ((cesm1_orig, cesm1_orig_tight, cesm1_mod or cesm1_mod_tight) and aoflux='ocn') - !---------------------------------------------------------- - - if (trim(aoflux_grid) == 'ocn') then - call t_drvstartf ('CPL:atmocnp_fluxo',barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ox => prep_ocn_get_a2x_ox() - o2x_ox => component_get_c2x_cx(ocn(eoi)) - xao_ox => prep_aoflux_get_xao_ox() - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ox(eai), o2x_ox, xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_fluxo') - ! else if (trim(aoflux_grid) == 'atm') then - ! !--- compute later --- - ! - ! else if (trim(aoflux_grid) == 'exch') then - ! xao_ax => prep_aoflux_get_xao_ax() - ! xao_ox => prep_aoflux_get_xao_ox() - ! - ! call t_drvstartf ('CPL:atmocnp_fluxe',barrier=mpicom_CPLID) - ! call seq_flux_atmocnexch_mct( infodata, atm(eai), ocn(eoi), & - ! fractions_ax(efi), fractions_ox(efi), xao_ax(exi), xao_ox(exi) ) - ! call t_drvstopf ('CPL:atmocnp_fluxe') - endif ! aoflux_grid - - !---------------------------------------------------------- - !| ocn prep-merge (cesm1_mod or cesm1_mod_tight) - !---------------------------------------------------------- - - if (ocn_prognostic) then - if (cpl_seq_option == 'CESM1_MOD' .or. & - cpl_seq_option == 'CESM1_MOD_TIGHT') then - - xao_ox => prep_aoflux_get_xao_ox() - call prep_ocn_mrg(infodata, fractions_ox, xao_ox=xao_ox, timer_mrg='CPL:atmocnp_mrgx2o') - - ! Accumulate ocn inputs - form partial sum of tavg ocn inputs (virtual "send" to ocn) - call prep_ocn_accum(timer='CPL:atmocnp_accum') - endif - endif - - !---------------------------------------------------------- - !| ocn albedos (cesm1_orig, cesm1_orig_tight, cesm1_mod or cesm1_mod_tight) - ! (MUST BE AFTER prep_ocn_mrg for swnet to ocn to be computed properly - !---------------------------------------------------------- - - call t_drvstartf ('CPL:atmocnp_ocnalb', barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - efi = mod((exi-1),num_inst_frc) + 1 - eai = mod((exi-1),num_inst_atm) + 1 - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - a2x_ox => prep_ocn_get_a2x_ox() - call seq_flux_ocnalb_mct(infodata, ocn(1), a2x_ox(eai), fractions_ox(efi), xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_ocnalb') - - !---------------------------------------------------------- - !| ocn budget (cesm1_orig, cesm1_orig_tight, cesm1_mod or cesm1_mod_tight) - !---------------------------------------------------------- - - if (do_budgets) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET0_BARRIER') - call t_drvstartf ('CPL:BUDGET0',budget=.true.,barrier=mpicom_CPLID) - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - call seq_diag_ocn_mct(ocn(ens1), xao_ox(1), fractions_ox(ens1), infodata, & - do_o2x=.true., do_x2o=.true., do_xao=.true.) - call t_drvstopf ('CPL:BUDGET0',budget=.true.) 
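! prep_ocn_accum above forms a partial sum of the coupler-to-ocean fields every coupling step,
! and prep_ocn_accum_avg later turns that sum into a time average before the (less frequent)
! ocean coupling; presumably the partial sum is divided by the number of accumulated steps.
! The toy sketch below shows only that accumulate-then-average pattern and is not from the
! driver source; the field value and variable names are made up.
program accum_avg_sketch
  implicit none
  real    :: x2o             ! stand-in for an instantaneous coupler-to-ocean field
  real    :: x2oacc          ! running partial sum (accumulator)
  integer :: cnt, step
  x2oacc = 0.0
  cnt = 0
  do step = 1, 4                       ! pretend 4 fast coupling steps per ocean step
     x2o    = real(step)               ! made-up instantaneous value
     x2oacc = x2oacc + x2o             ! accumulate: add into the partial sum
     cnt    = cnt + 1
  end do
  x2oacc = x2oacc / real(cnt)          ! average at the ocean coupling time
  print *, 'time-averaged x2o =', x2oacc   ! 2.5 for this example
end program accum_avg_sketch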
- endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ATMOCNP',cplrun=.true.,hashint=hashint(7)) - endif - - !---------------------------------------------------------- - !| LND RECV-POST - !---------------------------------------------------------- - - if (lnd_present .and. lndrun_alarm) then - - !---------------------------------------------------------- - !| lnd -> cpl - !---------------------------------------------------------- - - if (iamin_CPLALLLNDID) then - call component_exch(lnd, flow='c2x', infodata=infodata, infodata_string='lnd2cpl_run', & - mpicom_barrier=mpicom_CPLALLLNDID, run_barriers=run_barriers, & - timer_barrier='CPL:L2C_BARRIER', timer_comp_exch='CPL:L2C', & - timer_map_exch='CPL:l2c_lndl2lndx', timer_infodata_exch='lnd2cpl_run') - endif - - !---------------------------------------------------------- - !| lnd post - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:LNDPOST_BARRIER') - call t_drvstartf ('CPL:LNDPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, lnd, flow='c2x', comment='recv lnd', & - info_debug=info_debug, timer_diag='CPL:lndpost_diagav') - - ! Accumulate rof and glc inputs (module variables in prep_rof_mod and prep_glc_mod) - if (lnd_c2_rof) then - call prep_rof_accum(timer='CPL:lndpost_accl2r') - endif - if (lnd_c2_glc) then - call prep_glc_accum(timer='CPL:lndpost_accl2g' ) - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:LNDPOST',cplrun=.true.) - endif - endif - - !---------------------------------------------------------- - !| GLC SETUP-SEND - !---------------------------------------------------------- - - if (glc_present .and. glcrun_alarm) then - - !---------------------------------------------------- - !| glc prep-merge - !---------------------------------------------------- - - if (iamin_CPLID .and. glc_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPREP_BARRIER') - call t_drvstartf ('CPL:GLCPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (lnd_c2_glc) then - ! NOTE - only create appropriate input to glc if the avg_alarm is on - if (glcrun_avg_alarm) then - call prep_glc_accum_avg(timer='CPL:glcprep_avg') - lnd2glc_averaged_now = .true. - - ! Note that l2x_gx is obtained from mapping the module variable l2gacc_lx - call prep_glc_calc_l2x_gx(fractions_lx, timer='CPL:glcprep_lnd2glc') - - call prep_glc_mrg(infodata, fractions_gx, timer_mrg='CPL:glcprep_mrgx2g') - - call component_diag(infodata, glc, flow='x2c', comment='send glc', & - info_debug=info_debug, timer_diag='CPL:glcprep_diagav') - - else - call prep_glc_zero_fields() - end if ! glcrun_avg_alarm - end if ! lnd_c2_glc - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:GLCPREP',cplrun=.true.) - - end if ! iamin_CPLID .and. glc_prognostic - - ! Set the infodata field on all tasks (not just those with iamin_CPLID). - if (glc_prognostic) then - if (glcrun_avg_alarm) then - call seq_infodata_PutData(infodata, glc_valid_input=.true.) - else - call seq_infodata_PutData(infodata, glc_valid_input=.false.) - end if - end if - - !---------------------------------------------------- - !| cpl -> glc - !---------------------------------------------------- - - if (iamin_CPLALLGLCID .and. 
glc_prognostic) then - call component_exch(glc, flow='x2c', & - infodata=infodata, infodata_string='cpl2glc_run', & - mpicom_barrier=mpicom_CPLALLGLCID, run_barriers=run_barriers, & - timer_barrier='CPL:C2G_BARRIER', timer_comp_exch='CPL:C2G', & - timer_map_exch='CPL:c2g_glcx2glcg', timer_infodata_exch='CPL:c2g_infoexch') - endif - - endif - - !---------------------------------------------------------- - !| ROF RECV-POST - !---------------------------------------------------------- - - if (rof_present .and. rofrun_alarm) then - - !---------------------------------------------------------- - !| rof -> cpl - !---------------------------------------------------------- - - if (iamin_CPLALLROFID) then - call component_exch(rof, flow='c2x', & - infodata=infodata, infodata_string='rof2cpl_run', & - mpicom_barrier=mpicom_CPLALLROFID, run_barriers=run_barriers, & - timer_barrier='CPL:R2C_BARRIER', timer_comp_exch='CPL:R2C', & - timer_map_exch='CPL:r2c_rofr2rofx', timer_infodata_exch='CPL:r2c_infoexch') - endif - - !---------------------------------------------------------- - !| rof post - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ROFPOST_BARRIER') - call t_drvstartf ('CPL:ROFPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, rof, flow='c2x', comment= 'recv rof', & - info_debug=info_debug, timer_diag='CPL:rofpost_diagav') - - if (rof_c2_lnd) then - call prep_lnd_calc_r2x_lx(timer='CPL:rofpost_rof2lnd') - endif - - if (rof_c2_ice) then - call prep_ice_calc_r2x_ix(timer='CPL:rofpost_rof2ice') - endif - - if (rof_c2_ocn) then - call prep_ocn_calc_r2x_ox(timer='CPL:rofpost_rof2ocn') - endif - - call t_drvstopf ('CPL:ROFPOST', cplrun=.true.) - endif - endif - - if (rof_present) then - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='DRIVER_ROFPOST_BARRIER') - call t_drvstartf ('DRIVER_ROFPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (do_hist_r2x) then - call t_drvstartf ('driver_rofpost_histaux', barrier=mpicom_CPLID) - do eri = 1,num_inst_rof - suffix = component_get_suffix(rof(eri)) - call seq_hist_writeaux(infodata, EClock_d, rof(eri), flow='c2x', & - aname='r2x'//trim(suffix), dname='domrb', & - nx=rof_nx, ny=rof_ny, nt=1, write_now=t24hr_alarm) - enddo - call t_drvstopf ('driver_rofpost_histaux') - endif - call t_drvstopf ('DRIVER_ROFPOST', cplrun=.true.) - endif - endif - - !---------------------------------------------------------- - !| Budget with old fractions - !---------------------------------------------------------- - - ! WJS (2-17-11): I am just using the first instance for the budgets because we - ! don't expect budgets to be conserved for our case (I case). Also note that we - ! don't expect budgets to be conserved for the interactive ensemble use case either. - ! tcraig (aug 2012): put this after rof->cpl so the budget sees the new r2x_rx. - ! it will also use the current r2x_ox here which is the value from the last timestep - ! consistent with the ocean coupling - - if (iamin_CPLID .and. do_budgets) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET1_BARRIER') - call t_drvstartf ('CPL:BUDGET1',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - if (lnd_present) then - call seq_diag_lnd_mct(lnd(ens1), fractions_lx(ens1), infodata, & - do_l2x=.true., do_x2l=.true.) 
- endif - if (rof_present) then - call seq_diag_rof_mct(rof(ens1), fractions_rx(ens1), infodata) - endif - if (ice_present) then - call seq_diag_ice_mct(ice(ens1), fractions_ix(ens1), infodata, & - do_x2i=.true.) - endif - call t_drvstopf ('CPL:BUDGET1',cplrun=.true.,budget=.true.) - endif - - - !---------------------------------------------------------- - !| ICE RECV-POST - !---------------------------------------------------------- - - if (ice_present .and. icerun_alarm) then - - !---------------------------------------------------------- - !| ice -> cpl - !---------------------------------------------------------- - - if (iamin_CPLALLICEID) then - call component_exch(ice, flow='c2x', & - infodata=infodata, infodata_string='ice2cpl_run', & - mpicom_barrier=mpicom_CPLALLICEID, run_barriers=run_barriers, & - timer_barrier='CPL:I2C_BARRIER', timer_comp_exch='CPL:I2C', & - timer_map_exch='CPL:i2c_icei2icex', timer_infodata_exch='CPL:i2c_infoexch') - endif - - !---------------------------------------------------------- - !| ice post - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ICEPOST_BARRIER') - call t_drvstartf ('CPL:ICEPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, ice, flow='c2x', comment= 'recv ice', & - info_debug=info_debug, timer_diag='CPL:icepost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ICEPOST',cplrun=.true.) - endif - endif - - !---------------------------------------------------------- - !| Update fractions based on new ice fractions - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:FRACSET_BARRIER') - call t_drvstartf ('CPL:FRACSET',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - call t_drvstartf ('CPL:fracset_fracset',barrier=mpicom_CPLID) - - do efi = 1,num_inst_frc - eii = mod((efi-1),num_inst_ice) + 1 - - call seq_frac_set(infodata, ice(eii), & - fractions_ax(efi), fractions_ix(efi), fractions_ox(efi)) - enddo - call t_drvstopf ('CPL:fracset_fracset') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:FRACSET',cplrun=.true.) - endif - - !---------------------------------------------------------- - !| ATM/OCN SETUP (rasm_option2) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) == 'RASM_OPTION2') .and. & - iamin_CPLID .and. ocn_present) then - - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMOCN2_BARRIER') - call t_drvstartf ('CPL:ATMOCN2',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (ocn_prognostic) then - ! Map ice to ocn - if (ice_c2_ocn) call prep_ocn_calc_i2x_ox(timer='CPL:atmocnp_ice2ocn') - - ! Map wav to ocn - if (wav_c2_ocn) call prep_ocn_calc_w2x_ox(timer='CPL:atmocnp_wav2ocn') - endif - - !---------------------------------------------------------- - !| atm/ocn flux on atm grid (rasm_option2 and aoflux_grid='atm') - !---------------------------------------------------------- - - if (trim(aoflux_grid) == 'atm') then - ! compute o2x_ax for flux_atmocn, will be updated before atm merge - ! 
can use fractions because fractions here are consistent with fractions in atm_mrg - if (ocn_c2_atm) call prep_atm_calc_o2x_ax(fractions_ox,timer='CPL:atmoca_ocn2atm') - - call t_drvstartf ('CPL:atmocna_fluxa',barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ax => component_get_c2x_cx(atm(eai)) - o2x_ax => prep_atm_get_o2x_ax() ! array over all instances - xao_ax => prep_aoflux_get_xao_ax() ! array over all instances - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ax, o2x_ax(eoi), xao_ax(exi)) - enddo - call t_drvstopf ('CPL:atmocna_fluxa') - - if (atm_c2_ocn) call prep_aoflux_calc_xao_ox(timer='CPL:atmocna_atm2ocn') - endif ! aoflux_grid - - !---------------------------------------------------------- - !| atm/ocn flux on ocn grid (rasm_option2 and aoflux_grid='ocn') - !---------------------------------------------------------- - - if (trim(aoflux_grid) == 'ocn') then - call t_drvstartf ('CPL:atmocnp_fluxo',barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - eai = mod((exi-1),num_inst_atm) + 1 - eoi = mod((exi-1),num_inst_ocn) + 1 - efi = mod((exi-1),num_inst_frc) + 1 - a2x_ox => prep_ocn_get_a2x_ox() - o2x_ox => component_get_c2x_cx(ocn(eoi)) - xao_ox => prep_aoflux_get_xao_ox() - call seq_flux_atmocn_mct(infodata, tod, dtime, a2x_ox(eai), o2x_ox, xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_fluxo') - endif ! aoflux_grid - - !---------------------------------------------------------- - !| ocn prep-merge (rasm_option2) - !---------------------------------------------------------- - - xao_ox => prep_aoflux_get_xao_ox() - call prep_ocn_mrg(infodata, fractions_ox, xao_ox=xao_ox, timer_mrg='CPL:atmocnp_mrgx2o') - - ! Accumulate ocn inputs - form partial sum of tavg ocn inputs (virtual "send" to ocn) - call prep_ocn_accum(timer='CPL:atmocnp_accum') - - !---------------------------------------------------------- - !| ocn albedos (rasm_option2) - ! (MUST BE AFTER prep_ocn_mrg for swnet to ocn to be computed properly - !---------------------------------------------------------- - - call t_drvstartf ('CPL:atmocnp_ocnalb', barrier=mpicom_CPLID) - do exi = 1,num_inst_xao - efi = mod((exi-1),num_inst_frc) + 1 - eai = mod((exi-1),num_inst_atm) + 1 - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - a2x_ox => prep_ocn_get_a2x_ox() - call seq_flux_ocnalb_mct(infodata, ocn(1), a2x_ox(eai), fractions_ox(efi), xao_ox(exi)) - enddo - call t_drvstopf ('CPL:atmocnp_ocnalb') - - !---------------------------------------------------------- - !| ocn budget (rasm_option2) - !---------------------------------------------------------- - - if (do_budgets) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET0_BARRIER') - call t_drvstartf ('CPL:BUDGET0',budget=.true.,barrier=mpicom_CPLID) - xao_ox => prep_aoflux_get_xao_ox() ! array over all instances - call seq_diag_ocn_mct(ocn(ens1), xao_ox(1), fractions_ox(ens1), infodata, & - do_o2x=.true., do_x2o=.true., do_xao=.true.) - call t_drvstopf ('CPL:BUDGET0',budget=.true.) - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ATMOCN2',cplrun=.true.) - endif - - !---------------------------------------------------------- - !| OCN SETUP-SEND (rasm_option2) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) == 'RASM_OPTION2' ) .and. & - ocn_present .and. ocnrun_alarm) then - - !---------------------------------------------------- - ! 
"startup" wait (rasm_option2) - !---------------------------------------------------- - - if (iamin_CPLALLOCNID) then - ! want to know the time the ocean pes waited for the cpl pes - ! at the first ocnrun_alarm, min ocean wait is wait time - ! do not use t_barrierf here since it can be "off", use mpi_barrier - do eoi = 1,num_inst_ocn - if (ocn(eoi)%iamin_compid) call t_drvstartf ('CPL:C2O_INITWAIT') - enddo - call mpi_barrier(mpicom_CPLALLOCNID,ierr) - do eoi = 1,num_inst_ocn - if (ocn(eoi)%iamin_compid) call t_drvstopf ('CPL:C2O_INITWAIT') - enddo - cpl2ocn_first = .false. - endif - - !---------------------------------------------------- - !| ocn average (rasm_option2) - !---------------------------------------------------- - - if (iamin_CPLID .and. ocn_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPRE2_BARRIER') - call t_drvstartf ('CPL:OCNPRE2',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - ! finish accumulating ocean inputs - ! reset the value of x2o_ox with the value in x2oacc_ox - ! (module variable in prep_ocn_mod) - call prep_ocn_accum_avg(timer_accum='CPL:ocnprep_avg') - - call component_diag(infodata, ocn, flow='x2c', comment= 'send ocn', & - info_debug=info_debug, timer_diag='CPL:ocnprep_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPRE2',cplrun=.true.) - endif - - !---------------------------------------------------- - !| cpl -> ocn (rasm_option2) - !---------------------------------------------------- - - if (iamin_CPLALLOCNID .and. ocn_prognostic) then - call component_exch(ocn, flow='x2c', & - infodata=infodata, infodata_string='cpl2ocn_run', & - mpicom_barrier=mpicom_CPLALLOCNID, run_barriers=run_barriers, & - timer_barrier='CPL:C2O2_BARRIER', timer_comp_exch='CPL:C2O2', & - timer_map_exch='CPL:c2o2_ocnx2ocno', timer_infodata_exch='CPL:c2o2_infoexch') - endif - - endif - - !---------------------------------------------------------- - !| ATM SETUP-SEND - !---------------------------------------------------------- - - if (atm_present .and. atmrun_alarm) then - - !---------------------------------------------------------- - !| atm prep-merge - !---------------------------------------------------------- - - if (iamin_CPLID .and. atm_prognostic) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMPREP_BARRIER') - call t_drvstartf ('CPL:ATMPREP',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - if (ocn_c2_atm) then - if (trim(aoflux_grid) == 'ocn') then - ! map xao_ox states and fluxes to xao_ax if fluxes were computed on ocn grid - call prep_aoflux_calc_xao_ax(fractions_ox, flds='states_and_fluxes', & - timer='CPL:atmprep_xao2atm') - endif - - ! recompute o2x_ax now for the merge with fractions associated with merge - call prep_atm_calc_o2x_ax(fractions_ox, timer='CPL:atmprep_ocn2atm') - - ! 
map xao_ox albedos to the atm grid, these are always computed on the ocean grid - call prep_aoflux_calc_xao_ax(fractions_ox, flds='albedos', timer='CPL:atmprep_alb2atm') - endif - - if (ice_c2_atm) then - call prep_atm_calc_i2x_ax(fractions_ix, timer='CPL:atmprep_ice2atm') - endif - - if (lnd_c2_atm) then - call prep_atm_calc_l2x_ax(fractions_lx, timer='CPL:atmprep_lnd2atm') - endif - - if (associated(xao_ax)) then - call prep_atm_mrg(infodata, fractions_ax, xao_ax=xao_ax, timer_mrg='CPL:atmprep_mrgx2a') - endif - - call component_diag(infodata, atm, flow='x2c', comment= 'send atm', info_debug=info_debug, & - timer_diag='CPL:atmprep_diagav') - - call t_drvstopf ('CPL:ATMPREP',cplrun=.true.) - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - !---------------------------------------------------------- - !| cpl -> atm - !---------------------------------------------------------- - - if (iamin_CPLALLATMID .and. atm_prognostic) then - call component_exch(atm, flow='x2c', infodata=infodata, infodata_string='cpl2atm_run', & - mpicom_barrier=mpicom_CPLALLATMID, run_barriers=run_barriers, & - timer_barrier='CPL:C2A_BARRIER', timer_comp_exch='CPL:C2A', & - timer_map_exch='CPL:c2a_atmx2atmg', timer_infodata_exch='CPL:c2a_infoexch') - endif - - endif - - !---------------------------------------------------------- - !| RUN OCN MODEL (NOT cesm1_orig_tight or cesm1_mod_tight) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) /= 'CESM1_ORIG_TIGHT' .and. & - trim(cpl_seq_option) /= 'CESM1_MOD_TIGHT' ) .and. & - ocn_present .and. ocnrun_alarm) then - call component_run(Eclock_o, ocn, ocn_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2o_fluxes, & - seq_flds_c2x_fluxes=seq_flds_o2x_fluxes, & - comp_prognostic=ocn_prognostic, comp_num=comp_num_ocn, & - timer_barrier= 'CPL:OCN_RUN_BARRIER', timer_comp_run='CPL:OCN_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=ocn_layout) - endif - - !---------------------------------------------------------- - !| RUN ATM MODEL - !---------------------------------------------------------- - - if (atm_present .and. atmrun_alarm) then - call component_run(Eclock_a, atm, atm_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2a_fluxes, & - seq_flds_c2x_fluxes=seq_flds_a2x_fluxes, & - comp_prognostic=atm_prognostic, comp_num=comp_num_atm, & - timer_barrier= 'CPL:ATM_RUN_BARRIER', timer_comp_run='CPL:ATM_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod, comp_layout=atm_layout) - endif - - !---------------------------------------------------------- - !| RUN GLC MODEL - !---------------------------------------------------------- - - if (glc_present .and. glcrun_alarm) then - call component_run(Eclock_g, glc, glc_run, infodata, & - seq_flds_x2c_fluxes=seq_flds_x2g_fluxes, & - seq_flds_c2x_fluxes=seq_flds_g2x_fluxes, & - comp_prognostic=glc_prognostic, comp_num=comp_num_glc, & - timer_barrier= 'CPL:GLC_RUN_BARRIER', timer_comp_run='CPL:GLC_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=glc_layout) - endif - - !---------------------------------------------------------- - !| WAV RECV-POST - !---------------------------------------------------------- - - if (wav_present .and. 
wavrun_alarm) then - - !---------------------------------------------------------- - !| wav -> cpl - !---------------------------------------------------------- - - if (iamin_CPLALLWAVID) then - call component_exch(wav, flow='c2x', infodata=infodata, infodata_string='wav2cpl_run', & - mpicom_barrier=mpicom_CPLALLWAVID, run_barriers=run_barriers, & - timer_barrier='CPL:W2C_BARRIER', timer_comp_exch='CPL:W2C', & - timer_map_exch='CPL:w2c_wavw2wavx', timer_infodata_exch='CPL:w2c_infoexch') - endif - - !---------------------------------------------------------- - !| wav post - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:WAVPOST_BARRIER') - call t_drvstartf ('CPL:WAVPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, wav, flow='c2x', comment= 'recv wav', & - info_debug=info_debug, timer_diag='CPL:wavpost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:WAVPOST',cplrun=.true.) - endif - endif - - !---------------------------------------------------------- - !| GLC RECV-POST - !---------------------------------------------------------- - - if (glc_present .and. glcrun_alarm) then - - !---------------------------------------------------------- - !| glc -> cpl - !---------------------------------------------------------- - - if (iamin_CPLALLGLCID) then - call component_exch(glc, flow='c2x', infodata=infodata, infodata_string='glc2cpl_run', & - mpicom_barrier=mpicom_CPLALLGLCID, run_barriers=run_barriers, & - timer_barrier='CPL:G2C_BARRIER', timer_comp_exch='CPL:G2C', & - timer_map_exch='CPL:g2c_glcg2glcx', timer_infodata_exch='CPL:g2c_infoexch') - endif - - !---------------------------------------------------------- - !| glc post - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:GLCPOST_BARRIER') - call t_drvstartf ('CPL:GLCPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, glc, flow='c2x', comment= 'recv glc', & - info_debug=info_debug, timer_diag='CPL:glcpost_diagav') - - if (glc_c2_lnd) then - call prep_lnd_calc_g2x_lx(timer='CPL:glcpost_glc2lnd') - endif - - if (glc_c2_ice) then - call prep_ice_calc_g2x_ix(timer='CPL:glcpost_glc2ice') - endif - - if (glc_c2_ocn) then - call prep_ocn_calc_g2x_ox(timer='CPL:glcpost_glc2ocn') - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:GLCPOST',cplrun=.true.) - endif - endif - - !---------------------------------------------------------- - !| ATM RECV-POST - !---------------------------------------------------------- - - if (atm_present .and. atmrun_alarm) then - - !---------------------------------------------------------- - !| atm -> cpl - !---------------------------------------------------------- - - if (iamin_CPLALLATMID) then - call component_exch(atm, flow='c2x', infodata=infodata, infodata_string='atm2cpl_run', & - mpicom_barrier=mpicom_CPLALLATMID, run_barriers=run_barriers, & - timer_barrier='CPL:A2C_BARRIER', timer_comp_exch='CPL:A2C', & - timer_map_exch='CPL:a2c_atma2atmx', timer_infodata_exch='CPL:a2c_infoexch') - ! 
will migrate the tag from component pes to coupler pes, on atm mesh - call prep_atm_migrate_moab(infodata) - endif - - !---------------------------------------------------------- - !| atm post - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:ATMPOST_BARRIER') - call t_drvstartf ('CPL:ATMPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, atm, flow='c2x', comment= 'recv atm', & - info_debug=info_debug, timer_diag='CPL:atmpost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:ATMPOST',cplrun=.true.) - endif - endif - - !---------------------------------------------------------- - !| Budget with new fractions - !---------------------------------------------------------- - - if (iamin_CPLID .and. do_budgets) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:BUDGET2_BARRIER') - - call t_drvstartf ('CPL:BUDGET2',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - if (atm_present) then - call seq_diag_atm_mct(atm(ens1), fractions_ax(ens1), infodata, & - do_a2x=.true., do_x2a=.true.) - endif - if (ice_present) then - call seq_diag_ice_mct(ice(ens1), fractions_ix(ens1), infodata, & - do_i2x=.true.) - endif - call t_drvstopf ('CPL:BUDGET2',cplrun=.true.,budget=.true.) - - call t_drvstartf ('CPL:BUDGET3',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - call seq_diag_accum_mct() - call t_drvstopf ('CPL:BUDGET3',cplrun=.true.,budget=.true.) - - call t_drvstartf ('CPL:BUDGETF',cplrun=.true.,budget=.true.,barrier=mpicom_CPLID) - if (.not. dead_comps) then - call seq_diag_print_mct(EClock_d,stop_alarm,budget_inst, & - budget_daily, budget_month, budget_ann, budget_ltann, budget_ltend) - endif - call seq_diag_zero_mct(EClock=EClock_d) - - call t_drvstopf ('CPL:BUDGETF',cplrun=.true.,budget=.true.) - endif - - !---------------------------------------------------------- - !| OCN RECV-POST (NOT cesm1_orig_tight and cesm1_mod_tight) - !---------------------------------------------------------- - - if ((trim(cpl_seq_option) /= 'CESM1_ORIG_TIGHT' .and. & - trim(cpl_seq_option) /= 'CESM1_MOD_TIGHT' ) .and. & - ocn_present .and. ocnnext_alarm) then - - !---------------------------------------------------------- - !| ocn -> cpl (NOT cesm1_orig_tight and cesm1_mod_tight) - !---------------------------------------------------------- - - if (iamin_CPLALLOCNID) then - call component_exch(ocn, flow='c2x', & - infodata=infodata, infodata_string='ocn2cpl_run', & - mpicom_barrier=mpicom_CPLALLOCNID, run_barriers=run_barriers, & - timer_barrier='CPL:O2C_BARRIER', timer_comp_exch='CPL:O2C', & - timer_map_exch='CPL:o2c_ocno2ocnx', timer_infodata_exch='CPL:o2c_infoexch') - endif - - !---------------------------------------------------------- - !| ocn post (NOT cesm1_orig_tight and cesm1_mod_tight) - !---------------------------------------------------------- - - if (iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:OCNPOST_BARRIER') - call t_drvstartf ('CPL:OCNPOST',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - - call component_diag(infodata, ocn, flow='c2x', comment= 'recv ocn', & - info_debug=info_debug, timer_diag='CPL:ocnpost_diagav') - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:OCNPOST',cplrun=.true.) 
- endif - endif - - !---------------------------------------------------------- - !| Write driver restart file - !---------------------------------------------------------- - if ( (restart_alarm .or. drv_pause) .and. iamin_CPLID) then - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:RESTART_BARRIER') - call t_drvstartf ('CPL:RESTART',cplrun=.true.,barrier=mpicom_CPLID) - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - write(logunit,104) ' Write restart file at ',ymd,tod - call shr_sys_flush(logunit) - endif - - call seq_rest_write(EClock_d, seq_SyncClock, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, trim(cpl_inst_tag)) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - call t_drvstopf ('CPL:RESTART',cplrun=.true.) - endif - - !---------------------------------------------------------- - !| Write history file, only AVs on CPLID - !---------------------------------------------------------- - - if (iamin_CPLID) then - - call cime_comp_barriers(mpicom=mpicom_CPLID, timer='CPL:HISTORY_BARRIER') - call t_drvstartf ('CPL:HISTORY',cplrun=.true.,barrier=mpicom_CPLID) - if ( history_alarm) then - if (drv_threading) call seq_comm_setnthreads(nthreads_CPLID) - if (iamroot_CPLID) then - write(logunit,104) ' Write history file at ',ymd,tod - call shr_sys_flush(logunit) - endif - - call seq_hist_write(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx, trim(cpl_inst_tag)) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - - if (do_histavg) then - call seq_hist_writeavg(infodata, EClock_d, & - atm, lnd, ice, ocn, rof, glc, wav, histavg_alarm, & - trim(cpl_inst_tag)) - endif - - if (do_hist_a2x) then - do eai = 1,num_inst_atm - suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=ncpl) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=ncpl, flds=hist_a2x_flds) - endif - enddo - endif - - if (do_hist_a2x1hri .and. 
t1hr_alarm) then - do eai = 1,num_inst_atm - suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x1hri_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1hi'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=24) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1hi'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=24, flds=hist_a2x1hri_flds) - endif - enddo - endif - - if (do_hist_a2x1hr) then - do eai = 1,num_inst_atm - suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x1hr_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1h'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=24, write_now=t1hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1h'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=24, write_now=t1hr_alarm, flds=hist_a2x1hr_flds) - endif - enddo - endif - - if (do_hist_a2x3hr) then - do eai = 1,num_inst_atm - suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x3hr_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm, flds=hist_a2x3hr_flds) - endif - enddo - endif - - if (do_hist_a2x3hrp) then - do eai = 1,num_inst_atm - suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x3hrp_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h_prec'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x3h_prec'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=8, write_now=t3hr_alarm, flds=hist_a2x3hrp_flds) - endif - enddo - endif - - if (do_hist_a2x24hr) then - do eai = 1,num_inst_atm - suffix = component_get_suffix(atm(eai)) - if (trim(hist_a2x24hr_flds) == 'all') then - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1d'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=1, write_now=t24hr_alarm) - else - call seq_hist_writeaux(infodata, EClock_d, atm(eai), flow='c2x', & - aname='a2x1d'//trim(suffix), dname='doma', & - nx=atm_nx, ny=atm_ny, nt=1, write_now=t24hr_alarm, flds=hist_a2x24hr_flds) - endif - enddo - endif - - if (do_hist_l2x1yrg) then - ! We use a different approach here than for other aux hist files: For other - ! files, we let seq_hist_writeaux accumulate fields in time. However, if we - ! stop in the middle of an accumulation period, these accumulated fields get - ! reset (because they aren't written to the cpl restart file); this is - ! potentially a problem for this year-long accumulation. Thus, here, we use - ! the existing accumulated fields from prep_glc_mod, because those *do* - ! continue properly through a restart. - - ! The logic here assumes that we average the lnd2glc fields exactly at the - ! year boundary - no more and no less. If that's not the case, we're likely - ! to be writing the wrong thing to these aux files, so we check that - ! assumption here. - if (t1yr_alarm .and. .not. 
lnd2glc_averaged_now) then - write(logunit,*) 'ERROR: histaux_l2x1yrg requested;' - write(logunit,*) 'it is the year boundary, but lnd2glc fields were not averaged this time step.' - write(logunit,*) 'One possible reason is that you are running with a stub glc model.' - write(logunit,*) '(It only works to request histaux_l2x1yrg if running with a prognostic glc model.)' - call shr_sys_abort(subname// & - ' do_hist_l2x1yrg and t1yr_alarm are true, but lnd2glc_averaged_now is false') - end if - if (lnd2glc_averaged_now .and. .not. t1yr_alarm) then - ! If we're averaging more frequently than yearly, then just writing the - ! current values of the averaged fields once per year won't give the true - ! annual averages. - write(logunit,*) 'ERROR: histaux_l2x1yrg requested;' - write(logunit,*) 'lnd2glc fields were averaged this time step, but it is not the year boundary.' - write(logunit,*) '(It only works to request histaux_l2x1yrg if GLC_AVG_PERIOD is yearly.)' - call shr_sys_abort(subname// & - ' do_hist_l2x1yrg and lnd2glc_averaged_now are true, but t1yr_alarm is false') - end if - - if (t1yr_alarm) then - call seq_timemgr_EClockGetData( EClock_d, ECurrTime = etime_curr) - ! We need to pass in tbnds1_offset because (unlike with most - ! seq_hist_writeaux calls) here we don't call seq_hist_writeaux every time - ! step, so the automatically determined lower time bound can be wrong. For - ! typical runs with a noleap calendar, we want tbnds1_offset = - ! -365. However, to determine this more generally, based on the calendar - ! we're using, we call this shr_cal routine. - call shr_cal_ymds2rday_offset(etime=etime_curr, & - rdays_offset = tbnds1_offset, & - years_offset = -1) - do eli = 1,num_inst_lnd - suffix = component_get_suffix(lnd(eli)) - ! Use yr_offset=-1 so the file with fields from year 1 has time stamp - ! 0001-01-01 rather than 0002-01-01, etc. - call seq_hist_writeaux(infodata, EClock_d, lnd(eli), flow='c2x', & - aname='l2x1yr_glc'//trim(suffix), dname='doml', & - nx=lnd_nx, ny=lnd_ny, nt=1, write_now=.true., & - tbnds1_offset = tbnds1_offset, yr_offset=-1, & - av_to_write=prep_glc_get_l2gacc_lx_one_instance(eli)) - enddo - endif - endif - - if (do_hist_l2x) then - do eli = 1,num_inst_lnd - suffix = component_get_suffix(lnd(eli)) - call seq_hist_writeaux(infodata, EClock_d, lnd(eli), flow='c2x', & - aname='l2x'//trim(suffix), dname='doml', & - nx=lnd_nx, ny=lnd_ny, nt=ncpl) - enddo - endif - call t_drvstopf ('CPL:HISTORY',cplrun=.true.) - - endif - !---------------------------------------------------------- - !| RUN ESP MODEL - !---------------------------------------------------------- - if (esp_present .and. esprun_alarm) then - ! 
Make sure that all couplers are here in multicoupler mode before running ESP component - if (num_inst_driver > 1) then - call mpi_barrier(global_comm, ierr) - endif - call component_run(Eclock_e, esp, esp_run, infodata, & - comp_prognostic=esp_prognostic, comp_num=comp_num_esp, & - timer_barrier= 'CPL:ESP_RUN_BARRIER', timer_comp_run='CPL:ESP_RUN', & - run_barriers=run_barriers, ymd=ymd, tod=tod,comp_layout=esp_layout) - !--------------------------------------------------------------------- - !| ESP computes resume options for other components -- update everyone - !--------------------------------------------------------------------- - call seq_infodata_exchange(infodata, CPLALLESPID, 'esp2cpl_run') - endif - - !---------------------------------------------------------- - !| RESUME (read restart) if signaled - !---------------------------------------------------------- - call seq_infodata_GetData(infodata, cpl_resume=drv_resume) - if (len_trim(drv_resume) > 0) then - if (iamroot_CPLID) then - write(logunit,103) subname,' Reading restart (resume) file ',trim(drv_resume) - call shr_sys_flush(logunit) - end if - if (iamin_CPLID) then - call seq_rest_read(drv_resume, infodata, & - atm, lnd, ice, ocn, rof, glc, wav, esp, & - fractions_ax, fractions_lx, fractions_ix, fractions_ox, & - fractions_rx, fractions_gx, fractions_wx) - end if - ! Clear the resume file so we don't try to read it again - drv_resume = ' ' - call seq_infodata_PutData(infodata, cpl_resume=drv_resume) - end if - - !---------------------------------------------------------- - !| Timing and memory diagnostics - !---------------------------------------------------------- - - call t_drvstartf ('CPL:TSTAMP_WRITE',cplrun=.true.) - if (tod == 0 .or. info_debug > 1) then - if (iamroot_CPLID) then - call date_and_time(dstr,tstr) - Time_estep = mpi_wtime() - cktime = time_estep-time_bstep - cktime_acc(1) = cktime_acc(1) + cktime - cktime_cnt(1) = cktime_cnt(1) + 1 -#ifndef CPL_BYPASS - write(logunit,101) ' tStamp_write: model date = ',ymd,tod, & - ' wall clock = ',dstr(1:4),'-',dstr(5:6),'-',dstr(7:8),' ',& - tstr(1:2),':',tstr(3:4),':',tstr(5:6), & - ' avg dt = ',cktime_acc(1)/cktime_cnt(1),' dt = ',cktime -#endif - Time_bstep = mpi_wtime() - call shr_sys_flush(logunit) - if(cktime > max_cplstep_time .and. max_cplstep_time > 0.0) then - call shr_sys_abort(subname//'Wall clock time exceeds max_cplstep_time') - else if(max_cplstep_time < -0.05) then - ! if max_cplstep_time is < 0, we use abs(max_cplstep_time) - ! times the initial cktime value as a threshold - max_cplstep_time = -(max_cplstep_time)*cktime - endif - endif - end if - if (tod == 0 .and. wall_time_limit > 0.0_r8 .and. .not. force_stop) then - time_erun = mpi_wtime() - ! time_*run is seconds, wall_time_limit is hours - wall_time = (time_erun - time_brun) / 3600._r8 ! convert secs to hrs - write(logunit,109) subname//' check wall_time_limit: ',wall_time, wall_time_limit - if (wall_time > wall_time_limit) then - force_stop = .true. - force_stop_tod = 0 - if (trim(force_stop_at) == 'month') then - call shr_cal_date2ymd(ymd,year,month,day) - month = month + 1 - do while (month > 12) - month = month - 12 - year = year + 1 - enddo - call shr_cal_ymd2date(year,month,1,force_stop_ymd) - elseif (trim(force_stop_at) == 'year') then ! next year - call shr_cal_date2ymd(ymd,year,month,day) - call shr_cal_ymd2date(year+1,1,1,force_stop_ymd) - elseif (trim(force_stop_at) == 'day') then ! 
next day - ymdtmp = ymd - call shr_cal_advDateInt(1,'days' ,ymdtmp,0,force_stop_ymd,todtmp,calendar) - else ! day is default - ymdtmp = ymd - call shr_cal_advDateInt(1,'days' ,ymdtmp,0,force_stop_ymd,todtmp,calendar) - endif - write(logunit,108) subname//' reached wall_time_limit (hours) =',wall_time_limit, & - ' :stop at ',force_stop_ymd - endif - endif -#ifndef CPL_BYPASS - if (tod == 0 .or. info_debug > 1) then - !! Report on memory usage - !! For now, just look at the first instance of each component - if ( iamroot_CPLID .or. & - ocn(ens1)%iamroot_compid .or. & - atm(ens1)%iamroot_compid .or. & - lnd(ens1)%iamroot_compid .or. & - ice(ens1)%iamroot_compid .or. & - glc(ens1)%iamroot_compid .or. & - wav(ens1)%iamroot_compid) then - call shr_mem_getusage(msize,mrss,.true.) - - write(logunit,105) ' memory_write: model date = ',ymd,tod, & - ' memory = ',msize,' MB (highwater) ',mrss,' MB (usage)', & - ' (pe=',iam_GLOID,' comps=',trim(complist)//')' - endif - endif -#endif - if (info_debug > 1) then - if (iamroot_CPLID) then - call seq_infodata_GetData(infodata,nextsw_cday=nextsw_cday) - ! write(logunit,106) ' nextsw_cday = ',nextsw_cday - write(logunit,*) ' nextsw_cday = ',nextsw_cday - endif - endif - call t_drvstopf ('CPL:TSTAMP_WRITE',cplrun=.true.) - - call t_stopf ('CPL:RUN_LOOP', hashint(1)) - - ! --- Write out performance data - call t_startf ('CPL:TPROF_WRITE') - if ((tprof_alarm) .or. ((tod == 0) .and. in_first_day)) then - - if ((tod == 0) .and. in_first_day) then - in_first_day = .false. - endif - call t_adj_detailf(+1) - - call t_startf("CPL:sync1_tprof") - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf("CPL:sync1_tprof") - - write(timing_file,'(a,i8.8,a1,i5.5)') & - trim(tchkpt_dir)//"/model_timing"//trim(cpl_inst_tag)//"_",ymd,"_",tod - - call t_set_prefixf("CPL:") - if (output_perf) then - call t_prf(filename=trim(timing_file), mpicom=mpicom_GLOID, & - num_outpe=0, output_thispe=output_perf) - else - call t_prf(filename=trim(timing_file), mpicom=mpicom_GLOID, & - num_outpe=0) - endif - call t_unset_prefixf() - - call t_startf("CPL:sync2_tprof") - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf("CPL:sync2_tprof") - - call t_adj_detailf(-1) - endif - call t_stopf ('CPL:TPROF_WRITE') - - if (barrier_alarm) then - call t_drvstartf ('CPL:BARRIERALARM',cplrun=.true.) - call mpi_barrier(mpicom_GLOID,ierr) - call t_drvstopf ('CPL:BARRIERALARM',cplrun=.true.) - endif - - enddo ! driver run loop - - !|---------------------------------------------------------- - !| End of driver time step loop - !|--------------------------------------------------------- - - call t_startf ('CPL:RUN_LOOP_BSTOP') - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf ('CPL:RUN_LOOP_BSTOP') - - Time_end = mpi_wtime() - - end subroutine cime_run - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine cime_final() - - use shr_pio_mod, only : shr_pio_finalize - use shr_wv_sat_mod, only: shr_wv_sat_final - character(len=cs) :: cime_model - - !------------------------------------------------------------------------ - ! 
Finalization of all models - !------------------------------------------------------------------------ - - call t_barrierf ('CPL:FINAL_BARRIER', mpicom_GLOID) - call t_startf ('CPL:FINAL') - call t_adj_detailf(+1) - - call t_startf('CPL:cime_final') - call t_adj_detailf(+1) - - call seq_timemgr_EClockGetData( EClock_d, stepno=endstep) - call shr_mem_getusage(msize,mrss) - - call component_final(EClock_a, atm, atm_final) - call component_final(EClock_l, lnd, lnd_final) - call component_final(EClock_r, rof, rof_final) - call component_final(EClock_i, ice, ice_final) - call component_final(EClock_o, ocn, ocn_final) - call component_final(EClock_g, glc, glc_final) - call component_final(EClock_w, wav, wav_final) - - !------------------------------------------------------------------------ - ! End the run cleanly - !------------------------------------------------------------------------ - - call shr_wv_sat_final() - call seq_infodata_GetData(infodata, cime_model=cime_model) - call shr_pio_finalize( ) - - call shr_mpi_min(msize ,msize0,mpicom_GLOID,' driver msize0', all=.true.) - call shr_mpi_max(msize ,msize1,mpicom_GLOID,' driver msize1', all=.true.) - call shr_mpi_min(mrss ,mrss0,mpicom_GLOID,' driver mrss0', all=.true.) - call shr_mpi_max(mrss ,mrss1,mpicom_GLOID,' driver mrss1', all=.true.) - - if (iamroot_CPLID )then - call seq_timemgr_EClockGetData( EClock_d, curr_ymd=ymd, curr_tod=tod, dtime=dtime) - simDays = (endStep-begStep)*dtime/(24._r8*3600._r8) - write(logunit,'(//)') - write(logunit,FormatA) subname, 'SUCCESSFUL TERMINATION OF CPL7-'//trim(cime_model) - write(logunit,FormatD) subname, ' at YMD,TOD = ',ymd,tod - write(logunit,FormatR) subname, '# simulated days (this run) = ', simDays - write(logunit,FormatR) subname, 'compute time (hrs) = ', (Time_end-Time_begin)/3600._r8 - if ( (Time_end /= Time_begin) .and. (simDays /= 0.0_r8) )then - SYPD = shr_const_cday*simDays/(days_per_year*(Time_end-Time_begin)) - write(logunit,FormatR) subname, '# simulated years / cmp-day = ', SYPD - endif - write(logunit,FormatR) subname,' pes min memory highwater (MB) = ',msize0 - write(logunit,FormatR) subname,' pes max memory highwater (MB) = ',msize1 - write(logunit,FormatR) subname,' pes min memory last usage (MB) = ',mrss0 - write(logunit,FormatR) subname,' pes max memory last usage (MB) = ',mrss1 - write(logunit,'(//)') - close(logunit) - endif - - call t_adj_detailf(-1) - call t_stopf('CPL:cime_final') - - call t_adj_detailf(-1) - call t_stopf ('CPL:FINAL') - - call t_startf("sync3_tprof") - call mpi_barrier(mpicom_GLOID,ierr) - call t_stopf("sync3_tprof") - - if (output_perf) then - call t_prf(trim(timing_dir)//'/model_timing'//trim(cpl_inst_tag), & - mpicom=mpicom_GLOID, output_thispe=output_perf) - else - call t_prf(trim(timing_dir)//'/model_timing'//trim(cpl_inst_tag), & - mpicom=mpicom_GLOID) - endif - - call t_finalizef() - - end subroutine cime_final - - !=============================================================================== - !******************************************************************************* - !=============================================================================== - - subroutine seq_cime_printlogheader() - - !----------------------------------------------------------------------- - ! - ! Purpose: Print basic information on what this driver program is - ! to the logfile. - ! - !----------------------------------------------------------------------- - ! - ! Local variables - ! - - character(len=8) :: cdate ! System date - character(len=8) :: ctime ! 
System time - integer :: values(8) - character :: date*8, time*10, zone*5 - character(len=cs) :: cime_model - - !------------------------------------------------------------------------------- - - call date_and_time (date, time, zone, values) - call seq_infodata_GetData(infodata, cime_model=cime_model) - cdate(1:2) = date(5:6) - cdate(3:3) = '/' - cdate(4:5) = date(7:8) - cdate(6:6) = '/' - cdate(7:8) = date(3:4) - ctime(1:2) = time(1:2) - ctime(3:3) = ':' - ctime(4:5) = time(3:4) - ctime(6:6) = ':' - ctime(7:8) = time(5:6) - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' Common Infrastructure for Modeling the Earth (CIME) CPL7 ' - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' (Online documentation is available on the CIME ' - write(logunit,F00) ' github: http://esmci.github.io/cime/) ' - write(logunit,F00) ' License information is available as a link from above ' - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' MODEL ',cime_model - write(logunit,F00) '------------------------------------------------------------' - write(logunit,F00) ' DATE ',cdate, ' TIME ', ctime - write(logunit,F00) '------------------------------------------------------------' - write(logunit,*)' ' - write(logunit,*)' ' - - end subroutine seq_cime_printlogheader - - !=============================================================================== - - subroutine cime_comp_barriers(mpicom, timer) - integer , intent(in) :: mpicom - character(len=*), intent(in) :: timer - integer :: ierr - - if (run_barriers) then - call t_drvstartf (trim(timer)) - call mpi_barrier(mpicom,ierr) - call t_drvstopf (trim(timer)) - endif - end subroutine cime_comp_barriers - - subroutine cime_cpl_init(comm_in, comm_out, num_inst_driver, id) - !----------------------------------------------------------------------- - ! - ! Initialize multiple coupler instances, if requested - ! - !----------------------------------------------------------------------- - - implicit none - - integer , intent(in) :: comm_in - integer , intent(out) :: comm_out - integer , intent(out) :: num_inst_driver - integer , intent(out) :: id ! instance ID, starts from 1 - ! - ! Local variables - ! - integer :: ierr, mype, nu, numpes !, pes - integer :: ninst_driver, drvpes - character(len=*), parameter :: subname = '(cime_cpl_init) ' - - namelist /cime_driver_inst/ ninst_driver - - call shr_mpi_commrank(comm_in, mype , ' cime_cpl_init') - call shr_mpi_commsize(comm_in, numpes, ' cime_cpl_init') - - num_inst_driver = 1 - id = 0 - - if (mype == 0) then - ! 
Read coupler namelist if it exists - ninst_driver = 1 - nu = shr_file_getUnit() - open(unit = nu, file = NLFileName, status = 'old', iostat = ierr) - rewind(unit = nu) - ierr = 1 - do while ( ierr /= 0 ) - read(unit = nu, nml = cime_driver_inst, iostat = ierr) - if (ierr < 0) then - call shr_sys_abort( subname//':: namelist read returns an'// & - ' end of file or end of record condition' ) - endif - enddo - close(unit = nu) - call shr_file_freeUnit(nu) - num_inst_driver = max(ninst_driver, 1) - end if - - call shr_mpi_bcast(num_inst_driver, comm_in, 'ninst_driver') - - if (mod(numpes, num_inst_driver) /= 0) then - call shr_sys_abort(subname // & - ' : Total PE number must be a multiple of coupler instance number') - end if - - if (num_inst_driver == 1) then - call mpi_comm_dup(comm_in, comm_out, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_dup') - else - id = mype * num_inst_driver / numpes + 1 - call mpi_comm_split(comm_in, id, 0, comm_out, ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_split') - end if - call shr_mpi_commsize(comm_out, drvpes, ' cime_cpl_init') - end subroutine cime_cpl_init - -end module cime_comp_mod diff --git a/src/drivers/moab/main/cime_driver.F90 b/src/drivers/moab/main/cime_driver.F90 deleted file mode 120000 index 33dc6d71aef..00000000000 --- a/src/drivers/moab/main/cime_driver.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/cime_driver.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/component_mod.F90 b/src/drivers/moab/main/component_mod.F90 deleted file mode 100644 index 8b1c3bb6a1d..00000000000 --- a/src/drivers/moab/main/component_mod.F90 +++ /dev/null @@ -1,949 +0,0 @@ -module component_mod - - !---------------------------------------------------------------------------- - ! share code & libs - !---------------------------------------------------------------------------- - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use shr_const_mod, only: shr_const_cday - use shr_file_mod, only: shr_file_setLogLevel, shr_file_setLogUnit - use shr_file_mod, only: shr_file_setIO, shr_file_getUnit - use shr_scam_mod, only: shr_scam_checkSurface - use shr_mpi_mod, only: shr_mpi_min, shr_mpi_max - use shr_mem_mod, only: shr_mem_init, shr_mem_getusage - use shr_cal_mod, only: shr_cal_date2ymd - use shr_orb_mod, only: shr_orb_params - use shr_reprosum_mod, only: shr_reprosum_setopts - use seq_comm_mct, only: GLOID, CPLID, logunit - use seq_comm_mct, only: seq_comm_iamin, seq_comm_namelen, num_inst_frc - use seq_comm_mct, only: seq_comm_suffix, seq_comm_name, seq_comm_setnthreads - use seq_comm_mct, only: seq_comm_getinfo => seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_putData, seq_infodata_GetData - use seq_infodata_mod, only: seq_infodata_exchange, seq_infodata_type - use seq_diag_mct, only: seq_diag_avect_mct - use seq_map_type_mod - use seq_map_mod - use t_drv_timers_mod - use component_type_mod - use seq_cdata_mod, only : seq_cdata - use mct_mod ! mct_ wrappers for mct lib - use perf_mod - use ESMF - use seq_flds_mod, only: nan_check_component_fields - implicit none - -#include - - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: component_init_pre - public :: component_init_cc ! 
mct and esmf versions - public :: component_init_cx - public :: component_init_aream - public :: component_init_areacor - public :: component_run ! mct and esmf versions - public :: component_final ! mct and esmf versions - public :: component_exch - public :: component_diag - - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - logical :: iamroot_GLOID, iamroot_CPLID ! GLOID, CPLID masterproc - logical :: iamin_CPLID ! true => pe associated with CPLID - integer :: mpicom_GLOID, mpicom_CPLID ! GLOID, CPLID mpi communicator - integer :: nthreads_GLOID, nthreads_CPLID - logical :: drv_threading - - !=============================================================================== - -contains - - !=============================================================================== - - subroutine component_init_pre(comp, compid, cplcompid, cplallcompid, & - infodata, ntype) - use seq_timemgr_mod, only: seq_timemgr_data_assimilation_active - - !--------------------------------------------------------------- - ! Initialize driver rearrangers and AVs on driver - ! Initialize cdata_*x data - ! Zero out x2*_** in case it never gets used then it'll produce zeros in diags - ! For ensembles, create only a single dom_*x for the coupler based on the - ! first ensemble member. otherwise, just extend the dom_** and dom_*x to - ! other ensemble members. - ! - ! Arguments - type(component_type) , intent(inout) :: comp(:) - integer , intent(in) :: compid(:) - integer , intent(in) :: cplcompid(:) - integer , intent(in) :: cplallcompid - type (seq_infodata_type) , intent(inout), target :: infodata - character(len=3) , intent(in) :: ntype - ! - ! Local Variables - integer :: eci ! index - character(len=cl), allocatable :: comp_resume(:) ! Set if comp needs post-DA process - character(*), parameter :: subname = '(component_init_pre)' - !--------------------------------------------------------------- - - ! initialize module variables (this is repetitive here- but does not require a different routine) - - call seq_infodata_getdata(infodata, drv_threading=drv_threading) - call seq_comm_getinfo(GLOID, mpicom=mpicom_GLOID, iamroot=iamroot_GLOID, nthreads=nthreads_GLOID) - call seq_comm_getinfo(CPLID, mpicom=mpicom_CPLID, iamroot=iamroot_CPLID, nthreads=nthreads_CPLID) - iamin_CPLID = seq_comm_iamin(CPLID) - - ! 
Initialize component type variables - allocate(comp_resume(size(comp))) - do eci = 1,size(comp) - - comp(eci)%compid = compid(eci) - comp(eci)%cplcompid = cplcompid(eci) - comp(eci)%cplallcompid = cplallcompid - - call seq_comm_getinfo(comp(eci)%cplallcompid, mpicom=comp(eci)%mpicom_cplallcompid) - call seq_comm_getinfo(comp(eci)%cplcompid , mpicom=comp(eci)%mpicom_cplcompid) - call seq_comm_getinfo(comp(eci)%compid , mpicom=comp(eci)%mpicom_compid) - call seq_comm_getinfo(comp(eci)%compid , iamroot=comp(eci)%iamroot_compid) - call seq_comm_getinfo(comp(eci)%compid , nthreads=comp(eci)%nthreads_compid) - - comp(eci)%iamin_compid = seq_comm_iamin (comp(eci)%compid) - comp(eci)%iamin_cplcompid = seq_comm_iamin (comp(eci)%cplcompid) - comp(eci)%iamin_cplallcompid = seq_comm_iamin (comp(eci)%cplallcompid) - comp(eci)%suffix = seq_comm_suffix(comp(eci)%compid) - comp(eci)%name = seq_comm_name (comp(eci)%compid) - comp(eci)%ntype = ntype(1:3) - comp(eci)%oneletterid = ntype(1:1) - - if (eci == 1) then - allocate(comp(1)%dom_cx) - allocate(comp(1)%gsmap_cx) - else - comp(eci)%dom_cx => comp(1)%dom_cx - comp(eci)%gsmap_cx => comp(1)%gsmap_cx - end if - - ! Set cdata_cc - unique for each instance - allocate(comp(eci)%dom_cc) - allocate(comp(eci)%gsmap_cc) - allocate(comp(eci)%cdata_cc) - comp(eci)%cdata_cc%name = 'cdata_'//ntype(1:1)//ntype(1:1) - comp(eci)%cdata_cc%ID = comp(eci)%compid - comp(eci)%cdata_cc%mpicom = comp(eci)%mpicom_compid - comp(eci)%cdata_cc%dom => comp(eci)%dom_cc - comp(eci)%cdata_cc%gsmap => comp(eci)%gsmap_cc - comp(eci)%cdata_cc%infodata => infodata - - ! Does this component need to do post-data assimilation processing? - if (seq_timemgr_data_assimilation_active(ntype(1:3))) then - comp_resume(:) = 'TRUE' - else - comp_resume(:) = '' - end if - - ! Determine initial value of comp_present in infodata - to do - add this to component -#ifdef CPRPGI - if (comp(1)%oneletterid == 'a') then - call seq_infodata_getData(infodata, atm_present=comp(eci)%present) - call seq_infodata_PutData(infodata, atm_resume=comp_resume) - end if - if (comp(1)%oneletterid == 'l') then - call seq_infodata_getData(infodata, lnd_present=comp(eci)%present) - call seq_infodata_PutData(infodata, lnd_resume=comp_resume) - end if - if (comp(1)%oneletterid == 'i') then - call seq_infodata_getData(infodata, ice_present=comp(eci)%present) - call seq_infodata_PutData(infodata, ice_resume=comp_resume) - end if - if (comp(1)%oneletterid == 'o') then - call seq_infodata_getData(infodata, ocn_present=comp(eci)%present) - call seq_infodata_PutData(infodata, ocn_resume=comp_resume) - end if - if (comp(1)%oneletterid == 'r') then - call seq_infodata_getData(infodata, rof_present=comp(eci)%present) - call seq_infodata_PutData(infodata, rof_resume=comp_resume) - end if - if (comp(1)%oneletterid == 'g') then - call seq_infodata_getData(infodata, glc_present=comp(eci)%present) - call seq_infodata_PutData(infodata, glc_resume=comp_resume) - end if - if (comp(1)%oneletterid == 'w') then - call seq_infodata_getData(infodata, wav_present=comp(eci)%present) - call seq_infodata_PutData(infodata, wav_resume=comp_resume) - end if - if (comp(1)%oneletterid == 'e') then - call seq_infodata_getData(infodata, esp_present=comp(eci)%present) - end if -#else - call seq_infodata_getData(comp(1)%oneletterid, infodata, comp_present=comp(eci)%present) - - ! Does this component need to do post-data assimilation processing? 
- call seq_infodata_PutData(comp(1)%oneletterid, infodata, comp_resume=comp_resume) -#endif - end do - deallocate(comp_resume) - - end subroutine component_init_pre - - !=============================================================================== - - subroutine component_init_cc(Eclock, comp, comp_init, infodata, NLFilename, & - seq_flds_x2c_fluxes, seq_flds_c2x_fluxes) - - !--------------------------------------------------------------- - ! - ! Arguments - type(ESMF_Clock) , intent(inout) :: EClock - type(component_type) , intent(inout) :: comp(:) - interface - subroutine comp_init( Eclock, cdata, x2c, c2x, nlfilename) - use ESMF , only: ESMF_Clock - use seq_cdata_mod, only: seq_cdata - use mct_mod , only: mct_avect - implicit none - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2c - type(mct_aVect) , intent(inout) :: c2x - character(len=*), optional, intent(IN) :: NLFilename ! Namelist filename - end subroutine comp_init - end interface - type (seq_infodata_type) , intent(inout) :: infodata - character(len=*) , intent(in) :: NLFilename - character(len=*) , intent(in), optional :: seq_flds_x2c_fluxes - character(len=*) , intent(in), optional :: seq_flds_c2x_fluxes - ! - ! Local Variables - integer :: k1, k2 - integer :: eci - character(*), parameter :: subname = '(component_init_cc:mct)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - ! **** Initialize component - this initializes x2c_cc and c2x_cc *** - ! the following will call the appropriate comp_init_mct routine - - call t_set_prefixf(comp(1)%oneletterid//"_i:") - - if (comp(1)%iamin_cplallcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplallcompid, & - 'cpl2'//comp(1)%ntype(1:3)//'_init') - end if - - ! The following initializes the component instance cdata_cc (gsmap and dom), - ! x2c_cc and c2x_cc - - do eci = 1,size(comp) - if (iamroot_CPLID .and. comp(eci)%present) then - write(logunit,F00) 'Initialize component '//trim(comp(eci)%ntype) - call shr_sys_flush(logunit) - endif - - if (.not. associated(comp(eci)%x2c_cc)) allocate(comp(eci)%x2c_cc) - if (.not. associated(comp(eci)%c2x_cc)) then - allocate(comp(eci)%c2x_cc) - ! this is needed for check_fields - nullify(comp(eci)%c2x_cc%rattr) - endif - if (comp(eci)%iamin_compid .and. comp(eci)%present) then - if (drv_threading) call seq_comm_setnthreads(comp(eci)%nthreads_compid) - call shr_sys_flush(logunit) - - if (present(seq_flds_x2c_fluxes)) then - call mct_avect_vecmult(comp(eci)%x2c_cc, comp(eci)%drv2mdl, seq_flds_x2c_fluxes, mask_spval=.true.) - end if - - call t_startf('comp_init') - call comp_init( EClock, comp(eci)%cdata_cc, comp(eci)%x2c_cc, comp(eci)%c2x_cc, & - NLFilename=NLFilename ) - call t_stopf('comp_init') - if(nan_check_component_fields) then - call t_drvstartf ('check_fields') - call check_fields(comp(eci), eci) - call t_drvstopf ('check_fields') - end If - - if (present(seq_flds_c2x_fluxes)) then - call mct_avect_vecmult(comp(eci)%c2x_cc, comp(eci)%mdl2drv, seq_flds_c2x_fluxes, mask_spval=.true.) - end if - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - end if - end do - - if (comp(1)%iamin_cplcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplcompid, & - comp(1)%ntype(1:3)//'2cpl_init') - endif - - ! 
Determine final value of comp_present in infodata (after component initialization) - - do eci = 1,size(comp) -#ifdef CPRPGI - if (comp(1)%oneletterid == 'a') call seq_infodata_getData(infodata, atm_present=comp(eci)%present) - if (comp(1)%oneletterid == 'l') call seq_infodata_getData(infodata, lnd_present=comp(eci)%present) - if (comp(1)%oneletterid == 'i') call seq_infodata_getData(infodata, ice_present=comp(eci)%present) - if (comp(1)%oneletterid == 'o') call seq_infodata_getData(infodata, ocn_present=comp(eci)%present) - if (comp(1)%oneletterid == 'r') call seq_infodata_getData(infodata, rof_present=comp(eci)%present) - if (comp(1)%oneletterid == 'g') call seq_infodata_getData(infodata, glc_present=comp(eci)%present) - if (comp(1)%oneletterid == 'w') call seq_infodata_getData(infodata, wav_present=comp(eci)%present) - if (comp(1)%oneletterid == 'e') call seq_infodata_getData(infodata, esp_present=comp(eci)%present) -#else - call seq_infodata_getData(comp(1)%oneletterid, infodata, comp_present=comp(eci)%present) -#endif - end do - - - ! Initialize aream, set it to area for now until maps are read - ! in some cases, maps are not read at all !! - ! Entire domain must have reasonable values before calling xxx2xxx init - - do eci = 1,size(comp) - if (comp(eci)%iamin_compid .and. comp(eci)%present .and. & - (comp(1)%oneletterid /= 'e')) then - if (drv_threading) call seq_comm_setnthreads(comp(eci)%nthreads_compid) - k1 = mct_aVect_indexRa(comp(eci)%cdata_cc%dom%data, "area" ,perrWith='aa area ') - k2 = mct_aVect_indexRa(comp(eci)%cdata_cc%dom%data, "aream" ,perrWith='aa aream') - - comp(eci)%cdata_cc%dom%data%rAttr(k2,:) = comp(eci)%cdata_cc%dom%data%rAttr(k1,:) - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - endif - end do - - call t_unset_prefixf() - - end subroutine component_init_cc - - !=============================================================================== - - subroutine component_init_cx(comp, infodata) - - !--------------------------------------------------------------- - ! Uses - use cplcomp_exchange_mod, only: seq_mctext_gsmapinit, seq_mctext_avInit - use cplcomp_exchange_mod, only: seq_mctext_avExtend, seq_mctext_gGridInit - use cplcomp_exchange_mod, only: seq_map_init_exchange, seq_map_map_exchange - use cplcomp_exchange_mod, only: cplcomp_moab_Init - use seq_domain_mct, only: seq_domain_compare - use mct_mod, only: mct_ggrid_clean - ! - ! Arguments - type(component_type) , intent(inout) :: comp(:) - type (seq_infodata_type) , intent(inout) :: infodata - ! - ! Local Variables - integer :: eci - integer :: rc ! return code - type(mct_gGrid) :: dom_tmp ! temporary - character(*), parameter :: subname = '(component_init_cx)' - character(*), parameter :: F0I = "('"//subname//" : ', A, 2i8 )" - !--------------------------------------------------------------- - - ! Initialize driver rearrangers and AVs on driver - ! Initialize cdata_*x data - ! Zero out x2*_** in case it never gets used then it'll produce zeros in diags - ! For ensembles, create only a single dom_*x for the coupler based on the - ! first ensemble member. otherwise, just extend the dom_** and dom_*x to - ! other ensemble members. - - do eci = 1,size(comp) - if (comp(eci)%present) then - - if (iamroot_CPLID) then - write(logunit,*) ' ' - call shr_sys_flush(logunit) - end if - - if (comp(eci)%iamin_cplcompid) then - - ! Create gsmap_cx (note that comp(eci)%gsmap_cx all point to comp(1)%gsmap_cx - ! 
This will only be valid on the coupler pes - if (eci == 1) then - if (iamroot_CPLID) then - write(logunit,F0I) 'creating gsmap_cx for '//comp(eci)%ntype(1:3) - call shr_sys_flush(logunit) - end if - call seq_mctext_gsmapInit(comp(1)) - call cplcomp_moab_Init(comp(1)) - endif - - ! Create mapper_Cc2x and mapper_Cx2c - allocate(comp(eci)%mapper_Cc2x, comp(eci)%mapper_Cx2c) - if (iamroot_CPLID) then - write(logunit,F0I) 'Initializing mapper_C'//comp(eci)%ntype(1:1)//'2x',eci - call shr_sys_flush(logunit) - end if - call seq_map_init_exchange(comp(eci), flow='c2x', mapper=comp(eci)%mapper_Cc2x) - if (iamroot_CPLID) then - write(logunit,F0I) 'Initializing mapper_Cx2'//comp(eci)%ntype(1:1),eci - call shr_sys_flush(logunit) - end if - call seq_map_init_exchange(comp(eci), flow='x2c', mapper=comp(eci)%mapper_Cx2c) - - ! Create x2c_cx and c2x_cx - allocate(comp(eci)%x2c_cx, comp(eci)%c2x_cx) - call seq_mctext_avinit(comp(eci), flow='x2c') - call seq_mctext_avinit(comp(eci), flow='c2x') - - ! Create dom_cx (note that comp(eci)%dom_cx all point to comp(1)%dom_cx - ! Then verify other ensembles have same domain by comparing to dom_cx - if (eci == 1) then ! create dom_cx - if (iamroot_CPLID) then - write(logunit,F0I) 'creating dom_cx' - call shr_sys_flush(logunit) - end if - call seq_mctext_gGridInit(comp(1)) - call seq_map_map_exchange(comp(1), flow='c2x', dom_flag=.true., msgtag=comp(1)%cplcompid*10000+1*10+1) - else if (eci > 1) then - if (iamroot_CPLID) then - write(logunit,F0I) 'comparing comp domain ensemble number ',eci - call shr_sys_flush(logunit) - end if - call seq_mctext_avExtend(comp(eci)%dom_cx%data, cplid, comp(eci)%cplcompid) - call seq_mctext_gGridInit(comp(eci), dom_tmp) - call seq_map_map_exchange(comp(eci), flow='c2x', dom_flag=.true., dom_tmp=dom_tmp) - if (iamin_CPLID) then - call seq_domain_compare(comp(eci)%dom_cx, dom_tmp, mpicom_CPLID) - end if - call mct_ggrid_clean(dom_tmp,rc) - endif - - call mct_avect_zero(comp(eci)%x2c_cc) - call mct_avect_zero(comp(eci)%x2c_cx) - - end if ! if comp(eci)%iamin_cplcompid - end if ! if comp(eci)%present - end do ! end of eci loop - - end subroutine component_init_cx - - !=============================================================================== - - subroutine component_init_aream(infodata, rof_c2_ocn, samegrid_ao, samegrid_al, & - samegrid_ro, samegrid_lg) - - !--------------------------------------------------------------- - ! Description - ! Update (read) aream in domains where appropriate - ON cpl pes - ! - ! Uses - use prep_ocn_mod, only : prep_ocn_get_mapper_Fa2o - use prep_lnd_mod, only : prep_lnd_get_mapper_Sa2l - use prep_ice_mod, only : prep_ice_get_mapper_SFo2i - use prep_glc_mod, only : prep_glc_get_mapper_Sl2g - use component_type_mod, only : atm, lnd, ice, ocn, rof, glc - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - logical , intent(in) :: rof_c2_ocn - logical , intent(in) :: samegrid_ao - logical , intent(in) :: samegrid_al - logical , intent(in) :: samegrid_ro - logical , intent(in) :: samegrid_lg ! lnd & glc on same grid - ! - ! Local variables - type(mct_gsmap), pointer :: gsmap_s, gsmap_d - type(mct_ggrid), pointer :: dom_s, dom_d - type(seq_map) , pointer :: mapper_Fa2o - type(seq_map) , pointer :: mapper_Sa2l - type(seq_map) , pointer :: mapper_SFo2i - type(seq_map) , pointer :: mapper_Sl2g - logical :: atm_present ! atm present flag - logical :: lnd_present ! lnd present flag - logical :: ocn_present ! ocn present flag - logical :: ice_present ! 
ice present flag - logical :: glc_present ! glc present flag - integer :: ka,km - character(*), parameter :: subname = '(component_init_aream)' - !--------------------------------------------------------------- - - ! Note that the following is assumed to hold - all gsmaps_cx for a given - ! instance of a component (e.g. atm(i)) are identical on the coupler processes - - mapper_Fa2o => prep_ocn_get_mapper_Fa2o() - mapper_Sa2l => prep_lnd_get_mapper_Sa2l() - mapper_SFo2i => prep_ice_get_mapper_SFo2i() - mapper_Sl2g => prep_glc_get_mapper_Sl2g() - - call seq_infodata_GetData( infodata, & - atm_present=atm_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - lnd_present=lnd_present, & - glc_present=glc_present) - - if (atm_present .and. ocn_present) then - if (samegrid_ao) then - dom_s => component_get_dom_cx(atm(1)) !dom_ax - dom_d => component_get_dom_cx(ocn(1)) !dom_ox - ka = mct_aVect_indexRa(dom_s%data, "area" ) - km = mct_aVect_indexRa(dom_s%data, "aream" ) - dom_s%data%rAttr(km,:) = dom_s%data%rAttr(ka,:) - - call seq_map_map(mapper_Fa2o, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - else - gsmap_s => component_get_gsmap_cx(ocn(1)) ! gsmap_ox - gsmap_d => component_get_gsmap_cx(atm(1)) ! gsmap_ax - dom_s => component_get_dom_cx(ocn(1)) ! dom_ox - dom_d => component_get_dom_cx(atm(1)) ! dom_ax - - call seq_map_readdata('seq_maps.rc','ocn2atm_fmapname:', mpicom_CPLID, CPLID, & - gsmap_s=gsmap_s, av_s=dom_s%data, avfld_s='aream', filefld_s='area_a', & - gsmap_d=gsmap_d, av_d=dom_d%data, avfld_d='aream', filefld_d='area_b', & - string='ocn2atm aream initialization') - endif - end if - - if (ice_present .and. ocn_present) then - dom_s => component_get_dom_cx(ocn(1)) !dom_ox - dom_d => component_get_dom_cx(ice(1)) !dom_ix - - call seq_map_map(mapper_SFo2i, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - endif - - if (rof_c2_ocn) then - if (.not.samegrid_ro) then - gsmap_s => component_get_gsmap_cx(rof(1)) ! gsmap_rx - dom_s => component_get_dom_cx(rof(1)) ! dom_rx - - call seq_map_readdata('seq_maps.rc', 'rof2ocn_liq_rmapname:',mpicom_CPLID, CPLID, & - gsmap_s=gsmap_s, av_s=dom_s%data, avfld_s='aream', filefld_s='area_a', & - string='rof2ocn liq aream initialization') - - call seq_map_readdata('seq_maps.rc', 'rof2ocn_ice_rmapname:',mpicom_CPLID, CPLID, & - gsmap_s=gsmap_s, av_s=dom_s%data, avfld_s='aream', filefld_s='area_a', & - string='rof2ocn ice aream initialization') - endif - end if - - if (lnd_present .and. atm_present) then - if (samegrid_al) then - dom_s => component_get_dom_cx(atm(1)) !dom_ax - dom_d => component_get_dom_cx(lnd(1)) !dom_lx - - call seq_map_map(mapper_Sa2l, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - else - gsmap_d => component_get_gsmap_cx(lnd(1)) ! gsmap_lx - dom_d => component_get_dom_cx(lnd(1)) ! dom_lx - - call seq_map_readdata('seq_maps.rc','atm2lnd_fmapname:',mpicom_CPLID, CPLID, & - gsmap_d=gsmap_d, av_d=dom_d%data, avfld_d='aream', filefld_d='area_b', & - string='atm2lnd aream initialization') - endif - end if - - if (lnd_present .and. glc_present) then - if (samegrid_lg) then - dom_s => component_get_dom_cx(lnd(1)) !dom_lx - dom_d => component_get_dom_cx(glc(1)) !dom_gx - - call seq_map_map(mapper_Sl2g, av_s=dom_s%data, av_d=dom_d%data, fldlist='aream') - else - gsmap_d => component_get_gsmap_cx(glc(1)) ! gsmap_gx - dom_d => component_get_dom_cx(glc(1)) ! 
dom_gx - - call seq_map_readdata('seq_maps.rc','lnd2glc_fmapname:',mpicom_CPLID, CPLID, & - gsmap_d=gsmap_d, av_d=dom_d%data, avfld_d='aream', filefld_d='area_b', & - string='lnd2glc aream initialization') - endif - endif - - end subroutine component_init_aream - - !=============================================================================== - - subroutine component_init_areacor(comp, samegrid, seq_flds_c2x_fluxes) - !--------------------------------------------------------------- - ! COMPONENT PES and CPL/COMPONENT (for exchange only) - ! - ! Uses - use seq_domain_mct, only : seq_domain_areafactinit - ! - ! Arguments - type(component_type) , intent(inout) :: comp(:) - logical , intent(in) :: samegrid - character(len=*) , intent(in) :: seq_flds_c2x_fluxes - ! - ! Local Variables - integer :: eci, num_inst - character(*), parameter :: subname = '(component_init_areacor)' - !--------------------------------------------------------------- - - num_inst = size(comp) - do eci = 1,num_inst - - ! For joint cpl-component pes - if (comp(eci)%iamin_cplcompid) then - - ! Map component domain from coupler to component processes - call seq_map_map(comp(eci)%mapper_Cx2c, comp(eci)%dom_cx%data, & - comp(eci)%dom_cc%data, msgtag=comp(eci)%cplcompid*10000+eci*10+5) - - ! For only component pes - if (comp(eci)%iamin_compid) then - - ! Allocate and initialize area correction factors on component processes - ! Note that the following call allocates comp(eci)%mdl2drv(:) and comp(eci)%drv2mdl(:) - call seq_domain_areafactinit(comp(eci)%dom_cc, & - comp(eci)%mdl2drv, comp(eci)%drv2mdl, samegrid, & - comp(eci)%mpicom_compid, comp(eci)%iamroot_compid, & - 'areafact_'//comp(eci)%oneletterid//'_'//trim(comp(eci)%name)) - - ! Area correct component initialization output fields - call mct_avect_vecmult(comp(eci)%c2x_cc, comp(eci)%mdl2drv, seq_flds_c2x_fluxes, mask_spval=.true.) - - endif - - ! Map corrected initial component AVs from component to coupler pes - call seq_map_map(comp(eci)%mapper_cc2x, comp(eci)%c2x_cc, & - comp(eci)%c2x_cx, msgtag=comp(eci)%cplcompid*10000+eci*10+7) - - endif - enddo - - end subroutine component_init_areacor - - !=============================================================================== - - subroutine component_run(Eclock, comp, comp_run, infodata, & - seq_flds_x2c_fluxes, seq_flds_c2x_fluxes, & - comp_prognostic, comp_num, timer_barrier, timer_comp_run, & - run_barriers, ymd, tod, comp_layout) - - !--------------------------------------------------------------- - ! Description - ! Run component model - ! Note that the optional arguments, seq_flds_x2c_fluxes and - ! seq_flds_c2x_fluxes, are not passed for external models (ESP) - ! since these types of models do not interact through the coupler. - ! The absence of these inputs should be used to avoid coupler- - ! based actions in component_run - ! - ! 
Arguments - type(ESMF_Clock) , intent(inout) :: EClock - type(component_type) , intent(inout) :: comp(:) - interface - subroutine comp_run( Eclock, cdata, x2c, c2x) - use ESMF, only : ESMF_Clock - use seq_cdata_mod, only : seq_cdata - use mct_mod, only : mct_avect - implicit none - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2c - type(mct_aVect) , intent(inout) :: c2x - end subroutine comp_run - end interface - type (seq_infodata_type) , intent(inout) :: infodata - character(len=*) , intent(in), optional :: seq_flds_x2c_fluxes - character(len=*) , intent(in), optional :: seq_flds_c2x_fluxes - logical , intent(in) :: comp_prognostic - integer , intent(in), optional :: comp_num - character(len=*) , intent(in), optional :: timer_barrier - character(len=*) , intent(in), optional :: timer_comp_run - logical , intent(in), optional :: run_barriers - integer , intent(in), optional :: ymd ! Current date (YYYYMMDD) - integer , intent(in), optional :: tod ! Current time of day (seconds) - character(len=*) , intent(in), optional :: comp_layout - ! - ! Local Variables - integer :: eci - integer :: ierr - integer :: num_inst - real(r8) :: time_brun ! Start time - real(r8) :: time_erun ! Ending time - real(r8) :: cktime ! delta time - real(r8) :: cktime_acc(10) ! cktime accumulator array 1 = all, 2 = atm, etc - integer :: cktime_cnt(10) ! cktime counter array - logical :: seq_multi_inst ! a special case of running multiple instances on the same pes. - integer :: phase, phasemin, phasemax ! phase support - logical :: firstloop ! first time around phase loop - character(*), parameter :: subname = '(component_run:mct)' - !--------------------------------------------------------------- - - num_inst = size(comp) - seq_multi_inst = .false. - phasemin = 1 - phasemax = 1 - - if(present(comp_layout)) then - if(comp_layout .eq. "sequential" .and. num_inst > 1) then - seq_multi_inst=.true. - phasemin = 0 - endif - endif - - do phase = phasemin,phasemax - if (phase == phasemin) then - firstloop = .true. - else - firstloop = .false. - endif -#ifdef CPRPGI - if (comp(1)%oneletterid == 'a') call seq_infodata_putData(infodata, atm_phase=phase) - if (comp(1)%oneletterid == 'l') call seq_infodata_putData(infodata, lnd_phase=phase) - if (comp(1)%oneletterid == 'i') call seq_infodata_putData(infodata, ice_phase=phase) - if (comp(1)%oneletterid == 'o') call seq_infodata_putData(infodata, ocn_phase=phase) - if (comp(1)%oneletterid == 'r') call seq_infodata_putData(infodata, rof_phase=phase) - if (comp(1)%oneletterid == 'g') call seq_infodata_putData(infodata, glc_phase=phase) - if (comp(1)%oneletterid == 'w') call seq_infodata_putData(infodata, wav_phase=phase) - if (comp(1)%oneletterid == 'e') call seq_infodata_putData(infodata, esp_phase=phase) -#else - call seq_infodata_putData(comp(1)%oneletterid, infodata, comp_phase=phase) -#endif - - do eci = 1,num_inst - if (comp(eci)%iamin_compid) then - - if (present(timer_barrier)) then - if (present(run_barriers)) then - if (run_barriers) then - call t_drvstartf (trim(timer_barrier)) - call mpi_barrier(comp(eci)%mpicom_compid, ierr) - call t_drvstopf (trim(timer_barrier)) - time_brun = mpi_wtime() - endif - end if - end if - - if (present(timer_comp_run)) then - call t_drvstartf (trim(timer_comp_run), barrier=comp(eci)%mpicom_compid) - end if - if (drv_threading) call seq_comm_setnthreads(comp(1)%nthreads_compid) - - if (comp_prognostic .and. firstloop .and. 
present(seq_flds_x2c_fluxes)) then - call mct_avect_vecmult(comp(eci)%x2c_cc, comp(eci)%drv2mdl, seq_flds_x2c_fluxes, mask_spval=.true.) - end if - - call t_set_prefixf(comp(1)%oneletterid//":") - call comp_run(EClock, comp(eci)%cdata_cc, comp(eci)%x2c_cc, comp(eci)%c2x_cc) - if(nan_check_component_fields) then - call t_drvstartf ('check_fields') - call check_fields(comp(eci), eci) - call t_drvstopf ('check_fields') - endif - call t_unset_prefixf() - - if ((phase == 1) .and. present(seq_flds_c2x_fluxes)) then - call mct_avect_vecmult(comp(eci)%c2x_cc, comp(eci)%mdl2drv, seq_flds_c2x_fluxes, mask_spval=.true.) - endif - - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - - if (present(timer_comp_run)) then - call t_drvstopf (trim(timer_comp_run)) - end if - - if (present(comp_num)) then - if (present(run_barriers)) then - if (run_barriers) then - time_erun = mpi_wtime() - cktime = time_erun - time_brun - cktime_acc(comp_num) = cktime_acc(comp_num) + cktime - cktime_cnt(comp_num) = cktime_cnt(comp_num) + 1 - if (present(ymd) .and. present(tod)) then - write(logunit,107) ' rstamp ',trim(comp(eci)%name), & - '_run_time: model date = ',ymd,tod, & - ' avg dt = ',cktime_acc(comp_num)/cktime_cnt(comp_num), & - ' dt = ',cktime, ' phase = ',phase - end if - endif - end if - end if - - endif - enddo ! eci - - enddo ! phase - -107 format( 3A, 2i8, A, f12.4, A, f12.4 ) - - end subroutine component_run - - !=============================================================================== - - subroutine component_final(Eclock, comp, comp_final) - - !--------------------------------------------------------------- - ! Description - ! Finalize component model - ! - ! Arguments - type(ESMF_Clock) , intent(inout) :: EClock - type(component_type) , intent(inout) :: comp(:) - interface - subroutine comp_final( Eclock, cdata, x2c, c2x) - use ESMF, only : ESMF_Clock - use seq_cdata_mod, only : seq_cdata - use mct_mod, only : mct_avect - implicit none - type(ESMF_Clock), intent(inout) :: EClock - type(seq_cdata) , intent(inout) :: cdata - type(mct_aVect) , intent(inout) :: x2c - type(mct_aVect) , intent(inout) :: c2x - end subroutine comp_final - end interface - ! - ! Local Variables - integer :: eci - integer :: num_inst - character(*), parameter :: subname = '(component_final:mct)' - !--------------------------------------------------------------- - - num_inst = size(comp) - do eci = 1,num_inst - if (comp(eci)%iamin_compid) then - if (drv_threading) call seq_comm_setnthreads(comp(1)%nthreads_compid) - call t_set_prefixf(comp(1)%oneletterid//"_f:") - call comp_final(EClock, comp(eci)%cdata_cc, comp(eci)%x2c_cc, comp(eci)%c2x_cc) - call t_unset_prefixf() - if (drv_threading) call seq_comm_setnthreads(nthreads_GLOID) - end if - end do - - end subroutine component_final - - !=============================================================================== - - subroutine component_exch(comp, flow, infodata, infodata_string, & - mpicom_barrier, run_barriers, & - timer_barrier, timer_comp_exch, timer_map_exch, timer_infodata_exch) - - !--------------------------------------------------------------- - ! Description - ! Map x2m_mx to x2m_mm (component input av from - ! coupler processes to component model processes) - ! - ! Arguments - implicit none - type(component_type) , intent(inout) :: comp(:) - character(len=3) , intent(in) :: flow - type(seq_infodata_type) , intent(inout) :: infodata - character(len=*) , intent(in) :: infodata_string - integer , intent(in), optional :: mpicom_barrier ! 
mpicom for barrier call - logical , intent(in), optional :: run_barriers - character(len=*) , intent(in), optional :: timer_barrier ! timer - character(len=*) , intent(in), optional :: timer_comp_exch - character(len=*) , intent(in), optional :: timer_map_exch - character(len=*) , intent(in), optional :: timer_infodata_exch - ! - ! Local Variables - integer :: eci - integer :: ierr - character(*), parameter :: subname = '(component_exch)' - !--------------------------------------------------------------- - - if (present(timer_barrier)) then - if (run_barriers) then - call t_drvstartf (trim(timer_barrier)) - call mpi_barrier(comp(1)%mpicom_cplallcompid,ierr) - call t_drvstopf (trim(timer_barrier)) - endif - end if - - if (present(timer_comp_exch)) then - if (present(mpicom_barrier)) then - call t_drvstartf (trim(timer_comp_exch), cplcom=.true., barrier=mpicom_barrier) - end if - end if - - do eci = 1,size(comp) - if (comp(eci)%iamin_cplcompid) then - if (present(timer_map_exch)) then - call t_drvstartf (trim(timer_map_exch), barrier=comp(eci)%mpicom_cplcompid) - end if - - if (flow == 'x2c') then ! coupler to component - call seq_map_map(comp(eci)%mapper_Cx2c, comp(eci)%x2c_cx, comp(eci)%x2c_cc, & - msgtag=comp(eci)%cplcompid*10000+eci*10+2) - else if (flow == 'c2x') then ! component to coupler - call seq_map_map(comp(eci)%mapper_Cc2x, comp(eci)%c2x_cc, comp(eci)%c2x_cx, & - msgtag=comp(eci)%cplcompid*10000+eci*10+4) - end if - - if (present(timer_map_exch)) then - call t_drvstopf (trim(timer_map_exch)) - end if - endif - enddo - - if (present(timer_infodata_exch)) then - call t_drvstartf (trim(timer_infodata_exch), barrier=mpicom_barrier) - end if - if (flow == 'c2x') then - if (comp(1)%iamin_cplcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplcompid, trim(infodata_string)) - end if - else if (flow == 'x2c') then - if (comp(1)%iamin_cplallcompid) then - call seq_infodata_exchange(infodata, comp(1)%cplallcompid, trim(infodata_string)) - end if - endif - if (present(timer_infodata_exch)) then - call t_drvstopf (trim(timer_infodata_exch)) - end if - - if (present(timer_comp_exch)) then - if (present(mpicom_barrier)) then - call t_drvstopf (trim(timer_comp_exch), cplcom=.true.) - end if - end if - - end subroutine component_exch - - !=============================================================================== - - subroutine component_diag(infodata, comp, flow, comment, info_debug, timer_diag ) - - !--------------------------------------------------------------- - ! Description - ! Component diagnostics for send/recv to coupler - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - type(component_type) , intent(in) :: comp(:) - character(len=3) , intent(in) :: flow - character(len=*) , intent(in) :: comment - integer , intent(in) :: info_debug - character(len=*) , intent(in), optional :: timer_diag - ! - ! Local Variables - integer :: eci - character(*), parameter :: subname = '(component_diag)' - !--------------------------------------------------------------- - - if (info_debug > 1) then - if (present(timer_diag)) then - call t_drvstartf (trim(timer_diag), barrier=mpicom_CPLID) - end if - - do eci = 1,size(comp) - if (flow == 'x2c') then ! coupler to component - call seq_diag_avect_mct(infodata, CPLID, comp(eci)%x2c_cx, & - comp(eci)%dom_cx, comp(eci)%gsmap_cx, trim(comment)//comp(eci)%suffix) - end if - if (flow == 'c2x') then ! 
component to coupler - call seq_diag_avect_mct(infodata, CPLID, comp(eci)%c2x_cx, & - comp(eci)%dom_cx, comp(eci)%gsmap_cx, trim(comment)//comp(eci)%suffix) - end if - enddo - - if (present(timer_diag)) then - call t_drvstopf (trim(timer_diag)) - end if - endif - - end subroutine component_diag - -end module component_mod diff --git a/src/drivers/moab/main/component_type_mod.F90 b/src/drivers/moab/main/component_type_mod.F90 deleted file mode 100644 index 83a6be70c00..00000000000 --- a/src/drivers/moab/main/component_type_mod.F90 +++ /dev/null @@ -1,266 +0,0 @@ -module component_type_mod - - !---------------------------------------------------------------------------- - ! share code & libs - !---------------------------------------------------------------------------- - use shr_kind_mod , only: r8 => SHR_KIND_R8 - use shr_kind_mod , only: cs => SHR_KIND_CS - use shr_kind_mod , only: cl => SHR_KIND_CL - use shr_kind_mod , only: IN => SHR_KIND_IN - use seq_cdata_mod , only: seq_cdata - use seq_map_type_mod , only: seq_map - use seq_comm_mct , only: seq_comm_namelen - use seq_comm_mct , only: num_inst_atm, num_inst_lnd, num_inst_rof - use seq_comm_mct , only: num_inst_ocn, num_inst_ice, num_inst_glc - use seq_comm_mct , only: num_inst_wav, num_inst_esp - use mct_mod - - implicit none - save - private - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - ! - ! on component pes - public :: component_get_c2x_cc - public :: component_get_x2c_cc - public :: component_get_dom_cc - public :: component_get_gsmap_cc - public :: component_get_cdata_cc - public :: component_get_iamroot_compid - public :: check_fields - ! - ! on cpl pes - public :: component_get_x2c_cx - public :: component_get_c2x_cx - public :: component_get_dom_cx - public :: component_get_gsmap_cx - public :: component_get_drv2mdl - public :: component_get_mdl2drv - ! - ! on union coupler/component pes - public :: component_get_mapper_Cc2x - public :: component_get_mapper_Cx2c - ! - ! on driver pes (all pes) - public :: component_get_name - public :: component_get_suffix - public :: component_get_iamin_compid - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - type component_type - ! - ! Coupler pes - ! used by prep_xxx and all other coupler based routines - ! - type(mct_ggrid) , pointer :: dom_cx => null() ! component domain (same for all instances) - type(mct_gsMap) , pointer :: gsMap_cx => null() ! decomposition on coupler pes (same for all instances) - type(mct_aVect) , pointer :: x2c_cx => null() ! - type(mct_aVect) , pointer :: c2x_cx => null() - ! - ! Component pes - ! - type(seq_cdata) , pointer :: cdata_cc => null() - type(mct_ggrid) , pointer :: dom_cc => null() - type(mct_gsMap) , pointer :: gsMap_cc => null() ! decomposition on component pes - type(mct_aVect) , pointer :: x2c_cc => null() - type(mct_aVect) , pointer :: c2x_cc => null() - real(r8) , pointer :: drv2mdl(:) => null() ! area correction factors - real(r8) , pointer :: mdl2drv(:) => null() ! area correction factors - ! - ! Union of coupler/component pes - used by exchange routines - ! - type(seq_map) , pointer :: mapper_Cc2x => null() ! coupler -> component rearranging - type(seq_map) , pointer :: mapper_Cx2c => null() ! component -> coupler rearranging - ! - ! Driver pes (all pes) - ! 
- integer :: compid - integer :: cplcompid - integer :: cplallcompid - integer :: mpicom_compid - integer :: mpicom_cplcompid - integer :: mpicom_cplallcompid - integer :: mbcpid - logical :: iamin_compid - logical :: iamin_cplcompid - logical :: iamin_cplallcompid - logical :: iamroot_compid - logical :: present ! true => component is present and not stub - integer :: nthreads_compid - integer :: instn - character(len=CL) :: suffix - character(len=1) :: oneletterid - character(len=3) :: ntype - character(len=seq_comm_namelen) :: name - end type component_type - - public :: component_type - - !---------------------------------------------------------------------------- - ! Component type instances - !---------------------------------------------------------------------------- - - type(component_type), target :: atm(num_inst_atm) - type(component_type), target :: lnd(num_inst_lnd) - type(component_type), target :: rof(num_inst_rof) - type(component_type), target :: ocn(num_inst_ocn) - type(component_type), target :: ice(num_inst_ice) - type(component_type), target :: glc(num_inst_glc) - type(component_type), target :: wav(num_inst_wav) - type(component_type), target :: esp(num_inst_esp) - - public :: atm, lnd, rof, ocn, ice, glc, wav, esp - - !=============================================================================== - -contains - - !=============================================================================== - ! Accessor functions into component instance - !=============================================================================== - - function component_get_c2x_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_c2x_cc - component_get_c2x_cc => comp%c2x_cc - end function component_get_c2x_cc - - function component_get_c2x_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_c2x_cx - component_get_c2x_cx => comp%c2x_cx - end function component_get_c2x_cx - - function component_get_x2c_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_x2c_cc - component_get_x2c_cc => comp%x2c_cc - end function component_get_x2c_cc - - function component_get_x2c_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_avect), pointer :: component_get_x2c_cx - component_get_x2c_cx => comp%x2c_cx - end function component_get_x2c_cx - - function component_get_name(comp) - type(component_type), intent(in), target :: comp - character(len=seq_comm_namelen) :: component_get_name - component_get_name = comp%name - end function component_get_name - - function component_get_iamin_compid(comp) - type(component_type), intent(in), target :: comp - logical :: component_get_iamin_compid - component_get_iamin_compid = comp%iamin_compid - end function component_get_iamin_compid - - function component_get_iamroot_compid(comp) - type(component_type), intent(in), target :: comp - logical :: component_get_iamroot_compid - component_get_iamroot_compid = comp%iamroot_compid - end function component_get_iamroot_compid - - function component_get_suffix(comp) - type(component_type), intent(in), target :: comp - character(len=CL) :: component_get_suffix - component_get_suffix = comp%suffix - end function component_get_suffix - - function component_get_dom_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_ggrid), pointer :: component_get_dom_cx - component_get_dom_cx => comp%dom_cx - end function component_get_dom_cx - - function 
component_get_dom_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_ggrid), pointer :: component_get_dom_cc - component_get_dom_cc => comp%dom_cc - end function component_get_dom_cc - - function component_get_gsmap_cx(comp) - type(component_type), intent(in), target :: comp - type(mct_gsmap), pointer :: component_get_gsmap_cx - component_get_gsmap_cx => comp%gsmap_cx - end function component_get_gsmap_cx - - function component_get_gsmap_cc(comp) - type(component_type), intent(in), target :: comp - type(mct_gsmap), pointer :: component_get_gsmap_cc - component_get_gsmap_cc => comp%gsmap_cc - end function component_get_gsmap_cc - - function component_get_cdata_cc(comp) - type(component_type), intent(in), target :: comp - type(seq_cdata), pointer :: component_get_cdata_cc - component_get_cdata_cc => comp%cdata_cc - end function component_get_cdata_cc - - function component_get_drv2mdl(comp) - type(component_type), intent(in), target :: comp - real(r8), pointer :: component_get_drv2mdl(:) - component_get_drv2mdl => comp%drv2mdl - end function component_get_drv2mdl - - function component_get_mdl2drv(comp) - type(component_type), intent(in), target :: comp - real(r8), pointer :: component_get_mdl2drv(:) - component_get_mdl2drv => comp%mdl2drv - end function component_get_mdl2drv - - function component_get_mapper_Cc2x(comp) - type(component_type), intent(in), target :: comp - type(seq_map), pointer :: component_get_mapper_Cc2x - component_get_mapper_Cc2x => comp%mapper_Cc2x - end function component_get_mapper_Cc2x - - function component_get_mapper_Cx2c(comp) - type(component_type), intent(in), target :: comp - type(seq_map), pointer :: component_get_mapper_Cx2c - component_get_mapper_Cx2c => comp%mapper_Cx2c - end function component_get_mapper_Cx2c - - subroutine check_fields(comp, comp_index) - use shr_infnan_mod, only: shr_infnan_isnan - use mct_mod, only: mct_avect_getrlist2c, mct_gsMap_orderedPoints - type(component_type), intent(in) :: comp - integer(in), intent(in) :: comp_index - - integer(IN) :: lsize ! size of attr vect - integer(IN) :: nflds ! number of attr vects - integer(in) :: fld, n ! iterators - integer(IN) :: rank - integer(IN) :: ierr - integer(IN), pointer :: gpts(:) - character(len=CL) :: msg - - if(associated(comp%c2x_cc) .and. associated(comp%c2x_cc%rattr)) then - lsize = mct_avect_lsize(comp%c2x_cc) - nflds = size(comp%c2x_cc%rattr,1) - ! c2x_cc is allocated even if not used such as in stub models - ! do not test this case. - if(lsize <= 1 .and. 
nflds <= 1) return - if(any(shr_infnan_isnan(comp%c2x_cc%rattr))) then - do fld=1,nflds - do n=1,lsize - if(shr_infnan_isnan(comp%c2x_cc%rattr(fld,n))) then - call mpi_comm_rank(comp%mpicom_compid, rank, ierr) - call mct_gsMap_orderedPoints(comp%gsmap_cc, rank, gpts) - write(msg,'(a,a,a,i4,a,a,a,i8)')'component_mod:check_fields NaN found in ',trim(comp%name),' instance: ',& - comp_index,' field ',trim(mct_avect_getRList2c(fld, comp%c2x_cc)), ' 1d global index: ',gpts(n) - call shr_sys_abort(msg) - endif - enddo - enddo - endif - endif - end subroutine check_fields - -end module component_type_mod diff --git a/src/drivers/moab/main/cplcomp_exchange_mod.F90 b/src/drivers/moab/main/cplcomp_exchange_mod.F90 deleted file mode 100644 index b664d2fedfd..00000000000 --- a/src/drivers/moab/main/cplcomp_exchange_mod.F90 +++ /dev/null @@ -1,1076 +0,0 @@ -module cplcomp_exchange_mod - - use shr_kind_mod, only: R8 => SHR_KIND_R8, IN=>SHR_KIND_IN - use shr_kind_mod, only: CL => SHR_KIND_CL, CX => SHR_KIND_CX, CXX => SHR_KIND_CXX - use shr_sys_mod - use shr_const_mod - use shr_mct_mod, only: shr_mct_sMatPInitnc, shr_mct_queryConfigFile - use mct_mod - use seq_map_type_mod - use component_type_mod - use seq_flds_mod, only: seq_flds_dom_coord, seq_flds_dom_other - use seq_comm_mct, only: cplid, logunit - use seq_comm_mct, only: seq_comm_getinfo => seq_comm_setptrs, seq_comm_iamin - use seq_diag_mct - - use seq_comm_mct, only : mhid, mpoid, mbaxid, mboxid ! iMOAB app ids, for atm, ocean, ax mesh, ox mesh - use shr_mpi_mod, only: shr_mpi_max - - implicit none - private ! except -#include - save - - !-------------------------------------------------------------------------- - ! Public interfaces - !-------------------------------------------------------------------------- - - public :: seq_map_init_exchange ! union of cpl/component pes - public :: seq_map_map_exchange ! union of cpl/component pes - public :: seq_mctext_gsmapInit - public :: seq_mctext_avInit - public :: seq_mctext_gGridInit - public :: seq_mctext_avExtend - public :: cplcomp_moab_Init ! called to migrate MOAB mesh from - ! component pes to coupler pes - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - ! Shared routines for extension and computation of gsmaps, avs, and ggrids - private :: seq_mctext_gsmapIdentical - private :: seq_mctext_gsmapExtend - private :: seq_mctext_gsmapCreate - private :: seq_mctext_avCreate - - !-------------------------------------------------------------------------- - ! Public data - !-------------------------------------------------------------------------- - - integer,public :: seq_mctext_decomp - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - character(*),parameter :: subName = '(seq_mctext_mct)' - real(r8),parameter :: c1 = 1.0_r8 - - !======================================================================= -contains - !======================================================================= - - subroutine seq_map_init_exchange( comp, mapper, flow, string) - - implicit none - !----------------------------------------------------- - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - type(seq_map) , intent(inout), pointer :: mapper - character(len=3), intent(in) :: flow - character(len=*), intent(in),optional :: string - ! - ! 
Local Variables - ! - integer(IN) :: ID_s - integer(IN) :: ID_d - integer(IN) :: ID_join - integer(IN) :: mapid - integer(IN) :: mpicom_s, mpicom_d, mpicom_join - type(mct_gsmap) , pointer :: gsmap_s - type(mct_gsmap) , pointer :: gsmap_d - type(mct_gsmap) :: gsmap_s_join - type(mct_gsmap) :: gsmap_d_join - character(len=*),parameter :: subname = "(seq_map_init_rearrsplit) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - endif - - id_join = comp%cplcompid - call seq_comm_getinfo(ID_join, mpicom=mpicom_join) - - if (flow == 'c2x') then - gsmap_s => component_get_gsmap_cc(comp) - gsmap_d => component_get_gsmap_cx(comp) - end if - if (flow == 'x2c') then - gsmap_s => component_get_gsmap_cx(comp) - gsmap_d => component_get_gsmap_cc(comp) - end if - - if (mct_gsmap_Identical(gsmap_s,gsmap_d)) then - - call seq_map_mapmatch(mapid, gsmap_s=gsmap_s, gsmap_d=gsmap_d, strategy="copy") - - if (mapid > 0) then - call seq_map_mappoint(mapid, mapper) - else - call seq_map_mapinit(mapper, mpicom_join) - mapper%copy_only = .true. - mapper%strategy = "copy" - if (flow == 'c2x') then - mapper%gsmap_s => component_get_gsmap_cc(comp) - mapper%gsmap_d => component_get_gsmap_cx(comp) - end if - if (flow == 'x2c') then - mapper%gsmap_s => component_get_gsmap_cx(comp) - mapper%gsmap_d => component_get_gsmap_cc(comp) - end if - endif - - if (seq_comm_iamroot(ID_join)) then - write(logunit,'(2A,L2)') subname,' gsmaps ARE IDENTICAL, copyoption = ',mapper%copy_only - endif - - else - - if (seq_comm_iamroot(ID_join)) write(logunit,'(2A)') subname,' gsmaps are not identical' - - if (flow == 'c2x') then - id_s = comp%compid - id_d = cplid - end if - if (flow == 'x2c') then - id_s = cplid - id_d = comp%compid - end if - call seq_comm_getinfo(ID_s , mpicom=mpicom_s) - call seq_comm_getinfo(ID_d , mpicom=mpicom_d) - call seq_comm_getinfo(ID_join, mpicom=mpicom_join) - - ! --- Extend gsmaps to join group of pes - - call seq_mctext_gsmapExtend(gsmap_s, mpicom_s, gsmap_s_join, mpicom_join, ID_join) - call seq_mctext_gsmapExtend(gsmap_d, mpicom_d, gsmap_d_join, mpicom_join, ID_join) - - ! --- Initialize rearranger based on join gsmaps - ! --- test for the gsmaps instead of the gsmap joins because the gsmap joins are temporary - - ! ------------------------------- - ! tcx tcraig mapmatch is a problem here because we're comparing gsmaps that may not be defined - ! on some pes. first issue is whether gsmap_identical in underlying routine will abort. - ! second issue is whether different pes return different values. use mapidmin, mapidmax to - ! confirm all mapids returned are the same. if not, then just set mapid to -1 and compute - ! a new rearranger. - ! tcx not clear this works all the time, so just do not do map matching here for time being - ! Sept 2013. - ! ------------------------------- - ! mapid = -1 - ! call seq_map_mapmatch(mapid,gsmap_s=gsmap_s,gsmap_d=gsmap_d,strategy="rearrange") - ! call shr_mpi_min(mapid,mapidmin,mpicom_join,subname//' min') - ! call shr_mpi_max(mapid,mapidmax,mpicom_join,subname//' max') - ! if (mapidmin /= mapidmax) mapid = -1 - ! ------------------------------- - - ! --- Initialize rearranger - ! --- the gsmap joins are temporary so store the regular gsmaps in the mapper - call seq_map_mapinit(mapper, mpicom_join) - mapper%rearrange_only = .true. 
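Extending a gsmap to the joint component+coupler group hinges on translating process ranks from the component communicator into the joint group, which seq_mctext_gsmapExtend does further below with mpi_group_translate_ranks. A minimal, self-contained sketch of that translation; the split into a "component" sub-communicator is invented for illustration:

    program translate_ranks_demo
      use mpi
      implicit none
      integer :: ierr, wrank, wsize, color, subcomm
      integer :: grp_sub, grp_world, n, i
      integer, allocatable :: ranks_in(:), ranks_out(:)

      call mpi_init(ierr)
      call mpi_comm_rank(MPI_COMM_WORLD, wrank, ierr)
      call mpi_comm_size(MPI_COMM_WORLD, wsize, ierr)

      ! hypothetical split: the first half of the ranks play the "component" pes
      color = MPI_UNDEFINED
      if (wrank < wsize/2) color = 1
      call mpi_comm_split(MPI_COMM_WORLD, color, wrank, subcomm, ierr)

      if (subcomm /= MPI_COMM_NULL) then
         call mpi_comm_group(subcomm, grp_sub, ierr)
         call mpi_comm_group(MPI_COMM_WORLD, grp_world, ierr)
         call mpi_comm_size(subcomm, n, ierr)
         allocate(ranks_in(n), ranks_out(n))
         ranks_in = [(i, i = 0, n-1)]
         ! map every rank of the sub-communicator onto its rank in the joint group
         call mpi_group_translate_ranks(grp_sub, n, ranks_in, grp_world, ranks_out, ierr)
         if (wrank == 0) print *, 'sub ranks ', ranks_in, ' map to joint ranks ', ranks_out
         deallocate(ranks_in, ranks_out)
      end if

      call mpi_finalize(ierr)
    end program translate_ranks_demo
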
- mapper%strategy = "rearrange" - if (flow == 'c2x') then - mapper%gsmap_s => component_get_gsmap_cc(comp) - mapper%gsmap_d => component_get_gsmap_cx(comp) - end if - if (flow == 'x2c') then - mapper%gsmap_s => component_get_gsmap_cx(comp) - mapper%gsmap_d => component_get_gsmap_cc(comp) - end if - call seq_map_gsmapcheck(gsmap_s_join, gsmap_d_join) - call mct_rearr_init(gsmap_s_join, gsmap_d_join, mpicom_join, mapper%rearr) - - ! --- Clean up temporary gsmaps - - call mct_gsMap_clean(gsmap_s_join) - call mct_gsMap_clean(gsmap_d_join) - - endif - - if (seq_comm_iamroot(CPLID)) then - write(logunit,'(2A,I6,4A)') subname,' mapper counter, strategy, mapfile = ', & - mapper%counter,' ',trim(mapper%strategy),' ',trim(mapper%mapfile) - call shr_sys_flush(logunit) - endif - - end subroutine seq_map_init_exchange - - !=============================================================================== - - subroutine seq_map_map_exchange( comp, flow, dom_flag, dom_tmp, string, msgtag ) - - !----------------------------------------------------- - ! - ! Arguments - ! - type(component_type) , intent(inout) :: comp - character(len=3) , intent(in) :: flow - logical , intent(in),optional :: dom_flag - type(mct_gGrid) , intent(in),optional, target :: dom_tmp - character(len=*) , intent(in),optional :: string - integer(IN) , intent(in),optional :: msgtag - ! - ! Local Variables - ! - type(seq_map) , pointer :: mapper - type(mct_aVect), pointer :: av_s - type(mct_aVect), pointer :: av_d - type(mct_gGrid), pointer :: dom_s - type(mct_gGrid), pointer :: dom_d - integer(IN),save :: ltag ! message tag for rearrange - character(len=*),parameter :: subname = "(seq_map_map) " - !----------------------------------------------------- - - if (seq_comm_iamroot(CPLID) .and. present(string)) then - write(logunit,'(A)') subname//' called for '//trim(string) - endif - - if (flow == 'c2x') then - if (present(dom_flag)) then - dom_s => component_get_dom_cc(comp) - dom_d => component_get_dom_cx(comp) - ! Overwrite dom_d pointer if dom_tmp is present - ! Needed for backwards compatibility with domain checker in component_init_cx - if (present(dom_tmp)) then - dom_d => dom_tmp - end if - else - av_s => component_get_c2x_cc(comp) - av_d => component_get_c2x_cx(comp) - end if - mapper => component_get_mapper_Cc2x(comp) - end if - if (flow == 'x2c') then - if (present(dom_flag)) then - dom_s => component_get_dom_cx(comp) - dom_d => component_get_dom_cc(comp) - else - av_s => component_get_x2c_cx(comp) - av_d => component_get_x2c_cc(comp) - end if - mapper => component_get_mapper_Cx2c(comp) - end if - - if (present(msgtag)) then - ltag = msgtag - else - ltag = 2000 - endif - - if (mapper%copy_only) then - !------------------------------------------- - ! COPY data - !------------------------------------------- - if (present(dom_flag)) then - call mct_aVect_copy(aVin=dom_s%data, aVout=dom_d%data, vector=mct_usevector) - else - call mct_aVect_copy(aVin=av_s, aVout=av_d, vector=mct_usevector) - end if - - else if (mapper%rearrange_only) then - !------------------------------------------- - ! 
REARRANGE data - !------------------------------------------- - if (present(dom_flag)) then - call mct_rearr_rearrange(dom_s%data, dom_d%data, mapper%rearr, tag=ltag, VECTOR=mct_usevector, & - ALLTOALL=mct_usealltoall) - else - call mct_rearr_rearrange(av_s, av_d, mapper%rearr, tag=ltag, VECTOR=mct_usevector, & - ALLTOALL=mct_usealltoall) - end if - end if - - end subroutine seq_map_map_exchange - - !======================================================================= - - subroutine seq_mctext_gsmapInit(comp) - - ! This routine initializes a gsmap based on another gsmap potentially - ! on other pes. It addresses non-overlap of pes. - - !----------------------------------------------------- - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - ! - ! Local Variables - ! - integer :: mpicom_cplid - integer :: mpicom_old - integer :: mpicom_new - integer :: mpicom_join - integer :: ID_old - integer :: ID_new - integer :: ID_join - type(mct_gsMap), pointer :: gsmap_old - type(mct_gsMap), pointer :: gsmap_new - type(mct_gsMap) :: gsmap_old_join ! gsmap_old on joined id, temporary - character(len=*),parameter :: subname = "(seq_mctext_gsmapInit) " - !----------------------------------------------------- - - call seq_comm_getinfo(CPLID, mpicom=mpicom_CPLID) - - id_new = cplid - id_old = comp%compid - id_join = comp%cplcompid - - mpicom_new = mpicom_cplid - mpicom_old = comp%mpicom_compid - mpicom_join = comp%mpicom_cplcompid - - gsmap_new => component_get_gsmap_cx(comp) - gsmap_old => component_get_gsmap_cc(comp) - - call seq_comm_getinfo(ID_old ,mpicom=mpicom_old) - call seq_comm_getinfo(ID_new ,mpicom=mpicom_new) - call seq_comm_getinfo(ID_join,mpicom=mpicom_join) - - ! --- Set gsmaps - ! --- Extend the old one to now span all pes on ID_join - ! --- Create a new gsmap on pes associated with ID_new using info from the old one - - call seq_mctext_gsmapExtend(gsmap_old , mpicom_old , gsmap_old_join, mpicom_join, ID_join) - call seq_mctext_gsmapCreate(gsmap_old_join, mpicom_join , gsmap_new , mpicom_new , ID_new ) - - call mct_gsMap_clean(gsmap_old_join) - - end subroutine seq_mctext_gsmapInit - - !======================================================================= - - subroutine seq_mctext_avInit( comp, flow ) - - !----------------------------------------------------- - ! This routine initializes Avs that may need to be extended - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - character(len=3) , intent(in) :: flow - ! - ! Local Variables - ! - integer :: lsize - integer :: mpicom_cplid - integer :: mpicom_new - integer :: ID_old - integer :: ID_new - integer :: ID_join - type(mct_aVect), pointer :: AV1_old - type(mct_aVect), pointer :: AV1_new - type(mct_gsmap), pointer :: gsmap_new - character(len=*),parameter :: subname = "(seq_mctext_avInit) " - !----------------------------------------------------- - - ! --- Setup data for use and make sure the old ID is ok - - call seq_comm_getinfo(CPLID ,mpicom=mpicom_CPLID) - - id_new = cplid - id_old = comp%compid - id_join = comp%cplcompid - - mpicom_new = mpicom_cplid - - gsmap_new => component_get_gsmap_cx(comp) - - if (flow == 'c2x') then - av1_old => component_get_c2x_cc(comp) - av1_new => component_get_c2x_cx(comp) - end if - if (flow == 'x2c') then - av1_old => component_get_x2c_cc(comp) - av1_new => component_get_x2c_cx(comp) - end if - - ! 
--- Extend old avs and initialize new avs for use in the future - - lsize = 0 - if (seq_comm_iamin(ID_new)) then - lsize = mct_gsMap_lsize(gsMap_new, mpicom_new) - endif - call seq_mctext_avExtend(AV1_old, ID_old, ID_join) - call seq_mctext_avCreate(AV1_old, ID_old, AV1_new, ID_join, lsize) - - end subroutine seq_mctext_avInit - - !======================================================================= - - subroutine seq_mctext_gGridInit(comp, ggrid_new) - - !----------------------------------------------------- - ! This routine initializes gGrids that may need to be extended - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - type(mct_gGrid), optional, target, intent(inout) :: ggrid_new - ! - ! Local Variables - ! - integer :: mpicom_cplid - integer :: lsize - integer :: mpicom_new - integer :: ID_old - integer :: ID_new - integer :: ID_join - type(mct_gGrid), pointer :: GG1_old - type(mct_gGrid), pointer :: GG1_new - type(mct_gsmap), pointer :: gsmap_new - character(len=*),parameter :: subname = "(seq_mctext_gGridInit) " - !----------------------------------------------------- - - ! --- Setup data for use and make sure the old ID is ok - - call seq_comm_getinfo(CPLID, mpicom=mpicom_CPLID) - - id_new = cplid - id_old = comp%compid - id_join = comp%cplcompid - - mpicom_new = mpicom_cplid - - gsmap_new => component_get_gsmap_cx(comp) - - gg1_old => component_get_dom_cc(comp) - gg1_new => component_get_dom_cx(comp) - - ! --- Extend old ggrids and initialize new ggrids for use in the future - - lsize = 0 - if (seq_comm_iamin(ID_new)) then - lsize = mct_gsMap_lsize(gsMap_new,mpicom_new) - endif - call seq_mctext_avExtend(GG1_old%data, ID_old, ID_join) - - if (present(ggrid_new)) then - call mct_gGrid_init(GGrid=ggrid_new, CoordChars=seq_flds_dom_coord, OtherChars=seq_flds_dom_other, lsize=lsize ) - call mct_avect_zero(ggrid_new%data) - else - call mct_gGrid_init(GGrid=GG1_new, CoordChars=seq_flds_dom_coord, OtherChars=seq_flds_dom_other, lsize=lsize ) - call mct_avect_zero(GG1_new%data) - end if - - end subroutine seq_mctext_gGridInit - - !======================================================================= - - subroutine seq_mctext_gsmapExtend(gsmapi, mpicomi, gsmapo, mpicomo, compido) - - !---------------------------------------------------------------- - ! Extend/Convert a gsmap from one mpicom to another mpicom that contains - ! at least all the pes that gsmap uses, but with different ranks - !---------------------------------------------------------------- - - implicit none - type(mct_gsMap), intent(IN) :: gsmapi - integer , intent(IN) :: mpicomi - type(mct_gsMap), intent(OUT):: gsmapo - integer , intent(IN) :: mpicomo - integer , intent(IN) :: compido - - character(len=*),parameter :: subname = "(seq_mctext_gsmapExtend) " - integer :: n - integer :: ngseg - integer :: gsize - integer :: msizei,msizeo - integer :: mrank,mranko,mrankog ! sets pe rank of root mpicomi pe in mpicomo - integer :: mpigrpi,mpigrpo - integer :: ierr - integer, pointer :: pei(:),peo(:) - integer, pointer :: start(:),length(:),peloc(:) - - mranko = -1 - - ! 
--- create the new gsmap on the mpicomi root only - - if (mpicomi /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomi,mrank,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_rank i') - if (mrank == 0) then - call mpi_comm_group(mpicomi,mpigrpi,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_group i') - call mpi_comm_group(mpicomo,mpigrpo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_group o') - call mpi_comm_size(mpicomi,msizei,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_size i') - call mpi_comm_size(mpicomo,msizeo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_comm_size o') - - ! --- setup the translation of pe numbers from the old gsmap(mpicom) - ! --- to the new one, pei -> peo - - allocate(pei(0:msizei-1),peo(0:msizei-1)) - do n = 0,msizei-1 - pei(n) = n - enddo - - peo = -1 - call mpi_group_translate_ranks(mpigrpi,msizei,pei,mpigrpo,peo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_group_translate_ranks') - - do n = 0,msizei-1 - if (peo(n) < 0 .or. peo(n) > msizeo-1) then - write(logunit,*) subname,' peo out of bounds ',peo(n),msizeo - call shr_sys_abort() - endif - enddo - - mranko = peo(0) - - ! --- compute the new gsmap which has the same start and length values - ! --- but peloc is now the mapping of pei to peo - - ngseg = gsmapi%ngseg - gsize = gsmapi%gsize - allocate(start(ngseg),length(ngseg),peloc(ngseg)) - do n = 1,ngseg - start(n) = gsmapi%start(n) - length(n) = gsmapi%length(n) - peloc(n) = peo(gsmapi%pe_loc(n)) - enddo - - ! --- initialize the gsmap on the root pe - - call mct_gsmap_init(gsmapo,compido,ngseg,gsize,start,length,peloc) - - deallocate(pei,peo,start,length,peloc) - endif - endif - - ! --- broadcast via allreduce the mpicomi root pe in mpicomo space - ! --- mranko is -1 except on the root pe where is it peo of that pe - - call mpi_allreduce(mranko,mrankog,1,MPI_INTEGER,MPI_MAX,mpicomo,ierr) - call shr_mpi_chkerr(ierr,subname//' gsm_cop mpi_allreduce max') - - ! --- broadcast the gsmap to all pes in mpicomo from mrankog - - call mct_gsmap_bcast(gsmapo, mrankog, mpicomo) - - ! tcx summarize decomp info -#if (1 == 0) - write(logunit,*) trim(subname),'tcxa ',mpicomi,mpicomo - call shr_sys_flush(logunit) - call mpi_barrier(mpicomo,ierr) - - if (mpicomi /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomi,mrank,ierr) - write(logunit,*) 'tcxbi ',mrank - if (mrank == 0) then - write(logunit,*) 'tcxci ',gsmapi%ngseg,size(gsmapi%start),gsmapi%gsize,gsmapi%comp_id - do n = 1,gsmapi%ngseg - write(logunit,*) 'tcx gsmti ',n,gsmapi%start(n),gsmapi%length(n),gsmapi%pe_loc(n) - enddo - call shr_sys_flush(logunit) - endif - endif - - if (mpicomo /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomo,mrank,ierr) - write(logunit,*) 'tcxbo ',mrank - if (mrank == 0) then - write(logunit,*) 'tcxco ',gsmapo%ngseg,size(gsmapo%start),gsmapo%gsize,gsmapo%comp_id - do n = 1,gsmapo%ngseg - write(logunit,*) 'tcx gsmto ',n,gsmapo%start(n),gsmapo%length(n),gsmapo%pe_loc(n) - enddo - call shr_sys_flush(logunit) - endif - endif - - call shr_sys_flush(logunit) - call mpi_barrier(mpicomo,ierr) -#endif - - - end subroutine seq_mctext_gsmapExtend - - !======================================================================= - - subroutine seq_mctext_gsmapCreate(gsmapi, mpicomi, gsmapo, mpicomo, compido) - - !--------------------------------------------------------------------- - ! 
creates a new gsmap on a subset of pes, requires setting a new decomp - !--------------------------------------------------------------------- - - implicit none - type(mct_gsMap), intent(IN) :: gsmapi - integer , intent(IN) :: mpicomi - type(mct_gsMap), intent(OUT):: gsmapo - integer , intent(IN) :: mpicomo - integer , intent(IN) :: compido - - character(len=*),parameter :: subname = "(seq_mctext_gsmapCreate) " - integer :: n,m,k - integer :: ktot ! number of active cells in gsmap - integer :: apesi, apeso ! number of active pes in gsmap - integer :: lsizeo ! local size for lindex - integer :: ngsegi,ngsego ! ngseg of mpicomi, mpicomo - integer :: gsizei,gsizeo ! gsize of mpicomi, mpicomo - integer :: msizei,msizeo ! size of mpicomi, mpicomo - integer :: mranki,mranko ! rank in mpicomi, mpicomo - integer :: ierr - integer :: decomp_type - integer, pointer :: start(:),length(:),peloc(:),perm(:),gindex(:),lindex(:) - real(r8):: rpeloc - logical :: gsmap_bfbflag = .false. ! normally this should be set to false - - ! --- create a new gsmap on new pes based on the old gsmap - ! --- gsmapi must be known on all mpicomo pes, compute the same - ! --- thing on all pes in parallel - - if (mpicomo /= MPI_COMM_NULL) then - call mpi_comm_rank(mpicomi,mranki,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank i') - call mpi_comm_size(mpicomi,msizei,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size i') - call mpi_comm_rank(mpicomo,mranko,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_rank o') - call mpi_comm_size(mpicomo,msizeo,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_comm_size o') - - ngsegi = gsmapi%ngseg - gsizei = gsmapi%gsize - gsizeo = gsizei - call mct_gsMap_activepes(gsmapi,apesi) - - decomp_type = 0 - - if (seq_mctext_decomp == 0) then - if (msizeo == apesi) then ! preserve segments and decomp - ! For testing - set decomp_type to 1 - to have gsmapi and gsmapo identical - if (gsmap_bfbflag) then - decomp_type = 1 ! better in cpl to have all decomps "same-ish" - else - decomp_type = 2 - end if - elseif (ngsegi >= msizeo) then ! preserve segments, new decomp - decomp_type = 2 - else ! new segments - decomp_type = 3 - endif - else - decomp_type = seq_mctext_decomp - endif - - !tcx decomp_type = 3 ! over ride setting above for testing - ! if (mranko == 0) write(logunit,'(2A,4I)') trim(subname),' decomp_type =',decomp_type,ngsegi,msizeo,apesi - - select case (decomp_type) - - case(1) ! --- preserve segments and decomp --------------------- - - ! -- copy the gsmap and translate the pes - call mct_gsMap_copy(gsmapi,gsmapo) - ngsego = ngsegi - do n = 1,ngsego - gsmapo%pe_loc(n) = mod(gsmapo%pe_loc(n),msizeo) ! translate pes 1:1 from old to new - enddo - - case(2) ! --- preserve segments, new decomp -------------------- - - ! --- preserve segments, sort the start and length, assign a new pe list - ngsego = ngsegi - allocate(start(ngsego),length(ngsego),peloc(ngsego),perm(ngsego)) - do n = 1,ngsego - start(n) = gsmapi%start(n) - length(n) = gsmapi%length(n) - enddo - ! --- sort gsmap to minimize permute cost in mct - call mct_indexset(perm) - call mct_indexsort(ngsego,perm,start) - call mct_permute(start,perm,ngsego) - call mct_permute(length,perm,ngsego) - ! --- give each pe "equal" number of segments, use reals to avoid integer overflow - do n = 1,ngsego - rpeloc = (((msizeo*c1)*((n-1)*c1))/(ngsego*c1)) ! 
give each pe "equal" number of segments, use reals to avoid integer overflow - peloc(n) = int(rpeloc) - enddo - call mct_gsmap_init(gsmapo,ngsego,start,length,peloc,0,mpicomo,compido,gsizeo) - deallocate(start,length,peloc,perm) - - case(3) ! --- new segments, new decomp ------------------------- - - ! --- new segments, compute gindex, then parse the gridcells out evenly - - k = 0 - do n = 1,ngsegi - do m = 1,gsmapi%length(n) - k = k + 1 - if (k > gsizei) then - write(logunit,*) trim(subname),' ERROR in gindex ',k,gsizei - call shr_sys_abort() - endif - enddo - enddo - ktot = k - - allocate(gindex(ktot),perm(ktot)) - - k = 0 - do n = 1,ngsegi - do m = 1,gsmapi%length(n) - k = k + 1 - gindex(k) = gsmapi%start(n) + m - 1 - enddo - enddo - call mct_indexset(perm) - call mct_indexsort(ktot,perm,gindex) - call mct_permute(gindex,perm,ktot) - - k = 0 - do m = 0,msizeo-1 - lsizeo = ktot/msizeo - if (m < (ktot - lsizeo*msizeo)) lsizeo = lsizeo + 1 - if (mranko == m) then - allocate(lindex(lsizeo)) - if (k+lsizeo > ktot) then - write(logunit,*) trim(subname),' ERROR: decomp out of bounds ',mranko,k,lsizeo,ktot - call shr_sys_abort() - endif - lindex(1:lsizeo) = gindex(k+1:k+lsizeo) - ! write(logunit,*) trim(subname),' decomp is ',mranko,lsizeo,k+1,k+lsizeo - endif - k = k + lsizeo - enddo - if (k /= ktot) then - write(logunit,*) trim(subname),' ERROR: decomp incomplete ',k,ktot - call shr_sys_abort() - endif - - call mct_gsmap_init(gsmapo,lindex,mpicomo,compido,size(lindex),gsizeo) - deallocate(gindex,perm,lindex) - - case default ! --- unknown --- - write(logunit,*) trim(subname),' ERROR decomp_type unknown ',decomp_type - call shr_sys_abort(trim(subname)//' ERROR decomp_type unknown') - - end select - - if (mranko == 0) then - write(logunit,102) trim(subname),' created new gsmap decomp_type =',decomp_type - write(logunit,102) trim(subname),' ngseg/gsize = ', & - mct_gsmap_ngseg(gsmapo),mct_gsmap_gsize(gsmapo) - call mct_gsmap_activepes(gsmapo,apeso) - write(logunit,102) trim(subname),' mpisize/active_pes = ', & - msizeo,apeso - write(logunit,102) trim(subname),' avg seg per pe/ape = ', & - mct_gsmap_ngseg(gsmapo)/msizeo,mct_gsmap_ngseg(gsmapo)/apeso - write(logunit,102) trim(subname),' nlseg/maxnlsegs = ', & - mct_gsmap_nlseg(gsmapo,0),mct_gsmap_maxnlseg(gsmapo) -102 format(2A,2I8) - endif - - ! if (.not. mct_gsmap_increasing(gsmapo) ) then - ! write(logunit,*) trim(subname),' ERROR: gsmapo not increasing' - ! call shr_sys_abort() - ! endif - - endif - - end subroutine seq_mctext_gsmapCreate - - !======================================================================= - - subroutine seq_mctext_avExtend(AVin,IDin,ID) - - !----------------------------------------------------------------------- - ! Extend an AV to a larger set of pes or - ! Initialize an AV on another set of pes - ! - ! Arguments - ! - type(mct_aVect), intent(INOUT):: AVin - integer ,intent(IN) :: IDin ! ID associated with AVin - integer , intent(IN) :: ID ! ID to initialize over - ! - ! Local variables - ! - character(len=*),parameter :: subname = "(seq_mctext_avExtend) " - integer :: mpicom - integer :: rank - integer :: lsizei, lsizen - integer :: srank,srankg - integer :: ierr - character(len=CXX) :: iList,rList - !----------------------------------------------------------------------- - - call seq_comm_getinfo(ID,mpicom=mpicom,iam=rank) - - ! --- lsizen is the size of the newly initialized AV, zero is valid - ! 
--- lsizei is -1 on any peszero on any pes where AV is not yet initialized - - lsizei = -1 - if (seq_comm_iamin(IDin)) lsizei = mct_aVect_lsize(AVin) - lsizen = 0 - - ! --- find a pe that already has AVin allocated, use MPI_MAX to do so - ! --- set the pe and broadcast it to all other pes using mpi_allreduce - - srank = -1 - srankg = -1 - if (lsizei > 0) srank = rank - - call mpi_allreduce(srank,srankg,1,MPI_INTEGER,MPI_MAX,mpicom,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_allreduce max') - - if (srankg < 0) then - write(logunit,*) subname,' WARNING AVin empty ' - return - endif - - ! --- set the iList and rList from the broadcast pe (srankg) and - ! --- broadcast the lists - - iList = " " - rList = " " - if (rank == srankg) then - if (mct_aVect_nIAttr(AVin) /= 0) iList = mct_aVect_ExportIList2c(AVin) - if (mct_aVect_nRattr(AVin) /= 0) rList = mct_aVect_ExportRList2c(AVin) - endif - - call mpi_bcast(iList,len(iList),MPI_CHARACTER,srankg,mpicom,ierr) - call mpi_bcast(rList,len(rList),MPI_CHARACTER,srankg,mpicom,ierr) - - ! --- now allocate the AV on any pes where the orig size is zero. those - ! --- should be pes that either have no data and may have been allocated - ! --- before (no harm in doing it again) or have never been allocated - - if (lsizei <= 0) then - if(len_trim(iList) > 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVin,iList=iList,rList=rList,lsize=lsizen) - elseif (len_trim(iList) > 0 .and. len_trim(rList) == 0) then - call mct_aVect_init(AVin,iList=iList,lsize=lsizen) - elseif (len_trim(iList) == 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVin,rList=rList,lsize=lsizen) - endif - endif - - end subroutine seq_mctext_avExtend - - !======================================================================= - - subroutine seq_mctext_avCreate(AVin,IDin,AVout,ID,lsize) - - !----------------------------------------------------------------------- - ! Extend an AV to a larger set of pes or - ! Initialize an AV on another set of pes - !----------------------------------------------------------------------- - - implicit none - type(mct_aVect), intent(INOUT):: AVin - integer ,intent(IN) :: IDin ! ID associated with AVin - type(mct_aVect), intent(INOUT):: AVout - integer , intent(IN) :: ID ! ID to initialize over - integer , intent(IN) :: lsize - - ! Local variables - - character(len=*),parameter :: subname = "(seq_mctext_avCreate) " - integer :: mpicom - integer :: rank - integer :: lsizei, lsizen - integer :: srank,srankg - integer :: ierr - character(len=CXX) :: iList,rList - - call seq_comm_getinfo(ID,mpicom=mpicom,iam=rank) - - ! --- lsizen is the size of the newly initialized AV, zero is valid - - lsizei = -1 - if (seq_comm_iamin(IDin)) lsizei = mct_aVect_lsize(AVin) - lsizen = lsize - - ! --- find a pe that already has AVin allocated, use MPI_MAX to do so - ! --- set the pe and broadcast it to all other pes - - srank = -1 - srankg = -1 - if (lsizei > 0) srank = rank - - call mpi_allreduce(srank,srankg,1,MPI_INTEGER,MPI_MAX,mpicom,ierr) - call shr_mpi_chkerr(ierr,subname//' mpi_allreduce max') - - if (srankg < 0) then - write(logunit,*) subname,' ERROR AVin not initialized ' - call shr_sys_abort() - endif - - ! --- set the iList and rList from the broadcast pe (srankg) and - ! 
--- broadcast the lists - - iList = " " - rList = " " - if (rank == srankg) then - if (mct_aVect_nIAttr(AVin) /= 0) iList = mct_aVect_ExportIList2c(AVin) - if (mct_aVect_nRattr(AVin) /= 0) rList = mct_aVect_ExportRList2c(AVin) - endif - - call mpi_bcast(iList,len(iList),MPI_CHARACTER,srankg,mpicom,ierr) - call mpi_bcast(rList,len(rList),MPI_CHARACTER,srankg,mpicom,ierr) - - ! --- now allocate the AV on all pes. the AV should not exist before. - ! --- If it does, mct should die. - - if(len_trim(iList) > 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVout,iList=iList,rList=rList,lsize=lsizen) - elseif (len_trim(iList) > 0 .and. len_trim(rList) == 0) then - call mct_aVect_init(AVout,iList=iList,lsize=lsizen) - elseif (len_trim(iList) == 0 .and. len_trim(rList) > 0) then - call mct_aVect_init(AVout,rList=rList,lsize=lsizen) - endif - - end subroutine seq_mctext_avCreate - - !======================================================================= - - logical function seq_mctext_gsmapIdentical(gsmap1,gsmap2) - - implicit none - type(mct_gsMap), intent(IN):: gsmap1 - type(mct_gsMap), intent(IN):: gsmap2 - - ! Local variables - - character(len=*),parameter :: subname = "(seq_mctext_gsmapIdentical) " - integer :: n - logical :: identical - - !----------------------- - - identical = .true. - - ! --- continue compare --- - if (identical) then - if (mct_gsMap_gsize(gsmap1) /= mct_gsMap_gsize(gsmap2)) identical = .false. - if (mct_gsMap_ngseg(gsmap1) /= mct_gsMap_ngseg(gsmap2)) identical = .false. - endif - - ! --- continue compare --- - if (identical) then - do n = 1,mct_gsMap_ngseg(gsmap1) - if (gsmap1%start(n) /= gsmap2%start(n) ) identical = .false. - if (gsmap1%length(n) /= gsmap2%length(n)) identical = .false. - if (gsmap1%pe_loc(n) /= gsmap2%pe_loc(n)) identical = .false. - enddo - endif - - seq_mctext_gsmapIdentical = identical - - end function seq_mctext_gsmapIdentical - - !======================================================================= - - subroutine cplcomp_moab_Init(comp) - - ! This routine initializes an iMOAB app on the coupler pes, - ! corresponding to the component pes. It uses send/receive - ! from iMOAB to replicate the mesh on coupler pes - - !----------------------------------------------------- - ! - ! Arguments - ! - type(component_type), intent(inout) :: comp - ! - ! Local Variables - ! - integer :: mpicom_cplid - integer :: mpicom_old - integer :: mpicom_new - integer :: mpicom_join - integer :: ID_old - integer :: ID_new - integer :: ID_join - - character(len=*),parameter :: subname = "(cplcomp_moab_Init) " - - integer :: mpigrp_cplid ! coupler pes - integer :: mpigrp_old ! component group pes - integer, external :: iMOAB_RegisterFortranApplication, iMOAB_ReceiveMesh, iMOAB_SendMesh - integer, external :: iMOAB_WriteMesh, iMOAB_DefineTagStorage - integer :: ierr - character*32 :: appname, outfile, wopts, tagnameProj - integer :: maxMH, maxMPO ! max pids for moab apps - integer :: tagtype, numco, tagindex - - !----------------------------------------------------- - - call seq_comm_getinfo(CPLID, mpicom=mpicom_CPLID) - - id_new = cplid - id_old = comp%compid - id_join = comp%cplcompid - - mpicom_new = mpicom_cplid - mpicom_old = comp%mpicom_compid - mpicom_join = comp%mpicom_cplcompid - - call seq_comm_getinfo(ID_old ,mpicom=mpicom_old) - call seq_comm_getinfo(ID_new ,mpicom=mpicom_new) - call seq_comm_getinfo(ID_join,mpicom=mpicom_join) - - call shr_mpi_max(mhid, maxMH, mpicom_join, all=.true.) - call shr_mpi_max(mpoid, maxMPO, mpicom_join, all=.true.) 
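The shr_mpi_max calls above let every pe of the joint communicator learn whether an iMOAB application id was registered anywhere, since the ids stay at -1 on pes that do not host that component. A minimal sketch of the same pattern with a plain MPI_MAX allreduce; the even/odd split and the id value are invented for illustration:

    program handle_max_demo
      use mpi
      implicit none
      integer :: ierr, rank, local_id, max_id

      call mpi_init(ierr)
      call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

      ! a handle that is valid (>= 0) only on the pes owning the component,
      ! and -1 everywhere else; here even ranks stand in for component pes
      local_id = -1
      if (mod(rank, 2) == 0) local_id = 7   ! hypothetical app id

      ! after the max-reduction every pe of the joint communicator knows
      ! whether any pe registered the component (max_id /= -1)
      call mpi_allreduce(local_id, max_id, 1, MPI_INTEGER, MPI_MAX, MPI_COMM_WORLD, ierr)

      if (rank == 0) print *, 'component registered somewhere: ', max_id /= -1

      call mpi_finalize(ierr)
    end program handle_max_demo
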
- if (seq_comm_iamroot(CPLID) ) then - write(logunit, *) "MOAB coupling: maxMH: ", maxMH, " maxMPO: ", maxMPO - endif - ! this works now for atmosphere; - if ( comp%oneletterid == 'a' .and. maxMH /= -1) then - call seq_comm_getinfo(cplid ,mpigrp=mpigrp_cplid) ! receiver group - call seq_comm_getinfo(id_old,mpigrp=mpigrp_old) ! component group pes - ! now, if on coupler pes, receive mesh; if on comp pes, send mesh - if (MPI_COMM_NULL /= mpicom_old ) then ! it means we are on the component pes (atmosphere) - ! send mesh to coupler - ierr = iMOAB_SendMesh(mhid, mpicom_join, mpigrp_cplid, id_join); - endif - if (MPI_COMM_NULL /= mpicom_new ) then ! we are on the coupler pes - appname = "COUPLE_ATM"//CHAR(0) - ! migrated mesh gets another app id, moab atm to coupler (mbax) - ierr = iMOAB_RegisterFortranApplication(trim(appname), mpicom_new, id_join, mbaxid) - ierr = iMOAB_ReceiveMesh(mbaxid, mpicom_join, mpigrp_old, id_old) - ! debug test - outfile = 'recMeshAtm.h5m'//CHAR(0) - wopts = ';PARALLEL=WRITE_PART'//CHAR(0) -! write out the mesh file to disk - ierr = iMOAB_WriteMesh(mbaxid, trim(outfile), trim(wopts)) - endif - endif - if (comp%oneletterid == 'o' .and. maxMPO /= -1) then - call seq_comm_getinfo(cplid ,mpigrp=mpigrp_cplid) ! receiver group - call seq_comm_getinfo(id_old,mpigrp=mpigrp_old) ! component group pes - - if (MPI_COMM_NULL /= mpicom_old ) then ! it means we are on the component pes (atmosphere) - ! send mesh to coupler - ierr = iMOAB_SendMesh(mpoid, mpicom_join, mpigrp_cplid, id_join); - endif - if (MPI_COMM_NULL /= mpicom_new ) then ! we are on the coupler pes - appname = "COUPLE_MPASO"//CHAR(0) - ! migrated mesh gets another app id, moab ocean to coupler (mbox) - ierr = iMOAB_RegisterFortranApplication(trim(appname), mpicom_new, id_join, mboxid) - ierr = iMOAB_ReceiveMesh(mboxid, mpicom_join, mpigrp_old, id_old) - ! debug test - outfile = 'recMeshOcn.h5m'//CHAR(0) - wopts = ';PARALLEL=WRITE_PART'//CHAR(0) ! - - ! define here the tag that will be projected from atmosphere - tagnameProj = 'a2oTAG_proj'//CHAR(0) - tagtype = 1 ! dense, double - numco = 1 ! one value per cell - ierr = iMOAB_DefineTagStorage(mboxid, tagnameProj, tagtype, numco, tagindex ) - -! 
write out the mesh file to disk - ierr = iMOAB_WriteMesh(mboxid, trim(outfile), trim(wopts)) - endif - endif - - - - end subroutine cplcomp_moab_Init - -end module cplcomp_exchange_mod diff --git a/src/drivers/moab/main/map_glc2lnd_mod.F90 b/src/drivers/moab/main/map_glc2lnd_mod.F90 deleted file mode 120000 index b002e7703d2..00000000000 --- a/src/drivers/moab/main/map_glc2lnd_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/map_glc2lnd_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/map_lnd2glc_mod.F90 b/src/drivers/moab/main/map_lnd2glc_mod.F90 deleted file mode 120000 index c28b2f47e57..00000000000 --- a/src/drivers/moab/main/map_lnd2glc_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/map_lnd2glc_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/map_lnd2rof_irrig_mod.F90 b/src/drivers/moab/main/map_lnd2rof_irrig_mod.F90 deleted file mode 120000 index 372d06246eb..00000000000 --- a/src/drivers/moab/main/map_lnd2rof_irrig_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/map_lnd2rof_irrig_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/mrg_mod.F90 b/src/drivers/moab/main/mrg_mod.F90 deleted file mode 120000 index 39c5efe74f8..00000000000 --- a/src/drivers/moab/main/mrg_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/mrg_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/prep_aoflux_mod.F90 b/src/drivers/moab/main/prep_aoflux_mod.F90 deleted file mode 120000 index a43c2c57aa5..00000000000 --- a/src/drivers/moab/main/prep_aoflux_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/prep_aoflux_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/prep_atm_mod.F90 b/src/drivers/moab/main/prep_atm_mod.F90 deleted file mode 100644 index b614457db57..00000000000 --- a/src/drivers/moab/main/prep_atm_mod.F90 +++ /dev/null @@ -1,950 +0,0 @@ -module prep_atm_mod - - use shr_kind_mod, only: r8 => SHR_KIND_R8 - use shr_kind_mod, only: cs => SHR_KIND_CS - use shr_kind_mod, only: cl => SHR_KIND_CL - use shr_sys_mod, only: shr_sys_abort, shr_sys_flush - use seq_comm_mct, only: num_inst_atm, num_inst_ocn, num_inst_ice, num_inst_lnd, num_inst_xao, & - num_inst_frc, num_inst_max, CPLID, ATMID, logunit - use seq_comm_mct, only: seq_comm_getData=>seq_comm_setptrs - use seq_infodata_mod, only: seq_infodata_type, seq_infodata_getdata - use seq_map_type_mod - use seq_map_mod - use seq_flds_mod - use t_drv_timers_mod - use mct_mod - use perf_mod - use component_type_mod, only: component_get_x2c_cx, component_get_c2x_cx - use component_type_mod, only: atm, lnd, ocn, ice - - use shr_mpi_mod, only: shr_mpi_commrank - use seq_comm_mct, only : mbaxid ! iMOAB id for atm migrated mesh to coupler pes - use seq_comm_mct, only : mboxid ! iMOAB id for mpas ocean migrated mesh to coupler pes - use seq_comm_mct, only : mbintxoa ! iMOAB id for intx mesh between ocean and atmosphere; output from this - use seq_comm_mct, only : mhid ! iMOAB id for atm instance - use seq_comm_mct, only : seq_comm_getinfo => seq_comm_setptrs - use dimensions_mod, only : np ! for atmosphere - - - implicit none - save - PRIVATE - - !-------------------------------------------------------------------------- - ! 
Public interfaces - !-------------------------------------------------------------------------- - - public :: prep_atm_init - public :: prep_atm_mrg - - public :: prep_atm_get_l2x_ax - public :: prep_atm_get_i2x_ax - public :: prep_atm_get_o2x_ax - - public :: prep_atm_calc_l2x_ax - public :: prep_atm_calc_i2x_ax - public :: prep_atm_calc_o2x_ax - - public :: prep_atm_get_mapper_So2a - public :: prep_atm_get_mapper_Fo2a - public :: prep_atm_get_mapper_Sl2a - public :: prep_atm_get_mapper_Fl2a - public :: prep_atm_get_mapper_Si2a - public :: prep_atm_get_mapper_Fi2a - - public :: prep_atm_ocn_moab, prep_atm_migrate_moab - - !-------------------------------------------------------------------------- - ! Private interfaces - !-------------------------------------------------------------------------- - - private :: prep_atm_merge - - !-------------------------------------------------------------------------- - ! Private data - !-------------------------------------------------------------------------- - - ! mappers - type(seq_map), pointer :: mapper_So2a - type(seq_map), pointer :: mapper_Sl2a - type(seq_map), pointer :: mapper_Si2a - type(seq_map), pointer :: mapper_Fo2a ! needed for seq_frac_init - type(seq_map), pointer :: mapper_Fl2a ! needed for seq_frac_init - type(seq_map), pointer :: mapper_Fi2a ! needed for seq_frac_init - - ! attribute vectors - type(mct_aVect), pointer :: l2x_ax(:) ! Lnd export, atm grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: i2x_ax(:) ! Ice export, atm grid, cpl pes - allocated in driver - type(mct_aVect), pointer :: o2x_ax(:) ! Ocn export, atm grid, cpl pes - allocated in driver - - ! other module variables - integer :: mpicom_CPLID ! MPI cpl communicator - logical :: iamroot_CPLID ! .true. => CPLID masterproc - !================================================================================================ - -contains - - !================================================================================================ - - subroutine prep_atm_init(infodata, ocn_c2_atm, ice_c2_atm, lnd_c2_atm) - - !--------------------------------------------------------------- - ! Description - ! Initialize module attribute vectors and mappers - ! - ! Arguments - type (seq_infodata_type) , intent(inout) :: infodata - logical , intent(in) :: ocn_c2_atm ! .true. => ocn to atm coupling on - logical , intent(in) :: ice_c2_atm ! .true. => ice to atm coupling on - logical , intent(in) :: lnd_c2_atm ! .true. => lnd to atm coupling on - ! - ! Local Variables - integer :: lsize_a - integer :: eli, eii, emi - logical :: samegrid_ao ! samegrid atm and ocean - logical :: samegrid_al ! samegrid atm and land - logical :: esmf_map_flag ! .true. => use esmf for mapping - logical :: atm_present ! .true. => atm is present - logical :: ocn_present ! .true. => ocn is present - logical :: ice_present ! .true. => ice is present - logical :: lnd_present ! .true. => lnd is prsent - character(CL) :: ocn_gnam ! ocn grid - character(CL) :: atm_gnam ! atm grid - character(CL) :: lnd_gnam ! 
lnd grid - type(mct_avect), pointer :: a2x_ax - character(*), parameter :: subname = '(prep_atm_init)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - integer, external :: iMOAB_ComputeMeshIntersectionOnSphere, iMOAB_RegisterFortranApplication, & - iMOAB_WriteMesh - integer ierr, idintx, rank - character*32 :: appname, outfile, wopts, lnum - !--------------------------------------------------------------- - - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - ocn_present=ocn_present, & - ice_present=ice_present, & - lnd_present=lnd_present, & - atm_gnam=atm_gnam, & - ocn_gnam=ocn_gnam, & - lnd_gnam=lnd_gnam, & - esmf_map_flag=esmf_map_flag) - - allocate(mapper_So2a) - allocate(mapper_Sl2a) - allocate(mapper_Si2a) - allocate(mapper_Fo2a) - allocate(mapper_Fl2a) - allocate(mapper_Fi2a) - - if (atm_present) then - - call seq_comm_getData(CPLID, & - mpicom=mpicom_CPLID, iamroot=iamroot_CPLID) - - a2x_ax => component_get_c2x_cx(atm(1)) - lsize_a = mct_aVect_lsize(a2x_ax) - - allocate(l2x_ax(num_inst_lnd)) - do eli = 1,num_inst_lnd - call mct_aVect_init(l2x_ax(eli), rList=seq_flds_l2x_fields, lsize=lsize_a) - call mct_aVect_zero(l2x_ax(eli)) - end do - allocate(o2x_ax(num_inst_max)) - do emi = 1,num_inst_max - call mct_aVect_init(o2x_ax(emi), rList=seq_flds_o2x_fields, lsize=lsize_a) - call mct_aVect_zero(o2x_ax(emi)) - enddo - allocate(i2x_ax(num_inst_ice)) - do eii = 1,num_inst_ice - call mct_aVect_init(i2x_ax(eii), rList=seq_flds_i2x_fields, lsize=lsize_a) - call mct_aVect_zero(i2x_ax(eii)) - enddo - - samegrid_al = .true. - samegrid_ao = .true. - if (trim(atm_gnam) /= trim(lnd_gnam)) samegrid_al = .false. - if (trim(atm_gnam) /= trim(ocn_gnam)) samegrid_ao = .false. - - if (ocn_c2_atm) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_So2a' - end if - call seq_map_init_rcfile(mapper_So2a, ocn(1), atm(1), & - 'seq_maps.rc','ocn2atm_smapname:','ocn2atm_smaptype:',samegrid_ao, & - 'mapper_So2a initialization',esmf_map_flag) - - appname = "ATM_OCN_COU"//CHAR(0) - ! idintx is a unique number of MOAB app that takes care of intx between ocn and atm mesh - idintx = atm(1)%cplcompid + 100*ocn(1)%cplcompid ! something different, to differentiate it - ierr = iMOAB_RegisterFortranApplication(trim(appname), mpicom_CPLID, idintx, mbintxoa) - ierr = iMOAB_ComputeMeshIntersectionOnSphere (mbaxid, mboxid, mbintxoa) - wopts = CHAR(0) - call shr_mpi_commrank( mpicom_CPLID, rank ) - if (rank .lt. 5) then - write(lnum,"(I0.2)")rank ! - outfile = 'intx'//trim(lnum)// '.h5m' // CHAR(0) - ierr = iMOAB_WriteMesh(mbintxoa, outfile, wopts) ! write local intx file - endif - end if - - ! needed for domain checking - if (ocn_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fo2a' - end if - call seq_map_init_rcfile(mapper_Fo2a, ocn(1), atm(1), & - 'seq_maps.rc','ocn2atm_fmapname:','ocn2atm_fmaptype:',samegrid_ao, & - 'mapper_Fo2a initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - if (ice_c2_atm) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Si2a' - end if - call seq_map_init_rcfile(mapper_Si2a, ice(1), atm(1), & - 'seq_maps.rc','ice2atm_smapname:','ice2atm_smaptype:',samegrid_ao, & - 'mapper_Si2a initialization',esmf_map_flag) - end if - - ! 
needed for domain checking - if (ice_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fi2a' - end if - call seq_map_init_rcfile(mapper_Fi2a, ice(1), atm(1), & - 'seq_maps.rc','ice2atm_fmapname:','ice2atm_fmaptype:',samegrid_ao, & - 'mapper_Fi2a initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - ! needed for domain checking - if (lnd_present) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Fl2a' - end if - call seq_map_init_rcfile(mapper_Fl2a, lnd(1), atm(1), & - 'seq_maps.rc','lnd2atm_fmapname:','lnd2atm_fmaptype:',samegrid_al, & - 'mapper_Fl2a initialization',esmf_map_flag) - endif - call shr_sys_flush(logunit) - - if (lnd_c2_atm) then - if (iamroot_CPLID) then - write(logunit,*) ' ' - write(logunit,F00) 'Initializing mapper_Sl2a' - end if - call seq_map_init_rcfile(mapper_Sl2a, lnd(1), atm(1), & - 'seq_maps.rc','lnd2atm_smapname:','lnd2atm_smaptype:',samegrid_al, & - 'mapper_Sl2a initialization',esmf_map_flag) - end if - - - end if - - end subroutine prep_atm_init - - subroutine prep_atm_ocn_moab(infodata) - !--------------------------------------------------------------- - ! Description - ! After intersection of atm and ocean mesh, correct the communication graph - ! between atm instance and atm on coupler (due to coverage) - ! also, compute the map; this would be equivalent to seq_map_init_rcfile on the - ! mapping file computed offline (this will be now online) - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - - integer :: ierr - - logical :: atm_present ! .true. => atm is present - logical :: ocn_present ! .true. => ocn is present - integer :: id_join - integer :: mpicom_join - integer :: atmid - character*32 :: dm1, dm2, dofnameATM, dofnameOCN - integer :: orderOCN, orderATM, volumetric, noConserve, validate - - integer, external :: iMOAB_CoverageGraph, iMOAB_ComputeScalarProjectionWeights - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - ocn_present=ocn_present) - - ! it involves initial atm app; mhid; also migrate atm mesh on coupler pes, mbaxid - ! intx ocean atm are in mbintxoa ; remapper also has some info about coverage mesh - ! after this, the sending of tags from atm pes to coupler pes will use the new par comm graph, that has more precise info about - ! how to get mpicomm for joint atm + coupler - id_join = atm(1)%cplcompid - atmid = atm(1)%compid - call seq_comm_getinfo(ID_join,mpicom=mpicom_join) - - ! it happens over joint communicator - ierr = iMOAB_CoverageGraph(mpicom_join, mhid, atmid, mbaxid, id_join, mbintxoa); - - dm1 = "cgll"//CHAR(0) - dm2 = "fv"//CHAR(0) - dofnameATM="GLOBAL_DOFS"//CHAR(0) - dofnameOCN="GLOBAL_ID"//CHAR(0) - orderATM = np ! it should be 4 - orderOCN = 1 ! not much arguing - volumetric = 0 - noConserve = 0 - validate = 1 - if (mbintxoa .ge. 0 ) then - ierr = iMOAB_ComputeScalarProjectionWeights ( mbintxoa, & - trim(dm1), orderATM, trim(dm2), orderOCN, & - volumetric, noConserve, validate, & - trim(dofnameATM), trim(dofnameOCN) ) - endif - end subroutine prep_atm_ocn_moab - - subroutine prep_atm_migrate_moab(infodata) - !--------------------------------------------------------------- - ! Description - ! After a2oTAG was loaded on atm mesh, it needs to be migrated to the coupler pes, for weight application later - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - - integer :: ierr - - logical :: atm_present ! .true. => atm is present - logical :: ocn_present ! 
.true. => ocn is present - integer :: id_join - integer :: mpicom_join - integer :: atmid - character*32 :: dm1, dm2, tagName - character*32 :: outfile, wopts, tagnameProj - integer :: orderOCN, orderATM, volumetric, noConserve, validate - - integer, external :: iMOAB_SendElementTag, iMOAB_ReceiveElementTag, iMOAB_FreeSenderBuffers - integer, external :: iMOAB_ApplyScalarProjectionWeights, iMOAB_WriteMesh - - call seq_infodata_getData(infodata, & - atm_present=atm_present, & - ocn_present=ocn_present) - - ! it involves initial atm app; mhid; also migrate atm mesh on coupler pes, mbaxid - ! intx ocean atm are in mbintxoa ; remapper also has some info about coverage mesh - ! after this, the sending of tags from atm pes to coupler pes will use the new par comm graph, that has more precise info about - ! how to get mpicomm for joint atm + coupler - id_join = atm(1)%cplcompid - atmid = atm(1)%compid - call seq_comm_getinfo(ID_join,mpicom=mpicom_join) - - - - ! now send the tag a2oTAG from original atmosphere mhid(pid1) towards migrated coverage mesh (pid3), using the new coverage graph communicator - tagName = 'a2oTAG'//CHAR(0) ! it is defined in semoab_mod.F90!!! - tagNameProj = 'a2oTAG_proj'//CHAR(0) - if (mhid .ge. 0) then ! send because we are on atm pes - - ! basically, adjust the migration of the tag we want to project; it was sent initially with - ! trivial partitioning, now we need to adjust it for "coverage" mesh - ! as always, use nonblocking sends - - ierr = iMOAB_SendElementTag(mhid, atmid, id_join, tagName, mpicom_join) - - endif - if (mbaxid .ge. 0 ) then ! we are on coupler pes, for sure - ! receive on atm on coupler pes, that was redistributed according to coverage - ierr = iMOAB_ReceiveElementTag(mbaxid, id_join, atmid, tagName, mpicom_join) - !CHECKRC(ierr, "cannot receive tag values") - endif - - ! we can now free the sender buffers - if (mhid .ge. 0) then - ierr = iMOAB_FreeSenderBuffers(mhid, mpicom_join, id_join) - ! CHECKRC(ierr, "cannot free buffers used to resend atm mesh tag towards the coverage mesh") - endif - - ! we could do the projection now, on the ocean mesh, because we are on the coupler pes; - ! the actual migrate could happen later , from coupler pes to the ocean pes - if (mbintxoa .ge. 0 ) then ! we are on coupler pes, for sure - ! we could apply weights - ierr = iMOAB_ApplyScalarProjectionWeights ( mbintxoa, tagName, tagNameProj) - - ! we can also write the ocean mesh to file, just to see the projectd tag - ! write out the mesh file to disk - outfile = 'ocn_proj.h5m'//CHAR(0) - wopts = ';PARALLEL=WRITE_PART'//CHAR(0) ! - ierr = iMOAB_WriteMesh(mboxid, trim(outfile), trim(wopts)) - - !CHECKRC(ierr, "cannot receive tag values") - endif - - end subroutine prep_atm_migrate_moab - - !================================================================================================ - - subroutine prep_atm_mrg(infodata, fractions_ax, xao_ax, timer_mrg) - - !--------------------------------------------------------------- - ! Description - ! Prepare run phase, including running the merge - ! - ! Arguments - type(seq_infodata_type) , intent(in) :: infodata - type(mct_aVect) , intent(in) :: fractions_ax(:) - type(mct_aVect) , intent(in) :: xao_ax(:) - character(len=*) , intent(in) :: timer_mrg - ! - ! 
Local Variables - integer :: eli, eoi, eii, exi, efi, eai, emi - type(mct_avect), pointer :: x2a_ax - character(*), parameter :: subname = '(prep_atm_mrg)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer_mrg),barrier=mpicom_CPLID) - do eai = 1,num_inst_atm - ! Use fortran mod to address ensembles in merge - eli = mod((eai-1),num_inst_lnd) + 1 - eoi = mod((eai-1),num_inst_ocn) + 1 - eii = mod((eai-1),num_inst_ice) + 1 - exi = mod((eai-1),num_inst_xao) + 1 - efi = mod((eai-1),num_inst_frc) + 1 - emi = mod((eai-1),num_inst_max) + 1 - - x2a_ax => component_get_x2c_cx(atm(eai)) ! This is actually modifying x2a_ax - call prep_atm_merge(l2x_ax(eli), o2x_ax(emi), xao_ax(exi), i2x_ax(eii), & - fractions_ax(efi), x2a_ax) - enddo - call t_drvstopf (trim(timer_mrg)) - - end subroutine prep_atm_mrg - - !================================================================================================ - - subroutine prep_atm_merge( l2x_a, o2x_a, xao_a, i2x_a, fractions_a, x2a_a ) - - !----------------------------------------------------------------------- - ! - ! Arguments - type(mct_aVect), intent(in) :: l2x_a - type(mct_aVect), intent(in) :: o2x_a - type(mct_aVect), intent(in) :: xao_a - type(mct_aVect), intent(in) :: i2x_a - type(mct_aVect), intent(in) :: fractions_a - type(mct_aVect), intent(inout) :: x2a_a - ! - ! Local workspace - real(r8) :: fracl, fraci, fraco - integer :: n,ka,ki,kl,ko,kx,kof,kif,klf,i,i1,o1 - integer :: lsize - integer :: index_x2a_Sf_lfrac - integer :: index_x2a_Sf_ifrac - integer :: index_x2a_Sf_ofrac - character(CL),allocatable :: field_atm(:) ! string converted to char - character(CL),allocatable :: field_lnd(:) ! string converted to char - character(CL),allocatable :: field_ice(:) ! string converted to char - character(CL),allocatable :: field_xao(:) ! string converted to char - character(CL),allocatable :: field_ocn(:) ! string converted to char - character(CL),allocatable :: itemc_atm(:) ! string converted to char - character(CL),allocatable :: itemc_lnd(:) ! string converted to char - character(CL),allocatable :: itemc_ice(:) ! string converted to char - character(CL),allocatable :: itemc_xao(:) ! string converted to char - character(CL),allocatable :: itemc_ocn(:) ! string converted to char - logical :: iamroot - character(CL),allocatable :: mrgstr(:) ! temporary string - logical, save :: first_time = .true. - type(mct_aVect_sharedindices),save :: l2x_sharedindices - type(mct_aVect_sharedindices),save :: o2x_sharedindices - type(mct_aVect_sharedindices),save :: i2x_sharedindices - type(mct_aVect_sharedindices),save :: xao_sharedindices - logical, pointer, save :: lmerge(:),imerge(:),xmerge(:),omerge(:) - integer, pointer, save :: lindx(:), iindx(:), oindx(:),xindx(:) - integer, save :: naflds, nlflds,niflds,noflds,nxflds - character(*), parameter :: subname = '(prep_atm_merge) ' - !----------------------------------------------------------------------- - ! 
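prep_atm_merge pairs source and destination fields by suffix: everything from the first underscore onward, as the scan('_') calls in the first_time block below show. A small stand-alone sketch of that matching rule with made-up field names:

    program field_suffix_demo
      implicit none
      character(len=32) :: field_atm, field_ocn
      character(len=32) :: itemc_atm, itemc_ocn

      ! hypothetical field names following the <prefix>_<item> convention
      field_atm = 'Sx_t'
      field_ocn = 'So_t'

      ! keep everything from the first underscore on, as prep_atm_merge does
      itemc_atm = field_atm(scan(field_atm, '_'):)
      itemc_ocn = field_ocn(scan(field_ocn, '_'):)

      ! the two fields refer to the same item when the suffixes agree
      print *, 'same item: ', trim(itemc_atm) == trim(itemc_ocn)
    end program field_suffix_demo
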
- call seq_comm_getdata(CPLID, iamroot=iamroot) - - if (first_time) then - - naflds = mct_aVect_nRattr(x2a_a) - nlflds = mct_aVect_nRattr(l2x_a) - niflds = mct_aVect_nRattr(i2x_a) - noflds = mct_aVect_nRattr(o2x_a) - nxflds = mct_aVect_nRattr(xao_a) - - allocate(lindx(naflds), lmerge(naflds)) - allocate(iindx(naflds), imerge(naflds)) - allocate(xindx(naflds), xmerge(naflds)) - allocate(oindx(naflds), omerge(naflds)) - allocate(field_atm(naflds), itemc_atm(naflds)) - allocate(field_lnd(nlflds), itemc_lnd(nlflds)) - allocate(field_ice(niflds), itemc_ice(niflds)) - allocate(field_ocn(noflds), itemc_ocn(noflds)) - allocate(field_xao(nxflds), itemc_xao(nxflds)) - allocate(mrgstr(naflds)) - - lindx(:) = 0 - iindx(:) = 0 - xindx(:) = 0 - oindx(:) = 0 - lmerge(:) = .true. - imerge(:) = .true. - xmerge(:) = .true. - omerge(:) = .true. - - do ka = 1,naflds - field_atm(ka) = mct_aVect_getRList2c(ka, x2a_a) - itemc_atm(ka) = trim(field_atm(ka)(scan(field_atm(ka),'_'):)) - enddo - do kl = 1,nlflds - field_lnd(kl) = mct_aVect_getRList2c(kl, l2x_a) - itemc_lnd(kl) = trim(field_lnd(kl)(scan(field_lnd(kl),'_'):)) - enddo - do ki = 1,niflds - field_ice(ki) = mct_aVect_getRList2c(ki, i2x_a) - itemc_ice(ki) = trim(field_ice(ki)(scan(field_ice(ki),'_'):)) - enddo - do ko = 1,noflds - field_ocn(ko) = mct_aVect_getRList2c(ko, o2x_a) - itemc_ocn(ko) = trim(field_ocn(ko)(scan(field_ocn(ko),'_'):)) - enddo - do kx = 1,nxflds - field_xao(kx) = mct_aVect_getRList2c(kx, xao_a) - itemc_xao(kx) = trim(field_xao(kx)(scan(field_xao(kx),'_'):)) - enddo - - call mct_aVect_setSharedIndices(l2x_a, x2a_a, l2x_SharedIndices) - call mct_aVect_setSharedIndices(o2x_a, x2a_a, o2x_SharedIndices) - call mct_aVect_setSharedIndices(i2x_a, x2a_a, i2x_SharedIndices) - call mct_aVect_setSharedIndices(xao_a, x2a_a, xao_SharedIndices) - - ! Field naming rules - ! Only atm states that are Sx_... will be merged - ! Only fluxes that are F??x_... will be merged - ! All fluxes will be multiplied by corresponding component fraction - - do ka = 1,naflds - !--- document merge --- - mrgstr(ka) = subname//'x2a%'//trim(field_atm(ka))//' =' - if (field_atm(ka)(1:2) == 'PF') then - cycle ! if flux has first character as P, pass straight through - end if - if (field_atm(ka)(1:1) == 'S' .and. field_atm(ka)(2:2) /= 'x') then - cycle ! any state fields that are not Sx_ will just be copied - end if - - do kl = 1,nlflds - if (trim(itemc_atm(ka)) == trim(itemc_lnd(kl))) then - if ((trim(field_atm(ka)) == trim(field_lnd(kl)))) then - if (field_lnd(kl)(1:1) == 'F') lmerge(ka) = .false. - end if - ! --- make sure only one field matches --- - if (lindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple kl field matches for ',trim(itemc_lnd(kl)) - call shr_sys_abort(subname//' ERROR multiple kl field matches') - endif - lindx(ka) = kl - end if - end do - do ki = 1,niflds - if (field_ice(ki)(1:1) == 'F' .and. field_ice(ki)(2:4) == 'ioi') then - cycle ! ignore all fluxes that are ice/ocn fluxes - end if - if (trim(itemc_atm(ka)) == trim(itemc_ice(ki))) then - if ((trim(field_atm(ka)) == trim(field_ice(ki)))) then - if (field_ice(ki)(1:1) == 'F') imerge(ka) = .false. - end if - ! 
--- make sure only one field matches --- - if (iindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple ki field matches for ',trim(itemc_ice(ki)) - call shr_sys_abort(subname//' ERROR multiple ki field matches') - endif - iindx(ka) = ki - end if - end do - do kx = 1,nxflds - if (trim(itemc_atm(ka)) == trim(itemc_xao(kx))) then - if ((trim(field_atm(ka)) == trim(field_xao(kx)))) then - if (field_xao(kx)(1:1) == 'F') xmerge(ka) = .false. - end if - ! --- make sure only one field matches --- - if (xindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple kx field matches for ',trim(itemc_xao(kx)) - call shr_sys_abort(subname//' ERROR multiple kx field matches') - endif - xindx(ka) = kx - end if - end do - do ko = 1,noflds - if (trim(itemc_atm(ka)) == trim(itemc_ocn(ko))) then - if ((trim(field_atm(ka)) == trim(field_ocn(ko)))) then - if (field_ocn(ko)(1:1) == 'F') omerge(ka) = .false. - end if - ! --- make sure only one field matches --- - if (oindx(ka) /= 0) then - write(logunit,*) subname,' ERROR: found multiple ko field matches for ',trim(itemc_ocn(ko)) - call shr_sys_abort(subname//' ERROR multiple ko field matches') - endif - oindx(ka) = ko - end if - end do - - ! --- add some checks --- - - ! --- make sure all terms agree on merge or non-merge aspect --- - if (oindx(ka) > 0 .and. xindx(ka) > 0) then - write(logunit,*) subname,' ERROR: oindx and xindx both non-zero, not allowed ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR oindx and xindx both non-zero') - endif - - ! --- make sure all terms agree on merge or non-merge aspect --- - if (lindx(ka) > 0 .and. iindx(ka) > 0 .and. (lmerge(ka) .neqv. imerge(ka))) then - write(logunit,*) subname,' ERROR: lindx and iindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR lindx and iindx merge logic error') - endif - if (lindx(ka) > 0 .and. xindx(ka) > 0 .and. (lmerge(ka) .neqv. xmerge(ka))) then - write(logunit,*) subname,' ERROR: lindx and xindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR lindx and xindx merge logic error') - endif - if (lindx(ka) > 0 .and. oindx(ka) > 0 .and. (lmerge(ka) .neqv. omerge(ka))) then - write(logunit,*) subname,' ERROR: lindx and oindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR lindx and oindx merge logic error') - endif - if (xindx(ka) > 0 .and. iindx(ka) > 0 .and. (xmerge(ka) .neqv. imerge(ka))) then - write(logunit,*) subname,' ERROR: xindx and iindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR xindx and iindx merge logic error') - endif - if (xindx(ka) > 0 .and. oindx(ka) > 0 .and. (xmerge(ka) .neqv. omerge(ka))) then - write(logunit,*) subname,' ERROR: xindx and oindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR xindx and oindx merge logic error') - endif - if (iindx(ka) > 0 .and. oindx(ka) > 0 .and. (imerge(ka) .neqv. omerge(ka))) then - write(logunit,*) subname,' ERROR: iindx and oindx merge logic error ',trim(itemc_atm(ka)) - call shr_sys_abort(subname//' ERROR iindx and oindx merge logic error') - endif - - end do - end if - - ! Zero attribute vector - - call mct_avect_zero(x2a_a) - - ! 
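The first_time block above matches x2a fields to source fields by comparing everything from the first underscore onward (the itemc_* strings built with scan), while the leading characters decide the treatment: only Sx_* states and F??x_* fluxes are merged at all, and a flux supplied directly by a component flips that entry from accumulate to replace (the corresponding *merge flag is set to .false.). A standalone sketch of the suffix comparison, with illustrative field names, is:

program field_match_demo
  ! suffix matching in the style of prep_atm_merge; field names are only examples
  implicit none
  character(len=32) :: field_atm, field_ocn, itemc_atm, itemc_ocn
  field_atm = 'Sx_t'                                   ! merged atmosphere state
  field_ocn = 'So_t'                                   ! candidate ocean source field
  itemc_atm = trim(field_atm(scan(field_atm,'_'):))    ! keeps '_t'
  itemc_ocn = trim(field_ocn(scan(field_ocn,'_'):))    ! keeps '_t'
  if (trim(itemc_atm) == trim(itemc_ocn)) then
     write(*,*) trim(field_ocn)//' contributes to '//trim(field_atm)
  end if
end program field_match_demo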
Update surface fractions - - kif=mct_aVect_indexRA(fractions_a,"ifrac") - klf=mct_aVect_indexRA(fractions_a,"lfrac") - kof=mct_aVect_indexRA(fractions_a,"ofrac") - lsize = mct_avect_lsize(x2a_a) - - index_x2a_Sf_lfrac = mct_aVect_indexRA(x2a_a,'Sf_lfrac') - index_x2a_Sf_ifrac = mct_aVect_indexRA(x2a_a,'Sf_ifrac') - index_x2a_Sf_ofrac = mct_aVect_indexRA(x2a_a,'Sf_ofrac') - do n = 1,lsize - x2a_a%rAttr(index_x2a_Sf_lfrac,n) = fractions_a%Rattr(klf,n) - x2a_a%rAttr(index_x2a_Sf_ifrac,n) = fractions_a%Rattr(kif,n) - x2a_a%rAttr(index_x2a_Sf_ofrac,n) = fractions_a%Rattr(kof,n) - end do - - !--- document fraction operations --- - if (first_time) then - mrgstr(index_x2a_sf_lfrac) = trim(mrgstr(index_x2a_sf_lfrac))//' = fractions_a%lfrac' - mrgstr(index_x2a_sf_ifrac) = trim(mrgstr(index_x2a_sf_ifrac))//' = fractions_a%ifrac' - mrgstr(index_x2a_sf_ofrac) = trim(mrgstr(index_x2a_sf_ofrac))//' = fractions_a%ofrac' - endif - - ! Copy attributes that do not need to be merged - ! These are assumed to have the same name in - ! (o2x_a and x2a_a) and in (l2x_a and x2a_a), etc. - - !--- document copy operations --- - if (first_time) then - !--- document merge --- - do i=1,l2x_SharedIndices%shared_real%num_indices - i1=l2x_SharedIndices%shared_real%aVindices1(i) - o1=l2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = l2x%'//trim(field_lnd(i1)) - enddo - do i=1,o2x_SharedIndices%shared_real%num_indices - i1=o2x_SharedIndices%shared_real%aVindices1(i) - o1=o2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = o2x%'//trim(field_ocn(i1)) - enddo - do i=1,i2x_SharedIndices%shared_real%num_indices - i1=i2x_SharedIndices%shared_real%aVindices1(i) - o1=i2x_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = i2x%'//trim(field_ice(i1)) - enddo - do i=1,xao_SharedIndices%shared_real%num_indices - i1=xao_SharedIndices%shared_real%aVindices1(i) - o1=xao_SharedIndices%shared_real%aVindices2(i) - mrgstr(o1) = trim(mrgstr(o1))//' = xao%'//trim(field_xao(i1)) - enddo - endif - - ! call mct_aVect_copy(aVin=l2x_a, aVout=x2a_a, vector=mct_usevector) - ! call mct_aVect_copy(aVin=o2x_a, aVout=x2a_a, vector=mct_usevector) - ! call mct_aVect_copy(aVin=i2x_a, aVout=x2a_a, vector=mct_usevector) - ! call mct_aVect_copy(aVin=xao_a, aVout=x2a_a, vector=mct_usevector) - call mct_aVect_copy(aVin=l2x_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=l2x_SharedIndices) - call mct_aVect_copy(aVin=o2x_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=o2x_SharedIndices) - call mct_aVect_copy(aVin=i2x_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=i2x_SharedIndices) - call mct_aVect_copy(aVin=xao_a, aVout=x2a_a, vector=mct_usevector, sharedIndices=xao_SharedIndices) - - ! If flux to atm is coming only from the ocean (based on field being in o2x_a) - - ! -- then scale by both ocean and ice fraction - ! If flux to atm is coming only from the land or ice or coupler - ! 
-- then do scale by fraction above - - do ka = 1,naflds - !--- document merge --- - if (first_time) then - if (lindx(ka) > 0) then - if (lmerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + lfrac*l2x%'//trim(field_lnd(lindx(ka))) - else - mrgstr(ka) = trim(mrgstr(ka))//' = lfrac*l2x%'//trim(field_lnd(lindx(ka))) - end if - end if - if (iindx(ka) > 0) then - if (imerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + ifrac*i2x%'//trim(field_ice(iindx(ka))) - else - mrgstr(ka) = trim(mrgstr(ka))//' = ifrac*i2x%'//trim(field_ice(iindx(ka))) - end if - end if - if (xindx(ka) > 0) then - if (xmerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + ofrac*xao%'//trim(field_xao(xindx(ka))) - else - mrgstr(ka) = trim(mrgstr(ka))//' = ofrac*xao%'//trim(field_xao(xindx(ka))) - end if - end if - if (oindx(ka) > 0) then - if (omerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + ofrac*o2x%'//trim(field_ocn(oindx(ka))) - end if - if (.not. omerge(ka)) then - mrgstr(ka) = trim(mrgstr(ka))//' + (ifrac+ofrac)*o2x%'//trim(field_ocn(oindx(ka))) - end if - end if - endif - - do n = 1,lsize - fracl = fractions_a%Rattr(klf,n) - fraci = fractions_a%Rattr(kif,n) - fraco = fractions_a%Rattr(kof,n) - if (lindx(ka) > 0 .and. fracl > 0._r8) then - if (lmerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + l2x_a%rAttr(lindx(ka),n) * fracl - else - x2a_a%rAttr(ka,n) = l2x_a%rAttr(lindx(ka),n) * fracl - end if - end if - if (iindx(ka) > 0 .and. fraci > 0._r8) then - if (imerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + i2x_a%rAttr(iindx(ka),n) * fraci - else - x2a_a%rAttr(ka,n) = i2x_a%rAttr(iindx(ka),n) * fraci - end if - end if - if (xindx(ka) > 0 .and. fraco > 0._r8) then - if (xmerge(ka)) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + xao_a%rAttr(xindx(ka),n) * fraco - else - x2a_a%rAttr(ka,n) = xao_a%rAttr(xindx(ka),n) * fraco - end if - end if - if (oindx(ka) > 0) then - if (omerge(ka) .and. fraco > 0._r8) then - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + o2x_a%rAttr(oindx(ka),n) * fraco - end if - if (.not. omerge(ka)) then - !--- NOTE: This IS using the ocean fields and ice fraction !! --- - x2a_a%rAttr(ka,n) = o2x_a%rAttr(oindx(ka),n) * fraci - x2a_a%rAttr(ka,n) = x2a_a%rAttr(ka,n) + o2x_a%rAttr(oindx(ka),n) * fraco - end if - end if - end do - end do - - if (first_time) then - if (iamroot) then - write(logunit,'(A)') subname//' Summary:' - do ka = 1,naflds - write(logunit,'(A)') trim(mrgstr(ka)) - enddo - endif - deallocate(mrgstr) - deallocate(field_atm,itemc_atm) - deallocate(field_lnd,itemc_lnd) - deallocate(field_ice,itemc_ice) - deallocate(field_ocn,itemc_ocn) - deallocate(field_xao,itemc_xao) - endif - - first_time = .false. - - end subroutine prep_atm_merge - - !================================================================================================ - - subroutine prep_atm_calc_o2x_ax(fractions_ox, timer) - !--------------------------------------------------------------- - ! Description - ! Create o2x_ax (note that o2x_ax is a local module variable) - ! - ! Arguments - type(mct_aVect) , optional, intent(in) :: fractions_ox(:) - character(len=*), optional, intent(in) :: timer - ! - ! 
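The merge loop above builds each x2a field as a fraction-weighted sum over the contributing surfaces: land terms are scaled by lfrac, ice terms by ifrac, atmosphere/ocean (xao) and merged ocean terms by ofrac, and the non-merged ocean branch uses ifrac+ofrac so ice-covered cells also receive the ocean value. A single-cell sketch of the arithmetic, with invented numbers, is:

program merge_cell_demo
  ! fraction-weighted merge for one grid cell; the values are purely illustrative
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  real(r8) :: fracl, fraci, fraco, l2x, i2x, xao, x2a
  fracl = 0.2_r8; fraci = 0.3_r8; fraco = 0.5_r8   ! surface fractions (sum to 1)
  l2x = 290.0_r8; i2x = 260.0_r8; xao = 285.0_r8   ! the same field from each surface
  x2a = fracl*l2x + fraci*i2x + fraco*xao          ! lmerge/imerge/xmerge accumulation
  write(*,'(a,f8.3)') 'merged x2a = ', x2a         ! 58 + 78 + 142.5 = 278.5
end program merge_cell_demo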
Local Variables - integer :: eoi, efi, emi - type(mct_aVect) , pointer :: o2x_ox - character(*), parameter :: subname = '(prep_atm_calc_o2x_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do emi = 1,num_inst_max - eoi = mod((emi-1),num_inst_ocn) + 1 - efi = mod((emi-1),num_inst_frc) + 1 - - o2x_ox => component_get_c2x_cx(ocn(eoi)) - if (present(fractions_ox)) then - call seq_map_map(mapper_So2a, o2x_ox, o2x_ax(emi),& - fldlist=seq_flds_o2x_states,norm=.true., & - avwts_s=fractions_ox(efi),avwtsfld_s='ofrac') - else - call seq_map_map(mapper_So2a, o2x_ox, o2x_ax(emi),& - fldlist=seq_flds_o2x_states,norm=.true.) - endif - call seq_map_map(mapper_Fo2a, o2x_ox, o2x_ax(emi),& - fldlist=seq_flds_o2x_fluxes,norm=.true.) - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_atm_calc_o2x_ax - - !================================================================================================ - - subroutine prep_atm_calc_i2x_ax(fractions_ix, timer) - !--------------------------------------------------------------- - ! Description - ! Create i2x_ax (note that i2x_ax is a local module variable) - ! - ! Arguments - type(mct_aVect) , intent(in) :: fractions_ix(:) - character(len=*), intent(in) :: timer - ! - ! Local Variables - integer :: eii, efi - type(mct_aVect) , pointer :: i2x_ix - character(*), parameter :: subname = '(prep_atm_calc_i2x_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eii = 1,num_inst_ice - efi = mod((eii-1),num_inst_frc) + 1 - - i2x_ix => component_get_c2x_cx(ice(eii)) - call seq_map_map(mapper_Si2a, i2x_ix, i2x_ax(eii), & - fldlist=seq_flds_i2x_states, & - avwts_s=fractions_ix(eii), avwtsfld_s='ifrac') - call seq_map_map(mapper_Fi2a, i2x_ix, i2x_ax(eii), & - fldlist=seq_flds_i2x_fluxes, & - avwts_s=fractions_ix(eii), avwtsfld_s='ifrac') - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_atm_calc_i2x_ax - - !================================================================================================ - - subroutine prep_atm_calc_l2x_ax(fractions_lx, timer) - !--------------------------------------------------------------- - ! Description - ! Create l2x_ax (note that l2x_ax is a local module variable) - ! - ! Arguments - type(mct_aVect) , intent(in) :: fractions_lx(:) - character(len=*), intent(in) :: timer - ! - ! 
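In prep_atm_calc_o2x_ax above, ocean states are remapped with avwts_s='ofrac' and norm=.true., i.e. each source value is weighted by its cell's ocean fraction and the mapped result is renormalized by the mapped fraction, so land-dominated source cells do not dilute the destination value. The sketch below shows that formula for one destination cell with hypothetical map weights; it illustrates the arithmetic only, not the internals of seq_map_map.

program normed_map_demo
  ! fraction-weighted, renormalized remap for a single destination cell
  implicit none
  integer, parameter :: r8 = selected_real_kind(12)
  integer, parameter :: nsrc = 3
  real(r8) :: w(nsrc), ofrac(nsrc), so_t(nsrc), num, den
  integer  :: i
  w     = (/0.5_r8, 0.3_r8, 0.2_r8/)       ! hypothetical map weights
  ofrac = (/1.0_r8, 0.4_r8, 0.0_r8/)       ! ocean fraction of each source cell
  so_t  = (/300.0_r8, 299.0_r8, 0.0_r8/)   ! field values (last cell is all land)
  num = 0.0_r8; den = 0.0_r8
  do i = 1, nsrc
     num = num + w(i)*ofrac(i)*so_t(i)
     den = den + w(i)*ofrac(i)
  end do
  write(*,'(a,f8.3)') 'mapped value = ', num/den   ! only ocean-covered sources contribute
end program normed_map_demo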
Local Variables - integer :: eli, efi - type(mct_avect), pointer :: l2x_lx - character(*), parameter :: subname = '(prep_atm_calc_l2x_ax)' - character(*), parameter :: F00 = "('"//subname//" : ', 4A )" - !--------------------------------------------------------------- - - call t_drvstartf (trim(timer),barrier=mpicom_CPLID) - do eli = 1,num_inst_lnd - efi = mod((eli-1),num_inst_frc) + 1 - - l2x_lx => component_get_c2x_cx(lnd(eli)) - call seq_map_map(mapper_Sl2a, l2x_lx, l2x_ax(eli), & - fldlist=seq_flds_l2x_states, norm=.true., & - avwts_s=fractions_lx(efi), avwtsfld_s='lfrin') - call seq_map_map(mapper_Fl2a, l2x_lx, l2x_ax(eli), & - fldlist=seq_flds_l2x_fluxes, norm=.true., & - avwts_s=fractions_lx(efi), avwtsfld_s='lfrin') - enddo - call t_drvstopf (trim(timer)) - - end subroutine prep_atm_calc_l2x_ax - - !================================================================================================ - - function prep_atm_get_l2x_ax() - type(mct_aVect), pointer :: prep_atm_get_l2x_ax(:) - prep_atm_get_l2x_ax => l2x_ax(:) - end function prep_atm_get_l2x_ax - - function prep_atm_get_i2x_ax() - type(mct_aVect), pointer :: prep_atm_get_i2x_ax(:) - prep_atm_get_i2x_ax => i2x_ax(:) - end function prep_atm_get_i2x_ax - - function prep_atm_get_o2x_ax() - type(mct_aVect), pointer :: prep_atm_get_o2x_ax(:) - prep_atm_get_o2x_ax => o2x_ax(:) - end function prep_atm_get_o2x_ax - - function prep_atm_get_mapper_So2a() - type(seq_map), pointer :: prep_atm_get_mapper_So2a - prep_atm_get_mapper_So2a => mapper_So2a - end function prep_atm_get_mapper_So2a - - function prep_atm_get_mapper_Fo2a() - type(seq_map), pointer :: prep_atm_get_mapper_Fo2a - prep_atm_get_mapper_Fo2a => mapper_Fo2a - end function prep_atm_get_mapper_Fo2a - - function prep_atm_get_mapper_Sl2a() - type(seq_map), pointer :: prep_atm_get_mapper_Sl2a - prep_atm_get_mapper_Sl2a => mapper_Sl2a - end function prep_atm_get_mapper_Sl2a - - function prep_atm_get_mapper_Fl2a() - type(seq_map), pointer :: prep_atm_get_mapper_Fl2a - prep_atm_get_mapper_Fl2a => mapper_Fl2a - end function prep_atm_get_mapper_Fl2a - - function prep_atm_get_mapper_Si2a() - type(seq_map), pointer :: prep_atm_get_mapper_Si2a - prep_atm_get_mapper_Si2a => mapper_Si2a - end function prep_atm_get_mapper_Si2a - - function prep_atm_get_mapper_Fi2a() - type(seq_map), pointer :: prep_atm_get_mapper_Fi2a - prep_atm_get_mapper_Fi2a => mapper_Fi2a - end function prep_atm_get_mapper_Fi2a - - !================================================================================================ - -end module prep_atm_mod diff --git a/src/drivers/moab/main/prep_glc_mod.F90 b/src/drivers/moab/main/prep_glc_mod.F90 deleted file mode 120000 index 1197d7edd4a..00000000000 --- a/src/drivers/moab/main/prep_glc_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/prep_glc_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/prep_ice_mod.F90 b/src/drivers/moab/main/prep_ice_mod.F90 deleted file mode 120000 index 3e2cb709e6c..00000000000 --- a/src/drivers/moab/main/prep_ice_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/prep_ice_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/prep_lnd_mod.F90 b/src/drivers/moab/main/prep_lnd_mod.F90 deleted file mode 120000 index 466a61fb932..00000000000 --- a/src/drivers/moab/main/prep_lnd_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/prep_lnd_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/prep_ocn_mod.F90 b/src/drivers/moab/main/prep_ocn_mod.F90 deleted file mode 120000 index 
0b653e8ccc5..00000000000 --- a/src/drivers/moab/main/prep_ocn_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/prep_ocn_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/prep_rof_mod.F90 b/src/drivers/moab/main/prep_rof_mod.F90 deleted file mode 120000 index eebf226afb1..00000000000 --- a/src/drivers/moab/main/prep_rof_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/prep_rof_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/prep_wav_mod.F90 b/src/drivers/moab/main/prep_wav_mod.F90 deleted file mode 120000 index 6fac50fc1c1..00000000000 --- a/src/drivers/moab/main/prep_wav_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/prep_wav_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_diag_mct.F90 b/src/drivers/moab/main/seq_diag_mct.F90 deleted file mode 120000 index a0acab32ac3..00000000000 --- a/src/drivers/moab/main/seq_diag_mct.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_diag_mct.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_domain_mct.F90 b/src/drivers/moab/main/seq_domain_mct.F90 deleted file mode 120000 index c1c87c7f3e4..00000000000 --- a/src/drivers/moab/main/seq_domain_mct.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_domain_mct.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_flux_mct.F90 b/src/drivers/moab/main/seq_flux_mct.F90 deleted file mode 120000 index 9401b5e8617..00000000000 --- a/src/drivers/moab/main/seq_flux_mct.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_flux_mct.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_frac_mct.F90 b/src/drivers/moab/main/seq_frac_mct.F90 deleted file mode 120000 index a504d803649..00000000000 --- a/src/drivers/moab/main/seq_frac_mct.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_frac_mct.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_hist_mod.F90 b/src/drivers/moab/main/seq_hist_mod.F90 deleted file mode 120000 index 6d8e2d7cf58..00000000000 --- a/src/drivers/moab/main/seq_hist_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_hist_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_io_mod.F90 b/src/drivers/moab/main/seq_io_mod.F90 deleted file mode 120000 index 0d9751fb58d..00000000000 --- a/src/drivers/moab/main/seq_io_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_io_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_map_mod.F90 b/src/drivers/moab/main/seq_map_mod.F90 deleted file mode 120000 index 642fcf290b5..00000000000 --- a/src/drivers/moab/main/seq_map_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_map_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_map_type_mod.F90 b/src/drivers/moab/main/seq_map_type_mod.F90 deleted file mode 120000 index 40f864c8129..00000000000 --- a/src/drivers/moab/main/seq_map_type_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_map_type_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/seq_rest_mod.F90 b/src/drivers/moab/main/seq_rest_mod.F90 deleted file mode 120000 index 88f692ba85e..00000000000 --- a/src/drivers/moab/main/seq_rest_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/main/seq_rest_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/main/t_driver_timers_mod.F90 b/src/drivers/moab/main/t_driver_timers_mod.F90 deleted file mode 120000 index 214a6163848..00000000000 --- a/src/drivers/moab/main/t_driver_timers_mod.F90 +++ /dev/null @@ -1 +0,0 @@ 
-../../mct/main/t_driver_timers_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/CMakeLists.txt b/src/drivers/moab/shr/CMakeLists.txt deleted file mode 120000 index fca71ccdbd6..00000000000 --- a/src/drivers/moab/shr/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/CMakeLists.txt \ No newline at end of file diff --git a/src/drivers/moab/shr/glc_elevclass_mod.F90 b/src/drivers/moab/shr/glc_elevclass_mod.F90 deleted file mode 120000 index 4061b5b7624..00000000000 --- a/src/drivers/moab/shr/glc_elevclass_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/glc_elevclass_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/seq_cdata_mod.F90 b/src/drivers/moab/shr/seq_cdata_mod.F90 deleted file mode 120000 index a80426bb219..00000000000 --- a/src/drivers/moab/shr/seq_cdata_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/seq_cdata_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/seq_comm_mct.F90 b/src/drivers/moab/shr/seq_comm_mct.F90 deleted file mode 120000 index 51819f1c2f6..00000000000 --- a/src/drivers/moab/shr/seq_comm_mct.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/seq_comm_mct.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/seq_drydep_mod.F90 b/src/drivers/moab/shr/seq_drydep_mod.F90 deleted file mode 120000 index a0962db2b8d..00000000000 --- a/src/drivers/moab/shr/seq_drydep_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/seq_drydep_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/seq_flds_mod.F90 b/src/drivers/moab/shr/seq_flds_mod.F90 deleted file mode 120000 index e0a03d2ecca..00000000000 --- a/src/drivers/moab/shr/seq_flds_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/seq_flds_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/seq_infodata_mod.F90 b/src/drivers/moab/shr/seq_infodata_mod.F90 deleted file mode 120000 index e23bf6c1ac6..00000000000 --- a/src/drivers/moab/shr/seq_infodata_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/seq_infodata_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/seq_io_read_mod.F90 b/src/drivers/moab/shr/seq_io_read_mod.F90 deleted file mode 120000 index 8dfa8024c97..00000000000 --- a/src/drivers/moab/shr/seq_io_read_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/seq_io_read_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/seq_timemgr_mod.F90 b/src/drivers/moab/shr/seq_timemgr_mod.F90 deleted file mode 120000 index e21cfd757dd..00000000000 --- a/src/drivers/moab/shr/seq_timemgr_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/seq_timemgr_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/shr_carma_mod.F90 b/src/drivers/moab/shr/shr_carma_mod.F90 deleted file mode 120000 index 24f10077f3f..00000000000 --- a/src/drivers/moab/shr/shr_carma_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/shr_carma_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/shr_expr_parser_mod.F90 b/src/drivers/moab/shr/shr_expr_parser_mod.F90 deleted file mode 120000 index f36ff1efa13..00000000000 --- a/src/drivers/moab/shr/shr_expr_parser_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/shr_expr_parser_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/shr_fire_emis_mod.F90 b/src/drivers/moab/shr/shr_fire_emis_mod.F90 deleted file mode 120000 index 880d3f976ac..00000000000 --- a/src/drivers/moab/shr/shr_fire_emis_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/shr_fire_emis_mod.F90 \ No newline at end of file diff --git 
a/src/drivers/moab/shr/shr_megan_mod.F90 b/src/drivers/moab/shr/shr_megan_mod.F90 deleted file mode 120000 index cdc6f725a95..00000000000 --- a/src/drivers/moab/shr/shr_megan_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/shr_megan_mod.F90 \ No newline at end of file diff --git a/src/drivers/moab/shr/shr_ndep_mod.F90 b/src/drivers/moab/shr/shr_ndep_mod.F90 deleted file mode 120000 index 2d632816948..00000000000 --- a/src/drivers/moab/shr/shr_ndep_mod.F90 +++ /dev/null @@ -1 +0,0 @@ -../../mct/shr/shr_ndep_mod.F90 \ No newline at end of file diff --git a/src/externals/genf90/ChangeLog b/src/externals/genf90/ChangeLog deleted file mode 100644 index 32db415b0ce..00000000000 --- a/src/externals/genf90/ChangeLog +++ /dev/null @@ -1,62 +0,0 @@ -================================================================================ -SVN $Id: ChangeLog 45058 2013-03-20 16:12:21Z jedwards $ -SVN $URL: https://svn-ccsm-models.cgd.ucar.edu/tools/genf90/trunk/ChangeLog $ -================================================================================ -This file describes what tags were created and why -=========================== -Originator: jedwards -Date: Jan 21, 2014 -Model: genf90 -Version: genf90_140121 -One-line summary: add nctype and ctype data type support - - -=========================== -Originator: jedwards -Date: Nov 20, 2013 -Model: genf90 -Version: genf90_131120 -One-line summary: Added a documentation header and a logical type - -=========================== -Originator: jedwards -Date: Nov 14, 2013 -Model: genf90 -Version: genf90_131114 -One-line summary: minor change to avoid undefined variable - -=========================== -Originator: jedwards -Date: Nov 13, 2013 -Model: genf90 -Version: genf90_131113 -One-line summary: handles derived types and interface blocks better (Sean -Santos) - - -=========================== -Originator: jedwards -Date: Apr 02, 2013 -Model: genf90 -Version: genf90_130402 -One-line summary: correct when dtypes.h is generated - - M genf90.pl - -=========================== -Originator: jedwards -Date: Mar 20, 2013 -Model: genf90 -Version: genf90_130320a -One-line summary: generate helper file dtypes.h - M genf90.pl - -=========================== -Originator: jedwards -Date: Mar 20, 2013 -Model: genf90 -Version: genf90_130320 -One-line summary: Move to new directory - -=========================== - diff --git a/src/externals/genf90/genf90.pl b/src/externals/genf90/genf90.pl deleted file mode 100755 index 5d35112e953..00000000000 --- a/src/externals/genf90/genf90.pl +++ /dev/null @@ -1,387 +0,0 @@ -#!/usr/bin/env perl -use strict; -my $outfile; -# Beginning with F90, Fortran has strict typing of variables based on "TKR" -# (type, kind, and rank). In many cases we want to write subroutines that -# provide the same functionality for different variable types and ranks. In -# order to do this without cut-and-paste duplication of code, we create a -# template file with the extension ".F90.in", which can be parsed by this script -# to generate F90 code for all of the desired specific types. -# -# Keywords are delimited by curly brackets: {} -# -# {TYPE} and {DIMS} are used to generate the specific subroutine names from the -# generic template -# {TYPE} : Variable type name; implemented types are character, 4 or 8 byte real, -# and 4 or 8 byte integer. -# allowed values: text, real, double, int, long, logical -# default values: text, real, double, int -# {VTYPE} : Used to generate variable declarations to match the specific type. 
-# if {TYPE}=double then {VTYPE} is "real(r8)" -# {ITYPE}, {ITYPENAME} : Used to generate CPP statements for the specific type. -# {MPITYPE} : Used to generate MPI types corresponding to the specific type. -# -# {DIMS} : Rank of arrays, "0" for scalar. -# allowed values: 0-7 -# default values : 0-5 -# {DIMSTR} : Generates the parenthesis and colons used for a variable -# declaration of {DIMS} dimensions. -# if {DIMS}=3 then {DIMSTR} is (:,:,:) -# {REPEAT} : Repeats an expression for each number from 1 to {DIMS}, with each -# iteration separated by commas. -# {REPEAT: foo(#, bar)} -# expands to this: -# foo(1, bar), foo(2, bar), foo(3, bar), ... - -# defaults -my @types = qw(text real double int); -my $vtype = {'text' => 'character(len=*)', - 'real' => 'real(r4)', - 'double' => 'real(r8)', - 'int' => 'integer(i4)', - 'long' => 'integer(i8)', - 'logical' => 'logical' }; -my $itype = {'text' => 100, - 'real' => 101, - 'double' => 102, - 'int' => 103, - 'long' => 104, - 'logical' => 105}; -my $itypename = {'text' => 'TYPETEXT', - 'real' => 'TYPEREAL', - 'double' => 'TYPEDOUBLE', - 'int' => 'TYPEINT', - 'long' => 'TYPELONG', - 'logical' => 'TYPELOGICAL'}; -my $mpitype = {'text' => 'MPI_CHARACTER', - 'real' => 'MPI_REAL4', - 'double' => 'MPI_REAL8', - 'int' => 'MPI_INTEGER'}; -# Netcdf C datatypes -my $nctype = {'text' => 'text', - 'real' => 'float', - 'double' => 'double', - 'int' => 'int'}; -# C interoperability types -my $ctype = {'text' => 'character(C_CHAR)', - 'real' => 'real(C_FLOAT)', - 'double' => 'real(C_DOUBLE)', - 'int' => 'integer(C_INT)'}; - - - -my @dims =(0..5); - -my $write_dtypes = "no"; -# begin - -foreach(@ARGV){ - my $infile = $_; - usage() unless($infile =~ /(.*.F90).in/); - $outfile = $1; - open(F,"$infile") || die "$0 Could not open $infile to read"; - my @parsetext; - my $cnt=0; - foreach(){ - $cnt++; - if(/^\s*contains/i){ - push(@parsetext,"# $cnt \"$infile\"\n"); - } - if(/^\s*interface/i){ - push(@parsetext,"# $cnt \"$infile\"\n"); - } - if(/^[^!]*subroutine/i){ - push(@parsetext,"# $cnt \"$infile\"\n"); - } - if(/^[^!]*function/i){ - push(@parsetext,"# $cnt \"$infile\"\n"); - } - - push(@parsetext,$_); - } - - close(F); - - my $end; - my $contains=0; - my $in_type_block=0; - my @unit; - my $unitcnt=0; - my $date = localtime(); - my $preamble = -"!=================================================== -! DO NOT EDIT THIS FILE, it was generated using $0 -! Any changes you make to this file may be lost -!===================================================\n"; - my @output ; - push(@output,$preamble); - - my $line; - my $dimmodifier; - my $typemodifier; - my $itypeflag; - my $block; - my $block_type; - my $cppunit; - foreach $line (@parsetext){ -# skip parser comments - next if($line =~ /\s*!pl/); - - $itypeflag=1 if($line =~ /{ITYPE}/); - $itypeflag=1 if($line =~ /TYPETEXT/); - $itypeflag=1 if($line =~ /TYPEREAL/); - $itypeflag=1 if($line =~ /TYPEDOUBLE/); - $itypeflag=1 if($line =~ /TYPEINT/); - $itypeflag=1 if($line =~ /TYPELONG/); - - - if($contains==0){ - if($line=~/\s*!\s*DIMS\s+[\d,]+!*/){ - $dimmodifier=$line; - next; - } - if($line=~/\s*!\s*TYPE\s+[^!]+!*$/){ - $typemodifier=$line; - next; - } - if ((defined $typemodifier or defined $dimmodifier) - and not defined $block and $line=~/^\s*#[^{]*$/) { - push(@output, $line); - next; - } - # Figure out the bounds of a type statement. - # Type blocks start with "type," "type foo" or "type::" but not - # "type(". 
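Putting the keyword rules above together: for each requested {TYPE}/{DIMS} pair the template body is copied with {TYPE} and {DIMS} spliced into names, {VTYPE} replaced by the matching declaration, and {DIMSTR} replaced by the corresponding (:,:,...) shape. As a small illustration (the routine is invented, not part of any real template), a fragment in a hypothetical foo.F90.in such as

subroutine put_{TYPE}_{DIMS}d(buf)
  implicit none
  {VTYPE}, intent(in) :: buf{DIMSTR}
end subroutine put_{TYPE}_{DIMS}d

would, for TYPE=text and DIMS=2, be emitted by genf90.pl as

subroutine put_text_2d(buf)
  implicit none
  character(len=*), intent(in) :: buf(:,:)
end subroutine put_text_2d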
- $in_type_block=1 if($line=~/^\s*type\s*[,:[:alpha:]]/i); - $in_type_block=0 if($line=~/^\s*end\s*type/i); - if(not defined $block) { - if ($line=~/^\s*type[^[:alnum:]_].*(\{TYPE\}|\{DIMS\})/i or - $line=~/^[^!]*(function|subroutine).*(\{TYPE\}|\{DIMS\})/i) { - $block=$line; - next; - } - if ($line=~/^\s*interface.*(\{TYPE\}|\{DIMS\})/i) { - $block_type="interface"; - $block=$line; - next; - } - } - if(not defined $block_type and - ($line=~/^\s*end\s+type\s+.*(\{TYPE\}|\{DIMS\})/i or - $line=~/^\s*end\s+(function|subroutine)\s+.*(\{TYPE\}|\{DIMS\})/i)){ - - $line = $block.$line; - undef $block; - } - if ($line=~/^\s*end\s*interface/i and - defined $block) { - $line = $block.$line; - undef $block; - undef $block_type; - } - if(defined $block){ - $block = $block.$line; - next; - } - if(defined $dimmodifier){ - $line = $dimmodifier.$line; - undef $dimmodifier; - } - if(defined $typemodifier){ - $line = $typemodifier.$line; - undef $typemodifier; - } - - push(@output, buildout($line)); - if(($line =~ /^\s*contains\s*!*/i && ! $in_type_block) or - ($line =~ /^\s*!\s*Not a module/i)){ - $contains=1; - next; - } - } - if($line=~/^\s*end module\s*/){ - $end = $line; - last; - } - - if($contains==1){ - # first parse into functions or subroutines - if($cppunit || !(defined($unit[$unitcnt]))){ - # Make cpp lines and blanks between routines units. - if($line =~ /^\s*\#(?!\s[[:digit:]]+)/ || $line =~/^\s*$/ || $line=~/^\s*!(?!\s*(TYPE|DIMS))/){ - push(@{$unit[$unitcnt]},$line); - $cppunit=1; - next; - } else { - $cppunit=0; - $unitcnt++; - } - } - - - push(@{$unit[$unitcnt]},$line); - if ($line=~/^\s*interface/i) { - $block_type="interface"; - $block=$line; - } - if ($line=~/^\s*end\s*interface/i) { - undef $block_type; - undef $block; - } - unless(defined $block){ - if($line =~ /\s*end function/i or $line =~ /\s*end subroutine/i){ - $unitcnt++; - } - } - } - } - my $i; - - - for($i=0;$i<$unitcnt;$i++){ - if(defined($unit[$i])){ - my $func = join('',@{$unit[$i]}); - push(@output, buildout($func)); - } - } - push(@output,@{$unit[$#unit]}) if($unitcnt==$#unit); - push(@output, $end); - if($itypeflag==1){ - my $str; - $str.="#include \"dtypes.h\"\n"; - $write_dtypes = "yes"; - print $str; - } - print @output; - writedtypes() if(!(-e "dtypes.h") && $write_dtypes == "yes"); - - -} - - -sub usage{ - die("$0 Expected input filename of the form .*.F90.in"); -} - -sub build_repeatstr{ - my($dims) = @_; - # Create regex to repeat expression DIMS times. 
- my $repeatstr; - for(my $i=1;$i<=$dims;$i++){ - $repeatstr .="\$\{1\}$i\$\{2\},&\n"; - } - if(defined $repeatstr){ - $repeatstr="\"$repeatstr"; - chop $repeatstr; - chop $repeatstr; - chop $repeatstr; - $repeatstr.="\""; - }else{ - $repeatstr=''; - } -} - -sub writedtypes{ - open(F,">dtypes.h"); - print F -"#define TYPETEXT 100 -#define TYPEREAL 101 -#define TYPEDOUBLE 102 -#define TYPEINT 103 -#define TYPELONG 104 -#define TYPELOGICAL 105 -"; - close(F); -} - -sub buildout{ - my ($func) = @_; - - my $outstr; - my(@ldims, @ltypes); - - if($func=~/\s*!\s*DIMS\s+([\d,]+)\s*/){ - @ldims = split(/,/,$1); - }else{ - @ldims = @dims; - } - if($func=~/\s*!\s*TYPE\s+([^!\s]+)\s*/){ - @ltypes = split(/,/,$1); -# print ">$func<>@ltypes<\n"; - }else{ - @ltypes = @types; - } - - - if(($func =~ /{TYPE}/ && $func =~ /{DIMS}/) ){ - my ($type, $dims); - foreach $type (@ltypes){ - foreach $dims (@ldims){ - my $dimstr; - for(my $i=1;$i<=$dims;$i++){ - $dimstr .=':,'; - } - if(defined $dimstr){ - $dimstr="($dimstr"; - chop $dimstr; - $dimstr.=')'; - }else{ - $dimstr=''; - } - - my $repeatstr = build_repeatstr($dims); - - my $str = $func; - $str =~ s/{TYPE}/$type/g; - $str =~ s/{VTYPE}/$vtype->{$type}/g; - $str =~ s/{ITYPE}/$itype->{$type}/g; - $str =~ s/{MPITYPE}/$mpitype->{$type}/g; - $str =~ s/{NCTYPE}/$nctype->{$type}/g; - $str =~ s/{CTYPE}/$ctype->{$type}/g; - $str =~ s/{DIMS}/$dims/g; - $str =~ s/{DIMSTR}/$dimstr/g; - $str =~ s/{REPEAT:([^#}]*)#([^#}]*)}/$repeatstr/eeg; - $outstr .= $str; - } - } - }elsif($func =~ /{DIMS}/){ - my $dims; - foreach $dims (@ldims){ - my $dimstr; - for(my $i=1;$i<=$dims;$i++){ - $dimstr .=':,'; - } - if(defined $dimstr){ - $dimstr="($dimstr"; - chop $dimstr; - $dimstr.=')'; - }else{ - $dimstr=''; - } - - my $repeatstr = build_repeatstr($dims); - - my $str = $func; - $str =~ s/{DIMS}/$dims/g; - $str =~ s/{DIMSTR}/$dimstr/g; - $str =~ s/{REPEAT:([^#}]*)#([^#}]*)}/$repeatstr/eeg; - $outstr .= $str; - } - }elsif($func =~ /{TYPE}/){ - my ($type); - foreach $type (@ltypes){ - my $str = $func; - $str =~ s/{TYPE}/$type/g; - $str =~ s/{VTYPE}/$vtype->{$type}/g; - $str =~ s/{ITYPE}/$itype->{$type}/g; - $str =~ s/{MPITYPE}/$mpitype->{$type}/g; - $str =~ s/{NCTYPE}/$nctype->{$type}/g; - $str =~ s/{CTYPE}/$ctype->{$type}/g; - $outstr.=$str; - } - }else{ - $outstr=$func; - } - - return $outstr; -} diff --git a/src/externals/mct/.gitignore b/src/externals/mct/.gitignore deleted file mode 100644 index 6e04052969b..00000000000 --- a/src/externals/mct/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -Makefile.conf -config.log -config.status -config.h -autom4te.cache -*.o -*.mod -lib*.a -data diff --git a/src/externals/mct/COPYRIGHT b/src/externals/mct/COPYRIGHT deleted file mode 100644 index f4aa22117eb..00000000000 --- a/src/externals/mct/COPYRIGHT +++ /dev/null @@ -1,51 +0,0 @@ - Modeling Coupling Toolkit (MCT) Software - -Copyright © 2011, UChicago Argonne, LLC as Operator of Argonne National Laboratory. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - -1. Redistributions of source code must retain the above copyright notice, this list of conditions - and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions - and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. 
The end-user documentation included with the redistribution, if any, must include the following - acknowledgment: - - "This product includes software developed by the UChicago Argonne, LLC, as Operator of Argonne - National Laboratory." - - Alternately, this acknowledgment may appear in the software itself, if and wherever such third-party - acknowledgments normally appear. - -This software was authored by: - -Argonne National Laboratory Climate Modeling Group -Robert Jacob, tel: (630) 252-2983, E-mail: jacob@mcs.anl.gov -Jay Larson, E-mail: larson@mcs.anl.gov -Everest Ong -Ray Loy -Mathematics and Computer Science Division -Argonne National Laboratory, Argonne IL 60439 - - -4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, - THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND THEIR EMPLOYEES: (1) DISCLAIM ANY - WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY OR - RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT - USE OF THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) DO NOT WARRANT THAT THE SOFTWARE WILL - FUNCTION UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL BE CORRECTED. - -5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT HOLDER, THE UNITED STATES, THE UNITED STATES - DEPARTMENT OF ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT, INCIDENTAL, CONSEQUENTIAL, SPECIAL - OR PUNITIVE DAMAGES OF ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF PROFITS OR LOSS OF - DATA, FOR ANY REASON WHATSOEVER, WHETHER SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT - (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE, EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED - OF THE POSSIBILITY OF SUCH LOSS OR DAMAGES. 
- - diff --git a/src/externals/mct/Makefile b/src/externals/mct/Makefile deleted file mode 100644 index 6b5bfe7a444..00000000000 --- a/src/externals/mct/Makefile +++ /dev/null @@ -1,33 +0,0 @@ - -SHELL = /bin/sh - -include Makefile.conf - -SUBDIRS = $(MPISERPATH) $(MPEUPATH) $(MCTPATH) - -# TARGETS -subdirs: - @set -e; for dir in $(SUBDIRS); do \ - cd $$dir; \ - $(MAKE); \ - cd $(abs_top_builddir); \ - done - -clean: - @set -e; for dir in $(SUBDIRS); do \ - cd $$dir; \ - $(MAKE) clean; \ - cd $(abs_top_builddir); \ - done - -install: subdirs - @set -e; for dir in $(SUBDIRS); do \ - cd $$dir; \ - $(MAKE) install; \ - cd $(abs_top_builddir); \ - done - -examples: subdirs - @cd $(EXAMPLEPATH) && $(MAKE) - - diff --git a/src/externals/mct/Makefile.conf.in b/src/externals/mct/Makefile.conf.in deleted file mode 100644 index bc2896d90ef..00000000000 --- a/src/externals/mct/Makefile.conf.in +++ /dev/null @@ -1,89 +0,0 @@ -# Source location -SRCDIR = @abs_srcdir@ -FDEPENDS=$(SRCDIR)/fdepends.awk - -# COMPILER, LIBRARY, AND MACHINE MAKE VARIABLES - -# FORTRAN COMPILER VARIABLES # - -# FORTRAN COMPILER COMMAND -FC = @MPIFC@ - -# FORTRAN AND FORTRAN90 COMPILER FLAGS -FCFLAGS = @OPT@ @DEBUG@ @FCFLAGS@ @BIT64@ - -FC_DEFINE = @FC_DEFINE@ - -# FORTRAN COMPILE FLAG FOR AUTOPROMOTION -# OF NATIVE REAL TO 8 BIT REAL -REAL8 = @REAL8@ - -# FORTRAN COMPILE FLAGS FOR EXAMPLE PROGRAMS -PROGFCFLAGS = @PROGFCFLAGS@ - -# FORTRAN COMPILE FLAG FOR CHANGING BYTE ORDERING -ENDIAN = @ENDIAN@ - -# INCLUDE FLAG FOR LOCATING MODULES (-I, -M, or -p) -INCFLAG = @INCLUDEFLAG@ - -# INCLUDE PATHS (PREPEND INCLUDE FLAGS -I, -M or -p) -INCPATH = @INCLUDEPATH@ @MPIHEADER@ - -# MPI LIBRARIES (USUALLY -lmpi) -MPILIBS = @MPILIBS@ - -# PREPROCESSOR VARIABLES # - -# COMPILER AND OS DEFINE FLAGS -CPPDEFS = @CPPDEFS@ - -FPPDEFS=$(patsubst -D%,$(FC_DEFINE)%,$(CPPDEFS)) - -# C COMPILER VARIABLES # - -# C COMPILER -CC = @CC@ - -# C COMPILER FLAGS - APPEND CFLAGS -CFLAGS = @CFLAGS@ -CPPFLAGS = @CPPFLAGS@ - -# LIBRARY SPECIFIC VARIABLES # - -# USED BY MCT BABEL BINDINGS -COMPILER_ROOT = @COMPILER_ROOT@ -BABELROOT = @BABELROOT@ -PYTHON = @PYTHON@ -PYTHONOPTS = @PYTHONOPTS@ - -# USED BY MPI-SERIAL LIBRARY - -# SIZE OF FORTRAN REAL AND DOUBLE -FORT_SIZE = @FORT_SIZE@ - - -# INSTALLATION VARIABLES # - -# INSTALL COMMANDS -INSTALL = @abs_top_builddir@/install-sh -c -MKINSTALLDIRS = @abs_top_builddir@/mkinstalldirs - -# INSTALLATION DIRECTORIES -abs_top_builddir= @abs_top_builddir@ -MCTPATH = @abs_top_builddir@/mct -MPEUPATH = @abs_top_builddir@/mpeu -EXAMPLEPATH = @abs_top_builddir@/examples -MPISERPATH = @MPISERPATH@ -libdir = @prefix@/lib -includedir = @prefix@/include - -# OTHER COMMANDS # -RANLIB = @RANLIB@ -AR = @AR@ -RM = rm -f - - - - - diff --git a/src/externals/mct/README b/src/externals/mct/README deleted file mode 100644 index e6718bea0d8..00000000000 --- a/src/externals/mct/README +++ /dev/null @@ -1,199 +0,0 @@ -###################################################################### - - -- Argonne National Laboratory - - Model Coupling Toolkit (MCT) - - Robert Jacob - Jay Larson - Everest Ong - Ray Loy - - For more information, see http://www.mcs.anl.gov/mct - - See MCT/COPYRIGHT for license. - -###################################################################### - - This is version 2.10 of the Model Coupling Toolkit (MCT). - - Our purpose in creating this toolkit is to support the construction - of highly portable and extensible high-performance couplers - for distributed memory parallel coupled models. 
- -###################################################################### - - - Current Contents of the directory MCT: - - README -- this file - - COPYRIGHT - copyright statement and license. - - mct/ -- Source code for the Model Coupling Toolkit. - - mpeu/ -- Source code for the message-passing environment utilities - library (MPEU), which provides support for MCT - - mpi-serial/ -- Source code for optional mpi replacement library. - - examples/-- Source code for examples which demonstrate the use of MCT. - - doc/ -- documentation for MCT - - protex/ -- tool for constructing documentation from source code - - data/ -- input data for running example programs. Not needed to - compile the library. - - m4/ -- files for autoconf (not needed to build). - -Optional Contents available - - babel/ -- multi language interface for MCT using BABEL. - See babel/README for more information. - NO LONGER SUPPORTED - -###################################################################### - REQUIREMENTS: - - Building MCT requires a Fortran90 compiler. - - A full MPI library is now optional. To compile without MPI, add - --enable-mpiserial to the configure command below. Note that - not all the examples will work without MPI. See mpi-serial/README - for more information. - - - The MCT library builds and the examples run on the following - platforms/compilers: - - Linux: Portland Group, Intel, gfortran, Absoft, Pathscale, Lahey, NAG - MacOSX: gfortran - IBM (AIX) xlf - IBM BlueGene (see PLATFORM NOTE below) - SGI Altix - Cray XT/XK - Compaq Compaq Fortran Compiler (X5.5-2801-48CAG or later) - SUN (Solaris) f90 WorkShop - NEC - Fujitsu - - Running some of the examples requires a full MPI installation with mpirun - Memory requirements are modest. - -###################################################################### - BUILD INSTRUCTIONS: - - In the top level directory (the location of this README): - > ./configure - > make - - "make examples" will build the example programs. - - BUILD HELP: - Try "./configure -help" for a list of options. - - The correct Fortran90 compiler must be in your current path. - A frequent problem on Linux is when more than one F90 compiler - is in your path and configure finds one and later finds mpif90 - for another. - - Example: If configure has trouble finding the correct F90 compiler: - > ./configure FC=pgf90. - - You can also direct configure through environment variables: - > setenv FC xlf90 - > ./configure - - If the build fails, please do the following: - > ./configure >& config.out - > make >& make.out - and send us config.out, make.out and config.log (which is produced by the - configure command) - - PLATFORM NOTES: - On a BlueGene, use: - > ./configure FC=bgxlf90_r CC=mpixlc_r MPIFC=mpixlf90_r (can also use versions without _r) - -###################################################################### - INSTALLATION INSTRUCTIONS: - - "make install" will copy the .mod files to the /usr/include directory - and the *lib.a files to /usr/lib. To override these choices, use - "-prefix" when running configure: - > ./configure --prefix=/home/$USER - With the above option, "make install" will place .mod's in /home/$USER/include - and *lib.a's in /home/$USER/lib - -###################################################################### - BUILDING AND RUNNING THE EXAMPLES - - The programs in MCT/examples/simple require no input. - - The programs in MCT/examples/climate_concur1 and MCT/examples/climate_sequen1 - require some input data in a directory called MCT/data. 
The dataset is available with MCT - or separately from the website. - - To build them, type "make examples" in the top level directory or - cd to examples and type "make". - -###################################################################### - - Both MCT and MPEU source code are self-documenting. All modules - and routines contain prologues that can be extracted and processed - into LaTeX source code by the public-domain tool ProTeX. ProTeX is - included in the MCT source and available from: - http://gmao.gsfc.nasa.gov/software/protex/ - - You can build the documentation with protex and latex by following - the directions in the doc directory. - -###################################################################### - - REVISION HISTORY: - - 18 Oct, 2000 -- Initial prototype - 09 Feb, 2001 -- working MxN transfer - 27 Apr, 2001 -- Sparse Matrix Multiply - 13 Jun, 2001 -- General Grid - 23 Aug, 2001 -- Linux PGF90 port - 14 Dec, 2001 -- PCM support - 29 Mar, 2002 -- Rearranger - 14 Nov, 2002 -- version 1.0.0 -- first public release - 11 Feb, 2003 -- version 1.0.4 - 12 Mar, 2003 -- version 1.0.5 - 02 Apr, 2003 -- version 1.0.7 - 03 Jul, 2003 -- version 1.0.9 - 26 Aug, 2003 -- version 1.0.12 - 12 Sep, 2003 -- version 1.0.14 - 21 Jan, 2004 -- version 1.4.0 - 05 Feb, 2004 -- version 1.6.0 - 23 Apr, 2004 -- version 2.0.0 - 18 May, 2004 -- version 2.0.1 - 11 Jul, 2004 -- version 2.0.2 - 19 Oct, 2004 -- version 2.0.3 (not released) - 21 Jan, 2005 -- version 2.1.0 - 01 Dec, 2005 -- version 2.2.0 - 22 Apr, 2006 -- version 2.2.1 (not released) - 08 Sep, 2006 -- version 2.2.2 - 16 Oct, 2006 -- version 2.2.3 - 10 Jan, 2007 -- version 2.3.0 - 17 Aug, 2007 -- version 2.4.0 - 21 Nov, 2007 -- version 2.4.1 - 20 Dec, 2007 -- version 2.4.2 (not released) - 21 Jan, 2008 -- version 2.4.3 (not released) - 28 Jan, 2008 -- version 2.5.0 - 20 May, 2008 -- version 2.5.1 - 05 Mar, 2009 -- version 2.6.0 - 05 Jan, 2010 -- version 2.7.0 (released only in CCSM4) - 28 Feb, 2010 -- version 2.7.1 (released only in CESM1) - 30 Nov, 2010 -- version 2.7.2 (released only in CESM1.0.3) - 25 Jan, 2011 -- version 2.7.3 (not released) - 07 Mar, 2012 -- version 2.7.4 (not released) - 30 Apr, 2012 -- version 2.8.0 - 05 Jul, 2012 -- version 2.8.1 (not released) - 12 Sep, 2012 -- version 2.8.2 (not released) - 16 Dec, 2012 -- version 2.8.3 - 19 Jun, 2015 -- version 2.9.0 - 12 Mar, 2018 -- version 2.10.0 diff --git a/src/externals/mct/aclocal.m4 b/src/externals/mct/aclocal.m4 deleted file mode 100644 index ae3d396d8c8..00000000000 --- a/src/externals/mct/aclocal.m4 +++ /dev/null @@ -1,16 +0,0 @@ -# generated automatically by aclocal 1.10 -*- Autoconf -*- - -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005, 2006 Free Software Foundation, Inc. -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
- -m4_include([m4/acx_mpi.m4]) -m4_include([m4/ax_fc_version.m4]) -m4_include([m4/fortran.m4]) diff --git a/src/externals/mct/benchmarks/.gitignore b/src/externals/mct/benchmarks/.gitignore deleted file mode 100644 index 1c6273f3704..00000000000 --- a/src/externals/mct/benchmarks/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -importBench -RouterTestDis -RouterTestOvr -fort.* diff --git a/src/externals/mct/benchmarks/Makefile b/src/externals/mct/benchmarks/Makefile deleted file mode 100644 index 75d393ff55a..00000000000 --- a/src/externals/mct/benchmarks/Makefile +++ /dev/null @@ -1,58 +0,0 @@ - -SHELL = /bin/sh - -# SOURCE FILES - -SRCS_F90 = importBench.F90 RouterTestDis.F90 RouterTestOvr.F90 - -OBJS_ALL = $(SRCS_F90:.F90=.o) - -# MACHINE AND COMPILER FLAGS - -include ../Makefile.conf - -# ADDITIONAL FLAGS SPECIFIC FOR UTMCT COMPILATION - -MCTLIBS = -L$(MPEUPATH) -L$(MCTPATH) -lmct -lmpeu -UTLDFLAGS = $(REAL8) -UTCMPFLAGS = $(REAL8) $(INCFLAG)$(MPEUPATH) $(INCFLAG)$(MCTPATH) - -# TARGETS - -all: importBench RouterTestDis RouterTestOvr - -importBench: importBench.o - $(FC) -o $@ importBench.o $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -RouterTestDis: RouterTestDis.o - $(FC) -o $@ RouterTestDis.o $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -RouterTestOvr: RouterTestOvr.o - $(FC) -o $@ RouterTestOvr.o $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .o - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MCTFLAGS) $(UTCMPFLAGS) $< - - -clean: - ${RM} *.o *.mod importBench RouterTestDis RouterTestOvr - -# DEPENDENCIES: - -$(OBJS_ALL): $(MCTPATH)/libmct.a - - - - - - - - - - - diff --git a/src/externals/mct/benchmarks/RouterTestDis.F90 b/src/externals/mct/benchmarks/RouterTestDis.F90 deleted file mode 100644 index 635acca2a64..00000000000 --- a/src/externals/mct/benchmarks/RouterTestDis.F90 +++ /dev/null @@ -1,200 +0,0 @@ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -!BOP ------------------------------------------------------------------- -! -! !PROGRAM: RouterTestDis - Test building a router. -! -! -! !DESCRIPTION: Test building a router from output GSMaps on -! 2 disjoint sets of processors. -! -program RouterTestDis - -! -! !USES: -! - - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: GSMap_init => init - use m_GlobalSegMap,only: GSMap_lsize => lsize - - use m_Router,only: Router - use m_Router,only: Router_init => init - - use m_MCTWorld,only: MCTWorld_init => init - use m_ioutil, only : luavail - use m_stdio, only : stdout,stderr - use m_die, only : die - use m_mpif90 - use m_zeit - - implicit none - - include "mpif.h" - -! -!EOP ------------------------------------------------------------------- - -! local variables - - character(len=*), parameter :: myname_='RouterTestDis' - - integer,dimension(:),pointer :: comps ! array with component ids - - - - type(GlobalSegMap) :: comp1GSMap - type(GlobalSegMap) :: comp2GSMap - type(Router) :: myRout - -! other variables - integer :: comm1, comm2, rank, nprocs,compid, myID, ier,color - integer :: mdev1, mdev2, nprocs1,nprocs2,ngseg,gsize - character*24 :: filename1, filename2 - integer :: lrank,newcomm,n,junk - integer, dimension(:), allocatable :: root_start, root_length, root_pe_loc - -!----------------------------------------------------------------------- -! The Main program. -! -! 
This main program initializes MCT - -! Initialize MPI - call MPI_INIT(ier) - -! Get basic MPI information - call MPI_COMM_SIZE(MPI_COMM_WORLD,nprocs,ier) - call MPI_COMM_RANK(MPI_COMM_WORLD,rank,ier) - - filename1="T42.8pR" - filename2="T42.8pC" - -! open up the two files with the GSMap information. - - if(rank == 0) then - mdev1 = luavail() - open(mdev1,file=trim(filename1),status='old') - - mdev2 = luavail() - open(mdev2,file=trim(filename2),status='old') - - - read(mdev1,*) nprocs1 - read(mdev2,*) nprocs2 - - -! This is the disjoint test so need to have enough processors. - if(nprocs1+nprocs2 .ne. nprocs) then - write(0,*)"Wrong processor count for exactly 2 disjoint communicators." - write(0,*)"Need",nprocs1+nprocs2,"got",nprocs - call die("main","nprocs check") - endif - close(mdev1) - close(mdev2) - endif - - call MPI_BCAST(nprocs1,1,MP_INTEGER,0,MPI_COMM_WORLD,ier) - call MPI_BCAST(nprocs2,1,MP_INTEGER,0,MPI_COMM_WORLD,ier) - -! Split world into 2 pieces for each component - color=0 - if(rank < nprocs1) color=1 - - call MPI_COMM_SPLIT(MPI_COMM_WORLD,color,rank,newcomm,ier) - -! ******************************* -! Component 1 -! ******************************* - if(color == 0) then - call MPI_COMM_RANK(newcomm,lrank,ier) - -! build an MCTWorld with 2 components - call MCTWorld_init(2,MPI_COMM_WORLD,newcomm,1) - -! on non-root proccessors, allocate with length 1 - if(lrank .ne. 0) then - - allocate(root_start(1), root_length(1), & - root_pe_loc(1), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - endif - - if(lrank == 0) then - mdev1 = luavail() - open(mdev1,file=trim(filename1),status='old') - read(mdev1,*) junk - read(mdev1,*) junk - read(mdev1,*) ngseg - read(mdev1,*) gsize - allocate(root_start(ngseg), root_length(ngseg), & - root_pe_loc(ngseg), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - do n=1,ngseg - read(mdev1,*) root_start(n),root_length(n), & - root_pe_loc(n) - enddo - endif - -! initalize the GSMap from root - call GSMap_init(comp1GSMap, ngseg, root_start, root_length, & - root_pe_loc, 0, newcomm, 1) - - -! initalize the Router with component 2 - call Router_init(2,comp1GSMap,newcomm,myRout,"Dis1") - call zeit_allflush(newcomm,0,6) - -! ******************************* -! Component 2 -! ******************************* - else - call MPI_COMM_RANK(newcomm,lrank,ier) - -! build an MCTWorld with 2 components - call MCTWorld_init(2,MPI_COMM_WORLD,newcomm,2) -! on non-root proccessors, allocate with length 1 - if(lrank .ne. 0) then - - allocate(root_start(1), root_length(1), & - root_pe_loc(1), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - endif - - if(lrank == 0) then - mdev2 = luavail() - open(mdev2,file=trim(filename2),status='old') - read(mdev2,*) junk - read(mdev2,*) junk - read(mdev2,*) ngseg - read(mdev2,*) gsize - allocate(root_start(ngseg), root_length(ngseg), & - root_pe_loc(ngseg), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - do n=1,ngseg - read(mdev2,*) root_start(n),root_length(n), & - root_pe_loc(n) - enddo - endif - -! initalize the GSMap from root - call GSMap_init(comp2GSMap, ngseg, root_start, root_length, & - root_pe_loc, 0, newcomm, 2) - -! 
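RouterTestDis above carves MPI_COMM_WORLD into two disjoint component communicators: ranks below nprocs1 get one color, the remainder another, and MPI_COMM_SPLIT returns the per-component communicator that is then handed to MCTWorld_init together with the component id. A minimal standalone sketch of that split (nprocs1 is an arbitrary example value here) is:

program split_demo
  ! disjoint communicator split in the style of RouterTestDis
  use mpi
  implicit none
  integer, parameter :: nprocs1 = 2          ! example size of the first component
  integer :: ier, rank, nprocs, color, newcomm, lrank
  call MPI_Init(ier)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ier)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ier)
  color = 0
  if (rank < nprocs1) color = 1              ! same test used in RouterTestDis
  call MPI_Comm_split(MPI_COMM_WORLD, color, rank, newcomm, ier)
  call MPI_Comm_rank(newcomm, lrank, ier)
  write(*,'(a,i0,a,i0,a,i0)') 'world rank ', rank, ': color ', color, ', local rank ', lrank
  call MPI_Finalize(ier)
end program split_demo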
initalize the Router with component 1 - call Router_init(1,comp2GSMap,newcomm,myRout,"Dis2") - call zeit_allflush(newcomm,0,6) - endif - - call MPI_Finalize(ier) - -end program RouterTestDis diff --git a/src/externals/mct/benchmarks/RouterTestOvr.F90 b/src/externals/mct/benchmarks/RouterTestOvr.F90 deleted file mode 100644 index b9895b0dd9f..00000000000 --- a/src/externals/mct/benchmarks/RouterTestOvr.F90 +++ /dev/null @@ -1,195 +0,0 @@ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -!BOP ------------------------------------------------------------------- -! -! !PROGRAM: RouterTestOvr - Test building a router. -! -! -! !DESCRIPTION: Test building a router from output GSMaps on -! overlapping processors -! -program RouterTestOvr - -! -! !USES: -! - - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: GSMap_init => init - use m_GlobalSegMap,only: GSMap_lsize => lsize - - use m_Router,only: Router - use m_Router,only: Router_init => init - - use m_MCTWorld,only: MCTWorld_init => init - use m_ioutil, only : luavail - use m_stdio, only : stdout,stderr - use m_die, only : die - use m_mpif90 - - implicit none - - include "mpif.h" - -! -!EOP ------------------------------------------------------------------- - -! local variables - - character(len=*), parameter :: myname_='RouterTestOvr' - - integer :: ncomps = 2 ! Must know total number of - ! components in coupled system - - integer,dimension(:),pointer :: comps ! array with component ids - - type(GlobalSegMap) :: comp1GSMap - type(GlobalSegMap) :: comp2GSMap - type(Router) :: myRout - -! other variables - integer :: comm1, comm2, rank, nprocs,compid, myID, ier,color - integer :: mdev1, mdev2, nprocs1,nprocs2,ngseg,gsize - character*24 :: filename1, filename2 - integer :: lrank,newcomm,n,junk - integer, dimension(:), allocatable :: root_start, root_length, root_pe_loc - -!----------------------------------------------------------------------- -! The Main program. -! -! This main program initializes MCT - -! Initialize MPI - call MPI_INIT(ier) - -! Get basic MPI information - call MPI_COMM_SIZE(MPI_COMM_WORLD,nprocs,ier) - call MPI_COMM_RANK(MPI_COMM_WORLD,rank,ier) - - filename1="gx1.8pR" - filename2="gx1.8pC" - -! open up the two files with the GSMap information. -! and read the total number of processors needed - - if(rank == 0) then - mdev1 = luavail() - open(mdev1,file=trim(filename1),status='old') - - mdev2 = luavail() - open(mdev2,file=trim(filename2),status='old') - - - read(mdev1,*) nprocs1 - read(mdev2,*) nprocs2 - - -! Need to have enough processors. - if(nprocs .lt. max(nprocs1,nprocs2)) then - write(0,*)"Wrong processor count for 2 overlapping communicators." - write(0,*)"Need",max(nprocs1,nprocs2),"got",nprocs - call die("main","nprocs check") - endif - close(mdev1) - close(mdev2) - endif - - call MPI_BCAST(nprocs1,1,MP_INTEGER,0,MPI_COMM_WORLD,ier) - call MPI_BCAST(nprocs2,1,MP_INTEGER,0,MPI_COMM_WORLD,ier) - - call mpi_comm_dup(MPI_COMM_WORLD,comm1,ier) - call mpi_comm_dup(MPI_COMM_WORLD,comm2,ier) - -! Initialize MCT - allocate(comps(ncomps),stat=ier) - comps(1)=1 - comps(2)=2 - call MCTWorld_init(ncomps,MPI_COMM_WORLD,comm1,myids=comps) - - - -! ******************************* -! Component 1 -! ******************************* - call MPI_COMM_RANK(comm1,lrank,ier) - -! on non-root proccessors, allocate with length 1 - if(lrank .ne. 
0) then - - allocate(root_start(1), root_length(1), & - root_pe_loc(1), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - endif - - if(lrank == 0) then - mdev1 = luavail() - open(mdev1,file=trim(filename1),status='old') - read(mdev1,*) junk - read(mdev1,*) junk - read(mdev1,*) ngseg - read(mdev1,*) gsize - allocate(root_start(ngseg), root_length(ngseg), & - root_pe_loc(ngseg), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - do n=1,ngseg - read(mdev1,*) root_start(n),root_length(n), & - root_pe_loc(n) - enddo - endif - -! initalize the GSMap from root - call GSMap_init(comp1GSMap, ngseg, root_start, root_length, & - root_pe_loc, 0, comm1, 1) - - deallocate(root_start,root_length,root_pe_loc) - -! ******************************* -! Component 2 -! ******************************* - call MPI_COMM_RANK(comm2,lrank,ier) - -! on non-root proccessors, allocate with length 1 - if(lrank .ne. 0) then - - allocate(root_start(1), root_length(1), & - root_pe_loc(1), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - endif - - if(lrank == 0) then - mdev2 = luavail() - open(mdev2,file=trim(filename2),status='old') - read(mdev2,*) junk - read(mdev2,*) junk - read(mdev2,*) ngseg - read(mdev2,*) gsize - allocate(root_start(ngseg), root_length(ngseg), & - root_pe_loc(ngseg), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - do n=1,ngseg - read(mdev2,*) root_start(n),root_length(n), & - root_pe_loc(n) - enddo - endif - -! initalize the GSMap from root - call GSMap_init(comp2GSMap, ngseg, root_start, root_length, & - root_pe_loc, 0, comm2, 2) - -! now initialize the Router - call Router_init(comp1GSMap,comp2GSMap,comm1,myRout,"Over") - - - call MPI_Finalize(ier) - -end program RouterTestOvr diff --git a/src/externals/mct/benchmarks/T42.8pC b/src/externals/mct/benchmarks/T42.8pC deleted file mode 100644 index f80c0b8b0b7..00000000000 --- a/src/externals/mct/benchmarks/T42.8pC +++ /dev/null @@ -1,516 +0,0 @@ - 8 - 1 - 512 - 8192 - 1 16 0 - 129 16 0 - 257 16 0 - 385 16 0 - 513 16 0 - 641 16 0 - 769 16 0 - 897 16 0 - 1025 16 0 - 1153 16 0 - 1281 16 0 - 1409 16 0 - 1537 16 0 - 1665 16 0 - 1793 16 0 - 1921 16 0 - 2049 16 0 - 2177 16 0 - 2305 16 0 - 2433 16 0 - 2561 16 0 - 2689 16 0 - 2817 16 0 - 2945 16 0 - 3073 16 0 - 3201 16 0 - 3329 16 0 - 3457 16 0 - 3585 16 0 - 3713 16 0 - 3841 16 0 - 3969 16 0 - 4097 16 0 - 4225 16 0 - 4353 16 0 - 4481 16 0 - 4609 16 0 - 4737 16 0 - 4865 16 0 - 4993 16 0 - 5121 16 0 - 5249 16 0 - 5377 16 0 - 5505 16 0 - 5633 16 0 - 5761 16 0 - 5889 16 0 - 6017 16 0 - 6145 16 0 - 6273 16 0 - 6401 16 0 - 6529 16 0 - 6657 16 0 - 6785 16 0 - 6913 16 0 - 7041 16 0 - 7169 16 0 - 7297 16 0 - 7425 16 0 - 7553 16 0 - 7681 16 0 - 7809 16 0 - 7937 16 0 - 8065 16 0 - 17 16 1 - 145 16 1 - 273 16 1 - 401 16 1 - 529 16 1 - 657 16 1 - 785 16 1 - 913 16 1 - 1041 16 1 - 1169 16 1 - 1297 16 1 - 1425 16 1 - 1553 16 1 - 1681 16 1 - 1809 16 1 - 1937 16 1 - 2065 16 1 - 2193 16 1 - 2321 16 1 - 2449 16 1 - 2577 16 1 - 2705 16 1 - 2833 16 1 - 2961 16 1 - 3089 16 1 - 3217 16 1 - 3345 16 1 - 3473 16 1 - 3601 16 1 - 3729 16 1 - 3857 16 1 - 3985 16 1 - 4113 16 1 - 4241 16 1 - 4369 16 1 - 4497 16 1 - 4625 16 1 - 4753 16 1 - 4881 16 1 - 5009 16 1 - 5137 16 1 - 5265 16 1 - 5393 16 1 - 5521 16 1 - 5649 16 1 - 5777 16 1 - 5905 16 1 - 6033 16 1 - 6161 16 1 - 6289 16 1 - 6417 16 1 - 6545 16 1 - 6673 16 1 - 6801 16 1 - 6929 16 1 - 7057 16 1 - 7185 16 
1 - 7313 16 1 - 7441 16 1 - 7569 16 1 - 7697 16 1 - 7825 16 1 - 7953 16 1 - 8081 16 1 - 33 16 2 - 161 16 2 - 289 16 2 - 417 16 2 - 545 16 2 - 673 16 2 - 801 16 2 - 929 16 2 - 1057 16 2 - 1185 16 2 - 1313 16 2 - 1441 16 2 - 1569 16 2 - 1697 16 2 - 1825 16 2 - 1953 16 2 - 2081 16 2 - 2209 16 2 - 2337 16 2 - 2465 16 2 - 2593 16 2 - 2721 16 2 - 2849 16 2 - 2977 16 2 - 3105 16 2 - 3233 16 2 - 3361 16 2 - 3489 16 2 - 3617 16 2 - 3745 16 2 - 3873 16 2 - 4001 16 2 - 4129 16 2 - 4257 16 2 - 4385 16 2 - 4513 16 2 - 4641 16 2 - 4769 16 2 - 4897 16 2 - 5025 16 2 - 5153 16 2 - 5281 16 2 - 5409 16 2 - 5537 16 2 - 5665 16 2 - 5793 16 2 - 5921 16 2 - 6049 16 2 - 6177 16 2 - 6305 16 2 - 6433 16 2 - 6561 16 2 - 6689 16 2 - 6817 16 2 - 6945 16 2 - 7073 16 2 - 7201 16 2 - 7329 16 2 - 7457 16 2 - 7585 16 2 - 7713 16 2 - 7841 16 2 - 7969 16 2 - 8097 16 2 - 49 16 3 - 177 16 3 - 305 16 3 - 433 16 3 - 561 16 3 - 689 16 3 - 817 16 3 - 945 16 3 - 1073 16 3 - 1201 16 3 - 1329 16 3 - 1457 16 3 - 1585 16 3 - 1713 16 3 - 1841 16 3 - 1969 16 3 - 2097 16 3 - 2225 16 3 - 2353 16 3 - 2481 16 3 - 2609 16 3 - 2737 16 3 - 2865 16 3 - 2993 16 3 - 3121 16 3 - 3249 16 3 - 3377 16 3 - 3505 16 3 - 3633 16 3 - 3761 16 3 - 3889 16 3 - 4017 16 3 - 4145 16 3 - 4273 16 3 - 4401 16 3 - 4529 16 3 - 4657 16 3 - 4785 16 3 - 4913 16 3 - 5041 16 3 - 5169 16 3 - 5297 16 3 - 5425 16 3 - 5553 16 3 - 5681 16 3 - 5809 16 3 - 5937 16 3 - 6065 16 3 - 6193 16 3 - 6321 16 3 - 6449 16 3 - 6577 16 3 - 6705 16 3 - 6833 16 3 - 6961 16 3 - 7089 16 3 - 7217 16 3 - 7345 16 3 - 7473 16 3 - 7601 16 3 - 7729 16 3 - 7857 16 3 - 7985 16 3 - 8113 16 3 - 65 16 4 - 193 16 4 - 321 16 4 - 449 16 4 - 577 16 4 - 705 16 4 - 833 16 4 - 961 16 4 - 1089 16 4 - 1217 16 4 - 1345 16 4 - 1473 16 4 - 1601 16 4 - 1729 16 4 - 1857 16 4 - 1985 16 4 - 2113 16 4 - 2241 16 4 - 2369 16 4 - 2497 16 4 - 2625 16 4 - 2753 16 4 - 2881 16 4 - 3009 16 4 - 3137 16 4 - 3265 16 4 - 3393 16 4 - 3521 16 4 - 3649 16 4 - 3777 16 4 - 3905 16 4 - 4033 16 4 - 4161 16 4 - 4289 16 4 - 4417 16 4 - 4545 16 4 - 4673 16 4 - 4801 16 4 - 4929 16 4 - 5057 16 4 - 5185 16 4 - 5313 16 4 - 5441 16 4 - 5569 16 4 - 5697 16 4 - 5825 16 4 - 5953 16 4 - 6081 16 4 - 6209 16 4 - 6337 16 4 - 6465 16 4 - 6593 16 4 - 6721 16 4 - 6849 16 4 - 6977 16 4 - 7105 16 4 - 7233 16 4 - 7361 16 4 - 7489 16 4 - 7617 16 4 - 7745 16 4 - 7873 16 4 - 8001 16 4 - 8129 16 4 - 81 16 5 - 209 16 5 - 337 16 5 - 465 16 5 - 593 16 5 - 721 16 5 - 849 16 5 - 977 16 5 - 1105 16 5 - 1233 16 5 - 1361 16 5 - 1489 16 5 - 1617 16 5 - 1745 16 5 - 1873 16 5 - 2001 16 5 - 2129 16 5 - 2257 16 5 - 2385 16 5 - 2513 16 5 - 2641 16 5 - 2769 16 5 - 2897 16 5 - 3025 16 5 - 3153 16 5 - 3281 16 5 - 3409 16 5 - 3537 16 5 - 3665 16 5 - 3793 16 5 - 3921 16 5 - 4049 16 5 - 4177 16 5 - 4305 16 5 - 4433 16 5 - 4561 16 5 - 4689 16 5 - 4817 16 5 - 4945 16 5 - 5073 16 5 - 5201 16 5 - 5329 16 5 - 5457 16 5 - 5585 16 5 - 5713 16 5 - 5841 16 5 - 5969 16 5 - 6097 16 5 - 6225 16 5 - 6353 16 5 - 6481 16 5 - 6609 16 5 - 6737 16 5 - 6865 16 5 - 6993 16 5 - 7121 16 5 - 7249 16 5 - 7377 16 5 - 7505 16 5 - 7633 16 5 - 7761 16 5 - 7889 16 5 - 8017 16 5 - 8145 16 5 - 97 16 6 - 225 16 6 - 353 16 6 - 481 16 6 - 609 16 6 - 737 16 6 - 865 16 6 - 993 16 6 - 1121 16 6 - 1249 16 6 - 1377 16 6 - 1505 16 6 - 1633 16 6 - 1761 16 6 - 1889 16 6 - 2017 16 6 - 2145 16 6 - 2273 16 6 - 2401 16 6 - 2529 16 6 - 2657 16 6 - 2785 16 6 - 2913 16 6 - 3041 16 6 - 3169 16 6 - 3297 16 6 - 3425 16 6 - 3553 16 6 - 3681 16 6 - 3809 16 6 - 3937 16 6 - 4065 16 6 - 4193 16 6 - 4321 16 6 - 4449 16 6 - 4577 16 6 - 4705 16 
6 - 4833 16 6 - 4961 16 6 - 5089 16 6 - 5217 16 6 - 5345 16 6 - 5473 16 6 - 5601 16 6 - 5729 16 6 - 5857 16 6 - 5985 16 6 - 6113 16 6 - 6241 16 6 - 6369 16 6 - 6497 16 6 - 6625 16 6 - 6753 16 6 - 6881 16 6 - 7009 16 6 - 7137 16 6 - 7265 16 6 - 7393 16 6 - 7521 16 6 - 7649 16 6 - 7777 16 6 - 7905 16 6 - 8033 16 6 - 8161 16 6 - 113 16 7 - 241 16 7 - 369 16 7 - 497 16 7 - 625 16 7 - 753 16 7 - 881 16 7 - 1009 16 7 - 1137 16 7 - 1265 16 7 - 1393 16 7 - 1521 16 7 - 1649 16 7 - 1777 16 7 - 1905 16 7 - 2033 16 7 - 2161 16 7 - 2289 16 7 - 2417 16 7 - 2545 16 7 - 2673 16 7 - 2801 16 7 - 2929 16 7 - 3057 16 7 - 3185 16 7 - 3313 16 7 - 3441 16 7 - 3569 16 7 - 3697 16 7 - 3825 16 7 - 3953 16 7 - 4081 16 7 - 4209 16 7 - 4337 16 7 - 4465 16 7 - 4593 16 7 - 4721 16 7 - 4849 16 7 - 4977 16 7 - 5105 16 7 - 5233 16 7 - 5361 16 7 - 5489 16 7 - 5617 16 7 - 5745 16 7 - 5873 16 7 - 6001 16 7 - 6129 16 7 - 6257 16 7 - 6385 16 7 - 6513 16 7 - 6641 16 7 - 6769 16 7 - 6897 16 7 - 7025 16 7 - 7153 16 7 - 7281 16 7 - 7409 16 7 - 7537 16 7 - 7665 16 7 - 7793 16 7 - 7921 16 7 - 8049 16 7 - 8177 16 7 diff --git a/src/externals/mct/benchmarks/T42.8pR b/src/externals/mct/benchmarks/T42.8pR deleted file mode 100644 index 5f3cd204fb5..00000000000 --- a/src/externals/mct/benchmarks/T42.8pR +++ /dev/null @@ -1,12 +0,0 @@ - 8 - 1 - 8 - 8192 - 1 1024 0 - 1025 1024 1 - 2049 1024 2 - 3073 1024 3 - 4097 1024 4 - 5121 1024 5 - 6145 1024 6 - 7169 1024 7 diff --git a/src/externals/mct/benchmarks/gx1.8pC b/src/externals/mct/benchmarks/gx1.8pC deleted file mode 100644 index a183292daf3..00000000000 --- a/src/externals/mct/benchmarks/gx1.8pC +++ /dev/null @@ -1,3076 +0,0 @@ - 8 - 2 - 3072 - 122880 - 1 40 0 - 321 40 0 - 641 40 0 - 961 40 0 - 1281 40 0 - 1601 40 0 - 1921 40 0 - 2241 40 0 - 2561 40 0 - 2881 40 0 - 3201 40 0 - 3521 40 0 - 3841 40 0 - 4161 40 0 - 4481 40 0 - 4801 40 0 - 5121 40 0 - 5441 40 0 - 5761 40 0 - 6081 40 0 - 6401 40 0 - 6721 40 0 - 7041 40 0 - 7361 40 0 - 7681 40 0 - 8001 40 0 - 8321 40 0 - 8641 40 0 - 8961 40 0 - 9281 40 0 - 9601 40 0 - 9921 40 0 - 10241 40 0 - 10561 40 0 - 10881 40 0 - 11201 40 0 - 11521 40 0 - 11841 40 0 - 12161 40 0 - 12481 40 0 - 12801 40 0 - 13121 40 0 - 13441 40 0 - 13761 40 0 - 14081 40 0 - 14401 40 0 - 14721 40 0 - 15041 40 0 - 15361 40 0 - 15681 40 0 - 16001 40 0 - 16321 40 0 - 16641 40 0 - 16961 40 0 - 17281 40 0 - 17601 40 0 - 17921 40 0 - 18241 40 0 - 18561 40 0 - 18881 40 0 - 19201 40 0 - 19521 40 0 - 19841 40 0 - 20161 40 0 - 20481 40 0 - 20801 40 0 - 21121 40 0 - 21441 40 0 - 21761 40 0 - 22081 40 0 - 22401 40 0 - 22721 40 0 - 23041 40 0 - 23361 40 0 - 23681 40 0 - 24001 40 0 - 24321 40 0 - 24641 40 0 - 24961 40 0 - 25281 40 0 - 25601 40 0 - 25921 40 0 - 26241 40 0 - 26561 40 0 - 26881 40 0 - 27201 40 0 - 27521 40 0 - 27841 40 0 - 28161 40 0 - 28481 40 0 - 28801 40 0 - 29121 40 0 - 29441 40 0 - 29761 40 0 - 30081 40 0 - 30401 40 0 - 30721 40 0 - 31041 40 0 - 31361 40 0 - 31681 40 0 - 32001 40 0 - 32321 40 0 - 32641 40 0 - 32961 40 0 - 33281 40 0 - 33601 40 0 - 33921 40 0 - 34241 40 0 - 34561 40 0 - 34881 40 0 - 35201 40 0 - 35521 40 0 - 35841 40 0 - 36161 40 0 - 36481 40 0 - 36801 40 0 - 37121 40 0 - 37441 40 0 - 37761 40 0 - 38081 40 0 - 38401 40 0 - 38721 40 0 - 39041 40 0 - 39361 40 0 - 39681 40 0 - 40001 40 0 - 40321 40 0 - 40641 40 0 - 40961 40 0 - 41281 40 0 - 41601 40 0 - 41921 40 0 - 42241 40 0 - 42561 40 0 - 42881 40 0 - 43201 40 0 - 43521 40 0 - 43841 40 0 - 44161 40 0 - 44481 40 0 - 44801 40 0 - 45121 40 0 - 45441 40 0 - 45761 40 0 - 46081 40 0 - 46401 40 0 - 46721 40 0 - 
47041 40 0 - 47361 40 0 - 47681 40 0 - 48001 40 0 - 48321 40 0 - 48641 40 0 - 48961 40 0 - 49281 40 0 - 49601 40 0 - 49921 40 0 - 50241 40 0 - 50561 40 0 - 50881 40 0 - 51201 40 0 - 51521 40 0 - 51841 40 0 - 52161 40 0 - 52481 40 0 - 52801 40 0 - 53121 40 0 - 53441 40 0 - 53761 40 0 - 54081 40 0 - 54401 40 0 - 54721 40 0 - 55041 40 0 - 55361 40 0 - 55681 40 0 - 56001 40 0 - 56321 40 0 - 56641 40 0 - 56961 40 0 - 57281 40 0 - 57601 40 0 - 57921 40 0 - 58241 40 0 - 58561 40 0 - 58881 40 0 - 59201 40 0 - 59521 40 0 - 59841 40 0 - 60161 40 0 - 60481 40 0 - 60801 40 0 - 61121 40 0 - 61441 40 0 - 61761 40 0 - 62081 40 0 - 62401 40 0 - 62721 40 0 - 63041 40 0 - 63361 40 0 - 63681 40 0 - 64001 40 0 - 64321 40 0 - 64641 40 0 - 64961 40 0 - 65281 40 0 - 65601 40 0 - 65921 40 0 - 66241 40 0 - 66561 40 0 - 66881 40 0 - 67201 40 0 - 67521 40 0 - 67841 40 0 - 68161 40 0 - 68481 40 0 - 68801 40 0 - 69121 40 0 - 69441 40 0 - 69761 40 0 - 70081 40 0 - 70401 40 0 - 70721 40 0 - 71041 40 0 - 71361 40 0 - 71681 40 0 - 72001 40 0 - 72321 40 0 - 72641 40 0 - 72961 40 0 - 73281 40 0 - 73601 40 0 - 73921 40 0 - 74241 40 0 - 74561 40 0 - 74881 40 0 - 75201 40 0 - 75521 40 0 - 75841 40 0 - 76161 40 0 - 76481 40 0 - 76801 40 0 - 77121 40 0 - 77441 40 0 - 77761 40 0 - 78081 40 0 - 78401 40 0 - 78721 40 0 - 79041 40 0 - 79361 40 0 - 79681 40 0 - 80001 40 0 - 80321 40 0 - 80641 40 0 - 80961 40 0 - 81281 40 0 - 81601 40 0 - 81921 40 0 - 82241 40 0 - 82561 40 0 - 82881 40 0 - 83201 40 0 - 83521 40 0 - 83841 40 0 - 84161 40 0 - 84481 40 0 - 84801 40 0 - 85121 40 0 - 85441 40 0 - 85761 40 0 - 86081 40 0 - 86401 40 0 - 86721 40 0 - 87041 40 0 - 87361 40 0 - 87681 40 0 - 88001 40 0 - 88321 40 0 - 88641 40 0 - 88961 40 0 - 89281 40 0 - 89601 40 0 - 89921 40 0 - 90241 40 0 - 90561 40 0 - 90881 40 0 - 91201 40 0 - 91521 40 0 - 91841 40 0 - 92161 40 0 - 92481 40 0 - 92801 40 0 - 93121 40 0 - 93441 40 0 - 93761 40 0 - 94081 40 0 - 94401 40 0 - 94721 40 0 - 95041 40 0 - 95361 40 0 - 95681 40 0 - 96001 40 0 - 96321 40 0 - 96641 40 0 - 96961 40 0 - 97281 40 0 - 97601 40 0 - 97921 40 0 - 98241 40 0 - 98561 40 0 - 98881 40 0 - 99201 40 0 - 99521 40 0 - 99841 40 0 - 100161 40 0 - 100481 40 0 - 100801 40 0 - 101121 40 0 - 101441 40 0 - 101761 40 0 - 102081 40 0 - 102401 40 0 - 102721 40 0 - 103041 40 0 - 103361 40 0 - 103681 40 0 - 104001 40 0 - 104321 40 0 - 104641 40 0 - 104961 40 0 - 105281 40 0 - 105601 40 0 - 105921 40 0 - 106241 40 0 - 106561 40 0 - 106881 40 0 - 107201 40 0 - 107521 40 0 - 107841 40 0 - 108161 40 0 - 108481 40 0 - 108801 40 0 - 109121 40 0 - 109441 40 0 - 109761 40 0 - 110081 40 0 - 110401 40 0 - 110721 40 0 - 111041 40 0 - 111361 40 0 - 111681 40 0 - 112001 40 0 - 112321 40 0 - 112641 40 0 - 112961 40 0 - 113281 40 0 - 113601 40 0 - 113921 40 0 - 114241 40 0 - 114561 40 0 - 114881 40 0 - 115201 40 0 - 115521 40 0 - 115841 40 0 - 116161 40 0 - 116481 40 0 - 116801 40 0 - 117121 40 0 - 117441 40 0 - 117761 40 0 - 118081 40 0 - 118401 40 0 - 118721 40 0 - 119041 40 0 - 119361 40 0 - 119681 40 0 - 120001 40 0 - 120321 40 0 - 120641 40 0 - 120961 40 0 - 121281 40 0 - 121601 40 0 - 121921 40 0 - 122241 40 0 - 122561 40 0 - 41 40 1 - 361 40 1 - 681 40 1 - 1001 40 1 - 1321 40 1 - 1641 40 1 - 1961 40 1 - 2281 40 1 - 2601 40 1 - 2921 40 1 - 3241 40 1 - 3561 40 1 - 3881 40 1 - 4201 40 1 - 4521 40 1 - 4841 40 1 - 5161 40 1 - 5481 40 1 - 5801 40 1 - 6121 40 1 - 6441 40 1 - 6761 40 1 - 7081 40 1 - 7401 40 1 - 7721 40 1 - 8041 40 1 - 8361 40 1 - 8681 40 1 - 9001 40 1 - 9321 40 1 - 9641 40 1 - 9961 40 1 - 10281 40 1 - 10601 40 
1 - 10921 40 1 - 11241 40 1 - 11561 40 1 - 11881 40 1 - 12201 40 1 - 12521 40 1 - 12841 40 1 - 13161 40 1 - 13481 40 1 - 13801 40 1 - 14121 40 1 - 14441 40 1 - 14761 40 1 - 15081 40 1 - 15401 40 1 - 15721 40 1 - 16041 40 1 - 16361 40 1 - 16681 40 1 - 17001 40 1 - 17321 40 1 - 17641 40 1 - 17961 40 1 - 18281 40 1 - 18601 40 1 - 18921 40 1 - 19241 40 1 - 19561 40 1 - 19881 40 1 - 20201 40 1 - 20521 40 1 - 20841 40 1 - 21161 40 1 - 21481 40 1 - 21801 40 1 - 22121 40 1 - 22441 40 1 - 22761 40 1 - 23081 40 1 - 23401 40 1 - 23721 40 1 - 24041 40 1 - 24361 40 1 - 24681 40 1 - 25001 40 1 - 25321 40 1 - 25641 40 1 - 25961 40 1 - 26281 40 1 - 26601 40 1 - 26921 40 1 - 27241 40 1 - 27561 40 1 - 27881 40 1 - 28201 40 1 - 28521 40 1 - 28841 40 1 - 29161 40 1 - 29481 40 1 - 29801 40 1 - 30121 40 1 - 30441 40 1 - 30761 40 1 - 31081 40 1 - 31401 40 1 - 31721 40 1 - 32041 40 1 - 32361 40 1 - 32681 40 1 - 33001 40 1 - 33321 40 1 - 33641 40 1 - 33961 40 1 - 34281 40 1 - 34601 40 1 - 34921 40 1 - 35241 40 1 - 35561 40 1 - 35881 40 1 - 36201 40 1 - 36521 40 1 - 36841 40 1 - 37161 40 1 - 37481 40 1 - 37801 40 1 - 38121 40 1 - 38441 40 1 - 38761 40 1 - 39081 40 1 - 39401 40 1 - 39721 40 1 - 40041 40 1 - 40361 40 1 - 40681 40 1 - 41001 40 1 - 41321 40 1 - 41641 40 1 - 41961 40 1 - 42281 40 1 - 42601 40 1 - 42921 40 1 - 43241 40 1 - 43561 40 1 - 43881 40 1 - 44201 40 1 - 44521 40 1 - 44841 40 1 - 45161 40 1 - 45481 40 1 - 45801 40 1 - 46121 40 1 - 46441 40 1 - 46761 40 1 - 47081 40 1 - 47401 40 1 - 47721 40 1 - 48041 40 1 - 48361 40 1 - 48681 40 1 - 49001 40 1 - 49321 40 1 - 49641 40 1 - 49961 40 1 - 50281 40 1 - 50601 40 1 - 50921 40 1 - 51241 40 1 - 51561 40 1 - 51881 40 1 - 52201 40 1 - 52521 40 1 - 52841 40 1 - 53161 40 1 - 53481 40 1 - 53801 40 1 - 54121 40 1 - 54441 40 1 - 54761 40 1 - 55081 40 1 - 55401 40 1 - 55721 40 1 - 56041 40 1 - 56361 40 1 - 56681 40 1 - 57001 40 1 - 57321 40 1 - 57641 40 1 - 57961 40 1 - 58281 40 1 - 58601 40 1 - 58921 40 1 - 59241 40 1 - 59561 40 1 - 59881 40 1 - 60201 40 1 - 60521 40 1 - 60841 40 1 - 61161 40 1 - 61481 40 1 - 61801 40 1 - 62121 40 1 - 62441 40 1 - 62761 40 1 - 63081 40 1 - 63401 40 1 - 63721 40 1 - 64041 40 1 - 64361 40 1 - 64681 40 1 - 65001 40 1 - 65321 40 1 - 65641 40 1 - 65961 40 1 - 66281 40 1 - 66601 40 1 - 66921 40 1 - 67241 40 1 - 67561 40 1 - 67881 40 1 - 68201 40 1 - 68521 40 1 - 68841 40 1 - 69161 40 1 - 69481 40 1 - 69801 40 1 - 70121 40 1 - 70441 40 1 - 70761 40 1 - 71081 40 1 - 71401 40 1 - 71721 40 1 - 72041 40 1 - 72361 40 1 - 72681 40 1 - 73001 40 1 - 73321 40 1 - 73641 40 1 - 73961 40 1 - 74281 40 1 - 74601 40 1 - 74921 40 1 - 75241 40 1 - 75561 40 1 - 75881 40 1 - 76201 40 1 - 76521 40 1 - 76841 40 1 - 77161 40 1 - 77481 40 1 - 77801 40 1 - 78121 40 1 - 78441 40 1 - 78761 40 1 - 79081 40 1 - 79401 40 1 - 79721 40 1 - 80041 40 1 - 80361 40 1 - 80681 40 1 - 81001 40 1 - 81321 40 1 - 81641 40 1 - 81961 40 1 - 82281 40 1 - 82601 40 1 - 82921 40 1 - 83241 40 1 - 83561 40 1 - 83881 40 1 - 84201 40 1 - 84521 40 1 - 84841 40 1 - 85161 40 1 - 85481 40 1 - 85801 40 1 - 86121 40 1 - 86441 40 1 - 86761 40 1 - 87081 40 1 - 87401 40 1 - 87721 40 1 - 88041 40 1 - 88361 40 1 - 88681 40 1 - 89001 40 1 - 89321 40 1 - 89641 40 1 - 89961 40 1 - 90281 40 1 - 90601 40 1 - 90921 40 1 - 91241 40 1 - 91561 40 1 - 91881 40 1 - 92201 40 1 - 92521 40 1 - 92841 40 1 - 93161 40 1 - 93481 40 1 - 93801 40 1 - 94121 40 1 - 94441 40 1 - 94761 40 1 - 95081 40 1 - 95401 40 1 - 95721 40 1 - 96041 40 1 - 96361 40 1 - 96681 40 1 - 97001 40 1 - 97321 40 1 - 97641 40 1 - 97961 40 1 - 
98281 40 1 - 98601 40 1 - 98921 40 1 - 99241 40 1 - 99561 40 1 - 99881 40 1 - 100201 40 1 - 100521 40 1 - 100841 40 1 - 101161 40 1 - 101481 40 1 - 101801 40 1 - 102121 40 1 - 102441 40 1 - 102761 40 1 - 103081 40 1 - 103401 40 1 - 103721 40 1 - 104041 40 1 - 104361 40 1 - 104681 40 1 - 105001 40 1 - 105321 40 1 - 105641 40 1 - 105961 40 1 - 106281 40 1 - 106601 40 1 - 106921 40 1 - 107241 40 1 - 107561 40 1 - 107881 40 1 - 108201 40 1 - 108521 40 1 - 108841 40 1 - 109161 40 1 - 109481 40 1 - 109801 40 1 - 110121 40 1 - 110441 40 1 - 110761 40 1 - 111081 40 1 - 111401 40 1 - 111721 40 1 - 112041 40 1 - 112361 40 1 - 112681 40 1 - 113001 40 1 - 113321 40 1 - 113641 40 1 - 113961 40 1 - 114281 40 1 - 114601 40 1 - 114921 40 1 - 115241 40 1 - 115561 40 1 - 115881 40 1 - 116201 40 1 - 116521 40 1 - 116841 40 1 - 117161 40 1 - 117481 40 1 - 117801 40 1 - 118121 40 1 - 118441 40 1 - 118761 40 1 - 119081 40 1 - 119401 40 1 - 119721 40 1 - 120041 40 1 - 120361 40 1 - 120681 40 1 - 121001 40 1 - 121321 40 1 - 121641 40 1 - 121961 40 1 - 122281 40 1 - 122601 40 1 - 81 40 2 - 401 40 2 - 721 40 2 - 1041 40 2 - 1361 40 2 - 1681 40 2 - 2001 40 2 - 2321 40 2 - 2641 40 2 - 2961 40 2 - 3281 40 2 - 3601 40 2 - 3921 40 2 - 4241 40 2 - 4561 40 2 - 4881 40 2 - 5201 40 2 - 5521 40 2 - 5841 40 2 - 6161 40 2 - 6481 40 2 - 6801 40 2 - 7121 40 2 - 7441 40 2 - 7761 40 2 - 8081 40 2 - 8401 40 2 - 8721 40 2 - 9041 40 2 - 9361 40 2 - 9681 40 2 - 10001 40 2 - 10321 40 2 - 10641 40 2 - 10961 40 2 - 11281 40 2 - 11601 40 2 - 11921 40 2 - 12241 40 2 - 12561 40 2 - 12881 40 2 - 13201 40 2 - 13521 40 2 - 13841 40 2 - 14161 40 2 - 14481 40 2 - 14801 40 2 - 15121 40 2 - 15441 40 2 - 15761 40 2 - 16081 40 2 - 16401 40 2 - 16721 40 2 - 17041 40 2 - 17361 40 2 - 17681 40 2 - 18001 40 2 - 18321 40 2 - 18641 40 2 - 18961 40 2 - 19281 40 2 - 19601 40 2 - 19921 40 2 - 20241 40 2 - 20561 40 2 - 20881 40 2 - 21201 40 2 - 21521 40 2 - 21841 40 2 - 22161 40 2 - 22481 40 2 - 22801 40 2 - 23121 40 2 - 23441 40 2 - 23761 40 2 - 24081 40 2 - 24401 40 2 - 24721 40 2 - 25041 40 2 - 25361 40 2 - 25681 40 2 - 26001 40 2 - 26321 40 2 - 26641 40 2 - 26961 40 2 - 27281 40 2 - 27601 40 2 - 27921 40 2 - 28241 40 2 - 28561 40 2 - 28881 40 2 - 29201 40 2 - 29521 40 2 - 29841 40 2 - 30161 40 2 - 30481 40 2 - 30801 40 2 - 31121 40 2 - 31441 40 2 - 31761 40 2 - 32081 40 2 - 32401 40 2 - 32721 40 2 - 33041 40 2 - 33361 40 2 - 33681 40 2 - 34001 40 2 - 34321 40 2 - 34641 40 2 - 34961 40 2 - 35281 40 2 - 35601 40 2 - 35921 40 2 - 36241 40 2 - 36561 40 2 - 36881 40 2 - 37201 40 2 - 37521 40 2 - 37841 40 2 - 38161 40 2 - 38481 40 2 - 38801 40 2 - 39121 40 2 - 39441 40 2 - 39761 40 2 - 40081 40 2 - 40401 40 2 - 40721 40 2 - 41041 40 2 - 41361 40 2 - 41681 40 2 - 42001 40 2 - 42321 40 2 - 42641 40 2 - 42961 40 2 - 43281 40 2 - 43601 40 2 - 43921 40 2 - 44241 40 2 - 44561 40 2 - 44881 40 2 - 45201 40 2 - 45521 40 2 - 45841 40 2 - 46161 40 2 - 46481 40 2 - 46801 40 2 - 47121 40 2 - 47441 40 2 - 47761 40 2 - 48081 40 2 - 48401 40 2 - 48721 40 2 - 49041 40 2 - 49361 40 2 - 49681 40 2 - 50001 40 2 - 50321 40 2 - 50641 40 2 - 50961 40 2 - 51281 40 2 - 51601 40 2 - 51921 40 2 - 52241 40 2 - 52561 40 2 - 52881 40 2 - 53201 40 2 - 53521 40 2 - 53841 40 2 - 54161 40 2 - 54481 40 2 - 54801 40 2 - 55121 40 2 - 55441 40 2 - 55761 40 2 - 56081 40 2 - 56401 40 2 - 56721 40 2 - 57041 40 2 - 57361 40 2 - 57681 40 2 - 58001 40 2 - 58321 40 2 - 58641 40 2 - 58961 40 2 - 59281 40 2 - 59601 40 2 - 59921 40 2 - 60241 40 2 - 60561 40 2 - 60881 40 2 - 61201 40 2 - 61521 40 2 - 61841 40 
2 - 62161 40 2 - 62481 40 2 - 62801 40 2 - 63121 40 2 - 63441 40 2 - 63761 40 2 - 64081 40 2 - 64401 40 2 - 64721 40 2 - 65041 40 2 - 65361 40 2 - 65681 40 2 - 66001 40 2 - 66321 40 2 - 66641 40 2 - 66961 40 2 - 67281 40 2 - 67601 40 2 - 67921 40 2 - 68241 40 2 - 68561 40 2 - 68881 40 2 - 69201 40 2 - 69521 40 2 - 69841 40 2 - 70161 40 2 - 70481 40 2 - 70801 40 2 - 71121 40 2 - 71441 40 2 - 71761 40 2 - 72081 40 2 - 72401 40 2 - 72721 40 2 - 73041 40 2 - 73361 40 2 - 73681 40 2 - 74001 40 2 - 74321 40 2 - 74641 40 2 - 74961 40 2 - 75281 40 2 - 75601 40 2 - 75921 40 2 - 76241 40 2 - 76561 40 2 - 76881 40 2 - 77201 40 2 - 77521 40 2 - 77841 40 2 - 78161 40 2 - 78481 40 2 - 78801 40 2 - 79121 40 2 - 79441 40 2 - 79761 40 2 - 80081 40 2 - 80401 40 2 - 80721 40 2 - 81041 40 2 - 81361 40 2 - 81681 40 2 - 82001 40 2 - 82321 40 2 - 82641 40 2 - 82961 40 2 - 83281 40 2 - 83601 40 2 - 83921 40 2 - 84241 40 2 - 84561 40 2 - 84881 40 2 - 85201 40 2 - 85521 40 2 - 85841 40 2 - 86161 40 2 - 86481 40 2 - 86801 40 2 - 87121 40 2 - 87441 40 2 - 87761 40 2 - 88081 40 2 - 88401 40 2 - 88721 40 2 - 89041 40 2 - 89361 40 2 - 89681 40 2 - 90001 40 2 - 90321 40 2 - 90641 40 2 - 90961 40 2 - 91281 40 2 - 91601 40 2 - 91921 40 2 - 92241 40 2 - 92561 40 2 - 92881 40 2 - 93201 40 2 - 93521 40 2 - 93841 40 2 - 94161 40 2 - 94481 40 2 - 94801 40 2 - 95121 40 2 - 95441 40 2 - 95761 40 2 - 96081 40 2 - 96401 40 2 - 96721 40 2 - 97041 40 2 - 97361 40 2 - 97681 40 2 - 98001 40 2 - 98321 40 2 - 98641 40 2 - 98961 40 2 - 99281 40 2 - 99601 40 2 - 99921 40 2 - 100241 40 2 - 100561 40 2 - 100881 40 2 - 101201 40 2 - 101521 40 2 - 101841 40 2 - 102161 40 2 - 102481 40 2 - 102801 40 2 - 103121 40 2 - 103441 40 2 - 103761 40 2 - 104081 40 2 - 104401 40 2 - 104721 40 2 - 105041 40 2 - 105361 40 2 - 105681 40 2 - 106001 40 2 - 106321 40 2 - 106641 40 2 - 106961 40 2 - 107281 40 2 - 107601 40 2 - 107921 40 2 - 108241 40 2 - 108561 40 2 - 108881 40 2 - 109201 40 2 - 109521 40 2 - 109841 40 2 - 110161 40 2 - 110481 40 2 - 110801 40 2 - 111121 40 2 - 111441 40 2 - 111761 40 2 - 112081 40 2 - 112401 40 2 - 112721 40 2 - 113041 40 2 - 113361 40 2 - 113681 40 2 - 114001 40 2 - 114321 40 2 - 114641 40 2 - 114961 40 2 - 115281 40 2 - 115601 40 2 - 115921 40 2 - 116241 40 2 - 116561 40 2 - 116881 40 2 - 117201 40 2 - 117521 40 2 - 117841 40 2 - 118161 40 2 - 118481 40 2 - 118801 40 2 - 119121 40 2 - 119441 40 2 - 119761 40 2 - 120081 40 2 - 120401 40 2 - 120721 40 2 - 121041 40 2 - 121361 40 2 - 121681 40 2 - 122001 40 2 - 122321 40 2 - 122641 40 2 - 121 40 3 - 441 40 3 - 761 40 3 - 1081 40 3 - 1401 40 3 - 1721 40 3 - 2041 40 3 - 2361 40 3 - 2681 40 3 - 3001 40 3 - 3321 40 3 - 3641 40 3 - 3961 40 3 - 4281 40 3 - 4601 40 3 - 4921 40 3 - 5241 40 3 - 5561 40 3 - 5881 40 3 - 6201 40 3 - 6521 40 3 - 6841 40 3 - 7161 40 3 - 7481 40 3 - 7801 40 3 - 8121 40 3 - 8441 40 3 - 8761 40 3 - 9081 40 3 - 9401 40 3 - 9721 40 3 - 10041 40 3 - 10361 40 3 - 10681 40 3 - 11001 40 3 - 11321 40 3 - 11641 40 3 - 11961 40 3 - 12281 40 3 - 12601 40 3 - 12921 40 3 - 13241 40 3 - 13561 40 3 - 13881 40 3 - 14201 40 3 - 14521 40 3 - 14841 40 3 - 15161 40 3 - 15481 40 3 - 15801 40 3 - 16121 40 3 - 16441 40 3 - 16761 40 3 - 17081 40 3 - 17401 40 3 - 17721 40 3 - 18041 40 3 - 18361 40 3 - 18681 40 3 - 19001 40 3 - 19321 40 3 - 19641 40 3 - 19961 40 3 - 20281 40 3 - 20601 40 3 - 20921 40 3 - 21241 40 3 - 21561 40 3 - 21881 40 3 - 22201 40 3 - 22521 40 3 - 22841 40 3 - 23161 40 3 - 23481 40 3 - 23801 40 3 - 24121 40 3 - 24441 40 3 - 24761 40 3 - 25081 40 3 - 25401 40 3 - 
25721 40 3 - 26041 40 3 - 26361 40 3 - 26681 40 3 - 27001 40 3 - 27321 40 3 - 27641 40 3 - 27961 40 3 - 28281 40 3 - 28601 40 3 - 28921 40 3 - 29241 40 3 - 29561 40 3 - 29881 40 3 - 30201 40 3 - 30521 40 3 - 30841 40 3 - 31161 40 3 - 31481 40 3 - 31801 40 3 - 32121 40 3 - 32441 40 3 - 32761 40 3 - 33081 40 3 - 33401 40 3 - 33721 40 3 - 34041 40 3 - 34361 40 3 - 34681 40 3 - 35001 40 3 - 35321 40 3 - 35641 40 3 - 35961 40 3 - 36281 40 3 - 36601 40 3 - 36921 40 3 - 37241 40 3 - 37561 40 3 - 37881 40 3 - 38201 40 3 - 38521 40 3 - 38841 40 3 - 39161 40 3 - 39481 40 3 - 39801 40 3 - 40121 40 3 - 40441 40 3 - 40761 40 3 - 41081 40 3 - 41401 40 3 - 41721 40 3 - 42041 40 3 - 42361 40 3 - 42681 40 3 - 43001 40 3 - 43321 40 3 - 43641 40 3 - 43961 40 3 - 44281 40 3 - 44601 40 3 - 44921 40 3 - 45241 40 3 - 45561 40 3 - 45881 40 3 - 46201 40 3 - 46521 40 3 - 46841 40 3 - 47161 40 3 - 47481 40 3 - 47801 40 3 - 48121 40 3 - 48441 40 3 - 48761 40 3 - 49081 40 3 - 49401 40 3 - 49721 40 3 - 50041 40 3 - 50361 40 3 - 50681 40 3 - 51001 40 3 - 51321 40 3 - 51641 40 3 - 51961 40 3 - 52281 40 3 - 52601 40 3 - 52921 40 3 - 53241 40 3 - 53561 40 3 - 53881 40 3 - 54201 40 3 - 54521 40 3 - 54841 40 3 - 55161 40 3 - 55481 40 3 - 55801 40 3 - 56121 40 3 - 56441 40 3 - 56761 40 3 - 57081 40 3 - 57401 40 3 - 57721 40 3 - 58041 40 3 - 58361 40 3 - 58681 40 3 - 59001 40 3 - 59321 40 3 - 59641 40 3 - 59961 40 3 - 60281 40 3 - 60601 40 3 - 60921 40 3 - 61241 40 3 - 61561 40 3 - 61881 40 3 - 62201 40 3 - 62521 40 3 - 62841 40 3 - 63161 40 3 - 63481 40 3 - 63801 40 3 - 64121 40 3 - 64441 40 3 - 64761 40 3 - 65081 40 3 - 65401 40 3 - 65721 40 3 - 66041 40 3 - 66361 40 3 - 66681 40 3 - 67001 40 3 - 67321 40 3 - 67641 40 3 - 67961 40 3 - 68281 40 3 - 68601 40 3 - 68921 40 3 - 69241 40 3 - 69561 40 3 - 69881 40 3 - 70201 40 3 - 70521 40 3 - 70841 40 3 - 71161 40 3 - 71481 40 3 - 71801 40 3 - 72121 40 3 - 72441 40 3 - 72761 40 3 - 73081 40 3 - 73401 40 3 - 73721 40 3 - 74041 40 3 - 74361 40 3 - 74681 40 3 - 75001 40 3 - 75321 40 3 - 75641 40 3 - 75961 40 3 - 76281 40 3 - 76601 40 3 - 76921 40 3 - 77241 40 3 - 77561 40 3 - 77881 40 3 - 78201 40 3 - 78521 40 3 - 78841 40 3 - 79161 40 3 - 79481 40 3 - 79801 40 3 - 80121 40 3 - 80441 40 3 - 80761 40 3 - 81081 40 3 - 81401 40 3 - 81721 40 3 - 82041 40 3 - 82361 40 3 - 82681 40 3 - 83001 40 3 - 83321 40 3 - 83641 40 3 - 83961 40 3 - 84281 40 3 - 84601 40 3 - 84921 40 3 - 85241 40 3 - 85561 40 3 - 85881 40 3 - 86201 40 3 - 86521 40 3 - 86841 40 3 - 87161 40 3 - 87481 40 3 - 87801 40 3 - 88121 40 3 - 88441 40 3 - 88761 40 3 - 89081 40 3 - 89401 40 3 - 89721 40 3 - 90041 40 3 - 90361 40 3 - 90681 40 3 - 91001 40 3 - 91321 40 3 - 91641 40 3 - 91961 40 3 - 92281 40 3 - 92601 40 3 - 92921 40 3 - 93241 40 3 - 93561 40 3 - 93881 40 3 - 94201 40 3 - 94521 40 3 - 94841 40 3 - 95161 40 3 - 95481 40 3 - 95801 40 3 - 96121 40 3 - 96441 40 3 - 96761 40 3 - 97081 40 3 - 97401 40 3 - 97721 40 3 - 98041 40 3 - 98361 40 3 - 98681 40 3 - 99001 40 3 - 99321 40 3 - 99641 40 3 - 99961 40 3 - 100281 40 3 - 100601 40 3 - 100921 40 3 - 101241 40 3 - 101561 40 3 - 101881 40 3 - 102201 40 3 - 102521 40 3 - 102841 40 3 - 103161 40 3 - 103481 40 3 - 103801 40 3 - 104121 40 3 - 104441 40 3 - 104761 40 3 - 105081 40 3 - 105401 40 3 - 105721 40 3 - 106041 40 3 - 106361 40 3 - 106681 40 3 - 107001 40 3 - 107321 40 3 - 107641 40 3 - 107961 40 3 - 108281 40 3 - 108601 40 3 - 108921 40 3 - 109241 40 3 - 109561 40 3 - 109881 40 3 - 110201 40 3 - 110521 40 3 - 110841 40 3 - 111161 40 3 - 111481 40 3 - 111801 40 3 - 112121 
40 3 - 112441 40 3 - 112761 40 3 - 113081 40 3 - 113401 40 3 - 113721 40 3 - 114041 40 3 - 114361 40 3 - 114681 40 3 - 115001 40 3 - 115321 40 3 - 115641 40 3 - 115961 40 3 - 116281 40 3 - 116601 40 3 - 116921 40 3 - 117241 40 3 - 117561 40 3 - 117881 40 3 - 118201 40 3 - 118521 40 3 - 118841 40 3 - 119161 40 3 - 119481 40 3 - 119801 40 3 - 120121 40 3 - 120441 40 3 - 120761 40 3 - 121081 40 3 - 121401 40 3 - 121721 40 3 - 122041 40 3 - 122361 40 3 - 122681 40 3 - 161 40 4 - 481 40 4 - 801 40 4 - 1121 40 4 - 1441 40 4 - 1761 40 4 - 2081 40 4 - 2401 40 4 - 2721 40 4 - 3041 40 4 - 3361 40 4 - 3681 40 4 - 4001 40 4 - 4321 40 4 - 4641 40 4 - 4961 40 4 - 5281 40 4 - 5601 40 4 - 5921 40 4 - 6241 40 4 - 6561 40 4 - 6881 40 4 - 7201 40 4 - 7521 40 4 - 7841 40 4 - 8161 40 4 - 8481 40 4 - 8801 40 4 - 9121 40 4 - 9441 40 4 - 9761 40 4 - 10081 40 4 - 10401 40 4 - 10721 40 4 - 11041 40 4 - 11361 40 4 - 11681 40 4 - 12001 40 4 - 12321 40 4 - 12641 40 4 - 12961 40 4 - 13281 40 4 - 13601 40 4 - 13921 40 4 - 14241 40 4 - 14561 40 4 - 14881 40 4 - 15201 40 4 - 15521 40 4 - 15841 40 4 - 16161 40 4 - 16481 40 4 - 16801 40 4 - 17121 40 4 - 17441 40 4 - 17761 40 4 - 18081 40 4 - 18401 40 4 - 18721 40 4 - 19041 40 4 - 19361 40 4 - 19681 40 4 - 20001 40 4 - 20321 40 4 - 20641 40 4 - 20961 40 4 - 21281 40 4 - 21601 40 4 - 21921 40 4 - 22241 40 4 - 22561 40 4 - 22881 40 4 - 23201 40 4 - 23521 40 4 - 23841 40 4 - 24161 40 4 - 24481 40 4 - 24801 40 4 - 25121 40 4 - 25441 40 4 - 25761 40 4 - 26081 40 4 - 26401 40 4 - 26721 40 4 - 27041 40 4 - 27361 40 4 - 27681 40 4 - 28001 40 4 - 28321 40 4 - 28641 40 4 - 28961 40 4 - 29281 40 4 - 29601 40 4 - 29921 40 4 - 30241 40 4 - 30561 40 4 - 30881 40 4 - 31201 40 4 - 31521 40 4 - 31841 40 4 - 32161 40 4 - 32481 40 4 - 32801 40 4 - 33121 40 4 - 33441 40 4 - 33761 40 4 - 34081 40 4 - 34401 40 4 - 34721 40 4 - 35041 40 4 - 35361 40 4 - 35681 40 4 - 36001 40 4 - 36321 40 4 - 36641 40 4 - 36961 40 4 - 37281 40 4 - 37601 40 4 - 37921 40 4 - 38241 40 4 - 38561 40 4 - 38881 40 4 - 39201 40 4 - 39521 40 4 - 39841 40 4 - 40161 40 4 - 40481 40 4 - 40801 40 4 - 41121 40 4 - 41441 40 4 - 41761 40 4 - 42081 40 4 - 42401 40 4 - 42721 40 4 - 43041 40 4 - 43361 40 4 - 43681 40 4 - 44001 40 4 - 44321 40 4 - 44641 40 4 - 44961 40 4 - 45281 40 4 - 45601 40 4 - 45921 40 4 - 46241 40 4 - 46561 40 4 - 46881 40 4 - 47201 40 4 - 47521 40 4 - 47841 40 4 - 48161 40 4 - 48481 40 4 - 48801 40 4 - 49121 40 4 - 49441 40 4 - 49761 40 4 - 50081 40 4 - 50401 40 4 - 50721 40 4 - 51041 40 4 - 51361 40 4 - 51681 40 4 - 52001 40 4 - 52321 40 4 - 52641 40 4 - 52961 40 4 - 53281 40 4 - 53601 40 4 - 53921 40 4 - 54241 40 4 - 54561 40 4 - 54881 40 4 - 55201 40 4 - 55521 40 4 - 55841 40 4 - 56161 40 4 - 56481 40 4 - 56801 40 4 - 57121 40 4 - 57441 40 4 - 57761 40 4 - 58081 40 4 - 58401 40 4 - 58721 40 4 - 59041 40 4 - 59361 40 4 - 59681 40 4 - 60001 40 4 - 60321 40 4 - 60641 40 4 - 60961 40 4 - 61281 40 4 - 61601 40 4 - 61921 40 4 - 62241 40 4 - 62561 40 4 - 62881 40 4 - 63201 40 4 - 63521 40 4 - 63841 40 4 - 64161 40 4 - 64481 40 4 - 64801 40 4 - 65121 40 4 - 65441 40 4 - 65761 40 4 - 66081 40 4 - 66401 40 4 - 66721 40 4 - 67041 40 4 - 67361 40 4 - 67681 40 4 - 68001 40 4 - 68321 40 4 - 68641 40 4 - 68961 40 4 - 69281 40 4 - 69601 40 4 - 69921 40 4 - 70241 40 4 - 70561 40 4 - 70881 40 4 - 71201 40 4 - 71521 40 4 - 71841 40 4 - 72161 40 4 - 72481 40 4 - 72801 40 4 - 73121 40 4 - 73441 40 4 - 73761 40 4 - 74081 40 4 - 74401 40 4 - 74721 40 4 - 75041 40 4 - 75361 40 4 - 75681 40 4 - 76001 40 4 - 76321 40 4 - 76641 40 4 - 
76961 40 4 - 77281 40 4 - 77601 40 4 - 77921 40 4 - 78241 40 4 - 78561 40 4 - 78881 40 4 - 79201 40 4 - 79521 40 4 - 79841 40 4 - 80161 40 4 - 80481 40 4 - 80801 40 4 - 81121 40 4 - 81441 40 4 - 81761 40 4 - 82081 40 4 - 82401 40 4 - 82721 40 4 - 83041 40 4 - 83361 40 4 - 83681 40 4 - 84001 40 4 - 84321 40 4 - 84641 40 4 - 84961 40 4 - 85281 40 4 - 85601 40 4 - 85921 40 4 - 86241 40 4 - 86561 40 4 - 86881 40 4 - 87201 40 4 - 87521 40 4 - 87841 40 4 - 88161 40 4 - 88481 40 4 - 88801 40 4 - 89121 40 4 - 89441 40 4 - 89761 40 4 - 90081 40 4 - 90401 40 4 - 90721 40 4 - 91041 40 4 - 91361 40 4 - 91681 40 4 - 92001 40 4 - 92321 40 4 - 92641 40 4 - 92961 40 4 - 93281 40 4 - 93601 40 4 - 93921 40 4 - 94241 40 4 - 94561 40 4 - 94881 40 4 - 95201 40 4 - 95521 40 4 - 95841 40 4 - 96161 40 4 - 96481 40 4 - 96801 40 4 - 97121 40 4 - 97441 40 4 - 97761 40 4 - 98081 40 4 - 98401 40 4 - 98721 40 4 - 99041 40 4 - 99361 40 4 - 99681 40 4 - 100001 40 4 - 100321 40 4 - 100641 40 4 - 100961 40 4 - 101281 40 4 - 101601 40 4 - 101921 40 4 - 102241 40 4 - 102561 40 4 - 102881 40 4 - 103201 40 4 - 103521 40 4 - 103841 40 4 - 104161 40 4 - 104481 40 4 - 104801 40 4 - 105121 40 4 - 105441 40 4 - 105761 40 4 - 106081 40 4 - 106401 40 4 - 106721 40 4 - 107041 40 4 - 107361 40 4 - 107681 40 4 - 108001 40 4 - 108321 40 4 - 108641 40 4 - 108961 40 4 - 109281 40 4 - 109601 40 4 - 109921 40 4 - 110241 40 4 - 110561 40 4 - 110881 40 4 - 111201 40 4 - 111521 40 4 - 111841 40 4 - 112161 40 4 - 112481 40 4 - 112801 40 4 - 113121 40 4 - 113441 40 4 - 113761 40 4 - 114081 40 4 - 114401 40 4 - 114721 40 4 - 115041 40 4 - 115361 40 4 - 115681 40 4 - 116001 40 4 - 116321 40 4 - 116641 40 4 - 116961 40 4 - 117281 40 4 - 117601 40 4 - 117921 40 4 - 118241 40 4 - 118561 40 4 - 118881 40 4 - 119201 40 4 - 119521 40 4 - 119841 40 4 - 120161 40 4 - 120481 40 4 - 120801 40 4 - 121121 40 4 - 121441 40 4 - 121761 40 4 - 122081 40 4 - 122401 40 4 - 122721 40 4 - 201 40 5 - 521 40 5 - 841 40 5 - 1161 40 5 - 1481 40 5 - 1801 40 5 - 2121 40 5 - 2441 40 5 - 2761 40 5 - 3081 40 5 - 3401 40 5 - 3721 40 5 - 4041 40 5 - 4361 40 5 - 4681 40 5 - 5001 40 5 - 5321 40 5 - 5641 40 5 - 5961 40 5 - 6281 40 5 - 6601 40 5 - 6921 40 5 - 7241 40 5 - 7561 40 5 - 7881 40 5 - 8201 40 5 - 8521 40 5 - 8841 40 5 - 9161 40 5 - 9481 40 5 - 9801 40 5 - 10121 40 5 - 10441 40 5 - 10761 40 5 - 11081 40 5 - 11401 40 5 - 11721 40 5 - 12041 40 5 - 12361 40 5 - 12681 40 5 - 13001 40 5 - 13321 40 5 - 13641 40 5 - 13961 40 5 - 14281 40 5 - 14601 40 5 - 14921 40 5 - 15241 40 5 - 15561 40 5 - 15881 40 5 - 16201 40 5 - 16521 40 5 - 16841 40 5 - 17161 40 5 - 17481 40 5 - 17801 40 5 - 18121 40 5 - 18441 40 5 - 18761 40 5 - 19081 40 5 - 19401 40 5 - 19721 40 5 - 20041 40 5 - 20361 40 5 - 20681 40 5 - 21001 40 5 - 21321 40 5 - 21641 40 5 - 21961 40 5 - 22281 40 5 - 22601 40 5 - 22921 40 5 - 23241 40 5 - 23561 40 5 - 23881 40 5 - 24201 40 5 - 24521 40 5 - 24841 40 5 - 25161 40 5 - 25481 40 5 - 25801 40 5 - 26121 40 5 - 26441 40 5 - 26761 40 5 - 27081 40 5 - 27401 40 5 - 27721 40 5 - 28041 40 5 - 28361 40 5 - 28681 40 5 - 29001 40 5 - 29321 40 5 - 29641 40 5 - 29961 40 5 - 30281 40 5 - 30601 40 5 - 30921 40 5 - 31241 40 5 - 31561 40 5 - 31881 40 5 - 32201 40 5 - 32521 40 5 - 32841 40 5 - 33161 40 5 - 33481 40 5 - 33801 40 5 - 34121 40 5 - 34441 40 5 - 34761 40 5 - 35081 40 5 - 35401 40 5 - 35721 40 5 - 36041 40 5 - 36361 40 5 - 36681 40 5 - 37001 40 5 - 37321 40 5 - 37641 40 5 - 37961 40 5 - 38281 40 5 - 38601 40 5 - 38921 40 5 - 39241 40 5 - 39561 40 5 - 39881 40 5 - 40201 40 5 - 40521 
40 5 - 40841 40 5 - 41161 40 5 - 41481 40 5 - 41801 40 5 - 42121 40 5 - 42441 40 5 - 42761 40 5 - 43081 40 5 - 43401 40 5 - 43721 40 5 - 44041 40 5 - 44361 40 5 - 44681 40 5 - 45001 40 5 - 45321 40 5 - 45641 40 5 - 45961 40 5 - 46281 40 5 - 46601 40 5 - 46921 40 5 - 47241 40 5 - 47561 40 5 - 47881 40 5 - 48201 40 5 - 48521 40 5 - 48841 40 5 - 49161 40 5 - 49481 40 5 - 49801 40 5 - 50121 40 5 - 50441 40 5 - 50761 40 5 - 51081 40 5 - 51401 40 5 - 51721 40 5 - 52041 40 5 - 52361 40 5 - 52681 40 5 - 53001 40 5 - 53321 40 5 - 53641 40 5 - 53961 40 5 - 54281 40 5 - 54601 40 5 - 54921 40 5 - 55241 40 5 - 55561 40 5 - 55881 40 5 - 56201 40 5 - 56521 40 5 - 56841 40 5 - 57161 40 5 - 57481 40 5 - 57801 40 5 - 58121 40 5 - 58441 40 5 - 58761 40 5 - 59081 40 5 - 59401 40 5 - 59721 40 5 - 60041 40 5 - 60361 40 5 - 60681 40 5 - 61001 40 5 - 61321 40 5 - 61641 40 5 - 61961 40 5 - 62281 40 5 - 62601 40 5 - 62921 40 5 - 63241 40 5 - 63561 40 5 - 63881 40 5 - 64201 40 5 - 64521 40 5 - 64841 40 5 - 65161 40 5 - 65481 40 5 - 65801 40 5 - 66121 40 5 - 66441 40 5 - 66761 40 5 - 67081 40 5 - 67401 40 5 - 67721 40 5 - 68041 40 5 - 68361 40 5 - 68681 40 5 - 69001 40 5 - 69321 40 5 - 69641 40 5 - 69961 40 5 - 70281 40 5 - 70601 40 5 - 70921 40 5 - 71241 40 5 - 71561 40 5 - 71881 40 5 - 72201 40 5 - 72521 40 5 - 72841 40 5 - 73161 40 5 - 73481 40 5 - 73801 40 5 - 74121 40 5 - 74441 40 5 - 74761 40 5 - 75081 40 5 - 75401 40 5 - 75721 40 5 - 76041 40 5 - 76361 40 5 - 76681 40 5 - 77001 40 5 - 77321 40 5 - 77641 40 5 - 77961 40 5 - 78281 40 5 - 78601 40 5 - 78921 40 5 - 79241 40 5 - 79561 40 5 - 79881 40 5 - 80201 40 5 - 80521 40 5 - 80841 40 5 - 81161 40 5 - 81481 40 5 - 81801 40 5 - 82121 40 5 - 82441 40 5 - 82761 40 5 - 83081 40 5 - 83401 40 5 - 83721 40 5 - 84041 40 5 - 84361 40 5 - 84681 40 5 - 85001 40 5 - 85321 40 5 - 85641 40 5 - 85961 40 5 - 86281 40 5 - 86601 40 5 - 86921 40 5 - 87241 40 5 - 87561 40 5 - 87881 40 5 - 88201 40 5 - 88521 40 5 - 88841 40 5 - 89161 40 5 - 89481 40 5 - 89801 40 5 - 90121 40 5 - 90441 40 5 - 90761 40 5 - 91081 40 5 - 91401 40 5 - 91721 40 5 - 92041 40 5 - 92361 40 5 - 92681 40 5 - 93001 40 5 - 93321 40 5 - 93641 40 5 - 93961 40 5 - 94281 40 5 - 94601 40 5 - 94921 40 5 - 95241 40 5 - 95561 40 5 - 95881 40 5 - 96201 40 5 - 96521 40 5 - 96841 40 5 - 97161 40 5 - 97481 40 5 - 97801 40 5 - 98121 40 5 - 98441 40 5 - 98761 40 5 - 99081 40 5 - 99401 40 5 - 99721 40 5 - 100041 40 5 - 100361 40 5 - 100681 40 5 - 101001 40 5 - 101321 40 5 - 101641 40 5 - 101961 40 5 - 102281 40 5 - 102601 40 5 - 102921 40 5 - 103241 40 5 - 103561 40 5 - 103881 40 5 - 104201 40 5 - 104521 40 5 - 104841 40 5 - 105161 40 5 - 105481 40 5 - 105801 40 5 - 106121 40 5 - 106441 40 5 - 106761 40 5 - 107081 40 5 - 107401 40 5 - 107721 40 5 - 108041 40 5 - 108361 40 5 - 108681 40 5 - 109001 40 5 - 109321 40 5 - 109641 40 5 - 109961 40 5 - 110281 40 5 - 110601 40 5 - 110921 40 5 - 111241 40 5 - 111561 40 5 - 111881 40 5 - 112201 40 5 - 112521 40 5 - 112841 40 5 - 113161 40 5 - 113481 40 5 - 113801 40 5 - 114121 40 5 - 114441 40 5 - 114761 40 5 - 115081 40 5 - 115401 40 5 - 115721 40 5 - 116041 40 5 - 116361 40 5 - 116681 40 5 - 117001 40 5 - 117321 40 5 - 117641 40 5 - 117961 40 5 - 118281 40 5 - 118601 40 5 - 118921 40 5 - 119241 40 5 - 119561 40 5 - 119881 40 5 - 120201 40 5 - 120521 40 5 - 120841 40 5 - 121161 40 5 - 121481 40 5 - 121801 40 5 - 122121 40 5 - 122441 40 5 - 122761 40 5 - 241 40 6 - 561 40 6 - 881 40 6 - 1201 40 6 - 1521 40 6 - 1841 40 6 - 2161 40 6 - 2481 40 6 - 2801 40 6 - 3121 40 6 - 3441 40 6 - 3761 
40 6 - 4081 40 6 - 4401 40 6 - 4721 40 6 - 5041 40 6 - 5361 40 6 - 5681 40 6 - 6001 40 6 - 6321 40 6 - 6641 40 6 - 6961 40 6 - 7281 40 6 - 7601 40 6 - 7921 40 6 - 8241 40 6 - 8561 40 6 - 8881 40 6 - 9201 40 6 - 9521 40 6 - 9841 40 6 - 10161 40 6 - 10481 40 6 - 10801 40 6 - 11121 40 6 - 11441 40 6 - 11761 40 6 - 12081 40 6 - 12401 40 6 - 12721 40 6 - 13041 40 6 - 13361 40 6 - 13681 40 6 - 14001 40 6 - 14321 40 6 - 14641 40 6 - 14961 40 6 - 15281 40 6 - 15601 40 6 - 15921 40 6 - 16241 40 6 - 16561 40 6 - 16881 40 6 - 17201 40 6 - 17521 40 6 - 17841 40 6 - 18161 40 6 - 18481 40 6 - 18801 40 6 - 19121 40 6 - 19441 40 6 - 19761 40 6 - 20081 40 6 - 20401 40 6 - 20721 40 6 - 21041 40 6 - 21361 40 6 - 21681 40 6 - 22001 40 6 - 22321 40 6 - 22641 40 6 - 22961 40 6 - 23281 40 6 - 23601 40 6 - 23921 40 6 - 24241 40 6 - 24561 40 6 - 24881 40 6 - 25201 40 6 - 25521 40 6 - 25841 40 6 - 26161 40 6 - 26481 40 6 - 26801 40 6 - 27121 40 6 - 27441 40 6 - 27761 40 6 - 28081 40 6 - 28401 40 6 - 28721 40 6 - 29041 40 6 - 29361 40 6 - 29681 40 6 - 30001 40 6 - 30321 40 6 - 30641 40 6 - 30961 40 6 - 31281 40 6 - 31601 40 6 - 31921 40 6 - 32241 40 6 - 32561 40 6 - 32881 40 6 - 33201 40 6 - 33521 40 6 - 33841 40 6 - 34161 40 6 - 34481 40 6 - 34801 40 6 - 35121 40 6 - 35441 40 6 - 35761 40 6 - 36081 40 6 - 36401 40 6 - 36721 40 6 - 37041 40 6 - 37361 40 6 - 37681 40 6 - 38001 40 6 - 38321 40 6 - 38641 40 6 - 38961 40 6 - 39281 40 6 - 39601 40 6 - 39921 40 6 - 40241 40 6 - 40561 40 6 - 40881 40 6 - 41201 40 6 - 41521 40 6 - 41841 40 6 - 42161 40 6 - 42481 40 6 - 42801 40 6 - 43121 40 6 - 43441 40 6 - 43761 40 6 - 44081 40 6 - 44401 40 6 - 44721 40 6 - 45041 40 6 - 45361 40 6 - 45681 40 6 - 46001 40 6 - 46321 40 6 - 46641 40 6 - 46961 40 6 - 47281 40 6 - 47601 40 6 - 47921 40 6 - 48241 40 6 - 48561 40 6 - 48881 40 6 - 49201 40 6 - 49521 40 6 - 49841 40 6 - 50161 40 6 - 50481 40 6 - 50801 40 6 - 51121 40 6 - 51441 40 6 - 51761 40 6 - 52081 40 6 - 52401 40 6 - 52721 40 6 - 53041 40 6 - 53361 40 6 - 53681 40 6 - 54001 40 6 - 54321 40 6 - 54641 40 6 - 54961 40 6 - 55281 40 6 - 55601 40 6 - 55921 40 6 - 56241 40 6 - 56561 40 6 - 56881 40 6 - 57201 40 6 - 57521 40 6 - 57841 40 6 - 58161 40 6 - 58481 40 6 - 58801 40 6 - 59121 40 6 - 59441 40 6 - 59761 40 6 - 60081 40 6 - 60401 40 6 - 60721 40 6 - 61041 40 6 - 61361 40 6 - 61681 40 6 - 62001 40 6 - 62321 40 6 - 62641 40 6 - 62961 40 6 - 63281 40 6 - 63601 40 6 - 63921 40 6 - 64241 40 6 - 64561 40 6 - 64881 40 6 - 65201 40 6 - 65521 40 6 - 65841 40 6 - 66161 40 6 - 66481 40 6 - 66801 40 6 - 67121 40 6 - 67441 40 6 - 67761 40 6 - 68081 40 6 - 68401 40 6 - 68721 40 6 - 69041 40 6 - 69361 40 6 - 69681 40 6 - 70001 40 6 - 70321 40 6 - 70641 40 6 - 70961 40 6 - 71281 40 6 - 71601 40 6 - 71921 40 6 - 72241 40 6 - 72561 40 6 - 72881 40 6 - 73201 40 6 - 73521 40 6 - 73841 40 6 - 74161 40 6 - 74481 40 6 - 74801 40 6 - 75121 40 6 - 75441 40 6 - 75761 40 6 - 76081 40 6 - 76401 40 6 - 76721 40 6 - 77041 40 6 - 77361 40 6 - 77681 40 6 - 78001 40 6 - 78321 40 6 - 78641 40 6 - 78961 40 6 - 79281 40 6 - 79601 40 6 - 79921 40 6 - 80241 40 6 - 80561 40 6 - 80881 40 6 - 81201 40 6 - 81521 40 6 - 81841 40 6 - 82161 40 6 - 82481 40 6 - 82801 40 6 - 83121 40 6 - 83441 40 6 - 83761 40 6 - 84081 40 6 - 84401 40 6 - 84721 40 6 - 85041 40 6 - 85361 40 6 - 85681 40 6 - 86001 40 6 - 86321 40 6 - 86641 40 6 - 86961 40 6 - 87281 40 6 - 87601 40 6 - 87921 40 6 - 88241 40 6 - 88561 40 6 - 88881 40 6 - 89201 40 6 - 89521 40 6 - 89841 40 6 - 90161 40 6 - 90481 40 6 - 90801 40 6 - 91121 40 6 - 91441 40 6 - 
91761 40 6 - 92081 40 6 - 92401 40 6 - 92721 40 6 - 93041 40 6 - 93361 40 6 - 93681 40 6 - 94001 40 6 - 94321 40 6 - 94641 40 6 - 94961 40 6 - 95281 40 6 - 95601 40 6 - 95921 40 6 - 96241 40 6 - 96561 40 6 - 96881 40 6 - 97201 40 6 - 97521 40 6 - 97841 40 6 - 98161 40 6 - 98481 40 6 - 98801 40 6 - 99121 40 6 - 99441 40 6 - 99761 40 6 - 100081 40 6 - 100401 40 6 - 100721 40 6 - 101041 40 6 - 101361 40 6 - 101681 40 6 - 102001 40 6 - 102321 40 6 - 102641 40 6 - 102961 40 6 - 103281 40 6 - 103601 40 6 - 103921 40 6 - 104241 40 6 - 104561 40 6 - 104881 40 6 - 105201 40 6 - 105521 40 6 - 105841 40 6 - 106161 40 6 - 106481 40 6 - 106801 40 6 - 107121 40 6 - 107441 40 6 - 107761 40 6 - 108081 40 6 - 108401 40 6 - 108721 40 6 - 109041 40 6 - 109361 40 6 - 109681 40 6 - 110001 40 6 - 110321 40 6 - 110641 40 6 - 110961 40 6 - 111281 40 6 - 111601 40 6 - 111921 40 6 - 112241 40 6 - 112561 40 6 - 112881 40 6 - 113201 40 6 - 113521 40 6 - 113841 40 6 - 114161 40 6 - 114481 40 6 - 114801 40 6 - 115121 40 6 - 115441 40 6 - 115761 40 6 - 116081 40 6 - 116401 40 6 - 116721 40 6 - 117041 40 6 - 117361 40 6 - 117681 40 6 - 118001 40 6 - 118321 40 6 - 118641 40 6 - 118961 40 6 - 119281 40 6 - 119601 40 6 - 119921 40 6 - 120241 40 6 - 120561 40 6 - 120881 40 6 - 121201 40 6 - 121521 40 6 - 121841 40 6 - 122161 40 6 - 122481 40 6 - 122801 40 6 - 281 40 7 - 601 40 7 - 921 40 7 - 1241 40 7 - 1561 40 7 - 1881 40 7 - 2201 40 7 - 2521 40 7 - 2841 40 7 - 3161 40 7 - 3481 40 7 - 3801 40 7 - 4121 40 7 - 4441 40 7 - 4761 40 7 - 5081 40 7 - 5401 40 7 - 5721 40 7 - 6041 40 7 - 6361 40 7 - 6681 40 7 - 7001 40 7 - 7321 40 7 - 7641 40 7 - 7961 40 7 - 8281 40 7 - 8601 40 7 - 8921 40 7 - 9241 40 7 - 9561 40 7 - 9881 40 7 - 10201 40 7 - 10521 40 7 - 10841 40 7 - 11161 40 7 - 11481 40 7 - 11801 40 7 - 12121 40 7 - 12441 40 7 - 12761 40 7 - 13081 40 7 - 13401 40 7 - 13721 40 7 - 14041 40 7 - 14361 40 7 - 14681 40 7 - 15001 40 7 - 15321 40 7 - 15641 40 7 - 15961 40 7 - 16281 40 7 - 16601 40 7 - 16921 40 7 - 17241 40 7 - 17561 40 7 - 17881 40 7 - 18201 40 7 - 18521 40 7 - 18841 40 7 - 19161 40 7 - 19481 40 7 - 19801 40 7 - 20121 40 7 - 20441 40 7 - 20761 40 7 - 21081 40 7 - 21401 40 7 - 21721 40 7 - 22041 40 7 - 22361 40 7 - 22681 40 7 - 23001 40 7 - 23321 40 7 - 23641 40 7 - 23961 40 7 - 24281 40 7 - 24601 40 7 - 24921 40 7 - 25241 40 7 - 25561 40 7 - 25881 40 7 - 26201 40 7 - 26521 40 7 - 26841 40 7 - 27161 40 7 - 27481 40 7 - 27801 40 7 - 28121 40 7 - 28441 40 7 - 28761 40 7 - 29081 40 7 - 29401 40 7 - 29721 40 7 - 30041 40 7 - 30361 40 7 - 30681 40 7 - 31001 40 7 - 31321 40 7 - 31641 40 7 - 31961 40 7 - 32281 40 7 - 32601 40 7 - 32921 40 7 - 33241 40 7 - 33561 40 7 - 33881 40 7 - 34201 40 7 - 34521 40 7 - 34841 40 7 - 35161 40 7 - 35481 40 7 - 35801 40 7 - 36121 40 7 - 36441 40 7 - 36761 40 7 - 37081 40 7 - 37401 40 7 - 37721 40 7 - 38041 40 7 - 38361 40 7 - 38681 40 7 - 39001 40 7 - 39321 40 7 - 39641 40 7 - 39961 40 7 - 40281 40 7 - 40601 40 7 - 40921 40 7 - 41241 40 7 - 41561 40 7 - 41881 40 7 - 42201 40 7 - 42521 40 7 - 42841 40 7 - 43161 40 7 - 43481 40 7 - 43801 40 7 - 44121 40 7 - 44441 40 7 - 44761 40 7 - 45081 40 7 - 45401 40 7 - 45721 40 7 - 46041 40 7 - 46361 40 7 - 46681 40 7 - 47001 40 7 - 47321 40 7 - 47641 40 7 - 47961 40 7 - 48281 40 7 - 48601 40 7 - 48921 40 7 - 49241 40 7 - 49561 40 7 - 49881 40 7 - 50201 40 7 - 50521 40 7 - 50841 40 7 - 51161 40 7 - 51481 40 7 - 51801 40 7 - 52121 40 7 - 52441 40 7 - 52761 40 7 - 53081 40 7 - 53401 40 7 - 53721 40 7 - 54041 40 7 - 54361 40 7 - 54681 40 7 - 55001 40 7 - 55321 
40 7 - 55641 40 7 - 55961 40 7 - 56281 40 7 - 56601 40 7 - 56921 40 7 - 57241 40 7 - 57561 40 7 - 57881 40 7 - 58201 40 7 - 58521 40 7 - 58841 40 7 - 59161 40 7 - 59481 40 7 - 59801 40 7 - 60121 40 7 - 60441 40 7 - 60761 40 7 - 61081 40 7 - 61401 40 7 - 61721 40 7 - 62041 40 7 - 62361 40 7 - 62681 40 7 - 63001 40 7 - 63321 40 7 - 63641 40 7 - 63961 40 7 - 64281 40 7 - 64601 40 7 - 64921 40 7 - 65241 40 7 - 65561 40 7 - 65881 40 7 - 66201 40 7 - 66521 40 7 - 66841 40 7 - 67161 40 7 - 67481 40 7 - 67801 40 7 - 68121 40 7 - 68441 40 7 - 68761 40 7 - 69081 40 7 - 69401 40 7 - 69721 40 7 - 70041 40 7 - 70361 40 7 - 70681 40 7 - 71001 40 7 - 71321 40 7 - 71641 40 7 - 71961 40 7 - 72281 40 7 - 72601 40 7 - 72921 40 7 - 73241 40 7 - 73561 40 7 - 73881 40 7 - 74201 40 7 - 74521 40 7 - 74841 40 7 - 75161 40 7 - 75481 40 7 - 75801 40 7 - 76121 40 7 - 76441 40 7 - 76761 40 7 - 77081 40 7 - 77401 40 7 - 77721 40 7 - 78041 40 7 - 78361 40 7 - 78681 40 7 - 79001 40 7 - 79321 40 7 - 79641 40 7 - 79961 40 7 - 80281 40 7 - 80601 40 7 - 80921 40 7 - 81241 40 7 - 81561 40 7 - 81881 40 7 - 82201 40 7 - 82521 40 7 - 82841 40 7 - 83161 40 7 - 83481 40 7 - 83801 40 7 - 84121 40 7 - 84441 40 7 - 84761 40 7 - 85081 40 7 - 85401 40 7 - 85721 40 7 - 86041 40 7 - 86361 40 7 - 86681 40 7 - 87001 40 7 - 87321 40 7 - 87641 40 7 - 87961 40 7 - 88281 40 7 - 88601 40 7 - 88921 40 7 - 89241 40 7 - 89561 40 7 - 89881 40 7 - 90201 40 7 - 90521 40 7 - 90841 40 7 - 91161 40 7 - 91481 40 7 - 91801 40 7 - 92121 40 7 - 92441 40 7 - 92761 40 7 - 93081 40 7 - 93401 40 7 - 93721 40 7 - 94041 40 7 - 94361 40 7 - 94681 40 7 - 95001 40 7 - 95321 40 7 - 95641 40 7 - 95961 40 7 - 96281 40 7 - 96601 40 7 - 96921 40 7 - 97241 40 7 - 97561 40 7 - 97881 40 7 - 98201 40 7 - 98521 40 7 - 98841 40 7 - 99161 40 7 - 99481 40 7 - 99801 40 7 - 100121 40 7 - 100441 40 7 - 100761 40 7 - 101081 40 7 - 101401 40 7 - 101721 40 7 - 102041 40 7 - 102361 40 7 - 102681 40 7 - 103001 40 7 - 103321 40 7 - 103641 40 7 - 103961 40 7 - 104281 40 7 - 104601 40 7 - 104921 40 7 - 105241 40 7 - 105561 40 7 - 105881 40 7 - 106201 40 7 - 106521 40 7 - 106841 40 7 - 107161 40 7 - 107481 40 7 - 107801 40 7 - 108121 40 7 - 108441 40 7 - 108761 40 7 - 109081 40 7 - 109401 40 7 - 109721 40 7 - 110041 40 7 - 110361 40 7 - 110681 40 7 - 111001 40 7 - 111321 40 7 - 111641 40 7 - 111961 40 7 - 112281 40 7 - 112601 40 7 - 112921 40 7 - 113241 40 7 - 113561 40 7 - 113881 40 7 - 114201 40 7 - 114521 40 7 - 114841 40 7 - 115161 40 7 - 115481 40 7 - 115801 40 7 - 116121 40 7 - 116441 40 7 - 116761 40 7 - 117081 40 7 - 117401 40 7 - 117721 40 7 - 118041 40 7 - 118361 40 7 - 118681 40 7 - 119001 40 7 - 119321 40 7 - 119641 40 7 - 119961 40 7 - 120281 40 7 - 120601 40 7 - 120921 40 7 - 121241 40 7 - 121561 40 7 - 121881 40 7 - 122201 40 7 - 122521 40 7 - 122841 40 7 diff --git a/src/externals/mct/benchmarks/gx1.8pR b/src/externals/mct/benchmarks/gx1.8pR deleted file mode 100644 index c90fd783a54..00000000000 --- a/src/externals/mct/benchmarks/gx1.8pR +++ /dev/null @@ -1,12 +0,0 @@ - 8 - 2 - 8 - 122880 - 1 15360 0 - 15361 15360 1 - 30721 15360 2 - 46081 15360 3 - 61441 15360 4 - 76801 15360 5 - 92161 15360 6 - 107521 15360 7 diff --git a/src/externals/mct/benchmarks/importBench.F90 b/src/externals/mct/benchmarks/importBench.F90 deleted file mode 100644 index ac7603e9d47..00000000000 --- a/src/externals/mct/benchmarks/importBench.F90 +++ /dev/null @@ -1,215 +0,0 @@ -! Av import/export benchmark -! 
- program importBench - - use m_MCTWorld,only : MCTWorld_init => init - use m_MCTWorld,only : MCTWorld_clean => clean - use m_MCTWorld,only : ThisMCTWorld - use m_AttrVect,only : AttrVect - use m_AttrVect,only : AttrVect_init => init - use m_AttrVect,only : AttrVect_nRattr => nRattr - use m_AttrVect,only : AttrVect_nIattr => nIattr - use m_AttrVect,only : AttrVect_size => lsize - use m_AttrVect,only : AttrVect_indexRA => indexRA - use m_AttrVect,only : AttrVect_importRA => importRAttr - use m_AttrVect,only : AttrVect_exportRA => exportRAttr - - use m_mpif90 - use m_ioutil, only : luavail - - implicit none - -! declarations - include 'mpif.h' - - character(len=*), parameter :: myname='MCT_importBench' - - integer, parameter :: nTrials=1000 ! Number of timing measurements - ! per test. Keep high WRT - ! value of MaxNumAtts to ensure - ! timings are representative - - integer, parameter :: lmax = 17 ! Maximum AV length = 2**(lmax-1) - ! Don't increase--segv on login.mcs - ! for larger values! - - integer, parameter :: MaxNumAtts = 26 ! maximum number of - ! attributes used in - ! timing tests. Leave - ! fixed for now! - - character(len=2*MaxNumAtts-1) :: dummyAList ! character array for - ! synthetic attribute - ! lists - - integer comm1, mysize,myproc,ier,i - - real*8, dimension(:), pointer :: inputData(:) - real*8, dimension(:), pointer :: outputData(:) - - integer :: currLength, k, l, n - integer :: colInd, lettInd, attInd, charInd - - real*8 :: startTime, finishTime - real*8, dimension(:), pointer :: impTimings - real*8, dimension(:), pointer :: expTimings - real*8 :: impMeanTime, expMeanTime - real*8 :: impStdDevTime, expStdDevTime - - integer :: impAvD, impMinD, impMaxD, impSDD - integer :: expAvD, expMinD, expMaxD, expSDD - - type(AttrVect) :: myAV - -! -! Initialize MPI and copy MPI_COMM_WORLD... -! - call MPI_init(ier) - - call mpi_comm_size(MPI_COMM_WORLD, mysize,ier) - call mpi_comm_rank(MPI_COMM_WORLD, myproc,ier) - write(0,*) myproc, "MPI size proc", mysize - - call mpi_comm_dup(MPI_COMM_WORLD,comm1,ier) - - myproc = 0 - -! create storage impTimings(:) and expTimings(:) -! - allocate(impTimings(nTrials), expTimings(nTrials), stat=ier) - write(0,'(a,2(a,i8))') myname,':: nTrials = ',nTrials,' ier=',ier - -! set up files for timing statistics and open them -! - impAvD = luavail() - open(impAvD, file='benchAV_importAvgTime.d',status='new') - impMinD = luavail() - open(impMinD, file='benchAV_importMinTime.d',status='new') - impMaxD = luavail() - open(impMaxD, file='benchAV_importMaxTime.d',status='new') - impSDD = luavail() - open(impSDD, file='benchAV_importStdDevTime.d',status='new') - expAvD = luavail() - open(expAvD, file='benchAV_exportAvgTime.d',status='new') - expMinD = luavail() - open(expMinD, file='benchAV_exportMinTime.d',status='new') - expMaxD = luavail() - open(expMaxD, file='benchAV_exportMaxTime.d',status='new') - expSDD = luavail() - open(expSDD, file='benchAV_exportStdDevTime.d',status='new') - -! Initialize MCTWorld - call MCTWorld_init(1,MPI_COMM_WORLD,comm1,1) - - dummyAList = '' - do k=1,MaxNumAtts - - ! construct dummy attribute list AttrVect_init() invoked with - ! trim(dummyAList) as a string literal argument for rList (see below) - if(k == 1) then ! bootstrap the process with just a single attribute - dummyAList(k:k) = achar(65) ! the letter 'A' - else - colInd = 2 * (k-1) - lettInd = 2*k - 1 - dummyAList(colInd:colInd) = achar(58) ! a colon ':' - dummyAList(lettInd:lettInd) = achar(64+k) - endif - - do l=1,lmax -! -! 
Set current AV length currLength, create inputData(:) and outputData(:), -! and initialize entries of inputData(:)... -! - currLength = 2 ** (l-1) - ! write(0,'(a,2(a,i8))') myname,":: l = ",l," currLength = ",currLength - - allocate(inputData(currLength), outputData(currLength),stat=ier) - do i=1,currLength - inputData(i)=real(i) - end do - - ! create an Av with k attributes - call AttrVect_init(myAV, rList=trim(dummyAList), lsize=currLength) - - ! Import/Export timing tests: - impMeanTime = 0. - expMeanTime = 0. - do n=1,nTrials - ! circulate through the k attributes so that we get more-or-less - ! equal representation of the attributes among the import/export - ! calls. Setting nTrials to a large number ensures the disparities - ! among how frequently the attributes are called will be minimal. - attInd = mod(n,k) - charInd = 65 + attInd ! offset from "A" - startTime = MPI_WTIME() - call AttrVect_importRA(myAV, achar(charInd), inputData, currLength) - finishTime = MPI_WTIME() - impTimings(n) = finishTime - startTime - impMeanTime = impMeanTime + impTimings(n) - - startTime = MPI_WTIME() - call AttrVect_exportRA(myAV, achar(charInd), outputData, currLength) - finishTime = MPI_WTIME() - expTimings(n) = finishTime - startTime - expMeanTime = expMeanTime + expTimings(n) - - end do - impMeanTime = impMeanTime / float(nTrials) - expMeanTime = expMeanTime / float(nTrials) - ! Compute Standard Deviation for timings - impStdDevTime = 0. - expStdDevTime = 0. - do n=1,nTrials - impStdDevTime = impStdDevTime + (impTimings(n) - impMeanTime)**2 - expStdDevTime = expStdDevTime + (expTimings(n) - expMeanTime)**2 - end do - impStdDevTime = sqrt(impStdDevTime / float(nTrials-1)) - expStdDevTime = sqrt(expStdDevTime / float(nTrials-1)) - - write(*,'(a,2(a,i8),4(a,g12.6))') myname, & - ":: Import timings for k=",k,"attributes. AV length=", & - currLength," elements: Mean = ",impMeanTime," Min= ", & - minval (impTimings)," Max = ",maxval(impTimings), & - " Std. Dev. = ",impStdDevTime - - write(*,'(a,2(a,i8),4(a,g12.6))') myname, & - ":: Export timings for k=",k,"attributes. AV length=", & - currLength," elements: Mean = ",expMeanTime," Min = ", & - minval(expTimings)," Max = ",maxval(expTimings), & - " Std. Dev. = ",impStdDevTime - - ! Write statistics to individual files for subsequent - ! visualization: - write(impAvD,'(2(i8,2x),g12.6)') l-1, k, impMeanTime - write(impMinD,'(2(i8,2x),g12.6)') l-1, k, minval(impTimings) - write(impMaxD,'(2(i8,2x),g12.6)') l-1, k, maxval(impTimings) - write(impSDD,'(2(i8,2x),g12.6)') l-1, k, impStdDevTime - write(expAvD,'(2(i8,2x),g12.6)') l-1, k, expMeanTime - write(expMinD,'(2(i8,2x),g12.6)') l-1, k, minval(expTimings) - write(expMaxD,'(2(i8,2x),g12.6)') l-1, k, maxval(expTimings) - write(expSDD,'(2(i8,2x),g12.6)') l-1, k, expStdDevTime - - ! Clean up for this value of l: -! write(*,'(2a,i8)') myname,':: cleaning up for l = ',l - deallocate(inputData, outputData,stat=ier) - - end do ! l=1,lmax - end do ! k=1,MaxNumAtts - -! Close output files: - close(impAvD) - close(impMinD) - close(impMaxD) - close(impSDD) - close(expAvD) - close(expMinD) - close(expMaxD) - close(expSDD) - - call MCTWorld_clean -! write(*,'(2a,i8)') myname,':: clean up completed for l = ',l - -! call MPI_FINALIZE(MPI_COMM_WORLD, ier) - - end program importBench - diff --git a/src/externals/mct/config.h.in b/src/externals/mct/config.h.in deleted file mode 100644 index 5ea9c79519e..00000000000 --- a/src/externals/mct/config.h.in +++ /dev/null @@ -1,81 +0,0 @@ -/* config.h.in. 
Generated from configure.ac by autoheader. */ - -/* Define if building universal (internal helper macro) */ -#undef AC_APPLE_UNIVERSAL_BUILD - -/* Define to dummy `main' function (if any) required to link to the Fortran - libraries. */ -#undef FC_DUMMY_MAIN - -/* Define if F77 and FC dummy `main' functions are identical. */ -#undef FC_DUMMY_MAIN_EQ_F77 - -/* Define to a macro mangling the given C identifier (in lower and upper - case), which must not contain underscores, for linking with Fortran. */ -#undef FC_FUNC - -/* As FC_FUNC, but for C identifiers containing underscores. */ -#undef FC_FUNC_ - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define if you have the MPI library. */ -#undef HAVE_MPI - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most - significant byte first (like Motorola and SPARC, unlike Intel). */ -#if defined AC_APPLE_UNIVERSAL_BUILD -# if defined __BIG_ENDIAN__ -# define WORDS_BIGENDIAN 1 -# endif -#else -# ifndef WORDS_BIGENDIAN -# undef WORDS_BIGENDIAN -# endif -#endif diff --git a/src/externals/mct/configure b/src/externals/mct/configure deleted file mode 100755 index 83614aa08db..00000000000 --- a/src/externals/mct/configure +++ /dev/null @@ -1,6849 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for MCT 2.8. -# -# -# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. 
-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# Use a proper internal environment variable to ensure we don't fall - # into an infinite loop, continuously re-executing ourselves. - if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then - _as_can_reexec=no; export _as_can_reexec; - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. 
-BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -as_fn_exit 255 - fi - # We don't want this to propagate to other subprocesses. - { _as_can_reexec=; unset _as_can_reexec;} -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. -fi -test x\$exitcode = x0 || exit 1 -test -x / || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - export CONFIG_SHELL - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. 
-BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -exit 255 -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... 
-# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # If we had to re-execute with $CONFIG_SHELL, we're ensured to have - # already done that, so ensure we don't try to do so again and fall - # in an infinite loop. This has already happened in practice. - _as_can_reexec=no; export _as_can_reexec - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
- xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -test -n "$DJDIR" || exec 7<&0 &1 - -# Name of the host. -# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME='MCT' -PACKAGE_TARNAME='mct' -PACKAGE_VERSION='2.8' -PACKAGE_STRING='MCT 2.8' -PACKAGE_BUGREPORT='' -PACKAGE_URL='' - -# Factoring default headers for most tests. 
-ac_includes_default="\ -#include -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef STDC_HEADERS -# include -# include -#else -# ifdef HAVE_STDLIB_H -# include -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include -# endif -# include -#endif -#ifdef HAVE_STRINGS_H -# include -#endif -#ifdef HAVE_INTTYPES_H -# include -#endif -#ifdef HAVE_STDINT_H -# include -#endif -#ifdef HAVE_UNISTD_H -# include -#endif" - -enable_option_checking=no -ac_subst_vars='LTLIBOBJS -LIBOBJS -subdirs -CPPDEFS -CRULE -FCLIBS -FC_DEFINE -FCFLAGS_F -MPISERPATH -MPIFC -FCFLAGS_F90 -ac_ct_FC -EGREP -GREP -CPP -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CC -PYTHONOPTS -PYTHON -FORT_SIZE -COMPILER_ROOT -BABELROOT -RANLIB -AR -INCLUDEPATH -INCLUDEFLAG -ENDIAN -BIT64 -REAL8 -OPT -DEBUG -CFLAGS -PROGFCFLAGS -FCFLAGS -FC -FPPFLAGS -FPP -MPIHEADER -MPILIBS -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -enable_mpiserial -enable_debugging -enable_selectedrealkind -enable_sequence -enable_babel -' - ac_precious_vars='build_alias -host_alias -target_alias -MPILIBS -MPIHEADER -FPP -FPPFLAGS -FC -FCFLAGS -PROGFCFLAGS -CFLAGS -DEBUG -OPT -REAL8 -BIT64 -ENDIAN -INCLUDEFLAG -INCLUDEPATH -AR -RANLIB -BABELROOT -COMPILER_ROOT -FORT_SIZE -CC -LDFLAGS -LIBS -CPPFLAGS -CPP -MPIFC' -ac_subdirs_all='mpi-serial' - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. 
- if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. - - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. 
- with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - -mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. 
- with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | --program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - 
site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. - with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? 
"missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. -for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. -case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. 
-# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures MCT 2.8 to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. - -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/mct] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of MCT 2.8:";; - esac - cat <<\_ACEOF - -Optional Features: - --disable-option-checking ignore unrecognized --enable/--with options - --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) - --enable-FEATURE[=ARG] include FEATURE [ARG=yes] - --enable-mpiserial Use the included MPI replacement library for single - processor - --enable-debugging Use the debugging flag and disable the optimization - flag - --enable-selectedrealkind - define single precision and double precision numbers - using the selected_real_kind function. Default uses - the kind inquiry function. - --enable-sequence Modify MCT types to make them contiguous in memory. 
- --enable-babel Supply this option if you plan on building the Babel - bindings to MCT - -Some influential environment variables: - MPILIBS MPI library command line invocation - MPIHEADER MPI header include path with INCLUDEFLAG - FPP C-preprocessor for Fortran source code - FPPFLAGS C-preprocessing flags for Fortran source code - FC The Fortran compiler - FCFLAGS User-defined Fortran compiler flags - PROGFCFLAGS User-defined Fortran compiler flags for example programs - CFLAGS Customized C source compilation flags - DEBUG Fortran compiler flag for generating symbolic debugging - information - OPT Fortran compiler flag for optimization level - REAL8 Fortran compiler flag for setting the default REAL size to - REAL(KIND=8) - BIT64 Fortran compiler flag for generating 64-bit objects - ENDIAN Fortran compiler flag for converting big-endian to little-endian - INCLUDEFLAG Fortran compiler flag for specifying module search path - INCLUDEPATH Additional library and module paths with INCLUDEFLAG - AR Archive command - RANLIB Archive index update command - BABELROOT Root directory of your Babel installation. i.e.: - $BABELROOT/bin/babel $BABELROOT/lib/libsidl.so - COMPILER_ROOT - Root directory of your FORTRAN compiler - FORT_SIZE Number of bits in Fortran real and double kind - CC C compiler command - LDFLAGS linker flags, e.g. -L if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if - you have headers in a nonstandard directory - CPP C preprocessor - MPIFC MPI Fortran compiler command - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. 
- if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -MCT configure 2.8 -generated by GNU Autoconf 2.69 - -Copyright (C) 2012 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_compile - -# ac_fn_fc_try_compile LINENO -# --------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_fc_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_fc_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_fc_try_compile - -# ac_fn_fc_try_link LINENO -# ------------------------ -# Try to link conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_fc_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_fc_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - test -x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_fc_try_link - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - test -x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by MCT $as_me 2.8, which was -generated by GNU Autoconf 2.69. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. 
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. 
## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. ## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. - ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. 
- if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -# PROCESS THE FOLLOWING MAKEFILES - -ac_config_files="$ac_config_files Makefile.conf" - -ac_config_headers="$ac_config_headers config.h" - - -# DECLARE PACKAGE OPTIONS - -# Check whether --enable-mpiserial was given. -if test "${enable_mpiserial+set}" = set; then : - enableval=$enable_mpiserial; DONOTCHECKMPI="DONOTCHECKMPI" - -fi - - -# Check whether --enable-debugging was given. -if test "${enable_debugging+set}" = set; then : - enableval=$enable_debugging; DEBUGGING="ENABLED" - -fi - - -# Check whether --enable-selectedrealkind was given. -if test "${enable_selectedrealkind+set}" = set; then : - enableval=$enable_selectedrealkind; SRKDEF="SELECTEDREALKIND" - -fi - - -# Check whether --enable-sequence was given. -if test "${enable_sequence+set}" = set; then : - enableval=$enable_sequence; SRKDEF="SEQUENCE" -fi - - -# Check whether --enable-babel was given. 
-if test "${enable_babel+set}" = set; then : - enableval=$enable_babel; SRKDEF="SEQUENCE" -fi - - - - -# DECLARE THE FOLLOWING PRECIOUS VARIABLES - - - - - - - - - - - - - - - - - - - - - - -# INCLUDE BABELROOT and COMPILER_ROOT in Makefile.conf(autoconf output) - - - - - -# SET TEMPORARY VARIABLES - -# OS AND PLATFORM NAME -test "$osname"=NONE && osname=`uname -s` -test "$machinename"=NONE && machinename=`uname -m` -fullhostname=`hostname -f` - - -# HARDCODE SPECIFIC MACHINES FOR EXTRAORDINARY CIRCUMSTANCES - -# CHECK IF WE ARE ON THE EARTH SIMULATOR -ES="NO" -if echo $osname | grep -i esos >/dev/null 2>&1; then - ES="YES" -fi -if echo $osname | grep -i hp-ux >/dev/null 2>&1; then - if test "$ac_hostname" = "moon"; then - ES="YES" - # TELLS CONFIGURE NOT TO RUN ANY TESTS THAT REQUIRE EXECUTION - cross_compiling="yes" - fi -fi -if test "$ES" = "YES"; then - echo "Using preset configuration values for the Earth Simulator" - if test -z "$CC"; then - CC="escc" - fi - if test -z "$FC"; then - FC="esf90" - fi - if test -z "$MPIFC"; then - MPIFC="esmpif90" - fi - if test -z "$AR"; then - AR="esar cqs" - fi - if test -z "FPP"; then - FPPFLAGS=" " - fi - if test -z "$FCFLAGS"; then - FCFLAGS="-EP -Wf'-pvctl fullmsg -L fmtlist transform map'" - fi - if test -z "$OPT"; then - OPT="-C vopt" - fi - if test -z "$CPPDEFS"; then - CPPDEFS="-DESVEC" - fi -fi - -# Check if we are on the ANL BG/P - -if echo $fullhostname | egrep -q '.\.(challenger|intrepid)\.alcf\.anl\.gov' - then if test -z "$FC"; then - FC=bgxlf90_r - fi - if test -z "$MPIFC"; then - MPIFC=mpixlf90_r - fi - if test -z "$CC"; then - CC=mpixlc_r - fi -fi - - - -# START TESTS - -# CHECK FOR THE C COMPILER -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - for ac_prog in cc - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cc -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. 
-# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. -for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. - break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... 
" >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... " >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 
1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# CHECK FOR BYTE ORDERING - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
-rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if ${ac_cv_path_GREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_GREP" || continue -# Check for GNU ac_path_GREP and select it if it is found. 
- # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... " >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_EGREP" || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... 
" >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 -$as_echo_n "checking whether byte ordering is bigendian... " >&6; } -if ${ac_cv_c_bigendian+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_c_bigendian=unknown - # See if we're dealing with a universal compiler. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifndef __APPLE_CC__ - not a universal capable compiler - #endif - typedef int dummy; - -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - - # Check for potential -arch flags. It is not universal unless - # there are at least two -arch flags with different values. 
- ac_arch= - ac_prev= - for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do - if test -n "$ac_prev"; then - case $ac_word in - i?86 | x86_64 | ppc | ppc64) - if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then - ac_arch=$ac_word - else - ac_cv_c_bigendian=universal - break - fi - ;; - esac - ac_prev= - elif test "x$ac_word" = "x-arch"; then - ac_prev=arch - fi - done -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if test $ac_cv_c_bigendian = unknown; then - # See if sys/param.h defines the BYTE_ORDER macro. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - #include - -int -main () -{ -#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ - && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ - && LITTLE_ENDIAN) - bogus endian macros - #endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - # It does; now see whether it defined to BIG_ENDIAN or not. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - #include - -int -main () -{ -#if BYTE_ORDER != BIG_ENDIAN - not big endian - #endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_bigendian=yes -else - ac_cv_c_bigendian=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - fi - if test $ac_cv_c_bigendian = unknown; then - # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -int -main () -{ -#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) - bogus endian macros - #endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - # It does; now see whether it defined to _BIG_ENDIAN or not. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -int -main () -{ -#ifndef _BIG_ENDIAN - not big endian - #endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_bigendian=yes -else - ac_cv_c_bigendian=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - fi - if test $ac_cv_c_bigendian = unknown; then - # Compile a test program. - if test "$cross_compiling" = yes; then : - # Try to guess by grepping values from an object file. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -short int ascii_mm[] = - { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; - short int ascii_ii[] = - { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; - int use_ascii (int i) { - return ascii_mm[i] + ascii_ii[i]; - } - short int ebcdic_ii[] = - { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; - short int ebcdic_mm[] = - { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; - int use_ebcdic (int i) { - return ebcdic_mm[i] + ebcdic_ii[i]; - } - extern int foo; - -int -main () -{ -return use_ascii (foo) == use_ebcdic (foo); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then - ac_cv_c_bigendian=yes - fi - if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then - if test "$ac_cv_c_bigendian" = unknown; then - ac_cv_c_bigendian=no - else - # finding both strings is unlikely to happen, but who knows? - ac_cv_c_bigendian=unknown - fi - fi -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$ac_includes_default -int -main () -{ - - /* Are we little or big endian? From Harbison&Steele. */ - union - { - long int l; - char c[sizeof (long int)]; - } u; - u.l = 1; - return u.c[sizeof (long int) - 1] == 1; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ac_cv_c_bigendian=no -else - ac_cv_c_bigendian=yes -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 -$as_echo "$ac_cv_c_bigendian" >&6; } - case $ac_cv_c_bigendian in #( - yes) - $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h -;; #( - no) - ;; #( - universal) - -$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h - - ;; #( - *) - as_fn_error $? "unknown endianness - presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; - esac - - -# CHECK FOR THE FORTRAN COMPILER -# RLJ- specify the order, include PathScale and do not search for F77 -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -if test -n "$ac_tool_prefix"; then - for ac_prog in nagfor xlf95 pgf95 ifort gfortran pathf95 ftn lf95 f95 fort ifc efc g95 xlf90 pgf90 pathf90 epcf90 pghpf - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_FC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$FC"; then - ac_cv_prog_FC="$FC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_FC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -FC=$ac_cv_prog_FC -if test -n "$FC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FC" >&5 -$as_echo "$FC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$FC" && break - done -fi -if test -z "$FC"; then - ac_ct_FC=$FC - for ac_prog in nagfor xlf95 pgf95 ifort gfortran pathf95 ftn lf95 f95 fort ifc efc g95 xlf90 pgf90 pathf90 epcf90 pghpf -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_FC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_FC"; then - ac_cv_prog_ac_ct_FC="$ac_ct_FC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_FC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_FC=$ac_cv_prog_ac_ct_FC -if test -n "$ac_ct_FC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FC" >&5 -$as_echo "$ac_ct_FC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_FC" && break -done - - if test "x$ac_ct_FC" = x; then - FC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - FC=$ac_ct_FC - fi -fi - - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done -rm -f a.out - -# If we don't use `.F' as extension, the preprocessor is not run on the -# input file. (Note that this only needs to work for GNU compilers.) -ac_save_ext=$ac_ext -ac_ext=F -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU Fortran compiler" >&5 -$as_echo_n "checking whether we are using the GNU Fortran compiler... " >&6; } -if ${ac_cv_fc_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat > conftest.$ac_ext <<_ACEOF - program main -#ifndef __GNUC__ - choke me -#endif - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_fc_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_compiler_gnu" >&5 -$as_echo "$ac_cv_fc_compiler_gnu" >&6; } -ac_ext=$ac_save_ext -ac_test_FCFLAGS=${FCFLAGS+set} -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS= -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $FC accepts -g" >&5 -$as_echo_n "checking whether $FC accepts -g... 
" >&6; } -if ${ac_cv_prog_fc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - FCFLAGS=-g -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_prog_fc_g=yes -else - ac_cv_prog_fc_g=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_g" >&5 -$as_echo "$ac_cv_prog_fc_g" >&6; } -if test "$ac_test_FCFLAGS" = set; then - FCFLAGS=$ac_save_FCFLAGS -elif test $ac_cv_prog_fc_g = yes; then - if test "x$ac_cv_fc_compiler_gnu" = xyes; then - FCFLAGS="-g -O2" - else - FCFLAGS="-g" - fi -else - if test "x$ac_cv_fc_compiler_gnu" = xyes; then - FCFLAGS="-O2" - else - FCFLAGS= - fi -fi - -if test $ac_compiler_gnu = yes; then - GFC=yes -else - GFC= -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# CHECK FOR MPI LIBRARIES -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran flag to compile .F90 files" >&5 -$as_echo_n "checking for Fortran flag to compile .F90 files... " >&6; } -if ${ac_cv_fc_srcext_F90+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_ext=F90 -ac_fcflags_srcext_save=$ac_fcflags_srcext -ac_fcflags_srcext= -ac_cv_fc_srcext_F90=unknown -case $ac_ext in #( - [fF]77) ac_try=f77;; #( - *) ac_try=f95;; -esac -for ac_flag in none -qsuffix=f=F90 -Tf "-x $ac_try"; do - test "x$ac_flag" != xnone && ac_fcflags_srcext="$ac_flag" - cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_fc_srcext_F90=$ac_flag; break -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -done -rm -f conftest.$ac_objext conftest.F90 -ac_fcflags_srcext=$ac_fcflags_srcext_save - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_srcext_F90" >&5 -$as_echo "$ac_cv_fc_srcext_F90" >&6; } -if test "x$ac_cv_fc_srcext_F90" = xunknown; then - as_fn_error $? "Fortran could not compile .F90 files" "$LINENO" 5 -else - ac_fc_srcext=F90 - if test "x$ac_cv_fc_srcext_F90" = xnone; then - ac_fcflags_srcext="" - FCFLAGS_F90="" - else - ac_fcflags_srcext=$ac_cv_fc_srcext_F90 - FCFLAGS_F90=$ac_cv_fc_srcext_F90 - fi - - -fi -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - -OLDFCFLAGS="$FCFLAGS" - -if test -n "$MPIHEADER"; then - FCFLAGS="$FCFLAGS $MPIHEADER" -fi - -# CHECK MPI BY DEFAULT -if test -z "$DONOTCHECKMPI"; then - - - - - - for ac_prog in mpif90 hf90 mpxlf90 mpxlf95 mpf90 cmpifc cmpif90c -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_MPIFC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$MPIFC"; then - ac_cv_prog_MPIFC="$MPIFC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_MPIFC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -MPIFC=$ac_cv_prog_MPIFC -if test -n "$MPIFC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MPIFC" >&5 -$as_echo "$MPIFC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$MPIFC" && break -done -test -n "$MPIFC" || MPIFC="$FC" - - acx_mpi_save_FC="$FC" - FC="$MPIFC" - - - -if test x = x"$MPILIBS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI_Init" >&5 -$as_echo_n "checking for MPI_Init... " >&6; } - cat > conftest.$ac_ext <<_ACEOF - program main - call MPI_Init - end -_ACEOF -if ac_fn_fc_try_link "$LINENO"; then : - MPILIBS=" " - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi - - if test x = x"$MPILIBS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI_Init in -lfmpi" >&5 -$as_echo_n "checking for MPI_Init in -lfmpi... " >&6; } -if ${ac_cv_lib_fmpi_MPI_Init+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lfmpi $LIBS" -cat > conftest.$ac_ext <<_ACEOF - program main - call MPI_Init - end -_ACEOF -if ac_fn_fc_try_link "$LINENO"; then : - ac_cv_lib_fmpi_MPI_Init=yes -else - ac_cv_lib_fmpi_MPI_Init=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_fmpi_MPI_Init" >&5 -$as_echo "$ac_cv_lib_fmpi_MPI_Init" >&6; } -if test "x$ac_cv_lib_fmpi_MPI_Init" = xyes; then : - MPILIBS="-lfmpi" -fi - - fi - if test x = x"$MPILIBS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI_Init in -lmpichf90" >&5 -$as_echo_n "checking for MPI_Init in -lmpichf90... " >&6; } -if ${ac_cv_lib_mpichf90_MPI_Init+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lmpichf90 $LIBS" -cat > conftest.$ac_ext <<_ACEOF - program main - call MPI_Init - end -_ACEOF -if ac_fn_fc_try_link "$LINENO"; then : - ac_cv_lib_mpichf90_MPI_Init=yes -else - ac_cv_lib_mpichf90_MPI_Init=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mpichf90_MPI_Init" >&5 -$as_echo "$ac_cv_lib_mpichf90_MPI_Init" >&6; } -if test "x$ac_cv_lib_mpichf90_MPI_Init" = xyes; then : - MPILIBS="-lmpichf90" -fi - - fi - -if test x = x"$MPILIBS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI_Init in -lmpi" >&5 -$as_echo_n "checking for MPI_Init in -lmpi... 
" >&6; } -if ${ac_cv_lib_mpi_MPI_Init+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lmpi $LIBS" -cat > conftest.$ac_ext <<_ACEOF - program main - call MPI_Init - end -_ACEOF -if ac_fn_fc_try_link "$LINENO"; then : - ac_cv_lib_mpi_MPI_Init=yes -else - ac_cv_lib_mpi_MPI_Init=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mpi_MPI_Init" >&5 -$as_echo "$ac_cv_lib_mpi_MPI_Init" >&6; } -if test "x$ac_cv_lib_mpi_MPI_Init" = xyes; then : - MPILIBS="-lmpi" -fi - -fi -if test x = x"$MPILIBS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI_Init in -lmpich" >&5 -$as_echo_n "checking for MPI_Init in -lmpich... " >&6; } -if ${ac_cv_lib_mpich_MPI_Init+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lmpich $LIBS" -cat > conftest.$ac_ext <<_ACEOF - program main - call MPI_Init - end -_ACEOF -if ac_fn_fc_try_link "$LINENO"; then : - ac_cv_lib_mpich_MPI_Init=yes -else - ac_cv_lib_mpich_MPI_Init=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mpich_MPI_Init" >&5 -$as_echo "$ac_cv_lib_mpich_MPI_Init" >&6; } -if test "x$ac_cv_lib_mpich_MPI_Init" = xyes; then : - MPILIBS="-lmpich" -fi - -fi - -if test x != x"$MPILIBS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mpif.h" >&5 -$as_echo_n "checking for mpif.h... " >&6; } - cat > conftest.$ac_ext <<_ACEOF - program main - include 'mpif.h' - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - MPILIBS="" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi - -FC="$acx_mpi_save_FC" - - - -# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x = x"$MPILIBS"; then - - : -else - -$as_echo "#define HAVE_MPI 1" >>confdefs.h - - : -fi - -fi - -# DONT CHECK MPI IF SERIALMPI OPTION IS ENABLED -if test -n "$DONOTCHECKMPI"; then - echo "MPISERIAL ENABLED: BYPASSING MPI CHECK" - if test -z "$MPIFC"; then - MPIFC=$FC - fi - if test -z "$FORT_SIZE"; then - FORT_SIZE="real4double8" - echo "FORT_SIZE IS PRESET TO $FORT_SIZE" - fi - abs_top_builddir=`pwd` - MPISERPATH=$abs_top_builddir/mpi-serial - - MPIHEADER=-I$MPISERPATH - MPILIBS="-L$MPISERPATH -lmpi-serial" -fi - -FCFLAGS="$OLDFCFLAGS" - -# A HACK TO FIX ACX_MPI TO GET MPILIBS TO BE AN EMPTY STRING -if test "$MPILIBS" = " "; then - MPILIBS="" -fi - -# SET FC TO MPIFC. IF MPILIBS IS PRESENT, SET FC TO FC. -if test -z "$FC"; then - FC=$MPIFC - if test "$FC" != "$MPIFC"; then - if test -n "$MPILIBS"; then - FC=$FC - fi - fi -fi - -# FOR SANITY, CHECK THAT FILENAME EXTENSION FOR FC IS CONSISTENT WITH FC -OLDFC="$FC" -FC="$FC" - -cat > conftest.$ac_ext <<_ACEOF - subroutine oof() - return - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $FC FAILED TO COMPILE FILENAME EXTENSION $ac_ext" >&5 -$as_echo "$as_me: WARNING: $FC FAILED TO COMPILE FILENAME EXTENSION $ac_ext" >&2;} - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - - -FC="$OLDFC" - -# CHECK HOW TO GET THE COMPILER VERSION. 
-echo "Checking Compiler Version" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get the version output from $FC" >&5 -$as_echo_n "checking how to get the version output from $FC... " >&6; } -if ${ac_cv_prog_fc_version+:} false; then : - $as_echo_n "(cached) " >&6 -else - -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_prog_fc_version= -# Try some options frequently used verbose output -for ac_version in -V -version --version +version -qversion; do - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran 90 compiler in order to get "version" output -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS $ac_version" -(eval echo $as_me:4070: \"$ac_link\") >&5 -ac_fc_version_output=`eval $ac_link 5>&1 2>&1 | grep -v 'Driving:'` -echo "$ac_fc_version_output" >&5 -FCFLAGS=$ac_save_FCFLAGS - -rm -f conftest.* -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - - # look for "copyright" constructs in the output - for ac_arg in $ac_fc_version_output; do - case $ac_arg in - COPYRIGHT | copyright | Copyright | '(c)' | '(C)' | Compiler | Compilers | Version | Version:) - ac_cv_prog_fc_version=$ac_version - break 2 ;; - esac - done -done -if test -z "$ac_cv_prog_fc_version"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot determine how to obtain version information from $FC" >&5 -$as_echo "$as_me: WARNING: cannot determine how to obtain version information from $FC" >&2;} -fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: compilation failed" >&5 -$as_echo "$as_me: WARNING: compilation failed" >&2;} -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_version" >&5 -$as_echo "$ac_cv_prog_fc_version" >&6; } - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# Check how to use the cpp with fortran - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -ac_fc_pp_define_srcext_save=$ac_fc_srcext -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran flag to compile preprocessed .F files" >&5 -$as_echo_n "checking for Fortran flag to compile preprocessed .F files... 
" >&6; } -if ${ac_cv_fc_pp_srcext_F+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_ext=F -ac_fcflags_pp_srcext_save=$ac_fcflags_srcext -ac_fcflags_srcext= -ac_cv_fc_pp_srcext_F=unknown -case $ac_ext in #( - [fF]77) ac_try=f77-cpp-input;; #( - *) ac_try=f95-cpp-input;; -esac -for ac_flag in none -ftpp -fpp -Tf "-fpp -Tf" -xpp=fpp -Mpreprocess "-e Z" \ - -cpp -xpp=cpp -qsuffix=cpp=F "-x $ac_try" +cpp -Cpp; do - test "x$ac_flag" != xnone && ac_fcflags_srcext="$ac_flag" - cat > conftest.$ac_ext <<_ACEOF - program main - -#if 0 -#include - choke me -#endif - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - cat > conftest.$ac_ext <<_ACEOF - program main - -#if 1 -#include - choke me -#endif - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - -else - ac_cv_fc_pp_srcext_F=$ac_flag; break -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -done -rm -f conftest.$ac_objext conftest.F -ac_fcflags_srcext=$ac_fcflags_pp_srcext_save - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_pp_srcext_F" >&5 -$as_echo "$ac_cv_fc_pp_srcext_F" >&6; } -if test "x$ac_cv_fc_pp_srcext_F" = xunknown; then - as_fn_error $? "Fortran could not compile preprocessed .F files" "$LINENO" 5 -else - ac_fc_srcext=F - if test "x$ac_cv_fc_pp_srcext_F" = xnone; then - ac_fcflags_srcext="" - FCFLAGS_F="" - else - ac_fcflags_srcext=$ac_cv_fc_pp_srcext_F - FCFLAGS_F=$ac_cv_fc_pp_srcext_F - fi - - -fi -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to define symbols for preprocessed Fortran" >&5 -$as_echo_n "checking how to define symbols for preprocessed Fortran... 
" >&6; } -if ${ac_cv_fc_pp_define+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_fc_pp_define_srcext_save=$ac_fc_srcext -ac_cv_fc_pp_define=unknown -ac_fc_pp_define_FCFLAGS_save=$FCFLAGS -for ac_flag in -D -WF,-D -Wp,-D -Wc,-D -do - FCFLAGS="$ac_fc_pp_define_FCFLAGS_save ${ac_flag}FOOBAR ${ac_flag}ZORK=42" - cat > conftest.$ac_ext <<_ACEOF - program main - -#ifndef FOOBAR - choke me -#endif -#if ZORK != 42 - choke me -#endif - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_fc_pp_define=$ac_flag -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - test x"$ac_cv_fc_pp_define" != xunknown && break -done -FCFLAGS=$ac_fc_pp_define_FCFLAGS_save - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_pp_define" >&5 -$as_echo "$ac_cv_fc_pp_define" >&6; } -ac_fc_srcext=$ac_fc_pp_define_srcext_save -if test "x$ac_cv_fc_pp_define" = xunknown; then - FC_DEFINE= - as_fn_error 77 "Fortran does not allow to define preprocessor symbols" "$LINENO" 5 -else - FC_DEFINE=$ac_cv_fc_pp_define - -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -# CHECK HOW TO NAME MANGLE C FUNCTIONS SO THAT IT CAN BE CALLED FROM FORTRAN -OLDFC="$FC" - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get verbose linking output from $FC" >&5 -$as_echo_n "checking how to get verbose linking output from $FC... " >&6; } -if ${ac_cv_prog_fc_v+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_prog_fc_v= -# Try some options frequently used verbose output -for ac_verb in -v -verbose --verbose -V -\#\#\#; do - cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran compiler in order to get -# "verbose" output that we can then parse for the Fortran linker -# flags. -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS $ac_verb" -eval "set x $ac_link" -shift -$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 -# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, -# LIBRARY_PATH; skip all such settings. -ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | - sed '/^Driving:/d; /^Configured with:/d; - '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` -$as_echo "$ac_fc_v_output" >&5 -FCFLAGS=$ac_save_FCFLAGS - -rm -rf conftest* - -# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where -# /foo, /bar, and /baz are search directories for the Fortran linker. -# Here, we change these into -L/foo -L/bar -L/baz (and put it first): -ac_fc_v_output="`echo $ac_fc_v_output | - grep 'LPATH is:' | - sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" - -# FIXME: we keep getting bitten by quoted arguments; a more general fix -# that detects unbalanced quotes in FLIBS should be implemented -# and (ugh) tested at some point. -case $ac_fc_v_output in - # With xlf replace commas with spaces, - # and remove "-link" and closing parenthesis. 
- *xlfentry*) - ac_fc_v_output=`echo $ac_fc_v_output | - sed ' - s/,/ /g - s/ -link / /g - s/) *$// - ' - ` ;; - - # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted - # $LIBS confuse us, and the libraries appear later in the output anyway). - *mGLOB_options_string*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; - - # Portland Group compiler has singly- or doubly-quoted -cmdline argument - # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. - # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". - *-cmdline\ * | *-ignore\ * | *-def\ *) - ac_fc_v_output=`echo $ac_fc_v_output | sed "\ - s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g - s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g - s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; - - # If we are using fort77 (the f2c wrapper) then filter output and delete quotes. - *fort77*f2c*gcc*) - ac_fc_v_output=`echo "$ac_fc_v_output" | sed -n ' - /:[ ]\+Running[ ]\{1,\}"gcc"/{ - /"-c"/d - /[.]c"*/d - s/^.*"gcc"/"gcc"/ - s/"//gp - }'` ;; - - # If we are using Cray Fortran then delete quotes. - *cft90*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; -esac - - - # look for -l* and *.a constructs in the output - for ac_arg in $ac_fc_v_output; do - case $ac_arg in - [\\/]*.a | ?:[\\/]*.a | -[lLRu]*) - ac_cv_prog_fc_v=$ac_verb - break 2 ;; - esac - done -done -if test -z "$ac_cv_prog_fc_v"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot determine how to obtain linking information from $FC" >&5 -$as_echo "$as_me: WARNING: cannot determine how to obtain linking information from $FC" >&2;} -fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: compilation failed" >&5 -$as_echo "$as_me: WARNING: compilation failed" >&2;} -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_v" >&5 -$as_echo "$ac_cv_prog_fc_v" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran libraries of $FC" >&5 -$as_echo_n "checking for Fortran libraries of $FC... " >&6; } -if ${ac_cv_fc_libs+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "x$FCLIBS" != "x"; then - ac_cv_fc_libs="$FCLIBS" # Let the user override the test. -else - -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran compiler in order to get -# "verbose" output that we can then parse for the Fortran linker -# flags. -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS $ac_cv_prog_fc_v" -eval "set x $ac_link" -shift -$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 -# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, -# LIBRARY_PATH; skip all such settings. -ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | - sed '/^Driving:/d; /^Configured with:/d; - '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` -$as_echo "$ac_fc_v_output" >&5 -FCFLAGS=$ac_save_FCFLAGS - -rm -rf conftest* - -# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where -# /foo, /bar, and /baz are search directories for the Fortran linker. 
-# Here, we change these into -L/foo -L/bar -L/baz (and put it first): -ac_fc_v_output="`echo $ac_fc_v_output | - grep 'LPATH is:' | - sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" - -# FIXME: we keep getting bitten by quoted arguments; a more general fix -# that detects unbalanced quotes in FLIBS should be implemented -# and (ugh) tested at some point. -case $ac_fc_v_output in - # With xlf replace commas with spaces, - # and remove "-link" and closing parenthesis. - *xlfentry*) - ac_fc_v_output=`echo $ac_fc_v_output | - sed ' - s/,/ /g - s/ -link / /g - s/) *$// - ' - ` ;; - - # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted - # $LIBS confuse us, and the libraries appear later in the output anyway). - *mGLOB_options_string*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; - - # Portland Group compiler has singly- or doubly-quoted -cmdline argument - # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. - # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". - *-cmdline\ * | *-ignore\ * | *-def\ *) - ac_fc_v_output=`echo $ac_fc_v_output | sed "\ - s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g - s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g - s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; - - # If we are using fort77 (the f2c wrapper) then filter output and delete quotes. - *fort77*f2c*gcc*) - ac_fc_v_output=`echo "$ac_fc_v_output" | sed -n ' - /:[ ]\+Running[ ]\{1,\}"gcc"/{ - /"-c"/d - /[.]c"*/d - s/^.*"gcc"/"gcc"/ - s/"//gp - }'` ;; - - # If we are using Cray Fortran then delete quotes. - *cft90*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; -esac - - - -ac_cv_fc_libs= - -# Save positional arguments (if any) -ac_save_positional="$@" - -set X $ac_fc_v_output -while test $# != 1; do - shift - ac_arg=$1 - case $ac_arg in - [\\/]*.a | ?:[\\/]*.a) - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_arg" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" -fi - ;; - -bI:*) - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_arg" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - if test "$ac_compiler_gnu" = yes; then - for ac_link_opt in $ac_arg; do - ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" - done -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" -fi -fi - ;; - # Ignore these flags. - -lang* | -lcrt*.o | -lc | -lgcc* | -lSystem | -libmil | -little \ - |-LANG:=* | -LIST:* | -LNO:* | -link | -list | -lnuma ) - ;; - -lkernel32) - test x"$CYGWIN" != xyes && ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" - ;; - -[LRuYz]) - # These flags, when seen by themselves, take an argument. 
- # We remove the space between option and argument and re-iterate - # unless we find an empty arg or a new option (starting with -) - case $2 in - "" | -*);; - *) - ac_arg="$ac_arg$2" - shift; shift - set X $ac_arg "$@" - ;; - esac - ;; - -YP,*) - for ac_j in `$as_echo "$ac_arg" | sed -e 's/-YP,/-L/;s/:/ -L/g'`; do - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_j" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - ac_arg="$ac_arg $ac_j" - ac_cv_fc_libs="$ac_cv_fc_libs $ac_j" -fi - done - ;; - -[lLR]*) - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_arg" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" -fi - ;; - -zallextract*| -zdefaultextract) - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" - ;; - # Ignore everything else. - esac -done -# restore positional arguments -set X $ac_save_positional; shift - -# We only consider "LD_RUN_PATH" on Solaris systems. If this is seen, -# then we insist that the "run path" must be an absolute path (i.e. it -# must begin with a "/"). -case `(uname -sr) 2>/dev/null` in - "SunOS 5"*) - ac_ld_run_path=`$as_echo "$ac_fc_v_output" | - sed -n 's,^.*LD_RUN_PATH *= *\(/[^ ]*\).*$,-R\1,p'` - test "x$ac_ld_run_path" != x && - if test "$ac_compiler_gnu" = yes; then - for ac_link_opt in $ac_ld_run_path; do - ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" - done -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_ld_run_path" -fi - ;; -esac -fi # test "x$[]_AC_LANG_PREFIX[]LIBS" = "x" - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_libs" >&5 -$as_echo "$ac_cv_fc_libs" >&6; } -FCLIBS="$ac_cv_fc_libs" - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dummy main to link with Fortran libraries" >&5 -$as_echo_n "checking for dummy main to link with Fortran libraries... " >&6; } -if ${ac_cv_fc_dummy_main+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_fc_dm_save_LIBS=$LIBS - LIBS="$LIBS $FCLIBS" - ac_fortran_dm_var=FC_DUMMY_MAIN - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - # First, try linking without a dummy main: - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_fortran_dummy_main=none -else - ac_cv_fortran_dummy_main=unknown -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - - if test $ac_cv_fortran_dummy_main = unknown; then - for ac_func in MAIN__ MAIN_ __main MAIN _MAIN __MAIN main_ main__ _main; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#define $ac_fortran_dm_var $ac_func -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_fortran_dummy_main=$ac_func; break -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - fi - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - ac_cv_fc_dummy_main=$ac_cv_fortran_dummy_main - rm -rf conftest* - LIBS=$ac_fc_dm_save_LIBS - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_dummy_main" >&5 -$as_echo "$ac_cv_fc_dummy_main" >&6; } -FC_DUMMY_MAIN=$ac_cv_fc_dummy_main -if test "$FC_DUMMY_MAIN" != unknown; then : - if test $FC_DUMMY_MAIN != none; then - -cat >>confdefs.h <<_ACEOF -#define FC_DUMMY_MAIN $FC_DUMMY_MAIN -_ACEOF - - if test "x$ac_cv_fc_dummy_main" = "x$ac_cv_f77_dummy_main"; then - -$as_echo "#define FC_DUMMY_MAIN_EQ_F77 1" >>confdefs.h - - fi -fi -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "linking to Fortran libraries from C fails -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran name-mangling scheme" >&5 -$as_echo_n "checking for Fortran name-mangling scheme... " >&6; } -if ${ac_cv_fc_mangling+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat > conftest.$ac_ext <<_ACEOF - subroutine foobar() - return - end - subroutine foo_bar() - return - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - mv conftest.$ac_objext cfortran_test.$ac_objext - - ac_save_LIBS=$LIBS - LIBS="cfortran_test.$ac_objext $LIBS $FCLIBS" - - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - ac_success=no - for ac_foobar in foobar FOOBAR; do - for ac_underscore in "" "_"; do - ac_func="$ac_foobar$ac_underscore" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char $ac_func (); -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -return $ac_func (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_success=yes; break 2 -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - done - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - if test "$ac_success" = "yes"; then - case $ac_foobar in - foobar) - ac_case=lower - ac_foo_bar=foo_bar - ;; - FOOBAR) - ac_case=upper - ac_foo_bar=FOO_BAR - ;; - esac - - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - ac_success_extra=no - for ac_extra in "" "_"; do - ac_func="$ac_foo_bar$ac_underscore$ac_extra" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $ac_func (); -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -return $ac_func (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_success_extra=yes; break -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - if test "$ac_success_extra" = "yes"; then - ac_cv_fc_mangling="$ac_case case" - if test -z "$ac_underscore"; then - ac_cv_fc_mangling="$ac_cv_fc_mangling, no underscore" - else - ac_cv_fc_mangling="$ac_cv_fc_mangling, underscore" - fi - if test -z "$ac_extra"; then - ac_cv_fc_mangling="$ac_cv_fc_mangling, no extra underscore" - else - ac_cv_fc_mangling="$ac_cv_fc_mangling, extra underscore" - fi - else - ac_cv_fc_mangling="unknown" - fi - else - ac_cv_fc_mangling="unknown" - fi - - LIBS=$ac_save_LIBS - rm -rf conftest* - rm -f cfortran_test* -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? 
"cannot compile a simple Fortran program -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_mangling" >&5 -$as_echo "$ac_cv_fc_mangling" >&6; } - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -case $ac_cv_fc_mangling in - "lower case, no underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name" >>confdefs.h - ;; - "lower case, no underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name ## _" >>confdefs.h - ;; - "lower case, underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name ## _" >>confdefs.h - ;; - "lower case, underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name ## __" >>confdefs.h - ;; - "upper case, no underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME" >>confdefs.h - ;; - "upper case, no underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME ## _" >>confdefs.h - ;; - "upper case, underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME ## _" >>confdefs.h - ;; - "upper case, underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME ## __" >>confdefs.h - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unknown Fortran name-mangling scheme" >&5 -$as_echo "$as_me: WARNING: unknown Fortran name-mangling scheme" >&2;} - ;; -esac - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -FC="$OLDFC" - -# CHECK THAT THE FORTRAN COMPILER CAN CORRECTLY PROCESS THESE DIRECTIVES -# IF NOT, USE THE EXTERNAL C PREPROCESSOR -OLDFC="$FC" - -defineflag="-Daardvark" -if test "$OLDFC" = "xlf90"; then - defineflag="-WF,-Daardvark" -fi -if test "$OLDFC" = "frt"; then - defineflag="-Wp,-Daardvark" -fi - -FC="$OLDFC" - -# DEFINE VARIABLES ACCORDING TO OS AND COMPILER - -echo "Hostname=$ac_hostname" -echo "Machine=$machinename" -echo "OS=$osname" - -# CHECK OS NAME -if echo $osname | grep -i aix >/dev/null 2>&1; then - SYSDEF="AIX" -fi -if echo $osname | grep -i darwin >/dev/null 2>&1; then - SYSDEF="DARWIN" -fi -if echo $osname | grep -i unix_system_v >/dev/null 2>&1; then - SYSDEF="UNIXSYSTEMV" -fi -if echo $osname | grep -i irix >/dev/null 2>&1; then - SYSDEF="IRIX" -fi -if echo $osname | grep -i irix64 >/dev/null 2>&1; then - SYSDEF="IRIX64" -fi -if echo $osname | grep -i linux >/dev/null 2>&1; then - SYSDEF="LINUX" 
-fi -if echo $osname | grep -i osf1 >/dev/null 2>&1; then - SYSDEF="OSF1" -fi -if echo $osname | grep -i super >/dev/null 2>&1; then - SYSDEF="SUPERUX" -fi -if echo $osname | grep -i sun >/dev/null 2>&1; then - SYSDEF="SUNOS" -fi -if echo $osname | grep -i t3e >/dev/null 2>&1; then - SYSDEF="T3E" -fi -if echo $osname | grep -i unicos >/dev/null 2>&1; then - SYSDEF="UNICOS" -fi -if test -z "$SYSDEF"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: OPERATING SYSTEM UNKNOWN" >&5 -$as_echo "$as_me: WARNING: OPERATING SYSTEM UNKNOWN" >&2;} - SYSDEF="UNKNOWNOS" -fi - -# Set the default FCFLAGS for non-gfortran compilers. -# NOTE: This may change with a new version of autoconf. -DEFFCFLAGS="-g" - -##################################################### -# CHECK COMPILER NAME and add specific flags -if echo $FC | grep xlf >/dev/null 2>&1; then - echo "Fortran Compiler is XLF" - CPRDEF="XLF" - if test -z "$REAL8"; then - REAL8="-qrealsize=8" - fi - if test -z "$OPT"; then - OPT="-O2 -qarch=auto" - fi - if test -z "$DEBUG"; then - DEBUG="-qdbg" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="" - fi -elif echo $FC | grep pgf >/dev/null 2>&1; then - echo "Fortran Compiler is Portland Group" - CPRDEF="PGI" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-pc 64" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="" - fi - if test -z "$ENDIAN"; then - ENDIAN="-byteswapio" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi -elif echo $FC | grep ftn >/dev/null 2>&1; then - if echo $ac_fc_version_output | grep -i Portland >/dev/null 2>&1; then - echo "Fortran Compiler is Portland Group, Cray" - CPRDEF="PGI" - SYSDEF="CNLINUX" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-pc 64" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="" - fi - if test -z "$ENDIAN"; then - ENDIAN="-byteswapio" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi - fi -elif echo $FC | grep ifort >/dev/null 2>&1; then - echo "Fortran Compiler is Intel ifort" - CPRDEF="INTEL" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="-w -ftz" - fi - if test -z "$PROGFCFLAGS"; then - PROGFCFLAGS="-assume byterecl" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert big_endian" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi -elif echo $FC | grep g95 >/dev/null 2>&1; then - echo "Fortran Compiler is GNU" - CPRDEF="GNU" -elif echo $FC | grep gfortran >/dev/null 2>&1; then - echo "Fortran Compiler is GNU" - CPRDEF="GNU" -# For gfortran, default flags are different - if test "$FCFLAGS" = "-g -O2"; then - FCFLAGS="" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $ac_fc_version_output | grep -i nag >/dev/null 2>&1; then - echo "Fortran Compiler is NAG" - CPRDEF="NAG" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="-wmismatch=mpi_send,mpi_recv,mpi_bcast,mpi_allreduce,mpi_reduce,mpi_gatherv,mpi_gather,mpi_rsend,mpi_irecv,mpi_isend,mpi_scatterv,mpi_alltoallv -dusty" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert=BIG_IEEE" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi -########################################################### -# the compiler flags below have not been verified recently 
-########################################################### -elif echo $FC | grep frt >/dev/null 2>&1; then - echo "Fortran Compiler is UXP/V" - echo "Suggested additional vectorization flags: -Wv,-s5,-t3,-noalias,-ilfunc,-md" - CPRDEF="FUJITSU" - if test -z "$F90FLAGS"; then - F90FLAGS="-Am -X9" - fi - if test -z "$BIT64"; then - BIT64="-KA64" - fi - if test -z "$REAL8"; then - REAL8="-Ad" - fi -elif echo $ac_fc_version_output | grep Lahey >/dev/null 2>&1; then - echo "Fortran Compiler is Lahey" - CPRDEF="LAHEY" -elif echo $FC | grep ifc >/dev/null 2>&1; then - echo "Fortran Compiler is Intel 7.x or earlier" - echo "Intel ifc compiler must set the environment variable F_UFMTENDIAN=big to do endian conversion" - CPRDEF="INTEL" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$F90FLAGS"; then - F90FLAGS="-w" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $FC | grep efc >/dev/null 2>&1; then - echo "Fortran Compiler is Intel 7.x or earlier for IA-64" - echo "Intel efc compiler must set the environment variable F_UFMTENDIAN=big to do endian conversion" - CPRDEF="INTEL" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$F90FLAGS"; then - F90FLAGS="-w -ftz" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $FC | grep pathf90 >/dev/null 2>&1; then - echo "Fortran Compiler is PathScale" - CPRDEF="PATHSC" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-m64" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $ac_fc_version_output | grep -i absoft >/dev/null 2>&1; then - echo "Fortran Compiler is Absoft" - CPRDEF="ABSOFT" - if test -z "$REAL8"; then - REAL8="-N113" - fi - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-p" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $ac_fc_version_output | grep -i workshop >/dev/null 2>&1; then - echo "Fortran Compiler is Workshop" - CPRDEF="WORKSHOP" - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-M" - fi -elif echo $ac_fc_version_output | grep -i mipspro >/dev/null 2>&1; then - echo "Fortran Compiler is MIPSPro" - CPRDEF="MIPSPRO" - EXTRACFLAGS="-64" - if test -z "$OPT"; then - OPT="-O3" - fi - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-64" - fi -elif echo $ac_fc_version_output | grep -i compaq >/dev/null 2>&1; then - echo "Fortran Compiler is Compaq" - CPRDEF="COMPAQ" - MPILIBS="$MPILIBS -lelan" - if test -z "$OPT"; then - OPT="-fast" - fi - if test -z "$REAL8"; then - REAL8="-real_size 64" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert big_endian" - fi - -# Compaq Fortran changed its name to HP Fortran. -# Lets support both versions for now. 
-elif echo $ac_fc_version_output | grep HP >/dev/null 2>&1; then - echo "Fortran Compiler is HP" - CPRDEF="COMPAQ" - MPILIBS="$MPILIBS -lelan" - if test -z "$OPT"; then - OPT="-fast" - fi - if test -z "$REAL8"; then - REAL8="-real_size 64" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert big_endian" - fi - -elif echo $ac_fc_version_output | grep -i sx >/dev/null 2>&1; then - echo "Fortran Compiler is SX" - CPRDEF="SX" - if test -z "$F90FLAGS"; then - F90FLAGS="-EP -Wf'-pvctl noassoc'" - fi - if test -z "$OPT"; then - OPT="-Chopt" - fi -fi - -########################################################### -# END of compiler-specific flag setting -########################################################### - -CPPDEFS="$CPPDEFS -DSYS$SYSDEF -DCPR$CPRDEF" -if test -n "$SRKDEF"; then - CPPDEFS="$CPPDEFS -D$SRKDEF" -fi - -# IF DEBUGGING ENABLED, DISABLE OPTIMIZATION FLAG -if test "$DEBUGGING" = "ENABLED"; then - OPT="" -else - DEBUG="" -fi - -# SET HARDCODED VARIABLES AS A LAST RESORT - -# ALWAYS ENABLE CRULE IN MAKEFILE -CRULE=.c.o - - - - -# INCLUDE FLAG IF NOT ALREADY SET IS MOST LIKELY -I -if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-I" -fi - -# ARCHIVE COMMAND SIMILAR ACROSS ALL PLATFORMS -if test -z "$AR"; then - AR="ar cq" -fi - -# RANLIB -if test -z "$RANLIB"; then - # Necessary on Darwin to deal with common symbols (particularly when - # using ifort). - if test "$SYSDEF"x = DARWINx; then - RANLIB="ranlib -c" - else - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 -$as_echo "$RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 -$as_echo "$ac_ct_RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - RANLIB=$ac_ct_RANLIB - fi -else - RANLIB="$ac_cv_prog_RANLIB" -fi - - fi -fi - -echo -echo Output Variables: {CC=$CC} {CFLAGS=$CFLAGS} \ -{FC=$FC} {FCFLAGS=$FCFLAGS} {PROGFCFLAGS=$PROGFCFLAGS}\ -{CPPDEFS=$CPPDEFS} {OPT=$OPT} {DEBUG=$DEBUG} {REAL8=$REAL8} \ -{BIT64=$BIT64} {ENDIAN=$ENDIAN} {MPIFC=$MPIFC} \ -{MPILIBS=$MPILIBS} {MPIHEADER=$MPIHEADER} \ -{INCLUDEFLAG=$INCLUDEFLAG} {INCLUDEPATH=$INCLUDEPATH} \ -{AR=$AR} {RANLIB=$RANLIB} {BABELROOT=$BABELROOT} {COMPILER_ROOT=$COMPILER_ROOT} \ -{PYTHON=$PYTHON} {PYTHONOPTS=$PYTHONOPTS} {FORT_SIZE=$FORT_SIZE} {prefix=$prefix} \ -{SRCDIR=$SRCDIR} {FC_DEFINE=$FC_DEFINE} -echo - -if test -n "$DONOTCHECKMPI"; then - echo "MPISERIAL ENABLED: CONFIGURING mpi-serial" - ac_aux_dir= -for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do - if test -f "$ac_dir/install-sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install-sh -c" - break - elif test -f "$ac_dir/install.sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install.sh -c" - break - elif test -f "$ac_dir/shtool"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/shtool install -c" - break - fi -done -if test -z "$ac_aux_dir"; then - as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 -fi - -# These three variables are undocumented and unsupported, -# and are intended to be withdrawn in a future Autoconf release. -# They can cause serious problems if a builder's source tree is in a directory -# whose full name contains unusual characters. -ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. -ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. -ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. - - - - -subdirs="$subdirs mpi-serial" - -fi - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. 
-# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! -f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. 
## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. 
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... 
but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by MCT $as_me 2.8, which was -generated by GNU Autoconf 2.69. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. 
- -Usage: $0 [OPTION]... [TAG]... - - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -MCT config.status 2.8 -configured by $0, generated by GNU Autoconf 2.69, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2012 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. - -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - '') as_fn_error $? "missing file argument" ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error $? "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." 
;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "Makefile.conf") CONFIG_FILES="$CONFIG_FILES Makefile.conf" ;; - "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; - - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. -if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. -if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$ac_tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error $? 
"could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ - || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove sole $(srcdir), -# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ -h -s/// -s/^/:/ -s/[ ]*$/:/ -s/:\$(srcdir):/:/g -s/:\${srcdir}:/:/g -s/:@srcdir@:/:/g -s/^:*// -s/:*$// -x -s/\(=[ ]*\).*/\1/ -G -s/\n// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$ac_tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. - -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_tt=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_tt"; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. 
- -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. 
- case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? 
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ - >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ - "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&2;} - - rm -f "$ac_tmp/stdin" - case $ac_file in - -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; - *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; - esac \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" - } >"$ac_tmp/config.h" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$ac_tmp/config.h" "$ac_file" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error $? "could not create -" "$LINENO" 5 - fi - ;; - - - esac - -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit 1 -fi - -# -# CONFIG_SUBDIRS section. 
-# -if test "$no_recursion" != yes; then - - # Remove --cache-file, --srcdir, and --disable-option-checking arguments - # so they do not pile up. - ac_sub_configure_args= - ac_prev= - eval "set x $ac_configure_args" - shift - for ac_arg - do - if test -n "$ac_prev"; then - ac_prev= - continue - fi - case $ac_arg in - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* \ - | --c=*) - ;; - --config-cache | -C) - ;; - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - ;; - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - ;; - --disable-option-checking) - ;; - *) - case $ac_arg in - *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append ac_sub_configure_args " '$ac_arg'" ;; - esac - done - - # Always prepend --prefix to ensure using the same prefix - # in subdir configurations. - ac_arg="--prefix=$prefix" - case $ac_arg in - *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - ac_sub_configure_args="'$ac_arg' $ac_sub_configure_args" - - # Pass --silent - if test "$silent" = yes; then - ac_sub_configure_args="--silent $ac_sub_configure_args" - fi - - # Always prepend --disable-option-checking to silence warnings, since - # different subdirs can have different --enable and --with options. - ac_sub_configure_args="--disable-option-checking $ac_sub_configure_args" - - ac_popdir=`pwd` - for ac_dir in : $subdirs; do test "x$ac_dir" = x: && continue - - # Do not complain, so a configure script can configure whichever - # parts of a large source tree are present. - test -d "$srcdir/$ac_dir" || continue - - ac_msg="=== configuring in $ac_dir (`pwd`/$ac_dir)" - $as_echo "$as_me:${as_lineno-$LINENO}: $ac_msg" >&5 - $as_echo "$ac_msg" >&6 - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - cd "$ac_dir" - - # Check for guested configure; otherwise get Cygnus style configure. 
- if test -f "$ac_srcdir/configure.gnu"; then - ac_sub_configure=$ac_srcdir/configure.gnu - elif test -f "$ac_srcdir/configure"; then - ac_sub_configure=$ac_srcdir/configure - elif test -f "$ac_srcdir/configure.in"; then - # This should be Cygnus configure. - ac_sub_configure=$ac_aux_dir/configure - else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: no configuration information is in $ac_dir" >&5 -$as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2;} - ac_sub_configure= - fi - - # The recursion is here. - if test -n "$ac_sub_configure"; then - # Make the cache file name correct relative to the subdirectory. - case $cache_file in - [\\/]* | ?:[\\/]* ) ac_sub_cache_file=$cache_file ;; - *) # Relative name. - ac_sub_cache_file=$ac_top_build_prefix$cache_file ;; - esac - - { $as_echo "$as_me:${as_lineno-$LINENO}: running $SHELL $ac_sub_configure $ac_sub_configure_args --cache-file=$ac_sub_cache_file --srcdir=$ac_srcdir" >&5 -$as_echo "$as_me: running $SHELL $ac_sub_configure $ac_sub_configure_args --cache-file=$ac_sub_cache_file --srcdir=$ac_srcdir" >&6;} - # The eval makes quoting arguments work. - eval "\$SHELL \"\$ac_sub_configure\" $ac_sub_configure_args \ - --cache-file=\"\$ac_sub_cache_file\" --srcdir=\"\$ac_srcdir\"" || - as_fn_error $? "$ac_sub_configure failed for $ac_dir" "$LINENO" 5 - fi - - cd "$ac_popdir" - done -fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - - -echo Please check the Makefile.conf -echo Have a nice day! - -# test -z is true for empty strings -# test -n is true for non-empty strings - - - - diff --git a/src/externals/mct/configure.ac b/src/externals/mct/configure.ac deleted file mode 100644 index 24870887023..00000000000 --- a/src/externals/mct/configure.ac +++ /dev/null @@ -1,611 +0,0 @@ -# -*- Autoconf -*- -# Process this file with autoconf to produce a configure script. - -AC_INIT(MCT, 2.8) - -# PROCESS THE FOLLOWING MAKEFILES -AC_CONFIG_MACRO_DIR([m4]) -AC_CONFIG_FILES(Makefile.conf) -AC_CONFIG_HEADER(config.h) - -# DECLARE PACKAGE OPTIONS - -AC_ARG_ENABLE(mpiserial, -AC_HELP_STRING([--enable-mpiserial], -[Use the included MPI replacement library for single processor]), -[DONOTCHECKMPI="DONOTCHECKMPI"] -) - -AC_ARG_ENABLE(debugging, -AC_HELP_STRING([--enable-debugging], -[Use the debugging flag and disable the optimization flag]), -[DEBUGGING="ENABLED"] -) - -AC_ARG_ENABLE(selectedrealkind, -AC_HELP_STRING([--enable-selectedrealkind], -[define single precision and double precision numbers using the selected_real_kind function. 
Default uses the kind inquiry function.]), -[SRKDEF="SELECTEDREALKIND"] -) - -AC_ARG_ENABLE(sequence, -AC_HELP_STRING([--enable-sequence],[Modify MCT types to make them contiguous in memory.]), -[SRKDEF="SEQUENCE"],) - -AC_ARG_ENABLE(babel, -AC_HELP_STRING([--enable-babel],[Supply this option if you plan on building the Babel bindings to MCT]), -[SRKDEF="SEQUENCE"],) - - - -# DECLARE THE FOLLOWING PRECIOUS VARIABLES - -AC_ARG_VAR(MPILIBS,[MPI library command line invocation]) -AC_ARG_VAR(MPIHEADER,[MPI header include path with INCLUDEFLAG]) -AC_ARG_VAR(FPP,C-preprocessor for Fortran source code) -AC_ARG_VAR(FPPFLAGS,C-preprocessing flags for Fortran source code) -AC_ARG_VAR(FC,The Fortran compiler) -AC_ARG_VAR(FCFLAGS,User-defined Fortran compiler flags) -AC_ARG_VAR(PROGFCFLAGS,User-defined Fortran compiler flags for example programs) -AC_ARG_VAR(CFLAGS,Customized C source compilation flags) -AC_ARG_VAR(DEBUG,Fortran compiler flag for generating symbolic debugging information) -AC_ARG_VAR(OPT,Fortran compiler flag for optimization level) -AC_ARG_VAR(REAL8,[Fortran compiler flag for setting the default REAL size to REAL(KIND=8)]) -AC_ARG_VAR(BIT64,Fortran compiler flag for generating 64-bit objects) -AC_ARG_VAR(ENDIAN,Fortran compiler flag for converting big-endian to little-endian) -AC_ARG_VAR(INCLUDEFLAG,Fortran compiler flag for specifying module search path) -AC_ARG_VAR(INCLUDEPATH,Additional library and module paths with INCLUDEFLAG) -AC_ARG_VAR(AR,Archive command) -AC_ARG_VAR(RANLIB,Archive index update command) -AC_ARG_VAR(BABELROOT,Root directory of your Babel installation. i.e.: $BABELROOT/bin/babel $BABELROOT/lib/libsidl.so) -AC_ARG_VAR(COMPILER_ROOT,Root directory of your FORTRAN compiler) -AC_ARG_VAR(FORT_SIZE, Number of bits in Fortran real and double kind) - -# INCLUDE BABELROOT and COMPILER_ROOT in Makefile.conf(autoconf output) -AC_SUBST(BABELROOT) -AC_SUBST(COMPILER_ROOT) -AC_SUBST(PYTHON) -AC_SUBST(PYTHONOPTS) - -# SET TEMPORARY VARIABLES - -# OS AND PLATFORM NAME -test "$osname"=NONE && osname=`uname -s` -test "$machinename"=NONE && machinename=`uname -m` -fullhostname=`hostname -f` - - -# HARDCODE SPECIFIC MACHINES FOR EXTRAORDINARY CIRCUMSTANCES - -# CHECK IF WE ARE ON THE EARTH SIMULATOR -ES="NO" -if echo $osname | grep -i esos >/dev/null 2>&1; then - ES="YES" -fi -if echo $osname | grep -i hp-ux >/dev/null 2>&1; then - if test "$ac_hostname" = "moon"; then - ES="YES" - # TELLS CONFIGURE NOT TO RUN ANY TESTS THAT REQUIRE EXECUTION - cross_compiling="yes" - fi -fi -if test "$ES" = "YES"; then - echo "Using preset configuration values for the Earth Simulator" - if test -z "$CC"; then - CC="escc" - fi - if test -z "$FC"; then - FC="esf90" - fi - if test -z "$MPIFC"; then - MPIFC="esmpif90" - fi - if test -z "$AR"; then - AR="esar cqs" - fi - if test -z "FPP"; then - FPPFLAGS=" " - fi - if test -z "$FCFLAGS"; then - FCFLAGS="-EP -Wf'-pvctl fullmsg -L fmtlist transform map'" - fi - if test -z "$OPT"; then - OPT="-C vopt" - fi - if test -z "$CPPDEFS"; then - CPPDEFS="-DESVEC" - fi -fi - -# Check if we are on the ANL BG/P - -if echo $fullhostname | egrep -q '.\.(challenger|intrepid)\.alcf\.anl\.gov' - then if test -z "$FC"; then - FC=bgxlf90_r - fi - if test -z "$MPIFC"; then - MPIFC=mpixlf90_r - fi - if test -z "$CC"; then - CC=mpixlc_r - fi -fi - - - -# START TESTS - -# CHECK FOR THE C COMPILER -AC_PROG_CC([cc]) - -# CHECK FOR BYTE ORDERING -AC_C_BIGENDIAN - -# CHECK FOR THE FORTRAN COMPILER -# RLJ- specify the order, include PathScale and do not search for F77 
-AC_PROG_FC([nagfor xlf95 pgf95 ifort gfortran pathf95 ftn lf95 f95 fort ifc efc g95 xlf90 pgf90 pathf90 epcf90 pghpf]) - -# CHECK FOR MPI LIBRARIES -AC_LANG_PUSH(Fortran) - -AC_FC_SRCEXT(F90) - -OLDFCFLAGS="$FCFLAGS" - -if test -n "$MPIHEADER"; then - FCFLAGS="$FCFLAGS $MPIHEADER" -fi - -# CHECK MPI BY DEFAULT -if test -z "$DONOTCHECKMPI"; then - ACX_MPI -fi - -# DONT CHECK MPI IF SERIALMPI OPTION IS ENABLED -if test -n "$DONOTCHECKMPI"; then - echo "MPISERIAL ENABLED: BYPASSING MPI CHECK" - if test -z "$MPIFC"; then - MPIFC=$FC - fi - if test -z "$FORT_SIZE"; then - FORT_SIZE="real4double8" - echo "FORT_SIZE IS PRESET TO $FORT_SIZE" - fi - abs_top_builddir=`pwd` - MPISERPATH=$abs_top_builddir/mpi-serial - AC_SUBST(MPISERPATH) - MPIHEADER=-I$MPISERPATH - MPILIBS="-L$MPISERPATH -lmpi-serial" -fi - -FCFLAGS="$OLDFCFLAGS" - -# A HACK TO FIX ACX_MPI TO GET MPILIBS TO BE AN EMPTY STRING -if test "$MPILIBS" = " "; then - MPILIBS="" -fi - -# SET FC TO MPIFC. IF MPILIBS IS PRESENT, SET FC TO FC. -if test -z "$FC"; then - FC=$MPIFC - if test "$FC" != "$MPIFC"; then - if test -n "$MPILIBS"; then - FC=$FC - fi - fi -fi - -# FOR SANITY, CHECK THAT FILENAME EXTENSION FOR FC IS CONSISTENT WITH FC -OLDFC="$FC" -FC="$FC" - -AC_COMPILE_IFELSE( - [ subroutine oof() - return - end], [], - [AC_MSG_WARN([$FC FAILED TO COMPILE FILENAME EXTENSION $ac_ext]) - ]) - - - -FC="$OLDFC" - -# CHECK HOW TO GET THE COMPILER VERSION. -echo "Checking Compiler Version" -AX_FC_VERSION() - -AC_LANG_POP(Fortran) - -# Check how to use the cpp with fortran - -AC_FC_PP_DEFINE() - - -# CHECK HOW TO NAME MANGLE C FUNCTIONS SO THAT IT CAN BE CALLED FROM FORTRAN -OLDFC="$FC" - -AC_FC_WRAPPERS() - -FC="$OLDFC" - -# CHECK THAT THE FORTRAN COMPILER CAN CORRECTLY PROCESS THESE DIRECTIVES -# IF NOT, USE THE EXTERNAL C PREPROCESSOR -OLDFC="$FC" - -defineflag="-Daardvark" -if test "$OLDFC" = "xlf90"; then - defineflag="-WF,-Daardvark" -fi -if test "$OLDFC" = "frt"; then - defineflag="-Wp,-Daardvark" -fi - -FC="$OLDFC" - -# DEFINE VARIABLES ACCORDING TO OS AND COMPILER - -echo "Hostname=$ac_hostname" -echo "Machine=$machinename" -echo "OS=$osname" - -# CHECK OS NAME -if echo $osname | grep -i aix >/dev/null 2>&1; then - SYSDEF="AIX" -fi -if echo $osname | grep -i darwin >/dev/null 2>&1; then - SYSDEF="DARWIN" -fi -if echo $osname | grep -i unix_system_v >/dev/null 2>&1; then - SYSDEF="UNIXSYSTEMV" -fi -if echo $osname | grep -i irix >/dev/null 2>&1; then - SYSDEF="IRIX" -fi -if echo $osname | grep -i irix64 >/dev/null 2>&1; then - SYSDEF="IRIX64" -fi -if echo $osname | grep -i linux >/dev/null 2>&1; then - SYSDEF="LINUX" -fi -if echo $osname | grep -i osf1 >/dev/null 2>&1; then - SYSDEF="OSF1" -fi -if echo $osname | grep -i super >/dev/null 2>&1; then - SYSDEF="SUPERUX" -fi -if echo $osname | grep -i sun >/dev/null 2>&1; then - SYSDEF="SUNOS" -fi -if echo $osname | grep -i t3e >/dev/null 2>&1; then - SYSDEF="T3E" -fi -if echo $osname | grep -i unicos >/dev/null 2>&1; then - SYSDEF="UNICOS" -fi -if test -z "$SYSDEF"; then - AC_MSG_WARN([OPERATING SYSTEM UNKNOWN]) - SYSDEF="UNKNOWNOS" -fi - -# Set the default FCFLAGS for non-gfortran compilers. -# NOTE: This may change with a new version of autoconf. 
-DEFFCFLAGS="-g" - -##################################################### -# CHECK COMPILER NAME and add specific flags -if echo $FC | grep xlf >/dev/null 2>&1; then - echo "Fortran Compiler is XLF" - CPRDEF="XLF" - if test -z "$REAL8"; then - REAL8="-qrealsize=8" - fi - if test -z "$OPT"; then - OPT="-O2 -qarch=auto" - fi - if test -z "$DEBUG"; then - DEBUG="-qdbg" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="" - fi -elif echo $FC | grep pgf >/dev/null 2>&1; then - echo "Fortran Compiler is Portland Group" - CPRDEF="PGI" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-pc 64" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="" - fi - if test -z "$ENDIAN"; then - ENDIAN="-byteswapio" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi -elif echo $FC | grep ftn >/dev/null 2>&1; then - if echo $ac_fc_version_output | grep -i Portland >/dev/null 2>&1; then - echo "Fortran Compiler is Portland Group, Cray" - CPRDEF="PGI" - SYSDEF="CNLINUX" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-pc 64" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="" - fi - if test -z "$ENDIAN"; then - ENDIAN="-byteswapio" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi - fi -elif echo $FC | grep ifort >/dev/null 2>&1; then - echo "Fortran Compiler is Intel ifort" - CPRDEF="INTEL" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="-w -ftz" - fi - if test -z "$PROGFCFLAGS"; then - PROGFCFLAGS="-assume byterecl" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert big_endian" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi -elif echo $FC | grep g95 >/dev/null 2>&1; then - echo "Fortran Compiler is GNU" - CPRDEF="GNU" -elif echo $FC | grep gfortran >/dev/null 2>&1; then - echo "Fortran Compiler is GNU" - CPRDEF="GNU" -# For gfortran, default flags are different - if test "$FCFLAGS" = "-g -O2"; then - FCFLAGS="" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $ac_fc_version_output | grep -i nag >/dev/null 2>&1; then - echo "Fortran Compiler is NAG" - CPRDEF="NAG" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test "$FCFLAGS" = "$DEFFCFLAGS"; then - FCFLAGS="-wmismatch=mpi_send,mpi_recv,mpi_bcast,mpi_allreduce,mpi_reduce,mpi_gatherv,mpi_gather,mpi_rsend,mpi_irecv,mpi_isend,mpi_scatterv,mpi_alltoallv -dusty" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert=BIG_IEEE" - fi - if test -z "$OPT"; then - OPT="-O2" - fi - if test -z "$DEBUG"; then - DEBUG="-g" - fi -########################################################### -# the compiler flags below have not been verified recently -########################################################### -elif echo $FC | grep frt >/dev/null 2>&1; then - echo "Fortran Compiler is UXP/V" - echo "Suggested additional vectorization flags: -Wv,-s5,-t3,-noalias,-ilfunc,-md" - CPRDEF="FUJITSU" - if test -z "$F90FLAGS"; then - F90FLAGS="-Am -X9" - fi - if test -z "$BIT64"; then - BIT64="-KA64" - fi - if test -z "$REAL8"; then - REAL8="-Ad" - fi -elif echo $ac_fc_version_output | grep Lahey >/dev/null 2>&1; then - echo "Fortran Compiler is Lahey" - CPRDEF="LAHEY" -elif echo $FC | grep ifc >/dev/null 2>&1; then - echo "Fortran Compiler is Intel 7.x or earlier" - echo "Intel ifc compiler must set the environment variable F_UFMTENDIAN=big to do endian 
conversion" - CPRDEF="INTEL" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$F90FLAGS"; then - F90FLAGS="-w" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $FC | grep efc >/dev/null 2>&1; then - echo "Fortran Compiler is Intel 7.x or earlier for IA-64" - echo "Intel efc compiler must set the environment variable F_UFMTENDIAN=big to do endian conversion" - CPRDEF="INTEL" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$F90FLAGS"; then - F90FLAGS="-w -ftz" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $FC | grep pathf90 >/dev/null 2>&1; then - echo "Fortran Compiler is PathScale" - CPRDEF="PATHSC" - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-m64" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $ac_fc_version_output | grep -i absoft >/dev/null 2>&1; then - echo "Fortran Compiler is Absoft" - CPRDEF="ABSOFT" - if test -z "$REAL8"; then - REAL8="-N113" - fi - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-p" - fi - if test -z "$OPT"; then - OPT="-O2" - fi -elif echo $ac_fc_version_output | grep -i workshop >/dev/null 2>&1; then - echo "Fortran Compiler is Workshop" - CPRDEF="WORKSHOP" - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-M" - fi -elif echo $ac_fc_version_output | grep -i mipspro >/dev/null 2>&1; then - echo "Fortran Compiler is MIPSPro" - CPRDEF="MIPSPRO" - EXTRACFLAGS="-64" - if test -z "$OPT"; then - OPT="-O3" - fi - if test -z "$REAL8"; then - REAL8="-r8" - fi - if test -z "$BIT64"; then - BIT64="-64" - fi -elif echo $ac_fc_version_output | grep -i compaq >/dev/null 2>&1; then - echo "Fortran Compiler is Compaq" - CPRDEF="COMPAQ" - MPILIBS="$MPILIBS -lelan" - if test -z "$OPT"; then - OPT="-fast" - fi - if test -z "$REAL8"; then - REAL8="-real_size 64" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert big_endian" - fi - -# Compaq Fortran changed its name to HP Fortran. -# Lets support both versions for now. -elif echo $ac_fc_version_output | grep HP >/dev/null 2>&1; then - echo "Fortran Compiler is HP" - CPRDEF="COMPAQ" - MPILIBS="$MPILIBS -lelan" - if test -z "$OPT"; then - OPT="-fast" - fi - if test -z "$REAL8"; then - REAL8="-real_size 64" - fi - if test -z "$ENDIAN"; then - ENDIAN="-convert big_endian" - fi - -elif echo $ac_fc_version_output | grep -i sx >/dev/null 2>&1; then - echo "Fortran Compiler is SX" - CPRDEF="SX" - if test -z "$F90FLAGS"; then - F90FLAGS="-EP -Wf'-pvctl noassoc'" - fi - if test -z "$OPT"; then - OPT="-Chopt" - fi -fi - -########################################################### -# END of compiler-specific flag setting -########################################################### - -CPPDEFS="$CPPDEFS -DSYS$SYSDEF -DCPR$CPRDEF" -if test -n "$SRKDEF"; then - CPPDEFS="$CPPDEFS -D$SRKDEF" -fi - -# IF DEBUGGING ENABLED, DISABLE OPTIMIZATION FLAG -if test "$DEBUGGING" = "ENABLED"; then - OPT="" -else - DEBUG="" -fi - -# SET HARDCODED VARIABLES AS A LAST RESORT - -# ALWAYS ENABLE CRULE IN MAKEFILE -AC_SUBST(CRULE,[.c.o]) - -AC_SUBST(CPPDEFS) - -# INCLUDE FLAG IF NOT ALREADY SET IS MOST LIKELY -I -if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-I" -fi - -# ARCHIVE COMMAND SIMILAR ACROSS ALL PLATFORMS -if test -z "$AR"; then - AR="ar cq" -fi - -# RANLIB -if test -z "$RANLIB"; then - # Necessary on Darwin to deal with common symbols (particularly when - # using ifort). 
- if test "$SYSDEF"x = DARWINx; then - RANLIB="ranlib -c" - else - AC_PROG_RANLIB - fi -fi - -echo -echo Output Variables: {CC=$CC} {CFLAGS=$CFLAGS} \ -{FC=$FC} {FCFLAGS=$FCFLAGS} {PROGFCFLAGS=$PROGFCFLAGS}\ -{CPPDEFS=$CPPDEFS} {OPT=$OPT} {DEBUG=$DEBUG} {REAL8=$REAL8} \ -{BIT64=$BIT64} {ENDIAN=$ENDIAN} {MPIFC=$MPIFC} \ -{MPILIBS=$MPILIBS} {MPIHEADER=$MPIHEADER} \ -{INCLUDEFLAG=$INCLUDEFLAG} {INCLUDEPATH=$INCLUDEPATH} \ -{AR=$AR} {RANLIB=$RANLIB} {BABELROOT=$BABELROOT} {COMPILER_ROOT=$COMPILER_ROOT} \ -{PYTHON=$PYTHON} {PYTHONOPTS=$PYTHONOPTS} {FORT_SIZE=$FORT_SIZE} {prefix=$prefix} \ -{SRCDIR=$SRCDIR} {FC_DEFINE=$FC_DEFINE} -echo - -if test -n "$DONOTCHECKMPI"; then - echo "MPISERIAL ENABLED: CONFIGURING mpi-serial" - AC_CONFIG_SUBDIRS(mpi-serial) -fi - -AC_OUTPUT - -echo Please check the Makefile.conf -echo Have a nice day! - -# test -z is true for empty strings -# test -n is true for non-empty strings - - - - diff --git a/src/externals/mct/doc/.gitignore b/src/externals/mct/doc/.gitignore deleted file mode 100644 index aadc44c8399..00000000000 --- a/src/externals/mct/doc/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -*.toc -*.log -*.dvi -*.aux -*.blg -*.bbl -*.pdf diff --git a/src/externals/mct/doc/Makefile b/src/externals/mct/doc/Makefile deleted file mode 100644 index 48d6e1e122c..00000000000 --- a/src/externals/mct/doc/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/make -#----------------------------------------------------------------------- -# Documentation -all: - cd texsrc; make - make apis - -html: - latex2html -white -toc_depth 5 -split 4 -show_section_numbers \ - -address "jacob@mcs.anl.gov" \ - mct_APIs.tex -apis: - cd texsrc; make - make apisdvi - -apisdvi: mct_APIs.dvi - -clean: - cd texsrc; make clean - rm -f *.dvi *.log *.bbl *.blg *.aux *.toc - -.SUFFIXES: .dvi .tex - -.tex.dvi: - latex $*.tex - -#. diff --git a/src/externals/mct/doc/README b/src/externals/mct/doc/README deleted file mode 100644 index 9ccfdfe50e3..00000000000 --- a/src/externals/mct/doc/README +++ /dev/null @@ -1,20 +0,0 @@ - -To build the .dvi files for the documentation. type "make". - -This will build the API's document. - -To build the APIs, type "make apis" - -NOTE: this build system isn't working perfectly yet. It will -build a .dvi file but you will need to run "bibtex" manually to -build the bibliography. 
- -To build "by hand" using the design doc as an example: -cd to texsrc, type "make" -cd back to doc directory then do: - -latex mct_APIs -bibtex mct_APIs -latex mct_APIs -latex mct_APIs - diff --git a/src/externals/mct/doc/coupler.bib b/src/externals/mct/doc/coupler.bib deleted file mode 100644 index 9d583a0326d..00000000000 --- a/src/externals/mct/doc/coupler.bib +++ /dev/null @@ -1,254 +0,0 @@ -@article{gaspari-1999a, - author = "G.~Gaspari and S.~E.~Cohn", - title = {{Construction of Correlation Functions in Two and Three Dimensions}}, - journal ={Quart.~J.~Roy.~Met.~Soc.}, - year = "1999", - volume = "125", - pages = "723--757", -} -@article{jones-1999, - author = "P.~W.~Jones", - title = {{First- and Second-order Conservative Remapping Schemes for Grids in Spherical Coordinates}}, - journal ={Monthly Weather Reveiw}, - year = "1999", - volume = "127", - pages = "2204-2210", -} -@Techreport{gaspari-1998, - author = "G.~Gaspari and S.~E.~Cohn and D.~P.~Dee and J.~Guo and A.~M.~da~Silva", - title = {{Construction of the PSAS Multi-level Forecast Error Covariance Models}}, - year = "1998", - institution = "NASA/Goddard Space Flight Center", - number = "DAO Office Note 98-06 {\bf http://dao.gsfc.nasa.gov/subpages/office-notes.html}", - address = "Greenbelt, Maryland." -} -@techreport{dasilva-1998a, - author = "A.~da Silva and M.~Tippett and J.~Guo", - title = {{The PSAS Users' Manual}}, - year = "1999", - institution = "NASA/Goddard Space Flight Center", - number = "To be published as DAO Office Note 99-XX", - address = "Greenbelt, Maryland" -} -@Techreport{guo+al-1998a, - author = "J.~Guo and J.~W.~Larson and G.~Gaspari and A.~da~Silva and P.~M.~Lyster", - title = {{Documentation of the Physical-space Statistical Analysis System (PSAS) Part II: The Factored-Operator Formulation of Error Covariances}}, - year = "1998", - institution = "NASA/Goddard Space Flight Center", - number = "DAO Office Note 98-04 {\bf http://dao.gsfc.nasa.gov/subpages/office-notes.html}", - address = "Greenbelt, Maryland." 
-} -@techreport{ODS-95, - author = "A.~M.~da Silva and C.~Redder", - title = {{Documentation of the GEOS/DAS Observation Data Stream (ODS), Version 1.01}}, - year = "1995", - institution = "NASA/Goddard Space Flight Center", - number = "DAO Office Note 95-01", - address = "Greenbelt, Maryland" -} -@techreport{farrell-1996a, - author = "W.~E.~Farrell and A.~J.~Busalacchi and A.~Davis - and W.~P.~Dannevik and G-R.~Hoffmann and M.~Kafatos and R.~W.~Moore - and J.~Sloan and T.~Sterling", - title = {{Report of the Data Assimilation Office Computer Advisory - Panel to the Laboratory for Atmospheres}}, - year = "1996", - institution = "NASA/Goddard Space Flight Center", - address = "Greenbelt, Maryland" -} -@techreport{lam+daS-1996a, - author = "D.~Lamich and A.~da~Silva", - title = {{Architectural Design for the GEOS-2.1 Data Assimilation System Document Version 1}}, - year = "1996", - institution = "NASA/Goddard Space Flight Center", - number = "DAO Office Note 96-XX", - address = "Greenbelt, Maryland" -} -@techreport{atbd-1996a, - author = "D.~A.~O.~Staff", - title = {{Algorithm Theoretical Basis Document, Version 1.01}}, - year = "1996", - institution = "NASA/Goddard Space Flight Center", - address = "Greenbelt, Maryland {\bf http://dao.gsfc.nasa.gov/subpages/atbd.html}" -} -@techreport{suarez-1995a, - author = "M.~J.~Suarez and L.~L.~Takacs", - title = {{Documentation of the Aries-GEOS Dynamical Core: Version 2}}, - year = "1995", - institution = "NASA/Goddard Space Flight Center", - number = "NASA Techinical Memorandum 104606, Vol. 5", - address = "Greenbelt, Maryland" -} -@techreport{takacs-1994a, - author = "L.~L.~Takacs and A.~Molod and T.~Wang", - title = {{Documentation of the Goddard Earth Observing - System (GEOS) General Circulation Model--Version 1}}, - year = "1994", - institution = "NASA/Goddard Space Flight Center", - number = "NASA Techinical Memorandum 104606, Vol. 1", - address = "Greenbelt, Maryland" -} - -@techreport{pfaendtner-1995a, - author = "J.~W.~Pfaendtner and J.~S.~Bloom and D.~Lamich and - and M.~Seablom and M.~Sienkiewicz and J.~Stobie and A.~da~Silva", - title = {{Documentation of the Goddard Earth Observing System - (GEOS) Data Assimilation System -- Version 1}}, - year = "1995", - institution = "NASA/Goddard Space Flight Center", - number = "Tech. Memo No. 104606, Vol. 4", - address = "Greenbelt, Maryland." -} -@techreport{pfaendtner-1996a, - author = "J.~W.~Pfaendtner", - title = {{Notes on the Icosahedral Domain Decompostion in PSAS}}, - year = "1996", - institution = "NASA/Goddard Space Flight Center", - number = "DAO Office Note 96-04 {\bf http://dao.gsfc.nasa.gov/subpages/office-notes.html}", - address = "Greenbelt, Maryland." 
-} -@Conference{seablom-1991a, - author = "M.~Seablom and J.~Pfaendtner and P.~E.~Piraino", - title = {{Quality Control techniques for the interactive GLA - retrieval/assimilation system}}, - year = "1991", - pages="28-29", - booktitle={{AMS Ninth Conference on Numerical Weather Prediction, - Denver, Colorado, October 14-18, 1991}}, -} -@Conference{daSilva-1995a, - author = "A.~da Silva and J.~Pfaendtner and J.~Guo and - M.~Sienkiewicz and S.~Cohn", - title = {{Assessing the Effects of Data Selection with - DAO's Physical-space Statistical Analysis System}}, - year = "1995", - booktitle="Proceedings of the Second International Symposium on the - Assimilation of Observations in Meteorology and Oceanography, Tokyo Japan" -} -@techreport{zero-1996a, - author = "J.~Zero and R.~Lucchesi and R.~Rood", - title = {{Data Assimilation Office (DAO) Strategy Statement: - Evolution Towards the 1998 Computing Environment}}, - year = "1996", - institution = "NASA/Goddard Space Flight Center", - number = "Tech. Memo No. 104606, Vol. 4", - address = "Greenbelt, Maryland" -} -@techreport{daSilva-1996a, - author = "A.~da Silva and J.~Guo", - title = {{Documentation of the Physical-space Statistical Analysis - System (PSAS) Part I: The Conjugate Gradient Solver, Version - PSAS-1.00}}, - year = "1996", - institution = "NASA/Goddard Space Flight Center", - number = "DAO Office Note No.~96-02 {\bf http://dao.gsfc.nasa.gov/subpages/office-notes.html}", - address = "Greenbelt, Maryland" -} -@techreport{stobie-1996a, - author = "J.~Stobie", - title = {{GEOS 3.0 System Requirements}}, - institution = "NASA/Goddard Space Flight Center", - address = "Greenbelt, Maryland" -} -@Conference{ding-1995a, - author = "C.~Ding and R.~D.~Ferraro", - title = {{An 18 GFLOPS Parallel Data Assimilation PSAS Package}}, - year = "1995", - pages="70", - booktitle={{Proceedings of the Intel Supercomputer Users Group - Conference}} -} -@Conference{ding-1995b, - author = "C.~Ding and R.~D.~Ferraro", - title = {{A General Purpose Parallel Sparse-Matrix Solver Package}}, - year = "1995", - pages="70", - booktitle={{Proceedings of the 9th International Parallel Processing Symposium}} -} -@Conference{ding-1996a, - author = "C.~Ding and R.~D.~Ferraro", - title = {{Climate Data Assimilation on a Massively Parallel Computer}}, - year = "1996", - booktitle={{Proceedings of Supercomputing, 96}} -} -@techreport{hennecke-1996a, - author = "M.~Hennecke", - title = {{A Fortran 90 Interface to MPI Version 1.1}}, - institution = "RZ Universitat Karlsruhe", - year = "1996", - number = "Internal Report 63/96", - address = "Karlsruhe, Germany" -} -@techreport{daSilva-1996b, - author = "A.~da Silva and C.~Redder", - title = {{Documentation of the GEOS/DAS Observation Data - Stream (ODS) Version 1.01}}, - institution = "NASA/Goddard Space Flight Center", - number = "DAO Office Note No. 
96-01", - address = "Greenbelt, Maryland" -} -@book{gol+vloan-1989, - author = "G.~H.~Golub and C.~F.~van Loan", - title = {Matrix Computations}, - edition = "second", - publisher = "The John Hopkins University Press", - year = "1989", - pages = "642", - address = "Baltimore" -} -@book{NumRec-1992, - author = " W.~H.~Press and S.~A.~Teukolsky and W.~T.~Vetterling", - title = {{Numerical Recipes in Fortran: The Art of Scientific - Computing}}, - edition = "second", - publisher = "Cambridge University Press", - year = "1992", - pages = "963", - address = "Cambridge" -} -@book{daley-1991, - author = "R.~Daley", - title = {{Atmospheric Data Analysis}}, - publisher = "Cambridge Press", - year = "1991", - pages = "457", - address = "Cambridge" -} -@phdthesis{vonlasz-1996a, - author = "G.~ von Laszewski", - title = {{The Parallel Data Assimilation System and its Implications on a Metacomputing Environment}}, - school = "Syracuse University", - year = "1996", - address = "Syracuse, New York" -} -@proposal{lyster-1995a, - author = "P.~M.~Lyster", - title = {{Four Dimensional Data Assimilation of the Atmosphere}}, - program = "NASA Cooperative Agreement for High Performance Computing - and Communications (HPCC) initiative", - agency = "National Aeronautics and Space Administration", - address = "Washington, D.~C.~" -} -@book{arfken, - author = "G.~Arfken", - title = {{Mathematical Methods for Physicists}}, - publisher = "Academic Press", - year = "1970", - pages = "815", - address = "New York" -} -@article{cohn-1998, - author="S.~E.~Cohn and A.~da~Silva and J.~Guo and M.~Sienkiewicz and D.~Lamich", - title={{Assessing the effects of data selection with the DAO Physical-space Statistical Analysis System}}, - journal={Mon.~Wea.~Rev.}, - volume="126", - pages="2913--2926", - year="1998" -} -@article{lyster-1998, - author="P.~M.~Lyster", - title={{The Computational Complexity of Atmospheric Data Assimilation}}, - journal="Submitted to {Int.~J.~Appl.~Sci.~Comp.}", - note="Available on-line from {\bf http://dao.gsfc.nasa.gov/DAO\_people/lys/complexity}", - year="1998" -} diff --git a/src/externals/mct/doc/mct_APIs.tex b/src/externals/mct/doc/mct_APIs.tex deleted file mode 100755 index c558e80bbd6..00000000000 --- a/src/externals/mct/doc/mct_APIs.tex +++ /dev/null @@ -1,340 +0,0 @@ -%mct API Specification -% J.W. Larson / MCS, Argonne National Laboratory -% R.L. Jacob -% First Version Begun 8/28/00 -% -% -\documentclass{article} -\usepackage{epsfig} -\usepackage{graphicx} -%\usepackage{fancyheadings} - -% Keep these dimensions - -\textheight 9in \topmargin 0pt \headsep 22pt -\headheight 0pt - -\textwidth 6in \oddsidemargin 0in \evensidemargin 0in - -\marginparpush 0pt \pagestyle{plain} - -\setlength{\hoffset}{0.25in} - -% Headings -% -------- -\pagestyle{plain} % AFTER redefining \textheight etc. - -% \lhead[]{{\em NGC Design Document}} % left part of header -% \chead[]{} % center part of header -% \rhead[]{\em {\today}} % right part of header - - % \cfoot{\roman{page}} - %\lfoot[]{} % left part of footer - % \rfoot[]{} % right part of footer - % \headrulewidth 0pt % if you don't want a rule under the header - % \footrulewidth 0pt % if you don't want a rule above the footer - -%...................................................................... -%.............begin document............. - -\begin{document} - -\begin{sloppypar} -{\huge\bf -%%% -%%% Enter your title below (after deleting mine) -%%% -The Model Coupling Toolkit API Reference Manual: MCT v. 
2.10 -\\ } %%% IMPORTANT: Keep this \\ before the } -\end{sloppypar} - -%%% -%%% Author names and affiliations go below, follow example -%%% -\vspace{.3in} -\noindent J.~W.~Larson\\ -R.~L.~Jacob\\ -E.~Ong\\ -R.~Loy\\ -\vspace{.2in} {\em Mathematics and Computer Science Division, -Argonne National Laboratory\\} - -\vfill - -%%% -%%% These lines are standard - keep them! -%%% Edit the ``has not been published'' as appropriated. -{\em This paper has not been published and should be regarded as -an Internal Report from MCS. Permission to quote from this -Technical Note should be obtained from the MCS Division of -Argonne National Laboratory.} - -\vspace{0.4in} - - -\thispagestyle{empty} -\newpage - -%.......................... END FIRST PAGE ...................... - -\pagenumbering{roman} - -%......................... REVISION HISTORY .......................... - -\newpage -\setcounter{page}{2} %%%% Revision History starts at page ii - -\addcontentsline{toc}{part}{Revision History} - -\vspace*{\fill} - -\centerline{\huge\bf Revision History} - -\bigskip -\noindent{This Technical Note was produced for the Scientific -Discovery through Advanced Computing (SciDAC) project.} - -\begin{center} -\begin{tabular}{|l|l|l|l|}\hline -{\bf Version} & {\bf Version} & {\bf Pages Affected/} & {\bf Aproval}\\ -{\bf Number} & {\bf Date} & {\bf Extent of Changes} & {\bf Authority}\\ -\hline -\hline -Version 1$\beta$ & December 13, 2000 & First draft (before review) & -\\\hline -Version 1$\beta2$ & February 16, 2001 & Add more routines & -\\\hline -Version 1$\beta3$ & June 6, 2001 & Convert to pure API's doc & -\\\hline -Version 1$\beta4$ & Apr 24, 2002 & Update with latest source & -\\\hline -Version 1.0 & Nov 14, 2002 & 1.0 Version & -\\\hline -Version 2.0.0 & Apr 23, 2004 & 2.0.0 Version & -\\\hline -Version 2.0.1 & May 18, 2004 & 2.0.1 Version & -\\\hline -Version 2.1.0 & Feb 11, 2005 & 2.1.0 Version & -\\\hline -Version 2.2.0 & Dec 01, 2005 & 2.2.0 Version & -\\\hline -Version 2.2.1 & Apr 22, 2006 & 2.2.1 Version & -\\\hline -Version 2.2.2 & Sep 08, 2006 & 2.2.2 Version & -\\\hline -Version 2.2.3 & Oct 16, 2006 & 2.2.3 Version & -\\\hline -Version 2.3.0 & Jan 10, 2007 & 2.3.0 Version & -\\\hline -Version 2.4.0 & Aug 17, 2007 & 2.4.0 Version & -\\\hline -Version 2.4.1 & Nov 21, 2007 & 2.4.1 Version & -\\\hline -Version 2.5.0 & Jan 28, 2008 & 2.5.0 Version & -\\\hline -Version 2.5.1 & May 20, 2008 & 2.5.1 Version & -\\\hline -Version 2.6.0 & Mar 05, 2009 & 2.6.0 Version & -\\\hline -Version 2.7.0 & Jan 05, 2010 & 2.7.0 Version & -\\\hline -Version 2.7.1 & Feb 28, 2010 & 2.7.1 Version & -\\\hline -Version 2.7.2 & Nov 30, 2010 & 2.7.2 Version & -\\\hline -Version 2.7.3 & Jan 25, 2011 & 2.7.3 Version & -\\\hline -Version 2.7.4 & Mar 07, 2012 & 2.7.4 Version & -\\\hline -Version 2.8.0 & Apr 30, 2012 & 2.8.0 Version & -\\\hline -Version 2.8.1 & Jul 05, 2012 & 2.8.1 Version & -\\\hline -Version 2.8.2 & Sep 12, 2012 & 2.8.2 Version & -\\\hline -Version 2.8.3 & Dec 17, 2012 & 2.8.3 Version & -\\\hline -Version 2.9.0 & Jun 19, 2015 & 2.9.0 Version & -\\\hline -Version 2.10.0 & Mar 12, 2018 & 2.10.0 Version & -\\\hline -\end{tabular} -\end{center} - -\vspace*{\fill} - - -%.......................... ABSTRACT .................................. -\newpage -\setcounter{page}{3} %%%% abstract starts at page iii -\addcontentsline{toc}{part}{Preface} - -\vspace*{\fill} - -This document describes the Application Program Interfaces (APIs) -for the Model Coupling Toolkit (MCT). 
- -For functions that take a Fortran90 {\tt real} argument, either a scalar or -a vector, MCT provides both double and single precision versions. Only -the single precision version are described here denoted by SP. The double precision versions -are otherwise identical. - -\vspace*{\fill} -\newpage - -\tableofcontents -\newpage - -% Switch page numbering to arabic numerals - -\pagenumbering{arabic} - -\part{Basic API's and associated communication routines} -% -\section{MCTWorld} -\input{texsrc/m_MCTWorld} -\vspace*{\fill} -\newpage -% -% -\section{The Attribute Vector} -\input{texsrc/m_AttrVect} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_AttrVectComms} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_AttrVectReduce} -\vspace*{\fill} -\newpage -% -% -\section{Global Segment Map} -\input{texsrc/m_GlobalSegMap} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_GlobalSegMapComms} -\vspace*{\fill} -\newpage -% -% -\section{The Router} -\input{texsrc/m_Router} -\vspace*{\fill} -\newpage -% -% -\section{The General Grid} -\input{texsrc/m_GeneralGrid} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_GeneralGridComms} -\vspace*{\fill} -\newpage -% -% -\section{The Navigator} -\input{texsrc/m_Navigator} -\vspace*{\fill} -\newpage -% -% -\section{The Global Map} -\input{texsrc/m_GlobalMap} -\vspace*{\fill} -\newpage -% -% -\part{High Level API's} -% -\section{Sending and Receiving Attribute Vectors} -\input{texsrc/m_Transfer} -\vspace*{\fill} -\newpage -% -\section{Rearranging Attribute Vectors} -\input{texsrc/m_Rearranger} -\vspace*{\fill} -\newpage -% -\section{Sprase Matrix Support} -\input{texsrc/m_SparseMatrix} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_SparseMatrixComms} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_SparseMatrixDecomp} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_SparseMatrixToMaps} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_SparseMatrixPlus} -\vspace*{\fill} -\newpage -% -% -\section{Matrix Vector Multiplication} -\input{texsrc/m_MatAttrVectMul} -\vspace*{\fill} -\newpage -% -\section{Spatial Integration and Averaging} -\input{texsrc/m_SpatialIntegral} -\vspace*{\fill} -\newpage -\input{texsrc/m_SpatialIntegralV} -\vspace*{\fill} -\newpage -% -\section{Merging of Flux and State Data from Multiple Sources} -\input{texsrc/m_Merge} -\vspace*{\fill} -\newpage -% -\section{Time Averaging} -\input{texsrc/m_Accumulator} -\vspace*{\fill} -\newpage -% -\input{texsrc/m_AccumulatorComms} -\vspace*{\fill} -\newpage -% -\section{Global To Local Index Translation} -\input{texsrc/m_GlobalToLocal} -\vspace*{\fill} -\newpage -% -\section{Convert From Global Map To Global Segment Map} -\input{texsrc/m_ConvertMaps} -\vspace*{\fill} -\newpage - -\part{Documentation of MPEU Datatypes Used to Define MCT Datatypes} -% -\section{The String Datatype} -\input{texsrc/m_String} -\vspace*{\fill} -\newpage -% -\section{The List Datatype} -\input{texsrc/m_List} -\vspace*{\fill} -\newpage - -%\addcontentsline{toc}{part}{References} - -%\bibliographystyle{apalike} % for BibTeX - uses [Name, year] method?? 
- -%\bibliography{coupler} -\end{document} diff --git a/src/externals/mct/doc/texsrc/.gitignore b/src/externals/mct/doc/texsrc/.gitignore deleted file mode 100644 index 89a588f6713..00000000000 --- a/src/externals/mct/doc/texsrc/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.tex -*.F90 diff --git a/src/externals/mct/doc/texsrc/Makefile b/src/externals/mct/doc/texsrc/Makefile deleted file mode 100644 index 7d4049643f3..00000000000 --- a/src/externals/mct/doc/texsrc/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/make - -TEXFILES = -include SRCS_tex.mk - -PROTEXLOC = ../../protex/protex - -PROTEX = perl $(PROTEXLOC) -b # bare mode--no TOC - -#----------------------------------------------------------------------- -# Documentation -all: - cp ../../mct/*.F90 . - cp ../../mpeu/m_String.F90 . - cp ../../mpeu/m_List.F90 . - make doc - -doc: $(TEXFILES) - -clean: - rm -f *.F90 - rm -f *.tex - -.SUFFIXES: .F90 .tex - -.F90.tex: - $(PROTEX) $*.F90 > $*.tex - -#. diff --git a/src/externals/mct/doc/texsrc/SRCS_tex.mk b/src/externals/mct/doc/texsrc/SRCS_tex.mk deleted file mode 100644 index 556c7218bcc..00000000000 --- a/src/externals/mct/doc/texsrc/SRCS_tex.mk +++ /dev/null @@ -1,31 +0,0 @@ -TEXFILES= \ -m_Accumulator.tex \ -m_AccumulatorComms.tex \ -m_AttrVect.tex \ -m_AttrVectComms.tex \ -m_AttrVectReduce.tex \ -m_ConvertMaps.tex \ -m_ExchangeMaps.tex \ -m_GeneralGrid.tex \ -m_GeneralGridComms.tex \ -m_GlobalMap.tex \ -m_GlobalSegMap.tex \ -m_GlobalSegMapComms.tex \ -m_GlobalToLocal.tex \ -m_MCTWorld.tex \ -m_MatAttrVectMul.tex \ -m_Merge.tex \ -m_Navigator.tex \ -m_Rearranger.tex \ -m_Router.tex \ -m_SparseMatrix.tex \ -m_SparseMatrixComms.tex \ -m_SparseMatrixDecomp.tex \ -m_SparseMatrixToMaps.tex \ -m_SparseMatrixPlus.tex \ -m_SpatialIntegral.tex \ -m_SpatialIntegralV.tex \ -m_String.tex \ -m_Transfer.tex \ -m_List.tex - diff --git a/src/externals/mct/examples/Makefile b/src/externals/mct/examples/Makefile deleted file mode 100644 index dfd79727493..00000000000 --- a/src/externals/mct/examples/Makefile +++ /dev/null @@ -1,20 +0,0 @@ - -SHELL = /bin/sh - -SUBDIRS = simple climate_concur1 climate_sequen1 - -# TARGETS -subdirs: - @for dir in $(SUBDIRS); do \ - cd $$dir; \ - $(MAKE); \ - cd ..; \ - done - -clean: - @for dir in $(SUBDIRS); do \ - cd $$dir; \ - $(MAKE) clean; \ - cd ..; \ - done - diff --git a/src/externals/mct/examples/README b/src/externals/mct/examples/README deleted file mode 100644 index a7e19528ead..00000000000 --- a/src/externals/mct/examples/README +++ /dev/null @@ -1,22 +0,0 @@ - -Directories containing example programs showing -the use of MCT. - -simple/ - Multiple single-source file examples showing how to set - up MCTWorld, GSMaps and send/recv data in various two-component - coupled configurations (sequential and concurrent). Require - no input data. - -climate_concur1/ - A small program demonstrating MCT features - in a configuration which mimics part of a concurrently executing - climate model. Uses real climate model numerical grids. Requires - the MCT/data directory. - - -climate_sequen1/ - A small program demonstrating MCT features - in a configuration which mimics part of a sequentially executing - climate model. Uses real climate model numerical grids. Requires - the MCT/data directory - - -More examples will be available in future releases. 
diff --git a/src/externals/mct/examples/climate_concur1/.gitignore b/src/externals/mct/examples/climate_concur1/.gitignore deleted file mode 100644 index d4f2ff7e9f3..00000000000 --- a/src/externals/mct/examples/climate_concur1/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -climate -*.mod -poe.* -*.script -*.o* diff --git a/src/externals/mct/examples/climate_concur1/Makefile b/src/externals/mct/examples/climate_concur1/Makefile deleted file mode 100644 index 3f4f30e1ed4..00000000000 --- a/src/externals/mct/examples/climate_concur1/Makefile +++ /dev/null @@ -1,52 +0,0 @@ - -SHELL = /bin/sh - -# SOURCE FILES - -SRCS_F90 = master.F90 coupler.F90 model.F90 - -OBJS_ALL = $(SRCS_F90:.F90=.o) - -# MACHINE AND COMPILER FLAGS - -include ../../Makefile.conf - -# ADDITIONAL FLAGS SPECIFIC FOR UTMCT COMPILATION - -MCTLIBS = -L$(MPEUPATH) -L$(MCTPATH) -lmct -lmpeu -UTLDFLAGS = $(REAL8) -UTCMPFLAGS = $(REAL8) $(INCFLAG)$(MPEUPATH) $(INCFLAG)$(MCTPATH) - -# TARGETS - -all: climate - -climate: $(OBJS_ALL) - $(FC) -o $@ $(OBJS_ALL) $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .o - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MCTFLAGS) $(UTCMPFLAGS) $< - - -clean: - ${RM} *.o *.mod climate - -# DEPENDENCIES: - -$(OBJS_ALL): $(MCTPATH)/libmct.a - - - - - - - - - - - diff --git a/src/externals/mct/examples/climate_concur1/README b/src/externals/mct/examples/climate_concur1/README deleted file mode 100644 index b7b61d9c1ea..00000000000 --- a/src/externals/mct/examples/climate_concur1/README +++ /dev/null @@ -1,38 +0,0 @@ - -This program demonstrates the use of MCT in a simple -coupled system consisting of a "model" and a "coupler". - -The grids used are taken from a real climate model. -"model" uses an atmosphere grid and "coupler" interpolates -data on it to an ocean grid. - -The model and coupler run on separate pools of processors. - -master.F90 - the top level program -model.F90 - the first component, an atmosphere model. - sends data to the coupler. -coupler.F90 - the second component, a coupler which takes - the received atmosphere data and maps it to - the ocean grid. - ------------------------------------------------------ -To compile: -First make sure you have compiled MCT. See instructions in -MCT/README - -Type "make" here or "make examples" in the top-level directory. - -The executable is called "climate" - ------------------------------------------------------ -To run: -"climate" requires a data file of interpolation weights in -the directory MCT/data. If this directory was not present when -you untarred MCT, you can get it from the MCT website. - -climate requires at least 2 MPI processes to run but can run on -any even number of processors. Consult your -local documentation for how to run parallel programs. -Typical command: mpirun -np 8 climate - -This program will not work with mpi-serial. diff --git a/src/externals/mct/examples/climate_concur1/coupler.F90 b/src/externals/mct/examples/climate_concur1/coupler.F90 deleted file mode 100644 index 465781a8b41..00000000000 --- a/src/externals/mct/examples/climate_concur1/coupler.F90 +++ /dev/null @@ -1,315 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: coupler.F90,v 1.8 2004-04-23 20:57:10 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! 
!ROUTINE: coupler -- coupler for unit tester -! -! !DESCRIPTION: -! A coupler subroutine to test functionality of MCT. -! -! !INTERFACE: -! - subroutine coupler (comm,ncomps,compid) -! -! !USES: -! -! Get the things needed from MCT by "Use,only" with renaming: -! -! ---------- first group is identical to what model.F90 uses ---- -! -!---Component Model Registry - use m_MCTWorld,only: MCTWorld_init => init - use m_MCTWorld,only: MCTWorld_clean => clean -!---Domain Decomposition Descriptor DataType and associated methods - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: GlobalSegMap_init => init - use m_GlobalSegMap,only: GlobalSegMap_lsize => lsize - use m_GlobalSegMap,only: GlobalSegMap_clean => clean - use m_GlobalSegMap,only: GlobalSegMap_Ordpnts => OrderedPoints -!---Field Storage DataType and associated methods - use m_AttrVect,only : AttrVect - use m_AttrVect,only : AttrVect_init => init - use m_AttrVect,only : AttrVect_clean => clean - use m_AttrVect,only : AttrVect_importRAttr => importRAttr -!---Intercomponent communications scheduler - use m_Router,only: Router - use m_Router,only: Router_init => init - use m_Router,only: Router_clean => clean -!---Intercomponent transfer - use m_Transfer,only : MCT_Send => send - use m_Transfer,only : MCT_Recv => recv - -! ---------- because coupler will do the interpolation --------- -! it needs more methods -! -!---Sparse Matrix DataType and associated methods - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_init => init - use m_SparseMatrix, only : SparseMatrix_importGRowInd => & - importGlobalRowIndices - use m_SparseMatrix, only : SparseMatrix_importGColInd => & - importGlobalColumnIndices - use m_SparseMatrix, only : SparseMatrix_importMatrixElts => & - importMatrixElements - use m_SparseMatrixPlus, only : SparseMatrixPlus - use m_SparseMatrixPlus, only : SparseMatrixPlus_init => init - use m_SparseMatrixPlus, only : SparseMatrixPlus_clean => clean - use m_SparseMatrixPlus, only : Xonly ! Decompose matrix by row -!---Matrix-Vector multiply methods - use m_MatAttrVectMul, only: MCT_MatVecMul => sMatAvMult - -!---MPEU I/O utilities - use m_stdio - use m_ioutil - - implicit none - - include "mpif.h" - -! !INPUT PARAMETERS: - - integer,intent(in) :: comm - integer,intent(in) :: ncomps - integer,intent(in) :: compid -! -!EOP ___________________________________________________________________ - -! Local variables - - character(len=*), parameter :: cplname='coupler.F90' - - integer :: nxa ! number of points in x-direction, atmos - integer :: nya ! number of points in y-direction, atmos - integer :: nxo ! number of points in x-direction, ocean - integer :: nyo ! number of points in y-direction, ocean - - character(len=100),parameter :: & - RemapMatrixFile='../../data/t42_to_popx1_c_mat.asc' - -! Loop indicies - integer :: i,j,k,n - - logical :: match - -! MPI variables - integer :: rank, nprocs, root, ierr -! MCTWorld variables - integer :: AtmID -! Grid variables - integer :: localsize -! GlobalSegMap variables - type(GlobalSegMap) :: AtmGSMap, OcnGSMap - integer,dimension(1) :: start,length - integer, dimension(:), pointer :: points - integer :: latsize, lonsize - integer :: rowindex, colindex, boxvertex -! AttVect variables - type(AttrVect) :: AtmAV, OcnAV - integer :: aavsize,oavsize -! Router variables - type(Router) :: Rout -! 
SparseMatrix variables - integer :: mdev - integer :: num_elements, nRows, nColumns - integer, dimension(2) :: src_dims, dst_dims - integer, dimension(:), pointer :: rows, columns - real, dimension(:), pointer :: weights -! A2O SparseMatrix elements on root - type(SparseMatrix) :: sMat -! A2O distributed SparseMatrixPlus variables - type(SparseMatrixPlus) :: A2OMatPlus -! _____________________________________________________________________ - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! INITIALIZATION PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - ! LOCAL RANK AND SIZE - call MPI_COMM_RANK(comm,rank,ierr) - call MPI_COMM_SIZE(comm,nprocs,ierr) - root = 0 - - if(rank==0) write(6,*) cplname,' MyID ', compid - if(rank==0) write(6,*) cplname,' Num procs ', nprocs - - ! Initialize MCTworld - call MCTWorld_init(ncomps,MPI_COMM_WORLD,comm,compid) - - ! Set the atm component id. Must be known to this - ! component. (MCT doesn't handle that). - AtmID=1 - - ! Set grid dimensions for atmosphere and ocean grids. - ! MCT could be used for this (by defining a GeneralGrid in - ! each and sending them to the coupler) but for this simple - ! example, we'll assume they're known to the coupler - nxa = 128 - nya = 64 - - nxo = 320 - nyo = 384 - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Read matrix weights for interpolation from a file. - if (rank == root) then - mdev = luavail() - open(mdev, file=trim(RemapMatrixFile), status="old") - read(mdev,*) num_elements - read(mdev,*) src_dims(1), src_dims(2) - read(mdev,*) dst_dims(1), dst_dims(2) - - allocate(rows(num_elements), columns(num_elements), & - weights(num_elements), stat=ierr) - - do n=1, num_elements - read(mdev,*) rows(n), columns(n), weights(n) - end do - - close(mdev) - - ! Initialize a Sparsematrix - nRows = dst_dims(1) * dst_dims(2) - nColumns = src_dims(1) * src_dims(2) - call SparseMatrix_init(sMat,nRows,nColumns,num_elements) - call SparseMatrix_importGRowInd(sMat, rows, size(rows)) - call SparseMatrix_importGColInd(sMat, columns, size(columns)) - call SparseMatrix_importMatrixElts(sMat, weights, size(weights)) - - deallocate(rows, columns, weights, stat=ierr) - - endif - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Initialize a Global Segment Map for the Ocean - - ! Set up a 1-d decomposition. - ! There is just 1 segment per processor - localsize = nxo*nyo / nprocs - - ! we'll use the distributed init of GSMap so - ! initialize start and length arrays for this processor - start(1) = (rank*localsize) + 1 - length(1) = localsize - - ! initialize the GSMap - call GlobalSegMap_init(OcnGSMap,start,length,root,comm,compid) - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Initialize a Global Segment Map for the Atmosphere - - ! Set up a 1-d decomposition. - ! There is just 1 segment per processor - localsize = nxa*nya / nprocs - - ! we'll use the distributed init of GSMap so - ! initialize start and length arrays for this processor - start(1) = (rank*localsize) + 1 - length(1) = localsize - - ! initialize the GSMap - call GlobalSegMap_init(AtmGSMap,start,length,root,comm,compid) - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - ! Use a GSMap function: - ! return the points local to this processor - ! in their assumed order. - call GlobalSegMap_Ordpnts(AtmGSMap,rank,points) - - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Build a SparseMatrixPlus for doing the interpolation - ! 
Specify matrix decomposition to be by row. - ! following the atmosphere's decomposition. - call SparseMatrixPlus_init(A2OMatPlus, sMat, AtmGSMap, OcnGSMap, & - Xonly, root, comm, compid) - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Initialize and Attribute vector the atmosphere grid - aavsize = GlobalSegMap_lsize(AtmGSMap,comm) - if(rank==0) write(6,*) cplname, ' localsize: Atm ', aavsize - call AttrVect_init(AtmAV,rList="field1:field2",lsize=aavsize) - - - ! Initialize and Attribute vector the ocean grid - oavsize = GlobalSegMap_lsize(OcnGSMap,comm) - if(rank==0) write(6,*) cplname, ' localsize: Ocn ', oavsize - call AttrVect_init(OcnAV,rList="field1:field2",lsize=oavsize) - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Initialize a Router - call Router_init(AtmID,AtmGSMap,comm,Rout) - -!!! END OF INIT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! RUN PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - do j=1,10 ! "timestep" loop - - - ! coupler calculations here - - match=.TRUE. - - ! Receive the data - call MCT_Recv(AtmAV,Rout) - - ! The 2nd attribute has the values of each gridpoint in - ! the index numbering scheme. Check the received values - ! against the points on the this processor. They should - ! match exactly. - do i=1,aavsize - if( int(AtmAV%rAttr(2,i)) .ne. points(i)) then - write(6,*) cplname,rank, " Data doesn't match ",i - match=.FALSE. - endif - enddo - if(match .and. j==10) & - write(6,*) cplname," Last step, All points match on ",rank - - if(rank==0) write(6,*) cplname, " Received data step ",j - - ! Interpolate by doing a parallel sparsematrix-attrvect multiply - ! Note: it doesn't make much sense to interpolate "field2" which - ! is the grid point indicies but MatVecMul will interpolate all - ! real attributes. - call MCT_MatVecMul(AtmAV, A2OMatPlus, OcnAV) - if(rank==0) write(6,*) cplname," Data transformed step ",j - - - ! pass interpolated data on to ocean model and/or - ! do more calculations - - enddo - - -!!! END OF RUN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! FINALIZE PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - ! deallocate memory - call Router_clean(Rout) - call AttrVect_clean(AtmAV) - call AttrVect_clean(OcnAV) - call GlobalSegMap_clean(AtmGSMap) - call GlobalSegMap_clean(OcnGSMap) - call MCTWorld_clean() - if(rank==0) write(6,*) cplname, " done" - - end subroutine coupler - diff --git a/src/externals/mct/examples/climate_concur1/master.F90 b/src/externals/mct/examples/climate_concur1/master.F90 deleted file mode 100644 index e9252daa9ee..00000000000 --- a/src/externals/mct/examples/climate_concur1/master.F90 +++ /dev/null @@ -1,89 +0,0 @@ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: master.F90,v 1.7 2004-04-23 05:43:11 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: master -- driver for simple concurrent coupled model -! -! !DESCRIPTION: Provide a simple example of using MCT to connect to -! components executing concurrently in a single executable. -! -! !INTERFACE: -! - program master -! -! !USES: -! 
- - implicit none - - include "mpif.h" - -! -!EOP ___________________________________________________________________ - -! local variables - - character(len=*), parameter :: mastername='master.F90' - - integer, parameter :: ncomps = 2 ! Must know total number of - ! components in coupled system - - integer, parameter :: AtmID = 1 ! pick an id for the atmosphere - integer, parameter :: CplID = 2 ! pick an id for the coupler - - - - -! MPI variables - integer :: splitcomm, rank, nprocs,compid, myID, ierr,color - integer :: anprocs,cnprocs - -!----------------------------------------------------------------------- -! The Main program. -! We are implementing a single-executable, concurrent-execution system. -! -! This small main program carves up MPI_COMM_WORLD and then starts -! each component on its own processor set. - - ! Initialize MPI - call MPI_INIT(ierr) - - ! Get basic MPI information - call MPI_COMM_SIZE(MPI_COMM_WORLD,nprocs,ierr) - call MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr) - - ! Create MPI communicators for each component - ! - ! each component will run on half the processors - ! - ! set color - if (rank .lt. nprocs/2) then - color = 0 - else - color = 1 - endif - - - ! Split MPI_COMM_WORLD into communicators for each component. - call MPI_COMM_SPLIT(MPI_COMM_WORLD,color,0,splitcomm,ierr) - - - ! Start the components - select case (color) - case(0) - call model(splitcomm,ncomps,AtmID) - case(1) - call coupler(splitcomm,ncomps,CplID) - case default - print *, "color error, color = ", color - end select - - ! Components are done - call MPI_FINALIZE(ierr) - - - end program master diff --git a/src/externals/mct/examples/climate_concur1/model.F90 b/src/externals/mct/examples/climate_concur1/model.F90 deleted file mode 100644 index 60a245a3f32..00000000000 --- a/src/externals/mct/examples/climate_concur1/model.F90 +++ /dev/null @@ -1,198 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: model.F90,v 1.8 2004-04-23 20:56:23 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: model -- generic model for unit tester -! -! !DESCRIPTION: -! A generic model subroutine to test functionality of MCT. -! -! !INTERFACE: -! - subroutine model (comm,ncomps,compid) -! -! !USES: -! -! Get the things needed from MCT by "Use,only" with renaming: -! 
-!---Component Model Registry - use m_MCTWorld,only: MCTWorld_init => init - use m_MCTWorld,only: MCTWorld_clean => clean -!---Domain Decomposition Descriptor DataType and associated methods - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: GlobalSegMap_init => init - use m_GlobalSegMap,only: GlobalSegMap_lsize => lsize - use m_GlobalSegMap,only: GlobalSegMap_clean => clean - use m_GlobalSegMap,only: GlobalSegMap_Ordpnts => OrderedPoints -!---Field Storage DataType and associated methods - use m_AttrVect,only : AttrVect - use m_AttrVect,only : AttrVect_init => init - use m_AttrVect,only : AttrVect_clean => clean - use m_AttrVect,only : AttrVect_indxR => indexRA - use m_AttrVect,only : AttrVect_importRAttr => importRAttr -!---Intercomponent communications scheduler - use m_Router,only: Router - use m_Router,only: Router_init => init - use m_Router,only: Router_clean => clean -!---Intercomponent transfer - use m_Transfer,only : MCT_Send => send - use m_Transfer,only : MCT_Recv => recv -!---Stored Grid data - - implicit none - - include "mpif.h" - -! !INPUT PARAMETERS: - - integer,intent(in) :: comm ! MPI communicator for this component - integer,intent(in) :: ncomps ! total number of models in coupled system - integer,intent(in) :: compid ! the integer id of this model -! -!EOP ___________________________________________________________________ - -! local variables - -! parameters for this model - character(len=*), parameter :: modelname='model.F90' - integer,parameter :: nxa = 128 ! number of points in x-direction - integer,parameter :: nya = 64 ! number of points in y-direction - - integer :: i,j,k - -! note decleration of instances of MCT defined types. -! MPI variables - integer :: rank, nprocs, root, CplID, ierr -! Grid variables - integer :: localsize -! GlobalSegMap variables - type(GlobalSegMap) :: GSMap ! MCT defined type - integer,dimension(1) :: start,length - integer, dimension(:), pointer :: points -! AttrVect variables - type(AttrVect) :: AV ! MCT defined type - real, dimension(:), pointer :: avdata - integer :: avsize -! Router variables - type(Router) :: Rout ! MCT defined type -! _____________________________________________________________________ - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! INITIALIZATION PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - ! Get local rank and size - call MPI_COMM_RANK (comm,rank, ierr) - call MPI_COMM_SIZE(comm,nprocs,ierr) - root = 0 - - if(rank==0) write(6,*) modelname,' MyID ', compid - if(rank==0) write(6,*) modelname,' Num procs ', nprocs - - ! Initialize MCTworld - call MCTWorld_init(ncomps,MPI_COMM_WORLD,comm,compid) - - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Initialize a Global Segment Map - - ! set up a 1-d decomposition. - ! there is just 1 segment per processor - localsize = nxa*nya / nprocs - - ! we'll use the distributed init of GSMap so - ! initialize start and length arrays for this processor - start(1) = (rank*localsize) + 1 - length(1) = localsize - - ! initialize the GSMap - call GlobalSegMap_init(GSMap,start,length,root,comm,compid) - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - ! Use a GSMap function: - ! return the points local to this processor - ! in their assumed order. - call GlobalSegMap_Ordpnts(GSMap,rank,points) - - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Initialize an Attribute vector - - ! 
size is the number of grid point on this processor - avsize = GlobalSegMap_lsize(GSMap,comm) - if(rank==0) write(6,*) modelname, ' localsize ', avsize - - ! initialize Av with two real attributes. - call AttrVect_init(AV,rList="field1:field2",lsize=avsize) - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Initialize a router to the coupler component. - ! - ! Need to know the integer ID of the coupler. - CplID = 2 - call Router_init(CplID,GSMap,comm,Rout) - - ! create an array used in RUN - allocate(avdata(avsize),stat=ierr) -!!! END OF INIT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! RUN PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - do j=1,10 ! "timestep" loop - - - ! model calculations - - - ! load data into aV - ! load the first field using "import" method. - ! First field will be a constant real number. - avdata=30.0 - call AttrVect_importRAttr(AV,"field1",avdata) - - ! Load the second field using direct access - ! Second field will be the indicies of each grid point - ! in the grid point numbering scheme. - do i=1,avsize - AV%rAttr(AttrVect_indxR(AV,"field2"),i) = points(i) - enddo - - ! Send the data - ! this is a synchronization point between the coupler and - ! this model. - if(rank==0) write(6,*) modelname,' sending data step ',j - call MCT_Send(AV,Rout) - - - ! more model calculations - - - enddo - -!!! END OF RUN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! FINALIZE PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! clean up - call Router_clean(Rout) - call AttrVect_clean(AV) - call GlobalSegMap_clean(GSMap) - call MCTWorld_clean() - if(rank==0) write(6,*) modelname,' done' -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- - end subroutine model - diff --git a/src/externals/mct/examples/climate_sequen1/.gitignore b/src/externals/mct/examples/climate_sequen1/.gitignore deleted file mode 100644 index f2d3c73f037..00000000000 --- a/src/externals/mct/examples/climate_sequen1/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.mod -climate -TS1out.dat -*.script -*.o* diff --git a/src/externals/mct/examples/climate_sequen1/Makefile b/src/externals/mct/examples/climate_sequen1/Makefile deleted file mode 100644 index 7992fa00f9c..00000000000 --- a/src/externals/mct/examples/climate_sequen1/Makefile +++ /dev/null @@ -1,51 +0,0 @@ - -SHELL = /bin/sh - -# SOURCE FILES - -SRCS_F90 = mutils.F90 srcmodel.F90 dstmodel.F90 coupler.F90 master.F90 - -OBJS_ALL = $(SRCS_F90:.F90=.o) - -# MACHINE AND COMPILER FLAGS - -include ../../Makefile.conf - -# ADDITIONAL FLAGS SPECIFIC FOR UTMCT COMPILATION - -MCTLIBS = -L$(MPEUPATH) -L$(MCTPATH) -lmct -lmpeu -UTLDFLAGS = $(REAL8) -UTCMPFLAGS = $(REAL8) $(INCFLAG)$(MPEUPATH) $(INCFLAG)$(MCTPATH) - -# TARGETS - -all: climate - -climate: $(OBJS_ALL) - $(FC) -o $@ $(OBJS_ALL) $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .o - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MCTFLAGS) $(UTCMPFLAGS) $< - -clean: - ${RM} *.o *.mod climate - -# DEPENDENCIES: - -$(OBJS_ALL): $(MCTPATH)/libmct.a - - - - - - - - - - - diff --git a/src/externals/mct/examples/climate_sequen1/README b/src/externals/mct/examples/climate_sequen1/README deleted file mode 100644 index fa7f9afb57d..00000000000 --- a/src/externals/mct/examples/climate_sequen1/README +++ /dev/null @@ -1,42 +0,0 @@ - -This program demonstrates the use of MCT in a simple -coupled system consisting of two models and a coupler. - -The grids used are taken from a real climate model. -"srcmodel" uses an atmosphere grid and "coupler" interpolates -data on it to an ocean grid in "dstmodel". - -The srcmodel reads in a temperature field TS1.dat on the atmosphere grid. -dstmodel outputs the interpolated temperature field to TS1out.dat. - -srcmodel, dstmodel and coupler are broken into init, run and finalize phases. - -The models and coupler run sequentially on a pool of processors. - -master.F90 - the top level program -srcmodel.F90 - the first component, an atmosphere model. -dstmodel.F90 - the second component, an ocean model. -coupler.F90 - the third component, a coupler which takes - the atmosphere data and maps it to - the ocean grid. - ------------------------------------------------------ -To compile: -First make sure you have compiled MCT. See instructions in -MCT/README - -Type "make" here or "make examples" in the top-level directory. - -The executable is called "climate" - ------------------------------------------------------ -To run: -"climate" requires a data file of interpolation weights in -the directory MCT/data. If this directory was not present when -you untarred MCT, you can get it from the MCT website. - -climate requires at least 1 MPI process to run but can run on -any even number of processors. Consult your -local documentation for how to run parallel programs.
- -Typical command: mpirun -np 8 climate diff --git a/src/externals/mct/examples/climate_sequen1/TS1.dat b/src/externals/mct/examples/climate_sequen1/TS1.dat deleted file mode 100644 index 6e9ce15fbec..00000000000 --- a/src/externals/mct/examples/climate_sequen1/TS1.dat +++ /dev/null @@ -1,8193 +0,0 @@ -128 64 -210.598221 -210.370956 -210.200317 -209.999313 -209.773987 -209.545242 -209.338638 -209.079834 -208.818771 -208.530273 -208.189346 -207.917847 -207.668228 -207.482681 -207.322525 -207.134918 -206.982986 -206.822006 -206.676392 -206.721191 -206.731567 -206.764267 -206.714890 -206.735657 -206.747650 -206.827255 -206.850861 -206.983688 -207.129868 -207.300278 -207.427399 -207.649628 -207.937622 -208.207809 -208.546432 -208.819489 -209.170090 -209.519623 -209.858063 -210.218704 -210.569855 -210.952911 -211.282089 -211.552551 -211.894699 -212.337753 -212.782440 -213.256454 -213.748413 -214.255295 -214.766602 -215.275497 -215.744263 -216.132645 -216.580765 -217.098587 -217.593170 -218.020859 -218.403473 -218.774872 -219.153122 -219.486679 -219.813370 -220.131027 -220.357315 -220.542770 -220.604584 -220.937531 -221.070450 -221.289825 -221.557281 -221.853806 -222.160858 -222.459793 -222.693054 -222.786880 -222.874527 -222.896362 -222.825470 -222.752060 -222.734604 -222.658218 -222.471939 -222.252823 -222.029297 -221.792542 -221.557983 -221.311111 -220.990540 -220.650986 -220.355820 -219.980530 -219.635452 -219.326462 -218.962769 -218.577789 -218.222351 -217.879379 -217.496918 -217.081879 -216.670471 -216.243790 -215.800110 -215.363174 -214.910431 -214.569214 -214.127563 -213.685287 -213.289276 -212.880142 -213.003998 -213.130981 -213.308594 -213.289597 -212.976425 -212.866028 -212.840363 -212.713272 -212.476395 -212.316055 -212.394791 -212.154297 -211.890137 -211.663696 -211.405029 -211.171967 -211.026276 -210.856613 -213.167435 -212.562256 -212.014374 -211.504456 -211.025330 -210.481094 -209.938019 -209.340591 -208.417923 -207.137955 -205.945206 -204.466553 -202.577072 -200.389435 -198.414307 -196.707825 -195.323639 -194.249283 -192.984116 -191.853104 -190.434647 -190.028534 -189.964020 -189.970642 -190.117691 -190.522552 -191.164093 -191.902359 -192.807236 -193.725388 -194.719238 -195.973923 -197.250046 -199.003326 -200.910721 -202.147079 -203.195541 -203.966690 -205.127243 -206.276566 -206.781235 -207.342941 -207.898224 -208.407654 -209.211517 -210.186508 -211.245926 -212.426620 -213.630768 -214.591736 -215.851624 -217.332382 -218.879410 -220.246094 -221.651947 -223.145447 -224.005936 -224.752151 -225.357178 -226.345139 -227.101364 -228.065659 -228.966431 -229.100510 -230.123230 -230.977768 -231.241364 -231.654526 -231.855270 -235.246109 -235.445724 -235.503387 -235.447708 -235.248032 -233.090515 -232.651276 -232.059616 -231.876999 -231.778717 -231.750641 -231.459259 -230.894257 -230.159058 -229.533539 -229.958359 -229.331268 -228.658630 -227.984222 -227.349060 -226.695679 -225.077408 -223.466232 -221.725479 -221.504913 -221.443954 -221.182358 -220.250595 -220.077789 -220.920624 -222.083618 -222.760086 -222.779373 -222.677231 -222.581085 -222.494858 -222.415268 -222.327332 -222.277954 -222.224213 -222.164093 -222.143753 -222.175461 -222.210510 -222.056595 -221.827530 -221.473419 -221.094986 -220.646164 -220.121704 -219.539032 -218.877380 -218.169312 -217.399780 -216.614441 -215.835999 -215.063995 -214.336258 -213.638046 -215.556839 -214.143646 -212.680069 -211.400772 -210.178741 -208.861877 -207.274490 -205.058029 -202.457718 -199.698135 -197.382919 -195.492950 -194.106613 -192.725250 
-191.600372 -190.947159 -190.290909 -189.465027 -186.893555 -185.100830 -184.374512 -184.130112 -183.782822 -183.367874 -183.250015 -183.133804 -183.022476 -183.095383 -183.638931 -184.575165 -185.803024 -187.062988 -188.894089 -190.552078 -192.286118 -193.417603 -195.194046 -197.707962 -200.826935 -203.688507 -205.255905 -205.875717 -206.337860 -206.663528 -207.365692 -207.999008 -208.404755 -209.958054 -211.473389 -213.113861 -214.949188 -217.032959 -219.286453 -221.279465 -223.460953 -225.035309 -226.788651 -227.175400 -228.037872 -235.137680 -236.596985 -237.839508 -238.908478 -239.669586 -240.169785 -240.624069 -240.848145 -240.866089 -240.343811 -240.016068 -239.366928 -238.294479 -237.757645 -237.197769 -226.152878 -225.532440 -224.927856 -225.174408 -225.600693 -226.000214 -226.331573 -226.283493 -226.036835 -225.600281 -224.700211 -223.377274 -222.089157 -220.716812 -220.822800 -219.998932 -218.664337 -217.075836 -216.318375 -216.012772 -216.365967 -216.833740 -217.248184 -218.718140 -220.790848 -224.842697 -227.980240 -228.560654 -228.919907 -229.107925 -229.643784 -230.317825 -230.689423 -231.045853 -231.218002 -231.631363 -232.147446 -232.725494 -233.351242 -233.979721 -234.391464 -233.847321 -233.173294 -232.267868 -231.373795 -229.994705 -228.617630 -226.905823 -225.032150 -223.399902 -221.907013 -220.051224 -218.387192 -216.907990 -216.803253 -214.330490 -212.666077 -211.305756 -209.714539 -208.160431 -206.624924 -204.745132 -202.388794 -200.705963 -199.604858 -198.958603 -198.529282 -198.494705 -198.366898 -198.511841 -198.526398 -198.483109 -198.481888 -196.241806 -194.316101 -195.219955 -196.749954 -198.072418 -198.284668 -197.068298 -194.908432 -191.956192 -189.250092 -186.491058 -184.236938 -183.249725 -183.637604 -184.810577 -186.584885 -189.267258 -191.118713 -192.881485 -194.483322 -196.234467 -197.321030 -198.656067 -199.488083 -200.601730 -201.736862 -202.883148 -204.424118 -205.990936 -207.530136 -209.261200 -211.170059 -213.168182 -215.410522 -217.802292 -221.807510 -225.082321 -227.731567 -230.305405 -234.507309 -236.748611 -238.697388 -240.269302 -241.385284 -241.801010 -242.278534 -242.475616 -241.820312 -240.339005 -238.250641 -235.985336 -235.027740 -235.721375 -235.590530 -236.166336 -227.330338 -227.829422 -225.608932 -225.338638 -226.067764 -226.972641 -228.235962 -229.247818 -229.865173 -229.445694 -228.165390 -226.516830 -224.984421 -223.781662 -222.570251 -220.534103 -219.150116 -218.304306 -217.669205 -217.182922 -217.995422 -219.271072 -220.411377 -222.585175 -224.228394 -225.086761 -228.922073 -232.388336 -232.858475 -233.568344 -234.537537 -235.637100 -236.969269 -238.463333 -239.950439 -233.884460 -234.261917 -235.947968 -237.480743 -246.237122 -246.724487 -246.729843 -242.308350 -241.468597 -238.629440 -233.350159 -232.267059 -230.252396 -227.592026 -224.840179 -222.396255 -223.133240 -220.707642 -219.132935 -217.075272 -212.818604 -209.693283 -207.163101 -205.337723 -203.381042 -202.002075 -200.424271 -199.532822 -200.810440 -202.920486 -204.497971 -205.334732 -205.805710 -206.097427 -206.785324 -207.351974 -207.284286 -207.248703 -206.631348 -204.505142 -204.932251 -205.157883 -205.533630 -205.957703 -205.994156 -206.462448 -206.938202 -206.845764 -205.541077 -203.267120 -199.568680 -196.129684 -193.607971 -194.003128 -195.612839 -197.663803 -198.066711 -198.175110 -198.009781 -197.872818 -197.890167 -197.997025 -197.955460 -198.879669 -200.427582 -201.480591 -202.802475 -204.993851 -207.465668 -209.653366 -212.042679 -214.537018 -217.207047 
-220.008331 -222.901230 -225.834305 -228.763412 -231.508957 -235.266205 -237.701294 -239.822601 -241.556580 -242.849701 -243.441254 -243.835724 -244.082169 -243.937134 -243.518021 -242.543228 -239.571030 -237.307816 -235.659332 -235.331055 -237.555786 -237.983734 -227.302689 -226.626282 -226.627762 -226.923370 -227.548691 -228.518768 -233.553268 -233.180557 -231.671539 -229.297409 -226.110962 -224.788040 -224.307465 -224.276642 -224.250275 -224.226349 -224.249619 -224.540970 -224.784851 -224.899216 -224.615601 -224.265488 -224.227798 -225.339493 -227.531403 -228.531525 -229.157959 -230.206482 -231.741028 -236.652466 -238.289352 -240.121048 -242.116043 -244.084290 -246.014084 -247.713684 -248.578842 -248.167892 -248.566437 -248.895050 -249.638535 -249.198868 -240.670593 -238.254501 -234.596741 -231.307693 -229.274506 -228.187164 -225.986938 -227.384964 -224.577286 -221.570511 -233.674194 -231.766693 -230.290558 -228.810776 -226.972916 -224.844421 -221.683044 -219.777649 -218.624634 -218.878876 -219.472687 -220.139496 -220.303497 -219.374481 -217.834381 -216.290939 -214.593552 -212.577652 -211.100952 -211.091675 -210.422791 -208.266266 -207.504425 -209.187073 -211.201569 -209.356781 -209.906860 -212.042496 -214.548691 -220.531525 -220.352341 -215.327454 -207.993683 -205.045975 -205.060623 -205.866959 -205.786850 -205.538513 -204.943970 -204.880371 -204.659744 -204.709122 -203.280991 -202.153885 -203.263855 -204.885345 -205.702637 -206.837646 -208.926666 -211.051392 -213.485641 -215.877701 -218.358414 -220.931931 -223.410675 -225.733551 -228.032791 -230.226654 -232.499664 -234.857681 -237.059677 -240.249039 -242.322754 -243.976822 -245.226059 -246.032501 -246.406494 -246.443176 -246.180862 -245.751923 -245.185150 -243.538345 -240.405624 -239.348785 -238.994873 -239.435455 -240.171463 -240.269211 -240.193787 -239.949814 -239.742096 -239.719116 -240.075119 -240.776276 -241.193054 -234.442001 -240.280457 -239.104736 -238.409805 -237.937637 -237.865448 -237.386124 -236.494034 -232.030960 -233.395798 -233.877304 -234.296600 -234.369064 -234.960968 -235.306015 -235.436157 -235.978806 -236.175079 -236.420105 -237.009018 -237.998825 -239.358215 -242.386658 -244.207108 -246.046600 -247.820633 -249.402542 -251.017044 -252.288940 -252.520233 -252.438110 -251.875015 -250.971695 -248.615524 -246.374710 -243.145737 -240.921326 -231.697464 -230.287155 -229.651566 -231.731689 -236.549210 -235.273453 -241.647888 -241.563858 -241.577255 -241.488342 -241.050125 -240.381561 -238.435043 -236.724564 -236.911743 -237.364456 -238.172852 -238.720047 -239.087982 -239.064896 -237.673416 -235.307007 -231.666153 -226.408035 -221.938004 -219.849472 -219.547806 -217.750092 -217.415985 -219.228638 -222.837601 -226.718109 -231.130997 -236.459869 -239.557327 -239.733368 -237.241928 -232.325134 -226.267807 -221.730621 -220.718689 -222.741821 -221.684662 -220.566879 -219.095505 -217.708908 -216.635040 -216.239120 -216.149796 -216.250519 -216.348984 -216.308731 -216.181351 -216.253021 -216.634979 -217.657379 -219.376923 -221.368530 -223.879532 -226.487610 -229.064529 -231.016373 -232.477173 -233.763885 -234.739105 -235.854614 -237.255753 -240.270844 -242.283463 -244.228958 -245.873520 -246.957321 -247.533920 -247.733551 -247.620361 -247.240509 -246.763062 -246.293381 -245.924805 -245.536118 -244.966049 -244.618484 -244.249115 -244.080643 -243.951401 -243.963531 -244.089920 -244.293488 -244.600174 -244.911285 -245.035950 -244.990753 -244.516235 -243.776459 -243.028473 -242.634949 -241.989227 -241.559769 -240.780396 -240.651596 
-240.678833 -240.986801 -241.551239 -242.032562 -242.347351 -242.470444 -242.354141 -242.065781 -241.810623 -240.813141 -240.803055 -241.600952 -242.604828 -244.836624 -246.318222 -247.883957 -249.172119 -250.429871 -251.156616 -251.757172 -253.076294 -253.409271 -252.756409 -250.574570 -249.624023 -247.305374 -246.143738 -244.985901 -244.100540 -242.950470 -242.065292 -238.393127 -240.074860 -242.011047 -248.560425 -248.369232 -248.228989 -248.124649 -247.922943 -247.543182 -246.991180 -246.257065 -245.402115 -244.688126 -244.208969 -244.054367 -244.114288 -243.681351 -243.043854 -239.749405 -239.431793 -237.868866 -235.753128 -233.133560 -230.818161 -229.447495 -229.773438 -232.099960 -235.966873 -242.815628 -248.433365 -251.144333 -251.804398 -248.780624 -246.646194 -244.207458 -239.708435 -238.320419 -237.095413 -236.195862 -235.080215 -234.204468 -233.162674 -232.286148 -231.844482 -231.671051 -231.545959 -231.209808 -230.402130 -229.014374 -227.202591 -225.376328 -224.090317 -223.763229 -224.561493 -226.258163 -228.323685 -232.464325 -227.765762 -235.967834 -236.834442 -237.007812 -237.224182 -237.560440 -236.882858 -238.725052 -240.611023 -242.861053 -245.012619 -246.806793 -248.132980 -248.892868 -249.370560 -249.504425 -249.000244 -248.332611 -247.642822 -246.833740 -246.374100 -246.109879 -246.124512 -246.426910 -246.857285 -247.159607 -247.252274 -247.155426 -246.818588 -246.232391 -245.663559 -245.243240 -245.042068 -244.669373 -244.152802 -243.503418 -242.897263 -242.431366 -242.032593 -241.791153 -241.841644 -242.249863 -242.987534 -243.963226 -245.015060 -246.023743 -246.797180 -247.412277 -247.853516 -248.207443 -248.381622 -247.375809 -248.929626 -249.250320 -249.813873 -250.607330 -251.437881 -251.941452 -252.757812 -253.399261 -253.757568 -253.844879 -253.831039 -252.951538 -251.238876 -249.642685 -249.233582 -248.723206 -248.573288 -248.478531 -248.782654 -248.899124 -248.792480 -248.734940 -258.557220 -258.505646 -257.810181 -256.639191 -255.411148 -254.010483 -252.732178 -251.542892 -250.241898 -248.964355 -247.691467 -246.481705 -245.583878 -245.857437 -245.969803 -246.261154 -246.706787 -246.671906 -245.939911 -244.849060 -243.761597 -243.161652 -243.664917 -245.415344 -248.177887 -252.080460 -256.314117 -258.248413 -258.366577 -257.311859 -255.547623 -252.558380 -250.714615 -250.067398 -249.588455 -249.108063 -248.532639 -247.699600 -246.767624 -245.961929 -245.740891 -246.115860 -246.831268 -247.425919 -247.374924 -246.367233 -244.456070 -242.050140 -239.797256 -238.327301 -237.909103 -238.621552 -239.992477 -241.275772 -241.817230 -241.753815 -241.639816 -241.436356 -241.920883 -242.477615 -243.142319 -244.339874 -245.988068 -247.566025 -249.295059 -251.188568 -253.076019 -254.260925 -255.326767 -256.095917 -256.057953 -255.536560 -254.447784 -252.639938 -251.036072 -250.247498 -250.327698 -250.976669 -252.456467 -253.944321 -254.573898 -254.902069 -254.253860 -253.066162 -252.260345 -251.261536 -250.158615 -249.016098 -247.917328 -247.096786 -246.412323 -245.697128 -244.972504 -244.631882 -244.938675 -245.872604 -247.344315 -249.211578 -251.333481 -253.471939 -254.680649 -255.519562 -256.252106 -257.019043 -257.411438 -256.582733 -254.931778 -254.267059 -253.746490 -253.566498 -253.864548 -254.528732 -255.179321 -255.838425 -256.246979 -256.338745 -256.289581 -255.836777 -254.964218 -253.893906 -252.838593 -252.136093 -252.008408 -252.580963 -253.771606 -255.261963 -256.737152 -257.906799 -268.072296 -268.586670 -268.314728 -267.356903 -265.898376 -263.972809 
-261.564636 -259.257660 -256.889832 -254.743210 -253.002686 -251.409027 -249.706772 -248.684296 -247.991470 -248.406128 -248.770508 -249.065582 -250.180084 -250.634506 -250.590302 -250.362122 -250.363251 -250.912277 -252.114761 -253.839066 -256.294708 -258.334869 -259.414337 -259.174652 -259.972626 -259.292542 -258.190369 -257.421173 -256.768372 -256.124115 -255.614548 -255.188187 -254.860031 -255.158752 -256.130188 -257.863007 -260.090881 -262.369293 -271.407104 -271.512756 -271.641418 -271.765808 -271.796997 -271.808960 -271.768951 -271.646973 -271.497284 -271.481445 -271.474243 -271.506073 -271.515991 -271.434570 -255.415192 -271.654144 -271.763824 -272.168732 -272.224487 -272.197266 -272.163483 -272.145477 -272.128143 -272.110657 -272.098419 -272.067688 -271.997772 -271.876465 -271.465485 -271.413879 -271.349854 -258.953918 -258.900208 -260.095337 -271.470551 -271.554962 -271.883484 -272.018219 -272.294312 -272.577179 -272.774384 -272.880585 -272.948944 -273.015839 -273.076111 -273.137329 -273.200195 -273.261505 -273.305878 -273.322662 -273.321625 -273.310822 -273.270386 -273.179657 -272.995392 -272.698151 -272.339844 -272.260284 -272.112274 -271.890900 -272.402832 -271.172791 -268.963959 -265.997772 -263.500885 -261.785156 -260.678436 -259.832703 -259.157074 -258.772949 -258.687775 -258.697235 -258.765533 -258.751404 -258.503113 -258.026428 -257.532501 -257.265747 -257.572113 -258.619415 -260.379700 -262.604553 -264.846283 -266.785828 -270.714447 -271.029266 -270.822845 -270.120819 -269.055450 -267.379028 -265.740967 -264.233521 -262.387878 -260.484711 -259.021179 -257.235382 -255.900681 -254.511719 -251.002579 -248.309540 -248.258606 -248.981857 -249.812683 -250.839996 -251.797379 -252.427017 -252.498260 -252.765457 -253.601242 -254.423065 -255.435699 -256.653656 -258.129517 -259.548370 -260.590057 -260.701477 -260.686798 -260.622101 -260.905792 -260.850006 -261.009827 -261.527191 -262.515961 -271.518890 -271.827606 -271.955597 -272.035309 -272.287994 -272.507965 -272.627350 -272.728058 -272.865021 -272.964447 -273.001373 -272.985474 -272.926086 -272.876160 -272.850677 -272.896454 -273.191833 -273.441223 -273.699860 -273.726074 -274.204285 -274.557953 -274.742188 -274.693085 -274.565399 -274.483582 -274.443176 -274.376892 -274.278168 -274.163635 -274.032410 -273.866669 -273.647919 -273.345520 -273.040497 -272.714417 -272.538574 -272.443848 -272.477875 -272.736816 -273.032104 -273.386261 -273.679108 -273.928436 -274.113708 -274.205719 -274.248596 -274.285492 -274.354309 -274.451874 -274.552246 -274.667023 -274.760010 -274.823334 -274.849518 -274.844025 -274.863708 -274.905609 -274.935944 -274.947937 -274.921448 -274.870575 -274.776398 -274.600922 -274.326385 -273.843353 -273.264709 -271.743927 -270.968689 -269.189331 -267.882751 -266.812286 -265.737976 -264.570312 -263.305206 -262.012085 -261.010956 -260.522156 -260.523224 -260.655334 -260.951477 -261.415100 -262.028168 -262.957001 -264.139435 -265.624054 -267.299805 -268.891418 -270.065155 -271.449585 -271.680939 -271.506927 -270.984467 -270.481415 -269.705719 -268.961823 -268.373444 -271.658875 -271.964172 -272.144928 -272.241730 -272.277893 -272.254395 -272.170410 -272.075409 -272.026581 -272.048004 -272.151215 -272.212280 -272.266785 -272.283783 -272.304749 -272.269958 -272.207794 -272.053253 -271.560242 -260.486389 -261.257874 -262.219055 -263.694794 -272.160645 -272.418304 -272.498627 -272.471100 -272.407471 -272.371246 -272.388123 -272.469757 -272.517059 -272.778229 -273.005920 -273.233826 -273.555145 -273.840118 -273.998077 
-274.133453 -274.339233 -274.468933 -274.486938 -274.449738 -274.410980 -274.528900 -274.778778 -275.048370 -275.376251 -275.677216 -275.976410 -276.429626 -276.972961 -277.367889 -277.409882 -277.118195 -276.829285 -276.718750 -276.700836 -276.632233 -276.487823 -276.326996 -276.174164 -275.991699 -275.811554 -275.597260 -275.307495 -275.006012 -274.751526 -274.639252 -274.760834 -274.984955 -275.135895 -275.243683 -275.402191 -275.629425 -275.811523 -275.843781 -275.792206 -275.762970 -275.804810 -275.903320 -276.010223 -276.100647 -276.153351 -276.207031 -276.251465 -276.271423 -276.304596 -276.368958 -276.456451 -276.561340 -276.664917 -276.786102 -276.910797 -276.964294 -276.956848 -276.847473 -276.503845 -275.660797 -274.836426 -274.098358 -273.389648 -272.348511 -272.424744 -271.422394 -270.162079 -268.572601 -266.545807 -264.845734 -263.863159 -263.600647 -263.784760 -264.395325 -264.868195 -265.653107 -266.823608 -268.033905 -269.270874 -270.186707 -270.969818 -272.166168 -272.242493 -272.227539 -272.192017 -272.151001 -272.252350 -272.284912 -272.246368 -272.520691 -272.757721 -273.000366 -273.174927 -273.242950 -273.183472 -273.045593 -272.918823 -272.881653 -272.952576 -273.082336 -273.184662 -273.246033 -273.310883 -273.341461 -273.308075 -273.276367 -273.169861 -272.976562 -272.798462 -272.685699 -272.786224 -273.143738 -273.594330 -273.915833 -273.919281 -273.744476 -273.607849 -273.523407 -273.498199 -273.530365 -273.601501 -273.855042 -274.201080 -274.587036 -275.033600 -275.421204 -275.703125 -275.909119 -276.153534 -276.294647 -276.265503 -276.209015 -276.265442 -276.608948 -277.140717 -277.556519 -277.862396 -278.105194 -278.421051 -278.930450 -279.433960 -279.715393 -279.647919 -279.166534 -278.745972 -278.639954 -278.716064 -278.764557 -278.676544 -278.530701 -278.422729 -278.318420 -278.244507 -278.149109 -277.983917 -277.782471 -277.602814 -277.491699 -277.519318 -277.581909 -277.527679 -277.423218 -277.408600 -277.496552 -277.549744 -277.449097 -277.267151 -277.136169 -277.151581 -277.253143 -277.327820 -277.337585 -277.309113 -277.310974 -277.332184 -277.350342 -277.371002 -277.413727 -277.501526 -277.658966 -277.901001 -278.253052 -278.599304 -278.686584 -277.688416 -277.686249 -278.073700 -277.575256 -276.995178 -276.327271 -275.642975 -275.024689 -274.384247 -273.866302 -273.626190 -273.574432 -273.555695 -273.355865 -272.845093 -272.391205 -272.477448 -272.726593 -272.828857 -272.806305 -272.693298 -272.512878 -272.317657 -272.172119 -272.102264 -274.039978 -274.109344 -274.111389 -274.056213 -273.944183 -273.887817 -273.796143 -273.622620 -273.679413 -273.936462 -274.249146 -274.508026 -274.529724 -274.380249 -274.193817 -274.073517 -274.053619 -274.114014 -274.240631 -274.344360 -274.415375 -274.496948 -274.501923 -274.427734 -274.398834 -274.333649 -274.177124 -274.047943 -274.056030 -274.325531 -274.890472 -275.531738 -275.915375 -275.773041 -275.390472 -275.153473 -275.054382 -275.012207 -275.022308 -275.101379 -275.407990 -275.867371 -276.387939 -276.937134 -277.466980 -277.906616 -278.183594 -278.396332 -278.509094 -278.499847 -278.527222 -278.689270 -279.085022 -279.602264 -279.978333 -280.203674 -280.340668 -280.567383 -280.979248 -281.248199 -281.140320 -280.822540 -280.384430 -280.063110 -280.043396 -280.266479 -280.526794 -280.571472 -280.474274 -280.417969 -280.415924 -280.448364 -280.427124 -280.329132 -280.188385 -280.039093 -279.883362 -279.736237 -279.584503 -279.419220 -279.246338 -279.086853 -278.965057 -278.836212 -278.627747 -278.368530 
-278.192078 -278.218414 -278.334534 -278.380432 -278.333374 -278.239258 -278.176666 -278.153046 -278.155975 -278.183807 -278.239319 -278.356659 -278.590668 -278.973755 -279.478882 -279.805115 -278.479431 -277.843658 -279.070831 -278.827118 -278.490631 -278.067444 -277.599335 -277.165955 -276.806213 -276.346832 -275.821106 -275.448792 -275.257996 -275.093811 -274.849152 -274.545837 -274.388458 -274.431793 -274.513580 -274.496002 -274.353912 -274.117035 -273.809387 -273.603760 -273.609772 -273.778046 -276.265076 -276.390015 -276.394897 -276.369202 -276.338013 -276.287415 -276.111877 -275.803436 -275.684357 -275.814728 -276.169342 -276.441803 -276.343262 -276.037811 -275.793610 -275.691620 -275.601898 -275.468140 -275.452911 -275.606903 -275.932922 -276.254150 -276.253448 -275.992493 -275.867523 -275.944336 -276.055389 -276.143005 -276.261475 -276.608185 -277.272430 -277.931610 -278.196381 -277.948120 -277.481659 -277.219269 -277.194031 -277.273651 -277.438171 -277.666046 -278.060730 -278.497986 -278.895172 -279.328247 -279.825562 -280.230865 -280.459839 -280.616455 -280.729919 -280.807068 -280.913605 -281.068207 -281.339203 -281.665802 -281.874146 -281.948914 -281.988312 -282.153839 -282.484375 -282.571930 -282.120422 -281.533478 -281.167969 -281.029419 -281.135803 -281.504791 -281.954193 -282.128662 -282.075073 -281.980225 -281.925049 -281.906921 -281.833038 -281.668030 -281.452148 -281.239441 -281.033752 -280.820190 -280.605743 -280.435272 -280.250885 -280.021912 -279.828949 -279.673615 -279.487793 -279.306488 -279.234375 -279.299896 -279.399384 -279.433380 -279.365601 -279.216187 -279.093140 -279.039642 -279.043793 -279.104767 -279.224915 -279.418182 -279.714722 -280.150543 -280.686951 -280.958160 -278.410828 -278.998016 -279.947113 -279.610962 -279.149048 -278.749481 -278.675598 -278.907135 -279.085663 -278.902435 -278.498505 -278.107117 -277.812958 -277.511780 -277.191071 -276.865448 -276.633911 -276.555847 -276.606812 -276.616333 -276.412292 -276.089813 -275.720367 -275.505371 -275.623413 -275.919128 -278.866974 -279.017242 -279.019623 -279.044678 -279.143311 -279.154175 -279.088928 -278.939819 -278.796814 -278.760162 -278.972565 -279.032623 -278.685944 -278.180359 -277.809174 -277.604919 -277.354065 -276.982452 -276.869690 -277.302948 -278.300598 -279.315216 -279.574463 -279.134338 -278.842468 -279.066559 -279.428284 -279.560242 -279.506317 -279.662842 -280.120941 -280.484436 -280.487427 -280.248596 -279.981537 -279.929718 -280.063934 -280.268585 -280.512451 -280.719330 -280.978363 -281.181152 -281.326935 -281.514771 -281.769867 -281.966431 -282.052399 -282.137604 -282.282410 -282.468475 -282.680908 -282.900635 -283.168793 -283.418152 -283.454926 -283.341919 -283.309174 -283.461243 -283.748993 -283.839569 -278.015259 -282.732483 -282.329407 -282.269989 -282.482544 -282.884521 -283.274078 -283.420349 -283.337830 -283.156769 -282.972931 -282.811890 -282.650024 -282.447968 -282.202881 -281.965057 -281.766663 -281.597778 -281.427338 -281.297058 -281.161102 -280.974304 -280.821350 -280.715240 -280.598206 -280.538605 -280.584778 -280.668396 -280.731720 -280.741272 -280.642426 -280.449615 -280.284363 -280.229248 -280.270844 -280.380524 -280.562897 -280.803925 -281.096985 -281.480347 -281.916046 -282.167267 -279.546204 -280.074402 -280.618073 -280.696594 -280.060455 -279.596985 -279.978790 -281.135834 -281.682709 -281.395905 -280.920441 -280.587769 -280.348846 -280.102936 -279.875092 -279.585266 -279.304810 -279.165375 -279.216187 -279.200531 -278.919556 -278.615936 -278.413879 -278.335968 
-278.446503 -278.633972 -281.455231 -281.611389 -281.686890 -281.870850 -282.260803 -282.373383 -282.655365 -282.881287 -282.868195 -282.631592 -282.665161 -282.500763 -281.965149 -281.389679 -280.811615 -280.496887 -280.316925 -279.790222 -279.611816 -280.481110 -282.035278 -283.469421 -283.896393 -283.460510 -283.100647 -283.203827 -283.314514 -283.142975 -282.904205 -282.801117 -282.828888 -282.789124 -282.646484 -282.568878 -282.572754 -282.626953 -282.695557 -282.778595 -282.838776 -282.876282 -282.966858 -283.005280 -283.026398 -283.078613 -283.168762 -283.247620 -283.250824 -283.314514 -283.534515 -283.828674 -284.147156 -284.447418 -284.747314 -284.977600 -284.970734 -284.808533 -284.709930 -284.794769 -285.018555 -285.176575 -284.984955 -282.542572 -284.117340 -284.151489 -284.410156 -284.664948 -284.774261 -284.692017 -284.453735 -284.167755 -283.913269 -283.696838 -283.524017 -283.380798 -283.220581 -283.034027 -282.865021 -282.752716 -282.655823 -282.597626 -282.563232 -282.475159 -282.359131 -282.272186 -282.219757 -282.257965 -282.324951 -282.339752 -282.362946 -282.361206 -282.195068 -281.978271 -281.807068 -281.735870 -281.818024 -281.997620 -282.221344 -282.465851 -282.735199 -283.045044 -283.204285 -283.303223 -283.225067 -280.283234 -281.014496 -282.368347 -281.322571 -280.663361 -281.089142 -283.009674 -283.676483 -283.140259 -282.454712 -282.256439 -282.294830 -282.338043 -282.462341 -282.444489 -282.324524 -282.225220 -282.171783 -281.982758 -281.678711 -281.583496 -281.662659 -281.635498 -281.485291 -281.393890 -283.435089 -283.570801 -283.809479 -284.233917 -284.971436 -285.555267 -286.519226 -287.048279 -287.073059 -286.590759 -286.809753 -286.840912 -286.458893 -286.117828 -285.467133 -285.300995 -285.430511 -284.920715 -284.507874 -285.090027 -286.069519 -286.704498 -286.688721 -286.365448 -286.055939 -285.880585 -285.677185 -285.340240 -285.089935 -284.906982 -284.695831 -284.504120 -284.337189 -284.258362 -284.182404 -284.128967 -284.116211 -284.152588 -284.214661 -284.359772 -284.614960 -284.769165 -284.776184 -284.728577 -284.679749 -284.651245 -284.609100 -284.683899 -284.954468 -285.273895 -285.533417 -285.716064 -285.930389 -286.246582 -286.485138 -286.489777 -286.403900 -286.385986 -286.439423 -286.480469 -286.392609 -286.099884 -285.926147 -286.065338 -286.281830 -286.279846 -286.076111 -285.800262 -285.498627 -285.218384 -285.003967 -284.836212 -284.706604 -284.624908 -284.565857 -284.478210 -284.369751 -284.299438 -284.282806 -284.325012 -284.362122 -284.300018 -284.183258 -284.084381 -284.091492 -284.156403 -284.211823 -284.229706 -284.263000 -284.198700 -283.899689 -283.670288 -283.528564 -283.443695 -283.536133 -283.778564 -284.004120 -284.246002 -284.471008 -284.560486 -284.431793 -284.346344 -280.741974 -279.108429 -278.454071 -282.496155 -282.700256 -281.932068 -281.985168 -283.801117 -285.261475 -285.256805 -284.825073 -284.885712 -285.047943 -285.023071 -285.150726 -285.224792 -285.138763 -284.980499 -284.769775 -284.421234 -284.193817 -284.219147 -284.191376 -284.001373 -283.656586 -283.449402 -285.652130 -285.717560 -285.894409 -286.320801 -286.972321 -287.686157 -288.399963 -288.954315 -289.474701 -289.867126 -290.060974 -289.996216 -289.767700 -289.459869 -289.113953 -288.907593 -288.713287 -288.419983 -288.181946 -288.069672 -288.027527 -287.910553 -287.648651 -287.381714 -287.163055 -286.926514 -286.617920 -286.278259 -286.042053 -285.841919 -285.619965 -285.422424 -285.280792 -285.173096 -285.100220 -285.136383 -285.268768 -285.463226 
-285.698853 -286.072968 -286.631378 -287.002502 -286.962830 -286.653015 -286.365021 -286.193268 -286.109619 -286.158356 -286.320343 -286.468079 -286.540497 -282.316742 -283.526581 -284.467957 -288.143921 -288.404053 -288.348816 -288.204620 -288.001617 -287.765106 -287.579590 -287.439728 -287.418152 -287.559723 -287.607208 -287.351990 -286.974823 -286.705597 -286.542297 -286.413483 -286.313965 -286.249542 -286.175293 -286.138855 -286.122894 -286.115723 -286.090912 -286.071411 -286.061493 -286.097046 -286.134216 -286.158630 -286.141541 -286.045105 -286.060120 -286.123932 -286.167908 -286.267212 -286.320801 -286.127106 -285.835632 -285.592041 -285.503448 -285.436646 -285.458862 -285.642792 -285.774323 -285.846436 -285.842590 -285.756683 -285.508331 -285.215027 -280.187653 -277.929016 -279.047333 -283.485840 -283.648376 -284.570221 -283.606201 -284.962280 -286.947540 -287.692505 -287.782928 -287.731079 -287.546814 -287.388947 -287.313232 -287.248413 -287.111481 -286.918884 -286.677002 -286.435364 -286.317474 -286.287903 -286.056366 -285.799500 -285.607025 -285.598358 -287.815826 -287.847870 -287.934784 -288.120544 -288.409973 -288.612976 -288.660553 -288.867889 -289.622681 -290.815063 -291.717316 -291.900848 -291.632843 -291.260193 -290.925537 -290.608856 -290.294159 -290.026672 -289.787689 -289.496002 -289.179779 -288.906555 -288.658630 -288.440552 -288.225067 -287.976929 -287.691223 -287.405670 -287.194153 -287.009949 -286.817047 -286.637024 -286.500671 -286.410950 -286.400818 -286.529999 -286.761139 -287.080200 -287.506775 -288.115753 -288.897797 -289.311523 -280.162598 -288.288971 -287.720184 -287.468445 -287.404572 -287.403870 -287.303802 -281.890076 -281.348999 -282.851105 -284.979309 -286.534637 -289.987061 -290.402588 -290.330566 -290.038849 -289.619446 -289.195740 -288.903351 -288.796753 -288.843964 -288.917450 -288.816925 -288.491882 -288.160919 -287.967072 -287.902222 -287.867798 -287.850769 -287.847443 -287.846161 -287.870361 -287.901428 -287.936951 -287.950378 -287.924286 -287.885254 -287.881470 -287.905212 -287.947052 -288.023682 -288.083618 -288.148895 -288.206024 -288.290680 -288.311066 -288.263062 -288.109894 -287.905579 -287.669708 -287.603577 -287.501923 -287.413025 -287.402893 -287.376801 -287.307098 -287.166809 -286.947754 -286.597137 -286.098907 -285.679199 -278.495911 -280.161224 -284.164642 -282.039001 -284.617615 -284.625336 -286.333862 -288.271088 -289.552551 -289.694061 -289.435242 -289.200104 -289.073212 -288.986206 -288.875488 -288.740723 -288.589203 -288.458862 -288.411163 -288.378082 -288.282684 -288.128906 -287.956146 -287.863770 -287.823212 -289.321259 -289.252930 -289.186798 -289.114166 -288.999023 -288.662689 -288.215851 -288.893188 -286.024628 -284.168182 -284.723175 -293.258850 -293.141388 -292.806702 -292.485748 -292.157104 -291.798309 -291.453796 -291.166321 -290.884766 -290.606628 -290.377777 -290.194061 -290.000549 -289.747528 -289.477142 -289.243683 -289.050751 -288.882385 -288.692383 -288.512329 -288.381653 -288.283020 -288.224487 -288.234985 -288.347198 -288.546967 -288.851440 -289.351776 -290.084320 -290.839905 -291.083801 -279.753662 -279.925995 -280.488678 -279.807343 -280.257935 -280.924805 -282.458740 -283.678436 -286.368774 -288.661774 -290.148834 -288.921204 -291.026550 -292.042999 -291.862762 -291.452576 -291.047455 -290.696777 -290.430725 -290.302521 -290.304535 -290.301910 -290.179779 -289.963043 -289.785858 -289.680695 -289.638702 -289.628479 -289.626831 -289.629791 -289.644226 -289.685730 -289.746185 -289.809967 -289.865387 -289.890076 
-289.882355 -289.894745 -289.920715 -289.954193 -290.025330 -290.115814 -290.205902 -290.271454 -290.353394 -290.359711 -290.311401 -290.152466 -290.005524 -289.805786 -289.734192 -289.598328 -289.440460 -289.259186 -289.031799 -288.825867 -288.577515 -288.301270 -287.860565 -287.095917 -286.390045 -276.211823 -279.537811 -284.495789 -283.138763 -285.102844 -287.175873 -286.810822 -289.297913 -290.892517 -291.351257 -291.085602 -290.807678 -290.689087 -290.650909 -290.600677 -290.509003 -290.389313 -290.263123 -290.160980 -290.047852 -289.927124 -289.827698 -289.695404 -289.554382 -289.415466 -290.393250 -290.209198 -289.979034 -289.646698 -289.111176 -288.228302 -288.232849 -290.025238 -286.815094 -282.847168 -281.594666 -283.535126 -294.401062 -294.256653 -294.037964 -293.880280 -293.643768 -293.261566 -292.874237 -292.576660 -292.354065 -292.153931 -291.945374 -291.700684 -291.435028 -291.201447 -291.019043 -290.889862 -290.746704 -290.550934 -290.363708 -290.257202 -290.192017 -290.155731 -290.176788 -290.266235 -290.449738 -290.706604 -291.163910 -291.826538 -292.354309 -283.564514 -283.075470 -282.623169 -283.148193 -284.259857 -284.790894 -284.071075 -284.190826 -287.397766 -291.603790 -293.262207 -292.349915 -291.898560 -291.543488 -293.286438 -293.014160 -292.631226 -292.378632 -292.182373 -292.006775 -291.897339 -291.860626 -291.841217 -291.771423 -291.692413 -291.659698 -291.639374 -291.629150 -291.609100 -291.592377 -291.588135 -291.581482 -291.586548 -291.622040 -291.681030 -291.756287 -291.850037 -291.943634 -292.021851 -292.077087 -292.116058 -292.158783 -292.216492 -292.232056 -292.232910 -292.223846 -292.250763 -292.280914 -292.188751 -292.039703 -291.775726 -291.575409 -291.381805 -291.101135 -290.799957 -290.510406 -290.208069 -289.867920 -289.527924 -288.955017 -288.046661 -287.164307 -276.955933 -279.344818 -284.930206 -286.620300 -287.154938 -287.205536 -286.837524 -289.439606 -292.110901 -293.014008 -293.005615 -292.718903 -292.499054 -292.371490 -292.319672 -292.234955 -292.065460 -291.879791 -291.714386 -291.517273 -291.324158 -291.171783 -291.002655 -290.820129 -290.599365 -291.295776 -290.936493 -290.504791 -289.892578 -288.917999 -287.647858 -289.456970 -290.754700 -289.466278 -285.027618 -282.986938 -284.096405 -295.286530 -295.401917 -295.260162 -295.182190 -295.114563 -294.916931 -294.605560 -294.313507 -294.093018 -293.848572 -293.585175 -293.341766 -293.134033 -292.967072 -292.819885 -292.690704 -292.533539 -292.355011 -292.196930 -292.068634 -291.968445 -291.884186 -291.876373 -291.976654 -292.193909 -292.444885 -292.826813 -293.333038 -293.667084 -288.203796 -286.947418 -287.037598 -288.023499 -288.828400 -289.583740 -286.984406 -287.145660 -291.378052 -293.595764 -294.155457 -293.750946 -291.541504 -291.264038 -294.332275 -294.046661 -293.734039 -293.627655 -293.600342 -293.583954 -293.564789 -293.535431 -293.496918 -293.476807 -293.481323 -293.509857 -293.545471 -293.597809 -293.628998 -293.637299 -293.618378 -293.591583 -293.597443 -293.646759 -293.685303 -293.755554 -293.864319 -293.977020 -294.039612 -294.076111 -294.119507 -294.175598 -294.178772 -294.106964 -294.018372 -293.945190 -293.870117 -293.825256 -293.735382 -293.519592 -293.207245 -292.872223 -292.544739 -292.187225 -291.839569 -291.529938 -291.187927 -290.813538 -290.407959 -289.750397 -288.827515 -288.008728 -277.443115 -277.459137 -284.165344 -288.688568 -289.414337 -288.875153 -288.508087 -290.050995 -293.083984 -294.107025 -294.504303 -294.459778 -294.181061 -293.934937 -293.755920 
-293.569214 -293.343384 -293.172791 -293.009521 -292.779663 -292.532410 -292.302673 -292.091919 -291.892090 -291.632355 -291.835663 -291.343292 -290.740814 -289.937317 -288.690979 -287.441040 -286.734833 -291.289368 -291.159454 -288.255157 -285.837921 -286.575836 -289.157745 -296.336487 -296.330048 -296.212006 -288.954163 -287.554871 -295.793304 -295.607910 -295.396332 -295.133301 -294.874054 -294.697601 -294.572296 -294.440643 -294.309509 -294.163574 -294.012543 -293.861267 -293.714813 -293.564087 -293.436493 -293.352051 -293.374756 -293.531158 -293.769684 -293.999176 -294.298920 -294.702209 -295.029388 -291.572937 -291.981598 -292.789185 -292.277161 -291.259064 -289.367737 -287.776062 -289.437592 -293.522034 -294.296783 -294.449371 -292.303711 -291.007294 -295.146912 -295.384094 -295.152130 -294.882568 -294.860779 -294.987793 -295.119537 -295.205414 -295.211578 -295.207550 -295.238892 -295.283905 -295.306641 -295.366913 -295.481232 -295.580261 -295.611572 -295.582184 -295.544250 -295.560699 -295.615997 -295.693634 -295.785400 -295.893250 -295.950745 -295.931274 -295.896637 -295.873718 -295.855957 -295.782867 -295.638916 -295.483521 -295.335724 -295.157349 -294.927460 -294.669586 -294.391724 -294.089630 -293.742981 -293.365387 -292.959320 -292.574860 -292.192810 -291.801025 -291.409851 -290.960205 -290.298126 -289.491425 -288.825806 -277.846161 -276.241669 -281.621887 -289.213684 -290.526367 -290.951111 -290.692474 -290.863739 -292.914764 -292.419373 -295.204559 -295.490082 -295.460724 -295.278259 -295.057068 -294.793335 -294.530823 -294.334167 -294.171631 -293.934082 -293.611786 -293.273407 -292.984314 -292.694275 -292.308105 -292.096649 -291.531830 -290.838440 -289.910797 -288.670166 -285.336884 -288.645294 -293.072906 -291.100250 -289.406799 -286.990387 -287.514984 -289.198486 -296.998962 -297.274353 -297.252014 -290.976166 -289.557678 -296.425232 -296.321075 -296.178009 -295.989807 -295.825500 -295.731262 -295.649384 -295.555145 -295.448395 -295.312836 -295.189178 -295.080597 -294.960205 -294.844147 -294.786652 -294.804596 -294.930908 -295.145416 -295.365417 -295.525299 -295.734375 -296.023590 -296.297974 -296.493011 -293.076416 -295.032471 -293.578857 -291.733704 -290.114471 -291.922180 -294.146393 -295.072601 -296.475769 -294.575470 -292.312744 -292.296539 -296.256775 -296.491577 -296.371796 -296.212585 -296.259979 -296.448456 -296.667450 -296.810120 -296.865540 -296.892944 -296.960693 -297.050873 -297.117706 -297.219025 -297.346741 -297.441010 -297.455261 -297.439301 -297.437958 -297.431458 -297.420441 -297.429565 -297.458008 -297.477417 -297.460022 -297.420624 -297.358276 -297.254333 -297.121643 -296.940979 -296.718292 -296.494781 -296.258209 -295.986145 -295.665222 -295.321167 -294.964600 -294.619965 -294.244812 -293.837341 -293.448730 -293.077301 -292.612946 -292.124573 -291.709137 -291.201996 -290.499634 -289.781128 -289.288727 -289.270233 -277.787079 -283.211273 -289.655121 -292.303009 -293.520508 -293.854462 -294.049835 -294.113617 -292.601654 -292.824768 -296.344177 -296.536865 -296.443329 -296.204620 -295.896820 -295.608704 -295.333099 -295.013092 -294.647247 -294.285797 -293.935181 -293.547852 -293.114014 -292.623505 -292.322144 -291.698151 -290.984619 -290.117279 -289.236847 -291.332703 -292.539429 -292.529419 -290.884430 -289.377930 -287.475433 -287.633545 -289.795105 -292.185364 -297.872620 -298.005066 -294.963654 -293.153473 -296.984619 -296.899506 -296.804504 -296.662231 -296.592896 -296.569214 -296.524902 -296.471069 -296.427612 -296.359772 -296.287048 -296.231903 
-296.204956 -296.233093 -296.328796 -296.479401 -296.669922 -296.839386 -296.961029 -297.037872 -297.153717 -297.329865 -297.551544 -297.784729 -297.944305 -298.056976 -292.640900 -292.971527 -294.016876 -294.201202 -295.198029 -296.872589 -295.122498 -293.724670 -293.818604 -297.130981 -297.522064 -297.666443 -297.702118 -297.746460 -297.843506 -298.002655 -298.184479 -298.330902 -298.439575 -298.524750 -298.643646 -298.789307 -298.888641 -298.937683 -298.966766 -298.983398 -298.991760 -299.008606 -299.008301 -298.974274 -298.907715 -298.815796 -298.689270 -298.573914 -298.493805 -298.434418 -298.340240 -298.179871 -297.990479 -297.756165 -297.493164 -297.184509 -296.843262 -296.492432 -296.140900 -295.780212 -295.432739 -295.083374 -294.676971 -294.239319 -293.827698 -293.392151 -292.843781 -292.286987 -291.784790 -291.170868 -290.379425 -289.688049 -289.356934 -282.514069 -283.609100 -289.651978 -291.793945 -294.104950 -295.111603 -296.135742 -296.009399 -294.173676 -292.586243 -293.111542 -293.225098 -297.424469 -297.324768 -297.052185 -296.704468 -296.311890 -295.929047 -295.558319 -295.186310 -294.823792 -294.390381 -293.902954 -293.389801 -292.877075 -292.842072 -292.223206 -291.653351 -291.186340 -290.957428 -292.978485 -292.436554 -291.584015 -290.851379 -290.137054 -288.570923 -288.037659 -290.126312 -293.203979 -296.016479 -298.358978 -298.246216 -294.330994 -297.548248 -297.477509 -297.435425 -297.341492 -297.287079 -297.282043 -297.293854 -297.307800 -297.335480 -297.342346 -297.339966 -297.388641 -297.515656 -297.713684 -297.942383 -298.133270 -298.276550 -298.332855 -298.320862 -298.294128 -298.326111 -298.462769 -298.698395 -298.931519 -299.112732 -299.222229 -299.227112 -293.868866 -293.187256 -293.997803 -293.811768 -297.848114 -297.679840 -296.229767 -297.805847 -298.246338 -298.610748 -298.795074 -298.993256 -299.197052 -299.319885 -299.410400 -299.510345 -299.638916 -299.792816 -299.949402 -300.089508 -300.193695 -300.253510 -300.248932 -300.210236 -300.176941 -300.150055 -300.128815 -300.085510 -300.008911 -299.925781 -299.810028 -299.617035 -299.416351 -299.273254 -299.137421 -298.957794 -298.735352 -298.512299 -298.268036 -298.017822 -297.734100 -297.378387 -297.002258 -296.614990 -296.246155 -295.897644 -295.553833 -295.156189 -294.706512 -294.242737 -293.717377 -293.096924 -292.401550 -291.764648 -291.064331 -290.175049 -289.532898 -289.153351 -288.543152 -289.466705 -291.497406 -292.662476 -295.351532 -296.947144 -296.876251 -295.196106 -294.099640 -293.581512 -294.036346 -293.850464 -298.094604 -298.019592 -297.743195 -297.355652 -296.971649 -296.645142 -296.289825 -295.889893 -295.468842 -294.979370 -294.442657 -293.911285 -293.409210 -293.818024 -293.303558 -292.982178 -292.942963 -293.066376 -293.601410 -292.981567 -291.411591 -291.014893 -291.320923 -289.883698 -289.632294 -290.142731 -293.214417 -295.427155 -298.418671 -298.386078 -298.150696 -297.955322 -297.976013 -298.067474 -298.097351 -298.080566 -298.071228 -298.093079 -298.155518 -298.236481 -298.299805 -298.366486 -298.499634 -298.726715 -298.997711 -299.228119 -299.343781 -299.367828 -299.328156 -299.239929 -299.131744 -299.072968 -299.136292 -299.337952 -299.558075 -299.748383 -299.867981 -299.884064 -299.804840 -299.674347 -294.436646 -295.925934 -298.722015 -298.521118 -298.380524 -298.524750 -298.951508 -299.398102 -299.756836 -300.092804 -300.359436 -300.492096 -300.547485 -300.605133 -300.687103 -300.811859 -300.975464 -301.090240 -301.121368 -301.125488 -301.121094 -301.091858 -301.041077 
-300.941406 -300.849762 -300.740448 -300.638550 -300.557312 -300.445831 -300.252197 -300.023224 -299.795044 -299.564819 -299.315613 -299.052948 -298.808075 -298.549286 -298.304016 -298.068451 -297.813324 -297.513580 -297.152588 -296.779816 -296.402832 -296.039032 -295.645386 -295.200439 -294.707031 -294.151306 -293.454132 -292.686432 -291.966217 -291.118134 -290.208588 -291.867218 -288.441132 -291.549866 -293.822388 -293.409363 -295.232666 -296.244720 -296.529022 -295.597168 -294.927734 -293.896698 -293.914825 -294.143707 -294.101379 -298.530487 -298.553223 -298.376465 -298.061737 -297.775360 -297.517792 -297.175476 -296.737152 -296.271881 -295.812347 -295.328888 -294.840424 -294.356171 -294.849976 -294.495758 -294.369019 -294.419891 -294.389282 -296.702820 -296.701843 -294.873444 -293.261902 -292.115265 -290.991699 -290.919830 -291.944305 -293.715118 -294.807800 -298.385895 -298.360077 -298.266144 -298.264862 -298.426727 -298.657837 -298.826843 -298.914795 -298.973907 -299.008301 -299.042084 -299.127594 -299.252747 -299.388611 -299.548096 -299.759521 -299.968475 -300.095337 -300.115570 -300.077087 -300.008484 -299.921997 -299.805298 -299.720032 -299.694244 -299.790039 -299.939270 -300.092468 -300.190369 -300.167938 -300.033783 -299.817902 -299.547821 -299.262909 -299.121460 -299.060883 -299.054169 -299.260773 -299.715698 -300.262878 -300.690125 -301.007721 -301.227936 -301.362366 -301.412109 -301.446198 -301.468475 -301.513672 -301.579559 -301.587280 -301.578094 -301.585266 -301.589722 -301.543732 -301.427765 -301.279572 -301.183044 -301.061951 -300.931824 -300.821411 -300.705383 -300.520142 -300.278015 -300.002563 -299.727478 -299.457153 -299.181305 -298.927155 -298.659973 -298.383179 -298.148224 -297.940491 -297.693695 -297.383484 -297.042175 -296.678162 -296.312073 -295.939240 -295.549225 -295.109314 -294.595154 -293.932190 -293.159241 -292.276398 -291.246948 -290.527222 -290.844757 -292.866821 -294.952667 -294.463715 -295.459808 -296.820953 -296.431366 -296.026550 -296.129181 -296.438690 -296.309692 -295.760498 -294.690613 -294.424835 -294.939087 -298.951904 -298.915558 -298.761353 -298.539886 -298.237610 -297.859161 -297.429321 -297.006500 -296.604462 -296.185364 -295.742310 -295.291626 -295.585022 -295.438812 -295.431396 -295.370575 -295.063904 -295.051025 -297.901093 -296.928192 -295.357056 -294.413666 -292.460754 -292.355377 -291.997864 -293.243713 -298.372223 -298.398224 -298.389252 -298.433289 -298.637238 -298.958496 -299.286743 -299.560028 -299.747833 -299.878876 -299.955231 -299.999329 -300.057068 -300.166656 -300.297455 -300.420776 -300.551239 -300.660248 -300.721649 -300.746613 -300.751587 -300.739624 -300.718964 -300.692474 -300.636200 -298.146362 -300.448029 -300.467804 -300.508972 -300.450531 -300.322906 -300.140228 -299.892792 -299.643433 -299.539795 -299.669800 -298.644928 -298.981384 -298.401611 -300.853882 -301.260071 -301.523865 -301.691528 -301.829742 -301.922119 -301.932831 -301.899170 -301.862579 -301.847168 -301.836670 -301.796204 -301.717285 -301.661865 -301.681000 -301.599548 -301.420532 -301.245941 -301.163177 -301.059204 -300.915802 -300.768433 -300.612946 -300.425232 -300.170776 -299.894012 -299.631622 -299.364594 -299.085083 -298.827881 -298.569702 -298.279755 -297.998291 -297.752045 -297.485168 -297.171143 -296.827484 -296.486389 -296.165649 -295.836121 -295.495575 -295.146515 -294.767914 -294.285492 -293.583832 -292.604645 -291.629822 -290.003235 -291.972076 -293.050751 -294.747528 -295.297913 -295.996826 -297.379303 -296.489075 -296.385681 -296.562378 
-297.315918 -297.450073 -297.226807 -296.610352 -296.064087 -296.817078 -299.230499 -299.204987 -299.094727 -298.833832 -298.462585 -298.056274 -297.647278 -297.278351 -296.922943 -296.539581 -296.179016 -295.849670 -296.188385 -296.230957 -296.305847 -296.200226 -295.818115 -293.745544 -296.478424 -298.015778 -297.295624 -296.032288 -294.108246 -292.908051 -290.448212 -292.081360 -293.869385 -298.484863 -298.511108 -298.670044 -299.011932 -299.472656 -299.930847 -300.301666 -300.545959 -300.681793 -300.760529 -300.815216 -300.852631 -300.890808 -300.945923 -300.999542 -301.047516 -301.097687 -301.172241 -301.252472 -301.310516 -301.361755 -301.414307 -298.886078 -301.327698 -301.122040 -300.961761 -300.972137 -300.996948 -300.860260 -300.607086 -300.433746 -300.319733 -300.252838 -297.837189 -297.504211 -297.450104 -297.046417 -301.515320 -301.813141 -301.976807 -302.027954 -302.049408 -302.090820 -302.114410 -302.085144 -301.995117 -301.917938 -301.867828 -301.819946 -301.754913 -301.617676 -301.507629 -301.487335 -301.409485 -301.242828 -301.064301 -300.961517 -300.867676 -300.716003 -300.532288 -300.354187 -300.108795 -299.772369 -299.454041 -299.229858 -298.978668 -298.740509 -298.523071 -298.258331 -297.932709 -297.613953 -297.301971 -296.984436 -296.634460 -296.262573 -295.912628 -295.610870 -295.362579 -295.132507 -294.904999 -294.691620 -294.460175 -294.066833 -293.508911 -293.237854 -292.770294 -293.455933 -295.067780 -296.574921 -296.820312 -297.472046 -299.048889 -298.478302 -297.940918 -297.527893 -298.116028 -298.255127 -298.978210 -299.759552 -298.140076 -299.491119 -299.376862 -299.260010 -299.102356 -298.787567 -298.376678 -297.944733 -297.529449 -297.185455 -296.857819 -296.540405 -296.331665 -296.219971 -297.096100 -297.260681 -297.371490 -297.273743 -294.124146 -294.905548 -295.343781 -297.027405 -298.362915 -297.244537 -294.089355 -292.797302 -289.382599 -291.934235 -295.737335 -298.635895 -298.620300 -298.822876 -299.196655 -299.733887 -300.326233 -300.804382 -301.085083 -301.239471 -301.305420 -301.321899 -301.296265 -301.269958 -301.240692 -301.211151 -301.205780 -301.248138 -301.345734 -301.473511 -301.592468 -301.709106 -298.290070 -299.702240 -301.593018 -301.433136 -298.011414 -297.947845 -301.547791 -298.350342 -301.144104 -300.966492 -301.034668 -297.685272 -301.403503 -301.625183 -301.781311 -301.912659 -302.060730 -302.219849 -302.264252 -302.225891 -302.166870 -302.125977 -302.093994 -302.039490 -301.956177 -301.837952 -301.776398 -301.724792 -301.624329 -301.473602 -301.354553 -301.272644 -301.189209 -301.075592 -300.926025 -300.791046 -300.687805 -300.546295 -300.361298 -300.123016 -299.832947 -299.507019 -299.170135 -298.917542 -298.684113 -298.466858 -298.272705 -297.991882 -297.692749 -297.419037 -297.112396 -296.759796 -296.381317 -296.034821 -295.738892 -295.501587 -295.370972 -295.329529 -295.285034 -295.263550 -295.315277 -295.409241 -295.586029 -296.123505 -294.539124 -294.463440 -296.096466 -298.150757 -297.633392 -297.931976 -298.936829 -299.181793 -298.539917 -298.741241 -299.055450 -298.954681 -300.330414 -300.038605 -299.891022 -299.768799 -299.549622 -299.321991 -299.121460 -298.798981 -298.392334 -297.956360 -297.581573 -297.280609 -296.991760 -296.781006 -296.769409 -296.915405 -297.958008 -298.229858 -298.418762 -298.364349 -294.698730 -294.817871 -295.047760 -296.540619 -297.193054 -296.918610 -295.346863 -294.692749 -293.746643 -294.313599 -297.404724 -297.895386 -297.997803 -298.774231 -299.115723 -299.658356 -300.320587 -300.905182 
-301.289093 -301.509888 -301.579376 -301.535309 -301.419678 -301.308380 -301.221710 -301.147614 -301.145325 -301.206543 -301.315613 -301.486938 -301.719574 -301.943115 -296.955872 -301.899109 -301.758362 -301.716949 -297.252502 -297.132080 -297.507355 -301.880432 -301.650360 -301.483032 -301.557312 -301.747192 -301.922791 -302.030487 -302.077942 -302.136292 -302.225861 -302.306854 -302.341888 -302.317413 -302.256439 -302.182434 -302.116669 -302.048431 -301.989655 -301.911560 -301.832977 -301.735291 -301.638184 -301.503204 -301.389008 -301.275726 -301.186340 -301.092407 -300.972198 -300.833649 -300.712616 -300.593262 -300.451477 -300.222656 -299.958191 -299.701172 -299.452179 -299.226105 -298.956177 -298.703766 -298.523407 -298.315002 -298.115997 -297.939056 -297.704651 -297.429291 -297.163818 -296.968384 -296.837677 -296.758698 -296.812164 -296.990326 -297.169525 -297.297852 -297.443085 -297.709137 -298.099762 -298.669189 -295.777008 -294.405334 -295.476257 -297.982971 -297.852753 -296.755646 -297.511261 -298.853027 -299.153625 -298.848236 -298.858795 -300.835052 -300.610046 -300.446320 -300.363098 -300.204926 -299.928375 -299.649719 -299.458649 -299.221252 -298.885864 -298.519257 -298.250305 -298.056519 -297.861603 -297.673065 -297.571045 -297.716766 -298.158813 -298.555237 -298.881836 -294.619904 -294.836426 -295.747772 -297.301727 -297.678925 -296.904419 -296.889771 -298.362396 -298.671906 -297.682648 -295.565277 -293.900085 -294.643188 -296.873535 -297.309448 -298.834442 -299.245850 -299.893921 -300.604584 -301.148010 -301.451508 -301.531158 -301.473755 -301.294128 -301.088501 -300.947693 -300.964783 -301.049866 -301.146973 -301.249146 -301.424500 -301.723328 -302.029938 -298.760773 -301.975098 -301.862762 -301.847748 -301.908936 -298.265533 -299.400574 -301.991730 -301.886993 -301.798615 -301.815735 -301.906067 -302.023804 -302.104004 -302.129761 -302.162628 -302.239197 -302.311462 -302.360107 -302.380005 -302.378113 -302.354065 -302.293152 -302.218323 -302.148682 -302.082397 -301.998260 -301.888214 -301.731445 -301.599121 -301.505127 -301.422089 -301.363617 -301.305145 -301.226501 -301.116119 -301.007477 -300.916412 -300.844299 -300.711029 -300.526978 -300.342773 -300.209686 -300.075500 -299.861359 -299.642578 -299.507996 -299.402771 -299.267975 -299.127441 -299.000916 -298.934113 -298.914062 -298.895355 -298.857574 -298.862366 -298.974548 -299.161285 -299.315521 -299.394348 -299.438477 -299.565155 -299.790253 -300.063110 -300.340698 -294.683319 -294.706940 -298.071381 -298.361664 -297.090668 -297.561737 -298.202148 -300.460358 -299.718323 -301.159485 -301.116547 -301.051514 -300.992950 -300.888519 -300.701996 -300.450531 -300.208069 -300.047577 -299.910065 -299.706726 -299.447601 -299.257141 -299.136749 -298.965515 -298.581024 -298.113800 -297.990295 -296.335297 -297.848236 -296.904510 -295.924194 -297.890717 -300.567017 -299.672394 -298.758606 -297.212311 -297.660400 -300.895203 -301.447784 -298.483917 -292.571472 -291.851959 -293.469208 -295.956665 -299.247742 -298.763031 -298.772339 -299.248383 -300.010040 -300.680542 -301.056305 -301.198334 -301.184021 -300.990021 -300.718750 -300.691803 -298.045074 -301.138367 -301.217407 -301.268280 -301.376343 -301.609406 -301.908020 -302.040894 -301.973267 -301.861389 -301.808960 -301.840576 -301.924774 -301.977783 -301.974518 -301.961761 -301.963165 -301.956757 -301.975983 -302.035706 -302.092255 -302.115234 -302.150848 -302.227173 -302.301361 -302.367706 -302.420074 -302.461823 -302.502014 -302.488037 -302.409424 -302.297394 -302.195587 
-302.078461 -301.924042 -301.755096 -301.602539 -301.493073 -301.450775 -301.441589 -301.436310 -301.388397 -301.308044 -301.223572 -301.155914 -301.122314 -301.086975 -300.987915 -300.858612 -300.770966 -300.714172 -300.641968 -300.566376 -300.481262 -300.388306 -300.277069 -300.189301 -300.147308 -300.169434 -300.220123 -300.250305 -300.262299 -300.299683 -300.383331 -300.461975 -300.481781 -300.457520 -300.453156 -300.577240 -300.775696 -300.924866 -301.056152 -296.442230 -296.018890 -298.283173 -299.887909 -299.382843 -299.440094 -299.333771 -301.462677 -301.409149 -301.413177 -301.453461 -301.425568 -301.298767 -301.122833 -300.947937 -300.758911 -300.585632 -300.433044 -300.315979 -300.193939 -300.029327 -299.897705 -299.779358 -296.480194 -296.359222 -295.964600 -295.338531 -297.145325 -297.757751 -298.607147 -299.284363 -300.183014 -300.698364 -301.343536 -300.280518 -298.442169 -297.867249 -300.332153 -302.059967 -297.745148 -293.873230 -292.992645 -296.687408 -297.237000 -300.133179 -298.173553 -298.539307 -298.746063 -299.390381 -300.055908 -300.548645 -300.837769 -300.914886 -300.737610 -300.533051 -300.132355 -301.193481 -301.408173 -301.414490 -301.371338 -301.360077 -301.468903 -301.745941 -301.958130 -301.966095 -300.648376 -301.734589 -301.742157 -301.849640 -301.954376 -301.991852 -302.030487 -302.113129 -302.046600 -302.041595 -302.060760 -302.091370 -302.122192 -302.134216 -302.208038 -302.311188 -302.393433 -302.431213 -302.448090 -302.498199 -302.482086 -302.335510 -302.238098 -302.149994 -302.024628 -301.871704 -301.707947 -301.523163 -301.368622 -301.301483 -301.276306 -301.258118 -301.206116 -301.135162 -301.084351 -301.057220 -301.048676 -301.037903 -300.981293 -300.911896 -300.861694 -300.846191 -300.851807 -300.866669 -300.820007 -300.758820 -300.732819 -300.759918 -300.778961 -300.797821 -300.846802 -300.913788 -300.978363 -301.060852 -301.148621 -301.213257 -301.228180 -301.200897 -301.183777 -301.283844 -301.434235 -301.495728 -301.495605 -301.468353 -299.373444 -299.477448 -300.454193 -301.025909 -300.564484 -301.696198 -301.634003 -301.536865 -301.484222 -301.467804 -301.359985 -301.171204 -300.985474 -300.848083 -300.743073 -300.635132 -300.514984 -300.400604 -300.331543 -300.297729 -300.241089 -298.342529 -298.234650 -298.262543 -297.953857 -297.440887 -302.950104 -301.960205 -301.894623 -301.283752 -302.746124 -301.734589 -304.326141 -303.492493 -303.233337 -302.275238 -303.477081 -302.829132 -299.862762 -296.475189 -297.584595 -297.827118 -302.988068 -301.515656 -299.585388 -298.464966 -298.407410 -298.880249 -299.521362 -300.122040 -300.563416 -300.733032 -300.630615 -298.753418 -301.031494 -301.488800 -301.645325 -301.596344 -301.490417 -301.385498 -301.418121 -297.992859 -297.768768 -299.624847 -301.261780 -301.815125 -301.780640 -301.857819 -301.967255 -302.033325 -302.174561 -302.212067 -302.166595 -302.270142 -302.308228 -302.319794 -302.330353 -302.257812 -302.326233 -302.378937 -302.483704 -302.386230 -302.341888 -302.202850 -302.307983 -302.155945 -302.001678 -301.957367 -301.889771 -301.787354 -301.650574 -301.452393 -301.280975 -301.156830 -301.019775 -300.891083 -300.794678 -300.707764 -300.635742 -300.615875 -300.602325 -300.571838 -300.537506 -300.521027 -300.523376 -300.555786 -300.625702 -300.694580 -300.704071 -300.712952 -300.784058 -300.892120 -300.982758 -301.062439 -301.174438 -301.296631 -301.466248 -301.658447 -301.822784 -301.931976 -301.992004 -301.972717 -301.916382 -301.876129 -298.363373 -301.768646 -301.660309 -301.549438 
-301.445129 -301.448120 -301.540558 -301.621704 -301.674805 -301.653870 -301.548645 -301.398407 -301.235779 -301.061981 -300.851135 -300.651184 -300.496338 -300.398438 -300.379730 -300.382294 -300.331085 -300.254364 -300.294647 -300.486237 -300.524536 -300.744110 -301.927887 -303.189362 -303.469482 -304.991486 -306.292419 -307.250702 -307.270142 -303.888580 -304.452728 -305.322388 -306.232147 -305.622467 -303.816803 -305.418213 -306.091248 -306.394989 -303.986298 -300.683990 -298.290253 -304.405518 -300.126129 -301.697235 -298.898529 -298.484833 -298.119751 -298.499603 -299.220123 -299.906647 -300.409576 -300.640686 -300.645325 -299.342163 -300.652435 -301.654053 -301.788513 -301.740967 -301.605499 -301.455872 -301.445831 -299.277863 -299.184021 -299.939728 -301.156616 -302.046051 -301.922821 -301.924805 -302.022858 -301.534454 -302.301147 -302.147858 -302.100922 -302.343445 -302.468689 -302.444641 -302.386444 -302.327240 -302.388641 -302.476654 -302.550323 -302.463654 -302.375763 -302.160065 -302.113159 -302.134644 -302.024933 -301.863434 -301.770905 -301.693268 -301.568207 -301.400452 -301.247375 -301.083923 -300.866516 -300.655548 -300.510071 -300.355103 -300.187103 -300.046173 -299.928070 -299.808868 -299.727173 -299.683563 -299.682770 -299.752655 -299.881470 -299.989075 -300.047241 -300.158356 -300.348114 -300.546051 -300.754181 -300.969788 -301.202576 -301.463287 -301.811096 -302.140350 -302.338104 -302.385773 -302.411072 -299.930969 -299.098663 -298.610138 -299.703888 -301.984924 -301.858398 -301.744293 -301.659882 -301.656219 -301.716339 -301.727631 -301.643127 -301.501770 -301.319214 -301.105316 -300.871155 -300.595795 -300.327515 -300.110657 -299.924622 -299.785736 -299.740417 -299.759491 -299.720428 -299.658936 -299.824097 -300.265076 -300.287964 -304.994446 -306.411713 -304.711365 -304.706909 -305.189178 -305.429932 -306.707062 -307.168304 -304.022766 -304.071625 -304.671631 -304.795380 -305.136932 -305.385864 -306.360565 -307.219391 -307.541229 -307.109131 -303.669342 -304.681427 -301.656586 -303.716614 -304.827911 -305.471313 -302.574463 -298.533691 -298.461487 -299.215485 -299.959534 -300.435822 -300.680298 -298.407349 -299.681274 -300.204529 -301.612762 -301.851196 -301.859283 -301.718475 -301.528625 -299.713715 -299.896881 -301.067444 -300.469025 -302.386993 -302.138519 -301.942841 -301.935272 -302.003845 -302.069397 -302.191589 -302.030975 -301.915894 -302.050323 -302.359009 -302.424957 -302.328308 -302.346283 -302.274292 -302.550568 -302.655701 -302.585358 -302.488281 -302.431732 -302.255249 -302.181274 -302.004639 -301.933136 -301.796265 -301.633331 -301.489166 -301.335785 -301.188263 -301.011627 -300.782471 -300.537750 -300.350555 -300.162323 -299.901581 -299.602386 -299.327454 -299.078857 -298.877045 -298.717834 -298.586731 -298.546204 -298.590271 -298.631012 -298.681763 -298.835938 -299.115112 -299.431183 -299.814697 -300.267029 -300.812225 -301.452179 -302.086639 -302.475098 -297.745239 -298.293030 -299.198151 -300.848663 -300.831238 -302.308929 -302.322662 -302.288849 -302.227081 -302.099915 -301.993073 -299.521118 -301.811493 -301.698639 -301.523468 -301.340729 -301.146545 -300.912567 -300.648224 -300.352570 -300.063904 -299.800812 -299.536438 -299.297913 -299.129242 -298.982422 -298.840759 -298.725952 -298.814728 -299.103912 -299.030212 -307.551544 -305.832031 -304.066986 -305.053955 -305.198090 -306.606659 -305.583832 -305.263794 -303.308533 -301.670532 -300.473450 -300.460724 -301.296204 -302.777374 -304.766205 -306.551849 -307.132446 -307.905090 -303.746979 
-304.186920 -305.452393 -304.273834 -306.626068 -309.547180 -307.058472 -304.161865 -299.372681 -299.794983 -300.264435 -300.601837 -300.766418 -299.383820 -299.647125 -300.793854 -302.733887 -303.889862 -302.660675 -301.650482 -300.908539 -299.962219 -298.238312 -298.449005 -298.282166 -298.399414 -299.606323 -301.643585 -301.646271 -301.701202 -301.953583 -302.131195 -302.066498 -301.888245 -301.794067 -301.922272 -302.031982 -302.096619 -302.163361 -302.117279 -302.352814 -302.559052 -302.605804 -302.409027 -302.291321 -302.155762 -302.169586 -301.934204 -301.886688 -301.690948 -301.501740 -301.335632 -301.165314 -300.990417 -300.801483 -300.592255 -300.373291 -300.198608 -300.000092 -299.696869 -299.302063 -298.921387 -298.566498 -298.246185 -297.926147 -297.612335 -297.358337 -297.182861 -297.050751 -296.996887 -297.080872 -297.353638 -297.768982 -298.268219 -298.978363 -300.082367 -301.408264 -302.360596 -298.118622 -294.244843 -296.106506 -302.224121 -302.274139 -302.296661 -301.764587 -302.498749 -302.589142 -302.520844 -302.296967 -302.120667 -301.973114 -301.837463 -301.590363 -301.357880 -301.209198 -301.039917 -300.811737 -300.551208 -300.261902 -299.971466 -299.669922 -299.347351 -299.044586 -298.741852 -298.447113 -298.191132 -297.967957 -297.758209 -297.408081 -300.488251 -307.019409 -305.900665 -304.456787 -302.292206 -304.204651 -305.554138 -305.084290 -302.931793 -300.088287 -298.077545 -297.818085 -298.994171 -299.939545 -301.370789 -303.266266 -304.862366 -305.910950 -306.007721 -302.855713 -302.294617 -302.692566 -304.474396 -307.378143 -309.101074 -305.109131 -303.951965 -301.303009 -300.894928 -300.596680 -300.610229 -302.289093 -300.998169 -301.370148 -302.172791 -303.530121 -302.795898 -302.661041 -302.381165 -300.854584 -299.559937 -295.856201 -295.160126 -296.914185 -297.370667 -298.733215 -300.440430 -300.495270 -300.865448 -301.251221 -301.772980 -301.928070 -301.713470 -301.663422 -301.742584 -301.824524 -301.855896 -301.861176 -302.001709 -302.088562 -302.106781 -302.177582 -302.106750 -302.025757 -301.824158 -301.735046 -301.570129 -301.479187 -301.329773 -301.182037 -301.021759 -300.842377 -300.664154 -300.482758 -300.296204 -300.126831 -299.973083 -299.782440 -299.479645 -299.088623 -298.682190 -298.266663 -297.847046 -297.424438 -296.974701 -296.507599 -296.106567 -295.779877 -295.565979 -295.518433 -295.691528 -296.032562 -296.487213 -297.412872 -299.289673 -301.439972 -299.616730 -295.322235 -294.984894 -296.014008 -302.223236 -302.391083 -302.445831 -302.499176 -302.598053 -302.592621 -302.481476 -302.189758 -302.033539 -301.930634 -301.813293 -301.535919 -301.269684 -301.142700 -300.988953 -300.774994 -300.516724 -300.249390 -299.951752 -299.632599 -299.304840 -298.963226 -298.612610 -298.259613 -297.928040 -297.586639 -297.164337 -296.462219 -295.597900 -305.801086 -305.203827 -303.030365 -300.201935 -302.094727 -304.296631 -304.083130 -301.402374 -298.825562 -297.090698 -297.329010 -298.553925 -299.361176 -300.682343 -302.527252 -303.795166 -304.806396 -302.247467 -300.071289 -300.968719 -300.671021 -302.833374 -305.192139 -304.987854 -305.222412 -300.102448 -303.753662 -302.198090 -302.358887 -303.937042 -305.698364 -305.730103 -304.368225 -302.199890 -299.997284 -297.279663 -294.609161 -293.448578 -294.210938 -293.251495 -291.978058 -293.178925 -297.029999 -298.598907 -299.478485 -300.304443 -300.666382 -300.146362 -300.928467 -301.330566 -301.730316 -301.655640 -301.550629 -301.542847 -301.627533 -301.547485 -301.419983 -301.534973 -301.696503 
-301.732452 -301.683838 -301.622406 -301.597443 -301.478760 -301.398590 -301.258453 -301.051239 -300.877502 -300.700684 -300.548950 -300.408630 -300.263519 -300.108765 -299.956360 -299.798889 -299.642944 -299.466553 -299.213593 -298.877106 -298.500977 -298.071960 -297.568787 -297.061066 -296.539642 -295.981812 -295.450256 -294.981995 -294.598358 -294.337311 -294.266632 -294.402374 -294.765717 -295.857178 -298.258820 -299.574524 -299.851685 -297.551666 -295.929596 -299.374786 -302.211975 -302.517670 -302.544922 -302.523834 -302.706726 -300.810455 -302.437469 -302.094025 -301.893555 -301.788910 -301.667603 -301.525604 -301.376099 -301.184784 -300.953583 -300.745758 -300.517639 -300.249725 -299.958344 -299.646912 -299.328400 -298.996826 -298.633453 -298.256775 -297.873810 -297.448761 -296.966095 -296.310883 -295.497681 -294.826111 -303.483643 -302.670074 -301.004486 -302.024200 -300.528046 -300.934143 -299.461792 -297.651245 -296.781342 -296.834473 -297.592316 -295.958801 -298.043488 -299.560181 -300.446259 -300.791656 -302.470917 -302.831207 -301.594849 -302.681580 -303.752014 -301.188690 -297.052612 -295.676270 -295.371277 -297.403412 -297.658722 -298.580780 -300.751312 -305.886810 -306.856689 -302.590057 -295.740997 -291.926331 -289.706146 -287.588928 -288.504944 -286.694946 -284.176422 -285.776306 -291.207489 -296.253693 -299.591248 -299.820648 -300.645782 -300.900665 -300.941986 -300.726013 -300.972839 -301.371735 -301.515747 -301.512512 -301.448303 -301.430603 -301.324860 -301.105743 -301.047913 -301.136810 -301.102142 -301.042603 -301.005859 -300.955414 -300.855164 -300.767731 -300.679901 -300.485046 -300.293335 -300.068817 -299.926544 -299.824188 -299.721252 -299.605682 -299.466248 -299.309998 -299.138275 -298.987671 -298.779083 -298.492004 -298.174805 -297.789124 -297.287048 -296.741760 -296.183563 -295.634827 -295.057831 -294.472809 -293.888153 -293.357025 -293.004425 -292.975403 -293.414429 -294.606537 -299.408722 -303.931549 -301.345581 -300.344269 -299.941528 -301.003784 -301.121887 -302.407562 -299.685211 -302.230103 -302.175323 -300.012115 -302.022736 -301.653168 -301.415039 -301.393433 -301.302979 -301.272980 -301.246918 -301.124603 -300.858521 -300.647919 -300.455963 -300.166931 -299.897644 -299.609375 -299.312317 -299.010864 -298.672241 -298.293457 -297.878113 -297.426453 -296.926544 -296.369690 -295.720886 -295.035431 -294.563568 -299.421509 -299.418671 -299.967041 -295.743347 -296.960297 -297.388367 -297.366272 -297.061401 -297.153778 -299.180267 -299.112885 -295.713867 -298.707306 -298.949524 -299.535736 -300.362823 -302.668213 -304.464172 -304.334137 -301.749329 -296.251770 -293.219177 -290.234589 -291.887909 -291.941986 -293.843719 -293.789978 -296.581726 -299.465546 -299.401306 -292.697479 -286.152710 -282.887421 -282.326080 -281.362305 -283.588501 -284.254822 -284.451477 -285.828522 -289.790619 -295.984558 -298.658539 -299.961548 -301.563660 -302.966858 -302.674316 -300.817505 -299.330994 -300.347107 -300.663452 -300.840637 -301.144684 -300.920624 -300.931885 -300.776337 -300.557678 -300.480438 -300.359344 -300.203186 -300.087677 -299.974396 -299.814240 -299.655731 -299.608490 -299.357880 -299.236420 -299.151184 -299.047577 -298.967377 -298.884277 -298.792328 -298.680176 -298.544495 -298.396149 -298.251953 -298.079956 -297.842041 -297.578918 -297.259003 -296.858124 -296.368866 -295.839569 -295.285065 -294.656311 -293.953918 -293.179535 -292.359772 -291.739960 -291.716156 -292.436371 -299.293396 -303.045380 -302.220215 -301.706573 -301.432037 -303.447418 -304.893005 
-303.477478 -301.021393 -300.019287 -300.730408 -301.343048 -299.311371 -301.796967 -301.394409 -300.884247 -300.690247 -300.628418 -300.672028 -300.647308 -300.545319 -300.414368 -300.253937 -300.102478 -299.891937 -299.669891 -299.390625 -299.106354 -298.812347 -298.507538 -298.152679 -297.753204 -297.339355 -296.872894 -296.365814 -295.813263 -295.252930 -294.813446 -295.113556 -294.755402 -294.986511 -293.457764 -294.789520 -295.631317 -296.693237 -298.509827 -298.828979 -298.924225 -298.737305 -298.290527 -298.073303 -298.437286 -299.283875 -300.108398 -300.171936 -303.906677 -302.970551 -299.990601 -295.965179 -293.225952 -292.254669 -291.845520 -291.572479 -290.479004 -291.076782 -291.170135 -289.754425 -287.061584 -284.064606 -282.807343 -281.942200 -282.732330 -283.481079 -285.574219 -286.276459 -286.455200 -287.185242 -292.246735 -295.703247 -299.095001 -301.855347 -303.831299 -305.358856 -305.079865 -296.845276 -298.094452 -298.024231 -298.707458 -297.712677 -297.699677 -297.421448 -299.666870 -300.068420 -299.807373 -299.683075 -299.562866 -299.238831 -298.982880 -298.882294 -298.664062 -298.386261 -298.279449 -298.087585 -297.932007 -297.808472 -297.718628 -297.657745 -297.593414 -297.531006 -297.457092 -297.369171 -297.265411 -297.137695 -296.992615 -296.809448 -296.604431 -296.359558 -296.051819 -295.667145 -295.213531 -294.706940 -294.082794 -293.326508 -292.364288 -291.200378 -290.285461 -290.281281 -296.628448 -302.099487 -302.837402 -299.096619 -298.140900 -299.401306 -301.917572 -304.040771 -302.881744 -302.096375 -302.703064 -302.649506 -301.481323 -300.612274 -299.112152 -300.565369 -300.406586 -300.443329 -300.174561 -300.002136 -299.867218 -299.723511 -299.579529 -299.511261 -299.454163 -299.249054 -299.089081 -298.867004 -298.604797 -298.299744 -297.961823 -297.601471 -297.212280 -296.863586 -296.512878 -296.102905 -295.617584 -295.147766 -294.787354 -294.624817 -293.441010 -291.247864 -296.962402 -297.419434 -297.569366 -297.754547 -298.037659 -298.274658 -298.333374 -298.130402 -292.202515 -297.337585 -293.012238 -292.849945 -292.584045 -293.673340 -296.287567 -295.282288 -294.423798 -295.498810 -297.950409 -298.175171 -295.257050 -293.142242 -293.708221 -292.763275 -290.063263 -287.023499 -284.275604 -284.955780 -287.595520 -287.350311 -285.841980 -285.082092 -287.199707 -289.274048 -290.592010 -291.785828 -294.066223 -297.106903 -300.612457 -302.468231 -301.962524 -301.741547 -300.114594 -295.991669 -295.684601 -298.056335 -296.704651 -297.592194 -298.177246 -298.155609 -297.360809 -297.187531 -297.553680 -297.506653 -297.397400 -297.204163 -296.904327 -296.828461 -296.674347 -296.485809 -296.193726 -296.024963 -295.892487 -295.771759 -295.726440 -295.715271 -295.677216 -295.625305 -295.585480 -295.562775 -295.522522 -295.464691 -295.385010 -295.262726 -295.111084 -294.926727 -294.710815 -294.460602 -294.142456 -293.746460 -293.250977 -292.577240 -291.545502 -290.019623 -288.795898 -293.294312 -296.716797 -301.417419 -300.553680 -297.979523 -293.763580 -296.035980 -299.090302 -301.215851 -300.486481 -299.572998 -300.784698 -300.622650 -301.042603 -300.069733 -300.128326 -299.823395 -297.870361 -299.566223 -300.084076 -300.150818 -299.793762 -299.422485 -299.226257 -299.087860 -298.920258 -298.553619 -298.254639 -298.013672 -297.763245 -297.462250 -297.078156 -296.639038 -296.237274 -295.967529 -295.712311 -295.407898 -295.014526 -294.542755 -294.102631 -293.952332 -293.632782 -292.706757 -291.928467 -296.675720 -296.803223 -296.976410 -297.242493 -297.468933 
-291.098633 -288.933533 -289.673676 -289.768768 -289.757263 -288.766327 -289.317688 -288.643738 -288.186707 -289.885406 -291.209106 -295.085297 -295.881714 -295.919769 -298.113831 -297.031067 -296.097412 -295.568542 -293.139465 -289.733368 -287.455688 -288.870972 -290.752258 -292.561249 -289.734314 -288.709412 -288.439880 -289.886993 -290.282013 -290.775330 -294.823029 -297.306274 -298.111908 -297.630402 -296.591217 -294.211029 -293.678894 -293.853851 -295.818726 -296.959595 -294.972137 -294.920410 -295.261688 -296.304810 -297.691895 -293.429901 -293.354797 -293.962189 -294.326874 -294.001648 -293.212128 -292.885620 -292.854248 -293.141815 -293.225098 -292.951263 -292.860260 -292.838898 -292.858704 -292.897797 -292.904694 -292.906494 -292.918793 -292.971527 -293.030823 -293.067688 -293.084381 -293.069885 -292.990021 -292.882599 -292.805023 -292.724884 -292.602661 -292.403137 -292.143555 -291.742065 -290.891327 -289.303528 -288.964691 -291.270477 -295.180328 -298.883453 -297.914490 -297.807220 -294.924835 -294.526031 -296.403503 -297.687805 -296.973236 -296.265656 -297.214569 -297.441559 -297.381683 -295.895264 -296.624512 -297.034851 -296.638397 -293.850189 -293.972321 -296.441101 -297.800964 -297.975922 -298.107758 -297.617462 -296.927521 -296.946198 -297.153534 -296.795349 -296.556213 -296.398682 -296.060455 -295.588989 -295.126923 -294.875397 -294.595490 -294.312897 -294.001038 -293.567261 -293.030029 -293.302765 -293.158264 -292.883148 -292.102203 -293.242157 -293.503326 -295.974396 -293.326263 -291.883423 -291.435364 -291.310089 -290.428009 -289.687103 -289.667664 -295.529694 -295.652832 -295.685120 -295.605255 -290.002838 -291.046387 -292.908020 -294.577454 -297.472656 -297.264191 -293.076965 -294.451324 -296.201508 -295.230469 -293.910583 -292.614014 -292.236023 -292.400574 -292.011597 -292.195038 -293.238190 -293.823303 -292.376526 -291.053253 -291.006470 -293.480103 -296.285370 -296.188934 -295.093689 -294.221558 -293.329132 -290.981873 -291.743103 -293.265076 -292.926727 -292.801697 -291.811157 -291.960236 -293.124512 -292.822876 -293.127563 -288.209167 -287.934418 -288.749634 -289.730347 -289.755615 -289.440063 -288.744751 -288.182373 -288.179321 -288.521973 -288.949493 -289.255035 -289.366516 -289.427002 -289.503326 -289.595123 -289.698456 -289.833130 -289.978058 -290.093994 -290.195343 -290.311951 -290.396301 -290.444489 -290.515350 -290.624664 -290.727020 -290.780121 -290.806305 -290.755127 -290.307953 -289.069183 -285.902466 -286.913635 -289.391418 -293.178589 -295.527100 -297.573181 -300.425720 -298.371338 -298.324066 -297.302795 -296.942139 -296.476410 -295.965149 -293.814117 -294.755920 -293.213257 -292.017029 -293.587341 -294.177582 -296.185577 -286.829041 -289.201996 -291.021576 -291.690735 -292.546082 -291.982452 -290.474884 -291.793396 -294.731659 -295.205658 -294.542969 -294.425537 -294.317902 -294.052460 -293.683105 -293.412872 -293.155914 -292.958801 -292.750732 -292.462524 -292.079651 -291.954834 -289.706390 -293.372589 -291.808685 -290.876587 -291.196594 -290.482056 -290.536346 -291.496490 -292.665405 -292.886139 -292.482697 -292.504211 -293.113892 -291.983704 -292.633911 -292.441803 -294.089539 -292.827362 -291.889038 -292.237488 -293.777954 -293.745087 -294.569031 -292.030548 -291.951721 -294.917877 -296.342987 -296.616364 -297.011383 -297.269775 -295.423676 -293.147705 -291.759827 -291.808411 -291.689606 -290.620789 -289.431396 -289.608795 -290.725494 -293.109192 -295.576630 -295.918335 -295.450195 -294.173096 -290.929932 -289.442108 -289.385986 -289.115936 
-288.417755 -287.902557 -287.580505 -287.133881 -289.772247 -288.120148 -286.866577 -285.093842 -283.322815 -284.449982 -285.715637 -286.339844 -286.223907 -285.611115 -285.217316 -285.374908 -285.742371 -286.075226 -286.213837 -286.269531 -286.379242 -286.533661 -286.706573 -286.887695 -287.078796 -287.247620 -287.407074 -287.614685 -287.854218 -288.064667 -288.268311 -288.479004 -288.720581 -289.002167 -289.308258 -289.579468 -289.490662 -288.648712 -285.440186 -286.210602 -285.765472 -289.552734 -294.216858 -297.963867 -301.654938 -302.736450 -300.957672 -299.744293 -297.207581 -297.460693 -296.338501 -292.990601 -291.859253 -289.745819 -291.035614 -293.000275 -294.951294 -296.790405 -297.077240 -295.380859 -289.208862 -288.317047 -287.566101 -287.041473 -286.155792 -286.521881 -289.215027 -291.737122 -292.174164 -291.779877 -291.626587 -291.756042 -291.714539 -291.604370 -291.543854 -291.480072 -291.402557 -291.284821 -291.147491 -291.100067 -291.307037 -291.721954 -289.838562 -290.430939 -289.285065 -288.332733 -289.355560 -290.625397 -292.103699 -292.559174 -293.117950 -292.656250 -292.724670 -292.659149 -293.004486 -294.114288 -294.067169 -294.641083 -294.396454 -294.981079 -293.704773 -292.791229 -292.888428 -292.915649 -292.191193 -292.565796 -293.528992 -294.465607 -295.954285 -297.402161 -296.804413 -294.575378 -292.429749 -289.345795 -288.511688 -286.596466 -287.846985 -285.151581 -284.705109 -286.582489 -290.348114 -293.109283 -293.444733 -292.507507 -290.938202 -288.180908 -286.515656 -286.935822 -288.018402 -287.261688 -286.899017 -287.745789 -286.813141 -286.502899 -285.546448 -284.738617 -282.560730 -282.261627 -283.315399 -284.141418 -284.408081 -284.176025 -283.920563 -283.819183 -283.876709 -283.892609 -283.931702 -284.012177 -284.138092 -284.326385 -284.558014 -284.807922 -285.047607 -285.262299 -285.439178 -285.670837 -285.933685 -286.183777 -286.454132 -286.720123 -287.057709 -287.519684 -288.013580 -288.324493 -288.148651 -287.490601 -284.279968 -284.191101 -284.142975 -286.769653 -290.928741 -297.216583 -301.250732 -302.908569 -301.762329 -299.528198 -297.117523 -295.208710 -293.490814 -290.954529 -290.478851 -289.116333 -289.459076 -291.756500 -294.006683 -295.990173 -296.947662 -296.035736 -286.808167 -287.006500 -292.812042 -284.573334 -283.868988 -284.017120 -285.539642 -288.352570 -289.476990 -289.290863 -289.379822 -289.666870 -289.747986 -289.784027 -289.875519 -289.951508 -289.989594 -289.986023 -289.970032 -289.926605 -289.935089 -287.847168 -286.928772 -289.564758 -288.335663 -290.085602 -291.348877 -291.954742 -292.366638 -293.018646 -292.235413 -291.197815 -291.245850 -292.060760 -291.700409 -292.009705 -291.380524 -291.887665 -293.516388 -293.173676 -294.155914 -292.904694 -290.639648 -290.504425 -290.455566 -290.461426 -290.252167 -290.206787 -291.040039 -293.486603 -295.629730 -295.930389 -293.695007 -292.646118 -290.613495 -288.961884 -288.729004 -288.534088 -288.774078 -288.247681 -288.968079 -291.082825 -291.615112 -291.103149 -289.200867 -287.585815 -287.166290 -287.788208 -288.495178 -288.501587 -288.824524 -288.635834 -286.540710 -285.533203 -284.795258 -284.687653 -283.631073 -282.980804 -284.976685 -283.621307 -283.666412 -283.308289 -283.103607 -282.852631 -282.647339 -282.528839 -282.525848 -282.606903 -282.719391 -282.884796 -283.180450 -283.580597 -283.965912 -284.281708 -284.503571 -284.740204 -284.965302 -285.206665 -285.498322 -285.824127 -286.234131 -286.742279 -287.145599 -287.141968 -286.614471 -283.214081 -283.047882 -282.816986 
-283.266510 -284.043762 -287.484528 -293.373810 -298.784271 -301.579773 -300.162720 -297.202850 -296.650024 -295.827850 -295.259552 -293.820892 -291.422089 -288.046509 -288.180115 -289.294037 -290.844086 -291.814453 -293.035034 -294.496826 -294.790894 -294.447113 -293.850159 -281.295746 -282.192688 -284.430908 -285.836334 -286.317749 -286.177460 -286.244873 -286.700104 -287.183807 -287.567505 -287.970032 -288.294098 -288.523193 -288.646484 -288.708344 -288.723755 -288.692688 -288.630310 -286.791779 -287.790100 -288.411255 -288.998077 -286.777863 -287.117340 -288.398499 -289.636230 -288.865479 -289.363098 -290.411652 -291.583496 -292.026459 -291.744202 -290.529083 -289.409058 -291.041107 -291.921326 -292.247467 -291.949066 -292.108032 -290.896057 -290.645630 -291.331329 -292.173645 -292.831329 -292.525360 -292.419525 -293.168915 -293.452667 -294.328064 -293.976562 -293.697479 -292.150116 -291.322937 -291.176147 -291.295227 -290.611359 -289.838593 -288.981079 -287.841125 -289.391449 -289.819122 -289.898071 -289.732178 -289.521088 -289.119202 -288.760681 -288.904266 -289.418274 -289.327972 -285.406982 -284.363129 -284.086609 -284.081879 -283.702972 -283.418335 -284.885223 -284.710754 -283.645630 -283.191925 -282.762268 -282.419861 -282.194916 -282.066650 -281.982117 -281.977417 -281.987762 -282.057007 -282.292694 -282.706207 -283.236359 -283.770111 -284.142334 -284.417145 -284.645447 -284.908569 -285.242157 -285.613312 -286.015808 -286.346680 -286.325317 -285.926697 -283.421753 -283.069641 -282.845612 -283.497406 -285.073029 -284.874176 -285.878998 -289.425873 -292.465942 -296.132996 -297.852722 -297.915283 -297.945526 -298.086060 -297.760193 -296.726318 -293.549469 -280.805664 -281.837280 -286.450562 -286.436371 -286.889618 -287.213776 -286.292725 -287.934814 -288.938019 -279.075684 -279.819214 -282.094330 -283.445099 -283.463684 -283.470123 -283.692047 -284.080109 -284.483307 -285.147980 -285.897736 -286.523956 -287.009399 -287.293274 -287.465179 -287.564636 -287.601807 -287.533295 -287.411926 -286.533569 -286.748779 -287.360229 -287.981323 -288.485229 -288.680054 -287.301483 -286.770569 -288.632690 -288.843536 -289.614288 -290.603882 -291.741180 -291.221466 -289.682831 -288.624481 -291.023224 -292.252838 -292.389709 -292.711853 -292.103119 -291.473999 -291.092834 -292.294281 -293.489105 -294.230316 -294.544617 -294.377411 -294.561401 -294.526703 -294.161957 -293.370728 -292.325867 -291.090179 -290.270599 -289.726105 -289.311676 -289.222290 -289.260559 -288.951935 -288.106079 -287.260925 -287.517181 -288.265259 -288.999878 -289.554413 -290.021454 -289.631927 -289.333801 -289.777740 -290.704926 -284.280273 -283.878540 -283.557190 -283.232117 -282.859985 -282.726776 -282.861023 -285.685608 -283.456360 -283.297516 -282.782104 -282.281586 -282.048004 -281.922211 -281.824982 -281.792572 -281.791718 -281.769775 -281.902496 -282.288452 -282.828125 -283.336853 -281.639160 -284.247864 -284.495605 -284.894318 -285.347626 -285.718964 -285.929932 -285.815796 -283.624207 -283.178864 -283.142975 -281.798828 -283.280579 -285.267517 -287.360168 -288.213196 -288.009125 -286.955566 -287.233124 -289.172424 -291.141876 -294.225891 -295.449249 -295.816010 -278.877014 -278.096039 -278.594238 -279.668030 -280.460205 -288.878204 -288.350677 -287.700836 -287.204651 -284.363373 -276.345154 -277.527679 -279.968323 -281.416321 -282.293335 -282.031036 -281.672028 -282.354340 -282.816620 -283.282928 -283.945251 -284.780670 -285.363037 -285.696014 -285.996796 -286.228424 -286.389771 -286.519379 -286.563721 -286.462860 
-286.323181 -285.534637 -285.782654 -286.342865 -284.147095 -284.883270 -284.478424 -285.121521 -286.229340 -287.395447 -287.722839 -288.539398 -289.551147 -290.036591 -288.306091 -288.407227 -289.097839 -291.289520 -292.510864 -292.952545 -292.634705 -292.362000 -291.782257 -290.941956 -290.670624 -291.066223 -291.409515 -291.263702 -290.676147 -289.801971 -288.553223 -287.832947 -286.298157 -284.994324 -283.817200 -283.088989 -283.026062 -283.272095 -283.399109 -283.801422 -284.664459 -283.669769 -283.479950 -283.557373 -284.402618 -285.979645 -287.684662 -289.117218 -290.126190 -290.506592 -290.440002 -290.370361 -290.220520 -289.042236 -284.780396 -283.235199 -284.146484 -284.505524 -282.315430 -282.795868 -284.328796 -283.745941 -282.761475 -282.081573 -281.721130 -281.551453 -281.498016 -281.533142 -281.507477 -281.424469 -281.637543 -282.335541 -279.907928 -280.498749 -280.484711 -280.159821 -281.442810 -281.422119 -285.395844 -280.177643 -279.818237 -272.036560 -280.127686 -280.359589 -281.075134 -282.287750 -284.189545 -286.508392 -288.399323 -289.305878 -289.206024 -287.911804 -288.410309 -288.850037 -290.311157 -291.941040 -291.877655 -279.124573 -278.809998 -278.604950 -278.402069 -278.506866 -279.138214 -289.673492 -291.432953 -290.380096 -274.998230 -275.103729 -276.737030 -278.986206 -280.523376 -280.281555 -279.104950 -277.844788 -277.587006 -279.831909 -281.992126 -282.615326 -283.170654 -283.740143 -284.258728 -284.677338 -284.989044 -285.206451 -285.339264 -285.403320 -285.357758 -285.243073 -285.203094 -285.357391 -284.737183 -285.285919 -285.688416 -283.617065 -283.901947 -284.428986 -285.237610 -286.125885 -287.972473 -289.369690 -289.590210 -288.237000 -286.961670 -287.253571 -288.760559 -290.579407 -291.443695 -291.904816 -291.534363 -290.556091 -289.064209 -287.397858 -285.934174 -284.865967 -284.200073 -283.637451 -282.926056 -282.175781 -281.568634 -280.672302 -279.640808 -278.470642 -277.488708 -276.943298 -276.997833 -277.381927 -277.779297 -278.125671 -279.160187 -279.911957 -280.105927 -280.003876 -281.988861 -284.235718 -286.576782 -288.527863 -289.300140 -289.067627 -287.968231 -286.471893 -285.134308 -283.441803 -279.746674 -276.872192 -278.688019 -280.075684 -281.145660 -283.345734 -284.917236 -286.324280 -286.661346 -286.234894 -284.715942 -283.830383 -280.775818 -280.883850 -280.757507 -280.612640 -281.115784 -282.403687 -278.213654 -280.326630 -280.154785 -280.675659 -281.616028 -282.119904 -280.760406 -281.250519 -281.204132 -280.990234 -282.673920 -283.120850 -283.900482 -285.434357 -287.262726 -289.395233 -290.641571 -290.449951 -290.490448 -290.469910 -290.833679 -291.400726 -292.106812 -292.852753 -293.167145 -290.929840 -278.080811 -277.704651 -277.530426 -276.030945 -276.591675 -276.533936 -275.966034 -275.413788 -283.996796 -274.667267 -275.666809 -278.410034 -278.982239 -278.126221 -276.791718 -269.863403 -268.858337 -270.736084 -280.338135 -281.660461 -282.681824 -282.971344 -283.072601 -283.349091 -283.619751 -283.787811 -283.857147 -283.746948 -283.533691 -283.481628 -283.731842 -284.173248 -283.688751 -284.223694 -284.600250 -284.830872 -284.865570 -284.239929 -285.526337 -287.121124 -287.425446 -289.457336 -289.938599 -289.282928 -286.555817 -284.387054 -285.367584 -286.790497 -288.640839 -289.577515 -289.443268 -288.836945 -287.771118 -286.345825 -284.843628 -283.404785 -282.201416 -281.276764 -279.154907 -278.650024 -278.872162 -277.894440 -276.654266 -275.247803 -273.978760 -273.000153 -272.566315 -272.784760 -273.077820 -273.506012 
[... several hundred additional rows of floating-point values from this deleted numeric data file omitted ...]
-266.467743 -266.827301 -267.515137 -268.799683 -269.790314 -271.181885 -271.801514 -272.356506 -272.584961 -272.656281 -272.695587 -272.717926 -272.721375 -272.710815 -272.681854 -272.642883 -272.598602 -272.548309 -272.481354 -272.397095 -272.280701 -272.042236 -271.515900 -270.179199 -268.577240 -268.098663 -267.992035 -268.031433 -267.947296 -267.936035 -267.907562 -267.928711 -267.874725 -267.860565 -267.999725 -268.316040 -268.576904 -268.648102 -268.702881 -268.727905 -268.912262 -269.044617 -269.256561 -269.321411 -269.381561 -269.420685 -269.525116 -269.754272 -269.957825 -270.261780 -270.537781 -270.841003 -271.272034 -271.702484 -272.089661 -272.397888 -272.519928 -272.582703 -272.595520 -272.539429 -272.450256 -272.349091 -272.245850 -272.144348 -272.050354 -271.957184 -271.839325 -271.720734 -271.662537 -271.610321 -271.512329 -271.369904 -271.299072 -271.211426 -271.063629 -270.876770 -270.705383 -270.365082 -270.354797 -270.425934 -270.507050 -270.636841 -270.814575 -271.046600 -271.274689 -271.479889 -271.647766 -271.832977 -271.966858 -272.099579 -272.244080 -272.356567 -272.449524 -272.522644 -272.576599 -272.709167 -272.771149 -272.748962 -270.299133 -270.284485 -270.265839 -270.243866 -270.216736 -270.182800 -270.144928 -270.103485 -270.056671 -270.004364 -269.948120 -269.885956 -269.817047 -269.728088 -269.631226 -269.549683 -269.471008 -269.382660 -269.308655 -269.233307 -269.111145 -268.972717 -268.886963 -268.812866 -268.751740 -268.696014 -268.654663 -268.611206 -268.575897 -268.547058 -268.517487 -268.492401 -268.476654 -268.463959 -268.436737 -268.471680 -268.477600 -268.498291 -268.510345 -268.539825 -268.574036 -268.598785 -268.644409 -268.695435 -268.743225 -268.799744 -268.858154 -268.899445 -268.937866 -268.976562 -269.036255 -269.084869 -269.151337 -269.184143 -269.214539 -269.228210 -269.258606 -269.246552 -269.243011 -269.233185 -269.220795 -269.156250 -269.172089 -269.167297 -269.157379 -269.179779 -269.208130 -269.231079 -269.248688 -269.291931 -269.372620 -269.427155 -269.498596 -269.551178 -269.611023 -269.677307 -269.729370 -269.754578 -269.798462 -269.841583 -269.882172 -269.897278 -269.920227 -269.939758 -269.961273 -269.986816 -270.036224 -270.109283 -270.154083 -270.298187 -270.337616 -270.443970 -270.510345 -270.540314 -270.619202 -270.677979 -270.613495 -270.584045 -270.471680 -270.441010 -270.392487 -270.276031 -270.250977 -270.208801 -270.130859 -270.083374 -270.041534 -270.046814 -270.027893 -270.041870 -270.011993 -270.018951 -270.033447 -270.072815 -270.109467 -270.124359 -270.133179 -270.185852 -270.210388 -270.235107 -270.241913 -270.250580 -270.257477 -270.277802 -270.307007 -270.311890 -270.308350 -270.308655 diff --git a/src/externals/mct/examples/climate_sequen1/coupler.F90 b/src/externals/mct/examples/climate_sequen1/coupler.F90 deleted file mode 100644 index 7ea8bacf4ba..00000000000 --- a/src/externals/mct/examples/climate_sequen1/coupler.F90 +++ /dev/null @@ -1,214 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: coupler.F90,v 1.6 2006-10-17 21:46:35 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: coupler -- coupler for sequential model example -! -! !DESCRIPTION: -! A coupler subroutine for sequential climate model example. -! -! !INTERFACE: -! -module coupler -! -! !USES: -! 
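! Overview of this removed module, as implemented below:
!   cplinit - on the root rank, reads an ASCII file of remapping weights,
!             loads them into a SparseMatrix, and builds the distributed,
!             row-decomposed (Xonly) SparseMatrixPlus used by cplrun
!   cplrun  - interpolates the source export state onto the destination
!             grid with a parallel sparse matrix / AttrVect multiply
!   cplfin  - cleans up the SparseMatrixPlus
! master.F90 calls these three phases in sequence.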
-! Get the things needed from MCT by "Use,only" with renaming: -! -!---Domain Decomposition Descriptor DataType and associated methods -use m_GlobalSegMap,only: GlobalSegMap - -!---Field Storage DataType and associated methods -use m_AttrVect,only : AttrVect - -!---Sparse Matrix DataType and associated methods -use m_SparseMatrix, only : SparseMatrix -use m_SparseMatrix, only : SparseMatrix_clean => clean -use m_SparseMatrix, only : SparseMatrix_init => init -use m_SparseMatrix, only : SparseMatrix_importGRowInd => & - importGlobalRowIndices -use m_SparseMatrix, only : SparseMatrix_importGColInd => & - importGlobalColumnIndices -use m_SparseMatrix, only : SparseMatrix_importMatrixElts => & - importMatrixElements -use m_SparseMatrixPlus, only : SparseMatrixPlus -use m_SparseMatrixPlus, only : SparseMatrixPlus_init => init -use m_SparseMatrixPlus, only : SparseMatrixPlus_clean => clean -use m_SparseMatrixPlus, only : Xonly ! Decompose matrix by row -!---Matrix-Vector multiply methods -use m_MatAttrVectMul, only: MCT_MatVecMul => sMatAvMult - -!---MPEU I/O utilities -use m_stdio -use m_ioutil - -implicit none - -private - -! !PUBLIC MEMBER FUNCTIONS: - -public cplinit -public cplrun -public cplfin - -! !PRIVATE DATA MEMBERS -type(SparseMatrixPlus) :: Src2DstMatPlus ! the mapping weights - -character(len=*), parameter :: cplname='coupler.F90' -integer :: rank - -!EOP ___________________________________________________________________ - -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: cplinit - initialize the coupler -! -! !INTERFACE: - -subroutine cplinit(SrcGSMap,DstGSMap,comm,compid) - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: SrcGSMap,DstGSMap ! GSmaps for source and dst - integer,intent(in) :: comm ! local MPI communicator - integer,intent(in) :: compid ! coupler's component ID -! -!EOP ___________________________________________________________________ - -! Local variables - character(len=100),parameter :: & - RemapMatrixFile='../../data/t42_to_popx1_c_mat.asc' - -! Loop indicies - integer :: i,j,k,n - -! MPI variables - integer :: nprocs, root, ierr -! SparseMatrix variables - integer :: mdev - integer :: num_elements, nRows, nColumns - integer, dimension(2) :: src_dims, dst_dims - integer, dimension(:), pointer :: rows, columns - real, dimension(:), pointer :: weights -! SparseMatrix elements on root - type(SparseMatrix) :: sMat -! _____________________________________________________________________ - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! INITIALIZATION PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - ! LOCAL RANK AND SIZE - call MPI_COMM_RANK(comm,rank,ierr) - call MPI_COMM_SIZE(comm,nprocs,ierr) - root = 0 - - if(rank==0) write(6,*) cplname,' init start' - if(rank==0) write(6,*) cplname,' MyID ', compid - if(rank==0) write(6,*) cplname,' Num procs ', nprocs - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Read matrix weights for interpolation from a file. 
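! Layout of the ASCII weights file read by the code below (the element
! count and weight shown are illustrative only; the grid sizes are the
! ones given in src.rc and dst.rc for this example):
!
!   line 1:            num_elements          e.g. 123456
!   line 2:            src_nx src_ny         e.g. 128 64
!   line 3:            dst_nx dst_ny         e.g. 320 384
!   remaining lines:   row column weight     e.g. 1 1 0.25
!                      (one line per nonzero matrix element)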
- if (rank == root) then - mdev = luavail() - open(mdev, file=trim(RemapMatrixFile), status="old") - read(mdev,*) num_elements - read(mdev,*) src_dims(1), src_dims(2) - read(mdev,*) dst_dims(1), dst_dims(2) - - allocate(rows(num_elements), columns(num_elements), & - weights(num_elements), stat=ierr) - - do n=1, num_elements - read(mdev,*) rows(n), columns(n), weights(n) - end do - - close(mdev) - - ! Initialize a Sparsematrix - nRows = dst_dims(1) * dst_dims(2) - nColumns = src_dims(1) * src_dims(2) - call SparseMatrix_init(sMat,nRows,nColumns,num_elements) - call SparseMatrix_importGRowInd(sMat, rows, size(rows)) - call SparseMatrix_importGColInd(sMat, columns, size(columns)) - call SparseMatrix_importMatrixElts(sMat, weights, size(weights)) - - deallocate(rows, columns, weights, stat=ierr) - - endif - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Build a SparseMatrixPlus for doing the interpolation - ! Specify matrix decomposition to be by row. - ! following the atmosphere's decomposition. - call SparseMatrixPlus_init(Src2DstMatPlus, sMat, SrcGSMap, DstGSMap, & - Xonly, root, comm, compid) - - ! no longer need the matrix defined on root - if(rank==0) call SparseMatrix_clean(sMat) - if(rank==0) write(6,*) cplname, ' init done' - - -!!! END OF INIT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -end subroutine cplinit - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! RUN PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: cplrun - coupler's run method - -subroutine cplrun(IMPORT,EXPORT) - -! !INPUT PARAMETERS: - type(AttrVect),intent(in) :: IMPORT - type(AttrVect),intent(out) :: EXPORT -!EOP ------------------------------------------------------------------- - - if(rank==0) write(6,*) cplname, ' run start' - - ! Interpolate by doing a parallel sparsematrix-attrvect multiply - ! Note: this will interpolate all fields with the same names - - call MCT_MatVecMul(IMPORT, Src2DstMatPlus, EXPORT) - - ! possibly do more calculations - - if(rank==0) write(6,*) cplname, ' run done' -!!! END OF RUN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -end subroutine cplrun - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! FINALIZE PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: cplfin - coupler's finalize method - -subroutine cplfin - -! 
-!EOP ------------------------------------------------------------------- - - call SparseMatrixPlus_clean(Src2DstMatPlus) - if(rank==0) write(6,*) cplname, " done" -end subroutine cplfin - -end module coupler - diff --git a/src/externals/mct/examples/climate_sequen1/dst.rc b/src/externals/mct/examples/climate_sequen1/dst.rc deleted file mode 100644 index cbb9449b80d..00000000000 --- a/src/externals/mct/examples/climate_sequen1/dst.rc +++ /dev/null @@ -1,6 +0,0 @@ -# Resource file for dst model -# nx and ny:: global grid size in x and y - - nx: 320 - ny: 384 - decomp: R diff --git a/src/externals/mct/examples/climate_sequen1/dstmodel.F90 b/src/externals/mct/examples/climate_sequen1/dstmodel.F90 deleted file mode 100644 index 3344e7604ca..00000000000 --- a/src/externals/mct/examples/climate_sequen1/dstmodel.F90 +++ /dev/null @@ -1,231 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: dstmodel.F90,v 1.8 2006-10-17 21:47:56 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !MODULE: dstmodel -- generic model for sequential climate model -! -! !DESCRIPTION: -! init run and finalize methods for destination model -! -! !INTERFACE: -! -module dstmodel - -! -! !USES: -! -! Get the things needed from MCT by "Use,only" with renaming: -! -!---Domain Decomposition Descriptor DataType and associated methods -use m_GlobalSegMap,only: GlobalSegMap -use m_GlobalSegMap,only: GlobalSegMap_init => init -use m_GlobalSegMap,only: GlobalSegMap_lsize => lsize -use m_GlobalSegMap,only: GlobalSegMap_clean => clean -!---Field Storage DataType and associated methods -use m_AttrVect,only : AttrVect -use m_AttrVect,only : AttrVect_init => init -use m_AttrVect,only : AttrVect_lsize => lsize -use m_AttrVect,only : AttrVect_clean => clean -use m_AttrVect,only : AttrVect_copy => copy -use m_AttrVect,only : AttrVect_indxR => indexRA -use m_AttrVect,only : AttrVect_importRAttr => importRAttr -use m_AttrVectcomms,only : AttrVect_gather => gather - -! Get things from MPEU -use m_inpak90 ! Resource files -use m_stdio ! I/O utils -use m_ioutil - - -! Get utilities for this program. -use mutils - -implicit none - -private -! except - -! !PUBLIC MEMBER FUNCTIONS: -! -public dstinit -public dstrun -public dstfin - -! module variables -character(len=*), parameter :: modelname='dstmodel.F90' -integer :: rank, lcomm - -!EOP ------------------------------------------------------------------- - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dstinit - Destination model initialization - -subroutine dstinit(GSMap,IMPORT,EXPORT,comm,compid) - -! !INPUT PARAMETERS: - type(GlobalSegMap),intent(inout) :: GSMap ! decomposition - type(AttrVect),intent(inout) :: IMPORT,EXPORT ! state data - integer,intent(in) :: comm ! MPI communicator - integer,intent(in) :: compid ! component ID -! -!EOP ___________________________________________________________________ - -! local variables - -! parameters for this model - integer :: nxa ! number of points in x-direction - integer :: nya ! number of points in y-direction - - integer :: i,j,k,idx - - integer :: nprocs, root, ier - -! 
GlobalSegMap variables - integer,dimension(:),pointer :: lindex - -! AttrVect variables - integer :: avsize - - character*2, ldecomp - - - call MPI_COMM_RANK(comm,rank, ier) - call MPI_COMM_SIZE(comm,nprocs,ier) - -! save local communicator - lcomm=comm - - if(rank==0) then - write(6,*) modelname, ' init start' - write(6,*) modelname,' MyID ', compid - write(6,*) modelname,' Num procs ', nprocs - endif - -! Get configuration - call i90_LoadF('dst.rc',ier) - - call i90_label('nx:',ier) - nxa=i90_gint(ier) - call i90_label('ny:',ier) - nya=i90_gint(ier) - if(rank==0) write(6,*) modelname, ' x,y ', nxa,nya - - call i90_label('decomp:',ier) - call i90_Gtoken(ldecomp, ier) - if(rank==0) write(6,*) modelname, ' decomp ', ldecomp - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Initialize a Global Segment Map - - - call get_index(ldecomp,nprocs,rank,nxa,nya,lindex) - - call GlobalSegMap_init(GSMap,lindex,comm,compid,gsize=nxa*nya) - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - if(rank==0) write(6,*) modelname, ' GSMap ',GSMap%ngseg,GSMap%gsize - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Initialize import and export Attribute vectors - -! size is the number of grid points on this processor - avsize = GlobalSegMap_lsize(GSMap,comm) - if(rank==0) write(6,*) modelname, ' localsize ', avsize - -! initialize Avs with two real attributes. - call AttrVect_init(IMPORT,rList="field3:field4",lsize=avsize) - call AttrVect_init(EXPORT,rList="field5:field6",lsize=avsize) -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if(rank==0) write(6,*) modelname, ' init done' -end subroutine dstinit -!!! END OF INIT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! RUN PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dstrun - Destination model run method - -subroutine dstrun(IMPORT,EXPORT) - -! !INPUT PARAMETERS: - type(AttrVect),intent(inout) :: IMPORT,EXPORT ! Input and Output states - -!EOP ------------------------------------------------------------------- - -! local variables - integer :: avsize,ier,i,index - - if(rank==0) write(6,*) modelname, ' run start' - -! Copy input data to output data using translation between different names - call AttrVect_copy(IMPORT,EXPORT,rList="field3:field4", & - TrList="field5:field6") - - if(rank==0) write(6,*) modelname, ' run done' - -end subroutine dstrun -!!! END OF RUN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! FINALIZE PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dstfin - Destination model finalize method - -subroutine dstfin(IMPORT,EXPORT,GSMap) - -! !INPUT PARAMETERS: - type(AttrVect),intent(inout) :: IMPORT,EXPORT ! MCT defined type - type(GlobalSegMap),intent(inout) :: GSMap - -!EOP ------------------------------------------------------------------- - type(AttrVect) :: GlobalD - integer :: lsize,ier,mdev,i - - if(rank==0) write(6,*) modelname,' fin start' -! 
gather data to node 0 and write it out - call AttrVect_gather(EXPORT,GlobalD,GSMap,0,lcomm,ier) - -! write out gathered data - if(rank==0) then - mdev=luavail() - lsize=AttrVect_lsize(GlobalD) - open(mdev, file="TS1out.dat") - do i=1,lsize - write(mdev,*) GlobalD%rAttr(1,i) - enddo - close(mdev) - endif - - ! clean up - call AttrVect_clean(IMPORT) - call AttrVect_clean(EXPORT) - if(rank==0)call AttrVect_clean(GlobalD) - call GlobalSegMap_clean(GSMap) - if(rank==0) write(6,*) modelname,' fin done' -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -endsubroutine dstfin - -end module dstmodel diff --git a/src/externals/mct/examples/climate_sequen1/master.F90 b/src/externals/mct/examples/climate_sequen1/master.F90 deleted file mode 100644 index 0f9a4786782..00000000000 --- a/src/externals/mct/examples/climate_sequen1/master.F90 +++ /dev/null @@ -1,103 +0,0 @@ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: master.F90,v 1.5 2009-02-23 23:22:47 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !PROGRAM: master -- driver for sequential coupled model example -! -! !DESCRIPTION: Provide a simple example of using MCT to connect to -! components executing sequentially in a single executable. -! -program master - -! -! !USES: -! - - use m_AttrVect,only : AttrVect - use m_GlobalSegMap,only: GlobalSegMap - use m_MCTWorld,only: MCTWorld_init => init - - use srcmodel - use dstmodel - use coupler - - implicit none - - include "mpif.h" - -! -!EOP ------------------------------------------------------------------- - -! local variables - - character(len=*), parameter :: mastername='master.F90' - - integer :: ncomps = 3 ! Must know total number of - ! components in coupled system - - integer,dimension(:),pointer :: comps ! array with component ids - - - type(AttrVect) :: srcImp,srcExp ! import and export states for src and - type(AttrVect) :: dstImp,dstExp ! destination models - - type(GlobalSegMap) :: srcGSMap ! decomposition descriptors for src and - type(GlobalSegMap) :: dstGSMap ! desitnation models - -! other variables - integer :: comm1, comm2, rank, nprocs,compid, myID, ier,color - integer :: anprocs,cnprocs - -!----------------------------------------------------------------------- -! The Main program. -! We are implementing a single-executable, sequential-execution system. -! -! This main program initializes MCT and runs the whole model. - -! Initialize MPI - call MPI_INIT(ier) - -! Get basic MPI information - call MPI_COMM_SIZE(MPI_COMM_WORLD,nprocs,ier) - call MPI_COMM_RANK(MPI_COMM_WORLD,rank,ier) - -! Get communicators for each model - call mpi_comm_dup(MPI_COMM_WORLD,comm1,ier) - call mpi_comm_dup(MPI_COMM_WORLD,comm2,ier) - -! Initialize MCT - allocate(comps(ncomps),stat=ier) - comps(1)=1 - comps(2)=2 - comps(3)=3 - call MCTWorld_init(ncomps,MPI_COMM_WORLD,comm1,myids=comps) - - -! Initialize the model - call srcinit(srcGSMap,srcImp,srcExp,comm1,1) - call dstinit(dstGSMap,dstImp,dstExp,comm2,2) - call cplinit(srcGSMap,dstGSMap,comm1,3) - -! Run the model - -! source does something with srcImp and produces export - call srcrun(srcImp,srcExp) - -! map the source model's Export to the destination model's Import - call cplrun(srcExp,dstImp) - -! destination model does something with dstImp - call dstrun(dstImp,dstExp) - -! 
Finalize - call srcfin(srcImp,srcExp,srcGSMap) - call dstfin(dstImp,dstExp,dstGSMap) - call cplfin - - call MPI_FINALIZE(ier) - -end program master diff --git a/src/externals/mct/examples/climate_sequen1/mutils.F90 b/src/externals/mct/examples/climate_sequen1/mutils.F90 deleted file mode 100644 index 0a1829f0a59..00000000000 --- a/src/externals/mct/examples/climate_sequen1/mutils.F90 +++ /dev/null @@ -1,139 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: mutils.F90,v 1.8 2005-11-18 23:15:38 rloy Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !MODULE: mutils -- utilities for the sequential climate example -! -! !DESCRIPTION: -! -! !INTERFACE: -! -module mutils - -! module of utilties for the sequential climate example -! - - implicit none - - private -! except - -! !PUBLIC TYPES: - - public get_index - - contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: get_index - get local index array and size -! for 3 standard decompositions of a grid. -! -! !DESCRIPTION: -! The routine get_index will return a local index array and size that can -! be passed to a GSMap_init routine for three possible decompositions: -! R - by row or latitude -! C - by column or longitude -! RC - row and column or checkerboard -! choice is determined by the value of ldecomp. -! -! !INTERFACE: - -subroutine get_index(ldecomp,nprocs,myproc,gnx,gny,gridbuf) -! !INPUT PARAMETERS: -! - character(len=*),intent(inout) :: ldecomp ! decomp choice - integer,intent(in) :: nprocs ! total number of MPI processes - integer,intent(in) :: myproc ! my rank in local communicator - integer,intent(in) :: gnx ! total points in X direction - integer,intent(in) :: gny ! total points in Y direction - -! !OUTPUT PARAMETERS: -! - integer,dimension(:),pointer :: gridbuf ! local index array -! -!EOP ___________________________________________________________________ - - integer :: npesx,npesy,ng,ny,n,i,j,nx,ig,jg,nseg,factor - - -! default decomp is R - if((trim(ldecomp) .ne. 'R') .and. (ldecomp .ne. 'C') .and. (ldecomp .ne. 'RC')) then - ldecomp = 'R' - endif - -! A 'by-row' or 'by-latitude' decomposition - if(trim(ldecomp) .eq. 'R') then - npesx=1 - npesy=nprocs - nx=gnx - ny=gny/npesy - allocate(gridbuf(nx*ny)) - n=0 - do j=1,ny - do i=1,nx - n=n+1 - ig=i - jg = j + myProc*ny - ng =(jg-1)*gnx + ig - gridbuf(n)=ng - enddo - enddo - -! A 'by-column' or 'by-longitude' decomposition - else if (ldecomp .eq. 'C') then - npesx=nprocs - npesy=1 - nx=gnx/npesx - ny=gny - allocate(gridbuf(nx*ny)) - n=0 - do j=1,ny - do i=1,nx - n=n+1 - ig=i + myProc*nx - jg= j - ng=(jg-1)*gnx + ig - gridbuf(n)=ng - enddo - enddo - -! A 'row-columen' or 'checkerboard' decomposition - else if (ldecomp .eq. 'RC') then - ! find the closest square - factor=1 - do i=2,INT(sqrt(FLOAT(nprocs))) - if ( (nprocs/i) * i .eq. nprocs) then - factor = i - endif - enddo - npesx=factor - npesy=nprocs/factor - nx=gnx/npesx - ny=gny/npesy -! 
write(6,*) 'RC',factor,npesy,nx,ny - allocate(gridbuf(nx*ny)) - n=0 - do j=1,ny - do i=1,nx - n=n+1 - ig=mod(myProc,npesx)*nx+i - jg=(myProc/npesx)*ny+j - ng=(jg-1)*gnx + ig - gridbuf(n)=ng - enddo - enddo - - - endif - -end subroutine get_index - - - - -end module mutils diff --git a/src/externals/mct/examples/climate_sequen1/src.rc b/src/externals/mct/examples/climate_sequen1/src.rc deleted file mode 100644 index 1dd5275e538..00000000000 --- a/src/externals/mct/examples/climate_sequen1/src.rc +++ /dev/null @@ -1,6 +0,0 @@ -# Resource file for src model -# nx and ny:: global grid size in x and y - - nx: 128 - ny: 64 - decomp: R diff --git a/src/externals/mct/examples/climate_sequen1/srcmodel.F90 b/src/externals/mct/examples/climate_sequen1/srcmodel.F90 deleted file mode 100644 index b0c8be56db4..00000000000 --- a/src/externals/mct/examples/climate_sequen1/srcmodel.F90 +++ /dev/null @@ -1,248 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: srcmodel.F90,v 1.8 2005-11-18 23:15:38 rloy Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !MODULE: srcmodel -- generic model for unit tester -! -! !DESCRIPTION: -! init run and finalize methods for source model -! -module srcmodel - -! -! !USES: -! -! Get the things needed from MCT by "Use,only" with renaming: -! -!---Domain Decomposition Descriptor DataType and associated methods -use m_GlobalSegMap,only: GlobalSegMap -use m_GlobalSegMap,only: GlobalSegMap_init => init -use m_GlobalSegMap,only: GlobalSegMap_lsize => lsize -use m_GlobalSegMap,only: GlobalSegMap_clean => clean -!---Field Storage DataType and associated methods -use m_AttrVect,only : AttrVect -use m_AttrVect,only : AttrVect_init => init -use m_AttrVect,only : AttrVect_lsize => lsize -use m_AttrVect,only : AttrVect_clean => clean -use m_AttrVect,only : AttrVect_copy => copy -use m_AttrVect,only : AttrVect_zero => zero -use m_AttrVect,only : AttrVect_indxR => indexRA -use m_AttrVect,only : AttrVect_importRAttr => importRAttr -use m_AttrVectComms,only : AttrVect_scatter => scatter - -! Get things from MPEU -use m_inpak90 ! Resource files -use m_stdio ! I/O utils -use m_ioutil - -! Get utilities for this program. -use mutils - -implicit none - -private -! except - -! !PUBLIC MEMBER FUNCTIONS: - -public srcinit -public srcrun -public srcfin - -! private module variables -character(len=*), parameter :: modelname='srcmodel.F90' -integer :: rank -real, dimension(:), pointer :: avdata - -!EOP ------------------------------------------------------------------- - -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: srcinit - Source model initialization - -subroutine srcinit(GSMap,IMPORT,EXPORT,comm,compid) - -! !INPUT PARAMETERS: - type(GlobalSegMap),intent(inout) :: GSMap ! decomposition - type(AttrVect),intent(inout) :: IMPORT,EXPORT ! state data - integer,intent(in) :: comm ! MPI communicator - integer,intent(in) :: compid ! component ID -! -!EOP ___________________________________________________________________ - -! local variables - -! parameters for this model - integer :: nxa ! number of points in x-direction - integer :: nya ! 
number of points in y-direction - - integer :: i,j,k,mdev,fx,fy - integer :: nprocs, root, ier,fileno - -! GlobalSegMap variables - integer,dimension(:),pointer :: lindex - -! AttrVect variables - integer :: avsize - type(AttrVect) :: GlobalD ! Av to hold global data - - real,dimension(:),pointer :: rootdata - - character*2 :: ldecomp - - - call MPI_COMM_RANK(comm,rank, ier) - call MPI_COMM_SIZE(comm,nprocs,ier) - - if(rank==0) then - write(6,*) modelname, ' init start' - write(6,*) modelname,' MyID ', compid - write(6,*) modelname,' Num procs ', nprocs - endif - -! Get configuration - call i90_LoadF('src.rc',ier) - - call i90_label('nx:',ier) - nxa=i90_gint(ier) - call i90_label('ny:',ier) - nya=i90_gint(ier) - if(rank==0) write(6,*) modelname, ' x,y ', nxa,nya - - call i90_label('decomp:',ier) - call i90_Gtoken(ldecomp, ier) - if(rank==0) write(6,*) modelname, ' decomp ', ldecomp - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Initialize a Global Segment Map - - - call get_index(ldecomp,nprocs,rank,nxa,nya,lindex) - - call GlobalSegMap_init(GSMap,lindex,comm,compid,gsize=nxa*nya) - - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - if(rank==0) write(6,*) modelname, ' GSMap ',GSMap%ngseg,GSMap%gsize - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Initialize import and export Attribute vectors - -! size is the number of grid points on this processor - avsize = GlobalSegMap_lsize(GSMap,comm) - if(rank==0) write(6,*) modelname, ' localsize ', avsize - -! Initialize the IMPORT Av by scattering from a root Av -! with real data. - -! Read in data from root and scatter to nodes - if(rank==0) then - call AttrVect_init(GlobalD,rList="field1:field2",lsize=nxa*nya) - mdev=luavail() - open(mdev, file="TS1.dat",status="old") - read(mdev,*) fx,fy - do i=1,nxa*nya - read(mdev,*) GlobalD%rAttr(1,i) - enddo - write(6,*) modelname,'Global init ',GlobalD%rAttr(1,1),GlobalD%rAttr(1,8000) - endif - -! this scatter will create IMPORT if it hasn't already been initialized - call AttrVect_scatter(GlobalD,IMPORT,GSMap,0,comm,ier) - -! initialize EXPORT Av with two real attributes. - call AttrVect_init(EXPORT,rList="field3:field4",lsize=avsize) - - call AttrVect_zero(EXPORT) - - if(rank==0) then - write(6,*) modelname, rank,' IMPORT field1', IMPORT%rAttr(1,1) - write(6,*) modelname, rank,' IMPORt field2', IMPORT%rAttr(2,1) - write(6,*) modelname, rank,' EXPORT field3', EXPORT%rAttr(1,1) - write(6,*) modelname, rank,' EXPORT field4', EXPORT%rAttr(2,1) - endif - -! allocate buffer for use in run method - allocate(avdata(avsize),stat=ier) - - if(rank==0) write(6,*) modelname, ' init done' -end subroutine srcinit -!!! END OF INIT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! RUN PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: srcrun - Source model run method - -subroutine srcrun(IMPORT,EXPORT) - -! !INPUT PARAMETERS: - type(AttrVect),intent(inout) :: IMPORT,EXPORT ! Input and Output states - -!EOP ------------------------------------------------------------------- -! local variables - integer :: avsize,ier,i - -! Nothing to do with IMPORT - - -! Fill EXPORT with data - if(rank==0) write(6,*) modelname, ' run start' - -! 
Use Av copy to copy input data from field1 in Imp to field3 in EXPORT - call AttrVect_copy(IMPORT,EXPORT,rList='field1',TrList='field3') - -! Use import to load data in second field - avdata=30.0 - call AttrVect_importRAttr(EXPORT,"field4",avdata) - - if(rank==0) write(6,*) modelname, ' In field1', IMPORT%rAttr(1,1) - if(rank==0) write(6,*) modelname, ' In field2', IMPORT%rAttr(2,1) - if(rank==0) write(6,*) modelname, ' Out field3', EXPORT%rAttr(1,1) - if(rank==0) write(6,*) modelname, ' Out field4', EXPORT%rAttr(2,1) - - if(rank==0) write(6,*) modelname, ' run done' - -end subroutine srcrun -!!! END OF RUN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! FINALIZE PHASE -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: srcfin - Source model finalize method - -subroutine srcfin(IMPORT,EXPORT,GSMap) - -! !INPUT PARAMETERS: - type(AttrVect),intent(inout) :: IMPORT,EXPORT ! imp,exp states - type(GlobalSegMap),intent(inout) :: GSMap -!EOP ------------------------------------------------------------------- - ! clean up - call AttrVect_clean(IMPORT) - call AttrVect_clean(EXPORT) - call GlobalSegMap_clean(GSMap) - deallocate(avdata) - if(rank==0) write(6,*) modelname,' fin done' -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -endsubroutine srcfin - -end module srcmodel diff --git a/src/externals/mct/examples/simple/.gitignore b/src/externals/mct/examples/simple/.gitignore deleted file mode 100644 index 40296985e55..00000000000 --- a/src/externals/mct/examples/simple/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -twocon -twoseq -twosequn -twoseqNB diff --git a/src/externals/mct/examples/simple/Makefile b/src/externals/mct/examples/simple/Makefile deleted file mode 100644 index 773fb149744..00000000000 --- a/src/externals/mct/examples/simple/Makefile +++ /dev/null @@ -1,53 +0,0 @@ - -SHELL = /bin/sh - -# SOURCE FILES - -SRCS_F90 = twocmp.con.F90 \ - twocmp.seq.F90 \ - twocmp.seqUnvn.F90 \ - twocmp.seqNB.F90 \ - -OBJS_ALL = $(SRCS_F90:.F90=.o) - -# MACHINE AND COMPILER FLAGS - -include ../../Makefile.conf - -# ADDITIONAL DEFINITIONS SPECIFIC FOR UTMCT COMPILATION - -MCTLIBS = -L$(MPEUPATH) -L$(MCTPATH) -lmct -lmpeu -UTLDFLAGS = $(REAL8) -UTCMPFLAGS = $(REAL8) $(INCFLAG)$(MPEUPATH) $(INCFLAG)$(MCTPATH) - -# TARGETS - -all: twocon twoseq twosequn twoseqNB - -twocon: twocmp.con.o - $(FC) -o $@ twocmp.con.o $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -twoseq: twocmp.seq.o - $(FC) -o $@ twocmp.seq.o $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -twosequn: twocmp.seqUnvn.o - $(FC) -o $@ twocmp.seqUnvn.o $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -twoseqNB: twocmp.seqNB.o - $(FC) -o $@ twocmp.seqNB.o $(FCFLAGS) $(MCTLIBS) $(MPILIBS) - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .o - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MCTFLAGS) $(UTCMPFLAGS) $< - - -clean: - ${RM} *.o *.mod twocon twoseq twosequn twoseqNB - -# DEPENDENCIES: - -$(OBJS_ALL): $(MCTPATH)/libmct.a diff --git a/src/externals/mct/examples/simple/README b/src/externals/mct/examples/simple/README deleted file mode 100644 index 037bde5bcf4..00000000000 --- a/src/externals/mct/examples/simple/README +++ /dev/null @@ -1,51 +0,0 @@ - - -The programs in this directory demonstrate how to use basic -functions of MCT 
in several possible coupled configurations of -two components. - -Each example is contained in one .F90 file. - -To compile: -First make sure you have compiled MCT. See instructions in -MCT/README - -Type "make" here or "make examples" in the top-level directory. - -To run: Consult your local documentation for how to run a parallel -program. The examples below assume mpirun is available and you -can run interactively. "script.babyblue" is an example of run script -for IBM systems which use a queue manager. - ----------------------------------------------------------------------- -twocomponent.concurrent.F90 - two components running concurrently on - separate pools of processors. - - requires: at least 3 MPI processes - to run: mpirun -np 3 twocon - note: will not work with mpi-serial - ------------------------------------------- -twocomponent.sequential.F90 - two components running sequentially on - the same processors. Uses arguments to pass data between models. - Shows use of Rearranger. - - requires: at least 1 MPI process - to run: mpirun -np 1 twoseq - ------------------------------------------- -twocomponent.seqNB.F90 - two components running sequentially on - the same processors. Uses non-blocking MCT calls to pass data between - models - - requires: at least 1 MPI process - to run: mpirun -np 1 twoseqNB - ------------------------------------------- -twocomponentUneven.sequential.F90 - two components running sequentially but - one model is only running on some of the shared processors. - - requires: no more than 12 processors - to run: mpirun -np 2 twosequn - ------------------------------------------- diff --git a/src/externals/mct/examples/simple/script.babyblue b/src/externals/mct/examples/simple/script.babyblue deleted file mode 100644 index a30fea12731..00000000000 --- a/src/externals/mct/examples/simple/script.babyblue +++ /dev/null @@ -1,29 +0,0 @@ -#! /usr/bin/csh -f -#################################################### -# -# Example run script for LoadLeveler, the queue -# system used on most IBM's. -# -# Your site may require different options. -# -#################################################### -# @ output = utmct.stdout.$(jobid).$(stepid) -# @ error = utmct.stderr.$(jobid).$(stepid) -# @ job_name = mctsimple -# @ job_type = parallel -# @ node = 4,4 -# @ tasks_per_node = 4 -# @ checkpoint = no -# @ node_usage = not_shared -# @ network.MPI = csss,not_shared,us -# @ class = share -# @ notification = never -# @ queue - -setenv MP_STDOUTMODE ordered -setenv MP_INFOLEVEL 2 - -echo "`date` -- UTMCT EXECUTION BEGINS HERE" -poe twocon -echo "`date` -- UTMCT EXECUTION finishes HERE" - diff --git a/src/externals/mct/examples/simple/twocmp.con.F90 b/src/externals/mct/examples/simple/twocmp.con.F90 deleted file mode 100644 index 8bbd1916b3d..00000000000 --- a/src/externals/mct/examples/simple/twocmp.con.F90 +++ /dev/null @@ -1,222 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: twocmp.con.F90,v 1.4 2006-07-25 22:31:34 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: twocomponent.concurrent -! -! !DESCRIPTION: Provide a simple example of using MCT to connect two -! components executing concurrently in a single executable. -! -! -! !INTERFACE: -! - program twocon -! -! !USES: -! 
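! Sketch of the control flow implemented below:
!   * main checks that 3-14 MPI ranks are available, then splits
!     MPI_COMM_WORLD: ranks 0-1 run model1, the remaining ranks run model2
!   * each model builds a 1-D GlobalSegMap over the 24 grid points, an
!     AttrVect holding field1:field2, and a Router to the other component
!   * model1 fills its AttrVect (field1 directly, field2 via importRAttr)
!     and calls MCT_Send; model2 zeroes its AttrVect, calls MCT_Recv, and
!     prints the values before and after the transfer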
-!--- Use only the things needed from MCT - use m_MCTWorld,only: MCTWorld_init => init - - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: MCT_GSMap_init => init - use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize - - use m_AttrVect,only : AttrVect - use m_AttrVect,only : MCT_AtrVt_init => init - use m_AttrVect,only : MCT_AtrVt_zero => zero - use m_AttrVect,only : MCT_AtrVt_lsize => lsize - use m_AttrVect,only : MCT_AtrVt_indexRA => indexRA - use m_AttrVect,only : MCT_AtrVt_importRA => importRAttr - - use m_Router,only: Router - use m_Router,only: MCT_Router_init => init - - use m_Transfer,only : MCT_Send => send - use m_Transfer,only : MCT_Recv => recv - - implicit none - - include 'mpif.h' -!----------------------------------------------------------------------- - ! Local variables - - integer,parameter :: npoints = 24 ! number of grid points - - integer ier,nprocs - integer color,myrank,mycomm -!----------------------------------------------------------------------- -! The Main program. -! We are implementing a single-executable, concurrent-execution system. -! This small main program carves up MPI_COMM_WORLD and then starts -! each component on its own processor set. - - call MPI_init(ier) - - call mpi_comm_size(MPI_COMM_WORLD, nprocs,ier) - call mpi_comm_rank(MPI_COMM_WORLD, myrank,ier) - - if((nprocs .gt. 14).or.(nprocs .lt. 3)) then - write(6,*)"The small problem size in this example & - &requires between 3 and 14 processors." - write(6,*)"nprocs =",nprocs - stop - endif - - -! Force the model1 to run on the first 2 processors - color =1 - if (myrank .lt. 2) then - color = 0 - endif - -! Split MPI_COMM_WORLD into a communicator for each model - call mpi_comm_split(MPI_COMM_WORLD,color,0,mycomm,ier) - -! Start up the the models, pass in the communicators - if(color .eq. 0) then - call model1(mycomm) - else - call model2(mycomm) - endif - -! Models are finished. - call mpi_finalize(ier) - - contains - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model1(comm1) ! the first model - - implicit none - - integer :: comm1,mysize,ier,asize,myproc - integer :: fieldindx,avsize,i - integer,dimension(1) :: start,length - real,pointer :: testarray(:) - - type(GlobalSegMap) :: GSmap - type(AttrVect) :: av1 - type(Router) :: Rout -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm1,mysize,ier) - call mpi_comm_rank(comm1,myproc,ier) - write(6,*)"model1 size",mysize - -! initialize ThisMCTWorld - call MCTWorld_init(2,MPI_COMM_WORLD,comm1,1) - -! set up a grid and decomposition - asize = npoints/mysize - - start(1)= (myproc*asize) +1 - length(1)=asize - -! describe decomposition with MCT GSmap type - call MCT_GSMap_init(GSMap,start,length,0,comm1,1) - - write(6,*)"model 1 GSMap ngseg",myproc,GSMap%ngseg,start(1) - -! Initialize an Attribute Vector - call MCT_AtrVt_init(av1,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap,comm1)) - - avsize = MCT_AtrVt_lsize(av1) - write(6,*)"model 1 av size", avsize - -! Fill Av with some data -! fill first attribute the direct way - fieldindx = MCT_AtrVt_indexRA(av1,"field1") - do i=1,avsize - av1%rAttr(fieldindx,i) = float(i) - enddo - -! fill second attribute using Av import function - allocate(testarray(avsize)) - do i=1,avsize - testarray(i)= cos((float(i)/npoints) * 3.14) - enddo - call MCT_AtrVt_importRA(av1,"field2",testarray) - -! 
initialize a Router - call MCT_Router_init(2,GSMap,comm1,Rout) - -! print out Av data - do i=1,asize - write(6,*) "model 1 data", myproc,i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - -! send the data - call MCT_Send(av1,Rout) - - - - end subroutine model1 - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model2(comm2) - - implicit none - - integer :: comm2,mysize,ier,asize,myproc - integer :: i - integer,dimension(1) :: start,length - type(GlobalSegMap) :: GSmap - type(AttrVect) :: av1 - type(Router) :: Rout -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm2,mysize,ier) - call mpi_comm_rank(comm2,myproc,ier) - write(6,*)"model2 size",mysize - -! initialize ThisMCTWorld - call MCTWorld_init(2,MPI_COMM_WORLD,comm2,2) - -! set up a grid and decomposition - asize = npoints/mysize - - start(1)= (myproc*asize) +1 - length(1)=asize - -! describe decomposition with MCT GSmap type - call MCT_GSMap_init(GSMap,start,length,0,comm2,2) - - write(6,*)"model 2 GSMap ngseg",myproc,GSMap%ngseg,start(1) - -! Initialize an Attribute Vector - call MCT_AtrVt_init(av1,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap,comm2)) - - write(6,*)"model 2 av size", MCT_AtrVt_lsize(av1) - -! initialize Av to be zero everywhere - call MCT_AtrVt_zero(av1) - -! initialize a Router - call MCT_Router_init(1,GSMap,comm2,Rout) - -! print out Av data before Recv - do i=1,asize - write(6,*) "model 2 data", myproc,i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - -! Recv the data - call MCT_Recv(av1,Rout) - -! print out Av data after Recv. - do i=1,asize - write(6,*) "model 2 data after", myproc,i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - - - end subroutine model2 - - end diff --git a/src/externals/mct/examples/simple/twocmp.seq.F90 b/src/externals/mct/examples/simple/twocmp.seq.F90 deleted file mode 100644 index d828d38f496..00000000000 --- a/src/externals/mct/examples/simple/twocmp.seq.F90 +++ /dev/null @@ -1,204 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: twocmp.seq.F90,v 1.6 2006-07-25 17:09:42 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: twocomponent.sequential -! -! -! !DESCRIPTION: Provide a simple example of using MCT to connect -! two components executing in sequence in a single executable. -! -! Data is passed between models by using input/output arguments -! in the run method. Compare with twocmp.seqNB.F90 -! -! !INTERFACE: -! - program twoseq -! -! !USES: -! -!--- Get only the things needed from MCT - use m_MCTWorld,only: MCTWorld_init => init - - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: MCT_GSMap_init => init - use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize - - use m_AttrVect,only : AttrVect - use m_AttrVect,only : MCT_AtrVt_init => init - use m_AttrVect,only : MCT_AtrVt_zero => zero - use m_AttrVect,only : MCT_AtrVt_lsize => lsize - use m_AttrVect,only : MCT_AtrVt_indexRA => indexRA - use m_AttrVect,only : MCT_AtrVt_importRA => importRAttr - - use m_Rearranger,only: Rearranger - use m_Rearranger,only: MCT_Rearranger_init => init - use m_Rearranger,only: MCT_Rearrange => Rearrange - - implicit none - - include 'mpif.h' - - integer,parameter :: ngx = 6 ! 
points in x-direction - integer,parameter :: ngy = 4 ! points in y-direction - integer ier,nprocs - integer,dimension(:),pointer :: myids - integer :: comm1,comm2,asize,mysize,i,myproc - integer,dimension(1) :: start1,length1 - integer,dimension(:),pointer :: start2,length2 -!----------------------------------------------------------------------- -! The Main program. -! We are implementing a single-executable, sequential-execution system. -! In this example, communication occurs through main using -! arguments. Both components share the same processors. - - type(GlobalSegMap) :: GSmap1,GSmap2 - type(AttrVect) :: av1,av2 - type(Rearranger) :: Rearr -!----------------------------------------------------------------------- - - call MPI_init(ier) - - call mpi_comm_size(MPI_COMM_WORLD, mysize,ier) - if(mysize .gt. 4) then - write(6,*)"The small problem size in this example & - &requires ", ngy,"or fewer processors." - stop - endif - call mpi_comm_rank(MPI_COMM_WORLD, myproc,ier) - - call mpi_comm_dup(MPI_COMM_WORLD,comm1,ier) - call mpi_comm_dup(MPI_COMM_WORLD,comm2,ier) - - allocate(myids(2)) - myids(1)=1 - myids(2)=2 - - call MCTWorld_init(2,MPI_COMM_WORLD,comm1,myids=myids) - -! set up a grid and decomposition -! first gsmap is the grid decomposed by rows -! theres 1 segment per processor - length1(1)= ngx * (ngy/mysize) - start1(1)= myproc * length1(1) + 1 - - write(6,*)'gsmap1', myproc,length1(1),start1(1) - call MCT_GSMap_init(GSMap1,start1,length1,0,comm1,1) - -! second gsmap is the grid decomposed by columns - allocate(length2(ngy),start2(ngy)) - - do i=1,ngy - length2(i)=ngx/mysize - start2(i)= (i-1)*ngx + 1 + myproc*length2(i) - write(6,*) 'gsmap2',myproc,i,length2(i),start2(i) - enddo - - - call MCT_GSMap_init(GSMap2,start2,length2,0,comm2,2) - - call MCT_AtrVt_init(av1,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap1,comm1)) - - call MCT_AtrVt_init(av2,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap2,comm2)) - - -! create a rearranger - call MCT_Rearranger_init(GSMap1,GSMap2,MPI_COMM_WORLD,Rearr) - -!-------------end of initialization steps - - -! Start up model1 which fills av1 with data. - call model1(comm1,av1) - -! print out Av data - do i=1,MCT_AtrVt_lsize(av1) - write(6,*) "model 1 data", myproc,i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - -! rearrange data from model1 so that model2 can use it. - call MCT_Rearrange(av1,av2,Rearr) - -! pass data to model2 (which will print it out) - call model2(comm2,av2) - - -! all done - call mpi_finalize(ier) - - contains - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model1(comm1,mod1av) ! the first model - - implicit none - - integer :: comm1,mysize,ier,asize,myproc - integer :: fieldindx,avsize,i - integer,dimension(1) :: start,length - real,pointer :: testarray(:) - - type(GlobalSegMap) :: GSmap - type(AttrVect) :: mod1av -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm1,mysize,ier) - call mpi_comm_rank(comm1,myproc,ier) - write(6,*)"model1 size",mysize - - - avsize = MCT_AtrVt_lsize(mod1av) - write(6,*)"model 1 av size", avsize - -! Fill Av with some data -! fill first attribute the direct way - fieldindx = MCT_AtrVt_indexRA(mod1av,"field1") - do i=1,avsize - mod1av%rAttr(fieldindx,i) = float(i+ 20*myproc) - enddo - -! fill second attribute using Av import function - allocate(testarray(avsize)) - do i=1,avsize - testarray(i)= cos((float(i+ 20*myproc)/24.) 
* 3.14) - enddo - call MCT_AtrVt_importRA(mod1av,"field2",testarray) - - - end subroutine model1 - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model2(comm2,mod2av) - - implicit none - - integer :: comm2,mysize,ier,asize,myproc - integer :: i - type(AttrVect) :: mod2av -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm2,mysize,ier) - call mpi_comm_rank(comm2,myproc,ier) - write(6,*)"model2 size",mysize - - asize = MCT_AtrVt_lsize(mod2av) - write(6,*)"model 2 av size", asize - -! print out Av data - do i=1,asize - write(6,*) "model 2 data after", myproc,i,mod2av%rAttr(1,i),mod2av%rAttr(2,i) - enddo - - - end subroutine model2 - - end diff --git a/src/externals/mct/examples/simple/twocmp.seqNB.F90 b/src/externals/mct/examples/simple/twocmp.seqNB.F90 deleted file mode 100644 index 82c93610e50..00000000000 --- a/src/externals/mct/examples/simple/twocmp.seqNB.F90 +++ /dev/null @@ -1,283 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: twocmp.seqNB.F90,v 1.4 2004-06-24 21:07:01 eong Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: twocmp.seqNB -! -! !DESCRIPTION: Provide a simple example of using MCT to connect to -! components executing sequentially in a single executable using -! the non-blocking communications to transfer data. -! -! -! !INTERFACE: -! - program twocmpseqNB -! -! !USES: -! -!--- Use only the things needed from MCT - use m_MCTWorld,only: MCTWorld_init => init - - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: MCT_GSMap_init => init - use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize - use m_GlobalSegMapComms,only: MCT_GSMap_recv => recv - use m_GlobalSegMapComms,only: MCT_GSMap_isend => isend - use m_GlobalSegMapComms,only: MCT_GSMap_bcast => bcast - - use m_AttrVect,only : AttrVect - use m_AttrVect,only : MCT_AtrVt_init => init - use m_AttrVect,only : MCT_AtrVt_zero => zero - use m_AttrVect,only : MCT_AtrVt_lsize => lsize - use m_AttrVect,only : MCT_AtrVt_indexRA => indexRA - use m_AttrVect,only : MCT_AtrVt_importRA => importRAttr - - use m_Router,only: Router - use m_Router,only: MCT_Router_init => init - - use m_Transfer,only : MCT_ISend => isend - use m_Transfer,only : MCT_Recv => recv - - implicit none - - include 'mpif.h' - - integer,parameter :: npoints = 24 ! total number of grid points - integer ier,nprocs,i - integer color,myrank,comm1,comm2 - integer,dimension(:),pointer :: myids - integer,dimension(:),pointer :: req1,req2 -!----------------------------------------------------------------------- -! The Main program. -! We are implementing a single-executable, seqeuntial-execution system. -! This small main program sets up MCTWorld, calls each "init" method -! and then calls each component in turn. - - type(GlobalSegMap) :: GSMap1,GSMap2 - type(AttrVect) :: Av1,Av2 - - call MPI_init(ier) - - call mpi_comm_size(MPI_COMM_WORLD, nprocs,ier) - call mpi_comm_rank(MPI_COMM_WORLD, myrank,ier) - -! Duplicate MPI_COMM_WORLD into a communicator for each model - call mpi_comm_dup(MPI_COMM_WORLD,comm1,ier) - call mpi_comm_dup(MPI_COMM_WORLD,comm2,ier) - - allocate(myids(2)) - myids(1)=1 - myids(2)=2 - -! 
Initialize MCT world - call MCTWorld_init(2,MPI_COMM_WORLD,comm1,myids=myids) - -! Initialize the models, pass in the communicators - call model1init(comm1,req1,GSMap1,Av1) - call model2init(comm2,req2,GSMap2,Av2) - -!-----------------end of initialization phase ------ -! Run the models, pass in the communicators - do i=1,5 - write(6,*) " " - write(6,*) "Step ",i - call model1(comm1,GSMap1,Av1) - call model2(comm2,GSMap2,Av2) - enddo - -! Models are finished. - call mpi_finalize(ier) - - contains - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model1init(comm1,req1,GSmap,av1) ! init the first model - - implicit none - - integer :: comm1,mysize,ier,asize,myproc - integer :: fieldindx,avsize,i - integer,dimension(1) :: start,length - real,pointer :: testarray(:) - integer,pointer :: req1(:) - - type(GlobalSegMap) :: GSmap - type(AttrVect) :: av1 -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm1,mysize,ier) - call mpi_comm_rank(comm1,myproc,ier) - write(6,*)myproc,"model1 size",mysize - -! set up a grid and decomposition - asize = npoints/mysize - - start(1)= (myproc*asize) +1 - length(1)=asize - -! describe decomposition with MCT GSmap type - call MCT_GSMap_init(GSMap,start,length,0,comm1,1) - - write(6,*)myproc,"model 1 GSMap ngseg",GSMap%ngseg,start(1) - - if(myproc .eq. 0) call MCT_GSMap_Isend(GSMap,2,100,req1) - -! Initialize an Attribute Vector - call MCT_AtrVt_init(av1,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap,comm1)) - write(6,*)myproc,"model1 got an aV" - - avsize = MCT_AtrVt_lsize(av1) - write(6,*)myproc,"model 1 av size", avsize - - end subroutine model1init - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- - subroutine model1(comm1,GSmap,av1) ! run the first model - - implicit none - - integer :: comm1,mysize,ier,asize,myproc - integer :: fieldindx,avsize,i - integer,dimension(1) :: start,length - real,pointer :: testarray(:) - - type(GlobalSegMap) :: GSmap,GSmap2 - type(AttrVect) :: av1 - type(Router),save :: Rout - logical,save :: firsttime=.FALSE. - - call mpi_comm_rank(comm1,myproc,ier) - - if(.not.firsttime) then -! get other GSMap - if(myproc .eq. 0) call MCT_GSMap_recv(GSmap2,2,110) - call MCT_GSMap_bcast(GSmap2,0,comm1) -! initialize a router - call MCT_Router_init(GSMap,GSmap2,comm1,Rout) - endif - firsttime=.TRUE. - - avsize = MCT_AtrVt_lsize(av1) - -! Fill Av with some data -! fill first attribute the direct way - fieldindx = MCT_AtrVt_indexRA(av1,"field1") - do i=1,avsize - av1%rAttr(fieldindx,i) = float(i +20*myproc) - enddo - -! fill second attribute using Av import function - allocate(testarray(avsize)) - do i=1,avsize - testarray(i)= cos((float(i+ 20*myproc)/npoints) * 3.14) - enddo - call MCT_AtrVt_importRA(av1,"field2",testarray) - -! print out Av data - do i=1,avsize - write(6,*)myproc, "model 1 data", i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - -! send the data - call MCT_ISend(av1,Rout) - - - - end subroutine model1 - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model2init(comm2,req2,GSmap,av1) ! 
init model 2 - - implicit none - - integer :: comm2,mysize,ier,asize,myproc - integer :: i - integer,dimension(1) :: start,length - type(GlobalSegMap) :: GSmap - type(AttrVect) :: av1 - integer,pointer :: req2(:) -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm2,mysize,ier) - call mpi_comm_rank(comm2,myproc,ier) - write(6,*)myproc,"model2 size",mysize - -! set up a grid and decomposition - asize = npoints/mysize - - start(1)= (myproc*asize) +1 - length(1)=asize - -! describe decomposition with MCT GSmap type - call MCT_GSMap_init(GSMap,start,length,0,comm2,2) - - write(6,*)myproc, "model 2 GSMap ngseg",GSMap%ngseg,start(1) - - if(myproc .eq. 0) call MCT_GSMap_Isend(GSMap,1,110,req2) - -! Initialize an Attribute Vector - call MCT_AtrVt_init(av1,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap,comm2)) - write(6,*)myproc,"model2 got an aV" - - write(6,*)myproc, "model 2 av size", MCT_AtrVt_lsize(av1) - - end subroutine model2init - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model2(comm2,GSmap,av1) - - implicit none - - integer :: comm2,mysize,ier,avsize,myproc - integer :: i - integer,dimension(1) :: start,length - type(GlobalSegMap) :: GSmap,GSmap2 - type(AttrVect) :: av1 - type(Router),save :: Rout - logical,save :: firsttime=.FALSE. -!--------------------------- - -! initialize Av to be zero everywhere - call MCT_AtrVt_zero(av1) - - call mpi_comm_rank(comm2,myproc,ier) - if(.not.firsttime) then -! receive other GSMap - if(myproc .eq. 0) call MCT_GSMap_recv(GSmap2,1,100) - call MCT_GSMap_bcast(GSmap2,0,comm2) -! initialize a Router - call MCT_Router_init(GSMap,GSmap2,comm2,Rout) - endif - firsttime=.TRUE. - - avsize = MCT_AtrVt_lsize(av1) - -! print out Av data before Recv - do i=1,avsize - write(6,*) myproc,"model 2 data", i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - -! Recv the data - call MCT_Recv(av1,Rout) - -! print out Av data after Recv. - do i=1,avsize - write(6,*) myproc,"model 2 data after", i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - - - end subroutine model2 - - end diff --git a/src/externals/mct/examples/simple/twocmp.seqUnvn.F90 b/src/externals/mct/examples/simple/twocmp.seqUnvn.F90 deleted file mode 100644 index 7e36e5a26a9..00000000000 --- a/src/externals/mct/examples/simple/twocmp.seqUnvn.F90 +++ /dev/null @@ -1,242 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: twocmp.seqUnvn.F90,v 1.6 2007-12-19 17:13:17 rloy Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: twocomponentUneven.sequential -! -! !DESCRIPTION: Provide a simple example of using MCT to connect two components -! In this case the models are running sequentialy but the second model -! is only running on 1 processor. -! -! !INTERFACE: -! - program twosequn -! -! !USES: -! 
-!--- Get only the things needed from MCT - use m_MCTWorld,only: MCTWorld_init => init - - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: MCT_GSMap_init => init - use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize - - use m_AttrVect,only : AttrVect - use m_AttrVect,only : MCT_AtrVt_init => init - use m_AttrVect,only : MCT_AtrVt_zero => zero - use m_AttrVect,only : MCT_AtrVt_lsize => lsize - use m_AttrVect,only : MCT_AtrVt_indexRA => indexRA - use m_AttrVect,only : MCT_AtrVt_importRA => importRAttr - - use m_Rearranger,only: Rearranger - use m_Rearranger,only: MCT_Rearranger_init => init - use m_Rearranger,only: MCT_Rearrange => Rearrange - - implicit none - - include 'mpif.h' - - integer,parameter :: ngx = 6 ! points in x-direction - integer,parameter :: ngy = 4 ! points in y-direction - - integer ier,world_group,model2_group,myrank2,myrank3 - integer,dimension(:),pointer :: myids,mycomms,peloc2 - integer,dimension(:,:),pointer :: GlobalId - integer :: comm1,comm2,asize,mysize,i,myproc - integer :: commsize - integer,dimension(1) :: start1,length1,ranks - integer,dimension(:),allocatable :: start2,length2 -!----------------------------------------------------------------------- -! The Main program. -! We are implementing a single-executable, sequential-execution system. -! Because its sequential, communication occurs through the main using -! arguments. The second component is only running on 1 processor - - type(GlobalSegMap) :: GSmap1,GSmap2 - type(AttrVect) :: av1,av2 - type(Rearranger) :: Rearr - - call MPI_init(ier) - - call mpi_comm_size(MPI_COMM_WORLD, mysize,ier) - if(mysize .gt. 12) then - write(6,*)"Must run on less than 12 processors" - stop - endif - call mpi_comm_rank(MPI_COMM_WORLD, myproc,ier) - -! the first model is running on all the processors so give -! it a dubplicate of MPI_COMM_WORLD for its communicator - call mpi_comm_dup(MPI_COMM_WORLD,comm1,ier) - -! the second model is only running on one processor -! so use mpi_groups methods to define its communicator - call mpi_comm_group(MPI_COMM_WORLD,world_group,ier) - -! need a communicator that only has the first processor - ranks(1)=0 -! define the group - call mpi_group_incl(world_group,1,ranks,model2_group,ier) -! now define the communicator - ! first initialize it - comm2=MPI_COMM_NULL - call mpi_comm_create(MPI_COMM_WORLD,model2_group,comm2,ier) - -! don't need the groups anymore - call mpi_group_free(world_group,ier) - call mpi_group_free(model2_group,ier) - -! allocate arrays for the ids and comms - allocate(myids(2),mycomms(2)) - -! Set the arrays to their values. - myids(1)=1 - myids(2)=2 - mycomms(1)=comm1 - mycomms(2)=comm2 - -! now call the initm_ version of MCTWorld_init - call MCTWorld_init(2,MPI_COMM_WORLD,mycomms,myids) - - -! first gsmap is the grid decomposed in one dimension -! there is 1 segment per processor - length1(1)= (ngx * ngy)/mysize - start1(1)= myproc * length1(1) + 1 - - write(6,*)'gsmap1', myproc,length1(1),start1(1) - call MCT_GSMap_init(GSMap1,start1,length1,0,comm1,1) - -! second gsmap is the grid on one processor - -! for GSMap init to work, the size of the start and length arrays -! must equal the number of local segments. So I must allocate -! size zero arrays on the other processors. - if(myproc .eq. 
0) then - allocate(start2(1),length2(1)) - length2(1) = ngx*ngy - start2(1) = 1 - else - allocate(start2(0),length2(0)) - endif - - call MCT_GSMap_init(GSMap2,start2,length2,0,comm1,2) - write(6,*)'gsmap2', myproc,GSMap2%ngseg,GSmap2%gsize,GSmap2%start(1), & - GSmap2%pe_loc(1),GSmap2%length(1) - - -! initialize an Av on each GSMap - call MCT_AtrVt_init(av1,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap1,comm1)) - -! Use comm1 because lsize of GSMap2 on comm1 will return 0 on non-root processors. -! We need av2 to be full-sized on proc 0 and 0 size on other processors. - call MCT_AtrVt_init(av2,rList="field1:field2",lsize=MCT_GSMap_lsize(GSMap2,comm1)) - - -! create a rearranger. Use the communicator which contains all processors -! involved in the rearrangement, comm1 - call MCT_Rearranger_init(GSMap1,GSMap2,comm1,Rearr) - -!-------------end of initialization steps - - -! Start up model1 which fills av1 with data. - call model1(comm1,av1) - -! print out Av data - do i=1,MCT_AtrVt_lsize(av1) - write(6,*) "model 1 data", myproc,i,av1%rAttr(1,i),av1%rAttr(2,i) - enddo - -! rearrange data from model1 so that model2 can use it. - call MCT_Rearrange(av1,av2,Rearr) - -! pass data to model2 (which will print it out) -! model2 should only run on one processor. - if(myproc .eq. 0) then - call model2(comm2,av2) - endif - - -! all done - call MPI_Barrier(MPI_COMM_WORLD,ier) - if (myproc==0) write(6,*) 'All Done' - - call mpi_finalize(ier) - - contains - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model1(comm1,mod1av) ! the first model - - implicit none - - integer :: comm1,mysize,ier,asize,myproc - integer :: fieldindx,avsize,i - integer,dimension(1) :: start,length - real,pointer :: testarray(:) - - type(GlobalSegMap) :: GSmap - type(AttrVect) :: mod1av -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm1,mysize,ier) - call mpi_comm_rank(comm1,myproc,ier) - write(6,*)"model1 myproc,mysize",myproc,mysize - - - avsize = MCT_AtrVt_lsize(mod1av) - write(6,*)"model 1 myproc, av size", myproc,avsize - -! Fill Av with some data -! fill first attribute the direct way - fieldindx = MCT_AtrVt_indexRA(mod1av,"field1") - do i=1,avsize - mod1av%rAttr(fieldindx,i) = float(i+ 20*myproc) - enddo - -! fill second attribute using Av import function - allocate(testarray(avsize)) - do i=1,avsize - testarray(i)= cos((float(i+ 20*myproc)/24.) * 3.14) - enddo - call MCT_AtrVt_importRA(mod1av,"field2",testarray) - - - end subroutine model1 - -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- -! !ROUTINE: - subroutine model2(comm2,mod2av) - - implicit none - - integer :: comm2,mysize,ier,asize,myproc - integer :: i - type(AttrVect) :: mod2av -!--------------------------- - -! find local rank and size - call mpi_comm_size(comm2,mysize,ier) - call mpi_comm_rank(comm2,myproc,ier) - write(6,*)"model2 myproc,mysize",myproc,mysize - - asize = MCT_AtrVt_lsize(mod2av) - write(6,*)"model 2 myproc, av size", myproc,asize - -! 
print out Av data - do i=1,asize - write(6,*) "model 2 data after", myproc,i,mod2av%rAttr(1,i),mod2av%rAttr(2,i) - enddo - - - end subroutine model2 - - end diff --git a/src/externals/mct/install-sh b/src/externals/mct/install-sh deleted file mode 100755 index 36f96f3e033..00000000000 --- a/src/externals/mct/install-sh +++ /dev/null @@ -1,276 +0,0 @@ -#!/bin/sh -# -# install - install a program, script, or datafile -# This comes from X11R5 (mit/util/scripts/install.sh). -# -# Copyright 1991 by the Massachusetts Institute of Technology -# -# Permission to use, copy, modify, distribute, and sell this software and its -# documentation for any purpose is hereby granted without fee, provided that -# the above copyright notice appear in all copies and that both that -# copyright notice and this permission notice appear in supporting -# documentation, and that the name of M.I.T. not be used in advertising or -# publicity pertaining to distribution of the software without specific, -# written prior permission. M.I.T. makes no representations about the -# suitability of this software for any purpose. It is provided "as is" -# without express or implied warranty. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# `make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. It can only install one file at a time, a restriction -# shared with many OS's install programs. - - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit="${DOITPROG-}" - - -# put in absolute paths if you don't have them in your path; or use env. vars. - -mvprog="${MVPROG-mv}" -cpprog="${CPPROG-cp}" -chmodprog="${CHMODPROG-chmod}" -chownprog="${CHOWNPROG-chown}" -chgrpprog="${CHGRPPROG-chgrp}" -stripprog="${STRIPPROG-strip}" -rmprog="${RMPROG-rm}" -mkdirprog="${MKDIRPROG-mkdir}" - -transformbasename="" -transform_arg="" -instcmd="$mvprog" -chmodcmd="$chmodprog 0755" -chowncmd="" -chgrpcmd="" -stripcmd="" -rmcmd="$rmprog -f" -mvcmd="$mvprog" -src="" -dst="" -dir_arg="" - -while [ x"$1" != x ]; do - case $1 in - -c) instcmd=$cpprog - shift - continue;; - - -d) dir_arg=true - shift - continue;; - - -m) chmodcmd="$chmodprog $2" - shift - shift - continue;; - - -o) chowncmd="$chownprog $2" - shift - shift - continue;; - - -g) chgrpcmd="$chgrpprog $2" - shift - shift - continue;; - - -s) stripcmd=$stripprog - shift - continue;; - - -t=*) transformarg=`echo $1 | sed 's/-t=//'` - shift - continue;; - - -b=*) transformbasename=`echo $1 | sed 's/-b=//'` - shift - continue;; - - *) if [ x"$src" = x ] - then - src=$1 - else - # this colon is to work around a 386BSD /bin/sh bug - : - dst=$1 - fi - shift - continue;; - esac -done - -if [ x"$src" = x ] -then - echo "$0: no input file specified" >&2 - exit 1 -else - : -fi - -if [ x"$dir_arg" != x ]; then - dst=$src - src="" - - if [ -d "$dst" ]; then - instcmd=: - chmodcmd="" - else - instcmd=$mkdirprog - fi -else - -# Waiting for this to be detected by the "$instcmd $src $dsttmp" command -# might cause directories to be created, which would be especially bad -# if $src (and thus $dsttmp) contains '*'. 
- - if [ -f "$src" ] || [ -d "$src" ] - then - : - else - echo "$0: $src does not exist" >&2 - exit 1 - fi - - if [ x"$dst" = x ] - then - echo "$0: no destination specified" >&2 - exit 1 - else - : - fi - -# If destination is a directory, append the input filename; if your system -# does not like double slashes in filenames, you may need to add some logic - - if [ -d "$dst" ] - then - dst=$dst/`basename "$src"` - else - : - fi -fi - -## this sed command emulates the dirname command -dstdir=`echo "$dst" | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` - -# Make sure that the destination directory exists. -# this part is taken from Noah Friedman's mkinstalldirs script - -# Skip lots of stat calls in the usual case. -if [ ! -d "$dstdir" ]; then -defaultIFS=' - ' -IFS="${IFS-$defaultIFS}" - -oIFS=$IFS -# Some sh's can't handle IFS=/ for some reason. -IFS='%' -set - `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` -IFS=$oIFS - -pathcomp='' - -while [ $# -ne 0 ] ; do - pathcomp=$pathcomp$1 - shift - - if [ ! -d "$pathcomp" ] ; - then - $mkdirprog "$pathcomp" - else - : - fi - - pathcomp=$pathcomp/ -done -fi - -if [ x"$dir_arg" != x ] -then - $doit $instcmd "$dst" && - - if [ x"$chowncmd" != x ]; then $doit $chowncmd "$dst"; else : ; fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd "$dst"; else : ; fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd "$dst"; else : ; fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd "$dst"; else : ; fi -else - -# If we're going to rename the final executable, determine the name now. - - if [ x"$transformarg" = x ] - then - dstfile=`basename "$dst"` - else - dstfile=`basename "$dst" $transformbasename | - sed $transformarg`$transformbasename - fi - -# don't allow the sed command to completely eliminate the filename - - if [ x"$dstfile" = x ] - then - dstfile=`basename "$dst"` - else - : - fi - -# Make a couple of temp file names in the proper directory. - - dsttmp=$dstdir/#inst.$$# - rmtmp=$dstdir/#rm.$$# - -# Trap to clean up temp files at exit. - - trap 'status=$?; rm -f "$dsttmp" "$rmtmp" && exit $status' 0 - trap '(exit $?); exit' 1 2 13 15 - -# Move or copy the file name to the temp name - - $doit $instcmd "$src" "$dsttmp" && - -# and set any options; do chmod last to preserve setuid bits - -# If any of these fail, we abort the whole thing. If we want to -# ignore errors from any of these, just make sure not to ignore -# errors from the above "$doit $instcmd $src $dsttmp" command. - - if [ x"$chowncmd" != x ]; then $doit $chowncmd "$dsttmp"; else :;fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd "$dsttmp"; else :;fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd "$dsttmp"; else :;fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd "$dsttmp"; else :;fi && - -# Now remove or move aside any old file at destination location. We try this -# two ways since rm can't unlink itself on some systems and the destination -# file might be busy for other reasons. In this case, the final cleanup -# might fail but the new file should still install successfully. - -{ - if [ -f "$dstdir/$dstfile" ] - then - $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null || - $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null || - { - echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2 - (exit 1); exit - } - else - : - fi -} && - -# Now rename the file to the real destination. - - $doit $mvcmd "$dsttmp" "$dstdir/$dstfile" - -fi && - -# The final little trick to "correctly" pass the exit status to the exit trap. 
- -{ - (exit 0); exit -} diff --git a/src/externals/mct/m4/README b/src/externals/mct/m4/README deleted file mode 100644 index b748178e2c7..00000000000 --- a/src/externals/mct/m4/README +++ /dev/null @@ -1,5 +0,0 @@ -This directory contains some specific tests used in the MCT autoconf system. -They are placed here to make the configure.ac a little cleaner. - -These are only needed if you are trying to recreate the "configure" script from -the "configure.ac" file. diff --git a/src/externals/mct/m4/acx_mpi.m4 b/src/externals/mct/m4/acx_mpi.m4 deleted file mode 100644 index 77f433d8217..00000000000 --- a/src/externals/mct/m4/acx_mpi.m4 +++ /dev/null @@ -1,146 +0,0 @@ -dnl @synopsis ACX_MPI([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) -dnl -dnl @summary figure out how to compile/link code with MPI -dnl -dnl This macro tries to find out how to compile programs that use MPI -dnl (Message Passing Interface), a standard API for parallel process -dnl communication (see http://www-unix.mcs.anl.gov/mpi/) -dnl -dnl On success, it sets the MPICC, MPICXX, or MPIF77 output variable to -dnl the name of the MPI compiler, depending upon the current language. -dnl (This may just be $CC/$CXX/$F77, but is more often something like -dnl mpicc/mpiCC/mpif77.) It also sets MPILIBS to any libraries that are -dnl needed for linking MPI (e.g. -lmpi, if a special -dnl MPICC/MPICXX/MPIF77 was not found). -dnl -dnl If you want to compile everything with MPI, you should set: -dnl -dnl CC="$MPICC" #OR# CXX="$MPICXX" #OR# F77="$MPIF77" -dnl LIBS="$MPILIBS $LIBS" -dnl -dnl NOTE: The above assumes that you will use $CC (or whatever) for -dnl linking as well as for compiling. (This is the default for automake -dnl and most Makefiles.) -dnl -dnl The user can force a particular library/compiler by setting the -dnl MPICC/MPICXX/MPIF77 and/or MPILIBS environment variables. -dnl -dnl ACTION-IF-FOUND is a list of shell commands to run if an MPI -dnl library is found, and ACTION-IF-NOT-FOUND is a list of commands to -dnl run it if it is not found. If ACTION-IF-FOUND is not specified, the -dnl default action will define HAVE_MPI. -dnl -dnl @category InstalledPackages -dnl @author Steven G. 
Johnson -dnl @author Julian Cummings -dnl @version 2006-10-13 -dnl @license GPLWithACException - -AC_DEFUN([ACX_MPI], [ -AC_PREREQ(2.50) dnl for AC_LANG_CASE - -AC_LANG_CASE([C], [ - AC_REQUIRE([AC_PROG_CC]) - AC_ARG_VAR(MPICC,[MPI C compiler command]) - AC_CHECK_PROGS(MPICC, mpicc hcc mpxlc_r mpxlc mpcc cmpicc, $CC) - acx_mpi_save_CC="$CC" - CC="$MPICC" - AC_SUBST(MPICC) -], -[C++], [ - AC_REQUIRE([AC_PROG_CXX]) - AC_ARG_VAR(MPICXX,[MPI C++ compiler command]) - AC_CHECK_PROGS(MPICXX, mpic++ mpicxx mpiCC hcp mpxlC_r mpxlC mpCC cmpic++, $CXX) - acx_mpi_save_CXX="$CXX" - CXX="$MPICXX" - AC_SUBST(MPICXX) -], -[Fortran 77], [ - AC_REQUIRE([AC_PROG_F77]) - AC_ARG_VAR(MPIF77,[MPI Fortran 77 compiler command]) - AC_CHECK_PROGS(MPIF77, mpif77 hf77 mpxlf mpf77 mpif90 mpf90 mpxlf90 mpxlf95 mpxlf_r cmpifc cmpif90c, $F77) - acx_mpi_save_F77="$F77" - F77="$MPIF77" - AC_SUBST(MPIF77) -], -[Fortran], [ - AC_REQUIRE([AC_PROG_FC]) - AC_ARG_VAR(MPIFC,[MPI Fortran compiler command]) - AC_CHECK_PROGS(MPIFC, mpif90 hf90 mpxlf90 mpxlf95 mpf90 cmpifc cmpif90c, $FC) - acx_mpi_save_FC="$FC" - FC="$MPIFC" - AC_SUBST(MPIFC) -]) - -if test x = x"$MPILIBS"; then - AC_LANG_CASE([C], [AC_CHECK_FUNC(MPI_Init, [MPILIBS=" "])], - [C++], [AC_CHECK_FUNC(MPI_Init, [MPILIBS=" "])], - [Fortran 77], [AC_MSG_CHECKING([for MPI_Init]) - AC_LINK_IFELSE([AC_LANG_PROGRAM([],[ call MPI_Init])],[MPILIBS=" " - AC_MSG_RESULT(yes)], [AC_MSG_RESULT(no)])], - [Fortran], [AC_MSG_CHECKING([for MPI_Init]) - AC_LINK_IFELSE([AC_LANG_PROGRAM([],[ call MPI_Init])],[MPILIBS=" " - AC_MSG_RESULT(yes)], [AC_MSG_RESULT(no)])]) -fi -AC_LANG_CASE([Fortran 77], [ - if test x = x"$MPILIBS"; then - AC_CHECK_LIB(fmpi, MPI_Init, [MPILIBS="-lfmpi"]) - fi - if test x = x"$MPILIBS"; then - AC_CHECK_LIB(fmpich, MPI_Init, [MPILIBS="-lfmpich"]) - fi -], -[Fortran], [ - if test x = x"$MPILIBS"; then - AC_CHECK_LIB(fmpi, MPI_Init, [MPILIBS="-lfmpi"]) - fi - if test x = x"$MPILIBS"; then - AC_CHECK_LIB(mpichf90, MPI_Init, [MPILIBS="-lmpichf90"]) - fi -]) -if test x = x"$MPILIBS"; then - AC_CHECK_LIB(mpi, MPI_Init, [MPILIBS="-lmpi"]) -fi -if test x = x"$MPILIBS"; then - AC_CHECK_LIB(mpich, MPI_Init, [MPILIBS="-lmpich"]) -fi - -dnl We have to use AC_TRY_COMPILE and not AC_CHECK_HEADER because the -dnl latter uses $CPP, not $CC (which may be mpicc). 
-AC_LANG_CASE([C], [if test x != x"$MPILIBS"; then - AC_MSG_CHECKING([for mpi.h]) - AC_TRY_COMPILE([#include ],[],[AC_MSG_RESULT(yes)], [MPILIBS="" - AC_MSG_RESULT(no)]) -fi], -[C++], [if test x != x"$MPILIBS"; then - AC_MSG_CHECKING([for mpi.h]) - AC_TRY_COMPILE([#include ],[],[AC_MSG_RESULT(yes)], [MPILIBS="" - AC_MSG_RESULT(no)]) -fi], -[Fortran 77], [if test x != x"$MPILIBS"; then - AC_MSG_CHECKING([for mpif.h]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[ include 'mpif.h'])],[AC_MSG_RESULT(yes)], [MPILIBS="" - AC_MSG_RESULT(no)]) -fi], -[Fortran], [if test x != x"$MPILIBS"; then - AC_MSG_CHECKING([for mpif.h]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[ include 'mpif.h'])],[AC_MSG_RESULT(yes)], [MPILIBS="" - AC_MSG_RESULT(no)]) -fi]) - -AC_LANG_CASE([C], [CC="$acx_mpi_save_CC"], - [C++], [CXX="$acx_mpi_save_CXX"], - [Fortran 77], [F77="$acx_mpi_save_F77"], - [Fortran], [FC="$acx_mpi_save_FC"]) - -AC_SUBST(MPILIBS) - -# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x = x"$MPILIBS"; then - $2 - : -else - ifelse([$1],,[AC_DEFINE(HAVE_MPI,1,[Define if you have the MPI library.])],[$1]) - : -fi -])dnl ACX_MPI diff --git a/src/externals/mct/m4/ax_fc_version.m4 b/src/externals/mct/m4/ax_fc_version.m4 deleted file mode 100644 index c7e2eaec3c7..00000000000 --- a/src/externals/mct/m4/ax_fc_version.m4 +++ /dev/null @@ -1,51 +0,0 @@ -#AX_FC_VERSION_OUTPUT([FLAG = $ac_cv_prog_fc_version]) -# ------------------------------------------------- -# Link a trivial Fortran program, compiling with a version output FLAG -# (which default value, $ac_cv_prog_fc_version, is computed by -# AX_FC_VERSION), and return the output in $ac_fc_version_output. -AC_DEFUN([AX_FC_VERSION_OUTPUT], -[AC_REQUIRE([AC_PROG_FC])dnl -AC_LANG_PUSH(Fortran)dnl - -AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran 90 compiler in order to get "version" output -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS m4_default([$1], [$ac_cv_prog_fc_version])" -(eval echo $as_me:__oline__: \"$ac_link\") >&AS_MESSAGE_LOG_FD -ac_fc_version_output=`eval $ac_link AS_MESSAGE_LOG_FD>&1 2>&1 | grep -v 'Driving:'` -echo "$ac_fc_version_output" >&AS_MESSAGE_LOG_FD -FCFLAGS=$ac_save_FCFLAGS - -rm -f conftest.* -AC_LANG_POP(Fortran)dnl - -])# AX_FC_VERSION_OUTPUT - -# AX_FC_VERSION -# -------------- -# -AC_DEFUN([AX_FC_VERSION], -[AC_CACHE_CHECK([how to get the version output from $FC], - [ac_cv_prog_fc_version], -[AC_LANG_ASSERT(Fortran) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], -[ac_cv_prog_fc_version= -# Try some options frequently used verbose output -for ac_version in -V -version --version +version -qversion; do - AX_FC_VERSION_OUTPUT($ac_version) - # look for "copyright" constructs in the output - for ac_arg in $ac_fc_version_output; do - case $ac_arg in - COPYRIGHT | copyright | Copyright | '(c)' | '(C)' | Compiler | Compilers | Version | Version:) - ac_cv_prog_fc_version=$ac_version - break 2 ;; - esac - done -done -if test -z "$ac_cv_prog_fc_version"; then - AC_MSG_WARN([cannot determine how to obtain version information from $FC]) -fi], - [AC_MSG_WARN([compilation failed])]) -])])# AX_FC_VERSION diff --git a/src/externals/mct/m4/fortran.m4 b/src/externals/mct/m4/fortran.m4 deleted file mode 100644 index c835ce232aa..00000000000 --- a/src/externals/mct/m4/fortran.m4 +++ /dev/null @@ -1,855 +0,0 @@ -# This file is part of Autoconf. -*- Autoconf -*- -# Fortran languages support. -# Copyright (C) 2001, 2003-2011 Free Software Foundation, Inc. 
- -# This file is part of Autoconf. This program is free -# software; you can redistribute it and/or modify it under the -# terms of the GNU General Public License as published by the -# Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# Under Section 7 of GPL version 3, you are granted additional -# permissions described in the Autoconf Configure Script Exception, -# version 3.0, as published by the Free Software Foundation. -# -# You should have received a copy of the GNU General Public License -# and a copy of the Autoconf Configure Script Exception along with -# this program; see the files COPYINGv3 and COPYING.EXCEPTION -# respectively. If not, see . - -# Written by David MacKenzie, with help from -# Franc,ois Pinard, Karl Berry, Richard Pixley, Ian Lance Taylor, -# Roland McGrath, Noah Friedman, david d zuhn, and many others. - - -# Table of Contents: -# -# Preamble -# -# 0. Utility macros -# -# 1. Language selection -# and routines to produce programs in a given language. -# -# 2. Producing programs in a given language. -# -# 3. Looking for a compiler -# And possibly the associated preprocessor. -# -# 4. Compilers' characteristics. - -# AC_FC_PP_SRCEXT(EXT, [ACTION-IF-SUCCESS], [ACTION-IF-FAILURE]) -# -------------------------------------------------------------- -# Like AC_FC_SRCEXT, set the source-code extension used in Fortran (FC) tests -# to EXT (which defaults to f). Also, look for any necessary additional -# FCFLAGS needed to allow this extension for preprocessed Fortran, and store -# them in the output variable FCFLAGS_ (e.g. FCFLAGS_f90 for EXT=f90). -# If successful, call ACTION-IF-SUCCESS. If unable to compile preprocessed -# source code with EXT, call ACTION-IF-FAILURE, which defaults to failing with -# an error message. -# -# Some compilers allow preprocessing with either a Fortran preprocessor or -# with the C preprocessor (cpp). Prefer the Fortran preprocessor, to deal -# correctly with continuation lines, `//' (not a comment), and preserve white -# space (for fixed form). -# -# (The flags for the current source-code extension, if any, are stored in -# $ac_fcflags_srcext and used automatically in subsequent autoconf tests.) -# -# For ordinary extensions like f90, etcetera, the modified FCFLAGS -# are needed for IBM's xlf*. Also, for Intel's ifort compiler, the -# $FCFLAGS_ variable *must* go immediately before the source file on the -# command line, unlike other $FCFLAGS. Ugh. 
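(For illustration only; the source sketch below is not part of the deleted macro and its file name is assumed.) The probe this macro runs amounts to compiling a small preprocessed Fortran source such as:

      program conftest
#if 0
      choke me
#endif
      end program conftest

With preprocessing enabled the #if 0 block is stripped and the program compiles; without it the directives themselves are rejected as syntax errors, so the candidate flag is discarded. Each attempt simply prepends one flag from the table that follows to the compile line, for example "gfortran -cpp conftest.f90" when using GNU Fortran with a lower-case extension.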
-# -# Known extensions that enable preprocessing by default, and flags to force it: -# GNU: .F .F90 .F95 .F03 .F08, -cpp for most others, -# -x f77-cpp-input for .f77 .F77; -x f95-cpp-input for gfortran < 4.4 -# SGI: .F .F90, -ftpp or -cpp for .f .f90, -E write preproc to stdout -# -macro_expand enable macro expansion everywhere (with -ftpp) -# -P preproc only, save in .i, no #line's -# SUN: .F .F95, -fpp for others; -xpp={fpp,cpp} for preprocessor selection -# -F preprocess only (save in lowercase extension) -# IBM: .F .F77 .F90 .F95 .F03, -qsuffix=cpp=EXT for extension .EXT to invoke cpp -# -WF,-qnofpp -WF,-qfpp=comment:linecont:nocomment:nolinecont -# -WF,-qlanglvl=classic or not -qnoescape (trigraph problems) -# -d no #line in output, -qnoobject for preprocessing only (output in .f) -# -q{no,}ppsuborigarg substitute original macro args before expansion -# HP: .F, +cpp={yes|no|default} use cpp, -cpp, +cpp_keep save in .i/.i90 -# PGI: -Mpreprocess -# Absoft: .F .FOR .F90 .F95, -cpp for others -# Cray: .F .F90 .FTN, -e Z for others; -F enable macro expansion everywhere -# Intel: .F .F90, -fpp for others, but except for .f and .f90, -Tf may also be -# needed right before the source file name -# PathScale: .F .F90 .F95, -ftpp or -cpp for .f .f90 .f95 -# -macro_expand for expansion everywhere, -P for no #line in output -# Lahey: .F .FOR .F90 .F95, -Cpp -# NAGWare: .F .F90 .F95, .ff .ff90 .ff95 (new), -fpp for others -# Compaq/Tru64: .F .F90, -cpp, -P keep .i file, -P keep .i file -# f2c: .F, -cpp -# g95: .F .FOR .F90 .F95 .F03, -cpp -no-cpp, -E for stdout -AC_DEFUN([AC_FC_PP_SRCEXT], -[AC_LANG_PUSH(Fortran)dnl -AC_CACHE_CHECK([for Fortran flag to compile preprocessed .$1 files], - ac_cv_fc_pp_srcext_$1, -[ac_ext=$1 -ac_fcflags_pp_srcext_save=$ac_fcflags_srcext -ac_fcflags_srcext= -ac_cv_fc_pp_srcext_$1=unknown -case $ac_ext in #( - [[fF]]77) ac_try=f77-cpp-input;; #( - *) ac_try=f95-cpp-input;; -esac -for ac_flag in none -ftpp -fpp -Tf "-fpp -Tf" -xpp=fpp -Mpreprocess "-e Z" \ - -cpp -xpp=cpp -qsuffix=cpp=$1 "-x $ac_try" +cpp -Cpp; do - test "x$ac_flag" != xnone && ac_fcflags_srcext="$ac_flag" - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [[ -#if 0 -#include - choke me -#endif]])], - [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [[ -#if 1 -#include - choke me -#endif]])], - [], - [ac_cv_fc_pp_srcext_$1=$ac_flag; break])]) -done -rm -f conftest.$ac_objext conftest.$1 -ac_fcflags_srcext=$ac_fcflags_pp_srcext_save -]) -if test "x$ac_cv_fc_pp_srcext_$1" = xunknown; then - m4_default([$3], - [AC_MSG_ERROR([Fortran could not compile preprocessed .$1 files])]) -else - ac_fc_srcext=$1 - if test "x$ac_cv_fc_pp_srcext_$1" = xnone; then - ac_fcflags_srcext="" - FCFLAGS_[]$1[]="" - else - ac_fcflags_srcext=$ac_cv_fc_pp_srcext_$1 - FCFLAGS_[]$1[]=$ac_cv_fc_pp_srcext_$1 - fi - AC_SUBST(FCFLAGS_[]$1) - $2 -fi -AC_LANG_POP(Fortran)dnl -])# AC_FC_PP_SRCEXT - -# AC_FC_PP_DEFINE([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ------------------------------------------------------------------- -# Find a flag to specify defines for preprocessed Fortran. Not all -# Fortran compilers use -D. Substitute FC_DEFINE with the result and -# call ACTION-IF-SUCCESS (defaults to nothing) if successful, and -# ACTION-IF-FAILURE (defaults to failing with an error message) if not. -# -# Known flags: -# IBM: -WF,-D -# Lahey/Fujitsu: -Wp,-D older versions??? 
-# f2c: -D or -Wc,-D -# others: -D -AC_DEFUN([AC_FC_PP_DEFINE], -[AC_LANG_PUSH([Fortran])dnl -ac_fc_pp_define_srcext_save=$ac_fc_srcext -AC_FC_PP_SRCEXT([F]) -AC_CACHE_CHECK([how to define symbols for preprocessed Fortran], - [ac_cv_fc_pp_define], -[ac_fc_pp_define_srcext_save=$ac_fc_srcext -ac_cv_fc_pp_define=unknown -ac_fc_pp_define_FCFLAGS_save=$FCFLAGS -for ac_flag in -D -WF,-D -Wp,-D -Wc,-D -do - FCFLAGS="$ac_fc_pp_define_FCFLAGS_save ${ac_flag}FOOBAR ${ac_flag}ZORK=42" - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [[ -#ifndef FOOBAR - choke me -#endif -#if ZORK != 42 - choke me -#endif]])], - [ac_cv_fc_pp_define=$ac_flag]) - test x"$ac_cv_fc_pp_define" != xunknown && break -done -FCFLAGS=$ac_fc_pp_define_FCFLAGS_save -]) -ac_fc_srcext=$ac_fc_pp_define_srcext_save -if test "x$ac_cv_fc_pp_define" = xunknown; then - FC_DEFINE= - m4_default([$2], - [AC_MSG_ERROR([Fortran does not allow to define preprocessor symbols], 77)]) -else - FC_DEFINE=$ac_cv_fc_pp_define - $1 -fi -AC_SUBST([FC_DEFINE])dnl -AC_LANG_POP([Fortran])dnl -]) - - -# AC_FC_FREEFORM([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ------------------------------------------------------------------ -# Look for a compiler flag to make the Fortran (FC) compiler accept -# free-format source code, and adds it to FCFLAGS. Call -# ACTION-IF-SUCCESS (defaults to nothing) if successful (i.e. can -# compile code using new extension) and ACTION-IF-FAILURE (defaults to -# failing with an error message) if not. (Defined via DEFUN_ONCE to -# prevent flag from being added to FCFLAGS multiple times.) -# -# The known flags are: -# -ffree-form: GNU g77, gfortran, g95 -# -FR, -free: Intel compiler (icc, ecc, ifort) -# -free: Compaq compiler (fort), Sun compiler (f95) -# -qfree: IBM compiler (xlf) -# -Mfree, -Mfreeform: Portland Group compiler -# -freeform: SGI compiler -# -8, -f free: Absoft Fortran -# +source=free: HP Fortran -# (-)-nfix, -Free: Lahey/Fujitsu Fortran -# -free: NAGWare -# -f, -Wf,-f: f2c (but only a weak form of "free-form" and long lines) -# We try to test the "more popular" flags first, by some prejudiced -# notion of popularity. -AC_DEFUN_ONCE([AC_FC_FREEFORM], -[AC_LANG_PUSH([Fortran])dnl -AC_CACHE_CHECK([for Fortran flag needed to accept free-form source], - [ac_cv_fc_freeform], -[ac_cv_fc_freeform=unknown -ac_fc_freeform_FCFLAGS_save=$FCFLAGS -for ac_flag in none -ffree-form -FR -free -qfree -Mfree -Mfreeform \ - -freeform "-f free" -8 +source=free -nfix --nfix -Free -do - test "x$ac_flag" != xnone && FCFLAGS="$ac_fc_freeform_FCFLAGS_save $ac_flag" -dnl Use @&t@ below to ensure that editors don't turn 8+ spaces into tab. - AC_COMPILE_IFELSE([[ - program freeform - ! FIXME: how to best confuse non-freeform compilers? - print *, 'Hello ', & - @&t@ 'world.' - end]], - [ac_cv_fc_freeform=$ac_flag; break]) -done -rm -f conftest.err conftest.$ac_objext conftest.$ac_ext -FCFLAGS=$ac_fc_freeform_FCFLAGS_save -]) -if test "x$ac_cv_fc_freeform" = xunknown; then - m4_default([$2], - [AC_MSG_ERROR([Fortran does not accept free-form source], 77)]) -else - if test "x$ac_cv_fc_freeform" != xnone; then - FCFLAGS="$FCFLAGS $ac_cv_fc_freeform" - fi - $1 -fi -AC_LANG_POP([Fortran])dnl -])# AC_FC_FREEFORM - - -# AC_FC_FIXEDFORM([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ------------------------------------------------------------------ -# Look for a compiler flag to make the Fortran (FC) compiler accept -# fixed-format source code, and adds it to FCFLAGS. Call -# ACTION-IF-SUCCESS (defaults to nothing) if successful (i.e. 
can -# compile code using new extension) and ACTION-IF-FAILURE (defaults to -# failing with an error message) if not. (Defined via DEFUN_ONCE to -# prevent flag from being added to FCFLAGS multiple times.) -# -# The known flags are: -# -ffixed-form: GNU g77, gfortran, g95 -# -fixed: Intel compiler (ifort), Sun compiler (f95) -# -qfixed: IBM compiler (xlf*) -# -Mfixed: Portland Group compiler -# -fixedform: SGI compiler -# -f fixed: Absoft Fortran -# +source=fixed: HP Fortran -# (-)-fix, -Fixed: Lahey/Fujitsu Fortran -# -fixed: NAGWare -# Since compilers may accept fixed form based on file name extension, -# but users may want to use it with others as well, call AC_FC_SRCEXT -# with the respective source extension before calling this macro. -AC_DEFUN_ONCE([AC_FC_FIXEDFORM], -[AC_LANG_PUSH([Fortran])dnl -AC_CACHE_CHECK([for Fortran flag needed to accept fixed-form source], - [ac_cv_fc_fixedform], -[ac_cv_fc_fixedform=unknown -ac_fc_fixedform_FCFLAGS_save=$FCFLAGS -for ac_flag in none -ffixed-form -fixed -qfixed -Mfixed -fixedform "-f fixed" \ - +source=fixed -fix --fix -Fixed -do - test "x$ac_flag" != xnone && FCFLAGS="$ac_fc_fixedform_FCFLAGS_save $ac_flag" - AC_COMPILE_IFELSE([[ -C This comment should confuse free-form compilers. - program main - end]], - [ac_cv_fc_fixedform=$ac_flag; break]) -done -rm -f conftest.err conftest.$ac_objext conftest.$ac_ext -FCFLAGS=$ac_fc_fixedform_FCFLAGS_save -]) -if test "x$ac_cv_fc_fixedform" = xunknown; then - m4_default([$2], - [AC_MSG_ERROR([Fortran does not accept fixed-form source], 77)]) -else - if test "x$ac_cv_fc_fixedform" != xnone; then - FCFLAGS="$FCFLAGS $ac_cv_fc_fixedform" - fi - $1 -fi -AC_LANG_POP([Fortran])dnl -])# AC_FC_FIXEDFORM - - -# AC_FC_LINE_LENGTH([LENGTH], [ACTION-IF-SUCCESS], -# [ACTION-IF-FAILURE = FAILURE]) -# ------------------------------------------------ -# Look for a compiler flag to make the Fortran (FC) compiler accept long lines -# in the current (free- or fixed-format) source code, and adds it to FCFLAGS. -# The optional LENGTH may be 80, 132 (default), or `unlimited' for longer -# lines. Note that line lengths above 254 columns are not portable, and some -# compilers (hello ifort) do not accept more than 132 columns at least for -# fixed format. Call ACTION-IF-SUCCESS (defaults to nothing) if successful -# (i.e. can compile code using new extension) and ACTION-IF-FAILURE (defaults -# to failing with an error message) if not. (Defined via DEFUN_ONCE to -# prevent flag from being added to FCFLAGS multiple times.) -# You should call AC_FC_FREEFORM or AC_FC_FIXEDFORM to set the desired format -# prior to using this macro. -# -# The known flags are: -# -f{free,fixed}-line-length-N with N 72, 80, 132, or 0 or none for none. -# -ffree-line-length-none: GNU gfortran -# -ffree-line-length-huge: g95 (also -ffixed-line-length-N as above) -# -qfixed=132 80 72: IBM compiler (xlf) -# -Mextend: Cray -# -132 -80 -72: Intel compiler (ifort) -# Needs to come before -extend_source because ifort -# accepts that as well with an optional parameter and -# doesn't fail but only warns about unknown arguments. -# -extend_source: SGI compiler -# -W, -WNN (132, 80, 72): Absoft Fortran -# +es, +extend_source: HP Fortran (254 in either form, default is 72 fixed, -# 132 free) -# -w, (-)-wide: Lahey/Fujitsu Fortran (255 cols in fixed form) -# -e: Sun Fortran compiler (132 characters) -# -132: NAGWare -# -72, -f, -Wf,-f: f2c (a weak form of "free-form" and long lines). 
-# /XLine: Open Watcom -AC_DEFUN_ONCE([AC_FC_LINE_LENGTH], -[AC_LANG_PUSH([Fortran])dnl -m4_case(m4_default([$1], [132]), - [unlimited], [ac_fc_line_len_string=unlimited - ac_fc_line_len=0 - ac_fc_line_length_test=' - subroutine longer_than_132(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,'\ -'arg9,arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19)'], - [132], [ac_fc_line_len=132 - ac_fc_line_length_test=' - subroutine longer_than_80(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,'\ -'arg10)'], - [80], [ac_fc_line_len=80 - ac_fc_line_length_test=' - subroutine longer_than_72(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9)'], - [m4_warning([Invalid length argument `$1'])]) -: ${ac_fc_line_len_string=$ac_fc_line_len} -AC_CACHE_CHECK( -[for Fortran flag needed to accept $ac_fc_line_len_string column source lines], - [ac_cv_fc_line_length], -[ac_cv_fc_line_length=unknown -ac_fc_line_length_FCFLAGS_save=$FCFLAGS -for ac_flag in none \ - -ffree-line-length-none -ffixed-line-length-none \ - -ffree-line-length-huge \ - -ffree-line-length-$ac_fc_line_len \ - -ffixed-line-length-$ac_fc_line_len \ - -qfixed=$ac_fc_line_len -Mextend \ - -$ac_fc_line_len -extend_source \ - -W$ac_fc_line_len -W +extend_source +es -wide --wide -w -e \ - -f -Wf,-f -xline -do - test "x$ac_flag" != xnone && FCFLAGS="$ac_fc_line_length_FCFLAGS_save $ac_flag" - AC_COMPILE_IFELSE([[$ac_fc_line_length_test - end subroutine]], - [ac_cv_fc_line_length=$ac_flag; break]) -done -rm -f conftest.err conftest.$ac_objext conftest.$ac_ext -FCFLAGS=$ac_fc_line_length_FCFLAGS_save -]) -if test "x$ac_cv_fc_line_length" = xunknown; then - m4_default([$3], - [AC_MSG_ERROR([Fortran does not accept long source lines], 77)]) -else - if test "x$ac_cv_fc_line_length" != xnone; then - FCFLAGS="$FCFLAGS $ac_cv_fc_line_length" - fi - $2 -fi -AC_LANG_POP([Fortran])dnl -])# AC_FC_LINE_LENGTH - - -# AC_FC_CHECK_BOUNDS([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ---------------------------------------------------------------------- -# Look for a compiler flag to turn on array bounds checking for the -# Fortran (FC) compiler, and adds it to FCFLAGS. Call -# ACTION-IF-SUCCESS (defaults to nothing) if successful (i.e. can -# compile code using new extension) and ACTION-IF-FAILURE (defaults to -# failing with an error message) if not. (Defined via DEFUN_ONCE to -# prevent flag from being added to FCFLAGS multiple times.) -# -# The known flags are: -# -fcheck=all, -fbounds-check: gfortran -# -fbounds-check: g77, g95 -# -CB, -check bounds: Intel compiler (icc, ecc, ifort) -# -C: Sun/Oracle compiler (f95) -# -C, -qcheck: IBM compiler (xlf) -# -Mbounds: Portland Group compiler -# -C ,-Mbounds: Cray -# -C, -check_bounds: SGI compiler -# -check_bounds, +check=all: HP Fortran -# -C, -Rb -Rc: Absoft (-Rb: array boundaries, -Rc: array conformance) -# --chk e,s -chk (e,s): Lahey -# -C -C=all: NAGWare -# -C, -ffortran-bounds-check: PathScale pathf90 -# -C: f2c -# -BOunds: Open Watcom -AC_DEFUN_ONCE([AC_FC_CHECK_BOUNDS], -[AC_LANG_PUSH([Fortran])dnl -AC_CACHE_CHECK([for Fortran flag to enable array-bounds checking], - [ac_cv_fc_check_bounds], -[ac_cv_fc_check_bounds=unknown -ac_fc_check_bounds_FCFLAGS_save=$FCFLAGS -for ac_flag in -fcheck=bounds -fbounds-check -check_bounds -Mbounds -qcheck \ - '-check bounds' +check=all --check '-Rb -Rc' -CB -C=all -C \ - -ffortran-bounds-check "--chk e,s" "-chk e -chk s" -bounds -do - FCFLAGS="$ac_fc_check_bounds_FCFLAGS_save $ac_flag" - # We should be able to link a correct program. 
- AC_LINK_IFELSE([AC_LANG_PROGRAM([], [])], - [AC_LINK_IFELSE([[ - subroutine sub(a) - integer a(:) - a(8) = 0 - end subroutine - - program main - integer a(1:7) - interface - subroutine sub(a) - integer a(:) - end subroutine - end interface - - call sub(a) - end program]], - [# If we can run the program, require failure at run time. - # In cross-compiling mode, we rely on the compiler not accepting - # unknown options. - AS_IF([test "$cross_compiling" = yes], - [ac_cv_fc_check_bounds=$ac_flag; break], - [AS_IF([_AC_DO_TOKENS(./conftest$ac_exeext)], - [], - [ac_cv_fc_check_bounds=$ac_flag; break])])])]) -done -rm -f conftest$ac_exeext conftest.err conftest.$ac_objext conftest.$ac_ext -FCFLAGS=$ac_fc_check_bounds_FCFLAGS_save -]) -if test "x$ac_cv_fc_check_bounds" = xunknown; then - m4_default([$2], - [AC_MSG_ERROR([no Fortran flag for bounds checking found], 77)]) -else - if test "x$ac_cv_fc_check_bounds" != xnone; then - FCFLAGS="$FCFLAGS $ac_cv_fc_check_bounds" - fi - $1 -fi -AC_LANG_POP([Fortran])dnl -])# AC_FC_CHECK_BOUNDS - - -# _AC_FC_IMPLICIT_NONE([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ------------------------------------------------------------------------ -# Look for a flag to disallow implicit declarations, and add it to FCFLAGS. -# Call ACTION-IF-SUCCESS (defaults to nothing) if successful and -# ACTION-IF-FAILURE (defaults to failing with an error message) if not. -# -# Known flags: -# GNU gfortran, g95: -fimplicit-none, g77: -Wimplicit -# Intel: -u, -implicitnone; might also need '-warn errors' to turn into error. -# Sun/Oracle: -u -# HP: +implicit_none -# IBM: -u, -qundef -# SGI: -u -# Compaq: -u, -warn declarations -# NAGWare: -u -# Lahey: -in, --in, -AT -# Cray: -Mdclchk -e I -# PGI: -Mcdlchk -# f2c: -u -AC_DEFUN([_AC_FC_IMPLICIT_NONE], -[_AC_FORTRAN_ASSERT()dnl -AC_CACHE_CHECK([for flag to disallow _AC_LANG implicit declarations], - [ac_cv_[]_AC_LANG_ABBREV[]_implicit_none], -[ac_cv_[]_AC_LANG_ABBREV[]_implicit_none=unknown -ac_fc_implicit_none_[]_AC_LANG_PREFIX[]FLAGS_save=$[]_AC_LANG_PREFIX[]FLAGS -for ac_flag in none -fimplicit-none -u -Wimplicit -implicitnone +implicit_none \ - -qundef "-warn declarations" -in --in -AT "-e I" -Mdclchk \ - "-u -warn errors" -do - if test "x$ac_flag" != xnone; then - _AC_LANG_PREFIX[]FLAGS="$ac_fc_implicit_none_[]_AC_LANG_PREFIX[]FLAGS_save $ac_flag" - fi - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], - [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [[ - i = 0 - print *, i]])], - [], - [ac_cv_[]_AC_LANG_ABBREV[]_implicit_none=$ac_flag; break])]) -done -rm -f conftest.err conftest.$ac_objext conftest.$ac_ext -_AC_LANG_PREFIX[]FLAGS=$ac_fc_implicit_none_[]_AC_LANG_PREFIX[]FLAGS_save -]) -if test "x$ac_cv_[]_AC_LANG_ABBREV[]_implicit_none" = xunknown; then - m4_default([$3], - [AC_MSG_ERROR([no Fortran flag to disallow implicit declarations found], 77)]) -else - if test "x$ac_cv_[]_AC_LANG_ABBREV[]_implicit_none" != xnone; then - _AC_LANG_PREFIX[]FLAGS="$_AC_LANG_PREFIX[]FLAGS $ac_cv_[]_AC_LANG_ABBREV[]_implicit_none" - fi - $2 -fi -])# _AC_FC_IMPLICIT_NONE - - -# AC_F77_IMPLICIT_NONE([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ------------------------------------------------------------------------ -AC_DEFUN([AC_F77_IMPLICIT_NONE], -[AC_LANG_PUSH([Fortran 77])dnl -_AC_FC_IMPLICIT_NONE($@) -AC_LANG_POP([Fortran 77])dnl -])# AC_F77_IMPLICIT_NONE - - -# AC_FC_IMPLICIT_NONE([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ----------------------------------------------------------------------- 
-AC_DEFUN([AC_FC_IMPLICIT_NONE], -[AC_LANG_PUSH([Fortran])dnl -_AC_FC_IMPLICIT_NONE($@) -AC_LANG_POP([Fortran])dnl -])# AC_FC_IMPLICIT_NONE - - -# AC_FC_MODULE_EXTENSION -# ---------------------- -# Find the Fortran 90 module file extension. The module extension is stored -# in the variable FC_MODEXT and empty if it cannot be determined. The result -# or "unknown" is cached in the cache variable ac_cv_fc_module_ext. -AC_DEFUN([AC_FC_MODULE_EXTENSION], -[AC_CACHE_CHECK([Fortran 90 module extension], [ac_cv_fc_module_ext], -[AC_LANG_PUSH(Fortran) -mkdir conftest.dir -cd conftest.dir -ac_cv_fc_module_ext=unknown -AC_COMPILE_IFELSE([[ - module conftest_module - contains - subroutine conftest_routine - write(*,'(a)') 'gotcha!' - end subroutine - end module]], - [ac_cv_fc_module_ext=`ls | sed -n 's,conftest_module\.,,p'` - if test x$ac_cv_fc_module_ext = x; then -dnl Some F90 compilers use upper case characters for the module file name. - ac_cv_fc_module_ext=`ls | sed -n 's,CONFTEST_MODULE\.,,p'` - fi]) -cd .. -rm -rf conftest.dir -AC_LANG_POP(Fortran) -]) -FC_MODEXT=$ac_cv_fc_module_ext -if test "$FC_MODEXT" = unknown; then - FC_MODEXT= -fi -AC_SUBST([FC_MODEXT])dnl -]) - - -# AC_FC_MODULE_FLAG([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# --------------------------------------------------------------------- -# Find a flag to include Fortran 90 modules from another directory. -# If successful, run ACTION-IF-SUCCESS (defaults to nothing), otherwise -# run ACTION-IF-FAILURE (defaults to failing with an error message). -# The module flag is cached in the ac_cv_fc_module_flag variable. -# It may contain significant trailing whitespace. -# -# Known flags: -# gfortran: -Idir, -I dir (-M dir, -Mdir (deprecated), -Jdir for writing) -# g95: -I dir (-fmod=dir for writing) -# SUN: -Mdir, -M dir (-moddir=dir for writing; -# -Idir for includes is also searched) -# HP: -Idir, -I dir (+moddir=dir for writing) -# IBM: -Idir (-qmoddir=dir for writing) -# Intel: -Idir -I dir (-mod dir for writing) -# Absoft: -pdir -# Lahey: -Idir (-Mdir or -mod dir for writing) -# Cray: -module dir, -p dir (-J dir for writing) -# -e m is needed to enable writing .mod files at all -# Compaq: -Idir -# NAGWare: -I dir -# PathScale: -I dir (but -module dir is looked at first) -# Portland: -module dir (first -module also names dir for writing) -# Fujitsu: -Am -Idir (-Mdir for writing is searched first, then '.', then -I) -# (-Am indicates how module information is saved) -AC_DEFUN([AC_FC_MODULE_FLAG],[ -AC_CACHE_CHECK([Fortran 90 module inclusion flag], [ac_cv_fc_module_flag], -[AC_LANG_PUSH([Fortran]) -ac_cv_fc_module_flag=unknown -mkdir conftest.dir -cd conftest.dir -AC_COMPILE_IFELSE([[ - module conftest_module - contains - subroutine conftest_routine - write(*,'(a)') 'gotcha!' - end subroutine - end module]], - # For Lahey -M will also write module and object files to that directory - # make it read-only so that lahey fails over to -I - [chmod -w . - cd .. - ac_fc_module_flag_FCFLAGS_save=$FCFLAGS - # Flag ordering is significant for gfortran and Sun. - for ac_flag in -M -I '-I ' '-M ' -p '-mod ' '-module ' '-Am -I'; do - # Add the flag twice to prevent matching an output flag. 
- FCFLAGS="$ac_fc_module_flag_FCFLAGS_save ${ac_flag}conftest.dir ${ac_flag}conftest.dir" - AC_COMPILE_IFELSE([[ - module conftest_main - use conftest_module - contains - subroutine conftest - call conftest_routine - end subroutine - end module]], - [ac_cv_fc_module_flag="$ac_flag"]) - if test "$ac_cv_fc_module_flag" != unknown; then - break - fi - done - FCFLAGS=$ac_fc_module_flag_FCFLAGS_save -]) -chmod +w conftest.dir -rm -rf conftest.dir -AC_LANG_POP([Fortran]) -]) -if test "$ac_cv_fc_module_flag" != unknown; then - FC_MODINC=$ac_cv_fc_module_flag - $1 -else - FC_MODINC= - m4_default([$2], - [AC_MSG_ERROR([unable to find compiler flag for module search path])]) -fi -AC_SUBST([FC_MODINC]) -# Ensure trailing whitespace is preserved in a Makefile. -AC_SUBST([ac_empty], [""]) -AC_CONFIG_COMMANDS_PRE([case $FC_MODINC in #( - *\ ) FC_MODINC=$FC_MODINC'${ac_empty}' ;; -esac])dnl -]) - - -# AC_FC_MODULE_OUTPUT_FLAG([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = FAILURE]) -# ---------------------------------------------------------------------------- -# Find a flag to write Fortran 90 module information to another directory. -# If successful, run ACTION-IF-SUCCESS (defaults to nothing), otherwise -# run ACTION-IF-FAILURE (defaults to failing with an error message). -# The module flag is cached in the ac_cv_fc_module_output_flag variable. -# It may contain significant trailing whitespace. -# -# For known flags, see the documentation of AC_FC_MODULE_FLAG above. -AC_DEFUN([AC_FC_MODULE_OUTPUT_FLAG],[ -AC_CACHE_CHECK([Fortran 90 module output flag], [ac_cv_fc_module_output_flag], -[AC_LANG_PUSH([Fortran]) -mkdir conftest.dir conftest.dir/sub -cd conftest.dir -ac_cv_fc_module_output_flag=unknown -ac_fc_module_output_flag_FCFLAGS_save=$FCFLAGS -# Flag ordering is significant: put flags late which some compilers use -# for the search path. -for ac_flag in -J '-J ' -fmod= -moddir= +moddir= -qmoddir= '-mod ' \ - '-module ' -M '-Am -M' '-e m -J '; do - FCFLAGS="$ac_fc_module_output_flag_FCFLAGS_save ${ac_flag}sub" - AC_COMPILE_IFELSE([[ - module conftest_module - contains - subroutine conftest_routine - write(*,'(a)') 'gotcha!' - end subroutine - end module]], - [cd sub - AC_COMPILE_IFELSE([[ - program main - use conftest_module - call conftest_routine - end program]], - [ac_cv_fc_module_output_flag="$ac_flag"]) - cd .. - if test "$ac_cv_fc_module_output_flag" != unknown; then - break - fi]) -done -FCFLAGS=$ac_fc_module_output_flag_FCFLAGS_save -cd .. -rm -rf conftest.dir -AC_LANG_POP([Fortran]) -]) -if test "$ac_cv_fc_module_output_flag" != unknown; then - FC_MODOUT=$ac_cv_fc_module_output_flag - $1 -else - FC_MODOUT= - m4_default([$2], - [AC_MSG_ERROR([unable to find compiler flag to write module information to])]) -fi -AC_SUBST([FC_MODOUT]) -# Ensure trailing whitespace is preserved in a Makefile. -AC_SUBST([ac_empty], [""]) -AC_CONFIG_COMMANDS_PRE([case $FC_MODOUT in #( - *\ ) FC_MODOUT=$FC_MODOUT'${ac_empty}' ;; -esac])dnl -]) - -# _AC_FC_LIBRARY_LDFLAGS -# ---------------------- -# -# Determine the linker flags (e.g. "-L" and "-l") for the Fortran -# intrinsic and runtime libraries that are required to successfully -# link a Fortran program or shared library. The output variable -# FLIBS/FCLIBS is set to these flags. -# -# This macro is intended to be used in those situations when it is -# necessary to mix, e.g. C++ and Fortran, source code into a single -# program or shared library. 
-# -# For example, if object files from a C++ and Fortran compiler must -# be linked together, then the C++ compiler/linker must be used for -# linking (since special C++-ish things need to happen at link time -# like calling global constructors, instantiating templates, enabling -# exception support, etc.). -# -# However, the Fortran intrinsic and runtime libraries must be -# linked in as well, but the C++ compiler/linker doesn't know how to -# add these Fortran libraries. Hence, the macro -# "AC_F77_LIBRARY_LDFLAGS" was created to determine these Fortran -# libraries. -# -# This macro was packaged in its current form by Matthew D. Langston. -# However, nearly all of this macro came from the "OCTAVE_FLIBS" macro -# in "octave-2.0.13/aclocal.m4", and full credit should go to John -# W. Eaton for writing this extremely useful macro. Thank you John. -AC_DEFUN([_AC_FC_LIBRARY_LDFLAGS], -[_AC_FORTRAN_ASSERT()dnl -_AC_PROG_FC_V -AC_CACHE_CHECK([for _AC_LANG libraries of $[]_AC_FC[]], ac_cv_[]_AC_LANG_ABBREV[]_libs, -[if test "x$[]_AC_LANG_PREFIX[]LIBS" != "x"; then - ac_cv_[]_AC_LANG_ABBREV[]_libs="$[]_AC_LANG_PREFIX[]LIBS" # Let the user override the test. -else - -_AC_PROG_FC_V_OUTPUT - -ac_cv_[]_AC_LANG_ABBREV[]_libs= - -# Save positional arguments (if any) -ac_save_positional="$[@]" - -set X $ac_[]_AC_LANG_ABBREV[]_v_output -while test $[@%:@] != 1; do - shift - ac_arg=$[1] - case $ac_arg in - [[\\/]]*.a | ?:[[\\/]]*.a) - _AC_LIST_MEMBER_IF($ac_arg, $ac_cv_[]_AC_LANG_ABBREV[]_libs, , - ac_cv_[]_AC_LANG_ABBREV[]_libs="$ac_cv_[]_AC_LANG_ABBREV[]_libs $ac_arg") - ;; - -bI:*) - _AC_LIST_MEMBER_IF($ac_arg, $ac_cv_[]_AC_LANG_ABBREV[]_libs, , - [_AC_LINKER_OPTION([$ac_arg], ac_cv_[]_AC_LANG_ABBREV[]_libs)]) - ;; - # Ignore these flags. - -lang* | -lcrt*.o | -lc | -lgcc* | -lSystem | -libmil | -little \ - |-LANG:=* | -LIST:* | -LNO:* | -link | -list | -lnuma ) - ;; - -lkernel32) - test x"$CYGWIN" != xyes && ac_cv_[]_AC_LANG_ABBREV[]_libs="$ac_cv_[]_AC_LANG_ABBREV[]_libs $ac_arg" - ;; - -[[LRuYz]]) - # These flags, when seen by themselves, take an argument. - # We remove the space between option and argument and re-iterate - # unless we find an empty arg or a new option (starting with -) - case $[2] in - "" | -*);; - *) - ac_arg="$ac_arg$[2]" - shift; shift - set X $ac_arg "$[@]" - ;; - esac - ;; - -YP,*) - for ac_j in `AS_ECHO(["$ac_arg"]) | sed -e 's/-YP,/-L/;s/:/ -L/g'`; do - _AC_LIST_MEMBER_IF($ac_j, $ac_cv_[]_AC_LANG_ABBREV[]_libs, , - [ac_arg="$ac_arg $ac_j" - ac_cv_[]_AC_LANG_ABBREV[]_libs="$ac_cv_[]_AC_LANG_ABBREV[]_libs $ac_j"]) - done - ;; - -[[lLR]]*) - _AC_LIST_MEMBER_IF($ac_arg, $ac_cv_[]_AC_LANG_ABBREV[]_libs, , - ac_cv_[]_AC_LANG_ABBREV[]_libs="$ac_cv_[]_AC_LANG_ABBREV[]_libs $ac_arg") - ;; - -zallextract*| -zdefaultextract) - ac_cv_[]_AC_LANG_ABBREV[]_libs="$ac_cv_[]_AC_LANG_ABBREV[]_libs $ac_arg" - ;; - # Ignore everything else. - esac -done -# restore positional arguments -set X $ac_save_positional; shift - -# We only consider "LD_RUN_PATH" on Solaris systems. If this is seen, -# then we insist that the "run path" must be an absolute path (i.e. it -# must begin with a "/"). 
-case `(uname -sr) 2>/dev/null` in - "SunOS 5"*) - ac_ld_run_path=`AS_ECHO(["$ac_[]_AC_LANG_ABBREV[]_v_output"]) | - sed -n 's,^.*LD_RUN_PATH *= *\(/[[^ ]]*\).*$,-R\1,p'` - test "x$ac_ld_run_path" != x && - _AC_LINKER_OPTION([$ac_ld_run_path], ac_cv_[]_AC_LANG_ABBREV[]_libs) - ;; -esac -fi # test "x$[]_AC_LANG_PREFIX[]LIBS" = "x" -]) -[]_AC_LANG_PREFIX[]LIBS="$ac_cv_[]_AC_LANG_ABBREV[]_libs" -AC_SUBST([]_AC_LANG_PREFIX[]LIBS) -])# _AC_FC_LIBRARY_LDFLAGS - - -# AC_F77_LIBRARY_LDFLAGS -# ---------------------- -AC_DEFUN([AC_F77_LIBRARY_LDFLAGS], -[AC_REQUIRE([AC_PROG_F77])dnl -AC_LANG_PUSH(Fortran 77)dnl -_AC_FC_LIBRARY_LDFLAGS -AC_LANG_POP(Fortran 77)dnl -])# AC_F77_LIBRARY_LDFLAGS - - -# AC_FC_LIBRARY_LDFLAGS -# --------------------- -AC_DEFUN([AC_FC_LIBRARY_LDFLAGS], -[AC_REQUIRE([AC_PROG_FC])dnl -AC_LANG_PUSH(Fortran)dnl -_AC_FC_LIBRARY_LDFLAGS -AC_LANG_POP(Fortran)dnl -])# AC_FC_LIBRARY_LDFLAGS diff --git a/src/externals/mct/mct/Makefile b/src/externals/mct/mct/Makefile deleted file mode 100644 index 97aa186e77b..00000000000 --- a/src/externals/mct/mct/Makefile +++ /dev/null @@ -1,110 +0,0 @@ -.NOTPARALLEL: -SHELL = /bin/sh -VPATH=$(SRCDIR)/mct -# SOURCE FILES - -MODULE = mct - -SRCS_F90 = m_MCTWorld.F90 \ - m_AttrVect.F90 \ - m_GlobalMap.F90 \ - m_GlobalSegMap.F90 \ - m_GlobalSegMapComms.F90 \ - m_Accumulator.F90 \ - m_SparseMatrix.F90 \ - m_Navigator.F90 \ - m_AttrVectComms.F90 \ - m_AttrVectReduce.F90 \ - m_AccumulatorComms.F90 \ - m_GeneralGrid.F90 \ - m_GeneralGridComms.F90 \ - m_SpatialIntegral.F90 \ - m_SpatialIntegralV.F90 \ - m_MatAttrVectMul.F90 \ - m_Merge.F90 \ - m_GlobalToLocal.F90 \ - m_ExchangeMaps.F90 \ - m_ConvertMaps.F90 \ - m_SparseMatrixDecomp.F90 \ - m_SparseMatrixToMaps.F90 \ - m_SparseMatrixComms.F90 \ - m_SparseMatrixPlus.F90 \ - m_Router.F90 \ - m_Rearranger.F90 \ - m_SPMDutils.F90 \ - m_Transfer.F90 - -OBJS_ALL = $(SRCS_F90:.F90=.o) - -# MACHINE AND COMPILER FLAGS - -include ../Makefile.conf - -# TARGETS - -all: lib$(MODULE).a - -lib$(MODULE).a: $(OBJS_ALL) - $(RM) $@ - $(AR) $@ $(OBJS_ALL) - $(RANLIB) $@ - -# ADDITIONAL FLAGS SPECIFIC FOR MCT COMPILATION - -MCTFLAGS = $(INCFLAG)$(MPEUPATH) - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .o - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MCTFLAGS) $< - - -clean: - ${RM} *.o *.mod lib$(MODULE).a - -install: all - $(MKINSTALLDIRS) $(libdir) $(includedir) - $(INSTALL) lib$(MODULE).a -m 644 $(libdir) - @for modfile in *.mod; do \ - echo $(INSTALL) $$modfile -m 644 $(includedir); \ - $(INSTALL) $$modfile -m 644 $(includedir); \ - done - -# DEPENDENCIES - -$(OBJS_ALL): $(MPEUPATH)/libmpeu.a - -m_AttrVect.o: -m_Accumulator.o: m_AttrVect.o -m_GlobalMap.o: -m_GlobalSegMap.o: -m_GlobalSegMapComms.o: m_GlobalSegMap.o -m_Navigator.o: -m_SPMDutils.o: -m_AttrVectComms.o: m_AttrVect.o m_GlobalMap.o -m_AttrVectReduce.o: m_AttrVect.o -m_AccumulatorComms.o: m_AttrVect.o m_GlobalMap.o m_AttrVectComms.o -m_SparseMatrix.o: m_AttrVect.o m_GlobalMap.o m_AttrVectComms.o -m_GeneralGrid.o: m_AttrVect.o -m_GeneralGridComms.o: m_AttrVect.o m_GeneralGrid.o m_AttrVectComms.o m_GlobalMap.o m_GlobalSegMap.o -m_MatAttrVectMul.o: m_AttrVect.o m_SparseMatrix.o m_GlobalMap.o m_GlobalSegMap.o m_SparseMatrixPlus.o m_Rearranger.o -m_Merge.o: m_AttrVect.o m_GeneralGrid.o -m_Router.o: m_GlobalToLocal.o m_MCTWorld.o m_GlobalSegMap.o m_ExchangeMaps.o -m_Rearranger.o: m_Router.o m_MCTWorld.o m_GlobalSegMap.o m_AttrVect.o m_SPMDutils.o -m_GlobalToLocal.o: m_GlobalSegMap.o -m_ExchangeMaps.o: m_GlobalMap.o m_GlobalSegMap.o m_MCTWorld.o m_ConvertMaps.o 
-m_ConvertMaps.o: m_GlobalMap.o m_GlobalSegMap.o m_MCTWorld.o -m_SparseMatrixDecomp.o: m_SparseMatrix.o m_GlobalSegMap.o -m_SparseMatrixToMaps.o: m_SparseMatrix.o m_GlobalSegMap.o -m_SparseMatrixComms.o: m_SparseMatrix.o m_SparseMatrixDecomp.o m_GlobalSegMap.o m_AttrVectComms.o -accumulate.o: m_AttrVect.o m_Accumulator.o -m_SpatialIntegral.o: m_SpatialIntegralV.o m_GeneralGrid.o m_AttrVect.o m_AttrVectReduce.o -m_SpatialIntegralV.o: m_AttrVect.o m_AttrVectReduce.o -m_Transfer.o: m_AttrVect.o m_Router.o m_MCTWorld.o -m_SparseMatrixPlus.o: m_GlobalSegMap.o m_Rearranger.o m_SparseMatrix.o m_SparseMatrixComms.o m_SparseMatrixToMaps.o m_GlobalToLocal.o - - - diff --git a/src/externals/mct/mct/README b/src/externals/mct/mct/README deleted file mode 100644 index 139553c6c2d..00000000000 --- a/src/externals/mct/mct/README +++ /dev/null @@ -1,39 +0,0 @@ -###################################################################### - - -- Mathematics + Computer Science Div. / Argonne National Laboratory - - Model Coupling Toolkit (MCT) - - Jay Larson - Robert Jacob - Everest Ong - - For more information, see http://www.mcs.anl.gov/mct - -###################################################################### -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- - -This directory contains the basic MCT source code. - -MCT distribution contents: -MCT/ -MCT/COPYRIGHT -MCT/doc/ -MCT/examples/ -MCT/mct/ <- You are here -MCT/mpeu/ -MCT/protex/ - -A complete distribution of MCT can be obtained from http://www.mcs.anl.gov/mct. - ---------------------------------------------------- -Build instructions: -In the top level, type "make" to build mct and mpeu. - -If ./configure was already run and mpeu was already built, -you can type "make" in this directory. - ---------------------------------------------------- diff --git a/src/externals/mct/mct/m_Accumulator.F90 b/src/externals/mct/mct/m_Accumulator.F90 deleted file mode 100644 index c7b1e29054d..00000000000 --- a/src/externals/mct/mct/m_Accumulator.F90 +++ /dev/null @@ -1,2471 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Accumulator - Time Averaging/Accumlation Buffer -! -! !DESCRIPTION: -! -! An {\em accumulator} is a data class used for computing running sums -! and/or time averages of {\tt AttrVect} class data. -! The period of time over which data are accumulated/averaged is the -! {\em accumulation cycle}, which is defined by the total number -! of accumulation steps (the component {\tt Accumulator\%num\_steps}). When -! the accumulation routine {\tt accumulate\_} is invoked, the number -! of accumulation cycle steps (the component -! {\tt Accumulator\%steps\_done})is incremented, and compared with -! the number of steps in the accumulation cycle to determine if the -! accumulation cycle has been completed. The accumulation buffers -! of the {\tt Accumulator} are stored in an {\tt AttrVect} (namely -! the component {\tt Accumulator\%data}), which allows the user to -! define the number of variables and their names at run-time. -! Finally, one can define for each field -! being accumulated the specific accumulation {\em action}. Currently, -! 
there are two options: Time Averaging and Time Summation. The -! user chooses the specific action by setting an integer action -! flag for each attribute being accumulated. The supported options -! are defined by the public data member constants {\tt MCT\_SUM} and -! {\tt MCT\_AVG}. -! \\ -! This module also supports a simple usage of accumulator where all -! the actions are SUM ({\tt inits\_} and {\tt initavs\_}) and the user -! must call {\tt average\_} to calculate the average from the current -! value of {\tt Accumulator\%steps\_done}. {\tt Accumulator\%num\_steps} -! is ignored in this case. -! -! !INTERFACE: - - module m_Accumulator -! -! !USES: -! - use m_List, only : List - use m_AttrVect, only : AttrVect - use m_realkinds,only : SP,DP,FP - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: Accumulator ! The class data structure - - Type Accumulator -#ifdef SEQUENCE - sequence -#endif - integer :: num_steps ! total number of accumulation steps - integer :: steps_done ! number of accumulation steps performed - integer, pointer, dimension(:) :: iAction ! index of integer actions - integer, pointer, dimension(:) :: rAction ! index of real actions - type(AttrVect) :: data ! accumulated sum field storage - End Type Accumulator - -! !PUBLIC MEMBER FUNCTIONS: -! - public :: init ! creation method - public :: initp ! partial creation method (MCT USE ONLY) - public :: clean ! destruction method - public :: initialized ! check if initialized - public :: lsize ! local length of the data arrays - public :: NumSteps ! number of steps in a cycle - public :: StepsDone ! number of steps completed in the - ! current cycle - public :: nIAttr ! number of integer fields - public :: nRAttr ! number of real fields - public :: indexIA ! index the integer fields - public :: indexRA ! index the real fields - public :: getIList ! Return tag from INTEGER - ! attribute list - public :: getRList ! Return tag from REAL attribute - ! list - public :: exportIAttr ! Return INTEGER attribute as a vector - public :: exportRAttr ! Return REAL attribute as a vector - public :: importIAttr ! Insert INTEGER vector as attribute - public :: importRAttr ! Insert REAL vector as attribute - public :: zero ! Clear an accumulator - public :: SharedAttrIndexList ! Returns the number of shared - ! attributes, and lists of the - ! respective locations of these - ! shared attributes - public :: accumulate ! Add AttrVect data into an Accumulator - public :: average ! Calculate an average in an Accumulator - -! 
Definition of interfaces for the methods for the Accumulator: - - interface init ; module procedure & - init_, & - inits_, & - initv_, & - initavs_ - end interface - interface initp ; module procedure initp_ ; end interface - interface clean ; module procedure clean_ ; end interface - interface initialized; module procedure initialized_ ; end interface - interface lsize ; module procedure lsize_ ; end interface - interface NumSteps ; module procedure NumSteps_ ; end interface - interface StepsDone ; module procedure StepsDone_ ; end interface - interface nIAttr ; module procedure nIAttr_ ; end interface - interface nRAttr ; module procedure nRAttr_ ; end interface - interface indexIA; module procedure indexIA_; end interface - interface indexRA; module procedure indexRA_; end interface - interface getIList; module procedure getIList_; end interface - interface getRList; module procedure getRList_; end interface - interface exportIAttr ; module procedure exportIAttr_ ; end interface - interface exportRAttr ; module procedure & - exportRAttrSP_, & - exportRAttrDP_ - end interface - interface importIAttr ; module procedure importIAttr_ ; end interface - interface importRAttr ; module procedure & - importRAttrSP_, & - importRAttrDP_ - end interface - interface zero ; module procedure zero_ ; end interface - interface SharedAttrIndexList ; module procedure & - aCaCSharedAttrIndexList_, & - aVaCSharedAttrIndexList_ - end interface - interface accumulate ; module procedure accumulate_ ; end interface - interface average ; module procedure average_ ; end interface - -! !PUBLIC DATA MEMBERS: -! - public :: MCT_SUM - public :: MCT_AVG - - integer, parameter :: MCT_SUM = 1 - integer, parameter :: MCT_AVG = 2 - -! !REVISION HISTORY: -! 7Sep00 - Jay Larson - initial prototype -! 7Feb01 - Jay Larson - Public interfaces -! to getIList() and getRList(). -! 9Aug01 - E.T. Ong - added initialized and -! initp_ routines. Added 'action' in Accumulator type. -! 6May02 - Jay Larson - added import/export -! routines. -! 26Aug02 - E.T. Ong - thourough code revision; -! no added routines -! 10Jan08 - R. Jacob - add simple accumulator -! use support and check documentation. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_Accumulator' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_ - Initialize an Accumulator and its Registers -! -! !DESCRIPTION: -! This routine allocates space for the output {\tt Accumulator} argument -! {\tt aC}, and at a minimum sets the number of time steps in an -! accumulation cycle (defined by the input {\tt INTEGER} argument -! {\tt num\_steps}), and the {\em length} of the {\tt Accumulator} -! register buffer (defined by the input {\tt INTEGER} argument {\tt -! lsize}). If one wishes to accumulate integer fields, the list of -! these fields is defined by the input {\tt CHARACTER} argument -! {\tt iList}, which is specified as a colon-delimited set of -! substrings (further information regarding this is available in the -! routine {\tt init\_()} of the module {\tt m\_AttrVect}). If no -! value of {\tt iList} is supplied, no integer attribute accumulation -! buffers will be allocated. The accumulation action on each of the -! integer attributes can be defined by supplying the input {\tt INTEGER} -! 
array argument {\tt iAction(:)} (whose length must correspond to the -! number of items in {\tt iList}). The values of the elements of -! {\tt iAction(:)} must be one of the values among the public data -! members defined in the declaration section of this module. If the -! integer attributes are to be accumulated (i.e. one supplies {\tt iList}), -! but {\tt iAction(:)} is not specified, the default action for all -! integer accumulation operations will be summation. The input arguments -! {\tt rList} and {\tt rAction(:)} define the names of the real variables -! to be accumulated and the accumulation action for each. The arguments -! {\tt rList} and {\tt rAction(:)} are related to each other the same -! way as {\tt iList} and {\tt iAction(:)}. Finally, the user can -! manually set the number of completed steps in an accumulation cycle -! (e.g. for restart purposes) by supplying a value for the optional -! input {\tt INTEGER} argument {\tt steps\_done}. -! -! !INTERFACE: - - subroutine init_(aC, iList, iAction, rList, rAction, lsize, & - num_steps,steps_done) -! -! !USES: -! - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - - use m_List, only: List - use m_List, only: List_nullify => nullify - use m_List, only: List_init => init - use m_List, only: List_nitem => nitem - use m_List, only: List_clean => clean - - use m_stdio - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), optional, intent(in) :: iList - integer, dimension(:), optional, intent(in) :: iAction - character(len=*), optional, intent(in) :: rList - integer, dimension(:), optional, intent(in) :: rAction - integer, intent(in) :: lsize - integer, intent(in) :: num_steps - integer, optional, intent(in) :: steps_done - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: aC - -! !REVISION HISTORY: -! 11Sep00 - Jay Larson - initial prototype -! 27JUL01 - E.T. Ong - added iAction, rAction, -! niAction, and nrAction to accumulator type. Also defined -! MCT_SUM and MCT_AVG for accumulator module. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::init_' - integer :: my_steps_done, nIAttr, nRAttr, ierr - integer, dimension(:), pointer :: my_iAction, my_rAction - logical :: status - type(List) :: temp_iList, temp_rList - - nullify(my_iAction) - nullify(my_rAction) - - call List_nullify(temp_iList) - call List_nullify(temp_rList) - - ! Argument consistency checks: - - ! 1) Terminate with error message if optional argument iAction (rAction) - ! is supplied but optional argument iList (rList) is not. - - if(present(iAction) .and. (.not. present(iList))) then - write(stderr,'(2a)') myname_,'::FATAL--Argument iAction supplied but action iList absent!' - call die(myname_) - endif - - if(present(rAction) .and. (.not. present(rList))) then - write(stderr,'(2a)') myname_,'::FATAL--Argument rAction supplied but action rList absent!' - call die(myname_) - endif - - ! 2) For iList and rList, generate temporary List data structures to facilitate - ! attribute counting. - - if(present(iList)) then ! create temp_iList - call List_init(temp_iList, iList) - nIAttr = List_nitem(temp_iList) - endif - - if(present(rList)) then ! create temp_iList - call List_init(temp_rList, rList) - nRAttr = List_nitem(temp_rList) - endif - - ! 3) Terminate with error message if optional arguments iAction (rAction) - ! and iList (rList) are supplied but the size of iAction (rAction) does not - ! 
match the number of items in iList (rList). - - if(present(iAction) .and. present(iList)) then - if(size(iAction) /= nIAttr) then - write(stderr,'(2a,2(a,i8))') myname_, & - '::FATAL--Size mismatch between iAction and iList! ', & - 'size(iAction)=',size(iAction),', ','No. items in iList=',nIAttr - call die(myname_) - endif - endif - - if(present(rAction) .and. present(rList)) then - if(size(rAction) /= nRAttr) then - write(stderr,'(2a,2(a,i8))') myname_, & - '::FATAL--Size mismatch between rAction and rList! ', & - 'size(rAction)=',size(rAction),', ','No items in rList=',nRAttr - call die(myname_) - endif - endif - - ! Initialize the Accumulator components. - - ! steps_done: - - if(present(steps_done)) then - my_steps_done = steps_done - else - my_steps_done = 0 - endif - - ! my_iAction (if iList is present) - - if(present(iList)) then ! set up my_iAction - - allocate(my_iAction(nIAttr), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - '::FATAL: allocate(my_iAction) failed with ierr=',ierr - call die(myname_) - endif - - if(present(iAction)) then ! use its values - my_iAction = iAction - else ! go with default summation by assigning value MCT_SUM - my_iAction = MCT_SUM - endif - - endif - - ! my_rAction (if rList is present) - - if(present(rList)) then ! set up my_rAction - - allocate(my_rAction(nRAttr), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - '::FATAL: allocate(my_rAction) failed with ierr=',ierr - call die(myname_) - endif - - if(present(rAction)) then ! use its values - my_rAction = rAction - else ! go with default summation by assigning value MCT_SUM - my_rAction = MCT_SUM - endif - - endif - - ! Build the Accumulator aC minus its data component: - - if(present(iList) .and. present(rList)) then ! Both REAL and INTEGER registers - - call initp_(aC,my_iAction,my_rAction,num_steps,my_steps_done) - - deallocate(my_iAction, my_rAction, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - '::FATAL: deallocate(my_iAction, my_rAction) failed with ierr=',ierr - call die(myname_) - endif - - else ! Either only REAL or only INTEGER registers in aC - - if(present(iList)) then ! Only INTEGER REGISTERS - - call initp_(aC=aC, iAction=my_iAction, num_steps=num_steps, & - steps_done=my_steps_done) - - deallocate(my_iAction, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - '::FATAL: deallocate(my_iAction) failed with ierr=',ierr - call die(myname_) - endif - - endif - - if(present(rList)) then ! Only REAL REGISTERS - - call initp_(aC=aC, rAction=my_rAction, num_steps=num_steps, & - steps_done=my_steps_done) - - deallocate(my_rAction, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - '::FATAL: deallocate(my_rAction) failed with ierr=',ierr - call die(myname_) - endif - - endif - - endif - - ! Initialize the AttrVect data component for aC: - - if(present(iList) .and. present(rList)) then - call AttrVect_init(aC%data,iList,rList,lsize) - else - if(present(iList)) then - call AttrVect_init(aV=aC%data,iList=iList,lsize=lsize) - endif - if(present(rList)) then - call AttrVect_init(aV=aC%data,rList=rList,lsize=lsize) - endif - endif - - call AttrVect_zero(aC%data) - - ! Clean up - - if(present(iList)) call List_clean(temp_iList) - if(present(rList)) call List_clean(temp_rList) - - ! Check that aC has been properly initialized - - status = initialized_(aC=aC,die_flag=.true.,source_name=myname_) - - end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: inits_ - Initialize a simple Accumulator and its Registers -! -! !DESCRIPTION: -! This routine allocates space for the output simple {\tt Accumulator} argument -! {\tt aC}, and sets the {\em length} of the {\tt Accumulator} -! register buffer (defined by the input {\tt INTEGER} argument {\tt -! lsize}). If one wishes to accumulate integer fields, the list of -! these fields is defined by the input {\tt CHARACTER} argument -! {\tt iList}, which is specified as a colon-delimited set of -! substrings (further information regarding this is available in the -! routine {\tt init\_()} of the module {\tt m\_AttrVect}). If no -! value of {\tt iList} is supplied, no integer attribute accumulation -! buffers will be allocated. The input argument {\tt rList} define -! the names of the real variables to be accumulated. Finally, the user can -! manually set the number of completed steps in an accumulation cycle -! (e.g. for restart purposes) by supplying a value for the optional -! input {\tt INTEGER} argument {\tt steps\_done}. -! Its default value is zero. -! -! In a simple accumulator, the action is always SUM. -! -! -! !INTERFACE: - - subroutine inits_(aC, iList, rList, lsize,steps_done) -! -! !USES: -! - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nitem => nitem - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), optional, intent(in) :: iList - character(len=*), optional, intent(in) :: rList - integer, intent(in) :: lsize - integer, optional, intent(in) :: steps_done - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: aC - -! !REVISION HISTORY: -! 10Jan08 - R. Jacob - initial version based on init_ -! -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::inits_' - type(List) :: tmplist - integer :: my_steps_done,ier,i,actsize - logical :: status - - ! Initialize the Accumulator components. - - if(present(steps_done)) then - my_steps_done = steps_done - else - my_steps_done = 0 - endif - - aC%num_steps = -1 ! special value for simple aC - aC%steps_done = my_steps_done - - nullify(aC%iAction,aC%rAction) - - if(present(iList)) then - call List_init(tmplist,iList) - actsize=List_nitem(tmplist) - allocate(aC%iAction(actsize),stat=ier) - if(ier /= 0) call die(myname_,"iAction allocate",ier) - do i=1,lsize - aC%iAction=MCT_SUM - enddo - call List_clean(tmplist) - endif - - if(present(rList)) then - call List_init(tmplist,rList) - actsize=List_nitem(tmpList) - allocate(aC%rAction(actsize),stat=ier) - if(ier /= 0) call die(myname_,"rAction allocate",ier) - do i=1,lsize - aC%rAction=MCT_SUM - enddo - call List_clean(tmplist) - endif - - ! Initialize the AttrVect component aC: - - if(present(iList) .and. present(rList)) then - call AttrVect_init(aC%data,iList,rList,lsize) - else - if(present(iList)) then - call AttrVect_init(aV=aC%data,iList=iList,lsize=lsize) - endif - if(present(rList)) then - call AttrVect_init(aV=aC%data,rList=rList,lsize=lsize) - endif - endif - - call AttrVect_zero(aC%data) - - ! 
Check that aC has been properly initialized - - status = initialized_(aC=aC,die_flag=.true.,source_name=myname_) - - end subroutine inits_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initp_ - Initialize an Accumulator but not its Registers -! -! !DESCRIPTION: -! This routine is an internal service routine for use by the other -! initialization routines in this module. It sets up some---but not -! all---of the components of the output {\tt Accumulator} argument -! {\tt aC}. This routine can set up the following components of -! {\tt aC}: -! \begin{enumerate} -! \item {\tt aC\%iAction}, the array of accumlation actions for the -! integer attributes of {\tt aC} (if the input {\tt INTEGER} array -! argument {\tt iAction(:)} is supplied); -! \item {\tt aC\%rAction}, the array of accumlation actions for the -! real attributes of {\tt aC} (if the input {\tt INTEGER} array -! argument {\tt rAction(:)} is supplied); -! \item {\tt aC\%num\_steps}, the number of steps in an accumulation -! cycle (if the input {\tt INTEGER} argument {\tt num\_steps} is -! supplied); and -! \item {\tt aC\%steps\_done}, the number of steps completed so far -! in an accumulation cycle (if the input {\tt INTEGER} argument -! {\tt steps\_done} is supplied). -! \end{enumerate} -! -! !INTERFACE: - - subroutine initp_(aC, iAction, rAction, num_steps, steps_done) - -! -! !USES: -! - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - integer, dimension(:), optional, intent(in) :: iAction - integer, dimension(:), optional, intent(in) :: rAction - integer, intent(in) :: num_steps - integer, optional, intent(in) :: steps_done - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: aC - -! !REVISION HISTORY: -! 11Sep00 - Jay Larson - initial prototype -! 27JUL01 - E.T. Ong - added iAction, rAction, -! niAction, and nrAction to accumulator type. Also defined -! MCT_SUM and MCT_AVG for accumulator module. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initp_' - integer :: i,ier - integer :: steps_completed - - ! if the argument steps_done is not present, assume - ! the accumulator is starting at step zero, that is, - ! set steps_completed to zero - - steps_completed = 0 - if(present(steps_done)) steps_completed = steps_done - - ! Set the stepping info: - - aC%num_steps = num_steps - aC%steps_done = steps_completed - - - ! Assign iAction and niAction components - - nullify(aC%iAction,aC%rAction) - - if(present(iAction)) then - - if(size(iAction)>0) then - - allocate(aC%iAction(size(iAction)),stat=ier) - if(ier /= 0) call die(myname_,"iAction allocate",ier) - - do i=1,size(iAction) - aC%iAction(i) = iAction(i) - enddo - - endif - - endif - - if(present(rAction)) then - - if(size(rAction)>0) then - - allocate(aC%rAction(size(rAction)),stat=ier) - if(ier /= 0) call die(myname_,"iAction allocate",ier) - - do i=1,size(rAction) - aC%rAction(i) = rAction(i) - enddo - - endif - - endif - - end subroutine initp_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initv_ - Initialize One Accumulator using Another -! -! !DESCRIPTION: -! 
This routine takes the integer and real attribute information (including -! accumulation action settings for each attribute) from a previously -! initialized {\tt Accumulator} (the input argument {\tt bC}), and uses -! it to create another {\tt Accumulator} (the output argument {\tt aC}). -! In the absence of the {\tt INTEGER} input arguments {\tt lsize}, -! {\tt num\_steps}, and {\tt steps\_done}, {\tt aC} will inherit from -! {\tt bC} its length, the number of steps in its accumulation cycle, and -! the number of steps completed in its present accumulation cycle, -! respectively. -! -! !INTERFACE: - - subroutine initv_(aC, bC, lsize, num_steps, steps_done) -! -! !USES: -! - use m_List, only : List - use m_List, only : ListExportToChar => exportToChar - use m_List, only : List_copy => copy - use m_List, only : List_allocated => allocated - use m_List, only : List_clean => clean - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: bC - integer, optional, intent(in) :: lsize - integer, optional, intent(in) :: num_steps - integer, optional, intent(in) :: steps_done - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: aC - -! !REVISION HISTORY: -! 11Sep00 - Jay Larson - initial prototype -! 17May01 - R. Jacob - change string_get to -! list_get -! 27JUL01 - E.T. Ong - added iaction,raction -! compatibility -! 2Aug02 - J. Larson made argument num_steps -! optional -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initv_' - - type(List) :: temp_iList, temp_rList - integer :: myNumSteps, myStepsDone - integer :: aC_lsize - integer :: niActions, nrActions - integer, dimension(:), allocatable :: iActionArray, rActionArray - integer :: i,ier - logical :: status - - ! Check that bC has been initialized - - status = initialized(aC=bC,die_flag=.true.,source_name=myname_) - - ! If the argument steps_done is present, set myStepsDone - ! to this value; otherwise, set it to zero - - if(present(num_steps)) then ! set it manually - myNumSteps = num_steps - else ! inherit it from bC - myNumSteps = bC%num_steps - endif - - ! If the argument steps_done is present, set myStepsDone - ! to this value; otherwise, set it to zero - - if(present(steps_done)) then ! set it manually - myStepsDone= steps_done - else ! inherit it from bC - myStepsDone = bC%steps_done - endif - - ! If the argument lsize is present, - ! set aC_lsize to this value; otherwise, set it to the lsize of bC - - if(present(lsize)) then ! set it manually - aC_lsize = lsize - else ! inherit it from bC - aC_lsize = lsize_(bC) - endif - - ! Convert the two Lists to two Strings - - niActions = 0 - nrActions = 0 - - if(List_allocated(bC%data%iList)) then - call List_copy(temp_iList,bC%data%iList) - niActions = nIAttr_(bC) - endif - - if(List_allocated(bC%data%rList)) then - call List_copy(temp_rList,bC%data%rList) - nrActions = nRAttr_(bC) - endif - - ! Convert the pointers to arrays - - allocate(iActionArray(niActions),rActionArray(nrActions),stat=ier) - if(ier /= 0) call die(myname_,"iActionArray/rActionArray allocate",ier) - - if( niActions>0 ) then - do i=1,niActions - iActionArray(i)=bC%iAction(i) - enddo - endif - - if( nrActions>0 ) then - do i=1,nrActions - rActionArray(i)=bC%rAction(i) - enddo - endif - - ! Call init with present arguments - - if( (niActions>0) .and. 
(nrActions>0) ) then - - call init_(aC, iList=ListExportToChar(temp_iList), & - iAction=iActionArray, & - rList=ListExportToChar(temp_rList), & - rAction=rActionArray, & - lsize=aC_lsize, & - num_steps=myNumSteps, & - steps_done=myStepsDone) - - else - - if( niActions>0 ) then - - call init_(aC, iList=ListExportToChar(temp_iList), & - iAction=iActionArray, & - lsize=aC_lsize, & - num_steps=myNumSteps, & - steps_done=myStepsDone) - - endif - - if( nrActions>0 ) then - - call init_(aC, rList=ListExportToChar(temp_rList), & - rAction=rActionArray, & - lsize=aC_lsize, & - num_steps=myNumSteps, & - steps_done=myStepsDone) - endif - - endif - - if(List_allocated(bC%data%iList)) call List_clean(temp_iList) - if(List_allocated(bC%data%rList)) call List_clean(temp_rList) - - deallocate(iActionArray,rActionArray,stat=ier) - if(ier /= 0) call die(myname_,"iActionArray/rActionArray deallocate",ier) - - ! Check that aC as been properly initialized - - status = initialized(aC=aC,die_flag=.true.,source_name=myname_) - - end subroutine initv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initavs_ - Initialize a simple Accumulator from an AttributeVector -! -! !DESCRIPTION: -! This routine takes the integer and real attribute information (including -! from a previously initialized {\tt AttributeVector} (the input argument {\tt aV}), and uses -! it to create a simple (sum only) {\tt Accumulator} (the output argument {\tt aC}). -! In the absence of the {\tt INTEGER} input argument {\tt lsize}, -! {\tt aC} will inherit from {\tt Av} its length. In the absence of the -! optional INTEGER argument, {\tt steps\_done} will be set to zero. -! -! !INTERFACE: - - subroutine initavs_(aC, aV, acsize, steps_done) -! -! !USES: -! - use m_AttrVect, only: AttrVect_lsize => lsize - use m_AttrVect, only: AttrVect_nIAttr => nIAttr - use m_AttrVect, only: AttrVect_nRAttr => nRAttr - use m_AttrVect, only: AttrVect_exIL2c => exportIListToChar - use m_AttrVect, only: AttrVect_exRL2c => exportRListToChar - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV - integer, optional, intent(in) :: acsize - integer, optional, intent(in) :: steps_done - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: aC - -! !REVISION HISTORY: -! 10Jan08 - R. Jacob - initial version based on initv_ -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initavs_' - - integer :: myNumSteps, myStepsDone - integer :: aC_lsize - integer :: i,ier - integer :: nIatt,nRatt - logical :: status - - - ! If the argument steps_done is present, set myStepsDone - ! to this value; otherwise, set it to zero - - if(present(steps_done)) then ! set it manually - myStepsDone= steps_done - else ! set it to zero - myStepsDone = 0 - endif - - ! If the argument acsize is present, - ! set aC_lsize to this value; otherwise, set it to the lsize of bC - - if(present(acsize)) then ! set it manually - aC_lsize = acsize - else ! inherit it from bC - aC_lsize = AttrVect_lsize(aV) - endif - nIatt=AttrVect_nIAttr(aV) - nRatt=AttrVect_nRAttr(aV) - - if((nIAtt>0) .and. 
(nRatt>0)) then - call inits_(aC,AttrVect_exIL2c(aV),AttrVect_exRL2c(aV), & - aC_lsize,myStepsDone) - else - if(nIatt>0) then - call inits_(aC,iList=AttrVect_exIL2c(aV),lsize=aC_lsize, & - steps_done=myStepsDone) - endif - if(nRatt>0) then - call inits_(aC,rList=AttrVect_exRL2c(aV),lsize=aC_lsize, & - steps_done=myStepsDone) - endif - endif - - - ! Check that aC as been properly initialized - - status = initialized(aC=aC,die_flag=.true.,source_name=myname_) - - end subroutine initavs_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destroy an Accumulator -! -! !DESCRIPTION: -! This routine deallocates all allocated memory structures associated -! with the input/output {\tt Accumulator} argument {\tt aC}. The -! success (failure) of this operation is signified by the zero (non-zero) -! value of the optional {\tt INTEGER} output argument {\tt stat}. If -! {\tt clean\_()} is invoked with {\tt stat} present, it is the user's -! obligation to check this return code and act accordingly. If {\tt stat} -! is not supplied and any of the deallocation operations fail, this -! routine will terminate execution with an error statement. -! -! !INTERFACE: - - subroutine clean_(aC, stat) -! -! !USES: -! - use m_mall - use m_stdio - use m_die - use m_AttrVect, only : AttrVect_clean => clean - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(Accumulator), intent(inout) :: aC - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 11Sep00 - Jay Larson - initial prototype -! 27JUL01 - E.T. Ong - deallocate pointers iAction -! and rAction. -! 1Mar02 - E.T. Ong removed the die to prevent -! crashes and added stat argument. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - if(present(stat)) then - stat=0 - call AttrVect_clean(aC%data,stat) - else - call AttrVect_clean(aC%data) - endif - - if( associated(aC%iAction) ) then - - deallocate(aC%iAction,stat=ier) - - if(ier /= 0) then - if(present(stat)) then - stat=ier - else - call warn(myname_,'deallocate(aC%iAction)',ier) - endif - endif - - endif - - if( associated(aC%rAction) ) then - - deallocate(aC%rAction,stat=ier) - - if(ier /= 0) then - if(present(stat)) then - stat=ier - else - call warn(myname_,'deallocate(aC%rAction)',ier) - endif - endif - - endif - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initialized_ - Check if an Accumulator is Initialized -! -! !DESCRIPTION: -! This logical function returns a value of {\tt .TRUE.} if the input -! {\tt Accumulator} argument {\tt aC} is initialized correctly. The -! term "correctly initialized" means there is internal consistency -! between the number of integer and real attributes in {\tt aC}, and -! their respective data structures for accumulation registers, and -! accumulation action flags. The optional {\tt LOGICAL} input argument -! {\tt die\_flag} if present, can result in messages written to -! {\tt stderr}: -! \begin {itemize} -! \item if {\tt die\_flag} is true and {\tt aC} is correctly initialized, -! and -! 
\item if {\tt die\_flag} is false and {\tt aC} is incorrectly -! initialized. -! \end{itemize} -! Otherwise, inconsistencies in how {\tt aC} is set up will result in -! termination with an error message. -! The optional {\tt CHARACTER} input argument {\tt source\_name} allows -! the user to, in the event of error, generate traceback information -! (e.g., the name of the routine that invoked this one). -! -! !INTERFACE: - - logical function initialized_(aC, die_flag, source_name) -! -! !USES: -! - - use m_stdio - use m_die - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : Attr_nIAttr => nIAttr - use m_AttrVect, only : Attr_nRAttr => nRAttr - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: aC - logical, optional, intent(in) :: die_flag - character(len=*), optional, intent(in) :: source_name - -! !REVISION HISTORY: -! 7AUG01 - E.T. Ong - initital prototype -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initialized_' - integer :: i - logical :: kill - logical :: aC_associated - - if(present(die_flag)) then - kill = .true. - else - kill = .false. - endif - - ! Initial value - initialized_ = .true. - aC_associated = .true. - - ! Check the association status of pointers in aC - - if( associated(aC%iAction) .or. associated(aC%rAction) ) then - aC_associated = .true. - else - initialized_ = .false. - aC_associated = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, myname_, & - ":: ERROR, Neither aC%iAction nor aC%rAction are associated" - call die(myname_,"Neither aC%iAction nor aC%rAction are associated") - endif - endif - - if( List_allocated(aC%data%iList) .or. List_allocated(aC%data%rList) ) then - aC_associated = .true. - else - initialized_ = .false. - aC_associated = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, myname_, & - ":: ERROR, Neither aC%data%iList nor aC%data%rList are allocated" - call die(myname_,"Neither aC%data%iList nor aC%data%rList are allocated") - endif - endif - - ! Make sure iAction and rAction sizes are greater than zero - - if(associated(aC%iAction)) then - if(size(aC%iAction)<=0) then - initialized_ = .false. - aC_associated = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, myname_, & - ":: ERROR, size(aC%iAction<=0), size = ", size(aC%iAction) - call die(myname_,"size(aC%iAction<=0), size = ", size(aC%iAction)) - endif - endif - endif - - if(associated(aC%rAction)) then - if(size(aC%rAction)<=0) then - initialized_ = .false. - aC_associated = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, myname_, & - ":: ERROR, size(aC%rAction<=0), size = ", size(aC%rAction) - call die(myname_,"size(aC%rAction<=0), size = ", size(aC%rAction)) - endif - endif - endif - - ! More sanity checking... - - if( aC_associated ) then - - if( (Attr_nIAttr(aC%data) == 0) .and. (Attr_nRAttr(aC%data) == 0) ) then - initialized_ = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, myname_, & - ":: ERROR, No attributes found in aC%data" - call die(myname_,"No attributes found in aC%data") - endif - endif - - if(Attr_nIAttr(aC%data) > 0) then - - if( size(aC%iAction) /= Attr_nIAttr(aC%data) ) then - initialized_ = .false. 
- if(kill) then - if(present(source_name)) write(stderr,*) source_name, myname_, & - ":: ERROR, size(aC%iAction) /= nIAttr(aC%data)" - call die(myname_,"size(aC%iAction) /= nIAttr(aC%data)") - endif - endif - - do i=1,Attr_nIAttr(aC%data) - if( (aC%iAction(i) /= MCT_SUM) .and. & - (aC%iAction(i) /= MCT_AVG) ) then - initialized_ = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, & - myname_, ":: ERROR, Invalid value found in aC%iAction" - call die(myname_,"Invalid value found in aC%iAction", & - aC%iAction(i)) - endif - endif - enddo - - endif ! if(Attr_nIAttr(aC%data) > 0) - - if(Attr_nRAttr(aC%data) > 0) then - - if( size(aC%rAction) /= Attr_nRAttr(aC%data) ) then - initialized_ = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, & - myname_, ":: ERROR, size(aC%rAction) /= nRAttr(aC%data)" - call die(myname_,"size(aC%rAction) /= nRAttr(aC%data)") - endif - endif - - do i=1,Attr_nRAttr(aC%data) - if( (aC%rAction(i) /= MCT_SUM) .and. & - (aC%rAction(i) /= MCT_AVG) ) then - initialized_ = .false. - if(kill) then - if(present(source_name)) write(stderr,*) source_name, & - myname_, ":: ERROR, Invalid value found in aC%rAction", & - aC%rAction(i) - call die(myname_,"Invalid value found in aC%rAction", & - aC%iAction(i)) - endif - endif - enddo - - endif ! if(Attr_nRAttr(aC%data) > 0) - - endif ! if (aC_associated) - - end function initialized_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: lsize_ - Length of an Accumulator -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the number of data points -! for which the input {\tt Accumulator} argument {\tt aC} is performing -! accumulation. This value corresponds to the length of the {\tt AttrVect} -! component {\tt aC\%data} that stores the accumulation registers. -! -! !INTERFACE: - - integer function lsize_(aC) -! -! !USES: -! - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: aC - -! !REVISION HISTORY: -! 12Sep00 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::lsize_' - - - ! The function AttrVect_lsize is called to return - ! its local size data - - lsize_=AttrVect_lsize(aC%data) - - end function lsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: NumSteps_ - Number of Accumulation Cycle Time Steps -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the number of time steps in an -! accumulation cycle for the input {\tt Accumulator} argument {\tt aC}. -! -! !INTERFACE: - - integer function NumSteps_(aC) -! -! !USES: -! - use m_die, only : die - use m_stdio, only : stderr - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: aC - -! !REVISION HISTORY: -! 7Aug02 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::NumSteps_' - - integer :: myNumSteps - - - ! 
Retrieve the number of cycle steps from aC: - - myNumSteps = aC%num_steps - - if(myNumSteps <= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: FATAL--illegal number of steps in an accumulation cycle = ',& - myNumSteps - call die(myname_) - endif - - NumSteps_ = myNumSteps - - end function NumSteps_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: StepsDone_ - Number of Completed Steps in the Current Cycle -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the of time steps that have -! been completed in the current accumulation cycle for the input -! {\tt Accumulator} argument {\tt aC}. -! -! !INTERFACE: - - integer function StepsDone_(aC) -! -! !USES: -! - use m_die, only : die - use m_stdio, only : stderr - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: aC - -! !REVISION HISTORY: -! 7Aug02 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::StepsDone_' - - integer :: myStepsDone - - ! Retrieve the number of completed steps from aC: - - myStepsDone = aC%steps_done - - if(myStepsDone < 0) then - write(stderr,'(2a,i8)') myname_, & - ':: FATAL--illegal number of completed steps = ',& - myStepsDone - call die(myname_) - endif - - StepsDone_ = myStepsDone - - end function StepsDone_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nIAttr_ - Return the Number of INTEGER Attributes -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the number of integer -! attributes that are stored in the input {\tt Accumulator} argument -! {\tt aC}. This value is equal to the number of integer attributes -! in the {\tt AttrVect} component {\tt aC\%data} that stores the -! accumulation registers. -! -! !INTERFACE: - - integer function nIAttr_(aC) -! -! !USES: -! - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator),intent(in) :: aC - -! !REVISION HISTORY: -! 12Sep00 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nIAttr_' - - ! The function AttrVect_nIAttr is called to return the - ! number of integer fields - - nIAttr_=AttrVect_nIAttr(aC%data) - - end function nIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nRAttr_ - number of REAL fields stored in the Accumulator. -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the number of real -! attributes that are stored in the input {\tt Accumulator} argument -! {\tt aC}. This value is equal to the number of real attributes -! in the {\tt AttrVect} component {\tt aC\%data} that stores the -! accumulation registers. -! -! !INTERFACE: - - integer function nRAttr_(aC) -! -! !USES: -! - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator),intent(in) :: aC - -! !REVISION HISTORY: -! 
12Sep00 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nRAttr_' - - ! The function AttrVect_nRAttr is called to return the - ! number of real fields - - nRAttr_=AttrVect_nRAttr(aC%data) - - end function nRAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getIList_ - Retrieve a Numbered INTEGER Attribute Name -! -! !DESCRIPTION: -! This routine returns as a {\tt String} (see the mpeu module -! {\tt m\_String} for information) the name of the {\tt ith} item in -! the integer registers of the {\tt Accumulator} argument {\tt aC}. -! -! !INTERFACE: - - subroutine getIList_(item, ith, aC) -! -! !USES: -! - use m_AttrVect, only : AttrVect_getIList => getIList - use m_String, only : String - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: ith - type(Accumulator), intent(in) :: aC - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: item - -! !REVISION HISTORY: -! 12Sep00 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::getIList_' - - call AttrVect_getIList(item,ith,aC%data) - - end subroutine getIList_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getRList_ - Retrieve a Numbered REAL Attribute Name -! -! !DESCRIPTION: -! This routine returns as a {\tt String} (see the mpeu module -! {\tt m\_String} for information) the name of the {\tt ith} item in -! the real registers of the {\tt Accumulator} argument {\tt aC}. -! -! !INTERFACE: - - subroutine getRList_(item, ith, aC) -! -! !USES: -! - use m_AttrVect, only : AttrVect_getRList => getRList - use m_String, only : String - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: ith - type(Accumulator),intent(in) :: aC - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: item - -! !REVISION HISTORY: -! 12Sep00 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::getRList_' - - call AttrVect_getRList(item,ith,aC%data) - - end subroutine getRList_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexIA_ - Index an INTEGER Attribute -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the index in the integer -! accumulation register buffer of the {\tt Accumulator} argument {\tt aC} -! the attribute named by the {\tt CHARACTER} argument {\tt item}. That -! is, all the accumulator running tallies for the attribute named -! {\tt item} reside in -!\begin{verbatim} -! aC%data%iAttr(indexIA_(aC,item),:). -!\end{verbatim} -! The user may request traceback information (e.g., the name of the -! routine from which this one is called) by providing values for either -! of the optional {\tt CHARACTER} arguments {\tt perrWith} or {\tt dieWith} -! In the event {\tt indexIA\_()} can not find {\tt item} in {\tt aC}, -! 
the routine behaves as follows: -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt indexIA\_()} returns a value of zero; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied traceback -! information stored in the argument {\tt perrWith}; -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied traceback -! information stored in the argument {\tt dieWith}. -! \end{enumerate} -! !INTERFACE: - - integer function indexIA_(aC, item, perrWith, dieWith) -! -! !USES: -! - use m_AttrVect, only : AttrVect_indexIA => indexIA - use m_die, only : die - use m_stdio,only : stderr - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 14Sep00 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::indexIA_' - - indexIA_=AttrVect_indexIA(aC%data,item) - - if(indexIA_==0) then - if(.not.present(dieWith)) then - if(present(perrWith)) write(stderr,'(4a)') perrWith, & - '" indexIA_() error, not found "',trim(item),'"' - else - write(stderr,'(4a)') dieWith, & - '" indexIA_() error, not found "',trim(item),'"' - call die(dieWith) - endif - endif - - end function indexIA_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexRA_ - index the Accumulator real attribute list. -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the index in the real -! accumulation register buffer of the {\tt Accumulator} argument {\tt aC} -! the attribute named by the {\tt CHARACTER} argument {\tt item}. That -! is, all the accumulator running tallies for the attribute named -! {\tt item} reside in -!\begin{verbatim} -! aC%data%rAttr(indexRA_(aC,item),:). -!\end{verbatim} -! The user may request traceback information (e.g., the name of the -! routine from which this one is called) by providing values for either -! of the optional {\tt CHARACTER} arguments {\tt perrWith} or {\tt dieWith} -! In the event {\tt indexRA\_()} can not find {\tt item} in {\tt aC}, -! the routine behaves as follows: -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt indexRA\_()} returns a value of zero; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied traceback -! information stored in the argument {\tt perrWith}; -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied traceback -! information stored in the argument {\tt dieWith}. -! \end{enumerate} -! -! !INTERFACE: - - integer function indexRA_(aC, item, perrWith, dieWith) -! -! !USES: -! - use m_AttrVect, only : AttrVect_indexRA => indexRA - use m_die, only : die - use m_stdio,only : stderr - - implicit none - -! !INPUT PARAMETERS: -! 
- type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 14Sep00 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::indexRA_' - - indexRA_=AttrVect_indexRA(aC%data,item) - - if(indexRA_==0) then - if(.not.present(dieWith)) then - if(present(perrWith)) write(stderr,'(4a)') perrWith, & - '" indexRA_() error, not found "',trim(item),'"' - else - write(stderr,'(4a)') dieWith, & - '" indexRA_() error, not found "',trim(item),'"' - call die(dieWith) - endif - endif - - end function indexRA_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportIAttr_ - Export INTEGER Attribute to a Vector -! -! !DESCRIPTION: -! This routine extracts from the input {\tt Accumulator} argument -! {\tt aC} the integer attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}, and returns it in -! the {\tt INTEGER} output array {\tt outVect}, and its length in the -! output {\tt INTEGER} argument {\tt lsize}. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt Accumulator} {\tt List} component {\tt aC\%data\%iList}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt outVect} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt outVect}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) at the time this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt outVect}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportIAttr_(aC, AttrTag, outVect, lsize) -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportIAttr => exportIAttr - - implicit none - -! !INPUT PARAMETERS: - - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: AttrTag - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: - -! 6May02 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportIAttr_' - - ! Export the data (inheritance from AttrVect) - if(present(lsize)) then - call AttrVect_exportIAttr(aC%data, AttrTag, outVect, lsize) - else - call AttrVect_exportIAttr(aC%data, AttrTag, outVect) - endif - - end subroutine exportIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportRAttrSP_ - Export REAL Attribute to a Vector -! -! !DESCRIPTION: -! 
This routine extracts from the input {\tt Accumulator} argument -! {\tt aC} the real attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}, and returns it in -! the {\tt REAL} output array {\tt outVect}, and its length in the -! output {\tt INTEGER} argument {\tt lsize}. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt Accumulator} {\tt List} component {\tt aC\%data\%iList}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt outVect} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt outVect}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) at the time this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt outVect}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportRAttrSP_(aC, AttrTag, outVect, lsize) -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: AttrTag - -! !OUTPUT PARAMETERS: - - real(SP), dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 6May02 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRAttrSP_' - - ! Export the data (inheritance from AttrVect) - - if(present(lsize)) then - call AttrVect_exportRAttr(aC%data, AttrTag, outVect, lsize) - else - call AttrVect_exportRAttr(aC%data, AttrTag, outVect) - endif - - end subroutine exportRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: exportRAttrDP_ - Export REAL Attribute to a Vector -! -! !DESCRIPTION: -! Double precision version of exportRAttrSP_ -! -! !INTERFACE: - - subroutine exportRAttrDP_(aC, AttrTag, outVect, lsize) -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: AttrTag - -! !OUTPUT PARAMETERS: - - real(DP), dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 6May02 - J.W. Larson - initial prototype. -! -! ______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRAttrDP_' - - ! Export the data (inheritance from AttrVect) - - if(present(lsize)) then - call AttrVect_exportRAttr(aC%data, AttrTag, outVect, lsize) - else - call AttrVect_exportRAttr(aC%data, AttrTag, outVect) - endif - - end subroutine exportRAttrDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
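Taken together, the Accumulator interfaces documented above suggest the following minimal usage sketch: create the registers with init, fill and read them back with the export/import routines, and release them with clean. Everything below is illustrative only; the attribute names, sizes, and the accumulate argument order are assumptions, and only the keyword arguments spelled out in the interfaces above are relied upon.

   ! Illustrative sketch (hypothetical caller, not part of the deleted
   ! module): drive the documented Accumulator interfaces from user code.
   ! Names, sizes and literal values are examples only.
   subroutine accumulator_usage_sketch()
      use m_Accumulator, only : Accumulator, init, clean, lsize, &
                                exportRAttr, importRAttr, MCT_SUM, MCT_AVG
      use m_realkinds,   only : FP
      implicit none

      type(Accumulator)               :: aC
      real(FP), dimension(:), pointer :: buf
      integer                         :: n
      integer                         :: actions(2)

      ! Two real registers over a 24-step cycle: average 'temp', sum 'prec'.
      actions(1) = MCT_AVG
      actions(2) = MCT_SUM
      call init(aC, rList='temp:prec', rAction=actions, &
                lsize=100, num_steps=24)

      ! ... each step: call accumulate(aV, aC) with an AttrVect holding
      !     the instantaneous fields (argument order assumed; see
      !     accumulate_ later in this module) ...

      ! Export the 'temp' register, letting the routine allocate buf
      ! (pointer nullified first, per the N.B. notes above), rescale it,
      ! and import it back.
      nullify(buf)
      call exportRAttr(aC, 'temp', buf, n)
      buf(:) = 2.0_FP * buf(:)                 ! e.g. rescale the register
      call importRAttr(aC, 'temp', buf, lsize(aC))

      deallocate(buf)    ! caller owns memory allocated by exportRAttr
      call clean(aC)
   end subroutine accumulator_usage_sketch

For the simple accumulators built with inits_ or initavs_, the module description above indicates every action is a SUM and the caller invokes average explicitly, so the rAction argument would simply be dropped from a sketch like this.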
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importIAttr_ - Import INTEGER Attribute from a Vector -! -! !DESCRIPTION: -! This routine imports data provided in the input {\tt INTEGER} vector -! {\tt inVect} into the {\tt Accumulator} argument {\tt aC}, storing -! it as the integer attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}. The input -! {\tt INTEGER} argument {\tt lsize} is used to ensure there is -! sufficient space in the {\tt Accumulator} to store the data. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt Accumulator} {\tt List} component {\tt aC\%data\%rList}. -! -! !INTERFACE: - - subroutine importIAttr_(aC, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die - use m_stdio , only : stderr - - use m_AttrVect, only : AttrVect_importIAttr => importIAttr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - integer, dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(Accumulator), intent(inout) :: aC - -! !REVISION HISTORY: -! 6May02 - J.W. Larson - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importIAttr_' - - ! Argument Check: - - if(lsize > lsize_(aC)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(aC).', & - 'lsize = ',lsize,'lsize_(aC) = ',lsize_(ac) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importIAttr(aC%data, AttrTag, inVect, lsize) - - end subroutine importIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importRAttrSP_ - Import REAL Attribute from a Vector -! -! !DESCRIPTION: -! This routine imports data provided in the input {\tt REAL} vector -! {\tt inVect} into the {\tt Accumulator} argument {\tt aC}, storing -! it as the real attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}. The input -! {\tt INTEGER} argument {\tt lsize} is used to ensure there is -! sufficient space in the {\tt Accumulator} to store the data. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt Accumulator} {\tt List} component {\tt aC\%data\%rList}. -! -! !INTERFACE: - - subroutine importRAttrSP_(aC, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die - use m_stdio , only : stderr - - use m_AttrVect, only : AttrVect_importRAttr => importRAttr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - real(SP), dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(Accumulator), intent(inout) :: aC - -! !REVISION HISTORY: -! 6May02 - J.W. Larson - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importRAttrSP_' - - ! Argument Check: - - if(lsize > lsize_(aC)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(aC).', & - 'lsize = ',lsize,'lsize_(aC) = ',lsize_(ac) - call die(myname_) - endif - - ! 
Import the data (inheritance from AttrVect) - - call AttrVect_importRAttr(aC%data, AttrTag, inVect, lsize) - - end subroutine importRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: importRAttrDP_ - Import REAL Attribute from a Vector -! -! !DESCRIPTION: -! Double precision version of importRAttrSP_ -! -! !INTERFACE: - - subroutine importRAttrDP_(aC, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die - use m_stdio , only : stderr - - use m_AttrVect, only : AttrVect_importRAttr => importRAttr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - real(DP), dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(Accumulator), intent(inout) :: aC - -! !REVISION HISTORY: -! 6May02 - J.W. Larson - initial prototype. -! ______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importRAttrDP_' - - ! Argument Check: - - if(lsize > lsize_(aC)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(aC).', & - 'lsize = ',lsize,'lsize_(aC) = ',lsize_(ac) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importRAttr(aC%data, AttrTag, inVect, lsize) - - end subroutine importRAttrDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: zero_ - Zero an Accumulator -! -! !DESCRIPTION: -! This subroutine clears the the {\tt Accumulator} argument {\tt aC}. -! This is accomplished by setting the number of completed steps in the -! accumulation cycle to zero, and zeroing out all of the accumlation -! registers. -! -! !INTERFACE: - - subroutine zero_(aC) -! -! !USES: -! - use m_AttrVect, only : AttrVect_zero => zero - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(Accumulator), intent(inout) :: aC - -! !REVISION HISTORY: -! 7Aug02 - Jay Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::zero_' - - ! Set number of completed cycle steps to zero: - - aC%steps_done = 0 - - ! Zero out the accumulation registers: - - call AttrVect_zero(aC%data) - - end subroutine zero_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aCaCSharedAttrIndexList_ - Cross-index Two Accumulators -! -! !DESCRIPTION: {\tt aCaCSharedAttrIndexList\_()} takes a pair of -! user-supplied {\tt Accumulator} variables {\tt aC1} and {\tt aC2}, -! and for choice of either {\tt REAL} or {\tt INTEGER} attributes (as -! specified literally in the input {\tt CHARACTER} argument {\tt attrib}) -! returns the number of shared attributes {\tt NumShared}, and arrays of -! indices {\tt Indices1} and {\tt Indices2} to their storage locations -! in {\tt aC1} and {\tt aC2}, respectively. -! -! {\bf N.B.:} This routine returns two allocated arrays---{\tt Indices1(:)} -! and {\tt Indices2(:)}---which must be deallocated once the user no longer -! needs them. Failure to do this will create a memory leak. -! -! 
!INTERFACE: - - subroutine aCaCSharedAttrIndexList_(aC1, aC2, attrib, NumShared, & - Indices1, Indices2) - -! -! !USES: -! - use m_stdio - use m_die, only : MP_perr_die, die, warn - - use m_List, only : GetSharedListIndices - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: aC1 - type(Accumulator), intent(in) :: aC2 - character*7, intent(in) :: attrib - -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: NumShared - integer,dimension(:), pointer :: Indices1 - integer,dimension(:), pointer :: Indices2 - -! !REVISION HISTORY: -! 7Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::aCaCSharedAttrIndexList_' - - integer :: ierr - - ! Based on the value of the argument attrib, pass the - ! appropriate pair of Lists for comparison... - - select case(trim(attrib)) - case('REAL','real') - call GetSharedListIndices(aC1%data%rList, aC2%data%rList, NumShared, & - Indices1, Indices2) - case('INTEGER','integer') - call GetSharedListIndices(aC1%data%iList, aC2%data%iList, NumShared, & - Indices1, Indices2) - case default - write(stderr,'(4a)') myname_,":: value of argument attrib=",attrib, & - " not recognized. Allowed values: REAL, real, INTEGER, integer" - ierr = 1 - call die(myname_, 'invalid value for attrib', ierr) - end select - - end subroutine aCaCSharedAttrIndexList_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aVaCSharedAttrIndexList_ - Cross-index with an AttrVect -! -! !DESCRIPTION: {\tt aVaCSharedAttrIndexList\_()} a user-supplied -! {\tt AttrVect} variable {\tt aV} and an {\tt Accumulator} variable -! {\tt aC}, and for choice of either {\tt REAL} or {\tt INTEGER} -! attributes (as ! specified literally in the input {\tt CHARACTER} -! argument {\tt attrib}) returns the number of shared attributes -! {\tt NumShared}, and arrays of indices {\tt Indices1} and {\tt Indices2} -! to their storage locations in {\tt aV} and {\tt aC}, respectively. -! -! {\bf N.B.:} This routine returns two allocated arrays---{\tt Indices1(:)} -! and {\tt Indices2(:)}---which must be deallocated once the user no longer -! needs them. Failure to do this will create a memory leak. -! -! !INTERFACE: - - subroutine aVaCSharedAttrIndexList_(aV, aC, attrib, NumShared, & - Indices1, Indices2) - -! -! !USES: -! - use m_stdio - use m_die, only : MP_perr_die, die, warn - - use m_AttrVect, only : AttrVect - - use m_List, only : GetSharedListIndices - - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: attrib - -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: NumShared - integer,dimension(:), pointer :: Indices1 - integer,dimension(:), pointer :: Indices2 - -! !REVISION HISTORY: -! 7Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::aVaCSharedAttrIndexList_' - - integer :: ierr - - ! Based on the value of the argument attrib, pass the - ! appropriate pair of Lists for comparison... 
- - select case(trim(attrib)) - case('REAL','real') - call GetSharedListIndices(aV%rList, aC%data%rList, NumShared, & - Indices1, Indices2) - case('INTEGER','integer') - call GetSharedListIndices(aV%iList, aC%data%iList, NumShared, & - Indices1, Indices2) - case default - write(stderr,'(4a)') myname_,":: value of argument attrib=",attrib, & - " not recognized. Allowed values: REAL, real, INTEGER, integer" - ierr = 1 - call die(myname_, 'invalid value for attrib', ierr) - end select - - end subroutine aVaCSharedAttrIndexList_ - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: accumulate_--Acumulate from an AttrVect to an Accumulator. -! -! !DESCRIPTION: -! This routine performs time {\em accumlation} of data present in an -! MCT field data {\tt AttrVect} variable {\tt aV} and combines it with -! the running tallies stored in the MCT {\tt Accumulator} variable {\tt aC}. -! This routine automatically identifies which -! fields are held in common by {\tt aV} and {\tt aC} and uses the -! accumulation action information stored in {\tt aC} to decide how -! each field in {\tt aV} is to be combined into its corresponding -! running tally in {\tt aC}. The accumulation operations currently -! supported are: -! \begin {itemize} -! \item {\tt MCT\_SUM}: Add the current values in the {\tt Av} to the current values in {\tt Ac}. -! \item {\tt MCT\_AVG}: Same as {\tt MCT\_SUM} except when {\tt steps\_done} is equal -! to {\tt num\_steps} then perform one more sum and replaced with average. -! \end {itemize} -! -! This routine also automatically increments the counter in {\tt aC} -! signifying the number of steps completed in the accumulation cycle. -! -! NOTE: The user must reset (zero) the {\tt Accumulator} after the average -! has been formed or the next call to {\tt accumulate} will add to the average. -! -! !INTERFACE: - - subroutine accumulate_(aV, aC) - -! -! !USES: -! - use m_stdio, only : stdout,stderr - use m_die, only : die - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV ! Input AttrVect - -! !INPUT/OUTPUT PARAMETERS: -! - type(Accumulator), intent(inout) :: aC ! Output Accumulator - -! !REVISION HISTORY: -! 18Sep00 - J.W. Larson -- initial version. -! 7Feb01 - J.W. Larson -- General version. -! 10Jun01 - E.T. Ong -- fixed divide-by-zero problem in integer -! attribute accumulation. -! 27Jul01 - E.T. Ong -- removed action argument. -! Make compatible with new Accumulator type. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::accumulate_' - -! Overlapping attribute index number - integer :: num_indices - -! Overlapping attribute index storage arrays: - integer, dimension(:), pointer :: aCindices, aVindices - integer :: aCindex, aVindex - -! Error flag and loop indices - integer :: ierr, l, n - -! Averaging time-weighting factor: - real(FP) :: step_weight - integer :: num_steps - -! Character variable used as a data type flag: - character*7 :: data_flag - - ! Sanity check of arguments: - - if(lsize_(aC) /= AttrVect_lsize(aV)) then - write(stderr,'(2a,i8,a,i8)') myname_, & - ':: Mismatched Accumulator/AttrVect lengths. 
AttrVect_lsize(aV) = ',& - AttrVect_lsize(aV), 'lsize_(aC) = ',lsize_(aC) - call die(myname_) - endif - - if(aC%num_steps == 0) then - write(stderr,'(2a)') myname,':: FATAL--Zero steps in accumulation cycle.' - call die(myname_) - endif - - ! Set num_steps from aC: - - num_steps = aC%num_steps - - ! Accumulation of REAL attribute data: - - if( associated(aC%rAction) ) then ! if summing or avergaging reals... - - ! Accumulate only if fields are present - - data_flag = 'REAL' - call aVaCSharedAttrIndexList_(aV, aC, data_flag, num_indices, & - aVindices, aCindices) - - if(num_indices > 0) then - do n=1,num_indices - aVindex = aVindices(n) - aCindex = aCindices(n) - - ! Accumulate if the action is MCT_SUM or MCT_AVG - if( (aC%rAction(aCindex) == MCT_SUM).or. & - (aC%rAction(aCindex) == MCT_AVG) ) then - do l=1,AttrVect_lsize(aV) - aC%data%rAttr(aCindex,l) = aC%data%rAttr(aCindex,l) + & - aV%rAttr(aVindex,l) - end do - endif - end do - - deallocate(aVindices, aCindices, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Error in first deallocate(aVindices...), ierr = ',ierr - call die(myname_) - endif - - endif ! if(num_indices > 0) - - endif ! if( associated(aC%rAction) ) - - - ! Accumulation of INTEGER attribute data: - - if( associated(aC%iAction) ) then ! if summing or avergaging ints... - - ! Accumulate only if fields are present - - - data_flag = 'INTEGER' - call aVaCSharedAttrIndexList_(aV, aC, data_flag, num_indices, & - aVindices, aCindices) - - if(num_indices > 0) then - - do n=1,num_indices - aVindex = aVindices(n) - aCindex = aCindices(n) - - ! Accumulate if the action is MCT_SUM or MCT_AVG - if( (aC%iAction(aCindex) == MCT_SUM) .or. & - (aC%iAction(aCindex) == MCT_AVG) ) then - do l=1,AttrVect_lsize(aV) - aC%data%iAttr(aCindex,l) = aC%data%iAttr(aCindex,l) + & - aV%iAttr(aVindex,l) - end do - endif - end do - - deallocate(aVindices, aCindices, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Error in second deallocate(aVindices...), ierr = ',ierr - call die(myname_) - endif - - endif ! if(num_indices > 0) - - endif ! if( associated(aC%iAction) ) - - ! Increment aC%steps_done: - - aC%steps_done = aC%steps_done + 1 - - ! If we are at the end of an averaging period, compute the - ! average (if desired). - - if(aC%steps_done == num_steps) then - - step_weight = 1.0_FP / REAL(num_steps,FP) - do n=1,nRAttr_(aC) - if( aC%rAction(n) == MCT_AVG ) then - do l=1,lsize_(aC) - aC%data%rAttr(n,l) = step_weight * aC%data%rAttr(n,l) - enddo - endif - enddo - - do n=1,nIAttr_(aC) - if( aC%iAction(n) == MCT_AVG ) then - do l=1,lsize_(aC) - aC%data%iAttr(n,l) = aC%data%iAttr(n,l) / num_steps - enddo - endif - enddo - - endif - - end subroutine accumulate_ - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: average_ -- Force an average to be taken on an Accumulator -! -! !DESCRIPTION: -! This routine will compute the average of the current values in an -! {\tt Accumulator} using the current value of {\tt steps\_done} -! in the {\tt Accumulator} -! -! !INTERFACE: - - subroutine average_(aC) - -! -! !USES: -! - use m_stdio, only : stdout,stderr - use m_die, only : die - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! 
- type(Accumulator), intent(inout) :: aC ! Output Accumulator - -! !REVISION HISTORY: -! 11Jan08 - R.Jacob -- initial version based on accumulate_ -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::average_' - -! Overlapping attribute index number - integer :: num_indices - -! Overlapping attribute index storage arrays: - integer, dimension(:), pointer :: aCindices, aVindices - integer :: aCindex, aVindex - -! Error flag and loop indices - integer :: ierr, l, n - -! Averaging time-weighting factor: - real(FP) :: step_weight - integer :: steps_done - - - if(aC%num_steps == 0) then - write(stderr,'(2a)') myname_,':: FATAL--Zero steps in accumulation cycle.' - call die(myname_) - endif - - if(aC%steps_done == 0) then - write(stderr,'(2a)') myname_,':: FATAL--Zero steps completed in accumulation cycle.' - call die(myname_) - endif - - ! Set num_steps from aC: - - steps_done = aC%steps_done - - - step_weight = 1.0_FP / REAL(steps_done,FP) - do n=1,nRAttr_(aC) - do l=1,lsize_(aC) - aC%data%rAttr(n,l) = step_weight * aC%data%rAttr(n,l) - enddo - enddo - - do n=1,nIAttr_(aC) - do l=1,lsize_(aC) - aC%data%iAttr(n,l) = aC%data%iAttr(n,l) / steps_done - enddo - enddo - - - end subroutine average_ - - end module m_Accumulator diff --git a/src/externals/mct/mct/m_AccumulatorComms.F90 b/src/externals/mct/mct/m_AccumulatorComms.F90 deleted file mode 100644 index e790418c30c..00000000000 --- a/src/externals/mct/mct/m_AccumulatorComms.F90 +++ /dev/null @@ -1,803 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_AccumulatorComms - MPI Communication Methods for the Accumulator -! -! -! !DESCRIPTION: -! -! This module contains communications methods for the {\tt Accumulator} -! datatype (see {\tt m\_Accumulator} for details). MCT's communications -! are implemented in terms of the Message Passing Interface (MPI) standard, -! and we have as best as possible, made the interfaces to these routines -! appear as similar as possible to the corresponding MPI routines. For the -! { \tt Accumulator}, we currently support only the following collective -! operations: broadcast, gather, and scatter. The gather and scatter -! operations rely on domain decomposition descriptors that are defined -! elsewhere in MCT: the {\tt GlobalMap}, which is a one-dimensional -! decomposition (see the MCT module {\tt m\_GlobalMap} for more details); -! and the {\tt GlobalSegMap}, which is a segmented decomposition capable -! of supporting multidimensional domain decompositions (see the MCT module -! {\tt m\_GlobalSegMap} for more details). -! -! !INTERFACE: - - module m_AccumulatorComms -! -! !USES: -! -! No external modules are used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: -! -! List of communications Methods for the Accumulator class - - public :: gather ! gather all local vectors to the root - public :: scatter ! scatter from the root to all PEs - public :: bcast ! bcast from root to all PEs - -! Definition of interfaces for the communication methods for -! 
the Accumulator: - - interface gather ; module procedure & - GM_gather_, & - GSM_gather_ - end interface - interface scatter ; module procedure & - GM_scatter_, & - GSM_scatter_ - end interface - interface bcast ; module procedure bcast_ ; end interface - -! !REVISION HISTORY: -! 31Oct00 - Jay Larson - initial prototype-- -! These routines were separated from the module m_Accumulator -! 15Jan01 - Jay Larson - Specification of -! APIs for the routines GSM_gather_() and GSM_scatter_(). -! 10May01 - Jay Larson - Changes in the -! comms routine to match the MPI model for collective -! communications, and general clean-up of prologues. -! 9Aug01 - E.T. Ong - Added private routine -! bcastp_. Used new Accumulator routines initp_ and -! initialized_ to simplify the routines. -! 26Aug02 - E.T. Ong - thourough code revision; -! no added routines -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_AccumulatorComms' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GM_gather_ - Gather Accumulator Distributed by a GlobalMap -! -! !DESCRIPTION: {\tt GM\_gather()} takes a distributed (across the -! communicator associated with the handle {\tt comm}) input -! {\tt Accumulator} argument {\tt iC} and gathers its data to the -! {\tt Accumulator} {\tt oC} on the {\tt root}. The decomposition of -! {\tt iC} is described by the input {\tt GlobalMap} argument {\tt Gmap}. -! The success (failure) of this operation is signified by the zero (nonzero) -! value of the optional output argument {\tt stat}. -! -! !INTERFACE: - - subroutine GM_gather_(iC, oC, GMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalMap, only : GlobalMap - use m_AttrVect, only : AttrVect_clean => clean - use m_Accumulator, only : Accumulator - use m_Accumulator, only : Accumulator_initialized => initialized - use m_Accumulator, only : Accumulator_initv => init - use m_AttrVectComms, only : AttrVect_gather => gather - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: iC - type(GlobalMap) , intent(in) :: GMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: oC - integer, optional,intent(out) :: stat - -! !REVISION HISTORY: -! 13Sep00 - Jay Larson - initial prototype -! 31Oct00 - Jay Larson - relocated to the -! module m_AccumulatorComms -! 15Jan01 - Jay Larson - renamed GM_gather_ -! 10May01 - Jay Larson - revamped comms -! model to match MPI comms model, and cleaned up prologue -! 9Aug01 - E.T. Ong - 2nd prototype. Used the -! intiialized_ and accumulator init routines. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GM_gather_' - integer :: myID, ier, i - logical :: status - - ! Initialize status flag (if present) - - if(present(stat)) stat=0 - - call MP_comm_rank(comm, myID, ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - ! Argument check of iC: kill if iC is not initialized - ! on all processes - - status = Accumulator_initialized(iC,die_flag=.true.,source_name=myname_) - - ! NOTE: removed argument check for oC on the root. - ! Is there any good way to check if an accumulator is NOT initialized? - - ! Initialize oC from iC. 
Clean oC%data - we don't want this av. - - if(myID == root) then - - call Accumulator_initv(oC,iC,lsize=1, & - num_steps=iC%num_steps,steps_done=iC%steps_done) - call AttrVect_clean(oC%data) - - endif - - ! Initialize oC%data. Gather distributed iC%data to oC%data on the root - - call AttrVect_gather(iC%data, oC%data, GMap, root, comm, ier) - - if(ier /= 0) then - call perr(myname_,'AttrVect_gather(iC%data, oC%data...',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - ! Check oC to see if its valid - - if(myID == root) then - status = Accumulator_initialized(oC,die_flag=.true.,source_name=myname_) - endif - - end subroutine GM_gather_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GSM_gather_ - Gather Accumulator Distributed by a GlobalSegMap -! -! !DESCRIPTION: This routine takes the distrubuted (on the communcator -! associated with the handle {\tt comm}) input {\tt Accumulator} -! argument {\tt iC} gathers it to the the {\tt Accumulator} argument -! {\tt oC} (valid only on the {\tt root}). The decompositon of {\tt iC} -! is contained in the input {\tt GlobalSegMap} argument {\tt GSMap}. -! The success (failure) of this operation is signified by the zero -! (nonzero) returned value of the {\tt INTEGER} flag {\tt stat}. -! -! !INTERFACE: - - subroutine GSM_gather_(iC, oC, GSMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalSegMap, only : GlobalSegMap - use m_AttrVect, only : AttrVect_clean => clean - use m_Accumulator, only : Accumulator - use m_Accumulator, only : Accumulator_initv => init - use m_Accumulator, only : Accumulator_initialized => initialized - use m_AttrVectComms, only : AttrVect_gather => gather - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: iC - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: oC - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 10May01 - Jay Larson - Initial code and -! cleaned up prologue. -! 09Aug01 - E.T. Ong - 2nd prototype. Used the -! intiialized_ and accumulator init routines. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GSM_gather_' - integer :: myID, ier, i - logical :: status - - ! Initialize status flag (if present) - - if(present(stat)) stat=0 - - call MP_comm_rank(comm, myID, ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - ! Argument check of iC - - status = Accumulator_initialized(iC,die_flag=.true.,source_name=myname_) - - ! NOTE: removed argument check for oC on the root. - ! Is there any good way to check if an accumulator is NOT initialized? - - ! Initialize oC from iC. Clean oC%data - we don't want this av. - - if(myID == root) then - call Accumulator_initv(oC,iC,lsize=1, & - num_steps=iC%num_steps,steps_done=iC%steps_done) - call AttrVect_clean(oC%data) - endif - - ! Gather distributed iC%data to oC%data on the root - - call AttrVect_gather(iC%data, oC%data, GSMap, root, comm, ier) - - if(ier /= 0) then - call perr(myname_,'AttrVect_gather(iC%data, oC%data...',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - ! 
Check oC to see if its valid - - if(myID == root) then - status = Accumulator_initialized(oC,die_flag=.true.,source_name=myname_) - endif - - - end subroutine GSM_gather_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GM_scatter_ - Scatter an Accumulator using a GlobalMap -! -! !DESCRIPTION: This routine takes the input {\tt Accumulator} argument -! {\tt iC} (valid only on the {\tt root}), and scatters it to the -! distributed {\tt Accumulator} argument {\tt oC} on the processes -! associated with the communicator handle {\tt comm}. The decompositon -! used to scatter the data is contained in the input {\tt GlobalMap} -! argument {\tt GMap}. The success (failure) of this operation is -! signified by the zero (nonzero) returned value of the {\tt INTEGER} -! flag {\tt stat}. -! -! !INTERFACE: - - subroutine GM_scatter_(iC, oC, GMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalMap, only : GlobalMap - use m_Accumulator, only : Accumulator - use m_Accumulator, only : Accumulator_initv => init - use m_Accumulator, only : Accumulator_initialized => initialized - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVectComms, only : AttrVect_scatter => scatter - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: iC - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: oC - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 14Sep00 - Jay Larson - initial prototype -! 31Oct00 - Jay Larson - moved from the module -! m_Accumulator to m_AccumulatorComms -! 15Jan01 - Jay Larson - renamed GM_scatter_. -! 10May01 - Jay Larson - revamped code to fit -! MPI-like comms model, and cleaned up prologue. -! 09Aug01 - E.T. Ong - 2nd prototype. Used the -! initialized_, Accumulator init_, and bcastp_ routines. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GM_scatter_' - - integer :: myID, ier - logical :: status - - ! Initialize status flag (if present) - - if(present(stat)) stat=0 - - call MP_comm_rank(comm, myID, ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - ! Argument check of iC - - if(myID==root) then - status = Accumulator_initialized(iC,die_flag=.true.,source_name=myname_) - endif - - ! NOTE: removed argument check for oC on all processes. - ! Is there any good way to check if an accumulator is NOT initialized? - - ! Copy accumulator from iC to oC - ! Clean up oC%data on root. - - if(myID == root) then - call Accumulator_initv(oC,iC,lsize=1,num_steps=iC%num_steps, & - steps_done=iC%steps_done) - call AttrVect_clean(oC%data) - endif - - ! Broadcast oC (except for oC%data) - - call bcastp_(oC, root, comm, stat) - - ! Scatter the AttrVect component of iC - - call AttrVect_scatter(iC%data, oC%data, GMap, root, comm, ier) - - if(ier /= 0) then - call perr(myname_,'AttrVect_scatter(iC%data, oC%data...',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - ! Check oC to see if its valid - - status = Accumulator_initialized(oC,die_flag=.true.,source_name=myname_) - - end subroutine GM_scatter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
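The scatter generic declared at the top of this module follows the calling pattern of GM_scatter_ above. A minimal sketch of scattering a root-only Accumulator across a communicator; the wrapper subroutine and its argument names are illustrative, while the generic name scatter is the one declared in the module's interface block:

      subroutine scatter_sketch(aCglobal, GMap, root, comm)
         use m_Accumulator,      only : Accumulator
         use m_GlobalMap,        only : GlobalMap
         use m_AccumulatorComms, only : scatter    ! generic bound to GM_scatter_
         implicit none
         type(Accumulator), intent(in) :: aCglobal ! meaningful on the root only
         type(GlobalMap),   intent(in) :: GMap     ! 1-d decomposition to scatter to
         integer,           intent(in) :: root, comm
         type(Accumulator) :: aCdist               ! distributed result on every rank
         integer :: ier
         call scatter(aCglobal, aCdist, GMap, root, comm, ier)
         if (ier /= 0) stop 'Accumulator scatter failed'
      end subroutine scatter_sketch

Passing a GlobalSegMap instead of a GlobalMap selects the GSM_scatter_ variant that follows.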
Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GSM_scatter_ - Scatter an Accumulator using a GlobalSegMap -! -! !DESCRIPTION: This routine takes the input {\tt Accumulator} argument -! {\tt iC} (valid only on the {\tt root}), and scatters it to the -! distributed {\tt Accumulator} argument {\tt oC} on the processes -! associated with the communicator handle {\tt comm}. The decompositon -! used to scatter the data is contained in the input {\tt GlobalSegMap} -! argument {\tt GSMap}. The success (failure) of this operation is -! signified by the zero (nonzero) returned value of the {\tt INTEGER} -! flag {\tt stat}. -! -! !INTERFACE: - - subroutine GSM_scatter_(iC, oC, GSMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalSegMap, only : GlobalSegMap - use m_Accumulator, only : Accumulator - use m_Accumulator, only : Accumulator_initv => init - use m_Accumulator, only : Accumulator_initialized => initialized - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVectComms, only : AttrVect_scatter => scatter - - implicit none - -! !INPUT PARAMETERS: -! - type(Accumulator), intent(in) :: iC - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(Accumulator), intent(out) :: oC - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 10May01 - Jay Larson - Initial code/prologue -! 09Aug01 - E.T. Ong 2nd prototype. Used the -! initialized and accumulator init routines. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GSM_scatter_' - - integer :: myID, ier - logical :: status - - ! Initialize status flag (if present) - - if(present(stat)) stat=0 - - call MP_comm_rank(comm, myID, ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - ! Argument check of iC - - if(myID == root) then - status = Accumulator_initialized(iC,die_flag=.true.,source_name=myname_) - endif - - ! NOTE: removed argument check for oC on all processes. - ! Is there any good way to check if an accumulator is NOT initialized? - - ! Copy accumulator from iC to oC - ! Clean up oC%data on root. - - if(myID == root) then - call Accumulator_initv(oC,iC,lsize=1,num_steps=iC%num_steps, & - steps_done=iC%steps_done) - call AttrVect_clean(oC%data) - endif - - ! Broadcast oC (except for oC%data) - - call bcastp_(oC, root, comm, stat) - - ! Scatter the AttrVect component of aC - - call AttrVect_scatter(iC%data, oC%data, GSMap, root, comm, ier) - - if(ier /= 0) then - call perr(myname_,'AttrVect_scatter(iC%data, oC%data...',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - ! Check oC if its valid - - status = Accumulator_initialized(oC,die_flag=.true.,source_name=myname_) - - - end subroutine GSM_scatter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcast_ - Broadcast an Accumulator -! -! !DESCRIPTION: This routine takes the input {\tt Accumulator} argument -! {\tt aC} (on input valid only on the {\tt root}), and broadcasts it -! to all the processes associated with the communicator handle -! {\tt comm}. 
The success (failure) of this operation is signified by -! the zero (nonzero) returned value of the {\tt INTEGER} flag {\tt stat}. -! -! !INTERFACE: -! - subroutine bcast_(aC, root, comm, stat) - -! -! !USES: -! - use m_die - use m_mpif90 - use m_AttrVectComms, only : AttrVect_bcast => bcast - - use m_Accumulator, only : Accumulator - use m_Accumulator, only : Accumulator_initialized => initialized - - implicit none - -! !INPUT PARAMETERS: -! - integer,intent(in) :: root - integer,intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(Accumulator), intent(inout) :: aC ! (IN) on root, (OUT) elsewhere - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 14Sep00 - Jay Larson - initial prototype -! 31Oct00 - Jay Larson - moved from the module -! m_Accumulator to m_AccumulatorComms -! 09May01 - Jay Larson - cleaned up prologue -! 09Aug01 - E.T. Ong - 2nd prototype. Made use of -! bcastp_ routine. Also more argument checks. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcast_' - - integer :: myID - integer :: ier - logical :: status - - if(present(stat)) stat=0 - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - ! Argument check : Kill if the root aC is not initialized, - ! or if the non-root aC is initialized - - if(myID == root) then - status = Accumulator_initialized(aC,die_flag=.true.,source_name=myname_) - endif - - ! NOTE: removed argument check for aC on all non-root processes. - ! Is there any good way to check if an accumulator is NOT initialized? - - call bcastp_(aC, root, comm, stat) - - - ! Broadcast the root value of aC%data - - call AttrVect_bcast(aC%data, root, comm, ier) - - if(ier /= 0) then - call perr(myname_,'AttrVect_bcast(aC%data)',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - ! Check that aC on all processes are initialized - - status = Accumulator_initialized(aC,die_flag=.true.,source_name=myname_) - - - end subroutine bcast_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcastp_ - Broadcast an Accumulator (but Not its Registers) -! -! !DESCRIPTION: This routine broadcasts all components of the accumulator -! aC except for aC%data. This is a private routine, only meant -! to be used by accumulator scatter and gather routines. -! -! -! !INTERFACE: -! - subroutine bcastp_(aC, root, comm, stat) - -! -! !USES: -! - use m_die - use m_mpif90 - use m_AttrVectComms, only : AttrVect_bcast => bcast - use m_Accumulator, only : Accumulator - use m_Accumulator, only : Accumulator_initp => initp - use m_Accumulator, only : Accumulator_nIAttr => nIAttr - use m_Accumulator, only : Accumulator_nRAttr => nRAttr - - implicit none - -! !INPUT PARAMETERS: -! - integer,intent(in) :: root - integer,intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(Accumulator), intent(inout) :: aC ! (IN) on root, (OUT) elsewhere - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 09Aug01 - E.T. 
Ong - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcastp_' - - integer :: myID - integer :: ier, i - integer :: aC_num_steps, aC_steps_done, aC_nIAttr, aC_nRAttr - integer :: FirstiActionIndex, LastiActionIndex - integer :: FirstrActionIndex, LastrActionIndex - integer :: AccBuffSize - integer :: nIAttr, nRAttr - integer, dimension(:), allocatable :: AccBuff, aC_iAction, aC_rAction - logical :: status - - if(present(stat)) stat=0 - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - ! STEP 1: Pack broadcast buffer. - - ! On the root, load up the Accumulator Buffer: Buffer Size = - ! num_steps {1} + steps_done {1} + nIAttr {1} + nRAttr {1} + - ! iAction {nIAttr} + rAction {nRAttr} - - - if(myID == root) then - - if(associated(aC%iAction)) then - nIAttr = size(aC%iAction) - else - nIAttr = 0 - endif - - if(associated(aC%rAction)) then - nRAttr = size(aC%rAction) - else - nRAttr = 0 - endif - - AccBuffSize = 4+nIAttr+nRAttr - - endif - - ! Use AccBuffSize to initialize AccBuff on all processes - - call MPI_BCAST(AccBuffSize, 1, MP_INTEGER, root, comm, ier) - - if(ier /= 0) call MP_perr_die(myname_,'AttrVect_bcast(AccBuffSize)',ier) - - allocate(AccBuff(AccBuffSize),stat=ier) - if(ier /= 0) call MP_perr_die(myname_,"AccBuff allocate",ier) - - if(myID == root) then - - ! load up iC%num_steps and iC%steps_done - - AccBuff(1) = aC%num_steps - AccBuff(2) = aC%steps_done - - ! Load up nIAttr and nRAttr - - AccBuff(3) = nIAttr - AccBuff(4) = nRAttr - - ! Load up aC%iAction (pointer copy) - - do i=1,nIAttr - AccBuff(4+i) = aC%iAction(i) - enddo - - ! Load up aC%rAction (pointer copy) - - do i=1,nRAttr - AccBuff(4+nIAttr+i) = aC%rAction(i) - enddo - endif - - ! STEP 2: Broadcast - - ! Broadcast the root value of AccBuff - - call MPI_BCAST(AccBuff, AccBuffSize, MP_INTEGER, root, comm, ier) - - if(ier /= 0) call MP_perr_die(myname_,'MPI_bcast(AccBuff...',ier) - - - ! STEP 3: Unpack broadcast buffer. - - ! On all processes unload aC_num_steps, aC_steps_done - ! aC_nIAttr, and aC_nRAttr from StepBuff - - aC_num_steps = AccBuff(1) - aC_steps_done = AccBuff(2) - aC_nIAttr = AccBuff(3) - aC_nRAttr = AccBuff(4) - - ! Unload iC%iAction and iC%rAction - - if(aC_nIAttr > 0) then - allocate(aC_iAction(aC_nIAttr),stat=ier) - if(ier /= 0) call die(myname_,"allocate aC_iAction",ier) - - FirstiActionIndex = 5 - LastiActionIndex = 4+aC_nIAttr - aC_iAction(1:aC_nIAttr) = AccBuff(FirstiActionIndex:LastiActionIndex) - - endif - - if(aC_nRAttr > 0) then - allocate(aC_rAction(aC_nRAttr),stat=ier) - if(ier /= 0) call die(myname_,"allocate aC_rAction",ier) - - FirstrActionIndex = 5+aC_nIAttr - LastrActionIndex = 4+aC_nIAttr+aC_nRAttr - aC_rAction(1:aC_nRAttr) = AccBuff(FirstrActionIndex:LastrActionIndex) - - endif - - ! 
Initialize aC on non-root processes - - if( (aC_nIAttr > 0).and.(aC_nRAttr > 0) ) then - - if(myID /= root) then - call Accumulator_initp(aC,iAction=aC_iAction,rAction=aC_rAction, & - num_steps=aC_num_steps, & - steps_done=aC_steps_done) - endif - - deallocate(aC_iAction,aC_rAction,stat=ier) - if(ier /= 0) call die(myname_,"deallocate aC_iAction...",ier) - - else - - if (aC_nIAttr > 0) then - if(myID /= root) then - call Accumulator_initp(aC,iAction=aC_iAction, & - num_steps=aC_num_steps, & - steps_done=aC_steps_done) - endif - deallocate(aC_iAction,stat=ier) - if(ier /= 0) call die(myname_,"deallocate aC_iAction...",ier) - endif - - if (aC_nRAttr > 0) then - if(myID /= root) then - call Accumulator_initp(aC,rAction=aC_rAction, & - num_steps=aC_num_steps, & - steps_done=aC_steps_done) - endif - deallocate(aC_rAction,stat=ier) - if(ier /= 0) call die(myname_,"deallocate aC_iAction...",ier) - endif - - endif - - ! Clean up allocated arrays - - deallocate(AccBuff,stat=ier) - if(ier /= 0) call die(myname_,"deallocate(AccBuff)",ier) - - - end subroutine bcastp_ - - - end module m_AccumulatorComms - - - - - - - diff --git a/src/externals/mct/mct/m_AttrVect.F90 b/src/externals/mct/mct/m_AttrVect.F90 deleted file mode 100644 index d186fb84ce5..00000000000 --- a/src/externals/mct/mct/m_AttrVect.F90 +++ /dev/null @@ -1,4138 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_AttrVect - Multi-field Storage -! -! !DESCRIPTION: -! -! An {\em attribute vector} is a scheme for storing bundles of integer -! and real data vectors, indexed by the names of the fields stored in -! {\tt List} format (see the mpeu module {\tt m\_List} for more -! information about the {\tt List} datatype). The ordering of the -! fieldnames in the integer and real attribute {\tt List} components -! ({\tt AttrVect\%iList} and {\tt AttrVect\%rList}, respectively) -! corresponds to the storage order of the attributes in their respective -! data buffers (the components {\tt AttrVect\%iAttr(:,:)} and -! {\tt AttrVect\%rAttr(:,:)}, respectively). The organization of -! the fieldnames in {\tt List} format, along with the direct mapping -! between {\tt List} items and locations in the data buffer, allows -! the user to have {\em random access} to the field data. This -! approach also allows the user to set the number and the names of fields -! stored in an {\tt AttrVect} at run-time. -! -! The {\tt AttrVect} stores field data in a {\em pointwise} fashion -! (that is, the data are grouped so that all the integer or real data -! associated with an individual point are adjacent to each other in memory. -! This amounts to the having the integer and real field data arrays in -! the {\tt AttrVect} (the components {\tt AttrVect\%iAttr(:,:)} and -! {\tt AttrVect\%rAttr(:,:)}, respectively) having the attribute index -! as the major (or fastest-varying) index. A prime example of this is -! observational data input to a data assimilation system. In the Model -! Coupling Toolkit, this datatype is the fundamental type for storing -! field data exchanged by component models, and forms a basis for other -! MCT datatypes that encapsulate time accumulation/averaging buffers (the -! {\tt Accumulator} datatype defined in the module {\tt m\_Accumulator}), -! 
coordinate grid information (the {\tt GeneralGrid} datatype defined in -! the module {\tt m\_GeneralGrid}), and sparse interpolation matrices -! (the {\tt SparseMatrix} datatype defined in the module -! {\tt m\_SparseMatrix}). -! -! The attribute vector is implemented in Fortran 90 using the -! {\tt AttrVect} derived type. This module contains the definition -! of the {\tt AttrVect}, and the numerous methods that service it. There -! are a number of initialization (creation) schemes, and a routine for -! zeroing out the elements of an {\tt AttrVect}. There is a method -! to {\em clean} up allocated memory used by an {\tt AttrVect} -! (destruction). There are numerous query methods that return: the -! number of datapoints (or {\em length}; the numbers of integer and -! real attributes; the data buffer index of a given real or integer -! attribute; and return the lists of real and integer attributes. There -! also exist methods for exporting a given attribute as a one-dimensional -! array and importing a given attribute from a one-dimensional array. -! There is a method for copying attributes from one {\tt AttrVect} to -! another. There is also a method for cross-indexing the attributes in -! two {\tt AttrVect} variables. In addition, there are methods that -! return those cross-indexed attributes along with some auxiliary data -! in a {\tt AVSharedIndicesOneType} or {\tt AVSharedIndices} structure. -! Finally, there are methods for sorting and permuting {\tt AttrVect} -! entries using a MergeSort scheme keyed by the attributes of the {\tt -! AttrVect}. -! -! !INTERFACE: - - module m_AttrVect -! -! !USES: -! - use m_realkinds,only : SP,DP,FP ! Real types definitions - - use m_List, only : List ! Support for rList and iList components. - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: AttrVect ! The class data structure - public :: AVSharedIndicesOneType ! Data structure recording shared indices between - ! two attribute vectors, for a single data type - ! (e.g., shared real attributes) - public :: AVSharedIndices ! Data structure recording shared indices between two - ! attribute vectors, for all data types - - type AttrVect -#ifdef SEQUENCE - sequence -#endif - type(List) :: iList - type(List) :: rList - integer,dimension(:,:),pointer :: iAttr - real(FP) ,dimension(:,:),pointer :: rAttr - end type AttrVect - - type AVSharedIndicesOneType - integer :: num_indices ! number of shared items - logical :: contiguous ! true if index segments are contiguous in memory - character*7 :: data_flag ! data type flag (e.g., 'REAL' or 'INTEGER') - - ! arrays of indices to storage locations of shared attributes between the two - ! attribute vectors: - integer, dimension(:), pointer :: aVindices1 - integer, dimension(:), pointer :: aVindices2 - end type AVSharedIndicesOneType - - type AVSharedIndices - type(AVSharedIndicesOneType) :: shared_real ! shared indices of type REAL - type(AVSharedIndicesOneType) :: shared_integer ! shared indices of type INTEGER - end type AVSharedIndices - - -! !PUBLIC MEMBER FUNCTIONS: - - public :: init ! create a local vector - public :: clean ! clean the local vector - public :: zero ! zero the local vector - public :: lsize ! size of the local vector - public :: nIAttr ! number of integer attributes on local - public :: nRAttr ! number of real attributes on local - public :: indexIA ! index the integer attributes - public :: indexRA ! index the real attributes - public :: getIList ! return list of integer attributes - public :: getRList ! 
return list of real attributes - public :: getIListtoChar ! return list of integer attributes as Char - public :: getRListtoChar ! return list of real attributes as Char - public :: exportIList ! export INTEGER attibute List - public :: exportRList ! export REAL attibute List - public :: exportIListToChar ! export INTEGER attibute List as Char - public :: exportRListToChar ! export REAL attibute List as Char - public :: appendIAttr ! append INTEGER attribute List - public :: appendRAttr ! append REAL attribute List - public :: exportIAttr ! export INTEGER attribute to vector - public :: exportRAttr ! export REAL attribute to vector - public :: importIAttr ! import INTEGER attribute from vector - public :: importRAttr ! import REAL attribute from vector - public :: Copy ! copy attributes from one Av to another - public :: RCopy ! copy real attributes from one Av to another - public :: ICopy ! copy integer attributes from one Av to another - public :: Sort ! sort entries, and return permutation - public :: Permute ! permute entries - public :: Unpermute ! Unpermute entries - public :: SortPermute ! sort and permute entries - public :: SharedAttrIndexList ! Cross-indices of shared - ! attributes of two AttrVects - public :: SharedIndices ! Given two AttrVects, create an AVSharedIndices structure - public :: SharedIndicesOneType ! Given two AttrVects, create an - ! AVSharedIndicesOneType structure for a single type - public :: cleanSharedIndices ! clean a AVSharedIndices structure - public :: cleanSharedIndicesOneType ! clean a AVSharedIndicesOneType structure - - - interface init ; module procedure & - init_, & - initv_, & - initl_ - end interface - interface clean ; module procedure clean_ ; end interface - interface zero ; module procedure zero_ ; end interface - interface lsize ; module procedure lsize_ ; end interface - interface nIAttr ; module procedure nIAttr_ ; end interface - interface nRAttr ; module procedure nRAttr_ ; end interface - interface indexIA; module procedure indexIA_; end interface - interface indexRA; module procedure indexRA_; end interface - interface getIList; module procedure getIList_; end interface - interface getRList; module procedure getRList_; end interface - interface getIListToChar; module procedure getIListToChar_; end interface - interface getRListToChar; module procedure getRListToChar_; end interface - interface exportIList; module procedure exportIList_; end interface - interface exportRList; module procedure exportRList_; end interface - interface exportIListToChar - module procedure exportIListToChar_ - end interface - interface exportRListToChar - module procedure exportRListToChar_ - end interface - interface appendIAttr ; module procedure appendIAttr_ ; end interface - interface appendRAttr ; module procedure appendRAttr_ ; end interface - interface exportIAttr; module procedure exportIAttr_; end interface - interface exportRAttr; module procedure & - exportRAttrSP_, & - exportRAttrDP_ - end interface - interface importIAttr; module procedure importIAttr_; end interface - interface importRAttr; module procedure & - importRAttrSP_, & - importRAttrDP_ - end interface - interface Copy ; module procedure Copy_ ; end interface - interface RCopy ; module procedure & - RCopy_, & - RCopyL_ - end interface - interface ICopy ; module procedure & - ICopy_, & - ICopyL_ - end interface - interface Sort ; module procedure Sort_ ; end interface - interface Permute ; module procedure Permute_ ; end interface - interface Unpermute ; module procedure Unpermute_ ; 
end interface - interface SortPermute ; module procedure SortPermute_ ; end interface - interface SharedAttrIndexList ; module procedure & - aVaVSharedAttrIndexList_ - end interface - interface SharedIndices ; module procedure SharedIndices_ ; end interface - interface SharedIndicesOneType ; module procedure SharedIndicesOneType_ ; end interface - interface cleanSharedIndices ; module procedure cleanSharedIndices_ ; end interface - interface cleanSharedIndicesOneType ; module procedure cleanSharedIndicesOneType_ ; end interface - -! !REVISION HISTORY: -! 10Apr98 - Jing Guo - initial prototype/prolog/code -! 10Oct00 - J.W. Larson - made getIList -! and getRList functions public and added appropriate -! interface definitions -! 20Oct00 - J.W. Larson - added Sort, -! Permute, and SortPermute functions. -! 09May01 - J.W. Larson - added initl_(). -! 19Oct01 - J.W. Larson - added routines -! exportIattr(), exportRAttr(), importIAttr(), -! and importRAttr(). Also cleaned up module and -! routine prologues. -! 13Dec01 - J.W. Larson - made importIAttr() -! and importRAttr() public (bug fix). -! 14Dec01 - J.W. Larson - added exportIList() -! and exportRList(). -! 14Feb02 - J.W. Larson - added CHARCTER -! functions exportIListToChar() and exportRListToChar() -! 26Feb02 - J.W. Larson - corrected of usage -! of m_die routines throughout this module. -! 16Apr02 - J.W. Larson - added the method -! LocalReduce(), and the public data members AttrVectSUM, -! AttrVectMIN, and AttrVectMAX. -! 7May02 - J.W. Larson - Refactoring. Moved -! LocalReduce() and the public data members AttrVectSUM, -! AttrVectMIN, and AttrVectMAX to a new module named -! m_AttrVectReduce. -! 12Jun02 - R.L. Jacob - add Copy function -! 13Jun02 - R.L. Jacob - move aVavSharedAttrIndexList -! to this module from old m_SharedAttrIndicies -! 28Apr11 - W.J. Sacks - added AVSharedIndices and -! AVSharedIndicesOneType derived types, and associated -! subroutines -! 10Apr12 - W.J. Sacks - modified AVSharedIndices code -! to be Fortran-90 compliant -! 10Jan13 - T.Craig - add getRListToChar and getIListToChar -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_AttrVect' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_ - Initialize an AttrVect Given Attribute Lists and Length -! -! !DESCRIPTION: -! This routine creates an {\tt AttrVect} (the output argument {\tt aV}) -! using the optional input {\tt CHARACTER} arguments {\tt iList}, and -! {\tt rList} to define its integer and real attributes, respectively. -! The optional input {\tt INTEGER} argument {\tt lsize} defines the -! number of points for which we are storing attributes, or the -! {\em length} of {\tt aV}. The expected form for the arguments -! {\tt iList} and {\tt rList} are colon-delimited strings where each -! substring defines an attribute. Suppose we wish to store {\tt N} -! observations that have the real attributes {\tt 'latitude'}, -! {\tt 'longitude'}, {\tt pressure}, {\tt 'u-wind'}, and -! {\tt 'v-wind'}. Suppose we also wish to store the integer -! attributes {\tt 'hour'}, {\tt 'day'}, {\tt 'month'}, {\tt 'year'}, -! and {\tt 'data source'}. This can be accomplished by invoking -! {\tt init\_()} as follows: -! \begin{verbatim} -! call init_(aV, 'hour:day:month:year:data source', & -! 
'latitude:longitude:pressure:u-wind:v-wind', N) -! \end{verbatim} -! The resulting {\tt AttrVect} {\tt aV} will have five integer -! attributes, five real attributes, and length {\tt N}. -! -! !INTERFACE: - - subroutine init_(aV, iList, rList, lsize) -! -! !USES: -! - use m_List, only : List - use m_List, only : init,nitem - use m_List, only : List_nullify => nullify - use m_mall - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), optional, intent(in) :: iList - character(len=*), optional, intent(in) :: rList - integer, optional, intent(in) :: lsize - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(out) :: aV - -! !REVISION HISTORY: -! 09Apr98 - Jing Guo - initial prototype/prolog/code -! 09Oct01 - J.W. Larson - added feature to -! nullify all pointers before usage. This was done to -! accomodate behavior of the f90 ASSOCIATED intrinsic -! function on the AIX platform. -! 07Dec01 - E.T. Ong - added support for -! intialization with blank character strings for iList -! and rList -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::init_' - integer :: nIA,nRA,n,ier - - ! Initially, nullify all pointers in the AttrVect aV: - - nullify(aV%iAttr) - nullify(aV%rAttr) - call List_nullify(aV%iList) - call List_nullify(aV%rList) - - if(present(rList)) then - if(len_trim(rList) > 0) then - call init(aV%rList,rList) ! init.List() - endif - endif - - if(present(iList)) then - if(len_trim(iList) > 0) then - call init(aV%iList,iList) ! init.List() - endif - endif - - nIA=nitem(aV%iList) ! nitem.List() - nRA=nitem(aV%rList) ! nitem.List() - - n=0 - if(present(lsize)) n=lsize - - allocate( aV%iAttr(nIA,n),aV%rAttr(nRA,n), stat=ier) - if(ier /= 0) call die(myname_,'allocate()',ier) - -#ifdef MALL_ON - call mall_ci(size(transfer(aV%iAttr,(/1/)),myname_) - call mall_ci(size(transfer(aV%rAttr,(/1/)),myname_) -#endif - - end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initv_ - Initialize One AttrVect from Another -! -! !DESCRIPTION: This routine takes an input {\tt AttrVect} argument -! {\tt bV}, and uses its attribute list information to create an output -! {\tt AttrVect} variable {\tt aV}. The length of {\tt aV} is defined -! by the input {\tt INTEGER} argument {\tt lsize}. -! -! !INTERFACE: - - subroutine initv_(aV, bV, lsize) -! -! !USES: -! - use m_String, only : String,char - use m_String, only : String_clean => clean - use m_List, only : get - use m_List, only : List_nullify => nullify - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect),intent(in) :: bV - integer, intent(in) :: lsize - -! !OUTPUT PARAMETERS: -! - type(AttrVect),intent(out) :: aV - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 17May01 - R. Jacob - add a check to see if -! input argument has been defined. SGI will dump -! core if its not. -! 10Oct01 - J. Larson - Nullify all pointers -! in ouput AttrVect aV before initializing aV. -! 19Sep08 - J. Wolfe - plug memory leak from not deallocating -! strings. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initv_' - type(String) :: iLStr,rLStr - - ! Step One: Nullify all pointers in aV. We will set - ! 
only the pointers we really need for aV based on those - ! currently ASSOCIATED in bV. - - call List_nullify(aV%iList) - call List_nullify(aV%rList) - nullify(aV%iAttr) - nullify(aV%rAttr) - - ! Convert the two Lists to two Strings - - if(.not.associated(bv%iList%bf) .and. & - .not.associated(bv%rList%bf)) then - write(stderr,'(2a)')myname_, & - 'MCTERROR: Trying to initialize a new AttrVect off an undefined AttrVect' - call die(myname_,'undefined input argument',0) - endif - - if(associated(bv%iList%bf)) then - call get(iLStr,bv%iList) - endif - - if(associated(bv%rList%bf)) then - call get(rLStr,bv%rList) - endif - - ! Initialize the AttrVect aV depending on which parts of - ! the input bV are valid: - - if(associated(bv%iList%bf) .and. associated(bv%rList%bf)) then - call init_(aV,iList=char(iLStr),rList=char(rLStr),lsize=lsize) - endif - if(.not.associated(bv%iList%bf) .and. associated(bv%rList%bf)) then - call init_(aV,rList=char(rLStr),lsize=lsize) - endif - if(associated(bv%iList%bf) .and. .not.associated(bv%rList%bf)) then - call init_(aV,iList=char(iLStr),lsize=lsize) - endif - - if(associated(bv%iList%bf)) then - call String_clean(iLStr) - endif - if(associated(bv%rList%bf)) then - call String_clean(rLStr) - endif - - end subroutine initv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initl_ - Initialize an AttrVect Using the List Type -! -! !DESCRIPTION: This routine initializes an {\tt AttrVect} directly -! from input {\tt List} data type arguments {\tt iList} and {\tt rList} -! (see the module {\tt m\_List} in mpeu for further details), and an -! input length {\tt lsize}. The resulting {\tt AttrVect} is returned in -! the argument {\tt aV}. -! -! {\bf N.B.}: If the user supplies an empty list for the arguments -! {\tt iList} ({\tt rList}), then {\tt aV} will be created only with -! {\tt REAL} ({\tt INTEGER}) attributes. If both arguments {\tt iList} -! and {\tt rList} are empty, the routine will terminate execution and -! report an error. -! -! {\bf N.B.}: The outcome of this routine, {\tt aV} represents -! allocated memory. When this {\tt AttrVect} is no longer needed, -! it must be deallocated by invoking the routine {\tt AttrVect\_clean()}. -! Failure to do so will spawn a memory leak. -! -! !INTERFACE: - - subroutine initl_(aV, iList, rList, lsize) - -! -! !USES: -! - use m_die - use m_stdio - - use m_String, only : String - use m_String, only : String_clean => clean - use m_String, only : String_toChar => toChar - - use m_List, only : List - use m_List, only : List_nitem => nitem - use m_List, only : List_exportToChar => exportToChar - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: iList - type(List), intent(in) :: rList - integer, intent(in) :: lsize - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(out) :: aV - -! !REVISION HISTORY: -! 09May98 - J.W. Larson - initial version. -! 08Aug01 - E.T. Ong - change list assignment(=) -! to list copy to avoid compiler errors with pgf90. -! 10Oct01 - J. Larson - Nullify all pointers -! in ouput AttrVect aV before initializing aV. Also, -! greater caution taken regarding validity of input -! arguments iList and rList. -! 15May08 - J. Larson - Simplify to use -! the init_ routine. Better argument checking. -!EOP ___________________________________________________________________ -! 
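! A minimal, hypothetical usage sketch for initl_() (the names av, intList,
! realList, and the attribute strings are illustrative only), assuming the two
! List arguments are first built with init() from m_List:
!
!   use m_List, only : List
!   use m_List, only : List_init => init
!   use m_List, only : List_clean => clean
!   type(List)     :: intList, realList
!   type(AttrVect) :: av
!   call List_init(intList, 'hour:day:month:year')
!   call List_init(realList, 'u-wind:v-wind')
!   call initl_(av, intList, realList, 100)
!   ! ... use av, then release the memory it holds ...
!   call clean_(av)
!   call List_clean(intList)
!   call List_clean(realList)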
- character(len=*),parameter :: myname_=myname//'::initl_' - - ! Basic argument sanity checks: - - if (List_nitem(iList) < 0) then - write(stderr,'(2a,i8,a)') myname_, & - ':: FATAL: List argument iList has a negative number ( ',List_nitem(iList), & - ' ) of attributes!' - call die(myname_) - endif - - if (List_nitem(rList) < 0) then - write(stderr,'(2a,i8,a)') myname_, & - ':: FATAL: List argument rList has a negative number ( ',List_nitem(rList), & - ' ) of attributes!' - call die(myname_) - endif - - if ((List_nitem(iList) > 0) .and. (List_nitem(rList) > 0)) then - - call init_(aV, List_exportToChar(iList), List_exportToChar(rList), lsize) - - else ! Then solely REAL or solely INTEGER attributes: - - if (List_nitem(iList) > 0) then ! solely INTEGER attributes - - call init_(aV, iList=List_exportToChar(iList), lsize=lsize) - - endif ! if (List_nitem(iList) > 0) then... - - if (List_nitem(rList) > 0) then ! solely REAL attributes - - call init_(aV, rList=List_exportToChar(rList), lsize=lsize) - - endif ! if (List_nitem(rList) > 0) then... - - endif ! if ((List_nitem(iList) > 0) .and. (List_nitem(rList) > 0)) then... - - end subroutine initl_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Deallocate Allocated Memory Structures of an AttrVect -! -! !DESCRIPTION: -! This routine deallocates the allocated memory structures of the -! input/output {\tt AttrVect} argument {\tt aV}. This amounts to -! cleaning the {\tt List} structures {\tt aV\%iList} and {\tt av\%rList}, -! and deallocating the arrays {\tt aV\%iAttr(:,:)} and -! {\tt aV\%rAttr(:,:)}. The success (failure) of this operation is -! signified by a zero (non-zero) value of the optional {\tt INTEGER} -! output argument {\tt stat}. If {\tt clean\_()} is invoked without -! supplying {\tt stat}, and any of the deallocation operations fail, -! the routine will terminate with an error message. -! -! !INTERFACE: - - subroutine clean_(aV, stat) -! -! !USES: -! - use m_mall - use m_stdio - use m_die - use m_List, only : List_clean => clean - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(inout) :: aV - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 09Apr98 - Jing Guo - initial prototype/prolog/code -! 10Oct01 - J. Larson - various fixes to -! prevent deallocation of UNASSOCIATED pointers. -! 01Mar01 - E.T. Ong - removed dies to prevent -! crashes when cleaning uninitialized attrvects. Added -! optional stat argument. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - ! Note that an undefined pointer may either crash the process - ! or return either .true. or .false. to the associated() test. - ! One should therefore avoid using the function on an - ! undefined pointer. - - ! Clean up INTEGER attribute list: - - if(present(stat)) stat=0 - - if(associated(aV%iList%bf)) then - - if(present(stat)) then - call List_clean(aV%iList,ier) - if(ier/=0) stat=ier - else - call List_clean(aV%iList) - endif - - endif - - ! Clean up REAL attribute list: - - if(associated(aV%rList%bf)) then - - if(present(stat)) then - call List_clean(aV%rList,ier) - if(ier/=0) stat=ier - else - call List_clean(aV%rList) - endif - - endif - - ! 
Clean up INTEGER attributes: - - if(associated(aV%iAttr)) then - -#ifdef MALL_ON - call mall_co(size(transfer(aV%iAttr,(/1/)),myname_) -#endif - - deallocate(aV%iAttr,stat=ier) - - if(ier /= 0) then - if(present(stat)) then - stat=ier - else - call warn(myname_,'deallocate(aV%iAttr)',ier) - endif - endif - - endif ! if(associated(aV%iAttr))... - - ! Clean up REAL attributes: - - if(associated(aV%rAttr)) then - -#ifdef MALL_ON - call mall_co(size(transfer(aV%rAttr,(/1/)),myname_) -#endif - - deallocate(aV%rAttr,stat=ier) - - if(ier /= 0) then - if(present(stat)) then - stat=ier - else - call warn(myname_,'deallocate(aV%rAttr)',ier) - endif - endif - - endif ! if(associated(aV%rAttr))... - - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: lsize_ - Length of an AttrVect -! -! !DESCRIPTION: -! This function returns the number of elements, or {\em length} of the -! input {\tt AttrVect} argument {\tt aV}. This function examines the -! length of the second dimension of the arrays {\tt aV\%iAttr(:,:)} -! and {\tt aV\%rAttr(:,:)}. If neither {\tt aV\%iAttr(:,:)} nor -! {\tt aV\%rAttr(:,:)} are associated, then ${\tt lsize\_(aV)} = 0$. -! If {\tt aV\%iAttr(:,:)} is associated, but {\tt aV\%rAttr(:,:)} is -! not, then ${\tt lsize\_(aV)} = {\tt size(aV\%iAttr,2)}$. If -! {\tt aV\%iAttr(:,:)} is not associated, but {\tt aV\%rAttr(:,:)} is, -! then ${\tt lsize\_(aV)} = {\tt size(aV\%rAttr,2)}$. If both -! {\tt aV\%iAttr(:,:)} and {\tt aV\%rAttr(:,:)} are associated, the -! function {\tt lsize\_()} will do one of two things: If -! ${\tt size(aV\%iAttr,2)} = {\tt size(aV\%rAttr,2)}$, this equal value -! will be returned. If ${\tt size(aV\%iAttr,2)} \neq -! {\tt size(aV\%rAttr,2)}$, termination with an error message will occur. -! -! !INTERFACE: - - integer function lsize_(aV) - -! !USES: - - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_stdio, only : stderr - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV - -! !REVISION HISTORY: -! 09Apr98 - Jing Guo - initial prototype/prolog/code -! 10Oct01 - J. Larson - made code more robust -! to handle cases where the length of either aV%iAttr or -! aV%rAttr is zero, but the other is positive. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::lsize_' - integer :: iLength, rLength - - ! One should try to avoid using this function on an undefined - ! or disassocated pointer. However, it is understandable - ! that an undefined or disassocated pointer has a size 0, if - ! the associated() test sucesses. - - lsize_=0 - - if(List_allocated(aV%iList) .and. associated(aV%iAttr)) then - iLength = size(aV%iAttr,2) - else - iLength = 0 - endif - - if(List_allocated(aV%rList) .and. associated(aV%rAttr)) then - rLength = size(aV%rAttr,2) - else - rLength = 0 - endif - - if(iLength /= rLength) then - - if((rLength > 0) .and. (iLength > 0)) then - call die(myname_,'attribute array length mismatch', & - iLength-rLength) - endif - - if((rLength > 0) .and. (iLength == 0)) then - lsize_ = rLength - endif - - if((iLength > 0) .and. 
(rLength == 0)) then - lsize_ = iLength - endif - - endif - - if(iLength == rLength) lsize_ = iLength - - end function lsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: zero_ - Set AttrVect Field Data to Zero -! -! !DESCRIPTION: -! This routine sets all of the point values of the integer and real -! attributes of an the input/output {\tt AttrVect} argument {\tt aV} -! to zero. The default action is to set the values of all the real and -! integer attributes to zero. The user may prevent the zeroing of the -! real (integer) attributes invoking {\tt zero\_()} with the optional -! {\tt LOGICAL} argument {\tt zeroReals} ({\tt zeroInts}) set with value -! {\tt .FALSE.} -! -! !INTERFACE: - - subroutine zero_(aV, zeroReals, zeroInts) - -! !USES: - - - use m_die,only : die - use m_stdio,only : stderr - - use m_List, only : List - use m_List, only : List_allocated => allocated - - implicit none - -! !INPUT PARAMETERS: - - logical, optional, intent(IN) :: zeroReals - logical, optional, intent(IN) :: zeroInts - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: aV - -! !REVISION HISTORY: -! 17May01 - R. Jacob - initial prototype/code -! 15Oct01 - J. Larson - switched loop order -! for cache optimization. -! 03Dec01 - E.T. Ong - eliminated looping method of -! of zeroing. "Compiler assignment" of attrvect performs faster -! on the IBM SP with mpxlf90 compiler. -! 05Jan10 - R. Jacob - zeroing an uninitialized aV is no -! longer a fatal error. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::zero_' - - logical myZeroReals, myZeroInts - - if(present(zeroReals)) then - myZeroReals = zeroReals - else - myZeroReals = .TRUE. - endif - - if(present(zeroInts)) then - myZeroInts = zeroInts - else - myZeroInts = .TRUE. - endif - -! if((.not. List_allocated(aV%iList)) .and. (.not. List_allocated(aV%rList))) then -! write(stderr,'(2a)')myname_, & -! 'MCTERROR: Trying to zero an uninitialized AttrVect' -! call die(myname_) -! endif - - if(myZeroInts) then ! zero out INTEGER attributes - if(List_allocated(aV%iList)) then - if(associated(aV%iAttr) .and. (nIAttr_(aV)>0)) then -!DIR$ COLLAPSE - aV%iAttr=0 - endif - endif - endif - - if(myZeroReals) then ! zero out REAL attributes - if(List_allocated(aV%rList)) then - if(associated(aV%rAttr) .and. (nRAttr_(aV)>0)) then -!DIR$ COLLAPSE - aV%rAttr=0._FP - endif - endif - endif - - end subroutine zero_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nIAttr_ - Return the Number of Integer Attributes -! -! !DESCRIPTION: -! This integer function returns the number of integer attributes -! present in the input {\tt AttrVect} argument {\tt aV}. -! -! !INTERFACE: - - integer function nIAttr_(aV) -! -! !USES: -! - use m_List, only : nitem - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect),intent(in) :: aV - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 10Oct01 - J. Larson - made code more robust -! 
by checking status of pointers in aV%iList -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nIAttr_' - - if(associated(aV%iList%bf)) then - nIAttr_ = nitem(aV%iList) - else - nIAttr_ = 0 - endif - - end function nIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nRAttr_ - Return the Number of Real Attributes -! -! !DESCRIPTION: -! This integer function returns the number of real attributes -! present in the input {\tt AttrVect} argument {\tt aV}. - -! !INTERFACE: - - integer function nRAttr_(aV) -! -! !USES: -! - use m_List, only : nitem - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect),intent(in) :: aV - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 10Oct01 - J. Larson - made code more robust -! by checking status of pointers in aV%iList -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nRAttr_' - - if(associated(aV%rList%bf)) then - nRAttr_ = nitem(aV%rList) - else - nRAttr_ = 0 - endif - - end function nRAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getIList_ - Retrieve the Name of a Numbered Integer Attribute -! -! !DESCRIPTION: -! This routine returns the name of the {\tt ith} integer attribute of -! the input {\tt AttrVect} argument {\tt aVect}. The name is returned -! in the output {\tt String} argument {\tt item} (see the mpeu module -! {\tt m\_String} for more information regarding the {\tt String} type). -! -! !INTERFACE: - - subroutine getIList_(item, ith, aVect) -! -! !USES: -! - use m_String, only : String - use m_List, only : get - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: ith - type(AttrVect),intent(in) :: aVect - -! !OUTPUT PARAMETERS: -! - type(String),intent(out) :: item - -! !REVISION HISTORY: -! 24Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::getIList_' - - call get(item, ith, aVect%iList) - - end subroutine getIList_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getRList_ - Retrieve the Name of a Numbered Real Attribute -! -! !DESCRIPTION: -! This routine returns the name of the {\tt ith} real attribute of -! the input {\tt AttrVect} argument {\tt aVect}. The name is returned -! in the output {\tt String} argument {\tt item} (see the mpeu module -! {\tt m\_String} for more information regarding the {\tt String} type). -! -! !INTERFACE: - - subroutine getRList_(item, ith, aVect) -! -! !USES: -! - use m_String, only : String - use m_List, only : get - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: ith - type(AttrVect), intent(in) :: aVect - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: item - -! !REVISION HISTORY: -! 
24Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::getRList_' - - call get(item,ith,aVect%rList) - - end subroutine getRList_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getIListToChar_ - Retrieve the Name of a Numbered Integer Attribute -! -! !DESCRIPTION: -! This routine returns the name of the {\tt ith} integer attribute of -! the input {\tt AttrVect} argument {\tt aVect}. The name is returned -! in the function {\tt char} argument. -! -! !INTERFACE: - - function getIListToChar_(ith, aVect) -! -! !USES: -! - use m_String, only : String - use m_String, only : String_ToChar => ToChar - use m_String, only : String_clean => clean - use m_List, only : get - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: ith - type(AttrVect),intent(in) :: aVect - -! !OUTPUT PARAMETERS: -! - character(len=size(aVect%iList%bf,1)) :: getIListToChar_ - -! !REVISION HISTORY: -! 10Jan13 - T. Craig - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - type(String) :: item - character(len=*),parameter :: myname_=myname//'::getIListToChar_' - - call get(item, ith, aVect%iList) - getIListToChar_ = String_toChar(item) - call String_clean(item) - - end function getIListToChar_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getRListToChar_ - Retrieve the Name of a Numbered Integer Attribute -! -! !DESCRIPTION: -! This routine returns the name of the {\tt ith} integer attribute of -! the input {\tt AttrVect} argument {\tt aVect}. The name is returned -! in the function {\tt char} argument. -! -! !INTERFACE: - - function getRListToChar_(ith, aVect) -! -! !USES: -! - use m_String, only : String - use m_String, only : String_ToChar => ToChar - use m_String, only : String_clean => clean - use m_List, only : get - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: ith - type(AttrVect),intent(in) :: aVect - -! !OUTPUT PARAMETERS: -! - character(len=size(aVect%rList%bf,1)) :: getRListToChar_ - -! !REVISION HISTORY: -! 10Jan13 - T. Craig - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - type(String) :: item - character(len=*),parameter :: myname_=myname//'::getRListToChar_' - - call get(item, ith, aVect%rList) - getRListToChar_ = String_toChar(item) - call String_clean(item) - - end function getRListToChar_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexIA_ - Index an Integer Attribute -! -! !DESCRIPTION: -! This function returns an {\tt INTEGER}, corresponding to the location -! of an integer attribute within the input {\tt AttrVect} argument -! {\tt aV}. For example, suppose {\tt aV} has the following attributes -! {\tt 'month'}, {\tt 'day'}, and {\tt 'year'}. The array of integer -! values for the attribute {\tt 'day'} is stored in -!% \begin{verbatim} -! 
{\tt aV\%iAttr(indexIA\_(aV,'day'),:)}. -!% \end{verbatim} -! If {\tt indexIA\_()} is unable to match {\tt item} to any of the integer -! attributes in {\tt aV}, the resulting value is zero which is equivalent -! to an error. The optional input {\tt CHARACTER} arguments {\tt perrWith} -! and {\tt dieWith} control how such errors are handled. -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt indexIA\_()} terminates execution with an internally generated -! error message; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied traceback -! information stored in the argument {\tt perrWith}; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, and -! {\tt perrWith} is equal to ``quiet'', no error message is written. -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied traceback -! information stored in the argument {\tt dieWith}; and -! \item if both {\tt perrWith} and {\tt dieWith} are present, execution -! terminates with an error message using {\tt dieWith}, and the argument -! {\tt perrWith} is ignored. -! \end{enumerate} -! -! !INTERFACE: - - integer function indexIA_(aV, item, perrWith, dieWith) -! -! !USES: -! - use m_die, only : die - use m_stdio,only : stderr - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_List, only : index - - use m_TraceBack, only : GenTraceBackString - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 27Apr98 - Jing Guo - initial prototype/prolog/code -! 2Aug02 - J. Larson - Solidified error handling using perrWith/dieWith -! 1Jan05 - R. Jacob - add quiet option for error handling -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::indexIA_' - - type(String) :: myTrace - - if(present(dieWith)) then - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then - call GenTraceBackString(myTrace, perrWith, myname_) - else - call GenTraceBackString(myTrace, myname_) - endif - endif - - indexIA_=index(aV%iList,item) - - if(indexIA_==0) then ! The attribute was not found! - ! As per the prologue, decide how to handle this error - if(present(perrWith) .and. (.not. present(dieWith))) then - if (trim(perrWith).eq.'quiet') then - ! do nothing - else - write(stderr,'(5a)') myname_, & - ':: ERROR--attribute not found: "',trim(item),'" ', & - 'Traceback: ',String_ToChar(myTrace) - endif - else ! Shutdown - write(stderr,'(5a)') myname_, & - ':: FATAL--attribute not found: "',trim(item),'" ', & - 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - endif - - call String_clean(myTrace) - - end function indexIA_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexRA_ - Index a Real Attribute -! -! !DESCRIPTION: -! This function returns an {\tt INTEGER}, corresponding to the location -! 
of a real attribute within the input {\tt AttrVect} argument -! {\tt aV}. For example, suppose {\tt aV} has the following attributes -! {\tt 'latitude'}, {\tt 'longitude'}, and {\tt 'pressure'}. The array -! of real values for the attribute {\tt 'longitude'} is stored in -!% \begin{verbatim} -! {\tt aV\%iAttr(indexRA\_(aV,'longitude'),:)}. -!% \end{verbatim} -! If {\tt indexRA\_()} is unable to match {\tt item} to any of the real -! attributes in {\tt aV}, the resulting value is zero which is equivalent -! to an error. The optional input {\tt CHARACTER} arguments {\tt perrWith} -! and {\tt dieWith} control how such errors are handled. -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt indexRA\_()} terminates execution with an internally generated -! error message; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied traceback -! information stored in the argument {\tt perrWith}; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, and -! {\tt perrWith} is equal to ``quiet'', no error message is written. -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied traceback -! information stored in the argument {\tt dieWith}; and -! \item if both {\tt perrWith} and {\tt dieWith} are present, execution -! terminates with an error message using {\tt dieWith}, and the argument -! {\tt perrWith} is ignored. -! \end{enumerate} -! -! !INTERFACE: - - integer function indexRA_(aV, item, perrWith, dieWith) -! -! !USES: -! - use m_die, only : die - use m_stdio,only : stderr - - use m_List, only : index - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 27Apr98 - Jing Guo - initial prototype/prolog/code -! 2Aug02 - J. Larson - Solidified error handling using perrWith/dieWith -! 18Jan05 - R. Jacob - add quiet option for error handling -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::indexRA_' - - type(String) :: myTrace - - if(present(dieWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, perrWith, myname_) - else ! Start a TraceBackString - call GenTraceBackString(myTrace, myname_) - endif - endif - - indexRA_=index(aV%rList,item) - - if(indexRA_==0) then ! The attribute was not found! - ! As per the prologue, decide how to handle this error - if(present(perrWith) .and. (.not. present(dieWith))) then - if (trim(perrWith).eq.'quiet') then - ! do nothing - else - write(stderr,'(5a)') myname_, & - ':: ERROR--attribute not found: "',trim(item),'" ', & - 'Traceback: ',String_ToChar(myTrace) - endif - else ! 
Shutdown if dieWith or no arguments present - write(stderr,'(5a)') myname_, & - ':: FATAL--attribute not found: "',trim(item),'" ', & - 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - endif - - call String_clean(myTrace) - - end function indexRA_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! DOE/ANL Mathematics and Computer Science Division ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: appendIAttr_ - Append one or more attributes onto the INTEGER part of an AttrVect. -! -! !DESCRIPTION: This routine takes an input {\tt AttrVect} argument -! {\tt aV}, and an input character string {\tt rList} and Appends {\tt rList} -! to the INTEGER part of {\tt aV}. The success (failure) of this operation is -! signified by a zero (nonzero) value for the optional {\tt INTEGER} -! output argument {\tt status}. -! -! !INTERFACE: - - subroutine appendIAttr_(aV, iList, status) -! -! !USES: -! - use m_List, only : List_init => init - use m_List, only : List_append => append - use m_List, only : List_clean => clean - use m_List, only : List_nullify => nullify - use m_List, only : List_allocated => allocated - use m_List, only : List_copy => copy - use m_List, only : List - use m_die - use m_stdio - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect),intent(inout) :: aV - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: iList - -! !OUTPUT PARAMETERS: -! - integer,optional,intent(out) :: status - -! !REVISION HISTORY: -! 08Jul03 - R. Jacob - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::appendIAttr_' - - type(List) :: avRList,avIList ! placeholders for the aV attributes - type(List) :: addIlist ! for the input string - type(AttrVect) :: tempaV ! placeholder for aV data. - integer :: locsize ! size of aV - integer :: rlstatus,cstatus ! status flags - integer :: ilstatus - - if(present(status)) status = 0 - - call List_nullify(avIList) - call List_nullify(avRList) - -! save the local size and current int and real attributes - locsize = lsize_(aV) - call exportRList_(aV,avRList,rlstatus) - call exportIList_(aV,avIList,ilstatus) - -! create and fill a temporary AttrVect to hold any data currently in the aV - call initv_(tempaV,aV,lsize=locsize) - call Copy_(aV,tempaV) - -! create a List with the new attributes - call List_init(addIlist,iList) - -! append addIlist to current avIList if it has attributes. - if(List_allocated(avIList)) then - call List_append(avIList,addIlist) -! copy addIlist to avIList - else - call List_copy(avIList,addIlist) - endif - -! now delete the input aV and recreate it - call clean_(aV,cstatus) - call initl_(aV,avIList,avRList,locsize) - -! copy back the data - call Copy_(tempaV,aV) - -! clean up. - call List_clean(avRList,cstatus) - - call clean_(tempaV,cstatus) - call List_clean(addIlist,cstatus) - call List_clean(avIList,cstatus) - - if(present(status)) status = cstatus - - end subroutine appendIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! DOE/ANL Mathematics and Computer Science Division ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: appendRAttr_ - Append one or more attributes onto the REAL part of an AttrVect. -! -! !DESCRIPTION: This routine takes an input {\tt AttrVect} argument -! {\tt aV}, and an input character string {\tt rList} and Appends {\tt rList} -! 
to the REAL part of {\tt aV}. The success (failure) of this operation is -! signified by a zero (nonzero) value for the optional {\tt INTEGER} -! output argument {\tt status}. -! -! !INTERFACE: - - subroutine appendRAttr_(aV, rList, status) -! -! !USES: -! - use m_List, only : List_init => init - use m_List, only : List_append => append - use m_List, only : List_clean => clean - use m_List, only : List_nullify => nullify - use m_List, only : List_allocated => allocated - use m_List, only : List_copy => copy - use m_List, only : List - use m_die - use m_stdio - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect),intent(inout) :: aV - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: rList - -! !OUTPUT PARAMETERS: -! - integer,optional,intent(out) :: status - -! !REVISION HISTORY: -! 04Jun03 - R. Jacob - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::appendRAttr_' - - type(List) :: avRList,avIList ! placeholders for the aV attributes - type(List) :: addRlist ! for the input string - type(AttrVect) :: tempaV ! placeholder for aV data. - integer :: locsize ! size of aV - integer :: rlstatus,cstatus ! status flags - integer :: ilstatus - - if(present(status)) status = 0 - - call List_nullify(avIList) - call List_nullify(avRList) - -! save the local size and current int and real attributes - locsize = lsize_(aV) - call exportRList_(aV,avRList,rlstatus) - call exportIList_(aV,avIList,ilstatus) - -! create and fill a temporary AttrVect to hold any data currently in the aV - call initv_(tempaV,aV,lsize=locsize) - call Copy_(aV,tempaV) - -! create a List with the new attributes - call List_init(addRlist,rList) - -! append addRlist to current avRList if it has attributes. - if(List_allocated(avRList)) then - call List_append(avRList,addRlist) -! copy addRlist to avRList - else - call List_copy(avRList,addRlist) - endif - -! now delete the input aV and recreate it - call clean_(aV,cstatus) - call initl_(aV,avIList,avRList,locsize) - -! copy back the data - call Copy_(tempaV,aV) - -! clean up. - call List_clean(avIList,cstatus) - - call clean_(tempaV,cstatus) - call List_clean(addRlist,cstatus) - call List_clean(avRList,cstatus) - - if(present(status)) status = cstatus - - end subroutine appendRAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportIList_ - Return INTEGER Attribute List -! -! !DESCRIPTION: -! This routine extracts from the input {\tt AttrVect} argument {\tt aV} -! the integer attribute list, and returns it as the {\tt List} output -! argument {\tt outIList}. The success (failure) of this operation is -! signified by a zero (nonzero) value for the optional {\tt INTEGER} -! output argument {\tt status}. -! -! {\bf N.B.:} This routine returns an allocated {\tt List} data -! structure ({\tt outIList}). The user is responsible for deallocating -! this structure by invoking {\tt List\_clean()} (see the module -! {\tt m\_List} for details) once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportIList_(aV, outIList, status) - -! -! !USES: -! 
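! A minimal, hypothetical usage sketch for exportIList_() (the names av and
! myIList are illustrative only). As noted in the prologue, the returned List
! is allocated memory and must be cleaned by the caller:
!
!   use m_List, only : List
!   use m_List, only : List_clean => clean
!   type(List) :: myIList
!   integer    :: status
!   call exportIList_(av, myIList, status)
!   if(status == 0) then
!      ! ... use myIList ...
!      call List_clean(myIList)
!   endif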
- use m_die , only : die - use m_stdio, only : stderr - - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_List, only : List_copy => copy - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - -! !OUTPUT PARAMETERS: - - type(List), intent(out) :: outIList - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 14Dec01 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportIList_' - - ! Initialize status flag (if present) to success value of zero. - - if(present(status)) status = 0 - - if(List_allocated(aV%iList)) then - call List_copy(outIList, aV%iList) - else - call List_nullify(outIList) - if(present(status)) then - status = 1 - else - call die(myname_) - endif - endif - - end subroutine exportIList_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportRList_ - Return REAL attribute List -! -! !DESCRIPTION: -! This routine extracts from the input {\tt AttrVect} argument {\tt aV} -! the real attribute list, and returns it as the {\tt List} output -! argument {\tt outRList}. The success (failure) of this operation is -! signified by a zero (nonzero) value for the optional {\tt INTEGER} -! output argument {\tt status}. -! -! {\bf N.B.:} This routine returns an allocated {\tt List} data -! structure ({\tt outRList}). The user is responsible for deallocating -! this structure by invoking {\tt List\_clean()} (see the module -! {\tt m\_List} for details) once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportRList_(aV, outRList, status) - -! -! !USES: -! - use m_die , only : die - use m_stdio, only : stderr - - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_List, only : List_copy => copy - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - -! !OUTPUT PARAMETERS: - - type(List), intent(out) :: outRList - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 14Dec01 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRList_' - - ! Initialize status flag (if present) to success value of zero. - - if(present(status)) status = 0 - - if(List_allocated(aV%rList)) then - call List_copy(outRList, aV%rList) - else - call List_nullify(outRList) - if(present(status)) then - status = 1 - else - call die(myname_) - endif - endif - - end subroutine exportRList_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportIListToChar_ - Return AttrVect\%iList as CHARACTER -! -! !DESCRIPTION: -! This routine extracts from the input {\tt AttrVect} argument {\tt aV} -! the integer attribute list (see the mpeu module {\tt m\_List} for more -! information regarding the {\tt List} type), and returns it as a -! {\tt CHARACTER} suitable for printing. An example of its usage is -! \begin{verbatim} -! 
write(stdout,'(1a)') exportIListToChar_(aV) -! \end{verbatim} -! which writes the contents of {\tt aV\%iList\%bf} to the Fortran device -! {\tt stdout}. -! -! !INTERFACE: - - function exportIListToChar_(aV) - -! -! !USES: -! - use m_die , only : die - use m_stdio, only : stderr - - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_List, only : List_copy => copy - use m_List, only : List_exportToChar => exportToChar - use m_List, only : List_clean => clean - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - -! !OUTPUT PARAMETERS: - - character(len=size(aV%iList%bf,1)) :: exportIListToChar_ - -! !REVISION HISTORY: -! 13Feb02 - J.W. Larson - initial prototype. -! 05Jun03 - R. Jacob - return a blank instead of dying -! to avoid I/O errors when this function is used in a write statement. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportIListToChar_' - - ! The following extraneous list copy avoids a bug in the - ! SGI MIPSpro Fortran 90 compiler version 7.30. and the - ! Sun Fortran 90 Workshop compiler 5.0. If this line is removed, - ! the following error will occur during compile time: - - ! Signal: Segmentation fault in IR->WHIRL Conversion phase. - ! "m_AttrVect.F90": Error: Signal Segmentation fault in phase IR->WHIRL - ! Conversion -- processing aborted - ! f90 ERROR: /opt/MIPSpro/73/usr/lib32/cmplrs/mfef90 died due to signal 4 - ! f90 ERROR: core dumped - ! *** Error code 32 (bu21) - - type(List) :: iListCopy - - ! Extract the INTEGER attribute list to a character: - - if(List_allocated(aV%iList)) then - call List_copy(iListCopy,aV%iList) - exportIListToChar_ = List_exportToChar(iListCopy) - call List_clean(iListCopy) - else - exportIListToChar_ = '' - endif - - end function exportIListToChar_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportRListToChar_ - Return AttrVect\%rList as CHARACTER -! -! !DESCRIPTION: -! This routine extracts from the input {\tt AttrVect} argument {\tt aV} -! the real attribute list (see the mpeu module {\tt m\_List} for more -! information regarding the {\tt List} type), and returns it as a -! {\tt CHARACTER} suitable for printing. An example of its usage is -! \begin{verbatim} -! write(stdout,'(1a)') exportRListToChar_(aV) -! \end{verbatim} -! which writes the contents of {\tt aV\%rList\%bf} to the Fortran device -! {\tt stdout}. -! -! !INTERFACE: - - function exportRListToChar_(aV) - -! -! !USES: -! - use m_die , only : die - use m_stdio, only : stderr - - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_List, only : List_copy => copy - use m_List, only : List_exportToChar => exportToChar - use m_List, only : List_clean => clean - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - -! !OUTPUT PARAMETERS: - - character(len=size(aV%rList%bf,1)) :: exportRListToChar_ - -! !REVISION HISTORY: -! 13Feb02 - J.W. Larson - initial prototype. -! 05Jun03 - R. Jacob - return a blank instead of dying -! to avoid I/O errors when this function is used in a write statement. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRListToChar_' - - ! 
The following extraneous list copy avoids a bug in the - ! SGI MIPSpro Fortran 90 compiler version 7.30. and the - ! Sun Fortran 90 Workshop compiler 5.0. If this line is removed, - ! the following error will occur during compile time: - - ! Signal: Segmentation fault in IR->WHIRL Conversion phase. - ! "m_AttrVect.F90": Error: Signal Segmentation fault in phase IR->WHIRL - ! Conversion -- processing aborted - ! f90 ERROR: /opt/MIPSpro/73/usr/lib32/cmplrs/mfef90 died due to signal 4 - ! f90 ERROR: core dumped - ! *** Error code 32 (bu21) - - type(List) :: rListCopy - - ! Extract the REAL attribute list to a character: - - if(List_allocated(aV%rList)) then - call List_copy(rListCopy,aV%rList) - exportRListToChar_ = List_exportToChar(rListCopy) - call List_clean(rListCopy) - else - exportRListToChar_ = '' - endif - - end function exportRListToChar_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportIAttr_ - Return INTEGER Attribute as a Vector -! -! !DESCRIPTION: -! This routine extracts from the input {\tt AttrVect} argument {\tt aV} -! the integer attribute corresponding to the tag defined in the input -! {\tt CHARACTER} argument {\tt AttrTag}, and returns it in the -! {\tt INTEGER} output array {\tt outVect}, and its length in the output -! {\tt INTEGER} argument {\tt lsize}. The optional input {\tt CHARACTER} -! arguments {\tt perrWith} and {\tt dieWith} control how errors are -! handled. -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt exportIAttr\_()} terminates execution with an internally generated -! error message; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied traceback -! information stored in the argument {\tt perrWith}; -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied traceback -! information stored in the argument {\tt dieWith}; and -! \item if both {\tt perrWith} and {\tt dieWith} are present, execution -! terminates with an error message using {\tt dieWith}, and the argument -! {\tt perrWith} is ignored. -! \end{enumerate} -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt AttrVect} {\tt List} component {\tt aV\%iList}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt outVect} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt outVect}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) before this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt outVect}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportIAttr_(aV, AttrTag, outVect, lsize, perrWith, dieWith) - -! -! !USES: -! 
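! A minimal, hypothetical usage sketch for exportIAttr_() (the names av, days,
! and npts are illustrative only). Per the prologue above, the output pointer
! must either be pre-allocated or explicitly nullified so the routine can
! allocate it; in the latter case the caller owns the memory and must
! deallocate it when it is no longer needed:
!
!   integer, dimension(:), pointer :: days
!   integer :: npts
!   nullify(days)
!   call exportIAttr_(av, 'day', days, npts)
!   ! ... use days(1:npts) ...
!   deallocate(days)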
- use m_die , only : die - use m_stdio , only : stderr - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: AttrTag - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 19Oct01 - J.W. Larson - initial (slow) -! prototype. -! 6May02 - J.W. Larson - added capability -! to work with pre-allocated outVect. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportIAttr_' - - integer :: index, ierr, n, myLsize - type(String) :: myTrace - - if(present(dieWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, perrWith, myname_) - else ! Start a TraceBackString - call GenTraceBackString(myTrace, myname_) - endif - endif - - ! Index the attribute we wish to extract: - - index = indexIA_(aV, attrTag, dieWith=String_ToChar(myTrace)) - - ! Determine the number of data points: - - myLsize = lsize_(aV) - - ! Allocate space for outVect (if it is not already dimensioned) - - if(associated(outVect)) then ! check the size of outVect - if(size(outVect) < myLsize) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR length of output array outVect ', & - ' less than length of aV. size(outVect)=',size(outVect), & - ', length of aV=',myLsize - write(stderr,'(2a)') 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - else ! allocate space for outVect - allocate(outVect(myLsize), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Error - allocate(outVect(...) failed. ierr = ',ierr - write(stderr,'(2a)') 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - endif - - ! Copy the attribute data into outVect - -!$OMP PARALLEL DO PRIVATE(n) - do n=1,myLsize - outVect(n) = aV%iAttr(index,n) - end do - - ! return optional output argument lsize: - if(present(lsize)) lsize = myLsize - - call String_clean(myTrace) - - end subroutine exportIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportRAttrSP_ - Return REAL Attribute as a Pointer to Array -! -! !DESCRIPTION: -! This routine extracts from the input {\tt AttrVect} argument {\tt aV} -! the real attribute corresponding to the tag defined in the input -! {\tt CHARACTER} argument {\tt AttrTag}, and returns it in the -! {\tt REAL} output array {\tt outVect}, and its length in the output -! {\tt INTEGER} argument {\tt lsize}. The optional input {\tt CHARACTER} -! arguments {\tt perrWith} and {\tt dieWith} control how errors are -! handled. -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt exportRAttr\_()} terminates execution with an internally generated -! error message; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied traceback -! 
information stored in the argument {\tt perrWith}; -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied traceback -! information stored in the argument {\tt dieWith}; and -! \item if both {\tt perrWith} and {\tt dieWith} are present, execution -! terminates with an error message using {\tt dieWith}, and the argument -! {\tt perrWith} is ignored. -! \end{enumerate} -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt AttrVect} {\tt List} component {\tt aV\%iList}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt outVect} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt outVect}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) before this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt outVect}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportRAttrSP_(aV, AttrTag, outVect, lsize, perrWith, dieWith) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: AttrTag - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !OUTPUT PARAMETERS: - - real(SP), dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 19Oct01 - J.W. Larson - initial (slow) -! prototype. -! 6May02 - J.W. Larson - added capability -! to work with pre-allocated outVect. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRAttrSP_' - - integer :: index, ierr, n, myLsize - type(String) :: myTrace - - if(present(dieWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, perrWith, myname_) - else ! Start a TraceBackString - call GenTraceBackString(myTrace, myname_) - endif - endif - - ! Index the attribute we wish to extract: - - index = indexRA_(aV, attrTag, dieWith=String_ToChar(myTrace)) - - ! Determine the number of data points: - - myLsize = lsize_(aV) - - ! Allocate space for outVect (if it is not already dimensioned) - - if(associated(outVect)) then ! check the size of outVect - if(size(outVect) < myLsize) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR length of output array outVect ', & - ' less than length of aV. size(outVect)=',size(outVect), & - ', length of aV=',myLsize - write(stderr,'(2a)') 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - else ! 
allocate space for outVect - allocate(outVect(myLsize), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Error - allocate(outVect(...) failed. ierr = ',ierr - write(stderr,'(2a)') 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - endif - - ! Copy the attribute data into outVect - -!$OMP PARALLEL DO PRIVATE(n) - do n=1,myLsize - outVect(n) = aV%rAttr(index,n) - end do - - call String_clean(myTrace) - - ! return optional argument lsize - if(present(lsize)) lsize = myLsize - - end subroutine exportRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: exportRAttrDP_ - Return REAL Attribute as a Pointer to Array -! -! !DESCRIPTION: -! Double precision version of exportRAttrSP_ -! -! !INTERFACE: - - subroutine exportRAttrDP_(aV, AttrTag, outVect, lsize, perrWith, dieWith) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: AttrTag - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !OUTPUT PARAMETERS: - - real(DP), dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 19Oct01 - J.W. Larson - initial (slow) -! prototype. -! 6May02 - J.W. Larson - added capability -! to work with pre-allocated outVect. -! -! ______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRAttrDP_' - - integer :: index, ierr, n, myLsize - type(String) :: myTrace - - if(present(dieWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then ! Append onto TraceBack - call GenTraceBackString(myTrace, perrWith, myname_) - else ! Start a TraceBackString - call GenTraceBackString(myTrace, myname_) - endif - endif - - ! Index the attribute we wish to extract: - - index = indexRA_(aV, attrTag, dieWith=String_ToChar(myTrace)) - - ! Determine the number of data points: - - myLsize = lsize_(aV) - - ! Allocate space for outVect (if it is not already dimensioned) - - if(associated(outVect)) then ! check the size of outVect - if(size(outVect) < myLsize) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR length of output array outVect ', & - ' less than length of aV. size(outVect)=',size(outVect), & - ', length of aV=',myLsize - write(stderr,'(2a)') 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - else ! allocate space for outVect - allocate(outVect(myLsize), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Error - allocate(outVect(...) failed. ierr = ',ierr - write(stderr,'(2a)') 'Traceback: ',String_ToChar(myTrace) - call die(myname_) - endif - endif - - ! Copy the attribute data into outVect - -!$OMP PARALLEL DO PRIVATE(n) - do n=1,myLsize - outVect(n) = aV%rAttr(index,n) - end do - - call String_clean(myTrace) - - ! 
return optional argument lsize - if(present(lsize)) lsize = myLsize - - end subroutine exportRAttrDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importIAttr_ - Import INTEGER Vector as an Attribute -! -! !DESCRIPTION: -! This routine imports into the input/output {\tt AttrVect} argument -! {\tt aV} the integer attribute corresponding to the tag defined in the -! input {\tt CHARACTER} argument {\tt AttrTag}. The data to be imported -! is provided in the {\tt INTEGER} input array {\tt inVect}, and the -! number of entries to be imported in the optional input {\tt INTEGER} -! argument {\tt lsize}. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt AttrVect} {\tt List} component {\tt aV\%iList}. -! -! !INTERFACE: - - subroutine importIAttr_(aV, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - integer, dimension(:), pointer :: inVect - integer, optional, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aV - -! !REVISION HISTORY: -! 19Oct01 - J.W. Larson - initial (slow) -! prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importIAttr_' - - integer :: index, aVsize, ierr, n, mysize - - ! Index the attribute we wish to extract: - - index = indexIA_(aV, attrTag) - - ! Determine the number of data points: - - aVsize = lsize_(aV) - - ! Check input array size vs. lsize_(aV): - - if(present(lsize)) then - if(aVsize < lsize) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR--attempt to import too many entries ', & - 'into AttrVect aV. AttrVect_lsize(aV)=',aVsize, & - ', number of entries to be imported=',lsize - call die(myname_) - endif - mysize=lsize - else - if(aVsize < size(inVect)) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR--attempt to import too many entries ', & - 'into AttrVect aV. AttrVect_lsize(aV)=',aVsize, & - ' , number of entries to be imported=',size(inVect) - call die(myname_) - endif - mysize = aVsize - endif - - ! Copy the data from inVect to its attribute slot: - -!$OMP PARALLEL DO PRIVATE(n) - do n=1,mysize - aV%iAttr(index,n) = inVect(n) - end do - - end subroutine importIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importRAttrSP_ - Import REAL Vector as an Attribute -! -! !DESCRIPTION: -! This routine imports into the input/output {\tt AttrVect} argument -! {\tt aV} the real attribute corresponding to the tag defined in the -! input {\tt CHARACTER} argument {\tt AttrTag}. The data to be imported -! is provided in the {\tt REAL} input array {\tt inVect}, and its -! length in the optional input {\tt INTEGER} argument {\tt lsize}. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt AttrVect} {\tt List} component {\tt aV\%rList}. -! -! !INTERFACE: - - subroutine importRAttrSP_(aV, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - implicit none - -! 
!INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - real(SP), dimension(:), pointer :: inVect - integer, optional, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aV - - - -! !REVISION HISTORY: -! 19Oct01 - J.W. Larson - initial (slow) -! prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importRAttrSP_' - - integer :: index, aVsize, ierr, n, mysize - - ! Index the attribute we wish to extract: - - index = indexRA_(aV, attrTag) - - ! Determine the number of data points: - - aVsize = lsize_(aV) - - ! Check input array size vs. lsize_(aV): - - if(present(lsize)) then - if(aVsize < lsize) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR--attempt to import too many entries ', & - 'into AttrVect aV. AttrVect_lsize(aV)=',aVsize, & - ', number of entries to be imported=',lsize - call die(myname_) - endif - mysize=lsize - else - if(aVsize < size(inVect)) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR--attempt to import too many entries ', & - 'into AttrVect aV. AttrVect_lsize(aV)=',aVsize, & - ' , number of entries to be imported=',size(inVect) - call die(myname_) - endif - mysize=aVsize - endif - - ! Copy the attribute data into outVect - -!$OMP PARALLEL DO PRIVATE(n) - do n=1,mysize - aV%rAttr(index,n) = inVect(n) - end do - - end subroutine importRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: importRAttrDP_ - Import REAL Vector as an Attribute -! -! !DESCRIPTION: -! Double precision version of importRAttrSP_ -! -! !INTERFACE: - - subroutine importRAttrDP_(aV, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - real(DP), dimension(:), pointer :: inVect - integer, optional, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aV - - - -! !REVISION HISTORY: -! 19Oct01 - J.W. Larson - initial (slow) -! prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importRAttrDP_' - - integer :: index, aVsize, ierr, n, mysize - - ! Index the attribute we wish to extract: - - index = indexRA_(aV, attrTag) - - ! Determine the number of data points: - - aVsize = lsize_(aV) - - ! Check input array size vs. lsize_(aV): - - if(present(lsize)) then - if(aVsize < lsize) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR--attempt to import too many entries ', & - 'into AttrVect aV. AttrVect_lsize(aV)=',aVsize, & - ', number of entries to be imported=',lsize - call die(myname_) - endif - mysize=lsize - else - if(aVsize < size(inVect)) then - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR--attempt to import too many entries ', & - 'into AttrVect aV. AttrVect_lsize(aV)=',aVsize, & - ' , number of entries to be imported=',size(inVect) - call die(myname_) - endif - mysize=aVsize - endif - - ! Copy the attribute data into outVect - -!$OMP PARALLEL DO PRIVATE(n) - do n=1,mysize - aV%rAttr(index,n) = inVect(n) - end do - - end subroutine importRAttrDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
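!=======================================================================
! [Editor's note: illustrative sketch, not part of the original
!  m_AttrVect.F90 removed by this patch. It shows how the import/export
!  routines documented above are typically driven through the module's
!  public generic interfaces (init, clean, importRAttr, exportRAttr --
!  the usual MCT names, assumed here), with made-up attribute names,
!  sizes, and the DP kind taken from m_realkinds.]
!
      program attrvect_import_export_sketch
         use m_realkinds, only : DP
         use m_AttrVect,  only : AttrVect, AttrVect_init => init, &
                                 AttrVect_clean => clean,         &
                                 importRAttr, exportRAttr
         implicit none
         type(AttrVect)                  :: aV
         real(DP), dimension(:), pointer :: uIn, uOut
         integer                         :: npts

         ! an AttrVect with three REAL attributes and 100 local points
         call AttrVect_init(aV, rList='u:v:t', lsize=100)

         allocate(uIn(100))
         uIn = 1.0_DP
         call importRAttr(aV, 'u', uIn, lsize=100)    ! fill attribute 'u'

         nullify(uOut)                                ! unassociated, so export allocates it
         call exportRAttr(aV, 'u', uOut, lsize=npts)  ! npts returns as 100

         deallocate(uIn, uOut)
         call AttrVect_clean(aV)
      end program attrvect_import_export_sketch
!=======================================================================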
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: RCopy_ - Copy Real Attributes from One AttrVect to Another -! -! !DESCRIPTION: -! This routine copies from input argment {\tt aVin} into the output -! {\tt AttrVect} argument {\tt aVout} the shared real attributes. -! -! If the optional argument {\tt Vector} is present and true, the vector -! architecture-friendly portions of this routine will be invoked. -! -! If the optional argument {\tt sharedIndices} is present, it should be -! the result of the call {\tt SharedIndicesOneType\_(aVin, aVout, 'REAL', -! sharedIndices)}. Providing this argument speeds up this routine -! substantially. For example, you can compute a {\tt sharedIndices} -! structure once for a given pair of {\tt AttrVect}s, then use that same -! structure for all copies between those two {\tt AttrVect}s (although -! note that a different {\tt sharedIndices} variable would be needed if -! {\tt aVin} and {\tt aVout} were reversed). -! -! {\bf N.B.:} This routine will fail if the {\tt aVout} is not initialized. -! -! !INTERFACE: - - subroutine RCopy_(aVin, aVout, vector, sharedIndices) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aVin - logical, optional, intent(in) :: vector - type(AVSharedIndicesOneType), optional, intent(in) :: sharedIndices - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aVout - - -! !REVISION HISTORY: -! 18Aug06 - R. Jacob - initial version. -! 28Apr11 - W.J. Sacks - added sharedIndices argument -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::RCopy_' - - integer :: i,j,ier ! dummy variables - integer :: aVsize ! The lsize of aVin and aVout - integer :: inxmin, outxmin ! Index variables - logical :: usevector ! true if vector flag is present and true. - character*7 :: data_flag ! character variable used as data type flag - type(AVSharedIndicesOneType) :: mySharedIndices ! copied from sharedIndices, or - ! computed if sharedIndices is not - ! present - logical :: clean_mySharedIndices ! true if we need to clean mySharedIndices before - ! returning (will be true if we did allocation in this - ! subroutine) - - - ! Check the arguments - aVsize = lsize_(aVin) - if(lsize_(aVin) /= lsize_(aVout)) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: Input aV and output aV do not have the same size' - call die(myname_,'MCTERROR: Input aV and output aV & - &do not have the same size',2) - endif - - data_flag = 'REAL' - - if (present(sharedIndices)) then - ! do some error checking on sharedIndices - if (.not. (associated(sharedIndices%aVindices1) .and. associated(sharedIndices%aVindices2))) then - call die(myname_,'MCTERROR: provided sharedIndices structure is uninitialized',3) - endif - if (trim(sharedIndices%data_flag) /= data_flag) then - call die(myname_,'MCTERROR: provided sharedIndices structure has incorrect data_flag',4) - endif - - ! copy into local variable - mySharedIndices = sharedIndices - clean_mySharedIndices = .false. - else - ! Check REAL attributes for matching indices - call SharedIndicesOneType_(aVin, aVout, data_flag, mySharedIndices) - clean_mySharedIndices = .true. 
- endif - - if(mySharedIndices%num_indices <= 0) then - if (clean_mySharedIndices) then - call cleanSharedIndicesOneType_(mySharedIndices,stat=ier) - if(ier /= 0) call die(myname_,'MCTERROR: in cleanSharedIndicesOneType_',ier) - endif - return - endif - - ! check vector flag. - usevector = .false. - if (present(vector)) then - if(vector) usevector = .true. - endif - - ! Start copying - - if(mySharedIndices%contiguous) then - - outxmin=mySharedIndices%aVindices2(1)-1 - inxmin=mySharedIndices%aVindices1(1)-1 - if(usevector) then -!$OMP PARALLEL DO PRIVATE(i,j) - do i=1,mySharedIndices%num_indices -!CDIR SELECT(VECTOR) -!DIR$ IVDEP - do j=1,aVsize - aVout%rAttr(outxmin+i,j) = aVin%rAttr(inxmin+i,j) - enddo - enddo - else -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,mySharedIndices%num_indices - aVout%rAttr(outxmin+i,j) = aVin%rAttr(inxmin+i,j) - enddo - enddo - endif - - else - -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,mySharedIndices%num_indices - aVout%rAttr(mySharedIndices%aVindices2(i),j) = aVin%rAttr(mySharedIndices%aVindices1(i),j) - enddo - enddo - - endif - - - if (clean_mySharedIndices) then - call cleanSharedIndicesOneType_(mySharedIndices,stat=ier) - if(ier /= 0) call die(myname_,'MCTERROR: in cleanSharedIndicesOneType_',ier) - endif - - end subroutine RCopy_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: RCopyL_ - Copy Specific Real Attributes from One AttrVect to Another -! -! !DESCRIPTION: -! This routine copies from input argment {\tt aVin} into the output -! {\tt AttrVect} argument {\tt aVout} the real attributes specified in -! input {\tt CHARACTER} argument {\tt rList}. The attributes can -! be listed in any order. -! -! If any attributes in {\tt aVout} have different names but represent the -! the same quantity and should still be copied, you must provide a translation -! argument {\tt TrList}. The translation arguments should -! be identical in length to the {\tt rList} but with the correct {\tt aVout} -! name subsititued at the appropriate place. -! -! If the optional argument {\tt Vector} is present and true, the vector -! architecture-friendly portions of this routine will be invoked. -! -! {\bf N.B.:} This routine will fail if the {\tt aVout} is not initialized or -! if any of the specified attributes are not present in either {\tt aVout} or {\tt aVin}. -! -! !INTERFACE: - - subroutine RCopyL_(aVin, aVout, rList, TrList, vector) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - use m_List, only : GetIndices => get_indices - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aVin - character(len=*), intent(in) :: rList - character(len=*), optional, intent(in) :: TrList - logical, optional, intent(in) :: vector - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aVout - - -! !REVISION HISTORY: -! 16Aug06 - R. Jacob - initial version from breakup -! of Copy_. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::RCopyL_' - - integer :: i,j,ier ! dummy variables - integer :: num_indices ! Overlapping attribute index number - integer :: aVsize ! The lsize of aVin and aVout - integer :: inxmin, outxmin ! Index variables - logical :: TrListIsPresent ! 
true if list argument is present - logical :: contiguous ! true if index segments are contiguous in memory - logical :: usevector ! true if vector flag is present and true. - - ! Overlapping attribute index storage arrays: - integer, dimension(:), pointer :: aVinindices, aVoutindices - - - ! Check the arguments - aVsize = lsize_(aVin) - if(lsize_(aVin) /= lsize_(aVout)) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: Input aV and output aV do not have the same size' - call die(myname_,'MCTERROR: Input aV and output aV & - &do not have the same size',2) - endif - - if(len_trim(rList) <= 0) return - ! Copy the listed real attributes - - ! Index rList with the AttrVects - call GetIndices(aVinindices,aVin%rList,trim(rList)) - -! TrList is present if it is provided and its length>0 - TrListIsPresent = .false. - if(present(TrList)) then - if(len_trim(TrList) > 0) then - TrListIsPresent = .true. - endif - endif - - if(TrListIsPresent) then - call GetIndices(aVoutindices,aVout%rList,trim(TrList)) - - if(size(aVinindices) /= size(aVoutindices)) then - call die(myname_,"Arguments rList and TrList do not& - &contain the same number of items") - endif - else - call GetIndices(aVoutindices,aVout%rList,trim(rList)) - endif - - num_indices=size(aVoutindices) - - ! nothing to do if num_indices <=0 - if (num_indices <= 0) then - deallocate(aVinindices, aVoutindices, stat=ier) - if(ier/=0) call die(myname_,"deallocate(aVinindices...)",ier) - return - endif - - ! check vector flag. - usevector = .false. - if (present(vector)) then - if(vector) usevector = .true. - endif - -! Check if the indices are contiguous in memory for faster copy - contiguous=.true. - do i=2,num_indices - if(aVinindices(i) /= aVinindices(i-1)+1) then - contiguous = .false. - exit - endif - enddo - if(contiguous) then - do i=2,num_indices - if(aVoutindices(i) /= aVoutindices(i-1)+1) then - contiguous=.false. - exit - endif - enddo - endif - -! Start copying (arranged loop order optimized for xlf90) - if(contiguous) then - - outxmin=aVoutindices(1)-1 - inxmin=aVinindices(1)-1 - if(usevector) then -!$OMP PARALLEL DO PRIVATE(i,j) - do i=1,num_indices -!DIR$ IVDEP - do j=1,aVsize - aVout%rAttr(outxmin+i,j) = aVin%rAttr(inxmin+i,j) - enddo - enddo - else -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,num_indices - aVout%rAttr(outxmin+i,j) = aVin%rAttr(inxmin+i,j) - enddo - enddo - endif - - else - -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,num_indices - aVout%rAttr(aVoutindices(i),j) = aVin%rAttr(aVinindices(i),j) - enddo - enddo - - endif - - deallocate(aVinindices, aVoutindices, stat=ier) - if(ier/=0) call die(myname_,"deallocate(aVinindices...)",ier) - - end subroutine RCopyL_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ICopy_ - Copy Integer Attributes from One AttrVect to Another -! -! !DESCRIPTION: -! This routine copies from input argment {\tt aVin} into the output -! {\tt AttrVect} argument {\tt aVout} the shared integer attributes. -! -! If the optional argument {\tt Vector} is present and true, the vector -! architecture-friendly portions of this routine will be invoked. -! -! If the optional argument {\tt sharedIndices} is present, it should be -! the result of the call {\tt SharedIndicesOneType\_(aVin, aVout, 'INTEGER', -! sharedIndices)}. Providing this argument speeds up this routine -! 
substantially. For example, you can compute a {\tt sharedIndices} -! structure once for a given pair of {\tt AttrVect}s, then use that same -! structure for all copies between those two {\tt AttrVect}s (although -! note that a different {\tt sharedIndices} variable would be needed if -! {\tt aVin} and {\tt aVout} were reversed). -! -! {\bf N.B.:} This routine will fail if the {\tt aVout} is not initialized. -! -! !INTERFACE: - - subroutine ICopy_(aVin, aVout, vector, sharedIndices) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aVin - logical, optional, intent(in) :: vector - type(AVSharedIndicesOneType), optional, intent(in) :: sharedIndices - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aVout - - -! !REVISION HISTORY: -! 16Aug06 - R. Jacob - initial version. -! 28Apr11 - W.J. Sacks - added sharedIndices argument -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ICopy_' - - integer :: i,j,ier ! dummy variables - integer :: aVsize ! The lsize of aVin and aVout - integer :: inxmin, outxmin ! Index variables - logical :: usevector ! true if vector flag is present and true. - character*7 :: data_flag ! character variable used as data type flag - type(AVSharedIndicesOneType) :: mySharedIndices ! copied from sharedIndices, or - ! computed if sharedIndices is not - ! present - logical :: clean_mySharedIndices ! true if we need to clean mySharedIndices before - ! returning (will be true if we did allocation in this - ! subroutine) - - - ! Check the arguments - aVsize = lsize_(aVin) - if(lsize_(aVin) /= lsize_(aVout)) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: Input aV and output aV do not have the same size' - call die(myname_,'MCTERROR: Input aV and output aV & - &do not have the same size',2) - endif - - data_flag = 'INTEGER' - - if (present(sharedIndices)) then - ! do some error checking on sharedIndices - if (.not. (associated(sharedIndices%aVindices1) .and. associated(sharedIndices%aVindices2))) then - call die(myname_,'MCTERROR: provided sharedIndices structure is uninitialized',3) - endif - if (trim(sharedIndices%data_flag) /= data_flag) then - call die(myname_,'MCTERROR: provided sharedIndices structure has incorrect data_flag',4) - endif - - ! copy into local variable - mySharedIndices = sharedIndices - clean_mySharedIndices = .false. - else - ! Check INTEGER attributes for matching indices - call SharedIndicesOneType_(aVin, aVout, data_flag, mySharedIndices) - clean_mySharedIndices = .true. - endif - - if(mySharedIndices%num_indices <= 0) then - if (clean_mySharedIndices) then - call cleanSharedIndicesOneType_(mySharedIndices,stat=ier) - if(ier /= 0) call die(myname_,'MCTERROR: in cleanSharedIndicesOneType_',ier) - endif - return - endif - - ! check vector flag. - usevector = .false. - if (present(vector)) then - if(vector) usevector = .true. 
- endif - - - if(mySharedIndices%contiguous) then - - outxmin=mySharedIndices%aVindices2(1)-1 - inxmin=mySharedIndices%aVindices1(1)-1 - if(usevector) then -!$OMP PARALLEL DO PRIVATE(i,j) - do i=1,mySharedIndices%num_indices -!CDIR SELECT(VECTOR) -!DIR$ IVDEP - do j=1,aVsize - aVout%iAttr(outxmin+i,j) = aVin%iAttr(inxmin+i,j) - enddo - enddo - else -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,mySharedIndices%num_indices - aVout%iAttr(outxmin+i,j) = aVin%iAttr(inxmin+i,j) - enddo - enddo - endif - - else - -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,mySharedIndices%num_indices - aVout%iAttr(mySharedIndices%aVindices2(i),j) = aVin%iAttr(mySharedIndices%aVindices1(i),j) - enddo - enddo - - endif - - if (clean_mySharedIndices) then - call cleanSharedIndicesOneType_(mySharedIndices,stat=ier) - if(ier /= 0) call die(myname_,'MCTERROR: in cleanSharedIndicesOneType_',ier) - endif - - end subroutine ICopy_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ICopyL_ - Copy Specific Integer Attributes from One AttrVect to Another -! -! !DESCRIPTION: -! This routine copies from input argment {\tt aVin} into the output -! {\tt AttrVect} argument {\tt aVout} the integer attributes specified in -! input {\tt CHARACTER} argument {\tt iList}. The attributes can -! be listed in any order. -! -! If any attributes in {\tt aVout} have different names but represent the -! the same quantity and should still be copied, you must provide a translation -! argument {\tt TiList}. The translation arguments should -! be identical in length to the {\tt iList} but with the correct {\tt aVout} -! name subsititued at the appropriate place. -! -! If the optional argument {\tt Vector} is present and true, the vector -! architecture-friendly portions of this routine will be invoked. -! -! {\bf N.B.:} This routine will fail if the {\tt aVout} is not initialized or -! if any of the specified attributes are not present in either {\tt aVout} or {\tt aVin}. -! -! !INTERFACE: - - subroutine ICopyL_(aVin, aVout, iList, TiList, vector) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - use m_List, only : GetIndices => get_indices - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aVin - character(len=*) , intent(in) :: iList - character(len=*), optional, intent(in) :: TiList - logical, optional, intent(in) :: vector - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aVout - - -! !REVISION HISTORY: -! 16Aug06 - R. Jacob - initial version from breakup -! of Copy_. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ICopyL_' - - integer :: i,j,ier ! dummy variables - integer :: num_indices ! Overlapping attribute index number - integer :: aVsize ! The lsize of aVin and aVout - integer :: inxmin, outxmin ! Index variables - logical :: TiListIsPresent ! true if list argument is present - logical :: contiguous ! true if index segments are contiguous in memory - logical :: usevector ! true if vector flag is present and true. - - ! Overlapping attribute index storage arrays: - integer, dimension(:), pointer :: aVinindices, aVoutindices - - - ! 
Check the arguments - aVsize = lsize_(aVin) - if(lsize_(aVin) /= lsize_(aVout)) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: Input aV and output aV do not have the same size' - call die(myname_,'MCTERROR: Input aV and output aV & - &do not have the same size',2) - endif - - if(len_trim(iList) <= 0) return - ! Copy the listed real attributes - - -! Index rList with the AttrVects - call GetIndices(aVinindices,aVin%iList,trim(iList)) - -! TiList is present if its provided and its length>0 - TiListIsPresent = .false. - if(present(TiList)) then - if(len_trim(TiList) > 0) then - TiListIsPresent = .true. - endif - endif - - if(TiListIsPresent) then - call GetIndices(aVoutindices,aVout%iList,trim(TiList)) - if(size(aVinindices) /= size(aVoutindices)) then - call die(myname_,"Arguments iList and TiList do not& - &contain the same number of items") - endif - else - call GetIndices(aVoutindices,aVout%iList,trim(iList)) - endif - - num_indices=size(aVoutindices) - - ! nothing to do if num_indices <=0 - if (num_indices <= 0) then - deallocate(aVinindices, aVoutindices, stat=ier) - if(ier/=0) call die(myname_,"deallocate(aVinindices...)",ier) - return - endif - - ! check vector flag. - usevector = .false. - if (present(vector)) then - if(vector) usevector = .true. - endif - -! Check if the indices are contiguous in memory for faster copy - contiguous=.true. - do i=2,num_indices - if(aVinindices(i) /= aVinindices(i-1)+1) then - contiguous = .false. - exit - endif - enddo - if(contiguous) then - do i=2,num_indices - if(aVoutindices(i) /= aVoutindices(i-1)+1) then - contiguous=.false. - exit - endif - enddo - endif - -! Start copying (arranged loop order optimized for xlf90) - if(contiguous) then - - outxmin=aVoutindices(1)-1 - inxmin=aVinindices(1)-1 - if(usevector) then -!$OMP PARALLEL DO PRIVAtE(i,j) - do i=1,num_indices -!CDIR SELECT(VECTOR) -!DIR$ IVDEP - do j=1,aVsize - aVout%iAttr(outxmin+i,j) = aVin%iAttr(inxmin+i,j) - enddo - enddo - else -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,num_indices - aVout%iAttr(outxmin+i,j) = aVin%iAttr(inxmin+i,j) - enddo - enddo - endif - - else - -!$OMP PARALLEL DO PRIVATE(j,i) COLLAPSE(2) - do j=1,aVsize - do i=1,num_indices - aVout%iAttr(aVoutindices(i),j) = aVin%iAttr(aVinindices(i),j) - enddo - enddo - - endif - - deallocate(aVinindices, aVoutindices, stat=ier) - if(ier/=0) call die(myname_,"deallocate(aVinindices...)",ier) - - end subroutine ICopyL_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Copy_ - Copy Real and Integer Attributes from One AttrVect to Another -! -! !DESCRIPTION: -! This routine copies from input argment {\tt aVin} into the output -! {\tt AttrVect} argument {\tt aVout} the real and integer attributes specified in -! input {\tt CHARACTER} argument {\tt iList} and {\tt rList}. The attributes can -! be listed in any order. If neither {\tt iList} nor {\tt rList} are provided, -! all attributes shared between {\tt aVin} and {\tt aVout} will be copied. -! -! If any attributes in {\tt aVout} have different names but represent the -! the same quantity and should still be copied, you must provide a translation -! argument {\tt TrList} and/or {\tt TiList}. The translation arguments should -! be identical to the {\tt rList} or {\tt iList} but with the correct {\tt aVout} -! name subsititued at the appropriate place. -! -! 
This routines combines the functions of {\tt RCopy\_}, {\tt RCopyL\_}, -! {\tt ICopy\_} and {\tt ICopyL\_}. If you know you only want to copy real -! attributes, use the {\tt RCopy} functions. If you know you only want to -! copy integer attributes, use the {\tt ICopy} functions. -! -! If the optional argument {\tt Vector} is present and true, the vector -! architecture-friendly portions of this routine will be invoked. -! -! If the optional argument {\tt sharedIndices} is present, it should be -! the result of the call {\tt SharedIndices\_(aVin, aVout, -! sharedIndices)}. Providing this argument speeds up this routine -! substantially. For example, you can compute a {\tt sharedIndices} -! structure once for a given pair of {\tt AttrVect}s, then use that same -! structure for all copies between those two {\tt AttrVect}s (although -! note that a different {\tt sharedIndices} variable would be needed if -! {\tt aVin} and {\tt aVout} were reversed). Note, however, that {\tt -! sharedIndices} is ignored if either {\tt rList} or {\tt iList} are -! given. -! -! {\bf N.B.:} This routine will fail if the {\tt aVout} is not initialized or -! if any of the specified attributes are not present in either {\tt aVout} or {\tt aVin}. -! -! !INTERFACE: - - subroutine Copy_(aVin, aVout, rList, TrList, iList, TiList, vector, sharedIndices) - -! -! !USES: -! - use m_die , only : die, warn - use m_stdio , only : stderr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aVin - character(len=*), optional, intent(in) :: iList - character(len=*), optional, intent(in) :: rList - character(len=*), optional, intent(in) :: TiList - character(len=*), optional, intent(in) :: TrList - logical, optional, intent(in) :: vector - type(AVSharedIndices), optional, intent(in) :: sharedIndices - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: aVout - - -! !REVISION HISTORY: -! 12Jun02 - R. Jacob - initial version. -! 13Jun02 - R. Jacob - copy shared attributes -! if no attribute lists are specified. -! 30Sep02 - R. Jacob - new argument order with all -! optional arguments last -! 19Feb02 - E. Ong - new implementation using -! new list function get_indices and faster memory copy -! 28Oct03 - R. Jacob - add optional vector -! argument to use vector-friendly code provided by Fujitsu -! 16Aug06 - R. Jacob - split into 4 routines: -! RCopy_,RCopyL_,ICopy_,ICopyL_ -! 28Apr11 - W.J. Sacks - added sharedIndices argument -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Copy_' - - integer :: i,j,ier ! dummy variables - integer :: num_indices ! Overlapping attribute index number - integer :: aVsize ! The lsize of aVin and aVout - integer :: num_inindices, num_outindices ! Number of matching indices in aV - integer :: inxmin, outxmin, inx, outx ! Index variables - logical :: TiListIsPresent, TrListIsPresent! true if list argument is present - logical :: contiguous ! true if index segments are contiguous in memory - logical :: usevector ! true if vector flag is present and true. - - ! Overlapping attribute index storage arrays: - integer, dimension(:), pointer :: aVinindices, aVoutindices - - - ! Check the arguments - aVsize = lsize_(aVin) - if(lsize_(aVin) /= lsize_(aVout)) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: Input aV and output aV do not have the same size' - call die(myname_,'MCTERROR: Input aV and output aV & - &do not have the same size',2) - endif - - ! check vector flag. - usevector = .false. 
- if (present(vector)) then - if(vector) usevector = .true. - endif - - ! Copy the listed real attributes - if(present(rList)) then - ! TrList is present if it is provided and its length>0 - TrListIsPresent = .false. - if(present(TrList)) then - if(len_trim(TrList) > 0) then - TrListIsPresent = .true. - endif - endif - - if(present(sharedIndices)) then - call warn(myname_,'Use of sharedIndices not implemented in RCopyL; & - &ignoring sharedIndices',1) - endif - - if(TrListIsPresent) then - call RCopyL_(aVin,aVout,rList,TrList,vector=usevector) - else - call RCopyL_(aVin,aVout,rList,vector=usevector) - endif - - endif ! if(present(rList) - - ! Copy the listed integer attributes - if(present(iList)) then - - ! TiList is present if its provided and its length>0 - TiListIsPresent = .false. - if(present(TiList)) then - if(len_trim(TiList) > 0) then - TiListIsPresent = .true. - endif - endif - - if(present(sharedIndices)) then - call warn(myname_,'Use of sharedIndices not implemented in ICopyL; & - &ignoring sharedIndices',1) - endif - - if(TiListIsPresent) then - call ICopyL_(aVin,aVout,iList,TiList,vector=usevector) - else - call ICopyL_(aVin,aVout,iList,vector=usevector) - endif - - endif ! if(present(iList)) - - ! If neither rList nor iList is present, copy shared attibutes - ! from in to out. - if( .not.present(rList) .and. .not.present(iList)) then - - if (present(sharedIndices)) then - call RCopy_(aVin, Avout, vector=usevector, sharedIndices=sharedIndices%shared_real) - call ICopy_(aVin, Avout, vector=usevector, sharedIndices=sharedIndices%shared_integer) - else - call RCopy_(aVin, Avout, vector=usevector) - call ICopy_(aVin, Avout, vector=usevector) - endif - - endif - - end subroutine Copy_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Sort_ - Use Attributes as Keys to Generate an Index Permutation -! -! !DESCRIPTION: -! The subroutine {\tt Sort\_()} uses a list of keys defined by the {\tt List} -! {\tt key\_list}, searches for the appropriate integer or real attributes -! referenced by the items in {\tt key\_list} ( that is, it identifies the -! appropriate entries in {aV\%iList} and {\tt aV\%rList}), and then -! uses these keys to generate a permutation {\tt perm} that will put -! the entries of the attribute vector {\tt aV} in lexicographic order -! as defined by {\tt key\_list} (the ordering in {\tt key\_list} being from -! left to right. -! -! {\bf N.B.:} This routine will fail if {\tt aV\%iList} and -! {\tt aV\%rList} share one or more common entries. -! -! {\bf N.B.:} This routine will fail if one of the sorting keys presented is -! not present in {\tt aV\%iList} nor {\tt aV\%rList}. -! -! !INTERFACE: - - subroutine Sort_(aV, key_list, perm, descend, perrWith, dieWith) -! -! !USES: -! - use m_String, only : String - use m_String, only : String_tochar => tochar - use m_String, only : String_clean => clean - use m_List , only : List_allocated => allocated - use m_List , only : List_index => index - use m_List , only : List_nitem => nitem - use m_List , only : List_get => get - use m_die , only : die - use m_stdio , only : stderr - use m_SortingTools , only : IndexSet - use m_SortingTools , only : IndexSort - - implicit none - -! !INPUT PARAMETERS: -! 
- type(AttrVect), intent(in) :: aV - type(List), intent(in) :: key_list - logical, dimension(:), optional, intent(in) :: descend - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !OUTPUT PARAMETERS: -! - integer, dimension(:), pointer :: perm - - -! !REVISION HISTORY: -! 20Oct00 - J.W. Larson - initial prototype -! 25Apr01 - R.L. Jacob - add -1 to make a -! backwards loop go backwards -! 14Jun01 - J. Larson / E. Ong -- Fixed logic bug in REAL attribute -! sort (discovered by E. Ong), and cleaned up error / -! shutdown logic. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Sort_' - -! local variables - - ! storage for key extracted from key_list: - - type(String) :: key - - ! number of keys, loop index, error flag, and length: - - integer :: nkeys, n, ierr, length - - ! key indices for av%rAttr and av%iAttr, respectively: - - integer, dimension(:), allocatable :: rIndex, iIndex - - ! copy of descend argument - - logical, dimension(:), allocatable :: descend_copy - - ! count the sorting keys: - - nkeys = List_nitem(key_list) - - ! Check the descend argument. Note: the unnecessary copy - ! circumvents an optimization bug in the compaq compiler - - if(present(descend)) then - if(size(descend)/=nkeys) then - call die(myname_,"Size of descend argument is not equal & - &to the number of keys") - endif - allocate(descend_copy(nkeys),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(descend_copy)",ierr) - descend_copy=descend - endif - - - ! allocate and initialize rIndex and iIndex to - ! zero (the null return values from the functions - ! indexRA_() and indexIA_() ). - - allocate(rIndex(nkeys), iIndex(nkeys), stat=ierr) - if(ierr/=0) call die(myname_,"allocate(rindex,iIndex)",ierr) - - rIndex = 0 - iIndex = 0 - - ! Loop over the keys in the list, and identify the - ! appropriate integer or real attribute, storing the - ! attribute index in iIndex(:) or rIndex(:), respectively. - - do n = 1, nkeys - - ! grab the next key - - call List_get(key, n, key_list) - - ! determine wheter this key refers to an - ! integer or real attribute: -! jwl commented out in favor of below code block unitl an error -! handling strategy is settled upon for indexIA_() and indexRA_(). -! rIndex(n) = indexRA_(aV, String_tochar(key), dieWith=myname_) -! iIndex(n) = indexIA_(aV, String_tochar(key), dieWith=myname_) - - if(List_allocated(aV%rList)) then - rIndex(n) = List_index(aV%rList, String_tochar(key)) - else - rIndex(n) = 0 - endif - if(List_allocated(aV%iList)) then - iIndex(n) = List_index(aV%iList, String_tochar(key)) - else - iIndex(n) = 0 - endif - - ! If both rIndex(n) and iIndex(n) are greater than - ! zero, then we have an integer attribute sharing - ! the same name as a real attribute, and there is - ! no clear path as to which one is the sort key. - ! This is a fatal error that triggers shutdown. - - if ((rIndex(n) > 0) .and. (iIndex(n) > 0)) then - if(.not.present(dieWith)) then - if(present(perrWith)) write(stderr,'(4a)') myname, & - ":: ambiguous key, ", perrWith, & - " both iIndex(n) and rIndex(n) positive." - call die(myname_,":: both iIndex(n) and rIndex(n) > 0.") - else - if(present(perrWith)) then - write(stderr,'(4a)') myname_,":: ", perrWith, & - " both iIndex(n) and rIndex(n) positive." - endif - call die(myname_,dieWith) - endif - endif - - ! If both rIndex(n) and iIndex(n) are nonpositive, - ! then the requested sort key is not present in either - ! 
aV%rList or aV%iList, and we cannot perform the sort. - ! This is a fatal error that triggers shutdown. - - if ((rIndex(n) <= 0) .and. (iIndex(n) <= 0)) then - if(.not.present(dieWith)) then - if(present(perrWith)) write(stderr,'(4a)') myname,":: ", & - perrWith, & - " both iIndex(n) and rIndex(n) nonpositive" - call die(myname_,":: both iIndex(n) and rIndex(n) <= 0.") - else - if(present(perrWith)) then - write(stderr,'(4a)') myname_,":: ", perrWith, & - " both iIndex(n) and rIndex(n) nonpositive" - endif - call die(myname_,dieWith) - endif - endif - - ! If only one of rIndex(n) or iIndex(n) is positive, - ! set the other value to zero. - - if (iIndex(n) > 0) rIndex(n) = 0 - if (rIndex(n) > 0) iIndex(n) = 0 - - ! Clean up temporary string -key- - - call String_clean(key) - - enddo ! do n=1,nkeys - - ! Now we have the locations of the keys in the integer and - ! real attribute storage areas aV%iAttr and aV%rAttr, respectively. - ! our next step is to construct and initialize the permutation - ! array perm. First step--determine the length of aV using - ! lsize_(): - - length = lsize_(aV) - - allocate(perm(length), stat=ierr) - if(ierr/=0) call die(myname_,"allocate(perm)",ierr) - - ! Initialize perm(i)=i, for i=1,length - - call IndexSet(perm) - - ! Now we can perform the stable successive keyed sorts to - ! transform perm into the permutation that will place the - ! entries of the attribute arrays in the lexicographic order - ! defined by key_list. This is achieved by successive calls to - ! IndexSort(), but in reverse order to the order of the keys - ! as they appear in key_list. - - do n=nkeys, 1, -1 - if(iIndex(n) > 0) then - if(present(descend)) then - call IndexSort(length, perm, aV%iAttr(iIndex(n),:), & - descend_copy(n)) - else - call IndexSort(length, perm, aV%iAttr(iIndex(n),:), & - descend=.false.) - endif ! if(present(descend)... - else - if(rIndex(n) > 0) then - if(present(descend)) then - call IndexSort(length, perm, aV%rAttr(rIndex(n),:), & - descend_copy(n)) - else - call IndexSort(length, perm, aV%rAttr(rIndex(n),:), & - descend=.false.) - endif ! if(present(descend)... - endif ! if (rIndex(n) > 0)... - endif ! if (iIndex(n) > 0)... - enddo - - ! Now perm(1:length) is the transformation we seek--we are - ! finished. - - deallocate(iIndex, rIndex, stat=ierr) ! clean up allocated arrays. - if(ierr/=0) call die(myname_,"deallocate(iIndex,rIndex)",ierr) - - if(present(descend)) deallocate(descend_copy,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(descend_copy)",ierr) - - end subroutine Sort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Permute_ - Permute AttrVect Elements -! -! !DESCRIPTION: -! The subroutine {\tt Permute\_()} uses a a permutation {\tt perm} (which can -! be generated by the routine {\tt Sort\_()} in this module) to rearrange -! the entries in the attribute integer and real storage areas of the -! input attribute vector {\tt aV}--{\tt aV\%iAttr} and {\tt aV\%rAttr}, -! respectively. -! -! !INTERFACE: - - subroutine Permute_(aV, perm, perrWith, dieWith) -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - use m_SortingTools , only : Permute - - implicit none - -! !INPUT PARAMETERS: -! - integer, dimension(:), intent(in) :: perm - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! 
!INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(inout) :: aV - -! !REVISION HISTORY: -! 23Oct00 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Permute_' - -! local variables - - integer :: i - - ! Check input arguments for compatibility--assure - ! lsize_(aV) = size(perm); that is, make sure the - ! index permutation is the same length as the vectors - ! it will re-arrange. - - if (size(perm) /= lsize_(aV)) then - if(.not.present(dieWith)) then - if(present(perrWith)) write(stderr,'(4a,i8,a,i8)') myname, & - ":: size mismatch, ", perrWith, & - "size(perm)=",size(perm)," lsize_(aV)=",lsize_(aV) - else - write(stderr,'(4a,i8,a,i8)') myname, & - ":: size mismatch, ", dieWith, & - "size(perm)=",size(perm)," lsize_(aV)=",lsize_(aV) - call die(dieWith) - endif - endif - - if(size(perm) == lsize_(aV)) then - - ! Permute integer attributes: - if(nIAttr_(aV) /= 0) then - do i=1,nIAttr_(aV) - call Permute(aV%iAttr(i,:),perm,lsize_(aV)) - end do - endif - - ! Permute real attributes: - if(nRAttr_(aV) /= 0) then - do i=1,nRAttr_(aV) - call Permute(aV%rAttr(i,:),perm,lsize_(aV)) - end do - endif - - endif - - end subroutine Permute_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Unpermute_ - Unpermute AttrVect Elements -! -! !DESCRIPTION: -! The subroutine {\tt Unpermute\_()} uses a a permutation {\tt perm} (which can -! be generated by the routine {\tt Sort\_()} in this module) to rearrange -! the entries in the attribute integer and real storage areas of the -! input attribute vector {\tt aV}--{\tt aV\%iAttr} and {\tt aV\%rAttr}, -! respectively. This is meant to be called on an {\tt aV} that has already -! been permuted but it could also be used to perform the inverse operation -! implied by {\tt perm} on an unpermuted {\tt aV}. -! -! !INTERFACE: - - subroutine Unpermute_(aV, perm, perrWith, dieWith) -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - use m_SortingTools , only : Unpermute - - implicit none - -! !INPUT PARAMETERS: -! - integer, dimension(:), intent(in) :: perm - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(inout) :: aV - -! !REVISION HISTORY: -! 23Nov05 - R. Jacob - based on Permute -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Unpermute_' - -! local variables - - integer :: i - - ! Check input arguments for compatibility--assure - ! lsize_(aV) = size(perm); that is, make sure the - ! index permutation is the same length as the vectors - ! it will re-arrange. - - if (size(perm) /= lsize_(aV)) then - if(.not.present(dieWith)) then - if(present(perrWith)) write(stderr,'(4a,i8,a,i8)') myname, & - ":: size mismatch, ", perrWith, & - "size(perm)=",size(perm)," lsize_(aV)=",lsize_(aV) - else - write(stderr,'(4a,i8,a,i8)') myname, & - ":: size mismatch, ", dieWith, & - "size(perm)=",size(perm)," lsize_(aV)=",lsize_(aV) - call die(dieWith) - endif - endif - - if(size(perm) == lsize_(aV)) then - - ! Unpermute integer attributes: - if(nIAttr_(aV) /= 0) then - do i=1,nIAttr_(aV) - call Unpermute(aV%iAttr(i,:),perm,lsize_(aV)) - end do - endif - - ! 
Permute real attributes: - if(nRAttr_(aV) /= 0) then - do i=1,nRAttr_(aV) - call Unpermute(aV%rAttr(i,:),perm,lsize_(aV)) - end do - endif - - endif - - end subroutine Unpermute_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SortPermute_ - In-place Lexicographic Sort of an AttrVect -! -! !DESCRIPTION: -! -! The subroutine {\tt SortPermute\_()} uses the routine {\tt Sort\_()} -! to create an index permutation {\tt perm} that will place the AttrVect -! entries in the lexicographic order defined by the keys in the List -! variable {\tt key\_list}. This permutation is then used by the routine -! {\tt Permute\_()} to place the AttreVect entries in lexicographic order. -! -! !INTERFACE: - - subroutine SortPermute_(aV, key_list, descend, perrWith, dieWith) -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: key_list - logical , dimension(:), optional, intent(in) :: descend - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(inout) :: aV - -! !REVISION HISTORY: -! 24Oct00 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Permute_' - -! local variables - - ! Permutation array pointer perm(:) - integer, dimension(:), pointer :: perm - ! Error flag ierr - integer :: ierr - - ! Step One: Generate the index permutation perm(:) - - if(present(descend)) then - call Sort_(aV, key_list, perm, descend, perrWith, dieWith) - else - call Sort_(aV, key_list, perm, perrWith=perrWith, & - dieWith=dieWith) - endif - - ! Step Two: Apply the index permutation perm(:) - - call Permute_(aV, perm, perrWith, dieWith) - - ! Step Three: deallocate temporary array used to - ! store the index permutation (this was allocated - ! in the routine Sort_() - - deallocate(perm, stat=ierr) - - end subroutine SortPermute_ - -! Sorting: -! -! aV%iVect(:,:) = & -! aV%iVect((/(indx(i),i=1,lsize(aV))/),:) -! -! aV%iVect((/(indx(i),i=1,lsize(aV))/),:) = & -! aV%iVect(:,:) -! -! aV%iVect(:,ikx),aV%iVect(:,iks) -! -! - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aVaVSharedAttrIndexList_ - AttrVect shared attributes. -! -! !DESCRIPTION: {\tt aVaVSharedAttrIndexList\_()} takes a pair of -! user-supplied {\tt AttrVect} variables {\tt aV1} and {\tt aV2}, -! and for choice of either {\tt REAL} or {\tt INTEGER} attributes (as -! specified literally in the input {\tt CHARACTER} argument {\tt attrib}) -! returns the number of shared attributes {\tt NumShared}, and arrays of -! indices {\tt Indices1} and {\tt Indices2} to their storage locations -! in {\tt aV1} and {\tt aV2}, respectively. -! -! {\bf N.B.:} This routine returns two allocated arrays---{\tt Indices1(:)} -! and {\tt Indices2(:)}---which must be deallocated once the user no longer -! needs them. Failure to do this will create a memory leak. -! -! !INTERFACE: - - subroutine aVaVSharedAttrIndexList_(aV1, aV2, attrib, NumShared, & - Indices1, Indices2) - -! -! !USES: -! 
- use m_stdio - use m_die, only : MP_perr_die, die, warn - - use m_List, only : GetSharedListIndices - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV1 - type(AttrVect), intent(in) :: aV2 - character(len=*), intent(in) :: attrib - -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: NumShared - integer, dimension(:), pointer :: Indices1 - integer, dimension(:), pointer :: Indices2 - -! !REVISION HISTORY: -! 07Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::aVaVSharedAttrIndexList_' - - integer :: ierr - - ! Based on the value of the argument attrib, pass the - ! appropriate pair of Lists for comparison... - - select case(trim(attrib)) - case('REAL','real') - call GetSharedListIndices(aV1%rList, aV2%rList, NumShared, & - Indices1, Indices2) - case('INTEGER','integer') - call GetSharedListIndices(aV1%iList, aV2%iList, NumShared, & - Indices1, Indices2) - case default - write(stderr,'(4a)') myname_,":: value of argument attrib=",attrib, & - " not recognized. Allowed values: REAL, real, INTEGER, integer" - ierr = 1 - call die(myname_, 'invalid value for attrib', ierr) - end select - - end subroutine aVaVSharedAttrIndexList_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Climate and Global Dynamics Division, National Center for Atmospheric Research ! -!BOP ----------------------------------------------------------------------------- -! -! !IROUTINE: SharedIndices_ - AttrVect shared attributes and auxiliary information -! -! !DESCRIPTION: {\tt SharedIndices\_()} takes a pair of user-supplied -! {\tt AttrVect} variables {\tt aV1} and {\tt aV2}, and returns a -! structure of type {\tt AVSharedIndices} ({\tt sharedIndices}). This -! structure contains arrays of indices to the locations of the shared -! attributes, as well as auxiliary information. The structure contains -! information on both the {\tt REAL} and {\tt INTEGER} attributes. See -! documentation for the {\tt SharedIndicesOneType\_} subroutine for some -! additional details, as much of the work is done there. -! -! {\bf N.B.:} The returned structure, {\tt sharedIndices}, contains -! allocated arrays that must be deallocated once the user no longer -! needs them. This should be done through a call to {\tt -! cleanSharedIndices\_}. -! -! !INTERFACE: - - subroutine SharedIndices_(aV1, aV2, sharedIndices) - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV1 - type(AttrVect), intent(in) :: aV2 - -! !INPUT/OUTPUT PARAMETERS: -! - type(AVSharedIndices), intent(inout) :: sharedIndices - -! !REVISION HISTORY: -! 28Apr11 - W.J. Sacks - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SharedIndices_' - - call SharedIndicesOneType_(aV1, aV2, 'REAL', sharedIndices%shared_real) - call SharedIndicesOneType_(aV1, aV2, 'INTEGER', sharedIndices%shared_integer) - - end subroutine SharedIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Climate and Global Dynamics Division, National Center for Atmospheric Research ! -!BOP ----------------------------------------------------------------------------- -! -! !IROUTINE: SharedIndicesOneType_ - AttrVect shared attributes and auxiliary information, for one data type -! -! !DESCRIPTION: {\tt SharedIndicesOneType\_()} takes a pair of -! 
user-supplied {\tt AttrVect} variables {\tt aV1} and {\tt aV2}, and -! for choice of either {\tt REAL} or {\tt INTEGER} attributes (as -! specified literally in the input {\tt CHARACTER} argument {\tt -! attrib}) returns a structure of type {\tt AVSharedIndicesOneType} ({\tt -! sharedIndices}). This structure contains arrays of indices to the -! locations of the shared attributes of the given type, as well as -! auxiliary information. -! -! The {\tt aVindices1} and {\tt aVindices2} components of {\tt -! sharedIndices} will be indices into {\tt aV1} and {\tt aV2}, -! respectively. -! -! {\bf N.B.:} The returned structure, {\tt sharedIndices}, contains -! allocated arrays that must be deallocated once the user no longer -! needs them. This should be done through a call to {\tt -! cleanSharedIndicesOneType\_}. Even if there are no attributes in -! common between {\tt aV1} and {\tt aV2}, {\tt sharedIndices} will still -! be initialized, and memory will still be allocated. Furthermore, if an -! already-initialized {\tt sharedIndices} variable is to be given new -! values, {\tt cleanSharedIndicesOneType\_} must be called before {\tt -! SharedIndicesOneType\_} is called a second time, in order to prevent a -! memory leak. -! -! !INTERFACE: - - subroutine SharedIndicesOneType_(aV1, aV2, attrib, sharedIndices) - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: aV1 - type(AttrVect), intent(in) :: aV2 - character(len=*), intent(in) :: attrib - -! !INPUT/OUTPUT PARAMETERS: -! - type(AVSharedIndicesOneType), intent(inout) :: sharedIndices - -! !REVISION HISTORY: -! 28Apr11 - W.J. Sacks - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SharedIndicesOneType_' - integer :: i - - ! Check appropriate attributes (real or integer) for matching indices - call aVaVSharedAttrIndexList_(aV1, aV2, attrib, sharedIndices%num_indices, & - sharedIndices%aVindices1, sharedIndices%aVindices2) - - sharedIndices%data_flag = attrib - - ! Check indices for contiguous segments in memory - sharedIndices%contiguous=.true. - do i=2,sharedIndices%num_indices - if(sharedIndices%aVindices1(i) /= sharedIndices%aVindices1(i-1)+1) then - sharedIndices%contiguous = .false. - exit - endif - enddo - if(sharedIndices%contiguous) then - do i=2,sharedIndices%num_indices - if(sharedIndices%aVindices2(i) /= sharedIndices%aVindices2(i-1)+1) then - sharedIndices%contiguous=.false. - exit - endif - enddo - endif - - end subroutine SharedIndicesOneType_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Climate and Global Dynamics Division, National Center for Atmospheric Research ! -!BOP ----------------------------------------------------------------------------- -! -! !IROUTINE: cleanSharedIndices_ - Deallocate allocated memory structures of an AVSharedIndices structure -! -! !DESCRIPTION: This routine deallocates the allocated memory structures -! of the input/output {\tt AVSharedIndicesOneType} argument {\tt -! sharedIndices}, if they are currently associated. It also resets -! other components of this structure to a default state. The success -! (failure) of this operation is signified by a zero (non-zero) value of -! the optional {\tt INTEGER} output argument {\tt stat}. If {\tt -! clean\_()} is invoked without supplying {\tt stat}, and any of the -! deallocation operations fail, the routine will terminate with an error -! message. 
If multiple errors occur, {\tt stat} will give the error -! condition for the last error. -! -! !INTERFACE: - - subroutine cleanSharedIndices_(sharedIndices, stat) - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(AVSharedIndices), intent(inout) :: sharedIndices - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 28Apr11 - W.J. Sacks - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::cleanSharedIndices_' - integer :: ier - - if(present(stat)) stat=0 - - call cleanSharedIndicesOneType_(sharedIndices%shared_real, stat=ier) - if(present(stat) .and. ier /= 0) then - stat = ier - end if - - call cleanSharedIndicesOneType_(sharedIndices%shared_integer, stat=ier) - if(present(stat) .and. ier /= 0) then - stat = ier - end if - - end subroutine cleanSharedIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Climate and Global Dynamics Division, National Center for Atmospheric Research ! -!BOP ----------------------------------------------------------------------------- -! -! !IROUTINE: cleanSharedIndicesOneType_ - Deallocate allocated memory structures of an AVSharedIndicesOneType structure -! -! !DESCRIPTION: This routine deallocates the allocated memory structures -! of the input/output {\tt AVSharedIndices} argument {\tt -! sharedIndices}, if they are currently associated. It also resets -! other components of this structure to a default state. The success -! (failure) of this operation is signified by a zero (non-zero) value of -! the optional {\tt INTEGER} output argument {\tt stat}. If {\tt -! clean\_()} is invoked without supplying {\tt stat}, and any of the -! deallocation operations fail, the routine will terminate with an error -! message. If multiple errors occur, {\tt stat} will give the error -! condition for the last error. -! -! !INTERFACE: - - subroutine cleanSharedIndicesOneType_(sharedIndices, stat) -! -! !USES: -! - use m_die, only : die - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(AVSharedIndicesOneType), intent(inout) :: sharedIndices - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 28Apr11 - W.J. Sacks - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::cleanSharedIndicesOneType_' - integer :: ier - - if(present(stat)) stat=0 - - if(associated(sharedIndices%aVindices1)) then - - deallocate(sharedIndices%aVindices1,stat=ier) - - if (ier /= 0) then - if(present(stat)) then - stat=ier - else - call die(myname_,'deallocate(sharedIndices%aVindices1)',ier) - endif - endif - - endif - - if(associated(sharedIndices%aVindices2)) then - - deallocate(sharedIndices%aVindices2,stat=ier) - - if (ier /= 0) then - if(present(stat)) then - stat=ier - else - call die(myname_,'deallocate(sharedIndices%aVindices2)',ier) - endif - endif - - endif - - ! Reset other components to default values - sharedIndices%num_indices = 0 - sharedIndices%contiguous = .false. - sharedIndices%data_flag = ' ' - - end subroutine cleanSharedIndicesOneType_ - - end module m_AttrVect -!. 
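!=======================================================================
! [Editor's note: illustrative sketch, not part of the removed source.
!  It shows the precomputed-sharedIndices pattern recommended in the
!  RCopy_/ICopy_/Copy_ prologues above for repeated copies between the
!  same pair of AttrVects. The public names Copy, SharedIndices,
!  cleanSharedIndices and the type AVSharedIndices follow the module's
!  documented interfaces, but this driver, its argument names, and the
!  step count are assumptions.]
!
      subroutine repeated_copy_sketch(aVin, aVout, nsteps)
         use m_AttrVect, only : AttrVect, AVSharedIndices, &
                                SharedIndices, Copy, cleanSharedIndices
         implicit none
         type(AttrVect), intent(in)    :: aVin
         type(AttrVect), intent(inout) :: aVout
         integer,        intent(in)    :: nsteps

         type(AVSharedIndices) :: shared
         integer               :: istep, ier

         ! find the REAL and INTEGER attributes shared by aVin and aVout once
         call SharedIndices(aVin, aVout, shared)

         ! reuse the index lists for every copy between this pair of AttrVects
         do istep = 1, nsteps
            call Copy(aVin, aVout, sharedIndices=shared)
         end do

         ! deallocate the index arrays held inside the structure
         call cleanSharedIndices(shared, stat=ier)
      end subroutine repeated_copy_sketch
!=======================================================================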
- - - - diff --git a/src/externals/mct/mct/m_AttrVectComms.F90 b/src/externals/mct/mct/m_AttrVectComms.F90 deleted file mode 100644 index 777a1e504ad..00000000000 --- a/src/externals/mct/mct/m_AttrVectComms.F90 +++ /dev/null @@ -1,1683 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_AttrVectComms - MPI Communications Methods for the AttrVect -! -! !DESCRIPTION: -! -! This module defines the communications methods for the {\tt AttrVect} -! datatype (see the module {\tt m\_AttrVect} for more information about -! this class and its methods). MCT's communications are implemented -! in terms of the Message Passing Interface (MPI) standard, and we have -! as best as possible, made the interfaces to these routines appear as -! similar as possible to the corresponding MPI routines. For the -! { \tt AttrVect}, we supply {\em blocking} point-to-point send and -! receive operations. We also supply the following collective -! operations: broadcast, gather, and scatter. The gather and scatter -! operations rely on domain decomposition descriptors that are defined -! elsewhere in MCT: the {\tt GlobalMap}, which is a one-dimensional -! decomposition (see the MCT module {\tt m\_GlobalMap} for more details); -! and the {\tt GlobalSegMap}, which is a segmented decomposition capable -! of supporting multidimensional domain decompositions (see the MCT module -! {\tt m\_GlobalSegMap} for more details). -! -! !INTERFACE: - module m_AttrVectComms -! -! !USES: -! - use m_AttrVect ! AttrVect class and its methods - - implicit none - - private ! except - - public :: gather ! gather all local vectors to the root - public :: scatter ! scatter from the root to all PEs - public :: bcast ! bcast from root to all PEs - public :: send ! send an AttrVect - public :: recv ! receive an AttrVect - - interface gather ; module procedure & - GM_gather_, & - GSM_gather_ - end interface - interface scatter ; module procedure & - GM_scatter_, & - GSM_scatter_ - end interface - interface bcast ; module procedure bcast_ ; end interface - interface send ; module procedure send_ ; end interface - interface recv ; module procedure recv_ ; end interface - -! !REVISION HISTORY: -! 27Oct00 - J.W. Larson - relocated routines -! from m_AttrVect to create this module. -! 15Jan01 - J.W. Larson - Added APIs for -! GSM_gather_() and GSM_scatter_(). -! 9May01 - J.W. Larson - Modified GM_scatter_ -! so its communication model agrees with MPI_scatter(). -! Also tidied up prologues in all module routines. -! 7Jun01 - J.W. Larson - Added send() -! and recv(). -! 3Aug01 - E.T. Ong - in GSM_scatter, call -! GlobalMap_init with actual shaped array to satisfy -! Fortran 90 standard. See comment in subroutine. -! 23Aug01 - E.T. Ong - replaced assignment(=) -! with copy for list type to avoid compiler bugs in pgf90. -! Added more error checking in gsm scatter. Fixed minor bugs -! in gsm and gm gather. -! 13Dec01 - E.T. Ong - GSM_scatter, allow users -! to scatter with a haloed GSMap. Fixed some bugs in -! GM_scatter. -! 19Dec01 - E.T. Ong - allow bcast of an AttrVect -! with only an integer or real attribute. -! 27Mar02 - J.W. Larson - Corrected usage of -! m_die routines throughout this module. 
-!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_AttrVectComms' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: send_ - Point-to-point Send of an AttrVect -! -! !DESCRIPTION: This routine takes an input {\tt AttrVect} argument -! {\tt inAV} and sends it to processor {\tt dest} on the communicator -! associated with the Fortran {\tt INTEGER} MPI communicator handle -! {\tt comm}. The overalll message is tagged by the input {\tt INTEGER} -! argument {\tt TagBase}. The success (failure) of this operation is -! reported in the zero (nonzero) optional output argument {\tt status}. -! -! {\bf N.B.}: One must avoid assigning elsewhere the MPI tag values -! between {\tt TagBase} and {\tt TagBase+7}, inclusive. This is -! because {\tt send\_()} performs the send of the {\tt AttrVect} as -! a series of eight send operations. -! -! !INTERFACE: - - subroutine send_(inAV, dest, TagBase, comm, status) -! -! !USES: -! - use m_stdio - use m_mpif90 - use m_die - - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_List, only : List_nitem => nitem - use m_List, only : List_send => send - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: inAV - integer, intent(in) :: dest - integer, intent(in) :: TagBase - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 7Jun01 - J.W. Larson - initial version. -! 13Jun01 - J.W. Larson - Initialize status -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::send_' - - logical :: ListAssoc(2) - integer :: ierr - integer :: AVlength - - ! Initialize status (if present) - - if(present(status)) status = 0 - - - ! Step 1. Are inAV%iList and inAV%rList filled? Store - ! the answers in the LOGICAL array ListAssoc and send. - - ListAssoc(1) = List_allocated(inAV%iList) - ListAssoc(2) = List_allocated(inAV%rList) - - if(.NOT. (ListAssoc(1).or.ListAssoc(2)) ) then - call die(myname_,"inAV has not been initialized") - endif - - call MPI_SEND(ListAssoc, 2, MP_LOGICAL, dest, TagBase, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: MPI_SEND(ListAssoc...',ierr) - endif - - - ! Step 2. Send non-blank inAV%iList and inAV%rList. - - if(ListAssoc(1)) then - call List_send(inAV%iList, dest, TagBase+1, comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_send(inAV%iList...' - status = ierr - return - else - call die(myname_,':: call List_send(inAV%iList...',ierr) - endif - endif - endif - - if(ListAssoc(2)) then - call List_send(inAV%rList, dest, TagBase+3, comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_send(inAV%rList...' - status = ierr - return - else - call die(myname_,':: call List_send(inAV%rList...',ierr) - endif - endif - endif - - ! Step 3. Determine and send the lengths of inAV%iAttr(:,:) - ! and inAV%rAttr(:,:). 
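Since send_ transmits an AttrVect as eight separate MPI messages tagged TagBase through TagBase+7 (and recv_ consumes them the same way), two transfers between the same pair of processes need base tags spaced at least eight apart. A short sketch of a matched pair of transfers, assuming the placeholder variables av1, av2, myID, comm and ier:

      integer, parameter :: tagA = 100, tagB = 110   ! tagB - tagA >= 8

      if(myID == 0) then          ! sending side
         call send(av1, 1, tagA, comm, ier)
         call send(av2, 1, tagB, comm, ier)
      else if(myID == 1) then     ! receiving side
         call recv(av1, 0, tagA, comm, ier)
         call recv(av2, 0, tagB, comm, ier)
      endif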
- - AVlength = AttrVect_lsize(inAV) - - if(AVlength<=0) then - call die(myname_,"Size of inAV <= 0",AVLength) - endif - - call MPI_SEND(AVlength, 1, MP_type(AVlength), dest, TagBase+5, & - comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_SEND(AVlength...',ierr) - endif - - ! Step 4. If AVlength > 0, we may have INTEGER and REAL - ! data to send. Send as needed. - - if(AVlength > 0) then - - if(ListAssoc(1)) then - - ! Send the INTEGER data stored in inAV%iAttr(:,:) - - call MPI_SEND(inAV%iAttr(1,1), AVlength*List_nitem(inAV%iList), & - MP_type(inAV%iAttr(1,1)), dest, TagBase+6, & - comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_SEND(inAV%iAttr...',ierr) - endif - - endif ! if(associated(inAV%rList)) - - if(ListAssoc(2)) then - - ! Send the REAL data stored in inAV%rAttr(:,:) - - call MPI_SEND(inAV%rAttr(1,1), AVlength*List_nitem(inAV%rList), & - MP_type(inAV%rAttr(1,1)), dest, TagBase+7, & - comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_SEND(inAV%rAttr...',ierr) - endif - - endif ! if(associated(inAV%rList)) - - endif ! if (AVlength > 0) - - end subroutine send_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: recv_ - Point-to-point Receive of an AttrVect -! -! !DESCRIPTION: This routine receives the output {\tt AttrVect} argument -! {\tt outAV} from processor {\tt source} on the communicator associated -! with the Fortran {\tt INTEGER} MPI communicator handle {\tt comm}. The -! overall message is tagged by the input {\tt INTEGER} argument -! {\tt TagBase}. The success (failure) of this operation is reported in -! the zero (nonzero) optional output argument {\tt status}. -! -! {\bf N.B.}: One must avoid assigning elsewhere the MPI tag values -! between {\tt TagBase} and {\tt TagBase+7}, inclusive. This is -! because {\tt recv\_()} performs the receive of the {\tt AttrVect} as -! a series of eight receive operations. -! -! !INTERFACE: - - subroutine recv_(outAV, dest, TagBase, comm, status) -! -! !USES: -! - use m_stdio - use m_mpif90 - use m_die - - use m_List, only : List - use m_List, only : List_nitem => nitem - use m_List, only : List_recv => recv - - use m_AttrVect, only : AttrVect - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: dest - integer, intent(in) :: TagBase - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(out) :: outAV - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 7Jun01 - J.W. Larson - initial working version. -! 13Jun01 - J.W. Larson - Initialize status -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::recv_' - - logical :: ListAssoc(2) - integer :: ierr - integer :: AVlength - integer :: MPstatus(MP_STATUS_SIZE) - - ! Initialize status (if present) - - if(present(status)) status = 0 - - - ! Step 1. Are outAV%iList and outAV%rList filled? TRUE - ! entries in the LOGICAL array ListAssoc(:) correspond - ! to Non-blank Lists...that is: - ! - ! ListAssoc(1) = .TRUE. <==> associated(outAV%iList%bf) - ! ListAssoc(2) = .TRUE. <==> associated(outAV%rList%bf) - - call MPI_RECV(ListAssoc, 2, MP_LOGICAL, dest, TagBase, comm, & - MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: MPI_RECV(ListAssoc...',ierr) - endif - - - ! Step 2. 
Receive non-blank outAV%iList and outAV%rList. - - if(ListAssoc(1)) then - call List_recv(outAV%iList, dest, TagBase+1, comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_recv(outAV%iList...' - status = ierr - return - else - call die(myname_,':: call List_recv(outAV%iList...',ierr) - endif - endif - endif - - if(ListAssoc(2)) then - call List_recv(outAV%rList, dest, TagBase+3, comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_recv(outAV%rList...' - status = ierr - return - else - call die(myname_,':: call List_recv(outAV%rList...',ierr) - endif - endif - endif - - ! Step 3. Receive the lengths of outAV%iAttr(:,:) and outAV%rAttr(:,:). - - call MPI_RECV(AVlength, 1, MP_type(AVlength), dest, TagBase+5, & - comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_RECV(AVlength...',ierr) - endif - - ! Step 4. If AVlength > 0, we may have to receive INTEGER - ! and/or REAL data. Receive as needed. - - if(AVlength > 0) then - - if(ListAssoc(1)) then - - ! Allocate outAV%iAttr(:,:) - - allocate(outAV%iAttr(List_nitem(outAV%iList),AVlength), stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outAV%iAttr)",ierr) - - ! Receive the INTEGER data to outAV%iAttr(:,:) - - call MPI_RECV(outAV%iAttr(1,1), AVlength*List_nitem(outAV%iList), & - MP_type(outAV%iAttr(1,1)), dest, TagBase+6, & - comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_RECV(outAV%iAttr...',ierr) - endif - - endif ! if(associated(outAV%rList)) - - if(ListAssoc(2)) then - - ! Allocate outAV%rAttr(:,:) - - allocate(outAV%rAttr(List_nitem(outAV%rList),AVlength), stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outAV%rAttr)",ierr) - - ! Receive the REAL data to outAV%rAttr(:,:) - - call MPI_RECV(outAV%rAttr(1,1), AVlength*List_nitem(outAV%rList), & - MP_type(outAV%rAttr(1,1)), dest, TagBase+7, & - comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_RECV(outAV%rAttr...',ierr) - endif - - endif ! if(associated(outAV%rList)) - - endif ! if (AVlength > 0) - - end subroutine recv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GM_gather_ - Gather an AttrVect Distributed by a GlobalMap -! -! !DESCRIPTION: -! This routine gathers a {\em distributed} {\tt AttrVect} {\tt iV} to -! the {\tt root} process, and returns it in the output {\tt AttrVect} -! argument {\tt oV}. The decomposition of {\tt iV} is described by -! the input {\tt GlobalMap} argument {\tt GMap}. The input {\tt INTEGER} -! argument {\tt comm} is the Fortran integer MPI communicator handle. -! The success (failure) of this operation corresponds to a zero (nonzero) -! value of the optional output {\tt INTEGER} argument {\tt stat}. -! -! !INTERFACE: - - subroutine GM_gather_(iV, oV, GMap, root, comm, stat) -! -! !USES: -! 
- use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : FP - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_lsize => lsize - use m_GlobalMap, only : GlobalMap_gsize => gsize - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_clean => clean - use m_FcComms, only : fc_gatherv_int, fc_gatherv_fp - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: iV - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(out) :: oV - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Apr98 - Jing Guo - initial prototype/prolog/code -! 27Oct00 - J.W. Larson - relocated from -! m_AttrVect -! 15Jan01 - J.W. Larson - renamed GM_gather_ -! 9May01 - J.W. Larson - tidied up prologue -! 18May01 - R.L. Jacob - use MP_Type function -! to determine type for mpi_gatherv -! 31Jan09 - P.H. Worley - replaced call to -! MPI_gatherv with call to flow controlled gather routines -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GM_gather_' - integer :: nIA,nRA,niV,noV,ier - integer :: myID - integer :: mp_type_Av - type(AttrVect) :: nonRootAV - - if(present(stat)) stat=0 - - call MP_comm_rank(comm, myID, ier) - if(ier /= 0) then - call MP_perr_die(myname_,':: call MP_COMM_RANK()',ier) - endif - - ! Verify the input: a _scatterd_ vector - - niV=GlobalMap_lsize(GMap) - noV=AttrVect_lsize(iV) - - if(niV /= noV) then - write(stderr,'(2a,i4,a,i4,a,i4)') myname_, & - ': invalid input, lsize(GMap) =',niV, & - ', lsize(iV) =',noV, 'myID =', myID - if(.not.present(stat)) call die(myname_) - stat=-1 - return - endif - - noV=GlobalMap_gsize(GMap) ! the gathered local size, as for the output - - if(myID == root) then - call AttrVect_init(oV,iV,noV) - call AttrVect_zero(oV) - else - call AttrVect_init(nonRootAV,iV,1) - call AttrVect_zero(nonRootAV) - endif - - niV=GlobalMap_lsize(GMap) ! the scattered local size, as for the input - - nIA=AttrVect_nIAttr(iV) ! number of INTEGER attributes - nRA=AttrVect_nRAttr(iV) ! number of REAL attributes - - mp_type_Av = MP_Type(1._FP) ! set mpi type to same as AV%rAttr - - if(nIA > 0) then - - if(myID == root) then - - call fc_gatherv_int(iV%iAttr,niV*nIA,MP_INTEGER, & - oV%iAttr,GMap%counts*nIA,GMap%displs*nIA, & - MP_INTEGER,root,comm) - - else - - call fc_gatherv_int(iV%iAttr,niV*nIA,MP_INTEGER, & - nonRootAV%iAttr,GMap%counts*nIA,GMap%displs*nIA, & - MP_INTEGER,root,comm) - - endif ! if(myID == root) - - endif ! if(nIA > 0) - - if(nRA > 0) then - - if(myID == root) then - - call fc_gatherv_fp(iV%rAttr,niV*nRA,mp_type_Av, & - oV%rAttr,GMap%counts*nRA,GMap%displs*nRA, & - mp_type_Av,root,comm) - - else - - call fc_gatherv_fp(iV%rAttr,niV*nRA,mp_type_Av, & - nonRootAV%rAttr,GMap%counts*nRA,GMap%displs*nRA, & - mp_type_Av,root,comm) - - endif ! if(myID == root) - - endif ! 
if(nRA > 0) - - - - if(myID /= root) then - call AttrVect_clean(nonRootAV,ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ':: AttrVect_clean(nonRootAV) failed for non-root & - &process: myID = ', myID - call die(myname_,':: AttrVect_clean failed & - &for nonRootAV off of root',ier) - endif - endif - - end subroutine GM_gather_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GSM_gather_ - Gather an AttrVect Distributed by a GlobalSegMap -! -! !DESCRIPTION: -! The routine {\tt GSM\_gather\_()} takes a distributed input -! {\tt AttrVect} argument {\tt iV}, whose decomposition is described -! by the input {\tt GlobalSegMap} argument {\tt GSMap}, and gathers -! it to the output {\tt AttrVect} argument {\tt oV}. The gathered -! {\tt AttrVect} {\tt oV} is valid only on the root process specified -! by the input argument {\tt root}. The communicator used to gather -! the data is specified by the argument {\tt comm}. The success (failure) -! is reported in the zero (non-zero) value of the output argument -! {\tt stat}. -! -! {\tt GSM\_gather\_()} converts the problem of gathering data -! according to a {\tt GlobalSegMap} into the simpler problem of -! gathering data as specified by a {\tt GlobalMap}. The {\tt GlobalMap} -! variable {\tt GMap} is created based on the local storage requirements -! for each distributed piece of {\tt iV}. On the root, a complete -! (including halo points) gathered copy of {\tt iV} is collected into -! the temporary {\tt AttrVect} variable {\tt workV} (the length of -! {\tt workV} is the larger of {\tt GlobalSegMap\_GlobalStorage(GSMap)} or -! {\tt GlobalSegMap\_GlobalSize(GSMap)}). The -! variable {\tt workV} is segmented by process, and segments are -! copied into it by process, but ordered in the same order the segments -! appear in {\tt GSMap}. Once {\tt workV} is loaded, the data are -! copied segment-by-segment to their appropriate locations in the output -! {\tt AttrVect} {\tt oV}. -! -! !INTERFACE: - - subroutine GSM_gather_(iV, oV, GSMap, root, comm, stat, rdefault, idefault) -! -! !USES: -! -! Message-passing environment utilities (mpeu) modules: - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only: FP -! GlobalSegMap and associated services: - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_comp_id => comp_id - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - use m_GlobalSegMap, only : GlobalSegMap_haloed => haloed - use m_GlobalSegMap, only : GlobalSegMap_GlobalStorage => GlobalStorage -! AttrVect and associated services: - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_clean => clean -! GlobalMap and associated services: - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_init => init - use m_GlobalMap, only : GlobalMap_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! 
- type(AttrVect), intent(in) :: iV - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: root - integer, intent(in) :: comm - real(FP), optional, intent(in) :: rdefault - integer, optional, intent(in) :: idefault - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(out) :: oV - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Jan01 - J.W. Larson - API specification. -! 25Feb01 - J.W. Larson - Prototype code. -! 26Apr01 - R.L. Jacob - add use statement for -! AttVect_clean -! 9May01 - J.W. Larson - tidied up prologue -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -! 20Aug01 - E.T. Ong - Added error checking for -! matching processors in gsmap and comm. Corrected -! current_pos assignment. -! 23Nov01 - R. Jacob - zero the oV before copying in -! gathered data. -! 27Jul07 - R. Loy - add Tony's suggested improvement -! for a default value in the output AV -! 11Aug08 - R. Jacob - add Pat Worley's faster way -! to initialize lns -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GSM_gather_' - -! Temporary workspace AttrVect: - type(AttrVect) :: workV -! Component ID and number of segments for GSMap: - integer :: comp_id, ngseg, iseg -! Total length of GSMap segments laid end-to-end: - integer :: global_storage -! Error Flag - integer :: ierr -! Number of processes on communicator, and local rank: - integer :: NumProcs, myID -! Total local storage on each pe according to GSMap: - integer, dimension(:), allocatable :: lns -! Temporary GlobalMap used to scatter the segmented (by pe) data - type(GlobalMap) :: workGMap -! Loop counters and temporary indices: - integer :: m, n, ilb, iub, olb, oub, pe -! workV segment tracking index array: - integer, dimension(:), allocatable :: current_pos -! workV sizes - integer :: gssize, gstorage - - ! Initialize stat (if present) - - if(present(stat)) stat = 0 - - ! Initial Check: If GSMap contains halo points, die - - if(GlobalSegMap_haloed(GSMap)) then - ierr = 1 - call die(myname_,"Input GlobalSegMap haloed--not allowed",ierr) - endif - - ! Which process am I? - - call MPI_COMM_RANK(comm, myID, ierr) - - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_COMM_RANK()',ierr) - endif - ! How many processes are there on this communicator? - - call MPI_COMM_SIZE(comm, NumProcs, ierr) - - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_COMM_SIZE()',ierr) - endif - - ! Processor Check: Do the processors on GSMap match those in comm? - - if(MAXVAL(GSMap%pe_loc) > (NumProcs-1)) then - stat=2 - write(stderr,*) myname_, & - ":: Procs in GSMap%pe_loc do not match procs in communicator ", & - NumProcs-1, MAXVAL(GSMap%pe_loc) - call die(myname_, & - "Procs in GSMap%pe_loc do not match procs in communicator",stat) - endif - - if(myID == root) then - - ! Allocate a precursor to a GlobalMap accordingly... - - allocate(lns(0:NumProcs-1), stat=ierr) - - ! And Load it... - - lns(:)=0 - do iseg=1,GSMap%ngseg - n = GSMap%pe_loc(iseg) - lns(n) = lns(n) + GSMap%length(iseg) - end do - - else - - allocate(lns(0)) ! This conforms to F90 standard for shaped arguments. - - endif ! if(myID == root) - - ! Determine the component id of GSMap: - - comp_id = GlobalSegMap_comp_id(GSMap) - - ! Create working GlobalMap workGMap (used for the gather): - - call GlobalMap_init(workGMap, comp_id, lns, root, comm) - - ! Gather the Data process-by-process to workV... - ! do not include stat argument; bypass an argument check in gm_gather. 
- - call GM_gather_(iV, workV, workGMap, root, comm, stat) - - ! On the root, initialize oV, and load the contents of - !workV into it... - - if(myID == root) then - -! bug fix: gstorage will be bigger than gssize if GSmap is -! haloed. But gstorage may be smaller than gsize if GSmap -! is masked. So take the maximum. RLJ - gstorage = GlobalSegMap_GlobalStorage(GSMap) - gssize = GlobalSegMap_gsize(GSMap) - global_storage = MAX(gstorage,gssize) - - call AttrVect_init(oV,iV,global_storage) - call AttrVect_zero(oV) - - if (present(rdefault)) then - if (AttrVect_nRAttr(oV) > 0) oV%rAttr=rdefault - endif - if (present(idefault)) then - if (AttrVect_nIAttr(oV) > 0) oV%iAttr=idefault - endif - - ! On the root, allocate current position index for - ! each process chunk: - - allocate(current_pos(0:NumProcs-1), stat=ierr) - - if(ierr /= 0) then - write(stderr,*) myname_,':: allocate(current_pos(..) failed,', & - 'stat = ',ierr - if(present(stat)) then - stat=ierr - else - call die(myname_,'allocate(current_pos(..) failed.' ) - endif - endif - - ! Initialize current_pos(:) using GMap%displs(:) - - do n=0,NumProcs-1 - current_pos(n) = workGMap%displs(n) + 1 - end do - - ! Load each segment of iV into its appropriate segment - ! of workV: - - ngseg = GlobalSegMap_ngseg(GSMap) - - do n=1,ngseg - - ! Determine which process owns segment n: - - pe = GSMap%pe_loc(n) - - ! Input map (lower/upper indicess) of segment of iV: - - ilb = current_pos(pe) - iub = current_pos(pe) + GSMap%length(n) - 1 - - ! Output map of (lower/upper indicess) segment of workV: - - olb = GSMap%start(n) - oub = GSMap%start(n) + GSMap%length(n) - 1 - - ! Increment current_pos(n) for next time: - - current_pos(pe) = current_pos(pe) + GSMap%length(n) - - ! Now we are equipped to do the copy: - - do m=1,AttrVect_nIAttr(iV) - oV%iAttr(m,olb:oub) = workV%iAttr(m,ilb:iub) - end do - - do m=1,AttrVect_nRAttr(iV) - oV%rAttr(m,olb:oub) = workV%rAttr(m,ilb:iub) - end do - - end do ! do n=1,ngseg - - ! Clean up current_pos, which was only allocated on the root - - deallocate(current_pos, stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,'error in deallocate(current_pos), stat=',ierr - if(present(stat)) then - stat=ierr - else - call die(myname_) - endif - endif - endif ! if(myID == root) - - ! At this point, we are finished. The data have been gathered - ! to oV - - ! Finally, clean up allocated structures: - - if(myID == root) call AttrVect_clean(workV) - call GlobalMap_clean(workGMap) - - deallocate(lns, stat=ierr) - - if(ierr /= 0) then - write(stderr,*) myname_,'error in deallocate(lns), stat=',ierr - if(present(stat)) then - stat=ierr - else - call die(myname_) - endif - endif - - end subroutine GSM_gather_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GM_scatter_ - Scatter an AttrVect Using a GlobalMap -! -! !DESCRIPTION: -! The routine {\tt GM\_scatter\_} takes an input {\tt AttrVect} type -! {\tt iV} (valid only on the root), and scatters it to a distributed -! {\tt AttrVect} {\tt oV}. The input {\tt GlobalMap} argument -! {\tt GMap} dictates how {\tt iV} is scattered to {\tt oV}. The -! success (failure) of this routine is reported in the zero (non-zero) -! value of the output argument {\tt stat}. -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt oV} represents -! dynamically allocated memory. 
When it is no longer needed, it should -! be deallocated by invoking {\tt AttrVect\_clean()} (see the module -! {\tt m\_AttrVect} for more details). -! -! !INTERFACE: - - subroutine GM_scatter_(iV, oV, GMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : FP - - use m_List, only : List - use m_List, only : List_copy => copy - use m_List, only : List_bcast => bcast - use m_List, only : List_clean => clean - use m_List, only : List_nullify => nullify - use m_List, only : List_nitem => nitem - - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_lsize => lsize - use m_GlobalMap, only : GlobalMap_gsize => gsize - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: iV - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(out) :: oV - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 21Apr98 - Jing Guo - initial prototype/prolog/code -! 27Oct00 - J.W. Larson - relocated from -! m_AttrVect -! 15Jan01 - J.W. Larson - renamed GM_scatter_ -! 8Feb01 - J.W. Larson - add logic to prevent -! empty calls (i.e. no data in buffer) to MPI_SCATTERV() -! 27Apr01 - R.L. Jacob - small bug fix to -! integer attribute scatter -! 9May01 - J.W. Larson - Re-vamped comms model -! to reflect MPI comms model for the scatter. Tidied up -! the prologue, too. -! 18May01 - R.L. Jacob - use MP_Type function -! to determine type for mpi_scatterv -! 8Aug01 - E.T. Ong - replace list assignment(=) -! with list copy to avoid compiler errors in pgf90. -! 13Dec01 - E.T. Ong - allow scatter with an -! AttrVect containing only an iList or rList. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GM_scatter_' - integer :: nIA,nRA,niV,noV,ier - integer :: myID - integer :: mp_type_Av - type(List) :: iList, rList - type(AttrVect) :: nonRootAV - - if(present(stat)) stat=0 - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) then - call MP_perr_die(myname_,'MP_comm_rank()',ier) - endif - - ! Verify the input: a _gathered_ vector - - if(myID == root) then - - niV = GlobalMap_gsize(GMap) ! the _gathered_ local size - noV = AttrVect_lsize(iV) ! the length of the input AttrVect iV - - if(niV /= noV) then - write(stderr,'(2a,i5,a,i8,a,i8)') myname_, & - ': myID = ',myID,'. Invalid input on root, gsize(GMap) =',& - niV,', lsize(iV) =',noV - if(present(stat)) then - stat=-1 - else - call die(myname_) - endif - endif - - endif - - ! On the root, read the integer and real attribute - ! lists off of iV. - - call List_nullify(iList) - call List_nullify(rList) - - if(myID == root) then - - ! Count the number of real and integer attributes - - nIA = AttrVect_nIAttr(iV) ! number of INTEGER attributes - nRA = AttrVect_nRAttr(iV) ! number of REAL attributes - - if(nIA > 0) then - call List_copy(iList,iV%iList) - endif - - if(nRA > 0) then - call List_copy(rList,iV%rList) - endif - - endif - - ! 
From the root, broadcast iList and rList - - call MPI_BCAST(nIA,1,MP_INTEGER,root,comm,ier) - if(ier /= 0) call MP_perr(myname_,'MPI_BCAST(nIA)',ier) - - call MPI_BCAST(nRA,1,MP_INTEGER,root,comm,ier) - if(ier /= 0) call MP_perr(myname_,'MPI_BCAST(nRA)',ier) - - if(nIA>0) call List_bcast(iList, root, comm) - if(nRA>0) call List_bcast(rList, root, comm) - - noV = GlobalMap_lsize(GMap) ! the _scatterd_ local size - - ! On all processes, use List data and noV to initialize oV - - call AttrVect_init(oV, iList, rList, noV) - call AttrVect_zero(oV) - - ! Initialize a dummy AttrVect for non-root MPI calls - - if(myID/=root) then - call AttrVect_init(nonRootAV,oV,1) - call AttrVect_zero(nonRootAV) - endif - - - if(nIA > 0) then - - if(myID == root) then - - call MPI_scatterv(iV%iAttr,GMap%counts*nIA, & - GMap%displs*nIA,MP_INTEGER,oV%iAttr, & - noV*nIA,MP_INTEGER,root,comm,ier ) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_scatterv(iAttr) on root',ier) - endif - - else - - call MPI_scatterv(nonRootAV%iAttr,GMap%counts*nIA, & - GMap%displs*nIA,MP_INTEGER,oV%iAttr, & - noV*nIA,MP_INTEGER,root,comm,ier ) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_scatterv(iAttr) off root',ier) - endif - - endif ! if(myID == root) - - call List_clean(iList) - - endif ! if(nIA > 0) - - mp_type_Av = MP_Type(1._FP) ! set mpi type to same as AV%rAttr - - if(nRA > 0) then - - if(myID == root) then - - - call MPI_scatterv(iV%rAttr,GMap%counts*nRA, & - GMap%displs*nRA,mp_type_Av,oV%rAttr, & - noV*nRA,mp_type_Av,root,comm,ier ) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_scatterv(rAttr) on root',ier) - endif - - else - - - call MPI_scatterv(nonRootAV%rAttr,GMap%counts*nRA, & - GMap%displs*nRA,mp_type_Av,oV%rAttr, & - noV*nRA,mp_type_Av,root,comm,ier ) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_scatterv(rAttr) off root',ier) - endif - - endif - - call List_clean(rList) - - endif - - if(myID /= root) then - call AttrVect_clean(nonRootAV,ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ':: AttrVect_clean(nonRootAV) failed for non-root & - &process: myID = ', myID - call die(myname_,':: AttrVect_clean failed & - &for nonRootAV off of root',ier) - endif - endif - - end subroutine GM_scatter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GSM_scatter_ - Scatter an AttrVect using a GlobalSegMap -! -! !DESCRIPTION: -! The routine {\tt GSM\_scatter\_} takes an input {\tt AttrVect} type -! {\tt iV} (valid only on the root), and scatters it to a distributed -! {\tt AttrVect} {\tt oV}. The input {\tt GlobalSegMap} argument -! {\tt GSMap} dictates how {\tt iV} is scattered to {\tt oV}. The -! success (failure) of this routine is reported in the zero (non-zero) -! value of the output argument {\tt stat}. -! -! {\tt GSM\_scatter\_()} converts the problem of scattering data -! according to a {\tt GlobalSegMap} into the simpler problem of -! scattering data as specified by a {\tt GlobalMap}. The {\tt GlobalMap} -! variable {\tt GMap} is created based on the local storage requirements -! for each distributed piece of {\tt iV}. On the root, a complete -! (including halo points) copy of {\tt iV} is stored in -! the temporary {\tt AttrVect} variable {\tt workV} (the length of -! {\tt workV} is {\tt GlobalSegMap\_GlobalStorage(GSMap)}). The -! variable {\tt workV} is segmented by process, and segments are -! 
copied into it by process, but ordered in the same order the segments -! appear in {\tt GSMap}. Once {\tt workV} is loaded, the data are -! scattered to the output {\tt AttrVect} {\tt oV} by a call to the -! routine {\tt GM\_scatter\_()} defined in this module, with {\tt workV} -! and {\tt GMap} as the input arguments. -! -! {\bf N.B.:} This algorithm assumes that memory access times are much -! shorter than message-passing transmission times. -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt oV} represents -! dynamically allocated memory. When it is no longer needed, it should -! be deallocated by invoking {\tt AttrVect\_clean()} (see the module -! {\tt m\_AttrVect} for more details). -! -! !INTERFACE: - - subroutine GSM_scatter_(iV, oV, GSMap, root, comm, stat) -! -! !USES: -! -! Environment utilities from mpeu: - - use m_stdio - use m_die - use m_mpif90 - - use m_List, only : List_nullify => nullify - -! GlobalSegMap and associated services: - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_comp_id => comp_id - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - use m_GlobalSegMap, only : GlobalSegMap_GlobalStorage => GlobalStorage -! AttrVect and associated services: - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_clean => clean -! GlobalMap and associated services: - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_init => init - use m_GlobalMap, only : GlobalMap_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(in) :: iV - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(out) :: oV - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Jan01 - J.W. Larson - API specification. -! 8Feb01 - J.W. Larson - Initial code. -! 25Feb01 - J.W. Larson - Bug fix--replaced -! call to GlobalSegMap_lsize with call to the new fcn. -! GlobalSegMap_ProcessStorage(). -! 26Apr01 - R.L. Jacob - add use statement for -! AttVect_clean -! 26Apr01 - J.W. Larson - bug fixes--data -! misalignment in use of the GlobalMap to compute the -! memory map into workV, and initialization of workV -! on all processes. -! 9May01 - J.W. Larson - tidied up prologue -! 15May01 - Larson / Jacob - stopped initializing -! workV on off-root processes (no longer necessary). -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -! 20Jun01 - J.W. Larson - Fixed a subtle bug -! appearing on AIX regarding the fact workV is uninitial- -! ized on non-root processes. This is fixed by nullifying -! all the pointers in workV for non-root processes. -! 20Aug01 - E.T. Ong - Added argument check -! for matching processors in gsmap and comm. -! 13Dec01 - E.T. Ong - got rid of restriction -! GlobalStorage(GSMap)==AttrVect_lsize(AV) to allow for -! GSMap to be haloed. -! 11Aug08 - R. Jacob - remove call to ProcessStorage -! and replace with faster algorithm provided by Pat Worley -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GSM_scatter_' - -! 
Temporary workspace AttrVect: - type(AttrVect) :: workV -! Component ID and number of segments for GSMap: - integer :: comp_id, ngseg, iseg -! Total length of GSMap segments laid end-to-end: - integer :: global_storage -! Error Flag - integer :: ierr -! Number of processes on communicator, and local rank: - integer :: NumProcs, myID -! Total local storage on each pe according to GSMap: - integer, dimension(:), allocatable :: lns -! Temporary GlobalMap used to scatter the segmented (by pe) data - type(GlobalMap) :: GMap -! Loop counters and temporary indices: - integer :: m, n, ilb, iub, olb, oub, pe -! workV segment tracking index array: - integer, dimension(:), allocatable :: current_pos - - ! Initialize stat (if present) - - if(present(stat)) stat = 0 - - ! Which process am I? - - call MPI_COMM_RANK(comm, myID, ierr) - - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK',ierr) - endif - - if(myID == root) then - - if(GSMap%gsize > AttrVect_lsize(iV)) then - write(stderr,'(2a,i5,a,i8,a,i8)') myname_, & - ': myID = ',myID,'. Invalid input, GSMap%gsize =',& - GSMap%gsize, ', lsize(iV) =',AttrVect_lsize(iV) - if(present(stat)) then - stat=-1 - else - call die(myname_) - endif - endif - - endif - - ! On the root, initialize a work AttrVect type of the - ! above length, and with the same attribute lists as iV. - ! on other processes, initialize workV only with the - ! attribute information, but no storage. - - if(myID == root) then - - global_storage = GlobalSegMap_GlobalStorage(GSMap) - call AttrVect_init(workV, iV, global_storage) - call AttrVect_zero(workV) - - else - ! nullify workV just to be safe - - call List_nullify(workV%iList) - call List_nullify(workV%rList) - nullify(workV%iAttr) - nullify(workV%rAttr) - - endif - - ! Return to processing on the root to load workV: - - ! How many processes are there on this communicator? - - call MPI_COMM_SIZE(comm, NumProcs, ierr) - - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_SIZE',ierr) - endif - - ! Processor Check: Do the processors on GSMap match those in comm? - - if(MAXVAL(GSMap%pe_loc) > (NumProcs-1)) then - write(stderr,*) myname_, & - ":: Procs in GSMap%pe_loc do not match procs in communicator ", & - NumProcs-1, MAXVAL(GSMap%pe_loc) - if(present(stat)) then - stat=1 - return - else - call die(myname_) - endif - endif - - if(myID == root) then - - ! Allocate a precursor to a GlobalMap accordingly... - - allocate(lns(0:NumProcs-1), stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: allocate(lns...) failed, stat=',ierr - if(present(stat)) then - stat=ierr - else - call die(myname_,'allocate(lns)',ierr) - endif - endif - - ! And Load it... - - lns(:)=0 - do iseg=1,GSMap%ngseg - n = GSMap%pe_loc(iseg) - lns(n) = lns(n) + GSMap%length(iseg) - end do - - endif ! if(myID == root) - - ! Non-root processes call GlobalMap_init with lns, - ! although this argument is not used in the - ! subroutine. Since it correspond to a dummy shaped array arguments - ! in GlobslMap_init, the Fortran 90 standard dictates that the actual - ! argument must contain complete shape information. Therefore, - ! the array argument must be allocated on all processes. - - if(myID /= root) then - - allocate(lns(1),stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: allocate(lns...) failed, stat=',ierr - if(present(stat)) then - stat=ierr - return - else - call die(myname_,'allocate(lns(1))',ierr) - endif - endif - - endif ! if(myID /= root)... - - ! Create a GlobalMap describing the 1-D decomposition - ! 
of workV: - - comp_id = GlobalSegMap_comp_id(GSMap) - - call GlobalMap_init(GMap, comp_id, lns, root, comm) - - ! On the root, load workV: - - if(myID == root) then - - ! On the root, allocate current position index for - ! each process chunk: - - allocate(current_pos(0:NumProcs-1), stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: allocate(current_pos..) failed, stat=', & - ierr - if(present(stat)) then - stat=ierr - return - else - call die(myname_,'allocate(current_pos)',ierr) - endif - endif - - ! Initialize current_pos(:) using GMap%displs(:) - - do n=0,NumProcs-1 - current_pos(n) = GMap%displs(n) + 1 - end do - - ! Load each segment of iV into its appropriate segment - ! of workV: - - ngseg = GlobalSegMap_ngseg(GSMap) - - do n=1,ngseg - - ! Determine which process owns segment n: - - pe = GSMap%pe_loc(n) - - ! Input map (lower/upper indicess) of segment of iV: - - ilb = GSMap%start(n) - iub = GSMap%start(n) + GSMap%length(n) - 1 - - ! Output map of (lower/upper indicess) segment of workV: - - olb = current_pos(pe) - oub = current_pos(pe) + GSMap%length(n) - 1 - - ! Increment current_pos(n) for next time: - - current_pos(pe) = current_pos(pe) + GSMap%length(n) - - ! Now we are equipped to do the copy: - - do m=1,AttrVect_nIAttr(iV) - workV%iAttr(m,olb:oub) = iV%iAttr(m,ilb:iub) - end do - - do m=1,AttrVect_nRAttr(iV) - workV%rAttr(m,olb:oub) = iV%rAttr(m,ilb:iub) - end do - - end do ! do n=1,ngseg - - ! Clean up current_pos, which was only allocated on the root - - deallocate(current_pos, stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: deallocate(current_pos) failed. ', & - 'stat = ',ierr - if(present(stat)) then - stat=ierr - return - else - call die(myname_,'deallocate(current_pos)',ierr) - endif - endif - - endif ! if(myID == root) - - ! Now we are in business...we have: 1) an AttrVect laid out - ! in contiguous segments, each segment corresponding to a - ! process, and in the same order dictated by GSMap; - ! 2) a GlobalMap telling us which segment of workV goes to - ! which process. Thus, we can us GM_scatter_() to achieve - ! our goal. - - call GM_scatter_(workV, oV, GMap, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname,':: ERROR in return from GM_scatter_(), ierr=',& - ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_,'ERROR returning from GM_scatter_()',ierr) - endif - endif - - ! Finally, clean up allocated structures: - - if(myID == root) then - call AttrVect_clean(workV) - endif - - call GlobalMap_clean(GMap) - - deallocate(lns, stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: ERROR in deallocate(lns), ierr=',ierr - if(present(stat)) then - stat=ierr - return - else - call die(myname_,'deallocate(lns)',ierr) - endif - endif - - end subroutine GSM_scatter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcast_ - Broadcast an AttrVect -! -! !DESCRIPTION: This routine takes an {\tt AttrVect} argument {\tt aV} -! (at input, valid on the root only), and broadcasts it to all the -! processes associated with the communicator handle {\tt comm}. The -! success (failure) of this routine is reported in the zero (non-zero) -! value of the output argument {\tt stat}. -! -! {\bf N.B.}: The output (on non-root processes) {\tt AttrVect} argument -! {\tt aV} represents dynamically allocated memory. When it is no longer -! 
needed, it should be deallocated by invoking {\tt AttrVect\_clean()} -! (see the module {\tt m\_AttrVect} for details). -! -! !INTERFACE: - - subroutine bcast_(aV, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - use m_String, only : String,bcast,char,String_clean - use m_String, only : String_bcast => bcast - use m_List, only : List_get => get - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(inout) :: aV ! (IN) on the root, - ! (OUT) elsewhere - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 27Apr98 - Jing Guo - initial prototype/prologue/code -! 27Oct00 - J.W. Larson - relocated from -! m_AttrVect -! 9May01 - J.W. Larson - tidied up prologue -! 18May01 - R.L. Jacob - use MP_Type function -! to determine type for bcast -! 19Dec01 - E.T. Ong - adjusted for case of AV with -! only integer or real attribute -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcast_' - type(String) :: iLStr,rLStr - integer :: nIA, nRA, lsize - integer :: myID - integer :: ier - integer :: mp_Type_aV - - if(present(stat)) stat=0 - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) then - call MP_perr_die(myname_,'MP_comm_rank()',ier) - endif - - ! Broadcaast to all PEs - - if(myID == root) then - nIA = AttrVect_nIAttr(aV) - nRA = AttrVect_nRAttr(aV) - lsize = AttrVect_lsize(aV) - endif - - call MPI_bcast(nIA,1,MP_INTEGER,root,comm,ier) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_bcast(nIA)',ier) - endif - - call MPI_bcast(nRA,1,MP_INTEGER,root,comm,ier) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_bcast(nRA)',ier) - endif - - call MPI_bcast(lsize,1,MP_INTEGER,root,comm,ier) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_bcast(lsize)',ier) - endif - - ! Convert the two Lists to two Strings - - if(nIA>0) then - - if(myID == root) call List_get(iLStr,aV%iList) - - call String_bcast(iLStr,root,comm,stat=ier) ! bcast.String() - - if(ier /= 0) then - write(stderr,*) myname_,'bcast.String(iLstr), ier=',ier - if(present(stat)) then - stat=ier - return - else - call die(myname_,'String_bcast(iLStr) failed',ier) - endif - endif ! if(ier /= 0)... - - endif ! if(nIA > 0)... - - - if(nRA>0) then - - if(myID == root) call List_get(rLStr,aV%rList) - - call String_bcast(rLStr,root,comm,stat=ier) ! bcast.String() - if(ier /= 0) then - write(stderr,*) myname_,'bcast.String(iLstr), ier=',ier - if(present(stat)) then - stat=ier - return - else - call die(myname_,'String_bcast(iLStr) failed',ier) - endif - endif ! if(ier /= 0)... - - endif ! if(nRA > 0)... - - if(myID /= root) then - - if( (nIA>0) .and. (nRA>0) ) then - call AttrVect_init(aV,iList=char(iLStr),rList=char(rLStr), & - lsize=lsize) - endif - - if( (nIA>0) .and. (nRA<=0) ) then - call AttrVect_init(aV,iList=char(iLStr),lsize=lsize) - endif - - if( (nIA<=0) .and. (nRA>0) ) then - call AttrVect_init(aV,rList=char(rLStr),lsize=lsize) - endif - - if( (nIA<=0) .and. (nRA<=0) ) then - write(stderr,*) myname_,':: Nonpositive numbers of both ',& - 'real AND integer attributes. 
nIA =',nIA,' nRA=',nRA - if(present(stat)) then - stat = -1 - return - else - call die(myname_,'AV has not been initialized',-1) - endif - endif ! if((nIA<= 0) .and. (nRA<=0))... - - call AttrVect_zero(aV) - - - endif ! if(myID /= root)... - - if(nIA > 0) then - - mp_Type_aV=MP_Type(av%iAttr) - call MPI_bcast(aV%iAttr,nIA*lsize,mp_Type_aV,root,comm,ier) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_bcast(iAttr) failed.',ier) - endif - - call String_clean(iLStr) - - endif - - if(nRA > 0) then - - mp_Type_aV=MP_Type(av%rAttr) - call MPI_bcast(aV%rAttr,nRA*lsize,mp_Type_aV,root,comm,ier) - if(ier /= 0) then - call MP_perr_die(myname_,'MPI_bcast(rAttr) failed.',ier) - endif - - call String_clean(rLStr) - - endif - - end subroutine bcast_ - - end module m_AttrVectComms - - - diff --git a/src/externals/mct/mct/m_AttrVectReduce.F90 b/src/externals/mct/mct/m_AttrVectReduce.F90 deleted file mode 100644 index e05eda342e3..00000000000 --- a/src/externals/mct/mct/m_AttrVectReduce.F90 +++ /dev/null @@ -1,1108 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_AttrVectReduce - Local/Distributed AttrVect Reduction Ops. -! -! !DESCRIPTION: This module provides routines to perform reductions on -! the {\tt AttrVect} datatype. These reductions can either be the types -! of operations supported by MPI (currently, summation, minimum and -! maximum are available) that are applied either to all the attributes -! (both integer and real), or specific reductions applicable only to the -! real attributes of an {\tt AttrVect}. This module provides services -! for both local (i.e., one address space) and global (distributed) -! reductions. The type of reduction is defined through use of one of -! the public data members of this module: -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|} -!\hline -!{\bf Value} & {\bf Action} \\ -!\hline -!{\tt AttrVectSUM} & Sum \\ -!\hline -!{\tt AttrVectMIN} & Minimum \\ -!\hline -!{\tt AttrVectMAX} & Maximum \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! !INTERFACE: - - module m_AttrVectReduce -! -! !USES: -! -! No modules are used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: LocalReduce ! Local reduction of all attributes - public :: LocalReduceRAttr ! Local reduction of REAL attributes - public :: AllReduce ! AllReduce for distributed AttrVect - public :: GlobalReduce ! Local Reduce followed by AllReduce - public :: LocalWeightedSumRAttr ! Local weighted sum of - ! REAL attributes - public :: GlobalWeightedSumRAttr ! Global weighted sum of REAL - ! attributes for a distrubuted - ! AttrVect - - interface LocalReduce ; module procedure LocalReduce_ ; end interface - interface LocalReduceRAttr - module procedure LocalReduceRAttr_ - end interface - interface AllReduce - module procedure AllReduce_ - end interface - interface GlobalReduce - module procedure GlobalReduce_ - end interface - interface LocalWeightedSumRAttr; module procedure & - LocalWeightedSumRAttrSP_, & - LocalWeightedSumRAttrDP_ - end interface - interface GlobalWeightedSumRAttr; module procedure & - GlobalWeightedSumRAttrSP_, & - GlobalWeightedSumRAttrDP_ - end interface - -! 
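For the common case of summing every attribute of a distributed AttrVect over all elements and all ranks, GlobalReduce (a LocalReduce on each process followed by an AllReduce) is the one-call route. A minimal sketch, assuming an initialized distributed AttrVect av and a communicator handle comm (the variable names are illustrative):

      use m_AttrVect,       only : AttrVect, AttrVect_clean => clean
      use m_AttrVectReduce, only : GlobalReduce, AttrVectSUM

      type(AttrVect) :: av, globalSums
      integer        :: comm, ier

      ! globalSums has one element per attribute: for each attribute it
      ! holds the sum over all local elements on all ranks of comm.
      call GlobalReduce(av, globalSums, AttrVectSUM, comm, ier)

      ! ... use globalSums%rAttr(:,1) and globalSums%iAttr(:,1) ...

      call AttrVect_clean(globalSums)   ! avoid the documented memory leak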
!PUBLIC DATA MEMBERS: - - public :: AttrVectSUM - public :: AttrVectMIN - public :: AttrVectMAX - - integer, parameter :: AttrVectSUM = 1 - integer, parameter :: AttrVectMIN = 2 - integer, parameter :: AttrVectMAX = 3 - -! !REVISION HISTORY: -! -! 7May02 - J.W. Larson - Created module -! using routines originally prototyped in m_AttrVect. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_AttrVectReduce' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: LocalReduce_ - Local Reduction of INTEGER and REAL Attributes -! -! !DESCRIPTION: -! -! The subroutine {\tt LocalReduce\_()} takes the input {\tt AttrVect} -! argument {\tt inAV}, and reduces each of its integer and real -! attributes, returning them in the output {\tt AttrVect} argument -! {\tt outAV} (which is created by this routine). The type of -! reduction is defined by the input {\tt INTEGER} argument {\tt action}. -! Allowed values for action are defined as public data members to this -! module, and are summarized below: -! -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|} -!\hline -!{\bf Value} & {\bf Action} \\ -!\hline -!{\tt AttrVectSUM} & Sum \\ -!\hline -!{\tt AttrVectMIN} & Minimum \\ -!\hline -!{\tt AttrVectMAX} & Maximum \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt outAV} is -! allocated memory, and must be destroyed by invoking the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine LocalReduce_(inAV, outAV, action) -! -! !USES: -! - use m_realkinds, only : FP - use m_die , only : die - use m_stdio , only : stderr - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - integer, intent(IN) :: action - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - -! !REVISION HISTORY: -! 16Apr02 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::LocalReduce_' - - integer :: i,j - - ! First Step: create outAV from inAV (but with one element) - - call AttrVect_init(outAV, inAV, lsize=1) - - call AttrVect_zero(outAV) - - select case(action) - case(AttrVectSUM) ! sum up each attribute... - - ! Compute INTEGER and REAL attribute sums: - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nIAttr(outAV) - outAV%iAttr(i,1) = outAV%iAttr(i,1) + inAV%iAttr(i,j) - end do - end do - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(outAV) - outAV%rAttr(i,1) = outAV%rAttr(i,1) + inAV%rAttr(i,j) - end do - end do - - case(AttrVectMIN) ! find the minimum of each attribute... - - ! Initialize INTEGER and REAL attribute minima: - - do i=1,AttrVect_nIAttr(outAV) - outAV%iAttr(i,1) = inAV%iAttr(i,1) - end do - - do i=1,AttrVect_nRAttr(outAV) - outAV%rAttr(i,1) = inAV%rAttr(i,1) - end do - - ! 
Compute INTEGER and REAL attribute minima: - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nIAttr(outAV) - if(inAV%iAttr(i,j) < outAV%iAttr(i,1)) then - outAV%iAttr(i,1) = inAV%iAttr(i,j) - endif - end do - end do - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(outAV) - if(inAV%rAttr(i,j) < outAV%rAttr(i,1)) then - outAV%rAttr(i,1) = inAV%rAttr(i,j) - endif - end do - end do - - case(AttrVectMAX) ! find the maximum of each attribute... - - ! Initialize INTEGER and REAL attribute maxima: - - do i=1,AttrVect_nIAttr(outAV) - outAV%iAttr(i,1) = inAV%iAttr(i,1) - end do - - do i=1,AttrVect_nRAttr(outAV) - outAV%rAttr(i,1) = inAV%rAttr(i,1) - end do - - ! Compute INTEGER and REAL attribute maxima: - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nIAttr(outAV) - if(inAV%iAttr(i,j) > outAV%iAttr(i,1)) then - outAV%iAttr(i,1) = inAV%iAttr(i,j) - endif - end do - end do - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(outAV) - if(inAV%rAttr(i,j) > outAV%rAttr(i,1)) then - outAV%rAttr(i,1) = inAV%rAttr(i,j) - endif - end do - end do - - case default - - write(stderr,'(2a,i8)') myname_,':: unrecognized action = ',action - call die(myname_) - - end select - - end subroutine LocalReduce_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: LocalReduceRAttr_ - Local Reduction of REAL Attributes -! -! !DESCRIPTION: -! -! The subroutine {\tt LocalReduceRAttr\_()} takes the input -! {\tt AttrVect} argument {\tt inAV}, and reduces each of its {\tt REAL} -! attributes, returning them in the output {\tt AttrVect} argument -! {\tt outAV} (which is created by this routine). The type of reduction -! is defined by the input {\tt INTEGER} argument {\tt action}. Allowed -! values for action are defined as public data members to this module -! (see the declaration section of {\tt m\_AttrVect}, and are summarized below: -! -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|} -!\hline -!{\bf Value} & {\bf Action} \\ -!\hline -!{\tt AttrVectSUM} & Sum \\ -!\hline -!{\tt AttrVectMIN} & Minimum \\ -!\hline -!{\tt AttrVectMAX} & Maximum \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt outAV} is -! allocated memory, and must be destroyed by invoking the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: -! - subroutine LocalReduceRAttr_(inAV, outAV, action) - -! -! !USES: -! - use m_realkinds, only : FP - - use m_die , only : die - use m_stdio , only : stderr - - use m_List, only : List - use m_List, only : List_copy => copy - use m_List, only : List_exportToChar => exportToChar - use m_List, only : List_clean => clean - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - integer, intent(IN) :: action - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - -! !REVISION HISTORY: -! 16Apr02 - J.W. Larson - initial prototype -! 6May02 - J.W. Larson - added optional -! argument weights(:) -! 8May02 - J.W. Larson - modified interface -! 
to return it to being a pure reduction operation. -! 9May02 - J.W. Larson - renamed from -! LocalReduceReals_() to LocalReduceRAttr_() to make -! the name more consistent with other module procedure -! names in this module. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::LocalReduceRAttr_' - - integer :: i,j - type(List) :: rList_copy - - - ! First Step: create outAV from inAV (but with one element) - - ! Superflous list copy circumvents SGI compiler bug - call List_copy(rList_copy,inAV%rList) - call AttrVect_init(outAV, rList=List_exportToChar(rList_copy), lsize=1) - call AttrVect_zero(outAV) - call List_clean(rList_copy) - - select case(action) - case(AttrVectSUM) ! sum up each attribute... - - ! Compute REAL attribute sums: - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(outAV) - outAV%rAttr(i,1) = outAV%rAttr(i,1) + inAV%rAttr(i,j) - end do - end do - - case(AttrVectMIN) ! find the minimum of each attribute... - - ! Initialize REAL attribute minima: - - do i=1,AttrVect_nRAttr(outAV) - outAV%rAttr(i,1) = inAV%rAttr(i,1) - end do - - ! Compute REAL attribute minima: - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(outAV) - if(inAV%rAttr(i,j) < outAV%rAttr(i,1)) then - outAV%rAttr(i,1) = inAV%rAttr(i,j) - endif - end do - end do - - case(AttrVectMAX) ! find the maximum of each attribute... - - ! Initialize REAL attribute maxima: - - do i=1,AttrVect_nRAttr(outAV) - outAV%rAttr(i,1) = inAV%rAttr(i,1) - end do - - ! Compute REAL attribute maxima: - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(outAV) - if(inAV%rAttr(i,j) > outAV%rAttr(i,1)) then - outAV%rAttr(i,1) = inAV%rAttr(i,j) - endif - end do - end do - - case default - - write(stderr,'(2a,i8)') myname_,':: unrecognized action = ',action - call die(myname_) - - end select - - end subroutine LocalReduceRAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: AllReduce_ - Reduction of INTEGER and REAL Attributes -! -! !DESCRIPTION: -! -! The subroutine {\tt AllReduce\_()} takes the distributed input -! {\tt AttrVect} argument {\tt inAV}, and performs a global reduction -! of all its attributes across the MPI communicator associated with -! the Fortran90 {\tt INTEGER} handle {\tt comm}, and returns these -! reduced values to all processes in the {\tt AttrVect} argument -! {\tt outAV} (which is created by this routine). The reduction -! operation is specified by the user, and must have one of the values -! listed in the table below: -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|} -!\hline -!{\bf Value} & {\bf Action} \\ -!\hline -!{\tt AttrVectSUM} & Sum \\ -!\hline -!{\tt AttrVectMIN} & Minimum \\ -!\hline -!{\tt AttrVectMAX} & Maximum \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt outAV} is -! allocated memory, and must be destroyed by invoking the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: -! - - subroutine AllReduce_(inAV, outAV, ReductionOp, comm, ierr) - -! -! !USES: -! 
- use m_die - use m_stdio , only : stderr - use m_mpif90 - - use m_List, only : List - use m_List, only : List_exportToChar => exportToChar - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - integer, intent(IN) :: ReductionOp - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - integer, optional, intent(OUT) :: ierr - -! !REVISION HISTORY: -! 8May02 - J.W. Larson - initial version. -! 9Jul02 - J.W. Larson - slight modification; -! use List_allocated() to determine if there is attribute -! data to be reduced (this patch is to support the Sun -! F90 compiler). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::AllReduce_' - - integer :: BufferSize, myID, ier - - ! Initialize ierr (if present) to "success" value - if(present(ierr)) ierr = 0 - - call MPI_COMM_RANK(comm, myID, ier) - if(ier /= 0) then - write(stderr,'(2a)') myname_,':: MPI_COMM_RANK() failed.' - call MP_perr_die(myname_, 'MPI_COMM_RANK() failed.', ier) - endif - - call AttrVect_init(outAV, inAV, lsize=AttrVect_lsize(inAV)) - call AttrVect_zero(outAV) - - if(List_allocated(inAV%rList)) then ! invoke MPI_AllReduce() for the real - ! attribute data. - BufferSize = AttrVect_lsize(inAV) * AttrVect_nRAttr(inAV) - - select case(ReductionOp) - case(AttrVectSUM) - call MPI_AllReduce(inAV%rAttr, outAV%rAttr, BufferSize, & - MP_Type(inAV%rAttr(1,1)), MP_SUM, & - comm, ier) - case(AttrVectMIN) - call MPI_AllReduce(inAV%rAttr, outAV%rAttr, BufferSize, & - MP_Type(inAV%rAttr(1,1)), MP_MIN, & - comm, ier) - case(AttrVectMAX) - call MPI_AllReduce(inAV%rAttr, outAV%rAttr, BufferSize, & - MP_Type(inAV%rAttr(1,1)), MP_MAX, & - comm, ier) - case default - write(stderr,'(2a,i8,a)') myname_, & - '::FATAL ERROR--value of RedctionOp=', & - ReductionOp,' not supported.' - end select - - if(ier /= 0) then - write(stderr,*) myname_, & - ':: Fatal Error in MPI_AllReduce(), myID = ',myID - call MP_perr_die(myname_, 'MPI_AllReduce() failed.', ier) - endif - - endif ! if(List_allocated(inAV%rList))... - - if(List_allocated(inAV%iList)) then ! invoke MPI_AllReduce() for the - ! integer attribute data. - - BufferSize = AttrVect_lsize(inAV) * AttrVect_nIAttr(inAV) - - select case(ReductionOp) - case(AttrVectSUM) - call MPI_AllReduce(inAV%iAttr, outAV%iAttr, BufferSize, & - MP_Type(inAV%iAttr(1,1)), MP_SUM, & - comm, ier) - case(AttrVectMIN) - call MPI_AllReduce(inAV%iAttr, outAV%iAttr, BufferSize, & - MP_Type(inAV%iAttr(1,1)), MP_MIN, & - comm, ier) - case(AttrVectMAX) - call MPI_AllReduce(inAV%iAttr, outAV%iAttr, BufferSize, & - MP_Type(inAV%iAttr(1,1)), MP_MAX, & - comm, ier) - case default - write(stderr,'(2a,i8,a)') myname_, & - '::FATAL ERROR--value of RedctionOp=', & - ReductionOp,' not supported.' - end select - - if(ierr /= 0) then - write(stderr,*) myname_, & - ':: Fatal Error in MPI_AllReduce(), myID = ',myID - call MP_perr_die(myname_, 'MPI_AllReduce() failed.', ier) - endif - endif ! if(List_allocated(inAV%iList))... - - if(present(ierr)) ierr = ier - - end subroutine AllReduce_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
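For orientation, a minimal usage sketch of the reduction routines deleted above (not part of the original source). It assumes that LocalReduce and GlobalReduce are the generic public names this module declares for LocalReduce_ and GlobalReduce_, and that the action flag AttrVectSUM is exported as a public data member, as the descriptions state; the subroutine and variable names are placeholders.

      subroutine example_reduce(aV, comm)
         use m_AttrVect,       only : AttrVect
         use m_AttrVect,       only : AttrVect_clean => clean
         use m_AttrVectReduce, only : LocalReduce, GlobalReduce, AttrVectSUM
         implicit none
         type(AttrVect), intent(in) :: aV    ! initialized and filled by the caller
         integer,        intent(in) :: comm  ! communicator spanning every process that holds aV

         type(AttrVect) :: localSums, globalSums
         integer :: ierr

         ! Sum each INTEGER and REAL attribute over the points held locally:
         call LocalReduce(aV, localSums, AttrVectSUM)

         ! Sum each attribute over all points on all processes of comm:
         call GlobalReduce(aV, globalSums, AttrVectSUM, comm, ierr)

         ! Both outputs are allocated by the reduction routines and must be
         ! cleaned to avoid memory leaks:
         call AttrVect_clean(localSums)
         call AttrVect_clean(globalSums)
      end subroutine example_reduce

Both outputs are length-1 AttrVects, so the reduced value of the i-th real attribute is found in element rAttr(i,1).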
Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalReduce_ - Reduction of INTEGER and REAL Attributes -! -! !DESCRIPTION: -! -! The subroutine {\tt GlobalReduce\_()} takes the distributed input -! {\tt AttrVect} argument {\tt inAV}, and performs a local reduction of -! all its integer and real attributes, followed by a an {\tt AllReduce} -! of all the result of the local reduction across the MPI communicator -! associated with the Fortran90 {\tt INTEGER} handle {\tt comm}, and -! returns these reduced values to all processes in the {\tt AttrVect} -! argument {\tt outAV} (which is created by this routine). The reduction -! operation is specified by the user, and must have one of the values -! listed in the table below: -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|} -!\hline -!{\bf Value} & {\bf Action} \\ -!\hline -!{\tt AttrVectSUM} & Sum \\ -!\hline -!{\tt AttrVectMIN} & Minimum \\ -!\hline -!{\tt AttrVectMAX} & Maximum \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt outAV} is -! allocated memory, and must be destroyed by invoking the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: -! - - subroutine GlobalReduce_(inAV, outAV, ReductionOp, comm, ierr) - -! -! !USES: -! - use m_die - use m_stdio , only : stderr - use m_mpif90 - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - integer, intent(IN) :: ReductionOp - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - integer, optional, intent(OUT) :: ierr - -! !REVISION HISTORY: -! 6May03 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalReduce_' - type(AttrVect) :: LocalResult - - ! Step One: On-PE reduction - - call LocalReduce_(inAV, LocalResult, ReductionOp) - - ! Step Two: An AllReduce on the distributed local reduction results - - if(present(ierr)) then - call AllReduce_(LocalResult, outAV, ReductionOp, comm, ierr) - else - call AllReduce_(LocalResult, outAV, ReductionOp, comm) - endif - - ! Step Three: Clean up and return. - - call AttrVect_clean(LocalResult) - - end subroutine GlobalReduce_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: LocalWeightedSumRAttrSP_ - Local Weighted Sum of REAL Attributes -! -! !DESCRIPTION: -! -! The subroutine {\tt LocalWeightedSumRAttr\_()} takes the input -! {\tt AttrVect} argument {\tt inAV}, and performs a weighted sum -! of each of its {\tt REAL} attributes, returning them in the output -! {\tt AttrVect} argument {\tt outAV} (which is created by this routine -! and will contain {\em no} integer attributes). The weights used -! for the summation are provided by the user in the input argument -! {\tt Weights(:)}. If the sum of the weights is desired, this can be -! returned as an attribute in {\tt outAV} if the optional {\tt CHARACTER} -! argument {\tt WeightSumAttr} is provided (which will be concatenated -! onto the list of real attributes in {\tt inAV}). -! -! 
{\bf N.B.}: The argument {\tt WeightSumAttr} must not be identical -! to any of the real attribute names in {\tt inAV}. -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt outAV} is -! allocated memory, and must be destroyed by invoking the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: -! - subroutine LocalWeightedSumRAttrSP_(inAV, outAV, Weights, WeightSumAttr) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - use m_realkinds, only : SP, FP - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_exportToChar => exportToChar - use m_List, only : List_concatenate => concatenate - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - real(SP), dimension(:), pointer :: Weights - character(len=*), optional, intent(IN) :: WeightSumAttr - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - -! !REVISION HISTORY: -! 8May02 - J.W. Larson - initial version. -! 14Jun02 - J.W. Larson - bug fix regarding -! accumulation of weights when invoked with argument -! weightSumAttr. Now works in MCT unit tester. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::LocalWeightedSumRAttrSP_' - - integer :: i,j - type(List) dummyList1, dummyList2 - - ! Check for consistencey between inAV and the weights array - - if(size(weights) /= AttrVect_lsize(inAV)) then - write(stderr,'(4a)') myname_,':: ERROR--mismatch in lengths of ', & - 'input array array argument weights(:) and input AttrVect ',& - 'inAV.' - write(stderr,'(2a,i8)') myname_,':: size(weights)=',size(weights) - write(stderr,'(2a,i8)') myname_,':: length of inAV=', & - AttrVect_lsize(inAV) - call die(myname_) - endif - - ! First Step: create outAV from inAV (but with one element) - - if(present(WeightSumAttr)) then - call List_init(dummyList1,WeightSumAttr) - call List_concatenate(inAV%rList, dummyList1, dummyList2) - call AttrVect_init(outAV, rList=List_exportToChar(dummyList2), & - lsize=1) - call List_clean(dummyList1) - call List_clean(dummyList2) - else - call AttrVect_init(outAV, rList=List_exportToChar(inAV%rList), lsize=1) - endif - - ! Initialize REAL attribute sums: - call AttrVect_zero(outAV) - - ! Compute REAL attribute sums: - - if(present(WeightSumAttr)) then ! perform weighted sum AND sum weights - - do j=1,AttrVect_lsize(inAV) - - do i=1,AttrVect_nRAttr(inAV) - outAV%rAttr(i,1) = outAV%rAttr(i,1) + inAV%rAttr(i,j) * weights(j) - end do - ! The final attribute is the sum of the weights - outAV%rAttr(AttrVect_nRAttr(outAV),1) = & - outAV%rAttr(AttrVect_nRAttr(outAV),1) + weights(j) - end do - - else ! only perform weighted sum - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(inAV) - outAV%rAttr(i,1) = outAV%rAttr(i,1) + inAV%rAttr(i,j) * weights(j) - end do - end do - - endif ! if(present(WeightSumAttr))... - - end subroutine LocalWeightedSumRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! 
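A similar sketch (not from the original source) for the weighted local sum above; LocalWeightedSumRAttr is assumed to be the generic public name covering the SP and DP module procedures, and the subroutine, variable, and 'wsum' attribute names are placeholders.

      subroutine example_weighted_sum(aV, weights)
         use m_realkinds,      only : SP
         use m_AttrVect,       only : AttrVect
         use m_AttrVect,       only : AttrVect_clean => clean
         use m_AttrVectReduce, only : LocalWeightedSumRAttr
         implicit none
         type(AttrVect), intent(in)      :: aV       ! initialized and filled by the caller
         real(SP), dimension(:), pointer :: weights  ! one weight per local point of aV

         type(AttrVect) :: wSums

         ! Weighted sum of every REAL attribute of aV; the extra attribute
         ! 'wsum' (which must not clash with an existing real attribute name)
         ! receives the sum of the weights themselves:
         call LocalWeightedSumRAttr(aV, wSums, weights, WeightSumAttr='wsum')

         ! One value per attribute is returned in wSums%rAttr(:,1); clean the
         ! result when it is no longer needed:
         call AttrVect_clean(wSums)
      end subroutine example_weighted_sum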
---------------------------------------------------------------------- -! -! !IROUTINE: LocalWeightedSumRAttrDP_ - Local Weighted Sum of REAL Attributes -! -! !DESCRIPTION: -! Double precision version of LocalWeightedSumRAttrSP_ -! -! !INTERFACE: -! - subroutine LocalWeightedSumRAttrDP_(inAV, outAV, Weights, WeightSumAttr) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - use m_realkinds, only : DP, FP - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_exportToChar => exportToChar - use m_List, only : List_concatenate => concatenate - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_nIAttr => nIAttr - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - real(DP), dimension(:), pointer :: Weights - character(len=*), optional, intent(IN) :: WeightSumAttr - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - -! !REVISION HISTORY: -! 8May02 - J.W. Larson - initial version. -! 14Jun02 - J.W. Larson - bug fix regarding -! accumulation of weights when invoked with argument -! weightSumAttr. Now works in MCT unit tester. -! ______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::LocalWeightedSumRAttrDP_' - - integer :: i,j - type(List) dummyList1, dummyList2 - - ! Check for consistencey between inAV and the weights array - - if(size(weights) /= AttrVect_lsize(inAV)) then - write(stderr,'(4a)') myname_,':: ERROR--mismatch in lengths of ', & - 'input array array argument weights(:) and input AttrVect ',& - 'inAV.' - write(stderr,'(2a,i8)') myname_,':: size(weights)=',size(weights) - write(stderr,'(2a,i8)') myname_,':: length of inAV=', & - AttrVect_lsize(inAV) - call die(myname_) - endif - - ! First Step: create outAV from inAV (but with one element) - - if(present(WeightSumAttr)) then - call List_init(dummyList1,WeightSumAttr) - call List_concatenate(inAV%rList, dummyList1, dummyList2) - call AttrVect_init(outAV, rList=List_exportToChar(dummyList2), & - lsize=1) - call List_clean(dummyList1) - call List_clean(dummyList2) - else - call AttrVect_init(outAV, rList=List_exportToChar(inAV%rList), lsize=1) - endif - - ! Initialize REAL attribute sums: - call AttrVect_zero(outAV) - - ! Compute REAL attribute sums: - - if(present(WeightSumAttr)) then ! perform weighted sum AND sum weights - - do j=1,AttrVect_lsize(inAV) - - do i=1,AttrVect_nRAttr(inAV) - outAV%rAttr(i,1) = outAV%rAttr(i,1) + inAV%rAttr(i,j) * weights(j) - end do - ! The final attribute is the sum of the weights - outAV%rAttr(AttrVect_nRAttr(outAV),1) = & - outAV%rAttr(AttrVect_nRAttr(outAV),1) + weights(j) - end do - - else ! only perform weighted sum - - do j=1,AttrVect_lsize(inAV) - do i=1,AttrVect_nRAttr(inAV) - outAV%rAttr(i,1) = outAV%rAttr(i,1) + inAV%rAttr(i,j) * weights(j) - end do - end do - - endif ! if(present(WeightSumAttr))... - - end subroutine LocalWeightedSumRAttrDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalWeightedSumRAttrSP_ - Global Weighted Sum of REAL Attributes -! -! !DESCRIPTION: -! -! 
The subroutine {\tt GlobalWeightedSumRAttr\_()} takes the -! distributed input {\tt AttrVect} argument {\tt inAV}, and performs -! a weighted global sum across the MPI communicator associated with -! the Fortran90 {\tt INTEGER} handle {\tt comm} of each of its -! {\tt REAL} attributes, returning the sums to each process in the -! {\tt AttrVect} argument {\tt outAV} (which is created by this routine -! and will contain {\em no} integer attributes). The weights used for -! the summation are provided by the user in the input argument -! {\tt weights(:)}. If the sum of the weights is desired, this can be -! returned as an attribute in {\tt outAV} if the optional {\tt CHARACTER} -! argument {\tt WeightSumAttr} is provided (which will be concatenated -! onto the list of real attributes in {\tt inAV} to form the list of -! real attributes for {\tt outAV}). -! -! {\bf N.B.}: The argument {\tt WeightSumAttr} must not be identical -! to any of the real attribute names in {\tt inAV}. -! -! {\bf N.B.}: The output {\tt AttrVect} argument {\tt outAV} is -! allocated memory, and must be destroyed by invoking the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: -! - subroutine GlobalWeightedSumRAttrSP_(inAV, outAV, Weights, comm, & - WeightSumAttr) - -! -! !USES: -! - use m_die - use m_stdio , only : stderr - use m_mpif90 - use m_realkinds, only : SP - - use m_List, only : List - use m_List, only : List_exportToChar => exportToChar - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - real(SP), dimension(:), pointer :: Weights - integer, intent(IN) :: comm - character(len=*), optional, intent(IN) :: WeightSumAttr - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - -! !REVISION HISTORY: -! 8May02 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalWeightedSumRAttrSP_' - - type(AttrVect) :: LocallySummedAV - integer :: myID, ierr - - ! Get local process rank (for potential error reporting purposes) - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: MPI_COMM_RANK() error.',ierr) - endif - - ! Check for consistencey between inAV and the weights array - - if(size(weights) /= AttrVect_lsize(inAV)) then - write(stderr,'(2a,i8,3a)') myname_,':: myID=',myID, & - 'ERROR--mismatch in lengths of ', & - 'input array array argument weights(:) and input AttrVect ',& - 'inAV.' - write(stderr,'(2a,i8)') myname_,':: size(weights)=',size(weights) - write(stderr,'(2a,i8)') myname_,':: length of inAV=', & - AttrVect_lsize(inAV) - call die(myname_) - endif - - if(present(WeightSumAttr)) then - call LocalWeightedSumRAttrSP_(inAV, LocallySummedAV, Weights, & - WeightSumAttr) - else - call LocalWeightedSumRAttrSP_(inAV, LocallySummedAV, Weights) - endif - - call AllReduce_(LocallySummedAV, outAV, AttrVectSUM, comm, ierr) - - ! Clean up intermediate local sums - - call AttrVect_clean(LocallySummedAV) - - end subroutine GlobalWeightedSumRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! 
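A corresponding sketch (not from the original source) for the distributed weighted sum: GlobalWeightedSumRAttr is assumed to be the generic public name, and the double precision weights used here would select the DP module procedure that follows.

      subroutine example_global_weighted_sum(aV, weights, comm)
         use m_realkinds,      only : DP
         use m_AttrVect,       only : AttrVect
         use m_AttrVect,       only : AttrVect_clean => clean
         use m_AttrVectReduce, only : GlobalWeightedSumRAttr
         implicit none
         type(AttrVect), intent(in)      :: aV
         real(DP), dimension(:), pointer :: weights  ! DP weights resolve to the DP procedure
         integer,        intent(in)      :: comm     ! communicator shared by all processes holding aV

         type(AttrVect) :: gSums

         ! Globally weighted sums of all REAL attributes, returned identically
         ! on every process of comm:
         call GlobalWeightedSumRAttr(aV, gSums, weights, comm, WeightSumAttr='wsum')

         call AttrVect_clean(gSums)
      end subroutine example_global_weighted_sum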
!IROUTINE: GlobalWeightedSumRAttrDP_ - Global Weighted Sum of REAL Attributes -! -! !DESCRIPTION: -! Double precision version of GlobalWeightedSumRAttrSP_ -! -! !INTERFACE: -! - subroutine GlobalWeightedSumRAttrDP_(inAV, outAV, Weights, comm, & - WeightSumAttr) - -! -! !USES: -! - use m_die - use m_stdio , only : stderr - use m_mpif90 - use m_realkinds, only : DP - - use m_List, only : List - use m_List, only : List_exportToChar => exportToChar - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAV - real(DP), dimension(:), pointer :: Weights - integer, intent(IN) :: comm - character(len=*), optional, intent(IN) :: WeightSumAttr - -! !OUTPUT PARAMETERS: -! - type(AttrVect), intent(OUT) :: outAV - -! !REVISION HISTORY: -! 8May02 - J.W. Larson - initial version. -! ______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalWeightedSumRAttrDP_' - - type(AttrVect) :: LocallySummedAV - integer :: myID, ierr - - ! Get local process rank (for potential error reporting purposes) - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: MPI_COMM_RANK() error.',ierr) - endif - - ! Check for consistencey between inAV and the weights array - - if(size(weights) /= AttrVect_lsize(inAV)) then - write(stderr,'(2a,i8,3a)') myname_,':: myID=',myID, & - 'ERROR--mismatch in lengths of ', & - 'input array array argument weights(:) and input AttrVect ',& - 'inAV.' - write(stderr,'(2a,i8)') myname_,':: size(weights)=',size(weights) - write(stderr,'(2a,i8)') myname_,':: length of inAV=', & - AttrVect_lsize(inAV) - call die(myname_) - endif - - if(present(WeightSumAttr)) then - call LocalWeightedSumRAttrDP_(inAV, LocallySummedAV, Weights, & - WeightSumAttr) - else - call LocalWeightedSumRAttrDP_(inAV, LocallySummedAV, Weights) - endif - - call AllReduce_(LocallySummedAV, outAV, AttrVectSUM, comm, ierr) - - ! Clean up intermediate local sums - - call AttrVect_clean(LocallySummedAV) - - end subroutine GlobalWeightedSumRAttrDP_ - - end module m_AttrVectReduce -!. - - - - diff --git a/src/externals/mct/mct/m_ConvertMaps.F90 b/src/externals/mct/mct/m_ConvertMaps.F90 deleted file mode 100644 index 5132a697d7d..00000000000 --- a/src/externals/mct/mct/m_ConvertMaps.F90 +++ /dev/null @@ -1,438 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_ConvertMaps - Conversion Between MCT Domain Decomposition Descriptors -! -! !DESCRIPTION: -! -! This module contains routines to convert between the {\tt GlobalMap} -! and {\tt GlobalSegMap} types. Since the {\tt GlobalMap} is a 1-D -! decomposition with one contiguous segment per process, it is always -! possible to create a {\tt GlobalSegMap} containing the same decomposition -! information. In the unusual case that a {\tt GlobalSegMap} contains -! {\em at most} one segment per process, and no two segments overlap, it -! is possible to create a {\tt GlobalMap} describing the same decomposition. -! -! !INTERFACE: - - module m_ConvertMaps -! -! !USES: -! 
- use m_GlobalMap, only : GlobalMap - use m_GlobalSegMap, only : GlobalSegMap - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: GlobalMapToGlobalSegMap - public :: GlobalSegMapToGlobalMap - - - interface GlobalMapToGlobalSegMap ; module procedure & - GlobalMapToGlobalSegMap_ - end interface - interface GlobalSegMapToGlobalMap ; module procedure & - GlobalSegMapToGlobalMap_ - end interface - -! !REVISION HISTORY: -! 12Feb01 - J.W. Larson - initial module -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_ConvertMap' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalMapToGlobalSegMap_ - Convert GlobalMap to GlobalSegMap -! -! !DESCRIPTION: -! This routine takes an input {\tt GlobalMap} argument {\tt GMap}, and -! converts its decomposition information into the output {\tt GlobalSegMap} -! argument {\tt GSMap}. Since the {\tt GlobalMap} is a very special case -! of the more general {\tt GlobalSegMap} decomposition, this conversion is -! always possible. -! -! The motivation of this routine is the fact that the majority of the -! APIs for MCT services require the user to supply a {\tt GlobalSegMap} -! as a domain decomposition descriptor argument. This routine is the -! means by which the user can enjoy the convenience and simplicity of -! the {\tt GlobalMap} datatype (where it is appropriate), but still -! access all of the MCT's functionality. -! -! {\bf N.B.:} This routine creates an allocated structure {\tt GSMap}. -! The user is responsible for deleting this structure using the {\tt clean()} -! method for the {\tt GlobalSegMap} when {\tt GSMap} is no longer needed. -! Failure to do so will create a memory leak. -! -! !INTERFACE: - - subroutine GlobalMapToGlobalSegMap_(GMap, GSMap) - -! -! !USES: -! - use m_stdio, only : stderr - use m_die, only : MP_perr_die, die, warn - - use m_GlobalMap, only : GlobalMap - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_init => init - - use m_MCTWorld, only : ThisMCTWorld - use m_MCTWorld, only : MCTWorld_ComponentNumProcs => ComponentNumProcs - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(out) :: GSMap - -! !REVISION HISTORY: -! 12Feb01 - J.W. Larson - Prototype code. -! 24Feb01 - J.W. Larson - Finished code. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalMapToGlobalSegMap_' - - integer :: ierr, n, NumProcs - integer, dimension(:), allocatable :: start, length, pe_loc - - ! Sanity Check -- is GMap the right size? - - NumProcs = MCTWorld_ComponentNumProcs(ThisMCTWorld, GMap%comp_id) - if(NumProcs /= size(GMap%displs)) then - call warn(myname_,"component/GlobalMap size mismatch") - call die(myname_,":: Size mismatch-NumProcs = ", & - NumProcs,"size(GMap%displs) = ",size(GMap%displs)) - endif - - ! Allocate space for process location - - allocate(start(NumProcs), length(NumProcs), pe_loc(NumProcs), stat=ierr) - if(ierr /= 0) call die(myname_,"allocate(start(NumProcs...",ierr) - - ! 
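      ! (Illustrative note, not in the original source: the loop below is the
      !  entire conversion.  Process n-1 owns exactly one contiguous segment
      !  starting at GMap%displs(n-1)+1 with length GMap%counts(n-1).  For a
      !  hypothetical two-process GlobalMap with counts = (/4,6/) and
      !  displs = (/0,4/), the resulting GlobalSegMap has start = (/1,5/),
      !  length = (/4,6/), and pe_loc = (/0,1/).)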
Load the arrays: - - do n=1,NumProcs - start(n) = GMap%displs(n-1) + 1 - length(n) = GMap%counts(n-1) - pe_loc(n) = n-1 - end do - - call GlobalSegMap_init(GSMap, GMap%comp_id, NumProcs, GMap%gsize, & - start, length, pe_loc) - - ! Clean up... - - deallocate(start, length, pe_loc, stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(start,...",ierr) - - end subroutine GlobalMapToGlobalSegMap_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalSegMapToGlobalMap_ - Convert GlobalSegMap to GlobalMap -! -! !DESCRIPTION: -! This routine takes an input {\tt GlobalSegMap} argument {\tt GSMap}, -! and examines it to determine whether or not it may be expressed in -! {\tt GlobalMap} form. A {\tt GlobalSegMap} can be converted to a -! {\tt GlobalMap} if and only if: -! \begin{enumerate} -! \item Each process on the communicator covered by the -! {\tt GlobalSegMap} contains {\em at most one} segment; -! \item The {\tt GlobalSegMap} is {\em not} haloed (that is, none of -! the segments overlap); and -! \item The start indices of the segments are in the same order as their -! respective process ID numbers. -! \end{enumerate} -! If these conditions are satisfied, {\tt GlobalSegMapToGlobalMap\_()} -! creates an output {\tt GlobalMap} argument {\tt GMap} describing the -! same decomposition as {\tt GSMap}. If these conditions are not satisfied, -! map conversion can not occur, and {\tt GlobalSegMapToGlobalMap\_()} -! has one of two outcomes: -! \begin{enumerate} -! \item If the optional output {\tt INTEGER} argument {\tt status} is -! provided, {\tt GlobalSegMapToGlobalMap\_()} returns without creating -! {\tt GMap}, and returns a non-zero value for {\tt status}. -! \item If the optional output {\tt INTEGER} argument {\tt status} is -! not provided, execution will terminate with an error message. -! \end{enumerate} -! -! The optional output {\tt INTEGER} argument {\tt status}, if provided -! will be returned from {\tt GlobalSegMapToGlobalMap\_()} with a value -! explained by the table below: -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|} -!\hline -!{\bf Value of {\tt status}} & {\bf Significance} \\ -!\hline -!{\tt 0} & Map Conversion Successful \\ -!\hline -!{\tt 1} & Unsuccessful--more than one segment per process, \\ -! & or a negative numer of segments (ERROR) \\ -!\hline -!{\tt 2} & Unsuccessful--{\tt GSMap} haloed \\ -!\hline -!{\tt 3} & Unsuccessful--{\tt GSMap} segments out-of-order \\ -! & with respect to resident process ID ranks \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! {\bf N.B.:} This routine creates an allocated structure {\tt GMap}. -! The user is responsible for deleting this structure using the {\tt clean()} -! method for the {\tt GlobalMap} when {\tt GMap} is no longer needed. -! Failure to do so will create a memory leak. -! -! !INTERFACE: - - subroutine GlobalSegMapToGlobalMap_(GSMap, GMap, status) -! -! !USES: -! 
- use m_stdio, only : stderr - use m_die, only : MP_perr_die, die - - use m_SortingTools , only : IndexSet - use m_SortingTools , only : IndexSort - use m_SortingTools , only : Permute - - use m_MCTWorld, only : MCTWorld - use m_MCTWorld, only : ThisMCTWorld - use m_MCTWorld, only : ComponentNumProcs - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_comp_id => comp_id - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - use m_GlobalSegMap, only : GlobalSegMap_haloed => haloed - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_nlseg => nlseg - use m_GlobalSegMap, only : GlobalSegMap_active_pes => active_pes - - use m_GlobalMap, only : GlobalMap - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap - -! !OUTPUT PARAMETERS: - - type(GlobalMap), intent(out) :: GMap - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 12Feb01 - J.W. Larson - API / first prototype. -! 21Sep02 - J.W. Larson - Near-complete Implementation, -! still, do not call! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalSegMapToGlobalMap_' - - integer :: i, ierr, n - integer :: nlseg, NumActive, NumProcs, NumPEs, NGSegs - integer, dimension(:), pointer :: NumSegs - integer, dimension(:), pointer :: GSMstarts, GSMlengths, GSMpe_locs, perm - logical :: convertible - - ! If the status flag is present, set it to the "success" value: - - if(present(status)) then - status = 0 - endif - - ! How many segments are there in GSMap? If the number of - ! segments is greater than the number of processes on the - ! GlobalSegMap's native communicator conversion to a - ! GlobalMap is not possible. If the number of segments is - ! fewer than the number of PEs, further checks are necessary - ! to determine whether map conversion is possible. - - NumPEs = ComponentNumProcs(ThisMCTWorld, GlobalSegMap_comp_id(GSMap)) - NGSegs = GlobalSegMap_ngseg(GSMap) - - if(NGSegs > NumPEs) then - write(stderr,'(3a,i8,a,i8,2a)') myname_, & - ':: Conversion of input GlobalSegMap to GlobalMap not possible.', & - ' Number of segments is greater than number of PEs. NumPEs = ', & - NumPEs,' NGSegs = ', NGSegs,'. See MCT API Document for more', & - ' information.' - if(present(status)) then - status = 1 - return - else - call die(myname_) - endif - endif - - ! Is GSMap haloed? If it is, map conversion is impossible - - if(GlobalSegMap_haloed(GSMap)) then - write(stderr,'(3a)') myname_, & - ':: input GlobalSegMap is haloed. Conversion to GlobalMap ', & - ' type not possible. See MCT API Document for details.' - if(present(status)) then - status = 2 - return - else - call die(myname_) - endif - endif - - ! At this point, we've done the easy tests. - - ! Return to the first condition: at most one segment per PE. - ! We've eliminated the obvious case of more segments than PEs. - ! Now, we examine the case of fewer segments than PEs, to see - ! if any single PE has more than one segment. - - allocate(NumSegs(0:NumPes-1), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(NumSegs(1:NumPes-1))=',ierr) - - do n=0,NumPes-1 - - ! Is there at most one segment per process? If not, then - ! map conversion is impossible. - - NumSegs(n) = GlobalSegMap_nlseg(GSMap, n) - - if((NumSegs(n) > 1) .or. (NumSegs(n) < 0)) then ! 
fails GMap - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: ERROR: Map conversion not possible due to ', & - 'inappropriate number of segments on PE number ', & - n,'. Number of segments = ',NumSegs(n) - deallocate(NumSegs, stat=ierr) - if(ierr /= 0) then ! problem cleaning up - write(stderr,'(3a)') myname_, & - ':: Encountered error deallocating NumSegs ', & - 'while exiting.' - endif - if(present(status)) then ! return with error code - status = 1 - return - else - call die(myname_) - endif - endif - - end do ! do n=0,NumPes-1 - - deallocate(NumSegs, stat=ierr) - if(ierr /= 0) call die(myname_,'deallocate(NumSegs,...)',ierr) - - ! If execution has reached this point in the code, GSMap has - ! satisfied the first two criteria for conversion to a GlobalMap. - ! The final test is whether or not the global start indices for - ! the segments (which we know by now are at most one per PE) are - ! in the same order as their resident process ID ranks. - - ! Extract start, length, and PE location arrays from GSMap: - - allocate(GSMstarts(NGSegs), GSMlengths(NGSegs), GSMpe_locs(NGSegs), & - perm(NGSegs), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(GSMstarts,...)=',ierr) - - do i=1,NGSegs - GSMstarts(i) = GSMap%start(i) - GSMlengths(i) = GSMap%length(i) - GSMpe_locs(i) = GSMap%pe_loc(i) - end do - - ! Begin sorting process. First, set index permutation. - call IndexSet(perm) - ! Generate sort permutation keyed by PE location - call IndexSort(NGSegs, perm, GSMpe_locs, descend=.false.) - ! Permute segment info arrays using perm(:) - call Permute(GSMstarts, perm, NGSegs) - call Permute(GSMlengths, perm, NGSegs) - call Permute(GSMpe_locs, perm, NGSegs) - - ! Now that these arrays are ordered by PE location, we - ! can check the segment start ordering to see if it is - ! the same. Start with the assumption they are in order, - ! corrsponding to convertible=.TRUE. - - convertible = .TRUE. - ORDER_TEST: do i=1,NGSegs-1 - if(GSMstarts(i) <= GSMstarts(i+1)) then - CYCLE - else - convertible = .FALSE. - EXIT - endif - end do ORDER_TEST - - if(convertible) then ! build output GlobalMap GMAP - - ! Integer components: - - GMap%comp_id = GlobalSegMap_comp_id(GSMap) - GMap%gsize = GlobalSegMap_gsize(GSMap) - - ! lsize is not defined in this case!!! -ETO -! GMap%lsize = GlobalSegMap_lsize(GSMap) - GMap%lsize = -1 - - ! Indexing components: - - allocate(GMap%displs(0:NumPEs-1), GMap%counts(0:NumPEs-1), stat=ierr) - - ! Set the counts(:) values to zero, then copy in the non-zero - ! segment length values - - GMap%counts = 0 - do i=1,NGSegs - GMap%counts(GSMpe_locs(i)) = GSMlengths(i) - end do - - ! From counts(:), build displs(:) - GMap%displs(0) = 0 - do i=1,NumPEs-1 - GMap%displs(i) = GMap%displs(i-1) + GMap%counts(i-1) - end do - - else ! Nullify it - - GMap%comp_id = -1 - GMap%gsize = -1 - GMap%lsize = -1 - nullify(GMap%displs) - nullify(GMap%counts) - - endif - - deallocate(GSMstarts, GSMlengths, GSMpe_locs, perm, stat=ierr) - if(ierr /= 0) call die(myname_,'deallocate(GSMstarts,...)=',ierr) - - end subroutine GlobalSegMapToGlobalMap_ - - end module m_ConvertMaps - - - - - diff --git a/src/externals/mct/mct/m_ExchangeMaps.F90 b/src/externals/mct/mct/m_ExchangeMaps.F90 deleted file mode 100644 index cb6100b23de..00000000000 --- a/src/externals/mct/mct/m_ExchangeMaps.F90 +++ /dev/null @@ -1,613 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
-!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_ExchangeMaps - Exchange of Global Mapping Objects. -! -! !DESCRIPTION: -! This module contains routines that support the exchange of domain -! decomposition descriptors (DDDs) between two MCT components. There is -! support for {\em handshaking} between the two components to determine -! the types of domain decomposition descriptors they employ, {\em loading} -! of data contained within domain decomposition descriptors, and {\em -! map exchange}, resulting in the creation of a remote component's domain -! decomposition descriptor for use by a local component. These routines -! are largely used by MCT's {\tt Router} to create intercomponent -! communications scheduler, and normally should not be used by an MCT -! user. -! -! Currently, the types of map exchange supported by the public routine -! {\tt ExchangeMap()} are summarized in the table below. The first column -! lists the type of DDD used locally on the component invoking -! {\tt ExchangeMap()} (i.e., the input DDD). The second comlumn lists -! the DDD type used on the remote component (i.e., the output DDD). -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|} -!\hline -!{\bf Local DDD Type} & {\bf Remote DDD Type} \\ -!\hline -!{\tt GlobalMap} & {\tt GlobalSegMap} \\ -!\hline -!{\tt GlobalSegMap} & {\tt GlobalSegMap} \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! Currently, we do not support intercomponent map exchange where a -! {\tt GlobalMap} is output. The rationale for this is that any {\tt GlobalMap} -! may always be expressed as a {\tt GlobalSegMap}. -! -! !INTERFACE: - - module m_ExchangeMaps - -! !USES: -! No external modules are used in the declaration section of this module. - - implicit none - - private ! except -! -! !PUBLIC MEMBER FUNCTIONS: -! - public :: ExchangeMap - - interface ExchangeMap ; module procedure & - ExGSMapGSMap_, & ! GlobalSegMap for GlobalSegMap - ExGMapGSMap_ - end interface - -! !SEE ALSO: -! The MCT module m_ConvertMaps for more information regarding the -! relationship between the GlobalMap and GlobalSegMap types. -! The MCT module m_Router to see where these services are used to -! create intercomponent communications schedulers. -! -! !REVISION HISTORY: -! 3Feb01 - J.W. Larson - initial module -! 3Aug01 - E.T. Ong - in ExGSMapGSMap, -! call GlobalSegMap_init with actual shaped arrays -! for non-root processes to satisfy Fortran 90 standard. -! See comments in subroutine. -! 15Feb02 - R. Jacob - use MCT_comm instead of -! MP_COMM_WORLD -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname='MCT::m_ExchangeMaps' - -! Map Handshaking Parameters: Map handshaking occurs via -! exchange of an array of INTEGER flags. - - ! Number of Handshaking Parameters; i.e.size of exhcanged parameters array - - integer, parameter :: NumHandshakePars = 4 - - ! ComponentIDIndex defines the storage location of the flag - ! signifying the component number in MCTWorld - - integer, parameter :: ComponentIDIndex = 1 - - ! MapTypeIndex defines the storage location in the handshake array - ! of the type of map offered for exchange - - integer, parameter :: MapTypeIndex = 2 - - ! NumMapTypes is the number of legitimate MapTypeIndex Values: - - integer, parameter :: NumMapTypes = 2 - - ! 
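      ! (Illustrative note, not in the original source: with the four index
      !  parameters defined in this block, a component with MCT id 3 that
      !  offers a GlobalSegMap covering 10000 grid points in 37 segments
      !  (all values hypothetical) would advertise itself with the handshake
      !  array LocalMapPars = (/ 3, 2, 10000, 37 /), i.e. component id,
      !  map-type flag (2 = GlobalSegMapFlag, declared just below), grid
      !  size, and number of segments.)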
Recognized MapTypeIndex Values: - - integer, parameter :: GlobalMapFlag = 1 - integer, parameter :: GlobalSegMapFlag = 2 - - ! GsizeIndex defines the location of the grid size (number of points) - ! for the map. This size is - - integer, parameter :: GsizeIndex = 3 - - ! NumSegIndex defines the location of the number of segments in the - ! map. For a GlobalMap, this is the number of processes in the map. - ! For a GlobalSegMap, this is the number of global segments (ngseg). - - integer, parameter :: NumSegIndex = 4 - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MapHandshake_ - Exchange Map descriptors. -! -! !DESCRIPTION: -! This routine takes input Map descriptors stored in the {\tt INTEGER} -! array {\tt LocalMapPars}, the local communicator on which this map is -! defined ({\tt LocalComm}), and the remote component ID -! {\tt RemoteCompID}, and effects an exchange of map descriptors with -! the remote component, which are returned in the {\tt INTEGER} array -! {\tt RemoteMapPars}. -! -! {\bf N.B.: } The values present in {\tt LocalMapPars} need to be valid -! only on the root of {\tt LocalComm}. Likewise, the returned values in -! {\tt RemoteMapPars} will be valid on the root of {\tt LocalComm}. -! -! !INTERFACE: - - subroutine MapHandshake_(LocalMapPars, LocalComm, RemoteCompID, & - RemoteMapPars) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die - use m_stdio - use m_MCTWorld, only : ThisMCTWorld - use m_MCTWorld, only : ComponentRootRank - - implicit none -! -! !INPUT PARAMETERS: -! - integer, intent(in) :: LocalMapPars(NumHandshakePars) - integer, intent(in) :: LocalComm - integer, intent(in) :: RemoteCompID -! -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: RemoteMapPars(NumHandshakePars) - -! !REVISION HISTORY: -! 6Feb01 - J.W. Larson - API specification. -! 20Apr01 - R.L. Jacob - add status argument -! to MPI_RECV -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MapHandshake_' - - integer :: ierr, myID, RemoteRootID, SendTag, RecvTag - integer,dimension(MP_STATUS_SIZE) :: status - - call MP_COMM_RANK(LocalComm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'call MP_COMM_RANK()',ierr) - - RemoteRootID = ComponentRootRank(RemoteCompID, ThisMCTWorld) - - if(myID == 0) then ! I am the root on LocalComm - - ! Compute send/receive tags: - - SendTag = 10 * LocalMapPars(ComponentIDIndex) + RemoteCompID - RecvTag = LocalMapPars(ComponentIDIndex) + 10 * RemoteCompID - - ! Post send to RemoteRootID: - - call MPI_SEND(LocalMapPars, NumHandshakePars, MP_INTEGER, & - RemoteRootID, SendTag, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'call MPI_SEND()',ierr) - - ! Post receive from RemoteRootID: - - call MPI_RECV(RemoteMapPars, NumHandshakePars, MP_INTEGER, & - RemoteRootID, RecvTag, ThisMCTWorld%MCT_comm, status, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'call MPI_RECV()',ierr) - - endif ! if(myID == 0) - - end subroutine MapHandshake_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: LoadGlobalMapPars_ - Load GlobalMap descriptors. -! -! !DESCRIPTION: -! 
This routine takes an input {\tt GlobalMap} variable {\tt Gmap}, and -! loads its descriptors the output {\tt INTEGER} array {\tt MapPars}. -! The dimensions of this array, and loading order are all defined in -! the declaration section of this module. -! -! !INTERFACE: - - subroutine LoadGlobalMapPars_(GMap, MapPars) - -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_comp_id => comp_id - use m_GlobalMap, only : GlobalMap_gsize => gsize -! use m_GlobalMap, only : GlobalMap_nprocs => nprocs - - implicit none -! -! !INPUT PARAMETERS: -! - type(GlobalMap), intent(in) :: GMap -! -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: MapPars(NumHandshakePars) - -! !REVISION HISTORY: -! 6Feb01 - J.W. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::LoadGlobalMapPars_' - - MapPars(ComponentIDIndex) = GlobalMap_comp_id(GMap) - MapPars(MapTypeIndex) = GlobalMapFlag - MapPars(GsizeIndex) = GlobalMap_gsize(GMap) -! MapPars(NumSegIndex) = GlobalMap_nprocs(GSMap) - - end subroutine LoadGlobalMapPars_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: LoadGlobalSegMapPars_ - Load GlobalSegMap descriptors. -! -! !DESCRIPTION: -! This routine takes an input {\tt GlobalSegMap} variable {\tt Gmap}, and -! loads its descriptors the output {\tt INTEGER} array {\tt MapPars}. -! The dimensions of this array, and loading order are all defined in -! the declaration section of this module. -! -! !INTERFACE: - - subroutine LoadGlobalSegMapPars_(GSMap, MapPars) - -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_comp_id => comp_id - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - - - implicit none -! -! !INPUT PARAMETERS: -! - type(GlobalSegMap), intent(in) :: GSMap -! -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: MapPars(NumHandshakePars) - -! !REVISION HISTORY: -! 6Feb01 - J.W. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::LoadGlobalSegMapPars_' - - MapPars(ComponentIDIndex) = GlobalSegMap_comp_id(GSMap) - MapPars(MapTypeIndex) = GlobalSegMapFlag - MapPars(GsizeIndex) = GlobalSegMap_gsize(GSMap) - MapPars(NumSegIndex) = GlobalSegMap_ngseg(GSMap) - - end subroutine LoadGlobalSegMapPars_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ExGSMapGSMap_ - Trade of GlobalSegMap structures. -! -! !DESCRIPTION: -! This routine effects the exchange between two components of their -! data decomposition descriptors, each of which is a {\tt GlobalSegMap}. -! The component invoking this routine provides its domain decomposition -! in the form of the input {\tt GlobalSegMap} argument {\tt LocalGSMap}. -! The component with which map exchange takes place is specified by the -! MCT integer component identification number defined by the input -! {\tt INTEGER} argument {\tt RemoteCompID}. The -! 
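! (Illustrative sketch, not part of the original documentation: on the
!  atmosphere side of a hypothetical atmosphere/ocean pair, a call through
!  the generic name ExchangeMap declared above might look like
!
!      call ExchangeMap(AtmGSMap, AtmComm, OcnGSMap, OCN_ID, ierr)
!
!  where AtmGSMap and AtmComm are the local GlobalSegMap and communicator,
!  OCN_ID is the ocean's MCT component id, and the returned OcnGSMap must
!  later be deallocated with the GlobalSegMap clean() method.)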
!INTERFACE: - - subroutine ExGSMapGSMap_(LocalGSMap, LocalComm, RemoteGSMap, & - RemoteCompID, ierr) - -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_init => init - - use m_MCTWorld, only : ThisMCTWorld - use m_MCTWorld, only : ComponentRootRank - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: LocalGSMap ! Local GlobalSegMap - integer, intent(in) :: LocalComm ! Local Communicator - integer , intent(in) :: RemoteCompID ! Remote component id - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(out) :: RemoteGSMap ! Remote GlobalSegMap - integer, intent(out) :: ierr ! Error Flag - -! !REVISION HISTORY: -! 3Feb01 - J.W. Larson - API specification. -! 7Feb01 - J.W. Larson - First full version. -! 20Apr01 - R.L. Jacob - add status argument -! to MPI_RECV -! 25Apr01 - R.L. Jacob - set SendTag and -! RecvTag values -! 3May01 - R.L. Jacob - change MPI_SEND to -! MPI_ISEND to avoid possible buffering problems seen -! on IBM SP. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ExGSMapGSMap_' - -! root ID on local communicator: - integer, parameter :: root = 0 -! Storage for local and remote map descriptors: - integer :: LocalMapPars(NumHandshakePars) - integer :: RemoteMapPars(NumHandshakePars) -! Send and Receive Buffers - integer, dimension(:), allocatable :: SendBuf - integer, dimension(:), allocatable :: RecvBuf -! Send and Receive Tags - integer :: SendTag, RecvTag -! Storage arrays for Remote GlobalSegMap data: - integer, dimension(:), allocatable :: start, length, pe_loc - - integer :: myID, ngseg, remote_root,req - integer :: local_ngseg, remote_ngseg - integer,dimension(MP_STATUS_SIZE) :: status,wstatus - - ! Determine rank on local communicator: - - call MP_COMM_RANK(LocalComm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'call MP_COMM_RANK()',ierr) - - ! If the root, exchange map handshake descriptors, - ! and information needed to initialize the remote map - ! on the local communicator. - - if(myID == root) then - - call LoadGlobalSegMapPars_(LocalGSMap, LocalMapPars) - - call MapHandshake_(LocalMapPars, LocalComm, RemoteCompID, & - RemoteMapPars) - - ! Consistency Checks between LocalMapPars and RemoteMapPars: - - if(LocalMapPars(MapTypeIndex) /= RemoteMapPars(MapTypeIndex)) then - ierr = 2 - write(stderr,*) myname_,":: MCTERROR, Map Type mismatch ", & - "LocalMap Type = ",LocalMapPars(MapTypeIndex)," RemoteMap Type = ", & - RemoteMapPars(MapTypeIndex) - call die(myname_,'Map Type mismatch',ierr) - endif - - if(LocalMapPars(GsizeIndex) /= RemoteMapPars(GsizeIndex)) then - ierr = 3 - write(stderr,*) myname_,":: MCTERROR, Grid Size mismatch ", & - "LocalMap Gsize = ",LocalMapPars(GsizeIndex)," RemoteMap Gsize = ", & - RemoteMapPars(GsizeIndex) - call die(myname_,'Map Grid Size mismatch',ierr) - endif - - if(RemoteCompID /= RemoteMapPars(ComponentIDIndex)) then - ierr = 4 - write(stderr,*) myname_,":: MCTERROR, Component ID mismatch ", & - "RemoteCompID = ",RemoteCompID," RemoteMap CompID = ", & - RemoteMapPars(ComponentIDIndex) - call die(myname_,'Component ID mismatch',ierr) - endif - - ! SendBuf will hold the arrays LocalGSMap%start, LocalGSMap%length, - ! and LocalGSMap%pe_loc in that order. - - allocate(SendBuf(3*LocalMapPars(NumSegIndex)), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(SendBuf...)',ierr) - - ! 
RecvBuf will hold the arrays RemoteGSMap%start, RemoteGSMap%length, - ! and RemoteGSMap%pe_loc in that order. - - allocate(RecvBuf(3*RemoteMapPars(NumSegIndex)), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(RecvBuf...)',ierr) - - ! Load SendBuf in the order described above: - local_ngseg = LocalMapPars(NumSegIndex) - SendBuf(1:local_ngseg) = & - LocalGSMap%start(1:local_ngseg) - SendBuf(local_ngseg+1:2*local_ngseg) = & - LocalGSMap%length(1:local_ngseg) - SendBuf(2*local_ngseg+1:3*local_ngseg) = & - LocalGSMap%pe_loc(1:local_ngseg) - - ! Determine the remote component root: - - remote_root = ComponentRootRank(RemoteMapPars(ComponentIDIndex), & - ThisMCTWorld) - - SendTag = 10 * LocalMapPars(ComponentIDIndex) + RemoteCompID - RecvTag = LocalMapPars(ComponentIDIndex) + 10 * RemoteCompID - - ! Send off SendBuf to the remote component root: - - call MPI_ISEND(SendBuf(1), 3*LocalMapPars(NumSegIndex), MP_INTEGER, & - remote_root, SendTag, ThisMCTWorld%MCT_comm, req, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MPI_SEND(SendBuf...',ierr) - - ! Receive RecvBuf from the remote component root: - - call MPI_RECV(RecvBuf, 3*RemoteMapPars(NumSegIndex), MP_INTEGER, & - remote_root, RecvTag, ThisMCTWorld%MCT_comm, status, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MPI_Recv(RecvBuf...',ierr) - - call MPI_WAIT(req,wstatus,ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MPI_WAIT(SendBuf..',ierr) - - ! Allocate arrays start(:), length(:), and pe_loc(:) - - allocate(start(RemoteMapPars(NumSegIndex)), & - length(RemoteMapPars(NumSegIndex)), & - pe_loc(RemoteMapPars(NumSegIndex)), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(start...',ierr) - - ! Unpack RecvBuf into arrays start(:), length(:), and pe_loc(:) - remote_ngseg = RemoteMapPars(NumSegIndex) - start(1:remote_ngseg) = RecvBuf(1:remote_ngseg) - length(1:remote_ngseg) = & - RecvBuf(remote_ngseg+1:2*remote_ngseg) - pe_loc(1:remote_ngseg) = & - RecvBuf(2*remote_ngseg+1:3*remote_ngseg) - - endif ! if(myID == root) - - ! Non-root processes call GlobalSegMap_init with start, - ! length, and pe_loc, although these arguments are - ! not used in the subroutine. Since these correspond to dummy - ! shaped array arguments in GlobalSegMap_init, the Fortran 90 - ! standard dictates that the actual arguments must contain - ! complete shape information. Therefore, these array arguments - ! must be allocated on all processes. - - if(myID /= root) then - - allocate(start(1), length(1), pe_loc(1), stat=ierr) - if(ierr /= 0) call die(myname_,'non-root allocate(start...',ierr) - - endif - - - ! Initialize the Remote GlobalSegMap RemoteGSMap - - call GlobalSegMap_init(RemoteGSMap, RemoteMapPars(NumSegIndex), & - start, length, pe_loc, root, LocalComm, & - RemoteCompID, RemoteMapPars(GsizeIndex)) - - - ! Deallocate allocated arrays - - deallocate(start, length, pe_loc, stat=ierr) - if(ierr /= 0) then - call die(myname_,'deallocate(start...',ierr) - endif - - ! Deallocate allocated arrays on the root: - - if(myID == root) then - - deallocate(SendBuf, RecvBuf, stat=ierr) - if(ierr /= 0) then - call die(myname_,'deallocate(SendBuf...',ierr) - endif - - endif ! if(myID == root) - - end subroutine ExGSMapGSMap_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ExGMapGSMap_ - Trade of GlobalMap for GlobalSegMap. -! -! !DESCRIPTION: -! 
This routine allows a component to report its domain decomposition -! using a {\tt GlobalMap} (the input argument {\tt LocalGMap}), and -! receive the domain decomposition of a remote component in the form -! of a {\tt GlobalSegMap} (the output argument {\tt RemoteGSMap}. The -! component with which map exchange occurs is defined by its component -! ID number (the input {\tt INTEGER} argument {\tt RemoteCompID}). -! Currently, this operation is implemented as an exchange of maps between -! the root nodes of each component's communicator, and then propagated -! across the local component's communicator. This requires the user to -! provide the local communicator (the input {\tt INTEGER} argument -! {\tt LocalComm}). The success (failure) of this operation is reported -! in the zero (nonzero) value of the output {\tt INTEGER} argument -! {\tt ierr}. -! -! !INTERFACE: - - subroutine ExGMapGSMap_(LocalGMap, LocalComm, RemoteGSMap, & - RemoteCompID, ierr) - -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - - use m_GlobalMap, only : GlobalMap - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_init => init - use m_GlobalSegMap, only : GlobalSegMap_clean => clean - - use m_ConvertMaps, only : GlobalMapToGlobalSegMap - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: LocalGMap ! Local GlobalMap - integer, intent(in) :: LocalComm ! Local Communicator - integer, intent(in) :: RemoteCompID ! Remote component id - - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(out) :: RemoteGSMap ! Remote GlobalSegMap - integer, intent(out) :: ierr ! Error Flag - -! !REVISION HISTORY: -! 3Feb01 - J.W. Larson - API specification. -! 26Sep02 - J.W. Larson - Implementation. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ExGMapGSMap_' - type(GlobalSegMap) :: LocalGSMap - - ! Convert LocalGMap to a GlobalSegMap - - call GlobalMapToGlobalSegMap(LocalGMap, LocalGSMap) - - ! Exchange local decomposition in GlobalSegMap form with - ! the remote component: - - call ExGSMapGSMap_(LocalGSMap, LocalComm, RemoteGSMap, & - RemoteCompID, ierr) - - ! Destroy LocalGSMap - - call GlobalSegMap_clean(LocalGSMap) - - end subroutine ExGMapGSMap_ - - end module m_ExchangeMaps - - - - - - - diff --git a/src/externals/mct/mct/m_GeneralGrid.F90 b/src/externals/mct/mct/m_GeneralGrid.F90 deleted file mode 100644 index 474fbf9089a..00000000000 --- a/src/externals/mct/mct/m_GeneralGrid.F90 +++ /dev/null @@ -1,3315 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_GeneralGrid -- Physical Coordinate Grid Information Storage -! -! !DESCRIPTION: -! The {\tt GeneralGrid} data type is a flexible, generic structure for -! storing physical coordinate grid information. The {\tt GeneralGrid} -! may be employed to store coordinate grids of arbitrary dimension, and -! is also capable of supporting unstructured grids such as meteorological -! observation data streams. The grid is representated by a literal -! listing of the gridpoint coordinates, along with other integer and real -! {\em attributes} associated with each location. Examples of real -! 
non-coordinate attributes are grid cell length, cross-sectional area, and -! volume elements, projections of local directional unit vectors onto -! {\em et cetera} A {\tt GeneralGrid} as at minimum one integer -! attribute---{\em the global grid point number}, or {\tt GlobGridNum}, -! which serves as a unique identifier for each physical grid location. -! -! The real attributes of of the {\tt GeneralGrid} are grouped as {\tt List} -! components: -! \begin{itemize} -! \item {\tt GGrid\%coordinate\_list} contains the list of the physical -! dimension names of the grid. The user initializes a {\tt List} by -! supplying the items in it as a string with the items delimitted by -! colons. For example, setting the coordinates for Euclidean 3-space -! is accomplished by a choice of {\tt 'x:y:z'}, cylindrical coordinates -! by {\tt 'rho:theta:z'}, spherical coordinates by {\tt 'r:theta:phi'}, -! {\em et cetera}. -! \item {\tt GGrid\%weight\_list} contains the names of the spatial -! cell length, area, and volume weights associated with the grid. These -! are also stored in {\tt List} form, and are set by the user in the same -! fashion as described above for coordinates. For example, one might -! wish create cell weight attributes for a cylindrical grid by defining -! a weight list of {\tt 'drho:dphi:rhodphi:dz}. -! \item {\tt GGrid\%other\_list} is space for the user to define other -! real attributes. For example, one might wish to do vector calculus -! operatons in spherical coordinates. Since the spherical coordinate -! unit vectors ${\hat r}$, ${\hat \theta}$, and ${\hat \phi}$ -! vary in space, it is sometimes useful to store their projections on -! the fixed Euclidean unit vectors ${\bf \hat x}$, ${\bf \hat y}$, and -! ${\bf \hat z}$. To do this one might set up a list of attributes -! using the string -! \begin{verbatim} -! 'rx:ry:rz:thetax:thetay:thetaz:phix:phiy:phyz' -! \end{verbatim} -! \item {\tt GGrid\%index\_list} provides space for the user to define -! integer attributes such as alternative indexing schemes, indices for -! defining spatial regions, {\em et cetera}. This attribute list contains -! all the integer attributes for the {\tt GeneralGrid} save one: the -! with the ever-present {\em global gridpoint number attribute} -! {\tt GlobGridNum}, which is set automatically by MCT. -! \end{itemize} -! -! This module contains the definition of the {\tt GeneralGrid} datatype, -! various methods for creating and destroying it, query methods, and tools -! for multiple-key sorting of gridpoints. -! -! !INTERFACE: - - module m_GeneralGrid - -! -! !USES: -! - use m_List, only : List ! Support for List components. - - use m_AttrVect, only : AttrVect ! Support for AttrVect component. - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: GeneralGrid ! The class data structure - - Type GeneralGrid -#ifdef SEQUENCE - sequence -#endif - type(List) :: coordinate_list - type(List) :: coordinate_sort_order - logical, dimension(:), pointer :: descend - type(List) :: weight_list - type(List) :: other_list - type(List) :: index_list - type(AttrVect) :: data - End Type GeneralGrid - -! !PUBLIC MEMBER FUNCTIONS: - - public :: init ! Create a GeneralGrid - public :: initCartesian ! - public :: initUnstructured ! - public :: clean ! Destroy a GeneralGrid - public :: zero ! Zero data in a GeneralGrid - - ! Query functions----------------- - public :: dims ! Return dimensionality of the GeneralGrid - public :: indexIA ! Index integer attribute (indices) - public :: indexRA ! 
Index integer attribute (coords/weights) - public :: lsize ! Return local number of points - public :: exportIAttr ! Return INTEGER attribute as a vector - public :: exportRAttr ! Return REAL attribute as a vector - - ! Manipulation-------------------- - public :: importIAttr ! Insert INTEGER vector as attribute - public :: importRAttr ! Insert REAL vector as attribute - public :: Sort ! Sort point data by coordinates -> permutation - public :: Permute ! Rearrange point data using input permutation - public :: SortPermute ! Sort and Permute point data - - interface init ; module procedure & - init_, & - initl_, & - initgg_ - end interface - interface initCartesian ; module procedure & - initCartesianSP_, & - initCartesianDP_ - end interface - interface initUnstructured ; module procedure & - initUnstructuredSP_, & - initUnstructuredDP_ - end interface - interface clean ; module procedure clean_ ; end interface - interface zero ; module procedure zero_ ; end interface - - interface dims ; module procedure dims_ ; end interface - interface indexIA ; module procedure indexIA_ ; end interface - interface indexRA ; module procedure indexRA_ ; end interface - interface lsize ; module procedure lsize_ ; end interface - - interface exportIAttr ; module procedure exportIAttr_ ; end interface - interface exportRAttr ; module procedure & - exportRAttrSP_, & - exportRAttrDP_ - end interface - interface importIAttr ; module procedure importIAttr_ ; end interface - interface importRAttr ; module procedure & - importRAttrSP_, & - importRAttrDP_ - end interface - - interface Sort ; module procedure Sort_ ; end interface - interface Permute ; module procedure Permute_ ; end interface - interface SortPermute ; module procedure SortPermute_ ; end interface - -! !PUBLIC DATA MEMBERS: - -! CHARACTER Tag for GeneralGrid Global Grid Point Identification Number - - character(len=*), parameter :: GlobGridNum='GlobGridNum' - -! !SEE ALSO: -! The MCT module m_AttrVect and the mpeu module m_List. - -! !REVISION HISTORY: -! 25Sep00 - J.W. Larson - initial prototype -! 31Oct00 - J.W. Larson - modified the -! GeneralGrid type to allow inclusion of grid cell -! dimensions (lengths) and area/volume weights. -! 15Jan01 - J.W. Larson implemented new GeneralGrid type -! definition and added numerous APIs. -! 17Jan01 - J.W. Larson fixed minor bug in module header use -! statement. -! 19Jan01 - J.W. Larson added other_list and coordinate_sort_order -! components to the GeneralGrid type. -! 21Mar01 - J.W. Larson - deleted the initv_ API (more study -! needed before implementation. -! 2May01 - J.W. Larson - added initgg_ API (replaces old initv_). -! 13Dec01 - J.W. Larson - added import and export methods. -! 27Mar02 - J.W. Larson - Corrected usage of -! m_die routines throughout this module. -! 5Aug02 - E. Ong - Modified GeneralGrid usage -! to allow user-defined grid numbering schemes. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_GeneralGrid' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_ - Create an Empty GeneralGrid -! -! !DESCRIPTION: -! The routine {\tt init\_()} creates the storage space for grid point -! coordinates, area/volume weights, and other coordinate data ({\em e.g.}, -! local cell dimensions). 
These data are referenced by {\tt List} -! components that are also created by this routine (see the documentation -! of the declaration section of this module for more details about setting -! list information). Each of the input {\tt CHARACTER} arguments is a -! colon-delimited string of attribute names, each corrsponding to a -! {\tt List} element of the output {\tt GeneralGrid} argument {\tt GGrid}, -! and are summarized in the table below: -! -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|l|l|l|l|} -!\hline -!{\bf Argument} & {\bf Component of {\tt GGrid}} & {\bf Significance} & {\bf Required?} \\ -!\hline -!{\tt CoordChars} & {\tt GGrid\%coordinate\_list} & Dimension Names & Yes \\ -!\hline -!{\tt CoordSortOrder} & {\tt GGrid\%coordinate\_sort\_order} & Grid Point & No \\ -! & & Sorting Keys & \\ -!\hline -!{\tt WeightChars} & {\tt GGrid\%weight\_list} & Grid Cell & No \\ -! & & Length, Area, and & \\ -! & & Volume Weights & \\ -!\hline -!{\tt OtherChars} & {\tt GGrid\%other\_list} & All Other & No \\ -! & & Real Attributes & \\ -!\hline -!{\tt IndexChars} & {\tt GGrid\%index\_list} & All Other & No \\ -! & & Integer Attributes & \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! The input {\tt INTEGER} argument {\tt lsize} defines the number of grid points -! to be stored in {\tt GGrid}. -! -! If a set of sorting keys is supplied in the argument {\tt CoordSortOrder}, -! the user can control whether the sorting by each key is in descending or -! ascending order by supplying the input {\tt LOGICAL} array {\tt descend(:)}. -! By default, all sorting is in {\em ascending} order for each key if the -! argument {\tt descend} is not provided. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt GGrid} is dynamically -! allocated memory. When one no longer needs {\tt GGrid}, one should -! release this space by invoking {\tt clean()} for the {\tt GeneralGrid}. -! -! !INTERFACE: - - subroutine init_(GGrid, CoordChars, CoordSortOrder, descend, WeightChars, & - OtherChars, IndexChars, lsize ) -! -! !USES: -! - use m_stdio - use m_die - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_nitem => nitem - use m_List, only : List_shared => GetSharedListIndices - use m_List, only : List_append => append - use m_List, only : List_copy => copy - use m_List, only : List_nullify => nullify - use m_List, only : List_clean => clean - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: CoordChars - character(len=*), optional, intent(in) :: CoordSortOrder - character(len=*), optional, intent(in) :: WeightChars - logical, dimension(:), optional, pointer :: descend - character(len=*), optional, intent(in) :: OtherChars - character(len=*), optional, intent(in) :: IndexChars - integer, optional, intent(in) :: lsize - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: GGrid - -! !REVISION HISTORY: -! 25Sep00 - Jay Larson - initial prototype -! 15Jan01 - Jay Larson - modified to fit -! new GeneralGrid definition. -! 19Mar01 - Jay Larson - added OtherChars -! 25Apr01 - Jay Larson - added GlobGridNum -! as a mandatory integer attribute. -! 13Jun01 - Jay Larson - No longer define -! blank List attributes of the GeneralGrid. Previous -! versions of this routine had this feature, and this -! caused problems with the GeneralGrid Send and Receive -! operations on the AIX platform. -! 13Jun01 - R. Jacob - nullify any pointers -! 
for lists not declared. -! 15Feb02 - Jay Larson - made the input -! argument CoordSortOrder mandatory (rather than -! optional). -! 18Jul02 - E. Ong - replaced this version of -! init with one that calls initl_. -! 5Aug02 - E. Ong - made the input argument -! CoordSortOrder optional to allow user-defined grid -! numbering schemes. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::init_' - - ! List to store real and integer attributes - type(List) :: RAList, IAList - - ! Overlapping index storage arrays: - integer, dimension(:), pointer :: & - CoordListIndices, CoordSortOrderIndices - - ! Temporary vars - integer :: NumShared, nitems, i, l, ierr - - ! Let's begin by nullifying everything: - - call List_nullify(GGrid%coordinate_list) - call List_nullify(GGrid%coordinate_sort_order) - call List_nullify(GGrid%weight_list) - call List_nullify(GGrid%other_list) - call List_nullify(GGrid%index_list) - nullify(GGrid%descend) - - ! Convert the Character arguments to the appropriate - ! GeneralGrid components. - - ! Set up the integer and real attribute lists. - - call List_init(GGrid%coordinate_list,trim(CoordChars)) - call List_copy(RAList,GGrid%coordinate_list) - - if(present(CoordSortOrder)) then - call List_init(GGrid%coordinate_sort_order,trim(CoordSortOrder)) - endif - - if(present(WeightChars)) then - call List_init(GGrid%weight_list,trim(WeightChars)) - call List_append(RAList, GGrid%weight_list) - endif - - if(present(OtherChars)) then - call List_init(GGrid%other_list,trim(OtherChars)) - call List_append(RAList, GGrid%other_list) - endif - - call List_init(IAList,GlobGridNum) - - if(present(IndexChars)) then - call List_init(GGrid%index_list,trim(IndexChars)) - call List_append(IAList, GGrid%index_list) - endif - - ! Check the lists that we've initialized : - - nitems = List_nitem(GGrid%coordinate_list) - - ! Check the number of coordinates - - if(nitems <= 0) then - write(stderr,*) myname_, & - ':: ERROR CoordList is empty!' - call die(myname_,'List_nitem(CoordList) <= 0',nitems) - endif - - ! Check the items in the coordinate list and the - ! coordinate grid sort keys...they should contain - ! the same items. - - if(present(CoordSortOrder)) then - - call List_shared(GGrid%coordinate_list,GGrid%coordinate_sort_order, & - NumShared,CoordListIndices,CoordSortOrderIndices) - - deallocate(CoordListIndices,CoordSortOrderIndices,stat=ierr) - if(ierr/=0) call die(myname_,'deallocate(CoordListIndices..)',ierr) - - if(NumShared /= nitems) then - call die(myname_,'CoordSortOrder must have the same items & - & as CoordList',abs(nitems-NumShared)) - endif - - endif - - ! If the LOGICAL argument descend is present, check the - ! number of entries to ensure they match the grid dimensionality. - ! If descend is not present, assume all coordinate grid point - ! sortings will be in ascending order. - - if(present(descend)) then - - if( ( (.not.associated(descend)) .or. & - (.not.present(CoordSortOrder)) ) .or. & - (size(descend) /= nitems) ) then - - write(stderr,*) myname_, & - ':: ERROR using descend argument, & - &associated(descend) = ', associated(descend), & - ' present(CoordSortOrder) = ', present(CoordSortOrder), & - ' size(descend) = ', size(descend), & - ' List_nitem(CoordSortOrder) = ', & - List_nitem(GGrid%coordinate_sort_order) - call die(myname_, 'ERROR using -descend- argument; & - & see stderr file for details') - endif - - endif - - ! Finally, Initialize GGrid%descend from descend(:). - ! 
If descend argument is not present, set it to the default .false. - - if(present(CoordSortOrder)) then - - allocate(GGrid%descend(nitems), stat=ierr) - if(ierr /= 0) call die(myname_,"allocate GGrid%descend...",ierr) - - if(present(descend)) then - - do i=1,nitems - GGrid%descend(i) = descend(i) - enddo - - else - - do i=1,nitems - GGrid%descend(i) = .FALSE. - enddo - - endif - - endif - - ! Initialize GGrid%data using IAList, RAList, and lsize (if - ! present). - - l = 0 - if(present(lsize)) l=lsize - - call AttrVect_init(GGrid%data, IAList, RAList, l) - - - ! Deallocate the temporary variables - - call List_clean(IAList) - call List_clean(RAList) - - end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initl_ - Create an Empty GeneralGrid from Lists -! -! !DESCRIPTION: -! The routine {\tt initl\_()} creates the storage space for grid point -! coordinates, area/volume weights, and other coordinate data ({\em e.g.}, -! local cell dimensions). These data are referenced by {\tt List} -! components that are also created by this routine (see the documentation -! of the declaration section of this module for more details about setting -! list information). Each of the input {\tt List} arguments is used -! directly to create the corresponding -! {\tt List} element of the output {\tt GeneralGrid} argument {\tt GGrid}, -! and are summarized in the table below: -! -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|l|l|l|l|} -!\hline -!{\bf Argument} & {\bf Component of {\tt GGrid}} & {\bf Significance} & {\bf Required?} \\ -!\hline -!{\tt CoordList} & {\tt GGrid\%coordinate\_list} & Dimension Names & Yes \\ -!\hline -!{\tt CoordSortOrder} & {\tt GGrid\%coordinate\_sort\_order} & Grid Point & No \\ -! & & Sorting Keys & \\ -!\hline -!{\tt WeightList} & {\tt GGrid\%weight\_list} & Grid Cell & No \\ -! & & Length, Area, and & \\ -! & & Volume Weights & \\ -!\hline -!{\tt OtherList} & {\tt GGrid\%other\_list} & All Other & No \\ -! & & Real Attributes & \\ -!\hline -!{\tt IndexList} & {\tt GGrid\%index\_list} & All Other & No \\ -! & & Integer Attributes & \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! The input {\tt INTEGER} argument {\tt lsize} defines the number of grid points -! to be stored in {\tt GGrid}. -! -! If a set of sorting keys is supplied in the argument {\tt CoordSortOrder}, -! the user can control whether the sorting by each key is in descending or -! ascending order by supplying the input {\tt LOGICAL} array {\tt descend(:)}. -! By default, all sorting is in {\em ascending} order for each key if the -! argument {\tt descend} is not provided. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt GGrid} is dynamically -! allocated memory. When one no longer needs {\tt GGrid}, one should -! release this space by invoking {\tt clean()} for the {\tt GeneralGrid}. -! -! !INTERFACE: - - subroutine initl_(GGrid, CoordList, CoordSortOrder, descend, WeightList, & - OtherList, IndexList, lsize ) -! -! !USES: -! 
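As a hedged illustration of the List-based constructor documented above, a caller might build the input Lists and invoke the generic {\tt init} (which resolves to {\tt initl\_} when given {\tt List} arguments) roughly as sketched here; the wrapper routine name, list contents, and grid size are hypothetical, not part of MCT:

      subroutine build_grid_from_lists(GGrid)
         use m_List,        only : List
         use m_List,        only : List_init  => init
         use m_List,        only : List_clean => clean
         use m_GeneralGrid, only : GeneralGrid, init
         implicit none
         type(GeneralGrid), intent(out) :: GGrid
         type(List) :: CoordList, WeightList

         call List_init(CoordList,  'lat:lon')    ! coordinate names
         call List_init(WeightList, 'grid_area')  ! one cell weight attribute

         ! Keyword arguments of type List select initl_ via the generic
         ! init; storage for 100 (as yet unfilled) points is allocated.
         call init(GGrid, CoordList=CoordList, WeightList=WeightList, lsize=100)

         call List_clean(CoordList)
         call List_clean(WeightList)
      end subroutine build_grid_from_lists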
- - use m_stdio - use m_die - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_allocated => allocated - use m_List, only : List_nitem => nitem - use m_List, only : List_shared => GetSharedListIndices - use m_List, only : List_append => append - use m_List, only : List_copy => copy - use m_List, only : List_nullify => nullify - use m_List, only : List_clean => clean - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - - implicit none - -! !INPUT PARAMETERS: -! - Type(List), intent(in) :: CoordList - Type(List), optional, intent(in) :: CoordSortOrder - Type(List), optional, intent(in) :: WeightList - logical, dimension(:), optional, pointer :: descend - Type(List), optional, intent(in) :: OtherList - Type(List), optional, intent(in) :: IndexList - integer, optional, intent(in) :: lsize - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: GGrid - -! !REVISION HISTORY: -! 10May01 - Jay Larson - initial version -! 8Aug01 - E.T. Ong - changed list assignment(=) -! to list copy to avoid compiler bugs with pgf90 -! 17Jul02 - E. Ong - general revision; -! added error checks -! 5Aug02 - E. Ong - made input argument -! CoordSortOrder optional to allow for user-defined -! grid numbering schemes -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initl_' - - ! List to store real and integer attributes - type(List) :: RAList, IAList - - ! Overlapping attribute index storage arrays: - integer, dimension(:), pointer :: & - CoordListIndices, CoordSortOrderIndices - - ! Temporary vars - integer :: NumShared, nitems, i, l, ierr - - ! Let's begin by nullifying everything: - - call List_nullify(GGrid%coordinate_list) - call List_nullify(GGrid%coordinate_sort_order) - call List_nullify(GGrid%weight_list) - call List_nullify(GGrid%other_list) - call List_nullify(GGrid%index_list) - nullify(GGrid%descend) - - ! Check the arguments: - - nitems = List_nitem(CoordList) - - ! Check the number of coordinates - - if(nitems <= 0) then - write(stderr,*) myname_, & - ':: ERROR CoordList is empty!' - call die(myname_,'List_nitem(CoordList) <= 0',nitems) - endif - - ! Check the items in the coordinate list and the - ! coordinate grid sort keys...they should contain - ! the same items. - - if(present(CoordSortOrder)) then - - call List_shared(CoordList,CoordSortOrder,NumShared, & - CoordListIndices,CoordSortOrderIndices) - - deallocate(CoordListIndices,CoordSortOrderIndices,stat=ierr) - if(ierr/=0) call die(myname_,'deallocate(CoordListIndices..)',ierr) - - if(NumShared /= nitems) then - call die(myname_,'CoordSortOrder must have the same items & - & as CoordList',abs(nitems-NumShared)) - endif - - endif - - ! If the LOGICAL argument descend is present, check the - ! number of entries to ensure they match the grid dimensionality. - ! If descend is not present, assume all coordinate grid point - ! sortings will be in ascending order. - - if(present(descend)) then - - if( ( (.not.associated(descend)) .or. & - (.not.present(CoordSortOrder)) ) .or. 
& - (size(descend) /= nitems) ) then - - write(stderr,*) myname_, & - ':: ERROR using descend argument, & - &associated(descend) = ', associated(descend), & - ' present(CoordSortOrder) = ', present(CoordSortOrder), & - ' size(descend) = ', size(descend), & - ' List_nitem(CoordSortOrder) = ', & - List_nitem(CoordSortOrder) - call die(myname_, 'ERROR using -descend- argument; & - &stderr file for details') - endif - - endif - - ! Initialize GGrid%descend from descend(:), if present. If - ! the argument descend(:) was not passed, set GGrid%descend - ! to the default .false. - - if(present(CoordSortOrder)) then - - allocate(GGrid%descend(nitems), stat=ierr) - if(ierr /= 0) call die(myname_,"allocate GGrid%descend...",ierr) - - if(present(descend)) then - - do i=1,nitems - GGrid%descend(i) = descend(i) - enddo - - else - - do i=1,nitems - GGrid%descend(i) = .FALSE. - enddo - - endif - - endif - - ! Process input lists and create the appropriate GeneralGrid - ! List components - - call List_copy(GGrid%coordinate_list,CoordList) - call List_copy(RAList,CoordList) - - if(present(CoordSortOrder)) then - if(List_allocated(CoordSortOrder)) then - call List_copy(GGrid%coordinate_sort_order,CoordSortOrder) - else - call die(myname_,"Argument CoortSortOrder not allocated") - endif - endif - - ! Concatenate present input Lists to create RAList, and - ! at the same time assign the List components of GGrid - - if(present(WeightList)) then - if(List_allocated(WeightList)) then - call List_copy(GGrid%weight_list,WeightList) - call List_append(RAList, WeightList) - else - call die(myname_,"Argument WeightList not allocated") - endif - endif - - if(present(OtherList)) then - if(List_allocated(OtherList)) then - call List_copy(GGrid%other_list,OtherList) - call List_append(RAList, OtherList) - else - call die(myname_,"Argument OtherList not allocated") - endif - endif - - ! Concatenate present input Lists to create IAList - - call List_init(IAList,GlobGridNum) - - if(present(IndexList)) then - call List_copy(GGrid%index_list,IndexList) - call List_append(IAList, IndexList) - endif - - ! Initialize GGrid%data using IAList, RAList, and lsize (if - ! present). - - l = 0 - if(present(lsize)) l = lsize - - call AttrVect_init(GGrid%data, IAList, RAList, l) - - ! Deallocate the temporary variables - - call List_clean(IAList) - call List_clean(RAList) - - end subroutine initl_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initgg_ - Create a GeneralGrid from Another -! -! !DESCRIPTION: -! The routine {\tt initgg\_()} creates the storage space for grid point -! coordinates, area/volume weights, and other coordinate data ({\em e.g.}, -! nearest-neighbor coordinates). These data are all copied from the -! already initialized input {\tt GeneralGrid} argument {\tt iGGrid}. This -! routine initializes the output {\tt GeneralGrid} argument {\tt oGGrid} -! with the same {\tt List} data as {\tt iGGrid}, but with storage space -! for {\tt lsize} gridpoints. -! -! {\bf N.B.}: Though the attribute lists and gridpoint sorting strategy -! of {\tt iGGrid} is copied to {\tt oGGrid}, the actual values of the -! attributes are not. -! -! {\bf N.B.}: It is assumed that {\tt iGGrid} has been initialized. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt oGGrid} is dynamically -! allocated memory. When one no longer needs {\tt oGGrid}, one should -! 
release this space by invoking {\tt GeneralGrid\_clean()}. -! -! !INTERFACE: - - subroutine initgg_(oGGrid, iGGrid, lsize) -! -! !USES: -! - use m_stdio - use m_die - - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_List, only : List_copy => copy - use m_List, only : List_nitems => nitem - use m_List, only : List_nullify => nullify - - use m_AttrVect, only: AttrVect - use m_AttrVect, only: AttrVect_init => init - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: iGGrid - integer, optional, intent(in) :: lsize - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: oGGrid - -! !REVISION HISTORY: -! 2May01 - Jay Larson - Initial version. -! 13Jun01 - Jay Larson - Now, undefined List -! components of the GeneralGrid iGGrid are no longer -! copied to oGGrid. -! 8Aug01 - E.T. Ong - changed list assignment(=) -! to list copy to avoid compiler bugs with pgf90 -! 24Jul02 - E.T. Ong - updated this init version -! to correspond with initl_ -! 5Aug02 - E. Ong - made input argument -! CoordSortOrder optional to allow for user-defined -! grid numbering schemes -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initgg_' -! Number of grid points, number of grid dimensions - integer :: n, ncoord, norder -! Loop index and Error Flag - integer :: i, ierr - - ! Start by nullifying everything: - - call List_nullify(oGGrid%coordinate_list) - call List_nullify(oGGrid%coordinate_sort_order) - call List_nullify(oGGrid%weight_list) - call List_nullify(oGGrid%other_list) - call List_nullify(oGGrid%index_list) - nullify(oGGrid%descend) - - ! Brief argument check: - - ncoord = dims_(iGGrid) ! dimensionality of the GeneralGrid - - if(associated(iGGrid%descend)) then - - if(size(iGGrid%descend) /= ncoord) then ! size mismatch - call die(myname_,"size(iGGrid%descend) must equal ncoord, & - & size(iGGrid%descend) = ", size(iGGrid%descend), & - "ncoord = ", ncoord ) - endif - - endif - - ! If iGGrid%descend has been allocated, copy its contents; - ! allocate and fill oGGrid%descend - - if(associated(iGGrid%descend)) then - - allocate(oGGrid%descend(ncoord), stat=ierr) - if(ierr /= 0) then - call die(myname_,"allocate(oGGrid%descend...", ierr) - endif - - do i=1,ncoord - oGGrid%descend(i) = iGGrid%descend(i) - end do - - endif - - ! Copy list data from iGGrid to oGGrid. - - call List_copy(oGGrid%coordinate_list,iGGrid%coordinate_list) - if(List_allocated(iGGrid%coordinate_sort_order)) then - call List_copy(oGGrid%coordinate_sort_order,iGGrid%coordinate_sort_order) - endif - if(List_allocated(iGGrid%weight_list)) then - call List_copy(oGGrid%weight_list,iGGrid%weight_list) - endif - if(List_allocated(iGGrid%other_list)) then - call List_copy(oGGrid%other_list,iGGrid%other_list) - endif - if(List_allocated(iGGrid%index_list)) then - call List_copy(oGGrid%index_list,iGGrid%index_list) - endif - - ! if lsize is present, use it to set n; if not, set n=0 - - n = 0 - if(present(lsize)) n=lsize - - ! Now, initialize oGGrid%data from iGGrid%data, but - ! with length n. - - call AttrVect_init(oGGrid%data, iGGrid%data, n) - - end subroutine initgg_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initCartesianSP_ - Initialize a Cartesian GeneralGrid -! -! !DESCRIPTION: -! 
The routine {\tt initCartesian\_()} creates the storage space for grid point -! coordinates, area and volume weights, and other coordinate data ({\em e.g.}, -! cell area and volume weights). The names of the Cartesian axes are supplied -! by the user as a colon-delimitted string in the input {\tt CHARACTER} -! argument {\tt CoordChars}. For example, a Cartesian grid for Euclidian -! 3-space would have ${\tt CoordChars} = {\tt 'x:y:z'}$. The user can -! define named real attributes for spatial weighting data in the input -! {\tt CHARACTER} argument {\tt WeightChars}. For example, one could -! define attributes for Euclidean 3-space length elements by setting -! ${\tt WeightChars} = {\tt 'dx:dy:dz'}$. The input {\tt CHARCTER} -! argument {\tt OtherChars} provides space for defining other real -! attributes (again as a colon-delimited string of attribute names). -! One can define integer attributes by supplying a colon-delimitted -! string of names in the input {\tt CHARACTER} argument -! {\tt IndexChars}. For example, on could set aside storage space -! for the {\tt x}-, {\tt y}-, and {\tt z}-indices by setting -! ${\tt IndexChars} = {\tt 'xIndex:yIndex:zIndex'}$. -! -! Once the storage space in {\tt GGrid} is initialized, The gridpoint -! coordinates are evaluated using the input arguments {\tt Dims} (the -! number of points on each coordinate axis) and {\tt AxisData} (the -! coordinate values on all of the points of all of the axes). The user -! presents the axes with each axis stored in a column of {\tt AxisData}, -! and the axes are laid out in the same order as the ordering of the -! axis names in {\tt CoordChars}. The number of points on each axis -! is defined by the entries of the input {\tt INTEGER} array -! {\tt Dims(:)}. Continuing with the Euclidean 3-space example given -! above, setting ${\tt Dims(1:3)} = {\tt (256, 256, 128)}$ will result -! in a Cartesian grid with 256 points in the {\tt x}- and {\tt y}-directions, -! and 128 points in the {\tt z}-direction. Thus the appropriate dimensions -! of {\tt AxisData} are 256 rows (the maximum number of axis points among -! all the axes) by 3 columns (the number of physical dimensions). The -! {\tt x}-axis points are stored in {\tt AxisData(1:256,1)}, the -! {\tt y}-axis points are stored in {\tt AxisData(1:256,2)}, and the -! {\tt z}-axis points are stored in {\tt AxisData(1:128,3)}. -! -! The sorting order of the gridpoints can be either user-defined, or -! set automatically by MCT. If the latter is desired, the user must -! supply the argument {\tt CoordSortOrder}, which defines the -! lexicographic ordering (by coordinate). The entries optional input -! {\tt LOGICAL} array {\tt descend(:)} stipulates whether the ordering -! with respect to the corresponding key in {\tt CoordChars} is to be -! {\em descending}. If {\tt CoordChars} is supplied, but {\tt descend(:)} -! is not, the gridpoint information is placed in {\em ascending} order -! for each key. Returning to our Euclidian 3-space example, a choice of -! ${\tt CoordSortOrder} = {\tt y:x:z}$ and ${\tt descend(1:3)} = -! ({\tt .TRUE.}, {\tt .FALSE.}, {\tt .FALSE.})$ will result in the entries of -! {\tt GGrid} being orderd lexicographically by {\tt y} (in descending -! order), {\tt x} (in ascending order), and {\tt z} (in ascending order). -! Regardless of the gridpoint sorting strategy, MCT will number each of -! the gridpoints in {\tt GGrid}, storing this information in the integer -! attribute named {\tt 'GlobGridNum'}. -! -! 
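To make the Euclidean 3-space example above concrete, the following is a minimal sketch of a caller building the $256 \times 256 \times 128$ grid through the generic {\tt initCartesian} interface; the program name and axis values are illustrative assumptions, while the argument names follow the interface documented here:

      program cartesian_example
         use m_realkinds,   only : SP
         use m_GeneralGrid, only : GeneralGrid, initCartesian, clean
         implicit none
         type(GeneralGrid) :: GGrid
         integer,  dimension(:),   pointer :: Dims
         real(SP), dimension(:,:), pointer :: AxisData
         logical,  dimension(:),   pointer :: descend
         integer :: i

         allocate(Dims(3), AxisData(256,3), descend(3))
         AxisData = 0.0_SP
         Dims = (/ 256, 256, 128 /)
         do i = 1, 256
            AxisData(i,1) = real(i-1, SP)   ! x-axis points
            AxisData(i,2) = real(i-1, SP)   ! y-axis points
         end do
         do i = 1, 128
            AxisData(i,3) = real(i-1, SP)   ! z-axis points
         end do
         descend = (/ .true., .false., .false. /)

         ! Lexicographic ordering: y descending, then x and z ascending.
         call initCartesian(GGrid, CoordChars='x:y:z',               &
                            CoordSortOrder='y:x:z', descend=descend, &
                            WeightChars='dx:dy:dz',                  &
                            Dims=Dims, AxisData=AxisData)

         call clean(GGrid)
         deallocate(Dims, AxisData, descend)
      end program cartesian_example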
!INTERFACE: - - subroutine initCartesianSP_(GGrid, CoordChars, CoordSortOrder, descend, & - WeightChars, OtherChars, IndexChars, Dims, & - AxisData) -! -! !USES: -! - use m_stdio - use m_die - use m_realkinds, only : SP - - use m_String, only : String - use m_String, only : String_ToChar => ToChar - use m_String, only : String_clean => clean - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nullify => nullify - use m_List, only : List_append => append - use m_List, only : List_nitem => nitem - use m_List, only : List_get => get - use m_List, only : List_shared => GetSharedListIndices - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: CoordChars - character(len=*), optional, intent(in) :: CoordSortOrder - character(len=*), optional, intent(in) :: WeightChars - logical, dimension(:), optional, pointer :: descend - character(len=*), optional, intent(in) :: OtherChars - character(len=*), optional, intent(in) :: IndexChars - integer, dimension(:), pointer :: Dims - real(SP), dimension(:,:), pointer :: AxisData - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: GGrid - -! !REVISION HISTORY: -! 7Jun01 - Jay Larson - API Specification. -! 12Aug02 - Jay Larson - Implementation. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initCartesianSP_' - - type(List) :: IAList, RAList - type(String) :: AxisName - integer, dimension(:), pointer :: & - CoordListIndices, CoordSortOrderIndices - integer :: DimMax, NumDims, NumGridPoints, NumShared - integer :: ierr, iAxis, i, j, k, n, nCycles, nRepeat - integer :: index - - ! Nullify GeneralGrid components - - call List_nullify(GGrid%coordinate_list) - call List_nullify(GGrid%coordinate_sort_order) - call List_nullify(GGrid%weight_list) - call List_nullify(GGrid%other_list) - call List_nullify(GGrid%index_list) - nullify(GGrid%descend) - - ! Sanity check on axis definition arguments: - - ! Ensure each axis has a positive number of points, and - ! determine DimMax, the maximum entry in Dims(:). - - DimMax = 1 - do i=1,size(Dims) - if(Dims(i) > DimMax) DimMax = Dims(i) - if(Dims(i) <= 0) then - write(stderr,'(2a,i8,a,i8)') myname_, & - ':: FATAL--illegal number of axis points in Dims(',i,') = ', & - Dims(i) - call die(myname_) - endif - end do - - ! Are the definitions of Dims(:) and AxisData(:,:) compatible? - ! The number of elements in Dims(:) should match the number of - ! columns in AxisData(:,:), and the maximum value stored in Dims(:) - ! (DimMax determined above in this routine) must not exceed the - ! number of rows in AxisData(:,:). - - if(size(AxisData,2) /= size(Dims)) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- The number of axes (elements) referenced in Dims(:) ', & - 'does not equal the number of columns in AxisData(:,:). ', & - 'size(Dims) = ',size(Dims),' size(AxisData,2) = ',size(AxisData,2) - call die(myname_) - endif - - if(size(AxisData,1) < DimMax) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- Maximum number of axis points max(Dims) is ', & - 'greater than the number of rows in AxisData(:,:). ', & - 'max(Dims) = ',DimMax,' size(AxisData,1) = ',size(AxisData,1) - call die(myname_) - endif - - ! If the LOGICAL descend(:) flags for sorting are present, - ! 
make sure that (1) descend is associated, and - ! (2) CoordSortOrder is also present, and - ! (3) The size of descend(:) matches the size of Dims(:), - ! both of which correspond to the number of axes on the - ! Cartesian Grid. - - if(present(descend)) then - - if(.not.associated(descend)) then - call die(myname_,'descend argument must be associated') - endif - - if(.not. present(CoordSortOrder)) then - write(stderr,'(4a)') myname_, & - ':: FATAL -- Invocation with the argument descend(:) present ', & - 'requires the presence of the argument CoordSortOrder, ', & - 'which was not provided.' - call die(myname_, 'Argument CoordSortOrder was not provided') - endif - - if(size(descend) /= size(Dims)) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- The sizes of the arrays descend(:) and Dims(:) ', & - 'must match (they both must equal the number of dimensions ', & - 'of the Cartesian Grid). size(Dims) = ',size(Dims), & - ' size(descend) = ',size(descend) - call die(myname_,'size of and arguments must match') - endif - - endif - - ! Initialize GGrid%coordinate_list and use the number of items - ! in it to set the number of dimensions of the Cartesian - ! Grid (NumDims): - - call List_init(GGrid%coordinate_list, CoordChars) - - NumDims = List_nitem(GGrid%coordinate_list) - - ! Check the number of arguments - - if(NumDims <= 0) then - write(stderr,*) myname_, & - ':: ERROR CoordList is empty!' - call die(myname_,'List_nitem(CoordList) <= 0',NumDims) - endif - - ! Do the number of coordinate names specified match the number - ! of coordinate axes (i.e., the number of columns in AxisData(:,:))? - - if(NumDims /= size(AxisData,2)) then - write(stderr,'(6a,i8,a,i8)') myname_, & - ':: FATAL-- Number of axes specified in argument CoordChars ', & - 'does not equal the number of axes stored in AxisData(:,:). ', & - 'CoordChars = ', CoordChars, & - 'Number of axes = ',NumDims, & - ' size(AxisData,2) = ',size(AxisData,2) - call die(myname_) - endif - - ! End of argument sanity checks. - - ! Create other List components of GGrid and build REAL - ! and INTEGER attribute lists for the AttrVect GGrid%data - - ! Start off with things *guaranteed* to be in IAList and RAList. - ! The variable GlobGridNum is a CHARACTER parameter inherited - ! from the declaration section of this module. - - call List_init(IAList, GlobGridNum) - call List_init(RAList, CoordChars) - - if(present(CoordSortOrder)) then - - call List_init(GGrid%coordinate_sort_order, CoordSortOrder) - - ! Check the items in the coordinate list and the - ! coordinate grid sort keys...they should contain - ! the same items. - - call List_shared(GGrid%coordinate_list,GGrid%coordinate_sort_order, & - NumShared,CoordListIndices,CoordSortOrderIndices) - - deallocate(CoordListIndices,CoordSortOrderIndices,stat=ierr) - if(ierr/=0) call die(myname_,'deallocate(CoordListIndices..)',ierr) - - if(NumShared /= NumDims) then - call die(myname_,'CoordSortOrder must have the same items & - & as CoordList',abs(NumDims-NumShared)) - endif - - endif - - if(present(WeightChars)) then - call List_init(GGrid%weight_list, WeightChars) - call List_append(RAList, GGrid%weight_list) - endif - - if(present(OtherChars)) then - call List_init(GGrid%other_list, OtherChars) - call List_append(RAList, GGrid%other_list) - endif - - if(present(IndexChars)) then - call List_init(GGrid%index_list, IndexChars) - call List_append(IAList, GGrid%index_list) - endif - - ! Finally, Initialize GGrid%descend from descend(:). - ! 
If descend argument is not present, set it to the default .false. - - if(present(CoordSortOrder)) then - - allocate(GGrid%descend(NumDims), stat=ierr) - if(ierr /= 0) call die(myname_,"allocate GGrid%descend...",ierr) - - if(present(descend)) then - do n=1,NumDims - GGrid%descend(n) = descend(n) - end do - else - do n=1,NumDims - GGrid%descend(n) = .FALSE. - end do - endif - - endif ! if(present(CoordSortOrder))... - - ! Compute the total number of grid points in the GeneralGrid. - ! This is merely the product of the elements of Dims(:) - - NumGridPoints = 1 - do i=1,NumDims - NumGridPoints = NumGridPoints * Dims(i) - end do - - ! Now we are prepared to create GGrid%data: - - call AttrVect_init(GGrid%data, IAList, RAList, NumGridPoints) - call AttrVect_zero(GGrid%data) - - ! Now, store Cartesian gridpoint data, in the order - ! defined by how the user laid out AxisData(:,:) - - do n=1,NumDims - - ! Retrieve first coordinate axis name from GGrid%coordinate_list - ! (as a String) - call List_get(AxisName, n, GGrid%coordinate_list) - - ! Index this real attribute of GGrid - iAxis = indexRA_(GGrid, String_ToChar(AxisName)) - - if(iAxis <= 0) then - write(stderr,'(4a)') myname_, & - ':: REAL Attribute "',String_ToChar(AxisName),'" not found.' - call die(myname_) - endif - - ! Now, clear the String AxisName for use in the next - ! cycle of this loop: - - call String_clean(AxisName) - - ! Compute the number of times we cycle through the axis - ! values (nCycles), and the number of times each axis - ! value is repeated in each cycle (nRepeat) - - nCycles = 1 - if(n > 1) then - do i=1,n-1 - nCycles = nCycles * Dims(i) - end do - endif - - nRepeat = 1 - if(n < NumDims) then - do i=n+1,NumDims - nRepeat = nRepeat * Dims(i) - end do - endif - - ! Loop over the number of cycles for which we run through - ! all the axis points. Within each cycle, loop over all - ! of the axis points, repeating each value nRepeat times. - ! This produces a set of grid entries that are in - ! lexicographic order with respect to how the axes are - ! presented to this routine. - - index = 1 - do i=1,nCycles - do j=1,Dims(n) - do k=1,nRepeat - GGrid%data%rAttr(iAxis,index) = AxisData(j,n) - index = index+1 - end do ! do k=1,nRepeat - end do ! do j=1,Dims(n) - end do ! do i=1,nCycles - - end do ! do n=1,NumDims... - - ! If the argument CoordSortOrder was supplied, the entries - ! of GGrid will be sorted/permuted with this lexicographic - ! ordering, and the values of the GGrid INTEGER attribute - ! GlobGridNum will be numbered to reflect this new ordering - ! scheme. - - index = indexIA_(GGrid, GlobGridNum) - - if(present(CoordSortOrder)) then ! Sort permute entries before - ! numbering them - - call SortPermute_(GGrid) ! Sort / permute - - endif ! if(present(CoordSortOrder))... - - ! Number the gridpoints based on the AttrVect point index - ! (i.e., the second index in GGrid%data%iAttr) - - do i=1, lsize_(GGrid) - GGrid%data%iAttr(index,i) = i - end do - - ! Finally, clean up intermediate Lists - - call List_clean(IAList) - call List_clean(RAList) - - end subroutine initCartesianSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: initCartesianDP_ - Initialize a Cartesian GeneralGrid -! -! !DESCRIPTION: -! Double Precision version of initCartesianSP_ -! -! 
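The only interface difference from the single precision version is the kind of {\tt AxisData}, so a caller selects this variant simply by passing double precision axis data to the generic {\tt initCartesian}. A hypothetical one-dimensional sketch (routine name and axis values are illustrative only):

      subroutine build_dp_axis_grid(GGrid)
         use m_realkinds,   only : DP
         use m_GeneralGrid, only : GeneralGrid, initCartesian
         implicit none
         type(GeneralGrid), intent(out) :: GGrid
         integer,  dimension(:),   pointer :: Dims
         real(DP), dimension(:,:), pointer :: AxisData
         integer :: i

         allocate(Dims(1), AxisData(10,1))
         Dims(1) = 10
         do i = 1, 10
            AxisData(i,1) = 0.1_DP * real(i-1, DP)   ! a single 10-point axis
         end do

         ! real(DP) AxisData resolves to initCartesianDP_ through the
         ! generic initCartesian interface.
         call initCartesian(GGrid, CoordChars='x', Dims=Dims, AxisData=AxisData)

         deallocate(Dims, AxisData)
      end subroutine build_dp_axis_grid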
!INTERFACE: - - subroutine initCartesianDP_(GGrid, CoordChars, CoordSortOrder, descend, & - WeightChars, OtherChars, IndexChars, Dims, & - AxisData) -! -! !USES: -! - use m_stdio - use m_die - use m_realkinds, only : DP - - use m_String, only : String - use m_String, only : String_ToChar => ToChar - use m_String, only : String_clean => clean - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nullify => nullify - use m_List, only : List_append => append - use m_List, only : List_nitem => nitem - use m_List, only : List_get => get - use m_List, only : List_shared => GetSharedListIndices - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: CoordChars - character(len=*), optional, intent(in) :: CoordSortOrder - character(len=*), optional, intent(in) :: WeightChars - logical, dimension(:), optional, pointer :: descend - character(len=*), optional, intent(in) :: OtherChars - character(len=*), optional, intent(in) :: IndexChars - integer, dimension(:), pointer :: Dims - real(DP), dimension(:,:), pointer :: AxisData - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: GGrid - -! !REVISION HISTORY: -! 7Jun01 - Jay Larson - API Specification. -! 12Aug02 - Jay Larson - Implementation. -! ______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initCartesianDP_' - - type(List) :: IAList, RAList - type(String) :: AxisName - integer, dimension(:), pointer :: & - CoordListIndices, CoordSortOrderIndices - integer :: DimMax, NumDims, NumGridPoints, NumShared - integer :: ierr, iAxis, i, j, k, n, nCycles, nRepeat - integer :: index - - ! Nullify GeneralGrid components - - call List_nullify(GGrid%coordinate_list) - call List_nullify(GGrid%coordinate_sort_order) - call List_nullify(GGrid%weight_list) - call List_nullify(GGrid%other_list) - call List_nullify(GGrid%index_list) - nullify(GGrid%descend) - - ! Sanity check on axis definition arguments: - - ! Ensure each axis has a positive number of points, and - ! determine DimMax, the maximum entry in Dims(:). - - DimMax = 1 - do i=1,size(Dims) - if(Dims(i) > DimMax) DimMax = Dims(i) - if(Dims(i) <= 0) then - write(stderr,'(2a,i8,a,i8)') myname_, & - ':: FATAL--illegal number of axis points in Dims(',i,') = ', & - Dims(i) - call die(myname_) - endif - end do - - ! Are the definitions of Dims(:) and AxisData(:,:) compatible? - ! The number of elements in Dims(:) should match the number of - ! columns in AxisData(:,:), and the maximum value stored in Dims(:) - ! (DimMax determined above in this routine) must not exceed the - ! number of rows in AxisData(:,:). - - if(size(AxisData,2) /= size(Dims)) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- The number of axes (elements) referenced in Dims(:) ', & - 'does not equal the number of columns in AxisData(:,:). ', & - 'size(Dims) = ',size(Dims),' size(AxisData,2) = ',size(AxisData,2) - call die(myname_) - endif - - if(size(AxisData,1) < DimMax) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- Maximum number of axis points max(Dims) is ', & - 'greater than the number of rows in AxisData(:,:). ', & - 'max(Dims) = ',DimMax,' size(AxisData,1) = ',size(AxisData,1) - call die(myname_) - endif - - ! If the LOGICAL descend(:) flags for sorting are present, - ! 
make sure that (1) descend is associated, and - ! (2) CoordSortOrder is also present, and - ! (3) The size of descend(:) matches the size of Dims(:), - ! both of which correspond to the number of axes on the - ! Cartesian Grid. - - if(present(descend)) then - - if(.not.associated(descend)) then - call die(myname_,'descend argument must be associated') - endif - - if(.not. present(CoordSortOrder)) then - write(stderr,'(4a)') myname_, & - ':: FATAL -- Invocation with the argument descend(:) present ', & - 'requires the presence of the argument CoordSortOrder, ', & - 'which was not provided.' - call die(myname_, 'Argument CoordSortOrder was not provided') - endif - - if(size(descend) /= size(Dims)) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- The sizes of the arrays descend(:) and Dims(:) ', & - 'must match (they both must equal the number of dimensions ', & - 'of the Cartesian Grid). size(Dims) = ',size(Dims), & - ' size(descend) = ',size(descend) - call die(myname_,'size of and arguments must match') - endif - - endif - - ! Initialize GGrid%coordinate_list and use the number of items - ! in it to set the number of dimensions of the Cartesian - ! Grid (NumDims): - - call List_init(GGrid%coordinate_list, CoordChars) - - NumDims = List_nitem(GGrid%coordinate_list) - - ! Check the number of arguments - - if(NumDims <= 0) then - write(stderr,*) myname_, & - ':: ERROR CoordList is empty!' - call die(myname_,'List_nitem(CoordList) <= 0',NumDims) - endif - - ! Do the number of coordinate names specified match the number - ! of coordinate axes (i.e., the number of columns in AxisData(:,:))? - - if(NumDims /= size(AxisData,2)) then - write(stderr,'(6a,i8,a,i8)') myname_, & - ':: FATAL-- Number of axes specified in argument CoordChars ', & - 'does not equal the number of axes stored in AxisData(:,:). ', & - 'CoordChars = ', CoordChars, & - 'Number of axes = ',NumDims, & - ' size(AxisData,2) = ',size(AxisData,2) - call die(myname_) - endif - - ! End of argument sanity checks. - - ! Create other List components of GGrid and build REAL - ! and INTEGER attribute lists for the AttrVect GGrid%data - - ! Start off with things *guaranteed* to be in IAList and RAList. - ! The variable GlobGridNum is a CHARACTER parameter inherited - ! from the declaration section of this module. - - call List_init(IAList, GlobGridNum) - call List_init(RAList, CoordChars) - - if(present(CoordSortOrder)) then - - call List_init(GGrid%coordinate_sort_order, CoordSortOrder) - - ! Check the items in the coordinate list and the - ! coordinate grid sort keys...they should contain - ! the same items. - - call List_shared(GGrid%coordinate_list,GGrid%coordinate_sort_order, & - NumShared,CoordListIndices,CoordSortOrderIndices) - - deallocate(CoordListIndices,CoordSortOrderIndices,stat=ierr) - if(ierr/=0) call die(myname_,'deallocate(CoordListIndices..)',ierr) - - if(NumShared /= NumDims) then - call die(myname_,'CoordSortOrder must have the same items & - & as CoordList',abs(NumDims-NumShared)) - endif - - endif - - if(present(WeightChars)) then - call List_init(GGrid%weight_list, WeightChars) - call List_append(RAList, GGrid%weight_list) - endif - - if(present(OtherChars)) then - call List_init(GGrid%other_list, OtherChars) - call List_append(RAList, GGrid%other_list) - endif - - if(present(IndexChars)) then - call List_init(GGrid%index_list, IndexChars) - call List_append(IAList, GGrid%index_list) - endif - - ! Finally, Initialize GGrid%descend from descend(:). - ! 
If descend argument is not present, set it to the default .false. - - if(present(CoordSortOrder)) then - - allocate(GGrid%descend(NumDims), stat=ierr) - if(ierr /= 0) call die(myname_,"allocate GGrid%descend...",ierr) - - if(present(descend)) then - do n=1,NumDims - GGrid%descend(n) = descend(n) - end do - else - do n=1,NumDims - GGrid%descend(n) = .FALSE. - end do - endif - - endif ! if(present(CoordSortOrder))... - - ! Compute the total number of grid points in the GeneralGrid. - ! This is merely the product of the elements of Dims(:) - - NumGridPoints = 1 - do i=1,NumDims - NumGridPoints = NumGridPoints * Dims(i) - end do - - ! Now we are prepared to create GGrid%data: - - call AttrVect_init(GGrid%data, IAList, RAList, NumGridPoints) - call AttrVect_zero(GGrid%data) - - ! Now, store Cartesian gridpoint data, in the order - ! defined by how the user laid out AxisData(:,:) - - do n=1,NumDims - - ! Retrieve first coordinate axis name from GGrid%coordinate_list - ! (as a String) - call List_get(AxisName, n, GGrid%coordinate_list) - - ! Index this real attribute of GGrid - iAxis = indexRA_(GGrid, String_ToChar(AxisName)) - - if(iAxis <= 0) then - write(stderr,'(4a)') myname_, & - ':: REAL Attribute "',String_ToChar(AxisName),'" not found.' - call die(myname_) - endif - - ! Now, clear the String AxisName for use in the next - ! cycle of this loop: - - call String_clean(AxisName) - - ! Compute the number of times we cycle through the axis - ! values (nCycles), and the number of times each axis - ! value is repeated in each cycle (nRepeat) - - nCycles = 1 - if(n > 1) then - do i=1,n-1 - nCycles = nCycles * Dims(i) - end do - endif - - nRepeat = 1 - if(n < NumDims) then - do i=n+1,NumDims - nRepeat = nRepeat * Dims(i) - end do - endif - - ! Loop over the number of cycles for which we run through - ! all the axis points. Within each cycle, loop over all - ! of the axis points, repeating each value nRepeat times. - ! This produces a set of grid entries that are in - ! lexicographic order with respect to how the axes are - ! presented to this routine. - - index = 1 - do i=1,nCycles - do j=1,Dims(n) - do k=1,nRepeat - GGrid%data%rAttr(iAxis,index) = AxisData(j,n) - index = index+1 - end do ! do k=1,nRepeat - end do ! do j=1,Dims(n) - end do ! do i=1,nCycles - - end do ! do n=1,NumDims... - - ! If the argument CoordSortOrder was supplied, the entries - ! of GGrid will be sorted/permuted with this lexicographic - ! ordering, and the values of the GGrid INTEGER attribute - ! GlobGridNum will be numbered to reflect this new ordering - ! scheme. - - index = indexIA_(GGrid, GlobGridNum) - - if(present(CoordSortOrder)) then ! Sort permute entries before - ! numbering them - - call SortPermute_(GGrid) ! Sort / permute - - endif ! if(present(CoordSortOrder))... - - ! Number the gridpoints based on the AttrVect point index - ! (i.e., the second index in GGrid%data%iAttr) - - do i=1, lsize_(GGrid) - GGrid%data%iAttr(index,i) = i - end do - - ! Finally, clean up intermediate Lists - - call List_clean(IAList) - call List_clean(RAList) - - end subroutine initCartesianDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initUnstructuredSP_ - Initialize an Unstructured GeneralGrid -! -! !DESCRIPTION: -! This routine creates the storage space for grid point -! 
coordinates, area/volume weights, and other coordinate data ({\em e.g.}, -! local cell dimensions), and fills in user-supplied values for the grid -! point coordinates. These data are referenced by {\tt List} -! components that are also created by this routine (see the documentation -! of the declaration section of this module for more details about setting -! list information). Each of the input {\tt CHARACTER} arguments is a -! colon-delimited string of attribute names, each corrsponding to a -! {\tt List} element of the output {\tt GeneralGrid} argument {\tt GGrid}, -! and are summarized in the table below: -! -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|l|l|l|l|} -!\hline -!{\bf Argument} & {\bf Component of {\tt GGrid}} & {\bf Significance} & {\bf Required?} \\ -!\hline -!{\tt CoordChars} & {\tt GGrid\%coordinate\_list} & Dimension Names & Yes \\ -!\hline -!{\tt CoordSortOrder} & {\tt GGrid\%coordinate\_sort\_order} & Grid Point & No \\ -! & & Sorting Keys & \\ -!\hline -!{\tt WeightChars} & {\tt GGrid\%weight\_list} & Grid Cell & No \\ -! & & Length, Area, and & \\ -! & & Volume Weights & \\ -!\hline -!{\tt OtherChars} & {\tt GGrid\%other\_list} & All Other & No \\ -! & & Real Attributes & \\ -!\hline -!{\tt IndexChars} & {\tt GGrid\%index\_list} & All Other & No \\ -! & & Integer Attributes & \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! The number of physical dimensions of the grid is set by the user in -! the input {\tt INTEGER} argument {\tt nDims}, and the number of grid -! points stored in {\tt GGrid} is set using the input {\tt INTEGER} -! argument {\tt nPoints}. The grid point coordinates are input via the -! {\tt REAL} array {\tt PointData(:)}. The number of entries in -! {\tt PointData} must equal the product of {\tt nDims} and {\tt nPoints}. -! The grid points are grouped in {\tt nPoints} consecutive groups of -! {\tt nDims} entries, with the coordinate values for each point set in -! the same order as the dimensions are named in the list {\tt CoordChars}. -! -! If a set of sorting keys is supplied in the argument {\tt CoordSortOrder}, -! the user can control whether the sorting by each key is in descending or -! ascending order by supplying the input {\tt LOGICAL} array {\tt descend(:)}. -! By default, all sorting is in {\em ascending} order for each key if the -! argument {\tt descend} is not provided. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt GGrid} is dynamically -! allocated memory. When one no longer needs {\tt GGrid}, one should -! release this space by invoking {\tt clean()} for the {\tt GeneralGrid}. -! -! !INTERFACE: - - subroutine initUnstructuredSP_(GGrid, CoordChars, CoordSortOrder, descend, & - WeightChars, OtherChars, IndexChars, nDims, & - nPoints, PointData) -! -! !USES: -! - use m_stdio - use m_die - use m_realkinds,only : SP - - use m_String, only : String, char - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nitem => nitem - use m_List, only : List_nullify => nullify - use m_List, only : List_copy => copy - use m_List, only : List_append => append - use m_List, only : List_shared => GetSharedListIndices - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - - implicit none - -! !INPUT PARAMETERS: -! 
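For reference, a hedged sketch of calling the generic {\tt initUnstructured} for a small two-dimensional, four-point grid; the coordinate names and values are hypothetical, and the point-by-point packing of {\tt PointData} follows the description above:

      program unstructured_example
         use m_realkinds,   only : SP
         use m_GeneralGrid, only : GeneralGrid, initUnstructured, clean
         implicit none
         type(GeneralGrid) :: GGrid
         real(SP), dimension(:), pointer :: PointData
         integer, parameter :: nDims = 2, nPoints = 4

         ! PointData holds nPoints consecutive groups of nDims coordinates,
         ! ordered as the dimensions are named in CoordChars.
         allocate(PointData(nDims*nPoints))
         PointData = (/  0.0_SP,  0.0_SP,  &   ! point 1 (lat,lon)
                         0.0_SP, 90.0_SP,  &   ! point 2
                        45.0_SP,  0.0_SP,  &   ! point 3
                        45.0_SP, 90.0_SP /)    ! point 4

         call initUnstructured(GGrid, CoordChars='lat:lon',   &
                               CoordSortOrder='lat:lon',      &
                               nDims=nDims, nPoints=nPoints,  &
                               PointData=PointData)

         call clean(GGrid)
         deallocate(PointData)
      end program unstructured_example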
- character(len=*), intent(in) :: CoordChars - character(len=*), optional, intent(in) :: CoordSortOrder - character(len=*), optional, intent(in) :: WeightChars - logical, dimension(:), optional, pointer :: descend - character(len=*), optional, intent(in) :: OtherChars - character(len=*), optional, intent(in) :: IndexChars - integer, intent(in) :: nDims - integer, intent(in) :: nPoints - real(SP), dimension(:), pointer :: PointData - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: GGrid - -! !REVISION HISTORY: -! 7Jun01 - Jay Larson - API specification. -! 22Aug02 - J. Larson - Implementation. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initUnstructuredSP_' - - integer :: i, ierr, index, n, nOffSet, NumShared - integer, dimension(:), pointer :: & - CoordListIndices, CoordSortOrderIndices - type(List) :: IAList, RAList - - ! Nullify all GeneralGrid components - - call List_nullify(GGrid%coordinate_list) - call List_nullify(GGrid%coordinate_sort_order) - call List_nullify(GGrid%weight_list) - call List_nullify(GGrid%other_list) - call List_nullify(GGrid%index_list) - nullify(GGrid%descend) - - ! Sanity checks on input arguments: - - ! If the LOGICAL descend(:) flags for sorting are present, - ! make sure that (1) it is associated, - ! (2) CoordSortOrder is also present, and - ! (3) The size of descend(:) matches the size of Dims(:), - ! both of which correspond to the number of axes on the - ! Cartesian Grid. - - if(present(descend)) then - - if(.not.associated(descend)) then - call die(myname_,'descend argument must be associated') - endif - - if(.not. present(CoordSortOrder)) then - write(stderr,'(4a)') myname_, & - ':: FATAL -- Invocation with the argument descend(:) present ', & - 'requires the presence of the argument CoordSortOrder, ', & - 'which was not provided.' - call die(myname_,'Argument CoordSortOrder was not provided') - endif - - if(present(descend)) then - if(size(descend) /= nDims) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- The size of the array descend(:) and nDims ', & - 'must be equal (they both must equal the number of dimensions ', & - 'of the unstructured Grid). nDims = ',nDims, & - ' size(descend) = ',size(descend) - call die(myname_,'size(descend)/=nDims') - endif - endif - - endif - - ! Initialize GGrid%coordinate_list and comparethe number of items - ! to the number of dimensions of the unstructured nDims: - - call List_init(GGrid%coordinate_list, CoordChars) - - ! Check the coordinate_list - - if(nDims /= List_nitem(GGrid%coordinate_list)) then - write(stderr,'(4a,i8,3a,i8)') myname_, & - ':: FATAL-- The number of coordinate names supplied in the ', & - 'argument CoordChars must equal the number of dimensions ', & - 'specified by the argument nDims. nDims = ',nDims, & - ' CoordChars = ',CoordChars, ' number of dimensions in CoordChars = ', & - List_nitem(GGrid%coordinate_list) - call die(myname_) - endif - - if(nDims <= 0) then - write(stderr,*) myname_, ':: ERROR nDims=0!' - call die(myname_,'nDims <= 0',nDims) - endif - - ! PointData is a one-dimensional array containing all the gridpoint - ! coordinates. As such, its size must equal nDims * nPoints. True? - - if(size(PointData) /= nDims * nPoints) then - write(stderr,'(3a,3(a,i8))') myname_, & - ':: FATAL-- The length of the array PointData(:) must match ', & - 'the product of the input arguments nDims and nPoints. 
', & - 'nDims = ',nDims, ' nPoints = ',nPoints,& - ' size(PointData) = ',size(PointData) - call die(myname_) - endif - - ! End of input argument sanity checks. - - ! Create other List components of GGrid and build REAL - ! and INTEGER attribute lists for the AttrVect GGrid%data - - ! Start off with things *guaranteed* to be in IAList and RAList. - ! The variable GlobGridNum is a CHARACTER parameter inherited - ! from the declaration section of this module. - - call List_init(IAList, GlobGridNum) - call List_init(RAList, CoordChars) - - if(present(CoordSortOrder)) then - - call List_init(GGrid%coordinate_sort_order, CoordSortOrder) - - call List_shared(GGrid%coordinate_list,GGrid%coordinate_sort_order, & - NumShared,CoordListIndices,CoordSortOrderIndices) - - deallocate(CoordListIndices,CoordSortOrderIndices,stat=ierr) - if(ierr/=0) call die(myname_,'deallocate(CoordListIndices..)',ierr) - - if(NumShared /= nDims) then - call die(myname_,'CoordSortOrder must have the same items & - & as CoordList',abs(nDims-NumShared)) - endif - - endif - - if(present(WeightChars)) then - call List_init(GGrid%weight_list, WeightChars) - call List_append(RAList, GGrid%weight_list) - endif - - if(present(OtherChars)) then - call List_init(GGrid%other_list, OtherChars) - call List_append(RAList, GGrid%other_list) - endif - - if(present(IndexChars)) then - call List_init(GGrid%index_list, IndexChars) - call List_append(IAList, GGrid%index_list) - endif - - ! Initialize GGrid%descend from descend(:). - ! If descend argument is not present, set it to the default .false. - - if(present(CoordSortOrder)) then - - allocate(GGrid%descend(nDims), stat=ierr) - if(ierr /= 0) call die(myname_,"allocate GGrid%descend...",ierr) - - if(present(descend)) then - do n=1,nDims - GGrid%descend(n) = descend(n) - end do - else - do n=1,nDims - GGrid%descend(n) = .FALSE. - end do - endif - - endif ! if(present(CoordSortOrder))... - - ! Create Grid attribute data storage AttrVect GGrid%data: - - call AttrVect_init(GGrid%data, IAList, RAList, nPoints) - call AttrVect_zero(GGrid%data) - - ! Load up gridpoint coordinate data into GGrid%data. - ! Given how we've set up the real attributes of GGrid%data, - ! we have guaranteed the first nDims real attributes are - ! the gridpoint coordinates. - - do n=1,nPoints - nOffSet = (n-1) * nDims - do i=1,nDims - GGrid%data%rAttr(i,n) = PointData(nOffset + i) - end do - end do - - ! If the argument CoordSortOrder was supplied, the entries - ! of GGrid will be sorted/permuted with this lexicographic - ! ordering, and the values of the GGrid INTEGER attribute - ! GlobGridNum will be numbered to reflect this new ordering - ! scheme. - - index = indexIA_(GGrid, GlobGridNum) - - if(present(CoordSortOrder)) then ! Sort permute entries before - ! numbering them - - call SortPermute_(GGrid) ! Sort / permute - - endif ! if(present(CoordSortOrder))... - - ! Number the gridpoints based on the AttrVect point index - ! (i.e., the second index in GGrid%data%iAttr) - - do i=1, lsize_(GGrid) - GGrid%data%iAttr(index,i) = i - end do - - ! Clean up temporary allocated structures: - - call List_clean(IAList) - call List_clean(RAList) - - end subroutine initUnstructuredSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: initUnstructuredDP_ - Initialize an Unstructured GeneralGrid -! -! !DESCRIPTION: -! 
Double precision version of initUnstructuredSP_ -! -! !INTERFACE: - - subroutine initUnstructuredDP_(GGrid, CoordChars, CoordSortOrder, descend, & - WeightChars, OtherChars, IndexChars, nDims, & - nPoints, PointData) -! -! !USES: -! - use m_stdio - use m_die - use m_realkinds,only : DP - - use m_String, only : String, char - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nitem => nitem - use m_List, only : List_nullify => nullify - use m_List, only : List_copy => copy - use m_List, only : List_append => append - use m_List, only : List_shared => GetSharedListIndices - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: CoordChars - character(len=*), optional, intent(in) :: CoordSortOrder - character(len=*), optional, intent(in) :: WeightChars - logical, dimension(:), optional, pointer :: descend - character(len=*), optional, intent(in) :: OtherChars - character(len=*), optional, intent(in) :: IndexChars - integer, intent(in) :: nDims - integer, intent(in) :: nPoints - real(DP), dimension(:), pointer :: PointData - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: GGrid - -! !REVISION HISTORY: -! 7Jun01 - Jay Larson - API specification. -! 22Aug02 - J. Larson - Implementation. -! ______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initUnstructuredDP_' - - integer :: i, ierr, index, n, nOffSet, NumShared - integer, dimension(:), pointer :: & - CoordListIndices, CoordSortOrderIndices - type(List) :: IAList, RAList - - ! Nullify all GeneralGrid components - - call List_nullify(GGrid%coordinate_list) - call List_nullify(GGrid%coordinate_sort_order) - call List_nullify(GGrid%weight_list) - call List_nullify(GGrid%other_list) - call List_nullify(GGrid%index_list) - nullify(GGrid%descend) - - ! Sanity checks on input arguments: - - ! If the LOGICAL descend(:) flags for sorting are present, - ! make sure that (1) it is associated, - ! (2) CoordSortOrder is also present, and - ! (3) The size of descend(:) matches the size of Dims(:), - ! both of which correspond to the number of axes on the - ! Cartesian Grid. - - if(present(descend)) then - - if(.not.associated(descend)) then - call die(myname_,'descend argument must be associated') - endif - - if(.not. present(CoordSortOrder)) then - write(stderr,'(4a)') myname_, & - ':: FATAL -- Invocation with the argument descend(:) present ', & - 'requires the presence of the argument CoordSortOrder, ', & - 'which was not provided.' - call die(myname_,'Argument CoordSortOrder was not provided') - endif - - if(present(descend)) then - if(size(descend) /= nDims) then - write(stderr,'(4a,i8,a,i8)') myname_, & - ':: FATAL-- The size of the array descend(:) and nDims ', & - 'must be equal (they both must equal the number of dimensions ', & - 'of the unstructured Grid). nDims = ',nDims, & - ' size(descend) = ',size(descend) - call die(myname_,'size(descend)/=nDims') - endif - endif - - endif - - ! Initialize GGrid%coordinate_list and comparethe number of items - ! to the number of dimensions of the unstructured nDims: - - call List_init(GGrid%coordinate_list, CoordChars) - - ! 
Check the coordinate_list - - if(nDims /= List_nitem(GGrid%coordinate_list)) then - write(stderr,'(4a,i8,3a,i8)') myname_, & - ':: FATAL-- The number of coordinate names supplied in the ', & - 'argument CoordChars must equal the number of dimensions ', & - 'specified by the argument nDims. nDims = ',nDims, & - ' CoordChars = ',CoordChars, ' number of dimensions in CoordChars = ', & - List_nitem(GGrid%coordinate_list) - call die(myname_) - endif - - if(nDims <= 0) then - write(stderr,*) myname_, ':: ERROR nDims=0!' - call die(myname_,'nDims <= 0',nDims) - endif - - ! PointData is a one-dimensional array containing all the gridpoint - ! coordinates. As such, its size must equal nDims * nPoints. True? - - if(size(PointData) /= nDims * nPoints) then - write(stderr,'(3a,3(a,i8))') myname_, & - ':: FATAL-- The length of the array PointData(:) must match ', & - 'the product of the input arguments nDims and nPoints. ', & - 'nDims = ',nDims, ' nPoints = ',nPoints,& - ' size(PointData) = ',size(PointData) - call die(myname_) - endif - - ! End of input argument sanity checks. - - ! Create other List components of GGrid and build REAL - ! and INTEGER attribute lists for the AttrVect GGrid%data - - ! Start off with things *guaranteed* to be in IAList and RAList. - ! The variable GlobGridNum is a CHARACTER parameter inherited - ! from the declaration section of this module. - - call List_init(IAList, GlobGridNum) - call List_init(RAList, CoordChars) - - if(present(CoordSortOrder)) then - - call List_init(GGrid%coordinate_sort_order, CoordSortOrder) - - call List_shared(GGrid%coordinate_list,GGrid%coordinate_sort_order, & - NumShared,CoordListIndices,CoordSortOrderIndices) - - deallocate(CoordListIndices,CoordSortOrderIndices,stat=ierr) - if(ierr/=0) call die(myname_,'deallocate(CoordListIndices..)',ierr) - - if(NumShared /= nDims) then - call die(myname_,'CoordSortOrder must have the same items & - & as CoordList',abs(nDims-NumShared)) - endif - - endif - - if(present(WeightChars)) then - call List_init(GGrid%weight_list, WeightChars) - call List_append(RAList, GGrid%weight_list) - endif - - if(present(OtherChars)) then - call List_init(GGrid%other_list, OtherChars) - call List_append(RAList, GGrid%other_list) - endif - - if(present(IndexChars)) then - call List_init(GGrid%index_list, IndexChars) - call List_append(IAList, GGrid%index_list) - endif - - ! Initialize GGrid%descend from descend(:). - ! If descend argument is not present, set it to the default .false. - - if(present(CoordSortOrder)) then - - allocate(GGrid%descend(nDims), stat=ierr) - if(ierr /= 0) call die(myname_,"allocate GGrid%descend...",ierr) - - if(present(descend)) then - do n=1,nDims - GGrid%descend(n) = descend(n) - end do - else - do n=1,nDims - GGrid%descend(n) = .FALSE. - end do - endif - - endif ! if(present(CoordSortOrder))... - - ! Create Grid attribute data storage AttrVect GGrid%data: - - call AttrVect_init(GGrid%data, IAList, RAList, nPoints) - call AttrVect_zero(GGrid%data) - - ! Load up gridpoint coordinate data into GGrid%data. - ! Given how we've set up the real attributes of GGrid%data, - ! we have guaranteed the first nDims real attributes are - ! the gridpoint coordinates. - - do n=1,nPoints - nOffSet = (n-1) * nDims - do i=1,nDims - GGrid%data%rAttr(i,n) = PointData(nOffset + i) - end do - end do - - ! If the argument CoordSortOrder was supplied, the entries - ! of GGrid will be sorted/permuted with this lexicographic - ! ordering, and the values of the GGrid INTEGER attribute - ! 
GlobGridNum will be numbered to reflect this new ordering - ! scheme. - - index = indexIA_(GGrid, GlobGridNum) - - if(present(CoordSortOrder)) then ! Sort permute entries before - ! numbering them - - call SortPermute_(GGrid) ! Sort / permute - - endif ! if(present(CoordSortOrder))... - - ! Number the gridpoints based on the AttrVect point index - ! (i.e., the second index in GGrid%data%iAttr) - - do i=1, lsize_(GGrid) - GGrid%data%iAttr(index,i) = i - end do - - ! Clean up temporary allocated structures: - - call List_clean(IAList) - call List_clean(RAList) - - end subroutine initUnstructuredDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destroy a GeneralGrid -! -! !DESCRIPTION: -! This routine deallocates all attribute storage space for the input/output -! {\tt GeneralGrid} argument {\tt GGrid}, and destroys all of its {\tt List} -! components and sorting flags. The success (failure) of this operation is -! signified by the zero (non-zero) value of the optional {\tt INTEGER} -! output argument {\tt stat}. -! -! !INTERFACE: - - subroutine clean_(GGrid, stat) -! -! !USES: -! - use m_stdio - use m_die - - use m_List, only : List_clean => clean - use m_List, only : List_allocated => allocated - use m_AttrVect, only : AttrVect_clean => clean - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(inout) :: GGrid - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 25Sep00 - J.W. Larson - initial prototype -! 20Mar01 - J.W. Larson - complete version. -! 1Mar01 - E.T. Ong - removed dies to prevent -! crashes when cleaning uninitialized attrvects. Added -! optional stat argument. -! 5Aug02 - E. 
Ong - a more rigorous revision -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ierr - - if(present(stat)) then - - stat=0 - call AttrVect_clean(GGrid%data,ierr) - if(ierr/=0) stat=ierr - - call List_clean(GGrid%coordinate_list,ierr) - if(ierr/=0) stat=ierr - - if(List_allocated(GGrid%coordinate_sort_order)) then - call List_clean(GGrid%coordinate_sort_order,ierr) - if(ierr/=0) stat=ierr - endif - - if(List_allocated(GGrid%weight_list)) then - call List_clean(GGrid%weight_list,ierr) - if(ierr/=0) stat=ierr - endif - - if(List_allocated(GGrid%other_list)) then - call List_clean(GGrid%other_list,ierr) - if(ierr/=0) stat=ierr - endif - - if(List_allocated(GGrid%index_list)) then - call List_clean(GGrid%index_list,ierr) - if(ierr/=0) stat=ierr - endif - - if(associated(GGrid%descend)) then - deallocate(GGrid%descend, stat=ierr) - if(ierr/=0) stat=ierr - endif - - else - - call AttrVect_clean(GGrid%data) - - call List_clean(GGrid%coordinate_list) - - if(List_allocated(GGrid%coordinate_sort_order)) then - call List_clean(GGrid%coordinate_sort_order) - endif - - if(List_allocated(GGrid%weight_list)) then - call List_clean(GGrid%weight_list) - endif - - if(List_allocated(GGrid%other_list)) then - call List_clean(GGrid%other_list) - endif - - if(List_allocated(GGrid%index_list)) then - call List_clean(GGrid%index_list) - endif - - if(associated(GGrid%descend)) then - deallocate(GGrid%descend, stat=ierr) - if(ierr/=0) call die(myname_,'deallocate(GGrid%descend)',ierr) - endif - - endif - - end subroutine clean_ - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: zero_ - Set GeneralGrid Data to Zero -! -! !DESCRIPTION: -! This routine sets all of the point values of the integer and real -! attributes of an the input/output {\tt GeneralGrid} argument {\tt GGrid} -! to zero. The default action is to set the values of all the real and -! integer attributes to zero. -! -! !INTERFACE: - - subroutine zero_(GGrid, zeroReals, zeroInts) - -! !USES: - - - use m_die,only : die - use m_stdio,only : stderr - - use m_AttrVect, only : AttrVect_zero => zero - - implicit none -! !INPUT/OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(INOUT) :: GGrid - -! !INPUT PARAMETERS: - - logical, optional, intent(IN) :: zeroReals - logical, optional, intent(IN) :: zeroInts - - -! !REVISION HISTORY: -! 11May08 - R. Jacob - initial prototype/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::zero_' - - logical myZeroReals, myZeroInts - - if(present(zeroReals)) then - myZeroReals = zeroReals - else - myZeroReals = .TRUE. - endif - - if(present(zeroInts)) then - myZeroInts = zeroInts - else - myZeroInts = .TRUE. - endif - - call AttrVect_zero(GGrid%data,myZeroReals,myZeroInts) - - end subroutine zero_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dims_ - Return the Dimensionality of a GeneralGrid -! -! !DESCRIPTION: -! This {\tt INTEGER} function returns the number of physical dimensions -! of the input {\tt GeneralGrid} argument {\tt GGrid}. -! -! !INTERFACE: - - integer function dims_(GGrid) -! -! !USES: -! - use m_stdio - use m_die - - use m_List, only : List_nitem => nitem - - implicit none - -! !INPUT PARAMETERS: -! 
- type(GeneralGrid), intent(in) :: GGrid - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - initial version -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::dims_' - - - dims_ = List_nitem(GGrid%coordinate_list) - - if(dims_<=0) then - call die(myname_,"GGrid has zero dimensions",dims_) - endif - - end function dims_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexIA - Index an Integer Attribute -! -! !DESCRIPTION: -! This function returns an {\tt INTEGER}, corresponding to the location -! of an integer attribute within the input {\tt GeneralGrid} argument -! {\tt GGrid}. For example, every {\tt GGrid} has at least one integer -! attribute (namely the global gridpoint index {\tt 'GlobGridNum'}). -! The array of integer values for the attribute {\tt 'GlobGridNum'} is -! stored in -! \begin{verbatim} -! {\tt GGrid%data%iAttr(indexIA_(GGrid,'GlobGridNum'),:)}. -! \end{verbatim} -! If {\tt indexIA\_()} is unable to match {\tt item} to any of the integer -! attributes present in {\tt GGrid}, the resulting value is zero which is -! equivalent to an error. The optional input {\tt CHARACTER} arguments -! {\tt perrWith} and {\tt dieWith} control how such errors are handled. -! Below are the rules how error handling is controlled by using -! {\tt perrWith} and {\tt dieWith}: -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt indexIA\_()} terminates execution with an internally generated -! error message; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied -! traceback information stored in the argument {\tt perrWith}; -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied -! traceback information stored in the argument {\tt dieWith}; and -! \item if both {\tt perrWith} and {\tt dieWith} are present, execution -! terminates with an error message using {\tt dieWith}, and the argument -! {\tt perrWith} is ignored. -! \end{enumerate} -! -! !INTERFACE: - - integer function indexIA_(GGrid, item, perrWith, dieWith) - -! -! !USES: -! - use m_die - use m_stdio - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - Initial version. -! 27Mar02 - Jay Larson - Cleaned up error -! handling logic. -! 2Aug02 - Jay Larson - Further refinement -! of error handling. -!EOP ___________________________________________________________________ -! - - character(len=*), parameter :: myname_=myname//'::indexIA_' - - type(String) :: myTrace - - ! 
Generate a traceback String - - if(present(dieWith)) then - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then - call GenTraceBackString(myTrace, perrWith, myname_) - else - call GenTraceBackString(myTrace, myname_) - endif - endif - - ! Call AttrVect_indexIA() accordingly: - - if( present(dieWith) .or. & - ((.not. present(dieWith)) .and. (.not. present(perrWith))) ) then - indexIA_ = AttrVect_indexIA(GGrid%data, item, & - dieWith=String_ToChar(myTrace)) - else ! perrWith but no dieWith case - indexIA_ = AttrVect_indexIA(GGrid%data, item, & - perrWith=String_ToChar(myTrace)) - endif - - call String_clean(myTrace) - - end function indexIA_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexRA - Index a Real Attribute -! -! !DESCRIPTION: - -! This function returns an {\tt INTEGER}, corresponding to the location -! of an integer attribute within the input {\tt GeneralGrid} argument -! {\tt GGrid}. For example, every {\tt GGrid} has at least one integer -! attribute (namely the global gridpoint index {\tt 'GlobGridNum'}). -! The array of integer values for the attribute {\tt 'GlobGridNum'} is -! stored in -! \begin{verbatim} -! {\tt GGrid%data%iAttr(indexRA_(GGrid,'GlobGridNum'),:)}. -! \end{verbatim} -! If {\tt indexRA\_()} is unable to match {\tt item} to any of the integer -! attributes present in {\tt GGrid}, the resulting value is zero which is -! equivalent to an error. The optional input {\tt CHARACTER} arguments -! {\tt perrWith} and {\tt dieWith} control how such errors are handled. -! Below are the rules how error handling is controlled by using -! {\tt perrWith} and {\tt dieWith}: -! \begin{enumerate} -! \item if neither {\tt perrWith} nor {\tt dieWith} are present, -! {\tt indexRA\_()} terminates execution with an internally generated -! error message; -! \item if {\tt perrWith} is present, but {\tt dieWith} is not, an error -! message is written to {\tt stderr} incorporating user-supplied -! traceback information stored in the argument {\tt perrWith}; -! \item if {\tt dieWith} is present, execution terminates with an error -! message written to {\tt stderr} that incorporates user-supplied -! traceback information stored in the argument {\tt dieWith}; and -! \item if both {\tt perrWith} and {\tt dieWith} are present, execution -! terminates with an error message using {\tt dieWith}, and the argument -! {\tt perrWith} is ignored. -! \end{enumerate} -! -! !INTERFACE: - - integer function indexRA_(GGrid, item, perrWith, dieWith) -! -! !USES: -! - use m_stdio - use m_die - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - use m_AttrVect, only : AttrVect_indexRA => indexRA - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - Initial version. -! 27Mar02 - Jay Larson - Cleaned up error -! handling logic. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::indexRA_' - - - type(String) :: myTrace - - ! 
Generate a traceback String - - if(present(dieWith)) then ! append myname_ onto dieWith - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then ! append myname_ onto perrwith - call GenTraceBackString(myTrace, perrWith, myname_) - else ! Start a TraceBack String - call GenTraceBackString(myTrace, myname_) - endif - endif - - ! Call AttrVect_indexRA() accordingly: - - if( present(dieWith) .or. & - ((.not. present(dieWith)) .and. (.not. present(perrWith))) ) then - indexRA_ = AttrVect_indexRA(GGrid%data, item, & - dieWith=String_ToChar(myTrace)) - else ! perrWith but no dieWith case - indexRA_ = AttrVect_indexRA(GGrid%data, item, & - perrWith=String_ToChar(myTrace)) - endif - - call String_clean(myTrace) - - end function indexRA_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: lsize - Number of Grid Points -! -! !DESCRIPTION: -! This {\tt INTEGER} function returns the number of grid points stored -! in the input {\tt GeneralGrid} argument {\tt GGrid}. Note that the -! value returned will be the number of points stored on a local process -! in the case of a distributed {\tt GeneralGrid}. -! -! !INTERFACE: - - integer function lsize_(GGrid) -! -! !USES: -! - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_AttrVect, only : AttrVect_lsize => lsize - use m_die, only : die - - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: GGrid - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - Initial version. -! 27Mar02 - Jay Larson - slight logic change. -! 27Mar02 - Jay Larson - Bug fix and use of -! List_allocated() function to check for existence of -! attributes. -! 5Aug02 - E. Ong - more rigorous revision -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::lsize_' - - if(List_allocated(GGrid%data%rList) .and. & - List_allocated(GGrid%data%iList)) then - - lsize_ = AttrVect_lsize( GGrid%data ) - - else - - call die(myname_,"Argument GGrid%data is not associated!") - - endif - - end function lsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportIAttr_ - Return GeneralGrid INTEGER Attribute as a Vector -! -! !DESCRIPTION: -! This routine extracts from the input {\tt GeneralGrid} argument -! {\tt GGrid} the integer attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}, and returns it in -! the {\tt INTEGER} output array {\tt outVect}, and its length in the -! output {\tt INTEGER} argument {\tt lsize}. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt GeneralGrid} {\tt List} component {\tt GGrid\%data\%iList}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt outVect} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt outVect}, -! then the user must ensure this pointer is not allocated (i.e. 
the user -! must nullify this pointer) before this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt outVect}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportIAttr_(GGrid, AttrTag, outVect, lsize) -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportIAttr => exportIAttr - - implicit none - -! !INPUT PARAMETERS: - - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: AttrTag - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 13Dec01 - J.W. Larson - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportIAttr_' - - ! Export the data (inheritance from AttrVect) - if(present(lsize)) then - call AttrVect_exportIAttr(GGrid%data, AttrTag, outVect, lsize) - else - call AttrVect_exportIAttr(GGrid%data, AttrTag, outVect) - endif - - end subroutine exportIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportRAttrSP_ - Return GeneralGrid REAL Attribute as a Vector -! -! !DESCRIPTION: -! This routine extracts from the input {\tt GeneralGrid} argument -! {\tt GGrid} the real attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}, and returns it in -! the {\tt REAL} output array {\tt outVect}, and its length in the -! output {\tt INTEGER} argument {\tt lsize}. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt GeneralGrid} {\tt List} component {\tt GGrid\%data\%rList}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt outVect} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt outVect}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) before this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt outVect}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportRAttrSP_(GGrid, AttrTag, outVect, lsize) -! -! !USES: -! - use m_die - use m_stdio - - use m_realkinds, only : SP - - use m_AttrVect, only : AttrVect_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: AttrTag - -! !OUTPUT PARAMETERS: - - real(SP), dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 13Dec01 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRAttrSP_' - - ! 
Export the data (inheritance from AttrVect) - - if(present(lsize)) then - call AttrVect_exportRAttr(GGrid%data, AttrTag, outVect, lsize) - else - call AttrVect_exportRAttr(GGrid%data, AttrTag, outVect) - endif - - end subroutine exportRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! --------------------------------------------------------------------- -! -! !IROUTINE: exportRAttrDP_ - Return GeneralGrid REAL Attribute as a Vector -! -! !DESCRIPTION: -! double precision version of exportRAttrSP_ -! -! !INTERFACE: - - subroutine exportRAttrDP_(GGrid, AttrTag, outVect, lsize) -! -! !USES: -! - use m_die - use m_stdio - - use m_realkinds, only : DP - - use m_AttrVect, only : AttrVect_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: AttrTag - -! !OUTPUT PARAMETERS: - - real(DP), dimension(:), pointer :: outVect - integer, optional, intent(out) :: lsize - -! !REVISION HISTORY: -! 13Dec01 - J.W. Larson - initial prototype. -! -!_______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportRAttrDP_' - - ! Export the data (inheritance from AttrVect) - if(present(lsize)) then - call AttrVect_exportRAttr(GGrid%data, AttrTag, outVect, lsize) - else - call AttrVect_exportRAttr(GGrid%data, AttrTag, outVect) - endif - - end subroutine exportRAttrDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importIAttr_ - Import GeneralGrid INTEGER Attribute -! -! !DESCRIPTION: -! This routine imports data provided in the input {\tt INTEGER} vector -! {\tt inVect} into the {\tt GeneralGrid} argument {\tt GGrid}, storing -! it as the integer attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}. The input -! {\tt INTEGER} argument {\tt lsize} is used to ensure there is -! sufficient space in the {\tt GeneralGrid} to store the data. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt GeneralGrid} {\tt List} component {\tt GGrid\%data\%iList}. -! -! !INTERFACE: - - subroutine importIAttr_(GGrid, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_importIAttr => importIAttr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - integer, dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(GeneralGrid), intent(inout) :: GGrid - -! !REVISION HISTORY: -! 13Dec01 - J.W. Larson - initial prototype. -! 27Mar02 - Jay Larson - improved error handling. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importIAttr_' - - ! Argument Check: - - if(lsize > lsize_(GGrid)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(GGrid).', & - 'lsize = ',lsize,'lsize_(GGrid) = ',lsize_(GGrid) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importIAttr(GGrid%data, AttrTag, inVect, lsize) - - end subroutine importIAttr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importRAttrSP_ - Import GeneralGrid REAL Attribute -! -! !DESCRIPTION: -! This routine imports data provided in the input {\tt REAL} vector -! {\tt inVect} into the {\tt GeneralGrid} argument {\tt GGrid}, storing -! it as the real attribute corresponding to the tag defined in -! the input {\tt CHARACTER} argument {\tt AttrTag}. The input -! {\tt INTEGER} argument {\tt lsize} is used to ensure there is -! sufficient space in the {\tt GeneralGrid} to store the data. -! -! {\bf N.B.:} This routine will fail if the {\tt AttrTag} is not in -! the {\tt GeneralGrid} {\tt List} component {\tt GGrid\%data\%rList}. -! -! !INTERFACE: - - subroutine importRAttrSP_(GGrid, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die , only : die - use m_die , only : MP_perr_die - use m_stdio , only : stderr - - use m_realkinds, only : SP - - use m_AttrVect, only : AttrVect_importRAttr => importRAttr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - real(SP), dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(GeneralGrid), intent(inout) :: GGrid - -! !REVISION HISTORY: -! 13Dec01 - J.W. Larson - initial prototype. -! 27Mar02 - Jay Larson - improved error handling. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importRAttrSP_' - - ! Argument Check: - - if(lsize > lsize_(GGrid)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(GGrid).', & - 'lsize = ',lsize,'lsize_(GGrid) = ',lsize_(GGrid) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importRAttr(GGrid%data, AttrTag, inVect, lsize) - - end subroutine importRAttrSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! -! !IROUTINE: importRAttrDP_ - Import GeneralGrid REAL Attribute -! -! !DESCRIPTION: -! Double precision version of importRAttrSP_ -! -! !INTERFACE: - - subroutine importRAttrDP_(GGrid, AttrTag, inVect, lsize) -! -! !USES: -! - use m_die , only : die - use m_die , only : MP_perr_die - use m_stdio , only : stderr - - use m_realkinds, only : DP - - use m_AttrVect, only : AttrVect_importRAttr => importRAttr - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: AttrTag - real(DP), dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(GeneralGrid), intent(inout) :: GGrid - -! !REVISION HISTORY: -! 13Dec01 - J.W. Larson - initial prototype. -! 27Mar02 - Jay Larson - improved error handling. -!_______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importRAttrDP_' - - ! Argument Check: - - if(lsize > lsize_(GGrid)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(GGrid).', & - 'lsize = ',lsize,'lsize_(GGrid) = ',lsize_(GGrid) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importRAttr(GGrid%data, AttrTag, inVect, lsize) - - end subroutine importRAttrDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
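-! A minimal usage sketch for the export/import routines above, assuming
-! the generic public names exportRAttr/importRAttr resolve to the
-! exportRAttrDP_/importRAttrDP_ routines shown here; GGrid, the tag
-! 'grdlat', lats, npts, and ier are example names only.  The pointer is
-! nullified so the export allocates it, and the caller must deallocate
-! it afterwards to avoid a memory leak:
-!
-!   real(DP), dimension(:), pointer :: lats
-!   integer :: npts, ier
-!   nullify(lats)                           ! let exportRAttr allocate lats
-!   call exportRAttr(GGrid, 'grdlat', lats, npts)
-!   lats(:) = lats(:) + 1.0_DP              ! modify the attribute values
-!   call importRAttr(GGrid, 'grdlat', lats, npts)
-!   deallocate(lats, stat=ier)              ! caller owns this memory
-!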
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Sort_ - Generate Sort Permutation Defined by Arbitrary Keys. -! -! !DESCRIPTION: -! The subroutine {\tt Sort\_()} uses the list of keys present in the -! input {\tt List} variable {\tt key\_List}. This list of keys is -! checked to ensure that {\em only} coordinate attributes are present -! in the sorting keys, and that there are no redundant keys. Once -! checked, this list is used to find the appropriate real attributes -! referenced by the items in {\tt key\_list} ( that is, it identifies the -! appropriate entries in {\tt GGrid\%data\%rList}), and then uses these -! keys to generate a an output permutation {\tt perm} that will put -! the entries of the attribute vector {\tt GGrid\%data} in lexicographic -! order as defined by {\tt key\_list} (the ordering in {\tt key\_list} -! being from left to right. -! -! !INTERFACE: - - subroutine Sort_(GGrid, key_List, perm, descend) - -! -! !USES: -! - use m_stdio - use m_die - - use m_AttrVect, only : AttrVect_Sort => Sort - use m_List, only : List_nitem => nitem - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: GGrid - type(List), intent(in) :: key_list - logical, dimension(:), optional, intent(in) :: descend - -! !OUTPUT PARAMETERS: -! - integer, dimension(:), pointer :: perm - - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - Initial version. -! 20Mar01 - Jay Larson - Final working version. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::Sort_' - logical, dimension(:), allocatable :: descending - integer :: n, ierr - - ! Here is how we transmit the sort order keys stored - ! in descending (if present): - - n = List_nitem(key_list) - allocate(descending(n), stat=ierr) - if(ierr /= 0) then - call die(myname_,"allocate(descending...",ierr) - endif - - if(present(descend)) then - descending = descend - else - descending = .false. - endif - - ! This is a straightforward call to AttrVect_Sort(). - - call AttrVect_Sort(GGrid%data, key_list, perm, descending) - - ! Clean up... - - deallocate(descending, stat=ierr) - if(ierr /= 0) then - call die(myname_,"deallocate(descending...",ierr) - endif - - end subroutine Sort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Sortg_ - Generate Sort Permutation Based on GeneralGrid Keys. -! -! !DESCRIPTION: -! The subroutine {\tt Sortg\_()} uses the list of sorting keys present in -! the input {\tt GeneralGrid} variable {\tt GGrid\%coordinate\_sort\_order} -! to create a sort permutation {\tt perm(:)}. Sorting is either in ascending -! or descending order based on the entries of {\tt GGrid\%descend(:)}. -! The output index permutation is stored in the array {\tt perm(:)} that -! will put the entries of the attribute vector {\tt GGrid\%data} in -! lexicographic order as defined by {\tt GGrid\%coordinate\_sort\_order}. The -! ordering in {\tt GGrid\%coordinate\_sort\_order} being from left to right. -! -! {\bf N.B.:} This routine returnss an allocatable array perm(:). This -! allocated array must be deallocated when the user no longer needs it. -! Failure to do so will cause a memory leak. -! -! {\bf N.B.:} This routine will fail if {\tt GGrid} has not been initialized -! 
with sort keys in the {\tt List} component {\tt GGrid\%coordinate\_sort\_order}. -! -! !INTERFACE: - - subroutine Sortg_(GGrid, perm) - -! -! !USES: -! - use m_List, only : List_allocated => allocated - use m_die, only : die - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: GGrid - -! !OUTPUT PARAMETERS: -! - integer, dimension(:), pointer :: perm - -! !REVISION HISTORY: -! 22Mar01 - Jay Larson - Initial version. -! 5Aug02 - E. Ong - revise with more error checking. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::Sortg_' - - if(.not.List_allocated(GGrid%coordinate_sort_order)) then - call die(myname_, "GGrid%coordinate_aort_order must be & - &allocated for use in any sort function") - endif - - if(associated(GGrid%descend)) then - call Sort_(GGrid, GGrid%coordinate_sort_order, & - perm, GGrid%descend) - else - call Sort_(GGrid=GGrid, key_list=GGrid%coordinate_sort_order, & - perm=perm) - endif - - end subroutine Sortg_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Permute_ - Permute GeneralGrid Attributes Using Supplied Index Permutation -! -! !DESCRIPTION: -! The subroutine {\tt Permute\_()} uses an input index permutation {\tt perm} -! to re-order the coordinate data stored in the {\tt GeneralGrid} argument -! {\tt GGrid}. This permutation can be generated by either of the routines -! {\tt Sort\_()} or {\tt Sortg\_()} contained in this module. -! -! !INTERFACE: - - subroutine Permute_(GGrid, perm) - -! -! !USES: -! - - use m_stdio - use m_die - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_Permute => Permute - - implicit none - -! !INPUT PARAMETERS: -! - integer, dimension(:), intent(in) :: perm - -! !INPUT/OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(inout) :: GGrid - - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 10Apr01 - Jay Larson - API modified, working -! code. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::Permute_' - - ! This is a straightforward call to AttrVect_Permute: - - call AttrVect_Permute(GGrid%data, perm) - - end subroutine Permute_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SortPermute_ - Sort and Permute GeneralGrid Attributes -! -! !DESCRIPTION: -! The subroutine {\tt SortPermute\_()} uses the list of keys defined in -! {\tt GGrid\%coordinate\_sort\_order} to create an index permutation -! {\tt perm}, which is then applied to re-order the coordinate data stored -! in the {\tt GeneralGrid} argument {\tt GGrid} (more specifically, the -! gridpoint data stored in {\tt GGrid\%data}. This permutation is generated -! by the routine {\tt Sortg\_()} contained in this module. The permutation -! is carried out by the routine {\tt Permute\_()} contained in this module. -! -! {\bf N.B.:} This routine will fail if {\tt GGrid} has not been initialized -! with sort keys in the {\tt List} component {\tt GGrid\%coordinate\_sort\_order}. -! -! !INTERFACE: - - subroutine SortPermute_(GGrid) - -! -! !USES: -! - use m_stdio - use m_die - - implicit none - -! 
!INPUT/OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(inout) :: GGrid - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 10Apr01 - Jay Larson - API modified, working -! code. -! 13Apr01 - Jay Larson - Simplified API and -! code (Thanks to Tony Craig of NCAR for detecting the -! bug that inspired these changes). -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::SortPermute_' - - integer, dimension(:), pointer :: perm - integer :: ierr - - call Sortg_(GGrid, perm) - - call Permute_(GGrid, perm) - -! Clean up--deallocate temporary permutation array: - - deallocate(perm, stat=ierr) - if(ierr /= 0) then - call die(myname_,"deallocate(perm)",ierr) - endif - - end subroutine SortPermute_ - - end module m_GeneralGrid - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/externals/mct/mct/m_GeneralGridComms.F90 b/src/externals/mct/mct/m_GeneralGridComms.F90 deleted file mode 100644 index f5118309694..00000000000 --- a/src/externals/mct/mct/m_GeneralGridComms.F90 +++ /dev/null @@ -1,1536 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_GeneralGridComms - Communications for the GeneralGrid type. -! -! !DESCRIPTION: -! -! In this module, we define communications methods specific to the -! {\tt GeneralGrid} class (see the module {\tt m\_GeneralGrid} for more -! information about this class and its methods). -! -! !INTERFACE: - module m_GeneralGridComms -! -! !USES: -! - use m_GeneralGrid ! GeneralGrid class and its methods - - - implicit none - - private ! except - - public :: gather ! gather all local vectors to the root - public :: scatter ! scatter from the root to all PEs - public :: bcast ! bcast from root to all PEs - public :: send ! Blocking SEND - public :: recv ! Blocking RECEIVE - - interface gather ; module procedure & - GM_gather_, & - GSM_gather_ - end interface - interface scatter ; module procedure & - GM_scatter_, & - GSM_scatter_ - end interface - interface bcast ; module procedure bcast_ ; end interface - interface send ; module procedure send_ ; end interface - interface recv ; module procedure recv_ ; end interface - -! !REVISION HISTORY: -! 27Apr01 - J.W. Larson - Initial module/APIs -! 07Jun01 - J.W. Larson - Added point-to-point -! 27Mar02 - J.W. Larson - Overhaul of error -! handling calls throughout this module. -! 05Aug02 - E. Ong - Added buffer association -! error checks to avoid making bad MPI calls -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_GeneralGridComms' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: send_ - Point-to-point blocking send for the GeneralGrid. -! -! !DESCRIPTION: The point-to-point send routine {\tt send\_()} sends -! the input {\tt GeneralGrid} argument {\tt iGGrid} to component -! {\tt comp\_id}. -! The message is identified by the tag defined by the {\tt INTEGER} -! argument {\tt TagBase}. 
The value of {\tt TagBase} must match the -! value used in the call to {\tt recv\_()} on process {\tt dest}. The -! success (failure) of this operation corresponds to a zero (nonzero) -! value for the output {\tt INTEGER} flag {\tt status}. -! The argument will be sent to the local root of the component. -! -! {\bf N.B.}: One must avoid assigning elsewhere the MPI tag values -! between {\tt TagBase} and {\tt TagBase+20}, inclusive. This is -! because {\tt send\_()} performs one send operation set up the header -! transfer, up to five {\tt List\_send} operations (two {\tt MPI\_SEND} -! calls in each), two send operations to transfer {\tt iGGrid\%descend(:)}, -! and finally the send of the {\tt AttrVect} component {\tt iGGrid\%data} -! (which comprises eight {\tt MPI\_SEND} operations). -! -! !INTERFACE: - - subroutine send_(iGGrid, comp_id, TagBase, status) - -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - use m_MCTWorld, only : ComponentToWorldRank - use m_MCTWorld, only : ThisMCTWorld - - use m_AttrVectComms,only : AttrVect_send => send - - use m_List, only : List_send => send - use m_List, only : List_allocated => allocated - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: iGGrid - integer, intent(in) :: comp_id - integer, intent(in) :: TagBase - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 04Jun01 - J.W. Larson - API Specification. -! 07Jun01 - J.W. Larson - Initial version. -! 10Jun01 - J.W. Larson - Bug fixes--now works. -! 11Jun01 - R. Jacob use component id as input -! argument. -! 13Jun01 - J.W. Larson - Initialize status -! (if present). -! 15Feb02 - J.W. Larson - Made input argument -! comm optional. -! 13Jun02 - J.W. Larson - Removed the argument -! comm. This routine is now explicitly for intercomponent -! communications only. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::send_' - - integer :: ierr - integer :: dest - logical :: HeaderAssoc(6) - - ! Initialize status (if present) - - if(present(status)) status = 0 - - dest = ComponentToWorldRank(0, comp_id, ThisMCTWorld) - - ! Step 1. Check elements of the GeneralGrid header to see - ! which components of it are allocated. Load the results - ! into HeaderAssoc(:), and send it to process dest. - - HeaderAssoc(1) = List_allocated(iGGrid%coordinate_list) - HeaderAssoc(2) = List_allocated(iGGrid%coordinate_sort_order) - HeaderAssoc(3) = associated(iGGrid%descend) - HeaderAssoc(4) = List_allocated(iGGrid%weight_list) - HeaderAssoc(5) = List_allocated(iGGrid%other_list) - HeaderAssoc(6) = List_allocated(iGGrid%index_list) - - call MPI_SEND(HeaderAssoc, 6, MP_LOGICAL, dest, TagBase, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: MPI_SEND(HeaderAssoc...',ierr) - endif - - ! Step 2. If iGGrid%coordinate_list is defined, send it. - - if(HeaderAssoc(1)) then - call List_send(iGGrid%coordinate_list, dest, TagBase+1, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: call List_send(iGGrid%coordinate_list...', & - 'Error flag ierr = ',ierr - if(present(status)) then - status = ierr - return - else - call die(myname_,':: call List_send(iGGrid%coordinate_list...',ierr) - endif - endif - else ! 
This constitutes an error, as a GeneralGrid must have coordinates - - if(present(status)) then - write(stderr,*) myname_,':: Error. GeneralGrid%coordinate_list undefined.' - status = -1 - return - else - call die(myname_,':: Error. GeneralGrid%coordinate_list undefined.',-1) - endif - - endif ! if(HeaderAssoc(1))... - - ! Step 3. If iGGrid%coordinate_sort_order is defined, send it. - - if(HeaderAssoc(2)) then - call List_send(iGGrid%coordinate_sort_order, dest, TagBase+3, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_send(iGGrid%coordinate_sort_order...' - status = ierr - return - else - call die(myname_,':: call List_send(iGGrid%coordinate_sort_order...',ierr) - endif - endif - - endif ! if(HeaderAssoc(2))... - - ! Step 4. If iGGrid%descend is allocated, determine its size, - ! send this size, and then send the elements of iGGrid%descend. - - if(HeaderAssoc(3)) then - - if(size(iGGrid%descend)<=0) call die(myname_,'size(iGGrid%descend)<=0') - - call MPI_SEND(size(iGGrid%descend), 1, MP_type(size(iGGrid%descend)), & - dest, TagBase+5, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_SEND(size(iGGrid%descend)...',ierr) - endif - - call MPI_SEND(iGGrid%descend, size(iGGrid%descend), MP_type(iGGrid%descend(1)), & - dest, TagBase+6, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_SEND(iGGrid%descend...',ierr) - endif - - endif ! if(HeaderAssoc(3))... - - ! Step 5. If iGGrid%weight_list is defined, send it. - - if(HeaderAssoc(4)) then - - call List_send(iGGrid%weight_list, dest, TagBase+7, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_send(iGGrid%weight_list...' - status = ierr - return - else - call die(myname_,':: call List_send(iGGrid%weight_list...',ierr) - endif - endif - - endif ! if(HeaderAssoc(4))... - - ! Step 6. If iGGrid%other_list is defined, send it. - - if(HeaderAssoc(5)) then - - call List_send(iGGrid%other_list, dest, TagBase+9, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_send(iGGrid%other_list...' - status = ierr - return - else - call die(myname_,':: call List_send(iGGrid%other_list...',ierr) - endif - endif - - endif ! if(HeaderAssoc(5))... - - ! Step 7. If iGGrid%index_list is defined, send it. - - if(HeaderAssoc(6)) then - - call List_send(iGGrid%index_list, dest, TagBase+11, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_send(iGGrid%index_list...' - status = ierr - return - else - call die(myname_,':: call List_send(iGGrid%index_list...',ierr) - endif - endif - - else ! This constitutes an error, as a GeneralGrid must at a minimum - ! contain the index GlobGridNum - - if(present(status)) then - write(stderr,*) myname_,':: Error. GeneralGrid%index_list undefined.' - status = -2 - return - else - call die(myname_,':: Error. GeneralGrid%index_list undefined.',-2) - endif - - endif ! if(HeaderAssoc(6))... - - ! Step 8. Finally, send the AttrVect iGGrid%data. - - call AttrVect_send(iGGrid%data, dest, TagBase+13, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call AttrVect_send(iGGrid%data...' - status = ierr - return - else - call die(myname_,':: call AttrVect_send(iGGrid%data...',ierr) - endif - endif - - ! The GeneralGrid send is now complete. 
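-
-  ! A minimal sketch of the matched blocking pair, assuming component 1
-  ! sends to component 2, that TagBase=1000 (and tags 1000-1020) is unused
-  ! elsewhere, and that SrcGrid, DstGrid, and ier are example names only.
-  ! The same TagBase value must be supplied on both sides, and the received
-  ! GeneralGrid should later be freed with clean() from m_GeneralGrid:
-  !
-  !   ! on the root process of component 1:
-  !   call send(SrcGrid, 2, 1000, ier)
-  !   ! on the root process of component 2:
-  !   call recv(DstGrid, 1, 1000, ier)
-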
- - end subroutine send_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: recv_ - Point-to-point blocking recv for the GeneralGrid. -! -! !DESCRIPTION: The point-to-point receive routine {\tt recv\_()} -! receives the output {\tt GeneralGrid} argument {\tt oGGrid} from component -! {\tt comp\_id}. The message is identified by the tag defined by the -! {\tt INTEGER} argument {\tt TagBase}. The value of {\tt TagBase} must -! match the value used in the call to {\tt send\_()} on the other component. -! The success (failure) of this operation corresponds to a zero (nonzero) -! value for the output {\tt INTEGER} flag {\tt status}. -! -! {\bf N.B.}: This routine assumes that the {\tt GeneralGrid} argument -! {\tt oGGrid} is uninitialized on input; that is, all the {\tt List} -! components are blank, the {\tt LOGICAL} array {\tt oGGrid\%descend} is -! unallocated, and the {\tt AttrVect} component {\tt oGGrid\%data} is -! uninitialized. The {\tt GeneralGrid} {\tt oGGrid} represents allocated -! memory. When the user no longer needs {\tt oGGrid}, it should be -! deallocated by invoking {\tt GeneralGrid\_clean()} (see -! {\tt m\_GeneralGrid} for further details). -! -! {\bf N.B.}: One must avoid assigning elsewhere the MPI tag values -! between {\tt TagBase} and {\tt TagBase+20}, inclusive. This is -! because {\tt recv\_()} performs one receive operation set up the header -! transfer, up to five {\tt List\_recv} operations (two {\tt MPI\_RECV} -! calls in each), two receive operations to transfer {\tt iGGrid\%descend(:)}, -! and finally the receive of the {\tt AttrVect} component {\tt iGGrid\%data} -! (which comprises eight {\tt MPI\_RECV} operations). -! -! !INTERFACE: - - subroutine recv_(oGGrid, comp_id, TagBase, status) - -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - use m_MCTWorld, only : ComponentToWorldRank - use m_MCTWorld, only : ThisMCTWorld - - use m_AttrVectComms,only : AttrVect_recv => recv - - use m_List,only : List_recv => recv - use m_List,only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: comp_id - integer, intent(in) :: TagBase - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: oGGrid - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 04Jun01 - J.W. Larson - API Specification. -! 07Jun01 - J.W. Larson - Initial version. -! 10Jun01 - J.W. Larson - Bug fixes--now works. -! 11Jun01 - R. Jacob use component id as input -! argument. -! 13Jun01 - J.W. Larson - Initialize status -! (if present). -! 13Jun02 - J.W. Larson - Removed the argument -! comm. This routine is now explicitly for intercomponent -! communications only. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::recv_' - - integer :: ierr - integer :: source - integer :: MPstatus(MP_STATUS_SIZE), DescendSize - logical :: HeaderAssoc(6) - -! for now, assume the components root is the source. - source = ComponentToWorldRank(0, comp_id, ThisMCTWorld) - - ! Step 1. Receive the elements of the LOGICAL flag array - ! HeaderAssoc. TRUE entries in this array correspond to - ! Check elements of the GeneralGrid header that are not - ! 
blank, and are being sent by process source. - ! - ! The significance of the entries of HeaderAssoc has been - ! defined in send_(). Here are the definitions of these - ! values: - ! - ! HeaderAssoc(1) = List_allocated(oGGrid%coordinate_list) - ! HeaderAssoc(2) = List_allocated(oGGrid%coordinate_sort_order) - ! HeaderAssoc(3) = associated(oGGrid%descend) - ! HeaderAssoc(4) = List_allocated(oGGrid%weight_list) - ! HeaderAssoc(5) = List_allocated(oGGrid%other_list) - ! HeaderAssoc(6) = List_allocated(oGGrid%index_list) - - ! Initialize status (if present) - - if(present(status)) status = 0 - - ! Step 1. Nullify oGGrid components, set HeaderAssoc(:) to .FALSE., - ! then receive incoming HeaderAssoc(:) data - - call List_nullify(oGGrid%coordinate_list) - call List_nullify(oGGrid%coordinate_sort_order) - call List_nullify(oGGrid%weight_list) - call List_nullify(oGGrid%other_list) - call List_nullify(oGGrid%index_list) - nullify(oGGrid%descend) - - HeaderAssoc = .FALSE. - - call MPI_RECV(HeaderAssoc, 6, MP_LOGICAL, source, TagBase, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: MPI_RECV(HeaderAssoc...',ierr) - endif - - ! Step 2. If oGGrid%coordinate_list is defined, receive it. - - if(HeaderAssoc(1)) then - call List_recv(oGGrid%coordinate_list, source, TagBase+1, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_recv(oGGrid%coordinate_list...' - status = ierr - return - else - call die(myname_,':: call List_recv(oGGrid%coordinate_list...',ierr) - endif - endif - else ! This constitutes an error, as a GeneralGrid must have coordinates - - if(present(status)) then - write(stderr,*) myname_,':: Error. GeneralGrid%coordinate_list undefined.' - status = -1 - return - else - call die(myname_,':: Error. GeneralGrid%coordinate_list undefined.',-1) - endif - - endif ! if(HeaderAssoc(1))... - - ! Step 3. If oGGrid%coordinate_sort_order is defined, receive it. - - if(HeaderAssoc(2)) then - call List_recv(oGGrid%coordinate_sort_order, source, TagBase+3, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: Error calling ',& - 'List_recv(oGGrid%coordinate_sort_order...' - status = ierr - return - else - call die(myname_,':: call List_recv(oGGrid%coordinate_sort_order...', ierr) - endif - endif - endif ! if(HeaderAssoc(2))... - - ! Step 4. If oGGrid%descend is allocated, determine its size, - ! receive this size, allocate oGGrid%descend, and then receive - ! the elements of oGGrid%descend. - - if(HeaderAssoc(3)) then - - call MPI_RECV(DescendSize, 1, MP_type(DescendSize), & - source, TagBase+5, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_RECV(size(oGGrid%descend)...',ierr) - endif - - allocate(oGGrid%descend(DescendSize), stat=ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: allocate(oGGrid%descend...' - status = ierr - return - else - call die(myname_,':: allocate(oGGrid%descend... failed.',ierr) - endif - endif - - call MPI_RECV(oGGrid%descend, DescendSize, MP_type(oGGrid%descend(1)), & - source, TagBase+6, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,':: call MPI_RECV(oGGrid%descend...',ierr) - endif - - endif ! if(HeaderAssoc(3))... - - ! Step 5. If oGGrid%weight_list is defined, receive it. 
- - if(HeaderAssoc(4)) then - - call List_recv(oGGrid%weight_list, source, TagBase+7, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_recv(oGGrid%weight_list...' - status = ierr - return - else - call die(myname_,':: call List_recv(oGGrid%weight_list...',ierr) - endif - endif - - endif ! if(HeaderAssoc(4))... - - ! Step 6. If oGGrid%other_list is defined, receive it. - - if(HeaderAssoc(5)) then - - call List_recv(oGGrid%other_list, source, TagBase+9, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_recv(oGGrid%other_list...' - status = ierr - return - else - call die(myname_,':: call List_recv(oGGrid%other_list...',ierr) - endif - endif - - endif ! if(HeaderAssoc(5))... - - ! Step 7. If oGGrid%index_list is defined, receive it. - - if(HeaderAssoc(6)) then - - call List_recv(oGGrid%index_list, source, TagBase+11, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call List_recv(oGGrid%index_list...' - status = ierr - return - else - call die(myname_,':: call List_recv(oGGrid%index_list...',ierr) - endif - endif - - else ! This constitutes an error, as a GeneralGrid must at a minimum - ! contain the index GlobGridNum - - if(present(status)) then - write(stderr,*) myname_,':: Error. GeneralGrid%index_list undefined.' - status = -2 - return - else - call die(myname_,':: Error. GeneralGrid%index_list undefined.',-2) - endif - - endif ! if(HeaderAssoc(6))... - - ! Step 8. Finally, receive the AttrVect oGGrid%data. - - call AttrVect_recv(oGGrid%data, source, TagBase+13, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,*) myname_,':: call AttrVect_recv(oGGrid%data...' - status = ierr - return - else - call die(myname_,':: call AttrVect_recv(oGGrid%data...',ierr) - endif - endif - - ! The GeneralGrid receive is now complete. - - end subroutine recv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GM_gather_ - gather a GeneralGrid using input GlobalMap. -! -! !DESCRIPTION: {\tt GM\_gather\_()} takes an input {\tt GeneralGrid} -! argument {\tt iG} whose decomposition on the communicator associated -! with the F90 handle {\tt comm} is described by the {\tt GlobalMap} -! argument {\tt GMap}, and gathers it to the {\tt GeneralGrid} output -! argument {\tt oG} on the {\tt root}. The success (failure) of this -! operation is reported as a zero (nonzero) value in the optional -! {\tt INTEGER} output argument {\tt stat}. - -! {\bf N.B.}: An important assumption made here is that the distributed -! {\tt GeneralGrid} {\tt iG} has been initialized with the same -! coordinate system, sort order, other real attributes, and the same -! indexing attributes for all processes on {\tt comm}. -! -! {\bf N.B.}: Once the gridpoint data of the {\tt GeneralGrid} are assembled -! on the {\tt root}, they are stored in the order determined by the input -! {\tt GlobalMap} {\tt GMap}. The user may need to sorted these gathered -! data to order them in accordance with the {\tt coordinate\_sort\_order} -! attribute of {\tt iG}. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt oG} represents allocated -! memory on the {\tt root}. When the user no longer needs {\tt oG} it -! 
should be deallocated using {\tt GeneralGrid\_clean()} to avoid a memory -! leak -! -! !INTERFACE: -! - subroutine GM_gather_(iG, oG, GMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_gsize => gsize - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - - use m_AttrVectComms,only : AttrVect_Gather => gather - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: iG - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: oG - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 27Apr01 - J.W. Larson - API Specification. -! 02May01 - J.W. Larson - Initial code. -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GM_gather_' -!Process ID - integer :: myID -!Error flag - integer :: ierr -!Number of points on the _Gathered_ grid: - integer :: length - - ! Initialize stat (if present) - - if(present(stat)) stat = 0 - - ! Which process am I? - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'call MPI_COMM_RANK()',ierr) - endif - - if(myID == root) then ! prepare oG: - - ! The length of the _gathered_ GeneralGrid oG is determined by - ! the GlobalMap function GlobalMap_gsize() - - length = GlobalMap_gsize(GMap) - - ! Initialize attributes of oG from iG - call copyGeneralGridHeader_(iG,oG) - - endif - - ! Gather gridpoint data in iG%data to oG%data - - call AttrVect_Gather(iG%data, oG%data, GMap, root, comm, ierr) - - if(ierr /= 0) then - write(stderr,*) myname_,':: Error--call AttrVect_Gather() failed.', & - ' ierr = ',ierr - if(present(stat)) then - stat=ierr - return - else - call die(myname_,'call AttrVect_Gather(ig%data...',ierr) - endif - endif - - end subroutine GM_gather_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GSM_gather_ - gather a GeneralGrid using input GlobalSegMap. -! -! !DESCRIPTION: {\tt GMS\_gather\_()} takes an input {\tt GeneralGrid} -! argument {\tt iG} whose decomposition on the communicator associated -! with the F90 handle {\tt comm} is described by the {\tt GlobalSegMap} -! argument {\tt GSMap}, and gathers it to the {\tt GeneralGrid} output -! argument {\tt oG} on the {\tt root}. The success (failure) of this -! operation is reported as a zero (nonzero) value in the optional -! {\tt INTEGER} output argument {\tt stat}. -! -! {\bf N.B.}: An important assumption made here is that the distributed -! {\tt GeneralGrid} {\tt iG} has been initialized with the same -! coordinate system, sort order, other real attributes, and the same -! indexing attributes for all processes on {\tt comm}. -! -! {\bf N.B.}: Once the gridpoint data of the {\tt GeneralGrid} are assembled -! on the {\tt root}, they are stored in the order determined by the input -! {\tt GlobalSegMap} {\tt GSMap}. The user may need to sorted these gathered -! data to order them in accordance with the {\tt coordinate\_sort\_order} -! attribute of {\tt iG}. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt oG} represents allocated -! memory on the {\tt root}. 
When the user no longer needs {\tt oG} it -! should be deallocated using {\tt GeneralGrid\_clean()} to avoid a memory -! leak -! -! !INTERFACE: - - subroutine GSM_gather_(iG, oG, GSMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - use m_AttrVectComms,only : AttrVect_Gather => gather - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: iG - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: oG - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 27Apr01 - J.W. Larson - API Specification. -! 01May01 - J.W. Larson - Working Version. -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GSM_gather_' - -!Process ID - integer :: myID -!Error flag - integer :: ierr -!Number of points on the _Gathered_ grid: - integer :: length - - ! Initialize stat (if present) - - if(present(stat)) stat = 0 - - ! Which process am I? - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK()',ierr) - endif - - if(myID == root) then ! prepare oG: - - ! The length of the _gathered_ GeneralGrid oG is determined by - ! the GlobalMap function GlobalSegMap_gsize() - - length = GlobalSegMap_gsize(GSMap) - - ! Initialize attributes of oG from iG - call copyGeneralGridHeader_(iG,oG) - - endif - - ! Gather gridpoint data in iG%data to oG%data - - call AttrVect_Gather(iG%data, oG%data, GSMap, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: ERROR--call AttrVect_Gather() failed.', & - ' ierr = ',ierr - if(present(stat)) then - stat=ierr - return - else - call die(myname_) - endif - endif - - end subroutine GSM_gather_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GM_scatter_ - scatter a GeneralGrid using input GlobalMap. -! -! !DESCRIPTION: {\tt GM\_scatter\_()} takes an input {\tt GeneralGrid} -! argument {\tt iG} (valid only on the {\tt root} process), and scatters -! it to the distributed {\tt GeneralGrid} variable {\tt oG}. The -! {\tt GeneralGrid} {\tt oG} is distributed on the communicator -! associated with the F90 handle {\tt comm} using the domain -! decomposition described by the {\tt GlobalMap} argument {\tt GMap}. -! The success (failure) of this operation is reported as a zero (nonzero) -! value in the optional {\tt INTEGER} output argument {\tt stat}. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt oG} represents allocated -! memory on the {\tt root}. When the user no longer needs {\tt oG} it -! should be deallocated using {\tt GeneralGrid\_clean()} to avoid a memory -! leak. -! -! !INTERFACE: - - subroutine GM_scatter_(iG, oG, GMap, root, comm, stat) -! -! !USES: -! 
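
For reference, a minimal sketch of gathering a decomposed GeneralGrid onto the root, assuming the module's public generic name for GSM_gather_ is gather (MCT's convention); the subroutine name is hypothetical, and dGrid, GSMap, root, and comm stand for a grid, its decomposition, and a communicator set up elsewhere.

```fortran
subroutine gather_grid_sketch(dGrid, GSMap, root, comm)

   use m_stdio,            only : stderr
   use m_mpif90
   use m_GlobalSegMap,     only : GlobalSegMap
   use m_GeneralGrid,      only : GeneralGrid
   use m_GeneralGrid,      only : GeneralGrid_clean => clean
   use m_GeneralGridComms, only : GeneralGrid_gather => gather

   implicit none

   type(GeneralGrid),  intent(in) :: dGrid   ! distributed grid, valid everywhere
   type(GlobalSegMap), intent(in) :: GSMap   ! its decomposition
   integer,            intent(in) :: root, comm

   type(GeneralGrid) :: gGrid                ! assembled grid, valid on root only
   integer :: myID, ierr

   call MPI_COMM_RANK(comm, myID, ierr)

   ! Gridpoint data arrive on the root in GSMap order, so a subsequent sort
   ! by dGrid's coordinate_sort_order may still be needed.
   call GeneralGrid_gather(dGrid, gGrid, GSMap, root, comm, ierr)
   if(ierr /= 0) then
      write(stderr,*) 'GeneralGrid gather failed, ierr = ', ierr
      return
   endif

   if(myID == root) then
      ! ... use gGrid here ...
      call GeneralGrid_clean(gGrid)          ! release the root-side copy
   endif

end subroutine gather_grid_sketch
```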
- use m_stdio - use m_die - use m_mpif90 - - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_lsize => lsize - use m_GlobalMap, only : GlobalMap_gsize => gsize - - use m_AttrVectComms, only : AttrVect_scatter => scatter - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: iG - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: oG - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 27Apr01 - J.W. Larson - API Specification. -! 04Jun01 - J.W. Larson - Changed comms model -! to MPI-style (i.e. iG valid on root only). -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GM_scatter_' - - logical :: DescendAssoc - integer :: DescendSize - integer :: ierr, myID - - ! Initialize status (if present) - - if(present(stat)) stat = 0 - - ! Step 1. Determine process ID number myID - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK(comm...',ierr) - endif - - ! Step 2. On the root, initialize the List and LOGICAL - ! attributes of the GeneralGrid variable iG to oG. - - if(myID == root) then - call copyGeneralGridHeader_(iG, oG) - endif - - ! Step 3. Broadcast from the root the List and LOGICAL - ! attributes of the GeneralGrid variable oG. - - call bcastGeneralGridHeader_(oG, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: Error calling bcastGeneralGridHeader_().',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_,'call bcastGeneralGridHeader_(oG...',ierr) - endif - endif - - - ! Step 4. Using the GeneralMap GMap, scatter the AttrVect - ! portion of the input GeneralGrid iG to the GeneralGrid oG. - - call AttrVect_scatter(iG%data, oG%data, GMap, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: Error calling AttrVect_scatter(iG%data...',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_,'call AttrVect_scatter(iG%data...',ierr) - endif - endif - - ! The GeneralGrid scatter is now complete. - - end subroutine GM_scatter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GSM_scatter_ - scatter a GeneralGrid using input GlobalSegMap. -! -! !DESCRIPTION: {\tt GM\_scatter\_()} takes an input {\tt GeneralGrid} -! argument {\tt iG} (valid only on the {\tt root} process), and scatters -! it to the distributed {\tt GeneralGrid} variable {\tt oG}. The -! {\tt GeneralGrid} {\tt oG} is distributed on the communicator -! associated with the F90 handle {\tt comm} using the domain -! decomposition described by the {\tt GlobalSegMap} argument {\tt GSMap}. -! The success (failure) of this operation is reported as a zero (nonzero) -! value in the optional {\tt INTEGER} output argument {\tt stat}. -! -! {\bf N.B.}: The output {\tt GeneralGrid} {\tt oG} represents allocated -! memory on the {\tt root}. When the user no longer needs {\tt oG} it -! 
should be deallocated using {\tt GeneralGrid\_clean()} to avoid a memory -! leak. -! -! !INTERFACE: - - subroutine GSM_scatter_(iG, oG, GSMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - - use m_AttrVectComms, only : AttrVect_scatter => scatter - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: iG - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(out) :: oG - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 27Apr01 - J.W. Larson - API Specification. -! 04Jun01 - J.W. Larson - Initial code. -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GSM_scatter_' - - integer :: ierr, myID - - ! Initialize stat (if present) - - if(present(stat)) stat = 0 - - ! Step 1. Determine process ID number myID - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK(comm...',ierr) - endif - - ! Step 2. On the root, initialize the List and LOGICAL - ! attributes of the GeneralGrid variable iG to oG. - - if(myID == root) then - call copyGeneralGridHeader_(iG, oG) - endif - - ! Step 3. Broadcast from the root the List and LOGICAL - ! attributes of the GeneralGrid variable oG. - - call bcastGeneralGridHeader_(oG, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: Error calling bcastGeneralGridHeader_(...',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_,'bcastGeneralGridHeader_(oG...',ierr) - endif - endif - - ! Step 4. Using the GeneralSegMap GSMap, scatter the AttrVect - ! portion of the input GeneralGrid iG to the GeneralGrid oG. - - call AttrVect_scatter(iG%data, oG%data, GSMap, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: Error calling AttrVect_scatter(iG%data...',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_,'call AttrVect_scatter(iG%data...',ierr) - endif - endif - - ! The GeneralGrid scatter is now complete. - - end subroutine GSM_scatter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcast_ - Broadcast a GeneralGrid. -! -! !DESCRIPTION: {\tt bcast\_()} takes an input {\tt GeneralGrid} -! argument {\tt ioG} (valid only on the {\tt root} process), and -! broadcasts it to all processes on the communicator associated with the -! F90 handle {\tt comm}. The success (failure) of this operation is -! reported as a zero (nonzero) value in the optional {\tt INTEGER} -! output argument {\tt stat}. -! -! {\bf N.B.}: On the non-root processes, the output {\tt GeneralGrid} -! {\tt ioG} represents allocated memory. When the user no longer needs -! {\tt ioG} it should be deallocated by invoking {\tt GeneralGrid\_clean()}. -! Failure to do so risks a memory leak. -! -! !INTERFACE: - - subroutine bcast_(ioG, root, comm, stat) -! -! 
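
The scatter direction, sketched under the same assumptions (generic name scatter; hypothetical subroutine name; gGrid is a complete grid valid on the root only):

```fortran
subroutine scatter_grid_sketch(gGrid, GSMap, root, comm, dGrid)

   use m_stdio,            only : stderr
   use m_GlobalSegMap,     only : GlobalSegMap
   use m_GeneralGrid,      only : GeneralGrid
   use m_GeneralGridComms, only : GeneralGrid_scatter => scatter

   implicit none

   type(GeneralGrid),  intent(in)  :: gGrid  ! complete grid, valid on the root only
   type(GlobalSegMap), intent(in)  :: GSMap  ! target decomposition
   integer,            intent(in)  :: root, comm
   type(GeneralGrid),  intent(out) :: dGrid  ! distributed result, valid everywhere

   integer :: ierr

   ! The header (Lists and descend flags) is broadcast and the gridpoint data
   ! are scattered according to GSMap; every process then owns allocated
   ! memory in dGrid and should eventually call GeneralGrid_clean(dGrid).
   call GeneralGrid_scatter(gGrid, dGrid, GSMap, root, comm, ierr)
   if(ierr /= 0) write(stderr,*) 'GeneralGrid scatter failed, ierr = ', ierr

end subroutine scatter_grid_sketch
```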
!USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - use m_AttrVectComms,only : AttrVect_bcast => bcast - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(inout) :: ioG - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 27Apr01 - J.W. Larson - API Specification. -! 02May01 - J.W. Larson - Initial version. -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcast_' - - integer :: ierr, myID - - ! Initialize status (if present) - - if(present(stat)) stat = 0 - - ! Step 1. Determine process ID number myID - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK(comm...',ierr) - endif - - ! Step 2. Broadcast from the root the List and LOGICAL - ! attributes of the GeneralGrid variable ioG. - - call bcastGeneralGridHeader_(ioG, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: Error calling bcastGeneralGridHeader_(...',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_) - endif - endif - - ! Step 3. Broadcast ioG%data from the root. - - call AttrVect_bcast(ioG%data, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: Error calling AttrVect_scatter(iG%data...',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_) - endif - endif - - ! The GeneralGrid broadcast is now complete. - - end subroutine bcast_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcastGeneralGridHeader_ - Broadcast the GeneralGrid Header. -! -! !DESCRIPTION: This routine broadcasts the header information from -! the input {\tt GeneralGrid} argument {\tt ioGGrid} (on input valid -! on the {\tt root} only). This broadcast is from the {\tt root} to -! all processes on the communicator associated with the fortran 90 -! {\tt INTEGER} handle {\tt comm}. The success (failure) of this operation -! corresponds to a zero (nonzero) value for the output {\tt INTEGER} flag -! {\tt stat}. -! -! The {\em header information} in a {\tt GeneralGrid} variable comprises -! all the non-{\tt AttrVect} components of the {\tt GeneralGrid}; that -! is, everything except the gridpoint coordinate, geometry, and index -! data stored in {\tt iGGrid\%data}. This information includes: -! \begin{enumerate} -! \item The coordinates in {\tt iGGrid\%coordinate\_list} -! \item The coordinate sort order in {\tt iGGrid\%coordinate\_sort\_order} -! \item The area/volume weights in {\tt iGGrid\%weight\_list} -! \item Other {\tt REAL} geometric information in {\tt iGGrid\%other\_list} -! \item Indexing information in {\tt iGGrid\%index\_list} -! \item The {\tt LOGICAL} descending/ascending order sort flags in -! {\tt iGGrid\%descend(:)}. -! \end{enumerate} -! -! 
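
A minimal sketch of the broadcast routine described above, assuming the module's public generic name is bcast and using a hypothetical wrapper subroutine; GGrid must be fully initialized on the root before the call.

```fortran
subroutine bcast_grid_sketch(GGrid, root, comm)

   use m_stdio,            only : stderr
   use m_GeneralGrid,      only : GeneralGrid
   use m_GeneralGridComms, only : GeneralGrid_bcast => bcast

   implicit none

   type(GeneralGrid), intent(inout) :: GGrid  ! fully initialized on the root only
   integer,           intent(in)    :: root, comm

   integer :: ierr

   ! After the call every process on comm holds a complete, allocated copy of
   ! GGrid; non-root processes must eventually call GeneralGrid_clean(GGrid).
   call GeneralGrid_bcast(GGrid, root, comm, ierr)
   if(ierr /= 0) write(stderr,*) 'GeneralGrid bcast failed, ierr = ', ierr

end subroutine bcast_grid_sketch
```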
!INTERFACE: - - subroutine bcastGeneralGridHeader_(ioGGrid, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_init => init - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - use m_List, only : List - use m_List, only : List_allocated => allocated - use m_List, only : List_nullify => nullify - use m_List, only : List_bcast => bcast - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(GeneralGrid), intent(inout) :: ioGGrid - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 05Jun01 - J.W. Larson - Initial code. -! 13Jun01 - J.W. Larson - Initialize stat -! (if present). -! 05Aug02 - E. Ong - added association checking -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcastGeneralGridHeader_' - -! Process ID - integer :: myID -! Error flag - integer :: ierr -! Size of array ioGGrid%descend(:) - integer :: DescendSize -! Header-Assocation array - logical :: HeaderAssoc(6) - - ! Initialize stat (if present) - - if(present(stat)) stat = 0 - - ! Determine process ID number myID - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK(comm...',ierr) - endif - - ! Step 0.5. Check elements of the GeneralGrid header to see - ! which components of it are allocated. Load the results - ! into HeaderAssoc(:), and broadcast it. - - if(myID == root) then - - HeaderAssoc(1) = List_allocated(ioGGrid%coordinate_list) - HeaderAssoc(2) = List_allocated(ioGGrid%coordinate_sort_order) - HeaderAssoc(3) = List_allocated(ioGGrid%weight_list) - HeaderAssoc(4) = List_allocated(ioGGrid%other_list) - HeaderAssoc(5) = List_allocated(ioGGrid%index_list) - HeaderAssoc(6) = associated(ioGGrid%descend) - - else - - call List_nullify(ioGGrid%coordinate_list) - call List_nullify(ioGGrid%coordinate_sort_order) - call List_nullify(ioGGrid%weight_list) - call List_nullify(ioGGrid%other_list) - call List_nullify(ioGGrid%index_list) - nullify(ioGGrid%descend) - - endif - - call MPI_BCAST(HeaderAssoc,6,MP_LOGICAL,root,comm,ierr) - - ! Step 1. Broadcast List attributes of the GeneralGrid. - - if(HeaderAssoc(1)) then - call List_bcast(ioGGrid%coordinate_list, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,'List_bcast(ioGGrid%coordinate_list... failed.',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_) - endif - endif - endif - - if(HeaderAssoc(2)) then - call List_bcast(ioGGrid%coordinate_sort_order, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,'List_bcast(ioGGrid%coordinate_sort_order... failed', & - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_) - endif - endif - endif - - if(HeaderAssoc(3)) then - call List_bcast(ioGGrid%weight_list, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,'List_bcast(ioGGrid%weight_list... 
failed',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_) - endif - endif - endif - - if(HeaderAssoc(4)) then - call List_bcast(ioGGrid%other_list, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,'List_bcast(ioGGrid%other_list... failed',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_) - endif - endif - endif - - if(HeaderAssoc(5)) then - call List_bcast(ioGGrid%index_list, root, comm, ierr) - if(ierr /= 0) then - write(stderr,*) myname_,'List_bcast(ioGGrid%index_list... failed',& - ' ierr = ',ierr - if(present(stat)) then - stat = ierr - return - else - call die(myname_) - endif - endif - endif - - ! If ioGGrid%descend is associated on the root, prepare and - ! execute its broadcast - - if(HeaderAssoc(6)) then - - ! On the root, get the size of ioGGrid%descend(:) - - if(myID == root) then - DescendSize = size(ioGGrid%descend) - if(DescendSize<=0) call die(myname_,'size(ioGGrid%descend)<=0') - endif - - ! Broadcast the size of ioGGrid%descend(:) from the root. - - call MPI_BCAST(DescendSize, 1, MP_INTEGER, root, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_BCAST(DescendSize...',ierr) - endif - - ! Off the root, allocate ioGGrid%descend(:) - - if(myID /= root) then - allocate(ioGGrid%descend(DescendSize), stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: ERROR in allocate(ioGGrid%descend...',& - ' ierr = ',ierr - call die(myname_) - endif - endif - - ! Finally, broadcast ioGGrid%descend(:) from the root - - call MPI_BCAST(ioGGrid%descend, DescendSize, MP_LOGICAL, root, & - comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_BCAST(ioGGrid%descend...',ierr) - endif - - endif - - ! The broadcast of the GeneralGrid Header from the & - ! root is complete. - - - end subroutine bcastGeneralGridHeader_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: copyGeneralGridHeader_ - Copy the GeneralGrid Header. -! -! !DESCRIPTION: This routine copies the header information from the -! input {\tt GeneralGrid} argument {\tt iGGrid} to the output -! {\tt GeneralGrid} argument {\tt oGGrid}. The {\em header information} -! in a {\tt GeneralGrid} variable comprises all the non-{\tt AttrVect} -! components of the {\tt GeneralGrid}; that is, everything except the -! gridpoint coordinate, geometry, and index data stored in -! {\tt iGGrid\%data}. This information includes: -! \begin{enumerate} -! \item The coordinates in {\tt iGGrid\%coordinate\_list} -! \item The coordinate sort order in {\tt iGGrid\%coordinate\_sort\_order} -! \item The area/volume weights in {\tt iGGrid\%weight\_list} -! \item Other {\tt REAL} geometric information in {\tt iGGrid\%other\_list} -! \item Indexing information in {\tt iGGrid\%index\_list} -! \item The {\tt LOGICAL} descending/ascending order sort flags in -! {\tt iGGrid\%descend(:)}. -! \end{enumerate} -! -! !INTERFACE: - - subroutine copyGeneralGridHeader_(iGGrid, oGGrid) -! -! !USES: -! - use m_stdio - use m_die - - use m_List, only : List - use m_List, only : List_copy => copy - use m_List, only : List_allocated => allocated - use m_List, only : List_nullify => nullify - - use m_GeneralGrid, only : GeneralGrid - - implicit none - -! !INPUT PARAMETERS: -! - type(GeneralGrid), intent(in) :: iGGrid - -! !OUTPUT PARAMETERS: -! 
- type(GeneralGrid), intent(out) :: oGGrid - -! !REVISION HISTORY: -! 05Jun01 - J.W. Larson - Initial code. -! 08Aug01 - E.T. Ong - changed list assignments(=) -! to list copy. -! 05Aug02 - E. Ong - added association checking -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::copyGeneralGridHeader_' - - logical :: DescendAssoc - integer :: DescendSize, i, ierr - - ! Step 1. Copy GeneralGrid List attributes from iGGrid - ! to oGGrid. - - call List_nullify(oGGrid%coordinate_list) - call List_nullify(oGGrid%coordinate_sort_order) - call List_nullify(oGGrid%weight_list) - call List_nullify(oGGrid%other_list) - call List_nullify(oGGrid%index_list) - nullify(oGGrid%descend) - - if(List_allocated(iGGrid%coordinate_list)) then - call List_copy(oGGrid%coordinate_list,iGGrid%coordinate_list) - endif - - if(List_allocated(iGGrid%coordinate_sort_order)) then - call List_copy(oGGrid%coordinate_sort_order,iGGrid%coordinate_sort_order) - endif - - if(List_allocated(iGGrid%weight_list)) then - call List_copy(oGGrid%weight_list,iGGrid%weight_list) - endif - - if(List_allocated(iGGrid%other_list)) then - call List_copy(oGGrid%other_list,iGGrid%other_list) - endif - - if(List_allocated(iGGrid%index_list)) then - call List_copy(oGGrid%index_list,iGGrid%index_list) - endif - - DescendAssoc = associated(iGGrid%descend) - if(DescendAssoc) then - - DescendSize = size(iGGrid%descend) - allocate(oGGrid%descend(DescendSize), stat=ierr) - if(ierr /= 0) then - write(stderr,*) myname_,':: ERROR--allocate(iGGrid%descend(... failed.',& - ' ierr = ', ierr, 'DescendSize = ', DescendSize - call die(myname_) - endif - do i=1,DescendSize - oGGrid%descend(i) = iGGrid%descend(i) - end do - - endif - - ! The GeneralGrid header copy is now complete. - - end subroutine copyGeneralGridHeader_ - - end module m_GeneralGridComms - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/externals/mct/mct/m_GlobalMap.F90 b/src/externals/mct/mct/m_GlobalMap.F90 deleted file mode 100644 index b5273e566b7..00000000000 --- a/src/externals/mct/mct/m_GlobalMap.F90 +++ /dev/null @@ -1,672 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_GlobalMap - One-Dimensional Domain Decomposition Descriptor -! -! !DESCRIPTION: -! The {\tt GlobalMap} is a datatype used to store descriptors of a -! one-dimensional domain decomposition for a vector on an MPI communicator. -! It is defined with three assumptions: -! \begin{enumerate} -! \item Each process ID owns only one segment; -! \item No two segments in the decomposition overlap; and -! \item The segments are laid out in identical order to the MPI rank of -! each process participating in the decomposition. -! \end{enumerate} -! per process ID). It is the simpler of the two domain decomposition -! descriptors offerd by MCT (the other being the {\tt GlobalSegMap}). -! It consists of the following components: -! \begin{itemize} -! \item The MCT component identification number (see the module -! {\tt m\_MCTWorld} for more information about MCT's component model -! registry); -! \item The {\em global} number of elements in the distributed vector; -! \item The number of elements {\em stored locally}; -! 
\item The number of elements {\em stored on each process} on the -! communicator over which the vector is distributed; and -! \item The index of the elemnent {\em immediately before} the starting -! element of each local segment (this choice allows for direct use of -! this information with MPI's scatter and gather operations). We refer -! to this quantity as the {\em displacement} of the segment, a term used -! both here and in the definition of the MCT {\tt Navigator} datatype. -! \end{itemize} -! -! Both the segment displacement and length data are stored in arrays -! whose indices run from zero to $N-1$, where $N$ is the number of MPI -! processes on the communicator on which the {\tt GlobalMap} is defined. -! This is done so this information corresponds directly to the MPI process -! ID's on whihc the segments reside. -! -! This module contains the definition of the {\tt GlobalMap} datatype, -! all-processor and an on-root creation methods (both of which can be -! used to create a {\tt GlobalMap} on the local communicator), a creation -! method to create/propagate a {\tt GlobalMap} native to a remote -! communicator, a destruction method, and a variety of query methods. -! -! !INTERFACE: - - module m_GlobalMap - -! !USES -! No external modules are used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: GlobalMap ! The class data structure - - Type GlobalMap - integer :: comp_id ! Component ID number - integer :: gsize ! the Global size - integer :: lsize ! my local size - integer,dimension(:),pointer :: counts ! all local sizes - integer,dimension(:),pointer :: displs ! PE ordered locations - End Type GlobalMap - -! !PUBLIC MEMBER FUNCTIONS: - - public :: gsize - public :: lsize - public :: init - public :: init_remote - public :: clean - public :: rank - public :: bounds - public :: comp_id - - interface gsize; module procedure gsize_; end interface - interface lsize; module procedure lsize_; end interface - interface init ; module procedure & - initd_, & ! initialize from all PEs - initr_ ! initialize from the root - end interface - interface init_remote; module procedure init_remote_; end interface - interface clean; module procedure clean_; end interface - interface rank ; module procedure rank_ ; end interface - interface bounds; module procedure bounds_; end interface - interface comp_id ; module procedure comp_id_ ; end interface - -! !SEE ALSO: -! The MCT module m_MCTWorld for more information regarding component -! ID numbers. -! -! !REVISION HISTORY: -! 21Apr98 - Jing Guo - initial prototype/prolog/code -! 9Nov00 - J.W. Larson - added init_remote -! interface. -! 26Jan01 - J.W. Larson - added storage for -! component ID number GlobalMap%comp_id, and associated -! method comp_id_() -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_GlobalMap' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initd_ - Collective Creation on the Local Communicator -! -! !DESCRIPTION: -! This routine creates the {\tt GlobalMap} {\tt GMap} from distributed -! data spread across the MPI communicatior associated with the input -! {\tt INTEGER} handle {\tt comm}. The {\tt INTEGER} input argument -! {\tt comp\_id} is used to define the MCT component ID for {\tt GMap}. -! 
The input {\tt INTEGER} argument {\tt ln} is the number of elements -! in the local vector segment. -! -! !INTERFACE: - - subroutine initd_(GMap, comp_id, ln, comm) - -! !USES: - - use m_mpif90 - use m_die - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: comp_id ! Component ID - integer, intent(in) :: ln ! the local size - integer, intent(in) :: comm ! f90 MPI communicator - ! handle - -! !OUTPUT PARAMETERS: - - type(GlobalMap), intent(out) :: GMap - -! !SEE ALSO: -! The MCT module m_MCTWorld for more information regarding component -! ID numbers. -! -! !REVISION HISTORY: -! 21Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initd_' - integer :: nPEs,myID,ier,l,i - - call MP_comm_size(comm,nPEs,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_size()',ier) - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - allocate(GMap%counts(0:nPEs-1),GMap%displs(0:nPEs-1),stat=ier) - if(ier /= 0) call die(myname_,'allocate()',ier) - -#ifdef MALL_ON - call mall_ci(size(transfer(GMap%counts,(/1/))),myname_) - call mall_ci(size(transfer(GMap%displs,(/1/))),myname_) -#endif - - call MPI_allgather(ln,1,MP_INTEGER,GMap%counts,1,MP_INTEGER,comm,ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_allgather()',ier) - - l=0 - do i=0,nPEs-1 - GMap%displs(i)=l - l=l+GMap%counts(i) - end do - - GMap%lsize=GMap%counts(myID) ! the local size - GMap%gsize=l ! the global size - GMap%comp_id = comp_id ! the component ID number - - end subroutine initd_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initr_ Create a GlobalMap from the Root Process -! -! !DESCRIPTION: -! This routine creates the {\tt GlobalMap} {\tt GMap}, and propagates -! it to all processes on the communicator associated with the MPI -! {\tt INTEGER} handle {\tt comm}. The input {\tt INTEGER} arguments -! {\tt comp\_id} (the MCT component ID number) and {\tt lns(:)} need -! only be valid on the process whose rank is equal to {\tt root} on -! {\tt comm}. The array {\tt lns(:)} should have length equal to the -! number of processes on {\tt comm}, and contains the length of each -! local segment. -! -! !INTERFACE: - - subroutine initr_(GMap, comp_id, lns, root, comm) - -! !USES: - - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: comp_id ! component ID number - integer, dimension(:), intent(in) :: lns ! segment lengths - integer, intent(in) :: root ! root process ID - integer, intent(in) :: comm ! communicator ID - -! !OUTPUT PARAMETERS: - - type(GlobalMap), intent(out) :: GMap - -! !SEE ALSO: -! The MCT module m_MCTWorld for more information regarding component -! ID numbers. -! -! !REVISION HISTORY: -! 
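
Putting the all-process constructor together with the size queries, a minimal sketch using the module's documented generic names (init, lsize, gsize, clean); the wrapper subroutine name is hypothetical and the component ID, local length, and communicator are supplied by the caller.

```fortran
subroutine globalmap_init_sketch(comp_id, nlocal, comm)

   use m_GlobalMap, only : GlobalMap
   use m_GlobalMap, only : GlobalMap_init  => init
   use m_GlobalMap, only : GlobalMap_lsize => lsize
   use m_GlobalMap, only : GlobalMap_gsize => gsize
   use m_GlobalMap, only : GlobalMap_clean => clean

   implicit none

   integer, intent(in) :: comp_id  ! MCT component ID
   integer, intent(in) :: nlocal   ! number of vector elements owned locally
   integer, intent(in) :: comm     ! MPI communicator

   type(GlobalMap) :: GMap
   integer :: ierr

   ! Collective on comm: each process passes its own segment length and the
   ! counts and displacements are assembled internally.
   call GlobalMap_init(GMap, comp_id, nlocal, comm)

   write(*,*) 'local size =', GlobalMap_lsize(GMap), &
              ' global size =', GlobalMap_gsize(GMap)

   call GlobalMap_clean(GMap, ierr)

end subroutine globalmap_init_sketch
```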
29May98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initr_' - integer :: nPEs,myID,ier,l,i - - call MP_comm_size(comm,nPEs,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_size()',ier) - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - allocate(GMap%counts(0:nPEs-1),GMap%displs(0:nPEs-1),stat=ier) - if(ier /= 0) call die(myname_,'allocate()',ier) - -#ifdef MALL_ON - call mall_ci(size(transfer(GMap%counts,(/1/))),myname_) - call mall_ci(size(transfer(GMap%displs,(/1/))),myname_) -#endif - - if(myID == root) then - if(size(lns(:)) /= nPEs) then - write(stderr,'(2a,2(a,i4))') myname_, & - ': _root_ argument error', & - ', size(lns) =',size(lns), & - ', nPEs =',nPEs - call die(myname_) - endif - - GMap%counts(:)=lns(:) - endif - - call MPI_bcast(GMap%counts, nPEs, MP_INTEGER, root, comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_bcast()',ier) - - ! on each process, use GMap%counts(:) to compute GMap%displs(:) - - l=0 - do i=0,nPEs-1 - GMap%displs(i)=l - l=l+GMap%counts(i) - end do - - GMap%lsize=GMap%counts(myID) ! the local size - GMap%gsize=l ! the global size - - ! finally, set and broadcast the component ID number GMap%comp_id - - if(myID == root) GMap%comp_id = comp_id - - call MPI_bcast(GMap%comp_id,1,MP_INTEGER,root,comm,ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_bcast()',ier) - - end subroutine initr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_remote_ Initialize Remote GlobalMap from the Root -! -! !DESCRIPTION: -! This routine creates and propagates across the local communicator a -! {\tt GlobalMap} associated with a remote component. The controlling -! process in this operation has MPI process ID defined by the input -! {\tt INTEGER} argument {\tt my\_root}, and its MPI communinicator -! is defined by the input {\tt INTEGER} argument {\tt my\_comm}. The -! input {\tt INTEGER} argument {\tt remote\_npes} is the number of MPI -! processes on the remote component's communicator (which need be valid -! only on the process {\tt my\_root}). The input the {\tt INTEGER} -! array {\tt remote\_lns(:)}, and the {\tt INTEGER} argument -! {\tt remote\_comp\_id} need only be valid on the process -! whose rank on the communicator {\tt my\_comm} is {\tt my\_root}. The -! argument {\tt remote\_lns(:)} defines the vector segment length on each -! process of the remote component's communicator, and the argument -! {\tt remote\_comp\_id} defines the remote component's ID number in -! the MCT component registry {\tt MCTWorld}. -! -! !INTERFACE: - - subroutine init_remote_(GMap, remote_lns, remote_npes, my_root, & - my_comm, remote_comp_id) -! !USES: - - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer, dimension(:), intent(in) :: remote_lns - integer, intent(in) :: remote_npes - integer, intent(in) :: my_root - integer, intent(in) :: my_comm - integer, intent(in) :: remote_comp_id - -! !OUTPUT PARAMETERS: - - type(GlobalMap), intent(out) :: GMap - -! !SEE ALSO: -! The MCT module m_MCTWorld for more information regarding component -! ID numbers. -! -! !REVISION HISTORY: -! 8Nov00 - J.W. Larson - initial prototype -! 26Jan01 - J.W. Larson - slight change--remote -! 
communicator is replaced by remote component ID number -! in argument remote_comp_id. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::init_remote_' - integer :: nPEs,myID,ier,l,i - - - ! Which processor am I on communicator my_comm? Store - ! the answer in myID: - - call MP_comm_rank(my_comm, myID, ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - ! allocate counts and displacements component arrays - ! for the sake of compactness, store the value of remote_npes - ! in the more tersely named variable nPEs. - - if(myID == my_root) nPEs = remote_npes - - call MPI_bcast(nPEs, 1, MP_INTEGER, my_root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_bcast(nPEs...)',ier) - - allocate(GMap%counts(0:nPEs-1),GMap%displs(0:nPEs-1),stat=ier) - if(ier /= 0) call die(myname_,'allocate()',ier) - -#ifdef MALL_ON - call mall_ci(size(transfer(GMap%counts,(/1/))),myname_) - call mall_ci(size(transfer(GMap%displs,(/1/))),myname_) -#endif - - ! On the Root processor, check the size of remote_lns(:) - ! to see it is equal to nPEs, the number of remote processes, - ! then store it as GMap%counts and broadcast it. - - if(myID == my_root) then - if(size(remote_lns(:)) /= nPEs) then - write(stderr,'(2a,2(a,i4))') myname_, & - ': _root_ argument error', & - ', size(remote_lns) =',size(remote_lns), & - ', nPEs =',nPEs - call die(myname_) - endif - - GMap%counts(:)=remote_lns(:) - endif - - call MPI_bcast(GMap%counts, nPEs, MP_INTEGER, my_root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_bcast()',ier) - - ! Now, on each processor of my_comm, compute from - ! GMap%counts(:) the entries of GMap%displs(:) - - l=0 - do i=0,nPEs-1 - GMap%displs(i)=l - l=l+GMap%counts(i) - end do - - GMap%lsize = -1 ! In this case, the local size is invalid!!! - GMap%gsize = l ! the global size - - ! Finally, set GMap's component ID (recall only the value on - ! process my_root is valid). - - if(myID == my_root) GMap%comp_id = remote_comp_id - call MPI_bcast(GMap%comp_id, 1, MP_INTEGER, my_root, my_comm,ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_bcast(GMap%comp_id...)',ier) - - end subroutine init_remote_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destroy a GlobalMap -! -! !DESCRIPTION: -! This routine deallocates all allocated memory associated with the -! input/output {\tt GlobalMap} argument {\tt GMap}, and sets to zero -! all of its statically defined components. The success (failure) of -! this operation is signified by the zero (non-zero) value of the -! optional output {\tt INTEGER} argument {\tt stat}. -! -! !INTERFACE: - - subroutine clean_(GMap, stat) - -! !USES: - - use m_die - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - type(GlobalMap), intent(inout) :: GMap - -! !OUTPUT PARAMETERS: - - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 21Apr98 - Jing Guo - initial prototype/prolog/code -! 26Jan01 - J. Larson incorporated comp_id. -! 1Mar02 - E.T. Ong removed the die to prevent -! crashes and added stat argument. 
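
A short sketch of the query and clean-up interface listed among the module's public member functions (rank, bounds, and the optional stat argument of clean); the wrapper name is hypothetical and GMap is assumed to have been built with one of the init routines.

```fortran
subroutine globalmap_query_sketch(GMap, i_g)

   use m_GlobalMap, only : GlobalMap
   use m_GlobalMap, only : GlobalMap_rank   => rank
   use m_GlobalMap, only : GlobalMap_bounds => bounds
   use m_GlobalMap, only : GlobalMap_clean  => clean

   implicit none

   type(GlobalMap), intent(inout) :: GMap
   integer,         intent(in)    :: i_g   ! a global (1-based) vector index

   integer :: owner, lbnd, ubnd, ierr

   ! Which process owns element i_g? (rank returns -1 if i_g is out of range.)
   call GlobalMap_rank(GMap, i_g, owner)

   if(owner >= 0) then
      ! First and last global indices of that process's segment.
      call GlobalMap_bounds(GMap, owner, lbnd, ubnd)
      write(*,*) 'element', i_g, 'lives on process', owner, &
                 'which owns indices', lbnd, 'through', ubnd
   endif

   ! clean reports deallocation problems through the optional stat argument
   ! rather than aborting.
   call GlobalMap_clean(GMap, ierr)
   if(ierr /= 0) write(*,*) 'GlobalMap clean returned stat =', ierr

end subroutine globalmap_query_sketch
```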
-!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - deallocate(GMap%counts,GMap%displs,stat=ier) - - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(GMap%...)',ier) - endif - - if(ier == 0) then - -#ifdef MALL_ON - call mall_co(size(transfer(GMap%counts,(/1/))),myname_) - call mall_co(size(transfer(GMap%displs,(/1/))),myname_) -#endif - - endif - - GMap%lsize = 0 - GMap%gsize = 0 - GMap%comp_id = 0 - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: lsize_ - Return Local Segment Length -! -! !DESCRIPTION: -! This {\tt INTEGER} function returns the length of the local vector -! segment as defined by the input {\tt GlobalMap} argument {\tt GMap}. - -! !INTERFACE: - - integer function lsize_(GMap) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap - -! !REVISION HISTORY: -! 21Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::lsize_' - - lsize_=GMap%lsize - - end function lsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: gsize_ - Return Global Vector Length -! -! !DESCRIPTION: -! This {\tt INTEGER} function returns the global length of a vector -! that is decomposed according to the input {\tt GlobalMap} argument -! {\tt GMap}. -! -! !INTERFACE: - - integer function gsize_(GMap) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap - - -! !REVISION HISTORY: -! 21Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::gsize_' - - gsize_=GMap%gsize - - end function gsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rank_ - Process ID Location of a Given Vector Element -! -! !DESCRIPTION: -! This routine uses the input {\tt GlobalMap} argument {\tt GMap} to -! determine the process ID (on the communicator on which {\tt GMap} was -! defined) of the vector element with global index {\tt i\_g}. This -! process ID is returned in the output {\tt INTEGER} argument {\tt rank}. -! -! !INTERFACE: - - subroutine rank_(GMap, i_g, rank) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: i_g - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: rank - -! !REVISION HISTORY: -! 5May98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::rank_' - integer :: i,ilc,ile - - rank=-1 ! if nowhere fits - do i=0,size(GMap%displs)-1 - ilc=GMap%displs(i) - ile=ilc+GMap%counts(i) - - ! If i_g in (ilc,ile]. Note that i_g := [1:..] - - if(ilc < i_g .and. 
i_g <= ile) then - rank=i - return - endif - end do - - end subroutine rank_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bounds_ - First/Last Global Indicies for a Process' Segment -! -! !DESCRIPTION: -! This routine takes as input a process ID (defined by the input -! {\tt INTEGER} argument {\tt pe\_no}), examines the input {\tt GlobalMap} -! argument {\tt GMap}, and returns the global indices for the first and -! last elements of the segment owned by this process in the output -! {\tt INTEGER} arguments {\tt lbnd} and {\tt ubnd}, respectively. -! -! !INTERFACE: - - subroutine bounds_(GMap, pe_no, lbnd, ubnd) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: pe_no - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: lbnd - integer, intent(out) :: ubnd - -! !REVISION HISTORY: -! 30Jan01 - J. Larson - initial code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bounds_' - - lbnd = GMap%displs(pe_no) + 1 - ubnd = lbnd + GMap%counts(pe_no) - 1 - - end subroutine bounds_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: comp_id_ - Return the Component ID Number -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the MCT component ID number -! stored in the input {\tt GlobalMap} argument {\tt GMap}. -! -! !INTERFACE: - - integer function comp_id_(GMap) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap - -! !SEE ALSO: -! The MCT module m_MCTWorld for more information regarding component -! ID numbers. -! -! !REVISION HISTORY: -! 25Jan02 - J. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::comp_id_' - - comp_id_ = GMap%comp_id - - end function comp_id_ - - end module m_GlobalMap diff --git a/src/externals/mct/mct/m_GlobalSegMap.F90 b/src/externals/mct/mct/m_GlobalSegMap.F90 deleted file mode 100644 index a1960885fa8..00000000000 --- a/src/externals/mct/mct/m_GlobalSegMap.F90 +++ /dev/null @@ -1,2667 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: m_GlobalSegMap.F90,v 1.56 2009-03-17 16:51:49 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_GlobalSegMap - a nontrivial 1-D decomposition of an array. -! -! !DESCRIPTION: -! Consider the problem of the 1-dimensional decomposition of an array -! across multiple processes. If each process owns only one contiguous -! segment, then the {\tt GlobalMap} (see {\tt m\_GlobalMap} or details) -! is sufficient to describe the decomposition. If, however, each -! process owns multiple, non-adjacent segments of the array, a more -! sophisticated approach is needed. The {\tt GlobalSegMap} data type -! allows one to describe a one-dimensional decomposition of an array -! 
with each process owning multiple, non-adjacent segments of the array. -! -! In the current implementation of the {\tt GlobalSegMap}, there is no -! santity check to guarantee that -!$${\tt GlobalSegMap\%gsize} = \sum_{{\tt i}=1}^{\tt ngseg} -! {\tt GlobalSegMap\%length(i)} . $$ -! The reason we have not implemented such a check is to allow the user -! to use the {\tt GlobalSegMap} type to support decompositions of both -! {\em haloed} and {\em masked} data. -! -! !INTERFACE: - - module m_GlobalSegMap - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: GlobalSegMap ! The class data structure - public :: init ! Create - public :: clean ! Destroy - public :: comp_id ! Return component ID number - public :: gsize ! Return global vector size (excl. halos) - public :: GlobalStorage ! Return total number of points in map, - ! including halo points (if present). - public :: ProcessStorage ! Return local storage on a given process. - public :: OrderedPoints ! Return grid points of a given process in - ! MCT-assumed order. - public :: lsize ! Return local--that is, on-process--storage - ! size (incl. halos) - public :: ngseg ! Return global number of segments - public :: nlseg ! Return local number of segments - public :: max_nlseg ! Return max local number of segments - public :: active_pes ! Return number of pes with at least 1 - ! datum, and if requested, a list of them. - public :: peLocs ! Given an input list of point indices, - ! return its (unique) process ID. - public :: haloed ! Is the input GlobalSegMap haloed? - public :: rank ! Rank which process owns a datum - public :: Sort ! compute index permutation to re-order - ! GlobalSegMap%start, GlobalSegMap%length, - ! and GlobalSegMap%pe_loc - public :: Permute ! apply index permutation to re-order - ! GlobalSegMap%start, GlobalSegMap%length, - ! and GlobalSegMap%pe_loc - public :: SortPermute ! compute index permutation and apply it to - ! re-order the GlobalSegMap components - ! GlobalSegMap%start, GlobalSegMap%length, - ! and GlobalSegMap%pe_loc - public :: increasing ! Are the indices for each pe strictly - ! increasing? - public :: copy ! Copy the gsmap - public :: print ! Print the contents of the GSMap - -! !PUBLIC TYPES: - - type GlobalSegMap -#ifdef SEQUENCE - sequence -#endif - integer :: comp_id ! Component ID number - integer :: ngseg ! No. of Global segments - integer :: gsize ! No. of Global elements - integer,dimension(:),pointer :: start ! global seg. start index - integer,dimension(:),pointer :: length ! segment lengths - integer,dimension(:),pointer :: pe_loc ! PE locations - end type GlobalSegMap - - interface init ; module procedure & - initd_, & ! initialize from all PEs - initr_, & ! initialize from the root - initp_, & ! initialize in parallel from replicated arrays - initp1_, & ! initialize in parallel from 1 replicated array - initp0_, & ! null constructor using replicated data - init_index_ ! 
initialize from local index arrays - end interface - - interface clean ; module procedure clean_ ; end interface - interface comp_id ; module procedure comp_id_ ; end interface - interface gsize ; module procedure gsize_ ; end interface - interface GlobalStorage ; module procedure & - GlobalStorage_ - end interface - interface ProcessStorage ; module procedure & - ProcessStorage_ - end interface - interface OrderedPoints ; module procedure & - OrderedPoints_ - end interface - interface lsize ; module procedure lsize_ ; end interface - interface ngseg ; module procedure ngseg_ ; end interface - interface nlseg ; module procedure nlseg_ ; end interface - interface max_nlseg ; module procedure max_nlseg_ ; end interface - interface active_pes ; module procedure active_pes_ ; end interface - interface peLocs ; module procedure peLocs_ ; end interface - interface haloed ; module procedure haloed_ ; end interface - interface rank ; module procedure & - rank1_ , & ! single rank case - rankm_ ! degenerate (multiple) ranks for halo case - end interface - interface Sort ; module procedure Sort_ ; end interface - interface Permute ; module procedure & - PermuteInPlace_ - end interface - interface SortPermute ; module procedure & - SortPermuteInPlace_ - end interface - interface increasing ; module procedure increasing_ ; end interface - interface copy ; module procedure copy_ ; end interface - interface print ; module procedure & - print_ ,& - printFromRootnp_ - end interface - - -! !REVISION HISTORY: -! 28Sep00 - J.W. Larson - initial prototype -! 26Jan01 - J.W. Larson - replaced the component -! GlobalSegMap%comm with GlobalSegMap%comp_id. -! 06Feb01 - J.W. Larson - removed the -! GlobalSegMap%lsize component. Also, added the -! GlobalStorage query function. -! 24Feb01 - J.W. Larson - Added the replicated -! initialization routines initp_() and initp1(). -! 25Feb01 - J.W. Larson - Added the routine -! ProcessStorage_(). -! 18Apr01 - J.W. Larson - Added the routine -! peLocs(). -! 26Apr01 - R. Jacob - Added the routine -! OrderedPoints_(). -! 03Aug01 - E. Ong - In initd_, call initr_ -! with actual shaped arguments on non-root processes to satisfy -! F90 standard. See comments in initd. -! 18Oct01 - J.W. Larson - Added the routine -! bcast(), and also cleaned up prologues. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_GlobalSegMap' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initd_ - define the map from distributed data -! -! !DESCRIPTION: -! This routine takes the {\em scattered} input {\tt INTEGER} arrays -! {\tt start}, {\tt length}, and {\tt pe\_loc}, gathers these data to -! the {\tt root} process, and from them creates a {\em global} set of -! segment information for the output {\tt GlobalSegMap} argument -! {\tt GSMap}. The input {\tt INTEGER} arguments {\tt comp\_id}, -! {\tt gsize} provide the {\tt GlobalSegMap} component ID number and -! global grid size, respectively. The input argument {\tt my\_comm} is -! the F90 {\tt INTEGER} handle for the MPI communicator. If the input -! arrays are overdimensioned, optional argument {\em numel} can be -! used to specify how many elements should be used. -! -! -! !INTERFACE: - - subroutine initd_(GSMap, start, length, root, my_comm, & - comp_id, pe_loc, gsize, numel) - -! -! 
!USES: -! - use m_mpif90 - use m_die - use m_stdio - use m_FcComms, only : fc_gather_int, fc_gatherv_int - - implicit none - -! !INPUT PARAMETERS: - - integer,dimension(:),intent(in) :: start ! segment local start - ! indices - integer,dimension(:),intent(in) :: length ! segment local lengths - integer,intent(in) :: root ! root on my_com - integer,intent(in) :: my_comm ! local communicatior - integer,intent(in) :: comp_id ! component model ID - integer,dimension(:), pointer, optional :: pe_loc ! process location - integer,intent(in), optional :: gsize ! global vector size - ! (optional). It can - ! be computed by this - ! routine if no haloing - ! is assumed. - integer,intent(in), optional :: numel ! specify number of elements - ! to use in start, length - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap),intent(out) :: GSMap ! Output GlobalSegMap - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -! 14Nov00 - J.W. Larson - final working version -! 09Jan01 - J.W. Larson - repaired: a subtle -! bug concerning the usage of the argument pe_loc (result -! was the new pointer variable my_pe_loc); a mistake in -! the tag arguments to MPI_IRECV; a bug in the declaration -! of the array status used by MPI_WAITALL. -! 26Jan01 - J.W. Larson - replaced optional -! argument gsm_comm with required argument comp_id. -! 23Sep02 - Add optional argument numel to allow start, length -! arrays to be overdimensioned. -! 31Jan09 - P.H. Worley - replaced irecv/send/waitall -! logic with calls to flow controlled gather routines -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initd_' - integer :: nPEs, myID, ier, l, i - integer :: ngseg ! number of global segments - integer :: nlseg ! number of local segments - integer :: nlseg_tmp(1) ! workaround for explicit interface expecting an array - - ! arrays allocated on the root to which data are gathered - integer, dimension(:), allocatable :: root_start, root_length, root_pe_loc - ! arrays allocated on the root to coordinate gathering of - ! data and non-blocking receives by the root - integer, dimension(:), allocatable :: counts, displs - ! data and non-blocking receives by the root - integer, dimension(:), pointer :: my_pe_loc - - ! Determine local process ID: - - call MP_COMM_RANK(my_comm, myID, ier) - - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - - - ! Check consistency of sizes of input arrays: - - if(size(length) /= size(start)) then - ier = -1 - call die(myname_,'length/start array size mismatch',ier) - endif - - if(present(pe_loc)) then - if(size(pe_loc) /= size(start)) then - ier = -1 - call die(myname_,'pe_loc/start array size mismatch',ier) - endif - endif - - ! Store in the variable nlseg the local size - ! array start(:) - - if(present(numel)) then - nlseg=numel - else - nlseg = size(start) - endif - - ! If the argument pe_loc is not present, then we are - ! initializing the GlobalSegMap on the communicator - ! my_comm. We will need pe_loc to be allocated and - ! with local size given by the input value of nlseg, - ! and then initialize it with the local process id myID. - - if(present(pe_loc)) then - my_pe_loc => pe_loc - else - allocate(my_pe_loc(nlseg), stat=ier) - if(ier /= 0) call die(myname_,'allocate(my_pe_loc)',ier) - my_pe_loc = myID - endif - - call MPI_COMM_SIZE(my_comm, npes, ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_COMM_SIZE()',ier) - - ! Allocate an array of displacements (displs) and counts - ! 
to hold the local values of nlseg on the root - - if(myID == root) then - allocate(counts(0:npes-1), displs(0:npes-1), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate(counts,...',ier) - endif - else - allocate(counts(1), displs(1), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate(counts,...',ier) - endif - endif - - ! Send local number of segments to the root. - - nlseg_tmp(1) = nlseg - call fc_gather_int(nlseg_tmp, 1, MP_INTEGER, counts, 1, MP_INTEGER, & - root, my_comm) - - ! On the root compute the value of ngseg, along with - ! the entries of counts and displs. - - if(myID == root) then - ngseg = 0 - do i=0,npes-1 - ngseg = ngseg + counts(i) - if(i == 0) then - displs(i) = 0 - else - displs(i) = displs(i-1) + counts(i-1) - endif - end do - endif - - ! Now only the root has the correct value of ngseg. - - ! On the root, allocate memory for the arrays root_start, - ! and root_length. If the argument pe_loc is present, - ! allocate root_pe_loc, too. - - ! Non-root processes call initr_ with root_start, root_length, - ! and root_pe_loc, although these arguments are not used in the - ! subroutine. Since these correspond to dummy shaped array arguments - ! in initr_, the Fortran 90 standard dictates that the actual - ! arguments must contain complete shape information. Therefore, - ! these array arguments must be allocated on all processes. - - if(myID == root) then - - allocate(root_start(ngseg), root_length(ngseg), & - root_pe_loc(ngseg), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate(root_start...',ier) - endif - - else - - allocate(root_start(1), root_length(1), & - root_pe_loc(1), stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate((non)root_start...',ier) - endif - - endif - - ! Now, each process sends its values of start(:) to fill in - ! the appropriate portion of root_start(:y) on the root. - - call fc_gatherv_int(start, nlseg, MP_INTEGER, & - root_start, counts, displs, MP_INTEGER, & - root, my_comm) - - ! Next, each process sends its values of length(:) to fill in - ! the appropriate portion of root_length(:) on the root. - - call fc_gatherv_int(length, nlseg, MP_INTEGER, & - root_length, counts, displs, MP_INTEGER, & - root, my_comm) - - ! Finally, if the argument pe_loc is present, each process sends - ! its values of pe_loc(:) to fill in the appropriate portion of - ! root_pe_loc(:) on the root. - - call fc_gatherv_int(my_pe_loc, nlseg, MP_INTEGER, & - root_pe_loc, counts, displs, MP_INTEGER, & - root, my_comm) - - call MPI_BARRIER(my_comm, ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_BARRIER my_pe_loc',ier) - - ! Now, we have everything on the root needed to call initr_(). - - if(present(gsize)) then - call initr_(GSMap, ngseg, root_start, root_length, & - root_pe_loc, root, my_comm, comp_id, gsize) - else - call initr_(GSMap, ngseg, root_start, root_length, & - root_pe_loc, root, my_comm, comp_id) - endif - - - ! Clean up the array pe_loc(:) if it was allocated - - if(present(pe_loc)) then - nullify(my_pe_loc) - else - deallocate(my_pe_loc, stat=ier) - if(ier /= 0) call die(myname_, 'deallocate(my_pe_loc)', ier) - endif - - ! Clean up the arrays root_start(:), et cetera... - - deallocate(root_start, root_length, root_pe_loc, stat=ier) - if(ier /= 0) then - call die(myname_, 'deallocate(root_start,...)', ier) - endif - - ! 
Clean up the arrays counts(:) and displs(:) - - deallocate(counts, displs, stat=ier) - if(ier /= 0) then - call die(myname_, 'deallocate(counts,...)', ier) - endif - - end subroutine initd_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initr_ initialize the map from the root -! -! !DESCRIPTION: -! This routine takes the input {\tt INTEGER} arrays {\tt start}, -! {\tt length}, and {\tt pe\_loc} (all valid only on the {\tt root} -! process), and from them creates a {\em global} set of segment -! information for the output {\tt GlobalSegMap} argument -! {\tt GSMap}. The input {\tt INTEGER} arguments {\tt ngseg}, -! {\tt comp\_id}, {\tt gsize} (again, valid only on the {\tt root} -! process) provide the {\tt GlobalSegMap} global segment count, component -! ID number, and global grid size, respectively. The input argument -! {\tt my\_comm} is the F90 {\tt INTEGER} handle for the MPI communicator. -! -! !INTERFACE: - - subroutine initr_(GSMap, ngseg, start, length, pe_loc, root, & - my_comm, comp_id, gsize) -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: ngseg ! no. of global segments - integer,dimension(:),intent(in) :: start ! segment local start index - integer,dimension(:),intent(in) :: length ! the distributed sizes - integer,dimension(:),intent(in) :: pe_loc ! process location - integer,intent(in) :: root ! root on my_com - integer,intent(in) :: my_comm ! local communicatior - integer,intent(in) :: comp_id ! component id number - integer,intent(in), optional :: gsize ! global vector size - ! (optional). It can - ! be computed by this - ! routine if no haloing - ! is assumed. - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap),intent(out) :: GSMap ! Output GlobalSegMap - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -! 09Nov00 - J.W. Larson - final working version -! 10Jan01 - J.W. Larson - minor bug fix -! 12Jan01 - J.W. Larson - minor bug fix regarding -! disparities in ngseg on -! the root and other -! processes -! 26Jan01 - J.W. Larson - replaced optional -! argument gsm_comm with required argument comp_id. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initr_' - integer :: myID,ier,l,i - - ! Determine the local process ID myID: - - call MPI_COMM_RANK(my_comm, myID, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_COMM_RANK()',ier) - - ! Argument checking: check to make sure the arrays - ! start, length, and pe_loc each have ngseg elements. - ! If not, stop with an error. This is done on the - ! root process since it owns the initialization data. - - if(myID == root) then - if( size(start(:)) /= ngseg ) then - write(stderr,'(2a,2(a,i4))') myname_, & - ': _root_ argument error', & - ', size(start) =',size(start), & - ', ngseg =',ngseg - call die(myname_) - endif - if( size(length(:)) /= ngseg ) then - write(stderr,'(2a,2(a,i4))') myname_, & - ': _root_ argument error', & - ', size(length) =',size(length), & - ', ngseg =',ngseg - call die(myname_) - endif - if( size(pe_loc(:)) /= ngseg ) then - write(stderr,'(2a,2(a,i4))') myname_, & - ': _root_ argument error', & - ', size(pe_loc) =',size(pe_loc), & - ', ngseg =',ngseg - call die(myname_) - endif - endif - - ! 
Initialize GSMap%ngseg and GSMap%comp_id on the root: - - if(myID == root) then - GSMap%ngseg = ngseg - GSMap%comp_id = comp_id - endif - - ! Broadcast the value of GSMap%ngseg - - call MPI_BCAST(GSMap%ngseg, 1, MP_INTEGER, root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_BCAST(GSmap%ngseg)',ier) - - ! Broadcast the value of GSMap%comp_id - - call MPI_BCAST(GSMap%comp_id, 1, MP_INTEGER, root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_BCAST(GSmap%comp_id)',ier) - - ! Allocate the components GSMap%start(:), GSMap%length(:), - ! and GSMap%pe_loc(:) - - allocate(GSMap%start(GSMap%ngseg), GSMap%length(GSMap%ngseg), & - GSMap%pe_loc(GSMap%ngseg), stat = ier) - if(ier/=0) call die(myname_,'allocate(GSmap%start(:),...',ier) - -#ifdef MALL_ON - call mall_ci(size(transfer(GSMap%start,(/1/))),myname_) - call mall_ci(size(transfer(GSMap%length,(/1/))),myname_) - call mall_ci(size(transfer(GSMap%pe_loc,(/1/))),myname_) -#endif - - ! On the root process, initialize GSMap%start(:), GSMap%length(:), - ! and GSMap%pe_loc(:) with the data contained in start(:), - ! length(:) and pe_loc(:), respectively - - if(myID == root) then - GSMap%start(1:GSMap%ngseg) = start(1:GSMap%ngseg) - GSMap%length(1:GSMap%ngseg) = length(1:GSMap%ngseg) - GSMap%pe_loc(1:GSMap%ngseg) = pe_loc(1:GSMap%ngseg) - endif - - ! Broadcast the root values of GSMap%start(:), GSMap%length(:), - ! and GSMap%pe_loc(:) - - call MPI_BCAST(GSMap%start, GSMap%ngseg, MP_INTEGER, root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_BCAST(GSMap%start)',ier) - - call MPI_BCAST(GSMap%length, GSMap%ngseg, MP_INTEGER, root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_BCAST(GSMap%length)',ier) - - call MPI_BCAST(GSMap%pe_loc, GSMap%ngseg, MP_INTEGER, root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_BCAST(GSMap%pe_loc)',ier) - - ! If the argument gsize is present, use the root value to - ! set GSMap%gsize and broadcast it. If it is not present, - ! this will be computed by summing the entries of GSM%length(:). - ! Again, note that if one is storing halo points, the sum will - ! produce a result larger than the actual global vector. If - ! halo points are to be used in the mapping we advise strongly - ! that the user specify the value gsize as an argument. - - if(present(gsize)) then - if(myID == root) then - GSMap%gsize = gsize - endif - call MPI_BCAST(GSMap%gsize, 1, MP_INTEGER, root, my_comm, ier) - if(ier/=0) call MP_perr_die(myname_, 'MPI_BCAST(GSMap%gsize)', ier) - else - GSMap%gsize = 0 - do i=1,GSMap%ngseg - GSMap%gsize = GSMap%gsize + GSMap%length(i) - end do - endif - - end subroutine initr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initp_ - define the map from replicated data. -! -! !DESCRIPTION: -! -! The routine {\tt initp\_()} takes the input {\em replicated} arguments -! {\tt comp\_id}, {\tt ngseg}, {\tt gsize}, {\tt start(:)}, -! {\tt length(:)}, and {\tt pe\_loc(:)}, and uses them to initialize an -! output {\tt GlobalSegMap} {\tt GSMap}. This routine operates on the -! assumption that these data are replicated across the communicator on -! which the {\tt GlobalSegMap} is being created. -! -! !INTERFACE: - - subroutine initp_(GSMap, comp_id, ngseg, gsize, start, length, pe_loc) - -! -! !USES: -! - use m_mpif90 - use m_die, only : die - use m_stdio - - implicit none - -! 
!INPUT PARAMETERS: - - integer,intent(in) :: comp_id ! component model ID - integer,intent(in) :: ngseg ! global number of segments - integer,intent(in) :: gsize ! global vector size - integer,dimension(:),intent(in) :: start ! segment local start index - integer,dimension(:),intent(in) :: length ! the distributed sizes - integer,dimension(:),intent(in) :: pe_loc ! process location - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap),intent(out) :: GSMap ! Output GlobalSegMap - -! !REVISION HISTORY: -! 24Feb01 - J.W. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initp_' - integer :: ierr, n - - ! Argument Checks -- Is comp_id positive? - - if(comp_id <= 0) then - call die(myname_,'non-positive value of comp_id',comp_id) - endif - - ! Is gsize positive? - - if(gsize <= 0) then - call die(myname_,'non-positive value of gsize',gsize) - endif - - - ! Is ngseg positive? - - if(ngseg <= 0) then - call die(myname_,'non-positive value of ngseg',ngseg) - endif - - ! Are the arrays start(:), length(:), and pe_loc(:) the - !correct size? - - if(size(start) /= ngseg) then - call die(myname_,'start(:)/ngseg size mismatch',ngseg) - endif - if (size(length) /= ngseg) then - call die(myname_,'length(:)/ngseg size mismatch',ngseg) - endif - if (size(pe_loc) /= ngseg) then - call die(myname_,'pe_loc(:)/ngseg size mismatch',ngseg) - endif - - ! Allocate index and location arrays for GSMap: - - allocate(GSMap%start(ngseg), GSMap%length(ngseg), GSMap%pe_loc(ngseg), & - stat = ierr) - if (ierr /= 0) then - call die(myname_,'allocate(GSMap%start...',ngseg) - endif - - ! Assign the components of GSMap: - - GSMap%comp_id = comp_id - GSMap%ngseg = ngseg - GSMap%gsize = gsize - - do n=1,ngseg - GSMap%start(n) = start(n) - GSMap%length(n) = length(n) - GSMap%pe_loc(n) = pe_loc(n) - end do - - end subroutine initp_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initp1_ - define the map from replicated data using 1 array. -! -! !DESCRIPTION: -! -! The routine {\tt initp1\_()} takes the input {\em replicated} arguments -! {\tt comp\_id}, {\tt ngseg}, {\tt gsize}, and {\tt all\_arrays(:)}, -! and uses them to initialize an output {\tt GlobalSegMap} {\tt GSMap}. -! This routine operates on the assumption that these data are replicated -! across the communicator on which the {\tt GlobalSegMap} is being created. -! The input array {\tt all\_arrays(:)} should be of length {\tt 2 * ngseg}, -! and is packed so that -! $$ {\tt all\_arrays(1:ngseg)} = {\tt GSMap\%start(1:ngseg)} $$ -! $$ {\tt all\_arrays(ngseg+1:2*ngseg)} = {\tt GSMap\%length(1:ngseg)} $$ -! $$ {\tt all\_arrays(2*ngseg+1:3*ngseg)} = {\tt GSMap\%pe\_loc(1:ngseg)} .$$ -! -! !INTERFACE: - - subroutine initp1_(GSMap, comp_id, ngseg, gsize, all_arrays) - -! -! !USES: -! - use m_mpif90 - use m_die, only : die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer,intent(in) :: comp_id ! component model ID - integer,intent(in) :: ngseg ! global no. of segments - integer,intent(in) :: gsize ! global vector size - integer,dimension(:),intent(in) :: all_arrays ! packed array of length - ! 3*ngseg containing (in - ! this order): start(:), - ! length(:), and pe_loc(:) - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap),intent(out) :: GSMap ! Output GlobalSegMap - -! 
!REVISION HISTORY: -! 24Feb01 - J.W. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initp1_' - integer :: ierr, n - - ! Argument Checks -- Is comp_id positive? - - if(comp_id <= 0) then - call die(myname_,'non-positive value of comp_id',comp_id) - endif - - ! Is gsize positive? - - if(gsize <= 0) then - call die(myname_,'non-positive value of gsize',gsize) - endif - - - ! Is ngseg positive? - - if(ngseg <= 0) then - call die(myname_,'non-positive value of ngseg',ngseg) - endif - - ! Is the array all_arrays(:) the right length? - - if(size(all_arrays) /= 3*ngseg) then - call die(myname_,'all_arrays(:)/3*ngseg size mismatch',ngseg) - endif - - ! Allocate index and location arrays for GSMap: - - allocate(GSMap%start(ngseg), GSMap%length(ngseg), GSMap%pe_loc(ngseg), & - stat = ierr) - if (ierr /= 0) then - call die(myname_,'allocate(GSMap%start...',ngseg) - endif - - ! Assign the components of GSMap: - - GSMap%comp_id = comp_id - GSMap%ngseg = ngseg - GSMap%gsize = gsize - - do n=1,ngseg - GSMap%start(n) = all_arrays(n) - GSMap%length(n) = all_arrays(ngseg + n) - GSMap%pe_loc(n) = all_arrays(2*ngseg + n) - end do - - end subroutine initp1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initp0_ - Null Constructor Using Replicated Data -! -! !DESCRIPTION: -! -! The routine {\tt initp0\_()} takes the input {\em replicated} arguments -! {\tt comp\_id}, {\tt ngseg}, {\tt gsize}, and uses them perform null -! construction of the output {\tt GlobalSegMap} {\tt GSMap}. This is a -! null constructor in the sense that we are not filling in the segment -! information arrays. This routine operates on the assumption that these -! data are replicated across the communicator on which the -! {\tt GlobalSegMap} is being created. -! -! !INTERFACE: - - subroutine initp0_(GSMap, comp_id, ngseg, gsize) - -! -! !USES: -! - use m_die, only : die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer,intent(in) :: comp_id ! component model ID - integer,intent(in) :: ngseg ! global number of segments - integer,intent(in) :: gsize ! global vector size - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap),intent(out) :: GSMap ! Output GlobalSegMap - -! !REVISION HISTORY: -! 13Aug03 - J.W. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initp0_' - - integer :: ierr - - nullify(GSMap%start) - nullify(GSMap%length) - nullify(GSMap%pe_loc) - - GSMap%comp_id = comp_id - GSMap%ngseg = ngseg - GSMap%gsize = gsize - - allocate(GSMap%start(ngseg), GSMap%length(ngseg), GSMap%pe_loc(ngseg), & - stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_, & - ':: FATAL--allocate of segment information storage space failed.', & - ' ierr = ',ierr - call die(myname_) - endif - - end subroutine initp0_ - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_index_ - initialize GSM from local index arrays -! -! !DESCRIPTION: -! -! The routine {\tt init\_index\_()} takes a local array of indices -! 
{\tt lindx} and uses them to create a {\tt GlobalSegMap}. -! {\tt lindx} is parsed to determine the lengths of the runs, and -! then a call is made to {\tt initd\_}. The optional argument -! {\tt lsize} can be used if only the first {\tt lsize} number -! of elements of {\tt lindx} are valid. The optional argument -! {\tt gsize} is used to specify the global number of unique points -! if this can not be determined from the collective {\tt lindx}. -! -! -! !INTERFACE: - - subroutine init_index_(GSMap, lindx, my_comm, comp_id, lsize, gsize) - -! -! !USES: -! - -! use m_GlobalSegMap,only: GlobalSegMap -! use m_GlobalSegMap,only: MCT_GSMap_init => init - -! use shr_sys_mod - - use m_die - implicit none - -! !INPUT PARAMETERS: - - integer , dimension(:),intent(in) :: lindx ! index buffer - integer , intent(in) :: my_comm ! mpi communicator group (mine) - integer , intent(in) :: comp_id ! component id (mine) - - integer , intent(in),optional :: lsize ! size of index buffer - integer , intent(in),optional :: gsize ! global vector size - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap),intent(out) :: GSMap ! Output GlobalSegMap - - -! !REVISION HISTORY: -! 30Jul02 - T. Craig - initial version in cpl6. -! 17Nov05 - R. Loy - install into MCT -! 18Nov05 - R. Loy - make lsize optional -! 25Jul06 - R. Loy - error check on lindex/alloc/dealloc -!EOP ___________________________________________________________________ - - - !--- local --- - - character(len=*),parameter :: myname_=myname//'::init_index_' - - integer :: i,j,k,n ! generic indicies - integer :: nseg ! counts number of segments for GSMap - integer,allocatable :: start(:) ! used to init GSMap - integer,allocatable :: count(:) ! used to init GSMap - integer,parameter :: pid0=0 ! mpi process id for root pe - integer,parameter :: debug=0 ! - - integer rank,ierr - integer mysize - - - if (present(lsize)) then - mysize=lsize - else - mysize=size(lindx) - endif - - if (mysize<0) call die(myname_, & - 'lindx size is negative (you may have run out of points)') - -!! -!! Special case if this processor doesn't have any data indices -!! - if (mysize==0) then - allocate(start(0),count(0),stat=ierr) - if(ierr/=0) call die(myname_,'allocate(start,count)',ierr) - - nseg=0 - else - - call MPI_COMM_RANK(my_comm,rank, ierr) - - ! compute segment's start indicies and length counts - - ! first pass - count how many runs of consecutive numbers - - nseg=1 - do n = 2,mysize - i = lindx(n-1) - j = lindx(n) - if ( j-i /= 1) nseg=nseg+1 - end do - - allocate(start(nseg),count(nseg),stat=ierr) - if(ierr/=0) call die(myname_,'allocate(start,count)',ierr) - - ! second pass - determine how long each run is - - nseg = 1 - start(nseg) = lindx(1) - count(nseg) = 1 - do n = 2,mysize - i = lindx(n-1) - j = lindx(n) - if ( j-i /= 1) then - nseg = nseg+1 - start(nseg) = lindx(n) - count(nseg) = 1 - else - count(nseg) = count(nseg)+1 - end if - end do - - endif ! if mysize==0 - - - if (debug.ne.0) then - write(6,*) rank,'init_index: SIZE ',nseg - - do n=1,nseg - write(6,*) rank,'init_index: START,COUNT ',start(n),count(n) - end do - endif - - - if (present(gsize)) then - call initd_( GSMap, start, count, pid0, my_comm, & - comp_id, gsize=gsize) - else - call initd_( GSMap, start, count, pid0, my_comm, & - comp_id) - endif - - - deallocate(start, count, stat=ierr) - if(ierr/=0) call warn(myname_,'deallocate(start,count)',ierr) - - - end subroutine init_index_ - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - clean the map -! -! !DESCRIPTION: -! This routine deallocates the array components of the {\tt GlobalSegMap} -! argument {\tt GSMap}: {\tt GSMap\%start}, {\tt GSMap\%length}, and -! {\tt GSMap\%pe\_loc}. It also zeroes out the values of the integer -! components {\tt GSMap\%ngseg}, {\tt GSMap\%comp\_id}, and -! {\tt GSMap\%gsize}. -! -! !INTERFACE: - - subroutine clean_(GSMap,stat) -! -! !USES: -! - use m_die - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(inout) :: GSMap - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -! 01Mar02 - E.T. Ong - added stat argument. -! Removed dies to prevent crashing. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - -#ifdef MALL_ON - - if( (associated(GSMap%start) .and. associated(GSMap%length)) & - .and. associated(GSMap%pe_loc) ) - call mall_co(size(transfer(GSMap%start,(/1/))),myname_) - call mall_co(size(transfer(GSMap%length,(/1/))),myname_) - call mall_co(size(transfer(GSMap%pe_loc,(/1/))),myname_) - endif - -#endif - - deallocate(GSMap%start, GSMap%length, GSMap%pe_loc, stat=ier) - - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(GSMap%start,...)',ier) - endif - - GSMap%ngseg = 0 - GSMap%comp_id = 0 - GSMap%gsize = 0 - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ngseg_ - Return the global number of segments from the map -! -! !DESCRIPTION: -! The function {\tt ngseg\_()} returns the global number of vector -! segments in the {\tt GlobalSegMap} argument {\tt GSMap}. This is -! merely the value of {\tt GSMap\%ngseg}. -! -! !INTERFACE: - - integer function ngseg_(GSMap) - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: GSMap - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ngseg_' - - ngseg_=GSMap%ngseg - - end function ngseg_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nlseg_ - Return the local number of segments from the map -! -! !DESCRIPTION: -! The function {\tt nlseg\_()} returns the number of vector segments -! in the {\tt GlobalSegMap} argument {\tt GSMap} that reside on the -! process specified by the input argument {\tt pID}. This is the -! number of entries {\tt GSMap\%pe\_loc} whose value equals {\tt pID}. -! -! !INTERFACE: - - integer function nlseg_(GSMap, pID) - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: GSMap - integer, intent(in) :: pID - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -! 14Jun01 - J.W. Larson - Bug fix in lower -! limit of loop over elements of GSMap%pe_loc(:). The -! original code had this lower limit set to 0, which -! was out-of-bounds (but uncaught). The correct lower -! index is 1. 
This bug was discovered by Everest Ong. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nlseg_' - integer :: i, nlocseg - - ! Initialize the number of segments residing on pID, nlocseg - - nlocseg = 0 - - ! Compute the number of segments residing on pID, nlocseg - - do i=1,GSMap%ngseg - if(GSMap%pe_loc(i) == pID) then - nlocseg = nlocseg + 1 - endif - end do - - ! Return the total - - nlseg_ = nlocseg - - end function nlseg_ - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: max_nlseg_ - Return the max number of segments over all procs -! -! !DESCRIPTION: -! The function {\tt max\_nlseg\_()} returns the maximum number -! over all processors of the vector -! segments in the {\tt GlobalSegMap} argument {\tt gsap} -! E.g. max\_p(nlseg(gsmap,p)) but computed more efficiently -! -! !INTERFACE: - - integer function max_nlseg_(gsmap) - -! !USES: - - use m_MCTWorld, only :ThisMCTWorld - use m_mpif90 - use m_die - - use m_stdio ! rml - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: gsmap - - -! !REVISION HISTORY: -! 17Jan07 - R. Loy - initial prototype -!EOP ___________________________________________________________________ - - - -! Local variables - - character(len=*),parameter :: myname_=myname//'::max_local_segs' - - integer i - integer this_comp_id - integer nprocs - - integer, allocatable:: segcount(:) ! segments on proc i - integer ier - - integer this_ngseg - integer segment_pe - integer max_segcount - - -! Start of routine - - this_comp_id = comp_id(gsmap) - nprocs=ThisMCTWorld%nprocspid(this_comp_id) - - allocate( segcount(nprocs), stat=ier ) - if (ier/=0) call die(myname_,'allocate segcount') - - segcount=0 - - this_ngseg=ngseg(gsmap) - - do i=1,this_ngseg - - segment_pe = gsmap%pe_loc(i) + 1 ! want value 1..nprocs - - if (segment_pe < 1 .OR. segment_pe > nprocs) then - call die(myname_,'bad segment location',segment_pe) - endif - - segcount(segment_pe) = segcount(segment_pe) + 1 - enddo - - max_segcount=0 - do i=1,nprocs - max_segcount= max( max_segcount, segcount(i) ) - enddo - - deallocate(segcount, stat=ier) - if (ier/=0) call die(myname_,'deallocate segcount') - - - max_nlseg_=max_segcount - - end function max_nlseg_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: comp_id_ - Return the commponent ID from the GlobalSegMap. -! -! !DESCRIPTION: -! The function {\tt comp\_id\_()} returns component ID number stored in -! {\tt GSMap\%comp\_id}. -! -! !INTERFACE: - - integer function comp_id_(GSMap) - -! !USES: - - use m_die,only: die - use m_stdio, only :stderr - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: GSMap - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -! 26Jan01 - J.W. Larson - renamed comp_id_ -! to fit within MCT_World component ID context. -! 01May01 - R.L. Jacob - make sure GSMap -! is defined. 
-!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::comp_id_' - - if(.not.associated(GSMap%start) ) then - write(stderr,'(2a)') myname_, & - ' MCTERROR: GSMap argument not initialized...exiting' - call die(myname_) - endif - - comp_id_ = GSMap%comp_id - - end function comp_id_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: gsize_ - Return the global vector size from the GlobalSegMap. -! -! !DESCRIPTION: -! The function {\tt gsize\_()} takes the input {\tt GlobalSegMap} -! arguement {\tt GSMap} and returns the global vector length stored -! in {\tt GlobalSegMap\%gsize}. -! -! !INTERFACE: - - integer function gsize_(GSMap) - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: GSMap - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::gsize_' - - gsize_=GSMap%gsize - - end function gsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalStorage_ - Return global storage space required. -! -! !DESCRIPTION: -! The function {\tt GlobalStorage\_()} takes the input {\tt GlobalSegMap} -! arguement {\tt GSMap} and returns the global storage space required -! ({\em i.e.}, the vector length) to hold all the data specified by -! {\tt GSMap}. -! -! {\bf N.B.: } If {\tt GSMap} contains halo or masked points, the value -! by {\tt GlobalStorage\_()} may differ from {\tt GSMap\%gsize}. -! -! !INTERFACE: - - integer function GlobalStorage_(GSMap) - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: GSMap - -! !REVISION HISTORY: -! 06Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalStorage_' - - integer :: global_storage, ngseg, n - - ! Return global number of segments: - - ngseg = ngseg_(GSMap) - - ! Initialize global_storage (the total number of points in the - ! GlobalSegMap: - - global_storage = 0 - - ! Add up the number of points present in the GlobalSegMap: - - do n=1,ngseg - global_storage = global_storage + GSMap%length(n) - end do - - GlobalStorage_ = global_storage - - end function GlobalStorage_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ProcessStorage_ - Number of points on a given process. -! -! !DESCRIPTION: -! The function {\tt ProcessStorage\_()} takes the input {\tt GlobalSegMap} -! arguement {\tt GSMap} and returns the storage space required by process -! {\tt PEno} ({\em i.e.}, the vector length) to hold all the data specified -! by {\tt GSMap}. -! -! !INTERFACE: - - integer function ProcessStorage_(GSMap, PEno) - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: GSMap - integer, intent(in) :: PEno - -! !REVISION HISTORY: -! 06Feb01 - J.W. 
Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ProcessStorage_' - - integer :: pe_storage, ngseg, n - - ! Return global number of segments: - - ngseg = ngseg_(GSMap) - - ! Initialize pe_storage (the total number of points on process - ! PEno in the GlobalSegMap): - - pe_storage = 0 - - ! Add up the number of points on process PEno in the GlobalSegMap: - - do n=1,ngseg - if(GSMap%pe_loc(n) == PEno) then - pe_storage = pe_storage + GSMap%length(n) - endif - end do - - ProcessStorage_ = pe_storage - - end function ProcessStorage_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: OrderedPoints_ - The grid points on a given process -! returned in the assumed MCT order. -! -! !DESCRIPTION: -! The function {\tt OrderedPoints\_()} takes the input {\tt GlobalSegMap} -! arguement {\tt GSMap} and returns a vector of the points owned by -! {\tt PEno}. {\tt Points} is allocated here. The calling process -! is responsible for deallocating the space. -! -! !INTERFACE: - - subroutine OrderedPoints_(GSMap, PEno, Points) - -! -! !USES: -! - use m_die,only: die - - implicit none - - ! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! input GlobalSegMap - integer, intent(in) :: PEno ! input process number - integer,dimension(:),pointer :: Points ! the vector of points - -! !REVISION HISTORY: -! 25Apr01 - R. Jacob - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::OrderedPoints_' - integer :: nlsegs,mysize,ier,i,j,k - integer,dimension(:),allocatable :: mystarts,mylengths - - nlsegs = nlseg(GSMap,PEno) - mysize=ProcessStorage(GSMap,PEno) - - allocate(mystarts(nlsegs),mylengths(nlsegs), & - Points(mysize),stat=ier) - if(ier/=0) call die(myname_,'allocate(mystarts,..)',ier) - -! pull out the starts and lengths that PEno owns in the order -! they appear in the GSMap. - j=1 - do i=1,GSMap%ngseg - if(GSMap%pe_loc(i)==PEno) then - mystarts(j)=GSMap%start(i) - mylengths(j)=GSMap%length(i) - j=j+1 - endif - enddo - -! now recalculate the values of the grid point numbers -! based on the starts and lengths -! form one long vector which is all local GSMap points - i=1 - do j=1,nlsegs - do k=1,mylengths(j) - Points(i)=mystarts(j)+k-1 - i=i+1 - enddo - enddo - - deallocate(mystarts,mylengths, stat=ier) - if(ier/=0) call die(myname_,'deallocate(mystarts,..)',ier) - - end subroutine OrderedPoints_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: lsize_ - find the local storage size from the map -! -! !DESCRIPTION: -! This function returns the number of points owned by the local process, -! as defined by the input {\tt GlobalSegMap} argument {\tt GSMap}. The -! local process ID is determined through use of the input {\tt INTEGER} -! argument {\tt comm}, which is the Fortran handle for the MPI -! communicator. -! -! !INTERFACE: - - integer function lsize_(GSMap, comm) -! -! !USES: -! - use m_mpif90 - use m_die , only : MP_perr_die - - implicit none - -! 
!INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: comm - - -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -! 06Feb01 - J.W. Larson - Computed directly -! from the GlobalSegMap, rather than returning a hard- -! wired local attribute. This required the addition of -! the communicator argument. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::lsize_' - integer :: ierr, local_size, myID, n, ngseg - - ! Determine local rank myID: - - call MP_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MP_COMM_RANK',ierr) - - ! Determine global number of segments: - - ngseg = ngseg_(GSMap) - - ! Compute the local size of the distributed vector by summing - ! the entries of GSMap%length(:) whose corresponding values in - ! GSMap%pe_loc(:) equal the local process ID. This automatically - ! takes into account haloing (if present). - - local_size = 0 - - do n=1,ngseg - if(GSMap%pe_loc(n) == myID) then - local_size = local_size + GSMap%length(n) - endif - end do - - lsize_ = local_size - - end function lsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rank1_ - rank which process owns a datum with given global -! index. -! -! !DESCRIPTION: -! This routine assumes that there is one process that owns the datum with -! a given global index. It should not be used when the input -! {\tt GlobalSegMap} argument {\tt GSMap} has been built to incorporate -! halo points. -! -! !INTERFACE: - - subroutine rank1_(GSMap, i_g, rank) - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! input GlobalSegMap - integer, intent(in) :: i_g ! a global index - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: rank ! the pe on which this - ! element resides -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::rank1_' - integer :: i,ilc,ile - - ! Initially, set the rank to -1 (invalid). - rank=-1 - - do i=1,size(GSMap%start) - ilc = GSMap%start(i) - ile = ilc + GSMap%length(i) - 1 - - ! If i_g in [ilc,ile]. Note that i_g := [1:..] - - if(ilc <= i_g .and. i_g <= ile) then - rank = GSMap%pe_loc(i) - return - endif - end do - - end subroutine rank1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rankm_ - rank which processes own a datum with given global -! index. -! -! !DESCRIPTION: -! This routine assumes that there may be more than one process that owns -! the datum with a given global index. This routine should be used when -! the input {\tt GlobalSegMap} argument {\tt GSMap} has been built to -! incorporate ! halo points. {\em Nota Bene}: The output array {\tt rank} -! is allocated in this routine and must be deallocated by the routine calling -! {\tt rankm\_()}. Failure to do so could result in a memory leak. -! -! !INTERFACE: - - subroutine rankm_(GSMap, i_g, num_loc, rank) - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! input GlobalSegMap - integer, intent(in) :: i_g ! a global index - -! 
!OUTPUT PARAMETERS: - - integer, intent(out) :: num_loc ! the number of processes - ! which own element i_g - integer, dimension(:), pointer :: rank ! the process(es) on which - ! element i_g resides -! !REVISION HISTORY: -! 29Sep00 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::rankm_' - integer :: i, ilc, ile, ier, n - - ! First sweep: determine the number of processes num_loc - ! that own the given datum: - - num_loc = 0 - - do i=1,size(GSMap%start) - - ilc = GSMap%start(i) - ile = ilc + GSMap%length(i) - 1 - - ! If i_g in [ilc,ile]. Note that i_g := [1:..] - - if(ilc <= i_g .and. i_g <= ile) then - num_loc = num_loc + 1 - endif - - end do - - if(num_loc == 0) then - - ! If i_g is nowhere to be found in GSMap, set num_loc to - ! unity and return a null value for rank - - num_loc = 1 - allocate(rank(num_loc), stat=ier) - rank = -1 ! null value - return - - else - ! Allocate output array rank(1:num_loc) - - allocate(rank(num_loc), stat=ier) - - ! Second sweep: fill in the entries to rank(:) - - n = 0 ! counter - - do i=1,size(GSMap%start) - - ilc = GSMap%start(i) - ile = ilc + GSMap%length(i) - 1 - - ! If i_g in [ilc,ile]. Note that i_g := [1:..] - - if(ilc <= i_g .and. i_g <= ile) then - n = n + 1 - rank(n) = GSMap%pe_loc(i) - endif - - end do - - endif - - end subroutine rankm_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: active_pes_ - number of processes that own data. -! index. -! -! !DESCRIPTION: -! This routine scans the pe location list of the input {\tt GlobalSegMap} -! {\tt GSMap\%pe\_loc(:)}, and counts the number of pe locations that -! own at least one datum. This value is returned in the {\tt INTEGER} -! argument {\tt n\_active}. If the optional {\tt INTEGER} array argument -! {\tt list} is included in the call, a sorted list (in ascending order) of -! the active processes will be returned. -! -! {\bf N.B.:} If {\tt active\_pes\_()} is invoked with the optional argument -! {\tt pe\_list} included, this routine will allocate and return this array. -! The user must deallocate this array once it is no longer needed. Failure -! to do so will result in a memory leak. -! -! !INTERFACE: - - subroutine active_pes_(GSMap, n_active, pe_list) -! -! !USES: -! - use m_die , only : die - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: n_active - integer, dimension(:), pointer, optional :: pe_list - -! !REVISION HISTORY: -! 03Feb01 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::active_pes_' - - integer :: count, i, n, ngseg, ierr - integer :: max_activepe, p - logical, dimension(:), allocatable :: process_list - - ! retrieve total number of segments in the map: - - ngseg = ngseg_(GSMap) - - ! retrieve maximum active process id in the map: - - max_activepe = maxval(GSMap%pe_loc(:)) - - ! allocate workspace to tally process id list: - - allocate(process_list(0:max_activepe), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(process_list)',ierr) - - ! initialize process_list to false (i.e. no active pes) - - process_list = .false. - - ! 
initialize the distinct active process count: - - count = 0 - - ! scan entries of GSMap%pe_loc to count active processes: - - do n=1,ngseg - if(GSMap%pe_loc(n) >= 0) then ! a legitimate pe_location - - if (.not. process_list(GSMap%pe_loc(n))) then - process_list(GSMap%pe_loc(n)) = .true. - count = count + 1 - endif - - else ! a negative entry in GSMap%pe_loc(n) - ierr = 2 - call die(myname_,'negative value of GSMap%pe_loc',ierr) - endif - end do - - ! If the argument pe_list is present, we must allocate this - ! array and fill it - - if(present(pe_list)) then - - ! allocate pe_list - - allocate(pe_list(count), stat=ierr) - if (ierr /= 0) then - call die(myname_,'allocate(pe_list)',ierr) - endif - - i = 0 - do p=0,max_activepe - if (process_list(p)) then - i = i+1 - if (i > count) exit - pe_list(i) = p - endif - enddo - - if (i > count) then - call die(myname_,'pe_list fill error',count) - endif - - endif ! if(present(pe_list))... - - ! deallocate work array process_list... - - deallocate(process_list, stat=ierr) - if (ierr /= 0) then - call die(myname_,'deallocate(process_list)',ierr) - endif - - ! finally, store the active process count in output variable - ! n_active: - - n_active = count - - end subroutine active_pes_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: peLocs_ - process ID locations for distributed points. -! index. -! -! !DESCRIPTION: -! This routine takes an input {\tt INTEGER} array of point indices -! {\tt points(:)}, compares them with an input {\tt GlobalSegMap} -! {\tt pointGSMap}, and returns the {\em unique} process ID location -! for each point. Note the emphasize on unique. The assumption here -! (which is tested) is that {\tt pointGSMap} is not haloed. The process -! ID locations for the points is returned in the array {\tt pe\_locs(:)}. -! -! {\bf N.B.:} The test of {\tt pointGSMap} for halo points, and the -! subsequent search for the process ID for each point is very slow. This -! first version of the routine is serial. A parallel version of this -! routine will need to be developed. -! -! !INTERFACE: - - subroutine peLocs_(pointGSMap, npoints, points, pe_locs) -! -! !USES: -! - use m_die , only : die - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: pointGSMap - integer, intent(in) :: npoints - integer, dimension(:), intent(in) :: points - -! !OUTPUT PARAMETERS: - - integer, dimension(:), intent(out) :: pe_locs - -! !REVISION HISTORY: -! 18Apr01 - J.W. Larson - initial version. -! 18Oct16 - P. Worley - added algorithm options: -! new default changes complexity from O(npoints*ngseg) to -! O(gsize + ngseg) (worst case), and much better in current -! usage. Worst case memory requirements are O(gsize), but -! not seen in current usage. Other new algorithm is a little -! slower in practice, and worst case memory requirement is -! O(ngseg), which is also not seen in current usage. -! Original algorithm is recovered if compiled with -! LOW_MEMORY_PELOCS defined. Otherwise nondefault new -! algorithm is enabled if compiled with MEDIUM_MEMORY_PELOCS -! defined. 
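! Illustrative sketch (not part of the original module): peLocs() maps
! a list of global point indices to the unique process that owns each
! one; the input map must not be haloed. The point list below is
! hypothetical.
!
!   integer :: pts(3), owners(3)
!   pts = (/ 1, 57, 200 /)
!   call peLocs(GSMap, 3, pts, owners)   ! owners(i) = pe owning pts(i)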
-!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::peLocs_' - integer :: ierr - integer :: iseg, ngseg, ipoint - integer :: lower_index, upper_index - integer :: min_points_index, max_points_index -#if defined MEDIUM_MEMORY_PELOCS - integer :: ifseg, nfseg - integer, dimension(:), allocatable :: feasible_seg -#else - integer, dimension(:), allocatable :: pindices_to_pes -#endif - -! Input argument checks: - - if (npoints < 1) then - return - endif - - if(size(points) < npoints) then - ierr = size(points) - call die(myname_,'input points list array too small',ierr) - endif - - if(size(pe_locs) < npoints) then - ierr = size(pe_locs) - call die(myname_,'output pe_locs array too small',ierr) - endif - - if(haloed_(pointGSMap)) then - ierr = 1 - call die(myname_,'input pointGSMap haloed--not valid',ierr) - endif - -! Brute-force indexing...no assumptions regarding sorting of points(:) -! or pointGSMap%start(:) - -! Number of segments in pointGSMap: - - ngseg = ngseg_(pointGSMap) - -#if defined LOW_MEMORY_PELOCS - - do ipoint=1,npoints ! loop over points - - do iseg=1,ngseg ! loop over segments - - lower_index = pointGSMap%start(iseg) - upper_index = lower_index + pointGSMap%length(iseg) - 1 - - if((points(ipoint) >= lower_index) .and. & - (points(ipoint) <= upper_index)) then - pe_locs(ipoint) = pointGSMap%pe_loc(iseg) - - exit - - endif - - end do ! do iseg=1, ngseg - - end do ! do ipoint=1,npoints - -#elif defined MEDIUM_MEMORY_PELOCS - -! Determine index range for points vector - max_points_index = 0 - min_points_index = pointGSMap%gsize + 1 - do ipoint=1,npoints ! loop over points - - max_points_index = max(points(ipoint), max_points_index) - min_points_index = min(points(ipoint), min_points_index) - - end do ! do ipoint=1,npoints - -! Determine number of segments that need to be examined - nfseg = 0 - do iseg=1,ngseg ! loop over segments - - lower_index = pointGSMap%start(iseg) - upper_index = lower_index + pointGSMap%length(iseg) - 1 - - if ((lower_index <= max_points_index) .and. & - (upper_index >= min_points_index) ) then - - nfseg = nfseg + 1 - - endif - - end do ! do iseg=1, ngseg - - if(nfseg < 1) then - ierr = nfseg - call die(myname_,'no feasible segments',ierr) - endif - - ! Allocate temporary array - allocate(feasible_seg(nfseg), stat=ierr) - if (ierr /= 0) then - call die(myname_,'allocate(feasible_seg)',ierr) - endif - - ! Determine segments that need to be examined - feasible_seg(:) = 1 - nfseg = 0 - do iseg=1,ngseg ! loop over segments - - lower_index = pointGSMap%start(iseg) - upper_index = lower_index + pointGSMap%length(iseg) - 1 - - if ((lower_index <= max_points_index) .and. & - (upper_index >= min_points_index) ) then - - nfseg = nfseg + 1 - feasible_seg(nfseg) = iseg - - endif - - end do ! do iseg=1, ngseg - - ! Calculate map from local points to pes - do ipoint=1,npoints ! loop over points - - do ifseg=1,nfseg ! loop over feasible segments - - iseg = feasible_seg(ifseg) - lower_index = pointGSMap%start(iseg) - upper_index = lower_index + pointGSMap%length(iseg) - 1 - - if((points(ipoint) >= lower_index) .and. & - (points(ipoint) <= upper_index) ) then - pe_locs(ipoint) = pointGSMap%pe_loc(iseg) - exit - endif - - end do ! do ifseg=1,nfseg - end do ! do ipoint=1,npoints - - ! Clean up - deallocate(feasible_seg, stat=ierr) - if (ierr /= 0) then - call die(myname_,'deallocate(feasible_seg)',ierr) - endif - -#else - -! 
Determine index range for points assigned to points vector - max_points_index = 0 - min_points_index = pointGSMap%gsize + 1 - do ipoint=1,npoints ! loop over points - - max_points_index = max(points(ipoint), max_points_index) - min_points_index = min(points(ipoint), min_points_index) - - end do ! do ipoint=1,npoints - -! Allocate temporary array - allocate(pindices_to_pes(min_points_index:max_points_index), stat=ierr) - if (ierr /= 0) then - call die(myname_,'allocate(pindices_to_pes)',ierr) - endif - -! Calculate map from (global) point indices to pes - do iseg=1,ngseg ! loop over segments - - lower_index = pointGSMap%start(iseg) - upper_index = lower_index + pointGSMap%length(iseg) - 1 - - lower_index = max(lower_index, min_points_index) - upper_index = min(upper_index, max_points_index) - - if (lower_index <= upper_index) then - do ipoint=lower_index,upper_index - pindices_to_pes(ipoint) = pointGSMap%pe_loc(iseg) - enddo - endif - - end do ! do iseg=1, ngseg - -! Calculate map from local point indices to pes - do ipoint=1,npoints ! loop over points - - pe_locs(ipoint) = pindices_to_pes(points(ipoint)) - - end do ! do ipoint=1,npoints - -! Clean up - deallocate(pindices_to_pes, stat=ierr) - if (ierr /= 0) then - call die(myname_,'deallocate(pindices_to_pes)',ierr) - endif - -#endif - - end subroutine peLocs_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: haloed_ - test GlobalSegMap for presence of halo points. -! index. -! -! !DESCRIPTION: -! This {\tt LOGICAL} function tests the input {\tt GlobalSegMap} -! {\tt GSMap} for the presence of halo points. Halo points are points -! that appear in more than one segment of a {\tt GlobalSegMap}. If -! {\em any} halo point is found, the function {\tt haloed\_()} returns -! immediately with value {\tt .TRUE.} If, after an exhaustive search -! of the map has been completed, no halo points are found, the function -! {\tt haloed\_()} returns with value {\tt .FALSE.} -! -! The search algorithm is: -! -! \begin{enumerate} -! \item Extract the segment start and length information from -! {\tt GSMap\%start} and {\tt GSMap\%length} into the temporary -! arrays {\tt start(:)} and {\tt length(:)}. -! \item Sort these arrays in {\em ascending order} keyed by {\tt start}. -! \item Scan the arrays {\tt start} and{\tt length}. A halo point is -! present if for at least one value of the index -! $1 \leq {\tt n} \leq {\tt GSMap\%ngseg}$ -! $${\tt start(n)} + {\tt length(n)} - 1 \geq {\tt start(n+1)}$$. -! \end{enumerate} -! -! {\bf N.B.:} Beware that the search for halo points is potentially -! expensive. -! -! !INTERFACE: - - logical function haloed_(GSMap) -! -! !USES: -! - use m_die , only : die - use m_SortingTools , only : IndexSet - use m_SortingTools , only : IndexSort - use m_SortingTools , only : Permute - - implicit none - - ! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap - -! !REVISION HISTORY: -! 08Feb01 - J.W. Larson - initial version. -! 26Apr01 - J.W. Larson - Bug fix. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::haloed_' - -! Error Flag - - integer :: ierr - -! Loop index and storage for number of segments in GSMap - - integer :: n, ngseg - -! Temporary storage for GSMap%start, GSMap%length, and index -! 
permutation array: - - integer, dimension(:), allocatable :: start, length, perm - -! Logical flag indicating segment overlap - - logical :: overlap - - ! How many segments in GSMap? - - ngseg = ngseg_(GSMap) - - ! allocate temporary arrays: - - allocate(start(ngseg), length(ngseg), perm(ngseg), stat=ierr) - if (ierr /= 0) then - call die(myname_,'allocate(start...',ierr) - endif - - ! Fill the temporary arrays start(:) and length(:) - - do n=1,ngseg - start(n) = GSMap%start(n) - length(n) = GSMap%length(n) - end do - - ! Initialize the index permutation array: - - call IndexSet(perm) - - ! Create the index permutation that will order the data so the - ! entries of start(:) appear in ascending order: - - call IndexSort(ngseg, perm, start, descend=.false.) - - ! Permute the data so the entries of start(:) are now in - ! ascending order: - - call Permute(start,perm,ngseg) - - ! Apply this same permutation to length(:) - - call Permute(length,perm,ngseg) - - ! Set LOGICAL flag indicating segment overlap to .FALSE. - - overlap = .FALSE. - - ! Now, scan the segments, looking for overlapping segments. Upon - ! discovery of the first overlapping pair of segments, set the - ! flag overlap to .TRUE. and exit. - - n = 0 - - SCAN_LOOP: do - n = n + 1 - if(n == ngseg) EXIT ! we are finished, and there were no halo pts. - if((start(n) + length(n) - 1) >= start(n+1)) then ! found overlap - overlap = .TRUE. - EXIT - endif - end do SCAN_LOOP - - ! Clean up allocated memory: - - deallocate(start, length, perm, stat=ierr) - if (ierr /= 0) then - call die(myname_,'deallocate(start...',ierr) - endif - - ! Assign function return value: - - haloed_ = overlap - - end function haloed_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Sort_ - generate index permutation for GlobalSegMap. -! -! !DESCRIPTION: -! {\tt Sort\_()} uses the supplied keys {\tt key1} and {\tt key2} to -! generate a permutation {\tt perm} that will put the entries of the -! components {\tt GlobalSegMap\%start}, {\tt GlobalSegMap\%length} and -! {\tt GlobalSegMap\%pe\_loc} in {\em ascending} lexicographic order. -! -! {\bf N.B.:} {\tt Sort\_()} returns an allocated array {\tt perm(:)}. It -! the user must deallocate this array once it is no longer needed. Failure -! to do so could create a memory leak. -! -! !INTERFACE: - - subroutine Sort_(GSMap, key1, key2, perm) -! -! !USES: -! - use m_die , only : die - use m_SortingTools , only : IndexSet - use m_SortingTools , only : IndexSort - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! input GlobalSegMap - integer, dimension(:), intent(in) :: key1 ! first sort key - integer, dimension(:), intent(in), optional :: key2 ! second sort key - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: perm ! output index permutation - -! !REVISION HISTORY: -! 02Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Sort_' - - integer :: ierr, length - - length = ngseg_(GSMap) - - ! Argument checking. are key1 and key2 (if supplied) the - ! same length as the components of GSMap? If not, stop with - ! an error. 
- - ierr = 0 - - if(size(key1) /= length) then - ierr = 1 - call die(myname_,'key1 GSMap size mismatch',ierr) - endif - - if(present(key2)) then - if(size(key2) /= length) then - ierr = 2 - call die(myname_,'key2 GSMap size mismatch',ierr) - endif - if(size(key1) /= size(key2)) then - ierr = 3 - call die(myname_,'key1 key2 size mismatch',ierr) - endif - endif - - ! allocate space for permutation array perm(:) - - allocate(perm(length), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(perm)',ierr) - - ! Initialize perm(i)=i, for i=1,length - - call IndexSet(perm) - - ! Index permutation is achieved by successive calls to IndexSort(), - ! with the keys supplied one at a time in the order reversed from - ! the desired sort order. - - if(present(key2)) then - call IndexSort(length, perm, key2, descend=.false.) - endif - - call IndexSort(length, perm, key1, descend=.false.) - - ! Yes, it is that simple. The desired index permutation is now - ! stored in perm(:) - - end subroutine Sort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: PermuteInPlace_ - apply index permutation to GlobalSegMap. -! -! !DESCRIPTION: -! {\tt PermuteInPlace\_()} uses a supplied index permutation {\tt perm} -! to re-order {\tt GlobalSegMap\%start}, {\tt GlobalSegMap\%length} and -! {\tt GlobalSegMap\%pe\_loc}. -! -! !INTERFACE: - - subroutine PermuteInPlace_(GSMap, perm) -! -! !USES: -! - use m_die , only : die - use m_SortingTools , only : Permute - - implicit none - -! !INPUT PARAMETERS: - - integer, dimension(:), intent(in) :: perm - -! !INPUT/OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(inout) :: GSMap - -! !REVISION HISTORY: -! 02Feb01 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::PermuteInPlace_' - - integer :: length, ierr - - length = ngseg_(GSMap) - - ! Argument checking. Do the components of GSMap - ! (e.g. GSMap%start) have the same length as the - ! permutation array perm? If not, stop with an error. - - ierr = 0 - - if(size(perm) /= length) then - ierr = 1 - call die(myname_,'perm GSMap size mismatch',ierr) - endif - - ! In-place index permutation using perm(:) : - - call Permute(GSMap%start,perm,length) - call Permute(GSMap%length,perm,length) - call Permute(GSMap%pe_loc,perm,length) - - ! Now, the components of GSMap are ordered according to - ! perm(:). - - end subroutine PermuteInPlace_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SortPermuteInPlace_ - Sort in-place GlobalSegMap components. -! -! !DESCRIPTION: -! {\tt SortPermuteInPlace\_()} uses a the supplied key(s) to generate -! and apply an index permutation that will place the {\tt GlobalSegMap} -! components {\tt GlobalSegMap\%start}, {\tt GlobalSegMap\%length} and -! {\tt GlobalSegMap\%pe\_loc} in lexicographic order. -! -! !INTERFACE: - - subroutine SortPermuteInPlace_(GSMap, key1, key2) -! -! !USES: -! - use m_die , only : die - - implicit none - -! !INPUT PARAMETERS: - - integer, dimension(:), intent(in) :: key1 - integer, dimension(:), intent(in), optional :: key2 - -! !INPUT/OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(inout) :: GSMap - -! 
!REVISION HISTORY: -! 02Feb01 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SortPermuteInPlace_' - - integer :: length, ierr - integer, dimension(:), pointer :: perm - - length = ngseg_(GSMap) - - ! Argument checking. are key1 and key2 (if supplied) the - ! same length as the components of GSMap? If not, stop with - ! an error. - ierr = 0 - if(size(key1) /= length) then - ierr = 1 - call die(myname_,'key1 GSMap size mismatch',ierr) - endif - - if(present(key2)) then - if(size(key2) /= length) then - ierr = 2 - call die(myname_,'key2 GSMap size mismatch',ierr) - endif - if(size(key1) /= size(key2)) then - ierr = 3 - call die(myname_,'key1 key2 size mismatch',ierr) - endif - endif - - ! Generate desired index permutation: - - if(present(key2)) then - call Sort_(GSMap, key1, key2, perm) - else - call Sort_(GSMap, key1=key1, perm=perm) - endif - - ! Apply index permutation: - - call PermuteInPlace_(GSMap, perm) - - ! Now the components of GSMap have been re-ordered. - ! Deallocate the index permutation array perm(:) - - deallocate(perm, stat=ierr) - if(ierr /= 0) call die(myname_,'deallocate(perm...)',ierr) - - end subroutine SortPermuteInPlace_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: increasing_ - Return .TRUE. if GSMap has increasing indices -! -! !DESCRIPTION: -! The function {\tt increasing\_()} returns .TRUE. if each proc's -! indices in the {\tt GlobalSegMap} argument {\tt GSMap} have -! strictly increasing indices. I.e. the proc's segments have indices -! in ascending order and are non-overlapping. -! -! !INTERFACE: - - logical function increasing_(gsmap) - -! !USES: - use m_MCTWorld, only: ThisMCTWorld - use m_die - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: gsmap - -! !REVISION HISTORY: -! 06Jun07 - R. Loy - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::increasing_' - - integer comp_id - integer nprocs - integer i - integer this_ngseg - integer ier - integer, allocatable:: last_index(:) - integer pe_loc - - comp_id = gsmap%comp_id - nprocs=ThisMCTWorld%nprocspid(comp_id) - - allocate( last_index(nprocs), stat=ier ) - if (ier/=0) call die(myname_,'allocate last_index') - - last_index= -1 - increasing_ = .TRUE. - this_ngseg=ngseg(gsmap) - - iloop: do i=1,this_ngseg - pe_loc=gsmap%pe_loc(i)+1 ! want value 1..nprocs - if (gsmap%start(i) <= last_index(pe_loc)) then - increasing_ = .FALSE. - exit iloop - endif - last_index(pe_loc)=gsmap%start(i)+gsmap%length(i)-1 - enddo iloop - - deallocate( last_index, stat=ier ) - if (ier/=0) call die(myname_,'deallocate last_index') - - end function increasing_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: copy_ - Copy the gsmap to a new gsmap -! -! !DESCRIPTION: -! Make a copy of a gsmap. -! Note this is a deep copy of all arrays. -! -! !INTERFACE: - - subroutine copy_(src,dest) - -! !USES: - use m_MCTWorld, only: ThisMCTWorld - use m_die - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap),intent(in) :: src - -! 
!OUTPUT PARAMETERS: - - type(GlobalSegMap),intent(out) :: dest - - -! !REVISION HISTORY: -! 27Jul07 - R. Loy - initial version -!EOP ___________________________________________________________________ - - - call initp_( dest, src%comp_id, src%ngseg, src%gsize, & - src%start, src%length, src%pe_loc ) - - end subroutine copy_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: print_ - Print GSMap info -! -! !DESCRIPTION: -! Print out contents of GSMAP on unit number 'lun' -! -! !INTERFACE: - - subroutine print_(gsmap,lun) -! -! !USES: -! - use m_die - - implicit none - -!INPUT/OUTPUT PARAMETERS: - type(GlobalSegMap), intent(in) :: gsmap - integer, intent(in) :: lun - -! !REVISION HISTORY: -! 06Jul12 - R. Jacob - initial version -!EOP ___________________________________________________________________ - - - integer n - character(len=*),parameter :: myname_=myname//'::print_' - - write(lun,*) gsmap%comp_id - write(lun,*) gsmap%ngseg - write(lun,*) gsmap%gsize - do n=1,gsmap%ngseg - write(lun,*) gsmap%start(n),gsmap%length(n),gsmap%pe_loc(n) - end do - - end subroutine print_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: printFromRoot_ - Print GSMap info -! -! !DESCRIPTION: -! Print out contents of GSMAP on unit number 'lun' -! -! !INTERFACE: - - subroutine printFromRootnp_(gsmap,mycomm,lun) -! -! !USES: -! - use m_MCTWorld, only : printnp - use m_die - use m_mpif90 - - implicit none - -!INPUT/OUTPUT PARAMETERS: - type(GlobalSegMap), intent(in) :: gsmap - integer, intent(in) :: mycomm - integer, intent(in) :: lun - -! !REVISION HISTORY: -! 06Jul12 - R. Jacob - initial version -!EOP ___________________________________________________________________ - - - integer myrank - integer ier - character(len=*),parameter :: myname_=myname//'::print_' - - call MP_comm_rank(mycomm,myrank,ier) - if(ier/=0) call MP_perr_die(myname_,'MP_comm_rank',ier) - - if (myrank == 0) then - call printnp(gsmap%comp_id,lun) - call print_(gsmap,lun) - endif - - end subroutine printFromRootnp_ - - - - - end module m_GlobalSegMap - diff --git a/src/externals/mct/mct/m_GlobalSegMapComms.F90 b/src/externals/mct/mct/m_GlobalSegMapComms.F90 deleted file mode 100644 index a5192a3b3e4..00000000000 --- a/src/externals/mct/mct/m_GlobalSegMapComms.F90 +++ /dev/null @@ -1,555 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_GlobalSegMapComms - GlobalSegMap Communications Support -! -! !DESCRIPTION: -! -! This module provides communications support for the {\tt GlobalSegMap} -! datatype. Both blocking and non-blocking point-to-point communications -! are provided for send (analogues to {\tt MPI\_SEND()/MPI\_ISEND()}) -! A receive and broadcast method is also supplied. -! -! !INTERFACE: - - module m_GlobalSegMapComms - - implicit none - - private ! except - -! 
!PUBLIC MEMBER FUNCTIONS: - - public :: send - public :: recv - public :: isend - public :: bcast - - interface bcast ; module procedure bcast_ ; end interface - interface send ; module procedure send_ ; end interface - interface recv ; module procedure recv_ ; end interface - interface isend ; module procedure isend_ ; end interface - -! !REVISION HISTORY: -! 11Aug03 - J.W. Larson - initial version -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_GlobalSegMapComms' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: send_ - Point-to-point blocking Send of a GlobalSegMap -! -! !DESCRIPTION: -! This routine performs a blocking send of a {\tt GlobalSegMap} (the -! input argument {\tt outgoingGSMap}) to the root processor on component -! {\tt comp\_id}. The input {\tt INTEGER} argument {\tt TagBase} -! is used to generate tags for the messages associated with this operation; -! there are six messages involved, so the user should avoid using tag -! values {\tt TagBase} and {\tt TagBase + 5}. All six messages are blocking. -! The success (failure) of this operation is reported in the zero -! (non-zero) value of the optional {\tt INTEGER} output variable {\tt status}. -! -! !INTERFACE: - - subroutine send_(outgoingGSMap, comp_id, TagBase, status) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die,die - use m_stdio - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_comp_id => comp_ID - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - - use m_MCTWorld, only : ComponentToWorldRank - use m_MCTWorld, only : ThisMCTWorld - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(IN) :: outgoingGSMap - integer, intent(IN) :: comp_id - integer, intent(IN) :: TagBase - -! !OUTPUT PARAMETERS: - - integer, optional, intent(OUT) :: status - -! !REVISION HISTORY: -! 13Aug03 - J.W. Larson - API and initial version. -! 26Aug03 - R. Jacob - use same method as isend_ -! 05Mar04 - R. Jacob - match new isend_ method. -!EOP ___________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::send_' - - integer :: ierr - integer :: destID - integer :: nsegs - - if(present(status)) status = 0 ! the success value - - destID = ComponentToWorldRank(0, comp_id, ThisMCTWorld) - - ! Next, send the buffer size to destID so it can prepare a - ! receive buffer of the correct size. - nsegs = GlobalSegMap_ngseg(outgoingGSMap) - - call MPI_SEND(outgoingGSMap%comp_id, 1, MP_Type(outgoingGSMap%comp_id), destID, & - TagBase, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send compid failed',ierr) - endif - - call MPI_SEND(outgoingGSMap%ngseg, 1, MP_Type(outgoingGSMap%ngseg), destID, & - TagBase+1, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send ngseg failed',ierr) - endif - - call MPI_SEND(outgoingGSMap%gsize, 1, MP_Type(outgoingGSMap%gsize), destID, & - TagBase+2, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send gsize failed',ierr) - endif - - - ! 
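For orientation, a typical pairing of this blocking send with the matching recv_() looks like the fragment below. It is a sketch, not a complete program: it assumes MCTWorld is already initialized, that peer_comp_id names the other component from each side's point of view, and that the tag base 700 (tags 700..705) is free for this exchange.

    ! On the root process of the component that owns myGSMap:
    use m_GlobalSegMapComms, only : GSMapComms_send => send
    call GSMapComms_send(myGSMap, peer_comp_id, 700)

    ! On the root process of the peer component:
    use m_GlobalSegMapComms, only : GSMapComms_recv => recv
    call GSMapComms_recv(peerGSMap, peer_comp_id, 700)

The non-blocking isend_() further below uses the same six-message layout; its callers must additionally wait on the returned request handles (for example with MPI_WAITALL) and deallocate the reqHandle array afterwards.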
Send segment information data (3 messages) - - call MPI_SEND(outgoingGSMap%start, nsegs, & - MP_Type(outgoingGSMap%start(1)), & - destID, TagBase+3, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send outgoingGSMap%start failed',ierr) - endif - - call MPI_SEND(outgoingGSMap%length, nsegs, & - MP_Type(outgoingGSMap%length(1)), & - destID, TagBase+4, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send outgoingGSMap%length failed',ierr) - endif - - call MPI_SEND(outgoingGSMap%pe_loc, nsegs, & - MP_Type(outgoingGSMap%pe_loc(1)), & - destID, TagBase+5, ThisMCTWorld%MCT_comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send outgoingGSMap%pe_loc failed',ierr) - endif - - end subroutine send_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: isend_ - Point-to-point Non-blocking Send of a GlobalSegMap -! -! !DESCRIPTION: -! This routine performs a non-blocking send of a {\tt GlobalSegMap} (the -! input argument {\tt outgoingGSMap}) to the root processor on component -! {\tt comp\_id} The input {\tt INTEGER} argument {\tt TagBase} -! is used to generate tags for the messages associated with this operation; -! there are six messages involved, so the user should avoid using tag -! values {\tt TagBase} and {\tt TagBase + 5}. All six messages are non- -! blocking, and the request handles for them are returned in the output -! {\tt INTEGER} array {\tt reqHandle}, which can be checked for completion -! using any of MPI's wait functions. The success (failure) of -! this operation is reported in the zero (non-zero) value of the optional -! {\tt INTEGER} output variable {\tt status}. -! -! {\bf N.B.}: Data is sent directly out of {\tt outgoingGSMap} so it -! must not be deleted until the send has completed. -! -! {\bf N.B.}: The array {\tt reqHandle} represents allocated memory that -! must be deallocated when it is no longer needed. Failure to do so will -! create a memory leak. -! -! !INTERFACE: - - subroutine isend_(outgoingGSMap, comp_id, TagBase, reqHandle, status) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die,die - use m_stdio - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - - use m_MCTWorld, only : ComponentToWorldRank - use m_MCTWorld, only : ThisMCTWorld - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(IN) :: outgoingGSMap - integer, intent(IN) :: comp_id - integer, intent(IN) :: TagBase - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: reqHandle - integer, optional, intent(OUT) :: status - -! !REVISION HISTORY: -! 13Aug03 - J.W. Larson - API and initial version. -! 05Mar04 - R. Jacob - Send everything directly out -! of input GSMap. Don't use a SendBuffer. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::isend_' - - integer :: ierr,destID,nsegs - - if(present(status)) status = 0 ! the success value - - destID = ComponentToWorldRank(0, comp_id, ThisMCTWorld) - - allocate(reqHandle(6), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - 'FATAL--allocation of send buffer failed with ierr=',ierr - call die(myname_) - endif - - ! Next, send the buffer size to destID so it can prepare a - ! receive buffer of the correct size (3 messages). 
- nsegs = GlobalSegMap_ngseg(outgoingGSMap) - - call MPI_ISEND(outgoingGSMap%comp_id, 1, MP_Type(outgoingGSMap%comp_id), destID, & - TagBase, ThisMCTWorld%MCT_comm, reqHandle(1), ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send compid failed',ierr) - endif - - call MPI_ISEND(outgoingGSMap%ngseg, 1, MP_Type(outgoingGSMap%ngseg), destID, & - TagBase+1, ThisMCTWorld%MCT_comm, reqHandle(2), ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send ngseg failed',ierr) - endif - - call MPI_ISEND(outgoingGSMap%gsize, 1, MP_Type(outgoingGSMap%gsize), destID, & - TagBase+2, ThisMCTWorld%MCT_comm, reqHandle(3), ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send gsize failed',ierr) - endif - - ! Send segment information data (3 messages) - - call MPI_ISEND(outgoingGSMap%start, nsegs, & - MP_Type(outgoingGSMap%start(1)), & - destID, TagBase+3, ThisMCTWorld%MCT_comm, reqHandle(4), ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send outgoingGSMap%start failed',ierr) - endif - - call MPI_ISEND(outgoingGSMap%length, nsegs, & - MP_Type(outgoingGSMap%length(1)), & - destID, TagBase+4, ThisMCTWorld%MCT_comm, reqHandle(5), ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send outgoingGSMap%length failed',ierr) - endif - - call MPI_ISEND(outgoingGSMap%pe_loc, nsegs, & - MP_Type(outgoingGSMap%pe_loc(1)), & - destID, TagBase+5, ThisMCTWorld%MCT_comm, reqHandle(6), ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Send outgoingGSMap%pe_loc failed',ierr) - endif - - end subroutine isend_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: recv_ - Point-to-point blocking Receive of a GlobalSegMap -! -! !DESCRIPTION: -! This routine performs a blocking receive of a {\tt GlobalSegMap} (the -! input argument {\tt outgoingGSMap}) from the root processor on component -! {\tt comp\_id}. The input {\tt INTEGER} argument {\tt TagBase} -! is used to generate tags for the messages associated with this operation; -! there are six messages involved, so the user should avoid using tag -! values {\tt TagBase} and {\tt TagBase + 5}. The success (failure) of this -! operation is reported in the zero (non-zero) value of the optional {\tt INTEGER} -! output variable {\tt status}. -! -! !INTERFACE: - - subroutine recv_(incomingGSMap, comp_id, TagBase, status) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die, die - use m_stdio - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_init => init - - use m_MCTWorld, only : ComponentToWorldRank - use m_MCTWorld, only : ThisMCTWorld - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(IN) :: comp_id - integer, intent(IN) :: TagBase - -! !OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(OUT) :: incomingGSMap - integer, optional, intent(OUT) :: status - -! !REVISION HISTORY: -! 13Aug03 - J.W. Larson - API and initial version. -! 25Aug03 - R.Jacob - rename to recv_. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::recv_' - - integer :: ierr,sourceID - integer :: MPstatus(MP_STATUS_SIZE) - integer :: RecvBuffer(3) - - if(present(status)) status = 0 ! the success value - - sourceID = ComponentToWorldRank(0, comp_id, ThisMCTWorld) - - ! Receive the GlobalSegMap's basic constants: component id, - ! grid size, and number of segments. 
The number of segments - ! is needed to construct the arrays into which segment - ! information will be received. Thus, this receive blocks. - - call MPI_RECV(RecvBuffer(1), 1, MP_Type(RecvBuffer(1)), sourceID, & - TagBase, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Receive of compid failed',ierr) - endif - call MPI_RECV(RecvBuffer(2), 1, MP_Type(RecvBuffer(2)), sourceID, & - TagBase+1, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Receive of ngseg failed',ierr) - endif - call MPI_RECV(RecvBuffer(3), 1, MP_Type(RecvBuffer(3)), sourceID, & - TagBase+2, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Receive of gsize failed',ierr) - endif - - ! Create Empty GlobaSegMap into which segment information - ! will be received - - call GlobalSegMap_init(incomingGSMap, RecvBuffer(1), RecvBuffer(2), & - RecvBuffer(3)) - - ! Receive segment information data (3 messages) - - call MPI_RECV(incomingGSMap%start, RecvBuffer(2), & - MP_Type(incomingGSMap%start(1)), & - sourceID, TagBase+3, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Recv incomingGSMap%start failed',ierr) - endif - - call MPI_RECV(incomingGSMap%length, RecvBuffer(2), & - MP_Type(incomingGSMap%length(1)), & - sourceID, TagBase+4, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Recv incomingGSMap%length failed',ierr) - endif - - call MPI_RECV(incomingGSMap%pe_loc, RecvBuffer(2), & - MP_Type(incomingGSMap%pe_loc(1)), & - sourceID, TagBase+5, ThisMCTWorld%MCT_comm, MPstatus, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_, 'Recv incomingGSMap%pe_loc failed',ierr) - endif - - end subroutine recv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcast_ - broadcast a GlobalSegMap object -! -! !DESCRIPTION: -! -! The routine {\tt bcast\_()} takes the input/output {\em GlobalSegMap} -! argument {\tt GSMap} (on input valid only on the {\tt root} process, -! on output valid on all processes) and broadcasts it to all processes -! on the communicator associated with the F90 handle {\tt comm}. The -! success (failure) of this operation is returned as a zero (non-zero) -! value of the optional output {\tt INTEGER} argument {\tt status}. -! -! !INTERFACE: - - subroutine bcast_(GSMap, root, comm, status) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die,die - use m_stdio - - use m_GlobalSegMap, only : GlobalSegMap - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: - - type(GlobalSegMap), intent(inout) :: GSMap ! Output GlobalSegMap - -! !OUTPUT PARAMETERS: - - integer, optional, intent(out) :: status ! global vector size - -! !REVISION HISTORY: -! 17Oct01 - J.W. Larson - Initial version. -! 11Aug03 - J.W. Larson - Relocated from original -! location in m_GlobalSegMap. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcast_' - - integer :: myID, ierr, n - integer, dimension(:), allocatable :: IntBuffer - - ! Step One: which process am I? - - call MP_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ierr) - - ! 
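Further down, bcast_() avoids three separate vector broadcasts by packing start, length, and pe_loc into one integer buffer of size 3*ngseg. The MPI-free sketch below shows only that packing and unpacking arithmetic, using made-up data:

    program gsmap_pack_demo
      implicit none
      ! Made-up segment data standing in for GSMap%start, %length, %pe_loc.
      integer, parameter :: ngseg = 2
      integer :: start(ngseg)  = (/ 1, 11 /)
      integer :: length(ngseg) = (/ 10, 5 /)
      integer :: pe_loc(ngseg) = (/ 0, 1 /)
      integer :: buf(3*ngseg), ustart(ngseg), ulength(ngseg), upe(ngseg)
      integer :: n

      do n = 1, ngseg            ! pack: starts, then lengths, then pe locations
         buf(n)         = start(n)
         buf(ngseg+n)   = length(n)
         buf(2*ngseg+n) = pe_loc(n)
      end do

      do n = 1, ngseg            ! unpack on the receiving side
         ustart(n)  = buf(n)
         ulength(n) = buf(ngseg+n)
         upe(n)     = buf(2*ngseg+n)
      end do

      print *, 'round trip ok: ', all(ustart == start) .and. &
                                  all(ulength == length) .and. all(upe == pe_loc)
    end program gsmap_pack_demo

One broadcast of this buffer replaces three, trading a little copying for fewer latency-bound messages.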
Step Two: Broadcast the scalar bits of the GlobalSegMap from - ! the root. - - allocate(IntBuffer(3), stat=ierr) ! allocate buffer space (all PEs) - if(ierr /= 0) then - if(.not. present(status)) then - call die(myname_,'allocate(IntBuffer)',ierr) - else - write(stderr,*) myname_,':: error during allocate(IntBuffer)' - status = 2 - return - endif - endif - - if(myID == root) then ! pack the buffer - IntBuffer(1) = GSMap%comp_id - IntBuffer(2) = GSMap%ngseg - IntBuffer(3) = GSMap%gsize - endif - - call MPI_BCAST(IntBuffer, 3, MP_type(IntBuffer(1)), root, comm, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MPI_BCAST(IntBuffer)',ierr) - - if(myID /= root) then ! unpack from buffer to GSMap - GSMap%comp_id = IntBuffer(1) - GSMap%ngseg = IntBuffer(2) - GSMap%gsize = IntBuffer(3) - endif - - deallocate(IntBuffer, stat=ierr) ! deallocate buffer space - if(ierr /= 0) then - if(.not. present(status)) then - call die(myname_,'deallocate(IntBuffer)',ierr) - else - write(stderr,*) myname_,':: error during deallocate(IntBuffer)' - status = 4 - return - endif - endif - - ! Step Three: Broadcast the vector bits of GSMap from the root. - ! Pack them into one big array to save latency costs associated - ! with multiple broadcasts. - - allocate(IntBuffer(3*GSMap%ngseg), stat=ierr) ! allocate buffer space (all PEs) - if(ierr /= 0) then - if(.not. present(status)) then - call die(myname_,'second allocate(IntBuffer)',ierr) - else - write(stderr,*) myname_,':: error during second allocate(IntBuffer)' - status = 5 - return - endif - endif - - if(myID == root) then ! pack outgoing broadcast buffer - do n=1,GSMap%ngseg - IntBuffer(n) = GSMap%start(n) - IntBuffer(GSMap%ngseg+n) = GSMap%length(n) - IntBuffer(2*GSMap%ngseg+n) = GSMap%pe_loc(n) - end do - endif - - call MPI_BCAST(IntBuffer, 3*GSMap%ngseg, MP_Type(IntBuffer(1)), root, comm, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'Error in second MPI_BCAST(IntBuffer)',ierr) - - if(myID /= root) then ! Allocate GSMap%start, GSMap%length,...and fill them - - allocate(GSMap%start(GSMap%ngseg), GSMap%length(GSMap%ngseg), & - GSMap%pe_loc(GSMap%ngseg), stat=ierr) - if(ierr /= 0) then - if(.not. present(status)) then - call die(myname_,'off-root allocate(GSMap%start...)',ierr) - else - write(stderr,*) myname_,':: error during off-root allocate(GSMap%start...)' - status = 7 - return - endif - endif - - do n=1,GSMap%ngseg ! unpack the buffer into the GlobalSegMap - GSMap%start(n) = IntBuffer(n) - GSMap%length(n) = IntBuffer(GSMap%ngseg+n) - GSMap%pe_loc(n) = IntBuffer(2*GSMap%ngseg+n) - end do - - endif - - ! Clean up buffer space: - - deallocate(IntBuffer, stat=ierr) - if(ierr /= 0) then - if(.not. present(status)) then - call die(myname_,'second deallocate(IntBuffer)',ierr) - else - write(stderr,*) myname_,':: error during second deallocate(IntBuffer)' - status = 8 - return - endif - endif - - end subroutine bcast_ - - end module m_GlobalSegMapComms diff --git a/src/externals/mct/mct/m_GlobalToLocal.F90 b/src/externals/mct/mct/m_GlobalToLocal.F90 deleted file mode 100644 index 0b80a836274..00000000000 --- a/src/externals/mct/mct/m_GlobalToLocal.F90 +++ /dev/null @@ -1,719 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_GlobalToLocal - Global to Local Index Translation -! -! 
!DESCRIPTION: -! This module contains routines for translating global array indices -! into their local counterparts (that is, the indices into the local -! data structure holding a given process' chunk of a distributed array). -! The MCT domain decomposition descriptors {\tt GlobalMap} and -! {\tt GlobalSegMap} are both supported. Indices can be translated -! one-at-a-time using the {\tt GlobalToLocalIndex} routine or many -! at once using the {\tt GlobalToLocalIndices} routine. -! -! This module also provides facilities for setting the local row and -! column indices for a {\tt SparseMatrix} through the -! {\tt GlobalToLocalMatrix} routines. -! -! !INTERFACE: - - module m_GlobalToLocal - -! !USES: -! No external modules are used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: GlobalToLocalIndex ! Translate Global to Local index - ! (i.e. recover local index for a - ! point from its global index). - - public :: GlobalToLocalIndices ! Translate Global to Local indices - ! (i.e. recover local starts/lengths - ! of distributed data segments). - - public :: GlobalToLocalMatrix ! Re-indexing of row or column - ! indices for a SparseMatrix - - interface GlobalToLocalIndices ; module procedure & - GlobalSegMapToIndices_, & ! local arrays of starts/lengths - GlobalSegMapToNavigator_, & ! return local indices as Navigator - GlobalSegMapToIndexArr_ - end interface - - interface GlobalToLocalIndex ; module procedure & - GlobalSegMapToIndex_, & - GlobalMapToIndex_ - end interface - - interface GlobalToLocalMatrix ; module procedure & - GlobalSegMapToLocalMatrix_ - end interface - - -! !SEE ALSO: -! -! The MCT modules {\tt m\_GlobalMap} and {m\_GlobalSegMap} for more -! information regarding MCT's domain decomposition descriptors. -! -! The MCT module {\tt m\_SparseMatrix} for more information regarding -! the {\tt SparseMatrix} datatype. -! -! !REVISION HISTORY: -! 2Feb01 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_GlobalToLocal' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalSegMapToIndices_ - Return _local_ indices in arrays. -! -! !DESCRIPTION: {\tt GlobalSegMapToIndices\_()} takes a user-supplied -! {\tt GlobalSegMap} data type {\tt GSMap}, which desribes a decomposition -! on the input MPI communicator corresponding to the Fortran {\tt INTEGER} -! handle {\tt comm} to translate the global directory of segment locations -! into local indices for referencing the on-pe storage of the mapped -! distributed data. -! -! {\bf N.B.:} This routine returns two allocated arrays---{\tt start(:)} -! and {\tt length(:)}---which must be deallocated once the user no longer -! needs them. Failure to do this will create a memory leak. -! -! !INTERFACE: - - subroutine GlobalSegMapToIndices_(GSMap, comm, start, length) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die, die, warn - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_nlseg => nlseg - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! Output GlobalSegMap - integer, intent(in) :: comm ! communicator handle - -! 
!OUTPUT PARAMETERS: - - integer,dimension(:), pointer :: start ! local segment start indices - integer,dimension(:), pointer :: length ! local segment sizes - -! !REVISION HISTORY: -! 2Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalSegMapToIndices_' - - integer :: myID, ierr, ngseg, nlseg, n, count - - ! determine local process id myID - - call MP_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MP_COMM_RANK',ierr) - - ! determine number of global segments ngseg: - - ngseg = GlobalSegMap_ngseg(GSMap) - - ! determine number of local segments on process myID nlseg: - - nlseg = GlobalSegMap_nlseg(GSMap, myID) - - ! allocate arrays start(:) and length(:) to store local - ! segment information. - - allocate(start(nlseg), length(nlseg), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate(start...',ierr) - - ! Loop over GlobalSegMap%pe_loc(:) values to isolate - ! global index values of local data. Record number of - ! matches in the INTEGER count. - - count = 0 - do n=1, ngseg - if(GSMap%pe_loc(n) == myID) then - count = count + 1 - if(count > nlseg) then - ierr = 2 - call die(myname_,'too many pe matches',ierr) - endif - start(count) = GSMap%start(n) - length(count) = GSMap%length(n) - endif - end do - - if(count < nlseg) then - ierr = 3 - call die(myname_,'too few pe matches',ierr) - endif - - ! translate global start indices to their local - ! values, based on their storage order and number - ! of elements in each segment - - do n=1, count - if(n == 1) then - start(n) = 1 - else - start(n) = start(n-1) + length(n-1) - endif - end do - - end subroutine GlobalSegMapToIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalSegMapToIndex_ - Global to Local Index Translation -! -! !DESCRIPTION: This {\tt INTEGER} query function takes a user-supplied -! {\tt GlobalSegMap} data type {\tt GSMap}, which desribes a decomposition -! on the input MPI communicator corresponding to the Fortran {\tt INTEGER} -! handle {\tt comm}, and the input global index value {\tt i\_g}, and -! returns a positive local index value if the datum {\tt i\_g}. If -! the datum {\tt i\_g} is not stored on the local process ID, a value -! of {\tt -1} is returned. -! -! !INTERFACE: - - - integer function GlobalSegMapToIndex_(GSMap, i_g, comm) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die, die, warn - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_nlseg => nlseg - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! Output GlobalSegMap - integer, intent(in) :: i_g ! global index - integer, intent(in) :: comm ! communicator handle - -! !REVISION HISTORY: -! 2Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalSegMapToIndex_' - - integer :: myID - integer :: count, ierr, ngseg, nlseg, n - integer :: lower_bound, upper_bound - integer :: local_start, local_index - logical :: found - - ! Determine local process id myID: - - call MP_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MP_COMM_RANK()',ierr) - - ! 
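The search this function performs can be pictured with a toy decomposition. The values below are invented for illustration only (no real GlobalSegMap or communicator is involved): process 0 owns global points 1-5 and 16-20, stored locally as 1-10.

    program g2l_index_demo
      implicit none
      integer, parameter :: ngseg = 3
      integer :: seg_start(ngseg) = (/ 1, 6, 16 /)   ! global start of each segment
      integer :: seg_len(ngseg)   = (/ 5, 10, 5 /)   ! segment lengths
      integer :: seg_pe(ngseg)    = (/ 0, 1,  0 /)   ! owning process of each segment
      integer :: myID, i_g, n, local_start, local_index

      myID = 0              ! pretend we are process 0
      i_g  = 18             ! global index to translate
      local_index = -1      ! -1 means "not stored on this process"
      local_start = 1       ! local position where myID's next segment begins

      do n = 1, ngseg
         if (seg_pe(n) /= myID) cycle
         if (i_g >= seg_start(n) .and. i_g <= seg_start(n) + seg_len(n) - 1) then
            local_index = local_start + (i_g - seg_start(n))
            exit
         end if
         local_start = local_start + seg_len(n)   ! step past this local segment
      end do

      print *, 'global', i_g, ' -> local', local_index   ! expect local index 8
    end program g2l_index_demo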
Extract the global number of segments in GSMap - - ngseg = GlobalSegMap_ngseg(GSMap) - - ! Extract the global number of segments in GSMap for myID - - nlseg = GlobalSegMap_nlseg(GSMap, myID) - - ! set the counter count, which records the number of times myID - ! matches entries in GSMap%pe_loc(:) - - count = 0 - - ! set local_start, which is the current local storage segment - ! starting position - - local_start = 1 - - ! set logical flag found to signify we havent found i_g: - - found = .false. - - n = 0 - - SEARCH_LOOP: do - - n = n+1 - if (n > ngseg) EXIT - - if(GSMap%pe_loc(n) == myID) then - - ! increment / check the pe_loc match counter - - count = count + 1 - if(count > nlseg) then - ierr = 2 - call die(myname_,'too many pe matches',ierr) - endif - - ! is i_g in this segment? - - lower_bound = GSMap%start(n) - upper_bound = GSMap%start(n) + GSMap%length(n) - 1 - - if((lower_bound <= i_g) .and. (i_g <= upper_bound)) then - local_index = local_start + (i_g - GSMap%start(n)) - found = .true. - EXIT - else - local_start = local_start + GSMap%length(n) - endif - - endif - end do SEARCH_LOOP - - ! We either found the local index, or have exhausted our options. - - if(found) then - GlobalSegMapToIndex_ = local_index - else - GlobalSegMapToIndex_ = -1 - endif - - end function GlobalSegMapToIndex_ - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalSegMapToIndexArr_ - Global to Local Index Array Translation -! -! !DESCRIPTION: Given a {\tt GlobalSegMap} data type {\tt GSMap} -! and MPI communicator corresponding to the Fortran {\tt INTEGER} -! handle {\tt comm}, convert an array of global index values -! {\tt i\_global()} to an array of local index values {\tt i\_local()}. If -! the datum {\tt i\_global(j)} is not stored on the local process ID, -! then {\tt i\_local(j)} will be set to {\tt -1}/ -! -! !INTERFACE: - - -subroutine GlobalSegMapToIndexArr_(GSMap, i_global, i_local, nindex, comm) - -! -! !USES: -! - use m_stdio - use m_mpif90 - use m_die, only : MP_perr_die, die, warn - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_nlseg => nlseg - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! Output GlobalSegMap - integer, intent(in) :: i_global(:) ! global index - integer, intent(out) :: i_local(:) ! local index - integer, intent(in) :: nindex ! size of i_global() - integer, intent(in) :: comm ! communicator handle - -! !REVISION HISTORY: -! 12-apr-2006 R. Loy - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalSegMapToIndexArr_' - - integer :: myID - integer :: count, ierr, ngseg, nlseg - integer,allocatable :: mygs_lb(:),mygs_ub(:),mygs_len(:),mygs_lstart(:) - - integer :: i,j,n,startj - - ! Determine local process id myID: - - call MP_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MP_COMM_RANK()',ierr) - - - ngseg = GlobalSegMap_ngseg(GSMap) - nlseg = GlobalSegMap_nlseg(GSMap, myID) - - if (nlseg <= 0) return; - - allocate( mygs_lb(nlseg), mygs_ub(nlseg), mygs_len(nlseg) ) - allocate( mygs_lstart(nlseg) ) - - -!! -!! determine the global segments on this processor -!! just once, so the info be used repeatedly below -!! 
- - n = 0 - do i=1,ngseg - if (GSMap%pe_loc(i) == myID ) then - n=n+1 - mygs_lb(n)=GSMap%start(i) - mygs_ub(n)=GSMap%start(i) + GSMap%length(i) -1 - mygs_len(n)=GSMap%length(i) - endif - enddo - - if (n .ne. nlseg) then - write(stderr,*) myname_,"mismatch nlseg",n,nlseg - call die(myname) - endif - - mygs_lstart(1)=1 - do j=2,nlseg - mygs_lstart(j)=mygs_lstart(j-1)+mygs_len(j-1) - enddo - - -!! -!! this loop is optimized for the case that the indices in iglobal() -!! are in the same order that they appear in the global segments, -!! which seems usually (always?) to be the case. -!! -!! note that the j loop exit condition is only executed when the index -!! is not found in the current segment, which saves a factor of 2 -!! since many consecutive indices are in the same segment. -!! - - - j=1 - do i=1,nindex - - i_local(i)= -1 - - startj=j - SEARCH_LOOP: do - - if ( (mygs_lb(j) <= i_global(i)) .and. & - (i_global(i) <= mygs_ub(j))) then - i_local(i) = mygs_lstart(j) + (i_global(i) - mygs_lb(j)) - EXIT SEARCH_LOOP - else - j=j+1 - if (j > nlseg) j=1 ! wrap around - if (j == startj) EXIT SEARCH_LOOP - endif - - end do SEARCH_LOOP - - end do - -!!!! this version vectorizes (outer loop) -!!!! performance for in-order input is slightly slower than the above -!!!! but performance on out-of-order input is probably much better -!!!! at the moment we are going on the assumption that caller is -!!!! likely providing in-order, so we won't use this version. -!! -!! do i=1,nindex -!! -!! i_local(i)= -1 -!! -!! SEARCH_LOOP: do j=1,nlseg -!! -!! if ( (mygs_lb(j) <= i_global(i)) .and. & -!! (i_global(i) <= mygs_ub(j))) then -!! i_local(i) = mygs_lstart(j) + (i_global(i) - mygs_lb(j)) -!! endif -!! -!! end do SEARCH_LOOP -!! -!! end do - - - deallocate( mygs_lb, mygs_ub, mygs_len, mygs_lstart ) - - end subroutine GlobalSegMapToIndexArr_ - - - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalMapToIndex_ - Global to Local Index Translation -! -! !DESCRIPTION: -! This {\tt INTEGER} query function takes as its input a user-supplied -! {\tt GlobalMap} data type {\tt GMap}, which desribes a decomposition -! on the input MPI communicator corresponding to the Fortran {\tt INTEGER} -! handle {\tt comm}, and the input global index value {\tt i\_g}, and -! returns a positive local index value if the datum {\tt i\_g}. If -! the datum {\tt i\_g} is not stored on the local process ID, a value -! of {\tt -1} is returned. -! -! !INTERFACE: - - - integer function GlobalMapToIndex_(GMap, i_g, comm) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die, die, warn - use m_GlobalMap, only : GlobalMap - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap ! Input GlobalMap - integer, intent(in) :: i_g ! global index - integer, intent(in) :: comm ! communicator handle - -! !REVISION HISTORY: -! 2Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalMapToIndex_' - - integer :: myID - integer :: count, ierr, ngseg, nlseg, n - integer :: lower_bound, upper_bound - integer :: local_start, local_index - logical :: found - - ! Determine local process id myID: - - call MP_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MP_COMM_RANK()',ierr) - - ! 
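Because a GlobalMap assigns each process a single contiguous block described by displs(:) and counts(:), the translation here reduces to simple arithmetic. A small invented example (not a real GlobalMap):

    program gmap_index_demo
      implicit none
      integer, parameter :: nprocs = 3
      integer :: counts(0:nprocs-1) = (/ 4, 6, 5 /)    ! points owned by each process
      integer :: displs(0:nprocs-1) = (/ 0, 4, 10 /)   ! offset of each process's block
      integer :: myID, i_g, lower, upper, local_index

      myID = 1                              ! pretend we are process 1
      i_g  = 7                              ! global index to translate
      lower = displs(myID) + 1              ! first global index owned by myID (5)
      upper = displs(myID) + counts(myID)   ! last global index owned by myID (10)

      if (i_g >= lower .and. i_g <= upper) then
         local_index = i_g - lower + 1
      else
         local_index = -1                   ! not stored on this process
      end if

      print *, 'global', i_g, ' -> local', local_index   ! expect local index 3
    end program gmap_index_demo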
Initialize logical "point located" flag found as false - - found = .false. - - lower_bound = GMap%displs(myID) + 1 - upper_bound = GMap%displs(myID) + GMap%counts(myID) - - if((lower_bound <= i_g) .and. (i_g <= upper_bound)) then - found = .true. - local_index = i_g - lower_bound + 1 - endif - - if(found) then - GlobalMapToIndex_ = local_index - else - GlobalMapToIndex_ = -1 - endif - - end function GlobalMapToIndex_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalSegMapToNavigator_ - Return Navigator to Local Segments -! -! !DESCRIPTION: -! This routine takes as its input takes a user-supplied -! {\tt GlobalSegMap} data type {\tt GSMap}, which desribes a decomposition -! on the input MPI communicator corresponding to the Fortran {\tt INTEGER} -! handle {\tt comm}, and returns the local segment start index and length -! information for referencing the on-pe storage of the mapped distributed -! data. These data are returned in the form of the output {\tt Navigator} -! argument {Nav}. -! -! {\bf N.B.:} This routine returns a {\tt Navigator} variable {\tt Nav}, -! which must be deallocated once the user no longer needs it. Failure to -! do this will create a memory leak. -! -! !INTERFACE: - - subroutine GlobalSegMapToNavigator_(GSMap, comm, oNav) - -! -! !USES: -! - use m_mpif90 - use m_die, only : MP_perr_die, die, warn - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_ngseg => ngseg - use m_GlobalSegMap, only : GlobalSegMap_nlseg => nlseg - use m_Navigator, only : Navigator - use m_Navigator, only : Navigator_init => init - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! Input GlobalSegMap - integer, intent(in) :: comm ! communicator handle - -! !OUTPUT PARAMETERS: - - type(Navigator), intent(out) :: oNav ! Output Navigator - -! !REVISION HISTORY: -! 2Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalSegMapToNavigator_' - - integer :: myID, ierr, ngseg, nlseg, n, count - - ! determine local process id myID - - call MP_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) call MP_perr_die(myname_,'MP_COMM_RANK',ierr) - - ! determine number of global segments ngseg: - - ngseg = GlobalSegMap_ngseg(GSMap) - - ! determine number of local segments on process myID nlseg: - - nlseg = GlobalSegMap_nlseg(GSMap, myID) - - ! Allocate space for the Navigator oNav: - - call Navigator_init(oNav, nlseg, ierr) - if(ierr /= 0) call die(myname_,'Navigator_init',ierr) - - call GlobalSegMapToIndices_(GSMap, comm, oNav%displs, oNav%counts) - - end subroutine GlobalSegMapToNavigator_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalSegMapToLocalMatrix_ - Set Local SparseMatrix Indices -! -! !DESCRIPTION: -! This routine takes as its input a user-supplied {\tt GlobalSegMap} -! domain decomposition {\tt GSMap}, which describes the decomposition of -! either the rows or columns of the input/output {\tt SparseMatrix} -! argument {\tt sMat} on the communicator associated with the {\tt INTEGER} -! 
handle {\tt comm}, and to translate the global row or column indices -! of {\tt sMat} into their local counterparts. The choice of either row -! or column is governed by the value of the input {\tt CHARACTER} -! argument {\tt RCFlag}. One sets this variable to either {\tt 'ROW'} or -! {\tt 'row'} to specify row re-indexing (which are stored in -! {\tt sMat} and retrieved by indexing the attribute {\tt lrow}), and -! {\tt 'COLUMN'} or {\tt 'column'} to specify column re-indexing (which -! are stored in {\tt sMat} and retrieved by indexing the {\tt SparseMatrix} -! attribute {\tt lcol}). -! -! !INTERFACE: - - subroutine GlobalSegMapToLocalMatrix_(sMat, GSMap, RCFlag, comm) - -! -! !USES: -! - use m_stdio - use m_die, only : die - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_indexIA => indexIA - use m_SparseMatrix, only : SparseMatrix_lsize => lsize - - use m_GlobalSegMap, only : GlobalSegMap - - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap ! Input GlobalSegMap - character(len=*), intent(in) :: RCFlag ! 'row' or 'column' - integer, intent(in) :: comm ! communicator handle - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !SEE ALSO: -! The MCT module m_SparseMatrix for more information about the -! SparseMatrix type and its storage of global and local row-and -! column indices. -! -! !REVISION HISTORY: -! 3May01 - J.W. Larson - initial version, which -! is _extremely_ slow, but safe. This must be re-examined -! later. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GlobalSegMapToLocalMatrix_' - - - integer :: i, GlobalIndex, gindex, lindex, lsize - - integer, allocatable :: temp_gindex(:) !! rml - integer, allocatable :: temp_lindex(:) !! rml - - - ! What are we re-indexing, rows or columns? - - select case(RCFlag) - case('ROW','row') - gindex = SparseMatrix_indexIA(sMat, 'grow', dieWith=myname_) - lindex = SparseMatrix_indexIA(sMat,'lrow', dieWith=myname_) - case('COLUMN','column') - gindex = SparseMatrix_indexIA(sMat,'gcol', dieWith=myname_) - lindex = SparseMatrix_indexIA(sMat,'lcol', dieWith=myname_) - case default - write(stderr,'(3a)') myname_,":: unrecognized value of RCFLag ",RCFlag - call die(myname) - end select - - - ! How many matrix elements are there? - - lsize = SparseMatrix_lsize(sMat) - - - !! rml new code from here down - do the mapping all in one - !! function call which has been tuned for speed - - allocate( temp_gindex(lsize) ) - allocate( temp_lindex(lsize) ) - - - do i=1,lsize - temp_gindex(i) = sMat%data%iAttr(gindex,i) - end do - - call GlobalSegMapToIndexArr_(GSMap, temp_gindex, temp_lindex, lsize, comm) - - do i=1,lsize - sMat%data%iAttr(lindex,i) = temp_lindex(i) - end do - - - deallocate(temp_gindex) ! rml - deallocate(temp_lindex) ! rml - - - end subroutine GlobalSegMapToLocalMatrix_ - - end module m_GlobalToLocal diff --git a/src/externals/mct/mct/m_MCTWorld.F90 b/src/externals/mct/mct/m_MCTWorld.F90 deleted file mode 100644 index d30582f489d..00000000000 --- a/src/externals/mct/mct/m_MCTWorld.F90 +++ /dev/null @@ -1,882 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS m_MCTWorld.F90,v 1.26 2007/06/01 19:56:25 rloy Exp -! 
CVS MCT_2_4_0 -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_MCTWorld -- MCTWorld Class -! -! !DESCRIPTION: -! MCTWorld is a datatype which acts as a component model registry. -! All models communicating through MCT must participate in initialization -! of MCTWorld. The single instance of MCTWorld, {\tt ThisMCTWorld} stores -! the component id and local and global processor rank of each component. -! This module contains methods for creating and destroying {\tt ThisMCTWorld} -! as well as inquiry functions. -! -! !INTERFACE: - - module m_MCTWorld -! -! !USES: - use m_List, only : List ! Support for List components. - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: MCTWorld ! The MCTWorld class data structure - - type MCTWorld - integer :: MCT_comm ! MCT communicator - integer :: ncomps ! Total number of components - integer :: mygrank ! Rank of this processor in - ! global communicator. - integer,dimension(:),pointer :: nprocspid => null() ! Number of processes - ! each component is on (e.g. rank of its - ! local communicator. - integer,dimension(:,:),pointer :: idGprocid => null() ! Translate between local component rank - ! rank in global communicator. - ! idGprocid(modelid,localrank)=globalrank - end type MCTWorld - -! !PUBLIC DATA MEMBERS: - - type(MCTWorld) :: ThisMCTWorld ! declare the MCTWorld - -! !PUBLIC MEMBER FUNCTIONS: - public :: initialized ! Determine if MCT is initialized - public :: init ! Create a MCTWorld - public :: clean ! Destroy a MCTWorld - public :: printnp ! Print contents of a MCTWorld - public :: NumComponents ! Number of Components in the MCTWorld - public :: ComponentNumProcs ! Number of processes owned by a given - ! component - public :: ComponentToWorldRank ! Given the rank of a process on a - ! component, return its rank on the - ! world communicator - public :: ComponentRootRank ! Return the rank on the world - ! communicator of the root process of - ! a component - public :: ThisMCTWorld ! Instantiation of the MCTWorld - -! - - interface initialized ; module procedure & - initialized_ - end interface - interface init ; module procedure & - initd_, & - initm_, & - initr_ - end interface - interface clean ; module procedure clean_ ; end interface - interface printnp ; module procedure printnp_ ; end interface - interface NumComponents ; module procedure & - NumComponents_ - end interface - interface ComponentNumProcs ; module procedure & - ComponentNumProcs_ - end interface - interface ComponentToWorldRank ; module procedure & - ComponentToWorldRank_ - end interface - interface ComponentRootRank ; module procedure & - ComponentRootRank_ - end interface - - - -! !REVISION HISTORY: -! 19Jan01 - R. Jacob - initial prototype -! 05Feb01 - J. Larson - added query and -! local-to-global mapping services NumComponents, -! ComponentNumProcs, ComponentToWorldRank, and ComponentRootRank -! 08Feb01 - R. Jacob - add mylrank and mygrank -! to datatype -! 20Apr01 - R. Jacob - remove allids from -! MCTWorld datatype. Not needed because component -! ids are always from 1 to number-of-components. -! 07Jun01 - R. Jacob - remove myid, mynprocs -! and mylrank from MCTWorld datatype because they are not -! clearly defined in PCM mode. Add MCT_comm for future use. -! 03Aug01 - E. Ong - explicity specify starting -! address in mpi_irecv -! 27Nov01 - E. Ong - added R. Jacob's version of initd_ -! to support PCM mode. -! 15Feb02 - R. Jacob - elminate use of MP_COMM_WORLD. Use -! argument globalcomm instead. 
Create MCT_comm from -! globalcomm -!EOP __________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_MCTWorld' - - contains - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initialized_ - determine if MCTWorld is initialized -! -! !DESCRIPTION: -! This routine may be used to determine whether {\tt MCTWorld::init} -! has been called. If not, the user must call {\tt init} before -! performing any other MCT library calls. -! -! !INTERFACE: - - logical function initialized_() - -! -! !USES: -! - -! !INPUT PARAMETERS: - - -! !REVISION HISTORY: -! 01June07 - R. Loy - initial version -!EOP ___________________________________________________________________ -! - - initialized_ = associated(ThisMCTWorld%nprocspid) - - end function initialized_ - - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initm_ - initialize MCTWorld -! -! !DESCRIPTION: -! Do a distributed init of MCTWorld for the case where a set of processors -! contains more then one model and the models may not span the set of processors. -! {\tt ncomps} is the total number of components in the entire coupled system. -! {\tt globalcomm} encompasses all the models (typically this can be MPI\_COMM\_WORLD). -! {\tt mycomms} is an array of MPI communicators, each sized for the appropriate model -! and {\tt myids} is a corresponding array of integers containing the model ids for -! the models on this particular set of processors. -! -! This routine is called once for the models covered by the set of processors. -! -! !INTERFACE: - - subroutine initm_(ncomps,globalcomm,mycomms,myids) -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: ncomps ! number of components - integer, intent(in) :: globalcomm ! global communicator - integer, dimension(:),pointer :: mycomms ! my communicators - integer, dimension(:),pointer :: myids ! component ids - -! !REVISION HISTORY: -! 20Sep07 - T. Craig migrated code from initd routine -! 20Sep07 - T. Craig - made mycomms an array -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initm_' - integer :: ier,myGid,myLid,i,mysize,Gsize,j - -! arrays allocated on the root to coordinate gathring of data -! and non-blocking receives by the root - integer, dimension(:), allocatable :: compids,reqs,nprocs,Gprocids - integer, dimension(:), allocatable :: root_nprocs - integer, dimension(:,:),allocatable :: status,root_idGprocid - integer, dimension(:,:),pointer :: tmparray - integer,dimension(:),pointer :: apoint -! ------------------------------------------------------------------ - -! Check that ncomps is a legal value - if(ncomps < 1) then - call die(myname_, "argument ncomps can't less than one!",ncomps) - endif - - if (size(myids) /= size(mycomms)) then - call die(myname_, "size of myids and mycomms inconsistent") - endif - -! make sure this has not been called already - if(associated(ThisMCTWorld%nprocspid) ) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: MCTWorld has already been initialized...Continuing' - RETURN - endif - -! 
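A driver that places two models on one pool of processors might reach this entry point through the generic init interface roughly as follows. This is a fragment, not a complete program; the component ids (1 and 2), the communicators atm_comm and ocn_comm, and ncomps = 2 are illustrative assumptions only.

    use m_MCTWorld, only : MCTWorld_init => init
    integer, dimension(:), pointer :: comms, ids

    allocate(comms(2), ids(2))
    comms(1) = atm_comm    ! communicator spanning the atmosphere's processes
    comms(2) = ocn_comm    ! communicator spanning the ocean's processes
    ids(1)   = 1           ! atmosphere component id
    ids(2)   = 2           ! ocean component id

    call MCTWorld_init(2, MPI_COMM_WORLD, comms, ids)

A process set running a single model can instead pass one communicator and one component id through the same generic interface (the initd_ entry point described further below).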
determine overall size - call MP_comm_size(globalcomm,Gsize,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_size()',ier) - -! determine my rank in comm_world - call MP_comm_rank(globalcomm,myGid,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - -! allocate space on global root to receive info about -! the other components - if(myGid == 0) then - allocate(nprocs(ncomps),compids(ncomps),& - reqs(ncomps),status(MP_STATUS_SIZE,ncomps),& - root_nprocs(ncomps),stat=ier) - if (ier /= 0) then - call die(myname_, 'allocate(nprocs,...)',ier) - endif - endif - - -!!!!!!!!!!!!!!!!!! -! Gather the number of procs from the root of each component -!!!!!!!!!!!!!!!!!! -! -! First on the global root, post a receive for each component - if(myGid == 0) then - do i=1,ncomps - call MPI_IRECV(root_nprocs(i), 1, MP_INTEGER, MP_ANY_SOURCE,i, & - globalcomm, reqs(i), ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_IRECV(root_nprocs)',ier) - enddo - endif - -! The local root on each component sends - do i=1,size(myids) - if(mycomms(i)/=MP_COMM_NULL) then - call MP_comm_size(mycomms(i),mysize,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_size()',ier) - call MP_comm_rank(mycomms(i),myLid,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - if(myLid == 0) then - call MPI_SEND(mysize,1,MP_INTEGER,0,myids(i),globalcomm,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_SEND(mysize)',ier) - endif - endif - enddo - -! Global root waits for all sends - if(myGid == 0) then - call MPI_WAITALL(size(reqs), reqs, status, ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL()',ier) - endif -! Global root now knows how many processors each component is using - -!!!!!!!!!!!!!!!!!! -! end of nprocs -!!!!!!!!!!!!!!!!!! - - -! allocate a tmp array for the receive on root. - if(myGid == 0) then - allocate(tmparray(0:Gsize-1,ncomps),stat=ier) - if(ier/=0) call die(myname_,'allocate(tmparray)',ier) - -! fill tmparray with a bad rank value for later error checking - tmparray = -1 - endif - -!!!!!!!!!!!!!!!!!! -! Gather the Gprocids from each local root -!!!!!!!!!!!!!!!!!! -! -! First on the global root, post a receive for each component - if(myGid == 0) then - do i=1,ncomps - apoint => tmparray(0:root_nprocs(i)-1,i) - call MPI_IRECV(apoint, root_nprocs(i),MP_INTEGER, & - MP_ANY_SOURCE,i,globalcomm, reqs(i), ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_IRECV()',ier) - enddo - endif - -! The root on each component sends - do i=1,size(myids) - if(mycomms(i)/=MP_COMM_NULL) then - call MP_comm_size(mycomms(i),mysize,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_size()',ier) - call MP_comm_rank(mycomms(i),myLid,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - -! make the master list of global proc ids -! -! allocate space to hold global ids -! only needed on root, but allocate everywhere to avoid complaints. - allocate(Gprocids(mysize),stat=ier) - if(ier/=0) call die(myname_,'allocate(Gprocids)',ier) -! gather over the LOCAL comm - call MPI_GATHER(myGid,1,MP_INTEGER,Gprocids,1,MP_INTEGER,0,mycomms(i),ier) - if(ier/=0) call die(myname_,'MPI_GATHER Gprocids',ier) - - if(myLid == 0) then - call MPI_SEND(Gprocids,mysize,MP_INTEGER,0,myids(i),globalcomm,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_SEND(Gprocids)',ier) - endif - - deallocate(Gprocids,stat=ier) - if(ier/=0) call die(myname_,'deallocate(Gprocids)',ier) - endif - enddo - -! 
Global root waits for all sends - if(myGid == 0) then - call MPI_WAITALL(size(reqs), reqs, status, ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL(Gprocids)',ier) - endif - -! Now store the Gprocids in the World description and Broadcast - - if(myGid == 0) then - allocate(root_idGprocid(ncomps,0:Gsize-1),stat=ier) - if(ier/=0) call die(myname_,'allocate(root_idGprocid)',ier) - - root_idGprocid = transpose(tmparray) - endif - - if(myGid /= 0) then - allocate(root_nprocs(1),root_idGprocid(1,1),stat=ier) - if(ier/=0) call die(myname_,'non-root allocate(root_idGprocid)',ier) - endif - -!!!!!!!!!!!!!!!!!! -! end of Gprocids -!!!!!!!!!!!!!!!!!! - -! now call the init from root. - call initr_(ncomps,globalcomm,root_nprocs,root_idGprocid) - -! if(myGid==0 .or. myGid==17) then -! write(*,*)'MCTA',myGid,ThisMCTWorld%ncomps,ThisMCTWorld%MCT_comm,ThisMCTWorld%nprocspid -! do i=1,ThisMCTWorld%ncomps -! write(*,*)'MCTK',myGid,i,ThisMCTWorld%idGprocid(i,0:ThisMCTWorld%nprocspid(i)-1) -! enddo -! endif - -! deallocate temporary arrays - deallocate(root_nprocs,root_idGprocid,stat=ier) - if(ier/=0) call die(myname_,'deallocate(root_nprocs,..)',ier) - if(myGid == 0) then - deallocate(compids,reqs,status,nprocs,tmparray,stat=ier) - if(ier/=0) call die(myname_,'deallocate(compids,..)',ier) - endif - - end subroutine initm_ - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initd_ - initialize MCTWorld -! -! !DESCRIPTION: -! Do a distributed init of MCTWorld using the total number of components -! {\tt ncomps} and either a unique integer component id {\tt myid} or, -! if more than one model is placed on a processor, an array of integer ids -! specifying the models {\tt myids}. Also required is -! the local communicator {\tt mycomm} and global communicator {\tt globalcomm} -! which encompasses all the models (typically this can be MPI\_COMM\_WORLD). -! This routine must be called once by each component (using {\em myid}) or -! component group (using {\em myids}). -! -! !INTERFACE: - - subroutine initd_(ncomps,globalcomm,mycomm,myid,myids) -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: ncomps ! number of components - integer, intent(in) :: globalcomm ! global communicator - integer, intent(in) :: mycomm ! my communicator - integer, intent(in),optional :: myid ! my component id - integer, dimension(:),pointer,optional :: myids ! component ids - -! !REVISION HISTORY: -! 19Jan01 - R. Jacob - initial prototype -! 07Feb01 - R. Jacob - non fatal error -! if init is called a second time. -! 08Feb01 - R. Jacob - initialize the new -! mygrank and mylrank -! 20Apr01 - R. Jacob - remove allids from -! MCTWorld datatype. Not needed because component -! ids are always from 1 to number-of-components. -! 22Jun01 - R. Jacob - move Bcast and init -! of MCTWorld to initr_ -! 20Sep07 - T. Craig migrated code to new initm routine -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initd_' - integer :: msize,ier - integer, dimension(:), pointer :: mycomm1d,myids1d - -! ------------------------------------------------------------------ - - -! only one of myid and myids should be present - if(present(myid) .and. present(myids)) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: Must define myid or myids in MCTWord init' - call die(myname_) - endif - - if(.not.present(myid) .and. 
.not.present(myids)) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: Must define one of myid or myids in MCTWord init' - call die(myname_) - endif - - if (present(myids)) then - msize = size(myids) - else - msize = 1 - endif - - allocate(mycomm1d(msize),myids1d(msize),stat=ier) - if(ier/=0) call die(myname_,'non-root allocate(root_idGprocid)',ier) - mycomm1d(:) = mycomm - - if (present(myids)) then - myids1d(:) = myids(:) - else - myids1d(:) = myid - endif - - call initm_(ncomps,globalcomm,mycomm1d,myids1d) - - deallocate(mycomm1d,myids1d) - - end subroutine initd_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initr_ - initialize MCTWorld from global root -! -! !DESCRIPTION: -! Initialize MCTWorld using information valid only on the global root. -! This is called by initm\_ but could also be called by the user -! for very complex model--processor geometries. -! -! !INTERFACE: - - subroutine initr_(ncomps,globalcomm,rnprocspid,ridGprocid) -! -! !USES: -! - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: ncomps ! total number of components - integer, intent(in) :: globalcomm ! the global communicator - integer, dimension(:),intent(in) :: rnprocspid ! number of processors for each component - integer, dimension(:,:),intent(in) :: ridGprocid ! an array of size (1:ncomps) x (0:Gsize-1) - ! which maps local ranks to global ranks - ! it's actually 1:Gsize here - -! !REVISION HISTORY: -! 22Jun01 - R. Jacob - initial prototype -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initr_' - integer :: ier,Gsize,myGid,MCTcomm,i,j - -! Check that ncomps is a legal value - if(ncomps < 1) then - call die(myname_, "argument ncomps can't less than one!",ncomps) - endif - -! determine overall size - call MP_comm_size(globalcomm,Gsize,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_size()',ier) - -! determine my rank in comm_world - call MP_comm_rank(globalcomm,myGid,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_rank()',ier) - -! create the MCT comm world - call MP_comm_dup(globalcomm,MCTcomm,ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_dup()',ier) - - allocate(ThisMCTWorld%nprocspid(ncomps),stat=ier) - if(ier/=0) call die(myname_,'allocate(MCTWorld%nprocspid(:),...',ier) - allocate(ThisMCTWorld%idGprocid(ncomps,0:Gsize-1),stat=ier) - if(ier/=0) call die(myname_,'allocate(MCTWorld%nprocspid(:),...',ier) - -! set the MCTWorld - ThisMCTWorld%ncomps = ncomps - ThisMCTWorld%MCT_comm = MCTcomm - ThisMCTWorld%mygrank = myGid - -! Now store the component ids in the World description and Broadcast - if(myGid == 0) then - ThisMCTWorld%nprocspid(1:ncomps) = rnprocspid(1:ncomps) - ThisMCTWorld%idGprocid = ridGprocid - endif - - call MPI_BCAST(ThisMCTWorld%nprocspid, ncomps, MP_INTEGER, 0, MCTcomm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_BCast nprocspid',ier) - - call MPI_BCAST(ThisMCTWorld%idGprocid, ncomps*Gsize,MP_INTEGER, 0,MCTcomm, ier) - if(ier/=0) call MP_perr_die(myname_,'MPI_BCast Gprocids',ier) - -! if(myGid==17) then -! do i=1,ThisMCTWorld%ncomps -! do j=1,ThisMCTWorld%nprocspid(i) -! write(*,*)'MCTK',myGid,i,j-1,ThisMCTWorld%idGprocid(i,j-1) -! enddo -! enddo -! 
endif - - end subroutine initr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destroy a MCTWorld -! -! !DESCRIPTION: -! This routine deallocates the arrays of {\tt ThisMCTWorld} -! It also zeros out the integer components. -! -! !INTERFACE: - - subroutine clean_() -! -! !USES: -! - use m_mpif90 - use m_die - - implicit none - -! !REVISION HISTORY: -! 19Jan01 - R. Jacob - initial prototype -! 08Feb01 - R. Jacob - clean the new -! mygrank and mylrank -! 20Apr01 - R. Jacob - remove allids from -! MCTWorld datatype. Not needed because component -! ids are always from 1 to number-of-components. -! 07Jun01 - R. Jacob - remove myid,mynprocs -! and mylrank. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - deallocate(ThisMCTWorld%nprocspid,ThisMCTWorld%idGprocid,stat=ier) - if(ier /= 0) call warn(myname_,'deallocate(MCTW,...)',ier) - - call MP_comm_free(ThisMCTWorld%MCT_comm, ier) - if(ier /= 0) call MP_perr_die(myname_,'MP_comm_free()',ier) - - ThisMCTWorld%ncomps = 0 - ThisMCTWorld%mygrank = 0 - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: NumComponents_ - Determine number of components in World. -! -! !DESCRIPTION: -! The function {\tt NumComponents\_} takes an input {\tt MCTWorld} -! argument {\tt World}, and returns the number of component models -! present. -! -! !INTERFACE: - - integer function NumComponents_(World) -! -! !USES: -! - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - - type(MCTWorld), intent(in) :: World - -! !REVISION HISTORY: -! 05Feb01 - J. Larson - initial version -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::NumComponents_' - - integer :: ncomps - - ncomps = World%ncomps - - if(ncomps <= 0) then - write(stderr,'(2a,1i3)') myname,":: invalid no. of components = ",ncomps - call die(myname_,'ncomps = ',ncomps) - endif - - NumComponents_ = ncomps - - end function NumComponents_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ComponentNumProcs_ - Number of processes a component owns. -! -! !DESCRIPTION: -! The function {\tt ComponentNumProcs\_} takes an input {\tt MCTWorld} -! argument {\tt World}, and a component ID {\tt comp\_id}, and returns -! the number of processes owned by that component. -! -! !INTERFACE: - - integer function ComponentNumProcs_(World, comp_id) -! -! !USES: -! - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - type(MCTWorld), intent(in) :: World - integer, intent(in) :: comp_id - -! !REVISION HISTORY: -! 05Feb01 - J. Larson - initial version -! 07Jun01 - R. Jacob - modify to use -! nprocspid and comp_id instead of World%mynprocs -!EOP ___________________________________________________________________ -! 
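! ----------------------------------------------------------------------
! Illustrative usage sketch, added for documentation only (not part of
! the original MCT source).  Assuming the module's public generic init
! resolves to initd_ above, and that NumComponents, ComponentNumProcs
! and ThisMCTWorld are public, a component with id myid and local
! communicator mycomm in a coupled system of ncomps models could do the
! following (all local names here are hypothetical):
!
!   use m_MCTWorld      ! assumed to export init (bound to initd_ above),
!                       ! NumComponents, ComponentNumProcs and ThisMCTWorld
!   integer :: ncomps, globalcomm, mycomm, myid
!   integer :: ncomp, npes, icomp
!   ...                 ! ncomps, communicators and myid set up by the caller;
!                       ! globalcomm is typically MPI_COMM_WORLD
!
!   call init(ncomps, globalcomm, mycomm, myid)       ! distributed init
!
!   ncomp = NumComponents(ThisMCTWorld)               ! components in the World
!   do icomp = 1, ncomp
!      npes = ComponentNumProcs(ThisMCTWorld, icomp)  ! processes owned by icomp
!   end do
! ----------------------------------------------------------------------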
- character(len=*),parameter :: myname_=myname//'::ComponentNumPros_' - - integer :: mynprocs - - mynprocs = World%nprocspid(comp_id) - - if(mynprocs <= 0) then - write(stderr,'(2a,1i6)') myname,":: invalid no. of processes = ",mynprocs - call die(myname_,'Number of processes = ',mynprocs) - endif - - ComponentNumProcs_ = mynprocs - - end function ComponentNumProcs_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ComponentToWorldRank_ - Determine rank on COMM_WORLD. -! -! !DESCRIPTION: -! The function {\tt ComponentToWorldRank\_} takes an input component ID -! {\tt comp\_id} and input rank on that component communicator -! {\tt comp\_rank}, and returns the rank of that process on the world -! communicator of {\tt MCTWorld}. -! -! !INTERFACE: - - integer function ComponentToWorldRank_(comp_rank, comp_id, World) -! -! !USES: -! - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - integer, intent(in) :: comp_rank ! process rank on the communicator - ! associated with comp_id - integer, intent(in) :: comp_id ! component id - type(MCTWorld), intent(in) :: World ! World - - -! !REVISION HISTORY: -! 05Feb01 - J. Larson - initial version -! 14Jul02 - E. Ong - made argument checking required -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::ComponentToWorldRank_' - - logical :: valid - integer :: n, world_rank - - - ! Do we want the potentially time-consuming argument checks? - ! The first time we use this function during execution on a - ! given set of components and component ranks, we will. In - ! later invocations, these argument checks are probably not - ! necessary (unless one alters MCTWorld), and impose a cost - ! one may wish to avoid. - - ! These checks are just conditional statements and are - ! not particularly time-consuming. It's better to be safe - ! than sorry. -EONG - - - ! Check argument comp_id for validity--assume initially it is not... - - valid = .false. - n = 0 - - if((comp_id <= World%ncomps) .and. & - (comp_id > 0)) then - valid = .true. - endif - - if(.not. valid) then - write(stderr,'(2a,1i7)') myname,":: invalid component id no. = ",& - comp_id - call die(myname_,'invalid comp_id = ',comp_id) - endif - - ! Check argument comp_rank for validity on the communicator associated - ! with comp_id. Assume initialy it is invalid. - - valid = .false. - - if((0 <= comp_rank) .or. & - (comp_rank < ComponentNumProcs_(World, comp_id))) then - valid = .true. - endif - - if(.not. valid) then - write(stderr,'(2a,1i5,1a,1i2)') myname, & - ":: invalid process ID. = ", & - comp_rank, "on component ",comp_id - call die(myname_,'invalid comp_rank = ',comp_rank) - endif - - - ! If we have reached this point, the input data are valid. - ! Return the global rank for comp_rank on component comp_id - - world_rank = World%idGprocid(comp_id, comp_rank) - - if(world_rank < 0) then - write(stderr,'(2a,1i6)') myname,":: negative world rank = ",world_rank - call die(myname_,'negative world rank = ',world_rank) - endif - - ComponentToWorldRank_ = world_rank - - end function ComponentToWorldRank_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! 
!IROUTINE: ComponentRootRank_ - Rank of component root on COMM_WORLD. -! -! !DESCRIPTION: -! The function {\tt ComponentRootRank\_} takes an input component ID -! {\tt comp\_id} and input {\tt MCTWorld} variable {\tt World}, and -! returns the global rank of the root of this component. -! -! !INTERFACE: - - integer function ComponentRootRank_(comp_id, World) -! -! !USES: -! - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: - integer, intent(in) :: comp_id ! component id - type(MCTWorld), intent(in) :: World ! World - -! !REVISION HISTORY: -! 05Feb01 - J. Larson - initial version -! 14Jul02 - E. Ong - made argument checking required -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::ComponentRootRank_' - - integer :: world_comp_root - - ! Call ComponentToWorldRank_ assuming the root on a remote component - ! has rank zero on the communicator associated with that component. - - world_comp_root = ComponentToWorldRank_(0, comp_id, World) - - if(world_comp_root < 0) then - write(stderr,'(2a,1i6)') myname,":: negative world rank = ",& - world_comp_root - call die(myname_,'invalid root id = ',world_comp_root) - endif - - ComponentRootRank_ = world_comp_root - - end function ComponentRootRank_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: printnp_ - Print number of procs for a component id. -! -! !DESCRIPTION: -! Print out number of MPI processes for the givin component id. -! -! !INTERFACE: - - subroutine printnp_(compid,lun) -! -! !USES: -! - use m_die - use m_mpif90 - - implicit none - -!INPUT/OUTPUT PARAMETERS: - integer, intent(in) :: compid - integer, intent(in) :: lun - -! !REVISION HISTORY: -! 06Jul12 - R. Jacob - initial version -!EOP ___________________________________________________________________ - - - integer ier - character(len=*),parameter :: myname_=myname//'::printnp_' - - write(lun,*) ThisMCTWorld%nprocspid(compid) - - end subroutine printnp_ - - - end module m_MCTWorld diff --git a/src/externals/mct/mct/m_MatAttrVectMul.F90 b/src/externals/mct/mct/m_MatAttrVectMul.F90 deleted file mode 100644 index 080214c677a..00000000000 --- a/src/externals/mct/mct/m_MatAttrVectMul.F90 +++ /dev/null @@ -1,642 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math + Computer Science Division / Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_MatAttrVectMul - Sparse Matrix AttrVect Multipication. -! -! !DESCRIPTION: -! -! This module contains routines supporting the sparse matrix-vector -! multiplication -! $${\bf y} = {\bf M} {\bf x},$$ -! where the vectors {\bf x} and {\bf y} are stored using the MCT -! {\tt AttrVect} datatype, and {\bf M} is stored using either the MCT -! {\tt SparseMatrix} or {\tt SparseMatrixPlus} type. The {\tt SparseMatrix} -! type is used to represent {\bf M} if the multiplication process is -! purely data-local (e.g., in a global address space, or if the process -! has been rendered embarrasingly parallel by earlier or subsequent -! vector data redistributions). If the multiplication process is to -! be explicitly distributed-memory parallel, then the {\tt SparseMatrixPlus} -! 
type is used to store the elements of {\bf M} and all information needed -! to coordinate data redistribution and reduction of partial sums. -! -! {\bf N.B.:} The matrix-vector multiplication routines in this module -! process only the {\bf real} attributes of the {\tt AttrVect} arguments -! corresponding to {\bf x} and {\bf y}. They ignore the integer attributes. -! -! !INTERFACE: - - module m_MatAttrVectMul - - private ! except - - public :: sMatAvMult ! The master Sparse Matrix - - ! Attribute Vector multipy API - - interface sMatAvMult ; module procedure & - sMatAvMult_DataLocal_, & - sMatAvMult_sMPlus_ - end interface - -! !SEE ALSO: -! The MCT module m_AttrVect for more information about the AttrVect type. -! The MCT module m_SparseMatrix for more information about the SparseMatrix -! type. -! The MCT module m_SparseMatrixPlus for more details about the master class -! for parallel sparse matrix-vector multiplication, the SparseMatrixPlus. - -! !REVISION HISTORY: -! 12Jan01 - J.W. Larson - initial module. -! 26Sep02 - J.W. Larson - added high-level, distributed -! matrix-vector multiply routine using the SparseMatrixPlus class. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_MatAttrVectMul' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math + Computer Science Division / Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: sMatAvMult_DataLocal -- Purely local matrix-vector multiply -! -! !DESCRIPTION: -! -! The sparse matrix-vector multiplication routine {\tt sMatAvMult\_DataLocal\_()} -! operates on the assumption of total data locality, which is equivalent -! to the following two conditions: -! \begin{enumerate} -! \item The input {\tt AttrVect} {\tt xAV} contains all the values referenced -! by the local column indices stored in the input {\tt SparsMatrix} argument -! {\tt sMat}; and -! \item The output {\tt AttrVect} {\tt yAV} contains all the values referenced -! by the local row indices stored in the input {\tt SparsMatrix} argument -! {\tt sMat}. -! \end{enumerate} -! By default, the multiplication occurs for each of the common {\tt REAL} attributes -! shared by {\tt xAV} and {\tt yAV}. This routine is capable of -! cross-indexing the attributes and performing the necessary multiplications. -! -! If the optional argument {\tt rList} is present, only the attributes listed will -! be multiplied. If the attributes have different names in {\tt yAV}, the optional -! {\tt TrList} argument can be used to provide the translation. -! -! If the optional argument {\tt Vector} is present and true, the vector -! architecture-friendly portions of this routine will be invoked. It -! will also cause the vector parts of {\\ sMat} to be initialized if they -! have not been already. -! -! !INTERFACE: - - subroutine sMatAvMult_DataLocal_(xAV, sMat, yAV, Vector, rList, TrList) -! -! !USES: -! 
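! Illustrative call sequence, added for documentation only (not part of
! the original MCT source).  The public generic sMatAvMult accepts
! either a SparseMatrix (this purely data-local routine) or a
! SparseMatrixPlus (the distributed routine below); the variable names
! xAV, yAV, sMat and the attribute tags are hypothetical and assumed to
! satisfy the locality conditions listed above:
!
!   use m_MatAttrVectMul, only : sMatAvMult
!
!   ! multiply every REAL attribute shared by xAV and yAV
!   call sMatAvMult(xAV, sMat, yAV)
!
!   ! multiply only the listed attributes, translating their names in yAV
!   call sMatAvMult(xAV, sMat, yAV, rList='taux:tauy', TrList='strx:stry')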
- use m_realkinds, only : FP - use m_stdio, only : stderr - use m_die, only : MP_perr_die, die, warn - - use m_List, only : List_identical => identical - use m_List, only : List_nitem => nitem - use m_List, only : GetIndices => get_indices - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_indexRA => indexRA - use m_AttrVect, only : SharedAttrIndexList - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_lsize => lsize - use m_SparseMatrix, only : SparseMatrix_indexIA => indexIA - use m_SparseMatrix, only : SparseMatrix_indexRA => indexRA - use m_SparseMatrix, only : SparseMatrix_vecinit => vecinit - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: xAV - logical,optional, intent(in) :: Vector - character(len=*),optional, intent(in) :: rList - character(len=*),optional, intent(in) :: TrList - - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - type(AttrVect), intent(inout) :: yAV - -! !REVISION HISTORY: -! 15Jan01 - J.W. Larson - API specification. -! 10Feb01 - J.W. Larson - Prototype code. -! 24Apr01 - J.W. Larson - Modified to accomodate -! changes to the SparseMatrix datatype. -! 25Apr01 - J.W. Larson - Reversed loop order -! for cache-friendliness -! 17May01 - R. Jacob - Zero the output -! attribute vector -! 10Oct01 - J. Larson - Added optional LOGICAL -! input argument InterpInts to make application of the -! multiply to INTEGER attributes optional -! 15Oct01 - J. Larson - Added feature to -! detect when attribute lists are identical, and cross- -! indexing of attributes is not needed. -! 29Nov01 - E.T. Ong - Removed MP_PERR_DIE if -! there are zero elements in sMat. This allows for -! decompositions where a process may own zero points. -! 29Oct03 - R. Jacob - add Vector argument to -! optionally use the vector-friendly version provided by -! Fujitsu -! 21Nov06 - R. Jacob - Allow attributes to be -! to be multiplied to be specified with rList and TrList. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::sMatAvMult_DataLocal_' - -! Matrix element count: - integer :: num_elements - -! Matrix row, column, and weight indices: - integer :: icol, irow, iwgt - -! Overlapping attribute index number - integer :: num_indices - -! Overlapping attribute index storage arrays: - integer, dimension(:), pointer :: xAVindices, yAVindices - -! Temporary variables for multiply do-loop - integer :: row, col - real(FP) :: wgt - -! Error flag and loop indices - integer :: ierr, i, m, n, l,ier - integer :: inxmin,outxmin - integer :: ysize, numav,j - -! Character variable used as a data type flag: - character*7 :: data_flag - -! logical flag - logical :: usevector,TrListIsPresent,rListIsPresent - logical :: contiguous,ycontiguous - - - usevector = .false. - if(present(Vector)) then - if(Vector) usevector = .true. - endif - - rListIsPresent = .false. - if(present(rList)) then - rListIsPresent = .true. - endif - -! TrList is present if it is provided and its length>0 - TrListIsPresent = .false. - if(present(TrList)) then - if(.not.present(rList)) then - call die(myname_,'MCTERROR: TrList provided without rList',2) - endif - if(len_trim(TrList) > 0) then - TrListIsPresent = .true. - endif - endif - - - ! Retrieve the number of elements in sMat: - - num_elements = SparseMatrix_lsize(sMat) - - ! 
Indexing the sparse matrix sMat: - - irow = SparseMatrix_indexIA(sMat,'lrow') ! local row index - icol = SparseMatrix_indexIA(sMat,'lcol') ! local column index - iwgt = SparseMatrix_indexRA(sMat,'weight') ! weight index - - - ! Multiplication sMat by REAL attributes in xAV: - - if(List_identical(xAV%rList, yAV%rList).and. & - .not.rListIsPresent) then ! no cross-indexing - - ! zero the output AttributeVector - call AttrVect_zero(yAV, zeroInts=.FALSE.) - - num_indices = List_nitem(xAV%rList) - - if(usevector) then - - if(.not.sMat%vecinit) then - call SparseMatrix_vecinit(sMat) - endif - -!DIR$ IVDEP - do m=1,num_indices - do l=1,sMat%tbl_end -!CDIR NOLOOPCHG -!DIR$ IVDEP - do i=sMat%row_s(l),sMat%row_e(l) - col = sMat%tcol(i,l) - wgt = sMat%twgt(i,l) - if (col < 0) cycle - yAV%rAttr(m,i) = yAV%rAttr(m,i) + wgt * xAV%rAttr(m,col) - enddo - enddo - enddo - - else - - do n=1,num_elements - - row = sMat%data%iAttr(irow,n) - col = sMat%data%iAttr(icol,n) - wgt = sMat%data%rAttr(iwgt,n) - - ! loop over attributes being regridded. - -!DIR$ IVDEP - do m=1,num_indices - - yAV%rAttr(m,row) = yAV%rAttr(m,row) + wgt * xAV%rAttr(m,col) - - end do ! m=1,num_indices - - end do ! n=1,num_elements - - endif - -! lists are not identical or only want to do part. - else - - if(rListIsPresent) then - call GetIndices(xAVindices,xAV%rList,trim(rList)) - - if(TrListIsPresent) then - call GetIndices(yAVindices,yAV%rList,trim(TrList)) - - if(size(xAVindices) /= size(yAVindices)) then - call die(myname_,"Arguments rList and TrList do not& - &contain the same number of items") - endif - - else - call GetIndices(yAVindices,yAV%rList,trim(rList)) - endif - - num_indices=size(yAVindices) - - ! nothing to do if num_indices <=0 - if (num_indices <= 0) then - deallocate(xaVindices, yAVindices, stat=ier) - if(ier/=0) call die(myname_,"deallocate(xAVindices...)",ier) - return - endif - - else - - data_flag = 'REAL' - call SharedAttrIndexList(xAV, yAV, data_flag, num_indices, & - xAVindices, yAVindices) - - ! nothing to do if num_indices <=0 - if (num_indices <= 0) then - deallocate(xaVindices, yAVindices, stat=ier) - call warn(myname_,"No matching indicies found, returning.") - if(ier/=0) call die(myname_,"deallocate(xaVinindices...)",ier) - return - endif - endif - -! Check if the indices are contiguous in memory for faster copy - contiguous=.true. - ycontiguous=.true. - do i=2,num_indices - if(xaVindices(i) /= xAVindices(i-1)+1) then - contiguous = .false. - exit - endif - enddo - if(contiguous) then - do i=2,num_indices - if(yAVindices(i) /= yAVindices(i-1)+1) then - contiguous=.false. - ycontiguous=.false. - exit - endif - enddo - endif - - ! zero the right parts of the output AttributeVector - ysize = AttrVect_lsize(yAV) - numav=size(yAVindices) - - if(ycontiguous) then - outxmin=yaVindices(1)-1 -!dir$ collapse - do j=1,ysize - do i=1,numav - yAV%rAttr(outxmin+i,j)=0._FP - enddo - enddo - else - do j=1,ysize - do i=1,numav - yAV%rAttr(yaVindices(i),j)=0._FP - enddo - enddo - endif - - ! loop over matrix elements - - if(contiguous) then - outxmin=yaVindices(1)-1 - inxmin=xaVindices(1)-1 - do n=1,num_elements - - row = sMat%data%iAttr(irow,n) - col = sMat%data%iAttr(icol,n) - wgt = sMat%data%rAttr(iwgt,n) - - ! loop over attributes being regridded. -!DIR$ IVDEP - do m=1,num_indices - yAV%rAttr(outxmin+m,row) = & - yAV%rAttr(outxmin+m,row) + & - wgt * xAV%rAttr(inxmin+m,col) - end do ! m=1,num_indices - end do ! 
n=1,num_elements - else - do n=1,num_elements - - row = sMat%data%iAttr(irow,n) - col = sMat%data%iAttr(icol,n) - wgt = sMat%data%rAttr(iwgt,n) - - ! loop over attributes being regridded. -!DIR$ IVDEP - do m=1,num_indices - yAV%rAttr(yAVindices(m),row) = & - yAV%rAttr(yAVindices(m),row) + & - wgt * xAV%rAttr(xAVindices(m),col) - end do ! m=1,num_indices - end do ! n=1,num_elements - endif - - - deallocate(xAVindices, yAVindices, stat=ierr) - if(ierr /= 0) call die(myname_,'first deallocate(xAVindices...',ierr) - - endif ! if(List_identical(xAV%rAttr, yAV%rAttr))... - ! And we are finished! - - end subroutine sMatAvMult_DataLocal_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math + Computer Science Division / Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: sMatAvMult_SMPlus_ - Parallel Multiply Using SparseMatrixPlus -! -! !DESCRIPTION: -! This routine performs distributed parallel sparse matrix-vector -! multiplication ${\bf y} = {\bf M} {\bf x}$, where {\bf y} and -! {\bf x} are represented by the {\tt AttrVect} arguments {\tt yAV} and -! {\tt xAV}, respectively. The matrix {\bf M} is stored in the input -! {\tt SparseMatrixPlus} argument {\tt sMatPlus}, which also contains -! all the information needed to coordinate the communications required to -! gather intermediate vectors used in the multiplication process, and to -! reduce partial sums as needed. -! By default, the multiplication occurs for each of the common {\tt REAL} attributes -! shared by {\tt xAV} and {\tt yAV}. This routine is capable of -! cross-indexing the attributes and performing the necessary multiplications. -! -! If the optional argument {\tt rList} is present, only the attributes listed will -! be multiplied. If the attributes have different names in {\tt yAV}, the optional -! {\tt TrList} argument can be used to provide the translation. -! -! If the optional argument {\tt Vector} is present and true, the vector -! architecture-friendly portions of this routine will be invoked. It -! will also cause the vector parts of {\tt sMatPlus} to be initialized if they -! have not been already. -! -! !INTERFACE: - - subroutine sMatAvMult_SMPlus_(xAV, sMatPlus, yAV, Vector, rList, TrList) -! -! !USES: -! - use m_stdio - use m_die - use m_mpif90 - - use m_String, only : String - use m_String, only : String_ToChar => ToChar - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_Rcopy => Rcopy - use m_AttrVect, only : AttrVect_zero => zero - - use m_Rearranger, only : Rearranger - use m_Rearranger, only : Rearrange - - use m_SparseMatrixPlus, only : SparseMatrixPlus - use m_SparseMatrixPlus, only : Xonly - use m_SparseMatrixPlus, only : Yonly - use m_SparseMatrixPlus, only : XandY - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: xAV - logical, optional, intent(in) :: Vector - character(len=*),optional, intent(in) :: rList - character(len=*),optional, intent(in) :: TrList - -! !INPUT/OUTPUT PARAMETERS: - - type(AttrVect), intent(inout) :: yAV - type(SparseMatrixPlus), intent(inout) :: sMatPlus - -! !SEE ALSO: -! The MCT module m_AttrVect for more information about the AttrVect type. -! The MCT module m_SparseMatrixPlus for more information about the -! SparseMatrixPlus type. - -! !REVISION HISTORY: -! 26Sep02 - J.W. 
Larson - API specification and -! implementation. -! 29Oct03 - R. Jacob - add vector argument to all -! calls to Rearrange and DataLocal_. Add optional input -! argument to change value (assumed false) -! 22Nov06 - R. Jacob - add rList,TrList arguments -! 10Jan08 - T. Craig - zero out intermediate aVs before -! they are used -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::sMatAvMult_SMPlus_' - type(AttrVect) :: xPrimeAV, yPrimeAV - type(AttrVect) :: yAVre - integer :: ierr - logical :: usevector - character(len=5) :: strat - - ! check arguments - if(present(TrList)) then - if(.not.present(rList)) then - call die(myname_,'MCTERROR: TrList provided without rList',2) - endif - endif - - usevector = .FALSE. - if(present(Vector)) then - if(Vector)usevector = .TRUE. - endif - ! Examine the parallelization strategy, and act accordingly - - strat = String_ToChar(sMatPlus%Strategy) - select case( strat ) - case('Xonly') - ! Create intermediate AttrVect for x' - call AttrVect_init(xPrimeAV, xAV, sMatPlus%XPrimeLength) - call AttrVect_zero(xPrimeAV) - ! Rearrange data from x to get x' - call Rearrange(xAV, xPrimeAV, sMatPlus%XToXPrime, & - tag=sMatPlus%Tag, vector=usevector,& - alltoall=.true., handshake=.true. ) - - ! Perform perfectly data-local multiply y = Mx' - if (present(TrList).and.present(rList)) then - call sMatAvMult_DataLocal_(xPrimeAV, sMatPlus%Matrix, yaV, & - Vector=usevector,rList=rList,TrList=TrList) - else if(.not.present(TrList) .and. present(rList)) then - call sMatAvMult_DataLocal_(xPrimeAV, sMatPlus%Matrix, yaV, & - Vector=usevector,rList=rList) - else - call sMatAvMult_DataLocal_(xPrimeAV, sMatPlus%Matrix, yaV, & - Vector=usevector) - endif - - ! Clean up space occupied by x' - call AttrVect_clean(xPrimeAV, ierr) - case('Yonly') - ! Create intermediate AttrVect for y' - if (present(TrList).and.present(rList)) then - call AttrVect_init(yPrimeAV, rList=TrList, lsize=sMatPlus%YPrimeLength) - else if(.not.present(TrList) .and. present(rList)) then - call AttrVect_init(yPrimeAV, rList=rList, lsize=sMatPlus%YPrimeLength) - else - call AttrVect_init(yPrimeAV, yAV, sMatPlus%YPrimeLength) - endif - call AttrVect_zero(yPrimeAV) - - if (present(TrList).or.present(rList)) then - call AttrVect_init(yAVre, yPrimeAV , lsize=AttrVect_lsize(yAV)) - call AttrVect_zero(yAVre) - endif - - ! Perform perfectly data-local multiply y' = Mx - if (present(TrList).and.present(rList)) then - call sMatAvMult_DataLocal_(xAV, sMatPlus%Matrix, yPrimeAV, & - Vector=usevector,rList=rList,TrList=TrList) - else if(.not.present(TrList) .and. present(rList)) then - call sMatAvMult_DataLocal_(xAV, sMatPlus%Matrix, yPrimeAV, & - Vector=usevector,rList=rList) - else - call sMatAvMult_DataLocal_(xAV, sMatPlus%Matrix, yPrimeAV, & - Vector=usevector) - endif - - ! Rearrange/reduce partial sums in y' to get y - if (present(TrList).or.present(rList)) then - call Rearrange(yPrimeAV, yAVre, sMatPlus%YPrimeToY, & - tag=sMatPlus%Tag, sum=.TRUE., Vector=usevector, & - alltoall=.true., handshake=.true. ) - call AttrVect_Rcopy(yAVre,yAV,vector=usevector) - call AttrVect_clean(yAVre, ierr) - else - call Rearrange(yPrimeAV, yAV, sMatPlus%YPrimeToY, & - tag=sMatPlus%Tag, sum=.TRUE., Vector=usevector, & - alltoall=.true., handshake=.true. ) - endif - ! Clean up space occupied by y' - call AttrVect_clean(yPrimeAV, ierr) - - case('XandY') - ! 
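! (Illustrative summary, added for documentation only: the 'XandY'
!  strategy below simply combines the two cases above, i.e.
!      x  --rearrange-->  x'
!      y' = M x'                                  (purely data-local multiply)
!      y' --rearrange, summing partial sums-->  y
!  whereas 'Xonly' performs only the first rearrangement and 'Yonly'
!  only the second.)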
Create intermediate AttrVect for x' - call AttrVect_init(xPrimeAV, xAV, sMatPlus%XPrimeLength) - call AttrVect_zero(xPrimeAV) - - ! Create intermediate AttrVect for y' - if (present(TrList).and.present(rList)) then - call AttrVect_init(yPrimeAV, rList=TrList, lsize=sMatPlus%YPrimeLength) - else if(.not.present(TrList) .and. present(rList)) then - call AttrVect_init(yPrimeAV, rList=rList, lsize=sMatPlus%YPrimeLength) - else - call AttrVect_init(yPrimeAV, yAV, sMatPlus%YPrimeLength) - endif - call AttrVect_zero(yPrimeAV) - - if (present(TrList).or.present(rList)) then - call AttrVect_init(yAVre, yPrimeAV , lsize=AttrVect_lsize(yAV)) - call AttrVect_zero(yAVre) - endif - - ! Rearrange data from x to get x' - call Rearrange(xAV, xPrimeAV, sMatPlus%XToXPrime, & - tag=sMatPlus%Tag, Vector=usevector, & - alltoall=.true., handshake=.true. ) - - ! Perform perfectly data-local multiply y' = Mx' - if (present(TrList).and.present(rList)) then - call sMatAvMult_DataLocal_(xPrimeAV, sMatPlus%Matrix, yPrimeAV, & - Vector=usevector,rList=rList,TrList=TrList) - else if(.not.present(TrList) .and. present(rList)) then - call sMatAvMult_DataLocal_(xPrimeAV, sMatPlus%Matrix, yPrimeAV, & - Vector=usevector,rList=rList) - else - call sMatAvMult_DataLocal_(xPrimeAV, sMatPlus%Matrix, yPrimeAV, & - Vector=usevector) - endif - - ! Rearrange/reduce partial sums in y' to get y - if (present(TrList).or.present(rList)) then - call Rearrange(yPrimeAV, yAVre, sMatPlus%YPrimeToY, & - tag=sMatPlus%Tag, sum=.TRUE., Vector=usevector, & - alltoall=.true., handshake=.true. ) - call AttrVect_Rcopy(yAVre,yAV,vector=usevector) - call AttrVect_clean(yAVre, ierr) - else - call Rearrange(yPrimeAV, yAV, sMatPlus%YPrimeToY, & - tag=sMatPlus%Tag, sum=.TRUE., Vector=usevector, & - alltoall=.true., handshake=.true. ) - endif - - ! Clean up space occupied by x' - call AttrVect_clean(xPrimeAV, ierr) - ! Clean up space occupied by y' - call AttrVect_clean(yPrimeAV, ierr) - case default - write(stderr,'(4a)') myname_, & - ':: FATAL ERROR--parallelization strategy name ',& - String_ToChar(sMatPlus%Strategy),' not supported.' - call die(myname_) - end select - - end subroutine sMatAvMult_SMPlus_ - - end module m_MatAttrVectMul - - - - diff --git a/src/externals/mct/mct/m_Merge.F90 b/src/externals/mct/mct/m_Merge.F90 deleted file mode 100644 index 6700c3bc228..00000000000 --- a/src/externals/mct/mct/m_Merge.F90 +++ /dev/null @@ -1,2912 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Merge - Merge flux and state data from multiple sources. -! -! !DESCRIPTION: This module supports {\em merging} of state and flux -! data from multiple components with overlapping spatial domains for use -! by another component. For example, let the vectors ${\bf a}$ and -! ${\bf b}$ be data from Components $A$ and $B$ that have been -! interpolated onto the physical grid of another component $C$. We wish -! to combine the data from $A$ and $B$ to get a vector ${\bf c}$, which -! represents the merged data on the grid of component $C$. This merge -! process is an element-by-element masked weighted average: -! $$ c_i = {{{{\prod_{j=1}^J} M_{i}^j} {{\prod_{k=1}^K} F_{i}^k} a_i + -! {{\prod_{p=1}^P} N_{i}^p} {{\prod_{q=1}^Q} G_{i}^q} b_i} \over -! 
{{{\prod_{j=1}^J} M_{i}^j} {{\prod_{k=1}^K} F_{i}^k} + -! {{\prod_{p=1}^P} N_{i}^p} {{\prod_{q=1}^Q} G_{i}^q}}}, $$ -! Where ${M_{i}^j}$ and ${N_{i}^p}$ are {\em integer masks} (which have -! value either $0$ or $1$), and ${F_{i}^k}$ and ${G_{i}^q}$ are {\em real -! masks} (which are in the closed interval $[0,1]$). -! -! Currently, we assume that the integer and real masks are stored in -! the same {\tt GeneralGrid} datatype. We also assume--and this is of -! critical importance to the user--that the attributes to be merged are -! the same for all the inputs and output. If the user violates this -! assumption, incorrect merges will occur for any attributes that are -! present in only some (that is not all) of the inputs. -! -! This module supports explicitly the merging data from two, three, and -! four components. There is also a routine named {\tt MergeInData} that -! allows the user to construct other merging schemes. -! -! !INTERFACE: - - module m_Merge - -! -! !USES: -! -! No other modules used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC TYPES: - -! None. - -! !PUBLIC MEMBER FUNCTIONS: - - public :: MergeTwo ! Merge Output from two components - ! for use by a third. - public :: MergeThree ! Merge Output from three components - ! for use by a fourth. - public :: MergeFour ! Merge Output from four components - ! for use by a fifth. - public :: MergeInData ! Merge in data from a single component. - - interface MergeTwo ; module procedure & - MergeTwoGGSP_, & - MergeTwoGGDP_ - end interface - interface MergeThree ; module procedure & - MergeThreeGGSP_, & - MergeThreeGGDP_ - end interface - interface MergeFour ; module procedure & - MergeFourGGSP_, & - MergeFourGGDP_ - end interface - interface MergeInData ; module procedure & - MergeInDataGGSP_, & - MergeInDataGGDP_ - end interface - -! !PUBLIC DATA MEMBERS: - -! None. - -! !REVISION HISTORY: -! 19Jun02 - J.W. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_Merge' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MergeTwoGGSP_ - Merge Data from Two Sources -! -! !DESCRIPTION: This routine merges {\tt REAL} attribute data from -! two input {\tt AttrVect} arguments {\tt inAv1} and {\tt inAv2} to -! a third {\tt AttrVect} {\tt outAv}. The attributes to be merged are -! determined entirely by the real attributes of {\tt outAv}. If -! {\tt outAv} shares one or more attributes with either of the inputs -! {\tt inAv1} or {\tt inAv2}, a merge is performed on the individual -! {\em intersections} of attributes between the pairs $({\tt outAv}, -! {\tt inAv1})$ and $({\tt outAv},{\tt inAv1})$. Currently, it is assumed -! that these pairwise intersections are all equal. This assumption is of -! critical importance to the user. If the user violates this -! assumption, incorrect merges of attributes that are present in some -! (but not all) of the inputs will result. -! -! The merge operatrion is a masked -! weighted element-by-element sum, as outlined in the following example. -! Let the vectors ${\bf a}$ and ${\bf b}$ be data from Components $A$ -! and $B$ that have been interpolated onto the physical grid of another -! component $C$. We wish to combine the data from $A$ and $B$ to get -! 
a vector ${\bf c}$, which represents the merged data on the grid of -! component $C$. The merge relation to obtain the $i$th element of -! {\bf c} is -! $$ c_i = {1 \over {W_i}} \bigg\{ {{\prod_{j=1}^J} \kappa_{i}^j} -! {{\prod_{k=1}^K} \alpha_{i}^k} {a_i} + {{\prod_{l=1}^L} \lambda_{i}^l} -! {{\prod_{m=1}^M} \beta_{i}^m} {b_i} \bigg\} , $$ -! where -! $$ {W_i} = {{\prod_{j=1}^J} \kappa_{i}^j} {{\prod_{k=1}^K} \alpha_{i}^k} + -! {{\prod_{l=1}^L} \lambda_{i}^l} {{\prod_{m=1}^M} \beta_{i}^m}. $$ -! The quantities ${\kappa_{i}^j}$ and ${\lambda_{i}^l}$ are {\em integer -! masks} (which have value either $0$ or $1$), and ${\alpha_{i}^k}$ and -! ${\beta_{i}^m}$ are {\em real masks} (which are in the closed interval -! $[0,1]$). -! -! The integer and real masks are stored as attributes to the same input -! {\tt GeneralGrid} argument {\tt GGrid}. The mask attribute names are -! stored as substrings to the colon-separated strings contained in the -! input {\tt CHARACTER} arguments {\tt iMaskTags1}, {\tt iMaskTags2}, -! {\tt rMaskTags1}, and {\tt rMaskTags2}. The {\tt LOGICAL} input -! argument {\tt CheckMasks} governs how the masks are applied. If -! ${\tt CheckMasks} = {\tt .TRUE.}$, the entries are checked to ensure -! they meet the definitions of real and integer masks. If -! ${\tt CheckMasks} = {\tt .TRUE.}$ then the masks are multiplied -! together on an element-by-element basis with no validation of their -! entries (this option results in slightly higher performance). -! -! This routine returns the sume of the masked weights as a diagnostic. -! This quantity is returned in the output {\tt REAL} array {\tt WeightSum}. -! -! The correspondence between the quantities in the above merge relation -! and the arguments to this routine are summarized in the table. -! \begin{center} -! \begin{tabular}{|l|l|l|}\hline -! {\bf Quantity} & {\bf Stored in} & {\bf Referenced by} \\ -! & {\bf Argument} & {\bf Argument} \\ -! \hline -! \hline -! $ {a_i} $ & {\tt inAv1} & \\ -! \hline -! $ {b_i} $ & {\tt inAv2} & \\ -! \hline -! $ {c_i} $ & {\tt outAv} & \\ -! \hline -! $ {\kappa_i^j}, j=1,\ldots,J $ & {\tt GGrid} & {\tt iMaskTags1}\\ -! & & ($J$ items) \\ -! \hline -! $ {\alpha_i^k}, k=1,\ldots,K $ & {\tt GGrid} & {\tt rMaskTags1}\\ -! & & ($K$ items) \\ -! \hline -! $ {\lambda_i^l}, l=1,\ldots,L $ & {\tt GGrid} & {\tt iMaskTags2}\\ -! & & ($L$ items) \\ -! \hline -! $ {\beta_i^m}, m=1,\ldots,M $ & {\tt GGrid} & {\tt rMaskTags2}\\ -! & & ($M$ items) \\ -! \hline -! $ {W_i} $ & {\tt WeightSum} & \\ -! \hline -! \end{tabular} -! \end{center} -! -! !INTERFACE: - - subroutine MergeTwoGGSP_(inAv1, iMaskTags1, rMaskTags1, & - inAv2, iMaskTags2, rMaskTags2, & - GGrid, CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : SP, FP - - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAv1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! 
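! Illustrative call, added for documentation only (not part of the
! original MCT source).  Merging fields from components A and B onto
! the grid of a third component, with one integer mask tag and one real
! fraction tag per source; every name below (aAV, bAV, cAV, cGrid, wSum
! and the mask/fraction tags) is hypothetical.  Because WeightSum is a
! POINTER argument, the caller allocates it to the local length first:
!
!   use m_realkinds, only : SP
!   use m_AttrVect,  only : AttrVect_lsize => lsize
!   use m_Merge,     only : MergeTwo
!   real(SP), dimension(:), pointer :: wSum
!
!   allocate(wSum(AttrVect_lsize(cAV)))
!   call MergeTwo(aAV, 'imask', 'afrac', bAV, 'imask', 'bfrac', &
!                 cGrid, .TRUE., cAV, wSum)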
!INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(SP), dimension(:), pointer :: WeightSum - -! !REVISION HISTORY: -! 19Jun02 - Jay Larson - Interface spec. -! 3Jul02 - Jay Larson - Implementation. -! 10Jul02 - J. Larson - Improved argument -! checking. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeTwoGGSP_' - - integer :: i, j - real(FP) :: invWeightSum - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? - - if(.not.(List_allocated(inAv1%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv1 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv2%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv2 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes!' - call die(myname_) - endif - - if(present(iMaskTags1) .or. present(iMaskTags2)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes!' - call die(myname_) - endif - endif - - if(present(rMaskTags1) .or. present(rMaskTags2)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes!' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated!' - call die(myname_) - endif - - ! Do the vector lengths match? - - if(AttrVect_lsize(inAv1) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv1 and outAv must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv2 and outAv must match.', & - 'AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and GGrid must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and WeightSum must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Initialize the elements of WeightSum(:) to zero: - - do i=1,size(WeightSum) - WeightSum(i) = 0._FP - end do - - ! Process the incoming data one input AttrVect and mask tag - ! combination at a time. - - ! First input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags1 and - ! rMaskTags1. - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! 
both real and integer masks - call MergeInDataGGSP_(inAv1, iMaskTags1, rMaskTags1, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv1, iMaskTags=iMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags1)) then ! only real masks - call MergeInDataGGSP_(inAv1, rMaskTags=rMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags1))... - - ! Second input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags2 and - ! rMaskTags2. - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both real and integer masks - call MergeInDataGGSP_(inAv2, iMaskTags2, rMaskTags2, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv2, iMaskTags=iMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags2)) then ! only real masks - call MergeInDataGGSP_(inAv2, rMaskTags=rMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags2))... - - ! Now we must renormalize the entries in outAv by dividing - ! element-by-element by the sums of the merge weights, which - ! were accumulated in WeightSum(:) - - do i=1,AttrVect_lsize(outAv) - - if(WeightSum(i) /= 0._FP) then - invWeightSum = 1._FP / WeightSum(i) - else - write(stderr,'(2a,i8,a)') myname_,':: FATAL--WeightSum(', & - i,') is zero!' - call die(myname_) - endif - - do j=1,AttrVect_nRAttr(outAv) - outAv%rAttr(j,i) = invWeightSum * outAv%rAttr(j,i) - end do - - end do - - ! The merge is now complete. - - end subroutine MergeTwoGGSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! -! !IROUTINE: MergeTwoGGDP_ - merge data from two components. -! -! !DESCRIPTION: -! Double precision version of MergeTwoGGSP_ -! -! !INTERFACE: - - subroutine MergeTwoGGDP_(inAv1, iMaskTags1, rMaskTags1, & - inAv2, iMaskTags2, rMaskTags2, & - GGrid, CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : DP, FP - - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAv1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(DP), dimension(:), pointer :: WeightSum - -! !REVISION HISTORY: -! 
19Jun02 - Jay Larson - Interface spec. -! 3Jul02 - Jay Larson - Implementation. -! 10Jul02 - J. Larson - Improved argument -! checking. -!_______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeTwoGGDP_' - - integer :: i, j - real(FP) :: invWeightSum - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? - - if(.not.(List_allocated(inAv1%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv1 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv2%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv2 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes!' - call die(myname_) - endif - - if(present(iMaskTags1) .or. present(iMaskTags2)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes!' - call die(myname_) - endif - endif - - if(present(rMaskTags1) .or. present(rMaskTags2)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes!' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated!' - call die(myname_) - endif - - ! Do the vector lengths match? - - if(AttrVect_lsize(inAv1) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv1 and outAv must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv2 and outAv must match.', & - 'AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and GGrid must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and WeightSum must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Initialize the elements of WeightSum(:) to zero: - - do i=1,size(WeightSum) - WeightSum(i) = 0._FP - end do - - ! Process the incoming data one input AttrVect and mask tag - ! combination at a time. - - ! First input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags1 and - ! rMaskTags1. - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! both real and integer masks - call MergeInDataGGDP_(inAv1, iMaskTags1, rMaskTags1, GGrid, & - CheckMasks, outAv, WeightSum) - else ! 
only integer masks - call MergeInDataGGDP_(inAv1, iMaskTags=iMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags1)) then ! only real masks - call MergeInDataGGDP_(inAv1, rMaskTags=rMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags1))... - - ! Second input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags2 and - ! rMaskTags2. - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both real and integer masks - call MergeInDataGGDP_(inAv2, iMaskTags2, rMaskTags2, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv2, iMaskTags=iMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags2)) then ! only real masks - call MergeInDataGGDP_(inAv2, rMaskTags=rMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags2))... - - ! Now we must renormalize the entries in outAv by dividing - ! element-by-element by the sums of the merge weights, which - ! were accumulated in WeightSum(:) - - do i=1,AttrVect_lsize(outAv) - - if(WeightSum(i) /= 0._FP) then - invWeightSum = 1._FP / WeightSum(i) - else - write(stderr,'(2a,i8,a)') myname_,':: FATAL--WeightSum(', & - i,') is zero!' - call die(myname_) - endif - - do j=1,AttrVect_nRAttr(outAv) - outAv%rAttr(j,i) = invWeightSum * outAv%rAttr(j,i) - end do - - end do - - ! The merge is now complete. - - end subroutine MergeTwoGGDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MergeThreeGGSP_ - Merge Data from Three Sources -! -! !DESCRIPTION: This routine merges {\tt REAL} attribute data from -! three input {\tt AttrVect} arguments {\tt inAv1} , {\tt inAv2}, and -! {\tt inAv3} to a fourth {\tt AttrVect} {\tt outAv}. The attributes to -! be merged are determined entirely by the real attributes of {\tt outAv}. -! If {\tt outAv} shares one or more attributes with any of the inputs -! {\tt inAv1}, {\tt inAv2}, or {\tt inAv3}, a merge is performed on the -! individual {\em intersections} of attributes between the pairs -! $({\tt outAv},{\tt inAv1})$, $({\tt outAv},{\tt inAv2})$, -! and $({\tt outAv},{\tt inAv3})$. Currently, it is assumed that these -! pairwise intersections are all equal. This assumption is of -! critical importance to the user. If the user violates this -! assumption, incorrect merges of any attributes present only in some -! (but not all) inputs will result. -! -! The merge operatrion is a masked -! weighted element-by-element sum, as outlined in the following example. -! Let the vectors ${\bf a}$,${\bf b}$, and ${\bf c}$ be data from -! Components $A$, $B$, and $C$ that have been interpolated onto the -! physical grid of another component $D$. We wish to combine the data -! from $A$, $B$ and $C$ to get a vector ${\bf d}$, which represents the -! merged data on the grid of component $D$. 
The merge relation to obtain -! the $i$th element of ${\bf d}$ is -! $$ d_i = {1 \over {W_i}} \bigg\{ {{\prod_{j=1}^J} \kappa_{i}^j} -! {{\prod_{k=1}^K} \alpha_{i}^k} {a_i} + {{\prod_{l=1}^L} \lambda_{i}^l} -! {{\prod_{m=1}^M} \beta_{i}^m} {b_i} + {{\prod_{p=1}^P} \mu_{i}^p} -! {{\prod_{q=1}^Q} \gamma_{i}^q} {c_i} \bigg\} , $$ -! where -! $$ {W_i} = {{\prod_{j=1}^J} \kappa_{i}^j} {{\prod_{k=1}^K} \alpha_{i}^k} + -! {{\prod_{l=1}^L} \lambda_{i}^l} {{\prod_{m=1}^M} \beta_{i}^m} + -! {{\prod_{p=1}^P} \mu_{i}^p} {{\prod_{q=1}^Q} \gamma_{i}^q}. $$ -! The quantities ${\kappa_{i}^j}$, ${\lambda_{i}^p}$, and ${\mu_{i}^p}$ are -! {\em integer masks} (which have value either $0$ or $1$), and -! ${\alpha_{i}^k}$, ${\beta_{i}^m}$, and ${\gamma_{i}^q}$ are {\em real -! masks} (which are in the closed interval $[0,1]$). -! -! The integer and real masks are stored as attributes to the same input -! {\tt GeneralGrid} argument {\tt GGrid}. The mask attribute names are -! stored as substrings to the colon-separated strings contained in the -! input {\tt CHARACTER} arguments {\tt iMaskTags1}, {\tt iMaskTags2}, -! {\tt iMaskTags3}, {\tt rMaskTags1}, {\tt rMaskTags2}, and -! {\tt rMaskTags3}. The {\tt LOGICAL} input argument {\tt CheckMasks} -! governs how the masks are applied. If ${\tt CheckMasks} = {\tt .TRUE.}$, -! the entries are checked to ensure they meet the definitions of real -! and integer masks. If ${\tt CheckMasks} = {\tt .FALSE.}$ then the masks -! are multiplied together on an element-by-element basis with no validation -! of their entries (this option results in slightly higher performance). -! -! This routine returns the sum of the masked weights as a diagnostic. -! This quantity is returned in the output {\tt REAL} array {\tt WeightSum}. -! -! The correspondence between the quantities in the above merge relation -! and the arguments to this routine are summarized in the table. -! \begin{center} -! \begin{tabular}{|l|l|l|}\hline -! {\bf Quantity} & {\bf Stored in} & {\bf Referenced by} \\ -! & {\bf Argument} & {\bf Argument} \\ -! \hline -! \hline -! $ {a_i} $ & {\tt inAv1} & \\ -! \hline -! $ {b_i} $ & {\tt inAv2} & \\ -! \hline -! $ {c_i} $ & {\tt inAv3} & \\ -! \hline -! $ {d_i} $ & {\tt outAv} & \\ -! \hline -! $ {\kappa_i^j}, j=1,\ldots,J $ & {\tt GGrid} & {\tt iMaskTags1}\\ -! & & ($J$ items) \\ -! \hline -! $ {\alpha_i^k}, k=1,\ldots,K $ & {\tt GGrid} & {\tt rMaskTags1}\\ -! & & ($K$ items) \\ -! \hline -! $ {\lambda_i^l}, l=1,\ldots,L $ & {\tt GGrid} & {\tt iMaskTags2}\\ -! & & ($L$ items) \\ -! \hline -! $ {\beta_i^m}, m=1,\ldots,M $ & {\tt GGrid} & {\tt rMaskTags2}\\ -! & & ($M$ items) \\ -! \hline -! $ {\mu_i^p}, p=1,\ldots,P $ & {\tt GGrid} & {\tt iMaskTags3}\\ -! & & ($L$ items) \\ -! \hline -! $ {\gamma_i^q}, q=1,\ldots,Q $ & {\tt GGrid} & {\tt rMaskTags3}\\ -! & & ($M$ items) \\ -! \hline -! $ {W_i} $ & {\tt WeightSum} & \\ -! \hline -! \end{tabular} -! \end{center} -! -! !INTERFACE: - - subroutine MergeThreeGGSP_(inAv1, iMaskTags1, rMaskTags1, & - inAv2, iMaskTags2, rMaskTags2, & - inAv3, iMaskTags3, rMaskTags3, & - GGrid, CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : SP, FP - - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! 
- type(AttrVect), intent(IN) :: inAv1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - type(AttrVect), intent(IN) :: inAv3 - character(len=*), optional, intent(IN) :: iMaskTags3 - character(len=*), optional, intent(IN) :: rMaskTags3 - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(SP), dimension(:), pointer :: WeightSum - -! !REVISION HISTORY: -! 19Jun02 - Jay Larson - Interface spec. -! 3Jul02 - Jay Larson - Implementation. -! 10Jul02 - J. Larson - Improved argument -! checking. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeThreeGGSP_' - - integer :: i, j - real(FP) :: invWeightSum - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? - - if(.not.(List_allocated(inAv1%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv1 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv2%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv2 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv3%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv3 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes!' - call die(myname_) - endif - - if(present(iMaskTags1) .or. present(iMaskTags2) .or. present(iMaskTags3)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes!' - call die(myname_) - endif - endif - - if(present(rMaskTags1) .or. present(rMaskTags2) .or. present(rMaskTags3)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes!' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated!' - call die(myname_) - endif - - ! Do the vector lengths match? 
- - if(AttrVect_lsize(inAv1) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv1 and outAv must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv2 and outAv must match.', & - 'AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv3) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv3 and outAv must match.', & - 'AttrVect_lsize(inAv3) = ',AttrVect_lsize(inAv3), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and GGrid must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and WeightSum must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Initialize the elements of WeightSum(:) to zero: - - do i=1,size(WeightSum) - WeightSum(i) = 0._FP - end do - - ! Process the incoming data one input AttrVect and mask tag - ! combination at a time. - - ! First input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags1 and - ! rMaskTags1. - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! both real and integer masks - call MergeInDataGGSP_(inAv1, iMaskTags1, rMaskTags1, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv1, iMaskTags=iMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags1)) then ! only real masks - call MergeInDataGGSP_(inAv1, rMaskTags=rMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags1))... - - ! Second input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags2 and - ! rMaskTags2. - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both real and integer masks - call MergeInDataGGSP_(inAv2, iMaskTags2, rMaskTags2, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv2, iMaskTags=iMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags2)) then ! only real masks - call MergeInDataGGSP_(inAv2, rMaskTags=rMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags2))... - - ! 
Third input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags3 and - ! rMaskTags3. - - if(present(iMaskTags3)) then - - if(present(rMaskTags3)) then ! both real and integer masks - call MergeInDataGGSP_(inAv3, iMaskTags3, rMaskTags3, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv3, iMaskTags=iMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags3)) then ! only real masks - call MergeInDataGGSP_(inAv3, rMaskTags=rMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags3))... - - ! Now we must renormalize the entries in outAv by dividing - ! element-by-element by the sums of the merge weights, which - ! were accumulated in WeightSum(:) - - do i=1,AttrVect_lsize(outAv) - - if(WeightSum(i) /= 0._FP) then - invWeightSum = 1._FP / WeightSum(i) - else - write(stderr,'(2a,i8,a)') myname_,':: FATAL--WeightSum(', & - i,') is zero!' - call die(myname_) - endif - - do j=1,AttrVect_nRAttr(outAv) - outAv%rAttr(j,i) = invWeightSum * outAv%rAttr(j,i) - end do - - end do - - ! The merge is now complete. - - end subroutine MergeThreeGGSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! -! !IROUTINE: MergeThreeGGDP_ - merge data from three components. -! -! !DESCRIPTION: -! Double precision version of MergeThreeGGSP_ -! -! !INTERFACE: - - subroutine MergeThreeGGDP_(inAv1, iMaskTags1, rMaskTags1, & - inAv2, iMaskTags2, rMaskTags2, & - inAv3, iMaskTags3, rMaskTags3, & - GGrid, CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : DP, FP - - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAv1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - type(AttrVect), intent(IN) :: inAv3 - character(len=*), optional, intent(IN) :: iMaskTags3 - character(len=*), optional, intent(IN) :: rMaskTags3 - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(DP), dimension(:), pointer :: WeightSum - -! !REVISION HISTORY: -! 19Jun02 - Jay Larson - Interface spec. -! 3Jul02 - Jay Larson - Implementation. -! 10Jul02 - J. Larson - Improved argument -! checking. -!_______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeThreeGGDP_' - - integer :: i, j - real(FP) :: invWeightSum - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? 
- - if(.not.(List_allocated(inAv1%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv1 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv2%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv2 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv3%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv3 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes!' - call die(myname_) - endif - - if(present(iMaskTags1) .or. present(iMaskTags2) .or. present(iMaskTags3)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes!' - call die(myname_) - endif - endif - - if(present(rMaskTags1) .or. present(rMaskTags2) .or. present(rMaskTags3)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes!' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated!' - call die(myname_) - endif - - ! Do the vector lengths match? - - if(AttrVect_lsize(inAv1) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv1 and outAv must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv2 and outAv must match.', & - 'AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv3) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv3 and outAv must match.', & - 'AttrVect_lsize(inAv3) = ',AttrVect_lsize(inAv3), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and GGrid must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and WeightSum must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Initialize the elements of WeightSum(:) to zero: - - do i=1,size(WeightSum) - WeightSum(i) = 0._FP - end do - - ! Process the incoming data one input AttrVect and mask tag - ! combination at a time. - - ! First input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags1 and - ! rMaskTags1. - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! 
both real and integer masks - call MergeInDataGGDP_(inAv1, iMaskTags1, rMaskTags1, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv1, iMaskTags=iMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags1)) then ! only real masks - call MergeInDataGGDP_(inAv1, rMaskTags=rMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags1))... - - ! Second input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags2 and - ! rMaskTags2. - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both real and integer masks - call MergeInDataGGDP_(inAv2, iMaskTags2, rMaskTags2, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv2, iMaskTags=iMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags2)) then ! only real masks - call MergeInDataGGDP_(inAv2, rMaskTags=rMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags2))... - - ! Third input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags3 and - ! rMaskTags3. - - if(present(iMaskTags3)) then - - if(present(rMaskTags3)) then ! both real and integer masks - call MergeInDataGGDP_(inAv3, iMaskTags3, rMaskTags3, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv3, iMaskTags=iMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags3)) then ! only real masks - call MergeInDataGGDP_(inAv3, rMaskTags=rMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags3))... - - ! Now we must renormalize the entries in outAv by dividing - ! element-by-element by the sums of the merge weights, which - ! were accumulated in WeightSum(:) - - do i=1,AttrVect_lsize(outAv) - - if(WeightSum(i) /= 0._FP) then - invWeightSum = 1._FP / WeightSum(i) - else - write(stderr,'(2a,i8,a)') myname_,':: FATAL--WeightSum(', & - i,') is zero!' - call die(myname_) - endif - - do j=1,AttrVect_nRAttr(outAv) - outAv%rAttr(j,i) = invWeightSum * outAv%rAttr(j,i) - end do - - end do - - ! The merge is now complete. - - end subroutine MergeThreeGGDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MergeFourGGSP_ - Merge Data from Four Sources -! -! !DESCRIPTION: This routine merges {\tt REAL} attribute data from -! four input {\tt AttrVect} arguments {\tt inAv1} , {\tt inAv2}, -! {\tt inAv3}, and {\tt inAv4} to a fifth {\tt AttrVect} {\tt outAv}. The -! attributes to be merged are determined entirely by the real attributes -! 
of {\tt outAv}. If {\tt outAv} shares one or more attributes with any of -! the inputs {\tt inAv1}, {\tt inAv2}, {\tt inAv3}, or {\tt inAv4}, a merge -! is performed on the individual {\em intersections} of attributes between -! the pairs $({\tt outAv},{\tt inAv1})$, $({\tt outAv},{\tt inAv2})$, -! $({\tt outAv},{\tt inAv3})$, and $({\tt outAv},{\tt inAv3})$. Currently, -! it is assumed that these pairwise intersections are all equal. This -! assumption is of critical importance to the user. If the user violates -! this assumption, incorrect merges of any attributes present only in some -! (but not all) the inputs will result. -! -! The merge operatrion is a masked -! weighted element-by-element sum, as outlined in the following example. -! Let the vectors ${\bf a}$,${\bf b}$, ${\bf c}$ and ${\bf d}$ be data from -! Components $A$, $B$, $C$, and $D$ that have been interpolated onto the -! physical grid of another component $E$. We wish to combine the data -! from $A$, $B$, $C$, and $D$ to get a vector ${\bf e}$, which represents the -! merged data on the grid of component $E$. The merge relation to obtain -! the $i$th element of {\bf e} is -! $$ e_i = {1 \over {W_i}} \bigg\{ {{\prod_{j=1}^J} \kappa_{i}^j} -! {{\prod_{k=1}^K} \alpha_{i}^k} {a_i} + {{\prod_{l=1}^L} \lambda_{i}^l} -! {{\prod_{m=1}^M} \beta_{i}^m} {b_i} + {{\prod_{p=1}^P} \mu_{i}^p} -! {{\prod_{q=1}^Q} \gamma_{i}^q} {c_i} + -! {{\prod_{r=1}^R} \nu_{i}^r} {{\prod_{s=1}^S} \delta_{i}^s} {d_i} \bigg\} , $$ -! where -! $$ {W_i} = {{\prod_{j=1}^J} \kappa_{i}^j} {{\prod_{k=1}^K} \alpha_{i}^k} + -! {{\prod_{l=1}^L} \lambda_{i}^l} {{\prod_{m=1}^M} \beta_{i}^m} + -! {{\prod_{p=1}^P} \mu_{i}^p} {{\prod_{q=1}^Q} \gamma_{i}^q} + -! {{\prod_{r=1}^R} \nu_{i}^r} {{\prod_{s=1}^S} \delta_{i}^s}. $$ -! The quantities ${\kappa_{i}^j}$, ${\lambda_{i}^p}$, ${\mu_{i}^p}$, and -! ${\nu_{i}^r}$ are {\em integer masks} (which have value either $0$ or $1$), -! and ${\alpha_{i}^k}$, ${\beta_{i}^m}$, ${\gamma_{i}^q}$, and ${\delta_{i}^s}$ -! are {\em real masks} (which are in the closed interval $[0,1]$). -! -! The integer and real masks are stored as attributes to the same input -! {\tt GeneralGrid} argument {\tt GGrid}. The mask attribute names are -! stored as substrings to the colon-separated strings contained in the -! input {\tt CHARACTER} arguments {\tt iMaskTags1}, {\tt iMaskTags2}, -! {\tt iMaskTags3}, {\tt iMaskTags4}, {\tt rMaskTags1}, and {\tt rMaskTags2}, -! {\tt rMaskTags3}, and {\tt rMaskTags4}, . The {\tt LOGICAL} input -! argument {\tt CheckMasks} governs how the masks are applied. If -! ${\tt CheckMasks} = {\tt .TRUE.}$, the entries are checked to ensure -! they meet the definitions of real and integer masks. If ${\tt CheckMasks} -! = {\tt .FALSE.}$ then the masks are multiplied together on an -! element-by-element basis with no validation of their entries (this option -! results in slightly higher performance). -! -! This routine returns the sume of the masked weights as a diagnostic. -! This quantity is returned in the output {\tt REAL} array {\tt WeightSum}. -! -! The correspondence between the quantities in the above merge relation -! and the arguments to this routine are summarized in the table. -! \begin{center} -! \begin{tabular}{|l|l|l|}\hline -! {\bf Quantity} & {\bf Stored in} & {\bf Referenced by} \\ -! & {\bf Argument} & {\bf Argument} \\ -! \hline -! \hline -! $ {a_i} $ & {\tt inAv1} & \\ -! \hline -! $ {b_i} $ & {\tt inAv2} & \\ -! \hline -! $ {c_i} $ & {\tt inAv3} & \\ -! \hline -! $ {d_i} $ & {\tt inAv4} & \\ -! 
\hline -! $ {e_i} $ & {\tt outAv} & \\ -! \hline -! $ {\kappa_i^j}, j=1,\ldots,J $ & {\tt GGrid} & {\tt iMaskTags1}\\ -! & & ($J$ items) \\ -! \hline -! $ {\alpha_i^k}, k=1,\ldots,K $ & {\tt GGrid} & {\tt rMaskTags1}\\ -! & & ($K$ items) \\ -! \hline -! $ {\lambda_i^l}, l=1,\ldots,L $ & {\tt GGrid} & {\tt iMaskTags2}\\ -! & & ($L$ items) \\ -! \hline -! $ {\beta_i^m}, m=1,\ldots,M $ & {\tt GGrid} & {\tt rMaskTags2}\\ -! & & ($M$ items) \\ -! \hline -! $ {\mu_i^p}, p=1,\ldots,P $ & {\tt GGrid} & {\tt iMaskTags3}\\ -! & & ($L$ items) \\ -! \hline -! $ {\gamma_i^q}, q=1,\ldots,Q $ & {\tt GGrid} & {\tt rMaskTags3}\\ -! & & ($M$ items) \\ -! \hline -! $ {\nu_i^r}, r=1,\ldots,R $ & {\tt GGrid} & {\tt iMaskTags4}\\ -! & & ($L$ items) \\ -! \hline -! $ {\delta_i^s}, s=1,\ldots,S $ & {\tt GGrid} & {\tt rMaskTags4}\\ -! & & ($M$ items) \\ -! \hline -! $ {W_i} $ & {\tt WeightSum} & \\ -! \hline -! \end{tabular} -! \end{center} -! -! !INTERFACE: - - subroutine MergeFourGGSP_(inAv1, iMaskTags1, rMaskTags1, & - inAv2, iMaskTags2, rMaskTags2, & - inAv3, iMaskTags3, rMaskTags3, & - inAv4, iMaskTags4, rMaskTags4, & - GGrid, CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : SP, FP - - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAv1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - type(AttrVect), intent(IN) :: inAv3 - character(len=*), optional, intent(IN) :: iMaskTags3 - character(len=*), optional, intent(IN) :: rMaskTags3 - type(AttrVect), intent(IN) :: inAv4 - character(len=*), optional, intent(IN) :: iMaskTags4 - character(len=*), optional, intent(IN) :: rMaskTags4 - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(SP), dimension(:), pointer :: WeightSum - -! !REVISION HISTORY: -! 19Jun02 - Jay Larson - Interface spec. -! 3Jul02 - Jay Larson - Implementation. -! 10Jul02 - J. Larson - Improved argument -! checking. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeFourGGSP_' - - integer :: i, j - real(FP) :: invWeightSum - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? - - if(.not.(List_allocated(inAv1%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv1 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv2%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv2 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv3%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv3 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv4%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv4 has no real attributes!' 
- call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes!' - call die(myname_) - endif - - if(present(iMaskTags1) .or. present(iMaskTags2) .or. & - present(iMaskTags3) .or. present(iMaskTags4)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes!' - call die(myname_) - endif - endif - - if(present(rMaskTags1) .or. present(rMaskTags2) .or. & - present(rMaskTags3) .or. present(rMaskTags4)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes!' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated!' - call die(myname_) - endif - - ! Do the vector lengths match? - - if(AttrVect_lsize(inAv1) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv1 and outAv must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv2 and outAv must match.', & - 'AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv3) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv3 and outAv must match.', & - 'AttrVect_lsize(inAv3) = ',AttrVect_lsize(inAv3), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv4) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv4 and outAv must match.', & - 'AttrVect_lsize(inAv4) = ',AttrVect_lsize(inAv4), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and GGrid must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and WeightSum must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Initialize the elements of WeightSum(:) to zero: - - do i=1,size(WeightSum) - WeightSum(i) = 0._FP - end do - - ! Process the incoming data one input AttrVect and mask tag - ! combination at a time. - - ! First input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags1 and - ! rMaskTags1. - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! both real and integer masks - call MergeInDataGGSP_(inAv1, iMaskTags1, rMaskTags1, GGrid, & - CheckMasks, outAv, WeightSum) - else ! 
only integer masks - call MergeInDataGGSP_(inAv1, iMaskTags=iMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags1)) then ! only real masks - call MergeInDataGGSP_(inAv1, rMaskTags=rMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags1))... - - ! Second input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags2 and - ! rMaskTags2. - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both real and integer masks - call MergeInDataGGSP_(inAv2, iMaskTags2, rMaskTags2, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv2, iMaskTags=iMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags2)) then ! only real masks - call MergeInDataGGSP_(inAv2, rMaskTags=rMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags2))... - - ! Third input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags3 and - ! rMaskTags3. - - if(present(iMaskTags3)) then - - if(present(rMaskTags3)) then ! both real and integer masks - call MergeInDataGGSP_(inAv3, iMaskTags3, rMaskTags3, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv3, iMaskTags=iMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags3)) then ! only real masks - call MergeInDataGGSP_(inAv3, rMaskTags=rMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags3))... - - ! Fourth input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags4 and - ! rMaskTags4. - - if(present(iMaskTags4)) then - - if(present(rMaskTags4)) then ! both real and integer masks - call MergeInDataGGSP_(inAv4, iMaskTags4, rMaskTags4, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGSP_(inAv4, iMaskTags=iMaskTags4, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags4)) then ! only real masks - call MergeInDataGGSP_(inAv4, rMaskTags=rMaskTags4, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGSP_(inAv4, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags4))... - - ! Now we must renormalize the entries in outAv by dividing - ! element-by-element by the sums of the merge weights, which - ! were accumulated in WeightSum(:) - - do i=1,AttrVect_lsize(outAv) - - if(WeightSum(i) /= 0._FP) then - invWeightSum = 1._FP / WeightSum(i) - else - write(stderr,'(2a,i8,a)') myname_,':: FATAL--WeightSum(', & - i,') is zero!' 
- call die(myname_) - endif - - do j=1,AttrVect_nRAttr(outAv) - outAv%rAttr(j,i) = invWeightSum * outAv%rAttr(j,i) - end do - - end do - - ! The merge is now complete. - - end subroutine MergeFourGGSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! -! !IROUTINE: MergeFourGGDP_ - merge data from four components. -! -! !DESCRIPTION: -! Double precision versions of MergeFourGGSP_ -! -! !INTERFACE: - - subroutine MergeFourGGDP_(inAv1, iMaskTags1, rMaskTags1, & - inAv2, iMaskTags2, rMaskTags2, & - inAv3, iMaskTags3, rMaskTags3, & - inAv4, iMaskTags4, rMaskTags4, & - GGrid, CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : DP, FP - - use m_List, only : List - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAv1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - type(AttrVect), intent(IN) :: inAv3 - character(len=*), optional, intent(IN) :: iMaskTags3 - character(len=*), optional, intent(IN) :: rMaskTags3 - type(AttrVect), intent(IN) :: inAv4 - character(len=*), optional, intent(IN) :: iMaskTags4 - character(len=*), optional, intent(IN) :: rMaskTags4 - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(DP), dimension(:), pointer :: WeightSum - -! !REVISION HISTORY: -! 19Jun02 - Jay Larson - Interface spec. -! 3Jul02 - Jay Larson - Implementation. -! 10Jul02 - J. Larson - Improved argument -! checking. -!_______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeFourGGDP_' - - integer :: i, j - real(FP) :: invWeightSum - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? - - if(.not.(List_allocated(inAv1%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv1 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv2%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv2 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv3%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv3 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(inAv4%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv4 has no real attributes!' - call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes!' - call die(myname_) - endif - - if(present(iMaskTags1) .or. present(iMaskTags2) .or. & - present(iMaskTags3) .or. 
present(iMaskTags4)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes!' - call die(myname_) - endif - endif - - if(present(rMaskTags1) .or. present(rMaskTags2) .or. & - present(rMaskTags3) .or. present(rMaskTags4)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes!' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated!' - call die(myname_) - endif - - ! Do the vector lengths match? - - if(AttrVect_lsize(inAv1) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv1 and outAv must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv2 and outAv must match.', & - 'AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv3) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv3 and outAv must match.', & - 'AttrVect_lsize(inAv3) = ',AttrVect_lsize(inAv3), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv4) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv4 and outAv must match.', & - 'AttrVect_lsize(inAv4) = ',AttrVect_lsize(inAv4), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and GGrid must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv1) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv1 and WeightSum must match.', & - 'AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Initialize the elements of WeightSum(:) to zero: - - do i=1,size(WeightSum) - WeightSum(i) = 0._FP - end do - - ! Process the incoming data one input AttrVect and mask tag - ! combination at a time. - - ! First input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags1 and - ! rMaskTags1. - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! both real and integer masks - call MergeInDataGGDP_(inAv1, iMaskTags1, rMaskTags1, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv1, iMaskTags=iMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags1)) then ! only real masks - call MergeInDataGGDP_(inAv1, rMaskTags=rMaskTags1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! 
no masks at all - call MergeInDataGGDP_(inAv1, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags1))... - - ! Second input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags2 and - ! rMaskTags2. - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both real and integer masks - call MergeInDataGGDP_(inAv2, iMaskTags2, rMaskTags2, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv2, iMaskTags=iMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags2)) then ! only real masks - call MergeInDataGGDP_(inAv2, rMaskTags=rMaskTags2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv2, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags2))... - - ! Third input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags3 and - ! rMaskTags3. - - if(present(iMaskTags3)) then - - if(present(rMaskTags3)) then ! both real and integer masks - call MergeInDataGGDP_(inAv3, iMaskTags3, rMaskTags3, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv3, iMaskTags=iMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags3)) then ! only real masks - call MergeInDataGGDP_(inAv3, rMaskTags=rMaskTags3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv3, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags3))... - - ! Fourth input AttrVect/mask combination...must work through - ! all the possible cases for optional arguments iMaskTags4 and - ! rMaskTags4. - - if(present(iMaskTags4)) then - - if(present(rMaskTags4)) then ! both real and integer masks - call MergeInDataGGDP_(inAv4, iMaskTags4, rMaskTags4, GGrid, & - CheckMasks, outAv, WeightSum) - else ! only integer masks - call MergeInDataGGDP_(inAv4, iMaskTags=iMaskTags4, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - else - - if(present(rMaskTags4)) then ! only real masks - call MergeInDataGGDP_(inAv4, rMaskTags=rMaskTags4, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - else ! no masks at all - call MergeInDataGGDP_(inAv4, GGrid=GGrid, & - CheckMasks=CheckMasks, outAv=outAv, & - WeightSum=WeightSum) - endif - - endif ! if(present(iMaskTags4))... - - ! Now we must renormalize the entries in outAv by dividing - ! element-by-element by the sums of the merge weights, which - ! were accumulated in WeightSum(:) - - do i=1,AttrVect_lsize(outAv) - - if(WeightSum(i) /= 0._FP) then - invWeightSum = 1._FP / WeightSum(i) - else - write(stderr,'(2a,i8,a)') myname_,':: FATAL--WeightSum(', & - i,') is zero!' - call die(myname_) - endif - - do j=1,AttrVect_nRAttr(outAv) - outAv%rAttr(j,i) = invWeightSum * outAv%rAttr(j,i) - end do - - end do - - ! The merge is now complete. - - end subroutine MergeFourGGDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
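To make the calling convention concrete, the sketch below shows roughly how a coupler might drive the four-source merge defined above. It is only an illustration: the field, grid, and mask-tag names (atmImp, ocnImp, iceImp, lndImp, cplGrid, 'ofrac', 'ifrac', 'lfrac') are hypothetical, the setup of the AttrVects and the GeneralGrid is omitted, and in practice the call would normally go through the module's public generic interface rather than the specific double-precision routine named here.

      use m_realkinds,   only : DP
      use m_AttrVect,    only : AttrVect, AttrVect_lsize => lsize
      use m_GeneralGrid, only : GeneralGrid

      type(AttrVect)                  :: atmImp, ocnImp, iceImp, lndImp, merged
      type(GeneralGrid)               :: cplGrid
      real(DP), dimension(:), pointer :: wSum

      ! merged must already exist, with its real attributes initialized
      ! (e.g. to zero), and wSum must be associated with the same length
      ! as merged; the routine zeroes wSum itself before accumulating.
      allocate(wSum(AttrVect_lsize(merged)))

      call MergeFourGGDP_(inAv1=atmImp, rMaskTags1='ofrac',            &
                          inAv2=ocnImp, rMaskTags2='ifrac',            &
                          inAv3=iceImp, rMaskTags3='lfrac',            &
                          inAv4=lndImp,                                &
                          GGrid=cplGrid, CheckMasks=.true.,            &
                          outAv=merged, WeightSum=wSum)

      ! On return, merged holds the weight-normalized combination of the
      ! shared real attributes, and wSum(i) holds the accumulated mask
      ! product W_i for element i.

For example, at a point where the three hypothetical fraction masks were 0.6, 0.3, and 0.1 and the fourth source carried no masks (so it contributes a weight of 1), the accumulated weight would be W_i = 0.6 + 0.3 + 0.1 + 1.0 = 2.0, and each merged attribute would be the mask-weighted sum of the four inputs divided by 2.0. A point where all contributing weights are zero triggers the FATAL WeightSum error in the renormalization loop, so callers should ensure every output element receives at least one nonzero-weighted source.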
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MergeInDataGGSP_ - Add Data into a Merge -! -! !DESCRIPTION: This routine takes input field data from the input -! {\tt AttrVect} argument {\tt inAv}, and merges the real attributes it -! shares with the input/output {\tt AttrVect} argument {\tt outAv}. -! The merge is a masked merge of the form -! $$ c_i = c_i + {{\prod_{j=1}^J} M_{i}^j} {{\prod_{k=1}^K} F_{i}^k} -! a_i , $$ -! where ${c_i}$ represents one element of one of the real attributes of -! {\tt outAv}, and ${a_i}$ represents one element of one of the real -! attributes of {\tt inAv}. The ${M_{i}^j}$ are {\em integer masks} which -! have value either $0$ or $1$, and are integer attributes of the input -! {\tt GeneralGrid} argument {\tt GGrid}. The ${F_{i}^k}$ are {\em real -! masks} whose values are in the closed interval $[0,1]$, and are real -! attributes of the input {\tt GeneralGrid} argument {\tt GGrid}. The -! input {\tt CHARACTER} argument {\tt iMaskTags} is a string of colon- -! delimited strings that name the integer attributes in {\tt GGrid} -! that are used as the masks ${M_{i}^j}$. The input {\tt CHARACTER} -! argument {\tt rMaskTags} is a string of colon-delimited strings -! that name the real attributes in {\tt GGrid} that are used as the -! masks ${F_{i}^k}$. The output {\tt REAL} array {\tt WeightSum} is -! used to store a running sum of the product of the masks. The -! {\tt LOGICAL} input argument {\tt CheckMasks} governs how the masks -! are applied. If ${\tt CheckMasks} = {\tt .TRUE.}$, the entries are -! checked to ensure they meet the definitions of real and integer masks. -! If ${\tt CheckMasks} = {\tt .FALSE.}$ then the masks are multiplied -! together on an element-by-element basis with no validation of their -! entries (this option results in slightly higher performance). -! -! {\tt N.B.:} The lengths of the {\tt AttrVect} arguments {\tt inAv} -! and {\tt outAv} must be equal, and this length must also equal the -! lengths of {\tt GGrid} and {\tt WeightSum}. -! -! {\tt N.B.:} This algorithm assumes the {\tt AttrVect} argument -! {\tt outAv} has been created, and its real attributes have been -! initialized. -! -! {\tt N.B.:} This algorithm assumes that the array {\tt WeightSum} -! has been created and initialized. -! -! !INTERFACE: - - subroutine MergeInDataGGSP_(inAv, iMaskTags, rMaskTags, GGrid, & - CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : SP, FP - - use m_String, only : String - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => toChar - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nitem => nitem - use m_List, only : List_get => get - use m_List, only : List_identical => identical - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : SharedAttrIndexList - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_exportIAttr => exportIAttr - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: -! 
- type(AttrVect), intent(IN) :: inAv - character(len=*), optional, intent(IN) :: iMaskTags - character(len=*), optional, intent(IN) :: rMaskTags - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(SP), dimension(:), pointer :: WeightSum - -! !REVISION HISTORY: -! 19Jun02 - Jay Larson - initial verson. -! 10Jul02 - J. Larson - Improved argument -! checking. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeInDataGGSP_' - - integer :: i, ierr, j, length - type(String) :: DummStr - type(List) :: iMaskList, rMaskList - integer, dimension(:), pointer :: iMask,iDummy ! INTEGER mask workspace - real(FP), dimension(:), pointer :: rMask,rDummy ! REAL mask workspace - - logical :: RAttrIdentical ! flag to identify identical REAL attribute - ! lists in inAv and outAv - integer :: NumSharedRAttr ! number of REAL attributes shared by inAv,outAv - ! Cross-index storage for shared REAL attributes of inAv,outAv - integer, dimension(:), pointer :: inAvIndices, outAvIndices - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? - - if(.not.(List_allocated(inAv%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv has no real attributes.' - call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes.' - call die(myname_) - endif - - if(present(iMaskTags)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes.' - call die(myname_) - endif - endif - - if(present(rMaskTags)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes.' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated.' - call die(myname_) - endif - - ! Do the vector lengths match? - - if(AttrVect_lsize(inAv) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv and outAv must match.', & - 'AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv and GGrid must match.', & - 'AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv and WeightSum must match.', & - 'AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Check for INTEGER masks. If they are present, retrieve - ! them and combine them into a single integer mask iMask(:) - - if(present(iMaskTags)) then - - ! allocate two arrays: iMask (the final product), - ! 
and iDummy (storage space for each mask as it is retrieved)
-
-  allocate(iMask(AttrVect_lsize(inAv)), iDummy(AttrVect_lsize(inAv)), &
-           stat=ierr)
-  if(ierr /= 0) then
-     write(stderr,'(2a,i8)') myname_, &
-          ':: allocate(iMask(...)...) failed with ierr = ',ierr
-     call die(myname_)
-  endif
-
-  ! Initialize all the elements of iMask to unity:
-  iMask = 1
-
-  ! turn the colon-delimited string of tags into a List:
-  call List_init(iMaskList,iMaskTags)
-
-  ! Loop over the items in iMaskList, retrieving each mask
-  ! into the array iDummy, checking it (if CheckMasks=.TRUE.),
-  ! and multiplying it element-by-element into the array iMask.
-
-  do i=1,List_nitem(iMaskList)
-     ! grab item as a String
-     call List_get(DummStr, i, iMaskList)
-     ! use this String to identify an INTEGER GeneralGrid attribute
-     ! for export to iDummy(:)
-     call GeneralGrid_exportIAttr(GGrid, String_ToChar(DummStr), &
-                                  iDummy, length)
-
-     if(.not.(CheckMasks)) then ! Merely multiply iMask by iDummy:
-        do j=1,length
-           iMask(j) = iMask(j) * iDummy(j)
-        end do
-     else ! check mask elements and include their effect on iMask
-        do j=1,length
-           select case(iDummy(j))
-           case(0) ! zeroes out iMask(j)
-              iMask(j) = 0
-           case(1) ! leaves iMask(j) untouched
-           case default ! shut down with an error
-              write(stderr,'(5a,i8,a,i8)') myname_, &
-                   ':: ERROR--illegal mask value (must be 0 or 1).', &
-                   'Illegal value stored in mask ', &
-                   String_ToChar(DummStr),'(',j,')=',iDummy(j)
-              call die(myname_)
-           end select
-        end do
-     endif ! if(CheckMasks)...
-     ! clean up dummy String DummStr
-     call String_clean(DummStr)
-  end do ! do i=1,List_nitem(iMaskList)...
-
-  endif ! if(present(iMaskTags))...
-
-  ! Check for REAL masks. If they are present, retrieve
-  ! them and combine them into a single real mask rMask(:)
-
-  if(present(rMaskTags)) then
-
-  ! allocate two arrays: rMask (the final product),
-  ! and rDummy (storage space for each mask as it is retrieved)
-
-  allocate(rMask(AttrVect_lsize(inAv)), rDummy(AttrVect_lsize(inAv)), &
-           stat=ierr)
-  if(ierr /= 0) then
-     write(stderr,'(2a,i8)') myname_, &
-          ':: allocate(rMask(...)...) failed with ierr = ',ierr
-     call die(myname_)
-  endif
-
-  ! Initialize all the elements of rMask to unity:
-  rMask = 1._FP
-
-  ! turn the colon-delimited string of tags into a List:
-  call List_init(rMaskList,rMaskTags)
-
-  ! Loop over the items in rMaskList, retrieving each mask
-  ! into the array rDummy, checking it (if CheckMasks=.TRUE.),
-  ! and multiplying it element-by-element into the array rMask.
-
-  do i=1,List_nitem(rMaskList)
-     ! grab item as a String
-     call List_get(DummStr, i, rMaskList)
-     ! use this String to identify a REAL GeneralGrid attribute
-     ! for export to rDummy(:)
-     call GeneralGrid_exportRAttr(GGrid, String_ToChar(DummStr), &
-                                  rDummy, length)
-
-     if(.not.(CheckMasks)) then ! Merely multiply rMask by rDummy:
-        do j=1,length
-           rMask(j) = rMask(j) * rDummy(j)
-        end do
-     else ! check mask elements and include their effect on rMask
-        do j=1,length
-           if((rDummy(j) >= 0.) .and. (rDummy(j) <= 1.)) then ! in [0,1]
-              rMask(j) = rMask(j) * rDummy(j)
-           else
-              write(stderr,'(5a,i8,a,es12.4)') myname_, &
-                   ':: ERROR--illegal mask value (must be in [0.,1.]).', &
-                   'Illegal value stored in mask ', &
-                   String_ToChar(DummStr),'(',j,')=',rDummy(j)
-              call die(myname_)
-           endif
-        end do
-     endif ! if(CheckMasks)...
-     ! clean up dummy String DummStr
-     call String_clean(DummStr)
-  end do ! do i=1,List_nitem(rMaskList)...
-
-  endif ! if(present(rMaskTags))...
-
-  ! Now we have (at most) a single INTEGER mask iMask(:) and
-  !
a single REAL mask rMask(:). Before we perform the merge, - ! we must tackle one more issue: are the REAL attributes - ! of inAv and outAv identical and in the same order? If they - ! are, the merge is a straightforward double loop over the - ! elements and over all the attributes. If the attribute lists - ! differ, we must cross-reference common attributes, and store - ! their indices. - - RAttrIdentical = List_identical(inAv%rList, outAv%rList) - if(.not.(RAttrIdentical)) then - ! Determine the number of shared REAL attributes NumSharedRAttr, - ! and form cross-index tables inAvIndices, outAvIndices. - call SharedAttrIndexList(inAv, outAv, 'REAL', NumSharedRAttr, & - inAvIndices, outAvIndices) - endif - - if(present(rMaskTags)) then ! REAL masking stored in rMask(:) - - if(present(iMaskTags)) then ! also INTEGER mask iMask(:) - - if(RAttrIdentical) then ! straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + & - rMask(i) * iMask(i) * inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) * rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - else ! use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - rMask(i) * iMask(i) * & - inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) * rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical)... - - else ! rMask(:), but no iMask(:) - - if(RAttrIdentical) then ! straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + & - rMask(i) * inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - else ! use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - rMask(i) * inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical) - - endif ! if(present(iMaskTags))... - - else ! No REAL Mask - - if(present(iMaskTags)) then ! Have iMask(:), but no rMask(:) - - if(RAttrIdentical) then ! straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + & - iMask(i) * inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - else ! use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - iMask(i) * inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical) - - else ! Neither iMask(:) nor rMask(:)--all elements weighted by unity - - if(RAttrIdentical) then ! 
straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + 1._FP - end do ! do i=1,AttrVect_lsize(inAv)... - else ! use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + 1._FP - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical) - - endif ! if(present(iMaskTags))... - - endif ! if(present(rMaskTags))... - - ! At this point the merge has been completed. Now clean - ! up all allocated structures and temporary arrays. - - if(present(iMaskTags)) then ! clean up integer mask work space - deallocate(iMask, iDummy, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(iMask,...) failed with ierr = ',ierr - call die(myname_) - endif - call List_clean(iMaskList) - endif - - if(present(rMaskTags)) then ! clean up real mask work space - deallocate(rMask, rDummy, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(rMask,...) failed with ierr = ',ierr - call die(myname_) - endif - call List_clean(rMaskList) - endif - - if(.not.(RAttrIdentical)) then ! clean up cross-reference tables - deallocate(inAvIndices, outAvIndices, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(inAvIndices,...) failed with ierr = ',ierr - call die(myname_) - endif - endif - - end subroutine MergeInDataGGSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! -! !IROUTINE: MergeInDataGGDP_ - merge in data from a component. -! -! !DESCRIPTION: -! Double precision version of MergeInDataGGSP_ -! -! !INTERFACE: - - subroutine MergeInDataGGDP_(inAv, iMaskTags, rMaskTags, GGrid, & - CheckMasks, outAv, WeightSum) -! -! !USES: -! - use m_stdio - use m_die - - use m_realkinds, only : DP, FP - - use m_String, only : String - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => toChar - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nitem => nitem - use m_List, only : List_get => get - use m_List, only : List_identical => identical - use m_List, only : List_allocated => allocated - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : SharedAttrIndexList - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_exportIAttr => exportIAttr - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: -! - type(AttrVect), intent(IN) :: inAv - character(len=*), optional, intent(IN) :: iMaskTags - character(len=*), optional, intent(IN) :: rMaskTags - type(GeneralGrid), intent(IN) :: GGrid - logical, intent(IN) :: CheckMasks - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(INOUT) :: outAv - real(DP), dimension(:), pointer :: WeightSum - -! 
!REVISION HISTORY: -! 19Jun02 - Jay Larson - initial verson. -! 10Jul02 - J. Larson - Improved argument -! checking. -!_______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::MergeInDataGGDP_' - - integer :: i, ierr, j, length - type(String) :: DummStr - type(List) :: iMaskList, rMaskList - integer, dimension(:), pointer :: iMask,iDummy ! INTEGER mask workspace - real(FP), dimension(:), pointer :: rMask,rDummy ! REAL mask workspace - - logical :: RAttrIdentical ! flag to identify identical REAL attribute - ! lists in inAv and outAv - integer :: NumSharedRAttr ! number of REAL attributes shared by inAv,outAv - ! Cross-index storage for shared REAL attributes of inAv,outAv - integer, dimension(:), pointer :: inAvIndices, outAvIndices - - ! Begin argument sanity checks... - - ! Have the input arguments been allocated? - - if(.not.(List_allocated(inAv%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT argument inAv has no real attributes.' - call die(myname_) - endif - - if(.not.(List_allocated(outaV%rList))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUTPUT argument outAv has no real attributes.' - call die(myname_) - endif - - if(present(iMaskTags)) then - if(.not.(List_allocated(GGrid%data%iList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Integer masking requested, but input argument GGrid ', & - 'has no integer attributes.' - call die(myname_) - endif - endif - - if(present(rMaskTags)) then - if(.not.(List_allocated(GGrid%data%rList))) then - write(stderr,'(3a)') myname_, & - 'ERROR--Real masking requested, but input argument GGrid ', & - 'has no real attributes.' - call die(myname_) - endif - endif - - if(.not.(associated(WeightSum))) then - write(stderr,'(2a)') myname_, & - 'ERROR--INPUT/OUPUT argument WeightSum has not been allocated.' - call die(myname_) - endif - - ! Do the vector lengths match? - - if(AttrVect_lsize(inAv) /= AttrVect_lsize(outAv)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of AttrVect arguments inAv and outAv must match.', & - 'AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - 'AttrVect_lsize(outAv) = ',AttrVect_lsize(outAv) - call die(myname_) - endif - - if(AttrVect_lsize(inAv) /= GeneralGrid_lsize(GGrid)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv and GGrid must match.', & - 'AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - 'AttrVect_lsize(outAv) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(AttrVect_lsize(inAv) /= size(WeightSum)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: ERROR--Lengths of arguments inAv and WeightSum must match.', & - 'AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - 'size(WeightSum) = ',size(WeightSum) - call die(myname_) - endif - - ! ...end argument sanity checks. - - ! Check for INTEGER masks. If they are present, retrieve - ! them and combine them into a single integer mask iMask(:) - - if(present(iMaskTags)) then - - ! allocate two arrays: iMask (the final product), - ! and iDummy (storage space for each mask as it is retrieved) - - allocate(iMask(AttrVect_lsize(inAv)), iDummy(AttrVect_lsize(inAv)), & - stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: allocate(iMask(...)...) failed with ierr = ',ierr - call die(myname_) - endif - - ! Initialize all the elements of iMask to unity: - iMask = 1 - - ! turn the colon-delimited string of tags into a List: - call List_init(iMaskList,iMaskTags) - - ! 
Loop over the items in iMaskList, retrieving each mask - ! into the array iDummy, checking it (if CheckMasks=.TRUE.), - ! and multiplying it element-by-element into the array iMask. - - do i=1,List_nitem(iMaskList) - ! grab item as a String - call List_get(DummStr, i, iMaskList) - ! use this String to identify an INTEGER GeneralGrid attribute - ! for export to iDummy(:) - call GeneralGrid_exportIAttr(GGrid, String_ToChar(DummStr), & - iDummy, length) - - if(.not.(CheckMasks)) then ! Merely multiply iMask by iDummy: - do j=1,length - iMask(j) = iMask(j) * iDummy(j) - end do - else ! check mask elements and include their effect on iMask - do j=1,length - select case(iDummy(j)) - case(0) ! zeroes out iMask(j) - iMask(j) = 0 - case(1) ! leaves iMask(j) untouched - case default ! shut down with an error - write(stderr,'(5a,i8,a,i8)') myname_, & - ':: ERROR--illegal mask value (must be 0 or 1).', & - 'Illegal value stored in mask ', & - String_ToChar(DummStr),'(',j,')=',iDummy(j) - call die(myname_) - end select - end do - endif ! if(CheckMasks)... - ! clean up dummy String DummStr - call String_clean(DummStr) - end do ! do i=1,List_nitem(iMaskList)... - - endif ! if(present(iMaskTags))... - - ! Check for REAL masks. If they are present, retrieve - ! them and combine them into a single real mask rMask(:) - - if(present(rMaskTags)) then - - ! allocate two arrays: rMask (the final product), - ! and rDummy (storage space for each mask as it is retrieved) - - allocate(rMask(AttrVect_lsize(inAv)), rDummy(AttrVect_lsize(inAv)), & - stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: allocate(rMask(...)...) failed with ierr = ',ierr - call die(myname_) - endif - - ! Initialize all the elements of rMask to unity: - rMask = 1._FP - - ! turn the colon-delimited string of tags into a List: - call List_init(rMaskList,rMaskTags) - - ! Loop over the items in rMaskList, retrieving each mask - ! into the array rDummy, checking it (if CheckMasks=.TRUE.), - ! and multiplying it element-by-element into the array rMask. - - do i=1,List_nitem(rMaskList) - ! grab item as a String - call List_get(DummStr, i, rMaskList) - ! use this String to identify an INTEGER GeneralGrid attribute - ! for export to rDummy(:) - call GeneralGrid_exportRAttr(GGrid, String_ToChar(DummStr), & - rDummy, length) - - if(.not.(CheckMasks)) then ! Merely multiply rMask by rDummy: - do j=1,length - rMask(j) = rMask(j) * rDummy(j) - end do - else ! check mask elements and include their effect on rMask - do j=1,length - if((iDummy(j) >= 0.) .and. (iDummy(j) <= 1.)) then ! in [0,1] - rMask(j) = rMask(j) * rDummy(j) - else - write(stderr,'(5a,i8,a,i8)') myname_, & - ':: ERROR--illegal mask value (must be in [0.,1.]).', & - 'Illegal value stored in mask ', & - String_ToChar(DummStr),'(',j,')=',rDummy(j) - call die(myname_) - endif - end do - endif ! if(CheckMasks)... - ! clean up dummy String DummStr - call String_clean(DummStr) - end do ! do i=1,List_nitem(rMaskList)... - - endif ! if(present(rMaskTags))... - - ! Now we have (at most) a single INTEGER mask iMask(:) and - ! a single REAL mask rMask(:). Before we perform the merge, - ! we must tackle one more issue: are the REAL attributes - ! of inAv and outAv identical and in the same order? If they - ! are, the merge is a straightforward double loop over the - ! elements and over all the attributes. If the attribute lists - ! differ, we must cross-reference common attributes, and store - ! their indices. 
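[Editorial note] For reference, the accumulation performed by the masked-merge blocks below can be written compactly as follows; the notation is introduced here for clarity only, and a mask that was not requested is treated as identically 1:

\[
   \mathtt{outAv\%rAttr}(j,i) \;\leftarrow\; \mathtt{outAv\%rAttr}(j,i) \;+\; \mathtt{iMask}(i)\,\mathtt{rMask}(i)\,\mathtt{inAv\%rAttr}(j,i),
   \qquad
   \mathtt{WeightSum}(i) \;\leftarrow\; \mathtt{WeightSum}(i) \;+\; \mathtt{iMask}(i)\,\mathtt{rMask}(i)
\]

for every local point i and every REAL attribute j shared by inAv and outAv. Any final normalization of outAv by the accumulated WeightSum is assumed to be carried out by the caller after all components have been merged in; it is not performed inside this routine.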
- - RAttrIdentical = List_identical(inAv%rList, outAv%rList) - if(.not.(RAttrIdentical)) then - ! Determine the number of shared REAL attributes NumSharedRAttr, - ! and form cross-index tables inAvIndices, outAvIndices. - call SharedAttrIndexList(inAv, outAv, 'REAL', NumSharedRAttr, & - inAvIndices, outAvIndices) - endif - - if(present(rMaskTags)) then ! REAL masking stored in rMask(:) - - if(present(iMaskTags)) then ! also INTEGER mask iMask(:) - - if(RAttrIdentical) then ! straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + & - rMask(i) * iMask(i) * inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) * rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - else ! use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - rMask(i) * iMask(i) * & - inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) * rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical)... - - else ! rMask(:), but no iMask(:) - - if(RAttrIdentical) then ! straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + & - rMask(i) * inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - else ! use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - rMask(i) * inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + rMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical) - - endif ! if(present(iMaskTags))... - - else ! No REAL Mask - - if(present(iMaskTags)) then ! Have iMask(:), but no rMask(:) - - if(RAttrIdentical) then ! straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + & - iMask(i) * inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - else ! use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - iMask(i) * inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + iMask(i) - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical) - - else ! Neither iMask(:) nor rMask(:)--all elements weighted by unity - - if(RAttrIdentical) then ! straight masked multiply - do i=1, AttrVect_lsize(inAv) - do j=1,AttrVect_nRAttr(inAv) - outAv%rAttr(j,i) = outAv%rAttr(j,i) + inAv%rAttr(j,i) - end do ! do j=1,AttrVect_nRAttr(inAv) - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + 1._FP - end do ! do i=1,AttrVect_lsize(inAv)... - else ! 
use previously generated cross-indices - do i=1, AttrVect_lsize(inAv) - do j=1,NumSharedRAttr - outAv%rAttr(outAVIndices(j),i) = & - outAv%rAttr(outAvIndices(j),i) + & - inAv%rAttr(inAvIndices(j),i) - end do ! do j=1,NumSharedRAttr - ! add in mask contribution to total of merge weights - WeightSum(i) = WeightSum(i) + 1._FP - end do ! do i=1,AttrVect_lsize(inAv)... - endif ! if(RAttrIdentical) - - endif ! if(present(iMaskTags))... - - endif ! if(present(rMaskTags))... - - ! At this point the merge has been completed. Now clean - ! up all allocated structures and temporary arrays. - - if(present(iMaskTags)) then ! clean up integer mask work space - deallocate(iMask, iDummy, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(iMask,...) failed with ierr = ',ierr - call die(myname_) - endif - call List_clean(iMaskList) - endif - - if(present(rMaskTags)) then ! clean up real mask work space - deallocate(rMask, rDummy, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(rMask,...) failed with ierr = ',ierr - call die(myname_) - endif - call List_clean(rMaskList) - endif - - if(.not.(RAttrIdentical)) then ! clean up cross-reference tables - deallocate(inAvIndices, outAvIndices, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(inAvIndices,...) failed with ierr = ',ierr - call die(myname_) - endif - endif - - end subroutine MergeInDataGGDP_ - - end module m_Merge diff --git a/src/externals/mct/mct/m_Navigator.F90 b/src/externals/mct/mct/m_Navigator.F90 deleted file mode 100644 index 6c43ab36a92..00000000000 --- a/src/externals/mct/mct/m_Navigator.F90 +++ /dev/null @@ -1,666 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Navigator - An Object for Indexing Segments of a Vector -! -! !DESCRIPTION: -! A {\em Navigator} is a table used to {\em index} or {\em Navigate} -! segments of a vector, or segments of a dimension of a -! higher-dimensional array. In MCT, this concept is embodied in -! the {\tt Navigator} datatype, which contains -! the following components: -! \begin{itemize} -! \item The {\em number} of segments; -! \item The {\em displacement} of the starting index of each segment -! from the vector's first element (i.e. the starting index minus 1); -! \item The {\em length} of each segment; and -! \item The {\em total length} of the vector or array dimension for which -! segments are defined. This last item is optional, but if defined -! provides the ability for the {\tt Navigator} to check for erroneous -! segment entries (i.e., segments that are out-of-bounds). -! \end{itemize} -! -! This module defines the {\tt Navigator} datatype, creation and -! destruction methods, a variety of query methods, and a method for -! resizing the {\tt Navigator}. -! -! !INTERFACE: - - module m_Navigator - -! !USES: -! No external modules are used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: Navigator ! The class data structure - - Type Navigator - integer :: NumSegments ! Number of defined Segments - integer :: VectorLength ! Length of the Vector being indexed - integer,pointer,dimension(:) :: displs ! 
Segment start displacements - integer,pointer,dimension(:) :: counts ! Segment lengths - End Type Navigator - -! !PUBLIC MEMBER FUNCTIONS: - - public :: Navigator_init,init ! initialize an object - public :: clean ! clean an object - public :: NumSegments ! number of vector segments - public :: VectorLength ! indexed vector's total length - public :: msize ! the maximum size - public :: resize ! adjust the true size - public :: get ! get an entry - public :: ptr_displs ! referencing %displs(:) - public :: ptr_counts ! referencing %counts(:) - - interface Navigator_init; module procedure & - init_ - end interface - interface init ; module procedure init_ ; end interface - interface clean ; module procedure clean_ ; end interface - interface NumSegments ; module procedure & - NumSegments_ - end interface - interface VectorLength ; module procedure & - VectorLength_ - end interface - interface msize ; module procedure msize_ ; end interface - interface resize; module procedure resize_; end interface - interface get ; module procedure get_ ; end interface - interface ptr_displs; module procedure & - ptr_displs_ - end interface - interface ptr_counts; module procedure & - ptr_counts_ - end interface - -! !REVISION HISTORY: -! 22May00 - Jing Guo - initial prototype/prolog/code -! 26Aug02 - J. Larson - expanded datatype to inlcude -! VectorLength component. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_Navigator' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_ - Create a Navigator -! -! !DESCRIPTION: -! This routine creates a {\tt Navigator} {\tt Nav} capable of storing -! information about {\tt NumSegments} segments. The user can supply the -! length of the vector (or array subspace) being indexed by supplying the -! optional input {\tt INTEGER} argument {\tt VectorLength} (if it is not -! supplied, this component of {\tt Nav} will be set to zero, signifying -! to other {\tt Navigator} routines that vector length information is -! unavailable). The success (failure) of this operation is signified by -! the zero (non-zero) value of the optional output {\tt INTEGER} argument -! {\tt stat}. -! -! !INTERFACE: - - subroutine init_(Nav, NumSegments, VectorLength, stat) - -! !USES: - - use m_mall,only : mall_ison,mall_mci - use m_die ,only : die,perr - use m_stdio, only : stderr - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: NumSegments - integer, optional, intent(in) :: VectorLength - -! !OUTPUT PARAMETERS: - - type(Navigator), intent(out) :: Nav - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 22May00 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::init_' - integer :: ier - -! If the argument VectorLength is present, use this value to set -! Nav%VectorLength. Otherwise, set Nav%VectorLength to zero. - - if(present(VectorLength)) then - if(VectorLength < 0) then - write(stderr,'(2a,i8)') myname_, & - ':: FATAL -- illegal value of VectorLength=',VectorLength - call die(myname_) - endif - Nav%VectorLength = VectorLength - else - Nav%VectorLength = 0 - endif - -! 
Allocate segment attribute table arrays: - - allocate(Nav%displs(NumSegments),Nav%counts(NumSegments),stat=ier) - if(ier/=0) then - call perr(myname_,'allocate()',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - if(mall_ison()) then - call mall_mci(Nav%displs,myname) - call mall_mci(Nav%counts,myname) - endif - - Nav%NumSegments=NumSegments - - end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destroy a Navigator -! -! !DESCRIPTION: -! This routine deallocates allocated memory associated with the -! input/output {\tt Navigator} argument {\tt Nav}, and clears the -! vector length and number of segments components The success (failure) -! of this operation is signified by the zero (non-zero) value of the -! optional output {\tt INTEGER} argument {\tt stat}. -! -! !INTERFACE: - - subroutine clean_(Nav, stat) - -! !USES: - - use m_mall, only : mall_ison,mall_mco - use m_die, only : warn - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - type(Navigator),intent(inout) :: Nav - -! !OUTPUT PARAMETERS: - - integer,optional,intent(out) :: stat - -! !REVISION HISTORY: -! 22May00 - Jing Guo initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - if(mall_ison()) then - if(associated(Nav%displs)) call mall_mco(Nav%displs,myname_) - if(associated(Nav%counts)) call mall_mco(Nav%counts,myname_) - endif - - deallocate(Nav%displs,Nav%counts,stat=ier) - - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(Nav%...)',ier) - endif - - Nav%NumSegments = 0 - Nav%VectorLength = 0 - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: NumSegments_ - Return the Number of Segments -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the number of segments -! in the input {\tt Navigator} argument {\tt Nav} for which segment -! start and length information are defined . -! -! !INTERFACE: - - integer function NumSegments_(Nav) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(Navigator), intent(in) :: Nav - -! !REVISION HISTORY: -! 22May00 - Jing Guo initial prototype/prolog/code -! 1Mar02 - E.T. Ong - removed die to prevent crashes. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::NumSegments_' - - NumSegments_=Nav%NumSegments - - end function NumSegments_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: msize_ - Return the Maximum Capacity for Segment Storage -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the maximum number of -! segments for which start and length information can be stored in the -! input {\tt Navigator} argument {\tt Nav}. -! -! !INTERFACE: - - integer function msize_(Nav) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(Navigator),intent(in) :: Nav - -! !REVISION HISTORY: -! 
22May00 - Jing Guo initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::msize_' - - msize_=size(Nav%displs) - - end function msize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: VectorLength_ - Return the Navigated Vector's Length -! -! !DESCRIPTION: -! This {\tt INTEGER} query function returns the total length of the -! vector navigated by the input {\tt Navigator} argument {\tt Nav}. -! Note that the vector length is a quantity the user must have set -! when {\tt Nav} was initialized. If it has not been set, the return -! value will be zero. -! -! !INTERFACE: - - integer function VectorLength_(Nav) - -! !USES: - - implicit none - -! !INPUT PARAMETERS: - - type(Navigator), intent(in) :: Nav - -! !REVISION HISTORY: -! 26Aug02 - J. Larson - initial implementation -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::VectorLength_' - - VectorLength_=Nav%VectorLength - - end function VectorLength_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: resize_ - Reset the Number of Segments -! -! !DESCRIPTION: -! This routine resets the number of segments stored in the input/output -! {\tt Navigator} argument {\tt Nav}. It behaves in one of two modes: -! If the optional {\tt INTEGER} input argument {\tt NumSegments} is -! provided, then this value is taken to be the new number of segments. -! If this routine is invoked without {\tt NumSegments} provided, then -! the new number of segments is set as per the result of the Fortran -! {\tt size()} function applied to the segment table arrays. -! -! !INTERFACE: - - subroutine resize_(Nav, NumSegments) - -! !USES: - - use m_stdio, only : stderr - use m_die, only : die - - implicit none - -! !INPUT PARAMETERS: - - integer,optional,intent(in) :: NumSegments - -! !INPUT/OUTPUT PARAMETERS: - - type(Navigator),intent(inout) :: Nav - -! !REVISION HISTORY: -! 22May00 - Jing Guo initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::resize_' - integer :: m - - m=msize_(Nav) - - if(present(NumSegments)) then - if(NumSegments > m) then - write(stderr,'(3a,2(i8,a))') myname_, & - ':: FATAL value of argument NumSegments exceeds maximum ', & - ' storage for this Navigator. NumSegments = ',NumSegments, & - ' Maximum storage capacity = ',m,' segments.' - call die(myname_) - endif - Nav%NumSegments=NumSegments - else - Nav%NumSegments=m - endif - - end subroutine resize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: get_ - Retrieve Characteristics of a Segment -! -! !DESCRIPTION: -! This multi-purpose query routine can be used to retrieve various -! characteristics of a given segment (identified by the input -! {\tt INTEGER} argument {\tt iSeg}) stored in the input {\tt Navigator} -! argument {\tt Nav}: -! \begin{enumerate} -! 
\item The {\em displacement} of the first element in this segment from -! the first element of the vector. This quantity is returned in the -! optional output {\tt INTEGER} argument {\tt displ} -! \item The {\em number of elements} in this segment. This quantity -! is returned in the optional output {\tt INTEGER} argument {\tt displ} -! \item The {\em index} of the first element in this segment This -! quantity is returned in the optional output {\tt INTEGER} argument -! {\tt lc}. -! \item The {\em index} of the final element in this segment This -! quantity is returned in the optional output {\tt INTEGER} argument -! {\tt le}. -! \end{enumerate} -! Any combination of the above characteristics may be obtained by -! invoking this routine with the corresponding optional arguments. -! -! !INTERFACE: - - subroutine get_(Nav, iSeg, displ, count, lc, le) - -! !USES: - - use m_stdio, only : stderr - use m_die, only : die - - implicit none - -! !INPUT PARAMETERS: - - type(Navigator), intent(in) :: Nav - integer, intent(in) :: iSeg - -! !OUTPUT PARAMETERS: - - integer, optional, intent(out) :: displ - integer, optional, intent(out) :: count - integer, optional, intent(out) :: lc - integer, optional, intent(out) :: le - -! !REVISION HISTORY: -! 22May00 - Jing Guo initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::get_' - - - ! Argument sanity check: - - if(iSeg > msize_(Nav)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL -- Segment index out of Navigator table bounds, ', & - 'Size of Navigator table = ',msize_(Nav),' iSeg = ',iSeg - call die(myname_) - endif - - if(present(displ)) displ=Nav%displs(iSeg) - if(present(count)) count=Nav%counts(iSeg) - if(present(lc)) lc=Nav%displs(iSeg)+1 - if(present(le)) le=Nav%displs(iSeg)+Nav%counts(iSeg) - - end subroutine get_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ptr_displs_ - Returns Pointer to the displs(:) Component -! -! !DESCRIPTION: -! This pointer-valued query function returns a pointer to the -! {\em displacements} information (the displacement of the first element -! of each segment from the beginning of the vector) contained in the -! input {\tt Navigator} argument {\tt Nav}. It has four basic modes -! of behavior, depending on which (if any) of the optional input -! {\tt INTEGER} arguments {\tt lbnd} and {\tt ubnd} are supplied. -! \begin{enumerate} -! \item If neither {\tt lbnd} nor {\tt ubnd} is supplied, then -! {\tt ptr\_displs\_} returns a pointer to {\em all} the elements in -! the array {\tt Nav\%displs(:)}. -! \item If both {\tt lbnd} and {\tt ubnd} are supplied, then -! {\tt ptr\_displs\_} returns a pointer to the segment of the -! array {\tt Nav\%displs(lbnd:ubnd)}. -! \item If {\tt lbnd} is supplied but {\tt ubnd} is not, then -! {\tt ptr\_displs\_} returns a pointer to the segment of the -! array {\tt Nav\%displs(lbnd:msize)}, where {\tt msize} is the -! length of the array {\tt Nav\%displs(:)}. -! \item If {\tt lbnd} is not supplied but {\tt ubnd} is, then -! {\tt ptr\_displs\_} returns a pointer to the segment of the -! array {\tt Nav\%displs(1:ubnd)}. -! \end{enumerate} -! -! !INTERFACE: - - function ptr_displs_(Nav, lbnd, ubnd) - -! !USES: - - use m_stdio, only : stderr - use m_die, only : die - - implicit none - -! 
!INPUT PARAMETERS: - - type(Navigator), intent(in) :: Nav - integer, optional, intent(in) :: lbnd - integer, optional, intent(in) :: ubnd - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: ptr_displs_ - -! !REVISION HISTORY: -! 22May00 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ptr_displs_' - integer :: lc,le - - ! Argument sanity checks - - if(present(lbnd)) then - if(lbnd <= 0) then - write(stderr,'(3a,i8)') myname_, & - ':: FATAL -- illegal lower bound, which must be >= 1.', & - 'lbnd = ',lbnd - call die(myname_) - endif - endif - - if(present(ubnd)) then - if(ubnd > msize_(Nav)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL -- illegal upper bound, which must be <= msize(Nav).', & - 'msize(Nav) = ',msize_(Nav),' ubnd = ',ubnd - call die(myname_) - endif - endif - - if(present(lbnd) .and. present(ubnd)) then - if(lbnd > ubnd) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL -- upper bound, must be >= lower bound.', & - 'Lower bound lbnd = ',lbnd,' Upper bound ubnd = ',ubnd - call die(myname_) - endif - endif - - ! End argument sanity checks - - if(present(lbnd).or.present(ubnd)) then - lc=lbound(Nav%displs,1) - if(present(lbnd)) lc=lbnd - le=ubound(Nav%displs,1) - if(present(ubnd)) le=ubnd - ptr_displs_ => Nav%displs(lc:le) - else - le=Nav%NumSegments - ptr_displs_ => Nav%displs(1:le) - endif - - end function ptr_displs_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ptr_counts_ - Returns Pointer to counts(:) Component -! -! !DESCRIPTION: -! This pointer-valued query function returns a pointer to the -! {\em counts} information (that is, the number of elements in each -! of each segment the vector being navigated) contained in the -! input {\tt Navigator} argument {\tt Nav}. It has four basic modes -! of behavior, depending on which (if any) of the optional input -! {\tt INTEGER} arguments {\tt lbnd} and {\tt ubnd} are supplied. -! \begin{enumerate} -! \item If neither {\tt lbnd} nor {\tt ubnd} is supplied, then -! {\tt ptr\_counts\_} returns a pointer to {\em all} the elements in -! the array {\tt Nav\%counts(:)}. -! \item If both {\tt lbnd} and {\tt ubnd} are supplied, then -! {\tt ptr\_counts\_} returns a pointer to the segment of the -! array {\tt Nav\%counts(lbnd:ubnd)}. -! \item If {\tt lbnd} is supplied but {\tt ubnd} is not, then -! {\tt ptr\_counts\_} returns a pointer to the segment of the -! array {\tt Nav\%counts(lbnd:msize)}, where {\tt msize} is the -! length of the array {\tt Nav\%counts(:)}. -! \item If {\tt lbnd} is not supplied but {\tt ubnd} is, then -! {\tt ptr\_counts\_} returns a pointer to the segment of the -! array {\tt Nav\%counts(1:ubnd)}. -! \end{enumerate} -! -! !INTERFACE: - - function ptr_counts_(Nav, lbnd, ubnd) - -! !USES: - - use m_stdio, only : stderr - use m_die, only : die - - implicit none - -! !INPUT PARAMETERS: - - type(Navigator), intent(in) :: Nav - integer, optional, intent(in) :: lbnd - integer, optional, intent(in) :: ubnd - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: ptr_counts_ - -! !REVISION HISTORY: -! 
22May00 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ptr_counts_' - integer :: lc,le - - ! Argument sanity checks - - if(present(lbnd)) then - if(lbnd <= 0) then - write(stderr,'(3a,i8)') myname_, & - ':: FATAL -- illegal lower bound, which must be >= 1.', & - 'lbnd = ',lbnd - call die(myname_) - endif - endif - - if(present(ubnd)) then - if(ubnd > msize_(Nav)) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL -- illegal upper bound, which must be <= msize(Nav).', & - 'msize(Nav) = ',msize_(Nav),' ubnd = ',ubnd - call die(myname_) - endif - endif - - if(present(lbnd) .and. present(ubnd)) then - if(lbnd > ubnd) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL -- upper bound, must be >= lower bound.', & - 'Lower bound lbnd = ',lbnd,' Upper bound ubnd = ',ubnd - call die(myname_) - endif - endif - - ! End argument sanity checks - - if(present(lbnd).or.present(ubnd)) then - lc=lbound(Nav%counts,1) - if(present(lbnd)) lc=lbnd - le=ubound(Nav%counts,1) - if(present(ubnd)) le=ubnd - ptr_counts_ => Nav%counts(lc:le) - else - le=Nav%NumSegments - ptr_counts_ => Nav%counts(1:le) - endif - - end function ptr_counts_ - - end module m_Navigator diff --git a/src/externals/mct/mct/m_Rearranger.F90 b/src/externals/mct/mct/m_Rearranger.F90 deleted file mode 100644 index b2c7af5f400..00000000000 --- a/src/externals/mct/mct/m_Rearranger.F90 +++ /dev/null @@ -1,1377 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Rearranger -- Remaps an AttrVect within a group of processes -! -! !DESCRIPTION: -! This module provides routines and datatypes for rearranging data -! between two {\tt Attribute Vectors} defined on the same grid but -! with two different {\tt GlobalSegMaps}. ''Rearrange'' is a -! generalized form of a parallel matrix transpose. -! A parallel matrix transpose can take advantage of symmetry in the -! data movement algorithm. An MCT Rearranger makes no assumptions -! about symmetry. -! -! When data needs to move between two components and the components -! share any processors, use m\_Rearranger. If the components are on -! distinct sets of processors, use m\_Transfer. -! -! !SEE ALSO: -! m_Transfer -! -! -! !INTERFACE: - - module m_Rearranger - -! -! !USES: - - use m_Router, only : Router - - implicit none - - private ! except - -! !PUBLIC DATA MEMBERS: - - public :: Rearranger ! The class data structure - - type :: Rearranger -#ifdef SEQUENCE - sequence -#endif - private - type(Router) :: SendRouter - type(Router) :: RecvRouter - integer,dimension(:,:),pointer :: LocalPack - integer :: LocalSize - end type Rearranger - -! !PRIVATE DATA MEMBERS: - integer :: max_nprocs ! size of MPI_COMM_WORLD used for generation of - ! local automatic arrays - -! !PUBLIC MEMBER FUNCTIONS: - - public :: init ! creation method - - public :: rearrange ! the rearrange routine - - public :: clean ! destruction method - public :: print ! 
print out comm info - - interface init ; module procedure init_ ; end interface - interface Rearrange ; module procedure Rearrange_ ; end interface - interface clean ; module procedure clean_ ; end interface - interface print ; module procedure print_ ; end interface - -! !DEFINED PARAMETERS: - - integer,parameter :: DefaultTag = 500 - - -! !REVISION HISTORY: -! 31Jan02 - E.T. Ong - initial prototype -! 04Jun02 - E.T. Ong - changed local copy structure to -! LocalSize. Made myPid a global process in MCTWorld. -! 27Sep02 - R. Jacob - Remove SrcAVsize and TrgAVsize -! and use Router%lAvsize instead for sanity check. -! 25Jan08 - R. Jacob - Add ability to handle unordered -! gsmaps. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_Rearranger' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Init_ - Initialize a Rearranger -! -! !DESCRIPTION: -! This routine takes two {\tt GlobalSegMap} inputs, {\tt SourceGSMap} -! and {\tt TargetGSMap} and build a Rearranger {\tt OutRearranger} -! between them. {\tt myComm} is used for the internal communication. -! -! {\bf N.B.} The two {\tt GlolbalSegMap} inputs must be initialized so -! that the index values on a processor are in ascending order. -! -! !INTERFACE: - - subroutine init_(SourceGSMap,TargetGSMap,myComm,OutRearranger) - -! -! !USES: -! - - use m_MCTWorld, only : ThisMCTWorld - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GSMap_lsize => lsize - use m_GlobalSegMap, only : GSMap_increasing => increasing - use m_Router, only : Router - use m_Router, only : Router_init => init - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT PARAMETERS: -! - type(GlobalSegMap), intent(in) :: SourceGSMap, TargetGSMap - integer, intent(in) :: myComm - -! !OUTPUT PARAMETERS: -! - type(Rearranger), intent(out) :: OutRearranger - -! !REVISION HISTORY: -! 31Jan02 - E.T. Ong - initial prototype -! 20Mar02 - E.T. Ong - working code -! 05Jun02 - E.T. Ong - Use LocalPack -! 30Mar06 - P. Worley - added max_nprocs, -! used in communication optimizations in rearrange -!EOP ___________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::init_' - integer,dimension(:,:),pointer :: temp_seg_starts,temp_seg_lengths - integer,dimension(:),pointer :: temp_pe_list,temp_numsegs,temp_locsize - integer :: temp_maxsize,temp_nprocs,maxsegcount - integer :: procindex,nprocs,nseg,len,myPid - integer :: src_seg_start,src_seg_length,trg_seg_start,trg_seg_length - integer :: i,j,k,l,m,n,ier - logical :: SendingToMyself,ReceivingFromMyself - - - ! Initialize Router component of Rearranger - call Router_init(SourceGSMap,TargetGSMap,myComm,OutRearranger%SendRouter) - call Router_init(TargetGSMap,SourceGSMap,myComm,OutRearranger%RecvRouter) - - call MP_comm_size(MP_COMM_WORLD,max_nprocs,ier) - if(ier/=0) call MP_perr_die(myname_,'MP_comm_size',ier) - - ! SANITY CHECK: Make sure that if SendRouter is sending to self, then, - ! by definition, RecvRouter is also receiving from self. If this is not - ! true, then write to stderr and die. - - call MP_comm_rank(ThisMCTWorld%MCT_comm,myPid,ier) - if(ier/=0) call MP_perr_die(myname_,'MP_comm_rank',ier) - - SendingToMyself = .false. - ReceivingFromMyself = .false. 
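[Editorial note] As an aside for readers of init_ and rearrange_, a minimal calling sequence for the public interface of this module might look like the fragment below. This is a hypothetical driver sketch, not MCT code: the names SrcGSMap, TrgGSMap, SrcAV, TrgAV, ReArr, and localComm are illustrative, and the two GlobalSegMaps are assumed to describe the same grid on the same communicator.

    ! Hypothetical driver fragment (illustrative names, not part of MCT)
    use m_GlobalSegMap, only : GlobalSegMap
    use m_AttrVect,     only : AttrVect
    use m_Rearranger,   only : Rearranger, init, rearrange, clean

    type(GlobalSegMap) :: SrcGSMap, TrgGSMap   ! two decompositions of one grid
    type(AttrVect)     :: SrcAV, TrgAV         ! data on the source/target decomposition
    type(Rearranger)   :: ReArr
    integer            :: localComm            ! communicator covering both decompositions

    ! ... SrcGSMap, TrgGSMap, SrcAV, TrgAV, localComm are assumed initialized elsewhere ...

    call init(SrcGSMap, TrgGSMap, localComm, ReArr)   ! build send/recv Routers
    call rearrange(SrcAV, TrgAV, ReArr, Sum=.false.)  ! move SrcAV data into TrgAV
    call clean(ReArr)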
- - do i=1,OutRearranger%SendRouter%nprocs - if(OutRearranger%SendRouter%pe_list(i) == myPid) then - SendingToMyself = .true. - endif - enddo - - do i=1,OutRearranger%RecvRouter%nprocs - if(OutRearranger%RecvRouter%pe_list(i) == myPid) then - ReceivingFromMyself = .true. - endif - enddo - - if( SendingToMyself.or.ReceivingFromMyself ) then - if( .not. (SendingToMyself.and.ReceivingFromMyself) ) then - call die(myname_,"SendRouter is not compatible with RecvRouter") - endif - endif - - - ! If not sending to nor receiving from own processor then initialize - ! the rearranger so that no local copy can be made. Then end the routine. - - if( .not. (SendingToMyself.or.ReceivingFromMyself) ) then - nullify(OutRearranger%LocalPack) - allocate(OutRearranger%LocalPack(0,0),stat=ier) - if(ier/=0) call die(myname_,'allocate(OutRearranger%LocalPack(0,0))',ier) - OutRearranger%LocalSize=0 - endif - - - ! Start the process of Router modification: Router information for - ! the local processor is extracted out and put into the local copy - ! structure- Rearranger%LocalPack. Router structures are then reassigned - ! to exclude the local copy information. - - - ! Operate on SendRouter and create local copy structures. - - if( SendingToMyself.and.ReceivingFromMyself ) then - - temp_nprocs = OutRearranger%SendRouter%nprocs-1 - maxsegcount = SIZE(OutRearranger%SendRouter%seg_starts,2) - - ! Allocate temporary Router structures to be used for modifying SendRouter - nullify(temp_seg_starts,temp_seg_lengths,temp_pe_list, & - temp_numsegs,temp_locsize) - allocate(temp_seg_starts(temp_nprocs,maxsegcount), & - temp_seg_lengths(temp_nprocs,maxsegcount), & - temp_pe_list(temp_nprocs), & - temp_numsegs(temp_nprocs), & - temp_locsize(temp_nprocs), stat=ier) - if(ier/=0) call die(myname_,'allocate(temp_seg_starts...)',ier) - - temp_maxsize=0 - procindex=0 - nullify(OutRearranger%LocalPack) - - ! Start assigning Rearranger copy structures and - ! non-local Router components - do i=1,OutRearranger%SendRouter%nprocs - - ! Gather local copy information - if(OutRearranger%SendRouter%pe_list(i) == myPid) then - - ! Allocate Rearranger copy structure - allocate(OutRearranger%LocalPack(2, & - OutRearranger%SendRouter%locsize(i)),stat=ier) - if(ier/=0) call die(myname_,'allocate(OutRearranger%LocalPack)',ier) - OutRearranger%LocalPack = 0 - - m=0 - do nseg = 1,OutRearranger%SendRouter%num_segs(i) - src_seg_start = OutRearranger%SendRouter%seg_starts(i,nseg) - src_seg_length = OutRearranger%SendRouter%seg_lengths(i,nseg)-1 - do len=0,src_seg_length - m=m+1 - OutRearranger%LocalPack(2,m) = src_seg_start+len - enddo - enddo - - else - - ! Gather non-local Router information - procindex = procindex+1 - temp_seg_starts(procindex,1:maxsegcount) = & - OutRearranger%SendRouter%seg_starts(i,1:maxsegcount) - temp_seg_lengths(procindex,1:maxsegcount) = & - OutRearranger%SendRouter%seg_lengths(i,1:maxsegcount) - temp_pe_list(procindex) = OutRearranger%SendRouter%pe_list(i) - temp_numsegs(procindex) = OutRearranger%SendRouter%num_segs(i) - temp_locsize(procindex) = OutRearranger%SendRouter%locsize(i) - temp_maxsize = max(temp_locsize(procindex),temp_maxsize) - - endif - - enddo - - ! Copy SendRouter components back in - - ! 
Deallocate existing SendRouter components - deallocate(OutRearranger%SendRouter%seg_starts,& - OutRearranger%SendRouter%seg_lengths, & - OutRearranger%SendRouter%pe_list, & - OutRearranger%SendRouter%num_segs, & - OutRearranger%SendRouter%locsize,stat=ier) - if(ier/=0) call die(myname_, & - 'deallocate(OutRearranger%SendRouter%seg_starts...)',ier) - - ! Re-allocate SendRouter components - allocate(OutRearranger%SendRouter%seg_starts(temp_nprocs,maxsegcount), & - OutRearranger%SendRouter%seg_lengths(temp_nprocs,maxsegcount), & - OutRearranger%SendRouter%pe_list(temp_nprocs), & - OutRearranger%SendRouter%num_segs(temp_nprocs), & - OutRearranger%SendRouter%locsize(temp_nprocs),stat=ier) - if(ier/=0) call die(myname_, & - 'allocate(OutRearranger%SendRouter%seg_starts...)',ier) - - ! Copy back in the spliced router information - OutRearranger%SendRouter%nprocs = temp_nprocs - OutRearranger%SendRouter%seg_starts(1:temp_nprocs,1:maxsegcount) = & - temp_seg_starts(1:temp_nprocs,1:maxsegcount) - OutRearranger%SendRouter%seg_lengths(1:temp_nprocs,1:maxsegcount) = & - temp_seg_lengths(1:temp_nprocs,1:maxsegcount) - OutRearranger%SendRouter%pe_list(1:temp_nprocs) = & - temp_pe_list(1:temp_nprocs) - OutRearranger%SendRouter%num_segs(1:temp_nprocs) = & - temp_numsegs(1:temp_nprocs) - OutRearranger%SendRouter%locsize(1:temp_nprocs) = & - temp_locsize(1:temp_nprocs) - OutRearranger%SendRouter%maxsize = temp_maxsize - - deallocate(temp_seg_starts,temp_seg_lengths,temp_pe_list, & - temp_numsegs,temp_locsize,stat=ier) - if(ier/=0) call die(myname_,'deallocate(temp_seg_starts...)',ier) - - - ! ::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - - ! Operate on RecvRouter and create local copy structures. - - temp_nprocs = OutRearranger%RecvRouter%nprocs-1 - maxsegcount = SIZE(OutRearranger%RecvRouter%seg_starts,2) - - ! Allocate temporary Router structures to be used for modifying RecvRouter - nullify(temp_seg_starts,temp_seg_lengths,temp_pe_list, & - temp_numsegs,temp_locsize) - allocate(temp_seg_starts(temp_nprocs,maxsegcount), & - temp_seg_lengths(temp_nprocs,maxsegcount), & - temp_pe_list(temp_nprocs),temp_numsegs(temp_nprocs), & - temp_locsize(temp_nprocs),stat=ier) - if(ier/=0) call die(myname_,'allocate(temp_seg_starts...)',ier) - - temp_maxsize=0 - procindex = 0 - - ! Start assigning Rearranger copy structures and - ! non-local Router components - do i=1,OutRearranger%RecvRouter%nprocs - - ! Gather local copy information - if(OutRearranger%RecvRouter%pe_list(i) == myPid) then - - ! Senity Check for Router%locsize - if( (SIZE(OutRearranger%LocalPack,2) /= & - OutRearranger%RecvRouter%locsize(i)) ) then - call die(myname_, & - 'Router Error: Local RecvRouter%locsize(myPid) /= & - & Local SendRouter%locsize(myPid)') - endif - - OutRearranger%LocalSize = OutRearranger%RecvRouter%locsize(i) - - m=0 - do nseg = 1,OutRearranger%RecvRouter%num_segs(i) - trg_seg_start = OutRearranger%RecvRouter%seg_starts(i,nseg) - trg_seg_length = OutRearranger%RecvRouter%seg_lengths(i,nseg)-1 - do len=0,trg_seg_length - m=m+1 - OutRearranger%LocalPack(1,m) = trg_seg_start+len - enddo - enddo - - else - - ! 
Gather non-local Router information - procindex = procindex+1 - temp_seg_starts(procindex,1:maxsegcount) = & - OutRearranger%RecvRouter%seg_starts(i,1:maxsegcount) - temp_seg_lengths(procindex,1:maxsegcount) = & - OutRearranger%RecvRouter%seg_lengths(i,1:maxsegcount) - temp_pe_list(procindex) = OutRearranger%RecvRouter%pe_list(i) - temp_numsegs(procindex) = OutRearranger%RecvRouter%num_segs(i) - temp_locsize(procindex) = OutRearranger%RecvRouter%locsize(i) - temp_maxsize = max(temp_locsize(procindex),temp_maxsize) - - endif - - enddo - - ! Copy RecvRouter components back in - - ! Deallocate existing SendRouter components - deallocate(OutRearranger%RecvRouter%seg_starts, & - OutRearranger%RecvRouter%seg_lengths, & - OutRearranger%RecvRouter%pe_list, & - OutRearranger%RecvRouter%num_segs, & - OutRearranger%RecvRouter%locsize,stat=ier) - if(ier/=0) call die(myname_, & - 'deallocate(OutRearranger%RecvRouter%seg_starts...)',ier) - - ! Re-allocate RecvRouter components - allocate(OutRearranger%RecvRouter%seg_starts(temp_nprocs,maxsegcount), & - OutRearranger%RecvRouter%seg_lengths(temp_nprocs,maxsegcount), & - OutRearranger%RecvRouter%pe_list(temp_nprocs), & - OutRearranger%RecvRouter%num_segs(temp_nprocs), & - OutRearranger%RecvRouter%locsize(temp_nprocs),stat=ier) - if(ier/=0) call die(myname_, & - 'allocate(OutRearranger%RecvRouter%seg_starts...)',ier) - - ! Copy back in the spliced router information - OutRearranger%RecvRouter%nprocs = temp_nprocs - OutRearranger%RecvRouter%seg_starts(1:temp_nprocs,1:maxsegcount) = & - temp_seg_starts(1:temp_nprocs,1:maxsegcount) - OutRearranger%RecvRouter%seg_lengths(1:temp_nprocs,1:maxsegcount) = & - temp_seg_lengths(1:temp_nprocs,1:maxsegcount) - OutRearranger%RecvRouter%pe_list(1:temp_nprocs) = & - temp_pe_list(1:temp_nprocs) - OutRearranger%RecvRouter%num_segs(1:temp_nprocs) = & - temp_numsegs(1:temp_nprocs) - OutRearranger%RecvRouter%locsize(1:temp_nprocs) = & - temp_locsize(1:temp_nprocs) - OutRearranger%RecvRouter%maxsize = temp_maxsize - - deallocate(temp_seg_starts,temp_seg_lengths,temp_pe_list, & - temp_numsegs,temp_locsize,stat=ier) - if(ier/=0) call die(myname_,'deallocate(temp_seg_starts...)',ier) - - endif - - end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Clean a Rearranger -! -! !DESCRIPTION: -! This routine deallocates allocated memory associated with the -! input/output {\tt Rearranger} argument {\tt ReArr}. The success -! (failure) of this operation is reported in the zero (nonzero) value of -! the optional output {\tt INTEGER} argument {\tt status}. -! -! !INTERFACE: - - subroutine clean_(ReArr, status) - -! -! !USES: -! - use m_Router,only : Router - use m_Router,only : Router_clean => clean - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(Rearranger), intent(inout) :: ReArr - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 31Jan02 - E.T. Ong - initial prototype -! 20Mar02 - E.T. Ong - working code -!EOP ___________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - ! Set output status flag (if present) to zero, which assumes - ! success. - - if(present(status)) status = 0 - - ! 
Clean up send and receive Routers: - - call Router_clean(ReArr%SendRouter,ier) - if(ier /= 0) then - if(present(status)) then - status = ier - return - else - write(stderr,'(2a,i8)') myname_, & - ':: ERROR--Router_clean(ReArr%SendRouter) failed with ier=',ier - endif - endif - - call Router_clean(ReArr%RecvRouter,ier) - if(ier /= 0) then - if(present(status)) then - status = ier - return - else - write(stderr,'(2a,i8)') myname_, & - ':: ERROR--Router_clean(ReArr%RecvRouter) failed with ier=',ier - endif - endif - - ! Clean up Local on-PE copy buffer: - - if(associated(ReArr%LocalPack)) then - deallocate(ReArr%LocalPack, stat=ier) - if(ier /= 0) then - if(present(status)) then - status=ier - else - write(stderr,'(2a,i8)') myname_, & - ':: ERROR--deallocate(ReArr%LocalPack) failed with stat=',ier - endif - endif - endif - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rearrange_ - Rearrange data between two Attribute Vectors -! -! !DESCRIPTION: -! This subroutine will take data in the {\tt SourceAv} Attribute -! Vector and rearrange it to match the GlobalSegMap used to define -! the {\tt TargetAv} Attribute Vector using the Rearrnger -! {\tt InRearranger}. -! -! The optional argument {\tt Tag} can be used to set the tag value used in -! the rearrangement. DefaultTag will be used otherwise. -! -! If the optional argument {\tt Sum} is present and true, data for the same -! physical point coming from two or more processes will be summed. -! Otherwise, data is overwritten. -! -! If the optional argument {\tt Vector} is present and true, -! vector architecture-friendly parts of this routine will be invoked. -! -! If the optional argument {\tt AlltoAll} is present and true, -! the communication will be done with an alltoall call instead of -! individual sends and receives. -! -! The size of the {\tt SourceAv} and {\tt TargetAv} -! argument must match those stored in the {\tt InRearranger} or -! and error will result. -! -! {\bf N.B.:} {\tt SourceAv} and {\tt TargetAv} are -! assumed to have exactly the same attributes -! in exactly the same order. -! -! !INTERFACE: - - subroutine rearrange_(SourceAVin,TargetAV,InRearranger,Tag,Sum,& - Vector,AlltoAll,HandShake,ISend,MaxReq) - -! -! !USES: -! - - use m_MCTWorld,only :MCTWorld - use m_MCTWorld,only :ThisMCTWorld - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_copy => copy - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : nIAttr,nRAttr - use m_AttrVect, only : Permute,Unpermute - use m_Router, only : Router - use m_SPMDutils, only : m_swapm_int, m_swapm_FP - use m_realkinds, only : FP - use m_mpif90 - use m_die - use m_stdio - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(AttrVect), intent(inout) :: TargetAV - -! !INPUT PARAMETERS: -! - type(AttrVect), target, intent(in) :: SourceAVin - type(Rearranger), target, intent(in) :: InRearranger - integer, optional, intent(in) :: Tag - logical, optional, intent(in) :: Sum - logical, optional, intent(in) :: Vector - logical, optional, intent(in) :: AlltoAll - logical, optional, intent(in) :: HandShake - logical, optional, intent(in) :: ISend - integer, optional, intent(in) :: MaxReq - -! 
!REVISION HISTORY: -! 31Jan02 - E.T. Ong - initial prototype -! 20Mar02 - E.T. Ong - working code -! 08Jul02 - E.T. Ong - change intent of Target,Source -! 29Oct03 - R. Jacob - add optional argument vector -! to control use of vector-friendly mods provided by Fujitsu. -! 30Mar06 - P. Worley - added alltoall option and -! reordered send/receive order to improve communication -! performance. Also remove replace allocated arrays with -! automatic. -! 14Oct06 - R. Jacob - check value of Sum argument. -! 25Jan08 - R. Jacob - Permute/unpermute if the internal -! routers permarr is defined. -! 29Sep16 - P. Worley - added swapm variant of -! alltoall option -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Rearrange_' - integer :: numi,numr,i,j,k,ier - integer :: VectIndex,AttrIndex,seg_start,seg_end - integer :: localindex,SrcVectIndex,TrgVectIndex,IAttrIndex,RAttrIndex - integer :: proc,numprocs,nseg,pe,pe_shift,max_pe,myPid - integer :: mp_Type_rp - integer :: mytag - integer :: ISendSize, RSendSize, IRecvSize, RRecvSize - logical :: usevector, usealltoall, useswapm - logical :: DoSum - logical :: Sendunordered - logical :: Recvunordered - real(FP):: realtyp -!----------------------------------------------------------------------- - - ! DECLARE STRUCTURES FOR MPI ARGUMENTS. - - ! declare arrays mapping from all processes to those sending to - ! or receiving from - integer :: SendList(0:max_nprocs-1) - integer :: RecvList(0:max_nprocs-1) - - ! declare arrays to hold count and locations where data is to be sent from - integer :: ISendLoc(max_nprocs) - integer :: RSendLoc(max_nprocs) - - integer :: ISendCnts(0:max_nprocs-1) - integer :: RSendCnts(0:max_nprocs-1) - - integer :: ISdispls(0:max_nprocs-1) - integer :: RSdispls(0:max_nprocs-1) - - ! declare arrays to hold data to be sent - integer,dimension(:),allocatable :: ISendBuf - real(FP),dimension(:),allocatable :: RSendBuf - - ! declare arrays to hold count and locations where data is to be received into - integer :: IRecvLoc(max_nprocs) - integer :: RRecvLoc(max_nprocs) - - integer :: IRecvCnts(0:max_nprocs-1) - integer :: RRecvCnts(0:max_nprocs-1) - - integer :: IRdispls(0:max_nprocs-1) - integer :: RRdispls(0:max_nprocs-1) - - ! declare arrays to hold data to be received - integer,dimension(:),allocatable :: IRecvBuf - real(FP),dimension(:),allocatable :: RRecvBuf - - ! declare arrays to hold MPI data types for m_swapm_XXX calls - integer :: ITypes(0:max_nprocs-1) - integer :: RTypes(0:max_nprocs-1) - - ! Structure to hold MPI request information for sends - integer :: send_ireqs(max_nprocs) - integer :: send_rreqs(max_nprocs) - - ! Structure to hold MPI request information for sends - integer :: recv_ireqs(max_nprocs) - integer :: recv_rreqs(max_nprocs) - - ! Structure to hold MPI status information for sends - integer :: send_istatus(MP_STATUS_SIZE,max_nprocs) - integer :: send_rstatus(MP_STATUS_SIZE,max_nprocs) - - ! Structure to hold MPI status information for sends - integer :: recv_istatus(MP_STATUS_SIZE,max_nprocs) - integer :: recv_rstatus(MP_STATUS_SIZE,max_nprocs) - - ! Pointer structure to make Router access simpler - type(Router), pointer :: SendRout, RecvRout - type(AttrVect),pointer :: SourceAv - type(AttrVect),target :: SourceAvtmp - - ! local swapm protocol variables and defaults - logical,parameter :: DEF_SWAPM_HS = .true. - logical swapm_hs - - logical,parameter :: DEF_SWAPM_ISEND = .false. 
- logical swapm_isend - - integer,parameter :: DEF_SWAPM_MAXREQ = 512 - integer swapm_maxreq - -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - Sendunordered=associated(InRearranger%SendRouter%permarr) - Recvunordered=associated(InRearranger%RecvRouter%permarr) - - if(Sendunordered) then - call AttrVect_init(SourceAvtmp,SourceAvin,AttrVect_lsize(SourceAvin)) - call AttrVect_copy(SourceAvin, SourceAvtmp) - call Permute(SourceAvtmp,InRearranger%SendRouter%permarr) - SourceAv => SourceAvtmp - else - SourceAv => SourceAvin - endif - - if(Recvunordered) call Permute(TargetAv,InRearranger%RecvRouter%permarr) - - ! CHECK ARGUMENTS - - ! Check the size of the Source AttrVect - if(InRearranger%SendRouter%lAvsize /= AttrVect_lsize(SourceAV)) then - call warn(myname_,"SourceAV size is not appropriate for this Rearranger") - call die(myname_,"InRearranger%SendRouter%lAvsize",InRearranger%SendRouter%lAvsize, & - "AttrVect_lsize(SourceAV)", AttrVect_lsize(SourceAV)) - endif - - ! Check the size of the Target AttrVect - if(InRearranger%RecvRouter%lAvsize /= AttrVect_lsize(TargetAV)) then - call warn(myname_,"TargetAV size is not appropriate for this Rearranger") - call die(myname_,"InRearranger%RecvRouter%lAvsize",InRearranger%RecvRouter%lAvsize, & - "AttrVect_lsize(TargetAV)", AttrVect_lsize(TargetAV)) - endif - - ! Check the number of integer attributes - if(nIAttr(SourceAV) /= nIAttr(TargetAV)) then - call warn(myname_, & - "Number of attributes in SourceAV and TargetAV do not match") - call die(myname_,"nIAttr(SourceAV)", nIAttr(SourceAV), & - "nIAttr(TargetAV)", nIAttr(TargetAV)) - endif - - ! Check the number of real attributes - if(nRAttr(SourceAV) /= nRAttr(TargetAV)) then - call warn(myname_, & - "Number of attributes in SourceAV and TargetAV do not match") - call die(myname_,"nRAttr(SourceAV)", nRAttr(SourceAV), & - "nRAttr(TargetAV)", nRAttr(TargetAV)) - endif - - usevector=.false. - if(present(Vector)) then - if(Vector) usevector=.true. - endif - - usealltoall=.false. - if(present(AlltoAll)) then - if(AlltoAll) usealltoall=.true. - endif - - useswapm=.false. - if (usealltoall) then - ! if any swapm-related optional parameters are present, - ! enable swapm variant of alltoall - - swapm_hs = DEF_SWAPM_HS - if(present(HandShake)) then - if(HandShake) swapm_hs=.true. - useswapm=.true. - endif - - swapm_isend = DEF_SWAPM_ISEND - if(present(ISend)) then - if(ISend) swapm_isend=.true. - useswapm=.true. - endif - - swapm_maxreq = DEF_SWAPM_MAXREQ - if(present(MaxReq)) then - swapm_maxreq=MaxReq - useswapm=.true. - endif - - endif - - DoSum=.false. - if(present(Sum)) then - if(Sum) DoSum=.true. - endif - - ! ASSIGN VARIABLES - - - ! Get the number of integer and real attributes - numi = nIAttr(SourceAV) - numr = nRAttr(SourceAV) - - ! Assign the pointers - nullify(SendRout,RecvRout) - SendRout => InRearranger%SendRouter - RecvRout => InRearranger%RecvRouter - - mp_Type_rp=MP_Type(realtyp) - -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! ALLOCATE DATA STRUCTURES ! - - ! IF SENDING OR RECEIVING INTEGER DATA - if (numi .ge. 1) then - - ! allocate buffer to hold all outgoing data - ISendSize = 1 - do proc=1,SendRout%nprocs - ISendLoc(proc) = ISendSize - ISendSize = ISendSize + SendRout%locsize(proc)*numi - enddo - if (ISendSize > 1) ISendSize = ISendSize - 1 - allocate(ISendBuf(ISendSize),stat=ier) - if (ier/=0) call die(myname_,'allocate(ISendBuf)',ier) - - ! 
allocate buffer to hold all incoming data - IRecvSize = 1 - do proc=1,RecvRout%nprocs - IRecvLoc(proc) = IRecvSize - IRecvSize = IRecvSize + RecvRout%locsize(proc)*numi - enddo - if (IRecvSize > 1) IRecvSize = IRecvSize - 1 - allocate(IRecvBuf(IRecvSize),stat=ier) - if(ier/=0) call die(myname_,'allocate(IRecvBuf)',ier) - - endif - - ! IF SENDING OR RECEIVING REAL DATA - if (numr .ge. 1) then - - ! allocate buffer to hold all outgoing data - RSendSize = 1 - do proc=1,SendRout%nprocs - RSendLoc(proc) = RSendSize - RSendSize = RSendSize + SendRout%locsize(proc)*numr - enddo - if (RSendSize > 1) RSendSize = RSendSize - 1 - allocate(RSendBuf(RSendSize),stat=ier) - if (ier/=0) call die(myname_,'allocate(RSendBuf)',ier) - - ! allocate buffer to hold all incoming data - RRecvSize = 1 - do proc=1,RecvRout%nprocs - RRecvLoc(proc) = RRecvSize - RRecvSize = RRecvSize + RecvRout%locsize(proc)*numr - enddo - if (RRecvSize > 1) RRecvSize = RRecvSize - 1 - allocate(RRecvBuf(RRecvSize),stat=ier) - if(ier/=0) call die(myname_,'allocate(RRecvBuf)',ier) - - endif - -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! INVERT PE LIST ! - call MP_comm_rank(ThisMCTWorld%MCT_comm,myPid,ier) - if(ier/=0) call MP_perr_die(myname_,'MP_comm_rank',ier) - - call MP_comm_size(ThisMCTWorld%MCT_comm, max_pe, ier) - if(ier/=0) call MP_perr_die(myname_,'MP_comm_size',ier) - - SendList(:) = -1 - do proc = 1,SendRout%nprocs - SendList(SendRout%pe_list(proc)) = proc - enddo - - RecvList(:) = -1 - do proc = 1,RecvRout%nprocs - RecvList(RecvRout%pe_list(proc)) = proc - enddo - - if (usealltoall) then - ! CONSTRUCT CNTS AND DISPLS FOR ALLTOALLV ! - ISendCnts(:) = 0 - ISdispls(:) = 0 - RSendCnts(:) = 0 - RSdispls(:) = 0 - IRecvCnts(:) = 0 - IRdispls(:) = 0 - RRecvCnts(:) = 0 - RRdispls(:) = 0 - do pe = 0,max_pe-1 - proc = SendList(pe) - if (proc .ne. -1) then - ISendCnts(pe) = SendRout%locsize(proc)*numi - ISdispls(pe) = ISendLoc(proc) - 1 - - RSendCnts(pe) = SendRout%locsize(proc)*numr - RSdispls(pe) = RSendLoc(proc) - 1 - endif - - proc = RecvList(pe) - if (proc .ne. -1) then - IRecvCnts(pe) = RecvRout%locsize(proc)*numi - IRdispls(pe) = IRecvLoc(proc) - 1 - - RRecvCnts(pe) = RecvRout%locsize(proc)*numr - RRdispls(pe) = RRecvLoc(proc) - 1 - endif - enddo - - ! SET MPI DATA TYPES - ITypes(:) = MP_INTEGER - RTypes(:) = mp_Type_rp - endif - -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -if (usealltoall) then - - ! Load data going to each processor - do proc = 1,SendRout%nprocs - j=0 - k=0 - - ! load the correct pieces of the integer and real vectors - do nseg = 1,SendRout%num_segs(proc) - seg_start = SendRout%seg_starts(proc,nseg) - seg_end = seg_start + SendRout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,numi - ISendBuf(ISendLoc(proc)+j) = SourceAV%iAttr(AttrIndex,VectIndex) - j=j+1 - enddo - do AttrIndex = 1,numr - RSendBuf(RSendLoc(proc)+k) = SourceAV%rAttr(AttrIndex,VectIndex) - k=k+1 - enddo - enddo - enddo - enddo - -else - ! POST MPI_IRECV - - ! Load data coming from each processor - do pe_shift = 1,max_pe - proc = RecvList(mod(myPid+pe_shift,max_pe)) - if (proc .ne. -1) then - - ! receive the integer data - if(numi .ge. 1) then - - ! set tag - mytag = DefaultTag - if(present(Tag)) mytag=Tag - - if( (RecvRout%num_segs(proc) > 1) .or. 
DoSum ) then - - call MPI_IRECV(IRecvBuf(IRecvLoc(proc)), & - RecvRout%locsize(proc)*numi,MP_INTEGER, & - RecvRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,recv_ireqs(proc),ier) - - else - - call MPI_IRECV(TargetAV%iAttr(1,RecvRout%seg_starts(proc,1)), & - RecvRout%locsize(proc)*numi,MP_INTEGER, & - RecvRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,recv_ireqs(proc),ier) - - endif - - if(ier /= 0) call MP_perr_die(myname_,'MPI_IRECV(ints)',ier) - - endif - - ! receive the real data - if(numr .ge. 1) then - - ! set tag - mytag = DefaultTag + 1 - if(present(Tag)) mytag=Tag +1 - - if( (RecvRout%num_segs(proc) > 1) .or. DoSum ) then - - call MPI_IRECV(RRecvBuf(RRecvLoc(proc)), & - RecvRout%locsize(proc)*numr,mp_Type_rp, & - RecvRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,recv_rreqs(proc),ier) - - else - - call MPI_IRECV(TargetAV%rAttr(1,RecvRout%seg_starts(proc,1)), & - RecvRout%locsize(proc)*numr,mp_Type_rp, & - RecvRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,recv_rreqs(proc),ier) - - endif - - if(ier /= 0) call MP_perr_die(myname_,'MPI_IRECV(reals)',ier) - - endif - endif - enddo - -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! POST MPI_ISEND - - ! Load data going to each processor - do pe_shift = max_pe,1,-1 - proc = SendList(mod(myPid+pe_shift,max_pe)) - if (proc .ne. -1) then - - if( SendRout%num_segs(proc) > 1 ) then - - j=0 - k=0 - - ! load the correct pieces of the integer and real vectors - do nseg = 1,SendRout%num_segs(proc) - seg_start = SendRout%seg_starts(proc,nseg) - seg_end = seg_start + SendRout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,numi - ISendBuf(ISendLoc(proc)+j) = SourceAV%iAttr(AttrIndex,VectIndex) - j=j+1 - enddo - do AttrIndex = 1,numr - RSendBuf(RSendLoc(proc)+k) = SourceAV%rAttr(AttrIndex,VectIndex) - k=k+1 - enddo - enddo - enddo - - endif - - ! send the integer data - if(numi .ge. 1) then - - ! set tag - mytag = DefaultTag - if(present(Tag)) mytag=Tag - - if( SendRout%num_segs(proc) > 1 ) then - - call MPI_ISEND(ISendBuf(ISendLoc(proc)), & - SendRout%locsize(proc)*numi,MP_INTEGER, & - SendRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,send_ireqs(proc),ier) - - else - - call MPI_ISEND(SourceAV%iAttr(1,SendRout%seg_starts(proc,1)), & - SendRout%locsize(proc)*numi,MP_INTEGER, & - SendRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,send_ireqs(proc),ier) - - endif - - if(ier /= 0) call MP_perr_die(myname_,'MPI_ISEND(ints)',ier) - - endif - - ! send the real data - if(numr .ge. 1) then - - ! set tag - mytag = DefaultTag +1 - if(present(Tag)) mytag=Tag +1 - - if( SendRout%num_segs(proc) > 1 ) then - - call MPI_ISEND(RSendBuf(RSendLoc(proc)), & - SendRout%locsize(proc)*numr,mp_Type_rp, & - SendRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,send_rreqs(proc),ier) - - else - - call MPI_ISEND(SourceAV%rAttr(1,SendRout%seg_starts(proc,1)), & - SendRout%locsize(proc)*numr,mp_Type_rp, & - SendRout%pe_list(proc),mytag, & - ThisMCTWorld%MCT_comm,send_rreqs(proc),ier) - - endif - - if(ier /= 0) call MP_perr_die(myname_,'MPI_ISEND(reals)',ier) - - endif - endif - enddo -endif ! end of else for if(usealltoall) -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! ZERO TARGETAV WHILE WAITING FOR MESSAGES TO COMPLETE - - if(DoSum) call AttrVect_zero(TargetAV) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! 
LOAD THE LOCAL PIECES OF THE INTEGER AND REAL VECTOR - - if(usevector) then -!$OMP PARALLEL DO PRIVATE(IAttrIndex,localindex,TrgVectIndex,SrcVectIndex) - do IAttrIndex=1,numi -!CDIR SELECT(VECTOR) -!DIR$ CONCURRENT -!DIR$ PREFERVECTOR - do localindex=1,InRearranger%LocalSize - TrgVectIndex = InRearranger%LocalPack(1,localindex) - SrcVectIndex = InRearranger%LocalPack(2,localindex) - TargetAV%iAttr(IAttrIndex,TrgVectIndex) = & - SourceAV%iAttr(IAttrIndex,SrcVectIndex) - enddo - enddo -!$OMP PARALLEL DO PRIVATE(RAttrIndex,localindex,TrgVectIndex,SrcVectIndex) - do RAttrIndex=1,numr -!CDIR SELECT(VECTOR) -!DIR$ CONCURRENT -!DIR$ PREFERVECTOR - do localindex=1,InRearranger%LocalSize - TrgVectIndex = InRearranger%LocalPack(1,localindex) - SrcVectIndex = InRearranger%LocalPack(2,localindex) - TargetAV%rAttr(RAttrIndex,TrgVectIndex) = & - SourceAV%rAttr(RAttrIndex,SrcVectIndex) - enddo - enddo - - else -!$OMP PARALLEL DO PRIVATE(localindex,TrgVectIndex,SrcVectIndex,IAttrIndex,RAttrIndex) - do localindex=1,InRearranger%LocalSize - TrgVectIndex = InRearranger%LocalPack(1,localindex) - SrcVectIndex = InRearranger%LocalPack(2,localindex) - do IAttrIndex=1,numi - TargetAV%iAttr(IAttrIndex,TrgVectIndex) = & - SourceAV%iAttr(IAttrIndex,SrcVectIndex) - enddo - do RAttrIndex=1,numr - TargetAV%rAttr(RAttrIndex,TrgVectIndex) = & - SourceAV%rAttr(RAttrIndex,SrcVectIndex) - enddo - enddo - endif - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - -if (usealltoall) then - - if (useswapm) then - - if (numi .ge. 1) then - call m_swapm_int(max_pe, myPid, & - ISendBuf, ISendSize, ISendCnts, ISdispls, ITypes, & - IRecvBuf, IRecvSize, IRecvCnts, IRdispls, ITypes, & - ThisMCTWorld%MCT_comm, & - swapm_hs, swapm_isend, swapm_maxreq ) - endif - - if (numr .ge. 1) then - call m_swapm_FP (max_pe, myPid, & - RSendBuf, RSendSize, RSendCnts, RSdispls, RTypes, & - RRecvBuf, RRecvSize, RRecvCnts, RRdispls, RTypes, & - ThisMCTWorld%MCT_comm, & - swapm_hs, swapm_isend, swapm_maxreq ) - endif - - else - - if (numi .ge. 1) then - call MPI_Alltoallv(ISendBuf, ISendCnts, ISdispls, MP_INTEGER, & - IRecvBuf, IRecvCnts, IRdispls, MP_INTEGER, & - ThisMCTWorld%MCT_comm,ier) - endif - - if (numr .ge. 1) then - call MPI_Alltoallv(RSendBuf, RSendCnts, RSdispls, mp_Type_rp, & - RRecvBuf, RRecvCnts, RRdispls, mp_Type_rp, & - ThisMCTWorld%MCT_comm,ier) - endif - - endif - -else - - ! WAIT FOR THE NONBLOCKING SENDS TO COMPLETE - - if(SendRout%nprocs > 0) then - - if(numi .ge. 1) then - - call MPI_WAITALL(SendRout%nprocs,send_ireqs,send_istatus,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL(ints)',ier) - - endif - - if(numr .ge. 1) then - - call MPI_WAITALL(SendRout%nprocs,send_rreqs,send_rstatus,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL(reals)',ier) - - endif - - endif - -endif -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! WAIT FOR THE NONBLOCKING RECEIVES TO COMPLETE AND UNPACK BUFFER - - do numprocs = 1,RecvRout%nprocs - - if(numi .ge. 1) then - -if (usealltoall) then - proc = numprocs -else - if(DoSum) then - proc = numprocs - call MPI_WAIT(recv_ireqs(proc),recv_istatus,ier) - else - call MPI_WAITANY(RecvRout%nprocs,recv_ireqs,proc,recv_istatus,ier) - endif -endif - - if(DoSum) then - - ! 
load the correct pieces of the integer vectors - j=0 - do nseg = 1,RecvRout%num_segs(proc) - seg_start = RecvRout%seg_starts(proc,nseg) - seg_end = seg_start + RecvRout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,numi - TargetAV%iAttr(AttrIndex,VectIndex)= & - TargetAV%iAttr(AttrIndex,VectIndex) + IRecvBuf(IRecvLoc(proc)+j) - j=j+1 - enddo - enddo - enddo - - else - - if (( RecvRout%num_segs(proc) > 1 ) .or. (usealltoall)) then - - ! load the correct pieces of the integer vectors - j=0 - do nseg = 1,RecvRout%num_segs(proc) - seg_start = RecvRout%seg_starts(proc,nseg) - seg_end = seg_start + RecvRout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,numi - TargetAV%iAttr(AttrIndex,VectIndex)=IRecvBuf(IRecvLoc(proc)+j) - j=j+1 - enddo - enddo - enddo - - endif - - endif ! end of if DoSum - - endif ! end of in numi>1 - - if(numr .ge. 1) then - -if (usealltoall) then - proc = numprocs -else - if(DoSum) then - proc = numprocs - call MPI_WAIT(recv_rreqs(proc),recv_rstatus,ier) - else - call MPI_WAITANY(RecvRout%nprocs,recv_rreqs,proc,recv_rstatus,ier) - endif -endif - - if(DoSum) then - - ! load the correct pieces of the integer vectors - k=0 - do nseg = 1,RecvRout%num_segs(proc) - seg_start = RecvRout%seg_starts(proc,nseg) - seg_end = seg_start + RecvRout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,numr - TargetAV%rAttr(AttrIndex,VectIndex) = & - TargetAV%rAttr(AttrIndex,VectIndex) + RRecvBuf(RRecvLoc(proc)+k) - k=k+1 - enddo - enddo - enddo - - else - - if (( RecvRout%num_segs(proc) > 1 ) .or. (usealltoall)) then - - ! load the correct pieces of the integer vectors - k=0 - do nseg = 1,RecvRout%num_segs(proc) - seg_start = RecvRout%seg_starts(proc,nseg) - seg_end = seg_start + RecvRout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,numr - TargetAV%rAttr(AttrIndex,VectIndex)=RRecvBuf(RRecvLoc(proc)+k) - k=k+1 - enddo - enddo - enddo - - endif - - endif ! end if DoSum - - endif ! endif if numr>1 - - enddo - - if(Sendunordered) then - call AttrVect_clean(SourceAvtmp) - nullify(SourceAv) - else - nullify(SourceAv) - endif - - if(Recvunordered) call Unpermute(TargetAv,RecvRout%permarr) - -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! DEALLOCATE ALL STRUCTURES - - if (numi .ge. 1) then - - ! Deallocate the send buffer - deallocate(ISendBuf,stat=ier) - if(ier/=0) call die(myname_,'deallocate(ISendBuf)',ier) - - ! Deallocate the receive buffer - deallocate(IRecvBuf,stat=ier) - if(ier/=0) call die(myname_,'deallocate(IRecvBuf)',ier) - - endif - - if (numr .ge. 1) then - - ! Deallocate the send buffer - deallocate(RSendBuf,stat=ier) - if(ier/=0) call die(myname_,'deallocate(RSendBuf)',ier) - - ! Deallocate the receive buffer - deallocate(RRecvBuf,stat=ier) - if(ier/=0) call die(myname_,'deallocate(RRecvBuf)',ier) - - endif - - nullify(SendRout,RecvRout) - - end subroutine rearrange_ - - - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: print_ - Print rearranger communication info -! -! !DESCRIPTION: -! Print out communication info for both routers in a -! rearranger. Print out on unit number 'lun' -! e.g. (source,destination,length) -! -! !INTERFACE: - - subroutine print_(rearr,mycomm,lun) -! -! !USES: -! 
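! ---------------------------------------------------------------------
! Illustrative sketch (not part of the original MCT module): the unpack
! loops in rearrange_ above walk each partner's segment list
! (seg_starts/seg_lengths) and copy contiguous runs out of a packed
! receive buffer into the target vector.  The stand-alone program below
! shows that segment unpack for a single hypothetical partner with one
! real attribute.

program segment_unpack
  implicit none
  integer, parameter :: nsegs = 2
  integer, parameter :: seg_start(nsegs)  = (/ 2, 6 /)
  integer, parameter :: seg_length(nsegs) = (/ 3, 2 /)
  real :: recv_buf(5) = (/ 10.0, 11.0, 12.0, 13.0, 14.0 /)
  real :: tgt(8)
  integer :: nseg, v, j

  tgt = 0.0
  j = 0
  do nseg = 1, nsegs
    do v = seg_start(nseg), seg_start(nseg) + seg_length(nseg) - 1
      j = j + 1
      tgt(v) = recv_buf(j)
    end do
  end do
  print *, tgt     ! 0 10 11 12 0 13 14 0
end program segment_unpack
! ---------------------------------------------------------------------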
- use m_die - use m_Router, only: router_print => print - - implicit none - -!INPUT/OUTPUT PARAMETERS: - type(Rearranger), intent(in) :: rearr - integer, intent(in) :: mycomm - integer, intent(in) :: lun - -! !REVISION HISTORY: -! 27Jul07 - R. Loy initial version -!EOP ___________________________________________________________________ - - - call router_print(rearr%SendRouter,mycomm,lun) - call router_print(rearr%RecvRouter,mycomm,lun) - - end subroutine print_ - - -end module m_Rearranger - - - - - diff --git a/src/externals/mct/mct/m_Router.F90 b/src/externals/mct/mct/m_Router.F90 deleted file mode 100644 index f8788d608f7..00000000000 --- a/src/externals/mct/mct/m_Router.F90 +++ /dev/null @@ -1,869 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Router -- Router class -! -! !DESCRIPTION: -! The Router data type contains all the information needed -! to send an AttrVect between a component on M MPI-processes and a component -! on N MPI-processes. This module defines the Router datatype and provides -! methods to create and destroy one. -! -! !INTERFACE: - - module m_Router - - use m_realkinds, only : FP - use m_zeit - - implicit none - - private ! except - -! !declare a private pointer structure for the real data - type :: rptr -#ifdef SEQUENCE - sequence -#endif - real(FP),dimension(:),pointer :: pr - end type - -! !declare a private pointer structure for the integer data - type :: iptr -#ifdef SEQUENCE - sequence -#endif - integer,dimension(:),pointer :: pi - end type - -! !PUBLIC TYPES: - public :: Router ! The class data structure - - public :: rptr,iptr ! pointer types used in Router -!\end{verbatim} -!% On return, pe_list is the processor ranks of the other -!% component to receive from/send to. num_segs is the -!% number of segments out of my local AttrVect which must -!% be sent/received. (In general, these wont coincide exactly -!% with the segments used to define the GlobalMap) -!% seg_start is the start *in the local AttrVect* of each segment -!% (start goes from 1 to lsize(GSMap)) -!% and seg_lengths is the length. -!\begin{verbatim} - - type Router -#ifdef SEQUENCE - sequence -#endif - integer :: comp1id ! myid - integer :: comp2id ! id of second component - integer :: nprocs ! number of procs to talk to - integer :: maxsize ! maximum amount of data going to a processor - integer :: lAvsize ! The local size of AttrVect which can be - ! used with this Router in MCT_Send/MCT_Recv - integer :: numiatt ! Number of integer attributes currently in use - integer :: numratt ! Number of real attributes currently in use - integer,dimension(:),pointer :: pe_list ! processor ranks of send/receive in MCT_comm - integer,dimension(:),pointer :: num_segs ! number of segments to send/receive - integer,dimension(:),pointer :: locsize ! total of seg_lengths for a proc - integer,dimension(:),pointer :: permarr ! possible permutation array - integer,dimension(:,:),pointer :: seg_starts ! starting index - integer,dimension(:,:),pointer :: seg_lengths! total length - type(rptr),dimension(:),pointer :: rp1 ! buffer to hold real data - type(iptr),dimension(:),pointer :: ip1 ! buffer to hold integer data - integer,dimension(:),pointer :: ireqs,rreqs ! 
buffer for MPI_Requests - integer,dimension(:,:),pointer :: istatus,rstatus ! buffer for MPI_Status - end type Router - -! !PUBLIC MEMBER FUNCTIONS: - public :: init ! Create a Router - public :: clean ! Destroy a Router - public :: print ! Print info about a Router - - - interface init ; module procedure & - initd_, & ! initialize a Router between two seperate components - initp_ ! initialize a Router locally with two GSMaps - end interface - interface clean ; module procedure clean_ ; end interface - interface print ; module procedure print_ ; end interface - -! !REVISION HISTORY: -! 15Jan01 - R. Jacob - initial prototype -! 08Feb01 - R. Jacob add locsize and maxsize -! to Router type -! 25Sep02 - R. Jacob Remove type string. Add lAvsize -! 23Jul03 - R. Jacob Add status and reqs arrays used -! in send/recv to the Router datatype. -! 24Jul03 - R. Jacob Add real and integer buffers -! for send/recv to the Router datatype. -! 22Jan08 - R. Jacob Add ability to handle an unordered -! GSMap by creating a new, ordered one and building Router from -! that. Save permutation info in Router datatype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_Router' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initd_ - initialize a Router between two seperate components -! -! !DESCRIPTION: -! The routine {\tt initd\_()} exchanges the {\tt GSMap} with the -! component identified by {\tt othercomp} and then calls {\tt initp\_()} -! to build a Router {\tt Rout} between them. -! -! {\bf N.B.} The {\tt GSMap} argument must be declared so that the index values -! on a processor are in ascending order. -! -! !INTERFACE: - - subroutine initd_(othercomp,GSMap,mycomm,Rout,name ) -! -! !USES: -! - use m_GlobalSegMap, only :GlobalSegMap - use m_ExchangeMaps,only: MCT_ExGSMap => ExchangeMap - use m_mpif90 - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: othercomp - integer, intent(in) :: mycomm - type(GlobalSegMap),intent(in) :: GSMap ! of the calling comp - character(len=*), intent(in),optional :: name - -! !OUTPUT PARAMETERS: -! - type(Router), intent(out) :: Rout - -! !REVISION HISTORY: -! 15Jan01 - R. Jacob - initial prototype -! 06Feb01 - R. Jacob - Finish initialization -! of the Router. Router now works both ways. -! 25Apr01 - R. Jacob - Eliminate early -! custom code to exchange GSMap components and instead -! the more general purpose routine in m_ExchangeMaps. -! Use new subroutine OrderedPoints in m_GlobalSegMap -! to construct the vector of local and remote GSMaps. -! Clean-up code a little. -! 03May01 - R. Jacob - rename to initd and -! move most of code to new initp routine -! -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initd_' - character(len=40) :: tagname - - type(GlobalSegMap) :: RGSMap ! 
the other GSMap - integer :: ier - -!--------------------------begin code----------------------- - -!!!!!!!!!!!!!!!!!Exchange of global map data - - if(present(name)) then - tagname='01'//name//'ExGSMap' - - call zeit_ci(trim(tagname)) - call MCT_ExGSMap(GSMap,mycomm,RGSMap,othercomp,ier) - if(ier /= 0) call die(myname_,'ExGSMap',ier) - call zeit_co(trim(tagname)) - -!!!!!!!!!!!!!!!!!Begin comparison of globalsegmaps - - call initp_(GSMap,RGSMap, mycomm, Rout,name) - else - call MCT_ExGSMap(GSMap,mycomm,RGSMap,othercomp,ier) - call initp_(GSMap,RGSMap, mycomm, Rout) - endif - - end subroutine initd_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initp_ - initialize a Router from two GlobalSegMaps -! -! !DESCRIPTION: -! -! Given two GlobalSegmentMaps {\tt GSMap} and {\tt RGSMap}, intialize a -! Router {\tt Rout} between them. Use local communicator {\tt mycomm}. -! -! {\bf N.B.} The two {\tt GSMap} arguments must be declared so that the index values -! on a processor are in ascending order. -! -! !INTERFACE: - - subroutine initp_(inGSMap,inRGSMap,mycomm,Rout,name ) -! -! !USES: -! - use m_GlobalSegMap, only :GlobalSegMap - use m_GlobalSegMap, only :ProcessStorage - use m_GlobalSegMap, only :GSMap_comp_id => comp_id - use m_GlobalSegMap, only :GSMap_increasing => increasing - use m_GlobalSegMap, only :GlobalSegMap_copy => copy - use m_GlobalSegMap, only :GlobalSegMap_init => init - use m_GlobalSegMap, only :GlobalSegMap_clean => clean - use m_GlobalSegMap, only :GlobalSegMap_OPoints => OrderedPoints - use m_GlobalSegMap, only :GlobalSegMap_ngseg => ngseg ! rml - use m_GlobalSegMap, only :GlobalSegMap_nlseg => nlseg ! rml - use m_GlobalSegMap, only :GlobalSegMap_max_nlseg => max_nlseg ! rml - - use m_GlobalToLocal, only :GlobalToLocalIndex - use m_MCTWorld, only :MCTWorld - use m_MCTWorld, only :ThisMCTWorld - - use m_Permuter ,only:Permute - use m_MergeSorts ,only:IndexSet - use m_MergeSorts ,only:IndexSort - - use m_mpif90 - use m_die - -! use m_zeit - - - use m_stdio ! rml -! use shr_timer_mod ! rml timers - - implicit none - -! !INPUT PARAMETERS: -! - type(GlobalSegMap), intent(in) :: inGSMap - type(GlobalSegMap), intent(in) :: inRGSMap - integer , intent(in) :: mycomm - character(len=*), intent(in),optional :: name - -! !OUTPUT PARAMETERS: -! - type(Router), intent(out) :: Rout - -! !REVISION HISTORY: -! 03May01 - R.L. Jacob - Initial code brought -! in from old init routine. -! 31Jul01 - Jace A Mogill -! Rewrote to reduce number of loops and temp storage -! 26Apr06 - R. Loy - recode the search through -! the remote GSMap to improve efficiency -! 05Jan07 - R. Loy - improved bound on size of -! tmpsegcount and tmpsegstart -! 15May07 - R. Loy - improved bound on size of -! rgs_lb and rgs_ub -! 25Jan08 - R. Jacob - Dont die if GSMap is not -! increasing. Instead, permute it to increasing and proceed. -! 07Sep12 - T. Craig - Replace a double loop with a single -! to improve speed for large proc and segment counts. -! 12Nov16 - P. Worley - eliminate iterations in nested -! 
loop that can be determined to be unnecessary -!EOP ------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'::initp_' - integer :: ier,i,j,k,m,n - integer :: mysize,myPid,othercomp - integer :: lmaxsize,totallength - integer :: maxsegcount,count - logical, dimension(:), allocatable :: tmppe_list - integer, dimension(:,:), pointer :: tmpsegcount,tmpsegstart - - - integer :: my_left ! Left point in local segment (global memory) - integer :: my_right ! Right point in local segment (global memory) - integer :: my_leftmost ! Leftmost point in local segments (global memory) - integer :: my_rightmost ! Rightmost point in local segments (global memory) - integer :: r_left ! Left point in remote segment (global memory) - integer :: r_right ! Right point in remote segment (global memory) - integer :: r_leftmost ! Leftmost point and rightmost point - integer :: r_rightmost ! in remote segments in given process (global memory) - integer :: nsegs_overlap ! Number of segments that overlap between two procs - - - integer :: ngseg, nlseg - integer :: myseg, rseg - integer :: rseg_leftbase, rseg_start - integer :: prev_right ! Rightmost local point in previous overlapped segment - integer :: local_left, local_right - integer,allocatable :: mygs_lb(:),mygs_ub(:),mygs_len(:),mygs_lstart(:) - integer :: r_ngseg - integer,allocatable :: rgs_count(:),rgs_lb(:,:),rgs_ub(:,:) - integer,allocatable :: nsegs_overlap_arr(:) - - integer :: overlap_left, overlap_right, overlap_diff - - integer :: proc, nprocs - integer :: feas_proc, feas_nprocs - integer,allocatable :: feas_procs(:), inv_feas_procs(:) - - integer :: max_rgs_count, max_overlap_segs - type(GlobalSegMap) :: GSMap - type(GlobalSegMap) :: RGSMap - integer, dimension(:), pointer :: gpoints - integer, dimension(:), pointer :: permarr - integer, dimension(:), pointer :: rpermarr - integer :: gmapsize - character(len=40) :: tagname - - - integer,save :: t_initialized=0 ! rml timers - integer,save :: t_loop ! rml timers - integer,save :: t_loop2 ! rml timers - integer,save :: t_load ! rml timers - - call MP_comm_rank(mycomm,myPid,ier) - if(ier/=0) call MP_perr_die(myname_,'MP_comm_rank',ier) - - nullify(Rout%permarr) - - if(present(name)) then - tagname='02'//name//'incheck' - call zeit_ci(trim(tagname)) - endif - if (.not. GSMap_increasing(inGSMap)) then - if(myPid == 0) call warn(myname_,'GSMap indices not increasing...Will correct') - call GlobalSegMap_OPoints(inGSMap,myPid,gpoints) - gmapsize=ProcessStorage(inGSMap,myPid) - allocate(permarr(gmapsize), stat=ier) - if(ier/=0) call die(myname_,'allocate permarr',ier) - call IndexSet(permarr) - call IndexSort(permarr,gpoints) - call Permute(gpoints,permarr,gmapsize) - call GlobalSegMap_init(GSMap,gpoints,mycomm,inGSMap%comp_id,gsize=inGSMap%gsize) - - allocate(Rout%permarr(gmapsize),stat=ier) - if(ier/=0) call die(myname_,'allocate Router%permarr',ier) - Rout%permarr(:)=permarr(:) - - deallocate(gpoints,permarr, stat=ier) - if(ier/=0) call die(myname_,'deallocate gpoints,permarr',ier) - - else - call GlobalSegMap_copy(inGSMap,GSMap) - endif - - if (.not. 
GSMap_increasing(inRGSMap)) then - if(myPid == 0) call warn(myname_,'RGSMap indices not increasing...Will correct') - call GlobalSegMap_OPoints(inRGSMap,myPid,gpoints) - gmapsize=ProcessStorage(inRGSMap,myPid) - allocate(rpermarr(gmapsize), stat=ier) - if(ier/=0) call die(myname_,'allocate rpermarr',ier) - call IndexSet(rpermarr) - call IndexSort(rpermarr,gpoints) - call Permute(gpoints,rpermarr,gmapsize) - - call GlobalSegMap_init(RGSMap,gpoints,mycomm,inRGSMap%comp_id,gsize=inRGSMap%gsize) - - deallocate(gpoints,rpermarr, stat=ier) - if(ier/=0) call die(myname_,'deallocate gpoints,rpermarr',ier) - else - call GlobalSegMap_copy(inRGSMap,RGSMap) - endif - if(present(name)) then - call zeit_co(trim(tagname)) - endif - - - mysize = ProcessStorage(GSMap,myPid) - othercomp = GSMap_comp_id(RGSMap) - - -!. . . . . . . . . . . . . . . . . . . . . . . . - - - -!! -!! determine the global segments on this processor -!! just once, so the info be used repeatedly below -!! same code was used in m_GlobalToLocal - should make a subroutine... -!! - if(present(name)) then - tagname='03'//name//'lloop' - call zeit_ci(trim(tagname)) - endif - - ngseg = GlobalSegMap_ngseg(GSMap) - nlseg = GlobalSegMap_nlseg(GSMap, myPid) - - allocate( mygs_lb(nlseg), mygs_ub(nlseg), mygs_len(nlseg), & - mygs_lstart(nlseg), stat=ier ) - if(ier/=0) call die(myname_,'allocate mygs',ier) - - n = 0 - do i=1,ngseg - if (GSMap%pe_loc(i) == myPid ) then - n=n+1 - mygs_lb(n)=GSMap%start(i) - mygs_ub(n)=GSMap%start(i) + GSMap%length(i) -1 - mygs_len(n)=GSMap%length(i) - endif - enddo - - if (n .ne. nlseg) then - write(stderr,*) myname_,"mismatch nlseg",n,nlseg - call die(myname) - endif - - if (nlseg > 0) mygs_lstart(1)=1 - do i=2,nlseg - mygs_lstart(i)=mygs_lstart(i-1)+mygs_len(i-1) - enddo - if(present(name)) then - call zeit_co(trim(tagname)) - endif - -!! -!! determine the possibly overlapping segments -!! in RGSMap that are local to each proc -!! - nprocs=ThisMCTWorld%nprocspid(othercomp) - r_ngseg = GlobalSegMap_ngseg(RGSMap) - - if (nlseg > 0) then - my_leftmost = mygs_lb(1) - my_rightmost = mygs_ub(nlseg) - -!! -!! count number of potentially overlapping remote segments -!! and which and how many processes hold these -!! - if(present(name)) then - tagname='04'//name//'rloop' - call zeit_ci(trim(tagname)) - endif - - !! number of potentially overlapping segments in RGSMap local to proc - !! and mapping from processes that hold these to actual process id - allocate( rgs_count(nprocs), feas_procs(nprocs), & - inv_feas_procs(nprocs), stat=ier ) - if(ier/=0) call die(myname_,'allocate rgs_count, feas_procs',ier) - - rgs_count = 0 - do i=1,r_ngseg - r_left = RGSMap%start(i) - r_right = RGSMap%start(i) + RGSMap%length(i) - 1 - - if (.not. (my_rightmost < r_left .or. & ! potential overlap - my_leftmost > r_right ) ) then - proc = RGSMap%pe_loc(i) + 1 -! if (proc < 1 .or. proc > nprocs) then -! write(stderr,*) myname_,"proc pe_loc error",i,proc -! call die(myname_,'pe_loc error',0) -! endif - rgs_count(proc) = rgs_count(proc) + 1 - endif - - enddo - - feas_nprocs = 0 - feas_procs = -1 - inv_feas_procs = -1 - do proc=1,nprocs - if (rgs_count(proc) > 0) then - feas_nprocs = feas_nprocs + 1 - feas_procs(feas_nprocs) = proc - inv_feas_procs(proc) = feas_nprocs - endif - enddo - -!! -!! build list of potentially overlapping remote segments -!! - !! original size of rgs_lb()/ub() was (r_ngseg,nprocs) - !! at the cost of looping to compute it (within GlobalSegMap_max_nlseg), - !! reduced size to (r_max_nlseg,nprocs) - !! 
then further reduced to (max_rgs_count,feas_nprocs) - - max_rgs_count=0 - do proc=1,nprocs - max_rgs_count = max( max_rgs_count, rgs_count(proc) ) - enddo - - allocate( rgs_lb(max_rgs_count,feas_nprocs), & - rgs_ub(max_rgs_count,feas_nprocs), & - nsegs_overlap_arr(feas_nprocs), stat=ier ) - if(ier/=0) call die(myname_,'allocate rgs, nsegs',ier) - - !! (note: redefining rgs_count to be indexed as 1:feas_nprocs - !! instead of as 1:nprocs) - rgs_count = 0 - do i=1,r_ngseg - r_left = RGSMap%start(i) - r_right = RGSMap%start(i) + RGSMap%length(i) -1 - - if (.not. (my_rightmost < r_left .or. & ! potential overlap - my_leftmost > r_right) ) then - proc = RGSMap%pe_loc(i) + 1 - feas_proc = inv_feas_procs(proc) - rgs_count(feas_proc) = rgs_count(feas_proc) + 1 - rgs_lb( rgs_count(feas_proc) , feas_proc ) = RGSMap%start(i) - rgs_ub( rgs_count(feas_proc) , feas_proc ) = RGSMap%start(i) + RGSMap%length(i) -1 - endif - - enddo - - deallocate(inv_feas_procs,stat=ier) - if(ier/=0) call die(myname_,'deallocate inv_feas_procs',ier) - - if(present(name)) then - call zeit_co(trim(tagname)) - endif - - else - - max_rgs_count = 0 - feas_nprocs = 0 - - endif - -!!!!!!!!!!!!!!!!!! - -! allocate space for searching -! overlap segments to a given remote proc cannot be more than -! the max of the local segments and the remote segments - - if(present(name)) then - tagname='06'//name//'loop2' - call zeit_ci(trim(tagname)) - endif - - max_overlap_segs = max(nlseg,max_rgs_count) - - allocate(tmpsegcount(feas_nprocs, max_overlap_segs),& - tmpsegstart(feas_nprocs, max_overlap_segs),& - tmppe_list(feas_nprocs),stat=ier) - if(ier/=0) & - call die( myname_,'allocate tmpsegcount etc. size ', & - feas_nprocs, ' by ',max_overlap_segs) - - if (feas_nprocs > 0) then - tmpsegcount=0 - tmpsegstart=0 - endif - count =0 - maxsegcount=0 - -!!!!!!!!!!!!!!!!!! - - do feas_proc = 1, feas_nprocs - nsegs_overlap = 0 - tmppe_list(feas_proc) = .FALSE. ! no overlaps with proc yet - - r_leftmost = rgs_lb(1,feas_proc) - r_rightmost = rgs_ub(rgs_count(feas_proc),feas_proc) - - rseg_leftbase = 0 - do myseg = 1, nlseg ! loop over local segs on 'myPID' - - my_left = mygs_lb(myseg) - my_right= mygs_ub(myseg) - - ! determine whether any overlap - if (.not. (my_right < r_leftmost .or. & - my_left > r_rightmost) ) then - - rseg_start = rseg_leftbase + 1 ! rseg loop index to start searching from - - ! loop over candidate overlapping remote segs on 'feas_proc' - do rseg = rseg_start, rgs_count(feas_proc) - - r_right = rgs_ub(rseg,feas_proc) - if (r_right < my_left ) then ! to the left - rseg_leftbase = rseg ! remember to start to the right of - ! this for next myseg - cycle ! try the next remote segment - endif - - r_left = rgs_lb(rseg,feas_proc) - if (r_left > my_right) exit ! to the right, so no more segments - ! need to be examined - - ! otherwise, overlaps - if (nsegs_overlap == 0) then ! first overlap w/this proc - count = count + 1 - tmppe_list(feas_proc) = .TRUE. - prev_right = -9999 - else - prev_right = local_right - endif - - overlap_left=max(my_left, r_left) - overlap_right=min(my_right, r_right) - overlap_diff= overlap_right - overlap_left - - local_left = mygs_lstart(myseg) + (overlap_left - my_left) - local_right = local_left + overlap_diff - - ! 
non-contiguous w/prev one - if (local_left /= (prev_right+1) ) then - nsegs_overlap = nsegs_overlap + 1 - tmpsegstart(count, nsegs_overlap) = local_left - endif - - tmpsegcount(count, nsegs_overlap) = & - tmpsegcount(count, nsegs_overlap) + overlap_diff + 1 - - enddo - - endif - - enddo - - nsegs_overlap_arr(feas_proc)=nsegs_overlap - enddo - - !! pull this out of the loop to vectorize - do feas_proc = 1, feas_nprocs - maxsegcount=max(maxsegcount,nsegs_overlap_arr(feas_proc)) - enddo - - if (maxsegcount > max_overlap_segs) & - call die( myname_,'overran max_overlap_segs =', & - max_overlap_segs, ' count = ',maxsegcount) - -! write(stderr,*) 'max_overlap_segs =', max_overlap_segs, & -! 'maxsegcount =',maxsegcount, & -! 'mysize =',mysize - - - deallocate( mygs_lb, mygs_ub, mygs_len, mygs_lstart, stat=ier) - if(ier/=0) call die(myname_,'deallocate mygs,nsegs',ier) - - if (nlseg > 0) then - deallocate( rgs_count, rgs_lb, rgs_ub, & - nsegs_overlap_arr, stat=ier) - if(ier/=0) call die(myname_,'deallocate p_rgs, nsegs',ier) - endif - -! call shr_timer_stop(t_loop2) ! rml timers - if(present(name)) then - call zeit_co(trim(tagname)) - endif - - -!. . . . . . . . . . . . . . . . . . . . . . . . - - -!!!!!!!!!!!!!!!!!!!!end of search through remote GSMap - -! start loading up the Router with data - - if(present(name)) then - tagname='07'//name//'load' - call zeit_ci(trim(tagname)) - endif - - Rout%comp1id = GSMap_comp_id(GSMap) - Rout%comp2id = othercomp - Rout%nprocs = count - Rout%numiatt = 0 - Rout%numratt = 0 - - allocate(Rout%pe_list(count),Rout%num_segs(count), & - Rout%seg_starts(count,maxsegcount), & - Rout%seg_lengths(count,maxsegcount), & - Rout%locsize(count),stat=ier) - if(ier/=0) call die(myname_,'allocate(Rout..)',ier) - - allocate(Rout%istatus(MP_STATUS_SIZE,count), & - Rout%rstatus(MP_STATUS_SIZE,count), & - Rout%rreqs(count),Rout%ireqs(count),stat=ier) - if(ier/=0) call die(myname_,'allocate(status,reqs,...)',ier) - -! allocate the number of pointers needed - allocate(Rout%ip1(count),stat=ier) - if(ier/=0) call die(myname_,'allocate(ip1)',ier) - -! allocate the number of pointers needed - allocate(Rout%rp1(count),stat=ier) - if(ier/=0) call die(myname_,'allocate(rp1)',ier) - - m=0 - do i=1,feas_nprocs - if(tmppe_list(i))then - m=m+1 - ! load processor rank in MCT_comm - proc = feas_procs(i) - Rout%pe_list(m)=ThisMCTWorld%idGprocid(othercomp,proc-1) - endif - enddo - - lmaxsize=0 - do i=1,count - totallength=0 - do j=1,maxsegcount - if(tmpsegcount(i,j) /= 0) then - Rout%num_segs(i)=j - Rout%seg_starts(i,j)=tmpsegstart(i,j) - Rout%seg_lengths(i,j)=tmpsegcount(i,j) - totallength=totallength+Rout%seg_lengths(i,j) - endif - enddo - Rout%locsize(i)=totallength - lmaxsize=MAX(lmaxsize,totallength) - enddo - - Rout%maxsize=lmaxsize - Rout%lAvsize=mysize - - if (nlseg > 0) then - deallocate(feas_procs,stat=ier) - if(ier/=0) call die(myname_,'deallocate feas_procs',ier) - endif - - deallocate(tmpsegstart,tmpsegcount,tmppe_list,stat=ier) - if(ier/=0) call die(myname_,'deallocate tmp',ier) - - call GlobalSegMap_clean(RGSMap) - call GlobalSegMap_clean(GSMap) - - if(present(name)) then - call zeit_co(trim(tagname)) - endif - - end subroutine initp_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destroy a Router -! -! !DESCRIPTION: -! 
Deallocate Router internal data structures and set integer parts to zero. -! -! !INTERFACE: - - subroutine clean_(Rout,stat) -! -! !USES: -! - use m_die - - implicit none - -!INPUT/OUTPUT PARAMETERS: - type(Router), intent(inout) :: Rout - -!OUTPUT PARAMETERS: - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Jan01 - R. Jacob - initial prototype -! 08Feb01 - R. Jacob - add code to clean -! the maxsize and locsize -! 01Mar02 - E.T. Ong removed the die to prevent -! crashes and added stat argument. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - deallocate(Rout%pe_list,Rout%num_segs,Rout%seg_starts, & - Rout%locsize,Rout%seg_lengths,stat=ier) - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(Rout%pe_list,...)',ier) - endif - - deallocate(Rout%rreqs,Rout%ireqs,Rout%rstatus,& - Rout%istatus,stat=ier) - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(Rout%rreqs,...)',ier) - endif - - deallocate(Rout%ip1,Rout%rp1,stat=ier) - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(Rout%ip1,...)',ier) - endif - - if(associated(Rout%permarr)) then - deallocate(Rout%permarr,stat=ier) - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(Rout%ip1,...)',ier) - endif - endif - - Rout%comp1id = 0 - Rout%comp2id = 0 - Rout%nprocs = 0 - Rout%maxsize = 0 - Rout%lAvsize = 0 - Rout%numiatt = 0 - Rout%numratt = 0 - - - end subroutine clean_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: print_ - Print router info -! -! !DESCRIPTION: -! Print out communication info about router on unit number 'lun' -! e.g. (source,destination,length) -! -! !INTERFACE: - - subroutine print_(rout,mycomm,lun) -! -! !USES: -! - use m_die - use m_mpif90 - - implicit none - -!INPUT/OUTPUT PARAMETERS: - type(Router), intent(in) :: Rout - integer, intent(in) :: mycomm - integer, intent(in) :: lun - -! !REVISION HISTORY: -! 27Jul07 - R. Loy initial version -!EOP ___________________________________________________________________ - - - integer iproc - integer myrank - integer ier - character(len=*),parameter :: myname_=myname//'::print_' - - call MP_comm_rank(mycomm,myrank,ier) - if(ier/=0) call MP_perr_die(myname_,'MP_comm_rank',ier) - - - do iproc=1,rout%nprocs - if (rout%num_segs(iproc) > 0) then - write(lun,*) myrank,rout%pe_list(iproc),rout%locsize(iproc) - endif - end do - - - end subroutine print_ - - - end module m_Router - diff --git a/src/externals/mct/mct/m_SPMDutils.F90 b/src/externals/mct/mct/m_SPMDutils.F90 deleted file mode 100644 index d2bbd59cfa5..00000000000 --- a/src/externals/mct/mct/m_SPMDutils.F90 +++ /dev/null @@ -1,1148 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SPMDutils -- Communication operators to address performance -! issues for specific communication patterns -! -! !DESCRIPTION: -! This module provides the swapm equivalent to MPI_Alltoallv that -! 
has proven to be more robust with respect to performance than the -! MPI collective or the native MCT communication algorithms when the -! communication pattern is sparse and when load imbalance or send/receive -! asymmetry leads some processes to be flooded by unexpected messages. -! -! Based on algorithms implemented in CAM, but this version modelled after -! pio_spmd_utils.F90 in PIO1 -! -! !SEE ALSO: -! m_Rearranger -! -! -! !INTERFACE: - -! Disable the use of the MPI ready send protocol by default, to -! address recurrent issues with poor performance or incorrect -! functionality in MPI libraries. When support is known to be robust, -! or for experimentation, can be re-enabled by defining the CPP token -! _USE_MPI_RSEND during the build process. -! -#ifndef _USE_MPI_RSEND -#define MPI_RSEND MPI_SEND -#define mpi_rsend mpi_send -#define MPI_IRSEND MPI_ISEND -#define mpi_irsend mpi_isend -#endif - - module m_SPMDutils - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: m_swapm_int ! swapm alternative to MPI_AlltoallV for integer data - public :: m_swapm_FP ! swapm alternative to MPI_AlltoallV for FP data - -! !DEFINED PARAMETERS: - - character(len=*), parameter :: myname='MCT::m_SPMDutils' - -! !REVISION HISTORY: -! 28Sep16 - P. Worley - initial prototype -!EOP ___________________________________________________________________ - - contains - -!======================================================================== -! - - integer function pair(np,p,k) - - integer np,p,k,q - q = ieor(p,k) - if(q.gt.np-1) then - pair = -1 - else - pair = q - endif - return - - end function pair - -! -!======================================================================== -! - - integer function ceil2(n) - integer n,p - p=1 - do while(p.lt.n) - p=p*2 - enddo - ceil2=p - return - end function ceil2 - -! -!======================================================================== -! - subroutine m_swapm_int ( nprocs, mytask, & - sndbuf, sbuf_siz, sndlths, sdispls, stypes, & - rcvbuf, rbuf_siz, rcvlths, rdispls, rtypes, & - comm, comm_hs, comm_isend, comm_maxreq ) - -!----------------------------------------------------------------------- -! -!> Purpose: -!! Reduced version of original swapm (for swap of multiple messages -!! using MPI point-to-point routines), more efficiently implementing a -!! subset of the swap protocols. -!! -!! Method: -!! comm_protocol: -!! comm_isend == .true.: use nonblocking send, else use blocking send -!! comm_hs == .true.: use handshaking protocol -!! comm_maxreq: -!! =-1,0: do not limit number of outstanding send/receive requests -!! >0: do not allow more than min(comm_maxreq, steps) outstanding -!! nonblocking send requests or nonblocking receive requests -!! -!! Author of original version: P. Worley -!! Ported from PIO1: P. Worley, September 2016 -!< -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- - use m_mpif90 - use m_realkinds, only : FP - use m_die, only : MP_perr_die - - implicit none -!---------------------------Input arguments-------------------------- -! - integer, intent(in) :: nprocs ! size of communicator - integer, intent(in) :: mytask ! MPI task id with communicator - integer, intent(in) :: sbuf_siz ! size of send buffer - integer, intent(in) :: rbuf_siz ! size of receive buffer - - integer, intent(in) :: sndlths(0:nprocs-1)! length of outgoing message - integer, intent(in) :: sdispls(0:nprocs-1)! offset from beginning of send - ! 
buffer where outgoing messages - ! should be sent from - integer, intent(in) :: stypes(0:nprocs-1) ! MPI data types - integer, intent(in) :: rcvlths(0:nprocs-1)! length of incoming messages - integer, intent(in) :: rdispls(0:nprocs-1)! offset from beginning of receive - ! buffer where incoming messages - ! should be placed - integer, intent(in) :: rtypes(0:nprocs-1) ! MPI data types - integer, intent(in) :: sndbuf(sbuf_siz) ! outgoing message buffer - - integer, intent(in) :: comm ! MPI communicator - logical, intent(in) :: comm_hs ! handshaking protocol? - logical, intent(in) :: comm_isend ! nonblocking send protocol? - integer, intent(in) :: comm_maxreq ! maximum number of outstanding - ! nonblocking requests - -!---------------------------Output arguments-------------------------- -! - integer, intent(out) :: rcvbuf(rbuf_siz) ! incoming message buffer - -! -!---------------------------Local workspace------------------------------------------- -! - character(len=*), parameter :: subName=myname//'::m_swapm_int' - - integer :: steps ! number of swaps to initiate - integer :: swapids(nprocs) ! MPI process id of swap partners - integer :: p ! process index - integer :: istep ! loop index - integer :: tag ! MPI message tag - integer :: offset_t ! MPI message tag offset, for addressing - ! message conflict bug (if necessary) - integer :: offset_s ! index of message beginning in - ! send buffer - integer :: offset_r ! index of message beginning in - ! receive buffer - integer :: sndids(nprocs) ! send request ids - integer :: rcvids(nprocs) ! receive request ids - integer :: hs_rcvids(nprocs) ! handshake receive request ids - - integer :: maxreq, maxreqh ! maximum number of outstanding - ! nonblocking requests (and half) - integer :: hs ! handshake variable - integer :: rstep ! "receive" step index - - logical :: handshake, sendd ! protocol option flags - - integer :: ier ! return error status - integer :: status(MP_STATUS_SIZE) ! MPI status -! -!------------------------------------------------------------------------------------- -! -#ifdef _NO_M_SWAPM_TAG_OFFSET - offset_t = 0 -#else - offset_t = nprocs -#endif -! - ! if necessary, send to self - if (sndlths(mytask) > 0) then - tag = mytask + offset_t - - offset_r = rdispls(mytask)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(mytask), rtypes(mytask), & - mytask, tag, comm, rcvids(1), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - offset_s = sdispls(mytask)+1 - call mpi_send( sndbuf(offset_s), sndlths(mytask), stypes(mytask), & - mytask, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - - call mpi_wait( rcvids(1), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - ! calculate swap partners and communication ordering - steps = 0 - do istep=1,ceil2(nprocs)-1 - p = pair(nprocs,istep,mytask) - if (p >= 0) then - if (sndlths(p) > 0 .or. rcvlths(p) > 0) then - steps = steps + 1 - swapids(steps) = p - end if - end if - end do - - if (steps .eq. 0) return - - ! identify communication protocol - if (comm_isend) then - sendd = .false. - else - sendd = .true. - endif - handshake = comm_hs - - ! identify maximum number of outstanding nonblocking requests to permit - if (steps .eq. 1) then - maxreq = 1 - maxreqh = 1 - else - if (comm_maxreq >= -1) then - maxreq = comm_maxreq - else - maxreq = steps - endif - - if ((maxreq .le. steps) .and. 
(maxreq > 0)) then - if (maxreq > 1) then - maxreqh = maxreq/2 - else - maxreq = 2 - maxreqh = 1 - endif - else - maxreq = steps - maxreqh = steps - endif - endif - -! Four protocol options: -! (1) handshaking + blocking sends - if ((handshake) .and. (sendd)) then - - ! Initialize hs variable - hs = 1 - - ! Post initial handshake receive requests - do istep=1,maxreq - p = swapids(istep) - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - - ! Post initial receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new rsend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_wait ( hs_rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - - call mpi_rsend ( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_RSEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - - ! Submit a new handshake irecv request - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - - ! Submit a new irecv request - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - endif - - endif -! - enddo - - ! wait for rest of receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - -! (2) handshaking + nonblocking sends - elseif ((handshake) .and. (.not. sendd)) then - - ! Initialize hs variable - hs = 1 - - ! Post initial handshake receive requests - do istep=1,maxreq - p = swapids(istep) - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - - ! 
Post initial receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new irsend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_wait ( hs_rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - - call mpi_irsend( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, sndids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRSEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - - ! Submit a new handshake irecv request - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - - ! Submit a new irecv request - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - endif - - ! Wait for outstanding i(r)send request to complete - p = swapids(istep-maxreqh) - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - -! (3) no handshaking + blocking sends - elseif ((.not. handshake) .and. (sendd)) then - - ! Post receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new send request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_send( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - ! 
Submit a new irecv request - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - -! (4) no handshaking + nonblocking sends - elseif ((.not. handshake) .and. (.not. sendd)) then - - ! Post receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new isend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_isend( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, sndids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_ISEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - ! Submit a new irecv request - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - endif - - ! Wait for outstanding i(r)send request to complete - p = swapids(istep-maxreqh) - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - - endif - - return - - end subroutine m_swapm_int - -! -!======================================================================== -! - subroutine m_swapm_FP ( nprocs, mytask, & - sndbuf, sbuf_siz, sndlths, sdispls, stypes, & - rcvbuf, rbuf_siz, rcvlths, rdispls, rtypes, & - comm, comm_hs, comm_isend, comm_maxreq ) - -!----------------------------------------------------------------------- -! -!> Purpose: -!! Reduced version of original swapm (for swap of multiple messages -!! using MPI point-to-point routines), more efficiently implementing a -!! subset of the swap protocols. -!! -!! Method: -!! comm_protocol: -!! comm_isend == .true.: use nonblocking send, else use blocking send -!! comm_hs == .true.: use handshaking protocol -!! comm_maxreq: -!! =-1,0: do not limit number of outstanding send/receive requests -!! >0: do not allow more than min(comm_maxreq, steps) outstanding -!! 
nonblocking send requests or nonblocking receive requests -!! -!! Author of original version: P. Worley -!! Ported from PIO1: P. Worley, September 2016 -!< -!----------------------------------------------------------------------- -!----------------------------------------------------------------------- - use m_mpif90 - use m_realkinds, only : FP - use m_die, only : MP_perr_die - - implicit none -!---------------------------Input arguments-------------------------- -! - integer, intent(in) :: nprocs ! size of communicator - integer, intent(in) :: mytask ! MPI task id with communicator - integer, intent(in) :: sbuf_siz ! size of send buffer - integer, intent(in) :: rbuf_siz ! size of receive buffer - - integer, intent(in) :: sndlths(0:nprocs-1)! length of outgoing message - integer, intent(in) :: sdispls(0:nprocs-1)! offset from beginning of send - ! buffer where outgoing messages - ! should be sent from - integer, intent(in) :: stypes(0:nprocs-1) ! MPI data types - integer, intent(in) :: rcvlths(0:nprocs-1)! length of incoming messages - integer, intent(in) :: rdispls(0:nprocs-1)! offset from beginning of receive - ! buffer where incoming messages - ! should be placed - integer, intent(in) :: rtypes(0:nprocs-1) ! MPI data types - real(FP),intent(in) :: sndbuf(sbuf_siz) ! outgoing message buffer - - integer, intent(in) :: comm ! MPI communicator - logical, intent(in) :: comm_hs ! handshaking protocol? - logical, intent(in) :: comm_isend ! nonblocking send protocol? - integer, intent(in) :: comm_maxreq ! maximum number of outstanding - ! nonblocking requests - -!---------------------------Output arguments-------------------------- -! - real(FP), intent(out) :: rcvbuf(rbuf_siz) ! incoming message buffer - -! -!---------------------------Local workspace------------------------------------------- -! - character(len=*), parameter :: subName=myname//'::m_swapm_FP' - - integer :: steps ! number of swaps to initiate - integer :: swapids(nprocs) ! MPI process id of swap partners - integer :: p ! process index - integer :: istep ! loop index - integer :: tag ! MPI message tag - integer :: offset_t ! MPI message tag offset, for addressing - ! message conflict bug (if necessary) - integer :: offset_s ! index of message beginning in - ! send buffer - integer :: offset_r ! index of message beginning in - ! receive buffer - integer :: sndids(nprocs) ! send request ids - integer :: rcvids(nprocs) ! receive request ids - integer :: hs_rcvids(nprocs) ! handshake receive request ids - - integer :: maxreq, maxreqh ! maximum number of outstanding - ! nonblocking requests (and half) - integer :: hs ! handshake variable - integer :: rstep ! "receive" step index - - logical :: handshake, sendd ! protocol option flags - - integer :: ier ! return error status - integer :: status(MP_STATUS_SIZE) ! MPI status -! -!------------------------------------------------------------------------------------- -! -#ifdef _NO_M_SWAPM_TAG_OFFSET - offset_t = 0 -#else - offset_t = nprocs -#endif -! - ! 
if necessary, send to self - if (sndlths(mytask) > 0) then - tag = mytask + offset_t - - offset_r = rdispls(mytask)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(mytask), rtypes(mytask), & - mytask, tag, comm, rcvids(1), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - offset_s = sdispls(mytask)+1 - call mpi_send( sndbuf(offset_s), sndlths(mytask), stypes(mytask), & - mytask, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - - call mpi_wait( rcvids(1), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - ! calculate swap partners and communication ordering - steps = 0 - do istep=1,ceil2(nprocs)-1 - p = pair(nprocs,istep,mytask) - if (p >= 0) then - if (sndlths(p) > 0 .or. rcvlths(p) > 0) then - steps = steps + 1 - swapids(steps) = p - end if - end if - end do - - if (steps .eq. 0) return - - ! identify communication protocol - if (comm_isend) then - sendd = .false. - else - sendd = .true. - endif - handshake = comm_hs - - ! identify maximum number of outstanding nonblocking requests to permit - if (steps .eq. 1) then - maxreq = 1 - maxreqh = 1 - else - if (comm_maxreq >= -1) then - maxreq = comm_maxreq - else - maxreq = steps - endif - - if ((maxreq .le. steps) .and. (maxreq > 0)) then - if (maxreq > 1) then - maxreqh = maxreq/2 - else - maxreq = 2 - maxreqh = 1 - endif - else - maxreq = steps - maxreqh = steps - endif - endif - -! Four protocol options: -! (1) handshaking + blocking sends - if ((handshake) .and. (sendd)) then - - ! Initialize hs variable - hs = 1 - - ! Post initial handshake receive requests - do istep=1,maxreq - p = swapids(istep) - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - - ! Post initial receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new rsend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_wait ( hs_rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - - call mpi_rsend ( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_RSEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - - ! Submit a new handshake irecv request - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - - ! 
Submit a new irecv request - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - endif - - endif -! - enddo - - ! wait for rest of receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - -! (2) handshaking + nonblocking sends - elseif ((handshake) .and. (.not. sendd)) then - - ! Initialize hs variable - hs = 1 - - ! Post initial handshake receive requests - do istep=1,maxreq - p = swapids(istep) - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - - ! Post initial receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new irsend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_wait ( hs_rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - - call mpi_irsend( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, sndids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRSEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - - ! Submit a new handshake irecv request - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MP_INTEGER, p, tag, comm, & - hs_rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - - ! Submit a new irecv request - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - - call mpi_send ( hs, 1, MP_INTEGER, p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - endif - - ! Wait for outstanding i(r)send request to complete - p = swapids(istep-maxreqh) - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - endif - - enddo - - ! 
wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - -! (3) no handshaking + blocking sends - elseif ((.not. handshake) .and. (sendd)) then - - ! Post receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new send request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_send( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_SEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - ! Submit a new irecv request - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - -! (4) no handshaking + nonblocking sends - elseif ((.not. handshake) .and. (.not. sendd)) then - - ! Post receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new isend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_isend( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, sndids(istep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_ISEND',ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - ! Submit a new irecv request - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_IRECV',ier) - endif - endif - - ! 
Wait for outstanding i(r)send request to complete - p = swapids(istep-maxreqh) - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep-maxreqh), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep), status, ier ) - if(ier /= 0) call MP_perr_die(subName,'MPI_WAIT',ier) - endif - enddo - - endif - - return - - end subroutine m_swapm_FP - -end module m_SPMDutils - - - - - diff --git a/src/externals/mct/mct/m_SparseMatrix.F90 b/src/externals/mct/mct/m_SparseMatrix.F90 deleted file mode 100644 index 29716c5fd41..00000000000 --- a/src/externals/mct/mct/m_SparseMatrix.F90 +++ /dev/null @@ -1,2767 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SparseMatrix -- Sparse Matrix Object -! -! !DESCRIPTION: -! The {\tt SparseMatrix} data type is MCT's object for storing sparse -! matrices. In MCT, intergrid interpolation is implemented as a sparse -! matrix-vector multiplication, with the {\tt AttrVect} type playing the -! roles of the input and output vectors. The interpolation matrices tend -! to be {\em extremely} sparse. For ${\bf x} \in \Re^{N_x}$, and -! ${\bf y} \in \Re^{N_y}$, the interpolation matrix {\bf M} used to effect -! ${\bf y} = {\bf M} {\bf x}$ will typically have ${\cal O}({N_y})$ -! non-zero elements. For that reason, the {\tt SparseMatrix} type -! stores {\em only} information about non-zero matrix elements, along -! with the number of rows and columns in the full matrix. The nonzero -! matrix elements are stored in {\tt AttrVect} form (see the module -! {\tt m\_AttrVect} for more details), and the set of attributes are -! listed below: -! -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|l|l|l|} -!\hline -!{\bf Attribute Name} & {\bf Significance} & {\tt Type} \\ -!\hline -!{\tt grow} & Global Row Index & {\tt INTEGER} \\ -!\hline -!{\tt gcol} & Global Column Index & {\tt INTEGER} \\ -!\hline -!{\tt lrow} & Local Row Index & {\tt INTEGER} \\ -!\hline -!{\tt lcol} & Local Column Index & {\tt INTEGER} \\ -!\hline -!{\tt weight} & Matrix Element ${M_{ij}}$ & {\tt REAL} \\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! The provision of both local and global column and row indices is -! made because this datatype can be used in either shared-memory or -! distributed-memory parallel matrix-vector products. -! -! This module contains the definition of the {\tt SparseMatrix} type, -! creation and destruction methods, a variety of accessor methods, -! routines for testing the suitability of the matrix for interpolation -! (i.e. the sum of each row is either zero or unity), and methods for -! sorting and permuting matrix entries. -! -! For better performance of the Matrix-Vector multiply on vector -! architectures, the {\tt SparseMatrix} object also contains arrays -! for holding the sparse matrix data in a more vector-friendly form. -! -! -! !INTERFACE: - - module m_SparseMatrix -! -! !USES: -! 
- use m_realkinds, only : FP - use m_AttrVect, only : AttrVect - - - private ! except - -! !PUBLIC TYPES: - - public :: SparseMatrix ! The class data structure - - Type SparseMatrix -#ifdef SEQUENCE - sequence -#endif - integer :: nrows - integer :: ncols - type(AttrVect) :: data - logical :: vecinit ! additional data for the vectorized sMat - integer,dimension(:),pointer :: row_s, row_e - integer, dimension(:,:), pointer :: tcol - real(FP), dimension(:,:), pointer :: twgt - integer :: row_max, row_min - integer :: tbl_end - End Type SparseMatrix - -! !PUBLIC MEMBER FUNCTIONS: - - public :: init ! Create a SparseMatrix - public :: vecinit ! Initialize the vector parts - public :: clean ! Destroy a SparseMatrix - public :: lsize ! Local number of elements - public :: indexIA ! Index integer attribute - public :: indexRA ! Index real attribute - public :: nRows ! Total number of rows - public :: nCols ! Total number of columns - - public :: exportGlobalRowIndices ! Return global row indices - ! for matrix elements - public :: exportGlobalColumnIndices ! Return global column indices - ! for matrix elements - public :: exportLocalRowIndices ! Return local row indices - ! for matrix elements - public :: exportLocalColumnIndices ! Return local column indices - ! for matrix elements - public :: exportMatrixElements ! Return matrix elements - - public :: importGlobalRowIndices ! Set global row indices - ! using - public :: importGlobalColumnIndices ! Return global column indices - ! for matrix elements - public :: importLocalRowIndices ! Return local row indices - ! for matrix elements - public :: importLocalColumnIndices ! Return local column indices - ! for matrix elements - public :: importMatrixElements ! Return matrix elements - public :: Copy ! Copy a SparseMatrix - - public :: GlobalNumElements ! Total number of nonzero elements - public :: ComputeSparsity ! Fraction of matrix that is nonzero - public :: local_row_range ! Local (on-process) row range - public :: global_row_range ! Local (on-process) row range - public :: local_col_range ! Local (on-process) column range - public :: global_col_range ! Local (on-process) column range - public :: CheckBounds ! Check row and column values - ! for out-of-bounds values - public :: row_sum ! Return SparseMatrix row sums - public :: row_sum_check ! Check SparseMatrix row sums against - ! input "valid" values - public :: Sort ! Sort matrix entries to generate an - ! index permutation (to be used by - ! Permute() - public :: Permute ! Permute matrix entries using index - ! permutation gernerated by Sort() - public :: SortPermute ! 
Sort/Permute matrix entries - - interface init ; module procedure init_ ; end interface - interface vecinit ; module procedure vecinit_ ; end interface - interface clean ; module procedure clean_ ; end interface - interface lsize ; module procedure lsize_ ; end interface - interface indexIA ; module procedure indexIA_ ; end interface - interface indexRA ; module procedure indexRA_ ; end interface - interface nRows ; module procedure nRows_ ; end interface - interface nCols ; module procedure nCols_ ; end interface - - interface exportGlobalRowIndices ; module procedure & - exportGlobalRowIndices_ - end interface - - interface exportGlobalColumnIndices ; module procedure & - exportGlobalColumnIndices_ - end interface - - interface exportLocalRowIndices ; module procedure & - exportLocalRowIndices_ - end interface - - interface exportLocalColumnIndices ; module procedure & - exportLocalColumnIndices_ - end interface - - interface exportMatrixElements ; module procedure & - exportMatrixElementsSP_, & - exportMatrixElementsDP_ - end interface - - interface importGlobalRowIndices ; module procedure & - importGlobalRowIndices_ - end interface - - interface importGlobalColumnIndices ; module procedure & - importGlobalColumnIndices_ - end interface - - interface importLocalRowIndices ; module procedure & - importLocalRowIndices_ - end interface - - interface importLocalColumnIndices ; module procedure & - importLocalColumnIndices_ - end interface - - interface importMatrixElements ; module procedure & - importMatrixElementsSP_, & - importMatrixElementsDP_ - end interface - - interface Copy ; module procedure Copy_ ; end interface - - interface GlobalNumElements ; module procedure & - GlobalNumElements_ - end interface - - interface ComputeSparsity ; module procedure & - ComputeSparsitySP_, & - ComputeSparsityDP_ - end interface - - interface local_row_range ; module procedure & - local_row_range_ - end interface - - interface global_row_range ; module procedure & - global_row_range_ - end interface - - interface local_col_range ; module procedure & - local_col_range_ - end interface - - interface global_col_range ; module procedure & - global_col_range_ - end interface - - interface CheckBounds; module procedure & - CheckBounds_ - end interface - - interface row_sum ; module procedure & - row_sumSP_, & - row_sumDP_ - end interface - - interface row_sum_check ; module procedure & - row_sum_checkSP_, & - row_sum_checkDP_ - end interface - - interface Sort ; module procedure Sort_ ; end interface - interface Permute ; module procedure Permute_ ; end interface - interface SortPermute ; module procedure SortPermute_ ; end interface - -! !REVISION HISTORY: -! 19Sep00 - J.W. Larson - initial prototype -! 15Jan01 - J.W. Larson - added numerous APIs -! 25Feb01 - J.W. Larson - changed from row/column -! attributes to global and local row and column attributes -! 23Apr01 - J.W. Larson - added number of rows -! and columns to the SparseMatrix type. This means the -! SparseMatrix is no longer a straight AttrVect type. This -! also made necessary the addition of lsize(), indexIA(), -! and indexRA(). -! 29Oct03 - R. Jacob - extend the SparseMatrix type -! to include mods from Fujitsu for a vector-friendly MatVecMul -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_SparseMatrix' - -! 
SparseMatrix_iList components: - character(len=*),parameter :: SparseMatrix_iList='grow:gcol:lrow:lcol' - integer,parameter :: SparseMatrix_igrow=1 - integer,parameter :: SparseMatrix_igcol=2 - integer,parameter :: SparseMatrix_ilrow=3 - integer,parameter :: SparseMatrix_ilcol=4 - -! SparseMatrix_rList components: - character(len=*),parameter :: SparseMatrix_rList='weight' - integer,parameter :: SparseMatrix_iweight=1 - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_ - Initialize an Empty SparseMatrix -! -! !DESCRIPTION: This routine creates the storage space for the -! entries of a {\tt SparseMatrix}, and sets the number of rows and -! columns in it. The input {\tt INTEGER} arguments {\tt nrows} and -! {\tt ncols} specify the number of rows and columns respectively. -! The optional input argument {\tt lsize} specifies the number of -! nonzero entries in the {\tt SparseMatrix}. The initialized -! {\tt SparseMatrix} is returned in the output argument {\tt sMat}. -! -! {\bf N.B.}: This routine is allocating dynamical memory in the form -! of a {\tt SparseMatrix}. The user must deallocate this space when -! the {\tt SparseMatrix} is no longer needed by invoking the routine -! {\tt clean\_()}. -! -! !INTERFACE: - - subroutine init_(sMat, nrows, ncols, lsize) -! -! !USES: -! - use m_AttrVect, only : AttrVect_init => init - use m_die - - implicit none - -! !INPUT PARAMETERS: - - integer, intent(in) :: nrows - integer, intent(in) :: ncols - integer, optional, intent(in) :: lsize - -! !OUTPUT PARAMETERS: - - type(SparseMatrix), intent(out) :: sMat - -! !REVISION HISTORY: -! 19Sep00 - Jay Larson - initial prototype -! 23Apr01 - Jay Larson - added arguments -! nrows and ncols--number of rows and columns in the -! SparseMatrix -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::init_' - - integer :: n - - ! if lsize is present, use it to set n; if not, set n=0 - - n = 0 - if(present(lsize)) n=lsize - - ! Initialize number of rows and columns: - - sMat%nrows = nrows - sMat%ncols = ncols - - ! Initialize sMat%data using AttrVect_init - - call AttrVect_init(sMat%data, SparseMatrix_iList, & - SparseMatrix_rList, n) - - ! vecinit is off by default - sMat%vecinit = .FALSE. - - end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: vecinit_ - Initialize vector parts of a SparseMatrix -! -! !DESCRIPTION: This routine creates the storage space for -! and intializes the vector parts of a {\tt SparseMatrix}. -! -! {\bf N.B.}: This routine assumes the locally indexed parts of a -! {\tt SparseMatrix} have been initialized. This is -! accomplished by either importing the values directly with -! {\tt importLocalRowIndices} and {\tt importLocalColIndices} or by -! importing the Global Row and Col Indices and making two calls to -! {\tt GlobalToLocalMatrix}. -! -! {\bf N.B.}: The vector portion can use a large amount of -! memory so it is highly recommended that this routine only -! be called on a {\tt SparseMatrix} that has been scattered -! or otherwise sized locally. -! -! !INTERFACE: - - subroutine vecinit_(sMat) -! -! !USES: -! 
- use m_die - use m_stdio - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 27Oct03 - R. Jacob - initial version -! using code provided by Yoshi et. al. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::vecinit_' - - integer :: irow,icol,iwgt - integer :: num_elements - integer :: row,col - integer :: ier,l,n - integer, dimension(:) , allocatable :: nr, rn - - if(sMat%vecinit) then - write(stderr,'(2a)') myname_, & - 'MCTERROR: sMat vector parts have already been initialized...Continuing' - RETURN - endif - - write(6,*) myname_,'Initializing vecMat' - irow = indexIA_(sMat,'lrow',dieWith=myname_) - icol = indexIA_(sMat,'lcol',dieWith=myname_) - iwgt = indexRA_(sMat,'weight',dieWith=myname_) - - num_elements = lsize_(sMat) - - sMat%row_min = sMat%data%iAttr(irow,1) - sMat%row_max = sMat%row_min - do n=1,num_elements - row = sMat%data%iAttr(irow,n) - if ( row > sMat%row_max ) sMat%row_max = row - if ( row < sMat%row_min ) sMat%row_min = row - enddo - - allocate( nr(sMat%row_max), rn(num_elements), stat=ier) - if(ier/=0) call die(myname_,'allocate(nr,rn)',ier) - - sMat%tbl_end = 0 - nr(:) = 0 - do n=1,num_elements - row = sMat%data%iAttr(irow,n) - nr(row) = nr(row)+1 - rn(n) = nr(row) - enddo - sMat%tbl_end = maxval(rn) - - allocate( sMat%tcol(sMat%row_max,sMat%tbl_end), & - sMat%twgt(sMat%row_max,sMat%tbl_end), stat=ier ) - if(ier/=0) call die(myname_,'allocate(tcol,twgt)',ier) - -!CDIR COLLAPSE - sMat%tcol(:,:) = -1 - do n=1,num_elements - row = sMat%data%iAttr(irow,n) - sMat%tcol(row,rn(n)) = sMat%data%iAttr(icol,n) - sMat%twgt(row,rn(n)) = sMat%data%rAttr(iwgt,n) - enddo - - allocate( sMat%row_s(sMat%tbl_end) , sMat%row_e(sMat%tbl_end), & - stat=ier ) - if(ier/=0) call die(myname_,'allocate(row_s,row_e',ier) - sMat%row_s = sMat%row_min - sMat%row_e = sMat%row_max - do l=1,sMat%tbl_end - do n=sMat%row_min,sMat%row_max - if (nr(n) >= l) then - sMat%row_s(l) = n - exit - endif - enddo - do n = sMat%row_max,sMat%row_min,-1 - if (nr(n) >= l) then - sMat%row_e(l) = n - exit - endif - enddo - enddo - - deallocate(nr,rn, stat=ier) - if(ier/=0) call die(myname_,'deallocate()',ier) - - sMat%vecinit = .TRUE. - - end subroutine vecinit_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destroy a SparseMatrix. -! -! !DESCRIPTION: This routine deallocates dynamical memory held by the -! input {\tt SparseMatrix} argument {\tt sMat}. It also sets the number -! of rows and columns in the {\tt SparseMatrix} to zero. -! -! !INTERFACE: - - subroutine clean_(sMat,stat) -! -! !USES: -! - use m_AttrVect,only : AttrVect_clean => clean - use m_die - - implicit none - -! !INPUT/OUTPTU PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !OUTPUT PARAMETERS: - - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 19Sep00 - J.W. Larson - initial prototype -! 23Apr00 - J.W. Larson - added changes to -! accomodate clearing nrows and ncols. -! 01Mar02 - E.T. Ong Added stat argument. -! 03Oct03 - R. Jacob - clean vector parts -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - ! 
Deallocate memory held by sMat: - - if(present(stat)) then - call AttrVect_clean(sMat%data,stat) - else - call AttrVect_clean(sMat%data) - endif - - ! Set the number of rows and columns in sMat to zero: - - sMat%nrows = 0 - sMat%ncols = 0 - - if(sMat%vecinit) then - sMat%row_max = 0 - sMat%row_min = 0 - sMat%tbl_end = 0 - deallocate(sMat%row_s,sMat%row_e,stat=ier) - if(ier/=0) then - if(present(stat)) then - stat=ier - else - call warn(myname_,'deallocate(row_s,row_e)',ier) - endif - endif - - deallocate(sMat%tcol,sMat%twgt,stat=ier) - if(ier/=0) then - if(present(stat)) then - stat=ier - else - call warn(myname_,'deallocate(tcol,twgt)',ier) - endif - endif - sMat%vecinit = .FALSE. - endif - - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: lsize_ - Local Number Non-zero Elements -! -! !DESCRIPTION: This {\tt INTEGER} function reports on-processor storage -! of the number of nonzero elements in the input {\tt SparseMatrix} -! argument {\tt sMat}. -! -! !INTERFACE: - - integer function lsize_(sMat) -! -! !USES: -! - use m_AttrVect,only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !REVISION HISTORY: -! 23Apr00 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::lsize_' - - lsize_ = AttrVect_lsize(sMat%data) - - end function lsize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GlobalNumElements_ - Global Number of Non-zero Elements -! -! !DESCRIPTION: This routine computes the number of nonzero elements -! in a distributed {\tt SparseMatrix} variable {\tt sMat}. The input -! {\tt SparseMatrix} argument {\tt sMat} is examined on each process -! to determine the number of nonzero elements it holds, and this value -! is summed across the communicator associated with the input -! {\tt INTEGER} handle {\tt comm}, with the total returned {\em on each -! process on the communicator}. -! -! !INTERFACE: - - integer function GlobalNumElements_(sMat, comm) - -! -! !USES: -! - use m_die - use m_mpif90 - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, optional, intent(in) :: comm - -! !REVISION HISTORY: -! 24Apr01 - Jay Larson - New routine. -! -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//':: GlobalNumElements_' - - integer :: MyNumElements, GNumElements, ierr - - ! Determine the number of locally held nonzero elements: - - MyNumElements = lsize_(sMat) - - call MPI_ALLREDUCE(MyNumElements, GNumElements, 1, MP_INTEGER, & - MP_SUM, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_ALLREDUCE(MyNumElements...",ierr) - endif - - GlobalNumElements_ = GNumElements - - end function GlobalNumElements_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexIA_ - Index an Integer Attribute -! -! 
!DESCRIPTION: This {\tt INTEGER} function reports the row index -! for a given {\tt INTEGER} attribute of the input {\tt SparseMatrix} -! argument {\tt sMat}. The attribute requested is represented by the -! input {\tt CHARACTER} variable {\tt attribute}. The list of integer -! attributes one can request is defined in the description block of the -! header of this module ({\tt m\_SparseMatrix}). -! -! Here is how {\tt indexIA\_} provides access to integer attribute data -! in a {\tt SparseMatrix} variable {\tt sMat}. Suppose we wish to access -! global row information. This attribute has associated with it the -! string tag {\tt grow}. The corresponding index returned ({\tt igrow}) -! is determined by invoking {\tt indexIA\_}: -! \begin{verbatim} -! igrow = indexIA_(sMat, 'grow') -! \end{verbatim} -! -! Access to the global row index data in {\tt sMat} is thus obtained by -! referencing {\tt sMat\%data\%iAttr(igrow,:)}. -! -! -! !INTERFACE: - - integer function indexIA_(sMat, item, perrWith, dieWith) -! -! !USES: -! - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - use m_AttrVect,only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 23Apr00 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::indexIA_' - type(String) :: myTrace - - ! Generate a traceback String - - if(present(dieWith)) then - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then - call GenTraceBackString(myTrace, perrWith, myname_) - else - call GenTraceBackString(myTrace, myname_) - endif - endif - - ! Call AttrVect_indexIA() accordingly: - - if( present(dieWith) .or. & - ((.not. present(dieWith)) .and. (.not. present(perrWith))) ) then - indexIA_ = AttrVect_indexIA(sMat%data, item, & - dieWith=String_ToChar(myTrace)) - else ! perrWith but no dieWith case - indexIA_ = AttrVect_indexIA(sMat%data, item, & - perrWith=String_ToChar(myTrace)) - endif - - call String_clean(myTrace) - - end function indexIA_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexRA_ - Index a Real Attribute -! -! !DESCRIPTION: This {\tt INTEGER} function reports the row index -! for a given {\tt REAL} attribute of the input {\tt SparseMatrix} -! argument {\tt sMat}. The attribute requested is represented by the -! input {\tt CHARACTER} variable {\tt attribute}. The list of real -! attributes one can request is defined in the description block of the -! header of this module ({\tt m\_SparseMatrix}). -! -! Here is how {\tt indexRA\_} provides access to integer attribute data -! in a {\tt SparseMatrix} variable {\tt sMat}. Suppose we wish to access -! matrix element values. This attribute has associated with it the -! string tag {\tt weight}. The corresponding index returned ({\tt iweight}) -! is determined by invoking {\tt indexRA\_}: -! \begin{verbatim} -! iweight = indexRA_(sMat, 'weight') -! \end{verbatim} -! -! 
Access to the matrix element data in {\tt sMat} is thus obtained by -! referencing {\tt sMat\%data\%rAttr(iweight,:)}. -! -! !INTERFACE: - - integer function indexRA_(sMat, item, perrWith, dieWith) -! -! !USES: -! - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String_ToChar => ToChar - - use m_TraceBack, only : GenTraceBackString - - use m_AttrVect,only : AttrVect_indexRA => indexRA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - character(len=*), intent(in) :: item - character(len=*), optional, intent(in) :: perrWith - character(len=*), optional, intent(in) :: dieWith - -! !REVISION HISTORY: -! 24Apr00 - J.W. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::indexRA_' - - type(String) :: myTrace - - ! Generate a traceback String - - if(present(dieWith)) then ! append myname_ onto dieWith - call GenTraceBackString(myTrace, dieWith, myname_) - else - if(present(perrWith)) then ! append myname_ onto perrwith - call GenTraceBackString(myTrace, perrWith, myname_) - else ! Start a TraceBack String - call GenTraceBackString(myTrace, myname_) - endif - endif - - ! Call AttrVect_indexRA() accordingly: - - if( present(dieWith) .or. & - ((.not. present(dieWith)) .and. (.not. present(perrWith))) ) then - indexRA_ = AttrVect_indexRA(sMat%data, item, & - dieWith=String_ToChar(myTrace)) - else ! perrWith but no dieWith case - indexRA_ = AttrVect_indexRA(sMat%data, item, & - perrWith=String_ToChar(myTrace)) - endif - - call String_clean(myTrace) - - end function indexRA_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nRows_ - Return the Number of Rows -! -! !DESCRIPTION: This routine returns the {\em total} number of rows -! in the input {\tt SparseMatrix} argument {\tt sMat}. This number of -! rows is a constant, and not dependent on the decomposition of the -! {\tt SparseMatrix}. -! -! !INTERFACE: - - integer function nRows_(sMat) -! -! !USES: -! - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !REVISION HISTORY: -! 19Apr01 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nRows_' - - nRows_ = sMat%nrows - - end function nRows_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nCols_ - Return the Number of Columns -! -! !DESCRIPTION: This routine returns the {\em total} number of columns -! in the input {\tt SparseMatrix} argument {\tt sMat}. This number of -! columns is a constant, and not dependent on the decomposition of the -! {\tt SparseMatrix}. -! -! !INTERFACE: - - integer function nCols_(sMat) -! -! !USES: -! - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !REVISION HISTORY: -! 19Apr01 - J.W. 
Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nCols_' - - nCols_ = sMat%ncols - - end function nCols_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportGlobalRowIndices_ - Return Global Row Indices -! -! !DESCRIPTION: -! This routine extracts from the input {\tt SparseMatrix} argument -! {\tt sMat} its global row indices, and returns them in the {\tt INTEGER} -! output array {\tt GlobalRows}, and its length in the output {\tt INTEGER} -! argument {\tt length}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt GlobalRows} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt GlobalRows}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) at the time this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt GlobalRows}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportGlobalRowIndices_(sMat, GlobalRows, length) -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportIAttr => exportIAttr - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: GlobalRows - integer, optional, intent(out) :: length - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial version. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportGlobalRowIndices_' - - ! Export the data (inheritance from AttrVect) - if(present(length)) then - call AttrVect_exportIAttr(sMat%data, 'grow', GlobalRows, length) - else - call AttrVect_exportIAttr(sMat%data, 'grow', GlobalRows) - endif - - end subroutine exportGlobalRowIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportGlobalColumnIndices_ - Return Global Column Indices -! -! !DESCRIPTION: -! This routine extracts from the input {\tt SparseMatrix} argument -! {\tt sMat} its global column indices, and returns them in the {\tt INTEGER} -! output array {\tt GlobalColumns}, and its length in the output {\tt INTEGER} -! argument {\tt length}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt GlobalColumns} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt GlobalColumns}, -! then the user must ensure this pointer is not allocated (i.e. the user -! 
must nullify this pointer) at the time this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt GlobalColumns}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportGlobalColumnIndices_(sMat, GlobalColumns, length) - -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportIAttr => exportIAttr - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: GlobalColumns - integer, optional, intent(out) :: length - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial version. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportGlobalColumnIndices_' - - ! Export the data (inheritance from AttrVect) - if(present(length)) then - call AttrVect_exportIAttr(sMat%data, 'gcol', GlobalColumns, length) - else - call AttrVect_exportIAttr(sMat%data, 'gcol', GlobalColumns) - endif - - end subroutine exportGlobalColumnIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportLocalRowIndices_ - Return Local Row Indices -! -! !DESCRIPTION: -! This routine extracts from the input {\tt SparseMatrix} argument -! {\tt sMat} its local row indices, and returns them in the {\tt INTEGER} -! output array {\tt LocalRows}, and its length in the output {\tt INTEGER} -! argument {\tt length}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt LocalRows} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt LocalRows}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) at the time this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt LocalRows}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportLocalRowIndices_(sMat, LocalRows, length) -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportIAttr => exportIAttr - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: LocalRows - integer, optional, intent(out) :: length - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial version. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportLocalRowIndices_' - - ! Export the data (inheritance from AttrVect) - if(present(length)) then - call AttrVect_exportIAttr(sMat%data, 'lrow', LocalRows, length) - else - call AttrVect_exportIAttr(sMat%data, 'lrow', LocalRows) - endif - - end subroutine exportLocalRowIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportLocalColumnIndices_ - Return Local Column Indices -! -! !DESCRIPTION: -! This routine extracts from the input {\tt SparseMatrix} argument -! {\tt sMat} its local column indices, and returns them in the {\tt INTEGER} -! output array {\tt LocalColumns}, and its length in the output {\tt INTEGER} -! argument {\tt length}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt LocalColumns} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt LocalColumns}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) at the time this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt LocalColumns}, then the user is responsible -! for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! !INTERFACE: - - subroutine exportLocalColumnIndices_(sMat, LocalColumns, length) - -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_exportIAttr => exportIAttr - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: LocalColumns - integer, optional, intent(out) :: length - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial version. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportLocalColumnIndices_' - - ! Export the data (inheritance from AttrVect) - if(present(length)) then - call AttrVect_exportIAttr(sMat%data, 'lcol', LocalColumns, length) - else - call AttrVect_exportIAttr(sMat%data, 'lcol', LocalColumns) - endif - - end subroutine exportLocalColumnIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportMatrixElementsSP_ - Return Matrix Elements as Array -! -! !DESCRIPTION: -! This routine extracts the matrix elements from the input {\tt SparseMatrix} -! argument {\tt sMat}, and returns them in the {\tt REAL} output array -! {\tt MatrixElements}, and its length in the output {\tt INTEGER} -! argument {\tt length}. -! -! {\bf N.B.:} The flexibility of this routine regarding the pointer -! association status of the output argument {\tt MatrixElements} means the -! user must invoke this routine with care. If the user wishes this -! routine to fill a pre-allocated array, then obviously this array -! must be allocated prior to calling this routine. If the user wishes -! that the routine {\em create} the output argument array {\tt MatrixElements}, -! then the user must ensure this pointer is not allocated (i.e. the user -! must nullify this pointer) at the time this routine is invoked. -! -! {\bf N.B.:} If the user has relied on this routine to allocate memory -! associated with the pointer {\tt MatrixElements}, then the user is responsible -! 
for deallocating this array once it is no longer needed. Failure to -! do so will result in a memory leak. -! -! The native precision version is described here. A double precision version -! is also available. -! -! !INTERFACE: - - subroutine exportMatrixelementsSP_(sMat, MatrixElements, length) - -! -! !USES: -! - use m_die - use m_stdio - use m_realkinds, only : SP - - use m_AttrVect, only : AttrVect_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - real(SP), dimension(:), pointer :: MatrixElements - integer, optional, intent(out) :: length - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial version. -! 6Jan04 - R. Jacob - SP and DP versions -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportMatrixElementsSP_' - - ! Export the data (inheritance from AttrVect) - if(present(length)) then - call AttrVect_exportRAttr(sMat%data, 'weight', MatrixElements, length) - else - call AttrVect_exportRAttr(sMat%data, 'weight', MatrixElements) - endif - - end subroutine exportMatrixElementsSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ------------------------------------------------------------------- -! -! !IROUTINE: exportMatrixElementsDP_ - Return Matrix Elements as Array -! -! !DESCRIPTION: -! Double precision version of exportMatrixElementsSP_ -! -! !INTERFACE: - - subroutine exportMatrixelementsDP_(sMat, MatrixElements, length) - -! -! !USES: -! - use m_die - use m_stdio - use m_realkinds, only : DP - - use m_AttrVect, only : AttrVect_exportRAttr => exportRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - real(DP), dimension(:), pointer :: MatrixElements - integer, optional, intent(out) :: length - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial version. -! -! ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportMatrixElementsDP_' - - ! Export the data (inheritance from AttrVect) - if(present(length)) then - call AttrVect_exportRAttr(sMat%data, 'weight', MatrixElements, length) - else - call AttrVect_exportRAttr(sMat%data, 'weight', MatrixElements) - endif - - end subroutine exportMatrixElementsDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importGlobalRowIndices_ - Set Global Row Indices of Elements -! -! !DESCRIPTION: -! This routine imports global row index data into the {\tt SparseMatrix} -! argument {\tt sMat}. The user provides the index data in the input -! {\tt INTEGER} vector {\tt inVect}. The input {\tt INTEGER} argument -! {\tt lsize} is used as a consistencey check to ensure the user is -! sufficient space in the {\tt SparseMatrix} to store the data. -! -! !INTERFACE: - - subroutine importGlobalRowIndices_(sMat, inVect, lsize) - -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_importIAttr => importIAttr - - implicit none - -! !INPUT PARAMETERS: - - integer, dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 7May02 - J.W. 
Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importGlobalRowIndices_' - - ! Argument Check: - - if(lsize > lsize_(sMat)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(sMat).', & - 'lsize = ',lsize,'lsize_(sMat) = ',lsize_(sMat) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importIAttr(sMat%data, 'grow', inVect, lsize) - - end subroutine importGlobalRowIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importGlobalColumnIndices_ - Set Global Column Indices of Elements -! -! !DESCRIPTION: -! This routine imports global column index data into the {\tt SparseMatrix} -! argument {\tt sMat}. The user provides the index data in the input -! {\tt INTEGER} vector {\tt inVect}. The input {\tt INTEGER} argument -! {\tt lsize} is used as a consistencey check to ensure the user is -! sufficient space in the {\tt SparseMatrix} to store the data. -! -! !INTERFACE: - - subroutine importGlobalColumnIndices_(sMat, inVect, lsize) - -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_importIAttr => importIAttr - - implicit none - -! !INPUT PARAMETERS: - - integer, dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importGlobalColumnIndices_' - - ! Argument Check: - - if(lsize > lsize_(sMat)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(sMat).', & - 'lsize = ',lsize,'lsize_(sMat) = ',lsize_(sMat) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importIAttr(sMat%data, 'gcol', inVect, lsize) - - end subroutine importGlobalColumnIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importLocalRowIndices_ - Set Local Row Indices of Elements -! -! !DESCRIPTION: -! This routine imports local row index data into the {\tt SparseMatrix} -! argument {\tt sMat}. The user provides the index data in the input -! {\tt INTEGER} vector {\tt inVect}. The input {\tt INTEGER} argument -! {\tt lsize} is used as a consistencey check to ensure the user is -! sufficient space in the {\tt SparseMatrix} to store the data. -! -! !INTERFACE: - - subroutine importLocalRowIndices_(sMat, inVect, lsize) - -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_importIAttr => importIAttr - - implicit none - -! !INPUT PARAMETERS: - - integer, dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importLocalRowIndices_' - - ! 
Argument Check: - - if(lsize > lsize_(sMat)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(sMat).', & - 'lsize = ',lsize,'lsize_(sMat) = ',lsize_(sMat) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importIAttr(sMat%data, 'lrow', inVect, lsize) - - end subroutine importLocalRowIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importLocalColumnIndices_ - Set Local Column Indices of Elements -! -! !DESCRIPTION: -! This routine imports local column index data into the {\tt SparseMatrix} -! argument {\tt sMat}. The user provides the index data in the input -! {\tt INTEGER} vector {\tt inVect}. The input {\tt INTEGER} argument -! {\tt lsize} is used as a consistencey check to ensure the user is -! sufficient space in the {\tt SparseMatrix} to store the data. -! -! !INTERFACE: - - subroutine importLocalColumnIndices_(sMat, inVect, lsize) - -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect_importIAttr => importIAttr - - implicit none - -! !INPUT PARAMETERS: - - integer, dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importLocalColumnIndices_' - - ! Argument Check: - - if(lsize > lsize_(sMat)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(sMat).', & - 'lsize = ',lsize,'lsize_(sMat) = ',lsize_(sMat) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importIAttr(sMat%data, 'lcol', inVect, lsize) - - end subroutine importLocalColumnIndices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: importMatrixElementsSP_ - Import Non-zero Matrix Elements -! -! !DESCRIPTION: -! This routine imports matrix elements index data into the -! {\tt SparseMatrix} argument {\tt sMat}. The user provides the index -! data in the input {\tt REAL} vector {\tt inVect}. The input -! {\tt INTEGER} argument {\tt lsize} is used as a consistencey check -! to ensure the user is sufficient space in the {\tt SparseMatrix} -! to store the data. -! -! !INTERFACE: - - subroutine importMatrixElementsSP_(sMat, inVect, lsize) - -! -! !USES: -! - use m_die - use m_stdio - use m_realkinds, only : SP - - use m_AttrVect, only : AttrVect_importRAttr => importRAttr - - implicit none - -! !INPUT PARAMETERS: - - real(SP), dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial prototype. -! 6Jan04 - R. Jacob - Make SP and DP versions. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importMatrixElementsSP_' - - ! Argument Check: - - if(lsize > lsize_(sMat)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(sMat).', & - 'lsize = ',lsize,'lsize_(sMat) = ',lsize_(sMat) - call die(myname_) - endif - - ! 
Import the data (inheritance from AttrVect) - - call AttrVect_importRAttr(sMat%data, 'weight', inVect, lsize) - - end subroutine importMatrixElementsSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ------------------------------------------------------------------- -! -! !IROUTINE: importMatrixElementsDP_ - Import Non-zero Matrix Elements -! -! !DESCRIPTION: -! Double precision version of importMatrixElementsSP_ -! -! !INTERFACE: - - subroutine importMatrixElementsDP_(sMat, inVect, lsize) - -! -! !USES: -! - use m_die - use m_stdio - use m_realkinds, only : DP - - use m_AttrVect, only : AttrVect_importRAttr => importRAttr - - implicit none - -! !INPUT PARAMETERS: - - real(DP), dimension(:), pointer :: inVect - integer, intent(in) :: lsize - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 7May02 - J.W. Larson - initial prototype. -! -! ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::importMatrixElementsDP_' - - ! Argument Check: - - if(lsize > lsize_(sMat)) then - write(stderr,*) myname_,':: ERROR, lsize > lsize_(sMat).', & - 'lsize = ',lsize,'lsize_(sMat) = ',lsize_(sMat) - call die(myname_) - endif - - ! Import the data (inheritance from AttrVect) - - call AttrVect_importRAttr(sMat%data, 'weight', inVect, lsize) - - end subroutine importMatrixElementsDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Copy_ - Create a Copy of an Input SparseMatrix -! -! !DESCRIPTION: -! This routine creates a copy of the input {\tt SparseMatrix} argument -! {\tt sMat}, returning it as the output {\tt SparseMatrix} argument -! {\tt sMatCopy}. -! -! {\bf N.B.:} The output argument {\tt sMatCopy} represents allocated -! memory the user must deallocate when it is no longer needed. The -! MCT routine to use for this purpose is {\tt clean()} from this module. -! -! !INTERFACE: - - subroutine Copy_(sMat, sMatCopy) - -! -! !USES: -! - use m_die - use m_stdio - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_Copy => Copy - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - type(SparseMatrix), intent(out) :: sMatCopy - -! !REVISION HISTORY: -! 27Sep02 - J.W. Larson - initial prototype. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Copy_' - - ! Step one: copy the integer components of sMat: - - sMatCopy%nrows = sMat%nrows - sMatCopy%ncols = sMat%ncols - - sMatCopy%vecinit = .FALSE. - - ! Step two: Initialize the AttrVect sMatCopy%data off of sMat: - - call AttrVect_init(sMatCopy%data, sMat%data, AttrVect_lsize(sMat%data)) - - ! Step three: Copy sMat%data to sMatCopy%data: - - call AttrVect_Copy(sMat%data, aVout=sMatCopy%data) - - if(sMat%vecinit) call vecinit_(sMatCopy) - - end subroutine Copy_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! 
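!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): minimal use of the
! export/import/Copy routines documented above.  The generic public names
! exportMatrixElements, importMatrixElements, Copy and clean are assumed
! to follow the module's usual renaming pattern, and the default real
! kind is assumed to match the single precision (SP) interface.

      subroutine example_export_import_copy(sMat)
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrix, only : SparseMatrix_exportMatrixElements => exportMatrixElements
         use m_SparseMatrix, only : SparseMatrix_importMatrixElements => importMatrixElements
         use m_SparseMatrix, only : SparseMatrix_Copy => Copy
         use m_SparseMatrix, only : SparseMatrix_clean => clean
         implicit none
         type(SparseMatrix), intent(inout) :: sMat

         type(SparseMatrix) :: sMatCopy
         real, dimension(:), pointer :: weights
         integer :: n

         nullify(weights)                     ! let the export routine allocate
         call SparseMatrix_exportMatrixElements(sMat, weights, n)
         weights(1:n) = 2.0 * weights(1:n)    ! e.g. rescale the weights
         call SparseMatrix_importMatrixElements(sMat, weights, n)
         deallocate(weights)                  ! caller owns the exported array

         call SparseMatrix_Copy(sMat, sMatCopy)    ! deep copy
         call SparseMatrix_clean(sMatCopy)         ! caller must clean the copy
      end subroutine example_export_import_copy
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~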
!IROUTINE: local_row_range_ - Local Row Extent of Non-zero Elements -! -! !DESCRIPTION: This routine examines the input distributed -! {\tt SparseMatrix} variable {\tt sMat}, and returns the range of local -! row values having nonzero elements. The first local row with -! nonzero elements is returned in the {\tt INTEGER} argument -! {\tt start\_row}, the last row in {\tt end\_row}. -! -! !INTERFACE: - - subroutine local_row_range_(sMat, start_row, end_row) -! -! !USES: -! - use m_die - - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: start_row - integer, intent(out) :: end_row - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Feb01 - Jay Larson - Initial prototype. -! 23Apr01 - Jay Larson - Modified to accomodate -! changes to the SparseMatrix type. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::local_row_range_' - - integer :: i, ilrow, lsize - - ilrow = AttrVect_indexIA(sMat%data, 'lrow') - lsize = AttrVect_lsize(sMat%data) - - ! Initialize start_row and end_row: - - start_row = sMat%data%iAttr(ilrow,1) - end_row = sMat%data%iAttr(ilrow,1) - - do i=1,lsize - start_row = min(start_row, sMat%data%iAttr(ilrow,i)) - end_row = max(end_row, sMat%data%iAttr(ilrow,i)) - end do - - end subroutine local_row_range_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: global_row_range_ - Global Row Extent of Non-zero Elements -! -! !DESCRIPTION: This routine examines the input distributed -! {\tt SparseMatrix} variable {\tt sMat}, and returns the range of -! global row values having nonzero elements. The first local row with -! nonzero elements is returned in the {\tt INTEGER} argument -! {\tt start\_row}, the last row in {\tt end\_row}. -! -! !INTERFACE: - - subroutine global_row_range_(sMat, comm, start_row, end_row) -! -! !USES: -! - use m_die - - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: start_row - integer, intent(out) :: end_row - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Feb01 - Jay Larson - Initial prototype. -! 23Apr01 - Jay Larson - Modified to accomodate -! changes to the SparseMatrix type. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::global_row_range_' - - integer :: i, igrow, lsize - - igrow = AttrVect_indexIA(sMat%data, 'grow', dieWith=myname_) - lsize = AttrVect_lsize(sMat%data) - - ! Initialize start_row and end_row: - - start_row = sMat%data%iAttr(igrow,1) - end_row = sMat%data%iAttr(igrow,1) - - do i=1,lsize - start_row = min(start_row, sMat%data%iAttr(igrow,i)) - end_row = max(end_row, sMat%data%iAttr(igrow,i)) - end do - - end subroutine global_row_range_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! 
!IROUTINE: local_col_range_ - Local Column Extent of Non-zero Elements -! -! !DESCRIPTION: This routine examines the input distributed -! {\tt SparseMatrix} variable {\tt sMat}, and returns the range of -! local column values having nonzero elements. The first local column -! with nonzero elements is returned in the {\tt INTEGER} argument -! {\tt start\_col}, the last column in {\tt end\_col}. -! -! !INTERFACE: - - subroutine local_col_range_(sMat, start_col, end_col) -! -! !USES: -! - use m_die - - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: start_col - integer, intent(out) :: end_col - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Feb01 - Jay Larson - Initial prototype. -! 23Apr01 - Jay Larson - Modified to accomodate -! changes to the SparseMatrix type. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::local_col_range_' - - integer :: i, ilcol, lsize - - ilcol = AttrVect_indexIA(sMat%data, 'lcol') - lsize = AttrVect_lsize(sMat%data) - - ! Initialize start_col and end_col: - - start_col = sMat%data%iAttr(ilcol,1) - end_col = sMat%data%iAttr(ilcol,1) - - do i=1,lsize - start_col = min(start_col, sMat%data%iAttr(ilcol,i)) - end_col = max(end_col, sMat%data%iAttr(ilcol,i)) - end do - - end subroutine local_col_range_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: global_col_range_ - Global Column Extent of Non-zero Elements -! -! !DESCRIPTION: This routine examines the input distributed -! {\tt SparseMatrix} variable {\tt sMat}, and returns the range of -! global column values having nonzero elements. The first global -! column with nonzero elements is returned in the {\tt INTEGER} argument -! {\tt start\_col}, the last column in {\tt end\_col}. -! -! !INTERFACE: - - subroutine global_col_range_(sMat, comm, start_col, end_col) -! -! !USES: -! - use m_die - - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: start_col - integer, intent(out) :: end_col - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Feb01 - Jay Larson - Initial prototype. -! 23Apr01 - Jay Larson - Modified to accomodate -! changes to the SparseMatrix type. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::global_col_range_' - - integer :: i, igcol, lsize - - igcol = AttrVect_indexIA(sMat%data, 'gcol') - lsize = AttrVect_lsize(sMat%data) - - ! Initialize start_col and end_col: - - start_col = sMat%data%iAttr(igcol,1) - end_col = sMat%data%iAttr(igcol,1) - - do i=1,lsize - start_col = min(start_col, sMat%data%iAttr(igcol,i)) - end_col = max(end_col, sMat%data%iAttr(igcol,i)) - end do - - end subroutine global_col_range_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
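!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): example use of the
! row/column range queries documented above.  The local variants need no
! communicator; the global variants take one.  The public names are
! assumed to mirror the module procedure names.

      subroutine example_ranges(sMat, comm)
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrix, only : SparseMatrix_local_row_range  => local_row_range
         use m_SparseMatrix, only : SparseMatrix_global_col_range => global_col_range
         implicit none
         type(SparseMatrix), intent(in) :: sMat
         integer,            intent(in) :: comm

         integer :: first_row, last_row, first_col, last_col

         call SparseMatrix_local_row_range(sMat, first_row, last_row)
         call SparseMatrix_global_col_range(sMat, comm, first_col, last_col)
      end subroutine example_ranges
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~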
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ComputeSparsitySP_ - Compute Matrix Sparsity -! -! !DESCRIPTION: This routine computes the sparsity of a consolidated -! (all on one process) or distributed {\tt SparseMatrix}. The input -! {\tt SparseMatrix} argument {\tt sMat} is examined to determine the -! number of nonzero elements it holds, and this value is divided by the -! product of the number of rows and columns in {\tt sMat}. If the -! optional input argument {\tt comm} is given, then the distributed -! elements are counted and the sparsity computed accordingly, and the -! resulting value of {\tt sparsity} is returned {\em to all processes}. -! -! Given the inherent problems with multiplying and dividing large integers, -! the work in this routine is performed using floating point arithmetic on -! the logarithms of the number of rows, columns, and nonzero elements. -! -! !INTERFACE: - - subroutine ComputeSparsitySP_(sMat, sparsity, comm) - -! -! !USES: -! - use m_die - use m_mpif90 - use m_realkinds, only : SP, FP - - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, optional, intent(in) :: comm - -! !OUTPUT PARAMETERS: - - real(SP), intent(out) :: sparsity - -! !REVISION HISTORY: -! 23Apr01 - Jay Larson - New routine. -! -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::ComputeSparsitySP_' - - integer :: num_elements, num_rows, num_cols - real(FP) :: Lnum_elements, Lnum_rows, Lnum_cols, LMySparsity - real(FP) :: MySparsity - integer :: ierr - - ! Extract number of nonzero elements and compute its logarithm - - num_elements = lsize_(sMat) - Lnum_elements = log(REAL(num_elements,FP)) - - ! Extract number of rows and compute its logarithm - - num_rows = nRows_(sMat) - Lnum_rows = log(REAL(num_rows,FP)) - - ! Extract number of columns and compute its logarithm - - num_cols = nCols_(sMat) - Lnum_cols = log(REAL(num_cols,FP)) - - ! Compute logarithm of the (local) sparsity - - LMySparsity = Lnum_elements - Lnum_rows - Lnum_cols - - ! Compute the (local) sparsity from its logarithm. - - MySparsity = exp(LMySparsity) - - ! If a communicator handle is present, sum up the - ! distributed sparsity values to all processes. If not, - ! return the value of MySparsity computed above. - - if(present(comm)) then - call MPI_ALLREDUCE(MySparsity, sparsity, 1, MP_INTEGER, & - MP_SUM, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_ALLREDUCE(MySparsity...",ierr) - endif - else - sparsity = MySparsity - endif - - end subroutine ComputeSparsitySP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: ComputeSparsityDP_ - Compute Matrix Sparsity -! -! !DESCRIPTION: -! Double precision version of ComputeSparsitySP_ -! -! !INTERFACE: - - subroutine ComputeSparsityDP_(sMat, sparsity, comm) - -! -! !USES: -! - use m_die - use m_mpif90 - use m_realkinds, only : DP, FP - - use m_AttrVect, only : AttrVect_lsize => lsize - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, optional, intent(in) :: comm - -! !OUTPUT PARAMETERS: - - real(DP), intent(out) :: sparsity - -! !REVISION HISTORY: -! 23Apr01 - Jay Larson - New routine. -! -! 
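!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): calling the sparsity
! routine documented above.  Without a communicator the local fraction of
! nonzeros is returned; with one, the distributed value is returned to all
! processes.  The public generic name ComputeSparsity is assumed.

      subroutine example_sparsity(sMat, comm)
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrix, only : SparseMatrix_ComputeSparsity => ComputeSparsity
         use m_realkinds,    only : SP
         implicit none
         type(SparseMatrix), intent(in) :: sMat
         integer,            intent(in) :: comm

         real(SP) :: local_sparsity, global_sparsity

         call SparseMatrix_ComputeSparsity(sMat, local_sparsity)
         call SparseMatrix_ComputeSparsity(sMat, global_sparsity, comm)
      end subroutine example_sparsity
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~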
______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::ComputeSparsityDP_' - - integer :: num_elements, num_rows, num_cols - real(FP) :: Lnum_elements, Lnum_rows, Lnum_cols, LMySparsity - real(FP) :: MySparsity - integer :: ierr - - ! Extract number of nonzero elements and compute its logarithm - - num_elements = lsize_(sMat) - Lnum_elements = log(REAL(num_elements,FP)) - - ! Extract number of rows and compute its logarithm - - num_rows = nRows_(sMat) - Lnum_rows = log(REAL(num_rows,FP)) - - ! Extract number of columns and compute its logarithm - - num_cols = nCols_(sMat) - Lnum_cols = log(REAL(num_cols,FP)) - - ! Compute logarithm of the (local) sparsity - - LMySparsity = Lnum_elements - Lnum_rows - Lnum_cols - - ! Compute the (local) sparsity from its logarithm. - - MySparsity = exp(LMySparsity) - - ! If a communicator handle is present, sum up the - ! distributed sparsity values to all processes. If not, - ! return the value of MySparsity computed above. - - if(present(comm)) then - call MPI_ALLREDUCE(MySparsity, sparsity, 1, MP_INTEGER, & - MP_SUM, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_ALLREDUCE(MySparsity...",ierr) - endif - else - sparsity = MySparsity - endif - - end subroutine ComputeSparsityDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: CheckBounds_ - Check for Out-of-Bounds Row/Column Values -! -! !DESCRIPTION: This routine examines the input distributed -! {\tt SparseMatrix} variable {\tt sMat}, and examines the global row -! and column index for each element, comparing them with the known -! maximum values for each (as returned by the routines {\tt nRows\_()} -! and {\tt nCols\_()}, respectively). If global row or column entries -! are non-positive, or greater than the defined maximum values, this -! routine stops execution with an error message. If no out-of-bounds -! values are detected, the output {\tt INTEGER} status {\tt ierror} is -! set to zero. -! -! !INTERFACE: - - subroutine CheckBounds_(sMat, ierror) -! -! !USES: -! - use m_die - - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexIA => indexIA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: ierror - -! !REVISION HISTORY: -! 24Apr01 - Jay Larson - Initial prototype. -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::CheckBounds_' - - integer :: MaxRow, MaxCol, NumElements - integer :: igrow, igcol - integer :: i - - ! Initially, set ierror to zero (success): - - ierror = 0 - - ! Query sMat to find the number of rows and columns: - - MaxRow = nRows_(sMat) - MaxCol = nCols_(sMat) - - ! Query sMat for the number of nonzero elements: - - NumElements = lsize_(sMat) - - ! Query sMat to index global row and column storage indices: - - igrow = indexIA_(sMat=sMat,item='grow',dieWith=myname_) - igcol = indexIA_(sMat=sMat,item='gcol',dieWith=myname_) - - ! Scan the entries of sMat for row or column elements that - ! are out-of-bounds. Here, out-of-bounds means: 1) non- - ! positive row or column indices; 2) row or column indices - ! exceeding the stated number of rows or columns. - - do i=1,NumElements - - ! Row index out of bounds? 
- - if((sMat%data%iAttr(igrow,i) > MaxRow) .or. & - (sMat%data%iAttr(igrow,i) <= 0)) then - ierror = 1 - call die(myname_,"Row index out of bounds",ierror) - endif - - ! Column index out of bounds? - - if((sMat%data%iAttr(igcol,i) > MaxCol) .or. & - (sMat%data%iAttr(igcol,i) <= 0)) then - ierror = 2 - call die(myname_,"Column index out of bounds",ierror) - endif - - end do - - end subroutine CheckBounds_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: row_sumSP_ - Sum Elements in Each Row -! -! !DESCRIPTION: -! Given an input {\tt SparseMatrix} argument {\tt sMat}, {\tt row\_sum\_()} -! returns the number of the rows {\tt num\_rows} in the sparse matrix and -! the sum of the elements in each row in the array {\tt sums}. The input -! argument {\tt comm} is the Fortran 90 MPI communicator handle used to -! determine the number of rows and perform the sums. The output arguments -! {\tt num\_rows} and {\tt sums} are valid on all processes. -! -! {\bf N.B.: } This routine allocates an array {\tt sums}. The user is -! responsible for deallocating this array when it is no longer needed. -! Failure to do so will cause a memory leak. -! -! !INTERFACE: - - subroutine row_sumSP_(sMat, num_rows, sums, comm) - -! -! !USES: -! - use m_die - use m_mpif90 - use m_realkinds, only : SP, FP - - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexIA => indexIA - use m_AttrVect, only : AttrVect_indexRA => indexRA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: num_rows - real(SP), dimension(:), pointer :: sums - - - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Jan01 - Jay Larson - Prototype code. -! 23Apr01 - Jay Larson - Modified to accomodate -! changes to the SparseMatrix type. -! 18May01 - R. Jacob - Use MP_TYPE function -! to set type in the mpi_allreduce -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::row_sumSP_' - - integer :: i, igrow, ierr, iwgt, lsize, myID - integer :: start_row, end_row - integer :: mp_Type_lsums - real(FP), dimension(:), allocatable :: lsums - real(FP), dimension(:), allocatable :: gsums - - ! Determine local rank - - call MP_COMM_RANK(comm, myID, ierr) - - ! Determine on each process the row of global row indices: - - call global_row_range_(sMat, comm, start_row, end_row) - - ! Determine across the communicator the _maximum_ value of - ! end_row, which will be assigned to num_rows on each process: - - call MPI_ALLREDUCE(end_row, num_rows, 1, MP_INTEGER, MP_MAX, & - comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_ALLREDUCE(end_row...",ierr) - endif - - ! Allocate storage for the sums on each process. - - allocate(lsums(num_rows), gsums(num_rows), sums(num_rows), stat=ierr) - - if(ierr /= 0) then - call die(myname_,"allocate(lsums(...",ierr) - endif - - ! 
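!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): calling the bounds
! check documented above.  Execution stops inside the routine on the first
! out-of-range global row or column index; ierror is zero on success.
! The public name CheckBounds is assumed.

      subroutine example_check_bounds(sMat)
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrix, only : SparseMatrix_CheckBounds => CheckBounds
         implicit none
         type(SparseMatrix), intent(in) :: sMat

         integer :: ierror

         call SparseMatrix_CheckBounds(sMat, ierror)
      end subroutine example_check_bounds
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~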
Compute the local entries to lsum(1:num_rows) for each process: - - lsize = AttrVect_lsize(sMat%data) - igrow = AttrVect_indexIA(aV=sMat%data,item='grow',dieWith=myname_) - iwgt = AttrVect_indexRA(aV=sMat%data,item='weight',dieWith=myname_) - - lsums = 0._FP - do i=1,lsize - lsums(sMat%data%iAttr(igrow,i)) = lsums(sMat%data%iAttr(igrow,i)) + & - sMat%data%rAttr(iwgt,i) - end do - - ! Compute the global sum of the entries of lsums so that all - ! processes own the global sums. - - mp_Type_lsums=MP_Type(lsums) - call MPI_ALLREDUCE(lsums, gsums, num_rows, mp_Type_lsums, MP_SUM, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_ALLREDUCE(lsums...",ierr) - endif - - ! Copy our temporary array gsums into the output pointer sums - ! This was done so that lsums and gsums have the same precision (FP) - ! Precision conversion occurs here from FP to (SP or DP) - - sums = gsums - - ! Clean up... - - deallocate(lsums, gsums, stat=ierr) - if(ierr /= 0) then - call die(myname_,"deallocate(lsums...",ierr) - endif - - end subroutine row_sumSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: row_sumDP_ - Sum Elements in Each Row -! -! !DESCRIPTION: -! Double precision version of row_sumSP_ -! -! {\bf N.B.: } This routine allocates an array {\tt sums}. The user is -! responsible for deallocating this array when it is no longer needed. -! Failure to do so will cause a memory leak. -! -! !INTERFACE: - - subroutine row_sumDP_(sMat, num_rows, sums, comm) - -! -! !USES: -! - use m_die - use m_mpif90 - - use m_realkinds, only : DP, FP - - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexIA => indexIA - use m_AttrVect, only : AttrVect_indexRA => indexRA - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: - - integer, intent(out) :: num_rows - real(DP), dimension(:), pointer :: sums - - - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Jan01 - Jay Larson - Prototype code. -! 23Apr01 - Jay Larson - Modified to accomodate -! changes to the SparseMatrix type. -! 18May01 - R. Jacob - Use MP_TYPE function -! to set type in the mpi_allreduce -! ______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::row_sumDP_' - - integer :: i, igrow, ierr, iwgt, lsize, myID - integer :: start_row, end_row - integer :: mp_Type_lsums - real(FP), dimension(:), allocatable :: lsums - real(FP), dimension(:), allocatable :: gsums - - ! Determine local rank - - call MP_COMM_RANK(comm, myID, ierr) - - ! Determine on each process the row of global row indices: - - call global_row_range_(sMat, comm, start_row, end_row) - - ! Determine across the communicator the _maximum_ value of - ! end_row, which will be assigned to num_rows on each process: - - call MPI_ALLREDUCE(end_row, num_rows, 1, MP_INTEGER, MP_MAX, & - comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_ALLREDUCE(end_row...",ierr) - endif - - ! Allocate storage for the sums on each process. - - allocate(lsums(num_rows), gsums(num_rows), sums(num_rows), stat=ierr) - - if(ierr /= 0) then - call die(myname_,"allocate(lsums(...",ierr) - endif - - ! 
Compute the local entries to lsum(1:num_rows) for each process: - - lsize = AttrVect_lsize(sMat%data) - igrow = AttrVect_indexIA(aV=sMat%data,item='grow',dieWith=myname_) - iwgt = AttrVect_indexRA(aV=sMat%data,item='weight',dieWith=myname_) - - lsums = 0._FP - do i=1,lsize - lsums(sMat%data%iAttr(igrow,i)) = lsums(sMat%data%iAttr(igrow,i)) + & - sMat%data%rAttr(iwgt,i) - end do - - ! Compute the global sum of the entries of lsums so that all - ! processes own the global sums. - - mp_Type_lsums=MP_Type(lsums) - call MPI_ALLREDUCE(lsums, gsums, num_rows, mp_Type_lsums, MP_SUM, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_ALLREDUCE(lsums...",ierr) - endif - - ! Copy our temporary array gsums into the output pointer sums - ! This was done so that lsums and gsums have the same precision (FP) - ! Precision conversion occurs here from FP to (SP or DP) - - sums = gsums - - ! Clean up... - - deallocate(lsums, gsums, stat=ierr) - if(ierr /= 0) then - call die(myname_,"deallocate(lsums...",ierr) - endif - - end subroutine row_sumDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: row_sum_checkSP_ - Check Row Sums vs. Valid Values -! -! !DESCRIPTION: The routine {\tt row\_sum\_check()} sums the rows of -! the input distributed (across the communicator identified by {\tt comm}) -! {\tt SparseMatrix} variable {\tt sMat}. It then compares these sums -! with the {\tt num\_valid} input "valid" values stored in the array -! {\tt valid\_sums}. If all of the sums are within the absolute tolerence -! specified by the input argument {\tt abs\_tol} of any of the valid values, -! the output {\tt LOGICAL} flag {\tt valid} is set to {\tt .TRUE}. -! Otherwise, this flag is returned with value {\tt .FALSE}. -! -! !INTERFACE: - - subroutine row_sum_checkSP_(sMat, comm, num_valid, valid_sums, abs_tol, valid) - -! -! !USES: -! - use m_die - use m_realkinds, only : SP, FP - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, intent(in) :: comm - integer, intent(in) :: num_valid - real(SP), intent(in) :: valid_sums(num_valid) - real(SP), intent(in) :: abs_tol - -! !OUTPUT PARAMETERS: - - logical, intent(out) :: valid - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Feb01 - Jay Larson - Prototype code. -! 06Jan03 - R. Jacob - create DP and SP versions -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::row_sum_checkSP_' - - integer :: i, j, num_invalid, num_rows - real(FP), dimension(:), pointer :: sums - - ! Compute row sums: - - call row_sum(sMat, num_rows, sums, comm) - - ! Initialize for the scanning loop (assume the matrix row - ! sums are valid): - - valid = .TRUE. - i = 1 - - SCAN_LOOP: do - - ! Count the number of elements in valid_sums(:) that - ! are separated from sums(i) by more than abs_tol - - num_invalid = 0 - - do j=1,num_valid - if(abs(sums(i) - valid_sums(j)) > abs_tol) then - num_invalid = num_invalid + 1 - endif - end do - - ! If num_invalid = num_valid, then we have failed to - ! find a valid sum value within abs_tol of sums(i). This - ! one failure is enough to halt the process. - - if(num_invalid == num_valid) then - valid = .FALSE. - EXIT - endif - - ! 
Prepare index i for the next element of sums(:) - - i = i + 1 - if( i > num_rows) EXIT - - end do SCAN_LOOP - - end subroutine row_sum_checkSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: row_sum_checkDP_ - Check Row Sums vs. Valid Values -! -! !DESCRIPTION: -! Double precision version of row_sum_checkSP -! -! !INTERFACE: - - subroutine row_sum_checkDP_(sMat, comm, num_valid, valid_sums, abs_tol, valid) - -! -! !USES: -! - use m_die - use m_realkinds, only : DP, FP - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - integer, intent(in) :: comm - integer, intent(in) :: num_valid - real(DP), intent(in) :: valid_sums(num_valid) - real(DP), intent(in) :: abs_tol - -! !OUTPUT PARAMETERS: - - logical, intent(out) :: valid - -! !REVISION HISTORY: -! 15Jan01 - Jay Larson - API specification. -! 25Feb01 - Jay Larson - Prototype code. -! 06Jan03 - R. Jacob - create DP and SP versions -! ______________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::row_sum_checkDP_' - - integer :: i, j, num_invalid, num_rows - real(FP), dimension(:), pointer :: sums - - ! Compute row sums: - - call row_sum(sMat, num_rows, sums, comm) - - ! Initialize for the scanning loop (assume the matrix row - ! sums are valid): - - valid = .TRUE. - i = 1 - - SCAN_LOOP: do - - ! Count the number of elements in valid_sums(:) that - ! are separated from sums(i) by more than abs_tol - - num_invalid = 0 - - do j=1,num_valid - if(abs(sums(i) - valid_sums(j)) > abs_tol) then - num_invalid = num_invalid + 1 - endif - end do - - ! If num_invalid = num_valid, then we have failed to - ! find a valid sum value within abs_tol of sums(i). This - ! one failure is enough to halt the process. - - if(num_invalid == num_valid) then - valid = .FALSE. - EXIT - endif - - ! Prepare index i for the next element of sums(:) - - i = i + 1 - if( i > num_rows) EXIT - - end do SCAN_LOOP - - end subroutine row_sum_checkDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Sort_ - Generate Index Permutation -! -! !DESCRIPTION: -! The subroutine {\tt Sort\_()} uses a list of sorting keys defined by -! the input {\tt List} argument {\tt key\_list}, searches for the appropriate -! integer or real attributes referenced by the items in {\tt key\_list} -! ( that is, it identifies the appropriate entries in {sMat\%data\%iList} -! and {\tt sMat\%data\%rList}), and then uses these keys to generate an index -! permutation {\tt perm} that will put the nonzero matrix entries of stored -! in {\tt sMat\%data} in lexicographic order as defined by {\tt key\_ist} -! (the ordering in {\tt key\_list} being from left to right. The optional -! {\tt LOGICAL} array input argument {\tt descend} specifies whether or -! not to sort by each key in {\em descending} order or {\em ascending} -! order. Entries in {\tt descend} that have value {\tt .TRUE.} correspond -! to a sort by the corresponding key in descending order. If the argument -! {\tt descend} is not present, the sort is performed for all keys in -! ascending order. -! -! !INTERFACE: - - subroutine Sort_(sMat, key_list, perm, descend) - -! -! !USES: -! 
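!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): using the row-sum
! routines documented above, e.g. to verify that every row of an
! interpolation matrix sums to 0 or 1 within a tolerance.  The public
! names row_sum and row_sum_check are assumed to mirror the module
! procedures; row_sum allocates sums, which the caller must deallocate.

      subroutine example_row_sums(sMat, comm)
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrix, only : SparseMatrix_row_sum => row_sum
         use m_SparseMatrix, only : SparseMatrix_row_sum_check => row_sum_check
         use m_realkinds,    only : SP
         implicit none
         type(SparseMatrix), intent(in) :: sMat
         integer,            intent(in) :: comm

         integer :: num_rows
         real(SP), dimension(:), pointer :: sums
         real(SP) :: valid_sums(2)
         logical  :: valid

         call SparseMatrix_row_sum(sMat, num_rows, sums, comm)
         deallocate(sums)                       ! caller owns the sums array

         valid_sums = (/ 0.0_SP, 1.0_SP /)
         call SparseMatrix_row_sum_check(sMat, comm, 2, valid_sums, 1.0e-5_SP, valid)
      end subroutine example_row_sums
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~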
- use m_die , only : die - use m_stdio , only : stderr - - use m_List , only : List - - use m_AttrVect, only: AttrVect_Sort => Sort - - implicit none -! -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - type(List), intent(in) :: key_list - logical, dimension(:), optional, intent(in) :: descend -! -! !OUTPUT PARAMETERS: - - integer, dimension(:), pointer :: perm - - -! !REVISION HISTORY: -! 24Apr01 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Sort_' - - if(present(descend)) then - call AttrVect_Sort(sMat%data, key_list, perm, descend) - else - call AttrVect_Sort(sMat%data, key_list, perm) - endif - - end Subroutine Sort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: Permute_ - Permute Matrix Elements using Supplied Index Permutation -! -! !DESCRIPTION: -! The subroutine {\tt Permute\_()} uses an input index permutation -! {\tt perm} to re-order the entries of the {\tt SparseMatrix} argument -! {\tt sMat}. The index permutation {\tt perm} is generated using the -! routine {\tt Sort\_()} (in this module). -! -! !INTERFACE: - - subroutine Permute_(sMat, perm) - -! -! !USES: -! - use m_die , only : die - use m_stdio , only : stderr - - use m_AttrVect, only: AttrVect_Permute => Permute - - implicit none -! -! !INPUT PARAMETERS: - - - integer, dimension(:), pointer :: perm -! -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - - -! !REVISION HISTORY: -! 24Apr01 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::Permute_' - - call AttrVect_Permute(sMat%data, perm) - - end Subroutine Permute_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SortPermute_ - Sort and Permute Matrix Elements -! -! !DESCRIPTION: -! The subroutine {\tt SortPermute\_()} uses a list of sorting keys defined -! by the input {\tt List} argument {\tt key\_list}, searches for the -! appropriate integer or real attributes referenced by the items in -! {\tt key\_ist} ( that is, it identifies the appropriate entries in -! {sMat\%data\%iList} and {\tt sMat\%data\%rList}), and then uses these -! keys to generate an index permutation that will put the nonzero matrix -! entries of stored in {\tt sMat\%data} in lexicographic order as defined -! by {\tt key\_list} (the ordering in {\tt key\_list} being from left to -! right. The optional {\tt LOGICAL} array input argument {\tt descend} -! specifies whether or not to sort by each key in {\em descending} order -! or {\em ascending} order. Entries in {\tt descend} that have value -! {\tt .TRUE.} correspond to a sort by the corresponding key in descending -! order. If the argument {\tt descend} is not present, the sort is -! performed for all keys in ascending order. -! -! Once this index permutation is created, it is applied to re-order the -! entries of the {\tt SparseMatrix} argument {\tt sMat} accordingly. -! -! !INTERFACE: - - subroutine SortPermute_(sMat, key_list, descend) - -! -! !USES: -! 
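!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): the sort/permute
! interfaces documented above.  One can generate the index permutation
! and apply it explicitly, as below, or let SortPermute do both in one
! call.  Only SortPermute is confirmed as a public name elsewhere in this
! change; Sort and Permute are assumed to follow the same pattern.

      subroutine example_sort(sMat)
         use m_List, only : List
         use m_List, only : List_init => init
         use m_List, only : List_clean => clean
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrix, only : SparseMatrix_Sort => Sort
         use m_SparseMatrix, only : SparseMatrix_Permute => Permute
         implicit none
         type(SparseMatrix), intent(inout) :: sMat

         type(List) :: keys
         integer, dimension(:), pointer :: perm

         call List_init(keys, 'grow:gcol')      ! sort by row, then column
         call SparseMatrix_Sort(sMat, keys, perm)
         call SparseMatrix_Permute(sMat, perm)
         deallocate(perm)                       ! caller owns the permutation
         call List_clean(keys)
      end subroutine example_sort
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~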
- use m_die , only : die - use m_stdio , only : stderr - - use m_List , only : List - - implicit none -! -! !INPUT PARAMETERS: - - type(List), intent(in) :: key_list - logical, dimension(:), optional, intent(in) :: descend -! -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !REVISION HISTORY: -! 24Apr01 - J.W. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SortPermute_' - - integer :: ier - integer, dimension(:), pointer :: perm - - ! Create index permutation perm(:) - - if(present(descend)) then - call Sort_(sMat, key_list, perm, descend) - else - call Sort_(sMat, key_list, perm) - endif - - ! Apply index permutation perm(:) to re-order sMat: - - call Permute_(sMat, perm) - - ! Clean up - - deallocate(perm, stat=ier) - if(ier/=0) call die(myname_, "deallocate(perm)", ier) - - end subroutine SortPermute_ - - end module m_SparseMatrix - - - diff --git a/src/externals/mct/mct/m_SparseMatrixComms.F90 b/src/externals/mct/mct/m_SparseMatrixComms.F90 deleted file mode 100644 index 761cd81a319..00000000000 --- a/src/externals/mct/mct/m_SparseMatrixComms.F90 +++ /dev/null @@ -1,699 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SparseMatrixComms -- sparse matrix communications methods. -! -! !DESCRIPTION: -! The {\tt SparseMatrix} datatype provides sparse matrix storage for -! the parallel matrix-vector multiplication ${\bf y} = {\bf M} {\bf x}$. -! This module provides communications services for the {\tt SparseMatrix} -! type. These services include scattering matrix elements based on row or -! column decompositions, gathering of matrix elements to the root, and -! broadcasting from the root. -! -! {\bf N.B.:} These routines will not communicate the vector portion -! of a {\tt SparseMatrix}, if it has been initialized. A WARNING will -! be issued in most cases. In general, do communication first, then -! call {\tt vecinit}. -! -! !INTERFACE: - - module m_SparseMatrixComms - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: -! - public :: ScatterByColumn - public :: ScatterByRow - public :: Gather - public :: Bcast - - interface ScatterByColumn ; module procedure & - ScatterByColumnGSMap_ - end interface - - interface ScatterByRow ; module procedure & - ScatterByRowGSMap_ - end interface - - interface Gather ; module procedure & - GM_gather_, & - GSM_gather_ - end interface - - interface Bcast ; module procedure Bcast_ ; end interface - -! !REVISION HISTORY: -! 13Apr01 - J.W. Larson - initial prototype -! and API specifications. -! 10May01 - J.W. Larson - added GM_gather_ -! and cleaned up prologues. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_SparseMatrixComms' - - contains - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: ScatterByColumnGSMap_ - Column-based scatter for SparseMatrix. -! -! !DESCRIPTION: This routine scatters the input {\tt SparseMatrix} -! 
argument {\tt GsMat} (valid only on the root) to a distributed -! {\tt SparseMatrix} variable {\tt LsMat} across all the processes -! present on the communicator associated with the integer handle -! {\tt comm}. The decomposition defining the scatter is supplied by the -! input {\tt GlobalSegMap} argument {\tt columnGSMap}. The optional -! output {\tt INTEGER} flag {\tt stat} signifies a successful (failed) -! operation if it is returned with value zero (nonzero). -! -! {\bf N.B.:} This routine returns an allocated {\tt SparseMatrix} -! variable {\tt LsMat}. The user must destroy this variable when it -! is no longer needed by invoking {\tt SparseMatrix\_Clean()}. -! -! !INTERFACE: - - subroutine ScatterByColumnGSMap_(columnGSMap, GsMat, LsMat, root, comm, stat) -! -! !USES: -! - - use m_die, only : MP_perr_die,die - use m_stdio - use m_mpif90 - - use m_List, only: List - use m_List, only: List_init => init - use m_List, only: List_clean => clean - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_clean => clean - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_nRows => nRows - use m_SparseMatrix, only : SparseMatrix_nCols => nCols - use m_SparseMatrix, only : SparseMatrix_SortPermute => SortPermute - - use m_SparseMatrixDecomp, only : SparseMatrixDecompByColumn => ByColumn - - use m_AttrVectComms, only : AttrVect_Scatter => scatter - - implicit none - -! !INPUT PARAMETERS: -! - type(GlobalSegMap), intent(in) :: columnGSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(inout) :: GsMat - -! !OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(out) :: LsMat - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! -! 13Apr01 - J.W. Larson - initial API spec. -! 10May01 - J.W. Larson - cleaned up prologue. -! 13Jun01 - J.W. Larson - Made status flag stat -! optional, and ititilaze it to zero if it is present. -! 09Jul03 - E.T. Ong - added sorting to distributed -! matrix elements -!EOP -!------------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'ScatterByColumnGSMap_' -! GlobalSegMap used to create column decomposition of GsMat - type(GlobalSegMap) :: MatGSMap -! Storage for the number of rows and columns in the SparseMatrix - integer :: NumRowsColumns(2) -! List storage for sorting keys - type(List) :: sort_keys -! Process ID - integer :: myID -! Error flag - integer :: ierr - - ! Initialize stat if present - - if(present(stat)) stat = 0 - - ! Which process am I? - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_COMM_RANK() failed",ierr) - endif - - ! can't scatter vector parts. - if((myID.eq.root) .and. GsMat%vecinit) then - write(stderr,*) myname_,& - "WARNING: will not scatter vector parts of GsMat" - endif - - ! Create from columnGSMap the corresponding GlobalSegMap - ! that will decompose GsMat by column the same way. - - call SparseMatrixDecompByColumn(columnGSMap, GsMat, MatGSMap, root, comm) - - ! Broadcast the resulting GlobalSegMap across the communicator - - ! 
Scatter the matrix element data GsMat%data accordingly - - call AttrVect_Scatter(GsMat%data, LsMat%data, MatGSMap, root, comm, ierr) - - if(ierr /= 0) then - if(present(stat)) then - write(stderr,*) myname_,":: AttrVect_Scatter(GsMat%data) failed--stat=", & - ierr - stat = ierr - return - else - call die(myname_,"call AttrVect_Scatter(GsMat%data,..",ierr) - endif - endif - - ! Now, distribute to all the processes the number of Rows and - ! columns in GsMat (which are valid on the root only at this point) - - if(myID == root) then - NumRowsColumns(1) = SparseMatrix_nRows(GsMat) - NumRowsColumns(2) = SparseMatrix_nCols(GsMat) - endif - - call MPI_Bcast(NumRowsColumns, 2, MP_INTEGER, root, comm, ierr) - - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_Bcast(NumRowsColumns...",ierr) - endif - - ! Unpack NumRowsColumns - - LsMat%nrows = NumRowsColumns(1) - LsMat%ncols = NumRowsColumns(2) - - ! Set the value of vecinit - LsMat%vecinit = .FALSE. - - ! Finally, lets sort the distributed local matrix elements - - ! Sort the matrix entries in sMat by column, then row. - ! First, create the key list... - - call List_init(sort_keys,'gcol:grow') - - ! Now perform the sort/permute... - call SparseMatrix_SortPermute(LsMat, sort_keys) - - ! Cleanup - - call List_clean(sort_keys) - call GlobalSegMap_clean(MatGSMap) - - end subroutine ScatterByColumnGSMap_ - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: ScatterByRowGSMap_ -Row-based scatter for SparseMatrix. -! -! !DESCRIPTION: This routine scatters the input {\tt SparseMatrix} -! argument {\tt GsMat} (valid only on the root) to a distributed -! {\tt SparseMatrix} variable {\tt LsMat} across all the processes -! present on the communicator associated with the integer handle -! {\tt comm}. The decomposition defining the scatter is supplied by the -! input {\tt GlobalSegMap} argument {\tt rowGSMap}. The output integer -! flag {\tt stat} signifies a successful (failed) operation if it is -! returned with value zero (nonzero). -! -! {\bf N.B.:} This routine returns an allocated {\tt SparseMatrix} -! variable {\tt LsMat}. The user must destroy this variable when it -! is no longer needed by invoking {\tt SparseMatrix\_Clean()}. -! -! !INTERFACE: - - subroutine ScatterByRowGSMap_(rowGSMap, GsMat, LsMat, root, comm, stat) -! -! !USES: -! - use m_die, only : MP_perr_die,die - use m_stdio - use m_mpif90 - - use m_List, only: List - use m_List, only: List_init => init - use m_List, only: List_clean => clean - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_clean => clean - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_nRows => nRows - use m_SparseMatrix, only : SparseMatrix_nCols => nCols - use m_SparseMatrix, only : SparseMatrix_SortPermute => SortPermute - - use m_SparseMatrixDecomp, only : SparseMatrixDecompByRow => ByRow - - use m_AttrVectComms, only : AttrVect_Scatter => scatter - - implicit none - -! !INPUT PARAMETERS: -! - type(GlobalSegMap), intent(in) :: rowGSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(inout) :: GsMat - -! !OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(out) :: LsMat - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! -! 13Apr01 - J.W. Larson - initial API spec. -! 
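!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): typical use of the
! scatter interfaces declared in this module.  The root supplies the
! global matrix GsMat together with the decomposition of the vector y
! (scatter by row) or x (scatter by column); every process receives its
! local piece LsMat, which it must later clean.  yGSMap is an
! illustrative name for the caller's vector decomposition.

      subroutine example_scatter(yGSMap, GsMat, LsMat, root, comm)
         use m_GlobalSegMap, only : GlobalSegMap
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrixComms, only : ScatterByRow
         implicit none
         type(GlobalSegMap), intent(in)    :: yGSMap
         type(SparseMatrix), intent(inout) :: GsMat
         type(SparseMatrix), intent(out)   :: LsMat
         integer,            intent(in)    :: root, comm

         integer :: ierr

         call ScatterByRow(yGSMap, GsMat, LsMat, root, comm, ierr)
      end subroutine example_scatter
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~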
26Apr01 - R.L. Jacob - fix use statement -! from SMDecomp so it points to ByRow -! 13Jun01 - J.W. Larson - Made status flag stat -! optional, and initialize it to zero if it is present. -! 09Jul03 - E.T. Ong - Added sorting to distributed -! matrix elements. -!EOP -!------------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'ScatterByRowGSMap_' -! GlobalSegMap used to create row decomposition of GsMat - type(GlobalSegMap) :: MatGSMap -! Storage for the number of rows and columns in the SparseMatrix - integer :: NumRowsColumns(2) -! List storage for sorting keys - type(List) :: sort_keys -! Process ID - integer :: myID -! Error flag - integer :: ierr - - ! Initialize stat to zero (if present) - - if(present(stat)) stat = 0 - - ! Which process are we? - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_COMM_RANK() failed",ierr) - endif - - ! can't scatter vector parts. - if((myID.eq.root) .and. GsMat%vecinit) then - write(stderr,*) myname_,& - "WARNING: will not scatter vector parts of GsMat." - endif - - ! Create from rowGSMap the corresponding GlobalSegMap - ! that will decompose GsMat by row the same way. - - call SparseMatrixDecompByRow(rowGSMap, GsMat, MatGSMap, root, comm) - - ! Scatter the matrix element data GsMat%data accordingly - - call AttrVect_Scatter(GsMat%data, LsMat%data, MatGSMap, root, comm, ierr) - if(ierr /= 0) then - if(present(stat)) then - write(stderr,*) myname_,":: AttrVect_Scatter(GsMat%data) failed--stat=", & - ierr - stat = ierr - return - else - call die(myname_,"call AttrVect_Scatter(GsMat%data,..",ierr) - endif - endif - - ! Now, distribute to all the processes the number of rows and - ! columns in GsMat (which are valid on the root only at this point) - - if(myID == root) then - NumRowsColumns(1) = SparseMatrix_nRows(GsMat) - NumRowsColumns(2) = SparseMatrix_nCols(GsMat) - endif - - call MPI_Bcast(NumRowsColumns, 2, MP_INTEGER, root, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_Bcast(NumRowsColumns...",ierr) - endif - - ! Unpack NumRowsColumns - - LsMat%nrows = NumRowsColumns(1) - LsMat%ncols = NumRowsColumns(2) - - ! Set the value of vecinit - LsMat%vecinit = .FALSE. - - ! Sort the matrix entries in sMat by row, then column. - ! First, create the key list... - - call List_init(sort_keys,'grow:gcol') - - ! Now perform the sort/permute... - call SparseMatrix_SortPermute(LsMat, sort_keys) - - ! Cleanup - - call List_clean(sort_keys) - call GlobalSegMap_clean(MatGSMap) - - end subroutine ScatterByRowGSMap_ - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: GM_gather_ - Gather a distributed SparseMatrix to the root. -! -! !DESCRIPTION: This routine gathers the input distributed -! {\tt SparseMatrix} argument {\tt LsMat} to the {\tt SparseMatrix} -! variable {\tt GsMat} on the root. The decomposition defining the gather -! is supplied by the input {\tt GlobalMap} argument {\tt GMap}. The -! status flag {\tt stat} has value zero (nonzero) if the operation has -! succeeded (failed). -! -! {\bf N.B.:} This routine returns an allocated {\tt SparseMatrix} -! variable {\tt GsMat}. The user must destroy this variable when it -! is no longer needed by invoking {\tt SparseMatrix\_Clean()}. -! -! 
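!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
! Editorial sketch (not part of the original source): typical use of the
! Gather interface described above.  Each process passes its local LsMat
! and the GlobalSegMap (or GlobalMap) that distributes it; the assembled
! GsMat is returned on the root, where it must eventually be cleaned.

      subroutine example_gather(LsMat, GsMat, GSMap, root, comm)
         use m_GlobalSegMap, only : GlobalSegMap
         use m_SparseMatrix, only : SparseMatrix
         use m_SparseMatrixComms, only : SparseMatrix_Gather => Gather
         implicit none
         type(SparseMatrix), intent(in)  :: LsMat
         type(SparseMatrix), intent(out) :: GsMat
         type(GlobalSegMap), intent(in)  :: GSMap
         integer,            intent(in)  :: root, comm

         integer :: ierr

         call SparseMatrix_Gather(LsMat, GsMat, GSMap, root, comm, ierr)
      end subroutine example_gather
!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~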
!INTERFACE: - - subroutine GM_gather_(LsMat, GsMat, GMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die, only : die - - use m_GlobalMap, only: GlobalMap - - use m_SparseMatrix, only: SparseMatrix - use m_SparseMatrix, only: SparseMatrix_nRows => nRows - use m_SparseMatrix, only: SparseMatrix_nCols => nCols - - use m_AttrVectComms, only : AttrVect_gather => gather - - implicit none - -! !INPUT PARAMETERS: -! - type(SparseMatrix), intent(in) :: LsMat - type(GlobalMap), intent(in) :: GMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(out) :: GsMat - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! -! 13Apr01 - J.W. Larson - initial API spec. -! 10May01 - J.W. Larson - initial routine and -! prologue -! 13Jun01 - J.W. Larson - Made status flag stat -! optional, and ititilaze it to zero if it is present. -!EOP -!------------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'GM_gather_' - integer :: ierr - - ! if stat is present, initialize its value to zero (success) - - if(present(stat)) stat = 0 - - if(LsMat%vecinit) then - write(stderr,*) myname_,& - "WARNING: will not gather vector parts of LsMat." - endif - - call AttrVect_gather(LsMat%data, GsMat%data, GMap, root, comm, ierr) - if(ierr /= 0) then - if(present(stat)) then - write(stderr,*) myname_,":: AttrVect_Gather(LsMat%data...) failed--stat=", & - ierr - stat = ierr - return - else - call die(myname_,"call AttrVect_Scatter(LsMat%data...) failed",ierr) - endif - endif - - ! For now, the GsMat inherits the number of rows and columns from - ! the corresponding values of LsMat on the root (this should be - ! checked in future versions). - - GsMat%nrows = SparseMatrix_nRows(LsMat) - GsMat%ncols = SparseMatrix_nCols(LsMat) - - GsMat%vecinit = .FALSE. - - end subroutine GM_gather_ - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: GSM_gather_ - Gather a distributed SparseMatrix to the root. -! -! !DESCRIPTION: This routine gathers the input distributed -! {\tt SparseMatrix} argument {\tt LsMat} to the {\tt SparseMatrix} -! variable {\tt GsMat} on the root. The decomposition defining the gather -! is supplied by the input {\tt GlobalSegMap} argument {\tt GSMap}. The -! status flag {\tt stat} has value zero (nonzero) if the operation has -! succeeded (failed). -! -! {\bf N.B.:} This routine returns an allocated {\tt SparseMatrix} -! variable {\tt GsMat}. The user must destroy this variable when it -! is no longer needed by invoking {\tt SparseMatrix\_Clean()}. -! -! !INTERFACE: - - subroutine GSM_gather_(LsMat, GsMat, GSMap, root, comm, stat) -! -! !USES: -! - use m_stdio - use m_die, only : die - - use m_GlobalSegMap, only: GlobalSegMap - - use m_SparseMatrix, only: SparseMatrix - use m_SparseMatrix, only: SparseMatrix_nRows => nRows - use m_SparseMatrix, only: SparseMatrix_nCols => nCols - - use m_AttrVectComms, only : AttrVect_gather => gather - - implicit none - -! !INPUT PARAMETERS: -! - type(SparseMatrix), intent(in) :: LsMat - type(GlobalSegMap), intent(in) :: GSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(out) :: GsMat - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! -! 13Apr01 - J.W. 
Larson - initial API spec. -! 13Jun01 - J.W. Larson - Made status flag stat -! optional, and ititilaze it to zero if it is present. -!EOP -!------------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'GSM_gather_' - integer :: ierr - - ! if stat is present, initialize its value to zero (success) - - if(present(stat)) stat = 0 - - if(LsMat%vecinit) then - write(stderr,*) myname_,& - "WARNING: will not gather vector parts of LsMat." - endif - - ! Gather the AttrVect component of LsMat to GsMat... - - call AttrVect_gather(LsMat%data, GsMat%data, GSMap, root, comm, ierr) - if(ierr /= 0) then - if(present(stat)) then - write(stderr,*) myname_,":: AttrVect_Gather(LsMat%data...) failed--stat=", & - ierr - stat = ierr - return - else - call die(myname_,"call AttrVect_Gather(LsMat%data...)",ierr) - endif - endif - - ! For now, the GsMat inherits the number of rows and columns from - ! the corresponding values of LsMat on the root (this should be - ! checked in future versions). - - GsMat%nrows = SparseMatrix_nRows(LsMat) - GsMat%ncols = SparseMatrix_nCols(LsMat) - - GsMat%vecinit = .FALSE. - - end subroutine GSM_gather_ - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: Bcast_ - Broadcast a SparseMatrix. -! -! !DESCRIPTION: This routine broadcasts the {\tt SparseMatrix} argument -! {\tt sMat} from the root to all processes on the communicator associated -! with the communicator handle {\tt comm}. The status flag {\tt stat} -! has value zero if the operation has succeeded. -! -! {\bf N.B.:} This routine returns an allocated {\tt SparseMatrix} -! variable {\tt sMat}. The user must destroy this variable when it -! is no longer needed by invoking {\tt SparseMatrix\_Clean()}. -! -! {\bf N.B.:} This routine will exit with an error if the vector portion -! of {\tt sMat} has been initialized prior to broadcast. -! -! !INTERFACE: - - subroutine Bcast_(sMat, root, comm, stat) - -! -! !USES: -! - - use m_die, only : MP_perr_die,die - use m_stdio - use m_mpif90 - - use m_GlobalSegMap, only: GlobalSegMap - - use m_AttrVectComms, only : AttrVect_bcast => bcast - - use m_SparseMatrix, only: SparseMatrix - use m_SparseMatrix, only: SparseMatrix_nRows => nRows - use m_SparseMatrix, only: SparseMatrix_nCols => nCols - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(inout) :: sMat - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! -! 13Apr01 - J.W. Larson - initial API spec/code -! 13Jun01 - J.W. Larson - Made status flag stat -! optional, and ititilaze it to zero if it is present. -! 17Jul02 - J.W. Larson - Bug fix--local -! process ID myID was uninitialized. -!EOP -!------------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'Bcast_' - -! Storage for the number of rows and columns in the SparseMatrix - integer :: NumRowsColumns(2) -! Process ID number - integer :: myID -! Error flag - integer :: ierr - - ! Initialize stat if present - - if(present(stat)) stat = 0 - - ! 
Determine local process ID myID: - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_COMM_RANK() failed",ierr) - endif - - if((myID.eq.root) .and. sMat%vecinit) then - write(stderr,*) myname_,& - "Cannot broadcast SparseMatrix with initialized vector parts." - call die(myname_,"Gather SparseMatrix with vecinit TRUE.") - endif - - ! Broadcast sMat%data from the root - - call AttrVect_bcast(sMat%data, root, comm, ierr) - if(ierr /= 0) then - if(present(stat)) then - write(stderr,*) myname_,":: AttrVect_bcast(sMat%data...failed--stat=", & - ierr - stat = ierr - return - else - call die(myname_,"call AttrVect_bcast(sMat%data...) failed",ierr) - endif - endif - - if(myID == root) then - NumRowsColumns(1) = SparseMatrix_nRows(sMat) - NumRowsColumns(2) = SparseMatrix_nCols(sMat) - endif - - call MPI_Bcast(NumRowsColumns, 2, MP_INTEGER, root, comm, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,"MPI_Bcast(NumRowsColumns...",ierr) - endif - - ! Unpack NumRowsColumns on broadcast destination processes - - if(myID /= root) then - sMat%nrows = NumRowsColumns(1) - sMat%ncols = NumRowsColumns(2) - endif - - sMat%vecinit = .FALSE. - - end subroutine Bcast_ - - end module m_SparseMatrixComms diff --git a/src/externals/mct/mct/m_SparseMatrixDecomp.F90 b/src/externals/mct/mct/m_SparseMatrixDecomp.F90 deleted file mode 100644 index eb914e74aa9..00000000000 --- a/src/externals/mct/mct/m_SparseMatrixDecomp.F90 +++ /dev/null @@ -1,756 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SparseMatrixDecomp -- Parallel sparse matrix decomposition. -! -! !DESCRIPTION: -! The {\tt SparseMatrix} datatype provides sparse matrix storage for -! the parallel matrix-vector multiplication ${\bf y} = {\bf M} {\bf x}$. -! This module provides services to create decompositions for the -! {\tt SparseMatrix}. The matrix decompositions available are row -! and column decompositions. They are generated by invoking the -! appropriate routine in this module, and passing the corresponding -! {\em vector} decomposition. For a row (column) decomposition, one -! invokes the routine {\tt ByRow()} ({\tt ByColumn()}), passing the -! domain decomposition for the vector {\bf y} ({\bf x}). -! -! !INTERFACE: - - module m_SparseMatrixDecomp - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: -! - public :: ByColumn - public :: ByRow - - - interface ByColumn ; module procedure & - ByColumnGSMap_ - end interface - - interface ByRow ; module procedure & - ByRowGSMap_ - end interface - -! !REVISION HISTORY: -! 13Apr01 - J.W. Larson - initial prototype -! and API specifications. -! 03Aug01 - E. Ong - in ByRowGSMap and ByColumnGSMap, -! call GlobalSegMap_init on non-root processes with actual -! shaped arguments to satisfy Fortran 90 standard. See -! comments in ByRowGSMap/ByColumnGSMap. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_SparseMatrixDecomp' - - contains - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! 
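A minimal sketch of the row-decomposition service this module provides, using the public ByRow generic (bound to ByRowGSMap_ below). The wrapper name decomp_by_row_example is hypothetical, root is taken as process 0, and yGSMap, the root-resident sMat, and the communicator are assumed to have been set up elsewhere.

      subroutine decomp_by_row_example(yGSMap, sMat, comm)
        use m_GlobalSegMap,       only : GlobalSegMap, GlobalSegMap_clean => clean
        use m_SparseMatrix,       only : SparseMatrix
        use m_SparseMatrixDecomp, only : SparseMatrix_ByRow => ByRow
        implicit none
        type(GlobalSegMap), intent(in)    :: yGSMap ! decomposition of the vector y
        type(SparseMatrix), intent(inout) :: sMat   ! matrix, valid on the root only
        integer,            intent(in)    :: comm   ! MPI communicator
        integer, parameter :: root = 0
        type(GlobalSegMap) :: sMatGSMap              ! resulting matrix decomposition

        ! Called on every process of comm; on the root sMat is sorted by row as
        ! a side effect, and sMatGSMap comes back valid on all processes.
        call SparseMatrix_ByRow(yGSMap, sMat, sMatGSMap, root, comm)

        ! ... scatter sMat with this map (see m_SparseMatrixComms), then:
        call GlobalSegMap_clean(sMatGSMap)
      end subroutine decomp_by_row_example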
!IROUTINE: ByColumnGSMap_ - Generate Row-based GlobalSegMap for SparseMatrix -! -! !INTERFACE: - - subroutine ByColumnGSMap_(xGSMap, sMat, sMGSMap, root, comm) -! -! !USES: -! - use m_die, only: MP_perr_die,die - - use m_List, only: List - use m_List, only: List_init => init - use m_List, only: List_clean => clean - - use m_AttrVect, only: AttrVect - use m_AttrVect, only: AttrVect_init => init - use m_AttrVect, only: AttrVect_zero => zero - use m_AttrVect, only: AttrVect_lsize => lsize - use m_AttrVect, only: AttrVect_indexIA => indexIA - use m_AttrVect, only: AttrVect_copy => copy - use m_AttrVect, only: AttrVect_clean => clean - - use m_AttrVectComms, only: AttrVect_scatter => scatter - use m_AttrVectComms, only: AttrVect_gather => gather - - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_init => init - use m_GlobalMap, only : GlobalMap_clean => clean - - use m_GlobalSegMap, only: GlobalSegMap - use m_GlobalSegMap, only: GlobalSegMap_init => init - use m_GlobalSegMap, only: GlobalSegMap_peLocs => peLocs - use m_GlobalSegMap, only: GlobalSegMap_comp_id => comp_id - - use m_SparseMatrix, only: SparseMatrix - use m_SparseMatrix, only: SparseMatrix_lsize => lsize - use m_SparseMatrix, only: SparseMatrix_SortPermute => SortPermute - - implicit none - -! !INPUT PARAMETERS: -! - type(GlobalSegMap), intent(in) :: xGSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(inout) :: sMat - -! !OUTPUT PARAMETERS: -! - type(GlobalSegMap), intent(out) :: sMGSMap - -! !DESCRIPTION: This routine is invoked from all processes on the -! communicator {\tt comm} to create from an input {\tt SparseMatrix} -! {\tt sMat} (valid only on the {\tt root} process) and an input -! {\bf x}-vector decomposition described by the {\tt GlobalSegMap} -! argument {\tt xGSMap} (valid at least on the {\tt root}) to create -! an output {\tt GlobalSegMap} decomposition of the matrix elements -! {\tt sMGSMap}, which is valid on all processes on the communicator. -! This matrix {\tt GlobalSegMap} describes the corresponding column -! decomposition of {\tt sMat}. -! -! {\bf N.B.}: The argument {\tt sMat} is returned sorted in lexicographic -! order by column and row. -! -! !REVISION HISTORY: -! -! 13Apr01 - J.W. Larson - initial API spec. -! 26Apr01 - R.L. Jacob - add use statements for -! GlobalSegMap_init and GSMap_peLocs. -! Add gsize argument required to GSMap_peLocs. -! Add underscore to ComputeSegments call so it matches -! the subroutine decleration. -! change attribute on starts,lengths, and pe_locs to -! pointer to match GSMap_init. -! add use m_die statement -! 26Apr01 - J.W. Larson - fixed major logic bug -! that had all processes executing some operations that -! should only occur on the root. -! 09Jul03 - E.T. Ong - call pe_locs in parallel. -! reduce the serial sort from gcol:grow to just gcol. -!EOP -!------------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'ByColumnGSMap_' -! Process ID number - integer :: myID, mySIZE -! Attributes for the output GlobalSegMap - integer :: gsize, comp_id, ngseg -! Temporary array for identifying each matrix element column and -! process ID destination - type(AttrVect) :: gcol - type(AttrVect) :: dist_gcol - type(AttrVect) :: element_pe_locs - type(AttrVect) :: dist_element_pe_locs -! Index variables for the AttrVects - integer :: dist_gsize - integer :: gcol_index - integer :: element_pe_locs_index -! 
Temporary array for initializing GlobalMap Decomposition - integer,dimension(:), allocatable :: counts -! GlobalMap for setting up decomposition to call pe_locs - type(GlobalMap) :: dist_GMap -! Temporary arrays for matrix GlobalSegMap attributes - integer, dimension(:), pointer :: starts, lengths, pe_locs -! List storage for sorting keys - type(List) :: sort_keys -! Error flag - integer :: ierr -! Loop index - integer :: i - - ! Determine process id number myID - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'call MPI_COMM_RANK(...',ierr) - endif - - ! Determine the number of processors in communicator - - call MPI_COMM_SIZE(comm, mySIZE, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'call MPI_COMM_SIZE(...',ierr) - endif - - ! Allocate space for GlobalMap length information - - allocate(counts(0:mySIZE-1),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(counts)",ierr) - - ! First step: a lot of prep work on the root only: - - if(myID == root) then - - ! Sort the matrix entries in sMat by column. - ! First, create the key list... - - call List_init(sort_keys,'gcol') - - ! Now perform the sort/permute... - - call SparseMatrix_SortPermute(sMat, sort_keys) - - call List_clean(sort_keys) - - ! The global size of matrix GlobalSegMap is the number nonzero - ! elements in sMat. - - gsize = SparseMatrix_lsize(sMat) - - ! Allocate storage space for matrix element column indices and - ! process ID destinations - - call AttrVect_init(aV=gcol, iList="gcol", lsize=gsize) - - ! Extract global column information and place in array gCol - - call AttrVect_copy(aVin=sMat%data, aVout=gcol, iList="gcol") - - ! Setup GlobalMap decomposition lengths: - - do i=0,mySIZE-1 - counts(i) = gsize/mySIZE - enddo - counts(mySIZE-1) = counts(mySIZE-1) + mod(gsize,mySIZE) - - endif - - ! Initialize GlobalMap so that we can scatter the global row - ! information. The GlobalMap will inherit the component ID - ! from xGSMap - - comp_id = GlobalSegMap_comp_id(xGSMap) - - call GlobalMap_init(GMap=dist_GMap, comp_id=comp_id, lns=counts, & - root=root, comm=comm) - - call AttrVect_scatter(iV=gcol, oV=dist_gcol, GMap=dist_GMap, & - root=root, comm=comm) - - ! Similarly, we want to scatter the element_pe_locs using the - ! same decomposition - - dist_gsize = AttrVect_lsize(dist_gcol) - - call AttrVect_init(aV=dist_element_pe_locs, iList="element_pe_locs", & - lsize=dist_gsize) - call AttrVect_zero(dist_element_pe_locs) - - ! Compute process ID destination for each matrix element, - ! and store in the AttrVect element_pe_locs - - gcol_index = AttrVect_indexIA(dist_gcol,"gcol", dieWith=myname_) - element_pe_locs_index = AttrVect_indexIA(dist_element_pe_locs, & - "element_pe_locs", dieWith=myname_) - - call GlobalSegMap_peLocs(xGSMap, dist_gsize, & - dist_gcol%iAttr(gcol_index,1:dist_gsize), & - dist_element_pe_locs%iAttr(element_pe_locs_index,1:dist_gsize)) - - call AttrVect_gather(iV=dist_element_pe_locs, oV=element_pe_locs, & - GMap=dist_GMap, root=root, comm=comm) - - ! Back to the root operations - - if(myID == root) then - - ! Sanity check: Is the globalsize of sMat the same as the - ! gathered size of element_pe_locs? - - if(gsize /= AttrVect_lsize(element_pe_locs)) then - call die(myname_,"gsize /= AttrVect_lsize(element_pe_locs) & - & on root process") - endif - - ! Using the entries of gCol and element_pe_locs, build the - ! output GlobalSegMap attribute arrays starts(:), lengths(:), - ! 
and pe_locs(:) - - gcol_index = AttrVect_indexIA(gcol,"gcol", dieWith=myname_) - element_pe_locs_index = AttrVect_indexIA(element_pe_locs, & - "element_pe_locs", dieWith=myname_) - - call ComputeSegments_(element_pe_locs%iAttr(element_pe_locs_index, & - 1:gsize), & - gcol%iAttr(gcol_index,1:gsize), & - gsize, ngseg, starts, lengths, pe_locs) - ! Clean up on the root - - call AttrVect_clean(gcol) - call AttrVect_clean(element_pe_locs) - - endif ! if(myID == root) - - ! Non-root processes call GlobalSegMap_init with root_start, - ! root_length, and root_pe_loc, although these arguments are - ! not used in the subroutine. Since these correspond to dummy - ! shaped array arguments in initr_, the Fortran 90 standard - ! dictates that the actual arguments must contain complete shape - ! information. Therefore, these array arguments must be - ! allocated on all processes. - - if(myID /= root) then - allocate(starts(0),lengths(0),pe_locs(0),stat=ierr) - if(ierr /= 0) then - call die(myname_,'non-root allocate(starts...',ierr) - endif - endif - - ! Using this local data on the root, create the SparseMatrix - ! GlobalSegMap sMGSMap (which will be valid on all processes - ! on the communicator: - - call GlobalSegMap_init(sMGSMap, ngseg, starts, lengths, pe_locs, & - root, comm, comp_id, gsize) - - ! Clean up - - call GlobalMap_clean(dist_GMap) - call AttrVect_clean(dist_gcol) - call AttrVect_clean(dist_element_pe_locs) - - deallocate(starts, lengths, pe_locs, counts, stat=ierr) - if(ierr /= 0) then - call die(myname_,'deallocate(starts...',ierr) - endif - - - end subroutine ByColumnGSMap_ - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: ByRowGSMap_ - Generate Row-based GlobalSegMap for SparseMatrix -! -! !INTERFACE: - - subroutine ByRowGSMap_(yGSMap, sMat, sMGSMap, root, comm) -! -! !USES: -! - - use m_die, only: MP_perr_die,die - - use m_List, only: List - use m_List, only: List_init => init - use m_List, only: List_clean => clean - - use m_AttrVect, only: AttrVect - use m_AttrVect, only: AttrVect_init => init - use m_AttrVect, only: AttrVect_lsize => lsize - use m_AttrVect, only: AttrVect_indexIA => indexIA - use m_AttrVect, only: AttrVect_copy => copy - use m_AttrVect, only: AttrVect_clean => clean - use m_AttrVect, only: AttrVect_zero => zero - - use m_AttrVectComms, only: AttrVect_scatter => scatter - use m_AttrVectComms, only: AttrVect_gather => gather - - use m_GlobalMap, only : GlobalMap - use m_GlobalMap, only : GlobalMap_init => init - use m_GlobalMap, only : GlobalMap_clean => clean - - use m_GlobalSegMap, only: GlobalSegMap - use m_GlobalSegMap, only: GlobalSegMap_init => init - use m_GlobalSegMap, only: GlobalSegMap_peLocs => peLocs - use m_GlobalSegMap, only: GlobalSegMap_comp_id => comp_id - - use m_SparseMatrix, only: SparseMatrix - use m_SparseMatrix, only: SparseMatrix_lsize => lsize - use m_SparseMatrix, only: SparseMatrix_SortPermute => SortPermute - - implicit none - -! !INPUT PARAMETERS: -! - type(GlobalSegMap), intent(in) :: yGSMap - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(inout) :: sMat - -! !OUTPUT PARAMETERS: -! - type(GlobalSegMap), intent(out) :: sMGSMap - -! !DESCRIPTION: This routine is invoked from all processes on the -! communicator {\tt comm} to create from an input {\tt SparseMatrix} -! 
{\tt sMat} (valid only on the {\tt root} process) and an input -! {\bf y}-vector decomposition described by the {\tt GlobalSegMap} -! argument {\tt yGSMap} (valid at least on the {\tt root}) to create -! an output {\tt GlobalSegMap} decomposition of the matrix elements -! {\tt sMGSMap}, which is valid on all processes on the communicator. -! This matrix {\tt GlobalSegMap} describes the corresponding row -! decomposition of {\tt sMat}. -! -! {\bf N.B.}: The argument {\tt sMat} is returned sorted in lexicographic -! order by row and column. -! -! !REVISION HISTORY: -! -! 13Apr01 - J.W. Larson - initial API spec. -! 26Apr01 - R.L. Jacob - add use statements for -! GlobalSegMap_init and GSMap_peLocs. -! Add gsize argument required to GSMap_peLocs. -! Add underscore to ComputeSegments call so it matches -! the subroutine decleration. -! change attribute on starts,lengths, and pe_locs to -! pointer to match GSMap_init. -! 26Apr01 - J.W. Larson - fixed major logic bug -! that had all processes executing some operations that -! should only occur on the root. -! 09Jun03 - E.T. Ong - call peLocs in parallel. -! reduce the serial sort from grow:gcol to just grow. -!EOP -!------------------------------------------------------------------------- - - character(len=*),parameter :: myname_=myname//'ByRowGSMap_' -! Process ID number and communicator size - integer :: myID, mySIZE -! Attributes for the output GlobalSegMap - integer :: gsize, comp_id, ngseg -! Temporary array for identifying each matrix element row and -! process ID destination - type(AttrVect) :: grow - type(AttrVect) :: dist_grow - type(AttrVect) :: element_pe_locs - type(AttrVect) :: dist_element_pe_locs -! Index variables for AttrVects - integer :: dist_gsize - integer :: grow_index - integer :: element_pe_locs_index -! Temporary array for initializing GlobalMap Decomposition - integer,dimension(:), allocatable :: counts -! GlobalMap for setting up decomposition to call pe_locs - type(GlobalMap) :: dist_GMap -! Temporary arrays for matrix GlobalSegMap attributes - integer, dimension(:), pointer :: starts, lengths, pe_locs -! List storage for sorting keys - type(List) :: sort_keys -! Error flag - integer :: ierr -! Loop index - integer :: i - - ! Determine process id number myID - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'call MPI_COMM_RANK(...',ierr) - endif - - ! Determine the number of processors in communicator - - call MPI_COMM_SIZE(comm, mySIZE, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'call MPI_COMM_SIZE(...',ierr) - endif - - ! Allocate space for GlobalMap length information - - allocate(counts(0:mySIZE-1),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(counts)",ierr) - - ! First step: a lot of prep work on the root only: - - if(myID == root) then - - ! Sort the matrix entries in sMat by row. - ! First, create the key list... - - call List_init(sort_keys,'grow') - - ! Now perform the sort/permute... - - call SparseMatrix_SortPermute(sMat, sort_keys) - - call List_clean(sort_keys) - - ! The global size of matrix GlobalSegMap is the number of rows. - - gsize = SparseMatrix_lsize(sMat) - - ! Allocate storage space for matrix element row indices and - ! process ID destinations - - call AttrVect_init(aV=grow, iList="grow", lsize=gsize) - - ! Extract global row information and place in AttrVect grow - - call AttrVect_copy(aVin=sMat%data, aVout=grow, iList="grow") - - ! Setup GlobalMap decomposition lengths: - ! 
Give any extra points to the last process - - do i=0,mySIZE-1 - counts(i) = gsize/mySIZE - enddo - counts(mySIZE-1) = counts(mySIZE-1) + mod(gsize,mySIZE) - - endif - - ! Initialize GlobalMap and scatter the global row information. - ! The GlobalMap will inherit the component ID from yGSMap - - comp_id = GlobalSegMap_comp_id(yGSMap) - - call GlobalMap_init(GMap=dist_GMap, comp_id=comp_id, lns=counts, & - root=root, comm=comm) - - call AttrVect_scatter(iV=grow, oV=dist_grow, GMap=dist_GMap, & - root=root, comm=comm) - - ! Similarly, we want to scatter the element_pe_locs using the - ! same decomposition - - dist_gsize = AttrVect_lsize(dist_grow) - - call AttrVect_init(aV=dist_element_pe_locs, iList="element_pe_locs", & - lsize=dist_gsize) - call AttrVect_zero(dist_element_pe_locs) - - ! Compute process ID destination for each matrix element, - ! and store in the AttrVect element_pe_locs - - grow_index = AttrVect_indexIA(dist_grow,"grow", dieWith=myname_) - element_pe_locs_index = AttrVect_indexIA(dist_element_pe_locs, & - "element_pe_locs", dieWith=myname_) - - call GlobalSegMap_peLocs(yGSMap, dist_gsize, & - dist_grow%iAttr(grow_index,1:dist_gsize), & - dist_element_pe_locs%iAttr(element_pe_locs_index,1:dist_gsize)) - - ! Gather element_pe_locs on root so that we can call compute_segments - - call AttrVect_gather(iV=dist_element_pe_locs, oV=element_pe_locs, & - GMap=dist_GMap, root=root, comm=comm) - - ! Back to the root operations - - if(myID == root) then - - ! Sanity check: Is the globalsize of sMat the same as the - ! gathered size of element_pe_locs? - - if(gsize /= AttrVect_lsize(element_pe_locs)) then - call die(myname_,"gsize /= AttrVect_lsize(element_pe_locs) & - & on root process") - endif - - ! Using the entries of grow and element_pe_locs, build the - ! output GlobalSegMap attribute arrays starts(:), lengths(:), - ! and pe_locs(:) - - grow_index = AttrVect_indexIA(grow,"grow", dieWith=myname_) - element_pe_locs_index = AttrVect_indexIA(element_pe_locs, & - "element_pe_locs", dieWith=myname_) - - call ComputeSegments_(element_pe_locs%iAttr(element_pe_locs_index, & - 1:gsize), & - grow%iAttr(grow_index,1:gsize), & - gsize, ngseg, starts, lengths, pe_locs) - - ! Clean up on the root - - call AttrVect_clean(grow) - call AttrVect_clean(element_pe_locs) - - endif ! if(myID == root) - - ! Non-root processes call GlobalSegMap_init with root_start, - ! root_length, and root_pe_loc, although these arguments are - ! not used in the subroutine. Since these correspond to dummy - ! shaped array arguments in initr_, the Fortran 90 standard - ! dictates that the actual arguments must contain complete shape - ! information. Therefore, these array arguments must be - ! allocated on all processes. - - if(myID /= root) then - allocate(starts(0),lengths(0),pe_locs(0),stat=ierr) - if(ierr /= 0) then - call die(myname_,'non-root allocate(starts...',ierr) - endif - endif - - ! Using this local data on the root, create the SparseMatrix - ! GlobalSegMap sMGSMap (which will be valid on all processes - ! on the communicator. The GlobalSegMap will inherit the - ! component ID from yGSMap - - call GlobalSegMap_init(sMGSMap, ngseg, starts, lengths, pe_locs, & - root, comm, comp_id, gsize) - - ! 
Clean up: - - call GlobalMap_clean(dist_GMap) - call AttrVect_clean(dist_grow) - call AttrVect_clean(dist_element_pe_locs) - - deallocate(starts, lengths, pe_locs, counts, stat=ierr) - if(ierr /= 0) then - call die(myname_,'deallocate(starts...',ierr) - endif - - - end subroutine ByRowGSMap_ - -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: ComputeSegments_ - Create segments from list data. -! -! !INTERFACE: - - subroutine ComputeSegments_(element_pe_locs, elements, num_elements, & - nsegs, seg_starts, seg_lengths, seg_pe_locs) -! -! !USES: -! - - use m_die, only: die - - implicit none - -! !INPUT PARAMETERS: -! - integer, dimension(:), intent(in) :: element_pe_locs - integer, dimension(:), intent(in) :: elements - integer, intent(in) :: num_elements - -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: nsegs - integer, dimension(:), pointer :: seg_starts - integer, dimension(:), pointer :: seg_lengths - integer, dimension(:), pointer :: seg_pe_locs - -! !DESCRIPTION: This routine examins an input list of {\tt num\_elements} -! process ID locations stored in the array {\tt element\_pe\_locs}, counts -! the number of contiguous segments {\tt nsegs}, and returns the segment -! start index, length, and process ID location in the arrays {\tt seg\_starts(:)}, -! {\tt seg\_lengths(:)}, and {\tt seg\_pe\_locs(:)}, respectively. -! -! {\bf N.B.}: The argument {\tt sMat} is returned sorted in lexicographic -! order by row and column. -! -! !REVISION HISTORY: -! -! 18Apr01 - J.W. Larson - initial version. -! 28Aug01 - M.J. Zavislak -! Changed first sanity check to get size(element_pe_locs) -! instead of size(elements) -!EOP -!------------------------------------------------------------------------- - character(len=*),parameter :: myname_=myname//'ComputeSegments_' - - integer :: i, ierr, iseg - - ! Input argument sanity checks: - - if(size(element_pe_locs) < num_elements) then - call die(myname_,'input argument array element_pe_locs too small', & - num_elements-size(element_pe_locs)) - endif - - if(size(elements) < num_elements) then - call die(myname_,'input argument array elements too small', & - num_elements-size(elements)) - endif - - ! First pass: how many segments? - - do i=1,num_elements - - if(i == 1) then ! bootstrap segment count - - nsegs = 1 - - else ! usual point/segment processing - - ! New segment? If so, increment nsegs. - - if((elements(i) > elements(i-1) + 1) .or. & - (element_pe_locs(i) /= element_pe_locs(i-1))) then ! new segment - nsegs = nsegs + 1 - endif - - endif ! if(i == 1) block - - end do ! do i=1,num_elements - - allocate(seg_starts(nsegs), seg_lengths(nsegs), seg_pe_locs(nsegs), & - stat=ierr) - - if(ierr /= 0) then - call die(myname_,'allocate(seg_starts...',ierr) - endif - - ! Second pass: fill in segment data. - - ! NOTE: Structure of this loop was changed from a for loop - ! to avoid a faulty vectorization on the SUPER-UX compiler - - i=1 - ASSIGN_LOOP: do - - if(i == 1) then ! bootstrap first segment info. - - iseg = 1 - seg_starts(iseg) = 1 - seg_lengths(iseg) = 1 - seg_pe_locs(iseg) = element_pe_locs(iseg) - - else ! do usual point/segment processing - - ! New segment? This happens if 1) elements(i) > elements(i-1) + 1, or - ! 2) element_pe_locs(i) /= element_pe_locs(i-1). - - if((elements(i) > elements(i-1) + 1) .or. 
& - (element_pe_locs(i) /= element_pe_locs(i-1))) then ! new segment - - ! Initialize new segment - iseg = iseg + 1 - seg_starts(iseg) = i - seg_lengths(iseg) = 1 - seg_pe_locs(iseg) = element_pe_locs(i) - - else - - ! Increment current segment length - seg_lengths(iseg) = seg_lengths(iseg) + 1 - - endif ! If new segment block - - endif ! if(i == 1) block - - ! Prepare index i for the next loop around; - if(i>=num_elements) EXIT - i = i + 1 - - end do ASSIGN_LOOP - - if(iseg /= nsegs) then - call die(myname_,'segment number difference',iseg-nsegs) - endif - - end subroutine ComputeSegments_ - - end module m_SparseMatrixDecomp diff --git a/src/externals/mct/mct/m_SparseMatrixPlus.F90 b/src/externals/mct/mct/m_SparseMatrixPlus.F90 deleted file mode 100644 index de6e966b804..00000000000 --- a/src/externals/mct/mct/m_SparseMatrixPlus.F90 +++ /dev/null @@ -1,872 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SparseMatrixPlus -- Class Parallel for Matrix-Vector Multiplication -! -! !DESCRIPTION: -! Matrix-vector multiplication is one of the MCT's core services, and is -! used primarily for the interpolation of data fields from one physical -! grid to another. Let ${\bf x} \in \Re^{N_x}$ and -! ${\bf y} \in \Re^{N_y}$ represent data fields on physical grids $A$ -! and $B$, respectively. Field data is interpolated from grid $A$ to grid -! $B$ by -! $$ {\bf y} = {\bf M} {\bf x} , $$ -! where {\bf M} is aa ${N_y} \times {N_x}$ matrix. -! -! Within MCT, the {\tt SparseMatrix} data type is MCT's object for -! storing sparse matrices such as {\bf M} , and the {\tt AttrVect} data -! type is MCT's field data storage object. That is, {\bf x} and {\bf y} -! are each stored in {\tt AttrVect} form, and {\bf M} is stored as a -! {\tt SparseMatrix}. -! -! For global address spaces (uniprocessor or shared-memory parallel), this -! picture of matrix-vector multiplication is sufficient. If one wishes -! to perform {\em distributed-memory parallel} matrix-vector multiplication, -! however, in addition to computation, one must consider {\em communication}. -! -! There are three basic message-passing parallel strategies for computing -! ${\bf y} = {\bf M} {\bf x}$: -! -!\begin{enumerate} -! \item Decompose {\bf M} based on its {\em rows}, and corresponding to the -! decomposition for the vector {\bf y}. That is, if a given process owns -! the $i^{\rm th}$ element of {\bf y}, then all the elements of row $i$ of -! {\bf M} also reside on this process. Then ${\bf y} = {\bf M} {\bf x}$ is -! implemented as follows: -! \begin{enumerate} -! \item Create an {\em intermediate vector} {\bf x'} that is the pre-image of -! the elements of {\bf y} owned locally. -! \item Comunnicate with the appropriate processes on the local communicator to -! gather from {\bf x} the elements of {\bf x'}. -! \item Compute ${\bf y} = {\bf M} {\bf x'}$. -! \item Destroy the data structure holding {\bf x'}. -! \end{enumerate} -! \item Decompose {\bf M} based on its {\em columns}, and corresponding to the -! decomposition for the vector {\bf x}. That is, if a given process owns -! the $j^{\rm th}$ element of {\bf x}, then all the elements of column $j$ of -! {\bf M} also reside on this process. Then ${\bf y} = {\bf M} {\bf x}$ is -! implemented as follows: -! \begin{enumerate} -! 
\item Create an {\em intermediate vector} {\bf y'} that holds {\em partial sums} -! of elements of {\bf y} computed from {\bf x} and {\bf M}. -! \item Compute ${\bf y'} = {\bf M} {\bf x}$. -! \item Perform communications to route elements of {\bf y'} to their eventual -! destinations in {\bf y}, where they will be summed, resulting in the distributed -! vector {\bf y}. -! \item Destroy the data structure holding {\bf y'}. -! \end{enumerate} -! \item Decompose {\bf M} based on some arbitrary, user-supplied scheme. This will -! necessitate two intermediate vectors {\bf x'} and {\bf y'}. Then -! ${\bf y} = {\bf M} {\bf x}$ is implemented as follows: -! \begin{enumerate} -! \item Create {\em intermediate vectors} {\bf x'} and {\bf y'}. The numbers of -! elements in {\bf x'} and {\bf y'} are based {\bf M}, specifically its numbers of -! {\em distinct} row and column index values, respectively. -! \item Comunnicate with the appropriate processes on the local communicator to -! gather from {\bf x} the elements of {\bf x'}. -! \item Compute ${\bf y'} = {\bf M} {\bf x'}$. -! \item Perform communications to route elements of {\bf y'} to their eventual -! destinations in {\bf y}, where they will be summed, resulting in the distributed -! vector {\bf y}. -! \item Destroy the data structures holding {\bf x'} and {\bf y'}. -! \end{enumerate} -! \end{enumerate} -! -! These operations require information about many aspects of the multiplication -! process. These data are: -! \begin{itemize} -! \item The matrix-vector parallelization strategy, which is one of the following: -! \begin{enumerate} -! \item Distributed in {\bf x}, purely data local in {\bf y}, labeled by the -! public data member {\tt Xonly} -! \item Purely data local {\bf x}, distributed in {\bf y}, labeled by the -! public data member {\tt Yonly} -! \item Distributed in both {\bf x} and {\bf y}, labeled by the public data -! member {\tt XandY} -! \end{enumerate} -! \item A communications scheduler to create {\bf x'} from {\bf x}; -! \item A communications scheduler to deliver partial sums contained in {\bf y'} to -! {\bf y}. -! \item Lengths of the intermediate vectors {\bf x'} and {\bf y'}. -! \end{itemize} -! -! In MCT, the above data are stored in a {\em master} class for {\tt SparseMatrix}- -! {\tt AttrVect} multiplication. This master class is called a -! {\tt SparseMatrixPlus}. -! -! This module contains the definition of the {\tt SparseMatrixPlus}, and a variety -! of methods to support it. These include initialization, destruction, query, and -! data import/export. -! -! !INTERFACE: - - module m_SparseMatrixPlus - -! !USES: - - use m_String, only : String - use m_SparseMatrix, only : SparseMatrix - use m_Rearranger, only : Rearranger - -! !PUBLIC TYPES: - - public :: SparseMatrixPlus - - Type SparseMatrixPlus -#ifdef SEQUENCE - sequence -#endif - type(String) :: Strategy - integer :: XPrimeLength - type(Rearranger) :: XToXPrime - integer :: YPrimeLength - type(Rearranger) :: YPrimeToY - type(SparseMatrix) :: Matrix - integer :: Tag - End Type SparseMatrixPlus - -! 
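A minimal end-to-end sketch of the Xonly strategy enumerated above: build a SparseMatrixPlus from a root-resident matrix, apply it to field data, and clean up. The wrapper name interp_example is hypothetical; xGSMap, yGSMap, the AttrVects, communicator, and component ID are assumed to exist already; and the sMatAvMult generic is taken from m_MatAttrVectMult as referenced in the SEE ALSO note below, with its argument order (x, matrix-plus, y) stated here as an assumption.

      subroutine interp_example(sMat, xGSMap, yGSMap, xAV, yAV, comm, compid)
        use m_AttrVect,         only : AttrVect
        use m_GlobalSegMap,     only : GlobalSegMap
        use m_SparseMatrix,     only : SparseMatrix
        use m_SparseMatrixPlus, only : SparseMatrixPlus, Xonly
        use m_SparseMatrixPlus, only : SparseMatrixPlus_init => init
        use m_SparseMatrixPlus, only : SparseMatrixPlus_clean => clean
        use m_MatAttrVectMult,  only : sMatAvMult
        implicit none
        type(SparseMatrix), intent(inout) :: sMat       ! valid on the root only
        type(GlobalSegMap), intent(in)    :: xGSMap, yGSMap
        type(AttrVect),     intent(inout) :: xAV, yAV   ! fields on grids A and B
        integer,            intent(in)    :: comm, compid
        integer, parameter :: root = 0
        type(SparseMatrixPlus) :: sMatPlus

        ! Decompose sMat by row (following yGSMap) and build the x -> x' Rearranger.
        call SparseMatrixPlus_init(sMatPlus, sMat, xGSMap, yGSMap, Xonly, &
                                   root, comm, compid)

        ! y = M x, using the communication schedule stored in sMatPlus.
        call sMatAvMult(xAV, sMatPlus, yAV)

        call SparseMatrixPlus_clean(sMatPlus)
      end subroutine interp_example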
!PUBLIC MEMBER FUNCTIONS: - - public :: init - public :: vecinit - public :: clean - public :: initialized - public :: exportStrategyToChar - - interface init ; module procedure & - initFromRoot_, & - initDistributed_ - end interface - interface vecinit ; module procedure vecinit_ ; end interface - interface clean ; module procedure clean_ ; end interface - interface initialized ; module procedure initialized_ ; end interface - interface exportStrategyToChar ; module procedure & - exportStrategyToChar_ - end interface - -! !PUBLIC DATA MEMBERS: - - public :: Xonly ! Matrix decomposed only by ROW (i.e., based - ! on the decomposition of y); comms x->x' - public :: Yonly ! Matrix decomposed only by COLUMN (i.e., based - ! on the decomposition of x); comms y'->y - public :: XandY ! Matrix has complex ROW/COLUMN decomposed - -! !DEFINED PARAMETERS: - - integer,parameter :: DefaultTag = 700 - - -! !SEE ALSO: -! The MCT module m_SparseMatrix for more information about Sparse Matrices. -! The MCT module m_Rearranger for deatailed information about Communications -! scheduling. -! The MCT module m_AttrVect for details regarding the Attribute Vector. -! The MCT module m_MatAttrVectMult for documentation of API's that use -! the SparseMatrixPlus. -! -! !REVISION HISTORY: -! 29August 2002 - J. Larson - API specification. -!EOP ------------------------------------------------------------------- - - character(len=*), parameter :: Xonly = 'Xonly' - character(len=*), parameter :: Yonly = 'Yonly' - character(len=*), parameter :: XandY = 'XandY' - - character(len=*), parameter :: myname = 'MCT::m_SparseMatrixPlus' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initFromRoot_ - Creation and Initializtion from the Root -! -! !DESCRIPTION: -! This routine creates an {\tt SparseMatrixPlus} {\tt sMatPlus} using -! the following elements: -! \begin{itemize} -! \item A {\tt SparseMatrix} (the input argument {\tt sMat}), whose -! elements all reside only on the {\tt root} process of the MPI -! communicator with an integer handle defined by the input {\tt INTEGER} -! argument {\tt comm}; -! \item A {\tt GlobalSegMap} (the input argument {\tt xGSMap}) describing -! the domain decomposition of the vector {\bf x} on the communicator -! {\tt comm}; -! \item A {\tt GlobalSegMap} (the input argument {\tt yGSMap}) describing -! the domain decomposition of the vector {\bf y} on the communicator -! {\tt comm}; -! \item The matrix-vector multiplication parallelization strategy. This -! is set by the input {\tt CHARACTER} argument {\tt strategy}, which must -! have value corresponding to one of the following public data members -! defined in the declaration section of this module. Acceptable values -! for use in this routine are: {\tt Xonly} and {\tt Yonly}. -! \end{itemize} -! The optional argument {\tt Tag} can be used to set the tag value used in -! the call to {\tt Rearranger}. DefaultTag will be used otherwise. -! -! !INTERFACE: - - subroutine initFromRoot_(sMatPlus, sMat, xGSMap, yGSMap, strategy, & - root, comm, ComponentID, Tag) - -! 
!USES: - - use m_die - use m_stdio - use m_mpif90 - - use m_String, only : String - use m_String, only : String_init => init - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_clean => clean - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_nRows => nRows - use m_SparseMatrix, only : SparseMatrix_nCols => nCols - - use m_SparseMatrixComms, only : SparseMatrix_ScatterByRow => ScatterByRow - use m_SparseMatrixComms, only : SparseMatrix_ScatterByColumn => & - ScatterByColumn - - use m_SparseMatrixToMaps, only : SparseMatrixToXGlobalSegMap - use m_SparseMatrixToMaps, only : SparseMatrixToYGlobalSegMap - - use m_GlobalToLocal, only : GlobalToLocalMatrix - - use m_Rearranger, only : Rearranger - use m_Rearranger, only : Rearranger_init => init - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: xGSMap - type(GlobalSegMap), intent(in) :: yGSMap - character(len=*), intent(in) :: strategy - integer, intent(in) :: root - integer, intent(in) :: comm - integer, intent(in) :: ComponentID - integer,optional, intent(in) :: Tag - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !OUTPUT PARAMETERS: - - type(SparseMatrixPlus), intent(out) :: SMatPlus - -! !REVISION HISTORY: -! 30Aug02 - Jay Larson - API Specification -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initFromRoot_' - - type(GlobalSegMap) :: xPrimeGSMap, yPrimeGSMap - - integer :: myID, ierr - - ! Set tag used in Rearranger call - - SMatPlus%Tag = DefaultTag - if(present(Tag)) SMatPlus%Tag = Tag - - ! set vector flag - SMatPlus%Matrix%vecinit = .FALSE. - - ! Get local process ID number - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK() failed',ierr) - endif - - ! Basic Input Argument Checks: - - ! On the root, where the matrix is stored, do its number of - ! rows and columns match the global lengths ofthe vectors y - ! and x, respectively? - - if(myID == root) then - - if(GlobalSegMap_gsize(yGSMap) /= SparseMatrix_nRows(sMat)) then - write(stderr,'(3a,i8,2a,i8)') myname_, & - ':: FATAL--length of vector y different from row count of sMat.', & - 'Length of y = ',GlobalSegMap_gsize(yGSMap),' Number of rows in ',& - 'sMat = ',SparseMatrix_nRows(sMat) - call die(myname_) - endif - - if(GlobalSegMap_gsize(xGSMap) /= SparseMatrix_nCols(sMat)) then - write(stderr,'(3a,i8,2a,i8)') myname_, & - ':: FATAL--length of vector x different from column count of sMat.', & - 'Length of x = ',GlobalSegMap_gsize(xGSMap),' Number of columns in ',& - 'sMat = ',SparseMatrix_nCols(sMat) - call die(myname_) - endif - - endif ! if(myID == root) then... - - ! Check desired parallelization strategy name for validity. - ! If either of the strategies supported by this routine are - ! provided, initialize the appropriate component of sMatPlus. - - select case(strategy) - case(Xonly) ! decompose sMat by rows following decomposition of y - call String_init(sMatPlus%Strategy, strategy) - case(Yonly) ! decompose sMat by columns following decomposition of x - call String_init(sMatPlus%Strategy, strategy) - case(XandY) ! User has called the wrong routine. Try initDistributed() - ! instead. - write(stderr,'(4a)') myname_, & - ':: ERROR--Strategy name = ',strategy,' not supported by this routine.' 
- call die(myname_) - case default ! strategy name not recognized. - write(stderr,'(5a)') myname_, & - ':: ERROR--Invalid parallelization strategy name = ',strategy,' not ', & - 'recognized by this module.' - call die(myname_) - end select - - ! End Argument Sanity Checks. - - ! Based on the parallelization strategy, scatter sMat into - ! sMatPlus%Matrix accordingly. - - select case(strategy) - case(Xonly) - ! Scatter sMat by Row - call SparseMatrix_ScatterByRow(yGSMap, sMat, sMatPlus%Matrix, root, & - comm, ierr) - ! Compute GlobalSegMap associated with intermediate vector x' - call SparseMatrixToXGlobalSegMap(sMatPlus%Matrix, xPrimeGSMap, & - root, comm, ComponentID) - ! Determine length of x' from xPrimeGSMap: - sMatPlus%XPrimeLength = GlobalSegMap_lsize(xPrimeGSMap, comm) - ! Create Rearranger to assemble x' from x - call Rearranger_init(xGSMap, xPrimeGSMap, comm, sMatPlus%XToXPrime) - ! Create local column indices based on xPrimeGSMap - call GlobalToLocalMatrix(sMatPlus%Matrix, xPrimeGSMap, 'column', comm) - ! Create local row indices based on yGSMap - call GlobalToLocalMatrix(sMatPlus%Matrix, yGSMap, 'row', comm) - ! Destroy intermediate GlobalSegMap for x' - call GlobalSegMap_clean(xPrimeGSMap) - case(Yonly) - ! Scatter sMat by Column - call SparseMatrix_ScatterByColumn(xGSMap, sMat, sMatPlus%Matrix, root, & - comm, ierr) - ! Compute GlobalSegMap associated with intermediate vector y' - call SparseMatrixToYGlobalSegMap(sMatPlus%Matrix, yPrimeGSMap, & - root, comm, ComponentID) - ! Determine length of y' from yPrimeGSMap: - sMatPlus%YPrimeLength = GlobalSegMap_lsize(yPrimeGSMap, comm) - ! Create Rearranger to assemble y from partial sums in y' - call Rearranger_init(yPrimeGSMap, yGSMap, comm, sMatPlus%YPrimeToY) - ! Create local row indices based on yPrimeGSMap - call GlobalToLocalMatrix(sMatPlus%Matrix, yPrimeGSMap, 'row', comm) - ! Create local column indices based on xGSMap - call GlobalToLocalMatrix(sMatPlus%Matrix, xGSMap, 'column', comm) - ! Destroy intermediate GlobalSegMap for y' - call GlobalSegMap_clean(yPrimeGSMap) - case default ! do nothing - end select - - end subroutine initFromRoot_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initDistributed_ - Distributed Creation and Initializtion -! -! !DESCRIPTION: -! This routine creates an {\tt SparseMatrixPlus} {\tt sMatPlus} using -! the following elements: -! \begin{itemize} -! \item A {\tt SparseMatrix} (the input argument {\tt sMat}), whose -! elements have previously been destributed across the MPI communicator -! with an integer handle defined by the input {\tt INTEGER} argument -! {\tt comm}; -! \item A {\tt GlobalSegMap} (the input argument {\tt xGSMap}) describing -! the domain decomposition of the vector {\bf x} on the communicator -! {\tt comm}; and -! \item A {\tt GlobalSegMap} (the input argument {\tt yGSMap}) describing -! the domain decomposition of the vector {\bf y} on the communicator -! {\tt comm}; -! \end{itemize} -! The other input arguments required by this routine are the {\tt INTEGER} -! arguments {\tt root} and {\tt ComponentID}, which define the communicator -! root ID and MCT component ID, respectively. -! -! !INTERFACE: - - subroutine initDistributed_(sMatPlus, sMat, xGSMap, yGSMap, root, comm, & - ComponentID, Tag) - -! 
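A minimal sketch of the distributed initialization described above, for the case where every process already holds its chunk of sMat. Because no strategy string is passed, the generic init resolves to initDistributed_, which records the XandY strategy itself. The wrapper name interp_distributed_example is hypothetical, and the pre-distributed sMat, maps, communicator, and component ID are assumed to exist.

      subroutine interp_distributed_example(sMat, xGSMap, yGSMap, comm, compid)
        use m_GlobalSegMap,     only : GlobalSegMap
        use m_SparseMatrix,     only : SparseMatrix
        use m_SparseMatrixPlus, only : SparseMatrixPlus
        use m_SparseMatrixPlus, only : SparseMatrixPlus_init => init
        use m_SparseMatrixPlus, only : SparseMatrixPlus_clean => clean
        implicit none
        type(SparseMatrix), intent(inout) :: sMat   ! local chunk on every process
        type(GlobalSegMap), intent(in)    :: xGSMap, yGSMap
        integer,            intent(in)    :: comm, compid
        integer, parameter :: root = 0
        type(SparseMatrixPlus) :: sMatPlus

        ! Builds both the x -> x' and y' -> y Rearrangers from the existing
        ! (arbitrary) matrix decomposition.
        call SparseMatrixPlus_init(sMatPlus, sMat, xGSMap, yGSMap, root, comm, compid)

        ! ... use sMatPlus in the matrix-vector multiply, then:
        call SparseMatrixPlus_clean(sMatPlus)
      end subroutine interp_distributed_example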
!USES: - - use m_die - use m_stdio - use m_mpif90 - - use m_String, only : String - use m_String, only : String_init => init - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_gsize => gsize - use m_GlobalSegMap, only : GlobalSegMap_lsize => lsize - use m_GlobalSegMap, only : GlobalSegMap_clean => clean - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_nRows => nRows - use m_SparseMatrix, only : SparseMatrix_nCols => nCols - use m_SparseMatrix, only : SparseMatrix_Copy => Copy - - use m_SparseMatrixComms, only : SparseMatrix_ScatterByRow => ScatterByRow - use m_SparseMatrixComms, only : SparseMatrix_ScatterByColumn => & - ScatterByColumn - - use m_SparseMatrixToMaps, only : SparseMatrixToXGlobalSegMap - use m_SparseMatrixToMaps, only : SparseMatrixToYGlobalSegMap - - use m_GlobalToLocal, only : GlobalToLocalMatrix - - use m_Rearranger, only : Rearranger - use m_Rearranger, only : Rearranger_init => init - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: xGSMap - type(GlobalSegMap), intent(in) :: yGSMap - integer, intent(in) :: root - integer, intent(in) :: comm - integer, intent(in) :: ComponentID - integer,optional, intent(in) :: Tag - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrix), intent(inout) :: sMat - -! !OUTPUT PARAMETERS: - - type(SparseMatrixPlus), intent(out) :: SMatPlus - -! !REVISION HISTORY: -! 30Aug02 - Jay Larson - API Specification -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initDistributed_' - - type(GlobalSegMap) :: xPrimeGSMap, yPrimeGSMap - - integer :: myID, ierr - - ! Set tag used in Rearranger call - - SMatPlus%Tag = DefaultTag - if(present(Tag)) SMatPlus%Tag = Tag - - ! Get local process ID number - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - call MP_perr_die(myname_,'MPI_COMM_RANK() failed',ierr) - endif - ! Basic Input Argument Checks: - - ! A portion of sMat (even if there are no nonzero elements in - ! this local chunk) on each PE. We must check to ensure the - ! number rows and columns match the global lengths ofthe - ! vectors y and x, respectively. - - if(GlobalSegMap_gsize(yGSMap) /= SparseMatrix_nRows(sMat)) then - write(stderr,'(3a,i8,2a,i8)') myname, & - ':: FATAL--length of vector y different from row count of sMat.', & - 'Length of y = ',GlobalSegMap_gsize(yGSMap),' Number of rows in ',& - 'sMat = ',SparseMatrix_nRows(sMat) - call die(myname_) - endif - - if(GlobalSegMap_gsize(xGSMap) /= SparseMatrix_nCols(sMat)) then - write(stderr,'(3a,i8,2a,i8)') myname, & - ':: FATAL--length of vector x different from column count of sMat.', & - 'Length of x = ',GlobalSegMap_gsize(xGSMap),' Number of columns in ',& - 'sMat = ',SparseMatrix_nCols(sMat) - call die(myname_) - endif - - ! End Argument Sanity Checks. - - ! Set parallelization strategy to XandY, since the work distribution - ! was previously determined and in principle can be *anything* - - call String_init(sMatPlus%Strategy, XandY) - - ! Based on the XandY parallelization strategy, build SMatPlus - ! First, copy Internals of sMat into sMatPlus%Matrix: - call SparseMatrix_Copy(sMat, sMatPlus%Matrix) - ! Compute GlobalSegMap associated with intermediate vector x' - call SparseMatrixToXGlobalSegMap(sMatPlus%Matrix, xPrimeGSMap, & - root, comm, ComponentID) - ! Determine length of x' from xPrimeGSMap: - sMatPlus%XPrimeLength = GlobalSegMap_lsize(xPrimeGSMap, comm) - ! 
Create Rearranger to assemble x' from x - call Rearranger_init(xGSMap, xPrimeGSMap, comm, sMatPlus%XToXPrime) - ! Create local column indices based on xPrimeGSMap - call GlobalToLocalMatrix(sMatPlus%Matrix, xPrimeGSMap, 'column', comm) - ! Destroy intermediate GlobalSegMap for x' - call GlobalSegMap_clean(xPrimeGSMap) - ! Compute GlobalSegMap associated with intermediate vector y' - call SparseMatrixToYGlobalSegMap(sMatPlus%Matrix, yPrimeGSMap, & - root, comm, ComponentID) - ! Determine length of y' from yPrimeGSMap: - sMatPlus%YPrimeLength = GlobalSegMap_lsize(yPrimeGSMap, comm) - ! Create Rearranger to assemble y from partial sums in y' - call Rearranger_init(yPrimeGSMap, yGSMap, comm, sMatPlus%YPrimeToY) - ! Create local row indices based on yPrimeGSMap - call GlobalToLocalMatrix(sMatPlus%Matrix, yPrimeGSMap, 'row', comm) - ! Destroy intermediate GlobalSegMap for y' - call GlobalSegMap_clean(yPrimeGSMap) - - end subroutine initDistributed_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: vecinit_ - Initialize vector parts of a SparseMatrixPlus -! -! !DESCRIPTION: -! This routine will initialize the parts of the SparseMatrix in -! the SparseMatrixPlus object that are used in the vector-friendly -! version of the sparse matrix multiply. -! -! !INTERFACE: - - subroutine vecinit_(SMatP) -! -! !USES: -! - use m_die - use m_SparseMatrix, only : SparseMatrix_vecinit => vecinit - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrixPlus), intent(inout) :: SMatP - -! !REVISION HISTORY: -! 29Oct03 - R. Jacob - initial prototype -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::vecinit_' - - call SparseMatrix_vecinit(SMatP%Matrix) - - end subroutine vecinit_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Destruction of a SparseMatrixPlus Object -! -! !DESCRIPTION: -! This routine deallocates all allocated memory belonging to the -! input/output {\tt SparseMatrixPlus} argument {\tt SMatP}, and sets -! to zero its integer components describing intermediate vector length, -! and sets its {\tt LOGICAL} flag signifying initialization to -! {\tt .FALSE.} The success (failure) of this operation is signified -! by the zero (non-zero) value of the optional {\tt INTEGER} output -! argument {\tt status}. If the user does supply {\tt status} when -! invoking this routine, failure of {\tt clean\_()} will lead to -! termination of execution with an error message. -! -! !INTERFACE: - - subroutine clean_(SMatP, status) - -! !USES: - - use m_die - use m_stdio - - use m_String, only : String - use m_String, only : String_init => init - use m_String, only : String_ToChar => toChar - use m_String, only : String_clean => clean - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_clean => clean - - use m_Rearranger, only : Rearranger - use m_Rearranger, only : Rearranger_clean => clean - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - type(SparseMatrixPlus), intent(inout) :: SMatP - -! !OUTPUT PARAMETERS: - - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 
30Aug02 - Jay Larson - API Specification -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::clean_' - - integer :: myStatus - type(String) :: dummyStrategy ! SGI IR->WHIRL work-around - character(len=5) :: myStrategy - - ! If status was supplied, set it to zero (success) - - if(present(status)) status = 0 - - ! The following string copy is superfluous. It is placed here - ! to outwit a compiler bug in the SGI and SunOS compilers. - ! It occurs when a component of a derived type is used as an - ! argument to String_ToChar. This bug crashes the compiler - ! with the error message: - ! Error: Signal Segmentation fault in phase IR->WHIRL Conversion - - call String_init(dummyStrategy, SMatP%Strategy) - myStrategy = String_ToChar(dummyStrategy) - - ! Use SMatP%Strategy to determine which Rearranger(s) need - ! to be destroyed. The CHARACTER parameters Xonly, Yonly, - ! and XandY are inherited from the declaration section of - ! this module. - - - select case(myStrategy) - case(Xonly) ! destroy X-rearranger only - - call Rearranger_clean(SMatP%XToXprime, myStatus) - if(myStatus /= 0) then ! something went wrong - if(present(status)) then - status = myStatus - return - else - write(stderr,'(3a,i8)') myname_, & - ':: ERROR - call to Rearranger_clean(SMatP%XToXprime) failed.', & - ' stat = ',myStatus - endif - endif - - case(Yonly) ! destroy Y-rearranger only - - call Rearranger_clean(SMatP%YprimeToY, myStatus) - if(myStatus /= 0) then ! something went wrong - if(present(status)) then - status = myStatus - return - else - write(stderr,'(3a,i8)') myname_, & - ':: ERROR - call to Rearranger_clean(SMatP%YPrimeToY) failed.', & - ' stat = ',myStatus - endif - endif - - case(XandY) ! destroy both X- and Y-rearrangers - - call Rearranger_clean(SMatP%XToXprime, myStatus) - if(myStatus /= 0) then ! something went wrong - if(present(status)) then - status = myStatus - return - else - write(stderr,'(3a,i8)') myname_, & - ':: ERROR - call to Rearranger_clean(SMatP%XToXprime) failed.', & - ' stat = ',myStatus - endif - endif - - call Rearranger_clean(SMatP%YprimeToY, myStatus) - if(myStatus /= 0) then ! something went wrong - if(present(status)) then - status = myStatus - return - else - write(stderr,'(3a,i8)') myname_, & - ':: ERROR - call to Rearranger_clean(SMatP%YPrimeToY) failed.', & - ' stat = ',myStatus - endif - endif - - case default ! do nothing--corresponds to purely data local case - end select - - ! Zero out XPrimeLength and YPrimeLength - - SMatP%XPrimeLength = 0 - SMatP%YPrimeLength = 0 - - ! Destroy the SparseMatrix component SMatP%Matrix - - call SparseMatrix_clean(SMatP%Matrix, myStatus) - if(myStatus /= 0) then ! something went wrong - if(present(status)) then - status = myStatus - return - else - write(stderr,'(2a,i8)') myname_, & - ':: ERROR - call to SparseMatrix_clean() failed with stat=',myStatus - endif - endif - - ! Destroy the String SMatP%Strategy and its copy - - call String_clean(SMatP%Strategy) - call String_clean(dummyStrategy) - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initialized_ - Confirmation of Initialization -! -! !DESCRIPTION: -! This {\tt LOGICAL} query function tells the user if the input -! {\tt SparseMatrixPlus} argument {\tt sMatPlus} has been initialized. -! 
The return value of {\tt initialized\_} is {\tt .TRUE.} if -! {\tt sMatPlus} has been previously initialized, {\tt .FALSE.} if it -! has not. -! -! !INTERFACE: - - logical function initialized_(sMatPlus) -! -! !USES: -! -! No external modules are used by this function. - - use m_String, only : String_len - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_identical => identical - use m_List, only : List_clean => clean - - use m_die - - implicit none - -! !INPUT PARAMETERS: -! - type(SparseMatrixPlus), intent(in) :: sMatPlus - -! !REVISION HISTORY: -! 26Sep02 - Jay Larson - Implementation -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::initialized_' - - integer :: XonlyLen, YonlyLen, XandYLen - type(List) :: XonlyList, YonlyList, XandYList, stratList - - initialized_ = .FALSE. - - XonlyLen = len(trim(Xonly)) - YonlyLen = len(trim(Yonly)) - XandYLen = len(trim(XandY)) - - if( (XonlyLen /= YonlyLen) .or. (XonlyLen /= XandYLen) ) then - call die(myname_,"The length of the strategies are unequal. & - &This routine needs to be rewritten.") - endif - - if(associated(sMatPlus%strategy%c)) then - if(String_len(sMatPlus%strategy) == XonlyLen) then - call List_init(XonlyList,Xonly) - call List_init(YonlyList,Yonly) - call List_init(XandYList,XandY) - call List_init(stratList,sMatPlus%strategy) - if(List_identical(stratList,XonlyList)) initialized_ = .TRUE. - if(List_identical(stratList,YonlyList)) initialized_ = .TRUE. - if(List_identical(stratList,XandYList)) initialized_ = .TRUE. - call List_clean(XonlyList) - call List_clean(YonlyList) - call List_clean(XandYList) - call List_clean(stratList) - endif - endif - - end function initialized_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportStrategyToChar - Return Parallelization Strategy -! -! !DESCRIPTION: -! This query subroutine returns the parallelization strategy set in -! the input {\tt SparseMatrixPlus} argument {\tt sMatPlus}. The result -! is returned in the output {\tt CHARACTER} argument {\tt StratChars}. -! -! !INTERFACE: - - function exportStrategyToChar_(sMatPlus) -! -! !USES: -! - use m_stdio - use m_die - - use m_String, only : String_ToChar => toChar - use m_String, only : String_init => init - use m_String, only : String_clean => clean - use m_String, only : String - - implicit none - -! !INPUT PARAMETERS: -! - type(SparseMatrixPlus), intent(in) :: sMatPlus - -! !OUTPUT PARAMETERS: -! - character(len=size(sMatPlus%Strategy%c)) :: exportStrategyToChar_ - -! !REVISION HISTORY: -! 01Aug07 - Jay Larson - Implementation -!EOP ___________________________________________________________________ -! - character(len=*),parameter :: myname_=myname//'::exportStrategyToChar_' - type(String) :: dummyStrategy ! SGI IR->WHIRL work-around - - ! Check input argument to ensure it has been initialized. If not, - ! signal an error and terminate execution. - - if( .not. initialized_(sMatPlus) ) then - write(stderr,'(3a)') myname_,':: Warning, input argument not initialized, ', & - 'returning empty character field for parallelization strategy.' - exportStrategyToChar_ = ' ' - return - endif - - ! 
Return in character form the parallelizaiton strategy - call String_init(dummyStrategy, SMatPlus%Strategy) - - exportStrategyToChar_ = String_ToChar(dummyStrategy) - - call String_clean(dummyStrategy) - - end function exportStrategyToChar_ - - end module m_SparseMatrixPlus - diff --git a/src/externals/mct/mct/m_SparseMatrixToMaps.F90 b/src/externals/mct/mct/m_SparseMatrixToMaps.F90 deleted file mode 100644 index b28448a6231..00000000000 --- a/src/externals/mct/mct/m_SparseMatrixToMaps.F90 +++ /dev/null @@ -1,456 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SparseMatrixToMaps -- Maps from the Sparse Matrix -! -! !DESCRIPTION: -! The {\tt SparseMatrix} provides consolidated (on one process) or -! distributed sparse matrix storage for the operation -! ${\bf y} = {\bf M} {\bf x}$, where {\bf x} and {\bf y} are vectors, -! and {\bf M} is a matrix. In performing parallel matrix-vector -! multiplication, one has numerous options regarding the decomposition -! of the matrix {\bf M}, and the vectors {\bf y} and {\bf x}. -! This module provides services to generate mct mapping components---the -! {\tt GlobalMap} and {\tt GlobalSegMap} for the vectors {\bf y} and/or -! {\bf x} based on the decomposition of the sparse matrix {\bf M}. -! -! !INTERFACE: - - module m_SparseMatrixToMaps -! -! !USES: -! - use m_SparseMatrix, only : SparseMatrix - - implicit none - - private ! except - - public :: SparseMatrixToXGlobalSegMap - public :: SparseMatrixToYGlobalSegMap - - interface SparseMatrixToXGlobalSegMap ; module procedure & - SparseMatrixToXGlobalSegMap_ - end interface - - interface SparseMatrixToYGlobalSegMap ; module procedure & - SparseMatrixToYGlobalSegMap_ - end interface - -! !REVISION HISTORY: -! 13Apr01 - J.W. Larson - initial prototype -! and API specifications. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_SparseMatrixToMaps' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SparseMatrixToXGlobalSegMap_ - Generate X GlobalSegmap. -! -! !DESCRIPTION: Given an input {\tt SparseMatrix} argument {\tt sMat}, -! this routine generates an output {\tt GlobalSegMap} variable -! {\tt xGSMap}, which describes the domain decomposition of the vector -! {\bf x} in the distributed matrix-vector multiplication -! $${\bf y} = {\bf M} {\bf x}.$$ -! -! !INTERFACE: - - subroutine SparseMatrixToXGlobalSegMap_(sMat, xGSMap, root, comm, comp_id) -! -! !USES: -! - use m_stdio, only : stderr - use m_die, only : die - use m_mpif90 - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_nCols => nCols - use m_SparseMatrix, only : SparseMatrix_lsize => lsize - use m_SparseMatrix, only : SparseMatrix_indexIA => indexIA - use m_SparseMatrix, only : SparseMatrix_SortPermute => SortPermute - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_init => init - - implicit none - -! 
!INPUT PARAMETERS: -! - integer, intent(in) :: root ! communicator root - integer, intent(in) :: comm ! communicator handle - integer, intent(in) :: comp_id ! component id - -! !INPUT/OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(inout) :: sMat ! input SparseMatrix - -! !OUTPUT PARAMETERS: -! - type(GlobalSegMap), intent(out) :: xGSMap ! segmented decomposition - ! for x -! !REVISION HISTORY: -! 13Apr01 - J.W. Larson - API specification. -! 25Apr01 - J.W. Larson - First version. -! 27Apr01 - J.W. Larson - Bug fix--intent of -! argument sMat changed from (IN) to (INOUT) -! 27Apr01 - R.L. Jacob - bug fix-- add use -! statement for SortPermute -! 01May01 - R.L. Jacob - make comp_id an -! input argument -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SparseMatrixToXGlobalSegMap_' - -! SparseMatrix attributes: - integer :: lsize -! GlobalSegMap input attributes: - integer :: gsize, ngseg - integer, dimension(:), pointer :: starts, lengths -! Temporary array for identifying each matrix element column and -! process ID destination - integer, dimension(:), allocatable :: gCol, element_pe_locs -! Index to identify the gcol attribute in sMat: - integer :: igCol -! Matrix element sorting keys list: - type(List) :: sort_keys -! Loop index and error flag: - integer :: i, ierr - - ! Determine he local number of matrix elements lsize - - lsize = SparseMatrix_lsize(sMat) - - ! The value of gsize is taken from the number of columns in sMat: - - gsize = SparseMatrix_nCols(sMat) - - ! Sort SparseMatrix entries by global column index gcol, then - ! global row index. - - ! Create Sort keys list - - call List_init(sort_keys,'gcol:grow') - - ! Sort and permute the entries of sMat into lexicographic order - ! by global column, then global row. - - call SparseMatrix_SortPermute(sMat, sort_keys) - - ! Clean up sort keys list - - call List_clean(sort_keys) - - ! Allocate storage space for matrix element column indices and - ! process ID destinations - - allocate(gCol(lsize), stat=ierr) - - if(ierr /= 0) then - call die(myname_,'allocate(gCol...',ierr) - endif - - ! Extract global column information and place in array gCol - - igCol = SparseMatrix_indexIA(sMat, 'gcol', dieWith=myname_) - - do i=1, lsize - gCol(i) = sMat%data%iAttr(igCol,i) - end do - - ! Scan sorted entries of gCol to count segments (ngseg), and - ! their starting indices and lengths (returned in the arrays - ! starts(:) and lengths(:), respectively) - - call ComputeSegments_(gCol, lsize, ngseg, starts, lengths) - - ! Now we have sufficient data to call the GlobalSegMap - ! initialization using distributed data: - - call GlobalSegMap_init(xGSMap, starts, lengths, root, comm, & - comp_id, gsize=gsize) - - ! clean up temporary arrays gCol(:), starts(:) and lengths(:), - ! (the latter two were allocated in the call to the routine - ! ComputeSegments_()) - - deallocate(gCol, starts, lengths, stat=ierr) - - if(ierr /= 0) then - call die(myname_,'deallocate(gCol...',ierr) - endif - - end subroutine SparseMatrixToXGlobalSegMap_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SparseMatrixToYGlobalSegMap_ - Generate Y GlobalSegmap. -! -! !DESCRIPTION: Given an input {\tt SparseMatrix} argument {\tt sMat}, -! this routine generates an output {\tt GlobalSegMap} variable -! 
{\tt yGSMap}, which describes the domain decomposition of the vector -! {\bf y} in the distributed matrix-vector multiplication -! ${\bf y} = {\bf M} {\bf x}$. -! -! !INTERFACE: - - subroutine SparseMatrixToYGlobalSegMap_(sMat, yGSMap, root, comm, comp_id) -! -! !USES: -! - use m_stdio, only : stderr - use m_die, only : die - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_nRows => nRows - use m_SparseMatrix, only : SparseMatrix_lsize => lsize - use m_SparseMatrix, only : SparseMatrix_indexIA => indexIA - use m_SparseMatrix, only : SparseMatrix_SortPermute => SortPermute - - use m_GlobalSegMap, only : GlobalSegMap - use m_GlobalSegMap, only : GlobalSegMap_init => init - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: root ! communicator root - integer, intent(in) :: comm ! communicator handle - integer, intent(in) :: comp_id ! component id - -! !INPUT/OUTPUT PARAMETERS: -! - type(SparseMatrix), intent(inout) :: sMat ! input SparseMatrix - -! !OUTPUT PARAMETERS: -! - type(GlobalSegMap), intent(out) :: yGSMap ! segmented decomposition - ! for y -! !REVISION HISTORY: -! 13Apr01 - J.W. Larson - API specification. -! 25Apr01 - J.W. Larson - initial code. -! 27Apr01 - J.W. Larson - Bug fix--intent of -! argument sMat changed from (IN) to (INOUT) -! 27Apr01 - R.L. Jacob - bug fix-- add use -! statement for SortPermute -! 01May01 - R.L. Jacob - make comp_id an -! input argument -! 07May02 - J.W. Larson - Changed interface to -! make it consistent with SparseMatrixToXGlobalSegMap_(). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SparseMatrixToYGlobalSegMap_' - -! SparseMatrix attributes: - integer :: lsize -! GlobalSegMap input attributes: - integer :: gsize, ngseg - integer, dimension(:), pointer :: starts, lengths -! Temporary array for identifying each matrix element column and -! process ID destination - integer, dimension(:), allocatable :: gRow, element_pe_locs -! Index to identify the gRow attribute in sMat: - integer :: igRow -! Matrix element sorting keys list: - type(List) :: sort_keys -! Loop index and error flag: - integer :: i, ierr - - ! Determine he local number of matrix elements lsize - - lsize = SparseMatrix_lsize(sMat) - - ! The value of gsize is taken from the number of columns in sMat: - - gsize = SparseMatrix_nRows(sMat) - - ! Sort SparseMatrix entries by global column index grow, then - ! global row index. - - ! Create Sort keys list - - call List_init(sort_keys,'grow:gcol') - - ! Sort and permute the entries of sMat into lexicographic order - ! by global column, then global row. - - call SparseMatrix_SortPermute(sMat, sort_keys) - - ! Clean up sort keys list - - call List_clean(sort_keys) - - ! Allocate storage space for matrix element column indices and - ! process ID destinations - - allocate(gRow(lsize), stat=ierr) - - if(ierr /= 0) then - call die(myname_,'allocate(gRow...',ierr) - endif - - ! Extract global column information and place in array gRow - - igRow = SparseMatrix_indexIA(sMat,'grow', dieWith=myname_) - - do i=1, lsize - gRow(i) = sMat%data%iAttr(igRow,i) - end do - - ! Scan sorted entries of gRow to count segments (ngseg), and - ! their starting indices and lengths (returned in the arrays - ! starts(:) and lengths(:), respectively) - - call ComputeSegments_(gRow, lsize, ngseg, starts, lengths) - - ! 
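Both GlobalSegMap-generation routines above reduce the sorted global indices to run-length segments via ComputeSegments_ (defined further below) before calling GlobalSegMap_init. A minimal standalone sketch of that run-detection step, written as a single pass rather than the two-pass form used in this module; the names and data are illustrative only.

      program demo_segments
        implicit none
        ! Sorted global indices, as they would appear after SortPermute:
        integer, parameter :: n = 7
        integer :: indices(n) = (/ 3, 4, 5, 9, 10, 17, 18 /)
        integer :: starts(n), lengths(n)   ! at most n segments
        integer :: nsegs, i

        nsegs = 0
        do i = 1, n
           if (i == 1) then
              nsegs = nsegs + 1          ! open the first segment
              starts(nsegs) = indices(i)
              lengths(nsegs) = 1
           else if (indices(i) > indices(i-1) + 1) then
              nsegs = nsegs + 1          ! gap found: open a new segment
              starts(nsegs) = indices(i)
              lengths(nsegs) = 1
           else
              lengths(nsegs) = lengths(nsegs) + 1   ! still consecutive: extend
           endif
        end do

        do i = 1, nsegs
           print '(a,i2,a,i4,a,i4)', 'segment ', i, ': start=', starts(i), ' length=', lengths(i)
        end do
        ! Expected segments: (3,3), (9,2), (17,2)
      end program demo_segments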
Now we have sufficient data to call the GlobalSegMap - ! initialization using distributed data: - - call GlobalSegMap_init(yGSMap, starts, lengths, root, comm, & - comp_id, gsize=gsize) - - ! clean up temporary arrays gRow(:), starts(:) and lengths(:), - ! (the latter two were allocated in the call to the routine - ! ComputeSegments_()) - - deallocate(gRow, starts, lengths, stat=ierr) - - if(ierr /= 0) then - call die(myname_,'deallocate(gRow...',ierr) - endif - - end subroutine SparseMatrixToYGlobalSegMap_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: CreateSegments_ - Generate segment information. -! -! !DESCRIPTION: This routine examines an input {\tt INTEGER} list of -! numbers {\tt indices} (of length {\tt num\_indices}), determines the -! number of segments of consecutive numbers (or runs) {\tt nsegs}. The -! starting indices for each run, and their lengths are returned in the -! {\tt INTEGER} arrays {\tt starts(:)} and {\tt lengths(:)}, respectively. -! -! !INTERFACE: - - subroutine ComputeSegments_(indices, num_indices, nsegs, starts, lengths) - -! -! !USES: -! - use m_stdio, only : stderr - use m_die, only : die - - implicit none -! -! !INPUT PARAMETERS: -! - - integer, dimension(:), intent(in) :: indices - integer, intent(in) :: num_indices -! -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: nsegs - integer, dimension(:), pointer :: starts - integer, dimension(:), pointer :: lengths - - -! !REVISION HISTORY: -! 19Apr01 - J.W. Larson - API specification. -! 25Apr01 - J.W. Larson - Initial code. -! 27Apr01 - J.W. Larson - Bug fix--error in -! computation of segment starts/lengths. -! 27Nov01 - E.T. Ong - Bug fix--initialize -! nsegs=0 in case num_indices=0. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ComputeSegments_' - - integer :: i, ierr - - ! First pass: count the segments - - nsegs = 0 - - do i=1,num_indices - - if(i == 1) then ! bootstrap segment counting process - - nsegs = 1 - - else - - if(indices(i) > indices(i-1) + 1) then ! new segment - nsegs = nsegs + 1 - endif - - endif ! if(i==1) - - end do ! do i=1, num_indices - - ! Allocate storage space for starts(:) and lengths(:) - - allocate(starts(nsegs), lengths(nsegs), stat=ierr) - - if(ierr /= 0) then - call die(myname_,'allocate(starts...',ierr) - endif - - ! Second pass: compute segment start/length info - - do i=1,num_indices - - select case(i) - case(1) ! bootstrap segment counting process - nsegs = 1 - starts(nsegs) = indices(i) -! rml patch - lengths(nsegs) = 1 - case default - - if(i == num_indices) then ! last point - if(indices(i) > indices(i-1) + 1) then ! new segment with 1 pt. - ! first, close the books on the penultimate segment: - lengths(nsegs) = indices(i-1) - starts(nsegs) + 1 - nsegs = nsegs + 1 - starts(nsegs) = indices(i) - lengths(nsegs) = 1 ! (just one point) - else - lengths(nsegs) = indices(i) - starts(nsegs) + 1 - endif - else - if(indices(i) > indices(i-1) + 1) then ! new segment - lengths(nsegs) = indices(i-1) - starts(nsegs) + 1 - nsegs = nsegs + 1 - starts(nsegs) = indices(i) - endif - endif - - end select ! select case(i) - - end do ! 
do i=1, num_indices - - end subroutine ComputeSegments_ - - end module m_SparseMatrixToMaps diff --git a/src/externals/mct/mct/m_SpatialIntegral.F90 b/src/externals/mct/mct/m_SpatialIntegral.F90 deleted file mode 100644 index 2cf709b93f5..00000000000 --- a/src/externals/mct/mct/m_SpatialIntegral.F90 +++ /dev/null @@ -1,2034 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SpatialIntegral - Spatial Integrals and Averages using a GeneralGrid -! -! !DESCRIPTION: This module provides spatial integration and averaging -! services for the MCT. For a field $\Phi$ sampled at a point ${\bf x}$ -! in some multidimensional domain $\Omega$, the integral $I$ of -! $\Phi({\bf x})$ is -! $$ I = \int_{\Omega} \Phi ({\bf x}) d\Omega .$$ -! The spatial average $A$ of $\Phi({\bf x})$ over $\Omega$ is -! $$ A = {{ \int_{\Omega} \Phi ({\bf x}) d\Omega} \over -! { \int_{\Omega} d\Omega} }. $$ -! Since the {\tt AttrVect} represents a discretized field, the integrals -! above are implemented as: -! $$ I = \sum_{i=1}^N \Phi_i \Delta \Omega_i $$ -! and -! $$ A = {{\sum_{i=1}^N \Phi_i \Delta \Omega_i } \over -!{\sum_{i=1}^N \Delta \Omega_i } }, $$ -! where $N$ is the number of physical locations, $\Phi_i$ is the value -! of the field $\Phi$ at location $i$, and $\Delta \Omega_i$ is the spatial -! weight (lenghth element, cross-sectional area element, volume element, -! {\em et cetera}) at location $i$. -! -! MCT extends the concept of integrals and area/volume averages to include -! {\em masked} integrals and averages. MCT recognizes both {\em integer} -! and {\em real} masks. An integer mask $M$ is a vector of integers (one -! corresponding to each physical location) with each element having value -! either zero or one. Integer masks are used to include/exclude data from -! averages or integrals. For example, if one were to compute globally -! averaged cloud amount over land (but not ocean nor sea-ice), one would -! assign a $1$ to each location on the land and a $0$ to each non-land -! location. A {\em real} mask $F$ is a vector of real numbers (one corresponding -! to each physical location) with each element having value within the -! closed interval $[0,1]$. .Real masks are used to represent fractional -! area/volume coverage at a location by a given component model. For -! example, if one wishes to compute area averages over sea-ice, one must -! include the ice fraction present at each point. Masked Integrals and -! averages are represented in the MCT by: -! $$ I = \sum_{i=1}^N {\prod_{j=1}^J M_i} {\prod_{k=1}^K F_i} -! \Phi_i \Delta \Omega_i $$ -! and -! $$ A = {{\sum_{i=1}^N \bigg({\prod_{j=1}^J M_i}\bigg) \bigg( {\prod_{k=1}^K F_i} -! \bigg) \Phi_i -! \Delta \Omega_i } \over -!{\sum_{i=1}^N \bigg({\prod_{j=1}^J M_i}\bigg) \bigg( {\prod_{k=1}^K F_i} \bigg) -! \Delta \Omega_i } }, $$ -! where $J$ is the number of integer masks and $K$ is the number of real masks. -! -! All of the routines in this module assume field data is stored in an -! attribute vector ({\tt AttrVect}), and the integration/averaging is performed -! only on the {\tt REAL} attributes. Physical coordinate grid and mask -! information is assumed to be stored as attributes in either a -! 
{\tt GeneralGrid}, or pre-combined into a single integer mask and a single -! real mask. -! -! !INTERFACE: - - module m_SpatialIntegral - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: SpatialIntegral ! Spatial Integral - public :: SpatialAverage ! Spatial Area Average - - public :: MaskedSpatialIntegral ! Masked Spatial Integral - public :: MaskedSpatialAverage ! MaskedSpatial Area Average - - public :: PairedSpatialIntegrals ! A Pair of Spatial - ! Integrals - - public :: PairedSpatialAverages ! A Pair of Spatial - ! Area Averages - - public :: PairedMaskedSpatialIntegrals ! A Pair of Masked - ! Spatial Integrals - - public :: PairedMaskedSpatialAverages ! A Pair of Masked - ! Spatial Area Averages - - interface SpatialIntegral ; module procedure & - SpatialIntegralRAttrGG_ - end interface - interface SpatialAverage ; module procedure & - SpatialAverageRAttrGG_ - end interface - interface MaskedSpatialIntegral ; module procedure & - MaskedSpatialIntegralRAttrGG_ - end interface - interface MaskedSpatialAverage ; module procedure & - MaskedSpatialAverageRAttrGG_ - end interface - interface PairedSpatialIntegrals ; module procedure & - PairedSpatialIntegralRAttrGG_ - end interface - interface PairedSpatialAverages ; module procedure & - PairedSpatialAverageRAttrGG_ - end interface - interface PairedMaskedSpatialIntegrals ; module procedure & - PairedMaskedIntegralRAttrGG_ - end interface - interface PairedMaskedSpatialAverages ; module procedure & - PairedMaskedAverageRAttrGG_ - end interface - -! !REVISION HISTORY: -! 25Oct01 - J.W. Larson - Initial version -! 9May02 - J.W. Larson - Massive Refactoring. -! 10-14Jun02 - J.W. Larson - Added Masked methods. -! 17-18Jun02 - J.W. Larson - Added Paired/Masked -! methods. -! 18Jun02 - J.W. Larson - Renamed module from -! m_GlobalIntegral to m_SpatialIntegral. -! 15Jan03 - E.T. Ong - Initialized real-only -! AttrVects using nullfied integer lists. This circuitous -! hack was required because the compaq compiler does not -! compile the function AttrVectExportListToChar. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_SpatialIntegral' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SpatialIntegralRAttrGG_ - Compute spatial integral. -! -! !DESCRIPTION: -! This routine computes spatial integrals of the {\tt REAL} attributes -! of the {\tt REAL} attributes of the input {\tt AttrVect} argument -! {\tt inAv}. {\tt SpatialIntegralRAttrGG\_()} takes the input -! {\tt AttrVect} argument {\tt inAv} and computes the spatial -! integral using weights stored in the {\tt GeneralGrid} argument -! {\tt GGrid} and identified by the {\tt CHARACTER} tag {\tt WeightTag}. -! The integral of each {\tt REAL} attribute is returned in the output -! {\tt AttrVect} argument {\tt outAv}. If {\tt SpatialIntegralRAttrGG\_()} -! is invoked with the optional {\tt LOGICAL} input argument -! {\tt SumWeights} set as {\tt .TRUE.}, then the weights are also summed -! and stored in {\tt outAv} (and can be referenced with the attribute -! tag defined by the argument{\tt WeightTag}. If -! {\tt SpatialIntegralRAttrGG\_()} is invoked with the optional {\tt INTEGER} -! argument {\tt comm} (a Fortran MPI communicator handle), the summation -! 
operations for the integral are completed on the local process, then -! reduced across the communicator, with all processes receiving the result. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the {\tt GeneralGrid} {\tt GGrid} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! in {\tt inAv} and the point weights stored in {\tt GGrid}. -! -! {\bf N.B.: } If {\tt SpatialIntegralRAttrGG\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}, -! then the value of {\tt WeightTag} must not conflict with any of the -! {\tt REAL} attribute tags in {\tt inAv}. -! -! {\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine SpatialIntegralRAttrGG_(inAv, outAv, GGrid, WeightTag, & - SumWeights, comm) -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - - use m_realkinds, only : FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_indexRA => indexRA - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - use m_SpatialIntegralV, only: SpatialIntegralV - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - type(GeneralGrid), intent(IN) :: GGrid - character(len=*), intent(IN) :: WeightTag - logical, optional, intent(IN) :: SumWeights - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 06Feb02 - J.W. Larson - initial version -! 09May02 - J.W. Larson - Refactored and -! renamed SpatialIntegralRAttrGG_(). -! 07Jun02 - J.W. Larson - Bug fix and further -! refactoring. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SpatialIntegralRAttrGG_' - - integer :: ierr, length - logical :: mySumWeights - real(FP), dimension(:), pointer :: gridWeights - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv) /= GeneralGrid_lsize(GGrid)) then - ierr = AttrVect_lsize(inAv) - GeneralGrid_lsize(GGrid) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / GGrid length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' GeneralGrid_lsize(GGrid) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(present(SumWeights)) then - mySumWeights = SumWeights - else - mySumWeights = .FALSE. - endif - - ! ensure unambiguous pointer association status for gridWeights - - nullify(gridWeights) - - ! Extract Grid Weights - - call GeneralGrid_exportRAttr(GGrid, WeightTag, gridWeights, length) - - ! - - if(present(comm)) then ! do a distributed AllReduce-style integral: - call SpatialIntegralV(inAv, outAv, gridWeights, mySumWeights, & - WeightTag, comm) - else - call SpatialIntegralV(inAv, outAv, gridWeights, mySumWeights, & - WeightTag) - endif - - ! Clean up temporary allocated space - - deallocate(gridWeights, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(gridWeights...failed. 
ierr=', ierr - call die(myname_) - endif - - end subroutine SpatialIntegralRAttrGG_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SpatialAverageRAttrGG_ - Compute spatial average. -! -! !DESCRIPTION: -! This routine computes spatial averages of the {\tt REAL} attributes -! of the input {\tt AttrVect} argument {\tt inAv}. -! {\tt SpatialAverageRAttrGG\_()} takes the input {\tt AttrVect} argument -! {\tt inAv} and computes the spatial average using weights -! stored in the {\tt GeneralGrid} argument {\tt GGrid} and identified by -! the {\tt CHARACTER} tag {\tt WeightTag}. The average of each {\tt REAL} -! attribute is returned in the output {\tt AttrVect} argument {\tt outAv}. -! If {\tt SpatialAverageRAttrGG\_()} is invoked with the optional {\tt INTEGER} -! argument {\tt comm} (a Fortran MPI communicator handle), the summation -! operations for the average are completed on the local process, then -! reduced across the communicator, with all processes receiving the result. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the {\tt GeneralGrid} {\tt GGrid} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! in {\tt inAv} and the point weights stored in {\tt GGrid}. -! -! {\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine SpatialAverageRAttrGG_(inAv, outAv, GGrid, WeightTag, comm) - -! ! USES: - - use m_realkinds, only : FP - - use m_stdio - use m_die - use m_mpif90 - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_GeneralGrid, only : GeneralGrid - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - type(GeneralGrid), intent(IN) :: GGrid - character(len=*), intent(IN) :: WeightTag - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 08Feb02 - J.W. Larson - initial version -! 08May02 - J.W. Larson - minor modifications: -! 1) renamed the routine to GlobalAverageRAttrGG_ -! 2) changed calls to reflect new routine name -! GlobalIntegralRAttrGG_(). -! 18Jun02 - J.W. Larson - Renamed routine to -! SpatialAverageRAttrGG_(). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SpatialAverageRAtttrGG_' - - type(AttrVect) :: integratedAv - type(List) :: nullIList - integer :: i, ierr, iweight - - ! Compute the spatial integral: - - if(present(comm)) then - call SpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, WeightTag, & - .TRUE., comm) - else - call SpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, WeightTag, & - .TRUE.) - endif - - ! 
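For a single REAL attribute, SpatialIntegralRAttrGG_ and SpatialAverageRAttrGG_ amount to the weighted sum $I = \sum_i \Phi_i \Delta\Omega_i$ and the ratio $A = I / \sum_i \Delta\Omega_i$, with a guard against a zero weight sum before dividing. A minimal standalone sketch with illustrative numbers; this is not MCT code.

      program demo_spatial_average
        implicit none
        integer, parameter :: n = 4
        ! Field samples Phi_i and spatial weights (grid-cell areas) dOmega_i:
        real :: phi(n)    = (/ 1.0, 2.0, 3.0, 4.0 /)
        real :: weight(n) = (/ 0.1, 0.2, 0.3, 0.4 /)
        real :: integral, wsum, average

        integral = sum(phi * weight)   ! I = sum_i Phi_i * dOmega_i
        wsum     = sum(weight)         ! summed weights

        if (wsum == 0.0) then
           stop 'sum of grid weights is zero'   ! same guard as the average routine
        endif

        average = integral / wsum      ! A = I / sum_i dOmega_i

        print *, 'integral =', integral   ! 0.1 + 0.4 + 0.9 + 1.6 = 3.0
        print *, 'average  =', average    ! 3.0 / 1.0 = 3.0
      end program demo_spatial_average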
Check value of summed weights (to avoid division by zero): - - iweight = AttrVect_indexRA(integratedAv, WeightTag) - if(integratedAv%rAttr(iweight, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights is zero.' - call die(myname_) - endif - - ! Initialize output AttrVect outAv: - - call List_nullify(nullIList) - call AttrVect_init(outAv, iList=nullIList, rList=inAv%rList, lsize=1) - call AttrVect_zero(outAv) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. - - do i=1,AttrVect_nRAttr(outAv) - outAv%rAttr(i,1) = integratedAv%rAttr(i,1) & - / integratedAv%rAttr(iweight,1) - end do - - ! Clean up temporary AttrVect: - - call AttrVect_clean(integratedAv) - - end subroutine SpatialAverageRAttrGG_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MaskedSpatialIntegralRAttrGG_ - Masked spatial integral. -! -! !DESCRIPTION: -! This routine computes masked spatial integrals of the {\tt REAL} -! attributes of the input {\tt AttrVect} argument {\tt inAv}, returning -! the masked integrals in the output {\tt AttrVect} {\tt outAv}. All of -! the masking data are assumed stored in the input {\tt GeneralGrid} -! argument {\tt GGrid}. If integer masks are to be used, their integer -! attribute names in {\tt GGrid} are named as a colon-delimited list -! in the optional {\tt CHARACTER} input argument {\tt iMaskTags}. Real -! masks (if desired) are referenced by their real attribute names in -! {\tt GGrid} are named as a colon-delimited list in the optional -! {\tt CHARACTER} input argument {\tt rMaskTags}. The user specifies -! a choice of mask combination method with the input {\tt LOGICAL} argument -! {\tt UseFastMethod}. If ${\tt UseFastMethod} = {\tt .FALSE.}$ this -! routine checks each mask entry to ensure that the integer masks contain -! only ones and zeroes, and that entries in the real masks are all in -! the closed interval $[0,1]$. If ${\tt UseFastMethod} = {\tt .TRUE.}$, -! this routine performs direct products of the masks, assuming that the -! user has validated them in advance. The optional {\tt LOGICAL} input -! argument {\tt SumWeights} determines whether the masked sum of the spatial -! weights is computed and returned in {\tt outAv} with the real attribute -! name supplied in the optional {\tt CHARACTER} input argument -! {\tt WeightSumTag}. This integral can either be a local (i.e. a global -! memory space operation), or a global distributed integral. The latter -! is the case if the optional input {\tt INTEGER} argument {\tt comm} is -! supplied (which corresponds to a Fortran MPI communicatior handle). -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the input {\tt GeneralGrid} {\tt GGrid} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! in {\tt inAv} and the point weights stored in {\tt GGrid}. -! -! {\bf N.B.: } If {\tt SpatialIntegralRAttrV\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}. -! In this case, the none of {\tt REAL} attribute tags in {\tt inAv} may be -! named the same as the string contained in {\tt WeightSumTag}, which is an -! attribute name reserved for the sum of the weights in the output {\tt AttrVect} -! {\tt outAv}. -! -! 
{\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine MaskedSpatialIntegralRAttrGG_(inAv, outAv, GGrid, SpatialWeightTag, & - iMaskTags, rMaskTags, UseFastMethod, & - SumWeights, WeightSumTag, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - - use m_realkinds, only : FP - - use m_String, only : String - use m_String, only : String_toChar => toChar - use m_String, only : String_clean => clean - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - use m_List, only : List_nitem => nitem - use m_List, only : List_get => get - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_indexRA => indexRA - use m_GeneralGrid, only : GeneralGrid_exportIAttr => exportIAttr - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - use m_AttrVectReduce, only : AttrVect_GlobalWeightedSumRAttr => & - GlobalWeightedSumRAttr - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - use m_SpatialIntegralV, only : MaskedSpatialIntegralV - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - type(GeneralGrid), intent(IN) :: GGrid - character(len=*), intent(IN) :: SpatialWeightTag - character(len=*), optional, intent(IN) :: iMaskTags - character(len=*), optional, intent(IN) :: rMaskTags - logical, intent(IN) :: UseFastMethod - logical, optional, intent(IN) :: SumWeights - character(len=*), optional, intent(IN) :: WeightSumTag - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 11Jun02 - J.W. Larson - initial version -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MaskedSpatialIntegralRAttrGG_' - - integer :: i, ierr, j, length - logical :: mySumWeights - - type(List) :: iMaskList, rMaskList - type(String) :: DummStr - - integer, dimension(:), pointer :: iMask, iMaskTemp - real(FP), dimension(:), pointer :: rMask, rMaskTemp - integer :: TempMaskLength - - real(FP), dimension(:), pointer :: SpatialWeights - - integer :: niM, nrM ! Number of iMasks and rMasks, respectively - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv) /= GeneralGrid_lsize(GGrid)) then - ierr = AttrVect_lsize(inAv) - GeneralGrid_lsize(GGrid) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / GGrid length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' GeneralGrid_lsize(GGrid) = ',GeneralGrid_lsize(GGrid) - call die(myname_) - endif - - if(present(SumWeights)) then - mySumWeights = SumWeights - if(.not. present(WeightSumTag)) then - write(stderr,'(3a)') myname_,':: FATAL--If the input argument SumWeights=.TRUE.,', & - ' then the argument WeightSumTag must be provided.' - call die(myname_) - endif - else - mySumWeights = .FALSE. - endif - - if(present(iMaskTags)) then - call List_init(iMaskList, iMaskTags) - if(List_nitem(iMaskList) == 0) then - write(stderr,'(3a)') myname_,':: ERROR--an INTEGER mask list with', & - 'no valid items was provided.' 
- call die(myname_) - endif - endif - - if(present(rMaskTags)) then - call List_init(rMaskList, rMaskTags) - if(List_nitem(iMaskList) == 0) then - write(stderr,'(3a)') myname_,':: ERROR--an REAL mask list with', & - 'no valid items was provided.' - call die(myname_) - endif - endif - - ! Determine the on-processor vector length for use throughout - ! this routine: - - length = AttrVect_lsize(inAv) - - !========================================================== - ! Extract Spatial Weights from GGrid using SpatialWeightTag - !========================================================== - - nullify(SpatialWeights) - call GeneralGrid_exportRAttr(GGrid, SpatialWeightTag, SpatialWeights, & - TempMaskLength) - if(TempMaskLength /= length) then - write(stderr,'(3a,i8,a,i8)') myname_,& - ':: error on return from GeneralGrid_exportRAttr().' , & - 'Returned with SpatialWeights(:) length = ',TempMaskLength, & - ',which conflicts with AttrVect_lsize(inAv) = ',length - call die(myname_) - endif - - !========================================================== - ! If the argument iMaskTags is present, create the combined - ! iMask array: - !========================================================== - - if(present(iMaskTags)) then ! assemble iMask(:) from all the integer - ! mask attributes stored in GGrid(:) - - allocate(iMask(length), iMaskTemp(length), stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: allocate(iMask(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - - niM = List_nitem(iMaskList) - - do i=1,niM - - ! Retrieve current iMask tag, and get this attribute from GGrid: - call List_get(DummStr, i, iMaskList) - call GeneralGrid_exportIAttr(GGrid, String_toChar(DummStr), & - iMaskTemp, TempMaskLength) - call String_clean(DummStr) - if(TempMaskLength /= length) then - write(stderr,'(3a,i8,a,i8)') myname_,& - ':: error on return from GeneralGrid_exportIAttr().' , & - 'Returned with TempMaskLength = ',TempMaskLength, & - ',which conflicts with AttrVect_lsize(inAv) = ',length - call die(myname_) - endif - - if(i == 1) then ! first pass--examine iMaskTemp(:) only - - if(UseFastMethod) then ! straight copy of iMaskTemp(:) - do j=1,length - iMask(j) = iMaskTemp(j) - end do - else ! go through the entries of iMaskTemp(:) one-by-one - do j=1,length - select case(iMaskTemp(j)) - case(0) - iMask(j) = 0 - case(1) - iMask(j) = 1 - case default - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: FATAL--illegal INTEGER mask entry. Integer mask ', & - 'entries must be 0 or 1. iMask(',j,') = ', iMask(j) - call die(myname_) - end select ! select case(iMaskTemp(j))... - end do ! do j=1,length - endif ! if(UseFastMethod)... - - else ! That is, i /= 1 ... - - if(UseFastMethod) then ! straight product of iMask(:) - ! and iMaskTemp(:) - do j=1,length - iMask(j) = iMask(j) * iMaskTemp(j) - end do - else ! go through the entries of iMaskTemp(:) one-by-one - do j=1,length - select case(iMaskTemp(j)) - case(0) ! zero out iMask(j) - iMask(j) = 0 - case(1) ! do nothing - case default - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: FATAL--illegal INTEGER mask entry. Integer mask ', & - 'entries must be 0 or 1. iMask(',j,') = ', iMask(j) - call die(myname_) - end select ! select case(iMaskTemp(j))... - end do ! do j=1,length - endif ! if(UseFastMethod)... - - endif ! if(i == 1)... - - end do ! do i=1,niM...iMask retrievals - - endif ! if(present(iMaskTags))... - - !========================================================== - ! If the argument rMaskTags is present, create the combined - ! 
REAL mask rMask array: - !========================================================== - - if(present(rMaskTags)) then ! assemble rMask(:) from all the integer - ! mask attributes stored in GGrid(:) - - allocate(rMask(length), rMaskTemp(length), stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: allocate(rMask(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - - nrM = List_nitem(rMaskList) - - do i=1,nrM - - ! Retrieve current rMask tag, and get this attribute from GGrid: - call List_get(DummStr, i, rMaskList) - call GeneralGrid_exportRAttr(GGrid, String_toChar(DummStr), & - rMaskTemp, TempMaskLength) - call String_clean(DummStr) - if(TempMaskLength /= length) then - write(stderr,'(3a,i8,a,i8)') myname_,& - ':: error on return from GeneralGrid_exportRAttr().' , & - 'Returned with TempMaskLength = ',TempMaskLength, & - ',which conflicts with AttrVect_lsize(inAv) = ',length - call die(myname_) - endif - - if(i == 1) then ! first pass--examine rMaskTemp(:) only - - if(UseFastMethod) then ! straight copy of rMaskTemp(:) - do j=1,length - rMask(j) = rMaskTemp(j) - end do - else ! go through the entries of rMaskTemp(:) one-by-one - ! to ensure they are in the range [0.,1.] - do j=1,length - if((rMaskTemp(j) >= 0.) .or. (rMaskTemp(j) <=1.)) then - rMask(j) = rMaskTemp(j) - else - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: FATAL--illegal REAL mask entry. Real mask ', & - 'entries must be in [0.,1.] rMask(',j,') = ', rMask(j) - call die(myname_) - endif ! if((rMaskTemp(j) >= 0.) .or. (rMaskTemp(j) <=1.))... - end do ! do j=1,length - endif ! if(UseFastMethod)... - - else ! That is, i /= 1 ... - - if(UseFastMethod) then ! straight product of rMask(:) - ! and rMaskTemp(:) - do j=1,length - rMask(j) = rMask(j) * rMaskTemp(j) - end do - else ! go through the entries of rMaskTemp(:) one-by-one - ! to ensure they are in the range [0.,1.] - do j=1,length - if((rMaskTemp(j) >= 0.) .or. (rMaskTemp(j) <=1.)) then - rMask(j) = rMask(j) * rMaskTemp(j) - else - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: FATAL--illegal REAL mask entry. Real mask ', & - 'entries must be in [0.,1.] rMask(',j,') = ', rMask(j) - call die(myname_) - endif ! if((rMaskTemp(j) >= 0.) .or. (rMaskTemp(j) <=1.))... - end do ! do j=1,length - endif ! if(UseFastMethod)... - - endif ! if(i == 1)... - - end do ! do i=1,niM...rMask retrievals - - endif ! if(present(rMaskTags))... - - !========================================================== - ! Now that we have produced single INTEGER and REAL masks, - ! compute the masked weighted sum. - !========================================================== - - if(present(rMaskTags)) then ! We have a REAL Mask - - if(present(iMaskTags)) then ! and an INTEGER Mask - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask, rMask, UseFastMethod, & - SumWeights, WeightSumTag, comm) - else ! Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask, rMask, UseFastMethod, & - comm=comm) - endif ! if(mySumWeights)... - - else ! compute local sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask, rMask, UseFastMethod, & - SumWeights, WeightSumTag) - else ! 
Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask, rMask, UseFastMethod) - endif ! if(mySumWeights)... - - endif ! if(present(comm))... - - else ! REAL Mask Only Case... - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - rMask=rMask, & - UseFastMethod=UseFastMethod, & - SumWeights=SumWeights, & - WeightSumTag=WeightSumTag, & - comm=comm) - else ! Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - rMask=rMask, & - UseFastMethod=UseFastMethod, & - comm=comm) - endif ! if(mySumWeights)... - - else ! compute local sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - rMask=rMask, & - UseFastMethod=UseFastMethod, & - SumWeights=SumWeights, & - WeightSumTag=WeightSumTag) - else ! Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - rMask=rMask, & - UseFastMethod=UseFastMethod) - endif ! if(mySumWeights)... - - endif ! if(present(comm))... - - endif - else ! no REAL Mask... - - if(present(iMaskTags)) then ! INTEGER Mask Only Case... - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask=iMask, & - UseFastMethod=UseFastMethod, & - SumWeights=SumWeights, & - WeightSumTag=WeightSumTag, & - comm=comm) - else ! Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask=iMask, & - UseFastMethod=UseFastMethod, & - comm=comm) - endif ! if(mySumWeights)... - - else ! compute local sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask=iMask, & - UseFastMethod=UseFastMethod, & - SumWeights=SumWeights, & - WeightSumTag=WeightSumTag) - else ! Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - iMask=iMask, & - UseFastMethod=UseFastMethod) - endif ! if(mySumWeights)... - - endif ! if(present(comm))... - - else ! no INTEGER Mask / no REAL Mask Case... - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - UseFastMethod=UseFastMethod, & - SumWeights=SumWeights, & - WeightSumTag=WeightSumTag, & - comm=comm) - else ! Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - UseFastMethod=UseFastMethod, & - comm=comm) - endif ! if(mySumWeights)... - - else ! compute local sum: - - if(mySumWeights) then ! return the global masked sum of the - ! weights in outAV - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - UseFastMethod=UseFastMethod, & - SumWeights=SumWeights, & - WeightSumTag=WeightSumTag) - else ! Do not return the masked sum of the weights - call MaskedSpatialIntegralV(inAv, outAv, SpatialWeights, & - UseFastMethod=UseFastMethod) - endif ! if(mySumWeights)... - - endif ! if(present(comm))... - - endif ! if(present(iMaskTags)... - - endif ! if(present(rMaskTags)... 
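The masking logic above reduces to combining all integer masks and all real masks by elementwise products and then weighting each point by iMask(j)*rMask(j)*w(j); the slow path additionally validates that integer masks are 0 or 1 and that real masks lie in [0,1], which requires both bound checks to hold simultaneously. A minimal standalone sketch with illustrative values; this is not MCT code.

      program demo_masked_integral
        implicit none
        integer, parameter :: n = 4
        real    :: phi(n)   = (/ 1.0, 2.0, 3.0, 4.0 /)
        real    :: w(n)     = (/ 1.0, 1.0, 1.0, 1.0 /)
        integer :: imask(n) = (/ 1, 1, 0, 1 /)            ! e.g. a land/ocean mask
        real    :: rmask(n) = (/ 1.0, 0.5, 1.0, 0.25 /)   ! e.g. an ice fraction
        real    :: integral, wsum
        integer :: j

        ! Validation pass ("slow" method): both bounds must hold,
        ! so the range test is an .and. of the two comparisons.
        do j = 1, n
           if (imask(j) /= 0 .and. imask(j) /= 1) stop 'bad integer mask entry'
           if (.not. (rmask(j) >= 0.0 .and. rmask(j) <= 1.0)) stop 'bad real mask entry'
        end do

        ! Masked integral and masked weight sum:
        integral = sum(real(imask) * rmask * w * phi)
        wsum     = sum(real(imask) * rmask * w)

        print *, 'masked integral   =', integral   ! 1.0 + 1.0 + 0.0 + 1.0 = 3.0
        print *, 'masked weight sum =', wsum       ! 1.0 + 0.5 + 0.0 + 0.25 = 1.75
        print *, 'masked average    =', integral / wsum
      end program demo_masked_integral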
- - !========================================================== - ! The masked spatial integral is now completed. - ! Clean up the the various allocated mask structures. - !========================================================== - - if(present(iMaskTags)) then ! clean up iMask and friends... - call List_clean(iMaskList) - deallocate(iMask, iMaskTemp, stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: deallocate(iMask(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - endif - - if(present(rMaskTags)) then ! clean up rMask and co... - call List_clean(rMaskList) - deallocate(rMask, rMaskTemp, stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: deallocate(rMask(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - endif - - ! Clean up SpatialWeights(:) - - deallocate(SpatialWeights, stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: deallocate(SpatialWeights(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - - end subroutine MaskedSpatialIntegralRAttrGG_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MaskedSpatialAverageRAttrGG_ - Masked spatial average. -! -! !DESCRIPTION: -! This routine computes masked spatial averages of the {\tt REAL} -! attributes of the input {\tt AttrVect} argument {\tt inAv}, returning -! the masked averages in the output {\tt AttrVect} {\tt outAv}. All of -! the masking data are assumed stored in the input {\tt GeneralGrid} -! argument {\tt GGrid}. If integer masks are to be used, their integer -! attribute names in {\tt GGrid} are named as a colon-delimited list -! in the optional {\tt CHARACTER} input argument {\tt iMaskTags}. Real -! masks (if desired) are referenced by their real attribute names in -! {\tt GGrid} are named as a colon-delimited list in the optional -! {\tt CHARACTER} input argument {\tt rMaskTags}. The user specifies -! a choice of mask combination method with the input {\tt LOGICAL} argument -! {\tt UseFastMethod}. If ${\tt UseFastMethod} = {\tt .FALSE.}$ this -! routine checks each mask entry to ensure that the integer masks contain -! only ones and zeroes, and that entries in the real masks are all in -! the closed interval $[0,1]$. If ${\tt UseFastMethod} = {\tt .TRUE.}$, -! this routine performs direct products of the masks, assuming that the -! user has validated them in advance. This averaging can either be a -! local (equivalent to a global memory space operation), or a global -! distributed integral. The latter is the case if the optional input -! {\tt INTEGER} argument {\tt comm} is supplied (which corresponds to a -! Fortran MPI communicatior handle). -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the input {\tt GeneralGrid} {\tt GGrid} must be equal. That is, -! there must be a one-to-one correspondence between the field point values -! stored in {\tt inAv} and the point weights stored in {\tt GGrid}. -! -! {\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine MaskedSpatialAverageRAttrGG_(inAv, outAv, GGrid, SpatialWeightTag, & - iMaskTags, rMaskTags, UseFastMethod, & - comm) - -! ! 
USES: - - use m_realkinds, only : FP - - use m_stdio - use m_die - use m_mpif90 - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_indexRA => indexRA - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_indexRA => indexRA - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - type(GeneralGrid), intent(IN) :: GGrid - character(len=*), intent(IN) :: SpatialWeightTag - character(len=*), optional, intent(IN) :: iMaskTags - character(len=*), optional, intent(IN) :: rMaskTags - logical, intent(IN) :: UseFastMethod - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 12Jun02 - J.W. Larson - initial version -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MaskedSpatialAverageRAttrGG_' - - type(AttrVect) :: integratedAv - type(List) :: nullIList - character*9, parameter :: WeightSumTag = 'WeightSum' - - integer :: i, iweight - - !================================================================ - ! Do the integration using MaskedSpatialIntegralRAttrGG_(), which - ! returns the intermediate integrals (including the masked weight - ! sum) in the AttrVect integratedAv. - !================================================================ - - if(present(iMaskTags)) then - - if(present(rMaskTags)) then ! have both iMasks and rMasks - - if(present(comm)) then ! a distributed parallel sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, iMaskTags, & - rMaskTags, UseFastMethod, & - .TRUE., WeightSumTag, comm) - else ! a purely local sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, iMaskTags, & - rMaskTags, UseFastMethod, & - .TRUE., WeightSumTag) - endif ! if(present(comm))... - - else ! Only iMasks are in use - - if(present(comm)) then ! a distributed parallel sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, iMaskTags, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=WeightSumTag, & - comm=comm) - - else ! a purely local sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, iMaskTags, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=WeightSumTag) - endif ! if(present(comm))... - - endif ! if(present(rMaskTags)... - - else ! no iMasks - - if(present(rMaskTags)) then ! Only rMasks are in use - - if(present(comm)) then ! a distributed parallel sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, & - rMaskTags=rMaskTags, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=WeightSumTag, & - comm=comm) - else ! a purely local sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, & - rMaskTags=rMaskTags, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=WeightSumTag) - endif - - else ! Neither iMasks nor rMasks are in use - - if(present(comm)) then ! 
a distributed parallel sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=WeightSumTag, & - comm=comm) - else ! a purely local sum - call MaskedSpatialIntegralRAttrGG_(inAv, integratedAv, GGrid, & - SpatialWeightTag, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=WeightSumTag) - endif ! if(present(comm))... - - endif ! if(present(rMaskTags))... - - endif ! if(present(iMaskTags))... - - !================================================================ - ! The masked integrals and masked weight sum now reside in - ! in the AttrVect integratedAv. We now wish to compute the - ! averages by dividing the integtrals by the masked weight sum. - !================================================================ - - ! Check value of summed weights (to avoid division by zero): - - iweight = AttrVect_indexRA(integratedAv, WeightSumTag) - if(integratedAv%rAttr(iweight, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights is zero.' - call die(myname_) - endif - - ! Initialize output AttrVect outAv: - call List_nullify(nullIList) - call AttrVect_init(outAv, iList=nullIList, rList=inAv%rList, lsize=1) - call AttrVect_zero(outAv) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. - - do i=1,AttrVect_nRAttr(outAv) - outAv%rAttr(i,1) = integratedAv%rAttr(i,1) & - / integratedAv%rAttr(iweight,1) - end do - - ! Clean up temporary AttrVect: - - call AttrVect_clean(integratedAv) - - end subroutine MaskedSpatialAverageRAttrGG_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: PairedSpatialIntegralRAttrGG_ - Do two spatial integrals at once. -! -! !DESCRIPTION: -! This routine computes spatial integrals of the {\tt REAL} attributes -! of the {\tt REAL} attributes of the input {\tt AttrVect} arguments -! {\tt inAv1} and {\tt inAv2}, returning the integrals in the output -! {\tt AttrVect} arguments {\tt outAv1} and {\tt outAv2}, respectively . -! The integrals of {\tt inAv1} and {\tt inAv2} are computed using -! spatial weights stored in the input {\tt GeneralGrid} arguments -! {\tt GGrid1} and {\tt GGrid2}, respectively. The spatial weights in -! in {\tt GGrid1} and {\tt GGrid2} are identified by the input {\tt CHARACTER} -! arguments {\tt WeightTag1} and {\tt WeightTag2}, respectively. -! If {\tt SpatialIntegralRAttrGG\_()} is invoked with the optional -! {\tt LOGICAL} input argument -! {\tt SumWeights} set as {\tt .TRUE.}, then the weights are also summed -! and stored in {\tt outAv1} and {\tt outAv2}, and can be referenced with -! the attribute tags defined by the arguments {\tt WeightTag1} and -! {\tt WeightTag2}, respectively. This paired integral is implicitly a -! distributed operation (the whole motivation for pairing the integrals is -! to reduce communication latency costs), and the Fortran MPI communicator -! handle is defined by the input {\tt INTEGER} argument {\tt comm}. The -! summation is an AllReduce operation, with all processes receiving the -! global sum. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv1} -! and the {\tt GeneralGrid} {\tt GGrid1} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! 
in {\tt inAv1} and the point weights stored in {\tt GGrid1}. The same -! relationship must apply between {\tt inAv2} and {\tt GGrid2}. -! -! {\bf N.B.: } If {\tt SpatialIntegralRAttrGG\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}, -! then the value of {\tt WeightTag1} must not conflict with any of the -! {\tt REAL} attribute tags in {\tt inAv1} and the value of {\tt WeightTag2} -! must not conflict with any of the {\tt REAL} attribute tags in {\tt inAv2}. -! -! {\bf N.B.: } The output {\tt AttrVect} arguments {\tt outAv1} and -! {\tt outAv2} are allocated data structures. The user must deallocate them -! using the routine {\tt AttrVect\_clean()} when they are no longer needed. -! Failure to do so will result in a memory leak. -! -! !INTERFACE: - - subroutine PairedSpatialIntegralRAttrGG_(inAv1, outAv1, GGrid1, WeightTag1, & - inAv2, outAv2, GGrid2, WeightTag2, & - SumWeights, comm) -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - - use m_realkinds, only : FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_indexRA => indexRA - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - use m_SpatialIntegralV, only : PairedSpatialIntegralsV - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - type(GeneralGrid), intent(IN) :: GGrid1 - character(len=*), intent(IN) :: WeightTag1 - type(AttrVect), intent(IN) :: inAv2 - type(GeneralGrid), intent(IN) :: GGrid2 - character(len=*), intent(IN) :: WeightTag2 - logical, optional, intent(IN) :: SumWeights - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 09May02 - J.W. Larson - Initial version. -! 10Jun02 - J.W. Larson - Refactored--now -! built on top of PairedIntegralRAttrV_(). -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::PairedSpatialIntegralRAttrGG_' - - ! Argument Sanity Checks: - - integer :: ierr, length1, length2 - logical :: mySumWeights - real(FP), dimension(:), pointer :: gridWeights1, gridWeights2 - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid1)) then - ierr = AttrVect_lsize(inAv1) - GeneralGrid_lsize(GGrid1) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv1 / GGrid1 length mismatch: ', & - ' AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - ' GeneralGrid_lsize(GGrid1) = ',GeneralGrid_lsize(GGrid1) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= GeneralGrid_lsize(GGrid2)) then - ierr = AttrVect_lsize(inAv2) - GeneralGrid_lsize(GGrid2) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv2 / GGrid2 length mismatch: ', & - ' AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - ' GeneralGrid_lsize(GGrid2) = ',GeneralGrid_lsize(GGrid2) - call die(myname_) - endif - - ! Are we summing the integration weights for either input - ! GeneralGrid? - - if(present(SumWeights)) then - mySumWeights = SumWeights - else - mySumWeights = .FALSE. - endif - - ! ensure unambiguous pointer association status for gridWeights1 - ! 
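The latency saving that motivates the paired routines comes from packing the partial sums of both integrals into a single buffer and issuing one AllReduce instead of two. A minimal sketch of that packing idea, using the plain mpi module rather than MCT's m_mpif90 wrapper; the buffer contents are illustrative and this is not the actual PairedSpatialIntegralsV implementation.

      program demo_paired_reduce
        use mpi
        implicit none
        double precision :: localSums(4), globalSums(4)
        integer :: ierr

        call MPI_Init(ierr)

        ! Pretend the first two entries are this rank's partial sums for
        ! integral 1, and the last two are its partial sums for integral 2.
        localSums = (/ 1.0d0, 2.0d0, 3.0d0, 4.0d0 /)

        ! One Allreduce covers both integrals, halving the number of
        ! latency-bound collective calls:
        call MPI_Allreduce(localSums, globalSums, size(localSums), &
                           MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)

        print *, 'paired global sums: ', globalSums

        call MPI_Finalize(ierr)
      end program demo_paired_reduce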
and gridWeights2 - - nullify(gridWeights1) - nullify(gridWeights2) - - ! Extract Grid Weights - - call GeneralGrid_exportRAttr(GGrid1, WeightTag1, gridWeights1, length1) - call GeneralGrid_exportRAttr(GGrid2, WeightTag2, gridWeights2, length2) - - - call PairedSpatialIntegralsV(inAv1, outAv1, gridweights1, WeightTag1, & - inAv2, outAv2, gridweights2, WeightTag2, & - mySumWeights, comm) - - ! Clean up allocated arrays: - - deallocate(gridWeights1, gridWeights2, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - 'ERROR--deallocate(gridWeights1,...) failed, ierr = ',ierr - call die(myname_) - endif - - end subroutine PairedSpatialIntegralRAttrGG_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: PairedSpatialAverageRAttrGG_ - Do two spatial averages at once. -! -! !DESCRIPTION: -! This routine computes spatial averages of the {\tt REAL} attributes -! of the input {\tt AttrVect} arguments -! {\tt inAv1} and {\tt inAv2}, returning the averages in the output -! {\tt AttrVect} arguments {\tt outAv1} and {\tt outAv2}, respectively. -! The integrals of {\tt inAv1} and {\tt inAv2} are computed using -! spatial weights stored in the input {\tt GeneralGrid} arguments -! {\tt GGrid1} and {\tt GGrid2}, respectively. The spatial weights -! in {\tt GGrid1} and {\tt GGrid2} are identified by the input {\tt CHARACTER} -! arguments {\tt WeightTag1} and {\tt WeightTag2}, respectively. -! This paired average is implicitly a -! distributed operation (the whole motivation for pairing the averages is -! to reduce communication latency costs), and the Fortran MPI communicator -! handle is defined by the input {\tt INTEGER} argument {\tt comm}. The -! summation is an AllReduce operation, with all processes receiving the -! global sum. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv1} -! and the {\tt GeneralGrid} {\tt GGrid1} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! in {\tt inAv1} and the point weights stored in {\tt GGrid1}. The same -! relationship must apply between {\tt inAv2} and {\tt GGrid2}. -! -! {\bf N.B.: } The output {\tt AttrVect} arguments {\tt outAv1} and -! {\tt outAv2} are allocated data structures. The user must deallocate them -! using the routine {\tt AttrVect\_clean()} when they are no longer needed. -! Failure to do so will result in a memory leak. -! -! !INTERFACE: - - subroutine PairedSpatialAverageRAttrGG_(inAv1, outAv1, GGrid1, WeightTag1, & - inAv2, outAv2, GGrid2, WeightTag2, & - comm) -! ! 
USES: - - use m_realkinds, only : FP - - use m_stdio - use m_die - use m_mpif90 - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_indexRA => indexRA - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - type(GeneralGrid), intent(IN) :: GGrid1 - character(len=*), intent(IN) :: WeightTag1 - type(AttrVect), intent(IN) :: inAv2 - type(GeneralGrid), intent(IN) :: GGrid2 - character(len=*), intent(IN) :: WeightTag2 - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 09May02 - J.W. Larson - Initial version. -! 14Jun02 - J.W. Larson - Bug fix to reflect -! new interface to PairedSpatialIntegralRAttrGG_(). -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::PairedSpatialAverageRAttrGG_' - - type(AttrVect) :: integratedAv1, integratedAv2 - type(List) :: nullIList - integer :: i, ierr, iweight1, iweight2 - - ! Compute the spatial integral: - - call PairedSpatialIntegralRAttrGG_(inAv1, integratedAv1, GGrid1, WeightTag1, & - inAv2, integratedAv2, GGrid2, & - WeightTag2, .TRUE., comm) - - - ! Check value of summed weights (to avoid division by zero): - - iweight1 = AttrVect_indexRA(integratedAv1, WeightTag1) - if(integratedAv1%rAttr(iweight1, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights in first integral is zero.' - call die(myname_) - endif - - iweight2 = AttrVect_indexRA(integratedAv2, WeightTag2) - if(integratedAv2%rAttr(iweight2, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights in second integral is zero.' - call die(myname_) - endif - - ! Initialize output AttrVects outAv1 and outAv2: - - call List_nullify(nullIList) - - call AttrVect_init(outAv1, iList=nullIList, rList=inAv1%rList, lsize=1) - call AttrVect_zero(outAv1) - call AttrVect_init(outAv2, iList=nullIList, rList=InAv2%rList, lsize=1) - call AttrVect_zero(outAv2) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. - - do i=1,AttrVect_nRAttr(outAv1) - outAv1%rAttr(i,1) = integratedAv1%rAttr(i,1) & - / integratedAv1%rAttr(iweight1,1) - end do - - do i=1,AttrVect_nRAttr(outAv2) - outAv2%rAttr(i,1) = integratedAv2%rAttr(i,1) & - / integratedAv2%rAttr(iweight2,1) - end do - - ! Clean up temporary AttrVects: - - call AttrVect_clean(integratedAv1) - call AttrVect_clean(integratedAv2) - - end subroutine PairedSpatialAverageRAttrGG_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: PairedMaskedIntegralRAttrGG_ - Do two masked integrals at once. -! -! !DESCRIPTION: -! 
This routine computes a pair of masked spatial integrals of the {\tt REAL} -! attributes of the input {\tt AttrVect} arguments {\tt inAv1} and -! {\tt inAv2}, returning the masked integrals in the output {\tt AttrVect} -! {\tt outAv1} and {\tt outAv2}, respectively. All of the spatial weighting -! and masking data for each set of integrals are assumed stored in the input -! {\tt GeneralGrid} arguments {\tt GGrid1} and {\tt GGrid2}. If integer -! masks are to be used, their integer attribute names in {\tt GGrid1} -! and {\tt GGrid2} are given as colon-delimited lists in the optional -! {\tt CHARACTER} input arguments {\tt iMaskTags1} and {\tt iMaskTags2}, -! respectively. Real masks (if desired) are referenced by their real -! attribute names in {\tt GGrid1} and {\tt GGrid2}, given as -! colon-delimited lists in the optional {\tt CHARACTER} input arguments -! {\tt rMaskTags1} and {\tt rMaskTags2}, respectively. The user specifies -! a choice of mask combination method with the input {\tt LOGICAL} argument -! {\tt UseFastMethod}. If ${\tt UseFastMethod} = {\tt .FALSE.}$ this -! routine checks each mask entry to ensure that the integer masks contain -! only ones and zeroes, and that entries in the real masks are all in -! the closed interval $[0,1]$. If ${\tt UseFastMethod} = {\tt .TRUE.}$, -! this routine performs direct products of the masks, assuming that the -! user has validated them in advance. The optional {\tt LOGICAL} input -! argument {\tt SumWeights} determines whether the masked sum of the spatial -! weights is computed and returned in {\tt outAv1} and {\tt outAv2} with the -! real attribute names supplied in the {\tt CHARACTER} input arguments -! {\tt SpatialWeightTag1} and {\tt SpatialWeightTag2}, respectively. -! This paired integral is implicitly a distributed operation (the whole -! motivation for pairing the integrals is to reduce communication latency -! costs), and the Fortran MPI communicator handle is defined by the input -! {\tt INTEGER} argument {\tt comm}. The -! summation is an AllReduce operation, with all processes receiving the -! global sum. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv1} -! and the {\tt GeneralGrid} {\tt GGrid1} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! in {\tt inAv1} and the point weights stored in {\tt GGrid1}. The same -! relationship must apply between {\tt inAv2} and {\tt GGrid2}. -! -! {\bf N.B.: } If {\tt PairedMaskedIntegralRAttrGG\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}, -! then the value of {\tt SpatialWeightTag1} must not conflict with any of the -! {\tt REAL} attribute tags in {\tt inAv1} and the value of -! {\tt SpatialWeightTag2} must not conflict with any of the {\tt REAL} -! attribute tags in {\tt inAv2}. -! -! {\bf N.B.: } The output {\tt AttrVect} arguments {\tt outAv1} and -! {\tt outAv2} are allocated data structures. The user must deallocate them -! using the routine {\tt AttrVect\_clean()} when they are no longer needed. -! Failure to do so will result in a memory leak. -! -! !INTERFACE: - - subroutine PairedMaskedIntegralRAttrGG_(inAv1, outAv1, GGrid1, & - SpatialWeightTag1, rMaskTags1, & - iMaskTags1, inAv2, outAv2, GGrid2, & - SpatialWeightTag2, rMaskTags2, & - iMaskTags2, UseFastMethod, & - SumWeights, comm) -! ! 
USES: - - use m_stdio - use m_die - use m_mpif90 - - use m_realkinds, only : FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_indexRA => indexRA - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - type(GeneralGrid), intent(IN) :: GGrid1 - character(len=*), intent(IN) :: SpatialWeightTag1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - type(GeneralGrid), intent(IN) :: GGrid2 - character(len=*), intent(IN) :: SpatialWeightTag2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - logical, intent(IN) :: UseFastMethod - logical, optional, intent(IN) :: SumWeights - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 17Jun02 - J.W. Larson - Initial version. -! 19Jun02 - J.W. Larson - Shortened the name -! for compatibility with the Portland Group f90 compiler -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_ = & - myname//'::PairedMaskedIntegralRAttrGG_' - - logical :: mySumWeights - real(FP), dimension(:), pointer :: PairedBuffer, OutPairedBuffer - integer :: ierr, nRA1, nRA2, PairedBufferLength - - ! Basic Argument Validity Checks: - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid1)) then - ierr = AttrVect_lsize(inAv1) - GeneralGrid_lsize(GGrid1) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv1 / GGrid1 length mismatch: ', & - ' AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - ' GeneralGrid_lsize(GGrid1) = ',GeneralGrid_lsize(GGrid1) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= GeneralGrid_lsize(GGrid2)) then - ierr = AttrVect_lsize(inAv2) - GeneralGrid_lsize(GGrid2) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv2 / GGrid2 length mismatch: ', & - ' AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - ' GeneralGrid_lsize(GGrid2) = ',GeneralGrid_lsize(GGrid2) - call die(myname_) - endif - - ! Are we summing the integration weights for the input - ! GeneralGrids? - - if(present(SumWeights)) then - mySumWeights = SumWeights - else - mySumWeights = .FALSE. - endif - - ! Begin by invoking MaskedSpatialIntegralRAttrGG_() for each - ! AttrVect/GeneralGrid pair. This is done LOCALLY to create - ! integratedAv1 and integratedAv2, respectively. - - ! Local Masked Integral #1: - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! both Integer and Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv1, outAv1, GGrid1, & - SpatialWeightTag1, iMaskTags1, & - rMaskTags1, UseFastMethod, & - mySumWeights, SpatialWeightTag1) - else ! Integer Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv1, outAv1, GGrid1, & - SpatialWeightTag1, & - iMaskTags=iMaskTags1, & - UseFastMethod=UseFastMethod, & - SumWeights=mySumWeights, & - WeightSumTag=SpatialWeightTag1) - endif ! if(present(rMaskTags1))... - - else ! No Integer Masking - - if(present(rMaskTags1)) then ! 
Real Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv1, outAv1, GGrid1, & - SpatialWeightTag=SpatialWeightTag1, & - rMaskTags=rMaskTags1, & - UseFastMethod=UseFastMethod, & - SumWeights=mySumWeights, & - WeightSumTag=SpatialWeightTag1) - else ! Neither Integer nor Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv1, outAv1, GGrid1, & - SpatialWeightTag=SpatialWeightTag1, & - UseFastMethod=UseFastMethod, & - SumWeights=mySumWeights, & - WeightSumTag=SpatialWeightTag1) - - endif ! if(present(rMaskTags1))... - - endif ! if(present(iMaskTags1))... - - ! Local Masked Integral #2: - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both Integer and Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv2, outAv2, GGrid2, & - SpatialWeightTag2, iMaskTags2, & - rMaskTags2, UseFastMethod, & - mySumWeights, SpatialWeightTag2) - else ! Integer Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv2, outAv2, GGrid2, & - SpatialWeightTag2, & - iMaskTags=iMaskTags2, & - UseFastMethod=UseFastMethod, & - SumWeights=mySumWeights, & - WeightSumTag=SpatialWeightTag2) - endif ! if(present(rMaskTags2))... - - else ! No Integer Masking - - if(present(rMaskTags2)) then ! Real Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv2, outAv2, GGrid2, & - SpatialWeightTag=SpatialWeightTag2, & - rMaskTags=rMaskTags2, & - UseFastMethod=UseFastMethod, & - SumWeights=mySumWeights, & - WeightSumTag=SpatialWeightTag2) - else ! Neither Integer nor Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv2, outAv2, GGrid2, & - SpatialWeightTag=SpatialWeightTag2, & - UseFastMethod=UseFastMethod, & - SumWeights=mySumWeights, & - WeightSumTag=SpatialWeightTag2) - - endif ! if(present(rMaskTags2))... - - endif ! if(present(iMaskTags2))... - - ! Create the paired buffer for the Global Sum - - nRA1 = AttrVect_nRAttr(outAv1) - nRA2 = AttrVect_nRAttr(outAv2) - - PairedBufferLength = nRA1 + nRA2 - allocate(PairedBuffer(PairedBufferLength), OutPairedBuffer(PairedBufferLength), & - stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal error--allocate(PairedBuffer...failed, ierr = ',ierr - call die(myname_) - endif - - ! Load the paired buffer - - PairedBuffer(1:nRA1) = outAv1%rAttr(1:nRA1,1) - PairedBuffer(nRA1+1:PairedBufferLength) = outAv2%rAttr(1:nRA2,1) - - ! Perform the global sum on the paired buffer - - call MPI_AllReduce(PairedBuffer, OutPairedBuffer, PairedBufferLength, & - MP_Type(PairedBuffer(1)), MP_SUM, comm, ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal Error--MPI_ALLREDUCE() failed with ierror = ',ierr - call MP_perr_die(myname_,'MPI_ALLREDUCE() failed',ierr) - endif - - ! Unload OutPairedBuffer into outAv1 and outAv2: - - outAv1%rAttr(1:nRA1,1) = OutPairedBuffer(1:nRA1) - outAv2%rAttr(1:nRA2,1) = OutPairedBuffer(nRA1+1:PairedBufferLength) - - deallocate(PairedBuffer, OutPairedBuffer, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal error--deallocate(PairedBuffer...failed, ierr = ',ierr - call die(myname_) - endif - - end subroutine PairedMaskedIntegralRAttrGG_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: PairedMaskedAverageRAttrGG_ - Do two masked averages at once. -! -! !DESCRIPTION: -! This routine computes a pair of masked spatial averages of the {\tt REAL} -! 
attributes of the input {\tt AttrVect} arguments {\tt inAv1} and -! {\tt inAv2}, returning the masked averages in the output {\tt AttrVect} -! {\tt outAv1} and {\tt outAv2}, respectively. All of the spatial weighting -! and masking data for each set of averages are assumed stored in the input -! {\tt GeneralGrid} arguments {\tt GGrid1} and {\tt GGrid2}. If integer -! masks are to be used, their integer attribute names in {\tt GGrid1} -! and {\tt GGrid2} are given as colon-delimited lists in the optional -! {\tt CHARACTER} input arguments {\tt iMaskTags1} and {\tt iMaskTags2}, -! respectively. Real masks (if desired) are referenced by their real -! attribute names in {\tt GGrid1} and {\tt GGrid2}, given as -! colon-delimited lists in the optional {\tt CHARACTER} input arguments -! {\tt rMaskTags1} and {\tt rMaskTags2}, respectively. The user specifies -! a choice of mask combination method with the input {\tt LOGICAL} argument -! {\tt UseFastMethod}. If ${\tt UseFastMethod} = {\tt .FALSE.}$ this -! routine checks each mask entry to ensure that the integer masks contain -! only ones and zeroes, and that entries in the real masks are all in -! the closed interval $[0,1]$. If ${\tt UseFastMethod} = {\tt .TRUE.}$, -! this routine performs direct products of the masks, assuming that the -! user has validated them in advance. This paired average is implicitly -! a distributed operation (the whole motivation for pairing the averages -! is to reduce communication latency costs), and the Fortran MPI communicator -! handle is defined by the input {\tt INTEGER} argument {\tt comm}. The -! summation is an AllReduce operation, with all processes receiving the -! global sum. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv1} -! and the {\tt GeneralGrid} {\tt GGrid1} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! in {\tt inAv1} and the point weights stored in {\tt GGrid1}. The same -! relationship must apply between {\tt inAv2} and {\tt GGrid2}. -! -! {\bf N.B.: } The output {\tt AttrVect} arguments {\tt outAv1} and -! {\tt outAv2} are allocated data structures. The user must deallocate them -! using the routine {\tt AttrVect\_clean()} when they are no longer needed. -! Failure to do so will result in a memory leak. -! -! !INTERFACE: - - subroutine PairedMaskedAverageRAttrGG_(inAv1, outAv1, GGrid1, & - SpatialWeightTag1, rMaskTags1, & - iMaskTags1, inAv2, outAv2, GGrid2, & - SpatialWeightTag2, rMaskTags2, & - iMaskTags2, UseFastMethod, & - comm) -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - - use m_realkinds, only : FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_GeneralGrid, only : GeneralGrid - use m_GeneralGrid, only : GeneralGrid_lsize => lsize - use m_GeneralGrid, only : GeneralGrid_indexRA => indexRA - use m_GeneralGrid, only : GeneralGrid_exportRAttr => exportRAttr - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! 
!INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - type(GeneralGrid), intent(IN) :: GGrid1 - character(len=*), intent(IN) :: SpatialWeightTag1 - character(len=*), optional, intent(IN) :: iMaskTags1 - character(len=*), optional, intent(IN) :: rMaskTags1 - type(AttrVect), intent(IN) :: inAv2 - type(GeneralGrid), intent(IN) :: GGrid2 - character(len=*), intent(IN) :: SpatialWeightTag2 - character(len=*), optional, intent(IN) :: iMaskTags2 - character(len=*), optional, intent(IN) :: rMaskTags2 - logical, intent(IN) :: UseFastMethod - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 17Jun02 - J.W. Larson - Initial version. -! 19Jun02 - J.W. Larson - Shortened the name -! for compatibility with the Portland Group f90 compiler -! 25Jul02 - J.W. Larson E.T. Ong - Bug fix. This routine was -! previously doing integrals rather than area averages. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_ = & - myname//'::PairedMaskedAverageRAttrGG_' - - type(AttrVect) :: LocalIntegral1, LocalIntegral2 - type(List) :: nullIList - real(FP), dimension(:), pointer :: PairedBuffer, OutPairedBuffer - integer :: i, ierr, nRA1, nRA2, PairedBufferLength - real(FP) :: WeightSumInv - - ! Basic Argument Validity Checks: - - if(AttrVect_lsize(inAv1) /= GeneralGrid_lsize(GGrid1)) then - ierr = AttrVect_lsize(inAv1) - GeneralGrid_lsize(GGrid1) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv1 / GGrid1 length mismatch: ', & - ' AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - ' GeneralGrid_lsize(GGrid1) = ',GeneralGrid_lsize(GGrid1) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= GeneralGrid_lsize(GGrid2)) then - ierr = AttrVect_lsize(inAv2) - GeneralGrid_lsize(GGrid2) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv2 / GGrid2 length mismatch: ', & - ' AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - ' GeneralGrid_lsize(GGrid2) = ',GeneralGrid_lsize(GGrid2) - call die(myname_) - endif - - ! Begin by invoking MaskedSpatialIntegralRAttrGG_() for each - ! AttrVect/GeneralGrid pair. This is done LOCALLY to create - ! LocalIntegral1 and LocalIntegral2, respectively. - - ! Local Masked Integral #1: - - if(present(iMaskTags1)) then - - if(present(rMaskTags1)) then ! both Integer and Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv1, LocalIntegral1, GGrid1, & - SpatialWeightTag1, iMaskTags1, & - rMaskTags1, UseFastMethod, & - .TRUE., SpatialWeightTag1) - else ! Integer Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv1, LocalIntegral1, GGrid1, & - SpatialWeightTag1, & - iMaskTags=iMaskTags1, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=SpatialWeightTag1) - endif ! if(present(rMaskTags1))... - - else ! No Integer Masking - - if(present(rMaskTags1)) then ! Real Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv1, LocalIntegral1, GGrid1, & - SpatialWeightTag=SpatialWeightTag1, & - rMaskTags=rMaskTags1, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=SpatialWeightTag1) - else ! Neither Integer nor Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv1, LocalIntegral1, GGrid1, & - SpatialWeightTag=SpatialWeightTag1, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=SpatialWeightTag1) - - endif ! if(present(rMaskTags1))... - - endif ! if(present(iMaskTags1))... - - ! 
Local Masked Integral #2: - - if(present(iMaskTags2)) then - - if(present(rMaskTags2)) then ! both Integer and Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv2, LocalIntegral2, GGrid2, & - SpatialWeightTag2, iMaskTags2, & - rMaskTags2, UseFastMethod, & - .TRUE., SpatialWeightTag2) - else ! Integer Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv2, LocalIntegral2, GGrid2, & - SpatialWeightTag2, & - iMaskTags=iMaskTags2, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=SpatialWeightTag2) - endif ! if(present(rMaskTags2))... - - else ! No Integer Masking - - if(present(rMaskTags2)) then ! Real Masking Only - call MaskedSpatialIntegralRAttrGG_(inAv2, LocalIntegral2, GGrid2, & - SpatialWeightTag=SpatialWeightTag2, & - rMaskTags=rMaskTags2, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=SpatialWeightTag2) - else ! Neither Integer nor Real Masking - call MaskedSpatialIntegralRAttrGG_(inAv2, LocalIntegral2, GGrid2, & - SpatialWeightTag=SpatialWeightTag2, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag=SpatialWeightTag2) - - endif ! if(present(rMaskTags2))... - - endif ! if(present(iMaskTags2))... - - ! Create the paired buffer for the Global Sum - - nRA1 = AttrVect_nRAttr(LocalIntegral1) - nRA2 = AttrVect_nRAttr(LocalIntegral2) - - PairedBufferLength = nRA1 + nRA2 - allocate(PairedBuffer(PairedBufferLength), OutPairedBuffer(PairedBufferLength), & - stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal error--allocate(PairedBuffer...failed, ierr = ',ierr - call die(myname_) - endif - - ! Load the paired buffer - - PairedBuffer(1:nRA1) = LocalIntegral1%rAttr(1:nRA1,1) - PairedBuffer(nRA1+1:PairedBufferLength) = LocalIntegral2%rAttr(1:nRA2,1) - - ! Perform the global sum on the paired buffer - - call MPI_AllReduce(PairedBuffer, OutPairedBuffer, PairedBufferLength, & - MP_Type(PairedBuffer(1)), MP_SUM, comm, ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal Error--MPI_ALLREDUCE() failed with ierror = ',ierr - call MP_perr_die(myname_,'MPI_ALLREDUCE() failed',ierr) - endif - - ! Create outAv1 and outAv2 from inAv1 and inAv2, respectively: - - call List_nullify(nullIList) - - call AttrVect_init(outAv1, iList=nullIList, rList=inAv1%rList, lsize=1) - call AttrVect_zero(outAv1) - call AttrVect_init(outAv2, iList=nullIList, rList=inAv2%rList, lsize=1) - call AttrVect_zero(outAv2) - - ! Unload/rescale OutPairedBuffer into outAv1 and outAv2: - - nRA1 = AttrVect_nRAttr(outAv1) - nRA2 = AttrVect_nRAttr(outAv2) - - ! First outAv1: - - if(OutPairedBuffer(nRA1+1) /= 0.) then - WeightSumInv = 1._FP / OutPairedBuffer(nRA1+1) ! Sum of weights on grid1 - ! is the nRA1+1th element in - ! the paired buffer. - else - write(stderr,'(2a)') myname_, & - ':: FATAL ERROR--Sum of the Weights for integral #1 is zero! Terminating...' - call die(myname_) - endif - - ! Rescale global integral to get global average: - - do i=1,nRA1 - outAv1%rAttr(i,1) = WeightSumInv * OutPairedBuffer(i) - end do - - ! And then outAv2: - - if(OutPairedBuffer(PairedBufferLength) /= 0.) then - WeightSumInv = 1._FP / OutPairedBuffer(PairedBufferLength) ! Sum of weights on grid2 - ! is the last element in - ! the paired buffer. - else - write(stderr,'(2a)') myname_, & - ':: FATAL ERROR--Sum of the Weights for integral #2 is zero! Terminating...' - call die(myname_) - endif - - ! 
Rescale global integral to get global average: - - do i=1,nRA2 - outAv2%rAttr(i,1) = WeightSumInv * OutPairedBuffer(i+nRA1+1) - end do - - ! Clean up allocated structures - - call AttrVect_clean(LocalIntegral1) - call AttrVect_clean(LocalIntegral2) - - deallocate(PairedBuffer, OutPairedBuffer, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal error--deallocate(PairedBuffer...failed, ierr = ',ierr - call die(myname_) - endif - - end subroutine PairedMaskedAverageRAttrGG_ - - end module m_SpatialIntegral - - - diff --git a/src/externals/mct/mct/m_SpatialIntegralV.F90 b/src/externals/mct/mct/m_SpatialIntegralV.F90 deleted file mode 100644 index 1c503b776aa..00000000000 --- a/src/externals/mct/mct/m_SpatialIntegralV.F90 +++ /dev/null @@ -1,2017 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SpatialIntegralV - Spatial Integrals and Averages using vectors of weights -! -! !DESCRIPTION: This module provides spatial integration and averaging -! services for the MCT similar to those in {\tt m\_SpatialIntegral} except -! the weights are provided by an input vector instead of through a -! {\tt GeneralGrid}. See the description for {\tt m\_SpatialIntegral} for -! more information -! -! -! Paired masked spatial integrals and averages have not yet been implemented in -! vector form. -! -! !INTERFACE: - - module m_SpatialIntegralV - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: SpatialIntegralV ! Spatial Integral - public :: SpatialAverageV ! Spatial Area Average - - public :: MaskedSpatialIntegralV ! Masked Spatial Integral - public :: MaskedSpatialAverageV ! MaskedSpatial Area Average - - public :: PairedSpatialIntegralsV ! A Pair of Spatial - ! Integrals - - public :: PairedSpatialAveragesV ! A Pair of Spatial - ! Area Averages - - interface SpatialIntegralV ; module procedure & - SpatialIntegralRAttrVSP_, & - SpatialIntegralRAttrVDP_ - end interface - interface SpatialAverageV ; module procedure & - SpatialAverageRAttrVSP_, & - SpatialAverageRAttrVDP_ - end interface - interface MaskedSpatialIntegralV ; module procedure & - MaskedSpatialIntegralRAttrVSP_, & - MaskedSpatialIntegralRAttrVDP_ - end interface - interface MaskedSpatialAverageV ; module procedure & - MaskedSpatialAverageRAttrVSP_, & - MaskedSpatialAverageRAttrVDP_ - end interface - interface PairedSpatialIntegralsV ; module procedure & - PairedSpatialIntegralRAttrVSP_, & - PairedSpatialIntegralRAttrVDP_ - end interface - interface PairedSpatialAveragesV ; module procedure & - PairedSpatialAverageRAttrVSP_, & - PairedSpatialAverageRAttrVDP_ - end interface - -! !REVISION HISTORY: -! 4Jan04 - R.Jacob - move Vector versions of routines -! from m_SpatialIntegral to this file. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_SpatialIntegralV' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SpatialIntegralRAttrVSP_ - Compute spatial integral. -! -! !DESCRIPTION: -! 
This routine computes spatial integrals of the {\tt REAL} attributes -! of the input {\tt AttrVect} argument -! {\tt inAv}. {\tt SpatialIntegralRAttrV\_()} takes the input -! {\tt AttrVect} argument {\tt inAv} and computes the spatial -! integral using weights stored in the input {\tt REAL} array argument -! {\tt Weights}. The integral of each {\tt REAL} attribute is returned -! in the output {\tt AttrVect} argument {\tt outAv}. If -! {\tt SpatialIntegralRAttrV\_()} is invoked with the optional {\tt LOGICAL} -! input argument {\tt SumWeights} set as {\tt .TRUE.}, then the weights -! are also summed and stored in {\tt outAv} (and can be referenced with -! the attribute name {\tt WeightTag}). If {\tt SpatialIntegralRAttrV\_()} is -! invoked with the optional {\tt INTEGER} argument {\tt comm} (a Fortran -! MPI communicator handle), the summation operations for the integral are -! completed on the local process, then reduced across the communicator, -! with all processes receiving the result. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the input array {\tt Weights} must be equal. That is, there must be -! a one-to-one correspondence between the field point values stored -! in {\tt inAv} and the point weights stored in {\tt Weights}. -! -! {\bf N.B.: } If {\tt SpatialIntegralRAttrV\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}, -! then none of the {\tt REAL} attribute tags in {\tt inAv} may be -! named the same as the string contained in {\tt WeightTag}, which is an -! attribute name reserved for the sum of the weights in the output {\tt AttrVect} -! {\tt outAv}. -! -! {\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine SpatialIntegralRAttrVSP_(inAv, outAv, Weights, SumWeights, & - WeightTag, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : SP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - - use m_AttrVectReduce, only : AttrVect_GlobalWeightedSumRAttr => & - GlobalWeightedSumRAttr - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(SP), dimension(:), pointer :: Weights - logical, optional, intent(IN) :: SumWeights - character(len=*), optional, intent(IN) :: WeightTag - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 07Jun02 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SpatialIntegralRAttrVSP_' - - integer :: ierr, length - logical :: mySumWeights - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv) /= size(Weights)) then - ierr = AttrVect_lsize(inAv) - size(Weights) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / Weights array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(Weights) = ',size(Weights) - call die(myname_) - endif - - if(present(SumWeights)) then - mySumWeights = SumWeights - if(.not. 
present(WeightTag)) then - write(stderr,'(3a)') myname_,':: FATAL--If the input argument SumWeights=.TRUE.,', & - ' then the argument WeightTag must be provided.' - call die(myname_) - endif - else - mySumWeights = .FALSE. - endif - - ! Compute the sum - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the spatial sum of the weights in outAV - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, & - comm, WeightTag) - else - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, comm) - endif - - else ! compute local sum: - - if(mySumWeights) then ! return the spatial sum of the weights in outAV - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights, & - WeightTag) - else - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights) - endif - - endif ! if(present(comm))... - - end subroutine SpatialIntegralRAttrVSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ------------------------------------------------------------------- -! -! !IROUTINE: SpatialIntegralRAttrVDP_ - Compute spatial integral. -! -! !DESCRIPTION: -! Double precision version of SpatialIntegralRAttrVSP_ -! -! !INTERFACE: - - subroutine SpatialIntegralRAttrVDP_(inAv, outAv, Weights, SumWeights, & - WeightTag, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : DP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - - use m_AttrVectReduce, only : AttrVect_GlobalWeightedSumRAttr => & - GlobalWeightedSumRAttr - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(DP), dimension(:), pointer :: Weights - logical, optional, intent(IN) :: SumWeights - character(len=*), optional, intent(IN) :: WeightTag - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 07Jun02 - J.W. Larson - initial version -! ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SpatialIntegralRAttrVDP_' - - integer :: ierr, length - logical :: mySumWeights - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv) /= size(Weights)) then - ierr = AttrVect_lsize(inAv) - size(Weights) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / Weights array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(Weights) = ',size(Weights) - call die(myname_) - endif - - if(present(SumWeights)) then - mySumWeights = SumWeights - if(.not. present(WeightTag)) then - write(stderr,'(3a)') myname_,':: FATAL--If the input argument SumWeights=.TRUE.,', & - ' then the argument WeightTag must be provided.' - call die(myname_) - endif - else - mySumWeights = .FALSE. - endif - - ! Compute the sum - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the spatial sum of the weights in outAV - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, & - comm, WeightTag) - else - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, comm) - endif - - else ! compute local sum: - - if(mySumWeights) then ! 
return the spatial sum of the weights in outAV - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights, & - WeightTag) - else - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights) - endif - - endif ! if(present(comm))... - - end subroutine SpatialIntegralRAttrVDP_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: SpatialAverageRAttrVSP_ - Compute spatial average. -! -! !DESCRIPTION: -! This routine computes spatial averages of the {\tt REAL} attributes -! of the input {\tt AttrVect} argument {\tt inAv}. -! {\tt SpatialAverageRAttrV\_()} takes the input {\tt AttrVect} argument -! {\tt inAv} and computes the spatial average using weights -! stored in the {\tt REAL} array {\tt Weights}. The average of each -! {\tt REAL} attribute is returned in the output {\tt AttrVect} argument -! {\tt outAv}. If {\tt SpatialAverageRAttrV\_()} is invoked with the -! optional {\tt INTEGER} argument {\tt comm} (a Fortran MPI communicator -! handle), the summation operations for the average are completed on the -! local process, then reduced across the communicator, with all processes -! receiving the result. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the input array {\tt Weights} must be equal. That is, there must -! be a one-to-one correspondence between the field point values stored -! in {\tt inAv} and the point weights stored in {\tt Weights}. -! -! {\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine SpatialAverageRAttrVSP_(inAv, outAv, Weights, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : SP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(SP), dimension(:), pointer :: Weights - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 10Jun02 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SpatialAverageRAtttrVSP_' - - type(AttrVect) :: integratedAv - type(List) :: nullIList - integer :: i, ierr, iweight - - ! Compute the spatial integral: - - if(present(comm)) then - call SpatialIntegralV(inAv, integratedAv, Weights, & - .TRUE., 'weights', comm) - else - call SpatialIntegralV(inAv, integratedAv, Weights, .TRUE., 'weights') - endif - - ! Check value of summed weights (to avoid division by zero): - - iweight = AttrVect_indexRA(integratedAv, 'weights') - if(integratedAv%rAttr(iweight, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights is zero.' - call die(myname_) - endif - - ! 
Initialize output AttrVect outAv: - - call List_nullify(nullIList) - call AttrVect_init(outAv, iList=nullIList, rList=inAv%rList, lsize=1) - call AttrVect_zero(outAv) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. - - do i=1,AttrVect_nRAttr(outAv) - outAv%rAttr(i,1) = integratedAv%rAttr(i,1) & - / integratedAv%rAttr(iweight,1) - end do - - ! Clean up temporary AttrVect: - - call AttrVect_clean(integratedAv) - - end subroutine SpatialAverageRAttrVSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ------------------------------------------------------------------- -! -! !IROUTINE: SpatialAverageRAttrVDP_ - Compute spatial average. -! -! !DESCRIPTION: -! Double pecision version of SpatialAverageRAttrVSP -! -! !INTERFACE: - - subroutine SpatialAverageRAttrVDP_(inAv, outAv, Weights, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : DP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(DP), dimension(:), pointer :: Weights - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 10Jun02 - J.W. Larson - initial version -! ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::SpatialAverageRAtttrVDP_' - - type(AttrVect) :: integratedAv - type(List) :: nullIList - integer :: i, ierr, iweight - - ! Compute the spatial integral: - - if(present(comm)) then - call SpatialIntegralV(inAv, integratedAv, Weights, & - .TRUE., 'weights', comm) - else - call SpatialIntegralV(inAv, integratedAv, Weights, .TRUE., 'weights') - endif - - ! Check value of summed weights (to avoid division by zero): - - iweight = AttrVect_indexRA(integratedAv, 'weights') - if(integratedAv%rAttr(iweight, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights is zero.' - call die(myname_) - endif - - ! Initialize output AttrVect outAv: - - call List_nullify(nullIList) - call AttrVect_init(outAv, iList=nullIList, rList=inAv%rList, lsize=1) - call AttrVect_zero(outAv) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. - - do i=1,AttrVect_nRAttr(outAv) - outAv%rAttr(i,1) = integratedAv%rAttr(i,1) & - / integratedAv%rAttr(iweight,1) - end do - - ! Clean up temporary AttrVect: - - call AttrVect_clean(integratedAv) - - end subroutine SpatialAverageRAttrVDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MaskedSpatialIntegralRAttrVSP_ - Masked spatial integral. -! -! !DESCRIPTION: -! This routine computes masked spatial integrals of the {\tt REAL} -! attributes of the input {\tt AttrVect} argument {\tt inAv}, returning -! the masked integrals in the output {\tt AttrVect} argument {\tt outAv}. -! The masked integral is computed using weights stored in the input -! 
{\tt REAL} array argument {\tt SpatialWeights}. Integer masking (if -! desired) is provided in the optional input {\tt INTEGER} array {\tt iMask}, -! and real masking (if desired) is provided in the optional input {\tt REAL} -! array {\tt rMask}. If {\tt SpatialIntegralRAttrV\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}, -! then the weights are also summed and stored in {\tt outAv} (and can be -! referenced with the attribute name defined by the optional input -! {\tt CHARACTER} argument {\tt WeightSumTag}. If -! {\tt SpatialIntegralRAttrV\_()} is invoked with the optional {\tt INTEGER} -! argument {\tt comm} (a Fortran MPI communicator handle), the summation -! operations for the integral are completed on the local process, then -! reduced across the communicator, with all processes receiving the result. -! Otherwise, the integral is assumed to be local (or equivalent to a global -! address space). -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the input array {\tt Weights} must be equal. That is, there must be -! a one-to-one correspondence between the field point values stored -! in {\tt inAv} and the point weights stored in {\tt SpatialWeights}. -! -! {\bf N.B.: } If {\tt SpatialIntegralRAttrV\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}. -! In this case, the none of {\tt REAL} attribute tags in {\tt inAv} may be -! named the same as the string contained in {\tt WeightSumTag}, which is an -! attribute name reserved for the sum of the weights in the output {\tt AttrVect} -! {\tt outAv}. -! -! {\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine MaskedSpatialIntegralRAttrVSP_(inAv, outAv, SpatialWeights, iMask, & - rMask, UseFastMethod, SumWeights, & - WeightSumTag, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : SP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - - use m_AttrVectReduce, only : AttrVect_GlobalWeightedSumRAttr => & - GlobalWeightedSumRAttr - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(SP),dimension(:), pointer :: SpatialWeights - integer, dimension(:), optional, pointer :: iMask - real(SP),dimension(:), optional, pointer :: rMask - logical, intent(IN) :: UseFastMethod - logical, optional, intent(IN) :: SumWeights - character(len=*), optional, intent(IN) :: WeightSumTag - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 10Jun02 - J.W. Larson - initial version -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MaskedSpatialIntegralRAttrVSP_' - - integer :: i, ierr, length - logical :: mySumWeights - real(FP), dimension(:), pointer :: Weights - - ! 
Argument Validity Checks - - if(AttrVect_lsize(inAv) /= size(SpatialWeights)) then - ierr = AttrVect_lsize(inAv) - size(SpatialWeights) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / SpatialWeights array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(SpatialWeights) = ',size(SpatialWeights) - call die(myname_) - endif - - if(present(iMask)) then ! make sure it is the right length - if(AttrVect_lsize(inAv) /= size(iMask)) then - ierr = AttrVect_lsize(inAv) - size(iMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / iMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(iMask) = ',size(iMask) - call die(myname_) - endif - endif - - if(present(rMask)) then ! make sure it is the right length - if(AttrVect_lsize(inAv) /= size(rMask)) then - ierr = AttrVect_lsize(inAv) - size(rMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / rMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(rMask) = ',size(rMask) - call die(myname_) - endif - endif - - if(present(SumWeights)) then - mySumWeights = SumWeights - if(.not. present(WeightSumTag)) then - write(stderr,'(3a)') myname_,':: FATAL--If the input argument SumWeights=.TRUE.,', & - ' then the argument WeightSumTag must be provided.' - call die(myname_) - endif - else - mySumWeights = .FALSE. - endif - - ! Create a common Weights(:) array... - - length = AttrVect_lsize(inAv) - - allocate(Weights(length), stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: allocate(Weights(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - - ! Combine weights and masks into a common Weights(:) array... - - if(UseFastMethod) then ! form the product of iMask, rMask, and SpatialWeights - - if(present(rMask)) then ! use it to form Weights(:) - if(present(iMask)) then ! use it and rMask to form Weights(:) - do i=1,length - Weights(i) = rMask(i) * SpatialWeights(i) * iMask(i) - end do - else - do i=1,length - Weights(i) = rMask(i) * SpatialWeights(i) - end do - endif ! if(present(iMask))... - else - if(present(iMask)) then - do i=1,length - Weights(i) = SpatialWeights(i) * iMask(i) - end do - else - do i=1,length - Weights(i) = SpatialWeights(i) - end do - endif ! if(present(iMask))... - endif ! if(present(rMask))... - - - else ! Scan iMask and rMask carefully and set Weights(i) to zero - ! when iMask(i) or rMask(i) is zero. This avoids round-off - ! effects from products and promotion of integers to reals. - - if(present(rMask)) then ! use it to form Weights(:) - if(present(iMask)) then ! use it and rMask to form Weights(:) - do i=1,length - select case(iMask(i)) - case(0) - Weights(i) = 0._FP - case(1) - if(rMask(i) == 1._FP) then - Weights(i) = SpatialWeights(i) - elseif(rMask(i) == 0._FP) then - Weights(i) = 0._FP - elseif((rMask(i) > 0._FP) .and. (rMask(i) < 1._FP)) then - Weights(i) = rMask(i) * SpatialWeights(i) - else ! rMask(i) < 0. or rMask(i) > 1. - write(stderr,'(3a,i8,a,f10.7)') myname_, & - ':: invalid value for real', & - 'mask entry rMask(',i,') = ',rMask(i) - call die(myname_) - endif - case default - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: invalid value for integer', & - 'mask entry iMask(',i,') = ',iMask(i) - call die(myname_) - end select - end do - else - do i=1,length - if(rMask(i) == 1._FP) then - Weights(i) = SpatialWeights(i) - elseif(rMask(i) == 0._FP) then - Weights(i) = 0._FP - elseif((rMask(i) > 0._FP) .and. 
(rMask(i) < 1._FP)) then - Weights(i) = rMask(i) * SpatialWeights(i) - else ! rMask(i) < 0. or rMask(i) > 1. - write(stderr,'(3a,i8,a,e10.6)') myname_, & - ':: invalid value for real', & - 'mask entry rMask(',i,') = ',rMask(i) - call die(myname_) - endif - end do - endif ! if(present(iMask))... - else ! no rMask present... - if(present(iMask)) then ! check iMask entries... - do i=1,length - select case(iMask(i)) - case(0) - Weights(i) = 0._FP - case(1) - Weights(i) = SpatialWeights(i) - case default - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: invalid value for integer', & - 'mask entry iMask(',i,') = ',iMask(i) - call die(myname_) - end select - end do - else ! straight assignment of SpatialWeights(:) - do i=1,length - Weights(i) = SpatialWeights(i) - end do - endif ! if(present(iMask))... - endif ! if(present(rMask))... - - - endif ! if(UseFastMethod) - - ! Now that the weights are combined into a common Weights(:), - ! compute the masked weighted sum: - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the global sum of the weights in outAV - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, & - comm, WeightSumTag) - else - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, comm) - endif - - else ! compute local sum: - - if(mySumWeights) then ! return the global sum of the weights in outAV - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights, & - WeightSumAttr=WeightSumTag) - else - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights) - endif - - endif ! if(present(comm))... - - ! Clean up the allocated Weights(:) array - - deallocate(Weights, stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: deallocate(Weights(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - - end subroutine MaskedSpatialIntegralRAttrVSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ------------------------------------------------------------------- -! -! !IROUTINE: MaskedSpatialIntegralRAttrVDP_ - Masked spatial integral. -! -! !DESCRIPTION: -! Double precision version of MaskedSpatialIntegralRAttrVSP_ -! -! !INTERFACE: - - subroutine MaskedSpatialIntegralRAttrVDP_(inAv, outAv, SpatialWeights, iMask, & - rMask, UseFastMethod, SumWeights, & - WeightSumTag, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : DP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - - use m_AttrVectReduce, only : AttrVect_GlobalWeightedSumRAttr => & - GlobalWeightedSumRAttr - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(DP),dimension(:), pointer :: SpatialWeights - integer, dimension(:), optional, pointer :: iMask - real(DP),dimension(:), optional, pointer :: rMask - logical, intent(IN) :: UseFastMethod - logical, optional, intent(IN) :: SumWeights - character(len=*), optional, intent(IN) :: WeightSumTag - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 10Jun02 - J.W. Larson - initial version -! 
___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MaskedSpatialIntegralRAttrVDP_' - - integer :: i, ierr, length - logical :: mySumWeights - real(FP), dimension(:), pointer :: Weights - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv) /= size(SpatialWeights)) then - ierr = AttrVect_lsize(inAv) - size(SpatialWeights) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / SpatialWeights array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(SpatialWeights) = ',size(SpatialWeights) - call die(myname_) - endif - - if(present(iMask)) then ! make sure it is the right length - if(AttrVect_lsize(inAv) /= size(iMask)) then - ierr = AttrVect_lsize(inAv) - size(iMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / iMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(iMask) = ',size(iMask) - call die(myname_) - endif - endif - - if(present(rMask)) then ! make sure it is the right length - if(AttrVect_lsize(inAv) /= size(rMask)) then - ierr = AttrVect_lsize(inAv) - size(rMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / rMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(rMask) = ',size(rMask) - call die(myname_) - endif - endif - - if(present(SumWeights)) then - mySumWeights = SumWeights - if(.not. present(WeightSumTag)) then - write(stderr,'(3a)') myname_,':: FATAL--If the input argument SumWeights=.TRUE.,', & - ' then the argument WeightSumTag must be provided.' - call die(myname_) - endif - else - mySumWeights = .FALSE. - endif - - ! Create a common Weights(:) array... - - length = AttrVect_lsize(inAv) - - allocate(Weights(length), stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: allocate(Weights(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - - ! Combine weights and masks into a common Weights(:) array... - - if(UseFastMethod) then ! form the product of iMask, rMask, and SpatialWeights - - if(present(rMask)) then ! use it to form Weights(:) - if(present(iMask)) then ! use it and rMask to form Weights(:) - do i=1,length - Weights(i) = rMask(i) * SpatialWeights(i) * iMask(i) - end do - else - do i=1,length - Weights(i) = rMask(i) * SpatialWeights(i) - end do - endif ! if(present(iMask))... - else - if(present(iMask)) then - do i=1,length - Weights(i) = SpatialWeights(i) * iMask(i) - end do - else - do i=1,length - Weights(i) = SpatialWeights(i) - end do - endif ! if(present(iMask))... - endif ! if(present(rMask))... - - - else ! Scan iMask and rMask carefully and set Weights(i) to zero - ! when iMask(i) or rMask(i) is zero. This avoids round-off - ! effects from products and promotion of integers to reals. - - if(present(rMask)) then ! use it to form Weights(:) - if(present(iMask)) then ! use it and rMask to form Weights(:) - do i=1,length - select case(iMask(i)) - case(0) - Weights(i) = 0._FP - case(1) - if(rMask(i) == 1._FP) then - Weights(i) = SpatialWeights(i) - elseif(rMask(i) == 0._FP) then - Weights(i) = 0._FP - elseif((rMask(i) > 0._FP) .and. (rMask(i) < 1._FP)) then - Weights(i) = rMask(i) * SpatialWeights(i) - else ! rMask(i) < 0. or rMask(i) > 1. 
- write(stderr,'(3a,i8,a,f10.7)') myname_, & - ':: invalid value for real', & - 'mask entry rMask(',i,') = ',rMask(i) - call die(myname_) - endif - case default - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: invalid value for integer', & - 'mask entry iMask(',i,') = ',iMask(i) - call die(myname_) - end select - end do - else - do i=1,length - if(rMask(i) == 1._FP) then - Weights(i) = SpatialWeights(i) - elseif(rMask(i) == 0._FP) then - Weights(i) = 0._FP - elseif((rMask(i) > 0._FP) .and. (rMask(i) < 1._FP)) then - Weights(i) = rMask(i) * SpatialWeights(i) - else ! rMask(i) < 0. or rMask(i) > 1. - write(stderr,'(3a,i8,a,e10.6)') myname_, & - ':: invalid value for real', & - 'mask entry rMask(',i,') = ',rMask(i) - call die(myname_) - endif - end do - endif ! if(present(iMask))... - else ! no rMask present... - if(present(iMask)) then ! check iMask entries... - do i=1,length - select case(iMask(i)) - case(0) - Weights(i) = 0._FP - case(1) - Weights(i) = SpatialWeights(i) - case default - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: invalid value for integer', & - 'mask entry iMask(',i,') = ',iMask(i) - call die(myname_) - end select - end do - else ! straight assignment of SpatialWeights(:) - do i=1,length - Weights(i) = SpatialWeights(i) - end do - endif ! if(present(iMask))... - endif ! if(present(rMask))... - - - endif ! if(UseFastMethod) - - ! Now that the weights are combined into a common Weights(:), - ! compute the masked weighted sum: - - if(present(comm)) then ! compute distributed AllReduce-style sum: - - if(mySumWeights) then ! return the global sum of the weights in outAV - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, & - comm, WeightSumTag) - else - call AttrVect_GlobalWeightedSumRAttr(inAV, outAV, Weights, comm) - endif - - else ! compute local sum: - - if(mySumWeights) then ! return the global sum of the weights in outAV - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights, & - WeightSumAttr=WeightSumTag) - else - call AttrVect_LocalWeightedSumRAttr(inAV, outAV, Weights) - endif - - endif ! if(present(comm))... - - ! Clean up the allocated Weights(:) array - - deallocate(Weights, stat=ierr) - if(ierr /= 0) then - write(stderr,'(3a,i8)') myname_,':: deallocate(Weights(...) failed,', & - ' ierr=',ierr - call die(myname_) - endif - - end subroutine MaskedSpatialIntegralRAttrVDP_ - - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MaskedSpatialAverageRAttrVSP_ - Masked spatial average. -! -! !DESCRIPTION: [NEEDS **LOTS** of work...] -! This routine computes spatial integrals of the {\tt REAL} attributes -! of the {\tt REAL} attributes of the input {\tt AttrVect} argument -! {\tt inAv}. {\tt SpatialIntegralRAttrV\_()} takes the input -! {\tt AttrVect} argument {\tt inAv} and computes the spatial -! integral using weights stored in the input {\tt REAL} array argument -! {\tt Weights}. The integral of each {\tt REAL} attribute is returned -! in the output {\tt AttrVect} argument {\tt outAv}. If -! {\tt SpatialIntegralRAttrV\_()} is invoked with the optional {\tt LOGICAL} -! input argument {\tt SumWeights} set as {\tt .TRUE.}, then the weights -! are also summed and stored in {\tt outAv} (and can be referenced with -! the attribute name {\tt WeightTag}. If {\tt SpatialIntegralRAttrV\_()} is -! invoked with the optional {\tt INTEGER} argument {\tt comm} (a Fortran -! 
MPI communicator handle), the summation operations for the integral are -! completed on the local process, then reduced across the communicator, -! with all processes receiving the result. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv} -! and the input array {\tt Weights} must be equal. That is, there must be -! a one-to-one correspondence between the field point values stored -! in {\tt inAv} and the point weights stored in {\tt Weights}. -! -! {\bf N.B.: } If {\tt SpatialIntegralRAttrV\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}. -! In this case, the none of {\tt REAL} attribute tags in {\tt inAv} may be -! named the same as the string contained in {\tt WeightTag}, which is an -! attribute name reserved for the sum of the weights in the output {\tt AttrVect} -! {\tt outAv}. -! -! {\bf N.B.: } The output {\tt AttrVect} argument {\tt outAv} is an -! allocated data structure. The user must deallocate it using the routine -! {\tt AttrVect\_clean()} when it is no longer needed. Failure to do so -! will result in a memory leak. -! -! !INTERFACE: - - subroutine MaskedSpatialAverageRAttrVSP_(inAv, outAv, SpatialWeights, iMask, & - rMask, UseFastMethod, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : SP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(SP), dimension(:), pointer :: SpatialWeights - integer, dimension(:), optional, pointer :: iMask - real(SP),dimension(:), optional, pointer :: rMask - logical, intent(IN) :: UseFastMethod - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 11Jun02 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MaskedSpatialAverageRAttrVSP_' - - type(AttrVect) :: integratedAv - type(List) :: nullIList - - integer :: i, ierr, length, iweight - logical :: mySumWeights - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv) /= size(SpatialWeights)) then - ierr = AttrVect_lsize(inAv) - size(SpatialWeights) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / SpatialWeights array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(SpatialWeights) = ',size(SpatialWeights) - call die(myname_) - endif - - if(present(iMask)) then ! make sure it is the right length - if(AttrVect_lsize(inAv) /= size(iMask)) then - ierr = AttrVect_lsize(inAv) - size(iMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / iMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(iMask) = ',size(iMask) - call die(myname_) - endif - endif - - if(present(rMask)) then ! 
make sure it is the right length - if(AttrVect_lsize(inAv) /= size(rMask)) then - ierr = AttrVect_lsize(inAv) - size(rMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / rMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(rMask) = ',size(rMask) - call die(myname_) - endif - endif - - ! Compute the masked weighted sum, including the sum of the - ! masked weights. - - if(present(comm)) then ! communicator handle present - - if(present(iMask)) then - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask, rMask, UseFastMethod, .TRUE., & - 'MaskedWeightsSum', comm) - else ! no rMask - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask=iMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum', & - comm=comm) - endif ! if(present(rMask))... - - else ! no iMask present... - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - rMask=rMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum', & - comm=comm) - else ! neither rMask nor iMask present: - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum', & - comm=comm) - endif ! if(present(rMask))... - - endif ! if(present(iMask))... - - else ! no communicator handle present - - if(present(iMask)) then - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask, rMask, UseFastMethod, .TRUE., & - 'MaskedWeightsSum') - else ! no rMask - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask=iMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum') - endif ! if(present(rMask))... - - else ! no iMask present... - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - rMask=rMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum') - else ! neither rMask nor iMask present: - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum') - endif ! if(present(rMask))... - - endif ! if(present(iMask))... - - endif ! if(present(comm))... - - ! At this point, integratedAv containes the masked spatial integrals - ! of the REAL attributes of inAv, along with the sum of the weights. - ! to compute the masked spatial average - - ! Check value of summed weights (to avoid division by zero): - - iweight = AttrVect_indexRA(integratedAv, 'MaskedWeightsSum') - if(integratedAv%rAttr(iweight, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights is zero.' - call die(myname_) - endif - - ! Initialize output AttrVect outAv: - - call List_nullify(nullIList) - call AttrVect_init(outAv, iList=nullIList, rList=inAv%rList, lsize=1) - call AttrVect_zero(outAv) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. - - do i=1,AttrVect_nRAttr(outAv) - outAv%rAttr(i,1) = integratedAv%rAttr(i,1) & - / integratedAv%rAttr(iweight,1) - end do - - ! Clean up temporary AttrVect: - - call AttrVect_clean(integratedAv) - - end subroutine MaskedSpatialAverageRAttrVSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! 
------------------------------------------------------------------- -! -! !IROUTINE: MaskedSpatialAverageRAttrVDP_ - Masked spatial average. -! -! !DESCRIPTION: [NEEDS **LOTS** of work...] -! Double precision interface version of MaskedSpatialAverageRAttrVSP_. -! -! !INTERFACE: - - subroutine MaskedSpatialAverageRAttrVDP_(inAv, outAv, SpatialWeights, iMask, & - rMask, UseFastMethod, comm) - -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : DP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv - real(DP), dimension(:), pointer :: SpatialWeights - integer, dimension(:), optional, pointer :: iMask - real(DP),dimension(:), optional, pointer :: rMask - logical, intent(IN) :: UseFastMethod - integer, optional, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv - -! !REVISION HISTORY: -! 11Jun02 - J.W. Larson - initial version -! ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MaskedSpatialAverageRAttrVDP_' - - type(AttrVect) :: integratedAv - type(List) :: nullIList - - integer :: i, ierr, length, iweight - logical :: mySumWeights - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv) /= size(SpatialWeights)) then - ierr = AttrVect_lsize(inAv) - size(SpatialWeights) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / SpatialWeights array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(SpatialWeights) = ',size(SpatialWeights) - call die(myname_) - endif - - if(present(iMask)) then ! make sure it is the right length - if(AttrVect_lsize(inAv) /= size(iMask)) then - ierr = AttrVect_lsize(inAv) - size(iMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / iMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(iMask) = ',size(iMask) - call die(myname_) - endif - endif - - if(present(rMask)) then ! make sure it is the right length - if(AttrVect_lsize(inAv) /= size(rMask)) then - ierr = AttrVect_lsize(inAv) - size(rMask) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv / rMask array length mismatch: ', & - ' AttrVect_lsize(inAv) = ',AttrVect_lsize(inAv), & - ' size(rMask) = ',size(rMask) - call die(myname_) - endif - endif - - ! Compute the masked weighted sum, including the sum of the - ! masked weights. - - if(present(comm)) then ! communicator handle present - - if(present(iMask)) then - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask, rMask, UseFastMethod, .TRUE., & - 'MaskedWeightsSum', comm) - else ! no rMask - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask=iMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum', & - comm=comm) - endif ! if(present(rMask))... - - else ! no iMask present... - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - rMask=rMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum', & - comm=comm) - else ! 
neither rMask nor iMask present: - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum', & - comm=comm) - endif ! if(present(rMask))... - - endif ! if(present(iMask))... - - else ! no communicator handle present - - if(present(iMask)) then - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask, rMask, UseFastMethod, .TRUE., & - 'MaskedWeightsSum') - else ! no rMask - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - iMask=iMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum') - endif ! if(present(rMask))... - - else ! no iMask present... - - if(present(rMask)) then - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - rMask=rMask, UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum') - else ! neither rMask nor iMask present: - call MaskedSpatialIntegralV(inAv, integratedAv, SpatialWeights, & - UseFastMethod=UseFastMethod, & - SumWeights=.TRUE., & - WeightSumTag='MaskedWeightsSum') - endif ! if(present(rMask))... - - endif ! if(present(iMask))... - - endif ! if(present(comm))... - - ! At this point, integratedAv containes the masked spatial integrals - ! of the REAL attributes of inAv, along with the sum of the weights. - ! to compute the masked spatial average - - ! Check value of summed weights (to avoid division by zero): - - iweight = AttrVect_indexRA(integratedAv, 'MaskedWeightsSum') - if(integratedAv%rAttr(iweight, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights is zero.' - call die(myname_) - endif - - ! Initialize output AttrVect outAv: - - call List_nullify(nullIList) - call AttrVect_init(outAv, iList=nullIList, rList=inAv%rList, lsize=1) - call AttrVect_zero(outAv) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. - - do i=1,AttrVect_nRAttr(outAv) - outAv%rAttr(i,1) = integratedAv%rAttr(i,1) & - / integratedAv%rAttr(iweight,1) - end do - - ! Clean up temporary AttrVect: - - call AttrVect_clean(integratedAv) - - end subroutine MaskedSpatialAverageRAttrVDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: PairedSpatialIntegralRAttrVSP_ - Do two spatial integrals at once. -! -! !DESCRIPTION: -! This routine computes spatial integrals of the {\tt REAL} attributes -! of the {\tt REAL} attributes of the input {\tt AttrVect} arguments -! {\tt inAv1} and {\tt inAv2}, returning the integrals in the output -! {\tt AttrVect} arguments {\tt outAv1} and {\tt outAv2}, respectively . -! The integrals of {\tt inAv1} and {\tt inAv2} are computed using -! spatial weights stored in the input {\tt REAL} array arguments -! {\tt Weights1} and {\tt Weights2}, respectively. -! If {\tt SpatialIntegralRAttrV\_()} is invoked with the optional -! {\tt LOGICAL} input argument -! {\tt SumWeights} set as {\tt .TRUE.}, then the weights are also summed -! and stored in {\tt outAv1} and {\tt outAv2}, and can be referenced with -! the attribute tags defined by the arguments {\tt WeightName1} and -! {\tt WeightName2}, respectively. This paired integral is implicitly a -! distributed operation (the whole motivation for pairing the integrals is -! 
to reduce communication latency costs), and the Fortran MPI communicator -! handle is defined by the input {\tt INTEGER} argument {\tt comm}. The -! summation is an AllReduce operation, with all processes receiving the -! global sum. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv1} -! and the input {\tt REAL} array {\tt Weights1} must be equal. That is, there -! must be a one-to-one correspondence between the field point values stored -! in {\tt inAv1} and the point weights stored in {\tt Weights}. The same -! relationship must apply between {\tt inAv2} and {\tt Weights2}. -! -! {\bf N.B.: } If {\tt SpatialIntegralRAttrV\_()} is invoked with the -! optional {\tt LOGICAL} input argument {\tt SumWeights} set as {\tt .TRUE.}, -! then the value of {\tt WeightName1} must not conflict with any of the -! {\tt REAL} attribute tags in {\tt inAv1} and the value of {\tt WeightName2} -! must not conflict with any of the {\tt REAL} attribute tags in {\tt inAv2}. -! -! {\bf N.B.: } The output {\tt AttrVect} arguments {\tt outAv1} and -! {\tt outAv2} are allocated data structures. The user must deallocate them -! using the routine {\tt AttrVect\_clean()} when they are no longer needed. -! Failure to do so will result in a memory leak. -! -! !INTERFACE: - - subroutine PairedSpatialIntegralRAttrVSP_(inAv1, outAv1, Weights1, WeightName1, & - inAv2, outAv2, Weights2, WeightName2, & - SumWeights, comm) -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : SP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - real(SP),dimension(:),pointer :: Weights1 - character(len=*), intent(IN) :: WeightName1 - type(AttrVect), intent(IN) :: inAv2 - real(SP),dimension(:),pointer :: Weights2 - character(len=*), intent(IN) :: WeightName2 - logical, optional, intent(IN) :: SumWeights - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 10Jun02 - J.W. Larson - Initial version. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::PairedSpatialIntegralRAttrVSP_' - - ! Argument Sanity Checks: - - integer :: ierr, length1, length2, PairedBufferLength - integer :: nRA1, nRA2 - logical :: mySumWeights - real(FP), dimension(:), pointer :: PairedBuffer, OutPairedBuffer - - ! Argument Validity Checks - - if(AttrVect_lsize(inAv1) /= size(Weights1)) then - ierr = AttrVect_lsize(inAv1) - size(Weights1) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv1 / Weights1 length mismatch: ', & - ' AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - ' size(Weights1) = ',size(Weights1) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= size(Weights2)) then - ierr = AttrVect_lsize(inAv2) - size(Weights2) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv2 / Weights2 length mismatch: ', & - ' AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - ' size(Weights2) = ',size(Weights2) - call die(myname_) - endif - - ! Are we summing the integration weights? - - if(present(SumWeights)) then - mySumWeights = SumWeights - else - mySumWeights = .FALSE. - endif - - ! 
Compute the local contributions to the two integrals: - - if(mySumWeights) then - call AttrVect_LocalWeightedSumRAttr(inAv1, outAv1, Weights1, WeightName1) - call AttrVect_LocalWeightedSumRAttr(inAv2, outAv2, Weights2, WeightName2) - else - call AttrVect_LocalWeightedSumRAttr(inAv1, outAv1, Weights1) - call AttrVect_LocalWeightedSumRAttr(inAv2, outAv2, Weights2) - endif - - ! Create the paired buffer for the Global Sum - - nRA1 = AttrVect_nRAttr(outAv1) - nRA2 = AttrVect_nRAttr(outAv2) - - PairedBufferLength = nRA1 + nRA2 - allocate(PairedBuffer(PairedBufferLength), OutPairedBuffer(PairedBufferLength), & - stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal error--allocate(PairedBuffer...failed, ierr = ',ierr - call die(myname_) - endif - - ! Load the paired buffer - - PairedBuffer(1:nRA1) = outAv1%rAttr(1:nRA1,1) - PairedBuffer(nRA1+1:PairedBufferLength) = outAv2%rAttr(1:nRA2,1) - - ! Perform the global sum on the paired buffer - - call MPI_AllReduce(PairedBuffer, OutPairedBuffer, PairedBufferLength, & - MP_Type(PairedBuffer(1)), MP_SUM, comm, ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal Error--MPI_ALLREDUCE() failed with ierror = ',ierr - call MP_perr_die(myname_,'MPI_ALLREDUCE() failed',ierr) - endif - - ! Unload OutPairedBuffer into outAv1 and outAv2: - - outAv1%rAttr(1:nRA1,1) = OutPairedBuffer(1:nRA1) - outAv2%rAttr(1:nRA2,1) = OutPairedBuffer(nRA1+1:PairedBufferLength) - - ! Clean up allocated arrays: - - deallocate(PairedBuffer, OutPairedBuffer, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - 'ERROR--deallocate(PairedBuffer,...) failed, ierr = ',ierr - call die(myname_) - endif - - end subroutine PairedSpatialIntegralRAttrVSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ------------------------------------------------------------------- -! -! !IROUTINE: PairedSpatialIntegralRAttrVDP_ - Two spatial integrals. -! -! !DESCRIPTION: -! Double precision interface version of PairedSpatialIntegralRAttrVSP_. -! -! !INTERFACE: - - subroutine PairedSpatialIntegralRAttrVDP_(inAv1, outAv1, Weights1, WeightName1, & - inAv2, outAv2, Weights2, WeightName2, & - SumWeights, comm) -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : DP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - real(DP),dimension(:),pointer :: Weights1 - character(len=*), intent(IN) :: WeightName1 - type(AttrVect), intent(IN) :: inAv2 - real(DP),dimension(:),pointer :: Weights2 - character(len=*), intent(IN) :: WeightName2 - logical, optional, intent(IN) :: SumWeights - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 10Jun02 - J.W. Larson - Initial version. -! -! ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::PairedSpatialIntegralRAttrVDP_' - - ! Argument Sanity Checks: - - integer :: ierr, length1, length2, PairedBufferLength - integer :: nRA1, nRA2 - logical :: mySumWeights - real(FP), dimension(:), pointer :: PairedBuffer, OutPairedBuffer - - ! 
Argument Validity Checks - - if(AttrVect_lsize(inAv1) /= size(Weights1)) then - ierr = AttrVect_lsize(inAv1) - size(Weights1) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv1 / Weights1 length mismatch: ', & - ' AttrVect_lsize(inAv1) = ',AttrVect_lsize(inAv1), & - ' size(Weights1) = ',size(Weights1) - call die(myname_) - endif - - if(AttrVect_lsize(inAv2) /= size(Weights2)) then - ierr = AttrVect_lsize(inAv2) - size(Weights2) - write(stderr,'(3a,i8,a,i8)') myname_, & - ':: inAv2 / Weights2 length mismatch: ', & - ' AttrVect_lsize(inAv2) = ',AttrVect_lsize(inAv2), & - ' size(Weights2) = ',size(Weights2) - call die(myname_) - endif - - ! Are we summing the integration weights? - - if(present(SumWeights)) then - mySumWeights = SumWeights - else - mySumWeights = .FALSE. - endif - - ! Compute the local contributions to the two integrals: - - if(mySumWeights) then - call AttrVect_LocalWeightedSumRAttr(inAv1, outAv1, Weights1, WeightName1) - call AttrVect_LocalWeightedSumRAttr(inAv2, outAv2, Weights2, WeightName2) - else - call AttrVect_LocalWeightedSumRAttr(inAv1, outAv1, Weights1) - call AttrVect_LocalWeightedSumRAttr(inAv2, outAv2, Weights2) - endif - - ! Create the paired buffer for the Global Sum - - nRA1 = AttrVect_nRAttr(outAv1) - nRA2 = AttrVect_nRAttr(outAv2) - - PairedBufferLength = nRA1 + nRA2 - allocate(PairedBuffer(PairedBufferLength), OutPairedBuffer(PairedBufferLength), & - stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal error--allocate(PairedBuffer...failed, ierr = ',ierr - call die(myname_) - endif - - ! Load the paired buffer - - PairedBuffer(1:nRA1) = outAv1%rAttr(1:nRA1,1) - PairedBuffer(nRA1+1:PairedBufferLength) = outAv2%rAttr(1:nRA2,1) - - ! Perform the global sum on the paired buffer - - call MPI_AllReduce(PairedBuffer, OutPairedBuffer, PairedBufferLength, & - MP_Type(PairedBuffer(1)), MP_SUM, comm, ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Fatal Error--MPI_ALLREDUCE() failed with ierror = ',ierr - call MP_perr_die(myname_,'MPI_ALLREDUCE() failed',ierr) - endif - - ! Unload OutPairedBuffer into outAv1 and outAv2: - - outAv1%rAttr(1:nRA1,1) = OutPairedBuffer(1:nRA1) - outAv2%rAttr(1:nRA2,1) = OutPairedBuffer(nRA1+1:PairedBufferLength) - - ! Clean up allocated arrays: - - deallocate(PairedBuffer, OutPairedBuffer, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - 'ERROR--deallocate(PairedBuffer,...) failed, ierr = ',ierr - call die(myname_) - endif - - end subroutine PairedSpatialIntegralRAttrVDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: PairedSpatialAverageRAttrVSP_ - Do two spatial averages at once. -! -! !DESCRIPTION: -! This routine computes spatial averages of the {\tt REAL} attributes -! of the {\tt REAL} attributes of the input {\tt AttrVect} arguments -! {\tt inAv1} and {\tt inAv2}, returning the integrals in the output -! {\tt AttrVect} arguments {\tt outAv1} and {\tt outAv2}, respectively . -! The averages of {\tt inAv1} and {\tt inAv2} are computed using -! spatial weights stored in the input {\tt REAL} array arguments -! {\tt Weights1} and {\tt Weights2}, respectively. This paired average -! is implicitly a -! distributed operation (the whole motivation for pairing the integrals is -! to reduce communication latency costs), and the Fortran MPI communicator -! 
handle is defined by the input {\tt INTEGER} argument {\tt comm}. The -! summation is an AllReduce operation, with all processes receiving the -! global sum. -! -! {\bf N.B.: } The local lengths of the {\tt AttrVect} argument {\tt inAv1} -! and the array {\tt Weights} must be equal. That is, there must be a -! one-to-one correspondence between the field point values stored -! in {\tt inAv1} and the spatial weights stored in {\tt Weights} -! -! {\bf N.B.: } The output {\tt AttrVect} arguments {\tt outAv1} and -! {\tt outAv2} are allocated data structures. The user must deallocate them -! using the routine {\tt AttrVect\_clean()} when they are no longer needed. -! Failure to do so will result in a memory leak. -! -! !INTERFACE: - - subroutine PairedSpatialAverageRAttrVSP_(inAv1, outAv1, Weights1, inAv2, & - outAv2, Weights2, comm) -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : SP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - real(SP),dimension(:),pointer :: Weights1 - type(AttrVect), intent(IN) :: inAv2 - real(SP),dimension(:),pointer :: Weights2 - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 09May02 - J.W. Larson - Initial version. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::PairedSpatialAverageRAttrVSP_' - - type(AttrVect) :: integratedAv1, integratedAv2 - type(List) :: nullIList - integer :: i, ierr, iweight1, iweight2 - - ! weight tags used to keep track of spatial weight sums - character*8, parameter :: WeightName1='WeightSum1' - character*8, parameter :: WeightName2='WeightSum2' - - ! Compute the paired spatial integral, including spatial weights: - - call PairedSpatialIntegralsV(inAv1, integratedAv1, Weights1, WeightName1, & - inAv2, integratedAv2, Weights2, WeightName2, & - .TRUE., comm) - - ! Check value of summed weights (to avoid division by zero): - - iweight1 = AttrVect_indexRA(integratedAv1, WeightName1) - if(integratedAv1%rAttr(iweight1, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights in first integral is zero.' - call die(myname_) - endif - - iweight2 = AttrVect_indexRA(integratedAv2, WeightName2) - if(integratedAv2%rAttr(iweight2, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights in second integral is zero.' - call die(myname_) - endif - - ! Initialize output AttrVects outAv1 and outAv2: - - call List_nullify(nullIList) - call AttrVect_init(outAv1, iList=nullIList, rList=inAv1%rList, lsize=1) - call AttrVect_zero(outAv1) - call AttrVect_init(outAv2, iList=nullIList, rList=inAv2%rList, lsize=1) - call AttrVect_zero(outAv2) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. 
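! Put differently, for each REAL attribute a of inAv1 (and likewise inAv2),
! the spatial average computed below is
!    avg(a) = sum_k( w_k * a_k ) / sum_k( w_k )
! where w_k is the spatial weight of point k (shorthand only, not a variable
! in this module) and both sums are the AllReduce results already stored in
! integratedAv1 and integratedAv2.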
- - do i=1,AttrVect_nRAttr(outAv1) - outAv1%rAttr(i,1) = integratedAv1%rAttr(i,1) & - / integratedAv1%rAttr(iweight1,1) - end do - - do i=1,AttrVect_nRAttr(outAv2) - outAv2%rAttr(i,1) = integratedAv2%rAttr(i,1) & - / integratedAv2%rAttr(iweight2,1) - end do - - ! Clean up temporary AttrVects: - - call AttrVect_clean(integratedAv1) - call AttrVect_clean(integratedAv2) - - end subroutine PairedSpatialAverageRAttrVSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -! ---------------------------------------------------------------------- -! -! !IROUTINE: PairedSpatialAverageRAttrVDP_ - Two spatial averages. -! -! !DESCRIPTION: -! Double precision version of PairedSpatialAverageRAttrVSP_ -! -! !INTERFACE: - - subroutine PairedSpatialAverageRAttrVDP_(inAv1, outAv1, Weights1, inAv2, & - outAv2, Weights2, comm) -! ! USES: - - use m_stdio - use m_die - use m_mpif90 - use m_realkinds, only : DP, FP - - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_lsize => lsize - use m_AttrVect, only : AttrVect_nRAttr => nRAttr - use m_AttrVect, only : AttrVect_indexRA => indexRA - - use m_AttrVectReduce, only : AttrVect_LocalWeightedSumRAttr => & - LocalWeightedSumRAttr - - use m_List, only : List - use m_List, only : List_nullify => nullify - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(IN) :: inAv1 - real(DP),dimension(:),pointer :: Weights1 - type(AttrVect), intent(IN) :: inAv2 - real(DP),dimension(:),pointer :: Weights2 - integer, intent(IN) :: comm - -! !OUTPUT PARAMETERS: - - type(AttrVect), intent(OUT) :: outAv1 - type(AttrVect), intent(OUT) :: outAv2 - -! !REVISION HISTORY: -! 09May02 - J.W. Larson - Initial version. -! -! ______________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::PairedSpatialAverageRAttrVDP_' - - type(AttrVect) :: integratedAv1, integratedAv2 - type(List) :: nullIList - integer :: i, ierr, iweight1, iweight2 - - ! weight tags used to keep track of spatial weight sums - character*8, parameter :: WeightName1='WeightSum1' - character*8, parameter :: WeightName2='WeightSum2' - - ! Compute the paired spatial integral, including spatial weights: - - call PairedSpatialIntegralsV(inAv1, integratedAv1, Weights1, WeightName1, & - inAv2, integratedAv2, Weights2, WeightName2, & - .TRUE., comm) - - ! Check value of summed weights (to avoid division by zero): - - iweight1 = AttrVect_indexRA(integratedAv1, WeightName1) - if(integratedAv1%rAttr(iweight1, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights in first integral is zero.' - call die(myname_) - endif - - iweight2 = AttrVect_indexRA(integratedAv2, WeightName2) - if(integratedAv2%rAttr(iweight2, 1) == 0._FP) then - write(stderr,'(2a)') myname_, & - '::ERROR--Global sum of grid weights in second integral is zero.' - call die(myname_) - endif - - ! Initialize output AttrVects outAv1 and outAv2: - - call List_nullify(nullIList) - call AttrVect_init(outAv1, iList=nullIList, rList=inAv1%rList, lsize=1) - call AttrVect_zero(outAv1) - call AttrVect_init(outAv2, iList=nullIList, rList=inAv2%rList, lsize=1) - call AttrVect_zero(outAv2) - - ! Divide by global weight sum to compute spatial averages from - ! spatial integrals. 
- - do i=1,AttrVect_nRAttr(outAv1) - outAv1%rAttr(i,1) = integratedAv1%rAttr(i,1) & - / integratedAv1%rAttr(iweight1,1) - end do - - do i=1,AttrVect_nRAttr(outAv2) - outAv2%rAttr(i,1) = integratedAv2%rAttr(i,1) & - / integratedAv2%rAttr(iweight2,1) - end do - - ! Clean up temporary AttrVects: - - call AttrVect_clean(integratedAv1) - call AttrVect_clean(integratedAv2) - - end subroutine PairedSpatialAverageRAttrVDP_ - - end module m_SpatialIntegralV diff --git a/src/externals/mct/mct/m_Transfer.F90 b/src/externals/mct/mct/m_Transfer.F90 deleted file mode 100644 index 475898a06db..00000000000 --- a/src/externals/mct/mct/m_Transfer.F90 +++ /dev/null @@ -1,818 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Transfer - Routines for the MxN transfer of Attribute Vectors -! -! !DESCRIPTION: -! This module provides routines for doing MxN transfer of data in an -! Attribute Vector between two components on separate sets of MPI processes. -! Uses the Router datatype. -! -! !SEE ALSO: -! m_Rearranger - -! !INTERFACE: - - module m_Transfer - -! !USES: - use m_MCTWorld, only : MCTWorld - use m_MCTWorld, only : ThisMCTWorld - use m_AttrVect, only : AttrVect - use m_AttrVect, only : nIAttr,nRAttr - use m_AttrVect, only : Permute, Unpermute - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_copy => copy - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : lsize - use m_Router, only : Router - - use m_mpif90 - use m_die - use m_stdio - - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: isend - public :: send - public :: waitsend - public :: irecv - public :: recv - public :: waitrecv - - - interface isend ; module procedure isend_ ; end interface - interface send ; module procedure send_ ; end interface - interface waitsend ; module procedure waitsend_ ; end interface - interface irecv ; module procedure irecv_ ; end interface - interface recv ; module procedure recv_ ; end interface - interface waitrecv ; module procedure waitrecv_ ; end interface - -! !DEFINED PARAMETERS: - - integer,parameter :: DefaultTag = 600 - -! !REVISION HISTORY: -! 08Nov02 - R. Jacob - make new module by combining -! MCT_Send, MCT_Recv and MCT_Recvsum -! 11Nov02 - R. Jacob - Remove MCT_Recvsum and use -! optional argument in recv_ to do the same thing. -! 23Jul03 - R. Jacob - Move buffers for data and -! MPI_Reqest and MPI_Status arrays to Router. Use them. -! 24Jul03 - R. Jacob - Split send_ into isend_ and -! waitsend_. Redefine send_. -! 22Jan08 - R. Jacob - Handle unordered GSMaps -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT::m_Transfer' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: isend_ - Distributed non-blocking send of an Attribute Vector -! -! !DESCRIPTION: -! Send the the data in the {\tt AttrVect} {\tt aV} to the -! component specified in the {\tt Router} {\tt Rout}. An error will -! result if the size of the attribute vector does not match the size -! 
parameter stored in the {\tt Router}. -! -! Requires a corresponding {\tt recv\_} or {\tt irecv\_} to be called on the other component. -! -! The optional argument {\tt Tag} can be used to set the tag value used in -! the data transfer. DefaultTag will be used otherwise. {\tt Tag} must be -! the same in the matching {\tt recv\_} or {\tt irecv\_}. -! -! {\bf N.B.:} The {\tt AttrVect} argument in the corresponding -! {\tt recv\_} call is assumed to have exactly the same attributes -! in exactly the same order as {\tt aV}. -! -! !INTERFACE: - - subroutine isend_(aVin, Rout, Tag) - -! -! !USES: -! - implicit none - -! !INPUT PARAMETERS: -! - - Type(AttrVect),target,intent(in) :: aVin - Type(Router), intent(inout) :: Rout - integer,optional, intent(in) :: Tag - -! !REVISION HISTORY: -! 07Feb01 - R. Jacob - initial prototype -! 08Feb01 - R. Jacob - First working code -! 18May01 - R. Jacob - use MP_Type to determine type in mpi_send -! 07Jun01 - R. Jacob - remove logic to check "direction" of Router. -! remove references to ThisMCTWorld%mylrank -! 03Aug01 - E. Ong - Explicitly specify the starting address in mpi_send. -! 15Feb02 - R. Jacob - Use MCT_comm -! 26Mar02 - E. Ong - Apply faster copy order -! 26Sep02 - R. Jacob - Check Av against Router lAvsize -! 05Nov02 - R. Jacob - Remove iList, rList arguments. -! 08Nov02 - R. Jacob - MCT_Send is now send_ in m_Transfer -! 11Nov02 - R. Jacob - Use DefaultTag and add optional Tag argument -! 25Jul03 - R. Jacob - Split into isend_ and waitsend_ -! 22Jan08 - R. Jacob - Handle unordered GSMaps by permuting before send. -! remove special case for sending one segment directly from Av which probably -! wasn't safe. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::isend_' - integer :: numi,numr,i,j,k,ier - integer :: mycomp,othercomp - integer :: AttrIndex,VectIndex,seg_start,seg_end - integer :: proc,nseg,mytag - integer :: mp_Type_rp1 - logical :: unordered - type(AttrVect),pointer :: Av - type(AttrVect),target :: Avtmp - -!-------------------------------------------------------- - -! Return if no one to send to - if(Rout%nprocs .eq. 0 ) RETURN - -! set up Av to send from - unordered = associated(Rout%permarr) - if (unordered) then - call AttrVect_init(Avtmp,Avin,lsize(Avin)) - call AttrVect_copy(Avin,aVtmp) - call Permute(aVtmp,Rout%permarr) - Av => Avtmp - else - Av => Avin - endif - -!check Av size against Router -! - if(lsize(aV) /= Rout%lAvsize) then - write(stderr,'(2a)') myname_, & - ' MCTERROR: AV size not appropriate for this Router...exiting' - call die(myname_) - endif - -! get ids of components involved in this communication - mycomp=Rout%comp1id - othercomp=Rout%comp2id - - -! find total number of real and integer vectors -! for now, assume we are sending all of them - Rout%numiatt = nIAttr(aV) - Rout%numratt = nRAttr(aV) - numi = Rout%numiatt - numr = Rout%numratt - -!!!!!!!!!!!!!! IF SENDING INTEGER DATA - if(numi .ge. 1) then - -! allocate buffers to hold all outgoing data - do proc=1,Rout%nprocs - allocate(Rout%ip1(proc)%pi(Rout%locsize(proc)*numi),stat=ier) - if(ier/=0) call die(myname_,'allocate(Rout%ip1%pi)',ier) - enddo - - endif - -!!!!!!!!!!!!!! IF SENDING REAL DATA - if(numr .ge. 1) then - -! allocate buffers to hold all outgoing data - do proc=1,Rout%nprocs - allocate(Rout%rp1(proc)%pr(Rout%locsize(proc)*numr),stat=ier) - if(ier/=0) call die(myname_,'allocate(Rout%rp1%pr)',ier) - enddo - - mp_Type_rp1=MP_Type(Rout%rp1(1)%pr(1)) - - endif - - - ! 
Load data going to each processor - do proc = 1,Rout%nprocs - - j=1 - k=1 - - ! load the correct pieces of the integer and real vectors - ! if Rout%num_segs(proc)=1, then this will do one loop - do nseg = 1,Rout%num_segs(proc) - seg_start = Rout%seg_starts(proc,nseg) - seg_end = seg_start + Rout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,numi - Rout%ip1(proc)%pi(j) = aV%iAttr(AttrIndex,VectIndex) - j=j+1 - enddo - do AttrIndex = 1,numr - Rout%rp1(proc)%pr(k) = aV%rAttr(AttrIndex,VectIndex) - k=k+1 - enddo - enddo - enddo - - - - ! Send the integer data - if(numi .ge. 1) then - - ! set tag - mytag = DefaultTag - if(present(Tag)) mytag=Tag - - - call MPI_ISEND(Rout%ip1(proc)%pi(1), & - Rout%locsize(proc)*numi,MP_INTEGER,Rout%pe_list(proc), & - mytag,ThisMCTWorld%MCT_comm,Rout%ireqs(proc),ier) - - if(ier /= 0) call MP_perr_die(myname_,'MPI_ISEND(ints)',ier) - - endif - - ! Send the real data - if(numr .ge. 1) then - - ! set tag - mytag = DefaultTag + 1 - if(present(Tag)) mytag=Tag +1 - - - call MPI_ISEND(Rout%rp1(proc)%pr(1), & - Rout%locsize(proc)*numr,mp_Type_rp1,Rout%pe_list(proc), & - mytag,ThisMCTWorld%MCT_comm,Rout%rreqs(proc),ier) - - - if(ier /= 0) call MP_perr_die(myname_,'MPI_ISEND(reals)',ier) - - endif - - enddo - - if (unordered) then - call AttrVect_clean(aVtmp) - nullify(aV) - else - nullify(aV) - endif - -end subroutine isend_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: waitsend_ - Wait for a distributed non-blocking send to complete -! -! !DESCRIPTION: -! Wait for the data being sent with the {\tt Router} {\tt Rout} to complete. -! -! !INTERFACE: - - subroutine waitsend_(Rout) - -! -! !USES: -! - implicit none - -! !INPUT PARAMETERS: -! - Type(Router), intent(inout) :: Rout - -! !REVISION HISTORY: -! 24Jul03 - R. Jacob - First working version is -! the wait part of original send_ -!EOP ___________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::waitsend_' - integer :: proc,ier - -! Return if nothing to wait for - if(Rout%nprocs .eq. 0 ) RETURN - - ! wait for all sends to complete - if(Rout%numiatt .ge. 1) then - - call MPI_WAITALL(Rout%nprocs,Rout%ireqs,Rout%istatus,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL(ints)',ier) - - do proc=1,Rout%nprocs - deallocate(Rout%ip1(proc)%pi,stat=ier) - if(ier/=0) call die(myname_,'deallocate(ip1%pi)',ier) - enddo - - endif - - if(Rout%numratt .ge. 1) then - - call MPI_WAITALL(Rout%nprocs,Rout%rreqs,Rout%rstatus,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL(reals)',ier) - - do proc=1,Rout%nprocs - deallocate(Rout%rp1(proc)%pr,stat=ier) - if(ier/=0) call die(myname_,'deallocate(rp1%pi)',ier) - enddo - - endif - - -end subroutine waitsend_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: send_ - Distributed blocking send of an Attribute Vector -! -! !DESCRIPTION: -! Send the the data in the {\tt AttrVect} {\tt aV} to the -! component specified in the {\tt Router} {\tt Rout}. An error will -! result if the size of the attribute vector does not match the size -! parameter stored in the {\tt Router}. -! -! 
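! A minimal usage sketch (the {\tt AttrVect} and {\tt Router} names are
! hypothetical; both components must already share an initialized
! {\tt MCTWorld} and the {\tt Router} {\tt Rout} built for this transfer):
!
!     call send(aV_src, Rout)     ! on the sending component
!     call recv(aV_dst, Rout)     ! on the receiving component
!
! The non-blocking pairs {\tt isend}/{\tt waitsend} and
! {\tt irecv}/{\tt waitrecv} split each side into separate post and
! completion steps.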
Requires a corresponding {\tt recv\_} or {\tt irecv\_} to be called on the other -! component. -! -! The optional argument {\tt Tag} can be used to set the tag value used in -! the data transfer. DefaultTag will be used otherwise. {\tt Tag} must be -! the same in the matching {\tt recv\_} or {\tt irecv\_}. -! -! {\bf N.B.:} The {\tt AttrVect} argument in the corresponding -! {\tt recv} call is assumed to have exactly the same attributes -! in exactly the same order as {\tt aV}. -! -! !INTERFACE: - - subroutine send_(aV, Rout, Tag) - -! -! !USES: -! - implicit none - -! !INPUT PARAMETERS: -! - - Type(AttrVect), intent(in) :: aV - Type(Router), intent(inout) :: Rout - integer,optional, intent(in) :: Tag - -! !REVISION HISTORY: -! 24Jul03 - R. Jacob - New version uses isend and waitsend -!EOP ___________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::send_' - - call isend_(aV,Rout,Tag) - - call waitsend_(Rout) - -end subroutine send_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: irecv_ - Distributed receive of an Attribute Vector -! -! !DESCRIPTION: -! Recieve into the {\tt AttrVect} {\tt aV} the data coming from the -! component specified in the {\tt Router} {\tt Rout}. An error will -! result if the size of the attribute vector does not match the size -! parameter stored in the {\tt Router}. -! -! Requires a corresponding {\tt send\_} or {\tt isend\_} to be called -! on the other component. -! -! The optional argument {\tt Tag} can be used to set the tag value used in -! the data transfer. DefaultTag will be used otherwise. {\tt Tag} must be -! the same in the matching {\tt send\_} or {\tt isend\_}. -! -! If data for a grid point is coming from more than one process, {\tt recv\_} -! will overwrite the duplicate values leaving the last received value -! in the output aV. If the optional argument {\tt Sum} is invoked, the output -! will contain the sum of any duplicate values received for the same grid point. -! -! Will return as soon as MPI\_IRECV's are posted. Call {\tt waitrecv\_} to -! complete the receive operation. -! -! {\bf N.B.:} The {\tt AttrVect} argument in the corresponding -! {\tt send\_} call is assumed to have exactly the same attributes -! in exactly the same order as {\tt aV}. -! -! !INTERFACE: - - subroutine irecv_(aV, Rout, Tag, Sum) -! -! !USES: -! - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - Type(AttrVect), intent(inout) :: aV - -! !INPUT PARAMETERS: -! - Type(Router), intent(inout) :: Rout - integer,optional, intent(in) :: Tag - logical,optional, intent(in) :: Sum - -! !REVISION HISTORY: -! 07Feb01 - R. Jacob - initial prototype -! 07Jun01 - R. Jacob - remove logic to -! check "direction" of Router. remove references -! to ThisMCTWorld%mylrank -! 03Aug01 - E.T. Ong - explicity specify starting -! address in MPI_RECV -! 27Nov01 - E.T. Ong - deallocated to prevent -! memory leaks -! 15Feb02 - R. Jacob - Use MCT_comm -! 26Mar02 - E. Ong - Apply faster copy order. -! 26Sep02 - R. Jacob - Check Av against Router lAvsize -! 08Nov02 - R. Jacob - MCT_Recv is now recv_ in m_Transfer -! 11Nov02 - R. Jacob - Add optional Sum argument to -! tell recv_ to sum data for the same point received from multiple -! processors. Replaces recvsum_ which had replaced MCT_Recvsum. -! Use DefaultTag and add optional Tag argument -! 
25Jul03 - R. Jacob - break into irecv_ and waitrecv_ -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::irecv_' - integer :: numi,numr,i,j,k,ier - integer :: mycomp,othercomp - integer :: seg_start,seg_end - integer :: proc,numprocs,nseg,mytag - integer :: mp_Type_rp1 - logical :: DoSum - -!-------------------------------------------------------- - -! Return if no one to receive from - if(Rout%nprocs .eq. 0 ) RETURN - -!check Av size against Router -! - if(lsize(aV) /= Rout%lAvsize) then - write(stderr,'(2a)') myname_, & - ' MCTERROR: AV size not appropriate for this Router...exiting' - call die(myname_) - endif - - DoSum = .false. - if(present(Sum)) DoSum=Sum - - - mycomp=Rout%comp1id - othercomp=Rout%comp2id - -! find total number of real and integer vectors -! for now, assume we are receiving all of them - Rout%numiatt = nIAttr(aV) - Rout%numratt = nRAttr(aV) - numi = Rout%numiatt - numr = Rout%numratt - -!!!!!!!!!!!!!! IF RECEVING INTEGER DATA - if(numi .ge. 1) then - -! allocate buffers to hold all incoming data - do proc=1,Rout%nprocs - allocate(Rout%ip1(proc)%pi(Rout%locsize(proc)*numi),stat=ier) - if(ier/=0) call die(myname_,'allocate(Rout%ip1%pi)',ier) - enddo - - endif - -!!!!!!!!!!!!!! IF RECEIVING REAL DATA - if(numr .ge. 1) then - -! allocate buffers to hold all incoming data - do proc=1,Rout%nprocs - allocate(Rout%rp1(proc)%pr(Rout%locsize(proc)*numr),stat=ier) - if(ier/=0) call die(myname_,'allocate(Rout%rp1%pr)',ier) - enddo - - mp_Type_rp1=MP_Type(Rout%rp1(1)%pr(1)) - - endif - - ! Post all MPI_IRECV - do proc=1,Rout%nprocs - - ! receive the integer data - if(numi .ge. 1) then - - ! set tag - mytag = DefaultTag - if(present(Tag)) mytag=Tag - - if( Rout%num_segs(proc) > 1 .or. DoSum ) then - - call MPI_IRECV(Rout%ip1(proc)%pi(1), & - Rout%locsize(proc)*numi,MP_INTEGER,Rout%pe_list(proc), & - mytag,ThisMCTWorld%MCT_comm,Rout%ireqs(proc),ier) - - else - - call MPI_IRECV(aV%iAttr(1,Rout%seg_starts(proc,1)), & - Rout%locsize(proc)*numi,MP_INTEGER,Rout%pe_list(proc), & - mytag,ThisMCTWorld%MCT_comm,Rout%ireqs(proc),ier) - - endif - - if(ier /= 0) call MP_perr_die(myname_,'MPI_IRECV(ints)',ier) - - endif - - ! receive the real data - if(numr .ge. 1) then - - ! corresponding tag logic must be in send_ - mytag = DefaultTag + 1 - if(present(Tag)) mytag=Tag +1 - - if( Rout%num_segs(proc) > 1 .or. DoSum ) then - - call MPI_IRECV(Rout%rp1(proc)%pr(1), & - Rout%locsize(proc)*numr,mp_Type_rp1,Rout%pe_list(proc), & - mytag,ThisMCTWorld%MCT_comm,Rout%rreqs(proc),ier) - - else - - call MPI_IRECV(aV%rAttr(1,Rout%seg_starts(proc,1)), & - Rout%locsize(proc)*numr,mp_Type_rp1,Rout%pe_list(proc), & - mytag,ThisMCTWorld%MCT_comm,Rout%rreqs(proc),ier) - - endif - - if(ier /= 0) call MP_perr_die(myname_,'MPI_IRECV(reals)',ier) - - endif - - enddo - -end subroutine irecv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: waitrecv_ - Wait for a distributed non-blocking recv to complete -! -! !DESCRIPTION: -! Wait for the data being received with the {\tt Router} {\tt Rout} to complete. -! When done, copy the data into the {\tt AttrVect} {\tt aV}. -! -! !INTERFACE: - - subroutine waitrecv_(aV, Rout, Sum) - -! -! !USES: -! - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! 
- Type(AttrVect), intent(inout) :: aV - Type(Router), intent(inout) :: Rout - -! !INPUT PARAMETERS: -! - logical,optional, intent(in) :: Sum - - -! !REVISION HISTORY: -! 25Jul03 - R. Jacob - First working version is the wait -! and copy parts from old recv_. -! 25Jan08 - R. Jacob - Handle unordered GSMaps by -! applying permutation to received array. -!EOP ___________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::waitrecv_' - integer :: proc,ier,j,k,nseg - integer :: AttrIndex,VectIndex,seg_start,seg_end - logical :: DoSum - logical :: unordered - -! Return if nothing to wait for - if(Rout%nprocs .eq. 0 ) RETURN - -!check Av size against Router -! - if(lsize(aV) /= Rout%lAvsize) then - write(stderr,'(2a)') myname_, & - ' MCTERROR: AV size not appropriate for this Router...exiting' - call die(myname_) - endif - - unordered = associated(Rout%permarr) - - DoSum = .false. - if(present(Sum)) DoSum=Sum - - ! wait for all recieves to complete - if(Rout%numiatt .ge. 1) then - - call MPI_WAITALL(Rout%nprocs,Rout%ireqs,Rout%istatus,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL(ints)',ier) - - endif - - if(Rout%numratt .ge. 1) then - - call MPI_WAITALL(Rout%nprocs,Rout%rreqs,Rout%rstatus,ier) - if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITALL(reals)',ier) - - endif - - ! Load data which came from each processor - do proc=1,Rout%nprocs - - if( (Rout%num_segs(proc) > 1) .or. DoSum ) then - - j=1 - k=1 - - if(DoSum) then - ! sum the correct pieces of the integer and real vectors - do nseg = 1,Rout%num_segs(proc) - seg_start = Rout%seg_starts(proc,nseg) - seg_end = seg_start + Rout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,Rout%numiatt - aV%iAttr(AttrIndex,VectIndex)= & - aV%iAttr(AttrIndex,VectIndex)+Rout%ip1(proc)%pi(j) - j=j+1 - enddo - do AttrIndex = 1,Rout%numratt - aV%rAttr(AttrIndex,VectIndex)= & - aV%rAttr(AttrIndex,VectIndex)+Rout%rp1(proc)%pr(k) - k=k+1 - enddo - enddo - enddo - else - ! load the correct pieces of the integer and real vectors - do nseg = 1,Rout%num_segs(proc) - seg_start = Rout%seg_starts(proc,nseg) - seg_end = seg_start + Rout%seg_lengths(proc,nseg)-1 - do VectIndex = seg_start,seg_end - do AttrIndex = 1,Rout%numiatt - aV%iAttr(AttrIndex,VectIndex)=Rout%ip1(proc)%pi(j) - j=j+1 - enddo - do AttrIndex = 1,Rout%numratt - aV%rAttr(AttrIndex,VectIndex)=Rout%rp1(proc)%pr(k) - k=k+1 - enddo - enddo - enddo - endif - - endif - - enddo - -!........................WAITANY METHOD................................ -! -!....NOTE: Make status argument a 1-dimensional array -! ! Load data which came from each processor -! do numprocs = 1,Rout%nprocs -! ! Load the integer data -! if(Rout%numiatt .ge. 1) then -! call MPI_WAITANY(Rout%nprocs,Rout%ireqs,proc,Rout%istatus,ier) -! if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITANY(ints)',ier) -! j=1 -! ! load the correct pieces of the integer vectors -! do nseg = 1,Rout%num_segs(proc) -! seg_start = Rout%seg_starts(proc,nseg) -! seg_end = seg_start + Rout%seg_lengths(proc,nseg)-1 -! do VectIndex = seg_start,seg_end -! do AttrIndex = 1,Rout%numiatt -! aV%iAttr(AttrIndex,VectIndex)=Rout%ip1(proc)%pi(j) -! j=j+1 -! enddo -! enddo -! enddo -! endif -! ! Load the real data -! if(numr .ge. 1) then -! call MPI_WAITANY(Rout%nprocs,Rout%rreqs,proc,Rout%rstatus,ier) -! if(ier /= 0) call MP_perr_die(myname_,'MPI_WAITANY(reals)',ier) -! k=1 -! ! load the correct pieces of the real vectors -! do nseg = 1,Rout%num_segs(proc) -! 
seg_start = Rout%seg_starts(proc,nseg) -! seg_end = seg_start + Rout%seg_lengths(proc,nseg)-1 -! do VectIndex = seg_start,seg_end -! do AttrIndex = 1,numr -! aV%rAttr(AttrIndex,VectIndex)=Rout%rp1(proc)%pr(k) -! k=k+1 -! enddo -! enddo -! enddo -! endif -! enddo -!........................................................................ - - ! Deallocate all structures - if(Rout%numiatt .ge. 1) then - - ! Deallocate the receive buffers - do proc=1,Rout%nprocs - deallocate(Rout%ip1(proc)%pi,stat=ier) - if(ier/=0) call die(myname_,'deallocate(Rout%ip1%pi)',ier) - enddo - - endif - - if(Rout%numratt .ge. 1) then - - ! Deallocate the receive buffers - do proc=1,Rout%nprocs - deallocate(Rout%rp1(proc)%pr,stat=ier) - if(ier/=0) call die(myname_,'deallocate(Rout%rp1%pr)',ier) - enddo - - endif - - if (unordered) call Unpermute(aV,Rout%permarr) - -end subroutine waitrecv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: recv_ - Distributed receive of an Attribute Vector -! -! !DESCRIPTION: -! Recieve into the {\tt AttrVect} {\tt aV} the data coming from the -! component specified in the {\tt Router} {\tt Rout}. An error will -! result if the size of the attribute vector does not match the size -! parameter stored in the {\tt Router}. -! -! Requires a corresponding {\tt send\_} or {\tt isend\_}to be called -! on the other component. -! -! The optional argument {\tt Tag} can be used to set the tag value used in -! the data transfer. DefaultTag will be used otherwise. {\tt Tag} must be -! the same in the matching {\tt send\_} -! -! If data for a grid point is coming from more than one process, {\tt recv\_} -! will overwrite the duplicate values leaving the last received value -! in the output aV. If the optional argument {\tt Sum} is invoked, the output -! will contain the sum of any duplicate values received for the same grid point. -! -! Will not return until all data has been received. -! -! {\bf N.B.:} The {\tt AttrVect} argument in the corresponding -! {\tt send\_} call is assumed to have exactly the same attributes -! in exactly the same order as {\tt aV}. -! -! !INTERFACE: - - subroutine recv_(aV, Rout, Tag, Sum) -! -! !USES: -! - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - Type(AttrVect), intent(inout) :: aV - -! !INPUT PARAMETERS: -! - Type(Router), intent(inout) :: Rout - integer,optional, intent(in) :: Tag - logical,optional, intent(in) :: Sum - -! !REVISION HISTORY: -! 25Jul03 - R. Jacob - Rewrite using irecv and waitrecv -!EOP ___________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::recv_' - - call irecv_(aV,Rout,Tag,Sum) - - call waitrecv_(aV,Rout,Sum) - -end subroutine recv_ - - -end module m_Transfer diff --git a/src/externals/mct/mkinstalldirs b/src/externals/mct/mkinstalldirs deleted file mode 100755 index d2d5f21b611..00000000000 --- a/src/externals/mct/mkinstalldirs +++ /dev/null @@ -1,111 +0,0 @@ -#! /bin/sh -# mkinstalldirs --- make directory hierarchy -# Author: Noah Friedman -# Created: 1993-05-16 -# Public domain - -errstatus=0 -dirmode="" - -usage="\ -Usage: mkinstalldirs [-h] [--help] [-m mode] dir ..." 
- -# process command line arguments -while test $# -gt 0 ; do - case $1 in - -h | --help | --h*) # -h for help - echo "$usage" 1>&2 - exit 0 - ;; - -m) # -m PERM arg - shift - test $# -eq 0 && { echo "$usage" 1>&2; exit 1; } - dirmode=$1 - shift - ;; - --) # stop option processing - shift - break - ;; - -*) # unknown option - echo "$usage" 1>&2 - exit 1 - ;; - *) # first non-opt arg - break - ;; - esac -done - -for file -do - if test -d "$file"; then - shift - else - break - fi -done - -case $# in - 0) exit 0 ;; -esac - -case $dirmode in - '') - if mkdir -p -- . 2>/dev/null; then - echo "mkdir -p -- $*" - exec mkdir -p -- "$@" - fi - ;; - *) - if mkdir -m "$dirmode" -p -- . 2>/dev/null; then - echo "mkdir -m $dirmode -p -- $*" - exec mkdir -m "$dirmode" -p -- "$@" - fi - ;; -esac - -for file -do - set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'` - shift - - pathcomp= - for d - do - pathcomp="$pathcomp$d" - case $pathcomp in - -*) pathcomp=./$pathcomp ;; - esac - - if test ! -d "$pathcomp"; then - echo "mkdir $pathcomp" - - mkdir "$pathcomp" || lasterr=$? - - if test ! -d "$pathcomp"; then - errstatus=$lasterr - else - if test ! -z "$dirmode"; then - echo "chmod $dirmode $pathcomp" - lasterr="" - chmod "$dirmode" "$pathcomp" || lasterr=$? - - if test ! -z "$lasterr"; then - errstatus=$lasterr - fi - fi - fi - fi - - pathcomp="$pathcomp/" - done -done - -exit $errstatus - -# Local Variables: -# mode: shell-script -# sh-indentation: 2 -# End: -# mkinstalldirs ends here diff --git a/src/externals/mct/mpeu/Makefile b/src/externals/mct/mpeu/Makefile deleted file mode 100644 index dfadaec624e..00000000000 --- a/src/externals/mct/mpeu/Makefile +++ /dev/null @@ -1,126 +0,0 @@ -.NOTPARALLEL: -# MACHINE AND COMPILER FLAGS - -include ../Makefile.conf - -VPATH = $(SRCDIR)/mpeu -SHELL = /bin/sh - -INCPATH += $(INCFLAG). 
$(INCFLAG)../ - -# SOURCE FILES - -MODULE = mpeu - -SRCS_F90 = m_IndexBin_char.F90 \ - m_IndexBin_integer.F90 \ - m_IndexBin_logical.F90 \ - m_List.F90 \ - m_MergeSorts.F90 \ - m_Filename.F90 \ - m_FcComms.F90 \ - m_Permuter.F90 \ - m_SortingTools.F90 \ - m_String.F90 \ - m_StrTemplate.F90 \ - m_chars.F90 \ - m_die.F90 \ - m_dropdead.F90 \ - m_FileResolv.F90 \ - m_flow.F90 \ - m_inpak90.F90 \ - m_ioutil.F90 \ - m_mall.F90 \ - m_mpif.F90 \ - m_mpif90.F90 \ - m_mpout.F90 \ - m_rankMerge.F90 \ - m_realkinds.F90 \ - m_stdio.F90 \ - m_TraceBack.F90 \ - m_zeit.F90 - -SRCS_C = get_zeits.c - -OBJS_ALL = $(SRCS_C:.c=.o) \ - $(SRCS_F90:.F90=.o) - - -# TARGETS - -all: lib$(MODULE).a - -lib$(MODULE).a: $(OBJS_ALL) - $(RM) $@ - $(AR) $@ $(OBJS_ALL) - $(RANLIB) $@ - -# ADDITIONAL FLAGS SPECIFIC FOR MPEU COMPILATION - -MPEUFLAGS = - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .c .o - -.c.o: - $(CC) -c $(CPPDEFS) $(CFLAGS) $(INCPATH) $< - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MPEUFLAGS) $< - -clean: - ${RM} *.o *.mod lib$(MODULE).a - -install: all - $(MKINSTALLDIRS) $(libdir) $(includedir) - $(INSTALL) lib$(MODULE).a -m 644 $(libdir) - @for modfile in *.mod; do \ - echo $(INSTALL) $$modfile -m 644 $(includedir); \ - $(INSTALL) $$modfile -m 644 $(includedir); \ - done - -# DEPENDENCIES - -m_IndexBin_char.o: m_die.o m_stdio.o -m_IndexBin_integer.o: m_die.o m_stdio.o -m_IndexBin_logical.o: m_die.o m_stdio.o -m_List.o: m_String.o m_die.o m_mall.o -m_MergeSorts.o: m_die.o m_realkinds.o m_stdio.o -m_Filename.o: -m_Permuter.o: m_die.o m_realkinds.o -m_SortingTools.o: m_IndexBin_char.o m_IndexBin_integer.o m_IndexBin_logical.o m_MergeSorts.o m_Permuter.o m_rankMerge.o -m_String.o: m_die.o m_mall.o m_mpif90.o -m_StrTemplate.o: m_chars.o m_die.o m_stdio.o -m_chars.o: -m_die.o: m_dropdead.o m_flow.o m_mpif90.o m_mpout.o m_stdio.o -m_dropdead.o: m_mpif90.o m_stdio.o -m_flow.o: m_chars.o -m_inpak90.o: m_die.o m_ioutil.o m_mall.o m_mpif90.o m_realkinds.o m_stdio.o -m_ioutil.o: m_stdio.o -m_mall.o: m_chars.o m_die.o m_ioutil.o m_realkinds.o m_stdio.o -m_mpif.o: -m_mpif90.o: m_mpif.o m_realkinds.o m_stdio.o -m_mpout.o: m_dropdead.o m_ioutil.o m_mpif90.o m_stdio.o -m_rankMerge.o: -m_realkinds.o: -m_stdio.o: -m_zeit.o: m_SortingTools.o m_die.o m_ioutil.o m_mpif90.o m_stdio.o get_zeits.o -get_zeits.o: -m_FileResolv.o: m_die.o m_StrTemplate.o -m_TraceBack.o: m_die.o m_stdio.o m_String.o - - - - - - - - - - - - - - diff --git a/src/externals/mct/mpeu/README b/src/externals/mct/mpeu/README deleted file mode 100644 index 06d3cc4d93e..00000000000 --- a/src/externals/mct/mpeu/README +++ /dev/null @@ -1,59 +0,0 @@ -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- - -This directory contains a version of MPEU distributed as part -of the Model Coupling Toolkit (MCT). MPEU was written by -Jing Guo of the NASA Data Assimilation Office. - -This copy of MPEU provided by Jing Guo. Usage is covered -by terms in the file MCT/COPYRIGHT. - -MCT distribution contents: -MCT/ -MCT/COPYRIGHT -MCT/doc/ -MCT/examples/ -MCT/mct/ -MCT/mpeu/ <- You are here -MCT/protex/ - -A complete distribution of MCT can be obtained from http://www.mcs.anl.gov/mct. - ---------------------------------------------------- -Build instructions: - -In top level directory, type "./configure", then "make". - -If "./configure" has already been run, you can also type "make" -in this directory. 
- ---------------------------------------------------- -NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS - -28Sep99 - Jing Guo - - Changed supported libraries to - - mpeu: libmpeu.a libeu.a with the _same_ interface in mpeu/ - - - Implemented several design changes: - - . Removed -r8/_R8_ compiler flags in Makefile.conf.IRIX64. - The current design is expected to support both single and - double precision REAL kinds. The selection should be made - by the compiler through Fortran 90 generic interface - feature. - - . Added MP_type() function in mpif90.F90 to allow a more - portable approach of using MPI_REAL. - - . Removed _SINGLE_PE_ flag to make the interface in mpeu/ - portable to both library versions. - - -14Sep99 - Jing Guo - Targets supported in this directory - - mpeu: make -f Makefile all for MPI env - eu: make -f Makefile.1pe all for single PE env - diff --git a/src/externals/mct/mpeu/assertmpeu.H b/src/externals/mct/mpeu/assertmpeu.H deleted file mode 100644 index ef83c6e464e..00000000000 --- a/src/externals/mct/mpeu/assertmpeu.H +++ /dev/null @@ -1,55 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: assertmpeu.H - an #include section of ASSERT() macro for Fortran -! -! !DESCRIPTION: -! -! !INTERFACE: -! -! #define NDEBUG -! #include "assertmpeu.H" -! ... -! use m_die,only : assert_ -! ... -! ASSERT( ) -! ALWAYS_ASSERT( ) -! -! !BUGS -! This macro requires Fortran friendly cpp() for macro processing. -! -! !REVISION HISTORY: -! 17Aug07 - R. Jacob - renamed from assert.H to -! prevent namespace collision with assert.h on Mac -! 28Aug00 - Jing Guo -! - modified -! - added the prolog for a brief documentation -! before - Tom Clune -! - Created for MP PSAS -!EOP ___________________________________________________________________ - - ! This implementation allows multi-"#include" in a single file - -#ifndef ALWAYS_ASSERT - -#define ALWAYS_ASSERT(EX) If (.not. (EX) ) call assert_("EX",__FILE__,__LINE__) -#endif - - -#ifndef ASSERT - -#ifdef NDEBUG - -#define ASSERT(EX) ! Skip assertion: EX - -#else - -#define ASSERT(EX) ALWAYS_ASSERT(EX) - -#endif - -#endif diff --git a/src/externals/mct/mpeu/get_zeits.c b/src/externals/mct/mpeu/get_zeits.c deleted file mode 100644 index b8065c5ebad..00000000000 --- a/src/externals/mct/mpeu/get_zeits.c +++ /dev/null @@ -1,76 +0,0 @@ -/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !ROUTINE: get_zeits - a C interface to times for Fortran calls -! -! !DESCRIPTION: -! -! !INTERFACE: */ - /* - System times() dependencies: - */ - - -#include -#ifndef NOTIMES -#include -#endif - -#include /* POSIX standard says CLOCKS_PER_SEC is here */ -#include "config.h" -/* - * CLK_TCK is obsolete - replace with CLOCKS_PER_SEC - */ - -#define ZCLK_TCK ((double)CLOCKS_PER_SEC) - - - - - /* Prototype: */ - - void FC_FUNC(get_zeits,GET_ZEITS)(double *zts); - void FC_FUNC(get_ztick,GET_ZTICK)(double *tic); - -/*!REVISION HISTORY: -! 12Mar98 - Jing Guo - initial prototype/prolog/code -! 06Jul99 - J.W. 
Larson - support for AIX platform -!EOP */ - -/* Implementations: */ - -void FC_FUNC(get_zeits,GET_ZEITS)(zts) - double *zts; -{ - -#ifndef NOTIMES - struct tms tm; - double secs; - secs=1./ZCLK_TCK; - - zts[0]=times(&tm)*secs; - zts[1]=tm.tms_utime*secs; - zts[2]=tm.tms_stime*secs; - zts[3]=tm.tms_cutime*secs; - zts[4]=tm.tms_cstime*secs; -#else - zts[0]=0.; - zts[1]=0.; - zts[2]=0.; - zts[3]=0.; - zts[4]=0.; -#endif - -} - -void FC_FUNC(get_ztick,GET_ZTICK)(tic) - double *tic; -{ - tic[0]=1./ZCLK_TCK; -} - diff --git a/src/externals/mct/mpeu/m_FcComms.F90 b/src/externals/mct/mpeu/m_FcComms.F90 deleted file mode 100644 index 0bd675c2b75..00000000000 --- a/src/externals/mct/mpeu/m_FcComms.F90 +++ /dev/null @@ -1,688 +0,0 @@ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_FcComms - MPI collective communication operators -! with explict flow control -! -! !DESCRIPTION: -! -! This module includes implementations of MPI collective operators that -! have proven problematic on certain systems when run at scale. By -! introducing additonal flow control, these problems (exhausting internal -! system resources) can be avoided. These routines were ported from -! the Community Atmosphere Model's spmd_utils.F90. -! -! !INTERFACE: -! -! Disable the use of the MPI ready send protocol by default, to -! address recurrent issues with poor performance or incorrect -! functionality in MPI libraries. When support is known to be robust, -! or for experimentation, can be re-enabled by defining the CPP token -! _USE_MPI_RSEND during the build process. -! -#ifndef _USE_MPI_RSEND -#define MPI_RSEND MPI_SEND -#define mpi_rsend mpi_send -#define MPI_IRSEND MPI_ISEND -#define mpi_irsend mpi_isend -#endif - - module m_FcComms - - implicit none - - private ! except - - public :: fc_gather_int ! flow control version of mpi_gather for integer vectors - public :: fc_gather_fp ! flow control version of mpi_gather for FP vectors - public :: fc_gatherv_int ! flow control version of mpi_gatherv for integer vectors - public :: fc_gatherv_fp ! flow control version of mpi_gatherv for integer vectors - public :: get_fcblocksize ! get current value of max_gather_block_size - public :: set_fcblocksize ! set current value of max_gather_block_size - - -! !REVISION HISTORY: -! 30Jan09 - P.H. Worley - imported routines -! from CAM's spmd_utils to create this module. - - integer, public :: max_gather_block_size = 64 - character(len=*),parameter :: myname='MCT(MPEU)::m_FcComms' - - contains - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: fc_gather_int - Gather an array of type integer -! -! !DESCRIPTION: -! This routine gathers a {\em distributed} array of type {\em integer} -! to the {\tt root} process. Explicit handshaking messages are used -! to control the number of processes communicating with the root -! at any one time. -! -! If flow_cntl optional parameter -! < 0 : use MPI_Gather -! >= 0: use point-to-point with handshaking messages and -! preposting receive requests up to -! min(max(1,flow_cntl),max_gather_block_size) -! ahead if optional flow_cntl parameter is present. -! Otherwise, max_gather_block_size is used in its place. -! Default value is max_gather_block_size. -! !INTERFACE: -! - subroutine fc_gather_int (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnt, recvtype, & - root, comm, flow_cntl ) -! -! !USES: -! - use m_die - use m_mpif90 -! -! !INPUT PARAMETERS: -! 
- integer, intent(in) :: sendbuf(*) - integer, intent(in) :: sendcnt - integer, intent(in) :: sendtype - integer, intent(in) :: recvcnt - integer, intent(in) :: recvtype - integer, intent(in) :: root - integer, intent(in) :: comm - integer, optional, intent(in) :: flow_cntl - -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: recvbuf(*) - -! !REVISION HISTORY: -! 30Jan09 - P.H. Worley - imported from spmd_utils.F90 -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::fc_gather_int' - - integer :: signal - logical fc_gather ! use explicit flow control? - integer gather_block_size ! number of preposted receive requests - - integer :: mytid, mysize, mtag, p, i, count, displs - integer :: preposts, head, tail - integer :: rcvid(max_gather_block_size) - integer :: status(MP_STATUS_SIZE) - integer :: ier ! MPI error code - - signal = 1 - if ( present(flow_cntl) ) then - if (flow_cntl >= 0) then - gather_block_size = min(max(1,flow_cntl),max_gather_block_size) - fc_gather = .true. - else - fc_gather = .false. - endif - else - gather_block_size = max(1,max_gather_block_size) - fc_gather = .true. - endif - - if (fc_gather) then - - call mpi_comm_rank (comm, mytid, ier) - call mpi_comm_size (comm, mysize, ier) - mtag = 0 - if (root .eq. mytid) then - -! prepost gather_block_size irecvs, and start receiving data - preposts = min(mysize-1, gather_block_size) - head = 0 - count = 0 - do p=0, mysize-1 - if (p .ne. root) then - if (recvcnt > 0) then - count = count + 1 - if (count > preposts) then - tail = mod(head,preposts) + 1 - call mpi_wait (rcvid(tail), status, ier) - end if - head = mod(head,preposts) + 1 - displs = p*recvcnt - call mpi_irecv ( recvbuf(displs+1), recvcnt, & - recvtype, p, mtag, comm, rcvid(head), & - ier ) - call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier ) - end if - end if - end do - -! copy local data - displs = mytid*recvcnt - do i=1,sendcnt - recvbuf(displs+i) = sendbuf(i) - enddo - -! wait for final data - do i=1,min(count,preposts) - call mpi_wait (rcvid(i), status, ier) - enddo - - else - - if (sendcnt > 0) then - call mpi_recv ( signal, 1, sendtype, root, mtag, comm, & - status, ier ) - call mpi_rsend ( sendbuf, sendcnt, sendtype, root, mtag, & - comm, ier ) - end if - - endif - if (ier /= 0) then - call MP_perr_die(myname_,':: (point-to-point implementation)',ier) - end if - - else - - call mpi_gather (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnt, recvtype, & - root, comm, ier) - if (ier /= 0) then - call MP_perr_die(myname_,':: MPI_GATHER',ier) - end if - - endif - - return - end subroutine fc_gather_int - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: fc_gather_fp - Gather an array of type FP -! -! !DESCRIPTION: -! This routine gathers a {\em distributed} array of type {\em FP} to -! the {\tt root} process. Explicit handshaking messages are used -! to control the number of processes communicating with the root -! at any one time. -! -! If flow_cntl optional parameter -! < 0 : use MPI_Gather -! >= 0: use point-to-point with handshaking messages and -! preposting receive requests up to -! min(max(1,flow_cntl),max_gather_block_size) -! ahead if optional flow_cntl parameter is present. -! Otherwise, max_gather_block_size is used in its place. -! Default value is max_gather_block_size. -! !INTERFACE: -! - subroutine fc_gather_fp (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnt, recvtype, & - root, comm, flow_cntl ) -! -! !USES: -! 
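
The prepost-and-handshake protocol shared by fc_gather_int and the other fc_gather* routines can be seen in isolation in the sketch below. It is a hypothetical standalone MPI program, not part of MCT: the root preposts a small window of nonblocking receives and sends each sender a one-integer go-ahead, so at most "window" senders target the root at any one time; plain MPI_Send stands in for the ready send, matching the default when _USE_MPI_RSEND is not defined.

program fc_gather_sketch
  use mpi
  implicit none
  integer, parameter :: window = 2          ! plays the role of max_gather_block_size
  integer :: comm, rank, nproc, ier, mtag, p, i, nposted, head, tail, signal, val
  integer :: rcvid(window)
  integer :: status(MPI_STATUS_SIZE)
  integer, allocatable :: recvbuf(:)

  call MPI_Init(ier)
  comm = MPI_COMM_WORLD
  call MPI_Comm_rank(comm, rank, ier)
  call MPI_Comm_size(comm, nproc, ier)
  mtag   = 0
  signal = 1
  val    = rank + 100                       ! one integer contributed by every rank

  if (rank == 0) then
     allocate(recvbuf(0:nproc-1))
     recvbuf(0) = val                       ! root's own contribution
     head = 0
     nposted = 0
     do p = 1, nproc-1
        nposted = nposted + 1
        if (nposted > window) then          ! window full: finish the oldest receive first
           tail = mod(head, window) + 1
           call MPI_Wait(rcvid(tail), status, ier)
        end if
        head = mod(head, window) + 1
        call MPI_Irecv(recvbuf(p), 1, MPI_INTEGER, p, mtag, comm, rcvid(head), ier)
        call MPI_Send(signal, 1, MPI_INTEGER, p, mtag, comm, ier)   ! go-ahead handshake
     end do
     do i = 1, min(nposted, window)         ! drain the receives still outstanding
        call MPI_Wait(rcvid(i), status, ier)
     end do
     print *, 'gathered:', recvbuf
  else
     call MPI_Recv(signal, 1, MPI_INTEGER, 0, mtag, comm, status, ier)  ! wait for go-ahead
     call MPI_Send(val, 1, MPI_INTEGER, 0, mtag, comm, ier)  ! MPI_Rsend in the real routines
  end if

  call MPI_Finalize(ier)
end program fc_gather_sketch

Run under an MPI launcher (for example mpirun -np 8 ./a.out); rank 0 prints the gathered values.
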
- use m_realkinds, only : FP - use m_die - use m_mpif90 -! -! !INPUT PARAMETERS: -! - real (FP), intent(in) :: sendbuf(*) - integer, intent(in) :: sendcnt - integer, intent(in) :: sendtype - integer, intent(in) :: recvcnt - integer, intent(in) :: recvtype - integer, intent(in) :: root - integer, intent(in) :: comm - integer, optional, intent(in) :: flow_cntl - -! !OUTPUT PARAMETERS: -! - real (FP), intent(out) :: recvbuf(*) - -! !REVISION HISTORY: -! 30Jan09 - P.H. Worley - imported from spmd_utils.F90 -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::fc_gather_fp' - - real (FP) :: signal - logical fc_gather ! use explicit flow control? - integer gather_block_size ! number of preposted receive requests - - integer :: mytid, mysize, mtag, p, i, count, displs - integer :: preposts, head, tail - integer :: rcvid(max_gather_block_size) - integer :: status(MP_STATUS_SIZE) - integer :: ier ! MPI error code - - signal = 1.0 - if ( present(flow_cntl) ) then - if (flow_cntl >= 0) then - gather_block_size = min(max(1,flow_cntl),max_gather_block_size) - fc_gather = .true. - else - fc_gather = .false. - endif - else - gather_block_size = max(1,max_gather_block_size) - fc_gather = .true. - endif - - if (fc_gather) then - - call mpi_comm_rank (comm, mytid, ier) - call mpi_comm_size (comm, mysize, ier) - mtag = 0 - if (root .eq. mytid) then - -! prepost gather_block_size irecvs, and start receiving data - preposts = min(mysize-1, gather_block_size) - head = 0 - count = 0 - do p=0, mysize-1 - if (p .ne. root) then - if (recvcnt > 0) then - count = count + 1 - if (count > preposts) then - tail = mod(head,preposts) + 1 - call mpi_wait (rcvid(tail), status, ier) - end if - head = mod(head,preposts) + 1 - displs = p*recvcnt - call mpi_irecv ( recvbuf(displs+1), recvcnt, & - recvtype, p, mtag, comm, rcvid(head), & - ier ) - call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier ) - end if - end if - end do - -! copy local data - displs = mytid*recvcnt - do i=1,sendcnt - recvbuf(displs+i) = sendbuf(i) - enddo - -! wait for final data - do i=1,min(count,preposts) - call mpi_wait (rcvid(i), status, ier) - enddo - - else - - if (sendcnt > 0) then - call mpi_recv ( signal, 1, sendtype, root, mtag, comm, & - status, ier ) - call mpi_rsend ( sendbuf, sendcnt, sendtype, root, mtag, & - comm, ier ) - end if - - endif - if (ier /= 0) then - call MP_perr_die(myname_,':: (point-to-point implementation)',ier) - end if - - else - - call mpi_gather (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnt, recvtype, & - root, comm, ier) - if (ier /= 0) then - call MP_perr_die(myname_,':: MPI_GATHER',ier) - end if - - endif - - return - end subroutine fc_gather_fp - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: fc_gatherv_int - Gather an array of type integer -! -! !DESCRIPTION: -! This routine gathers a {\em distributed} array of type {\em integer} -! to the {\tt root} process. Explicit handshaking messages are used -! to control the number of processes communicating with the root -! at any one time. -! -! If flow_cntl optional parameter -! < 0 : use MPI_Gatherv -! >= 0: use point-to-point with handshaking messages and -! preposting receive requests up to -! min(max(1,flow_cntl),max_gather_block_size) -! ahead if optional flow_cntl parameter is present. -! Otherwise, max_gather_block_size is used in its place. -! Default value is max_gather_block_size. -! !INTERFACE: -! 
- subroutine fc_gatherv_int (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnts, displs, recvtype, & - root, comm, flow_cntl ) -! -! !USES: -! - use m_die - use m_mpif90 -! -! !INPUT PARAMETERS: -! - integer, intent(in) :: sendbuf(*) - integer, intent(in) :: sendcnt - integer, intent(in) :: sendtype - integer, intent(in) :: recvcnts(*) - integer, intent(in) :: displs(*) - integer, intent(in) :: recvtype - integer, intent(in) :: root - integer, intent(in) :: comm - integer, optional, intent(in) :: flow_cntl - -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: recvbuf(*) - -! !REVISION HISTORY: -! 30Jan09 - P.H. Worley - imported from spmd_utils.F90 -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::fc_gatherv_int' - - integer :: signal - logical fc_gather ! use explicit flow control? - integer gather_block_size ! number of preposted receive requests - - integer :: mytid, mysize, mtag, p, q, i, count - integer :: preposts, head, tail - integer :: rcvid(max_gather_block_size) - integer :: status(MP_STATUS_SIZE) - integer :: ier ! MPI error code - - signal = 1 - if ( present(flow_cntl) ) then - if (flow_cntl >= 0) then - gather_block_size = min(max(1,flow_cntl),max_gather_block_size) - fc_gather = .true. - else - fc_gather = .false. - endif - else - gather_block_size = max(1,max_gather_block_size) - fc_gather = .true. - endif - - if (fc_gather) then - - call mpi_comm_rank (comm, mytid, ier) - call mpi_comm_size (comm, mysize, ier) - mtag = 0 - if (root .eq. mytid) then - -! prepost gather_block_size irecvs, and start receiving data - preposts = min(mysize-1, gather_block_size) - head = 0 - count = 0 - do p=0, mysize-1 - if (p .ne. root) then - q = p+1 - if (recvcnts(q) > 0) then - count = count + 1 - if (count > preposts) then - tail = mod(head,preposts) + 1 - call mpi_wait (rcvid(tail), status, ier) - end if - head = mod(head,preposts) + 1 - call mpi_irecv ( recvbuf(displs(q)+1), recvcnts(q), & - recvtype, p, mtag, comm, rcvid(head), & - ier ) - call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier ) - end if - end if - end do - -! copy local data - q = mytid+1 - do i=1,sendcnt - recvbuf(displs(q)+i) = sendbuf(i) - enddo - -! wait for final data - do i=1,min(count,preposts) - call mpi_wait (rcvid(i), status, ier) - enddo - - else - - if (sendcnt > 0) then - call mpi_recv ( signal, 1, sendtype, root, mtag, comm, & - status, ier ) - call mpi_rsend ( sendbuf, sendcnt, sendtype, root, mtag, & - comm, ier ) - end if - - endif - if (ier /= 0) then - call MP_perr_die(myname_,':: (point-to-point implementation)',ier) - end if - - else - - call mpi_gatherv (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnts, displs, recvtype, & - root, comm, ier) - if (ier /= 0) then - call MP_perr_die(myname_,':: MPI_GATHERV',ier) - end if - - endif - - return - end subroutine fc_gatherv_int - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: fc_gatherv_fp - Gather an array of type FP -! -! !DESCRIPTION: -! This routine gathers a {\em distributed} array of type {\em FP} to -! the {\tt root} process. Explicit handshaking messages are used -! to control the number of processes communicating with the root -! at any one time. -! -! If flow_cntl optional parameter -! < 0 : use MPI_Gatherv -! >= 0: use point-to-point with handshaking messages and -! preposting receive requests up to -! min(max(1,flow_cntl),max_gather_block_size) -! ahead if optional flow_cntl parameter is present. -! 
Otherwise, max_gather_block_size is used in its place. -! Default value is max_gather_block_size. -! !INTERFACE: -! - subroutine fc_gatherv_fp (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnts, displs, recvtype, & - root, comm, flow_cntl ) -! -! !USES: -! - use m_realkinds, only : FP - use m_die - use m_mpif90 -! -! !INPUT PARAMETERS: -! - real (FP), intent(in) :: sendbuf(*) - integer, intent(in) :: sendcnt - integer, intent(in) :: sendtype - integer, intent(in) :: recvcnts(*) - integer, intent(in) :: displs(*) - integer, intent(in) :: recvtype - integer, intent(in) :: root - integer, intent(in) :: comm - integer, optional, intent(in) :: flow_cntl - -! !OUTPUT PARAMETERS: -! - real (FP), intent(out) :: recvbuf(*) - -! !REVISION HISTORY: -! 30Jan09 - P.H. Worley - imported from spmd_utils.F90 -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::fc_gatherv_fp' - - real (FP) :: signal - logical fc_gather ! use explicit flow control? - integer gather_block_size ! number of preposted receive requests - - integer :: mytid, mysize, mtag, p, q, i, count - integer :: preposts, head, tail - integer :: rcvid(max_gather_block_size) - integer :: status(MP_STATUS_SIZE) - integer :: ier ! MPI error code - - signal = 1.0 - if ( present(flow_cntl) ) then - if (flow_cntl >= 0) then - gather_block_size = min(max(1,flow_cntl),max_gather_block_size) - fc_gather = .true. - else - fc_gather = .false. - endif - else - gather_block_size = max(1,max_gather_block_size) - fc_gather = .true. - endif - - if (fc_gather) then - - call mpi_comm_rank (comm, mytid, ier) - call mpi_comm_size (comm, mysize, ier) - mtag = 0 - if (root .eq. mytid) then - -! prepost gather_block_size irecvs, and start receiving data - preposts = min(mysize-1, gather_block_size) - head = 0 - count = 0 - do p=0, mysize-1 - if (p .ne. root) then - q = p+1 - if (recvcnts(q) > 0) then - count = count + 1 - if (count > preposts) then - tail = mod(head,preposts) + 1 - call mpi_wait (rcvid(tail), status, ier) - end if - head = mod(head,preposts) + 1 - call mpi_irecv ( recvbuf(displs(q)+1), recvcnts(q), & - recvtype, p, mtag, comm, rcvid(head), & - ier ) - call mpi_send ( signal, 1, recvtype, p, mtag, comm, ier ) - end if - end if - end do - -! copy local data - q = mytid+1 - do i=1,sendcnt - recvbuf(displs(q)+i) = sendbuf(i) - enddo - -! wait for final data - do i=1,min(count,preposts) - call mpi_wait (rcvid(i), status, ier) - enddo - - else - - if (sendcnt > 0) then - call mpi_recv ( signal, 1, sendtype, root, mtag, comm, & - status, ier ) - call mpi_rsend ( sendbuf, sendcnt, sendtype, root, mtag, & - comm, ier ) - end if - - endif - if (ier /= 0) then - call MP_perr_die(myname_,':: (point-to-point implementation)',ier) - end if - - else - - call mpi_gatherv (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnts, displs, recvtype, & - root, comm, ier) - if (ier /= 0) then - call MP_perr_die(myname_,':: MPI_GATHERV',ier) - end if - - endif - - return - end subroutine fc_gatherv_fp - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: get_fcblocksize - return max_gather_block_size -! -! !DESCRIPTION: -! This function returns the current value of max_gather_block_size -! -! !INTERFACE: - - function get_fcblocksize() - -! !USES: -! -! No external modules are used by this function. - - implicit none - -! !INPUT PARAMETERS: -! - -! !OUTPUT PARAMETERS: -! - integer :: get_fcblocksize - -! !REVISION HISTORY: -! 03Mar09 - R. 
Jacob (jacob@mcs.anl.gov) -- intial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::get_fcblocksize' - - get_fcblocksize = max_gather_block_size - - end function get_fcblocksize - -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: set_fcblocksize - set max_gather_block_size -! -! !DESCRIPTION: -! This function sets the current value of max_gather_block_size -! -! !INTERFACE: - - subroutine set_fcblocksize(gather_block_size) - -! !USES: -! -! No external modules are used by this function. - - implicit none - -! !INPUT PARAMETERS: -! - integer :: gather_block_size - -! !OUTPUT PARAMETERS: -! - -! !REVISION HISTORY: -! 03Mar09 - R. Jacob (jacob@mcs.anl.gov) -- intial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//':: set_fcblocksize' - - max_gather_block_size = gather_block_size - - end subroutine set_fcblocksize - - end module m_FcComms diff --git a/src/externals/mct/mpeu/m_FileResolv.F90 b/src/externals/mct/mpeu/m_FileResolv.F90 deleted file mode 100644 index 8145aeb43a3..00000000000 --- a/src/externals/mct/mpeu/m_FileResolv.F90 +++ /dev/null @@ -1,273 +0,0 @@ -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_FileResolv --- Resolve file name templates -! -! !INTERFACE: -! - - MODULE m_FileResolv - -! !USES: - - use m_StrTemplate ! grads style templates - use m_die - Implicit NONE - -! -! !PUBLIC MEMBER FUNCTIONS: -! - PRIVATE - PUBLIC FileResolv - PUBLIC remote_cp - PUBLIC gunzip -! -! !DESCRIPTION: This module provides routines for resolving GrADS like -! file name templates. -! -! !REVISION HISTORY: -! -! 10Jan2000 da Silva Initial code. -! -!EOP -!------------------------------------------------------------------------- - - character(len=255) :: remote_cp = 'rcp' - character(len=255) :: gunzip = 'gunzip' - -CONTAINS - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !IROUTINE: FileResolv -- Resolve file name templates (single file) -! -! !INTERFACE: -! - subroutine FileResolv ( expid, nymd, nhms, templ, fname, & - stat, cache ) - -! !USES: - - IMPLICIT NONE - -! -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: expid ! Experiment id - integer, intent(in) :: nymd ! Year-month-day - integer, intent(in) :: nhms ! Hour-min-sec - character(len=*), intent(in) :: templ ! file name template - -! -! !OUTPUT PARAMETERS: -! - character(len=*), intent(out) :: fname ! resolved file name - - integer, OPTIONAL, intent(out) :: stat ! Status - ! 0 - file exists - ! 1 - file does not exist - - logical, OPTIONAL, intent(in) :: cache ! skips rcp/gunzip if - ! file exists locally - -! !DESCRIPTION: Resolve file name templates, rcp'ing files from remote and -! performing gunzip'ing as necessary. -! -! !TO DO: -! 1. Expand environment variables in templates -! -! !REVISION HISTORY: -! -! 10Jan2000 da Silva Initial code, -! 23Jul2002 J. Larson - fixed bug detected by the -! Fujitsu frt compiler (on the VPP). -! 
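
As a usage illustration of the FileResolv interface documented above, the hypothetical program below resolves a GrADS-style template for one experiment, date, and time. The experiment id, template, and resulting file name are invented, and the token meanings (%s for the experiment id, %y4%m2%d2 for the date, %h2 for the hour) are assumed to follow m_StrTemplate's GrADS conventions; it also assumes the MPEU library is built so the module can be used.

program fileresolv_example
  use m_FileResolv, only : FileResolv
  implicit none
  character(len=255) :: fname
  integer :: ier

  ! Resolve a GrADS-style template for experiment "exp01" at 2000-01-10, 12Z
  call FileResolv('exp01', 20000110, 120000, '%s.prog.t%y4%m2%d2_%h2z.nc', &
                  fname, stat=ier)

  if (ier == 0) then
     print *, 'resolved, file is present locally: ', trim(fname)
  else
     print *, 'could not resolve/stage the file, stat = ', ier
  end if
end program fileresolv_example
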
-!EOP -!-------------------------------------------------------------------------- - - character(len=*), parameter :: myname = 'MCT(MPEU)::FileResolv' - -#if SYSUNICOS || CPRCRAY - integer, external :: ishell -#elif (!defined __GFORTRAN__) - integer, external :: system -#endif - character(len=255) :: path, host, dirn, basen, head, tail, cmd, filen - - integer i, rc - logical :: fexists, caching - - -! Default is cache = .true. -! ------------------------- - if ( present(cache) ) then - caching = cache - else - caching = .TRUE. - end if - -! Start by expanding template -! --------------------------- - call strTemplate ( path, templ, 'GRADS', trim(expid), nymd, nhms, rc ) - if ( rc .ne. 0 ) then - if ( present(stat) ) then - stat = 1 - return - else - call die ( myname, 'cannot expand template '//trim(templ) ) - end if - end if - - -! Parse file name -! --------------- - i = index ( trim(path), ':' ) - if ( i .gt. 0 ) then - host = path(1:i-1) - fname = path(i+1:) - else - host = '' - fname = path - end if - i = index ( trim(fname), '/', back=.true. ) - if ( i .gt. 1 ) then - dirn = fname(1:i-1) - basen = fname(i+1:) - else if ( i .gt. 0 ) then - dirn = fname(1:i) - basen = fname(i+1:) - else - dirn = '' - basen = fname - end if - i = index ( basen, '.', back=.true. ) - if ( i .gt. 0 ) then - head = basen(1:i-1) - tail = basen(i+1:) - else - head = basen - tail = '' - end if - -! print *, 'Template = |'//trim(templ)//'|' -! print *, ' path = |'//trim(path)//'|' -! print *, ' host = |'//trim(host)//'|' -! print *, ' dirn = |'//trim(dirn)//'|' -! print *, ' basen = |'//trim(basen)//'|' -! print *, ' head = |'//trim(head)//'|' -! print *, ' tail = |'//trim(tail)//'|' -! print *, ' fname = |'//trim(fname)//'|' - - -! If file is remote, bring it here -! -------------------------------- - if ( len_trim(host) .gt. 0 ) then - if ( trim(tail) .eq. 'gz' ) then - inquire ( file=trim(head), exist=fexists ) - filen = head - else - inquire ( file=trim(basen), exist=fexists ) - filen = basen - end if - if ( .not. ( fexists .and. caching ) ) then - cmd = trim(remote_cp) // ' ' // & - trim(host) // ':' // trim(fname) // ' . ' -#if SYSUNICOS || CPRCRAY - rc = ishell ( cmd ) -#else - rc = system ( cmd ) -#endif - - if ( rc .eq. 0 ) then - fname = basen - else - if ( present(stat) ) then ! return an error code - stat = 2 - return - else ! shut down - fname = basen - call die ( myname, 'cannot execute: '//trim(cmd) ) - end if - end if - else - fname = filen - call warn(myname,'using cached version of '//trim(filen) ) - end if - - -! If not, make sure file exists locally -! ------------------------------------- - else - - inquire ( file=trim(fname), exist=fexists ) - if ( .not. fexists ) then - if ( present(stat) ) then - stat = 3 - else - call die(myname,'cannot find '//trim(fname) ) - end if - end if - - end if - - -! If file is gzip'ed, leave original alone and create uncompressed -! version in the local directory -! ---------------------------------------------------------------- - if ( trim(tail) .eq. 'gz' ) then - inquire ( file=trim(head), exist=fexists ) ! do we have a local copy? - if ( .not. ( fexists .and. caching ) ) then - if ( len_trim(host) .gt. 0 ) then ! remove file.gz - cmd = trim(gunzip) // ' -f ' // trim(fname) - else ! keep file.gz - cmd = trim(gunzip) // ' -c ' // trim(fname) // ' > ' // trim(head) - end if -#if SYSUNICOS || CPRCRAY - rc = ishell ( cmd ) -#else - rc = system ( cmd ) -#endif - if ( rc .eq. 
0 ) then - fname = head - else - if ( present(stat) ) then - stat = 4 - return - else - call die ( myname, 'cannot execute: '//trim(cmd) ) - end if - end if - else - fname = head - call warn(myname,'using cached version of '//trim(head) ) - end if - end if - - -! Once more, make sure file exists -! -------------------------------- - inquire ( file=trim(fname), exist=fexists ) - if ( .not. fexists ) then - if ( present(stat) ) then - stat = 3 - else - call die(myname,'cannot find '//trim(fname) ) - end if - end if - - -! All done -! -------- - if ( present(stat) ) stat = 0 - - end subroutine FileResolv - - end MODULE m_FileResolv diff --git a/src/externals/mct/mpeu/m_Filename.F90 b/src/externals/mct/mpeu/m_Filename.F90 deleted file mode 100644 index 1032a512c29..00000000000 --- a/src/externals/mct/mpeu/m_Filename.F90 +++ /dev/null @@ -1,106 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Filename - Filename manipulation routines -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_Filename - implicit none - private ! except - - public :: Filename_base ! basename() - public :: Filename_dir ! dirname() - - interface Filename_base; module procedure base_; end interface - interface Filename_dir; module procedure dir_; end interface - -! !REVISION HISTORY: -! 14Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_Filename' - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: base_ - basename -! -! !DESCRIPTION: -! -! !INTERFACE: - - function base_(cstr,sfx) - implicit none - character(len=*) ,intent(in) :: cstr - character(len=*),optional,intent(in) :: sfx - character(len=len(cstr)) :: base_ - -! !REVISION HISTORY: -! 14Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::base_' - integer :: l,lb,le - - l =index(cstr,'/',back=.true.) - lb=l+1 ! correct either a '/' is in the string or not. - le=len_trim(cstr) - - if(present(sfx)) then - - l=le-len_trim(sfx) - if(sfx==cstr(l+1:le)) le=l - - endif - - base_=cstr(lb:le) - -end function base_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dir_ - dirname -! -! !DESCRIPTION: -! -! !INTERFACE: - - function dir_(cstr) - implicit none - character(len=*),intent(in) :: cstr - character(len=len(cstr)) :: dir_ - -! !REVISION HISTORY: -! 14Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::dir_' - integer :: l - - l =index(cstr,'/',back=.true.) - select case(l) - case(0) - dir_='.' 
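
A small usage sketch for the Filename_base and Filename_dir routines defined in this file (the path and suffix are invented, and the m_Filename module is assumed to be available); the expected output is noted in the comments.

program filename_example
  use m_Filename, only : Filename_base, Filename_dir
  implicit none
  character(len=*), parameter :: p = '/data/exp01/prog.t20000110.nc'

  print *, trim(Filename_dir(p))          ! prints: /data/exp01
  print *, trim(Filename_base(p))         ! prints: prog.t20000110.nc
  print *, trim(Filename_base(p, '.nc'))  ! prints: prog.t20000110  (suffix stripped)
end program filename_example
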
- case(1) - dir_='/' - case default - dir_=cstr(1:l-1) - end select - -end function dir_ - -end module m_Filename diff --git a/src/externals/mct/mpeu/m_IndexBin_char.F90 b/src/externals/mct/mpeu/m_IndexBin_char.F90 deleted file mode 100644 index db83e996ad8..00000000000 --- a/src/externals/mct/mpeu/m_IndexBin_char.F90 +++ /dev/null @@ -1,257 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_IndexBin_char - Template of indexed bin-sorting module -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_IndexBin_char - implicit none - private ! except - - public :: IndexBin - interface IndexBin; module procedure & - IndexBin0_, & - IndexBin1_, & - IndexBin1w_ - end interface - -! !REVISION HISTORY: -! 17Feb99 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_IndexBin_char' - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: IndexBin0_ - Indexed sorting for a single value -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine IndexBin0_(n,indx,keys,key0,ln0) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, intent(in) :: n - integer, dimension(n), intent(inout) :: indx - character(len=*), dimension(n), intent(in) :: keys - character(len=*), intent(in) :: key0 ! value - integer,optional,intent(out) :: ln0 - -! !REVISION HISTORY: -! 16Feb99 - Jing Guo - initial prototype/prolog/code -! 27Sep99 - Jing Guo - Fixed a bug pointed out by -! Chris Redder -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::IndexBin0_' - integer,allocatable,dimension(:) :: inew - integer :: ni,ix,i,ier - integer :: ln(0:1),lc(0:1) -!________________________________________ - - allocate(inew(n),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate() error, stat =',ier - call die(myname_) - endif -!________________________________________ - ! Count numbers entries for the given key0 - - lc(0)=1 ! the location of values the same as key0 - ln(0)=0 - do i=1,n - if(keys(i) == key0) ln(0)=ln(0)+1 - end do - - lc(1)=ln(0)+1 ! the location of values not the same as key0 -!________________________________________ - ! Reset the counters - ln(0:1)=0 - do i=1,n - ix=indx(i) - if(keys(ix) == key0) then - ni=lc(0)+ln(0) - ln(0)=ln(0)+1 - - else - ni=lc(1)+ln(1) - ln(1)=ln(1)+1 - endif - - inew(ni)=ix - end do - -!________________________________________ - ! Sort out the old pointers according to the new order - indx(:)=inew(:) - if(present(ln0)) ln0=ln(0) -!________________________________________ - - deallocate(inew) - -end subroutine IndexBin0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: IndexBin1_ - Indexed sorting into a set of given bins -! -! !DESCRIPTION: -! -! 
!INTERFACE: - - subroutine IndexBin1_(n,indx,keys,bins,lcs,lns) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, intent(in) :: n - integer, dimension(n),intent(inout) :: indx - character(len=*),dimension(n),intent(in) :: keys - character(len=*),dimension(:),intent(in) :: bins ! values - integer, dimension(:),intent(out) :: lcs ! locs. of the bins - integer, dimension(:),intent(out) :: lns ! sizes of the bins - -! !REVISION HISTORY: -! 16Feb99 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::IndexBin1_' - integer,allocatable,dimension(:) :: ibin,inew - integer :: nbin,lc0,ln0 - integer :: ni,ix,ib,i,ier -!________________________________________ - - nbin=size(bins) - if(nbin==0) return -!________________________________________ - - allocate(ibin(n),inew(n),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate() error, stat =',ier - call die(myname_) - endif -!________________________________________ - - do ib=1,nbin - lns(ib)=0 - lcs(ib)=0 - end do -!________________________________________ - ! Count numbers in every bin, and store the bin-ID for - ! later use. - do i=1,n - ix=indx(i) - - call search_(keys(ix),nbin,bins,ib) ! ib = 1:nbin; =0 if not found - - ibin(i)=ib - if(ib /= 0) lns(ib)=lns(ib)+1 - end do -!________________________________________ - ! Count the locations of every bin. - lc0=1 - do ib=1,nbin - lcs(ib)=lc0 - lc0=lc0+lns(ib) - end do -!________________________________________ - ! Reset the counters - ln0=0 - lns(1:nbin)=0 - do i=1,n - ib=ibin(i) ! the bin-index of keys(indx(i)) - if(ib/=0) then - ni=lcs(ib)+lns(ib) - lns(ib)=lns(ib)+1 - else - ni=lc0+ln0 - ln0=ln0+1 - endif - inew(ni)=indx(i) ! the current value is put in the new order - end do -!________________________________________ - ! Sort out the old pointers according to the new order - indx(:)=inew(:) -!________________________________________ - - deallocate(ibin,inew) - -contains -subroutine search_(key,nbin,bins,ib) - implicit none - character(len=*), intent(in) :: key - integer,intent(in) :: nbin - character(len=*), intent(in),dimension(:) :: bins - integer,intent(out) :: ib - integer :: i - - ib=0 - do i=1,nbin - if(key==bins(i)) then - ib=i - return - endif - end do -end subroutine search_ - -end subroutine IndexBin1_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: IndexBin1w_ - IndexBin1_ wrapped without working arrays -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine IndexBin1w_(n,indx,keys,bins) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, intent(in) :: n - integer,dimension(n),intent(inout) :: indx - character(len=*),dimension(n),intent(in) :: keys - character(len=*),dimension(:),intent(in) :: bins ! values - -! !REVISION HISTORY: -! 
17Feb99 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::IndexBin1w_' - integer :: ier - integer,dimension(:),allocatable :: lcs,lns - integer :: nbin - - nbin=size(bins) - if(nbin==0) return - - allocate(lcs(nbin),lns(nbin),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_,': allocate() error, stat =',ier - call die(myname_) - endif - - call IndexBin1_(n,indx,keys,bins,lcs,lns) - - deallocate(lcs,lns) -end subroutine IndexBin1w_ -end module m_IndexBin_char diff --git a/src/externals/mct/mpeu/m_IndexBin_integer.F90 b/src/externals/mct/mpeu/m_IndexBin_integer.F90 deleted file mode 100644 index 8eb5abf277c..00000000000 --- a/src/externals/mct/mpeu/m_IndexBin_integer.F90 +++ /dev/null @@ -1,257 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_IndexBin_integer - Template of indexed bin-sorting module -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_IndexBin_integer - implicit none - private ! except - - public :: IndexBin - interface IndexBin; module procedure & - IndexBin0_, & - IndexBin1_, & - IndexBin1w_ - end interface - -! !REVISION HISTORY: -! 17Feb99 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_IndexBin_integer' - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: IndexBin0_ - Indexed sorting for a single value -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine IndexBin0_(n,indx,keys,key0,ln0) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, intent(in) :: n - integer, dimension(n), intent(inout) :: indx - integer, dimension(n), intent(in) :: keys - integer, intent(in) :: key0 ! The key value to be moved to front - integer,optional,intent(out) :: ln0 - -! !REVISION HISTORY: -! 16Feb99 - Jing Guo - initial prototype/prolog/code -! 27Sep99 - Jing Guo - Fixed a bug pointed out by -! Chris Redder -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::IndexBin0_' - integer,allocatable,dimension(:) :: inew - integer :: ni,ix,i,ier - integer :: ln(0:1),lc(0:1) -!________________________________________ - - allocate(inew(n),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate() error, stat =',ier - call die(myname_) - endif -!________________________________________ - ! Count numbers entries for the given key0 - - lc(0)=1 ! the location of values the same as key0 - ln(0)=0 - do i=1,n - if(keys(i) == key0) ln(0)=ln(0)+1 - end do - - lc(1)=ln(0)+1 ! the location of values not the same as key0 -!________________________________________ - ! Reset the counters - ln(0:1)=0 - do i=1,n - ix=indx(i) - if(keys(ix) == key0) then - ni=lc(0)+ln(0) - ln(0)=ln(0)+1 - - else - ni=lc(1)+ln(1) - ln(1)=ln(1)+1 - endif - - inew(ni)=ix - end do - -!________________________________________ - ! 
Sort out the old pointers according to the new order - indx(:)=inew(:) - if(present(ln0)) ln0=ln(0) -!________________________________________ - - deallocate(inew) - -end subroutine IndexBin0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: IndexBin1_ - Indexed sorting into a set of given bins -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine IndexBin1_(n,indx,keys,bins,lcs,lns) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, intent(in) :: n - integer, dimension(n),intent(inout) :: indx - integer, dimension(n),intent(in) :: keys - integer, dimension(:),intent(in) :: bins! values of the bins - integer, dimension(:),intent(out) :: lcs ! locs. of the bins - integer, dimension(:),intent(out) :: lns ! sizes of the bins - -! !REVISION HISTORY: -! 16Feb99 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::IndexBin1_' - integer,allocatable,dimension(:) :: ibin,inew - integer :: nbin,lc0,ln0 - integer :: ni,ix,ib,i,ier -!________________________________________ - - nbin=size(bins) - if(nbin==0) return -!________________________________________ - - allocate(ibin(n),inew(n),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate() error, stat =',ier - call die(myname_) - endif -!________________________________________ - - do ib=1,nbin - lns(ib)=0 - lcs(ib)=0 - end do -!________________________________________ - ! Count numbers in every bin, and store the bin-ID for - ! later use. - do i=1,n - ix=indx(i) - - call search_(keys(ix),nbin,bins,ib) ! ib = 1:nbin; =0 if not found - - ibin(i)=ib - if(ib /= 0) lns(ib)=lns(ib)+1 - end do -!________________________________________ - ! Count the locations of every bin. - lc0=1 - do ib=1,nbin - lcs(ib)=lc0 - lc0=lc0+lns(ib) - end do -!________________________________________ - ! Reset the counters - ln0=0 - lns(1:nbin)=0 - do i=1,n - ib=ibin(i) ! the bin-index of keys(indx(i)) - if(ib/=0) then - ni=lcs(ib)+lns(ib) - lns(ib)=lns(ib)+1 - else - ni=lc0+ln0 - ln0=ln0+1 - endif - inew(ni)=indx(i) ! the current value is put in the new order - end do -!________________________________________ - ! Sort out the old pointers according to the new order - indx(:)=inew(:) -!________________________________________ - - deallocate(ibin,inew) - -contains -subroutine search_(key,nbin,bins,ib) - implicit none - integer, intent(in) :: key - integer,intent(in) :: nbin - integer, intent(in),dimension(:) :: bins - integer,intent(out) :: ib - integer :: i - - ib=0 - do i=1,nbin - if(key==bins(i)) then - ib=i - return - endif - end do -end subroutine search_ - -end subroutine IndexBin1_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: IndexBin1w_ - IndexBin1_ wrapped without working arrays -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine IndexBin1w_(n,indx,keys,bins) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, intent(in) :: n - integer,dimension(n),intent(inout) :: indx - integer,dimension(n),intent(in) :: keys - integer,dimension(:),intent(in) :: bins ! values of the bins - -! !REVISION HISTORY: -! 
17Feb99 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::IndexBin1w_' - integer :: ier - integer,dimension(:),allocatable :: lcs,lns - integer :: nbin - - nbin=size(bins) - if(nbin==0) return - - allocate(lcs(nbin),lns(nbin),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_,': allocate() error, stat =',ier - call die(myname_) - endif - - call IndexBin1_(n,indx,keys,bins,lcs,lns) - - deallocate(lcs,lns) -end subroutine IndexBin1w_ -end module m_IndexBin_integer diff --git a/src/externals/mct/mpeu/m_IndexBin_logical.F90 b/src/externals/mct/mpeu/m_IndexBin_logical.F90 deleted file mode 100644 index 710600eb212..00000000000 --- a/src/externals/mct/mpeu/m_IndexBin_logical.F90 +++ /dev/null @@ -1,105 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_IndexBin_logical - Template of indexed bin-sorting module -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_IndexBin_logical - implicit none - private ! except - - public :: IndexBin - interface IndexBin; module procedure & - IndexBin0_ - end interface - -! !REVISION HISTORY: -! 17Feb99 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_IndexBin_logical' - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: IndexBin0_ - Indexed sorting for a single value -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine IndexBin0_(n,indx,keys,key0,ln0) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, intent(in) :: n - integer, dimension(n), intent(inout) :: indx - logical, dimension(n), intent(in) :: keys - logical, intent(in) :: key0 ! The key value to be moved to front - integer,optional,intent(out) :: ln0 - -! !REVISION HISTORY: -! 16Feb99 - Jing Guo - initial prototype/prolog/code -! 27Sep99 - Jing Guo - Fixed a bug pointed out by -! Chris Redder -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::IndexBin0_' - integer,allocatable,dimension(:) :: inew - integer :: ni,ix,i,ier - integer :: ln(0:1),lc(0:1) -!________________________________________ - - allocate(inew(n),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate() error, stat =',ier - call die(myname_) - endif -!________________________________________ - ! Count numbers entries for the given key0 - - lc(0)=1 ! the location of values the same as key0 - ln(0)=0 - do i=1,n - if(keys(i) .eqv. key0) ln(0)=ln(0)+1 - end do - - lc(1)=ln(0)+1 ! the location of values not the same as key0 -!________________________________________ - ! Reset the counters - ln(0:1)=0 - do i=1,n - ix=indx(i) - if(keys(ix) .eqv. key0) then - ni=lc(0)+ln(0) - ln(0)=ln(0)+1 - - else - ni=lc(1)+ln(1) - ln(1)=ln(1)+1 - endif - - inew(ni)=ix - end do - -!________________________________________ - ! 
Sort out the old pointers according to the new order - indx(:)=inew(:) - if(present(ln0)) ln0=ln(0) -!________________________________________ - - deallocate(inew) - -end subroutine IndexBin0_ -end module m_IndexBin_logical diff --git a/src/externals/mct/mpeu/m_List.F90 b/src/externals/mct/mpeu/m_List.F90 deleted file mode 100644 index 0e420c4bf26..00000000000 --- a/src/externals/mct/mpeu/m_List.F90 +++ /dev/null @@ -1,2112 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_List - A List Manager -! -! !DESCRIPTION: A {\em List} is a character buffer comprising -! substrings called {\em items} separated by colons, combined with -! indexing information describing (1) the starting point in the character -! buffer of each substring, and (2) the length of each substring. The -! only constraints on the valid list items are (1) the value of an -! item does not contain the ``\verb":"'' delimitter, and (2) leading -! and trailing blanks are stripped from any character string presented -! to define a list item (although any imbeded blanks are retained). -! -! {\bf Example:} Suppose we wish to define a List containing the -! items {\tt 'latitude'}, {\tt 'longitude'}, and {\tt 'pressure'}. -! The character buffer of the List containing these items will be the -! 27-character string -! \begin{verbatim} -! 'latitude:longitude:pressure' -! \end{verbatim} -! and the indexing information is summarized in the table below. -! -!\begin{table}[htbp] -!\begin{center} -!\begin{tabular}{|c|c|c|} -!\hline -!{\bf Item} & {\bf Starting Point in Buffer} & {\bf Length} \\ -!\hline -!{\tt latitude} & 1 & 8 \\ -!\hline -!{\tt longitude} & 9 & 9 \\ -!\hline -!{\tt pressure} & 20 & 8\\ -!\hline -!\end{tabular} -!\end{center} -!\end{table} -! -! One final note: All operations for the {\tt List} datatype are -! {\bf case sensitive}. -! -! !INTERFACE: - - module m_List - -! !USES: -! -! No other Fortran modules are used. - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: List ! The class data structure - - Type List -#ifdef SEQUENCE - sequence -#endif - character(len=1),dimension(:),pointer :: bf - integer, dimension(:,:),pointer :: lc - End Type List - -! 
!PUBLIC MEMBER FUNCTIONS: - - public :: init - public :: clean - public :: nullify - public :: index - public :: get_indices - public :: test_indices - public :: nitem - public :: get - public :: identical - public :: assignment(=) - public :: allocated - public :: copy - public :: exportToChar - public :: exportToString - public :: CharBufferSize - public :: append - public :: concatenate - public :: bcast - public :: send - public :: recv - public :: GetSharedListIndices - - interface init ; module procedure & - init_, & - initStr_, & - initstr1_ - end interface - interface clean; module procedure clean_; end interface - interface nullify; module procedure nullify_; end interface - interface index; module procedure & - index_, & - indexStr_ - end interface - interface get_indices; module procedure get_indices_; end interface - interface test_indices; module procedure test_indices_; end interface - interface nitem; module procedure nitem_; end interface - interface get ; module procedure & - get_, & - getall_, & - getrange_ - end interface - interface identical; module procedure identical_; end interface - interface assignment(=) - module procedure copy_ - end interface - interface allocated ; module procedure & - allocated_ - end interface - interface copy ; module procedure copy_ ; end interface - interface exportToChar ; module procedure & - exportToChar_ - end interface - interface exportToString ; module procedure & - exportToString_ - end interface - interface CharBufferSize ; module procedure & - CharBufferSize_ - end interface - interface append ; module procedure append_ ; end interface - interface concatenate ; module procedure concatenate_ ; end interface - interface bcast; module procedure bcast_; end interface - interface send; module procedure send_; end interface - interface recv; module procedure recv_; end interface - interface GetSharedListIndices; module procedure & - GetSharedListIndices_ - end interface - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 16May01 - J. Larson - Several changes / fixes: -! public interface for copy_(), corrected version of copy_(), -! corrected version of bcast_(). -! 15Oct01 - J. Larson - Added the LOGICAL -! function identical_(). -! 14Dec01 - J. Larson - Added the LOGICAL -! function allocated_(). -! 13Feb02 - J. Larson - Added the List query -! functions exportToChar() and CharBufferLength(). -! 13Jun02- R.L. Jacob - Move GetSharedListIndices -! from mct to this module. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_List' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_ - Initialize a List from a CHARACTER String -! -! !DESCRIPTION: -! -! A list is a string in the form of ``\verb"Larry:Moe:Curly"'', -! or ``\verb"lat:lon:lev"'', combined with substring location and -! length information. Through the initialization call, the -! items delimited by ``\verb":"'' are stored as an array of sub- -! strings of a long string, accessible through an array of substring -! indices. The only constraints now on the valid list entries are, -! (1) the value of an entry does not contain ``\verb":"'', and (2) -! The leading and the trailing blanks are insignificant, although -! any imbeded blanks are. For example, -! -! \begin{verbatim} -! 
call init_(aList, 'batman :SUPERMAN:Green Lantern: Aquaman') -! \end{verbatim} -! will result in {\tt aList} having four items: 'batman', 'SUPERMAN', -! 'Green Lantern', and 'Aquaman'. That is -! \begin{verbatim} -! aList%bf = 'batman:SUPERMAN:Green Lantern:Aquaman' -! \end{verbatim} -! -! !INTERFACE: - - subroutine init_(aList,Values) - -! !USES: -! - use m_die,only : die - use m_mall,only : mall_mci,mall_ison - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*),intent(in) :: Values ! ":" delimited names - -! !OUTPUT PARAMETERS: -! - type(List),intent(out) :: aList ! an indexed string values - - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::init_' - character(len=1) :: c - integer :: ib,ie,id,lb,le,ni,i,ier - - ! Pass 1, getting the sizes - le=0 - ni=0 - ib=1 - ie=0 - id=0 - do i=1,len(Values) - c=Values(i:i) - select case(c) - case(' ') - if(ib==i) ib=i+1 ! moving ib up, starting from the next - case(':') - if(ib<=ie) then - ni=ni+1 - id=1 ! mark a ':' - endif - ib=i+1 ! moving ib up, starting from the next - case default - ie=i - if(id==1) then ! count an earlier marked ':' - id=0 - le=le+1 - endif - le=le+1 - end select - end do - if(ib<=ie) ni=ni+1 - - ! COMPILER MAY NOT SIGNAL AN ERROR IF - ! ALIST HAS ALREADY BEEN INITIALIZED. - ! PLEASE CHECK FOR PREVIOUS INITIALIZATION - - allocate(aList%bf(le),aList%lc(0:1,ni),stat=ier) - if(ier /= 0) call die(myname_,'allocate()',ier) - - if(mall_ison()) then - call mall_mci(aList%bf,myname) - call mall_mci(aList%lc,myname) - endif - - ! Pass 2, copy the value and assign the pointers - lb=1 - le=0 - ni=0 - ib=1 - ie=0 - id=0 - do i=1,len(Values) - c=Values(i:i) - - select case(c) - case(' ') - if(ib==i) ib=i+1 ! moving ib up, starting from the next - case(':') - if(ib<=ie) then - ni=ni+1 - aList%lc(0:1,ni)=(/lb,le/) - id=1 ! mark a ':' - endif - - ib=i+1 ! moving ib up, starting from the next - lb=le+2 ! skip to the next non-':' and non-',' - case default - ie=i - if(id==1) then ! copy an earlier marked ':' - id=0 - le=le+1 - aList%bf(le)=':' - endif - - le=le+1 - aList%bf(le)=c - end select - end do - if(ib<=ie) then - ni=ni+1 - aList%lc(0:1,ni)=(/lb,le/) - endif - - end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initStr_ - Initialize a List Using the String Type -! -! !DESCRIPTION: This routine initializes a {\tt List} datatype given -! an input {\tt String} datatype (see {\tt m\_String} for more -! information regarding the {\tt String} type). The contents of the -! input {\tt String} argument {\tt pstr} must adhere to the restrictions -! stated for character input stated in the prologue of the routine -! {\tt init\_()} in this module. -! -! !INTERFACE: - - subroutine initStr_(aList, pstr) - -! !USES: -! - use m_String, only : String,toChar - - implicit none - -! !INPUT PARAMETERS: -! - type(String),intent(in) :: pstr - -! !OUTPUT PARAMETERS: -! - type(List),intent(out) :: aList ! an indexed string values - - -! !REVISION HISTORY: -! 
23Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initStr_' - - call init_(aList,toChar(pstr)) - - end subroutine initStr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initStr1_ - Initialize a List Using an Array of Strings -! -! !DESCRIPTION: This routine initializes a {\tt List} datatype given -! as input array of {\tt String} datatypes (see {\tt m\_String} for more -! information regarding the {\tt String} type). The contents of each -! {\tt String} element of the input array {\tt strs} must adhere to the -! restrictions stated for character input stated in the prologue of the -! routine {\tt init\_()} in this module. Specifically, no element in -! {\tt strs} may contain the colon \verb':' delimiter, and any -! leading or trailing blanks will be stripped (though embedded blank -! spaces will be retained). For example, consider an invocation of -! {\tt initStr1\_()} where the array {\tt strs(:)} contains four entries: -! {\tt strs(1)='John'}, {\tt strs(2)=' Paul'}, -! {\tt strs(3)='George '}, and {\tt strs(4)=' Ringo'}. The resulting -! {\tt List} output {\tt aList} will have -! \begin{verbatim} -! aList%bf = 'John:Paul:George:Ringo' -! \end{verbatim} -! !INTERFACE: - - subroutine initStr1_(aList, strs) - -! !USES: -! - use m_String, only : String,toChar - use m_String, only : len - use m_String, only : ptr_chars - use m_die,only : die - - implicit none - -! !INPUT PARAMETERS: -! - type(String),dimension(:),intent(in) :: strs - -! !OUTPUT PARAMETERS: -! - type(List),intent(out) :: aList ! an indexed string values - - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initStr1_' - character(len=1),allocatable,dimension(:) :: ch1 - integer :: ier - integer :: n,i,lc,le - - n=size(strs) - le=0 - do i=1,n - le=le+len(strs(i)) - end do - le=le+n-1 ! for n-1 ":"s - - allocate(ch1(le),stat=ier) - if(ier/=0) call die(myname_,'allocate()',ier) - - le=0 - do i=1,n - if(i>1) then - le=le+1 - ch1(le)=':' - endif - - lc=le+1 - le=le+len(strs(i)) - ch1(lc:le)=ptr_chars(strs(i)) - end do - - call init_(aList,toChar(ch1)) - - deallocate(ch1,stat=ier) - if(ier/=0) call die(myname_,'deallocate()',ier) - - end subroutine initStr1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Deallocate Memory Used by a List -! -! !DESCRIPTION: This routine deallocates the allocated memory components -! of the input/output {\tt List} argument {\tt aList}. Specifically, it -! deallocates {\tt aList\%bf} and {\tt aList\%lc}. If the optional -! output {\tt INTEGER} arguemnt {\tt stat} is supplied, no warning will -! be printed if the Fortran intrinsic {\tt deallocate()} returns with an -! error condition. -! -! !INTERFACE: - - subroutine clean_(aList, stat) - -! !USES: -! - use m_die, only : warn - use m_mall, only : mall_mco,mall_ison - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(List), intent(inout) :: aList - -! !OUTPUT PARAMETERS: -! 
- integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 1Mar02 - E.T. Ong - added stat argument and -! removed die to prevent crashes. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - if(mall_ison()) then - if(associated(aList%bf)) call mall_mco(aList%bf,myname_) - if(associated(aList%lc)) call mall_mco(aList%lc,myname_) - endif - - if(associated(aList%bf) .and. associated(aList%lc)) then - - deallocate(aList%bf, aList%lc, stat=ier) - - if(present(stat)) then - stat=ier - else - if(ier /= 0) call warn(myname_,'deallocate(aList%...)',ier) - endif - - endif - - end subroutine clean_ - -!--- ------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nullify_ - Nullify Pointers in a List -! -! !DESCRIPTION: In Fortran 90, pointers may have three states: -! (1) {\tt ASSOCIATED}, that is the pointer is pointing at a target, -! (2) {\tt UNASSOCIATED}, and (3) {\tt UNINITIALIZED}. On some -! platforms, the Fortran intrinsic function {\tt associated()} -! will view uninitialized pointers as {\tt UNASSOCIATED} by default. -! This is not always the case. It is good programming practice to -! nullify pointers if they are not to be used. This routine nullifies -! the pointers present in the {\tt List} datatype. -! -! !INTERFACE: - - subroutine nullify_(aList) - -! !USES: -! - use m_die,only : die - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(List),intent(inout) :: aList - -! !REVISION HISTORY: -! 18Jun01 - J.W. Larson - - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nullify_' - - nullify(aList%bf) - nullify(aList%lc) - - end subroutine nullify_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: nitem_ - Return the Number of Items in a List -! -! !DESCRIPTION: -! This function enumerates the number of items in the input {\tt List} -! argument {\tt aList}. For example, suppose -! \begin{verbatim} -! aList%bf = 'John:Paul:George:Ringo' -! \end{verbatim} -! Then, -! $${\tt nitem\_(aList)} = 4 .$$ -! -! !INTERFACE: - - integer function nitem_(aList) - -! !USES: -! - implicit none - -! !INPUT PARAMETERS: -! - type(List),intent(in) :: aList - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 10Oct01 - J.W. Larson - modified routine to -! check pointers aList%bf and aList%lc using the f90 -! intrinsic ASSOCIATED before proceeding with the item -! count. If these pointers are UNASSOCIATED, an item -! count of zero is returned. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::nitem_' - integer :: NumItems - - ! Initialize item count to zero - - NumItems = 0 - - ! If the List pointers are ASSOCIATED, perform item count: - - if(ASSOCIATED(aList%bf) .and. ASSOCIATED(aList%lc)) then - NumItems = size(aList%lc,2) - endif - - nitem_ = NumItems - - end function nitem_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: index_ - Return Rank in a List of a Given Item (CHARACTER) -! -! !DESCRIPTION: -! This function returns the rank of an item (defined by the -! {\tt CHARACTER} argument {\tt item}) in the input {\tt List} argument -! {\tt aList}. If {\tt item} is not present in {\tt aList}, then zero -! is returned. For example, suppose -! \begin{verbatim} -! aList%bf = 'Bob:Carol:Ted:Alice' -! \end{verbatim} -! Then, ${\tt index\_(aList, 'Ted')}=3$, ${\tt index\_(aList, 'Carol')}=2$, -! and ${\tt index\_(aList, 'The Dude')}=0.$ -! -! !INTERFACE: - - integer function index_(aList, item) - -! !USES: -! - use m_String, only : toChar - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: aList ! a List of names - character(len=*),intent(in) :: item ! a given item name - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::index_' - integer :: i,lb,le - integer :: itemLength, length, nMatch, j - - ! How long is the input item name? - - itemLength = len(item) - - ! Set output to zero (no item match) value: - - index_=0 - - ! Now, go through the aList one item at a time - - ITEM_COMPARE: do i=1,size(aList%lc,2) ! == nitem_(aList) - - ! Compute some stats for the current item in aList: - - lb=aList%lc(0,i) ! starting index of item in aList%bf - le=aList%lc(1,i) ! ending index item in aList%bf - - length = le -lb + 1 ! length of the current item - if(length /= itemLength) then ! this list item can't match input item - - CYCLE ! that is, jump to the next item in aList... - - else ! compare one character at a time... - - ! Initialize number of matching characters in the two strings - - nMatch = 0 - - ! Now, compare item to the current item in aList one character - ! at a time: - - CHAR_COMPARE: do j=1,length - if(aList%bf(lb+j-1) == item(j:j)) then ! a match for this character - nMatch = nMatch + 1 - else - EXIT - endif - end do CHAR_COMPARE - - ! Check the number of leading characters in the current item in aList - ! that match the input item. If it is equal to the item length, then - ! we have found a match and are finished. Otherwise, we cycle on to - ! the next item in aList. - - if(nMatch == itemLength) then - index_ = i - EXIT - endif - -! Old code that does not work with V. of the IBM -! if(item==toChar(aList%bf(lb:le))) then -! index_=i -! exit - endif - end do ITEM_COMPARE - - end function index_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: indexStr_ - Return Rank in a List of a Given Item (String) -! -! !DESCRIPTION: -! This function performs the same operation as the function -! {\tt index\_()}, but the item to be indexed is instead presented in -! the form of a {\tt String} datatype (see the module {\tt m\_String} -! for more information about the {\tt String} type). This routine -! searches through the input {\tt List} argument {\tt aList} for an -! item that matches the item defined by {\tt itemStr}, and if a match -! is found, the rank of the item in the list is returned (see also the -! prologue for the routine {\tt index\_()} in this module). If no match -! is found, a value of zero is returned. -! -! 
!INTERFACE: - - integer function indexStr_(aList, itemStr) - -! !USES: -! - use m_String,only : String,toChar - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: aList ! a List of names - type(String), intent(in) :: itemStr - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 25Oct02 - R. Jacob - just call index_ above -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::indexStr_' - - indexStr_=0 - indexStr_=index_(aList,toChar(itemStr)) - - end function indexStr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: allocated_ - Check Pointers in a List for Association Status -! -! !DESCRIPTION: -! This function checks the input {\tt List} argument {\tt inList} to -! determine whether or not it has been allocated. It does this by -! invoking the Fortran90 intrinsic function {\tt associated()} on the -! pointers {\tt inList\%bf} and {\tt inList\%lc}. If both of these -! pointers are associated, the return value is {\tt .TRUE.}. -! -! {\bf N.B.:} In Fortran90, pointers have three different states: -! {\tt ASSOCIATED}, {\tt UNASSOCIATED}, and {\tt UNDEFINED}. -! If a pointer is {\tt UNDEFINED}, this function may return either -! {\tt .TRUE.} or {\tt .FALSE.} values, depending on the Fortran90 -! compiler. To avoid such problems, we advise that users invoke the -! {\tt List} method {\tt nullify()} to nullify any {\tt List} pointers -! for {\tt List} variables that are not initialized. -! -! !INTERFACE: - - logical function allocated_(inList) - -! !USES: - - use m_die,only : die - - implicit none - -! !INPUT PARAMETERS: - - type(List), intent(in) :: inList - -! !REVISION HISTORY: -! 14Dec01 - J. Larson - inital version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::allocated_' - - allocated_ = associated(inList%bf) .and. associated(inList%lc) - - end function allocated_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: copy_ - Copy a List -! -! !DESCRIPTION: -! This routine copies the contents of the input {\tt List} argument -! {\tt xL} into the output {\tt List} argument {\tt yL}. -! -! !INTERFACE: - - subroutine copy_(yL,xL) ! yL=xL - -! !USES: -! - use m_die,only : die - use m_stdio - use m_String ,only : String - use m_String ,only : String_clean - use m_mall,only : mall_mci,mall_ison - - implicit none - -! !INPUT PARAMETERS: -! - type(List),intent(in) :: xL - -! !OUTPUT PARAMETERS: -! - type(List),intent(out) :: yL - - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -! 16May01 - J. Larson - simpler, working -! version that exploits the String datatype (see m_String) -! 1Aug02 - Larson/Ong - Added logic for correct copying of blank -! Lists. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::copy_' - type(String) DummStr - - if(size(xL%lc,2) > 0) then - - ! Download input List info from xL to String DummStr - - call getall_(DummStr,xL) - - ! 
Initialize yL from DummStr - - call initStr_(yL,DummStr) - - call String_clean(DummStr) - - else - if(size(xL%lc,2) < 0) then ! serious error... - write(stderr,'(2a,i8)') myname_, & - ':: FATAL size(xL%lc,2) = ',size(xL%lc,2) - endif - ! Initialize yL as a blank list - call init_(yL, ' ') - endif - - end subroutine copy_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportToChar_ - Export List to a CHARACTER -! -! !DESCRIPTION: This function returns the character buffer portion of -! the input {\tt List} argument {\tt inList}---that is, the contents of -! {\tt inList\%bf}---as a {\tt CHARACTER} (suitable for printing). An -! example of the use of this function is: -! \begin{verbatim} -! write(stdout,'(1a)') exportToChar(inList) -! \end{verbatim} -! which writes the contents of {\tt inList\%bf} to the Fortran device -! {\tt stdout}. -! -! !INTERFACE: - - function exportToChar_(inList) - -! !USES: -! - use m_die, only : die - use m_stdio, only : stderr - use m_String, only : String - use m_String, only : String_ToChar => toChar - use m_String, only : String_clean - - implicit none - -! ! INPUT PARAMETERS: - - type(List), intent(in) :: inList - -! ! OUTPUT PARAMETERS: - - character(len=size(inList%bf,1)) :: exportToChar_ - -! !REVISION HISTORY: -! 13Feb02 - J. Larson - initial version. -! 06Jun03 - R. Jacob - return blank if List is not allocated -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportToChar_' - type(String) DummStr - - ! Download input List info from inList to String DummStr - if(allocated_(inList)) then - call getall_(DummStr,inList) - exportToChar_ = String_ToChar(DummStr) - call String_clean(DummStr) - else - exportToChar_ = '' - endif - - end function exportToChar_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: exportToString_ - Export List to a String -! -! !DESCRIPTION: This function returns the character buffer portion of -! the input {\tt List} argument {\tt inList}---that is, the contents of -! {\tt inList\%bf}---as a {\tt String} (see the mpeu module m\_String -! for more information regarding the {\tt String} type). This function -! was created to circumvent problems with implementing inheritance of -! the function {\tt exportToChar\_()} to other datatypes build on top -! of the {\tt List} type. -! -! !INTERFACE: - - function exportToString_(inList) - -! !USES: -! - use m_die, only : die - use m_stdio, only : stderr - - use m_String, only : String - use m_String, only : String_init => init - - implicit none - -! ! INPUT PARAMETERS: - - type(List), intent(in) :: inList - -! ! OUTPUT PARAMETERS: - - type(String) :: exportToString_ - -! !REVISION HISTORY: -! 14Aug02 - J. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::exportToString_' - - if(allocated_(inList)) then - call getall_(exportToString_, inList) - else - call String_init(exportToString_, 'NOTHING') - endif - - end function exportToString_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: CharBufferSize_ - Return size of a List's Character Buffer -! -! !DESCRIPTION: This function returns the length of the character -! buffer portion of the input {\tt List} argument {\tt inList} (that -! is, the number of characters stored in {\tt inList\%bf}) as an -! {\tt INTEGER}. Suppose for the sake of argument that {\tt inList} -! was created using the following call to {\tt init\_()}: -! \begin{verbatim} -! call init_(inList, 'Groucho:Harpo:Chico:Zeppo') -! \end{verbatim} -! Then, using the above example value of {\tt inList}, we can use -! {\tt CharBufferSize\_()} as follows: -! \begin{verbatim} -! integer :: BufferLength -! BufferLength = CharBufferSize(inList) -! \end{verbatim} -! and the resulting value of {\tt BufferLength} will be 25. -! -! !INTERFACE: - - integer function CharBufferSize_(inList) - -! !USES: -! - use m_die, only : die - use m_stdio, only : stderr - - implicit none - -! ! INPUT PARAMETERS: - - type(List), intent(in) :: inList - -! !REVISION HISTORY: -! 13Feb02 - J. Larson - initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::CharBufferSize_' - - if(allocated_(inList)) then - CharBufferSize_ = size(inList%bf) - else - write(stderr,'(2a)') myname_,":: Argument inList not allocated." - call die(myname_) - endif - - end function CharBufferSize_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: get_ - Retrieve a Numbered Item from a List as a String -! -! !DESCRIPTION: -! This routine retrieves a numbered item (defined by the input -! {\tt INTEGER} argument {\tt ith}) from the input {\tt List} argument -! {\tt aList}, and returns it in the output {\tt String} argument -! {\tt itemStr} (see the module {\tt m\_String} for more information -! about the {\tt String} type). If the argument {\tt ith} is nonpositive, -! or greater than the number of items in {\tt aList}, a String containing -! one blank space is returned. -! -! !INTERFACE: - - subroutine get_(itemStr, ith, aList) - -! !USES: -! - use m_String, only : String, init, toChar - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: ith - type(List), intent(in) :: aList - -! !OUTPUT PARAMETERS: -! - type(String),intent(out) :: itemStr - - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - initial prototype/prolog/code -! 14May07 - Larson, Jacob - add space to else case string so function -! matches documentation. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::get_' - integer :: lb,le - - if(ith>0 .and. ith <= size(aList%lc,2)) then - lb=aList%lc(0,ith) - le=aList%lc(1,ith) - call init(itemStr,toChar(aList%bf(lb:le))) - else - call init(itemStr,' ') - endif - - end subroutine get_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getall_ - Return all Items from a List as one String -! -! !DESCRIPTION: -! This routine returns all the items from the input {\tt List} argument -! 
{\tt aList} in the output {\tt String} argument {\tt itemStr} (see -! the module {\tt m\_String} for more information about the {\tt String} -! type). The contents of the character buffer in {\tt itemStr} will -! be the all of the items in {\tt aList}, separated by the colon delimiter. -! -! !INTERFACE: - - subroutine getall_(itemStr, aList) - -! !USES: -! - use m_String, only : String, init, toChar - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: aList - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: itemStr - - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::getall_' - integer :: lb,le,ni - - ni=size(aList%lc,2) - lb=aList%lc(0,1) - le=aList%lc(1,ni) - call init(itemStr,toChar(aList%bf(lb:le))) - - end subroutine getall_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getrange_ - Return a Range of Items from a List as one String -! -! !DESCRIPTION: -! This routine returns all the items ranked {\tt i1} through {\tt i2} -! from the input {\tt List} argument {\tt aList} in the output -! {\tt String} argument {\tt itemStr} (see the module {\tt m\_String} -! for more information about the {\tt String} type). The contents of -! the character buffer in {\tt itemStr} will be items in {\tt i1} through -! {\tt i2} {\tt aList}, separated by the colon delimiter. -! -! !INTERFACE: - - subroutine getrange_(itemStr, i1, i2, aList) - -! !USES: -! - use m_die, only : die - use m_stdio, only : stderr - use m_String, only : String,init,toChar - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: i1 - integer, intent(in) :: i2 - type(List), intent(in) :: aList - -! !OUTPUT PARAMETERS: -! - type(String),intent(out) :: itemStr - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - initial prototype/prolog/code -! 26Jul02 - J. Larson - Added argument checks. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::getrange_' - integer :: lb,le,ni - - ! Argument Sanity Checks: - - if(.not. allocated_(aList)) then - write(stderr,'(2a)') myname_, & - ':: FATAL--List argument aList is not initialized.' - call die(myname_) - endif - - ! is i2 >= i1 as we assume? - - if(i1 > i2) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL. Starting/Ending item ranks are out of order; ', & - 'i2 must be greater or equal to i1. i1 =',i1,' i2 = ',i2 - call die(myname_) - endif - - ni=size(aList%lc,2) ! the number of items in aList... - - ! is i1 or i2 too big? - - if(i1 > ni) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL--i1 is greater than the number of items in ', & - 'The List argument aList: i1 =',i1,' ni = ',ni - call die(myname_) - endif - - if(i2 > ni) then - write(stderr,'(2a,2(a,i8))') myname_, & - ':: FATAL--i2 is greater than the number of items in ', & - 'The List argument aList: i2 =',i2,' ni = ',ni - call die(myname_) - endif - - ! End of Argument Sanity Checks. - - lb=aList%lc(0,max(1,i1)) - le=aList%lc(1,min(ni,i2)) - call init(itemStr,toChar(aList%bf(lb:le))) - - end subroutine getrange_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
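[Editor's note, not part of the diff: a minimal sketch of how the removed range-retrieval interface documented above (the generic get, which maps to getrange_) was typically driven. The program name and the item values are purely illustrative; only public names declared in m_List and m_String are assumed.]

      program list_range_demo
         use m_List,   only : List, init, get, clean
         use m_String, only : String, toChar
         use m_String, only : String_clean => clean
         implicit none
         type(List)   :: aList
         type(String) :: itemStr

         ! Build a four-item List from a colon-delimited string
         call init(aList, 'lat:lon:lev:time')

         ! Retrieve items 2 through 3 as one colon-delimited String ('lon:lev')
         call get(itemStr, 2, 3, aList)
         write(*,'(a)') toChar(itemStr)

         call String_clean(itemStr)
         call clean(aList)
      end program list_range_demo
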
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: identical_ - Compare Two Lists for Equality -! -! !DESCRIPTION: -! This function compares the string buffer and indexing information in -! the two input {\tt List} arguments {\tt yL} and {\tt xL}. If the -! string buffers and index buffers of {\tt yL} and {\tt xL} match, this -! function returns a value of {\tt .TRUE.} Otherwise, it returns a -! value of {\tt .FALSE.} -! -! !INTERFACE: - - logical function identical_(yL, xL) - -! !USES: -! - use m_die,only : die - use m_String ,only : String - use m_String ,only : String_clean - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: yL - type(List), intent(in) :: xL - -! !REVISION HISTORY: -! 14Oct01 - J. Larson - original version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::identical_' - - logical :: myIdentical - type(String) :: DummStr - integer :: n, NumItems - - ! Compare the number of the items in the Lists xL and yL. - ! If they differ, myIdentical is set to .FALSE. and we are - ! finished. If both Lists sport the same number of items, - ! we must compare them one-by-one... - - myIdentical = .FALSE. - - if(nitem_(yL) == nitem_(xL)) then - - NumItems = nitem_(yL) - - COMPARE_LOOP: do n=1,NumItems - - call get_(DummStr, n, yL) ! retrieve nth tag as a String - - if( indexStr_(xL, Dummstr) /= n ) then ! a discrepency spotted. - call String_clean(Dummstr) - myIdentical = .FALSE. - EXIT - else - call String_clean(Dummstr) - endif - - myIdentical = .TRUE. ! we survived the whole test process. - - end do COMPARE_LOOP - - endif - - identical_ = myIdentical - - end function identical_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: get_indices_ - Index Multiple Items in a List -! -! !DESCRIPTION: This routine takes as input a {\tt List} argument -! {\tt aList}, and a {\tt CHARACTER} string {Values}, which is a colon- -! delimited string of items, and returns an {\tt INTEGER} array -! {\tt indices(:)}, which contain the rank of each item in {\tt aList}. -! For example, suppose {\tt aList} was created from the character string -! \begin{verbatim} -! 'happy:sleepy:sneezey:grumpy:dopey::bashful:doc' -! \end{verbatim} -! and get\_indices\_() is invoked as follows: -! \begin{verbatim} -! call get_indices_(indices, aList, 'sleepy:grumpy:bashful:doc') -! \end{verbatim} -! The array {\tt indices(:)} will be returned with 4 entries: -! ${\tt indices(1)}=2$, ${\tt indices(2)}=4$, ${\tt indices(3)}=6$, and -! ${\tt indices(4)}=7$. -! -! {\bf N.B.}: This routine operates on the assumption that each of the -! substrings in the colon-delimited string {\tt Values} is an item in -! {\tt aList}. If this assumption is invalid, this routine terminates -! execution with an error message. -! -! {\bf N.B.}: The pointer {\tt indices} must be {\tt UNASSOCIATED} on entry -! to this routine, and will be {\tt ASSOCIATED} upon return. After this pointer -! is no longer needed, it should be deallocated. Failure to do so will result -! in a memory leak. -! -! !INTERFACE: - - subroutine get_indices_(indices, aList, Values) - -! !USES: -! - use m_stdio - use m_die - use m_String, only : String - use m_String, only : String_clean => clean - use m_String, only : String_toChar => toChar - - implicit none - -! 
!INPUT PARAMETERS: -! - type(List), intent(in) :: aList ! an indexed string values - character(len=*), intent(in) :: Values ! ":" delimited names - -! !OUTPUT PARAMETERS: -! - integer, dimension(:), pointer :: indices - -! !REVISION HISTORY: -! 31May98 - Jing Guo - initial prototype/prolog/code -! 12Feb03 - J. Larson Working refactored version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::get_indices_' - type(List) :: tList - type(String) :: tStr - integer :: i, ierr, n - - ! Create working list based on input colon-delimited string - - call init_(tList, values) - - - ! Count items in tList and allocate indices(:) accordingly - - n = nitem_(tList) - - if(n > nitem_(aList)) then - write(stderr,'(5a,2(i8,a))') myname_, & - ':: FATAL--more items in argument Values than aList! Input string', & - 'Values = "',Values,'" has ',n,' items. aList has ',nitem_(aList), & - ' items.' - call die(myname_) - endif - allocate(indices(n), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8,a)') myname_, & - ':: FATAL--allocate(indices(...) failed with stat=',ierr,& - '. On entry to this routine, this pointer must be NULL.' - call die(myname_) - endif - - ! Retrieve each item from tList as a String and index it - - do i=1,n - call get_(tStr,i,tList) - indices(i) = indexStr_(aList,tStr) - if(indices(i) == 0) then ! ith item not present in aList! - write(stderr,'(4a)') myname_, & - ':: FATAL--item "',String_toChar(tStr),'" not found.' - call die(myname_) - endif - call String_clean(tStr) - end do - - ! Clean up temporary List tList - - call clean_(tList) - - end subroutine get_indices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: test_indices_ - Test/Index Multiple Items in a List -! -! !DESCRIPTION: This routine takes as input a {\tt List} argument -! {\tt aList}, and a {\tt CHARACTER} string {Values}, which is a colon- -! delimited string of items, and returns an {\tt INTEGER} array -! {\tt indices(:)}, which contain the rank of each item in {\tt aList}. -! For example, suppose {\tt aList} was created from the character string -! \begin{verbatim} -! 'happy:sleepy:sneezey:grumpy:dopey::bashful:doc' -! \end{verbatim} -! and {\tt test\_indices\_()} is invoked as follows: -! \begin{verbatim} -! call test_indices_(indices, aList, 'sleepy:grumpy:bashful:doc') -! \end{verbatim} -! The array {\tt indices(:)} will be returned with 4 entries: -! ${\tt indices(1)}=2$, ${\tt indices(2)}=4$, ${\tt indices(3)}=6$, and -! ${\tt indices(4)}=7$. -! -! Now suppose {\tt test\_indices\_()} is invoked as follows: -! \begin{verbatim} -! call test_indices_(indices, aList, 'sleepy:grumpy:bashful:Snow White') -! \end{verbatim} -! The array {\tt indices(:)} will be returned with 4 entries: -! ${\tt indices(1)}=2$, ${\tt indices(2)}=4$, ${\tt indices(3)}=6$, and -! ${\tt indices(4)}=0$. -! -! {\bf N.B.}: This routine operates on the assumption that one or more -! of the substrings in the colon-delimited string {\tt Values} is may not -! be an item in {\tt aList}. If an item in {\tt Values} is {\em not} in -! {\tt aList}, its corresponding entry in {\tt indices(:)} is set to zero. -! -! {\bf N.B.}: The pointer {\tt indices} must be {\tt UNASSOCIATED} on entry -! to this routine, and will be {\tt ASSOCIATED} upon return. After this pointer -! 
is no longer needed, it should be deallocated. Failure to do so will result -! in a memory leak. -! -! !INTERFACE: - - subroutine test_indices_(indices, aList, Values) - -! !USES: -! - use m_stdio - use m_die - use m_String, only : String - use m_String, only : String_clean => clean - use m_String, only : String_toChar => toChar - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: aList ! an indexed string values - character(len=*), intent(in) :: Values ! ":" delimited names - -! !OUTPUT PARAMETERS: -! - integer, dimension(:), pointer :: indices - -! !REVISION HISTORY: -! 12Feb03 - J. Larson Working refactored version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::test_indices_' - type(List) :: tList - type(String) :: tStr - integer :: i, ierr, n - - ! Create working list based on input colon-delimited string - - call init_(tList, values) - - - ! Count items in tList and allocate indices(:) accordingly - - n = nitem_(tList) - allocate(indices(n), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8,a)') myname_, & - ':: FATAL--allocate(indices(...) failed with stat=',ierr,& - '. On entry to this routine, this pointer must be NULL.' - call die(myname_) - endif - - ! Retrieve each item from tList as a String and index it - - do i=1,n - call get_(tStr,i,tList) - indices(i) = indexStr_(aList,tStr) - call String_clean(tStr) - end do - - ! Clean up temporary List tList - - call clean_(tList) - - end subroutine test_indices_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: append_ - Append One List Onto the End of Another -! -! !DESCRIPTION: This routine takes two {\tt List} arguments -! {\tt iList1} and {\tt iList2}, and appends {\tt List2} onto -! the end of {\tt List1}. -! -! {\bf N.B.}: There is no check for shared items in the arguments -! {\tt List1} and {\tt List2}. It is the user's responsibility to -! ensure {\tt List1} and {\tt List2} share no items. If this routine -! is invoked in such a manner that {\tt List1} and {\tt List2} share -! common items, the resultant value of {\tt List1} will produce -! ambiguous results for some of the {\tt List} query functions. -! -! {\bf N.B.}: The outcome of this routine is order dependent. That is, -! the entries of {\tt iList2} will follow the {\em input} entries in -! {\tt iList1}. -! -! !INTERFACE: - - subroutine append_(iList1, iList2) -! -! !USES: -! - use m_stdio - use m_die, only : die - - use m_mpif90 - - use m_String, only: String - use m_String, only: String_toChar => toChar - use m_String, only: String_len - use m_String, only: String_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: iList2 - -! !INPUT/OUTPUT PARAMETERS: -! - type(List), intent(inout) :: iList1 - -! !REVISION HISTORY: -! 6Aug02 - J. Larson - Initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::append_' - - type(List) :: DummyList - - call copy_(DummyList, iList1) - call clean_(iList1) - call concatenate(DummyList, iList2, iList1) - call clean_(DummyList) - - end subroutine append_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
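[Editor's note, not part of the diff: an illustrative sketch of the append interface removed above, using only the public generics of this module. As the prologue warns, the two Lists must not share any items; the program name and item names are made up for the example.]

      program list_append_demo
         use m_List, only : List, init, append, clean, nitem, exportToChar
         implicit none
         type(List) :: base, extra

         call init(base,  'lat:lon')
         call init(extra, 'lev:time')

         ! Append extra onto the end of base; base becomes 'lat:lon:lev:time'
         call append(base, extra)
         write(*,'(a,i2,2a)') 'nitem = ', nitem(base), ' buffer = ', exportToChar(base)

         call clean(extra)
         call clean(base)
      end program list_append_demo
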
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: concatenate_ - Concatenates two Lists to form a Third List. -! -! !DESCRIPTION: This routine takes two input {\tt List} arguments -! {\tt iList1} and {\tt iList2}, and concatenates them, producing an -! output {\tt List} argument {\tt oList}. -! -! {\bf N.B.}: The nature of this routine is such that one must -! {\bf never} supply as the actual value of {\tt oList} the same -! value supplied for either {\tt iList1} or {\tt iList2}. -! -! {\bf N.B.}: The outcome of this routine is order dependent. That is, -! the entries of {\tt iList2} will follow {\tt iList1}. -! -! !INTERFACE: - - subroutine concatenate_(iList1, iList2, oList) -! -! !USES: -! - use m_stdio - use m_die, only : die - - use m_mpif90 - - use m_String, only: String - use m_String, only: String_init => init - use m_String, only: String_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: iList1 - type(List), intent(in) :: iList2 - -! !OUTPUT PARAMETERS: -! - type(List), intent(out) :: oList - -! !BUGS: For now, the List concatenate algorithm relies on fixed-length -! CHARACTER variables as intermediate storage. The lengths of these -! scratch variables is hard-wired to 10000, which should be large enough -! for most applications. This undesirable feature should be corrected -! ASAP. -! -! !REVISION HISTORY: -! 8May01 - J.W. Larson - initial version. -! 17May01 - J.W. Larson - Re-worked and tested successfully. -! 17Jul02 - E. Ong - fixed the bug mentioned above -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::concatenate_' - - character, dimension(:), allocatable :: CatBuff - integer :: CatBuffLength, i, ierr, Length1, Length2 - type(String) :: CatString - - ! First, handle the case of either iList1 and/or iList2 being - ! null - - if((nitem_(iList1) == 0) .or. (nitem_(iList2) == 0)) then - - if((nitem_(iList1) == 0) .and. (nitem_(iList2) == 0)) then - call init_(oList,'') - else - if((nitem_(iList1) == 0) .and. (nitem_(iList2) > 0)) then - call copy_(oList, iList2) - endif - if((nitem_(iList1) > 0) .and. (nitem_(iList2) == 0)) then - call copy_(oList,iList1) - endif - endif - - else ! both lists are non-null - - ! Step one: Get lengths of character buffers of iList1 and iList2: - - Length1 = CharBufferSize_(iList1) - Length2 = CharBufferSize_(iList2) - - ! Step two: create CatBuff(:) as workspace - - CatBuffLength = Length1 + Length2 + 1 - allocate(CatBuff(CatBuffLength), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: FATAL--allocate(CatBuff(...) failed. ierr=',ierr - call die(myname_) - endif - - ! Step three: concatenate CHARACTERs with the colon separator - ! into CatBuff(:) - - do i=1,Length1 - CatBuff(i) = iList1%bf(i) - end do - - CatBuff(Length1 + 1) = ':' - - do i=1,Length2 - CatBuff(Length1 + 1 + i) = iList2%bf(i) - end do - - ! Step four: initialize a String CatString: - - call String_init(CatString, CatBuff) - - ! Step five: initialize oList: - - call initStr_(oList, CatString) - - ! The concatenation is complete. Now, clean up - - call String_clean(CatString) - - deallocate(CatBuff,stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: FATAL--deallocate(CatBuff) failed. ierr=',ierr - call die(myname_) - endif - - endif - - end subroutine concatenate_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcast_ - MPI Broadcast for the List Type -! -! !DESCRIPTION: This routine takes an input {\tt List} argument -! {\tt iList} (on input, valid on the root only), and broadcasts it. -! -! {\bf N.B.}: The outcome of this routine, {\tt ioList} on non-root -! processes, represents allocated memory. When this {\tt List} is -! no longer needed, it must be deallocated by invoking the routine -! {\tt List\_clean()}. Failure to do so will cause a memory leak. -! -! !INTERFACE: - - subroutine bcast_(ioList, root, comm, status) -! -! !USES: -! - use m_stdio, only : stderr - use m_die, only : MP_perr_die, die - - use m_String, only: String - use m_String, only: String_bcast => bcast - use m_String, only: String_clean => clean - - use m_mpif90 - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(List), intent(inout) :: ioList - - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 7May01 - J.W. Larson - initial version. -! 14May01 - R.L. Jacob - fix error checking -! 16May01 - J.W. Larson - new, simpler String-based algorigthm -! (see m_String for details), which works properly on -! the SGI platform. -! 13Jun01 - J.W. Larson - Initialize status -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcast_' - integer :: myID, ierr - type(String) :: DummStr - - ! Initialize status (if present) - - if(present(status)) status = 0 - - ! Which process am I? - - call MPI_COMM_RANK(comm, myID, ierr) - if(ierr /= 0) then - if(present(status)) then - status = ierr - write(stderr,'(2a,i4)') myname_,":: MPI_COMM_RANK(), ierr=",ierr - return - else - call MP_perr_die(myname_,"MPI_COMM_RANK()",ierr) - endif - endif - - ! on the root, convert ioList into the String variable DummStr - - if(myID == root) then - if(CharBufferSize_(ioList) <= 0) then - call die(myname_, 'Attempting to broadcast an empty list!',& - CharBufferSize_(ioList)) - endif - call getall_(DummStr, ioList) - endif - - ! Broadcast DummStr - - call String_bcast(DummStr, root, comm, ierr) - if(ierr /= 0) then - if(present(status)) then - status = ierr - write(stderr,'(2a,i4)') myname_,":: call String_bcast(), ierr=",ierr - return - else - call MP_perr_die(myname_,"String_bcast() failed, stat=",ierr) - endif - endif - - ! Initialize ioList off the root using DummStr - - if(myID /= root) then - call initStr_(ioList, DummStr) - endif - - ! And now, the List broadcast is complete. - - call String_clean(DummStr) - - end subroutine bcast_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: send_ - MPI Point-to-Point Send for the List Type -! -! !DESCRIPTION: This routine takes an input {\tt List} argument -! {\tt inList} and sends it to processor {\tt dest} on the communicator -! associated with the fortran 90 {\tt INTEGER} handle {\tt comm}. The -! message is tagged by the input {\tt INTEGER} argument {\tt TagBase}. -! The success (failure) of this operation is reported in the zero -! (nonzero) optional output argument {\tt status}. -! -! 
{\bf N.B.}: One must avoid assigning elsewhere the MPI tag values -! {\tt TagBase} and {\tt TagBase+1}. This is because {\tt send\_()} -! performs the send of the {\tt List} as a pair of operations. The -! first send is the number of characters in {\tt inList\%bf}, and is -! given MPI tag value {\tt TagBase}. The second send is the -! {\tt CHARACTER} data present in {\tt inList\%bf}, and is given MPI -! tag value {\tt TagBase+1}. -! -! !INTERFACE: - - subroutine send_(inList, dest, TagBase, comm, status) -! -! !USES: -! - use m_stdio - use m_die, only : MP_perr_die - - use m_mpif90 - - use m_String, only: String - use m_String, only: String_toChar => toChar - use m_String, only: String_len - use m_String, only: String_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: inList - integer, intent(in) :: dest - integer, intent(in) :: TagBase - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 6Jun01 - J.W. Larson - initial version. -! 13Jun01 - J.W. Larson - Initialize status -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::send_' - - type(String) :: DummStr - integer :: ierr, length - - ! Set status flag to zero (success) if present: - - if(present(status)) status = 0 - - ! Step 1. Extract CHARACTER buffer from inList and store it - ! in String variable DummStr, determine its length. - - call getall_(DummStr, inList) - length = String_len(DummStr) - - ! Step 2. Send Length of String DummStr to process dest. - - call MPI_SEND(length, 1, MP_type(length), dest, TagBase, comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,'(2a,i8)') myname_, & - ':: MPI_SEND(length...) failed. ierror=', ierr - status = ierr - return - else - call MP_perr_die(myname_,':: MPI_SEND(length...) failed',ierr) - endif - endif - - ! Step 3. Send CHARACTER portion of String DummStr - ! to process dest. - - call MPI_SEND(DummStr%c(1), length, MP_CHARACTER, dest, TagBase+1, & - comm, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,'(2a,i8)') myname_, & - ':: MPI_SEND(DummStr%c...) failed. ierror=', ierr - status = ierr - return - else - call MP_perr_die(myname_,':: MPI_SEND(DummStr%c...) failed',ierr) - endif - endif - - end subroutine send_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: recv_ - MPI Point-to-Point Receive for the List Type -! -! !DESCRIPTION: This routine receives the output {\tt List} argument -! {\tt outList} from processor {\tt source} on the communicator associated -! with the fortran 90 {\tt INTEGER} handle {\tt comm}. The message is -! tagged by the input {\tt INTEGER} argument {\tt TagBase}. The success -! (failure) of this operation is reported in the zero (nonzero) optional -! output argument {\tt status}. -! -! {\bf N.B.}: One must avoid assigning elsewhere the MPI tag values -! {\tt TagBase} and {\tt TagBase+1}. This is because {\tt recv\_()} -! performs the receive of the {\tt List} as a pair of operations. The -! first receive is the number of characters in {\tt outList\%bf}, and -! is given MPI tag value {\tt TagBase}. The second receive is the -! {\tt CHARACTER} data present in {\tt outList\%bf}, and is given MPI -! tag value {\tt TagBase+1}. -! -! 
!INTERFACE: - - subroutine recv_(outList, source, TagBase, comm, status) -! -! !USES: -! - use m_stdio, only : stderr - use m_die, only : MP_perr_die - - use m_mpif90 - - use m_String, only : String - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: source - integer, intent(in) :: TagBase - integer, intent(in) :: comm - -! !OUTPUT PARAMETERS: -! - type(List), intent(out) :: outList - integer, optional, intent(out) :: status - -! !REVISION HISTORY: -! 6Jun01 - J.W. Larson - initial version. -! 11Jun01 - R. Jacob - small bug fix; status in MPI_RECV -! 13Jun01 - J.W. Larson - Initialize status -! (if present). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::recv_' - - integer :: ierr, length - integer :: MPstatus(MP_STATUS_SIZE) - type(String) :: DummStr - - ! Initialize status to zero (success), if present. - - if(present(status)) status = 0 - - ! Step 1. Receive Length of String DummStr from process source. - - call MPI_RECV(length, 1, MP_type(length), source, TagBase, comm, & - MPstatus, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,'(2a,i8)') myname_, & - ':: MPI_RECV(length...) failed. ierror=', ierr - status = ierr - return - else - call MP_perr_die(myname_,':: MPI_RECV(length...) failed',ierr) - endif - endif - - allocate(DummStr%c(length), stat=ierr) - - ! Step 2. Send CHARACTER portion of String DummStr - ! to process dest. - - call MPI_RECV(DummStr%c(1), length, MP_CHARACTER, source, TagBase+1, & - comm, MPstatus, ierr) - if(ierr /= 0) then - if(present(status)) then - write(stderr,'(2a,i8)') myname_, & - ':: MPI_RECV(DummStr%c...) failed. ierror=', ierr - status = ierr - return - else - call MP_perr_die(myname_,':: MPI_RECV(DummStr%c...) failed',ierr) - endif - endif - - ! Step 3. Initialize outList. - - call initStr_(outList, DummStr) - - end subroutine recv_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GetSharedListIndices_ - Index Shared Items for Two Lists -! -! !DESCRIPTION: {\tt GetSharedListIndices\_()} compares two user- -! supplied {\tt List} arguments {\tt List1} and {\tt Lis2} to determine: -! the number of shared items {\tt NumShared}, and arrays of the locations -! {\tt Indices1} and {\tt Indices2} in {\tt List1} and {\tt List2}, -! respectively. -! -! {\bf N.B.:} This routine returns two allocated arrays: {\tt Indices1(:)} -! and {\tt Indices2(:)}. Both of these arrays must be deallocated once they -! are no longer needed. Failure to do this will create a memory leak. -! -! !INTERFACE: - - subroutine GetSharedListIndices_(List1, List2, NumShared, Indices1, & - Indices2) - -! -! !USES: -! - use m_die, only : MP_perr_die, die, warn - - use m_String, only : String - use m_String, only : String_clean => clean - - implicit none - -! !INPUT PARAMETERS: -! - type(List), intent(in) :: List1 - type(List), intent(in) :: List2 - -! !OUTPUT PARAMETERS: -! - integer, intent(out) :: NumShared - - integer,dimension(:), pointer :: Indices1 - integer,dimension(:), pointer :: Indices2 - -! !REVISION HISTORY: -! 7Feb01 - J.W. Larson - initial version -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GetSharedListIndices_' - -! Error flag - integer :: ierr - -! 
number of items in List1 and List2, respectively: - integer :: nitem1, nitem2 - -! MAXIMUM number of matches possible: - integer :: NumSharedMax - -! Temporary storage for a string tag retrieved from a list: - type(String) :: tag - -! Loop counters / temporary indices: - integer :: n1, n2 - - ! Determine the number of items in each list: - - nitem1 = nitem_(List1) - nitem2 = nitem_(List2) - - ! The maximum number of list item matches possible - ! is the minimum(nitem1,nitem2): - - NumSharedMax = min(nitem1,nitem2) - - ! Allocate sufficient space for the matches we may find: - - allocate(Indices1(NumSharedMax), Indices2(NumSharedMax), stat=ierr) - if(ierr /= 0) call die(myname_,'allocate() Indices1 and 2',ierr) - - ! Initialize the counter for the number of matches found: - - NumShared = 0 - - ! Scan through the two lists. For the sake of speed, loop - ! over the shorter of the two lists... - - if(nitem1 <= nitem2) then ! List1 is shorter--scan it... - - do n1=1,NumSharedMax - - ! Retrieve string tag n1 from List1: - call get_(tag, n1, List1) - - ! Index this tag WRT List2--a nonzero value signifies a match - n2 = indexStr_(List2, tag) - - ! Clear out tag for the next iteration... - call String_clean(tag) - - ! If we have a hit, update NumShared, and load the indices - ! n1 and n2 in Indices1 and Indices2, respectively... - - if((0 < n2) .and. (n2 <= nitem2)) then - NumShared = NumShared + 1 - Indices1(NumShared) = n1 - Indices2(NumShared) = n2 - endif - - end do ! do n1=1,NumSharedMax - - else ! List1 is shorter--scan it... - - do n2=1,NumSharedMax - - ! Retrieve string tag n2 from List2: - call get_(tag, n2, List2) - - ! Index this tag WRT List1--a nonzero value signifies a match - n1 = indexStr_(List1, tag) - - ! Clear out tag for the next iteration... - call String_clean(tag) - - ! If we have a hit, update NumShared, and load the indices - ! n1 and n2 in Indices1 and Indices2, respectively... - - if((0 < n1) .and. (n1 <= nitem1)) then - NumShared = NumShared + 1 - Indices1(NumShared) = n1 - Indices2(NumShared) = n2 - endif - - end do ! do n2=1,NumSharedMax - - endif ! if(nitem1 <= nitem2)... - - end subroutine GetSharedListIndices_ - - end module m_List -!. - - - - - - - - - diff --git a/src/externals/mct/mpeu/m_MergeSorts.F90 b/src/externals/mct/mpeu/m_MergeSorts.F90 deleted file mode 100644 index 6dc4cd6db1d..00000000000 --- a/src/externals/mct/mpeu/m_MergeSorts.F90 +++ /dev/null @@ -1,1469 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_MergeSorts - Tools for incremental indexed-sorting -! -! !DESCRIPTION: -! -! This tool module contains basic sorting procedures, that in -! addition to a couple of standard Fortran 90 statements in the -! array syntex, allow a full range sort or unsort operations. -! The main characteristics of the sorting algorithm used in this -! module are, a) stable, and b) index sorting. -! -! !INTERFACE: - - module m_MergeSorts - implicit none - private ! 
except - - public :: IndexSet - - public :: IndexSort - - integer,parameter :: I8 = selected_int_kind (13) - - interface IndexSet - module procedure setn_ - module procedure set_ - end interface - interface IndexSort - module procedure iSortn_ - module procedure i8Sortn_ - module procedure rSortn_ - module procedure dSortn_ - module procedure cSortn_ - module procedure iSort_ - module procedure i8Sort_ - module procedure rSort_ - module procedure dSort_ - module procedure cSort_ - module procedure iSort1_ - module procedure i8Sort1_ - module procedure rSort1_ - module procedure dSort1_ - module procedure cSort1_ - end interface - -! !EXAMPLES: -! -! ... -! integer, intent(in) :: No -! type(Observations), dimension(No), intent(inout) :: obs -! -! integer, dimension(No) :: indx ! automatic array -! -! call IndexSet(No,indx) -! call IndexSort(No,indx,obs(1:No)%lev,descend=.false.) -! call IndexSort(No,indx,obs(1:No)%lon,descend=.false.) -! call IndexSort(No,indx,obs(1:No)%lat,descend=.false.) -! call IndexSort(No,indx,obs(1:No)%kt,descend=.false.) -! call IndexSort(No,indx,obs(1:No)%ks,descend=.false.) -! call IndexSort(No,indx,obs(1:No)%kx,descend=.false.) -! call IndexSort(No,indx,obs(1:No)%kr,descend=.false.) -! -! ! Sorting -! obs(1:No) = obs( (/ (indx(i),i=1,No) /) ) -! ... -! ! Unsorting -! obs( (/ (indx(i),i=1,No) /) ) = obs(1:No) -! -! !REVISION HISTORY: -! 23Mar15 - Steve Goldhaber (goldy@ucar.edu) -! . Added interface to perform index sort on 8-byte integers -! 15Mar00 - Jing Guo -! . Added interfaces without the explicit size -! . Added interfaces for two dimensional arrays -! 02Feb99 - Jing Guo - Added if(present(stat)) ... -! 04Jan99 - Jing Guo - revised -! 09Sep97 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*), parameter :: myname='MCT(MPEU)::m_MergeSorts' - -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: setn_ - Initialize an array of data location indices -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine setn_(n,indx) - implicit none - integer, intent(in) :: n ! size of indx(:) - integer, dimension(n), intent(out) :: indx ! indices - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . redefined for the original interface -!EOP ___________________________________________________________________ - - call set_(indx(1:n)) -end subroutine setn_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: set_ - Initialize an array of data location indices -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine set_(indx) - implicit none - integer, dimension(:), intent(out) :: indx ! indices - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . Modified the interface, by removing the explicit size -! 09Sep97 - Jing Guo - initial prototype/prolog/code -! 04Jan99 - Jing Guo - revised prolog format -!EOP ___________________________________________________________________ - - integer :: i - - do i=1,size(indx) - indx(i)=i - end do - -end subroutine set_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
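[Editor's note, not part of the diff: the module's own EXAMPLES block above sorts a derived type; below is a minimal self-contained sketch of the same IndexSet / IndexSort / permute pattern on plain integer keys. Program name and data are illustrative only.]

      program index_sort_demo
         use m_MergeSorts, only : IndexSet, IndexSort
         implicit none
         integer, parameter :: n = 5
         integer :: keys(n) = (/ 30, 10, 50, 20, 40 /)
         integer :: indx(n), i

         call IndexSet(n, indx)                          ! indx = 1, 2, ..., n
         call IndexSort(n, indx, keys, descend=.false.)  ! stable index sort on keys

         ! Apply the permutation, as in the module's own example
         keys = keys( (/ (indx(i), i=1,n) /) )
         write(*,'(5i4)') keys                           ! prints 10 20 30 40 50
      end program index_sort_demo
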
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: iSortn_ - A stable merge index sorting of INTs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine iSortn_(n,indx,keys,descend,stat) - implicit none - - integer,intent(in) :: n - integer, dimension(n), intent(inout) :: indx - integer, dimension(n), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . redefined for the original interface -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::iSortn_' - - call iSort_(indx(1:n),keys(1:n),descend,stat) -end subroutine iSortn_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: i8Sortn_ - A stable merge index sorting of 8-byte INTs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine i8Sortn_(n,indx,keys,descend,stat) - implicit none - - integer,intent(in) :: n - integer, dimension(n), intent(inout) :: indx - integer(i8), dimension(n), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 23Mar15 - Steve Goldhaber (goldy@ucar.edu) -! . Added interface to perform index sort on 8-byte integers -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . redefined for the original interface -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::i8Sortn_' - - call i8Sort_(indx(1:n),keys(1:n),descend,stat) -end subroutine i8Sortn_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rSortn_ - A stable merge index sorting REALs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine rSortn_(n,indx,keys,descend,stat) - use m_realkinds,only : SP - implicit none - - integer,intent(in) :: n - integer, dimension(n), intent(inout) :: indx - real(SP),dimension(n), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . redefined for the original interface -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::rSortn_' - - call rSort_(indx(1:n),keys(1:n),descend,stat) -end subroutine rSortn_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dSortn_ - A stable merge index sorting DOUBLEs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine dSortn_(n,indx,keys,descend,stat) - use m_realkinds,only : DP - implicit none - - integer,intent(in) :: n - integer, dimension(n), intent(inout) :: indx - real(DP), dimension(n), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . 
redefined for the original interface -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::dSortn_' - - call dSort_(indx(1:n),keys(1:n),descend,stat) -end subroutine dSortn_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: cSortn_ - A stable merge index sorting of CHAR(*)s. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine cSortn_(n,indx,keys,descend,stat) - implicit none - - integer,intent(in) :: n - integer, dimension(n), intent(inout) :: indx - character(len=*), dimension(n), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . redefined for the original interface -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::cSortn_' - - call cSort_(indx(1:n),keys(1:n),descend,stat) -end subroutine cSortn_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: iSort_ - A stable merge index sorting of INTs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine iSort_(indx,keys,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, dimension(:), intent(inout) :: indx - integer, dimension(:), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . Modified the interface, by removing the explicit size -! 02Feb99 - Jing Guo - Added if(present(stat)) ... -! 04Jan99 - Jing Guo - revised the prolog -! 09Sep97 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::iSort_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(indx(l1)) .ge. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(indx(l1)) .le. 
keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine iSort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: i8Sort_ - A stable merge index sorting of 8-byte INTs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine i8Sort_(indx,keys,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, dimension(:), intent(inout) :: indx - integer(i8), dimension(:), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 17Dec14 - goldy@ucar.edu - Added 8-byte version -! 15Mar00 - Jing Guo -! . Modified the interface, by removing the explicit size -! 02Feb99 - Jing Guo - Added if(present(stat)) ... -! 04Jan99 - Jing Guo - revised the prolog -! 09Sep97 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::i8Sort_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(indx(l1)) .ge. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(indx(l1)) .le. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine i8Sort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rSort_ - A stable merge index sorting REALs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine rSort_(indx,keys,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - use m_realkinds,only : SP - implicit none - - integer, dimension(:), intent(inout) :: indx - real(SP),dimension(:), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . Modified the interface, by removing the explicit size -! 02Feb99 - Jing Guo - Added if(present(stat)) ... -! 04Jan99 - Jing Guo - revised the prolog -! 
09Sep97 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::rSort_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(indx(l1)) .ge. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(indx(l1)) .le. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine rSort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dSort_ - A stable merge index sorting DOUBLEs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine dSort_(indx,keys,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - use m_realkinds,only : DP - implicit none - - integer, dimension(:), intent(inout) :: indx - real(DP), dimension(:), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . Modified the interface, by removing the explicit size -! 02Feb99 - Jing Guo - Added if(present(stat)) ... -! 04Jan99 - Jing Guo - revised the prolog -! 09Sep97 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::dSort_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. 
- if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(indx(l1)) .ge. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(indx(l1)) .le. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine dSort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: cSort_ - A stable merge index sorting of CHAR(*)s. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine cSort_(indx,keys,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, dimension(:), intent(inout) :: indx - character(len=*), dimension(:), intent(in) :: keys - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . Modified the interface, by removing the explicit size -! 02Feb99 - Jing Guo - Added if(present(stat)) ... -! 04Jan99 - Jing Guo - revised the prolog -! 09Sep97 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::cSort_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(indx(l1)) .ge. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(indx(l1)) .le. keys(indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine cSort_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! 
!IROUTINE: iSort1_ - A stable merge index sorting of INTs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine iSort1_(indx,keys,ikey,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, dimension(:), intent(inout) :: indx - integer, dimension(:,:), intent(in) :: keys - integer,intent(in) :: ikey - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . Copied code from iSort_ -! . Extended the interface and the algorithm to handle -! 2-d arrays with an index. -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::i8Sort1_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(ikey,indx(l1)) .ge. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(ikey,indx(l1)) .le. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine iSort1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: i8Sort1_ - A stable merge index sorting of 8-byte INTs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine i8Sort1_(indx,keys,ikey,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, dimension(:), intent(inout) :: indx - integer(i8), dimension(:,:), intent(in) :: keys - integer,intent(in) :: ikey - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 17Dec14 - goldy@ucar.edu - Added 8-byte version -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . Copied code from iSort_ -! . Extended the interface and the algorithm to handle -! 2-d arrays with an index. -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::i8Sort1_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. 
- if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(ikey,indx(l1)) .ge. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(ikey,indx(l1)) .le. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine i8Sort1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rSort1_ - A stable merge index sorting REALs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine rSort1_(indx,keys,ikey,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - use m_realkinds,only : SP - implicit none - - integer, dimension(:), intent(inout) :: indx - real(SP),dimension(:,:), intent(in) :: keys - integer,intent(in) :: ikey - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . Copied code from rSort_ -! . Extended the interface and the algorithm to handle -! 2-d arrays with an index. -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::rSort1_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(ikey,indx(l1)) .ge. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(ikey,indx(l1)) .le. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine rSort1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
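[Reviewer note, not part of the deleted source: every *Sort_ body above repeats the same bottom-up merge — passes of width 1, 2, 4, ... merge adjacent runs of indx into a temporary array, comparing keys(indx(l1)) against keys(indx(l2)) with .le. (or .ge. for descending) so that equal keys keep their existing order. The Python below is a compact, illustrative rendering of that inner algorithm; the 2-D keyed *Sort1_ variants are identical except that they compare keys(ikey, indx(l1)) against keys(ikey, indx(l2)).]

    def index_merge_sort(indx, keys, descend=False):
        """Stable bottom-up merge sort of indx by keys[indx[i]], in place.

        Follows the deleted MergeSort_/merge_ pair: pass widths 1, 2, 4, ...
        and a <= / >= comparison so that ties preserve the order of indx.
        """
        n = len(indx)
        mtmp = [0] * n
        mstep = 1
        while mstep < n:
            lb = 0
            while lb < n:
                lm = min(lb + mstep, n)      # start of the right-hand run
                le = min(lm + mstep, n)      # one past the end of the merge
                l1, l2 = lb, lm
                for l in range(lb, le):
                    take_left = (
                        l2 >= le
                        or (l1 < lm and (
                            keys[indx[l1]] >= keys[indx[l2]] if descend
                            else keys[indx[l1]] <= keys[indx[l2]]))
                    )
                    if take_left:
                        mtmp[l] = indx[l1]; l1 += 1
                    else:
                        mtmp[l] = indx[l2]; l2 += 1
                indx[lb:le] = mtmp[lb:le]
                lb = le
            mstep *= 2

    keys = [3.0, 1.0, 2.0, 1.0]
    indx = list(range(len(keys)))
    index_merge_sort(indx, keys)
    print(indx)   # [1, 3, 2, 0]: stable, the two 1.0 keys keep their relative order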
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dSort1_ - A stable merge index sorting DOUBLEs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine dSort1_(indx,keys,ikey,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - use m_realkinds,only : DP - implicit none - - integer, dimension(:), intent(inout) :: indx - real(DP), dimension(:,:), intent(in) :: keys - integer,intent(in) :: ikey - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . Copied code from dSort_ -! . Extended the interface and the algorithm to handle -! 2-d arrays with an index. -!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::dSort1_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(ikey,indx(l1)) .ge. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(ikey,indx(l1)) .le. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine dSort1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: cSort1_ - A stable merge index sorting of CHAR(*)s. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine cSort1_(indx,keys,ikey,descend,stat) - use m_stdio, only : stderr - use m_die, only : die - implicit none - - integer, dimension(:), intent(inout) :: indx - character(len=*), dimension(:,:), intent(in) :: keys - integer,intent(in) :: ikey - logical, optional, intent(in) :: descend - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . initial prototype/prolog/code -! . Copied code from cSort_ -! . Extended the interface and the algorithm to handle -! 2-d arrays with an index. 
-!EOP ___________________________________________________________________ - - logical :: dsnd - integer :: ierr - integer, dimension(:),allocatable :: mtmp - integer :: n - - character(len=*),parameter :: myname_=myname//'::cSort1_' - - if(present(stat)) stat=0 - - n=size(indx) - - allocate(mtmp(n),stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(mtmp(:)) error, stat =',ierr - if(.not.present(stat)) call die(myname_) - stat=ierr - return - endif - - dsnd=.false. - if(present(descend)) dsnd=descend - - call MergeSort_() - - deallocate(mtmp) - -contains -subroutine MergeSort_() - implicit none - integer :: mstep,lstep - integer :: lb,lm,le - - mstep=1 - do while(mstep < n) - lstep=mstep*2 - - lb=1 - do while(lb < n) - lm=lb+mstep - le=min(lm-1+mstep,n) - - call merge_(lb,lm,le) - indx(lb:le)=mtmp(lb:le) - lb=le+1 - end do - - mstep=lstep - end do -end subroutine MergeSort_ - -subroutine merge_(lb,lm,le) - integer,intent(in) :: lb,lm,le - integer :: l1,l2,l - - l1=lb - l2=lm - do l=lb,le - if(l2.gt.le) then - mtmp(l)=indx(l1) - l1=l1+1 - elseif(l1.ge.lm) then - mtmp(l)=indx(l2) - l2=l2+1 - else - if(dsnd) then - if(keys(ikey,indx(l1)) .ge. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - else - if(keys(ikey,indx(l1)) .le. keys(ikey,indx(l2))) then - mtmp(l)=indx(l1) - l1=l1+1 - else - mtmp(l)=indx(l2) - l2=l2+1 - endif - endif - endif - end do -end subroutine merge_ - -end subroutine cSort1_ -!----------------------------------------------------------------------- -end module m_MergeSorts -!. diff --git a/src/externals/mct/mpeu/m_Permuter.F90 b/src/externals/mct/mpeu/m_Permuter.F90 deleted file mode 100644 index 202fc1de751..00000000000 --- a/src/externals/mct/mpeu/m_Permuter.F90 +++ /dev/null @@ -1,1284 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_Permuter - permute/unpermute -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_Permuter - implicit none - private ! except - - public :: permute - public :: unpermute - - interface permute; module procedure & - permutei_, & ! integer in place - permuteio_, & ! integer with an output - permutei1_, & ! integer in place - permuteio1_, & ! integer with an output - permuter_, & ! real in place - permutero_, & ! real with an output - permuter1_, & ! real in place - permutero1_, & ! real with an output - permuted_, & ! dble in place - permutedo_, & ! dble with an output - permuted1_, & ! dble in place - permutedo1_, & ! dble with an output - permutel_, & ! logical in place - permutelo_, & ! logical with an output - permutel1_, & ! logical in place - permutelo1_ ! logical with an output - end interface - - interface unpermute; module procedure & - unpermutei_, & ! integer in place - unpermuteio_, & ! integer with an output - unpermutei1_, & ! integer in place - unpermuteio1_, & ! integer with an output - unpermuter_, & ! real in place - unpermutero_, & ! real with an output - unpermuter1_, & ! real in place - unpermutero1_, & ! real with an output - unpermuted_, & ! dble in place - unpermutedo_, & ! dble with an output - unpermuted1_, & ! dble in place - unpermutedo1_, & ! dble with an output - unpermutel_, & ! logical in place - unpermutelo_, & ! 
logical with an output - unpermutel1_, & ! logical in place - unpermutelo1_ ! logical with an output - end interface - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_Permuter' - -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutei_ - permute an integer array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutei_(ary,indx,n) - use m_die - implicit none - integer,dimension(:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutei_' - - integer,allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permuteio_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permutei_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permuteio_ - permute an integer array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permuteio_(aout,ary,indx,n) - implicit none - integer,dimension(:),intent(inout) :: aout - integer,dimension(:),intent(in ) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permuteio_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(i)=ary(l) - end do - -end subroutine permuteio_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutei_ - unpermute a _permuted_ integer array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutei_(ary,indx,n) - use m_die - implicit none - integer,dimension(:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutei_' - - integer,allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermuteio_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermutei_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! 
!IROUTINE: unpermuteio_ - unpermute a _permuted_ integer array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermuteio_(aout,ary,indx,n) - implicit none - integer,dimension(:),intent(inout) :: aout - integer,dimension(:),intent(in) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermuteio_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(l)=ary(i) - end do - -end subroutine unpermuteio_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permuter_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permuter_(ary,indx,n) - use m_die - use m_realkinds,only : SP - implicit none - real(SP),dimension(:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permuter_' - - real(kind(ary)),allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permutero_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permuter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutero_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutero_(aout,ary,indx,n) - use m_realkinds,only : SP - implicit none - real(SP),dimension(:),intent(inout) :: aout - real(SP),dimension(:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutero_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(i)=ary(l) - end do - -end subroutine permutero_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermuter_ - unpermute a _permuted_ real array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermuter_(ary,indx,n) - use m_die - use m_realkinds,only : SP - implicit none - real(SP),dimension(:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermuter_' - - real(kind(ary)),allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermutero_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermuter_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutero_ - unpermute a _permuted_ real array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutero_(aout,ary,indx,n) - use m_realkinds,only : SP - implicit none - real(SP),dimension(:),intent(inout) :: aout - real(SP),dimension(:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutero_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(l)=ary(i) - end do - -end subroutine unpermutero_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permuted_ - permute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permuted_(ary,indx,n) - use m_die - use m_realkinds,only : DP - implicit none - real(DP),dimension(:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permuted_' - - real(kind(ary)),allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permutedo_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permuted_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutedo_ - permute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutedo_(aout,ary,indx,n) - use m_realkinds,only : DP - implicit none - real(DP),dimension(:),intent(inout) :: aout - real(DP),dimension(:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutedo_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(i)=ary(l) - end do - -end subroutine permutedo_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
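[Reviewer note, not part of the deleted source: the permute/unpermute pairs above differ only in the direction of the copy — permuteio_ gathers aout(i)=ary(indx(i)), while unpermuteio_ scatters aout(indx(i))=ary(i) — so unpermute exactly reverses a previous permute done with the same index array. A quick Python illustration:]

    def permute(ary, indx):
        """Gather: out[i] = ary[indx[i]] (the permuteio_ convention, 0-based)."""
        return [ary[j] for j in indx]

    def unpermute(ary, indx):
        """Scatter: out[indx[i]] = ary[i], undoing permute() with the same indx."""
        out = [None] * len(ary)
        for i, j in enumerate(indx):
            out[j] = ary[i]
        return out

    vals = ["a", "b", "c", "d"]
    indx = [2, 0, 3, 1]                      # e.g. produced by an index sort
    assert permute(vals, indx) == ["c", "a", "d", "b"]
    assert unpermute(permute(vals, indx), indx) == vals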
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermuted_ - unpermute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermuted_(ary,indx,n) - use m_die - use m_realkinds,only : DP - implicit none - real(DP),dimension(:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermuted_' - - real(kind(ary)),allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermutedo_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermuted_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutedo_ - unpermute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutedo_(aout,ary,indx,n) - use m_realkinds,only : DP - implicit none - real(DP),dimension(:),intent(inout) :: aout - real(DP),dimension(:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutedo_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(l)=ary(i) - end do - -end subroutine unpermutedo_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutel_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutel_(ary,indx,n) - use m_die - implicit none - logical,dimension(:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutel_' - - logical,allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permutelo_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permutel_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutelo_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutelo_(aout,ary,indx,n) - implicit none - logical,dimension(:),intent(inout) :: aout - logical,dimension(:),intent(in) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutelo_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(i)=ary(l) - end do - -end subroutine permutelo_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutel_ - unpermute a _permuted_ logical array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutel_(ary,indx,n) - use m_die - implicit none - logical,dimension(:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutel_' - - logical,allocatable,dimension(:) :: wk - integer :: i,ier - - allocate(wk(n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermutelo_(wk,ary,indx,n) - - do i=1,n - ary(i)=wk(i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermutel_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutelo_ - unpermute a _permuted_ logical array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutelo_(aout,ary,indx,n) - implicit none - logical,dimension(:),intent(inout) :: aout - logical,dimension(:),intent(in) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutelo_' - - integer :: i,l - - do i=1,n - l=indx(i) - aout(l)=ary(i) - end do - -end subroutine unpermutelo_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutei1_ - permute an integer array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutei1_(ary,indx,n) - use m_die - implicit none - integer,dimension(:,:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutei1_' - - integer,allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permuteio1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permutei1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permuteio1_ - permute an integer array according to indx[] -! 
-! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permuteio1_(aout,ary,indx,n) - implicit none - integer,dimension(:,:),intent(inout) :: aout - integer,dimension(:,:),intent(in ) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permuteio1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,i)=ary(1:m,l) - end do - -end subroutine permuteio1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutei1_ - unpermute a _permuted_ integer array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutei1_(ary,indx,n) - use m_die - implicit none - integer,dimension(:,:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutei1_' - - integer,allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermuteio1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermutei1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermuteio1_ - unpermute a _permuted_ integer array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermuteio1_(aout,ary,indx,n) - implicit none - integer,dimension(:,:),intent(inout) :: aout - integer,dimension(:,:),intent(in) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermuteio1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,l)=ary(1:m,i) - end do - -end subroutine unpermuteio1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permuter1_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permuter1_(ary,indx,n) - use m_die - use m_realkinds,only : SP - implicit none - real(SP),dimension(:,:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permuter1_' - - real(kind(ary)),allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permutero1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permuter1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutero1_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutero1_(aout,ary,indx,n) - use m_realkinds,only : SP - implicit none - real(SP),dimension(:,:),intent(inout) :: aout - real(SP),dimension(:,:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutero1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,i)=ary(1:m,l) - end do - -end subroutine permutero1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermuter1_ - unpermute a _permuted_ real array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermuter1_(ary,indx,n) - use m_die - use m_realkinds,only : SP - implicit none - real(SP),dimension(:,:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermuter1_' - - real(kind(ary)),allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermutero1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermuter1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutero1_ - unpermute a _permuted_ real array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutero1_(aout,ary,indx,n) - use m_realkinds,only : SP - implicit none - real(SP),dimension(:,:),intent(inout) :: aout - real(SP),dimension(:,:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutero1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,l)=ary(1:m,i) - end do - -end subroutine unpermutero1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permuted1_ - permute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permuted1_(ary,indx,n) - use m_die - use m_realkinds,only : DP - implicit none - real(DP),dimension(:,:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permuted1_' - - real(kind(ary)),allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permutedo1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permuted1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutedo1_ - permute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutedo1_(aout,ary,indx,n) - use m_realkinds,only : DP - implicit none - real(DP),dimension(:,:),intent(inout) :: aout - real(DP),dimension(:,:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutedo1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,i)=ary(1:m,l) - end do - -end subroutine permutedo1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermuted1_ - unpermute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermuted1_(ary,indx,n) - use m_die - use m_realkinds,only : DP - implicit none - real(DP),dimension(:,:),intent(inout) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermuted1_' - - real(kind(ary)),allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermutedo1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermuted1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutedo1_ - unpermute a double precision array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutedo1_(aout,ary,indx,n) - use m_realkinds,only : DP - implicit none - real(DP),dimension(:,:),intent(inout) :: aout - real(DP),dimension(:,:),intent(in) :: ary - integer ,dimension(:),intent(in) :: indx - integer , intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutedo1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,l)=ary(1:m,i) - end do - -end subroutine unpermutedo1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutel1_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutel1_(ary,indx,n) - use m_die - implicit none - logical,dimension(:,:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutel1_' - - logical,allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call permutelo1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine permutel1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: permutelo1_ - permute a real array according to indx[] -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine permutelo1_(aout,ary,indx,n) - implicit none - logical,dimension(:,:),intent(inout) :: aout - logical,dimension(:,:),intent(in) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::permutelo1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,i)=ary(1:m,l) - end do - -end subroutine permutelo1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutel1_ - unpermute a _permuted_ logical array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutel1_(ary,indx,n) - use m_die - implicit none - logical,dimension(:,:),intent(inout) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutel1_' - - logical,allocatable,dimension(:,:) :: wk - integer :: i,l,ier - - l=size(ary,1) - allocate(wk(l,n),stat=ier) - if(ier/=0) call perr_die(myname_,'allocate()',ier) - - call unpermutelo1_(wk,ary,indx,n) - - do i=1,n - ary(:,i)=wk(:,i) - end do - - deallocate(wk,stat=ier) - if(ier/=0) call perr_die(myname_,'deallocate()',ier) - -end subroutine unpermutel1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: unpermutelo1_ - unpermute a _permuted_ logical array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine unpermutelo1_(aout,ary,indx,n) - implicit none - logical,dimension(:,:),intent(inout) :: aout - logical,dimension(:,:),intent(in) :: ary - integer,dimension(:),intent(in) :: indx - integer, intent(in) :: n - -! !REVISION HISTORY: -! 25Aug99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::unpermutelo1_' - - integer :: i,l,m - - m=min(size(aout,1),size(ary,1)) - do i=1,n - l=indx(i) - aout(1:m,l)=ary(1:m,i) - end do - -end subroutine unpermutelo1_ - -end module m_Permuter diff --git a/src/externals/mct/mpeu/m_SortingTools.F90 b/src/externals/mct/mpeu/m_SortingTools.F90 deleted file mode 100644 index 2f7399a45fa..00000000000 --- a/src/externals/mct/mpeu/m_SortingTools.F90 +++ /dev/null @@ -1,96 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_SortingTools - A collection of different sorting tools -! -! !DESCRIPTION: -! -! This module contains a collection of sorting utilities. The -! utilities are accessed through three generic interfaces, IndexSet(), -! IndexSort(), and IndexBin(). -! -! Note that, a version of IndexBin() for real arguments is not -! implemented due to the difficulty of comparing two real values as -! being equal. For example, a bin for real values may be specified -! as a single number, a range of two numbers, a number with an -! absolute error-bar, or a number with a relative error-bar. -! -! In general, one may have to map both keys(:) and bins(:) to -! integer indices by the a given rule, then use the integer version -! of IndexBin() with the two integer index arrays to do the sorting. -! This mapping rule, however, is application dependent. -! -! Also note that, in principle, it is possible to use both -! IndexSort() and IndexBin() in the same sorting task. -! -! 
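[Editor's note] The m_SortingTools prolog above defers its IndexSet()/IndexSort() example to the prolog of m_MergeSorts, which is not part of this diff. The sketch below is an editor's illustration of the multi-key usage pattern that prolog describes; the IndexSort argument list (n, indx, key, descend) and the sample keys are assumptions based on that description, not code from the deleted file.

   ! Hypothetical driver: order records by kt, ties broken by lat,
   ! using a stable index sort (least-significant key sorted first).
   program indexsort_sketch
      use m_SortingTools, only : IndexSet, IndexSort
      implicit none
      integer, parameter :: n = 5
      integer :: indx(n)
      integer :: kt(n)  = (/ 3, 1, 3, 2, 1 /)            ! primary key
      real    :: lat(n) = (/ 10., -5., 40., 0., 25. /)   ! secondary key
      call IndexSet(n, indx)                        ! indx = 1, 2, ..., n
      call IndexSort(n, indx, lat, descend=.false.) ! secondary key first
      call IndexSort(n, indx, kt,  descend=.false.) ! primary key last (stable)
      kt (1:n) = kt ( indx(1:n) )                   ! apply the permutation in place
      lat(1:n) = lat( indx(1:n) )
   end program indexsort_sketch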
!INTERFACE: - - module m_SortingTools - - use m_MergeSorts !only : IndexSet,IndexSort - use m_IndexBin_integer !only : IndexBin - use m_IndexBin_char !only : IndexBin - use m_IndexBin_logical !only : IndexBin - use m_rankMerge !only : RankSet,RankMerge,IndexedRankMerge - use m_Permuter !only : Permute, Unpermute - - implicit none - - private ! except - - public :: IndexSet ! define an initial list of indices - public :: IndexSort ! index for a new rank out of the old - public :: IndexBin ! index for sorting bins - public :: RankSet ! define an initial list of ranks - public :: RankMerge ! merge two arrays by re-ranking - public :: IndexedRankMerge ! index-merge two array segments - public :: Permute ! permute array entries - public :: Unpermute ! invert permutation - -! !EXAMPLES: -! -! - An example of using IndexSet()/IndexSort() in combination with -! the convenience of the Fortran 90 array syntex can be found in the -! prolog of m_MergeSorts. -! -! - An example of using IndexSet()/IndexBin(): Copying all "good" -! data to another array. -! -! integer :: indx(n) -! call IndexSet(n,indx) -! call IndexBin(n,indx,allObs(:)%qcflag,GOOD,ln0=ln_GOOD) -! -! ! Copy all "good" data to another array -! goodObs(1:ln_GOOD)=allObs( indx(1:ln_GOOD) ) -! -! ! Refill all "good" data back to their original places -! allObs( indx(1:ln_GOOD) ) = goodObs(1:ln_GOOD) -! -! - Similarily, multiple keys may be used in an IndexBin() call -! to selectively sort the data. The following code will move data -! with kt = kt_Us,kt_U,kt_Vs,kt_V up to the front: -! -! call IndexBin(n,indx,allObs(:)%kt,(/kt_Us,kt_U,kt_Vs,kt_V/)) -! allObs(1:n) = allObs( indx(1:n) ) -! -! - Additional applications can also be implemented with other -! argument combinations. -! -! !REVISION HISTORY: -! 15Mar00 - Jing Guo -! . Added m_rankMerge module interface -! 20Apr99 - Jing Guo -! - Commented "only" in use m_IndexBin_xxx to avoid an -! apperent compiler bug on DEC/OSF1 -! 17Feb99 - Jing Guo - initial prototype/prolog/code -! 19Oct00 - J.W. Larson - added Permuter and -! Unpermuter to list of public functions. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_SortingTools' - -end module m_SortingTools diff --git a/src/externals/mct/mpeu/m_StrTemplate.F90 b/src/externals/mct/mpeu/m_StrTemplate.F90 deleted file mode 100644 index 979e9800ac3..00000000000 --- a/src/externals/mct/mpeu/m_StrTemplate.F90 +++ /dev/null @@ -1,454 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_StrTemplate - A template formatting a string with variables -! -! !DESCRIPTION: -! -! A template resolver formatting a string with a string variable -! and time variables. The format descriptors are similar to those -! used in the GrADS. -! -! "%y4" substitute with a 4 digit year -! "%y2" a 2 digit year -! "%m1" a 1 or 2 digit month -! "%m2" a 2 digit month -! "%mc" a 3 letter month in lower cases -! "%Mc" a 3 letter month with a leading letter in upper case -! "%MC" a 3 letter month in upper cases -! "%d1" a 1 or 2 digit day -! "%d2" a 2 digit day -! "%h1" a 1 or 2 digit hour -! "%h2" a 2 digit hour -! "%h3" a 3 digit hour (?) -! "%n2" a 2 digit minute -! "%s" a string variable -! "%%" a "%" -! -! 
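[Editor's note] As a quick illustration of the descriptor table above (an editor's sketch, not part of the deleted source), the public StrTemplate entry point, whose specific interface follows, could be used to build a dated file name; the template string and the 'obs' identifier are invented for the example.

   program strtemplate_sketch
      use m_StrTemplate, only : StrTemplate
      implicit none
      character(len=64) :: fname
      ! '%s' <- xid, '%y4%m2%d2' <- nymd, '%h2' <- nhms (GrADS-style class, the default)
      call StrTemplate(fname, '%s.t%y4%m2%d2_%h2z.nc', &
                       xid='obs', nymd=19990825, nhms=120000)
      ! fname now holds 'obs.t19990825_12z.nc'
   end program strtemplate_sketch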
!INTERFACE: - - module m_StrTemplate - implicit none - private ! except - - public :: StrTemplate ! Substitute variables in a template - - interface StrTemplate - module procedure strTemplate_ - end interface - -! !REVISION HISTORY: -! 01Jun99 - Jing Guo -! - initial prototype/prolog/code -! 19Jan01 - Jay Larson - removed numerous -! double-quote characters appearing inside single-quote -! blocks. This was done to comply with pgf90. Also, -! numerous double-quote characters were removed from -! within comment blocks because pgf90 kept trying to -! interpret them (spooky). -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_StrTemplate' - - character(len=3),parameter,dimension(12) :: mon_lc = (/ & - 'jan','feb','mar','apr','may','jun', & - 'jul','aug','sep','oct','nov','dec' /) - - character(len=3),parameter,dimension(12) :: mon_wd = (/ & - 'Jan','Feb','Mar','Apr','May','Jun', & - 'Jul','Aug','Sep','Oct','Nov','Dec' /) - - character(len=3),parameter,dimension(12) :: mon_uc = (/ & - 'JAN','FEB','MAR','APR','MAY','JUN', & - 'JUL','AUG','SEP','OCT','NOV','DEC' /) - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: strTemplate_ - expanding a format template to a string -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine strTemplate_(str,tmpl,class,xid,nymd,nhms,stat) - use m_chars, only : uppercase - use m_stdio, only : stderr - use m_die, only : die - implicit none - - character(len=*),intent(out) :: str ! the output - - character(len=*),intent(in ) :: tmpl ! a "format" - - character(len=*),intent(in ),optional :: class - ! choose a UNIX or a GrADS(defulat) type format - - character(len=*),intent(in ),optional :: xid - ! a string substituting a '%s'. Trailing - ! spaces will be ignored - - integer,intent(in ),optional :: nymd - ! yyyymmdd, substituting '%y4', '%y2', '%m1', - ! '%m2', '%mc', '%Mc', and '%MC' - - integer,intent(in ),optional :: nhms - ! hhmmss, substituting '%h1', '%h2', '%h3', - ! and '%n2' - - integer,intent(out),optional :: stat - ! error code - -! !REVISION HISTORY: -! 03Jun99 - Jing Guo -! - initial prototype/prolog/code -! 08Jan03 - R. Jacob Small change to get -! around IBM compiler bug. Cant have character valued functions -! in case statements. Fix found by Everest Ong. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::strTemplate_' - character(len=16) :: tmpl_class - character(len=16) :: tmp_upper - - tmpl_class="GX" - if(present(class)) tmpl_class=class - - tmp_upper = uppercase(tmpl_class) - select case(tmp_upper) - - case("GX","GRADS") - call GX_(str,tmpl,xid,nymd,nhms,stat) - - !case("UX","UNIX") ! yet to be implemented - ! call UX_(str,tmpl,xid,nymd,nhms,stat) - - case default - write(stderr,'(4a)') myname_,': unknown class: ', & - trim(tmpl_class),'.' - if(.not.present(stat)) call die(myname_) - stat=-1 - return - end select - -end subroutine strTemplate_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GX_ - evaluate a GrADS style string template -! -! !DESCRIPTION: -! -! 
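[Editor's note] When reading the GX_ body below, it may help to see the integer decomposition of nymd/nhms worked out once; the sample values are the editor's, not from the source.

   ! For nymd = 19990825 and nhms = 123000, GX_ computes:
   !   iy4 = 19990825/10000           = 1999   ("%y4")
   !   iy2 = mod(1999,100)            = 99     ("%y2")
   !   imo = mod(19990825,10000)/100  = 8      ("%m1", "%m2", "%mc", ...)
   !   idy = mod(19990825,100)        = 25     ("%d1", "%d2")
   !   ihr = 123000/10000             = 12     ("%h1", "%h2", "%h3")
   !   imn = mod(123000,10000)/100    = 30     ("%n2")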
!INTERFACE: - - subroutine GX_(str,tmpl,xid,nymd,nhms,stat) - use m_stdio,only : stderr - use m_die, only : die,perr - implicit none - character(len=*),intent(out) :: str - character(len=*),intent(in ) :: tmpl - character(len=*),optional,intent(in) :: xid - integer,optional,intent(in) :: nymd - integer,optional,intent(in) :: nhms - integer,optional,intent(out) :: stat - -! !REVISION HISTORY: -! 01Jun99 - Jing Guo -! - initial prototype/prolog/code -! 19Jan01 - Jay Larson - added -! variable c1c2, to store c1//c2, which pgf90 -! would not allow as an argument to the 'select case' -! statement. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GX_' - - integer :: iy4,iy2,imo,idy - integer :: ihr,imn - integer :: i,i1,i2,m,k - integer :: ln_tmpl,ln_str - integer :: istp,kstp - - character(len=1) :: c0,c1,c2 - character(len=2) :: c1c2 - character(len=4) :: sbuf -!________________________________________ - ! Determine iyr, imo, and idy - iy4=-1 - iy2=-1 - imo=-1 - idy=-1 - if(present(nymd)) then - if(nymd < 0) then - call perr(myname_,'nymd < 0',nymd) - if(.not.present(stat)) call die(myname_) - stat=1 - return - endif - - i=nymd - iy4=i/10000 - iy2=mod(iy4,100) - i=mod(i,10000) - imo=i/100 - i=mod(i,100) - idy=i - endif -!________________________________________ - ! Determine ihr and imn - ihr=-1 - imn=-1 - if(present(nhms)) then - if(nhms < 0) then - call perr(myname_,'nhms < 0',nhms) - if(.not.present(stat)) call die(myname_) - stat=1 - return - endif - - i=nhms - ihr=i/10000 - i=mod(i,10000) - imn=i/100 - endif -!________________________________________ - - ln_tmpl=len_trim(tmpl) ! size of the format template - ln_str =len(str) ! size of the output string -!________________________________________ - - if(present(stat)) stat=0 - -str="" - -i=0; istp=1 -k=1; kstp=1 - -do while( i+istp <= ln_tmpl ) ! A loop over all tokens in (tmpl) - - if(k>ln_Str) exit ! truncate the output here. - - i=i+istp - c0=tmpl(i:i) - - select case(c0) - case ("%") - !________________________________________ - - c1="" - i1=i+1 - if(i1 <= ln_Tmpl) c1=tmpl(i1:i1) - !________________________________________ - - select case(c1) - - case("s") - if(.not.present(xid)) then - write(stderr,'(2a)') myname_, & - ': optional argument expected, "xid="' - if(.not.present(stat)) call die(myname_) - stat=1 - return - endif - - istp=2 - m=min(k+len_trim(xid)-1,ln_str) - str(k:m)=xid - k=m+1 - cycle - - case("%") - - istp=2 - str(k:k)="%" - k=k+1 ! kstp=1 - cycle - - case default - - c2="" - i2=i+2 - if(i2 <= ln_Tmpl) c2=tmpl(i2:i2) - !________________________________________ - - c1c2 = c1 // c2 - select case(c1c2) - - case("y4","y2","m1","m2","mc","Mc","MC","d1","d2") - if(.not.present(nymd)) then - write(stderr,'(2a)') myname_, & - ': optional argument expected, "nymd="' - if(.not.present(stat)) call die(myname_) - stat=1 - return - endif - istp=3 - - case("h1","h2","h3","n2") - if(.not.present(nhms)) then - write(stderr,'(2a)') myname_, & - ': optional argument expected, "nhms="' - if(.not.present(stat)) call die(myname_) - stat=1 - return - endif - istp=3 - - case default - - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - - end select ! case(c1//c2) - end select ! 
case(c1) - !________________________________________ - - select case(c1) - - case("y") - select case(c2) - case("2") - write(sbuf,'(i2.2)') iy2 - kstp=2 - case("4") - write(sbuf,'(i4.4)') iy4 - kstp=4 - case default - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - end select - - case("m") - select case(c2) - case("1") - if(imo < 10) then - write(sbuf,'(i1)') imo - kstp=1 - else - write(sbuf,'(i2)') imo - kstp=2 - endif - case("2") - write(sbuf,'(i2.2)') imo - kstp=2 - case("c") - sbuf=mon_lc(imo) - kstp=3 - case default - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - end select - - case("M") - select case(c2) - case("c") - sbuf=mon_wd(imo) - kstp=3 - case("C") - sbuf=mon_uc(imo) - kstp=3 - case default - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - end select - - case("d") - select case(c2) - case("1") - if(idy < 10) then - write(sbuf,'(i1)') idy - kstp=1 - else - write(sbuf,'(i2)') idy - kstp=2 - endif - case("2") - write(sbuf,'(i2.2)') idy - kstp=2 - case default - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - end select - - case("h") - select case(c2) - case("1") - if(ihr < 10) then - write(sbuf,'(i1)') ihr - kstp=1 - else - write(sbuf,'(i2)') ihr - kstp=2 - endif - case("2") - write(sbuf,'(i2.2)') ihr - kstp=2 - case("3") - write(sbuf,'(i3.3)') ihr - kstp=3 - case default - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - end select - - case("n") - select case(c2) - case("2") - write(sbuf,'(i2.2)') imn - kstp=2 - case default - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - end select - - case default - write(stderr,'(4a)') myname_, & - ': invalid template entry: ',trim(tmpl(i:)),'.' - if(.not.present(stat)) call die(myname_) - stat=2 - return - end select ! case(c1) - - m=min(k+kstp-1,ln_Str) - str(k:m)=sbuf - k=m+1 - - case default - - istp=1 - str(k:k)=tmpl(i:i) - k=k+1 - - end select ! case(c0) -end do - -end subroutine GX_ -end module m_StrTemplate diff --git a/src/externals/mct/mpeu/m_String.F90 b/src/externals/mct/mpeu/m_String.F90 deleted file mode 100644 index 2b8bc42e700..00000000000 --- a/src/externals/mct/mpeu/m_String.F90 +++ /dev/null @@ -1,831 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_String - The String Datatype -! -! !DESCRIPTION: -! The {\tt String} datatype is an encapsulated pointer to a one-dimensional -! array of single characters. This allows one to define variable-length -! strings, and arrays of variable-length strings. -! -! !INTERFACE: - - module m_String - -! !USES: -! No external modules are used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC TYPES: - - public :: String ! 
The class data structure - - Type String -#ifdef SEQUENCE - sequence -#endif - character(len=1),dimension(:),pointer :: c - End Type String - -! !PUBLIC MEMBER FUNCTIONS: - - public :: toChar - public :: char ! convert to a CHARACTER(*) - - public :: String_init - public :: init ! set a CHARACTER(*) type to a String - - public :: String_clean - public :: clean ! Deallocate memory occupied by a String - - public :: String_len - public :: len ! length of a String - - public :: String_bcast - public :: bcast ! Broadcast a String - - public :: String_mci ! Track memory used to store a String - public :: String_mco - - public :: ptr_chars ! Assign a pointer to a String's - ! character buffer - - interface char; module procedure & - str2ch0_, & - ch12ch0_ - end interface - - interface toChar; module procedure & - str2ch0_, & - ch12ch0_ - end interface - - interface String_init; module procedure & - initc_, & - initc1_, & - inits_ - end interface - - interface init; module procedure & - initc_, & - initc1_, & - inits_ - end interface - - interface String_clean; module procedure clean_; end interface - interface clean; module procedure clean_; end interface - interface String_len; module procedure len_; end interface - interface len; module procedure len_; end interface - interface String_bcast; module procedure bcast_; end interface - interface bcast; module procedure bcast_; end interface - - interface String_mci; module procedure & - mci0_, & - mci1_, & - mci2_, & - mci3_ - end interface - - interface String_mco; module procedure & - mco0_, & - mco1_, & - mco2_, & - mco3_ - end interface - - interface ptr_chars; module procedure & - ptr_chars_ - end interface - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_String' - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: str2ch0_ - Convert a String to a CHARACTER -! -! !DESCRIPTION: -! This function returns the contents of the character buffer of the -! input {\tt String} argument {\tt str} as a {\tt CHARCTER} suitable -! for printing. -! -! !INTERFACE: - - function str2ch0_(str) - -! !USES: -! -! No external modules are used by this function. - - implicit none - -! !INPUT PARAMETERS: -! - type(String), intent(in) :: str - -! !OUTPUT PARAMETERS: -! - character(len=size(str%c,1)) :: str2ch0_ - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::str2ch0_' - integer :: i - - do i=1,size(str%c) - str2ch0_(i:i)=str%c(i) - end do - - end function str2ch0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ch12ch0_ - Convert a CHARACTER(:) to a CHARACTER(*) -! -! !DESCRIPTION: -! This function takes an input one-dimensional array of single characters -! and returns a single character string. -! -! !INTERFACE: - - function ch12ch0_(ch1) - -! !USES: -! -! No external modules are used by this function. - - implicit none - -! !INPUT PARAMETERS: -! 
- character(len=1), dimension(:), intent(in) :: ch1 - -! !OUTPUT PARAMETERS: -! - character(len=size(ch1,1)) :: ch12ch0_ - -! !REVISION HISTORY: -! 22Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ch12ch0_' - integer :: i - - do i=1,size(ch1) - ch12ch0_(i:i)=ch1(i) - end do - - end function ch12ch0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initc_ - Create a String using a CHARACTER -! -! !DESCRIPTION: -! This routine takes an input scalar {\tt CHARACTER} argument {\tt chr}, -! and uses it to create the output {\tt String} argument {\tt str}. -! -! !INTERFACE: - - subroutine initc_(str, chr) - -! !USES: -! - use m_die, only : die,perr - use m_mall,only : mall_mci,mall_ison - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: chr - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: str - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initc_' - integer :: ln,ier,i - - ln=len(chr) - allocate(str%c(ln),stat=ier) - if(ier /= 0) then - call perr(myname_,'allocate()',ier) - call die(myname_) - endif - - if(mall_ison()) call mall_mci(str%c,myname) - - do i=1,ln - str%c(i)=chr(i:i) - end do - - end subroutine initc_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: initc1_ - Create a String using a CHARACTER array -! -! !DESCRIPTION: -! This routine takes an input {\tt CHARACTER(:)} argument {\tt chr}, -! and uses it to create the output {\tt String} argument {\tt str}. -! -! !INTERFACE: - - subroutine initc1_(str, chr) - -! !USES: -! - use m_die, only : die,perr - use m_mall,only : mall_mci,mall_ison - - implicit none - -! !INPUT PARAMETERS: -! - character, dimension(:), intent(in) :: chr - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: str - -! !REVISION HISTORY: -! 2Aug02 - J. Larson - initial prototype -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::initc1_' - integer :: ln,ier,i - - ln=size(chr) - allocate(str%c(ln),stat=ier) - if(ier /= 0) then - call perr(myname_,'allocate()',ier) - call die(myname_) - endif - - if(mall_ison()) call mall_mci(str%c,myname) - - do i=1,ln - str%c(i)=chr(i) - end do - - end subroutine initc1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: inits_ - Initialization of a String from another String -! -! !DESCRIPTION: -! This routine takes an input {\tt String} argument {\tt iStr} and -! creates an output {\tt String} argument {\tt oStr}. In other words, -! it copies {\tt iStr} to {\tt oStr}. -! -! !INTERFACE: - - subroutine inits_(oStr, iStr) - -! !USES: -! - use m_die, only : die - use m_mall,only : mall_mci,mall_ison - - implicit none - -! !INPUT PARAMETERS: -! - type(String), intent(in) :: iStr - -! 
!OUTPUT PARAMETERS: -! - type(String), intent(out) :: oStr - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::inits_' - integer :: ln,ier,i - - ln=size(iStr%c) - - allocate(oStr%c(ln),stat=ier) - if(ier /= 0) call die(myname_,'allocate()',ier) - - if(mall_ison()) call mall_mci(oStr%c,myname) - - do i=1,ln - oStr%c(i)=iStr%c(i) - end do - - end subroutine inits_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - Deallocate Memory Occupied by a String -! -! !DESCRIPTION: -! This routine deallocates memory associated with the input/output -! {\tt String} argument {\tt str}. This amounts to deallocating -! {\tt str\%c}. -! -! !INTERFACE: - - subroutine clean_(str) - -! !USES: -! - use m_die, only : die,perr - use m_mall,only : mall_mco,mall_ison - - implicit none - -! !INPUT/OUTPUT PARAMETERS: -! - type(String), intent(inout) :: str - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - - if(mall_ison()) call mall_mco(str%c,myname) - - deallocate(str%c,stat=ier) - if(ier /= 0) then - call perr(myname_,'deallocate()',ier) - call die(myname_) - endif - - end subroutine clean_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: bcast_ - MPI Broadcast of a rank-0 String -! -! !DESCRIPTION: -! This routine performs an MPI broadcast of the input/output {\tt String} -! argument {\tt Str} on a communicator associated with the Fortran integer -! handle {\tt comm}. The broadcast originates from the process with rank -! given by {\tt root} on {\tt comm}. The {\tt String} argument {\tt Str} -! is on entry valid only on the {\tt root} process, and is valid on exit -! on all processes on the communicator {\tt comm}. The success (failure) -! is signified by a zero (non-zero) value of the optional {\tt INTEGER} -! output argument {\tt stat}. -! -! !INTERFACE: - - subroutine bcast_(Str, root, comm, stat) - -! !USES: -! - use m_mpif90 - use m_die, only : perr,die - use m_mall,only : mall_mci,mall_ison - - implicit none - -! !INPUT PARAMETERS: -! - integer, intent(in) :: root - integer, intent(in) :: comm - -! !INPUT/OUTPUT PARAMETERS: -! - type(String), intent(inout) :: Str ! (IN) on the root, - ! (OUT) elsewhere - -! !OUTPUT PARAMETERS: -! - integer, optional, intent(out) :: stat - -! !REVISION HISTORY: -! 
27Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::bcast_' - integer :: ln,ier,myID - - if(present(stat)) stat=0 - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) then - call MP_perr(myname_,'MP_comm_rank()',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - if(myID==root) then - ln=size(Str%c) - if(ln<=0) call die(myname_,'size(Str%c) <= 0') - endif - - call MPI_bcast(ln,1,MP_INTEGER,root,comm,ier) - if(ier/=0) then - call MP_perr(myname_,'MPI_bcast(ln)',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - if(myID /= root) then - - allocate(Str%c(ln),stat=ier) - if(ier /= 0) then - call perr(myname_,'allocate()',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - if(mall_ison()) call mall_mci(Str%c,myname) - endif - - call MPI_bcast(Str%c(1),ln,MP_CHARACTER,root,comm,ier) - if(ier/=0) then - call MP_perr(myname_,'MPI_bcast(Str%c)',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - end subroutine bcast_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mci0_ - checking in a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mci0_(marg,thread) - -! !USES: -! - use m_mall, only : mall_ci - - implicit none - -! !INPUT PARAMETERS: -! - type(String), intent(in) :: marg - character(len=*), intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mci0_' - - call mall_ci(1,thread) - - end subroutine mci0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mco0_ - checking out a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mco0_(marg,thread) - -! !USES: -! - use m_mall, only : mall_co - - implicit none - - type(String), intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mco0_' - - call mall_co(1,thread) - - end subroutine mco0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mci1_ - checking in a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mci1_(marg,thread) - -! !USES: -! - use m_mall, only : mall_ci - - implicit none - -! !INPUT PARAMETERS: -! - type(String), dimension(:), intent(in) :: marg - character(len=*), intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mci1_' - - call mall_ci(size(marg),thread) - - end subroutine mci1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mco1_ - checking out a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mco1_(marg,thread) - -! !USES: -! - use m_mall, only : mall_co - - implicit none - -! !INPUT PARAMETERS: -! - type(String), dimension(:), intent(in) :: marg - character(len=*), intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mco1_' - - call mall_co(size(marg),thread) - - end subroutine mco1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mci2_ - checking in a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mci2_(marg, thread) - -! !USES: -! - use m_mall, only : mall_ci - - implicit none - -! !INPUT PARAMETERS: -! - type(String), dimension(:,:), intent(in) :: marg - character(len=*), intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mci2_' - - call mall_ci(size(marg),thread) - - end subroutine mci2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mco2_ - checking out a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mco2_(marg,thread) - -! !USES: -! - use m_mall, only : mall_co - - implicit none - -! !INPUT PARAMETERS: -! - type(String), dimension(:,:), intent(in) :: marg - character(len=*), intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mco2_' - - call mall_co(size(marg),thread) - - end subroutine mco2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mci3_ - checking in a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mci3_(marg,thread) - -! !USES: -! - use m_mall, only : mall_ci - - implicit none - -! !INPUT PARAMETERS: -! - type(String), dimension(:,:,:), intent(in) :: marg - character(len=*), intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mci3_' - - call mall_ci(size(marg),thread) - - end subroutine mci3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: mco3_ - checking out a String scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine mco3_(marg,thread) - -! !USES: -! - use m_mall, only : mall_co - - implicit none - -! !INPUT PARAMETERS: -! 
- type(String), dimension(:,:,:), intent(in) :: marg - character(len=*), intent(in) :: thread - -! !REVISION HISTORY: -! 07Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::mco3_' - - call mall_co(size(marg),thread) - - end subroutine mco3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: len_ = len of a String -! -! !DESCRIPTION: -! -! !INTERFACE: - - integer function len_(str) - -! !USES: -! -! No external modules are used by this function. - - implicit none - -! !INPUT PARAMETERS: -! - type(String),intent(in) :: str - -! !REVISION HISTORY: -! 10Apr00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::len_' - - len_=size(str%c) - - end function len_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ptr_chars_ - direct -! -! !DESCRIPTION: -! This pointer-valued function provides a direct interface to the -! character buffer in the input {\tt String} argument {\tt str}. That -! is, {\tt ptr\_chars\_ => str\%c}. -! -! !INTERFACE: - - function ptr_chars_(str) - -! !USES: -! -! No external modules are used by this function. - - implicit none - -! !INPUT PARAMETERS: -! - type(String), intent(in) :: str - -! !OUTPUT PARAMETERS: -! - character(len=1), dimension(:), pointer :: ptr_chars_ - -! !REVISION HISTORY: -! 10Apr00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ptr_chars_' - - ptr_chars_ => str%c - - end function ptr_chars_ - - end module m_String diff --git a/src/externals/mct/mpeu/m_StringLinkedList.F90 b/src/externals/mct/mpeu/m_StringLinkedList.F90 deleted file mode 100644 index 50300a8b0cb..00000000000 --- a/src/externals/mct/mpeu/m_StringLinkedList.F90 +++ /dev/null @@ -1,553 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_StringLinkedList - A linked-list of String -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_StringLinkedList - use m_String,only : String - implicit none - private ! except - - public :: StringLinkedList ! The class data structure - - ! o An object of a StringLinkedList should be defined - ! as a pointer of a StringLinkedList. It is often - ! represented by a pointer to the head-node of the - ! linked-list. - ! - ! o A node in a StringLinkedList is specificed by a - ! reference pointer. A reference pointer is a - ! logical reference of a node in the list. However, - ! it does not physically point to that node. In - ! fact, a reference pointer normally references to - ! the node physically pointed by the pointer in the - ! node physically pointed by the reference pointer, - ! - ! [this] -> [..|next] -> [..|next] - ! - ! where the last node is the logically referenced - ! node. - - public :: StringLinkedList_init ! 
constructor - public :: StringLinkedList_clean ! destructor - - ! A _clean() action will reset a StringLinkedList to its - ! pre-_init() status. - - public :: StringLinkedList_insert ! grower, insert a node - public :: StringLinkedList_delete ! ungrower, delete a node - - ! Both procedures processing the node through a given - ! reference pointer. The reference pointer will not - ! be modified directly through either _insert() or - ! _delete(). It is the pointer in the node physically - ! pointed by a reference pointer got modified. Also, - ! the node logically referenced by the reference - ! pointer is either the new node for an _insert(), and - ! the removed node for a _delete(). - - public :: StringLinkedList_eol ! inquirer, is an end-node? - - ! An end-of-list situation occurs when the reference - ! pointer is logically referencing to the end-node or - ! beyond. Note that an end-node links to itself. - - public :: StringLinkedList_next ! iterator, go to the next node. - - public :: StringLinkedList_count ! counter - - ! Count the number of nodes from this reference pointer, - ! starting from and including the logical node but - ! excluding the end-node. - - public :: StringLinkedList_get ! fetcher - - ! Get the value logically referenced by a reference - ! pointer. Return EOL if the referenced node is an - ! EOL(). The reference pointer will be iterated to - ! the next node if the referenced node is not an EOL. - - type StringLinkedList - type(String) :: str - type(StringLinkedList),pointer :: next - end type StringLinkedList - - interface StringLinkedList_init ; module procedure & - init_ - end interface - - interface StringLinkedList_clean ; module procedure & - clean_ - end interface - - interface StringLinkedList_insert; module procedure & - insertc_, & ! insert a CHARACTER(len=*) argument - inserts_ ! insert a String argument - end interface - - interface StringLinkedList_delete; module procedure & - delete_ - end interface - - interface StringLinkedList_eol ; module procedure & - eol_ - end interface - - interface StringLinkedList_next ; module procedure & - next_ - end interface - - interface StringLinkedList_count ; module procedure & - count_ - end interface - - interface StringLinkedList_get ; module procedure & - getc_, & ! get as a CHARACTER(len=*) - gets_ ! get as a String - end interface - -! !REVISION HISTORY: -! 16Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_StringLinkedList' - -! Examples: -! -! 1) Creating a first-in-first-out linked-list, -! -! type(StringLinkedList),pointer :: head,this -! character(len=80) :: aline -! -! call StringLinkedList_init(head) -! this => head -! do -! read(*,'(a)',iostat=ier) aline -! if(ier/=0) exit -! call StringLinkedList_insert(trim(aline),this) -! call StringLinkedList_next(this) -! end do -! -! 2) Creating a last-in-first-out linked-list, Note that the only -! difference from Example (1) is without a call to -! StringLinkedList_next(). -! -! type(StringLinkedList),pointer :: head,this -! character(len=80) :: aline -! -! call StringLinkedList_init(head) -! this => head -! do -! read(*,'(a)',iostat=ier) aline -! if(ier/=0) exit -! call StringLinkedList_insert(trim(aline),this) -! end do -! - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: init_ - initialize a StringLinkedList from a pointer -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine init_(head) - use m_die, only : die - use m_mall,only : mall_ison,mall_ci - implicit none - type(StringLinkedList),pointer :: head ! (out) a list - -! !REVISION HISTORY: -! 22Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::init_' - type(StringLinkedList),pointer :: tail - integer :: ier - - ! Two special nodes are needed for a linked-list, according to - ! Robert Sedgewick (Algorithms, QA76.6.S435, page 21). - ! - ! It seems only _head_ will be needed for external references. - ! Node _tail_ will be used to denote an end-node. - - allocate(head,tail,stat=ier) - if(ier/=0) call die(myname_,'allocate()',ier) - - if(mall_ison()) call mall_ci(2,myname) ! for two nodes - - head%next => tail - tail%next => tail - - nullify(tail) - -end subroutine init_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: insertc_ - insert before the logically referenced node -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine insertc_(cstr,this) - use m_String,only : String_init - use m_mall, only : mall_ison,mall_ci - use m_die, only : die - implicit none - character(len=*),intent(in) :: cstr ! a new entry - type(StringLinkedList),pointer :: this ! (in) a node - -! !REVISION HISTORY: -! 16Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::insertc_' - type(StringLinkedList),pointer :: tmpl - integer :: ier - - ! Create a memory cell for the new entry of StringLinkedList - - allocate(tmpl,stat=ier) - if(ier/=0) call die(myname_,'allocate()',ier) - - if(mall_ison()) call mall_ci(1,myname) ! for one nodes - - ! Store the data - - call String_init(tmpl%str,cstr) - - ! Rebuild the links, if the List was not empty - - tmpl%next => this%next - this%next => tmpl - - ! Clean the working pointer - - nullify(tmpl) - -end subroutine insertc_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: inserts_ - insert before the logically referenced node -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine inserts_(str,this) - use m_String,only : String,String_init - use m_mall, only : mall_ison,mall_ci - use m_die, only : die - implicit none - type(String),intent(in) :: str ! a new entry - type(StringLinkedList),pointer :: this ! (in) a node - -! !REVISION HISTORY: -! 16Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::inserts_' - type(StringLinkedList),pointer :: tmpl - integer :: ier - - ! Create a memory cell for the new entry of StringLinkedList - - allocate(tmpl,stat=ier) - if(ier/=0) call die(myname_,'allocate()',ier) - - if(mall_ison()) call mall_ci(1,myname) ! for one nodes - - ! Store the data - - call String_init(tmpl%str,str) - - ! 
Rebuild the links, if the List was not empty - - tmpl%next => this%next - this%next => tmpl - - ! Clean the working pointer, if it mean anyting - - nullify(tmpl) - -end subroutine inserts_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: delete_ - delete the logically referenced node -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine delete_(this) - use m_String,only : String_clean - use m_mall, only : mall_ison,mall_co - use m_die, only : die - implicit none - type(StringLinkedList),pointer :: this ! (in) a node - -! !REVISION HISTORY: -! 17Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::delete_' - type(StringLinkedList),pointer :: tmpl - integer :: ier - - tmpl => this%next%next ! hold the next target - call String_clean(this%next%str) ! remove the next storage - - if(mall_ison()) call mall_co(1,myname) ! removing one node - - deallocate(this%next,stat=ier) ! Clean memory gabage - if(ier/=0) call die(myname_,'deallocate()',ier) - - ! Skip the current target. Rebuild the link to the target - ! of the current target. - - this%next => tmpl - - ! Clean the working pointer, if it mean anything - - nullify(tmpl) -end subroutine delete_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: eol_ - if the logically referenced node is an end-node -! -! !DESCRIPTION: -! -! !INTERFACE: - - function eol_(this) - implicit none - type(StringLinkedList),pointer :: this ! (in) a node - logical :: eol_ ! returned value - -! !REVISION HISTORY: -! 23Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::eol_' - - eol_=associated(this%next,this%next%next) -end function eol_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: next_ - point a reference pointer to the next node -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine next_(this) - implicit none - type(StringLinkedList),pointer :: this ! (inout) a node - -! !REVISION HISTORY: -! 23Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::next_' - - this => this%next - -end subroutine next_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: count_ - count the number of nodes -! -! !DESCRIPTION: -! -! !INTERFACE: - - function count_(this) - implicit none - type(StringLinkedList),pointer :: this ! (in) a node - integer :: count_ ! returned value - -! !REVISION HISTORY: -! 24Feb00 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::count_' - type(StringLinkedList),pointer :: tmpl - - tmpl => this - - count_=0 - do while(.not.eol_(tmpl)) - count_=count_+1 - call next_(tmpl) - end do - - nullify(tmpl) -end function count_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: getc_ - get the logically referenced value as CHARACTERs -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine getc_(this,cstr,eol) - use m_String,only : String - use m_String,only : String_init - use m_String,only : String_clean - use m_String,only : char - implicit none - type(StringLinkedList),pointer :: this ! (inout) a node - character(len=*),intent(out) :: cstr ! the referenced value - logical ,intent(out) :: eol ! if the node is an end-node - -! !REVISION HISTORY: -! 17Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::getc_' - type(String) :: str - - call gets_(this,str,eol) - - if(.not.eol) then - cstr=char(str) - call String_clean(str) - endif - -end subroutine getc_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: gets_ - get the logically referenced value as a String -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine gets_(this,str,eol) - use m_String,only : String - use m_String,only : String_init - implicit none - type(StringLinkedList),pointer :: this ! (inout) a node - type(String),intent(out) :: str ! the referenced value - logical ,intent(out) :: eol ! if the node is an end-node - -! !REVISION HISTORY: -! 17Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::gets_' - - eol=eol_(this) - if(.not.eol) then - call String_init(str,this%next%str) - call next_(this) - endif - -end subroutine gets_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: clean_ - clean the whole object from this point -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine clean_(head,stat) - use m_die,only : die,perr - use m_mall,only : mall_ison,mall_co - implicit none - type(StringLinkedList),pointer :: head ! (inout) a head-node - integer,optional,intent(out) :: stat ! return status - -! !REVISION HISTORY: -! 17Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::clean_' - integer :: ier - logical :: err - - if(present(stat)) stat=0 - - ! Verify if the pointer is valid - - err=.not.associated(head) - if(.not.err) err=.not.associated(head%next) - - if(err) then - call perr(myname_,'Attempting to clean an uninitialized list') - if(.not.present(stat)) call die(myname_) - stat=-1 - return - endif - - ! Clean the rest before delete the current one. 
- - do - if(eol_(head)) exit - call delete_(head) - end do - - if(mall_ison()) call mall_co(2,myname) ! remove two nodes - - deallocate(head%next,stat=ier) - if(ier==0) deallocate(head,stat=ier) - if(ier/=0) then - call perr(myname_,'deallocate()',ier) - if(.not.present(stat)) call die(myname_) - stat=-1 - return - endif - -end subroutine clean_ - -end module m_StringLinkedList diff --git a/src/externals/mct/mpeu/m_TraceBack.F90 b/src/externals/mct/mpeu/m_TraceBack.F90 deleted file mode 100644 index 1afcaf8eb71..00000000000 --- a/src/externals/mct/mpeu/m_TraceBack.F90 +++ /dev/null @@ -1,240 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_TraceBack - Generation of Traceback Information -! -! !DESCRIPTION: -! This module supports the generation of traceback information for -! a given routine. -! -! -! !INTERFACE: - - module m_TraceBack - -! !USES: -! No external modules are used in the declaration section of this module. - - implicit none - - private ! except - -! !PUBLIC TYPES: -! No public types are declared in this module. - - -! !PUBLIC MEMBER FUNCTIONS: - - public :: GenTraceBackString - - interface GenTraceBackString; module procedure & - GenTraceBackString1, & - GenTraceBackString2 - end interface - -! !PUBLIC DATA MEMBERS: -! No public data member constants are declared in this module. - - -! !REVISION HISTORY: -! 5 Aug02 - J. Larson - Initial version. -!EOP ___________________________________________________________________ - -! Parameters local to this module: - - character(len=*),parameter :: myname='MCT(MPEU)::m_TraceBackString' - - character(len=len('|X|')), parameter :: StartChar = '|X|' - character(len=len('->')), parameter :: ArrowChar = '->' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GenTraceBackString1 - Start a TraceBack with One Routine Name -! -! !DESCRIPTION: -! This routine takes in CHARACTER form the names of the calling routine -! (the input argument {\tt RoutineName} and returns a {\tt String} -! (the output argument {\tt TraceBackString}) that portrays this routine -! as the starting point of a downwards procedural trace. The contents -! of {\tt TraceBackString} is merely an {\tt '|X|'}, followed immediately -! by the value of {\tt RoutineName}. -! -! !INTERFACE: - - subroutine GenTraceBackString1(TraceBackString, RoutineName) -! -! !USES: -! - use m_stdio - use m_die - - use m_String, only : String - use m_String, only : String_init => init - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: RoutineName - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: TraceBackString - -! !REVISION HISTORY: -! 5Aug02 - J. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GenTraceBackString1' - integer :: i, ierr - integer :: RoutineNameLength, ScratchBufferLength - character, dimension(:), allocatable :: ScratchBuffer - - ! Note: The value of ArrowChar is inherited - ! from the declaration section of this module. - - ! 
Determine the lengths of ParentName and ChildName - - RoutineNameLength = len(RoutineName) - - ! Set up ScratchBuffer: - - ScratchBufferLength = len(StartChar) + RoutineNameLength - - allocate(ScratchBuffer(ScratchBufferLength), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Allocate(ScratchBuffer...) failed. ierr = ',ierr - call die(myname_) - endif - - ! Load ScratchBuffer: - - - do i=1,len(StartChar) ! Load the '|X|'... - ScratchBuffer(i) = StartChar(i:i) - end do - - do i=1,RoutineNameLength - ScratchBuffer(len(StartChar)+i) = RoutineName(i:i) - end do - - ! Create TraceBackString - - call String_init(TraceBackString, ScratchBuffer) - - ! Clean up: - - deallocate(ScratchBuffer, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Deallocate(ScratchBuffer...) failed. ierr = ',ierr - call die(myname_) - endif - - end subroutine GenTraceBackString1 - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: GenTraceBackString2 - Connect Two Routine Names in a TraceBack -! -! !DESCRIPTION: -! This routine takes in CHARACTER form the names of the parent and -! child routines (the input arguments {\tt ParentName} and -! {\tt ChildName}, repsectively), and returns a {\tt String} (the output -! argument {\tt TraceBackString}) that portrays their procedural -! relationship. The contents of {\tt TraceBackString} is merely -! {\tt ParentName}, followe by an arrow ({\tt "->"}), followed by -! {\tt ChildName}. -! -! !INTERFACE: - - subroutine GenTraceBackString2(TraceBackString, ParentName, ChildName) -! -! !USES: -! - use m_stdio - use m_die - - use m_String, only : String - use m_String, only : String_init => init - - implicit none - -! !INPUT PARAMETERS: -! - character(len=*), intent(in) :: ParentName - character(len=*), intent(in) :: ChildName - -! !OUTPUT PARAMETERS: -! - type(String), intent(out) :: TraceBackString - -! !REVISION HISTORY: -! 5Aug02 - J. Larson - Initial version. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GenTraceBackString2' - integer :: i, ierr - integer :: ParentNameLength, ChildNameLength, ScratchBufferLength - character, dimension(:), allocatable :: ScratchBuffer - - ! Note: The value of ArrowChar is inherited - ! from the declaration section of this module. - - ! Determine the lengths of ParentName and ChildName - - ParentNameLength = len(ParentName) - ChildNameLength = len(ChildName) - - ! Set up ScratchBuffer: - - ScratchBufferLength = ParentNameLength + ChildNameLength + & - len(ArrowChar) - allocate(ScratchBuffer(ScratchBufferLength), stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Allocate(ScratchBuffer...) failed. ierr = ',ierr - call die(myname_) - endif - - ! Load ScratchBuffer: - - do i=1,ParentNameLength ! Load the Parent Routine Name... - ScratchBuffer(i) = ParentName(i:i) - end do - - do i=1,len(ArrowChar) ! Load the Arrow... - ScratchBuffer(ParentNameLength+i) = ArrowChar(i:i) - end do - - do i=1,ChildNameLength - ScratchBuffer(ParentNameLength+len(ArrowChar)+i) = ChildName(i:i) - end do - - ! Create TraceBackString - - call String_init(TraceBackString, ScratchBuffer) - - ! Clean up: - - deallocate(ScratchBuffer, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: Deallocate(ScratchBuffer...) failed. 
ierr = ',ierr - call die(myname_) - endif - - end subroutine GenTraceBackString2 - - end module m_TraceBack diff --git a/src/externals/mct/mpeu/m_chars.F90 b/src/externals/mct/mpeu/m_chars.F90 deleted file mode 100644 index 3ff275b138f..00000000000 --- a/src/externals/mct/mpeu/m_chars.F90 +++ /dev/null @@ -1,107 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_chars - a module for character class object operations -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_chars - implicit none - private - - public :: operator (.upper.) ! convert a string to uppercase - public :: uppercase - - public :: operator (.lower.) ! convert a string to lowercase - public :: lowercase - - interface operator (.upper.) - module procedure upper_case - end interface - interface uppercase - module procedure upper_case - end interface - - interface operator (.lower.) - module procedure lower_case - end interface - interface lowercase - module procedure lower_case - end interface - -! !REVISION HISTORY: -! 16Jul96 - J. Guo - (to do) -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_chars' - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: upper_case - convert lowercase letters to uppercase. -! -! !DESCRIPTION: -! -! !INTERFACE: - - function upper_case(str) result(ustr) - implicit none - character(len=*), intent(in) :: str - character(len=len(str)) :: ustr - -! !REVISION HISTORY: -! 13Aug96 - J. Guo - (to do) -!EOP -!_______________________________________________________________________ - integer i - integer,parameter :: il2u=ichar('A')-ichar('a') - - ustr=str - do i=1,len_trim(str) - if(str(i:i).ge.'a'.and.str(i:i).le.'z') & - ustr(i:i)=char(ichar(str(i:i))+il2u) - end do - end function upper_case - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: lower_case - convert uppercase letters to lowercase. -! -! !DESCRIPTION: -! -! !INTERFACE: - - function lower_case(str) result(lstr) - implicit none - character(len=*), intent(in) :: str - character(len=len(str)) :: lstr - -! !REVISION HISTORY: -! 13Aug96 - J. Guo - (to do) -!EOP -!_______________________________________________________________________ - integer i - integer,parameter :: iu2l=ichar('a')-ichar('A') - - lstr=str - do i=1,len_trim(str) - if(str(i:i).ge.'A'.and.str(i:i).le.'Z') & - lstr(i:i)=char(ichar(str(i:i))+iu2l) - end do - end function lower_case - -end module m_chars -!. diff --git a/src/externals/mct/mpeu/m_die.F90 b/src/externals/mct/mpeu/m_die.F90 deleted file mode 100644 index 9e10b443353..00000000000 --- a/src/externals/mct/mpeu/m_die.F90 +++ /dev/null @@ -1,404 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! 
CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_die - die with mpout flushed -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_die - use m_mpif90, only : MP_perr - implicit none - private ! except - - public :: die ! signal an exception - public :: diex ! a special die() supporting macros - public :: perr,warn ! message(s) to stderr - public :: perr_die ! to be phased out - public :: MP_die ! a special die() for MPI errors - public :: MP_perr ! perr for MPI errors, from m_mpif90 - public :: MP_perr_die ! a special die() for MPI errors - public :: assert_ ! used by ASSERT() macro of assert.H - - interface die; module procedure & - die0_, & ! die(where) - die1_, & ! die(where,message) - die2_, & ! die(where,proc,ier) - die4_ ! die(where,mesg1,ival1,mesg2,ival2) - end interface - - interface diex; module procedure & - diex_ ! diex(where,filename,lineno) - end interface - - interface perr; module procedure & - perr1_, & ! perr(where,message) - perr2_, & ! perr(where,proc,ier) - perr4_ ! perr(where,mesg1,ival1,mesg2,ival2) - end interface - interface warn; module procedure & - perr1_, & ! perr(where,message) - perr2_, & ! perr(where,proc,ier) - perr4_ ! perr(where,mesg1,ival1,mesg2,ival2) - end interface - - interface perr_die; module procedure & - die2_ ! perr_die(where,proc,ier) - end interface - - interface MP_die; module procedure & - MPdie2_ ! MP_die(where,proc,ier) - end interface - interface MP_perr_die; module procedure & - MPdie2_ ! MP_die(where,proc,ier) - end interface - - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_die' -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: die0_ - flush(mpout) before die() -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine die0_(where) - use m_mpout, only : mpout,mpout_flush,mpout_close,mpout_ison - use m_flow, only : flow_flush - use m_dropdead, only : ddie => die - implicit none - character(len=*),intent(in) :: where - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::die0_' - - call mpout_flush() - if(mpout_ison()) call flow_flush(mpout) - call mpout_close() - call ddie(where) - -end subroutine die0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: die1_ - flush(mpout) before die() -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine die1_(where,message) - use m_mpout, only : mpout,mpout_flush,mpout_close,mpout_ison - use m_flow, only : flow_flush - use m_dropdead, only : ddie => die - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: message - -! !REVISION HISTORY: -! 
26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::die1_' - - call mpout_flush() - if(mpout_ison()) call flow_flush(mpout) - call mpout_close() - - call perr1_(where,message) - call ddie(where) - -end subroutine die1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: die2_ - flush(mpout) before die() -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine die2_(where,proc,ier) - use m_mpout, only : mpout,mpout_flush,mpout_close,mpout_ison - use m_flow, only : flow_flush - use m_dropdead, only : ddie => die - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: proc - integer,intent(in) :: ier - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::die2_' - - call mpout_flush() - if(mpout_ison()) call flow_flush(mpout) - call mpout_close() - - call perr2_(where,proc,ier) - call ddie(where) - -end subroutine die2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: die4_ - flush(mpout) before die() -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine die4_(where,mesg1,ival1,mesg2,ival2) - use m_mpout, only : mpout,mpout_flush,mpout_close,mpout_ison - use m_flow, only : flow_flush - use m_dropdead, only : ddie => die - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: mesg1 - integer,intent(in) :: ival1 - character(len=*),intent(in) :: mesg2 - integer,intent(in) :: ival2 - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::die4_' - - call mpout_flush() - if(mpout_ison()) call flow_flush(mpout) - call mpout_close() - - call perr4_(where,mesg1,ival1,mesg2,ival2) - call ddie(where) - -end subroutine die4_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: diex_ - flush(mpout) before die() -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine diex_(where,filename,line) - use m_mpout, only : mpout,mpout_flush,mpout_close,mpout_ison - use m_flow, only : flow_flush - use m_dropdead, only : ddie => die - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: filename - integer,intent(in) :: line - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::diex_' - - call mpout_flush() - if(mpout_ison()) call flow_flush(mpout) - call mpout_close() - call ddie(where,filename,line) - -end subroutine diex_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
-!BOP ------------------------------------------------------------------- -! -! !IROUTINE: perr1_ - send a simple error message to _stderr_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine perr1_(where,message) - use m_stdio,only : stderr - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: message - -! !REVISION HISTORY: -! 27Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::perr1_' - - write(stderr,'(3a)') where,': ',message - -end subroutine perr1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: perr2_ - send a simple error message to _stderr_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine perr2_(where,proc,ier) - use m_stdio,only : stderr - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: proc - integer,intent(in) :: ier - -! !REVISION HISTORY: -! 27Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::perr2_' - character(len=16) :: cer - integer :: ios - - cer='*******' - write(cer,'(i16)',iostat=ios) ier - write(stderr,'(5a)') where,': ', & - proc,' error, stat =',trim(adjustl(cer)) - -end subroutine perr2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: perr4_ - send a simple error message to _stderr_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine perr4_(where,mesg1,ival1,mesg2,ival2) - use m_stdio,only : stderr - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: mesg1 - integer,intent(in) :: ival1 - character(len=*),intent(in) :: mesg2 - integer,intent(in) :: ival2 - -! !REVISION HISTORY: -! 27Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::perr4_' - character(len=16) :: cval1,cval2 - integer :: ios - - cval1='*******' - cval2='*******' - write(cval1,'(i16)',iostat=ios) ival1 - write(cval2,'(i16)',iostat=ios) ival2 - - write(stderr,'(10a)') where,': error, ', & - mesg1,'=',trim(adjustl(cval1)),', ', & - mesg2,'=',trim(adjustl(cval2)),'.' - -end subroutine perr4_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MPdie2_ - invoke MP_perr before die_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine MPdie2_(where,proc,ier) - use m_mpif90, only : MP_perr - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: proc - integer,intent(in) :: ier - -! !REVISION HISTORY: -! 27Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MPdie2_' - - call MP_perr(where,proc,ier) - call die0_(where) - -end subroutine MPdie2_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: assert_ - an utility called by ASSERT() macro only -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine assert_(str, file, line) - use m_mpout,only : mpout,mpout_flush,mpout_close,mpout_ison - use m_flow,only : flow_flush - use m_dropdead,only : ddie => die - implicit none - Character(Len=*), Intent(In) :: str ! a message - Character(Len=*), Intent(In) :: file ! a filename - Integer, Intent(In) :: line ! a line number - -! !REVISION HISTORY: -! 25Aug00 - Jing Guo -! - modified -! - included into m_die for easier module management -! before - Tom Clune -! - Created for MPI PSAS implementation as a separate -! module -! 19Jan01 - J. Larson - removed nested -! single/double/single quotes in the second argument -! to the call to perr1_(). This was done for the pgf90 -! port. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_='ASSERT_' - - call mpout_flush() - if(mpout_ison()) call flow_flush(mpout) - call mpout_close() - - call perr1_(myname_,'failed: "//str//")') - call ddie(myname_,file,line) - -End subroutine assert_ -end module m_die diff --git a/src/externals/mct/mpeu/m_dropdead.F90 b/src/externals/mct/mpeu/m_dropdead.F90 deleted file mode 100644 index 0869fd90489..00000000000 --- a/src/externals/mct/mpeu/m_dropdead.F90 +++ /dev/null @@ -1,191 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_dropdead - An abort() with a style -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_dropdead - implicit none - private ! except - - public :: die ! terminate a program with a condition - - interface die; module procedure & - die_, & - diex_ - end interface - -! !REVISION HISTORY: -! 20Feb97 - Jing Guo - defined template -!EOP -!_______________________________________________________________________ - -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! !IROUTINE: die_ - Clean up and raise an exception to the OS -! -! !DESCRIPTION: -! -! A call to die() exits the program with minimum information for -! both the user and the operating system. -! -! !INTERFACE: - - subroutine die_(where) - use m_stdio, only : stderr - use m_mpif90,only : MP_comm_world - use m_mpif90,only : MP_comm_rank - use m_mpif90,only : MP_abort - use m_mpif90,only : MP_initialized - implicit none - character(len=*),intent(in) :: where ! where it is called - -! !REVISION HISTORY: -! 20Feb97 - Jing Guo - defined template -! 09Jan07 - R. Loy - check for initialized, add -! options for abort -! -!EOP -!_______________________________________________________________________ - - character(len=*),parameter :: myname_='MCT(MPEU)::die.' - integer :: myrank,ier - logical :: initialized - - call MP_initialized(initialized,ier) - - if (initialized) then - - !------------------------------------------------- - ! MPI_ should have been initialized for this call - !------------------------------------------------- - - call MP_comm_rank(MP_comm_world,myrank,ier) - - ! 
a message for the users: - - write(stderr,'(z3.3,5a)') myrank,'.',myname_, & - ': from ',trim(where),'()' - - ! raise a condition to the OS - -#ifdef ENABLE_UNIX_ABORT - call abort -#else - call MP_abort(MP_comm_world,2,ier) -#endif - - else - - write(stderr,'(5a)') 'unknown rank .',myname_, & - ': from ',trim(where),'()' - -#ifdef ENABLE_UNIX_ABORT - call abort -#else - stop -#endif - - endif - -end subroutine die_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: diex_ - Clean up and raise an exception to the OS -! -! !DESCRIPTION: -! -! A call to die() exits the program with minimum information for -! both the user and the operating system. This implementation, -! however, may be used in conjunction with with a source preprocessor -! to produce more detailed location information. -! -! !INTERFACE: - - subroutine diex_(where,fnam,line) - use m_stdio, only : stderr - use m_mpif90,only : MP_comm_world - use m_mpif90,only : MP_comm_rank - use m_mpif90,only : MP_abort - use m_mpif90,only : MP_initialized - implicit none - character(len=*),intent(in) :: where ! where it is called - character(len=*),intent(in) :: fnam - integer,intent(in) :: line - -! !REVISION HISTORY: -! 20Feb97 - Jing Guo - defined template -! 09Jan07 - R. Loy - check for initialized, add -! options for abort -! -!EOP -!_______________________________________________________________________ - - character(len=*),parameter :: myname_='die.' - integer :: myrank,ier - character(len=16) :: lineno - - logical :: initialized - - write(lineno,'(i16)') line - - call MP_initialized(initialized,ier) - - if (initialized) then - - !------------------------------------------------- - ! MPI_ should have been initialized for this call - !------------------------------------------------- - - call MP_comm_rank(MP_comm_world,myrank,ier) - - ! a message for the users: - write(stderr,'(z3.3,9a)') myrank,'.',myname_, & - ': from ',trim(where),'()', & - ', line ',trim(adjustl(lineno)), & - ' of file ',fnam - - ! raise a condition to the OS - -#ifdef ENABLE_UNIX_ABORT - call abort -#else - call MP_abort(MP_comm_world,2,ier) -#endif - - else - - ! a message for the users: - write(stderr,'(9a)') 'unknown rank .',myname_, & - ': from ',trim(where),'()', & - ', line ',trim(adjustl(lineno)), & - ' of file ',fnam - -#ifdef ENABLE_UNIX_ABORT - call abort -#else - stop -#endif - - endif - - -end subroutine diex_ -!======================================================================= -end module m_dropdead -!. diff --git a/src/externals/mct/mpeu/m_flow.F90 b/src/externals/mct/mpeu/m_flow.F90 deleted file mode 100644 index 35d7b3c5b84..00000000000 --- a/src/externals/mct/mpeu/m_flow.F90 +++ /dev/null @@ -1,196 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_flow - tracing the program calling tree -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_flow - implicit none - private ! 
except - - public :: flow_ci - public :: flow_co - public :: flow_flush - public :: flow_reset - - interface flow_ci; module procedure ci_; end interface - interface flow_co; module procedure co_; end interface - interface flow_flush; module procedure flush_; end interface - interface flow_reset; module procedure reset_; end interface - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_flow' - - integer,parameter :: MX_TNAME= 64 - integer,parameter :: LN_TNAME= 32 - - integer,save :: mxdep= 0 - integer,save :: iname=-1 - character(len=LN_TNAME),save,dimension(0:MX_TNAME-1) :: tname - - character(len=LN_TNAME),save :: ciname=' ' - character(len=LN_TNAME),save :: coname=' ' - logical,save :: balanced=.true. - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: ci_ - checking in a level -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ci_(name) - implicit none - character(len=*),intent(in) :: name - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::ci_' - - ! Push in an entry in to a circulated list storage to save - ! only the last MX_TNAME entries. - - iname=iname+1 - tname(modulo(iname,MX_TNAME)) = name - - if(mxdep < iname+1) mxdep=iname+1 -end subroutine ci_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: co_ - checking out a level -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine co_(name) - use m_chars, only : uppercase - implicit none - character(len=*),intent(in) :: name - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::co_' - character(len=LN_TNAME) :: uname - - if(balanced) then - uname='?' - balanced=iname >= 0 - if(balanced) then - uname=tname(modulo(iname,MX_TNAME)) - balanced = uname == ' ' .or. uppercase(uname) == uppercase(name) - endif - if(.not.balanced) then - ciname=uname - coname= name - endif - endif - - ! Pop out an entry - - tname(modulo(iname,MX_TNAME))=' ' - iname=iname-1 - -end subroutine co_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: flush_ - print all remaining entries in the list -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine flush_(lu) - implicit none - integer,intent(in) :: lu - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::flush_' - integer :: i - - ! Nothing to show - - if(mxdep == 0 .and. iname == -1) return - - write(lu,'(2a,i4)',advance='no') myname,': depth =',mxdep - - if(.not.balanced .or. 
iname < -1) then - - write(lu,'(4a)',advance='no') & - ', ci/co unbalanced at ',trim(ciname),'/',trim(coname) - - write(lu,'(a,i4)') ', level =',iname+1 - return - - endif - - if(iname >= 0) then - write(lu,'(a)',advance='no') ', ' - do i=0,iname-1 - write(lu,'(2a)',advance='no') trim(tname(modulo(i,MX_TNAME))),'>' - end do - write(lu,'(a)',advance='no') trim(tname(modulo(iname,MX_TNAME))) - endif - write(lu,*) - -end subroutine flush_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: reset_ - set the stack to empty -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine reset_() - implicit none - -! !REVISION HISTORY: -! 26Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::reset_' - integer :: i - - mxdep=0 - iname=-1 - tname(0:MX_TNAME-1)=' ' - - ciname=' ' - coname=' ' - balanced=.true. - -end subroutine reset_ -end module m_flow diff --git a/src/externals/mct/mpeu/m_inpak90.F90 b/src/externals/mct/mpeu/m_inpak90.F90 deleted file mode 100644 index d1adfe11a1e..00000000000 --- a/src/externals/mct/mpeu/m_inpak90.F90 +++ /dev/null @@ -1,2049 +0,0 @@ -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!------------------------------------------------------------------------- -!BOI -! -! !TITLE: Inpak 90 Documentation \\ Version 1.01 -! -! !AUTHORS: Arlindo da Silva -! -! !AFFILIATION: Data Assimilation Office, NASA/GSFC, Greenbelt, MD 20771 -! -! !DATE: June 20, 1996 -! -! !INTRODUCTION: Package Overview -! -! Inpak 90 is a Fortran (77/90) collection of -! routines/functions for accessing {\em Resource Files} -! in ASCII format. The package is optimized -! for minimizing formatted I/O, performing all of its string -! operations in memory using Fortran intrinsic functions. -! -! \subsection{Resource Files} -! -! A {\em Resource File} is a text file consisting of variable -! length lines (records), each possibly starting with a {\em label} -! (or {\em key}), followed by some data. A simple resource file -! looks like this: -! -! \begin{verbatim} -! # Lines starting with # are comments which are -! # ignored during processing. -! my_file_names: jan87.dat jan88.dat jan89.dat -! radius_of_the_earth: 6.37E6 # these are comments too -! constants: 3.1415 25 -! my_favourite_colors: green blue 022 # text & number are OK -! \end{verbatim} -! -! In this example, {\tt my\_file\_names:} and {\tt constants:} -! are labels, while {\tt jan87.dat, jan88.dat} and {\tt jan89.dat} are -! data associated with label {\tt my\_file\_names:}. -! Resource files can also contain simple tables of the form, -! -! \begin{verbatim} -! my_table_name:: -! 1000 3000 263.0 -! 925 3000 263.0 -! 850 3000 263.0 -! 700 3000 269.0 -! 500 3000 287.0 -! 400 3000 295.8 -! 300 3000 295.8 -! :: -! \end{verbatim} -! -! Resource files are random access, the particular order of the -! records are not important (except between ::s in a table definition). -! -! \subsection{A Quick Stroll} -! -! The first step is to load the ASCII resource (rc) file into -! memory\footnote{See next section for a complete description -! 
of parameters for each routine/function}: -! -! \begin{verbatim} -! call i90_LoadF ( 'my_file.rc', iret ) -! \end{verbatim} -! -! The next step is to select the label (record) of interest, say -! -! \begin{verbatim} -! call i90_label ( 'constants:', iret ) -! \end{verbatim} -! -! The 2 constants above can be retrieved with the following code -! fragment: -! \begin{verbatim} -! real r -! integer i -! call i90_label ( 'constants:', iret ) -! r = i90_gfloat(iret) ! results in r = 3.1415 -! i = i90_gint(iret) ! results in i = 25 -! \end{verbatim} -! -! The file names above can be retrieved with the following -! code fragment: -! \begin{verbatim} -! character*20 fn1, fn2, fn3 -! integer iret -! call i90_label ( 'my_file_names:', iret ) -! call i90_Gtoken ( fn1, iret ) ! ==> fn1 = 'jan87.dat' -! call i90_Gtoken ( fn2, iret ) ! ==> fn1 = 'jan88.dat' -! call i90_Gtoken ( fn3, iret ) ! ==> fn1 = 'jan89.dat' -! \end{verbatim} -! -! To access the table above, the user first must use {\tt i90\_label()} to -! locate the beginning of the table, e.g., -! -! \begin{verbatim} -! call i90_label ( 'my_table_name::', iret ) -! \end{verbatim} -! -! Subsequently, {\tt i90\_gline()} can be used to gain access to each -! row of the table. Here is a code fragment to read the above -! table (7 rows, 3 columns): -! -! \begin{verbatim} -! real table(7,3) -! character*20 word -! integer iret -! call i90_label ( 'my_table_name::', iret ) -! do i = 1, 7 -! call i90_gline ( iret ) -! do j = 1, 3 -! table(i,j) = i90_gfloat ( iret ) -! end do -! end do -! \end{verbatim} -! -! Get the idea? -! -! \newpage -! \subsection{Main Routine/Functions} -! -! \begin{verbatim} -! ------------------------------------------------------------------ -! Routine/Function Description -! ------------------------------------------------------------------ -! I90_LoadF ( filen, iret ) loads resource file into memory -! I90_Label ( label, iret ) selects a label (key) -! I90_GLine ( iret ) selects next line (for tables) -! I90_Gtoken ( word, iret ) get next token -! I90_Gfloat ( iret ) returns next float number (function) -! I90_GInt ( iret ) returns next integer number (function) -! i90_AtoF ( string, iret ) ASCII to float (function) -! i90_AtoI ( string, iret ) ASCII to integer (function) -! I90_Len ( string ) string length without trailing blanks -! LabLin ( label ) similar to i90_label (no iret) -! FltGet ( default ) returns next float number (function) -! IntGet ( default ) returns next integer number (function) -! ChrGet ( default ) returns next character (function) -! TokGet ( string, default ) get next token -! ------------------------------------------------------------------ -! \end{verbatim} -! -! {\em Common Arguments:} -! -! \begin{verbatim} -! character*(*) filen file name -! integer iret error return code (0 is OK) -! character*(*) label label (key) to locate record -! character*(*) word blank delimited string -! character*(*) string a sequence of characters -! \end{verbatim} -! -! See the Prologues in the next section for additional details. -! -! -! \subsection{Package History} -! Back in the 70s Eli Isaacson wrote IOPACK in Fortran -! 66. In June of 1987 I wrote Inpak77 using -! Fortran 77 string functions; Inpak 77 is a vastly -! simplified IOPACK, but has its own goodies not found in -! IOPACK. Inpak 90 removes some obsolete functionality in -! Inpak77, and parses the whole resource file in memory for -! performance. Despite its name, Inpak 90 compiles fine -! under any modern Fortran 77 compiler. -! -! \subsection{Bugs} -! 
Inpak 90 is not very gracious with error messages. -! The interactive functionality of Inpak77 has not been implemented. -! The comment character \# cannot be escaped. -! -! \subsection{Availability} -! -! This software is available at -! \begin{verbatim} -! ftp://niteroi.gsfc.nasa.gov/pub/packages/i90/ -! \end{verbatim} -! There you will find the following files: -! \begin{verbatim} -! i90.f Fortran 77/90 source code -! i90.h Include file needed by i90.f -! ti90.f Test code -! i90.ps Postscript documentation -! \end{verbatim} -! An on-line version of this document is available at -! \begin{verbatim} -! ftp://niteroi.gsfc.nasa.gov/www/packages/i90/i90.html -! \end{verbatim} -! -!EOI -!------------------------------------------------------------------------- -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! -! !REVISION HISTORY: -! 03Jul96 - J. Guo - evolved to Fortran 90 module. The -! modifications include 1) additional subroutines to -! dynamically manage the memory, 2) privatized most -! entries, 3) included "i90.h" into the module source -! with better initializations, 4) removed blockdata, 5) -! used a portable opntext() call to avoid I/O portability -! problems. -! -! See I90_page() I90_Release(), and I90_LoadF() for -! details. -! -! 05Aug98 - Jing Guo - -! Removed i90_page() and its references. -! Added internal subroutines push_() and pop_(). -! Modified i90_release(). -! Added i90_fullrelease(). -! Removed %loaded. Check i90_depth instead. -! 06Aug98 - Todling - made I90_gstr public -! 20Dec98 - Jing Guo - replaced the description of I90_Gstr -! 28Sep99 - Jing Guo - Merged with the MPI version with -! some addtional changes based on -! merging decisions. -! 12Oct99 - Larson/Guo - Overloaded fltget() to new routines -! getfltsp() and fltgetdp(), providing better support -! for 32 and 64 bit platforms, respectively. -!_______________________________________________________________________ - - module m_inpak90 - use m_stdio, only : stderr,stdout - use m_realkinds, only: FP, SP, DP,kind_r8 - implicit none - private - public :: I90_LoadF ! loads a resource file into memory - public :: I90_allLoadF! loads/populates a resource file to all PEs - public :: I90_Release ! Releases one cached resource file - public :: I90_fullRelease ! Releases the whole stack - public :: I90_Label ! selects a label (key) - public :: I90_GLine ! selects the next line (for tables) - public :: I90_Gtoken ! gets the next token - public :: I90_Gstr ! get a string upto to a "$" or EOL - - public :: I90_AtoF ! ASCII to float (function) - public :: I90_AtoI ! ASCII to integer (function) - - public :: I90_Gfloat ! returns next float number (function) - public :: I90_GInt ! returns next integer number (function) - - public :: lablin,rdnext,fltget,intget,getwrd,str2rn,chrget,getstr - public :: strget - - interface fltget; module procedure & - fltgetsp, & - fltgetdp - end interface - - -!----------------------------------------------------------------------- -! -! This part was originally in "i90.h", but included for module. -! - - ! revised parameter table to fit Fortran 90 standard - - integer, parameter :: LSZ = 256 - -!ams -! On Linux with the Fujitsu compiler, I needed to reduce NBUF_MAX -!ams -! integer, parameter :: NBUF_MAX = 400*(LSZ) ! max size of buffer -! integer, parameter :: NBUF_MAX = 200*(LSZ) ! max size of buffer -! 
Further reduction of NBUF_MAX was necessary for the Fujitsu VPP: - integer, parameter :: NBUF_MAX = 128*(LSZ)-1 ! Maximum buffer size - ! that works with the - ! Fujitsu-VPP platform. - - - character, parameter :: BLK = achar(32) ! blank (space) - character, parameter :: TAB = achar(09) ! TAB - character, parameter :: EOL = achar(10) ! end of line mark (newline) - character, parameter :: EOB = achar(00) ! end of buffer mark (null) - character, parameter :: NULL= achar(00) ! what it says - - type inpak90 - ! May be easily paged for extentable file size (J.G.) - - integer :: nbuf ! actual size of buffer - character(len=NBUF_MAX),pointer :: buffer ! hold the whole file? - character(len=LSZ), pointer :: this_line ! the current line - - integer :: next_line ! index for next line on buffer - - type(inpak90),pointer :: last - end type inpak90 - - integer,parameter :: MALLSIZE_=10 ! just an estimation - - character(len=*),parameter :: myname='MCT(MPEU)::m_inpak90' -!----------------------------------------------------------------------- - - integer,parameter :: i90_MXDEP = 4 - integer,save :: i90_depth = 0 - type(inpak90),save,pointer :: i90_now - -!----------------------------------------------------------------------- - contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: I90_allLoadF - populate a rooted database to all PEs -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine I90_allLoadF(fname,root,comm,istat) - use m_mpif90, only : MP_perr - use m_mpif90, only : MP_comm_rank - use m_mpif90, only : MP_CHARACTER - use m_mpif90, only : MP_INTEGER - use m_die, only : perr - implicit none - character(len=*),intent(in) :: fname - integer,intent(in) :: root - integer,intent(in) :: comm - integer,intent(out) :: istat - -! !REVISION HISTORY: -! 28Jul98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::I90_allLoadF' - integer :: myID,ier - - istat=0 - - call MP_comm_rank(comm,myID,ier) - if(ier/=0) then - call MP_perr(myname_,'MP_comm_rank()',ier) - istat=ier - return - endif - - if(myID == root) then - call i90_LoadF(fname,ier) - if(ier /= 0) then - call perr(myname_,'i90_LoadF("//trim(fname)//")',ier) - istat=ier - return - endif - else - call push_(ier) - if(ier /= 0) then - call perr(myname_,'push_()',ier) - istat=ier - return - endif - endif - - ! Initialize the buffer on all PEs - - call MPI_Bcast(i90_now%buffer,NBUF_MAX,MP_CHARACTER,root,comm,ier) - if(ier /= 0) then - call MP_perr(myname_,'MPI_Bcast(%buffer)',ier) - istat=ier - return - endif - - call MPI_Bcast(i90_now%nbuf,1,MP_INTEGER,root,comm,ier) - if(ier /= 0) then - call MP_perr(myname_,'MPI_Bcast(%nbuf)',ier) - istat=ier - return - endif - - i90_now%this_line=' ' - i90_now%next_line=0 - -end subroutine I90_allLoadF - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: push_ - push on a new layer of the internal file _i90_now_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine push_(ier) - use m_die, only : perr - use m_mall,only : mall_mci,mall_ci,mall_ison - implicit none - integer,intent(out) :: ier - -! !REVISION HISTORY: -! 
05Aug98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::push_' - type(inpak90),pointer :: new - - if(i90_depth <= 0) nullify(i90_now) ! just an initialization - - ! Too many levels - - if(i90_depth >= i90_MXDEP) then - call perr(myname_,'(overflow)',i90_depth) - ier=1 - return - endif - - allocate(new,stat=ier) - if(ier /= 0) then - call perr(myname_,'allocate(new)',ier) - return - endif - - if(mall_ison()) call mall_ci(MALLSIZE_,myname) - - allocate(new%buffer,new%this_line,stat=ier) - if(ier /= 0) then - call perr(myname_,'allocate(new%..)',ier) - return - endif - - if(mall_ison()) then - call mall_mci(new%buffer,myname) - call mall_mci(new%this_line,myname) - endif - - new%last => i90_now - i90_now => new - nullify(new) - - i90_depth = i90_depth+1 -end subroutine push_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: pop_ - pop off a layer of the internal file _i90_now_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine pop_(ier) - use m_die, only : perr - use m_mall,only : mall_mco,mall_co,mall_ison - implicit none - integer,intent(out) :: ier - -! !REVISION HISTORY: -! 05Aug98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::pop_' - type(inpak90),pointer :: old - - if(i90_depth <= 0) then - call perr(myname_,'(underflow)',i90_depth) - ier=1 - return - endif - - old => i90_now%last - - if(mall_ison()) then - call mall_mco(i90_now%this_line,myname) - call mall_mco(i90_now%buffer,myname) - endif - - deallocate(i90_now%buffer,i90_now%this_line,stat=ier) - if(ier /= 0) then - call perr(myname_,'deallocate(new%..)',ier) - return - endif - - if(mall_ison()) call mall_co(MALLSIZE_,myname) - - deallocate(i90_now,stat=ier) - if(ier /= 0) then - call perr(myname_,'deallocate(new)',ier) - return - endif - - i90_now => old - nullify(old) - - i90_depth = i90_depth - 1 -end subroutine pop_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! -! !ROUTINE: I90_Release - deallocate memory used to load a resource file -! -! !INTERFACE: -! - subroutine I90_Release(stat) - use m_die,only : perr,die - implicit none - integer,optional, intent(out) :: stat -! -! !DESCRIPTION: -! -! I90_Release() is used to pair I90_LoadF() to release the memory -! used by I90_LoadF() for resourse data input. -! -! !SEE ALSO: -! -! !REVISION HISTORY: -! 03Jul96 - J. Guo - added to Arlindos inpak90 for its -! Fortran 90 revision. -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::i90_Release' - integer :: ier - - if(present(stat)) stat=0 - - call pop_(ier) - if(ier/=0) then - call perr(myname_,'pop_()',ier) - if(.not.present(stat)) call die(myname_) - stat=ier - return - endif - - end subroutine I90_Release - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! 
!IROUTINE: i90_fullRelease - releases the whole stack led by _i90_now_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine i90_fullRelease(ier) - use m_die,only : perr - implicit none - integer,intent(out) :: ier - -! !REVISION HISTORY: -! 05Aug98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::i90_fullRelease' - - do while(i90_depth > 0) - call pop_(ier) - if(ier /= 0) then - call perr(myname_,'pop_()',ier) - return - endif - end do - ier=0 - -end subroutine i90_fullRelease -!======================================================================= - subroutine I90_LoadF ( filen, iret ) - use m_ioutil, only : luavail,opntext,clstext - use m_die, only : perr - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_LoadF() --- Loads resource file into memory. -! -! !DESCRIPTION: -! -! Reads resource file, strips out comments, translate TABs into -! blanks, and loads the modified file contents into memory. -! Must be called only once for each resource file. -! -! !CALLING SEQUENCE: -! -! call i90_LoadF ( filen, iret ) -! -! !INPUT PARAMETERS: -! - character*(*) filen ! file name - -! !OUTPUT PARAMETERS: - - integer iret ! Return code: - ! 0 no error - ! -98 coult not get unit number - ! (strange!) - ! -98 talk to a wizzard - ! -99 out of memory: increase - ! NBUF_MAX in 'i90.h' - ! other iostat from open statement. -! -! !BUGS: -! -! It does not perform dynamic allocation, mostly to keep vanilla f77 -! compatibility. Overall amount of static memory is small (~100K -! for default NBUF_MAX = 400*256). -! -! !SEE ALSO: -! -! i90_label() selects a label (key) -! -! !FILES USED: -! -! File name supplied on input. The file is opened, read and then closed. -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - integer lu, ios, loop, ls, ptr - character*256 line - character(len=*), parameter :: myname_ = myname//'::i90_loadf' - - ! Check to make sure there is not too many levels - ! of the stacked resource files - - if(i90_depth >= i90_MXDEP) then - call perr(myname_,'(overflow)',i90_depth) - iret=1 - return - endif - -! Open file -! --------- -! lu = i90_lua() - - lu = luavail() ! a more portable version - if ( lu .lt. 0 ) then - iret = -97 - return - end if - - ! A open through an interface to avoid portability problems. - ! (J.G.) - - call opntext(lu,filen,'old',ios) - if ( ios .ne. 0 ) then - write(stderr,'(2a,i5)') myname_,': opntext() error, ios =',ios - iret = ios - return - end if - - ! Create a dynamic page to store the file. It might be expanded - ! to allocate memory on requests (a link list) (J.G.) - - ! Changed from page_() to push_(), to allow multiple (stacked) - ! inpak90 buffers. J.G. - - call push_(ios) ! to create buffer space - if ( ios .ne. 0 ) then - write(stderr,'(2a,i5)') myname_,': push_() error, ios =',ios - iret = ios - return - end if - -! Read to end of file -! ------------------- - i90_now%buffer(1:1) = EOL - ptr = 2 ! next buffer position - do loop = 1, NBUF_MAX - -! Read next line -! -------------- - read(lu,'(a)', end=11) line ! read next line - call i90_trim ( line ) ! remove trailing blanks - call i90_pad ( line ) ! Pad with # from end of line - -! 
A non-empty line -! ---------------- - ls = index(line,'#' ) - 1 ! line length - if ( ls .gt. 0 ) then - if ( (ptr+ls) .gt. NBUF_MAX ) then - iret = -99 - return - end if - i90_now%buffer(ptr:ptr+ls) = line(1:ls) // EOL - ptr = ptr + ls + 1 - end if - - end do - - iret = -98 ! good chance i90_now%buffer is not big enough - return - - 11 continue - -! All done -! -------- -! close(lu) - call clstext(lu,ios) - if(ios /= 0) then - iret=-99 - return - endif - i90_now%buffer(ptr:ptr) = EOB - i90_now%nbuf = ptr - i90_now%this_line=' ' - i90_now%next_line=0 - iret = 0 - - return - end subroutine I90_LoadF - - -!................................................................... - - subroutine i90_label ( label, iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_Label() --- Selects a label (record). -! -! !DESCRIPTION: -! -! Once the buffer has been loaded with {\tt i90\_loadf()}, this routine -! selects a given ``line'' (record/table) associated with ``label''. -! Think of ``label'' as a resource name or data base ``key''. -! -! !CALLING SEQUENCE: -! -! call i90_Label ( label, iret ) -! -! !INPUT PARAMETERS: -! - character(len=*),intent(in) :: label ! input label - -! !OUTPUT PARAMETERS: - - integer iret ! Return code: - ! 0 no error - ! -1 buffer not loaded - ! -2 could not find label -! -! !SEE ALSO: -! -! i90_loadf() load file into buffer -! i90_gtoken() get next token -! i90_gline() get next line (for tables) -! atof() convert word (string) to float -! atoi() convert word (string) to integer -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! 19Jan01 Jay Larson - introduced CHARACTER -! variable EOL_label, which is used to circumvent pgf90 -! problems with passing concatenated characters as an argument -! to a function. -! -!EOP -!------------------------------------------------------------------------- - - integer i, j - - character(len=(len(label)+len(EOL))) :: EOL_label - -! Make sure that a buffer is defined (JG) -! ---------------------------------- - if(i90_depth <= 0) then - iret = -1 - return - endif - -! Determine whether label exists -! ------------------------------ - EOL_label = EOL // label - i = index ( i90_now%buffer(1:i90_now%nbuf), EOL_label ) + 1 - if ( i .le. 1 ) then - i90_now%this_line = BLK // EOL - iret = -2 - return - end if - -! Extract the line associated with this label -! ------------------------------------------- - i = i + len ( label ) - j = i + index(i90_now%buffer(i:i90_now%nbuf),EOL) - 2 - i90_now%this_line = i90_now%buffer(i:j) // BLK // EOL - - i90_now%next_line = j + 2 - - iret = 0 - - return - end subroutine i90_label - -!................................................................... - - subroutine i90_gline ( iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_GLine() --- Selects next line. -! -! !DESCRIPTION: -! -! Selects next line, irrespective of of label. If the next line starts -! with :: (end of table mark), then it lets the user know. This sequential -! access of the buffer is useful to assess tables, a concept introduced -! in Inpak 77 by Jing Guo. A table is a construct like this: -! -! 
\begin{verbatim} -! my_table_name:: -! 1000 3000 263.0 -! 925 3000 263.0 -! 850 3000 263.0 -! 700 3000 269.0 -! 500 3000 287.0 -! 400 3000 295.8 -! 300 3000 295.8 -! :: -! \end{verbatim} -! -! To access this table, the user first must use {\tt i90\_label()} to -! locate the beginning of the table, e.g., -! -! \begin{verbatim} -! call i90_label ( 'my_table_name::', iret ) -! \end{verbatim} -! -! Subsequently, {\tt i90\_gline()} can be used to gain acess to each -! row of the table. Here is a code fragment to read the above -! table (7 rows, 3 columns): -! -! \begin{verbatim} -! real table(7,3) -! character*20 word -! integer iret -! call i90_label ( 'my_table_name::', iret ) -! do i = 1, 7 -! call i90_gline ( iret ) -! do j = 1, 3 -! table(i,j) = fltget ( 0. ) -! end do -! end do -! \end{verbatim} -! -! For simplicity we have assumed that the dimensions of table were -! known. It is relatively simple to infer the table dimensions -! by manipulating ``iret''. -! -! !CALLING SEQUENCE: -! -! call i90_gline ( iret ) -! -! !INPUT PARAMETERS: -! -! None. -! -! !OUTPUT PARAMETERS: -! - integer iret ! Return code: - ! 0 no error - ! -1 end of buffer reached - ! +1 end of table reached - -! !SEE ALSO: -! -! i90_label() selects a line (record/table) -! -! !REVISION HISTORY: -! -! 10feb95 Guo Wrote rdnext(), Inpak 77 extension. -! 19Jun96 da Silva Original code with functionality of rdnext() -! -!EOP -!------------------------------------------------------------------------- - - integer i, j - -! Make sure that a buffer is defined (JG) -! ---------------------------------- - if(i90_depth <= 0) then - iret = -1 - return - endif - - if ( i90_now%next_line .ge. i90_now%nbuf ) then - iret = -1 - return - end if - - i = i90_now%next_line - j = i + index(i90_now%buffer(i:i90_now%nbuf),EOL) - 2 - i90_now%this_line = i90_now%buffer(i:j) // BLK // EOL - - if ( i90_now%this_line(1:2) .eq. '::' ) then - iret = 1 ! end of table - i90_now%next_line = i90_now%nbuf + 1 - return - end if - - i90_now%next_line = j + 2 - iret = 0 - - return - end subroutine i90_gline - -!................................................................... - - subroutine i90_GToken ( token, iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_GToken() --- Gets next token. -! -! !DESCRIPTION: -! -! Get next token from current line. The current line is defined by a -! call to {\tt i90\_label()}. Tokens are sequences of characters (including -! blanks) which may be enclosed by single or double quotes. -! If no quotes are present, the token from the current position to the next -! blank of TAB is returned. -! -! {\em Examples of valid token:} -! -! \begin{verbatim} -! single_token "second token on line" -! "this is a token" -! 'Another example of a token' -! 'this is how you get a " inside a token' -! "this is how you get a ' inside a token" -! This is valid too # the line ends before the # -! \end{verbatim} -! The last line has 4 valid tokens: {\tt This, is, valid} and {\tt too}. -! -! {\em Invalid string constructs:} -! -! \begin{verbatim} -! cannot handle mixed quotes (i.e. single/double) -! 'escaping like this \' is not implemented' -! 'this # will not work because of the #' -! \end{verbatim} -! The \# character is reserved for comments and cannot be included -! inside quotation marks. -! -! !CALLING SEQUENCE: -! -! 
call i90_GToken ( token, iret ) -! -! !INPUT PARAMETERS: -! -! None. -! -! !OUTPUT PARAMETERS: -! - character*(*) token ! Next token from current line - integer iret ! Return code: - ! 0 no error - ! -1 either nothing left - ! on line or mismatched - ! quotation marks. - -! !BUGS: -! -! Standard Unix escaping is not implemented at the moment. -! -! -! !SEE ALSO: -! -! i90_label() selects a line (record/table) -! i90_gline() get next line (for tables) -! atof() convert word (string) to float -! atoi() convert word (string) to integer -! -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - character*1 ch - integer ib, ie - -! Make sure that a buffer is defined (JG) -! ---------------------------------- - if(i90_depth <= 0) then - iret = -1 - return - endif - - call i90_trim ( i90_now%this_line ) - - ch = i90_now%this_line(1:1) - if ( ch .eq. '"' .or. ch .eq. "'" ) then - ib = 2 - ie = index ( i90_now%this_line(ib:), ch ) - else - ib = 1 - ie = min(index(i90_now%this_line,BLK), & - index(i90_now%this_line,EOL)) - 1 - - end if - - if ( ie .lt. ib ) then - token = BLK - iret = -1 - return - else - ! Get the token, and shift the rest of %this_line to - ! the left - - token = i90_now%this_line(ib:ie) - i90_now%this_line = i90_now%this_line(ie+2:) - iret = 0 - end if - - return - end subroutine i90_gtoken -!................................................................... - subroutine i90_gstr ( string, iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -! -! !ROUTINE: I90\_GStr() -! -! !DESCRIPTION: -! -! Get next string from current line. The current line is defined by a -! call to {\tt i90\_label()}. Strings are sequence of characters (including -! blanks) enclosed by single or double quotes. If no quotes -! are present, the string from the current position to the end of -! the line is returned. -! -! NOTE: This routine is defined differently from \verb"i90_GTolen()", -! where a {\sl token} is white-space delimited, but this routine -! will try to fetch a string either terminated by a "$" or by the -! end of the line. -! -! {\em Examples of valid strings:} -! -! \begin{verbatim} -! "this is a string" -! 'Another example of string' -! 'this is how you get a " inside a string' -! "this is how you get a ' inside a string" -! This is valid too # the line ends before the # -! -! \end{verbatim} -! -! {\em Invalid string constructs:} -! -! \begin{verbatim} -! cannot handle mixed quotes -! 'escaping like this \' is not implemented' -! \end{verbatim} -! -! {\em Obsolete feature (for Inpak 77 compatibility):} -! -! \begin{verbatim} -! the string ends after a $ this is another string -! \end{verbatim} -! -! !CALLING SEQUENCE: -! -! \begin{verbatim} -! call i90_Gstr ( string, iret ) -! \end{verbatim} -! -! !INPUT PARAMETERS: -! - character*(*) string ! A NULL (char(0)) delimited string. - -! !OUTPUT PARAMETERS: -! - integer iret ! Return code: - ! 0 no error - ! -1 either nothing left - ! on line or mismatched - ! quotation marks. - -! !BUGS: -! -! Standard Unix escaping is not implemented at the moment. -! No way to tell sintax error from end of line (same iret). -! -! -! !SEE ALSO: -! -! i90_label() selects a line (record/table) -! i90_gtoken() get next token -! i90_gline() get next line (for tables) -! 
atof() convert word (string) to float -! atoi() convert word (string) to integer -! -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! 01Oct96 Jing Guo Removed the null terminitor -! -!------------------------------------------------------------------------- - - character*1 ch - integer ib, ie - -! Make sure that a buffer is defined (JG) -! ---------------------------------- - if(i90_depth <= 0) then - iret = -1 - return - endif - - call i90_trim ( i90_now%this_line ) - - ch = i90_now%this_line(1:1) - if ( ch .eq. '"' .or. ch .eq. "'" ) then - ib = 2 - ie = index ( i90_now%this_line(ib:), ch ) - else - ib = 1 - ie = index(i90_now%this_line,'$')-1 ! undocumented feature! - if ( ie .lt. 1 ) ie = index(i90_now%this_line,EOL)-2 - end if - - if ( ie .lt. ib ) then -! string = NULL - iret = -1 - return - else - string = i90_now%this_line(ib:ie) ! // NULL - i90_now%this_line = i90_now%this_line(ie+2:) - iret = 0 - end if - - return - end subroutine i90_gstr - -!................................................................... - - real(FP) function i90_GFloat( iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: i90_GFloat() --- Returns next float number. -! -! !DESCRIPTION: -! -! Returns next float (real number) from the current line. -! If an error occurs a zero value is returned. -! -! !CALLING SEQUENCE: -! -! real rnumber -! rnumber = i90_gfloat ( default ) -! -! !OUTPUT PARAMETERS: -! - integer,intent(out) :: iret ! Return code: - ! 0 no error - ! -1 either nothing left - ! on line or mismatched - ! quotation marks. - ! -2 parsing error - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - character*256 token - integer ios - real(FP) x - -! Make sure that a buffer is defined (JG) -! ---------------------------------- - if(i90_depth <= 0) then - iret = -1 - return - endif - - call i90_gtoken ( token, iret ) - if ( iret .eq. 0 ) then - read(token,*,iostat=ios) x ! Does it require an extension? - if ( ios .ne. 0 ) iret = -2 - end if - if ( iret .ne. 0 ) x = 0. - i90_GFloat = x - - return - end function i90_GFloat - -!................................................................... - - integer function I90_GInt ( iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_GInt() --- Returns next integer number. -! -! !DESCRIPTION: -! -! Returns next integer number from the current line. -! If an error occurs a zero value is returned. -! -! !CALLING SEQUENCE: -! -! integer number -! number = i90_gint ( default ) -! -! !OUTPUT PARAMETERS: -! - integer iret ! Return code: - ! 0 no error - ! -1 either nothing left - ! on line or mismatched - ! quotation marks. - ! -2 parsing error - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! 24may00 da Silva delcared x as real*8 in case this module is compiled -! with real*4 -! -!EOP -!------------------------------------------------------------------------- - - character*256 token - real(kind_r8) x - integer ios - -! Make sure that a buffer is defined (JG) -! 
---------------------------------- - if(i90_depth <= 0) then - iret = -1 - return - endif - - call i90_gtoken ( token, iret ) - if ( iret .eq. 0 ) then - read(token,*,iostat=ios) x - if ( ios .ne. 0 ) iret = -2 - end if - if ( iret .ne. 0 ) x = 0 - i90_gint = nint(x) - - return - end function i90_gint - -!................................................................... - - real(FP) function i90_AtoF( string, iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: i90_AtoF() --- Translates ASCII (string) to float. -! -! !DESCRIPTION: -! -! Converts string to real number. Same as obsolete {\tt str2rn()}. -! -! !CALLING SEQUENCE: -! -! real rnumber -! rnumber = i90_atof ( string, iret ) -! -! !INPUT PARAMETERS: -! - character(len=*),intent(in) :: string ! a string - -! !OUTPUT PARAMETERS: -! - integer,intent(out) :: iret ! Return code: - ! 0 no error - ! -1 could not convert, probably - ! string is not a number - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - read(string,*,end=11,err=11) i90_AtoF - iret = 0 - return - 11 iret = -1 - return - end function i90_AtoF - -!................................................................... - - integer function i90_atoi ( string, iret ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_AtoI() --- Translates ASCII (strings) to integer. -! -! !DESCRIPTION: -! -! Converts string to integer number. -! -! !CALLING SEQUENCE: -! -! integer number -! number = i90_atoi ( string, iret ) -! -! !INPUT PARAMETERS: -! - character*(*) string ! a string - -! !OUTPUT PARAMETERS: -! - integer iret ! Return code: - ! 0 no error - ! -1 could not convert, probably - ! string is not a number - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - read(string,*,end=11,err=11) i90_atoi - iret = 0 - return - 11 iret = -1 - return - end function i90_atoi - -!................................................................... - - integer function i90_Len ( string ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_Len() --- Returns length of string. -! -! !DESCRIPTION: -! -! Returns the length of a string excluding trailing blanks. -! It follows that -! \begin{verbatim} -! i90_len(string) .le. len(string), -! \end{verbatim} -! where {\tt len} is the intrinsic string length function. -! Example: -! \begin{verbatim} -! ls = len('abc ') ! results in ls = 5 -! ls = i90_len ('abc ') ! results in ls = 3 -! \end{verbatim} -! -! !CALLING SEQUENCE: -! -! integer ls -! ls = i90_len ( string ) -! -! !INPUT PARAMETERS: -! - character*(*) string ! a string -! -! !OUTPUT PARAMETERS: -! -! The length of the string, excluding trailing blanks. -! -! !REVISION HISTORY: -! -! 01Apr94 Guo Original code (a.k.a. luavail()) -! 19Jun96 da Silva Minor modification + prologue. -! 
-!EOP -!------------------------------------------------------------------------- - - integer ls, i, l - ls = len(string) - do i = ls, 1, -1 - l = i - if ( string(i:i) .ne. BLK ) go to 11 - end do - l = l - 1 - 11 continue - i90_len = l - return - end function i90_len - -!................................................................... - - integer function I90_Lua() - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_Lua() --- Returns available logical unit number. -! -! !DESCRIPTION: -! -! Look for an available (not opened) Fortran logical unit for i/o. -! -! !CALLING SEQUENCE: -! -! integer lu -! lu = i90_lua() -! -! !INPUT PARAMETERS: -! -! None. -! -! !OUTPUT PARAMETERS: -! -! The desired unit number if positive, -1 if unsucessful. -! -! !REVISION HISTORY: -! -! 01Apr94 Guo Original code (a.k.a. luavail()) -! 19Jun96 da Silva Minor modification + prologue. -! -!EOP -!------------------------------------------------------------------------- - - - integer lu,ios - logical opnd - lu=7 - inquire(unit=lu,opened=opnd,iostat=ios) - do while(ios.eq.0.and.opnd) - lu=lu+1 - inquire(unit=lu,opened=opnd,iostat=ios) - end do - if(ios.ne.0) lu=-1 - i90_lua=lu - return - end function i90_lua - -!................................................................... - - subroutine i90_pad ( string ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_Pad() --- Pad strings. -! -! !DESCRIPTION: -! -! Pads from the right with the comment character (\#). It also -! replaces TABs with blanks for convenience. This is a low level -! i90 routine. -! -! !CALLING SEQUENCE: -! -! call i90_pad ( string ) -! -! !INPUT PARAMETERS: -! - character*256 string ! input string - -! !OUTPUT PARAMETERS: ! modified string -! -! character*256 string -! -! !BUGS: -! -! It alters TABs even inside strings. -! -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - integer i - -! Pad end of string with # -! ------------------------ - do i = 256, 1, -1 - if ( string(i:i) .ne. ' ' .and. & - string(i:i) .ne. '$' ) go to 11 - string(i:i) = '#' - end do - 11 continue - -! Replace TABs with blanks -! ------------------------- - do i = 1, 256 - if ( string(i:i) .eq. TAB ) string(i:i) = BLK - if ( string(i:i) .eq. '#' ) go to 21 - end do - 21 continue - - return - end subroutine i90_pad - -!................................................................... - - subroutine I90_Trim ( string ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: I90_Trim() - Removes leading blanks from strings. -! -! !DESCRIPTION: -! -! Removes blanks and TABS from begenning of string. -! This is a low level i90 routine. -! -! !CALLING SEQUENCE: -! -! call i90_Trim ( string ) -! -! !INPUT PARAMETERS: -! - character*256 string ! the input string -! -! !OUTPUT PARAMETERS: -! -! character*256 string ! the modified string -! -! -! !REVISION HISTORY: -! -! 
19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - integer ib, i - -! Get rid of leading blanks -! ------------------------- - ib = 1 - do i = 1, 255 - if ( string(i:i) .ne. ' ' .and. & - string(i:i) .ne. TAB ) go to 21 - ib = ib + 1 - end do - 21 continue - -! String without trailling blanks -! ------------------------------- - string = string(ib:) - - return - end subroutine i90_trim - - -!========================================================================== - - -! ----------------------------- -! Inpak 77 Upward Compatibility -! ----------------------------- - - - subroutine lablin ( label ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: Lablin() --- Selects a Label (Inpak 77) -! -! !DESCRIPTION: -! -! Selects a given ``line'' (record/table) associated with ``label''. -! Similar to {\tt i90\_label()}, but prints a message to {\tt stdout} -! if it cannot locate the label. Kept for Inpak 77 upward compatibility. -! -! !CALLING SEQUENCE: -! -! call lablin ( label ) -! -! !INPUT PARAMETERS: - - character(len=*),intent(in) :: label ! string with label name -! -! !OUTPUT PARAMETERS: -! -! None. -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - integer iret - - call i90_label ( label, iret ) - if ( iret .ne. 0 ) then - write(stderr,'(2a)') 'i90/lablin: cannot find label ', label - endif - - end subroutine lablin - -!................................................................... - - real(SP) function fltgetsp ( default ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: FltGetsp() --- Returns next float (Inpak 77, single precision) -! -! !DESCRIPTION: -! -! Returns next float (real number, single precision) from the current -! line, or a default value if it fails to obtain the desired number. -! Kept for Inpak 77 upward compatibility. -! -! !CALLING SEQUENCE: -! -! real rnumber, default -! rnumber = fltgetsp ( default ) -! -! !INPUT PARAMETERS: -! - real(SP), intent(IN) :: default ! default value. - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! 12Oct99 Guo/Larson - Built from original FltGet() function. -! -!EOP -!------------------------------------------------------------------------- - - character*256 token - real(FP) x - integer iret - - call i90_gtoken ( token, iret ) - if ( iret .eq. 0 ) then - read(token,*,iostat=iret) x - end if - if ( iret .ne. 0 ) x = default - !print *, x - fltgetsp = x - - return - end function fltgetsp - -!................................................................... - - real(DP) function fltgetdp ( default ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: FltGetdp() --- Returns next float (Inpak 77) -! -! !DESCRIPTION: -! -! Returns next float (real number) from the current line, or a -! 
default value (double precision) if it fails to obtain the desired -! number. Kept for Inpak 77 upward compatibility. -! -! !CALLING SEQUENCE: -! -! real(DP) :: default -! real :: rnumber -! rnumber = FltGetdp(default) -! -! !INPUT PARAMETERS: -! - real(DP), intent(IN) :: default ! default value. - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! 12Oct99 Guo/Larson - Built from original FltGet() function. -! -!EOP -!------------------------------------------------------------------------- - - character*256 token - real(FP) x - integer iret - - call i90_gtoken ( token, iret ) - if ( iret .eq. 0 ) then - read(token,*,iostat=iret) x - end if - if ( iret .ne. 0 ) x = default - !print *, x - fltgetdp = x - - return - end function fltgetdp - -!................................................................... - - integer function intget ( default ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: IntGet() --- Returns next integer (Inpak 77). -! -! !DESCRIPTION: -! -! Returns next integer number from the current line, or a default -! value if it fails to obtain the desired number. -! Kept for Inpak 77 upward compatibility. -! -! !CALLING SEQUENCE: -! -! integer number, default -! number = intget ( default ) -! -! !INPUT PARAMETERS: -! - integer default ! default value. - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - character*256 token - real(FP) x - integer iret - - call i90_gtoken ( token, iret ) - if ( iret .eq. 0 ) then - read(token,*,iostat=iret) x - end if - if ( iret .ne. 0 ) x = default - intget = nint(x) - !print *, intget - - return - end function intget - -!................................................................... - - character(len=1) function chrget ( default ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: ChrGet() --- Returns next character (Inpak 77). -! -! !DESCRIPTION: -! -! Returns next non-blank character from the current line, or a default -! character if it fails for whatever reason. -! Kept for Inpak 77 upward compatibility. -! -! !CALLING SEQUENCE: -! -! character*1 ch, default -! ch = chrget ( default ) -! -! !INPUT PARAMETERS: -! - character*1 default ! default value. - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - character*256 token - integer iret - - call i90_gtoken ( token, iret ) - if ( iret .ne. 0 ) then - chrget = default - else - chrget = token(1:1) - end if - !print *, chrget - - return - end function chrget - -!................................................................... - - subroutine TokGet ( token, default ) - - implicit NONE - - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -!BOP -! -! !ROUTINE: TokGet() --- Gets next token (Inpakk 77 like). -! -! !DESCRIPTION: -! -! Returns next token from the current line, or a default -! 
word if it fails for whatever reason. -! -! !CALLING SEQUENCE: -! -! call TokGet ( token, default ) -! -! !INPUT PARAMETERS: -! - character*(*) default ! default token - -! !OUTPUT PARAMETERS: -! - character*(*) token ! desired token -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! -!EOP -!------------------------------------------------------------------------- - - integer iret - - call i90_GToken ( token, iret ) - if ( iret .ne. 0 ) then - token = default - end if - !print *, token - - return - end subroutine tokget - -!==================================================================== - -! -------------------------- -! Obsolete Inpak 77 Routines -! (Not Documented) -! -------------------------- - -!................................................................... - - subroutine iniin() - print *, & - 'i90: iniin() is obsolete, use i90_loadf() instead!' - return - end subroutine iniin - - -!................................................................... - - subroutine iunits ( mifans, moftrm, moferr, miftrm ) - integer mifans, moftrm, moferr, miftrm - print *, & - 'i90: iunits() is obsolete, use i90_loadf() instead!' - return - end subroutine iunits - -!................................................................... - - subroutine getstr ( iret, string ) - implicit NONE - character*(*) string - integer iret !, ls - call i90_gstr ( string, iret ) - return - end subroutine getstr - -!................................................................... - - subroutine getwrd ( iret, word ) - implicit NONE - character*(*) word - integer iret - call i90_gtoken ( word, iret ) - return - end subroutine getwrd - -!................................................................... - - subroutine rdnext ( iret ) - implicit NONE - integer iret - call i90_gline ( iret ) - return - end subroutine rdnext - -!................................................................... - - real(FP) function str2rn ( string, iret ) - implicit NONE - character*(*) string - integer iret - read(string,*,end=11,err=11) str2rn - iret = 0 - return - 11 iret = 1 - return - end function str2rn - -!................................................................... - - subroutine strget ( string, default ) - - implicit NONE - -!------------------------------------------------------------------------- -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!------------------------------------------------------------------------- -! -! !ROUTINE: StrGet() -! -! !DESCRIPTION: -! -! Returns next string on the current line, or a default -! string if it fails for whatever reason. Similar to {\tt i90\_gstr()}. -! Kept for Inpak 77 upward compatibility. -! -! NOTE: This is an obsolete routine. The notion of "string" used -! here is not conventional. Please use routine {\tt TokGet()} -! instead. -! -! !CALLING SEQUENCE: -! -! call strget ( string, default ) -! -! !INPUT PARAMETERS: -! - character*(*) default ! default string - -! !OUTPUT PARAMETERS: - - character*(*) string ! desired string - -! -! !REVISION HISTORY: -! -! 19Jun96 da Silva Original code. -! 01Oct96 Jing Guo Removed the null terminitor -! -!------------------------------------------------------------------------- - - integer iret - - call i90_gstr ( string, iret ) - if ( iret .ne. 
0 ) then - string = default - end if - - return - end subroutine strget - - -end module m_inpak90 diff --git a/src/externals/mct/mpeu/m_ioutil.F90 b/src/externals/mct/mpeu/m_ioutil.F90 deleted file mode 100644 index 94cce456a7a..00000000000 --- a/src/externals/mct/mpeu/m_ioutil.F90 +++ /dev/null @@ -1,439 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_ioutil - a F90 module for several convenient I/O functions -! -! !DESCRIPTION: -! -! m\_ioutil is a module containing several portable interfaces for -! some highly system dependent, but frequently used I/O functions. -! -! !INTERFACE: - - module m_ioutil - implicit none - private ! except - - public :: opntext,clstext ! open/close a text file - public :: opnieee,clsieee ! open/close a binary sequential file - public :: luavail ! return a free logical unit - public :: luflush ! flush the buffer of a given unit - !public :: MX_LU - -! !REVISION HISTORY: -! 16Jul96 - J. Guo - (to do) -! 02Apr97 - Jing Guo - finished the coding -! 11Feb97 - Jing Guo - added luflush() -! 08Nov01 - Jace A Mogill FORTRAN only defines -! 99 units, three units below unit 10 are often used for -! stdin, stdout, and stderr. Be far more conservative -! and stay within FORTRAN standard. -! -!EOP -!_______________________________________________________________________ - - character(len=*),parameter :: myname="MCT(MPEU)::m_ioutil" - integer,parameter :: MX_LU=99 - -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: opnieee - portablly open an IEEE format file -! -! !DESCRIPTION: -! -! Open a file in IEEE format. -! -! IEEE format is refered as a FORTRAN "unformatted" file with -! "sequantial" access and variable record lengths. Under common -! Unix, it is only a file with records packed with a leading 4- -! byte word and a trailing 4-byte word indicating the size of -! the record in bytes. However, under UNICOS, it is also assumed -! to have numerical data representations represented according to -! the IEEE standard corresponding KIND conversions. Under a DEC -! machine, it means that compilations of the source code should -! have the "-bigendian" option specified. -! -! !INTERFACE: - - subroutine opnieee(lu,fname,status,ier,recl) - use m_stdio,only : stderr - implicit none - - integer, intent(in) :: lu ! logical unit number - character(len=*),intent(in) :: fname ! filename to be opended - character(len=*),intent(in) :: status ! the value for STATUS= - integer, intent(out):: ier ! the status - integer,optional,intent(in) :: recl ! record length - -! !REVISION HISTORY: -! 02Feb95 - Jing G. - First version included in PSAS. It is not -! used in the libpsas.a calls, since no binary data input/ -! output is to be handled. -! -! 09Oct96 - J. Guo - Check for any previous assign() call under -! UNICOS. -!EOP -!_______________________________________________________________________ - -#ifdef _UNICOS - character(len=128) :: attr -#endif - - ! 
local parameter - character(len=*),parameter :: myname_=myname//'::opnieee' - - integer,parameter :: iA=ichar('a') - integer,parameter :: mA=ichar('A') - integer,parameter :: iZ=ichar('z') - - logical :: direct - character(len=16) :: clen - character(len=len(status)) :: Ustat - integer :: i,ic - -! Work-around for absoft 9.0 f90, which has trouble understanding that -! ier is an output argument from the write() call below. - - ier = 0 - - direct=.false. - if(present(recl)) then - if(recl<0) then - clen='****************' - write(clen,'(i16)',iostat=ier) recl - write(stderr,'(3a)') myname_, & - ': invalid recl, ',trim(adjustl(clen)) - ier=-1 - return - endif - direct = recl>0 - endif - -#ifdef _UNICOS - call asnqunit(lu,attr,ier) ! test the unit - - if(ier.eq.-1) then ! the unit is not used - if(direct) then - call asnunit(lu,'-N ieee -F null',ier) - else - call asnunit(lu,'-N ieee -F f77',ier) - endif - ier=0 - - elseif(ier.ge.0) then ! the unit is already assigned - ier=-1 - endif - if(ier.ne.0) return -#endif - - do i=1,len(status) - ic=ichar(status(i:i)) - if(ic >= iA .and. ic <= iZ) ic=ic+(mA-iA) - Ustat(i:i)=char(ic) - end do - - select case(Ustat) - - case ('APPEND') - - if(direct) then - write(stderr,'(2a)') myname_, & - ': invalid arguments, (status=="APPEND",recl>0)' - ier=1 - return - endif - - open( & - unit =lu, & - file =fname, & - form ='unformatted', & - access ='sequential', & - status ='unknown', & - position ='append', & - iostat =ier ) - - case default - - if(direct) then - open( & - unit =lu, & - file =fname, & - form ='unformatted', & - access ='direct', & - status =status, & - recl =recl, & - iostat =ier ) - - else - open( & - unit =lu, & - file =fname, & - form ='unformatted', & - access ='sequential', & - status =status, & - position ='asis', & - iostat =ier ) - endif - - end select - - end subroutine opnieee -!----------------------------------------------------------------------- -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: clsieee - Close a logical unit opened by opnieee() -! -! !DESCRIPTION: -! -! The reason for a paired clsieee() for opnieee() instead of a -! simple close(), is for the portability reason. For example, -! under UNICOS, special system calls may be need to set up the -! unit right, and the status of the unit should be restored upon -! close. -! -! !INTERFACE: - - subroutine clsieee(lu,ier) - implicit none - integer, intent(in) :: lu ! the unit used by opnieee() - integer, intent(out) :: ier ! the status - -! !REVISION HISTORY: -! 10Oct96 - J. Guo - (to do) -!EOP -!_______________________________________________________________________ - close(lu,iostat=ier) -#ifdef _UNICOS - if(ier==0) call asnunit(lu,'-R',ier) ! remove attributes -#endif - - end subroutine clsieee - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: opntext - portablly open a text file -! -! !DESCRIPTION: -! -! Open a text (ASCII) file. Under FORTRAN, it is defined as -! "formatted" with "sequential" access. -! -! !INTERFACE: - - subroutine opntext(lu,fname,status,ier) - implicit none - - integer, intent(in) :: lu ! logical unit number - character(len=*),intent(in) :: fname ! 
filename to be opended - character(len=*),intent(in) :: status ! the value for STATUS=<> - integer, intent(out):: ier ! the status - - -! !REVISION HISTORY: -! -! 02Feb95 - Jing G. - First version included in PSAS and libpsas.a -! 09Oct96 - J. Guo - modified to allow assign() call under UNICOS -! = and now, it is a module in Fortran 90. -!EOP -!_______________________________________________________________________ - - ! local parameter - character(len=*),parameter :: myname_=myname//'::opntext' - - integer,parameter :: iA=ichar('a') - integer,parameter :: mA=ichar('A') - integer,parameter :: iZ=ichar('z') - - character(len=len(status)) :: Ustat - integer :: i,ic - -#ifdef _UNICOS - call asnunit(lu,'-R',ier) ! remove any set attributes - if(ier.ne.0) return ! let the parent handle it -#endif - - do i=1,len(status) - ic=ichar(status(i:i)) - if(ic >= iA .and. ic <= iZ) ic=ic+(mA-iA) - Ustat(i:i)=char(ic) - end do - - select case(Ustat) - - case ('APPEND') - - open( & - unit =lu, & - file =fname, & - form ='formatted', & - access ='sequential', & - status ='unknown', & - position ='append', & - iostat =ier ) - - case default - - open( & - unit =lu, & - file =fname, & - form ='formatted', & - access ='sequential', & - status =status, & - position ='asis', & - iostat =ier ) - - end select - - end subroutine opntext - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: clstext - close a text file opend with an opntext() call -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine clstext(lu,ier) - implicit none - - integer, intent(in) :: lu ! a logical unit to close - integer, intent(out) :: ier ! the status - -! !REVISION HISTORY: -! 09Oct96 - J. Guo - (to do) -!EOP -!_______________________________________________________________________ - - close(lu,iostat=ier) -#ifdef _UNICOS - if(ier == 0) call asnunit(lu,'-R',ier) ! remove any attributes -#endif - - end subroutine clstext - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: luavail - locate the next available unit -! -! !DESCRIPTION: -! -! luavail() Look for an available (not opened and not statically -! assigned to any I/O attributes to) logical unit. -! -! !INTERFACE: - - function luavail() - use m_stdio - implicit none - integer :: luavail ! result - -! !REVISION HISTORY: -! 23Apr98 - Jing Guo - new prototype/prolog/code -! - with additional unit constraints for SunOS. -! -! : Jing Guo, [09-Oct-96] -! + Checking also Cray assign() attributes, with some -! changes to the code. See also other routines. -! -! : Jing Guo, [01-Apr-94] -! + Initial code. -! 2001-11-08 - Jace A Mogill clean up -! logic for finding lu. -! -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::luavail' - - integer lu,ios - logical inuse - - lu=10 - ios=0 - inuse=.true. - - do while(ios.eq.0 .and. inuse .and. lu.le.MX_LU) - lu=lu+1 - inquire(unit=lu,opened=inuse,iostat=ios) - end do - - if(ios.ne.0) lu=-1 - luavail=lu -end function luavail - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
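!
! A minimal sketch of the intended calling pattern for the unit and
! text-file helpers above; the file name 'input.rc' and the error
! handling are illustrative assumptions, not part of this module:
!
!     use m_ioutil, only : luavail, opntext, clstext
!     integer :: lu, ier
!     lu = luavail()                          ! find a free unit
!     if ( lu < 0 ) stop 'no free logical unit'
!     call opntext ( lu, 'input.rc', 'old', ier )
!     if ( ier /= 0 ) stop 'opntext() failed'
!     ! ... read from unit lu ...
!     call clstext ( lu, ier )                ! close what opntext() opened
!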
-!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: luflush - a uniform interface of system flush() -! -! !DESCRIPTION: -! -! Flush() calls available on many systems are often implementation -! dependent. This subroutine provides a uniform interface. It -! also ignores invalid logical unit value. -! -! !INTERFACE: - - subroutine luflush(unit) - use m_stdio, only : stdout -#ifdef CPRNAG - use F90_UNIX_IO,only : flush -#endif - implicit none - integer,optional,intent(in) :: unit - -! !REVISION HISTORY: -! 13Mar98 - Jing Guo - initial prototype/prolog/code -! 08Jul02 - E. Ong - added flush support for nag95 -! 2001-11-08 Jace A Mogill - Flush is not part of -! the F90 standard. Default is NO unit flush. -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::luflush' - - integer :: ier - integer :: lu - - ! Which logical unit number? - - lu=stdout - if(present(unit)) lu=unit - if(lu < 0) return - - ! The following call may be system dependent. - -#if SYSIRIX64 || CPRNAG || SYSUNICOS - call flush(lu,ier) -#elif SYSAIX || CPRXLF - call flush_(lu) ! Function defined in xlf reference document. -#elif SYSLINUX || SYSOSF1 || SYSSUNOS || SYST3E || SYSUNIXSYSTEMV || SYSSUPERUX - call flush(lu) -#endif - -end subroutine luflush -!----------------------------------------------------------------------- -end module m_ioutil -!. diff --git a/src/externals/mct/mpeu/m_mall.F90 b/src/externals/mct/mpeu/m_mall.F90 deleted file mode 100644 index 416538a4ced..00000000000 --- a/src/externals/mct/mpeu/m_mall.F90 +++ /dev/null @@ -1,1669 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_mall - A bookkeeper of user allocated memories -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_mall - implicit none - private ! except - - public :: mall_ci - public :: mall_co - public :: mall_mci - public :: mall_mco - public :: mall_flush - public :: mall_reset - - ! mall_ activity controls - - public :: mall_ison - public :: mall_set - - interface mall_ci; module procedure ci_; end interface - interface mall_co; module procedure co_; end interface - - interface mall_mci; module procedure & - ciI0_, & - ciI1_, & - ciI2_, & - ciI3_, & - ciR0_, & - ciR1_, & - ciR2_, & - ciR3_, & - ciD0_, & - ciD1_, & - ciD2_, & - ciD3_, & - ciL0_, & - ciL1_, & - ciL2_, & - ciL3_, & - ciC0_, & - ciC1_, & - ciC2_, & - ciC3_ - end interface - - interface mall_mco; module procedure & - coI0_, & - coI1_, & - coI2_, & - coI3_, & - coR0_, & - coR1_, & - coR2_, & - coR3_, & - coD0_, & - coD1_, & - coD2_, & - coD3_, & - coL0_, & - coL1_, & - coL2_, & - coL3_, & - coC0_, & - coC1_, & - coC2_, & - coC3_ - end interface - - interface mall_flush; module procedure flush_; end interface - interface mall_reset; module procedure reset_; end interface - - interface mall_ison; module procedure ison_; end interface - interface mall_set; module procedure set_; end interface - -! !REVISION HISTORY: -! 
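!
!       A minimal sketch of the intended check-in/check-out pattern
!       around allocate/deallocate calls; the array buf, its size n,
!       and the thread tag 'myModule::buf' are illustrative
!       assumptions, not names defined by this module:
!
!         use m_mall, only : mall_ison, mall_mci, mall_mco
!         real, pointer :: buf(:)
!         integer :: n, ier
!         ! assume n has been set elsewhere
!         allocate ( buf(n), stat=ier )
!         if ( mall_ison() ) call mall_mci ( buf, 'myModule::buf' )
!         ! ... use buf ...
!         if ( mall_ison() ) call mall_mco ( buf, 'myModule::buf' )
!         deallocate ( buf, stat=ier )
!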
13Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_mall' - -#if SYSUNICOS || SYSIRIX64 || _R8_ - integer,parameter :: NBYTE_PER_WORD = 8 -#else - integer,parameter :: NBYTE_PER_WORD = 4 -#endif - - integer,parameter :: NSZ= 32 - integer,parameter :: MXL=250 - - integer, save :: nreset = 0 ! number of reset_() calls - logical, save :: started = .false. ! the module is in use - - integer, save :: n_ =0 ! number of accouting bins. - character(len=NSZ),dimension(MXL),save :: name_ - - ! integer, dimension(1) :: mall - ! names of the accouting bins - - logical,save :: mall_on=.false. ! mall activity switch - - integer,save :: mci - integer,dimension(MXL),save :: mci_ ! maximum ci_() calls - integer,save :: nci - integer,dimension(MXL),save :: nci_ ! net ci_() calls - integer,save :: hwm - integer,dimension(MXL),save :: hwm_ ! high-water-mark of allocate() - integer,save :: nwm - integer,dimension(MXL),save :: nwm_ ! net-water-mark of allocate() - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ison_ - -! -! !DESCRIPTION: -! -! !INTERFACE: - - function ison_() - implicit none - logical :: ison_ - -! !REVISION HISTORY: -! 25Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ison_' - - ison_=mall_on - -end function ison_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: set_ - set the switch on -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine set_(on) - implicit none - logical,optional,intent(in) :: on - -! !REVISION HISTORY: -! 25Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::set_' - - mall_on=.true. - if(present(on)) mall_on=on - -end subroutine set_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciI0_ - check in as an integer scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciI0_(marg,thread) - implicit none - integer,intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciI0_' - - if(mall_on) call ci_(1,thread) - -end subroutine ciI0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciI1_ - check in as an integer rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciI1_(marg,thread) - implicit none - integer,dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciI1_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciI1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciI2_ - check in as an integer rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciI2_(marg,thread) - implicit none - integer,dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciI2_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciI2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciI3_ - check in as an integer rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciI3_(marg,thread) - implicit none - integer,dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciI3_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciI3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciR0_ - check in as a real(SP) scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciR0_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciR0_' - - if(mall_on) call ci_(1,thread) - -end subroutine ciR0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciR1_ - check in as a real(SP) rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciR1_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciR1_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciR1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciR2_ - check in as a real(SP) rank 2 array -! -! !DESCRIPTION: -! -! 
!INTERFACE: - - subroutine ciR2_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciR2_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciR2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciR3_ - check in as a real(SP) rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciR3_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciR3_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciR3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciD0_ - check in as a real(DP) scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciD0_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciD0_' - - if(mall_on) call ci_(2,thread) - -end subroutine ciD0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciD1_ - check in as a real(DP) rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciD1_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciD1_' - - if(mall_on) call ci_(2*size(marg),thread) - -end subroutine ciD1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciD2_ - check in as a real(DP) rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciD2_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciD2_' - - if(mall_on) call ci_(2*size(marg),thread) - -end subroutine ciD2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciD3_ - check in as a real(DP) rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciD3_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciD3_' - - if(mall_on) call ci_(2*size(marg),thread) - -end subroutine ciD3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciL0_ - check in as a logical scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciL0_(marg,thread) - implicit none - logical,intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciL0_' - - if(mall_on) call ci_(1,thread) - -end subroutine ciL0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciL1_ - check in as a logical rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciL1_(marg,thread) - implicit none - logical,dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciL1_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciL1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciL2_ - check in as a logical rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciL2_(marg,thread) - implicit none - logical,dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciL2_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciL2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciL3_ - check in as a logical rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciL3_(marg,thread) - implicit none - logical,dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciL3_' - - if(mall_on) call ci_(size(marg),thread) - -end subroutine ciL3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciC0_ - check in as a character scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciC0_(marg,thread) - implicit none - character(len=*),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciC0_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg)+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call ci_(nw,thread) - -end subroutine ciC0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciC1_ - check in as a character rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciC1_(marg,thread) - implicit none - character(len=*),dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciC1_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg(1))+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call ci_(size(marg)*nw,thread) - -end subroutine ciC1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciC2_ - check in as a character rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciC2_(marg,thread) - implicit none - character(len=*),dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciC2_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg(1,1))+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call ci_(size(marg)*nw,thread) - -end subroutine ciC2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ciC3_ - check in as a character rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ciC3_(marg,thread) - implicit none - character(len=*),dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ciC3_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg(1,1,1))+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call ci_(size(marg)*nw,thread) - -end subroutine ciC3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: ci_ - check-in allocate activity -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ci_(nword,thread) - use m_stdio, only : stderr - use m_die, only : die - implicit none - integer,intent(in) :: nword - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 13Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::ci_' - integer :: ith - - if(.not.mall_on) return - - if(nword < 0) then - write(stderr,'(2a,i4)') myname_, & - ': invalide argument, nword = ',nword - call die(myname_) - endif - - ith=lookup_(thread) - - ! update the account - - nci_(ith)=nci_(ith)+1 - mci_(ith)=mci_(ith)+1 - nwm_(ith)=nwm_(ith)+nword - if(hwm_(ith).lt.nwm_(ith)) hwm_(ith)=nwm_(ith) - - ! update the total budget - - nci=nci+1 - mci=mci+1 - nwm=nwm+nword - if(hwm.lt.nwm) hwm=nwm - -end subroutine ci_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coI0_ - check in as an integer scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coI0_(marg,thread) - implicit none - integer,intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coI0_' - - if(mall_on) call co_(1,thread) - -end subroutine coI0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coI1_ - check in as an integer rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coI1_(marg,thread) - implicit none - integer,dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coI1_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coI1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coI2_ - check in as an integer rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coI2_(marg,thread) - implicit none - integer,dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coI2_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coI2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coI3_ - check in as an integer rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coI3_(marg,thread) - implicit none - integer,dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coI3_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coI3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coR0_ - check in as a real(SP) scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coR0_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coR0_' - - if(mall_on) call co_(1,thread) - -end subroutine coR0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coR1_ - check in as a real(SP) rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coR1_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coR1_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coR1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coR2_ - check in as a real(SP) rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coR2_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coR2_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coR2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coR3_ - check in as a real(SP) rank 3 array -! -! !DESCRIPTION: -! -! 
!INTERFACE: - - subroutine coR3_(marg,thread) - use m_realkinds, only : SP - implicit none - real(SP),dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coR3_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coR3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coD0_ - check in as a real(DP) scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coD0_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coD0_' - - if(mall_on) call co_(2,thread) - -end subroutine coD0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coD1_ - check in as a real(DP) rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coD1_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coD1_' - - if(mall_on) call co_(2*size(marg),thread) - -end subroutine coD1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coD2_ - check in as a real(DP) rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coD2_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coD2_' - - if(mall_on) call co_(2*size(marg),thread) - -end subroutine coD2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coD3_ - check in as a real(DP) rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coD3_(marg,thread) - use m_realkinds, only : DP - implicit none - real(DP),dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coD3_' - - if(mall_on) call co_(2*size(marg),thread) - -end subroutine coD3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! 
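Taken together, these check-in/check-out wrappers are meant to bracket every ALLOCATE/DEALLOCATE pair so the per-thread word accounting stays balanced. A minimal caller-side sketch, assuming the module's public generic names are mall_ison, mall_mci and mall_mco (as the specific-procedure naming suggests; the actual public list is declared earlier in this file), and using the mpeu error handler die():

      subroutine work_example(n)
        use m_mall,      only : mall_ison, mall_mci, mall_mco
        use m_die,       only : die
        use m_realkinds, only : SP
        implicit none
        integer, intent(in) :: n
        character(len=*), parameter :: myname_ = 'work_example'
        real(SP), allocatable, dimension(:) :: work
        integer :: ier

        allocate(work(n), stat=ier)
        if(ier /= 0) call die(myname_, 'allocate(work)', ier)
        if(mall_ison()) call mall_mci(work, myname_)   ! check the words in

        ! ... use work(:) ...

        if(mall_ison()) call mall_mco(work, myname_)   ! check the words out
        deallocate(work, stat=ier)
        if(ier /= 0) call die(myname_, 'deallocate(work)', ier)
      end subroutine work_example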
NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coL0_ - check in as a logical scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coL0_(marg,thread) - implicit none - logical,intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coL0_' - - if(mall_on) call co_(1,thread) - -end subroutine coL0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coL1_ - check in as a logical rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coL1_(marg,thread) - implicit none - logical,dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coL1_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coL1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coL2_ - check in as a logical rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coL2_(marg,thread) - implicit none - logical,dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coL2_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coL2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coL3_ - check in as a logical rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coL3_(marg,thread) - implicit none - logical,dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coL3_' - - if(mall_on) call co_(size(marg),thread) - -end subroutine coL3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coC0_ - check in as a character scalar -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coC0_(marg,thread) - implicit none - character(len=*),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coC0_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg)+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call co_(nw,thread) - -end subroutine coC0_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coC1_ - check in as a character rank 1 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coC1_(marg,thread) - implicit none - character(len=*),dimension(:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coC1_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg(1))+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call co_(size(marg)*nw,thread) - -end subroutine coC1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coC2_ - check in as a character rank 2 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coC2_(marg,thread) - implicit none - character(len=*),dimension(:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coC2_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg(1,1))+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call co_(size(marg)*nw,thread) - -end subroutine coC2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: coC3_ - check in as a character rank 3 array -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine coC3_(marg,thread) - implicit none - character(len=*),dimension(:,:,:),intent(in) :: marg - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 21Oct99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::coC3_' - integer :: nw - - if(.not.mall_on) return - nw=(len(marg(1,1,1))+NBYTE_PER_WORD-1)/NBYTE_PER_WORD - call co_(size(marg)*nw,thread) - -end subroutine coC3_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: co_ - check-out allocate activity -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine co_(nword,thread) - use m_stdio, only : stderr - use m_die, only : die - implicit none - integer,intent(in) :: nword - character(len=*),intent(in) :: thread - -! !REVISION HISTORY: -! 
13Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::co_' - integer :: ith - - if(.not.mall_on) return - - if(nword < 0) then - write(stderr,'(2a,i4)') myname_, & - ': invalide argument, nword = ',nword - call die(myname_) - endif - - ! if the thread is "unknown", it would be treated as a - ! new thread with net negative memory activity. - - ith=lookup_(thread) - - ! update the account - - nci_(ith)=nci_(ith)-1 - nwm_(ith)=nwm_(ith)-nword - - ! update the total budget - - nci=nci-1 - nwm=nwm-nword - -end subroutine co_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: cix_ - handling macro ALLOC_() error -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine cix_(thread,stat,fnam,line) - use m_stdio, only : stderr - use m_die, only : die - implicit none - character(len=*),intent(in) :: thread - integer,intent(in) :: stat - character(len=*),intent(in) :: fnam - integer,intent(in) :: line - - -! !REVISION HISTORY: -! 13Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::cix_' - - write(stderr,'(2a,i4)') trim(thread), & - ': ALLOC_() error, stat =',stat - call die('ALLOC_',fnam,line) - -end subroutine cix_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: cox_ - handling macro DEALLOC_() error -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine cox_(thread,stat,fnam,line) - use m_stdio, only : stderr - use m_die, only : die - implicit none - character(len=*),intent(in) :: thread - integer,intent(in) :: stat - character(len=*),intent(in) :: fnam - integer,intent(in) :: line - -! !REVISION HISTORY: -! 13Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::cox_' - - write(stderr,'(2a,i4)') trim(thread), & - ': DEALLOC_() error, stat =',stat - call die('DEALLOC_',fnam,line) - -end subroutine cox_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: flush_ - balancing the up-to-date ci/co calls -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine flush_(lu) - use m_stdio, only : stderr - use m_ioutil, only : luflush - use m_die, only : die - implicit none - integer,intent(in) :: lu - -! !REVISION HISTORY: -! 
17Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::flush_' - - integer,parameter :: lnmax=38 - character(len=max(lnmax,NSZ)) :: name - - character(len=6) :: hwm_wd,nwm_wd - character(len=1) :: flag_ci,flag_wm - integer :: i,ier,ln - - if(.not.mall_on) return - - if(.not.started) call reset_() - - write(lu,'(72a/)',iostat=ier) ('_',i=1,72) - if(ier /= 0) then - write(stderr,'(2a,i3)') myname_,': can not write(), unit =',lu - call die(myname_) - endif - - write(lu,'(a,t39,4(2x,a))',iostat=ier) '[MALL]', & - 'max-ci','net-ci ','max-wm','net-wm' - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_,': can not write(), unit =',lu - call die(myname_) - endif - - call luflush(lu) - -!23.|....1....|....2....|....3....|....4....|....5....|....6....|....7.. -!_______________________________________________________________________ -! -![MALL] max_ci net-ci max-wm net-wm -!----------------------------------------------------------------------- -!total. ...333 ...333* ..333M ..333i* -!_______________________________________________________________________ - - write(lu,'(72a)') ('-',i=1,72) - - do i=1,min(n_,MXL) - call wcount_(hwm_(i),hwm_wd) - call wcount_(nwm_(i),nwm_wd) - - flag_ci=' ' - if(nci_(i) /= 0) flag_ci='*' - - flag_wm=' ' - if(nwm_(i) /= 0) flag_wm='*' - - name=name_(i) - ln=max(len_trim(name),lnmax) - write(lu,'(a,2(2x,i6),a,2(2x,a6),a)') name(1:ln), & - mci_(i),nci_(i),flag_ci,hwm_wd,nwm_wd,flag_wm - end do - - call wcount_(hwm,hwm_wd) - call wcount_(nwm,nwm_wd) - - flag_ci=' ' - if(nci /= 0) flag_ci='*' - flag_wm=' ' - if(nwm /= 0) flag_wm='*' - - name='.total.' - ln=max(len_trim(name),lnmax) - write(lu,'(a,2(2x,i6),a,2(2x,a6),a)') name(1:ln), & - mci,nci,flag_ci,hwm_wd,nwm_wd,flag_wm - - write(lu,'(72a/)') ('_',i=1,72) - - if(nreset /= 1) write(lu,'(2a,i3,a)') myname_, & - ': reset_ ',nreset,' times' - - call luflush(lu) -end subroutine flush_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: wcount_ - generate word count output with unit -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine wcount_(wknt,cknt) - implicit none - - integer, intent(in) :: wknt ! given an integer value - character(len=6),intent(out) :: cknt ! return a string value - -! !REVISION HISTORY: -! 17Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::wcount_' - -character(len=1) :: cwd -integer,parameter :: KWD=1024 -integer,parameter :: MWD=1024*1024 -integer,parameter :: GWD=1024*1024*1024 - -integer :: iwd - -if(wknt < 0) then - cknt='------' -else - cwd='i' - iwd=wknt - if(iwd > 9999) then - cwd='K' - iwd=(wknt+KWD-1)/KWD - endif - if(iwd > 9999) then - cwd='M' - iwd=(wknt+MWD-1)/MWD - endif - if(iwd > 9999) then - cwd='G' - iwd=(wknt+GWD-1)/GWD - endif - write(cknt,'(i5,a)') iwd,cwd -endif -end subroutine wcount_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: lookup_ - search/insert a name in a list -! -! !DESCRIPTION: -! -! 
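All of the typed wrappers above funnel into ci_/co_ with a word count: one word per default INTEGER, REAL(SP) or LOGICAL element, two words per REAL(DP) element, and character lengths rounded up to whole words via NBYTE_PER_WORD. At the end of a run the accumulated accounts are written out through flush_; a minimal sketch, assuming the module's public name for it is mall_flush (again following the module-procedure naming):

      subroutine report_memory()
        use m_mall,  only : mall_flush
        use m_stdio, only : stdout
        implicit none

        ! writes the per-thread table of max/net check-in counts and
        ! high-water/net word counts, flagging unbalanced accounts with '*'
        call mall_flush(stdout)
      end subroutine report_memory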
!INTERFACE: - - function lookup_(thread) - use m_chars, only : uppercase - implicit none - character(len=*),intent(in) :: thread - integer :: lookup_ - -! !REVISION HISTORY: -! 17Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::lookup_' - - logical :: found - integer :: ith - - if(.not.started) call reset_() - -!---------------------------------------- -ith=0 -found=.false. -do while(.not.found .and. ith < min(n_,MXL)) - ith=ith+1 - found= uppercase(thread) == uppercase(name_(ith)) -end do - -if(.not.found) then - if(n_==0) then - nci=0 - mci=0 - nwm=0 - hwm=0 - endif - - n_=n_+1 - if(n_ == MXL) then - ith=MXL - name_(ith)='.overflow.' - else - ith=n_ - name_(ith)=thread - endif - - nci_(ith)=0 - mci_(ith)=0 - nwm_(ith)=0 - hwm_(ith)=0 -endif - -lookup_=ith - -end function lookup_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: reset_ - initialize the module data structure -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine reset_() - implicit none - -! !REVISION HISTORY: -! 16Mar98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::reset_' - - if(.not.mall_on) return - - nreset=nreset+1 - started=.true. - - name_(1:n_)=' ' - - mci_(1:n_)=0 - nci_(1:n_)=0 - hwm_(1:n_)=0 - nwm_(1:n_)=0 - - n_ =0 - - mci=0 - nci=0 - hwm=0 - nwm=0 - -end subroutine reset_ -!======================================================================= -end module m_mall diff --git a/src/externals/mct/mpeu/m_mpif.F90 b/src/externals/mct/mpeu/m_mpif.F90 deleted file mode 100644 index d8d6318a545..00000000000 --- a/src/externals/mct/mpeu/m_mpif.F90 +++ /dev/null @@ -1,69 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_mpif - a portable interface to the MPI "mpif.h" COMMONs. -! -! !DESCRIPTION: -! -! The purpose of \verb"m_mpif" module is to provide a portable -! interface of \verb"mpif.h" with different MPI implementation. -! By combining module \verb"m_mpif" and \verb"m_mpif90", it may be -! possible to build a Fortran 90 MPI binding module graduately. -! -! Although it is possible to use \verb'include "mpif.h"' directly -! in individual modules, it has several problems: -! \begin{itemize} -! \item It may conflict with either the source code of a {\sl fixed} -! format or the code of a {\sl free} format; -! \item It does not provide the protection and the safety of using -! these variables as what a \verb"MODULE" would provide. -! \end{itemize} -! -! More information may be found in the module \verb"m_mpif90". -! -! !INTERFACE: - - module m_mpif - implicit none - private ! 
except - - public :: MPI_INTEGER - public :: MPI_REAL - public :: MPI_DOUBLE_PRECISION - public :: MPI_LOGICAL - public :: MPI_CHARACTER - - public :: MPI_REAL4 - public :: MPI_REAL8 - - public :: MPI_COMM_WORLD - public :: MPI_COMM_NULL - - public :: MPI_SUM - public :: MPI_PROD - public :: MPI_MIN - public :: MPI_MAX - - public :: MPI_MAX_ERROR_STRING - public :: MPI_STATUS_SIZE - public :: MPI_ANY_SOURCE - -#ifdef MPICH_ - public :: MPIPRIV ! the common block name -#endif - - include "mpif.h" - -! !REVISION HISTORY: -! 01Apr98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_mpif' - - end module m_mpif -!. diff --git a/src/externals/mct/mpeu/m_mpif90.F90 b/src/externals/mct/mpeu/m_mpif90.F90 deleted file mode 100644 index 42e5d335579..00000000000 --- a/src/externals/mct/mpeu/m_mpif90.F90 +++ /dev/null @@ -1,719 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_mpif90 - a Fortran 90 style MPI module interface. -! -! !DESCRIPTION: -! -! By wrapping \verb'include "mpif.h"' into a module, \verb"m_mpif()" -! provides an easy way to -!\begin{itemize} -! \item avoid the problem with {\sl fixed} or {\sl free} formatted -! Fortran 90 files; -! \item provide protections with only a limited set of \verb"PUBLIC" -! variables; and -! \item be extended to a MPI Fortran 90 binding. -!\end{itemize} -! -! !INTERFACE: - - module m_mpif90 - use m_mpif, only : MP_INTEGER => MPI_INTEGER - use m_mpif, only : MP_REAL => MPI_REAL - use m_mpif, only : MP_DOUBLE_PRECISION & - => MPI_DOUBLE_PRECISION - use m_mpif, only : MP_LOGICAL => MPI_LOGICAL - use m_mpif, only : MP_CHARACTER => MPI_CHARACTER - - use m_mpif, only : MP_REAL4 => MPI_REAL4 - use m_mpif, only : MP_REAL8 => MPI_REAL8 - - use m_mpif, only : MP_COMM_WORLD => MPI_COMM_WORLD - use m_mpif, only : MP_COMM_NULL => MPI_COMM_NULL - use m_mpif, only : MP_SUM => MPI_SUM - use m_mpif, only : MP_PROD => MPI_PROD - use m_mpif, only : MP_MIN => MPI_MIN - use m_mpif, only : MP_MAX => MPI_MAX - use m_mpif, only : MP_MAX_ERROR_STRING & - => MPI_MAX_ERROR_STRING - use m_mpif, only : MP_STATUS_SIZE => MPI_STATUS_SIZE - use m_mpif, only : MP_ANY_SOURCE => MPI_ANY_SOURCE - - implicit none - private - - public :: MP_type - - public :: MP_INTEGER - public :: MP_REAL - public :: MP_DOUBLE_PRECISION - public :: MP_LOGICAL - public :: MP_CHARACTER - - public :: MP_REAL4 - public :: MP_REAL8 - - public :: MP_COMM_WORLD - public :: MP_COMM_NULL - - public :: MP_SUM - public :: MP_PROD - public :: MP_MIN - public :: MP_MAX - - public :: MP_ANY_SOURCE - - public :: MP_MAX_ERROR_STRING - - public :: MP_init - public :: MP_initialized - public :: MP_finalize - public :: MP_abort - - public :: MP_wtime - public :: MP_wtick - - public :: MP_comm_size - public :: MP_comm_rank - public :: MP_comm_dup - public :: MP_comm_free - - public :: MP_cart_create - public :: MP_dims_create - public :: MP_cart_coords - public :: MP_cart_rank - - public :: MP_error_string - - public :: MP_perr - - public :: MP_STATUS_SIZE - public :: MP_status - - public :: MP_log2 - -! !REVISION HISTORY: -! 09Dec97 - Jing Guo - initial prototyping/coding. -! . 
started with everything public, without any interface -! declaration. -! . Then limited to only variables current expected to -! be used. -! -!EOP -!_______________________________________________________________________ - -integer,dimension(MP_STATUS_SIZE) :: MP_status - - !---------------------------------------- - -interface MP_init - subroutine MPI_init(ier) - integer :: ier - end subroutine MPI_init -end interface - -interface MP_initialized - subroutine MPI_initialized(flag,ier) - logical :: flag - integer :: ier - end subroutine MPI_initialized -end interface - -interface MP_finalize - subroutine MPI_finalize(ier) - integer :: ier - end subroutine MPI_finalize -end interface - -interface MP_error_string - subroutine MPI_error_string(ierror,cerror,ln,ier) - integer :: ierror - character(len=*) :: cerror - integer :: ln - integer :: ier - end subroutine MPI_error_string -end interface - -interface MP_type; module procedure & - typeI_, & ! MPI_INTEGER - typeL_, & ! MPI_LOGICAL - typeC_, & ! MPI_CHARACTER - typeSP_, & ! MPI_REAL - typeDP_, & ! MPI_DOUBLE_PRECISION - typeI1_, & ! MPI_INTEGER - typeL1_, & ! MPI_LOGICAL - typeC1_, & ! MPI_CHARACTER - typeSP1_, & ! MPI_REAL - typeDP1_, & ! MPI_DOUBLE_PRECISION - typeI2_, & ! MPI_INTEGER - typeL2_, & ! MPI_LOGICAL - typeC2_, & ! MPI_CHARACTER - typeSP2_, & ! MPI_REAL - typeDP2_ ! MPI_DOUBLE_PRECISION -end interface - -interface MP_perr; module procedure perr_; end interface - -interface MP_abort - subroutine MPI_abort(comm,errorcode,ier) - integer :: comm - integer :: errorcode - integer :: ier - end subroutine MPI_abort -end interface - - !---------------------------------------- -interface MP_wtime - function MPI_wtime() - double precision :: MPI_wtime - end function MPI_wtime -end interface - -interface MP_wtick - function MPI_wtick() - double precision :: MPI_wtick - end function MPI_wtick -end interface - - !---------------------------------------- -interface MP_comm_size - subroutine MPI_comm_size(comm,size,ier) - integer :: comm - integer :: size - integer :: ier - end subroutine MPI_comm_size -end interface - -interface MP_comm_rank - subroutine MPI_comm_rank(comm,rank,ier) - integer :: comm - integer :: rank - integer :: ier - end subroutine MPI_comm_rank -end interface - -interface MP_comm_dup - subroutine MPI_comm_dup(comm,newcomm,ier) - integer :: comm - integer :: newcomm - integer :: ier - end subroutine MPI_comm_dup -end interface - -interface MP_comm_free - subroutine MPI_comm_free(comm,ier) - integer :: comm - integer :: ier - end subroutine MPI_comm_free -end interface - - !---------------------------------------- -interface MP_cart_create - subroutine MPI_cart_create(comm_old,ndims,dims,periods, & - reorder,comm_cart,ier) - integer :: comm_old - integer :: ndims - integer,dimension(*) :: dims - logical,dimension(*) :: periods - logical :: reorder - integer :: comm_cart - integer :: ier - end subroutine MPI_cart_create -end interface - -interface MP_dims_create - subroutine MPI_dims_create(nnodes,ndims,dims,ier) - integer :: nnodes - integer :: ndims - integer,dimension(*) :: dims - integer :: ier - end subroutine MPI_dims_create -end interface - -interface MP_cart_coords - subroutine MPI_cart_coords(comm,rank,maxdims,coords,ier) - integer :: comm - integer :: rank - integer :: maxdims - integer,dimension(*) :: coords - integer :: ier - end subroutine MPI_cart_coords -end interface - -interface MP_cart_rank - subroutine MPI_cart_rank(comm,coords,rank,ier) - integer :: comm - integer,dimension(*) :: coords - integer :: rank 
- integer :: ier - end subroutine MPI_cart_rank -end interface - !---------------------------------------- - - character(len=*),parameter :: myname='m_mpif90' -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeI_ - return MPI datatype of INTEGER -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeI_(ival) - implicit none - integer,intent(in) :: ival - integer :: typeI_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeI_' - - typeI_=MP_INTEGER - -end function typeI_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeL_ - return MPI datatype of LOGICAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeL_(lval) - implicit none - logical,intent(in) :: lval - integer :: typeL_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeL_' - - typeL_=MP_LOGICAL - -end function typeL_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeC_ - return MPI datatype of CHARACTER -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeC_(cval) - implicit none - character(len=*),intent(in) :: cval - integer :: typeC_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeC_' - - typeC_=MP_CHARACTER - -end function typeC_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeSP_ - return MPI datatype of single precision REAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeSP_(rval) - use m_realkinds,only : SP - implicit none - real(SP),intent(in) :: rval - integer :: typeSP_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeSP_' - - typeSP_=MP_REAL - -end function typeSP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeDP_ - return MPI datatype of double precision REAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeDP_(rval) - use m_realkinds,only : DP - implicit none - real(DP),intent(in) :: rval - integer :: typeDP_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeDP_' - - typeDP_=MP_DOUBLE_PRECISION - -end function typeDP_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeI1_ - return MPI datatype of INTEGER -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeI1_(ival) - implicit none - integer,dimension(:),intent(in) :: ival - integer :: typeI1_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeI1_' - - typeI1_=MP_INTEGER - -end function typeI1_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeL1_ - return MPI datatype of LOGICAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeL1_(lval) - implicit none - logical,dimension(:),intent(in) :: lval - integer :: typeL1_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeL1_' - - typeL1_=MP_LOGICAL - -end function typeL1_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeC1_ - return MPI datatype of CHARACTER -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeC1_(cval) - implicit none - character(len=*),dimension(:),intent(in) :: cval - integer :: typeC1_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeC1_' - - typeC1_=MP_CHARACTER - -end function typeC1_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeSP1_ - return MPI datatype of single precision REAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeSP1_(rval) - use m_realkinds,only : SP - implicit none - real(SP),dimension(:),intent(in) :: rval - integer :: typeSP1_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeSP1_' - - typeSP1_=MP_REAL - -end function typeSP1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeDP1_ - return MPI datatype of double precision REAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeDP1_(rval) - use m_realkinds,only : DP - implicit none - real(DP),dimension(:),intent(in) :: rval - integer :: typeDP1_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeDP1_' - - typeDP1_=MP_DOUBLE_PRECISION - -end function typeDP1_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeI2_ - return MPI datatype of INTEGER -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeI2_(ival) - implicit none - integer,dimension(:,:),intent(in) :: ival - integer :: typeI2_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeI2_' - - typeI2_=MP_INTEGER - -end function typeI2_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeL2_ - return MPI datatype of LOGICAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeL2_(lval) - implicit none - logical,dimension(:,:),intent(in) :: lval - integer :: typeL2_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeL2_' - - typeL2_=MP_LOGICAL - -end function typeL2_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeC2_ - return MPI datatype of CHARACTER -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeC2_(cval) - implicit none - character(len=*),dimension(:,:),intent(in) :: cval - integer :: typeC2_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeC2_' - - typeC2_=MP_CHARACTER - -end function typeC2_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeSP2_ - return MPI datatype of single precision REAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeSP2_(rval) - use m_realkinds,only : SP - implicit none - real(SP),dimension(:,:),intent(in) :: rval - integer :: typeSP2_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeSP2_' - - typeSP2_=MP_REAL - -end function typeSP2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: typeDP2_ - return MPI datatype of double precision REAL -! -! !DESCRIPTION: -! -! !INTERFACE: - - function typeDP2_(rval) - use m_realkinds,only : DP - implicit none - real(DP),dimension(:,:),intent(in) :: rval - integer :: typeDP2_ - -! !REVISION HISTORY: -! 28Sep99 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::typeDP2_' - - typeDP2_=MP_DOUBLE_PRECISION - -end function typeDP2_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: perr_ - MPI error information hanlder -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine perr_(proc,MP_proc,ierror) - use m_stdio, only : stderr - implicit none - character(len=*),intent(in) :: proc - character(len=*),intent(in) :: MP_proc - integer,intent(in) :: ierror - -! !REVISION HISTORY: -! 21Apr98 - Jing Guo - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::perr_' - - character(len=MP_MAX_ERROR_STRING) :: estr - integer :: ln,ier - - call MP_error_string(ierror,estr,ln,ier) - if(ier /= 0 .or. ln<=0) then - write(stderr,'(4a,i4)') proc,': ', & - MP_proc,' error, ierror =',ierror - else - write(stderr,'(6a)') proc,': ', & - MP_proc,' error, "',estr(1:ln),'"' - endif - -end subroutine perr_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: MP_log2 - The smallest integer its power of 2 is >= nPE -! -! !DESCRIPTION: -! -! !INTERFACE: - - function MP_log2(nPE) - implicit none - integer,intent(in) :: nPE - integer :: MP_log2 - -! !REVISION HISTORY: -! 01Feb00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::MP_log2' - - integer :: n2 - - MP_log2=0 - n2=1 - do while(n2 - initial prototype/prolog/code -! 28Sep99 - Jing Guo -! - Added additional calls to support the "Violet" system -! development. -! -! !DESIGN ISSUES: -! \begin{itemize} -! -! \item It might be considered useful to implement this module to be -! applicable to a given {\sl communicator}. The argument -! taken now is to only have one multiple output stream handle -! per excution. This is consistent with \verb"stdout" in the -! traditional sense. (Jing Guo, 25Feb98) -! -! \item \verb"mpout_log()" is implemented in a way producing output -! only if \verb"mpout_ison()" (being \verb".true."). The reason -! of not implementing a default output such as \verb"stdout", is -! hoping to provent too many unexpected output when the system is -! switched to a multiple PE system. The design principle for -! this module is that \verb"mpout" is basically {\sl not} the same -! module as \verb"stdout". (Jing Guo, 28Sep99) -! -! \end{itemize} -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_mpout' - - character(len=*),parameter :: def_pfix='mpout' - - integer,save :: isec=-1 - integer,save :: mpout=stdout - logical,save :: mpout_set=.false. - character(len=LEN_FILENAME-4),save :: upfix=def_pfix - integer,parameter :: mpout_MASK=3 ! every four PEs - -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! 
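Because m_mpif90 above only renames the mpif.h constants and declares explicit interfaces for a handful of calls, callers combine its MP_* entry points with ordinary MPI routines; MP_type picks the MPI datatype from the Fortran type, kind and rank of its argument. A small sketch under those assumptions (MPI_send itself is standard MPI, not part of the module):

      subroutine send_to_root(buf, tag)
        use m_mpif90,    only : MP_type, MP_perr
        use m_mpif90,    only : MP_comm_rank, MP_COMM_WORLD
        use m_realkinds, only : DP
        implicit none
        real(DP), dimension(:), intent(in) :: buf
        integer, intent(in) :: tag
        integer :: myID, ier

        call MP_comm_rank(MP_COMM_WORLD, myID, ier)
        if(ier /= 0) call MP_perr('send_to_root', 'MP_comm_rank()', ier)

        ! MP_type(buf) resolves to typeDP1_, i.e. MPI_DOUBLE_PRECISION
        if(myID /= 0) then
          call MPI_send(buf, size(buf), MP_type(buf), 0, tag, &
                        MP_COMM_WORLD, ier)
          if(ier /= 0) call MP_perr('send_to_root', 'MPI_send()', ier)
        endif
      end subroutine send_to_root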
!IROUTINE: open_ - open a multiple files with the same name prefix -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine open_(mask,pfix) - use m_stdio, only : stderr,stdout - use m_ioutil, only : luavail,opntext - use m_dropdead, only : die - use m_mpif90, only : MP_comm_WORLD - use m_mpif90, only : MP_comm_rank - use m_mpif90, only : MP_perr - implicit none - integer,optional,intent(in) :: mask - character(len=*),optional,intent(in) :: pfix - -! !EXAMPLES: -! -! Examples of using mpout_MASK or mask: -! -! If the mask has all "1" in every bit, there will be no output -! on every PE, except the PE of rank 0. -! -! If the mask is 3 or "11"b, any PE of rank with any "dirty" bit -! in its rank value will not have output. -! -! !REVISION HISTORY: -! 25Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::open_' - integer :: lu - character(len=4) :: sfix - integer :: irank - integer :: ier - integer :: umask - - ! Set the filename prefix - - upfix=def_pfix - if(present(pfix)) upfix=pfix - - ! Set the mask of the PEs with mpout - - umask=mpout_MASK - if(present(mask)) umask=mask - - ! If a check is not in place, sent the outputs to stdout - - mpout=stdout - mpout_set=.false. - - call MP_comm_rank(MP_comm_world,irank,ier) - if(ier /= 0) then - call MP_perr(myname_,'MP_comm_rank()',ier) - call die(myname_) - endif - - if(iand(irank,umask) == 0) then - - lu=luavail() - if(lu > 0) mpout=lu - - write(sfix,'(a,z3.3)') '.',irank - call opntext(mpout,trim(upfix)//sfix,'unknown',ier) - if(ier /= 0) then - write(stderr,'(4a,i4)') myname_, & - ': opntext("',trim(upfix)//sfix,'") error, ier =',ier - call die(myname_) - endif - - mpout_set=.true. - - isec=0 - write(mpout,'(a,z8.8,2a)') '.BEGIN. ',isec,' ',trim(upfix) - endif - -end subroutine open_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: close_ - close the unit opened by open_ -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine close_() - use m_stdio, only : stderr - use m_ioutil, only : clstext, luflush - use m_dropdead, only : die - implicit none - -! !REVISION HISTORY: -! 25Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::close_' - integer :: ier - - if(mpout_set) then - call luflush(mpout) - - isec=isec+1 - write(mpout,'(a,z8.8,2a)') '.END. ',isec,' ',trim(upfix) - endfile(mpout) - - call clstext(mpout,ier) - if(ier /= 0) then - write(stderr,'(2a,i3.3,a,i4)') myname_, & - ': clstext("',mpout,'") error, ier =',ier - call die(myname_) - endif - mpout=stdout - mpout_set=.false. - endif - - isec=-1 - -end subroutine close_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: sync_ - write a mark for posible later file merging -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine sync_(tag) - use m_stdio, only : stderr - use m_dropdead, only : die - implicit none - character(len=*),intent(in) :: tag - -! !REVISION HISTORY: -! 25Feb98 - Jing Guo - initial prototype/prolog/code -! -! !DESIGN ISSUES: -! \begin{itemize} -! -! 
\item Should the variable \verb"tag" be implemented as an optional -! argument? Because the current implementation does not require -! actual synchronization between all threads of the multiple -! output streams, forcing the user to supply a unique \verb"tag" -! would make the final multi-stream merging verifiable. However, -! since the \verb"tag"s have not been forced to be unique, the -! synchronization operations are still symbolic. -! -! \{itemize} -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::sync_' - - if(mpout_set) then - isec=isec+1 - write(mpout,'(a,z8.8,2a)') '.SYNC. ',isec,' ',trim(tag) - endif - -end subroutine sync_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: flush_ - flush the multiple output streams -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine flush_() - use m_stdio, only : stderr - use m_ioutil, only : luflush - use m_dropdead, only : die - implicit none - -! !REVISION HISTORY: -! 27Feb98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::flush_' - - if(mpout_set) call luflush(mpout) - -end subroutine flush_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: ison_ - decide if the current PE has a defined mpout -! -! !DESCRIPTION: -! -! It needs to be checked to avoid undesired output. -! -! !INTERFACE: - - function ison_() - implicit none - logical :: ison_ - -! !REVISION HISTORY: -! 14Sep99 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::ison_' - - ison_=mpout_set - -end function ison_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! ANL/MCS Mathematics and Computer Science Division ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: log1_ - write a message to mpout -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine log1_(message) - implicit none - character(len=*),intent(in) :: message - -! !REVISION HISTORY: -! 07Jan02 - R. Jacob (jacob@mcs.anl.gov) -! - based on log2_. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::log1_' - - if(mpout_set) write(mpout,'(3a)') message - -end subroutine log1_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: log2_ - write a message to mpout with a where -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine log2_(where,message) - implicit none - character(len=*),intent(in) :: where - character(len=*),intent(in) :: message - -! !REVISION HISTORY: -! 14Sep99 - Jing Guo -! - initial prototype/prolog/code -! 07Jan02 - R. Jacob (jacob@mcs.anl.gov) -! 
- change name to log2_ -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::log2_' - - if(mpout_set) write(mpout,'(3a)') where,': ',message - -end subroutine log2_ -end module m_mpout -!. diff --git a/src/externals/mct/mpeu/m_rankMerge.F90 b/src/externals/mct/mpeu/m_rankMerge.F90 deleted file mode 100644 index b3f78fb42db..00000000000 --- a/src/externals/mct/mpeu/m_rankMerge.F90 +++ /dev/null @@ -1,620 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!BOP ------------------------------------------------------------------- -! -! !MODULE: m_rankMerge - A merging tool through ranking -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_rankMerge - implicit none - private ! except - - public :: rankSet ! set inital ranks - public :: rankMerge ! merge two ranks - public :: IndexedRankMerge ! index-merge two array segments - - interface rankSet; module procedure set_; end interface - - interface rankMerge; module procedure & - imerge_, & ! rank-merging two integer arrays - rmerge_, & ! rank-merging two real arrays - dmerge_, & ! rank-merging two dble arrays - uniq_ ! merging to rank arrays - end interface - - interface IndexedRankMerge; module procedure & - iindexmerge_, & ! merging two index arrays of integers - rindexmerge_, & ! merging two index arrays of reals - dindexmerge_ ! merging two index arrays of dbles - end interface - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='MCT(MPEU)::m_rankMerge' - -contains -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: set_ - set initial ranking -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine set_(rank) - implicit none - integer,dimension(:),intent(out) :: rank - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::set_' - integer :: i - - do i=1,size(rank) - rank(i)=0 - end do - -end subroutine set_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: imerge_ - merge two sorted integer arrays by ranking -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine imerge_(value_i,value_j,krank_i,krank_j,descend) - implicit none - - integer,dimension(:),intent(in) :: value_j ! value of j-vec - integer,dimension(:),intent(in) :: value_i ! value of i-vec - - integer,dimension(:),intent(inout) :: krank_i ! rank of i-vec - integer,dimension(:),intent(inout) :: krank_j ! rank of j-vec - - logical,optional,intent(in) :: descend - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! 
- initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::imerge_' - - integer :: ni,nj - logical :: descend_ - logical :: geti - integer :: value_sv,value - integer :: krank - integer :: i,j - - descend_=.false. - if(present(descend)) descend_=descend - - ni=size(krank_i) - nj=size(krank_j) - - i=1 - j=1 - krank=0 ! a preset rank value - value_sv=0 - - do - geti=j>nj - if(geti) then ! .eqv. j>nj - if(i>ni) exit ! i>ni - value = value_i(i) - else ! .eqv. j<=nj - geti = i<=ni - if(geti) then ! .eqv. i<=ni - value = value_i(i) - geti = krank_i(i) <= krank_j(j) - if(krank_i(i)==krank_j(j)) then - geti = value_i(i)<=value_j(j) - if(descend_) geti = value_i(i)>=value_j(j) - endif - endif - if(.not.geti) value = value_j(j) - endif - - if(krank==0 .or. value /= value_sv) then - krank=krank+1 ! the next rank value - value_sv=value - endif - - if(geti) then - krank_i(i)=krank - i=i+1 - else - krank_j(j)=krank - j=j+1 - endif - end do - -end subroutine imerge_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rmerge_ - merge two sorted real arrays by ranking -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine rmerge_(value_i,value_j,krank_i,krank_j,descend) - use m_realkinds, only : SP - implicit none - - real(SP),dimension(:),intent(in) :: value_i ! value of i-vec - real(SP),dimension(:),intent(in) :: value_j ! value of j-vec - - integer,dimension(:),intent(inout) :: krank_i ! rank of i-vec - integer,dimension(:),intent(inout) :: krank_j ! rank of j-vec - - logical,optional,intent(in) :: descend - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::rmerge_' - - integer :: ni,nj - logical :: descend_ - logical :: geti - real(SP) :: value_sv,value - integer :: krank - integer :: i,j - - descend_=.false. - if(present(descend)) descend_=descend - - ni=size(krank_i) - nj=size(krank_j) - - i=1 - j=1 - krank=0 ! a preset rank value - value_sv=0 - - do - geti=j>nj - if(geti) then ! .eqv. j>nj - if(i>ni) exit ! i>ni - value = value_i(i) - else ! .eqv. j<=nj - geti = i<=ni - if(geti) then ! .eqv. i<=ni - value = value_i(i) - geti = krank_i(i) <= krank_j(j) - if(krank_i(i)==krank_j(j)) then - geti = value_i(i)<=value_j(j) - if(descend_) geti = value_i(i)>=value_j(j) - endif - endif - if(.not.geti) value = value_j(j) - endif - - if(krank==0 .or. value /= value_sv) then - krank=krank+1 ! the next rank value - value_sv=value - endif - - if(geti) then - krank_i(i)=krank - i=i+1 - else - krank_j(j)=krank - j=j+1 - endif - end do - -end subroutine rmerge_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dmerge_ - merge two sorted real arrays by ranking -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine dmerge_(value_i,value_j,krank_i,krank_j,descend) - use m_realkinds, only : DP - implicit none - - real(DP),dimension(:),intent(in) :: value_i ! value of i-vec - real(DP),dimension(:),intent(in) :: value_j ! value of j-vec - - integer,dimension(:),intent(inout) :: krank_i ! 
rank of i-vec - integer,dimension(:),intent(inout) :: krank_j ! rank of j-vec - - logical,optional,intent(in) :: descend - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::dmerge_' - - integer :: ni,nj - logical :: descend_ - logical :: geti - real(DP):: value_sv,value - integer :: krank - integer :: i,j - - descend_=.false. - if(present(descend)) descend_=descend - - ni=size(krank_i) - nj=size(krank_j) - - i=1 - j=1 - krank=0 ! a preset rank value - value_sv=0 - - do - geti=j>nj - if(geti) then ! .eqv. j>nj - if(i>ni) exit ! i>ni - value = value_i(i) - else ! .eqv. j<=nj - geti = i<=ni - if(geti) then ! .eqv. i<=ni - value = value_i(i) - geti = krank_i(i) <= krank_j(j) - if(krank_i(i)==krank_j(j)) then - geti = value_i(i)<=value_j(j) - if(descend_) geti = value_i(i)>=value_j(j) - endif - endif - if(.not.geti) value = value_j(j) - endif - - if(krank==0 .or. value /= value_sv) then - krank=krank+1 ! the next rank value - value_sv=value - endif - - if(geti) then - krank_i(i)=krank - i=i+1 - else - krank_j(j)=krank - j=j+1 - endif - end do - -end subroutine dmerge_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: iindexmerge_ - merge two sorted integer arrays by ranking -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine iindexmerge_(indx_i,indx_j,value,krank_i,krank_j,descend) - implicit none - - integer,dimension(:),intent(in) :: indx_i ! of the i-vec - integer,dimension(:),intent(in) :: indx_j ! of the j-vec - integer,dimension(:),intent(in) :: value ! of the full - - integer,dimension(:),intent(inout) :: krank_i ! rank of i-vec - integer,dimension(:),intent(inout) :: krank_j ! rank of j-vec - - logical,optional,intent(in) :: descend - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::iindexmerge_' - - integer :: ni,nj - logical :: descend_ - logical :: geti - integer :: value_sv,value_ - integer :: krank - integer :: i,j,li,lj - - descend_=.false. - if(present(descend)) descend_=descend - - ni=size(krank_i) - nj=size(krank_j) - - i=1 - j=1 - krank=0 ! a preset rank value - value_sv=0 - - do - geti=j>nj - if(geti) then ! .eqv. j>nj - if(i>ni) exit ! i>ni - li=indx_i(i) - value_ = value(li) - else ! .eqv. j<=nj - lj=indx_j(j) - geti = i<=ni - if(geti) then ! .eqv. i<=ni - li=indx_i(i) - value_ = value(li) - geti = krank_i(i) <= krank_j(j) - if(krank_i(i)==krank_j(j)) then - geti = value(li)<=value(lj) - if(descend_) geti = value(li)>=value(lj) - endif - endif - if(.not.geti) value_ = value(lj) - endif - - if(krank==0 .or. value_ /= value_sv) then - krank=krank+1 ! the next rank value - value_sv=value_ - endif - - if(geti) then - krank_i(i)=krank - i=i+1 - else - krank_j(j)=krank - j=j+1 - endif - end do - -end subroutine iindexmerge_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: rindexmerge_ - merge two sorted real arrays by ranking -! -! !DESCRIPTION: -! -! 
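The merge routines in this module share one calling pattern: both value arrays must already be sorted, rankSet initializes the rank arrays, and rankMerge then assigns merged ranks in which equal values receive the same rank. A minimal sketch of that sequence using the public generics declared at the top of the module:

      subroutine merge_example(va, vb, ra, rb)
        use m_rankMerge, only : rankSet, rankMerge
        implicit none
        integer, dimension(:), intent(in)    :: va, vb  ! each sorted ascending
        integer, dimension(:), intent(inout) :: ra, rb  ! same sizes as va, vb

        call rankSet(ra)        ! zero the initial ranks
        call rankSet(rb)

        ! On return, ra(i) and rb(j) hold the positions of va(i) and vb(j)
        ! in the merged ascending order; equal values share a rank.
        call rankMerge(va, vb, ra, rb)
      end subroutine merge_example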
!INTERFACE: - - subroutine rindexmerge_(indx_i,indx_j,value,krank_i,krank_j,descend) - use m_realkinds,only : SP - implicit none - - integer,dimension(:),intent(in) :: indx_i ! of the i-vec - integer,dimension(:),intent(in) :: indx_j ! of the j-vec - real(SP),dimension(:),intent(in) :: value ! of the full - - integer,dimension(:),intent(inout) :: krank_i ! rank of i-vec - integer,dimension(:),intent(inout) :: krank_j ! rank of j-vec - - logical,optional,intent(in) :: descend - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::rindexmerge_' - - integer :: ni,nj - logical :: descend_ - logical :: geti - real(SP):: value_sv,value_ - integer :: krank - integer :: i,j,li,lj - - descend_=.false. - if(present(descend)) descend_=descend - - ni=size(krank_i) - nj=size(krank_j) - - i=1 - j=1 - krank=0 ! a preset rank value - value_sv=0 - - do - geti=j>nj - if(geti) then ! .eqv. j>nj - if(i>ni) exit ! i>ni - li=indx_i(i) - value_ = value(li) - else ! .eqv. j<=nj - lj=indx_j(j) - geti = i<=ni - if(geti) then ! .eqv. i<=ni - li=indx_i(i) - value_ = value(li) - geti = krank_i(i) <= krank_j(j) - if(krank_i(i)==krank_j(j)) then - geti = value(li)<=value(lj) - if(descend_) geti = value(li)>=value(lj) - endif - endif - if(.not.geti) value_ = value(lj) - endif - - if(krank==0 .or. value_ /= value_sv) then - krank=krank+1 ! the next rank value - value_sv=value_ - endif - - if(geti) then - krank_i(i)=krank - i=i+1 - else - krank_j(j)=krank - j=j+1 - endif - end do - -end subroutine rindexmerge_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: dindexmerge_ - merge two sorted real arrays by ranking -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine dindexmerge_(indx_i,indx_j,value,krank_i,krank_j,descend) - use m_realkinds,only : DP - implicit none - - integer,dimension(:),intent(in) :: indx_i ! of the i-vec - integer,dimension(:),intent(in) :: indx_j ! of the j-vec - real(DP),dimension(:),intent(in) :: value ! of the full - - integer,dimension(:),intent(inout) :: krank_i ! rank of i-vec - integer,dimension(:),intent(inout) :: krank_j ! rank of j-vec - - logical,optional,intent(in) :: descend - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::dindexmerge_' - - integer :: ni,nj - logical :: descend_ - logical :: geti - real(DP):: value_sv,value_ - integer :: krank - integer :: i,j,li,lj - - descend_=.false. - if(present(descend)) descend_=descend - - ni=size(krank_i) - nj=size(krank_j) - - i=1 - j=1 - krank=0 ! a preset rank value - value_sv=0 - - do - geti=j>nj - if(geti) then ! .eqv. j>nj - if(i>ni) exit ! i>ni - li=indx_i(i) - value_ = value(li) - else ! .eqv. j<=nj - lj=indx_j(j) - geti = i<=ni - if(geti) then ! .eqv. i<=ni - li=indx_i(i) - value_ = value(li) - geti = krank_i(i) <= krank_j(j) - if(krank_i(i)==krank_j(j)) then - geti = value(li)<=value(lj) - if(descend_) geti = value(li)>=value(lj) - endif - endif - if(.not.geti) value_ = value(lj) - endif - - if(krank==0 .or. value_ /= value_sv) then - krank=krank+1 ! 
the next rank value - value_sv=value_ - endif - - if(geti) then - krank_i(i)=krank - i=i+1 - else - krank_j(j)=krank - j=j+1 - endif - end do - -end subroutine dindexmerge_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: uniq_ - merge two rank arrays with unique rank values -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine uniq_(krank_i,krank_j) - implicit none - integer,dimension(:),intent(inout) :: krank_i ! rank of i-vec - integer,dimension(:),intent(inout) :: krank_j ! rank of j-vec - -! !REVISION HISTORY: -! 13Mar00 - Jing Guo -! - initial prototype/prolog/code -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::uniq_' - - integer :: ni,nj - integer :: i,j - integer :: krank - logical :: geti - - ni=size(krank_i) - nj=size(krank_j) - - i=1 - j=1 - krank=0 - do - geti=j>nj - if(geti) then ! .eqv. j>nj - if(i>ni) exit ! i>ni - else ! .eqv. j<=nj - geti = i<=ni - if(geti) geti = krank_i(i) <= krank_j(j) ! if(i<=ni) .. - endif - - krank=krank+1 ! the next rank value - - if(geti) then - krank_i(i)=krank - i=i+1 - else - krank_j(j)=krank - j=j+1 - endif - end do - -end subroutine uniq_ - -end module m_rankMerge diff --git a/src/externals/mct/mpeu/m_realkinds.F90 b/src/externals/mct/mpeu/m_realkinds.F90 deleted file mode 100644 index cb5f9994c0c..00000000000 --- a/src/externals/mct/mpeu/m_realkinds.F90 +++ /dev/null @@ -1,52 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_realkinds - real KIND definitions -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_realkinds - implicit none - private ! except - - public :: kind_r4 ! real*4 - public :: kind_r8 ! real*8 - public :: kind_r ! default real - public :: SP ! default REAL - public :: DP ! default DOUBLE_PRECISION - public :: FP ! general floating point precision - - real*4,parameter :: mpeuR4=1. - real*8,parameter :: mpeuR8=1. - real, parameter :: mpeuR =1. - -#ifdef SELECTEDREALKIND - integer,parameter :: SP = selected_real_kind( 6) ! 32-bit real, on most platforms - integer,parameter :: DP = selected_real_kind(12) ! 64-bit real, on most platforms -#else - integer,parameter :: SP = kind(1. ) - integer,parameter :: DP = kind(1.D0) -#endif - -! Set the current default floating point precision - integer,parameter :: FP = DP - - integer,parameter :: kind_r4=kind(mpeuR4) - integer,parameter :: kind_r8=kind(mpeuR8) - integer,parameter :: kind_r =kind(mpeuR ) - -! !REVISION HISTORY: -! 19Feb98 - Jing Guo - initial prototype/prolog/code -! 23Jan03 - R. Jacob - add FP -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_realkinds' - -end module m_realkinds diff --git a/src/externals/mct/mpeu/m_stdio.F90 b/src/externals/mct/mpeu/m_stdio.F90 deleted file mode 100644 index 9f9fad81fed..00000000000 --- a/src/externals/mct/mpeu/m_stdio.F90 +++ /dev/null @@ -1,53 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! 
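A minimal Python sketch (illustrative only, not part of the removed sources) of the dense-rank merge that the m_rankMerge routines imerge_/rmerge_/dmerge_ deleted above implement: two individually sorted sequences are walked as one merged stream, the rank counter advances only when the merged value changes, so equal values share a rank. The helper name merge_ranks is assumed, and the sketch is simplified to compare values directly rather than the incoming krank arrays.

def merge_ranks(values_i, values_j, descend=False):
    # Dense-rank two already-sorted lists as one merged stream;
    # equal values in the merged order receive the same rank.
    rank_i = [0] * len(values_i)
    rank_j = [0] * len(values_j)
    i = j = rank = 0
    last = None
    while i < len(values_i) or j < len(values_j):
        take_i = j >= len(values_j) or (
            i < len(values_i)
            and ((values_i[i] >= values_j[j]) if descend
                 else (values_i[i] <= values_j[j]))
        )
        value = values_i[i] if take_i else values_j[j]
        if last is None or value != last:   # new value -> next rank
            rank += 1
            last = value
        if take_i:
            rank_i[i] = rank
            i += 1
        else:
            rank_j[j] = rank
            j += 1
    return rank_i, rank_j

# merge_ranks([1, 3, 3, 7], [2, 3, 9]) -> ([1, 3, 3, 4], [2, 3, 5])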
-!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_stdio - a F90 module defines std. I/O parameters -! -! !DESCRIPTION: -! Define system dependent I/O parameters. -! -! !INTERFACE: - - module m_stdio - implicit none - private - - public :: stdin ! a unit linked to UNIX stdin - public :: stdout ! a unit linked to UNIX stdout - public :: stderr ! a unit linked to UNIX stderr - - public :: LEN_FILENAME - -! !REVISION HISTORY: -! 10oct96 - Jing G. - Defined -! 25Jul02 - J. Larson - Changed cpp define token HP-UX to -! HP_UX for compatibility with Fujitsu -! cpp. -!EOP -!_______________________________________________________________________ - -! Defines standar i/o units. - - integer, parameter :: stdin = 5 - integer, parameter :: stdout = 6 - -#ifdef sysHP_UX - ! Special setting for HP-UX - - integer, parameter :: stderr = 7 -#else - ! Generic setting for UNIX other than HP-UX - - integer, parameter :: stderr = 0 -#endif - - integer, parameter :: LEN_FILENAME = 128 - -!----------------------------------------------------------------------- -end module m_stdio -!. diff --git a/src/externals/mct/mpeu/m_zeit.F90 b/src/externals/mct/mpeu/m_zeit.F90 deleted file mode 100644 index 207de748c84..00000000000 --- a/src/externals/mct/mpeu/m_zeit.F90 +++ /dev/null @@ -1,1008 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -! CVS $Id$ -! CVS $Name$ -!----------------------------------------------------------------------- -!BOP -! -! !MODULE: m_zeit - a multi-timer of process times and wall-clock times -! -! !DESCRIPTION: -! -! !INTERFACE: - - module m_zeit - implicit none - private ! except - - public :: zeit_ci ! push a new name to the timer - public :: zeit_co ! pop the current name on the timer - public :: zeit_flush ! print per PE timing - public :: zeit_allflush ! print all PE timing - public :: zeit_reset ! reset the timers to its initial state - - ! Flags of all printable timers - - public :: MWTIME ! MPI_Wtime() wall-clock time - public :: XWTIME ! times() wall-clock time - public :: PUTIME ! times() process user time - public :: PSTIME ! times() process system time - public :: CUTIME ! times() user time of all child-processes - public :: CSTIME ! times() system time of all child-processes - public :: ALLTIME ! all of above - public :: UWRATE ! (putime+cutime)/xwtime - - interface zeit_ci; module procedure ci_; end interface - interface zeit_co; module procedure co_; end interface - interface zeit_flush; module procedure flush_; end interface - interface zeit_allflush; module procedure allflush_; end interface - interface zeit_reset; module procedure reset_; end interface - -! !REVISION HISTORY: -! -! 22Jan01 - Jay Larson - Minor correction in -! write statements in the routines sp_balances_() and -! mp_balances_(): replaced x (single-space) descriptor -! with 1x. This is apparently strict adherance to the -! f90 standard (though the first of many, many compilers -! where it has arisen). This was for the SunOS platform. -! 05Mar98 - Jing Guo - -! . rewritten for possible MPI applications, with -! additional functionalities and new performance -! analysis information. -! . Interface names have been redefined to ensure all -! use cases to be verified. -! . 
removed the type(pzeit) data structure, therefore, -! limited to single _instance_ applications. -! . added additional data components for more detailed -! timing analysis. -! . used times() for the XPG4 standard conforming -! timing functions. -! . used MPI_Wtime() for the MPI standard conforming -! high-resolution timing functions. -! -! 20Feb97 - Jing Guo - -! . rewritten in Fortran 90 as the first modular -! version, with a type(pzeit) data structure. -! -! 10may96 - Jing G. - Add _TZEITS macro for the testing code -! 09may96 - Jing G. - Changed output format also modifed -! comments -! 11Oct95 - Jing G. - Removed earlier way of letting clock -! timing (clkknt and clktot) to be no less -! then the CPU timing, following a -! suggestion by James Abeles from Cray. -! This way, users may use the routings to -! timing multitasking speedup as well. -! 12May95 - Jing G. - Merged zeitCRAY.f and zeitIRIS.f. -! Before - ? - See zeitCRAY.f and zeitIRIS.f for more -! information. Authors of those files are -! not known to me. -! -! !DESIGN ISSUES: -! -! 05Mar98 - Jing Guo - -! . Removing the data structure may be consider as a -! limitation to future changes to multiple _instance_ -! applications. However, it is unlikely there will be -! any neccessary multi-_intance_ application soon, if -! ever for this module. -! . Without an additional layer with the derived -! datatype, one may worry less the tricky performance -! issues associated with ci_/co_. -! . Performance issue with the flush_() calls are not -! considered. -! -! 20Feb97 - Jing Guo - -! . Currently a single threaded module. May be easily -! extended to multi-threaded module by adding the name -! of an instance of the class to the argument list. It -! requires some but very limited interface extensions. -! Right now, the backward compatibility is the main -! issue. -! -! 10may96 - Jing Guo - -! -! + This zeit subroutine collection replaces original zeit files -! used in PSAS on both systems, UNICOS and IRIX, with following -! changes: -! -! + Removed the some bugs in zeitCRAY.f that overite the -! first user defined name entry in a special situation -! (but not being able to correct in zeitCRAY.f). -! -! + Unified both zeitCRAY.f and zeitIRIS.f in to one file -! (this file), that handles system dependency in only -! one subroutine syszeit_() with a couple of lines of -! differences. -! -! + Added system CPU time counts for system supporting -! the function. -! -! + Added some error checking and reporting functions. -! -! + According to zeitCRAY.f, "zeit" is "time" in Germen. -! The name is used through the code as another name for -! "time". -! -! + This version does not work for parallelized processes. -! -! + Elapsed time records since the first call are used. Although -! it may loose accuracy when the values of the time records -! become large, it will keep the total time values conserved. -! -! + The accuracy of the elapsed times at a IEEE real*4 accuracy -! (ffrac = 2^23 ~= 1.19e-7) should be no worse than +- 1 second -! in 97 days, if only the numerical accuracy is considered. -! -! + The precision of "wall clock" time returned by syszeit_() is -! only required to be reliable upto seconds. -! -! + The wall clock time for individual name tag (clkknt) is -! accumulated by adding the differences between two integer -! values, iclk and iclksv. Care must be taken to compute the -! differences of iclk and iclksv first. That is, doing -! -! clkknt()=clkknt() + (iclk-iclksv) -! -! not -! -! clkknt()=clkknt() + iclk-iclksv -! -! 
The latter statement may ignore the difference between the two -! integer values (iclk and iclksv). -! -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname='MCT(MPEU)::m_zeit' - - integer,parameter :: MWTIME = 1 - integer,parameter :: XWTIME = 2 - integer,parameter :: PUTIME = 4 - integer,parameter :: PSTIME = 8 - integer,parameter :: CUTIME = 16 - integer,parameter :: CSTIME = 32 - integer,parameter :: ALLTIME = MWTIME + XWTIME + PUTIME + & - PSTIME + CUTIME + CSTIME - integer,parameter :: UWRATE = 64 - - integer,parameter :: MASKS(0:5) = & - (/ MWTIME,XWTIME,PUTIME,PSTIME,CUTIME,CSTIME /) - - character(len=*),parameter :: ZEIT='.zeit.' - character(len=8),parameter :: HEADER(0:5) = & - (/ '[MWTIME]','[XWTIME]','[PUTIME]', & - '[PSTIME]','[CUTIME]','[CSTIME]' /) - character(len=8),parameter :: UWRHDR = '[UWRATE]' - - integer,parameter :: MXN= 250 ! the size of a name list -! integer,parameter :: NSZ= 32 ! the size of a name -! LPC jun/6/2000 - integer,parameter :: NSZ= 36 ! the size of a name - integer,parameter :: MXS= 64 ! the depth of the timer stack - - integer,save :: nreset=0 - logical,save :: started=.false. - logical,save :: balanced=.false. - - character(len=NSZ), & - save :: ciname=' ' - character(len=NSZ), & - save :: coname=' ' - - integer,save :: mxdep=0 ! the maximum ndep value recorded - integer,save :: ndep=-1 ! depth, number of net ci_() - integer,save :: lnk_n(0:MXS) ! name index of the depth - - integer,save :: nname=-1 ! number of accounts - character(len=NSZ), & - save,dimension(0:MXN) :: name_l ! the accounts - integer,save,dimension(0:MXN) :: knt_l ! counts of ci_() calls - integer,save,dimension(0:MXN) :: level_l ! remaining ci_() counts - - real*8,save,dimension(0:5) :: zts_sv ! the last timings - - real*8,save,dimension(0:5,0:MXN) :: zts_l ! credited to a name - real*8,save,dimension(0:5,0:MXN) :: szts_l ! all under the name - real*8,save,dimension(0:5,0:MXN) :: szts_sv ! the last ci_ timings - -!======================================================================= -contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: ci_ - push an entry into the timer -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine ci_(name) - use m_stdio, only : stderr - use m_die, only : die - use m_mpif90,only : MP_wtime - implicit none - character(len=*), intent(in) :: name - -! !REVISION HISTORY: -! 05Mar98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::ci_' - - ! Local variables - - real*8,dimension(0:5) :: zts - integer :: lname,iname - integer :: i - - ! Encountered a limitation. Programming is required - - if(ndep >= MXS) then - write(stderr,'(2a,i4)') myname_, & - ': stack overflow with "'//trim(name)//'", ndep =',ndep - call die(myname_) - endif - - !-------------------------------------------------------- - ! Initialize the stack if it is called the first time. - - if(.not.started) call reset_() - - ! Get the current _zeits_ - - call get_zeits(zts(1)) - zts(0)=MP_wtime() - - !-------------------------------------------------------- - ! 
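A hypothetical numeric illustration of the accumulation note in the m_zeit prolog above (clkknt()=clkknt() + (iclk-iclksv) rather than the ungrouped form), using numpy float32 to stand in for a 32-bit real accumulator; the variable names and values are assumptions chosen only to make the rounding visible. Forming the integer difference first keeps the small increment exact, while the ungrouped left-to-right form rounds the large raw clock reading into the real sum and distorts the increment.

import numpy as np

clkknt = np.float32(123.5)                     # accumulated seconds so far (32-bit real)
iclk, iclksv = 2_000_000_017, 2_000_000_010    # raw integer clock readings, true difference = 7

ungrouped = (clkknt + np.float32(iclk)) - np.float32(iclksv)   # -> 128.0, increment distorted
grouped   = clkknt + np.float32(iclk - iclksv)                 # -> 130.5, increment exact

print(ungrouped, grouped)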
Charge the ticks since the last co_() to the current level - - lname=lnk_n(ndep) - - do i=0,5 - zts_l(i,lname)=zts_l(i,lname) + zts(i)-zts_sv(i) - end do - - do i=0,5 - zts_sv(i)=zts(i) ! update the record - end do - - !-------------------------------------------------------- - ! Is the name already in the list? Case sensitive and - ! space maybe sensitive if they are inbeded between non- - ! space characters. - ! - ! If the name is already in the list, the index of the - ! table entry is given. - ! - ! If the name is not in the list, a new entry will be added - ! to the list, if 1) there is room, and 2) - - iname=lookup_(name) - - !-------------------------------------------------------- - ! push up the stack level - - ndep=ndep+1 - if(mxdep <= ndep) mxdep=ndep - - lnk_n(ndep)=iname - knt_l(iname)=knt_l(iname)+1 - - ! Recording the check-in time, if there is no remaining - ! levels for the same name. This is used to handle - ! recursive ci_() calls for the same name. - - if(level_l(iname) == 0) then - do i=0,5 - szts_sv(i,iname)=zts_sv(i) - end do - endif - - ! open a level - - level_l(iname)=level_l(iname)+1 - -end subroutine ci_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: co_ - pop the current level -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine co_(name,tms) - use m_stdio, only : stderr - use m_die, only : die - use m_mpif90,only : MP_wtime - implicit none - character(len=*), intent(in) :: name ! account name - real*8,optional,dimension(0:5,0:1),intent(out) :: tms ! timings - -! The returned variable tms(0:5,0:1) contains two sets of timing -! information. tms(0:5,0) is the NET timing data charged under the -! account name only, and tms(0:5,1) is the SCOPE timing data since -! the last ci() with the same account name and at the out most level. -! -! !REVISION HISTORY: -! 11Oct99 - J.W. Larson - explicit definition of -! tms as real*8 -! 05Mar98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::co_' - - real*8 :: tms0,tms1 - real*8,dimension(0:5) :: zts - integer :: lname - integer :: i - - ! Encountered a limitation. Programming is required - - if(ndep <= 0) then - write(stderr,'(2a,i4)') myname_, & - ': stack underflow with "'//trim(name)//'", ndep =',ndep - call die(myname_) - endif - - !-------------------------------------------------------- - ! Initialize the stack if it is called the first time. - - if(.not.started) call reset_() - - ! Get the current _zeits_ - - call get_zeits(zts(1)) - zts(0)=MP_wtime() - - ! need special handling if ndep is too large or too small. - - lname=lnk_n(ndep) - level_l(lname)=level_l(lname)-1 ! close a level - - do i=0,5 - tms0=zts(i)- zts_sv(i) ! NET by the _account_ - tms1=zts(i)-szts_sv(i,lname) ! within its SCOPE - - zts_l(i,lname)= zts_l(i,lname) + tms0 - - if(level_l(lname) == 0) & - szts_l(i,lname)=szts_l(i,lname) + tms1 - - zts_sv(i)=zts(i) - - if(present(tms)) then - - ! Return the timings of the current call segment - ! - ! tms(:,0) is for the NET timing data, that have been charged - ! to this account. - ! - ! tms(:,1) is for the SCOPE timing data since the ci() of the - ! same account name at the out most level. - ! - - tms(i,0)=tms0 - tms(i,1)=tms1 ! only the sub-segments - endif - end do - - ! 
Record the unbalanced ci/co. Name .void. is supplied for - ! backward compartible calls of pzeitend() - - if(name /= '.void.'.and.balanced) then - balanced = lname == MXN .or. name == name_l(lname) - if(.not.balanced) then - ciname=name_l(lname) - coname=name - endif - endif - - ! pop (need special handling of ndep too large or too small. - - ndep=ndep-1 - -end subroutine co_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: reset_ - reset module m_zeit to an initial state -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine reset_() - use m_mpif90,only : MP_wtime - implicit none - -! !REVISION HISTORY: -! 04Mar98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::reset_' - integer :: i - - ! keep tracking the number of reset_() calls - - nreset=nreset+1 - started=.true. - balanced=.true. - - ! Start timing - - call get_zeits(zts_sv(1)) - zts_sv(0)=MP_wtime() - - ! Sign in the module name for the overheads (.eqv. ci_(ZEIT)) - - nname=0 - name_l(nname)=ZEIT - knt_l(nname)=1 - - ndep =0 - lnk_n(ndep)=nname - - ! Initialize the timers. - - do i=0,5 - zts_l(i,nname)=0. - szts_l(i,nname)=0. - szts_sv(i,nname)=zts_sv(i) - end do - level_l(nname)=1 - -end subroutine reset_ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: lookup_ search/insert a name -! -! !DESCRIPTION: -! -! !INTERFACE: - - function lookup_(name) - implicit none - character(len=*),intent(in) :: name - integer :: lookup_ - -! !REVISION HISTORY: -! 04Mar98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::lookup_' - - logical :: found - integer :: ith - integer :: i - - ith=-1 - found=.false. - do while(.not.found.and. ith < min(nname,MXN)) - ith=ith+1 - found = name == name_l(ith) - end do - - if(.not.found) then - - found = nname >= MXN ! Can not handle too many accounts? - ith=MXN ! Then use the account for ".foo." - - if(.not.found) then ! Otherwise, add a new account. - nname=nname+1 - ith=nname - - name_l(ith)=name - if(ith==MXN) name_l(ith)='.foo.' - - ! Initialize a new account - - do i=0,5 - zts_l(i,ith)=0. - szts_l(i,ith)=0. - end do - level_l(ith)=0 - - endif - endif - - lookup_=ith - -end function lookup_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: flush_ - print the timing data -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine flush_(lu,umask) - use m_stdio, only : stderr - use m_ioutil, only : luflush - use m_die, only : die - use m_mpif90,only : MP_wtime - implicit none - integer,intent(in) :: lu ! logical unit for the output - integer,optional,intent(in) :: umask - -! !REVISION HISTORY: -! 
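A minimal Python sketch (assumed class name Zeit, not the removed implementation) of the ci_/co_ pattern shown above: the ticks elapsed since the previous event are always charged to the account currently on top of a name stack, and an unbalanced check-in/check-out pair is detected at pop time. Only the NET accounting is shown; the SCOPE totals, recursion levels, and multi-clock arrays of m_zeit are omitted.

import time
from collections import defaultdict

class Zeit:
    def __init__(self):
        self.stack = ['.zeit.']            # account receiving the current ticks
        self.net = defaultdict(float)      # NET seconds charged per account
        self.last = time.perf_counter()

    def _charge(self):
        now = time.perf_counter()
        self.net[self.stack[-1]] += now - self.last
        self.last = now

    def ci(self, name):     # check in: push a new account onto the stack
        self._charge()
        self.stack.append(name)

    def co(self, name):     # check out: pop and verify the matching account
        self._charge()
        popped = self.stack.pop()
        assert popped == name, f'unbalanced ci/co: {popped}/{name}'

# zeit = Zeit(); zeit.ci('solver'); ...; zeit.co('solver')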
05Mar98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::flush_' - integer :: imask - - real*8,dimension(0:5) :: zts - integer :: i,ier - - ! specify which timer to print - - imask=MWTIME - if(present(umask)) imask=umask - - ! write a - - write(lu,*,iostat=ier) - if(ier /= 0) then - write(stderr,'(2a,i3)') myname_,': can not write(), unit =',lu - call die(myname_) - endif - - if(.not.balanced) write(lu,'(5a)') myname_, & - ': ci/co unbalanced, ',trim(ciname),'/',trim(coname) - - call luflush(lu) - - ! latest times, but not closing on any entry - - call get_zeits(zts(1)) - zts(0)=MP_wtime() - - ! Print selected tables - - do i=0,5 - if(iand(MASKS(i),imask) /= 0) & - call sp_balances_(lu,i,zts(i)) - end do -#ifdef TODO - if(iand(UWRATE,imask) /= 0) call sp_rate_(lu,zts) -#endif - - call luflush(lu) - -end subroutine flush_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: sp_balances_ - print a table of a given timer -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine sp_balances_(lu,itm,zti) - implicit none - integer,intent(in) :: lu - integer,intent(in) :: itm - real*8,intent(in) :: zti - -! !REVISION HISTORY: -! 06Mar98 - Jing Guo - initial prototype/prolog/code -! 22Jan01 - Jay Larson - Minor correction in -! A write statement: replaced x (single-space) descriptor -! with 1x. This is apparently strict adherance to the -! f90 standard (though the first of many, many compilers -! where it has arisen). This was for the SunOS platform. -! 24Feb01 - Jay Larson - Extra decimal place in -! timing numbers (some reformatting will be necessary). -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::sp_balances_' - - real*8,parameter :: res=.001 ! (sec) - - integer,parameter :: lnmax=12 - character(len=max(NSZ,lnmax)) :: name - - character(len=1) :: tag - character(len=4) :: num - - integer :: zt_min,zt_sec - integer :: sz_min,sz_sec - integer :: l,i,ln - - real*8 :: sz0 - real*8 :: zt,zt_percent,zt_percall - real*8 :: sz,sz_percent - - ! The total time is given in the ZEIT bin - - sz0=szts_l(itm,0) - if(level_l(0) /= 0) sz0=sz0 + zti - szts_sv(itm,0) - sz0=max(res,sz0) - - write(lu,'(a,t14,a,t21,a,t31,a,t52,a)') & - HEADER(itm), 'counts','period', & - 'NET m:s %', & - 'SCOPE m:s %' - -!23.|....1....|....2....|....3....|....4....|....5....|....6....|....7.. -![MWTIME] counts period NET m:s % SCOPE m:s % -!----------------------------------------------------------------------- -!zeit. ( 3s 3d 3) 333.3 33:33 3.3+ 333.3 33:33 3.3+ -!sub 333 33.3 333.3 33:33 3.3% 333.3 33:33 3.3% - - write(lu,'(80a)') ('-',i=1,72) - do l=0,min(MXN,nname) - - zt= zts_l(itm,l) - sz=szts_l(itm,l) - tag='%' - if(level_l(l) /= 0) then - zt=zt + zti - zts_sv(itm) - sz=sz + zti - szts_sv(itm,l) - tag='+' - endif - - zt_percall=zt/max(1,knt_l(l)) - - zt_percent=100.*zt/sz0 - sz_percent=100.*sz/sz0 - - zt_sec=nint(zt) - zt_min= zt_sec/60 - zt_sec=mod(zt_sec,60) - - sz_sec=nint(sz) - sz_min= sz_sec/60 - sz_sec=mod(sz_sec,60) - - name=name_l(l) - ln=max(len_trim(name),lnmax) - - select case(l) - case(0) - write(num,'(i4)') mxdep -! 
write(lu,'(2(a,i3),2a,t26,2(1x,f7.1,1x,i4.2,a,i2.2,1x,f5.1,a))')& - write(lu,'(2(a,i3),2a,t26,2(1x,f8.2,1x,i4.2,a,i2.2,1x,f6.2,a))')& - name(1:ln),nreset,'s',ndep,'/',num, & - zt,zt_min,':',zt_sec,zt_percent,tag, & - sz,sz_min,':',sz_sec,sz_percent,tag - -! write(lu,'(2a,3(i3,a),t26,2(x,f7.1,x,i4.2,a,i2.2,x,f5.1,a))')& -! name(1:ln),'(',nreset,'s',ndep,'d',mxdep,')', & - - case default - if(len_trim(name) < lnmax)then -! write(lu,'(a,1x,i5,1x,f6.1,2(1x,f7.1,1x,i4.2,a,i2.2,1x,f5.1,a))') & - write(lu,'(a,1x,i5,1x,f7.2,2(1x,f8.2,1x,i4.2,a,i2.2,1x,f6.2,a))') & - name(1:ln),knt_l(l),zt_percall, & - zt,zt_min,':',zt_sec,zt_percent,tag, & - sz,sz_min,':',sz_sec,sz_percent,tag - else - write(lu,'(a)')name(1:ln) -! write(lu,'(13x,i5,1x,f6.1,2(1x,f7.1,1x,i4.2,a,i2.2,1x,f5.1,a))') & - write(lu,'(13x,i5,1x,f7.2,2(1x,f8.2,1x,i4.2,a,i2.2,1x,f6.2,a))') & - knt_l(l),zt_percall, & - zt,zt_min,':',zt_sec,zt_percent,tag, & - sz,sz_min,':',sz_sec,sz_percent,tag - endif - end select - - end do - write(lu,'(80a)') ('-',i=1,72) - -end subroutine sp_balances_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: allflush_ - print a summary of all PEs. -! -! !DESCRIPTION: -! -! !INTERFACE: - - subroutine allflush_(comm,root,lu,umask) - use m_stdio, only : stderr - use m_ioutil, only : luflush - use m_die, only : die - use m_mpif90,only : MP_wtime,MP_type - use m_mpif90,only : MP_comm_size,MP_comm_rank - use m_SortingTools,only : IndexSet,IndexSort - implicit none - integer,intent(in) :: comm - integer,intent(in) :: root - integer,intent(in) :: lu - integer,optional,intent(in) :: umask - -! !REVISION HISTORY: -! 09Mar98 - Jing Guo - initial prototype/prolog/code -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::allflush_' - integer myID,nPE - integer :: imask - real*8,dimension(0:5) :: zts - real*8,dimension(0:5,0:1,0:MXN) :: ztbf - real*8,dimension(:,:,:,:),allocatable :: ztmp - integer,dimension(0:MXN) :: indx_ - integer :: mnm - - integer :: i,l - integer :: nbf,ier - integer :: mp_Type_ztbf - - mp_Type_ztbf=MP_type(ztbf(0,0,0)) - - imask=MWTIME - if(present(umask)) imask=umask - - if(imask==0) return - - call get_zeits(zts(1)) - zts(0)=MP_wtime() - - ! Update the accounts and prepare for the messages - - mnm=min(MXN,nname) - do l=0,mnm - do i=0,5 - ztbf(i,0,l)= zts_l(i,l) - ztbf(i,1,l)=szts_l(i,l) - end do - - if(level_l(l) /= 0) then - ! Update the current accounts. - do i=0,5 - ztbf(i,0,l)=ztbf(i,0,l) + zts(i) - zts_sv(i ) - ztbf(i,1,l)=ztbf(i,1,l) + zts(i) -szts_sv(i,l) - end do - endif - end do - nbf=size(ztbf(0:5,0:1,0:mnm)) - - call MP_comm_rank(comm,myID,ier) - if(ier /= 0) then - write(stderr,'(2a,i3)') myname_, & - ': MP_comm_rank() error, ier =',ier - call die(myname_) - endif - - ! An urgent hack for now. Need to be fixed later. J.G. 
- indx_(0)=0 - call IndexSet( nname,indx_(1:mnm)) - call IndexSort(nname,indx_(1:mnm),name_l(1:mnm)) - - if(myID /= root) then - - call MPI_gather((ztbf(0:5,0:1,indx_(0:mnm))),nbf,mp_Type_ztbf, & - ztbf,nbf,mp_Type_ztbf,root,comm,ier ) - if(ier /= 0) then - write(stderr,'(2a,i3)') myname_, & - ': MPI_gather(!root) error, ier =',ier - call die(myname_) - endif - - else - - call MP_comm_size(comm,nPE,ier) - if(ier /= 0) then - write(stderr,'(2a,i3)') myname_, & - ': MP_comm_size() error, ier =',ier - call die(myname_) - endif - - allocate(ztmp(0:5,0:1,0:mnm,0:nPE-1),stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': allocate(zts) error, stat =',ier - call die(myname_) - endif - - call MPI_gather((ztbf(0:5,0:1,indx_(0:mnm))),nbf,mp_Type_ztbf, & - ztmp,nbf,mp_Type_ztbf,root,comm,ier ) - if(ier /= 0) then - write(stderr,'(2a,i3)') myname_, & - ': MPI_gather(root) error, ier =',ier - call die(myname_) - endif - - ! write a - - write(lu,*,iostat=ier) - if(ier /= 0) then - write(stderr,'(2a,i3)') myname_,': can not write(), unit =',lu - call die(myname_) - endif - - call luflush(lu) - - do i=0,5 - if(iand(MASKS(i),imask) /= 0) & - call mp_balances_(lu,i,nPE,ztmp,indx_) - end do -#ifdef TODO - if(iand(UWRATE,imask) /= 0) call mp_rate_(lu,nPE,ztmp) -#endif - - deallocate(ztmp,stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i4)') myname_, & - ': deallocate(zts) error, stat =',ier - call die(myname_) - endif - endif - - call luflush(lu) -end subroutine allflush_ - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! NASA/GSFC, Data Assimilation Office, Code 910.3, GEOS/DAS ! -!----------------------------------------------------------------------- -!BOP -! -! !IROUTINE: mp_balances_ - summarize the timing data of all PEs -! -! !DESCRIPTION: -! -! \newcommand{\tb}{\overline{t}} -! -! \verb"mp_balances_"() summarizes the timing data of all PEs -! with quantified load balancing measures: -! \begin{eqnarray*} -! x &=& \frac{\max(t) - \tb}{N\tb} \times 100\% \\ -! i &=& \frac{\max(t) - \tb}{\max(t)} \times 100\% \\ -! r &=& \frac{1}{N\tb} \sum^{t>\tb}{(t-\tb)} -! \times 100\% -! \end{eqnarray*} -! where -! \begin{center} -! \begin{tabular}{rl} -! $t$: & time by any process element \\ -! $\tb$: & mean time by all process elements \\ -! $x$: & the ma{\bf x}imum percentage load deviation \\ -! $i$: & percentage {\bf i}dle process-time or -! load {\bf i}mbalance \\ -! $r$: & percentage {\bf r}elocatable loads \\ -! $N$: & {\bf n}umber of process elements -! \end{tabular} -! \end{center} -! -! !INTERFACE: - - subroutine mp_balances_(lu,itm,nPE,ztmp,indx) - implicit none - integer,intent(in) :: lu - integer,intent(in) :: itm - integer,intent(in) :: nPE - real*8,dimension(0:,0:,0:,0:),intent(in) :: ztmp - integer,dimension(0:),intent(in) :: indx - -! !REVISION HISTORY: -! 10Mar98 - Jing Guo - initial prototype/prolog/code -! 22Jan01 - Jay Larson - Minor correction in -! A write statement: replaced x (single-space) descriptor -! with 1x. This is apparently strict adherance to the -! f90 standard (though the first of many, many compilers -! where it has arisen). This was for the SunOS platform. -! 25Feb01 - R. Jacob change number of -! decimal places from 1 to 4. -!EOP -!_______________________________________________________________________ - character(len=*),parameter :: myname_=myname//'::mp_balances_' - - real*8,parameter :: res=.001 ! 
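An illustrative computation (assumed helper name balance_stats) of the three load-balance measures defined in the mp_balances_ prolog above, taking one timer value per process element: x is the maximum percentage load deviation, i the percentage idle process-time (imbalance), and r the percentage relocatable load.

def balance_stats(t):
    # t: list of per-PE times for one account
    n = len(t)
    mean = sum(t) / n
    tmax = max(t)
    x = 100.0 * (tmax - mean) / (n * mean)                          # max % load deviation
    i = 100.0 * (tmax - mean) / tmax                                # % idle process-time
    r = 100.0 * sum(v - mean for v in t if v > mean) / (n * mean)   # % relocatable load
    return x, i, r

# balance_stats([1.0, 1.0, 2.0]) -> approximately (16.7, 33.3, 16.7)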
(sec) - - integer,parameter :: lnmax=12 - character(len=max(NSZ,lnmax)) :: name - character(len=4) :: num - - integer :: i,k,l,ln,lx - - ! NET times - integer :: ix_o - real*8 :: zts_o,zta_o,ztm_o,ztr_o - integer :: x_o,i_o,r_o - - ! SCOPE times - integer :: ix_s - real*8 :: zts_s,zta_s,ztm_s,ztr_s - integer :: x_s,i_s,r_s - - write(num,'(i4)') nPE - write(lu,'(3a,t18,a,t58,a)') & - HEADER(itm),'x',adjustl(num), & - 'NET avg max imx x% r% i%', & - 'SCP avg max imx x% r% i%' - -!23.|....1....|....2....|....3....|....4....|....5....|....6....|....7.. - -!MWTIME]x3 NET avg max imx x% r% i% SCP avg max imx x% r% i% -!----------------------------------------------------------------------- -!zeit. 333333.3 33333.3 333 33 33 33 333333.3 33333.3 333 33 33 33 - -write(lu,'(91a)') ('-',i=1,91) -do l=0,min(MXN,nname) - - ! sum() of all processes - - zts_o=0. - zts_s=0. - - ! indices of max() of all processes - - ix_o=0 - ix_s=0 - do k=0,nPE-1 - - zts_o=zts_o+ztmp(itm,0,l,k) ! compute sum() - zts_s=zts_s+ztmp(itm,1,l,k) ! compute sum() - - if(ztmp(itm,0,l,ix_o) < ztmp(itm,0,l,k)) ix_o=k - if(ztmp(itm,1,l,ix_s) < ztmp(itm,1,l,k)) ix_s=k - - end do - - zta_o=zts_o/max(1,nPE) ! compute mean() - zta_s=zts_s/max(1,nPE) ! compute mean() - - ztr_o=0. - ztr_s=0. - do k=0,nPE-1 - if(ztmp(itm,0,l,k) > zta_o) ztr_o=ztr_o+ztmp(itm,0,l,k)-zta_o - if(ztmp(itm,1,l,k) > zta_s) ztr_s=ztr_s+ztmp(itm,1,l,k)-zta_s - end do - - ztm_o=ztmp(itm,0,l,ix_o) - ztm_s=ztmp(itm,1,l,ix_s) - - lx=indx(l) - name=name_l(lx) - ln=max(len_trim(name),lnmax) - - x_o=nint(100.*(ztm_o-zta_o)/max(zts_o,res)) - r_o=nint(100.* ztr_o /max(zts_o,res)) - i_o=nint(100.*(ztm_o-zta_o)/max(ztm_o,res)) - - x_s=nint(100.*(ztm_s-zta_s)/max(zts_s,res)) - r_s=nint(100.* ztr_s /max(zts_s,res)) - i_s=nint(100.*(ztm_s-zta_s)/max(ztm_s,res)) - - write(lu,'(a,2(3x,f10.6,3x,f10.6,1x,z3.3,3i3,1x))') & - name(1:ln), & - zta_o,ztm_o,ix_o,x_o,r_o,i_o, & - zta_s,ztm_s,ix_s,x_s,r_s,i_s - -end do -write(lu,'(91a)') ('-',i=1,91) -end subroutine mp_balances_ - -!======================================================================= -end module m_zeit -!. diff --git a/src/externals/mct/mpi-serial/.gitignore b/src/externals/mct/mpi-serial/.gitignore deleted file mode 100644 index 8b137891791..00000000000 --- a/src/externals/mct/mpi-serial/.gitignore +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/externals/mct/mpi-serial/Makefile b/src/externals/mct/mpi-serial/Makefile deleted file mode 100644 index 0b1ca1db6c2..00000000000 --- a/src/externals/mct/mpi-serial/Makefile +++ /dev/null @@ -1,93 +0,0 @@ -SHELL = /bin/sh -############################### -include Makefile.conf - -VPATH=$(SRCDIR)/mpi-serial -# SOURCE FILES - -MODULE = mpi-serial - -SRCS_F90 = fort.F90 \ - mpif.F90 - -SRCS_C = mpi.c \ - send.c \ - recv.c \ - collective.c \ - req.c \ - list.c \ - handles.c \ - comm.c \ - error.c \ - ic_merge.c \ - group.c \ - time.c \ - pack.c \ - type.c \ - type_const.c \ - copy.c \ - op.c \ - cart.c \ - getcount.c \ - probe.c \ - info.c - - -OBJS_ALL = $(SRCS_C:.c=.o) \ - $(SRCS_F90:.F90=.o) - - -INCPATH:= -I . - - -############################### - -# TARGETS - -default: lib$(MODULE).a - - -fort.o: mpif.h - - -lib$(MODULE).a: $(OBJS_ALL) - echo $(OBJS_ALL) - $(RM) $@ - $(AR) $@ $(OBJS_ALL) - $(RANLIB) $@ - - -LIB = lib$(MODULE).a - - -############################### -#RULES - -.SUFFIXES: -.SUFFIXES: .F90 .c .o - -.c.o: - $(CC) -c $(INCPATH) $(DEFS) $(CPPDEFS) $(CFLAGS) $< - -.F90.o: - $(FC) -c $(INCFLAG) . 
$(INCPATH) $(DEFS) $(FPPDEFS) $(FCFLAGS) $(MPEUFLAGS) $< - -MYF90FLAGS=$(INCPATH) $(DEFS) $(FCFLAGS) $(MPEUFLAGS) - -.PHONY: clean tests install - -clean: - /bin/rm -f *.o ctest ftest $(LIB) mpi.mod config.log config.status - cd tests ; $(MAKE) clean - -tests: - cd tests; make - -install: lib - $(MKINSTALLDIRS) $(libdir) $(includedir) - $(INSTALL) lib$(MODULE).a -m 644 $(libdir) - $(INSTALL) mpi.h -m 644 $(includedir) - $(INSTALL) mpif.h -m 644 $(includedir) - - - diff --git a/src/externals/mct/mpi-serial/Makefile.conf.in b/src/externals/mct/mpi-serial/Makefile.conf.in deleted file mode 100644 index 9f4ec263480..00000000000 --- a/src/externals/mct/mpi-serial/Makefile.conf.in +++ /dev/null @@ -1,16 +0,0 @@ -CC = @CC@ -FC = @FC@ -FCFLAGS = @FCFLAGS@ -INCLUDE = -I. -INCFLAG = @INCLUDEFLAG@ -DEFS = @DEFS@ -CFLAGS = @CFLAGS@ -AR = @AR@ -RANLIB = @RANLIB@ -LIBS = @LIBS@ -CRULE = .c.o -F90RULE = .F90.o - -SHELL = /bin/sh - -MODULE = mpi-serial diff --git a/src/externals/mct/mpi-serial/README b/src/externals/mct/mpi-serial/README deleted file mode 100644 index aaa728501f9..00000000000 --- a/src/externals/mct/mpi-serial/README +++ /dev/null @@ -1,140 +0,0 @@ - -###################################################################### - -mpi-serial - - Version 2.0 - Ray Loy (rloy@alcf.anl.gov) - John Yackovich - -###################################################################### - - -This library provides a one-processor version of MPI. Most common MPI -calls, including all that are necessary for MCT, are supported. This -includes sends and receives (which cannot be simply stubbed out). See -below for a complete list. - -Version 2.0 adds support for user-defined MPI types and MPI_STATUS_IGNORE. - - ---------------- -Quick Start ---------------- -./configure -make -make tests - - ---------------- -Configuration ---------------- - -There is now a dedicated configure for mpi-serial. - -By default, it is assumed that Fortran programs linked with mpi-serial -(e.g. MCT) will be using REAL variables of size 4 bytes, and DOUBLE -PRECISION variables of size 8 bytes. If this is not the case -(e.g. 
due to hardware sizes or Fortran compiler options), you must -specify an option to the mpi-serial configure, e.g.: - - ./configure --enable-fort-real=16 --enable-fort-double=32 - - - --------------------------------- -Manual make targets --------------------------------- - -'make' - compile the mpi-serial library - -'make examples' - compile mpi-serial and its example programs - -'make clean' - get rid of all objects and executables - - - ----------------------------------- -List of MPI calls supported ----------------------------------- - - general ops - mpi_init - mpi_finalize - mpi_abort - mpi_error_string - mpi_initialized - mpi_get_processor_name - mpi_get_library_version - mpi_wtime - - comm and group ops - mpi_comm_free - mpi_comm_size - mpi_comm_rank - mpi_comm_dup - mpi_comm_create - mpi_comm_split - mpi_comm_group - mpi_group_incl - mpi_group_range_incl - mpi_group_union - mpi_group_intersection - mpi_group_difference - mpi_group_translate_ranks - mpi_group_free - mpi_cart_create - mpi_cart_coords - mpi_dims_create - - send/receive ops - mpi_irecv - mpi_recv - mpi_test - mpi_testany - mpi_testall - mpi_testsome - mpi_wait - mpi_waitany - mpi_waitall - mpi_waitsome - mpi_isend - mpi_send - mpi_ssend - mpi_rsend - mpi_irsend - mpi_sendrecv - mpi_iprobe - mpi_probe - mpi_request_free - - collective operations - mpi_barrier - mpi_bcast - mpi_gather - mpi_gatherv - mpi_allgather - mpi_scatter - mpi_scatterv - mpi_reduce - mpi_allreduce - mpi_reduce_scatter - mpi_scan - mpi_alltoall - mpi_alltoallv - mpi_alltoallw - mpi_op_create - mpi_op_free - - data types and info objects - mpi_get_count - mpi_get_elements - mpi_pack - mpi_pack_size - mpi_unpack - mpi_info_create - mpi_info_set - mpi_info_free - ------ -EOF diff --git a/src/externals/mct/mpi-serial/aclocal.m4 b/src/externals/mct/mpi-serial/aclocal.m4 deleted file mode 100644 index c5b6de47a45..00000000000 --- a/src/externals/mct/mpi-serial/aclocal.m4 +++ /dev/null @@ -1,15 +0,0 @@ -# generated automatically by aclocal 1.10 -*- Autoconf -*- - -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005, 2006 Free Software Foundation, Inc. -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - - -m4_include([m4/ax_fc_version.m4]) diff --git a/src/externals/mct/mpi-serial/cart.c b/src/externals/mct/mpi-serial/cart.c deleted file mode 100644 index a53ef4814c6..00000000000 --- a/src/externals/mct/mpi-serial/cart.c +++ /dev/null @@ -1,128 +0,0 @@ -#include "mpiP.h" - -/* - * MPI_Cart_create - * - * create a new communicator, - */ - - -FC_FUNC( mpi_cart_create , MPI_CART_CREATE ) - ( int *comm_old, int *ndims, int *dims, int *periods, - int *reorder, int *comm_cart, int *ierr) -{ - *ierr = MPI_Cart_create( *comm_old, *ndims, dims, periods, *reorder, - comm_cart); -} - - -int MPI_Cart_create( MPI_Comm comm_old, int ndims, int *dims, int *periods, - int reorder, MPI_Comm *comm_cart) -{ - int i; - for (i = 0; i < ndims; i++) - if (dims[i] > 1) - { - printf("MPI_Cart_create: Greater dimension than no. 
of procs\n"); - abort(); - } - - MPI_Comm_dup(comm_old, comm_cart); - - return MPI_SUCCESS; -} - - -/* - * MPI_Cart_get - * - * Returns information about the cartesian organization - * of the communicator. - * - * Assuming the user gives right maxdims, the only possible - * dimensions are (1,1,..,1) for however many dimensions - */ - - -FC_FUNC( mpi_cart_get , MPI_CART_GET ) - (int * comm, int * maxdims, int * dims, - int * periods, int * coords, int * ierr) -{ - *ierr = MPI_Cart_get(*comm, *maxdims, dims, periods, coords); -} - - -int MPI_Cart_get(MPI_Comm comm, int maxdims, int *dims, - int *periods, int *coords) -{ - int i; - for (i=0;i 1) - { - printf("MPI_Dims_create: More nodes than procs specified.\n"); - abort(); - } - - for (i=0; isendlist=AP_list_new(); - cptr->recvlist=AP_list_new(); - - cptr->num=num++; - - return(chandle); -} - - -/*********/ - - -FC_FUNC( mpi_comm_free , MPI_COMM_FREE )(int *comm, int *ierror) -{ - *ierror=MPI_Comm_free(comm); -} - - -/* - * MPI_Comm_free() - * - * Note: will NOT free any pending MPI_Request handles - * that are allocated... correct user code should have - * already done a Wait or Test to free them. - * - */ - - -int MPI_Comm_free(MPI_Comm *comm) -{ - pList sendlist, recvlist; - int size; - Comm *mycomm; - - mycomm=mpi_handle_to_ptr(*comm); /* (Comm *)(*comm) */ - - sendlist=mycomm->sendlist; - recvlist=mycomm->recvlist; - - size=AP_list_size(sendlist); - if (size!=0) - fprintf(stderr,"MPI_Comm_free: warning: %d pending send reqs\n", - size); - AP_list_free(sendlist); - - - size=AP_list_size(recvlist); - if (size!=0) - fprintf(stderr,"MPI_Comm_free: warning: %d pending receive reqs\n", - size); - AP_list_free(recvlist); - - mpi_free_handle(*comm); /* free(mycomm); */ - *comm=MPI_COMM_NULL; - - return(MPI_SUCCESS); -} - - -/*********/ - - - -FC_FUNC( mpi_comm_size , MPI_COMM_SIZE )(int *comm, int *size, int *ierror) -{ - *ierror=MPI_Comm_size(*comm, size); -} - - - -int MPI_Comm_size(MPI_Comm comm, int *size) -{ - *size=1; - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_comm_rank , MPI_COMM_RANK )(int *comm, int *rank, int *ierror) -{ - *ierror=MPI_Comm_rank( *comm, rank); -} - - -int MPI_Comm_rank(MPI_Comm comm, int *rank) -{ - *rank=0; - - return(MPI_SUCCESS); -} - - - -/*********/ - - -FC_FUNC( mpi_comm_dup , MPI_COMM_DUP )(int *comm, int *newcomm, int *ierror) -{ - - *ierror=MPI_Comm_dup( *comm, newcomm); - -} - - -int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm) -{ - *newcomm= mpi_comm_new(); - -#ifdef INFO - fflush(stdout); - fprintf(stderr,"MPI_Comm_dup: new comm handle=%d\n",*newcomm); -#endif - - return(MPI_SUCCESS); -} - - -/*********/ - - -int FC_FUNC( mpi_comm_create, MPI_COMM_CREATE) - (int *comm, int *group, int *newcomm, int *ierror) -{ - *ierror=MPI_Comm_create(*comm,*group,newcomm); -} - - - -int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) -{ - if (group==MPI_GROUP_NULL || group==MPI_GROUP_EMPTY) - *newcomm= MPI_COMM_NULL; - else - *newcomm=mpi_comm_new(); - - return(MPI_SUCCESS); -} - - - -/*********/ - - -FC_FUNC( mpi_comm_split, MPI_COMM_SPLIT ) - (int *comm, int *color, int *key, int *newcomm, int *ierror) -{ - *ierror=MPI_Comm_split(*comm,*color,*key,newcomm); - -} - - - -int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm) -{ - if (color==MPI_UNDEFINED) - *newcomm=MPI_COMM_NULL; - else - *newcomm= mpi_comm_new(); - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_comm_group, MPI_COMM_GROUP ) - (int *comm, int *group, int *ierror) -{ - *ierror= 
MPI_Comm_group(*comm, group); -} - - - -int MPI_Comm_group(MPI_Comm comm, MPI_Group *group) -{ - if (comm==MPI_COMM_NULL) - *group= MPI_GROUP_NULL; - else - *group= MPI_GROUP_ONE; - - return(MPI_SUCCESS); -} - -/* Intercomm_create - * - */ - -FC_FUNC(mpi_intercomm_create, MPI_INTERCOMM_CREATE)( - int * local_comm, int * local_leader, - int * peer_comm, int * remote_leader, - int * tag, int * newintercomm, int* ierr) -{ - *ierr = MPI_Intercomm_create(*local_comm, *local_leader, *peer_comm, - *remote_leader, *tag, newintercomm); -} - -int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, - MPI_Comm peer_comm, int remote_leader, - int tag, MPI_Comm *newintercomm) -{ - if (local_comm==MPI_COMM_NULL && peer_comm==MPI_COMM_NULL) - *newintercomm = MPI_COMM_NULL; - else - MPI_Comm_dup(MPI_COMM_WORLD, newintercomm); - - return MPI_SUCCESS; -} - - -/*********/ - - -MPI_Comm MPI_Comm_f2c(MPI_Fint comm) -{ - /* Comm is an integer handle used both by C and Fortran */ - return(comm); -} - - -MPI_Fint MPI_Comm_c2f(MPI_Comm comm) -{ - return(comm); -} diff --git a/src/externals/mct/mpi-serial/config.h.in b/src/externals/mct/mpi-serial/config.h.in deleted file mode 100644 index eed022557ad..00000000000 --- a/src/externals/mct/mpi-serial/config.h.in +++ /dev/null @@ -1,84 +0,0 @@ -/* config.h.in. Generated from configure.in by autoheader. */ - -/* User-set Fortran double size */ -#undef CONFIG_FORT_DOUBLE - -/* User-set Fortran real size */ -#undef CONFIG_FORT_REAL - -/* Define to dummy `main' function (if any) required to link to the Fortran - libraries. */ -#undef FC_DUMMY_MAIN - -/* Define if F77 and FC dummy `main' functions are identical. */ -#undef FC_DUMMY_MAIN_EQ_F77 - -/* Define to a macro mangling the given C identifier (in lower and upper - case), which must not contain underscores, for linking with Fortran. */ -#undef FC_FUNC - -/* As FC_FUNC, but for C identifiers containing underscores. */ -#undef FC_FUNC_ - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Print extra debug info */ -#undef INFO - -/* Name of package */ -#undef PACKAGE - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* The size of `long', as computed by sizeof. */ -#undef SIZEOF_LONG - -/* Define to 1 if you have the ANSI C header files. 
*/ -#undef STDC_HEADERS - -/* Perform tests on data copies internally instead of using MPI_Send */ -#undef TEST_INTERNAL - -/* Perform type checking during communications */ -#undef TYPE_CHECKING - -/* Version number of package */ -#undef VERSION diff --git a/src/externals/mct/mpi-serial/configure b/src/externals/mct/mpi-serial/configure deleted file mode 100755 index 5dd570dc928..00000000000 --- a/src/externals/mct/mpi-serial/configure +++ /dev/null @@ -1,5833 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.68. -# -# -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software -# Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. -fi -test x\$exitcode = x0 || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. 
- as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - # We cannot yet assume a decent shell, so we have to provide a - # neutralization value for shells without unset; and this also - # works around shells that cannot unset nonexistent variables. - # Preserve -v and -x to the replacement shell. - BASH_ENV=/dev/null - ENV=/dev/null - (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV - export CONFIG_SHELL - case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; - esac - exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"} -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? 
"cannot create directory $as_dir" - - -} # as_fn_mkdir_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. 
- exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -test -n "$DJDIR" || exec 7<&0 </dev/null 6>&1 - -# Name of the host. -# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME= -PACKAGE_TARNAME= -PACKAGE_VERSION= -PACKAGE_STRING= -PACKAGE_BUGREPORT= -PACKAGE_URL= - -ac_unique_file="mpi.h" -# Factoring default headers for most tests. 
-ac_includes_default="\ -#include <stdio.h> -#ifdef HAVE_SYS_TYPES_H -# include <sys/types.h> -#endif -#ifdef HAVE_SYS_STAT_H -# include <sys/stat.h> -#endif -#ifdef STDC_HEADERS -# include <stdlib.h> -# include <stddef.h> -#else -# ifdef HAVE_STDLIB_H -# include <stdlib.h> -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include <memory.h> -# endif -# include <string.h> -#endif -#ifdef HAVE_STRINGS_H -# include <strings.h> -#endif -#ifdef HAVE_INTTYPES_H -# include <inttypes.h> -#endif -#ifdef HAVE_STDINT_H -# include <stdint.h> -#endif -#ifdef HAVE_UNISTD_H -# include <unistd.h> -#endif" - -ac_subst_vars='LTLIBOBJS -LIBOBJS -INCLUDEFLAG -EGREP -GREP -CPP -FCLIBS -RANLIB -AR -ac_ct_FC -FCFLAGS -FC -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -enable_test_internal -enable_info -enable_fort_real -enable_fort_double -enable_type_checking -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -FC -FCFLAGS -AR -RANLIB -CPP -INCLUDEFLAG' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. 
- - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. 
- with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - -mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. 
- with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | --program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - 
site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. - with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? 
"missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. -for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host. - If a cross compiler is detected then cross compile mode will be used" >&2 - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. 
-case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. -# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures this package to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. 
- -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF -_ACEOF -fi - -if test -n "$ac_init_help"; then - - cat <<\_ACEOF - -Optional Features: - --disable-option-checking ignore unrecognized --enable/--with options - --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) - --enable-FEATURE[=ARG] include FEATURE [ARG=yes] - --enable-test-internal Specify internal test as opposed to full suite test - - --enable-info Print extra debugging info - --enable-fort-real=SIZE Specify Fortran real size - --enable-fort-double=SIZE Specify Fortran double size - --enable-type-checking Perform type checking during communications - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l<library> - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if - you have headers in a nonstandard directory - FC Fortran compiler command - FCFLAGS Fortran compiler flags - AR Archive Command - RANLIB Archive index update command - CPP C preprocessor - INCLUDEFLAG Fortran compiler flag for specifying module search path - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. 
- ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. - if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -configure -generated by GNU Autoconf 2.68 - -Copyright (C) 2010 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_fc_try_compile LINENO -# --------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_fc_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_fc_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_fc_try_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - $as_test_x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES -# -------------------------------------------- -# Tries to find the compile-time value of EXPR in a program that includes -# INCLUDES, setting VAR accordingly. 
Returns whether the value could be -# computed -ac_fn_c_compute_int () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if test "$cross_compiling" = yes; then - # Depending upon the size, compute the lo and hi bounds. -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -static int test_array [1 - 2 * !(($2) >= 0)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=0 ac_mid=0 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid; break -else - as_fn_arith $ac_mid + 1 && ac_lo=$as_val - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -static int test_array [1 - 2 * !(($2) < 0)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=-1 ac_mid=-1 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -static int test_array [1 - 2 * !(($2) >= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=$ac_mid; break -else - as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - ac_lo= ac_hi= -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -# Binary search between lo and hi bounds. -while test "x$ac_lo" != "x$ac_hi"; do - as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid -else - as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -done -case $ac_lo in #(( -?*) eval "$3=\$ac_lo"; ac_retval=0 ;; -'') ac_retval=1 ;; -esac - else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -static long int longval () { return $2; } -static unsigned long int ulongval () { return $2; } -#include <stdio.h> -#include <stdlib.h> -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ - - FILE *f = fopen ("conftest.val", "w"); - if (! f) - return 1; - if (($2) < 0) - { - long int i = longval (); - if (i != ($2)) - return 1; - fprintf (f, "%ld", i); - } - else - { - unsigned long int i = ulongval (); - if (i != ($2)) - return 1; - fprintf (f, "%lu", i); - } - /* Do not output a trailing newline, as this causes \r\n confusion - on some platforms. */ - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - echo >>conftest.val; read $3 <conftest.val; ac_retval=0 -else - ac_retval=1 -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext conftest.val - fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_compute_int - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_compile -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by $as_me, which was -generated by GNU Autoconf 2.68. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. 
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. 
## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. ## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. - ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. 
- if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -ac_config_headers="$ac_config_headers config.h" - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. 
- shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. 
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. -for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. 
- break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... 
" >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -if test -n "$ac_tool_prefix"; then - for ac_prog in gfortran g95 xlf95 f95 fort ifort ifc efc pgfortran pgf95 lf95 ftn xlf90 f90 pgf90 pghpf epcf90 g77 xlf f77 frt pgf77 cf77 fort77 fl32 af77 - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_FC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$FC"; then - ac_cv_prog_FC="$FC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_FC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -FC=$ac_cv_prog_FC -if test -n "$FC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FC" >&5 -$as_echo "$FC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$FC" && break - done -fi -if test -z "$FC"; then - ac_ct_FC=$FC - for ac_prog in gfortran g95 xlf95 f95 fort ifort ifc efc pgfortran pgf95 lf95 ftn xlf90 f90 pgf90 pghpf epcf90 g77 xlf f77 frt pgf77 cf77 fort77 fl32 af77 -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_FC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_FC"; then - ac_cv_prog_ac_ct_FC="$ac_ct_FC" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_FC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_FC=$ac_cv_prog_ac_ct_FC -if test -n "$ac_ct_FC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FC" >&5 -$as_echo "$ac_ct_FC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_FC" && break -done - - if test "x$ac_ct_FC" = x; then - FC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - FC=$ac_ct_FC - fi -fi - - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done -rm -f a.out - -# If we don't use `.F' as extension, the preprocessor is not run on the -# input file. (Note that this only needs to work for GNU compilers.) -ac_save_ext=$ac_ext -ac_ext=F -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU Fortran compiler" >&5 -$as_echo_n "checking whether we are using the GNU Fortran compiler... " >&6; } -if ${ac_cv_fc_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat > conftest.$ac_ext <<_ACEOF - program main -#ifndef __GNUC__ - choke me -#endif - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_fc_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_compiler_gnu" >&5 -$as_echo "$ac_cv_fc_compiler_gnu" >&6; } -ac_ext=$ac_save_ext -ac_test_FCFLAGS=${FCFLAGS+set} -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS= -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $FC accepts -g" >&5 -$as_echo_n "checking whether $FC accepts -g... 
" >&6; } -if ${ac_cv_prog_fc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - FCFLAGS=-g -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_prog_fc_g=yes -else - ac_cv_prog_fc_g=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_g" >&5 -$as_echo "$ac_cv_prog_fc_g" >&6; } -if test "$ac_test_FCFLAGS" = set; then - FCFLAGS=$ac_save_FCFLAGS -elif test $ac_cv_prog_fc_g = yes; then - if test "x$ac_cv_fc_compiler_gnu" = xyes; then - FCFLAGS="-g -O2" - else - FCFLAGS="-g" - fi -else - if test "x$ac_cv_fc_compiler_gnu" = xyes; then - FCFLAGS="-O2" - else - FCFLAGS= - fi -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# ARCHIVE COMMAND SIMILAR ACROSS ALL PLATFORMS - -if test -z "$AR"; then - AR="ar cruv" -fi - -# RANLIB - -if test -z "$RANLIB"; then - # Necessary on Darwin to deal with common symbols (particularly when - # using ifort). - if test "$SYSDEF"x = DARWINx; then - RANLIB="ranlib -c" - else - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 -$as_echo "$RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 -$as_echo "$ac_ct_RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - RANLIB=$ac_ct_RANLIB - fi -else - RANLIB="$ac_cv_prog_RANLIB" -fi - - fi -fi - - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get verbose linking output from $FC" >&5 -$as_echo_n "checking how to get verbose linking output from $FC... " >&6; } -if ${ac_cv_prog_fc_v+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_prog_fc_v= -# Try some options frequently used verbose output -for ac_verb in -v -verbose --verbose -V -\#\#\#; do - cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran compiler in order to get -# "verbose" output that we can then parse for the Fortran linker -# flags. -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS $ac_verb" -eval "set x $ac_link" -shift -$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 -# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, -# LIBRARY_PATH; skip all such settings. -ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | - sed '/^Driving:/d; /^Configured with:/d; - '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` -$as_echo "$ac_fc_v_output" >&5 -FCFLAGS=$ac_save_FCFLAGS - -rm -rf conftest* - -# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where -# /foo, /bar, and /baz are search directories for the Fortran linker. -# Here, we change these into -L/foo -L/bar -L/baz (and put it first): -ac_fc_v_output="`echo $ac_fc_v_output | - grep 'LPATH is:' | - sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" - -# FIXME: we keep getting bitten by quoted arguments; a more general fix -# that detects unbalanced quotes in FLIBS should be implemented -# and (ugh) tested at some point. -case $ac_fc_v_output in - # If we are using xlf then replace all the commas with spaces. - *xlfentry*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/,/ /g'` ;; - - # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted - # $LIBS confuse us, and the libraries appear later in the output anyway). - *mGLOB_options_string*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; - - # Portland Group compiler has singly- or doubly-quoted -cmdline argument - # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. 
- # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". - *-cmdline\ * | *-ignore\ * | *-def\ *) - ac_fc_v_output=`echo $ac_fc_v_output | sed "\ - s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g - s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g - s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; - - # If we are using Cray Fortran then delete quotes. - *cft90*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; -esac - - - # look for -l* and *.a constructs in the output - for ac_arg in $ac_fc_v_output; do - case $ac_arg in - [\\/]*.a | ?:[\\/]*.a | -[lLRu]*) - ac_cv_prog_fc_v=$ac_verb - break 2 ;; - esac - done -done -if test -z "$ac_cv_prog_fc_v"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot determine how to obtain linking information from $FC" >&5 -$as_echo "$as_me: WARNING: cannot determine how to obtain linking information from $FC" >&2;} -fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: compilation failed" >&5 -$as_echo "$as_me: WARNING: compilation failed" >&2;} -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_v" >&5 -$as_echo "$ac_cv_prog_fc_v" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran libraries of $FC" >&5 -$as_echo_n "checking for Fortran libraries of $FC... " >&6; } -if ${ac_cv_fc_libs+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "x$FCLIBS" != "x"; then - ac_cv_fc_libs="$FCLIBS" # Let the user override the test. -else - -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran compiler in order to get -# "verbose" output that we can then parse for the Fortran linker -# flags. -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS $ac_cv_prog_fc_v" -eval "set x $ac_link" -shift -$as_echo "$as_me:${as_lineno-$LINENO}: $*" >&5 -# gfortran 4.3 outputs lines setting COLLECT_GCC_OPTIONS, COMPILER_PATH, -# LIBRARY_PATH; skip all such settings. -ac_fc_v_output=`eval $ac_link 5>&1 2>&1 | - sed '/^Driving:/d; /^Configured with:/d; - '"/^[_$as_cr_Letters][_$as_cr_alnum]*=/d"` -$as_echo "$ac_fc_v_output" >&5 -FCFLAGS=$ac_save_FCFLAGS - -rm -rf conftest* - -# On HP/UX there is a line like: "LPATH is: /foo:/bar:/baz" where -# /foo, /bar, and /baz are search directories for the Fortran linker. -# Here, we change these into -L/foo -L/bar -L/baz (and put it first): -ac_fc_v_output="`echo $ac_fc_v_output | - grep 'LPATH is:' | - sed 's|.*LPATH is\(: *[^ ]*\).*|\1|;s|: */| -L/|g'` $ac_fc_v_output" - -# FIXME: we keep getting bitten by quoted arguments; a more general fix -# that detects unbalanced quotes in FLIBS should be implemented -# and (ugh) tested at some point. -case $ac_fc_v_output in - # If we are using xlf then replace all the commas with spaces. - *xlfentry*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/,/ /g'` ;; - - # With Intel ifc, ignore the quoted -mGLOB_options_string stuff (quoted - # $LIBS confuse us, and the libraries appear later in the output anyway). - *mGLOB_options_string*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"-mGLOB[^"]*"/ /g'` ;; - - # Portland Group compiler has singly- or doubly-quoted -cmdline argument - # Singly-quoted arguments were reported for versions 5.2-4 and 6.0-4. - # Doubly-quoted arguments were reported for "PGF90/x86 Linux/x86 5.0-2". 
- *-cmdline\ * | *-ignore\ * | *-def\ *) - ac_fc_v_output=`echo $ac_fc_v_output | sed "\ - s/-cmdline *'[^']*'/ /g; s/-cmdline *\"[^\"]*\"/ /g - s/-ignore *'[^']*'/ /g; s/-ignore *\"[^\"]*\"/ /g - s/-def *'[^']*'/ /g; s/-def *\"[^\"]*\"/ /g"` ;; - - # If we are using Cray Fortran then delete quotes. - *cft90*) - ac_fc_v_output=`echo $ac_fc_v_output | sed 's/"//g'` ;; -esac - - - -ac_cv_fc_libs= - -# Save positional arguments (if any) -ac_save_positional="$@" - -set X $ac_fc_v_output -while test $# != 1; do - shift - ac_arg=$1 - case $ac_arg in - [\\/]*.a | ?:[\\/]*.a) - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_arg" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" -fi - ;; - -bI:*) - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_arg" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - if test "$ac_compiler_gnu" = yes; then - for ac_link_opt in $ac_arg; do - ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" - done -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" -fi -fi - ;; - # Ignore these flags. - -lang* | -lcrt*.o | -lc | -lgcc* | -lSystem | -libmil | -little \ - |-LANG:=* | -LIST:* | -LNO:* | -link) - ;; - -lkernel32) - test x"$CYGWIN" != xyes && ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" - ;; - -[LRuYz]) - # These flags, when seen by themselves, take an argument. - # We remove the space between option and argument and re-iterate - # unless we find an empty arg or a new option (starting with -) - case $2 in - "" | -*);; - *) - ac_arg="$ac_arg$2" - shift; shift - set X $ac_arg "$@" - ;; - esac - ;; - -YP,*) - for ac_j in `$as_echo "$ac_arg" | sed -e 's/-YP,/-L/;s/:/ -L/g'`; do - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_j" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - ac_arg="$ac_arg $ac_j" - ac_cv_fc_libs="$ac_cv_fc_libs $ac_j" -fi - done - ;; - -[lLR]*) - ac_exists=false - for ac_i in $ac_cv_fc_libs; do - if test x"$ac_arg" = x"$ac_i"; then - ac_exists=true - break - fi - done - - if test x"$ac_exists" = xtrue; then : - -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" -fi - ;; - -zallextract*| -zdefaultextract) - ac_cv_fc_libs="$ac_cv_fc_libs $ac_arg" - ;; - # Ignore everything else. - esac -done -# restore positional arguments -set X $ac_save_positional; shift - -# We only consider "LD_RUN_PATH" on Solaris systems. If this is seen, -# then we insist that the "run path" must be an absolute path (i.e. it -# must begin with a "/"). 
-case `(uname -sr) 2>/dev/null` in - "SunOS 5"*) - ac_ld_run_path=`$as_echo "$ac_fc_v_output" | - sed -n 's,^.*LD_RUN_PATH *= *\(/[^ ]*\).*$,-R\1,p'` - test "x$ac_ld_run_path" != x && - if test "$ac_compiler_gnu" = yes; then - for ac_link_opt in $ac_ld_run_path; do - ac_cv_fc_libs="$ac_cv_fc_libs -Xlinker $ac_link_opt" - done -else - ac_cv_fc_libs="$ac_cv_fc_libs $ac_ld_run_path" -fi - ;; -esac -fi # test "x$[]_AC_LANG_PREFIX[]LIBS" = "x" - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_libs" >&5 -$as_echo "$ac_cv_fc_libs" >&6; } -FCLIBS="$ac_cv_fc_libs" - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dummy main to link with Fortran libraries" >&5 -$as_echo_n "checking for dummy main to link with Fortran libraries... " >&6; } -if ${ac_cv_fc_dummy_main+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_fc_dm_save_LIBS=$LIBS - LIBS="$LIBS $FCLIBS" - ac_fortran_dm_var=FC_DUMMY_MAIN - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - # First, try linking without a dummy main: - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_fortran_dummy_main=none -else - ac_cv_fortran_dummy_main=unknown -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - - if test $ac_cv_fortran_dummy_main = unknown; then - for ac_func in MAIN__ MAIN_ __main MAIN _MAIN __MAIN main_ main__ _main; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#define $ac_fortran_dm_var $ac_func -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_fortran_dummy_main=$ac_func; break -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - fi - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - ac_cv_fc_dummy_main=$ac_cv_fortran_dummy_main - rm -rf conftest* - LIBS=$ac_fc_dm_save_LIBS - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_dummy_main" >&5 -$as_echo "$ac_cv_fc_dummy_main" >&6; } -FC_DUMMY_MAIN=$ac_cv_fc_dummy_main -if test "$FC_DUMMY_MAIN" != unknown; then : - if test $FC_DUMMY_MAIN != none; then - -cat >>confdefs.h <<_ACEOF -#define FC_DUMMY_MAIN $FC_DUMMY_MAIN -_ACEOF - - if test "x$ac_cv_fc_dummy_main" = "x$ac_cv_f77_dummy_main"; then - -$as_echo "#define FC_DUMMY_MAIN_EQ_F77 1" >>confdefs.h - - fi -fi -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "linking to Fortran libraries from C fails -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran name-mangling scheme" >&5 -$as_echo_n "checking for Fortran name-mangling scheme... " >&6; } -if ${ac_cv_fc_mangling+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat > conftest.$ac_ext <<_ACEOF - subroutine foobar() - return - end - subroutine foo_bar() - return - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - mv conftest.$ac_objext cfortran_test.$ac_objext - - ac_save_LIBS=$LIBS - LIBS="cfortran_test.$ac_objext $LIBS $FCLIBS" - - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - ac_success=no - for ac_foobar in foobar FOOBAR; do - for ac_underscore in "" "_"; do - ac_func="$ac_foobar$ac_underscore" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char $ac_func (); -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -return $ac_func (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_success=yes; break 2 -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - done - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - if test "$ac_success" = "yes"; then - case $ac_foobar in - foobar) - ac_case=lower - ac_foo_bar=foo_bar - ;; - FOOBAR) - ac_case=upper - ac_foo_bar=FOO_BAR - ;; - esac - - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - ac_success_extra=no - for ac_extra in "" "_"; do - ac_func="$ac_foo_bar$ac_underscore$ac_extra" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $ac_func (); -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ -return $ac_func (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_success_extra=yes; break -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - if test "$ac_success_extra" = "yes"; then - ac_cv_fc_mangling="$ac_case case" - if test -z "$ac_underscore"; then - ac_cv_fc_mangling="$ac_cv_fc_mangling, no underscore" - else - ac_cv_fc_mangling="$ac_cv_fc_mangling, underscore" - fi - if test -z "$ac_extra"; then - ac_cv_fc_mangling="$ac_cv_fc_mangling, no extra underscore" - else - ac_cv_fc_mangling="$ac_cv_fc_mangling, extra underscore" - fi - else - ac_cv_fc_mangling="unknown" - fi - else - ac_cv_fc_mangling="unknown" - fi - - LIBS=$ac_save_LIBS - rm -rf conftest* - rm -f cfortran_test* -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? 
"cannot compile a simple Fortran program -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_mangling" >&5 -$as_echo "$ac_cv_fc_mangling" >&6; } - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu -case $ac_cv_fc_mangling in - "lower case, no underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name" >>confdefs.h - ;; - "lower case, no underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name ## _" >>confdefs.h - ;; - "lower case, underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name ## _" >>confdefs.h - ;; - "lower case, underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) name ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) name ## __" >>confdefs.h - ;; - "upper case, no underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME" >>confdefs.h - ;; - "upper case, no underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME ## _" >>confdefs.h - ;; - "upper case, underscore, no extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME ## _" >>confdefs.h - ;; - "upper case, underscore, extra underscore") - $as_echo "#define FC_FUNC(name,NAME) NAME ## _" >>confdefs.h - - $as_echo "#define FC_FUNC_(name,NAME) NAME ## __" >>confdefs.h - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unknown Fortran name-mangling scheme" >&5 -$as_echo "$as_me: WARNING: unknown Fortran name-mangling scheme" >&2;} - ;; -esac - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. 
- # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if ${ac_cv_path_GREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue -# Check for GNU ac_path_GREP and select it if it is found. - # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... " >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -#ifdef FC_DUMMY_MAIN -#ifndef FC_DUMMY_MAIN_EQ_F77 -# ifdef __cplusplus - extern "C" -# endif - int FC_DUMMY_MAIN() { return 1; } -#endif -#endif -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -# The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. 
-# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5 -$as_echo_n "checking size of long... " >&6; } -if ${ac_cv_sizeof_long+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_long" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "cannot compute sizeof (long) -See \`config.log' for more details" "$LINENO" 5; } - else - ac_cv_sizeof_long=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5 -$as_echo "$ac_cv_sizeof_long" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_LONG $ac_cv_sizeof_long -_ACEOF - - - -# Check whether --enable-test-internal was given. -if test "${enable_test_internal+set}" = set; then : - enableval=$enable_test_internal; -$as_echo "#define TEST_INTERNAL /**/" >>confdefs.h - -fi - - -# Check whether --enable-info was given. -if test "${enable_info+set}" = set; then : - enableval=$enable_info; -$as_echo "#define INFO /**/" >>confdefs.h - -fi - - -# Check whether --enable-fort-real was given. -if test "${enable_fort_real+set}" = set; then : - enableval=$enable_fort_real; -cat >>confdefs.h <<_ACEOF -#define CONFIG_FORT_REAL $enable_fort_real -_ACEOF - -fi - -# Check whether --enable-fort-double was given. -if test "${enable_fort_double+set}" = set; then : - enableval=$enable_fort_double; -cat >>confdefs.h <<_ACEOF -#define CONFIG_FORT_DOUBLE $enable_fort_double -_ACEOF - -fi - -# Check whether --enable-type-checking was given. -if test "${enable_type_checking+set}" = set; then : - enableval=$enable_type_checking; -$as_echo "#define TYPE_CHECKING /**/" >>confdefs.h - -fi - - - -# Determine flag for fortran module include path -# taken from the MCT configure - - -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to get the version output from $FC" >&5 -$as_echo_n "checking how to get the version output from $FC... 
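[Editor's note] The size-of-long probe above lands in config.h as a SIZEOF_LONG macro; the hand-written configure.in further down in this patch adds the check "to determine type of integer needed for fortran". A minimal, generic sketch of how such a generated macro is typically consumed — the typedef name mpi_fint_t is hypothetical and not something mpi-serial defines:

    /* Sketch only: consuming the generated SIZEOF_LONG from config.h.
     * The typedef name mpi_fint_t is hypothetical. */
    #include "config.h"

    #if SIZEOF_LONG == 4
    typedef long mpi_fint_t;   /* long is 4 bytes here; matches a default Fortran INTEGER */
    #else
    typedef int  mpi_fint_t;   /* otherwise assume int is the 4-byte type */
    #endif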
" >&6; } -if ${ac_cv_prog_fc_version+:} false; then : - $as_echo_n "(cached) " >&6 -else - -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF -if ac_fn_fc_try_compile "$LINENO"; then : - ac_cv_prog_fc_version= -# Try some options frequently used verbose output -for ac_version in -V -version --version +version -qversion; do - ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - -cat > conftest.$ac_ext <<_ACEOF - program main - - end -_ACEOF - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran 90 compiler in order to get "version" output -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS $ac_version" -(eval echo $as_me:4480: \"$ac_link\") >&5 -ac_fc_version_output=`eval $ac_link 5>&1 2>&1 | grep -v 'Driving:'` -echo "$ac_fc_version_output" >&5 -FCFLAGS=$ac_save_FCFLAGS - -rm -f conftest.* -ac_ext=${ac_fc_srcext-f} -ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' -ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_fc_compiler_gnu - - - # look for "copyright" constructs in the output - for ac_arg in $ac_fc_version_output; do - case $ac_arg in - COPYRIGHT | copyright | Copyright | '(c)' | '(C)' | Compiler | Compilers | Version | Version:) - ac_cv_prog_fc_version=$ac_version - break 2 ;; - esac - done -done -if test -z "$ac_cv_prog_fc_version"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot determine how to obtain version information from $FC" >&5 -$as_echo "$as_me: WARNING: cannot determine how to obtain version information from $FC" >&2;} -fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: compilation failed" >&5 -$as_echo "$as_me: WARNING: compilation failed" >&2;} -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_version" >&5 -$as_echo "$ac_cv_prog_fc_version" >&6; } -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -if echo $ac_fc_version_output | grep -i absoft >/dev/null 2>&1; then - echo "Fortran Compiler is Absoft" - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-p" - fi -elif echo $ac_fc_version_output | grep -i workshop >/dev/null 2>&1; then - echo "Fortran Compiler is Workshop" - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-M" - fi -elif echo $ac_fc_version_output | grep -i pgf >/dev/null 2>&1; then - echo "Fortran Compiler is Portland Group" - LIBS="$LIBS -pgf90libs" -elif echo $ac_fc_version_output | grep -i nag >/dev/null 2>&1; then - echo "Fortran Compiler is NAG" - CPRDEF="NAG" - if test -z "$FCFLAGS"; then - FCFLAGS="-mismatch" - fi -fi -# INCLUDE FLAG IF NOT ALREADY SET IS MOST LIKELY -I -if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-I" -fi - - -ac_config_files="$ac_config_files Makefile.conf" - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. 
-# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! -f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. 
- as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. 
## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by $as_me, which was -generated by GNU Autoconf 2.68. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... - - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -config.status -configured by $0, generated by GNU Autoconf 2.68, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2010 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. - -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - '') as_fn_error $? 
"missing file argument" ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error $? "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; - "Makefile.conf") CONFIG_FILES="$CONFIG_FILES Makefile.conf" ;; - - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. 
-if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. -if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$ac_tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ - || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove sole $(srcdir), -# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ -h -s/// -s/^/:/ -s/[ ]*$/:/ -s/:\$(srcdir):/:/g -s/:\${srcdir}:/:/g -s/:@srcdir@:/:/g -s/^:*// -s/:*$// -x -s/\(=[ ]*\).*/\1/ -G -s/\n// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. 
-if test -n "$CONFIG_HEADERS"; then -cat >"$ac_tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. - -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_tt=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_tt"; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. - -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. 
- test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. - case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. 
-ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ - >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ - "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&2;} - - rm -f "$ac_tmp/stdin" - case $ac_file in - -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; - *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; - esac \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" - } >"$ac_tmp/config.h" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$ac_tmp/config.h" "$ac_file" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error $? "could not create -" "$LINENO" 5 - fi - ;; - - - esac - -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. 
-# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit 1 -fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - diff --git a/src/externals/mct/mpi-serial/configure.in b/src/externals/mct/mpi-serial/configure.in deleted file mode 100644 index 8d3a750b18c..00000000000 --- a/src/externals/mct/mpi-serial/configure.in +++ /dev/null @@ -1,91 +0,0 @@ -dnl initialize autoconf -AC_INIT(mpi.h) -dnl specify config header file -AC_CONFIG_HEADER(config.h) -dnl find c compiler, and fort compiler -AC_PROG_CC -AC_PROG_FC - -# ARCHIVE COMMAND SIMILAR ACROSS ALL PLATFORMS -AC_ARG_VAR(AR,Archive Command) -if test -z "$AR"; then - AR="ar cruv" -fi - -# RANLIB -AC_ARG_VAR(RANLIB,Archive index update command) -if test -z "$RANLIB"; then - # Necessary on Darwin to deal with common symbols (particularly when - # using ifort). - if test "$SYSDEF"x = DARWINx; then - RANLIB="ranlib -c" - else - AC_PROG_RANLIB - fi -fi - -dnl determine fortran name-mangling -dnl result functions end up in config.h -AC_FC_WRAPPERS - -dnl to determine type of integer needed for fortran -AC_CHECK_SIZEOF(long) - -dnl these are to specify the possible arguments to configure. 
-AC_ARG_ENABLE([test-internal], - [ --enable-test-internal Specify internal test as opposed to full suite test] - ,AC_DEFINE([TEST_INTERNAL],[], - [Perform tests on data copies internally instead of using MPI_Send])) - -AC_ARG_ENABLE([info],[ --enable-info Print extra debugging info], - AC_DEFINE([INFO],[],[Print extra debug info])) - -AC_ARG_ENABLE([fort-real], - [ --enable-fort-real=SIZE Specify Fortran real size], - AC_DEFINE_UNQUOTED([CONFIG_FORT_REAL],[$enable_fort_real], - [User-set Fortran real size])) -AC_ARG_ENABLE([fort-double], - [ --enable-fort-double=SIZE Specify Fortran double size], - AC_DEFINE_UNQUOTED([CONFIG_FORT_DOUBLE],[$enable_fort_double], - [User-set Fortran double size])) -AC_ARG_ENABLE([type-checking], - [ --enable-type-checking Perform type checking during communications], - AC_DEFINE([TYPE_CHECKING],[],[Perform type checking during communications])) - - -# Determine flag for fortran module include path -# taken from the MCT configure - -AC_ARG_VAR(INCLUDEFLAG,Fortran compiler flag for specifying module search path) -AC_LANG_PUSH(Fortran) -AX_FC_VERSION() -AC_LANG_POP(Fortran) - - -if echo $ac_fc_version_output | grep -i absoft >/dev/null 2>&1; then - echo "Fortran Compiler is Absoft" - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-p" - fi -elif echo $ac_fc_version_output | grep -i workshop >/dev/null 2>&1; then - echo "Fortran Compiler is Workshop" - if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-M" - fi -elif echo $ac_fc_version_output | grep -i pgf >/dev/null 2>&1; then - echo "Fortran Compiler is Portland Group" - LIBS="$LIBS -pgf90libs" -elif echo $ac_fc_version_output | grep -i nag >/dev/null 2>&1; then - echo "Fortran Compiler is NAG" - CPRDEF="NAG" - if test -z "$FCFLAGS"; then - FCFLAGS="-mismatch" - fi -fi -# INCLUDE FLAG IF NOT ALREADY SET IS MOST LIKELY -I -if test -z "$INCLUDEFLAG"; then - INCLUDEFLAG="-I" -fi - - -AC_OUTPUT(Makefile.conf) diff --git a/src/externals/mct/mpi-serial/copy.c b/src/externals/mct/mpi-serial/copy.c deleted file mode 100644 index 66d4d07efcb..00000000000 --- a/src/externals/mct/mpi-serial/copy.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * copy.c - * - * memcpy "wrapper" to copy MPI Datatypes - * - */ - -#include "mpiP.h" -#include "type.h" -#include -#include -#include -#include - -//For type matching -#ifdef HAVE_CONFIG_H -#include -#endif - -/* - * rml: this prototype should be in mpiP.h, but mpiP.h does not currently - * include type.h so it can't just be added right now. Come back and - * fix this issue later... - */ - -extern int Pcopy_data2(void *source, int src_count, Datatype src_type, - void *dest, int dest_count, Datatype dest_type); - - -int copy_data2(void *source, int src_count, MPI_Datatype src_type, - void *dest, int dest_count, MPI_Datatype dest_type) -{ - Datatype src_ptr = *(Datatype*) mpi_handle_to_datatype(src_type); - Datatype dest_ptr = *(Datatype*) mpi_handle_to_datatype(dest_type); - - return Pcopy_data2(source, src_count, src_ptr, dest, dest_count, dest_ptr); -} - - - - -int Pcopy_data2(void *source, int src_count, Datatype src_type, - void *dest, int dest_count, Datatype dest_type) -{ - int i; - int soffset, doffset; - MPI_Aint src_extent, dest_extent; - - //commit checking here, since if any datatype is used in this function - // it is considered "communication". Should it be somewhere else? 
- - if (!(src_type->committed && dest_type->committed)) - { - fprintf(stderr, "Type not committed\n"); - exit(-1); - } - - // A receive of less elements than sent - // is valid, but the reverse is a violation - - if (src_type->count * src_count < dest_type->count * dest_count) - { - printf("copy_data: Trying to over-receive\n"); - exit(1); - } - - Type_extent(src_type, &src_extent); - Type_extent(dest_type, &dest_extent); - - for (i = 0; i < dest_count * dest_type->count; i++) - { - -#ifdef TYPE_CHECKING - if ( src_type->pairs[i % src_type->count].type != - dest_type->pairs[i % dest_type->count].type) - { - printf("copy_data: Types don't match.\n"); - exit(1); - } -#endif - - soffset = src_type->pairs[i % src_type->count].disp + ((i / src_type->count) * src_extent); - doffset = dest_type->pairs[i % dest_type->count].disp + ((i / dest_type->count) * dest_extent); - - memcpy(dest+doffset, source+soffset, Simpletype_length(dest_type->pairs[i % dest_type->count].type)); - } -} - - - - diff --git a/src/externals/mct/mpi-serial/error.c b/src/externals/mct/mpi-serial/error.c deleted file mode 100644 index d26cfd164f9..00000000000 --- a/src/externals/mct/mpi-serial/error.c +++ /dev/null @@ -1,13 +0,0 @@ - -#include "mpiP.h" - -/* - * Error handling code - * Just a stub for now to support the MPI interface without actually - * doing anything - */ - - int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler handle) - { - return(MPI_SUCCESS); - } diff --git a/src/externals/mct/mpi-serial/fort.F90 b/src/externals/mct/mpi-serial/fort.F90 deleted file mode 100644 index f07e51855b4..00000000000 --- a/src/externals/mct/mpi-serial/fort.F90 +++ /dev/null @@ -1,62 +0,0 @@ - - - subroutine mpi_init(ierror) - - implicit none - include "mpif.h" - - integer fint(2) - logical flog(2) - real freal(2) - double precision fdub(2) - complex fcomp(2) - integer status(MPI_STATUS_SIZE) - - integer ierror - - - !! - !! Pass values from mpif.h to the C side - !! to check for consistency mpi.h and hardware sizes. - !! - - call mpi_init_fort( MPI_COMM_WORLD, & - MPI_ANY_SOURCE, MPI_ANY_TAG, & - MPI_PROC_NULL, MPI_ROOT, & - MPI_COMM_NULL, MPI_REQUEST_NULL, & - MPI_GROUP_NULL, MPI_GROUP_EMPTY, & - MPI_UNDEFINED, & - MPI_MAX_ERROR_STRING, & - MPI_MAX_PROCESSOR_NAME, & - MPI_STATUS_SIZE, & - MPI_SOURCE, MPI_TAG, MPI_ERROR, & - status, status(MPI_SOURCE), & - status(MPI_TAG), status(MPI_ERROR), & - MPI_INTEGER, fint(1), fint(2), & - MPI_LOGICAL, flog(1), flog(2), & - MPI_REAL, freal(1), freal(2), & - MPI_DOUBLE_PRECISION, fdub(1), fdub(2), & - MPI_COMPLEX, fcomp(1), fcomp(2), & - IERROR ) - - - return - end - - -! -! mpi_get_fort_pointers -! -! In Fortran, various values e.g. MPI_STATUS_IGNORE, MPI_STATUSES_IGNORE, -! and MPI_IN_PLACE are in a COMMON block and not accessible by C code. -! This routine calls back a C routine to store the addresses. -! 
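[Editor's note] The copy loop in Pcopy_data2 above turns each (typemap pair, repetition) index into a byte offset: the pair's displacement plus the repetition number times the datatype extent. A small self-contained sketch of that arithmetic, using made-up displacements and an made-up extent (none of these values come from the mpi-serial headers):

    /* Offset arithmetic as in Pcopy_data2, on a hypothetical two-entry
     * typemap {disp 0, disp 8} with an extent of 16 bytes. */
    #include <stdio.h>

    int main(void)
    {
        int disp[2] = {0, 8};   /* displacements of the typemap pairs      */
        int count   = 2;        /* pairs per datatype                      */
        int extent  = 16;       /* bytes from one datatype to the next     */
        int nitems  = 2;        /* how many datatypes are being copied     */

        for (int i = 0; i < nitems * count; i++) {
            int off = disp[i % count] + (i / count) * extent;
            printf("element %d -> byte offset %d\n", i, off);  /* 0, 8, 16, 24 */
        }
        return 0;
    }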
- - subroutine mpi_get_fort_pointers - implicit none - include "mpif.h" - - call mpi_save_fort_pointers(MPI_STATUS_IGNORE,MPI_STATUSES_IGNORE,MPI_IN_PLACE) - - end subroutine mpi_get_fort_pointers - diff --git a/src/externals/mct/mpi-serial/getcount.c b/src/externals/mct/mpi-serial/getcount.c deleted file mode 100644 index 1313a7cca3d..00000000000 --- a/src/externals/mct/mpi-serial/getcount.c +++ /dev/null @@ -1,40 +0,0 @@ -/* getcount.c - * - * 07/2007 JCY - * Functions for count information regarding MPI_Status - */ - -#include "type.h" -#include "mpiP.h" - - -FC_FUNC( mpi_get_count , MPI_GET_COUNT ) - (int *status, int *datatype, int *count, int *ierr) -{ - *ierr = MPI_Get_count((MPI_Status *)status, *datatype, count); -} - - -int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count) -{ - *count = status->get_count; -} - - -/********/ - - -FC_FUNC( mpi_get_elements , MPI_GET_ELEMENTS ) - (MPI_Status *status, int *datatype, int *count, int *ierr) -{ - *ierr = MPI_Get_elements(status, *datatype, count); -} - - -int MPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *count) -{ - Datatype dt_ptr = *(Datatype*)mpi_handle_to_datatype(datatype); - *count = status->get_count * dt_ptr->count; -} - - diff --git a/src/externals/mct/mpi-serial/group.c b/src/externals/mct/mpi-serial/group.c deleted file mode 100644 index cec4879f4b6..00000000000 --- a/src/externals/mct/mpi-serial/group.c +++ /dev/null @@ -1,264 +0,0 @@ - -#include "mpiP.h" - - -/*********/ - - -FC_FUNC( mpi_group_incl, MPI_GROUP_INCL ) - (int *group, int *n, int *ranks, int *newgroup, int *ierror) -{ - *ierror= MPI_Group_incl(*group, *n, ranks, newgroup); -} - - -int MPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group *newgroup) -{ - - if (group==MPI_GROUP_NULL) - { - fprintf(stderr,"MPI_Group_incl: null group passed in\n"); - abort(); - } - - if (group==MPI_GROUP_EMPTY || n==0) - *newgroup=MPI_GROUP_EMPTY; - else - if (n==1 && ranks[0]==0) - *newgroup=MPI_GROUP_ONE; - else - { - fprintf(stderr,"MPI_Group_incl: more than 1 proc in group\n"); - abort(); - } - - return(MPI_SUCCESS); -} - - -/*********/ - - -/* MPI_Group_range_incl - * Include a strided range of ranks in a group. For one processor, if - * "0" is included in any of these ranges, it can only be the first rank. 
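[Editor's note] The MPI_Get_count / MPI_Get_elements stubs deleted above differ only by a factor of the datatype's basic-pair count. A tiny illustration with made-up numbers (nothing here reads a real MPI_Status):

    /* Illustrative numbers only: the relationship implemented by the
     * getcount.c stubs above. */
    #include <stdio.h>

    int main(void)
    {
        int get_count  = 2;   /* stands in for status->get_count            */
        int type_count = 3;   /* stands in for dt_ptr->count (basic pairs)  */

        printf("MPI_Get_count    -> %d\n", get_count);               /* 2 */
        printf("MPI_Get_elements -> %d\n", get_count * type_count);  /* 6 */
        return 0;
    }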
- * Thus, if rank 0 is specified, include it, otherwise use GROUP_NULL - */ - - -FC_FUNC( mpi_group_range_incl, MPI_GROUP_RANGE_INCL ) - (int *group, int *n, int ranges[][3], int *newgroup, int *ierror) -{ - *ierror= MPI_Group_range_incl(*group, *n, ranges, newgroup); -} - - -int MPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], - MPI_Group *newgroup) -{ - - if (group==MPI_GROUP_NULL) - { - fprintf(stderr,"MPI_Group_range_incl: null group passed in\n"); - abort(); - } - - if (group==MPI_GROUP_EMPTY || n==0) - *newgroup=MPI_GROUP_EMPTY; - else - if (n==1 && ranges[0][0]==0 && ranges[0][1]==0) - *newgroup=MPI_GROUP_ONE; - else - { - fprintf(stderr,"MPI_Group_range_incl: more than 1 proc in group\n"); - abort(); - } - - return(MPI_SUCCESS); -} - - - - -/*********/ - - - -FC_FUNC( mpi_group_union, MPI_GROUP_UNION ) - (int *group1, int *group2, int *newgroup, int *ierror) -{ - *ierror= MPI_Group_union(*group1,*group2,newgroup); -} - - - -int MPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group *newgroup) -{ - - if (group1==MPI_GROUP_NULL || group2==MPI_GROUP_NULL) - { - fprintf(stderr,"MPI_Group_union: null group passed in\n"); - abort(); - } - - if (group1==MPI_GROUP_ONE || group2==MPI_GROUP_ONE) - *newgroup=MPI_GROUP_ONE; - else - *newgroup=MPI_GROUP_EMPTY; - - - return(MPI_SUCCESS); -} - -/*********/ - - - -FC_FUNC( mpi_group_intersection, MPI_GROUP_INTERSECTION ) - (int *group1, int *group2, int *newgroup, int *ierror) -{ - *ierror= MPI_Group_intersection(*group1,*group2,newgroup); -} - - - -int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, - MPI_Group *newgroup) -{ - - if (group1==MPI_GROUP_NULL || group2==MPI_GROUP_NULL) - { - fprintf(stderr,"MPI_Group_intersection: null group passed in\n"); - abort(); - } - - if (group1==MPI_GROUP_ONE && group2==MPI_GROUP_ONE) - *newgroup=MPI_GROUP_ONE; - else - *newgroup=MPI_GROUP_EMPTY; - - - return(MPI_SUCCESS); -} - - -/*********/ - - - -FC_FUNC( mpi_group_difference, MPI_GROUP_DIFFERENCE ) - (int *group1, int *group2, int *newgroup, int *ierror) -{ - *ierror= MPI_Group_difference(*group1,*group2,newgroup); -} - - - -int MPI_Group_difference(MPI_Group group1, MPI_Group group2, - MPI_Group *newgroup) -{ - - if (group1==MPI_GROUP_NULL || group2==MPI_GROUP_NULL) - { - fprintf(stderr,"MPI_Group_intersection: null group passed in\n"); - abort(); - } - - if (group1==MPI_GROUP_EMPTY || group2==MPI_GROUP_ONE) - *newgroup=MPI_GROUP_EMPTY; - else - *newgroup=MPI_GROUP_ONE; - - return(MPI_SUCCESS); -} - - - -/*********/ - - -FC_FUNC( mpi_group_free, MPI_GROUP_FREE )(int *group, int *ierror) -{ - *ierror= MPI_Group_free(group); -} - - -int MPI_Group_free(MPI_Group *group) -{ - *group= MPI_GROUP_NULL; - - return(MPI_SUCCESS); -} - - -/*********/ - - - -FC_FUNC( mpi_group_translate_ranks, MPI_GROUP_TRANSLATE_RANKS ) - ( int *group1, int *n, int *ranks1, - int *group2, int *ranks2, int *ierror) -{ - *ierror= MPI_Group_translate_ranks(*group1,*n,ranks1,*group2,ranks2); -} - - - -int MPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, - MPI_Group group2, int *ranks2) -{ - int i; - - if (group1==MPI_GROUP_NULL || group2==MPI_GROUP_NULL) - { - fprintf(stderr,"MPI_Group_translate_ranks: null group passed in\n"); - abort(); - } - - if (n==0) - return(MPI_SUCCESS); - - if (group1==MPI_GROUP_EMPTY) - { - fprintf(stderr,"MPI_Group_translate_ranks: empty input group\n"); - abort(); - } - - for (i=0; i simplified and store item directly in the struct - * rather than as pointer to separately allocated object. 
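[Editor's note] The group stubs above only ever describe a single process: any group operation collapses to MPI_GROUP_ONE or MPI_GROUP_EMPTY, and anything that would require a second rank aborts. A usage sketch of that behaviour (illustrative only; MPI_Comm_group is the standard entry point and is not shown in this excerpt):

    /* How the single-rank group stubs behave from caller code. */
    #include "mpi.h"

    void group_demo(void)
    {
        MPI_Group world, sub;
        int ranks[1] = {0};

        MPI_Comm_group(MPI_COMM_WORLD, &world);   /* the only group with a member   */
        MPI_Group_incl(world, 1, ranks, &sub);    /* rank 0 only -> MPI_GROUP_ONE   */
        /* Asking for any rank other than 0, or for more than one rank,            */
        /* aborts: the serial library never has a second process.                  */
        MPI_Group_free(&sub);                     /* resets sub to MPI_GROUP_NULL   */
    }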
- * - * CAVEAT: - * as in mpich-1, storage will grow as needed and will - * remain at the high water mark since it is likely that - * the user code will repeat the use. - * - */ - - -typedef struct _Handleitem -{ - int handle; - struct _Handleitem *next; - - union - { - void *anything; /* At least size of void * */ - Comm comm; - Req req; - Datatype* type; - - } data; - - -} Handleitem; - - -/* - * These must be consistent with each other - * - */ - -#define BLOCK_ITEMS (256) -#define HANDLE_TO_BLOCK(x) ( (x) >> 8) -#define HANDLE_TO_INDEX(x) ( (x) & 0xff ) -#define HANDLE(block,index) ( (block << 8) | (index) ) - - -/* - * The first block of handle items will be statically allocated. - * Subsequent ones will be added if necessary. - * blocks[0..nblocks-1] are allocated at any given time. - * - * Increase MAX_BLOCKS if you *really* need more active request - * (Although probably something is wrong if you need more than 256k !!!) - * - */ - - -#define MAX_BLOCKS (1024) - -static Handleitem block0[BLOCK_ITEMS]; /* array of handleitems */ -static Handleitem *(blocks[MAX_BLOCKS]); /* array of pointers to blocks */ -static int nblocks; - - -static int need_to_init=1; -static Handleitem *nextfree; - - -/************************************************************************/ - -void *mpi_malloc(int size) -{ - void *ret; - - ret=malloc(size); - - if (!ret) - { - fprintf(stderr,"mpi_malloc: failed to allocate %d bytes\n",size); - abort(); - } - - return(ret); -} - - -void mpi_free(void *ptr) -{ - free(ptr); -} - - -/************************************************************************/ - - -/* - * initialize a block s.t. handles are set and - * 0 -> 1 -> 2 ... -> (BLOCK_ITEMS-1) -> NULL - * - */ - -static Handleitem *init_block(int block, Handleitem *b) -{ - int i; - - for (i=0; inext; /* Skip over using item 0 */ - new->next=NULL; - - /* - * initialize the array of blocks - * - */ - - blocks[0]=block0; - nblocks=1; - - for (i=1; inext; - new->next=NULL; - - *handle= new->handle; - *data= &(new->data); - - return; - } - - /* there is nothing free, so allocate a new block and add it - * to blocks[] - */ - - if (nblocks==MAX_BLOCKS) - { - fprintf(stderr,"mpi_allocate_handle: max %d active handles exceeded\n", - MAX_BLOCKS*BLOCK_ITEMS); - abort(); - } - - blocks[nblocks]= (Handleitem *)mpi_malloc(sizeof(Handleitem)* BLOCK_ITEMS); - new=init_block(nblocks,blocks[nblocks]); - - nextfree= new->next; - new->next=NULL; - - *handle= new->handle; - *data= &(new->data); - - nblocks++; /* DON'T FORGET THIS!!!! 
*/ - -#ifdef HANDLE_INFO - fflush(stdout); - fprintf(stderr,"mpi_alloc_handle: allocation %d blocks (%d handles)\n", - nblocks,nblocks*BLOCK_ITEMS); -#endif - -} - - - - -static void verify_handle(int handle, int block, int index) -{ - if (block>=nblocks || block<0 || - index>=BLOCK_ITEMS || index<0) - { - fprintf(stderr,"mpi_verify_handle: bad handle\n"); - abort(); - } - - if (blocks[block][index].handle != handle) - { - fprintf(stderr,"mpi_verify_handle: handle mismatch\n"); - abort(); - } -} - -void *mpi_handle_to_ptr(int handle) -{ - int block; - int index; - - if (need_to_init) - init_handles(); - - if (!handle) /* Handle 0 -> NULL */ - return(NULL); - - block=HANDLE_TO_BLOCK(handle); - index=HANDLE_TO_INDEX(handle); - -#ifdef CHECKS - verify_handle(handle,block,index); -#endif - - return( &(blocks[block][index].data) ); -} - - - -void mpi_free_handle(int handle) -{ - int block; - int index; - Handleitem *item; - - if (!handle) /* ignore null handle */ - return; - - if (need_to_init) - { - fprintf(stderr,"mpi_free_handle: handles not initialized\n"); - abort(); - } - - block=HANDLE_TO_BLOCK(handle); - index=HANDLE_TO_INDEX(handle); - -#ifdef CHECKS - verify_handle(handle,block,index); -#endif - - item=&(blocks[block][index]); - -#ifdef CHECKS - if (item->next) - { - fprintf(stderr,"mpi_free_handle: handle still in use\n"); - abort(); - } -#endif - - - /* just return it to the free list. - * space is not reclaimed. - */ - - item->next=nextfree; - nextfree=item; -} diff --git a/src/externals/mct/mpi-serial/ic_merge.c b/src/externals/mct/mpi-serial/ic_merge.c deleted file mode 100644 index ea19b387155..00000000000 --- a/src/externals/mct/mpi-serial/ic_merge.c +++ /dev/null @@ -1,15 +0,0 @@ - -#include "mpiP.h" - -/* - * MPI_Intercomm_merge - Creates an intracommunicator from an intercommunicator - * This is just a stub for now to support mpi function calls even in Serial - * applications. 
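[Editor's note] The handle allocator deleted above packs a block number and an intra-block index into a single int, with 256 items per block and block 0 statically allocated. A quick standalone round-trip of the same encoding (the macro bodies are copied from the code above; the rest is illustrative and touches none of the allocator's state):

    #include <stdio.h>

    #define BLOCK_ITEMS            (256)
    #define HANDLE_TO_BLOCK(x)     ((x) >> 8)
    #define HANDLE_TO_INDEX(x)     ((x) & 0xff)
    #define HANDLE(block, index)   (((block) << 8) | (index))

    int main(void)
    {
        int handle = HANDLE(3, 42);                        /* item 42 in block 3   */
        printf("handle = %d\n", handle);                   /* 3*256 + 42 = 810     */
        printf("block  = %d\n", HANDLE_TO_BLOCK(handle));  /* 3                    */
        printf("index  = %d\n", HANDLE_TO_INDEX(handle));  /* 42                   */
        /* Handle 0 is never handed out: item 0 of block 0 is skipped so a       */
        /* zero handle can stand for NULL in mpi_handle_to_ptr().                */
        return 0;
    }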
In the case of a serial program, this function is a no-op and - * only ever returns MPI_SUCCESS - */ - -int MPI_Intercomm_merge( MPI_Comm intercomm, int high, MPI_Comm *newintracomm ) -{ - newintracomm = (MPI_Comm *)intercomm; - return(MPI_SUCCESS); -} diff --git a/src/externals/mct/mpi-serial/info.c b/src/externals/mct/mpi-serial/info.c deleted file mode 100644 index 32593cb7937..00000000000 --- a/src/externals/mct/mpi-serial/info.c +++ /dev/null @@ -1,53 +0,0 @@ - -#include "mpiP.h" - - - -/***/ - - -FC_FUNC( mpi_info_create , MPI_INFO_CREATE ) (int *info, int *ierror) -{ - *ierror=MPI_Info_create(info); -} - - - -int MPI_Info_create(MPI_Info *info) -{ - /* For now, we aren't storing anything, so don't bother with a real handle */ - *info=0; - return(MPI_SUCCESS); -} - - -/***/ - - -FC_FUNC( mpi_info_set , MPI_INFO_SET ) (int *info, char *key, char *value, int *ierror) -{ - *ierror=MPI_Info_set(*info, key, value); -} - - -int MPI_Info_set(MPI_Info info, char *key, char *value) -{ - /* for now, don't bother storing anything */ - return(MPI_SUCCESS); -} - -/***/ - -FC_FUNC( mpi_info_free , MPI_INFO_FREE ) (int *info, int *ierror) -{ - *ierror=MPI_Info_free(info); -} - - - -int MPI_Info_free(MPI_Info *info) -{ - /* For now, we aren't storing anything, so don't bother with a real handle */ - *info=0; - return(MPI_SUCCESS); -} diff --git a/src/externals/mct/mpi-serial/list.c b/src/externals/mct/mpi-serial/list.c deleted file mode 100644 index 90ef049b75f..00000000000 --- a/src/externals/mct/mpi-serial/list.c +++ /dev/null @@ -1,705 +0,0 @@ -/* - * (C) 2000 UNIVERSITY OF CHICAGO - * See COPYRIGHT in top-level directory. - */ - - - -#include -#include -#include "listops.h" -#include "listP.h" - -/* - * list management code - * - * For storing singly-linked lists of pointers. - * - */ - - -static int itemcount=0; -static int headcount=0; - - -/* - * AP_listitem_malloc() - * - * malloc a new ilist item and return a pointer to it. - * - */ - -static pListitem AP_listitem_malloc(void) -{ - pListitem item; - - itemcount++; - item=(pListitem)malloc( (unsigned) sizeof(Listitem) ); - - if (!item) - { - perror("AP_listitem_malloc: malloc failure"); - abort(); - } - - return(item); -} - - - -/* - * AP_listitem_free(listitem) - * - * Free a listitem generated by AP_listitem_malloc() - * - */ - -static void AP_listitem_free(pListitem listitem) -{ - free(listitem); - itemcount--; -} - - - -/* - * AP_listitem_verify(void) - * - * Checks to see if there are any outstanding listitems that have been - * malloc'd. Returns true if there are any. 
- * - */ - -int AP_listitem_verify(void) -{ - if (itemcount!=0) - fprintf(stderr,"AP_list_verify: outstanding items, count=%d\n", - itemcount); - - if (headcount!=0) - fprintf(stderr,"AP_list_verify: outstanding lists, count=%d\n", - headcount); - - return( (itemcount!=0) || (headcount!=0) ); -} - - - - -pListitem AP_listitem_prev(pListitem listitem) -{ - return(listitem->prev); -} - - - -pListitem AP_listitem_next(pListitem listitem) -{ - return(listitem->next); -} - - - - -void *AP_listitem_data(pListitem listitem) -{ - return(listitem->data ); -} - - - - -/***************************************************************/ - - - -/* - * AP_list_new(void) - * - * allocate an empty list return a pointer to it - * - */ - -pList AP_list_new(void) -{ - pList list; - - list=(pList)malloc(sizeof(List)); - - if (!list) - { - perror("AP_list_new: malloc failure\n"); - abort(); - } - - list->head=NULL; - list->tail=NULL; - list->count=0; - - headcount++; - return(list); -} - - - - - -/* - * AP_list_free(list) - * - * Free an entire list - * - */ - -void AP_list_free(pList list) -{ - pListitem next,cur; - int count; - - count=0; - cur=list->head; - - while(cur) - { - next=cur->next; - - AP_listitem_free(cur); - count++; - - cur=next; - } - - if (count!=list->count) - { - fprintf(stderr,"AP_list_free: count %d does not match actual length %d\n", - list->count,count); - abort(); - } - - headcount--; - free(list); -} - - - -/* - * AP_list_size(list) - * - * return the number of items in an ilist - * - */ - -int AP_list_size(pList list) -{ - return(list->count); -} - - - -/* - * AP_list_prepend(list,data) - * - * Prepend item to the front of list. - * - */ - -pListitem AP_list_prepend(pList list, void *data) -{ - pListitem new; - - new=AP_listitem_malloc(); - - new->data=data; - new->prev=NULL; - new->next=list->head; - -#ifdef CHECKS - new->list=list; -#endif - - if (list->head) - list->head->prev=new; - - list->head=new; - if (!list->tail) - list->tail=new; - - (list->count)++; - - return(new); -} - - - -/* - * AP_list_append(list,data) - * - * append item to end of list - * - */ - -pListitem AP_list_append(pList list, void *data) -{ - pListitem new; - - new=AP_listitem_malloc(); - new->data=data; - new->prev=list->tail; - new->next= NULL; - -#ifdef CHECKS - new->list= list; -#endif - - if (list->tail) - list->tail->next=new; - else - list->head=new; - - list->tail=new; - (list->count)++; - - return(new); -} - - - - - -/* - * AP_list_delete(list,data) - * - * delete item from list; return TRUE if successful - * - */ - -int AP_list_delete(pList list, void *data) -{ - pListitem item; - - if (item=AP_list_search(list,data)) - { - AP_list_delete_item(list,item); - return(1); - } - - return(0); -} - - - -void AP_list_delete_item(pList list, pListitem item) -{ - -#ifdef CHECKS - if (item->list != list) - { - fprintf(stderr,"AP_list_delete_item: item is not in list\n"); - abort(); - } -#endif - - /* set pointer of prior listitem */ - - if (item == list->head) - list->head = item->next; - else - item->prev->next = item->next; - - /* set pointer of following listitem */ - - if (item == list->tail) - list->tail = item->prev; - else - item->next->prev = item->prev; - - AP_listitem_free(item); - (list->count)--; -} - - - - -pListitem AP_list_head_item(pList list) -{ - return(list->head); -} - - - -int AP_list_head(pList list, void **data) -{ - if (list->head) - { - *data=list->head->data; - return(1); - } - else - return(0); -} - - - -int AP_list_tail(pList list, void **data) -{ - if (list->tail) - { - 
*data=list->tail->data; - return(1); - } - else - return(0); -} - - - - - -/* - * AP_list_print(str,list) - * - * Print out the message string followed by the - * items in the list - * - */ - -void AP_list_print(char *str, pList list) -{ - pListitem cur; - - printf("%s (%d items): ",str,list->count); - - cur=list->head; - while(cur) - { - printf("%d ",(long int)cur->data); - cur=cur->next; - } - - printf("\n"); -} - - - - -/* - * AP_list_revprint(str,list) - * - * Print out the message string followed by the - * items in the list - * - */ - -void AP_list_revprint(char *str, pList list) -{ - pListitem cur; - - printf("%s (%d items): ",str,list->count); - - cur=list->tail; - while(cur) - { - printf("%d ",(long int)cur->data); - cur=cur->prev; - } - - printf("\n"); -} - - - - -/* - * AP_list_search(list,data) - * - * Returns listitem if item appears in the list, otherwise NULL. - * - */ - - -pListitem AP_list_search(pList list, void *data) -{ - pListitem cur; - - cur=list->head; - - while (cur) - { - if (cur->data == data) - return(cur); - - cur=cur->next; - } - - return(NULL); -} - - -/* - * AP_list_search_func(list,func,data) - * - * Returns listitem if func(listitem->data,data) returns true - * - */ - - -pListitem AP_list_search_func(pList list, - int (*func)(void *item_data, void *fixed_data), - void *fixed_data) -{ - pListitem cur; - - cur=list->head; - - while (cur) - { - if ( (*func)(cur->data,fixed_data) ) - return(cur); - - cur=cur->next; - } - - return(NULL); -} - - - -/* - * AP_list_next(list,data,temp) - * - * like PList_next() except handles NULL pointers properly. - * - * initially, pass in (void **) NULL in 'temp' - * returns next list item through 'item' - * returns nonzero if there is a next item - * - */ - -int AP_list_next(pList list, void **data, void **temp) -{ - pListitem cur; - - if (*temp) /* temp is previous item */ - { - cur=(pListitem)(*temp); - cur=cur->next; - } - else /* First item */ - cur=list->head; - - if (cur) - { - *temp=(void *)cur; - *data=cur->data; - return(1); - } - else - return(0); -} - - -/* - * Compatibility routine for scorec list traversal - * Does not provide any way to differentiate - * between NULL in the list, and the end of the list - * - */ - -void *AP_list_braindead_next(pList list, void **temp) -{ - void *item; - - if (AP_list_next(list,&item,temp)) - return(item); - else - return(NULL); -} - - - -/* - * AP_list_duplicate(list) - * - * return a copy of the list - * (Note: caller is responsible for freeing this list) - * - */ - -pList AP_list_duplicate(pList list) -{ - pList newlist; - pListitem cur,new,prev; - - newlist=AP_list_new(); - prev=NULL; - - cur=list->head; - while(cur) - { - new=AP_listitem_malloc(); - new->data=cur->data; - new->prev=prev; - - if (prev) - prev->next=new; - else - newlist->head=new; - - prev=new; - - cur=cur->next; - } - - if (prev) - prev->next=NULL; - - newlist->tail=prev; - newlist->count=list->count; - return(newlist); -} - - - -int AP_list_apply(pList list, - int (*func)(void *item_data, void *fixed_data), - void *fixed_data) -{ - pListitem cur; - int total; - - total=0; - cur=list->head; - - while (cur) - { - total += (*func)(cur->data,fixed_data); - - cur=cur->next; - } - - return(total); -} - - - - -/* - * main for debugging - * - */ - - -#ifdef LISTMAIN - -int main() -{ - pList mylist, list2; - int i; - void *temp,*item; - pListitem next; - - mylist=AP_list_new(); - - for (i=1; i<10; i++) - { - AP_list_prepend(mylist,(void *)i); - AP_list_print("current",mylist); - AP_list_revprint(" rev",mylist); - } 
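    /*
     * Later in this test the list is walked in two ways:
     *   - AP_list_next(list, &item, &temp): cursor-style traversal; the
     *     void* cursor 'temp' starts as NULL, is advanced by each call,
     *     and the nonzero return value distinguishes end-of-list from a
     *     stored NULL pointer (unlike AP_list_braindead_next()).
     *   - AP_list_head_item()/AP_listitem_next(): walking the pListitem
     *     nodes directly and reading each payload with AP_listitem_data().
     */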
- - printf("Size %d\n",AP_list_size(mylist)); - - for (i=10; i<15; i++) - { - AP_list_append(mylist,(void *)i); - AP_list_print("new",mylist); - AP_list_revprint(" rev",mylist); - } - - AP_list_delete(mylist,(void *)5); - AP_list_print("less 5",mylist); - AP_list_revprint(" rev",mylist); - - AP_list_delete(mylist,(void *)9); - AP_list_print("less 9",mylist); - AP_list_revprint(" rev",mylist); - - AP_list_delete(mylist,(void *)14); - AP_list_print("less 14",mylist); - AP_list_revprint(" rev",mylist); - - AP_list_delete(mylist,(void *)2); - AP_list_print("less 2",mylist); - AP_list_revprint(" rev",mylist); - - if (!AP_list_delete(mylist,(void *)0)) - printf("(did not delete 0)\n"); - else - printf("ERROR - found 0\n"); - AP_list_print("less 0",mylist); - AP_list_revprint(" rev",mylist); - - if (AP_list_search(mylist,(void *)4)) - printf("Found 4\n"); - else - printf("Did not find 4\n"); - - if (AP_list_search(mylist,(void *)9)) - printf("Found 9\n"); - else - printf("Did not find 9\n"); - - printf("Traversal by AP_list_next()\n"); - temp=NULL; - while (AP_list_next(mylist,&item,&temp)) - printf(" Got item %d\n",(int)item); - - printf("Traversal by AP_listitem_next()\n"); - for (item=AP_list_head_item(mylist); item; item=AP_listitem_next(item)) - printf(" Got item %d\n",(int)(AP_listitem_data(item))); - - - list2=AP_list_duplicate(mylist); - AP_list_print("Original list",mylist); - AP_list_revprint(" rev",mylist); - AP_list_print("Duplicate ",list2); - AP_list_revprint(" rev",list2); - - AP_list_append(list2,(void *)99); - AP_list_print("Dup add 99 ",list2); - AP_list_revprint(" rev",list2); - - - printf("Traversal by AP_listitem_next(), deleting\n"); - i=0; - for (item=AP_list_head_item(list2); item; ) - { - printf(" Got item %d",(int)(AP_listitem_data(item))); - - next=AP_listitem_next(item); - - if (i%2) - { - AP_list_delete_item(list2,item); - printf(" - deleted\n"); - } - else - printf("\n"); - - item=next; - i++; - } - - AP_list_print("After delete-traversal",list2); - - AP_list_free(mylist); - AP_list_print("After del ",list2); - AP_list_revprint(" rev",list2); - - AP_list_free(list2); - - AP_listitem_verify(); - - return(0); -} -#endif diff --git a/src/externals/mct/mpi-serial/list.h b/src/externals/mct/mpi-serial/list.h deleted file mode 100644 index 3d533fef613..00000000000 --- a/src/externals/mct/mpi-serial/list.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * (C) 2000 UNIVERSITY OF CHICAGO - * See COPYRIGHT in top-level directory. - */ - - - - - -/****************************************************** - * WARNING: This file automatically generated. * - * Do not edit by hand. 
* - ****************************************************** - */ - - - - -extern int AP_listitem_verify(void); -extern pListitem AP_listitem_prev(pListitem listitem); -extern pListitem AP_listitem_next(pListitem listitem); -extern void *AP_listitem_data(pListitem listitem); -extern pList AP_list_new(void); -extern void AP_list_free(pList list); -extern int AP_list_size(pList list); -extern pListitem AP_list_prepend(pList list, void *data); -extern pListitem AP_list_append(pList list, void *data); -extern int AP_list_delete(pList list, void *data); -extern void AP_list_delete_item(pList list, pListitem item); -extern pListitem AP_list_head_item(pList list); -extern int AP_list_head(pList list, void **data); -extern int AP_list_tail(pList list, void **data); -extern void AP_list_print(char *str, pList list); -extern void AP_list_revprint(char *str, pList list); -extern pListitem AP_list_search(pList list, void *data); -extern int AP_list_next(pList list, void **data, void **temp); -extern void *AP_list_braindead_next(pList list, void **temp); -extern pList AP_list_duplicate(pList list); - - -extern pListitem AP_list_search_func(pList list, int (*func)(void *i, void *j),void *data); - -extern int AP_list_apply(pList list, int (*func)(void *item_data, void *fixed_data), void *data); - - diff --git a/src/externals/mct/mpi-serial/listP.h b/src/externals/mct/mpi-serial/listP.h deleted file mode 100644 index 2fa9e859612..00000000000 --- a/src/externals/mct/mpi-serial/listP.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * (C) 2000 UNIVERSITY OF CHICAGO - * See COPYRIGHT in top-level directory. - */ - - - -/* - * Private data structures for the list - * - */ - - -typedef struct _List -{ - pListitem head; - pListitem tail; - int count; -} List; - - -typedef struct _Listitem -{ - void *data; - pListitem prev; - pListitem next; - -#ifdef CHECKS - pList list; -#endif - -} Listitem; - diff --git a/src/externals/mct/mpi-serial/listops.h b/src/externals/mct/mpi-serial/listops.h deleted file mode 100644 index fa0ef725751..00000000000 --- a/src/externals/mct/mpi-serial/listops.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * (C) 2000 UNIVERSITY OF CHICAGO - * See COPYRIGHT in top-level directory. - */ - - - - -#ifndef _listops_h -#define _listops_h - -/* - * Support for singly-linked list of pointers (or ints) - * - */ - - -typedef struct _List *pList; -typedef struct _Listitem *pListitem; - -#include "list.h" - -#endif diff --git a/src/externals/mct/mpi-serial/m4/README b/src/externals/mct/mpi-serial/m4/README deleted file mode 100644 index b748178e2c7..00000000000 --- a/src/externals/mct/mpi-serial/m4/README +++ /dev/null @@ -1,5 +0,0 @@ -This directory contains some specific tests used in the MCT autoconf system. -They are placed here to make the configure.ac a little cleaner. - -These are only needed if you are trying to recreate the "configure" script from -the "configure.ac" file. diff --git a/src/externals/mct/mpi-serial/m4/ax_fc_version.m4 b/src/externals/mct/mpi-serial/m4/ax_fc_version.m4 deleted file mode 100644 index c7e2eaec3c7..00000000000 --- a/src/externals/mct/mpi-serial/m4/ax_fc_version.m4 +++ /dev/null @@ -1,51 +0,0 @@ -#AX_FC_VERSION_OUTPUT([FLAG = $ac_cv_prog_fc_version]) -# ------------------------------------------------- -# Link a trivial Fortran program, compiling with a version output FLAG -# (which default value, $ac_cv_prog_fc_version, is computed by -# AX_FC_VERSION), and return the output in $ac_fc_version_output. 
-AC_DEFUN([AX_FC_VERSION_OUTPUT], -[AC_REQUIRE([AC_PROG_FC])dnl -AC_LANG_PUSH(Fortran)dnl - -AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) - -# Compile and link our simple test program by passing a flag (argument -# 1 to this macro) to the Fortran 90 compiler in order to get "version" output -ac_save_FCFLAGS=$FCFLAGS -FCFLAGS="$FCFLAGS m4_default([$1], [$ac_cv_prog_fc_version])" -(eval echo $as_me:__oline__: \"$ac_link\") >&AS_MESSAGE_LOG_FD -ac_fc_version_output=`eval $ac_link AS_MESSAGE_LOG_FD>&1 2>&1 | grep -v 'Driving:'` -echo "$ac_fc_version_output" >&AS_MESSAGE_LOG_FD -FCFLAGS=$ac_save_FCFLAGS - -rm -f conftest.* -AC_LANG_POP(Fortran)dnl - -])# AX_FC_VERSION_OUTPUT - -# AX_FC_VERSION -# -------------- -# -AC_DEFUN([AX_FC_VERSION], -[AC_CACHE_CHECK([how to get the version output from $FC], - [ac_cv_prog_fc_version], -[AC_LANG_ASSERT(Fortran) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], -[ac_cv_prog_fc_version= -# Try some options frequently used verbose output -for ac_version in -V -version --version +version -qversion; do - AX_FC_VERSION_OUTPUT($ac_version) - # look for "copyright" constructs in the output - for ac_arg in $ac_fc_version_output; do - case $ac_arg in - COPYRIGHT | copyright | Copyright | '(c)' | '(C)' | Compiler | Compilers | Version | Version:) - ac_cv_prog_fc_version=$ac_version - break 2 ;; - esac - done -done -if test -z "$ac_cv_prog_fc_version"; then - AC_MSG_WARN([cannot determine how to obtain version information from $FC]) -fi], - [AC_MSG_WARN([compilation failed])]) -])])# AX_FC_VERSION diff --git a/src/externals/mct/mpi-serial/mpi.c b/src/externals/mct/mpi-serial/mpi.c deleted file mode 100644 index 0353f477f81..00000000000 --- a/src/externals/mct/mpi-serial/mpi.c +++ /dev/null @@ -1,378 +0,0 @@ - - -#include "mpiP.h" -#include "mpi.h" -#include "type.h" - -/****************************************************************************/ - -static int initialized=0; - - -/* Store fortran pointer values here */ - -static int *f_MPI_STATUS_IGNORE; -static int *f_MPI_STATUSES_IGNORE; -static int *f_MPI_IN_PLACE; - -static char *mpi_version_string="mpi-serial 2.0"; - - -/****************************************************************************/ - - -/* - * INIT/FINALIZE - * - */ - - - -FC_FUNC( mpi_init_fort , MPI_INIT_FORT) - (int *f_MPI_COMM_WORLD, - int *f_MPI_ANY_SOURCE, int *f_MPI_ANY_TAG, - int *f_MPI_PROC_NULL, int *f_MPI_ROOT, - int *f_MPI_COMM_NULL, int *f_MPI_REQUEST_NULL, - int *f_MPI_GROUP_NULL, int *f_MPI_GROUP_EMPTY, - int *f_MPI_UNDEFINED, - int *f_MPI_MAX_ERROR_STRING, - int *f_MPI_MAX_PROCESSOR_NAME, - int *f_MPI_STATUS_SIZE, - int *f_MPI_SOURCE, int *f_MPI_TAG, int *f_MPI_ERROR, - int *f_status, - int *fsource, int *ftag, int *ferror, - int *f_MPI_INTEGER, void *fint1, void *fint2, - int *f_MPI_LOGICAL, void *flog1, void *flog2, - int *f_MPI_REAL, void *freal1, void *freal2, - int *f_MPI_DOUBLE_PRECISION, - void *fdub1, void *fdub2, - int *f_MPI_COMPLEX, void *fcomp1, void *fcomp2, - int *ierror) -{ - int err; - int size; - int offset; - - *ierror=MPI_Init(NULL,NULL); - - err=0; - - /* - * These 3 macros compare things from mpif.h (as passed in by the f_ - * arguments) to the values in C (from #including mpi.h). - * - * Unfortunately, this kind of thing is done most easily in a nasty - * looking macto. - * - */ - - - /* - * verify_eq - * compare value of constants in C and fortran - * i.e. 
compare *f_ to - */ - -#define verify_eq(name) \ - if (*f_##name != name) \ - { fprintf(stderr,"mpi-serial: mpi_init_fort: %s not consistent " \ - "between mpif.h (%d) and mpi.h (%d)\n",\ - #name,*f_##name,name); \ - err=1; } - -#define verify_eq_warn(name) \ - if (*f_##name != name) \ - { fprintf(stderr,"mpi-serial: mpi_init_fort: warning: %s not consistent " \ - "between mpif.h (%d) and mpi.h (%d)\n",\ - #name,*f_##name,name); \ - } - - - /* - * verify_size - * verify that the type name in fortran has the correct - * value (i.e. the size of that data type). - * Determine size by subtracting the pointer values of two - * consecutive array locations. - */ - -#define verify_size(name,p1,p2) \ - if ( (size=((char *)(p2) - (char *)(p1))) != Simpletype_length( \ - (*(Datatype*)mpi_handle_to_datatype(*f_##name))->pairs[0].type) ) \ - { fprintf(stderr,"mpi-serial: mpi_init_fort: mpif.h %s (%d) " \ - "does not match actual fortran size (%d)\n", \ - #name,*f_##name,size); \ - err=1; } - - /* - * verify_field - * check the struct member offsets for MPI_Status vs. the - * fortan integer array offsets. E.g. the location of - * status->MPI_SOURCE should be the same as STATUS(MPI_SOURCE) - */ - -#define verify_field(name) \ - { offset= (char *)&((MPI_Status *)f_status)->name - (char *)f_status; \ - if ( offset != (*f_##name-1)*sizeof(int) ) \ - { fprintf(stderr,"mpi-serial: mpi_init_fort: mpif.h %s (%d) (%d bytes) " \ - "is inconsistent w/offset in MPI_Status (%d bytes)\n", \ - #name,*f_##name,(*f_##name-1)*sizeof(int),offset); \ - err=1; }} - - - - verify_eq(MPI_COMM_WORLD); - verify_eq(MPI_ANY_SOURCE); - verify_eq(MPI_ANY_TAG); - verify_eq(MPI_PROC_NULL); - verify_eq(MPI_ROOT); - verify_eq(MPI_COMM_NULL); - verify_eq(MPI_REQUEST_NULL); - verify_eq(MPI_GROUP_NULL); - verify_eq(MPI_GROUP_EMPTY); - verify_eq(MPI_UNDEFINED); - verify_eq(MPI_MAX_ERROR_STRING); - verify_eq(MPI_MAX_PROCESSOR_NAME); - - verify_eq(MPI_STATUS_SIZE); - verify_field(MPI_SOURCE); - verify_field(MPI_TAG); - verify_field(MPI_ERROR); - - verify_eq(MPI_INTEGER); - verify_size(MPI_INTEGER,fint1,fint2); - - verify_size(MPI_LOGICAL,flog1,flog2); - - verify_eq_warn(MPI_REAL); - verify_size(MPI_REAL,freal1,freal2); - - verify_eq(MPI_DOUBLE_PRECISION); - verify_size(MPI_DOUBLE_PRECISION,fdub1,fdub2); - - verify_size(MPI_COMPLEX,fcomp1,fcomp2); - - if (err) - abort(); -} - -int MPI_Init(int *argc, char **argv[]) -{ - MPI_Comm my_comm_world; - - if (sizeof(MPI_Aint) < sizeof(void *)) - { - fprintf(stderr, "mpi-serial: MPI_Init: " - "MPI_Aint is not large enough for void *\n"); - abort(); - } - - my_comm_world=mpi_comm_new(); - - if (my_comm_world != MPI_COMM_WORLD) - { - fprintf(stderr,"MPI_Init: conflicting MPI_COMM_WORLD\n"); - abort(); - } - - // call this to have the fortran routine call back and save - // values for f_MPI_STATUS_IGNORE and f_MPI_STATUSES_IGNORE - FC_FUNC(mpi_get_fort_pointers,MPI_GET_FORT_POINTERS)(); // the () are important - - initialized=1; - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_finalize, MPI_FINALIZE )(int *ierror) -{ - *ierror=MPI_Finalize(); -} - - -/* - * MPI_Finalize() - * - * this library doesn't support re-initializing MPI, so - * the finalize will just leave everythign as it is... 
- * - */ - - -int MPI_Finalize(void) -{ - initialized=0; - - mpi_destroy_handles(); - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_abort , MPI_ABORT )(int *comm, int *errorcode, int *ierror) -{ - *ierror=MPI_Abort( *comm, *errorcode); -} - - - -int MPI_Abort(MPI_Comm comm, int errorcode) -{ - fprintf(stderr,"MPI_Abort: error code = %d\n",errorcode); - exit(errorcode); -} - - -/*********/ - - - -FC_FUNC( mpi_error_string , MPI_ERROR_STRING) - (int *errorcode, char *string, - int *resultlen, int *ierror) -{ - *ierror=MPI_Error_string(*errorcode, string, resultlen); -} - - -int MPI_Error_string(int errorcode, char *string, int *resultlen) -{ - sprintf(string,"MPI Error: code %d\n",errorcode); - *resultlen=strlen(string); - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_get_processor_name , MPI_GET_PROCESSOR_NAME ) - (char *name, int *resultlen, int *ierror) -{ - *ierror=MPI_Get_processor_name(name,resultlen); -} - - -int MPI_Get_processor_name(char *name, int *resultlen) -{ - int ret; - - ret=gethostname(name,MPI_MAX_PROCESSOR_NAME); - - if (ret!=0) - strncpy(name,"unknown host name",MPI_MAX_PROCESSOR_NAME); - - - name[MPI_MAX_PROCESSOR_NAME-1]='\0'; /* make sure NULL terminated */ - *resultlen=strlen(name); - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_initialized , MPI_INITIALIZED )(int *flag, int *ierror) -{ - *ierror=MPI_Initialized(flag); -} - - -int MPI_Initialized(int *flag) -{ - *flag= initialized; - - return(MPI_SUCCESS); -} - - -/**********/ - - -void FC_FUNC( mpi_get_library_version, MPI_GET_LIBRARY_VERSION) (char *version, int *resultlen, int *ierror) -{ - MPI_Get_library_version(version,resultlen); - - // Sanity check before the memset() - if ( (*resultlen) > (MPI_MAX_LIBRARY_VERSION_STRING-1) ) - abort(); - - memset(version+(*resultlen),' ',MPI_MAX_LIBRARY_VERSION_STRING-(*resultlen)); - - *ierror=MPI_SUCCESS; -} - - - -int MPI_Get_library_version(char *version, int *resultlen) -{ - - strncpy(version,mpi_version_string,MPI_MAX_LIBRARY_VERSION_STRING); - // Make sure it is null terminated - version[MPI_MAX_LIBRARY_VERSION_STRING-1]='\0'; - *resultlen=strlen(version); - - return(MPI_SUCCESS); -} - -/**********/ -void FC_FUNC( mpi_get_version, MPI_GET_VERSION )(int *mpi_vers, int *mpi_subvers, int *ierror) -{ - MPI_Get_Version(mpi_vers, mpi_subvers); - - *ierror=MPI_SUCCESS; -} - -int MPI_Get_Version(int *mpi_vers, int *mpi_subvers) -{ - *mpi_vers = 1; - *mpi_subvers = 0; - - return (MPI_SUCCESS); -} - -/**********/ - - -void FC_FUNC( mpi_save_fort_pointers, MPI_SAVE_FORT_POINTERS ) (int *status, int *statuses, int *in_place) -{ - f_MPI_STATUS_IGNORE=status; - f_MPI_STATUSES_IGNORE=statuses; - f_MPI_IN_PLACE=in_place; -} - - - -MPI_Status *mpi_c_status(int *status) -{ - if (status==f_MPI_STATUS_IGNORE) - return(MPI_STATUS_IGNORE); - - return((MPI_Status *)status); -} - - -MPI_Status *mpi_c_statuses(int *statuses) -{ - if (statuses==f_MPI_STATUSES_IGNORE) - return(MPI_STATUSES_IGNORE); - - return((MPI_Status *)statuses); -} - - -void *mpi_c_in_place(void *buffer) -{ - if (buffer==(void *)f_MPI_IN_PLACE) - return(MPI_IN_PLACE); - - return(buffer); -} diff --git a/src/externals/mct/mpi-serial/mpi.h b/src/externals/mct/mpi-serial/mpi.h deleted file mode 100644 index 9183bf89d20..00000000000 --- a/src/externals/mct/mpi-serial/mpi.h +++ /dev/null @@ -1,436 +0,0 @@ -#ifndef _MPI_H_ -#define _MPI_H_ - -#define MPI_MAX_LIBRARY_VERSION_STRING (80) - -typedef int MPI_Comm; -typedef int MPI_Request; - - -#define MPI_COMM_WORLD (1) -#define 
MPI_COMM_NULL (0) /* handle 0 maps to NULL */ - - -typedef int MPI_Group; - -/* MPI_GROUP_EMPTY and MPI_GROUP_NULL must not conflict with MPI_GROUP_ONE */ -#define MPI_GROUP_EMPTY (-1) -#define MPI_GROUP_NULL (0) - - -/* - * Return codes - * On error, mpi-serial aborts so the values don't really matter - * as long as they are different than MPI_SUCCESS - * - */ - -#define MPI_SUCCESS (0) -#define MPI_ERR_BUFFER (-1) -#define MPI_ERR_COUNT (-1) -#define MPI_ERR_TYPE (-1) -#define MPI_ERR_TAG (-1) -#define MPI_ERR_COMM (-1) -#define MPI_ERR_RANK (-1) -#define MPI_ERR_REQUEST (-1) -#define MPI_ERR_ROOT (-1) -#define MPI_ERR_GROUP (-1) -#define MPI_ERR_OP (-1) -#define MPI_ERR_TOPOLOGY (-1) -#define MPI_ERR_DIMS (-1) -#define MPI_ERR_ARG (-1) -#define MPI_ERR_UNKNOWN (-1) -#define MPI_ERR_TRUNCATE (-1) -#define MPI_ERR_OTHER (-1) -#define MPI_ERR_INTERN (-1) -#define MPI_PENDING (-1) -#define MPI_ERR_IN_STATUS (-1) -#define MPI_ERR_LASTCODE (-1) - -/* - * MPI_UNDEFINED - * - * Uses: - * value for "color" in e.g. comm_split - * value for rank in Group_translate_ranks - * - */ - - -#define MPI_UNDEFINED (-1) - - -/* - * Data types etc. - */ - -typedef unsigned long int MPI_Aint; -#define MPI_BOTTOM (0) -#define MPI_IN_PLACE (void *)(-1) -typedef int MPI_Datatype; - - -/* The type's value is now a handle */ - -#define MPI_DATATYPE_NULL (0) - -//C types -#define MPI_CHAR (-1) -#define MPI_SHORT (-2) -#define MPI_INT (-3) -#define MPI_LONG (-4) -#define MPI_UNSIGNED_CHAR (-5) -#define MPI_UNSIGNED_SHORT (-6) -#define MPI_UNSIGNED (-7) -#define MPI_UNSIGNED_LONG (-8) -#define MPI_FLOAT (-9) -#define MPI_DOUBLE (-10) -#define MPI_LONG_DOUBLE (-11) - -//Cross-language -#define MPI_BYTE (-12) -#define MPI_PACKED (-13) -#define MPI_LB (-14) -#define MPI_UB (-15) - -// Fortran types -#define MPI_INTEGER (-16) // RML: why not (MPI_INT) -#define MPI_REAL (-17) // RML: why not (MPI_FLOAT) -#define MPI_DOUBLE_PRECISION (-18) // RML: why not (MPI_DOUBLE) - -#define MPI_COMPLEX (-19) -#define MPI_DOUBLE_COMPLEX (-20) -#define MPI_LOGICAL (-21) -#define MPI_CHARACTER (-22) -#define MPI_2REAL (-23) -#define MPI_2DOUBLE_PRECISION (-24) -#define MPI_2INTEGER (-25) - -//Reduction function types - -#define MPI_FLOAT_INT (-26) -#define MPI_DOUBLE_INT (-27) -#define MPI_LONG_INT (-28) -#define MPI_2INT (-29) -#define MPI_SHORT_INT (-30) -#define MPI_LONG_DOUBLE_INT (-31) - - -/* Fortran size-specific types */ - -#define MPI_INTEGER1 (-32) -#define MPI_INTEGER2 (-33) -#define MPI_INTEGER4 (-34) -#define MPI_INTEGER8 (-35) -#define MPI_INTEGER16 (-36) - -#define MPI_REAL4 (-37) -#define MPI_REAL8 (-38) -#define MPI_REAL16 (-39) - -#define MPI_COMPLEX8 (-40) -#define MPI_COMPLEX16 (-41) -#define MPI_COMPLEX32 (-42) - -/* Some more types */ - -#define MPI_LONG_LONG_INT (-43) -#define MPI_LONG_LONG MPI_LONG_LONG_INT -#define MPI_UNSIGNED_LONG_LONG (-44) - -#define MPI_OFFSET (-45) - - -/* - * Fortran int size - * - */ - -typedef int MPI_Fint; - - - -#define MPI_ANY_TAG (-1) - -#define MPI_ANY_SOURCE (-1) -#define MPI_PROC_NULL (-2) -#define MPI_ROOT (-3) - -#define MPI_REQUEST_NULL (0) - -#define MPI_MAX_ERROR_STRING (128) -#define MPI_MAX_PROCESSOR_NAME (128) - - -/* - * MPI_Status - * - * definition must be compatible with the mpif.h values for - * MPI_STATUS_SIZE, MPI_SOURCE, MPI_TAG, and MPI_ERROR. - * - * Note: The type used for MPI_Status_int must be chosen to match - * Fortran INTEGER. 
- * - */ - -typedef int MPI_Status_int; - -typedef struct /* Fortran: INTEGER status(MPI_STATUS_SIZE) */ -{ - MPI_Status_int MPI_SOURCE; /* Fortran: status(MPI_SOURCE) */ - MPI_Status_int MPI_TAG; /* Fortran: status(MPI_TAG) */ - MPI_Status_int MPI_ERROR; /* Fortran: status(MPI_ERROR) */ - int get_count; /* Number specified for send */ - -} MPI_Status; - - -#define MPI_STATUS_IGNORE ((MPI_Status *)0) -#define MPI_STATUSES_IGNORE ((MPI_Status *)0) - - -/* - * MPI Errhandling stubs (Not functional currently) - */ -typedef int MPI_Errhandler; - -#define MPI_ERRORS_ARE_FATAL ((MPI_Errhandler)0) -#define MPI_ERRORS_RETURN ((MPI_Errhandler)-1) - - -/* - * Collective operations - */ - - -typedef int MPI_Op; - -typedef void MPI_User_function( void *invec, void *inoutvec, int *len, - MPI_Datatype *datatype); - -#define MPI_OP_NULL (0) - -#define MPI_MAX (0) -#define MPI_MIN (0) -#define MPI_SUM (0) -#define MPI_PROD (0) -#define MPI_LAND (0) -#define MPI_BAND (0) -#define MPI_LOR (0) -#define MPI_BOR (0) -#define MPI_LXOR (0) -#define MPI_BXOR (0) -#define MPI_MAXLOC (0) -#define MPI_MINLOC (0) - - - -#define MPI_STATUS_SIZE (sizeof(MPI_Status) / sizeof(int)) - - -/* NOTE: the C type MPI_Offset is NOT the same as MPI datatype MPI_OFFSET */ -typedef long long int MPI_Offset; - - -/* info - */ - -typedef int MPI_Info; /* handle */ - -#define MPI_INFO_NULL (0) - - - -/********************************************************** - * - * Note: if you need to regenerate the prototypes below, - * you can use 'protify.awk' and paste the output here. - * - */ - - -extern int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, - MPI_Comm peer_comm, int remote_leader, - int tag, MPI_Comm *newintercomm); -extern int MPI_Intercomm_merge(MPI_Comm intercomm, int high, - MPI_Comm *newintercomm); -extern int MPI_Cart_create(MPI_Comm comm_old, int ndims, int *dims, - int *periods, int reorder, MPI_Comm *comm_cart); -extern int MPI_Cart_get(MPI_Comm comm, int maxdims, int *dims, - int *periods, int *coords); -extern int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, - int *coords); -extern int MPI_Dims_create(int nnodes, int ndims, int *dims); - -extern int MPI_Barrier(MPI_Comm comm ); -extern int MPI_Bcast(void* buffer, int count, MPI_Datatype datatype, - int root, MPI_Comm comm ); -extern int MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype, - void* recvbuf, int recvcount, MPI_Datatype recvtype, - int root, MPI_Comm comm); -extern int MPI_Gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype, - void* recvbuf, int *recvcounts, int *displs, - MPI_Datatype recvtype, int root, MPI_Comm comm); -extern int MPI_Allgather(void* sendbuf, int sendcount, MPI_Datatype sendtype, - void* recvbuf, int recvcount, MPI_Datatype recvtype, - MPI_Comm comm); -extern int MPI_Allgatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype, - void* recvbuf, int *recvcounts, int *displs, - MPI_Datatype recvtype, MPI_Comm comm); -extern int MPI_Scatter( void* sendbuf, int sendcount, MPI_Datatype sendtype, - void* recvbuf, int recvcount, MPI_Datatype recvtype, - int root, MPI_Comm comm); -extern int MPI_Scatterv(void* sendbuf, int *sendcounts, int *displs, - MPI_Datatype sendtype, void* recvbuf, int recvcount, - MPI_Datatype recvtype, int root, MPI_Comm comm); -extern int MPI_Reduce(void* sendbuf, void* recvbuf, int count, - MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm); -extern int MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int *recvcounts, - MPI_Datatype datatype, MPI_Op op, MPI_Comm 
comm); -extern int MPI_Allreduce(void* sendbuf, void* recvbuf, int count, - MPI_Datatype datatype, MPI_Op op, MPI_Comm comm); -extern int MPI_Scan( void* sendbuf, void* recvbuf, int count, - MPI_Datatype datatype, MPI_Op op, MPI_Comm comm); -extern int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, - MPI_Comm comm); -extern int MPI_Alltoallv(void *sendbuf, int *sendcounts, - int *sdispls, MPI_Datatype sendtype, - void *recvbuf, int *recvcounts, - int *rdispls, MPI_Datatype recvtype, - MPI_Comm comm) ; -extern int MPI_Alltoallw(void *sendbuf, int *sendcounts, - int *sdispls, MPI_Datatype *sendtypes, - void *recvbuf, int *recvcounts, - int *rdispls, MPI_Datatype *recvtypes, - MPI_Comm comm) ; - - -extern int MPI_Op_create(MPI_User_function *function, int commute, - MPI_Op *op); -extern MPI_Op MPI_Op_f2c(MPI_Fint op); -extern MPI_Fint MPI_Op_c2f(MPI_Op op); -extern MPI_Comm mpi_comm_new(void); -extern int MPI_Op_free(MPI_Op *op); -extern int MPI_Comm_free(MPI_Comm *comm); -extern int MPI_Comm_size(MPI_Comm comm, int *size); -extern int MPI_Comm_rank(MPI_Comm comm, int *rank); -extern int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm); -extern int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm); -extern int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm); -extern int MPI_Comm_group(MPI_Comm comm, MPI_Group *group); -extern MPI_Comm MPI_Comm_f2c(MPI_Fint comm); -extern MPI_Fint MPI_Comm_c2f(MPI_Comm comm); -extern int MPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group *newgroup); -extern int MPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], - MPI_Group *newgroup); -extern int MPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group *newgroup); -extern int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, - MPI_Group *newgroup); -extern int MPI_Group_difference(MPI_Group group1, MPI_Group group2, - MPI_Group *newgroup); -extern int MPI_Group_free(MPI_Group *group); -extern int MPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, - MPI_Group group2, int *ranks2); -extern MPI_Group MPI_Group_f2c(MPI_Fint group); -extern MPI_Fint MPI_Group_c2f(MPI_Group group); - -extern int MPI_Init(int *argc, char **argv[]) ; -extern int MPI_Finalize(void); -extern int MPI_Abort(MPI_Comm comm, int errorcode); -extern int MPI_Error_string(int errorcode, char *string, int *resultlen); -extern int MPI_Get_processor_name(char *name, int *resultlen); - -extern int MPI_Info_create(MPI_Info *info); -extern int MPI_Info_set(MPI_Info info, char *key, char *value); - -extern int MPI_Initialized(int *flag); -extern int MPI_Pack( void *inbuf, int incount, MPI_Datatype datatype, - void *outbuf, int outsize, int *position, MPI_Comm comm); -extern int MPI_Unpack( void *inbuf, int insize, int *position, - void *outbuf, int outcount, MPI_Datatype datatype, - MPI_Comm comm ); -extern int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, - int source, int tag, MPI_Comm comm, MPI_Request *request); -extern int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, - int tag, MPI_Comm comm, MPI_Status *status); - -extern int MPI_Test(MPI_Request *request, int *flag, MPI_Status *status); -extern int MPI_Wait(MPI_Request *request, MPI_Status *status); -extern int MPI_Testany(int count, MPI_Request *array_of_requests, - int *index, int *flag, MPI_Status *status); -extern int MPI_Waitany(int count, MPI_Request *array_of_requests, - int *index, MPI_Status *status); 
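The MPI_Status struct earlier in this header has to agree with the Fortran constants in mpif.h further down in this diff (MPI_STATUS_SIZE=4, and the 1-based indices MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3); mpi_init_fort enforces this at run time through its verify_field macro. A minimal standalone version of that consistency check (struct copied from the header, Fortran values hard-coded for illustration):

    #include <assert.h>
    #include <stddef.h>

    typedef int MPI_Status_int;

    typedef struct {                 /* layout as in mpi.h above */
        MPI_Status_int MPI_SOURCE;
        MPI_Status_int MPI_TAG;
        MPI_Status_int MPI_ERROR;
        int get_count;
    } MPI_Status;

    /* Fortran-side values from mpif.h (1-based indices into the
     * INTEGER status(MPI_STATUS_SIZE) array)                     */
    enum { F_MPI_SOURCE = 1, F_MPI_TAG = 2, F_MPI_ERROR = 3,
           F_MPI_STATUS_SIZE = 4 };

    int main(void)
    {
        /* same relation verify_field() checks: C byte offset equals
         * (Fortran index - 1) integers                              */
        assert(offsetof(MPI_Status, MPI_SOURCE) == (F_MPI_SOURCE - 1) * sizeof(int));
        assert(offsetof(MPI_Status, MPI_TAG)    == (F_MPI_TAG    - 1) * sizeof(int));
        assert(offsetof(MPI_Status, MPI_ERROR)  == (F_MPI_ERROR  - 1) * sizeof(int));
        assert(sizeof(MPI_Status) == F_MPI_STATUS_SIZE * sizeof(int));
        return 0;
    }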
-extern int MPI_Testall(int count, MPI_Request *array_of_requests, - int *flag, MPI_Status *array_of_statuses); -extern int MPI_Waitall(int count, MPI_Request *array_of_requests, - MPI_Status *array_of_statuses); -extern MPI_Request MPI_Request_f2c(MPI_Fint request); -extern MPI_Fint MPI_Request_c2f(MPI_Request request); -extern int MPI_Testsome(int incount, MPI_Request *array_of_requests, - int *outcount, int *array_of_indices, - MPI_Status *array_of_statuses); -extern int MPI_Waitsome(int incount, MPI_Request *array_of_requests, - int *outcount, int *array_of_indices, - MPI_Status *array_of_statuses); -extern int MPI_Request_free(MPI_Request * req); -extern int MPI_Isend(void *buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm, MPI_Request *request) ; -extern int MPI_Send(void* buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm); -extern int MPI_Ssend(void* buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm); -extern int MPI_Rsend(void* buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm); -extern int MPI_Irsend(void *buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm, MPI_Request *request) ; -extern int MPI_Sendrecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, - int dest, int sendtag, - void *recvbuf, int recvcount, MPI_Datatype recvtype, - int source, int recvtag, - MPI_Comm comm, MPI_Status *status); - -extern int MPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status); -extern int MPI_Iprobe(int source, int tag, MPI_Comm comm, - int *flag, MPI_Status *status); - -extern int MPI_Pack_size(int incount, MPI_Datatype type, MPI_Comm comm, MPI_Aint * size); - -/* Error handling stub, not currently functional */ -extern int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler handle); - -/* new type functions */ -extern int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count); -extern int MPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *count); -extern int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype); - -extern int MPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype oldtype, - MPI_Datatype *newtype); - -extern int MPI_Type_hvector(int count, int blocklen, MPI_Aint stride, - MPI_Datatype oldtype, MPI_Datatype *newtype); - -extern int MPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride, - MPI_Datatype oldtype, MPI_Datatype *newtype); - -extern int MPI_Type_indexed(int count, int *blocklens, int *displacements, - MPI_Datatype oldtype, MPI_Datatype *newtype); - -extern int MPI_Type_create_indexed_block(int count, int blocklen, int *displacements, - MPI_Datatype oldtype, MPI_Datatype *newtype); -extern int MPI_Type_hindexed(int count, int *blocklens, MPI_Aint *displacements, - MPI_Datatype oldtype, MPI_Datatype *newtype); -extern int MPI_Type_size(MPI_Datatype type, int * size); -extern int MPI_Type_struct(int count, int *blocklens, MPI_Aint *displacements, - MPI_Datatype *oldtypes, MPI_Datatype *newtype); -extern int MPI_Type_dup(MPI_Datatype oldtype, MPI_Datatype *newtype); - -extern int MPI_Type_extent(MPI_Datatype datatype, MPI_Aint * extent); -extern int MPI_Type_commit(MPI_Datatype * datatype); -extern int MPI_Type_free(MPI_Datatype * datatype); -extern int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint * lb); -extern int MPI_Type_ub(MPI_Datatype datatype, MPI_Aint * ub); - -extern double MPI_Wtime(void); - -#endif diff --git a/src/externals/mct/mpi-serial/mpiP.h 
b/src/externals/mct/mpi-serial/mpiP.h deleted file mode 100644 index 290d3cf9f4d..00000000000 --- a/src/externals/mct/mpi-serial/mpiP.h +++ /dev/null @@ -1,128 +0,0 @@ -#ifndef _MPIP_H -#define _MPIP_H - -/* - * Private .h file for MPI - */ - - -#include -#include -#include -#include - -#include "listops.h" -#include "mpi.h" - -/* Autoconf Fortran name mangling - * - * config.h defines F77_FUNC and F77_FUNC_ - * Since we are generally using FC_FUNC, and - * all of our functions will ONLY use F77_FUNC_ - * (with the underscore, define FC_FUNC as the - * aforementioned. - * - * If config.h is not present, default to the old - * approach. - */ - -#ifdef HAVE_CONFIG_H -#include -/* config.h should define FC_FUNC */ -#else - -/* - * Fortran name mangling - * - * the configure.ac specifies these - * - * cpp does not have the ability to change the case - * of the argument, so the invocation of the macro - * has to be give both e.g. FC_FUNC(hello,HELLO) - * and maps to "hello_", "hello", and "HELLO" repectively. - * - * IMPORTANT NOTE: - * In the case of FORTRAN_GNUF2C (e.g. g95), the rule is this: - * name does not contain an underscore -> append *one* underscore - * name contains an underscore -> append *two* underscore - * Since all the mpi-serial names exported to fortran start with "mpi_", - * we only support the latter. - * - * Note: FORTRANUNDERSCORE is needed by ccsm - * - */ - - -#if defined(FORTRAN_UNDERSCORE_) || defined(FORTRANUNDERSCORE) -#define FC_FUNC(lower,upper) lower##_ -#elif defined(FORTRAN_GNUF2C) -#define FC_FUNC(lower,upper) lower##__ -#elif defined(FORTRAN_SAME) -#define FC_FUNC(lower,upper) lower -#elif defined(FORTRAN_CAPS_) -#define FC_FUNC(lower,upper) upper -#else -#error "Unrecognized Fortran-mangle type" -/* set to something reasonable to avoid cascade of cc errors */ -#define FC_FUNC(lower,upper) lower##_ -#endif -#endif /* HAVE_CONFIG_H */ - -/* - * MPI_GROUP_ONE must not conflict with MPI_GROUP_NULL or - * MPI_GROUP_EMPTY - */ - -#define MPI_GROUP_ONE (1) - - -/****************************************************************************/ - - -typedef struct -{ - pList sendlist; - pList recvlist; - - int num; - -} Comm; - - - -typedef struct -{ - pListitem listitem; /* to allow Req to be removed from list */ - - int *buf; - int source; - int tag; - int complete; - -} Req; - - -/****************************************************************************/ - -/* copy functions */ -extern int copy_data2(void * source, int src_count, MPI_Datatype src_type, - void * dest, int dest_count, MPI_Datatype dest_type); - -extern void *mpi_malloc(int size); -extern void mpi_free(void *ptr); - -extern MPI_Comm mpi_comm_new(void); - -extern void mpi_destroy_handles(void); -extern void mpi_alloc_handle(int *handle, void **data); -extern void *mpi_handle_to_ptr(int handle); -extern void mpi_free_handle(int handle); - -extern void FC_FUNC(mpi_get_fort_pointers,MPI_GET_FORT_POINTERS)(void); - -extern MPI_Status *mpi_c_status(int *status); -extern MPI_Status *mpi_c_statuses(int *statuses); -extern void *mpi_c_in_place(void *buffer); - - -#endif /* _MPIP_H */ diff --git a/src/externals/mct/mpi-serial/mpif.F90 b/src/externals/mct/mpi-serial/mpif.F90 deleted file mode 100644 index 369b71459dc..00000000000 --- a/src/externals/mct/mpi-serial/mpif.F90 +++ /dev/null @@ -1,12 +0,0 @@ -#ifdef HAVE_CONFIG_H -#include -#endif - -Module mpi -implicit none -! MPI_ADDRESS_KIND: need an 8-byte integer. 
- INTEGER, PARAMETER, PUBLIC :: MPI_ADDRESS_KIND=selected_int_kind(13) - - - include "mpif.h" -end Module mpi diff --git a/src/externals/mct/mpi-serial/mpif.h b/src/externals/mct/mpi-serial/mpif.h deleted file mode 100644 index 678ad9e9fdd..00000000000 --- a/src/externals/mct/mpi-serial/mpif.h +++ /dev/null @@ -1,335 +0,0 @@ - -! -! MPI_COMM_WORLD -! - -INTEGER MPI_COMM_WORLD -parameter (mpi_comm_world=1) - -! -! -! - -integer MPI_BOTTOM -parameter (MPI_BOTTOM=0) - - -! -! source,tag - ! - - integer MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_TAG_UB - parameter (mpi_any_source=-1, mpi_any_tag= -1, mpi_tag_ub=1681915906) - - integer MPI_PROC_NULL, MPI_ROOT - parameter (MPI_PROC_NULL=-2, MPI_ROOT=-3) - - integer MPI_COMM_NULL, MPI_REQUEST_NULL - parameter (MPI_COMM_NULL=0, MPI_REQUEST_NULL=0) - - integer MPI_GROUP_NULL, MPI_GROUP_EMPTY - parameter (MPI_GROUP_NULL=0, MPI_GROUP_EMPTY= -1) - - integer MPI_MAX_ERROR_STRING - parameter (MPI_MAX_ERROR_STRING=128) - - integer MPI_MAX_PROCESSOR_NAME - parameter (MPI_MAX_PROCESSOR_NAME=128) - - ! - ! Return codes - ! - - integer MPI_SUCCESS - parameter (MPI_SUCCESS=0) - - integer MPI_ERR_BUFFER - parameter (MPI_ERR_BUFFER= -1) - - integer MPI_ERR_COUNT - parameter (MPI_ERR_COUNT= -1) - - integer MPI_ERR_TYPE - parameter (MPI_ERR_TYPE= -1) - - integer MPI_ERR_TAG - parameter (MPI_ERR_TAG= -1) - - integer MPI_ERR_COMM - parameter (MPI_ERR_COMM= -1) - - integer MPI_ERR_RANK - parameter (MPI_ERR_RANK= -1) - - integer MPI_ERR_REQUEST - parameter (MPI_ERR_REQUEST= -1) - - integer MPI_ERR_ROOT - parameter (MPI_ERR_ROOT= -1) - - integer MPI_ERR_GROUP - parameter (MPI_ERR_GROUP= -1) - - integer MPI_ERR_OP - parameter (MPI_ERR_OP= -1) - - integer MPI_ERR_TOPOLOGY - parameter (MPI_ERR_TOPOLOGY= -1) - - integer MPI_ERR_DIMS - parameter (MPI_ERR_DIMS= -1) - - integer MPI_ERR_ARG - parameter (MPI_ERR_ARG= -1) - - integer MPI_ERR_UNKNOWN - parameter (MPI_ERR_UNKNOWN= -1) - - integer MPI_ERR_TRUNCATE - parameter (MPI_ERR_TRUNCATE= -1) - - integer MPI_ERR_OTHER - parameter (MPI_ERR_OTHER= -1) - - integer MPI_ERR_INTERN - parameter (MPI_ERR_INTERN= -1) - - integer MPI_PENDING - parameter (MPI_PENDING= -1) - - integer MPI_ERR_IN_STATUS - parameter (MPI_ERR_IN_STATUS= -1) - - integer MPI_ERR_LASTCODE - parameter (MPI_ERR_LASTCODE= -1) - - integer MPI_ERRORS_RETURN - parameter (MPI_ERRORS_RETURN= -1) - - ! - ! - - - integer MPI_UNDEFINED - parameter (MPI_UNDEFINED= -1) - - - ! - ! MPI_Status - ! - ! The values in this section MUST match the struct definition - ! in mpi.h - ! - - - INTEGER MPI_STATUS_SIZE - PARAMETER (MPI_STATUS_SIZE=4) - - INTEGER MPI_SOURCE, MPI_TAG, MPI_ERROR - PARAMETER(MPI_SOURCE=1, MPI_TAG=2, MPI_ERROR=3) - ! There is a 4th value only used internally - - INTEGER MPI_STATUS_IGNORE(MPI_STATUS_SIZE) - INTEGER MPI_STATUSES_IGNORE(MPI_STATUS_SIZE,1) - COMMON /MPISERIAL/ MPI_STATUS_IGNORE - COMMON /MPISERIAL/ MPI_STATUSES_IGNORE - - ! - ! MPI_IN_PLACE - ! - - INTEGER MPI_IN_PLACE - COMMON /MPISERIAL/ MPI_IN_PLACE - - SAVE /MPISERIAL/ ! Technically needed in case goes out of scope - - - ! - ! MPI_Datatype values - ! - ! New datatype values - ! Type constants represent integer handles, matching up to the index of the - ! type array equal to the absolute value of the constant plus one. For - ! example, MPI_BYTE=-12, corresponding to type index 11. - ! (Array in type_const.c) - ! 
- - - INTEGER MPI_DATATYPE_NULL - PARAMETER (MPI_DATATYPE_NULL=0) - - INTEGER MPI_BYTE - PARAMETER (MPI_BYTE=-12) - - INTEGER MPI_PACKED - PARAMETER (MPI_PACKED=-13) - - INTEGER MPI_LB - PARAMETER (MPI_LB=-14) - - INTEGER MPI_UB - PARAMETER (MPI_UB=-15) - - INTEGER MPI_INTEGER - PARAMETER (MPI_INTEGER=-16) - - INTEGER MPI_REAL - PARAMETER (MPI_REAL=-17) - - INTEGER MPI_DOUBLE_PRECISION - PARAMETER (MPI_DOUBLE_PRECISION=-18) - - INTEGER MPI_COMPLEX - PARAMETER (MPI_COMPLEX=-19) - - INTEGER MPI_DOUBLE_COMPLEX - PARAMETER (MPI_DOUBLE_COMPLEX=-20) - - INTEGER MPI_LOGICAL - PARAMETER (MPI_LOGICAL=-21) - - INTEGER MPI_CHARACTER - PARAMETER (MPI_CHARACTER=-22) - - integer MPI_2REAL - parameter (MPI_2REAL= -23) - - integer MPI_2DOUBLE_PRECISION - parameter (MPI_2DOUBLE_PRECISION= -24) - - integer MPI_2INTEGER - parameter (MPI_2INTEGER= -25) - - - ! - ! Size-specific types - ! - - INTEGER MPI_INTEGER1 - PARAMETER (MPI_INTEGER1= -32 ) - - INTEGER MPI_INTEGER2 - PARAMETER (MPI_INTEGER2= -33 ) - - INTEGER MPI_INTEGER4 - PARAMETER (MPI_INTEGER4= -34 ) - - INTEGER MPI_INTEGER8 - PARAMETER (MPI_INTEGER8= -35 ) - - INTEGER MPI_INTEGER16 - PARAMETER (MPI_INTEGER16= -36 ) - - - INTEGER MPI_REAL4 - PARAMETER (MPI_REAL4= -37 ) - - INTEGER MPI_REAL8 - PARAMETER (MPI_REAL8= -38 ) - - INTEGER MPI_REAL16 - PARAMETER (MPI_REAL16= -39 ) - - - integer MPI_COMPLEX8 - parameter (MPI_COMPLEX8= -40 ) - - integer MPI_COMPLEX16 - parameter (MPI_COMPLEX16= -41 ) - - integer MPI_COMPLEX32 - parameter (MPI_COMPLEX32= -42 ) - - integer MPI_LONG_LONG_INT - parameter (MPI_LONG_LONG_INT= -43) - - integer MPI_LONG_LONG - parameter (MPI_LONG_LONG= MPI_LONG_LONG_INT) - - integer MPI_UNSIGNED_LONG_LONG - parameter (MPI_UNSIGNED_LONG_LONG= -44) - - integer MPI_OFFSET - parameter (MPI_OFFSET= -45) - - ! - ! MPI_Op values - ! - ! (All are handled as no-op so no value is necessary; but provide one - ! anyway just in case.) - ! - - INTEGER MPI_SUM - PARAMETER (MPI_SUM=0) - INTEGER MPI_MAX - PARAMETER (MPI_MAX=0) - INTEGER MPI_MIN - PARAMETER (MPI_MIN=0) - INTEGER MPI_PROD - PARAMETER (MPI_PROD=0) - INTEGER MPI_LAND - PARAMETER (MPI_LAND=0) - INTEGER MPI_BAND - PARAMETER (MPI_BAND=0) - INTEGER MPI_LOR - PARAMETER (MPI_LOR=0) - INTEGER MPI_BOR - PARAMETER (MPI_BOR=0) - INTEGER MPI_LXOR - PARAMETER (MPI_LXOR=0) - INTEGER MPI_BXOR - PARAMETER (MPI_BXOR=0) - INTEGER MPI_MINLOC - PARAMETER (MPI_MINLOC=0) - INTEGER MPI_MAXLOC - PARAMETER (MPI_MAXLOC=0) - INTEGER MPI_OP_NULL - PARAMETER (MPI_OP_NULL=0) - - ! - ! MPI_Wtime - ! - - DOUBLE PRECISION MPI_WTIME - EXTERNAL MPI_WTIME - - - ! - ! Kinds - ! - - INTEGER MPI_OFFSET_KIND - PARAMETER (MPI_OFFSET_KIND=selected_int_kind(13)) - - INTEGER MPI_MODE_RDONLY - PARAMETER (MPI_MODE_RDONLY=0) - - INTEGER MPI_MODE_CREATE - PARAMETER (MPI_MODE_CREATE=1) - - INTEGER MPI_MODE_RDWR - PARAMETER (MPI_MODE_RDWR=2) - - - ! - ! Info - ! - - INTEGER MPI_INFO_NULL - PARAMETER (MPI_INFO_NULL=0) - - - ! - ! Library version string (must match C value) - ! - - INTEGER MPI_MAX_LIBRARY_VERSION_STRING - PARAMETER (MPI_MAX_LIBRARY_VERSION_STRING=80) - - ! - ! MPI Version - ! 
- INTEGER MPI_VERSION - PARAMETER (MPI_VERSION=1) - INTEGER MPI_SUBVERSION - PARAMETER (MPI_SUBVERSION=0) diff --git a/src/externals/mct/mpi-serial/op.c b/src/externals/mct/mpi-serial/op.c deleted file mode 100644 index 64efbc1004a..00000000000 --- a/src/externals/mct/mpi-serial/op.c +++ /dev/null @@ -1,28 +0,0 @@ -#include "mpi.h" -#include "mpiP.h" -/* Because operations based on one processor are essentially no operation, - * all MPI_Ops are handled as null ops. Therefore, returning 0 (OP_NULL) - * suffices here. - */ - -FC_FUNC(mpi_op_create, MPI_OP_CREATE)(MPI_User_function *func, int * commute, int * op, int * ierr) -{ - *ierr = MPI_Op_create(func, *commute, op); -} - -int MPI_Op_create(MPI_User_function *function, int commute, MPI_Op *op) -{ - *op = 0; - return MPI_SUCCESS; -} - -FC_FUNC(mpi_op_free, MPI_OP_FREE)(int * op, int * ierr) -{ - *ierr = MPI_Op_free(op); -} - -int MPI_Op_free(MPI_Op * op) -{ - return MPI_SUCCESS; -} - diff --git a/src/externals/mct/mpi-serial/pack.c b/src/externals/mct/mpi-serial/pack.c deleted file mode 100644 index 83ff8799856..00000000000 --- a/src/externals/mct/mpi-serial/pack.c +++ /dev/null @@ -1,145 +0,0 @@ -#include -#include -#include -#include "mpiP.h" -#include "type.h" - -/* - * - */ - - -FC_FUNC( mpi_pack , MPI_PACK ) - ( void *inbuf, int *incount, int *datatype, - void *outbuf, int *outsize, int *position, int *comm, int *ierror) -{ - *ierror=MPI_Pack(inbuf, *incount,* datatype, - outbuf, *outsize, position, *comm); -} - - - -int MPI_Pack( void *inbuf, int incount, MPI_Datatype datatype, - void *outbuf, int outsize, int *position, MPI_Comm comm) -{ - int ret; - - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(datatype); - Comm* comm_ptr = mpi_handle_to_ptr(comm); - - ret = Pack(inbuf, incount, type_ptr, outbuf, outsize, position, comm_ptr); - - return ret; -} - - - -int Pack(void *inbuf, int incount, Datatype type, - void *outbuf, int outsize, int *position, Comm * comm) -{ - int i, j; - MPI_Aint extent; - //check that buffer is large enough - Type_extent(type, &extent); - for (i = 0; i < incount; i++) - { - for (j = 0; j < type->count; j++) - { - if ((*position) + Simpletype_length(type->pairs[j].type) > outsize) - { - printf("MPI_Pack: data exceeds buffer size\n"); - exit(1); - } - memcpy(((char*) outbuf)+(*position), inbuf+type->pairs[j].disp + (extent*i), - Simpletype_length(type->pairs[j].type)); - *position += Simpletype_length(type->pairs[j].type); - } - } -} - -FC_FUNC( mpi_pack_size, MPI_PACK_SIZE )(int * incount, int * datatype, - int * comm, long * size, int *ierr) -{ - *ierr = MPI_Pack_size(*incount, *datatype, *comm, size); -} - -int MPI_Pack_size(int incount, MPI_Datatype datatype, - MPI_Comm comm, MPI_Aint * size) -{ - int ret; - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(datatype); - Comm * comm_ptr = mpi_handle_to_ptr(comm); - - ret = Pack_size(incount, type_ptr, comm_ptr, size); - - return ret; -} - - -int Pack_size(int incount, Datatype datatype, - Comm * comm, MPI_Aint * size) -{ - int i; - *size = 0; - //sum up all sizes - for(i = 0; i < datatype->count; i++) - { - *size += Simpletype_length(datatype->pairs[i].type); - } - *size *= incount; - printf("Size = %d\n", *size); -} - - -/* - * - */ - - -FC_FUNC( mpi_unpack , MPI_UNPACK ) - ( void *inbuf, int *insize, int *position, - void *outbuf, int *outcount, int *datatype, - int *comm, int *ierror ) -{ - *ierror=MPI_Unpack( inbuf, *insize, position, - outbuf, *outcount, *datatype, *comm); -} - - -int MPI_Unpack(void * inbuf, int insize, int * position, 
void * outbuf, - int outcount, MPI_Datatype type, MPI_Comm comm) -{ - int ret; - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(type); - Comm * comm_ptr = mpi_handle_to_ptr(comm); - - ret = Unpack(inbuf, insize, position, outbuf, outcount, type_ptr, comm_ptr); - - return ret; -} - -int Unpack(void * inbuf, int insize, int * position, void *outbuf, - int outcount, Datatype type, Comm* comm) -{ - int i, j; - MPI_Aint extent; - - Type_extent(type, &extent); - - for (i = 0; i < outcount; i++) - { - for (j = 0; j < type->count; j++) - { - if ((*position) + Simpletype_length(type->pairs[j].type) > insize) - { - printf("MPI_Unpack: Data exceeds buffer size\n"); - exit(1); - } - memcpy(outbuf+type->pairs[j].disp + (extent*i), ((char*) inbuf)+(*position) , - Simpletype_length(type->pairs[j].type)); - *position += Simpletype_length(type->pairs[j].type); - } - } -} - - diff --git a/src/externals/mct/mpi-serial/probe.c b/src/externals/mct/mpi-serial/probe.c deleted file mode 100644 index 29c3c52e07b..00000000000 --- a/src/externals/mct/mpi-serial/probe.c +++ /dev/null @@ -1,88 +0,0 @@ -//probe.c -#include "mpiP.h" - -static int mpi_match_send(void *r, void *tag) -{ - return( *((int *)tag) == MPI_ANY_TAG || - *((int *)tag) == ((Req *)r)->tag ); -} - -FC_FUNC(mpi_iprobe, MPI_IPROBE)(int * source, int * tag, int * comm, - int * flag, int *status, int * ierr) -{ - *ierr = MPI_Iprobe(*source, *tag, *comm, flag, mpi_c_status(status)); -} - -/* Iprobe - * Search for existing message, return status about it - */ - -int MPI_Iprobe(int source, int tag, MPI_Comm comm, int *flag, - MPI_Status *status) - -{ - pListitem match; - Comm *mycomm; - Req *sreq; - - mycomm=mpi_handle_to_ptr(comm); /* mycomm=(Comm *)comm; */ - -#ifdef INFO - fflush(stdout); - fprintf(stderr,"MPI_IProbev: Comm=%d tag=%d count=%d type=%d\n", - mycomm->num,tag,count,datatype); -#endif - - - if (source!=0 && source!=MPI_ANY_SOURCE) - { - fprintf(stderr,"MPI_Irecv: bad source %d\n",source); - abort(); - } - - match=AP_list_search_func(mycomm->sendlist,mpi_match_send,&tag); - - *flag= (match==NULL ? 0:1 ); - - if (*flag) - { - sreq=(Req *)AP_listitem_data(match); - - if (status!=MPI_STATUS_IGNORE) - { - status->MPI_SOURCE=0 ; - status->MPI_TAG= sreq->tag; - } - } - - return(MPI_SUCCESS); -} - - -//probe: wait for message, and return status -// (either message will immediately be available, or deadlock. - -FC_FUNC(mpi_probe,MPI_PROBE)(int *source, int *tag, int *comm, int *status, - int *ierr) -{ - *ierr=MPI_Probe(*source,*tag,*comm,mpi_c_status(status)); -} - - - -int MPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status) -{ - - int flag; - - MPI_Iprobe(source,tag,comm,&flag,status); - - if (!flag) - { - fprintf(stderr,"MPI_Probe: no existing match, deadlock\n"); - abort(); - } - - return(MPI_SUCCESS); -} - diff --git a/src/externals/mct/mpi-serial/protify.awk b/src/externals/mct/mpi-serial/protify.awk deleted file mode 100755 index 483fc2ec0d1..00000000000 --- a/src/externals/mct/mpi-serial/protify.awk +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/awk -f - - -####################################################################### -# -# Because of awk problems on the sgi, this file is converted to perl -# via 'a2p' to yield 'protify'. Do not edit the perl version!!!! -# -####################################################################### - - -BEGIN { - - printf("\n"); - printf("/****************************************************** \n"); - printf(" * WARNING: This file automatically generated. 
* \n"); - printf(" ****************************************************** \n"); - printf(" */ \n"); - printf("\n\n\n\n"); -} - - -/[ \t]*extern/ { next } -/main\(/ { next } - -/FORT_NAME/ {next} - -# Ignore doctext comments -/\/\*[DMN@]/ { while (!match($0,/[DMN@]\*\//)) getline; next; } - - -/^[^ \t{}/*#].*[^ \t]+\(.*[^;]*$/ \ - { - if ($1=="static") - next; #continue; - - printf("extern %s",$0); - - while (!match($0,"\)")) - { - getline; - gsub("\t"," "); - printf("\n %s",$0); - } - printf(";\n"); - } diff --git a/src/externals/mct/mpi-serial/recv.c b/src/externals/mct/mpi-serial/recv.c deleted file mode 100644 index d70344a3765..00000000000 --- a/src/externals/mct/mpi-serial/recv.c +++ /dev/null @@ -1,164 +0,0 @@ - -#include "mpiP.h" - - - -/* - * RECEIVING - * - */ - - - -static int mpi_match_send(void *r, void *tag) -{ - return( *((int *)tag) == MPI_ANY_TAG || - *((int *)tag) == ((Req *)r)->tag ); -} - - - -/* - * - */ - - - -FC_FUNC( mpi_irecv , MPI_IRECV )(void *buf, int *count, int *datatype, - int *source, int *tag, int *comm, - int *request, int *ierror) -{ - - *ierror=MPI_Irecv(buf,*count,*datatype,*source,*tag, - *comm, (void *)request); - -} - - - -int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, - int source, int tag, MPI_Comm comm, MPI_Request *request) - -{ - pListitem match; - Comm *mycomm; - Req *rreq, *sreq; - - mycomm=mpi_handle_to_ptr(comm); /* mycomm=(Comm *)comm; */ - -#ifdef INFO - fflush(stdout); - fprintf(stderr,"MPI_Irecv: Comm=%d tag=%d count=%d type=%d\n", - mycomm->num,tag,count,datatype); -#endif - - - if (source!=0 && source!=MPI_ANY_SOURCE && source!=MPI_PROC_NULL) - { - fprintf(stderr,"MPI_Irecv: bad source %d\n",source); - abort(); - } - - mpi_alloc_handle(request,(void **)&rreq); - - if (source==MPI_PROC_NULL) - { - rreq->complete=1; - rreq->source=MPI_PROC_NULL; - rreq->tag=MPI_ANY_TAG; - - return(MPI_SUCCESS); - } - - - if ( match=AP_list_search_func(mycomm->sendlist,mpi_match_send,&tag) ) - { - sreq=(Req *)AP_listitem_data(match); - AP_list_delete_item(mycomm->sendlist,match); - -// memcpy(buf,sreq->buf,count * datatype); - copy_data2(sreq->buf, count, datatype, buf, count, datatype); - rreq->complete=1; - rreq->source=0; - rreq->tag=sreq->tag; /* in case tag was MPI_ANY_TAG */ - - sreq->complete=1; - -#ifdef DEBUG - printf("Completion(recv) value=%d tag=%d\n", - *((int *)buf),rreq->tag); -#endif - - return(MPI_SUCCESS); - } - - rreq->buf=buf; - rreq->tag=tag; - rreq->complete=0; - rreq->listitem=AP_list_append(mycomm->recvlist,rreq); - -#ifdef INFO - print_list(mycomm->recvlist,"recvlist for comm ",mycomm->num); -#endif - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_recv , MPI_RECV )(void *buf, int *count, int *datatype, - int *source, int *tag, int *comm, - int *status, int *ierror) -{ - *ierror=MPI_Recv(buf,*count,*datatype,*source,*tag,*comm, - mpi_c_status(status)); -} - - - -int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, - int tag, MPI_Comm comm, MPI_Status *status) -{ - MPI_Request request; - -#ifdef INFO - fflush(stdout); - fprintf(stderr,"MPI_Recv: "); -#endif - - - MPI_Irecv(buf,count,datatype,source,tag,comm,&request); - MPI_Wait(&request,status); - - if (status!=MPI_STATUS_IGNORE) - status->get_count = count; // rml: shouldn't this depend on send? 
- - return(MPI_SUCCESS); -} - - - -#ifdef INFO - -int print_item(void *item, void *data) -{ - fprintf(stderr,"%d ", ((Req *)item)->tag); - return(0); -} - - -int print_list(pList list, char *msg, int num) -{ - fflush(stdout); - fprintf(stderr,"%s %d: ",msg,num); - - AP_list_apply(list,print_item,NULL); - - fprintf(stderr,"\n"); - return(0); -} - - -#endif diff --git a/src/externals/mct/mpi-serial/req.c b/src/externals/mct/mpi-serial/req.c deleted file mode 100644 index 5cfa827fe5e..00000000000 --- a/src/externals/mct/mpi-serial/req.c +++ /dev/null @@ -1,301 +0,0 @@ -#include "mpiP.h" - - -/* - * COMPLETION - */ - - - -FC_FUNC( mpi_test , MPI_TEST)(int *request, int *flag, int *status, - int *ierror) -{ - *ierror=MPI_Test( (void *)request ,flag,mpi_c_status(status)); -} - - - -int MPI_Test(MPI_Request *request, int *flag, MPI_Status *status) -{ - Req *req; - - if (*request==MPI_REQUEST_NULL) - { - if (status!=MPI_STATUS_IGNORE) - { - status->MPI_TAG= MPI_ANY_TAG; - status->MPI_SOURCE= MPI_ANY_SOURCE; - } - *flag=1; - return(MPI_SUCCESS); - } - - - req=mpi_handle_to_ptr(*request); - - *flag=req->complete; - - if (*flag) - { - if (status!=MPI_STATUS_IGNORE) - { - status->MPI_SOURCE= req->source; - status->MPI_TAG= req->tag; - } - - mpi_free_handle(*request); - *request=MPI_REQUEST_NULL; - } - - return(MPI_SUCCESS); -} - - - -FC_FUNC( mpi_wait , MPI_WAIT )(int *request, int *status, int *ierror) -{ - *ierror=MPI_Wait( (void *)request, mpi_c_status(status) ); -} - - - -int MPI_Wait(MPI_Request *request, MPI_Status *status) -{ - int flag; - - MPI_Test(request,&flag,status); - - if (!flag) - { - fprintf(stderr,"MPI_Wait: request not complete, deadlock\n"); - abort(); - } - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC( mpi_waitany , MPI_WAITANY )(int *count, int *requests, - int *index, int *status, int *ierror) -{ - - *ierror=MPI_Waitany(*count, (void *)requests,index,mpi_c_status(status)); -} - - - -int MPI_Waitany(int count, MPI_Request *array_of_requests, - int *index, MPI_Status *status) -{ - int flag; - - MPI_Testany(count, array_of_requests, index, &flag, status); - - if (!flag) - { - /* none are completed */ - - fprintf(stderr,"MPI_Waitany: no requests complete, deadlock\n"); - abort(); - - } - - return(MPI_SUCCESS); -} - -/* MPI_Testany: looks for any message matching an element - * in request array and returns its status. - * flag=0 means no match was found. 
- */ - -FC_FUNC(mpi_testany, MPI_TESTANY) - (int * count, int * array_of_requests, - int * index, int * flag, int *status, int * ierr) -{ - *ierr = MPI_Testany(*count, array_of_requests, index, - flag, mpi_c_status(status)); -} - -int MPI_Testany(int count, MPI_Request *array_of_requests, - int *index, int *flag, MPI_Status *status) -{ - int i; - - for (i=0; itag == MPI_ANY_TAG || - ((Req *)r)->tag == *((int *)tag) ); -} - - -/* - * - */ - - - -FC_FUNC( mpi_isend , MPI_ISEND )(void *buf, int *count, int *datatype, - int *dest, int *tag, int *comm, int *req, int *ierror) -{ - - *ierror=MPI_Isend(buf,*count,*datatype,*dest,*tag, - *comm, (void *)req); - -} - - - -int MPI_Isend(void *buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm, MPI_Request *request) -{ - pListitem match; - Comm *mycomm; - Req *rreq, *sreq; - - mycomm=mpi_handle_to_ptr(comm); /* (Comm *)comm; */ - -#ifdef INFO - fflush(stdout); - fprintf(stderr,"MPI_Isend: Comm=%d tag=%d count=%d type=%d\n", - mycomm->num,tag,count,datatype); -#endif - - if (dest!=0 && dest!=MPI_PROC_NULL) - { - fprintf(stderr,"MPI_Isend: send to %d\n",dest); - abort(); - } - - mpi_alloc_handle(request,(void **) &sreq); - - - if (dest==MPI_PROC_NULL) - { - sreq->complete=1; - return(MPI_SUCCESS); - } - - if ( match=AP_list_search_func(mycomm->recvlist,mpi_match_recv,&tag) ) - { - rreq=(Req *)AP_listitem_data(match); - AP_list_delete_item(mycomm->recvlist,match); - -// memcpy(rreq->buf,buf,count * datatype); - copy_data2(buf, count, datatype, rreq->buf, count, datatype); - rreq->complete=1; - rreq->source=0; - rreq->tag=tag; /* in case rreq->tag was MPI_ANY_TAG */ - - sreq->complete=1; - -#ifdef DEBUG - printf("Completion(send) value=%d tag=%d\n", - *((int *)buf),rreq->tag); -#endif - - return(MPI_SUCCESS); - } - - sreq->buf=buf; - sreq->tag=tag; - sreq->complete=0; - sreq->listitem=AP_list_append(mycomm->sendlist,sreq); - -#ifdef INFO - print_list(mycomm->sendlist,"sendlist for comm ",mycomm->num); -#endif - - return(MPI_SUCCESS); -} - - -/*********/ - - -FC_FUNC(mpi_send, MPI_SEND) ( void *buf, int *count, int *datatype, - int *dest, int *tag, int *comm, int *ierror) -{ - *ierror=MPI_Send(buf, *count, *datatype, *dest, *tag, *comm); -} - - - -int MPI_Send(void* buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm) -{ - MPI_Request request; - MPI_Status status; - -#ifdef INFO - fflush(stdout); - fprintf(stderr,"MPI_Send: "); -#endif - - MPI_Isend(buf,count,datatype,dest,tag,comm,&request); - MPI_Wait(&request,&status); - - - return(MPI_SUCCESS); -} - - - - -/*********/ - - -FC_FUNC(mpi_ssend, MPI_SSEND) ( void *buf, int *count, int *datatype, - int *dest, int *tag, int *comm, int *ierror) -{ - *ierror=MPI_Send(buf, *count, *datatype, *dest, *tag, *comm); -} - - - -int MPI_Ssend(void* buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm) -{ - return(MPI_Send(buf,count,datatype,dest,tag,comm)); -} - - - -/*********/ - - -FC_FUNC(mpi_rsend, MPI_RSEND) ( void *buf, int *count, int *datatype, - int *dest, int *tag, int *comm, int *ierror) -{ - *ierror=MPI_Send(buf, *count, *datatype, *dest, *tag, *comm); -} - - - -int MPI_Rsend(void* buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm) -{ - return(MPI_Send(buf,count,datatype,dest,tag,comm)); -} - - - - -/*********/ - - - -FC_FUNC( mpi_irsend , MPI_IRSEND )(void *buf, int *count, int *datatype, - int *dest, int *tag, int *comm, int *req, int *ierror) -{ - - *ierror=MPI_Irsend(buf,*count,*datatype,*dest,*tag, - *comm, (void 
*)req); - -} - - - -int MPI_Irsend(void *buf, int count, MPI_Datatype datatype, - int dest, int tag, MPI_Comm comm, MPI_Request *request) -{ - MPI_Status status; - Req *req; - - - MPI_Isend(buf,count,datatype,dest,tag,comm,request); - - /* Ready mode implied a receive must already be posted, - * so the Isend should have completed already. - * Can't use MPI_Test here for the error check because - * it would clear the request prematurely. - */ - - req=mpi_handle_to_ptr(*request); - if ( !req->complete ) - { - fprintf(stderr,"MPI_Irsend: no matching receive found\n"); - abort(); - } - - - return(MPI_SUCCESS); -} - - - - -/*********/ - - -FC_FUNC(mpi_sendrecv, MPI_SENDRECV) ( - void *sendbuf, int *sendcount, int *sendtype, int *dest, int *sendtag, - void *recvbuf, int *recvcount, int *recvtype, int *source, int *recvtag, - int *comm, int *status, - int *ierror) -{ - *ierror=MPI_Sendrecv(sendbuf, *sendcount, *sendtype, *dest, *sendtag, - recvbuf, *recvcount, *recvtype, *source, *recvtag, - *comm, mpi_c_status(status)); -} - - - -int MPI_Sendrecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, - int dest, int sendtag, - void *recvbuf, int recvcount, MPI_Datatype recvtype, - int source, int recvtag, - MPI_Comm comm, MPI_Status *status) -{ - MPI_Request request; - - - MPI_Irecv(recvbuf, recvcount, recvtype, source, recvtag, comm, &request); - - MPI_Send(sendbuf, sendcount, sendtype, dest, sendtag, comm); - - MPI_Wait(&request,status); - - - return(MPI_SUCCESS); -} - - - diff --git a/src/externals/mct/mpi-serial/tests/.gitignore b/src/externals/mct/mpi-serial/tests/.gitignore deleted file mode 100644 index 2037e022f76..00000000000 --- a/src/externals/mct/mpi-serial/tests/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -ctest -ctest2 -ftest -ftest2 diff --git a/src/externals/mct/mpi-serial/tests/Makefile b/src/externals/mct/mpi-serial/tests/Makefile deleted file mode 100644 index c03c1fe9d96..00000000000 --- a/src/externals/mct/mpi-serial/tests/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -############################### - -# -# test programs Makefile -# - -# Parent dir Makefile.conf has all necessary vars -include ../Makefile.conf - -TINC = -I.. -LDFLAGS = -L.. 
-MYLIBS = $(LIBS) -l$(MODULE) -MYF90FLAGS=$(INCPATH) $(DEFS) $(FCFLAGS) $(MPEUFLAGS) - -runtests: all - ./ctest - @echo - ./ftest - @echo - -all: ctest ftest - -ctest: ctest.c - $(CC) $(DEFS) $(TINC) $(ALLCFLAGS) -o $@ ctest.c $(LDFLAGS) $(MYLIBS) - -ftest: ftest.F90 - $(FC) $(DEFS) $(TINC) $(MYF90FLAGS) -o $@ ftest.F90 $(LDFLAGS) $(MYLIBS) - -ctest2: ctest_old.c - $(CC) $(DEFS) $(TINC) $(ALLCFLAGS) -o $@ ctest_old.c $(LDFLAGS) $(MYLIBS) - -ftest2: ftest_old.F90 - $(FC) $(DEFS) $(TINC) $(MYF90FLAGS) -o $@ ftest_old.F90 $(LDFLAGS) $(MYLIBS) - -stest: stest.F90 stest2.o - $(FC) $(DEFS) $(TINC) $(MYF90FLAGS) -o $@ stest.F90 stest2.o $(LDFLAGS) $(MYLIBS) - - -clean: - rm -f ctest ftest ctest2 ftest2 - rm -f *.o diff --git a/src/externals/mct/mpi-serial/tests/ctest.c b/src/externals/mct/mpi-serial/tests/ctest.c deleted file mode 100644 index 4a9b50abb64..00000000000 --- a/src/externals/mct/mpi-serial/tests/ctest.c +++ /dev/null @@ -1,967 +0,0 @@ -#include -#include -#include - -#ifdef HAVE_CONFIG_H -#include -#endif - -#ifdef TEST_INTERNAL -#include -#include -#else -MPI_Request req; -#endif - - -int errcount = 0; -//simplest example: contiguous -// type of 5 MPI_INT - -void test_simple_contig() -{ - int i; - int a [5] = {1, 2, 3, 4, 5}; - int b [5]; - MPI_Datatype contig_type; - - //Contiguous type of simple types - printf("\nContiguous type of 5 x MPI_INT\n"); - MPI_Type_contiguous(5, MPI_INT, &contig_type); - MPI_Type_commit(&contig_type); - -#ifdef TEST_INTERNAL - print_typemap(contig_type); - copy_data(&a, &b, contig_type); -#else - MPI_Isend(&a, 1, contig_type, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&b, 1, contig_type, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD,&req); -#endif - - printf("a = ["); - for (i = 0; i < 5; i++) - printf("%d ", a[i]); - printf("]\n"); - - printf("b = ["); - for (i = 0; i < 5; i++) - printf("%d ", b[i]); - printf("]\n"); - - for (i = 0; i < 5; i++) - if (a[i]!=b[i]) - { - printf(">>>FAILED: test_simple_contig\n"); - errcount++; - return; - } -} - -// vector type of MPI_INTs - -void test_simple_vector() -{ - int i; - int a[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - int b[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - int index_test []= {0, 1, 3, 4, 6, 7}; - MPI_Datatype vector_type; - - //Vector type of simple types - printf("\nVector type of 3 groups of 2 MPI_INT, stride of 3.\n"); - MPI_Type_vector(3, 2, 3, MPI_INT, &vector_type); - MPI_Type_commit(&vector_type); - -#ifdef TEST_INTERNAL - print_typemap(vector_type); - copy_data(&a, &b, vector_type); -#else - MPI_Isend(&a, 1, vector_type, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&b, 1, vector_type, 0, 0, MPI_COMM_WORLD, &req); -#endif - - printf("a = ["); - for (i = 0; i < 10; i++) - printf("%d ", a[i]); - printf("]\n"); - - printf("b = ["); - for (i = 0; i < 10; i++) - printf("%d ", b[i]); - printf("]\n"); - - for (i = 0; i < 6; i++) - if (a[index_test[i]]!=b[index_test[i]]) - { - printf(">>>FAILED: test_simple_vector\n"); - errcount++; - return; - } -} -//vector type (byte addressed, using -// sizeof(int) to compute stride - -void test_simple_hvector() -{ - MPI_Datatype vector_type; - int i; - int a[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - int b[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - int index_test [6] = {0, 1, 4, 5, 8, 9}; - //Vector (byte-addressed) of simple types - printf("\nVector type of 3 groups of 2 MPI_INT, stride of 16 bytes.\n"); - MPI_Type_hvector(3, 2, 4*sizeof(int), MPI_INT, &vector_type); - MPI_Type_commit(&vector_type); - -#ifdef TEST_INTERNAL - print_typemap(vector_type); - copy_data(&a, &b, 
vector_type); -#else - MPI_Isend(&a, 1, vector_type, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&b, 1, vector_type, 0, 0, MPI_COMM_WORLD, &req); -#endif - - printf("a = ["); - for (i = 0; i < 10; i++) - printf("%d ", a[i]); - printf("]\n"); - - printf("b = ["); - for (i = 0; i < 10; i++) - printf("%d ", b[i]); - printf("]\n"); - - for (i = 0; i < 6; i++) - if (a[index_test[i]]!=b[index_test[i]]) - { - printf(">>>FAILED: test_simple_hvector\n"); - errcount++; - return; - } -} - -//indexed type. - -void test_simple_indexed() -{ - int i; - int a[15] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; - int b[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - int index_test [6] = {0, 5, 6, 10, 11, 12}; - int blens[3] = {2, 1, 3}; - int disps[3] = {5, 0, 10}; - MPI_Datatype indexed_type; - //Indexed of simple types - - printf("\nIndexed type of MPI_INT.\n"); - - MPI_Type_indexed(3, blens, disps, MPI_INT, &indexed_type); - MPI_Type_commit(&indexed_type); - -#ifdef TEST_INTERNAL - print_typemap(indexed_type); - copy_data(&a, &b, indexed_type); -#else - MPI_Isend(&a, 1, indexed_type, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&b, 1, indexed_type, 0, 0, MPI_COMM_WORLD, &req); -#endif - - printf("a = ["); - for (i = 0; i < 15; i++) - printf("%d ", a[i]); - printf("]\n"); - - printf("b = ["); - for (i = 0; i < 15; i++) - printf("%d ", b[i]); - printf("]\n"); - - for (i = 0; i < 6; i++) - if (a[index_test[i]]!=b[index_test[i]]) - { - printf(">>>FAILED: test_simple_indexed\n"); - errcount++; - return; - } -} - -//block indexed. Same as indexed except -//static block length - -void test_simple_bindexed() -{ - int i; - int disps[3] = {0, 4, 7}; - int a [10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - int b [10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - int index_test[6] = {0, 1, 4, 5, 7, 8}; - MPI_Datatype indexed_type; - - //block indexed of simple types - printf("\nBlock indexed type of MPI_INT.\n"); - MPI_Type_create_indexed_block(3, 2, disps, MPI_INT, &indexed_type); - MPI_Type_commit(&indexed_type); -#ifdef TEST_INTERNAL - copy_data(&a, &b, indexed_type); - print_typemap(indexed_type); -#else - MPI_Isend(&a, 1, indexed_type, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&b, 1, indexed_type, 0, 0, MPI_COMM_WORLD, &req); -#endif - - printf("a = ["); - for (i = 0; i < 10; i++) - printf("%d ", a[i]); - printf("]\n"); - - printf("b = ["); - for (i = 0; i < 10; i++) - printf("%d ", b[i]); - printf("]\n"); - - for (i = 0; i < 6; i++) - if (a[index_test[i]]!=b[index_test[i]]) - { - printf(">>>FAILED: test_simple_bindexed\n"); - errcount++; - return; - } -} - -//hindexed: same as indexed, but -//using byte displacements based off of sizeof(int) -//(no reason why this shouldn't work) - -void test_simple_hindexed() -{ - int i; - int a [10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - int b [10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - int index_test [6] = {0, 2, 3, 5, 6, 7}; - int blens[3] = {2, 1, 3}; - MPI_Aint disps[3] = {2*sizeof(int), 0, 5*sizeof(int)}; - MPI_Datatype indexed_type; - -//Indexed (byte-addressed) of simple types - printf("\nBlock indexed (byte addressed) type of MPI_INT.\n"); - MPI_Type_hindexed(3, blens, disps, MPI_INT, &indexed_type); - MPI_Type_commit(&indexed_type); -#ifdef TEST_INTERNAL - print_typemap(indexed_type); - copy_data(&a, &b, indexed_type); -#else - MPI_Isend(&a, 1, indexed_type, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&b, 1, indexed_type, 0, 0, MPI_COMM_WORLD, &req); -#endif - - printf("a = ["); - for (i = 0; i < 10; i++) - printf("%d ", a[i]); - printf("]\n"); - - printf("b = ["); - for (i = 0; i < 
10; i++) - printf("%d ", b[i]); - printf("]\n"); - - for (i = 0; i < 6; i++) - if (a[index_test[i]]!=b[index_test[i]]) - { - printf(">>>FAILED: test_simple_hindexed\n"); - errcount++; - return; - } -} - - -/* - * void struct_test() -{ - int blocklengths[6]; - int offsets[6]; - MPI_Aint boffsets[6]; - MPI_Datatype types[6]; - MPI_Datatype struct_type, newtype, newtype2, sstruct, - indexed_type, vector_type; - MPI_Aint extent2, extent3; - //struct type of simple types - printf("\nStruct of simple types\n"); - blocklengths[0] = 3; - blocklengths[1] = 5; - blocklengths[2] = 2; - blocklengths[3] = 1; - boffsets[0] = 0; - boffsets[1] = 24; - boffsets[2] = 32; - boffsets[3] = 40; - types[0] = MPI_DOUBLE; - types[1] = MPI_CHAR; - types[2] = MPI_INT; - types[3] = MPI_LONG_DOUBLE; - - MPI_Type_struct(4, blocklengths, boffsets, types, &struct_type); - print_typemap(struct_type); - - //struct type of simple types, with artificial LB and UB - printf("\nStruct type of simple types, with LB and UB.\n"); - blocklengths[0] = 2; - blocklengths[1] = 4; - blocklengths[2] = 1; - blocklengths[3] = 24; - blocklengths[4] = 1; - boffsets[0] = 0; - boffsets[1] = 40; - boffsets[2] = 80; - boffsets[3] = 48; - boffsets[4] = -8; - types[0] = MPI_LONG; - types[1] = MPI_INT; - types[2] = MPI_UB; - types[3] = MPI_CHAR; - types[4] = MPI_LB; - - MPI_Type_struct(5, blocklengths, boffsets, types, &newtype2); - print_typemap(newtype2); - - //struct type: 2 int, 1 float - printf("\nSimple struct for use: 2 int, 1 float\n"); - blocklengths[0] = 2; - blocklengths[1] = 1; - boffsets[0] = 0; - boffsets[1] = 8; - types[0] = MPI_INT; - types[1] = MPI_FLOAT; - - MPI_Type_struct(2, blocklengths, boffsets, types, &sstruct); - print_typemap(sstruct); - - //contiguous type of complex (struct) type - printf("\nContiguous type of complex (struct) type\n"); - MPI_Type_contiguous(3, newtype2, &newtype); - print_typemap(newtype); - - //vector type of complex type - printf("\nVector type of struct\n"); - MPI_Type_vector(3, 2, 2, struct_type, &vector_type); - print_typemap(vector_type); - - //indexed of complex type - printf("\nIndexed type of struct\n"); - blocklengths[0] = 1; - blocklengths[1] = 2; - offsets[0] = 0; - offsets[1] = 7; - MPI_Type_indexed(2, blocklengths, offsets, sstruct, &indexed_type); - print_typemap(indexed_type); - - //struct of simple/complex - printf("\nStruct of smaller structs and simple types\n"); - MPI_Type_extent(sstruct, &extent2); - MPI_Type_extent(indexed_type, &extent3); - blocklengths[0] = 2; - blocklengths[1] = 1; - blocklengths[2] = 4; - blocklengths[3] = 5; - boffsets[0] = 0; - boffsets[1] = 2 * extent2; - boffsets[2] = boffsets[1] + extent3; - boffsets[3] = boffsets[2] + 4; - types[0] = sstruct; - types[1] = indexed_type; - types[2] = MPI_CHAR; - types[3] = newtype2; - - MPI_Type_struct(4, blocklengths, boffsets, types, &struct_type); - print_typemap(struct_type); -} -*/ - -//simple struct, comprised of an int, 2 chars -// and a long int value. 
- -void test_simple_struct() -{ - struct {int a; char b; char c; long d; } s1; - struct {int a; char b; char c; long d; } s2; - - int blens[4] = {1, 2, 1}; - MPI_Aint disps[4] = {0, 4, 8}; - MPI_Datatype types[4] = {MPI_INT, MPI_CHAR, MPI_LONG}; - MPI_Datatype struct_type; - - printf("\nSimple struct type: 1 int, 2 char, 1 long\n"); - MPI_Type_struct(3, blens, disps, types, &struct_type); - MPI_Type_commit(&struct_type); - s1.a = 10; - s1.b = 'x'; - s1.c = 'a'; - s1.d = 3000; - -#ifdef TEST_INTERNAL - print_typemap(struct_type); - copy_data(&s1, &s2, struct_type); -#else - MPI_Isend(&s1, 1, struct_type, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&s2, 1, struct_type, 0, 0, MPI_COMM_WORLD, &req); -#endif - - if (!(s1.a==s2.a && s1.b==s2.b && s1.c==s2.c && s1.d==s2.d)) - { - printf(">>>FAILED: test_simple_struct\n"); - errcount++; - return; - } -} - -// combine one struct into another struct for a complex -// type. This should test any funny padding issues - -void test_complex_struct() -{ - MPI_Datatype sstruct; - typedef struct {long a; long b; char c; int d; int e;} st; - typedef struct {st a; int b; char c;} st2; - st s1 = {.a = 100, .b = 200, .c = 'x', .d = 45, .e = 50}; - st s2; - st2 s3 = {.a = { .a = 40, .b = 100, .c = 'x', .d = 50, .e = 20}, .b = 100, .c = 'g'} ; - st2 s4; - int blens[3] = {2, 2, 1}; - MPI_Aint disps[3] = {0, 2*sizeof(long) + sizeof(int), 2*sizeof(long)}; - MPI_Datatype types[3] = {MPI_LONG, MPI_INT, MPI_CHAR}; - MPI_Datatype newtype; - - - printf("\nSimple struct to create complex struct\n"); - MPI_Type_struct(3, blens, disps, types, &newtype); - MPI_Type_commit(&newtype); -#ifdef TEST_INTERNAL - print_typemap(newtype); - copy_data(&s1, &s2, newtype); -#else - MPI_Isend(&s1, 1, newtype, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&s2, 1, newtype, 0, 0, MPI_COMM_WORLD, &req); -#endif - - if (!(s1.a==s2.a && s1.b==s2.b && s1.c==s2.c && s1.d==s2.d && s1.e==s2.e)) - { - printf(">>>FAILED: test_complex_struct\n"); - errcount++; - return; - } - MPI_Datatype newtype2; - - blens[0] = 1; - blens[1] = 1; - blens[2] = 1; - disps[0] = 0; - disps[1] = sizeof(st); - disps[2] = sizeof(st) + sizeof(int); - types[0] = newtype; - types[1] = MPI_INT; - types[2] = MPI_CHAR; - - printf("\nComplex struct type composed of other struct.\n"); - MPI_Type_struct(3, blens, disps, types, &newtype2); - MPI_Type_commit(&newtype2); -#ifdef TEST_INTERNAL - print_typemap(newtype2); - copy_data(&s3, &s4, newtype2); -#else - MPI_Isend(&s3, 1, newtype2, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&s4, 1, newtype2, 0, 0, MPI_COMM_WORLD, &req); -#endif - - if (!(s3.a.a==s4.a.a && s3.a.b==s4.a.b && s3.a.c==s4.a.c && s3.b==s4.b && s3.c==s4.c)) - { - printf(">>>FAILED: test_complex_struct\n"); - errcount++; - return; - } -} - -// Indexed struct. 
This one is a bit complicated -// as to datatype layout, so it will also test the -// padding issue - -void test_indexed_struct() -{ - int i; - - //simple struct vars - int s_blens[4] = {1,1,1,2}; - MPI_Aint s_disps[4]; - MPI_Datatype s_types[4] = {MPI_CHAR, MPI_LONG, - MPI_CHAR, MPI_INT}; - MPI_Datatype s_struct; - int i_blens[3] = {3, 1, 2}; - int i_disps[3] = {0, 5, 7}; - MPI_Datatype i_struct_indexed; - int index_test [6] = {0,1,2,5,7,8}; - char* sadd; - typedef struct - {char a; long b; char c; int d; int e;} - struct_t; - - struct_t send[10]; - struct_t recv[10]; - - //initialize the structs - for (i = 0; i < 10; i++) - { - send[i].a = i; - send[i].b = 2*i; - send[i].c = 'A' + i; - send[i].d = i; - send[i].e =-i; - recv[i].a=0; - recv[i].b=0; - recv[i].c=' '; - recv[i].d=0; - recv[i].e=0; - } - - //set the displacements by using address differences - sadd = (char *)&send[0]; - s_disps[0] = (char*)&(send[0].a) - sadd; - s_disps[1] = (char*)&(send[0].b) - sadd; - s_disps[2] = (char*)&(send[0].c) - sadd; - s_disps[3] = (char*)&(send[0].d) - sadd; - //e is "contiguous" of d - - - MPI_Type_struct(4, s_blens, s_disps, s_types, &s_struct); - MPI_Type_commit(&s_struct); -#ifdef TEST_INTERNAL - print_typemap(s_struct); -#endif - - //now, create an indexed type of this struct - MPI_Type_indexed(3, i_blens, i_disps, - s_struct, &i_struct_indexed); - MPI_Type_commit(&i_struct_indexed); - -#ifdef TEST_INTERNAL - print_typemap(i_struct_indexed); - copy_data2(send, 1, i_struct_indexed, recv, 1, i_struct_indexed); -#else - MPI_Isend(&send, 1, i_struct_indexed, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&recv, 1, i_struct_indexed, 0, 0, MPI_COMM_WORLD, &req); -#endif - - for (i = 0; i < 6; i++) - { - if (!(send[index_test[i]].a==recv[index_test[i]].a - && send[index_test[i]].b==recv[index_test[i]].b - && send[index_test[i]].c==recv[index_test[i]].c - && send[index_test[i]].d==recv[index_test[i]].d - && send[index_test[i]].e==recv[index_test[i]].e)) - { - printf(">>>FAILED: test_indexed_struct\n"); - errcount++; - return; - } - } - - //to make things really interesting, let's send as the - //indexed type, and receive instead as _count_ - //consecutive struct types -#ifdef TEST_INTERNAL - copy_data2(send, 1, i_struct_indexed, recv, 6, s_struct); -#else - MPI_Gather(&send, 1, i_struct_indexed, &recv, - 6, s_struct, 0, MPI_COMM_WORLD); - -// MPI_Isend(&send, 1, i_struct_indexed, 0, 0, MPI_COMM_WORLD, &req); -// MPI_Irecv(&recv, 6, s_struct, 0, 0, MPI_COMM_WORLD, &req); - -#endif - - for (i = 0; i < 6; i++) - { - if (!(send[index_test[i]].a==recv[i].a - && send[index_test[i]].b==recv[i].b - && send[index_test[i]].c==recv[i].c - && send[index_test[i]].d==recv[i].d - && send[index_test[i]].e==recv[i].e)) - { - printf(">>>FAILED: test_indexed_struct (multiple recv)\n"); - errcount++; - return; - } - } - -} - - -//test a differing issue with send/receive -//A contiguous type of 5 MPI_INTs is sent, and is -//received using a receive x5 of MPI_INT - -void test_multiple() -{ - int i; - int a[5] = {1, 2, 3, 4, 5}; - int b[5] = {0, 0, 0, 0, 0}; - - - - MPI_Datatype contig5int; - - printf("\nSend contiguous of 5 MPI_INT, receive 5 x MPI_INT\n"); - MPI_Type_contiguous(5, MPI_INT, &contig5int); - MPI_Type_commit(&contig5int); - -#ifdef TEST_INTERNAL - copy_data2(&a, 5, MPI_INT, &b, 1, contig5int); -#else - MPI_Isend(&a, 5, MPI_INT, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&b, 1, contig5int, 0, 0, MPI_COMM_WORLD, &req); -#endif - - - printf("a = ["); - for (i = 0; i < 5; i++) - printf("%d ", a[i]); - printf("]\n"); - - 
printf("b = ["); - for (i = 0; i < 5; i++) - printf("%d ", b[i]); - printf("]\n"); - - for (i = 0; i < 5; i++) - if (a[i]!=b[i]) - { - printf(">>>FAILED: test_multiple\n"); - errcount++; - return; - } -} - -void test_multiple_struct() -{ - int i; - typedef struct {int a; double b; char c;} struct_t; - struct_t s1[5],s2[5]; - MPI_Aint disps[3]; - int blens[3] = {1,1,1}; - MPI_Datatype types[3] = {MPI_INT, MPI_DOUBLE, MPI_CHAR}; - MPI_Datatype struct_type, contig_struct; - - disps[0] = 0; - disps[1] = (char*) &(s1[0].b) - (char*) &s1[0]; - disps[2] = (char*) &(s1[0].c) - (char*) &s1[0]; - - for (i=0; i<5; i++) - { - s1[i].a=i; s1[i].b=i+15.0; s1[i].c='a'+i; - s2[i].a=0; s2[i].b=0.0 ; s2[i].c=0 ; - } - - MPI_Type_struct(3, blens, disps, types, &struct_type); - MPI_Type_commit(&struct_type); - MPI_Type_contiguous(5, struct_type, &contig_struct); - MPI_Type_commit(&contig_struct); - printf("\nSend contiguous of 5 struct, receive 5x struct\n"); - -#ifdef TEST_INTERNAL - copy_data2(&s1, 1, contig_struct, &s2, 5, struct_type); -#else - MPI_Isend(&s1, 1, contig_struct, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&s2, 5, struct_type, 0, 0, MPI_COMM_WORLD, &req); -#endif - - for (i = 0; i < 5; i++) - if (!(s1[i].a == s2[i].a && s1[i].b == s2[i].b && s1[i].c == s2[i].c)) - { - printf(">>>FAILED: test_multiple_struct\n"); - errcount++; - return; - } -} - -// packed type. Pack some arbitrary simple -// values into a buffer and copy. - -void test_packed() -{ - int SIZE = 77; - int i = 8; - char c[] = "abcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyabcdefghijklmnopqrstuvwxyzabcdefg\0"; - int j; - double k = 0.234234, l; - char d[SIZE]; - char buffer[110]; - char recv[110]; - int position = 0; - - printf("\nSimple packed type (int, char, double)\n"); - c[SIZE-1] = '\0'; - MPI_Pack(&i, 1, MPI_INT, buffer, 110, &position, MPI_COMM_WORLD); - MPI_Pack(c, SIZE, MPI_CHAR, buffer, 110, &position, MPI_COMM_WORLD); - MPI_Pack(&k, 1, MPI_DOUBLE, buffer, 110, &position, MPI_COMM_WORLD); -#ifdef TEST_INTERNAL - copy_data2(&buffer, position, MPI_PACKED, &recv, position, MPI_PACKED); -#else - MPI_Isend(&buffer, position, MPI_PACKED, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&recv, position, MPI_PACKED, 0, 0, MPI_COMM_WORLD,&req); -#endif - - position = 0; - - MPI_Unpack(&recv, 110, &position, &j, 1, MPI_INT, MPI_COMM_WORLD); - MPI_Unpack(&recv, 110, &position, d, SIZE, MPI_CHAR, MPI_COMM_WORLD); - MPI_Unpack(&recv, 110, &position, &l, 1, MPI_DOUBLE, MPI_COMM_WORLD); - - if (!(i==j && k==l)) - { - printf(">>>FAILED: test_packed\n"); - errcount++; - return; - } -} - -// Complex pack. 
Includes struct types that are packed - -void test_packed_complex() -{ - struct {int a; char b; char c; long d; } s1; - struct {int a; char b; char c; long d; } s2; - - MPI_Aint size; - int pos = 0; - int x = 10, y; - float f = 0.345, g; - char buf[100]; - char rbuf[100]; - int blens[3] = {1, 1, 1}; - MPI_Aint disps[3]; - MPI_Datatype types[3] = {MPI_INT, MPI_CHAR, MPI_LONG}; - MPI_Datatype struct_type; - - disps[0] = 0; - disps[1] = (char*) &s1.b - (char*)&s1.a; - disps[2] = (char*) &s1.d - (char*)&s1.a; - - printf("\nComplex packed type\n"); - - MPI_Type_struct(3, blens, disps, types, &struct_type); - s1.a = 10; - s1.b = 'x'; - s1.c = 'a'; - s1.d = 3000; - - MPI_Pack_size(1, struct_type,MPI_COMM_WORLD, &size); - MPI_Pack(&x, 1, MPI_INT, buf, 100, &pos, MPI_COMM_WORLD); - MPI_Pack(&s1, 1, struct_type, buf, 100, &pos, MPI_COMM_WORLD); - MPI_Pack(&f, 1, MPI_FLOAT, buf, 100, &pos, MPI_COMM_WORLD); - -#ifdef TEST_INTERNAL - copy_data2(&buf, pos, MPI_PACKED, &rbuf, pos, MPI_PACKED); -#else - MPI_Isend(&buf, pos, MPI_PACKED, 0, 0, MPI_COMM_WORLD, &req); - MPI_Irecv(&rbuf, pos, MPI_PACKED, 0, 0, MPI_COMM_WORLD,&req); -#endif - - pos = 0; - MPI_Unpack(&rbuf, 100, &pos, &y, 1, MPI_INT, 0); - MPI_Unpack(&rbuf, 100, &pos, &s2, 1, struct_type, 0); - MPI_Unpack(&rbuf, 100, &pos, &g, 1, MPI_FLOAT, 0); - - if (!(s1.a==s2.a && s1.b==s2.b /*&& s1.c==s2.c*/ && s1.d==s2.d && x == y && f == g)) - { - printf(">>>FAILED: test_packed_complex\n"); - errcount++; - return; - } - -} - -//Macro used in test_collectives -#define test_eq(s1, s2, op) { \ - printf("testing %s\n",op); \ - if (!(s1.a == s2.a && s1.b == s2.b && \ - s1.c == s2.c && s1.d == s2.d)) {\ - errcount++; \ - printf(">>>FAILED: test_collectives: %s\n", op); \ - } \ -} - -void test_collectives() -{ - typedef struct {int a; int b; double c; long d;} struct_t; - MPI_Datatype struct_type; - struct_t s1 = {.a=1, .b=2, .c=4.00, .d=100}, - s2 = {.a=0, .b=0, .c=0.00, .d=0 }; - MPI_Aint disps[3]; - - int disp = 0; - int sendcount = 1, recvcount = 1; - - - int blens[3] = {2,1,1}; - MPI_Datatype types[3] = {MPI_INT, MPI_DOUBLE, MPI_LONG}; - - disps[0] = 0; - disps[1] = (char*)&s1.c - (char*) &s1.a; - disps[2] = (char*)&s1.d - (char*) &s1.a; - - MPI_Type_struct(3, blens, disps, types, &struct_type); - MPI_Type_commit(&struct_type); - - MPI_Bcast(&s1, sendcount, struct_type, 0, MPI_COMM_WORLD); - MPI_Gather(&s1, sendcount, struct_type, &s2, recvcount, - struct_type, 0, MPI_COMM_WORLD); - test_eq(s1,s2,"MPI_Gather"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Gatherv(&s1, sendcount, struct_type, &s2, &recvcount, &disp, - struct_type, 0, MPI_COMM_WORLD); - test_eq(s1,s2,"MPI_Gatherv"); - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Allgather(&s1, sendcount, struct_type, &s2, recvcount, - struct_type, MPI_COMM_WORLD); - test_eq(s1,s2,"MPI_Allgather"); - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Allgatherv(&s1, sendcount, struct_type, &s2, &recvcount, &disp, - struct_type, MPI_COMM_WORLD); - test_eq(s1,s2,"MPI_Allgatherv"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Scatter(&s1, sendcount, struct_type, - &s2, recvcount, struct_type, - 0, MPI_COMM_WORLD); - test_eq(s1,s2,"MPI_Scatter"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Scatterv(&s1, &sendcount, &disp, struct_type, &s2, recvcount, - struct_type, 0, MPI_COMM_WORLD); - test_eq(s1,s2,"MPI_Scatterv"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Reduce(&s1, &s2, sendcount, struct_type, MPI_MAX, 0, MPI_COMM_WORLD); - test_eq(s1, s2, "MPI_Reduce"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Allreduce(&s1, &s2, 
sendcount, struct_type, MPI_MAX, MPI_COMM_WORLD); - test_eq(s1, s2, "MPI_Allreduce"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Alltoall(&s1, sendcount, struct_type, - &s2, recvcount, struct_type, MPI_COMM_WORLD); - test_eq(s1, s2, "MPI_Alltoall"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Alltoallv(&s1, &sendcount, &disp, struct_type, - &s2, &recvcount, &disp, struct_type, MPI_COMM_WORLD); - test_eq(s1, s2, "MPI_Alltoallv"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Reduce_scatter(&s1, &s2, &recvcount,struct_type, MPI_MAX, MPI_COMM_WORLD); - test_eq(s1, s2, "MPI_Reduce_scatter"); - - s2.a=0; s2.b=0; s2.c=0.00; s2.d=0; - MPI_Scan(&s1, &s2, sendcount,struct_type, MPI_MAX, MPI_COMM_WORLD); - test_eq(s1, s2, "MPI_Scan"); -} -/* -void vector_test() -{ - int c[3][2] = { {1, 2}, {3, 4}, {5, 6} }; - int d[3][2] = { {0, 0}, {0, 0}, {0, 0} }; - int i; - MPI_Datatype vector_type; - //test vector. First and third rows of array - printf("\nVector type of first and third rows in INT array\n"); - MPI_Type_vector(2, 2, 4, MPI_INT, &vector_type); - - print_typemap(vector_type); - - copy_data(&c, &d, vector_type); - - for (i = 0; i < 3; i++) - printf("%d %d\n", d[i][0], d[i][1]); -} - -void indexed_test() -{ - //we want the 2nd, 3rd, 5th, and 8th elements (starting at 0) - int i; - int a[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - int b[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - int blens[3] = {2, 1, 1}; - int disps[3] = {2, 5, 8}; - MPI_Datatype indexed_type; - - printf("\nIndexed: 2nd, 3rd, 5th, and 8th elements (0 base)\n"); - MPI_Type_indexed(3, blens, disps, MPI_INT, &indexed_type); - - print_typemap(indexed_type); - - copy_data(&a, &b, indexed_type); - - for (i = 0; i < 10; i++) - printf("%d ", b[i]); - printf("\n"); -} - -void structtests() -{ - int a[5] = {1, 2, 3, 4, 5}; - int b[5]; - - MPI_Datatype type, vector_type; - - //test contiguous - printf("\nContiguous type of 5 MPI_INT\n"); - MPI_Type_contiguous(5, MPI_INT, &type); - printf("Done.\n"); - fflush(stdout); - print_typemap(type); - copy_data(&a, &b, type); - printf("b = %d\n", a[4]); -} -*/ - -int main(int argc, char ** argv) -{ - char version[MPI_MAX_LIBRARY_VERSION_STRING]; - int vlen; - - MPI_Init(&argc, &argv); - - MPI_Get_library_version(version,&vlen); - printf("MPI version=\"%s\" (len=%d)\n",version,vlen); - -// structtests(); -// indexed_test(); -// struct_test(); - -// printf("\n\n---End of samples: Testing now---\n\n"); -#ifdef TEST_INTERNAL - printf("Using internal tests\n"); -#endif - test_simple_contig(); - test_simple_vector(); - test_simple_hvector(); - test_simple_indexed(); - test_simple_bindexed(); - test_simple_hindexed(); - test_simple_struct(); - test_complex_struct(); - test_indexed_struct(); - test_multiple(); - test_multiple_struct(); - test_packed(); - test_packed_complex(); - test_collectives(); - - MPI_Finalize(); - if (errcount) - printf("Found %d errors\n", errcount); - else - printf(">>>PASSED ALL TESTS. No errors. 
<<<\n"); - - return(errcount); -} - diff --git a/src/externals/mct/mpi-serial/tests/ctest_old.c b/src/externals/mct/mpi-serial/tests/ctest_old.c deleted file mode 100644 index e4ff3cb806f..00000000000 --- a/src/externals/mct/mpi-serial/tests/ctest_old.c +++ /dev/null @@ -1,181 +0,0 @@ - -#include -#include "mpi.h" - - - - - -main(int argc, char *argv[]) -{ - MPI_Request sreq[10], sreq2[10], rreq[10], rreq2[10]; - int sbuf[10],sbuf2[10],rbuf[10],rbuf2[10]; - int tag; - MPI_Status status[10]; - int i,j; - MPI_Comm comm2; - int flag; - MPI_Group mygroup; - char pname[MPI_MAX_PROCESSOR_NAME]; - int pnamelen; - - int position, temp; - int errcount = 0; - - printf("Time: %f\n",MPI_Wtime()); - - MPI_Initialized(&flag); - printf("MPI is initialized = %d\n",flag); - - MPI_Init(NULL,NULL); - - MPI_Get_processor_name(pname,&pnamelen); - printf("Processor name: %s (len=%d)\n",pname,pnamelen); - -#if 0 - MPI_Comm_dup(MPI_COMM_WORLD,&comm2); -#endif - -#if 0 - MPI_Comm_split(MPI_COMM_WORLD,42,99,&comm2); -#endif - -#if 1 - MPI_Comm_group(MPI_COMM_WORLD,&mygroup); - MPI_Comm_create(MPI_COMM_WORLD,mygroup,&comm2); -#endif - - MPI_Initialized(&flag); - printf("MPI is initialized = %d\n",flag); - - for (i=0; i<5; i++) - { - tag=100+i; - printf("COMWORLD Post ireceive tag %d\n",tag); - - MPI_Irecv(&rbuf[2*i],1,MPI_2INT, - 0,tag,MPI_COMM_WORLD,&rreq[i]); - - - } - - - - for (i=0; i<5; i++) - { - sbuf2[i]=1000+10*i; - tag=100+i; - printf("COM2 Post isend %d tag %d\n",sbuf2[i],tag); - MPI_Isend(&sbuf2[i],1,MPI_INT,0,tag,comm2,&sreq2[i]); - } - - - for (i=0; i<5; i++) - { - sbuf[2*i]=10*i; - sbuf[2*i+1]=10*i+1; - tag=100+(4-i); - printf("COMWORLD Post isend %d tag %d\n",sbuf[i],tag); - MPI_Isend(&sbuf[2*i],1,MPI_2INT,0,tag,MPI_COMM_WORLD,&sreq[i]); - } - - for (i=0; i < 5; i++) - { - if (sbuf[9-(2*i)] != rbuf[2*i+1] || sbuf[8-2*i] != rbuf[2*i]) - { - errcount++; - printf("Error for COMWORLD send\n"); - printf("buf[%d] = %d, rbuf= %d\n", i, sbuf[9-2*i], rbuf[2*i+1]); - printf("buf[%d] = %d, rbuf= %d\n", i, sbuf[8-2*i], rbuf[2*i]); - } - } - - printf("Time: %f\n",MPI_Wtime()); - MPI_Waitall(5,sreq,status); - MPI_Waitall(5,rreq,status); - - printf("Waiting for COMWORLD send/receives\n"); - - for (i=0; i<5; i++) - printf("tag %d rbuf= %d %d\n",status[i].MPI_TAG,rbuf[2*i],rbuf[2*i+1]); - - - for (i=0; i<5; i++) - { - tag=100+i; - printf("COM2 Post receive tag %d\n",tag); - - MPI_Irecv(&rbuf2[i],1,MPI_INT, - 0,tag,comm2,&rreq2[i]); - - if (rbuf2[i] != sbuf2[i]) - { - errcount++; - printf("Error for COM2 send %d\n", i); - printf("Found %d should be %d\n", rbuf2[i], sbuf2[i]); - } - } - - - MPI_Waitall(5,sreq2,status); - MPI_Waitall(5,rreq2,status); - - printf("Waiting for COM2 send/receive\n"); - - for (i=0; i<5; i++) - printf("tag %d rbuf= %d\n",status[i].MPI_TAG,rbuf2[i]); - - - /* - * pack/unpack - */ - - position=0; - for (i=0; i<5; i++) - { - temp=100+i; - MPI_Pack(&temp, 1, MPI_INT, sbuf, 20, &position, MPI_COMM_WORLD); - } - - MPI_Isend( sbuf, position, MPI_PACKED, 0, 0, MPI_COMM_WORLD,&sreq[0]); - - MPI_Irecv( rbuf, position, MPI_PACKED, 0, 0, MPI_COMM_WORLD, &rreq[0] ); - MPI_Waitall(1,rreq,status); - - printf("Pack/send/unpack: \n"); - - position=0; - for (i=0; i<5; i++) - { - MPI_Unpack(rbuf,20,&position,&temp,1,MPI_INT,MPI_COMM_WORLD); - printf("%d\n",temp); - } - - for (i=0; i<5; i++) - { - if (sbuf[i] != rbuf[i]) - { - errcount++; - printf("Error for pack/send/unpack\n"); - printf("Found %d should be %d\n", rbuf[i], sbuf[i]); - } - } - - MPI_Finalize(); - - - for (i=0; i<5; i++) - { - printf("Time: 
%f\n",MPI_Wtime()); - sleep(1); - } - - - if (errcount) - printf("Finished with %d errors.\n", errcount); - else - printf("No errors\n"); -} - - - diff --git a/src/externals/mct/mpi-serial/tests/ftest.F90 b/src/externals/mct/mpi-serial/tests/ftest.F90 deleted file mode 100644 index ef8681a35d1..00000000000 --- a/src/externals/mct/mpi-serial/tests/ftest.F90 +++ /dev/null @@ -1,709 +0,0 @@ -#ifdef HAVE_CONFIG_H -#include -#endif - - program test - use mpi - implicit none - integer ierr - integer ec - character*(MPI_MAX_LIBRARY_VERSION_STRING) version - integer vlen - - ec = 0 -#ifdef TEST_INTERNAL - print *, "Using internal tests" -#endif - - call mpi_init(ierr) - - call MPI_GET_LIBRARY_VERSION(version,vlen,ierr) - print *,"MPI Version '",version,"' len=",vlen - - call test_contiguous(ec) - call test_vector(ec) - call test_simple_hvector(ec) - call test_simple_indexed(ec) - call test_simple_bindexed(ec) - call test_simple_hindexed(ec) - call test_complex_indexed(ec) - call test_packed(ec) - call test_multiple(ec) - call test_multiple_indexed(ec) - call test_collectives(ec) - call test_mpi_version(ec) - - call mpi_finalize(ierr) - if (ec .eq. 0) then - print *, "PASSED ALL TESTS" - else - print *, "Errors:",ec - end if - stop - end - -!!!!!!!!!!!!!!!!!!! -! Contiguous type. Simplest example. Strings 5 -! integers together and tests their equality after -! a send operation -!!!!!!!!!!!!!!!!!!! - - subroutine test_contiguous(ec) - use mpi - integer ec - integer ierr - integer datatype - integer a(5) - integer b(5) - integer i - data a/1,2,3,4,5/ - data b/5 * 0/ - integer req - - print *, "Test Contiguous of 5 x MPI_INTEGER" - call mpi_type_contiguous(5, mpi_integer, datatype,ierr) - call mpi_type_commit(datatype, ierr) - -#ifdef TEST_INTERNAL - call copy_data2(a,1,datatype,b,1,datatype,ierr) -#else - call mpi_isend(a, 1, datatype, 0, 0, mpi_comm_world, req, ierr) - call mpi_irecv(b, 1, datatype, mpi_any_source, mpi_any_tag, & - mpi_comm_world, req, ierr) -#endif - - do i=1,5 - if (a(i) .ne. b(i)) then - print *,">>>FAILED: mpi_type_contiguous" - ec = ec+1 - return - end if - end do - - end - -!!!!!!!!!!!!!!!!!!!!!!!! -! Vector type. collect a series of indices with -! set stride from an array. -!!!!!!!!!!!!!!!!!!!!!!!! - - subroutine test_vector(ec) - use mpi - integer ec - integer ierr - integer datatype - integer a(10) != (1,2,3,4,5,6,7,8,9,0) - integer b(10) - integer check_index(6) - data a/1,2,3,4,5,6,7,8,9,10/ - data b/10 * 0/ - data check_index/1,2,4,5,7,8/ - integer i - integer req - - print *, "Test vector of MPI_INTEGER" - - call mpi_type_vector(3, 2, 3, mpi_integer, datatype, ierr) - call mpi_type_commit(datatype, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,datatype,b,1,datatype,ierr) -#else - call mpi_isend(a, 1, datatype, 0, 0, mpi_comm_world, req, ierr) - call mpi_irecv(b, 1, datatype, mpi_any_source, mpi_any_tag, & - mpi_comm_world, req, ierr) -#endif - do i=1,6 - if (a(check_index(i)) .ne. b(check_index(i))) then - print *,">>>FAILED: mpi_type_vector" - ec = ec+1 - return - end if - end do - end - -!!!!!!!!!!!!!!!!!!!!! -! Byte-addressed vector. -! values calculated with mpi_type_extent(), -! so basically we are doing the work here in the -! test program instead of in the library -!!!!!!!!!!!!!!!!!!!!! 
- - subroutine test_simple_hvector(ec) - use mpi - integer ec - integer vector_type - integer (kind=mpi_address_kind) extent - integer i - integer a(10) - integer b(10) - integer index_test(6) - integer ierr - integer req - - data a/1,2,3,4,5,6,7,8,9,10/, b/0,0,0,0,0,0,0,0,0,0/ - data index_test/1,2,5,6,9,10/ - - print *, "Vector type with stride 4 in bytes" - - call mpi_type_extent(mpi_integer, extent, ierr) - call mpi_type_hvector(3, 2, 4 * extent, mpi_integer, & - vector_type, ierr) - call mpi_type_commit(vector_type, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,vector_type, b,1,vector_type, ierr) -#else - call mpi_isend(a, 1, vector_type, 0, 0, mpi_comm_world,req,ierr) - call mpi_irecv(b, 1, vector_type, mpi_any_source, mpi_any_tag, & - mpi_comm_world, req, ierr) -#endif - do i=1,6 - if (a(index_test(i)) .ne. (b(index_test(i)))) then - print *, ">>>FAILED: test_simple_hvector" - ec = ec+1 - return - end if - end do - end subroutine - -!!!!!!!!!!!!!!!!!!!! -! indexed type. test certain indices of an array -!!!!!!!!!!!!!!!!!!!! - - subroutine test_simple_indexed(ec) - use mpi - integer ec - integer i - double complex a(15) - double complex b(15) - integer index_test(6) - integer blens(3) - integer disps(3) - integer indexed_type - integer ierr - integer req - - data a/1,2,3,4,5,6,7,8,9,10,11,12,13,14,15/ - data b/0,0,0,0,0,0,0,0,0,0,0,0,0,0,0/ - data index_test/1,6,7,11,12,13/ - data blens/2,1,3/ - data disps/5,0,10/ - print *, "Indexed type" - - call mpi_type_indexed(3, blens, disps, mpi_double_complex, & - indexed_type, ierr) - call mpi_type_commit(indexed_type, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,indexed_type,b,1,indexed_type,ierr) -#else - call mpi_isend(a, 1, indexed_type,0, 0, mpi_comm_world,req,ierr) - call mpi_irecv(b, 1, indexed_type, mpi_any_source, mpi_any_tag,& - mpi_comm_world, req, ierr) -#endif - - do i=1,6 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, ">>>FAILED: test_simple_indexed" - ec = ec+1 - return - end if - end do - end subroutine - -!!!!!!!!!!!!!!!! -! Block indexed. All blocks have same length -!!!!!!!!!!!!!!!! - - subroutine test_simple_bindexed(ec) - use mpi - integer ec - integer i - integer disps(3) - integer a(10), b(10) - integer index_test(6) - integer indexed_type - integer ierr - integer req - - data disps/0,4,7/ - data a/1,2,3,4,5,6,7,8,9,10/ - data b/0,0,0,0,0,0,0,0,0,0/ - data index_test/1,2,5,6,8,9/ - print *, "Block indexed type" - - call mpi_type_create_indexed_block(3,2,disps,mpi_integer, & - indexed_type, ierr) - call mpi_type_commit(indexed_type, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,indexed_type, b,1,indexed_type, ierr) -#else - call mpi_isend(a, 1, indexed_type,0, 0, mpi_comm_world,req,ierr) - call mpi_irecv(b, 1, indexed_type,mpi_any_source,mpi_any_tag, & - mpi_comm_world, req, ierr) -#endif - do i=1,6 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, ">>>FAILED:test_simple_bindexed" - ec = ec+1 - return - end if - end do - end subroutine - -!!!!!!!!!!!!!!!! -! test_simple_hindexed -! test equality of a byte-addressed -! type of integer array -! (disps calculated through mpi_type_extent() -!!!!!!!!!!!!!!! 
- subroutine test_simple_hindexed(ec) - use mpi - integer ec - integer i - integer a(10), b(10) - integer index_test(6) - integer blens(3) - integer(kind=mpi_address_kind) disps(3) - integer indexed_type - integer(kind=mpi_address_kind) extent - integer ierr - integer req - integer (kind=mpi_address_kind) addr, baddr - - data a/1,2,3,4,5,6,7,8,9,10/ - data b/0,0,0,0,0,0,0,0,0,0/ - data index_test/1,3,4,6,7,8/ - data blens/2,1,3/ - - call mpi_address(a(1), baddr,ierr) - call mpi_address(a(3), addr ,ierr) - disps(1) = addr - baddr - call mpi_address(a(6), addr, ierr) - disps(3) = addr - baddr -! call mpi_type_extent(mpi_integer, extent, ierr) -! disps(1) = 2*extent - disps(2) = 0 -! disps(3) = 5*extent - - - print *, "Byte addressed indexed type" - call mpi_type_hindexed(3,blens,disps, MPI_INTEGER, & - indexed_type,ierr) - call mpi_type_commit(indexed_type, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,indexed_type, b,1,indexed_type, ierr) -#else - call mpi_isend(a, 1, indexed_type,0, 0, mpi_comm_world,req,ierr) - call mpi_irecv(b, 1, indexed_type,mpi_any_source,mpi_any_tag, & - mpi_comm_world,req,ierr) -#endif - do i=1,6 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, ">>>FAILED: test_simple_hindexed" - ec = ec+1 - return - end if - end do - end subroutine - - subroutine test_complex_indexed(ec) - use mpi - integer ec - integer i - double precision a(72), b(72) - integer disps(3), blens(3) - integer cdisps(2), cblens(2) - integer index_test(8), cindex_test(3) - integer ierr - integer req - integer indexed_type, complex_indexed - - data blens/3,1,4/ - data disps/0,5,8/ - data cindex_test/1,4,5/ - data index_test/1,2,3, 6, 9,10,11,12/ - - data a/1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15, & - 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30, & - 31,32,33,34,35,36,37,38,39,40,41,42,43,44,45, & - 46,47,48,49,50,51,52,53,54,55,56,57,58,59,60, & - 61,62,63,64,65,66,67,68,69,70,71,72/ - data b/0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, & - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, & - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, & - 0,0,0,0,0,0,0,0,0,0,0,0/ - - call mpi_type_indexed(3,blens,disps, MPI_DOUBLE_PRECISION, & - indexed_type, ierr) - call mpi_type_commit(indexed_type, ierr) - - data cblens/1, 2/ - data cdisps/1, 4/ - call mpi_type_indexed(2,cblens,cdisps,indexed_type, & - complex_indexed, ierr) - call mpi_type_commit(complex_indexed, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,complex_indexed,b,1,complex_indexed,ierr) -#else - call mpi_isend(a,1,complex_indexed,0,0,mpi_comm_world,req,ierr) - call mpi_irecv(b,1,complex_indexed,mpi_any_source,mpi_any_tag,& - mpi_comm_world, req, ierr) -#endif - do i=1,3 - do j=1,8 - if (a(index_test(j)+12*cindex_test(i)) .ne. & - b(index_test(j)+12*cindex_test(i))) then - print *, ">>>FAILED: test_complex_indexed" - print *, "index ",index_test(j)+12*cindex_test(i) - print *, "Found:",b(index_test(j)+12*cindex_test(i)) - print *, "Should be:",a(index_test(j)+12*cindex_test(i)) - ec = ec+1 - end if - end do - end do - - call mpi_type_free(complex_indexed, ierr) - call mpi_type_free(indexed_type, ierr) - end subroutine -!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! test_packed() -! Creates a few variable pairs, assigns the first -! of each pair, then packs their values and unpacks -! them to the other set. -!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- subroutine test_packed(ec) - use mpi - integer ec - integer size - integer x, y - real f, g - complex c, d - character*5 a, b - character buf(100), rbuf(100) - integer blens(3) - integer(kind=mpi_address_kind) disps(3) - integer pos - integer req - - x = 10 - f = 14.333 - c = (100, 20) - a = "xyzab" - - pos = 0 - data blens/1,2,1/, disps/0,4,8/ - - print *, "Packed type " - - call mpi_pack(x, 1, mpi_integer, buf, 100, pos, 0, ierr) - call mpi_pack(f, 1, mpi_real, buf, 100, pos, 0, ierr) - call mpi_pack(c, 1, mpi_complex, buf, 100, pos, 0, ierr) - call mpi_pack(a, 5, mpi_character, buf, 100, pos, 0, ierr) -#ifdef TEST_INTERNAL - call copy_data2(buf, pos, mpi_packed, rbuf, pos, & - mpi_packed, ierr) -#else - call mpi_isend(buf, pos, mpi_packed,0,0,mpi_comm_world,req,ierr) - call mpi_irecv(rbuf, pos, mpi_packed,mpi_any_source,mpi_any_tag& - ,mpi_comm_world, req, ierr) -#endif - pos = 0; - - call mpi_unpack(rbuf, 100, pos, y, 1, mpi_integer, 0, ierr) - call mpi_unpack(rbuf, 100, pos, g, 1, mpi_real, 0, ierr) - call mpi_unpack(rbuf, 100, pos, d, 1, mpi_complex, 0, ierr) - call mpi_unpack(rbuf, 100, pos, b, 5, mpi_character, & - 0, ierr) - - if (x .ne. y .OR. f .ne. g & - .OR. c .ne. d .OR. a .ne. b) & - then - print *, ">>>FAILED: mpi_pack" - ec = ec+1 - return - end if - - end subroutine - - subroutine test_multiple(ec) - use mpi - integer ec - integer i - complex a(10) - complex b(10) - integer contig_type - integer ierr - integer req - - data a/1,2,3,4,5,6,7,8,9,10/ - data b/0,0,0,0,0,0,0,0,0,0/ - print *, "Contig type send, multiple receive" - - call mpi_type_contiguous(10, mpi_complex, contig_type, ierr) - call mpi_type_commit(contig_type, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,contig_type, b,10, mpi_complex, ierr) -#else - call mpi_isend(a, 1, contig_type,0,0,mpi_comm_world,req,ierr) - call mpi_irecv(b, 10, mpi_complex,mpi_any_source,mpi_any_tag, & - mpi_comm_world,req,ierr) -#endif - - do i=1,10 - if (a(i) .ne. b(i)) then - print *, ">>>FAILED: test_multiple" - ec = ec+1 - return - end if - end do - end subroutine - -!!!!!!!!!!!!!!!!!!!!!!!!! -! Test an indexed send with a multiple receive -!!!!!!!!!!!!!!!!!!!!!!!!! - subroutine test_multiple_indexed(ec) - use mpi - integer ec - integer i,j - complex a(75) - complex b(75) - integer index_test(6) - integer blens(3) - integer disps(3) - integer indexed_type,contig_indexed - integer ierr - integer req - - data a/ 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,& - 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,& - 31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,& - 46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,& - 61,62,63,64,65,66,67,68,69,70,71,72,73,74,75/ - data b/75*0/ - data index_test/1,6,7,11,12,13/ - data blens/1,2,3/ - data disps/0,5,10/ - print *, "Indexed type send, multiple indexed receive" - - call mpi_type_indexed(3, blens, disps, mpi_complex, & - indexed_type, ierr) - call mpi_type_commit(indexed_type, ierr) - - call mpi_type_contiguous(5, indexed_type, contig_indexed,ierr) - call mpi_type_commit(contig_indexed, ierr) -#ifdef TEST_INTERNAL - call copy_data2(a,1,contig_indexed,b,5,indexed_type,ierr) -#else - call mpi_isend(a, 1, contig_indexed,0,0,mpi_comm_world,req,ierr) - call mpi_irecv(b, 5, indexed_type,mpi_any_source,mpi_any_tag, & - mpi_comm_world,req,ierr) -#endif - do i=0,4 - do j=1,6 - if (a(index_test(j)+(13*i)) .ne. b(index_test(j)+(13*i))) then - print *, ">>>FAILED: test_multiple_indexed" - print *, " Found:",a(index_test(j)+13*i) - print *, " Expected:",b(index_test(j)+13*i) - ec = ec+1 -! 
return - end if - end do - end do - end subroutine - - subroutine test_collectives(ec) - use mpi - integer ec - integer i - integer a(10) - integer b(10) - integer disps(3) - integer blens(3) - integer itype - integer ierr - integer scount - integer rcount - integer disp - integer index_test(7) - - data scount/1/rcount/1/disp/0/ - data disps/0,5,8/ - data blens/4,2,1/ - data a/1,2,3,4,5,6,7,8,9,10/ - data b/10*0/ - data index_test/1,2,3,4,6,7,9/ - - call mpi_type_indexed(3, blens, disps, MPI_LOGICAL,& - itype, ierr) - call mpi_type_commit(itype, ierr) - - call mpi_bcast(a, scount, itype, 0, & - mpi_comm_world, ierr) - call mpi_gather(a,scount, itype, b, rcount, & - itype, 0, mpi_comm_world, ierr) - print *, "Testing mpi_gather" - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_gather failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_gatherv" - call mpi_gatherv(a, scount, itype, b, rcount, & - disp, itype, 0, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_gatherv failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_allgather" - call mpi_allgather(a, scount, itype, b, rcount, & - itype, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_allgather failed" - ec=ec+1 - end if - end do - print *, "Testing mpi_allgatherv" - call mpi_allgatherv(a, scount, itype, b, rcount, & - disp, itype, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_allgatherv failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_scatter" - call mpi_scatter(a, scount, itype, b, rcount, & - itype, 0, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_scatter failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_scatterv" - call mpi_scatterv(a, scount, disp, itype, b, & - rcount, itype, 0, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_scatterv failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_reduce" - call mpi_reduce(a, b, scount, itype, mpi_max, & - 0, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_reduce failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_allreduce" - call mpi_allreduce(a, b, scount, itype, mpi_max, & - mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_allreduce failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_alltoall" - call mpi_alltoall(a, scount, itype, b, rcount, & - itype, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_alltoall failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_alltoallv" - call mpi_alltoallv(a, scount, disp, itype, b, & - rcount, disp, itype, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_alltoallv failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_reduce_scatter" - call mpi_reduce_scatter(a, b, rcount, itype, & - mpi_max, mpi_comm_world, ierr) - do i=1,7 - if (a(index_test(i)) .ne. 
b(index_test(i))) then - print *, "mpi_reduce_scatter failed" - ec=ec+1 - end if - end do - do i=1,10 - b(i) = 0 - end do - print *, "Testing mpi_scan" - call mpi_scan(a, b, scount, itype, mpi_max, & - mpi_comm_world, ierr) - - do i=1,7 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, "mpi_scan failed" - ec=ec+1 - end if - end do - end subroutine - -!!!!!!!!!!!!!!!!!!!!!!!! -! Test MPI_VERSION -!!!!!!!!!!!!!!!!!!!!!!!! - - subroutine test_mpi_version(ec) - use mpi - integer ec - integer ierr - integer mpiv - integer mpisv - - print *, "Testing MPI_Get_Version" - - call mpi_get_version(mpiv, mpisv, ierr) - if (ierr /= MPI_SUCCESS) then - print *, "MPI_get_VERSION ierr not zero (",ierr,")" - ec = ec + 1 - else - if (mpiv /= MPI_VERSION) then - print *, "MPI_VERSION mismatch, should be ",MPI_VERSION,", found ",mpiv - ec = ec + 1 - end if - if (mpisv /= MPI_SUBVERSION) then - print *, "MPI_SUBVERSION mismatch, should be ",MPI_SUBVERSION,", found ",mpisv - ec = ec + 1 - end if - end if - end subroutine test_mpi_version diff --git a/src/externals/mct/mpi-serial/tests/ftest_internal.F90 b/src/externals/mct/mpi-serial/tests/ftest_internal.F90 deleted file mode 100644 index 9e1f6a676c6..00000000000 --- a/src/externals/mct/mpi-serial/tests/ftest_internal.F90 +++ /dev/null @@ -1,328 +0,0 @@ - program test - use mpi - implicit none - - call test_contiguous() - call test_vector() - call test_simple_hvector() - call test_simple_indexed() - call test_simple_bindexed() - call test_simple_hindexed() - call test_packed() - call test_multiple() - stop - end - -!!!!!!!!!!!!!!!!!!! -! Contiguous type. Simplest example. Strings 5 -! integers together and tests their equality after -! a send operation -!!!!!!!!!!!!!!!!!!! - - subroutine test_contiguous() - use mpi - integer ierr - integer datatype - integer a(5) - integer b(5) - integer i - data a/1,2,3,4,5/ - data b/5 * 0/ - - print *, "Test Contiguous of 5 x MPI_INTEGER" - call mpi_type_contiguous(5, mpi_integer, datatype,ierr) - - call mpi_type_commit(datatype, ierr) - - call print_typemap(datatype,ierr) - call copy_data2(a,1,datatype, b,1,datatype, ierr) - - do i=1,5 - if (a(i) .ne. b(i)) then - print *,">>>FAILED: mpi_type_contiguous" - stop - end if - end do - print *, ">>>PASSED: mpi_type_contiguous" - end - -!!!!!!!!!!!!!!!!!!!!!!!! -! Vector type. collect a series of indices with -! set stride from an array. -!!!!!!!!!!!!!!!!!!!!!!!! - - subroutine test_vector() - use mpi - integer ierr - integer datatype - integer a(10) != (1,2,3,4,5,6,7,8,9,0) - integer b(10) - integer check_index(6) - data a/1,2,3,4,5,6,7,8,9,10/ - data b/10 * 0/ - data check_index/1,2,4,5,7,8/ - integer i - - print *, "Test vector of MPI_INTEGER" - - call mpi_type_vector(3, 2, 3, mpi_integer, datatype, ierr) - call mpi_type_commit(datatype, ierr) - call print_typemap(datatype,ierr) - call copy_data2(a,1,datatype,b,1,datatype,ierr) - - do i=1,6 - if (a(check_index(i)) .ne. b(check_index(i))) then - print *,">>>FAILED: mpi_type_vector" - stop - end if - end do - print *, ">>>PASSED: mpi_type_vector" - end - -!!!!!!!!!!!!!!!!!!!!! -! Byte-addressed vector. -! values calculated with mpi_type_extent(), -! so basically we are doing the work here in the -! test program instead of in the library -!!!!!!!!!!!!!!!!!!!!! 
- - subroutine test_simple_hvector() - use mpi - integer vector_type - integer (kind=mpi_address_kind) extent - integer i - integer a(10) - integer b(10) - integer index_test(6) - integer ierr - - data a/1,2,3,4,5,6,7,8,9,10/, b/0,0,0,0,0,0,0,0,0,0/ - data index_test/1,2,5,6,9,10/ - - print *, "Vector type of 3 groups of 2 MPI_INTEGER" - print *, "Stride of 4 (in bytes)" - - call mpi_type_extent(mpi_integer, extent, ierr) - call mpi_type_hvector(3, 2, 4 * extent, mpi_integer, & - vector_type, ierr) - call mpi_type_commit(vector_type, ierr) - call print_typemap(vector_type,ierr) - call copy_data2(a,1,vector_type, b,1,vector_type,ierr) - - do i=1,7 - if (a(index_test(i)) .ne. (b(index_test(i)))) then - print *, ">>>FAILED: test_simple_hvector" - stop - end if - end do - print *, ">>>PASSED: test_simple_hvector" - end subroutine - -!!!!!!!!!!!!!!!!!!!! -! indexed type. test certain indices of an array -!!!!!!!!!!!!!!!!!!!! - - subroutine test_simple_indexed() - use mpi - integer i - complex a(15) - complex b(15) - integer index_test(6) - integer blens(3) - integer disps(3) - integer indexed_type - integer ierr - - data a/1,2,3,4,5,6,7,8,9,10,11,12,13,14,15/ - data b/0,0,0,0,0,0,0,0,0,0,0,0,0,0,0/ - data index_test/1,6,7,11,12,13/ - data blens/2,1,3/ - data disps/5,0,10/ - print *, "Indexed type" - - call mpi_type_indexed(3, blens, disps, mpi_complex, & - indexed_type, ierr) - call mpi_type_commit(indexed_type, ierr) - call print_typemap(indexed_type, ierr) - call copy_data2(a,1,indexed_type, b,1,indexed_type,ierr) - - do i=1,6 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, ">>>FAILED: test_simple_indexed" - stop - end if - end do - print *, ">>>PASSED: test_simple_indexed" - end subroutine - -!!!!!!!!!!!!!!!! -! Block indexed. All blocks have same length -!!!!!!!!!!!!!!!! - - subroutine test_simple_bindexed() - use mpi - integer i - integer disps(3) - integer a(10), b(10) - integer index_test(6) - integer indexed_type - integer ierr - - data disps/0,4,7/ - data a/1,2,3,4,5,6,7,8,9,10/ - data b/0,0,0,0,0,0,0,0,0,0/ - data index_test/1,2,5,6,8,9/ - print *, "Block indexed type" - - call mpi_type_indexed_block(3,2,disps,mpi_integer, & - indexed_type, ierr) - - call mpi_type_commit(indexed_type, ierr) - call print_typemap(indexed_type, ierr) - call copy_data2(a,1,indexed_type, b,1,indexed_type, ierr) - - do i=1,6 - if (a(index_test(i)) .ne. b(index_test(i))) then - print *, ">>>FAILED: test_simple_bindexed" - stop - end if - end do - print *, ">>>PASSED: test_simple_bindexed" - end subroutine - -!!!!!!!!!!!!!!!! -! test_simple_indexed -! test equality of a byte-addressed -! type of integer array -! (disps calculated through mpi_type_extent() -!!!!!!!!!!!!!!! - subroutine test_simple_hindexed() - use mpi - integer i - integer a(10), b(10) - integer index_test(6) - integer blens(3) - integer*8 disps(3) - integer indexed_type - integer*8 extent - integer ierr - - data a/1,2,3,4,5,6,7,8,9,10/ - data b/0,0,0,0,0,0,0,0,0,0/ - data index_test/1,3,4,6,7,8/ - data blens/2,1,3/ - - call mpi_type_extent(mpi_integer, extent, ierr) - disps(1) = 2*extent - disps(2) = 0 - disps(3) = 5*extent - - - print *, "Byte addressed indexed type" - call mpi_type_hindexed(3,blens,disps, MPI_INTEGER, & - indexed_type,ierr) - call mpi_type_commit(indexed_type, ierr) - call print_typemap(indexed_type, ierr) - call copy_data2(a,1,indexed_type, b,1,indexed_type, ierr) - - do i=1,6 - if (a(index_test(i)) .ne. 
b(index_test(i))) then - print *, ">>>FAILED: test_simple_hindexed" - stop - end if - end do - print *, ">>>PASSED: test_simple_hindexed" - end subroutine - -!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! test_packed() -! Creates a few variable pairs, assigns the first -! of each pair, then packs their values and unpacks -! them to the other set. -!!!!!!!!!!!!!!!!!!!!!!!!!!!! - subroutine test_packed() - use mpi - integer size - integer x, y - real f, g - complex c, d - character*5 a, b - character buf(100), rbuf(100) - integer blens(3) - integer(kind=mpi_address_kind) disps(3) - integer pos - - - x = 10 - f = 14.333 - c = (100, 20) - a = "xyzab" - - pos = 0 - data blens/1,2,1/, disps/0,4,8/ - - print *, "Packed type " - - call mpi_pack(x, 1, mpi_integer, buf, 100, pos, 0, ierr) - call mpi_pack(f, 1, mpi_real, buf, 100, pos, 0, ierr) - call mpi_pack(c, 1, mpi_complex, buf, 100, pos, 0, ierr) - call mpi_pack(a, 5, mpi_character, buf, 100, pos, 0, ierr) - - call copy_data2(buf, pos, mpi_packed, rbuf, pos, & - mpi_packed, ierr) - - pos = 0; - - call mpi_unpack(rbuf, 100, pos, y, 1, mpi_integer, 0, ierr) - call mpi_unpack(rbuf, 100, pos, g, 1, mpi_real, 0, ierr) - call mpi_unpack(rbuf, 100, pos, d, 1, mpi_complex, 0, ierr) - call mpi_unpack(rbuf, 100, pos, b, 5, mpi_character, & - 0, ierr) - - if (x .ne. y .OR. f .ne. g & - .OR. c .ne. d .OR. a .ne. b) & - then - print *, ">>>FAILED: mpi_pack" - stop - end if - - print *, ">>>PASSED: mpi_pack" - - end subroutine - -!!!!!!!!!!!!!!!!!!!!!!!!! -! Test an indexed send with a multiple receive -!!!!!!!!!!!!!!!!!!!!!!!!! - - subroutine test_multiple() - use mpi - integer i - complex a(15) - complex b(15) - integer index_test(6) - integer blens(3) - integer disps(3) - integer indexed_type - integer ierr - - data a/1,2,3,4,5,6,7,8,9,10,11,12,13,14,15/ - data b/0,0,0,0,0,0,0,0,0,0,0,0,0,0,0/ - data index_test/1,6,7,11,12,13/ - data blens/1,2,3/ - data disps/0,5,10/ - print *, "Indexed type" - - call mpi_type_indexed(3, blens, disps, mpi_complex, & - indexed_type, ierr) - call mpi_type_commit(indexed_type, ierr) - call copy_data2(a,1,indexed_type, b,6, mpi_complex, ierr) - - do i=1,6 - if (a(index_test(i)) .ne. b(i)) then - print *, ">>>FAILED: test_multiple" - stop - end if - end do - print *, ">>>PASSED: test_multiple" - end subroutine - diff --git a/src/externals/mct/mpi-serial/tests/ftest_old.F90 b/src/externals/mct/mpi-serial/tests/ftest_old.F90 deleted file mode 100644 index 938d4472a94..00000000000 --- a/src/externals/mct/mpi-serial/tests/ftest_old.F90 +++ /dev/null @@ -1,165 +0,0 @@ - - program test - implicit none - include "mpif.h" - - integer ier - - integer sreq(10), sreq2(10), rreq(10), rreq2(10) - integer sbuf(10), sbuf2(10), rbuf(10), rbuf2(10) - integer tag - integer status(MPI_STATUS_SIZE,10) - integer i - integer comm2; - logical flag; - character pname(MPI_MAX_PROCESSOR_NAME) - integer pnamesize - - integer temp,position - integer errcount - - errcount = 0 - - print *, 'Time=',mpi_wtime() - - call mpi_initialized(flag,ier) - print *, 'MPI is initialized=',flag - - call mpi_init(ier) - - call mpi_get_processor_name(pname,pnamesize,ier) - print *, 'proc name: "',pname(1:pnamesize),'" size:',pnamesize - - - call mpi_comm_dup(MPI_COMM_WORLD,comm2,ier) - - call mpi_initialized(flag,ier) - print *, 'MPI is initialized=',flag - - - - - do i=1,5 - tag= 100+i - print *, 'Post receive tag ',tag - - call mpi_irecv( rbuf(i),1,MPI_INTEGER,0,tag, & - MPI_COMM_WORLD,rreq(i),ier) - - end do - do i=1,5 -! tag=1100+i -! 
print *, 'Post receive tag ',tag - - call mpi_irecv( rbuf2(i),1,MPI_INTEGER, & - MPI_ANY_SOURCE, MPI_ANY_TAG, & - comm2,rreq2(i),ier) - - end do - - - do i=1,5 - sbuf(i)=10*i - tag=100+i - print *, 'Send ',sbuf(i),' tag ',tag - - call mpi_isend( sbuf(i),1,MPI_INTEGER,0,tag, & - MPI_COMM_WORLD,sreq(i),ier) - end do - - - do i=1,5 - sbuf2(i)=1000+10*i - tag=1100+i - print *, 'Send ',sbuf2(i),' tag ',tag - - call mpi_isend( sbuf2(i),1,MPI_INTEGER,0,tag, & - comm2,sreq2(i),ier) - end do - - do i=1,5 - if (sbuf(i) .ne. rbuf(i)) then - errcount = errcount+1 - print *, 'error on Send2' - print *, 'found ',sbuf2(i),' should be ',rbuf2(i) - end if - end do - - do i=1,5 - if (sbuf2(i) .ne. rbuf2(i)) then - errcount = errcount+1 - print *, 'error on Send2' - print *, 'found ',sbuf2(i),' should be ',rbuf2(i) - end if - end do - - print *, 'Time=',mpi_wtime() - call mpi_waitall(5,sreq,status,ier) - print *,'sends on MPI_COMM_WORLD done' - - call mpi_waitall(5,rreq,status,ier) - print *,'recvs on MPI_COMM_WORLD done' - - do i=1,5 - print *, 'Status source=',status(MPI_SOURCE,i), & - ' tag=',status(MPI_TAG,i) - end do - - call mpi_waitall(5,sreq2,status,ier) - print *,'sends on comm2 done' - - call mpi_waitall(5,rreq2,status,ier) - print *,'recvs on comm2 done' - - do i=1,5 - print *, 'Status source=',status(MPI_SOURCE,i), & - ' tag=',status(MPI_TAG,i) - end do - - -! pack/unpack - - position=0 - do i=1,5 - temp=100+i - call mpi_pack(temp,1,MPI_INTEGER,sbuf,20,position,MPI_COMM_WORLD,ier) - end do - - call mpi_isend(sbuf,position,MPI_PACKED,0,0,MPI_COMM_WORLD,sreq(1),ier) - call mpi_irecv(rbuf,position,MPI_PACKED,0,0,MPI_COMM_WORLD,rreq(1),ier) - call mpi_waitall(1,rreq,status,ier) - - print *,"Pack/send/unpack:" - - position=0 - do i=1,5 - call mpi_unpack( rbuf,20,position,temp,1,MPI_INTEGER, & - MPI_COMM_WORLD) - print *,temp - end do - - do i=1,5 - if (rbuf(i) .ne. sbuf(i)) then - errcount = errcount + 1 - print *,"Error for pack/send/unpack" - print *,"found ",rbuf(i)," should be ",sbuf(i) - end if - end do -! - - - call mpi_finalize(ier) - - do i=1,5 - print *, 'Time=',mpi_wtime() - call sleep(1) - end do - - if (errcount .gt. 0) then - print *,errcount," errors" - else - print *,"No errors" - end if - - end - diff --git a/src/externals/mct/mpi-serial/time.c b/src/externals/mct/mpi-serial/time.c deleted file mode 100644 index 6170009e17d..00000000000 --- a/src/externals/mct/mpi-serial/time.c +++ /dev/null @@ -1,35 +0,0 @@ - -#include -#include - - -#include "mpiP.h" - - -double MPI_Wtime(void); - - - -double FC_FUNC( mpi_wtime, MPI_WTIME )(void) -{ - return(MPI_Wtime()); -} - - - -double MPI_Wtime(void) -{ - struct timeval tv; - - if (gettimeofday(&tv,0)) - { - fprintf(stderr,"MPI_Wtime: error calling gettimeofday()\n"); - abort(); - } - - - return((double)(tv.tv_sec) + (double)(tv.tv_usec)/1e6) ; -} - - - diff --git a/src/externals/mct/mpi-serial/type.c b/src/externals/mct/mpi-serial/type.c deleted file mode 100644 index 8dd93f27414..00000000000 --- a/src/externals/mct/mpi-serial/type.c +++ /dev/null @@ -1,846 +0,0 @@ -/* - * JCY - * 07/2007 - * Derived Datatype functions for mpi-serial - */ - -#include "type.h" -#include "mpiP.h" -#include -#include -#include - -#ifdef HAVE_CONFIG_H -#include -#endif - -/* - * NOTES: All MPI_ prefixed (public) functions operate - * using the integer handle for a datatype. Most of these - * functions are wrapper functions for a different function, - * _not_ prefixed with MPI_. These functions translate the - * handle to a pointer and call the non-MPI_ func. 
- * - * Fortran bindings use FC_FUNC, as defined in mpiP.h. - */ - - -/* - * Wrapper for mpi_handle_to_ptr in handles.c - * specific for datatype handles, which may be - * predefined negative handles - */ -Datatype* mpi_handle_to_datatype(int handle) -{ - if (handle < 0) - return (Datatype*) &simpletypes[-1-handle]; - else - return (Datatype*) mpi_handle_to_ptr(handle); -} - -/* - * Calculate the epsilon value of typemap - * using the largest element in the typemap - */ - -int calc_padding(Datatype datatype) -{ - long size_max = INT_MIN; - long type_len; - int i; - //find the largest datatype size. The epsilon padding is (probably) based on this. - - for (i = 0; i < datatype->count; i++) - { - type_len = Simpletype_length(datatype->pairs[i].type); - size_max = type_len > size_max ? type_len : size_max; - } - - return size_max; -} - -/* Retrieve size of any simple type - * C sizes use sizeof the literal type - * they represent. Fortran types are those - * as defined in type.h - */ - -int Simpletype_length(Simpletype t) -{ - switch(t) - { - case SIMPLE_CHAR: - return sizeof(char); break; - case SIMPLE_SHORT: - return sizeof(short); break; - case SIMPLE_INT: - return sizeof(int); break; - case SIMPLE_LONG: - return sizeof(long); break; - case SIMPLE_UCHAR: - return sizeof(unsigned char); break; - case SIMPLE_USHORT: - return sizeof(unsigned short); break; - case SIMPLE_UINT: - return sizeof(unsigned int); break; - case SIMPLE_ULONG: - return sizeof(unsigned long); break; - case SIMPLE_FLOAT: - return sizeof(float); break; - case SIMPLE_DOUBLE: - return sizeof(double); break; - case SIMPLE_LDOUBLE: - return sizeof(long double); break; - case SIMPLE_BYTE: - return sizeof(char); break; - case SIMPLE_FINTEGER: - return FSIZE_INTEGER; break; - case SIMPLE_FREAL: - return FSIZE_REAL; break; - case SIMPLE_FDPRECISION: - return FSIZE_DPRECISION; break; - case SIMPLE_FCOMPLEX: - return FSIZE_COMPLEX; break; - case SIMPLE_FDCOMPLEX: - return FSIZE_DCOMPLEX; break; - case SIMPLE_FLOGICAL: - return FSIZE_LOGICAL; break; - case SIMPLE_FCHARACTER: - return FSIZE_CHARACTER; break; - case SIMPLE_FINTEGER1: - return 1; break; - case SIMPLE_FINTEGER2: - return 2; break; - case SIMPLE_FINTEGER4: - return 4; break; - case SIMPLE_FINTEGER8: - return 8; break; - case SIMPLE_FREAL4: - return 4; break; - case SIMPLE_FREAL8: - return 8; break; - case SIMPLE_FREAL16: - return 16; break; - case SIMPLE_FCOMPLEX8: - return 8; break; - case SIMPLE_FCOMPLEX16: - return 16; break; - case SIMPLE_FCOMPLEX32: - return 32; break; - case SIMPLE_LONGLONG: - return sizeof(long long); break; - case SIMPLE_ULONGLONG: - return sizeof(unsigned long long); break; - case SIMPLE_OFFSET: - return sizeof(MPI_Offset); break; - - default: - printf("Invalid simple type\n"); - exit(1); - } -} - -/* - * calculates the lower bound of a datatype using typemap - * (This gives no regard to MPI_LB, but rather uses only displacements) - */ -long calc_lb(Datatype type) -{ - int i; - int min_disp = INT_MAX; - typepair * tp; - - for(i =0; i < type->count; i++) - { - tp = type->pairs+i; - min_disp = tp->disp < min_disp - ? tp->disp - : min_disp; - } - return min_disp; -} - -/* - * Calculate upper bound using typemap - * (Gives no regard to MPI_UB, just calculates - * highest displacement+size of its respective data type) - */ -long calc_ub(Datatype type) -{ - int i; - long max_disp = INT_MIN; - typepair * tp; - - for(i = 0; i < type->count; i++) - { - tp = type->pairs+i; - max_disp = tp->disp + Simpletype_length(tp->type) > max_disp - ? 
tp->disp + Simpletype_length(tp->type) - : max_disp; - } - - return max_disp; -} - - -/*******************************************************/ -/* MPI_Type_struct is the most general type constructor that - * does the common work other constructors. - * All other type constructors call this function. - */ - -FC_FUNC( mpi_type_struct, MPI_TYPE_STRUCT ) - (int * count, int * blocklens, long * displacements, - int *oldtypes_ptr, int *newtype, int *ierror) -{ - *ierror=MPI_Type_struct(*count, blocklens, displacements, - oldtypes_ptr, newtype); -} - -/* Public function, wrapper for Type_struct that translates handle to - * pointer (see NOTES at top of file) - */ -int MPI_Type_struct(int count, int * blocklens, MPI_Aint * displacements, - MPI_Datatype *oldtypes, MPI_Datatype *newtype) -{ - int i; - Datatype oldtypes_ptr[count]; - Datatype * newtype_ptr; - - for (i = 0; i < count; i++) - { - oldtypes_ptr[i] = *(Datatype*) mpi_handle_to_datatype(oldtypes[i]); - } - - mpi_alloc_handle(newtype, (void**) &newtype_ptr); - - return Type_struct(count, blocklens, displacements, - oldtypes_ptr, newtype_ptr); -} - -int Type_struct(int count, int * blocklens, MPI_Aint * displacements, - Datatype *oldtypes_ptr, Datatype *newtype) -{ - int i, j, k; - Datatype temp, temp2; - int newcount; - char override_lower = 0, //whether to override - override_upper = 0; - MPI_Aint new_lb = LONG_MAX, - new_ub = LONG_MIN, - clb, cub; //calculated lb and ub - int simpletype_count = 0; //total additional blocks for malloc - MPI_Aint tmp_offset; //for contiguous blocks of type - MPI_Aint extent; - - // find the total number of elements in the typemap we need to add. - for (i = 0; i < count; i++) - { - //check for MPI_UB or MPI_LB. These types are special - // cases and will be skipped over - - temp2 = oldtypes_ptr[i]; - if (temp2->pairs[0].type == SIMPLE_LOWER) - { - //found MPI_LB. This is a candidate for the actual lb - if (new_lb > displacements[i]) - new_lb = displacements[i]; - override_lower = 1; - } - else if (temp2->pairs[0].type == SIMPLE_UPPER) - { - //same as above, but ub - if (new_ub < displacements[i]) - new_ub = displacements[i]; - override_upper = 1; - } - else - { - //this is not MPI_LB or MPI_UB - //However it may still have overriding bounds - //Test for these and add its size to the typemap. - - if (temp2->o_lb) - // this type's lb has been overridden. - // ONLY an overriding lb can be the actual lb now. - override_lower = 1; - if (temp2->o_ub) - //same as above, but ub - override_upper = 1; - - simpletype_count += blocklens[i] * oldtypes_ptr[i]->count; - } - } - temp = malloc(sizeof(Typestruct) + - ((simpletype_count-1) * sizeof(typepair))); - - temp->count = simpletype_count; - - i = 0; //old type's index - newcount = 0; //new type's index - - while (i < count) - { - tmp_offset = 0; - - temp2 = oldtypes_ptr[i]; - - //test for previous MPI_LB or MPI_UB in one of the comprising types. - //If found, skip over. 
- if (!((temp2->pairs[0].type == SIMPLE_LOWER) || - (temp2->pairs[0].type == SIMPLE_UPPER))) - { - for (j = 0; j < blocklens[i]; j++) - { - //Copy the old type's typemap and merge into the new type - //by a "flattening" process - Type_extent((Datatype) oldtypes_ptr[i], &extent); - - tmp_offset = j * extent; - - if (temp2->o_lb && temp2->lb+displacements[i]+tmp_offset < new_lb) - new_lb = temp2->lb+displacements[i]+tmp_offset; - if (temp2->o_ub && temp2->ub+displacements[i]+tmp_offset > new_ub) - { - new_ub = temp2->ub+displacements[i]+tmp_offset; - } - - for (k = 0; k < oldtypes_ptr[i]->count; k++) - { - Copy_type( (typepair*) oldtypes_ptr[i]->pairs+k, - (typepair*) (temp->pairs+newcount)); - - - ((typepair*) temp->pairs+(newcount))->disp += - displacements[i] + tmp_offset; - newcount++; - } - } - } - i++; - } - //type is NOT committed - temp->committed = 0; - - //assign upper and lower bounds here - if (override_lower) - { - //use lowest previous overridden lower bound - temp->o_lb = 1; - temp->lb = new_lb; - } - else - { - //use calculation - temp->lb = calc_lb(temp); - } - - if (override_upper) - { - temp->o_ub = 1; - temp->ub = new_ub; - } - else - { - temp->ub = calc_ub(temp); - } - - *newtype = temp; - temp = MPI_DATATYPE_NULL; - - return MPI_SUCCESS; -} - -/*******************************************************/ -/* MPI_Type_contiguous. Create count copies of a type. - * this creates arrays of the singleton arguments and use them to call - * MPI_Type_struct() - */ - -FC_FUNC( mpi_type_contiguous, MPI_TYPE_CONTIGUOUS ) - (int *count, int *oldtype, int * newtype, int * ierr) -{ - *ierr = MPI_Type_contiguous(*count, *oldtype, newtype); -} - -int MPI_Type_contiguous(int count, MPI_Datatype old, MPI_Datatype * new) -{ - int ret; - Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(old); - Datatype * new_ptr; - - mpi_alloc_handle(new, (void**) &new_ptr); - - return Type_contiguous(count, old_ptr, new_ptr); -} - -int Type_contiguous(int count, Datatype oldtype, Datatype *newtype) -{ - int i; - int blocklengths[count]; - Datatype oldtypes[count]; - MPI_Aint offsets[count]; - MPI_Aint extent; - - //each copy is strided by the extent of the datatype. - // Calculate that here. 
- Type_extent(oldtype, &extent); - for (i = 0; i < count; i++) - { - blocklengths[i] = 1; - offsets[i] = extent * i; - oldtypes[i] = oldtype; - } - return Type_struct(count, blocklengths, offsets, oldtypes, newtype); -} - -/*************************/ -/* Type_vector - */ - -FC_FUNC( mpi_type_vector, MPI_TYPE_VECTOR ) - (int * count, int * blocklen, int * stride, - int * oldtype, int * newtype, int * ierr) -{ - *ierr = MPI_Type_vector(*count, *blocklen, *stride, *oldtype, newtype); -} - -int MPI_Type_vector(int count, int blocklen, int stride, - MPI_Datatype oldtype, MPI_Datatype * newtype) -{ - Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(oldtype); - Datatype * new_ptr; - - mpi_alloc_handle(newtype, (void**) &new_ptr); - - return Type_vector(count, blocklen, stride, old_ptr, new_ptr); -} - - -int Type_vector(int count, int blocklen, int stride, - Datatype oldtype, Datatype *newtype) -{ - MPI_Aint extent; - MPI_Aint bstride; - - Type_extent(oldtype, &extent); - bstride = stride * extent; - - return Type_hvector(count, blocklen, bstride, oldtype, newtype); -} - -/*******************************************************/ - -FC_FUNC( mpi_type_hvector, MPI_TYPE_HVECTOR ) - (int * count, long * blocklen, long * stride, - int * oldtype, int * newtype, int * ierr) -{ - *ierr = MPI_Type_hvector(*count, *blocklen, *stride, *oldtype, newtype); -} - -int MPI_Type_hvector(int count, int blocklen, MPI_Aint stride, - MPI_Datatype oldtype, MPI_Datatype * newtype) -{ - Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(oldtype); - Datatype * new_ptr; - - mpi_alloc_handle(newtype, (void**) &new_ptr); - return Type_hvector(count, blocklen, stride, old_ptr, new_ptr); -} - -FC_FUNC( mpi_type_create_hvector, MPI_TYPE_CREATE_HVECTOR ) - (int * count, long * blocklen, long * stride, - int * oldtype, int * newtype, int * ierr) -{ - *ierr = MPI_Type_create_hvector(*count, *blocklen, *stride, *oldtype, newtype); -} - -int MPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride, - MPI_Datatype oldtype, MPI_Datatype * newtype) -{ - Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(oldtype); - Datatype * new_ptr; - - mpi_alloc_handle(newtype, (void**) &new_ptr); - return Type_hvector(count, blocklen, stride, old_ptr, new_ptr); -} - - -int Type_hvector(int count, int blocklen, MPI_Aint stride, - Datatype oldtype, Datatype *newtype) -{ - int i; - int blocklengths[count]; - Datatype oldtypes[count]; - MPI_Aint offsets[count]; - MPI_Aint extent; - - Type_extent(oldtype, &extent); - for (i = 0; i < count; i++) - { - blocklengths[i] = blocklen; - offsets[i] = stride * i; - oldtypes[i] = oldtype; - } - - return Type_struct(count, blocklengths, offsets, oldtypes, newtype); -} - -/*******************************************************/ - -FC_FUNC( mpi_type_indexed, MPI_TYPE_INDEXED ) - (int * count, int * blocklens, int * displacements, - int * oldtype, int * newtype, int * ierr) -{ - *ierr = MPI_Type_indexed(*count, blocklens, displacements, *oldtype, newtype); -} - - -int MPI_Type_indexed(int count, int *blocklens, int *displacements, - MPI_Datatype oldtype, MPI_Datatype * newtype) -{ - Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(oldtype); - Datatype * new_ptr; - - mpi_alloc_handle(newtype, (void**) &new_ptr); - return Type_indexed(count, blocklens, displacements, old_ptr, new_ptr); -} - -int Type_indexed(int count, int *blocklens, int *displacements, - Datatype oldtype, Datatype *newtype) -{ - int i; - MPI_Aint extent; - MPI_Aint bdisps[count]; - - for (i = 0; i < count; i++) - { - 
Type_extent(oldtype, &extent); - bdisps[i] = displacements[i] * extent; - } - - return Type_hindexed(count, blocklens, bdisps, oldtype, newtype); -} - -/*******************************************************/ - -FC_FUNC( mpi_type_create_indexed_block, MPI_TYPE_CREATE_INDEXED_BLOCK ) - (int * count, int * blocklen, int * displacements, - int * oldtype, int * newtype, int * ierr) -{ - *ierr = MPI_Type_create_indexed_block(*count, *blocklen, displacements, - *oldtype, newtype); -} - -int MPI_Type_create_indexed_block(int count, int blocklen, int *displacements, - MPI_Datatype oldtype, MPI_Datatype * newtype) -{ - int ret; - Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(oldtype); - Datatype * new_ptr; - - mpi_alloc_handle(newtype, (void**) &new_ptr); - return Type_create_indexed_block(count, blocklen, displacements, old_ptr, new_ptr); -} - -int Type_create_indexed_block(int count, int blocklen, int *displacements, - Datatype oldtype, Datatype *newtype) -{ - int i; - int blocklens[count]; - - for (i = 0; i < count; i++) - blocklens[i] = blocklen; - - return Type_indexed(count, blocklens, displacements, oldtype, newtype); -} - -/*******************************************************/ - -FC_FUNC( mpi_type_hindexed, MPI_TYPE_HINDEXED ) - (int * count, int * blocklens, MPI_Aint * displacements, - int * oldtype, int * newtype, int * ierr) -{ - *ierr = MPI_Type_hindexed(*count, blocklens, displacements, - *oldtype, newtype); -} - -int MPI_Type_hindexed(int count, int *blocklens, MPI_Aint * disps, - MPI_Datatype oldtype, MPI_Datatype * newtype) -{ - Datatype old_ptr = *(Datatype*) mpi_handle_to_datatype(oldtype); - Datatype * new_ptr; - - mpi_alloc_handle(newtype, (void**) &new_ptr); - return Type_hindexed(count, blocklens, disps, old_ptr, new_ptr); -} - -int Type_hindexed(int count, int *blocklens, MPI_Aint *displacements, - Datatype oldtype, Datatype *newtype) -{ - int i; - Datatype oldtypes[count]; - - for (i = 0; i < count; i++) - { - oldtypes[i] = oldtype; - } - - return Type_struct(count, blocklens, displacements, oldtypes, newtype); -} - - -/*******************************************************/ - -int Type_dup(Datatype oldtype, Datatype *newtype) -{ - int i; - //create a deep copy of given Datatype - newtype = malloc(sizeof(oldtype)); - (*newtype)->committed = oldtype->committed; - (*newtype)->lb = oldtype->lb; - (*newtype)->ub = oldtype->ub; - (*newtype)->o_lb = oldtype->o_lb; - (*newtype)->o_ub = oldtype->o_ub; - - for (i = 0; i < oldtype->count; i++) - { - Copy_type((typepair*) oldtype->pairs + i, - (typepair*) (*newtype)->pairs + i ); - } -} - -/* copy_type: Creates a deep copy of source typepair into dest - */ -int Copy_type(typepair *source, typepair *dest) -{ - dest->type = source->type; - dest->disp = source->disp; -} - -/* MPI_Type_size: Returns the sum of the lengths of each simple - * type that makes up the data type argument - */ -FC_FUNC( mpi_type_size, MPI_TYPE_SIZE )(int * type, int * size, int * ierr) -{ - *ierr=MPI_Type_size(*type, size); -} - -int MPI_Type_size(MPI_Datatype type, int * size) -{ - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(type); - return Type_size(type_ptr, size); -} - -int Type_size(Datatype type, int * size) -{ - int i; - *size = 0; - for (i=0; i < type->count; i++) - *size += Simpletype_length(type->pairs[i].type); - - - return MPI_SUCCESS; -} -/* MPI_Type_lb: Returns the lower bound (which may be overridden - * or calculated) - */ -FC_FUNC( mpi_type_lb, MPI_TYPE_LB )(int * type, long * lb, int * ierr) -{ - *ierr = 
MPI_Type_lb(*type, lb); -} - -int MPI_Type_lb(MPI_Datatype type, MPI_Aint * lb) -{ - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(type); - - return Type_lb(type_ptr, lb); -} - -int Type_lb(Datatype type, MPI_Aint * lb) -{ - *lb = type->lb; -} - -/* MPI_Type_ub: Return upper bound (which may be overridden - * or calculated - */ -FC_FUNC( mpi_type_ub, MPI_TYPE_UB )(int * type, long * ub, int * ierr) -{ - *ierr = MPI_Type_ub(*type, ub); -} - -int MPI_Type_ub(MPI_Datatype type, MPI_Aint * ub) -{ - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(type); - - return Type_ub(type_ptr, ub); -} - -int Type_ub(Datatype type, MPI_Aint * ub) -{ - *ub = type->ub; -} - -/* MPI_Get_address - * MPI_Address - * Return address of an object - */ -FC_FUNC( mpi_get_address, MPI_ADDRESS )(void * loc, long * address, int * ierr) -{ - *ierr = FGet_address(loc, address); -} - -FC_FUNC( mpi_address, MPI_ADDRESS )(void * loc, long * address, int * ierr) -{ - *address = (long) loc; - *ierr = FGet_address(loc, address); -} - -int FGet_address(void * loc, long * address, int * ierr) -{ - *address = (long) loc; - return MPI_SUCCESS; -} - -int MPI_Address(void * loc, MPI_Aint * address) -{ - return MPI_Get_address(loc, address); -} - -int MPI_Get_address(void * loc, MPI_Aint * address) -{ - *address = (MPI_Aint) loc; - return MPI_SUCCESS; -} - -/* MPI_Type_extent: return ub-lb, plus padding - */ -FC_FUNC( mpi_type_extent, MPI_TYPE_EXTENT)(int * type, long * extent, int * ierr) -{ - *ierr = MPI_Type_extent(*type, extent); -} - -int MPI_Type_extent(MPI_Datatype type, MPI_Aint * extent) -{ - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(type); - - return Type_extent(type_ptr, extent); -} - -int Type_extent(Datatype datatype, MPI_Aint * extent) -{ - - if (!(datatype->o_lb || datatype->o_ub)) - { - int epsilon = calc_padding(datatype); - //current epsilon value is based off of largest datatype size - int mod = (datatype->ub - datatype->lb) % epsilon; - if (mod == 0) - epsilon = 0; - else - epsilon = epsilon - mod; - *extent = (datatype->ub - datatype->lb) + epsilon; - } - else - { - *extent = datatype->ub - datatype->lb; - } - - return MPI_SUCCESS; -} - -/* True_extent returns an extent based only on - * calculated upper and lower bound, regardless of any - * override using MPI_LB or MPI_UB - */ -int Type_get_true_extent(Datatype type, MPI_Aint * extent) -{ - long epsilon = calc_padding(type); - long ub = calc_ub(type); - long lb = calc_lb(type); - //current epsilon value is based off of largest datatype size - long mod = (ub - lb) % epsilon; - if (mod == 0) - epsilon = 0; - else - epsilon = epsilon - mod; - *extent = (ub - lb) + epsilon; - - return MPI_SUCCESS; -} - -/***********************/ - -FC_FUNC( mpi_type_commit, MPI_TYPE_COMMIT )(int * datatype, int * ierr) -{ - *ierr = MPI_Type_commit(datatype); -} - -int MPI_Type_commit(MPI_Datatype * datatype) -{ - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(*datatype); - (type_ptr)->committed = 1; - - return MPI_SUCCESS; -} - -/**********************/ -FC_FUNC( mpi_type_free, MPI_TYPE_FREE )(int * datatype, int * ierr) -{ - *ierr = MPI_Type_free(datatype); -} - -int MPI_Type_free(MPI_Datatype * datatype) -{ - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(*datatype); - free(type_ptr); - type_ptr = MPI_DATATYPE_NULL; - - mpi_free_handle(*datatype); - - return MPI_SUCCESS; -} - -/* Print_typemap is used in test programs only when - * --enable-test-internal is enabled in configure. 
- */ - -#ifdef TEST_INTERNAL -FC_FUNC( print_typemap, PRINT_TYPEMAP )(int * type, int * ierr) -{ - *ierr = print_typemap(*type); -} - -int print_typemap(MPI_Datatype type) -{ - Datatype type_ptr = *(Datatype*) mpi_handle_to_datatype(type); - - return Pprint_typemap(type_ptr); -} - -int Pprint_typemap(Datatype type) -{ - int i; - MPI_Aint extent; - Type_extent(type, &extent); - - printf("Type with %d type pairs.\n>> lb is %d\n>> ub is %d\n>>" - "Extent is %d\n>>Epsilon based on %d\nTypemap: \n{", - type->count, type->lb, type->ub, extent, calc_padding(type)); - - for (i = 0; i < type->count; i++) - { - printf("(t%d:%d, o%d)", type->pairs[i].type, - Simpletype_length(type->pairs[i].type), - type->pairs[i].disp); - - if (i != type->count-1) - printf(", "); - } - printf("}\n"); - - return MPI_SUCCESS; -} -#endif //TEST_INTERNAL - diff --git a/src/externals/mct/mpi-serial/type.h b/src/externals/mct/mpi-serial/type.h deleted file mode 100644 index cd92b78f1b7..00000000000 --- a/src/externals/mct/mpi-serial/type.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef TYPE_H -#define TYPE_H - -/* type.h */ -/* defines interface and types used for mpi-serial user-defined datatypes */ - -#include "mpiP.h" - -//for Fortran type sizes -#ifdef HAVE_CONFIG_H -#include -#endif - -//predefined type value used in typemap -typedef int Simpletype; - -typedef struct -{ - long disp; - Simpletype type; -} typepair; - -typedef struct -{ - int count; - long ub; - long lb; - int committed; //type has been committed - int o_lb; //overridden lower/upper bound - int o_ub; // " - /* pairs[] is size 2 because of predefined types - * such as MPI_2INT that have 2 typemap entries - * upon initialization. - */ - typepair pairs[2]; -} Typestruct; - -typedef Typestruct* Datatype; - -//Simpletype constants -#define SIMPLE_CHAR 0 -#define SIMPLE_SHORT 1 -#define SIMPLE_INT 2 -#define SIMPLE_LONG 3 -#define SIMPLE_UCHAR 4 -#define SIMPLE_USHORT 5 -#define SIMPLE_UINT 6 -#define SIMPLE_ULONG 7 -#define SIMPLE_FLOAT 8 -#define SIMPLE_DOUBLE 9 -#define SIMPLE_LDOUBLE 10 -#define SIMPLE_BYTE 11 -#define SIMPLE_LOWER 12 -#define SIMPLE_UPPER 13 -#define SIMPLE_FINTEGER 14 -#define SIMPLE_FREAL 15 -#define SIMPLE_FDPRECISION 16 -#define SIMPLE_FCOMPLEX 17 -#define SIMPLE_FDCOMPLEX 18 -#define SIMPLE_FLOGICAL 19 -#define SIMPLE_FCHARACTER 20 - -#define SIMPLE_FINTEGER1 21 -#define SIMPLE_FINTEGER2 22 -#define SIMPLE_FINTEGER4 23 -#define SIMPLE_FINTEGER8 24 -#define SIMPLE_FINTEGER16 25 - -#define SIMPLE_FREAL4 26 -#define SIMPLE_FREAL8 27 -#define SIMPLE_FREAL16 28 - -#define SIMPLE_FCOMPLEX8 29 -#define SIMPLE_FCOMPLEX16 30 -#define SIMPLE_FCOMPLEX32 31 - -#define SIMPLE_LONGLONG 32 -#define SIMPLE_ULONGLONG 33 - -#define SIMPLE_OFFSET 34 - -//internal type functions -int Simpletype_length(Simpletype s); - -//testing only -int print_typemap(MPI_Datatype in); - - -/* - * Fortran type sizes - * - * If config.h is used and the user has specified - * sizes using --enable-fort-real and --enable-fort-double - * args, they will be used here. Otherwise just take a shot - * in the dark? 
- * - */ - -#ifdef CONFIG_FORT_REAL -#define FSIZE_REAL CONFIG_FORT_REAL -#else -#define FSIZE_REAL 4 //guess something reasonable -#endif - -#ifdef CONFIG_FORT_DOUBLE -#define FSIZE_DPRECISION CONFIG_FORT_DOUBLE -#else -#define FSIZE_DPRECISION 8 -#endif - -#define FSIZE_INTEGER 4 -#define FSIZE_COMPLEX 2*FSIZE_REAL -#define FSIZE_DCOMPLEX 2*FSIZE_DPRECISION -#define FSIZE_LOGICAL 4 -#define FSIZE_CHARACTER 1 - -const extern Datatype simpletypes[]; -Datatype* mpi_handle_to_datatype(int handle); - -extern int Unpack(void * inbuf, int insize, int * position, void *outbuf, - int outcount, Datatype type, Comm* comm); -extern int Pack(void *inbuf, int incount, Datatype type, - void *outbuf, int outsize, int *position, Comm * comm); -#endif /* TYPE_H */ diff --git a/src/externals/mct/mpi-serial/type_const.c b/src/externals/mct/mpi-serial/type_const.c deleted file mode 100644 index fcb6ed4e46d..00000000000 --- a/src/externals/mct/mpi-serial/type_const.c +++ /dev/null @@ -1,189 +0,0 @@ -#include "type.h" - - /* Here are the statically initialized structs for the predefined datatypes. - */ - - //C type structs - Typestruct TSchar = {.count=1, .lb=0, .ub=sizeof(char), - .committed=1, .o_lb=0, .o_ub=0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_CHAR }}; - Typestruct TSshort = {.count=1, .lb=0, .ub=sizeof(short), - .committed=1, .o_lb=0, .o_ub=0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_SHORT }}; - Typestruct TSint = {.count = 1, .lb = 0, .ub=sizeof(int), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_INT }}; - Typestruct TSlong = {.count = 1, .lb = 0, .ub = sizeof(long), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_LONG }}; - Typestruct TSuchar = {.count = 1, .lb = 0, .ub=sizeof(unsigned char), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_UCHAR }}; - Typestruct TSushort = {.count = 1, .lb = 0, .ub=sizeof(unsigned short), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_USHORT }}; - Typestruct TSuint = {.count = 1, .lb = 0, .ub = sizeof(unsigned int), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_UINT }}; - Typestruct TSulong = {.count = 1, .lb = 0, .ub = sizeof(unsigned long), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_ULONG }}; - Typestruct TSfloat = {.count = 1, .lb = 0, .ub = sizeof(float), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_FLOAT }}; - Typestruct TSdouble = {.count = 1, .lb = 0, .ub = sizeof(double), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_DOUBLE }}; - Typestruct TSldouble = {.count = 1, .lb = 0, .ub = sizeof(long double), - .committed=1,.o_lb = 0, .o_ub = 0, .pairs[0] = - {.disp = 0, .type = (Simpletype) SIMPLE_LDOUBLE }}; - - //Cross-language types - Typestruct TSbyte = { .count = 1, .lb = 0, .ub = sizeof(char), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_BYTE } }; - Typestruct TSpacked = { .count = 1, .lb = 0, .ub = sizeof(char), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_BYTE } }; - Typestruct TSlower = { .count = 1, .lb = 0, .ub = 0, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_LOWER } }; - Typestruct TSupper = { .count = 1, .lb = 0, .ub = 0, 
.committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_UPPER } }; - - //Fortran type structs - Typestruct TSinteger = { .count = 1, .lb = 0, .ub = FSIZE_INTEGER, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FINTEGER } }; - Typestruct TSreal = { .count = 1, .lb = 0, .ub = FSIZE_REAL, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FREAL } }; - Typestruct TSdprecision = { .count = 1, .lb = 0, .ub = FSIZE_DPRECISION, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FDPRECISION } }; - Typestruct TScomplex = { .count = 1, .lb = 0, .ub = FSIZE_COMPLEX, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FCOMPLEX } }; - Typestruct TSdcomplex = { .count = 1, .lb = 0, .ub = FSIZE_DCOMPLEX, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FDCOMPLEX } }; - Typestruct TSlogical = { .count = 1, .lb = 0, .ub = FSIZE_LOGICAL, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FLOGICAL } }; - Typestruct TScharacter = { .count = 1, .lb = 0, .ub = FSIZE_CHARACTER, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FCHARACTER } }; - - /*Reduction function types (C) - */ - Typestruct TSfloat_int = { .count = 2, .lb = 0, .ub = sizeof(struct {float a; int b;}), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FLOAT }, - .pairs[1] = { .disp=sizeof(float), .type = (Simpletype) SIMPLE_INT}}; - Typestruct TSdouble_int = { .count = 2, .lb = 0, .ub = sizeof(struct {double a; int b;}), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_DOUBLE }, - .pairs[1] = { .disp=sizeof(double), .type = (Simpletype) SIMPLE_INT}}; - Typestruct TSlong_int = { .count = 2, .lb = 0, .ub = sizeof(struct {long a; int b;}), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_LONG }, - .pairs[1] = { .disp=sizeof(long), .type = (Simpletype) SIMPLE_INT}}; - Typestruct TS2int = { .count = 2, .lb = 0, .ub = 2*sizeof(int), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_INT }, - .pairs[1] = { .disp=sizeof(int), .type = (Simpletype) SIMPLE_INT}}; - Typestruct TSshort_int = { .count = 2, .lb = 0, .ub = sizeof(struct {short a; int b;}), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_SHORT }, - .pairs[1] = { .disp=sizeof(int), .type = (Simpletype) SIMPLE_INT}}; - Typestruct TSldouble_int = { .count = 2, .lb = 0, .ub = sizeof(struct {long double a; int b;}), .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_LDOUBLE }, - .pairs[1] = { .disp=sizeof(long double), .type = (Simpletype) SIMPLE_INT}}; - - /* Reduction function types (Fortran) - */ - Typestruct TS2real = { .count = 2, .lb = 0, .ub = 2*FSIZE_REAL, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FREAL }, - .pairs[1] = { .disp=FSIZE_REAL, .type = (Simpletype) SIMPLE_FREAL}}; - Typestruct TS2dprecision = { .count = 2, .lb = 0, .ub = 2*FSIZE_DPRECISION, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FDPRECISION }, - .pairs[1] = { .disp=FSIZE_DPRECISION, .type = (Simpletype) SIMPLE_FDPRECISION}}; - Typestruct 
TS2integer = { .count = 2, .lb = 0, .ub = 2*FSIZE_INTEGER, .committed = 1, - .o_lb = 0, .o_ub = 0, .pairs[0] = { .disp = 0, .type = (Simpletype) SIMPLE_FINTEGER }, - .pairs[1] = { .disp=FSIZE_INTEGER, .type = (Simpletype) SIMPLE_FINTEGER}}; - - - /* Fortran sized types - */ - - Typestruct TSinteger1 = {.count = 1, .lb = 0, .ub=1, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FINTEGER1 }}; - - Typestruct TSinteger2 = {.count = 1, .lb = 0, .ub=2, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FINTEGER2 }}; - - Typestruct TSinteger4 = {.count = 1, .lb = 0, .ub=4, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FINTEGER4 }}; - - Typestruct TSinteger8 = {.count = 1, .lb = 0, .ub=8, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FINTEGER8 }}; - - Typestruct TSinteger16 = {.count = 1, .lb = 0, .ub=16, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FINTEGER16 }}; - - - Typestruct TSreal4 = {.count = 1, .lb = 0, .ub=4, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FREAL4 }}; - - Typestruct TSreal8 = {.count = 1, .lb = 0, .ub=8, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FREAL8 }}; - - Typestruct TSreal16 = {.count = 1, .lb = 0, .ub=16, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FREAL16 }}; - - Typestruct TScomplex8 = {.count = 1, .lb = 0, .ub=8, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FCOMPLEX8 }}; - - Typestruct TScomplex16 = {.count = 1, .lb = 0, .ub=16, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FCOMPLEX16 }}; - - Typestruct TScomplex32 = {.count = 1, .lb = 0, .ub=32, - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_FCOMPLEX32 }}; - - /* Additions - */ - -Typestruct TSlonglong = {.count = 1, .lb = 0, .ub=sizeof(long long), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_LONGLONG }}; - -Typestruct TSulonglong = {.count = 1, .lb = 0, .ub=sizeof(unsigned long long), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_ULONGLONG }}; - -Typestruct TSoffset = {.count = 1, .lb = 0, .ub=sizeof(MPI_Offset), - .committed=1, .o_lb = 0, .o_ub = 0, .pairs[0]= - {.disp = 0, .type = (Simpletype) SIMPLE_OFFSET }}; - - - - /* RML NOTE: the order and numbering of the elements of simpletypes[] MUST match - * the values for the MPI type constants e.g. MPI_INT - * This should be coded in a better way to avoid human error. 
- */ - - const Datatype simpletypes[64] = - {&TSchar , &TSshort , &TSint , &TSlong, - &TSuchar , &TSushort , &TSuint , &TSulong, //4 - &TSfloat , &TSdouble , &TSldouble , &TSbyte, //8 - &TSpacked , &TSlower , &TSupper , &TSinteger, //12 - &TSreal , &TSdprecision, &TScomplex , &TSdcomplex, //16 - &TSlogical , &TScharacter , &TS2real , &TS2dprecision,//20 - &TS2integer, &TSfloat_int , &TSdouble_int , &TSlong_int, //24 - &TS2int , &TSshort_int , &TSldouble_int, &TSinteger1, //28 - &TSinteger2, &TSinteger4 , &TSinteger8 , &TSinteger16, //32 - &TSreal4 , &TSreal8 , &TSreal16 , &TScomplex8, //36 - &TScomplex16, &TScomplex32, &TSlonglong , &TSulonglong, //40 - &TSoffset - }; - - - /* optional datatypes (Fortran) MPI_INTEGER1 MPI_INTEGER2 MPI_INTEGER4 MPI_REAL2 MPI_REAL4 MPI_REAL8 - - /* optional datatypes (C) MPI_LONG_LONG_INT */ diff --git a/src/externals/mct/protex/protex b/src/externals/mct/protex/protex deleted file mode 100755 index 000708e3106..00000000000 --- a/src/externals/mct/protex/protex +++ /dev/null @@ -1,879 +0,0 @@ -#!/usr/bin/perl -#BOP -# -# !ROUTINE: ProTeX v. 2.00 - Translates DAO Prologues to LaTeX -# -# !INTERFACE: -# protex [-hbACFS] ] [+-nlsxf] [src_file(s)] -# -# !DESCRIPTION: -# Perl filter to produce a \LaTeX compatible document -# from a DAO Fortran source code with standard Pro\TeX -# prologues. If source files are not specified it -# reads from stdin; output is always to stdout. -# -# \noindent -# {\bf Command Line Switches:} \vspace{0.2cm} -# -# \begin{center} -# \begin{tabular}{|c|l|} \hline \hline -# -h & Help mode: list command line options \\ \hline -# -b & Bare mode, meaning no preamble, etc. \\ \hline -# +/-n & New Page for each subsection (wastes paper) \\ \hline -# +/-l & Listing mode, default is prologues only \\ \hline -# +/-s & Shut-up mode, i.e., ignore any code from BOC to EOC \\ \hline -# +/-x & No LaTeX mode, i.e., put !DESCRIPTION: in verbatim mode \\ \hline -# +/-f & No source file info \\ \hline -# -A & Ada code \\ \hline -# -C & C++ code \\ \hline -# -F & F90 code (default) \\ \hline -# -S & Shell script \\ \hline \hline -# \end{tabular} -# \end{center} -# -# The options can appear in any order. The options, -h and -b, affect -# the input from all files listed on command-line input. Each of the -# remaining options effects only the input from the files listed after -# the option and prior to any overriding option. The plus sign -# turns off the option. For example, the command-line input, -# \bv -# protex -bnS File1 -F File2.f +n File3.f -# \ev -# will cause the option, {\tt -n} to affect the input from the files, -# {\tt File} and {\tt File2.f}, but not from {\tt File3.f}. The -# {\tt -S} option is implemented for {\tt File1} but is overridden by -# the {\tt -F} for files {\tt File2.f} and {\tt File3.f}. -# -# -# !SEE ALSO: -# For a more detailed description of ProTeX functionality, -# DAO Prologue and other conventions, consult: -# -# Sawyer, W., and A. da Silva, 1997: ProTeX: A Sample -# Fortran 90 Source Code Documentation System. -# DAO Office Note 97-11 -# -# -# !REVISION HISTORY: -# -# 20Dec1995 da Silva First experimental version -# 10Nov1996 da Silva First internal release (v1.01) -# 28Jun1997 da Silva Modified so that !DESCRIPTION can appear after -# !INTERFACE, and !INPUT PARAMETERS etc. changed to italics. 
-# 02Jul1997 Sawyer Added shut-up mode -# 20Oct1997 Sawyer Added support for shell scripts -# 11Mar1998 Sawyer Added: file name, date in header, C, script support -# 05Aug1998 Sawyer Fixed LPChang-bug-support-for-files-with-underscores -# 10Oct1998 da Silva Introduced -f option for removing source file info -# from subsection, etc. Added help (WS). -# 06Dec1999 C. Redder Added LaTeX command "\label{sec:prologues}" just -# after the beginning of the proglogue section. -# 13Dec1999 C. Redder Increased flexbility in command-line -# interface. The options can appear in any -# order which will allow the user to implement -# options for select files. -# 01Feb1999 C. Redder Added \usepackage commands to preamble of latex -# document to include the packages amsmath, epsfig -# and hangcaption. -# 10May2000 C. Redder Revised LaTeX command "\label{sec:prologues}" -# to "\label{app:ProLogues}" -# 24May2001 da Silva Added !PARAMETERS/!REURN VALUE: keywords for CAM. -# -#EOP -#---------------------------------------------------------------------------- - -# Keep this if you don't know what it does... -# ------------------------------------------- - $[ = 1; # set array base to 1 - $, = ' '; # set output field separator - $\ = "\n"; # set output record separator - -# Set valid options lists -# ----------------------- - $GlobOptions = 'hb'; # Global options (i.e for all files) - $LangOptions = 'ACFS'; # Options for setting programming languages - $SwOptions = 'flnsx'; # Options that can change for each input - # file - $RegOptions = "$GlobOptions$LangOptions"; - # Scan for global options until first first - # file is processed. - -# Scan for global options -# ----------------------- - $NFiles = 0; -Arg: - foreach $arg (@ARGV) { - $option = &CheckOpts ( $arg, $RegOptions, $SwOptions ) + 1; - if ( $option ) { - $rc = &GetOpts ( $arg, $GlobOptions ); - next Arg; } - - else { $NFiles++; -}# end if -}# end foreach - -# If all inut arguments are options, then assume the -# filename, "-", for the standard input -# -------------------------------------------------- - if ( $NFiles == 0 ) { push (@ARGV, "-"); } - -# Implement help option -# --------------------- - if ( $opt_h ) { - &print_help(); - exit(); -}#end if - -# Optional Prologue Keywords -# -------------------------- - @keys = ( "!INTERFACE:", - "!USES:", - "!PUBLIC TYPES:", - "!PUBLIC MEMBER FUNCTIONS:", - "!PUBLIC DATA MEMBERS:", - "!DEFINED PARAMETERS:", - "!PARAMETERS:", - "!INPUT PARAMETERS:", - "!INPUT/OUTPUT PARAMETERS:", - "!OUTPUT PARAMETERS:", - "!RETURN VALUE:", - "!REVISION HISTORY:", - "!BUGS:", - "!SEE ALSO:", - "!SYSTEM ROUTINES:", - "!FILES USED:", - "!REMARKS:", - "!TO DO:", - "!CALLING SEQUENCE:", - "!AUTHOR:", - "!CALLED FROM:", - "!LOCAL VARIABLES:" ); - -# Initialize these for clarity -# ---------------------------- - $intro = 0; # doing introduction? - $prologue = 0; # doing prologue? - $first = 1; # first prologue? - $source = 0; # source code mode? - $verb = 0; # verbatim mode? - $tpage = 0; # title page? - $begdoc = 0; # has \begin{document} been written? - -# Initial LaTeX stuff -# ------------------- - &print_notice(); - &print_preamble(); # \documentclass, text dimensions, etc. 
- &print_macros(); # short-hand LaTeX macros - -# Main loop -- for each command-line argument -# ------------------------------------------- -ARG: - foreach $arg (@ARGV) { - -# Scan for non-global command-line options -# ---------------------------------------- - $option = &CheckOpts ( $arg, $RegOptions, $SwOptions, "quiet" ) + 1; - if ( $option ) { - &GetOpts ( $arg, $SwOptions ); - &SetOpt ( $arg, $LangOptions ); - next ARG; - -}# end if - -# Determine the type of code, set corresponding search strings -# ------------------------------------------------------------ -# if ( $opt_F ) { # FORTRAN - $comment_string = '!'; # ------- - $boi_string = '!BOI'; - $eoi_string = '!EOI'; - $bop_string = '!BOP'; - $eop_string = '!EOP'; - $boc_string = '!BOC'; - $eoc_string = '!EOC'; -#}# end if - - if ( $opt_A ) { # ADA - $comment_string = '--'; # --- - $boi_string = '--BOI'; - $eoi_string = '--EOI'; - $bop_string = '--BOP'; - $eop_string = '--EOP'; - $boc_string = '--BOC'; - $eoc_string = '--EOC'; -}# end if - - if ( $opt_C ) { - $comment_string = '//'; # C - $boi_string = '//BOI'; # - - $eoi_string = '//EOI'; - $bop_string = '//BOP'; - $eop_string = '//EOP'; - $boc_string = '//BOC'; - $eoc_string = '//EOC'; -}# end if - - if ( $opt_S ) { # Script - $comment_string = '#'; # ------ - $boi_string = '#BOI'; - $eoi_string = '#EOI'; - $bop_string = '#BOP'; - $eop_string = '#EOP'; - $boc_string = '#BOC'; - $eoc_string = '#EOC'; -}# end if - -# Set file name parameters -# ------------------------ - $InputFile = $arg; - @all_path_components = split( /\//, $InputFile ); - $FileBaseName = pop ( @all_path_components ); - $FileBaseName =~ s/_/\\_/g; - if ( $InputFile eq "-" ) {$FileBaseName = "Standard Input";} - -# Set date -# -------- - $Date = `date`; - -# Open current file -# ----------------- - open ( InputFile, "$InputFile" ) - or print STDERR "Unable to open $InputFile: $!"; - -# Print page header -# ----------------- - printf "\n\\markboth{Left}{Source File: %s, Date: %s}\n\n", - $FileBaseName, $Date; - -LINE: -# Inner loop --- for processing each line of the input file -# --------------------------------------------------------- - while ( ) { - chop; # strip record separator - @Fld = split(' ', $_, 9999); - -# Straight quote -# -------------- - if ($Fld[1] eq '!QUOTE:') { - for ($i = 2; $i <= $#Fld; $i++) { - printf '%s ', $Fld[$i]; -}# end for - print " "; - next LINE; -}# end if - -# Handle optional Title Page and Introduction -# ------------------------------------------- - if ($Fld[1] eq $boi_string) { - print ' '; - $intro = 1; - next LINE; -}# end if - - if ($Fld[2] eq '!TITLE:') { - if ( $intro ) { - shift @Fld; - shift @Fld; - @title = @Fld; - $tpage = 1; - next LINE; -}# end if -}# end if - - if ($Fld[2] eq '!AUTHORS:') { - if ( $intro ) { - shift @Fld; - shift @Fld; - @author = @Fld; - $tpage = 1; - next LINE; -}# end if -}# end if - - if ($Fld[2] eq '!AFFILIATION:') { - if ( $intro ) { - shift @Fld; - shift @Fld; - @affiliation = @Fld; - $tpage = 1; - next LINE; -}# end if -}# end if - - if ($Fld[2] eq '!DATE:') { - if ( $intro ) { - shift @Fld; - shift @Fld; - @date = @Fld; - $tpage = 1; - next LINE; -}# end if -}# end if - - if ($Fld[2] eq '!INTRODUCTION:') { - if ( $intro ) { - &do_beg(); - print ' '; - print '%..............................................'; - shift @Fld; - shift @Fld; - print "\\section{@Fld}"; - next LINE; -}# end if -}# end if - - -# End of introduction -# ------------------- - if ($Fld[1] eq $eoi_string) { - print ' '; - print 
'%/////////////////////////////////////////////////////////////'; - print "\\newpage"; - $intro = 0; - next LINE; -}# end if - -# Beginning of prologue -# --------------------- - if ($Fld[1] eq $bop_string) { - if ( $source ) { &do_eoc(); } - print ' '; - print '%/////////////////////////////////////////////////////////////'; - &do_beg(); - if ($first == 0) { - ### print "\\newpage"; - print " "; - print "\\mbox{}\\hrulefill\\ "; - print " ";} - else { - unless($opt_b){print "\\section{Routine/Function Prologues} \\label{app:ProLogues}";} -}# end if - - $first = 0; - $prologue = 1; - $verb = 0; - $source = 0; - &set_missing(); # no required keyword yet - next LINE; -}# end if - -# A new subroutine/function -# ------------------------- - if ($Fld[2] eq '!ROUTINE:' ) { - if ($prologue) { - shift @Fld; - shift @Fld; - $_ = join(' ', @Fld); - $name_is = $_; - s/_/\\_/g; # Replace "_" with "\_" - if ( $opt_n && $not_first ) { printf "\\newpage\n"; } - unless ($opt_f) {printf "\\subsection{%s (Source File: %s)}\n\n", $_, $FileBaseName;} - else {printf "\\subsection{%s }\n\n", $_;} - $have_name = 1; - $not_first = 1; - next LINE; -}# end if -}# end if - -# A new Module -# ------------ - if ($Fld[2] eq '!MODULE:' ) { - if ($prologue) { - shift @Fld; - shift @Fld; - $_ = join(' ', @Fld); - $name_is = $_; - s/_/\\_/g; # Replace "_" with "\_" - if ( $opt_n && $not_first ) { printf "\\newpage\n"; } - unless($opt_f) {printf "\\subsection{Module %s (Source File: %s)}\n\n", $_, $FileBaseName;} - else {printf "\\subsection{Module %s }\n\n", $_;} - $have_name = 1; - $have_intf = 1; # fake it, it does not need one. - $not_first = 1; - next LINE; -}# end if -}# end if - -# A new include file -# ------------------ - if ($Fld[2] eq '!INCLUDE:' ) { - if ($prologue) { - shift @Fld; - shift @Fld; - $_ = join(' ', @Fld); - $name_is = $_; - s/_/\\_/g; # Replace "_" with "\_" - if ( $opt_n && $not_first ) { printf "\\newpage\n"; } - unless($opt_f) {printf "\\subsection{Include File %s (Source File: %s)}\n\n", $_, $FileBaseName;} - else {printf "\\subsection{Include File %s }\n\n", $_;} - $have_name = 1; - $have_intf = 1; # fake it, it does not need one. 
- $not_first = 1; - next LINE; -}# end if -}# end if - -# A new INTERNAL subroutine/function -# ---------------------------------- - if ($Fld[2] eq '!IROUTINE:') { # Internal routine - if ($prologue) { - shift @Fld; - shift @Fld; - $_ = join(' ', @Fld); - $name_is = $_; - s/_/\\_/g; # Replace "_" with "\_" - printf "\\subsubsection{%s}\n\n", $_; - $have_name = 1; - next LINE; -}# end if -}# end if - -# Description: what follows will be regular LaTeX (no verbatim) -# ------------------------------------------------------------- - if (/!DESCRIPTION:/) { - if ($prologue) { - if ($verb) { - printf "\\end{verbatim}"; - printf "\n{\\sf DESCRIPTION:\\\\ }\n\n"; - $verb = 0; } - else { # probably never occurs -}# end if - if ($opt_x) { - printf "\\begin{verbatim} "; - $verb = 1; - $first_verb = 1; } - else { - for ($i = 3; $i <= $#Fld; $i++) { - printf '%s ', $Fld[$i]; -}# end for -}# end if - ### print " "; - $have_desc = 1; - next LINE; -}# end if -}# end if - -# Handle optional keywords (these will appear as verbatim) -# -------------------------------------------------------- - if ($prologue) { -KEY: foreach $key ( @keys ) { - if ( /$key/ ) { - if ($verb) { - printf "\\end{verbatim}"; - $verb = 0; } - else { - printf "\n\\bigskip"; -}# end if - $k = sprintf('%s', $key); - $ln = length($k); - ###printf "\\subsubsection*{%s}\n", substr($k, 2, $ln - 1); - ###printf "{\\Large \\em %s}\n", ucfirst lc substr($k, 2, $ln - 1); - $_ = $key; - if( /USES/ || /INPUT/ || /OUTPUT/ || /PARAMETERS/ || /VALUE/ ) { - printf "{\\em %s}\n", substr($k, 2, $ln - 1); } # italics - else { - printf "{\\sf %s}\n", substr($k, 2, $ln - 1); # san serif -}# end if - - printf "\\begin{verbatim} "; - $verb = 1; - $first_verb = 1; - if ( $key eq "!INTERFACE:" ) { $have_intf = 1; } - if ( $key eq "!CALLING SEQUENCE:" ) { $have_intf = 1; } - if ( $key eq "!REVISION HISTORY:" ) { $have_hist = 1; } - next LINE; -}# end if -}# end foreach -}# end if - -# End of prologue -# --------------- - if ($Fld[1] eq $eop_string) { - if ($verb) { - print "\\end{verbatim}"; - $verb = 0; -}# end if - $prologue = 0; - &check_if_all_there(); # check if all required keyword are there. - if ( $opt_l ) { - $Fld[1] = $boc_string;} - else { next LINE; } -}# end if - - unless ( $opt_s ) { -# -# Beginning of source code section -# -------------------------------- - if ($Fld[1] eq $boc_string) { - print ' '; - print '%/////////////////////////////////////////////////////////////'; - $first = 0; - $prologue = 0; - $source = 1; - ### printf "\\subsubsection*{CONTENTS:}\n\n", $Fld[3]; - printf "{\\sf CONTENTS:}"; - printf "\n \\begin{verbatim}\n"; - $verb = 1; - next LINE; -}# end if - -# End of source code -# ------------------ - if ($Fld[1] eq $eoc_string) { - &do_eoc(); - $prologue = 0; - next LINE; -}# end if -}# end unless - -# Prologue or Introduction, print regular line (except for !) -# ----------------------------------------------------------- - if ($prologue||$intro) { - if ( $verb && $#Fld == 1 && ( $Fld[1] eq $comment_string ) ) { - next LINE; # to eliminate excessive blanks -}# end if - if ( $Fld[2] eq "\\ev" ) { # special handling - $_ = $comment_string . 
" \\end{verbatim}"; -}# end if - s/^$comment_string/ /; # replace comment string with blank -# $line = sprintf('%s', $_); # not necessary -- comment str is absent -# $ln = length($line); # not necessary -- comment str is absent - unless ( $first_verb ) { printf "\n "; } - printf '%s', $_; -# printf '%s', substr($line, 1, $ln - 1); # comment str is absent - $first_verb = 0; - next LINE; -}# end if - -# Source code: print the full line -# -------------------------------- - if ($source) { - print $_; - next LINE; -}# end if - -}# end inner loop for processing each line of the input file - # --------------------------------------------------------- - -}# end main loop for each command-line argument - # -------------------------------------------- - print $_; - if ( $source ) { &do_eoc(); } - print '%...............................................................'; - - unless ( $opt_b ) { - print "\\end{document}"; -}#end unless - - -#---------------------------------------------------------------------- - - sub CheckOpts -# Checks options against a given list. Outputs error message -# for any invalid option. -# -# Usage: -# $rc = &CheckOpts ( options, valid_reg_options, -# valid_sw_options, -# quiet_mode ) -# -# character: options - options to be checked. (e.g. -df+x) The -# list must begin with a positive or -# negative sign. If no sign appears at the -# beginning or by itself, then the argument -# is not recognized as a list of options. -# character: valid_reg_options - list of valid regular options. -# (i.e. options that are associated only -# eith negative sign.) -# character: valid_sw_options - list of valid switch options. -# (i.e. options that can be associated with -# either a positive or negative sign. -# logical: quiet mode (optional) If true then print no error -# messages. -# integer: rc - return code -# = -1 if the arguement, options, is -# not recognized as a list of options -# = 0 if all options are valid. -# > 0 for the number of invalid options. -# -{ local($options, - $valid_reg_options, - $valid_sw_options, - $quiet_mode ) = @_; - - if ( $options eq "+" || - $options eq "-" ) {return -1} - - local(@Options) = split( / */, $options ); - if ( $Options[ $[ ] ne "-" && - $Options[ $[ ] ne "+" ) {return -1;} - - local($option, $option_sign, $valid_list, $pos); - local($errs) = 0; - foreach $option ( @Options ) { - if ( $option eq "-" || - $option eq "+" ) {$option_sign = $option;} - else { - if ( $option_sign eq "-" ) - { $valid_list = $valid_reg_options - . $valid_sw_options; } - else - { $valid_list = $valid_sw_options; } - $pos = index ($valid_list,$option); - if ( $pos < $[ && - $quiet_mode ) { - $errs++; - print STDERR "Invalid option: $option_sign$option \n"; - -}# end if -}# end if -}# end foreach - return $errs; - -}#end sub GetOpts - - sub GetOpts -# Gets options. If an option is valid, then opt_[option] is -# set to 0 or 1 as a side effect if the option is preceeded by -# a positive or negative sign. -# -# Usage: -# $rc = &GetOpts ( options, valid_options ) -# -# character: options - options to be checked. (e.g. -df+x) The -# list must begin with a positive or -# negative sign. If no sign appears at the -# beginning or by itself, then the argument -# is not recognized as a list of options. -# character: valid_options - list of valid options (e.g. dfhx) -# integer: rc - return code -# = -1 if the arguement, options, is -# not recognized as a list of options. 
-# = 0 otherwise -# -{ local($options,$valid_options) = @_; - - if ( $options eq "+" || - $options eq "-" ) {return -1} - - local(@Options) = split( / */, $options ); - if ( $Options[ $[ ] ne "-" && - $Options[ $[ ] ne "+" ) {return -1;} - - local($option, $option_sign); - - foreach $option ( @Options ) { - - if ( $option eq "-" || - $option eq "+" ) { - $option_sign = $option; } - - else { - - if ( index ($valid_options,$option) >= $[ ) { - if ( $option_sign eq "-" ) {${"opt_$option"} = 1;} - if ( $option_sign eq "+" ) {${"opt_$option"} = 0;}; - -}# end if -}# end if -}# end foreach - - return 0; -}#end sub GetOpts - - sub SetOpt -# Sets option flags. For the last input option that is in a -# list, the flag opt_[option] is set to 1 as a side effect. -# For all other options in the list, opt_[option] is set to 0. -# -# Usage: -# $rc = &SetOpt ( options, valid_options ) -# -# character: options - options to be checked. (e.g. -df+x) The -# list must begin with a positive or -# negative sign. If no sign appears at the -# beginning or by itself, then the argument -# is not recognized as a list of options. -# character: valid_options - list of valid options (e.g. def ) -# integer: rc - return code -# = -1 if the arguement, options, is -# not recognized as a list of options. -# = 0 otherwise -# Note: For the examples provided for the input arguments, -# $opt_d = 0, $opt_e = 0, and $opt_f = 1, since the -# input option, -f, was the last in the argument, -# option. -# -{ local($options,$valid_options) = @_; - - if ( $options eq "+" || - $options eq "-" ) {return -1} - - local(@Options) = split( / */, $options ); - local(@ValidOptions) = split( / */, $valid_options ); - if ( $Options[ $[ ] ne "-" && - $Options[ $[ ] ne "+" ) {return -1;} - - local($option, $option_sign); - - foreach $option ( @Options ) { - if ( $option ne "-" && - $option ne "+" ) { - - if ( index ($valid_options,$option) >= $[ ) { - foreach $valid_option (@ValidOptions ) { - ${"opt_$valid_option"} = 0; - -}# end foreach - ${"opt_$option"} = 1; -}# end if -}# end if -}# end foreach - - return 0; -}#end sub SetOpt - -sub print_help { - - print "Usage: protex [-hbACFS] [+-nlsxf] [src_file(s)]"; - print " "; - print " Options:"; - print " -h Help mode: list command line options"; - print " -b Bare mode, meaning no preamble, etc."; - print " +-n New Page for each subsection (wastes paper)"; - print " +-l Listing mode, default is prologues only"; - print " +-s Shut-up mode, i.e., ignore any code from BOC to EOC"; - print " +-x No LaTeX mode, i.e., put !DESCRIPTION: in verbatim mode"; - print " +-f No source file info"; - print " -A Ada code"; - print " -C C++ code"; - print " -F F90 code"; - print " -S Shell script"; - print " "; - print " The options can appear in any order. The options, -h and -b,"; - print " affect the input from all files listed on command-line input."; - print " Each of the remaining options effects only the input from the"; - print " files listed after the option and prior to any overriding"; - print " option. The plus sign turns off the option."; -}# end sub print_help - -sub print_notice { - - print "% **** IMPORTANT NOTICE *****" ; - print "% This LaTeX file has been automatically produced by ProTeX v. 1.1"; - print "% Any changes made to this file will likely be lost next time"; - print "% this file is regenerated from its source. 
Send questions "; - print "% to Arlindo da Silva, dasilva\@gsfc.nasa.gov"; - print " "; - -}# sub print_notice - -sub print_preamble { - - unless ( $opt_b ) { - print "%------------------------ PREAMBLE --------------------------"; - print "\\documentclass[11pt]{article}"; - print "\\usepackage{amsmath}"; - print "\\usepackage{epsfig}"; - print "\\usepackage{hangcaption}"; - print "\\textheight 9in"; - print "\\topmargin 0pt"; - print "\\headsep 1cm"; - print "\\headheight 0pt"; - print "\\textwidth 6in"; - print "\\oddsidemargin 0in"; - print "\\evensidemargin 0in"; - print "\\marginparpush 0pt"; - print "\\pagestyle{myheadings}"; - print "\\markboth{}{}"; - print "%-------------------------------------------------------------"; -}#end unless - - print "\\parskip 0pt"; - print "\\parindent 0pt"; - print "\\baselineskip 11pt"; - -}# end sub print_preamble - -sub print_macros { - - print " "; - print "%--------------------- SHORT-HAND MACROS ----------------------"; - print "\\def\\bv{\\begin{verbatim}}"; - print "\\def\\ev\{\\end\{verbatim}}"; - print "\\def\\be{\\begin{equation}}"; - print "\\def\\ee{\\end{equation}}"; - print "\\def\\bea{\\begin{eqnarray}}"; - print "\\def\\eea{\\end{eqnarray}}"; - print "\\def\\bi{\\begin{itemize}}"; - print "\\def\\ei{\\end{itemize}}"; - print "\\def\\bn{\\begin{enumerate}}"; - print "\\def\\en{\\end{enumerate}}"; - print "\\def\\bd{\\begin{description}}"; - print "\\def\\ed{\\end{description}}"; - print "\\def\\({\\left (}"; - print "\\def\\){\\right )}"; - print "\\def\\[{\\left [}"; - print "\\def\\]{\\right ]}"; - print "\\def\\<{\\left \\langle}"; - print "\\def\\>{\\right \\rangle}"; - print "\\def\\cI{{\\cal I}}"; - print "\\def\\diag{\\mathop{\\rm diag}}"; - print "\\def\\tr{\\mathop{\\rm tr}}"; - print "%-------------------------------------------------------------"; - -}# end sub print_macros - -sub do_beg { - unless ( $opt_b ) { - if ( $begdoc == 0 ) { - if ( $tpage ) { - print "\\title{@title}"; - print "\\author{{\\sc @author}\\\\ {\\em @affiliation}}"; - print "\\date{@date}"; - } - print "\\begin{document}"; - if ( $tpage ) { - print "\\maketitle"; - } - print "\\tableofcontents"; - print "\\newpage"; - $begdoc = 1; - } - } -}# end sub do_beg - -sub do_eoc { - print ' '; - if ($verb) { - print "\\end{verbatim}"; - $verb = 0; - } - $source = 0; -}# end sub do_eoc - -sub set_missing { - - $have_name = 0; # have routine name? - $have_desc = 0; # have description? - $have_intf = 0; # have interface? - $have_hist = 0; # have revision history? 
- $name_is = "UNKNOWN"; - -}# end sub set_missing - - -sub check_if_all_there { - -$have_name || -die "ProTeX: invalid prologue, missing !ROUTINE: or !IROUTINE: in <$name_is>"; - -$have_desc || -die "ProTeX: invalid prologue, missing !DESCRIPTION: in <$name_is>"; - -$have_intf || -die "ProTeX: invalid prologue, missing !INTERFACE: in <$name_is>"; - -$have_hist || -die "ProTeX: invalid prologue, missing !REVISION HISTORY: in <$name_is>"; - -}# end sub check_if_all_there diff --git a/src/externals/mct/testsystem/Makefile b/src/externals/mct/testsystem/Makefile deleted file mode 100644 index b3614ef25ff..00000000000 --- a/src/externals/mct/testsystem/Makefile +++ /dev/null @@ -1,20 +0,0 @@ - -SHELL = /bin/sh - -SUBDIRS = testall - -# TARGETS -subdirs: - @for dir in $(SUBDIRS); do \ - cd $$dir; \ - $(MAKE); \ - cd ..; \ - done - -clean: - @for dir in $(SUBDIRS); do \ - cd $$dir; \ - $(MAKE) clean; \ - cd ..; \ - done - diff --git a/src/externals/mct/testsystem/testall/.gitignore b/src/externals/mct/testsystem/testall/.gitignore deleted file mode 100644 index d675e0fa576..00000000000 --- a/src/externals/mct/testsystem/testall/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -testall -*.clog -fort.* -*.log -*testall.* -*.script diff --git a/src/externals/mct/testsystem/testall/Makefile b/src/externals/mct/testsystem/testall/Makefile deleted file mode 100644 index 3c99e0d9cc0..00000000000 --- a/src/externals/mct/testsystem/testall/Makefile +++ /dev/null @@ -1,60 +0,0 @@ - -SHELL = /bin/sh - -# SOURCE FILES - -MODULE = testall - -SRCS_F90 = mph.F90 m_AVTEST.F90 m_ACTEST.F90 \ - m_GGRIDTEST.F90 m_GMAPTEST.F90 \ - m_GSMAPTEST.F90 m_MCTWORLDTEST.F90 \ - m_ROUTERTEST.F90 m_SMATTEST.F90 \ - master.F90 convertgauss.F90 convertPOPT.F90 \ - cpl.F90 ccm.F90 pop.F90 \ - ReadSparseMatrixAsc.F90 - - -OBJS_ALL = $(SRCS_F90:.F90=.o) - -# MACHINE AND COMPILER FLAGS - -include ../../Makefile.conf - -# TARGETS - -all: testall - -testall: $(OBJS_ALL) - $(FC) -o $@ $(OBJS_ALL) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -# ADDITIONAL FLAGS SPECIFIC FOR UTMCT COMPILATION - -MCTLIBS = -L$(MPEUPATH) -L$(MCTPATH) -lmct -lmpeu -UTLDFLAGS = $(REAL8) -UTCMPFLAGS = $(PROGFCFLAGS) $(REAL8) $(INCFLAG)$(MPEUPATH) $(INCFLAG)$(MCTPATH) - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .o - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MCTFLAGS) $(UTCMPFLAGS) $< - -clean: - ${RM} *.o *.mod testall - -# DEPENDENCIES: - -$(OBJS_ALL): $(MCTPATH)/libmct.a - - - - - - - - - - - diff --git a/src/externals/mct/testsystem/testall/ReadSparseMatrixAsc.F90 b/src/externals/mct/testsystem/testall/ReadSparseMatrixAsc.F90 deleted file mode 100644 index a0ce00128b8..00000000000 --- a/src/externals/mct/testsystem/testall/ReadSparseMatrixAsc.F90 +++ /dev/null @@ -1,244 +0,0 @@ -!------------------------------------------------------------------------- -! Math + Computer Science Division / Argonne National Laboratory ! -!------------------------------------------------------------------------- -! CVS $Id: ReadSparseMatrixAsc.F90,v 1.4 2004-06-15 19:16:08 eong Exp $ -! CVS $Name: $ -!----------------------------------------------------------------------- -!BOP -! -! !ROUTINE: ReadSparseMatrixAsc - Read in a SparseMatrix -! -! !INTERFACE: - subroutine ReadSparseMatrixAsc(sMat, fileID, src_dims, dst_dims) -! -! 
!USES: - - use m_inpak90, only : I90_LoadF - use m_inpak90, only : I90_Label - use m_inpak90, only : I90_Gstr - use m_inpak90, only : I90_Release - use m_ioutil, only : luavail - use m_stdio, only : stdout,stderr - use m_die, only : die - - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_clean => clean - - use m_AttrVect, only : Attrvect_zero => zero - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_Init => init - use m_SparseMatrix, only : SparseMatrix_Clean => clean - use m_SparseMatrix, only : SparseMatrix_indexIA => indexIA - use m_SparseMatrix, only : SparseMatrix_indexRA => indexRA - use m_SparseMatrix, only : SparseMatrix_lsize => lsize - use m_SparseMatrix, only : SparseMatrix_SortPermute => SortPermute - use m_SparseMatrix, only : SMatrix_importGlobalRowInd => & - importGlobalRowIndices - use m_SparseMatrix, only : SMatrix_importGlobalColumnInd => & - importGlobalColumnIndices - use m_SparseMatrix, only : SMatrix_importMatrixElements => & - importMatrixElements - - implicit none -! -! !DESCRIPTION: This is the reader/tester driver for the Model -! Coupling Toolkit (mct) {\tt SparseMatrix} datatype. -! -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: fileID - -! !OUTPUT PARAMETERS: - - type(SparseMatrix), intent(out) :: sMat - integer, dimension(2), intent(out) :: src_dims - integer, dimension(2), intent(out) :: dst_dims - -! -! -! !BUGS: -! -! !SYSTEM ROUTINES: -! -! !FILES USED: -! -! !REVISION HISTORY: -! -!EOP -!------------------------------------------------------------------------- -! - character(len=*), parameter :: myname = 'ReadSparseMatrixAsc' - - integer :: n,ierr - - integer :: mdev - character*1024 :: filename, data_dir - - integer :: num_elements, nRows, nColumns - integer, dimension(:), pointer :: rows, columns - real, dimension(:), pointer :: weights - -! VARIABLES FOR TESTING ! - -! SparseMatrix attribute indices: - integer :: igrow, igcol, iwgt -! SparseMatrix sorting key list: - type(List) :: sort_keys -! Descending order flag array for SparseMatrix Sort test 2a. - logical :: descending(2) - -!------------------------------------------------ -! Use mpeu resource file utilities to read in the name of the -! file with the weights -! - call I90_LoadF("ut_SparseMatrix.rc", ierr) - - write(stdout,*) myname, ":: loaded ut_SparseMatrix.rc" - - call I90_Label("Data_Directory:", ierr) - call I90_Gstr(data_dir, ierr) - - call I90_Label(trim(fileID), ierr) - call I90_Gstr(filename, ierr) - - filename = trim(data_dir) // "/" // trim(filename) - - write(stdout,*) myname,":: remapfile path = ", trim(filename) - - call I90_Release(ierr) - - write(stdout,*) myname, ":: unloaded ut_SparseMatrix.rc" - - -! First Activity: Input of matrix elements from a file. -!------------------------------------------------ -! Go and actually read the weights. - - ! Find an empty f90 i/o device number - - mdev = luavail() - - ! Open the matrix file - - open(mdev, file=trim(filename), status='old') - - ! LINE 1: - ! Read in the number of matrix elements, and allocate - ! input buffer space: - - read(mdev,*) num_elements - - allocate(rows(num_elements), columns(num_elements), & - weights(num_elements), stat=ierr) - if(ierr /= 0) call die(myname,"allocate(row,col... failed",ierr) - - ! LINE 2: - ! Read in the source grid dimensions - - read(mdev,*) src_dims(1), src_dims(2) - - ! LINE 3: - ! Read in the destination grid dimensions - - read(mdev,*) dst_dims(1), dst_dims(2) - - - ! 
Read in the row, column, and weight data: - - write(stdout,'(2a)')myname,":: Reading elements from file" - do n=1, num_elements - read(mdev,*) rows(n), columns(n), weights(n) - end do - write(stdout,'(2a)')myname,":: Done reading from file" - - ! Initialize sMat: - nRows = dst_dims(1) * dst_dims(2) - nColumns = src_dims(1) * src_dims(2) - call SparseMatrix_init(sMat, nRows, nColumns, num_elements) - call AttrVect_zero(sMat%data) - - ! ...and store them. - - call SMatrix_importGlobalRowInd(sMat, rows, size(rows)) - call SMatrix_importGlobalColumnInd(sMat, columns, size(columns)) - call SMatrix_importMatrixElements(sMat, weights, size(weights)) - - deallocate(rows, columns, weights, stat=ierr) - if(ierr/=0) call die(myname,':: deallocate(rows... failed',ierr) - -!------------------------------------------------ - - - -!------------------------------------------------ -! Test features of the SparseMatrix module -! -! Was everything read without incident? -! You can answer this question by comparing the sample -! values printed below with the results of a head and tail -! on the ascii matrix file. - - igrow = SparseMatrix_indexIA(sMat, 'grow') - igcol = SparseMatrix_indexIA(sMat, 'gcol') - iwgt = SparseMatrix_indexRA(sMat, 'weight') - - num_elements = SparseMatrix_lsize(sMat) - - write(stdout,*) myname, ":: Number of sMat elements= ",num_elements - - write(stdout,*) myname, ":: sMat%data%iAttr(igrow,1) = ",sMat%data%iAttr(igrow,1) - write(stdout,*) myname, ":: sMat%data%iAttr(igcol,1) = ",sMat%data%iAttr(igcol,1) - write(stdout,*) myname, ":: sMat%data%rAttr(iwgt,1) = ",sMat%data%rAttr(iwgt,1) - - - write(stdout,*) myname, ":: sMat%data%iAttr(igrow,num_elements) = ", & - sMat%data%iAttr(igrow,num_elements) - write(stdout,*) myname, ":: sMat%data%iAttr(igcol,num_elements) = ", & - sMat%data%iAttr(igcol,num_elements) - write(stdout,*) myname, ":: sMat%data%rAttr(iwgt,num_elements) = ", & - sMat%data%rAttr(iwgt,num_elements) - -! Second Activity: Sorting - - call List_init(sort_keys,"grow:gcol") - - call SparseMatrix_SortPermute(sMat, sort_keys, descending) - -! Second Test Part a): Did it work? - - write(stdout,*) myname, ":: Index sorting test results--descending:" - - write(stdout,*) myname, ":: sMat%data%iAttr(igrow,1) = ",sMat%data%iAttr(igrow,1) - write(stdout,*) myname, ":: sMat%data%iAttr(igcol,1) = ",sMat%data%iAttr(igcol,1) - - write(stdout,*) myname, ":: sMat%data%iAttr(igrow,num_elements) = ", & - sMat%data%iAttr(igrow,num_elements) - write(stdout,*) myname, ":: sMat%data%iAttr(igcol,num_elements) = ", & - sMat%data%iAttr(igcol,num_elements) - - write(stdout,*) myname, ":: End index sorting test results part a." - - - call SparseMatrix_SortPermute(sMat,sort_keys) - -! Second Test Partb: Did it work? - - write(stdout,*) myname, ":: Index sorting test results:--ascending" - - write(stdout,*) myname, ":: sMat%data%iAttr(igrow,1) = ",sMat%data%iAttr(igrow,1) - write(stdout,*) myname, ":: sMat%data%iAttr(igcol,1) = ",sMat%data%iAttr(igcol,1) - - write(stdout,*) myname, ":: sMat%data%iAttr(igrow,num_elements) = ", & - sMat%data%iAttr(igrow,num_elements) - write(stdout,*) myname, ":: sMat%data%iAttr(igcol,num_elements) = ", & - sMat%data%iAttr(igcol,num_elements) - - write(stdout,*) myname, ":: End index sorting test results." - - call List_clean(sort_keys) - -! 
done testing -!------------------------------------------------ - - end subroutine ReadSparseMatrixAsc diff --git a/src/externals/mct/testsystem/testall/UNTESTED b/src/externals/mct/testsystem/testall/UNTESTED deleted file mode 100644 index 0840bdbc40e..00000000000 --- a/src/externals/mct/testsystem/testall/UNTESTED +++ /dev/null @@ -1,13 +0,0 @@ -The following routines are untested: - -m_GlobalToLocal ---> GlobalSegMapToNavigator - -m_Merge - -m_Navigator - -m_NBSend - -m_SparseMatrixComms ---> GM_gather diff --git a/src/externals/mct/testsystem/testall/ccm.F90 b/src/externals/mct/testsystem/testall/ccm.F90 deleted file mode 100644 index 919de17bf53..00000000000 --- a/src/externals/mct/testsystem/testall/ccm.F90 +++ /dev/null @@ -1,835 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: ccm.F90,v 1.13 2004-06-02 22:22:51 eong Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: ccm3 -- dummy atmosphere model for unit tester -! -! !DESCRIPTION: -! An atmosphere model subroutine to test functionality of MPH and MCT. -! -! !INTERFACE: - subroutine ccm3 (CCM_World) -! -! !USES: -! - use MPH_all -!---Field Storage DataType and associated methods -#ifndef SYSOSF1 - use m_AttrVect,only : AttrVect_exportIListToChar => exportIListToChar - use m_AttrVect,only : AttrVect_exportRListToChar => exportRListToChar -#endif - use m_AttrVect,only : MCT_AtrVt_init => init - use m_AttrVect,only : MCT_AtrVt_clean => clean - use m_AttrVect,only : MCT_AtrVt_lsize => lsize - use m_AttrVect,only : MCT_AtrVt_nReal => nRAttr - use m_AttrVect,only : MCT_AtrVt_nInteger => nIAttr - use m_AttrVect,only : AttrVect_zero => zero - use m_AttrVect,only : AttrVect_Copy => Copy - use m_AttrVect,only : AttrVect -!---Coordinate Grid DataType and associated methods - use m_GeneralGrid,only : GeneralGrid - use m_GeneralGrid,only : MCT_GGrid_init => init - use m_GeneralGrid,only : MCT_GGrid_cart => initCartesian - use m_GeneralGrid,only : MCT_GGrid_clean => clean - use m_GeneralGrid,only : MCT_GGrid_dims => dims - use m_GeneralGrid,only : MCT_GGrid_lsize => lsize - use m_GeneralGrid,only : MCT_GGrid_indexIA => indexIA - use m_GeneralGrid,only : MCT_GGrid_indexRA => indexRA - use m_GeneralGrid,only : MCT_GGrid_exportIAttr => exportIAttr - use m_GeneralGrid,only : MCT_GGrid_importIAttr => importIAttr - use m_GeneralGrid,only : MCT_GGrid_exportRAttr => exportRAttr - use m_GeneralGrid,only : MCT_GGrid_importRAttr => importRAttr - use m_GeneralGrid,only : MCT_GGrid_SortPermute => sortpermute - use m_GeneralGridComms,only: MCT_GGrid_send => send - use m_GeneralGridComms,only: MCT_GGrid_scatter => scatter -!---MCT Spatial Integral services... 
- use m_SpatialIntegral,only : MCT_SpatialIntegral => SpatialIntegral - use m_SpatialIntegral,only : MCT_SpatialAverage => SpatialAverage - use m_SpatialIntegral,only : MCT_MaskedSpatialIntegral => & - MaskedSpatialIntegral - use m_SpatialIntegral,only : MCT_MaskedSpatialAverage => & - MaskedSpatialAverage -!---Domain Decomposition Descriptor DataType and associated methods - use m_GlobalSegMap,only: MCT_GSMap_init => init - use m_GlobalSegMap,only: MCT_GSMap_clean => clean - use m_GlobalSegMap,only: MCT_GSMap_gsize => gsize - use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize - use m_GlobalSegMap,only: MCT_GSMap_ngseg => ngseg - use m_GlobalSegMap,only: MCT_GSMap_nlseg => nlseg - use m_GlobalSegMap,only: GlobalSegMap -!---Global-to-Local indexing services - use m_GlobalToLocal,only: MCT_GStoL => GlobalToLocalIndices - use m_GlobalToLocal,only: MCT_GStoLI => GlobalToLocalIndex -!---Component Model Registry - use m_MCTWorld,only: ThisMCTWorld - use m_MCTWorld,only: MCTComponentRootRank => ComponentRootRank - use m_MCTWorld,only: MCTWorld_init => init - use m_MCTWorld,only: MCTWorld_clean => clean -!---Intercomponent communications scheduler - use m_Router,only: Router - use m_Router,only: MCT_Router_init => init - use m_Router,only: MCT_Router_clean => clean - use m_Transfer,only: MCT_Send => send -!---mpeu List datatype - use m_List, only : List - use m_List, only : List_clean => clean - use m_List, only : List_copy => copy - use m_List, only : List_exportToChar => exportToChar -!---mpeu routines for MPI communications - use m_mpif90 -!---mpeu timers - use m_zeit -!---mpeu error handling - use m_die -!---mpeu stderr/stdout handling - use m_stdio -!---Tester Modules - use m_ACTEST, only : Accumulator_test => testall - use m_ACTEST, only : Accumulator_identical => identical - use m_AVTEST, only : AttrVect_test => testall - use m_AVTEST, only : AttrVect_identical => Identical - use m_GGRIDTEST, only : GGrid_test => testall - use m_GGRIDTEST, only : GGrid_identical => Identical - use m_GMAPTEST, only : GMap_test => testall - use m_GSMAPTEST, only : GSMap_test => testall - use m_MCTWORLDTEST, only : MCTWorld_test => testall - use m_ROUTERTEST, only : Router_test => testall - use m_SMATTEST, only : sMat_test => testall - use m_SMATTEST, only : sMat_identical => Identical - - implicit none - -! !INPUT PARAMETERS: - - integer,intent(in) :: CCM_World ! communicator for ccm - -! -! !REVISION HISTORY: -! Oct00 - Yun (Helen) He and Chris Ding, NERSC/LBNL - initial MPH-only version -! 19Nov00 - R. Jacob -- interface with mct -! 06Feb01 - J. Larson - slight mod to -! accomodate new interface to MCT_GSMap_lsize(). -! 08Feb01 - R. Jacob -- use MCT_Send -! 23Feb01 - R. Jacob -- expand size of AtrVect -! and add a check for transfer. -! 08Jun01 - R. Jacob initialize a General Grid -! 11Jun01 - Jacob/Larson Send a General Grid to cpl -! 15Feb02 - R.Jacob -- new MCTWorld_init interface. -! 13Jun02 - J. Larson - More GeneralGrid usage, -! including import/export of attributes, and sorting by -! coordinate. Also added mpeu error handling and stdout/stderr. -! 18Jun02 - J. Larson - Introduction of Spatial -! Integral/Average services. -! 18Jul02 - E. 
Ong - Use a gaussian atmosphere grid -!EOP ___________________________________________________________________ - character(len=*), parameter :: ccmname='ccm3' - -!----------------------- MPH vars - integer :: myProc, myProc_global, root - integer :: Global_World - integer :: coupler_id - integer :: mySize, ncomps, mycompid - -!----------------------- MCT and dummy model vars - integer :: i,j,n,k,ier - -! SparseMatrix dimensions and Processor Layout - integer :: Nax, Nay ! Atmosphere lons, lats - integer :: Nox, Noy ! Ocean lons, lats - integer :: NPROCS_LATA, NPROCS_LONA ! Processor layout - -! Number of steps to send to coupler - - integer :: steps - integer, parameter :: nsteps = 10 - -! Arrays used to initialize the MCT GlobalSegMap - integer,dimension(:),pointer :: starts - integer,dimension(:),pointer :: lengths - integer,dimension(:,:),pointer :: myglobalmap -! integer,dimension(:),pointer :: lstart,llength - -! Arrays used to test MCT import/export routines - integer, dimension(:), pointer :: dummyI - real, dimension(:), pointer :: dummyR - integer :: latindx,lonindx,gridindx,status - integer :: length - -! Index to AtmGrid area element dA - integer :: dAindx - -! Set the value of pi - real, parameter :: pi = 3.14159265359 - -! Atmosphere GSMap - type(GlobalSegMap) :: GSMap -! Router from Atm to Cpl - type(Router) :: Atm2Cpl -! AttrVect for atm data - type(AttrVect) :: a2coupler -! AttrVect for atm data used to test spatial integration services - type(AttrVect) :: a2coupler2, integratedA2CaV -! The atmosphere's grid - type(GeneralGrid) :: AtmGrid, dAtmGrid - -! Test Grids and test dummy vars - type(GeneralGrid) :: AtmGridExactCopy, dAtmGridExactCopy - type(GeneralGrid) :: AtmCartGrid - type(List) :: cartlist,cartindex,cartother,cartweight - integer,dimension(:),pointer :: cartdims - real,dimension(:),pointer :: dummyatmlats, dummyatmlons - real,dimension(:),pointer :: dummycartlats, dummycartlons - real,dimension(:,:),pointer :: cartaxis - real,dimension(:),allocatable :: gauss_wgt, gauss_lat - logical,dimension(:),pointer :: cartdescend - integer :: axlength,aylength,cxlength,cylength - real :: dlon - -! Spatial Integral Temporary Variables - -#ifdef MPE -#include "mpe.h" -#endif - -!------------------------------------------------------- - - call MPI_COMM_DUP (MPI_COMM_WORLD, Global_World, ierr) - call MPI_COMM_RANK (MPI_COMM_WORLD, myProc_global, ierr) - call MPI_COMM_RANK (CCM_World, myProc, ierr) - if (myProc==0) call MPH_redirect_output ('ccm') -! write(*,*) myProc, ' in ccm === ', myProc_global, ' in global' -! write(*,*) 'MPH_local_proc_id()=', MPH_local_proc_id_ME_SE() -! write(*,*) 'MPH_global_proc_id()=', MPH_global_proc_id() -! write(*,*) 'MPH_component_id()=', MPH_component_id_ME_SE() - -! if profiling with the MPE lib -#ifdef MPE - call mpe_logging_init(myProc_global,init_s,init_e,gsmi_s,gsmi_e,& - atri_s,atri_e,routi_s,routi_e,send_s,send_e,recv_s,recv_e,& - clean_s,clean_e) -#endif - -! Get the coupler's component id - coupler_id = MPH_get_component_id("coupler") - -!------------------------------------------------------- -! Begin using MCT - -!!!!!!!!!!!!!!!!!----------MCTWorld -! initialize the MCTWorld - ncomps=MPH_total_components() - mycompid=MPH_component_id_ME_SE() - -! all components must call this -! if(myProc==0)write(stdout,*)"Initializing MCTWorld" - - call zeit_ci('Aworldinit') - call MCTWorld_init(ncomps,MPI_COMM_WORLD,CCM_World,mycompid) - call zeit_co('Aworldinit') - - call MCTWorld_test("CCM::MCTWorld",6100+myProc) - - ! 
Get the Sparse Matrix dimensions and processor layout - root = MCTComponentRootRank(coupler_id,ThisMCTWorld) - call MPI_BCAST(Nax,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Nay,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Nox,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Noy,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(NPROCS_LATA,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(NPROCS_LONA,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - - ! check to see if there are enough processors - call MPI_COMM_SIZE(CCM_World, mySize, ierr) - if (mySize /= NPROCS_LATA*NPROCS_LONA) then - write(*,*)'ERROR: wrong number of processors' - write(*,*)'found ',mySize,' Needed',NPROCS_LATA*NPROCS_LONA - stop - endif - -! Number the grid 1 to Nax*Nay, starting -! in the South Pole and proceeding along a latitude and -! then from south to north. -! NOTE: This may not look like much but its very important. -! This is where the numbering scheme for each grid point, -! on which all of MCT is based, is defined. The points -! are numbered from 1 to Nax*Nay starting at the south -! pole (j=1) and moving west to east and south to north - - allocate(myglobalmap(Nax,Nay),stat=ierr) - if(ierr/=0) call die(ccmname, "allocate(myglobalmap)", ierr) - n=0 - do j=1,Nay - do i= 1,Nax - n=n+1 - myglobalmap(i,j) = n - enddo - enddo - -!!!!!!!!!!!!!!!!!----------General Grid - -! Load a Gaussian atmosphere general grid -! Note: The following block of code is for the root process. - -if(myProc==0) then - - write(*,*) ccmname, ":: Initializing Atm General Grid" - - call convertgauss(AtmGrid, Nax, Nay) - - - call GGrid_test(AtmGrid,"CCM::AtmGrid",3300+myProc) - - ! Set up a copy for later on... - call MCT_GGrid_init(AtmGridExactCopy,AtmGrid,MCT_GGrid_lsize(AtmGrid)) - call AttrVect_Copy(aVin=AtmGrid%data,aVout=AtmGridExactCopy%data) - -!::::::::::::::::::::::::::::::::::::! -!:::::TEST INITCARTESIAN:::::::::::::! -!::::::::::::::::::::::::::::::::::::! - - ! Test initCartesian from AtmGrid values - - call List_copy(cartlist,AtmGrid%coordinate_list) - call List_copy(cartweight,AtmGrid%weight_list) - call List_copy(cartother,AtmGrid%other_list) - call List_copy(cartindex,AtmGrid%index_list) - - allocate(cartdims(2),cartaxis(MAX(Nay,Nax),2), & - gauss_wgt(Nay),gauss_lat(Nay),cartdescend(2),stat=ierr) - if(ierr/=0) call die(ccmname,"allocate(cart...)",ierr) - - cartdims(1) = Nay - cartdims(2) = Nax - - ! Obtain the gaussian latitudes and longitudes from convertgauss.F90 - call gquad(Nay,gauss_lat,gauss_wgt) - do i=1,Nay - cartaxis(i,1) = (0.5*pi - gauss_lat(Nay+1-i)) * 180./pi - enddo - - dlon = 360./Nax - do i=1,Nax - cartaxis(i,2) = (i-1)*dlon - enddo - - cartdescend=.false. 
- - call MCT_GGrid_cart(GGrid=AtmCartGrid, & - CoordChars=List_exportToChar(cartlist), & - CoordSortOrder="grid_center_lat:grid_center_lon", & - descend=cartdescend, & - WeightChars=List_exportToChar(cartweight), & - OtherChars=List_exportToChar(cartother), & - IndexChars=List_exportToChar(cartindex), & - Dims=cartdims, & - AxisData=cartaxis) - - call GGrid_test(AtmCartGrid,"CCM::AtmCartGrid",3600+myProc) - - call MCT_GGrid_SortPermute(AtmCartGrid) - call MCT_GGrid_SortPermute(AtmGrid) - - allocate(dummycartlats(MCT_GGrid_lsize(AtmCartGrid)), & - dummycartlons(MCT_GGrid_lsize(AtmCartGrid)), & - dummyatmlats(MCT_GGrid_lsize(AtmGrid)), & - dummyatmlons(MCT_GGrid_lsize(AtmGrid)), & - stat=ierr) - if(ierr/=0) call die(ccmname, "allocate(dummy...)", ierr) - - call MCT_GGrid_exportRAttr(AtmCartGrid, 'grid_center_lat', & - dummycartlats,cylength) - call MCT_GGrid_exportRAttr(AtmCartGrid, 'grid_center_lon', & - dummycartlons,cxlength) - call MCT_GGrid_exportRAttr(AtmGrid, 'grid_center_lat', & - dummyatmlats, aylength) - call MCT_GGrid_exportRAttr(AtmGrid, 'grid_center_lon', & - dummyatmlons, axlength) - - if((aylength/=cylength).or.(axlength/=cxlength)) then - call die(ccmname,"Atmosphere GeneralGrid failed the first LENGTH test") - endif - - if((aylength/=Nay*Nax).or.(axlength/=Nax*Nay)) then - call die(ccmname,"Atmosphere GeneralGrid failed the second LENGTH test") - endif - - ! The lowest limit I have found for this is 1e-5 on the Absoft compiler - ! This is not as precise as the lons because of round off - do i=1,Nay*Nax - if( abs(dummycartlats(i)-dummyatmlats(i)) > 1e-5 ) then - call die(ccmname,"GeneralGrid INITCARTESIAN failed the LAT test") - endif - enddo - do i=1,Nax*Nay - if( abs(dummycartlons(i)-dummyatmlons(i)) > 1e-8 ) then - call die(ccmname,"GeneralGrid INITCARTESIAN failed the LON test") - endif - enddo - - deallocate(cartdims,cartaxis,cartdescend,dummycartlats,dummycartlons, & - dummyatmlats,dummyatmlons,gauss_wgt,gauss_lat,stat=ierr) - if(ierr/=0) call die(ccmname,"deallocate(cart...)",ierr) - - call List_clean(cartlist) - call List_clean(cartweight) - call List_clean(cartindex) - call List_clean(cartother) -!::::::::::::::::::::::::::::::::::::! -!:::::DONE WITH INITCARTESIAN::::::::! -!::::::::::::::::::::::::::::::::::::! - -! Write out the basic things we initialized - - write(stdout,'(3a,i1)') ccmname, & - ":: Initialized Atm GeneralGrid variable AtmGrid.", & - "Number of dimensions = ", MCT_GGrid_dims(AtmGrid) - write(stdout,'(2a,i8)') ccmname, & - ":: Number of grid points in AtmGrid=", & - MCT_GGrid_lsize(AtmGrid) - write(stdout,'(2a,i8)') ccmname, & - ":: Number of latitudes Nay=", Nay - write(stdout,'(2a,i8)') ccmname, & - ":: Number of longitudes Nax=", Nax - write(stdout,'(2a,i8)') ccmname, & - ":: Number of grid points Nax*Nax=", Nay*Nax - write(stdout,'(3a)') ccmname, & - ":: AtmGrid%coordinate_list = ", & - List_exportToChar(AtmGrid%coordinate_list) - write(stdout,'(3a)') ccmname, & - ":: AtmGrid%weight_list = ", & - List_exportToChar(AtmGrid%weight_list) - write(stdout,*) ccmname, & ! * is used for SUPER_UX compatibility - ":: AtmGrid%other_list = ", & - List_exportToChar(AtmGrid%other_list) - write(stdout,'(3a)') ccmname, & - ":: AtmGrid%index_list = ", & - List_exportToChar(AtmGrid%index_list) - write(stdout,'(2a,i3)') ccmname, & - ":: Number of integer attributes stored in AtmGrid=", & - MCT_AtrVt_nInteger(AtmGrid%data) - write(stdout,'(2a,i3)') ccmname, & - ":: Total Number of real attributes stored in AtmGrid=", & - MCT_AtrVt_nReal(AtmGrid%data) - -! 
Get AtmGrid attribute indicies - latindx=MCT_GGrid_indexRA(AtmGrid,'grid_center_lat') - lonindx=MCT_GGrid_indexRA(AtmGrid,'grid_center_lon') - -! NOTE: The integer attribute GlobGridNum is automatically -! appended to any General Grid. Store the grid numbering -! scheme (used in the GlobalSegMap) here. - gridindx=MCT_GGrid_indexIA(AtmGrid,'GlobGridNum') - - do j=1,Nay - do i=1,Nax - n=myglobalmap(i,j) - AtmGrid%data%iAttr(gridindx,n)=n - enddo - enddo - -! Check the weight values of the grid_area attribute - - dAindx = MCT_GGrid_indexRA(AtmGrid, 'grid_area') - - write(stdout,'(2a)') ccmname, & - ':: Various checks of GeneralGrid AtmGrid Weight data...' - write(stdout,'(2a,f12.6)') ccmname, & - ':: direct ref--AtmGrid 1st dA entry=.', & - AtmGrid%data%rAttr(dAindx,1) - write(stdout,'(2a,f12.6)') ccmname, & - ':: direct ref--AtmGrid last dA entry=.', & - AtmGrid%data%rAttr(dAindx,MCT_GGrid_lsize(AtmGrid)) - write(stdout,'(2a,f12.6)') ccmname, & - ':: Sum of dA(1,...,Nax*Nay)=.', & - sum(AtmGrid%data%rAttr(dAindx,:)) - write(stdout,'(2a,f12.6)') ccmname, & - ':: Unit Sphere area 4 * pi=.', 4.*pi - -! Check on coordinate values (and check some export functions, too...) - - allocate(dummyR(MCT_GGrid_lsize(AtmGrid)), stat=ierr) - if(ierr/=0) call die(ccmname, "allocate(myglobalmap)", ierr) - - call MCT_GGrid_exportRAttr(AtmGrid, 'grid_center_lat', dummyR, length) - - write(stdout,'(2a)') ccmname, & - ':: Various checks of GeneralGrid AtmGrid coordinate data...' - write(stdout,'(2a,i8)') ccmname, & - ':: No. exported AtmGrid latitude values =.',length - write(stdout,'(2a,f12.6)') ccmname, & - ':: export--AtmGrid 1st latitude=.',dummyR(1) - write(stdout,'(2a,f12.6)') ccmname, & - ':: export--AtmGrid last latitude=.',dummyR(length) - write(stdout,'(2a,f12.6)') ccmname, & - ':: direct ref--AtmGrid 1st latitude=.', & - AtmGrid%data%rAttr(latindx,1) - write(stdout,'(2a,f12.6)') ccmname, & - ':: direct ref--AtmGrid last latitude=.', & - AtmGrid%data%rAttr(latindx,length) - write(stdout,'(2a,f12.6)') ccmname, & - ':: direct ref--AtmGrid 1st longitude=.', & - AtmGrid%data%rAttr(lonindx,1) - write(stdout,'(2a,f12.6)') ccmname, & - ':: direct ref--AtmGrid last longitude=.', & - AtmGrid%data%rAttr(lonindx,MCT_GGrid_lsize(AtmGrid)) - write(stdout,'(2a)') ccmname, & - ':: End checks of GeneralGrid AtmGrid coordinate data.' - -! Check the GlobalGridNum values: - - allocate(dummyI(MCT_GGrid_lsize(AtmGrid)), stat=ierr) - if(ierr/=0) call die(ccmname, "allocate(dummyI)", ierr) - - call MCT_GGrid_exportIAttr(AtmGrid, 'GlobGridNum', dummyI, length) - - write(stdout,'(2a,i8)') ccmname, & - ':: No. exported AtmGrid GlobalGridNum values =.',length - write(stdout,'(2a,i8)') ccmname, & - ':: export--AtmGrid 1st GlobalGridNum =.', dummyI(1) - write(stdout,'(2a,i8)') ccmname, & - ':: export--AtmGrid last GlobalGridNum =.', dummyI(length) - write(stdout,'(2a,i8)') ccmname, & - ':: direct ref--AtmGrid 1st GlobalGridNum =.', & - AtmGrid%data%iAttr(gridindx,1) - write(stdout,'(2a,i8)') ccmname, & - ':: direct ref--AtmGrid last GlobalGridNum =.', & - AtmGrid%data%iAttr(gridindx,length) - -! send the atmosphere's grid from the atmosphere's root to the -! coupler's root. 1400 is the randomly chosen tag base. - call MCT_GGrid_send(AtmGrid,coupler_id,1400,status=status) - -! Clean up arrays used for GGrid tests: - - deallocate(dummyI, dummyR, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') ccmname, & - ':: ERROR--deallocate(dummyI,dummyR) failed with ierr=', ierr - call die(ccmname) - endif - -endif ! 
if(myProc==0) - -!!!!!!!!!!!!!!!!!----------GlobalSegMap -! Get ready to initialize the GlobalSegMap -! -! -! Go and define the starts and lengths according to the -! decomposition we want - - call FoldOverDecomp(myglobalmap,starts,lengths,Nax,Nay) - -! now put the information in a GlobalSegMap. -! if(myProc==0)write(*,*)"Inializing GSMap" - call zeit_ci('Agsmapinit') - call MCT_GSMap_init(GSMap,starts,lengths,0,CCM_World,mycompid) - call zeit_co('Agsmapinit') - -! Try using some GSMap functions. -! write(*,*)myProc,'number of global segs is',MCT_GSMap_ngseg(GSMap) -! write(*,*)myProc,'number of local segs is', MCT_GSMap_nlseg(GSMap,myProc) -! write(*,*)myProc,'local size is',MCT_GSMap_lsize(GSMap,CCM_World) -! write(*,*)myProc,'global size is',MCT_GSMap_gsize(GSMap) - -! call MCT_GStoL(GSMap,CCM_World,lstart,llength) -! if(myProc==0) then -! do i=1,GSMap%ngseg -! write(*,*)i,GSMap%start(i),GSMap%pe_loc(i) -! if(myProc==GSMap%pe_loc(i)) then -! point = GSMap%start(i) -! write(*,*)"MCTGStoLI",MCT_GStoLI(GSMap,point,CCM_World) -! endif -! enddo -! endif - - -!!!!!!!!!!!!!!!!!----------Attribute Vector -! intialize an attribute vector -! if(myProc==0)write(*,*)"Initializing Attrvect" - - call zeit_ci('Aatvecinit') -! declare an attrvect to hold all atm model outputs -! an identical decleration needs to be made in the coupler -! NOTE: the size of the AttrVect is set to be the local -! size of the GSMap. - call MCT_AtrVt_init(a2coupler, & - iList='gsindex', &! local GSMap values - rList=& -! height of first atm level - "alevh:& -! u wind - &uwind:& -! v wind - &vwind:& -! potential temp - &pottem:& -! specific humidity - &s_hum:& -! density - &rho:& -! barometric pressure - &barpres:& -! surface pressure - &surfp:& -! net solar radiation - &solrad:& -! downward direct visible radiation - &dirvis:& -! downward diffuse visible radiation - &difvis:& -! downward direct near-infrared radiation - &dirnif:& -! downward diffuse near-infrared radiation - &difnif:& -! downward longwave radiation - &lngwv:& -! convective precip - &precc:& -! large-scale precip - &precl",& - lsize=MCT_GSMap_lsize(GSMap, CCM_World)) - call zeit_co('Aatvecinit') - -! create a second attribute vector to test copying - call MCT_AtrVt_init(a2coupler2, rList="conpre:precl:uwind:vwind", & - lsize=MCT_GSMap_lsize(GSMap,CCM_World)) - call AttrVect_zero(a2coupler2) - -if(myProc==0)then -#ifndef SYSOSF1 - write(stdout,*) ccmname,':: a2coupler%rList = ', & - AttrVect_exportRListToChar(a2coupler) - write(stdout,*) ccmname,':: a2coupler%iList = ', & - AttrVect_exportIListToChar(a2coupler) -#endif - write(stdout,'(2a,i8)') ccmname, & - ':: a2coupler length = ', MCT_AtrVt_lsize(a2coupler) - write(stdout,'(2a,i8)') ccmname, & - ':: MCT_GSMap_lsize = ', MCT_GSMap_lsize(GSMap, CCM_World) -endif - -! load the local values of the GSMap into gsindex for checking - j=1 - do i=1,MCT_GSMap_ngseg(GSMap) - if(myProc==GSMap%pe_loc(i)) then - do k=1,GSMap%length(i) - a2coupler%iAttr(1,j)=GSMap%start(i)+k-1 - j=j+1 - enddo - endif - enddo - -! put some data in the Attribute Vector - do j=1,MCT_AtrVt_nReal(a2coupler) - do i=1,MCT_GSMap_lsize(GSMap, CCM_World) - a2coupler%rAttr(j,i)=30. - enddo - enddo - -! test Attribute vector copying -if(myProc==0)write(stdout,'(2a)') ccmname,':: Test aV copy services' -if(myProc==0)write(stdout,*) ccmname, ':: initial values', & - a2coupler2%rAttr(1,1), a2coupler2%rAttr(2,1), & - a2coupler2%rAttr(3,1), a2coupler2%rAttr(4,1) - -! 
copy all shared attributes -call AttrVect_Copy(a2coupler,a2coupler2) -if(myProc==0)write(stdout,*) ccmname, ':: copy shared', & - a2coupler2%rAttr(1,1), a2coupler2%rAttr(2,1), & - a2coupler2%rAttr(3,1), a2coupler2%rAttr(4,1) -call AttrVect_zero(a2coupler2) - -! copy only one attribute -call AttrVect_Copy(a2coupler,a2coupler2,"precl") -if(myProc==0)write(stdout,*) ccmname, ':: copy one real', & - a2coupler2%rAttr(1,1), a2coupler2%rAttr(2,1), & - a2coupler2%rAttr(3,1),a2coupler2%rAttr(4,1) -call AttrVect_zero(a2coupler2) - -! copy two with a translation -call AttrVect_Copy(a2coupler,a2coupler2,"precc:vwind","conpre:vwind") -if(myProc==0)write(stdout,*) ccmname, ':: copy two real, translate', & - a2coupler2%rAttr(1,1), a2coupler2%rAttr(2,1), & - a2coupler2%rAttr(3,1),a2coupler2%rAttr(4,1) - - -! Remember AtmGrid? This was created only on the root. To do -! some neat integrals, we must scatter it using MCT onto the -! same decomposition as a2coupler: - - call MCT_GGrid_scatter(AtmGrid, dAtmGrid, GSMap, 0, CCM_World) - call MCT_GGrid_scatter(AtmGridExactCopy,dAtmGridExactCopy,GSMap,0,CCM_World) - - if(myProc==0) then - if(.NOT.GGrid_identical(AtmGrid,AtmGridExactCopy,1e-5)) then - call die(ccmname,"AtmGrid unexpectedly altered!!!") - endif - endif - - if(.NOT.GGrid_identical(dAtmGrid,dAtmGridExactCopy,1e-5)) then - call die(ccmname,"dAtmGrid unexpectedly altered!!!") - endif - -! Now, Test the MCT Spatial Integration/Averaging Services... - if(myProc==0)write(stdout,'(3a)') ccmname, & - ':: on-Root test of MCT Spatial Integration Services...' - -! simple unmasked integral case: - call MCT_SpatialIntegral(a2coupler, integratedA2CaV, & - dAtmGrid, 'grid_area', comm=CCM_World) - -if(myProc==0)then - do i=1,MCT_AtrVt_nReal(integratedA2CaV) - write(stdout,'(3a,i2,a,f12.6)') ccmname, & - ':: Unmasked distributed MCT ', & - 'integral: integratedA2CaV%rAttr(',i,',1)=', & - integratedA2CaV%rAttr(i,1) - end do -endif - - call MCT_AtrVt_clean(integratedA2CaV) - -! simple unmasked average case: - call MCT_SpatialAverage(a2coupler, integratedA2CaV, & - dAtmGrid, 'grid_area', comm=CCM_World) - -if(myProc==0)then - do i=1,MCT_AtrVt_nReal(integratedA2CaV) - write(stdout,'(3a,i2,a,f12.6)') ccmname, & - ':: Unmasked distributed MCT ', & - 'average: averagedA2CaV%rAttr(',i,',1)=', & - integratedA2CaV%rAttr(i,1) - end do -endif - - call MCT_AtrVt_clean(integratedA2CaV) - -! not-so-simple masked average cases... - call MCT_MaskedSpatialAverage(inAv=a2coupler, & - outAv=integratedA2CaV, & - GGrid=dAtmGrid, & - SpatialWeightTag='grid_area', & - imaskTags='grid_imask', & - UseFastMethod=.TRUE., & - comm=CCM_World) - -if(myProc==0)then - do i=1,MCT_AtrVt_nReal(integratedA2CaV) - write(stdout,'(3a,i2,a,f12.6)') ccmname, & - ':: Masked distributed MCT ', & - 'average: averagedA2CaV%rAttr(',i,',1)=', & - integratedA2CaV%rAttr(i,1) - end do -endif - - call MCT_AtrVt_clean(integratedA2CaV) - -!!!!!!!!!!!!!!!!!----------Router -! intialize a Router to the Coupler. Call it Atm2Cpl - if(myProc==0)write(*,*) ccmname,":: Initializing Router" - call zeit_ci('Arouterinit') - call MCT_Router_init(coupler_id,GSMap,CCM_World,Atm2Cpl) - call zeit_co('Arouterinit') - if(myProc==0)write(*,*) ccmname,":: Done Initializing Router" - - call Router_test(Atm2Cpl,"CCM::Atm2Cpl",7300+myProc) - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Endof initialization phase -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -!!!!!!!!!!!!!!!!!----------MCT_Send -! send data to the coupler. 
- if(myProc==0)write(*,*) ccmname,":: Doing Distributed Send" - - call AttrVect_test(a2coupler,"CCM::a2coupler",2000+myProc) - do steps=1,nsteps - call zeit_ci('Amctsend') - call MCT_Send(a2coupler,Atm2Cpl) - call zeit_co('Amctsend') - enddo - - if(myProc==0)write(*,*) ccmname,":: Done with Send" - - -!!!!!!!!!!!!!!!!!---------- all done - call zeit_ci('Acleanup') - - ! Clean MCT datatypes - if(myProc==0) then - call MCT_GGrid_clean(AtmGrid) - call MCT_GGrid_clean(AtmCartGrid) - call MCT_GGrid_clean(AtmGridExactCopy) - endif - - call MCT_GGrid_clean(dAtmGrid) - call MCT_GGrid_clean(dAtmGridExactCopy) - call MCT_GSMap_clean(GSMap) - call MCT_Router_clean(Atm2Cpl) - call MCT_AtrVt_clean(a2coupler) - call MCT_AtrVt_clean(a2coupler2) - call MCTWorld_clean() - - ! Clean temporary structures - - deallocate(starts, lengths, myglobalmap, stat=ierr) - if(ierr/=0) call die(ccmname, "deallocate(starts,lengths..)", ierr) - - call zeit_co('Acleanup') - -! write out timing info to fortran unit 45 - call zeit_allflush(CCM_World,0,45) - -contains - - subroutine FoldOverDecomp(myglobalmap,starts,lengths,nx,ny) - - integer,dimension(:,:),intent(in) :: myglobalmap - integer,dimension(:),pointer :: starts,lengths - integer, intent(in) :: nx,ny - integer :: i,j,n,row,col,plat,plon -! For this example, we will do a fold-over-the-equator -! mapping of our grid onto the cartesian processor topology: -! each row of processors handles latitudes from -! the northern and southern hemispheres. - -! -! For each processor, each seglength is plon -! -! the value of the global index at the start of each -! segment can be found from myglobalmap - -! set local latitude and longitude size - plat = ny / NPROCS_LATA - plon = nx / NPROCS_LONA - -! define a Cartesian topology by assigning -! row and column indicies to each processor. -! processor with rank 0 is (0,0) - row = myProc / NPROCS_LONA - col = mod(myProc,NPROCS_LONA) - - allocate(starts(plat),lengths(plat),stat=ierr) - if(ierr/=0) call die(ccmname, "allocate(starts..)", ierr) - -! the fist plat/2 latitudes are from the southern hemisphere - do j=1,plat/2 - starts(j)= myglobalmap(col*plon + 1,(plat/2 * row) + j) - lengths(j)=plon - enddo - -! the next plat/2 latitudes are from the northern hemisphere - n=1 - do j=plat/2 + 1,plat - starts(j)=myglobalmap(col*plon + 1,(ny - (plat/2 * (row+1))) + n) - lengths(j)=plon - n=n+1 - enddo - -end subroutine FoldOverDecomp - -end subroutine ccm3 - diff --git a/src/externals/mct/testsystem/testall/convertPOPT.F90 b/src/externals/mct/testsystem/testall/convertPOPT.F90 deleted file mode 100644 index 52c0098298b..00000000000 --- a/src/externals/mct/testsystem/testall/convertPOPT.F90 +++ /dev/null @@ -1,454 +0,0 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! This file converts a POP grid.dat file to a remapping grid file -! in netCDF format. -! -!----------------------------------------------------------------------- -! -! CVS:$Id: convertPOPT.F90,v 1.9 2004-06-02 23:25:50 eong Exp $ -! CVS $Name: $ -! -! Copyright (c) 1997, 1998 the Regents of the University of -! California. -! -! Unless otherwise indicated, this software has been authored -! by an employee or employees of the University of California, -! operator of the Los Alamos National Laboratory under Contract -! No. W-7405-ENG-36 with the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this -! software. The public may copy and use this software without -! 
charge, provided that this Notice and any statement of authorship -! are reproduced on all copies. Neither the Government nor the -! University makes any warranty, express or implied, or assumes -! any liability or responsibility for the use of this software. -! -!*********************************************************************** - - subroutine convertPOPT(GGrid, grid_file_in, grid_topo_in, nx, ny) - -!----------------------------------------------------------------------- -! -! This file converts a POP grid.dat file to a remapping grid file. -! -!----------------------------------------------------------------------- - - use m_AttrVect,only : AttrVect - use m_GeneralGrid,only : MCT_GGrid_init => init - use m_GeneralGrid,only : MCT_GGrid_indexIA => indexIA - use m_GeneralGrid,only : MCT_GGrid_indexRA => indexRA - use m_GeneralGrid,only : GeneralGrid - use m_stdio - use m_ioutil - use m_die - - - implicit none - -!----------------------------------------------------------------------- -! -! variables that describe the grid -! 4/3 nx = 192, ny = 128 -! 2/3 (mod) nx = 384, ny = 288 -! x3p Greenland DP nx = 100, ny = 116 -! x2p Greenland DP nx = 160, ny = 192 -! x1p Greenland DP nx = 320, ny = 384 -! -!----------------------------------------------------------------------- - - type(GeneralGrid), intent(out) :: GGrid - character (len=*), intent(in) :: grid_file_in - character (len=*), intent(in) :: grid_topo_in - integer, intent(in) :: nx - integer, intent(in) :: ny - - integer :: grid_size - - integer, parameter :: & - grid_rank = 2, & - grid_corners = 4 - - integer, dimension(2) :: & - grid_dims ! size of each dimension - -!----------------------------------------------------------------------- -! -! grid coordinates and masks -! -!----------------------------------------------------------------------- - -!:: NOTE: The following kind specifiers are needed to read the proper -!:: values for the POP grid files. The subsequent type conversions -!:: on these variables may pose a risk. - - integer(kind(1)), dimension(:), allocatable :: & - grid_imask - - real, dimension(:), allocatable :: & - grid_area , &! area as computed in POP - grid_center_lat, &! lat/lon coordinates for - grid_center_lon ! each grid center in radians - - real(selected_real_kind(13)), dimension(:,:), allocatable :: & - grid_corner_lat, &! lat/lon coordinates for - grid_corner_lon ! each grid corner in radians - - real(selected_real_kind(13)), dimension(:,:), allocatable :: & - HTN, HTE ! T-cell grid lengths - -!----------------------------------------------------------------------- -! -! defined constants -! -!----------------------------------------------------------------------- - - real(selected_real_kind(13)), parameter :: & - zero = 0.0, & - one = 1.0, & - two = 2.0, & - three = 3.0, & - four = 4.0, & - five = 5.0, & - half = 0.5, & - quart = 0.25, & - bignum = 1.e+20, & - tiny = 1.e-14, & - pi = 3.14159265359, & - pi2 = two*pi, & - pih = half*pi - - real(selected_real_kind(13)), parameter :: & - radius = 6.37122e8 , & ! radius of Earth (cm) - area_norm = one/(radius*radius) - -!----------------------------------------------------------------------- -! -! other local variables -! 
-!----------------------------------------------------------------------- - - character(len=*),parameter :: myname_= 'convertPOPT' - - integer :: i, j, k, n, p, q, r, ier - - integer :: iunit, ocn_add, im1, jm1, np1, np2 - - integer :: center_lat, center_lon, & - corner_lat, corner_lon, & - imask, area - - real :: tmplon, dlat, dxt, dyt - - real :: x1, x2, x3, x4, & - y1, y2, y3, y4, & - z1, z2, z3, z4, & - tx, ty, tz, da - - grid_size = nx*ny - - allocate(grid_imask(grid_size), & - grid_area(grid_size), & - grid_center_lat(grid_size), & - grid_center_lon(grid_size), & - grid_corner_lat(grid_corners,grid_size), & - grid_corner_lon(grid_corners,grid_size), & - HTN(nx,ny), & - HTE(nx,ny), & - stat=ier) - - if(ier/=0) call die(myname_,"allocate(grid_imask... ", ier) - -!----------------------------------------------------------------------- -! -! read in grid info -! lat/lon info is on velocity points which correspond -! to the NE corner (in logical space) of the grid cell. -! -!----------------------------------------------------------------------- - - iunit = luavail() - - open(unit=iunit, file=trim(grid_topo_in), status='old', & - form='unformatted', access='direct', recl=grid_size*4) - - read (unit=iunit,rec=1) grid_imask - - call luflush(iunit) - - iunit = luavail() -#if SYSSUPERUX || SYSOSF1 - open(unit=iunit, file=trim(grid_file_in), status='old', & - form='unformatted', access='direct', recl=grid_size*2) -#else - open(unit=iunit, file=trim(grid_file_in), status='old', & - form='unformatted', access='direct', recl=grid_size*8) -#endif - - read (unit=iunit, rec=1) grid_corner_lat(3,:) - read (unit=iunit, rec=2) grid_corner_lon(3,:) - read (unit=iunit, rec=3) HTN - read (unit=iunit, rec=4) HTE - call luflush(iunit) - -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!::::::::::::TEST DIAGNOSTICS:::::::::::::::::::::::::::::::::: -!:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - k=0 - do j=1,grid_size - if(grid_imask(j)==0) k=k+1 - enddo - - write(stdout,*) "CONVERTPOPT: NUM_ZEROES(GRID_IMASK), SUM(GRID_IMASK)",& - k, sum(grid_imask) - - write(stdout,*) "CONVERTPOPT: GRID_CORNER_LAT VALUES = ", & - grid_corner_lat(3,1:10) - - write(stdout,*) "CONVERTPOPT: GRID_CORNER_LON VALUES = ", & - grid_corner_lon(3,1:10) - - write(stdout,*) "CONVERTPOPT: HTN VALUES = ", & - HTN(1,1:10) - - write(stdout,*) "CONVERTPOPT: HTE VALUES = ", & - HTE(1,1:10) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - grid_dims(1) = nx - grid_dims(2) = ny - -!----------------------------------------------------------------------- -! -! convert KMT field to integer grid mask -! -!----------------------------------------------------------------------- - - grid_imask = min(grid_imask, 1) - -!----------------------------------------------------------------------- -! -! compute remaining corners -! -!----------------------------------------------------------------------- - - do j=1,ny - do i=1,nx - ocn_add = (j-1)*nx + i - if (i .ne. 
1) then - im1 = ocn_add - 1 - else - im1 = ocn_add + nx - 1 - endif - - grid_corner_lat(4,ocn_add) = grid_corner_lat(3,im1) - grid_corner_lon(4,ocn_add) = grid_corner_lon(3,im1) - end do - end do - - do j=2,ny - do i=1,nx - ocn_add = (j-1)*nx + i - jm1 = (j-2)*nx + i - - grid_corner_lat(2,ocn_add) = grid_corner_lat(3,jm1) - grid_corner_lat(1,ocn_add) = grid_corner_lat(4,jm1) - - grid_corner_lon(2,ocn_add) = grid_corner_lon(3,jm1) - grid_corner_lon(1,ocn_add) = grid_corner_lon(4,jm1) - end do - end do - -!----------------------------------------------------------------------- -! -! mock up the lower row boundaries -! -!----------------------------------------------------------------------- - - do i=1,nx - dlat = grid_corner_lat(1,i+2*nx) - grid_corner_lat(1,i+nx) - grid_corner_lat(1,i) = grid_corner_lat(1,i+nx) - dlat - grid_corner_lat(1,i) = max(grid_corner_lat(1,i), -pih + tiny) - - dlat = grid_corner_lat(2,i+2*nx) - grid_corner_lat(2,i+nx) - grid_corner_lat(2,i) = grid_corner_lat(2,i+nx) - dlat - grid_corner_lat(2,i) = max(grid_corner_lat(2,i), -pih + tiny) - - grid_corner_lon(1,i) = grid_corner_lon(4,i) - grid_corner_lon(2,i) = grid_corner_lon(3,i) - end do - -!----------------------------------------------------------------------- -! -! correct for 0,2pi longitude crossings -! -!----------------------------------------------------------------------- - - do ocn_add=1,grid_size - if (grid_corner_lon(1,ocn_add) > pi2) & - grid_corner_lon(1,ocn_add) = & - grid_corner_lon(1,ocn_add) - pi2 - if (grid_corner_lon(1,ocn_add) < 0.0) & - grid_corner_lon(1,ocn_add) = & - grid_corner_lon(1,ocn_add) + pi2 - do n=2,grid_corners - tmplon = grid_corner_lon(n ,ocn_add) - & - grid_corner_lon(n-1,ocn_add) - if (tmplon < -three*pih) grid_corner_lon(n,ocn_add) = & - grid_corner_lon(n,ocn_add) + pi2 - if (tmplon > three*pih) grid_corner_lon(n,ocn_add) = & - grid_corner_lon(n,ocn_add) - pi2 - end do - end do - -!----------------------------------------------------------------------- -! -! compute ocean cell centers by averaging corner values -! -!----------------------------------------------------------------------- - - do ocn_add=1,grid_size - z1 = cos(grid_corner_lat(1,ocn_add)) - x1 = cos(grid_corner_lon(1,ocn_add))*z1 - y1 = sin(grid_corner_lon(1,ocn_add))*z1 - z1 = sin(grid_corner_lat(1,ocn_add)) - - z2 = cos(grid_corner_lat(2,ocn_add)) - x2 = cos(grid_corner_lon(2,ocn_add))*z2 - y2 = sin(grid_corner_lon(2,ocn_add))*z2 - z2 = sin(grid_corner_lat(2,ocn_add)) - - z3 = cos(grid_corner_lat(3,ocn_add)) - x3 = cos(grid_corner_lon(3,ocn_add))*z3 - y3 = sin(grid_corner_lon(3,ocn_add))*z3 - z3 = sin(grid_corner_lat(3,ocn_add)) - - z4 = cos(grid_corner_lat(4,ocn_add)) - x4 = cos(grid_corner_lon(4,ocn_add))*z4 - y4 = sin(grid_corner_lon(4,ocn_add))*z4 - z4 = sin(grid_corner_lat(4,ocn_add)) - - tx = (x1+x2+x3+x4)/4.0 - ty = (y1+y2+y3+y4)/4.0 - tz = (z1+z2+z3+z4)/4.0 - da = sqrt(tx**2+ty**2+tz**2) - - tz = tz/da - ! grid_center_lon in radians - grid_center_lon(ocn_add) = 0.0 - if (tx .ne. 0.0 .or. ty .ne. 0.0) & - grid_center_lon(ocn_add) = atan2(ty,tx) - ! grid_center_lat in radians - grid_center_lat(ocn_add) = asin(tz) - - end do - - ! 
j=1: linear approximation - n = 0 - do i=1,nx - n = n + 1 - np1 = n + nx - np2 = n + 2*nx - grid_center_lon(n) = grid_center_lon(np1) - grid_center_lat(n) = 2.0*grid_center_lat(np1) - & - grid_center_lat(np2) - end do - - do ocn_add=1,grid_size - if (grid_center_lon(ocn_add) > pi2) & - grid_center_lon(ocn_add) = grid_center_lon(ocn_add) - pi2 - if (grid_center_lon(ocn_add) < 0.0) & - grid_center_lon(ocn_add) = grid_center_lon(ocn_add) + pi2 - enddo - -!----------------------------------------------------------------------- -! -! compute cell areas in same way as POP -! -!----------------------------------------------------------------------- - - n = 0 - do j=1,ny - if (j > 1) then - jm1 = j-1 - else - jm1 = 1 - endif - do i=1,nx - if (i > 1) then - im1 = i-1 - else - im1 = nx - endif - - n = n+1 - - dxt = half*(HTN(i,j) + HTN(i,jm1)) - dyt = half*(HTE(i,j) + HTE(im1,j)) - if (dxt == zero) dxt=one - if (dyt == zero) dyt=one - - grid_area(n) = dxt*dyt*area_norm - end do - end do - -!----------------------------------------------------------------------- -! -! intialize GeneralGrid -! -!----------------------------------------------------------------------- - - call MCT_GGrid_init(GGrid=GGrid, & - CoordChars="grid_center_lat:& - &grid_center_lon", & - WeightChars="grid_area", & - OtherChars="grid_corner_lat_1:& - &grid_corner_lat_2:& - &grid_corner_lat_3:& - &grid_corner_lat_4:& - &grid_corner_lon_1:& - &grid_corner_lon_2:& - &grid_corner_lon_3:& - &grid_corner_lon_4", & - IndexChars="grid_imask", & - lsize=grid_size) - - center_lat = MCT_GGrid_indexRA(GGrid,'grid_center_lat') - center_lon = MCT_GGrid_indexRA(GGrid,'grid_center_lon') - corner_lat = MCT_GGrid_indexRA(GGrid,'grid_corner_lat_1') - corner_lon = MCT_GGrid_indexRA(GGrid,'grid_corner_lon_1') - area = MCT_GGrid_indexRA(GGrid,'grid_area') - imask = MCT_GGrid_indexIA(GGrid,'grid_imask') - - GGrid%data%rattr(center_lat,1:grid_size) = & - grid_center_lat(1:grid_size) - GGrid%data%rattr(center_lon,1:grid_size) = & - grid_center_lon(1:grid_size) - GGrid%data%rattr(area,1:grid_size) = & - grid_area(1:grid_size) - GGrid%data%iattr(imask,1:grid_size) = & - grid_imask(1:grid_size) - - do p = 1,grid_corners - GGrid%data%rattr(corner_lat+p-1,1:grid_size) = & - grid_corner_lat(p,1:grid_size) - GGrid%data%rattr(corner_lon+p-1,1:grid_size) = & - grid_corner_lon(p,1:grid_size) - enddo - - deallocate(grid_imask, grid_area, & - grid_center_lat, grid_center_lon, & - grid_corner_lat, grid_corner_lon, & - HTN, HTE, stat=ier) - - if(ier/=0) call die(myname_,"deallocate(grid_imask... ", ier) - - -!*********************************************************************** - - end subroutine convertPOPT - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - diff --git a/src/externals/mct/testsystem/testall/convertgauss.F90 b/src/externals/mct/testsystem/testall/convertgauss.F90 deleted file mode 100644 index ec4e7996399..00000000000 --- a/src/externals/mct/testsystem/testall/convertgauss.F90 +++ /dev/null @@ -1,516 +0,0 @@ -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! This program creates a remapping grid file for Gaussian lat/lon -! grids (for spectral transform codes). -! -!----------------------------------------------------------------------- -! -! CVS:$Id: convertgauss.F90,v 1.3 2002-11-14 17:11:07 eong Exp $ -! CVS $Name: $ -! -! Copyright (c) 1997, 1998 the Regents of the University of -! California. -! -! Unless otherwise indicated, this software has been authored -! 
by an employee or employees of the University of California, -! operator of the Los Alamos National Laboratory under Contract -! No. W-7405-ENG-36 with the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this -! software. The public may copy and use this software without -! charge, provided that this Notice and any statement of authorship -! are reproduced on all copies. Neither the Government nor the -! University makes any warranty, express or implied, or assumes -! any liability or responsibility for the use of this software. -! -!*********************************************************************** - - subroutine convertgauss(GGrid, nx, ny) - -!----------------------------------------------------------------------- -! -! This file creates a remapping grid file for a Gaussian grid -! -!----------------------------------------------------------------------- - - use m_AttrVect,only : AttrVect -! use m_GeneralGrid,only : MCT_GGrid_init => init - use m_GeneralGrid,only : MCT_GGrid_initUnstructured => initUnstructured - use m_GeneralGrid,only : MCT_GGrid_indexIA => indexIA - use m_GeneralGrid,only : MCT_GGrid_indexRA => indexRA - use m_GeneralGrid,only : GeneralGrid - use m_die - use m_stdio - - implicit none - -!----------------------------------------------------------------------- -! -! variables that describe the grid -! -! T42: nx=128 ny=64 -! T62: nx=192 ny=94 -! -!----------------------------------------------------------------------- - - type(GeneralGrid), intent(out) :: GGrid - integer, intent(in) :: nx - integer, intent(in) :: ny - - integer :: grid_size - - integer, parameter :: & - grid_rank = 2, & - grid_corners = 4 - - integer, dimension(grid_rank) :: & - grid_dims - -!----------------------------------------------------------------------- -! -! grid coordinates and masks -! -!----------------------------------------------------------------------- - - integer, dimension(:), allocatable :: & - grid_imask - - real, dimension(:), allocatable :: & - grid_area , & ! area weights - grid_center_lat, & ! lat/lon coordinates for - grid_center_lon ! each grid center in degrees - - real, dimension(:,:), allocatable :: & - grid_corner_lat, & ! lat/lon coordinates for - grid_corner_lon ! each grid corner in degrees - - -!----------------------------------------------------------------------- -! -! defined constants -! -!----------------------------------------------------------------------- - - real, parameter :: & - zero = 0.0, & - one = 1.0, & - two = 2.0, & - three = 3.0, & - four = 4.0, & - five = 5.0, & - half = 0.5, & - quart = 0.25, & - bignum = 1.e+20, & - tiny = 1.e-14, & - pi = 3.14159265359, & - pi2 = two*pi, & - pih = half*pi - -!----------------------------------------------------------------------- -! -! other local variables -! -!----------------------------------------------------------------------- - - character(len=*),parameter :: myname_= 'convertgauss' - - integer :: i, j, k, p, q, r, ier, atm_add - - integer :: center_lat, center_lon, & - corner_lat, corner_lon, & - imask, area - - real :: dlon, minlon, maxlon, centerlon, & - minlat, maxlat, centerlat - - real, dimension(ny) :: gauss_root, gauss_wgt, gauss_lat - - real, dimension(:), pointer :: PointData - integer :: offset - -!----------------------------------------------------------------------- -! -! compute longitudes of cell centers and corners. set up alon -! array for search routine. -! 
-!----------------------------------------------------------------------- - - grid_size = nx*ny - - allocate(grid_imask(grid_size), & - grid_area(grid_size), & - grid_center_lat(grid_size), & - grid_center_lon(grid_size), & - grid_corner_lat(grid_corners,grid_size), & - grid_corner_lon(grid_corners,grid_size), stat=ier) - - if(ier/=0) call die(myname_,"allocate(grid_imask... ", ier) - - grid_dims(1) = nx - grid_dims(2) = ny - - dlon = 360./nx - - do i=1,nx - - centerlon = (i-1)*dlon - minlon = centerlon - half*dlon - maxlon = centerlon + half*dlon - - do j=1,ny - atm_add = (j-1)*nx + i - - grid_center_lon(atm_add ) = centerlon - grid_corner_lon(1,atm_add) = minlon - grid_corner_lon(2,atm_add) = maxlon - grid_corner_lon(3,atm_add) = maxlon - grid_corner_lon(4,atm_add) = minlon - end do - - end do - -!----------------------------------------------------------------------- -! -! compute Gaussian latitudes and store in gauss_wgt. -! -!----------------------------------------------------------------------- - - call gquad(ny, gauss_root, gauss_wgt) - do j=1,ny - gauss_lat(j) = pih - gauss_root(ny+1-j) - end do - -!----------------------------------------------------------------------- -! -! compute latitudes at cell centers and corners. set up alat -! array for search routine. -! -!----------------------------------------------------------------------- - - do j=1,ny - centerlat = gauss_lat(j) - - if (j .eq. 1) then - minlat = -pih - else - minlat = ATAN((COS(gauss_lat(j-1)) - & - COS(gauss_lat(j )))/ & - (SIN(gauss_lat(j )) - & - SIN(gauss_lat(j-1)))) - endif - - if (j .eq. ny) then - maxlat = pih - else - maxlat = ATAN((COS(gauss_lat(j )) - & - COS(gauss_lat(j+1)))/ & - (SIN(gauss_lat(j+1)) - & - SIN(gauss_lat(j )))) - endif - - do i=1,nx - atm_add = (j-1)*nx + i - grid_center_lat(atm_add ) = centerlat*360./pi2 - grid_corner_lat(1,atm_add) = minlat*360./pi2 - grid_corner_lat(2,atm_add) = minlat*360./pi2 - grid_corner_lat(3,atm_add) = maxlat*360./pi2 - grid_corner_lat(4,atm_add) = maxlat*360./pi2 - grid_area(atm_add) = gauss_wgt(j)*pi2/nx - end do - - end do - -!----------------------------------------------------------------------- -! -! define mask -! -!----------------------------------------------------------------------- - - grid_imask = 1 - -!----------------------------------------------------------------------- -! -! intialize GeneralGrid -! -!----------------------------------------------------------------------- - -! call MCT_GGrid_init(GGrid=GGrid, & -! CoordChars="grid_center_lat:& -! &grid_center_lon", & -! WeightChars="grid_area", & -! OtherChars="grid_corner_lat_1:& -! &grid_corner_lat_2:& -! &grid_corner_lat_3:& -! &grid_corner_lat_4:& -! &grid_corner_lon_1:& -! &grid_corner_lon_2:& -! &grid_corner_lon_3:& -! &grid_corner_lon_4", & -! IndexChars="grid_imask", & -! lsize=grid_size) - -! Create and fill PointData(:) array for unstructured-style GeneralGrid_init - - allocate(PointData(2*grid_size), stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: allocate(PointData(...) 
failed with ier=',ier - call die(myname_) - endif - - do i=1,grid_size - offset = 2 * (i-1) - PointData(offset+1) = grid_center_lat(i) - PointData(offset+2) = grid_center_lon(i) - end do - - call MCT_GGrid_initUnstructured(GGrid=GGrid, & - CoordChars="grid_center_lat:& - &grid_center_lon", & - CoordSortOrder="grid_center_lat:& - &grid_center_lon", & - WeightChars="grid_area", & - OtherChars="grid_corner_lat_1:& - &grid_corner_lat_2:& - &grid_corner_lat_3:& - &grid_corner_lat_4:& - &grid_corner_lon_1:& - &grid_corner_lon_2:& - &grid_corner_lon_3:& - &grid_corner_lon_4", & - IndexChars="grid_imask", & - nDims=2, nPoints=grid_size, & - PointData=PointData) - - deallocate(PointData, stat=ier) - if(ier /= 0) then - write(stderr,'(2a,i8)') myname_, & - ':: deallocate(PointData...) failed with ier=',ier - call die(myname_) - endif - -! center_lat = MCT_GGrid_indexRA(GGrid,'grid_center_lat') -! center_lon = MCT_GGrid_indexRA(GGrid,'grid_center_lon') - corner_lat = MCT_GGrid_indexRA(GGrid,'grid_corner_lat_1') - corner_lon = MCT_GGrid_indexRA(GGrid,'grid_corner_lon_1') - area = MCT_GGrid_indexRA(GGrid,'grid_area') - imask = MCT_GGrid_indexIA(GGrid,'grid_imask') - -! GGrid%data%rattr(center_lat,1:grid_size) = & -! grid_center_lat(1:grid_size) -! GGrid%data%rattr(center_lon,1:grid_size) = & -! grid_center_lon(1:grid_size) - GGrid%data%rattr(area,1:grid_size) = & - grid_area(1:grid_size) - GGrid%data%iattr(imask,1:grid_size) = & - grid_imask(1:grid_size) - - do p = 1,grid_corners - GGrid%data%rattr(corner_lat+p-1,1:grid_size) = & - grid_corner_lat(p,1:grid_size) - GGrid%data%rattr(corner_lon+p-1,1:grid_size) = & - grid_corner_lon(p,1:grid_size) - enddo - - deallocate(grid_imask, grid_area, & - grid_center_lat, grid_center_lon, & - grid_corner_lat, grid_corner_lon, & - stat=ier) - - if(ier/=0) call die(myname_,"deallocate(grid_imask... ", ier) - - -!----------------------------------------------------------------------- - - end subroutine convertgauss - -!*********************************************************************** - - subroutine gquad(l,root,w) - -!----------------------------------------------------------------------- -! -! This subroutine finds the l roots (in theta) and gaussian weights -! associated with the legendre polynomial of degree l > 1. -! -!----------------------------------------------------------------------- - - use m_die - - implicit none - -!----------------------------------------------------------------------- -! -! intent(in) -! -!----------------------------------------------------------------------- - - integer, intent(in) :: l - -!----------------------------------------------------------------------- -! -! intent(out) -! -!----------------------------------------------------------------------- - - real, dimension(l), intent(out) :: root, w - -!----------------------------------------------------------------------- -! -! defined constants -! -!----------------------------------------------------------------------- - - real, parameter :: & - zero = 0.0, & - one = 1.0, & - two = 2.0, & - three = 3.0, & - four = 4.0, & - five = 5.0, & - half = 0.5, & - quart = 0.25, & - bignum = 1.e+20, & - tiny = 1.e-14, & - pi = 3.14159265359, & - pi2 = two*pi, & - pih = half*pi - -!----------------------------------------------------------------------- -! -! local -! 
-!----------------------------------------------------------------------- - - integer :: l1, l2, l22, l3, k, i, j, loop_counter - - real :: del,co,p1,p2,p3,t1,t2,slope,s,c,pp1,pp2,p00 - -!-----MUST adjust tolerance for newton convergence-----! - - ! Modify tolerance level to the precision of the real numbers: - ! Increase for lower precision, decrease for higher precision. - - real, parameter :: RTOL = 1.0e4*epsilon(0.) - -!------------------------------------------------------! - -!----------------------------------------------------------------------- -! -! Define useful constants. -! -!----------------------------------------------------------------------- - - del= pi/float(4*l) - l1 = l+1 - co = float(2*l+3)/float(l1**2) - p2 = 1.0 - t2 = -del - l2 = l/2 - k = 1 - p00 = one/sqrt(two) - -!----------------------------------------------------------------------- -! -! Start search for each root by looking for crossing point. -! -!----------------------------------------------------------------------- - - do i=1,l2 - 10 t1 = t2 - t2 = t1+del - p1 = p2 - s = sin(t2) - c = cos(t2) - pp1 = 1.0 - p3 = p00 - do j=1,l1 - pp2 = pp1 - pp1 = p3 - p3 = 2.0*sqrt((float(j**2)-0.250)/float(j**2))*c*pp1- & - sqrt(float((2*j+1)*(j-1)*(j-1))/ & - float((2*j-3)*j*j))*pp2 - end do - p2 = pp1 - if ((k*p2).gt.0) goto 10 - -!----------------------------------------------------------------------- -! -! Now converge using Newton-Raphson. -! -!----------------------------------------------------------------------- - - k = -k - loop_counter=0 - 20 continue - loop_counter=loop_counter+1 - slope = (t2-t1)/(p2-p1) - t1 = t2 - t2 = t2-slope*p2 - p1 = p2 - s = sin(t2) - c = cos(t2) - pp1 = 1.0 - p3 = p00 - do j=1,l1 - pp2 = pp1 - pp1 = p3 - p3 = 2.0*sqrt((float(j**2)-0.250)/float(j**2))*c*pp1- & - sqrt(float((2*j+1)*(j-1)*(j-1))/ & - float((2*j-3)*j*j))*pp2 - end do - p2 = pp1 - - if(loop_counter > 1e4) then - call die("subroutine gquad",& - "ERROR:: Precision of reals is too low. & - & Increase the magnitude of RTOL.",0) - endif - - if (abs(p2).gt.RTOL) goto 20 - root(i) = t2 - w(i) = co*(sin(t2)/p3)**2 - end do - -!----------------------------------------------------------------------- -! -! If l is odd, take care of odd point. -! -!----------------------------------------------------------------------- - - l22 = 2*l2 - if (l22 .ne. l) then - l2 = l2+1 - t2 = pi/2.0 - root(l2) = t2 - s = sin(t2) - c = cos(t2) - pp1 = 1.0 - p3 = p00 - do j=1,l1 - pp2 = pp1 - pp1 = p3 - p3 = 2.0*sqrt((float(j**2)-0.250)/float(j**2))*c*pp1- & - sqrt(float((2*j+1)*(j-1)*(j-1))/ & - float((2*j-3)*j*j))*pp2 - end do - p2 = pp1 - w(l2) = co/p3**2 - endif - -!----------------------------------------------------------------------- -! -! Use symmetry to compute remaining roots and weights. -! -!----------------------------------------------------------------------- - - l3 = l2+1 - do i=l3,l - root(i) = pi-root(l-i+1) - w(i) = w(l-i+1) - end do - -!----------------------------------------------------------------------- - - end subroutine gquad - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! diff --git a/src/externals/mct/testsystem/testall/cpl.F90 b/src/externals/mct/testsystem/testall/cpl.F90 deleted file mode 100644 index 0a1235d9d0f..00000000000 --- a/src/externals/mct/testsystem/testall/cpl.F90 +++ /dev/null @@ -1,1270 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! 
-!----------------------------------------------------------------------- -! CVS $Id: cpl.F90,v 1.25 2007-12-18 00:02:05 jacob Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: cpl -- coupler for unit tester -! -! !DESCRIPTION: -! A coupler subroutine to test functionality of MCT. -! -! !INTERFACE: -! - subroutine cpl (CPL_World) -! -! !USES: -! - use MPH_all -!---Field Storage DataType and associated methods - use m_AttrVect,only : MCT_AtrVt_init => init - use m_AttrVect,only : MCT_AtrVt_clean => clean - use m_AttrVect,only : MCT_AtrVt_nreals => nRAttr - use m_AttrVect,only : MCT_AtrVt_nints => nIAttr - use m_AttrVect,only : MCT_AtrVt_lsize => lsize - use m_AttrVect,only : AttrVect - use m_AttrVect,only : AttrVect_exportIListToChar =>exportIListToChar - use m_AttrVect,only : AttrVect_exportRListToChar =>exportRListToChar - use m_AttrVect,only : AttrVect_Copy => Copy -!---AttrVect Communication methods - use m_AttrVectComms,only : AttrVect_Send => send - use m_AttrVectComms,only : AttrVect_Recv => recv - use m_AttrVectComms, only : AttrVect_gather => gather -!---AttrVect Reduction methods - use m_AttrVectReduce,only : AttrVect_LocalReduce => LocalReduce - use m_AttrVectReduce,only : AttrVect_LocalReduceRAttr => & - LocalReduceRAttr - use m_AttrVectReduce,only : AttrVectSUM, AttrVectMIN, AttrVectMAX -!---Coordinate Grid DataType and associated methods - use m_GeneralGrid,only: GeneralGrid - use m_GeneralGrid,only: MCT_GGrid_clean => clean - use m_GeneralGrid,only : MCT_GGrid_lsize => lsize - use m_GeneralGridComms,only: MCT_GGrid_recv => recv - use m_GeneralGridComms,only: MCT_GGrid_scatter => scatter - use m_GeneralGridComms,only: MCT_GGrid_gather => gather - use m_GeneralGridComms,only: MCT_GGrid_bcast => bcast -!---MCT Spatial Integral services... - use m_SpatialIntegral,only : MCT_PairedSpatialIntegrals => & - PairedSpatialIntegrals - use m_SpatialIntegral,only : MCT_PairedSpatialAverages => & - PairedSpatialAverages - use m_SpatialIntegral,only : MCT_PairedMaskedSpatialIntegral => & - PairedMaskedSpatialIntegrals - use m_SpatialIntegral,only : MCT_PairedMaskedSpatialAverages => & - PairedMaskedSpatialAverages -!---Domain Decomposition Descriptor DataType and associated methods - use m_GlobalSegMap,only: MCT_GSMap_init => init - use m_GlobalSegMap,only: MCT_GSMap_copy => copy ! 
rml - use m_GlobalSegMap,only: MCT_GSMap_clean => clean - use m_GlobalSegMap,only: MCT_GSMap_gsize => gsize - use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize - use m_GlobalSegMap,only: MCT_GSMap_ngseg => ngseg - use m_GlobalSegMap,only: MCT_GSMap_nlseg => nlseg - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalMap,only : GlobalMap - use m_GlobalMap,only : GlobalMap_init => init - use m_GlobalMap,only : GlobalMap_init_remote => init_remote - use m_GlobalMap,only : GlobalMap_clean => clean -!---GlobalSegMap Communication Methods - use m_GlobalSegMapComms,only: GlobalSegMap_bcast => bcast - use m_GlobalSegMapComms,only: GlobalSegMap_send => send - use m_GlobalSegMapComms,only: GlobalSegMap_recv => recv - use m_GlobalSegMapComms,only: GlobalSegMap_isend => isend -!---Methods for Exchange of GlobalMapping Objects - use m_ExchangeMaps,only: ExchangeMap -!---Convert between GlobalSegMap and GlobalMap - use m_ConvertMaps,only:GlobalSegMapToGlobalMap -!---Global-to-Local indexing services - use m_GlobalToLocal,only: MCT_GStoL => GlobalToLocalIndices -!---Component Model Registry - use m_MCTWorld,only: ThisMCTWorld - use m_MCTWorld,only: MCTComponentRootRank => ComponentRootRank - use m_MCTWorld,only: MCTWorld_initialized => initialized - use m_MCTWorld,only: MCTWorld_init => init - use m_MCTWorld,only: MCTWorld_clean => clean -!---Intercomponent communications scheduler - use m_Router,only: Router - use m_Router,only: MCT_Router_init => init - use m_Router,only: MCT_Router_print => print ! rml - use m_Router,only: MCT_Router_clean => clean - use m_Transfer,only: MCT_Send => send - use m_Transfer,only: MCT_Recv => recv -!---Sparse Matrix DataType and associated methods - use m_SparseMatrix, only : SparseMatrix - use m_SparseMatrix, only : SparseMatrix_clean => clean - use m_SparseMatrix, only : SparseMatrix_lsize => lsize - use m_SparseMatrix, only : SMatrix_exportGlobalRowIndices => & - exportGlobalRowIndices - use m_SparseMatrix, only : SMatrix_exportGlobalColumnInd => & - exportGlobalColumnIndices - use m_SparseMatrix, only : SMatrix_exportMatrixElements => & - exportMatrixElements - - use m_SparseMatrixComms, only: SparseMatrix_ScatterByRow => ScatterByRow - use m_SparseMatrixComms, only: SparseMatrix_gather => gather - use m_SparseMatrixComms, only: SparseMatrix_bcast => bcast - use m_SparseMatrixDecomp, only : SparseMatrixDecompByRow => ByRow -!---SparseMatrixPlus DataType and associated methods - use m_SparseMatrixPlus, only : SparseMatrixPlus - use m_SparseMatrixPlus, only : SparseMatrixPlus_init => init - use m_SparseMatrixPlus, only : SparseMatrixPlus_clean => clean - use m_SparseMatrixPlus, only : SparseMatrixPlus_initialized => initialized - use m_SparseMatrixPlus, only : Xonly ! Decompose matrix by column - use m_SparseMatrixPlus, only : Yonly ! Decompose matrix by row - use m_SparseMatrixPlus, only : XandY ! 
Arbitrary row/column decomp -!---Accumulation data type and methods - use m_Accumulator, only : Accumulator - use m_Accumulator, only : accumulate - use m_Accumulator, only : MCT_Accumulator_init => init - use m_Accumulator, only : MCT_Accumulator_clean => clean - use m_Accumulator, only : Accumulator_lsize => lsize - use m_Accumulator, only : MCT_SUM - use m_Accumulator, only : MCT_AVG - use m_AccumulatorComms,only : MCT_Acc_scatter => scatter - use m_AccumulatorComms,only : MCT_Acc_gather => gather - use m_AccumulatorComms,only : MCT_Acc_bcast => bcast -!---Matrix-Vector multiply methods - use m_MatAttrVectMul, only: MCT_MatVecMul => sMatAvMult -!---mpeu file reading routines - use m_inpak90 -!---mpeu routines for MPI communications - use m_mpif90 -!---mpeu timers - use m_zeit -!---mpeu stdout/stderr - use m_stdio - use m_ioutil, only: luavail -!---mpeu error handling - use m_die -!---mpeu reals - use m_realkinds - -!---Tester Modules - use m_ACTEST, only : Accumulator_test => testall - use m_ACTEST, only : Accumulator_identical => identical - use m_AVTEST, only : AttrVect_test => testall - use m_AVTEST, only : AttrVect_identical => Identical - use m_AVTEST, only : AttrVect_ReduceTest => Reduce - use m_GGRIDTEST, only : GGrid_test => testall - use m_GGRIDTEST, only : GGrid_identical => Identical - use m_GMAPTEST, only : GMap_test => testall - use m_GSMAPTEST, only : GSMap_test => testall - use m_GSMAPTEST, only : GSMap_identical => Identical - use m_MCTWORLDTEST, only : MCTWorld_test => testall - use m_ROUTERTEST, only : Router_test => testall - use m_SMATTEST, only : sMat_test => testall - use m_SMATTEST, only : sMat_identical => Identical - use m_List, only : ListExportToChar => ExportToChar - - implicit none - -! !INPUT PARAMETERS: - - integer,intent(in) :: CPL_World ! communicator for coupler - -! !REVISION HISTORY: -! Oct00 - Yun (Helen) He and Chris Ding, NERSC/LBNL - initial MPH-only version -! 19Nov00 - R. Jacob -- interface with mct -! 06Feb01 - J. Larson - slight mod to -! accomodate new interface to MCT_GSMap_lsize(). -! 08Feb01 - R. Jacob -- use MCT_Recv, new interface -! to MCT_GSMap_lsize(). -! 23Feb01 - R. Jacob -- add check for transfer -! expand size of AttrVect -! 25Feb01 - R. Jacob - add mpe and mpeu -! 22Mar01 - R. Jacob - use new router init -! 27Apr01 - R. Jacob - use SparseMatrix -! 02May01 - R. Jacob - Router is now built -! between atmosphere model and sparsematrix-defined -! atmosphere globalsegmap. Recv data in aV and check. -! Add new argument to MCT_Smat2xGSMap. -! 16May01 - Larson/Jacob - only root -! needs to call ReadSparseMatrix with new Comms -! 17May01 - R. Jacob - perfrom the sparse -! matrix multiply on the received dummy data and check -! 19May01 - R. Jacob - verify that matrix -! multiply works on constant data -! 11Jun01 - Larson/Jacob - receive atmosphere's general grid from -! the atmosphere. -! 15Feb02 - R. Jacob New MCTWorld argument -! 28Mar02 - R. Jacob Use Rearranger -! 12Jun02 - J. Larson - Use SparseMatrix -! export routines. -! 
-!EOP ___________________________________________________________________ - - character(len=*), parameter :: cplname='cpl.F90' - -!----------------------- MPH vars - integer :: myProc, myProc_global - integer :: Global_World - integer :: atmo_id, ocn_id - integer :: ncomps,mycompid,mySize - -!----------------------- MCT and dummy model vars - - logical :: initialized - integer :: root,stat,status - integer, dimension(:,:),pointer :: sendstatus - integer, dimension(:),pointer :: sendrequest - integer, dimension(2) :: sMat_src_dims, sMat_dst_dims - -! SparseMatrix dimensions and Processor Layout - integer :: Nax, Nay ! Atmosphere lons, lats - integer :: Nox, Noy ! Ocean lons, lats - integer :: NPROCS_LATA, NPROCS_LONA ! Processor layout - -! Arrays used to initialize the MCT GlobalSegMap - integer :: asize,asize2,i,j,k - integer :: osize,osize2 - integer,dimension(1) :: start,length -! integer,dimension(:),pointer :: lstart,llength - -! Number of accumulation steps and accumulator dummy variables - integer :: steps - integer, parameter :: nsteps = 10 - character*64 :: ACCA2O_rList - integer, dimension(:), allocatable :: ACCA2O_rAction - -! Dummy arrays used for testing SparseMatrix export routines: - integer :: Num - integer, dimension(:), pointer :: DummyI - real, dimension(:), pointer :: DummyR - -! Atmosphere and Ocean GSMap - type(GlobalSegMap) :: testAGSMap ! rml - type(GlobalSegMap) :: AGSMap,OGSMap, DAGSMap - -! GSMap for testing GlobalSegMapComms - type(GlobalSegMap) :: inGSMap - -! Ocean GlobalSegMap from ocean - type(GlobalSegMap) :: OCN_OGSMap - -! Ocean GlobalMap from ocean - type(GlobalMap) :: OCN_OGMap - -! Remote GlobalMap for testing - type(GlobalMap) :: rOGMap - -! GlobalMap for Testing Accumulator Comms - type(GlobalMap) :: OGMap - -! Router from Atm to Cpl - type(Router) :: Atm2Cpl - -! Router from Cpl to Ocn - type(Router) :: Cpl2Ocn - -! Accumulator for data from atmosphere to ocean - type(Accumulator) :: ACCA2O - -! Accumulator for testing scatter and gather routines - type(Accumulator) :: scatterAcc, GgatherAcc, GSgatherAcc - -! AttrVect for data from the atm - type(AttrVect) :: fromatm - -! AttrVect for data from the atm on the ocean grid - type(AttrVect) :: fromatm_ocn - -! Coupler AttrVect for data from process 1 to process 0 - type(AttrVect) :: fromP1 - -! AttrVect for data from the ocn - type(AttrVect) :: fromocn - -! AttrVect for data from the ocn on the atmosphere's grid - type(AttrVect) :: fromocn_atm - -! AttrVects for PairedSpatialIntegral services - type(AttrVect) :: IntegratedAVect, IntegratedOVect - -! Spatial Integral Temporary Variables - integer :: VectorLength - -! AttrVects for testing mapping - type(AttrVect) :: gatherAV_ocn,gatherAV_atm - integer :: unit, unit1, unit2 - -! a2o SparseMatrix elements on root - type(SparseMatrix) :: DummySMat - -! a2o distributed SparseMatrix elements - type(SparseMatrix) :: dMat, dMat_test - -! Test sMat for gather - type(SparseMatrix) :: gathersMat - -! Test GlobalSegMap for sMat gather - type(GlobalSegMap) :: MatGSMap - -! a2o and o2a distributed SparseMatrixPlus variables - type(SparseMatrixPlus) :: A2OMatPlus, O2AMatPlus - -! The atmosphere's grid recieved from the atmosphere - type(GeneralGrid) :: AtmGrid - -! The atmosphere's distributed grid - type(GeneralGrid) :: dAtmGrid - -! The ocean's grid recieved from the ocean - type(GeneralGrid) :: OcnGrid - -! The ocean's distributed grid - type(GeneralGrid) :: dOcnGrid - -! 
Test grid for scatter,gather,bcast - type(GeneralGrid) :: scatterGGrid, gatherGGrid - -!::DEFINE POP REMAP MATRIX DIMENSIONS:: - -#ifdef MPE -#include "mpe.h" -#endif - - -!------------------------------------Begin code - - call MPI_COMM_DUP (MPI_COMM_WORLD, Global_World, ierr) - - call MPI_COMM_RANK (MPI_COMM_WORLD, myProc_global, ierr) - call MPI_COMM_RANK (CPL_World, myProc, ierr) -! write(*,*) myProc, ' in cpl === ', myProc_global, ' in global' -! write(*,*) 'MPH_local_proc_id()=', MPH_local_proc_id_ME_SE() -! write(*,*) 'MPH_global_proc_id()=', MPH_global_proc_id() - - call MPI_COMM_SIZE(CPL_World,mySize,ierr) - if (myProc==0) call MPH_redirect_output ('cpl') - ncomps=MPH_total_components() - mycompid=MPH_component_id_ME_SE() - -! Get the atmosphere's component id - atmo_id = MPH_get_component_id("atmosphere") - -! Get the ocean's component id - ocn_id = MPH_get_component_id("ocean") - -!------------------------------------------------------- -! Begin attempts to use MCT - -#ifdef MPE - call mpe_logging_init(myProc_global,init_s,init_e,gsmi_s,gsmi_e, & - atri_s,atri_e,routi_s,routi_e,send_s,send_e,recv_s,recv_e, & - clean_s,clean_e) -#endif - - initialized= MCTWorld_initialized() - if (myProc==0)write(stdout,*) cplname, & - ":: MCTWorld initialized=",initialized - if(initialized) call die(cplname, "mct already initialized") - - if(myProc==0)write(stdout,*) cplname, ":: Initializing MCTWorld" - call zeit_ci('Cworldinit') - call MCTWorld_init(ncomps,MPI_COMM_WORLD,CPL_World,mycompid) - call zeit_co('Cworldinit') - - initialized= MCTWorld_initialized() - if (myProc==0)write(stdout,*) cplname, & - ":: MCTWorld initialized=",initialized - if(.not. initialized) call die(cplname, "mct not initialized") - - call MCTWorld_test("CPL::MCTWorld",6000+myProc) - -! Read in Sparse Matrix dimensions and processor layout - - if(myProc==0) then - - ! Read in SparseMatrix dimensions for atmosphere and ocean - call I90_LoadF("ut_SparseMatrix.rc", ierr) - - call I90_Label("atmosphere_dimensions:", ierr) - Nax = I90_GInt(ierr) - Nay = I90_GInt(ierr) - - call I90_Label("ocean_dimensions:", ierr) - Nox = I90_GInt(ierr) - Noy = I90_GInt(ierr) - - call I90_Release(ierr) - - ! Read in processor layout information for atmosphere and ocean - call I90_LoadF("./processors_map.in", ierr) - - call I90_Label("NPROCS_ATM", ierr) - NPROCS_LATA = I90_GInt(ierr) - NPROCS_LONA = I90_GInt(ierr) - - call I90_Release(ierr) - - endif - - root = MCTComponentRootRank(mycompid,ThisMCTWorld) - call MPI_BCAST(Nax,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Nay,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Nox,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Noy,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(NPROCS_LATA,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(NPROCS_LONA,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - -!::::Receive the Atmosphere's General Grid on the root process - - if(myProc==0) then - write(stdout,*) cplname, ":: Receiving Grid from atmosphere" - - call MCT_GGrid_recv(AtmGrid, atmo_id, 1400, status) - - call GGrid_test(AtmGrid,"CPL::Root AtmGrid",3000+myProc) - -! check that we can make inquiries about the atmosphere's grid. 
- write(stdout,*) cplname, ':: AtmGrid%coordinate_list%bf = ', & - AtmGrid%coordinate_list%bf - write(stdout,*) cplname, ':: AtmGrid%index_list%bf = ', & - AtmGrid%index_list%bf - write(stdout,*) cplname, ':: AtmGrid%data%iList%bf = ', & - AttrVect_exportIListToChar(AtmGrid%data) - write(stdout,*) cplname, ':: size(AtmGrid%data%iAttr) = ', & - size(AtmGrid%data%iAttr) - write(stdout,*) cplname, ':: AtmGrid%data%rList%bf = ', & - AttrVect_exportRListToChar(AtmGrid%data) - write(stdout,*) cplname, ':: size(AtmGrid%data%rAttr) = ', & - size(AtmGrid%data%rAttr) - -!!!!!!!!!!!!! Receive the Ocean's General Grid -! - write(stdout,*) cplname, ":: Receiving Grid from ocean" - - call MCT_GGrid_recv(OcnGrid, ocn_id, 2800, status) - - call GGrid_test(OcnGrid,"CPL::Root OcnGrid",3100+myProc) - -! check that we can make inquiries about the atmosphere's grid. - write(stdout,*) cplname, ':: OcnGrid%coordinate_list%bf = ', & - OcnGrid%coordinate_list%bf - write(stdout,*) cplname, ':: OcnGrid%index_list%bf = ', & - OcnGrid%index_list%bf - write(stdout,*) cplname, ':: OcnGrid%data%iList%bf = ', & - AttrVect_exportIListToChar(OcnGrid%data) - write(stdout,*) cplname, ':: size(OcnGrid%data%iAttr) = ', & - size(OcnGrid%data%iAttr) - write(stdout,*) cplname, ':: OcnGrid%data%rList%bf = ', & - AttrVect_exportRListToChar(OcnGrid%data) - write(stdout,*) cplname, ':: size(OcnGrid%data%rAttr) = ', & - size(OcnGrid%data%rAttr) - endif - - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Set a decomposition of the atmosphere in the coupler "by hand" -! For this example, the coupler will split atmosphere points -! evenly between processors. -! -! number of local atmosphere points - - asize = (Nay * Nax)/mySize - asize2 = asize - -! (Nay *Nax)/mySize isnt an integer, give extra points to last proc. - if(myProc == mySize - 1) then - asize = asize + mod(Nay*Nax,mySize) - endif - -! find starting point in the numbering scheme -! numbering scheme is same as that used in atmosphere model. - start(1) = (myProc * asize2) +1 - length(1) = asize - -! write(stdout,*)myProc,asize2,asize,start(1) - -! describe this information in a Global Map for the atmosphere. - if(myProc==0)write(stdout,*) cplname, ":: Inializing AGSMap" - call zeit_ci('Cagsmapinit') -! rml test of the copy - call MCT_GSMap_init(testAGSMap,start,length,0,CPL_World,mycompid) - call MCT_GSMap_copy(testAGSMap,AGSMap) - call MCT_GSMap_clean(testAGSMap) - print *,'Copied AGSMap' - call zeit_co('Cagsmapinit') - -! Test GlobalSegMapComms: - -! Test GlobalSegMap broadcast: - - if(myProc==0) then - - DAGSMap%comp_id = AGSMap%comp_id - DAGSMap%ngseg = AGSMap%ngseg - DAGSMap%gsize = AGSMap%gsize - - allocate(DAGSMap%start(DAGSMap%ngseg),DAGSMap%length(DAGSMap%ngseg), & - DAGSMap%pe_loc(DAGSMap%ngseg), stat=ierr) - if(ierr/=0) call die(cplname, "allocate(DAGSMap%start...)", ierr) - - do i=1,DAGSMap%ngseg - DAGSMap%start(i) = AGSMap%start(i) - DAGSMap%length(i) = AGSMap%length(i) - DAGSMap%pe_loc(i) = AGSMap%pe_loc(i) - end do - - endif - - call GlobalSegMap_bcast(DAGSMap, 0, CPL_World) - - if (.NOT.(GSMap_identical(DAGSMap,AGSMap))) then - call die(cplname,"GSMap_identical(DAGSMap,AGSMap)") - endif - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Describe OGSMap, the ocean grid decomposed in the coupler - -! number of local oceanpoints - osize = (Noy * Nox)/mySize - osize2 = osize - -! (Noy *Nox)/mySize isnt an integer, give extra points to last proc. - if(myProc == mySize - 1) then - osize = osize + mod(Noy*Nox,mySize) - endif -! 
find starting point in the numbering scheme -! numbering scheme is same as that used in ocean model. - start(1) = (myProc * osize2) +1 - length(1) = osize - -! describe this information in a Global Map for the ocean. - if(myProc==0)write(stdout,*) cplname, ":: Inializing OGSMap" - call zeit_ci('Cogsmapinit') - call MCT_GSMap_init(OGSMap,start,length,0,CPL_World,mycompid) - call zeit_co('Cogsmapinit') - call GSMap_test(OGSMap,"CPL::OGSMap",CPL_World,5000+myProc) - - ! lets exchange maps with the ocean - call ExchangeMap(OGSMap,CPL_World,OCN_OGSMap,ocn_id,ierr) - if(ierr/=0) call die(cplname,"call ExchangeMap") - call GSMap_test(OCN_OGSMap,"CPL::OCN_OGSMap",CPL_World,5100+myProc) - - ! Compare this to sending and recieving maps - if(myProc==0) then - - call GlobalSegMap_send(OGSMap,ocn_id,777) - - call GlobalSegMap_isend(OGSMap,ocn_id,888,sendrequest,ierr) - if(ierr/=0) call die(cplname,"call GlobalSegMap_isend") - - ! Careful: sendrequest gets allocated with length 6 inside GSMap_isend - allocate(sendstatus(MP_STATUS_SIZE,6),stat=ierr) - if(ierr/=0) call die(cplname,"allocate(sendstatus)") - - call MPI_WAITALL(6,sendrequest,sendstatus,ierr) - if(ierr/=0) call MP_Perr_die(cplname,"call MPI_WAITALL(sendrequest)",& - ierr) - - deallocate(sendrequest,sendstatus,stat=ierr) - if(ierr/=0) call die(cplname,"deallocate(sendrequest)") - - endif - - call GlobalSegMapToGlobalMap(OCN_OGSMap,OCN_OGMap,ierr) - if(ierr/=0) call die(cplname,"GlobalSegMapToGlobalMap(OCN_OGSMap,OCN_OGMap)") - call GMap_test(GMap=OCN_OGMap,Identifier="CPL->OCN_OGMap",device=4000+myProc) - - call GlobalMap_init_remote(rOGMap,OCN_OGMap%counts,& - size(OCN_OGMap%counts),0,CPL_World,OCN_OGMap%comp_id) - call GMap_test(GMap=rOGMap,Identifier="CPL::rOGMap",device=4100+myProc) - -!!! test some GlobalSegMap functions -! write(*,*)myProc,'number of global segs is',MCT_GSMap_ngseg(OGSMap) -! write(*,*)myProc,'local size is',MCT_GSMap_lsize(OGSMap,CPL_World) -! write(*,*)myProc,'global size is',MCT_GSMap_gsize(OGSMap) - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -if(myProc==0) write(*,*) cplname, ":: Test GeneralGridComms" -call MCT_GGrid_bcast(AtmGrid,0,CPL_World) -call GGrid_test(AtmGrid,"CPL::Broadcast AtmGrid",3200+myProc) - -call MCT_GGrid_scatter(OcnGrid,scatterGGrid,OGSMap,0,CPL_World) -call MCT_GGrid_gather(scatterGGrid,gatherGGrid,OGSMap,0,CPL_World) - -if(myProc==0) then - if(.NOT. GGrid_identical(OcnGrid,gatherGGrid,0.1) ) then - call die(cplname,"GGrid Comms test failed") - endif - call MCT_GGrid_clean(gatherGGrid) -endif - - call MCT_GGrid_clean(scatterGGrid) - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!SparseMatrix Read -! read in the SparseMatrix elements onto root -! -! This example reads in a2o -! - if(myProc==0)write(stdout,*)" " - if(myProc==0)write(stdout,*) cplname, ":: Reading SparseMatrix elements" - if(myProc==0)write(stdout,*)" " - call zeit_ci('CsmatReadnTest') -if(myProc==0) then -! NOTE: this is a custom routine, will not be part of MCT - call ReadSparseMatrixAsc(DummySMat,"atmosphere_to_ocean_remap_file:", & - sMat_src_dims, sMat_dst_dims) -! Check that the values in the SparseMatrix match the values of the -! 
POP grid and the Gaussian grid - if(sMat_src_dims(1) /= Nax) call die(cplname, & - "sMat_src_dims(1) does not match Nax") - if(sMat_src_dims(2) /= Nay) call die(cplname, & - "sMat_src_dims(2) does not match Nay") - if(sMat_dst_dims(1) /= Nox) call die(cplname, & - "sMat_dst_dims(1) does not match Nox") - if(sMat_dst_dims(2) /= Noy) call die(cplname, & - "sMat_dst_dims(2) does not match Noy") - - nullify(DummyI) ! let first export routine create this - Num = SparseMatrix_lsize(DummySMat)+1 - allocate(DummyR(Num), stat=ierr) ! try this one pre-created - if(ierr /= 0) then - write(stderr,'(2a,i8)') cplname,':: allocate(DummyR(...) failed, ierr=',ierr - call die(cplname) - endif - - write(stdout,'(2a)') cplname,' SparseMatrix export tests. Compare with' - call SMatrix_exportGlobalRowIndices(DummySMat, DummyI, Num) - write(stdout,'(2a,i8)') cplname,':: exportGlobalRowIndices(): Num=',Num - write(stdout,'(2a,i8)') cplname,':: SparseMatrix_lsize(DummySMat)=',& - SparseMatrix_lsize(DummySMat) - write(stdout,'(2a,i8)') cplname,':: exportGlobalRowIndices() 1st Row=',DummyI(1) - write(stdout,'(2a,i8)') cplname,':: exportGlobalRowIndices() last Row=',DummyI(Num) - - call SMatrix_exportGlobalColumnInd(DummySMat, DummyI, Num) - write(stdout,'(2a,i8)') cplname,':: exportGlobalColumnIndices(): Num=',Num - write(stdout,'(2a,i8)') cplname,':: SparseMatrix_lsize(DummySMat)=',& - SparseMatrix_lsize(DummySMat) - write(stdout,'(2a,i8)') cplname,':: exportGlobalColumnIndices() 1st Col=',DummyI(1) - write(stdout,'(2a,i8)') cplname,':: exportGlobalColumnIndices() last Col=',DummyI(Num) - - call SMatrix_exportMatrixElements(DummySMat, DummyR, Num) - write(stdout,'(2a,i8)') cplname,':: exportMatrixElements(): Num=',Num - write(stdout,'(2a,i8)') cplname,':: SparseMatrix_lsize(DummySMat)=',& - SparseMatrix_lsize(DummySMat) - write(stdout,'(2a,f10.8)') cplname,':: exportMatrixElements() 1st wgt=',& - DummyR(1) - write(stdout,'(2a,f10.8)') cplname,':: exportMatrixElements() last wgt=', & - DummyR(Num) - - deallocate(DummyI, DummyR, stat=ierr) - if(ierr /= 0) then - write(stderr,'(2a,i8)') cplname,':: deallocate(DummyR(...) failed, ierr=',& - ierr - call die(cplname) - endif - -endif - - call zeit_co('CsmatReadnTest') - if(myProc==0)write(stdout,*) cplname, ":: Done Reading elements" - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!FOR TESTING ONLY:::::: -! now scatter the SparseMatrix from root to other coupler nodes -! according to the decomposition of the ocean grid (the Y) -! - root=0 - if(myProc==0)write(stdout,*) cplname, ":: Testing SparseMatrix Gather" - - ! Testing GSMap scatter and gather - call SparseMatrix_ScatterByRow(OGSMap, DummySMat, dMat, root, CPL_World, stat) - call SparseMatrixDecompByRow(OGSMap, DummySMat, MatGSMap, root, CPL_World) - call SparseMatrix_gather(dMat,gathersMat,MatGSMap,root,CPL_World) - - call MCT_GSMap_clean(MatGSMap) - - if(myProc==root) then - if(.not. sMat_identical(DummySMat,gathersMat,1e-5)) then - call die(cplname,"SMAT GATHER TEST FAILED!") - endif - call SparseMatrix_clean(gathersMat) - endif - - ! Testing broadcast - call SparseMatrix_bcast(DummySMat,root,CPL_World) - - call sMat_test(sMat=DummySMat,Identifier="CPL::Broadcast DummySMat-a2o", & - device=8000+myProc) - call sMat_test(sMat=dMat,Identifier="CPL::dMat-a2o",device=8100+myProc, & - mycomm=CPL_World) - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Build A2OMatPlus from root-centric sMat. Specify matrix decomposition -! 
to be by row, following the ocean's GlobalSegMap (OGSMap) - - if(SparseMatrixPlus_initialized(A2OMatPlus)) then - call die(cplname,"SparseMatrixPlus_initialized failed!") - endif - - ! TESTING INIT_DISTRIBUTED: - call SparseMatrixPlus_init(A2OMatPlus, dMat, AGSMap, OGSMap, & - root, CPL_World, mycompid) - - if(.NOT.SparseMatrixPlus_initialized(A2OMatPlus)) then - call die(cplname,"SparseMatrixPlus_initialized failed!") - endif - - call SparseMatrix_ScatterByRow(OGSMap, DummySMat, dMat_test, root, CPL_World, stat) - - if(.not. sMat_identical(dMat,dMat_test,1e-5)) then - call die(cplname,"dMat has been unexpectedly altered by & - &SparseMatrixPlus_init!") - endif - - ! Clean the SparseMatrix - call SparseMatrix_clean(DummySMat) - call SparseMatrix_clean(dMat) - call SparseMatrix_clean(dMat_test) - - if(myProc==0) write(stdout,*) cplname,':: Reading in O2A on root.' - -! On the root, read in O2A ascii file into DummySMat: - if(myProc==0) then - call ReadSparseMatrixAsc(DummySMat,"ocean_to_atmosphere_remap_file:", & - sMat_src_dims, sMat_dst_dims) - if(sMat_src_dims(1) /= Nox) call die(cplname, & - "sMat_src_dims(1) does not match Nox") - if(sMat_src_dims(2) /= Noy) call die(cplname, & - "sMat_src_dims(2) does not match Noy") - if(sMat_dst_dims(1) /= Nax) call die(cplname, & - "sMat_dst_dims(1) does not match Nax") - if(sMat_dst_dims(2) /= Nay) call die(cplname, & - "sMat_dst_dims(2) does not match Nay") - endif - - if(myProc==0) write(stdout,*) cplname,':: Finished reading in O2A on root.' - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Build O2AMatPlus from root-centric sMat. Specify matrix decomposition -! to be by column, following the ocean's GlobalSegMap (OGSMap) - - call SparseMatrixPlus_init(O2AMatPlus, DummySMat, OGSMap, AGSMap, Yonly, & - root, CPL_World, mycompid) - - if(.NOT.SparseMatrixPlus_initialized(A2OMatPlus)) then - call die(cplname,"O2AMatPlus has not been initialized!") - endif - - if(myProc==root) then - call sMat_test(sMat=DummySMat,Identifier="CPL::DummySMat-o2a", & - device=8300+myProc) - call SparseMatrix_clean(DummySMat) - endif - -!!!!!!!!!!!!!!!!!----------Attribute Vector for incoming Atmosphere data -! Build an Attribute Vector to hold data coming in from Atmosphere's -! decomposition to AGSMap -! - if(myProc==0)write(stdout,*) cplname, ":: Initializing Attrvect" - call zeit_ci('Catvecinit') - call MCT_AtrVt_init(fromatm, & - iList='gsindex', &! local GSMap values - rList=& -! height of first atm level - "alevh:& -! u wind - &uwind:& -! v wind - &vwind:& -! potential temp - &pottem:& -! specific humidity - &s_hum:& -! density - &rho:& -! barometric pressure - &barpres:& -! surface pressure - &surfp:& -! net solar radiation - &solrad:& -! downward direct visible radiation - &dirvis:& -! downward diffuse visible radiation - &difvis:& -! downward direct near-infrared radiation - &dirnif:& -! downward diffuse near-infrared radiation - &difnif:& -! downward longwave radiation - &lngwv:& -! convective precip - &precc:& -! large-scale precip - &precl",& - lsize=MCT_GSMap_lsize(AGSMap, Cpl_World)) - call zeit_co('Catvecinit') - -!!! declare an AttrVect to hold atmosphere data on the ocean grid -! use AtrVect already declared so that it has the same Attributes -! -if(myProc==0)write(stdout,*) cplname, ":: Init output AtrVect" - call MCT_AtrVt_init(fromatm_ocn, fromatm,MCT_GSMap_lsize(OGSMap, Cpl_World)) -if(myProc==0)write(stdout,*) cplname, ":: Done with init of output vector" - - -!!!!!!!!!!!!!!!!!----------Attribute Vector for incoming Ocean data -! 
Build an Attribute Vector to hold data coming in from Ocean's Decomp -! decomposition to OGSMap -! - if(myProc==0)write(stdout,*)cplname,":: Initializing Incoming Ocean Attrvect" - - call zeit_ci('fromocnAVinit') - - call MCT_AtrVt_init(fromocn, & - rList=& -! East-West Gradient of Ocean Surface Height - "dhdx:& -! North-South Gradient of Ocean Surface Height - &dhdy:& -! Heat of Fusion of Ocean Water - &Qfusion:& -! Sea Surface Temperature - &SST:& -! Salinity - &salinity:& -! East Component of the Surface Current - &Uocean:& -! East Component of the Surface Current - &Vocean",& - lsize=MCT_GSMap_lsize(OGSMap, CPL_World)) - - call zeit_co('fromocnAVinit') - -!!!!!!!!!!!!!!!!!----------Attribute Vector for Ocean data on ATM grid - - call MCT_AtrVt_init(fromocn_atm, & - rList=& -! East-West Gradient of Ocean Surface Height - "dhdx:& -! North-South Gradient of Ocean Surface Height - &dhdy:& -! Heat of Fusion of Ocean Water - &Qfusion:& -! Sea Surface Temperature - &SST:& -! Salinity - &salinity:& -! East Component of the Surface Current - &Uocean:& -! East Component of the Surface Current - &Vocean",& - lsize=MCT_GSMap_lsize(AGSMap, CPL_World)) - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!--Build Router -! -! Intialize 2 routers: -! 1.) Between atmosphere and coupler using AGSMap. -! 2.) Between coupler and ocean using OGSMap - -! These calls must be paired with similar calls in atm and ocn -! - if(myProc==0)write(stdout,*) cplname, ":: Initializing Routers" - - call zeit_ci('CAtmRouterInit') - call MCT_Router_init(atmo_id,AGSMap,CPL_World,Atm2Cpl) - call zeit_co('CAtmRouterInit') - - call zeit_ci('COcnRouterInit') - call MCT_Router_init(ocn_id,OGSMap,CPL_World,Cpl2Ocn) - call zeit_co('COcnRouterInit') - -! rml print router info - if(myProc==0)call MCT_Router_print(Atm2Cpl,CPL_World,90) - close(90) - - call Router_test(Atm2Cpl,"CPL::Atm2Cpl",7000+myProc) - call Router_test(Cpl2Ocn,"CPL::Cpl2Ocn",7100+myProc) - - if(myProc==0)write(stdout,*) cplname, ":: Done Initializing Routers" - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!--Build Accumulator - ACCA2O_rList="solrad:dirvis:difvis:dirnif:difnif:precc:precl" - - allocate(ACCA2O_rAction(7),stat=ierr) - if(ierr/=0) call die(cplname,"allocate(ACCA20_rAction)",ierr) - - ACCA2O_rAction = (/MCT_SUM,MCT_AVG,MCT_AVG,MCT_AVG, & - MCT_AVG,MCT_AVG,MCT_AVG/) - - call MCT_Accumulator_init(aC=ACCA2O, & - rList=trim(ACCA2O_rList), & - rAction=ACCA2O_rAction, & - lsize=MCT_GSMap_lsize(OGSMap,Cpl_World), & - num_steps=nsteps) - - call Accumulator_test(ACCA2O,"CPL::ACCA2O",1000+myProc) - - deallocate(ACCA2O_rAction,stat=ierr) - if(ierr/=0) call die(cplname,"deallocate(ACCA20_rAction)",ierr) - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! Done with Initialization Phase -! -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -!:::::::BEGIN REMAPPING DATA FROM ATMOSPHERE::::::::! - -do steps = 1,nsteps - -!!!!!!!!!!!!!!!!!----------MCT_Recv -! Receive data into AGSMap associated aV fromatm -! -if((myProc==0).and.(steps==1)) then - write(stdout,*) cplname, ":: Doing Distributed Recv" -endif - call zeit_ci('Cmctrecv') - call MCT_Recv(fromatm,Atm2Cpl) - call zeit_co('Cmctrecv') -if((myProc==0).and.(steps==1)) then - write(stdout,*) cplname, ":: Done with Recv" -endif -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Do the parallel A2O SparseMatrix-AttrVect multiply -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- -if((myProc==0).and.(steps==1)) then - write(stdout,*) cplname, ":: Begin A2O sparsematrix mul" -endif - call zeit_ci('CMatMul') - call MCT_MatVecMul(fromatm, A2OMatPlus, fromatm_ocn) - call zeit_co('CMatMul') -if((myProc==0).and.(steps==1)) then - write(stdout,*) cplname, ":: Completed A2O sparsematrix mul" -endif -! Perform Accumulation -call accumulate(fromatm_ocn,ACCA2O) - -enddo -call AttrVect_test(fromatm,"CPL::fromatm",2100+myProc) -call AttrVect_test(fromatm_ocn,"CPL::fromatm_ocn",2200+myProc) - -if(myProc==1)write(stdout,*) cplname, ":: Testing point to point send and recv" - -if(mySize>1) then - - if(myProc==1) then - call AttrVect_Send(inAV=fromatm,dest=0,TagBase=123,comm=CPL_World,status=ierr) - if(ierr/=0) call die(cplname,"AttrVect_Send- p1",ierr) - - call AttrVect_Recv(outAV=fromP1,dest=0,TagBase=124,comm=CPL_World,status=ierr) - if(ierr/=0) call die(cplname,"AttrVect_Recv- p1",ierr) - - if(.not.AttrVect_identical(fromatm,fromP1,0.1)) then - call die(cplname, "point to point comms failed") - endif - - call MCT_AtrVt_clean(fromP1) - - endif - if(myProc==0) then - call AttrVect_Recv(outAV=fromP1,dest=1,TagBase=123,comm=CPL_World,status=ierr) - if(ierr/=0) call die(cplname,"AttrVect_Recv- p0",ierr) - - call AttrVect_Send(inAV=fromP1,dest=1,TagBase=124,comm=CPL_World,status=ierr) - if(ierr/=0) call die(cplname,"AttrVect_Send- p0",ierr) - - call MCT_AtrVt_clean(fromP1) - - endif - -endif - - ! Send the accumulator registers to the ocean - call zeit_ci('Cmctsend') - call MCT_Send(ACCA2O%data,Cpl2Ocn) - call zeit_co('Cmctsend') - - ! Check received globalmap values against expected ones - j=1 - do i=1,MCT_GSMap_ngseg(AGSMap) - if(myProc==AGSMap%pe_loc(i)) then - do k=1,AGSMap%length(i) - if(fromatm%iAttr(1,j) /= AGSMap%start(i)+k-1) then - write(*,*) cplname, ':: MCT GSMap mismatch. Expected', & - AGSMap%start(i)+k-1,'got ',fromatm%iAttr(1,j) - endif - j=j+1 - enddo - endif - enddo - - !::::::TESTING ACCUMULATOR COMM FUNCTIONS:::::! - if(myProc==0) write(stdout,*) cplname,":: TESTING ACCUMULATOR_COMMS" - - call GlobalMap_init(OGMap,mycompid,MCT_GSMap_lsize(OGSMap,CPL_World), & - CPL_World) - - call MCT_Acc_gather(ACCA2O,GSgatherAcc,OGSMap,0,CPL_World,ierr) - if(ierr/=0) call die(cplname,"call MCT_Acc_gather #1") - - ! TESTING COMMS USING GMAP - call MCT_Acc_scatter(GSgatherAcc,scatterAcc,OGMap,0,CPL_World,ierr) - if(ierr/=0) call die(cplname,"call MCT_Acc_scatter #2") - - call MCT_Acc_gather(scatterAcc,GgatherAcc,OGMap,0,CPL_World,ierr) - if(ierr/=0) call die(cplname,"call MCT_Acc_gather #3") - - if(myProc==0) then - if(.NOT.Accumulator_identical(GSgatherAcc,GgatherAcc,0.1)) then - call die(cplname,"ACCUMULATOR SCATTER/GATHER #4 FAILED!") - endif - endif - - call MCT_Accumulator_clean(scatterAcc) - ! DONE TESTING COMMS USING GMAP - - call MCT_Acc_scatter(GSgatherAcc,scatterAcc,OGSMap,0,CPL_World,ierr) - if(ierr/=0) call die(cplname,"call MCT_Acc_scatter #5") - - if(.NOT.Accumulator_identical(ACCA2O,scatterAcc,0.1)) then - call die(cplname,"ACCUMULATOR SCATTER/GATHER #6 FAILED!") - endif - - call MCT_Acc_bcast(GSgatherAcc,0,CPL_World,ierr) - if(ierr/=0) call die(cplname,"call MCT_Acc_bcast") - - call Accumulator_test(GSgatherAcc,"CPL::bcastAcc",1100+myProc) - - call AttrVect_test(ACCA2O%data,"CPL::ACCA2O%data",2300+myProc) - -!::::::::DONE TESTING ACCUMULATOR COMMS:::::::::::::::::! - -!::::::::TEST LOCAL REDUCE::::::::! - call AttrVect_ReduceTest(GSgatherAcc%data,"GSgatherAcc%data on Root",2700) - - ! Lets prepare to do some neat integrals using MCT. - ! 
First, we scatter both of the General Grids. - call MCT_GGrid_scatter(AtmGrid, dAtmGrid, AGSMap, 0, CPL_World) - call MCT_GGrid_scatter(OcnGrid, dOcnGrid, OGSMap, 0, CPL_World) - - if(myProc==0) call AttrVect_test(OcnGrid%data,"CPL::OcnGrid%data",2400+myProc) - - ! unmasked paired integral: - call MCT_PairedSpatialIntegrals(inAv1=fromatm, outAv1=integratedAVect, & - GGrid1=dAtmGrid,WeightTag1="grid_area", & - inAv2=fromatm_ocn, outAv2=integratedOVect,& - GGrid2=dOcnGrid, WeightTag2="grid_area", & - SumWeights=.true., comm=CPL_World) - if(myProc==0)then - - j=MCT_AtrVt_nreals(integratedAVect) - do i=1,j,j-1 - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired MCT ', & - 'integral: integratedAVect%rAttr(',i,',1)=', & - integratedAVect%rAttr(i,1) - enddo - - k=MCT_AtrVt_nreals(integratedOVect) - do i=1,k,k-1 - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired MCT ', & - 'integral: integratedOVect%rAttr(',i,',1)=', & - integratedOVect%rAttr(i,1) - end do - endif - - call MCT_AtrVt_clean(integratedAVect) - call MCT_AtrVt_clean(integratedOVect) - - ! unmasked paired average: - call MCT_PairedSpatialAverages(inAv1=fromatm, outAv1=integratedAVect, & - GGrid1=dAtmGrid,WeightTag1="grid_area", & - inAv2=fromatm_ocn, outAv2=integratedOVect,& - GGrid2=dOcnGrid, WeightTag2="grid_area", & - comm=CPL_World) - -if(myProc==0)then - - i=1 - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired MCT ',& - 'average: averagedAVect%rAttr(',i,',1)=', & - integratedAVect%rAttr(i,1) - - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired MCT ',& - 'average: averagedOVect%rAttr(',i,',1)=', & - integratedOVect%rAttr(i,1) - -endif - - call MCT_AtrVt_clean(integratedAVect) - call MCT_AtrVt_clean(integratedOVect) - - ! masked paired integral: - call MCT_PairedMaskedSpatialIntegral(inAv1=fromatm, & - outAv1=integratedAVect, & - GGrid1=dAtmGrid, & - SpatialWeightTag1="grid_area", & - iMaskTags1="grid_imask", & - inAv2=fromatm_ocn, & - outAv2=integratedOVect, & - GGrid2=dOcnGrid, & - SpatialWeightTag2="grid_area", & - iMaskTags2="grid_imask", & - UseFastMethod=.true., & - SumWeights=.true., & - comm=CPL_World) - -if(myProc==0)then - - j=MCT_AtrVt_nreals(integratedAVect) - do i=1,j,j-1 - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired masked MCT ', & - 'integral: integratedAVect%rAttr(',i,',1)=', & - integratedAVect%rAttr(i,1) - end do - - k=MCT_AtrVt_nreals(integratedOVect) - do i=1,k,k-1 - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired masked MCT ', & - 'integral: integratedOVect%rAttr(',i,',1)=', & - integratedOVect%rAttr(i,1) - end do - -endif - - call MCT_AtrVt_clean(integratedAVect) - call MCT_AtrVt_clean(integratedOVect) - - ! Masked paired average: - call MCT_PairedMaskedSpatialAverages(inAv1=fromatm, & - outAv1=integratedAVect, & - GGrid1=dAtmGrid, & - SpatialWeightTag1="grid_area", & - iMaskTags1="grid_imask", & - inAv2=fromatm_ocn, & - outAv2=integratedOVect, & - GGrid2=dOcnGrid, & - SpatialWeightTag2="grid_area", & - iMaskTags2="grid_imask", & - UseFastMethod=.true., & - comm=CPL_World) - -if(myProc==0)then - - i=1 - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired masked MCT ', & - 'average : averagedAVect%rAttr(',i,',1)=', & - integratedAVect%rAttr(i,1) - - write(stdout,'(3a,i2,a,f12.6)') cplname,':: Paired masked MCT ', & - 'average : averagedOVect%rAttr(',i,',1)=', & - integratedOVect%rAttr(i,1) - -endif - - call AttrVect_test(integratedAVect,"CPL::integratedAVect",myProc+2500) - - call MCT_AtrVt_clean(integratedAVect) - call MCT_AtrVt_clean(integratedOVect) - - ! 
Now, receive Input AV from ocean (fromocn) - if(myProc==0) write(stdout,*) cplname,':: Before MCT_RECV from ocean' - call zeit_ci('RecvFromOcn') - call MCT_Recv(fromocn,Cpl2Ocn) - call zeit_co('RecvFromOcn') - if(myProc==0) write(stdout,*) cplname,':: After MCT_RECV from ocean' - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! Do the parallel O2A SparseMatrix-AttrVect multiply -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if(myProc==0) write(stdout,*) cplname,":: Commencing O2A sparsematrix mul" - call zeit_ci('O2AMatMul') - call MCT_MatVecMul(fromocn, O2AMatPlus, fromocn_atm) - call zeit_co('O2AMatMul') - if(myProc==0) write(stdout,*) cplname,":: Completed O2A sparsematrix mul" - - ! Check the interpolated values - do i=2,MCT_AtrVt_nreals(fromocn_atm) - do j=1,MCT_AtrVt_lsize(fromocn_atm) - if(abs(fromocn_atm%rAttr(1,j)-fromocn_atm%rAttr(i,j)) > 1e-4) then - write(stderr,*) cplname, ":: Interpolation Error", & - fromocn_atm%rAttr(1,j), fromocn_atm%rAttr(i,j), i, j - call die(cplname,"Interpolation Error") - endif - enddo - enddo - - ! TEST MAPPING FOR HMV - -! call AttrVect_gather(fromocn_atm,gatherAV_atm,AGSMap, & -! 0,CPL_World,ierr) - call AttrVect_gather(fromocn_atm,gatherAV_atm,AGSMap, & - 0,CPL_World,ierr,99.0_FP) ! rml test - - if(myProc == 0) then - unit = luavail() + 9500 - write(unit,*) Nax, Nay - k=0 - do i=1,Nax - do j=1,Nay - k=k+1 - write(unit,*) gatherAV_atm%rAttr(1,k) - enddo - enddo - call MCT_AtrVt_clean(gatherAV_atm) - endif - -if(myProc==0)write(stdout,*) cplname, ":: All Done, cleanup" - call zeit_ci('Ccleanup') - - ! Clean MCT datatypes - if(myProc==0) then - call MCT_GGrid_clean(AtmGrid) - call MCT_GGrid_clean(OcnGrid) - call MCT_Accumulator_clean(GgatherAcc) - endif - - call MCT_Accumulator_clean(GSgatherAcc) - call MCT_Accumulator_clean(scatterAcc) - call GlobalMap_clean(rOGMap) - call GlobalMap_clean(OCN_OGMap) - call GlobalMap_clean(OGMap) - call MCT_GGrid_clean(dAtmGrid) - call MCT_GGrid_clean(dOcnGrid) - call MCT_GSMap_clean(AGSMap) - call MCT_GSMap_clean(OGSMap) - call MCT_GSMap_clean(DAGSMap) - call MCT_GSMap_clean(OCN_OGSMap) - call MCT_Router_clean(Atm2Cpl) - call MCT_Router_clean(Cpl2Ocn) - call SparseMatrixPlus_clean(A2OMatPlus) - call SparseMatrixPlus_clean(O2AMatPlus) - call MCT_Accumulator_clean(ACCA2O) - call MCT_AtrVt_clean(fromatm) - call MCT_AtrVt_clean(fromatm_ocn) - call MCT_AtrVt_clean(fromocn) - call MCT_AtrVt_clean(fromocn_atm) - call MCTWorld_clean() - - call zeit_co('Ccleanup') - - call zeit_allflush(CPL_World,0,46) - - initialized= MCTWorld_initialized() - if (myProc==0)write(stdout,*) cplname, & - ":: MCTWorld initialized=",initialized - if(initialized) call die(cplname, "mct still initialized") - - -end subroutine - - - - - - - - - - - - - - diff --git a/src/externals/mct/testsystem/testall/job.ut-all.jaguar b/src/externals/mct/testsystem/testall/job.ut-all.jaguar deleted file mode 100644 index c61a7432023..00000000000 --- a/src/externals/mct/testsystem/testall/job.ut-all.jaguar +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh -#PBS -q debug -#PBS -l walltime=5:00,size=6 -#PBS -o job.out.jaguar -#PBS -j oe -#PBS -m abe -#PBS -A CLI017dev - -# job starts in home directory, cd to the submission directory - -# IMPORTANT! after CNL upgrade, all files (input,output,pwd) -# must be in /lustre. 
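For orientation: the utmct executable this job script launches drives the coupler exchange deleted above. One coupling cycle, condensed to the calls exactly as they appear in the deleted code (all names are from that code; declarations and timing/diagnostic calls are omitted here):

    ! one coupling cycle in the deleted testall coupler
    call MCT_MatVecMul(fromatm, A2OMatPlus, fromatm_ocn)   ! A2O interpolation
    call accumulate(fromatm_ocn, ACCA2O)                   ! time accumulation
    call MCT_Send(ACCA2O%data, Cpl2Ocn)                    ! ship accumulator to ocean via Router
    call MCT_Recv(fromocn, Cpl2Ocn)                        ! receive ocean reply
    call MCT_MatVecMul(fromocn, O2AMatPlus, fromocn_atm)   ! O2A interpolation back to atm grid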
- -cd $PBS_O_WORKDIR - - -echo '---------------------------------------------------------' - -# phoenix -# aprun -n 6 ./utmct - -# jaguar -aprun -n 6 ./utmct diff --git a/src/externals/mct/testsystem/testall/m_ACTEST.F90 b/src/externals/mct/testsystem/testall/m_ACTEST.F90 deleted file mode 100644 index 01a89ba4ec4..00000000000 --- a/src/externals/mct/testsystem/testall/m_ACTEST.F90 +++ /dev/null @@ -1,633 +0,0 @@ -! -! !INTERFACE: - - module m_ACTEST -! -! !USES: -! - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: testall - public :: IndexAttr - public :: Copy - public :: ImportExport - public :: Identical - - interface testall - module procedure testaC_ - end interface - interface IndexAttr - module procedure IndexTest_ - end interface - interface Copy - module procedure CopyTest_ - end interface - interface ImportExport - module procedure ImportExportTest_ - end interface - interface Identical - module procedure Identical_ - end interface - - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_ACTEST' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aCtest_ - Test the functions in the Accumulator module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt Accumulator}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testaC_(aC, identifier, device) - -! -! !USES: -! - - use m_Accumulator, only : Accumulator - use m_Accumulator, only : accumulate - use m_Accumulator, only : MCT_SUM, MCT_AVG - use m_Accumulator, only : nIAttr, nRAttr - use m_Accumulator, only : lsize - use m_Accumulator, only : clean - use m_Accumulator, only : Accumulator_init => init - use m_AttrVect, only : AttrVect - use m_AttrVect, only : AttrVect_init => init - use m_AttrVect, only : AttrVect_clean => clean - use m_AttrVect, only : AttrVect_copy => Copy - use m_List, only : List_allocated => allocated - use m_List, only : ListExportToChar => exporttoChar - use m_stdio - use m_die - - implicit none - -! !INPUT PARAMETERS: - - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - -! !REVISION HISTORY: -! 23Sep02 - E.T. Ong - initial prototype. 
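The core of this routine is the accumulation check. A condensed sketch, assuming aC carries real attributes; aCAvg and aCSum are illustrative names for what the code below calls aCCopy1 and aCCopy2, and use statements/declarations are omitted:

    ! build two working accumulators from aC plus a unit-valued AttrVect
    call Accumulator_init(aC=aCAvg, bC=aC, lsize=100, num_steps=aC%num_steps, steps_done=0)
    call Accumulator_init(aC=aCSum, bC=aC, lsize=100, num_steps=aC%num_steps, steps_done=0)
    call AttrVect_init(aV=aVDummy, bV=aC%data, lsize=100)
    aCAvg%rAction = MCT_AVG
    aCSum%rAction = MCT_SUM
    aVDummy%rAttr = 1.

    ! accumulate one step beyond num_steps, as the test below does
    do i = 1, aC%num_steps + 1
       call accumulate(aVDummy, aCAvg)
       call accumulate(aVDummy, aCSum)
    enddo
    ! the test then requires aCAvg%data%rAttr to be ~2 and
    ! aCSum%data%rAttr to be ~num_steps+1, within the stated bounds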
-!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::aCtest_' - - type(Accumulator) :: aCCopy1, aCCopy2, aCExactCopy - type(AttrVect) :: aVDummy - integer :: i,j,k - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::WRITE OUT INFO ABOUT THE ATTRVECT::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - write(device,*) identifier, ":: TYPE CHECK " - write(device,*) identifier, ":: NUM_STEPS = ", aC%num_steps - write(device,*) identifier, ":: STEPS_DONE = ", aC%steps_done - - if(associated(aC%iAction)) then - write(device,*) identifier, ":: IACTION (SIZE,VALUES) = ", & - size(aC%iAction), aC%iAction - else - write(device,*) identifier, ":: IACTION NOT ASSOCIATED" - endif - - if(associated(aC%rAction)) then - write(device,*) identifier, ":: RACTION (SIZE,VALUES) = ", & - size(aC%rAction), aC%rAction - else - write(device,*) identifier, ":: RACTION NOT ASSOCIATED" - endif - - if(List_allocated(aC%data%iList)) then - write(device,*) identifier, ":: data%ILIST = ", & - ListExportToChar(aC%data%iList) - else - write(device,*) identifier, ":: data%ILIST NOT INITIALIZED" - endif - - if(List_allocated(aC%data%rList)) then - write(device,*) identifier, ":: data%RLIST = ", & - ListExportToChar(aC%data%rList) - else - write(device,*) identifier, ":: data%RLIST NOT INITIALIZED" - endif - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TESTING ACCUMULATION:::::::::::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call Accumulator_init(aC=aCExactCopy, bC=aC, lsize=lsize(aC), & - num_steps=aC%num_steps, steps_done=aC%steps_done) - - call AttrVect_copy(aVin=aC%data,aVout=aCExactCopy%data) - - call Accumulator_init(aC=aCCopy1, bC=aC, lsize=100, & - num_steps=aC%num_steps, steps_done=0) - - call Accumulator_init(aC=aCCopy2, bC=aC, lsize=100, & - num_steps=aC%num_steps, steps_done=0) - - call AttrVect_init(aV=aVDummy, bV=aC%data, lsize=100) - - if(nIAttr(aC)>0) then - aCCopy1%iAction=MCT_AVG - aCCopy2%iAction=MCT_SUM - aVDummy%iAttr = 1 - endif - - if(nRAttr(aC)>0) then - aCCopy1%rAction=MCT_AVG - aCCopy2%rAction=MCT_SUM - aVDummy%rAttr = 1. - endif - - do i=1,aC%num_steps - call accumulate(aVDummy,ACCopy1) - call accumulate(aVDummy,ACCopy2) - enddo - - call accumulate(aVDummy,ACCopy1) - call accumulate(aVDummy,ACCopy2) - - if(.NOT. (aCCopy1%num_steps == aC%num_steps)) then - call die(myname_,"SEVERE: aCCopy1 num_steps value has changed!") - endif - - if(.NOT. (aCCopy2%num_steps == aC%num_steps)) then - call die(myname_,"SEVERE: aCCopy2 num_steps value has changed!") - endif - - if(.NOT. (aCCopy1%steps_done == aC%num_steps+1)) then - call die(myname_,"SEVERE: aCCopy1 stesp_done value is incorrect!") - endif - - if(.NOT. (aCCopy2%steps_done == aC%num_steps+1)) then - call die(myname_,"SEVERE: aCCopy2 stesp_done value is incorrect!") - endif - - do i=1,lsize(ACCopy1) - do j=1,nRAttr(aC) - if( (aCCopy1%data%rAttr(j,i) < 1.9) .or. & - (aCCopy1%data%rAttr(j,i) > 2.1) ) then - call die(myname_,"Averaging Reals failed") - endif - if( (aCCopy2%data%rAttr(j,i) < aC%num_steps+0.9) .or. 
& - (aCCopy2%data%rAttr(j,i) > aC%num_steps+1.1) ) then - call die(myname_,"Summing Reals failed") - endif - enddo - enddo - - do i=1,lsize(aCCopy1) - do j=1,nIAttr(aC) - if( aCCopy1%data%iAttr(j,i) /= 2 ) then - call die(myname_,"Averaging Ints failed",aCCopy1%data%iAttr(j,i)) - endif - if( aCCopy2%data%iAttr(j,i) /= aC%num_steps+1 ) then - call die(myname_,"Summing Ints failed",aCCopy1%data%iAttr(j,i)) - endif - enddo - enddo - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TESTING INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call IndexTest_(aC,identifier,device) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING COPY AND SHAREDATTRINDEXLIST:::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - - call CopyTest_(aC,identifier,device) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING EXPORT AND IMPORT FUNCTIONS:::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - call ImportExportTest_(aC,identifier,device) - - ! Check that aC is unchanged! - - if(.not.Identical_(ACC1=aC,ACC2=aCExactCopy,Range=1e-5)) then - call die(myname_,"aC has been unexpectedly modified!!!") - endif - - call clean(aCCopy1) - call clean(aCCopy2) - call clean(aCExactCopy) - call AttrVect_clean(aVDummy) - -end subroutine testaC_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TEST FOR INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - subroutine IndexTest_(aC,identifier,device) - - use m_Accumulator, only: nIAttr, nRAttr, getIList, getRList, indexIA, indexRA, Accumulator - use m_List, only: List_allocated => allocated - use m_String, only: String - use m_String, only: StringToChar => toChar - use m_String, only: String_clean => clean - use m_stdio - use m_die - - implicit none - - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::IndexTest_' - type(String) :: ItemStr - integer :: i,j,k,ierr - - if(nIAttr(aC)>0) then - write(device,*) identifier, ":: Testing indexIA and getIList::" - else - if(List_allocated(aC%data%iList)) then - call die(myname_,"iList has been allocated, :& - &but there are no atttributes. :& - &Please do not initialize a blank list.") - end if - if(associated(aC%data%iAttr)) then - if(size(aC%data%iAttr,1) /= 0) then - call die(myname_,"iAttr contains no attributes, & - &yet its size /= 0",size(aC%data%iAttr,1)) - endif - endif - end if - - do i=1,nIAttr(aC) - - call getIList(ItemStr,i,aC) - j = indexIA(aC,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - ":: aC Index = ", j, & - ":: Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - if(nRAttr(aC)>0) then - write(device,*) identifier, ":: Testing indexRA and getRList::" - else - if(List_allocated(aC%data%rList)) then - call die(myname_,"rList has been allocated, :& - &but there are no atttributes. 
:& - &Please do not initialize a blank list.") - end if - if(associated(aC%data%rAttr)) then - if(size(aC%data%rAttr,1) /= 0) then - call die(myname_,"rAttr contains no attributes, & - &yet its size /= 0",size(aC%data%rAttr,1)) - endif - endif - end if - - do i=1,nRAttr(aC) - - call getRList(ItemStr,i,aC) - j = indexRA(aC,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - "::aC Index = ", j, & - "::Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - end subroutine IndexTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR COPY AND SHAREDATTRINDEXLIST:::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! NOTE: SO FOR ONLY TESTING SHAREDATTRINDEX for reals - - subroutine CopyTest_(aC,identifier,device) - - use m_AttrVect, only : copy - use m_AttrVect, only : exportIListToChar,exportRListToChar - use m_AttrVect, only : AttrVect_init => init - use m_Accumulator - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_copy => copy - use m_List, only : List_append => append - use m_List, only : ListexportToChar => exportToChar - use m_List, only : List_clean => clean - use m_String, only : String - use m_String, only : StringToChar => toChar - use m_String, only : String_clean => clean - use m_stdio - use m_die - - implicit none - - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::CopyTest_' - type(String) :: ItemStr1, ItemStr2 - type(Accumulator) :: aCExactCopy - integer,dimension(:), pointer :: aCaCIndices1, aCaCIndices2 - integer,dimension(:), pointer :: aVaCIndices1, aVaCIndices2 - integer :: aCaCNumShared, aVaCNumShared - integer :: i,j,k,ierr - - if( (nRAttr(aC)>0) ) then - - write(device,*) identifier, ":: Testing Copy and SharedAttrIndexList ::" - write(device,*) identifier, ":: FIRST AV ATTRIBUTES::", & - " RATTR = ", exportRListToChar(aC%data) - call init(aCExactCopy,aC,lsize(aC)) - write(device,*) identifier, ":: SECOND AV ATTRIBUTES::", & - " RATTR = ", exportRListToChar(aCExactCopy%data) - call zero(aCExactCopy) - call copy(aVin=aC%data, aVout=aCExactCopy%data) - call SharedAttrIndexList(aC,aCExactCopy,"REAL ", & - aCaCNumShared,aCaCIndices1,aCaCIndices2) - call SharedAttrIndexList(aC%data,aCExactCopy,"REAL ", & - aVaCNumShared,aVaCIndices1,aVaCIndices2) - - if(aCaCNumShared/=aVaCNumShared) then - call die(myname_,"aCaCNumShared/=aVaCNumShared") - endif - - do i=1,aCaCNumShared - if(aCaCIndices1(i)/=aVaCIndices1(i)) then - call die(myname_,"aCaCIndices1(i)/=aVaCIndices1(i)") - endif - if(aCaCIndices2(i)/=aVaCIndices2(i)) then - call die(myname_,"aCaCIndices2(i)/=aVaCIndices2(i)") - endif - enddo - - write(device,*) identifier, ":: Indices1 :: Indices2 :: & - &Attribute1 :: Attribute2" - do i=1,aCaCNumShared - call getRList(ItemStr1,aCaCIndices1(i),aC) - call getRList(ItemStr2,aCaCIndices2(i),aCExactCopy) - write(device,*) identifier,":: ", aCaCIndices1(i), "::", & - aCaCIndices2(i), "::", StringToChar(ItemStr1), "::", & - StringToChar(ItemStr2) - call String_clean(ItemStr1) - call String_clean(ItemStr2) - enddo - - do i=1,aCaCNumShared - do j=1,lsize(aC) - if(aC%data%rAttr(aCaCIndices1(i),j) /= & - aCExactCopy%data%rAttr(aCaCIndices2(i),j)) then - write(device,*) identifier,aCaCIndices1(i),aCaCIndices2(i), j - call die(myname_,"Copy function is 
MALFUNCTIONING", ierr) - endif - enddo - enddo - - deallocate(aCaCIndices1,aCaCIndices2,aVaCIndices1,aVaCIndices2,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(aCaCIndices,aVaCIndices)",ierr) - - call clean(aCExactCopy) - - else - - write(device,*) identifier, & - ":: NOT Testing Copy and SharedAttrIndexList ::", & - ":: Consult m_ACTest.F90 to enable this function::" - endif - - end subroutine CopyTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR EXPORT AND IMPORT FUNCTIONS:::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - - subroutine ImportExportTest_(aC,identifier,device) - - use m_Accumulator - use m_AttrVect, only : exportIList, exportRList - use m_AttrVect, only : exportIListToChar, exportRListToChar - use m_List, only : List - use m_List, only : List_identical => identical - use m_List, only : List_get => get - use m_List, only : List_clean => clean - use m_String, only : String - use m_String, only : StringToChar => toChar - use m_String, only : String_clean => clean - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(Accumulator), intent(in) :: aC - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::ImportExportTest_' - type(Accumulator) :: importAC - type(List) :: OutIList, OutRList - type(String) :: ItemStr - integer,dimension(:),pointer :: OutIVect - real(FP), dimension(:),pointer :: OutRVect - integer :: exportsize - integer :: i,j,k,ierr - - write(device,*) identifier, ":: Testing import and export functions" - - if(nIAttr(aC)>0) then - - call exportIList(aV=aC%data,outIList=outIList) - - if(.NOT. List_identical(aC%data%iList,outIList)) then - call die(myname_, "Function exportIList failed!") - endif - - call List_get(ItemStr=ItemStr,ith=nIAttr(aC),aList=aC%data%iList) - - allocate(outIVect(lsize(aC)),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outIVect)") - - call exportIAttr(aC=aC,AttrTag=StringToChar(ItemStr), & - outVect=OutIVect,lsize=exportsize) - - if(exportsize /= lsize(aC)) then - call die(myname_,"(exportsize /= lsize(aC))") - endif - - do i=1,exportsize - if(aC%data%iAttr(nIAttr(aC),i) /= outIVect(i)) then - call die(myname_,"Function exportIAttr failed!") - endif - enddo - - call init(aC=importAC,bC=aC,lsize=exportsize) - call zero(importAC) - - call importIAttr(aC=importAC,AttrTag=StringToChar(ItemStr), & - inVect=outIVect,lsize=exportsize) - - j=indexIA(importAC,StringToChar(ItemStr)) - if(j<=0) call die(myname_,"indexIA(importAC,StringToChar(ItemStr))") - do i=1,exportsize - if(importAC%data%iAttr(j,i) /= outIVect(i)) then - call die(myname_,"Function importIAttr failed!") - endif - enddo - - call clean(importAC) - call List_clean(outIList) - call String_clean(ItemStr) - - deallocate(outIVect,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(outIVect)") - - endif - - if(nRAttr(aC)>0) then - - call exportRList(aV=aC%data,outRList=outRList) - - if(.NOT. 
List_identical(aC%data%rList,outRList)) then - call die(myname_, "Function exportRList failed!") - endif - - call List_get(ItemStr=ItemStr,ith=nRAttr(aC),aList=aC%data%rList) - - allocate(outRVect(lsize(aC)),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outRVect)") - - call exportRAttr(aC=aC,AttrTag=StringToChar(ItemStr), & - outVect=OutRVect,lsize=exportsize) - - if(exportsize /= lsize(aC)) then - call die(myname_,"(exportsize /= lsize(aC))") - endif - - do i=1,exportsize - if(aC%data%rAttr(nRAttr(aC),i) /= outRVect(i)) then - call die(myname_,"Function exportRAttr failed!") - endif - enddo - - call init(aC=importAC,bC=aC,lsize=exportsize) - call zero(importAC) - - call importRAttr(aC=importAC,AttrTag=StringToChar(ItemStr), & - inVect=outRVect,lsize=exportsize) - - j=indexRA(importAC,StringToChar(ItemStr)) - if(j<=0) call die(myname_,"indexRA(importAC,StringToChar(ItemStr))") - do i=1,exportsize - if(importAC%data%rAttr(j,i) /= outRVect(i)) then - call die(myname_,"Function importRAttr failed!") - endif - enddo - - call clean(importAC) - call List_clean(outRList) - call String_clean(ItemStr) - - deallocate(outRVect,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(outRVect)") - - endif - - end subroutine ImportExportTest_ - - logical function Identical_(ACC1,ACC2,Range) - - use m_Accumulator - use m_AVTEST,only: AttrVect_identical => Identical - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(Accumulator), intent(in) :: ACC1 - type(Accumulator), intent(in) :: ACC2 - real, optional, intent(in) :: Range - - character(len=*),parameter :: myname_=myname//'::Identical_' - integer :: i,j,k - - Identical_=.true. - - if(present(Range)) then - if(.NOT. AttrVect_identical(ACC1%data,ACC2%data,Range)) then - Identical_=.false. - endif - else - if(.NOT. AttrVect_identical(ACC1%data,ACC2%data)) then - Identical_=.false. - endif - endif - - if(ACC1%num_steps/=ACC2%num_steps) then - Identical_=.false. - endif - - if(ACC1%steps_done/=ACC2%steps_done) then - Identical_=.false. - endif - - j=0 - k=0 - - if(associated(ACC1%iAction).or.associated(ACC2%iAction)) then - if(size(ACC1%iAction) /= size(ACC2%iAction)) then - Identical_=.FALSE. - endif - j=size(ACC1%iAction) - endif - - if(associated(ACC1%rAction).or.associated(ACC2%rAction)) then - if(size(ACC1%rAction) /= size(ACC2%rAction)) then - Identical_=.FALSE. - endif - k=size(ACC2%rAction) - endif - - do i=1,j - if(ACC1%iAction(i)/=ACC2%iAction(i)) then - Identical_=.FALSE. - endif - enddo - - do i=1,k - if(ACC1%rAction(i)/=ACC2%rAction(i)) then - Identical_=.FALSE. - endif - enddo - - end function Identical_ - - -end module m_ACTEST diff --git a/src/externals/mct/testsystem/testall/m_AVTEST.F90 b/src/externals/mct/testsystem/testall/m_AVTEST.F90 deleted file mode 100644 index 5632926d821..00000000000 --- a/src/externals/mct/testsystem/testall/m_AVTEST.F90 +++ /dev/null @@ -1,857 +0,0 @@ -! -! !INTERFACE: - - module m_AVTEST -! -! !USES: -! - implicit none - - private ! except - -! 
!PUBLIC MEMBER FUNCTIONS: - - public :: testall - public :: IndexAttr - public :: SortPermute - public :: Copy - public :: ImportExport - public :: Reduce - public :: Identical - - interface testall - module procedure testaV_ - end interface - interface IndexAttr - module procedure IndexTest_ - end interface - interface SortPermute - module procedure SortPermuteTest_ - end interface - interface Copy - module procedure CopyTest_ - end interface - interface ImportExport - module procedure ImportExportTest_ - end interface - interface Reduce - module procedure ReduceTest_ - end interface - interface Identical - module procedure Identical_ - end interface - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_AVTest' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aVtest_ - Test the functions in the AttrVect module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt AttrVect}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testaV_(aV, identifier, device) - -! -! !USES: -! - use m_AttrVect ! Use all AttrVect routines - use m_stdio - use m_die - - implicit none - -! !INPUT PARAMETERS: - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - -! !REVISION HISTORY: -! 23Sep02 - E.T. Ong - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::aVtest_' - type(AttrVect) :: aVExactCopy - -!::::MAKE A COPY::::! - - call init(aVExactCopy,aV,lsize(aV)) - call Copy(aVin=aV,aVout=aVExactCopy) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::WRITE OUT INFO ABOUT THE ATTRVECT::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - write(device,*) identifier, ":: lsize = ", lsize(aV) - write(device,*) identifier, ":: nIAttr = ", nIAttr(aV) - write(device,*) identifier, ":: nRAttr = ", nRAttr(aV) - - if(nIAttr(aV)>0) then - write(device,*) identifier, ":: exportIListToChar = ", & - exportIListToChar(aV) - endif - - if(nRAttr(aV)>0) then - write(device,*) identifier, ":: exportRListToChar = ", & - exportRListToChar(aV) - endif - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TESTING INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call IndexTest_(aV,identifier,device) - - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING SORT AND PERMUTE:::::::::::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! NOTE: THIS IS NOT A CHECK FOR CORRECTNESS, JUST A CHECK FOR CONSISTENCY - - call SortPermuteTest_(aV,identifier,device) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING COPY AND SHAREDATTRINDEXLIST:::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! 
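Each helper called here follows the same round-trip pattern; for the attribute-index check it reduces to the loop below (condensed from IndexTest_ later in this module, names unchanged):

    do i = 1, nIAttr(aV)
       call getIList(ItemStr, i, aV)              ! name of the i-th integer attribute
       j = indexIA(aV, StringToChar(ItemStr))     ! look the name up again
       if (i /= j) call die(myname_, "Function indexIA failed!")
       call String_clean(ItemStr)
    enddo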
- - call CopyTest_(aV,identifier,device) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING EXPORT AND IMPORT FUNCTIONS::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - - call ImportExportTest_(aV,identifier,device) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING LOCAL REDUCE FUNCTIONS:::::::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - - call ReduceTest_(aV,identifier,device) - - - ! Check that aV is unchanged! - - if(.NOT.Identical_(aV,aVExactCopy,1e-5)) then - call die(myname_,"aV has been unexpectedly altered!!!") - endif - - call clean(aVExactCopy) - -end subroutine testaV_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TEST FOR INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - subroutine IndexTest_(aV,identifier,device) - - use m_AttrVect, only: AttrVect, nIattr, nRattr,getIList, getRList,indexIa,indexRA - use m_List, only: List_allocated => allocated - use m_String, only: String - use m_String, only: StringToChar => toChar - use m_String, only: String_clean => clean - use m_stdio - use m_die - - implicit none - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::IndexTest_' - type(String) :: ItemStr - integer :: i,j,k,ierr - - if(nIAttr(aV)>0) then - write(device,*) identifier, ":: Testing indexIA and getIList::" - else - if(List_allocated(aV%iList)) then - call die(myname_,"iList has been allocated, :& - &but there are no atttributes. :& - &Please do not initialize a blank list.") - end if - if(associated(aV%iAttr)) then - if(size(aV%iAttr,1) /= 0) then - call die(myname_,"iAttr contains no attributes, & - &yet its size /= 0",size(aV%iAttr,1)) - endif - endif - end if - - do i=1,nIAttr(aV) - - call getIList(ItemStr,i,aV) - j = indexIA(aV,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - ":: aV Index = ", j, & - ":: Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - if(nRAttr(aV)>0) then - write(device,*) identifier, ":: Testing indexRA and getRList::" - else - if(List_allocated(aV%rList)) then - call die(myname_,"rList has been allocated, :& - &but there are no atttributes. :& - &Please do not initialize a blank list.") - end if - if(associated(aV%rAttr)) then - if(size(aV%rAttr,1) /= 0) then - call die(myname_,"rAttr contains no attributes, & - &yet its size /= 0",size(aV%rAttr,1)) - endif - endif - end if - - do i=1,nRAttr(aV) - - call getRList(ItemStr,i,aV) - j = indexRA(aV,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - "::aV Index = ", j, & - "::Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - end subroutine IndexTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR SORT AND PERMUTE:::::::::::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! 
NOTE: THIS IS NOT A CHECK FOR CORRECTNESS, JUST A CHECK FOR CONSISTENCY - - subroutine SortPermuteTest_(aV,identifier,device) - - use m_AttrVect - use m_stdio - use m_die - - implicit none - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::SortPermuteTest_' - type(AttrVect) :: AVCOPY1, AVCOPY2 - logical,dimension(:), pointer :: descend - integer,dimension(:), pointer :: perm - integer :: i,j,k,ierr - real :: r - - write(device,*) identifier, ":: Testing Sort and Permute" - - call init(aV=AVCOPY1,bV=aV,lsize=100) - call init(av=AVCOPY2,bV=aV,lsize=100) - - if( (nIAttr(AVCOPY1)>0) .or. (nRAttr(AVCOPY1)>0) ) then - - if(nIAttr(AVCOPY1)>0) then - - allocate(descend(nIAttr(AVCOPY1)),stat=ierr) - if(ierr /= 0) call die(myname_,"allocate(descend)") - - call zero(AVCOPY1) - call zero(AVCOPY2) - - k=0 - do i=1,nIAttr(AVCOPY1) - do j=1,lsize(AVCOPY1) - k=k+1 - AVCOPY1%iAttr(i,j) = k - AVCOPY2%iAttr(i,j) = k - enddo - enddo - - descend=.true. - call Sort(aV=AVCOPY1,key_list=AVCOPY1%iList,perm=perm,descend=descend) - call Permute(aV=AVCOPY1,perm=perm) - - call SortPermute(aV=AVCOPY2,key_list=AVCOPY2%iList,descend=descend) - - do i=1,nIAttr(AVCOPY1) - do j=1,lsize(AVCOPY1) - if(AVCOPY1%iAttr(i,j) /= AVCOPY2%iAttr(i,j)) then - call die(myname_,"Sort Testing FAILED!") - endif - enddo - enddo - - write(device,*) identifier, ":: INTEGER AV IN DESCENDING ORDER:: ", & - AVCOPY1%iAttr(1,1:5) - - deallocate(perm,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(perm)") - - deallocate(descend,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(descend)") - - endif - - if(nRAttr(AVCOPY1)>0) then - - allocate(descend(nRAttr(AVCOPY1)),stat=ierr) - if(ierr /= 0) call die(myname_,"allocate(descend)") - - call zero(AVCOPY1) - call zero(AVCOPY2) - - r=0. - do i=1,nRAttr(AVCOPY1) - do j=1,lsize(AVCOPY1) - r=r+1.29 - AVCOPY1%rAttr(i,j) = r - AVCOPY2%rAttr(i,j) = r - enddo - enddo - - descend=.true. - call Sort(aV=AVCOPY1,key_list=AVCOPY1%rList,perm=perm,descend=descend) - call Permute(aV=AVCOPY1,perm=perm) - - call SortPermute(aV=AVCOPY2,key_list=AVCOPY2%rList,descend=descend) - - do i=1,nRAttr(AVCOPY1) - do j=1,lsize(AVCOPY1) - if(AVCOPY1%rAttr(i,j) /= AVCOPY2%rAttr(i,j)) then - call die(myname_,"Sort Testing FAILED!") - endif - enddo - enddo - - write(device,*) identifier, ":: REAL AV IN DESCENDING ORDER:: ", & - AVCOPY1%rAttr(1,1:5) - - deallocate(perm,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(perm)") - - deallocate(descend,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(descend)") - - endif - else - write(device,*) identifier, ":: NOT TESTING SORTING AND PERMUTING. CONSULT & - &SOURCE CODE TO ENABLE TESTING." - endif - - call clean(AVCOPY1) - call clean(AVCOPY2) - - end subroutine SortPermuteTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR COPY AND SHAREDATTRINDEXLIST:::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! 
NOTE: SO FOR ONLY TESTING SHAREDATTRINDEX for reals - - subroutine CopyTest_(aV,identifier,device) - - use m_AttrVect - use m_List, only : List - use m_List, only : List_init => init - use m_List, only : List_copy => copy - use m_List, only : List_append => append - use m_List, only : ListexportToChar => exportToChar - use m_List, only : List_clean => clean - use m_String, only : String - use m_String, only : StringToChar => toChar - use m_String, only : String_clean => clean - use m_stdio - use m_die - - implicit none - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::CopyTest_' - type(String) :: ItemStr1, ItemStr2 - type(List) :: OneIList, HalfIList, FullIList - type(List) :: OneRList, HalfRList, FullRList - type(AttrVect) :: aVExactCopy, aVPartialCopy, aVOtherCopy - type(AttrVect) :: HalfAV - integer,dimension(:), pointer :: Indices1, Indices2 - integer :: NumShared - integer :: i,j,k,ierr - - if( (nIAttr(aV)>0) .and. (nRAttr(aV)>0) ) then - - !:::INITIALIZE LISTS FOR USE IN COPY TESTS:::! - do i=1,nIAttr(aV) - - call getIList(ItemStr1,i,aV) - - if(i==1) then - call List_init(HalfIList,ItemStr1) - call List_init(FullIList,ItemStr1) - else - if(mod(i,2) == 0) then ! if EVEN - call List_init(OneIList,'REPLACE_'//ACHAR(64+i)) - call List_append(FullIList,OneIList) - call List_clean(OneIList) - else ! if ODD - call List_init(OneIList,ItemStr1) - call List_append(HalfIList,OneIList) - call List_append(FullIList,OneIList) - call List_clean(OneIList) - endif - endif - - call String_clean(ItemStr1) - - enddo - - do i=1,nRAttr(aV) - - call getRList(ItemStr1,i,aV) - - if(i==1) then - call List_init(OneRList,'REPLACE_'//ACHAR(64+i)) - call List_copy(FullRList,OneRList) - call List_clean(OneRList) - else - if(mod(i,2) == 0) then ! IF EVEN - call List_init(OneRList,ItemStr1) - if(i==2) then - call List_init(HalfRList,ItemStr1) - else - call List_append(HalfRList,OneRList) - endif - call List_append(FullRList,OneRList) - call List_clean(OneRList) - else ! IF ODD - call List_init(OneRList,'REPLACE_'//ACHAR(64+i)) - call List_append(FullRList,OneRList) - call List_clean(OneRList) - endif - endif - - call String_clean(ItemStr1) - - enddo - - write(device,*) identifier, ":: Testing Copy and SharedAttrIndexList ::" - write(device,*) identifier, ":: FIRST AV ATTRIBUTES::", & - "IATTR = ", exportIListToChar(aV), & - " RATTR = ", exportRListToChar(aV) - call init(aVExactCopy,aV,lsize(aV)) - write(device,*) identifier, ":: SECOND AV ATTRIBUTES::", & - "IATTR = ", exportIListToChar(aVExactCopy), & - " RATTR = ", exportRListToChar(aVExactCopy) - call zero(aVExactCopy) - call copy(aVin=aV, aVout=aVExactCopy) - ! call copy(aVin=aV,rList=exportRListToChar(aV), & - ! 
iList=exportIListToChar(aV),aVout=aVExactCopy) - call SharedAttrIndexList(aV,aVExactCopy,"REAL ", & - NumShared,Indices1,Indices2) - write(device,*) identifier, ":: Indices1 :: Indices2 :: & - &Attribute1 :: Attribute2" - do i=1,NumShared - call getRList(ItemStr1,Indices1(i),aV) - call getRList(ItemStr2,Indices2(i),aVExactCopy) - write(device,*) identifier,":: ", Indices1(i), "::", Indices2(i), & - "::", StringToChar(ItemStr1), "::", StringToChar(ItemStr2) - call String_clean(ItemStr1) - call String_clean(ItemStr2) - enddo - - do i=1,NumShared - do j=1,lsize(aV) - if(aV%rAttr(Indices1(i),j) /= & - aVExactCopy%rAttr(Indices2(i),j)) then - call die(myname_,"Copy function is MALFUNCTIONING", ierr) - endif - enddo - enddo - - deallocate(Indices1,Indices2,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(Indices1,Indices2)",ierr) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call init(aVPartialCopy,aV,lsize(aV)) - write(device,*) identifier, ":: FIRST AV ATTRIBUTES::", & - "IATTR = ", exportIListToChar(aVPartialCopy), & - " RATTR = ", exportRListToChar(aVPartialCopy) - call zero(aVPartialCopy) - call copy(aVin=aV,rList=ListexportToChar(HalfRList), & - iList=ListexportToChar(HalfIList),aVout=aVPartialCopy) - call init(aV=HalfAV,iList=HalfIList,rList=HalfRList,lsize=1) - write(device,*) identifier, ":: SECOND AV ATTRIBUTES::", & - "IATTR = ", exportIListToChar(HalfAV), & - " RATTR = ", exportRListToChar(HalfAV) - call SharedAttrIndexList(aV,HalfAV,"REAL ", & - NumShared,Indices1,Indices2) - write(device,*) identifier, ":: Indices1 :: Indices2 :: & - &Attribute1 :: Attribute2" - do i=1,NumShared - call getRList(ItemStr1,Indices1(i),aV) - call getRList(ItemStr2,Indices2(i),HalfAV) - write(device,*) identifier,":: ", Indices1(i), "::", Indices2(i), & - "::", StringToChar(ItemStr1), "::", StringToChar(ItemStr2) - call String_clean(ItemStr1) - call String_clean(ItemStr2) - enddo - - do i=1,NumShared - do j=1,lsize(aV) - if(aV%rAttr(Indices1(i),j) /= & - aVPartialCopy%rAttr(Indices1(i),j)) then - call die(myname_,"Copy function is MALFUNCTIONING", ierr) - endif - enddo - enddo - - call List_clean(HalfIList) - call List_clean(HalfRList) - - deallocate(Indices1,Indices2,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(Indices1,Indices2)",ierr) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call init(aVOtherCopy,FullIList,FullRList,lsize(aV)) - write(device,*) identifier, ":: FIRST AV ATTRIBUTES::", & - "IATTR = ", exportIListToChar(aV), & - " RATTR = ", exportRListToChar(aV) - write(device,*) identifier, ":: SECOND AV ATTRIBUTES::", & - "IATTR = ", exportIListToChar(aVOtherCopy), & - " RATTR = ", exportRListToChar(aVOtherCopy) - call zero(aVOtherCopy) - call copy(aV,rList=exportRListToChar(aV), & - TrList=ListexportToChar(FullRList), & - iList=exportIListToChar(aV), & - TiList=ListexportToChar(FullIList), & - aVout=aVOtherCopy) - call SharedAttrIndexList(aV,aVOtherCopy,"REAL", & - NumShared,Indices1,Indices2) - write(device,*) identifier, ":: Indices1 :: Indices2 :: & - &Attribute1 :: Attribute2" - do i=1,NumShared - call getRList(ItemStr1,Indices1(i),aV) - call getRList(ItemStr2,Indices2(i),aVOtherCopy) - write(device,*) identifier,":: ", Indices1(i), "::", Indices2(i), & - "::", StringToChar(ItemStr1), "::", StringToChar(ItemStr2) - call String_clean(ItemStr1) - call String_clean(ItemStr2) - enddo - - do i=1,NumShared - do j=1,lsize(aV) - if(aV%rAttr(Indices1(i),j) /= & - aVOtherCopy%rAttr(Indices2(i),j)) then - 
write(device,*) identifier,Indices1(i),Indices2(i), j - call die(myname_,"Copy function is MALFUNCTIONING", ierr) - endif - enddo - enddo - - call List_clean(FullIList) - call List_clean(FullRList) - - deallocate(Indices1,Indices2,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(Indices1,Indices2)",ierr) - - call clean(aVExactCopy) - call clean(aVPartialCopy) - call clean(aVOtherCopy) - call clean(HalfAV) - - else - - write(device,*) identifier, & - ":: NOT Testing Copy and SharedAttrIndexList ::", & - ":: Consult m_MCTTest.F90 to enable this function::" - endif - - end subroutine CopyTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR EXPORT AND IMPORT FUNCTIONS:::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - - subroutine ImportExportTest_(aV,identifier,device) - - use m_AttrVect - use m_List, only : List - use m_List, only : List_identical => identical - use m_List, only : List_get => get - use m_List, only : List_clean => clean - use m_String, only : String - use m_String, only : StringToChar => toChar - use m_String, only : String_clean => clean - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::ImportExportTest_' - type(AttrVect) :: importAV - type(List) :: OutIList, OutRList - type(String) :: ItemStr - integer,dimension(:),pointer :: OutIVect - real(FP), dimension(:),pointer :: OutRVect - integer :: exportsize - integer :: i,j,k,ierr - - write(device,*) identifier, ":: Testing import and export functions" - - if(nIAttr(aV)>0) then - - call exportIList(aV=aV,outIList=outIList) - - if(.NOT. List_identical(aV%iList,outIList)) then - call die(myname_, "Function exportIList failed!") - endif - - call List_get(ItemStr=ItemStr,ith=nIAttr(aV),aList=aV%iList) - - allocate(outIVect(lsize(aV)),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outIVect)") - - call exportIAttr(aV=aV,AttrTag=StringToChar(ItemStr), & - outVect=OutIVect,lsize=exportsize) - - if(exportsize /= lsize(aV)) then - call die(myname_,"(exportsize /= lsize(aV))") - endif - - do i=1,exportsize - if(aV%iAttr(nIAttr(aV),i) /= outIVect(i)) then - call die(myname_,"Function exportIAttr failed!") - endif - enddo - - call init(aV=importAV,iList=exportIListToChar(aV),lsize=exportsize) - call zero(importAV) - - call importIAttr(aV=importAV,AttrTag=StringToChar(ItemStr), & - inVect=outIVect,lsize=exportsize) - - j=indexIA(importAV,StringToChar(ItemStr)) - if(j<=0) call die(myname_,"indexIA(importAV,StringToChar(ItemStr))") - do i=1,exportsize - if(importAV%iAttr(j,i) /= outIVect(i)) then - call die(myname_,"Function importIAttr failed!") - endif - enddo - - call clean(importAV) - call List_clean(outIList) - call String_clean(ItemStr) - - deallocate(outIVect,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(outIVect)") - - endif - - if(nRAttr(aV)>0) then - - call exportRList(aV=aV,outRList=outRList) - - if(.NOT. 
List_identical(aV%rList,outRList)) then - call die(myname_, "Function exportRList failed!") - endif - - call List_get(ItemStr=ItemStr,ith=nRAttr(aV),aList=aV%rList) - - allocate(outRVect(lsize(aV)),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outRVect)") - - call exportRAttr(aV=aV,AttrTag=StringToChar(ItemStr), & - outVect=OutRVect,lsize=exportsize) - - if(exportsize /= lsize(aV)) then - call die(myname_,"(exportsize /= lsize(aV))") - endif - - do i=1,exportsize - if(aV%rAttr(nRAttr(aV),i) /= outRVect(i)) then - call die(myname_,"Function exportRAttr failed!") - endif - enddo - - call init(aV=importAV,rList=exportRListToChar(aV),lsize=exportsize) - call zero(importAV) - - call importRAttr(aV=importAV,AttrTag=StringToChar(ItemStr), & - inVect=outRVect,lsize=exportsize) - - j=indexRA(importAV,StringToChar(ItemStr)) - if(j<=0) call die(myname_,"indexRA(importAV,StringToChar(ItemStr))") - do i=1,exportsize - if(importAV%rAttr(j,i) /= outRVect(i)) then - call die(myname_,"Function importRAttr failed!") - endif - enddo - - call clean(importAV) - call List_clean(outRList) - call String_clean(ItemStr) - - deallocate(outRVect,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(outRVect)") - - endif - - end subroutine ImportExportTest_ - - subroutine ReduceTest_(aV,identifier,device) - - use m_AttrVectReduce - use m_AttrVect - use m_List, only : ListExportToChar => ExportToChar - use m_stdio - use m_die - - implicit none - - type(AttrVect), intent(in) :: aV - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::ReduceTest_' - integer :: i,j,k,ierr - type(AttrVect) :: reducedAVsum, reducedAVmin, reducedAVmax - type(AttrVect) :: reducedAVRsum, reducedAVRmin, reducedAVRmax - - if( (nIAttr(aV)==0).and.(nRAttr(aV)>0) ) then - - call LocalReduce(aV,reducedAVsum,AttrVectSUM) - call LocalReduce(aV,reducedAVmin,AttrVectMIN) - call LocalReduce(aV,reducedAVmax,AttrVectMAX) - - call LocalReduceRAttr(aV,reducedAVRsum,AttrVectSUM) - call LocalReduceRAttr(aV,reducedAVRmin,AttrVectMIN) - call LocalReduceRAttr(aV,reducedAVRmax,AttrVectMAX) - - if(.NOT.Identical_(reducedAVsum,reducedAVRsum,1e-4)) then - call die(myname_,"LocalReduce -SUM- functions produced inconsistent & - &results!") - endif - - if(.NOT.Identical_(reducedAVmin,reducedAVRmin,1e-4)) then - call die(myname_,"LocalReduce -MIN- functions produced inconsistent & - &results!") - endif - - if(.NOT.Identical_(reducedAVmax,reducedAVRmax,1e-4)) then - call die(myname_,"LocalReduce -MAX- functions produced inconsistent & - &results!") - endif - - write(device,*) identifier,":: RESULTS OF ATTRVECT LOCAL REDUCE :: & - &(Name, rList, Values)" - write(device,*) identifier,":: REDUCEDAVSUM = ", & - ListExportToChar(reducedAVsum%rList), & - reducedAVsum%rAttr - write(device,*) identifier,":: REDUCEDAVMIN = ", & - ListExportToChar(reducedAVmin%rList), & - reducedAVmin%rAttr - write(device,*) identifier,":: REDUCEDAVMAX = ", & - ListExportToChar(reducedAVmax%rList), & - reducedAVmax%rAttr - - call clean(reducedAVsum) - call clean(reducedAVmin) - call clean(reducedAVmax) - call clean(reducedAVRsum) - call clean(reducedAVRmin) - call clean(reducedAVRmax) - - else - - write(device,*) identifier,":: NOT TESTING LOCAL REDUCE. & - &PLEASE CONSULT SOURCE CODE." 
- - endif - - end subroutine ReduceTest_ - - logical function Identical_(aV1,aV2,Range) - - use m_AttrVect - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(AttrVect), intent(in) :: aV1 - type(AttrVect), intent(in) :: aV2 - real, optional, intent(in) :: Range - - integer :: i,j,k,AVSize - - Identical_=.true. - - AVSize = lsize(aV1) - - if(lsize(aV1) /= lsize(aV2)) then - AVSize=0 - Identical_=.false. - endif - - do i=1,AVSize - do j=1,nIAttr(aV1) - if(AV1%iAttr(j,i) /= AV2%iAttr(j,i)) then - Identical_=.false. - endif - enddo - enddo - - if(present(Range)) then - - do i=1,AVSize - do j=1,nRAttr(aV1) - if( ABS(AV1%rAttr(j,i)-AV2%rAttr(j,i)) > Range ) then - Identical_=.false. - endif - enddo - enddo - - else - - do i=1,AVSize - do j=1,nRAttr(aV1) - if(AV1%rAttr(j,i) /= AV2%rAttr(j,i)) then - Identical_=.false. - endif - enddo - enddo - - endif - - end function Identical_ - -end module m_AVTEST diff --git a/src/externals/mct/testsystem/testall/m_GGRIDTEST.F90 b/src/externals/mct/testsystem/testall/m_GGRIDTEST.F90 deleted file mode 100644 index df2f8c0889b..00000000000 --- a/src/externals/mct/testsystem/testall/m_GGRIDTEST.F90 +++ /dev/null @@ -1,636 +0,0 @@ -! -! !INTERFACE: - - module m_GGRIDTEST -! -! !USES: -! - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: testall - public :: IndexAttr - public :: SortPermute - public :: ImportExport - public :: Identical - - interface testall - module procedure testGGrid_ - end interface - interface IndexAttr - module procedure IndexTest_ - end interface - interface SortPermute - module procedure SortPermuteTest_ - end interface - interface ImportExport - module procedure ImportExportTest_ - end interface - interface Identical - module procedure Identical_ - end interface - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_GGridTest' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: testGGRID_ - Test the functions in the GeneralGrid module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt GeneralGrid}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testGGrid_(GGrid, identifier, device) - -! -! !USES: -! - use m_GeneralGrid, only: GeneralGrid,init,clean,dims,lsize ! Use all GeneralGrid routines - use m_List, only : ListExportToChar => exportToChar - use m_List, only : List_allocated => allocated - use m_AttrVect, only : AttrVect_copy => copy - use m_stdio - use m_die - - implicit none - -! !INPUT PARAMETERS: - - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - -! !REVISION HISTORY: -! 23Sep02 - E.T. Ong - initial prototype. 
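The driver below uses the same bracketing pattern as the AttrVect and Accumulator tests: snapshot the input, run the sub-tests, then require that the input is unchanged. Condensed from the code that follows (names unchanged, sub-test calls elided):

    call init(GGridExactCopy1, GGrid, lsize(GGrid))
    call AttrVect_copy(aVin=GGrid%data, aVout=GGridExactCopy1%data)
    ! ... IndexTest_, SortPermuteTest_, ImportExportTest_ ...
    if (.NOT. Identical_(GGrid, GGridExactCopy1, 1e-5)) then
       call die(myname_, "GGrid has been unexpectedly altered!!!")
    endif
    call clean(GGridExactCopy1)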
-!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::GGridtest_' - type(GeneralGrid) :: GGridExactCopy1, GGridExactCopy2 - integer :: i,j,k - logical :: calledinitl_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::WRITE OUT INFO ABOUT THE ATTRVECT::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - write(device,*) identifier, ":: TYPE CHECK" - - if(List_allocated(GGrid%coordinate_list)) then - write(device,*) identifier, ":: COORDINATE_LIST = ", & - ListExportToChar(GGrid%coordinate_list) - else - call die(myname_,"COORDINATE_LIST IS NOT INITIALIZED!") - endif - - if(List_allocated(GGrid%coordinate_sort_order)) then - write(device,*) identifier, ":: COORDINATE_SORT_ORDER = ", & - ListExportToChar(GGrid%coordinate_sort_order) - else - write(device,*) identifier, ":: COORDINATE_SORT_ORDER NOT INITIALIZED" - endif - - if(associated(GGrid%descend)) then - write(device,*) identifier, ":: DESCEND = ", & - size(GGrid%descend), GGrid%descend - else - write(device,*) identifier, ":: DESCEND NOT ASSOCIATED" - endif - - if(List_allocated(GGrid%weight_list)) then - write(device,*) identifier, ":: WEIGHT_LIST = ", & - ListExportToChar(GGrid%weight_list) - else - write(device,*) identifier, ":: WEIGHT_LIST NOT INITIALIZED" - endif - - if(List_allocated(GGrid%other_list)) then - write(device,*) identifier, ":: OTHER_LIST = ", & - ListExportToChar(GGrid%other_list) - else - write(device,*) identifier, ":: OTHER_LIST NOT INITIALIZED" - endif - - if(List_allocated(GGrid%index_list)) then - write(device,*) identifier, ":: INDEX_LIST = ", & - ListExportToChar(GGrid%index_list) - else - write(device,*) identifier, ":: INDEX_LIST NOT INITIALIZED" - endif - - if(List_allocated(GGrid%data%iList)) then - write(device,*) identifier, ":: DATA%ILIST = ", & - ListExportToChar(GGrid%data%iList) - else - write(device,*) identifier, ":: DATA%ILIST NOT INITIALIZED" - endif - - if(List_allocated(GGrid%data%rList)) then - write(device,*) identifier, ":: DATA%RLIST = ", & - ListExportToChar(GGrid%data%rList) - else - write(device,*) identifier, ":: DATA%RLIST NOT INITIALIZED" - endif - - write(device,*) identifier, ":: DIMS = ", dims(GGrid) - write(device,*) identifier, ":: LSIZE = ", lsize(GGrid) - - call init(GGridExactCopy1,GGrid,lsize(GGrid)) - call AttrVect_copy(aVin=GGrid%data,aVout=GGridExactCopy1%data) - - calledinitl_=.false. - - if( ((((List_allocated(GGrid%coordinate_sort_order).AND.& - List_allocated(GGrid%weight_list)).AND.& - List_allocated(GGrid%other_list)).AND.& - List_allocated(GGrid%index_list)).AND.& - ASSOCIATED(GGrid%descend)) ) then - calledinitl_=.true. - call init(GGrid=GGridExactCopy2,& - CoordList=GGrid%coordinate_list, & - CoordSortOrder=GGrid%coordinate_sort_order, & - descend=GGrid%descend, & - WeightList=GGrid%weight_list, & - OtherList=GGrid%other_list, & - IndexList=GGrid%index_list, & - lsize=lsize(GGrid)) - call AttrVect_copy(aVin=GGrid%data,aVout=GGridExactCopy2%data) - else - write(device,*) identifier, ":: NOT TESTING INIL_. PLEASE & - &CONSULT SOURCE CODE." - endif - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TESTING INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call IndexTest_(GGrid,identifier,device) - - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! 
-!:::::TESTING SORT AND PERMUTE:::::::::::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! NOTE: THIS IS NOT A CHECK FOR CORRECTNESS, JUST A CHECK FOR CONSISTENCY - - call SortPermuteTest_(GGrid,identifier,device) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING EXPORT AND IMPORT FUNCTIONS::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - - call ImportExportTest_(GGrid,identifier,device) - - ! Check that GGrid is unchanged! - - if(.NOT.Identical_(GGrid,GGridExactCopy1,1e-5)) then - call die(myname_,"GGrid has been unexpectedly altered!!!") - endif - - call clean(GGridExactCopy1) - - if(calledinitl_) then - if(.NOT.Identical_(GGrid,GGridExactCopy2,1e-5)) then - call die(myname_,"GGrid has been unexpectedly altered!!!") - endif - call clean(GGridExactCopy2) - endif - -end subroutine testGGrid_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TEST FOR INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - subroutine IndexTest_(GGrid,identifier,device) - - use m_GeneralGrid, only: GeneralGrid,indexIA,indexRA - use m_AttrVect, only : getIList, getRList - use m_AttrVect, only : nIAttr,nRAttr - use m_List, only: List_allocated => allocated - use m_String, only: String - use m_String, only: StringToChar => toChar - use m_String, only: String_clean => clean - use m_stdio - use m_die - - implicit none - - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::IndexTest_' - type(String) :: ItemStr - integer :: i,j,k,ierr - - if(nIAttr(GGrid%data)>0) then - write(device,*) identifier, ":: Testing indexIA and getIList::" - else - if(List_allocated(GGrid%data%iList)) then - call die(myname_,"iList has been allocated, :& - &but there are no atttributes. :& - &Please do not initialize a blank list.") - end if - if(associated(GGrid%data%iAttr)) then - if(size(GGrid%data%iAttr,1) /= 0) then - call die(myname_,"iAttr contains no attributes, & - &yet its size /= 0",size(GGrid%data%iAttr,1)) - endif - endif - end if - - do i=1,nIAttr(GGrid%data) - - call getIList(ItemStr,i,GGrid%data) - j = indexIA(GGrid,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - ":: GGrid Index = ", j, & - ":: Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - if(nRAttr(GGrid%data)>0) then - write(device,*) identifier, ":: Testing indexRA and getRList::" - else - if(List_allocated(GGrid%data%rList)) then - call die(myname_,"rList has been allocated, :& - &but there are no atttributes. 
:& - &Please do not initialize a blank list.") - end if - if(associated(GGrid%data%rAttr)) then - if(size(GGrid%data%rAttr,1) /= 0) then - call die(myname_,"rAttr contains no attributes, & - &yet its size /= 0",size(GGrid%data%rAttr,1)) - endif - endif - end if - - do i=1,nRAttr(GGrid%data) - - call getRList(ItemStr,i,GGrid%data) - j = indexRA(GGrid,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - "::GGrid Index = ", j, & - "::Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - end subroutine IndexTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR SORT AND PERMUTE:::::::::::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! NOTE: THIS IS NOT A CHECK FOR CORRECTNESS, JUST A CHECK FOR CONSISTENCY - - subroutine SortPermuteTest_(GGrid,identifier,device) - - use m_GeneralGrid - use m_AttrVect, only: nIAttr, nRAttr, Zero - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::SortPermuteTest_' - type(GeneralGrid) :: GGRIDCOPY1, GGRIDCOPY2 - logical,dimension(:), pointer :: descend - integer,dimension(:), pointer :: perm - integer :: i,j,k,ierr - real :: r - - if( associated(GGrid%descend) ) then - - write(device,*) identifier, ":: Testing Sort and Permute" - - call init(oGGrid=GGRIDCOPY1,iGGrid=GGrid,lsize=100) - call init(oGGrid=GGRIDCOPY2,iGGrid=GGrid,lsize=100) - - call Zero(GGRIDCOPY1%data) - call Zero(GGRIDCOPY2%data) - - if(nIAttr(GGRIDCOPY1%data)>0) then - - k=0 - do i=1,nIAttr(GGRIDCOPY1%data) - do j=1,lsize(GGRIDCOPY1) - k=k+1 - GGRIDCOPY1%data%iAttr(i,j) = k - GGRIDCOPY2%data%iAttr(i,j) = k - enddo - enddo - endif - if(nRAttr(GGRIDCOPY1%data)>0) then - - r=0. - do i=1,nRAttr(GGRIDCOPY1%data) - do j=1,lsize(GGRIDCOPY1) - r=r+1.29 - GGRIDCOPY1%data%rAttr(i,j) = r - GGRIDCOPY2%data%rAttr(i,j) = r - enddo - enddo - endif - - call Sort(GGrid=GGRIDCOPY1,key_List=GGRIDCOPY1%coordinate_sort_order,perm=perm,descend=GGrid%descend) - call Permute(GGrid=GGRIDCOPY1,perm=perm) - - call SortPermute(GGrid=GGRIDCOPY2) - - deallocate(perm,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(perm)") - - if(nIAttr(GGRIDCOPY1%data)>0) then - - do i=1,nIAttr(GGRIDCOPY1%data) - do j=1,lsize(GGRIDCOPY1) - if(GGRIDCOPY1%data%iAttr(i,j) /= GGRIDCOPY2%data%iAttr(i,j)) then - call die(myname_,"Sort Testing FAILED!") - endif - enddo - enddo - - write(device,*) identifier, ":: INTEGER GGRID%DATA IN ", GGrid%descend, & - " ORDER:: ", GGRIDCOPY1%data%iAttr(1,1:5) - - endif - - if(nRAttr(GGRIDCOPY1%data)>0) then - - do i=1,nRAttr(GGRIDCOPY1%data) - do j=1,lsize(GGRIDCOPY1) - if(GGRIDCOPY1%data%rAttr(i,j) /= GGRIDCOPY2%data%rAttr(i,j)) then - call die(myname_,"Sort Testing FAILED!") - endif - enddo - enddo - - write(device,*) identifier, ":: REAL GGRID%DATA IN ", GGrid%descend, & - " ORDER:: ", GGRIDCOPY1%data%rAttr(1,1:5) - - endif - - call clean(GGRIDCOPY1) - call clean(GGRIDCOPY2) - else - write(device,*) identifier, ":: NOT TESTING SORTING AND PERMUTING. CONSULT & - &SOURCE CODE TO ENABLE TESTING." - endif - - end subroutine SortPermuteTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR EXPORT AND IMPORT FUNCTIONS:::::::::::::::::::::::::::::::! 
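The export/import routine that follows applies one round-trip per attribute; stripped of its bookkeeping, the real-attribute case is (calls as in the deleted code; error handling and cleanup omitted):

    ! export one attribute, import it into a zeroed copy, compare element by element
    call exportRAttr(GGrid=GGrid, AttrTag=StringToChar(ItemStr), &
                     outVect=OutRVect, lsize=exportsize)
    call init(oGGrid=importGGrid, iGGrid=GGrid, lsize=exportsize)
    call AttrVect_zero(importGGrid%data)
    call importRAttr(GGrid=importGGrid, AttrTag=StringToChar(ItemStr), &
                     inVect=OutRVect, lsize=exportsize)
    j = indexRA(importGGrid, StringToChar(ItemStr))
    do i = 1, exportsize
       if (importGGrid%data%rAttr(j,i) /= OutRVect(i)) then
          call die(myname_, "Function importRAttr failed!")
       endif
    enddo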
-!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - - subroutine ImportExportTest_(GGrid,identifier,device) - - use m_GeneralGrid - use m_AttrVect, only : exportIList, exportRList - use m_AttrVect, only : AttrVect_zero => zero - use m_AttrVect, only : nIAttr, nRAttr - use m_List, only : List - use m_List, only : List_identical => identical - use m_List, only : List_get => get - use m_List, only : List_clean => clean - use m_String, only : String - use m_String, only : StringToChar => toChar - use m_String, only : String_clean => clean - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(GeneralGrid), intent(in) :: GGrid - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::ImportExportTest_' - type(GeneralGrid) :: importGGrid - type(List) :: OutIList, OutRList - type(String) :: ItemStr - integer,dimension(:),pointer :: OutIVect - real(FP), dimension(:),pointer :: OutRVect - integer :: exportsize - integer :: i,j,k,ierr - - write(device,*) identifier, ":: Testing import and export functions" - - if(nIAttr(GGrid%data)>0) then - - call exportIList(aV=GGrid%data,outIList=outIList) - - if(.NOT. List_identical(GGrid%data%iList,outIList)) then - call die(myname_, "Function exportIList failed!") - endif - - call List_get(ItemStr=ItemStr,ith=nIAttr(GGrid%data),aList=GGrid%data%iList) - - allocate(outIVect(lsize(GGrid)),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outIVect)") - - call exportIAttr(GGrid=GGrid,AttrTag=StringToChar(ItemStr), & - outVect=OutIVect,lsize=exportsize) - - if(exportsize /= lsize(GGrid)) then - call die(myname_,"(exportsize /= lsize(GGrid))") - endif - - do i=1,exportsize - if(GGrid%data%iAttr(nIAttr(GGrid%data),i) /= outIVect(i)) then - call die(myname_,"Function exportIAttr failed!") - endif - enddo - - call init(oGGrid=importGGrid,iGGrid=GGrid,lsize=exportsize) - call AttrVect_zero(importGGrid%data) - - call importIAttr(GGrid=importGGrid,AttrTag=StringToChar(ItemStr), & - inVect=outIVect,lsize=exportsize) - - j=indexIA(importGGrid,StringToChar(ItemStr)) - if(j<=0) call die(myname_,"indexIA(importGGrid,StringToChar(ItemStr))") - do i=1,exportsize - if(importGGrid%data%iAttr(j,i) /= outIVect(i)) then - call die(myname_,"Function importIAttr failed!") - endif - enddo - - call clean(importGGrid) - call List_clean(outIList) - call String_clean(ItemStr) - - deallocate(outIVect,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(outIVect)") - - endif - - if(nRAttr(GGrid%data)>0) then - - call exportRList(aV=GGrid%data,outRList=outRList) - - if(.NOT. 
List_identical(GGrid%data%rList,outRList)) then - call die(myname_, "Function exportRList failed!") - endif - - call List_get(ItemStr=ItemStr,ith=nRAttr(GGrid%data),aList=GGrid%data%rList) - - allocate(outRVect(lsize(GGrid)),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(outRVect)") - - call exportRAttr(GGrid=GGrid,AttrTag=StringToChar(ItemStr), & - outVect=OutRVect,lsize=exportsize) - - if(exportsize /= lsize(GGrid)) then - call die(myname_,"(exportsize /= lsize(GGrid))") - endif - - do i=1,exportsize - if(GGrid%data%rAttr(nRAttr(GGrid%data),i) /= outRVect(i)) then - call die(myname_,"Function exportRAttr failed!") - endif - enddo - - call init(oGGrid=importGGrid,iGGrid=GGrid,lsize=exportsize) - call AttrVect_zero(importGGrid%data) - - call importRAttr(GGrid=importGGrid,AttrTag=StringToChar(ItemStr), & - inVect=outRVect,lsize=exportsize) - - j=indexRA(importGGrid,StringToChar(ItemStr)) - if(j<=0) call die(myname_,"indexRA(importGGrid,StringToChar(ItemStr))") - do i=1,exportsize - if(importGGrid%data%rAttr(j,i) /= outRVect(i)) then - call die(myname_,"Function importRAttr failed!") - endif - enddo - - call clean(importGGrid) - call List_clean(outRList) - call String_clean(ItemStr) - - deallocate(outRVect,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(outRVect)") - - endif - - end subroutine ImportExportTest_ - - logical function Identical_(GGrid1,GGrid2,Range) - - use m_GeneralGrid, only: GeneralGrid - use m_AVTEST,only: AttrVect_identical => Identical - use m_List,only : List_allocated => allocated - use m_List,only : List_identical => identical - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(GeneralGrid), intent(in) :: GGrid1 - type(GeneralGrid), intent(in) :: GGrid2 - real, optional, intent(in) :: Range - - integer :: i,j,k - - Identical_=.true. - - if(present(Range)) then - if(.NOT. AttrVect_identical(GGrid1%data,GGrid2%data,Range)) then - Identical_=.false. - endif - else - if(.NOT. AttrVect_identical(GGrid1%data,GGrid2%data)) then - Identical_=.false. - endif - endif - - if(.NOT. List_identical(GGrid1%coordinate_list, & - GGrid2%coordinate_list) ) then - Identical_=.false. - endif - - if( List_allocated(GGrid1%coordinate_sort_order) .or. & - List_allocated(GGrid2%coordinate_sort_order) ) then - if(.NOT. List_identical(GGrid1%coordinate_sort_order, & - GGrid2%coordinate_sort_order) ) then - Identical_=.false. - endif - endif - - if( List_allocated(GGrid1%weight_list) .or. & - List_allocated(GGrid2%weight_list) ) then - if(.NOT. List_identical(GGrid1%weight_list, & - GGrid2%weight_list) ) then - Identical_=.false. - endif - endif - - if( List_allocated(GGrid1%other_list) .or. & - List_allocated(GGrid2%other_list) ) then - if(.NOT. List_identical(GGrid1%other_list, & - GGrid2%other_list) ) then - Identical_=.false. - endif - endif - - if( List_allocated(GGrid1%index_list) .or. & - List_allocated(GGrid2%index_list) ) then - if(.NOT. List_identical(GGrid1%index_list, & - GGrid2%index_list) ) then - Identical_=.false. - endif - endif - - if(associated(GGrid1%descend) .and. & - associated(GGrid2%descend)) then - - if(size(GGrid1%descend) == size(GGrid2%descend)) then - do i=1,size(GGrid1%descend) - if(GGrid1%descend(i).neqv.GGrid2%descend(i)) then - Identical_=.false. - endif - enddo - else - Identical_=.false. - endif - - endif - - if((associated(GGrid1%descend).and..NOT.associated(GGrid2%descend)).or.& - (.NOT.associated(GGrid1%descend).and.associated(GGrid2%descend)))then - Identical_=.false. 
- endif - - end function Identical_ - - -end module m_GGRIDTEST diff --git a/src/externals/mct/testsystem/testall/m_GMAPTEST.F90 b/src/externals/mct/testsystem/testall/m_GMAPTEST.F90 deleted file mode 100644 index 032d0772347..00000000000 --- a/src/externals/mct/testsystem/testall/m_GMAPTEST.F90 +++ /dev/null @@ -1,160 +0,0 @@ -! -! !INTERFACE: - - module m_GMAPTEST -! -! !USES: -! - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: testall - - interface testall - module procedure testGMap_ - end interface - - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_GMAPTEST' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: testGMap_ - Test the functions in the AttrVect module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt AttrVect}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testGMap_(GMap, identifier, mycomm, device) - -! -! !USES: -! - use m_GlobalMap ! Use all of MCTWorld - use m_GlobalToLocal,only : GlobalToLocalIndex - use m_stdio - use m_die - use m_mpif90 - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalMap), intent(in) :: GMap - character(len=*), intent(in) :: identifier - integer, optional, intent(in) :: mycomm - integer, intent(in) :: device - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::testGMap_' - integer :: i,j,k,lower,upper - integer :: mySize,myProc,proc,ierr - - write(device,*) identifier, ":: TESTING GLOBALMAP ::" - - write(device,*) identifier, ":: TYPE CHECK:" - write(device,*) identifier, ":: comp_id = ", GMap%comp_id - write(device,*) identifier, ":: gsize = ", GMap%gsize - write(device,*) identifier, ":: lsize = ", GMap%lsize - - mySize = size(GMap%counts) - - if(mySize<=0) call die(myname_,"size(GMap%counts)<=0") - - if(size(GMap%counts) /= size(GMap%displs)) then - call die(myname_,"size(GMap%counts) /= size(GMap%displs)") - endif - - write(device,*) identifier, ":: counts = & - &(associated, size, counts) ", associated(GMap%counts), & - size(GMap%counts), GMap%counts - write(device,*) identifier, ":: displs = & - &(associated, size, displs) ", associated(GMap%displs), & - size(GMap%displs), GMap%displs - - write(device,*) identifier, ":: counts = ", & - GMap%counts - - write(device,*) identifier, ":: FUNCTION CHECK:" - write(device,*) identifier, ":: lsize = ", lsize(GMap) - write(device,*) identifier, ":: gsize = ", gsize(GMap) - write(device,*) identifier, ":: comp_id = ",comp_id(GMap) - - write(device,*) identifier, ":: Testing rank" - do i=0,mySize-1 - do j=1,GMap%counts(i) - call rank(GMap,GMap%displs(i)+j,proc) - if(i/=proc) then - write(device,*) identifier, ":: subroutine rank failed! ", & - i,j,mySize,GMap%counts(i), GMap%displs(i),proc - call die(myname_,"subroutine rank failed!") - endif - enddo - enddo - - write(device,*) identifier, ":: Testing bounds" - do i=0,mySize-1 - call bounds(GMap,i,lower,upper) - if(lower/=GMap%displs(i)+1) then - write(device,*) identifier, ":: subroutine bounds failed! 
", & - i, lower, GMap%displs(i) - call die(myname_,"subroutine bounds failed!") - endif - if(upper/=GMap%displs(i)+GMap%counts(i)) then - write(device,*) identifier, ":: subroutine bounds failed! ", & - i,upper,GMap%displs(i)+GMap%counts(i)-1 - call die(myname_,"subroutine bounds failed!") - endif - enddo - - if(present(mycomm)) then - j=-12345 - k=-12345 - - do i=1,GMap%gsize - if(GlobalToLocalIndex(GMap,i,mycomm)/=-1) then - j=GlobalToLocalIndex(GMap,i,mycomm) - EXIT - endif - enddo - - do i=1,GMap%gsize - if(GlobalToLocalIndex(GMap,i,mycomm)/=-1) then - k=GlobalToLocalIndex(GMap,i,mycomm) - endif - enddo - - if( (j==-12345).and.(k==-12345) ) then - write(device,*) identifier, ":: GlobalMapToIndex :: & - &THIS PROCESS OWNS ZERO POINTS" - else - write(device,*) identifier, ":: GlobalMapToIndex :: & - &first, last indices = ", j, k - endif - - else - - write(device,*) identifier, ":: NOT TESTING GLOBALMAPTOLOCALINDEX. & - &PLEASE CONSULT SOURCE CODE TO ENABLE TESTING" - - endif - -end subroutine testGMap_ - -end module m_GMAPTEST diff --git a/src/externals/mct/testsystem/testall/m_GSMAPTEST.F90 b/src/externals/mct/testsystem/testall/m_GSMAPTEST.F90 deleted file mode 100644 index 55ce3ada903..00000000000 --- a/src/externals/mct/testsystem/testall/m_GSMAPTEST.F90 +++ /dev/null @@ -1,377 +0,0 @@ -! -! !INTERFACE: - - module m_GSMapTest -! -! !USES: -! - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: testall - public :: Identical - - interface testall - module procedure testGSMap_ - end interface - - interface Identical - module procedure Identical_ - end interface - - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_GSMapTest' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aVtest_ - Test the functions in the AttrVect module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt AttrVect}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testGSMap_(GSMap, identifier, mycomm, device) - -! -! !USES: -! - use m_GlobalSegMap ! Use all GlobalSegMap routines - use m_GlobalToLocal ! Use all GlobalToLocal routines - use m_stdio - use m_die - use m_mpif90 - - implicit none - -! !INPUT PARAMETERS: - - type(GlobalSegMap), intent(in) :: GSMap - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - integer, intent(in) :: mycomm - -! !REVISION HISTORY: -! 23Sep02 - E.T. Ong - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::testGSMap_' - integer :: myProc, mySize, ierr - integer :: i, j, k, m, n, o - integer :: first,last, owner, numlocs, nactive, npoints, proc - integer, dimension(:), pointer :: points, owners, pelist, perm, & - mystart, mylength - integer, dimension(:), allocatable :: locs, slpArray - logical :: found - - type(GlobalSegMap) :: PGSMap, P1GSMap - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::WRITE OUT INFO ABOUT THE GLOBALSEGMAP::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! 
- - call MPI_COMM_RANK (mycomm, myProc, ierr) - call MPI_COMM_SIZE(mycomm, mySize, ierr) - - write(device,*) identifier, ":: TYPE CHECK:" - write(device,*) identifier, ":: COMP_ID = ", GSMap%comp_id - write(device,*) identifier, ":: NGSEG = ", GSMap%ngseg - write(device,*) identifier, ":: GSIZE = ", GSMap%gsize - write(device,*) identifier, ":: START:: association status, & - & size, values = ", associated(GSMap%start), size(GSMap%start) - write(device,*) identifier, ":: START = ", GSMap%start - write(device,*) identifier, ":: LENGTH:: association status, & - &size, values = ", associated(GSMap%length), size(GSMap%length) - write(device,*) identifier, ":: LENGTH = ", GSMap%length - write(device,*) identifier, ":: PE_LOC:: association status, & - &size, values = ", associated(GSMap%pe_loc), size(GSMap%pe_loc) - write(device,*) identifier, ":: PE_LOC = ", GSMap%pe_loc - - write(device,*) identifier, ":: NGSEG_ = ", ngseg(GSMap) - write(device,*) identifier, ":: NLSEG_ = ", nlseg(GSMap,myProc) - write(device,*) identifier, ":: COMP_ID_ = ", comp_id(GSMap) - write(device,*) identifier, ":: GSIZE_ = ", gsize(GSMap) - write(device,*) identifier, ":: GLOBALSTORAGE = ", GlobalStorage(GSMap) - write(device,*) identifier, ":: PROCESSSTORAGE = (PE, PE-STORAGE)" - do i=1,mySize - write(device,*) identifier, ":: PROCESSSTORAGE = ", & - i-1, ProcessStorage(GSMap,i-1) - enddo - write(device,*) identifier, ":: LSIZE_ = ", lsize(GSMap,mycomm) - write(device,*) identifier, ":: HALOED = ", haloed(GSMap) - - write(device,*) identifier, ":: SUBROUTINES CHECK:" - write(device,*) identifier, ":: ORDERED POINTS = (PE, SIZE, FIRST, LAST)" - - do i=1,mySize - - first=1 - last=0 - - proc = i-1 - - call OrderedPoints(GSMap,proc,points) - - npoints=size(points) - if(npoints>0) then - first = points(1) - last = points(npoints) - write(device,*) identifier, ":: ORDERED POINTS = ", proc, npoints, & - first, last - else - write(device,*) identifier, ":: ORDERED POINTS :: EXTREME WARNING:: & - &Process ", proc, " contains ", npoints, "points" - write(device,*) identifier, ":: AS A RESULT, & - &NOT TESTING RANK AND PELOCS::" - EXIT -! call die(myname_,"OrderedPoints may have failed ") - endif - - - !:::CHECK THE CORRECTNESS OF ROUTINE RANK1_:::! !::NOT YET PUBLIC IN MODULE::! - if(haloed(GSMap)) then - do k=first,last - call rank(GSMap,k,numlocs,owners) - found = .false. - do n=1,numlocs - if(owners(n) /= proc) then - found = .true. - endif - enddo - if(.not.found) then - call die(myname_,"SUBROUTINE RANKM_ failed!") - endif - enddo - deallocate(owners,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(owners)",ierr) - else - allocate(locs(npoints),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(locs)") - call peLocs(GSMap,npoints,points,locs) - do n=1,npoints - if(locs(n) /= proc) then - call die(myname_,"SUBROUTINE PELOCS FAILED!",locs(n)) - endif - enddo - deallocate(locs,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(locs)") - do k=first,last - call rank(GSMap,k,owner) - if(owner /= proc) then - write(device,*) identifier, ":: RANK1_ FAILED:: ", owner, proc, first, last, k - call die(myname_,"SUBROUTINE RANK1_ failed!") - endif - enddo - endif - !:::::::::::::::::::::::::::::::::::::::::::::! 
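! In outline, the ownership cross-check just performed (names as used above):
!   call OrderedPoints(GSMap, proc, points)    ! points owned by process 'proc'
!   call peLocs(GSMap, npoints, points, locs)  ! non-haloed map: locs(n) == proc
!   call rank(GSMap, points(n), owner)         ! non-haloed map: owner  == proc
! For a haloed map the multi-owner form rank(GSMap, k, numlocs, owners) is
! queried instead, since a point may be stored on more than one process.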
- - deallocate(points,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(points)",ierr) - enddo - - call active_pes(GSMap, nactive, pelist) - write(device,*) identifier, ":: ACTIVE PES (NUM_ACTIVE, PE_LIST) = ", & - nactive, pelist - deallocate(pelist,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(pelist)",ierr) - - - write(device,*) identifier, ":: TESTING INITP and INITP1" - call init(PGSMAP, GSMap%comp_id, GSMap%ngseg, GSMap%gsize, GSMap%start, & - GSMap%length, GSMap%pe_loc) - - k = size(GSMap%start)+size(GSMap%length)+size(GSMap%pe_loc) - allocate(slparray(k),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(slparray)",ierr) - - slpArray(1:GSMap%ngseg) = GSMap%start(1:GSMap%ngseg) - slpArray(GSMap%ngseg+1:2*GSMap%ngseg) = GSMap%length(1:GSMap%ngseg) - slpArray(2*GSMap%ngseg+1:3*GSMap%ngseg) = GSMap%pe_loc(1:GSMap%ngseg) - - call init(P1GSMap, GSMap%comp_id, GSMap%ngseg, GSMap%gsize, slpArray) - - deallocate(slpArray,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(slparray)",ierr) - - write(device,*) identifier, ":: COMPARE ALL GLOBALSEGMAPS: & - & YOU SHOULD SEE 3 IDENTICAL COLUMNS OF NUMBERS:" - write(device,*) identifier, ":: COMP_ID = ", & - GSMap%comp_id, PGSMap%comp_id, P1GSMap%comp_id - write(device,*) identifier, ":: NGSEG = ", & - GSMap%ngseg, GSMap%ngseg, GSMap%ngseg - write(device,*) identifier, ":: GSIZE = ", & - GSMap%gsize, GSMap%gsize, GSMap%gsize - write(device,*) identifier, ":: START:: association status = ", & - associated(GSMap%start), associated(PGSMap%start), & - associated(P1GSMap%start) - write(device,*) identifier, ":: START:: size = ", & - size(GSMap%start), size(PGSMap%start), size(P1GSMap%start) - - write(device,*) identifier, ":: LENGTH:: association status = ", & - associated(GSMap%length), associated(PGSMap%length), & - associated(P1GSMap%length) - write(device,*) identifier, ":: LENGTH:: size = ", & - size(GSMap%length), size(PGSMap%length), size(P1GSMap%length) - - - write(device,*) identifier, ":: PE_LOC:: association status = ", & - associated(GSMap%pe_loc), associated(PGSMap%pe_loc), & - associated(P1GSMap%pe_loc) - write(device,*) identifier, ":: PE_LOC:: size = ", & - size(GSMap%pe_loc), size(PGSMap%pe_loc), size(P1GSMap%pe_loc) - - do i=1,GSMap%ngseg - if( (GSMap%start(i) /= PGSMap%start(i)) .or. & - (GSMap%start(i) /= P1GSMap%start(i)) ) then - call die(myname_,"INITP or INITP1 failed -starts-!") - endif - if( (GSMap%length(i) /= PGSMap%length(i)) .or. & - (GSMap%length(i) /= P1GSMap%length(i)) ) then - call die(myname_,"INITP or INITP1 failed -lengths-!") - endif - if( (GSMap%pe_loc(i) /= PGSMap%pe_loc(i)) .or. 
& - (GSMap%pe_loc(i) /= P1GSMap%pe_loc(i)) ) then - call die(myname_,"INITP or INITP1 failed -pe_locs-!") - endif - enddo - - write(device,*) identifier, ":: TESTING SORT AND PERMUTE" - - call Sort(PGSMap,PGSMap%pe_loc,PGSMap%start,perm) - call Permute(PGSMap, perm) - - deallocate(perm,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(perm)") - - call SortPermute(P1GSMap,PGSMap%pe_loc,PGSMap%start) - - do i=1,GSMap%ngseg - if( (P1GSMap%start(i) /= PGSMap%start(i)) ) then - call die(myname_,"Sort or Permute failed -starts-!") - endif - if( (P1GSMap%length(i) /= PGSMap%length(i)) ) then - call die(myname_,"Sort or Permute failed -lengths-!") - endif - if( (P1GSMap%pe_loc(i) /= PGSMap%pe_loc(i)) ) then - call die(myname_,"Sort or Permute failed -pe_locs-!") - endif - enddo - - write(device,*) identifier, ":: TESTING GLOBALTOLOCAL FUNCTIONS ::" - - write(device,*) identifier, ":: TESTING GLOBALSEGMAPTOINDICES ::" - - call GlobalToLocalIndices(GSMap,mycomm,mystart,mylength) - - if(.NOT. (associated(mystart).and.associated(mylength)) ) then - call die(myname_, "::GLOBALSEGMAPTOINDICES::& - &mystart and/or mylength is not associated") - endif - - if(size(mystart)<0) then - call die(myname_, "::GLOBALSEGMAPTOINDICES::size(start) < 0") - endif - - if(size(mystart) /= size(mylength)) then - call die(myname_, "::GLOBALSEGMAPTOINDICES::size(start)/=size(length)") - endif - - if(size(mystart) /= nlseg(GSMap,myProc)) then - call die(myname_, "::GLOBALSEGMAPTOINDICES::size(start)/=nlseg") - endif - - if(size(mystart)>0) then - write(device,*) identifier, ":: GLOBALSEGMAPTOINDICES :: & - &start = (size, values) ", & - size(mystart), mystart - else - write(device,*) identifier, ":: GLOBALSEGMAPTOINDICES :: & - &start has zero size" - endif - - if(size(mylength)>0) then - write(device,*) identifier, ":: GLOBALSEGMAPTOINDICES :: & - &length = (size, values) ", & - size(mylength), mylength - else - write(device,*) identifier, ":: GLOBALSEGMAPTOINDICES :: & - &length has zero size" - endif - - if(size(mystart)>0) then - write(device,*) identifier, ":: GLOBALSEGMAPTOINDICES :: & - &first, last indices = ", & - mystart(1), mystart(size(mystart))+mylength(size(mylength))-1 - else - write(device,*) identifier, ":: GLOBALSEGMAPTOINDICES :: NOT TESTING& - & THIS ROUTINE BECAUSE START AND LENGTH HAVE ZERO SIZE" - endif - - deallocate(mystart,mylength,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(mystart,mylength)") - - write(device,*) identifier, ":: TESTING GLOBALSEGMAPTOINDEX" - - j=-12345 - k=-12345 - - do i=1,GlobalStorage(GSMap) - if(GlobalToLocalIndex(GSMap,i,mycomm)/=-1) then - j=GlobalToLocalIndex(GSMap,i,mycomm) - EXIT - endif - enddo - - do i=1,GlobalStorage(GSMap) - if(GlobalToLocalIndex(GSMap,i,mycomm)/=-1) then - k=GlobalToLocalIndex(GSMap,i,mycomm) - endif - enddo - - if( (j==-12345).and.(k==-12345) ) then - write(device,*) identifier, ":: GlobalSegMapToIndex :: & - &THIS PROCESS OWNS ZERO POINTS" - else - write(device,*) identifier, ":: GlobalSegMapToIndex :: & - &first, last indices = ", j, k - endif - - end subroutine testGSMap_ - - logical function Identical_(GSMap1,GSMap2) - - use m_GlobalSegMap ! Use all GlobalSegMap routines - - implicit none - - type(GlobalSegMap), intent(in) :: GSMap1, GSMap2 - - integer :: i - Identical_=.true. - - if(GSMap1%comp_id /= GSMap2%comp_id) Identical_=.false. - if(GSMap1%ngseg /= GSMap2%ngseg) Identical_=.false. - if(GSMap1%gsize /= GSMap2%gsize) Identical_=.false. - - do i=1,GSMap1%ngseg - if(GSMap1%start(i) /= GSMap2%start(i)) Identical_=.false. 
- if(GSMap1%length(i) /= GSMap2%length(i)) Identical_ =.false. - if(GSMap1%pe_loc(i) /= GSMap2%pe_loc(i)) Identical_ =.false. - enddo - - end function Identical_ - -end module m_GSMapTest diff --git a/src/externals/mct/testsystem/testall/m_MCTWORLDTEST.F90 b/src/externals/mct/testsystem/testall/m_MCTWORLDTEST.F90 deleted file mode 100644 index bf16a337c5c..00000000000 --- a/src/externals/mct/testsystem/testall/m_MCTWORLDTEST.F90 +++ /dev/null @@ -1,121 +0,0 @@ -! -! !INTERFACE: - - module m_MCTWORLDTEST -! -! !USES: -! - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: testall - - interface testall - module procedure testMCTWorld_ - end interface - - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_MCTWORLDTEST' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aVtest_ - Test the functions in the AttrVect module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt AttrVect}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testMCTWorld_(identifier, device) - -! -! !USES: -! - use m_MCTWorld ! Use all of MCTWorld - use m_stdio - use m_die - use m_mpif90 - - implicit none - -! !INPUT PARAMETERS: - - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - -! !REVISION HISTORY: -! 23Sep02 - E.T. Ong - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::testMCTWorld_' - integer :: i,j,k - integer :: mySize,ierr - - write(device,*) identifier, ":: TYPE CHECK:" - - write(device,*) identifier, ":: MCT_comm = ", ThisMCTWorld%MCT_comm - write(device,*) identifier, ":: ncomps = ", ThisMCTWorld%ncomps - write(device,*) identifier, ":: mygrank = ", ThisMCTWorld%mygrank - - if(associated(ThisMCTWorld%nprocspid).and.associated(ThisMCTWorld%idGprocid)) then - - write(device,*) identifier, ":: nprocspid = & - &(compid , nprocspid(compid)) " - - do i=1,size(ThisMCTWorld%nprocspid) - write(device,*) identifier, i, ThisMCTWorld%nprocspid(i) - enddo - - write(device,*) identifier, "::idGprocid = & - &(compid , local_PID, idGprocid(compid,local_PID)) " - - do i=1,size(ThisMCTWorld%idGprocid,1) - do j=0,size(ThisMCTWorld%idGprocid,2)-1 - write(device,*) identifier, i, j, ThisMCTWorld%idGprocid(i,j) - enddo - enddo - - else - - call die(myname_, "MCTWorld pointer components are not associated!") - - endif - - write(device,*) identifier, ":: NumComponents = ", NumComponents(ThisMCTWorld) - write(device,*) identifier, ":: ComponentNumProcs = & - &(compid, ComponentNumProcs(compid)) = " - do i=1,ThisMCTWorld%ncomps - write(device,*) identifier, i, ComponentNumProcs(ThisMCTWorld, i) - enddo - - write(device,*) identifier, ":: ComponentToWorldRank = & - &(compid, local_PID, ComponentToWorldRank(local_PID,compid))" - do i=1,ThisMCTWorld%ncomps - do j=0,ThisMCTWorld%nprocspid(i)-1 - write(device,*) identifier, i, j, ComponentToWorldRank(j,i,ThisMCTWorld) - enddo - enddo - - write(device,*) identifier, ":: ComponentRootRank = (compid, & - &ComponentRootRank(compid)" - - do i=1,ThisMCTWorld%ncomps - write(device,*) 
identifier, i, ComponentRootRank(i,ThisMCTWorld) - enddo - -end subroutine testMCTWorld_ - -end module m_MCTWORLDTEST diff --git a/src/externals/mct/testsystem/testall/m_ROUTERTEST.F90 b/src/externals/mct/testsystem/testall/m_ROUTERTEST.F90 deleted file mode 100644 index 2634c6db531..00000000000 --- a/src/externals/mct/testsystem/testall/m_ROUTERTEST.F90 +++ /dev/null @@ -1,120 +0,0 @@ -! -! !INTERFACE: - - module m_ROUTERTEST -! -! !USES: -! - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: testall - - interface testall - module procedure testRouter_ - end interface - - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_ROUTERTEST' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: aVtest_ - Test the functions in the AttrVect module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt AttrVect}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testRouter_(Rout, identifier, device) - -! -! !USES: -! - use m_Router ! Use all GlobalSegMap routines - use m_stdio - use m_die - use m_mpif90 - - implicit none - -! !INPUT PARAMETERS: - - type(Router), intent(in) :: Rout - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - -! !REVISION HISTORY: -! 23Sep02 - E.T. Ong - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::testRouter_' - integer :: proc, nseg - - write(device,*) identifier, ":: TYPE CHECK:" - write(device,*) identifier, ":: COMP1ID = ", Rout%comp1id - write(device,*) identifier, ":: COMP2ID = ", Rout%comp2id - write(device,*) identifier, ":: NPROCS = ", Rout%nprocs - write(device,*) identifier, ":: MAXSIZE = ", Rout%maxsize - - if(associated(Rout%pe_list)) then - write(device,*) identifier, ":: PE_LIST = ", Rout%pe_list - else - call die(myname_,"PE_LIST IS NOT ASSOCIATED!") - endif - - if(associated(Rout%num_segs)) then - write(device,*) identifier, ":: NUM_SEGS = ", Rout%num_segs - else - call die(myname_,"NUM_SEGS IS NOT ASSOCIATED!") - endif - - if(associated(Rout%locsize)) then - write(device,*) identifier, ":: LOCSIZE = ", Rout%locsize - else - call die(myname_,"LOCSIZE IS NOT ASSOCIATED!") - endif - - if(associated(Rout%seg_starts)) then - write(device,*) identifier, ":: SIZE OF SEG_STARTS & - &(FIRST, SECOND DIM) = ", & - size(Rout%seg_starts,1), size(Rout%seg_lengths,2) - else - call die(myname_,"SEG_STARTS IS NOT ASSOCIATED!") - endif - - if(associated(Rout%seg_lengths)) then - write(device,*) identifier, ":: SIZE OF SEG_LENGTHS = & - &(FIRST, SECOND DIM) = ", & - size(Rout%seg_lengths,1), size(Rout%seg_lengths,2) - else - call die(myname_,"SEG_LENGTHS IS NOT ASSOCIATED!") - endif - - write(device,*) identifier, ":: SEG_STARTS AND SEG_LENGTHS & - &VALUES: (PE, START, LENGTH) = " - - do proc = 1, Rout%nprocs - do nseg = 1, Rout%num_segs(proc) - write(device,*) identifier, Rout%pe_list(proc), & - Rout%seg_starts(proc,nseg), & - Rout%seg_lengths(proc,nseg) - enddo - enddo - - end subroutine testRouter_ - -end module m_ROUTERTEST diff --git 
a/src/externals/mct/testsystem/testall/m_SMATTEST.F90 b/src/externals/mct/testsystem/testall/m_SMATTEST.F90 deleted file mode 100644 index 060a6b5bee4..00000000000 --- a/src/externals/mct/testsystem/testall/m_SMATTEST.F90 +++ /dev/null @@ -1,627 +0,0 @@ -! -! !INTERFACE: - - module m_SMATTEST -! -! !USES: -! - implicit none - - private ! except - -! !PUBLIC MEMBER FUNCTIONS: - - public :: testall - public :: IndexAttr - public :: SortPermute - public :: ImportExport - public :: Identical - - interface testall - module procedure testsMat_ - end interface - interface IndexAttr - module procedure IndexTest_ - end interface - interface SortPermute - module procedure SortPermuteTest_ - end interface - interface ImportExport - module procedure ImportExportTest_ - end interface - interface Identical - module procedure Identical_ - end interface - - -! !REVISION HISTORY: -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname='m_SMATTEST' - - contains - -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!BOP ------------------------------------------------------------------- -! -! !IROUTINE: sMattest_ - Test the functions in the SparseMatrix module -! -! !DESCRIPTION: -! This routine writes diagnostic information about the input -! {\tt SparseMatrix}. Each line of the output will be preceded by the -! character argument {\tt identifier}. The output device is specified -! by the integer argument {\tt device}. -! -! !INTERFACE: - - subroutine testsMat_(sMat, identifier, device, mycomm) - -! -! !USES: -! - use m_SparseMatrix ! Use all SparseMatrix routines - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - -! !INPUT PARAMETERS: - - type(SparseMatrix), intent(in) :: sMat - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - integer, optional, intent(in) :: mycomm - -! !REVISION HISTORY: -! 23Sep02 - E.T. Ong - initial prototype. -!EOP ___________________________________________________________________ - - character(len=*),parameter :: myname_=myname//'::sMattest_' - integer :: i,j,k,ierr - integer :: numrows, start, end - real :: sparsity - real, dimension(:), pointer :: sums - real, dimension(:), allocatable :: validsums - logical :: rowsumcheck - type(SparseMatrix) :: sMatExactCopy - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::MAKE A COPY::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call Copy(sMat=sMat,sMatCopy=sMatExactCopy) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::WRITE OUT INFO ABOUT THE ATTRVECT::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - write(device,*) identifier, ":: Testing SparseMatrix Routines" - write(device,*) identifier, ":: lsize = ", lsize(sMat) - write(device,*) identifier, ":: nRows = ", nRows(sMat) - write(device,*) identifier, ":: nCols = ", nCols(sMat) - write(device,*) identifier, ":: vecinit = ", sMat%vecinit - - ! 
Add vecinit to smat_identical - call CheckBounds(sMat,ierr) - write(device,*) identifier, ":: CheckBounds ierror = ", ierr - - call local_row_range(sMat,start,end) - - write(device,*) identifier, ":: local_row_range (start_row, end_row) = ", & - start,end - - call local_col_range(sMat,start,end) - - write(device,*) identifier, ":: local_col_ramge (start_col, end_col) = ", & - start,end - - if(present(mycomm)) then - - write(device,*) identifier, ":: SINCE THE COMMUNICATOR ARGUMENT WAS & - &PROVIDED, PLEASE ENSURE THAT THIS TEST IS BEING CALLED ON & - &ALL PROCESSORS OF THIS COMPONENT AND THAT THE SPARSEMATRIX HAS& - & BEEN SCATTERED." - - write(device,*) identifier, ":: GlobalNumElements = ", & - GlobalNumElements(sMat,mycomm) - - call ComputeSparsity(sMat,sparsity,mycomm) - write(device,*) identifier, ":: ComputeSparsity = ", sparsity - - call global_row_range(sMat,mycomm,start,end) - - write(device,*) identifier,":: global_row_range (start_row, end_row) = ",& - start,end - - call global_col_range(sMat,mycomm,start,end) - - write(device,*) identifier,":: global_col_range (start_col, end_col) = ",& - start,end - - call row_sum(sMat,numrows,sums,mycomm) - write(device,*) identifier, ":: row_sum (size(sums),numrows,& - &first,last,min,max) = ", & - size(sums), numrows, sums(1), sums(size(sums)), & - MINVAL(sums), MAXVAL(sums) - - allocate(validsums(2),stat=ierr) - if(ierr/=0) call die(myname_,"allocate(validsums)",ierr) - - validsums(1)=0. - validsums(2)=1. - - call row_sum_check(sMat=sMat,comm=mycomm,num_valid=2, & - valid_sums=validsums,abs_tol=1e-5,valid=rowsumcheck) - - write(device,*) identifier,":: row_sum_check = ", rowsumcheck - - deallocate(sums,validsums, stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(sums,validsums)",ierr) - - endif - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TESTING INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - call IndexTest_(sMat,identifier,device) - - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING SORT AND PERMUTE:::::::::::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! NOTE: THIS IS NOT A CHECK FOR CORRECTNESS, JUST A CHECK FOR CONSISTENCY - - call SortPermuteTest_(sMat,identifier,device) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TESTING EXPORT AND IMPORT FUNCTIONS:::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - call ImportExportTest_(sMat,identifier,device) - - ! Check that sMat is unchanged! 
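! Identical (Identical_ below) compares the AttrVect payload of the two
! matrices to within the supplied tolerance (1e-5 here) and also requires
! matching nrows, ncols and vecinit, so any side effect left behind by the
! index, sort/permute or import/export tests above is caught at this point.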
- - if(.NOT.Identical(sMat,sMatExactCopy,1e-5)) then - call die(myname_,"sMat unexpectedly altered!!!") - endif - - call clean(sMatExactCopy) - -end subroutine testsMat_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: -!:::::TEST FOR INDEXIA AND GETILIST:::::::::::::::::::::::::::::::::::::: -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - subroutine IndexTest_(sMat,identifier,device) - - use m_SparseMatrix - use m_AttrVect, only: getIList, getRList - use m_AttrVect, only: nIAttr, nRAttr - use m_List, only: List_allocated => allocated - use m_String, only: String - use m_String, only: StringToChar => toChar - use m_String, only: String_clean => clean - use m_stdio - use m_die - - implicit none - - type(SparseMatrix), intent(in) :: sMat - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::IndexTest_' - type(String) :: ItemStr - integer :: i,j,k,ierr - - if(nIAttr(sMat%data)>0) then - write(device,*) identifier, ":: Testing indexIA ::" - else - if(List_allocated(sMat%data%iList)) then - call die(myname_,"iList has been allocated, :& - &but there are no atttributes. :& - &Please do not initialize a blank list.") - end if - if(associated(sMat%data%iAttr)) then - if(size(sMat%data%iAttr,1) /= 0) then - call die(myname_,"iAttr contains no attributes, & - &yet its size /= 0",size(sMat%data%iAttr,1)) - endif - endif - end if - - do i=1,nIAttr(sMat%data) - - call getIList(ItemStr,i,sMat%data) - j = indexIA(sMat,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - ":: sMat Index = ", j, & - ":: Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - if(nRAttr(sMat%data)>0) then - write(device,*) identifier, ":: Testing indexRA::" - else - if(List_allocated(sMat%data%rList)) then - call die(myname_,"rList has been allocated, :& - &but there are no atttributes. :& - &Please do not initialize a blank list.") - end if - if(associated(sMat%data%rAttr)) then - if(size(sMat%data%rAttr,1) /= 0) then - call die(myname_,"rAttr contains no attributes, & - &yet its size /= 0",size(sMat%data%rAttr,1)) - endif - endif - end if - - do i=1,nRAttr(sMat%data) - - call getRList(ItemStr,i,sMat%data) - j = indexRA(sMat,StringToChar(ItemStr)) - if(i/=j) call die(myname_,"Function indexIA failed!") - write(device,*) identifier, & - "::sMat Index = ", j, & - "::Attribute Name = ", StringToChar(ItemStr) - call String_clean(ItemStr) - - enddo - - end subroutine IndexTest_ - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR SORT AND PERMUTE:::::::::::::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! - -! 
NOTE: THIS IS NOT A CHECK FOR CORRECTNESS, JUST A CHECK FOR CONSISTENCY - - subroutine SortPermuteTest_(sMat,identifier,device) - - use m_SparseMatrix - use m_AttrVect, only : nIAttr, nRAttr, Zero - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(SparseMatrix), intent(in) :: sMat - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::SortPermuteTest_' - type(SparseMatrix) :: SMATCOPY1, SMATCOPY2 - logical,dimension(:), pointer :: descend - integer,dimension(:), pointer :: perm - integer :: i,j,k,ierr - real :: r - - write(device,*) identifier, ":: Testing Sort and Permute" - - call init(SMATCOPY1,sMat%nrows,sMat%ncols,lsize(sMat)) - call init(SMATCOPY2,sMat%nrows,sMat%ncols,lsize(sMat)) - - if( (nIAttr(SMATCOPY1%data)>0) .or. & - (nRAttr(SMATCOPY1%data)>0) ) then - - if(nIAttr(SMATCOPY1%data)>0) then - - allocate(descend(nIAttr(SMATCOPY1%data)),stat=ierr) - if(ierr /= 0) call die(myname_,"allocate(descend)") - - call Zero(SMATCOPY1%data) - call Zero(SMATCOPY2%data) - - k=0 - do i=1,nIAttr(SMATCOPY1%data) - do j=1,lsize(SMATCOPY1) - k=k+1 - SMATCOPY1%data%iAttr(i,j) = k - SMATCOPY2%data%iAttr(i,j) = k - enddo - enddo - - descend=.true. - call Sort(sMat=SMATCOPY1,key_list=SMATCOPY1%data%iList,perm=perm,descend=descend) - call Permute(sMat=SMATCOPY1,perm=perm) - - call SortPermute(sMat=SMATCOPY2,key_list=SMATCOPY2%data%iList,descend=descend) - - do i=1,nIAttr(SMATCOPY1%data) - do j=1,lsize(SMATCOPY1) - if(SMATCOPY1%data%iAttr(i,j) /= SMATCOPY2%data%iAttr(i,j)) then - call die(myname_,"Sort Testing FAILED!") - endif - enddo - enddo - - write(device,*) identifier, ":: Integer SparseMatrix data IN DESCENDING ORDER:: ", & - SMATCOPY1%data%iAttr(1,1:5) - - deallocate(perm,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(perm)") - - deallocate(descend,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(descend)") - - endif - - if(nRAttr(SMATCOPY1%data)>0) then - - allocate(descend(nRAttr(SMATCOPY1%data)),stat=ierr) - if(ierr /= 0) call die(myname_,"allocate(descend)") - - call Zero(SMATCOPY1%data) - call Zero(SMATCOPY2%data) - - r=0. - do i=1,nRAttr(SMATCOPY1%data) - do j=1,lsize(SMATCOPY1) - r=r+1.29 - SMATCOPY1%data%rAttr(i,j) = r - SMATCOPY2%data%rAttr(i,j) = r - enddo - enddo - - descend=.true. - call Sort(sMat=SMATCOPY1,key_list=SMATCOPY1%data%rList,perm=perm,descend=descend) - call Permute(sMat=SMATCOPY1,perm=perm) - - call SortPermute(sMat=SMATCOPY2,key_list=SMATCOPY2%data%rList,descend=descend) - - do i=1,nRAttr(SMATCOPY1%data) - do j=1,lsize(SMATCOPY1) - if(SMATCOPY1%data%rAttr(i,j) /= SMATCOPY2%data%rAttr(i,j)) then - call die(myname_,"Sort Testing FAILED!") - endif - enddo - enddo - - write(device,*) identifier, ":: REAL SparseMatrix data IN DESCENDING ORDER:: ", & - SMATCOPY1%data%rAttr(1,1:5) - - deallocate(perm,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(perm)") - - deallocate(descend,stat=ierr) - if(ierr /= 0) call die(myname_,"deallocate(descend)") - - endif - else - write(device,*) identifier, ":: NOT TESTING SORTING AND PERMUTING. CONSULT & - &SOURCE CODE TO ENABLE TESTING." - endif - - call clean(SMATCOPY1) - call clean(SMATCOPY2) - - end subroutine SortPermuteTest_ - - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! -!:::::TEST FOR EXPORT AND IMPORT FUNCTIONS:::::::::::::::::::::::::::::::! -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::! 
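! Condensed sketch of the export/import round trip the routine below performs
! for each attribute; "grow" stands in for the global-row attribute, and
! sMatCopy, GlobalRows and vsize are placeholder names for the harness copies.
!
!   call exportGlobalRowIndices(sMat, GlobalRows, vsize)           ! read "grow"
!   call init(sMatCopy, sMat%nrows, sMat%ncols, lsize(sMat))       ! blank copy
!   call importGlobalRowIndices(sMatCopy, GlobalRows, lsize(sMat)) ! write it back
!   ! the copy's "grow" column must now match GlobalRows element by element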
- - subroutine ImportExportTest_(sMat,identifier,device) - - use m_SparseMatrix - - use m_List, only : List - use m_List, only : List_identical => identical - use m_List, only : List_get => get - use m_List, only : List_clean => clean - use m_String, only : String - use m_String, only : StringToChar => toChar - use m_String, only : String_clean => clean - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(SparseMatrix), intent(in) :: sMat - character(len=*), intent(in) :: identifier - integer, intent(in) :: device - - character(len=*),parameter :: myname_=myname//'::ImportExportTest_' - integer :: i,j,k,ierr - real :: r - - type(SparseMatrix) :: sMatCopy - integer :: size - integer, dimension(:), pointer :: GlobalRows, GlobalColumns - integer, dimension(:), pointer :: LocalRows, LocalColumns - integer, dimension(:), pointer :: importIVect - real(FP), dimension(:), pointer :: importRVect - real(FP), dimension(:), pointer :: MatrixElements - - write(device,*) identifier, ":: Testing import and export functions" - - nullify(GlobalRows) - nullify(GlobalColumns) - nullify(LocalRows) - nullify(LocalColumns) - nullify(MatrixElements) - nullify(importIVect) - nullify(importRVect) - - call exportGlobalRowIndices(sMat,GlobalRows,size) - if(.NOT.aVEqualsMat_(sMat=sMat,ivector=GlobalRows,attribute="grow")) then - call die(myname_,"exportGlobalRowIndices failed") - endif - - call exportGlobalColumnIndices(sMat,GlobalColumns,size) - if(.NOT.aVEqualsMat_(sMat=sMat,ivector=GlobalColumns,attribute="gcol")) then - call die(myname_,"exportGlobalColumnIndices failed") - endif - - call exportLocalRowIndices(sMat,LocalRows,size) - if(.NOT.aVEqualsMat_(sMat=sMat,ivector=LocalRows,attribute="lrow")) then - call die(myname_,"exportLocalRowIndices failed") - endif - - call exportLocalColumnIndices(sMat,LocalColumns,size) - if(.NOT.aVEqualsMat_(sMat=sMat,ivector=LocalColumns,attribute="lcol")) then - call die(myname_,"exportLocalColumnIndices failed") - endif - - call exportMatrixElements(sMat,MatrixElements,size) - if(.NOT.aVEqualsMat_(sMat=sMat,rvector=MatrixElements,attribute="weight")) then - call die(myname_,"exportMatrixElements failed") - endif - - call init(sMatCopy,sMat%nrows,sMat%ncols,lsize(sMat)) - - allocate(importIVect(lsize(sMat)),importRVect(lsize(sMat)),stat=ierr) - if(ierr/=0) call die(myname_,"llocate(importVect)",ierr) - - r=0. 
- do i=1,lsize(sMat) - r=r+1.1 - importIVect(i) = i - importRVect(i) = r - enddo - - call importGlobalRowIndices(sMatCopy,importIVect,lsize(sMat)) - if(.NOT.aVEqualsMat_(sMat=sMatCopy,ivector=importIVect,attribute="grow")) then - call die(myname_,"importGlobalRowIndices failed") - endif - - call importGlobalColumnIndices(sMatCopy,importIVect,lsize(sMat)) - if(.NOT.aVEqualsMat_(sMat=sMatCopy,ivector=importIVect,attribute="gcol")) then - call die(myname_,"importGlobalColumnIndices failed") - endif - - call importLocalRowIndices(sMatCopy,importIVect,lsize(sMat)) - if(.NOT.aVEqualsMat_(sMat=sMatCopy,ivector=importIVect,attribute="lrow")) then - call die(myname_,"importLocalRowIndices failed") - endif - - call importLocalColumnIndices(sMatCopy,importIVect,lsize(sMat)) - if(.NOT.aVEqualsMat_(sMat=sMatCopy,ivector=importIVect,attribute="lcol")) then - call die(myname_,"importLocalColumnIndices failed") - endif - - call importMatrixElements(sMatCopy,importRVect,lsize(sMat)) - if(.NOT.aVEqualsMat_(sMat=sMatCopy,rvector=importRVect,attribute="weight")) then - call die(myname_,"importMatrixElements failed") - endif - - call clean(sMatCopy) - - deallocate(GlobalRows,GlobalColumns,LocalRows,LocalColumns, & - importIVect, importRVect,MatrixElements,stat=ierr) - if(ierr/=0) call die(myname_,"deallocate(Global....)",ierr) - - contains - - logical function aVEqualsMat_(sMat,ivector,rvector,attribute) - - use m_SparseMatrix - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(SparseMatrix), intent(in) :: sMat - integer, dimension(:), pointer, optional :: ivector - real(FP), dimension(:), pointer, optional :: rvector - character(len=*), intent(in) :: attribute - - integer :: i, attribute_index - - aVEqualsMat_ = .TRUE. - - if(present(ivector)) then - - attribute_index = indexIA(sMat,trim(attribute)) - - do i=1,lsize(sMat) - if(sMat%data%iAttr(attribute_index,i) /= ivector(i)) then - aVEqualsMat_ = .FALSE. - EXIT - endif - enddo - - else - - if(present(rvector)) then - - attribute_index = indexRA(sMat,trim(attribute)) - - do i=1,lsize(sMat) - if(sMat%data%rAttr(attribute_index,i) /= rvector(i)) then - aVEqualsMat_ = .FALSE. - EXIT - endif - enddo - - else - - call die("aVEqualsMat_::","ivector or rvector must be present") - - endif - - endif - - end function aVEqualsMat_ - - end subroutine ImportExportTest_ - - logical function Identical_(SMAT1,SMAT2,Range) - - use m_SparseMatrix - use m_AVTEST,only: AttrVect_identical => Identical - use m_List,only : List_allocated => allocated - use m_List,only : List_identical => identical - use m_stdio - use m_die - - use m_realkinds, only : FP - - implicit none - - type(SparseMatrix), intent(in) :: SMAT1 - type(SparseMatrix), intent(in) :: SMAT2 - real, optional, intent(in) :: Range - - integer :: i,j,k - - Identical_=.true. - - if(present(Range)) then - if(.NOT. AttrVect_identical(SMAT1%data,SMAT2%data,Range)) then - Identical_=.false. - endif - else - if(.NOT. AttrVect_identical(SMAT1%data,SMAT2%data)) then - Identical_=.false. - endif - endif - - if(SMAT1%nrows /= SMAT2%nrows) then - Identical_=.false. - endif - - if(SMAT1%ncols /= SMAT2%ncols) then - Identical_=.false. - endif - - if(SMAT1%vecinit .neqv. SMAT2%vecinit) then - Identical_=.false. 
- endif - - end function Identical_ - -end module m_SMATTEST diff --git a/src/externals/mct/testsystem/testall/master.F90 b/src/externals/mct/testsystem/testall/master.F90 deleted file mode 100644 index 4081f31656a..00000000000 --- a/src/externals/mct/testsystem/testall/master.F90 +++ /dev/null @@ -1,39 +0,0 @@ -!----------------------------------------------------------------------- -! CVS $Id: master.F90,v 1.2 2007-10-30 20:57:16 rloy Exp $ -! CVS $Name: $ -!----------------------------------------------------------------------- -! A driver model code for Multi-Process Handshaking utility -! to facilitate a plug & play style programming using single executable. -! each processor only execute one component model once. -! Written by Yun (Helen) He and Chris Ding, NERSC/LBNL, October 2000. - - - program main - use MPH_all - implicit none - integer myProc_global - - external ccm3, cpl, pop2_2 - - call MPI_INIT(ierr) - call MPI_COMM_RANK(MPI_COMM_WORLD,myProc_global,ierr) - -! here ccm3.8, pop2.2 etc are subroutine names in component models -! you could list the components in any order or omit any of them - call MPH_setup_SE (atmosphere=ccm3, coupler=cpl, ocean=pop2_2) - -! write(*,*)'I am proc ', MPH_global_proc_id(), -! & ' of global proc ', MPH_local_proc_id_ME_SE(), ' of ', -! & MPH_myName_ME_SE() -! write(*,*)'==============================================' - - call MPI_FINALIZE(ierr) - - - if(myProc_global==0) then - write(9999,*) "End of main" - close(9999) - endif - - end program - diff --git a/src/externals/mct/testsystem/testall/mph.F90 b/src/externals/mct/testsystem/testall/mph.F90 deleted file mode 100644 index 0779705c867..00000000000 --- a/src/externals/mct/testsystem/testall/mph.F90 +++ /dev/null @@ -1,1068 +0,0 @@ -!----------------------------------------------------------------------- -! CVS $Id: mph.F90,v 1.3 2006-10-03 22:43:29 jacob Exp $ -! CVS $Name: $ -! ============================================================= -! Multi Program-Components Handshaking (MPH) Utility - -! This is a small utility of global handshaking among different component -! models. Each component will run on a set of nodes or processors. -! Different components could run either on different set of nodes, or -! on set of nodes that overlap. - -! There are three seperate implementations: -! 1. Multiple Components, Multiple Executables, components non-overlap -! 2. Multiple Components, Single Executable, components non-overlap -! 3. Multiple Components, Single Executable, components overlap, flexible - -! This is a combined module for all the above. -! The user only has to "use MPH_all" in their application codes. -! You may need to use MPH_help to understand the required information -! for setup, input file and inquiry functions. - -! Written by Yun He and Chris Ding, NERSC/LBL, January 2001. - - -!============================================================== -! common data used by all three versions of MPH -!============================================================== - - module comm_data123 - - use m_mpif - implicit none - - integer istatus(MPI_STATUS_SIZE), ierr - integer max_num_comps, maxProcs_comp - parameter (max_num_comps=20) ! maximum number of components - parameter (maxProcs_comp=128) ! maximum number of procs per comps - - type Acomponent - character*16 name ! component name - integer num_process ! number of processors - integer process_list(maxProcs_comp) - ! global processor_id, increasing order - end type Acomponent - - type (Acomponent) components(max_num_comps) ! 
allocate components - integer MPI_Acomponent - - integer global_proc_id ! proc id in the whole world - integer global_totProcs ! total # of procs for the whole world - integer COMM_master ! communicator for submaster of each component - - integer total_components - character*16 component_names(max_num_comps) - -! for timer - integer N_CHANNELS - parameter (N_CHANNELS=10) - real (kind=8) :: init_time = -1.0 - real (kind=8) :: last_time, tot_time(0:N_CHANNELS) - - end module comm_data123 - -!=============================================================== -! common data shared by MPH_Multi_Exec and MPH_Single_Exec -!=============================================================== - - module comm_data12 - use comm_data123 - integer component_id - integer local_world ! communicator for this component - integer local_proc_id ! proc id in this component - integer local_totProcs ! total # of procs for this component - end module comm_data12 - -!================================================================== -! common subroutines used by all three versions of MPH -!================================================================== - - module comm_sub123 - use comm_data123 - contains - -!--------------- subroutine MPH_init () ------------ - - subroutine MPH_init () - implicit none - - integer iblock(3), idisp(3), itype(3) - - call MPI_COMM_RANK (MPI_COMM_WORLD, global_proc_id, ierr) - call MPI_COMM_SIZE (MPI_COMM_WORLD, global_totProcs, ierr) - -! create a new MPI data type MPI_Acomponent - - iblock(1) = 16 - iblock(2) = 1 - iblock(3) = maxProcs_comp - idisp(1) = 0 - idisp(2) = 16 - idisp(3) = 20 - itype(1) = MPI_CHARACTER - itype(2) = MPI_INTEGER - itype(3) = MPI_INTEGER - call MPI_TYPE_STRUCT (3,iblock,idisp,itype,MPI_Acomponent,ierr) - call MPI_TYPE_COMMIT (MPI_Acomponent, ierr) - - end subroutine MPH_init - - -!--------- subroutine MPH_global_id (name, local_id) ---------- - - integer function MPH_global_id (name, local_id) - implicit none - - character*(*) name - integer local_id, temp - -! then find out the component rank - temp = MPH_find_name (name, component_names, total_components) - -! process_list starts from 1, while proc rank starts from 0 - MPH_global_id = components(temp) % process_list(local_id+1) - - end function MPH_global_id - - -!------ integer function MPH_find_name(name, namelist, num) ------ - - integer function MPH_find_name(name, namelist, num) - implicit none - -! find name in component_names - character*(*) name - integer i, num - character*16 namelist(num) - - do i = 1, num - if (name == namelist(i)) then -! print *, i, name, namelist(i) - goto 100 - endif - enddo - -! 
name is not found - MPH_find_name = -1 - print *, "ERROR: ", name, " not found in components.in" - stop - -100 MPH_find_name = i - return - end function MPH_find_name - - -!---------- subroutine MPH_redirect_output (name) --------- - - subroutine MPH_redirect_output (name) - character*(*) name - integer lenname, lenval, rcode - character*16 output_name_env - character*64 output_name, temp_value - - output_name = ' ' - output_name_env = trim (name) // "_out_env" - -#if (defined AIX) - call getenv (trim(output_name_env), temp_value) - output_name = trim (temp_value) - if (len_trim(output_name) == 0) then - write(*,*)'output file names not preset by env varibales' - write(*,*)'so output not redirected' - else - open (unit=6, file=output_name, position='append') - call flush_(6) - endif -#endif - -#if (defined SUPERUX) - call getenv (trim(output_name_env), temp_value) - output_name = trim (temp_value) - if (len_trim(output_name) == 0) then - write(*,*)'output file names not preset by env varibales' - write(*,*)'so output not redirected' - else - open (unit=6, file=output_name, position='append') - call flush(6) - endif -#endif - -#if (defined IRIX64 || defined CRAY || defined sn6711) - lenname = len_trim (output_name_env) - call pxfgetenv (output_name_env,lenname,output_name,lenval,rcode) - if (len_trim(output_name) == 0) then - write(*,*)'output file names not preset by env varibales' - write(*,*)'so output not redirected' - else - open (unit=6, file=output_name, position='append') - call flush(6) - endif -#endif - -#if (!defined AIX && !defined IRIX64 && !defined CRAY && !defined sn6711 && !defined SUPERUX) - write(*,*) 'No implementation for this architecture' - write(*,*) 'output redirect is not performed by getenv' -#endif - - end subroutine MPH_redirect_output - - -!----------- subroutine MPH_help (arg) -------------- - - subroutine MPH_help (arg) - implicit none - - character*(*) arg - write(*,*)'Message from MPH_help:' - - if (arg .eq. 'off') then - write(*,*)'off' - - else if (arg .eq. 'Multi_Exec') then - write(*,*)'Multiple executables' - write(*,*)'Required setup function for pop is: ' - write(*,*)' call MPH_setup_ME ("ocean", POP_World)' - write(*,*)'Required input file is "components.in"' - - write(*,*)'Subroutine call to join two communicators is:' - write(*,*)' MPH_comm_join_ME_SE(name1,name2,comm_joined)' - - write(*,*)'Available inquiry functions are:' - write(*,*)' character*16 MPH_component_name(id)' - write(*,*)' integer MPH_get_component_id(name)' - write(*,*)' integer MPH_total_components()' - write(*,*)' integer MPH_global_proc_id()' - write(*,*)' character*16 MPH_myName_ME_SE()' - write(*,*)' integer MPH_component_id_ME_SE()' - write(*,*)' integer MPH_local_proc_id_ME_SE()' - write(*,*)' integer MPH_local_world_ME_SE()' - - else if (arg .eq. 
'Single_Exec') then - write(*,*)'Single executable, processors non-overlap' - write(*,*)'Required setup function is: ' - write(*,*)' call MPH_setup_SE (atmosphere=ccm3_8,& - & ocean=pop2_2, coupler=cpl5_1)' - write(*,*)'Required input file is "processors_map.in"' - - write(*,*)'Subroutine call to join two communicators is:' - write(*,*)' MPH_comm_join_ME_SE(name1,name2,comm_joined)' - - write(*,*)'Available inquiry functions are:' - write(*,*)' character*16 MPH_component_name(id)' - write(*,*)' integer MPH_get_component_id(name)' - write(*,*)' integer MPH_total_components()' - write(*,*)' integer MPH_global_proc_id()' - write(*,*)' character*16 MPH_myName_ME_SE()' - write(*,*)' integer MPH_component_id_ME_SE()' - write(*,*)' integer MPH_local_proc_id_ME_SE()' - write(*,*)' integer MPH_local_world_ME_SE()' - write(*,*)' integer MPH_low_proc_limit(id)' - write(*,*)' integer MPH_up_proc_limit(id)' - - else if (arg .eq. 'Single_Exec_Overlap') then - write(*,*)'Single executable, processors overlap' - write(*,*)'Required setup function is: ' - write(*,*)' call MPH_setup_SE_overlap ("atmosphere",& - & "ocean", "coupler")' - write(*,*)'Required input file is "processors_map.in"' - - write(*,*)'Subroutine call to join two communicators is:' - write(*,*)' MPH_comm_join_SE_overlap (name1, name2,& - & comm_joined)' - - write(*,*)'Available inquiry functions are:' - write(*,*)' character*16 MPH_component_name(id)' - write(*,*)' integer MPH_get_component_id(name)' - write(*,*)' integer MPH_total_components()' - write(*,*)' integer MPH_global_proc_id()' - write(*,*)' integer MPH_local_proc_id_SE_overlap(id)' - write(*,*)' integer MPH_local_world_SE_overlap(id)' - write(*,*)' integer MPH_low_proc_limit(id)' - write(*,*)' integer MPH_up_proc_limit(id)' - - else - write(*,*)'wrong argument for MPH_help' - endif - - end subroutine MPH_help - - -!----------- function MPH_timer (flag, channel) ------------ - -! Usage: - -! channel 0 is the default channel, using init_time. - -! --------------------------------------------------------- -! timer calls to walk-clock dclock(), and do the following: -! --------------------------------------------------------- -! flag=0 : Sets initial time; init all channels. -! -! flag =1 : Calculates the most recent time interval; accure it to the -! specified channel; -! Returns it to calling process. -! Channel 0 is the default channel, which is automatically accrued. - -! flag =2 : Calculates the most recent time interval; accure it to the -! specified channel; -! Returns the curent total time in the specified channel; -! Channel 0 is the default channel, which is automatically accrued. -! --------------------------------------------------------- - - real (kind=8) function MPH_timer (flag, channel) - integer flag, channel - real (kind=8) :: new_time, delta_time, MPI_Wtime - - new_time = MPI_Wtime() - - if (flag == 0) then - init_time = new_time - last_time = new_time - tot_time = 0.0 - MPH_timer = new_time - init_time - else if (init_time == -1.0) then -! Error Condition - MPH_timer = init_time - endif - -! Timer is initialized and flag != 0 - - delta_time = new_time - last_time - last_time = new_time - -! For channel=0 or other undefined channels which is treated as 0 - if ( channel < 0 .or. channel > N_CHANNELS) then - write(*,*) 'Timer channel is not properly specified!' - endif - -! 
channel != 0 - - if (flag == 1) then - tot_time(channel) = tot_time(channel) + delta_time - MPH_timer = delta_time - else if (flag == 2) then - tot_time(channel) = tot_time(channel) + delta_time - MPH_timer = tot_time(channel) - else -! Error Condition - MPH_timer = -1.0 - endif - - end function MPH_timer - - -!-------- common inquiry functions for MPH1, MPH2 and MPH3 ------- - - character*16 function MPH_component_name(id) - integer id - MPH_component_name = component_names (id) - end function MPH_component_name - - integer function MPH_get_component_id(name) - character*(*) name - MPH_get_component_id = MPH_find_name (name, component_names,& - total_components) - end function MPH_get_component_id - - integer function MPH_total_components() - MPH_total_components = total_components - end function MPH_total_components - - integer function MPH_global_proc_id() - MPH_global_proc_id = global_proc_id - end function MPH_global_proc_id - - end module comm_sub123 - - -! =============================================================== -! common subroutines used by MPH_Multi_Exec and MPH_Single_Exec -! =============================================================== - - module comm_sub12 - use comm_data123 - use comm_data12 - use comm_sub123 - - contains - -!--------------- subroutine MPH_global_ME_SE () ------------ - -! global hand-shaking among root processors of each component. - - subroutine MPH_global_ME_SE () - implicit none - integer sendtag, recvtag, i, color, key - -! create a MPI communicator COMM_master for all submasters -! arrange the rank of the submasters in COMM_master by their component_id -! i.e., their rank of the component model in "components.in" - if (local_proc_id == 0) then - color = 1 - else - color = 2 - endif - key = component_id - call MPI_COMM_SPLIT (MPI_COMM_WORLD,color,key,COMM_master,ierr) - -! gather Acomponents to 0th proc in COMM_master - if (local_proc_id == 0) then - call MPI_GATHER (components(component_id), 1, MPI_Acomponent,& - components, 1, MPI_Acomponent,& - 0, COMM_master, ierr) - -! 0th proc in COMM_master broadcast Acomponents to all submasters - call MPI_BCAST (components, total_components,& - MPI_Acomponent, 0, COMM_master, ierr) - endif - -! submaster broadcast AComponents to all process in the components - call MPI_BCAST (components, total_components,& - MPI_Acomponent, 0, local_world, ierr) - -! everybody lists the complete info -! write(*,*)'I am proc ', local_proc_id, ' in ', -! & component_names(component_id), ' , which is proc ', -! & global_proc_id, ' in global_world' -! write(*,*)'infos I have for all proc of all components are:' -! do i = 1, total_components -! write(*,*)' ', components(i)%name -! write(*,*)' ', components(i)%num_process -! write(*,*)' ', components(i)%process_list(1:8) ! partial list -! enddo - - end subroutine MPH_global_ME_SE - - -!------- subroutine MPH_comm_join_ME_SE (name1, name2, comm_joined) --- - - subroutine MPH_comm_join_ME_SE (name1, name2, comm_joined) - implicit none - - character*(*) name1, name2 - integer temp1, temp2 - integer comm_joined, color, key - - temp1 = MPH_find_name(name1,component_names,total_components) - temp2 = MPH_find_name(name2,component_names,total_components) - -! the order of two components does matter: first one has lower ranks in -! the new joined communicator, and second one has higher ranks. - - if (component_id==temp1 .or. 
component_id==temp2) then - color = 1 - if (component_id == temp1) then - key = local_proc_id - else - key = global_totProcs + local_proc_id - endif - else - color = 2 - key = 0 - endif - - call MPI_COMM_SPLIT (MPI_COMM_WORLD,color,key,comm_joined,ierr) - - end subroutine MPH_comm_join_ME_SE - - -!-------- common inquiry functions for MPH1 and MPH2 --------- - - character*16 function MPH_myName_ME_SE() - MPH_myName_ME_SE = component_names (component_id) - end function MPH_myName_ME_SE - - integer function MPH_component_id_ME_SE() - MPH_component_id_ME_SE = component_id - end function MPH_component_id_ME_SE - - integer function MPH_local_proc_id_ME_SE() - MPH_local_proc_id_ME_SE = local_proc_id - end function MPH_local_proc_id_ME_SE - - integer function MPH_local_world_ME_SE() - MPH_local_world_ME_SE = local_world - end function MPH_local_world_ME_SE - - end module comm_sub12 - - -! ============================================================== -! module MPH_Multi_Exec -! ============================================================== - -! Multi-Process Handshaking utility -! to facilitate a plug & play style programming on -! using multiple component executables. - - module MPH_Multi_Exec - use comm_data123 - use comm_data12 - use comm_sub123 - use comm_sub12 - character*16 myName - - contains - -!------------- subroutine MPH_setup_ME (name, comm_world) --------- - - subroutine MPH_setup_ME (name, comm_world) - implicit none - - character*(*) name - integer comm_world - - myName = name - call MPH_init () - call MPH_local_ME () - call MPH_global_ME_SE () - call MPI_COMM_DUP (local_world, comm_world, ierr) - - end subroutine MPH_setup_ME - - -!--------------- subroutine MPH_local_ME () ------------ - -! local hand-shaking - - subroutine MPH_local_ME () - implicit none - integer key - - total_components = MPH_read_list_ME("components.in",& - "COMPONENT_LIST", component_names, max_num_comps) - - component_id = MPH_find_name (myName, component_names,& - total_components) - key = 0 - call MPI_COMM_SPLIT (MPI_COMM_WORLD, component_id, key,& - local_world,ierr) - -! setup local_world, local_proc_id, local_totProcs - call MPI_COMM_RANK (local_world, local_proc_id, ierr) - call MPI_COMM_SIZE (local_world, local_totProcs, ierr) - - components(component_id)%name = myName - components(component_id)%num_process = local_totProcs - -! gather processor ids to 0th proc in this component. - call MPI_GATHER (global_proc_id, 1, MPI_INTEGER,& - components(component_id)%process_list,& - 1, MPI_INTEGER, 0, local_world, ierr) - - end subroutine MPH_local_ME - - -!--- function MPH_read_list_ME(filename, filetag, namelist, num) --- - - integer function MPH_read_list_ME(filename,filetag,namelist,num) - implicit none - integer i, num - character*(*) filename, filetag - character*16 namelist(num), firstline, temp - - open(10, file=filename, status='unknown') - read(10, '(a16)', end=200) firstline - if (firstline .ne. filetag) then - print *, 'ERROR: filetag inconsistent', filename - print *, 'ERROR: ', filetag, '!=', firstline - stop - endif - - read(10, '(a16)', end=200) temp - if (temp .ne. 'BEGIN') then - print *, 'ERROR: no BEGIN in ', filename - stop - endif - - do i = 1, num - read(10, '(a16)', end=100) temp - if (temp .ne. 'END') then - namelist(i) = temp - else - goto 200 - endif - enddo - -100 print *, 'ERROR: no END in ', filename - stop - -200 MPH_read_list_ME = i - 1 - close(10) - - return - end function MPH_read_list_ME - - end module MPH_Multi_Exec - - -! 
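! For reference, MPH_read_list_ME above expects "components.in" to hold the
! filetag line, a BEGIN line, one component name (up to 16 characters) per
! line, and END.  A minimal sketch, with illustrative component names only:
!
!   COMPONENT_LIST
!   BEGIN
!   atmosphere
!   ocean
!   coupler
!   END
!
! Each executable then registers itself with a call such as
!   call MPH_setup_ME ("ocean", POP_World)
! as shown in the MPH_help text above.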
============================================================== -! module MPH_Single_Exec -! ============================================================== - -! Multi-Process Handshaking utility -! to facilitate a plug & play style programming using single executable. -! each processor only execute one component model once. - - module MPH_Single_Exec - use comm_data123 - use comm_data12 - use comm_sub123 - use comm_sub12 - integer low_proc_limit(max_num_comps) - integer up_proc_limit(max_num_comps) - - contains - - -!---- subroutine MPH_setup_SE (atmosphere, ocean, coupler, land) ------ - - subroutine MPH_setup_SE (atmosphere, ocean, coupler, land,& - ice, biosphere, io) - implicit none - - optional atmosphere, ocean, coupler, land, ice, biosphere, io - external atmosphere, ocean, coupler, land, ice, biosphere, io - integer id - - call MPH_init () - - total_components = MPH_read_list_SE ("processors_map.in",& - "PROCESSORS_MAP", component_names,& - low_proc_limit, up_proc_limit, max_num_comps) - - if (present(atmosphere)) then - id=MPH_find_name("atmosphere",component_names,total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - call MPH_local_SE (id) - call MPH_global_ME_SE () - call atmosphere (local_world) - endif - endif - - if (present(ocean)) then - id=MPH_find_name("ocean",component_names,total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - call MPH_local_SE (id) - call MPH_global_ME_SE () - call ocean (local_world) - endif - endif - - if (present(coupler)) then - id=MPH_find_name("coupler",component_names,total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - call MPH_local_SE (id) - call MPH_global_ME_SE () - call coupler (local_world) - endif - endif - -! add more component models as follows: - if (present(land)) then - id=MPH_find_name("land",component_names,total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - call MPH_local_SE (id) - call MPH_global_ME_SE () - call land (local_world) - endif - endif - - if (present(ice)) then - id=MPH_find_name("ice",component_names,total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - call MPH_local_SE (id) - call MPH_global_ME_SE () - call ice (local_world) - endif - endif - - if (present(biosphere)) then - id=MPH_find_name("biosphere",component_names,total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - call MPH_local_SE (id) - call MPH_global_ME_SE () - call biosphere (local_world) - endif - endif - - if (present(io)) then - id=MPH_find_name("io",component_names,total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - call MPH_local_SE (id) - call MPH_global_ME_SE () - call io (local_world) - endif - endif - - end subroutine MPH_setup_SE - - -!--------------- subroutine MPH_local_SE (id) ------------ - -! local hand-shaking - - subroutine MPH_local_SE (id) - implicit none - integer id, key - - component_id = id - key = 0 - call MPI_COMM_SPLIT (MPI_COMM_WORLD, component_id,& - key, local_World, ierr) - -! 
setup local_world, local_proc_id, local_totProcs - call MPI_COMM_RANK (local_world, local_proc_id, ierr) - call MPI_COMM_SIZE (local_world, local_totProcs, ierr) - - components(component_id)%name = component_names(component_id) - components(component_id)%num_process = local_totProcs - -! gather processor ids to 0th proc in this component. - call MPI_GATHER (global_proc_id, 1, MPI_INTEGER,& - components(component_id)%process_list, 1,& - MPI_INTEGER, 0, local_world, ierr) - - end subroutine MPH_local_SE - - -!---- function MPH_read_list_SE (filename, filetag, namelist, -!---- low, up, num) -------- - - integer function MPH_read_list_SE (filename, filetag,& - namelist, low, up, num) - implicit none - integer i, num - character*(*) filename, filetag - character*16 namelist(num), firstline, temp - integer itemp1, itemp2 - integer low(num), up(num) - - open(10, file=filename, status='unknown') - read(10, *, end=100) firstline - if (firstline .ne. filetag) then - print *, 'ERROR: filetag inconsistent', filename - print *, 'ERROR: ', filetag, '!=', firstline - stop - endif - - read(10, *, end=200) temp - if (temp .ne. "BEGIN") then - print *, 'ERROR: no BEGIN in ', filename - stop - endif - - do i = 1, num - read(10, *, err=300, end=400) temp, itemp1, itemp2 - if (temp .eq. "END") goto 500 - namelist(i) = temp - low(i) = itemp1 - up(i) = itemp2 - enddo - -100 print *, 'ERROR: no filetag in ', filename - stop - -200 print *, 'ERROR: no BEGIN in ', filename - stop - -300 if (temp .eq. "END") then - goto 500 - else - print *, 'ERROR: either: no END in ', filename - print *, ' or: does not provide correct format as' - print *, ' in input example: ocean 11 18' - stop - endif - -400 print *, 'ERROR: no END in ', filename - stop - -500 MPH_read_list_SE = i - 1 - close(10) - - return - end function MPH_read_list_SE - - -!---- the following two functions are common for MPH2 and MPH3 ------- - - integer function MPH_low_proc_limit(id) - integer id - MPH_low_proc_limit = low_proc_limit(id) - end function MPH_low_proc_limit - - integer function MPH_up_proc_limit(id) - integer id - MPH_up_proc_limit = up_proc_limit(id) - end function MPH_up_proc_limit - - end module MPH_Single_Exec - - -! ============================================================== -! module MPH_Single_Exec_Overlap -! ============================================================== - -! Multi-Process Handshaking utility -! to facilitate a plug & play style programming using single executable. -! each processor could execute more than one component model (processor -! overlap) in any flexible way (any order). - - - module MPH_Single_Exec_Overlap - use comm_data123 - use comm_sub123 - - integer local_world(max_num_comps) ! communicator for this component - integer local_proc_id(max_num_comps) ! proc id in this component - integer local_totProcs(max_num_comps) ! total procs for this component - integer low_proc_limit(max_num_comps) - integer up_proc_limit(max_num_comps) - - contains - -!---- subroutine MPH_setup_SE_overlap (model1, model2, ...) 
------ - - subroutine MPH_setup_SE_overlap (model1, model2, model3, model4,& - model5, model6, model7, model8, model9, model10) - implicit none - - character*(*) model1, model2, model3, model4, model5 - character*(*) model6, model7, model8, model9, model10 - optional model1, model2, model3, model4, model5 - optional model6, model7, model8, model9, model10 - - integer id, i - - call MPH_init () - call MPH_local_SE_overlap () - call MPH_global_SE_overlap () - - end subroutine MPH_setup_SE_overlap - - -!--------------- subroutine MPH_local_SE_overlap () ------------ - - subroutine MPH_local_SE_overlap () - implicit none - integer id, color, key - - total_components=MPH_read_list_SE_overlap("processors_map.in",& - "PROCESSORS_MAP", component_names,& - low_proc_limit, up_proc_limit, max_num_comps,& - local_totProcs) - -! setup local_world, local_proc_id, local_totProcs - do id = 1, total_components - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - color = 1 - else - color = 2 - endif - key = 0 - call MPI_COMM_SPLIT (MPI_COMM_WORLD, color, key,& - local_World(id), ierr) - call MPI_COMM_RANK(local_world(id),local_proc_id(id),ierr) - enddo - - end subroutine MPH_local_SE_overlap - - -!--------------- subroutine MPH_global_SE_overlap () ------------ - - subroutine MPH_global_SE_overlap() - implicit none - integer id, i - -! record Acomponent for each component - do id = 1, total_components - components(id)%name = component_names(id) - components(id)%num_process = local_totProcs(id) - do i = low_proc_limit(id), up_proc_limit(id) - components(id)%process_list(i-low_proc_limit(id)+1)=i - enddo - enddo - -! everybody lists the complete info - do id = 1, total_components - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - write(*,*)'I am proc ', local_proc_id(id), ' in ',& - component_names(id), ' , which is proc ',& - global_proc_id, ' in global_world' - write(*,*)'infos I have for all proc of all components are:' - do i = 1, total_components - write(*,*)' ', components(i)%name - write(*,*)' ', components(i)%num_process - write(*,*)' ', components(i)%process_list(1:9) - enddo - endif - enddo - - end subroutine MPH_global_SE_overlap - - -!----------- subroutine PE_in_component (name, comm) ------------ - - logical function PE_in_component (name, comm) - implicit none - character*(*) name - integer id, comm - - id = MPH_find_name(name, component_names, total_components) - if (low_proc_limit(id) .le. global_proc_id .and.& - global_proc_id .le. up_proc_limit(id)) then - comm = local_world(id) - PE_in_component = .true. - else - PE_in_component = .false. - endif - - end function PE_in_component - - -!------ subroutine MPH_comm_join_SE_overlap (name1, name2, comm_joined) --- - - subroutine MPH_comm_join_SE_overlap (name1, name2, comm_joined) - implicit none - integer id1, id2 - - character*(*) name1, name2 - integer comm_joined, color, key - logical con1, con2 - - id1 = MPH_find_name(name1,component_names,total_components) - id2 = MPH_find_name(name2,component_names,total_components) - -! the order of two components does matter: first one has lower ranks in -! the new joined communicator, and second one has higher ranks. - - con1 = (low_proc_limit(id1) .le. global_proc_id) .and.& - (global_proc_id .le. up_proc_limit(id1)) - con2 = (low_proc_limit(id2) .le. global_proc_id).and.& - (global_proc_id .le. up_proc_limit(id2)) - - if (con1 .or. 
con2) then - color = 1 - if (con1) then - key = local_proc_id(id1) - else - key = global_totProcs + local_proc_id(id2) - endif - else - color = 2 - key = 0 - endif - - call MPI_COMM_SPLIT (MPI_COMM_WORLD,color,key,comm_joined,ierr) - - end subroutine MPH_comm_join_SE_overlap - - -!---- function MPH_read_list_SE_overlap (filename, filetag, namelist, -!---- low, up, num, local_num) ------ - - integer function MPH_read_list_SE_overlap (filename, filetag,& - namelist, low, up, num, local_num) - implicit none - integer i, num - character*(*) filename, filetag - character*16 namelist(num), firstline, temp - integer itemp1, itemp2 - integer low(num), up(num), local_num(num) - - open(10, file=filename, status='unknown') - read(10, *, end=100) firstline - if (firstline .ne. filetag) then - print *, 'ERROR: filetag inconsistent', filename - print *, 'ERROR: ', filetag, '!=', firstline - stop - endif - - read(10, *, end=200) temp - if (temp .ne. "BEGIN") then - print *, 'ERROR: no BEGIN in ', filename - stop - endif - - do i = 1, num - read(10, *, err=300, end=400) temp, itemp1, itemp2 - if (temp .eq. "END") goto 500 - namelist(i) = temp - low(i) = itemp1 - up(i) = itemp2 - local_num(i) = itemp2 - itemp1 + 1 - enddo - -100 print *, 'ERROR: no filetag in ', filename - stop - -200 print *, 'ERROR: no BEGIN in ', filename - stop - -300 if (temp .eq. "END") then - goto 500 - else - print *, 'ERROR: either: no END in ', filename - print *, ' or: does not provide correct format as' - print *, ' in input example: ocean 11 18' - stop - endif - -400 print *, 'ERROR: no END in ', filename - stop - -500 MPH_read_list_SE_overlap = i - 1 - close(10) - - return - end function MPH_read_list_SE_overlap - - -!--------- some special inquiry functions for MPH3 ----------- - - integer function MPH_local_proc_id_SE_overlap(id) - integer id - MPH_local_proc_id_SE_overlap = local_proc_id(id) - end function MPH_local_proc_id_SE_overlap - - integer function MPH_local_world_SE_overlap(id) - integer id - MPH_local_world_SE_overlap = local_world(id) - end function MPH_local_world_SE_overlap - -! -- the following two functions are common for MPH2 and MPH3 - - integer function MPH_low_proc_limit(id) - integer id - MPH_low_proc_limit = low_proc_limit(id) - end function MPH_low_proc_limit - - integer function MPH_up_proc_limit(id) - integer id - MPH_up_proc_limit = up_proc_limit(id) - end function MPH_up_proc_limit - - end module MPH_Single_Exec_Overlap - - -! ============================================================== -! module MPH_all -! ============================================================== - - module MPH_all - - use MPH_Multi_Exec - use MPH_Single_Exec - use MPH_Single_Exec_Overlap - - end module MPH_all diff --git a/src/externals/mct/testsystem/testall/pop.F90 b/src/externals/mct/testsystem/testall/pop.F90 deleted file mode 100644 index 74c8f35e6cb..00000000000 --- a/src/externals/mct/testsystem/testall/pop.F90 +++ /dev/null @@ -1,650 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -! CVS $Id: pop.F90,v 1.15 2004-03-04 20:04:17 eong Exp $ -! CVS $Name: $ -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: pop2_2 -- dummy ocean model for unit tester -! -! !DESCRIPTION: -! An ocean model subroutine to test functionality of MPH and MCT. -! -! !INTERFACE: - subroutine pop2_2 (POP_World) -! -! !USES: -! 
- use MPH_all -!---Component Model Registry - use m_MCTWorld,only: ThisMCTWorld - use m_MCTWorld,only: MCTComponentRootRank => ComponentRootRank - use m_MCTWorld,only: MCTWorld_init => init - use m_MCTWorld,only: MCTWorld_clean => clean -!---Intercomponent communications scheduler - use m_Router,only: Router - use m_Router,only: MCT_Router_init => init - use m_Router,only: MCT_Router_clean => clean - use m_Transfer,only: MCT_Send => send - use m_Transfer,only: MCT_Recv => recv -!---Field Storage DataType and associated methods - use m_AttrVect,only : AttrVect - use m_AttrVect,only : MCT_AtrVt_init => init - use m_AttrVect,only : MCT_AtrVt_clean => clean - use m_AttrVect,only : MCT_AtrVt_lsize => lsize - use m_AttrVect,only : MCT_AtrVt_nReal => nRAttr - use m_AttrVect,only : MCT_AtrVt_nInteger => nIAttr - use m_AttrVect,only : AttrVect_zero => zero - use m_AttrVect,only : AttrVect_Copy => Copy - use m_AttrVectComms,only : AttrVect_gather => gather -!---Domain Decomposition Descriptor DataType and associated methods - use m_GlobalSegMap,only: GlobalSegMap - use m_GlobalSegMap,only: MCT_GSMap_init => init - use m_GlobalSegMap,only: MCT_GSMap_clean => clean - use m_GlobalSegMap,only: MCT_GSMap_gsize => gsize - use m_GlobalSegMap,only: MCT_GSMap_lsize => lsize - use m_GlobalSegMap,only: MCT_GSMap_ngseg => ngseg - use m_GlobalSegMap,only: MCT_GSMap_nlseg => nlseg - use m_GlobalMap,only : GlobalMap - use m_GlobalMap,only : GlobalMap_init => init - use m_GlobalMap,only : GlobalMap_clean => clean -!---GlobalSegMap Communication Methods - use m_GlobalSegMapComms,only: GlobalSegMap_bcast => bcast - use m_GlobalSegMapComms,only: GlobalSegMap_send => send - use m_GlobalSegMapComms,only: GlobalSegMap_recv => recv - use m_GlobalSegMapComms,only: GlobalSegMap_isend => isend -!---Methods for Exchange of GlobalMapping Objects - use m_ExchangeMaps,only: ExchangeMap -!---Coordinate Grid DataType and associated methods - use m_GeneralGrid,only : GeneralGrid - use m_GeneralGrid,only : MCT_GGrid_init => init - use m_GeneralGrid,only : MCT_GGrid_clean => clean - use m_GeneralGrid,only : MCT_GGrid_dims => dims - use m_GeneralGrid,only : MCT_GGrid_lsize => lsize - use m_GeneralGrid,only : MCT_GGrid_indexIA => indexIA - use m_GeneralGrid,only : MCT_GGrid_indexRA => indexRA - use m_GeneralGrid,only : MCT_GGrid_exportIAttr => exportIAttr - use m_GeneralGrid,only : MCT_GGrid_importIAttr => importIAttr - use m_GeneralGrid,only : MCT_GGrid_exportRAttr => exportRAttr - use m_GeneralGrid,only : MCT_GGrid_importRAttr => importRAttr - use m_GeneralGrid,only : MCT_GGrid_SortPermute => sortpermute - use m_GeneralGridComms,only: MCT_GGrid_send => send - use m_GeneralGridComms,only: MCT_GGrid_scatter => scatter - use m_GeneralGridComms,only: MCT_GGrid_gather => gather -!---Spatial Integral DataType and associated methods - use m_SpatialIntegral,only : MCT_SpatialIntegral => SpatialIntegral - use m_SpatialIntegral,only : MCT_SpatialAverage => SpatialAverage - use m_SpatialIntegral,only : MCT_MaskedSpatialIntegral => & - MaskedSpatialIntegral - use m_SpatialIntegral,only : MCT_MaskedSpatialAverage => & - MaskedSpatialAverage - -!---mpeu List datatype - use m_List, only : List - use m_List, only : List_clean => clean - use m_List, only : List_exportToChar => exportToChar -!---mpeu routines for MPI communications - use m_mpif90 -!---mpeu timers - use m_zeit - - use m_stdio - use m_ioutil, only: luavail - use m_die - -!---Tester Modules - use m_ACTEST, only : Accumulator_test => testall - use m_AVTEST, only : AttrVect_test => testall 
- use m_AVTEST, only : AttrVect_identical => Identical - use m_GGRIDTEST, only : GGrid_test => testall - use m_GGRIDTEST, only : GGrid_identical => Identical - use m_GMAPTEST, only : GMap_test => testall - use m_GSMAPTEST, only : GSMap_test => testall - use m_GSMAPTEST, only : GSMap_identical => Identical - use m_MCTWORLDTEST, only : MCTWorld_test => testall - use m_ROUTERTEST, only : Router_test => testall - use m_SMATTEST, only : sMat_test => testall - use m_SMATTEST, only : sMat_identical => Identical - -! -! !REVISION HISTORY: -! Oct00 - Yun (Helen) He and Chris Ding, NERSC/LBNL - initial version -! 19Nov00 - R. Jacob - interface with mct -! 09Feb01 - R. Jacob - add MPI_Barrier -! 25Feb01 - R. Jacob - mpeu timing and MPE -! 15Feb02 - R. Jacob - new MCTWorld_init interface -! 13Jul02 - E. Ong - introduce a POP grid -!EOP ___________________________________________________________________ - - implicit none - - character(len=*), parameter :: popname='pop2_2' - -!----------------------- MPH vars - - integer myProc, myProc_global, mySize, root - integer Global_World, POP_World - integer ncomps, mycompid, coupler_id - -! SparseMatrix dimensions and Processor Layout - integer :: Nax, Nay ! Atmosphere lons, lats - integer :: Nox, Noy ! Ocean lons, lats - integer :: NPROCS_LATA, NPROCS_LONA ! Processor layout - -!----------------------- MCT vars - - ! Variables used for GlobalSegMap - integer,dimension(1) :: starts,lengths - integer :: osize,osize2 - integer :: i,j,k,n - - ! Arrays used to test MCT import/export routines - integer,dimension(:),pointer :: MaskVector - integer, dimension(:), pointer :: dummyI - real, dimension(:), pointer :: dummyR - integer :: latindx,lonindx,gridindx,status - integer :: length - integer :: dAindx - real :: pi - - ! Ocean GeneralGrid - type(GeneralGrid) :: POPGrid, dPOPGrid - - ! Test grid for scatter,gather - type(GeneralGrid) :: scatterGGrid, gatherGGrid - - ! Ocean GlobalSegMap - type(GlobalSegMap) :: OGSMap - - ! Ocean GlobalSegMap from coupler - type(GlobalSegMap) :: CPL_OGSMap - - ! GSMap for testing GlobalSegMapComms - type(GlobalSegMap) :: inGSMap - - ! Ocean GlobalMap - type(GlobalMap) :: OGMap - - ! Router from Cpl to Ocn - type(Router) :: Cpl2Ocn - - ! Ocean Inputs from the Coupler and Integral - type(AttrVect) :: OinputAV, IntegratedOinputAV - - ! Ocean Outputs to the Coupler - type(AttrVect) :: OoutputAV - - ! Temporary Vars for hmv tests - type(AttrVect) :: gatherAV_ocn - integer :: unit - -#ifdef MPE -#include "mpe.h" -#endif - -! Set the value of pi: - pi = acos(-1.0) - -!-------------------------begin code - - call MPI_COMM_DUP (MPI_COMM_WORLD, Global_World, ierr) - call MPI_COMM_RANK (Global_World, myProc_global, ierr) - call MPI_COMM_RANK (POP_World, myProc, ierr) - call MPI_COMM_SIZE(POP_World,mySize,ierr) - - if (myProc==0) call MPH_redirect_output ('pop') -! write(*,*) myProc, ' in pop === ', myProc_global, ' in global' -! write(*,*) 'MPH_local_proc_id_ME_SE()=', MPH_local_proc_id_ME_SE() -! write(*,*) 'MPH_global_proc_id()=', MPH_global_proc_id() - - -!------------------------------------------------------- -! Begin attempts to use MCT -#ifdef MPE - call mpe_logging_init(myProc_global,init_s,init_e,gsmi_s,gsmi_e, & - atri_s,atri_e,routi_s,routi_e,send_s,send_e,recv_s,recv_e, & - clean_s,clean_e) -#endif - - ! Get the coupler's component id - coupler_id = MPH_get_component_id("coupler") - - ! 
Initialize MCTWorld - ncomps=MPH_total_components() - mycompid=MPH_component_id_ME_SE() - call zeit_ci('Oworldinit') - call MCTWorld_init(ncomps,MPI_COMM_WORLD,POP_World,mycompid) - call zeit_co('Oworldinit') - - call MCTWorld_test("POP::MCTWorld",6200+myProc) - - ! Get the Sparse Matrix dimensions and processor layout - root = MCTComponentRootRank(coupler_id,ThisMCTWorld) - call MPI_BCAST(Nax,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Nay,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Nox,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(Noy,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(NPROCS_LATA,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - call MPI_BCAST(NPROCS_LONA,1,MP_INTEGER,root,MPI_COMM_WORLD,ierr) - - - ! Load a POP grid on the ROOT PROCESS - -if(myProc==0) then - - write(*,*) popname, ":: Initializing Ocean General Grid" - -! NOTE: Since POP grids already have a predefined order, -! do not impose a sorting order upon initialization - - call convertPOPT(POPGrid, & - "../../data/grid.320x384.da", & - "../../data/kmt_full_40.da", Nox, Noy) - - call GGrid_test(POPGrid,"POP::POPGrid",3400+myProc) - -! Write out the basic things we initialized - - write(stdout,'(3a,i1)') popname, ":: Initialized POP GeneralGrid variable POPGrid.", & - "Number of dimensions = ",MCT_GGrid_dims(POPGrid) - write(stdout,'(2a,i8)') popname, ":: Number of grid points in POPGrid=", & - MCT_GGrid_lsize(POPGrid) - write(stdout,'(2a,i8)') popname, ":: Number of latitudes Noy=", Noy - write(stdout,'(2a,i8)') popname, ":: Number of longitudes Nox=", Nox - write(stdout,'(2a,i8)') popname, ":: Number of grid points Nox*Nox=", Noy*Nox - write(stdout,'(3a)') popname, ":: POPGrid%coordinate_list = ", & - List_exportToChar(POPGrid%coordinate_list) -! write(stdout,'(3a)') popname, ":: POPGrid%coordinate_sort_order = ", & -! List_exportToChar(POPGrid%coordinate_sort_order) - write(stdout,'(3a)') popname, ":: POPGrid%weight_list = ", & - List_exportToChar(POPGrid%weight_list) - write(stdout,*) popname, ":: POPGrid%other_list = ", & - ! * is used for SUPER_UX compatibility - List_exportToChar(POPGrid%other_list) - write(stdout,'(3a)') popname, ":: POPGrid%index_list = ", & - List_exportToChar(POPGrid%index_list) - write(stdout,'(2a,i3)') popname, ":: Number of integer attributes stored in POPGrid=", & - MCT_AtrVt_nInteger(POPGrid%data) - write(stdout,'(2a,i3)') popname, ":: Total Number of real attributes stored in POPGrid=", & - MCT_AtrVt_nReal(POPGrid%data) - -! Get POPGrid attribute indicies - latindx=MCT_GGrid_indexRA(POPGrid,'grid_center_lat') - lonindx=MCT_GGrid_indexRA(POPGrid,'grid_center_lon') - -! NOTE: The integer attribute GlobGridNum is automatically -! appended to any General Grid. Store the grid numbering -! scheme (used in the GlobalSegMap) here. - gridindx=MCT_GGrid_indexIA(POPGrid,'GlobGridNum') - - do i=1,MCT_GGrid_lsize(POPGrid) - POPGrid%data%iAttr(gridindx,i)=i - enddo - -! Check the weight values of the grid_area attribute - - dAindx = MCT_GGrid_indexRA(POPGrid, 'grid_area') - - write(stdout,'(2a)') popname, & - ':: Various checks of GeneralGrid POPGrid Weight data...' 
- write(stdout,'(2a,f12.6)') popname, & - ':: direct ref--POPGrid 1st dA entry=.', & - POPGrid%data%rAttr(dAindx,1) - write(stdout,'(2a,f12.6)') popname, & - ':: direct ref--POPGrid last dA entry=.', & - POPGrid%data%rAttr(dAindx,MCT_GGrid_lsize(POPGrid)) - write(stdout,'(2a,f12.6)') popname, & - ':: Sum of dA(1,...,Nox*Noy)=.', sum(POPGrid%data%rAttr(dAindx,:)) - write(stdout,'(2a,f12.6)') popname, & - ':: Unit Sphere area 4 * pi=.', 4.*pi - -! Check on coordinate values (and check some export functions, too...) - - allocate(dummyR(MCT_GGrid_lsize(POPGrid)), stat=ierr) - if(ierr/=0) call die(popname, "allocate(dummyR)", ierr) - - call MCT_GGrid_exportRAttr(POPGrid, 'grid_center_lat', dummyR, length) - - write(stdout,'(2a)') popname, & - ':: Various checks of GeneralGrid POPGrid coordinate data...' - write(stdout,'(2a,i8)') popname, & - ':: No. exported POPGrid latitude values =.',length - write(stdout,'(2a,f12.6)') popname, & - ':: export--POPGrid 1st latitude=.',dummyR(1) - write(stdout,'(2a,f12.6)') popname, & - ':: export--POPGrid last latitude=.',dummyR(length) - write(stdout,'(2a,f12.6)') popname, & - ':: direct ref--POPGrid 1st latitude=.', & - POPGrid%data%rAttr(latindx,1) - write(stdout,'(2a,f12.6)') popname, & - ':: direct ref--POPGrid last latitude=.', & - POPGrid%data%rAttr(latindx,length) - write(stdout,'(2a,f12.6)') popname, & - ':: direct ref--POPGrid 1st longitude=.', & - POPGrid%data%rAttr(lonindx,1) - write(stdout,'(2a,f12.6)') popname, & - ':: direct ref--POPGrid last longitude=.', & - POPGrid%data%rAttr(lonindx,MCT_GGrid_lsize(POPGrid)) - write(stdout,'(2a)') popname, & - ':: End checks of GeneralGrid POPGrid coordinate data.' - -! Check the GlobalGridNum values: - - allocate(dummyI(MCT_GGrid_lsize(POPGrid)), stat=ierr) - if(ierr/=0) call die(popname, "allocate(dummyI)", ierr) - - call MCT_GGrid_exportIAttr(POPGrid, 'GlobGridNum', dummyI, length) - - write(stdout,'(2a,i8)') popname, & - ':: No. exported POPGrid GlobalGridNum values =.',length - write(stdout,'(2a,i8)') popname, & - ':: export--POPGrid 1st GlobalGridNum =.', dummyI(1) - write(stdout,'(2a,i8)') popname, & - ':: export--POPGrid last GlobalGridNum =.', dummyI(length) - write(stdout,'(2a,i8)') popname, & - ':: direct ref--POPGrid 1st GlobalGridNum =.', & - POPGrid%data%iAttr(gridindx,1) - write(stdout,'(2a,i8)') popname, & - ':: direct ref--POPGrid last GlobalGridNum =.', & - POPGrid%data%iAttr(gridindx,length) - -! Clean temporary structures - - deallocate(dummyI, dummyR, stat=ierr) - if(ierr/=0) call die(popname, "deallocate(dummyI...)", ierr) - -endif ! if(myProc==0) - -! send the ocean's grid from the ocean's root to the -! coupler's root. 2800 is the randomly chosen tag base. -if(myProc==0) call MCT_GGrid_send(POPGrid,coupler_id,2800,ierr) - -!:::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! Describe OGSMap, the ocean grid decomposition - - ! number of local oceanpoints - osize = (Noy * Nox)/mySize - osize2 = osize - - ! (Noy *Nox)/mySize isnt an integer, give extra points to last proc. - if(myProc == mySize - 1) then - osize = osize + mod(Noy*Nox,mySize) - endif - - ! find starting point in the numbering scheme - ! numbering scheme is same as that used in ocean model. - starts(1) = (myProc * osize2) +1 - lengths(1) = osize - - ! describe this information in a Global Map for the ocean. - call zeit_ci('OGSMapinit') - call MCT_GSMap_init(OGSMap,starts,lengths,0,POP_World,mycompid) - call zeit_co('OGSMmapinit') - -!!! test some GlobalSegMap functions -! 
write(*,*)myProc,'number of global segs is',MCT_GSMap_ngseg(OGSMap) -! write(*,*)myProc,'local size is',MCT_GSMap_lsize(OGSMap,CPL_World) -! write(*,*)myProc,'global size is',MCT_GSMap_gsize(OGSMap) - - ! make a sample GlobalMap based on the local sizes of the GlobalSegMap - call GlobalMap_init(OGMap,mycompid,MCT_GSMap_lsize(OGSMap,POP_World), & - POP_World) - call GMap_test(GMap=OGMap,Identifier="POP::OGMap", & - mycomm=POP_World,device=4200+myProc) - - ! lets exchange maps with the coupler - call ExchangeMap(OGMap,POP_World,CPL_OGSMap,coupler_id,ierr) - if(ierr/=0) call die(popname,"call ExchangeMap") - - call GMap_test(GMap=OGMap,Identifier="POP::OGMap", & - mycomm=POP_World,device=4300+myProc) - call GSMap_test(CPL_OGSMap,"POP::CPL_OGSMap",POP_World,5200+myProc) - - ! Compare this to sending and recieving maps - if(myProc==0) then - - call GlobalSegMap_recv(inGSMap,coupler_id,777) - if (.NOT.(GSMap_identical(inGSMap,CPL_OGSMap))) then - call die(popname,"GSMap_identical(inGSMap,CPL_OGSMap)") - endif - call MCT_GSMap_clean(inGSMap) - - call GlobalSegMap_recv(inGSMap,coupler_id,888) - if (.NOT.(GSMap_identical(inGSMap,CPL_OGSMap))) then - call die(popname,"GSMap_identical(inGSMap,CPL_OGSMap)") - endif - call MCT_GSMap_clean(inGSMap) - - endif - -!:::::::GGRID COMMUNICATIONS TESTING:::::::! - - call MCT_GGrid_scatter(POPGrid,scatterGGrid,OGMap,0,POP_World) - call MCT_GGrid_gather(scatterGGrid,gatherGGrid,OGMap,0,POP_World) - - if(myProc==0) then - if(.NOT. GGrid_identical(POPGrid,gatherGGrid,0.1) ) then - call die(popname,"GGrid Comms test failed") - endif - endif - -! declare an attrvect to hold all ocean model inputs -! NOTE: the size of the AttrVect is set to be the local -! size of the GSMap. - - call zeit_ci('OInputAVinit') - - call MCT_AtrVt_init(OinputAV, & - rList=& -! net solar radiation - "solrad:& -! downward direct visible radiation - &dirvis:& -! downward diffuse visible radiation - &difvis:& -! downward direct near-infrared radiation - &dirnif:& -! downward diffuse near-infrared radiation - &difnif:& -! convective precip - &precc:& -! large-scale precip - &precl",& - lsize=MCT_GSMap_lsize(OGSMap, POP_World)) - - call zeit_co('OinputAVinit') - -! declare an attrvect to hold all ocean model outputs -! NOTE: the size of the AttrVect is set to be the local -! size of the GSMap. - - call zeit_ci('OoutputAVinit') - - call MCT_AtrVt_init(OoutputAV, & - rList=& -! East-West Gradient of Ocean Surface Height - "dhdx:& -! North-South Gradient of Ocean Surface Height - &dhdy:& -! Heat of Fusion of Ocean Water - &Qfusion:& -! Sea Surface Temperature - &SST:& -! Salinity - &salinity:& -! East Component of the Surface Current - &Uocean:& -! East Component of the Surface Current - &Vocean",& - lsize=MCT_GSMap_lsize(OGSMap, POP_World)) - - call zeit_co('OoutputAVinit') - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!--Build Router -! -! Intialize router between atmosphere and coupler using AGSMap. -! This call must be paired with a similar call in cp - call zeit_ci('OCplRouterInit') - call MCT_Router_init(coupler_id,OGSMap,POP_World,Cpl2Ocn) - call zeit_co('OCplRouterInit') - - call Router_test(Cpl2Ocn,"POP::Cpl2Ocn",7200+myProc) - -!::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - - ! Lets prepare to do some neat integrals using MCT. - ! First, we must scatter the Ocean Grid: - - call MCT_GGrid_scatter(POPGrid, dPOPGrid, OGSMap, 0, POP_World) - - ! 
Then, receive the accumulated and interpolated attrvect from the coupler - if(myProc == 0) write(stdout,*) popname,':: Before MCT_RECV from CPL.' - call zeit_ci('OinputAVrecv') - call MCT_Recv(OinputAV,Cpl2Ocn) - call zeit_co('OinputAVrecv') - call AttrVect_test(OinputAV,"POP::OinputAV",2600) - if(myProc == 0) write(stdout,*) popname,':: After MCT_RECV from CPL.' - - ! Lets check the values to make sure our asci matrix file - ! corresponds to the imask in our GeneralGrid. - allocate(MaskVector(MCT_GGrid_lsize(dPOPGrid)), stat=ierr) - if(ierr/=0) call die(popname, "allocate(dPOPGrid)", ierr) - - call MCT_GGrid_exportIAttr(dPOPGrid,"grid_imask",MaskVector,k) - - if(MCT_GGrid_lsize(dPOPGrid)/=k) then - call die(popname,"MCT_GGrid_exportIAttr failed") - endif - - do i=1,k - if(MaskVector(i)==0) then - if(abs(OinputAV%rAttr(1,i)-MaskVector(i)) > 1e-4) then - call die(popname,"GeneralGrid Mask does not match & - &matrix file mask") - endif - endif - enddo - - deallocate(MaskVector,stat=ierr) - if(ierr/=0) call die(popname,"deallocate(MaskVector)",ierr) - - ! TEST MAPPING FOR HMV - - call AttrVect_gather(OinputAV,gatherAV_ocn,OGSMap, & - 0,POP_World,ierr) - - if(myProc == 0) then - unit = luavail() + 9000 - write(unit,*) Nox, Noy - k=0 - do i=1,Nox - do j=1,Noy - k=k+1 - write(unit,*) gatherAV_ocn%rAttr(1,k) - enddo - enddo - call MCT_AtrVt_clean(gatherAV_ocn) - endif - - ! Now, Test the MCT Spatial Integration/Averaging Services... - if(myProc==0)write(stdout,'(3a)') popname,':: on-Root test of MCT Spatial ', & - 'Integration Services...' - - ! simple unmasked integral case: - - call MCT_SpatialIntegral(OinputAV, integratedOinputAV, dPOPGrid, 'grid_area', & - comm=POP_World) - - if(myProc==0)then - do i=1,MCT_AtrVt_nReal(integratedOinputAV) - write(stdout,'(3a,i2,a,f12.6)') popname,':: Unmasked distributed MCT ', & - 'integral: integratedOinputAV%rAttr(',i,',1)=', & - integratedOinputAV%rAttr(i,1) - end do - endif - - call MCT_AtrVt_clean(integratedOinputAV) - - ! simple unmasked average case: - call MCT_SpatialAverage(OinputAV, integratedOinputAV, dPOPGrid, 'grid_area', & - comm=POP_World) - -if(myProc==0)then - do i=1,MCT_AtrVt_nReal(integratedOinputAV) - write(stdout,'(3a,i2,a,f12.6)') popname,':: Unmasked distributed MCT ', & - 'average: averagedOinputAV%rAttr(',i,',1)=', & - integratedOinputAV%rAttr(i,1) - end do -endif - call MCT_AtrVt_clean(integratedOinputAV) - - ! masked average case... - - call MCT_MaskedSpatialAverage(inAv=OinputAV, outAv=integratedOinputAV, & - GGrid=dPOPGrid, SpatialWeightTag='grid_area', & - iMaskTags='grid_imask', UseFastMethod=.TRUE., & - comm=POP_World) - -if(myProc==0)then - do i=1,MCT_AtrVt_nReal(integratedOinputAV) - write(stdout,'(3a,i2,a,f12.6)') popname,':: Masked distributed MCT ', & - 'average (both iMask & rMask = unity): averagedOinputAV%rAttr(',i,',1)=', & - integratedOinputAV%rAttr(i,1) - end do -endif - call MCT_AtrVt_clean(integratedOinputAV) - - call GGrid_test(dPOPGrid,"POP::dPOPGrid",3500+myProc) - - ! Fill the Ocean's output with test values: - ! the first attribute will be constant, while - ! the rest will contain interolated values from OinputAV - call AttrVect_copy(aVin=OinputAV,aVout=OoutputAV, & - rList=List_exportToChar(OinputAV%rList), & - TrList=List_exportToChar(OoutputAV%rList)) - - OoutputAV%rAttr(1,:) = 30. - - ! Now, send the Ocean's output to the Coupler... - if(myProc == 0) write(stdout,*) popname,':: Before MCT_SEND to CPL.' 
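! Note: the MCT_Send below is the outbound half of the exchange whose inbound
! half was the earlier MCT_Recv(OinputAV,Cpl2Ocn); both travel over the Cpl2Ocn
! router, and each must be matched by the corresponding MCT_Recv/MCT_Send posted
! on the coupler side (over whatever router the coupler built for the ocean,
! not shown here), otherwise the transfer cannot complete.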
- call zeit_ci('OoutputAVsend') - call MCT_Send(OoutputAV,Cpl2Ocn) - call zeit_co('OoutputAVsend') - if(myProc == 0) write(stdout,*) popname,':: After MCT_SEND to CPL.' - - ! All Done - call zeit_ci('Ocleanup') - - ! Clean MCT datatypes - if(myProc==0) then - call MCT_GGrid_clean(POPGrid) - call MCT_GGrid_clean(gatherGGrid) - endif - - call MCT_GGrid_clean(scatterGGrid) - call MCT_GGrid_clean(dPOPGrid) - call MCT_AtrVt_clean(OinputAV) - call MCT_AtrVt_clean(OoutputAV) - call MCT_GSMap_clean(OGSMap) - call MCT_GSMap_clean(CPL_OGSMap) - call GlobalMap_clean(OGMap) - call MCT_Router_clean(Cpl2Ocn) - call MCTWorld_clean() - - call zeit_co('Ocleanup') - -! write out timing info to fortran unit 47 - call zeit_allflush(POP_World,0,47) - - -end subroutine - - - - - - - - - diff --git a/src/externals/mct/testsystem/testall/processors_map.in b/src/externals/mct/testsystem/testall/processors_map.in deleted file mode 100644 index dc260c7e40f..00000000000 --- a/src/externals/mct/testsystem/testall/processors_map.in +++ /dev/null @@ -1,12 +0,0 @@ -PROCESSORS_MAP -BEGIN -atmosphere 0 1 -coupler 2 3 -ocean 4 5 -END -NPROCS_ATM 1 2 -ADD any comments in this line and below. -1) -ccm.3.6, ocean_POP, couple.PCM are all legitimate name, too. -2) -Order of names is irrelevant. diff --git a/src/externals/mct/testsystem/testall/script.jag b/src/externals/mct/testsystem/testall/script.jag deleted file mode 100644 index d62277c7019..00000000000 --- a/src/externals/mct/testsystem/testall/script.jag +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/csh -#PBS -N mct -#PBS -j oe -#PBS -q debug - -#PBS -A cli017esm -##PBS -l feature=xt5 -#PBS -l size=16 -#PBS -l walltime=01:00:00 -#PBS -l gres=widow3 -#PBS -j oe -#PBS -S /bin/csh -V - - -cd $PBS_O_WORKDIR -date -setenv MPICH_NO_BUFFER_ALIAS_CHECK 1 -aprun -n 6 ./utmct diff --git a/src/externals/mct/testsystem/testall/ut_SparseMatrix.rc b/src/externals/mct/testsystem/testall/ut_SparseMatrix.rc deleted file mode 100644 index 0aaa729738c..00000000000 --- a/src/externals/mct/testsystem/testall/ut_SparseMatrix.rc +++ /dev/null @@ -1,29 +0,0 @@ -#------------------------------------------------------------------------- -# Math + Computer Science Division / Argonne National Laboratory ! -#----------------------------------------------------------------------- -# CVS $Id: ut_SparseMatrix.rc,v 1.4 2003-08-11 23:24:25 eong Exp $ -# CVS $Name: $ -#------------------------------------------------------------------------- -# -# !FILE: ut_SparseMatrix.rc -# -# !DESCRIPTION: This is the resource file for the SparseMatrix unit -# tester. -# -# !SEE ALSO: ./ut_SparseMatrix.F90 (SparseMatrix unit tester). -# -# -# !REVISION HISTORY: -# -# 11Apr01 J.W. Larson -- Initial version. 
-# -#------------------------------------------------------------------------- -Data_Directory: ../../data -atmosphere_to_ocean_remap_file: t42_to_popx1_c_mat.asc -ocean_to_atmosphere_remap_file: popx1_to_t42_c_mat.asc -atmosphere_dimensions: 128 64 -ocean_dimensions: 320 384 - - - - diff --git a/src/externals/mct/testunit/.gitignore b/src/externals/mct/testunit/.gitignore deleted file mode 100644 index bebbb2047a5..00000000000 --- a/src/externals/mct/testunit/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -mctTester -AttrVect.log.* -pbs.mct.script -MCTtestunit.o* diff --git a/src/externals/mct/testunit/AttrVect_Test.F90 b/src/externals/mct/testunit/AttrVect_Test.F90 deleted file mode 100644 index 764e5b8e47d..00000000000 --- a/src/externals/mct/testunit/AttrVect_Test.F90 +++ /dev/null @@ -1,1907 +0,0 @@ -!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -! Math and Computer Science Division, Argonne National Laboratory ! -!----------------------------------------------------------------------- -!BOP ------------------------------------------------------------------- -! -! !ROUTINE: AttrVectTest.F90 -- Unit tests for MCT Attribute Vector -! -! !DESCRIPTION: Unit tests for all subroutines in mct/m_AttrVect.F90 -! and a top level program to call them all. -! -! !REVISION HISTORY: -! 11Jan11 - Sheri Mickelson - Initial version. -!EOP ___________________________________________________________________ - -!#################################### -!# -!# Call of of the tests for m_AttrVect -!# -!#################################### - -subroutine testAttrVect(mypid, AVui) - -implicit none - -integer mypid -integer AVui - -call testAttrVect_lsize(mypid,AVui) - -call testAttrVect_clean(mypid,AVui) - -call testAttrVect_init(mypid,AVui) - -call testAttrVect_zero(mypid,AVui) - -call testAttrVect_nIAttr(mypid,AVui) - -call testAttrVect_nRAttr(mypid,AVui) - -call testAttrVect_indexIA(mypid,AVui) - -call testAttrVect_indexRA(mypid,AVui) - -call testAttrVect_getIList(mypid,AVui) - -call testAttrVect_getRList(mypid,AVui) - -call testAttrVect_exportIList(mypid,AVui) - -call testAttrVect_exportRList(mypid,AVui) - -call testAttrVect_exportIListToChar(mypid,AVui) - -call testAttrVect_exportRListToChar(mypid,AVui) - -call testAttrVect_appendIAttr(mypid,AVui) - -call testAttrVect_appendRAttr(mypid,AVui) - -call testAttrVect_exportIAttr(mypid,AVui) - -call testAttrVect_exportRAttr(mypid,AVui) - -call testAttrVect_importIAttr(mypid,AVui) - -call testAttrVect_importRAttr(mypid,AVui) - -call testAttrVect_copy(mypid,AVui) - -call testAttrVect_sort(mypid,AVui) - -call testAttrVect_permute(mypid,AVui) - -call testAttrVect_unpermute(mypid,AVui) - -call testAttrVect_sortPermute(mypid,AVui) - -call testAttrVect_sharedAttrIndexList(mypid,Avui) - -end subroutine - -!#################################### -!# -!# Test AttrVect_lsize -!# -!#################################### -subroutine testAttrVect_lsize(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_lsize => lsize -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect - -implicit none - -integer mypid -integer AVui -integer length -integer returnedLength - -type(AttrVect) :: av - -length = 3 - -! initialize vector -call MCT_AtrVt_init(av,iList="lat:lon:time",lsize=length) - -! get the size of the new vector -returnedLength = MCT_AtrVt_lsize(av) - -! test to see if the size is correct -if(returnedLength == length) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_lsize",1,"PASS") - if(mypid .eq. 
0) call outputRoutineStatus(AVui,"AttrVect_lsize","PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_lsize",1,"FAIL") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_lsize","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_clean -!# -!#################################### -subroutine testAttrVect_clean(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_lsize => lsize -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -type(AttrVect) :: av -integer ier, result - -result = 0 - -! test the different optional args to make sure all combos work -! first initializes new vector -! second, clean the vector -! finally, check to make sure size is zero - -call MCT_AtrVt_init(av,iList="lat:lon:time") -call MCT_AtrVt_clean(av, ier) -if(MCT_AtrVt_lsize(av) == 0 .AND. ier == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_clean",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_clean",1,"FAIL") - result = 1 -endif - -call MCT_AtrVt_init(av,iList="lat:lon:time") -call MCT_AtrVt_clean(av) -if(MCT_AtrVt_lsize(av) == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_clean",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_clean",2,"FAIL") - result = 1 -endif - -if (result == 0)then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_clean","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_clean","FAIL") -endif -end subroutine - -!#################################### -!# -!# Test AttrVect_init -!# -!#################################### -subroutine testAttrVect_init(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -type(AttrVect) :: av -integer ier - -! test all of the combinations of optional args -! first, try an initialization -! then write out a pass staement if returned successfully -! fianlly, clean the vector - -call MCT_AtrVt_init(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",1,"PASS") -call MCT_AtrVt_clean(av, ier) - -call MCT_AtrVt_init(av,iList='index') -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",2,"PASS") -call MCT_AtrVt_clean(av, ier) - -call MCT_AtrVt_init(av,rList='value') -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",3,"PASS") -call MCT_AtrVt_clean(av, ier) - -call MCT_AtrVt_init(av,iList='index',rList='value') -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",4,"PASS") -call MCT_AtrVt_clean(av, ier) - -call MCT_AtrVt_init(av,iList='index',lsize=1) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",5,"PASS") -call MCT_AtrVt_clean(av, ier) - -call MCT_AtrVt_init(av,rList='value',lsize=1) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",6,"PASS") -call MCT_AtrVt_clean(av, ier) - -call MCT_AtrVt_init(av,iList='index',rList='value',lsize=1) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",7,"PASS") -call MCT_AtrVt_clean(av, ier) - -call MCT_AtrVt_init(av,lsize=1) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_init",8,"PASS") -call MCT_AtrVt_clean(av, ier) - -if(mypid .eq. 
0) call outputRoutineStatus(AVui,"AttrVect_init","PASS") -end subroutine - -!#################################### -!# -!# Test AttrVect_zero -!# -!#################################### -subroutine testAttrVect_zero(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_zero => zero -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_lsize => lsize -use m_AttrVect -use m_realkinds,only : SP,DP,FP - -implicit none - -integer mypid -integer AVui - -integer result, localResult - -type(AttrVect) :: av - -integer i,x,y,totalSize - -integer intSize,realSize,listTotal - -real r - -totalSize = 32 -intSize = 3 -realSize = 3 -!listTotal = intSize+realSize -listTotal = 3 - -result = 0 -localResult = 0 -r = .09_FP -i = 4 - -call MCT_AtrVt_init(av,iList="lat:lon:time",rList="T:P:Q",lsize=totalSize) -av%iAttr=i -av%rAttr=r -call MCT_AtrVt_zero(av) -do x=1,listTotal -do y=1,totalSize -if(av%iAttr(x,y) /= 0 .OR. av%rAttr(x,y) /= 0._FP)then - localResult = 1 -endif -enddo -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",1,"FAIL") - result = 1 - localResult = 0 -endif -call MCT_AtrVt_clean(av) - -call MCT_AtrVt_init(av,iList="lat:lon:time",rList="T:P:Q",lsize=totalSize) -av%iAttr=i -av%rAttr=r -call MCT_AtrVt_zero(av,zeroReals=.TRUE.,zeroInts=.TRUE.) -do x=1,listTotal -do y=1,totalSize -if(av%iAttr(x,y) /= 0 .OR. av%rAttr(x,y) /= 0._FP)then - localResult = 1 -endif -enddo -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",2,"FAIL") - result = 1 - localResult = 0 -endif -call MCT_AtrVt_clean(av) - -call MCT_AtrVt_init(av,iList="lat:lon:time",rList="T:P:Q",lsize=totalSize) -av%iAttr=i -av%rAttr=r -call MCT_AtrVt_zero(av,zeroReals=.TRUE.,zeroInts=.FALSE.) -do x=1,listTotal -do y=1,totalSize -if(av%iAttr(x,y) == 0 .OR. av%rAttr(x,y) /= 0._FP)then - localResult = 1 -endif -enddo -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",3,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",3,"FAIL") - result = 1 - localResult = 0 -endif -call MCT_AtrVt_clean(av) - -call MCT_AtrVt_init(av,iList="lat:lon:time",rList="T:P:Q",lsize=totalSize) -av%iAttr=i -av%rAttr=r -call MCT_AtrVt_zero(av,zeroReals=.FALSE.,zeroInts=.TRUE.) -do x=1,listTotal -do y=1,totalSize -if(av%iAttr(x,y) /= 0 .OR. av%rAttr(x,y) == 0._FP)then - localResult = 1 -endif -enddo -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",4,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",4,"FAIL") - result = 1 - localResult = 0 -endif -call MCT_AtrVt_clean(av) - -call MCT_AtrVt_init(av,iList="lat:lon:time",rList="T:P:Q",lsize=totalSize) -av%iAttr=i -av%rAttr=r -call MCT_AtrVt_zero(av,zeroReals=.FALSE.,zeroInts=.FALSE.) -do x=1,listTotal -do y=1,totalSize -if(av%iAttr(x,y) == 0 .OR. av%rAttr(x,y) == 0._FP)then - localResult = 1 -endif -enddo -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",5,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_zero",5,"FAIL") - result = 1 - localResult = 0 -endif -call MCT_AtrVt_clean(av) - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_zero","PASS") -else - if(mypid .eq. 
0) call outputRoutineStatus(AVui,"AttrVect_zero","FAIL") -endif - -end subroutine - -!#################################### -!# -!# Test AttrVect_nIAttr -!# -!#################################### -subroutine testAttrVect_nIAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_nIAttr => nIAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer length, argLength, returnedLength - -type(AttrVect) :: av - -length = 32 -argLength = 3 - -! initialize vector -call MCT_AtrVt_init(av,iList="lat:lon:time",lsize=length) - -returnedLength = MCT_AtrVt_nIAttr(av) - -if (argLength == returnedLength) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_nIAttr",1,"PASS") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_nIAttr","PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_nIAttr",1,"FAIL") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_nIAttr","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_nRAttr -!# -!#################################### -subroutine testAttrVect_nRAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_nRAttr => nRAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer length, argLength, returnedLength - -type(AttrVect) :: av - -length = 32 -argLength = 3 - -! initialize vector -call MCT_AtrVt_init(av,rList="T:Q:P",lsize=length) - -returnedLength = MCT_AtrVt_nRAttr(av) - -if (argLength == returnedLength) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_nRAttr",1,"PASS") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_nRAttr","PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_nRAttr",1,"FAIL") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_nRAttr","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - - -!#################################### -!# -!# Test AttrVect_indexIA -!# -!#################################### -subroutine testAttrVect_indexIA(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_indexIA => indexIA -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer length, indexFound, index - -integer result - -character(len=4) var -character(len=18) variables - -type(AttrVect) :: av - -result = 0 - -length = 32 -var = "date" -variables = "lat:lon:"//var//":time" -index = 3 !This must match the location of 'var' in above line - -! initialize vector -call MCT_AtrVt_init(av,iList=variables,lsize=length) - -indexFound = MCT_AtrVt_indexIA(av,var) -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",1,"FAIL") - result = 1 -endif - -indexFound = MCT_AtrVt_indexIA(av,var,perrWith="ERROR") -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",2,"FAIL") - result = 1 -endif - -indexFound = MCT_AtrVt_indexIA(av,var,perrWith="ERROR",dieWith="KILLED JOB") -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",3,"PASS") -else - if(mypid .eq. 
0) call outputTestStatus(AVui,"AttrVect_indexIA",3,"FAIL") - result = 1 -endif - -indexFound = MCT_AtrVt_indexIA(av,var,dieWith="KILLED JOB") -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",4,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",4,"FAIL") - result = 1 -endif - -! Check for a name that is not in the list. With 'perrwith' it should -! return 0 as an index -indexFound = MCT_AtrVt_indexIA(av,"foo",perrWith="quiet") -if(indexFound == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",5,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexIA",5,"FAIL") - result = 1 -endif - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_indexIA","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_indexIA","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - - -!#################################### -!# -!# Test AttrVect_indexRA -!# -!#################################### -subroutine testAttrVect_indexRA(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_indexRA => indexRA -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer length, indexFound, index - -integer result - -character(len=1) var -character(len=8) variables - -type(AttrVect) :: av - -result = 0 - -length = 32 -var = "U" -variables = "T:Q:"//var//":P" -index = 3 !This must match the location of 'var' in above line - -! initialize vector -call MCT_AtrVt_init(av,rList=variables,lsize=length) - -indexFound = MCT_AtrVt_indexRA(av,var) -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",1,"FAIL") - result = 1 -endif - -indexFound = MCT_AtrVt_indexRA(av,var,perrWith="ERROR") -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",2,"FAIL") - result = 1 -endif - -indexFound = MCT_AtrVt_indexRA(av,var,perrWith="ERROR",dieWith="KILLED JOB") -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",3,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",3,"FAIL") - result = 1 -endif - -indexFound = MCT_AtrVt_indexRA(av,var,dieWith="KILLED JOB") -if(index == indexFound) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",4,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",4,"FAIL") - result = 1 -endif - -! Check for a name that is not in the list. With 'perrwith' it should -! return 0 as an index -indexFound = MCT_AtrVt_indexRA(av,"foo",perrWith="quiet") -if(indexFound == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",5,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_indexRA",5,"FAIL") - result = 1 -endif - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_indexRA","PASS") -else - if(mypid .eq. 
0) call outputRoutineStatus(AVui,"AttrVect_indexRA","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_getIList -!# -!#################################### -subroutine testAttrVect_getIList(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_getIList => getIList -use m_AttrVect -use m_String,only : String -use m_String,only : ptr_chars - -implicit none - -integer mypid -integer AVui - -integer result, length, index - -type(String) returnVar -character(len=20)temp1 -character(len=20) var -character(len=35) variables - - -type(AttrVect) :: av - -result = 0 - -var = "date" -length = 32 -variables = "lat:lon:"//var//":time" -index = 3 !This must match the location of 'var' in above line - -! initialize vector -call MCT_AtrVt_init(av,iList=variables,lsize=length) -call MCT_AtrVt_getIList(returnVar, index, av) -write(temp1,*)ptr_chars(returnVar) -if (verify(temp1,var)==0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_getIList",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_getIList",1,"FAIL") - result = 1 -endif - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_getIList","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_getIList","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - - -!#################################### -!# -!# Test AttrVect_getRList -!# -!#################################### -subroutine testAttrVect_getRList(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_getRList => getRList -use m_AttrVect -use m_String,only : String -use m_String,only : ptr_chars - -implicit none - -integer mypid -integer AVui - -integer result, length, index - -type(String) returnVar -character(len=20)temp1 -character(len=20) var -character(len=35) variables - - -type(AttrVect) :: av - -result = 0 - -var = "P" -length = 32 -variables = "T:Q:"//var//":U" -index = 3 !This must match the location of 'var' in above line - -! initialize vector -call MCT_AtrVt_init(av,rList=variables,lsize=length) -call MCT_AtrVt_getRList(returnVar, index, av) -write(temp1,*)ptr_chars(returnVar) -if (verify(temp1,var)==0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_getRList",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_getRList",1,"FAIL") - result = 1 -endif - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_getRList","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_getRList","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_exportIList -!# -!#################################### -subroutine testAttrVect_exportIList(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_exportIList => exportIList -use m_AttrVect -use m_List,only : List - -implicit none - -integer mypid -integer AVui - -integer result, length - -character(len=35) variables - -type(AttrVect) :: av - -type(List) vList - -length = 32 -write(variables,*) "lat:lon:time" - -! initialize vector -call MCT_AtrVt_init(av,iList=variables,lsize=length) - -call MCT_AtrVt_exportIList(av,vList,result) - -if (result == 0) then - if(mypid .eq. 
0) call outputTestStatus(AVui,"AttrVect_exportIList",1,"PASS") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportIList","PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportIList",1,"FAIL") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportIList","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_exportRList -!# -!#################################### -subroutine testAttrVect_exportRList(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_exportRList => exportRList -use m_AttrVect -use m_List,only : List - -implicit none - -integer mypid -integer AVui - -integer result, length - -character(len=35) variables - -type(AttrVect) :: av - -type(List) vList - -length = 32 -write(variables,*) "T:P:Q" - -! initialize vector -call MCT_AtrVt_init(av,rList=variables,lsize=length) - -call MCT_AtrVt_exportRList(av,vList,result) - -if (result == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportRList",1,"PASS") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportRList","PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportRList",1,"FAIL") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportRList","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - - -!#################################### -!# -!# Test AttrVect_exportIListToChar -!# -!#################################### -subroutine testAttrVect_exportIListToChar(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_exportIListToChar => exportIListToChar -use m_AttrVect -use m_List,only : List - -implicit none - -integer mypid -integer AVui - -integer result, length - -character(len=35) variables -character(len=35) returnVariables - -type(AttrVect) :: av - -type(List) vList - -length = 32 -write(variables,*) "lat:lon:time" - -! initialize vector -call MCT_AtrVt_init(av,iList=variables,lsize=length) - -write(returnVariables,*) MCT_AtrVt_exportIListToChar(av) - -result = verify(variables,returnVariables) - -if (result == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportIListToChar",1,"PASS") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportIListToChar","PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportIListToChar",1,"FAIL") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportIListToChar","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_exportRListToChar -!# -!#################################### -subroutine testAttrVect_exportRListToChar(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_exportRListToChar => exportRListToChar -use m_AttrVect -use m_List,only : List - -implicit none - -integer mypid -integer AVui - -integer result, length - -character(len=35) variables -character(len=35) returnVariables - -type(AttrVect) :: av - -type(List) vList - -length = 32 -write(variables,*) "T:Q:P" - -! initialize vector -call MCT_AtrVt_init(av,rList=variables,lsize=length) - -write(returnVariables,*) MCT_AtrVt_exportRListToChar(av) - -result = verify(variables,returnVariables) - -if (result == 0) then - if(mypid .eq. 
0) call outputTestStatus(AVui,"AttrVect_exportRListToChar",1,"PASS") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportRListToChar","PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportRListToChar",1,"FAIL") - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportRListToChar","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_appendIAttr -!# -!#################################### -subroutine testAttrVect_appendIAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_appendIAttr => appendIAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer result, localResult, length - -character(len=35) variables -character(len=35) appendVariables - -type(AttrVect) :: av - -result = 0 - -length = 32 -write(variables,*) "lat:lon" -write(appendVariables,*) "year:month:day" - -call MCT_AtrVt_init(av,iList=variables,lsize=length) -call MCT_AtrVt_appendIAttr(av, appendVariables, localResult) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_appendIAttr",1,"PASS") -call MCT_AtrVt_clean(av) - -call MCT_AtrVt_init(av,iList=variables,lsize=length) -call MCT_AtrVt_appendIAttr(av, appendVariables, localResult) -if (localResult == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_appendIAttr",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_appendIAttr",2,"FAIL") - result = 1 -endif -call MCT_AtrVt_clean(av) - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_appendIAttr","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_appendIAttr","FAIL") -endif - -end subroutine - -!#################################### -!# -!# Test AttrVect_appendRAttr -!# -!#################################### -subroutine testAttrVect_appendRAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_appendRAttr => appendRAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer result, localResult, length - -character(len=35) variables -character(len=35) appendVariables - -type(AttrVect) :: av - -result = 0 - -length = 32 -write(variables,*) "T:Q:P" -write(appendVariables,*) "U:W" - -call MCT_AtrVt_init(av,rList=variables,lsize=length) -call MCT_AtrVt_appendRAttr(av, appendVariables, localResult) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_appendRAttr",1,"PASS") -call MCT_AtrVt_clean(av) - -call MCT_AtrVt_init(av,rList=variables,lsize=length) -call MCT_AtrVt_appendRAttr(av, appendVariables, localResult) -if (localResult == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_appendRAttr",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_appendRAttr",2,"FAIL") - result = 1 -endif -call MCT_AtrVt_clean(av) - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_appendRAttr","PASS") -else - if(mypid .eq. 
0) call outputRoutineStatus(AVui,"AttrVect_appendRAttr","FAIL") -endif - - -end subroutine - -!#################################### -!# -!# Test AttrVect_exportIAttr -!# -!#################################### -subroutine testAttrVect_exportIAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_exportIAttr => exportIAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer result, localResult, length - -character(len=35) variables -character(len=4) keyVar - -integer, dimension(:),pointer :: out - -integer size, i, y - -type(AttrVect) :: av - -result = 0 -localResult = 0 - -length = 32 -keyVar="date" -write(variables,*) "lat:",keyVar,":lon" - -i = 4 - -call MCT_AtrVt_init(av,iList=variables,lsize=length) -av%iAttr=i - -nullify(out) -call MCT_AtrVt_exportIAttr(av, keyVar,out) -do y=1,length -if(out(y) /= i)then - localResult = 1 -endif -out(y) = 0 -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportIAttr",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportIAttr",1,"FAIL") - localResult = 0 - result = 1 -endif - -deallocate(out) - -call MCT_AtrVt_exportIAttr(av, keyVar,out,size) -do y=1,length -if(out(y) /= i)then - localResult = 1 -endif -out(y) = 0 -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportIAttr",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportIAttr",2,"FAIL") - localResult = 0 - result = 1 -endif - -!!! bug? --> call MCT_AtrVt_exportIAttr(av, AttrTag="foo",outVect=out, perrWith="quiet") -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportIAttr","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportIAttr","FAIL") -endif -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_exportRAttr -!# -!#################################### -subroutine testAttrVect_exportRAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_exportRAttr => exportRAttr -use m_AttrVect -use m_realkinds,only : SP,DP,FP - -implicit none - -integer mypid -integer AVui - -integer result, localResult, length - -character(len=35) variables -character(len=1) keyVar - -real, dimension(:),pointer :: out - -integer size, y - -real r - -type(AttrVect) :: av - -result = 0 -localResult = 0 - -length = 32 -keyVar="T" -variables = "P:"//keyVar//":Q" - -r = .09_FP - -call MCT_AtrVt_init(av,rList=variables,lsize=length) -av%rAttr=r - -nullify(out) -call MCT_AtrVt_exportRAttr(av, keyVar,out) -do y=1,length -if(out(y) /= r)then - localResult = 1 -endif -out(y) = 0 -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportRAttr",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportRAttr",1,"FAIL") - localResult = 0 - result = 1 -endif - -deallocate(out) - -call MCT_AtrVt_exportRAttr(av, keyVar,out,size) -do y=1,length -if(out(y) /= r)then - localResult = 1 -endif -out(y) = 0 -enddo -if(localResult == 0)then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportRAttr",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_exportRAttr",2,"FAIL") - localResult = 0 - result = 1 -endif - -!!! bug? 
--> call MCT_AtrVt_exportRAttr(av, AttrTag="foo",outVect=out, perrWith="quiet") -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportRAttr","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_exportRAttr","FAIL") -endif -call MCT_AtrVt_clean(av) - -end subroutine - - -!#################################### -!# -!# Test AttrVect_importIAttr -!# -!#################################### -subroutine testAttrVect_importIAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_importIAttr => importIAttr -use m_AttrVect,only : MCT_AtrVt_exportIAttr => exportIAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -integer result, localResult, length - -character(len=35) variables -character(len=12) keyVar - -integer size, y, i, index - -integer,pointer :: importVectP(:) -integer,target :: importVect(32) -integer, dimension(:),pointer :: out - -type(AttrVect) :: av - -result = 0 -localResult = 0 - -length = 32 -keyVar="date" -variables="lat:lon:"//keyVar - -i=4 -importVect = i -importVectP => importVect - -call MCT_AtrVt_init(av,iList=variables,lsize=length) -call MCT_AtrVt_importIAttr(av,TRIM(keyVar),importVectP) - -nullify(out) -call MCT_AtrVt_exportIAttr(av,TRIM(keyVar),out) -do y=1,length -if(out(y) /= i)then - localResult = 1 -endif -end do -if (localResult == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_importIAttr",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_importIAttr",1,"FAIL") - localResult = 0 - result = 1 -endif - -deallocate(out) - -i=6 -importVect = i -importVectP => importVect - -call MCT_AtrVt_importIAttr(av,TRIM(keyVar),importVectP,length) -call MCT_AtrVt_exportIAttr(av,TRIM(keyVar),out) -do y=1,length -if(out(y) /= i)then - localResult = 1 -endif -end do -if (localResult == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_importIAttr",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_importIAttr",2,"FAIL") - result = 1 -endif - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_importIAttr","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_importIAttr","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - - -!#################################### -!# -!# Test AttrVect_importRAttr -!# -!#################################### -subroutine testAttrVect_importRAttr(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_importRAttr => importRAttr -use m_AttrVect,only : MCT_AtrVt_exportRAttr => exportRAttr -use m_AttrVect -use m_realkinds,only : SP,DP,FP - -implicit none - -integer mypid -integer AVui - -integer result, localResult, length - -character(len=35) variables -character(len=12) keyVar - -integer size, y, index -real r - -real,pointer :: importVectP(:) -real,target :: importVect(32) -real, dimension(:),pointer :: out - -type(AttrVect) :: av - -result = 0 -localResult = 0 - -length = 32 -keyVar="T" -variables="Q:P:U:W:"//keyVar - -r=0.04_FP -importVect = r -importVectP => importVect - -call MCT_AtrVt_init(av,rList=variables,lsize=length) -call MCT_AtrVt_importRAttr(av,TRIM(keyVar),importVectP) -nullify(out) -call MCT_AtrVt_exportRAttr(av,TRIM(keyVar),out) -do y=1,length -if(out(y) /= r)then - localResult = 1 -endif -end do -if (localResult == 0) then - if(mypid .eq. 
0) call outputTestStatus(AVui,"AttrVect_importRAttr",1,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_importRAttr",1,"FAIL") - localResult = 0 - result = 1 -endif - -deallocate(out) - -r=0.06_FP -importVect = r -importVectP => importVect - -call MCT_AtrVt_importRAttr(av,TRIM(keyVar),importVectP,length) -call MCT_AtrVt_exportRAttr(av,TRIM(keyVar),out) -do y=1,length -if(out(y) /= r)then - localResult = 1 -endif -end do -if (localResult == 0) then - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_importRAttr",2,"PASS") -else - if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_importRAttr",2,"FAIL") - result = 1 -endif - -if (result == 0) then - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_importRAttr","PASS") -else - if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_importRAttr","FAIL") -endif - -call MCT_AtrVt_clean(av) - -end subroutine - -!#################################### -!# -!# Test AttrVect_Copy -!# -!#################################### -subroutine testAttrVect_copy(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_copy => copy -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -character(len=35) Rvariables, RvariablesOUT -character(len=35) Ivariables, IvariablesOUT - -integer result,localResult,length - -type(AttrVect) :: avIN, avOUT - -result = 0 - -length = 32 -Rvariables="Q:P:U:W" -RvariablesOUT="q:p:u:w" -Ivariables="date:lat:lon" -IvariablesOUT="DATE:LAT:LON" - -call MCT_AtrVt_init(avIN,iList=Ivariables,rList=Rvariables,lsize=length) -call MCT_AtrVt_init(avOUT,iList=Ivariables,rList=Rvariables,lsize=length) - -call MCT_AtrVt_copy(avIN,avOUT) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_copy",1,"PASS") -call MCT_AtrVt_clean(avOUT) - -call MCT_AtrVt_init(avOUT,iList=IvariablesOUT,rList=RvariablesOUT,lsize=length) -call MCT_AtrVt_Copy(avIN,avOUT,iList=Ivariables,TiList=IvariablesOUT) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_copy",2,"PASS") -call MCT_AtrVt_clean(avOUT) - -call MCT_AtrVt_init(avOUT,iList=IvariablesOUT,rList=RvariablesOUT,lsize=length) -call MCT_AtrVt_Copy(avIN,avOUT,rList=Rvariables,TrList=RvariablesOUT) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_copy",3,"PASS") -call MCT_AtrVt_clean(avOUT) - -call MCT_AtrVt_init(avOUT,iList=IvariablesOUT,rList=RvariablesOUT,lsize=length) -call MCT_AtrVt_Copy(avIN,avOUT,iList=Ivariables,TiList=IvariablesOUT,rList=Rvariables,TrList=RvariablesOUT) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_copy",4,"PASS") -call MCT_AtrVt_clean(avOUT) - -call MCT_AtrVt_init(avOUT,iList=IvariablesOUT,rList=RvariablesOUT,lsize=length) -call MCT_AtrVt_Copy(avIN,avOUT,iList=Ivariables,TiList=IvariablesOUT,rList=Rvariables,TrList=RvariablesOUT,vector=.false.) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_copy",5,"PASS") -call MCT_AtrVt_clean(avOUT) - -call MCT_AtrVt_init(avOUT,iList=IvariablesOUT,rList=RvariablesOUT,lsize=length) -call MCT_AtrVt_Copy(avIN,avOUT,iList=Ivariables,TiList=IvariablesOUT,rList=Rvariables,TrList=RvariablesOUT,vector=.true.) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_copy",6,"PASS") -call MCT_AtrVt_clean(avOUT) - -call MCT_AtrVt_init(avOUT,iList=Ivariables,rList=Rvariables,lsize=length) -call MCT_AtrVt_copy(avIN,avOUT,vector=.true.) -if(mypid .eq. 
0) call outputTestStatus(AVui,"AttrVect_copy",7,"PASS") -call MCT_AtrVt_clean(avOUT) - -call MCT_AtrVt_init(avOUT,iList=Ivariables,rList=Rvariables,lsize=length) -call MCT_AtrVt_copy(avIN,avOUT,vector=.false.) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_copy",8,"PASS") -call MCT_AtrVt_clean(avOUT) - -if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_copy","PASS") - -end subroutine - -!#################################### -!# -!# Test AttrVect_sort -!# -!#################################### -subroutine testAttrVect_sort(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_sort => sort -use m_AttrVect,only : MCT_AtrVt_nIAttr => nIAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -type(AttrVect) :: av -logical,dimension(:), pointer :: des -integer,dimension(:), pointer :: perm - -character(len=35) Ivariables - -integer result,length - -result = 0 - -length = 32 -Ivariables="date:lat:lon" - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",1,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -allocate(des(MCT_AtrVt_nIAttr(av)),stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not allocate des in the AttrVect_sort test." -endif -des = .true. -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,descend=des) -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",2,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -des = .false. -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,descend=des) -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",3,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -des = .true. -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,descend=des,perrWith="ERROR") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",4,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -des = .true. -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,descend=des,perrWith="ERROR",& - dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",5,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -des = .true. -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,descend=des,dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 
0) call outputTestStatus(AVui,"AttrVect_sort",6,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,perrWith="ERROR") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",7,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",8,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm,perrWith="ERROR",dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_sort test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sort",9,"PASS") - -deallocate(des,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate des in the AttrVect_sort test." -endif - -if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_sort","PASS") - -end subroutine - -!#################################### -!# -!# Test AttrVect_permute -!# -!#################################### -subroutine testAttrVect_permute(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_sort => sort -use m_AttrVect,only : MCT_AtrVt_permute => permute -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -type(AttrVect) :: av -integer,dimension(:), pointer :: perm - -character(len=35) Ivariables - -integer result,length - -result = 0 - -length = 32 -Ivariables="date:lat:lon" - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_permute(av,perm) -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_permute test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_permute",1,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_permute(av,perm,perrWith="ERROR") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_permute test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_permute",2,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_permute(av,perm,perrWith="ERROR",dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_permute test." -endif -if(mypid .eq. 
0) call outputTestStatus(AVui,"AttrVect_permute",3,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_permute(av,perm,dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_permute test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_permute",4,"PASS") - -if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_permute","PASS") - -end subroutine - - -!#################################### -!# -!# Test AttrVect_unpermute -!# -!#################################### -subroutine testAttrVect_unpermute(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_sort => sort -use m_AttrVect,only : MCT_AtrVt_unpermute => unpermute -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -type(AttrVect) :: av -integer,dimension(:), pointer :: perm - -character(len=35) Ivariables - -integer result,length - -result = 0 - -length = 32 -Ivariables="date:lat:lon" - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_unpermute(av,perm) -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_unpermute test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_unpermute",1,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_unpermute(av,perm,perrWith="ERROR") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_unpermute test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_unpermute",2,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_unpermute(av,perm,perrWith="ERROR",dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_unpermute test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_unpermute",3,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sort(av=av,key_list=av%iList,perm=perm) -call MCT_AtrVt_unpermute(av,perm,dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -deallocate(perm,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate perm in the AttrVect_unpermute test." -endif -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_unpermute",4,"PASS") - -if(mypid .eq. 
0) call outputRoutineStatus(AVui,"AttrVect_unpermute","PASS") - -end subroutine - -!#################################### -!# -!# Test AttrVect_sortPermute -!# -!#################################### -subroutine testAttrVect_sortPermute(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_sort => sort -use m_AttrVect,only : MCT_AtrVt_sortPermute => SortPermute -use m_AttrVect,only : MCT_AtrVt_nIAttr => nIAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -type(AttrVect) :: av -logical,dimension(:), pointer :: des - -character(len=35) Ivariables - -integer length, result - -result = 0 - -length = 32 -Ivariables="date:lat:lon" - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sortPermute(av,key_list=av%iList) -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",1,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -allocate(des(MCT_AtrVt_nIAttr(av)),stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not allocate des in the AttrVect_sortPermute test." -endif -des = .true. -call MCT_AtrVt_sortPermute(av,key_list=av%iList,descend=des) -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",2,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -des = .false. -call MCT_AtrVt_sortPermute(av,key_list=av%iList,descend=des) -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",3,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -des = .true. -call MCT_AtrVt_sortPermute(av,key_list=av%iList,descend=des,perrWith="ERROR") -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",4,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sortPermute(av,key_list=av%iList,descend=des,perrWith="ERROR", & - dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",5,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sortPermute(av,key_list=av%iList,descend=des,dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",6,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -des = .true. -call MCT_AtrVt_sortPermute(av,key_list=av%iList,perrWith="ERROR") -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",7,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sortPermute(av,key_list=av%iList,perrWith="ERROR", & - dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",8,"PASS") - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_sortPermute(av,key_list=av%iList,dieWith="KILLED JOB") -call MCT_AtrVt_clean(av) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_SortPermute",9,"PASS") - -deallocate(des,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate des in the AttrVect_sortPermute test." -endif - -if(mypid .eq. 
0) call outputRoutineStatus(AVui,"AttrVect_SortPermute","PASS") - -end subroutine - -!#################################### -!# -!# Test AttrVect_sharedAttrIndexList -!# -!#################################### -subroutine testAttrVect_sharedAttrIndexList(mypid,AVui) - -use m_AttrVect,only : MCT_AtrVt_init => init -use m_AttrVect,only : MCT_AtrVt_clean => clean -use m_AttrVect,only : MCT_AtrVt_sharedAttrIndexList => SharedAttrIndexList -use m_AttrVect,only : MCT_AtrVt_nIAttr => nIAttr -use m_AttrVect - -implicit none - -integer mypid -integer AVui - -type(AttrVect) :: av,av2 -character(len=35) type -integer numShare -integer, dimension(:),pointer :: indx1,indx2 - -character(len=35) Ivariables,Ivariables2 - -integer result,length - -result = 0 - -length = 32 -Ivariables="date:lat:lon" -Ivariables2="lat:lon:month:day:year" - -call MCT_AtrVt_init(av,iList=Ivariables,lsize=length) -call MCT_AtrVt_init(av2,iList=Ivariables2,lsize=length) -type="integer" -call MCT_AtrVt_sharedAttrIndexList(av,av2,type,numShare,indx1,indx2) -if(mypid .eq. 0) call outputTestStatus(AVui,"AttrVect_sharedAttrIndexList",1,"PASS") -deallocate(indx1,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate indx1 in the AttrVect_sharedAttrIndexList test." -endif -deallocate(indx2,stat=result) -if(result /= 0)then -if(mypid .eq. 0) write(AVui,*)"ERROR: Could not deallocate indx2 in the AttrVect_sharedAttrIndexList test." -endif -call MCT_AtrVt_clean(av) - -if(mypid .eq. 0) call outputRoutineStatus(AVui,"AttrVect_sharedAttrIndexList","PASS") - -end subroutine diff --git a/src/externals/mct/testunit/Makefile b/src/externals/mct/testunit/Makefile deleted file mode 100644 index d337249c6b9..00000000000 --- a/src/externals/mct/testunit/Makefile +++ /dev/null @@ -1,41 +0,0 @@ - -SHELL = /bin/sh - -# SOURCE FILES - -SRCS_F90 = master.F90 \ - AttrVect_Test.F90 \ - -OBJS_ALL = $(SRCS_F90:.F90=.o) - -# MACHINE AND COMPILER FLAGS - -include ../Makefile.conf - -# ADDITIONAL DEFINITIONS SPECIFIC FOR UTMCT COMPILATION - -MCTLIBS = -L$(MPEUPATH) -L$(MCTPATH) -lmct -lmpeu -UTLDFLAGS = $(REAL8) -UTCMPFLAGS = $(REAL8) $(INCFLAG)$(MPEUPATH) $(INCFLAG)$(MCTPATH) - -# TARGETS - -all: mctTester - -mctTester: $(OBJS_ALL) - $(FC) -o $@ $(OBJS_ALL) $(FCFLAGS) $(UTLDFLAGS) $(MCTLIBS) $(MPILIBS) - -# RULES - -.SUFFIXES: -.SUFFIXES: .F90 .o - -.F90.o: - $(FC) -c $(INCPATH) $(FPPDEFS) $(FCFLAGS) $(MCTFLAGS) $(UTCMPFLAGS) $< - -clean: - ${RM} *.o *.mod mctTester - -# DEPENDENCIES: - -$(OBJS_ALL): $(MCTPATH)/libmct.a diff --git a/src/externals/mct/testunit/master.F90 b/src/externals/mct/testunit/master.F90 deleted file mode 100644 index 7a222ccba2a..00000000000 --- a/src/externals/mct/testunit/master.F90 +++ /dev/null @@ -1,101 +0,0 @@ -program main - -implicit none - -#include "mpif.h" - -integer ierr,myProc -character(len=12) date1 - -integer ui - -call MPI_INIT(ierr) -call MPI_COMM_RANK(MPI_COMM_WORLD,myProc,ierr) - -call DATE_AND_TIME(date=date1) -ui = 7 - -if(myProc .eq. 0) call openIO(date1,ui,'AttrVect') -call testAttrVect(myProc,ui) -ui = ui+1 - -call MPI_FINALIZE(ierr) - - -end program - -subroutine outputTestStatus(ui, routine, testid, status) - -integer ui, testid - -character(*) routine, status - -character(len=96) output - -integer ok - -if (status == "PASS") then -ok=1 -else if (status == "FAIL") then -ok = 1 -else -write(0,*) "WHAT HAPPENED? ", routine, testid -endif - -write(ui,'(a,a,i1,a,a)')routine," ... 
",status - -end subroutine - - -subroutine outputRoutineStatus(ui, routine, status) - -integer ui - -character(*) routine, status - -character(len=96) output - -integer ok - -if (status == "PASS") then -ok=1 -else if (status == "FAIL") then -ok = 1 -else -write(0,*) "WHAT HAPPENED? ", routine -endif - -write(ui,'(a,a,a)')routine," SUMMARY ... ",status - -end subroutine - - -!#################################### -! -! open io unit for log file -! -!#################################### - -subroutine openIO(stamp,ui,routine) - - character(*) stamp, routine - integer ui - - character(len=54) filename - integer ierr - - ierr = 0 - - filename = trim(routine)//'.log.' // stamp(1:8) - OPEN (UNIT=ui, FILE=filename,STATUS='NEW',IOSTAT=ierr) - - if (ierr /= 0) then - write(6,*) "Open failed on unit: ", ui - write(6,*) "File name was: [", filename, "]" - write(6,*) "Error code was: ", ierr - - stop 1 - end if - -end subroutine - diff --git a/src/externals/pio1/CMakeLists.txt b/src/externals/pio1/CMakeLists.txt deleted file mode 100644 index fcf7e3f839f..00000000000 --- a/src/externals/pio1/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -PROJECT(PIO C Fortran) -ENABLE_LANGUAGE(Fortran) -CMAKE_MINIMUM_REQUIRED(VERSION 2.8.5) - -ADD_SUBDIRECTORY(pio) diff --git a/src/externals/pio1/ChangeLog b/src/externals/pio1/ChangeLog deleted file mode 100644 index e6b3c2e1687..00000000000 --- a/src/externals/pio1/ChangeLog +++ /dev/null @@ -1,4714 +0,0 @@ -================================================================================ -SVN $Id: ChangeLog 45058 2013-03-20 16:12:21Z jedwards $ -SVN $URL: https://parallelio.googlecode.com/svn/trunk $ -================================================================================ -This file describes what tags were created and why -=========================== -Originator: jedwards -Date: Jan 14, 2015 -Model: pioi -Version: pio1_9_10 -One-line : improved handling of overlaps - -M box_rearrange.F90.in - -=========================== -Originator: jedwards -Date: Dec 26, 2014 -Model: pio -Version: pio1_9_9 -One-line : remove broken BGQ opt - -M pio/piolib_mod.F90 - -=========================== -Originator: jedwards -Date: Nov 26, 2014 -Model: pio -Version: pio1_9_0 -One-line : Add code for pio2 api compatability - -M pio/nf_mod.F90 -M pio/pio.F90 -M pio/piolib_mod.F90 - -=========================== -Originator: jedwards/santos -Date: Nov 19, 2014 -Model: pio -Version: pio1_8_14 -One-line : Fix minor errors pointed out by valgrind - -M piolib_mod.F90 - -=========================== -Originator: jedwards/santos -Date: Oct 28, 2014 -Model: pio -Version: pio1_8_13 -One-line : Fix minor errors pointed out by valgrind - -M pio_support.F90 -M nf_mod.F90 - - -=========================== -Originator: jamroz -Date: April 8, 2014 -Model: pio -Version: pio1_8_12 -One-line : Update cmake tag - -M . 
-M SVN_EXTERNAL_DIRECTORIES - -=========================== -Originator: jedwards -Date: Mar 17, 2014 -Model: pio -Version: pio1_8_11 -One-line : timing build should be optional - -M pio/CMakeLists.txt - -=========================== -Originator: jedwards -Date: Feb 20, 2014 -Model: pio -Version: pio1_8_10 -One-line : add gptl timing library support - -M 924 pio/CMakeLists.txt -M 924 testpio/CMakeLists.txt -M 924 timing/CMakeLists.txt -M 924 unittests/driver.F90 -M 924 unittests/CMakeLists.txt - -=========================== -Originator: jedwards -Date: Feb 12, 2014 -Model: pio -Version: pio1_8_9 -One-line : bug fix in darray_write_complete - -M piodarray.F90.in -M pio_types.F90 - -=========================== -Originator: jedwards -Date: Jan 24, 2014 -Model: pio -Version: pio1_8_8 -One-line : Fix some issues with netcdf4c - -M 924 pionfatt_mod.F90.in -M 924 piodarray.F90.in -M 924 pionfget_mod.F90.in -M 924 pionfread_mod.F90.in - - - -=========================== -Originator: jedwards -Date: Jan 16, 2014 -Model: pio -Version: pio1_8_7 -One-line Fix issues with BGQ topology, fix minor memleaks - -M topology.c -M piolib_mod.F90 - - -=========================== -Originator: jedwards -Date: Dec 11, 2013 -Model: pio -Version: pio1_8_6 -One-line summary: further build updates - - - -=========================== -Originator: jedwards -Date: Dec 06, 2013 -Model: pio -Version: pio1_8_5 -One-line summary: Get cmake utilities from external repository - -D pio/cmake -D SVN_EXTERNAL_DIRECTORIES -A pio/SVN_EXTERNAL_DIRECTORIES - -=========================== -Originator: jedwards -Date: Dec 06, 2013 -Model: pio -Version: pio1_8_4 -One-line summary: added unittests - tested on yellowstone_intel and hopper_pgi - these tests must be run from an - interative login session - -A unittests/ - -=========================== -Originator: jedwards -Date: Dec 03, 2013 -Model: pio -Version: pio1_8_3 -One-line summary: cmake updates - -M pio/cmake/TryCSizeOf.f90 -M pio/cmake/TryMPISERIAL.f90 -M pio/CMakeLists.txt - - -=========================== -Originator: jedwards -Date: Nov 24, 2013 -Model: pio -Version: pio1_8_2 -One-line summary: fix issues with put in pnetcdf - -M pionfput_mod.F90.in -M pionfput_mod.F90 - -=========================== -Originator: jedwards -Date: Nov 19, 2013 -Model: pio -Version: pio1_8_1 -One-line summary: Fixes for problems found in the build of cesm1_3_alpha06c - - M pio_types.F90 - M pio.F90 - M nf_mod.F90 - - - -=========================== -Originator: jedwards -Date: Nov 13, 2013 -Model: pio -Version: pio1_8_0 -One-line summary: switch to a cmake build, updates for mira, fixes for netcdf4p - - remove all autoconf support files, add cmake support files. - it is currently required to run tests by hand, a cmake implimentation - of tests is planned but incomplete. CESM tests have been run - on this tag. - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_7_2 -Originator(s): Jim -Date: 05-31-2013 -One-line Summary: Bug fix in calcdecomp.F90 - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: -M calcdecomp.F90 - only mark as converged if all iotasks are counted -M box_rearranger.F90.in - add code to avoid an infinite loop - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: Bugzilla 1721 - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: - testpio.693883.stdout:test complete on yellowstone 103 tests PASS, 0 tests FAIL - testpio.696040.stdout:test complete on yellowstone_gnu 80 tests PASS, 0 tests FAIL - testpio.696166.stdout:test complete on yellowstone_pgi 80 tests PASS, 0 tests FAIL - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_7_1 -Originator(s): Jim -Date: 05-20-2013 -One-line Summary: bug fix in calcdecomp.F90 - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - M calcdecomp.F90 - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: edge case in ROF model; problem was found in cesm test - ERS_N2.f19_g16.B20TRC5CN.yellowstone_intel - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Self - -Summary of pre-tag testing: - - Regression tests: - testpio.693883.stdout:test complete on yellowstone 103 tests PASS, 0 tests FAIL - testpio.696040.stdout:test complete on yellowstone_gnu 80 tests PASS, 0 tests FAIL - testpio.696166.stdout:test complete on yellowstone_pgi 80 tests PASS, 0 tests FAIL - cesm1_2_rel03 prebeta yellowstone intel tests all pass. - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_7_0 -Originator(s): Jim -Date: May 10, 2013 -One-line Summary: Bug fix in calcdecomp, remove parameters from mpi_bcast - and pass variables to satisfy f90 module issues. - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - pionfput_mod.F90.in - piolib_mod.F90 - pionfget_mod.F90.in - pionfatt_mod.F90.in - piodarray.F90.in - Remove constant fields from mpi_bcast calls; these were not errors, but - caused problems with the F90 interface in Fujitsu's mpi.mod - calcdecomp.F90 - Every io task now computes all io tasks' start and count and checks them - for consistency; if inconsistent, reduce the number of io tasks by 1 and try - again.
- -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: - test complete on yellowstone 103 tests PASS, 0 tests FAIL - test complete on yellowstone_gnu 80 tests PASS, 0 tests FAIL - test complete on yellowstone_pgi 80 tests PASS, 0 tests FAIL - cesm intel prealpha tests on yellowstone - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_6_9 -Originator(s): Jim -Date: May 01, 2013 -One-line Summary: minor fix (again) in box_rearrange.F90 for mpi-serial - -M pio/box_rearrange.F90.in -M pio/box_rearrange.F90 - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_6_8 -Originator(s): Jim -Date: April 23, 2013 -One-line Summary: minor fix in box_rearrange.F90 for mpi-serial - -M pio/box_rearrange.F90.in -M pio/box_rearrange.F90 - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_6_7 -Originator(s): Jim -Date: April 22, 2013 -One-line Summary: bug fixes in async interface, mpi-serial interface - -List all subroutines eliminated: - -List all subroutines added and what they do: - readded file dtypes.h which is generated but timestamps were causing issues - -List all existing files that have been modified, and describe the changes: -M pio/pio_msg_callbacks.F90 -M pio/piolib_mod.F90 -M pio/Makefile -M pio/pio_msg_mod.F90 -A pio/dtypes.h -M pio/nf_mod.F90 -M pio/piodarray.F90.in -M pio/piodarray.F90 -M pio/iompi_mod.F90.in -M pio/iompi_mod.F90 -M pio/box_rearrange.F90.in -M pio/box_rearrange.F90 -M pio/pionfput_mod.F90.in -M pio/pionfput_mod.F90 - - - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: cesm prealpha testing - - - Regression tests: -test complete on yellowstone 103 tests PASS, 0 tests FAIL -test complete on yellowstone_gnu 80 tests PASS, 0 tests FAIL -test complete on yellowstone_pgi 80 tests PASS, 0 tests FAIL - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_6_6 -Originator(s): Jim, Alice -Date: Apr 03, 2013 -One-line Summary: Documentation update, mv genf90.pl to external, bug fix in nfput for pnetcdf - -List all subroutines eliminated: pio_quicksort.F90 (not used), dtypes.h (now autogenerated), genf90.pl (now external) - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Many changes in documentation. - pionfput_mod.F90.in: Made put_var_0d_{TYPE} explicit and separate from put_var_{DIMS}d_{TYPE} to allow - for a bug fix in 0d vars written using pnetcdf and to allow some CPP code cleanup. - - -List all component tags that were used to create pio tag: - genf90: genf90_130402 -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: Bugzilla 1647 - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: cesm1_2_beta05 yellowstone_intel.prealpha tests - test complete on yellowstone_gnu 80 tests PASS, 0 tests FAIL - test complete on yellowstone 103 tests PASS, 0 tests FAIL - test complete on titan 102 tests PASS, 0 tests FAIL - - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_6_5 -Originator(s): Jim -Date: 03-07-2013 -One-line Summary: minor change to work with mpi-serial - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - piolib_mod.F90 : add ifdef around MPI datatype calls (not supported by mpi-serial lib) - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_6_4 -Originator(s): Jim -Date: 03-06-2013 -One-line Summary: improved init_decomp performance - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: -M pio/configure.ac -M pio/configure - Updated version number in configure -M pio/piolib_mod.F90 -M pio/box_rearrange.F90 -M pio/box_rearrange.F90.in - Refactored search algorithm for ionodes, significantly reducing the - time required for this operation - -M testpio/build_defaults.xml -A testpio/ystest.sh - Changes to allow yellowstone to run netcdf4 tests; requires - a new script to submit compile to caldera.
- -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - box_rearrange_create time was reduced significantly with a change - to the search algorythm - - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: test complete on yellowstone 103 tests PASS, 0 tests FAIL - cesm yellowstone prealpha tests completed (cesm1_2_alpha03c) - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_6_3 -Originator(s): Jim -Date: 02-28-2013 -One-line Summary: Remove indep mode calls from pnetcdf interface - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - testpio/testpio.F90 - reordered write of integer test file, useful for debugging - testpio/testpio_run.pl - fix netcdf4 test match - testpio/build_defaults.xml - update yellowstone interface to work with netcdf4 tests - a minor change to make the file xml compliant - testpio/check_mod.F90 - update test to avoid underflow warning - testpio/Utils.pm - update modules loaded for yellowstone - a minor change to support xml compliance in build_defaults.xml - pio/configure.ac - fix sizeof test issue - pio/configure - pio/calcdisplace_mod.F90 - remove intent from pointer argument - pio/pionfput_mod.F90.in - rework interface to pnetcdf to avoid indep_mode - pio/pionfput_mod.F90 - pio/piolib_mod.F90 - add gensubarray routine to create subarray mpitype - pio/piovdc.F90 - reindent file - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: test complete on yellowstone 103 tests PASS, 0 tests FAIL - test complete on titan 102 tests PASS, 0 tests FAIL - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_6_2 -Originator(s): Jim -Date: 02-07-2013 -One-line Summary: support for large data (bug fix), autoconf update - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: -M 703 testpio/testpio_run.pl - When using --twopass option do not - submit job unless build completes successfully -M 700 testpio/gdecomp_mod.F90 - fix a dependency issue -M 700 pio/calcdisplace_mod.F90 - fail silently on overflow of displace - array, this data is not used for pnetcdf so by failing silently - here we defer the failure of mpiio i/o until the read or write call -M 700 pio/configure.ac - change line length max to 132 to support nag - fortran compiler -M 703 pio/piolib_mod.F90 - if displace array overflows in calcdisplace_mod - set filetype to mpi_datatype_null -M 700 pio/configure -M 700 pio/aclocal.m4 -M 700 pio/config.h.in - these autoconf generated files are in the repo - for convenience - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: Tested on challenger, titan, yellowstone with xlf, - pgi, and intel compilers - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_6_1 -Originator(s): Jim -Date: 02-05-2013 -One-line Summary: bug fix in reading 1d arrays - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: -M pio/piolib_mod.F90 lenblocks was incorrectly set to 1 for 1D arrays -M testpio/Utils.pm updates for yellowstone_pgi -M testpio/testpio_run.pl match yellowstone_pgi to host yellowstone - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: test complete on yellowstone 81 tests PASS, 0 tests FAIL - test complete on titan 80 tests PASS, 0 tests FAIL - test complete on yellowstone_pgi 80 tests PASS, 0 tests FAIL - Also ran cesm1_2_alpha02a prealpha tests - -=============================================================== - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_6_0 -Originator(s): Jim -Date: 02-04-2013 -One-line Summary: fix bug in genindexedblock to allow larger arrays, separate vdc init routine - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - M piolib_mod.F90 - M calcdisplace_mod.F90 - M pio_types.F90 - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: mpas decomp issue when two leading non-decomposed dimensions. - -Describe any changes made to scripts/build system: - Updated tests to reflect -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: - -Regression tests: test complete on yellowstone 81 tests PASS, 0 tests FAIL - test complete on titan 80 tests PASS, 0 tests FAIL - test complete on yellowstone_pgi 80 tests PASS, 0 tests FAIL - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_5_7 -Originator(s): Jim -Date: 11-04-2012 -One-line Summary: minor change in box_rearrange to avoid intel compiler bug - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - box_rearranger.F90.in : a variable was changed from i4 to i8 that didn't need to be - this was tickling a known issue in the intel compiler v 11.1 -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Self - -Summary of pre-tag testing: - - Regression tests: - yellowstone 80 tests PASS, 0 tests FAIL - lynx_intel 81 tests PASS, 0 tests FAIL - bluefire 114 tests PASS, 0 tests FAIL -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name:pio1_5_6 -Originator(s): Jim -Date: 10-30-2012 -One-line Summary: minor change in pnetcdf interface, update documentation, update yellowstone in testpio - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Index: Doxyfile - Index: doc/Decomp.txt - Index: doc/GettingStarted.txt - Index: doc/base.txt - Index: testpio/testpio.F90 - Index: testpio/testpio_run.pl - Index: testpio/build_defaults.xml - Index: testpio/Utils.pm - Index: pio/pionfwrite_mod.F90 - Index: pio/pionfwrite_mod.F90.in - Index: pio/pionfput_mod.F90 - Index: pio/pionfput_mod.F90.in - Index: pio/pio_types.F90 - Index: pio/piolib_mod.F90 - Index: pio/piovdc.F90 - Index: pio/iompi_mod.F90.in - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: - bluefire 114 tests PASS, 0 tests FAIL - yellowstone 80 tests PASS, 0 tests FAIL - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_5_5 -Originator(s): Jim Edwards -Date: 10-15-2012 -One-line Summary: bug fix for large >4GB data, cmake tools, updated timing lib - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - testpio/build_defaults.xml: - testpio/Utils.pm: - testpio/testpio_run.pl : port to erebus and yellowstone - pio/box_rearrange.F90.in : promote some variables i4 to i8, remove debug prints - pio/calcdecomp.F90 : correct product calculation to i8 - pio/topology.c : remove extra print statements (bluegene only) - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: - bluefire 114 tests PASS, 0 tests FAIL - yellowstone 80 tests PASS, 0 tests FAIL - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_5_4 (ChangeLog was not updated for pio1_5_3) -Originator(s): Jim Edwards -Date: 9-12-2012 -One-line Summary: move testlib to test directory make sure f95 compatible when - _COMPRESSION is not defined. 
Clean up pionfput_mod.F90.in - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - testpio/testpio.F90 - testpio/testpio_run.pl - testpio/build_defaults.xml - testpio/namelists/testpio_in.vdc01 - testpio/test_lib.F90 - testpio/gdecomp_mod.F90 - testpio/Utils.pm - pio/pionfwrite_mod.F90.in - pio/C_interface_mod.F90 - pio/calcdisplace_mod.F90 - pio/pionfput_mod.F90.in - pio/pio_types.F90 - pio/box_rearrange.F90.in - pio/pio_support.F90 - pio/pionfatt_mod.F90 - pio/pio_spmd_utils.F90 - pio/piodarray.F90.in - pio/nf_mod.F90 - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: tested with several cesm1_1_alpha18a tests - - Regression tests: - bluefire 114 tests PASS, 0 tests FAIL - carver 81 tests PASS, 0 tests FAIL (intel) - hopper 80 tests PASS, 0 tests FAIL (pgi) -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_5_2 -Originator(s): Jim -Date: 7-21-2012 -One-line Summary: bug fix in calcdecomp, port to carver - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - M pio/calcdecomp_mod.F90 - M testpio/Utils.pm - M testpio/build_defaults.xml - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: Wrong length of gaps array caused computation of block length to always return 1 - which increased memory consumption 3x or more - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - Substantial memory reduction in box_rearranger - - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests:114 of 114 tests pass on Bluefire - 81 of 81 pass on lynx_intel - 81 of 81 pass on carver - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_5_1 -Originator(s): Jedwards -Date: July 09, 2012 -One-line Summary: Fix issues in async, make pnetcdf put calls root only - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - testpio/build_defaults.xml - pio/piodarray.F90 - pio/pio_msg_callbacks.F90 - pio/pionfput_mod.F90 - pio/pionfput_mod.F90.in - pio/piolib_mod.F90 - pio/pio_msg_mod.F90 - pio/piodarray.F90.in - pio/test_lib.F90 - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: 81 of 81 tests pass on Janus - 114 of 114 tests pass on Bluefire - 80 of 80 pass on lynx - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_5_0 -Originator(s): jedwards, ypolius -Date: 6-28-2012 -One-line Summary: New VDC2 output feature added - -List all subroutines eliminated: - testpio/utils_mod.F90 - subroutine Readheader is not used and was removed -List all files added and what they do: - pio/piovdc.F90 - Interface to the piovdc library - testpio/namelists/testpio_in.vdc01 - A test of vdc2 functionality - pio/m4/expat.m4 - Test of expat availability for autoconf - pio/C_interface_mod.F90 - Some helper functions for C interoperability - pio/test_lib.F90 - Another vdc2 development test - this should probably be removed. 
- -List all existing files that have been modified, and describe the changes: - testpio/testpio.F90 - testpio/namelist_mod.F90 - testpio/testpio_run.pl - testpio/gdecomp_mod.F90 - Updated dof arrays to be i8, added a vdc test - - testpio/testpio_build.pl - testpio/build_defaults.xml - testpio/Makefile - Updated to link with CXX compiler if necessary and libraries to support vdc2 (expat) - - testpio/namelists/testpio_in.wr01 - Fix a minor typo - - pio/aclocal.m4 - pio/configure.ac - pio/configure - pio/Makefile - pio/config.h.in - pio/Makefile.conf.in - Refactored to allow building with CXX and the (external) piovdc library - - pio/pio_types.F90 - Added new pio_iotype_vdc2 - - pio/pio.F90 - pio/piolib_mod.F90 - pio/piodarray.F90.in - pio/nf_mod.F90 - Added support for vdc2 datatype (requires _COMPRESSION) - - pio/pio_support.F90.in - pio/box_rearrange.F90.in - Changed several variables to be of type pio_offset and added a test to check the kind of this variable - - pio/calcdecomp.F90 - Removed dead code - - -List all component tags that were used to create pio tag: - https://parallelio.googlecode.com/svn/libpiovdc/trunk_tags/libpiovdc_1_0_1 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: Janus: 81 tests pass, 0 fail - Bluefire: 115 tests pass, 0 fail - Lynx: 80 tests pass, 0 fail - Lynx_intel: 80 pass, 0 fail - Intrepid (Challenger) 74 pass, 0 fail (testpio_run.pl was modified to avoid tests that are expected to fail) - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_4_6 -Originator(s): abaker, jedwards, goldhaber -Date: 6-26-2012 -One-line Summary: Improve BGP performance, bug fix in GCD calculation - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - timing/GPTLget_memusage.c - Memory usage on intrepid was only being reported for IO nodes, - with this fix we now report all tasks - testpio/testpio.F90 - Problem with mpi_finalize on bg machines - needs to be addressed - testpio/testpio_run.pl - Fix issue with path to NETCDF on bluefire - testpio/build_defaults.xml - Update testpio paths on lynx, lynx_intel - pio/calcdisplace_mod.F90 - Fix GCD calculation bug - pio/piolib_mod.F90 - Update Bluegene interface - pio/topology.c - - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: GCD was not always giving consistent results - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: intrepid 74 tests PASS, 6 tests FAIL (expected) - Bluefire: 113 tests PASS, 0 tests FAIL - lynx intel : 80 tests PASS, 0 tests FAIL - lynx pgi : 80 tests PASS, 0 tests FAIL - - -=============================================================== -Tag name: pio1_4_5 -Originator: Jim -Date: 05-19-2012 -One-line Summary: bug fix in calcdecomp.F90 - pio/calcdecomp.F90 - -tests: - bluefire - lynx pgi - lynx intel - challenger (intrepid) - -=============================================================== -Tag name: pio1_4_4 -Originator: Jim -Date: 04-18-2012 -One-line Summary: Minor fixes for mpi-serial build and mpi_abort syntax - -Modified Files: - pio/pio_support.F90.in - pio/pio_support.F90 - pio/pio_types.F90 - -test complete on lynx 80 tests PASS, 0 tests FAIL - - -Tag name: pio1_4_3 -Originator(s): Jim, John Dennis -Date: 03-21-2012 -One-line Summary: Add test decomp for MPAS, additional lustre optimizations - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - - testpio/testpio.F90 - testpio/config_bench.xml - testpio/MPASA30km.csh - testpio/CAM05.csh - testpio/build_defaults.xml - testpio/POPDv2.csh - testpio/testpio_bench.pl - testpio/namelist_mod.F90 - testpio/gdecomp_mod.F90 - testpio/MPASA60km.csh - testpio/Utils.pm - testpio/POPB.csh - - pio/pio_types.F90 - pio/piolib_mod.F90 - pio/pio_kinds.F90 - pio/box_rearrange.F90.in - pio/box_rearrange.F90 - Fix some minor issues, including further protections against calling mpi with invalid (comm,info) handles. 
- - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: - Regression tests: Bluefire: 113 tests PASS, 0 tests FAIL - lynx intel : 80 tests PASS, 0 tests FAIL - lynx pgi : 80 tests PASS, 0 tests FAIL - - -=============================================================== -Tag name: pio1_4_2 -Originator(s): Jim -Date: 03-??-2012 -One-line Summary: Correct cppdef flags around use mpi call in pio_msg_callbackes.F90 - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_4_1 -Originator(s): Jim -Date: 02-23-2012 -One-line Summary: async pnetcdf writes, bug fix in calcdecomp - -List all subroutines eliminated: - -List all subroutines added and what they do: - pio_set_blocksize: PIO will try to generate contiguous blocks of this size on - each iotask - pio_set_buffer_limit_size: PIO will buffer up to this limit of bytes of data - before flushing to disk (pnetcdf only) - -List all existing files that have been modified, and describe the changes: - -pio/calcdecomp.F90 - Add code allowing for runtime setting of blocksize with default 884736 bytes - Correct bug introduced in 1.4.0 which may cause index to exceed dimension bounds -pio/calcdisplace_mod.F90 - A few more variables need to be i8 for > 2GB array support -pio/pio.F90 - Add new functions pio_set_buffer_size_limit and pio_set_blocksize to public entities - -pio/piolib_mod.F90 -pio/pio_msg_callbacks.F90 - Add function freedecomp_handler for async interface (fixes longstanding expected test failure) -pio/pio_msg_mod.F90 - Correct handling of iodesc when io is async -pio/pio_types.F90 -pio/piodarray.F90.in -pio/pionfwrite_mod.F90.in - Add support for buffering IO fields written using pnetcdf async interface - - - -testpio/POPDv2.csh -testpio/Utils.pm -testpio/build_defaults.xml -testpio/namelists/testpio_in.pb06 - Update hopper interface, reduce size of arrays in pb06 to prevent memory blowout - - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - async pnetcdf change appears to be a 10x improvement on some systems - - -Code reviewed by: Self - -Summary of pre-tag testing: - - Regression tests: Bluefire: 113 tests PASS, 0 tests FAIL - lynx intel : 80 tests PASS, 0 tests FAIL - Hopper pgi : 113 tests PASS, 0 tests FAIL - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_4_0 -Originator(s): Jim -Date: 02-07-2012 -One-line Summary: add i8 ldof support - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - testpio/testpio.F90 - testpio/testpio_run.pl - testpio/utils_mod.F90 - testpio/Makefile - testpio/build_defaults.xml - testpio/testpio_bench.pl - testpio/namelist_mod.F90 - testpio/check_mod.F90 - testpio/Utils.pm - - - pio/pio_msg_getput_callbacks.F90 - pio/configure - pio/pionfwrite_mod.F90 - pio/alloc_mod.F90 - pio/pionfwrite_mod.F90.in - pio/piodarray.F90 - pio/pio_msg_callbacks.F90 - pio/alloc_mod.F90.in - pio/pio_spmd_utils.F90.in - pio/pionfput_mod.F90 - pio/pionfread_mod.F90 - pio/calcdisplace_mod.F90 - pio/configure.ac - pio/rearrange.F90 - pio/pio_utils.F90 - pio/pionfput_mod.F90.in - pio/pio_types.F90 - pio/iompi_mod.F90 - pio/pio_mpi_utils.F90 - pio/piolib_mod.F90 - pio/pionfget_mod.F90.in - pio/pio_kinds.F90 - pio/box_rearrange.F90.in - pio/pio_support.F90 - pio/pionfatt_mod.F90 - pio/pio_support.F90.in - pio/pio_msg_getput_callbacks.F90.in - pio/pio_spmd_utils.F90 - pio/calcdecomp.F90 - pio/ionf_mod.F90 - pio/pio_msg_mod.F90 - pio/piodarray.F90.in - pio/nf_mod.F90 - pio/pionfget_mod.F90 - pio/rearrange.F90.in - pio/pionfread_mod.F90.in - pio/box_rearrange.F90 - pio/aclocal.m4 - pio/iompi_mod.F90.in - - Added code to detect mpi.mod and use it if it exists, falling back to the - mpif.h if it does not. With some mpi implemenations this gives strong type checking - in the mpi interface. Made all ldof related internals size i8 and added interfaces - to call pio_initdecomp with i8 dof's, corrected calcdecomp code to consider max block - size as well as min block size. - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: - Bluefire: - Janus - pgi - lynx - intel - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_13 -Originator(s): Jim, John -Date: 1-23-2012 -One-line Summary: remove extra arrays for memory savings - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - pio_types.F90 add an ndof field and a padding int to the iodesc type structure - box_rearranger.F90.in deallocate dest_ioproc and dest_ioindex after iodesc initialization. - these are only needed for mpi-serial after this point. - piodarray.F90.in allow for 7 possible dimensions on input arrays - this is the fortran limit. 
- - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: Lynx PGI 74 Pass, 6 expected failures - Bluefire 74 pass, 6 expected failures. - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_12 -Originator(s): Jim -Date: 10-31-2011 -One-line Summary: Make flowcontrol default, add workaround for mpi_rsend bug in cray/gemini - -List all subroutines eliminated: none - -List all subroutines added and what they do: none - -List all existing files that have been modified, and describe the changes: - pio_spmd_utils.F90.in - pio_support.F90.in - box_rearrange.F90.in - - Added code to replace MPI_RSEND, MPI_IRSEND with MPI_SEND and MPI_ISEND when _NO_MPI_RSEND is - defined. Made _USE_FLOW_CONTROL the default by replacing the macro with its negative _NO_FLOW_CONTROL - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - -Code reviewed by: self - -Summary of pre-tag testing: Full set of pretag testing completed on hopper. - - Regression tests: bluefire 74 PASS, 6 expected failures - hopper 74 tests PASS, 6 tests FAIL - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_11 -Originator(s): Jim -Date: 8/26/2011 -One-line Summary: Further improvement of calcdecomp.F90 - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - calcdecomp.F90 : After computing the optimal number of ioprocs we - adjust that number down to be a multiple of the last decomposed - dimension. This appears to be very robust, avoiding the edge cases - that all of these recent tags have tried to address. 
- -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: 74 PASS, 6 expected failures - lynx 74 PASS, 6 expected failures - edinburgh 22 PASS, 2 expected failures - In addition the startandcount serial test program was run on - bluefire, this program should be run following any subsequent - changes to calcdecomp.F90 - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_10 -Originator(s): Edwards -Date: 8/23/11 -One-line Summary: A robust solution for calcdecomp.F90 - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Calcdecomp.F90 - Added a correction for an outstanding edge - condition in this algorithm, tested, hopefully thoroughly - buildconf.xml updated settings for netcdf on bluefire - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: 74 PASS, 6 expected failures - lynx 74 PASS, 6 expected failures - edinburgh 22 PASS, 2 expected failures - In addition the startandcount serial test program was run on - bluefire, this program should be run following any subsequent - changes to calcdecomp.F90 - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_9 -Originator(s): Edwards -Date: 8-12-2011 -One-line Summary: refactor calcdecomp again - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Calcstartandcount: it is clear that the testsuite is not providing adequate - coverage for testing the validity of this algorithm. I keep finding issues - with cesm - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Self - -Summary of pre-tag testing: - - Regression tests: bluefire 74 PASS, 6 expected failures - lynx 74 PASS, 6 expected failures - edinburgh 22 PASS, 2 expected failures - - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_3_8 -Originator(s): Edwards -Date: 8-9-2011 -One-line Summary: Update calcdecomp.F90 - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Corrected a further edge error in calcdecomp.F90, added startandcount - target to Makefile - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: bluefire 74 PASS, 6 expected failures - lynx 74 PASS, 6 expected failures - edinburgh 22 PASS, 2 expected failures - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_7 -Originator(s): Edwards -Date: 8-9-2011 -One-line Summary: Update calcdecomp.F90 - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Calcdecomp.F90 was generating bad start and count arrays when - gdims(n) was smaller than the number of io tasks. - Algorithm was changed to resolve this and a unit test was added in case - of further issues. - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: bluefire 74 PASS, 6 expected failures - lynx 74 PASS, 6 expected failures - edinburgh 22 PASS, 2 expected failures - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_6 -Originator(s): Edwards -Date: 8-4-2011 -One-line Summary: handle genf90.pl correctly - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - pio/Makefile - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: none - testing in cesm1_1_alpha02b - - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_3_5 -Originator(s): Edwards -Date: 8-4-2011 -One-line Summary: update autoconf and build system - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Index: testpio/testpio.F90 - Index: testpio/build_defaults.xml - Add code to use the AMPI compiler from the NCSA CHARM++ project, this - is not working yet. - Index: pio/m4/acx_mpi.m4 - Updated to work on cray systems with ftn and cc - Index: pio/m4/fortran.m4 - Updated to latest autoconf - Index: pio/alloc_mod.F90.in - Index: pio/alloc_mod.F90 - Added Debug code - Index: pio/pio_msg_callbacks.F90 - Added an implicit none statement - Index: pio/Makefile - Index: pio/config.h.in - Index: pio/Makefile.conf.in - Index: pio/configure.ac - Clean up and make configure and build work from other than the source directory - Index: pio/pio_types.F90 - Added null initialization for start and count variables - this is an F95 feature - Index: pio/piolib_mod.F90 - Fixed some issues in start and count variables of iodesc_t structure. -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: bluefire: 74 tests PASS, 6 tests FAIL (expected) - edinburgh: 20 pass 2 fail (expected) - lynx : test complete on lynx 74 tests PASS, 6 tests FAIL (expected) - - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_2 -Originator(s): Edwards -Date: 7-24-2011 -One-line Summary: Fix some autoconf and cesm issues - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Added some debug logging in alloc_mod.F90.in - Added null initialization of some pointers in pio_types.F90, this is an F95 issue. - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_3_1 -Originator(s): Edwards, Dennis -Date: 07/06/2011 -One-line Summary: update autoconf tools, remove getiostartandcount, - Add optimizations for lustre platform - -List all subroutines eliminated: - -List all subroutines added and what they do: - added m4 subdirectory with various autoconf tools - config.h.in - added calcdecomp as a replacement for getiostartandcount - -List all existing files that have been modified, and describe the changes: - configure.ac, Makefile.conf.in - Updated autoconf support - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: http://bugs.cgd.ucar.edu/show_bug.cgi?id=1372 - -Describe any changes made to scripts/build system: - configure.ac was brought up to compatibility with autoconf 2.63 - a macros subdirectory was added with additional fortran and netcdf macros - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Edwards - -Summary of pre-tag testing: - - Regression tests: bluefire 80 pass, 0 fail - jaguar (pgi) 80 tests PASS, 0 tests FAIL - lynx_gnu 80 tests PASS, 0 tests FAIL - edinburgh (pgi) 22 pass 2 fail (expected) - - - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_3_0 -Originator(s): Edwards, Dennis -Date: 05/04/2011 -One-line Summary: Code Clean up, box rearranger bug fixes - -List all subroutines eliminated: mct_rearrange.F90.in - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - testpio/build_defaults.xml - testpio/namelist_mod.F90 - pio/configure.in - pio/configure - pio/rearrange.F90.in - pio/pio_types.F90 - pio/Makefile - pio/pio.F90 - Remove support for mct rearranger - pio/piodarray.F90.in - Fix line return in string issue - pio/calcdisplace_mod.F90 - Fix issues with handling gaps (holes) in source data array - pio/piolib_mod.F90 - Remove calcdisplace code duplicated in calcdisplace_mod - pio/box_rearranger.F90.in - Avoid 0 sized array errors - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: Errors with handling holes in input arrays found with cam regression tests - -Describe any changes made to scripts/build system: - Removed references to mct in build - -Describe any substantial timing or memory changes: - - -Code reviewed by: Edwards - -Summary of pre-tag testing: Cam regressions on bluefire and edinburgh with xlf, pgi, lahey compilers - all pass - - bluefire 80 tests PASS, 0 tests FAIL - edinburgh (lahey) 22 pass 2 fail (expected) - jaguar (pgi) 80 pass, 0 Fail - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_2_7 -Originator(s): John Dennis -Date: 04-25-2011 -One-line Summary: Significant reduction in memory usage due to changes in the way MPI data structures are used in the box rearranger. 
- -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: cam pretag testing on bluefire and edinburgh(PGI) all pass - - Regression tests: bluefire (AIX) 80 PASS, 0 FAIL - edinburgh (lahey) 22 pass 2 fail (expected) - lynx (pgi) 80 pass, 0 Fail - frost (BGL) 74 pass, 6 fail (expected) - -=============================================================== - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_2_6 -Originator(s): Jim -Date: 11-19-2010 -One-line Summary: Fix problems with single task run, cleanup mpi_info_set calls - make start and count args static - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: cam pretag testing on bluefire and edinburgh(PGI) all pass - - Regression tests: bluefire (AIX) 113 PASS, 0 FAIL - edinburgh (lahey) 22 pass 2 fail (expected) - lynx (pgi) 80 pass, 0 Fail - frost (BGL) 74 pass, 6 fail (expected) - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name:pio1_2_5 -Originator(s): Jim -Date: 11-17-2010 -One-line Summary: Fixed problems with single task (mpi-serial) runs. - Improved lynx port - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - pio/piolib_mod.F90 force rearranger to none when total tasks = 1 - testpio/Utils.pm added module support for lynx - - -List all component tags that were used to create pio tag: - MCT2_7_0_100228-mpiserial101109_tag02 -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: lynx 80 PASS 0 FAIL - bluefire 107 PASS, 0 FAIL - frost 74 PASS, 6 FAIL (expected failures) - edinburgh 22 PASS, 2 FAIL (expected failures) - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_2_4 -Originator(s): Jim -Date: 11-15-2010 -One-line Summary: Port to mirage, mpi-serial - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Some compilers export symbols included with include 'mpif.h even though - the module is default private, fixed by using only clause in pio_types - include statements - -List all component tags that were used to create pio tag: - MCT2_7_0_100228-mpiserial101109_tag02 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - added test for mirage1 - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: CESM lnd serial build - - Regression tests - test complete on mirage1 11 tests PASS, 2 tests FAIL (new system mpiserial intel compiler) - Jaguarpf 80 tests pass - frost 74 PASS, 6 FAIL (expected failures) - bluefire - edinburgh 22 pass, 2 fail (expected failures) - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_2_3 -Originator(s): Jim -Date:11-12-2010 -One-line Summary: bug fix in get_attname - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Added an internal variable of PIO_MAX_NAME length to get att names -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: cesm1_0_beta11 pretag testing on bluefire - - Regression tests: - Edinburgh (lahey) 22 tests pass 2 fail (expected async tests) - Bluefire 107 tests pass - Jaguarpf 70 tests pass - - -=============================================================== - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio1_2_2 -Originator(s): Jim -Date: 11-11-2010 -One-line Summary: minor bug fixes - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - nf_mod.F90 : modified pio_def_var to avoid a bounds check error from - the pgi compiler - piolib_mod.F90 : Removed NEED_MPI_ROOT ifdef and replaced with - NO_MPI2 and MPISERIAL wrappers - pio_support.F90 : fixed bug in variable declaration (missing ::) - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: cam stand alone and cesm tests - - Regression tests: - Edinburgh (lahey) 22 tests pass 2 fail (expected async tests) - Bluefire 107 tests pass - Jaguarpf 70 tests pass - frost 74 tests pass 6 fail (expected async tests) - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_2_1 -Originator(s): Jim, Pat Worley -Date: 11-04-2010 -One-line Summary: bug fixes - -List all subroutines eliminated: - -List all subroutines added and what they do: - camlike_decomp_generator: Adds a cam type decomposition generator for testing and performance studies - - -List all existing files that have been modified, and describe the changes: - Index: testpio/testpio_build.pl - Index: testpio/testpio.F90 - Index: testpio/testpio_run.pl - Index: testpio/build_defaults.xml - Index: testpio/testpio_bench.pl - Index: testpio/namelist_mod.F90 - Index: testpio/gdecomp_mod.F90 - Index: testpio/Utils.pm - Added support for a realistic cam-like decomp, added a new host edinburgh_openmpi, bug fix for - mpich-1 on edinburgh (MPI_ROOT is not defined in mpif.h) - Updated intrepid flags to use netcdf-4 - - Index: pio/aclocal.m4 - Index: pio/configure - Index: pio/configure.ac - Fixed a bug in getting mpif90 from the environment - - Index: pio/piolib_mod.F90 - Index: pio/ionf_mod.F90 - Improved Debug print statements, code clean up - - Index: pio/nf_mod.F90 - Renamed some variables which had the same name as the fortran len intrinsic - - Index: pio/box_rearrange.F90.in - Index: pio/box_rearrange.F90 - Minor change in flow control options (Pat) - - alloc_mod - piolib_mod - pio_support - Removed references to mpi_comm_world - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: - cam regressions on bluefire - Regression tests: - bluefire 107 tests PASS, 0 tests FAIL - jaguar 80 tests PASS, 0 tests FAIL - intrepid 60 tests PASS, 5 tests FAIL - Support for async on bluegene is incomplete in this tag. 
- test complete on edinburgh 22 tests PASS, 2 tests FAIL - snet asb01 FAIL - snet asb04 FAIL - These are expected failures due to mpich not - supporting MPI-2 intercommunicators - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_2_0 -Originator(s): Jim -Date: 10-14-2010 -One-line Summary: Support for disjoint IO tasks - -List all subroutines eliminated: - -List all subroutines added and what they do: -A + - pio_msg_callbacks.F90 -A + - pio_msg_getput_callbacks.F90.in - The subroutines in these files are the interface between the disjoint io tasks - and the pio functions; to avoid circular dependencies they are not written in - module form. Each subroutine collects the input arguments from the compute - tasks and calls the requested function. Communication of function outputs back to the - compute tasks happens within the function, so no post call clean up is required. - -A + - pio_msg_mod.F90 - This module contains the loop in which the disjoint io tasks wait for requests from - compute tasks. Multiple compute communicators with distinct iosystem descriptors are - supported. - - -A + - dtypes.h - Move type macros to an include file to be shared by multiple pio internal files - - -List all existing files that have been modified, and describe the changes: - pionfwrite_mod.F90.in - piodarray.F90 - pio_utils.F90 - pionfput_mod.F90.in - pio_types.F90 - piolib_mod.F90 - pionfget_mod.F90.in - Makefile - pio_support.F90 - box_rearrange.F90.in - pio.F90 - pio_support.F90.in - pionfatt_mod.F90.in - ionf_mod.F90 - piodarray.F90.in - nf_mod.F90 - genf90.pl - mct_rearrange.F90 - - Add support for disjoint io tasks. Add tests for disjoint tasks to testsuite. - Improve support for netcdf4. - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: Added new files to build - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_1_2 -Originator(s): Jim -Date: 9-27-2010 -One-line Summary: getiostartandcount fix - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - piolib_mod.F90: removed fanning functionality in getiostartandcount - this feature was not robust and was failing to converge in a number of cases. - Fixed a case which allowed division by 0 if count(1)==0. 
- -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - -Code reviewed by: self, Tony - -Summary of pre-tag testing: - - Regression tests: - bluefire 107 tests PASS, 0 tests FAIL - jaguar 74 tests PASS, 0 tests FAIL - edinburgh 22 tests PASS, 0 tests FAIL - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_1_1 -Originator(s): Jim -Date: 05-26-2010 -One-line Summary: fix for mpi-serial build - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - piolib_mod.F90 correct #endif placement. - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - updated jaguar modules -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: - Regression tests: bluefire 107 tests PASS, 0 tests FAIL - jaguar 80 tests PASS, 0 tests FAIL - test complete on edinburgh 22 tests PASS, 2 tests FAIL - snet asb01 FAIL - snet asb04 FAIL - These are expected failures due to mpich not - supporting MPI-2 intercommunicators - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
- pio_kinds.F90 - Increase default string length - box_rearrange.F90.in - Moved stdout info to debug mode only - ionf_mod.F90 - Improved error handling - - - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: - bluefire 107 tests PASS, 0 tests FAIL - edinburgh 22 tests PASS, 0 tests FAIL - edinburgh_intel 22 tests PASS, 0 tests FAIL - intrepid - jaguarpf - - -=============================================================== - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_0_18 -Originator(s): John -Date: 03-19-2010 -One-line Summary: Reworked testpio execution scripts - -List all subroutines eliminated: - -List all subroutines added and what they do: - - * runString creates an 'mpirun' string for a particular system (Utils.pm) - * submitString creates a 'qsub' string for a particular system (Utils.pm) - -List all existing files that have been modified, and describe the changes: - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: pgi compiler could not open large files with direct access, thus causing ccsm restart failures - -Describe any changes made to scripts/build system: - - Reworked the testpio execution scripts such that the number of cores is now a - command line argument. Added preliminary support for columbia - -Describe any substantial timing or memory changes: - -Code reviewed by: self & Jim - -Summary of pre-tag testing: - - Regression tests: - bluefire 107 tests PASS, 0 tests FAIL - edinburgh 22 tests PASS, 0 tests FAIL - edinburgh_intel 22 tests PASS, 0 tests FAIL - frost 74 tests PASS, 0 tests FAIL - kraken 74 tests PASS, 0 tests FAIL - -=============================================================== - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
***
-
-Tag name: pio1_0_17
-Originator(s): Jim
-Date: 03-18-2010
-One-line Summary: Remove file detection subroutine
-
-List all subroutines eliminated: check_file_type
-
-List all subroutines added and what they do:
-
-List all existing files that have been modified, and describe the changes:
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Bugs fixed: pgi compiler could not open large files with direct access thus causing ccsm restart failures
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests:
-      ccsm regression on jaguar
-      bluefire 107 tests PASS, 0 tests FAIL
-      edinburgh 22 tests PASS, 0 tests FAIL
-      edinburgh_intel 22 tests PASS, 0 tests FAIL
-      frost 75 tests PASS, 0 tests FAIL
-      jaguar 75 tests PASS, 0 tests FAIL
-
-===============================================================
-
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio1_0_16
-Originator(s): John
-Date: 03-10-2010
-One-line Summary: Updates to testing infrastructure
-
-List all subroutines eliminated:
-
-List all subroutines added and what they do:
-
-   - Added file config_bench.xml which describes several different
-     benchmarks which can be run.
-   - Added file testpio_bench.pl which executes a benchmark using
-     a single command.
-
-List all existing files that have been modified, and describe the changes:
-
-   - Minor cleanup of memory usage in testpio
-   - Moved some host specific code into Utils.pm
-   - Marked code in the perl scripts that are host specific
-   - Allowed the use of negative numIOprocs in namelist for BGL
-   - Removed the b14 benchmark.  This was only added as experimental
-     code and it exceeds memory on Blue Gene/L when running on
-     64 MPI tasks.
-   - Removed the 'bench' testsuite.  This was only added as
-     experimental code.
-   - Added support for Kraken in PIO testsuite
-   - Added script support in testpio_run.pl on Frost.
-   - Turned on flow-control as default mode on frost
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-   - Minor cleanup of memory usage in testpio
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests:
-      bluefire 107 tests PASS, 0 tests FAIL **
-      frost 74 tests PASS, 0 tests FAIL **
-      kraken 74 tests PASS, 0 tests FAIL
-      CAM regression
-      ccsm pretags
-
-   ** Note that the reduction in the number of tests is the result of removing the 'bench' testsuite
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing.
*** - -Tag name: pio1_0_15 -Originator(s): Jim -Date: 03-03-2010 -One-line Summary: unfix of pionfread_mod.F90 - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: - bluefire 119 tests PASS, 0 tests FAIL - edinburgh 22 tests PASS, 0 tests FAIL - edinburgh_intel 22 tests PASS, 0 tests FAIL - frost 75 tests PASS, 0 tests FAIL - jaguar 75 tests PASS, 0 tests FAIL - ccsm pretags - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_0_13 -Originator(s): Jim -Date: 3-2-2010 -One-line Summary: fix bug in internal start and count array lengths - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - pionfread_mod.F90.in - pionfwrite_mod.F90.in - pionfput_mod.F90.in - internal start and count arrays were dependent on the lengths of - the start and count arrays passed in. They should instead be dependent - on the number of dimensions of the variable being read or written. - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: - bluefire 119 tests PASS, 0 tests FAIL - edinburgh 22 tests PASS, 0 tests FAIL - edinburgh_intel 22 tests PASS, 0 tests FAIL - frost 75 tests PASS, 0 tests FAIL - jaguar 75 tests PASS, 0 tests FAIL - all cam pretag tests on bluefire - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_0_12 -Originator(s): Jim, John -Date: 02-35-2010 -One-line Summary: bug fixes based on cam tests (future tags should be made after running cam regression) - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - piolib_mod : corrected assumptions about number of decomp dimensions and number of tasks in io. 
-
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-
-
-Code reviewed by: Jim
-
-Summary of pre-tag testing:
-
-   Regression tests:
-      bluefire 119 tests PASS, 0 tests FAIL
-      edinburgh 22 tests PASS, 0 tests FAIL
-      edinburgh_intel 22 tests PASS, 0 tests FAIL
-      frost 75 tests PASS, 0 tests FAIL
-      jaguar 75 tests PASS, 0 tests FAIL
-      all cam pretag tests on bluefire, edinburgh and calgary.
-
-===============================================================
-
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio1_0_11 (No changelog entry was made for pio1_0_10)
-Originator(s): Jim, John
-Date: 02-24-2010
-One-line Summary: Blue Gene specific optimization
-
-List all subroutines eliminated:
-   Removed the Makefile.conf.bluefire and changed testpio_run.pl so that configure is
-   run using the bash shell on bluefire. (ksh is slow)
-
-List all subroutines added and what they do:
-
-   PIO_getnumiotasks: This returns the actual number of IO tasks
-      used by PIO.
-
-   nextlarger: Calculates the next larger value which evenly divides an input argument.
-
-   determineiotasks: A special routine only for Blue Gene which will load balance
-      io-tasks across IO-nodes
-
-   identity: A Blue Gene specific routine which prints out node identity
-
-List all existing files that have been modified, and describe the changes:
-
-   pio/configure.ac
-      - Added CFLAGS to configure stdout
-
-   pio/pio_types.F90
-      - Moved ioranks array into type declaration.  This is a
-        necessary generalization because we no longer have even
-        strides for the IO ranks.
-
-   pio/piolib_mod.F90
-      - getiostartandcount has been generalized to provide more
-        functionality including:
-
-        - Block decomposition in the z-dimension
-        - A fan-in/out limiter for the decomposition creation loop
-
-      - PIO_inidecomp_dof has been modified to call determineiotasks
-        if on a Blue Gene system
-      - add BGP and BGL ifdef
-
-   pio/Makefile
-      - Includes the Blue Gene specific topology.c file
-
-   pio/box_rearrange.F90.in
-      - Removed assumption about a fixed io-stride in the box rearranger
-
-   pio/pio.F90
-      - exported public PIO_getnumiotasks subroutine
-
-   testpio/testpio_build.pl
-      - Exported ALLCFLAGS
-
-   testpio/testpio.F90
-      - Added test which only allocates arrays if they are used by the code.
-      - Added the option to do a 2-phase pass to measure performance.  The
-        first phase writes out all files, while the second phase will read all
-        files.  This 2-phase approach does a better job of filtering out the
-        impact of read-caching on performance measurements.
- - Fixed the diagnostic which prints out the location of the maximum memory usage - - testpio/testpio_run.pl - - Added the new testsuite 'bench' - - testpio/build_defaults.xml - - updated the version of pnetcdf on frost - - testpio/Utils.pm - - Added some module commands for kraken - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - Blue Gene specific optimizations that balance the number of io-tasks to io-nodes. - - Added test which only allocates arrays if they are used by the code. - - Added the option to do 2-phase pass to measure performance. The - first phase writes out all files, while the second phase will read all - files. This 2-phase approach does a better job of filtering out the - impact of read-caching on performance measurements. - -Code reviewed by: John, Jim - -Summary of pre-tag testing: - bluefire 119 tests PASS, 0 tests FAIL - edinburgh 22 tests PASS, 0 tests FAIL - edinburgh_intel 22 tests PASS, 0 tests FAIL - frost 75 tests PASS, 0 tests FAIL - jaguar 75 tests PASS, 0 tests FAIL - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_0_9 -Originator(s): Jim, Pat Worley -Date: -One-line Summary: handshaking for cray - -List all subroutines eliminated: - -List all subroutines added and what they do: pnetcdf_version_check - check that the pnetcdf version is new enough for pio - -List all existing files that have been modified, and describe the changes: - - pio_spmd_utils.F90.in - - Minor clean-up. - - pio_support.F90.in - - Introduced of a new routine PIO_FC_GATHER_INT - - Replaced call to MPI_GATHER in pio_writedof with a call to PIO_FC_GATHER_INT - - Added handshaking messages to point-to-point gather-like operation in pio_writedof, enabled by the CPP flag _USE_FLOW_CONTROL. Without this flag, the original logic is executed. - - I do not know if the original MPI_GATHER call was causing - problems, but gathers have become a red flag to me. If the CPP - flag _USE_FLOW_CONTROL is not defined, then PIO_FC_GATHER_INT - defaults to a call to MPI_GATHER. Defining _USE_FLOW_CONTROL - enables the point-to-point implementation with flow control. I - originally put this new routine in pio_spmd_utils, but circular - module dependencies forced me to put it here. - - box_rearrange.F90.in - - - Added the following to the module preamble: - -#ifdef _USE_FLOW_CONTROL -#define _USE_COMP2IO_FC 1 -#define _USE_IO2COMP_FC 1 -#define _USE_CREATE_FC 1 -#endif - - - Minor clean-ups: renamed USESPACE to be _USE_SPACE, introduced more consistent spacing in subroutine calls, assignments, etc. - - - Introduced replacement implementations of the routines BOX_REARRANGE_CREATE and COMPUTE_COUNTS when _USE_FLOW_CONTROL is defined. - - The new BOX_REARRANGE_CREATE uses PIO_FC_GATHER_INT. The new - COMPUTE_COUNTS uses SWAPM. The memory requirements are similar to - the USESPACE branch in the original code, which is the default as - far as I can tell. (The comments are a little confusing.) 
The - performance of the MPI communication logic should be reasonable in - the new implementations. Any extra cost will come from the memory - requirements (allocate/assignment/deallocate). Since this is - similar to what is used in USESPACE, I doubt that we will notice - any difference. - - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - bluefire 107 tests PASS, 0 tests FAIL - jaguarpf 74 tests PASS, 0 tests FAIL - edinburgh (intel compiler) 22 tests PASS, 0 tests FAIL - edinburgh (lahey compiler) 22 tests PASS, 0 tests FAIL - frost 74 tests PASS, 0 tests FAIL - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio1_0_8 -Originator(s): Jim -Date: 02-02-10 -One-line Summary: add netcdf4 backend - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: -M 58 pionfwrite_mod.F90.in -M 58 pionfput_mod.F90.in -M 58 pionfatt_mod.F90.in -M 58 piodarray.F90.in -M 58 pionfget_mod.F90.in -M 58 pionfread_mod.F90.in -M 58 pio_types.F90 -M 58 pio.F90 -M 58 ionf_mod.F90 -M 58 nf_mod.F90 -M 58 pio_utils.F90 -M 58 piolib_mod.F90 -M 58 pio_spmd_utils.F90 -M 58 configure.ac -M 58 aclocal.m4 - Added support for netcdf4, two new iotypes are defined PIO_IOTYPE_NETCDF4C and PIO_IOTYPE_NETCDF4P - for compressed and parallel options of netcdf4. Files opened for read should open in the correct - format - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - Added tests for netcdf4 on bluefire - - -Describe any substantial timing or memory changes: - - - -Code reviewed by: Jim - -Summary of pre-tag testing: - bluefire: 107 tests PASS, 0 tests FAIL - frost : 74 tests PASS, 0 tests FAIL - dublin : 22 tests PASS, 0 tests FAIL - Regression tests: Tested in cam - - -=============================================================== - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
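***
-
-   As an illustration of the two new iotypes described in the pio1_0_8 entry
-   above, here is a minimal Fortran sketch of creating a netcdf4 file.  The
-   file name, declarations and (omitted) error handling are illustrative only,
-   and an iosystem initialized earlier with PIO_init is assumed:
-
-      use pio
-      type(iosystem_desc_t) :: iosystem   ! set up earlier via PIO_init
-      type(file_desc_t)     :: file
-      integer               :: ierr
-
-      ! PIO_IOTYPE_NETCDF4C writes compressed netcdf4/HDF5 files serially;
-      ! PIO_IOTYPE_NETCDF4P writes netcdf4/HDF5 files in parallel.
-      ierr = PIO_createfile(iosystem, file, PIO_IOTYPE_NETCDF4C, 'example.nc', PIO_clobber)
-      ! ... define dimensions and variables, write data as with other iotypes ...
-      call PIO_closefile(file)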
-
-Tag name: pio1_0_7
-Originator(s): Jim
-Date: 1-12-10
-One-line Summary: fix piolib_mod.F90 cpp macro bug
-
-List all subroutines eliminated:
-
-List all subroutines added and what they do:
-
-List all existing files that have been modified, and describe the changes:
-   piolib_mod.F90 corrected case of cpp macros
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests: cam test and bluefire
-
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio1_0_6
-Originator(s): Jim & John
-Date: 01-08-2010
-One-line Summary: Moved repository to google code, changed revision naming scheme
-List all subroutines eliminated:
-
-List all subroutines added and what they do:
-
-List all existing files that have been modified, and describe the changes:
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-   Removed the mct requirement
-   Removed OPT flags added by default in configure, no OPT flags are
-   included unless explicitly added by the user.
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: Jim and John
-
-Summary of pre-tag testing:
-
-   Regression tests: Bluefire, frost, dublin (pgi and intel) all tests pass
-
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio60_prod
-Originator(s): Jim
-Date: 12-07-2009
-One-line Summary: Fix problems when using lahey compiler, fix last two
-   problems in testsuite.
-
-List all subroutines eliminated:
-
-List all subroutines added and what they do:
-
-List all existing files that have been modified, and describe the changes:
-
-   pionfwrite_mod.F90.in : Removed irecv and replaced with recv
-      irecv waitall combination was failing on dublin with lahey compiler
-      no explanation was found.
-   alloc_mod.F90.in : Changed variable named size to varlen to avoid conflict
-      with intrinsic of same name.
-   piolib_mod.F90 : Modified GenIndexedBlock to allow for 0 length writes
-      thus allowing tests bn01 and bn03 to pass.
-   pio_support.F90.in: Removed a #ifdef USEMPIIO that was unneeded.
-
-   genf90.pl : Modified to print line numbers at the beginning of routines
-      instead of the end.  This should make line numbers from compilers and
-      debuggers more accurate.
-   iompi_mod.F90.in :
-
-
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Bugs fixed:
-   fixed the bn01 and bn03 tests.
- -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: - all 74 tests PASS on bluefire, frost and Jaguar - all 22 tests PASS on dublin using lahey and intel compilers. - - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio59_prod -Originator(s): Jim -Date: 10-20-2009 -One-line Summary: bug fix in pio_get_var - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - Renamed functions in nf_mod to avoid redirection, this was done for the intel compiler - Updated ChangeLog_template - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - -Bugs fixed: MPI_Bcast was incorrect when number of dimensions on file - was greater than number of dimensions in variable. - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: bluefire 70 of 74 - cam regression - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio58_prod -Originator(s): Jim -Date: 10-19-2009 -One-line Summary: Extend support for netcdf - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: - Bluefire 70 of 74 - cam regressions - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio57_prod -Originator(s): Jim, Pat Worley -Date: 10-12-2009 -One-line Summary: Fix bug in put_var_text, remove definition of MPI_INFO_NULL - defined in mpi_serial library. Change logic of handshaking to avoid deadlock - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -pionf_mod.F90.in fixed a bug that caused text writes to fail if the text variable included the netcdf unlimited dimension - -genf90.pl fixed preprocessor line numbering to refer to correct .in - file line numbers. Debuggers and core files should now point back to - the correct source line. - -pio_spmd_utils.F90.in : Changed send to isend in handshaking algorythm to avoid potential deadlock should mpi buffers become filled. 
- -pio_types.F90 removed definition of MPI_INFO_NULL when compiled with mpi-serial, mpi-serial already defines this variable. - - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: Self - -Summary of pre-tag testing: - testpio was run on bluefire and frost - cam regression tests run on bluefire, calgary and dublin - homme interpolation tested - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio56_prod -Originator(s): Jim -Date: 10-08-09 -One-line Summary: Fixed a bug in mpi_free_info - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - -Bugs fixed: mpi_free_info was being called on an undefined mpi_info handle - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: bluefire passes 70 of 74. - cam regression test. - - Regression tests: - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio55_prod -Originator(s): Jim -Date: 09-29-09 -One-line Summary: New test scripts. Added support for multidimensional darrays. - New documentation - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - There is no longer a need to flatten multidimensional arrays when calling - pio_read_darray and pio_write_darray. - -Bugs fixed: - -Describe any changes made to scripts/build system: testpio_build.pl and testpio_run.pl - replace individual csh scripts. - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: Tested on bluefire, frost, dublin, athena, jaguar - 70 tests now pass and 4 fail - (bn01 and bn03). - Tests were also run with the cam testsuite. - - Regression tests: - - -============================================================== - -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio54_prod -Originator(s): J. Dennis -Date: 9-18-09 -One-line Summary: Added PIO_finalize method. Reworked some of the logic checks in PIO_freedecomp. 
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing: Bluefire test suite (12 failures expected)
-                            Kraken test suite (12 failures expected)
-   Regression tests:
-
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio53_prod
-Originator(s): Jim
-Date: 9-11-09
-One-line Summary: Rearranged structure of testpio so that memory leaks would be easier to detect.
-   Added code to pio_freedecomp to deallocate mpi_types created in initdecomp.
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing: Bluefire test suite (12 failures expected)
-                            cam test suite on bluefire
-   Regression tests:
-
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio52_prod
-Originator(s): Jim
-Date: 09-02-2009
-One-line Summary: Additional options for testpio, reduce number of switches to indep mode
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Added a new variable to the testpio_in namelist: nvars
-   which controls the number of variables written to the r4 and r8 files
-   also added an unlimited dimension to test file variables
-   moved setframe calls in testpio
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests: bluefire test suite (12 failures expected)
-
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio51_prod
-Originator(s): Jim
-Date: 07-20-2009
-One-line Summary: bug fix in piolib_mod (mpi_sum should have been mpi_max).
-   unmask error flag in nf_mod
-   netcdf f90 interface bug workaround in pionfput_mod.F90
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing: Cam regressions (all pass).
bluefire test suite (12 failures expected) - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio50_prod -Originator(s): Jim, Tony, Mariana, John -Date: 7-1-2009 -One-line Summary: bug fix in piolib_mod to accept base=0. - fixed intel compiler line length problem - fixed getiostartandcount for large processor counts - fixed problems and improved testing on bluegene platforms - -List all component tags that were used to create pio tag: - mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912 - timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_081221 - testpio/perl5lib https://svn-ccsm-models.cgd.ucar.edu/perl5lib/trunk_tags/perl5lib_090613/ - - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - -Bugs fixed: - -Describe any changes made to scripts/build system: - Began working on improved test scripts using xml and perl - this is a - work in progress, your contribution would be welcome. - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: - - Regression tests: - bluefire: 62 PASS, 12 FAIL - - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio49_prod -Originator(s): Jim -Date: 06-11-2009 -One-line Summary: Improved boxrearranger performance - -List all component tags that were used to create pio tag: - mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912 - timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_081221 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - Disabled timing tests in frost test suite - the timing library needs a few changes - to work on frost - Replaced getiostartandcount with an algorithm provided by Tony, this is doing a better - job of decomposing into optimal strips - Relaxed an error condition in box_rearranger that required the size of the comp location - to be equal to the size of the dof. We now require that the comp location is >= the - size of the dof. - Added a cpp instruction to allow a build with pnetcdf but without netcdf to work on a single - processor. This reduces the number of expected failures from 13 to 12. - - Changed cpp formating in box_rearranger.F90.in to satisfy fussy cpp in intel compiler. - -Bugs fixed: - -Describe any changes made to scripts/build system: - Disabled timing tests in frost test suite - the timing library needs a few changes - to work on frost - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim, Mariana - -Summary of pre-tag testing: testpio suite on bluefire and frost - cam testsuite on bluefire and calgary - - Regression tests: - bluefire: 62 PASS, 12 FAIL - frost : - -=============================================================== - - -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. 
*** - -Tag name: pio48_prod -Originator(s): John -Date: 06-05-09 -One-line Summary: Added support for benchmark and testsuite on frost - -List all component tags that were used to create pio tag: - mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912 - timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_081221 - - Added support for the benchmark and testsuite on frost. - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: - A total of 50 passed and 24 failures. - -=============================================================== -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: pio47_prod -Originator(s): Jim, Pat Worley -Date: 05-28-09 -One-line Summary: Added communications options to box_rearranger - -List all component tags that were used to create pio tag: - mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912 - timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_081221 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - There are a number of options to be exercised, - and exercising these is not yet very convenient. - The options can be set in the calls to - - box_rearrange_comp2io_{TYPE} - box_rearrange_io2comp_{TYPE} - - as optional parameters. As the calls to these routine - don't do this currently, you will need to add defines - to the Makefile and/or modify the code in order - to enable the different options. - - 1) Out of the box, the original cam3_6_39 algorithms - are used. - - 2) Compiling with - - -D_USE_ALLTOALLW - - (after deleting box_rearrange.o :-) ) - - will use the MPI_ALLTOALLW branch that - Ray put in originally. Note that this code is always - compiled. The ifdef now simply sets the option - variable and a runtime if-test selects calling - MPI_ALLTOALLW . - - 3) Compiling with - - -D_USE_COMP2IO_FC - - (and not compiling with -D_USE_ALLTOALLW and - deleting box_rearrange.o)) will use the - pio_swapm routine in box_rearrange_comp2io_{TYPE} . - - Similarly, compiling with - - -D_USE_COMP2IO_FC - - will use the pio_swapm routine in box_rearrange_io2comp_{TYPE} . - - a) The default for this is to use handshaking, use MPI_SEND (not MPI_ISEND), - and set the throttle on the number of nonblocking requests to 64. - - b) If you want to try one of the other options, modify the defaults at the - top of box_rearrange.F90 as follows: - - enabling/disabling handshaking (true/false) - #define DEF_P2P_HANDSHAKE .true. - - enabling/disabling using MPI_ISEND (true/false) - #define DEF_P2P_ISEND .false. - - setting/disbling the throttle (>0, -1) - #define DEF_P2P_MAXREQ 64 - - and recompile. - - - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: - Passed all expected tests on bluefire and jaguar (13 expected failures on each) - Passed cam regression tests - -=============================================================== -=============================================================== - -*** You must run 'make predist' in the pio subdirectory before committing. 
***
-
-Tag name: pio46_prod
-Originator(s): John
-Date: 05-22-09
-One-line Summary: Bug fixes to benchmark code
-
-List all component tags that were used to create pio tag:
-   mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912
-   timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_081221
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-   * Reduced the amount of memory used in gdecomp_DOF subroutine
-   * Added several new benchmark configurations
-   * Addressed bug in decomposition calculation algorithm
-   * Added support for full specification of IO decomposition
-     using namelist
-   * Added output of maximum memory across processors in testpio code
-
-
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests: kraken tests completed - there are 13 tests expected to fail on kraken and 61 pass.
-
-===============================================================
-
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio45_prod
-Originator(s): Jim, Pat Worley
-Date: 05-19-09
-One-line Summary: Changed send recv algorithm in pionfwrite_mod, minor bug fixes
-
-List all component tags that were used to create pio tag:
-   mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912
-   timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_081221
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-   Added handshaking to make sure that irecv is posted prior to send in
-   communications for the netcdf output method in pionfwrite_mod.
-   Replaced mpi_int with mpi_integer.
-
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests: bluefire tests completed - there are 13 tests expected to fail on bluefire and 61 pass.
-                     cam regression suite completed
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio44_prod
-Originator(s): Jim
-Date: 05-15-09
-One-line Summary: Rewrite getiostartandcount routine in piolib_mod.F90
-
-List all component tags that were used to create pio tag:
-   mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912
-   timing https://svn-ccsm-models.cgd.ucar.edu/timing/trunk_tags/timing_081221
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-   A complete rewrite of the getiostartandcount routine includes 3 options for
-   how arrays should be decomposed onto the io processors, plus allows the
-   user to specify start and count arrays at the application level.
-   The initdecomp interface therefore has three additional optional parameters
-
-   initDecomp_1dof_nf_box(Iosystem, basepioTYPE, dims, compDOF, IOdesc, start, count, method_in)
-
-   where start and count are of type PIO_OFFSET and have the same number of dimensions as dims,
-   and method_in is an integer with value 1 (inner dim first), 2 (outer dim first), or 3 (largest to smallest).
-   If either start or count is provided, both must be, and method_in is ignored.
-   (A short usage sketch appears below, after the pio40_prod entry.)
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests: bluefire tests completed - there are 13 tests expected to fail on bluefire and 61 pass.
-                     cam regression suite completed
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio43_prod
-Originator(s): Jim
-Date: May 11, 2009
-One-line Summary: PGI compiler doesn't like 0 sized arrays.
-
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing: testpio run on bluefire
-
-   Regression tests: cam tests on bluefire and calgary (lahey and pgi)
-
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio42_prod
-Originator(s): Jim
-Date: May 4, 2009
-One-line Summary: Fixed problem when the size of a field output with the parallel write_darray mechanism was smaller than the number of io procs. Added a minimum count per io proc - currently set to 16.
-
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests:
-
-
-===============================================================
-
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio41_prod
-Originator(s): Jim
-Date: May 1, 2009
-One-line Summary: fixed a configure error: -D_MPISERIAL define was missing
-
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests: bluefire all pass
-
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing. ***
-
-Tag name: pio40_prod
-Originator(s): Jim
-Date: May 1, 2009
-One-line Summary: Added pio_64bit_offset flag, --enable-mpiserial forces --disable-pnetcdf
-
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-Code reviewed by: self
-
-Summary of pre-tag testing:
-
-   Regression tests: bluefire all pass
-
-
-===============================================================
-===============================================================
-*** You must run 'make predist' in the pio subdirectory before committing.
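-
-   Referring back to the pio44_prod entry above, a minimal sketch of passing the
-   new optional start and count arrays.  The array sizes and values are
-   illustrative; it is assumed here that the generic PIO_initdecomp interface
-   resolves to the initDecomp_1dof_nf_box form and that the optional arguments
-   can be passed by keyword:
-
-      use pio
-      type(iosystem_desc_t)    :: iosystem       ! set up earlier with PIO_init
-      type(io_desc_t)          :: iodesc
-      integer                  :: dims(2)        ! global array sizes, e.g. (/ 360, 180 /)
-      integer, pointer         :: compdof(:)     ! this task's mapping into the global array
-      integer(kind=PIO_OFFSET) :: start(2), count(2)   ! per IO-task box
-
-      ! ... fill dims and compdof; on IO tasks fill the start/count box ...
-
-      ! When both start and count are supplied, method_in is ignored (see above).
-      call PIO_initdecomp(iosystem, PIO_double, dims, compdof, iodesc, &
-                          start=start, count=count)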
*** -=============================================================== - -Tag name: pio39_prod -Originator(s): tcraig -Date: April 30, 2009 -One-line Summary: Comment out 64BIT_OFFSET in nf_mod.F90 - -- commented out use of the 64BIT_OFFSET hardwired setting - in nf_mod.F90, createfile. - -=============================================================== -Tag name: pio38_prod -Originator(s): Jim -Date: April 17, 2009 -One-line Summary: Fixed generated files, clarified procedure for commiting - -=============================================================== - -Tag name: pio37_prod -Originator(s): tcraig -Date: 03/20/2009 -One-line Summary: add Makefile.conf.bluefire for ccsm build - -add Makefile.conf.bluefire for ccsm build. has no effect on -standalone implementation. - -=============================================================== - -Tag name: pio36_prod -Originator(s): Jim -Date: 03/20/2009 -One-line Summary: minor bug fix and sanity check - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: Subname variable was undeclared in mct_rearrange_init - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: cam regression tests - - -=============================================================== - -=============================================================== - -Tag name: pio35_prod -Originator(s): tcraig -Date: 03/05/09 -One-line Summary: merge testdev11_pio34_prod to trunk - - - Add timing lib - - Update tests - - Update error checking - - Update testpio scripts - - and more... - -=============================================================== - -Tag name: pio34_prod -Originator(s): tcraig -Date: 03/05/09 -One-line Summary: pio bug fixes - - - Fix pio_types bug for MPIIO builds only in pio_types.F90 (jedwards) - - Fix xl__trbk calls in pio_support.F90.in (jedwards) - - Fix argument attributes on dof in pio_support.F90.in (jedwards) - - Fix use of pio_nofill and pio_unlimited in ifdef in pio.F90 (jedwards) - - Fix fill value bug in piodarray.F90.in (tcraig) - -=============================================================== - -Tag name: pio33_prod -Originator(s): tcraig -Date: 02/06/09 -One-line Summary: testpio update, pio update - - - Fix a couple pio bugs (jedwards) - - Add pio_readdof, pio_writedof subroutines in pio_support - - Update build and run scripts for bluefire, jaguar, - franklin, kraken, and intrepid. 
- - Update suite of tests to better reflect capabilities - - Update testpio to add readdof and writedof capability - - Update README - - Extend testpio logic wrt test capabilities - -=============================================================== - -Tag name: pio32_prod -Originator(s): tcraig -Date: 01/30/09 -One-line Summary: Merge testdev07_pio31_prod to trunk - - - Update testpio - - Update pio for fixes to bin/rearrange mode (jd) - -testpio.bluefire.run -testpio_suite.bluefire.run -testdecomp.bluefire.run - -all run to completion on bluefire - -=============================================================== - -Tag name: pio31_prod -Originator(s): Jim Edwards -Date: 01/30/09 -One-line Summary: Minor bug fix, update configure for mpiio - - pio/configure - pio/configure.ac - pio/piolib_mod.F90 - -=============================================================== - -Tag name: pio30_prod -Originator(s): Jim Edwards -Date: 01/30/09 -One-line Summary: Minor bug fixes for Lahey compiler - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: - -Summary of pre-tag testing: cam regression on bluefire and calgary - - Regression tests: - - -=============================================================== - -=============================================================== - -Tag name: pio29_prod -Originator(s): John Dennis -Date: 01/21/09 -One-line Summary: Exported more symbols need by POP - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio28_prod -Originator(s): Jim -Date: 12/16/08 -One-line Summary: Changed configure to not try to compute sizeof when --host is specified - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio27_prod -Originator(s): Jim, Ray -Date: 12/10/08 -One-line Summary: configuration changes - -List all component tags that were used to create pio tag: - mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_5_1_080912 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - Reconfigured to build as a standalone library in CCSM 
build. - -Bugs fixed: - -Describe any changes made to scripts/build system: - configuration tools moved to pio subdirectory to allow exporting - pio lib without test directory and externals - -Describe any substantial timing or memory changes: - -Code reviewed by: Jim - -Summary of pre-tag testing: tested using cam test suite on bluefire and calgary - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio26_prod -Originator(s): Jim -Date: 10/20/08 -One-line Summary: Minor bug fixes - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: tested with homme/cam on bluefire and calgary - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio25_prod -Originator(s): Jim -Date: 10/03/08 -One-line Summary: Port to lf95 6.1 compiler - -List all component tags that were used to create pio tag: MCT2_5_1_080912 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: me - -Summary of pre-tag testing: tested against cam on bluefire and bangkok - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio24_prod -Originator(s): Jim -Date: 8/28/08 -One-line Summary: mem leak fix, updates to pio unified interface - -List all component tags that were used to create pio tag: MCT2_5_1_080522 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: me - -Summary of pre-tag testing: tested on bluefire and with cam and homme interfaces - - Regression tests: - - -=============================================================== - -=============================================================== - -Tag name: pio23_prod -Originator(s): Jim -Date: 4/24/08 -One-line Summary: Added line-number generation to code generator - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: tested on aix and with cam - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio22_prod 
-Originator(s): Jim Edwards -Date: 4/14/08 -One-line Summary: Moved F90 generation from cpp to perl script genf90.pl - -List all component tags that were used to create pio tag: - mct https://svn-ccsm-models.cgd.ucar.edu/mct/trunk_tags/MCT2_4_2_071026 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - This change reduces duplication of code and reduces reliance on CPP - which can be a bit finicky. - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: Tested on AIX - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio21_prod -Originator(s): Jim Edwards -Date: 4-9-2008 -One-line Summary: Consolidate all exports into a single pio module - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: tested on blueice - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio20_prod -Originator(s): Jim Edwards -Date: 3-26-2008 -One-line Summary: bug fixes for pnetcdf interface - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: text variables were not handled correctly in nf_put and nf_get - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: me - -Summary of pre-tag testing: tested on blueice - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio18_prod -Originator(s): Jim Edwards -Date: 03-20-2008 -One-line Summary: minor updates to netcdf attributes and put/get functions - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: none - -Describe any substantial timing or memory changes: none - -Code reviewed by: me - -Summary of pre-tag testing: tested against cam on blueice - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio16_prod -Originator(s): John Dennis -Date: 03-14-2008 -One-line Summary: Improved decomposition support in testcode. 
-A testpio/factor_mod.F90
-
-   Several factoring routines needed by the new decomposition algorithm in testcode
-
-M testpio/Makefile
-
-   Added the source file factor_mod.F90 to build
-
-M testpio/check_mod.F90
-
-   Eliminated excessive error message generation
-
-M testpio/testPIO.F90
-M testpio/pdiff.F90
-
-   Minor changes to use the improved decomposition algorithm
-
-M testpio/decomp.F90
-
-   Added a slightly more general decomposition algorithm
-
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-
-Describe any substantial timing or memory changes:
-
-   See above.
-
-Code reviewed by: John
-
-Summary of pre-tag testing:
-
-   Regression tests:
-      tested on frost for binary files
-
-
-===============================================================
-===============================================================
-
-Tag name: pio15_prod
-Originator(s): Jim Edwards
-Date: 03-07-2008
-One-line Summary: API changes
-M 9138 pio/pionfread.inc
-M 9138 pio/mct_rearrange.inc
-M 9138 pio/pionfget.inc
-M 9138 pio/alloc_mod.F90
-M 9138 pio/box_rearrange.inc
-M 9138 pio/pioreadmpiio.inc
-M 9138 pio/pioread.inc
-M 9138 pio/pionfatt.inc
-M 9138 pio/pionfwrite.inc
-M 9138 pio/nf_mod.F90
-M 9138 pio/rearrange.F90
-M 9138 pio/mct_rearrange.F90
-M 9138 pio/pio_types.F90
-M 9138 pio/piowritempiio.inc
-M 9138 pio/iompi_mod.F90
-M 9138 pio/piolib_mod.F90
-M 9138 pio/piowrite.inc
-M 9138 pio/box_rearrange.F90
-M 9138 pio/pionfput.inc
-M 9138 pio/rearrange.inc
-M 9138 Makefile.ncarAIX
-M 9138 testpio/decomp.F90
-M 9138 testpio/testPIO.F90
-M 9138 testpio/pdiff.F90
-M 9138 testpio/popio_in
-
-List all component tags that were used to create pio tag:
-
-Describe the changes for each component above that was changed relative
-to the previous ccsm tag in this sequence (this includes description
-of new functionality and how user can use it):
-
-   Rename PIO_INITFILE to PIO_INIT and create an iosystem_desc_t structure which consists of the file-independent portion of the file_desc_t structure, and remove iotype as an argument to this routine.
-   Change the arguments to PIO_OPENFILE and PIO_CREATEFILE from (File, filename) to (PIO, File, iotype, filename)
-   Change the first argument to PIO_INITDECOMP from file_desc_t to iosystem_desc_t
-   Explicitly include the io_desc_t in calls to pio_read_darray and pio_write_darray
-
-Bugs fixed:
-
-Describe any changes made to scripts/build system:
-   Remove mct from defaults on ncarAIX
-Describe any substantial timing or memory changes:
-
-Code reviewed by:
-Jim
-Summary of pre-tag testing:
-   Tested on blueice with bin, netcdf and pnetcdf all passed
-   Regression tests:
-
-
-===============================================================
-===============================================================
-
-Tag name: pio14_prod
-Originator(s): J. 
Dennis -Date: 03/04/08 -One-line Summary: Add rearranger support for binary files - -M testPIO.F90 - - - Added code to use rearranger for binary files - - - Added namelist variables to control number of IO processors (nprocsIO) - -M piolib_mod.F90 - - - Cleaned up some dead code in initDecomp_2dof_BIN - -M iompi_mod.F90 - - - Removed broadcast of MPI-IO file handle - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: J. Dennis - -Summary of pre-tag testing: tested on BGL - - Regression tests: - - -=============================================================== -=============================================================== - -Tag name: pio13_prod -Originator(s): J. Dennis -Date: 02/29/08 -One-line Summary: Changes to improve benchmarking of testpio code. - -M testPIO.F90 - - - Added ability to read/write to a other directories - - - Added support to flush buffer cache - - - Added support to run performance test several iterations. - -m Makefile.ncarbgl - - - Changed flags to support box rearranger - -List all component tags that were used to create pio tag: - - mct MCT2_4_2_071026 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed by: self - -Summary of pre-tag testing: - - Regression tests: - - Passed pio test on frost - -=============================================================== -=============================================================== - -Tag name: pio11_prod -Originator(s): Jim Edwards -Date: 01/17/08 -One-line Summary: Additional functionality in netcdf interface - -M 8542 mct_rearrange.inc - Replaced stop statements with piodie calls -M 8542 pionfget.inc - Added support for calls using vdesc instead of vdesc%var_id -M 8542 box_rearrange.inc - Changed some io format for port to pgi compiler -M 8542 pio_quicksort.F90 - Removed DOS style EOL characters (^M) -M 8542 pioread.inc - minor format changes -A + - pionfatt.inc - Moved from nf_mod and added support for calls using vdesc instead of vdesc%var_id -M 8542 pionfwrite.inc - Added timing calls -M 8542 nf_mod.F90 - Added support for calls using vdesc instead of vdesc%var_id -M 8542 rearrange.F90 - minor format changes -M 8542 pio_types.F90 - Additional types copied from (p)netcdf -M 8542 piolib_mod.F90 - minor format changes -M 8542 piowrite.inc - minor format changes -M 8542 box_rearrange.F90 - added MPI_TYPE_CREATE_INDEXED_BLOCK for platforms without MPI2 support (NO_MPI2) -M 8542 pionfput.inc - Added support for calls using vdesc instead of vdesc%var_id -M 8542 rearrange.inc - minor format changes - -List all component tags that were used to create pio tag: - - mct MCT2_4_2_071026 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - -Code reviewed 
by: self - -Summary of pre-tag testing: - - Regression tests: - - Passed pio test on blueice - -=============================================================== -=============================================================== - -Changes since last tag - - updated to MCT2_4_2_071026 - use MCT_initialized to determine if need to start MCT - added timers to rearranger - - -=============================================================== - -Tag name: pio10_prod -Originator(s): Ray Loy -Date: Jan 3, 2008 -One-line Summary: changed initDecomp args from MPI types to PIO types - -List all component tags that were used to create pio tag: - - mct MCT2_3_0_070524 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - all interfaces of PIO_initDecomp are now changed to use - PIO_{double,real,int,char} instead of - MPI_{DOUBLE_PRECISION,REAL,INTEGER,CHAR} - - this is to be more consistent - -Bugs fixed: - - -Summary of pre-tag testing: - - Regression tests: - - testpio run on BG/L - - Machine (frost): - Regression tests: - - Machine (nyblue): - Regression tests: - -=============================================================== - -Tag name: pio09_prod -Originator(s): Ray Loy -Date: Dec 5, 2007 -One-line Summary: added mp_alltoallw option to box rearrange - -List all component tags that were used to create pio tag: - - mct MCT2_3_0_070524 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - - box_rearrange.inc will use mpi_alltoall2 instead - of point-to-point sends if you compile with -D_USE_ALLTOALLW - - on some platforms this might be faster. - -Bugs fixed: - - -Summary of pre-tag testing: - - Regression tests: - - box rearranger test and testpio run on BG/L - - Machine (frost): - Regression tests: - - Machine (nyblue): - Regression tests: - -=============================================================== - -Tag name: pio08_prod -Originator(s): Ray Loy -Date: Nov 30, 2007 -One-line Summary: mpi type caching in box rearranger - -List all component tags that were used to create pio tag: - - mct MCT2_3_0_070524 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -box rearranger restructured so that the creation of the scatter/gather -mpi types happens up front in the rearranger_create instead of at the -time of the rearrange. - -also was able to remove some of the data in IODesc because it is no -longer needed after the type creation. - - - -Bugs fixed: - - -Summary of pre-tag testing: - - Regression tests: - - box rearranger test and testpio run on BG/L - - Machine (frost): - Regression tests: - - Machine (nyblue): - Regression tests: - -=============================================================== - -Tag name: pio05_prod -Originator(s): John Dennis -Date: Oct 23, 2007 -One-line Summary: Support for PIO in POP - -List all component tags that were used to create pio tag: - - mct MCT2_3_0_070524 - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -PIO in POP - - Several major changes were applied to PIO to enable use in -POP. 
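For readers following the pio15_prod and pio10_prod entries above, a minimal Fortran sketch of the calling sequence they describe is given here. Only the details quoted in those entries are taken as given: the (PIO, File, iotype, filename) argument order for PIO_createfile/PIO_openfile, the iosystem_desc_t first argument to PIO_initDecomp, the PIO_{double,real,int,char} kind constants, and the explicit io_desc_t passed to the darray routines. Every other routine name, argument list, and constant used below (PIO_init's arguments, PIO_def_dim/PIO_def_var/PIO_enddef, PIO_iotype_netcdf, PIO_rearr_box) is an assumption about the PIO 1.x Fortran interface, not something recorded in this ChangeLog.

program pio15_api_sketch
  use pio        ! assumed module name exposing the PIO 1.x Fortran API
  use mpi
  implicit none
  type(iosystem_desc_t) :: pio_sys   ! file-independent state split out of file_desc_t (pio15_prod)
  type(file_desc_t)     :: file
  type(io_desc_t)       :: iodesc
  type(var_desc_t)      :: vdesc     ! assumed variable-descriptor type
  integer :: ierr, rank, nprocs, dimid, i
  integer :: dims(1)
  integer, allocatable :: compdof(:)
  real(kind=8), allocatable :: local_data(:)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

  ! Trivial 1-d block decomposition: each task owns 10 contiguous elements.
  dims(1) = 10 * nprocs
  allocate(compdof(10), local_data(10))
  compdof = [(rank*10 + i, i = 1, 10)]
  local_data = real(rank, kind=8)

  ! pio15_prod: PIO_init replaces PIO_initfile and no longer takes an iotype;
  ! the argument list shown (rank, comm, iotasks, aggregators, stride,
  ! rearranger, iosystem) is assumed, not taken from the entry.
  call PIO_init(rank, MPI_COMM_WORLD, 1, 0, 1, PIO_rearr_box, pio_sys)

  ! pio15_prod: the iotype moves to file creation/open,
  ! (File, filename) -> (PIO, File, iotype, filename).
  ierr = PIO_createfile(pio_sys, file, PIO_iotype_netcdf, 'sketch.nc')

  ! pio15_prod: first argument is now the iosystem rather than a file;
  ! pio10_prod: the base type is a PIO kind (PIO_double) instead of an MPI type.
  call PIO_initdecomp(pio_sys, PIO_double, dims, compdof, iodesc)

  ! Assumed netCDF-style metadata calls so the write below has a target variable.
  ierr = PIO_def_dim(file, 'x', dims(1), dimid)
  ierr = PIO_def_var(file, 'foo', PIO_double, (/dimid/), vdesc)
  ierr = PIO_enddef(file)

  ! pio15_prod: the io_desc_t is passed explicitly to the darray routines.
  call PIO_write_darray(file, vdesc, iodesc, local_data, ierr)

  call PIO_closefile(file)
  call MPI_Finalize(ierr)
end program pio15_api_sketch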
This included the porting of the old version of the subroutine -initDecomp_2dof_BIN. The previous version of the subroutine does not -use the two separate IODOF properly. - -ChangeLog - - PIO now uses a changed log and new form of tags: pio##_prod - - -Bugs fixed: - - Binary writing with MPI-IO now works again. - -Summary of pre-tag testing: - - Regression tests: - - Minor testing on BlueGene within LANL POP and CCSM POP in coupled system. - - Machine (frost): - Regression tests: - - Machine (nyblue): - Regression tests: - -=============================================================== diff --git a/src/externals/pio1/ChangeLog_template b/src/externals/pio1/ChangeLog_template deleted file mode 100644 index 2104c3ba7b9..00000000000 --- a/src/externals/pio1/ChangeLog_template +++ /dev/null @@ -1,36 +0,0 @@ -=============================================================== -*** You must run 'make predist' in the pio subdirectory before committing. *** - -Tag name: -Originator(s): -Date: -One-line Summary: - -List all subroutines eliminated: - -List all subroutines added and what they do: - -List all existing files that have been modified, and describe the changes: - -List all component tags that were used to create pio tag: - -Describe the changes for each component above that was changed relative -to the previous ccsm tag in this sequence (this includes description -of new functionality and how user can use it): - -Bugs fixed: - -Describe any changes made to scripts/build system: - -Describe any substantial timing or memory changes: - - - -Code reviewed by: - -Summary of pre-tag testing: - - Regression tests: - - -=============================================================== diff --git a/src/externals/pio1/Doxyfile b/src/externals/pio1/Doxyfile deleted file mode 100644 index 75f1f10fa3a..00000000000 --- a/src/externals/pio1/Doxyfile +++ /dev/null @@ -1,1715 +0,0 @@ -# Doxyfile 1.8.2 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# http://www.gnu.org/software/libiconv for the list of possible encodings. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or sequence of words) that should -# identify the project. Note that if you do not use Doxywizard you need -# to put quotes around the project name if it contains spaces. - -PROJECT_NAME = PIO - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = 1.7.2 - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer -# a quick idea about the purpose of the project. 
Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify an logo or icon that is -# included in the documentation. The maximum height of the logo should not -# exceed 55 pixels and the maximum width should not exceed 200 pixels. -# Doxygen will copy the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = /cesmweb/html/models/pio_new -#OUTPUT_DIRECTORY = ./doc/html - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create -# 4096 sub-directories (in 2 levels) under the output directory of each output -# format and will distribute the generated files over these directories. -# Enabling this option can be useful when feeding doxygen a huge amount of -# source files, where putting all generated files in the same directory would -# otherwise cause performance problems for the file system. - -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, -# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English -# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, -# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, -# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" - -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. 
- -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. Note that you specify absolute paths here, but also -# relative paths, which will be relative from the directory where doxygen is -# started. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. -# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful if your file system -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. 
- -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - - - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. - -OPTIMIZE_FOR_FORTRAN = YES - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for -# VHDL. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, -# and language is one of the parsers supported by doxygen: IDL, Java, -# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, -# C++. For instance to make doxygen treat .inc files as Fortran files (default -# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note -# that for custom extensions you also need to set FILE_PATTERNS otherwise the -# files are not read by doxygen. - -EXTENSION_MAPPING = in=Fortran - - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. -# func(std::string) {}). This also makes the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES (the -# default) will make doxygen replace the get and set methods by a property in -# the documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. 
- -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and -# unions are shown inside the group in which they are included (e.g. using -# @ingroup) instead of on a separate page (for HTML and Man pages) or -# section (for LaTeX and RTF). - -INLINE_GROUPED_CLASSES = NO - -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically -# be useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. - -TYPEDEF_HIDES_STRUCT = NO - -# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to -# determine which symbols to keep in memory and which to flush to disk. -# When the cache is full, less often used symbols will be written to disk. -# For small to medium size projects (<1000 input files) the default value is -# probably good enough. For larger projects a too small cache size can cause -# doxygen to be busy swapping symbols to and from disk most of the time -# causing a significant performance penalty. -# If the system has enough physical memory increasing the cache will improve the -# performance by keeping more symbols in memory. Note that the value works on -# a logarithmic scale so increasing the size by one will roughly double the -# memory usage. The cache size is given by this formula: -# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, -# corresponding to a cache size of 2^16 = 65536 symbols. - -SYMBOL_CACHE_SIZE = 0 - - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. 
- -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. -# If set to NO (the default) only methods in the interface are included. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespaces are hidden. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. - -CASE_SENSE_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen -# will list include files with double quotes in the documentation -# rather than with sharp brackets. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. 
- -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in -# declaration order. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen -# will sort the (brief and detailed) documentation of class members so that -# constructors and destructors are listed first. If set to NO (the default) -# the constructors will appear in the respective orders defined by -# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. -# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO -# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to -# do proper type resolution of all parameters of a function it will reject a -# match between the prototype and the implementation of a member function even -# if there is only one candidate or it is obvious which candidate to choose -# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen -# will still accept a match between prototype and implementation in such cases. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or macro consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and macros in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. 
- -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command , where is the value of -# the FILE_VERSION_FILTER tag, and is the name of an input file -# provided by doxygen. Whatever the program writes to standard output -# is used as the file version. See the manual for examples. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. -# You can optionally specify a file name after the option, if omitted -# DoxygenLayout.xml will be used as the name of the layout file. - -LAYOUT_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. - -WARN_IF_DOC_ERROR = YES - -# The WARN_NO_PARAMDOC option can be enabled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. 
Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = . - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh -# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py -# *.f90 *.f *.for *.vhd *.vhdl - -FILE_PATTERNS = *.c \ - *.h \ - *.inc \ - *.dox \ - *.F90 \ - *.F90.in \ - *.txt - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). 
- -EXAMPLE_PATH = doc/example \ - testpio - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = * - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = doc/images - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. If FILTER_PATTERNS is specified, this tag will be -# ignored. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty or if -# non of the patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) -# and it is also possible to disable source filtering for a specific pattern -# using *.ext= (so without naming a filter). This option only has effect when -# FILTER_SOURCE_FILES is enabled. - -FILTER_SOURCE_PATTERNS = - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. -# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C, C++ and Fortran comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. 
- -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. Otherwise they will link to the documentation. - -REFERENCES_LINK_SOURCE = YES - -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = YES - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = ./ - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. Note that when using a custom header you are responsible -# for the proper inclusion of any scripts and style sheets that doxygen -# needs, which is dependent on the configuration options used. -# It is advised to generate a default header using "doxygen -w html -# header.html footer.html stylesheet.css YourConfigFile" and then modify -# that header. 
Note that the header is subject to change so you typically -# have to redo this when upgrading to a newer version of doxygen or when -# changing the value of configuration settings such as GENERATE_TREEVIEW! - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If left blank doxygen will -# generate a default style sheet. Note that it is recommended to use -# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this -# tag will in the future become obsolete. - -HTML_STYLESHEET = -HTML_EXTRA_STYLESHEET = customdoxygen.css - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that -# the files will be copied as-is; there are no commands or markers available. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. -# Doxygen will adjust the colors in the style sheet and background images -# according to this color. Hue is specified as an angle on a colorwheel, -# see http://en.wikipedia.org/wiki/Hue for more information. -# For instance the value 0 represents red, 60 is yellow, 120 is green, -# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. -# The allowed range is 0 to 359. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of -# the colors in the HTML output. For a value of 0 the output will use -# grayscales only. A value of 255 will produce the most vivid colors. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to -# the luminance component of the colors in the HTML output. Values below -# 100 gradually make the output lighter, whereas values above 100 make -# the output darker. The value divided by 100 is the actual gamma applied, -# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, -# and 100 does not change the gamma. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting -# this to NO can help when comparing the output of multiple runs. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. - -HTML_DYNAMIC_SECTIONS = NO - - -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OSX 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. 
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. - -GENERATE_DOCSET = NO - -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely -# identify the documentation publisher. This should be a reverse domain-name -# style string, e.g. com.mycompany.MyDocSet.documentation. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be -# written to the html output directory. - -CHM_FILE = - -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. - -HHC_LOCATION = - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. - -CHM_INDEX_ENCODING = - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated -# that can be used as input for Qt's qhelpgenerator to generate a -# Qt Compressed Help (.qch) of the generated HTML documentation. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can -# be used to specify the file name of the resulting .qch file. -# The path specified is relative to the HTML output folder. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#namespace - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating -# Qt Help Project output. 
For more information please see -# http://doc.trolltech.com/qthelpproject.html#virtual-folders - -QHP_VIRTUAL_FOLDER = doc - -# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to -# add. For more information please see -# http://doc.trolltech.com/qthelpproject.html#custom-filters - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see -# -# Qt Help Project / Custom Filters. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's -# filter section matches. -# -# Qt Help Project / Filter Attributes. - -QHP_SECT_FILTER_ATTRS = - -# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can -# be used to specify the location of Qt's qhelpgenerator. -# If non-empty doxygen will try to run qhelpgenerator on the generated -# .qhp file. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files -# will be generated, which together with the HTML files, form an Eclipse help -# plugin. To install this plugin and make it available under the help contents -# menu in Eclipse, the contents of the directory containing the HTML and XML -# files needs to be copied into the plugins directory of eclipse. The name of -# the directory within the plugins directory should be the same as -# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before -# the help appears. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have -# this name. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) -# at top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. Since the tabs have the same information as the -# navigation tree you can set this option to NO if you already set -# GENERATE_TREEVIEW to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to YES, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). -# Windows users are probably better off using the HTML help feature. -# Since the tree basically has the same information as the tab index you -# could consider to set DISABLE_INDEX to NO when enabling this option. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values -# (range [0,1..20]) that doxygen will group on one line in the generated HTML -# documentation. Note that a value of 0 will completely suppress the enum -# values from appearing in the overview section. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open -# links to external symbols imported via tag files in a separate window. 
- -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are -# not supported properly for IE 6.0, but are supported on all modern browsers. -# Note that when changing this option you need to delete any form_*.png files -# in the HTML output before the changes have effect. - -FORMULA_TRANSPARENT = YES - -# When MathJax is enabled you need to specify the location relative to the -# HTML output directory using the MATHJAX_RELPATH option. The destination -# directory should contain the MathJax.js script. For instance, if the mathjax -# directory is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to -# the MathJax Content Delivery Network so you can quickly see the result without -# installing MathJax. However, it is strongly recommended to install a local -# copy of MathJax from http://www.mathjax.org before deployment. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box -# for the HTML output. The underlying search engine uses javascript -# and DHTML and should work on any modern browser. Note that when using -# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets -# (GENERATE_DOCSET) there is already a search function so this one should -# typically be disabled. For large projects the javascript based search engine -# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. - -SEARCHENGINE = YES - -# When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a PHP enabled web server instead of at the web client -# using Javascript. Doxygen will generate the search PHP script and index -# file to put on the web server. The advantage of the server -# based approach is that it scales better to large projects and allows -# full text search. The disadvantages are that it is more difficult to setup -# and does not have live searching capabilities. - -SERVER_BASED_SEARCH = NO - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. If left blank `latex' will be used as the default command name. -# Note that when enabling USE_PDFLATEX this option is only used for -# generating bitmaps for formulas in the HTML output, but not in the -# Makefile that is written to the output directory. - -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. 
If left blank `makeindex' will be used as the -# default command name. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = a4 - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for -# the generated latex document. The footer should contain everything after -# the last chapter. If it is left blank doxygen will generate a -# standard footer. Notice: only use this tag if you know what you are doing! - -LATEX_FOOTER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = NO - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) -# in the output. - -LATEX_HIDE_INDICES = NO - -# If LATEX_SOURCE_CODE is set to YES then doxygen will include -# source code with syntax highlighting in the LaTeX output. -# Note that which sources are shown also depends on other settings -# such as SOURCE_BROWSER. - -LATEX_SOURCE_CODE = NO - - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. 
The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load style sheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. - -GENERATE_XML = NO - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `xml' will be used as the default path. - -XML_OUTPUT = xml - -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_SCHEMA = - -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_DTD = - -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that -# enabling this will significantly increase the size of the XML output. - -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. 
- -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able -# to generate PDF and DVI output from the Perl module output. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. This is useful -# if you want to understand what is going on. On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller -# and Perl will parse it just the same. - -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. -# This is useful so different doxyrules.make files included by the same -# Makefile don't overwrite each other's variables. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = NO - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_DEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# pointed to by INCLUDE_PATH will be searched when a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. 
- -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition that -# overrules the definition found in the source code. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all references to function-like macros -# that are alone on a line, have an all uppercase name, and do not end with a -# semicolon, because these will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES option can be used to specify one or more tagfiles. For each -# tag file the location of the external documentation should be added. The -# format of a tag file without this location is as follows: -# TAGFILES = file1 file2 ... -# Adding location for the tag files is done as follows: -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths -# or URLs. Note that each tag file must have a unique name (where the name does -# NOT include the path). If a tag file is not located in the directory in which -# doxygen is run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base -# or super classes. Setting the tag to NO turns the diagrams off. Note that -# this option also works with HAVE_DOT disabled, but it is recommended to -# install and use dot, since it yields more powerful graphs. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. 
- -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = YES - -# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is -# allowed to run in parallel. When set to 0 (the default) doxygen will -# base this on the number of processors available in the system. You can set it -# explicitly to a value larger than 0 to get control over the balance -# between CPU load and processing speed. - -DOT_NUM_THREADS = 0 - -# By default doxygen will use the Helvetica font for all dot files that -# doxygen generates. When you want a differently looking font you can specify -# the font name using DOT_FONTNAME. You need to make sure dot is able to find -# the font, which can be done by putting it in a standard location or by setting -# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the -# directory containing the font. - -DOT_FONTNAME = Helvetica - -# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. -# The default size is 10pt. - -DOT_FONTSIZE = 10 - -# By default doxygen will tell dot to use the Helvetica font. -# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to -# set the path where dot can find it. - -DOT_FONTPATH = - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for groups, showing the direct groups dependencies - -GROUP_GRAPHS = YES - -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. - -UML_LOOK = NO - - - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the CALL_GRAPH and HAVE_DOT options are set to YES then -# doxygen will generate a call dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable call graphs -# for selected functions only using the \callgraph command. 
- -CALL_GRAPH = NO - -# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then -# doxygen will generate a caller dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable caller -# graphs for selected functions only using the \callergraph command. - -CALLER_GRAPH = NO - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will generate a graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES -# then doxygen will show the dependencies a directory has on other directories -# in a graphical way. The dependency relations are determined by the #include -# relations between the files in the directories. - -DIRECTORY_GRAPH = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are svg, png, jpg, or gif. -# If left blank png will be used. If you choose svg you need to set -# HTML_FILE_EXTENSION to xhtml in order to make the SVG files -# visible in IE 9+ (other browsers do not have this requirement). - -DOT_IMAGE_FORMAT = png - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MSCFILE_DIRS tag can be used to specify one or more directories that -# contain msc files that are included in the documentation (see the -# \mscfile command). - -MSCFILE_DIRS = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of -# nodes that will be shown in the graph. If the number of nodes in a graph -# becomes larger than this value, doxygen will truncate the graph, which is -# visualized by representing a node as a red box. Note that doxygen if the -# number of direct children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note -# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the -# graphs generated by dot. A depth value of 3 means that only nodes reachable -# from the root by following a path via at most 3 edges will be shown. Nodes -# that lay further from the root node will be omitted. Note that setting this -# option to 1 or 2 may greatly reduce the computation time needed for large -# code bases. Also note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. - -MAX_DOT_GRAPH_DEPTH = 0 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not -# seem to support this out of the box. Warning: Depending on the platform used, -# enabling this option may lead to badly anti-aliased labels on the edges of -# a graph (i.e. they become hard to read). - -DOT_TRANSPARENT = NO - -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). 
This -# makes dot run faster, but since only newer versions of dot (>1.8.10) -# support this, this feature is disabled by default. - -DOT_MULTI_TARGETS = NO - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES diff --git a/src/externals/pio1/DoxygenLayout.xml b/src/externals/pio1/DoxygenLayout.xml deleted file mode 100644 index 8faedfb81da..00000000000 --- a/src/externals/pio1/DoxygenLayout.xml +++ /dev/null @@ -1,188 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/externals/pio1/SVN_EXTERNAL_DIRECTORIES b/src/externals/pio1/SVN_EXTERNAL_DIRECTORIES deleted file mode 100644 index 7c68470f0f9..00000000000 --- a/src/externals/pio1/SVN_EXTERNAL_DIRECTORIES +++ /dev/null @@ -1,3 +0,0 @@ -pio/bin http://parallelio.googlecode.com/svn/genf90/trunk_tags/genf90_131120 -pio/cmake https://github.com/CESM-Development/CMake_Fortran_utils/tags/CMake_Fortran_utils_150304a - diff --git a/src/externals/pio1/customdoxygen.css b/src/externals/pio1/customdoxygen.css deleted file mode 100644 index 5e327faf68d..00000000000 --- a/src/externals/pio1/customdoxygen.css +++ /dev/null @@ -1,1184 +0,0 @@ -/* The standard CSS for doxygen 1.8.3.1 */ - -body, table, div, p, dl { - font: 400 14px/19px Roboto,sans-serif; -} - -/* @group Heading Levels */ - -h1.groupheader { - font-size: 150%; -} - -.title { - font-size: 150%; - font-weight: bold; - margin: 10px 2px; -} - -h2.groupheader { - border-bottom: 1px solid #879ECB; - color: #354C7B; - font-size: 150%; - font-weight: normal; - margin-top: 1.75em; - padding-top: 8px; - padding-bottom: 4px; - width: 100%; -} - -h3.groupheader { - font-size: 100%; -} - -h1, h2, h3, h4, h5, h6 { - -webkit-transition: text-shadow 0.5s linear; - -moz-transition: text-shadow 0.5s linear; - -ms-transition: text-shadow 0.5s linear; - -o-transition: text-shadow 0.5s linear; - transition: text-shadow 0.5s linear; - margin-right: 15px; -} - -h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { - text-shadow: 0 0 15px cyan; -} - -dt { - font-weight: bold; -} - -div.multicol { - -moz-column-gap: 1em; - -webkit-column-gap: 1em; - -moz-column-count: 3; - -webkit-column-count: 3; -} - -p.startli, p.startdd, p.starttd { - margin-top: 2px; -} - -p.endli { - margin-bottom: 0px; -} - -p.enddd { - margin-bottom: 4px; -} - -p.endtd { - margin-bottom: 2px; -} - -/* @end */ - -caption { - font-weight: bold; -} - -span.legend { - font-size: 70%; - text-align: center; -} - -h3.version { - font-size: 90%; - text-align: center; -} - -div.qindex, div.navtab{ - background-color: #EBEFF6; - border: 1px solid #A3B4D7; - text-align: center; -} - -div.qindex, div.navpath { - width: 100%; - line-height: 140%; -} - -div.navtab { - margin-right: 15px; -} - -/* @group Link Styling */ - -a { - color: #3D578C; - font-weight: normal; - text-decoration: none; -} - -.contents a:visited { - color: #4665A2; -} - -a:hover { - text-decoration: 
underline; -} - -a.qindex { - font-weight: bold; -} - -a.qindexHL { - font-weight: bold; - background-color: #9CAFD4; - color: #ffffff; - border: 1px double #869DCA; -} - -.contents a.qindexHL:visited { - color: #ffffff; -} - -a.el { - font-weight: bold; -} - -a.elRef { -} - -a.code, a.code:visited { - color: #4665A2; -} - -a.codeRef, a.codeRef:visited { - color: #4665A2; -} - -/* @end */ - -dl.el { - margin-left: -1cm; -} - -pre.fragment { - border: 1px solid #C4CFE5; - background-color: #FBFCFD; - padding: 4px 6px; - margin: 4px 8px 4px 2px; - overflow: auto; - word-wrap: break-word; - font-size: 9pt; - line-height: 125%; - font-family: monospace, fixed; - font-size: 105%; -} - -div.fragment { - padding: 4px; - margin: 4px; - background-color: #FBFCFD; - border: 1px solid #C4CFE5; -} - -div.line { - font-family: monospace, fixed; - font-size: 13px; - min-height: 13px; - line-height: 1.0; - text-wrap: unrestricted; - white-space: -moz-pre-wrap; /* Moz */ - white-space: -pre-wrap; /* Opera 4-6 */ - white-space: -o-pre-wrap; /* Opera 7 */ - white-space: pre-wrap; /* CSS3 */ - word-wrap: break-word; /* IE 5.5+ */ - text-indent: -53px; - padding-left: 53px; - padding-bottom: 0px; - margin: 0px; - -webkit-transition-property: background-color, box-shadow; - -webkit-transition-duration: 0.5s; - -moz-transition-property: background-color, box-shadow; - -moz-transition-duration: 0.5s; - -ms-transition-property: background-color, box-shadow; - -ms-transition-duration: 0.5s; - -o-transition-property: background-color, box-shadow; - -o-transition-duration: 0.5s; - transition-property: background-color, box-shadow; - transition-duration: 0.5s; -} - -div.line.glow { - background-color: cyan; - box-shadow: 0 0 10px cyan; -} - - -span.lineno { - padding-right: 4px; - text-align: right; - border-right: 2px solid #0F0; - background-color: #E8E8E8; - white-space: pre; -} -span.lineno a { - background-color: #D8D8D8; -} - -span.lineno a:hover { - background-color: #C8C8C8; -} - -div.ah { - background-color: black; - font-weight: bold; - color: #ffffff; - margin-bottom: 3px; - margin-top: 3px; - padding: 0.2em; - border: solid thin #333; - border-radius: 0.5em; - -webkit-border-radius: .5em; - -moz-border-radius: .5em; - box-shadow: 2px 2px 3px #999; - -webkit-box-shadow: 2px 2px 3px #999; - -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; - background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); - background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000); -} - -div.groupHeader { - margin-left: 16px; - margin-top: 12px; - font-weight: bold; -} - -div.groupText { - margin-left: 16px; - font-style: italic; -} - -body { - background-color: white; - color: black; - margin: 0; -} - -div.contents { - margin-top: 10px; - margin-left: 12px; - margin-right: 8px; -} - -td.indexkey { - background-color: #EBEFF6; - font-weight: bold; - border: 1px solid #C4CFE5; - margin: 2px 0px 2px 0; - padding: 2px 10px; - white-space: nowrap; - vertical-align: top; -} - -td.indexvalue { - background-color: #EBEFF6; - border: 1px solid #C4CFE5; - padding: 2px 10px; - margin: 2px 0px; -} - -tr.memlist { - background-color: #EEF1F7; -} - -p.formulaDsp { - text-align: center; -} - -img.formulaDsp { - -} - -img.formulaInl { - vertical-align: middle; -} - -div.center { - text-align: center; - margin-top: 0px; - margin-bottom: 0px; - padding: 0px; -} - -div.center img { - border: 0px; -} - -address.footer { - text-align: right; - padding-right: 12px; -} - -img.footer 
{ - border: 0px; - vertical-align: middle; -} - -/* @group Code Colorization */ - -span.keyword { - color: #008000 -} - -span.keywordtype { - color: #604020 -} - -span.keywordflow { - color: #e08000 -} - -span.comment { - color: #800000 -} - -span.preprocessor { - color: #806020 -} - -span.stringliteral { - color: #002080 -} - -span.charliteral { - color: #008080 -} - -span.vhdldigit { - color: #ff00ff -} - -span.vhdlchar { - color: #000000 -} - -span.vhdlkeyword { - color: #700070 -} - -span.vhdllogic { - color: #ff0000 -} - -blockquote { - background-color: #DDDDDD; - border-left: 2px solid #9CAFD4; - margin: 0 24px 0 4px; - padding: 0 12px 0 16px; -} - -/* @end */ - -/* -.search { - color: #003399; - font-weight: bold; -} - -form.search { - margin-bottom: 0px; - margin-top: 0px; -} - -input.search { - font-size: 75%; - color: #000080; - font-weight: normal; - background-color: #e8eef2; -} -*/ - -td.tiny { - font-size: 75%; -} - -.dirtab { - padding: 4px; - border-collapse: collapse; - border: 1px solid #A3B4D7; -} - -th.dirtab { - background: #EBEFF6; - font-weight: bold; -} - -hr { - height: 0px; - border: none; - border-top: 1px solid #4A6AAA; -} - -hr.footer { - height: 1px; -} - -/* @group Member Descriptions */ - -table.memberdecls { - border-spacing: 0px; - padding: 0px; -} - -.memberdecls td, .fieldtable tr { - -webkit-transition-property: background-color, box-shadow; - -webkit-transition-duration: 0.5s; - -moz-transition-property: background-color, box-shadow; - -moz-transition-duration: 0.5s; - -ms-transition-property: background-color, box-shadow; - -ms-transition-duration: 0.5s; - -o-transition-property: background-color, box-shadow; - -o-transition-duration: 0.5s; - transition-property: background-color, box-shadow; - transition-duration: 0.5s; -} - -.memberdecls td.glow, .fieldtable tr.glow { - background-color: cyan; - box-shadow: 0 0 15px cyan; -} - -.mdescLeft, .mdescRight, -.memItemLeft, .memItemRight, -.memTemplItemLeft, .memTemplItemRight, .memTemplParams { - background-color: #F9FAFC; - border: none; - margin: 4px; - padding: 1px 0 0 8px; -} - -.mdescLeft, .mdescRight { - padding: 0px 8px 4px 8px; - color: #555; -} - -.memSeparator { - border-bottom: 1px solid #DEE4F0; - line-height: 1px; - margin: 0px; - padding: 0px; -} - -.memItemLeft, .memTemplItemLeft { - white-space: nowrap; -} - -.memItemRight { - width: 100%; -} - -.memTemplParams { - color: #4665A2; - white-space: nowrap; - font-size: 80%; -} - -/* @end */ - -/* @group Member Details */ - -/* Styles for detailed member documentation */ - -.memtemplate { - font-size: 80%; - color: #4665A2; - font-weight: normal; - margin-left: 9px; -} - -.memnav { - background-color: #EBEFF6; - border: 1px solid #A3B4D7; - text-align: center; - margin: 2px; - margin-right: 15px; - padding: 2px; -} - -.mempage { - width: 100%; -} - -.memitem { - padding: 0; - margin-bottom: 10px; - margin-right: 5px; - -webkit-transition: box-shadow 0.5s linear; - -moz-transition: box-shadow 0.5s linear; - -ms-transition: box-shadow 0.5s linear; - -o-transition: box-shadow 0.5s linear; - transition: box-shadow 0.5s linear; - display: table !important; - width: 100%; -} - -.memitem.glow { - box-shadow: 0 0 15px cyan; -} - -.memname { - font-weight: bold; - margin-left: 6px; -} - -.memname td { - vertical-align: bottom; -} - -.memproto, dl.reflist dt { - border-top: 1px solid #A8B8D9; - border-left: 1px solid #A8B8D9; - border-right: 1px solid #A8B8D9; - padding: 6px 0px 6px 0px; - color: #253555; - font-weight: bold; - text-shadow: 0px 1px 1px 
rgba(255, 255, 255, 0.9); - background-image:url('nav_f.png'); - background-repeat:repeat-x; - background-color: #E2E8F2; - /* opera specific markup */ - box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - border-top-right-radius: 4px; - border-top-left-radius: 4px; - /* firefox specific markup */ - -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; - -moz-border-radius-topright: 4px; - -moz-border-radius-topleft: 4px; - /* webkit specific markup */ - -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - -webkit-border-top-right-radius: 4px; - -webkit-border-top-left-radius: 4px; - -} - -.memdoc, dl.reflist dd { - border-bottom: 1px solid #A8B8D9; - border-left: 1px solid #A8B8D9; - border-right: 1px solid #A8B8D9; - padding: 6px 10px 2px 10px; - background-color: #FBFCFD; - border-top-width: 0; - background-image:url('nav_g.png'); - background-repeat:repeat-x; - background-color: #FFFFFF; - /* opera specific markup */ - border-bottom-left-radius: 4px; - border-bottom-right-radius: 4px; - box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - /* firefox specific markup */ - -moz-border-radius-bottomleft: 4px; - -moz-border-radius-bottomright: 4px; - -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; - /* webkit specific markup */ - -webkit-border-bottom-left-radius: 4px; - -webkit-border-bottom-right-radius: 4px; - -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); -} - -dl.reflist dt { - padding: 5px; -} - -dl.reflist dd { - margin: 0px 0px 10px 0px; - padding: 5px; -} - -.paramkey { - text-align: right; -} - -.paramtype { - white-space: nowrap; -} - -.paramname { - color: #602020; - white-space: nowrap; -} -.paramname em { - font-style: normal; -} -.paramname code { - line-height: 14px; -} - -.params, .retval, .exception, .tparams { - margin-left: 0px; - padding-left: 0px; -} - -.params .paramname, .retval .paramname { - font-weight: bold; - vertical-align: top; -} - -.params .paramtype { - font-style: italic; - vertical-align: top; -} - -.params .paramdir { - font-family: "courier new",courier,monospace; - vertical-align: top; -} - -table.mlabels { - border-spacing: 0px; -} - -td.mlabels-left { - width: 100%; - padding: 0px; -} - -td.mlabels-right { - vertical-align: bottom; - padding: 0px; - white-space: nowrap; -} - -span.mlabels { - margin-left: 8px; -} - -span.mlabel { - background-color: #728DC1; - border-top:1px solid #5373B4; - border-left:1px solid #5373B4; - border-right:1px solid #C4CFE5; - border-bottom:1px solid #C4CFE5; - text-shadow: none; - color: white; - margin-right: 4px; - padding: 2px 3px; - border-radius: 3px; - font-size: 7pt; - white-space: nowrap; - vertical-align: middle; -} - - - -/* @end */ - -/* these are for tree view when not used as main index */ - -div.directory { - margin: 10px 0px; - border-top: 1px solid #A8B8D9; - border-bottom: 1px solid #A8B8D9; - width: 100%; -} - -.directory table { - border-collapse:collapse; -} - -.directory td { - margin: 0px; - padding: 0px; - vertical-align: top; -} - -.directory td.entry { - white-space: nowrap; - padding-right: 6px; -} - -.directory td.entry a { - outline:none; -} - -.directory td.entry a img { - border: none; -} - -.directory td.desc { - width: 100%; - padding-left: 6px; - padding-right: 6px; - padding-top: 3px; - border-left: 1px solid rgba(0,0,0,0.05); -} - -.directory tr.even { - padding-left: 6px; - background-color: #F7F8FB; -} - -.directory img { - vertical-align: -30%; -} - -.directory .levels { - white-space: nowrap; - width: 100%; - text-align: right; - font-size: 9pt; -} - -.directory .levels span { - 
cursor: pointer; - padding-left: 2px; - padding-right: 2px; - color: #3D578C; -} - -div.dynheader { - margin-top: 8px; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -address { - font-style: normal; - color: #2A3D61; -} - -table.doxtable { - border-collapse:collapse; - margin-top: 4px; - margin-bottom: 4px; -} - -table.doxtable td, table.doxtable th { - border: 1px solid #2D4068; - padding: 3px 7px 2px; -} - -table.doxtable th { - background-color: #374F7F; - color: #FFFFFF; - font-size: 110%; - padding-bottom: 4px; - padding-top: 5px; -} - -table.fieldtable { - /*width: 100%;*/ - margin-bottom: 10px; - border: 1px solid #A8B8D9; - border-spacing: 0px; - -moz-border-radius: 4px; - -webkit-border-radius: 4px; - border-radius: 4px; - -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; - -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); - box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); -} - -.fieldtable td, .fieldtable th { - padding: 3px 7px 2px; -} - -.fieldtable td.fieldtype, .fieldtable td.fieldname { - white-space: nowrap; - border-right: 1px solid #A8B8D9; - border-bottom: 1px solid #A8B8D9; - vertical-align: top; -} - -.fieldtable td.fieldname { - padding-top: 5px; -} - -.fieldtable td.fielddoc { - border-bottom: 1px solid #A8B8D9; - /*width: 100%;*/ -} - -.fieldtable td.fielddoc p:first-child { - margin-top: 2px; -} - -.fieldtable td.fielddoc p:last-child { - margin-bottom: 2px; -} - -.fieldtable tr:last-child td { - border-bottom: none; -} - -.fieldtable th { - background-image:url('nav_f.png'); - background-repeat:repeat-x; - background-color: #E2E8F2; - font-size: 90%; - color: #253555; - padding-bottom: 4px; - padding-top: 5px; - text-align:left; - -moz-border-radius-topleft: 4px; - -moz-border-radius-topright: 4px; - -webkit-border-top-left-radius: 4px; - -webkit-border-top-right-radius: 4px; - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom: 1px solid #A8B8D9; -} - - -.tabsearch { - top: 0px; - left: 10px; - height: 36px; - background-image: url('tab_b.png'); - z-index: 101; - overflow: hidden; - font-size: 13px; -} - -.navpath ul -{ - font-size: 11px; - background-image:url('tab_b.png'); - background-repeat:repeat-x; - background-position: 0 -5px; - height:30px; - line-height:30px; - color:#8AA0CC; - border:solid 1px #C2CDE4; - overflow:hidden; - margin:0px; - padding:0px; -} - -.navpath li -{ - list-style-type:none; - float:left; - padding-left:10px; - padding-right:15px; - background-image:url('bc_s.png'); - background-repeat:no-repeat; - background-position:right; - color:#364D7C; -} - -.navpath li.navelem a -{ - height:32px; - display:block; - text-decoration: none; - outline: none; - color: #283A5D; - font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; - text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); - text-decoration: none; -} - -.navpath li.navelem a:hover -{ - color:#6884BD; -} - -.navpath li.footer -{ - list-style-type:none; - float:right; - padding-left:10px; - padding-right:15px; - background-image:none; - background-repeat:no-repeat; - background-position:right; - color:#364D7C; - font-size: 8pt; -} - - -div.summary -{ - float: right; - font-size: 8pt; - padding-right: 5px; - width: 50%; - text-align: right; -} - -div.summary a -{ - white-space: nowrap; -} - -div.ingroups -{ - font-size: 8pt; - width: 50%; - text-align: left; -} - -div.ingroups a -{ - white-space: nowrap; -} - -div.header -{ - 
background-image:url('nav_h.png'); - background-repeat:repeat-x; - background-color: #F9FAFC; - margin: 0px; - border-bottom: 1px solid #C4CFE5; -} - -div.headertitle -{ - padding: 5px 5px 5px 10px; -} - -dl -{ - padding: 0 0 0 10px; -} - -/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ -dl.section -{ - margin-left: 0px; - padding-left: 0px; -} - -dl.note -{ - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #D0C000; -} - -dl.warning, dl.attention -{ - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #FF0000; -} - -dl.pre, dl.post, dl.invariant -{ - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #00D000; -} - -dl.deprecated -{ - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #505050; -} - -dl.todo -{ - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #00C0E0; -} - -dl.test -{ - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #3030E0; -} - -dl.bug -{ - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #C08050; -} - -dl.section dd { - margin-bottom: 6px; -} - - -#projectlogo -{ - text-align: center; - vertical-align: bottom; - border-collapse: separate; -} - -#projectlogo img -{ - border: 0px none; -} - -#projectname -{ - font: 300% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 2px 0px; -} - -#projectbrief -{ - font: 120% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#projectnumber -{ - font: 50% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#titlearea -{ - padding: 0px; - margin: 0px; - width: 100%; - border-bottom: 1px solid #5373B4; -} - -.image -{ - text-align: center; -} - -.dotgraph -{ - text-align: center; -} - -.mscgraph -{ - text-align: center; -} - -.caption -{ - font-weight: bold; -} - -div.zoom -{ - border: 1px solid #90A5CE; -} - -dl.citelist { - margin-bottom:50px; -} - -dl.citelist dt { - color:#334975; - float:left; - font-weight:bold; - margin-right:10px; - padding:5px; -} - -dl.citelist dd { - margin:2px 0; - padding:5px 0; -} - -div.toc { - padding: 14px 25px; - background-color: #F4F6FA; - border: 1px solid #D8DFEE; - border-radius: 7px 7px 7px 7px; - float: right; - height: auto; - margin: 0 20px 10px 10px; - width: 200px; -} - -div.toc li { - background: url("bdwn.png") no-repeat scroll 0 5px transparent; - font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; - margin-top: 5px; - padding-left: 10px; - padding-top: 2px; -} - -div.toc h3 { - font: bold 12px/1.2 Arial,FreeSans,sans-serif; - color: #4665A2; - border-bottom: 0 none; - margin: 0; -} - -div.toc ul { - list-style: none outside none; - border: medium none; - padding: 0px; -} - -div.toc li.level1 { - margin-left: 0px; -} - -div.toc li.level2 { - margin-left: 15px; -} - -div.toc li.level3 { - margin-left: 30px; -} - -div.toc li.level4 { - margin-left: 45px; -} - -.inherit_header { - font-weight: bold; - color: gray; - cursor: pointer; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -.inherit_header td { - padding: 6px 0px 2px 5px; -} - -.inherit { - display: none; -} - -tr.heading h2 { - margin-top: 12px; - margin-bottom: 4px; -} - -@media print -{ - #top { display: none; } - #side-nav { display: none; } - #nav-path { display: none; } - body { overflow:visible; } - h1, h2, h3, 
h4, h5, h6 { page-break-after: avoid; } - .summary { display: none; } - .memitem { page-break-inside: avoid; } - #doc-content - { - margin-left:0 !important; - height:auto !important; - width:auto !important; - overflow:inherit; - display:inline; - } -} - diff --git a/src/externals/pio1/doc/CAMexample.txt b/src/externals/pio1/doc/CAMexample.txt deleted file mode 100644 index d6a3c20f3cf..00000000000 --- a/src/externals/pio1/doc/CAMexample.txt +++ /dev/null @@ -1,139 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ -/*! \page CAMexample Community Atmosphere Model (CAM) - -\section cam Implementation of PIO in CAM - -CAM references PIO through both interface routines (which call PIO routines) and -PIO routines proper. The interface routines control much of the setting up of PIO -output and input, whereas the actual output and input is accomplished through -direct PIO calls. The PIO routines are located in models/utils/pio. A -description of the PIO package along with usage information may be found in the -main PIO documentation. We will first describe the interface and then cover the -direct calls. - - -\section description Description of the CAM Interface - -The CAM interface to PIO consists of the pio_utils module, which is the file -models/atm/cam/src/utils/pio_utils.F90. - -Init_pio_subsystem is the first routine called. It is called from cam_init and -initializes PIO in CAM. Init_pio_subsystem calls read_namelist_pio, which calls -set_pio_parameters. The main parameters set includes the IO mode (netcdf vs pnetcdf), -number of IO tasks, and IO stride. - -Cam_pio_createfile and cam_pio_openfile create and open a PIO file, respectively. -Cam_pio_createfile is called from cam_write_restart, h_define (called from wshist, -which is called from write_restart_history), and atm_write_srfrest_mct. - -Cam_pio_openfile is called from setup_initial (called from cam_initial), -cam_read_restart, read_restart_history, atm_read_srfrest_mct, and wrapup (called -from cam_run4). - -Clean_iodesc_list, called from startup_initialconds and cam_read_restart, cleans the -list of IO descriptors. - -Get_phys_decomp and get_dyn_decomp create decompositions for physics and dynamics -variables, respectively. Get_decomp is an interface to those routines. - -Get_phys_decomp is directly called from infld_real_2d, infld_real_3d, -infld_real_3dncol; read_restart_physics, write_restart_physics; and -pbuf_read_restart, pbuf_write_restart. - -Get_dyn_decomp is directly called from infld_real_2d and infld_real_3d. - -Get_decomp is called from read_restart_history and dump_field (called from wshist, -which is called from write_restart_history). We see that the only time the -get_decomp interface is called is when dealing with history variables (where each -variable contains a description telling whether it relates to dynamics or physics). 
- -Get_phys_decomp and get_dyn_decomp have an optional column (or column_in) argument, -which is used when defining history variables over a range of columns rather than the -whole domain. - -Whenever a decomposition is created, a description of that decomposition is stored in -a list. When a new variable is catalogued, that list is searched to see if that needed -decomposition already exists. The routine performing that search is find_iodesc -(called from get_phys_decomp and get_dyn_decomp). This capability is supported only -when the range of history output is the whole domain. - -Get_phys_decomp calls get_phys_ldof (or get_column_ldof), and get_dyn_decomp calls -get_dyn_ldof (or get_column_ldof). These routines do the bulk of the work in -constructing the IO decompositions. Get_column_ldof is called when the history output -is restricted to a subset of the domain. - - -\section direct Direct calls to PIO - -Virtually all the calls to PIO routines reference the pio module, located in pio.F90. -Many of the variables contained in the pio module are referenced as well. This -section of the documentation focuses on these calls and surrounding environs -and discusses data types only peripherally. - -\section historyWrite Writing a history file - -Write_restart_history (in control/cam_history.F90) controls the writing of a history -file. Wshist writes the main variables that are on history tapes. Pio_put_var is used -to write header information, and subroutine dump_field writes the main history -variables, using primarily \ref PIO_write_darray. Additional variables are then written -using \ref PIO_put_var. (Recall that \ref PIO_write_darray is used to write distributed arrays.) - -\section historyRead Reading a history file - -Read_restart_history (in control/cam_history.F90) controls the reading of a history -file. Individual parameters are first read using pio_get_var. History variables are -then read in using \ref PIO_read_darray (which reads distributed arrays). - -\section restartWrite Writing a restart dump - -Cam_write_restart (in control/cam_restart.F90) controls the writing of a restart dump. -The calling chain includes write_restart_hycoef (which writes the pressure -coefficients), write_restart_dynamics, write_restart_physics, and write_restart_history. -Write_restart_hycoef is in hycoef.F90 and uses \ref PIO_put_var calls. Write_restart_dynamics -is in restart_dynamics.F90 (in the dynamics subdirectory relevant to the active -dynamical core) and issues primarily \ref PIO_write_darray calls. Write_restart_physics is -in physics/cam/restart_physics.F90. It calls a number of restart routines relating to -chemistry and aerosols, such as pbuf_write_restart (in physics/cam/phys_buffer.F90), -chem_write_restart (in chemistry/mozart), and write_prescribed_?????_restart (where -????? = ozone, ghg, aero, volcaero), located in chemistry/utils/prescribed_?????.F90. -Pbuf_write_restart calls the relevant decomposition routine followed by -\ref PIO_write_darray. Chem_write_restart calls write_?????_restart (where ????? = -tracer_cnst, tracer_srcs, linoz_data, spedata), located in chemistry/mozart/?????.F90. -These routines typically use \ref PIO_put_var. Write_restart_history is in cam_history.F90 -and uses \ref PIO_put_var. - -\section restartRead Reading a restart dump - -Cam_read_restart (in control/cam_restart.F90) controls the reading of a restart dump.
-The calling chain corresponds to that for cam_write_restart, and includes -read_restart_hycoef, read_restart_dynamics, read_restart_physics, and -read_restart_history. Typical routines called are pio_get_var and \ref PIO_read_darray. - -\section Init Initialization - -Cam_init (in control/cam_comp.F90) calls cam_initial and phys_init. Cam_initial, -which is located in the relevant dynamics subdirectory, calls initial_conds, -located in control/startup_initialconds. Initial_conds calls read_inidat, which is -located in the relevant dynamics subdirectory. Read_inidat calls infld, located in -control/ncdio_atm.F90. Infld calls the relevant decomposition routine followed by -\ref PIO_read_darray. - -Phys_init is located in physics/cam/phys_grid.F90 and calls a number of initialization -routines. An important one is phys_inidat, which makes several calls to infld. - -*/ diff --git a/src/externals/pio1/doc/Decomp.txt b/src/externals/pio1/doc/Decomp.txt deleted file mode 100644 index 1b7ad9e9444..00000000000 --- a/src/externals/pio1/doc/Decomp.txt +++ /dev/null @@ -1,109 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ /*! \page decomp Describing decompositions - - -One of the biggest challenges in working with PIO is setting up -the call to \ref PIO_initdecomp. The user must properly describe -how the data within each MPI task's memory should be placed or retrieved from -disk. PIO provides several interfaces. We describe the simplest interface -first and then progress to the most complex and flexible interface. - -\section decomp_bc Block-cyclic interface - -The simplest interface assumes that your arrays are decomposed in a block-cyclic -structure and can be described simply using a start-count type approach. -A simple block-cyclic decomposition for a 1-dimension (1D) array is illustrated in Figure 1. -Note that contiguous layout of the data in memory can be easily mapped to a contiguous -layout on disk. The \em start arguments correspond to the starting point of the block of contiguous -memory, while the \em count is the number of words. Note that \em start and \em count must be arrays of length -equal to the dimensionality of the distributed array. While we use a 1D array for simplicity, PIO -currently supports up to the Fortran limit of 7-dimension (7D) arrays. In the case of 7D arrays, the start and count arrays -would be of length 7. - -\image html block-cyclic.png "Figure 1: Setting up the \em start and \em count arrays for a single 1D array distributed across 3 MPI tasks." -\image latex block-cyclic.eps "Setting up the \em start and \em count arrays for a single 1D array distributed across 3 MPI tasks." width=10cm - -The call to \ref PIO_initdecomp that would implement the decomposition illustrated in Figure 1 is listed below. -The variable \em iosystem is created by the call to \ref PIO_init.
The second argument \em PIO_double is the PIO kind, and -indicates that this is a decomposition for an 8-byte real. (For a list of supported kinds see \ref PIO_kinds.) -The argument \em dims is the global dimension for the array. The \em start and \em count arrays are 8-byte integers of -type PIO_OFFSET, while \em iodesc is the IO descriptor generated by the call to PIO_initdecomp. - -\verbinclude simple-bc - -\section rearr Controlling IO decomposition - -The above example represents the simplest way to initialize and use PIO to write out and read distributed arrays. However, PIO provides some additional features that allow greater control over the IO process. In particular, it provides the ability to define an IO decomposition. Note that a user-defined IO decomposition is optional. If one is not provided and rearrangement is necessary, PIO will internally compute an IO decomposition. The reason an IO decomposition may be necessary is described in the section \ref decomp_dof below. - -This flexibility provides the ability to define an intermediate decomposition that is distinct from the computational decomposition. This IO decomposition can be constructed to maximize the write or read performance to the disk subsystem. We extend the -simple example in Figure 1 to include an IO decomposition in Figure 1b. - -\image html block-cyclic-rearr.png "Figure 1b: Block cyclic decomposition with rearrangement" -\image latex block-cyclic-rearr.eps "Block cyclic decomposition with rearrangement" width=10cm - -Figure 1b illustrates the creation of an IO decomposition on two of the MPI tasks. For this decomposition, the 8-word IO decomposition array and corresponding disk layout are evenly distributed between PE 0 (yellow) and PE 2 (blue). The arrows in Figure 1b indicate rearrangement that is performed within the PIO library. In this -case, PE 0 sends a word to PE 2, illustrated by the shading of yellow to blue, while PE 1 sends two words to PE 0, as illustrated by the shading of red to yellow. The rearranged array in the IO decomposition is subsequently written to disk. -Note that in this case, only two of three MPI tasks are performing writes to disk. The number of MPI tasks involved in IO to disk is specified in the call to \ref PIO_init using a combination of the num_aggregator and stride parameters. For Figure 1b, num_aggregator=3 and stride=2. PIO allows the user to specify the IO decomposition using the -optional parameters \em iostart and \em iocount. The following bits of code for PE 0, PE 1, and PE 2 illustrate the necessary calls to \ref PIO_initdecomp. - -\verbinclude simple-bc-rearr - -\verbinclude simple-bc-rearr-pe1 - -\verbinclude simple-bc-rearr-pe2 - -\section decomp_dof Degree of freedom interface - -The interface described in Section \ref decomp_bc, while simple and -used by both pNetCDF -(http://trac.mcs.anl.gov/projects/parallel-netcdf) and NetCDF-4 -(http://www.unidata.ucar.edu/software/netcdf/), can be insufficient for -applications with non-trivial decompositions. While it is possible to -use multiple calls to construct a file with a non-trivial -decomposition, the performance penalty may be significant. Therefore, PIO -provides a more general interface to \ref PIO_initdecomp -based on the degree of freedom concept. Each word within the -distributed array must be given a unique value that corresponds to its -order of placement in the file on disk. So, the first word in the file on -disk has a dof of 1, the second 2, etc.
This allows a fully general -specification of the decomposition. We illustrate its use in Figure 2. -Note that in Figure 2, PE 0 and PE 1 do not contain contiguous pieces -of the distributed array. The desired order on disk must be specified -using the compDOF argument to \ref PIO_initdecomp. In this case -PE 0 contains the 2nd, 4th, and 5th element of the array, PE 1 -contains the 1st and 3rd, and PE 2 contains the 6th, 7th, and 8th -elements of the array. The integer compDOF arrays for each MPI task -are illustrated at the bottom of Figure 2. - -\image html dof.png "Figure 2: Setting up the compDOF arrays for a single 1D array distributed across 3 MPI tasks." -\image latex dof.eps "Setting up the compDOF arrays for a single 1D array distributed across 3 MPI tasks." width=10cm - -The call to \ref PIO_initdecomp which implements Figure 2 on PE 0 is provided below. - -\verbinclude simple-dof - -As with the block-cyclic interface, the degree of freedom interface provides the ability to specify the IO decomposition through optional arguments to \ref PIO_initdecomp. - -\image html dof-rearr.png "Figure 3: Setting up the compDOF arrays and setting the IO decomposition for a single 1D array distributed across 3 MPI tasks and written from 2 tasks after rearrangement within the PIO library" -\image latex dof-rearr.eps "Setting up the compDOF arrays and IO decomposition for a single 1D array distributed across 3 MPI tasks" width=10cm - -Figure 3 illustrates the inclusion of an IO decomposition and associated rearrangement to write out the distributed array. The shading of the array elements shows how the individual PE arrays are blended using the IO decomposition specifications. The subroutine call to \ref PIO_initdecomp for PE 0 is illustrated below: - -\verbinclude simple-dof-rearr - -*/ - diff --git a/src/externals/pio1/doc/DoxygenLayout.xml b/src/externals/pio1/doc/DoxygenLayout.xml deleted file mode 100644 index c3552b69f03..00000000000 --- a/src/externals/pio1/doc/DoxygenLayout.xml +++ /dev/null @@ -1,177 +0,0 @@ diff --git a/src/externals/pio1/doc/Error.txt b/src/externals/pio1/doc/Error.txt deleted file mode 100644 index 774e11039d7..00000000000 --- a/src/externals/pio1/doc/Error.txt +++ /dev/null @@ -1,30 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ /*! -\page error Error Handling - -By default, PIO handles errors internally by printing a string -describing the error and then calling mpi_abort.
Application -developers can change this behavior with a call to -\ref PIO_seterrorhandling. -
-For example, if a developer wanted -to see if an input netcdf file contained the variable 'U', they might do the following: -
-\verbinclude errorhandle -
-\copydoc PIO_error_method -*/
 diff --git a/src/externals/pio1/doc/Examples.txt b/src/externals/pio1/doc/Examples.txt deleted file mode 100644 index 60164535c9d..00000000000 --- a/src/externals/pio1/doc/Examples.txt +++ /dev/null @@ -1,27 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ /*! \page examp Examples - Examples of the usage of PIO -
- PIO has been implemented in several geophysical component models, including the -Community Atmosphere Model (CAM), the Community Land Model (CLM), the Parallel Ocean Program -(POP), the Community Ice CodE (CICE), and the coupler used by CCSM4.0 (CPL7). We also provide -several simpler example codes as well as a test code that is suitable for regression testing and -benchmarking. -
- - \subpage CAMexample - - \subpage testpio_example -*/
 diff --git a/src/externals/pio1/doc/GettingStarted.txt b/src/externals/pio1/doc/GettingStarted.txt deleted file mode 100644 index 7dab4a05eee..00000000000 --- a/src/externals/pio1/doc/GettingStarted.txt +++ /dev/null @@ -1,56 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ -
-/*! \page intro Introduction -
-PIO is a software interface layer designed to encapsulate the -complexities of parallel IO and make it easier to replace the lower -level software backend. It currently supports netcdf -(http://www.unidata.com), pnetcdf -(http://trac.mcs.anl.gov/projects/parallel-netcdf), and MPI-IO. -
-PIO calls are collective. An MPI communicator is set in a call to -\ref PIO_init, and all tasks associated with that communicator must -participate in all subsequent calls to PIO. An application can make -multiple calls to PIO in order to support multiple MPI communicators. -
-To use PIO, your program should begin by calling the -\ref PIO_init function, providing the MPI communicator and the -rank within that communicator of the calling task. You should also -provide the number of iotasks to be used, the stride or number of -tasks between iotasks, and the number of MPI aggregators to be used. -You may optionally also choose the base IO task.
This task will be -used for output of any non-decomposed data. This call initializes an -IO system type structure that will be used in subsequent file and -decomposition functions. -
-You can then open a file for reading or writing with a call to \ref -PIO_createfile or \ref PIO_openfile. In this call you will specify the -file type: pio_iotype_netcdf, pio_iotype_pnetcdf, -pio_iotype_binary, pio_iotype_netcdf4c, or -pio_iotype_netcdf4p, along with the file name and optionally the -netcdf mode. -
-To read or write decomposed data, you must first describe the mapping -between the organization of data in the file and that same data in the -application space. This is done in a call to -\ref PIO_initdecomp. In the simplest call to this function, a -one-dimensional integer array is passed from each task; the values in -the array represent the 0-based offset from the beginning of the array -on file. -
- -*/
 diff --git a/src/externals/pio1/doc/Installing.txt b/src/externals/pio1/doc/Installing.txt deleted file mode 100644 index 629bd3cda3b..00000000000 --- a/src/externals/pio1/doc/Installing.txt +++ /dev/null @@ -1,62 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2013 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ /*! \page install Installing PIO - -
  1. Download the code from the following Google code subversion location using a subversion export: - -> svn export http://parallelio.googlecode.com/svn/trunk_tags/pio1_7_1 -
  2. Configure - PIO uses the GNU autoconf tools; for historical reasons the configure -script is in the pio subdirectory. A complete set of configure arguments, options, and flags is available by running: - -> configure --help. - -Before running configure, you will need to know the location of some supporting libraries. - -- Set the NETCDF_PATH environment variable to the netcdf install directory, or use --disable-netcdf to -disable serial NetCDF support. - -- Use the flag --enable-netcdf4 to enable the NetCDF4/HDF5 parallel interface. - -- Set the PNETCDF_PATH environment variable to the pnetcdf install directory; this will enable support -for parallel-netcdf. - -- Set the CC, FC, MPICC, and MPIFC environment variables to the serial and parallel C and Fortran 90 compiler names. - -- Use the --prefix=$PREFIX argument to set the correct installation directory. The default is /usr/local. - -This should be the minimal set of flags required to build pio. - -One additional flag of interest is --enable-filesystem-hints, which can -be set to gpfs or lustre. You should only use this flag if you are sure that the -file system you are using is gpfs or lustre, respectively. If you are unsure, then do not use this flag.
  3. After configure completes, use gmake in the pio subdirectory to complete the build. - -> gmake - -Note that although the user application only needs to 'use pio', at least some compilers require that -all of the module files be located in the install directory. If that is the case, then running - -> gmake install - -will move the compiled library and module files to the directory specified -by $PREFIX. An illustrative end-to-end sequence is sketched below.
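To make these steps concrete, an end-to-end sequence might look like the following. This is only an illustration: the install locations are placeholder paths, the environment-variable syntax shown assumes a Bourne-style shell, and the exact variables you need depend on which backends you enable.

> cd pio
> export NETCDF_PATH=/path/to/netcdf
> export PNETCDF_PATH=/path/to/pnetcdf
> export CC=gcc FC=gfortran MPICC=mpicc MPIFC=mpif90
> configure --prefix=$PREFIX
> gmake
> gmake install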
- -*/ diff --git a/src/externals/pio1/doc/api.txt b/src/externals/pio1/doc/api.txt deleted file mode 100644 index caba3bb377b..00000000000 --- a/src/externals/pio1/doc/api.txt +++ /dev/null @@ -1,61 +0,0 @@ - /*! \page api PIO user interface - This is a list of all user interface routines: - - \section api_fileops PIO file Operations - - \ref PIO_openfile - - \ref PIO_createfile - - \ref PIO_syncfile - - \ref PIO_closefile - \section api_system PIO startup and shutdown routines - - \ref PIO_init - - \ref PIO_finalize - \section api_decomp PIO decomposition routines - - \ref PIO_initdecomp - - \ref PIO_freedecomp - \section readwrite Reading and Writing distributed variables - - \ref PIO_read_darray - - \ref PIO_write_darray - \section utility Utility routines - - \ref PIO_setiotype - - \ref PIO_set_hint - - \ref PIO_numtoread - - \ref PIO_numtowrite - - \ref PIO_setframe - - \ref PIO_advanceframe - - \ref PIO_setdebuglevel - - \ref PIO_seterrorhandling - - \ref PIO_get_local_array_size - - \ref PIO_dupiodesc - - \ref PIO_getnumiotasks - - \ref PIO_set_blocksize - \section netcdf NetCDF format specific routines - Also see: http://www.unidata.ucar.edu/software/netcdf/docs/ - \subsection putget Reading/Writing netcdf metadata - - \ref PIO_get_att - - \ref PIO_put_att - - \ref PIO_get_var - - \ref PIO_put_var - \subsection utilnc Netcdf utility routines - - \ref PIO_enddef - - \ref PIO_redef - - \ref PIO_def_dim - - \ref PIO_def_var - - \ref PIO_copy_att - \subsection inqnc NetCDF file inquiry routines - - \ref PIO_inquire - - \ref PIO_inq_attname - - \ref PIO_inq_att - - \ref PIO_inq_attlen - - \ref PIO_inq_varid - - \ref PIO_inq_varname - - \ref PIO_inq_vartype - - \ref PIO_inq_varndims - - \ref PIO_inq_vardimid - - \ref PIO_inq_varnatts - - \ref PIO_inq_dimid - - \ref PIO_inq_dimname - - \ref PIO_inq_dimlen - - \ref PIO_inquire_variable - - \ref PIO_inquire_dimension - -*/ diff --git a/src/externals/pio1/doc/base.txt b/src/externals/pio1/doc/base.txt deleted file mode 100644 index 21cfdaf7bf5..00000000000 --- a/src/externals/pio1/doc/base.txt +++ /dev/null @@ -1,44 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ -/*! - -\mainpage Parallel I/O library (PIO) - -\author John M. Dennis -\author Jim Edwards -\author Robert Jacob -\author Ray Loy -\author Art Mirin -\author Mariana Vertenstein - -The Parallel I/O (PIO) library has been developed over several years to improve the -ability of component models of the Community Earth System Model (CESM) to perform I/O. However, we -believe that the interface is sufficiently general to be useful to a broader spectrum of applications. -Although PIO has a binary output capability the focus of development has been on backend tools that -use the NetCDF file format. - -This user's guide provides information about the PIO library and examples on how it can be used. 
-Please review the ChangeLog that is included with the distribution for up-to-date release information. - - - \ref intro - - \ref install - - \ref decomp - - \ref error - - \ref examp - - \ref faq - - \ref api -*/ diff --git a/src/externals/pio1/doc/example/errorhandle b/src/externals/pio1/doc/example/errorhandle deleted file mode 100644 index aa4f9be30a0..00000000000 --- a/src/externals/pio1/doc/example/errorhandle +++ /dev/null @@ -1,9 +0,0 @@ - call pio_seterrorhandling(File, PIO_BCAST_ERROR) - ierr = pio_inq_varid(File, 'U', uid) - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) - if(ierr/= PIO_NOERR) then - write(stderr,*) 'Variable U not found on input file, setting to 0' - U=0 - else - ... - endif diff --git a/src/externals/pio1/doc/example/simple-bc b/src/externals/pio1/doc/example/simple-bc deleted file mode 100644 index 1b7cac1658f..00000000000 --- a/src/externals/pio1/doc/example/simple-bc +++ /dev/null @@ -1,14 +0,0 @@ - - type (iosystem_desc_t) :: iosystem - integer (i4) :: dims(1) - integer (kind=PIO_OFFSET) :: start(1), count(1) - type (io_desc_t) :: iodesc - ... - !--------------------------------------- - ! Initializing the decomposition on PE 0 - !--------------------------------------- - dims(1) = 8 - start(1) = 3 - count(1) = 3 - call PIO_initdecomp(iosystem,PIO_double,dims,start,count,iodesc) - diff --git a/src/externals/pio1/doc/example/simple-bc-rearr b/src/externals/pio1/doc/example/simple-bc-rearr deleted file mode 100644 index 3ef3876a8dd..00000000000 --- a/src/externals/pio1/doc/example/simple-bc-rearr +++ /dev/null @@ -1,17 +0,0 @@ - - type (iosystem_desc_t) :: iosystem - integer (i4) :: dims(1) - integer (kind=PIO_OFFSET) :: compstart(1), compcount(1) - integer (kind=PIO_OFFSET) :: iostart(1), iocount(1) - type (io_desc_t) :: iodesc - ... - !--------------------------------------- - ! Initializing the decomposition on PE 0 - !--------------------------------------- - dims(1) = 8 - compstart(1) = 3 - compcount(1) = 3 - iostart(1) = 1 - iocount(1) = 4 - call PIO_initdecomp(iosystem,PIO_double,dims,compstart,compcount,iodesc,iostart=iostart,iocount=iocount) - diff --git a/src/externals/pio1/doc/example/simple-bc-rearr-pe1 b/src/externals/pio1/doc/example/simple-bc-rearr-pe1 deleted file mode 100644 index 2b016a3aea8..00000000000 --- a/src/externals/pio1/doc/example/simple-bc-rearr-pe1 +++ /dev/null @@ -1,14 +0,0 @@ - - type (iosystem_desc_t) :: iosystem - integer (i4) :: dims(1) - integer (kind=PIO_OFFSET) :: compstart(1), compcount(1) - type (io_desc_t) :: iodesc - ... - !--------------------------------------- - ! Initializing the decomposition on PE 1 - !--------------------------------------- - dims(1) = 8 - compstart(1) = 1 - compcount(1) = 2 - call PIO_initdecomp(iosystem,PIO_double,dims,compstart,compcount,iodesc) - diff --git a/src/externals/pio1/doc/example/simple-bc-rearr-pe2 b/src/externals/pio1/doc/example/simple-bc-rearr-pe2 deleted file mode 100644 index 3aedb52f36d..00000000000 --- a/src/externals/pio1/doc/example/simple-bc-rearr-pe2 +++ /dev/null @@ -1,17 +0,0 @@ - - type (iosystem_desc_t) :: iosystem - integer (i4) :: dims(1) - integer (kind=PIO_OFFSET) :: compstart(1), compcount(1) - integer (kind=PIO_OFFSET) :: iostart(1), iocount(1) - type (io_desc_t) :: iodesc - ... - !--------------------------------------- - ! 
Initializing the decomposition on PE 2 - !--------------------------------------- - dims(1) = 8 - compstart(1) = 6 - compcount(1) = 3 - iostart(1) = 5 - iocount(1) = 4 - call PIO_initdecomp(iosystem,PIO_double,dims,compstart,compcount,iodesc,iostart=iostart,iocount=iocount) -
 diff --git a/src/externals/pio1/doc/example/simple-dof b/src/externals/pio1/doc/example/simple-dof deleted file mode 100644 index 159a6895dbc..00000000000 --- a/src/externals/pio1/doc/example/simple-dof +++ /dev/null @@ -1,13 +0,0 @@ - - type (iosystem_desc_t) :: iosystem - integer (i4) :: dims(1) - integer (i4) :: compdof(3) - type (io_desc_t) :: iodesc - ... - !--------------------------------------- - ! Initializing the decomposition on PE 0 - !--------------------------------------- - dims(1) = 8 - compdof = (/2,4,5/) - call PIO_initdecomp(iosystem,PIO_double,dims,compdof,iodesc) -
 diff --git a/src/externals/pio1/doc/example/simple-dof-rearr b/src/externals/pio1/doc/example/simple-dof-rearr deleted file mode 100644 index bea57693691..00000000000 --- a/src/externals/pio1/doc/example/simple-dof-rearr +++ /dev/null @@ -1,17 +0,0 @@ - - type (iosystem_desc_t) :: iosystem - integer (i4) :: dims(1) - integer (i4) :: compdof(3) - type (io_desc_t) :: iodesc - integer (kind=PIO_OFFSET) :: iostart(1), iocount(1) - - ... - !--------------------------------------- - ! Initializing the decomposition on PE 0 - !--------------------------------------- - dims(1) = 8 - compdof = (/2,4,5/) - iostart(1) = 1 - iocount(1) = 4 - call PIO_initdecomp(iosystem,PIO_double,dims,compdof,iodesc,iostart=iostart,iocount=iocount) -
 diff --git a/src/externals/pio1/doc/faq.txt b/src/externals/pio1/doc/faq.txt deleted file mode 100644 index 0f37956ea19..00000000000 --- a/src/externals/pio1/doc/faq.txt +++ /dev/null @@ -1,31 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ /*! \page faq Frequently Asked Questions - - Here is a list of frequently asked questions and their answers. -
-
How do I specify which tasks perform IO?
-
This is done in the call to \ref PIO_init, which has two interfaces: init_intracom and init_intercom. -
  • In the init_intracom interface, use the num_iotasks and stride variables to specify the total number of IO tasks and the stride between them with respect to the MPI communicator, comp_comm, which is provided. You can also use the optional base argument to shift the first IO task away from the first computational task; this is often desirable because the application's first computational task often has higher memory requirements than other tasks. IO tasks are a subset of the tasks defined in comp_comm. (An illustrative call is sketched after this list.) -
  • In the init_intercom interface, IO tasks are a disjoint set of tasks from those in the computational communicator. This interface is still experimental and not recommended for production use at this time. -
- -Note that num_iotasks is the maximum number of IO tasks to use for an IO operation. The size of the field being read or written, along with the tunable blocksize parameter, \ref PIO_set_blocksize, determines the actual number of tasks used for a given IO operation. -
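For illustration only, a call through the init_intracom interface might look roughly like the sketch below. The argument order and the rearranger constant (PIO_rearr_box) are assumptions that should be checked against \ref PIO_init for your version of PIO, and the numeric values are arbitrary:

   type (iosystem_desc_t) :: iosystem
   integer :: comp_rank, comp_comm
   integer :: num_iotasks, num_aggregator, stride
   ...
   num_iotasks    = 4   ! maximum number of IO tasks
   num_aggregator = 4   ! number of MPI aggregators
   stride         = 8   ! spacing between successive IO tasks in comp_comm
   ! base=1 shifts the first IO task off the first computational task
   call PIO_init(comp_rank, comp_comm, num_iotasks, num_aggregator, &
                 stride, PIO_rearr_box, iosystem, base=1)

The iosystem structure returned here is the same iosystem_desc_t that is later passed to \ref PIO_initdecomp and to the file open and create routines.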
-
How do I test if PIO is installed and working correctly?
-
The PIO Library distribution contains a testpio subdirectory with a number of programs to test the PIO library. Please see the \ref examp page for details.
- -
-*/ diff --git a/src/externals/pio1/doc/footer.html b/src/externals/pio1/doc/footer.html deleted file mode 100644 index 33c4cb5a0db..00000000000 --- a/src/externals/pio1/doc/footer.html +++ /dev/null @@ -1,4 +0,0 @@ -
-Generated by doxygen
- - diff --git a/src/externals/pio1/doc/header.html b/src/externals/pio1/doc/header.html deleted file mode 100644 index 9965a24c889..00000000000 --- a/src/externals/pio1/doc/header.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - - -$title - - - - - - diff --git a/src/externals/pio1/doc/images/baseimage.graffle b/src/externals/pio1/doc/images/baseimage.graffle deleted file mode 100644 index eed22e89717..00000000000 --- a/src/externals/pio1/doc/images/baseimage.graffle +++ /dev/null @@ -1,1128 +0,0 @@ - - - - - ActiveLayerIndex - 0 - ApplicationVersion - - com.omnigroup.OmniGrafflePro - 137.11.0.108132 - - AutoAdjust - - BackgroundGraphic - - Bounds - {{0, 0}, {576, 733}} - Class - SolidGraphic - ID - 2 - Style - - fill - - GradientColor - - w - 0.666667 - - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - CanvasOrigin - {0, 0} - CanvasSize - {576, 733} - ColumnAlign - 1 - ColumnSpacing - 36 - CreationDate - 2009-12-16 08:35:43 -0700 - Creator - John Dennis - DisplayScale - 1 0/72 in = 1 0/72 in - FileType - flat - GraphDocumentVersion - 6 - GraphicsList - - - Bounds - {{369, 23}, {39, 39}} - Class - ShapedGraphic - ID - 44 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{330, 23}, {39, 39}} - Class - ShapedGraphic - ID - 43 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{291, 23}, {39, 39}} - Class - ShapedGraphic - ID - 42 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{213, 23}, {39, 39}} - Class - ShapedGraphic - ID - 41 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{174, 23}, {39, 39}} - Class - ShapedGraphic - ID - 40 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{98, 23}, {39, 39}} - Class - ShapedGraphic - ID - 38 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{59, 23}, {39, 39}} - Class - ShapedGraphic - ID - 37 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{20, 23}, {39, 39}} - Class - ShapedGraphic - ID - 36 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{330, 134}, {39, 39}} - Class - ShapedGraphic - ID - 35 - Shape - Rectangle - Style - - fill - - Color - - b - 0.701961 - g - 0.701961 - r - 0.701961 - - - shadow - - Draws - NO - - - - - Bounds - {{291, 134}, {39, 39}} - Class - ShapedGraphic - ID - 34 - Shape - Rectangle - Style - - fill - - Color - - b - 0.701961 - g - 0.701961 - r - 0.701961 - - - shadow - - Draws - NO - - - - - Bounds - {{252, 134}, {39, 39}} - Class - ShapedGraphic - ID - 33 - Shape - Rectangle - Style - - fill - - Color - - b - 0.701961 - g - 0.701961 - r - 0.701961 - - - shadow - - Draws - NO - - - - - Bounds - {{213, 134}, {39, 39}} - Class - ShapedGraphic - ID - 32 - Shape - Rectangle - Style - - fill - - Color - - b - 0.701961 - g - 0.701961 - r - 0.701961 - - - shadow - - Draws - NO - - - - - Bounds - {{174, 134}, {39, 39}} - Class - ShapedGraphic - ID - 31 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 0.4 - r - 0.8 - - - shadow - - Draws - NO - - - - - Bounds - {{135, 134}, {39, 39}} - Class - ShapedGraphic - ID - 30 - Shape - 
Rectangle - Style - - fill - - Color - - b - 1 - g - 0.4 - r - 0.8 - - - shadow - - Draws - NO - - - - - Bounds - {{96, 134}, {39, 39}} - Class - ShapedGraphic - ID - 29 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 0.4 - r - 0.8 - - - shadow - - Draws - NO - - - - - Bounds - {{57, 134}, {39, 39}} - Class - ShapedGraphic - ID - 27 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 0.4 - r - 0.8 - - - shadow - - Draws - NO - - - - - Bounds - {{330, 199}, {39, 39}} - Class - ShapedGraphic - ID - 26 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{291, 199}, {39, 39}} - Class - ShapedGraphic - ID - 25 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{213, 199}, {39, 39}} - Class - ShapedGraphic - ID - 24 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{174, 199}, {39, 39}} - Class - ShapedGraphic - ID - 23 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{135, 199}, {39, 39}} - Class - ShapedGraphic - ID - 22 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{252, 199}, {39, 39}} - Class - ShapedGraphic - ID - 14 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{96, 199}, {39, 39}} - Class - ShapedGraphic - ID - 18 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{57, 199}, {39, 39}} - Class - ShapedGraphic - ID - 19 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{57, 199}, {273, 39}} - Class - ShapedGraphic - ID - 20 - Shape - Rectangle - Style - - shadow - - Draws - NO - - - - - GridInfo - - GuidesLocked - NO - GuidesVisible - YES - HPages - 1 - ImageCounter - 3 - KeepToScale - - Layers - - - Lock - NO - Name - Layer 1 - Print - YES - View - YES - - - LayoutInfo - - Animate - NO - AutoLayout - 2 - LineLength - 0.4643835723400116 - circoMinDist - 18 - circoSeparation - 0.0 - layoutEngine - dot - neatoSeparation - 0.0 - twopiSeparation - 0.0 - - LinksVisible - NO - MagnetsVisible - NO - MasterSheets - - ModificationDate - 2009-12-16 08:54:03 -0700 - Modifier - John Dennis - NotesVisible - NO - Orientation - 2 - OriginVisible - NO - OutlineStyle - Basic - PageBreaks - NO - PrintInfo - - NSBottomMargin - - float - 41 - - NSLeftMargin - - float - 18 - - NSPaperSize - - size - {612, 792} - - NSRightMargin - - float - 18 - - NSTopMargin - - float - 18 - - - PrintOnePage - - QuickLookPreview - - JVBERi0xLjMKJcTl8uXrp/Og0MTGCjQgMCBvYmoKPDwgL0xlbmd0aCA1IDAgUiAvRmls - dGVyIC9GbGF0ZURlY29kZSA+PgpzdHJlYW0KeAGNVMtuwjAQvOcr9gdI/cjLZ269oRz6 - AahIrdRKlP+XOrOxE+IAMRHCKDOza894r3KSqxg8bd9J7738fcqH/IqpO6Mf+ZK3483K - +SZWn9tZDqZu49tlFQUqCFygJk1oxfVefKDmBdx3fL8nteOoRY2MR9S3+ufAH7Zw/iHf - hhD5lBzRKN7XzVw3rdBPrIZKqdgLTUUtkiwKidBpwzsSQLGtGYWuqqUnng+kXOtKtAhb - i1Uvtmh9WyJK2Cy62aTtmyIRwGYRmjeiM2d9CZewNRfG3R1RNR1RsEVigG3EvDcLd0rb - I7cJ23BNPWzyow0hQW1nJmOfazKVvlnZz+DMzCnoeTcV8szg5Ex6tUPlVVBLE3e2FBaV - cOlk4qqT6kZvbOjMgOjGFRxKKwYYHhaIq9VJPDUWhSxGyiS5WsXbsSPOA9PbkcRT5w55 - 2OHyxAibd50aYx4KuBqbvC7kdEpUzkjX2yUAl4fDCyinc2+aRhxdbciIVZ4SnXqYeBkx - 
DBlxU1HjNWyIT4clh8DOJihJWN4MHd+h6vkDlqjp+O9GQJySMKlEC7Bcix4VUAlL1JQg - 390Zwd4eO0hYTj39A380eq0KZW5kc3RyZWFtCmVuZG9iago1IDAgb2JqCjQ0OQplbmRv - YmoKMiAwIG9iago8PCAvVHlwZSAvUGFnZSAvUGFyZW50IDMgMCBSIC9SZXNvdXJjZXMg - NiAwIFIgL0NvbnRlbnRzIDQgMCBSIC9NZWRpYUJveCBbMCAwIDU3NiA3MzNdCj4+CmVu - ZG9iago2IDAgb2JqCjw8IC9Qcm9jU2V0IFsgL1BERiBdIC9Db2xvclNwYWNlIDw8IC9D - czEgNyAwIFIgPj4gPj4KZW5kb2JqCjggMCBvYmoKPDwgL0xlbmd0aCA5IDAgUiAvTiAz - IC9BbHRlcm5hdGUgL0RldmljZVJHQiAvRmlsdGVyIC9GbGF0ZURlY29kZSA+PgpzdHJl - YW0KeAGFlE1IFGEYx/+zjQSxBtGXCMXQwSRUJgtSAtP1K1O2ZdVMCWKdfXedHGenmd0t - RSKE6Jh1jC5WRIeITuGhQ6c6RASZdYmgo0UQBV4itv87k7tjVL4wM795nv/7fL3DAFWP - Uo5jRTRgys67yd6Ydnp0TNv8GlWoRhRcKcNzOhKJAZ+plc/1a/UtFGlZapSx1vs2fKt2 - mRBQNCp3ZAM+LHk84OOSL+SdPDVnJBsTqTTZITe4Q8lO8i3y1myIx0OcFp4BVLVTkzMc - l3EiO8gtRSMrYz4g63batMnvpT3tGVPUsN/INzkL2rjy/UDbHmDTi4ptzAMe3AN211Vs - 9TXAzhFg8VDF9j3pz0fZ9crLHGr2wynRGGv6UCp9rwM23wB+Xi+VftwulX7eYQ7W8dQy - Cm7R17Iw5SUQ1BvsZvzkGv2Lg558VQuwwDmObAH6rwA3PwL7HwLbHwOJamCoFZHLbDe4 - 8uIi5wJ05pxp18xO5LVmXT+idfBohdZnG00NWsqyNN/laa7whFsU6SZMWQXO2V/beI8K - e3iQT/YXuSS87t+szKVTXZwlmtjWp7To6iY3kO9nzJ4+cj2v9xm3Zzhg5YCZ7xsKOHLK - tuI8F6mJ1Njj8ZNkxldUJx+T85A85xUHZUzffi51IkGupT05meuXml3c2z4zMcQzkqxY - MxOd8d/8xi0kZd591Nx1LP+bZ22RZxiFBQETNu82NCTRixga4cBFDhl6TCpMWqVf0GrC - w+RflRYS5V0WFb1Y4Z4Vf895FLhbxj+FWBxzDeUImv5O/6Iv6wv6Xf3zfG2hvuKZc8+a - xqtrXxlXZpbVyLhBjTK+rCmIb7DaDnotZGmd4hX05JX1jeHqMvZ8bdmjyRzianw11KUI - ZWrEOOPJrmX3RbLFN+HnW8v2r+lR+3z2SU0l17K6eGYp+nw2XA1r/7OrYNKyq/DkjZAu - PGuh7lUPqn1qi9oKTT2mtqttahffjqoD5R3DnJWJC6zbZfUp9mBjmt7KSVdmi+Dfwi+G - /6VeYQvXNDT5D024uYxpCd8R3DZwh5T/w1+zAw3eCmVuZHN0cmVhbQplbmRvYmoKOSAw - IG9iago3OTIKZW5kb2JqCjcgMCBvYmoKWyAvSUNDQmFzZWQgOCAwIFIgXQplbmRvYmoK - MyAwIG9iago8PCAvVHlwZSAvUGFnZXMgL01lZGlhQm94IFswIDAgNTc2IDczM10gL0Nv - dW50IDEgL0tpZHMgWyAyIDAgUiBdID4+CmVuZG9iagoxMCAwIG9iago8PCAvVHlwZSAv - Q2F0YWxvZyAvUGFnZXMgMyAwIFIgPj4KZW5kb2JqCjEgMCBvYmoKPDwgL1RpdGxlIChV - bnRpdGxlZCkgL0F1dGhvciAoSm9obiBEZW5uaXMpIC9DcmVhdG9yIChPbW5pR3JhZmZs - ZSBQcm9mZXNzaW9uYWwpCi9Qcm9kdWNlciAoTWFjIE9TIFggMTAuNS44IFF1YXJ0eiBQ - REZDb250ZXh0KSAvQ3JlYXRpb25EYXRlIChEOjIwMDkxMjE2MTYwNjIyWjAwJzAwJykK - L01vZERhdGUgKEQ6MjAwOTEyMTYxNjA2MjJaMDAnMDAnKSA+PgplbmRvYmoKeHJlZgow - IDExCjAwMDAwMDAwMDAgNjU1MzUgZiAKMDAwMDAwMTgxNiAwMDAwMCBuIAowMDAwMDAw - NTY0IDAwMDAwIG4gCjAwMDAwMDE2ODMgMDAwMDAgbiAKMDAwMDAwMDAyMiAwMDAwMCBu - IAowMDAwMDAwNTQ1IDAwMDAwIG4gCjAwMDAwMDA2NjggMDAwMDAgbiAKMDAwMDAwMTY0 - OCAwMDAwMCBuIAowMDAwMDAwNzM2IDAwMDAwIG4gCjAwMDAwMDE2MjkgMDAwMDAgbiAK - MDAwMDAwMTc2NiAwMDAwMCBuIAp0cmFpbGVyCjw8IC9TaXplIDExIC9Sb290IDEwIDAg - UiAvSW5mbyAxIDAgUiAvSUQgWyA8MzIwM2I5NzllNTkwODY1OGU1N2MzZTFmZWM2NjQz - MjA+CjwzMjAzYjk3OWU1OTA4NjU4ZTU3YzNlMWZlYzY2NDMyMD4gXSA+PgpzdGFydHhy - ZWYKMjAzNAolJUVPRgoxIDAgb2JqCjw8L0F1dGhvciAoSm9obiBEZW5uaXMpL0NyZWF0 - aW9uRGF0ZSAoRDoyMDA5MTIxNjE1MzUwMFopL0NyZWF0b3IgKE9tbmlHcmFmZmxlIFBy - b2Zlc3Npb25hbCA1LjEuMSkvTW9kRGF0ZSAoRDoyMDA5MTIxNjE1NTQwMFopL1Byb2R1 - Y2VyIChNYWMgT1MgWCAxMC41LjggUXVhcnR6IFBERkNvbnRleHQpL1RpdGxlIChVbnRp - dGxlZCk+PgplbmRvYmoKeHJlZgoxIDEKMDAwMDAwMjQxMSAwMDAwMCBuIAp0cmFpbGVy - Cjw8L0lEIFs8MzIwM2I5NzllNTkwODY1OGU1N2MzZTFmZWM2NjQzMjA+IDwzMjAzYjk3 - OWU1OTA4NjU4ZTU3YzNlMWZlYzY2NDMyMD5dIC9JbmZvIDEgMCBSIC9QcmV2IDIwMzQg - L1Jvb3QgMTAgMCBSIC9TaXplIDExPj4Kc3RhcnR4cmVmCjI2MTYKJSVFT0YK - - QuickLookThumbnail - - TU0AKgAABUSANOBP8AJoAQeEQmFN5vPMRCIGwqJQlwuJ7B8PAqJxuDjsWiwrxyENRmMw - AMhkSKFOB3O4QBIJSqFB8Piwmk2ZQhHtRqN+cwl5Op1A4LBaZO5uNwJCQST8ANJptIVi - wXU5bLYNkolOWnL1eCghkRsU5qtQ9R9CzJqMRiCxOp2nLp+PwjAQCU5qDAYCw3m+nESe - 
L0WCynBNns92jEYzIErFYvcnk+nVCpVSrVitVyf16wWKyWa0Wq2W64T+5XS7Xi9Xy/T/ - ANTBYSf4bEYrGY7IZKf5Sp1Wf1es1uu1+w2Of2Wziy0yq12233G53W7z+83u+3/A4PC4 - fE4uVY3H5HJ1He5fg5qc5zi5/k8uRc3SdDUdOc9XWdjYdrZ9zbd/cPE3byMs37MOEzbi - M847QOU0TnNKnLTuk1TrNanLXti7bau8kTwNy8bKt8nLgMy4bOuMnLkNC5jRue0zotS6 - jVuu1zstknLaO627wt0nKBGmghdqcbpunUEYRqMn5vm+dgQhCCinIOFCPiStTDgAbJsq - cbxznOEQMgzKCihYIQhKcUqeHVKAAHScpyguDYNpkdJtm2C4ShKpx/z1NU+T7P0/0BQN - BUGmU9IJQlEUTRVF0ZRVDUbSFI0lSdGUfSlL0xTNNIVS1N09T9QUHTtQ1JUtTI3UdT1V - VdP1TVlX1hSNXVjWla0FWdbVzXSf1xXdfV+hNeo4aRmGof59Sgbxvm8EQQhFLVl2bZ6V - G4bxthIEU8J+bhwG0EATg/IUhyMEctIYh9ppzJRvyaEKZADeAZXlPM9zUYxFmoDpqRst - QAmUFh/hspxrnqaAUgWGCZF6dxZiGCScJ+XIJFODAhoin5wYyEGNqccePA7kCnHFkYPZ - KmRyZQNGVXpQ8oXvfN9rxf2AYEn+CYNhGFYZh2IJziWKYsp2MnBjYQY7j+Qp/kZxZKD2 - T5TldeXrl18X1fjmZngOB4Lg+EpVheG4ep2f4ri6c6HoujnHkAO5FkmTJVlByZUNGWT5 - l+rZlf+tZtrmc6/nexYjiey6FjWOJ/j216SnOl6bp+56inNhI3vGYuprOapzm+u51sOe - plsmg4xw+jcTpG26Vt+nbjqG66llqnctq738zreca8kWwZ5sfCdHs/S7Vtm3aZuCRblu - m7TUZRQGofBqSgeJ/naB4Agmpx6nyeQFgOByZHEeZvA8Bt0pkbwFGqA4Un8px5fcB34K - cen5gZ+vsHqeoF/0mQBf6K7/3lLAgFAMADlICQHVZAaBEC1SwKgZA9T0DiEi9EQIgBqS - inDtCMNMCQHR5FOHsJoGoCwBgGJkN0fw7QRBlROTIcA4QEggA+PcpwvhzA1FKBoNJTgF - DsHYPYCiTyfgBHmPMBAFwLkyI0AAYJ+nJtTKcMIPYewfDmHMU4ZAYBjA2B+NoyYVwqgw - AUAsmQsh+DkCYKMXhcRdAkCMEYbhThLjBCsGwH4qSnAlJQNsHAOCnAPGWMseINQakyAa - TweUTlCxQJ/FKKkVosRai5F43cYIxRkJVGaNEao2RujhHKOkdo8E/j0MiPkfifyAkFIS - Q0iJFEqgkQiR0VYrk/izFuLsX4wxjjLGeNMazTRtjfHEn8c46x3jzHuPsf5AyDkKSqQ4 - 1JEu0I5LEg8s5IS2klLmSsu5MEik1L+Tsw5QTHlGTmUsp5mSrmeSKaM04AxRinLSSMuJ - KE5GlJaXkmZfScmDJ6YhOZjSimTKaZcqZmysmhK6aiqJGE5mxLUnMt5Jy6kvL2TcwEIT - Ck/MWUMyJSTKlQTmVUzpWzSleSIgIAAOAQAAAwAAAAEARAAAAQEAAwAAAAEAJgAAAQIA - AwAAAAMAAAXyAQMAAwAAAAEABQAAAQYAAwAAAAEAAgAAAREABAAAAAEAAAAIARIAAwAA - AAEAAQAAARUAAwAAAAEAAwAAARYAAwAAAAECggAAARcABAAAAAEAAAU8ARwAAwAAAAEA - AQAAAT0AAwAAAAEAAgAAAVMAAwAAAAMAAAX4h3MABwAAERwAAAX+AAAAAAAIAAgACAAB - AAEAAQAAERxhcHBsAgAAAG1udHJSR0IgWFlaIAfZAAkAHgAWAAoAF2Fjc3BBUFBMAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD21gABAAAAANMtYXBwbK+MRddT6/ech82QJO8y - lrIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADnJYWVoAAAEsAAAAFGdYWVoA - AAFAAAAAFGJYWVoAAAFUAAAAFHd0cHQAAAFoAAAAFGNoYWQAAAF8AAAALHJUUkMAAAGo - AAAADmdUUkMAAAG4AAAADmJUUkMAAAHIAAAADnZjZ3QAAAHYAAAGEm5kaW4AAAfsAAAG - PmRlc2MAAA4sAAAAZGRzY20AAA6QAAACPm1tb2QAABDQAAAAKGNwcnQAABD4AAAAJFhZ - WiAAAAAAAABgAAAANcQAAAcqWFlaIAAAAAAAAG+RAACzYwAAGPJYWVogAAAAAAAAJ0QA - ABbzAACzCVhZWiAAAAAAAADzUgABAAAAARbPc2YzMgAAAAAAAQxCAAAF3v//8yYAAAeS - AAD9kf//+6L///2jAAAD3AAAwGxjdXJ2AAAAAAAAAAEBzQAAY3VydgAAAAAAAAABAc0A - AGN1cnYAAAAAAAAAAQHNAAB2Y2d0AAAAAAAAAAAAAwEAAAIAAAApAJABIAG6AnYDUARF - BWEGlAfdCUEKugw5DcEPVxDoEnYT/hV5FuYYShmbGuQcFR02HlofeSCYIbgi2SP/JSIm - RidqKJApuyrlLBAtQy5yL6Uw2zIPM0k0hTXENwA4RTmDOr876z0RPjo/WkB7QaBCxUPm - RQpGLkdRSHFJkkq2S9hM+04cTzpQWVF9UpxTu1TaVfpXHVhEWW9aolvUXQVeNF9kYJFh - wWLyZCJlVWaPZ8Ro/Wo7a39svm4Fb09wnXHuc0N0lXXwd0B4YnlsenJ7eXyDfYZ+i3+K - gIyBh4J9g3iEaIVVhkKHKYgNiOyJx4qhi3WMSo0ajeeOs4+AkFaRLZIEktuTspSIlV6W - MpcHl92Ys5mJmmGbOJwPnOedwZ6cn3igVaE0ohSi9aPVpLiljKZWpyOn8qjAqY2qXKsr - q/isx62YrmivOrAKsNqxrLJ9s060ILTytcW2lLdjuDO5A7nQupC7TbwIvMS9gr5Avv+/ - vsB8wT3CAMLCw4XETMUTxdvGpcdxyD3JC8nbyq3LgMxTzSXN9M6+z4jQUdEZ0eLSrtN3 - 1D/VCtXV1qDXbNg52QfZ1dqk23TcRN0V3eneut+P4GPhNuIN4urjwuSV5WXmMeb758no - lOlj6jbrDevo7NHtxu7G79jw//JC85n1Cvak+Fz6Rvxl/pL//wAAAEEA5AFxAhcC0wO3 - BLAFxwb3CD0JlQsGDHUN9A99EQASghP+FW4WzxgqGXgatxvmHQYeKB9IIGQhgSKhI8Uk - 5CYGJygoRilvKpMruCzjLg4vNzBkMY4yvjPwNSQ2UzeKOLw56zsIPB09NT5IP1hAbkGD - QpZDqkS9Rc9G4UfySQZKGEspTDpNSU5XT2pQd1GEUpJToVSxVcZW4lgCWSNaQ1thXIFd - 
n16+X99g/2IhY0pkb2WXZsNn9GkialVrjWzGbgNvQ3B+ccRy/HQVdRx2H3cjeCd5KHoo - eyd8Jn0ifhp/E4AJgPyB7oLeg82EuIWfhoqHbYhTiTaKF4r3i9OMq42FjluPM5AMkOaR - wJKak3KUUJUtlgyW7JfMmK6Zj5p1m1ucQ50rnhafAp/uoNqhxqKio22kPaUQpeOmtaeM - qGCpNaoNquWrvqyarXKuS68lr/+w2bGwsoqzY7Q4tQy14La0t4i4Xrkzuga62LusvH+9 - U74lvve/ysCdwW7CQMMUw+fEucWNxmHHNcgKyN/JtcqNy2TMP80kzhPPA8/y0N3RyNK0 - 05zUg9Vq1lLXOdgf2QfZ79rY28Lcr92b3ovffeBv4WXiW+NS5EvlRuZE50HoP+lD6krr - Uuxc7Wrueu+P8KjxwPLa8/f1GvY891/4g/mr+tL79/0e/j//Qf//AAAAGwBeAMMBNgGu - Aj4C5AOaBGMFPgYqBxwIGQkaCiILJAwsDSsOJA8SD/sQ2hGuEnYTOBP5FLUVchYwFu8X - rxhuGS4Z8BqzG3YcOx0BHcwelh9hIC8g+yHMIp8jciRFJR4l8ybBJ4QoPij6KbQqbisr - K+gspi1mLiYu5y+oMGoxMTH1MrozgDRGNQs11DabN2E4KDjwObg6fztGPBA82j2kPm4/ - N0ABQMxBl0JjQzBEAETORZ1Gb0dESBRI6Em9SpRLa0xETRtN9E7JT5VQXFEjUexStlOC - VFFVHlXwVsJXllhvWUZaIVr/W99cwF2hXoNfaWBMYTViG2MBY+hkymWoZoZnYWhAaR9p - /2rga8FsoW2GbmtvT3A3cR5yBnLuc9d0wnWsdph3hHhxeV96SXsxfAF8xH2DfkV/A3/A - gH+BOIHygqyDZ4QihN2FmIZThw+HzYiMiUqKC4rOi5CMUo0XjdyOoY9akBOQy5GEkkGT - AJPBlISVSZYSlt+Xrph/mVmaM5sQm/Cc1J26nqOfj6B9oW+iYaNOpDKlEKXqpsWnoah+ - qV+qP6sgrAOs6a3OrrOvnrCHsXCyW7NFtDC1G7YGtvC33LjFua66nrvAvOS+Ab8ZwDPB - U8J5w6jE4sYqx3/I7MpwzAfNv8+k0bTT99Z52VrcnuCT5U7rpPVK//8AAG5kaW4AAAAA - AAAGNgAAmQIAAFbIAABVBAAAkbAAACeRAAAVYAAAUA0AAFQ5AAIKPQAB+uEAASZmAAMB - AAACAAAAEwAsAEUAXwB4AJEAqwDFAN8A+QEUATABTAFqAYcBpgHGAecCCgIuAlQCfAKn - AtMDAwM1A2oDpQPkBCQEZwSsBPMFOwWFBc8GHAZqBroHDAddB7AIBQhbCLAJCAlhCboK - FApxCs0LKguHC+YMRwymDQkNbA3VDkQOtQ8mD5wQFBCLEQQRfxH9EnoS+hN7E/8UhRUK - FZEWGxalFzIXwhhTGOIZdhoLGqIbOhvTHGwdBB2cHjEexx9gH/oglyE0IdQidCMUI7Yk - WiT9JZ0mQibnJ4soLijPKXUqGSq7K14sACyiLUQt5i6LLywvzjB6MUQyHTL5M9c0szWY - Nns3ZzhROUM6PTszPDY9Pz5KP2BAe0GiQsxEAkU5Rn5HyEkSSlFLkEzUThlPYlCtUf5T - T1ShVfdXSlijWf1bVVytXgZfX2C3Yg9jbGTDZjdnwWlCashsU23ab2lw+HKFdBV1pXc8 - eNF6anwDfaB/O4DhgomEMoXgh7GJj4tzjVKPM5EUkvuU2pa9mJ6aeJxXnjCgCKHfo7Sl - hqdWqSmrB6zyruOw2LLStMS2wri/urm8uL61wLLCr8StxqrIqcqhzKPOnNCe0p3UitaH - 2JLarNzS3u/hGeMz5ULnR+kw6v/su+5b7+HxS/Km8/D1KfZK92L4bvlo+lf7NvwQ/N79 - pP6X//8AAAAMACMAPABUAG4AhwChALsA1gDxAQwBKQFGAWQBggGiAcMB5QIJAi4CVQJ/ - AqsC2QMJAz0DdAOwA/AEMQR0BLoFAgVLBZUF4AYvBn4GzwcjB3UHyggiCHkI0QkrCYcJ - 4wpCCqELAAthC8IMJwyKDPANWA3DDjgOsA8oD6QQIxChESERpBIoEq4TNhPAFEwU2hVo - FfkWjBchF7kYUxjrGYkaKBrJG2scDhyxHVId8R6QHzEf1CB5IR8hxyJwIxojxSRyJR4l - yCZ2JyQn0ih9KSop2iqHKzMr4CyMLTkt5S6WL0Ev8DCtMYEyXjM+NB81ATXpNtE3vjir - OaA6mDuQPJE9kz6bP6VAtUHOQuREBUUkRkxHd0imSeNLHkxiTaNO5lArUXFSvVQBVUpW - kVfaWSJablu0XPxeRF+MYNNiGWNlZKxmEmeZaRRqkGwRbYxvDnCOcgxzjHUJdo94FHmb - eyN8tH5Af9OBcIMQhLKGVYf3iaCLT4z8jq2QYJIak9GVjJdOmQyazJyTnlagHKHko6yl - dKc6qQWqwqxjrgKvp7FSswK0r7ZpuCW54bufvWS/JMDowqrEbsYrx+/JqMtjzR3O0tCL - 0kDT99Wm11fZDNq53GDeBd+t4U/i7eSK5iDnsulK6t/scO3774jxFvKl9DD1uPdH+Nb6 - ZPv9/Y7//wAAAB0ARABqAJEAuADfAQgBMgFeAYsBvAHuAiQCXgKeAuIDLAN/A9wEPgSm - BRIFgQXzBmoG4wdfB94IYAjhCWcJ7gp5CwMLjwwgDK8NQw3hDpIPRQ/+ELgRdBIzEvQT - txR+FUMWDRbYF6gYehlMGiUa/xvbHLodnx6CH2ggUSE+Ii0jHSQRJQUl+CbxJ+ko3ynd - Ktcr1CzRLc8u0i/TMOQyBDMnNEg1azaLN684zjnyOw48Lz1NPmk/hUCiQcJC3UP+RRpG - PUdgSIVJuErtTClNYU6aT9VREFJRU4xUyVYJV0RYhVnGWwhcSV2MXtBgFWFaYqBj7mVC - ZtFocmoPa7xtZm8fcNlylHRSdhR32Xmce1x9In7egKGCZIQkheaHx4mwi5uNe49YkTCT - BpTOlpWYVpoHm7qdZ58OoLKiUqPvpYinHqi0qlmsFa3er6ixdLM9tQC2ybiPulC8Er3Y - v5TBV8MYxNrGnshlyijL9M27z4vRXtMi1JTWCdeJ2RPantwk3aPfG+CL4fHjTOSd5ePn - GOhA6WLqe+uD7IXtcu5d7zfwD/DX8Z/yV/MO87v0WPT29Yv2F/aj9yj3nPgP+IP48flS - +bT6Fvp4+sr7E/tc+6X77vw3/Hb8pvzV/QX9NP1k/ZP9w/3z/iL+T/56/qX+0P77/yf/ - Uv99/6j/1P//AABkZXNjAAAAAAAAAApDb2xvciBMQ0QAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - 
AAAAAAAAAAAAbWx1YwAAAAAAAAASAAAADG5iTk8AAAASAAAA6HB0UFQAAAAWAAAA+nN2 - U0UAAAAQAAABEGZpRkkAAAAQAAABIGRhREsAAAAcAAABMHpoQ04AAAAMAAABTGZyRlIA - AAASAAABWGphSlAAAAAOAAABamVuVVMAAAASAAABeHBsUEwAAAASAAABinB0QlIAAAAY - AAABnGVzRVMAAAASAAABtHpoVFcAAAAOAAABxnJ1UlUAAAAkAAAB1GtvS1IAAAAMAAAB - +GRlREUAAAAQAAACBG5sTkwAAAAWAAACFGl0SVQAAAAUAAACKgBGAGEAcgBnAGUALQBM - AEMARABMAEMARAAgAGEAIABDAG8AcgBlAHMARgDkAHIAZwAtAEwAQwBEAFYA5AByAGkA - LQBMAEMARABMAEMARAAtAGYAYQByAHYAZQBzAGsA5gByAG1faYJyACAATABDAEQAyQBj - AHIAYQBuACAATABDAEQwqzDpMPwAIABMAEMARABDAG8AbABvAHIAIABMAEMARABLAG8A - bABvAHIAIABMAEMARABMAEMARAAgAEMAbwBsAG8AcgBpAGQAbwBMAEMARAAgAGMAbwBs - AG8Acl9pgnJtsmZ2mG95OlZoBCYEMgQ1BEIEPQQ+BDkAIAQWBBoALQQ0BDgEQQQ/BDsE - NQQ5zuy37AAgAEwAQwBEAEYAYQByAGIALQBMAEMARABLAGwAZQB1AHIAZQBuAC0ATABD - AEQATABDAEQAIABjAG8AbABvAHIAaQAAbW1vZAAAAAAAAAYQAACcgQAAAADBvf+AAAAA - AAAAAAAAAAAAAAAAAHRleHQAAAAAQ29weXJpZ2h0IEFwcGxlLCBJbmMuLCAyMDA5AA== - - ReadOnly - NO - RowAlign - 1 - RowSpacing - 36 - SheetTitle - Canvas 1 - SmartAlignmentGuidesActive - YES - SmartDistanceGuidesActive - YES - UniqueID - 1 - UseEntirePage - - VPages - 1 - WindowInfo - - CurrentSheet - 0 - ExpandedCanvases - - FitInWindow - - Frame - {{89, 37}, {716, 841}} - ListView - - OutlineWidth - 142 - RightSidebar - - Sidebar - - SidebarWidth - 138 - VisibleRegion - {{1, 1}, {574, 732}} - Zoom - 1 - ZoomValues - - - Canvas 1 - 0.0 - 1 - - - - saveQuickLookFiles - YES - - diff --git a/src/externals/pio1/doc/images/block-cyclic-rearr.eps b/src/externals/pio1/doc/images/block-cyclic-rearr.eps deleted file mode 100644 index 1318dc814b8..00000000000 --- a/src/externals/pio1/doc/images/block-cyclic-rearr.eps +++ /dev/null @@ -1,3315 +0,0 @@ -%!PS-Adobe-3.0 EPSF-3.0 -%%HiResBoundingBox: 0.000000 0.000000 534.000000 457.000000 -%APL_DSC_Encoding: UTF8 -%APLProducer: (Version 10.5.8 (Build 9L31a) Quartz PS Context) -%%Title: (Unknown) -%%Creator: (Unknown) -%%CreationDate: (Unknown) -%%For: (Unknown) -%%DocumentData: Clean7Bit -%%LanguageLevel: 2 -%%Pages: 1 -%%BoundingBox: 0 0 534 457 -%%EndComments -%%BeginProlog -%%BeginFile: cg-pdf.ps -%%Copyright: Copyright 2000-2004 Apple Computer Incorporated. -%%Copyright: All Rights Reserved. -currentpacking true setpacking -/cg_md 141 dict def -cg_md begin -/L3? 
languagelevel 3 ge def -/bd{bind def}bind def -/ld{load def}bd -/xs{exch store}bd -/xd{exch def}bd -/cmmtx matrix def -mark -/sc/setcolor -/scs/setcolorspace -/dr/defineresource -/fr/findresource -/T/true -/F/false -/d/setdash -/w/setlinewidth -/J/setlinecap -/j/setlinejoin -/M/setmiterlimit -/i/setflat -/rc/rectclip -/rf/rectfill -/rs/rectstroke -/f/fill -/f*/eofill -/sf/selectfont -/s/show -/xS/xshow -/yS/yshow -/xyS/xyshow -/S/stroke -/m/moveto -/l/lineto -/c/curveto -/h/closepath -/n/newpath -/q/gsave -/Q/grestore -counttomark 2 idiv -{ld}repeat pop -/SC{ - /ColorSpace fr scs -}bd -/sopr /setoverprint where{pop/setoverprint}{/pop}ifelse ld -/soprm /setoverprintmode where{pop/setoverprintmode}{/pop}ifelse ld -/cgmtx matrix def -/sdmtx{cgmtx currentmatrix pop}bd -/CM {cgmtx setmatrix}bd -/cm {cmmtx astore CM concat}bd -/W{clip newpath}bd -/W*{eoclip newpath}bd -statusdict begin product end dup (HP) anchorsearch{ - pop pop pop - true -}{ - pop - (hp) anchorsearch{ - pop pop true - }{ - pop false - }ifelse -}ifelse -{ - { - { - pop pop - (0)dup 0 4 -1 roll put - F charpath - }cshow - } -}{ - {F charpath} -}ifelse -/cply exch bd -/cps {cply stroke}bd -/pgsave 0 def -/bp{/pgsave save store}bd -/ep{pgsave restore showpage}def -/re{4 2 roll m 1 index 0 rlineto 0 exch rlineto neg 0 rlineto h}bd -/scrdict 10 dict def -/scrmtx matrix def -/patarray 0 def -/createpat{patarray 3 1 roll put}bd -/makepat{ -scrmtx astore pop -gsave -initgraphics -CM -patarray exch get -scrmtx -makepattern -grestore -setpattern -}bd -/cg_BeginEPSF{ - userdict save/cg_b4_Inc_state exch put - userdict/cg_endepsf/cg_EndEPSF load put - count userdict/cg_op_count 3 -1 roll put - countdictstack dup array dictstack userdict/cg_dict_array 3 -1 roll put - 3 sub{end}repeat - /showpage {} def - 0 setgray 0 setlinecap 1 setlinewidth 0 setlinejoin - 10 setmiterlimit [] 0 setdash newpath - false setstrokeadjust false setoverprint -}bd -/cg_EndEPSF{ - countdictstack 3 sub { end } repeat - cg_dict_array 3 1 index length 3 sub getinterval - {begin}forall - count userdict/cg_op_count get sub{pop}repeat - userdict/cg_b4_Inc_state get restore - F setpacking -}bd -/cg_biproc{currentfile/RunLengthDecode filter}bd -/cg_aiproc{currentfile/ASCII85Decode filter/RunLengthDecode filter}bd -/ImageDataSource 0 def -L3?{ - /cg_mibiproc{pop pop/ImageDataSource{cg_biproc}def}bd - /cg_miaiproc{pop pop/ImageDataSource{cg_aiproc}def}bd -}{ - /ImageBandMask 0 def - /ImageBandData 0 def - /cg_mibiproc{ - string/ImageBandMask xs - string/ImageBandData xs - /ImageDataSource{[currentfile/RunLengthDecode filter dup ImageBandMask/readstring cvx - /pop cvx dup ImageBandData/readstring cvx/pop cvx]cvx bind}bd - }bd - /cg_miaiproc{ - string/ImageBandMask xs - string/ImageBandData xs - /ImageDataSource{[currentfile/ASCII85Decode filter/RunLengthDecode filter - dup ImageBandMask/readstring cvx - /pop cvx dup ImageBandData/readstring cvx/pop cvx]cvx bind}bd - }bd -}ifelse -/imsave 0 def -/BI{save/imsave xd mark}bd -/EI{imsave restore}bd -/ID{ -counttomark 2 idiv -dup 2 add -dict begin -{def} repeat -pop -/ImageType 1 def -/ImageMatrix[Width 0 0 Height neg 0 Height]def -currentdict dup/ImageMask known{ImageMask}{F}ifelse exch -L3?{ - dup/MaskedImage known - { - pop - << - /ImageType 3 - /InterleaveType 2 - /DataDict currentdict - /MaskDict - << /ImageType 1 - /Width Width - /Height Height - /ImageMatrix ImageMatrix - /BitsPerComponent 1 - /Decode [0 1] - currentdict/Interpolate known - {/Interpolate Interpolate}if - >> - >> - }if -}if -exch -{imagemask}{image}ifelse 
-end -}bd -/cguidfix{statusdict begin mark version end -{cvr}stopped{cleartomark 0}{exch pop}ifelse -2012 lt{dup findfont dup length dict begin -{1 index/FID ne 2 index/UniqueID ne and -{def} {pop pop} ifelse}forall -currentdict end definefont pop -}{pop}ifelse -}bd -/t_array 0 def -/t_i 0 def -/t_c 1 string def -/x_proc{ - exch t_array t_i get add exch moveto - /t_i t_i 1 add store -}bd -/y_proc{ - t_array t_i get add moveto - /t_i t_i 1 add store -}bd -/xy_proc{ - - t_array t_i 2 copy 1 add get 3 1 roll get - 4 -1 roll add 3 1 roll add moveto - /t_i t_i 2 add store -}bd -/sop 0 def -/cp_proc/x_proc ld -/base_charpath -{ - /t_array xs - /t_i 0 def - { - t_c 0 3 -1 roll put - currentpoint - t_c cply sop - cp_proc - }forall - /t_array 0 def -}bd -/sop/stroke ld -/nop{}def -/xsp/base_charpath ld -/ysp{/cp_proc/y_proc ld base_charpath/cp_proc/x_proc ld}bd -/xysp{/cp_proc/xy_proc ld base_charpath/cp_proc/x_proc ld}bd -/xmp{/sop/nop ld /cp_proc/x_proc ld base_charpath/sop/stroke ld}bd -/ymp{/sop/nop ld /cp_proc/y_proc ld base_charpath/sop/stroke ld}bd -/xymp{/sop/nop ld /cp_proc/xy_proc ld base_charpath/sop/stroke ld}bd -/refnt{ -findfont dup length dict copy dup -/Encoding 4 -1 roll put -definefont pop -}bd -/renmfont{ -findfont dup length dict copy definefont pop -}bd -L3? dup dup{save exch}if -/Range 0 def -/DataSource 0 def -/val 0 def -/nRange 0 def -/mulRange 0 def -/d0 0 def -/r0 0 def -/di 0 def -/ri 0 def -/a0 0 def -/a1 0 def -/r1 0 def -/r2 0 def -/dx 0 def -/Nsteps 0 def -/sh3tp 0 def -/ymax 0 def -/ymin 0 def -/xmax 0 def -/xmin 0 def -/setupFunEval -{ - begin - /nRange Range length 2 idiv store - /mulRange - - [ - 0 1 nRange 1 sub - { - 2 mul/nDim2 xd - Range nDim2 get - Range nDim2 1 add get - 1 index sub - - 255 div - exch - }for - ]store - end -}bd -/FunEval -{ - begin - - nRange mul /val xd - - 0 1 nRange 1 sub - { - dup 2 mul/nDim2 xd - val - add DataSource exch get - mulRange nDim2 get mul - mulRange nDim2 1 add get - add - }for - end -}bd -/max -{ - 2 copy lt - {exch pop}{pop}ifelse -}bd -/sh2 -{ - /Coords load aload pop - 3 index 3 index translate - - 3 -1 roll sub - 3 1 roll exch - sub - 2 copy - dup mul exch dup mul add sqrt - dup - scale - atan - - rotate - - /Function load setupFunEval - - - clippath {pathbbox}stopped {0 0 0 0}if newpath - /ymax xs - /xmax xs - /ymin xs - /xmin xs - currentdict/Extend known - { - /Extend load 0 get - { - 0/Function load FunEval sc - xmin ymin xmin abs ymax ymin sub rectfill - }if - }if - - /Nsteps/Function load/Size get 0 get 1 sub store - /dx 1 Nsteps div store - gsave - /di ymax ymin sub store - /Function load - - 0 1 Nsteps - { - 1 index FunEval sc - 0 ymin dx di rectfill - dx 0 translate - }for - pop - grestore - currentdict/Extend known - { - /Extend load 1 get - { - Nsteps/Function load FunEval sc - 1 ymin xmax 1 sub abs ymax ymin sub rectfill - }if - }if -}bd -/shp -{ - 4 copy - - dup 0 gt{ - 0 exch a1 a0 arc - }{ - pop 0 moveto - }ifelse - dup 0 gt{ - 0 exch a0 a1 arcn - }{ - pop 0 lineto - }ifelse - - fill - - dup 0 gt{ - 0 exch a0 a1 arc - }{ - pop 0 moveto - }ifelse - dup 0 gt{ - 0 exch a1 a0 arcn - }{ - pop 0 lineto - }ifelse - - fill -}bd -/calcmaxs -{ - - xmin dup mul ymin dup mul add sqrt - xmax dup mul ymin dup mul add sqrt - xmin dup mul ymax dup mul add sqrt - xmax dup mul ymax dup mul add sqrt - max max max -}bd -/sh3 -{ - /Coords load aload pop - 5 index 5 index translate - 3 -1 roll 6 -1 roll sub - 3 -1 roll 5 -1 roll sub - 2 copy dup mul exch dup mul add sqrt - /dx xs - 2 copy 0 ne exch 0 ne or - { - - exch atan 
rotate - }{ - pop pop - }ifelse - - /r2 xs - /r1 xs - /Function load - dup/Size get 0 get 1 sub - /Nsteps xs - setupFunEval - - - - - - dx r2 add r1 lt{ - - 0 - }{ - dx r1 add r2 le - { - 1 - }{ - r1 r2 eq - { - 2 - }{ - 3 - }ifelse - }ifelse - }ifelse - /sh3tp xs - clippath {pathbbox}stopped {0 0 0 0}if - newpath - /ymax xs - /xmax xs - /ymin xs - /xmin xs - - dx dup mul r2 r1 sub dup mul sub dup 0 gt - { - sqrt r2 r1 sub atan - /a0 exch 180 exch sub store - /a1 a0 neg store - }{ - pop - /a0 0 store - /a1 360 store - }ifelse - currentdict/Extend known - { - /Extend load 0 get r1 0 gt and - { - 0/Function load FunEval sc - - - - - { - { - dx 0 r1 360 0 arcn - xmin ymin moveto - xmax ymin lineto - xmax ymax lineto - xmin ymax lineto - xmin ymin lineto - eofill - } - { - r1 0 gt{0 0 r1 0 360 arc fill}if - } - { - - - - - 0 r1 xmin abs r1 add neg r1 shp - } - { - - - r2 r1 gt{ - - 0 r1 - r1 neg r2 r1 sub div dx mul - 0 - shp - }{ - - - - 0 r1 calcmaxs - dup - - r2 add dx mul dx r1 r2 sub sub div - neg - exch 1 index - abs exch sub - shp - }ifelse - } - }sh3tp get exec - }if - }if - - /d0 0 store - /r0 r1 store - /di dx Nsteps div store - /ri r2 r1 sub Nsteps div store - /Function load - 0 1 Nsteps - { - 1 index FunEval sc - d0 di add r0 ri add d0 r0 shp - { - - d0 0 r0 a1 a0 arc - d0 di add 0 r0 ri add a0 a1 arcn - fill - - - d0 0 r0 a0 a1 arc - d0 di add 0 r0 ri add a1 a0 arcn - fill - }pop - - - /d0 d0 di add store - /r0 r0 ri add store - }for - pop - - currentdict/Extend known - { - /Extend load 1 get r2 0 gt and - { - Nsteps/Function load FunEval sc - - - - - { - { - dx 0 r2 0 360 arc fill - } - { - dx 0 r2 360 0 arcn - xmin ymin moveto - xmax ymin lineto - xmax ymax lineto - xmin ymax lineto - xmin ymin lineto - eofill - } - { - - - xmax abs r1 add r1 dx r1 shp - } - { - - r2 r1 gt{ - - - - calcmaxs dup - - r1 add dx mul dx r2 r1 sub sub div - exch 1 index - exch sub - dx r2 - shp - }{ - - r1 neg r2 r1 sub div dx mul - 0 - dx - r2 - shp - }ifelse - } - } - sh3tp get exec - }if - }if -}bd -/sh -{ - begin - /ShadingType load dup dup 2 eq exch 3 eq or - { - gsave - newpath - /ColorSpace load scs - currentdict/BBox known - { - /BBox load aload pop - 2 index sub - 3 index - 3 -1 roll exch sub - exch rectclip - }if - 2 eq - {sh2}{sh3}ifelse - grestore - }{ - - pop - (DEBUG: shading type unimplemented\n)print flush - }ifelse - end -}bd -{restore}if not dup{save exch}if - L3?{ - /sh/shfill ld - /csq/clipsave ld - /csQ/cliprestore ld - }if -{restore}if -end -setpacking -%%EndFile -%%EndProlog -%%BeginSetup -%%EndSetup -%%Page: 1 1 -%%PageBoundingBox: 0 0 534 457 -%%BeginPageSetup -cg_md begin -bp -sdmtx -%RBIBeginFontSubset: Helvetica -%!FontType1-1.0: Helvetica 1.0000.0.0000 - 14 dict begin/FontName /Helvetica def - /PaintType 0 def - /Encoding 256 array 0 1 255{1 index exch/.notdef put}for - dup 33 /P put - dup 34 /E put - dup 35 /space put - dup 36 /zero put - dup 37 /one put - dup 38 /two put - dup 39 /C put - dup 40 /o put - dup 41 /m put - dup 42 /p put - dup 43 /d put - dup 44 /e put - dup 45 /c put - dup 46 /s put - dup 47 /i put - dup 48 /t put - dup 49 /n put - dup 50 /D put - dup 51 /k put - dup 52 /l put - dup 53 /a put - dup 54 /y put - dup 55 /u put - dup 56 /S put - dup 57 /r put - dup 58 /bracketleft put - dup 59 /three put - dup 60 /bracketright put - dup 61 /six put - dup 62 /R put - dup 63 /g put - dup 64 /I put - dup 65 /O put - dup 66 /five put - dup 67 /four put - readonly def - 42/FontType resourcestatus{pop pop false}{true}ifelse - %APLsfntBegin - {currentfile 
0(%APLsfntEnd\n)/SubFileDecode filter flushfile}if - /FontType 42 def - /FontMatrix matrix def - /FontBBox[2048 -1947 1 index div -985 2 index div 2961 3 index div 2297 5 -1 roll div]cvx def - /sfnts [< - 74727565000900000000000063767420000000000000009C0000036C6670676D000000000000040800000A0C676C79660000000000000E1400001EC0686561640000000000002CD400000038686865610000000000002D0C00000024686D74780000000000002D30000000906C6F63610000000000002DC00000004A6D6178700000000000002E0C00000020707265700000000000002E2C000003CF05C0001005BD00280580001A042F001F0000FFD90000FFDA0000FFD9FE55FFE605C70010FE6DFFF1033B000000B9000000B902FE3F3C00C0008D009B00AF000600A800C00028005E009800C9016A00B9015C00B400D6011E002E0080000400B8004C00CC01FFFFD1006600A400AF007400C2009500B1000C0028006D0015004C008E0125FF7A000C0040004C00620084FFA200240038008600BD0039005E008E00EDFFA9FFB300400052005500AA00AB00C200CB012302B10413FFAEFFE4000800510074008400AA00D1FF4CFFAF0012002C004200500051008400BE012503DAFF680018003B0098009C009F00A100C100EC018201B4FF68FF76FFD0FFE100020018001C00530053007D01B401E103AF0486FF9CFFEAFFFE001F0028002A00520060009300A300AA00AF00AF00C001000145016B0174019301950240028202B404850517FEFD00060029004700470048006F008800B400B900C400F200F901EF02180310037403C5FF35FFF3000B004B004C0052005500650076007600870087008E00AB00BB0106013001430150017D0194019501D3022A025502580277027802E6034E035C037903D3047304B2058C0598060BFEF5FFBBFFC7FFD50017001D005B0072007E009C00C200D000F400FA01030106011C0125013B0142015E015E0180019B02B901A101B9025001C001D002AA01DF01E301EF01FB0205020C0215022B0274029302AB02C202CE03690395039903DF03F5043E050205A105E5062507DBFE62FE89FECEFF3BFFE1FFF800030008002100390042004E005F0061006F00700034007F008E00AD00AD00AF00BD00C400C500C900C900C900E3011C00ED00F800F901000112011A0132014D014D014E014F01660169019E01BA01BA01BE01E301EF01F602000200020902110217021C02530262026D028002D50280031B032A034A035A03AF03AF03C803D603FB03FB04050413041504470449008C046D049A049A04A604A804B204CF0539053E054E055605800589058C036305D105D6067E068E06B206EF06F00728074C076F078C00B400C900C000C10000000000000000000000000004012400AF0032006E0063014401620096014301A10161008A00740064018801EF01700028FF5D037E0347023000AA00BE007B0062009A007D0089035C00A1FFD803AA00D70093006C0000008000A70442001D0597001D00820030002A - 
002A002A002A002A40292A292827262524232221201F1E1D1C1B1A191817161514131211100D0C0B0A090807060504030201002C4523466020B02660B004262348482D2C452346236120B02661B004262348482D2C45234660B0206120B04660B004262348482D2C4523462361B0206020B02661B02061B004262348482D2C45234660B0406120B06660B004262348482D2C4523462361B0406020B02661B04061B004262348482D2C0110203C003C2D2C20452320B0CD442320B8015A51582320B08D44235920B0ED51582320B04D44235920B09051582320B00D44235921212D2C20204518684420B001602045B04676688A4560442D2C01B9400000000A2D2C00B9000040000B2D2C2045B00043617D6818B0004360442D2C45B01A234445B01923442D2C2045B00325456164B050515845441B2121592D2C20B0032552582359212D2C69B04061B0008B0C6423648BB8400062600C642364615C58B0036159B002602D2C45B0112BB0172344B0177AE5182D2C45B0112BB01723442D2C45B0112BB017458CB0172344B0177AE5182D2CB002254661658A46B040608B482D2CB0022546608A46B040618C482D2C4B53205C58B002855958B00185592D2C20B0032545B019236A4445B01A23444565234520B00325606A20B009234223688A6A606120B0005258B21A401A4523614459B0005058B219401945236144592D2CB9187E3B210B2D2CB92D412D410B2D2CB93B21187E0B2D2CB93B21E7830B2D2CB92D41D2C00B2D2CB9187EC4E00B2D2C4B525845441B2121592D2C0120B003252349B04060B0206320B000525823B002253823B002256538008A63381B212121212159012D2C456920B00943B0022660B00325B005254961B0805358B21940194523616844B21A401A4523606A44B209191A45652345604259B00943608A103A2D2C01B005251023208AF500B0016023EDEC2D2C01B005251023208AF500B0016123EDEC2D2C01B0062510F500EDEC2D2C20B001600110203C003C2D2C20B001610110203C003C2D2C764520B003254523616818236860442D2C7645B00325452361682318456860442D2C7645B0032545616823452361442D2C4569B014B0324B505821B0205961442DB8002B2C4BB800095058B101018E59B801FF85B800441DB9000900035F5E2DB8002C2C2020456944B001602DB8002D2CB8002C2A212DB8002E2C2046B003254652582359208A208A49648A204620686164B004254620686164525823658A592F20B00053586920B000545821B040591B6920B000545821B0406559593A2DB8002F2C2046B00425465258238A592046206A6164B0042546206A61645258238A592FFD2DB800302C4B20B0032650585158B080441BB04044591B21212045B0C05058B0C0441B2159592DB800312C2020456944B0016020 - 
[Binary PostScript figure data elided: the remainder of the removed file is an embedded Helvetica Type 1 font subset (CharStrings for the digits, brackets, C, D, E, I, O, P, R, S, and assorted lowercase glyphs), CIE-based color space definitions, and drawing commands for rows of yellow, salmon, and cyan rectangles with short text labels, followed by hex-encoded image data. No human-readable content survives.]
-s1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a=n -s1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h -^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^G91fqZ$dTIt))1s8E"J+?_<2s1jBh^kVm)LB#&Ds1jBh^kVm) -LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&D -s1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBhI)(L:^kVm)LB#&Ds1jBh^kVm)LB#&D -s1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh -^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kQ(n^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh -^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm) -113ZM!!P--IqJ`\rW%Ei4YDHg_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uC -s1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh -_1hp*L&\uCs1sEh_1ho:_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh -_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp* -L&\uCs1sEh_1hp*Kj4SnL&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp* -L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\tE?qC.p"YtRWB)ho1!.Q(R07s5EK)`lC -s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`k -a+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)^5=K)`lCs2T`k -a+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC>Q;`qs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC -s2T`ka+F?3K)`lCs2Qg$9D\\m14Odcs8W&uIigg,K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`k -a+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs*Vk=s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC -s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T^qs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC -s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka"UmoqZ$dTIt))1 -s8E"J*^)3.s2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6 -JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiC -s2filI*$s>aasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiC -s2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2fil -aam[raasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2fil -aasK6JcEiCs2filaasK6JcEiCs2filaasK60ORQN!!P--IqJ`\rW%Eh4YV?bb^TT9IfIW@s3,rlb^TT9 -IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@ -s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TS>b^TT9IfIW@s3,rlb^TT9IfIW@ -s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rl -b^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9ITuirIfIW@s3,rlb^TT9IfIW@s3,rl -b^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9 -IfIVF?qpLu"YtRWB)ho1!.Q"P0n')HIK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q? -s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ul -c$fW:IK.Q?s35ulc$fW:IK+]>IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ul -c$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW: -IK.Q?s35ulc$fW:IK.Q?>Q;rrs35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW: -IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s333):&=no14Odcs8W&uIiU[,I/hN? 
-s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)m -c[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s*W.?s3H)m -c[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c= -I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H'ss3H)mc[>c= -I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN? -s3H)mc[>c=I/hN?s3H)mcRrU#qZ$dTIt))1s8E"J*Bc0(s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5n -dX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rA -HN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5nI+!?@dX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rA -HN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E> -s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX#-tdX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E> -s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rA/RVBO!!P-- -IqJ`\rW%Ef4Yh<_e9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)D -H2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB> -s3u>oe9V(Ae9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB> -s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>o -e9V)DH!Cs3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>o -e9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lAG?r6_#"YtRWB)ho1!.PtO13iiLGlQ<=s4)AoeTh,E -GlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<= -s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlN0AGlQ<=s4)AoeTh,EGlQ<= -s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)Ao -eTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=>Q<5us4)AoeTh,EGlQ<=s4)Ao -eTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,E -GlQ<=s4&]/;#:4r14Odcs8W&uIiCO-FoU0Q9qZ$dTIt))1s8E"J)Efuss5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9 -s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%t -j)=tXDu\j9s5S%tj)=tXDu\j9s5S%tI,f/Fj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%t -j)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tX -Du\j9s5S%tj)=tXDu\j9s5S%tj)80%j)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tX -Du\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tX.:?0Q!!P--IqJ`\rW%Ed -4Z@6XkAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8! 
-kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC6H -kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^ -D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D-R&' -D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9 -s6"8!kAC7^D?&d9s6"8!kAC7^D?&cI?s!4*"YtRWB)ho1!.PkL2f/NYD#`a9s64A"l"pCaD#`a9s64A" -l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCa -D#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#\nID#`a9s64A"l"pCaD#`a9s64A"l"pCa -D#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9 -s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9>Q -=Si(%14Odcs8W&uIi1C.CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_ -CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5 -s64:ul"^7_CB*O5s*X'Gs64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5 -s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:u -l"^7_CB*O5s649&s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:u -l"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ukpKk@qZ$dTIt))1s8E"J(d0iks6aP"mUuRf -BE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4 -s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"I-tYImUuRfBE.C4 -s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP" -mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUoc(mUuRfBE.C4s6aP" -mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRf -BE.C4s6aP"mUuRf-=C$S!!P--IqJ`\rW%Eb4ZR-Smq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3 -s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS" -mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2TImq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS" -mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2Ug -B)h=3s6jS"mq2UgB)h=3s6jS"mq2UgAm><(B)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2Ug -B)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)hQ=2)s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4 -s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6q@C>5J:'14Odcs8W&uIht7.AH242s70_#nmqdkAH242s70_# -nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdk -AH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s*XBJs70_#nmqdkAH242s70_#nmqdk -AH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242 -s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70])s70_#nmqdkAH242s70_#nmqdkAH242 -s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_# -ng.[JqZ$dTIt))1s8E"J(Hjfgs7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so -@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1 -s7Kk$oj[so@fQ+1s7Kk$I.^tKoj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1 -s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$ -oj[so@fQ+1s7Kk$ojV/*oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$ -oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so,[amS!!P--IqJ`\rW%Ea4Zd*PpL4*r -@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1 -s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4)LpL4*r@K6(1 -s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t% -pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@9`d+@K6(1s7]t% -pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r 
-@K6(1s7]t%pL4*r@K6'I?sWX0"YtRWB)ho1!.P@t*A;ni07S@&Is`o!HnTts07S@&Is`o!HnTts07S@& -Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o! -HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@s07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o! -HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts -07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&>CV#RIs`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts -07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is_h^0)GVP)*^>@ -It*"LJH2) %APLeod -EI -78 0 0 39 48 295 cm -BI -/Width 234 -/Height 117 -/BitsPerComponent 8 -/Decode[ -0 1 -0 1 -0 1 -] -/Interpolate true -/DataSource cg_aiproc -ID -JH16$U&]]CAn(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%L -s(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a? -An(dP@fL$_@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?Aq+:iAn(dP@fL%Ls(5a?An(dP@fL%L -s(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a? -An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fJT'@fL%Ls(5a?An(dP@fL%Ls(5a? -An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@f?@W5T1Jh1GF;70Rkl4Ikrd.1GF;7 -0Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4 -Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;7Hn^%+0Rkl4Ikrd.1GF;70Rkl4 -Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd. -1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rm:_0Rkl4Ikrd.1GF;70Rkl4Ikrd. -1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;70Rkl4Ikrd.1GF;7 -0Rkl458b::!.Y#^@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"L -s(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@ -BOUpS@K1"Ls(EN`s(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpSIpr@j@K1"Ls(Gj@BOUpS@K1"L -s(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@ -BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls#k%(s(Gj@BOUpS@K1"Ls(Gj@ -BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"LrW%Fu1berD@/jqKs(Pm@BjgsT -@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqK -s(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqg@/jqKs(Pm@BjgsT@/jqK -s(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@ -BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs''2Fs(Pm@BjgsT@/jqKs(Pm@ -BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT -@/jqKs(Pkj1b\h.J,a4js(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqK -s(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@ -BjgsT@/jqKs(Pm@Bcpg`BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqj@/jqKs(Pm@BjgsT@/jqK -s(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@ -BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@4_\=(BjgsT@/jqKs(Pm@ -BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm?!.TMV0E5cos(Yp@C1%!U -?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJ -s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Zpgs(Yp@C1%!U?iOkJ -s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@ -C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@>A@OFC1%!U?iOkJs(Yp@ -C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U -?iOkJs(Yp@C0uHI0E(s$s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJ -s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@ 
-C1%!U?iOkJs(Yp@C1%!U+CG.`?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs([$js(Yp@C1%!U?iOkJ -s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@ -C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!(C1%!U?iOkJs(Yp@ -C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1$r?It0F1s(l$ACgR-X -?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJ -s(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACj]RhCgR-X?N4hJ -s(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$A -CgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-GCgR-X?N4hJs(l$A -CgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X -?N4hJs(l$ACgR-X?@OgbrW%HJCgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJ -s(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$A -CgR-X?N4hJs(l$ACgR-X?N4ga?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACk#dkCgR-X?N4hJ -s(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$A -CgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N30)?N4hJs(l$A -CgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N'qSJ2]+\DI*9[ -?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJ -s))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[HsQbi?2neJ -s))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-B -DI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2n,H?2neJs))-B -DI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[ -?2neJs))-BDI*9[?2neJIl8qY!.Y#d?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJ -s))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-B -DI*9[?2neJs))-BDI*9[?2neJs)&rbs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[IpN(l?2neJ -s))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-B -DI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs#k7*s))-B -DI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJrW%Fu2_P,K ->lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_I -s)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_i>lS_I -s)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20B -Dd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is''DHs)20B -Dd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\ ->lS_Is)20BDd<<\>lS_Is)2.l2_G"/J,aFls)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_I -s)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20B -Dd<<\>lS_Is)20BDd<<\>lS_Is)20BD]ilS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_l>lS_I -s)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20B -Dd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20B4`=U* -Dd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20A!.TMX -/cT`ns)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VG -s)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)39h -s)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-A -Dd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-A>AmaG -Dd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[ ->Q8VGs)2-ADd36[>Q8VGs)2-ADd.]M/cGa"s)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VG 
-s)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-A -Dd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[+C"ka>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)3Bk -s)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-A -Dd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36) -Dd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd36[>Q8VGs)2-ADd32@ -It0L0s)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJE -s)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3A -EI:phEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3A -EEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEW<]=oWJEs)D3AEEWBX-KG$+cf=Tl=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QE -G?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg -=9!JGs*%QEG?=fg=8uKK=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg -=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGIlf1[!.Y#m=9!JGs*%QEG?=fg=9!JG -s*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QE -G?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*#Ses*%QEG?=fg=9!JGs*%QE -G?=fg=9!JGs*%QEG?=fgIolYo=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QE -G?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg -=9!JGs*%QEG?=fg=9!JGs#kR-s*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg=9!JGs*%QEG?=fg -=9!JGs*%QEG?=fg=9!JGrW%Fu4=^AV<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5C -s*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TD -GuXfh<<%5Cs*7TDGuXfh<<%5k<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TD -GuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh -<<%5Cs*7TDGuXfh<<%5Cs''bJs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh -<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7Rn4=U70J,adns*7TDGuXfh<<%5C -s*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TD -GuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGp$)dGuXfh<<%5Cs*7TD -GuXfh<<%5Cs*7TDGuXfh<<%5n<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TD -GuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh -<<%5Cs*7TDGuXfh<<%5Cs*7TD4aC$,GuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh<<%5Cs*7TDGuXfh -<<%5Cs*7TDGuXfh<<%5Cs*7TC!.TM^.fXfos*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8D -s*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZE -H;soj<<%8Ds*@ZEH;soj<<%8Ds*B&ls*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZE -H;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj -<<%8Ds*@ZEH;soj<<%8Ds*@ZE>C'9KH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj -<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;soj<<%8Ds*@ZEH;oAV.K0CTNMIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q -;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo1\[./j3r -s*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moG -Io65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q -+Ar/g;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*oMqs*moGIo65q;?),Cs*moGIo65q;?),Cs*moG -Io65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65q -;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo65/Io65q;?),Cs*moGIo65q;?),Cs*moGIo65q -;?),Cs*moGIo65q;?),Cs*moGIo65q;?),Cs*moGIo61FIt0d4s++#HJPcAt;#c)Cs++#HJPcAt;#c)C -s++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#H 
-JPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJUC;oJPcAt;#c)Cs++#HJPcAt;#c)Cs++#H -JPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt -;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcANJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt -;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt:k(_X -rW%HJJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#H -JPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt -;#c(h;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJU^MrJPcAt;#c)Cs++#HJPcAt;#c)Cs++#H -JPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt -;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#`\0;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt -;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#c)Cs++#HJPcAt;#UHEJ3bR_JkuDu:]H#Bs+4&HJkuDu:]H#B -s+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDuHr'co:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]FXN:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#B -Im>C\!.Y$#:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+2@hs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDuIo$)r:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs#ks0s+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#BrW%Fu5pu\a:]H#Bs+4&HJkuDu:]H#B -s+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#o:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs'((Ns+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#B -s+4$r5UZO2J,b*rs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJfmkhJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#r:]H#Bs+4&HJkuDu:]H#Bs+4&H -JkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&H4b?K0JkuDu:]H#Bs+4&HJkuDu -:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&HJkuDu:]H#Bs+4&G!.TMc-NAcns+O/HKhVN#9`Kf? -s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/H -KhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+Phos+O/HKhVN#9`Kf?s+O/H -KhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN# -9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/H>D5cNKhVN#9`Kf?s+O/HKhVN# -9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf? 
-s+O/HKhQt_-2mmos+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/H -KhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN# -9`Kf?s+O/HKhVN#+ADfh9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+Pqrs+O/HKhVN#9`Kf?s+O/H -KhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN# -9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVM0KhVN#9`Kf?s+O/HKhVN# -9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVN#9`Kf?s+O/HKhVIGIt0m5s+X5IL.qW%9`Ki@ -s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5I -L.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL3u\pL.qW%9`Ki@s+X5I -L.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW% -9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qVOL.qW%9`Ki@s+X5IL.qW% -9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@ -s+X5IL.qW%9RfATrW%HJL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5I -L.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW% -9`Ki@s+X5IL.qW%9`Khi9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL4;nsL.qW%9`Ki@s+X5I -L.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW% -9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`I819`Ki@s+X5IL.qW% -9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`Ki@s+X5IL.qW%9`>$AJ3t[`LJ7`'9`KlA -s+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;J -LJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'HqaQq9`KlAs+a;J -LJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`' -9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`J=P9`KlAs+a;JLJ7`' -9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlA -s+a;JLJ7`'9`KlAImYO]!.Y$(9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;J -LJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`' -9`KlAs+a;JLJ7`'9`KlAs+_^js+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'In]lt9`KlAs+a;J -LJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`' -9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs#l-2s+a;JLJ7`' -9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlAs+a;JLJ7`'9`KlArW%Fu6m_ki9E0iA -s+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDK -M+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0ir9E0iAs+sDK -M+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl* -9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs'(=Qs+sDKM+dl* -9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iA -s+sDKM+dl*9E0iAs+sBu6m_g4J,b?us+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDK -M+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl* -9E0iAs+sDKM+dl*9E0iAs+sDKM',IkM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iu9E0iAs+sDK -M+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl* -9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDK4c)i3M+dl* -9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDKM+dl*9E0iAs+sDJ!.TMf-3&ip -s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GK -MG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,)1rs,'GK -MG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+ -9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GK>Dc&QMG!o+ 
-9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@ -s,'GKMG!o+9)jc@s,'GKMFr@d,lRdns,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GK -MG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+ -9)jc@s,'GKMG!o+9)jc@s,'GKMG!o++A2Zk9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,):us,'GK -MG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+ -9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!n3MG!o+ -9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!o+9)jc@s,'GKMG!jJIt0s5 -s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJ -MFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJML8"q -MFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi* -8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmhPMFmi* -8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ> -s,'DJMFmi*8cOZ>s,'DJMFmi*8Uj,RrW%HJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJ -MFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi* -8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOYj8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMLS4t -MFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi* -8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cLr2 -8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cOZ>s,'DJMFmi*8cA^> -J4:d`NCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPK -NCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#. -Hq43r8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#. -8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,leQ -8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ= -s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=Imt[^!.Y$.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPK -NCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#. -8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,A-ks,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#. -In0Nu8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#. 
-8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ= -s#l?3s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ=s,BPKNCX#.8,nQ= -rW%Fu7jJ%o7fSKE;8RO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN= -s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%+Uh,5qRls,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYL -O%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/1 -7fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/1+@cBl7fSN=s,TYLO%0/17fSN=s,TYLO%0/1 -7fSN=s,VY!s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/1 -7fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN= -s,TYLO%0.4O%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN=s,TYLO%0/17fSN= -s,TYLO%0*KIt1'6s,]\LO@B227K8HD76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kM -PX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D7 -6N<9:s-,kMPX>D76N<9:s-.mts-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D7 -6N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9: -s-,kMPX>D76N<9:s-,kM>EhJSPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9: -s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX9jk+T;@js-,kMPX>D76N<9:s-,kM -PX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D7 -6N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D7+@?*m6N<9:s-,kMPX>D7 -6N<9:s-,kMPX>D76N<9:s-/""s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D7 -6N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9: -s-,kMPX>D76N<9:s-,kMPX>C5PX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9:s-,kMPX>D76N<9: -s-,kMPX>D76N<9:s-,kMPX>?LIt107s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tN -Q9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP: -63!6:s->tNQ9kP:63!6:s->tNQ@)!uQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP: -63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6: -s->tNQ9kP:63!6:s->tNQ9kOTQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6: -s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:6%;KLrW%HJQ9kP:63!6:s->tN -Q9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP: -63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!5n63!6:s->tNQ9kP: -63!6:s->tNQ9kP:63!6:s->tNQ@D4#Q9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP: -63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6: -s->tNQ9kP:63!6:s->tNQ9kP:62s*663!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6:s->tNQ9kP:63!6: -s->tNQ9kP:63!6:s->tNQ9kP:62gk6J4h$bQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qM -Q9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ9 -5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ9HpI^t5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ9 -5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8 -s->qMQ9bJ95l[-8s->qMQ9bJ95lY&S5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8 -s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8InCg^!.Y$75l[-8s->qM -Q9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ9 -5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s-=cms->qMQ9bJ9 -5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ9ImF%"5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ9 -5l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8 -s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s#lZ5s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8 -s->qMQ9bJ95l[-8s->qMQ9bJ95l[-8rW%Fu9-F8$5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"N 
-QU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S; -5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[0u5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S; -5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09 -s-H"NQU(S;5l[09s-H"NQU(S;5l[09s'(gTs-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09 -s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H!#9-=-5J,bj#s-H"N -QU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S; -5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQQSQnQU(S; -5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[1#5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S; -5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09 -s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"N4dSG6QU(S;5l[09s-H"NQU(S;5l[09s-H"NQU(S;5l[09 -s-H"NQU(S;5l[09s-H"NQU(S;5l[09s-H"M!.TMm+9.`ns-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+O -R6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_> -5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-\7!s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_> -5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9 -s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+O>F@_UR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9 -s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6Q0p+8u7is-Z+O -R6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_> -5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>+@#mo -5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-\@$s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_> -5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9 -s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U^7R6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9 -s-Z+OR6U_>5Q@-9s-Z+OR6U_>5Q@-9s-Z+OR6UZNIt137s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(N -R6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY= -56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR=%3uR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY= -56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7 -s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LXTR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7 -s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=5(?6IrW%HJ -R6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY= -56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%#n -56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR=@F#R6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY= -56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7 -s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56!d656%$7s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7 -s-Z(NR6LY=56%$7s-Z(NR6LY=56%$7s-Z(NR6LY=55kP3J5%*bRm$e@4o_!7s-l1ORm$e@4o_!7s-l1O -Rm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@ -4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@Hp.M!4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@ -4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7 -s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o\`U4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7 -s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7In_!` -!.Y$<4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@ -4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7 -s-k,os-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@Im*h$4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@ -4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7 -s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s#li7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7 
-s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7s-l1ORm$e@4o_!7rW%Fu:*0G+4TCs7s.):PSNQqC4TCs7s.):P -SNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC -4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCt"4TCs7s.):PSNQqC4TCs7s.):PSNQqC -4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7 -s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s')$Vs.):PSNQqC4TCs7s.):PSNQqC4TCs7 -s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.)9% -:*'<6J,c'%s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC -4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7 -s.):PSKL&pSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCt%4TCs7s.):PSNQqC4TCs7s.):PSNQqC -4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7 -s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):P4e4_8SNQqC4TCs7s.):PSNQqC4TCs7 -s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):PSNQqC4TCs7s.):O!.TMp*rhfos.2@QSim%E4TD!8s.2@Q -Sim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E -4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.4U#s.2@QSim%E4TD!8s.2@QSim%E -4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8 -s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@Q>FmtWSim%E4TD!8s.2@QSim%E4TD!8 -s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@Q -SihKt*W?%gs.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E -4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8 -s.2@QSim%E+?][q4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.4^&s.2@QSim%E4TD!8s.2@QSim%E -4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8 -s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim$9Sim%E4TD!8s.2@QSim%E4TD!8 -s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSim%E4TD!8s.2@QSiluPIt1<8s.;@PT0!"E3rbg5s.;@P -T0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E -3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT6r^"T0!"E3rbg5s.;@PT0!"E -3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5 -s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!!VT0!"E3rbg5s.;@PT0!"E3rbg5 -s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@P -T0!"E3e'pFrW%HJT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E -3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5 -s.;@PT0!"E3rbfp3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT78p%T0!"E3rbg5s.;@PT0!"E -3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5 -s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3r_@83rbg5s.;@PT0!"E3rbg5 -s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rbg5s.;@PT0!"E3rT,/J5@6cTK)tE3<,X2s.D@O -TK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE -3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tEHoV/!3<,X2s.D@OTK)tE -3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2 -s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<*3U3<,X2s.D@OTK)tE3<,X2 -s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@O -TK)tE3<,X2Inq$_!.Y$A3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE -3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2 -s.D@OTK)tE3<,X2s.CJos.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tEIlRJ$3<,X2s.D@OTK)tE -3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2 
-s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s#m#7s.D@OTK)tE3<,X2 -s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2s.D@OTK)tE3<,X2rW%Fu:`TM/3<,[3s.MFP -TfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G -3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,\"3<,[3s.MFPTfE(G -3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3 -s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s')0Vs.MFPTfE(G3<,[3 -s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFP -TfE(G3<,[3s.ME%:`KB6J,c3%s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G -3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3 -s.MFPTfE(G3<,[3s.MFPTcc>pTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,\%3<,[3s.MFPTfE(G -3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3 -s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFP4eXk8TfE(G3<,[3 -s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFPTfE(G3<,[3s.MFO!.TMr)ulWks.VIP -U,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H -2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.Xm"s.VIPU,W+H -2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2 -s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIP>G=(VU,W+H2ufU2 -s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIP -U,W+H2ufU2s.VIPU,RQu)u]hes.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H -2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2 -s.VIPU,W+H2ufU2s.VIPU,W+H+?0=p2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.Y!%s.VIPU,W+H -2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2 -s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W*8U,W+H2ufU2 -s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W+H2ufU2s.VIPU,W&OIt1E9s.qXR -V)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M -2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV0k3$V)J@M -2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3 -s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J?XV)J@M2ZKU3 -s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXR -V)J@M2ZKU3s.qXRV)J@M2LeUCrW%HJV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M -2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3 -s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKTr2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV11E'V)J@M -2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3 -s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZGq:2ZKU3 -s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2ZKU3s.qXRV)J@M2Z<]+J5R\52#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M -2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jG# -2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0 -s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s')?W -s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQ -VDS=M2#jF0s/%XQVDS=M2#jF0s/%W&;AoH6J,cB&s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M -2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0 -s/%XQVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVB@_qVDS=M2#jF0s/%XQVDS=M2#jF0s/%XQVDS=M2#jG& 
[... remaining encoded image data and PostScript drawing commands of the deleted EPS figure omitted ...]
-%%Trailer
-%%EOF
diff --git a/src/externals/pio1/doc/images/block-cyclic-rearr.graffle b/src/externals/pio1/doc/images/block-cyclic-rearr.graffle
deleted file mode 100644
index 9c4278adbcf..00000000000
--- a/src/externals/pio1/doc/images/block-cyclic-rearr.graffle
+++ /dev/null
@@ -1,2928 +0,0 @@
[... deleted OmniGraffle source for the block-cyclic rearrangement figure omitted: an XML property list (OmniGrafflePro 137.11.0.108132, created 2009-12-16 by John Dennis) describing the figure's shapes and text labels (PE 0, PE 1, PE 2, compStart, compCount, ioStart, ioCount, "Comp decomposition", "Rearrangement", "IO decomposition", "Disk layout") together with an embedded base64-encoded QuickLook PDF preview, which continues below ...]
5xti9+W88x9fxBd8cTfggvfFBgh9uOjDv/r/fDDrCy/VR1iPS/XA8zI0DRf3CmVuZHN0 - cmVhbQplbmRvYmoKMjEgMCBvYmoKODQ3CmVuZG9iagoxNCAwIG9iago8PCAvTGVuZ3Ro - IDIyIDAgUiAvRnVuY3Rpb25UeXBlIDAgL0JpdHNQZXJTYW1wbGUgOCAvU2l6ZSBbIDEz - NjUgXSAvRG9tYWluClsgMCAxIF0gL1JhbmdlIFsgMCAxIDAgMSAwIDEgXSAvRmlsdGVy - IC9GbGF0ZURlY29kZSA+PgpzdHJlYW0KeAFlwglYyGccwPHZ7LDTZjaz2Wkzm9m92cxm - dm82s5ndm81QOSIliVwhJVLkihQSkahEjkJKpChHRK6IRKXkiL57/v/nfZ73/fX/PB+4 - BbURNKoXb61Hv8ltcuObqDdobL/9BuY7bnBHnX5nHeJ17pKbXEe9RpNr3O14z1XM915F - v8J9jvdfwVqrPlCLuWktTS+LD15Gr+EhuVkNajXN7A9XY25eTfNL4iOX0Kt4VG5RhVpJ - C/tjlZhbVtKyQn+8AvEiT8itLqJeoNUFnnR8qhzz0+Xo53nG8dnzWMvU58owty6j9Tnx - +XPoZ3lBbnMWtZQ29hdLMbctpe0Z/aUziKd5WW53GrWEdvZXSjC3L6H9Kf3VU4gneU1+ - /STWE+obJ2jwzeOY3zqOfoy3Hd85hrVYfbcYc4diOhwV3zuKfoT35Y5HUIvoaP+gCHOn - Ijod1j88jHiIj+TOh1AL6Wz/uBBzl0K6HNQ/OYh4gE/lzw5g3a9+vp8Gv9iH+ct96AV8 - 5fh1AdZ89Zt8zF3z6bpX/HYv+h6+k7vtQc2jm/37PMzd8+ieq/+Qi7ibH+Ueu1Fz6JHD - T449c+i5S/95F+JOfpF/3Yk1W/0tmwZ/34H5jx3oWfzp+FcW1kz170zMvTLptV38Zzt6 - Bv/KvTNQt9Hb/t82zH220Wer3ncr4hb6yS5bUNNxScfV0S0dtzS9fxriZgbIAzdj3aQO - 2kSD7hsxD96IvoEhjh4bsKaqQ1Mxe6biuV70Wo++jmGy9zrUFLztw1Mw+6Tgs1YfsRYx - GV95ZDJqEiOTGOXol4Rfoj46EfOYNTQ4dg3W1eq41TQ4PgGzfwL6KiY4TlyFNV6dFI85 - IJ6AleLklegrCJSDVqDGEWSfEoc5OI7g5frU5YjLmCaHLEONJSSW6Y6hsYQu1cOWoscw - w3FmDNYlavgSGpy1GPPsxeiLmOM4dxHWaHVeNOaIaCKixPlR6AtZIEcuRI0k0r4wEnNU - JFEL9OgFiPNZJC+ejxrB4giWOMZEEDNPXzoPfS6xjsvmYp2jLp9Dg3GzMa+YjT6LlY7x - s7CGq6vCMSeEkzBTXD0TfQZr5MQZqGEk2pPCMCeHkRyqrw1FnE6KvG46agjrQljvmBpC - 6jR9wzT0qWx03DQVa7C6ORhzWjBpU8T0KehBbHHcGoQ1UN0WiDkjkIzJ4vbJ6AFkylkB - qJPIsu+YhDl7EtkT9Z0TESewS86ZgOpPjj+7HXP9yR2v541HH8cex73jsI5V88diLhhL - wRhx3xj00ex3PDAaq5960A9zoR+Fo8RDo9BHclguGonqS5H9iC/mo74cHaEXj0D04Zh8 - 3Ad1OMeHc8LxpDfmU97owyhxPD0Mq5d6xgtzqRelnuJZT/ShnJPLhqJ6UGY/74G53IPy - IeKFIeiDuShXDEZ1p8Je6Y65yp2qQfqlQYgDqZZrBqIOoGYAlx1r+2O+0h/djauO19yw - uqrXXTHXuVLnIt5wQe/HTbm+H2pf6u30xfo/HQmv7AplbmRzdHJlYW0KZW5kb2JqCjIy - IDAgb2JqCjExMjkKZW5kb2JqCjMgMCBvYmoKPDwgL1R5cGUgL1BhZ2VzIC9NZWRpYUJv - eCBbMCAwIDU3NiA3MzNdIC9Db3VudCAxIC9LaWRzIFsgMiAwIFIgXSA+PgplbmRvYmoK - MjMgMCBvYmoKPDwgL1R5cGUgL0NhdGFsb2cgL1BhZ2VzIDMgMCBSID4+CmVuZG9iagoy - NCAwIG9iago8PCAvTGVuZ3RoIDI1IDAgUiAvTGVuZ3RoMSAxMzE5MiAvRmlsdGVyIC9G - bGF0ZURlY29kZSA+PgpzdHJlYW0KeAG9enl8VcX598xZ7jl33/f15OZu2RcSEhLIJWRj - FQEhQYIJEHZkMUSggEFBICJVkBDBDRdWMSGgBBBLEQxYq7iASrW1Fay/tiltf2AVuee+ - z5wbIvT17esf/fScO+uZO2fmO8882xmEEUIq1IxoFJ06r34B/gc+BzXvQHh7alOj79H/ - GfQMQrgNIXru9AUz5um/ePNthFgGIYVqxtyl0+MX/tqOkFaHkOfEzIb6aVd5/AFC6aSP - /JlQoUjiihHK4KGcPHNe45Ktk9VHoZwG5cfmzp9a/4juoX5Qfh3Kw+bVL1nAr1N8h1Am - ae+7t35ew8RVP1sI5TCUkxbMv6+RfoyB9pkjoDx7waKGBa8/dG82lLtgfO9BHYabXCok - Q29A6kMTe2uk6h+NKJg7g1j4B4fIe3/8kiMFUvY+Ut3SRI00UNIiQOC2Sw8lQ2+NEVIT - MiMLpFYINgh25EBO5EJu5IGSF0YqoCTIkcsPCJ9AOvY4CrPNyMFkwnMU/xTCRZKKd8W/ - YruRTpwX/ztdBO2PkECJJcXoBHoUbUftMJfdkA+jyagNncWz0RE8CR1CF7AHZcBaM6gL - jUDv4Hj8fTQdvQjtG9FJtAUdANzCaB6MdATaiAPxZVCOQn4KWh1/HiWjAvQwOo4KodeN - qCe+J34Qno5Bd6G9aB/8/1fYTx1gjPFX4pcAxzuhz9Xw5P34iHg7IJGGStFoqF2N3sAB - +mJ8JqBQBKN7Cj2LdqBfor/gB/Gh+Mx4U/xc/PeIgqcuNBbuFfgQ/j3dzjwcfyr+p7gI - SIRRCry1Dm1GL0D/7XCfgGUvx3NwI96Mt1BR6kHqELOGtYoxwCGCKuGuQvPROkDgCDqF - /oG+w1coG62jG+nT8bz4/8K6DodZkpk0oCa418K9EeZ0DMtwFh6CR+MV+Am8BX9IpVB3 - UdXU/dQS6it6FD2JXkp/yNzHdLIb2DaZUrwWPxbvjp+HNXaju9EitBJmdxKdQ1fRdUxD - Xy4cwEW4FE+Guxlvp47gHfgINRqfwOeovfh3+Et8BX9PsZSKMlOpVCO1mdpHnaTepWfR - W+gn6d/R15hBLMXuYC/LAtxvxCnievHdeFH89/FvgYJ5oJ9CwHgUugfVw2wXoH7oAZjF - frjbYdVOodPorHR/iV2oB30LKCBswA6cg0fCPQrfgafjWfgZfBTuN6SxfEPBQlBySk9Z - KRc1lppCzaOaqfNUM+2kU+hh9ES6He4z9AX6e/p7hmWMjJmpZIaiDcw8ZhvcO5ndTCfz - 
HlvIDmJHsePZZnY9u4Geyr7PXpCtlG2UdcquyP7GhbkR3HxuA6zOWaDZX5IN0HcxOBlG - n4PuRVNxGZ6CWmE1duB61ALUNQ2vA7wWoHC8ll5JV1JZQA1voJ8BtW5DK9B6ehLaEf+E - 3os+BkqZCz02o11MKXKzW2F1HkRZQEW9dzSSEgmHgoFkf5Lg83rcLqfDbrNazCajQa9T - q5QKOc/JWIamMEor91fU+TqCdR1M0F9VlU7K/nqoqL+loq7DB1UVt7fp8JH/1cOj21pG - oeX0f2kZTbSM9rXEOl8xKk5P85X7fR2/LvP7uvDEO6sh/2iZv8bX0SPlR0r5x6S8GvKC - AH/wldtmlvk6cJ2vvKOiaWZLeV1Zeho+EgU4FOlphHFEkZJ03IGG1K+YaYOEtCjvcPjL - yjvsfsjDMzpQXj+tY/Sd1eVlTkGogTqoGlMN70hPm9UB40SPqKb5pz3SFUVT6kiuflJ1 - B11f00HVkb70qR1Wf1mHddll2w/Fm7nyDbc87KACFfUNLRUd0bpHAFxSrCOl+g1QGj7W - B91Sa2qqO/Ca3kGQMc6GkZLhNvjLybjqZvs65P5S/8yW2XUALhpT3emIOsr99WU1HWh0 - dac9apcK6WlHbCuLBJj9kfTB6YNJWiTYVibSPz6UqP/gBEltK099AenwMX0AYPIm/1AY - Z4dvqvQSPwy2gEQNBahlagHgBFcNhmnOgvEM6aCAZuhABxsYWt/RPPbmMGaWJQZXN7us - U253kDnUldZA+7oW3QBYKWiv8/tariFYQn/PX26vqe+tkQV01xB5SBa6j1Y6cP3NfJME - DMx6ps0/k6xvk7SmUPbbym+pgDKBhoy5w9SRM3x0tdDhq4GKLpSaNrwLyUdXH8B4Y00X - jq/pQmXuI0iO6Hsmw+M0QmqzyuD9UEhPg4oUAXIZab4KmHUFoRVfi69l6LQWX4VvJhAT - E5BSeNDQUpMJCI6tBpzQOHhjtMbZl22oqRkA/WSSfuAv0LylBnqY3dsDpFJVZgwaZaUN - h1UJjq6+s7qjuczZES2rgVUA8j0xurrjBFBuTQ20yu4bKYx4xSxb75hzYMzZKfA8N9HL - WOgDuqhpaSF9jq32Cx0nWlqcLWS/JcpdGP1rRbS3oguRJjDx8i7cPBr+C4lfcJIKv+AX - YFg1BNN+QNI3KaoL5f17hPP7xg3/7A+jzZcQLvgPIVz4UxAe8JMQLuob6W0IF8OYiwjC - A/97CA+6DeGSf49wtG/cMMjBMNqohHDpfwjhIT8F4bKfhHB530hvQ7gCxlxOEK787yFc - dRvCQ/89wsP6xg2DHA6jHSYhPOI/hPDIn4LwqJ+E8B19I70N4dEw5jsIwnf+9xAecxvC - Y/89wuP6xg2DvAtGO05CePx/COEJPwXh6p+EcE3fSG9DeCKMuYYgfPd/D+FJtyAMCm8p - Qsw5sL1osAdLutDY1C7Eg4Eph8DrwNA8B4GUIU9/1oUYCAjy3GfoKPwDofGpR6EXFtKs - 7Fy9oA9BKGU2dt34A3v8+pAuZuT3B6EVhaLxTxkX2wa2owstjFrXsriCN+dpWVcepzYU - 0PNtBUpPpVvXdMr2UU+sB5X0lPRkZw1ZGu2HnOogDjiC8gAbtGhsYbAtDWHs5CGnk0HO - qjKHsZGCyK5whZGegSgVLkwi6VqFapHVotdxlOALBfX9+hsEQ76+H+VPovQmqyWXji6v - m7BS/IMorpxV0oTzWnYu2f/s5syqV9i2ywfEd8TPfiH+9YtjuOhqO664fvlbPOYqLhLP - i5//Zs2vYGpgcZ+CCZ5nN4FV4j/A4y6cG1UxDKdiuFYWKSrlZFKnzscKUUnJ1V9nZxnz - BuH+uXq//tSb24IbT9DftBhrdl6/l/5G6isK6+FhnwbreGd0VD5TwUxg57jv9SzzrMZr - KT6Fn2ifY19uX+561c6iJKxlXBq7wLnsDEasV6tNMiryjKzPu1hIUgkPcAWW+UmakHaV - tyApudKfAPdqj+5azyVUUhwrLunRGwozDdZCDKmhsFAPEaqVYHcxdlVAH1QaNGEkN3EA - LqPWKcKYN0ME+Op0Er4Abb6hBOf3z8/rF/QncTLOD3khx2A2cTItlkGFYBaGrfnliVX9 - xrSuOFIZZA7TpYtx+Jsvl1a8un5KwTQHrbkROYINC+YPzxs7Z8XmDcPXHGs6J37zwsvL - KhtG5GdPmL1XwiUb6MfBbkPZ6FTUO1Q1Nr0hMjV9cWRxuqw1iIfzqQpbqklNf5dtylOD - weCPmvR5ugfU6mxnXjLL5WWrba2hMn0XHhbVKgoy5lPeiG8VHaJyK3NuQaXnaoLwAJSr - sa90PTqCD8FGgiQ/M8seRHI26A4kBWWIDiOG5rMADpffG0aOgC2MGcwBXJkQeQQnYBaE - qI8YdcWEGletAsxwLUPl5VqA9nISwMm4PA/OzbkFxn4ERrDKAEEPNpuQH1suv64KVxze - +PKrOwwBoytoaRi8qK3hUHmQ7Yzei82/+VtlWsXCB8R/fBvC1jOPlCxsW/JEE8bP0pSv - 4LE5jUtKlz234MybR1aPyXV7DzT/WhQBVtiX4F9iMtmnIKdGk6JJckrBqzFFvWGQyThK - hlmOB1ueU1CLlewVWsUxdBe2vopb1fzLii5cfZDVVmokBK9dLY4BVZVAUqwvlFAD4ArX - ZqQyK3SntdlZWC/HeiEP5+pzzX499ZKYh9+NbaAea/vwQ3ADrI/dL7J4cge98cY9T4vP - k7FhVBr/DHhGM3iLjkVTqwzrvFShqsI4wTjDyAzgVWoOqRRajWaxwWg0aLQ+g5FDRqvC - mgcDS4o61A9oNG7DAC3D5Pm63Wo9V+CYjwp8SZVCYsWv9ZwCLtNTEoPVvnT15kqTbQBj - hiGjxNLD2tuADYVtXiyngrQHHDIYeX2sC/aE3AYR9jJhJHNCxNsTe4OwHl0xWW6y1rXG - 29Y5ZIQNQcMmyc1hzCZKSEoOxQwrouOe23a4uXZN5lPzqK9jzw7MSR896zQ2fC/2tIv/ - q8PzthV53lne+mJVVE7Tr4iLgkZBfPNX4tun35HWcGT8N4yffQY8bCG0J1p4vwNb+QAf - slfbH0Zr8To5V8krhJCQp9GY6G4uz8mG8mCvRKhVngL9fKuCKlYkZ1sjlWEJmFjh8uFj - lizLtAGb6N0PPQARASjBkANBl09rQTI26NN6wjhoTg4jlxFyZE9ghvbqhDAOWEJh5DZA - RPaExCtwYgOQHbAK1wJPtpj9wRBwDeoHOPxJSK+T+LPET2RmE7DnyuOdOv/g1Vs7FYMm - j599CKvEP58VPxu8Ao9Y9ejKnY3tzz7KPvPd6ruyJor/I964Oz381aU3xQ9xNrh3lEfx - tOuf/+LBe7u3bV9H/IYUmhi/yC5kL4NXxIMORIuc7FbcytJeWMUH8Vp2vZEdy9MPu/V6 - 
s2yAm1YNMMs9lMdjp7OpIl223uGTZ9vtXt8OYfZ0W2rqqKsje0bpvhkJOAFCQPwgtCCj - S0iuAchlDRiDmoAzqLTIc5DapMvBBr1Wx7mgxCI6B2OKoRU2VQ7SGiDiHbIcYCEQEUYB - cBHAErHEOYBt8Njqz8ASSgbCLvrnAouVpJoO+IWf8eB++pPC6c5PxWt/v/LZfQM9Jx2b - 2sWP4+iVyy8fxZVh9rJ48djGneJ74mlRFH+xp+bxr58+vv3X+GVcfu4PEj4vgTd2KugD - avBJzoh61+pbDVQOr/RoKeSx8ny20eFQBzR2u+OC0LQ+gUFMwgCVxEpiEoUEsUUfMAdl - HMsxHM1RHCtT6HiYrQUiuUGZgzkTeLkkEZ0Cm6Q2QGZCZIiO8gt6WvCBtDZxVART5xoG - Nw4rcmg//bv47BlqLM7ctaV6u/hwrH2vOTS/5pGxlViPM75vY40fnxTf/9NxsVOaA/jo - mR6YgxI8uqOiyZyHYZS0B4Nzj/colLyKUqkoJJtFFckdGpoPILta04WVB4UtNydUTGZ0 - 9RKQPFlVIimLycLC9Mju1fcG3M5k3thMp944Ty///iTlZY8fEkv3ipp2eDVcGDx8CG+C - Ao2s4FMARQlDdUYq+KOJSkVlZmUbQQ84e/YsUZWg1ej4efZroE2t5CtviaatBcd3N36T - OsOfVciG8OYBWto5gJO7KJdLacimHR5bttLu9nzyL+TYR4zScuQgB9GgevWnHKI/5WAH - b8sh+lMO0Z9yiP6UA/qTMwf0J4gk+iMRuYCH3ao+ke2JDHk6RKjQZBBoZvuxTbtOiVvE - /Sf3P/EGuKWdfxb//udL4hf/xGYNe/n6m+I58fDFOPriEzwMp3yEddefx0uvgYu4WOwW - 37sqHmAnw74ka/YtYKWA8dVH82apZhmWqpYZmCpTtWmmaZmJ4XiPXqdTYI2WrKSCp2QG - FSM3mbIZh0Urh0U0W35kEWN6UG8Sa6gDWGApMSg4tUZJtsqA2vwIWBAsKQjidmrLqb9d - +K2Y0003Lym9T2zEGx7exR7//MzL8dhm5sgAr0gveozwkEOwR5bAWOXAb5+IGjj1UFzF - 1uBqdhY7zbSE5S3HwJFuR07sipb6BV+wzrDQsNhEGzxek8tMCx6LiQkakgMeJJc7OY+S - CrqcvC9g9gYsdLZ2ltMR4YOBkMIejlwQtiT4TPHIWIImez6CG5S3YkluwXQK9Qn1jQiu - WiDYVJC0tZioFdK8aCGH+HaJEuHFIIisZuAfmZjwXZg7XbnhhUUDp4uObmr37nnvzZsy - fgLL0UpDxlWFilFx0wqXiUXdtGvBpqcLPaKC2pE9ObZ6d65/UfPpcZEKk2AsHn/tsWxn - rAUwqYufZ74B2s0EX7MYnRzRhvzBYL4mT6gMTgku09yfLJ/D2zTWAFWjmanZm0QrNAOS - kpMUNOOyPWzKzEx1DTDRzIBUeRal0PD65CRvOCtLbwtYh/KBsCPHG9APRYFMe3bOc8Ls - 3l0KXPcH5msAnYOEW5gwWfmMWG7tQmkXjAxn6L2Ip4JUMD0gA3uCTkOpKD1DStgUPhW7 - jd5U5DTbUrHdhtOZVCQPKVNxQIkzIM9FIPIYXPDQApG0Q3Q6iUuTPdIr2nq1O0ngE10u - FJSgzuuXTKR9QtqBTLNapLUwmxg/GCT9MfZw/aZeXzCpc/iI57vfvHMDCP4/4iHHtNl3 - X+zYNrHo3Ltb7twgPv1n8a/bt9PUSHxxxahNvkHPLcnNCaSn5U06/Jb4u2tNJfc9MWVu - ji8rM6loxqmrH2x45K8MfMDDqFmcB+ZZN9hqeVEX7UI6xsUZ5Farg8XVNOPjkJ2X/02Y - myuxckJmV4tHlTeUfYVKRkr8HBieHzQ2oRk3X7ggNrPdm797ZzP0mynOw+1Sv4OiPsRh - inaxjA71di7zUbga0VLfxct7+wbV8IeuyU6E9QKOCiwwDzZg5vnzuFlsjiNZv83f7gRG - SaMxoP+RLyxa+HZWjD6PFqRkYYVO6VS5QrlVulny2TqukDeo5LQzh0uWu3Uqd1EqlREp - OlxEFeWkBAw6juVdoSSrqwu3RP1Wt5cLuTOUlDtPWcwVF7tMXCRld7JjkDPiGqYNFdgH - Dnodb4UPS0dwK+qV7gkSuxQ71cdSQWU0FBLSIpstoyejh9gHsAclIgvn9zcnIWwP4Hyt - gGwep4AsPpOAhSTUnxKQw20VsFmAiNBPrz7UaxAkgy3QP38g1mDJbDLfZlMNAtsAtrEe - tKEceIUGNIBQMESSYF6//P5GrFk06p6aVmFmzrwp2WPxoUFm1UPLHi0SFLvZf75wvGmx - NaDy6FPSgrUpFnn/d5dvOX50a8t7E9OG7nzc7JJp1K7MGXgun2ZLnzR2RMrYt7ZXVbXF - trqSaHqNSlbqj1bNfnXdlheN+BLhffAFjj7HjJK+5+6KZu6y4zbbbn6vjR7G67ebaNok - czs4tdukdHJOp1UXMmAws/QOtyJktbvcXZg7KCxa0YsubN/ikT2FhT+mPfUDwgyozIog - 0hh1wYTeZIcS6E2CpDcpLeog6E0QyW2yINGbhB/RmyRjC1kSWhMn6ZOAYC6BjgIZlstR - F760tusWrXx5WNa6TQsesrd7/nbsg+vY8JGLGdXx8dSHds97bsdn6+8/fxrnfgWfDwew - gEFB/CLdw54E/cKN7o/m9NdUaiZodjF7nGyAN1Fatw7xbjdnVFBuq5LNMGboInqDw6sM - Oewe71phUemt049dAquph6iOerC/JSpy2FxyBcLYpoS5uSBCdiqIFE4+CBOEn0Qxhh9s - RzOIZ6uebCMyLZTXz5D7zaYdK3bsXLZuD24ZmzVw//MlL88/KF6/8lt8z9cfn/3Vm+fe - pvr38wyn3NcHbZlajdOv/wlPgP1WFb/IOGC/ueDrdwCroku38k86dnlpVkNpWZNZY9Ca - TVFV1MRHHHi48jW6G79Fdzs/4T+VX/B+4v/a+rVf2a3vNlCTeFZI1m6zuJMLZRxnEdwu - TuG2KAPcVtcu12HXxy4mYNEGXKxdoeL04J9wh1hHKDmDC9ntwdBHws7aBECxSxJ//ygm - +SZAvsEmrO1j8MBAJPtcgqwC+RmWhs/FmGVk3qBeZ9AZdSYdI1MFkpzJQbAg3UHsccut - XBApzZogVmv8DgGqWIh4G9AVeDcAaMLLpX0pMfSU1JRVeGEtWlgLxgpgbDELCSudEBAY - 5jJJEUe5kjhNkgETPHShIN+gu3GFfWzro+OyTAe4O7LHLB085oz4J2z7A/Yqw8P2L9/N - Yj9TOeeuO+cOe/6F07X5lUWPZ4x26bAfvplTuFQMLq548GAL/ozokDQaKBbRX8OaeFE6 - nGY4HB2ZbxrKD5VX8zXydao9zt3uPaGdqUecyihPW5IimlOKJGBzjCzitisMboU2g8vI - 
YF10hiUjPcI6slSakHpQMOSyZ2bdQohXewoJ0rFL13o1XrBjgCIleBMkmeYPOzxKfXJA - F/R7gkEUdkCkV2oEpNWo1AF3UhCHnBHYjypQDnuZ2w/mnqRAWvNyQamXCUnBUMLz0T9f - 4mDJetiGSHIeEWuP+EAwtXxybt7O4gXi2f1/0RxWhwY+9F40SOe3rXhF/B5zR3HZiw+8 - URHYvPzkHWni+0zpIP+QtTdy3mm6uP2lqlDxpvGfjxn9T+zGapwh7jjRec+2V4+3T11N - pQOeFJzIQLhI0sk5lB91cZcZEGYyWkFEIuAf4WgiEfcKUxK7tHjkqVjxqV6xVVJcMhJs - OyKxiBNj9WG4mJTvL7DHiR0OshYWjOiwSrQrOq2GwgN4bKeAUKyyCewMdqlsCbeWPUKf - pS/SCpaV8Twnp6nV1BPUixRNFRrkcoaFD++yeQaOg2fwCZ6VyXmWmIigG9MyBSdTyBxq - 8NBEkNKuUncKU45gS0JSEcFdbB+l+8om6YbFJURCYQhrR2ak8it0v2TWZthSa9kVuhM6 - vpgvJkoikPUimArOlQPpcXp/83787lfidHzgK7Fz6372+I19uFucH5tCuVrEe6X5rYdJ - DpSwi0TNMFcFC6ABZiDoGfYWyEBXTagQJQmw1h86JFk4BCPAXxZgKlEQrYkWcTynkWmt - vFVj1Yb4ELCCKvt45Qylyh9QONx+u4JirAHBbXWrZRz4WFwB2qgIwzv1EVMXxp2OCAgU - HAVemREA4rOHwl1YffCHpYtd0l3tuXpT6bAWgyk3sgccGTcdntlZqNaYa5acMbC9b0pZ - v574MmVmWGWiREu51Z3RfjULm0elJRc/3/DJqJRjc0bOfvKwI7Jg+q5DTGbbHckDS5Ir - xo99atzGWH/q6zmjN+6MPU4dm5cz/Jn3Ymd66Y7ugX1MTkJNjmYflnXLKEZmkoVMTbJG - jjWpKJNN52ZhmjalwsE5HEgVkTtcOMMWsSO7E1QY2W0zk1hjQiuBefUQlxtx0BDfIkzp - lqmQGQCv0mCYD169b8TemZdGpx12Z62MRoYVpDsP4V0w/sljnp3wfOxO6oUpxdPUltK8 - hbNi78Fggf8Ugd9VAHmvAn+AHT0WzW3jW3VPWl5idvM7dXssXfwZ/mPmsuZ/TKoBvMxt - 41Rug9LO2e1mKqR1OOUhs93h7MJykPq9XD2htPfpUxL7TkNWJqg0yoED66kg5qyQY9WQ - U5hUQYR1EPEWEPK0BiKJR5OIWKbJBsnQgTUC3mEA5xF41FBCsH+xJmvE0ZdaW1+Ag2A3 - xH9+Lt7Ahj/KGrF2Z+vkJ2507rtEXxT/Il4VY+IrOPUGKF5RItubxLuYAExdAx74xmja - Hn6XlQrzPpdeI3ObOa1M43YpkzRUyOZIVmToMoRIktbuT14rHE9MDzbipYTYkgQVWZhe - v5nL4kSsI8gEkRMmxlogwnZNENFWaU7StIi/MDlhusGaEUMhF/fSJxzOIfIG1D69n3pr - V6Di6LHyAMRiRnt+9O6fvSYebty2dExW0aGlH37QPOnAsWnblk/YSR/YODRcDG6xmPh8 - 6z15nqGxz8kehH1MbYI9qEd3RIMhOqjuT1cyjIbXURq5Xq4K8YQM9QreYcREd0F2g7EL - lwP5rZQEM5kjsBqY4MiSU7FTIDN6neTSbiKkB2ZmBqgiZAut32d+cQ5rc+ucunWbYKsc - yd9O0W/QVPuiWBvZF6Xxj+nXmOEg2zJxRvTnBfI2ttXwpKnN3JYiCycHQvlChVCZXBka - nzwhND15RnCpaql6qabJ35jcGGgM7vTsTjPSINLZdCbDiBxmp9VlM6ebMsJa5SywovMD - VCBJrWBSjba3XG4jx7gztqUqMzm5RkdxKFPIdHhtFlvIOigc5EJhR7bGG9INQqEMe1Z2 - Z58eAiwkIR8LdZAj0y3MJEp/wtoEWSmxlISZOQKnU0EzmJeCxisgeZATMFiYAmJTIOc2 - QJ3TZBOwT5skICFJo+ZDCgEHA3IFWJwCkkUg8uhdArEyE1ZCwgEoeQElErlJ+GSfS3bm - rWZmPjETuP/bzgTCCYbwFT5Qtnta28DQfT9fP7jxN0f+MWcItZcNDnpy+qzy8Kj7T5bO - +vS3V7o5fBiPnpg1YcLd5cmgwSWlDF3V9vrGiTMH5lSOilak2I3uzLTyJ35+7tPnqO+A - lqzxK5ScnQjcYcyr6gzFCQ18/yqJBhhLoZWWaRR6B7Br+IIQQWaNWUt7aYq+YQHv4Q1h - Rq8VEKstPEX8MrqEzMgkTDpW3KOLXZKEB/lYQPbBTbsnmAd6bu7u1/btC5qz1R6Td0ho - 5cTHH2cniuc3x8oLjEpMbZTzq2ZQpzdL8r45/iX9W9jPVhjh5OiALtMZEyU38ia70W4K - y+6nPwZhi1iNAsnUChZ4l42z2cC0yFBEVEqHA0fIYD+4KVIkdy8h/z49qQQcMDf5Lk4M - FBQYooz3l/TDEIw3gAscWQ+9XhY4tJfy95ux+fLYdOI6jBWO6Ve3e+LTlOb7958ZmDLu - yTHrqU8cZE+YxaGSzkc47tvRe1vM62y7bDTRIwoMVYZqwwzufvp+boOpDW1l28xbLVut - u9Fui64KDTdXWs+amTL2LZZay+5EO/EudreVTQ6zNrPVAnqOWaXUunkNYdAWJ6wMi3C7 - 1WxrV/3cAnz6I2EG2eF28GtfssUKC+Fnl1bFlmDXI2OFOfZMG3ifismeHz5madRghoO+ - lnkGq9XGYjwPzgDb4POJbsUpKeEhxcQBs5A4/HGujKbgoxIhRulzV35/+BKKczFNC93B - h6aUPtX8VDDiyUzR5WTq2EEasfEdcFoxmTPEx8W/vCJOPyTjX1TLBBv/RDIz6kYb/SDh - ZdJ1JXbuF/doi68hfeJIc/ddkyaSB1JqFotkAfBMIfDX9bYnqSwiRuDoNP624UaP8vG+ - J1J/ECWzBlRKFYLmdR+KQjjVm2ZD2g9CKYSR4P+YCOEldjxqh7ZnZYVoNMlD3SFI66Cu - GUImtB3DIFQEaQGEKggDcTdaDXXNkK6X7YX8fVIgbZqovWg9PCPvt0K5GfKgb8G3un7o - eXQGj6MQNYr6A72E4ZnZzCvMW2wZu02WDKdLXdwo3sZvk8vk5xRLFDuVFmWB8pBKp1qn - uq6OanyaBZq3tXXQE8EhGc7N0mgOeIYoOM+tAyUdcV8rVIgoxKSFoRcTGTxD44bcNXFk - ZWpVw9ymhsZZU+uhBQUBrngDnCP+sSsZKmlk7D0LTvafHbS+EHxASwPvfy7KQ/loEIqi - MlQhnVkehkbAed47pHPTY+As9F1oPJqAqtEkBA5zci5hKIQSCHkQUlMH2wC7negxCM9B - oNEs/AhaCmE9hCchMH25PVA6gh/pZPjoUbwUOeArsJLxjjPZvTaF0vsBqFaHnvF+avvy - 
GDht1ej32N6pRvLBCvwcfhZNQ178EljDy+BUdRhvOxiZ662DR3vQAgjNEGgpxnhPpyfH - +wZOQwH4Fu/FQeRh8GveP2aney9nd1G403sy1MVA8ksPlKJa7wn3M95fuGd434CwL/Fo - bwRavObd457r3ezpwts6vZuIgtvpfTyRLHbDX1/zzou0eqdlS89HtHZR+zq9hfB8fFTp - zS8QvHnuS97MUBePoZzuHuFNyf61Nxn+CM180Gkgqve63Ju9A+CRx10eGgDhGN6Lt6MU - vL0zMMx7FLIw3YNDIwWtXfhnB6vC2YEuvCyaXxVujVSFApER3kCkIhSC/Pgz3Grubm4w - l8OlwsFmEHackzPxBl7Ha3gVr+DBjOnCL3eWeGXH8D5UArDsO8jLeLYLvwKVzDG8X6rc - f5hneIpHvKkr/gU41zEC9X7fISA7jCDzmkzKybrwfjhLQqr2R72wJTAChyPEOqBE+CQA - 5AaES2GeQsPgBOmjXTK0xtJUYisxDNIXVpT9v6I66cnNWBKvPx7ZsLujFc4wdux118Bx - UcjE3TU3mwLb/P9cjYuhQUNpaiqwzYNNC2ZPl46/+ssb6uAUbMcjTXAcuXmKz3dg9oLe - s73BuilTZ5Lzl/UNHQv8DWUds/1lvgNN0v9I9S2Pp5PHTf6yA2h6+bjqA9OjDWWdTdGm - cjjeWnNwSumi2tvetb7vXYtKf+RdpaSzReRdU6T//cu7asnjKeRdteRdteRdU6JTpHcR - CMpnjS29rxGoE47IwhHV8NiOoXdOrIaT4DVlXXgnOTe7GP0fAOQKiwplbmRzdHJlYW0K - ZW5kb2JqCjI1IDAgb2JqCjkxOTkKZW5kb2JqCjI2IDAgb2JqCjw8IC9UeXBlIC9Gb250 - RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2FwSGVpZ2h0IDcyNyAvRGVzY2VudCAtMjMw - IC9GbGFncyAzMgovRm9udEJCb3ggWy05NTEgLTQ4MSAxNDQ1IDExMjJdIC9Gb250TmFt - ZSAvVENVWk1HK0hlbHZldGljYSAvSXRhbGljQW5nbGUgMAovU3RlbVYgOTggL01heFdp - ZHRoIDE1MDAgL1N0ZW1IIDg1IC9YSGVpZ2h0IDUzMSAvRm9udEZpbGUyIDI0IDAgUiA+ - PgplbmRvYmoKMjcgMCBvYmoKWyAyNzggMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAw - IDAgNTU2IDU1NiA1NTYgNTU2IDU1NiA1NTYgNTU2IDAgMCAwIDAgMAowIDAgMCAwIDAg - MCAwIDcyMiA3MjIgNjY3IDAgMCAwIDI3OCAwIDAgMCAwIDAgNzc4IDY2NyAwIDcyMiA2 - NjcgMCAwIDAgMCAwCjAgMCAyNzggMCAyNzggMCAwIDAgNTU2IDAgNTAwIDU1NiA1NTYg - MCA1NTYgMCAyMjIgMCA1MDAgMjIyIDgzMyA1NTYgNTU2IDU1NgowIDMzMyA1MDAgMjc4 - IDU1NiAwIDAgMCA1MDAgXQplbmRvYmoKOSAwIG9iago8PCAvVHlwZSAvRm9udCAvU3Vi - dHlwZSAvVHJ1ZVR5cGUgL0Jhc2VGb250IC9UQ1VaTUcrSGVsdmV0aWNhIC9Gb250RGVz - Y3JpcHRvcgoyNiAwIFIgL1dpZHRocyAyNyAwIFIgL0ZpcnN0Q2hhciAzMiAvTGFzdENo - YXIgMTIxIC9FbmNvZGluZyAvTWFjUm9tYW5FbmNvZGluZwo+PgplbmRvYmoKMSAwIG9i - ago8PCAvVGl0bGUgKFVudGl0bGVkKSAvQXV0aG9yIChKb2huIERlbm5pcykgL0NyZWF0 - b3IgKE9tbmlHcmFmZmxlIFByb2Zlc3Npb25hbCkKL1Byb2R1Y2VyIChNYWMgT1MgWCAx - MC41LjggUXVhcnR6IFBERkNvbnRleHQpIC9DcmVhdGlvbkRhdGUgKEQ6MjAwOTEyMjEx - NzA5NDZaMDAnMDAnKQovTW9kRGF0ZSAoRDoyMDA5MTIyMTE3MDk0NlowMCcwMCcpID4+ - CmVuZG9iagp4cmVmCjAgMjgKMDAwMDAwMDAwMCA2NTUzNSBmIAowMDAwMDE3NDg3IDAw - MDAwIG4gCjAwMDAwMDE0NjQgMDAwMDAgbiAKMDAwMDAwNzM2MyAwMDAwMCBuIAowMDAw - MDAwMDIyIDAwMDAwIG4gCjAwMDAwMDE0NDQgMDAwMDAgbiAKMDAwMDAwMTU2OCAwMDAw - MCBuIAowMDAwMDAzOTU0IDAwMDAwIG4gCjAwMDAwMDMwMDMgMDAwMDAgbiAKMDAwMDAx - NzMxMyAwMDAwMCBuIAowMDAwMDAxNzI4IDAwMDAwIG4gCjAwMDAwMDE4NzcgMDAwMDAg - biAKMDAwMDAwMjAyNiAwMDAwMCBuIAowMDAwMDA1MDIwIDAwMDAwIG4gCjAwMDAwMDYw - NTAgMDAwMDAgbiAKMDAwMDAwMzk5MCAwMDAwMCBuIAowMDAwMDAyMTc1IDAwMDAwIG4g - CjAwMDAwMDI5ODMgMDAwMDAgbiAKMDAwMDAwMzAzOSAwMDAwMCBuIAowMDAwMDAzOTM0 - IDAwMDAwIG4gCjAwMDAwMDUwMDAgMDAwMDAgbiAKMDAwMDAwNjAzMCAwMDAwMCBuIAow - MDAwMDA3MzQyIDAwMDAwIG4gCjAwMDAwMDc0NDYgMDAwMDAgbiAKMDAwMDAwNzQ5NiAw - MDAwMCBuIAowMDAwMDE2Nzg2IDAwMDAwIG4gCjAwMDAwMTY4MDcgMDAwMDAgbiAKMDAw - MDAxNzA0MyAwMDAwMCBuIAp0cmFpbGVyCjw8IC9TaXplIDI4IC9Sb290IDIzIDAgUiAv - SW5mbyAxIDAgUiAvSUQgWyA8MGY4ZTdjNTk3NTA3MDNlNTFjNDZjYWRlOTU0OGJkNDA+ - CjwwZjhlN2M1OTc1MDcwM2U1MWM0NmNhZGU5NTQ4YmQ0MD4gXSA+PgpzdGFydHhyZWYK - MTc3MDUKJSVFT0YKMSAwIG9iago8PC9BdXRob3IgKEpvaG4gRGVubmlzKS9DcmVhdGlv - bkRhdGUgKEQ6MjAwOTEyMTYxNTM1MDBaKS9DcmVhdG9yIChPbW5pR3JhZmZsZSBQcm9m - ZXNzaW9uYWwgNS4xLjEpL01vZERhdGUgKEQ6MjAwOTEyMjExNzA5MDBaKS9Qcm9kdWNl - ciAoTWFjIE9TIFggMTAuNS44IFF1YXJ0eiBQREZDb250ZXh0KS9UaXRsZSAoYmxvY2st - Y3ljbGljLXJlYXJyLmdyYWZmbGUpPj4KZW5kb2JqCnhyZWYKMSAxCjAwMDAwMTg0MjMg - MDAwMDAgbiAKdHJhaWxlcgo8PC9JRCBbPDBmOGU3YzU5NzUwNzAzZTUxYzQ2Y2FkZTk1 - 
NDhiZDQwPiA8MGY4ZTdjNTk3NTA3MDNlNTFjNDZjYWRlOTU0OGJkNDA+XSAvSW5mbyAx - IDAgUiAvUHJldiAxNzcwNSAvUm9vdCAyMyAwIFIgL1NpemUgMjg+PgpzdGFydHhyZWYK - MTg2NDYKJSVFT0YK - - QuickLookThumbnail - - TU0AKgAAEbyAP+BACCQWDQeEPx6O95gAEgB7PIChILAwCPh6PgEAwEQiPR+QSGRACFuq - HA59vKJRSLSOXS+YQV+O91PMGhICu53PsHBYIzN6AgIy2Y0WjUekACBP+ktxfKxiNd0t - 16hcymooABnrZoPsQkYhC2O0myQd1KA9JgDDwVNxqAAwVgGt9juQODkWhECWW+QR6NhW - KxnBgUhV3PqvjkJtBeMkKDwkCQGX3KZWXUukuZuOILBh7LBZOAbE4iXNhMN4CQnD0PZa - yO9hr5yDkbAXQaLSB4CO5zPwJBqia6jPRzOJ9hYLPNpMZwgUYjEHr1ZOEhloehHhdns5 - jtd3vd/weHxcLuePzef0en1X2ltJqGuCP2Rt9vPUQiIFzBxuJ8h0PAOmAAgAHYWhYLqY - GkVpWn+b5wJgdZ6nqCoFvyl4BCGsLnpGZhqGmYiinicJwgeD4PpgeZynKBoNg2mAXoII - IWBapB+G4ZxmH4DgYBQCyxu89pqA0FgWKYkRaFoBomCYhqXl4XgOCIIhyJgapqDHApCJ - gaJHkgFRsmymBln6foagGAaYGwMQwhaGwbJGY0OlVIaYHqXRdAWIwjJgfhgGCAggB+mA - emoagpxko51GcXx0gwGIGHQZ5sngBIBHobJzH0GIbg4bZqgAJgqBxHyiyBIUiJHI8kyW - mEnShKUqStLEtS5L0wJfMUyTNNE1TZNyRTgac5BZOk7TxPSXz5P1AUFQlDRmo58GwZBl - gADwSH2cR2AgEYLgOvZ/HKbJtn+E4ehaCyk1LIcipDVMlSYl1WyjKaXyrK4WSyl8ty7L - 8wzHMszpfNM1zbN84zml86zvPM9z7P9ApfQdC0Oj58HwfgEAQvbhH5i4CY0l91VPI0kX - fVkn3nWF73yl191rf1c4Cl2B17g1g4Ql2FWNhtlYgl2JWchFElYTBjgQGC9A6EoFgKBY - EnUdoCgSeptmydwDBeFQInieJ/H8AQIAUd5zgeEoGmmZxzAeDYMAUBgFnqcx4BcsANVH - kV2JBd1VyblFX3rWN8VnflbJdXGAV3glfJDYFhWJhdjpdZOH2ZidnoMcRkFkYh1AYBh6 - HoCgLgYAwKgUcxzH+FASgsfx2HCcR0HcjQGqUBwIHodYHhmEp5mubZ7AKAwCAkDwUAyC - ACJ6iuNqUgT3VNvKP73eCR3lv6XXtWV9Vpftb3/XWBV5gtf4PYeE2LhlkYdZeI2bijLY - tjGQNcpZpmoUqCekjxuG4cgJASAcJgN4bw5gRAiA0SAdo7B4gSAmA4AIAQNoFCETAaYw - hhD/JUTAcSKQPIsQCB0DoLQXovJENBDo5iijjgIB2A5MByDfG+BwEIISYAUIIDZ+B6yj - Hlh4QcX8QRjRDBdEUGER4RAdL4Todw0InDRigGWKTnjJw/itFcssPosEFHfF2KA0YnFd - H2PuEsZQXgXjQQh0A9BpxtjDF0d8ZoSgcjpFuO0dyYRajwR4dEfYvxfAJIEB0gx9SFHb - IcFMiYjgwBHI2CKAo9yRjUNwWQrBeADBSDoDwEwKATH4O0fgCwHjxHWPICDyR9wRHUO8 - BIHwHDxH3KIeI7JQgAHWOsfYDwPAUH2O0eqlR5j6AGAkDYNVzx6kkQVMY/RtTNjCMuaB - BYxj7kSCkGU1wWzZQohWZM3S/DiGcNo4ozRgDSAKDkHg8RYDQA2FgHYJAJgQHQLwYQ6Q - FgRaUBMv45WLDWHABcJQPwND0HGNceI9x4jeHiAgEoKgVgYAqCIHgLQNTIh+OqjDXh/D - 3o5F9Qg1AMUhkWkMFjGQEUaf6NyL416WAepdGUFdMQDUzm9TUo6NRkDHAACwHpkiyUWP - TH0dAXaiDzqMFypEcgXyDAcUYflT6WDXi/AQbwJarUzAMNurQcquVYptV9H5A4sCYrIM - WswFK0CPrUeBiw+KPhzrgN2uQO66CprtWCvB5KxV5PCOKvw7LAARsFDWG1fLDRZr3Yex - Vi6v1AJGNMZoyx/DGFkUwf6ApIEeHGAYCYHR9DtstZgkA3QNgDBICYbhMB1DpAABYC5M - RxtXAgBIBD+yCSQHCNwD44wSBKJgPKGQDoap0HIOQBcdSXguIIEmHdTnQD4HkNQZ46AR - A5BkBYi47x6AEKG80vg+B4DsHmxgCIFDgv6sSUUaYyhigpGeIUkZSxeD+BmEUAYzr4kC - FmBYDgUApC0JgMcYwHQcg6HGTAVozgdAjBkzK0T+ilDNFiCcaoTxBEwHiLcW4DwkhJJg - PoXwvgDBCgqS8GyhAtXNJiTMbgwRfjkBSDwDA0hyAHAgN4YI3gBD9GoNwCQRQXAJa8Pc - eoAwFDuGiMofNEgSgMHmPKUo9gEgNHqOUfYMwagAGON4BoKwLj/H0PYcw0B6glscSIaY - zBhgpHGHp5xHoIvOF2PAHYRQIjGzeQjOJAhZAABTf7ABL8BYEwMTATwzgpAeBlU0g6RZ - IDbFiA4bYTw8YYw1hzDxL8QYixITDE41MUuXWgQseg7hqDXHMCcGoNB8DbGYPYBgEx4D - vH6BEAo+x+gBH2AICQIgJy9eCAIb4zBvgRBSBJMYCpbD5A0CMCI9x6D/H4PkdYBAGAUA - GAcBuZyQjTGeMIFQ9g3EFIHJCyxBRdjhCKEYEAvNyFM3MUogosh1A20BgHAeBcDkvEgM - 4NYGAZAZ3JbfCA/x1CxHqOEJ+4yX4Zw3h3D+IcR4lJdp/UNjL822JeNMaAwQUgKDHZnC - AALMwRF2NEJQRAXC15FvLkhBYIixGoEAKAUdAku0HvomAgxnCEAgDEEfL7MFLziPcWI3 - hzhPDDpbh+mSXab4np7FGKuMaNvSTEaY0hgArA8Fzc/BAA2h5ILoYYUAjA9Fh1/oXYgA - iwF+EnmvNyR850KS8OozhOgMBkC8zFouwkCAK5sdoTwr9M0xxHTnFCR8W6p1Xd/GiXUf - pITAV/lQo+XJgK4V4Ze4740JvslwsBXh9CkFENRIxVixFiMUJ4TvDcQ01xLTuJup6i8d - 1byBI6PlGq0NuqwJSYDaG2M2qwDyYQyHHDWJRL6tD6BNQ0kfwhttlBITC3NuoAfWgIB+ - F5L9GA18b7fbnt/yflO7+P836f1WI9z+v9371SdX/h/P+nGf6/3/wSD9BIT3BIL8JGdA - H4c8u8JEtWAitcHeJgHcHaAcgcHkJGq0HqGAA4EqAQymJGHwHAHAAQBABAROG8G+HwBE - 
sKJeTGAqfCJcB2IICg/CLIJmzSHIAmdWAsAgI6dAAAc8JIHpAEI4JGY6HoAAI3AI9wL4 - PcBESGHuJGGeGeAQOeHwJgGEGCBEB8B+G88850JEUIH2E8BYHWJgHeFsFsAiCUt8JeHO - FkFmAyCcCaJgHaGkGkAmiK6k1BBaRoHeHEG4HaoSG0HQHsHAHKA6BkAMGuH2CICeAyHE - GGF0FsHWA4BmA2AYAaAMH0H4AaAieSAEHsHyAuBoBwA8VHCILLCNCRCVCZCdChClCpCs - 0E3y7qJDC1C5C8JfDBDFDIJhDPDTDXDbDfDiuU9pDo9sMoH4USLqAqBIA6HkHOHGAiBE - M8ykA4ASH4G6GyGwH0AUH+HoAYBKAgHUHYI2HqHwAWA+niAqA0L0/0/kKLFGBZCSJFCX - CaBjCeJfCjCnCrCvFcJBFhC7C/DDDHDKJdFxDVDYJfDdDhDlF+4u/c/2JBHXHaJDHfFP - HnFTHtFY8+90GpC3H3FnH7FtDNDRIFF3INF84q9q/fIWI/IbFLHhHkJdHpFVHu9BHzIx - FjH5FrH+JHIDF1IJF5IPJJGBJNHSJjJTHdFNHjFRHrFW5xFbJiI/H1FkJdFpH9FvI/J0 - JdILF7DnIS/XJOI8fuGg5GJCq0G6qs6C+YG3LGBLLKJdLFLJAgG2G4KYAIBIBK+qJEGw - f6Am+yJeHEq0d4+BLM98qsJgAEIIQLGC/LK4/zMU8dMTMXMcsVMbMfMkrxMjMnMsm9Mr - MvM0j3MzM3M8ixM6I8HwkoFoGs2aAsHkHoAWBiByBSHaGOGEH4BYB8BRHOisHwHMF8GA - GwA6BGAgHGHQOcLwHSGOGcA4B6BkiqisHMGmGGGYHKHmH2HoAEBqCGCIA0H2GuGEGyAS - CEp6/VNCIQHeGQEUFSHgCaBsAGHCH0BOCIBwAaGuGQGyH2AoBOBkBIOwisHeGwFwGeHs - BOAsHgGyHoBJPcA8GOFAFMBGDAC0gSiuG4NiHYAMHsFyGQH8DKDGCUAkHcGYF0HCAoNX - Lq/NPCIPB/CCAYJmHMJ2N+AsHudQAKAsOBCGiwHwHfRUH3RZB02svOi2HwHVRUImA0AS - HlRUAAAcA0IrPBKDM/SZMxSXSbShM5SeMsRsGiHgHUHCHyAmAWHOAGBkCoBlPyjsHeGc - FkGYAKA+AbRgAgHsHqBRPui2HeG4GmHAHMGyG6H+BGBEA2BBPvTEO0L+GmHQAoAeH4HY - HwH6HgHcHyAgJ8AAHqH6H+HgGsGqHwRIAwBEOwHyAQAKHUG4HwBICABgXQJFRIJGHEGw - HEAKAIG4FyGUHgAYokVDVIjsHUc0G8A8BwAWeCk8HmA0BQNaixDuG4G0G4GgHGH0MIoc - 65T+OEHoG4FwGIHYAiAGHIG6HEHiAMBMBWBEH6HuG8HYHyAyA6AWHCG6HcA8BMAqGoFs - GIAyCGCGA0AcAymNQcJDVMJERsGoyMH+AGH+HQGUHuBfTBVpTjTKGeAoCEB0XQHAGsHR - TfRFP1TmHgAOAeH62mHkH6ApT8ivRqHMHuATHNFAPZSnSjZMivXxZPZUMtZTZXZc/YPG - IxB2IIkCH2H4AAKHZHRoIXZuAAH2AAALVWc9Z0PXZlZ6kDZoI5RmouHEHMAUA1RlZI/b - SoGcGwH6HqGpPmAUHoAKBoOtYKixVsFMGiAeBcH6GqHOAtBGmvYih/TkGmHWH6HWGMGs - AGBmBSBcBbNqO6HMGcGGHGHuH0HeH4AKH0HKG9WSBYBAAKHkH4H8HqHsAuBeA4HiG2Pq - AYBcBYBCH6HaG8Gqf8HcAgB9a/HRamMrVQG2HuHWGiGOtYAYAuB8CoB7bAivVsFYGgHy - ANY+BECEBAAceNWCivTkGwHRdUGqHKACBYBIBWBRPwO7QgF8HEH0HrCAAuBCAYHyHsAU - AeHeGyG0AsBeBSHhXOAOAQHkHaH6AgAMIYA6BmBIAGIiHsAOBDY4I/ZaI/U8HEAUA4uw - HkG2GsHxfoA9OVR6nAG2AA+egSHUHAHkWtXtNuM0HkAcA4qaHkHIHkAUBIOAh+Y6H4Y/ - aWMpfvZfhG/jdNhJhOO1hFhRhXftZLhZheL5hUIPR8HEHcH6zCH+QAABGMbsklNwGmGS - G2HeBCBuB9E+jthphsH0HQHYAGAgASAWBEBJdorYIyI2AQOIGwG+H4AmBCAkKGKIIVCB - AGh7hcL7eGYsG0FiHeBQCAAyArebWbVqGQFYGTASAgBndlioPXeGHpbkGSGyAOBeBCAw - BKBbeCPGHEGaGEG4HWH4ASAOH0G+HUHuAojoHYGKGaAgBaAuH4AiBuCcBxgeZDjML5bf - PoASG8HQH+PyApb1jki3bEGCHOH8AwB2C5dnTHYnMIGyHAH4A6Agy7TgPOOIhiHEHfUa - lsHqARUIAIdeHSAigAAPX7ahlhVLlKLLB/g9B6Ixm2fokjRqHOHeHwAaAqAzR5g5m62s - Y1jFB5aJRHmxhhnkJjhlnnhXnrnthPnwIJhpgkAkH4HmHeHsH3h5nfghiBiFiJiNoMPR - n7SPGGHOH4H0AOWtj2OzRrnGIIAQACHmGMGQG4A8BmBkAmASAYu6ABRqlWHYHSH2AzkP - gJmvhNjOG5i0GmGAGUAoB3jcA5jikldtjsABjxj1l0GwHgHoGoFWGgA8CwBmAwA9kOPH - TkGTPkHeTOHoASBOB0AEGUFqGmH0AjLSAuAoAWH6HCHKHOHoAgB+OtlHXvniKTbeHoAe - AeHgHTCAAJpdb2kjlllpltlxosPTbfkqAuHkHGu2H2H8BJmGPEY6HWHUHcASOQHlTqAc - AwAaHkH2AKAAyeN+AkABZ6AAY/m+/tsZnVm5B3nXhBY6bHnHnLnPtUPRm1tTnbaG/oIC - AAAOAQAAAwAAAAEAXQAAAQEAAwAAAAEATwAAAQIAAwAAAAMAABJqAQMAAwAAAAEABQAA - AQYAAwAAAAEAAgAAAREABAAAAAEAAAAIARIAAwAAAAEAAQAAARUAAwAAAAEAAwAAARYA - AwAAAAEB1QAAARcABAAAAAEAABGzARwAAwAAAAEAAQAAAT0AAwAAAAEAAgAAAVMAAwAA - AAMAABJwh3MABwAAA+wAABJ2AAAAAAAIAAgACAABAAEAAQAAA+xhcHBsAgAAAG1udHJS - R0IgWFlaIAfZAAwAFQAIACMAEGFjc3BBUFBMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAD21gABAAAAANMtYXBwbEEj5hRuMs7GqKR1ONKdcX8AAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAADnJYWVoAAAEsAAAAFGdYWVoAAAFAAAAAFGJYWVoAAAFUAAAAFHd0 - cHQAAAFoAAAAFGNoYWQAAAF8AAAALHJUUkMAAAGoAAAADmdUUkMAAAG4AAAADmJUUkMA - 
AAHIAAAADnZjZ3QAAAHYAAAAMG5kaW4AAAIIAAAAOGRlc2MAAAJAAAAAZGRzY20AAAKk - AAAA+m1tb2QAAAOgAAAAKGNwcnQAAAPIAAAAJFhZWiAAAAAAAAB4bAAAP+cAAAI3WFla - IAAAAAAAAFejAACrOgAAFh1YWVogAAAAAAAAJsgAABT0AAC60VhZWiAAAAAAAADz2AAB - AAAAARYIc2YzMgAAAAAAAQu3AAAFlv//81cAAAcpAAD91///+7f///2mAAAD2gAAwPZj - dXJ2AAAAAAAAAAEBzQAAY3VydgAAAAAAAAABAc0AAGN1cnYAAAAAAAAAAQHNAAB2Y2d0 - AAAAAAAAAAEAANF0AAAAAAABAAAAANF0AAAAAAABAAAAANF0AAAAAAABAABuZGluAAAA - AAAAADAAAKPAAABXwAAASsAAAJyAAAAmlwAAE1sAAFBAAABUQAACMzMAAjMzAAIzM2Rl - c2MAAAAAAAAACkNpbmVtYSBIRAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABtbHVj - AAAAAAAAABIAAAAMbmJOTwAAABIAAADocHRQVAAAABIAAADoc3ZTRQAAABIAAADoZmlG - SQAAABIAAADoZGFESwAAABIAAADoemhDTgAAABIAAADoZnJGUgAAABIAAADoamFKUAAA - ABIAAADoZW5VUwAAABIAAADocGxQTAAAABIAAADocHRCUgAAABIAAADoZXNFUwAAABIA - AADoemhUVwAAABIAAADocnVSVQAAABIAAADoa29LUgAAABIAAADoZGVERQAAABIAAADo - bmxOTAAAABIAAADoaXRJVAAAABIAAADoAEMAaQBuAGUAbQBhACAASABEAABtbW9kAAAA - AAAABhAAAJIhAgIIGcXeAQAAAAAAAAAAAAAAAAAAAAAAdGV4dAAAAABDb3B5cmlnaHQg - QXBwbGUsIEluYy4sIDIwMDkA - - ReadOnly - NO - RowAlign - 1 - RowSpacing - 36 - SheetTitle - Canvas 1 - SmartAlignmentGuidesActive - YES - SmartDistanceGuidesActive - YES - UniqueID - 1 - UseEntirePage - - VPages - 1 - WindowInfo - - CurrentSheet - 0 - ExpandedCanvases - - FitInWindow - - Frame - {{193, 584}, {716, 841}} - ListView - - OutlineWidth - 142 - RightSidebar - - Sidebar - - SidebarWidth - 138 - VisibleRegion - {{1, 1}, {574, 732}} - Zoom - 1 - ZoomValues - - - Canvas 1 - 0.0 - 1 - - - - saveQuickLookFiles - YES - - diff --git a/src/externals/pio1/doc/images/block-cyclic-rearr.png b/src/externals/pio1/doc/images/block-cyclic-rearr.png deleted file mode 100644 index 549db654b84..00000000000 Binary files a/src/externals/pio1/doc/images/block-cyclic-rearr.png and /dev/null differ diff --git a/src/externals/pio1/doc/images/block-cyclic.eps b/src/externals/pio1/doc/images/block-cyclic.eps deleted file mode 100644 index baba1098988..00000000000 --- a/src/externals/pio1/doc/images/block-cyclic.eps +++ /dev/null @@ -1,1219 +0,0 @@ -%!PS-Adobe-3.0 EPSF-3.0 -%%HiResBoundingBox: 0.000000 0.000000 504.000000 295.000000 -%APL_DSC_Encoding: UTF8 -%APLProducer: (Version 10.5.8 (Build 9L31a) Quartz PS Context) -%%Title: (Unknown) -%%Creator: (Unknown) -%%CreationDate: (Unknown) -%%For: (Unknown) -%%DocumentData: Clean7Bit -%%LanguageLevel: 2 -%%Pages: 1 -%%BoundingBox: 0 0 504 295 -%%EndComments -%%BeginProlog -%%BeginFile: cg-pdf.ps -%%Copyright: Copyright 2000-2004 Apple Computer Incorporated. -%%Copyright: All Rights Reserved. -currentpacking true setpacking -/cg_md 141 dict def -cg_md begin -/L3? 
languagelevel 3 ge def -/bd{bind def}bind def -/ld{load def}bd -/xs{exch store}bd -/xd{exch def}bd -/cmmtx matrix def -mark -/sc/setcolor -/scs/setcolorspace -/dr/defineresource -/fr/findresource -/T/true -/F/false -/d/setdash -/w/setlinewidth -/J/setlinecap -/j/setlinejoin -/M/setmiterlimit -/i/setflat -/rc/rectclip -/rf/rectfill -/rs/rectstroke -/f/fill -/f*/eofill -/sf/selectfont -/s/show -/xS/xshow -/yS/yshow -/xyS/xyshow -/S/stroke -/m/moveto -/l/lineto -/c/curveto -/h/closepath -/n/newpath -/q/gsave -/Q/grestore -counttomark 2 idiv -{ld}repeat pop -/SC{ - /ColorSpace fr scs -}bd -/sopr /setoverprint where{pop/setoverprint}{/pop}ifelse ld -/soprm /setoverprintmode where{pop/setoverprintmode}{/pop}ifelse ld -/cgmtx matrix def -/sdmtx{cgmtx currentmatrix pop}bd -/CM {cgmtx setmatrix}bd -/cm {cmmtx astore CM concat}bd -/W{clip newpath}bd -/W*{eoclip newpath}bd -statusdict begin product end dup (HP) anchorsearch{ - pop pop pop - true -}{ - pop - (hp) anchorsearch{ - pop pop true - }{ - pop false - }ifelse -}ifelse -{ - { - { - pop pop - (0)dup 0 4 -1 roll put - F charpath - }cshow - } -}{ - {F charpath} -}ifelse -/cply exch bd -/cps {cply stroke}bd -/pgsave 0 def -/bp{/pgsave save store}bd -/ep{pgsave restore showpage}def -/re{4 2 roll m 1 index 0 rlineto 0 exch rlineto neg 0 rlineto h}bd -/scrdict 10 dict def -/scrmtx matrix def -/patarray 0 def -/createpat{patarray 3 1 roll put}bd -/makepat{ -scrmtx astore pop -gsave -initgraphics -CM -patarray exch get -scrmtx -makepattern -grestore -setpattern -}bd -/cg_BeginEPSF{ - userdict save/cg_b4_Inc_state exch put - userdict/cg_endepsf/cg_EndEPSF load put - count userdict/cg_op_count 3 -1 roll put - countdictstack dup array dictstack userdict/cg_dict_array 3 -1 roll put - 3 sub{end}repeat - /showpage {} def - 0 setgray 0 setlinecap 1 setlinewidth 0 setlinejoin - 10 setmiterlimit [] 0 setdash newpath - false setstrokeadjust false setoverprint -}bd -/cg_EndEPSF{ - countdictstack 3 sub { end } repeat - cg_dict_array 3 1 index length 3 sub getinterval - {begin}forall - count userdict/cg_op_count get sub{pop}repeat - userdict/cg_b4_Inc_state get restore - F setpacking -}bd -/cg_biproc{currentfile/RunLengthDecode filter}bd -/cg_aiproc{currentfile/ASCII85Decode filter/RunLengthDecode filter}bd -/ImageDataSource 0 def -L3?{ - /cg_mibiproc{pop pop/ImageDataSource{cg_biproc}def}bd - /cg_miaiproc{pop pop/ImageDataSource{cg_aiproc}def}bd -}{ - /ImageBandMask 0 def - /ImageBandData 0 def - /cg_mibiproc{ - string/ImageBandMask xs - string/ImageBandData xs - /ImageDataSource{[currentfile/RunLengthDecode filter dup ImageBandMask/readstring cvx - /pop cvx dup ImageBandData/readstring cvx/pop cvx]cvx bind}bd - }bd - /cg_miaiproc{ - string/ImageBandMask xs - string/ImageBandData xs - /ImageDataSource{[currentfile/ASCII85Decode filter/RunLengthDecode filter - dup ImageBandMask/readstring cvx - /pop cvx dup ImageBandData/readstring cvx/pop cvx]cvx bind}bd - }bd -}ifelse -/imsave 0 def -/BI{save/imsave xd mark}bd -/EI{imsave restore}bd -/ID{ -counttomark 2 idiv -dup 2 add -dict begin -{def} repeat -pop -/ImageType 1 def -/ImageMatrix[Width 0 0 Height neg 0 Height]def -currentdict dup/ImageMask known{ImageMask}{F}ifelse exch -L3?{ - dup/MaskedImage known - { - pop - << - /ImageType 3 - /InterleaveType 2 - /DataDict currentdict - /MaskDict - << /ImageType 1 - /Width Width - /Height Height - /ImageMatrix ImageMatrix - /BitsPerComponent 1 - /Decode [0 1] - currentdict/Interpolate known - {/Interpolate Interpolate}if - >> - >> - }if -}if -exch -{imagemask}{image}ifelse 
-end -}bd -/cguidfix{statusdict begin mark version end -{cvr}stopped{cleartomark 0}{exch pop}ifelse -2012 lt{dup findfont dup length dict begin -{1 index/FID ne 2 index/UniqueID ne and -{def} {pop pop} ifelse}forall -currentdict end definefont pop -}{pop}ifelse -}bd -/t_array 0 def -/t_i 0 def -/t_c 1 string def -/x_proc{ - exch t_array t_i get add exch moveto - /t_i t_i 1 add store -}bd -/y_proc{ - t_array t_i get add moveto - /t_i t_i 1 add store -}bd -/xy_proc{ - - t_array t_i 2 copy 1 add get 3 1 roll get - 4 -1 roll add 3 1 roll add moveto - /t_i t_i 2 add store -}bd -/sop 0 def -/cp_proc/x_proc ld -/base_charpath -{ - /t_array xs - /t_i 0 def - { - t_c 0 3 -1 roll put - currentpoint - t_c cply sop - cp_proc - }forall - /t_array 0 def -}bd -/sop/stroke ld -/nop{}def -/xsp/base_charpath ld -/ysp{/cp_proc/y_proc ld base_charpath/cp_proc/x_proc ld}bd -/xysp{/cp_proc/xy_proc ld base_charpath/cp_proc/x_proc ld}bd -/xmp{/sop/nop ld /cp_proc/x_proc ld base_charpath/sop/stroke ld}bd -/ymp{/sop/nop ld /cp_proc/y_proc ld base_charpath/sop/stroke ld}bd -/xymp{/sop/nop ld /cp_proc/xy_proc ld base_charpath/sop/stroke ld}bd -/refnt{ -findfont dup length dict copy dup -/Encoding 4 -1 roll put -definefont pop -}bd -/renmfont{ -findfont dup length dict copy definefont pop -}bd -L3? dup dup{save exch}if -/Range 0 def -/DataSource 0 def -/val 0 def -/nRange 0 def -/mulRange 0 def -/d0 0 def -/r0 0 def -/di 0 def -/ri 0 def -/a0 0 def -/a1 0 def -/r1 0 def -/r2 0 def -/dx 0 def -/Nsteps 0 def -/sh3tp 0 def -/ymax 0 def -/ymin 0 def -/xmax 0 def -/xmin 0 def -/setupFunEval -{ - begin - /nRange Range length 2 idiv store - /mulRange - - [ - 0 1 nRange 1 sub - { - 2 mul/nDim2 xd - Range nDim2 get - Range nDim2 1 add get - 1 index sub - - 255 div - exch - }for - ]store - end -}bd -/FunEval -{ - begin - - nRange mul /val xd - - 0 1 nRange 1 sub - { - dup 2 mul/nDim2 xd - val - add DataSource exch get - mulRange nDim2 get mul - mulRange nDim2 1 add get - add - }for - end -}bd -/max -{ - 2 copy lt - {exch pop}{pop}ifelse -}bd -/sh2 -{ - /Coords load aload pop - 3 index 3 index translate - - 3 -1 roll sub - 3 1 roll exch - sub - 2 copy - dup mul exch dup mul add sqrt - dup - scale - atan - - rotate - - /Function load setupFunEval - - - clippath {pathbbox}stopped {0 0 0 0}if newpath - /ymax xs - /xmax xs - /ymin xs - /xmin xs - currentdict/Extend known - { - /Extend load 0 get - { - 0/Function load FunEval sc - xmin ymin xmin abs ymax ymin sub rectfill - }if - }if - - /Nsteps/Function load/Size get 0 get 1 sub store - /dx 1 Nsteps div store - gsave - /di ymax ymin sub store - /Function load - - 0 1 Nsteps - { - 1 index FunEval sc - 0 ymin dx di rectfill - dx 0 translate - }for - pop - grestore - currentdict/Extend known - { - /Extend load 1 get - { - Nsteps/Function load FunEval sc - 1 ymin xmax 1 sub abs ymax ymin sub rectfill - }if - }if -}bd -/shp -{ - 4 copy - - dup 0 gt{ - 0 exch a1 a0 arc - }{ - pop 0 moveto - }ifelse - dup 0 gt{ - 0 exch a0 a1 arcn - }{ - pop 0 lineto - }ifelse - - fill - - dup 0 gt{ - 0 exch a0 a1 arc - }{ - pop 0 moveto - }ifelse - dup 0 gt{ - 0 exch a1 a0 arcn - }{ - pop 0 lineto - }ifelse - - fill -}bd -/calcmaxs -{ - - xmin dup mul ymin dup mul add sqrt - xmax dup mul ymin dup mul add sqrt - xmin dup mul ymax dup mul add sqrt - xmax dup mul ymax dup mul add sqrt - max max max -}bd -/sh3 -{ - /Coords load aload pop - 5 index 5 index translate - 3 -1 roll 6 -1 roll sub - 3 -1 roll 5 -1 roll sub - 2 copy dup mul exch dup mul add sqrt - /dx xs - 2 copy 0 ne exch 0 ne or - { - - exch atan 
rotate - }{ - pop pop - }ifelse - - /r2 xs - /r1 xs - /Function load - dup/Size get 0 get 1 sub - /Nsteps xs - setupFunEval - - - - - - dx r2 add r1 lt{ - - 0 - }{ - dx r1 add r2 le - { - 1 - }{ - r1 r2 eq - { - 2 - }{ - 3 - }ifelse - }ifelse - }ifelse - /sh3tp xs - clippath {pathbbox}stopped {0 0 0 0}if - newpath - /ymax xs - /xmax xs - /ymin xs - /xmin xs - - dx dup mul r2 r1 sub dup mul sub dup 0 gt - { - sqrt r2 r1 sub atan - /a0 exch 180 exch sub store - /a1 a0 neg store - }{ - pop - /a0 0 store - /a1 360 store - }ifelse - currentdict/Extend known - { - /Extend load 0 get r1 0 gt and - { - 0/Function load FunEval sc - - - - - { - { - dx 0 r1 360 0 arcn - xmin ymin moveto - xmax ymin lineto - xmax ymax lineto - xmin ymax lineto - xmin ymin lineto - eofill - } - { - r1 0 gt{0 0 r1 0 360 arc fill}if - } - { - - - - - 0 r1 xmin abs r1 add neg r1 shp - } - { - - - r2 r1 gt{ - - 0 r1 - r1 neg r2 r1 sub div dx mul - 0 - shp - }{ - - - - 0 r1 calcmaxs - dup - - r2 add dx mul dx r1 r2 sub sub div - neg - exch 1 index - abs exch sub - shp - }ifelse - } - }sh3tp get exec - }if - }if - - /d0 0 store - /r0 r1 store - /di dx Nsteps div store - /ri r2 r1 sub Nsteps div store - /Function load - 0 1 Nsteps - { - 1 index FunEval sc - d0 di add r0 ri add d0 r0 shp - { - - d0 0 r0 a1 a0 arc - d0 di add 0 r0 ri add a0 a1 arcn - fill - - - d0 0 r0 a0 a1 arc - d0 di add 0 r0 ri add a1 a0 arcn - fill - }pop - - - /d0 d0 di add store - /r0 r0 ri add store - }for - pop - - currentdict/Extend known - { - /Extend load 1 get r2 0 gt and - { - Nsteps/Function load FunEval sc - - - - - { - { - dx 0 r2 0 360 arc fill - } - { - dx 0 r2 360 0 arcn - xmin ymin moveto - xmax ymin lineto - xmax ymax lineto - xmin ymax lineto - xmin ymin lineto - eofill - } - { - - - xmax abs r1 add r1 dx r1 shp - } - { - - r2 r1 gt{ - - - - calcmaxs dup - - r1 add dx mul dx r2 r1 sub sub div - exch 1 index - exch sub - dx r2 - shp - }{ - - r1 neg r2 r1 sub div dx mul - 0 - dx - r2 - shp - }ifelse - } - } - sh3tp get exec - }if - }if -}bd -/sh -{ - begin - /ShadingType load dup dup 2 eq exch 3 eq or - { - gsave - newpath - /ColorSpace load scs - currentdict/BBox known - { - /BBox load aload pop - 2 index sub - 3 index - 3 -1 roll exch sub - exch rectclip - }if - 2 eq - {sh2}{sh3}ifelse - grestore - }{ - - pop - (DEBUG: shading type unimplemented\n)print flush - }ifelse - end -}bd -{restore}if not dup{save exch}if - L3?{ - /sh/shfill ld - /csq/clipsave ld - /csQ/cliprestore ld - }if -{restore}if -end -setpacking -%%EndFile -%%EndProlog -%%BeginSetup -%%EndSetup -%%Page: 1 1 -%%PageBoundingBox: 0 0 504 295 -%%BeginPageSetup -cg_md begin -bp -sdmtx -%RBIBeginFontSubset: Helvetica -%!FontType1-1.0: Helvetica 1.0000.0.0000 - 14 dict begin/FontName /Helvetica def - /PaintType 0 def - /Encoding 256 array 0 1 255{1 index exch/.notdef put}for - dup 33 /P put - dup 34 /E put - dup 35 /space put - dup 36 /zero put - dup 37 /one put - dup 38 /two put - dup 39 /M put - dup 40 /e put - dup 41 /m put - dup 42 /o put - dup 43 /r put - dup 44 /y put - dup 45 /l put - dup 46 /a put - dup 47 /u put - dup 48 /t put - dup 49 /D put - dup 50 /i put - dup 51 /s put - dup 52 /k put - dup 53 /S put - dup 54 /bracketleft put - dup 55 /three put - dup 56 /bracketright put - dup 57 /six put - dup 58 /C put - dup 59 /n put - readonly def - 42/FontType resourcestatus{pop pop false}{true}ifelse - %APLsfntBegin - {currentfile 0(%APLsfntEnd\n)/SubFileDecode filter flushfile}if - /FontType 42 def - /FontMatrix matrix def - /FontBBox[2048 -1947 1 index div -985 2 index div 
2961 3 index div 2297 5 -1 roll div]cvx def - /sfnts [< - 74727565000900000000000063767420000000000000009C0000036C6670676D000000000000040800000A0C676C79660000000000000E14000017C46865616400000000000025D80000003868686561000000000000261000000024686D74780000000000002634000000706C6F636100000000000026A40000003A6D61787000000000000026E000000020707265700000000000002700000003CF05C0001005BD00280580001A042F001F0000FFD90000FFDA0000FFD9FE55FFE605C70010FE6DFFF1033B000000B9000000B902FE3F3C00C0008D009B00AF000600A800C00028005E009800C9016A00B9015C00B400D6011E002E0080000400B8004C00CC01FFFFD1006600A400AF007400C2009500B1000C0028006D0015004C008E0125FF7A000C0040004C00620084FFA200240038008600BD0039005E008E00EDFFA9FFB300400052005500AA00AB00C200CB012302B10413FFAEFFE4000800510074008400AA00D1FF4CFFAF0012002C004200500051008400BE012503DAFF680018003B0098009C009F00A100C100EC018201B4FF68FF76FFD0FFE100020018001C00530053007D01B401E103AF0486FF9CFFEAFFFE001F0028002A00520060009300A300AA00AF00AF00C001000145016B0174019301950240028202B404850517FEFD00060029004700470048006F008800B400B900C400F200F901EF02180310037403C5FF35FFF3000B004B004C0052005500650076007600870087008E00AB00BB0106013001430150017D0194019501D3022A025502580277027802E6034E035C037903D3047304B2058C0598060BFEF5FFBBFFC7FFD50017001D005B0072007E009C00C200D000F400FA01030106011C0125013B0142015E015E0180019B02B901A101B9025001C001D002AA01DF01E301EF01FB0205020C0215022B0274029302AB02C202CE03690395039903DF03F5043E050205A105E5062507DBFE62FE89FECEFF3BFFE1FFF800030008002100390042004E005F0061006F00700034007F008E00AD00AD00AF00BD00C400C500C900C900C900E3011C00ED00F800F901000112011A0132014D014D014E014F01660169019E01BA01BA01BE01E301EF01F602000200020902110217021C02530262026D028002D50280031B032A034A035A03AF03AF03C803D603FB03FB04050413041504470449008C046D049A049A04A604A804B204CF0539053E054E055605800589058C036305D105D6067E068E06B206EF06F00728074C076F078C00B400C900C000C10000000000000000000000000004012400AF0032006E0063014401620096014301A10161008A00740064018801EF01700028FF5D037E0347023000AA00BE007B0062009A007D0089035C00A1FFD803AA00D70093006C0000008000A70442001D0597001D00820030002A - 
002A002A002A002A40292A292827262524232221201F1E1D1C1B1A191817161514131211100D0C0B0A090807060504030201002C4523466020B02660B004262348482D2C452346236120B02661B004262348482D2C45234660B0206120B04660B004262348482D2C4523462361B0206020B02661B02061B004262348482D2C45234660B0406120B06660B004262348482D2C4523462361B0406020B02661B04061B004262348482D2C0110203C003C2D2C20452320B0CD442320B8015A51582320B08D44235920B0ED51582320B04D44235920B09051582320B00D44235921212D2C20204518684420B001602045B04676688A4560442D2C01B9400000000A2D2C00B9000040000B2D2C2045B00043617D6818B0004360442D2C45B01A234445B01923442D2C2045B00325456164B050515845441B2121592D2C20B0032552582359212D2C69B04061B0008B0C6423648BB8400062600C642364615C58B0036159B002602D2C45B0112BB0172344B0177AE5182D2C45B0112BB01723442D2C45B0112BB017458CB0172344B0177AE5182D2CB002254661658A46B040608B482D2CB0022546608A46B040618C482D2C4B53205C58B002855958B00185592D2C20B0032545B019236A4445B01A23444565234520B00325606A20B009234223688A6A606120B0005258B21A401A4523614459B0005058B219401945236144592D2CB9187E3B210B2D2CB92D412D410B2D2CB93B21187E0B2D2CB93B21E7830B2D2CB92D41D2C00B2D2CB9187EC4E00B2D2C4B525845441B2121592D2C0120B003252349B04060B0206320B000525823B002253823B002256538008A63381B212121212159012D2C456920B00943B0022660B00325B005254961B0805358B21940194523616844B21A401A4523606A44B209191A45652345604259B00943608A103A2D2C01B005251023208AF500B0016023EDEC2D2C01B005251023208AF500B0016123EDEC2D2C01B0062510F500EDEC2D2C20B001600110203C003C2D2C20B001610110203C003C2D2C764520B003254523616818236860442D2C7645B00325452361682318456860442D2C7645B0032545616823452361442D2C4569B014B0324B505821B0205961442DB8002B2C4BB800095058B101018E59B801FF85B800441DB9000900035F5E2DB8002C2C2020456944B001602DB8002D2CB8002C2A212DB8002E2C2046B003254652582359208A208A49648A204620686164B004254620686164525823658A592F20B00053586920B000545821B040591B6920B000545821B0406559593A2DB8002F2C2046B00425465258238A592046206A6164B0042546206A61645258238A592FFD2DB800302C4B20B0032650585158B080441BB04044591B21212045B0C05058B0C0441B2159592DB800312C2020456944B0016020 - 
20457D691844B001602DB800322CB800312A2DB800332C4B20B003265358B0801BB040598A8A20B0032653582321B0C08A8A1B8A235920B0032653582321B801008A8A1B8A235920B0032653582321B801408A8A1B8A235920B80003265358B0032545B8018050582321B8018023211BB003254523212321591B2159442DB800342C4B535845441B2121592DB800352C4BB800095058B101018E59B801FF85B800441DB9000900035F5E2DB800362C2020456944B001602DB800372CB800362A212DB800382C2046B003254652582359208A208A49648A204620686164B004254620686164525823658A592F20B00053586920B000545821B040591B6920B000545821B0406559593A2DB800392C2046B00425465258238A592046206A6164B0042546206A61645258238A592FFD2DB8003A2C4B20B0032650585158B080441BB04044591B21212045B0C05058B0C0441B2159592DB8003B2C2020456944B001602020457D691844B001602DB8003C2CB8003B2A2DB8003D2C4B20B003265358B0801BB040598A8A20B0032653582321B0C08A8A1B8A235920B0032653582321B801008A8A1B8A235920B0032653582321B801408A8A1B8A235920B80003265358B0032545B8018050582321B8018023211BB003254523212321591B2159442DB8003E2C4B535845441B2121592DB8003F2C4BB800095058B101018E59B801FF85B800441DB9000900035F5E2DB800402C2020456944B001602DB800412CB800402A212DB800422C2046B003254652582359208A208A49648A204620686164B004254620686164525823658A592F20B00053586920B000545821B040591B6920B000545821B0406559593A2DB800432C2046B00425465258238A592046206A6164B0042546206A61645258238A592FFD2DB800442C4B20B0032650585158B080441BB04044591B21212045B0C05058B0C0441B2159592DB800452C2020456944B001602020457D691844B001602DB800462CB800452A2DB800472C4B20B003265358B0801BB040598A8A20B0032653582321B0C08A8A1B8A235920B0032653582321B801008A8A1B8A235920B0032653582321B801408A8A1B8A235920B80003265358B0032545B8018050582321B8018023211BB003254523212321591B2159442DB800482C4B535845441B2121592DB800492C4BB800095058B101018E59B801FF85B800441DB9000900035F5E2DB8004A2C2020456944B001602DB8004B2CB8004A2A212DB8004C2C2046B003254652582359208A208A49648A204620686164B004254620686164525823658A592F20B00053586920B000545821B040591B6920B000545821B0406559593A2DB8004D2C2046B00425465258238A592046206A6164B0042546206A61645258238A592FFD2DB8004E2C4B20B0032650585158 - 
B080441BB04044591B21212045B0C05058B0C0441B2159592DB8004F2C2020456944B001602020457D691844B001602DB800502CB8004F2A2DB800512C4B20B003265358B0801BB040598A8A20B0032653582321B0C08A8A1B8A235920B0032653582321B801008A8A1B8A235920B0032653582321B801408A8A1B8A235920B80003265358B0032545B8018050582321B8018023211BB003254523212321591B2159442DB800522C4B535845441B2121592DB800532C4BB800095058B101018E59B801FF85B800441DB9000900035F5E2DB800542C2020456944B001602DB800552CB800542A212DB800562C2046B003254652582359208A208A49648A204620686164B004254620686164525823658A592F20B00053586920B000545821B040591B6920B000545821B0406559593A2DB800572C2046B00425465258238A592046206A6164B0042546206A61645258238A592FFD2DB800582C4B20B0032650585158B080441BB04044591B21212045B0C05058B0C0441B2159592DB800592C2020456944B001602020457D691844B001602DB8005A2CB800592A2DB8005B2C4B20B003265358B0801BB040598A8A20B0032653582321B0C08A8A1B8A235920B0032653582321B801008A8A1B8A235920B0032653582321B801408A8A1B8A235920B80003265358B0032545B8018050582321B8018023211BB003254523212321591B2159442DB8005C2C4B535845441B2121592D00020042000004D005BD00030007003FB800532BB800082FB800092FB8000810B80000D0B800002FB8000910B80003DCB80004DCB8000010B80007DC00BA0007000000562BBA0002000500562B3031331121112711211142048EB8FCE205BDFA43B8044DFBB3000000020040FFD9041C0598000F001C00714017870501460815350F051C35070D1238036F18380B1E471D1076C418D4EDFDED003FED3FED313043794034001B0D2601251A2609250526160E18280014001228011B081828001006122801170C1528011302152801190A1C280011041C28002B2B2B2B012B2B2B2B2B2B2B2B2B81005D001716111007022120272611343712211236113402232202111417163303407C60577EFEE2FEFE7E693F7601358AA678AD9F932F48AE0598E5B1FECCFEDCBFFEEEE0BB013BF4AF0146FAE5F80152F4013BFED5FEDDDB85CB000000000100C4000002D5059200080023B10801B80133400C0404070C04079605000A47091076C418C4D5FD39003F3FF4CD313013353E013733112311C4C39A268EC003F68A1359A6FA6E03F60000000100400000041E059D002200A6404E3604460457056B1D6E1E7A1E84018702082A085A196B197C197C1CB519050022010F041C0E1921071C19040100051F0F0F22130A351305201F7521220C217F0738166F220E270F811F382224 - 
[Elided: remainder of this deleted PostScript image file — embedded subset-font data (hex CharStrings and eexec-encrypted Type 1 data) and drawing commands for a small labeled box-and-arrow diagram; no prose content.]
diff --git a/src/externals/pio1/doc/images/block-cyclic.graffle b/src/externals/pio1/doc/images/block-cyclic.graffle
deleted file mode 100644
index 72677bac115..00000000000
--- a/src/externals/pio1/doc/images/block-cyclic.graffle
+++ /dev/null
@@ -1,2054 +0,0 @@
[Elided: 2054 deleted lines of OmniGraffle XML plist and base64-encoded QuickLook preview data for the block-cyclic layout figure; the recoverable labels are PE 0, PE 1, PE 2, Memory layout, Disk layout, Start, Count, and cell indices [1], [2], [3], [6].]
[... remaining content of the deleted OmniGraffle source file omitted: base64-encoded PDF data, QuickLook thumbnail, ICC color profile, and canvas/window settings ...]
diff --git a/src/externals/pio1/doc/images/block-cyclic.png b/src/externals/pio1/doc/images/block-cyclic.png deleted file mode 100644 index 2853e25b073..00000000000 Binary files a/src/externals/pio1/doc/images/block-cyclic.png and /dev/null differ diff --git a/src/externals/pio1/doc/images/dof-rearr.eps b/src/externals/pio1/doc/images/dof-rearr.eps deleted file mode 100644 index ed75b271ad5..00000000000 --- a/src/externals/pio1/doc/images/dof-rearr.eps +++ /dev/null @@ -1,3366 +0,0 @@ -%!PS-Adobe-3.0 EPSF-3.0
[... deleted EPS content omitted: DSC header comments, the Quartz cg-pdf.ps prolog procedures, an embedded Helvetica font subset (Type 42/Type 1 data), and color-space/page setup ...]
exch/ColorSpace dr pop -%%EndPageSetup -0.60000002 i -/Cs1 SC -1 1 1 sc -q -0 0 534 429 rc --23.5 437.5 m -552.5 437.5 l -552.5 -295.5 l --23.5 -295.5 l -h --23.5 437.5 m -f -1 1 0 sc -2.9513016 403 m -41.951302 403 l -41.951302 364 l -2.9513016 364 l -h -2.9513016 403 m -f -1 J -1 j -0 0 0 sc -1 0 0 -1 -23 437 cm -25.951302 34 m -64.951302 34 l -64.951302 73 l -25.951302 73 l -h -25.951302 34 m -S -1 1 0 sc -CM -41.951302 403 m -80.951302 403 l -80.951302 364 l -41.951302 364 l -h -41.951302 403 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -64.951302 34 m -103.9513 34 l -103.9513 73 l -64.951302 73 l -h -64.951302 34 m -S -1 1 0 sc -CM -80.950996 403 m -119.95099 403 l -119.95099 364 l -80.950996 364 l -h -80.950996 403 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -103.951 34 m -142.95099 34 l -142.95099 73 l -103.951 73 l -h -103.951 34 m -S -1 0.40000001 0.40000001 sc -CM -159.99699 403 m -198.99699 403 l -198.99699 364 l -159.99699 364 l -h -159.99699 403 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -182.99699 34 m -221.99699 34 l -221.99699 73 l -182.99699 73 l -h -182.99699 34 m -S -1 0.40000001 0.40000001 sc -CM -198.99699 403 m -237.99701 403 l -237.99701 364 l -198.99699 364 l -h -198.99699 403 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -221.99699 34 m -260.99701 34 l -260.99701 73 l -221.99699 73 l -h -221.99699 34 m -S -0.40000001 1 1 sc -CM -276.99701 403 m -315.99701 403 l -315.99701 364 l -276.99701 364 l -h -276.99701 403 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -299.99701 34 m -338.99701 34 l -338.99701 73 l -299.99701 73 l -h -299.99701 34 m -S -0.40000001 1 1 sc -CM -315.99701 403 m -354.99701 403 l -354.99701 364 l -315.99701 364 l -h -315.99701 403 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -338.99701 34 m -377.99701 34 l -377.99701 73 l -338.99701 73 l -h -338.99701 34 m -S -/Cs2 SC -0 sc -0 i -1 0 0 -1 64.451302 420 cm -/F1.1[ 15 0 0 -15 0 0]sf --16.259766 6 m -(!"#$)[ 10.004884 10.004884 4.167480 0.000000 ] xS -1 0 0 -1 201.451 420 cm --16.259766 6 m -(!"#%)[ 10.004884 10.004884 4.167480 0.000000 ] xS -1 0 0 -1 335.95099 420 cm --16.259766 6 m -(!"#&)[ 10.004884 10.004884 4.167480 0.000000 ] xS -1 0 0 -1 467.49701 383.5 cm --20.006104 -3 m -('\(\)*)[ 10.832520 8.342284 12.495117 0.000000 ] xS --50.445557 15 m -(#+,-\(\)*\(./0/\(1)[ 4.167480 8.342285 8.342285 7.500000 8.342285 12.495117 8.342285 8.342285 7.500000 3.332520 4.167480 3.332520 8.342285 0.000000 ] xS -1 0 0 -1 471 221 cm --36.679688 6 m -(2/.3#456\(70)[ 10.832520 3.332520 7.500002 7.500000 4.167479 3.332520 8.342285 7.500000 8.342285 8.342285 0.000000 ] xS -1 0 0 -1 470 115 cm --34.171143 6 m -(-\(\)*289)[ 7.500000 8.342285 12.495117 8.342285 10.832520 11.667484 0.000000 ] xS -1 0 0 -1 92.41275 114 cm -/F1.1[ 17 0 0 -17 0 0]sf --28.351318 6 m -(:&;#<;#=>)[ 4.723145 9.454590 4.723145 4.723145 9.454590 4.723145 4.723145 9.454590 0.000000 ] xS -1 0 0 -1 225.80214 114 cm --18.900879 6 m -(:%;#?>)[ 4.723145 9.454590 4.723145 4.723145 9.454590 0.000000 ] xS -1 0 0 -1 350.65463 114 cm --28.351318 6 m -(:@;#A;#B>)[ 4.723145 9.454590 4.723145 4.723145 9.454590 4.723145 4.723145 9.454590 0.000000 ] xS -1 0 0 -1 85 142 cm -/F1.1[ 15 0 0 -15 0 0]sf --16.259766 6 m -(!"#$)[ 10.004884 10.004884 4.167480 0.000000 ] xS -1 0 0 -1 222 142 cm --16.259766 6 m -(!"#%)[ 10.004884 10.004884 4.167480 0.000000 ] xS -1 0 0 -1 356.5 142 cm --16.259766 6 m -(!"#&)[ 10.004884 10.004884 4.167480 0.000000 ] xS -0.60000002 i -/Cs1 SC -0 0 0 sc -1 0 0 -1 -23 437 cm -70.999802 131 m -109.9998 131 l -109.9998 170 l -70.999802 170 l -h -70.999802 131 m -S -1 1 0 sc -CM -86.999802 306 m 
-125.9998 306 l -125.9998 267 l -86.999802 267 l -h -86.999802 306 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -109.9998 131 m -148.9998 131 l -148.9998 170 l -109.9998 170 l -h -109.9998 131 m -S -1 1 0 sc -CM -165.08701 306 m -204.08701 306 l -204.08701 267 l -165.08701 267 l -h -165.08701 306 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -188.08701 131 m -227.08701 131 l -227.08701 170 l -188.08701 170 l -h -188.08701 131 m -S -227.08701 131 m -266.08701 131 l -266.08701 170 l -227.08701 170 l -h -227.08701 131 m -S -0.40000001 1 1 sc -CM -243 306 m -282 306 l -282 267 l -243 267 l -h -243 306 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -266 131 m -305 131 l -305 170 l -266 170 l -h -266 131 m -S -0.40000001 1 1 sc -CM -282.08701 306 m -321.08701 306 l -321.08701 267 l -282.08701 267 l -h -282.08701 306 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -305.08701 131 m -344.08701 131 l -344.08701 170 l -305.08701 170 l -h -305.08701 131 m -S -0.40000001 1 1 sc -CM -321 306 m -360 306 l -360 267 l -321 267 l -h -321 306 m -f -0 0 0 sc -1 0 0 -1 -23 437 cm -344 131 m -383 131 l -383 170 l -344 170 l -h -344 131 m -S -40 0 0 39 204 267 cm -BI -/Width 120 -/Height 117 -/BitsPerComponent 8 -/Decode[ -0 1 -0 1 -0 1 -] -/Interpolate true -/DataSource cg_aiproc -ID -JH16$R/m3dIiTR1IX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?H -IfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI -!.KCtIX_?HIs_6FIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI -!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCt -IX_?HIfOq$IfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!.KCt -IX_?HIfOqI!.KCtIX_?HIfOqI!.KCtIX_?HIfOqI!&$&&!!O6)5;I_UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWp -q>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZYB!;ZWp -q>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHj -rrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq-*duq>UHj -rrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp!;ZWpq>UHjrrDlp -!;ZWpq>UHjrrDlp!;ZWpq>UG$@/9j,14Odcs8W&uIlfdnq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?h -rrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio -!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hs*OhArrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio -!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQo -q#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDgtrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQo -q#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!;QQoq#:?hrrDio!*s"/!!P-- -IqJ`\rW%F04TP6V!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKn -p\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6f -rrDfn!;HM@!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6f -rrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn -!;HKnpKIRsp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t6frrDfn -!;HKnp\t6frrDfn!;HKnp\t6frrDfn!;HKnp\t5!@/9j,14Odcs8W&uIl]^mpAY-drrDcm!;?EmpAY-d -rrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm -!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-ds*Oh?rrDcm!;?EmpAY-drrDcm -!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?Em -pAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDarrrDcm!;?EmpAY-drrDcm!;?Em -pAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-drrDcm!;?EmpAY-d 
-rrDcm!*s"/!!P--IqJ`\rW%F/4TP*R!;$3joD\g^rrDZj!;$3joD\g^rrDZj!;$3joD\g^rrDZj!;$3j -oD\g^rrDZj!;$3joD\g^rrDZj!;$3joD\g^rrDZj!;$3joD\g^rrDZj!;$3joD\g^rrDZj!;$3joD\g^ -rrDZj!;$3joD\g^rrDZj!;$5Q53&rsnPu%e0K.nGa!frsnPu%e0K.nGa!frsnPu%e0K.nGa!frsnPu%e0K.nGa!frsnPu%e0K. -nGa!frsnPu%e0K.nGa!frsnPu%e0K.nGa!frsi4?"T&0&14Odcs8W&uIlTXon,Esfrt+Z!&F]W1n,Esf -rt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z! -&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfs*PCHrt+Z!&F]W1n,Esfrt+Z! -&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1 -n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+X'rt+Z!&F]W1n,Esfrt+Z!&F]W1 -n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esfrt+Z!&F]W1n,Esf -rt+Z!&6`HFqZ$dTIt))1s8E"J3'@E4rt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c" -'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4 -mf*pfrt=c"'(5c4mf*pfrt=c"HkPlI'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4 -mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pf -rt=c"'(5c4mf*pfrt=c"'(0!('(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pf -rt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4mf*pfrt=c"'(5c4>$qR3!!P--IqJ`\rW%F-4U(0O -)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2= -li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h3L)!h2= -li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gf -rtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=lWaB+li.gf -rtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)%)!h2=li.gfrtt)% -)!h2=li.gfrtt)%)!h2=li.f(?jQW0"YtRWB)ho1!.Qsk"mc1+li.jgru(/&)=.;?li.jgru(/&)=.;? -li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jg -ru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li2>Mli.jgru(/&)=.;?li.jgru(/&)=.;?li.jg -ru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/& -)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jg>Q5T,ru(/&)=.;?li.jgru(/&)=.;?li.jgru(/& -)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru(/&)=.;?li.jgru"pH$2X]+ -14Odcs8W&uIlBLplMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhgg -ru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8' -)s[GBlMhggs*PdNru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8' -)s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GB -lMhggru:6-ru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')s[GB -lMhggru:8')s[GBlMhggru:8')s[GBlMhggru:8')d$JSqZ$dTIt))1s8E"J2a%E0rugP*+R&hJkl2dh -rugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP* -+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*Hm%YQ+R&hJkl2dhrugP* -+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJ -kl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R!&0+R&hJkl2dhrugP*+R&hJ -kl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dhrugP*+R&hJkl2dh -rugP*+R&hJ='uF5!!P--IqJ`\rW%F,4UC6N,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+ -,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StM -kPlahs!$Y+,3StMkPlahs!$Y+,3SuR,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StM -kPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlah -s!$Y+,3StMkPlahs!$Y+,3StMk?Is1kPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlah -s!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPlahs!$Y+,3StMkPl`-?juo4"YtRWB)ho1!.Qpj 
-#j2.4k5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+P -k5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5TfS -k5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^h -s!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^h>Q5u2 -s!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!6b, -,j,+Pk5Q^hs!6b,,j,+Pk5Q^hs!1WQ%Jp,/14Odcs8W&uIl9Frj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=U -j8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOf -s!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs*Q6Ts!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOf -s!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq- -.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zo3s!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq- -.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq-.-(=Uj8UOfs!Zq--rjXb -qZ$dTIt))1s8E"J2E_E,s!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lf -s!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%. -.cUIXir:Lfs!m%.Hn+.U.cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%. -.cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIX -ir:Lfs!m%..cO\4.cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIX -ir:Lfs!m%..cUIXir:Lfs!m%..cUIXir:Lfs!m%..cUIX@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:0 -0Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_ -hu>@es"E:00Ald_hu>@es"E:00Ald_huA'Whu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_ -hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@e -s"E:00Ald_hu>@es"E:00Ald_hu>@e>Q6A6s"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@e -s"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"E:00Ald_hu>@es"@>Z&c2P314Odcs8W&u -Il0@shZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#Dpb -hZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=e -s*QQXs"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=e -s"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WA7 -s"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC11#DpbhZ#=es"WC1 -1#DpbhZ#=es"WC11#DpbhZ#=es"WC10iMHmqZ$dTIt))1s8E"J2*D?(s"`I21>`$dhZ#@fs"`I21>`$d -hZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@f -s"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I2HnsRY1>`$dhZ#@fs"`I21>`$dhZ#@f -s"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I2 -1>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>Z781>`$dhZ#@fs"`I21>`$dhZ#@fs"`I2 -1>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$dhZ#@fs"`I21>`$d -;d^78!!P--IqJ`\rW%F)4Up0G2r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4e -s#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^4 -2r"?kg]'4es#8^42r"@[2r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^4 -2r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?k -g]'4es#8^42r"?kgKX\:g]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?k -g]'4es#8^42r"?kg]'4es#8^42r"?kg]'4es#8^42r"?kg]'31?kiJ<"YtRWB)ho1!.Qgg%GLhBgAa1e -s#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg5 -3SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAcO\gAa1es#Jg5 -3SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKn -gAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1e>Q6_;s#Jg53SOKn 
-gAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1es#Jg53SOKngAa1e -s#Jg53SOKngAa1es#Etb(&It714Odcs8W&uIks4sg&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj5 -3naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNo -g&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds*Ql\s#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNo -g&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+d -s#Sj53naNog&F+ds#Sj53naNog&F+ds#Sh;s#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+d -s#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53naNog&F+ds#Sj53`9?#qZ$dTIt))1 -s8E"J1Hc6"s$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffu -fDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%d -s$#'7Hp60^51ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%d -s$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'7 -51a$=51ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'751ffufDe%ds$#'7 -51ffufDe%ds$#'751ffufDe%ds$#'751ffu;.(4;!!P--IqJ`\rW%F(4V--D5M#j!f)Itcs$,*75M#j! -f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itc -s$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j^5M#j!f)Itcs$,*75M#j!f)Itc -s$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*7 -5M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!em&/=f)Itcs$,*75M#j!f)Itcs$,*7 -5M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j!f)Itcs$,*75M#j! -f)Is2?l/\?"YtRWB)ho1!.Qdf&(^bGf)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"d -s$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$508 -5h>s#f)J"ds$5085h>s#f)L+_f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$508 -5h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s# -f)J"ds$5085h>s#f)J"d>Q6t>s$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s# -f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$5085h>s#f)J"ds$0Cg(]+1914Odcs8W&uIkj.ueGhqd -s$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB: -7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds*R5as$YB: -7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6) -eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$Y@@s$YB:7+D6) -eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqds$YB:7+D6)eGhqd -s$YB:7+D6)eGhqds$YB:6r78/qZ$dTIt))1s8E"J1-H2ss$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN; -8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E- -df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;Hq2Wb8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E- -df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hc -s$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8((WA8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hc -s$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-df2hcs$tN;8(.E-:LG+Q7=Bs%:Z<9$mT1d/Q_bs%:Z< -9$mT1d/Q_bs%:Z<9$mT1d/Q_bs%:Z<9$mT1d/Q_bs%:Z<9$mT1d/Q_bs%:Z<9$mT1d/Q_bs%:Z<9$mT1 -d/Q_bs%6$o)uBU=14Odcs8W&uIka)!cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVa -s%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf= -:!Wc5cMpVas%Uf=:!Wc5cMpVas*RPds%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf= -:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5 -cMpVas%Uf=:!Wc5cMpVas%UdCs%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5 -cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=:!Wc5cMpVas%Uf=9hf"8qZ$dTIt))1s8E"J0g-/o -s%^i=:;9]&;bl:Pas&%#?;9]&;bl:Pas&%#? 
-;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&; -bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&f;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&; -bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pa -s&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bZk*Ebl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pa -s&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:Pas&%#?;9]&;bl:O7?lo1F -"YtRWB)ho1!.Q[c'[$GTb5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5? -b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG` -s&@/@<6G5?b5Zigb5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG` -s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@ -<6G5?b5YG`>Q7[Fs&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@ -<6G5?b5YG`s&@/@<6G5?b5YG`s&@/@<6G5?b5YG`s&;["+8Z$A14Odcs8W&uIkX#"b5YJas&I5AA -b5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJa -s&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas*Rhhs&I5AAb5YJa -s&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5A -Ab5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I3Gs&I5AAb5YJas&I5A -Ab5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5AAb5YJas&I5AA -b5YJas&I5A!!P--IqJ`\rW%F$ -4Vc$;>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC ->0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$Yj ->0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YH -a8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa'8RI -a8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>`s'!JC>0$YHa8]>` -s'!JC>0$YHa8]>`s'!JC>0$YHa8]=9?m5CI"YtRWB)ho1!.QXb(<6AZ`rB;`s'3SD>fQeK`rB;`s'3SD ->fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK -`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rCEk`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK -`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;` -s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`>Q7sJs'3SD>fQeK`rB;`s'3SD>fQeK`rB;` -s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'3SD>fQeK`rB;`s'/0( -,5V?D14Odcs8W&uIkEl"`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO -`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_ -s'N_E?c;tO`;a2_s*S1ls'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_ -s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E -?c;tO`;a2_s'N]Ks'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E -?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?c;tO`;a2_s'N_E?V4]NqZ$dTIt))1s8E"J/j1#fs'ikF@`&.S -_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^ -s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikFHt'tm@`&.S_Z+)^ -s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF -@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@_u@L@`&.S_Z+)^s'ikF -@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S_Z+)^s'ikF@`&.S -_Z+)^s'ikF@`&.S8RNh@!!P--IqJ`\rW%F#4Vts7A&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#] -s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnF -A&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81mA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnF -A&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T -_>e#]s'rnFA&81T_>e#]s'rnFA&81T_-?qL_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T -_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e#]s'rnFA&81T_>e"9?mY[M"YtRWB)ho1 
-!.QUa(r?5`_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%H -A\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX -_>emo_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX -_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_ ->Q89Ns(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_ -s(0%HA\nCX_>e)_s(0%HA\nCX_>e)_s(+`/-2RZG14Odcs8W&uIkQ8WPs)5IJDn5md\c6TYs)5IJDn5md\c6TYs)5IJDn5md -\c6TYs)5IJDn5md\c6TYs)5IJDn5md\c6TYs)5IJDn5md\c6TYs)5IJDn5md\c6TYs)1>6.Jj)K14Odc -s8W&uIk3`$\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OK -E4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f -\c6WZs*Sdrs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f -\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZ -s)>MQs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZ -s)>OKE4Q!f\c6WZs)>OKE4Q!f\c6WZs)>OKE(+.aqZ$dTIt))1s8E"J/3Or^s)GRKEOc$g\GpQYs)GRK -EOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g -\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKHuZ[rEOc$g\GpQYs)GRKEOc$g -\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQY -s)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEO]6QEOc$g\GpQYs)GRKEOc$g\GpQY -s)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRKEOc$g\GpQYs)GRK -EOc$g7:7VB!!P--IqJ`\rW%Eu4WLp1G..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo -[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZ -s)tjNG..Eo[f:NZs)tjNG..EuG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZ -s)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjN -G..Eo[f:NZs)tjNG..Eo[TicT[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjN -G..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:NZs)tjNG..Eo[f:M=?nD0T"YtRWB)ho1!.QL^*4>fi -[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEX -s)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtVt[JtEX -s)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgM -G.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEX>Q8lSs)tgM -G.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n[JtEXs)tgMG.%?n -[JtEXs)tgMG.%?n[JtEXs)peQ92Ws*q9QJ$As&YlB3Ws*q9QJ$As&YlB3Ws*q9QJ$As&YlB3Ws*q9Q -J$As&YlB3Ws*q9QJ$As&YlB3Ws*q9QJ$As&YlB3Ws*q9QJ$As&YlB3Ws*m@C0Db_Q14Odcs8W&uIjmN& -XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$U -s+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us*TI$ -s+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HR -K<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@FXs+@HR -K<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+XoF$Us+@HRK<>0+ -XoF$Us+@HRK<>0+XoF$Us+@HRK0`m"qZ$dTIt))1s8E"J.6ScSs+RQSKrk<.XT+!Us+RQSKrk<.XT+!U -s+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQS -Krk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSI"e[%Krk<.XT+!Us+RQSKrk<.XT+!Us+RQS -Krk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<. -XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKreMYKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<. 
-XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.XT+!Us+RQSKrk<.5[ZAE -!!P--IqJ`\rW%Es4Wpj+L91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WT -L91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0 -XT+$Vs+[WTL91E&L91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0 -XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$V -s+[WTL91E0XBY^ZXT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+$V -s+[WTL91E0XT+$Vs+[WTL91E0XT+$Vs+[WTL91E0XT+#??o%TZ"YtRWB)ho1!.QC[+fYL!WW.jTs,*fU -MQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5 -WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.@'WW.jTs,*fUMQ-W5 -WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jT -s,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jT>Q9S[s,*fUMQ-W5WW.jT -s,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fUMQ-W5WW.jTs,*fU -MQ-W5WW.jTs,'$K1]%.U14Odcs8W&uIjdH'W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6 -W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdS -s,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs*Ta's,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdS -s,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iU -Ml?Z6W;hdSs,3iUMl?Z6W;hdSs,3g[s,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iU -Ml?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMl?Z6W;hdSs,3iUMa(T+qZ$dTIt))1s8E"J --TrWNs,F!!P--IqJ`\rW%Eq4X-a&OJVu=V>lXRs,a)WOJVu=V>lXR -s,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)W -OJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu)OJVu=V>lXRs,a)WOJVu=V>lXRs,a)W -OJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu= -V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V-Et]V>lXRs,a)WOJVu=V>lXRs,a)WOJVu= -V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lXRs,a)WOJVu=V>lW@ -?oIl^"YtRWB)ho1!.Q@Z,,G7$V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,W -Oei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#> -V#QRQs,j,WOei#>V#Ph)V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#> -V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQ -s,j,WOei#>V#QRQ>Q9h]s,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQ -s,j,WOei#>V#QRQs,j,WOei#>V#QRQs,j,WOei#>V#QRQs,fHP2Z!IX14Odcs8W&uIj[B(V#QXSs-'8Y -PGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5B -V#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs*U$+s-'8YPGJ5B -V#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXS -s-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'6_s-'8YPGJ5BV#QXS -s-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8YPGJ5BV#QXSs-'8Y -PGJ5BV#QXSs-'8YPEU&UFPs-BAYQD+>EU&UFPs-BAYQD+>E -U&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFP -s-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYI$UK+QD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFP -s-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAY -QD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD%O_QD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAY -QD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>EU&UFPs-BAYQD+>E4CC/G!!P--IqJ`\ -rW%Ep4X?^#R%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CP -s-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZ -R%XJ,R%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZ 
-R%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJH -TNhG`T`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:CPs-TJZR%XJH -T`:CPs-TJZR%XJHT`:CPs-TJZR%XJHT`:BA?oe)a"YtRWB)ho1!.Q=Y,bP+(TDt:Ns-TGYR%ODGTDt:N -s-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGY -R%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDs;+TDt:Ns-TGYR%ODGTDt:Ns-TGY -R%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODG -TDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:N>Q:(_s-TGYR%ODGTDt:Ns-TGYR%ODG -TDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:Ns-TGYR%ODGTDt:N -s-PlU3Vrd[14Odcs8W&uIjI6(T)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\ -S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bN -T)Y=Ps.#\\S=]bNT)Y=Ps*U?.s.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bN -T)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=P -s.#\\S=]bNT)Y=Ps.#Zbs.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=P -s.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S=]bNT)Y=Ps.#\\S3(+>qZ$dTIt))1s8E"J,sh] -T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqR -SH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]I%Qr/T:GqR -SH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4O -s.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:B-cT:GqRSH#4O -s.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h]T:GqRSH#4Os.>h] -T:GqRSH#4Os.>h]T:GqR3ab&H!!P--IqJ`\rW%En4XQWtTUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnR -RfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%L -s.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPn.TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%L -s.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\ -TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRTofbRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\ -TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB%Ls.Gh\TUPnRRfB$A?p+;d"YtRW -B)ho1!.Q7W-COn.RK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"L -s.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq] -U7)%URK%Z/RK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq] -U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%U -RK'"L>Q:Fcs.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%U -RK'"Ls.Yq]U7)%URK'"Ls.Yq]U7)%URK'"Ls.VM]4o53_14Odcs8W&uIj@0)QiEnKs.u(^V3h4YQiEnK -s.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^ -V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs*UZ0s.u(^V3h4YQiEnKs.u(^ -V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4Y -QiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u&ds.u(^V3h4YQiEnKs.u(^V3h4Y -QiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnKs.u(^V3h4YQiEnK -s.u(^V)VjHqZ$dTIt))1s8E"J,X!NAs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._ -VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[ -QiEqLs/)._VO.=[QiEqLs/)._I&<81VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[ -QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqL -s/)._VO.=[QiEqLs/)._VO(NeVO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqL -s/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[QiEqLs/)._VO.=[3++rI!!P--IqJ`\rW%Em4XcTq -WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_ 
-Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL2WKmL_ -Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhK -s/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q!=9fQ2dhK -s/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:`WKmL_Q2dhKs/D:` -WKmL_Q2dhKs/D:`WKmL_Q2dgC?pFMg"YtRWB)ho1!.Q4V.?je6PQ.bKs/hLbXcrdePQ.bKs/hLbXcrde -PQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bK -s/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ-$4PQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bK -s/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLb -XcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bK>Q:ghs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLb -XcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/hLbXcrdePQ.bKs/e4f62LWc -14Odcs8W&uIj.$)P5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\J -s/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qOb -Y*/gfP5h\Js*Uu4s/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qOb -Y*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gf -P5h\Js/qMhs/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObY*/gf -P5h\Js/qObY*/gfP5h\Js/qObY*/gfP5h\Js/qObXuB`SqZ$dTIt))1s8E"J,!@EQ;$j -s0IddZ]G-mO8lPIs0IddZ]G-mO8lPIs0IddZ]G-mO8lPIs0IddZ]G-mO8lPIs0IddZ]G-mO8lPIs0Idd -Z]G-mO8lPIs0IddZ]G-mO8lPIs0FRj6i-ie14Odcs8W&uIj$s*NW6JIs0n!f[uLEsNW6JIs0n!f[uLEs -NW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JI -s0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs*V;8s0n!f[uLEsNW6JIs0n!f[uLEsNW6JI -s0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f -[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0mtls0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f -[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[uLEsNW6JIs0n!f[l%P^ -qZ$dTIt))1s8E"J+[%B7s1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>G -s1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f -\VpKuMuU>Gs1+'fI(>18\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f -\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKu -MuU>Gs1+'f\Vj\l\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKu -MuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKuMuU>Gs1+'f\VpKu1gicL!!P--IqJ`\rW%Ek4Y2Nk]8Q^$MuUDI -s1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h -]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q]:]8Q^$MuUDIs1=3h -]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$ -MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$Md-4nMuUDIs1=3h]8Q^$ -MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDIs1=3h]8Q^$MuUDI -s1=3h]8Q^$MuUCG?q1"n"YtRWB)ho1!.Q.T/;jJ?MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h -]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca% -MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ8(:MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca% -MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>H -s1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>H>Q;?ns1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>H -s1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1F6h]Sca%MZ:>Hs1C-q8,E8i14Odcs8W&u -Iipm+L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj( -L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,E -s*VS:s1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,E 
-s1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a=n -s1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h -^PDj(L]>,Es1a?h^PDj(L]>,Es1a?h^G91fqZ$dTIt))1s8E"J+?_<2s1jBh^kVm)LB#&Ds1jBh^kVm) -LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&D -s1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBhI)(L:^kVm)LB#&Ds1jBh^kVm)LB#&D -s1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh -^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kQ(n^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh -^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm)LB#&Ds1jBh^kVm) -113ZM!!P--IqJ`\rW%Ei4YDHg_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uC -s1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh -_1hp*L&\uCs1sEh_1ho:_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh -_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp* -L&\uCs1sEh_1hp*Kj4SnL&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp* -L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\uCs1sEh_1hp*L&\tE?qC.p"YtRWB)ho1!.Q(R07s5EK)`lC -s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`k -a+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)^5=K)`lCs2T`k -a+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC>Q;`qs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC -s2T`ka+F?3K)`lCs2Qg$9D\\m14Odcs8W&uIigg,K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`k -a+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs*Vk=s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3 -K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC -s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T^qs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lC -s2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka+F?3K)`lCs2T`ka"UmoqZ$dTIt))1 -s8E"J*^)3.s2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6 -JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiC -s2filI*$s>aasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiC -s2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2fil -aam[raasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2filaasK6JcEiCs2fil -aasK6JcEiCs2filaasK6JcEiCs2filaasK60ORQN!!P--IqJ`\rW%Eh4YV?bb^TT9IfIW@s3,rlb^TT9 -IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@ -s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TS>b^TT9IfIW@s3,rlb^TT9IfIW@ -s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rl -b^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9ITuirIfIW@s3,rlb^TT9IfIW@s3,rl -b^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9IfIW@s3,rlb^TT9 -IfIVF?qpLu"YtRWB)ho1!.Q"P0n')HIK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q? -s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ul -c$fW:IK.Q?s35ulc$fW:IK+]>IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ul -c$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW: -IK.Q?s35ulc$fW:IK.Q?>Q;rrs35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW: -IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s35ulc$fW:IK.Q?s333):&=no14Odcs8W&uIiU[,I/hN? 
-s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)m -c[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s*W.?s3H)m -c[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c= -I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H'ss3H)mc[>c= -I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN?s3H)mc[>c=I/hN? -s3H)mc[>c=I/hN?s3H)mcRrU#qZ$dTIt))1s8E"J*Bc0(s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5n -dX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rA -HN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5nI+!?@dX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rA -HN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E> -s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX#-tdX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E> -s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rAHN2E>s3c5ndX(rA/RVBO!!P-- -IqJ`\rW%Ef4Yh<_e9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)D -H2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB> -s3u>oe9V(Ae9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB> -s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>o -e9V)DH!Cs3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lB>s3u>o -e9V)DH2lB>s3u>oe9V)DH2lB>s3u>oe9V)DH2lAG?r6_#"YtRWB)ho1!.PtO13iiLGlQ<=s4)AoeTh,E -GlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<= -s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlN0AGlQ<=s4)AoeTh,EGlQ<= -s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)Ao -eTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=>Q<5us4)AoeTh,EGlQ<=s4)Ao -eTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,EGlQ<=s4)AoeTh,E -GlQ<=s4&]/;#:4r14Odcs8W&uIiCO-FoU0Q9qZ$dTIt))1s8E"J)Efuss5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9 -s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%t -j)=tXDu\j9s5S%tj)=tXDu\j9s5S%tI,f/Fj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%t -j)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tX -Du\j9s5S%tj)=tXDu\j9s5S%tj)80%j)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tX -Du\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tXDu\j9s5S%tj)=tX.:?0Q!!P--IqJ`\rW%Ed -4Z@6XkAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8! 
-kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC6H -kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^ -D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D-R&' -D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9s6"8!kAC7^D?&d9 -s6"8!kAC7^D?&d9s6"8!kAC7^D?&cI?s!4*"YtRWB)ho1!.PkL2f/NYD#`a9s64A"l"pCaD#`a9s64A" -l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCa -D#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#\nID#`a9s64A"l"pCaD#`a9s64A"l"pCa -D#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9 -s64A"l"pCaD#`a9s64A"l"pCaD#`a9s64A"l"pCaD#`a9>Q -=Si(%14Odcs8W&uIi1C.CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_ -CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5 -s64:ul"^7_CB*O5s*X'Gs64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5 -s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:u -l"^7_CB*O5s649&s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:u -l"^7_CB*O5s64:ul"^7_CB*O5s64:ul"^7_CB*O5s64:ukpKk@qZ$dTIt))1s8E"J(d0iks6aP"mUuRf -BE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4 -s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"I-tYImUuRfBE.C4 -s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP" -mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUoc(mUuRfBE.C4s6aP" -mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRfBE.C4s6aP"mUuRf -BE.C4s6aP"mUuRf-=C$S!!P--IqJ`\rW%Eb4ZR-Smq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3 -s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS" -mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2TImq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS" -mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2Ug -B)h=3s6jS"mq2UgB)h=3s6jS"mq2UgAm><(B)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2Ug -B)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)h=3s6jS"mq2UgB)hQ=2)s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4 -s6sY#n7M^iB)h@4s6sY#n7M^iB)h@4s6q@C>5J:'14Odcs8W&uIht7.AH242s70_#nmqdkAH242s70_# -nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdk -AH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s*XBJs70_#nmqdkAH242s70_#nmqdk -AH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242 -s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70])s70_#nmqdkAH242s70_#nmqdkAH242 -s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_#nmqdkAH242s70_# -ng.[JqZ$dTIt))1s8E"J(Hjfgs7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so -@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1 -s7Kk$oj[so@fQ+1s7Kk$I.^tKoj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1 -s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$ -oj[so@fQ+1s7Kk$ojV/*oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$ -oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so@fQ+1s7Kk$oj[so,[amS!!P--IqJ`\rW%Ea4Zd*PpL4*r -@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1 -s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4)LpL4*r@K6(1 -s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t% -pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@9`d+@K6(1s7]t% -pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r@K6(1s7]t%pL4*r 
-@K6(1s7]t%pL4*r@K6'I?sWX0"YtRWB)ho1!.P@t*A;ni07S@&Is`o!HnTts07S@&Is`o!HnTts07S@& -Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o! -HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@s07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o! -HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts -07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&>CV#RIs`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts -07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is`o!HnTts07S@&Is_h^0)GVP)*^>@ -It*"LJH2) %APLeod -EI -40 0 0 39 126 267 cm -BI -/Width 120 -/Height 117 -/BitsPerComponent 8 -/Decode[ -0 1 -0 1 -0 1 -] -/Interpolate true -/DataSource cg_aiproc -ID -J)4=/s(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%L -s(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a? -Aq+:iAn(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a? -An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP -@fKkK@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP -@fL%Ls(5a?An(dP@fL%Ls(5a?An(dP@fL%Ls(5a?Amukl%Jp-ThHO"[BOUpS@K1"Ls(Gj@BOUpS@K1"L -s(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@ -BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpSIpr@j@K1"Ls(Gj@BOUpS@K1"Ls(Gj@ -BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS -@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(?"Rs(Gj@BOUpS@K1"Ls(Gj@BOUpS -@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"Ls(Gj@BOUpS@K1"L -s(Gj@BOUpS@3?$h*Zb<2IfOuT>ZC`;@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqK -s(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@ -BjgsT@/jqKs(Pm@BjgsT@/jqj@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@ -BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT -@/jqKs(Pm@BjgsT@/jqKs(Pm@B5(cRBjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT -@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqKs(Pm@BjgsT@/jqK+:\iD4TPN^!.Wj2 -;u^T>s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJ -s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs([$j -s(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@ -C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!R -C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U -?iOkJs(Yp@C1%!U?iOkJs(Yp@C1%!U?iOkJs(Ym`%LbK`!<<'!J)4O1s(l$ACgR-X?N4hJs(l$ACgR-X -?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJ -s(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACk#dkCgR-X?N4hJs(l$ACgR-X?N4hJ -s(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$A -CgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4YS?N4hJs(l$ACgR-X?N4hJs(l$A -CgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X?N4hJs(l$ACgR-X -?N4hJs(l$ACgJ4o$p?`Rs8N(KhI'4\DI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[ -?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJ -s))-BDI*9[?2neJs))-BDI*9[IpN(l?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJ -s))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-B -DI*9[?2neJs))-BDI*9[?2neJs(?4Ts))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-B -DI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[?2neJs))-BDI*9[>p'Uc4ZrfFrr@Q* -@8[&C>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\>lS_Is)20BDd<<\ 
-(/\(G05D0)[ 3.332520 8.342285 10.004885 4.167480 8.342283 4.995117 0.000000 ] xS -1 0 0 -1 93.41275 59 cm -/F1.1[ 17 0 0 -17 0 0]sf --9.4504395 6 m -(:%>)[ 4.723145 9.454590 0.000000 ] xS -1 0 0 -1 226.80214 59 cm --4.7231445 6 m -(:>)[ 4.723145 0.000000 ] xS -1 0 0 -1 351.65463 59 cm --9.4504395 6 m -(:=>)[ 4.723145 9.454590 0.000000 ] xS -1 0 0 -1 471 21 cm -/F1.1[ 15 0 0 -15 0 0]sf --25.85083 6 m -(/\('\(710)[ 3.332520 8.342285 10.832521 8.342283 8.342285 8.342285 0.000000 ] xS -1 0 0 -1 93.41275 20 cm -/F1.1[ 17 0 0 -17 0 0]sf --9.4504395 6 m -(:<>)[ 4.723145 9.454590 0.000000 ] xS -1 0 0 -1 226.80214 20 cm --4.7231445 6 m -(:>)[ 4.723145 0.000000 ] xS -1 0 0 -1 351.65463 20 cm --9.4504395 6 m -(:<>)[ 4.723145 9.454590 0.000000 ] xS -3 w -0.60000002 i -/Cs1 SC -0 0 0 sc -1 0 0 -1 -23 437 cm -240.0773 81 m -179.6626 117.73865 l -S -CM -147.09308 313.44202 m -158.84483 315.67276 l -154.48036 322.84991 l -h -147.09308 313.44202 m -f -0 J -0 j -1 0 0 -1 -23 437 cm -170.09308 123.55798 m -181.84483 121.32724 l -177.48036 114.15009 l -h -170.09308 123.55798 m -S -0.40000001 1 1 sc -CM -354 403 m -393 403 l -393 364 l -354 364 l -h -354 403 m -f -1 w -1 J -1 j -0 0 0 sc -1 0 0 -1 -23 437 cm -377 34 m -416 34 l -416 73 l -377 73 l -h -377 34 m -S -ep -end -%%Trailer -%%EOF diff --git a/src/externals/pio1/doc/images/dof-rearr.graffle b/src/externals/pio1/doc/images/dof-rearr.graffle deleted file mode 100644 index 69fa79b0ee1..00000000000 --- a/src/externals/pio1/doc/images/dof-rearr.graffle +++ /dev/null @@ -1,2758 +0,0 @@ - - - - - ActiveLayerIndex - 0 - ApplicationVersion - - com.omnigroup.OmniGrafflePro - 137.11.0.108132 - - AutoAdjust - - BackgroundGraphic - - Bounds - {{0, 0}, {576, 733}} - Class - SolidGraphic - ID - 2 - Style - - fill - - GradientColor - - w - 0.666667 - - - shadow - - Draws - NO - - stroke - - Draws - NO - - - - CanvasOrigin - {0, 0} - CanvasSize - {576, 733} - ColumnAlign - 1 - ColumnSpacing - 36 - CreationDate - 2009-12-16 08:35:43 -0700 - Creator - John Dennis - DisplayScale - 1 0/72 in = 1.0000 in - FileType - flat - GraphDocumentVersion - 6 - GraphicsList - - - Bounds - {{377, 34}, {39, 39}} - Class - ShapedGraphic - ID - 150 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Class - LineGraphic - ID - 140 - Points - - {240.077, 81} - {166.077, 126} - - Style - - stroke - - HeadArrow - FilledArrow - TailArrow - 0 - Width - 3 - - - - - Class - Group - Graphics - - - Bounds - {{345.309, 397}, {58.6913, 40}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 142 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 [4]} - VerticalPad - 0 - - - - Bounds - {{229.527, 407}, {40.5503, 20}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 143 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 
\cf0 []} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{86, 407}, {60.8255, 20}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 144 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 [4]} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{468, 407}, {52, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 145 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 ioCount} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{345.309, 358}, {58.6913, 40}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 146 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 [5]} - VerticalPad - 0 - - - - Bounds - {{229.527, 368}, {40.5503, 20}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 147 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 []} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{86, 368}, {60.8255, 20}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 148 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 [1]} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{472, 368}, {44, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 149 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 ioStart} - VerticalPad - 0 - - Wrap - NO - - - ID - 141 - - - Bounds - {{305.724, 203.5}, {40.5503, 40}} - Class - ShapedGraphic - ID - 131 - Shape - Rectangle - Style - - 
fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{265.174, 204.5}, {40.5503, 40}} - Class - ShapedGraphic - ID - 130 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{71.9998, 204.5}, {39, 39}} - Class - ShapedGraphic - ID - 128 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - GradientColor - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{344, 204.5}, {39, 39}} - Class - ShapedGraphic - ID - 120 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{226.174, 204.5}, {39, 39}} - Class - ShapedGraphic - ID - 121 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{110, 204.5}, {39, 39}} - Class - ShapedGraphic - ID - 123 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - GradientColor - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{188.087, 204.5}, {39, 39}} - Class - ShapedGraphic - ID - 124 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{149.087, 204.5}, {39, 39}} - Class - ShapedGraphic - ID - 125 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{440, 132.5}, {117, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 108 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 IO decomposition} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{445.5, 89}, {105, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 107 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 Rearrangement} - VerticalPad - 0 - - Wrap - NO - - - Class - LineGraphic - ID - 103 - Points - - {198.087, 80} - {93.087, 123} - - Style - - stroke - - HeadArrow - FilledArrow - TailArrow - 0 - Width - 3 - - - - - Class - LineGraphic - ID - 102 - Points - - {130.99, 78.5} - {245, 118.5} - {246.067, 116.5} - - Style - - stroke - - HeadArrow - FilledArrow - TailArrow - 0 - Width - 3 - - - - - Bounds - {{149.043, 131}, {39, 39}} - Class - ShapedGraphic - ID - 110 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - FillType - 2 - GradientAngle - 90 - GradientColor - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{344, 131}, {39, 39}} - Class - ShapedGraphic - ID - 111 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{305.087, 131}, {39, 39}} - Class - ShapedGraphic - ID - 112 - Shape - Rectangle - Style - - fill - - 
Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{266, 131}, {39, 39}} - Class - ShapedGraphic - ID - 113 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{227.087, 131}, {39, 39}} - Class - ShapedGraphic - ID - 114 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - FillType - 2 - GradientAngle - 90 - GradientColor - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{188.087, 131}, {39, 39}} - Class - ShapedGraphic - ID - 115 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{110, 131}, {39, 39}} - Class - ShapedGraphic - ID - 116 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{70.9998, 131}, {39, 39}} - Class - ShapedGraphic - ID - 117 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - FillType - 2 - GradientAngle - 90 - GradientColor - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Class - Group - Graphics - - - Bounds - {{363, 286}, {33, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 86 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 PE 2} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{228.5, 286}, {33, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 87 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 PE 1} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{91.5, 286}, {33, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 88 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 PE 0} - VerticalPad - 0 - - Wrap - NO - - - ID - 85 - - - Bounds - {{344.309, 303}, {58.6913, 40}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 80 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 [6, 7, 8]} - VerticalPad - 0 - - - - Bounds - {{228.527, 313}, {40.5503, 20}} - Class - ShapedGraphic - 
FontInfo - - Font - Helvetica - Size - 16 - - ID - 79 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 [1, 3]} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{85, 313}, {60.8255, 20}} - Class - ShapedGraphic - FontInfo - - Font - Helvetica - Size - 16 - - ID - 77 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs34 \cf0 [2, 4, 5]} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{458.5, 313}, {69, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 75 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 compDOF} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{457, 207}, {74, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 68 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 Disk layout} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{439.997, 35.5}, {101, 36}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 57 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 Comp\ - decomposition} - VerticalPad - 0 - - Wrap - NO - - - Class - Group - Graphics - - - Bounds - {{342.451, 8}, {33, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 82 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 PE 2} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{207.951, 8}, {33, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - 
Helvetica - Size - 14 - - ID - 83 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 PE 1} - VerticalPad - 0 - - Wrap - NO - - - Bounds - {{70.9513, 8}, {33, 18}} - Class - ShapedGraphic - FitText - YES - Flow - Resize - FontInfo - - Font - Helvetica - Size - 14 - - ID - 84 - Shape - Rectangle - Style - - fill - - Draws - NO - - shadow - - Draws - NO - - stroke - - Draws - NO - - - Text - - Pad - 0 - Text - {\rtf1\ansi\ansicpg1252\cocoartf949\cocoasubrtf540 -{\fonttbl\f0\fswiss\fcharset0 Helvetica;} -{\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural - -\f0\fs30 \cf0 PE 0} - VerticalPad - 0 - - Wrap - NO - - - ID - 81 - - - Bounds - {{338.997, 34}, {39, 39}} - Class - ShapedGraphic - ID - 47 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{299.997, 34}, {39, 39}} - Class - ShapedGraphic - ID - 48 - Shape - Rectangle - Style - - fill - - Color - - b - 1 - g - 1 - r - 0.4 - - - shadow - - Draws - NO - - - - - Bounds - {{221.997, 34}, {39, 39}} - Class - ShapedGraphic - ID - 49 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{182.997, 34}, {39, 39}} - Class - ShapedGraphic - ID - 50 - Shape - Rectangle - Style - - fill - - Color - - b - 0.4 - g - 0.4 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{103.951, 34}, {39, 39}} - Class - ShapedGraphic - ID - 51 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{64.9513, 34}, {39, 39}} - Class - ShapedGraphic - ID - 52 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - Bounds - {{25.9513, 34}, {39, 39}} - Class - ShapedGraphic - ID - 53 - Shape - Rectangle - Style - - fill - - Color - - b - 0 - g - 1 - r - 1 - - - shadow - - Draws - NO - - - - - GridInfo - - GuidesLocked - NO - GuidesVisible - YES - HPages - 1 - ImageCounter - 3 - KeepToScale - - Layers - - - Lock - NO - Name - Layer 1 - Print - YES - View - YES - - - LayoutInfo - - Animate - NO - AutoLayout - 2 - LineLength - 0.4643835723400116 - circoMinDist - 18 - circoSeparation - 0.0 - layoutEngine - dot - neatoSeparation - 0.0 - twopiSeparation - 0.0 - - LinksVisible - NO - MagnetsVisible - NO - MasterSheets - - ModificationDate - 2009-12-21 10:08:25 -0700 - Modifier - John Dennis - NotesVisible - NO - Orientation - 2 - OriginVisible - NO - OutlineStyle - Basic - PageBreaks - NO - PrintInfo - - NSBottomMargin - - float - 41 - - NSLeftMargin - - float - 18 - - NSPaperSize - - size - {612, 792} - - NSRightMargin - - float - 18 - - NSTopMargin - - float - 18 - - - PrintOnePage - - QuickLookPreview - - JVBERi0xLjMKJcTl8uXrp/Og0MTGCjQgMCBvYmoKPDwgL0xlbmd0aCA1IDAgUiAvRmls - dGVyIC9GbGF0ZURlY29kZSA+PgpzdHJlYW0KeAGlWE1vHDcMvc+v4LEFbFnU91zrJEBz - SVIb6CHowXDXjV3bm/gDQf99HzUjzeyOZrFN1za8a5NPFEXxPc43+kTfSOPLx0DRWnra - 0O/0SFoFnV90S2fnz0zXz8T56/maTrXy43+ndyNAB4CbbKgJpsar3rOlEDTZXr7H/7+H - zd0AfX6RI9B0cY5gOH84lV+Ip7t+qBjWTRAXCDy4JvY+BAGimBYIiVIgWFvBGMJLqpdX - 
[remaining base64-encoded QuickLookPreview (PDF) and QuickLookThumbnail (TIFF) data omitted]
ReadOnly - NO - RowAlign - 1 - RowSpacing - 36 - SheetTitle - Canvas 1 - SmartAlignmentGuidesActive - YES - SmartDistanceGuidesActive - YES - UniqueID - 1 - UseEntirePage - - VPages - 1 - WindowInfo - - CurrentSheet - 0 - ExpandedCanvases - - FitInWindow - - Frame - {{193, 584}, {716, 841}} - ListView - - OutlineWidth - 142 - RightSidebar - - Sidebar - - SidebarWidth - 138 - VisibleRegion - {{1, 1}, {574, 732}} - Zoom - 1 - ZoomValues - - - Canvas 1 - 0.0 - 1 - - - - saveQuickLookFiles - YES - - diff --git a/src/externals/pio1/doc/images/dof-rearr.png b/src/externals/pio1/doc/images/dof-rearr.png
deleted file mode 100644 index 182cb97b110..00000000000 Binary files a/src/externals/pio1/doc/images/dof-rearr.png and /dev/null differ diff --git a/src/externals/pio1/doc/images/dof.eps b/src/externals/pio1/doc/images/dof.eps deleted file mode 100644 index 6310571f83f..00000000000 --- a/src/externals/pio1/doc/images/dof.eps +++ /dev/null @@ -1,1283 +0,0 @@ -%!PS-Adobe-3.0 EPSF-3.0 -%%HiResBoundingBox: 0.000000 0.000000 504.000000 256.000000 -%APL_DSC_Encoding: UTF8 -%APLProducer: (Version 10.5.8 (Build 9L31a) Quartz PS Context) -%%Title: (Unknown) -%%Creator: (Unknown) -%%CreationDate: (Unknown) -%%For: (Unknown) -%%DocumentData: Clean7Bit -%%LanguageLevel: 2 -%%Pages: 1 -%%BoundingBox: 0 0 504 256 -%%EndComments -%%BeginProlog -%%BeginFile: cg-pdf.ps -%%Copyright: Copyright 2000-2004 Apple Computer Incorporated. -%%Copyright: All Rights Reserved. -currentpacking true setpacking -/cg_md 141 dict def -cg_md begin -/L3? languagelevel 3 ge def -/bd{bind def}bind def -/ld{load def}bd -/xs{exch store}bd -/xd{exch def}bd -/cmmtx matrix def -mark -/sc/setcolor -/scs/setcolorspace -/dr/defineresource -/fr/findresource -/T/true -/F/false -/d/setdash -/w/setlinewidth -/J/setlinecap -/j/setlinejoin -/M/setmiterlimit -/i/setflat -/rc/rectclip -/rf/rectfill -/rs/rectstroke -/f/fill -/f*/eofill -/sf/selectfont -/s/show -/xS/xshow -/yS/yshow -/xyS/xyshow -/S/stroke -/m/moveto -/l/lineto -/c/curveto -/h/closepath -/n/newpath -/q/gsave -/Q/grestore -counttomark 2 idiv -{ld}repeat pop -/SC{ - /ColorSpace fr scs -}bd -/sopr /setoverprint where{pop/setoverprint}{/pop}ifelse ld -/soprm /setoverprintmode where{pop/setoverprintmode}{/pop}ifelse ld -/cgmtx matrix def -/sdmtx{cgmtx currentmatrix pop}bd -/CM {cgmtx setmatrix}bd -/cm {cmmtx astore CM concat}bd -/W{clip newpath}bd -/W*{eoclip newpath}bd -statusdict begin product end dup (HP) anchorsearch{ - pop pop pop - true -}{ - pop - (hp) anchorsearch{ - pop pop true - }{ - pop false - }ifelse -}ifelse -{ - { - { - pop pop - (0)dup 0 4 -1 roll put - F charpath - }cshow - } -}{ - {F charpath} -}ifelse -/cply exch bd -/cps {cply stroke}bd -/pgsave 0 def -/bp{/pgsave save store}bd -/ep{pgsave restore showpage}def -/re{4 2 roll m 1 index 0 rlineto 0 exch rlineto neg 0 rlineto h}bd -/scrdict 10 dict def -/scrmtx matrix def -/patarray 0 def -/createpat{patarray 3 1 roll put}bd -/makepat{ -scrmtx astore pop -gsave -initgraphics -CM -patarray exch get -scrmtx -makepattern -grestore -setpattern -}bd -/cg_BeginEPSF{ - userdict save/cg_b4_Inc_state exch put - userdict/cg_endepsf/cg_EndEPSF load put - count userdict/cg_op_count 3 -1 roll put - countdictstack dup array dictstack userdict/cg_dict_array 3 -1 roll put - 3 sub{end}repeat - /showpage {} def - 0 setgray 0 setlinecap 1 setlinewidth 0 setlinejoin - 10 setmiterlimit [] 0 setdash newpath - false setstrokeadjust false setoverprint -}bd -/cg_EndEPSF{ - countdictstack 3 sub { end } repeat - cg_dict_array 3 1 index length 3 sub getinterval - {begin}forall - count userdict/cg_op_count get sub{pop}repeat - userdict/cg_b4_Inc_state get restore - F setpacking -}bd -/cg_biproc{currentfile/RunLengthDecode filter}bd -/cg_aiproc{currentfile/ASCII85Decode filter/RunLengthDecode filter}bd -/ImageDataSource 0 def -L3?{ - /cg_mibiproc{pop pop/ImageDataSource{cg_biproc}def}bd - /cg_miaiproc{pop pop/ImageDataSource{cg_aiproc}def}bd -}{ - /ImageBandMask 0 def - /ImageBandData 0 def - /cg_mibiproc{ - string/ImageBandMask xs - string/ImageBandData xs - /ImageDataSource{[currentfile/RunLengthDecode filter dup ImageBandMask/readstring cvx - 
/pop cvx dup ImageBandData/readstring cvx/pop cvx]cvx bind}bd - }bd - /cg_miaiproc{ - string/ImageBandMask xs - string/ImageBandData xs - /ImageDataSource{[currentfile/ASCII85Decode filter/RunLengthDecode filter - dup ImageBandMask/readstring cvx - /pop cvx dup ImageBandData/readstring cvx/pop cvx]cvx bind}bd - }bd -}ifelse -/imsave 0 def -/BI{save/imsave xd mark}bd -/EI{imsave restore}bd -/ID{ -counttomark 2 idiv -dup 2 add -dict begin -{def} repeat -pop -/ImageType 1 def -/ImageMatrix[Width 0 0 Height neg 0 Height]def -currentdict dup/ImageMask known{ImageMask}{F}ifelse exch -L3?{ - dup/MaskedImage known - { - pop - << - /ImageType 3 - /InterleaveType 2 - /DataDict currentdict - /MaskDict - << /ImageType 1 - /Width Width - /Height Height - /ImageMatrix ImageMatrix - /BitsPerComponent 1 - /Decode [0 1] - currentdict/Interpolate known - {/Interpolate Interpolate}if - >> - >> - }if -}if -exch -{imagemask}{image}ifelse -end -}bd -/cguidfix{statusdict begin mark version end -{cvr}stopped{cleartomark 0}{exch pop}ifelse -2012 lt{dup findfont dup length dict begin -{1 index/FID ne 2 index/UniqueID ne and -{def} {pop pop} ifelse}forall -currentdict end definefont pop -}{pop}ifelse -}bd -/t_array 0 def -/t_i 0 def -/t_c 1 string def -/x_proc{ - exch t_array t_i get add exch moveto - /t_i t_i 1 add store -}bd -/y_proc{ - t_array t_i get add moveto - /t_i t_i 1 add store -}bd -/xy_proc{ - - t_array t_i 2 copy 1 add get 3 1 roll get - 4 -1 roll add 3 1 roll add moveto - /t_i t_i 2 add store -}bd -/sop 0 def -/cp_proc/x_proc ld -/base_charpath -{ - /t_array xs - /t_i 0 def - { - t_c 0 3 -1 roll put - currentpoint - t_c cply sop - cp_proc - }forall - /t_array 0 def -}bd -/sop/stroke ld -/nop{}def -/xsp/base_charpath ld -/ysp{/cp_proc/y_proc ld base_charpath/cp_proc/x_proc ld}bd -/xysp{/cp_proc/xy_proc ld base_charpath/cp_proc/x_proc ld}bd -/xmp{/sop/nop ld /cp_proc/x_proc ld base_charpath/sop/stroke ld}bd -/ymp{/sop/nop ld /cp_proc/y_proc ld base_charpath/sop/stroke ld}bd -/xymp{/sop/nop ld /cp_proc/xy_proc ld base_charpath/sop/stroke ld}bd -/refnt{ -findfont dup length dict copy dup -/Encoding 4 -1 roll put -definefont pop -}bd -/renmfont{ -findfont dup length dict copy definefont pop -}bd -L3? 
dup dup{save exch}if -/Range 0 def -/DataSource 0 def -/val 0 def -/nRange 0 def -/mulRange 0 def -/d0 0 def -/r0 0 def -/di 0 def -/ri 0 def -/a0 0 def -/a1 0 def -/r1 0 def -/r2 0 def -/dx 0 def -/Nsteps 0 def -/sh3tp 0 def -/ymax 0 def -/ymin 0 def -/xmax 0 def -/xmin 0 def -/setupFunEval -{ - begin - /nRange Range length 2 idiv store - /mulRange - - [ - 0 1 nRange 1 sub - { - 2 mul/nDim2 xd - Range nDim2 get - Range nDim2 1 add get - 1 index sub - - 255 div - exch - }for - ]store - end -}bd -/FunEval -{ - begin - - nRange mul /val xd - - 0 1 nRange 1 sub - { - dup 2 mul/nDim2 xd - val - add DataSource exch get - mulRange nDim2 get mul - mulRange nDim2 1 add get - add - }for - end -}bd -/max -{ - 2 copy lt - {exch pop}{pop}ifelse -}bd -/sh2 -{ - /Coords load aload pop - 3 index 3 index translate - - 3 -1 roll sub - 3 1 roll exch - sub - 2 copy - dup mul exch dup mul add sqrt - dup - scale - atan - - rotate - - /Function load setupFunEval - - - clippath {pathbbox}stopped {0 0 0 0}if newpath - /ymax xs - /xmax xs - /ymin xs - /xmin xs - currentdict/Extend known - { - /Extend load 0 get - { - 0/Function load FunEval sc - xmin ymin xmin abs ymax ymin sub rectfill - }if - }if - - /Nsteps/Function load/Size get 0 get 1 sub store - /dx 1 Nsteps div store - gsave - /di ymax ymin sub store - /Function load - - 0 1 Nsteps - { - 1 index FunEval sc - 0 ymin dx di rectfill - dx 0 translate - }for - pop - grestore - currentdict/Extend known - { - /Extend load 1 get - { - Nsteps/Function load FunEval sc - 1 ymin xmax 1 sub abs ymax ymin sub rectfill - }if - }if -}bd -/shp -{ - 4 copy - - dup 0 gt{ - 0 exch a1 a0 arc - }{ - pop 0 moveto - }ifelse - dup 0 gt{ - 0 exch a0 a1 arcn - }{ - pop 0 lineto - }ifelse - - fill - - dup 0 gt{ - 0 exch a0 a1 arc - }{ - pop 0 moveto - }ifelse - dup 0 gt{ - 0 exch a1 a0 arcn - }{ - pop 0 lineto - }ifelse - - fill -}bd -/calcmaxs -{ - - xmin dup mul ymin dup mul add sqrt - xmax dup mul ymin dup mul add sqrt - xmin dup mul ymax dup mul add sqrt - xmax dup mul ymax dup mul add sqrt - max max max -}bd -/sh3 -{ - /Coords load aload pop - 5 index 5 index translate - 3 -1 roll 6 -1 roll sub - 3 -1 roll 5 -1 roll sub - 2 copy dup mul exch dup mul add sqrt - /dx xs - 2 copy 0 ne exch 0 ne or - { - - exch atan rotate - }{ - pop pop - }ifelse - - /r2 xs - /r1 xs - /Function load - dup/Size get 0 get 1 sub - /Nsteps xs - setupFunEval - - - - - - dx r2 add r1 lt{ - - 0 - }{ - dx r1 add r2 le - { - 1 - }{ - r1 r2 eq - { - 2 - }{ - 3 - }ifelse - }ifelse - }ifelse - /sh3tp xs - clippath {pathbbox}stopped {0 0 0 0}if - newpath - /ymax xs - /xmax xs - /ymin xs - /xmin xs - - dx dup mul r2 r1 sub dup mul sub dup 0 gt - { - sqrt r2 r1 sub atan - /a0 exch 180 exch sub store - /a1 a0 neg store - }{ - pop - /a0 0 store - /a1 360 store - }ifelse - currentdict/Extend known - { - /Extend load 0 get r1 0 gt and - { - 0/Function load FunEval sc - - - - - { - { - dx 0 r1 360 0 arcn - xmin ymin moveto - xmax ymin lineto - xmax ymax lineto - xmin ymax lineto - xmin ymin lineto - eofill - } - { - r1 0 gt{0 0 r1 0 360 arc fill}if - } - { - - - - - 0 r1 xmin abs r1 add neg r1 shp - } - { - - - r2 r1 gt{ - - 0 r1 - r1 neg r2 r1 sub div dx mul - 0 - shp - }{ - - - - 0 r1 calcmaxs - dup - - r2 add dx mul dx r1 r2 sub sub div - neg - exch 1 index - abs exch sub - shp - }ifelse - } - }sh3tp get exec - }if - }if - - /d0 0 store - /r0 r1 store - /di dx Nsteps div store - /ri r2 r1 sub Nsteps div store - /Function load - 0 1 Nsteps - { - 1 index FunEval sc - d0 di add r0 ri add d0 r0 shp - { - - d0 0 r0 a1 a0 
arc - d0 di add 0 r0 ri add a0 a1 arcn - fill - - - d0 0 r0 a0 a1 arc - d0 di add 0 r0 ri add a1 a0 arcn - fill - }pop - - - /d0 d0 di add store - /r0 r0 ri add store - }for - pop - - currentdict/Extend known - { - /Extend load 1 get r2 0 gt and - { - Nsteps/Function load FunEval sc - - - - - { - { - dx 0 r2 0 360 arc fill - } - { - dx 0 r2 360 0 arcn - xmin ymin moveto - xmax ymin lineto - xmax ymax lineto - xmin ymax lineto - xmin ymin lineto - eofill - } - { - - - xmax abs r1 add r1 dx r1 shp - } - { - - r2 r1 gt{ - - - - calcmaxs dup - - r1 add dx mul dx r2 r1 sub sub div - exch 1 index - exch sub - dx r2 - shp - }{ - - r1 neg r2 r1 sub div dx mul - 0 - dx - r2 - shp - }ifelse - } - } - sh3tp get exec - }if - }if -}bd -/sh -{ - begin - /ShadingType load dup dup 2 eq exch 3 eq or - { - gsave - newpath - /ColorSpace load scs - currentdict/BBox known - { - /BBox load aload pop - 2 index sub - 3 index - 3 -1 roll exch sub - exch rectclip - }if - 2 eq - {sh2}{sh3}ifelse - grestore - }{ - - pop - (DEBUG: shading type unimplemented\n)print flush - }ifelse - end -}bd -{restore}if not dup{save exch}if - L3?{ - /sh/shfill ld - /csq/clipsave ld - /csQ/cliprestore ld - }if -{restore}if -end -setpacking -%%EndFile -%%EndProlog -%%BeginSetup -%%EndSetup -%%Page: 1 1 -%%PageBoundingBox: 0 0 504 256 -%%BeginPageSetup -cg_md begin -bp -sdmtx -%RBIBeginFontSubset: Helvetica -%!FontType1-1.0: Helvetica 1.0000.0.0000 - 14 dict begin/FontName /Helvetica def - /PaintType 0 def - /Encoding 256 array 0 1 255{1 index exch/.notdef put}for - dup 33 /P put - dup 34 /E put - dup 35 /space put - dup 36 /zero put - dup 37 /one put - dup 38 /two put - dup 39 /M put - dup 40 /e put - dup 41 /m put - dup 42 /o put - dup 43 /r put - dup 44 /y put - dup 45 /l put - dup 46 /a put - dup 47 /u put - dup 48 /t put - dup 49 /D put - dup 50 /i put - dup 51 /s put - dup 52 /k put - dup 53 /C put - dup 54 /p put - dup 55 /O put - dup 56 /F put - dup 57 /bracketleft put - dup 58 /comma put - dup 59 /four put - dup 60 /five put - dup 61 /bracketright put - dup 62 /three put - dup 63 /six put - dup 64 /seven put - dup 65 /eight put - readonly def - 42/FontType resourcestatus{pop pop false}{true}ifelse - %APLsfntBegin - {currentfile 0(%APLsfntEnd\n)/SubFileDecode filter flushfile}if - /FontType 42 def - /FontMatrix matrix def - /FontBBox[2048 -1947 1 index div -985 2 index div 2961 3 index div 2297 5 -1 roll div]cvx def - /sfnts [< - 
74727565000900000000000063767420000000000000009C0000036C6670676D000000000000040800000A0C676C79660000000000000E1400001BD86865616400000000000029EC00000038686865610000000000002A2400000024686D74780000000000002A48000000886C6F63610000000000002AD0000000466D6178700000000000002B1800000020707265700000000000002B38000003CF05C0001005BD00280580001A042F001F0000FFD90000FFDA0000FFD9FE55FFE605C70010FE6DFFF1033B000000B9000000B902FE3F3C00C0008D009B00AF000600A800C00028005E009800C9016A00B9015C00B400D6011E002E0080000400B8004C00CC01FFFFD1006600A400AF007400C2009500B1000C0028006D0015004C008E0125FF7A000C0040004C00620084FFA200240038008600BD0039005E008E00EDFFA9FFB300400052005500AA00AB00C200CB012302B10413FFAEFFE4000800510074008400AA00D1FF4CFFAF0012002C004200500051008400BE012503DAFF680018003B0098009C009F00A100C100EC018201B4FF68FF76FFD0FFE100020018001C00530053007D01B401E103AF0486FF9CFFEAFFFE001F0028002A00520060009300A300AA00AF00AF00C001000145016B0174019301950240028202B404850517FEFD00060029004700470048006F008800B400B900C400F200F901EF02180310037403C5FF35FFF3000B004B004C0052005500650076007600870087008E00AB00BB0106013001430150017D0194019501D3022A025502580277027802E6034E035C037903D3047304B2058C0598060BFEF5FFBBFFC7FFD50017001D005B0072007E009C00C200D000F400FA01030106011C0125013B0142015E015E0180019B02B901A101B9025001C001D002AA01DF01E301EF01FB0205020C0215022B0274029302AB02C202CE03690395039903DF03F5043E050205A105E5062507DBFE62FE89FECEFF3BFFE1FFF800030008002100390042004E005F0061006F00700034007F008E00AD00AD00AF00BD00C400C500C900C900C900E3011C00ED00F800F901000112011A0132014D014D014E014F01660169019E01BA01BA01BE01E301EF01F602000200020902110217021C02530262026D028002D50280031B032A034A035A03AF03AF03C803D603FB03FB04050413041504470449008C046D049A049A04A604A804B204CF0539053E054E055605800589058C036305D105D6067E068E06B206EF06F00728074C076F078C00B400C900C000C10000000000000000000000000004012400AF0032006E0063014401620096014301A10161008A00740064018801EF01700028FF5D037E0347023000AA00BE007B0062009A007D0089035C00A1FFD803AA00D70093006C0000008000A70442001D0597001D00820030002A - 
[Remainder of the preceding deleted PostScript figure omitted: hex-encoded embedded Helvetica font subset (sfnt/Type 1 data, CharStrings table) and PostScript drawing commands that render the PIO decomposition diagram (colored per-PE boxes, arrows, and labels).]
diff --git a/src/externals/pio1/doc/images/dof.graffle b/src/externals/pio1/doc/images/dof.graffle
deleted file mode 100644
index 31bf38c3a5c..00000000000
--- a/src/externals/pio1/doc/images/dof.graffle
+++ /dev/null
@@ -1,1953 +0,0 @@
[Deleted OmniGraffle source for the "dof" figure omitted: an OmniGrafflePro plist (created 2009-12-16 by John Dennis) containing the diagram's shapes and labels, including "Memory layout", "Disk layout", "CompDOF", per-PE labels "PE 0", "PE 1", "PE 2", and the per-PE CompDOF values [2, 4, 5], [1, 3], and [6, 7, 8], followed by a base64-encoded QuickLook PDF preview of the figure.]
/zqyY3dvJ5zl6u12N8CxOcjE3Q1XmoKa/D9+rcugQUtFRgaoyf1ti+fPlo8B+qtamuA0 - YO89bXAss32G17tv/uLBM47Bphkz59JzaM0tvYv9LZW98/2V3n1t8nO0+ge3Z9Pbbf7K - fWh21fX1+2ZHWir72iJtVXDMr2H/jIqljVe9a9PQu5ZW/It3VdDOltJ3zZCf+6d3NdLb - M+i7Gum7Gum7ZkRmyO+iEFTNq6u4rRW4E44KwlG9cF3vmOum1cOJ2IbKKN5Jzw8uQ/8P - zOtw7wplbmRzdHJlYW0KZW5kb2JqCjE2IDAgb2JqCjg2NTQKZW5kb2JqCjE3IDAgb2Jq - Cjw8IC9UeXBlIC9Gb250RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2FwSGVpZ2h0IDcy - NyAvRGVzY2VudCAtMjMwIC9GbGFncyAzMgovRm9udEJCb3ggWy05NTEgLTQ4MSAxNDQ1 - IDExMjJdIC9Gb250TmFtZSAvRE1KU0RRK0hlbHZldGljYSAvSXRhbGljQW5nbGUgMAov - U3RlbVYgOTggL01heFdpZHRoIDE1MDAgL1N0ZW1IIDg1IC9YSGVpZ2h0IDUzMSAvRm9u - dEZpbGUyIDE1IDAgUiA+PgplbmRvYmoKMTggMCBvYmoKWyAyNzggMCAwIDAgMCAwIDAg - MCAwIDAgMCAwIDI3OCAwIDAgMCA1NTYgNTU2IDU1NiA1NTYgNTU2IDU1NiA1NTYgNTU2 - IDU1NgowIDAgMCAwIDAgMCAwIDAgMCAwIDcyMiA3MjIgNjY3IDYxMSAwIDAgMCAwIDAg - MCA4MzMgMCA3NzggNjY3IDAgMCAwIDAgMCAwCjAgMCAwIDAgMjc4IDAgMjc4IDAgMCAw - IDU1NiAwIDAgMCA1NTYgMCAwIDAgMjIyIDAgNTAwIDIyMiA4MzMgMCA1NTYgNTU2IDAK - MzMzIDUwMCAyNzggNTU2IDAgMCAwIDUwMCBdCmVuZG9iago5IDAgb2JqCjw8IC9UeXBl - IC9Gb250IC9TdWJ0eXBlIC9UcnVlVHlwZSAvQmFzZUZvbnQgL0RNSlNEUStIZWx2ZXRp - Y2EgL0ZvbnREZXNjcmlwdG9yCjE3IDAgUiAvV2lkdGhzIDE4IDAgUiAvRmlyc3RDaGFy - IDMyIC9MYXN0Q2hhciAxMjEgL0VuY29kaW5nIC9NYWNSb21hbkVuY29kaW5nCj4+CmVu - ZG9iagoxIDAgb2JqCjw8IC9UaXRsZSAoVW50aXRsZWQpIC9BdXRob3IgKEpvaG4gRGVu - bmlzKSAvQ3JlYXRvciAoT21uaUdyYWZmbGUgUHJvZmVzc2lvbmFsKQovUHJvZHVjZXIg - KE1hYyBPUyBYIDEwLjUuOCBRdWFydHogUERGQ29udGV4dCkgL0NyZWF0aW9uRGF0ZSAo - RDoyMDA5MTIxNjE2MjEyNlowMCcwMCcpCi9Nb2REYXRlIChEOjIwMDkxMjE2MTYyMTI2 - WjAwJzAwJykgPj4KZW5kb2JqCnhyZWYKMCAxOQowMDAwMDAwMDAwIDY1NTM1IGYgCjAw - MDAwMTI4NTcgMDAwMDAgbiAKMDAwMDAwMTI1NCAwMDAwMCBuIAowMDAwMDAzMjgyIDAw - MDAwIG4gCjAwMDAwMDAwMjIgMDAwMDAgbiAKMDAwMDAwMTIzNCAwMDAwMCBuIAowMDAw - MDAxMzU4IDAwMDAwIG4gCjAwMDAwMDMyNDYgMDAwMDAgbiAKMDAwMDAwMjI5NSAwMDAw - MCBuIAowMDAwMDEyNjgzIDAwMDAwIG4gCjAwMDAwMDE0NjcgMDAwMDAgbiAKMDAwMDAw - MjI3NSAwMDAwMCBuIAowMDAwMDAyMzMxIDAwMDAwIG4gCjAwMDAwMDMyMjYgMDAwMDAg - biAKMDAwMDAwMzM2NSAwMDAwMCBuIAowMDAwMDAzNDE1IDAwMDAwIG4gCjAwMDAwMTIx - NjAgMDAwMDAgbiAKMDAwMDAxMjE4MSAwMDAwMCBuIAowMDAwMDEyNDE3IDAwMDAwIG4g - CnRyYWlsZXIKPDwgL1NpemUgMTkgL1Jvb3QgMTQgMCBSIC9JbmZvIDEgMCBSIC9JRCBb - IDxhNDQ0ZDMyNTVlZWYyYmIzNzc2ZGFiN2E0NmZiZjM4ZD4KPGE0NDRkMzI1NWVlZjJi - YjM3NzZkYWI3YTQ2ZmJmMzhkPiBdID4+CnN0YXJ0eHJlZgoxMzA3NQolJUVPRgoxIDAg - b2JqCjw8L0F1dGhvciAoSm9obiBEZW5uaXMpL0NyZWF0aW9uRGF0ZSAoRDoyMDA5MTIx - NjE1MzUwMFopL0NyZWF0b3IgKE9tbmlHcmFmZmxlIFByb2Zlc3Npb25hbCA1LjEuMSkv - TW9kRGF0ZSAoRDoyMDA5MTIxNjE2MjEwMFopL1Byb2R1Y2VyIChNYWMgT1MgWCAxMC41 - LjggUXVhcnR6IFBERkNvbnRleHQpL1RpdGxlIChub3JlYXJyLmdyYWZmbGUpPj4KZW5k - b2JqCnhyZWYKMSAxCjAwMDAwMTM2MTMgMDAwMDAgbiAKdHJhaWxlcgo8PC9JRCBbPGE0 - NDRkMzI1NWVlZjJiYjM3NzZkYWI3YTQ2ZmJmMzhkPiA8YTQ0NGQzMjU1ZWVmMmJiMzc3 - NmRhYjdhNDZmYmYzOGQ+XSAvSW5mbyAxIDAgUiAvUHJldiAxMzA3NSAvUm9vdCAxNCAw - IFIgL1NpemUgMTk+PgpzdGFydHhyZWYKMTM4MjUKJSVFT0YK - - QuickLookThumbnail - - TU0AKgAAC6yAP+BACCQWDQeEPx6O95gAEgB7PIChILAwCPh6PgEAwEQiPR+QSGRACFuq - HA59vKJRSLSOXS+YQV+O91PMGhICu53PsHBYIzN6AgIy2Y0WjUeEQJ/0gANxfKxiNd0t - 16hcymooABnrZoPsQkYhC2O0yyQd1KA9JgDDwVNxqAAwVgGt9juQODkWhECWW+QR6NhW - KxnBgUhV3PqvjkJtBeMkKDwkCQGX3KX2lUxzNxxBYMPZYLJwDYnES5sJhvASE4eh7K2V - 3sNfOQcjYC5/Q6MPAR3OZ+BINUTW0d6OZxPsLBZ5tJjOECjEYg9erJwkMtD0I8HsUbL9 - nud3vd/weGydvxeXzef0emYtJpsulpGYOFwPsPiACzByuN/BsOgKYACAAmBaFgrpGahj - GMABhmGmBxHmeYPAaBqYhUFQWCaJqYD4ahqHCop5p0m4JJGepum6BYRhGmATIIPoWBY9 - SQvYYgVhYHiYFyXAQiOJBvpgX5fhWIIgmqmBqmoQ8BjxAxeF4FhSlKmBeIEIgAwAl5qB - yHIWDMMyYBtDhlxemAEmiaJ7heF6RgkWZZndDCYCHDknRhGKPxnGsbpfHMdx7H8gyHIq - 
XyPJIWSWkRqSbJ8opfKZ/yrK6XSzLcuy/MMxpfMszzTNc2zfDKXzlRMxnwd53n4Ap7nc - ewNA8CyjoUegAAY4CQowfE8RtHEdR5HyXyBIUiSNJElSZJ0oSlKkrJhScuS8l8wGpMU6 - pdTU0TUkU2TdOFQznMZ3mcTBRGUBwNg0dZrHMIAphWbZonSBwLgceR9gABwAAIfx9AAF - Qcg8d5vGyZ50gwFwDHKdB9H6eB1nUBgTBmCR9gFXM9JdPlez/YNBJdQli0RRVkUbZVIw - NLVnUtaVMWrM1r07bdQJdUU6AAhRuG4cgFAcBh2nEd4MBQDh/n0fYAn0d5unUfgRhMCR - mmIdgkCAEJznOchwneAwAGwbQJhkCx+gWBp4HKfoPg8B2K13PtfJdYFA2HQtDpDRNj0Y - l1HUhZmT0raFL2okdrU5bNPW5mVvcBOyDbVPdeT9X9AWFQdiUNY1F2TR9lyxvlnpdaNp - zJlvBpDbVPzjxHFI89hnoIaCYRMc8UgymBvm8c4QhF2aYhDAYfwMZxnAAcpypgbxxnGE - QOg6mIJAkFgdh2mBWw4hqYvycb+eWkTMm4DQSBImDrgAJ+V9SgryIQcH1Hj9gW/cAX4J - GbH5gx+oIfukB2/19h4hB/z8D/PmgEQdqo5xlwHCdAmAZ3X0EHH9A8Y8ER4QTBvBUCkF - yQPHHG8AZyEgGgohABqERBR9wlgiMd/Q7XvgkBNC0A8LyRjkhkNeGkKwQw3JGOmHQ9oe - Q3BDAsggmYhJaByC6I0QDswNJDDIcgzInAfihEYFwA4qFJIEOKLD8xsQlH3CAFEPoqAD - QePMbMZRwxnA5GmLytAGDejdFoBMcYvRpA4SCB4/hpR5jOOEI0fY4kPgFG4bwsZCBwkM - lZksSDKRKJGPqR0HB1yRgqDc46ryRSRHXFqTEK4WgmARJ+Rw+hqyjGDKUlQ8opA9lUBO - VhI5ToIGNGwG0swDS1iQJyXAM5dHOBjIo7kjCijmmFAcZYG5jAwmQASZSJB6j1jKNkb8 - 0QHzTkRBMeAHpsALm1FgcQ/ZvReh9AAgrNxuR5GlMgGAIp1S+ABHswIrA5TxkROw7EwC - jxcGfPkdE+5Zg2fqBiHMOkODUGtQWRAJaEPuBbCIDRBR1UPi1CmbC/1TTeH69AHcbJ6E - EE/R2KUugZ0biSQM7kOh0jJpRP+XkygCTchoNeO8Xn/AgkRNyLUXAT05nUCKMM5BYU/k - RRgFNQ5pgPiRHcQ1SZrCKqZOKkRwZ7GVJ0O6l8+Rn0Wh8DWrQGauEwHZV+LVDx1ShgAE - Csz9wITkG1WuosXquO6TtSYIVc4PDIrtU+X9JDsj8r4Gmv1fB+BJsEIKwiLwWChsRC8A - 5Rq7DIDHY8MVkY6D4soPeywaLMFKj3FqZo9bGh/tBRo8UGgp2lFfaeYwG68UjKWdx9Q4 - EoClnQEu2ke4OBDtwA63RRrQB/lqAYNtwY/huuJAUSlx46EIF9cupIhoiCDugeiYQ5qG - WrgZXoowsBVirAOqZBo8xwgeBUZMl45Bpj1A4BA1hLxzD+G0BoD4EyYT7YIBgC5Ixwjm - VMBoIJMB1xYArNi/zxwZA6B0TBFQAEtuJutVEkQtxNibCSghKQNRwhCDUL8mAxg7hYB4 - OosZLhlBkGADQHY4CYR5AJEYfhIxkjNA2DoGY5CYAnSaNkIgRMEC7F2N0IoRSYCaQ4GV - 8pLh8DqHMPIii9wCAIL2Qcfg+B+ZNyeZXBxIcIYSwoo3C2GMNEvw5h7EBMMR4lxPikaW - KwXYtJFi/GOM8a43xzjvHuP8g5DyKSId44hpjkHMOMbY6QBgNheAMfY+AKg8BkPUXQtx - wgDBACoFoDR6DsHoPwAYIQQAaHyOwaAyh6A4OrJYj2VyQZZwmgnLmF8M4bw7h/EJI8y4 - mxQS/FWLMXYwxljQl+NheY4x0S8EePMfZAJfkIamRMGK2HEL4YA3AHgRAE/oeQAwIgSA - Cw82g0xajJG2AkFgLR9PEAuAYfwHARgFGmOIf4HAPgoBkCR8WpbsFF1RltvGXdW5g1fm - Ml+s8z62zTrjNuus4a9zlsEl2w867GJdsjZR5cojvHwAQoeVSj6mI/vfVW+dWZfJdmHW - GZMSa0zRmrNhIc3a7zjr/OewtiZ22PnjZdeONEe45hXj+rsxaxJFwDWpLtb5r1zm/XhL - tfbAzpsXO+yc82r5uQgWYpBSARQ4TAcgExxAbByO++YugNgZH0BS/wMRpAUA3a0lxKh7 - W6AUSMcw7gBCgAkHkmA/4sABwETCYQEEKkwC0QQJvT+bb1ut4fxFT+o+J8Z408/i/HeR - 8lazyflfLHl6iTMaY0usgiAwPIcY8QOAtBIPYbg2AAAcBQBbnyMfNecAn570HovSAAHU - O8BBPuMJ2z2Ngcg+gHAJHkOj1IJgQAKHkNwdwCQUAe3n5cgnUTXiCEmN0KIVQWDZG2Pw - JASgTjfGkNkngJ94/PRj9P6v1/s/b+6DgdwzBkAIBwDjUjihsC+FkNQfQCx0C5GaB0Dq - DUBcAIG+FseCCcCQBa+gIM6iyOG4yUBIAkHc82HcBCBcBIHQGkGYAMBMBwA8vIdTAdAh - AlApAsBQAQVKH4AYAi9aPUIWHpBWAIGwGQGWAAA8BiAuHkGoGzBsBwBQobAW+i8NCDCJ - CK7vCHCNCTCUIO40HeGQGKG0HgHCGuAMBsCwCEBII6HfBmHaA4BwMkHoKcFwHEA0BaAU - HsAsBwBk/MO/CcGQHaAWHsHOA4CEOsAAHwGmFwF0HqBOBaAMH6Ag/KPAG4GcGoHYG8GY - HsBgCpCwAZDCGGFYGUAKDAC0BwJIG4FMFIGgBABaBEBSB3DWL4Ho2aGIG4A8BuBsvkmW - HmHwAAKEAKyiH4HuGeGQGkAqBOBMHMGyG0AgBGA+Hg9CBiB8BwL0JDCaG4HMHgHEGMGm - A8+6MkAIGcFYEiGUAMCODQCcBRFGGcGIGgHqBWBoAyAYAs3k92O8HfGQAKAmHsHIH2BA - 9WAIH4HUG4GEFsGsAeB4nSBI+cPAHEGwGmGaGAF8HuBMCWC1CwH4HMGcGkAABcBkA0AI - L+FkEMFEG+CcC6CWBOBLHKLIHfDyGoASBcAuHbA00EAgAAHiHUHAHMAaBIBAHyHsAYQG - AgHOGsGxJCBsHqGUGsAMBEBUB1C/BbAZCQIRCcGKG8AgAcm0AgAUAiA7A+IIsoHgHcHk - AIAs+QG4HQmUHY6zDVDZHOrsHaBoBgA4HwHkHcAAOOHkGwGuAAAoAeHQHgAbEEO/EIGw - AOXuAGAYqMAYAyVeVkMmoeAAA0YkZuHmHyHgHYA1K8L7ITGQAcA5AkJ2AsAkQmAAHuHG - GmG2AWBaBU9sHcAkVcHdMfH3BAJe8hCXNS8kICAAAA4BAAADAAAAAQBYAAABAQADAAAA - 
AQAsAAABAgADAAAAAwAADFoBAwADAAAAAQAFAAABBgADAAAAAQACAAABEQAEAAAAAQAA - AAgBEgADAAAAAQABAAABFQADAAAAAQADAAABFgADAAAAAQHwAAABFwAEAAAAAQAAC6MB - HAADAAAAAQABAAABPQADAAAAAQACAAABUwADAAAAAwAADGCHcwAHAAARHAAADGYAAAAA - AAgACAAIAAEAAQABAAARHGFwcGwCAAAAbW50clJHQiBYWVogB9kACQAeABYACgAXYWNz - cEFQUEwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPbWAAEAAAAA0y1hcHBsr4xF11Pr - 95yHzZAk7zKWsgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOclhZWgAAASwA - AAAUZ1hZWgAAAUAAAAAUYlhZWgAAAVQAAAAUd3RwdAAAAWgAAAAUY2hhZAAAAXwAAAAs - clRSQwAAAagAAAAOZ1RSQwAAAbgAAAAOYlRSQwAAAcgAAAAOdmNndAAAAdgAAAYSbmRp - bgAAB+wAAAY+ZGVzYwAADiwAAABkZHNjbQAADpAAAAI+bW1vZAAAENAAAAAoY3BydAAA - EPgAAAAkWFlaIAAAAAAAAGAAAAA1xAAABypYWVogAAAAAAAAb5EAALNjAAAY8lhZWiAA - AAAAAAAnRAAAFvMAALMJWFlaIAAAAAAAAPNSAAEAAAABFs9zZjMyAAAAAAABDEIAAAXe - ///zJgAAB5IAAP2R///7ov///aMAAAPcAADAbGN1cnYAAAAAAAAAAQHNAABjdXJ2AAAA - AAAAAAEBzQAAY3VydgAAAAAAAAABAc0AAHZjZ3QAAAAAAAAAAAADAQAAAgAAACkAkAEg - AboCdgNQBEUFYQaUB90JQQq6DDkNwQ9XEOgSdhP+FXkW5hhKGZsa5BwVHTYeWh95IJgh - uCLZI/8lIiZGJ2ookCm7KuUsEC1DLnIvpTDbMg8zSTSFNcQ3ADhFOYM6vzvrPRE+Oj9a - QHtBoELFQ+ZFCkYuR1FIcUmSSrZL2Ez7ThxPOlBZUX1SnFO7VNpV+lcdWERZb1qiW9Rd - BV40X2RgkWHBYvJkImVVZo9nxGj9ajtrf2y+bgVvT3Cdce5zQ3SVdfB3QHhieWx6cnt5 - fIN9hn6Lf4qAjIGHgn2DeIRohVWGQocpiA2I7InHiqGLdYxKjRqN546zj4CQVpEtkgSS - 25OylIiVXpYylweX3ZizmYmaYZs4nA+c553BnpyfeKBVoTSiFKL1o9WkuKWMplanI6fy - qMCpjapcqyur+KzHrZiuaK86sAqw2rGssn2zTrQgtPK1xbaUt2O4M7kDudC6kLtNvAi8 - xL2CvkC+/7++wHzBPcIAwsLDhcRMxRPF28alx3HIPckLydvKrcuAzFPNJc30zr7PiNBR - 0RnR4tKu03fUP9UK1dXWoNds2DnZB9nV2qTbdNxE3RXd6d6634/gY+E24g3i6uPC5JXl - ZeYx5vvnyeiU6WPqNusN6+js0e3G7sbv2PD/8kLzmfUK9qT4XPpG/GX+kv//AAAAQQDk - AXECFwLTA7cEsAXHBvcIPQmVCwYMdQ30D30RABKCE/4VbhbPGCoZeBq3G+YdBh4oH0gg - ZCGBIqEjxSTkJgYnKChGKW8qkyu4LOMuDi83MGQxjjK+M/A1JDZTN4o4vDnrOwg8HT01 - Pkg/WEBuQYNClkOqRL1Fz0bhR/JJBkoYSylMOk1JTldPalB3UYRSklOhVLFVxlbiWAJZ - I1pDW2FcgV2fXr5f32D/YiFjSmRvZZdmw2f0aSJqVWuNbMZuA29DcH5xxHL8dBV1HHYf - dyN4J3koeih7J3wmfSJ+Gn8TgAmA/IHugt6DzYS4hZ+GiodtiFOJNooXiveL04yrjYWO - W48zkAyQ5pHAkpqTcpRQlS2WDJbsl8yYrpmPmnWbW5xDnSueFp8Cn+6g2qHGoqKjbaQ9 - pRCl46a1p4yoYKk1qg2q5au+rJqtcq5LryWv/7DZsbCyirNjtDi1DLXgtrS3iLheuTO6 - BrrYu6y8f71TviW+97/KwJ3BbsJAwxTD58S5xY3GYcc1yArI38m1yo3LZMw/zSTOE88D - z/LQ3dHI0rTTnNSD1WrWUtc52B/ZB9nv2tjbwtyv3Zvei9994G/hZeJb41LkS+VG5kTn - Qeg/6UPqSutS7Fztau5674/wqPHA8trz9/Ua9jz3X/iD+av60vv3/R7+P/9B//8AAAAb - AF4AwwE2Aa4CPgLkA5oEYwU+BioHHAgZCRoKIgskDCwNKw4kDxIP+xDaEa4SdhM4E/kU - tRVyFjAW7xevGG4ZLhnwGrMbdhw7HQEdzB6WH2EgLyD7IcwinyNyJEUlHiXzJsEnhCg+ - KPoptCpuKysr6CymLWYuJi7nL6gwajExMfUyujOANEY1CzXUNps3YTgoOPA5uDp/O0Y8 - EDzaPaQ+bj83QAFAzEGXQmNDMEQARM5FnUZvR0RIFEjoSb1KlEtrTERNG030TslPlVBc - USNR7FK2U4JUUVUeVfBWwleWWG9ZRlohWv9b31zAXaFeg19pYExhNWIbYwFj6GTKZahm - hmdhaEBpH2n/auBrwWyhbYZua29PcDdxHnIGcu5z13TCdax2mHeEeHF5X3pJezF8AXzE - fYN+RX8Df8CAf4E4gfKCrINnhCKE3YWYhlOHD4fNiIyJSooLis6LkIxSjReN3I6hj1qQ - E5DLkYSSQZMAk8GUhJVJlhKW35eumH+ZWZozmxCb8JzUnbqeo5+PoH2hb6Jho06kMqUQ - peqmxaehqH6pX6o/qyCsA6zprc6us6+esIexcLJbs0W0MLUbtga28LfcuMW5rrqeu8C8 - 5L4BvxnAM8FTwnnDqMTixirHf8jsynDMB82/z6TRtNP31nnZWtye4JPlTuuk9Ur//wAA - bmRpbgAAAAAAAAY2AACZAgAAVsgAAFUEAACRsAAAJ5EAABVgAABQDQAAVDkAAgo9AAH6 - 4QABJmYAAwEAAAIAAAATACwARQBfAHgAkQCrAMUA3wD5ARQBMAFMAWoBhwGmAcYB5wIK - Ai4CVAJ8AqcC0wMDAzUDagOlA+QEJARnBKwE8wU7BYUFzwYcBmoGugcMB10HsAgFCFsI - sAkICWEJugoUCnEKzQsqC4cL5gxHDKYNCQ1sDdUORA61DyYPnBAUEIsRBBF/Ef0SehL6 - E3sT/xSFFQoVkRYbFqUXMhfCGFMY4hl2Ggsaohs6G9McbB0EHZweMR7HH2Af+iCXITQh - 1CJ0IxQjtiRaJP0lnSZCJucniyguKM8pdSoZKrsrXiwALKItRC3mLosvLC/OMHoxRDId - Mvkz1zSzNZg2ezdnOFE5Qzo9OzM8Nj0/Pko/YEB7QaJCzEQCRTlGfkfISRJKUUuQTNRO - 
GU9iUK1R/lNPVKFV91dKWKNZ/VtVXK1eBl9fYLdiD2NsZMNmN2fBaUJqyGxTbdpvaXD4 - coV0FXWldzx40XpqfAN9oH87gOGCiYQyheCHsYmPi3ONUo8zkRSS+5Talr2Ynpp4nFee - MKAIod+jtKWGp1apKasHrPKu47DYstK0xLbCuL+6uby4vrXAssKvxK3GqsipyqHMo86c - 0J7SndSK1ofYktqs3NLe7+EZ4zPlQudH6TDq/+y77lvv4fFL8qbz8PUp9kr3Yvhu+Wj6 - V/s2/BD83v2k/pf//wAAAAwAIwA8AFQAbgCHAKEAuwDWAPEBDAEpAUYBZAGCAaIBwwHl - AgkCLgJVAn8CqwLZAwkDPQN0A7AD8AQxBHQEugUCBUsFlQXgBi8GfgbPByMHdQfKCCII - eQjRCSsJhwnjCkIKoQsAC2ELwgwnDIoM8A1YDcMOOA6wDygPpBAjEKERIRGkEigSrhM2 - E8AUTBTaFWgV+RaMFyEXuRhTGOsZiRooGskbaxwOHLEdUh3xHpAfMR/UIHkhHyHHInAj - GiPFJHIlHiXIJnYnJCfSKH0pKinaKocrMyvgLIwtOS3lLpYvQS/wMK0xgTJeMz40HzUB - Nek20Te+OKs5oDqYO5A8kT2TPps/pUC1Qc5C5EQFRSRGTEd3SKZJ40seTGJNo07mUCtR - cVK9VAFVSlaRV9pZIlpuW7Rc/F5EX4xg02IZY2VkrGYSZ5lpFGqQbBFtjG8OcI5yDHOM - dQl2j3gUeZt7I3y0fkB/04FwgxCEsoZVh/eJoItPjPyOrZBgkhqT0ZWMl06ZDJrMnJOe - VqAcoeSjrKV0pzqpBarCrGOuAq+nsVKzArSvtmm4Jbnhu5+9ZL8kwOjCqsRuxivH78mo - y2PNHc7S0IvSQNP31abXV9kM2rncYN4F363hT+Lt5IrmIOey6Urq3+xw7fvviPEW8qX0 - MPW490f41vpk+/39jv//AAAAHQBEAGoAkQC4AN8BCAEyAV4BiwG8Ae4CJAJeAp4C4gMs - A38D3AQ+BKYFEgWBBfMGagbjB18H3ghgCOEJZwnuCnkLAwuPDCAMrw1DDeEOkg9FD/4Q - uBF0EjMS9BO3FH4VQxYNFtgXqBh6GUwaJRr/G9scuh2fHoIfaCBRIT4iLSMdJBElBSX4 - JvEn6SjfKd0q1yvULNEtzy7SL9Mw5DIEMyc0SDVrNos3rzjOOfI7DjwvPU0+aT+FQKJB - wkLdQ/5FGkY9R2BIhUm4Su1MKU1hTppP1VEQUlFTjFTJVglXRFiFWcZbCFxJXYxe0GAV - YVpioGPuZUJm0Whyag9rvG1mbx9w2XKUdFJ2FHfZeZx7XH0ift6AoYJkhCSF5ofHibCL - m417j1iRMJMGlM6WlZhWmgebup1nnw6gsqJSo++liKceqLSqWawVrd6vqLF0sz21ALbJ - uI+6ULwSvdi/lMFXwxjE2saeyGXKKMv0zbvPi9Fe0yLUlNYJ14nZE9qe3CTdo98b4Ivh - 8eNM5J3l4+cY6EDpYup764Pshe1y7l3vN/AP8Nfxn/JX8w7zu/RY9Pb1i/YX9qP3KPec - +A/4g/jx+VL5tPoW+nj6yvsT+1z7pfvu/Df8dvym/NX9Bf00/WT9k/3D/fP+Iv5P/nr+ - pf7Q/vv/J/9S/33/qP/U//8AAGRlc2MAAAAAAAAACkNvbG9yIExDRAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA - AAAAAAAAAAAAAAAAAAAAAABtbHVjAAAAAAAAABIAAAAMbmJOTwAAABIAAADocHRQVAAA - ABYAAAD6c3ZTRQAAABAAAAEQZmlGSQAAABAAAAEgZGFESwAAABwAAAEwemhDTgAAAAwA - AAFMZnJGUgAAABIAAAFYamFKUAAAAA4AAAFqZW5VUwAAABIAAAF4cGxQTAAAABIAAAGK - cHRCUgAAABgAAAGcZXNFUwAAABIAAAG0emhUVwAAAA4AAAHGcnVSVQAAACQAAAHUa29L - UgAAAAwAAAH4ZGVERQAAABAAAAIEbmxOTAAAABYAAAIUaXRJVAAAABQAAAIqAEYAYQBy - AGcAZQAtAEwAQwBEAEwAQwBEACAAYQAgAEMAbwByAGUAcwBGAOQAcgBnAC0ATABDAEQA - VgDkAHIAaQAtAEwAQwBEAEwAQwBEAC0AZgBhAHIAdgBlAHMAawDmAHIAbV9pgnIAIABM - AEMARADJAGMAcgBhAG4AIABMAEMARDCrMOkw/AAgAEwAQwBEAEMAbwBsAG8AcgAgAEwA - QwBEAEsAbwBsAG8AcgAgAEwAQwBEAEwAQwBEACAAQwBvAGwAbwByAGkAZABvAEwAQwBE - ACAAYwBvAGwAbwByX2mCcm2yZnaYb3k6VmgEJgQyBDUEQgQ9BD4EOQAgBBYEGgAtBDQE - OARBBD8EOwQ1BDnO7LfsACAATABDAEQARgBhAHIAYgAtAEwAQwBEAEsAbABlAHUAcgBl - AG4ALQBMAEMARABMAEMARAAgAGMAbwBsAG8AcgBpAABtbW9kAAAAAAAABhAAAJyBAAAA - AMG9/4AAAAAAAAAAAAAAAAAAAAAAdGV4dAAAAABDb3B5cmlnaHQgQXBwbGUsIEluYy4s - IDIwMDkA - - ReadOnly - NO - RowAlign - 1 - RowSpacing - 36 - SheetTitle - Canvas 1 - SmartAlignmentGuidesActive - YES - SmartDistanceGuidesActive - YES - UniqueID - 1 - UseEntirePage - - VPages - 1 - WindowInfo - - CurrentSheet - 0 - ExpandedCanvases - - FitInWindow - - Frame - {{89, 37}, {716, 841}} - ListView - - OutlineWidth - 142 - RightSidebar - - Sidebar - - SidebarWidth - 138 - VisibleRegion - {{1, 1}, {574, 732}} - Zoom - 1 - ZoomValues - - - Canvas 1 - 0.0 - 1 - - - - saveQuickLookFiles - YES - - diff --git a/src/externals/pio1/doc/images/dof.png b/src/externals/pio1/doc/images/dof.png deleted file mode 100644 index f946907053c..00000000000 Binary files a/src/externals/pio1/doc/images/dof.png and /dev/null differ diff --git 
a/src/externals/pio1/doc/testpio_example.txt b/src/externals/pio1/doc/testpio_example.txt deleted file mode 100644 index 21875023bad..00000000000 --- a/src/externals/pio1/doc/testpio_example.txt +++ /dev/null @@ -1,390 +0,0 @@ -/****************************************************************************** - * - * - * - * Copyright (C) 2009 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ -/*! \page testpio_example testpio: a regression and benchmarking code - -The testpio directory, included with the release package, tests both the accuracy -and performance of reading and writing data -using the pio library. - -The testpio directory contains 3 perl scripts that you can use to build and run the testpio.F90 code. -
  • testpio_build.pl - builds the pio, timing and testpio libraries and executables
  • testpio_bench.pl - sets up, builds, and runs a user-specified set of test
    suites using testpio and generates log files with benchmarking information
  • testpio_run.pl - sets up, builds, and runs a user-specified set of test
    suites using testpio

Additional C shell script wrappers are packaged with the testpio suite to allow
for environment customization of the 3 perl scripts listed above. The following
help information describes in more detail how the testpio code works.

The tests are controlled via a namelist. Sample namelist files are located in
the testpio/namelists directory. A namelist file contains a set of general
namelists and specific namelists to set up a computational decomposition and an
IO decomposition. The computational decomposition should be set up to duplicate
a realistic model data decomposition. The IO decomposition is generally not
used, but in some cases it can be used and impacts IO performance. The IO
decomposition is an intermediate decomposition that provides compatibility
between a relatively arbitrary computational decomposition and the MPI-IO,
netcdf, pnetcdf, or other IO layers. Depending on the IO methods used, only
certain IO decompositions are valid. In general, the IO decomposition is not
used and is set internally.

The namelist input file is called "testpio_in". The first namelist block,
io_nml, contains some general settings:

namelist io_nml
  casename        string, user-defined test case name
  nx_global       integer, global size of "x" dimension
  ny_global       integer, global size of "y" dimension
  nz_global       integer, global size of "z" dimension
  ioFMT           string, type and i/o method of data file
                  ("bin","pnc","snc"): binary, pnetcdf, or serial netcdf
  rearr           string, type of rearranging to be done
                  ("none","mct","box","boxauto")
  nprocsIO        integer, number of IO processors, used only when rearr is
                  not "none"; if rearr is "none", then the IO decomposition
                  will be the computational decomposition
  base            integer, base pe associated with nprocsIO striding
  stride          integer, the stride of io pes across the global pe set;
                  stride=-1 directs PIO to calculate the stride automatically
  num_aggregator  integer, mpi-io number of aggregators, only used if no pio
                  rearranging is done
  dir             string, directory to write output data; this must exist
                  before the model starts up
  num_iodofs      integer, tests either 1dof or 2dof init decomp interfaces (1,2)
  maxiter         integer, the number of trials for the test
  DebugLevel      integer, sets the debug level (0,1,2,3)
  compdof_input   string, setting of the compDOF ('namelist' or a filename)
  compdof_output  string, whether the compDOF is saved to disk
                  ('none' or a filename)

Notes:
  - the "mct" rearr option is not currently available
  - if rearr is set to "none", then the computational decomposition is also
    going to be used as the IO decomposition; the computational decomposition
    must therefore be suited to the underlying I/O methods
  - if rearr is set to "box", then pio is going to generate an internal IO
    decomposition automatically and pio will rearrange to that decomp
  - num_aggregator is used with mpi-io and no pio rearranging; mpi-io is only
    used with binary data
  - the nprocsIO, base, and stride implementation has some special options:
      - if nprocsIO > 0 and stride > 0, then use the input values
      - if nprocsIO > 0 and stride <= 0, then stride=(npes-base)/nprocsIO
      - if nprocsIO <= 0 and stride > 0, then nprocsIO=(npes-base)/stride
      - if nprocsIO <= 0 and stride <= 0, then nprocsIO=npes, base=0, stride=1
    (for example, with npes=64, base=0, nprocsIO=16, and stride<=0, the stride
    becomes (64-0)/16 = 4)

Two other namelist blocks exist to describe the computational and IO
decompositions, compdof_nml and iodof_nml. These namelist blocks are identical
in use; an illustrative testpio_in combining io_nml and compdof_nml is
sketched after the table below.

namelist compdof_nml (or iodof_nml)
  nblksppe    integer, sets the number of blocks desired per pe; the default
              is one per pe for automatic decomposition, and increasing this
              increases the flexibility of decompositions
  grdorder    string, sets the gridcell ordering within the block
              ("xyz","xzy","yxz","yzx","zxy","zyx")
  grddecomp   string, sets up the block size with gdx, gdy, and gdz, see below
              ("x","y","z","xy","xye","xz","xze","yz","yze","xyz","xyze",
               "setblk")
  gdx         integer, "x" size of block
  gdy         integer, "y" size of block
  gdz         integer, "z" size of block
  blkorder    string, sets the block ordering within the domain
              ("xyz","xzy","yxz","yzx","zxy","zyx")
  blkdecomp1  string, sets up the block / processor layout within the domain
              with bdx, bdy, and bdz, see below
              ("x","y","z","xy","xye","xz","xze","yz","yze","xyz","xyze",
               "setblk","cont1d","cont1dm")
  blkdecomp2  string, provides an additional option to the block decomp after
              blkdecomp1 is computed ("","ysym2","ysym4")
  bdx         integer, "x" number of contiguous blocks
  bdy         integer, "y" number of contiguous blocks
  bdz         integer, "z" number of contiguous blocks
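
For illustration, a minimal testpio_in sketch that combines an io_nml block
with a compdof_nml block might look as follows. The group and variable names
come from the two tables above; all of the values (the grid sizes, the 'pnc'
format, the box rearrangement, 4 IO tasks, and the automatic block sizes of 0)
are invented for this example and do not correspond to any namelist shipped
with PIO.

   &io_nml
     casename       = 'example'    ! user-defined test case name
     nx_global      = 360          ! illustrative global grid sizes
     ny_global      = 180
     nz_global      = 1
     ioFMT          = 'pnc'        ! parallel netcdf
     rearr          = 'box'        ! PIO generates the IO decomposition internally
     nprocsIO       = 4
     base           = 0
     stride         = -1           ! -1: PIO calculates the stride automatically
     num_aggregator = 1
     dir            = './'
     num_iodofs     = 1
     maxiter        = 10
     DebugLevel     = 0
     compdof_input  = 'namelist'
     compdof_output = 'none'
   /
   &compdof_nml
     nblksppe   = 1                ! one block per pe
     grdorder   = 'xyz'
     grddecomp  = 'xy'             ! automatic block sizing in x and y
     gdx        = 0
     gdy        = 0
     gdz        = 0
     blkorder   = 'xyz'
     blkdecomp1 = 'xy'
     blkdecomp2 = ''
     bdx        = 0
     bdy        = 0
     bdz        = 0
   /

Here rearr = 'box' relies on PIO's internally generated IO decomposition, and
stride = -1 leaves the IO-task striding for PIO to compute, following the
io_nml notes above.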

A description of the decomposition implementation and some examples are
provided below.

Testpio writes out several files, including summary information to stdout,
data files to the namelists directory, and a netcdf file summarizing the
decompositions. The key output information is written to stdout and contains
the timing information. In addition, a netcdf file called gdecomp.nc is
written that provides both the block and task ids for each gridcell as
computed by the decompositions. Finally, foo.* files are written by testpio
using the methods specified.

Currently, the timing information is limited to the high-level pio read/write
calls, which generally also include copy and rearrange overhead as well as
actual I/O time. Additional timers will be added in the future.

The test script is called testpio_run.pl; it uses the hostname function to
determine the platform. New platforms can be added by editing the files
build_defaults.xml and Utils.pm. If more than one configuration should be
tested on a single platform, you can provide two hostnames in this file and
specify the name to test with the --host option to testpio_run.pl.

There are several testpio_in files for the pio test suite; the ones that come
with pio each test specific features. In general, these tests include:

  • sn = serial netcdf and no rearrangement
  • sb = serial netcdf and box rearrangement
  • pn = parallel netcdf and no rearrangement
  • pb = parallel netcdf and box rearrangement
  • bn = binary I/O and no rearrangement
  • bb = binary I/O and box rearrangement

The test number (01, etc) is consistent across I/O methods, with:

  • 01 = all data on root pe, only root pe active in I/O
  • 02 = simple 2d xy decomp across all pes with all pes active in I/O
  • 03 = all data on root pe, all pes active in I/O
  • 04 = simple 2d xy decomp with yxz ordering and stride=4 pes active in I/O
  • 05 = 2d xy decomp with 4 blocks/pe, yxz ordering, xy block decomp, and stride=4 pes active in I/O
  • 06 = 3d xy decomp with 4 blocks/pe, yxz ordering, xy block decomp, and stride=4 pes active in I/O
  • 07 = 3d xyz decomp with 16 blocks/pe, yxz ordering, xyz block decomp with block yzx ordering and stride=4 pes active in I/O
  • 08 = 2d xy decomp with 4 blocks/pe and yxz grid ordering, yxz block ordering and cont1d block decomp

The rd01 and wr01 tests are distinct and test writing, reading, and use of DOF
data via pio methods.

PIO can use several backend libraries, including netcdf, pnetcdf, and mpi-io.
For each library used, a compile-time cpp flag is defined (e.g. _USEMPIIO); a
sketch of how such a flag typically appears in the source follows this list.
The test suite builds and tests the model for several combinations of these
cpp flags:

  • snet  = serial netcdf only
  • pnet  = parallel netcdf only
  • mpiio = mpiio only
  • all   = everything on
  • ant   = everything on but timing disabled
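
As a rough sketch (not code taken from PIO itself) of what these compile-time
flags look like in the Fortran source: the routine name and messages below are
invented for illustration, and only the _PNETCDF and _NETCDF macros correspond
to definitions that appear in the PIO CMakeLists.txt later in this changeset.

   ! Illustrative only: the shape of a cpp guard keyed on the PIO build flags.
   subroutine report_backend_example()
#if defined(_PNETCDF)
      print *, 'compiled with parallel netcdf support'
#elif defined(_NETCDF)
      print *, 'compiled with serial netcdf support'
#else
      print *, 'compiled without a netcdf backend'
#endif
   end subroutine report_backend_example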

========================================================================
\section Decomposition

The decomposition implementation supports the decomposition of a general
3-dimensional "nx * ny * nz" grid into multiple blocks of gridcells, which are
then ordered and assigned to processors. In general, blocks in the
decomposition are rectangular, "gdx * gdy * gdz", and the same size, although
some blocks around the edges of the domain may be smaller if the decomposition
is uneven. Both gridcells within the block and blocks within the domain can be
ordered in any of the possible dimension hierarchies, such as "xyz" where the
first dimension is the fastest.

The gdx, gdy, and gdz inputs allow the user to specify the size in any
dimension, and the grddecomp input specifies which dimensions are to be
further optimized. In general, automatic decomposition generation of
3-dimensional grids can be done in any possible combination of dimensions
(x, y, z, xy, xz, yz, or xyz), with the other dimensions having a fixed block
size. The automatic generation of the decomposition is based upon an internal
algorithm that tries to determine the most "square" blocks, with an additional
constraint on minimizing the maximum number of gridcells across processors.
If evenly divided grids are desired, adding "e" to grddecomp specifies that
the grid decomposition must be evenly divided. The setblk option uses the
prescribed gdx, gdy, and gdz inputs without further automation.

The blkdecomp1 input works fundamentally the same way as grddecomp in mapping
blocks to processors, but has a few additional options. "cont1d" (contiguous
1d) basically unwraps the blocks in the order specified by the blkorder input
and then decomposes that "1d" list of blocks onto processors by contiguously
grouping blocks together and allocating them to a processor. The number of
contiguous blocks allocated to a processor is the maximum of the values of the
bdx, bdy, and bdz inputs. Contiguous blocks are allocated to each processor in
turn in a round-robin fashion until all blocks are allocated. "cont1dm" does
basically the same thing, except that the number of contiguous blocks is set
automatically such that each processor receives only 1 set of contiguous
blocks. The ysym2 and ysym4 blkdecomp2 options modify the original block
layout such that the tasks assigned to the blocks are 2-way or 4-way symmetric
in the y axis.

The decomposition tool is extremely flexible, but arbitrary inputs will not
always yield valid decompositions. If a valid decomposition cannot be computed
based on the global grid size, number of pes, number of blocks desired, and
decomposition options, the model will stop.

As indicated above, the IO decomposition must be suited to the IO methods, so
decompositions are even further limited by those constraints. The testpio tool
provides only limited checking of whether the IO decomposition is valid for
the IO method used. Since the IO output is written in "xyz" order, the best IO
performance will likely be achieved with both grdorder and blkorder set to
"xyz" for the IO decomposition.

Also note that in all cases, regardless of the decomposition, the global
gridcell numbering and ordering in the output file is assumed to be "xyz" and
defined as a single block. The numbering scheme in the examples below
demonstrates how the namelist input relates back to the grid numbering on the
local computational decomposition.

Some decomposition examples:

  • "B" is the block number
  • "P" is the processor (1:npes) the block is associated with
  • numbers are the local gridcell numbering within the block if the
    local dimensions are unrolled

Standard xyz ordering, 2d decomp:
note: blkdecomp plays no role since there is 1 block per pe
- nx_global  6
- ny_global  4
- nz_global  1           ______________________________
- npes       4          |B3  P3        |B4  P4         |
- nblksppe   1          |              |               |
- grdorder   "xyz"      |              |               |
- grddecomp  "xy"       |              |               |
- gdx        0          |              |               |
- gdy        0          |--------------+---------------|
- gdz        0          |B1  P1        |B2  P2         |
- blkorder   "xyz"      |  4    5   6  |  4   5   6    |
- blkdecomp1 "xy"       |              |               |
- blkdecomp2 ""         |              |               |
- bdx        0          |  1    2   3  |  1   2   3    |
- bdy        0          |______________|_______________|
- bdz        0
-
- -Same as above but yxz ordering, 2d decomp -note: blkdecomp plays no role since there is 1 block per pe -
- nx_global  6
- ny_global  4
- nz_global  1           _____________________________
- npes       4          |B2  P2        |B4  P4        |
- nblksppe   1          |              |              |
- grdorder   "yxz"      |              |              |
- grddecomp  "xy"       |              |              |
- gdx        0          |              |              |
- gdy        0          |--------------+--------------|
- gdz        0          |B1  P1        |B3  P3        |
- blkorder   "yxz"      |  2    4   6  |  2   4   6   |
- blkdecomp1 "xy"       |              |              |
- blkdecomp2 ""         |              |              |
- bdx        0          |  1    3   5  |  1   3   5   |
- bdy        0          |______________|______________|
- bdz        0
-
- -xyz grid ordering, 1d x decomp - note: blkdecomp plays no role since there is 1 block per pe - note: blkorder plays no role since it's a 1d decomp -
- nx_global  8
- ny_global  4
- nz_global  1           _____________________________________
- npes       4          |B1  P1  |B2  P2   |B3  P3  |B4  P4   |
- nblksppe   1          | 7   8  |  7   8  |        |         |
- grdorder   "xyz"      |        |         |        |         |
- grddecomp  "x"        |        |         |        |         |
- gdx        0          | 5   6  |  5   6  |        |         |
- gdy        0          |        |         |        |         |
- gdz        0          |        |         |        |         |
- blkorder   "yxz"      | 3   4  |  3   4  |        |         |
- blkdecomp1 "xy"       |        |         |        |         |
- blkdecomp2 ""         |        |         |        |         |
- bdx        0          | 1   2  |  1   2  |        |         |
- bdy        0          |________|_________|________|_________|
- bdz        0
-
- -yxz block ordering, 2d grid decomp, 2d block decomp, 4 block per pe -
- nx_global  8
- ny_global  4
- nz_global  1           _____________________________________
- npes       4          |B4  P2  |B8  P2   |B12  P4 |B16  P4  |
- nblksppe   4          |        |         |        |         |
- grdorder   "xyz"      |--------+---------+--------+---------|
- grddecomp  "xy"       |B3  P2  |B7  P2   |B11  P4 |B15  P4  |
- gdx        0          |        |         |        |         |
- gdy        0          |--------+---------+--------+---------|
- gdz        0          |B2  P1  |B6  P1   |B10  P3 |B14  P3  |
- blkorder   "yxz"      |        |         |        |         |
- blkdecomp1 "xy"       |--------+---------+--------+---------|
- blkdecomp2 ""         |B1  P1  |B5  P1   |B9   P3 |B13  P3  |
- bdx        0          | 1   2  | 1   2   |        |         |
- bdy        0          |________|_________|________|_________|
- bdz        0
-
- -*/ diff --git a/src/externals/pio1/doxygen.sty b/src/externals/pio1/doxygen.sty deleted file mode 100644 index 17ad12862d3..00000000000 --- a/src/externals/pio1/doxygen.sty +++ /dev/null @@ -1,350 +0,0 @@ -\NeedsTeXFormat{LaTeX2e} -\ProvidesPackage{doxygen} - -% Packages used by this style file -\RequirePackage{alltt} -\RequirePackage{array} -\RequirePackage{calc} -\RequirePackage{color} -\RequirePackage{fancyhdr} -\RequirePackage{verbatim} - -% Setup fancy headings -\pagestyle{fancyplain} -\newcommand{\clearemptydoublepage}{% - \newpage{\pagestyle{empty}\cleardoublepage}% -} -\renewcommand{\chaptermark}[1]{% - \markboth{#1}{}% -} -\renewcommand{\sectionmark}[1]{% - \markright{\thesection\ #1}% -} -\lhead[\fancyplain{}{\bfseries\thepage}]{% - \fancyplain{}{\bfseries\rightmark}% -} -\rhead[\fancyplain{}{\bfseries\leftmark}]{% - \fancyplain{}{\bfseries\thepage}% -} -\rfoot[\fancyplain{}{\bfseries\scriptsize% - Generated on Mon Dec 21 13:55:00 2009 by doxygen\lfoot[]{\fancyplain{}{\bfseries\scriptsize% - Generated on Mon Dec 21 13:55:00 2009 by doxygen}} -\cfoot{} - -%---------- Internal commands used in this style file ---------------- - -% Generic environment used by all paragraph-based environments defined -% below. Note that the command \title{...} needs to be defined inside -% those environments! -\newenvironment{DoxyDesc}[1]{% - \begin{list}{}% - {% - \settowidth{\labelwidth}{40pt}% - \setlength{\leftmargin}{\labelwidth}% - \setlength{\parsep}{0pt}% - \setlength{\itemsep}{-4pt}% - \renewcommand{\makelabel}{\entrylabel}% - }% - \item[#1:]% -}{% - \end{list}% -} - -%---------- Commands used by doxygen LaTeX output generator ---------- - -% Used by
<pre> ... </pre>
-\newenvironment{DoxyPre}{% - \small% - \begin{alltt}% -}{% - \end{alltt}% - \normalsize% -} - -% Used by @code ... @endcode -\newenvironment{DoxyCode}{% - \footnotesize% - \verbatim% -}{% - \endverbatim% - \normalsize% -} - -% Used by @example, @include, @includelineno and @dontinclude -\newenvironment{DoxyCodeInclude}{% - \DoxyCode% -}{% - \endDoxyCode% -} - -% Used by @verbatim ... @endverbatim -\newenvironment{DoxyVerb}{% - \footnotesize% - \verbatim% -}{% - \endverbatim% - \normalsize% -} - -% Used by @verbinclude -\newenvironment{DoxyVerbInclude}{% - \DoxyVerb% -}{% - \endDoxyVerb% -} - -% Used by numbered lists (using '-#' or
<ol> ... </ol>
) -\newenvironment{DoxyEnumerate}{% - \enumerate% -}{% - \endenumerate% -} - -% Used by bullet lists (using '-', @li, @arg, or
<ul> ... </ul>
) -\newenvironment{DoxyItemize}{% - \itemize% -}{% - \enditemize% -} - -% Used by description lists (using
<dl> ... </dl>
) -\newenvironment{DoxyDescription}{% - \description% -}{% - \enddescription% -} - -% Used by @image, @dotfile, and @dot ... @enddot -% (only if caption is specified) -\newenvironment{DoxyImage}{% - \begin{figure}[H]% - \begin{center}% -}{% - \end{center}% - \end{figure}% -} - -% Used by @image, @dotfile, @dot ... @enddot, and @msc ... @endmsc -% (only if no caption is specified) -\newenvironment{DoxyImageNoCaption}{% -}{% -} - -% Used by @attention -\newenvironment{DoxyAttention}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @author and @authors -\newenvironment{DoxyAuthor}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @date -\newenvironment{DoxyDate}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @invariant -\newenvironment{DoxyInvariant}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @note -\newenvironment{DoxyNote}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @post -\newenvironment{DoxyPostcond}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @pre -\newenvironment{DoxyPrecond}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @remark -\newenvironment{DoxyRemark}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @return -\newenvironment{DoxyReturn}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @since -\newenvironment{DoxySince}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @see -\newenvironment{DoxySeeAlso}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @version -\newenvironment{DoxyVersion}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @warning -\newenvironment{DoxyWarning}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @internal -\newenvironment{DoxyInternal}[1]{% - \begin{DoxyDesc}{#1}% -}{% - \end{DoxyDesc}% -} - -% Used by @par and @paragraph -\newenvironment{DoxyParagraph}[1]{% - \begin{list}{}% - {% - \settowidth{\labelwidth}{40pt}% - \setlength{\leftmargin}{\labelwidth}% - \setlength{\parsep}{0pt}% - \setlength{\itemsep}{-4pt}% - \renewcommand{\makelabel}{\entrylabel}% - }% - \item[#1]% -}{% - \end{list}% -} - -% Used by parameter lists -\newenvironment{DoxyParams}[1]{% - \begin{DoxyDesc}{#1}% - \begin{description}% -}{% - \end{description}% - \end{DoxyDesc}% -} - -% Used by return value lists -\newenvironment{DoxyRetVals}[1]{% - \begin{DoxyDesc}{#1}% - \begin{description}% -}{% - \end{description}% - \end{DoxyDesc}% -} - -% Used by exception lists -\newenvironment{DoxyExceptions}[1]{% - \begin{DoxyDesc}{#1}% - \begin{description}% -}{% - \end{description}% - \end{DoxyDesc}% -} - -% Used by template parameter lists -\newenvironment{DoxyTemplParams}[1]{% - \begin{DoxyDesc}{#1}% - \begin{description}% -}{% - \end{description}% - \end{DoxyDesc}% -} - -\newcommand{\doxyref}[3]{\textbf{#1} (\textnormal{#2}\,\pageref{#3})} -\newenvironment{DoxyCompactList} -{\begin{list}{}{ - \setlength{\leftmargin}{0.5cm} - \setlength{\itemsep}{0pt} - \setlength{\parsep}{0pt} - \setlength{\topsep}{0pt} - \renewcommand{\makelabel}{\hfill}}} -{\end{list}} -\newenvironment{DoxyCompactItemize} -{ - \begin{itemize} - \setlength{\itemsep}{-3pt} - \setlength{\parsep}{0pt} - \setlength{\topsep}{0pt} - \setlength{\partopsep}{0pt} -} -{\end{itemize}} -\newcommand{\PBS}[1]{\let\temp=\\#1\let\\=\temp} -\newlength{\tmplength} -\newenvironment{TabularC}[1] -{ -\setlength{\tmplength} - {\linewidth/(#1)-\tabcolsep*2-\arrayrulewidth*(#1+1)/(#1)} - 
\par\begin{tabular*}{\linewidth} - {*{#1}{|>{\PBS\raggedright\hspace{0pt}}p{\the\tmplength}}|} -} -{\end{tabular*}\par} -\newcommand{\entrylabel}[1]{ - {\parbox[b]{\labelwidth-4pt}{\makebox[0pt][l]{\textbf{#1}}\vspace{1.5\baselineskip}}}} -\newenvironment{Desc} -{\begin{list}{} - { - \settowidth{\labelwidth}{40pt} - \setlength{\leftmargin}{\labelwidth} - \setlength{\parsep}{0pt} - \setlength{\itemsep}{-4pt} - \renewcommand{\makelabel}{\entrylabel} - } -} -{\end{list}} -\newenvironment{Indent} - {\begin{list}{}{\setlength{\leftmargin}{0.5cm}} - \item[]\ignorespaces} - {\unskip\end{list}} -\setlength{\parindent}{0cm} -\setlength{\parskip}{0.2cm} -\addtocounter{secnumdepth}{1} -\sloppy -\usepackage[T1]{fontenc} -\makeatletter -\renewcommand{\paragraph}{\@startsection{paragraph}{4}{0ex}% - {-3.25ex plus -1ex minus -0.2ex}% - {1.5ex plus 0.2ex}% - {\normalfont\normalsize\bfseries}} -\makeatother -\stepcounter{secnumdepth} -\stepcounter{tocdepth} -\definecolor{comment}{rgb}{0.5,0.0,0.0} -\definecolor{keyword}{rgb}{0.0,0.5,0.0} -\definecolor{keywordtype}{rgb}{0.38,0.25,0.125} -\definecolor{keywordflow}{rgb}{0.88,0.5,0.0} -\definecolor{preprocessor}{rgb}{0.5,0.38,0.125} -\definecolor{stringliteral}{rgb}{0.0,0.125,0.25} -\definecolor{charliteral}{rgb}{0.0,0.5,0.5} -\definecolor{vhdldigit}{rgb}{1.0,0.0,1.0} -\definecolor{vhdlkeyword}{rgb}{0.43,0.0,0.43} -\definecolor{vhdllogic}{rgb}{1.0,0.0,0.0} -\definecolor{vhdlchar}{rgb}{0.0,0.0,0.0} diff --git a/src/externals/pio1/pio/CMakeLists.txt b/src/externals/pio1/pio/CMakeLists.txt deleted file mode 100644 index facdbaceb25..00000000000 --- a/src/externals/pio1/pio/CMakeLists.txt +++ /dev/null @@ -1,189 +0,0 @@ -IF( NOT GENF90_PATH) - SET (GENF90_PATH ${CMAKE_CURRENT_SOURCE_DIR}/bin) -ENDIF() - -PROJECT(PIO C Fortran) -ENABLE_LANGUAGE(Fortran) -#INCLUDE(FortranCInterface) -CMAKE_MINIMUM_REQUIRED(VERSION 2.8.5) -IF (USER_CMAKE_MODULE_PATH) - SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${USER_CMAKE_MODULE_PATH}) -ELSE() - SET (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") -ENDIF() -find_file( TESTFILE NAMES TryCSizeOf.f90 PATHS ${CMAKE_MODULE_PATH} NO_DEFAULT_PATH) -get_filename_component( TESTFILEPATH ${TESTFILE} PATH) - - -SET(pio_include_dirs_ ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -SET(PIO_LIB_DIR ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} CACHE STRING "") - -#SET(bld_PIO_DEFINITIONS) - -TRY_COMPILE(WITH_CSIZEOF ${CMAKE_CURRENT_BINARY_DIR}/tryCompileCSIZEOF - ${TESTFILEPATH}/TryCSizeOf.f90) -#MESSAGE(STATUS "c_sizeof test ${WITH_CSIZEOF}") -IF(${WITH_CSIZEOF} STREQUAL FALSE) - MESSAGE(STATUS "Fortran compiler does not support c_sizeof function") - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -DNO_C_SIZEOF) -endif() - -# Netcdf is required -#SET (NETCDF_FIND_COMPONENTS F90) -FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS C Fortran) -IF (${NetCDF_Fortran_FOUND}) - MESSAGE("Building PIO with netcdf support ") - SET(pio_include_dirs_ ${pio_include_dirs_} ${NetCDF_Fortran_INCLUDE_DIR}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} ${NetCDF_Fortran_DEFINITIONS}) -ELSE() - MESSAGE("Building PIO without netcdf support ${NetCDF_C_FOUND} ${NetCDF_Fortran_FOUND}") -ENDIF () - - -# PNetcdf is optional but used by default -OPTION(WITH_PNETCDF "Whether to build with PnetCDF" TRUE) -IF (${WITH_PNETCDF}) - FIND_PACKAGE(PnetCDF REQUIRED COMPONENTS Fortran) -ELSE () - MESSAGE(WARNING "Warning: Not building with PNetcdf - cannot run all regression tests.") -ENDIF () - - -OPTION(PIO_BIG_ENDIAN "Specify that the 
machine is big endian" test) -IF ("${PIO_BIG_ENDIAN}" STREQUAL "test") - INCLUDE(TestBigEndian) - TestBigEndian(PIO_BIG_ENDIAN_TEST) - IF(PIO_BIG_ENDIAN_TEST) - SET(PIO_BIG_ENDIAN ON CACHE BOOL "") - ELSE() - SET(PIO_BIG_ENDIAN OFF CACHE BOOL "") - ENDIF() -ELSE() - SET(PIO_BIG_ENDIAN ${PIO_BIG_ENDIAN} CACHE BOOL "") -ENDIF() - - -IF (PIO_FILESYSTEM_HINTS STREQUAL "lustre") - MESSAGE(STATUS "PIO using lustre filesystem hints") - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -DPIO_LUSTRE_HINTS) -ELSEIF(PIO_FILESYSTEM_HINTS STREQUAL "gpfs") - MESSAGE(STATUS "PIO using gpfs filesystem hints") - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -DPIO_GPFS_HINTS) -ELSEIF(NOT "${PIO_FILESYSTEM_HINTS}" STREQUAL "") - MESSAGE(WARNING "${PIO_FILESYSTEM_HINTS} not valid option for PIO_FILESYSTEM_HINTS; use gpfs or lustre.") -ENDIF() -IF(NetCDF_Fortran_FOUND) - SET(pio_include_dirs_ ${pio_include_dirs_} ${NetCDF_Fortran_INCLUDE_DIR}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NETCDF ${NetCDF_Fortran_DEFINITIONS}) - if (${NetCDF_C_HAS_PARALLEL}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NETCDF4) - ENDIF() -ELSE() - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NONETCDF) -ENDIF() -IF(PnetCDF_Fortran_FOUND) - SET(pio_include_dirs_ ${pio_include_dirs_} ${PnetCDF_Fortran_INCLUDE_DIRS}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_PNETCDF) -ELSE() - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NOPNETCDF) -ENDIF() - -OPTION(PIO_USE_MPIIO "Enable support for MPI-IO (default auto detect)" ON) -IF (PIO_USE_MPIIO) - TRY_COMPILE(MPIIO_SUCCESS ${CMAKE_CURRENT_BINARY_DIR}/tryCompileMPIIO - ${TESTFILEPATH}/TryMPIIO.f90) - IF (${MPIIO_SUCCESS}) - MESSAGE(STATUS "MPIIO detected and enabled.") - ELSE() - MESSAGE(STATUS "MPIIO not detected and therefore disabled.") - SET(PIO_USE_MPIIO FALSE) - ENDIF() -ENDIF() -IF (${PIO_USE_MPIIO}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -DUSEMPIIO) -ENDIF() - -SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NOUSEMCT) - -OPTION(PIO_USE_BOX "" ON) -if(PIO_USE_BOX) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_USEBOX) -else() - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -D_NOUSEBOX) -endif() - -OPTION(PIO_USE_MPIMOD "Use Fortran MPI module (default auto detect)" ON) -IF (PIO_USE_MPIMOD) - TRY_COMPILE(MPIMOD_SUCCESS ${CMAKE_CURRENT_BINARY_DIR}/tryCompileMPIMod - ${TESTFILEPATH}/TryMPIMod.f90) - IF (${MPIMOD_SUCCESS}) - MESSAGE(STATUS "MPI Fortran module detected and enabled.") - ELSE() - MESSAGE(STATUS "MPI Fortran module not detected and therefore disabled.") - SET(PIO_USE_MPIMOD FALSE) - ENDIF() -ENDIF() - -IF (NOT ${PIO_USE_MPIMOD}) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -DNO_MPIMOD) -ENDIF() -OPTION(PIO_BUILD_TIMING "" OFF) - -if(${PIO_BUILD_TIMING}) - if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../timing) - SET(bld_PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} -DTIMING -I ${CMAKE_CURRENT_BINARY_DIR}/timing) - ADD_SUBDIRECTORY(../timing timing) - endif() -endif() - -SET(PIO_DEFINITIONS ${bld_PIO_DEFINITIONS} CACHE STRING "") -ADD_DEFINITIONS(${PIO_DEFINITIONS}) - -SET(PIO_INCLUDE_DIRS ${pio_include_dirs_} CACHE STRING "") -INCLUDE_DIRECTORIES(${PIO_INCLUDE_DIRS}) - -SET(SRCS_C topology.c) - -SET(SRCS_F90 pio.F90 pio_kinds.F90 nf_mod.F90 ionf_mod.F90 pio_types.F90 - piolib_mod.F90 pio_mpi_utils.F90 pio_nf_utils.F90 pio_utils.F90 - pio_support.F90 calcdisplace_mod.F90 - calcdecomp.F90 pio_msg_mod.F90 pio_msg_callbacks.F90) - -SET(TEMPSRCF90 pionfatt_mod.F90 - pionfread_mod.F90 - pionfwrite_mod.F90 - pionfput_mod.F90 - pionfget_mod.F90 
- alloc_mod.F90 - box_rearrange.F90 - rearrange.F90 - iompi_mod.F90 - piodarray.F90 - pio_spmd_utils.F90 - pio_msg_getput_callbacks.F90 - ) - -FOREACH(tempfile IN LISTS TEMPSRCF90) -ADD_CUSTOM_COMMAND( - OUTPUT ${tempfile} - COMMAND ${GENF90_PATH}/genf90.pl ${CMAKE_CURRENT_SOURCE_DIR}/${tempfile}.in > ${tempfile} - DEPENDS ${tempfile}.in -) -ENDFOREACH() -if("${CMAKE_Fortran_COMPILER_ID}" STREQUAL "GNU") - SET(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -ffree-line-length-none") -endif() - -ADD_LIBRARY(pio ${SRCS_F90} ${SRCS_C} ${TEMPSRCF90}) -TARGET_LINK_LIBRARIES(pio ${PnetCDF_Fortran_LIBRARIES}) -TARGET_LINK_LIBRARIES(pio ${NetCDF_Fortran_LIBRARIES}) -TARGET_LINK_LIBRARIES(pio ${ADDITIONAL_LIBS}) - -if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../unittests) - ADD_SUBDIRECTORY(../unittests unittests) -endif() -if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../testpio) - ADD_SUBDIRECTORY(../testpio testpio) -endif() - - diff --git a/src/externals/pio1/pio/C_interface_mod.F90 b/src/externals/pio1/pio/C_interface_mod.F90 deleted file mode 100644 index 281362a5596..00000000000 --- a/src/externals/pio1/pio/C_interface_mod.F90 +++ /dev/null @@ -1,449 +0,0 @@ -! FILE: c_interface_module.f -! PURPOSE: Supplement ISO-C-Binding to provide type aliases and interfaces -! to common ISO-C string functions to aid working with strings. -! AUTHOR: Joseph M. Krahn -! STATUS: Still in development. Reasonably complete, but somewhat limited testing. -! -! The idea is to provide type aliases for all ISO-C types, so that the -! Fortran interface code more explicitly defines the actual C interface. -! This should be updated to support F2008 variable-length allocatable -! strings. -! -! Entity names all have the "C_" prefix, as with ISO-C-Binding, with a -! few exceptions. -! -module C_interface_mod -#ifdef _COMPRESSION - use, intrinsic :: ISO_C_Binding, & - ! C type aliases for pointer derived types: - C_ptr => C_ptr , & - C_char_ptr => C_ptr, & - C_const_char_ptr => C_ptr, & - C_void_ptr => C_ptr, & - C_const_void_ptr => C_ptr - - implicit none - -!---------------------------------------------------------------------------- -! C type aliases for intrinsic type KIND parameters: - -! NOTE: a C enum may not always be a standard C int - integer, parameter :: C_enum = C_int - -! Defining off_t is difficult, because it may depend on "LARGEFILE" selection. -! integer, parameter :: C_off_t = ?? - -! C string terminator alais using the 3-letter ASCII name. -! The C_ prefix is not used because it is just an ASCII character. - character(len=1,kind=C_char), parameter :: NUL = C_NULL_char - -! NOTE: In C, "char" is distinct from "signed char", unlike integers. -! The plain "char" type is specific for text/string values, whereas -! "signed char" should indicate 1-byte integer data. -! -! Most ISO-C systems have wide chars "wchar_t", but Fortran compilers -! have limited support for different character kinds. UTF encoding -! adds more complexity. This should be updated as Fortran compilers -! include support for more character types. -! - -! Fortran does not (yet) support unsigned types. 
- integer, parameter :: & - C_unsigned = C_int, & - C_unsigned_short = C_short, & - C_unsigned_long = C_long, & - C_unsigned_long_long = C_long_long, & - C_unsigned_char = C_signed_char, & - C_ssize_t = C_size_t, & - C_uint8_t = C_int8_t, & - C_uint16_t = C_int16_t, & - C_uint32_t = C_int32_t, & - C_uint64_t = C_int64_t, & - C_uint_least8_t = C_int_least8_t, & - C_uint_least16_t = C_int_least16_t, & - C_uint_least32_t = C_int_least32_t, & - C_uint_least64_t = C_int_least64_t, & - C_uint_fast8_t = C_int_fast8_t, & - C_uint_fast16_t = C_int_fast16_t, & - C_uint_fast32_t = C_int_fast32_t, & - C_uint_fast64_t = C_int_fast64_t, & - C_uintmax_t = C_intmax_t -! Note: ptrdiff_t cannot be reliably defined from other types. -! When practical, it is larger than a pointer because it benefits -! from the full unsigned range in both positive and negative directions. - -! Integer versions including 'int', where the 'int' is optional: - integer, parameter :: & - C_short_int = C_short, & - C_long_int = C_long, & - C_long_long_int = C_long_long, & - C_unsigned_int = C_unsigned, & - C_unsigned_short_int = C_short, & - C_unsigned_long_int = C_long, & - C_unsigned_long_long_int = C_long_long - - interface C_F_string - module procedure C_F_string_ptr - module procedure C_F_string_chars - end interface C_F_string - - interface F_C_string - module procedure F_C_string_ptr - module procedure F_C_string_chars - end interface F_C_string - -!======================================================================= -! Some useful ISO C library string functions from -! These are based on GCC header sections marked as NAMESPACE_STD - interface - -! Copy N bytes of SRC to DEST, no aliasing or overlapping allowed. -!extern void *memcpy (void *dest, const void *src, size_t n); - function C_memcpy(dest, src, n) result(result) bind(C,name="memcpy") - import C_void_ptr, C_size_t - type(C_void_ptr) :: result - type(C_void_ptr), value, intent(in) :: dest ! target=intent(out) - type(C_void_ptr), value, intent(in) :: src ! target=intent(in) - integer(C_size_t), value, intent(in) :: n - end function C_memcpy - -! Copy N bytes of SRC to DEST, guaranteeing correct behavior for overlapping strings. -!extern void *memmove (void *dest, const void *src, size_t n) - function C_memmove(dest, src, n) result(result) bind(C,name="memmove") - import C_void_ptr, C_size_t - type(C_void_ptr) :: result - type(C_void_ptr), value, intent(in) :: dest ! target=intent(out) - type(C_void_ptr), value, intent(in) :: src - integer(C_size_t), value, intent(in) :: n - end function C_memmove - -! Set N bytes of S to C. -!extern void *memset (void *s, int c, size_t n) - function C_memset(s, c, n) result(result) bind(C,name="memset") - import C_void_ptr, C_int, C_size_t - type(C_void_ptr) :: result - type(C_void_ptr), value, intent(in) :: s ! target=intent(out) - integer(C_int), value, intent(in) :: c - integer(C_size_t), value, intent(in) :: n - end function C_memset - -! Compare N bytes of S1 and S2. -!extern int memcmp (const void *s1, const void *s2, size_t n) - pure & - function C_memcmp(s1, s2, n) result(result) bind(C,name="memcmp") - import C_int, C_void_ptr, C_size_t - integer(C_int) :: result - type(C_void_ptr), value, intent(in) :: s1 - type(C_void_ptr), value, intent(in) :: s2 - integer(C_size_t), value, intent(in) :: n - end function C_memcmp - -! Search N bytes of S for C. 
-!extern void *memchr (const void *s, int c, size_t n) - pure & - function C_memchr(s, c, n) result(result) bind(C,name="memchr") - import C_void_ptr, C_int, C_size_t - type(C_void_ptr) :: result - type(C_void_ptr), value, intent(in) :: s - integer(C_int), value, intent(in) :: c - integer(C_size_t), value, intent(in) :: n - end function C_memchr - -! Copy SRC to DEST. -!extern char *strcpy (char *dest, const char *src) - function C_strcpy(dest, src) result(result) bind(C,name="strcpy") - import C_char_ptr, C_size_t - type(C_char_ptr) :: result - type(C_char_ptr), value, intent(in) :: dest ! target=intent(out) - type(C_char_ptr), value, intent(in) :: src - end function C_strcpy - -! Copy no more than N characters of SRC to DEST. -!extern char *strncpy (char *dest, const char *src, size_t n) - function C_strncpy(dest, src, n) result(result) bind(C,name="strncpy") - import C_char_ptr, C_size_t - type(C_char_ptr) :: result - type(C_char_ptr), value, intent(in) :: dest ! target=intent(out) - type(C_char_ptr), value, intent(in) :: src - integer(C_size_t), value, intent(in) :: n - end function C_strncpy - -! Append SRC onto DEST. -!extern char *strcat (char *dest, const char *src) - function C_strcat(dest, src) result(result) bind(C,name="strcat") - import C_char_ptr, C_size_t - type(C_char_ptr) :: result - type(C_char_ptr), value, intent(in) :: dest ! target=intent(out) - type(C_char_ptr), value, intent(in) :: src - end function C_strcat - -! Append no more than N characters from SRC onto DEST. -!extern char *strncat (char *dest, const char *src, size_t n) - function C_strncat(dest, src, n) result(result) bind(C,name="strncat") - import C_char_ptr, C_size_t - type(C_char_ptr) :: result - type(C_char_ptr), value, intent(in) :: dest ! target=intent(out) - type(C_char_ptr), value, intent(in) :: src - integer(C_size_t), value, intent(in) :: n - end function C_strncat - -! Compare S1 and S2. -!extern int strcmp (const char *s1, const char *s2) - pure & - function C_strcmp(s1, s2) result(result) bind(C,name="strcmp") - import C_int, C_char_ptr, C_size_t - integer(C_int) :: result - type(C_char_ptr), value, intent(in) :: s1 - type(C_char_ptr), value, intent(in) :: s2 - end function C_strcmp - -! Compare N characters of S1 and S2. -!extern int strncmp (const char *s1, const char *s2, size_t n) - pure & - function C_strncmp(s1, s2, n) result(result) bind(C,name="strncmp") - import C_int, C_char_ptr, C_size_t - integer(C_int) :: result - type(C_char_ptr), value, intent(in) :: s1 - type(C_char_ptr), value, intent(in) :: s2 - integer(C_size_t), value, intent(in) :: n - end function C_strncmp - -! Return the length of S. -!extern size_t strlen (const char *s) - pure & - function C_strlen(s) result(result) bind(C,name="strlen") - import C_char_ptr, C_size_t - integer(C_size_t) :: result - type(C_char_ptr), value, intent(in) :: s !character(len=*), intent(in) - end function C_strlen - - end interface - -! End of -!========================================================================= -! Standard ISO-C malloc routines: - interface - - ! void *calloc(size_t nmemb, size_t size); - type(C_void_ptr) & - function C_calloc(nmemb, size) bind(C,name="calloc") - import C_void_ptr, C_size_t - integer(C_size_t), value, intent(in) :: nmemb, size - end function C_calloc - - ! void *malloc(size_t size); - type(C_void_ptr) & - function C_malloc(size) bind(C,name="malloc") - import C_void_ptr, C_size_t - integer(C_size_t), value, intent(in) :: size - end function C_malloc - - ! 
void free(void *ptr); - subroutine C_free(ptr) bind(C,name="free") - import C_void_ptr - type(C_void_ptr), value, intent(in) :: ptr - end subroutine C_free - - ! void *realloc(void *ptr, size_t size); - type(C_void_ptr) & - function C_realloc(ptr,size) bind(C,name="realloc") - import C_void_ptr, C_size_t - type(C_void_ptr), value, intent(in) :: ptr - integer(C_size_t), value, intent(in) :: size - end function C_realloc - - end interface - - interface assignment(=) - module procedure F_string_assign_C_string - end interface assignment(=) - -!========================================================================== - -contains - - ! HACK: For some reason, C_associated was not defined as pure. - pure logical & - function C_associated_pure(ptr) result(associated) - type(C_ptr), intent(in) :: ptr - integer(C_intptr_t) :: iptr - iptr = transfer(ptr,iptr) - associated = (iptr /= 0) - end function C_associated_pure - -! Set a fixed-length Fortran string to the value of a C string. - subroutine F_string_assign_C_string(F_string, C_string) - character(len=*), intent(out) :: F_string - type(C_ptr), intent(in) :: C_string - character(len=1,kind=C_char), pointer :: p_chars(:) - integer :: i - if (.not. C_associated(C_string) ) then - F_string = ' ' - else - call C_F_pointer(C_string,p_chars,[huge(0)]) - i=1 - do while(p_chars(i)/=NUL .and. i<=len(F_string)) - F_string(i:i) = p_chars(i) - i=i+1 - end do - if (i ./configure F90=pgf90. - - You can also direct configure through environment variables: - > setenv F90 xlf90 - > ./configure - - -PLATFORM NOTES: - - On robin at NCCS, use: - > ./configure --host=Linux ac_cv_c_bigendian=yes - - On the Cray XT3/XT4 (e.g. jaguar) use: - > ./configure --host=Linux FC=ftn F90=ftn - - On jazz for g95: - use .soft key @all-mpich_gm-gcc4.0.2-g95-0.50 - set the environment variable G95_UNBUFFERED_6 to prevent - truncation of stdout - > ./configure F90=mpif90 - - On Blue Gene/P (BG/P), use: - > ./configure FC=mpixlf90_r CC=mpixlc_r diff --git a/src/externals/pio1/pio/alloc_mod.F90.in b/src/externals/pio1/pio/alloc_mod.F90.in deleted file mode 100644 index 7843c7f69f6..00000000000 --- a/src/externals/pio1/pio/alloc_mod.F90.in +++ /dev/null @@ -1,328 +0,0 @@ -#define __PIO_FILE__ "alloc_mod.F90.in" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief Internal allocation routines for PIO -!< -module alloc_mod - - use pio_kinds - use pio_types - use pio_support, only : piodie, CheckMPIreturn, debug - implicit none - private - -!> -!! @private -!! PIO internal memory allocation check routines. -!< - public:: alloc_check -!> -!! @private -!! PIO internal memory allocation check routines. -!< - public:: dealloc_check - - interface alloc_check - ! TYPE long,int,real,double ! DIMS 1,2 - module procedure alloc_check_{DIMS}d_{TYPE} - ! TYPE double,long,int,real - module procedure alloc_check_0d_{TYPE} - end interface - - - interface dealloc_check - ! TYPE long,int,real,double ! DIMS 1,2 - module procedure dealloc_check_{DIMS}d_{TYPE} - ! TYPE double,long,int,real - module procedure dealloc_check_0d_{TYPE} - end interface - - -!> -!! @private -!! PIO internal memory allocation check routines. -!< - public :: alloc_print_usage - -!> -!! @private -!! PIO internal memory allocation check routines. -!< - public :: alloc_trace_on - -!> -!! @private -!! PIO internal memory allocation check routines. -!< - public :: alloc_trace_off - - character(len=*), parameter :: modName='pio::alloc_mod' - -contains - - ! - ! Instantiate all the variations of alloc_check_ and dealloc_check_ - ! 
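
[Editor's sketch] The CMake rule earlier in this diff pipes each *.F90.in template through genf90.pl, and the interface blocks just above carry "! TYPE ..." / "! DIMS ..." directives with {TYPE}/{VTYPE}/{DIMS}/{DIMSTR} placeholders that the templated routines below rely on. A rough Python sketch of what such an expansion step amounts to; the VTYPE table and the helper itself are illustrative assumptions (based on the pio_kinds names i4/i8/r4/r8 used in the removed sources), not genf90's actual implementation.

    # Toy stand-in for the genf90-style template expansion described above.
    VTYPE = {
        "int":    "integer(i4)",   # assumed mapping
        "long":   "integer(i8)",
        "real":   "real(r4)",
        "double": "real(r8)",
    }

    def expand(template, types, dims):
        """Emit one copy of `template` per (type, rank) combination."""
        out = []
        for t in types:
            for d in dims:
                out.append(template
                           .replace("{TYPE}", t)
                           .replace("{VTYPE}", VTYPE[t])
                           .replace("{DIMS}", str(d))
                           .replace("{DIMSTR}", "(" + ",".join(":" * d) + ")"))
        return "\n".join(out)

    print(expand("module procedure alloc_check_{DIMS}d_{TYPE}",
                 ["long", "int", "real", "double"], [1, 2]))
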
- - ! TYPE long,int,real,double - subroutine alloc_check_1d_{TYPE} (data,varlen,msg) - - {VTYPE}, pointer :: data(:) - integer, intent(in) :: varlen - character(len=*), intent(in), optional:: msg - - character(len=*), parameter :: subName=modName//'::alloc_check_1d_{TYPE}' - - integer ierr, ierror, rank -#ifdef ALLOC_DEBUG - if(alloc_debug) then - if(present(msg)) then - print *,__PIO_FILE__,__LINE__,msg,varlen - else - print *,__PIO_FILE__,__LINE__,varlen - end if - end if -#endif - - if(varlen==0) then - allocate(data(1),stat=ierr) - else - allocate(data(varlen),stat=ierr) - endif - if (ierr /= 0) then - if (present(msg)) then - call piodie('alloc_check_1d_{TYPE}',__LINE__,'allocate failed on task:',& - msg2=msg) - else - call piodie('alloc_check_1d_{TYPE}',__LINE__,'allocate failed on task:') - endif - endif - - end subroutine alloc_check_1d_{TYPE} - - ! TYPE long,int,real,double - subroutine alloc_check_2d_{TYPE} (data,size1, size2,msg) - - {VTYPE}, pointer :: data(:,:) - integer, intent(in) :: size1, size2 - character(len=*), intent(in), optional:: msg - - character(len=*), parameter :: subName=modName//'::alloc_check_2d_{TYPE}' - integer ierr, ierror, rank - - allocate(data(size1,size2),stat=ierr) - - if (ierr /= 0) then - if (present(msg)) then - call piodie('alloc_check_2d_{TYPE}',__LINE__,'allocate failed on task:',& - msg2=msg) - else - call piodie('alloc_check_2d_{TYPE}',__LINE__,'allocate failed on task:') - endif - endif - - end subroutine alloc_check_2d_{TYPE} - - ! - ! - ! - ! TYPE long,int,real,double ! DIMS 1,2 - subroutine dealloc_check_{DIMS}d_{TYPE} (data,msg) - - {VTYPE}, pointer :: data{DIMSTR} - character(len=*), intent(in), optional:: msg - - character(len=*), parameter :: subName=modName//'::dealloc_check_{DIMS}d_{TYPE}' - integer ierr, ierror, rank -#ifdef ALLOC_DEBUG - if(debug) then - if(present(msg)) then - print *,__PIO_FILE__,__LINE__,msg,size(data) - else - print *,__PIO_FILE__,__LINE__,size(data) - end if - end if -#endif - - deallocate(data,stat=ierr) - - if (ierr /= 0) then - if (present(msg)) then - call piodie('dealloc_check_{DIMS}d_{TYPE}',__LINE__, & - ': deallocate failed on task:',msg2=msg) - else - call piodie('dealloc_check_{DIMS}d_{TYPE}',__LINE__, & - ': deallocate failed on task:') - endif - - endif - - end subroutine dealloc_check_{DIMS}d_{TYPE} - -! TYPE long,int,real,double - subroutine alloc_check_0d_{TYPE}(data,msg) - - {VTYPE}, pointer :: data - character(len=*), intent(in), optional:: msg - - character(len=*), parameter :: subName=modName//'::alloc_check_0d_{TYPE}' - integer ierr, ierror, rank - - allocate(data,stat=ierr) - - if (ierr /= 0) then - if (present(msg)) then - call piodie('alloc_check_0d_{TYPE}',__LINE__,'allocate failed on task:',& - msg2=msg) - else - call piodie('alloc_check_0d_{TYPE}',__LINE__,'allocate failed on task:') - endif - - endif - -end subroutine alloc_check_0d_{TYPE} - - - - -! TYPE long,int,real,double -subroutine dealloc_check_0d_{TYPE} (data,msg) - - {VTYPE}, pointer :: data - character(len=*), intent(in), optional:: msg - - character(len=*), parameter :: subName=modName//'::dealloc_check_0d_{TYPE}' - integer ierr, ierror, rank - - deallocate(data,stat=ierr) - - if (ierr /= 0) then - if (present(msg)) then - call piodie('dealloc_check_0d_{TYPE}',__LINE__, & - ': deallocate failed on task:',msg2=msg) - else - call piodie('dealloc_check_0d_{TYPE}',__LINE__, & - ': deallocate failed on task:') - endif - - endif - -end subroutine dealloc_check_0d_{TYPE} - -!> -!! @private -!! @fn alloc_print_usage -!! 
PIO internal memory allocation check routines. -!< - subroutine alloc_print_usage(rank,msg) -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#else - include 'mpif.h' ! _EXTERNAL -#endif - - integer, intent(in) :: rank - character(len=*), intent(in), optional :: msg - - character(len=*), parameter :: subName=modName//'::alloc_print_usage' - integer ierr, myrank - -#ifdef _TESTMEM - call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ierr) - call CheckMPIReturn(subName,ierr) -#ifdef _MEMMON - if ( rank<0 .or. rank==myrank ) then - print *,'' - if (present(msg)) then - print *,myrank,': alloc_print_usage: ',msg - else - print *,myrank,': alloc_print_usage: ' - endif - - call memmon_print_usage - - print *,'' - - endif -#endif - -#ifdef _STACKMON - if ( myrank == 0 ) then - print *,'' - print *,myrank,': alloc_print_usage: ',msg - print *,myrank,': writing stackmonitor.txt' - endif - - call print_stack_size -#endif - - -#endif /* _TESTMEM */ - - end subroutine alloc_print_usage - - - - subroutine alloc_trace_on(rank,msg) - - integer, intent(in) :: rank - character(len=*), intent(in), optional :: msg - - character(len=*), parameter :: subName=modName//'::alloc_trace_on' - integer ierr, myrank - -#ifdef _TESTMEM -#ifdef _MEMMON - call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ierr) - call CheckMPIReturn(subName,ierr) - if ( rank<0 .or. rank==myrank ) then - if (present(msg)) then - print *,myrank,': alloc_trace_on: ',msg - else - print *,myrank,': alloc_trace_on: ' - endif - call memmon_trace_on(myrank) - print *,'' - - endif - -#endif -#endif - - end subroutine alloc_trace_on - - - - subroutine alloc_trace_off(rank,msg) - - integer, intent(in) :: rank - character(len=*), intent(in), optional :: msg - - character(len=*), parameter :: subName=modName//'::alloc_trace_off' - integer ierr, myrank - -#ifdef _TESTMEM -#ifdef _MEMMON - - call MPI_COMM_RANK(MPI_COMM_WORLD,myrank,ierr) - call CheckMPIReturn(subName,ierr) - if ( rank<0 .or. rank==myrank ) then - if (present(msg)) then - print *,myrank,': alloc_trace_off: ',msg - else - print *,myrank,': alloc_trace_off: ' - endif - call memmon_trace_off(myrank) - print *,'' - - endif - -#endif -#endif - - end subroutine alloc_trace_off - - -end module alloc_mod diff --git a/src/externals/pio1/pio/box_rearrange.F90.in b/src/externals/pio1/pio/box_rearrange.F90.in deleted file mode 100644 index dc498c82366..00000000000 --- a/src/externals/pio1/pio/box_rearrange.F90.in +++ /dev/null @@ -1,1685 +0,0 @@ -#define __PIO_FILE__ "box_rearrange.F90.in" -!> -!! -!! @file -!! $Revision: 819 $ -!! $LastChangedDate: 2013-05-31 13:32:27 -0500 (Fri, 31 May 2013) $ -!! @brief -!! Perform data rearrangement with each io processor -!! owning a rectangular box in the output domain -!! @details -!! REVISION HISTORY: -!! -!! 20070726 Initial version - R. Loy -!! 20070807 Improved way MPI is called - R. Loy -!! 20070825 fix hardcoded dim and unintentionally templated int - R. Loy -!! 20071111 cache rearranger setup (1st and 2nd communications) -!! 20090512 added flow-control logic option to comp2io and io2comp -!! (imported flow-controlled alltoall logic ("swapm") from -!! Community Atmosphere Model) - P. Worley -!! 20100207 added flow-control logic option to box_rearrange_create - P. Worley -!! -!< - -!#define MEMCHK -!#define DEBUG 0 -!#define DEBUG_INDICES 0 -!#define DEBUG_BARRIER 0 - -! cache communication pattern for rearranger in the ioDesc -#define BOX_CACHE 1 - -#include "rearr_options.h" - -!> -!! \def TAG -!! 
Arbitrary mpi message tags used for the rearrange -!< -#define TAG0 100 -#define TAG1 101 -#define TAG2 102 - -module box_rearrange - - use pio_kinds, only : pio_offset, r4, r8, i4, i8 - use pio_types - use pio_support, only : piodie, Debug, DebugIO, CheckMPIReturn, pio_fc_gather_offset -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf !_EXTERNAL -#endif - use alloc_mod, only : alloc_check, dealloc_check - use pio_spmd_utils, only : pio_swapm -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none - - private -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - - public :: box_rearrange_create, & - box_rearrange_free, & - box_rearrange_comp2io, & - box_rearrange_io2comp - - interface box_rearrange_comp2io - ! TYPE int,real,double - module procedure box_rearrange_comp2io_{TYPE} - end interface - - interface box_rearrange_io2comp - ! TYPE int,real,double - module procedure box_rearrange_io2comp_{TYPE} - end interface - - character(len=*), parameter :: modName='box_rearrange' - -#ifdef MEMCHK -integer :: msize, rss, mshare, mtext, mstack, lastrss=0 -#endif - - -contains -!> -!! @public box_rearrange_comp2io -!! -!! @brief moves data from the computational tasks to the io tasks -!! -!< -! TYPE real,double,int -subroutine box_rearrange_comp2io_{TYPE} (IOsystem, ioDesc, s1, src, niodof, & - dest, comm_option, fc_options) - - implicit none - - type (IOsystem_desc_t), intent(inout) :: IOsystem - type (IO_desc_t) :: ioDesc - integer, intent(in) :: s1, niodof - {VTYPE}, intent(in) :: src(s1) - {VTYPE}, intent(out) :: dest(niodof) - integer, optional, intent(in) :: comm_option - integer, optional, intent(in) :: fc_options(3) ! 1: handshake (0/false,1/true) - ! 2: send (0) vs isend (1) - ! 3: max number of outstanding requests - - ! local vars - - character(len=*), parameter :: subName=modName//'::box_rearrange_comp2io_{TYPE}' - - integer :: pio_option - logical :: pio_hs - logical :: pio_isend - integer :: pio_maxreq - integer :: ndof - integer :: num_iotasks - integer :: nrecvs - - integer :: i - integer :: ierror - integer :: io_comprank - integer :: myrank - integer :: nprocs - integer :: status(MPI_STATUS_SIZE) - - integer,pointer :: rfrom(:) ! rank of ith sender to this ioproc - integer,pointer :: rtype(:) - - integer,pointer :: scount(:) - integer,pointer :: stype(:) - integer :: from - integer,pointer :: a2a_displs(:) - integer,pointer :: a2a_sendcounts(:) - integer,pointer :: a2a_sendtypes(:) - integer,pointer :: a2a_recvcounts(:) - integer,pointer :: a2a_recvtypes(:) - integer,pointer :: sreq(:) - integer,pointer :: rreq(:) ! receive requests - - -#ifdef _MPISERIAL - integer :: num_tasks, ioproc, ioindex, s2 - ndof= iodesc%ndof - num_tasks = IOsystem%num_tasks - num_iotasks = IOsystem%num_iotasks - if (num_tasks /= 1 .or. num_iotasks /= 1) & - call piodie( __PIO_FILE__,__LINE__, & - 'built with -D_MPISERIAL but num_tasks=', num_tasks, & - 'num_iotasks=', num_iotasks ) - - if (s1>0 .and. s1< ndof) & - call piodie( __PIO_FILE__,__LINE__, & - 'box_rearrange_comp2io: size(compbuf)=', size(src), & - ' not equal to size(compdof)=', ndof ) - - do i=1,ndof - ioproc = ioDesc%dest_ioproc(i) - ioindex = ioDesc%dest_ioindex(i) - - if (ioproc /= -1 ) then ! ignore sender hole - if (ioproc /= 1) & ! ioproc is 1-based - call piodie( __PIO_FILE__,__LINE__, & - 'box_rearrange_comp2io: i=', i, & - 'dest_ioproc(i)=', ioproc ) - -! if ( ioindex<0 .or. ioindex>=ndof ) & -! call piodie( __PIO_FILE__,__LINE__, & -! 'box_rearrange_comp2io: i=', i, & -! 
'dest_ioindex(i) out of range=', int(ioindex)) - - dest(ioindex+1) = src(i) ! ioindex is 0-based - - endif - - end do - -#else - - - ! FIXME: Ideally the iodesc should contain the rearr and - ! the rearr_opts - however the current code sets the - ! rearr and rearr_opts on the IOsystem - - ! The rearranger options in IODESC overrides the defaults - if(IOsystem%rearr_opts%comm_type == PIO_rearr_comm_p2p) then - if( (IOsystem%rearr_opts%fcd == PIO_rearr_comm_fc_2d_disable) .or.& - (IOsystem%rearr_opts%fcd == PIO_rearr_comm_fc_1d_io2comp) ) then - pio_option = POINT_TO_POINT - else - pio_option = FLOW_CONTROL - end if - else - pio_option = COLLECTIVE - end if - - ! The rearranger options passed in to this function overrides - ! both the defaults and the options in IODESC - if ( present( comm_option ) ) then - if ((comm_option == COLLECTIVE) & - .or. (comm_option == POINT_TO_POINT) & - .or. (comm_option == FLOW_CONTROL)) then - pio_option = comm_option - endif - endif - - if (pio_option == FLOW_CONTROL) then - pio_hs = IOsystem%rearr_opts%comm_fc_opts_comp2io%enable_hs - pio_isend = IOsystem%rearr_opts%comm_fc_opts_comp2io%enable_isend - pio_maxreq = IOsystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req - - ! The rearranger options passed to this function overrides - ! both the default and the options in IODESC - if ( present(fc_options) ) then - if (fc_options(1) == 0) then - pio_hs = .false. - endif - if (fc_options(2) == 1) then - pio_isend = .true. - endif - if (fc_options(3) >=-1) then - pio_maxreq = fc_options(3) - endif - endif - endif - - ndof= iodesc%ndof - nrecvs = ioDesc%nrecvs ! number of distinct senders to the ioproc - myrank = IOsystem%union_rank - nprocs = IOsystem%num_tasks - num_iotasks = IOsystem%num_iotasks - - if (s1 > 0 .and. s1 ioDesc%scount - stype => ioDesc%stype - if (pio_option /= POINT_TO_POINT) then - call alloc_check(a2a_sendcounts, nprocs) - call alloc_check(a2a_displs, nprocs) - call alloc_check(a2a_sendtypes, nprocs) - call alloc_check(a2a_recvcounts, nprocs) - call alloc_check(a2a_recvtypes, nprocs) - do i=1,nprocs - a2a_displs(i) = 0 - - a2a_sendcounts(i) = 0 - a2a_sendtypes(i) = MPI_INTEGER - a2a_recvcounts(i) = 0 - a2a_recvtypes(i) = MPI_INTEGER - end do - - if (IOsystem%IOproc) then - do i=1,nrecvs - from = rfrom(i)+1 ! array is 1-based - a2a_recvcounts(from) = 1 - a2a_recvtypes(from) = rtype(i) - end do - endif - - do i=1,num_iotasks - if (scount(i) /= 0) then - ! go from 1-based io rank to 0-based comprank - io_comprank = find_io_comprank(IOsystem,i) + 1 ! 
array is 1-based - - a2a_sendcounts(io_comprank) = 1 - a2a_sendtypes(io_comprank) = stype(i) - endif - end do - - if (pio_option == COLLECTIVE) then - -#ifdef TIMING - call t_startf("PIO:a2a_box_rear_comp2io_{TYPE}") -#endif - call MPI_ALLTOALLW(src, a2a_sendcounts, a2a_displs, a2a_sendtypes, & - dest, a2a_recvcounts, a2a_displs, a2a_recvtypes, & - IOsystem%union_comm, ierror ) -#ifdef TIMING - call t_stopf("PIO:a2a_box_rear_comp2io_{TYPE}") -#endif - call CheckMPIReturn('box_rearrange', ierror) - else -#ifdef TIMING - call t_startf("PIO:swapm_box_rear_comp2io_{TYPE}") -#endif - call pio_swapm( nprocs, myrank, & - src, ndof, a2a_sendcounts, a2a_displs, a2a_sendtypes, & - dest, niodof, a2a_recvcounts, a2a_displs, a2a_recvtypes, & - IOsystem%union_comm, pio_hs, pio_isend, pio_maxreq ) -#ifdef TIMING - call t_stopf("PIO:swapm_box_rear_comp2io_{TYPE}") -#endif - endif - call dealloc_check(a2a_sendcounts) - call dealloc_check(a2a_displs) - call dealloc_check(a2a_sendtypes) - call dealloc_check(a2a_recvcounts) - call dealloc_check(a2a_recvtypes) - - else - call alloc_check(sreq, num_iotasks, 'send requests') - -#ifdef DEBUG - if (myrank==0) then - print *,'comp2io using cached rearranger info' - endif -#endif - -#ifdef TIMING - call t_startf("PIO:p2p_box_rear_comp2io_{TYPE}") -#endif - ! - ! send data from comp procs - ! - - do i=1,num_iotasks - if (scount(i) /= 0) then - - ! go from 1-based io rank to 0-based comprank - io_comprank=find_io_comprank(IOsystem,i) - - if(Debug) print *, __PIO_FILE__,__LINE__,myrank,': send posted dest=',io_comprank,' count=',scount(i), stype(i) - - call MPI_ISEND( src, 1, stype(i), & ! buf, count, type - io_comprank,TAG2, & ! destination,tag - IOsystem%union_comm,sreq(i),ierror ) - call CheckMPIReturn('box_rearrange',ierror) - endif - - end do - - ! - ! post receives on io procs - ! - if (IOsystem%IOproc) then - do i=1,nrecvs - - call MPI_IRECV( dest,1, rtype(i), & ! buf, count, type - rfrom(i), TAG2, & ! source, tag - IOsystem%union_comm,rreq(i),ierror ) - call CheckMPIReturn('box_rearrange',ierror) - end do - - endif - - ! - ! finish up - ! - - if (IOsystem%IOproc) then - do i=1,nrecvs - call MPI_WAIT( rreq(i), status, ierror ) - call CheckMPIReturn('box_rearrange',ierror) - end do - call dealloc_check(rreq, 'receive requests') - endif - - do i=1,num_iotasks - if (scount(i) /= 0) then - call MPI_WAIT( sreq(i), status, ierror ) - call CheckMPIReturn('box_rearrange',ierror) - endif - end do - - call dealloc_check(sreq, 'send requests') - -#ifdef TIMING - call t_stopf("PIO:p2p_box_rear_comp2io_{TYPE}") -#endif - -#if DEBUG_BARRIER - call MPI_BARRIER(IOsystem%union_comm,ierror) - call CheckMPIReturn(subName,ierror) - if (myrank==0) print *,'BARRIER - end of comp2io' -#endif - - endif ! POINT_TO_POINT -#endif /* not _MPISERIAL */ -end subroutine box_rearrange_comp2io_{TYPE} - -! TYPE real,double,int -subroutine box_rearrange_io2comp_{TYPE} (IOsystem,ioDesc,s1, iobuf,s2, compbuf, & - comm_option, fc_options) - implicit none - - type (IOsystem_desc_t), intent(inout) :: IOsystem - type (IO_desc_t) :: ioDesc - integer, intent(in) :: s1, s2 - {VTYPE}, intent(in) :: iobuf(s1) - {VTYPE}, intent(out) :: compbuf(s2) - integer, optional, intent(in) :: comm_option - integer, optional, intent(in) :: fc_options(3) - - ! 
local vars - - character(len=*), parameter :: subName=modName//'::box_rearrange_io2comp_{TYPE}' - - integer :: pio_option - logical :: pio_hs - logical :: pio_isend - integer :: pio_maxreq - integer :: ndof - integer :: niodof - integer :: num_iotasks - integer :: nrecvs - - integer :: i - integer :: ierror - integer :: io_comprank - integer :: myrank - integer :: comprank - integer :: nprocs - integer :: status(MPI_STATUS_SIZE) - - integer,pointer :: rfrom(:) ! rank of ith sender to this ioproc - integer,pointer :: rtype(:) ! mpi receive types - - integer,pointer :: scount(:) ! scount(i) = no. sends to ith ioproc - integer,pointer :: stype(:) ! mpi send types - - integer,pointer :: a2a_displs(:) - integer,pointer :: a2a_sendcounts(:) - integer,pointer :: a2a_sendtypes(:) - integer,pointer :: a2a_recvcounts(:) - integer,pointer :: a2a_recvtypes(:) - integer,pointer :: sreq(:) - integer,pointer :: rreq(:) ! receive requests for comp procs - -#ifdef _MPISERIAL - integer :: num_tasks, ioproc, ioindex - ! begin - compbuf(:) = 0 - ndof = iodesc%ndof - niodof = size(iobuf) - num_tasks = IOsystem%num_tasks - num_iotasks = IOsystem%num_iotasks - - if (num_tasks /= 1 .or. num_iotasks /= 1) & - call piodie( __PIO_FILE__,__LINE__, & - 'built with -D_MPISERIAL but num_tasks=', num_tasks, & - 'num_iotasks=', num_iotasks ) - - if (size(compbuf) > 0 .and. size(compbuf) ioDesc%scount - stype => ioDesc%stype - if (pio_option /= POINT_TO_POINT) then - call alloc_check(a2a_sendcounts, nprocs) - call alloc_check(a2a_displs, nprocs) - call alloc_check(a2a_sendtypes, nprocs) - call alloc_check(a2a_recvcounts, nprocs) - call alloc_check(a2a_recvtypes, nprocs) - - do i=1,nprocs - a2a_displs(i) = 0 - - a2a_sendcounts(i) = 0 - a2a_sendtypes(i) = MPI_INTEGER - a2a_recvcounts(i) = 0 - a2a_recvtypes(i) = MPI_INTEGER - end do - - do i=1,num_iotasks - if (scount(i) /= 0) then - ! go from 1-based io rank to 0-based comprank - io_comprank = find_io_comprank(IOsystem,i) +1 ! array is 1-based - - a2a_recvcounts(io_comprank) = 1 - a2a_recvtypes(io_comprank) = stype(i) - endif - end do - - if (IOsystem%IOproc) then - do i=1,nrecvs - comprank = rfrom(i) +1 ! array is 1-based - a2a_sendcounts(comprank) = 1 - a2a_sendtypes(comprank) = rtype(i) - end do - endif - - if (pio_option == COLLECTIVE) then - -#ifdef TIMING - call t_startf("PIO:a2a_box_rear_io2comp_{TYPE}") -#endif - call MPI_ALLTOALLW(iobuf, a2a_sendcounts, a2a_displs, a2a_sendtypes, & - compbuf, a2a_recvcounts, a2a_displs, a2a_recvtypes, & - IOsystem%union_comm, ierror ) -#ifdef TIMING - call t_stopf("PIO:a2a_box_rear_io2comp_{TYPE}") -#endif - call CheckMPIReturn(subName, ierror) - else - -#ifdef TIMING - call t_startf("PIO:swapm_box_rear_io2comp_{TYPE}") -#endif - call pio_swapm( nprocs, myrank, & - iobuf, niodof, a2a_sendcounts, a2a_displs, a2a_sendtypes, & - compbuf, ndof, a2a_recvcounts, a2a_displs, a2a_recvtypes, & - IOsystem%union_comm, pio_hs, pio_isend, pio_maxreq ) -#ifdef TIMING - call t_stopf("PIO:swapm_box_rear_io2comp_{TYPE}") -#endif - endif - call dealloc_check(a2a_sendcounts) - call dealloc_check(a2a_displs) - call dealloc_check(a2a_sendtypes) - call dealloc_check(a2a_recvcounts) - call dealloc_check(a2a_recvtypes) - - else - -#ifdef TIMING - call t_startf("PIO:p2p_box_rear_io2comp_{TYPE}") -#endif - call alloc_check(rreq, num_iotasks, 'recv requests') - - ! - ! post receives on comp procs - ! - - do i=1,num_iotasks - if (scount(i) /= 0) then - - ! 
go from 1-based io rank to 0-based comprank - io_comprank=find_io_comprank(IOsystem,i) - - call MPI_IRECV( compbuf, 1, stype(i), & ! buf, count, type - io_comprank,TAG2, & ! destination,tag - IOsystem%union_comm,rreq(i),ierror ) - call CheckMPIReturn(subName,ierror) - endif - - end do - - - ! - ! do sends on io procs - ! - - - if (IOsystem%IOproc) then - do i=1,nrecvs - - call MPI_ISEND( iobuf,1, rtype(i), & ! buf, count, type - rfrom(i), TAG2, & ! dest, tag - IOsystem%union_comm,sreq(i),ierror ) - call CheckMPIReturn(subName,ierror) - - end do - - endif - - ! - ! finish up - ! - - do i=1,num_iotasks - if (scount(i) /= 0) then - call MPI_WAIT( rreq(i), status, ierror ) - call CheckMPIReturn(subName,ierror) - endif - end do - - call dealloc_check(rreq,'recv requests') - - if (IOsystem%IOproc) then - do i=1,nrecvs - call MPI_WAIT( sreq(i), status, ierror ) - call CheckMPIReturn(subName,ierror) - end do - - call dealloc_check(sreq,'send requests') - endif -#ifdef TIMING - call t_stopf("PIO:p2p_box_rear_io2comp_{TYPE}") -#endif - - endif ! POINT_TO_POINT -#endif /* not _MPISERIAL */ - -end subroutine box_rearrange_io2comp_{TYPE} - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! - ! io_comprank - ! - ! find the rank in union_comm of the ith io processor - ! - - integer function find_io_comprank( Iosystem, ioprocindex ) - implicit none - - type (Iosystem_desc_t), intent(in) :: Iosystem - integer ioprocindex - - find_io_comprank=iosystem%ioranks(ioprocindex) - end function find_io_comprank - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! - ! gindex_to_coord - ! - ! find global xyz coordinates given a global index - ! - - subroutine gindex_to_coord( gindex, gstride, ndim, gcoord ) - implicit none - integer(kind=pio_offset),intent(in) :: gindex ! 0-based global index - integer(kind=pio_offset),intent(in) :: gstride(:) ! stride for each dimension - ! e.g. (nx,nx*ny,nx*ny*nz) - integer,intent(in) :: ndim ! number of dimesions e.g. 2 or 3 - integer(kind=pio_offset),intent(out) :: gcoord(:) ! output global coords (0-based) - - ! local vars - character(len=*), parameter :: subName=modName//'::gindex_to_coord' - integer i - integer (kind=pio_offset) :: tempindex - - ! loop outermost to innermost e.g. z,y,x - - tempindex=gindex - - do i=ndim,2,-1 - gcoord(i) = tempindex/gstride(i-1) ! integer division - tempindex = tempindex - gcoord(i)*gstride(i-1) ! remainder - end do - - ! base case - innermost dimension - gcoord(1) = tempindex - - end subroutine gindex_to_coord - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! - ! find_ioproc - ! - ! determine if a coordinate is in any ioproc's box '! extra apostrophy added for cpp - ! if so, return a 1-based ioproc number - ! and 1-based index for that ioproc's iobuf ' - ! 
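
[Editor's sketch] The gindex_to_coord routine above walks from the outermost dimension inward, dividing by the cumulative strides (nx, nx*ny, nx*ny*nz, ...) to recover 0-based coordinates from a 0-based global offset. A minimal Python sketch of the same arithmetic; names and the example extents are illustrative.

    def gindex_to_coord(gindex, gdims):
        """Map a 0-based global offset to 0-based coordinates, innermost first."""
        strides, s = [], 1
        for d in gdims:                      # stride[i] = product of gdims[0..i]
            s *= d
            strides.append(s)
        coord, rem = [0] * len(gdims), gindex
        for i in range(len(gdims) - 1, 0, -1):   # peel off outermost dims first
            coord[i] = rem // strides[i - 1]
            rem -= coord[i] * strides[i - 1]
        coord[0] = rem
        return coord

    # e.g. a 4 x 3 x 2 global array: offset 17 -> (x, y, z) = (1, 1, 1)
    assert gindex_to_coord(17, [4, 3, 2]) == [1, 1, 1]
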
- - logical function find_ioproc( gcoord, lb, ub, lstride, ndim, nioproc, & - io_proc, io_index ) - implicit none - integer(kind=pio_offset),intent(in) :: gcoord(:) - integer,intent(in) :: ndim - integer,intent(in) :: nioproc - integer(kind=pio_offset),intent(in) :: lb(ndim,nioproc) - integer(kind=pio_offset),intent(in) :: ub(ndim,nioproc) - integer(kind=pio_offset),intent(in) :: lstride(ndim,nioproc) - integer,intent(inout) :: io_proc - integer(kind=pio_offset),intent(out) :: io_index - - character(len=*), parameter :: subName=modName//'::find_ioproc' - integer :: i,j, decompstep(ndim), k - logical :: found - integer(kind=pio_offset) :: lcoord(ndim) - integer(kind=pio_offset):: lindex - - found = .false. - io_index = -1 - i = max(1,min(io_proc,nioproc)) - decompstep=1 - do j=1,ndim - if(minval(ub(j,:))ub(j,1)) then - decompstep(j)=i-1 - exit - endif - enddo - endif - enddo - k=0 - loop_ioproc: do while(.not. found.and.k<5004) - k=k+1 - do j=1,ndim - if ( gcoord(j) < lb(j,i) ) then - i = max(1,i-decompstep(j)) - if(k>5000) print *,__PIO_FILE__,__LINE__,i,gcoord(:),lb(:,i),ub(:,i) - cycle loop_ioproc - else if(gcoord(j) >= ub(j,i) ) then - i = min(nioproc,i+decompstep(j)) - if(k>5000) print *,__PIO_FILE__,__LINE__,i,gcoord(:),lb(:,i),ub(:,i) - cycle loop_ioproc - endif - end do - - ! gcoord matches this box - found = .true. - io_proc = i ! 1-based here - end do loop_ioproc - - find_ioproc = found - - if (found) then - - ! find location within the ioproc's box ' - do i=1,ndim - lcoord(i) = gcoord(i)-lb(i,io_proc) - end do - - ! find index into ioproc's buffer ' - lindex = lcoord(1) - do i=2,ndim - lindex = lindex+lcoord(i)*lstride(i-1,io_proc) - end do - - ! io_index=lindex+1 ! convert to 1-based - io_index = lindex ! 0-based - endif - - - end function find_ioproc - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! - ! compute_dest - ! - ! compute destination ioproc and index for every compdof - ! - ! - - subroutine compute_dest(compdof, start, kount, gsize, ndim, nioproc, & - dest_ioproc, dest_ioindex ) - implicit none - integer(kind=pio_offset), intent(in) :: compdof(:) - integer(kind=pio_offset), intent(in) :: start(:,:) ! start(ndim,nioproc) - integer(kind=pio_offset), intent(in) :: kount(:,:) ! count(ndim,nioproc) - - integer, intent(in) :: gsize(:) ! global domain size gsize(ndim) - integer, intent(in) :: ndim - integer, intent(in) :: nioproc - integer, intent(out) :: dest_ioproc(:) ! ioproc number to send to - integer(kind=PIO_OFFSET), intent(out) :: dest_ioindex(:) ! index in iobuf on that ioproc - - ! local vars - character(len=*), parameter :: subName=modName//'::compute_dest' - integer i,j - integer ndof - integer(kind=pio_offset):: gindex - integer(kind=pio_offset):: lb(ndim,nioproc) ! 0-based lower bound of boxes - integer(kind=pio_offset):: ub(ndim,nioproc) ! 0-based upper bound of boxes - integer(kind=pio_offset):: gcoord(ndim) ! 0-based xyz coordinates - integer(kind=pio_offset):: gstride(ndim) ! stride for each dimension - integer(kind=pio_offset):: lstride(ndim,nioproc) ! stride for each dim on each ioprocs - integer ioproc - integer (kind=pio_offset) :: ioindex - - ioproc = 0 - ! compute 0-based start array - - do i=1,nioproc - do j=1,ndim ! rml fix 3->ndim - lb(j,i) = start(j,i)-1 - ub(j,i) = lb(j,i)+ kount(j,i) - end do - end do - - ! compute stride for each dimension of array - ! e.g. (NX,NX*NY,NX*NY*NZ) - - gstride(1) = gsize(1) ! innermost dimension - do i=2,ndim - gstride(i) = gsize(i)*gstride(i-1) - end do - - do i=1,nioproc ! 
loop over all io boxes - lstride(1,i) = kount(1,i) ! innermost dimension - do j=2,ndim - lstride(j,i) = kount(j,i)*lstride(j-1,i) - end do - end do - - ndof=size(compdof) - -! if(Debug) print *,__PIO_FILE__,__LINE__,minval(compdof), maxval(compdof) - do i=1,ndof - ! Compute global coordinates for compdof(i) - - if (compdof(i)==0) then ! sender hole - dest_ioproc(i) = -1 - dest_ioindex(i) = -1 - else - - gindex = compdof(i)-1 ! 0-based index - - call gindex_to_coord(gindex, gstride, ndim, gcoord) - - ! if(Debug) print *, subName,':: dof ',i,' index=',gindex,' gcoord=',gcoord - - ! determine if gcoord lies in any io proc's start/count box ' - - if (.not. find_ioproc(gcoord, lb, ub, lstride, ndim, nioproc, & - ioproc, ioindex)) then - - print *, subName,':: ERROR: no destination found for compdof=', compdof(i) - print *, subName,':: INFO: gsize=', gsize - print *, subName,':: INFO: nioproc',nioproc,' ioproc ',ioproc,' ioindex ',ioindex - - do j=1,nioproc - print *, subName, ':: INFO io ', j, ' start=', start(:,j), ' count=', kount(:,j) - end do - - do j=1,nioproc - print *, subName, ':: INFO io ', j, ' lb=', lb(:,j), ' ub=', ub(:,j) - end do - - print *, subName, ':: INFO dof ', i, ' index=', gindex, ' gcoord=', gcoord - call piodie( __PIO_FILE__,__LINE__, 'quitting' ) - endif - - dest_ioproc(i) = ioproc - dest_ioindex(i) = ioindex - - endif - - end do ! i=1,ndof - - end subroutine compute_dest - -!> -!! box_rearrange_create -!! -!! @brief create a rearranger -!! -!! @detail this will allocate the following storage in ioDesc: -!! dest_ioproc(ndof) -!! dest_ioindex(ndof) -!! -!! this space should be freed in box_rearrange_free -!! -!< - subroutine box_rearrange_create(Iosystem, compdof, gsize, ndim, & - nioproc, ioDesc) - - - implicit none - - type (Iosystem_desc_t), intent(in) :: Iosystem - integer(kind=pio_offset), intent(in) :: compdof(:) ! global indices for compbuf - integer, intent(in) :: gsize(:) ! global domain size gsize(ndim) - integer, intent(in) :: ndim, nioproc - type (IO_desc_t), intent(inout) :: ioDesc - - ! local vars - character(len=*), parameter :: subName=modName//'::box_rearrange_create' - integer(kind=pio_offset) :: start(ndim,nioproc), count(ndim,nioproc) - - integer :: ierror - integer :: i - - integer :: niodof - integer :: pio_offset_kind ! kind of pio_offset - -!!!!!! - iodesc%ndof = size(compdof) - - call alloc_check( ioDesc%dest_ioproc, iodesc%ndof, & - 'box_rearrange_create dest_ioproc' ) - call alloc_check( ioDesc%dest_ioindex, iodesc%ndof, & - 'box_rearrange_create dest_ioindex') - -!!!!!! - ! Gather iodesc%start,iodesc%count from IO procs to root IO proc - ! then broadcast to all procs - if(ndim.ne.size(iodesc%start)) then - print *,__PIO_FILE__,__LINE__,ndim, size(iodesc%start) - call piodie(__PIO_FILE__,__LINE__,'bad ndim size',ndim) - end if - - start = 0 - count = 0 -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - if(kind(start) == kind(ndim)) then - pio_offset_kind = MPI_INTEGER - else - pio_offset_kind = MPI_INTEGER8 - end if - - if (Iosystem%IOproc) then - call pio_fc_gather_offset(iodesc%start, ndim, PIO_OFFSET_KIND, & ! sendbuf, count, type - start, ndim, PIO_OFFSET_KIND, & ! recvbuf, count, type - 0, Iosystem%IO_comm ) - - call pio_fc_gather_offset(iodesc%count, ndim, PIO_OFFSET_KIND, & ! sendbuf, count, type - count, ndim, PIO_OFFSET_KIND, & ! 
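
[Editor's sketch] Taken together, find_ioproc and compute_dest above map each computational degree of freedom to the IO task whose start/count box contains its global coordinate, plus a 0-based offset into that task's buffer. A compact Python sketch of that lookup, using a plain linear scan in place of the stepped search; names and the example layout are illustrative.

    def locate_in_box(gcoord, starts, counts):
        """Return (io_task, local_offset) for a 0-based global coordinate.

        starts/counts give each IO task's 1-based box corner and extents,
        innermost dimension first.
        """
        ndim = len(gcoord)
        for task, (start, count) in enumerate(zip(starts, counts), start=1):
            lb = [s - 1 for s in start]                    # 0-based lower bound
            if all(lb[d] <= gcoord[d] < lb[d] + count[d] for d in range(ndim)):
                offset, stride = 0, 1
                for d in range(ndim):                      # innermost varies fastest
                    offset += (gcoord[d] - lb[d]) * stride
                    stride *= count[d]
                return task, offset
        raise ValueError("coordinate falls outside every IO box")

    # two IO tasks splitting a 4 x 2 array along x
    print(locate_in_box([1, 1], [(1, 1), (3, 1)], [(2, 2), (2, 2)]))   # (1, 3)
    print(locate_in_box([2, 0], [(1, 1), (3, 1)], [(2, 2), (2, 2)]))   # (2, 0)
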
recvbuf, count, type - 0, Iosystem%IO_comm ) - - if(Debug) then - print *, __PIO_FILE__,__LINE__,iodesc%start, iodesc%count - if(iosystem%io_rank==0) & - print *,__PIO_FILE__,__LINE__,ndim,(i,' :', & - start(:,i), count(:,i),i=1,iosystem%num_iotasks) - end if - ! note that index in start,count is the io_rank not comp_rank - endif - - call MPI_BCAST(start, ndim*Iosystem%num_iotasks, PIO_OFFSET_KIND, & ! buf, cnt - Iosystem%ioranks(1), Iosystem%union_comm, ierror ) - call CheckMPIReturn(subName, ierror) - - call MPI_BCAST(count, ndim*Iosystem%num_iotasks, PIO_OFFSET_KIND, & ! buf, cnt - Iosystem%ioranks(1), Iosystem%union_comm, ierror ) - call CheckMPIReturn(subName, ierror) - -!#if DEBUG - if (debug .and. Iosystem%comp_rank==0) then - do i=1,Iosystem%num_iotasks - print *, subName,':: comp_rank=', Iosystem%comp_rank, ': io ', & - i, ' start=',start(:,i), ' count=', count(:,i) - end do - endif -!#endif -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif -!!!!!!! - ! compute io dest and indices - - call compute_dest(compdof, start, count, gsize, ndim, & - Iosystem%num_aiotasks, ioDesc%dest_ioproc, ioDesc%dest_ioindex ) - -#ifdef _MPISERIAL -! Version for use with mpi-serial. -! NOTE: cached values in iodesc other than dest_ioproc() and dest_ioindex() -! will NOT be allocated in this build - - if (Iosystem%num_tasks /= 1 .or. Iosystem%num_iotasks /= 1) then - call piodie( __PIO_FILE__,__LINE__, & - 'pio was built with -D_MPISERIAL but tasks=', & - Iosystem%num_tasks, & - 'iotasks=', Iosystem%num_iotasks) - endif - -#else -! else not _MPISERIAL -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - niodof = ioDesc%count(1) - do i=2,ndim - niodof = niodof*ioDesc%count(i) - end do - - call compute_counts(Iosystem, ioDesc, niodof) - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - call dealloc_check(iodesc%dest_ioindex,'dest_ioindex') - nullify(iodesc%dest_ioindex) - call dealloc_check(iodesc%dest_ioproc,'dest_ioproc') - nullify(iodesc%dest_ioproc) - - -! not _MPISERIAL -#endif - - end subroutine box_rearrange_create - -!> -!! @private compute_counts -!! @brief Define comp <-> IO communications patterns -!! -!< -#ifndef _MPISERIAL - subroutine compute_counts(Iosystem, ioDesc, niodof) - - use calcdisplace_mod, only : calcdisplace,GCDblocksize,gcd - - - type (Iosystem_desc_t), intent(in) :: Iosystem - type (IO_desc_t),intent(inout) :: ioDesc - - - integer, intent(in) :: niodof - - ! local vars - integer :: ndof - character(len=*), parameter :: subName=modName//'::compute_counts' - integer :: myrank ! local task id - integer :: num_tasks ! size of comp communicator - integer :: num_iotasks ! size of I/O communicator - integer :: i ! loop index - integer :: iorank ! i/o task id in i/o communicator + 1 - integer :: io_comprank ! i/o task id in comp communicator - integer :: nrecvs ! if i/o task, number of comp tasks sending - ! to/receiving from this task (cached) - integer(kind=pio_offset) :: ioindex ! offset for data to be sent to i/o task - integer :: pos ! array offset - integer :: ierror ! MPI error return - - integer,pointer :: scount(:) ! scount(num_iotasks) is no. 
sends to each i/o task (cached) - integer(kind=pio_offset),pointer :: sindex(:) ! sindex(ndof) is blocks of src indices - integer(kind=pio_offset),pointer :: s2rindex(:)! s2rindex(ndof) is local blocks of dest indices - integer,pointer :: spos(:) ! spos(num_iotasks) is start in sindex for each i/o task - integer,pointer :: tempcount(:) ! used in calculating sindex and s2rindex - integer,pointer :: stype(:) ! MPI type used in i/o sends (cached) - - ! needed on ioprocs only - integer,pointer :: rcount(:) ! rcount(nrecvs) is no. recvs from each sender - integer,pointer :: rfrom(:) ! rfrom(nrecvs) is id of each sender (cached) - integer(kind=pio_offset),pointer :: rindex(:) ! rindex(niodof) is blocks of dest indices - integer,pointer :: rtype(:) ! MPI type used in comp receives (cached) - - ! swapm alltoall communication variables - integer,pointer :: sr_types(:) - integer,pointer :: send_counts(:) - integer,pointer :: send_displs(:) - integer :: rbuf_size - integer,pointer :: recv_buf(:) - integer,pointer :: recv_counts(:) - integer,pointer :: recv_displs(:) - - ! swapm flow control parameters - logical :: pio_hs - logical :: pio_isend - integer :: pio_maxreq - - ! added 24MAR11 - integer :: bsize, len - integer(i4) :: blocksize - integer,allocatable :: blk_len(:) - integer(kind=pio_offset) :: i8blocksize - integer(kind=pio_offset),allocatable :: displace(:) - integer(kind=pio_offset),allocatable :: bsizeT(:) - integer :: numblks - integer :: newTYPEs,newTYPEr - integer :: ii - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Communication initialization - pio_hs = DEF_P2P_HANDSHAKE - pio_isend = DEF_P2P_ISEND - pio_maxreq = DEF_P2P_MAXREQ - ndof = iodesc%ndof - - ! First communication - ! comp procs tell io procs how many items they will send - - ! init - myrank = Iosystem%union_rank - num_tasks = IOsystem%num_tasks - num_iotasks = Iosystem%num_iotasks - - !need to cache - call alloc_check(ioDesc%scount, num_iotasks, 'scount buffer') - scount=>ioDesc%scount - - ! determine number of items going to each io proc - scount=0 - do i=1,ndof - iorank=ioDesc%dest_ioproc(i) - - if (iorank /= -1) then ! not a sender hole - if (iorank<1 .or. iorank>num_iotasks) & - call piodie(__PIO_FILE__,__LINE__,'io destination out of range',iorank) - scount(iorank) = scount(iorank) + 1 - endif - - end do - -#if DEBUG - print *,myrank,': scount()=',scount -#endif - - ! allocate and initialize swapm specification arguments - call alloc_check(sr_types, num_tasks, 'sr_types temp') - sr_types = MPI_INTEGER - - ! send data structures for all processes - ! send_buf (num_iotasks) is scount - ! sbuf_size = num_iotasks - ! send_counts(num_tasks) = 0 for non-io, 1 for i/o - ! send_displs(num_tasks) = 0 for non-io, (i-1) for i/o - - call alloc_check(send_counts, num_tasks, 'send_counts temp') - send_counts = 0 - - call alloc_check(send_displs, num_tasks, 'send_displs temp') - send_displs = 0 - - do i=1,num_iotasks - ! go from 1-based io rank to 0-based rank in union_comm - io_comprank = find_io_comprank(IOsystem,i) + 1 ! arrays are 1-based - send_counts(io_comprank) = 1 - send_displs(io_comprank) = i-1 - end do - - ! receive data structures - if (Iosystem%IOproc) then - - ! for i/o processes: - ! recv_buf (num_tasks) == scount from each process - ! rbuf_size = num_tasks - ! recv_counts(num_tasks) == 1 - ! 
recv_displs(num_tasks) == (i-1) - - rbuf_size = num_tasks - call alloc_check(recv_buf, rbuf_size, 'recv_buf temp') - recv_buf = 0 - - call alloc_check(recv_counts, num_tasks, 'recv_counts temp') - recv_counts = 1 - - call alloc_check(recv_displs, num_tasks, 'recv_displs temp') - do i=1,num_tasks - recv_displs(i) = i-1 - end do -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - else - - ! for non-i/o processes - ! recv_buf(1) is ignored - ! rbuf_size = 1 - ! recv_counts(num_tasks) == 0 - ! recv_displs(num_tasks) == 0 - - rbuf_size = 1 - call alloc_check(recv_buf, rbuf_size, 'recv_buf temp') - recv_buf = 0 - - call alloc_check(recv_counts, num_tasks, 'recv_counts temp') - recv_counts = 0 - - call alloc_check(recv_displs, num_tasks, 'recv_displs temp') - recv_displs = 0 - - endif - - call pio_swapm( num_tasks, myrank, & - scount, num_iotasks, send_counts, send_displs, sr_types, & - recv_buf, rbuf_size, recv_counts, recv_displs, sr_types, & - IOsystem%union_comm, pio_hs, pio_isend, pio_maxreq ) - - ! determine nrecvs, rcount, and rfrom - nrecvs = 0 - if (Iosystem%IOproc) then - - do i=1,num_tasks - if (recv_buf(i) /= 0) then - nrecvs = nrecvs + 1 - endif - enddo - - call alloc_check(rcount, nrecvs, 'rcount buffer') - rcount = 0 - - !need to cache - call alloc_check(ioDesc%rfrom, nrecvs, 'rfrom') - rfrom=>ioDesc%rfrom - - nrecvs = 0 - do i=1,num_tasks - if (recv_buf(i) /= 0) then - nrecvs = nrecvs + 1 - rcount(nrecvs) = recv_buf(i) - rfrom(nrecvs) = i-1 - endif - enddo -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - endif - ioDesc%nrecvs = nrecvs - - call dealloc_check(recv_buf, 'recv_buf temp') - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! Second communication - ! send indices to io procs - - ! sindex() contains blocks of indices defining - ! data going to/coming from the i/o processes - call alloc_check(sindex, ndof, 'sindex temp') - sindex = 0 - - ! s2rindex() contains the destination indices - ! corresponding to sindex - call alloc_check(s2rindex, ndof, 'sindex temp') - s2rindex = 0 - - ! spos(i) is the position in sindex() where the - ! block of indices going to the ith ioproc starts - call alloc_check(spos, num_iotasks, 'spos temp') - spos(1)=1 - do i=2,num_iotasks - spos(i)=spos(i-1)+scount(i-1) - - if (scount(i)/=0 .and. spos(i) > ndof) & - call piodie(__PIO_FILE__,__LINE__,'spos=',int(spos(i)),'> ndof=',ndof) - end do - - call alloc_check(tempcount, num_iotasks, 'tempcount') - tempcount=0 - do i=1,ndof - iorank = ioDesc%dest_ioproc(i) - ioindex = ioDesc%dest_ioindex(i) - - if (iorank /= -1) then ! skip sender hole - sindex(spos(iorank)+tempcount(iorank)) = i-1 - s2rindex(spos(iorank)+tempcount(iorank)) = ioindex - tempcount(iorank) = tempcount(iorank) + 1 - - if (tempcount(iorank) > scount(iorank)) & - call piodie(__PIO_FILE__,__LINE__,'tempcount>scount') - endif - end do - call dealloc_check(tempcount, 'tempcount') - - ! send data mapping for all processes - ! send_buf (ndof) is s2rindex - ! sbuf_size = ndof - ! send_counts(num_tasks) = 0 for non-i/o, scount for i/o - ! send_displs(num_tasks) = 0 for non-i/o, spos-1 for i/o - - send_counts = 0 - send_displs = 0 - do i=1,num_iotasks - ! go from 1-based io rank to 0-based rank in union_comm - io_comprank = find_io_comprank(IOsystem,i) + 1 ! 
arrays are 1-based - send_counts(io_comprank) = scount(i) - send_displs(io_comprank) = spos(i)-1 - end do - call dealloc_check(spos, 'spos temp') - - ! receive data structures - if (Iosystem%IOproc) then - - ! for i/o processes: - ! recv_buf (niodof) is rindex - ! rbuf_size = niodof - ! recv_counts(num_tasks) is 0 for non-'rfrom', is rcount for 'rfrom' - ! recv_displs(num_tasks) is 0 for non-'rfrom', is sum_i recv_counts for 'rfrom' - - recv_counts = 0 - do i=1,nrecvs - recv_counts(rfrom(i)+1) = rcount(i) - enddo - - rbuf_size = sum(recv_counts) - call alloc_check(rindex, rbuf_size, 'rindex buffer') - rindex = 0 - - recv_displs = 0 - do i=2,nrecvs - recv_displs(rfrom(i)+1) = recv_displs(rfrom(i-1)+1) + rcount(i-1) - enddo -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - else - - ! for non-i/o processes - ! recv_buf(1) is rindex, which is ignored - ! rbuf_size = 1 - ! recv_counts(num_tasks) == 0 - ! recv_displs(num_tasks) == 0 - - rbuf_size = 1 - call alloc_check(rindex, rbuf_size) - rindex = 0 - - recv_counts = 0 - recv_displs = 0 - - endif - sr_types = MPI_INTEGER8 - call pio_swapm( num_tasks, myrank, & - s2rindex, ndof, send_counts, send_displs, sr_types, & - rindex, rbuf_size, recv_counts, recv_displs, sr_types, & - IOsystem%union_comm, pio_hs, pio_isend, pio_maxreq ) - - call dealloc_check(s2rindex, 's2rindex temp') - call dealloc_check(sr_types, 'sr_types temp') - call dealloc_check(send_counts, 'send_counts temp') - call dealloc_check(send_displs, 'send_displs temp') - call dealloc_check(recv_counts, 'recv_counts temp') - call dealloc_check(recv_displs, 'recv_displs temp') - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - ! - ! Create the mpi types for io proc receives - ! - - if (Iosystem%IOproc .and. nrecvs>0) then -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - !need to cache - call alloc_check(ioDesc%rtype, nrecvs, 'mpi recv types') - rtype=>ioDesc%rtype - pos = 1 - ii = 1 - allocate(bsizeT(nrecvs)) - do i=1,nrecvs - call GCDblocksize(rindex(pos:pos+rcount(i)-1),i8blocksize) - if(rcount(i) > 0) then - bsizeT(ii)=int(i8blocksize) - ii = ii + 1 - endif - pos = pos + rcount(i) - enddo - blocksize = gcd(bsizeT(1:ii-1)) - ! print *,'gcd: receive block lengths: ', bsizeT(1:ii-1) - deallocate(bsizeT) - ! 
print *,'GCD calculated for receive loop blocksize: ',blocksize -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - call MPI_TYPE_CONTIGUOUS(blocksize,ioDesc%baseTYPE,newTYPEr,ierror) - call CheckMPIReturn(subName,ierror) - call MPI_TYPE_COMMIT(newTYPEr,ierror) - call CheckMPIReturn(subName,ierror) -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - pos = 1 - do i=1,nrecvs - -#if DEBUG -#if DEBUG_INDICES - print *, subName,':: myrank=',myrank,': recv indices from ',rfrom(i), & - ' count=',rcount(i),' value=',rindex(pos:pos+rcount(i)-1) -#else - print *, subName,':: myrank=',myrank,': recv indices from ',rfrom(i), & - ' count=',rcount(i) -#endif -#endif - - len = rcount(i)/blocksize - allocate(displace(len)) - if(blocksize == 1) then - displace(:) = rindex(pos:pos+rcount(i)-1) - else - rindex(pos:pos+rcount(i)-1) = rindex(pos:pos+rcount(i)-1)+1 - call calcdisplace(blocksize,rindex(pos:pos+rcount(i)-1),displace) - endif -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - !DBG call alloc_print_usage(iosystem%comp_comm,80,'l2629') - ! need rindex to contain 0-based displacements here - call MPI_TYPE_CREATE_INDEXED_BLOCK( & - len, 1, int(displace), & ! count,blen, disp - newTYPEr, rtype(i), ierror ) ! oldtype, newtype - call CheckMPIReturn(subName,ierror) -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - call MPI_TYPE_COMMIT(rtype(i), ierror) - call CheckMPIReturn(subName,ierror) - - deallocate(displace) - pos = pos + rcount(i) - end do - call MPI_TYPE_FREE(newTYPEr,ierror) - - endif - ! - ! Create the mpi types for the comp proc sends -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - !need to cache - call alloc_check(ioDesc%stype, num_iotasks, 'mpi send types') - stype=>ioDesc%stype - - pos = 1 - allocate(bsizeT(num_iotasks)) - ii = 1 - do i=1,num_iotasks - if(scount(i) /= 0) then - call GCDblocksize(sindex(pos:pos+scount(i)-1),i8blocksize) - bsizeT(ii)=int(i8blocksize) - pos = pos + scount(i) - ii = ii+1 - endif - enddo - blocksize = gcd(bsizeT(1:ii-1)) - deallocate(bsizeT) -! print *,'GCD calculated for send loop blocksize: ',blocksize - call MPI_TYPE_CONTIGUOUS(blocksize,ioDesc%baseTYPE,newTYPEs,ierror) - call CheckMPIReturn(subName,ierror) - call MPI_TYPE_COMMIT(newTYPEs,ierror) - call CheckMPIReturn(subName,ierror) - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - pos = 1 - do i=1,num_iotasks - - if (scount(i) /= 0) then - len = scount(i)/blocksize - allocate(displace(len)) - if(blocksize == 1) then - displace(:) = sindex(pos:pos+scount(i)-1) - else - sindex(pos:pos+scount(i)-1) = sindex(pos:pos+scount(i)-1)+1 - call calcdisplace(blocksize,sindex(pos:pos+scount(i)-1),displace) - endif - call MPI_TYPE_CREATE_INDEXED_BLOCK( & - len, 1, int(displace), & ! count, blen, disp - newTYPEs, stype(i), ierror ) ! 
oldtype, newtype - call CheckMPIReturn(subName,ierror) - - call MPI_TYPE_COMMIT(stype(i), ierror) - call CheckMPIReturn(subName,ierror) - - deallocate(displace) - pos = pos + scount(i) - endif - - end do - - call MPI_TYPE_FREE(newTYPEs,ierror) - - ! - ! clean up - ! -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - if (Iosystem%IOproc) then - call dealloc_check(rcount, 'rcount temp') - call dealloc_check(rindex, 'rindex temp') - endif - - call dealloc_check(sindex, 'sindex temp') - - end subroutine compute_counts -#endif - -!> -!! @public box_rearrange_free -!! @brief free the storage in the ioDesc that was allocated for the rearrangement -!! -!< - - subroutine box_rearrange_free(Iosystem,ioDesc) - implicit none - - type (Iosystem_desc_t), intent(in) ::Iosystem - type (IO_desc_t),intent(inout) :: ioDesc - - ! local vars - character(len=*), parameter :: subName=modName//'::box_rearrange_free' - integer :: i - integer :: ierror - - if(associated(iodesc%dest_ioproc)) then - call dealloc_check(ioDesc%dest_ioproc,'ioDesc%dest_ioproc') - nullify(iodesc%dest_ioproc) - end if - - if(associated(iodesc%dest_ioindex)) then - call dealloc_check(ioDesc%dest_ioindex,'ioDesc%dest_ioindex') - nullify(iodesc%dest_ioindex) - end if - -#ifdef _MPISERIAL - -! Other vars not allocated in _MPISERIAL build - -#else -!else not _MPISERIAL - - if (Iosystem%IOproc) then - if(associated(iodesc%rfrom)) then - call dealloc_check(ioDesc%rfrom) - nullify(iodesc%rfrom) - end if - - do i=1,ioDesc%nrecvs - call MPI_TYPE_FREE(ioDesc%rtype(i), ierror) - call CheckMPIReturn(subName,ierror) - end do - if(associated(iodesc%rtype)) then - call dealloc_check(ioDesc%rtype,'iodesc%rtype') - nullify(iodesc%rtype) - end if - - endif - - - do i=1,Iosystem%num_iotasks - if (ioDesc%scount(i) /= 0) then - call MPI_TYPE_FREE(ioDesc%stype(i), ierror) - call CheckMPIReturn(subName,ierror) - endif - end do - - if(associated(iodesc%scount)) then - call dealloc_check(ioDesc%scount) - nullify(iodesc%scount) - end if - if(associated(iodesc%stype)) then - call dealloc_check(ioDesc%stype,'iodesc%stype') - nullify(iodesc%stype) - end if - -! not _MPISERIAL -#endif - - - end subroutine box_rearrange_free - -end module box_rearrange - - - diff --git a/src/externals/pio1/pio/calcdecomp.F90 b/src/externals/pio1/pio/calcdecomp.F90 deleted file mode 100644 index ceb96e15e8c..00000000000 --- a/src/externals/pio1/pio/calcdecomp.F90 +++ /dev/null @@ -1,280 +0,0 @@ -#define __PIO_FILE__ "calcdecomp.F90" -!> -!! @file -!! @brief calcdecomp This module computes the IO decomposition when generated by PO internally. -!! -!! $Revision$ -!! $LastChangedDate$ -!< - -module calcdecomp -#ifdef TESTCALCDECOMP - implicit none - integer, parameter :: i4=selected_int_kind(6), & - i8=selected_int_kind(13), pio_offset=i8, r8=selected_real_kind(13) - logical, parameter :: debug=.false. - integer, parameter :: pio_real=1,pio_int=2, pio_double=3 -#else - use pio_kinds, only: i4, r4,r8,i4,i8, PIO_offset - use pio_types, only: PIO_int, PIO_real, PIO_double - use pio_support, only : debug, piodie - implicit none -#endif - - public :: CalcStartandCount, pio_set_blocksize - integer, parameter :: default_blocksize=884736 - integer :: blocksize=default_blocksize -contains -!> -!! @defgroup PIO_set_blocksize -!! Sets the contiguous block size read or written from each IO task. -!! The optimal value of this parameter is filesystem specific. 
-!< - - subroutine pio_set_blocksize(newsize) - integer, intent(in) :: newsize -#ifndef TESTCALCDECOMP - if(newsize<0) then - call piodie(__PIO_FILE__,__LINE__,'bad value to blocksize: ',newsize) - end if -#endif - blocksize=newsize - - - end subroutine pio_set_blocksize - - -! -! Determine start and kount values for an array of global size gdims over at most num_io_procs tasks. -! The algorythm creates contigous blocks of approximate size stripesize. Blocksize should be adjusted -! to be optimal for the filesystem being used. The actual number of io tasks used is output in variable -! use_io_procs -! - subroutine CalcStartandCount(basetype, ndims, gdims, num_io_procs,myiorank,start, kount, use_io_procs, innermostdecomposed) - integer(i4), intent(in) :: ndims, num_io_procs, basetype,myiorank - integer(i4), intent(in) :: gdims(ndims) - integer(kind=PIO_OFFSET), intent(out) :: start(ndims), kount(ndims) - integer, intent(out) :: use_io_procs - integer, intent(out), optional :: innermostdecomposed - integer :: i, dims(ndims), lb, ub, inc - integer(kind=pio_offset) :: p, tpsize, pgdims - logical :: converged - integer :: extras, subrank, tioprocs, rem - integer :: minbytes, maxbytes, iorank - integer :: minblocksize, basesize, maxiosize, ioprocs, tiorank - integer :: ldims - integer(kind=PIO_OFFSET) :: mystart(ndims), mykount(ndims) - - - minbytes = blocksize-256 ! minimum number of contigous blocks in bytes to put on a IO task - maxbytes = blocksize+256 ! maximum length of contigous block in bytes to put on a IO task - - select case(basetype) - case(PIO_int) - basesize = 4 - case(PIO_real) - basesize = 4 - case(PIO_double) - basesize = 8 - end select - - minblocksize = minbytes/basesize - - pgdims=product(int(gdims,pio_offset)) - p=pgdims - use_io_procs = max(1, min(int(real(p)/real(minblocksize)+0.5),num_io_procs)) - converged=.false. - tpsize=0 - mystart=1 - mykount=0 - do while(.not. converged) - do iorank=0,use_io_procs-1 - start(:)=1 - kount(:)=0 - - ldims=ndims - p=basesize - do i=1,ndims - p=p*gdims(i) - if(p/use_io_procs > maxbytes) then - ldims=i - exit - end if - end do - - - -! Things work best if use_io_procs is a multiple of gdims(ndims) -! this adjustment makes it so, potentially increasing the blocksize a bit - if (gdims(ldims)1 .and. gdims(ldims-1) > use_io_procs) then - ldims=ldims-1 - else - use_io_procs = use_io_procs - mod(use_io_procs,gdims(ldims)) - end if - end if - - kount(:)=gdims - - ioprocs=use_io_procs - tiorank=iorank - - do i=ldims,1,-1 - if(gdims(i)>1) then - if(gdims(i)>=ioprocs) then - - call computestartandcount(gdims(i),ioprocs,tiorank,start(i),kount(i)) - if(start(i)+kount(i)>gdims(i)+1) then - print *,__PIO_FILE__,__LINE__,i,ioprocs,gdims(i),start(i),kount(i) -#if TESTCALCDECOMP - stop -#else - call piodie(__PIO_FILE__,__LINE__,'Start plus count exceeds dimension bound') -#endif - endif - exit ! Decomposition is complete - else - ! The current dimension cannot complete the decomposition. Decompose this - ! dimension in groups then go on to decompose the next dimesion in each of those - ! groups. - tioprocs=gdims(i) - tiorank = (iorank*tioprocs)/ioprocs - - call computestartandcount(gdims(i),tioprocs, tiorank , start(i),kount(i)) - ioprocs=ioprocs/tioprocs - tiorank = mod(iorank,ioprocs) - end if - end if - end do - if(myiorank==iorank) then - mystart=start - mykount=kount - endif - tpsize=tpsize+product(kount(:)) - if(tpsize==pgdims .and. use_io_procs==iorank+1) then - converged=.true. 
- exit - else if(tpsize>=pgdims) then - exit - endif - enddo - if(.not.converged) then - tpsize=0 - use_io_procs=use_io_procs-1 - endif - - end do - start=mystart - kount=mykount - -! if(myiorank>=0 .and. myiorank= ioprocs -! -! - subroutine computestartandcount(gdim,ioprocs,rank,start,kount) - implicit none - integer,intent(in) :: gdim,ioprocs,rank - integer(kind=pio_offset),intent(out) :: start,kount - integer :: remainder, irank - - if(gdim=ioprocs-irank) then - kount=kount+1 - start=start+max(0,(irank+remainder-ioprocs)) - end if -! write(99,*) __LINE__,gdim,ioprocs,rank,start,kount,remainder - end subroutine computestartandcount - - -end module calcdecomp - -#ifdef TESTCALCDECOMP -program sandctest - use calcdecomp !_EXTERNAL - implicit none - - -! integer, parameter :: ndims=4 -! integer, parameter :: gdims(ndims) = (/66,199,10,8/) -! integer, parameter :: ndims=3 -! integer, parameter :: gdims(ndims) = (/1024,1024,1024/) -! integer, parameter :: num_io_procs=16 - integer, parameter :: ndims=2 - integer, parameter :: gdims(ndims) = (/777602,31/) - integer :: num_io_procs=24 - logical :: converged=.false. -! integer :: gdims(ndims) - integer :: psize, n, i,j,k,m - integer, parameter :: imax=200,jmax=200,kmax=30,mmax=7 - integer(kind=pio_offset) :: start(ndims), count(ndims) - integer :: iorank, numaiotasks, tpsize -#ifdef DOTHIS - do i=1,imax - gdims(1)=i - do j=1,jmax - gdims(2)=j - do k=20,kmax - gdims(3)=k - do m=1,mmax - gdims(4)=m -#endif - tpsize = 0 - numaiotasks=0 - do while(.not. converged) - do iorank=0,num_io_procs-1 - call Calcstartandcount(PIO_double, ndims, gdims, num_io_procs, iorank, start, count, numaiotasks) - - psize=1 - do n=1,ndims - psize=psize*count(n) - end do - tpsize = tpsize+psize - ! if(ndims==3) then -! write(*,'(i2,a,3i5,a,3i5,2i12)') iorank,' start =',start,' count=', count, product(gdims), psize - ! else if(ndims==4) then - if(sum(count)>0) then - write(*,'(i2,a,2i8,a,2i8,2i12)') iorank,' start =',start,' count=', count, product(gdims), psize - if(any(start<0)) then - print *, gdims - stop - endif - end if - - end do - if(tpsize==product(gdims)) then - converged=.true. - else - print *,'Failed to converge: ',tpsize,product(gdims),gdims,num_io_procs - tpsize=0 - num_io_procs=num_io_procs-1 - end if - end do -#ifdef DOTHIS - end do - end do - end do - end do -#endif -end program sandctest -#endif diff --git a/src/externals/pio1/pio/calcdisplace_mod.F90 b/src/externals/pio1/pio/calcdisplace_mod.F90 deleted file mode 100644 index b5340022824..00000000000 --- a/src/externals/pio1/pio/calcdisplace_mod.F90 +++ /dev/null @@ -1,391 +0,0 @@ -#define __PIO_FILE__ "calcdisplace_mod.F90" -MODULE calcdisplace_mod - - use pio_kinds, only: i4, PIO_OFFSET, i8 - use pio_support, only : piodie - implicit none - private - public :: GCDblocksize,gcd - public :: calcdisplace, calcdisplace_box - - interface gcd_pair - module procedure gcd_pair_i4 - module procedure gcd_pair_i8 - end interface - - interface gcd_array - module procedure gcd_array_i4 - module procedure gcd_array_i8 - end interface - - interface gcd - module procedure gcd_array_i4 - module procedure gcd_array_i8 - module procedure gcd_pair_i4 - module procedure gcd_pair_i8 - end interface - -CONTAINS - !***************************** - ! calcdisplace - ! - - subroutine calcdisplace(bsize,dof,displace) - - integer(i4), intent(in) :: bsize ! length of contigious blocks of numbers - integer(kind=pio_offset), intent(in) :: dof(:) ! 
degree of freedom on which to setup the displacement array - integer(kind=pio_offset), intent(inout) :: displace(:) ! array of mpi displacments - - integer :: numblocks,lenblocks,i,ii - integer(kind=pio_offset) :: dis - - numblocks = size(displace) - lenblocks = bsize - do i=1,numblocks - ii = (i-1)*lenblocks+1 - dis = dof(ii)-1 - dis = dis/lenblocks - displace(i) = dis - enddo - - end subroutine calcdisplace - - - - subroutine calcdisplace_box(gsize,lenblock,start,count,ndim,displace) - use alloc_mod, only : alloc_check,dealloc_check - integer(i4),intent(in) :: gsize(:) ! global size of output domain - integer(i4),intent(in) :: lenblock - integer(kind=PIO_offset),intent(in) :: start(:), count(:) - integer(i4), intent(in) :: ndim - integer(i4),pointer :: displace(:) ! mpi displacments - - !! - - integer(i4):: ndisp - integer(i4) :: gstride(ndim) - integer(i4) :: i,j - integer(i4) :: iosize - integer(i4) :: myloc(ndim) - integer(i4) :: ub(ndim) - integer :: idim - logical :: done - integer(i4) :: gindex, fdim, bsize - - gstride(1)=gsize(1) - do i=2,ndim - gstride(i)=gsize(i)*gstride(i-1) - end do - - iosize=min(int(count(1)),1) - do i=2,ndim - iosize=iosize*count(i) - end do - - ndisp=size(displace) - - if (iosize<1 .or. ndisp<1) return - - if (mod(iosize,ndisp) /= 0) then - print *,__PIO_FILE__,__LINE__,ndisp,mod(iosize,ndisp) - call piodie(__PIO_FILE__,__LINE__,'mod(iosize,ndisp)/=0 iosize=',iosize) - endif - - do i=1,ndim - ub(i)=start(i)+count(i)-1 - end do - - ! skip x dimension (start of each block) - ! generate displacement for every 1,y,z - ! i.e. loop over y,z,... - ! compute corresponding global index - ! divide by lenblocks - - displace(1)=1 - myloc=start - fdim=0 - bsize = 1 - do while(bsizendim) call piodie(__PIO_FILE__,__LINE__,'dim overflow') - endif - end do - endif - - end do - - do i=fdim+1,ndim - if (myloc(i) /= ub(i)) then - print *,'myloc=',myloc - print *,'ub=',ub - call piodie( __PIO_FILE__,__LINE__,'myloc/=ub') - endif - end do - - - ! check for strictly increasing - - do i=1,ndisp-1 - if(displace(i) > displace(i+1)) then -! This is an error but only if you are writing a binary file, so we are going to -! silently fail by deallocating the displace array - call dealloc_check(displace) - call alloc_check(displace,0) - exit -! call piodie(__PIO_FILE__,__LINE__,'displace is not increasing') - endif - enddo - - end subroutine calcdisplace_box - - - SUBROUTINE GCDblocksize(arr_in,bsize,debug) - implicit none - - integer(kind=pio_offset),intent(in) ,dimension(:) :: arr_in !arr_in = rindex array from box_rearrange - integer(kind=pio_offset),intent(out) :: bsize ! the gcd of the block length array - - ! Locals - integer(kind=pio_offset),dimension(:),allocatable :: del_arr,loc_arr - integer(kind=pio_offset),dimension(:),allocatable :: gaps, blk_len - integer(i4) :: i,j,k,n,numblks,numtimes,ii, numgaps - integer(kind=pio_offset) :: bsizeg - integer, intent(in), optional :: debug - - - numblks=0 - numtimes=0 - numgaps=0 - - n = size(arr_in) - - allocate(del_arr(n-1)) - - del_arr = 0 ! forward diff of the input array to deterine where contiguous blocks end. - - - do i = 1,n-1 ! compute foward diff of the elements; if =1, still in a contiguous block, - ! if /= 1 , the end of a block has been reached. - del_arr(i) = (arr_in(i+1) - arr_in(i)) - - end do - - numtimes = count( del_arr /= 1) - numblks = numtimes + 1 ! the number of contiguous blocks. - - if(present(debug)) print *,debug,': numtimes:',numtimes - - if ( numtimes == 0 ) then ! 
new logic to account for the case that there is only - allocate(loc_arr(numblks)) ! one contigious block in which case numtimes=0 and the - else ! error from the assignment in line 87 goes away - allocate(loc_arr(numtimes)) - end if - loc_arr = 1 - - j=0 - - do i = 1, n-1 - - if ( del_arr(i) == 1 ) cycle - - j = j+1 - - loc_arr(j) = i - - end do - - if(numtimes>0) then - ii=1 - numgaps = count(del_arr > 1) - if(numgaps>0) then - allocate(gaps(numgaps)) - do i=1,n-1 - if(del_arr(i) > 1) then - gaps(ii) = del_arr(i) -1 - ii=ii+1 - endif - enddo - end if - endif - - allocate(blk_len(numblks)) - blk_len(1) = loc_arr(1) - - do k = 2,numblks-1 ! computes the the length of each block by differencing the - ! elements of the res array. - - blk_len(k) = loc_arr(k) - loc_arr(k-1) - - end do - - blk_len(numblks) = n - sum(blk_len(1:numblks-1)) ! computes the length of the last block - - - - bsize = gcd_array(blk_len) ! call to compute the gcd of the blk_len array. - - if(present(debug)) then - print *,debug,': numblks,blk_len :',numblks, minval(blk_len),minloc(blk_len),maxval(blk_len),maxloc(blk_len),bsize - endif - - - if(numgaps>0) then - bsizeg = gcd_array(gaps(1:numgaps)) - bsize = gcd_pair(bsize,bsizeg) - - if(present(debug)) then - print *,debug,': numblks,gaps :',numblks, minval(gaps(1:numgaps)),minloc(gaps(1:numgaps)),maxval(gaps(1:numgaps)), & - maxloc(gaps(1:numgaps)),bsize,bsizeg,arr_in(1) - endif - - deallocate(gaps) - endif - if(arr_in(1)>0) then ! account for an initial gap - bsize = gcd_pair(bsize,arr_in(1)) - end if - deallocate(del_arr,loc_arr,blk_len) - - end SUBROUTINE GCDblocksize - - - integer(kind=pio_offset) function gcd_array_i8(ain) result(bsize) - implicit none - - - integer(kind=pio_offset), intent(in),dimension(:) :: ain - - ! locals - integer(i8) :: i,n - - bsize=1 - n = size(ain) - ! First check, if an element is 1, then 1 is the gcd (i.e bsize) - if(n==0 .or. any(ain <= 1)) return - - ! Calculate GCD using GCD(a,b,c,...) = GCD(a,GCD(b,c...)) - ! otherwise gcd = 1. - ! Done by calling the external function that is below. - - bsize = ain(1) - do i = 2,n - bsize = gcd_pair(bsize,ain(i)) - if (bsize == 1) exit - end do - - end function gcd_array_i8 - - integer function gcd_array_i4(ain) result(bsize) - implicit none - - - integer(i4), intent(in),dimension(:) :: ain - - ! locals - integer(i4) :: i,n - - bsize=1 - n = size(ain) - ! First check, if an element is 1, then 1 is the gcd (i.e bsize) - if(n==0 .or. any(ain <= 1)) return - - ! Calculate GCD using GCD(a,b,c,...) = GCD(a,GCD(b,c...)) - ! otherwise gcd = 1. - ! Done by calling the external function that is below. - - bsize = ain(1) - do i = 2,n - bsize = gcd_pair(bsize,ain(i)) - if (bsize == 1) exit - end do - - end function gcd_array_i4 - - integer(kind=pio_offset) FUNCTION gcd_pair_i8(u,v) result(gcd) - implicit none - - integer(kind=pio_offset),intent(in) :: u,v - - ! locals - integer(i8) :: x,a,b - - a = u - b = v - - if(a < b) then - x = a - a = b - b = x - end if - - do - x = mod(a,b) - if ( x == 0 ) EXIT - a = b - b = x - end do - - gcd = b - - end FUNCTION gcd_pair_i8 - - integer FUNCTION gcd_pair_i4(u,v) result(gcd) - implicit none - - integer(i4),intent(in) :: u,v - - ! 
locals - integer(i4) :: x,a,b - - a = u - b = v - - if(a < b) then - x = a - a = b - b = x - end if - - do - x = mod(a,b) - if ( x == 0 ) EXIT - a = b - b = x - end do - - gcd = b - - end FUNCTION gcd_pair_i4 - -END MODULE calcdisplace_mod diff --git a/src/externals/pio1/pio/config.h.in b/src/externals/pio1/pio/config.h.in deleted file mode 100644 index ff6b17880b8..00000000000 --- a/src/externals/pio1/pio/config.h.in +++ /dev/null @@ -1,79 +0,0 @@ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Define if building universal (internal helper macro) */ -#undef AC_APPLE_UNIVERSAL_BUILD - -/* Define to 1 if you have the header file. */ -#undef HAVE_EXPAT_H - -/* Define if you have the HDF5 library. */ -#undef HAVE_HDF5 - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define if you have the MPI library. */ -#undef HAVE_MPI - -/* Define if you have the NETCDF library. */ -#undef HAVE_NETCDF - -/* Define if the netCDF library is compiled with netCDF 4 API. */ -#undef HAVE_NETCDF4 - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most - significant byte first (like Motorola and SPARC, unlike Intel). 
*/ -#if defined AC_APPLE_UNIVERSAL_BUILD -# if defined __BIG_ENDIAN__ -# define WORDS_BIGENDIAN 1 -# endif -#else -# ifndef WORDS_BIGENDIAN -# undef WORDS_BIGENDIAN -# endif -#endif diff --git a/src/externals/pio1/pio/dtypes.h b/src/externals/pio1/pio/dtypes.h deleted file mode 100644 index 9076cf0f75b..00000000000 --- a/src/externals/pio1/pio/dtypes.h +++ /dev/null @@ -1,5 +0,0 @@ -#define TYPEDOUBLE 102 -#define TYPEINT 103 -#define TYPETEXT 100 -#define TYPELONG 104 -#define TYPEREAL 101 diff --git a/src/externals/pio1/pio/fdepends.awk b/src/externals/pio1/pio/fdepends.awk deleted file mode 100644 index 2980920cf23..00000000000 --- a/src/externals/pio1/pio/fdepends.awk +++ /dev/null @@ -1,56 +0,0 @@ -# -# File fdepends.awk -# -# Take an .F90 file and generate Makefile dependencies for -# each module "use", "#include", and "include" -# -# Example: -# POP.o: io.o -# -# Predefined variables Typical values -# NAME POP -# SUF .F90 -# - -BEGIN { IGNORECASE=1 - PRLINE = NAME".o: " -# print NAME".o : " NAME SUF - } - - -# -# awk reads each line of the filename argument $2 until it finds -# a "use" or "#include" -# - - -/^[ \t]*use[ \t]+/ { - - # Ignore any "use" line that contains comment "_EXTERNAL" - if ( $0 ~ /_EXTERNAL/ ) next - - # Assume the second field is the F90 module name, - # remove any comma at the end of the second field (due to - # ONLY or rename), and print it in a dependency line. - - sub(/,$/,"",$2) - print PRLINE $2".o" - } - - -# This will match include lines (either cpp or fortran style) -# #include "myinclude.inc" -# #include -# INCLUDE 'MYINCLUDE.INC' - -/^[ \t]*#?include[ \t]/ { - - # Ignore any "#include" line that contains comment "_EXTERNAL" - if ( $0 ~ /_EXTERNAL/ ) next - - # Remove starting or ending quote or angle bracket - sub(/^["<']/,"",$2) - sub(/[">']$/,"",$2) - print PRLINE $2 - - } diff --git a/src/externals/pio1/pio/iompi_mod.F90.in b/src/externals/pio1/pio/iompi_mod.F90.in deleted file mode 100644 index b5cc563b776..00000000000 --- a/src/externals/pio1/pio/iompi_mod.F90.in +++ /dev/null @@ -1,393 +0,0 @@ -#define __PIO_FILE__ "iompi_mod.F90" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief The MPI-IO direct binary interface to PIO -!< -module iompi_mod - use pio_kinds, only : i4,r4,r8,log_kind,pio_offset - use pio_types, only : io_desc_t,file_desc_t,var_desc_t, & - iotype_pbinary, & - iotype_direct_pbinary,pio_noerr -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf !_EXTERNAL -#endif - - use pio_support - use alloc_mod, only : alloc_check -#ifndef NO_C_SIZEOF - use iso_c_binding, only : c_sizeof ! _EXTERNAL -#else -#define c_sizeof(x) size(transfer (x, xxx_sizeof_data)) -#endif - -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - - implicit none - private - -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - public :: open_mpiio, create_mpiio, close_mpiio, write_mpiio,read_mpiio -#if defined(NO_C_SIZEOF) - character, private :: xxx_sizeof_data(32) -#endif - -!> -!! @private -!< - interface write_mpiio -! TYPE int,real,double - module procedure write_mpiio_{TYPE} - end interface - - interface read_mpiio -! TYPE int,real,double - module procedure read_mpiio_{TYPE} - end interface - -character(len=*), parameter :: modName='iompi_mod' - -contains - - integer function close_mpiio(File) result(ierr) - - type (File_desc_t), intent(inout) :: File ! file descriptor - - ! =================== - ! Local variables - ! 
=================== - character(len=*), parameter :: subName=modName//'::close_mpiio' - integer :: amode,iotype - - logical, parameter :: Check = .TRUE. - - ierr = PIO_NOERR - if(File%iosystem%ioproc) then -#ifdef USEMPIIO - iotype = File%iotype - - select case(iotype) - case(iotype_pbinary,iotype_direct_pbinary) - call MPI_file_close(File%fh,ierr) - if(Check) call CheckMPIreturn('close_mpiio: after call to file_close: ',ierr) - !--------------------------------- - ! set the base file offset to zero - !--------------------------------- - File%offset = 0 - end select -#else - call piodie(__PIO_FILE__,__LINE__,'PIO was not built with -DUSEMPIIO') -#endif - end if - end function close_mpiio - - integer function create_mpiio(File,fname) result(ierr) - - type (File_desc_t), intent(inout) :: File ! file descriptor - character(len=*), intent(in) :: fname - - ! =================== - ! Local variables - ! =================== - character(len=*), parameter :: subName=modName//'::create_mpiio' - integer :: amode,iotype - - logical, parameter :: Check = .TRUE. - integer :: ierrl -#ifdef USEMPIIO - ierr = PIO_noerr - if(file%iosystem%ioproc) then - iotype = File%iotype - - select case(iotype) - case(iotype_pbinary,iotype_direct_pbinary) - amode = IOR(MPI_MODE_RDWR,MPI_MODE_CREATE) - if(Debug) print *,'OpenFile: io_rank: ',File%iosystem%io_rank,'amode,info: ',amode,File%iosystem%info,'fname: ',fname - call MPI_file_open(File%iosystem%IO_comm,fname,amode,File%iosystem%info,File%fh,ierr) - if(Check) call CheckMPIreturn('create_mpiio: after call to MPI_file_open: ',ierr) - !--------------------------------- - ! set the base file offset to zero - !--------------------------------- - File%offset = 0 - end select -! print *, subName,':: CreateFile: io_rank: ',File%io_rank,'amode,info: ',amode,File%info,' fname: ',fname, & -! ' File%fh: ',File%fh - end if - if(File%iosystem%num_tasks>File%iosystem%num_iotasks) then - ! broadcast the return code from MPI_file_open - call MPI_BCAST(ierr,1,MPI_INTEGER,File%iosystem%IOMaster, File%iosystem%Comp_comm ,ierrl) - if(Check) call CheckMPIreturn('create_mpiio: after call to MPI_BCAST: ',ierrl) - end if -#else - call piodie(__PIO_FILE__,__LINE__,'PIO was not built with -DUSEMPIIO') - ierr=0 -#endif - end function create_mpiio - - integer function open_mpiio(File,fname, CheckMPI) result(ierr) - - type (File_desc_t), intent(inout) :: File ! file descriptor - character(len=*), intent(in) :: fname - logical, optional, intent(in) :: CheckMPI - - ! =================== - ! Local variables - ! =================== - character(len=*), parameter :: subName=modName//'::open_mpiio' - integer :: amode,iotype - integer :: ierrl - - logical :: Check = .True. -#ifdef USEMPIIO - if (present(CheckMPI)) Check = CheckMPI - ierr = PIO_noerr - if(File%iosystem%ioproc) then - iotype = File%iotype - - select case(iotype) - case(iotype_pbinary,iotype_direct_pbinary) - amode = MPI_MODE_RDONLY - call MPI_file_open(File%iosystem%IO_comm,fname,amode,File%iosystem%info,File%fh,ierr) - if(Check) call CheckMPIreturn('open_mpiio: after call to MPI_file_open: ',ierr) - if(Debug) print *, subName,':: io_rank: ',File%iosystem%io_rank,'amode,info: ',amode,File%iosystem%info,'fname: ',fname - !--------------------------------- - ! set the base file offset to zero - !--------------------------------- - File%offset = 0 - end select - end if - if(File%iosystem%num_tasks>File%iosystem%num_iotasks) then - ! 
broadcast the return code from MPI_file_open - call MPI_BCAST(ierr,1,MPI_INTEGER,File%iosystem%IOMaster, File%iosystem%Comp_comm ,ierrl) - if(Check) call CheckMPIreturn('open_mpiio: after call to MPI_BCAST: ',ierrl) - end if - -#else - call piodie(__PIO_FILE__,__LINE__,'PIO was not built with -DUSEMPIIO') - ierr=0 -#endif - end function open_mpiio - - -! TYPE int,real,double - integer function write_mpiio_{TYPE} (File,IOBUF,varDesc, iodesc) result(ierr) - type (File_desc_t), intent(inout) :: File ! file descriptor - {VTYPE}, intent(in) :: IOBUF(:) ! IO buffer - type (VAR_desc_t), intent(in) :: varDesc - type (IO_desc_t), intent(in) :: IODesc - - character(len=*), parameter :: subName=modName//'::write_mpiio_{TYPE}' - - character(len=10) :: datarep - integer(kind=PIO_OFFSET) :: reclen - integer(i4) :: iotype - integer(kind=pio_offset) :: glen ! global length of IO request - integer(kind=PIO_OFFSET) :: offset ! local offset - - integer :: fstatus(MPI_STATUS_SIZE) - - integer(i4) :: cnt - logical, parameter :: Check = .TRUE. -#ifdef TIMING - call t_startf("PIO:pio_write_mpiio_{TYPE}") -#endif -#ifdef USEMPIIO - datarep = 'native' - iotype = File%iotype - glen = iodesc%glen - offset = iodesc%IOmap%start - - !------------------------------- - ! write the record control word - !------------------------------- - - reclen=glen*c_sizeof(iobuf(1)) - - if(iotype == iotype_direct_pbinary) then - File%offset = INT(varDesc%rec-1,kind=PIO_OFFSET)*reclen - endif -!DBG print *,'TEMPLATE_NAME(write_mpiio_): At the begining of subroutine: ',File%offset - !------------------------------------- - ! Set file view for distributed array - !------------------------------------- - if(Debug) print *,__PIO_FILE__,__LINE__,' inside write_mpiio_{TYPE} offset: ',File%offset - call MPI_File_set_view(File%fh,File%offset, iodesc%Write%elemTYPE, iodesc%Write%fileTYPE, 'native',File%iosystem%info,ierr) - - if(Check.and.ierr/=MPI_SUCCESS) then - call CheckMPIreturn('write_mpiio_{TYPE} after call to file_set_view: ',ierr) - call piodie(__PIO_FILE__,__LINE__) - end if - !----------------------------- - ! Write out distributed array - !----------------------------- - - call MPI_file_write_all(File%fh,IOBUF,int(iodesc%Write%n_elemTYPE),iodesc%Write%elemTYPE,fstatus,ierr) - if(Check.and.ierr/=MPI_SUCCESS) then - call CheckMPIreturn('write_mpiio_{TYPE}: after call to file_write_all: ',ierr) - call piodie(__PIO_FILE__,__LINE__) - end if - !----------------------------------- - ! increment the file offset pointer - !----------------------------------- - File%Offset = File%Offset + reclen -#else - call piodie(__PIO_FILE__,__LINE__,'PIO was not built with -DUSEMPIIO') - ierr=0 -#endif -#ifdef TIMING - call t_stopf("PIO:pio_write_mpiio_{TYPE}") -#endif - - end function write_mpiio_{TYPE} - -! TYPE int,real,double - integer function read_mpiio_{TYPE} (File,IOBUF,varDesc, iodesc) result(ierr) - - type (File_desc_t), intent(inout) :: File ! file descriptor - {VTYPE}, intent(out) :: IOBUF(:) ! IO buffer - type (VAR_desc_t), intent(in) :: varDesc - type (IO_desc_t), intent(in) :: IODesc - - character(len=*), parameter :: subName=modName//'::read_mpiio_{TYPE}' - character(len=10) :: datarep - integer(kind=PIO_OFFSET) :: reclen - integer(i4) :: iotype - integer(i4) :: cnt - integer(kind=pio_offset) :: glen ! global length of IO request - - integer(kind=PIO_OFFSET) :: offset ! local offset - integer :: fstatus(MPI_STATUS_SIZE) - - logical, parameter :: Debug = .FALSE. - logical, parameter :: Check = .TRUE. 
- - datarep = 'native' - iotype = File%iotype - glen = iodesc%glen - offset = iodesc%IOmap%start -#ifdef TIMING - call t_startf("PIO:pio_read_mpiio_{TYPE}") -#endif -#ifdef USEMPIIO - reclen=glen*c_sizeof(iobuf(1)) - - if(Debug) print *, subName,':: IAM: ',File%iosystem%io_rank,' read_real8_mpiio: SIZE(IOBUF),n_RelemTYPE: ', & - SIZE(IOBUF),iodesc%Read%n_elemTYPE - - if(iotype == iotype_direct_pbinary) then - File%offset = INT(varDesc%rec-1,kind=PIO_OFFSET)*reclen - endif - !------------------------------------- - ! Set file view for distributed array - !------------------------------------- - if(Debug) print *,__PIO_FILE__,__LINE__,'IAM: ',File%iosystem%io_rank,' before set_view iodesc%Read%fileTYPE: ', & - iodesc%Read%FileTYPE - call MPI_File_set_view(File%fh,File%offset, iodesc%Read%elemTYPE, iodesc%Read%fileType, 'native',File%iosystem%info,ierr) - if(Check) call CheckMPIreturn('read_mpiio_{TYPE}: after call to file_set_view: ',ierr) -!DBG if(Debug) print *,__PIO_FILE__,__LINE__,'IAM: ',File%iosystem%io_rank,' after set_view IODesc%Read%fileTYPE: ', & -!DBG iodesc%Read%fileTYPE - - !----------------------------- - ! Read out distributed array - !----------------------------- - call MPI_file_read_all(File%fh,IOBUF,int(iodesc%Read%n_elemTYPE),iodesc%Read%elemTYPE,fstatus,ierr) - if(Check) call CheckMPIreturn('read_mpiio_{TYPE}: after call to file_read_all: ',ierr) - if(Debug) call MPI_get_count(fstatus,iodesc%Read%elemTYPE,cnt,ierr) - if(Debug) print *,__PIO_FILE__,__LINE__,'IAM: ',File%iosystem%io_rank,'read_mpiio_{TYPE}: cnt is: ',iodesc%Read%n_elemTYPE, cnt - if(Debug) print *,__PIO_FILE__,__LINE__,'IAM: ',File%iosystem%io_rank,'read_mpio_{TYPE}: reclen is: ',reclen - - - !----------------------------------- - ! increment the file offset pointer - !----------------------------------- - File%Offset = File%Offset + reclen -#else - call piodie(__PIO_FILE__,__LINE__,'PIO was not built with -DUSEMPIIO') - ierr=0 -#endif -#ifdef TIMING - call t_stopf("PIO:pio_read_mpiio_{TYPE}") -#endif - end function read_mpiio_{TYPE} - - - subroutine Write_FORTRAN_CntrlWord(File,reclen) - - type (File_desc_t), intent(inout) :: File - integer(i4), intent(in) :: reclen - - character(len=*), parameter :: subName=modName//'::Write_FORTRAN_CntrlWord' - character(len=10) :: datarep - integer(kind=PIO_OFFSET) :: offset, offset2, glen - integer(i4) :: ierr - - logical, parameter :: Check = .TRUE. - integer :: fstatus(MPI_STATUS_SIZE) - - -#ifdef USEMPIIO - - datarep = 'native' - call MPI_File_set_view(File%fh,File%offset,MPI_INTEGER, MPI_INTEGER, datarep,File%iosystem%info,ierr) - if(Check) call CheckMPIreturn('write_FORTRAN_CntrlWord: after call to file_write_set_view: ',ierr) - - if(File%iosystem%io_rank == 0) then - ! ------------------------------------ - ! add the FORTRAN record control words - ! 
------------------------------------ - offset = 0 - call MPI_file_write_at(File%fh,offset,reclen,1,MPI_INTEGER,fstatus,ierr) - if(Check) call CheckMPIreturn('write_FORTRAN_CntrlWord: after call to MPI_file_write_at offset ',ierr) - glen = reclen/i4 - offset2 = offset+glen+1 - call MPI_file_write_at(File%fh,offset2,reclen,1,MPI_INTEGER,fstatus,ierr) - if(Check) call CheckMPIreturn('write_FORTRAN_CntrlWord: after call to MPI_file_write_at: offset2 ',ierr) - endif - File%offset = File%offset+i4 -#else - call piodie(__PIO_FILE__,__LINE__,'PIO was not built with -DUSEMPIIO') -#endif - - end subroutine Write_FORTRAN_CntrlWord -!*********************************************************************** - subroutine Read_FORTRAN_CntrlWord(File,reclen) - - type (File_desc_t), intent(inout) :: File - integer(i4), intent(inout) :: reclen - - character(len=*), parameter :: subName=modName//'::Read_FORTRAN_CntrlWord' - character(len=10) :: datarep - integer(kind=PIO_OFFSET) :: offset, offset2, glen - integer(i4) :: ierr - - logical, parameter :: Check = .TRUE. - integer :: fstatus(MPI_STATUS_SIZE) -#ifdef USEMPIIO - - datarep = 'native' - print *,'Read_FORTRAN_CntrlWord: File%offset: ',File%offset - call MPI_File_set_view(File%fh,File%offset,MPI_INTEGER, MPI_INTEGER, datarep,File%iosystem%info,ierr) - if(Check) call CheckMPIreturn('Read_FORTRAN_CntrlWord: after call to MPI_file_set_view: ',ierr) - - if(File%iosystem%io_rank == 0) then - ! ------------------------------------ - ! read the FORTRAN record control words - ! ------------------------------------ - offset=0 - call MPI_file_read_at(File%fh,offset,reclen,1,MPI_INTEGER,fstatus,ierr) - if(Check) call CheckMPIreturn('Read_FORTRAN_CntrlWord: after call MPI_to file_read_at_all: ',ierr) - endif - File%offset = File%offset+i4 - - if(Debug) print *, subName,':: reclen ',reclen -#else - call piodie(__PIO_FILE__,__LINE__,'PIO was not built with -DUSEMPIIO') -#endif - end subroutine Read_FORTRAN_CntrlWord - -end module iompi_mod diff --git a/src/externals/pio1/pio/ionf_mod.F90 b/src/externals/pio1/pio/ionf_mod.F90 deleted file mode 100644 index 113d6c64544..00000000000 --- a/src/externals/pio1/pio/ionf_mod.F90 +++ /dev/null @@ -1,392 +0,0 @@ -#define __PIO_FILE__ "ionf_mod.F90" -module ionf_mod -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf ! _EXTERNAL -#endif - use alloc_mod - - use pio_kinds, only: i4,r4,r8,pio_offset - use pio_types - use pio_utils, only: bad_iotype, check_netcdf - - use pio_support, only : Debug, DebugIO, piodie, DebugAsync -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif - use pio_support, only : CheckMPIReturn -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none - private - -#if defined(_PNETCDF) && ! (USE_PNETCDF_MOD) -#include /* _EXTERNAL */ -#endif - - - public :: create_nf - public :: open_nf - public :: close_nf - public :: sync_nf - -contains - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! create_nf - ! - - integer function create_nf(File,fname, amode) result(ierr) -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#else - include 'mpif.h' ! 
_EXTERNAL -#endif - type (File_desc_t), intent(inout) :: File - character(len=*), intent(in) :: fname - integer(i4), intent(in) :: amode - integer(i4) :: iotype, mpierr - integer :: nmode, tmpfh - - nmode=amode - - ierr=PIO_noerr - File%fh=-1 - - if(File%iosystem%ioproc) then - iotype = File%iotype - select case (iotype) -#ifdef _PNETCDF - case(PIO_iotype_pnetcdf) - ierr = nfmpi_create(File%iosystem%IO_comm,fname,nmode ,File%iosystem%info,File%fh) -#endif -#ifdef _NETCDF -#ifdef _NETCDF4 - case(PIO_iotype_netcdf4p) -! The 64 bit options are not compatable with hdf5 format files - - if(iand(PIO_64BIT_OFFSET,amode)==PIO_64BIT_OFFSET) then - nmode = ieor(amode,PIO_64BIT_OFFSET) - else if(iand(PIO_64BIT_DATA,amode)==PIO_64BIT_DATA) then - nmode = ieor(amode,PIO_64BIT_DATA) - else - nmode=amode - end if - - nmode = ior(nmode,NF90_NETCDF4) -#ifdef _MPISERIAL - ierr = nf90_create(fname, nmode , File%fh) -#else - nmode = ior(nmode,NF90_MPIIO) - ierr = nf90_create(fname, nmode, File%fh, & - comm=File%iosystem%io_comm, info=File%iosystem%info) -#endif -! Set default to NOFILL for performance. -! if(ierr==PIO_NOERR) ierr = nf90_set_fill(File%fh, NF90_NOFILL, nmode) - case(PIO_iotype_netcdf4c) - if(iand(PIO_64BIT_OFFSET,amode)==PIO_64BIT_OFFSET) then - nmode = ieor(amode,PIO_64BIT_OFFSET) - else if(iand(PIO_64BIT_DATA,amode)==PIO_64BIT_DATA) then - nmode = ieor(amode,PIO_64BIT_DATA) - else - nmode=amode - end if - - nmode = ior(nmode,NF90_NETCDF4) - - ! Only io proc 0 will do writing - if (File%iosystem%io_rank == 0) then - ! Stores the ncid in File%fh - ierr = nf90_create(fname, nmode, File%fh, & - info=File%iosystem%info ) -! Set default to NOFILL for performance. - if(ierr==PIO_NOERR) & - ierr = nf90_set_fill(File%fh, NF90_NOFILL, nmode) - endif -#endif - case(PIO_iotype_netcdf) - ! Only io proc 0 will do writing - if (File%iosystem%io_rank == 0) then - ! Stores the ncid in File%fh - ierr = nf90_create(fname, nmode , File%fh) - if(Debug .or. Debugasync) print *,__PIO_FILE__,__LINE__,file%fh, ierr, nmode -! Set default to NOFILL for performance. - if(ierr==NF90_NOERR) & - ierr = nf90_set_fill(File%fh, NF90_NOFILL, nmode) - endif - call mpi_bcast(file%fh,1,mpi_integer, 0, file%iosystem%io_comm, mpierr) - if(File%iosystem%io_rank > 0) File%fh=-File%fh -#endif - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - if(Debug) print *,__PIO_FILE__,__LINE__,file%fh,ierr - end if - tmpfh = file%fh - - call mpi_bcast(tmpfh,1,mpi_integer, file%iosystem%iomaster, file%iosystem%my_comm, mpierr) - - if(.not. file%iosystem%ioproc) file%fh=-tmpfh - - if(Debug.or.DebugAsync) print *,__PIO_FILE__,__LINE__,file%fh,ierr - - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - - end function create_nf - - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! open_nf - ! - - integer function open_nf(File,fname, mode) result(ierr) -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#else - include 'mpif.h' ! _EXTERNAL -#endif - type (File_desc_t), intent(inout) :: File - character(len=*), intent(in) :: fname - integer(i4), optional, intent(in) :: mode - integer(i4) :: iotype, amode , mpierr, ier2 - integer :: tmpfh, format - - - ierr=PIO_noerr - File%fh=-1 - if(file%iosystem%ioproc) then -! This subroutine seems to break pgi compiler for large files. -! 
call check_file_type(File, fname) - iotype = File%iotype -#ifdef _NETCDF - if(present(mode)) then - if(mode == 1) then - amode = NF90_WRITE - else - amode = mode - end if - else - amode = NF90_NOWRITE - end if -#endif -#ifdef _PNETCDF - if(iotype==PIO_iotype_pnetcdf) then - if(present(mode)) then - amode = mode - else - amode = NF_NOWRITE - end if - ierr = nfmpi_open(File%iosystem%IO_comm,fname,amode,File%iosystem%info,File%fh) -#ifdef _NETCDF -#ifdef _NETCDF4 - if(ierr /= PIO_NOERR) then ! try hdf5 format - if(Debug) print *, 'try netcdf4 format' - File%iotype = pio_iotype_netcdf4p - iotype = pio_iotype_netcdf4p - end if -#endif -#endif - end if -#endif - -#ifdef _NETCDF -#ifdef _NETCDF4 - if(iotype==PIO_iotype_netcdf4p) then -! we need to go through some contortions to make sure a file we are opening is okay for parallel access - ierr = nf90_open(fname,amode,File%fh) - ierr = nf90_inquire(File%fh,formatnum=format) -#ifndef MPI_SERIAL - if(format == nf90_format_netcdf4) then - ierr = nf90_close(File%fh) - ierr = nf90_open(fname, ior(amode,ior(NF90_NETCDF4,NF90_MPIIO)), File%fh, & - comm=File%iosystem%io_comm, info=File%iosystem%info) - if(ierr==nf90_enotnc4 .or. ierr==nf90_einval) then - ierr = nf90_open(fname, amode, File%fh,info=File%iosystem%info) - end if - end if -#endif - end if -#endif - - if(iotype==PIO_iotype_netcdf .or. iotype==PIO_IOTYPE_NETCDF4C) then - if (File%iosystem%io_rank == 0) then - ! Stores the ncid in File%fh - ierr = nf90_open(fname,amode,File%fh) - if(Debug .or. Debugasync) print *,__PIO_FILE__,__LINE__,file%fh, ierr - ! Set default to NOFILL for performance. - if(iotype==pio_iotype_netcdf .and. ierr .eq. NF90_NOERR .and. iand(amode, NF90_WRITE) > 0) then - ierr = nf90_set_fill(File%fh, NF90_NOFILL, ier2) - end if - endif - call mpi_bcast(file%fh,1,mpi_integer, 0, file%iosystem%io_comm, mpierr) - if(File%iosystem%io_rank > 0) File%fh=-File%fh - end if -#endif - end if - - tmpfh = file%fh - call mpi_bcast(tmpfh,1,mpi_integer, file%iosystem%iomaster, file%iosystem%my_comm, mpierr) - - if(.not. file%iosystem%ioproc) file%fh=-tmpfh - - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - - end function open_nf - - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! close_nf - ! - - - integer function close_nf(File) result(ierr) - type (File_desc_t), intent(inout) :: File - - ierr=PIO_noerr - - if(File%iosystem%IOproc) then - if(Debug) print *,__PIO_FILE__,__LINE__,'CFILE closing : ',file%fh - select case (File%iotype) -#ifdef _PNETCDF - case(PIO_iotype_pnetcdf) - ierr=nfmpi_close(file%fh) -#endif -#ifdef _NETCDF - case(PIO_iotype_netcdf, pio_iotype_netcdf4c, pio_iotype_netcdf4p) - if (File%fh>0) then - ierr = nf90_sync(File%fh) - if(Debug) print *,__PIO_FILE__,__LINE__,ierr - ierr= nf90_close(File%fh) - if(Debug) print *,__PIO_FILE__,__LINE__,ierr - endif -#endif - case default - call bad_iotype(File%iotype,__PIO_FILE__,__LINE__) - end select - end if - file%fh=-1 - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - end function close_nf - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! sync_nf - ! 
- - - integer function sync_nf(File) result(ierr) - - type (File_desc_t), intent(inout) :: File - - ierr=PIO_noerr - - if(File%iosystem%IOproc) then - if(Debug) print *,__PIO_FILE__,__LINE__,'CFILE syncing : ',file%fh - select case (File%iotype) -#ifdef _PNETCDF - case(PIO_iotype_pnetcdf) - ierr=nfmpi_sync(file%fh) -#endif -#ifdef _NETCDF - case(PIO_iotype_netcdf, pio_iotype_netcdf4c,PIO_IOTYPE_NETCDF4P) - if (File%fh>0) then - ierr= nf90_sync(File%fh) - endif -#endif - case default - call bad_iotype(File%iotype,__PIO_FILE__,__LINE__) - end select - end if - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - end function sync_nf - - subroutine check_file_type(File, filename) -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#else - include 'mpif.h' ! _EXTERNAL -#endif - - type (File_desc_t), intent(inout) :: File - character(len=*), intent(in) :: filename - character(len=4) :: magic - integer :: fh, mpierr, reclength=4, i, eof - logical :: UNITOK, UNITOP - -! Check format of existing files opened to read. - - inquire(file=filename, exist=UNITOK) - if(.not. UNITOK) return - - magic='fail' - - if(File%iosystem%ioproc) then - if(File%iosystem%io_rank==0) then -! Find a unique unit number to open the file - do fh=12,99 - inquire (unit=fh,exist=UNITOK,opened=UNITOP) - if (UNITOK .and. .not. UNITOP) then - open (unit = fh,File=filename,access='direct',recl=reclength,& - FORM='UNFORMATTED',STATUS='OLD',err=100) -! Read the first 4 bytes and look for the CDF or HDF stamp - read (fh,rec=1,err=101) magic - close(fh) - exit - endif - end do - if(magic(1:3) .eq. 'CDF') then - ! No need to do anything here - else if(magic(2:4).eq.'HDF') then -#ifdef _NETCDF4 - if(File%iotype /= PIO_IOTYPE_NETCDF4C .and. & - File%iotype /= PIO_IOTYPE_NETCDF4P) then - print *,'Changing file type to netcdf4p' - File%iotype=pio_iotype_netcdf4c - end if -#else - call piodie(__PIO_FILE__,__LINE__,'You must link with the netcdf4 ',0,& - 'library built with hdf5 support to read this file',0,filename) -#endif - else - ! The HDF identifier could be offset further into the file. - - open (unit = fh,file=filename,access='direct',recl=reclength,& - form='UNFORMATTED',STATUS='OLD',err=100) - - i=128 - eof=0 - do while(eof>=0) - read (fh,rec=i, iostat=eof, err=101) magic - - if(magic(2:4).eq.'HDF') then - if(debug) print *,'Changing file type to netcdf4p' - File%iotype=pio_iotype_netcdf4p - exit - end if - i=i*2 - end do - close(fh) - if(eof<0) call piodie(__PIO_FILE__,__LINE__,'Unrecognized file format ',0,filename) - end if - - end if - - call mpi_bcast(file%iotype,1,mpi_integer, 0, file%iosystem%io_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - return -100 call piodie(__PIO_FILE__,__LINE__,'File open error ',0,filename) -101 call piodie(__PIO_FILE__,__LINE__,'File read error ',0,filename) - - - - end subroutine check_file_type - - - - -end module ionf_mod diff --git a/src/externals/pio1/pio/nf_mod.F90 b/src/externals/pio1/pio/nf_mod.F90 deleted file mode 100644 index c6973ccb1d6..00000000000 --- a/src/externals/pio1/pio/nf_mod.F90 +++ /dev/null @@ -1,1925 +0,0 @@ -#define __PIO_FILE__ "nf_mod.F90" -!> -!! @file -!! @brief NetCDF interface routines -!! -!! $Revision$ -!! $LastChangedDate$ -!! -!< -module nf_mod - -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf ! 
_EXTERNAL -#endif - use alloc_mod - - use pio_kinds, only: i4,r4,r8,pio_offset - use pio_types, only: file_desc_t, iosystem_desc_t, var_desc_t, pio_noerr, pio_iotype_netcdf, & - pio_iotype_pnetcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c, pio_max_name - - use pio_support, only : Debug, DebugIO, DebugAsync, piodie - use pio_utils, only : bad_iotype, check_netcdf - -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif - use pio_support, only : CheckMPIReturn - use pio_msg_mod -#ifdef _COMPRESSION - use pio_types, only : pio_iotype_vdc2 -! use piovdc -#endif -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none - private -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -#include /* _EXTERNAL */ -#endif -#endif - - ! - ! Attribute functions - ! - public :: pio_def_var, & - pio_inq_attname, & - pio_inq_att, & - pio_inq_attlen, & - pio_inq_varid, & - pio_inq_varname, & - pio_inq_vartype, & - pio_inq_varndims, & - pio_inq_vardimid, & - pio_inq_varnatts, & - pio_inquire_variable -!> -!! \defgroup PIO_def_var -!< - interface pio_def_var - module procedure & - def_var_0d, & - def_var_md - end interface - -!> -!! \defgroup PIO_inq_varid -!< - interface pio_inq_varid - module procedure inq_varid_vid, & - inq_varid_vardesc - end interface -!> -!! \defgroup PIO_inq_att -!< - interface pio_inq_att - module procedure inq_att_vid, & - inq_att_vardesc, & - inq_att_vardesc_pio2, & - inq_att_vid_pio2 - end interface - -!> -!! \defgroup PIO_inq_attlen -!< - interface pio_inq_attlen - module procedure inq_attlen_vid, & - inq_attlen_vardesc, & - inq_attlen_vardesc_pio2, & - inq_attlen_vid_pio2 - end interface - -!> -!! \defgroup PIO_inq_attname -!< - interface pio_inq_attname - module procedure inq_attname_vid, & - inq_attname_vardesc - end interface - -!> -!! \defgroup PIO_inq_varname -!< - interface pio_inq_varname - module procedure inq_varname_vid, inq_varname_vdesc - end interface - -!> -!! \defgroup PIO_inq_varndims -!< - interface pio_inq_varndims - module procedure inq_varndims_vid, inq_varndims_vdesc - end interface - -!> -!! \defgroup PIO_inq_varnatts -!< - interface pio_inq_varnatts - module procedure inq_varnatts_vid, inq_varnatts_vdesc - end interface - -!> -!! \defgroup PIO_inq_vardimid -!< - interface pio_inq_vardimid - module procedure inq_vardimid_vid, inq_vardimid_vdesc - end interface - -!> -!! \defgroup PIO_inq_vartype -!< - interface pio_inq_vartype - module procedure inq_vartype_vid, inq_vartype_vdesc - end interface - -!> -!! \defgroup PIO_inquire_variable -!< - interface pio_inquire_variable - module procedure inquire_variable_vid, inquire_variable_vdesc - end interface - -!> -!! @defgroup PIO_def_dim -!< - public :: PIO_def_dim - -!> -!! @defgroup PIO_enddef -!< - public :: PIO_enddef - -!> -!! \defgroup PIO_redef -!< - public :: PIO_redef - -!> -!! \defgroup PIO_inquire -!< - public :: PIO_inquire - -!> -!! \defgroup PIO_inq_dimid -!< - public :: PIO_inq_dimid - -!> -!! \defgroup PIO_inq_dimname -!< - public :: PIO_inq_dimname - -!> -!! \defgroup PIO_inq_dimlen -!< - public :: PIO_inq_dimlen - -!> -!! \defgroup PIO_inquire_dimension -!< - public :: PIO_inquire_dimension - -!> -!! \defgroup PIO_copy_att -!< - public :: PIO_copy_att -#ifdef _COMPRESSION - interface - subroutine defvdfvar(foo) bind(C) - use, intrinsic :: iso_c_binding - type(c_ptr), value, intent(in) :: foo - end subroutine defvdfvar - end interface -#endif -contains - -!> -!! @public -!! @ingroup PIO_inquire -!! 
@brief Gets metadata information for netcdf file. -!! @details -!! @param File @copydoc file_desc_t -!! @param nDimensions : Number of dimensions defined for the netcdf file -!! @param nVariables : Number of variables defined for the netcdf file -!! @param nAttributes : Number of attributes defined for the netcdf file -!! @param unlimitedDimID : the Unlimited dimension ID -!! @retval ierr @copydoc error_return -!> - integer function pio_inquire(File,nDimensions,nVariables,nAttributes,unlimitedDimID) result(ierr) - type (File_desc_t), intent(in) :: File - - integer, optional, intent(out) :: & - nDimensions, &! number of dimensions - nVariables, &! number of variables - nAttributes, & ! number of global attributes - unlimitedDimID ! ID of unlimited dimension - integer :: vals(4) - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg - type(iosystem_desc_t), pointer :: ios - - ierr=PIO_noerr - vals(:) = -1 - - ios => File%iosystem - - if(ios%async_interface .and. .not. ios%ioproc) then - msg=PIO_MSG_INQUIRE - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - end if - - iotype = File%iotype - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq( File%fh,vals(1),vals(2), & - vals(3),vals(4)) - -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire( File%fh,vals(1),vals(2), & - vals(3),vals(4)) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire( File%fh,vals(1),vals(2), & - vals(3),vals(4)) - endif - - if(ios%num_iotasks>1) then - call MPI_BCAST(vals,4,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - - call check_netcdf(File, ierr, __PIO_FILE__,__LINE__) - - if(file%iosystem%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(vals,4,MPI_INTEGER,ios%IOMaster, ios%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - - if(present(nDimensions)) then - ndimensions = vals(1) - endif - if(present(nVariables)) then - nVariables = vals(2) - endif - if(present(nAttributes)) then - nAttributes = vals(3) - endif - if(present(unlimitedDimID)) then - unlimitedDimID = vals(4) - endif - - end function pio_inquire - -!> -!! @public -!! @ingroup PIO_inq_att -!! @brief Gets information about attributes -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param name : Name of the attribute -!! @param xtype : The type of attribute -!! @param len : The length of the attribute -!! @retval ierr @copydoc error_return -!> - integer function inq_att_vid(File,varid,name,xtype,len) result(ierr) - - - type (File_desc_t), intent(inout) :: File - integer(i4), intent(in) :: varid - character(len=*), intent(in) :: name - integer, intent(out) :: xtype - integer, intent(out) :: len !Attribute length - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg, nlen - integer(kind=PIO_Offset) :: clen - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - iotype = File%iotype - ierr=PIO_noerr - nlen = len_trim(name) - - if(ios%async_interface) then - if(.not. 
ios%ioproc ) then - msg=PIO_MSG_INQ_ATT - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - end if - - - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_att(File%fh,varid,name(1:nlen),xtype,clen) - - len = INT(clen,kind=i4) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_attribute( File%fh,varid,name(1:nlen), & - xtype=xtype,len=len) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - - if (ios%io_rank==0) then - ierr=nf90_inquire_attribute( File%fh,varid,name(1:nlen), & - xtype=xtype,len=len) - endif - - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(xtype,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - call MPI_BCAST(len,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(xtype,1,MPI_INTEGER,ios%IOMaster, ios%my_comm , mpierr) - call CheckMPIReturn('nf_mod',mpierr) - call MPI_BCAST(len,1,MPI_INTEGER,ios%IOMaster, ios%my_comm , mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - end function inq_att_vid - - -!> -!! @public -!! @ingroup PIO_inq_att -!! @brief Gets information about attributes -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param name : Name of the attribute -!! @param xtype : The type of attribute -!! @param len : The length of the attribute -!! @retval ierr @copydoc error_return -!> - integer function inq_att_vardesc(File,vardesc,name,xtype,len) result(ierr) - - type (File_desc_t), intent(inout) :: File - type(var_desc_t), intent(in) :: vardesc - character(len=*), intent(in) :: name - integer, intent(out) :: xtype - integer, intent(out) :: len !Attribute length - - ierr = pio_inq_att(file, vardesc%varid, name, xtype, len) - - end function inq_att_vardesc - integer function inq_att_vardesc_pio2(File,vardesc,name,xtype,len) result(ierr) - - type (File_desc_t), intent(inout) :: File - type(var_desc_t), intent(in) :: vardesc - character(len=*), intent(in) :: name - integer, intent(out) :: xtype - integer(pio_offset), intent(out) :: len !Attribute length - - ierr = inq_att_vid_pio2(file, vardesc%varid, name, xtype, len) - - end function inq_att_vardesc_pio2 - integer function inq_att_vid_pio2(File,varid,name,xtype,len) result(ierr) - - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid - character(len=*), intent(in) :: name - integer, intent(out) :: xtype - integer(pio_offset), intent(out) :: len !Attribute length - integer :: ilen - - ierr = inq_att_vid(file, varid, name, xtype, ilen) - len = int(ilen, pio_offset) - - end function inq_att_vid_pio2 - -!> -!! @public -!! @ingroup PIO_inq_attlen -!! @brief Gets the attribute length -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : attribute id -!! @param name : name of attribute -!! @param len : Length of attribute -!! 
@retval ierr @copydoc error_return -!> - integer function inq_attlen_vid(File,varid,name,len) result(ierr) - - type (File_desc_t), intent(inout) :: File - integer(i4), intent(in) :: varid - character(len=*), intent(in) :: name - integer, intent(out) :: len !Attribute length - - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg, nlen - integer(kind=PIO_Offset) :: clen - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - - iotype = File%iotype - ierr=PIO_noerr - nlen = len_trim(name) - - if(ios%async_interface) then - if(.not. ios%ioproc ) then - msg=PIO_MSG_INQ_ATTLEN - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_attlen(File%fh,varid,name(1:nlen),clen) - len = INT(clen,kind=i4) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_attribute( File%fh,varid,name(1:nlen), & - len=len) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire_attribute( File%fh,varid,name(1:nlen), & - len=len) - endif - - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(len,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface.or.ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(len,1,MPI_INTEGER,ios%IOMaster,ios%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - - end function inq_attlen_vid - integer function inq_attlen_vid_pio2(File,vid,name,len) result(ierr) - - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: vid - character(len=*), intent(in) :: name - integer(pio_offset), intent(out) :: len !Attribute length - - integer :: ilen - ierr = inq_attlen_vid(file, vid, name, ilen) - len = int(ilen,pio_offset) - - end function inq_attlen_vid_pio2 - -!> -!! @public -!! @ingroup PIO_inq_attlen -!! @brief Gets the attribute length -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param name : name of attribute -!! @param len : Length of attribute -!! @retval ierr @copydoc error_return -!> - integer function inq_attlen_vardesc(File,vardesc,name,len) result(ierr) - - type (File_desc_t), intent(inout) :: File - type (Var_desc_t), intent(in) :: vardesc - character(len=*), intent(in) :: name - integer, intent(out) :: len !Attribute length - - ierr = pio_inq_attlen(file, vardesc%varid, name, len) - - end function inq_attlen_vardesc - - integer function inq_attlen_vardesc_pio2(File,vardesc,name,len) result(ierr) - - type (File_desc_t), intent(inout) :: File - type (Var_desc_t), intent(in) :: vardesc - character(len=*), intent(in) :: name - integer(kind=pio_offset), intent(out) :: len !Attribute length - - ierr = inq_attlen_vid_pio2(file, vardesc%varid, name, len) - - end function inq_attlen_vardesc_pio2 - -!> -!! @public -!! @ingroup PIO_inq_attname -!! @brief Returns the name of a netcdf attribute -!! 
@details -!! @param File @copydoc file_desc_t -!! @param varid : The variable ID -!! @param attnum : Attribute number returned from function ???? -!! @param name : Name of the returned attribute -!! @retval ierr @copydoc error_return -!< - integer function inq_attname_vid(File,varid,attnum,name) result(ierr) - - type (File_desc_t), intent(inout) :: File - integer(i4), intent(in) :: varid - integer, intent(in) :: attnum !Attribute number - character(len=*), intent(out) :: name - - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg - type(iosystem_desc_t), pointer :: ios - character(len=PIO_MAX_NAME) :: tmpname - - ios => File%iosystem - - iotype = File%iotype - ierr=PIO_noerr - if(ios%async_interface) then - if(.not. ios%ioproc ) then - msg=PIO_MSG_INQ_ATTNAME - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(attnum,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_attname(File%fh,varid,attnum,tmpname) - -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inq_attname(File%fh,varid,attnum,tmpname) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inq_attname(File%fh,varid,attnum,tmpname) - if(Debug) print *,__PIO_FILE__,__LINE__,name - endif - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(tmpname,PIO_MAX_NAME,MPI_CHARACTER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(tmpname,PIO_MAX_NAME,MPI_CHARACTER,ios%IOMaster,ios%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - name = tmpname(1:len_trim(tmpname)) - end function inq_attname_vid - -!> -!! @public -!! @ingroup PIO_inq_attname -!! @brief Returns the name of a netcdf attribute. -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param attnum : Attribute number returned from function ???? -!! @param name : Name of the returned attribute -!! @retval ierr @copydoc error_return -!< - integer function inq_attname_vardesc(File,vardesc,attnum,name) result(ierr) - type (File_desc_t), intent(inout) :: File - type(var_desc_t), intent(in) :: vardesc - integer, intent(in) :: attnum !Attribute number - character(len=*), intent(out) :: name - - ierr = pio_inq_attname(file, vardesc%varid, attnum, name) - - end function inq_attname_vardesc - -!> -!! @public -!! @ingroup PIO_inq_varid -!! @brief Returns the ID of a netcdf variable given its name -!! @details -!! @param File @copydoc file_desc_t -!! @param name : Name of the returned attribute -!! @param varid : variable ID -!! @retval ierr @copydoc error_return -!< - integer function inq_varid_vid(File,name,varid) result(ierr) - - type (File_desc_t), intent(in) :: File - character(len=*), intent(in) :: name - integer(i4), intent(out) :: varid - integer :: ierr2 - - !------------------ - ! 
Local variables - !------------------ - integer :: iotype, mpierr, msg, nlen - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - - iotype = File%iotype - ierr=PIO_noerr - nlen = len_trim(name) - - if(ios%async_interface) then - if( .not. ios%ioproc ) then - msg=PIO_MSG_INQ_VARID - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_varid(File%fh,name(1:nlen),varid) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inq_varid(File%fh,name(1:nlen),varid) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inq_varid(File%fh,name(1:nlen),varid) - endif - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(varid,1,MPI_INTEGER,0,ios%IO_comm,ierr2) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if (ierr /= PIO_NOERR) varid = 0 - if(ios%async_interface.or.ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(varid,1,MPI_INTEGER,ios%IOMaster,ios%my_comm,ierr2) - end if - - end function inq_varid_vid - -!> -!! @public -!! @ingroup PIO_inq_varid -!! @brief Returns the ID of a netcdf variable given its name -!! @details -!! @param File @copydoc file_desc_t -!! @param name : Name of the returned attribute -!! @param vardesc @copydoc var_desc_t -!! @retval ierr @copydoc error_return -!< - integer function inq_varid_vardesc(File,name,vardesc) result(ierr) - - type (File_desc_t), intent(in) :: File - character(len=*), intent(in) :: name - type (Var_desc_t), intent(inout) :: vardesc - - ierr = pio_inq_varid(File, name, vardesc%varid) - vardesc%rec=-1 - if(ierr==PIO_NOERR) then - ierr = pio_inq_varndims(File, vardesc%varid, vardesc%ndims) ! needed for nfwrite - end if - end function inq_varid_vardesc - -!> -!! @public -!! @ingroup PIO_inq_varname -!! @brief Get the name associated with a variable -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param name : The name of the netcdf variable. -!! @retval ierr @copydoc error_return -!> - integer function inq_varname_vdesc(File,vardesc,name) result(ierr) - - type (File_desc_t), intent(in) :: File - type (Var_desc_t), intent(in) :: vardesc - character(len=*), intent(out) :: name - - ierr = pio_inq_varname(file,vardesc%varid,name) - - end function inq_varname_vdesc - -!> -!! @public -!! @ingroup PIO_inq_varname -!! @brief Get the name associated with a variable -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable id. -!! @param name : The name of the netcdf variable. -!! @retval ierr @copydoc error_return -!> - integer function inq_varname_vid(File,varid,name) result(ierr) - - type (File_desc_t), intent(in) :: File - integer, intent(in) :: varid - character(len=*), intent(out) :: name - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg, nlen - - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - iotype = File%iotype - ierr=PIO_noerr - nlen = len(name) - if(ios%async_interface) then - if(.not. 
ios%ioproc ) then - msg=PIO_MSG_INQ_VARNAME - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_varname(File%fh,varid,name(1:nlen)) - -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_variable(File%fh,varid,name=name(1:nlen)) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire_variable(File%fh,varid,name=name(1:nlen)) - endif - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(name,nlen,MPI_CHARACTER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface.or.ios%num_tasks>=ios%num_iotasks) then - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%IOMaster,ios%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - - end function inq_varname_vid - -!> -!! @public -!! @ingroup PIO_inq_varndims -!! @brief Gets the number of dimension associated with a netcdf variable -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The variable identifier -!! @param ndims : The number of dimensions for the variable -!! @retval ierr @copydoc error_return -!> - integer function inq_varndims_vid(File,varid,ndims) result(ierr) - - type (File_desc_t), intent(in) :: File - integer, intent(in) :: varid - integer(i4), intent(out) :: ndims - - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg - - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - iotype = File%iotype - ierr=PIO_noerr - - if(ios%async_interface) then - if( .not. ios%ioproc ) then - msg=PIO_MSG_INQ_VARNDIMS - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_varndims(File%fh,varid,ndims) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_variable(File%fh,varid,ndims=ndims) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire_variable(File%fh,varid,ndims=ndims) - endif - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(ndims,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) - - - - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(ndims,1,MPI_INTEGER,ios%IOMaster,ios%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - end function inq_varndims_vid - -!> -!! @public -!! @ingroup PIO_inq_varndims -!! @brief Gets the number of dimension associated with a netcdf variable -!! @details -!! @param File @copydoc file_desc_t -!! 
@param vardesc @copydoc var_desc_t -!! @param ndims : The number of dimensions for the variable -!! @retval ierr @copydoc error_return -!> - integer function inq_varndims_vdesc(File,vardesc,ndims) result(ierr) - - type (File_desc_t), intent(in) :: File - type (Var_desc_t), intent(in) :: vardesc - integer(i4), intent(out) :: ndims - - ierr = pio_inq_varndims(File, vardesc%varid, ndims) - end function inq_varndims_vdesc - -!> -!! @public -!! @ingroup PIO_inq_vartype -!! @brief Gets metadata information for netcdf file. -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable id -!! @param type : The type of variable -!! @retval ierr @copydoc error_return -!> - integer function inq_vartype_vid(File,varid,type) result(ierr) - - type (File_desc_t), intent(in) :: File - integer, intent(in) :: varid - integer(i4), intent(out) :: type - - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg - - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - iotype = File%iotype - ierr=PIO_noerr - - if(ios%async_interface) then - if(.not. ios%ioproc ) then - msg=PIO_MSG_INQ_VARTYPE - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_vartype(File%fh,varid,type) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_variable(File%fh,varid,xtype=type) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire_variable(File%fh,varid,xtype=type) - endif - - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(type,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) - if(file%iosystem%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(type,1,MPI_INTEGER,ios%IOMaster,ios%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - end function inq_vartype_vid - -!> -!! @public -!! @ingroup PIO_inq_vartype -!! @brief Gets metadata information for netcdf file. -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param type : The type of variable -!! @retval ierr @copydoc error_return -!> - integer function inq_vartype_vdesc(File,vardesc,type) result(ierr) - - type (File_desc_t), intent(in) :: File - type (Var_desc_t), intent(in) :: vardesc - integer(i4), intent(out) :: type - - ierr = pio_inq_vartype(File, vardesc%varid, type) - end function inq_vartype_vdesc - -!> -!! @public -!! @ingroup PIO_inq_vardimid -!! @brief returns the dimids of the variable as an interger array -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The variable id -!! @param dimids : The dimension identifier returned by \ref PIO_def_dim -!! @retval ierr @copydoc error_return -!> - integer function inq_vardimid_vid(File,varid,dimids) result(ierr) - - type (File_desc_t), intent(in) :: File - integer, intent(in) :: varid - integer(i4), intent(out) :: dimids(:) - - - !------------------ - ! 
Local variables - !------------------ - integer :: iotype, mpierr, msg - integer :: size_dimids - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - - iotype = File%iotype - ierr=PIO_noerr - - size_dimids=size(dimids) - - if(ios%async_interface) then - if( .not. ios%ioproc ) then - msg=PIO_MSG_INQ_VARDIMID - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(size_dimids,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_vardimid(File%fh,varid,dimids) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_variable(File%fh,varid,dimids=dimids) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire_variable(File%fh,varid,dimids=dimids) - endif - - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(dimids,size(dimids),MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) - if(ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(dimids,size_dimids,MPI_INTEGER,ios%IOMaster,ios%My_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - end function inq_vardimid_vid - -!> -!! @public -!! @ingroup PIO_inq_vardimid -!! @brief returns the dimids of the variable as an interger array -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param dimids : The dimension identifier returned by \ref PIO_def_dim -!! @retval ierr @copydoc error_return -!> - integer function inq_vardimid_vdesc(File,vardesc,dimids) result(ierr) - - type (File_desc_t), intent(in) :: File - type (Var_desc_t), intent(in) :: vardesc - integer(i4), intent(out) :: dimids(:) - - - ierr = pio_inq_vardimid(File, vardesc%varid, dimids) - end function inq_vardimid_vdesc - -!> -!! @public -!! @ingroup PIO_inq_varnatts -!! @brief Returns the number of attributes associated with a varaible -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable id -!! @param natts : The number of attributes associated with the variable -!! @retval ierr @copydoc error_return -!> - integer function inq_varnatts_vid(File,varid,natts) result(ierr) - - type (File_desc_t), intent(in) :: File - integer , intent(in) :: varid - integer(i4), intent(out) :: natts - - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - - iotype = File%iotype - ierr=PIO_noerr - - if(ios%async_interface) then - if( .not. 
ios%ioproc ) then - msg=PIO_MSG_INQ_VARNATTS - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_varnatts(File%fh,varid,natts) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_variable(File%fh,varid,nAtts=natts) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire_variable(File%fh,varid,nAtts=natts) - endif - - call MPI_BCAST(natts,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(natts,1,MPI_INTEGER,ios%IOMaster,ios%My_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - end function inq_varnatts_vid - -!> -!! @public -!! @ingroup PIO_inq_varnatts -!! @brief Returns the number of attributes associated with a varaible -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param natts : The number of attributes associated with the variable -!! @retval ierr @copydoc error_return -!> - integer function inq_varnatts_vdesc(File,vardesc,natts) result(ierr) - - type (File_desc_t), intent(in) :: File - type (Var_desc_t), intent(in) :: vardesc - integer(i4), intent(out) :: natts - - - ierr = pio_inq_varnatts(file, vardesc%varid, natts) - end function inq_varnatts_vdesc - -!> -!! @public -!! @ingroup PIO_inq_dimid -!! @brief Returns the netcdf dimension id for the name. -!! @details -!! @param File @copydoc file_desc_t -!! @param name : The name of the netcdf dimension. -!! @param dimid : The netcdf dimension id. -!! @retval ierr @copydoc error_return -!! -!! Note that we do not want internal error checking for this funtion. -!> - integer function pio_inq_dimid(File,name,dimid) result(ierr) - - type (File_desc_t), intent(in) :: File - character(len=*), intent(in) :: name - integer, intent(out) :: dimid !dimension ID - - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg, nlen - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - - iotype = File%iotype - ierr=PIO_noerr - dimid=-1 - nlen = len(name) - if(ios%async_interface) then - if(.not. ios%ioproc ) then - msg=PIO_MSG_INQ_DIMID - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - end if - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_dimid(File%fh,name(1:nlen),dimid) -#endif - -#ifdef _NETCDF - case (pio_iotype_netcdf4p) - ierr=nf90_inq_dimid(File%fh,name(1:nlen),dimid) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inq_dimid(File%fh,name(1:nlen),dimid) - endif - if(.not. ios%async_interface .and. 
ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(dimid,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - - if(Debug .or. Debugasync) print *,__PIO_FILE__,__LINE__,file%fh, & - name, dimid, ierr - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(dimid,1,MPI_INTEGER,ios%IOMaster,ios%My_comm, mpierr) - if(Debugasync) print *,__PIO_FILE__,__LINE__,dimid,ierr,mpierr - call CheckMPIReturn('nf_mod',mpierr) - end if - - end function pio_inq_dimid - -!> -!! @public -!! @ingroup PIO_inq_dimname -!! @brief Gets the name of a dimension given its ID -!! @details -!! @param File @copydoc file_desc_t -!! @param dimid : The netcdf dimension id. -!! @param dimname : The name associated with the netcdf dimension id. -!! @retval ierr @copydoc error_return -!> - integer function pio_inq_dimname(File,dimid,dimname) result(ierr) - - type (File_desc_t), intent(in) :: File - integer , intent(in) :: dimid - character(len=*), intent(out) :: dimname !dimension name - - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg, ldn - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - iotype = File%iotype - ierr=PIO_noerr - - ldn = len(dimname) - - if(ios%async_interface) then - if(.not. ios%ioproc ) then - msg=PIO_MSG_INQ_DIMNAME - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(dimid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(ldn,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_dimname(File%fh,dimid,dimname(1:ldn)) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_dimension(File%fh,dimid,name=dimname(1:ldn)) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - - if (ios%io_rank==0) then - ierr=nf90_inquire_dimension(File%fh,dimid,name=dimname(1:ldn)) - endif - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(dimname,ldn,MPI_CHARACTER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(dimname,ldn,MPI_CHARACTER,ios%IOMaster,ios%My_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - - end function pio_inq_dimname - -!> -!! @public -!! @ingroup PIO_inq_dimlen -!! @brief Returns the extent of a netCDF dimension -!! @details -!! @param File @copydoc file_desc_t -!! @param dimid : The netcdf dimension. -!! @param dimlen : The extent of the netcdf dimension. -!! @retval ierr @copydoc error_return -!> - integer function pio_inq_dimlen(File,dimid,dimlen) result(ierr) - - type (File_desc_t), intent(in) :: File - integer(i4) , intent(in) :: dimid - integer(i4) , intent(out) :: dimlen !dimension name - - - !------------------ - ! 
Local variables - !------------------ - integer :: iotype, mpierr, msg - integer(kind=PIO_OFFSET) :: clen - type(iosystem_desc_t), pointer :: ios - - ios => File%iosystem - iotype = File%iotype - ierr=PIO_noerr - - if(ios%async_interface) then - if(.not. ios%ioproc ) then - msg=PIO_MSG_INQ_DIMLEN - if(debugasync) print *,__PIO_FILE__,__LINE__,msg - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - call MPI_BCAST(dimid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_inq_dimlen(File%fh,dimid,clen) - dimlen = INT(clen,kind=i4) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_inquire_dimension(File%fh,dimid,len=dimlen) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_inquire_dimension(File%fh,dimid,len=dimlen) - endif - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(dimlen,1,MPI_INTEGER,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(file%iosystem%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(dimlen,1,MPI_INTEGER,ios%IOMaster,ios%My_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - - - end function pio_inq_dimlen - -!> -!! @public -!! @ingroup PIO_enddef -!! @brief Exits netcdf define mode. -!! @details -!! @param File @copydoc file_desc_t -!! @retval ierr @copydoc error_return -!< - integer function PIO_enddef(File) result(ierr) - type (File_desc_t), intent(inout) :: File - type (iosystem_desc_t), pointer :: ios - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr - logical, parameter :: Check = .TRUE. - integer :: msg = PIO_MSG_ENDDEF -#ifdef _COMPRESSION - interface - subroutine endvdfdef() bind(C) - end subroutine endvdfdef - end interface -#endif - iotype = File%iotype - - ierr=PIO_noerr - - ios => file%iosystem - - if(ios%async_interface .and. .not. ios%ioproc) then - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - end if - if(ios%IOproc) then - select case(iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr=nfmpi_enddef(File%fh) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_enddef(File%fh) - endif - case(PIO_iotype_netcdf4p) - ierr=nf90_enddef(File%fh) -#endif - -#ifdef _COMPRESSION - case(pio_iotype_vdc2) - if(ios%io_rank .eq. 0) then - call endvdfdef - endif - -#endif - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - end function PIO_enddef - -!> -!! @public -!! @ingroup PIO_redef -!! @brief Re-enters netcdf define mode. -!! @details -!! @warning Entering and leaving netcdf define mode causes a file sync operation to -!! occur, these operations can be very expensive in parallel systems. We -!! recommend structuring your code to minimize calls to this function. -!! @param File @copydoc file_desc_t -!! 
@retval ierr @copydoc error_return -!< - integer function PIO_redef(File) result(ierr) - type (File_desc_t), intent(inout) :: File - - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, msg - logical, parameter :: Check = .TRUE. - type(iosystem_desc_t), pointer :: ios - - - iotype = File%iotype - ios => file%iosystem - ierr=PIO_noerr - if(ios%async_interface .and. .not. ios%ioproc) then - msg = PIO_MSG_REDEF - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - - ierr=nfmpi_redef(File%fh) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr=nf90_redef(File%fh) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_redef(File%fh) - endif - -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - end function PIO_redef - -!> -!! @public -!! @ingroup PIO_def_dim -!! @brief Defines the netcdf dimension -!! @details -!! @param File @copydoc file_desc_t -!! @param name : The name of the dimension to define -!! @param len : The size of the dimension -!! @param dimid : The dimension identifier -!< - integer function PIO_def_dim(File,name,len,dimid) result(ierr) - - type (File_desc_t), intent(in) :: File - character(len=*), intent(in) :: name - integer(i4), intent(in) :: len - integer(i4), intent(out) :: dimid - - !------------------ - ! Local variables - !------------------ - type(iosystem_desc_t), pointer :: ios - integer :: iotype, mpierr, nlen - integer(kind=PIO_Offset) :: clen - integer :: msg = PIO_MSG_DEF_DIM - - iotype = File%iotype - - ierr=PIO_noerr - ios => file%iosystem - nlen = len_trim(name) - if(ios%async_interface) then - if(Debugasync) print *,__PIO_FILE__,__LINE__ - if( .not. ios%ioproc) then - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - end if - call mpi_bcast(len, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(nlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(name, nlen, mpi_character, ios%compmaster, ios%intercomm, ierr) - if(Debugasync) print *,__PIO_FILE__,__LINE__,file%fh, name(1:nlen) - end if - - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - - clen = len - ierr=nfmpi_def_dim(File%fh,name(1:nlen),clen,dimid) -#endif - -#ifdef _NETCDF - case(PIO_iotype_netcdf4p) - ierr=nf90_def_dim(ncid=File%fh,name=name(1:nlen),len=len,dimid=dimid) - case(pio_iotype_netcdf,PIO_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr=nf90_def_dim(ncid=File%fh,name=name(1:nlen),len=len,dimid=dimid) - endif - if(.not.ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(dimid, 1, MPI_INTEGER, 0, ios%IO_Comm, ierr) - end if -#endif - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - - if(ios%async_interface .or. ios%num_tasks > ios%num_iotasks) then - call MPI_BCAST(dimid, 1, MPI_INTEGER, ios%IOMaster, ios%my_Comm, ierr) - end if - if(debugasync) print *,__PIO_FILE__,__LINE__,dimid - end function PIO_def_dim - - -!> -!! @public -!! @ingroup PIO_def_var -!! 
@brief Defines a netcdf variable -!! @details -!! @param File @copydoc file_desc_t -!! @param name : The name of the variable to define -!! @param type : The type of variable -!! @param vardesc @copydoc var_desc_t -!! @retval ierr @copydoc error_return -!< - integer function def_var_0d(File,name,type,vardesc) result(ierr) - - type (File_desc_t), intent(in) :: File - character(len=*), intent(in) :: name - integer, intent(in) :: type - type (Var_desc_t), intent(inout) :: vardesc - integer :: dimids(0) - - ierr = def_var_md(File,name,type,dimids,vardesc) - - end function def_var_0d - -!> -!! @public -!! @ingroup PIO_def_var -!! @brief Defines the a netcdf variable -!! @details -!! @param File @copydoc file_desc_t -!! @param name : The name of the variable to define -!! @param type : The type of variable -!! @param dimids : The dimension identifier returned by \ref PIO_def_dim -!! @param vardesc @copydoc var_desc_t -!! @retval ierr @copydoc error_return -!< - integer function def_var_md(File,name,type,dimids,vardesc) result(ierr) -#ifdef _COMPRESSION - use C_interface_mod, only : F_C_String_dup -#endif - type (File_desc_t), intent(in) :: File - character(len=*), intent(in) :: name - integer, intent(in) :: type - integer, intent(in) :: dimids(:) - - type (Var_desc_t), intent(inout) :: vardesc - type(iosystem_desc_t), pointer :: ios - !------------------ - ! Local variables - !------------------ - integer :: iotype, mpierr, nlen - integer :: msg = PIO_MSG_DEF_VAR - - - iotype = File%iotype - - ierr=PIO_noerr - vardesc%rec=-1 - vardesc%ndims = SIZE(dimids) - - vardesc%type = type - - ios => file%iosystem - nlen = len_trim(name) - - if(ios%async_interface) then - if( .not. ios%ioproc) then - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - end if - call mpi_bcast(type, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - call mpi_bcast(nlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(name, nlen, mpi_character, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(vardesc%ndims, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(dimids, vardesc%ndims, mpi_integer, ios%compmaster, ios%intercomm, ierr) - endif - if(ios%IOproc) then - select case(iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - if(vardesc%ndims==0) then - ierr=nfmpi_def_var(File%fh,name(1:nlen),type,vardesc%ndims,dimids,vardesc%varid) - else - ierr=nfmpi_def_var(File%fh,name(1:nlen),type,vardesc%ndims,dimids(1:vardesc%ndims),vardesc%varid) - end if -#endif - -#ifdef _NETCDF -#ifdef _NETCDF4 - case(pio_iotype_netcdf4p) - if(vardesc%ndims==0) then - ierr=nf90_def_var( ncid=File%fh,name=name(1:nlen),xtype=type, & - varid=vardesc%varid) - else - ierr=nf90_def_var( ncid=File%fh,name=name(1:nlen),xtype=type, & - dimids=dimids(1:vardesc%ndims),varid=vardesc%varid) - endif -! removed , use nf90_set_fill instead -! ierr = nf90_def_var_fill(File%fh, vardesc%varid, 1, 0) -#endif - case(pio_iotype_netcdf,pio_iotype_netcdf4c) - ! 
assuming type valid for both pnetcdf and netcdf - if (ios%io_rank==0) then - if(vardesc%ndims==0) then - ierr=nf90_def_var( ncid=File%fh,name=name(1:nlen),xtype=type, & - varid=vardesc%varid) - else - ierr=nf90_def_var( ncid=File%fh,name=name(1:nlen),xtype=type, & - dimids=dimids(1:vardesc%ndims),varid=vardesc%varid) - end if - if (Debug) print *, '0: def_var fh=',File%fh, & - 'name=',name(1:nlen),' id=',vardesc%varid -#ifdef _NETCDF4 - if(iotype==pio_iotype_netcdf4c) then - if(vardesc%ndims>0 .and. ierr==PIO_NOERR) then - ierr = nf90_def_var_deflate(File%fh,vardesc%varid,0,1,1) - end if - endif -#endif - - endif - if(.not.ios%async_interface.and.ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(vardesc%varid, 1, MPI_INTEGER, 0, ios%IO_Comm, ierr) - end if -#endif -#ifdef _COMPRESSION - case(pio_iotype_vdc2) - vardesc%name = name(1:nlen)//char(0) - - if(ios%io_rank .eq. 0) then - call defvdfvar( F_C_String_dup(name) ) - endif -#endif - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks> ios%num_iotasks) then - call MPI_BCAST(vardesc%varid, 1, MPI_INTEGER, ios%Iomaster, ios%my_Comm, ierr) - end if - end function def_var_md - -!> -!! @public -!! @ingroup PIO_copy_att -!! @brief No idea what this function does -!! @details -!! @param infile @copydoc file_desc_t -!! @param invarid : -!! @param name : -!! @param outfile : -!! @param outvarid : -!! @retval ierr @copydoc error_return -!< - integer function pio_copy_att(infile, invarid, name, outfile, outvarid) result(ierr) - - type (File_desc_t), intent(in) :: infile, outfile - character(len=*), intent(in) :: name - integer, intent(in) :: invarid, outvarid - integer :: iotype, mpierr, msg - type(iosystem_desc_t), pointer :: ios - - - ios => infile%iosystem - ierr=PIO_noerr - iotype = infile%iotype - if(ios%IOproc) then - select case(iotype) - -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - - ierr = nfmpi_copy_att(infile%fh, invarid, name, & - outfile%fh, outvarid) -#endif -#ifdef _NETCDF - case(pio_iotype_netcdf,PIO_iotype_netcdf4c) - if (ios%io_rank==0) then - ierr = nf90_copy_att(infile%fh,invarid,name,& - outfile%fh,outvarid) - end if - case(PIO_iotype_netcdf4p) - ierr = nf90_copy_att(infile%fh,invarid,name,& - outfile%fh,outvarid) -#endif - end select - end if - call check_netcdf(outFile, ierr,__PIO_FILE__,__LINE__) - end function pio_copy_att - - -!> -!! @public -!! @ingroup PIO_inquire_variable -!! @brief Inquires if a NetCDF variable is present and returns its attributes -!! @details -!! @param ncid : A netcdf file descriptor returned by \ref PIO_openfile or \ref PIO_createfile. -!! @param varid : The netcdf variable ID. -!! @param name : The name of the variable -!! @param xtype : The type of the variable -!! @param ndims : The number of dimensions for the variable. -!! @param dimids : The dimension identifier returned by \ref PIO_def_dim -!! @param natts : Number of attributes associated with the variable -!! 
@retval ierr @copydoc error_return -!> - integer function inquire_variable_vid(ncid, varid, name, xtype, ndims, dimids, natts) result(ierr) - type(file_desc_t), intent(in) :: ncid - integer, intent( in) :: varid - character (len = *), optional, intent(out) :: name - integer, optional, intent(out) :: xtype, ndims - integer, dimension(:), optional, intent(out) :: dimids - integer, optional, intent(out) :: natts - - - if(present(name)) ierr = pio_inq_varname(ncid, varid, name) - if(present(ndims)) ierr = pio_inq_varndims(ncid, varid, ndims) - if(present(dimids)) ierr = pio_inq_vardimid(ncid, varid, dimids) - if(present(natts)) ierr = pio_inq_varnatts(ncid, varid, natts) - if(present(xtype)) ierr = pio_inq_vartype(ncid, varid, xtype) - - - - end function inquire_variable_vid - -!> -!! @public -!! @ingroup PIO_inquire_variable -!! @brief Inquires if a NetCDF variable is present and returns its attributes -!! @details -!! @param ncid : A netcdf file descriptor returned by \ref PIO_openfile or \ref PIO_createfile. -!! @param vardesc @copydoc var_desc_t -!! @param name : The name of the variable -!! @param xtype : The type of the variable -!! @param ndims : The number of dimensions for the variable. -!! @param dimids : The dimension identifier returned by \ref PIO_def_dim -!! @param natts : Number of attributes associated with the variable -!! @retval ierr @copydoc error_return -!> - integer function inquire_variable_vdesc(ncid, vardesc, name, xtype, ndims, dimids, natts) result(ierr) - type(file_desc_t), intent(in) :: ncid - type(var_desc_t), intent( in) :: vardesc - character (len = *), optional, intent(out) :: name - integer, optional, intent(out) :: xtype, ndims - integer, dimension(:), optional, intent(out) :: dimids - integer, optional, intent(out) :: natts - - if(present(name)) ierr = pio_inq_varname(ncid, vardesc, name) - if(present(ndims)) ierr = pio_inq_varndims(ncid, vardesc, ndims) - if(present(dimids)) ierr = pio_inq_vardimid(ncid, vardesc, dimids) - if(present(natts)) ierr = pio_inq_varnatts(ncid, vardesc, natts) - if(present(xtype)) ierr = pio_inq_vartype(ncid, vardesc, xtype) - - end function inquire_variable_vdesc - -!> -!! @public -!! @ingroup PIO_inquire_dimension -!! @brief Get information about a particular dimension in netcdf file -!! @details -!! @param ncid : A netcdf file descriptor returned by \ref PIO_openfile or \ref PIO_createfile. -!! @param dimid : The netcdf dimension ID. -!! @param name : The name of the dimension. -!! @param len : The length of the dimesions name. -!! @retval ierr @copydoc error_return -!> - integer function PIO_inquire_dimension(ncid, dimid, name, len) result(ierr) - type(file_desc_T), intent(in) :: ncid - integer, intent( in) :: dimid - character (len = *), optional, intent(out) :: name - integer, optional, intent(out) :: len - - if(present(len)) ierr = pio_inq_dimlen(ncid, dimid, len) - if(present(name)) ierr = pio_inq_dimname(ncid, dimid,name) - - end function PIO_inquire_dimension - - -end module nf_mod diff --git a/src/externals/pio1/pio/pio.F90 b/src/externals/pio1/pio/pio.F90 deleted file mode 100644 index b8e3a828a30..00000000000 --- a/src/externals/pio1/pio/pio.F90 +++ /dev/null @@ -1,91 +0,0 @@ -!> -!! @file -!! @brief User interface Module for PIO, this is the only file a user program should 'use' -!! -!! $Revision: 856 $ -!! $LastChangedDate: 2013-11-19 15:48:54 -0600 (Tue, 19 Nov 2013) $ -!< - -module pio -! Package all exposed variables and functions under one roof - -! 
only pio_offset is intended for export from kinds - use pio_kinds, only : pio_offset - - use piolib_mod, only : pio_initdecomp, pio_set_rearr_opts, & - pio_openfile, pio_closefile, pio_createfile, pio_setdebuglevel, & - pio_seterrorhandling, pio_setframe, pio_init, pio_get_local_array_size, & - pio_freedecomp, pio_syncfile,pio_numtowrite,pio_numtoread,pio_setiotype, & - pio_dupiodesc, pio_finalize, pio_set_hint, pio_getnumiotasks, pio_file_is_open, & - pio_setnum_OST, pio_getnum_OST - - use pio_types, only : io_desc_t, file_desc_t, var_desc_t, iosystem_desc_t,& - pio_rearr_opt_t, pio_rearr_comm_fc_opt_t, pio_rearr_comm_fc_2d_enable,& - pio_rearr_comm_fc_1d_comp2io, pio_rearr_comm_fc_1d_io2comp,& - pio_rearr_comm_fc_2d_disable, pio_rearr_comm_unlimited_pend_req,& - pio_rearr_comm_p2p, pio_rearr_comm_coll,& - pio_int, pio_real, pio_double, pio_noerr, iotype_netcdf, & - iotype_pnetcdf, iotype_binary, iotype_direct_pbinary, iotype_pbinary, & - PIO_iotype_binary, PIO_iotype_direct_pbinary, PIO_iotype_pbinary, & - pio_iotype_netcdf4p, pio_iotype_netcdf4c, pio_iotype_pnetcdf,pio_iotype_netcdf, & - pio_global, pio_char, pio_write, pio_nowrite, pio_clobber, pio_noclobber, & - pio_max_name, pio_max_var_dims, pio_rearr_none, & -#if defined(_NETCDF) || defined(_PNETCDF) - pio_nofill, pio_unlimited, & -#endif - pio_64bit_offset, pio_64bit_data, & - pio_iotype_vdc2, & - pio_rearr_box, pio_internal_error, pio_bcast_error, pio_return_error - - use piodarray, only : pio_read_darray, pio_write_darray, pio_set_buffer_size_limit - - use nf_mod, only: & - PIO_enddef, & - PIO_inquire , & - PIO_inq_attname , & - PIO_inq_att , & - PIO_inq_attlen , & - PIO_inq_varid , & - PIO_inq_varname , & - PIO_inq_vartype , & - PIO_inq_varndims ,& - PIO_inq_vardimid ,& - PIO_inq_varnatts ,& - PIO_inq_dimid , & - PIO_inq_dimname , & - PIO_inq_dimlen , & - PIO_def_dim , & - PIO_def_var , & - PIO_redef , & - PIO_copy_att , & - PIO_inquire_variable , & - PIO_inquire_dimension - - use pionfatt_mod, only : PIO_put_att => put_att, & - PIO_get_att => get_att - use pionfput_mod, only : PIO_put_var => put_var - use pionfget_mod, only : PIO_get_var => get_var - - use calcdecomp, only : pio_set_blocksize - - - - implicit none - public -! Added for pio2 compatability - integer, parameter :: pio_offset_kind = pio_offset - integer, parameter :: pio_rearr_subset = pio_rearr_box -contains - function pio_iam_iotask(iosystem) result(task) - type(iosystem_desc_t), intent(in) :: iosystem - logical :: task - task = iosystem%ioproc - end function pio_iam_iotask - function pio_iotask_rank(iosystem) result(rank) - type(iosystem_desc_t), intent(in) :: iosystem - integer :: rank - rank = iosystem%io_rank - end function pio_iotask_rank - -end module pio - diff --git a/src/externals/pio1/pio/pio_kinds.F90 b/src/externals/pio1/pio/pio_kinds.F90 deleted file mode 100644 index 1655a4fd5ec..00000000000 --- a/src/externals/pio1/pio/pio_kinds.F90 +++ /dev/null @@ -1,53 +0,0 @@ -!> -!! @file pio_kinds.F90 -!! @brief basic data types -!! -!! $Revision$ -!! $LastChangedDate$ -!< - module pio_kinds - -!BOP -! !MODULE: pio_kinds -! -! !DESCRIPTION: -! This module defines default numerical data types for all common data -! types like integer, character, logical, real4 and real8. -! -! !REVISION HISTORY: -! CVS:$Id: pio_kinds.F90,v 1.1.1.1 2006/07/31 16:15:30 dennis Exp $ -! CVS:$Name: $ - -! !USES: -! uses mpi if available -#ifndef NO_MPIMOD - use mpi, only : MPI_OFFSET_KIND ! _EXTERNAL -#endif - - implicit none - private -#ifdef NO_MPIMOD - include 'mpif.h' ! 
_EXTERNAL -#endif -! !DEFINED PARAMETERS: - - integer, parameter, public :: & - log_kind = kind(.true.) ,& - int_kind = kind(1) ,& - i4 = selected_int_kind(6) ,& - i8 = selected_int_kind(13) ,& - r4 = selected_real_kind(6) ,& - r8 = selected_real_kind(13) - - - integer, parameter, public :: & - PIO_OFFSET = MPI_OFFSET_KIND - -!EOP -!BOC -!EOC -!*********************************************************************** - - end module pio_kinds - -!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/src/externals/pio1/pio/pio_mpi_utils.F90 b/src/externals/pio1/pio/pio_mpi_utils.F90 deleted file mode 100644 index c22ac340001..00000000000 --- a/src/externals/pio1/pio/pio_mpi_utils.F90 +++ /dev/null @@ -1,41 +0,0 @@ -#define __PIO_FILE__ "pio_mpi_utils.F90" -module pio_mpi_utils - - implicit none - private - - public :: pio_type_to_mpi_type - -contains - - integer function pio_type_to_mpi_type(ptype) result(mtype) - - use pio_support, only : piodie - use pio_types, only : PIO_char, PIO_int, PIO_double, PIO_real -#ifndef NO_MPIMOD - use mpi, only : MPI_REAL8, MPI_REAL4, MPI_INTEGER, MPI_CHARACTER ! _EXTERNAL -#endif - - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - integer, intent(in):: ptype - - select case(ptype) - case (PIO_double) - mtype=MPI_REAL8 - case (PIO_real) - mtype=MPI_REAL4 - case (PIO_int) - mtype=MPI_INTEGER - case (PIO_char) - mtype=MPI_CHARACTER - case default - call piodie( __PIO_FILE__,__LINE__, & - 'Could not convert pio type=',ptype,' to an mpi type') - end select - - end function pio_type_to_mpi_type - -end module pio_mpi_utils diff --git a/src/externals/pio1/pio/pio_msg_callbacks.F90 b/src/externals/pio1/pio/pio_msg_callbacks.F90 deleted file mode 100644 index 49880c15c5b..00000000000 --- a/src/externals/pio1/pio/pio_msg_callbacks.F90 +++ /dev/null @@ -1,499 +0,0 @@ -#include "dtypes.h" -#define __PIO_FILE__ "pio_msg_callbacks.F90" -subroutine pio_callback_handler(iosystem, msg) - use pio - use pio_msg_mod - use pio_support, only : debugAsync, piodie -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t) :: iosystem - integer, intent(in) :: msg - - type(file_desc_t), pointer :: file - integer fh, ierr - - - integer :: len, id, dimids(PIO_MAX_VAR_DIMS), type, info(4) - character(len=pio_max_name) :: name - type(var_desc_t) :: vardesc - - - call mpi_bcast(fh, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - file=> lookupfile(fh) - - - select case(msg) - case (PIO_MSG_CLOSE_FILE) - call delete_from_file_list(fh) - call pio_closefile(file) - deallocate(file) - case (PIO_MSG_DEF_DIM) - ierr = pio_def_dim(file, name, len, id) - case (PIO_MSG_DEF_VAR) - ierr = pio_def_var(file, name, type, dimids, vardesc) - case (PIO_MSG_ENDDEF) - ierr = pio_enddef(file) - case (PIO_MSG_REDEF) - ierr = pio_redef(file) - case (PIO_MSG_INQ_VARNDIMS) - ierr = pio_inq_varndims(file, id, len) - case (PIO_MSG_INQ_VARNATTS) - ierr = pio_inq_varnatts(file, id, len) - case (PIO_MSG_INQ_VARDIMID) - ierr = pio_inq_vardimid(file, id, dimids) - case (PIO_MSG_INQ_VARID) - ierr = pio_inq_varid(File, name, id) - case (PIO_MSG_INQ_VARNAME) - ierr = pio_inq_varname(file, id, name) - case (PIO_MSG_INQ_VARTYPE) - ierr = pio_inq_vartype(file, id, type) - case (PIO_MSG_INQ_DIMID) - ierr = pio_inq_dimid(file, name, id) - case (PIO_MSG_INQ_DIMLEN) - ierr = pio_inq_dimlen(file, id, len) - case (PIO_MSG_INQ_DIMNAME) - ierr = pio_inq_dimname(file, id, name) - 
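[Each PIO_MSG_* tag dispatched in this handler corresponds to one of the nf_mod inquiry routines removed earlier in this diff (pio_inq_dimid, pio_inq_dimlen, pio_inq_varid, pio_inq_varndims, pio_inq_vardimid, and so on); the handler looks the file up by its handle and re-issues the same call on the I/O tasks, whose async branch then broadcasts the remaining arguments. For reference, a minimal sketch of how these routines were called directly from compute-side user code is shown below. It is illustrative only and not part of the removed sources; it assumes a file_desc_t already opened with pio_openfile, and the dimension and variable names are made up.

  subroutine inquire_example(file)
    ! Illustrative sketch only; relies on the PIO1 'pio' module removed in this diff.
    use pio, only : file_desc_t, pio_noerr, pio_inq_dimid, pio_inq_dimlen, &
                    pio_inq_varid, pio_inq_varndims, pio_inq_vardimid
    implicit none
    type(file_desc_t), intent(inout) :: file   ! assumed already opened via pio_openfile
    integer :: ierr, dimid, dimlen, varid, ndims
    integer, allocatable :: dimids(:)

    ! Dimension inquiries: look up the id by (hypothetical) name, then its extent.
    ierr = pio_inq_dimid(file, 'time', dimid)
    if (ierr == pio_noerr) ierr = pio_inq_dimlen(file, dimid, dimlen)

    ! Variable inquiries: id by (hypothetical) name, then rank, then the dimension ids.
    ierr = pio_inq_varid(file, 'temperature', varid)
    if (ierr == pio_noerr) then
       ierr = pio_inq_varndims(file, varid, ndims)
       allocate(dimids(ndims))
       ierr = pio_inq_vardimid(file, varid, dimids)
    end if
  end subroutine inquire_example
]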
case(PIO_MSG_INQUIRE) - ierr = pio_inquire(file, info(1), info(2), info(3), info(4)) - case(PIO_MSG_INQ_ATT) - ierr = pio_inq_att(file, id, name, type, len) - case(PIO_MSG_INQ_ATTNAME) - ierr = pio_inq_attname(file, id, type, name) - case(PIO_MSG_INQ_ATTLEN) - ierr = pio_inq_attlen(file, id, name, len) - case(PIO_MSG_SYNC_FILE) - call pio_syncfile(file) - case default - print *, 'PIO Got unrecognized message ', msg, ierr - call piodie(__PIO_FILE__,__LINE__) - end select - -end subroutine pio_callback_handler - -subroutine freedecomp_handler(iosystem) - use pio, only : iosystem_desc_t, io_desc_t, pio_freedecomp -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - use pio_msg_mod, only : delete_from_iodesc_list - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t) :: iosystem - type(io_desc_t), pointer :: iodesc - integer :: async_id, ierr - - async_id=-1 - call mpi_bcast(async_id, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - iodesc=>delete_from_iodesc_list(async_id) - call pio_freedecomp(iosystem, iodesc) - - -end subroutine freedecomp_handler - - -subroutine create_file_handler(iosystem) - use pio, only : iosystem_desc_t, file_desc_t, pio_createfile - use pio_msg_mod, only : add_to_file_list - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t) :: iosystem - - integer :: ierr - integer :: iotype, amode - integer :: namelen - character(len=:), allocatable :: fname - type(file_desc_t), pointer :: file - - call mpi_bcast(namelen, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - allocate(character(len=namelen):: fname ) - call mpi_bcast(fname, namelen, mpi_character, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(iotype, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - call mpi_bcast(amode, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - allocate(file) - - ierr= pio_createfile(iosystem, file, iotype, trim(fname), amode ) - deallocate(fname) - call add_to_file_list(file) - if(Debugasync) print *,__PIO_FILE__,__LINE__,file%fh - -end subroutine create_file_handler - -subroutine open_file_handler(iosystem) - use pio - use piolib_mod - use pio_kinds - use pio_msg_mod - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t) :: iosystem - - integer :: ierr - integer :: iotype, amode - - character(len=:), allocatable :: fname - type(file_desc_t), pointer :: file - integer :: namelen - - - call mpi_bcast(namelen, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - allocate(character(len=namelen):: fname ) - call mpi_bcast(fname, namelen, mpi_character, iosystem%compmaster, iosystem%intercomm, ierr) - - call mpi_bcast(iotype, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - call mpi_bcast(amode, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - allocate(file) - - ierr= pio_openfile(iosystem, file, iotype, trim(fname), amode) - deallocate(fname) - call add_to_file_list(file) - if(Debugasync) print *,__PIO_FILE__,__LINE__,file%fh - -end subroutine open_file_handler - -subroutine initdecomp_dof_handler(iosystem) - - use pio - use pio_kinds - use pio_msg_mod - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 
'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t) :: iosystem - - type(io_desc_t), pointer :: iodesc - integer :: ierr - integer(i4) :: basepiotype, dims_size, dims(PIO_MAX_VAR_DIMS), dof_size, sandc_size - integer(kind=pio_offset) :: compdof(1) - integer(kind=pio_offset), allocatable :: iostart(:), iocount(:) - - call mpi_bcast(basepiotype, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(dims_size, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(dims(1:dims_size), dims_size, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - allocate(iodesc) - - compdof=0 - call add_to_iodesc_list(iodesc) - - call mpi_bcast(iodesc%async_id, 1, mpi_integer, iosystem%iomaster, iosystem%intercomm, ierr) - - call pio_initdecomp(iosystem, basepiotype, dims(1:dims_size), compdof, iodesc) - -end subroutine initdecomp_dof_handler - -subroutine writedarray_handler(iosystem) - use pio - use pio_kinds - use pio_msg_mod - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t) :: iosystem - type(file_desc_t), pointer :: file - type(var_desc_t) :: v - type(io_desc_t), pointer :: iodesc - - integer :: ierr, type, fh, fillv, iod_id - - integer(i4) :: fillval_int, aint(1) - real(r4) :: fillval_real, areal(1) - real(r8) :: fillval_double, adouble(1) - - - call mpi_bcast(fh, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(v%varid, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(v%rec, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(v%ndims, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(iod_id, 1, mpi_integer , iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(type, 1, mpi_integer , iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(fillv, 1, mpi_integer , iosystem%compmaster, iosystem%intercomm, ierr) - - file=> lookupfile(fh) - if(debugasync) print *,__PIO_FILE__,__LINE__,v%varid,iod_id - iodesc => lookupiodesc(iod_id) -#ifndef _MPISERIAL - select case(type) - case(mpi_integer) - if(fillv==1) then - call mpi_bcast(fillval_int, 1, mpi_integer , iosystem%compmaster, iosystem%intercomm, ierr) - call pio_write_darray(file, v, iodesc, aint, ierr, fillval_int) - else - call pio_write_darray(file, v, iodesc, aint, ierr) - end if - case(mpi_real4) - if(fillv==1) then - call mpi_bcast(fillval_real, 1, mpi_real4 , iosystem%compmaster, iosystem%intercomm, ierr) - call pio_write_darray(file, v, iodesc, areal, ierr, fillval_real) - else - call pio_write_darray(file, v, iodesc, areal, ierr) - end if - case(mpi_real8) - if(fillv==1) then - call mpi_bcast(fillval_double, 1, mpi_real8, iosystem%compmaster, iosystem%intercomm, ierr) - call pio_write_darray(file, v, iodesc, adouble, ierr, fillval_double) - else - call pio_write_darray(file, v, iodesc, adouble, ierr) - end if - end select -#endif - -end subroutine writedarray_handler - - -subroutine readdarray_handler(iosystem) - use pio - use pio_kinds - use pio_msg_mod - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t) :: iosystem - type(file_desc_t), pointer :: file - type(var_desc_t) :: v - type(io_desc_t), pointer :: iodesc - - integer :: ierr, type, fh, iod_id - - integer(i4) :: aint(1) - real(r4) :: 
areal(1) - real(r8) :: adouble(1) - - if(debugasync) print *,__PIO_FILE__,__LINE__ - - call mpi_bcast(fh, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(v%varid, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(v%rec, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(iod_id, 1, mpi_integer , iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(type, 1, mpi_integer , iosystem%compmaster, iosystem%intercomm, ierr) - - file=> lookupfile(fh) - - if(debugasync) print *,__PIO_FILE__,__LINE__,iod_id, type - - iodesc => lookupiodesc(iod_id) -#ifndef _MPISERIAL - select case(type) - case(mpi_integer) - call pio_read_darray(file, v, iodesc, aint, ierr) - case(mpi_real4) - call pio_read_darray(file, v, iodesc, areal, ierr) - case(mpi_real8) - call pio_read_darray(file, v, iodesc, adouble, ierr) - end select -#endif - -end subroutine readdarray_handler - -subroutine seterrorhandling_handler(ios) - use pio, only : iosystem_desc_t, pio_seterrorhandling -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - type(iosystem_desc_t), intent(inout) :: ios - integer :: method, ierr - - call mpi_bcast(method, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - call pio_seterrorhandling(ios, method) - -end subroutine seterrorhandling_handler - -subroutine string_handler_for_att(file, varid, name, strlen, msg) - use pio_msg_mod, only : pio_msg_getatt - use pio, only : file_desc_t, pio_get_att, pio_put_att - use pio_support, only : debugasync - implicit none - - type(file_desc_t) :: file - integer, intent(in) :: varid, strlen, msg - character(len=*) :: name - character(len=strlen) :: str - integer :: ierr - - if(msg==PIO_MSG_GETATT) then - if(Debugasync) print *,__PIO_FILE__,__LINE__, varid, name - ierr = pio_get_att(file, varid, name, str ) - if(Debugasync) print *,__PIO_FILE__,__LINE__, str - else - ierr = pio_put_att(file, varid, name, str ) - end if -end subroutine string_handler_for_att - -subroutine att_handler(ios, msg) - - use pio, only : iosystem_desc_t, file_desc_t, pio_get_att, pio_max_name, pio_put_att - use pio_kinds, only : i4, r4, r8 - use pio_msg_mod, only : lookupfile, pio_msg_putatt, pio_msg_getatt - use pio_support, only : debugAsync, piodie -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - integer, intent(in) :: msg - type(iosystem_desc_t), intent(inout) :: ios - type(file_desc_t), pointer :: file - integer :: fh, varid, ierr, itype, strlen, nlen - character(len=PIO_MAX_NAME) :: name - - real(r4) :: rvar - real(r8) :: dvar - integer(i4) :: ivar - - if(Debugasync) print *,__PIO_FILE__,__LINE__ - - call mpi_bcast(fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(nlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(name(1:nlen), nlen, mpi_integer, ios%compmaster, ios%intercomm, ierr) - if(Debugasync) print *,__PIO_FILE__,__LINE__, itype,nlen - - file=> lookupfile(fh) - - select case(itype) - case (TYPETEXT) - call mpi_bcast(strlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - if(Debugasync) print *,__PIO_FILE__,__LINE__, strlen,nlen - call string_handler_for_att (file, varid, name(1:nlen), strlen, msg) - case (TYPEREAL) - if(msg==PIO_MSG_GETATT) 
then - ierr = pio_get_att(file, varid, name(1:nlen), rvar) - else - ierr = pio_put_att(file, varid, name(1:nlen), rvar) - end if - case (TYPEDOUBLE) - if(msg==PIO_MSG_GETATT) then - ierr = pio_get_att(file, varid, name(1:nlen), dvar) - else - ierr = pio_put_att(file, varid, name(1:nlen), dvar) - end if - case (TYPEINT) - if(msg==PIO_MSG_GETATT) then - ierr = pio_get_att(file, varid, name(1:nlen), ivar) - else - ierr = pio_put_att(file, varid, name(1:nlen), ivar) - end if - end select - -end subroutine att_handler - - - -subroutine att_1d_handler(ios, msg) - - use pio, only : iosystem_desc_t, file_desc_t, pio_get_att, pio_max_name, pio_put_att - use pio_kinds, only : i4, r4, r8 - use pio_msg_mod, only : lookupfile, pio_msg_getatt_1d, pio_msg_putatt_1d - use pio_support, only : debugAsync, piodie -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - - type(iosystem_desc_t), intent(inout) :: ios - integer, intent(in) :: msg - type(file_desc_t), pointer :: file - integer :: fh, varid, ierr, itype, strlen, nlen, clen - character(len=PIO_MAX_NAME) :: name - - real(r4), allocatable :: rvar(:) - real(r8), allocatable :: dvar(:) - integer(i4), allocatable :: ivar(:) - - call mpi_bcast(fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(nlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(name(1:nlen), nlen, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call MPI_BCAST(clen,1,MPI_INTEGER,ios%CompMaster, ios%intercomm , ierr) - - file=> lookupfile(fh) - - select case(itype) - case (TYPEREAL) - allocate(rvar(clen)) - if(msg==pio_msg_getatt_1d) then - ierr = pio_get_att(file, varid, name(1:nlen), rvar) - else - ierr = pio_put_att(file, varid, name(1:nlen), rvar) - end if - deallocate(rvar) - case (TYPEDOUBLE) - allocate(dvar(clen)) - if(msg==pio_msg_getatt_1d) then - ierr = pio_get_att(file, varid, name(1:nlen), dvar) - else - ierr = pio_put_att(file, varid, name(1:nlen), dvar) - end if - deallocate(dvar) - case (TYPEINT) - allocate(ivar(clen)) - if(msg==pio_msg_getatt_1d) then - ierr = pio_get_att(file, varid, name(1:nlen), ivar) - else - ierr = pio_put_att(file, varid, name(1:nlen), ivar) - end if - deallocate(ivar) - end select - -end subroutine att_1d_handler - - -subroutine finalize_handler(iosystem) - use pio, only : iosystem_desc_t, pio_finalize - use pio_support, only : debugAsync - implicit none - type(iosystem_desc_t) :: iosystem - integer :: ierr - - call pio_finalize(iosystem, ierr) - -end subroutine finalize_handler diff --git a/src/externals/pio1/pio/pio_msg_getput_callbacks.F90.in b/src/externals/pio1/pio/pio_msg_getput_callbacks.F90.in deleted file mode 100644 index 2a75c28ac37..00000000000 --- a/src/externals/pio1/pio/pio_msg_getput_callbacks.F90.in +++ /dev/null @@ -1,415 +0,0 @@ -! Not a module - this line is required by genf90.pl -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! 
@brief Callback functions for Asyncronous IO -!< -subroutine string_handler_for_var1(file, varid, index, ndims, strlen, msg) - use pio, only : file_desc_t, pio_get_var, pio_put_var - use pio_msg_mod, only : pio_msg_getvar1 - implicit none - type(file_desc_t) :: file - integer, intent(in) :: varid, strlen, msg, ndims - integer, intent(in) :: index(ndims) - character(len=strlen) :: str - integer :: ierr - - if(msg==PIO_MSG_GETVAR1) then - ierr = pio_get_var(file, varid, index, str ) - else - ierr = pio_put_var(file, varid, index, str ) - end if -end subroutine string_handler_for_var1 - -subroutine var1_handler(ios, msg) - use pio, only : iosystem_desc_t, file_desc_t, pio_get_var, pio_put_var - use pio_kinds, only : i4, r4, r8, pio_offset - use pio_msg_mod, only : lookupfile, pio_msg_getvar1 - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - - type(iosystem_desc_t), intent(inout) :: ios - integer, intent(in) :: msg - type(file_desc_t), pointer :: file - integer :: fh, varid, ierr, itype, strlen, size_index - integer, allocatable :: index(:) - - real(r4) :: rvar - real(r8) :: dvar - integer(i4) :: ivar - - call mpi_bcast(fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(size_index, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - allocate(index(size_index)) - call mpi_bcast(index, size_index, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - file=> lookupfile(fh) - - - if(itype == TYPETEXT) then - call mpi_bcast(strlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - call string_handler_for_var1(file, varid, index, size_index, strlen, msg) - else - if(msg==pio_msg_getvar1) then - select case(itype) - case (TYPEREAL) - ierr = pio_get_var(file, varid, index, rvar) - case (TYPEDOUBLE) - ierr = pio_get_var(file, varid, index, dvar) - case (TYPEINT) - ierr = pio_get_var(file, varid, index, ivar) - end select - else - select case(itype) - case (TYPEREAL) - ierr = pio_put_var(file, varid, index, rvar) - case (TYPEDOUBLE) - ierr = pio_put_var(file, varid, index, dvar) - case (TYPEINT) - ierr = pio_put_var(file, varid, index, ivar) - end select - end if - end if - deallocate(index) -end subroutine var1_handler - -! DIMS 1,2,3,4,5 -subroutine vara_{DIMS}d_handler(ios, msg) - use pio, only : iosystem_desc_t, file_desc_t, pio_get_var, pio_put_var - use pio_kinds, only : i4, r4, r8, pio_offset - use pio_msg_mod, only : lookupfile, pio_msg_getvara_{DIMS}d - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi ! 
_EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - - type(iosystem_desc_t), intent(inout) :: ios - integer,intent(in) :: msg - - type(file_desc_t), pointer :: file - integer :: fh, varid, ierr, itype, strlen, size_index, ndims - integer :: dims({DIMS}) - integer, allocatable :: start(:), count(:) - real(r4), allocatable :: rvar{DIMSTR} - real(r8), allocatable :: dvar{DIMSTR} - integer(i4), allocatable :: ivar{DIMSTR} - - call mpi_bcast(fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - call mpi_bcast(ndims, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - allocate(start(ndims),count(ndims)) - call mpi_bcast(start, ndims, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(count, ndims, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - call MPI_BCAST(dims,{DIMS},MPI_INTEGER,ios%CompMaster, ios%intercomm , ierr) - - file=> lookupfile(fh) - - select case(itype) - case (TYPETEXT) - call mpi_bcast(strlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call string_handler_for_vara_{DIMS}d(file, varid, start, count, strlen, dims, msg) - case (TYPEREAL) -#if({DIMS} == 1) - allocate(rvar(dims(1))) -#elif({DIMS} == 2) - allocate(rvar(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(rvar(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(rvar(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(rvar(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg==pio_msg_getvara_{DIMS}d) then - ierr = pio_get_var(file, varid, start, count, rvar) - else - ierr = pio_put_var(file, varid, start, count, rvar) - end if - deallocate(rvar) - case (TYPEDOUBLE) -#if({DIMS} == 1) - allocate(dvar(dims(1))) -#elif({DIMS} == 2) - allocate(dvar(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(dvar(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(dvar(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(dvar(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg==pio_msg_getvara_{DIMS}d) then - ierr = pio_get_var(file, varid, start, count, dvar) - else - ierr = pio_put_var(file, varid, start, count, dvar) - end if - deallocate(dvar) - case (TYPEINT) -#if({DIMS} == 1) - allocate(ivar(dims(1))) -#elif({DIMS} == 2) - allocate(ivar(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(ivar(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(ivar(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(ivar(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg==pio_msg_getvara_{DIMS}d) then - ierr = pio_get_var(file, varid, start, count, ivar) - else - ierr = pio_put_var(file, varid, start, count, ivar) - end if - deallocate(ivar) - end select - deallocate(start,count) -end subroutine vara_{DIMS}d_handler - -subroutine string_handler_for_var_0d(file, varid, strlen, msg) - use pio, only : file_desc_t, pio_get_var, pio_put_var - use pio_msg_mod, only : PIO_MSG_GETVAR_0D - implicit none - type(file_desc_t) :: file - integer, intent(in) :: varid, strlen, msg - character(len=strlen) :: str - integer :: ierr - - if(msg==PIO_MSG_GETVAR_0D) then - ierr = pio_get_var(file, varid, str ) - else - ierr = pio_put_var(file, varid, str ) - end if -end subroutine string_handler_for_var_0d - -subroutine var_0d_handler (ios, msg) - use pio, only : iosystem_desc_t, file_desc_t, pio_get_var, pio_put_var - use pio_kinds, only : i4, r4, r8, pio_offset - use 
pio_msg_mod, only : lookupfile, pio_msg_getvar_0d - use pio_support, only : debugAsync, piodie -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - - type(iosystem_desc_t), intent(inout) :: ios - integer, intent(in) ::msg - type(file_desc_t), pointer :: file - integer :: fh, varid, ierr, itype, strlen, dimcnt - - real(r4) :: rvar - real(r8) :: dvar - integer(i4) :: ivar - - call mpi_bcast(fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - file=> lookupfile(fh) - - select case(itype) - case (TYPETEXT) - call mpi_bcast(strlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call string_handler_for_var_0d (file, varid, strlen, msg) - case (TYPEREAL) - if(msg == pio_msg_getvar_0D) then - ierr = pio_get_var(file, varid, rvar) - else - ierr = pio_put_var(file, varid, rvar) - end if - case (TYPEDOUBLE) - if(msg == pio_msg_getvar_0D) then - ierr = pio_get_var(file, varid, dvar) - else - ierr = pio_put_var(file, varid, dvar) - end if - case (TYPEINT) - if(msg == pio_msg_getvar_0D) then - ierr = pio_get_var(file, varid, ivar) - else - ierr = pio_put_var(file, varid, ivar) - end if - end select - -end subroutine var_0d_handler - -! DIMS 1,2,3,4,5 -subroutine string_handler_for_var_{DIMS}d (file, varid, strlen, dims, msg) - use pio, only : file_desc_t, pio_get_var, pio_put_var - use pio_msg_mod, only : pio_msg_getvar_{DIMS}D - implicit none - type(file_desc_t) :: file - integer, intent(in) :: varid, strlen, dims({DIMS}), msg - - character(len=strlen), allocatable :: str{DIMSTR} - integer :: ierr - -#if({DIMS} == 1) - allocate(str(dims(1))) -#elif({DIMS} == 2) - allocate(str(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(str(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(str(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(str(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg == PIO_MSG_GETVAR_{DIMS}D) then - ierr = pio_get_var(file, varid, str ) - else - ierr = pio_put_var(file, varid, str ) - end if - deallocate(str) - -end subroutine string_handler_for_var_{DIMS}d - -! DIMS 1,2,3,4,5 -subroutine string_handler_for_vara_{DIMS}d (file, varid, start, count, strlen, dims, msg) - use pio_msg_mod, only : pio_msg_getvara_{DIMS}d - use pio, only : file_desc_t, pio_get_var, pio_put_var - implicit none - type(file_desc_t) :: file - integer, intent(in) :: varid, strlen, start({DIMS}), count({DIMS}), dims({DIMS}), msg - - character(len=strlen), allocatable :: str{DIMSTR} - integer :: ierr - -#if({DIMS} == 1) - allocate(str(dims(1))) -#elif({DIMS} == 2) - allocate(str(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(str(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(str(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(str(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg==pio_msg_getvara_{DIMS}d) then - ierr = pio_get_var(file, varid, start, count, str ) - else - str = ' ' - ierr = pio_put_var(file, varid, start, count, str ) - end if - deallocate(str) - -end subroutine string_handler_for_vara_{DIMS}d - - -! 
DIMS 1,2,3,4,5 -subroutine var_{DIMS}d_handler (ios, msg) - use pio, only : iosystem_desc_t, file_desc_t, pio_get_var, pio_put_var - use pio_kinds, only : i4, r4, r8, pio_offset - use pio_msg_mod, only : lookupfile, pio_msg_getvar_{DIMS}d - use pio_support, only : debugAsync -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - - type(iosystem_desc_t), intent(inout) :: ios - integer, intent(in) :: msg - - type(file_desc_t), pointer :: file - integer :: fh, varid, ierr, itype, strlen, dimcnt - integer, allocatable :: dims(:) - - real(r4), allocatable :: rvar{DIMSTR} - real(r8), allocatable :: dvar{DIMSTR} - integer(i4), allocatable :: ivar{DIMSTR} - - call mpi_bcast(fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - allocate(dims({DIMS})) - call mpi_bcast(dims, {DIMS}, mpi_integer, ios%compmaster, ios%intercomm, ierr) - - - file=> lookupfile(fh) - - select case(itype) - case (TYPETEXT) - call mpi_bcast(strlen, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call string_handler_for_var_{DIMS}d (file, varid, strlen, dims, msg) - case (TYPEREAL) -#if({DIMS} == 1) - allocate(rvar(dims(1))) -#elif({DIMS} == 2) - allocate(rvar(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(rvar(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(rvar(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(rvar(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg==pio_msg_getvar_{DIMS}d ) then - ierr = pio_get_var(file, varid, rvar) - else - ierr = pio_put_var(file, varid, rvar) - end if - deallocate(rvar) - case (TYPEDOUBLE) -#if({DIMS} == 1) - allocate(dvar(dims(1))) -#elif({DIMS} == 2) - allocate(dvar(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(dvar(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(dvar(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(dvar(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg==pio_msg_getvar_{DIMS}d ) then - ierr = pio_get_var(file, varid, dvar) - else - ierr = pio_put_var(file, varid, dvar) - end if - deallocate(dvar) - case (TYPEINT) -#if({DIMS} == 1) - allocate(ivar(dims(1))) -#elif({DIMS} == 2) - allocate(ivar(dims(1),dims(2))) -#elif({DIMS} == 3) - allocate(ivar(dims(1),dims(2),dims(3))) -#elif({DIMS} == 4) - allocate(ivar(dims(1),dims(2),dims(3),dims(4))) -#elif({DIMS} == 5) - allocate(ivar(dims(1),dims(2),dims(3),dims(4),dims(5))) -#endif - if(msg==pio_msg_getvar_{DIMS}d ) then - ierr = pio_get_var(file, varid, ivar) - else - ierr = pio_put_var(file, varid, ivar) - end if - deallocate(ivar) - end select - -end subroutine var_{DIMS}d_handler - diff --git a/src/externals/pio1/pio/pio_msg_mod.F90 b/src/externals/pio1/pio/pio_msg_mod.F90 deleted file mode 100644 index e393bb2b2f7..00000000000 --- a/src/externals/pio1/pio/pio_msg_mod.F90 +++ /dev/null @@ -1,448 +0,0 @@ -#define __PIO_FILE__ "pio_msg_mod.F90" -module pio_msg_mod - use pio_kinds - use pio_types - use pio_support, only : piodie, DebugAsync - - implicit none - private - public :: pio_msg_handler_init, pio_msg_handler - - - public :: add_to_file_list, lookupfile, delete_from_file_list, lookupiodesc, add_to_iodesc_list, delete_from_iodesc_list - -! 
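The deleted pio_msg_getput_callbacks.F90.in above is a genf90.pl template rather than plain Fortran: the `! DIMS 1,2,3,4,5` directives and the `{DIMS}`/`{DIMSTR}` (and, later, `{TYPE}`/`{VTYPE}`) placeholders are expanded into one concrete subroutine per rank and type before compilation. The exact behavior of genf90.pl is not shown in this diff, so the following minimal Python sketch of the `{DIMS}`/`{DIMSTR}` expansion is an illustration under that assumption only; `TEMPLATE` and `expand` are made-up names.

```python
# Rough sketch (assumed behavior) of a genf90-style expansion: the template
# body is emitted once per value listed in the "! DIMS 1,2,3,4,5" directive,
# with {DIMS} replaced by the rank and {DIMSTR} by a deferred-shape spec.
TEMPLATE = """subroutine vara_{DIMS}d_handler(ios, msg)
  real(r4), allocatable :: rvar{DIMSTR}
end subroutine vara_{DIMS}d_handler
"""

def expand(template, dims_list):
    chunks = []
    for d in dims_list:
        dimstr = "(" + ",".join([":"] * d) + ")"      # "(:)", "(:,:)", ...
        chunks.append(template.replace("{DIMS}", str(d))
                              .replace("{DIMSTR}", dimstr))
    return "\n".join(chunks)

print(expand(TEMPLATE, [1, 2, 3, 4, 5]))
```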
PIO ASYNC MESSAGE TAGS - integer, parameter, public :: pio_msg_create_file = 300 - integer, parameter, public :: pio_msg_open_file = 301 - integer, parameter, public :: pio_msg_close_file = 302 - integer, parameter, public :: pio_msg_def_dim = 310 - integer, parameter, public :: pio_msg_def_var = 312 - integer, parameter, public :: pio_msg_enddef = 313 - integer, parameter, public :: pio_msg_redef = 314 - integer, parameter, public :: pio_msg_initdecomp_dof = 315 - - integer, parameter, public :: pio_msg_writedarray = 320 - integer, parameter, public :: pio_msg_readdarray = 325 - integer, parameter, public :: pio_msg_inquire = 330 - integer, parameter, public :: pio_msg_inq_att = 331 - integer, parameter, public :: pio_msg_inq_attname = 332 - integer, parameter, public :: pio_msg_inq_varid = 333 - integer, parameter, public :: pio_msg_inq_varname = 334 - integer, parameter, public :: pio_msg_inq_vardimid = 335 - integer, parameter, public :: pio_msg_inq_varnatts = 336 - integer, parameter, public :: pio_msg_inq_varndims = 337 - integer, parameter, public :: pio_msg_inq_vartype = 338 - integer, parameter, public :: pio_msg_inq_dimid = 339 - integer, parameter, public :: pio_msg_inq_dimlen = 340 - integer, parameter, public :: pio_msg_inq_dimname = 341 - integer, parameter, public :: pio_msg_inq_attlen = 342 - integer, parameter, public :: pio_msg_seterrorhandling = 350 - - integer, parameter, public :: pio_msg_getvar1 = 360 - integer, parameter, public :: pio_msg_getvar_0d = 361 - integer, parameter, public :: pio_msg_getvar_1d = 362 - integer, parameter, public :: pio_msg_getvar_2d = 363 - integer, parameter, public :: pio_msg_getvar_3d = 364 - integer, parameter, public :: pio_msg_getvar_4d = 365 - integer, parameter, public :: pio_msg_getvar_5d = 366 - - integer, parameter, public :: pio_msg_getvara_1d = 367 - integer, parameter, public :: pio_msg_getvara_2d = 368 - integer, parameter, public :: pio_msg_getvara_3d = 369 - integer, parameter, public :: pio_msg_getvara_4d = 370 - integer, parameter, public :: pio_msg_getvara_5d = 371 - - integer, parameter, public :: pio_msg_putvar1 = 380 - integer, parameter, public :: pio_msg_putvar_0d = 381 - integer, parameter, public :: pio_msg_putvar_1d = 382 - integer, parameter, public :: pio_msg_putvar_2d = 383 - integer, parameter, public :: pio_msg_putvar_3d = 384 - integer, parameter, public :: pio_msg_putvar_4d = 385 - integer, parameter, public :: pio_msg_putvar_5d = 386 - - integer, parameter, public :: pio_msg_putvara_1d = 387 - integer, parameter, public :: pio_msg_putvara_2d = 388 - integer, parameter, public :: pio_msg_putvara_3d = 389 - integer, parameter, public :: pio_msg_putvara_4d = 390 - integer, parameter, public :: pio_msg_putvara_5d = 391 - - integer, parameter, public :: pio_msg_getatt = 400 - integer, parameter, public :: pio_msg_getatt_1d = 401 - integer, parameter, public :: pio_msg_putatt = 402 - integer, parameter, public :: pio_msg_putatt_1d = 403 - - integer, parameter, public :: PIO_MSG_SYNC_FILE = 500 - integer, parameter, public :: PIO_MSG_FREEDECOMP = 502 - - integer, parameter, public :: pio_msg_exit = 999 - - - type :: file_desc_list - type(file_desc_t), pointer :: file => null() - type(file_desc_list), pointer :: next => null() - end type file_desc_list - - type(file_desc_list), target, save :: top_file - - type :: io_desc_list - integer :: index - type(io_desc_t), pointer :: iodesc => null() - type(io_desc_list), pointer :: next => null() - end type io_desc_list - - type(io_desc_list), target, save :: top_iodesc 
- - integer :: io_comm, iorank - -contains - - subroutine pio_msg_handler_init(io_comm_in, io_rank_in) - integer, intent(in) :: io_comm_in, io_rank_in - - io_comm = io_comm_in - iorank = io_rank_in - top_iodesc%index = 1 - - end subroutine pio_msg_handler_init - - - subroutine pio_msg_handler(numcomps, iosystem) -! use pio_types, only : -#ifdef TIMING - use perf_mod ! _EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none - integer, intent(in) :: numcomps - type(iosystem_desc_t), target :: iosystem(numcomps) - type(iosystem_desc_t), pointer :: ios - integer :: msg = 0, ierr -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - integer :: status(MPI_STATUS_SIZE) - integer :: req(numcomps) - integer :: index - -#ifdef TIMING - call t_startf('pio_msg_mod') -#endif - if(iorank==0) then - req(:) = MPI_REQUEST_NULL - do index=1,numcomps - ios=>iosystem(index) - if(ios%io_comm .ne. mpi_comm_null) then - call mpi_irecv(msg, 1, mpi_integer, ios%comproot, 1, ios%union_comm, req(index), ierr) - end if - enddo - end if - do while(msg /= -1) - if(iorank==0) then - if(Debugasync) print *,__PIO_FILE__,__LINE__, ' waiting' - call mpi_waitany(numcomps, req, index, status, ierr) - if(Debugasync) print *,__PIO_FILE__,__LINE__, ' recieved on ', index - end if - - call mpi_bcast(index, 1, mpi_integer, 0, io_comm, ierr) - ios => iosystem(index) - - if(Debugasync) print *,__PIO_FILE__,__LINE__, index, ios%intercomm - call mpi_bcast(msg, 1, mpi_integer, 0, io_comm, ierr) - - if(debugasync) print *,__PIO_FILE__,__LINE__, msg ,' recieved on ', index - select case(msg) - case (PIO_MSG_CREATE_FILE) - call create_file_handler(ios) - case (PIO_MSG_OPEN_FILE) - call open_file_handler(ios) - case (PIO_MSG_INITDECOMP_DOF) - call initdecomp_dof_handler(ios) - case (PIO_MSG_WRITEDARRAY) - call writedarray_handler(ios) - case (PIO_MSG_READDARRAY) - call readdarray_handler(ios) - case (PIO_MSG_SETERRORHANDLING) - call seterrorhandling_handler(ios) - case (PIO_MSG_GETVAR1) - call var1_handler(ios, msg) - case (PIO_MSG_GETVAR_0d) - call var_0d_handler(ios, msg) - case (PIO_MSG_GETVAR_1d) - call var_1d_handler(ios, msg) - case (PIO_MSG_GETVAR_2d) - call var_2d_handler(ios, msg) - case (PIO_MSG_GETVAR_3d) - call var_3d_handler(ios, msg) - case (PIO_MSG_GETVAR_4d) - call var_4d_handler(ios, msg) - case (PIO_MSG_GETVAR_5d) - call var_5d_handler(ios, msg) - case (PIO_MSG_GETVARA_1d) - call vara_1d_handler(ios, msg) - case (PIO_MSG_GETVARA_2d) - call vara_2d_handler(ios, msg) - case (PIO_MSG_GETVARA_3d) - call vara_3d_handler(ios, msg) - case (PIO_MSG_GETVARA_4d) - call vara_4d_handler(ios, msg) - case (PIO_MSG_GETVARA_5d) - call vara_5d_handler(ios, msg) - - case (PIO_MSG_PUTVAR1) - call var1_handler(ios, msg) - case (PIO_MSG_PUTVAR_0d) - call var_0d_handler(ios, msg) - case (PIO_MSG_PUTVAR_1d) - call var_1d_handler(ios, msg) - case (PIO_MSG_PUTVAR_2d) - call var_2d_handler(ios, msg) - case (PIO_MSG_PUTVAR_3d) - call var_3d_handler(ios, msg) - case (PIO_MSG_PUTVAR_4d) - call var_4d_handler(ios, msg) - case (PIO_MSG_PUTVAR_5d) - call var_5d_handler(ios, msg) - - case (PIO_MSG_PUTVARA_1d) - call vara_1d_handler(ios, msg) - case (PIO_MSG_PUTVARA_2d) - call vara_2d_handler(ios, msg) - case (PIO_MSG_PUTVARA_3d) - call vara_3d_handler(ios, msg) - case (PIO_MSG_PUTVARA_4d) - call vara_4d_handler(ios, msg) - case (PIO_MSG_PUTVARA_5d) - call vara_5d_handler(ios, msg) - case (PIO_MSG_GETATT) - call att_handler(ios, msg) - case (PIO_MSG_GETATT_1D) - call att_1d_handler(ios, msg) - case (PIO_MSG_PUTATT) - call 
att_handler(ios, msg) - case (PIO_MSG_PUTATT_1D) - call att_1d_handler(ios, msg) - case (PIO_MSG_FREEDECOMP) - call freedecomp_handler(ios) - case (PIO_MSG_EXIT) - print *,'PIO Exiting' -! call mpi_barrier(ios%io_comm,ierr) - call finalize_handler(ios) - exit - case default - call pio_callback_handler(ios,msg) - end select - if(iorank==0) then - call mpi_irecv(msg, 1, mpi_integer, ios%comproot, 1, ios%union_comm, req(index), ierr) - end if - end do - -#ifdef TIMING - call t_stopf('pio_msg_mod') - call t_finalizef() -#endif - - - if(Debugasync) print *,__PIO_FILE__,__LINE__ - call mpi_finalize(ierr) - stop - - end subroutine pio_msg_handler - - - subroutine add_to_file_list(file) - type(file_desc_t), pointer :: file - type(file_desc_list), pointer :: list_item - - list_item=> top_file - - if(associated(list_item%file)) then - do while(associated(list_item%file) .and. associated(list_item%next)) - if(Debugasync) print *,__PIO_FILE__,__LINE__,list_item%file%fh - list_item => list_item%next - end do - if(associated(list_item%file)) then - allocate(list_item%next) - list_item=>list_item%next - nullify(list_item%next) - end if - end if - if(Debugasync) print *,__PIO_FILE__,__LINE__,file%fh - list_item%file => file - - end subroutine add_to_file_list - - - subroutine add_to_iodesc_list(iodesc) - type(io_desc_t), pointer :: iodesc - type(io_desc_list), pointer :: list_item - integer :: index - - - list_item=> top_iodesc - - index=top_iodesc%index - - if(associated(list_item%iodesc)) then - do while(associated(list_item%iodesc) .and. associated(list_item%next)) - list_item => list_item%next - index = index+1 - end do - if(associated(list_item%iodesc)) then -! id = max(id+1, list_item%iodesc%async_id+1) - allocate(list_item%next) - list_item=>list_item%next - index = index+1 - nullify(list_item%next) - end if - - - - if(debugasync) print *,__PIO_FILE__,__LINE__,index - end if - iodesc%async_id=index - list_item%index=index - list_item%iodesc => iodesc - - - if(debugasync) print *,__PIO_FILE__,__LINE__,index,list_item%iodesc%async_id - - end subroutine add_to_iodesc_list - - - function delete_from_iodesc_list(id) result(iodesc) - integer, intent(in) :: id - type(io_desc_list), pointer :: list_item, previtem, nextitem - type(io_desc_t), pointer :: iodesc - - list_item=> top_iodesc - nullify(previtem) - do while(associated(list_item%iodesc) ) - if(abs(list_item%iodesc%async_id) == id) then - iodesc=>list_item%iodesc - - iodesc%async_id=-1 - nullify(list_item%iodesc) - if(associated(previtem)) then - if(associated(list_item%next)) then - previtem%next => list_item%next - else - nullify(previtem%next) - end if - deallocate(list_item) - else if(associated(list_item%next)) then - nextitem => list_item%next - list_item%iodesc=>nextitem%iodesc - list_item%index = nextitem%index - if(associated(nextitem%next)) then - list_item%next => nextitem%next - else - nullify(list_item%next) - end if - deallocate(nextitem) - - end if - - exit - end if - if(associated(list_item%next)) then - previtem=>list_item - list_item=>list_item%next - else - if(debugasync) then - list_item=> top_iodesc - do while(associated(list_item)) - print *,__PIO_FILE__,__LINE__,id,list_item%index,list_item%iodesc%async_id - list_item=>list_item%next - enddo - endif - - call piodie(__PIO_FILE__,__LINE__,'delete_from_iodesc_list',id) - end if - end do - - end function delete_from_iodesc_list - - subroutine delete_from_file_list(fh) - integer, intent(in) :: fh - type(file_desc_list), pointer :: list_item, previtem - integer :: fh1 - - fh1 = 
abs(fh) - - list_item=> top_file - nullify(previtem) - do while(associated(list_item%file) ) - if(abs(list_item%file%fh) == fh1) then - nullify(list_item%file) - if(associated(previtem)) then - if(associated(list_item%next)) then - previtem%next=>list_item%next - else - nullify(previtem%next) - end if - deallocate(list_item) - end if - exit - end if - if(associated(list_item%next)) then - previtem=>list_item - list_item=>list_item%next - else - call piodie(__PIO_FILE__,__LINE__,'delete_from_file_list') - end if - end do - - end subroutine delete_from_file_list - - - - function lookupfile(fh) result(file) - type(file_desc_t), pointer :: file - integer, intent(in) :: fh - type(file_desc_list), pointer :: list_item - - integer :: fh1 - - fh1 = abs(fh) - - list_item=> top_file - - do while(associated(list_item%file) ) - if(abs(list_item%file%fh) == fh1) then - file => list_item%file - exit - end if - list_item=>list_item%next - end do - - - end function lookupfile - - function lookupiodesc(async_id) result(iodesc) - type(io_desc_t), pointer :: iodesc - integer, intent(in) :: async_id - type(io_desc_list), pointer :: list_item - - - list_item=> top_iodesc - nullify(iodesc) - do while(associated(list_item%iodesc) ) - - if(debugasync) print *,__PIO_FILE__,__LINE__,list_item%index,async_id,list_item%iodesc%async_id - if(abs(list_item%iodesc%async_id) == async_id) then - iodesc => list_item%iodesc - if(debugasync) print *,__PIO_FILE__,__LINE__,async_id,list_item%index,iodesc%write%n_elemtype - exit - end if - list_item=>list_item%next - end do - if(.not.associated(iodesc)) then - call piodie(__PIO_FILE__,__LINE__) - end if - - end function lookupiodesc - -end module pio_msg_mod - diff --git a/src/externals/pio1/pio/pio_nf_utils.F90 b/src/externals/pio1/pio/pio_nf_utils.F90 deleted file mode 100644 index e6241821f87..00000000000 --- a/src/externals/pio1/pio/pio_nf_utils.F90 +++ /dev/null @@ -1,118 +0,0 @@ -module pio_nf_utils - use pio_types, only : file_desc_t, var_desc_t - use nf_mod, only : pio_inq_vartype - use pionfget_mod, only : pio_get_var=>get_var - use pionfput_mod, only : pio_put_var=>put_var - use pio_types, only : pio_int, pio_real, pio_double, pio_char - use pio_kinds, only : i4, r4, r8 - use pio_support, only : piodie - - implicit none - private -!> -!! @private -!< - public :: copy_pio_var - interface copy_pio_var - module procedure copy_pio_var01d - module procedure copy_pio_var2d - end interface - -contains - -subroutine copy_pio_var01d(ifh, ofh, ivid, ovid, length, strlength) - type(File_Desc_t) :: Ifh, Ofh - type(Var_Desc_t) :: ivid, ovid - integer, intent(in) :: length - integer, intent(in), optional :: strlength - integer(i4), allocatable :: ival(:) - real(r4), allocatable :: rval(:) - real(r8), allocatable :: dval(:) - character(len=length), allocatable :: cval(:) - - integer :: ierr - -! ierr = pio_inq_vartype(ifh, ivid, itype) -! 
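The add_to_file_list/lookupfile/delete_from_file_list and *_iodesc_list routines in the deleted pio_msg_mod above form a small registry: open files and decompositions are kept on hand-rolled singly linked lists anchored at top_file and top_iodesc, and integer handles carried in async messages are resolved by walking the list and matching abs(fh) (or async_id). Below is a minimal Python sketch of that lookup pattern; the class names are illustrative, and it is simplified (new entries are pushed at the head, whereas the deleted code walks to the tail).

```python
# Minimal sketch of the registry pattern used by the deleted pio_msg_mod:
# handles received in async messages are resolved to live objects by
# walking a linked list and comparing on abs(handle).
class _Node:
    def __init__(self, key, obj):
        self.key, self.obj, self.next = key, obj, None

class HandleRegistry:
    def __init__(self):
        self.head = None

    def add(self, handle, obj):
        node = _Node(abs(handle), obj)
        node.next, self.head = self.head, node      # simplified: push at head

    def lookup(self, handle):
        node = self.head
        while node is not None:
            if node.key == abs(handle):
                return node.obj
            node = node.next
        raise KeyError(handle)                      # lookupiodesc calls piodie here

files = HandleRegistry()
files.add(-3, "file descriptor for fh=3")
print(files.lookup(3))
```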
ierr = pio_inq_vartype(ofh, ovid, otype) - - - if( ivid%type.ne.ovid%type) then - write(6,*) 'WARNING: copy_pio_var coercing type ', ivid%type, ' to ',ovid%type - end if - select case(ivid%type) - case (PIO_int) - allocate(ival(length)) - ierr = pio_get_var(ifh, ivid%varid, ival) - ierr = pio_put_var(ofh, ovid%varid, ival) - deallocate(ival) - case (PIO_real) - allocate(rval(length)) - ierr = pio_get_var(ifh, ivid%varid, rval) - ierr = pio_put_var(ofh, ovid%varid, rval) - deallocate(rval) - case (PIO_double) - allocate(dval(length)) - ierr = pio_get_var(ifh, ivid%varid, dval) - ierr = pio_put_var(ofh, ovid%varid, dval) - deallocate(dval) - case (PIO_char) - if(present(strlength)) then - allocate(cval(strlength)) - else - allocate(cval(1)) - end if - ierr = pio_get_var(ifh, ivid%varid, cval) - ierr = pio_put_var(ofh, ovid%varid, cval) - - deallocate(cval) - end select -end subroutine copy_pio_var01d - -subroutine copy_pio_var2d(ifh, ofh, ivid, ovid, length) - type(File_Desc_t) :: Ifh, Ofh - type(Var_Desc_t) :: ivid, ovid - integer, intent(in) :: length(:) -! integer, intent(in), optional :: strlength - integer(i4), allocatable :: ival(:,:) - real(r4), allocatable :: rval(:,:) - real(r8), allocatable :: dval(:,:) -! character(len=length), allocatable :: cval(:,:) - - integer :: ierr - -! ierr = pio_inq_vartype(ifh, ivid, itype) -! ierr = pio_inq_vartype(ofh, ovid, otype) - - - if( ivid%type.ne.ovid%type) then - write(6,*) 'WARNING: copy_pio_var coercing type ', ivid%type, ' to ',ovid%type - end if - select case(ivid%type) - case (PIO_int) - allocate(ival(length(1),length(2))) - ierr = pio_get_var(ifh, ivid%varid, ival) - ierr = pio_put_var(ofh, ovid%varid, ival) - deallocate(ival) - case (PIO_real) - allocate(rval(length(1),length(2))) - ierr = pio_get_var(ifh, ivid%varid, rval) - ierr = pio_put_var(ofh, ovid%varid, rval) - deallocate(rval) - case (PIO_double) - allocate(dval(length(1),length(2))) - ierr = pio_get_var(ifh, ivid%varid, dval) - ierr = pio_put_var(ofh, ovid%varid, dval) - deallocate(dval) - case (PIO_char) - ! if(present(strlength)) then - ! allocate(cval(strlength)) - ! else - ! allocate(cval(1)) - ! end if - ! ierr = pio_get_var(ifh, ivid%varid, cval) - ! ierr = pio_put_var(ofh, ovid%varid, cval) - ! deallocate(cval) - end select -end subroutine copy_pio_var2d - -end module pio_nf_utils diff --git a/src/externals/pio1/pio/pio_spmd_utils.F90.in b/src/externals/pio1/pio/pio_spmd_utils.F90.in deleted file mode 100644 index 1819ace570c..00000000000 --- a/src/externals/pio1/pio/pio_spmd_utils.F90.in +++ /dev/null @@ -1,619 +0,0 @@ -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief SPMD helper routines -!< -#define __PIO_FILE__ "pio_spmd_utils.F90" -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! Module pio_spmd_utils -! -! Point-to-point implementations of -! MPI collectives, for improved performance -! and/or robustness on certain platforms -! -! -! 20090508 Initial version (based on spmd_utils in CAM) - P. Worley -! -! Code added as a work around for poor rsend performance on cray systems with -! Gemini interconnect -! -#ifdef _NO_MPI_RSEND -#define MPI_RSEND MPI_SEND -#define mpi_rsend mpi_send -#define MPI_IRSEND MPI_ISEND -#define mpi_irsend mpi_isend -#endif - -module pio_spmd_utils - - use pio_kinds - use pio_support, only: CheckMPIReturn -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none - - private -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - - public :: pio_swapm - - interface pio_swapm - ! 
TYPE int,real,double,long - module procedure pio_swapm_{TYPE} - end interface - - - character(len=*), parameter :: modName='pio_spmd_utils' - -contains -!======================================================================== -! - - integer function pair(np,p,k) - - integer np,p,k,q - q = ieor(p,k) - if(q.gt.np-1) then - pair = -1 - else - pair = q - endif - return - - end function pair - -! -!======================================================================== -! - - integer function ceil2(n) - integer n,p - p=1 - do while(p.lt.n) - p=p*2 - enddo - ceil2=p - return - end function ceil2 - -! -!======================================================================== -! -! TYPE int,real,double,long - subroutine pio_swapm_{TYPE} ( nprocs, mytask, & - sndbuf, sbuf_siz, sndlths, sdispls, stypes, & - rcvbuf, rbuf_siz, rcvlths, rdispls, rtypes, & - comm, comm_hs, comm_isend, comm_maxreq ) - -!----------------------------------------------------------------------- -! -!> Purpose: -!! Reduced version of original swapm (for swap of multiple messages -!! using MPI point-to-point routines), more efficiently implementing a -!! subset of the swap protocols. -!! -!! Method: -!! comm_protocol: -!! comm_isend == .true.: use nonblocking send, else use blocking send -!! comm_hs == .true.: use handshaking protocol -!! comm_maxreq: -!! =-1,0: do not limit number of outstanding send/receive requests -!! >0: do not allow more than min(comm_maxreq, steps) outstanding -!! nonblocking send requests or nonblocking receive requests -!! -!! Author of original version: P. Worley -!! Ported from CAM: P. Worley, May 2009 -!< -!----------------------------------------------------------------------- -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf ! _EXTERNAL -#endif -!----------------------------------------------------------------------- - implicit none -!---------------------------Input arguments-------------------------- -! - integer, intent(in) :: nprocs ! size of communicator - integer, intent(in) :: mytask ! MPI task id with communicator - integer, intent(in) :: sbuf_siz ! size of send buffer - integer, intent(in) :: rbuf_siz ! size of receive buffer - - integer, intent(in) :: sndlths(0:nprocs-1)! length of outgoing message - integer, intent(in) :: sdispls(0:nprocs-1)! offset from beginning of send - ! buffer where outgoing messages - ! should be sent from - integer, intent(in) :: stypes(0:nprocs-1) ! MPI data types - integer, intent(in) :: rcvlths(0:nprocs-1)! length of incoming messages - integer, intent(in) :: rdispls(0:nprocs-1)! offset from beginning of receive - ! buffer where incoming messages - ! should be placed - integer, intent(in) :: rtypes(0:nprocs-1) ! MPI data types - {VTYPE}, intent(in) :: sndbuf(sbuf_siz) ! outgoing message buffer - - integer, intent(in) :: comm ! MPI communicator - logical, intent(in) :: comm_hs ! handshaking protocol? - logical, intent(in) :: comm_isend ! nonblocking send protocol? - integer, intent(in) :: comm_maxreq ! maximum number of outstanding - ! nonblocking requests - -!---------------------------Output arguments-------------------------- -! - {VTYPE}, intent(out) :: rcvbuf(rbuf_siz) ! incoming message buffer - -#ifndef _MPISERIAL -! -!---------------------------Local workspace------------------------------------------- -! - character(len=*), parameter :: subName=modName//'::pio_swapm_{TYPE}' - - integer :: steps ! number of swaps to initiate - integer :: swapids(nprocs) ! MPI process id of swap partners - integer :: p ! process index - integer :: istep ! 
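The pair()/ceil2() helpers above define the communication schedule that pio_swapm_{TYPE} (whose body follows) uses: each task exchanges with the partner obtained by XORing its rank with the step number, over ceil2(nprocs)-1 steps, skipping steps whose partner falls outside the communicator; the real code also drops partners with nothing to send or receive. A small Python rendering of that ordering, included only to make the schedule concrete (`swap_schedule` is an illustrative name):

```python
def ceil2(n):
    # smallest power of two >= n (as in the deleted ceil2)
    p = 1
    while p < n:
        p *= 2
    return p

def pair(nprocs, step, rank):
    # XOR partner for this step; -1 means "sit this step out" (as in pair())
    q = rank ^ step
    return q if q <= nprocs - 1 else -1

def swap_schedule(nprocs, rank):
    # ordered partner list for one task; pio_swapm additionally skips
    # partners with zero send and receive lengths
    return [p for step in range(1, ceil2(nprocs))
            if (p := pair(nprocs, step, rank)) >= 0]

print(swap_schedule(6, 2))   # -> [3, 0, 1, 4, 5]
```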
loop index - integer :: tag ! MPI message tag - integer :: offset_t ! MPI message tag offset, for addressing - ! message conflict bug (if necessary) - integer :: offset_s ! index of message beginning in - ! send buffer - integer :: offset_r ! index of message beginning in - ! receive buffer - integer :: sndids(nprocs) ! send request ids - integer :: rcvids(nprocs) ! receive request ids - integer :: hs_rcvids(nprocs) ! handshake receive request ids - - integer :: maxreq, maxreqh ! maximum number of outstanding - ! nonblocking requests (and half) - integer :: hs ! handshake variable - integer :: rstep ! "receive" step index - - logical :: handshake, sendd ! protocol option flags - - integer :: ier ! return error status - integer :: status(MPI_STATUS_SIZE) ! MPI status -! -!------------------------------------------------------------------------------------- -! -#ifdef _NO_PIO_SWAPM_TAG_OFFSET - offset_t = 0 -#else - offset_t = nprocs -#endif -! - ! if necessary, send to self - if (sndlths(mytask) > 0) then - tag = mytask + offset_t - - offset_r = rdispls(mytask)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(mytask), rtypes(mytask), & - mytask, tag, comm, rcvids(1), ier ) - call CheckMPIReturn(subName,ier) - - offset_s = sdispls(mytask)+1 - call mpi_send( sndbuf(offset_s), sndlths(mytask), stypes(mytask), & - mytask, tag, comm, ier ) - call CheckMPIReturn(subName,ier) - - call mpi_wait( rcvids(1), status, ier ) - call CheckMPIReturn(subName,ier) - endif - - ! calculate swap partners and communication ordering - steps = 0 - do istep=1,ceil2(nprocs)-1 - p = pair(nprocs,istep,mytask) - if (p >= 0) then - if (sndlths(p) > 0 .or. rcvlths(p) > 0) then - steps = steps + 1 - swapids(steps) = p - end if - end if - end do - - if (steps .eq. 0) return - - ! identify communication protocol - if (comm_isend) then - sendd = .false. - else - sendd = .true. - endif - handshake = comm_hs - - ! identify maximum number of outstanding nonblocking requests to permit - if (steps .eq. 1) then - maxreq = 1 - maxreqh = 1 - else - if (comm_maxreq >= -1) then - maxreq = comm_maxreq - else - maxreq = steps - endif - - if ((maxreq .le. steps) .and. (maxreq > 0)) then - if (maxreq > 1) then - maxreqh = maxreq/2 - else - maxreq = 2 - maxreqh = 1 - endif - else - maxreq = steps - maxreqh = steps - endif - endif - -! Four protocol options: -! (1) handshaking + blocking sends - if ((handshake) .and. (sendd)) then - - ! Initialize hs variable - hs = 1 - - ! Post initial handshake receive requests - do istep=1,maxreq - p = swapids(istep) - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MPI_INTEGER, p, tag, comm, & - hs_rcvids(istep), ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - - ! Post initial receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - call CheckMPIReturn(subName,ier) - - call mpi_send ( hs, 1, MPI_INTEGER, p, tag, comm, ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! 
Submit new rsend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_wait ( hs_rcvids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - - call mpi_rsend ( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, ier ) - call CheckMPIReturn(subName,ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - call CheckMPIReturn(subName,ier) - endif - - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - - ! Submit a new handshake irecv request - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MPI_INTEGER, p, tag, comm, & - hs_rcvids(rstep), ier ) - call CheckMPIReturn(subName,ier) - endif - - ! Submit a new irecv request - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - call CheckMPIReturn(subName,ier) - - call mpi_send ( hs, 1, MPI_INTEGER, p, tag, comm, ier ) - call CheckMPIReturn(subName,ier) - endif - endif - - endif -! - enddo - - ! wait for rest of receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - -! (2) handshaking + nonblocking sends - elseif ((handshake) .and. (.not. sendd)) then - - ! Initialize hs variable - hs = 1 - - ! Post initial handshake receive requests - do istep=1,maxreq - p = swapids(istep) - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MPI_INTEGER, p, tag, comm, & - hs_rcvids(istep), ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - - ! Post initial receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - call CheckMPIReturn(subName,ier) - - call mpi_send ( hs, 1, MPI_INTEGER, p, tag, comm, ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new irsend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_wait ( hs_rcvids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - - call mpi_irsend( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, sndids(istep), ier ) - call CheckMPIReturn(subName,ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - call CheckMPIReturn(subName,ier) - endif - - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - - ! Submit a new handshake irecv request - if (sndlths(p) > 0) then - tag = mytask + offset_t - call mpi_irecv( hs, 1, MPI_INTEGER, p, tag, comm, & - hs_rcvids(rstep), ier ) - call CheckMPIReturn(subName,ier) - endif - - ! Submit a new irecv request - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - call CheckMPIReturn(subName,ier) - - call mpi_send ( hs, 1, MPI_INTEGER, p, tag, comm, ier ) - call CheckMPIReturn(subName,ier) - endif - endif - - ! 
Wait for outstanding i(r)send request to complete - p = swapids(istep-maxreqh) - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep-maxreqh), status, ier ) - call CheckMPIReturn(subName,ier) - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - endif - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - -! (3) no handshaking + blocking sends - elseif ((.not. handshake) .and. (sendd)) then - - ! Post receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new send request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_send( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, ier ) - call CheckMPIReturn(subName,ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - call CheckMPIReturn(subName,ier) - endif - - ! Submit a new irecv request - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - call CheckMPIReturn(subName,ier) - endif - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - -! (4) no handshaking + nonblocking sends - elseif ((.not. handshake) .and. (.not. sendd)) then - - ! Post receive requests - do istep=1,maxreq - p = swapids(istep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(istep), ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - rstep = maxreq - - ! Send (and start receiving) data - do istep=1,steps - p = swapids(istep) - - ! Submit new isend request - if (sndlths(p) > 0) then - tag = mytask + offset_t - - offset_s = sdispls(p)+1 - call mpi_isend( sndbuf(offset_s), sndlths(p), stypes(p), & - p, tag, comm, sndids(istep), ier ) - call CheckMPIReturn(subName,ier) - endif - - if (istep > maxreqh) then - - ! Wait for oldest irecv request to complete - p = swapids(istep-maxreqh) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep-maxreqh), status, ier ) - call CheckMPIReturn(subName,ier) - endif - - ! Submit a new irecv request - if (rstep < steps) then - rstep = rstep + 1 - p = swapids(rstep) - if (rcvlths(p) > 0) then - tag = p + offset_t - - offset_r = rdispls(p)+1 - call mpi_irecv( rcvbuf(offset_r), rcvlths(p), rtypes(p), & - p, tag, comm, rcvids(rstep), ier ) - call CheckMPIReturn(subName,ier) - endif - endif - - ! 
Wait for outstanding i(r)send request to complete - p = swapids(istep-maxreqh) - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep-maxreqh), status, ier ) - call CheckMPIReturn(subName,ier) - endif - - endif - - enddo - - ! wait for rest of send and receive requests to complete - do istep=steps-maxreqh+1,steps - p = swapids(istep) - if (rcvlths(p) > 0) then - call mpi_wait( rcvids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - endif - if (sndlths(p) > 0) then - call mpi_wait( sndids(istep), status, ier ) - call CheckMPIReturn(subName,ier) - endif - enddo - - endif - -#endif - - return - - end subroutine pio_swapm_{TYPE} - -! -!======================================================================== -! - - - -end module pio_spmd_utils diff --git a/src/externals/pio1/pio/pio_support.F90 b/src/externals/pio1/pio/pio_support.F90 deleted file mode 100644 index d18cb9a1cb4..00000000000 --- a/src/externals/pio1/pio/pio_support.F90 +++ /dev/null @@ -1,536 +0,0 @@ -#define __PIO_FILE__ "pio_support.F90" -!> -!! @file pio_support.F90 -!! @brief internal code for compiler workarounds, aborts and debug functions -!! -!! $Revision$ -!! $LastChangedDate$ -!< -!> -!! \def _NO_MPI_RSEND -!! Code added as a work around for poor rsend performance on cray systems with -!! Gemini interconnect -!< -#ifdef BGP -#define BGx -#endif -#ifdef BGL -#define BGx -#endif -#ifdef BGQ -#define BGx -#endif -#ifdef _NO_MPI_RSEND -#define MPI_RSEND MPI_SEND -#define mpi_rsend mpi_send -#define MPI_IRSEND MPI_ISEND -#define mpi_irsend mpi_isend -#endif - -module pio_support - use pio_kinds -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none - private -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - public :: piodie - public :: CheckMPIreturn - public :: pio_readdof - public :: pio_writedof - public :: pio_fc_gather_offset - - - - logical, public :: Debug=.FALSE. - logical, public :: DebugIO=.FALSE. - logical, public :: DebugAsync=.FALSE. - integer,private,parameter :: versno = 1001 - - character(len=*), parameter :: modName='pio_support' - -contains - - subroutine piodie (file,line, msg, ival1, msg2, ival2, msg3, ival3, mpirank) -#ifdef CPRINTEL - ! tracebackqq uses optional arguments, so *must* have an explicit - ! interface. - use ifcore, only: tracebackqq -#endif - !----------------------------------------------------------------------- - ! Purpose: - ! - ! Abort the model for abnormal termination - ! - ! Author: Jim Edwards - ! - ! Change History - ! 20070608 R. Loy added optional args - !----------------------------------------------------------------------- - ! $Id$ - !----------------------------------------------------------------------- - !----------------------------------------------------------------------- - implicit none - !----------------------------------------------------------------------- - ! - ! Arguments - ! 
- character(len=*), intent(in) :: file - integer,intent(in) :: line - character(len=*), intent(in), optional :: msg,msg2,msg3 - integer,intent(in),optional :: ival1,ival2,ival3, mpirank - - character(len=*), parameter :: subName=modName//'::pio_die' - integer :: ierr, myrank=-1 - - if(present(mpirank)) myrank=mpirank - - if (present(ival3)) then - write(6,*) subName,':: myrank=',myrank,': ERROR: ',file,':',line,': ', & - msg,ival1,msg2,ival2,msg3,ival3 - else if (present(msg3)) then - write(6,*) subName,':: myrank=',myrank,': ERROR: ',file,':',line,': ', & - msg,ival1,msg2,ival2,msg3 - else if (present(ival2)) then - write(6,*) subName,':: myrank=',myrank,': ERROR: ',file,':',line,': ',msg,ival1,msg2,ival2 - else if (present(msg2)) then - write(6,*) subName,':: myrank=',myrank,': ERROR: ',file,':',line,': ',msg,ival1,msg2 - else if (present(ival1)) then - write(6,*) subName,':: myrank=',myrank,': ERROR: ',file,':',line,': ',msg,ival1 - else if (present(msg)) then - write(6,*) subName,':: myrank=',myrank,': ERROR: ',file,':',line,': ',msg - else - write(6,*) subName,':: myrank=',myrank,': ERROR: ',file,':',line,': (no message)' - endif - - -#if defined(CPRXLF) && !defined(BGx) - close(5) ! needed to prevent batch jobs from hanging in xl__trbk - call xl__trbk() -#elif defined(CPRINTEL) - - - ! An exit code of -1 is a special value that prevents this subroutine - ! from aborting the run. - call tracebackqq(user_exit_code=-1) - -#endif - - ! passing an argument of 1 to mpi_abort will lead to a STOPALL output - ! error code of 257 - call mpi_abort (MPI_COMM_WORLD, 1, ierr) - -#ifdef CPRNAG - stop -#else - call abort -#endif - - - end subroutine piodie - -!============================================= -! CheckMPIreturn: -! -! Check and prints an error message -! if an error occured in a MPI subroutine. -!============================================= - subroutine CheckMPIreturn(locmesg, errcode, file, line) - - character(len=*), intent(in) :: locmesg - integer(i4), intent(in) :: errcode - character(len=*),optional :: file - integer, intent(in),optional :: line - character(len=MPI_MAX_ERROR_STRING) :: errorstring - - integer(i4) :: errorlen - - integer(i4) :: ierr - if (errcode .ne. MPI_SUCCESS) then - call MPI_Error_String(errcode,errorstring,errorlen,ierr) - write(*,*) TRIM(ADJUSTL(locmesg))//errorstring(1:errorlen) - if(present(file).and.present(line)) then - call piodie(file,line) - endif - end if - end subroutine CheckMPIreturn - - subroutine pio_writedof (file, DOF, comm, punit) - !----------------------------------------------------------------------- - ! Purpose: - ! - ! Write a DOF to standard format - ! - ! Author: T Craig - ! - !----------------------------------------------------------------------- - !----------------------------------------------------------------------- - implicit none - !----------------------------------------------------------------------- - ! - ! Arguments - ! - character(len=*),intent(in) :: file - integer(kind=pio_offset) ,intent(in) :: dof(:) - integer ,intent(in) :: comm - integer,optional,intent(in) :: punit - - character(len=*), parameter :: subName=modName//'::pio_writedof' - integer ierr, myrank, npes, m, n, unit - integer(kind=pio_offset), pointer :: wdof(:) - integer(kind=pio_offset), pointer :: sdof1d(:) - integer(kind=pio_offset) :: sdof, sdof_tmp(1) - integer :: status(MPI_STATUS_SIZE) - integer, parameter :: masterproc = 0 - - integer :: pio_offset_kind ! kind of pio_offset -#ifndef _NO_FLOW_CONTROL - integer :: & - rcv_request ,&! 
request id - hs = 1 ! MPI handshaking variable -#endif - - unit = 81 - if (present(punit)) then - unit = punit - endif - - call MPI_COMM_SIZE(comm,npes,ierr) - call CheckMPIReturn(subName,ierr) - call MPI_COMM_RANK(comm,myrank,ierr) - call CheckMPIReturn(subName,ierr) - sdof = size(dof) - - allocate(sdof1d(0:npes-1)) - sdof1d = -1 - sdof_tmp(1) = sdof - - if(kind(sdof_tmp) == kind(comm)) then - pio_offset_kind = MPI_INTEGER - else - pio_offset_kind = MPI_INTEGER8 - end if - - - - call pio_fc_gather_offset(sdof_tmp, 1, PIO_OFFSET_KIND, & - sdof1d, 1, PIO_OFFSET_KIND,masterproc,comm) - - if (myrank == masterproc) then - write(6,*) subName,': writing file ',trim(file),' unit=',unit - open(unit,file=file) - write(unit,*) versno,npes - endif - - do n = 0,npes-1 - if (myrank == masterproc) then - allocate(wdof(sdof1d(n))) - endif - if (myrank == masterproc .and. n == masterproc) then - wdof = dof - else - if (myrank == n .and. sdof > 0) then -#ifndef _NO_FLOW_CONTROL - call MPI_RECV(hs,1,MPI_INTEGER,masterproc,n,comm,status,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_writedof mpi_recv') -#endif - call MPI_SEND(dof,int(sdof),PIO_OFFSET_KIND,masterproc,n,comm,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_writedof mpi_send') - endif - if (myrank == masterproc .and. sdof1d(n) > 0) then -#ifndef _NO_FLOW_CONTROL - call MPI_IRECV(wdof,int(sdof1d(n)),PIO_OFFSET_KIND,n,n,comm,rcv_request,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_writedof mpi_irecv') - call MPI_SEND(hs,1,MPI_INTEGER,n,n,comm,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_writedof mpi_send') - call MPI_WAIT(rcv_request,status,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_writedof mpi_wait') -#else - call MPI_RECV(wdof,int(sdof1d(n)),PIO_OFFSET_KIND,n,n,comm,status,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_writedof mpi_recv') -#endif - endif - endif - if (myrank == masterproc) then - write(unit,*) n,sdof1d(n) - do m = 1,sdof1d(n) - write(unit,*) wdof(m) - enddo - deallocate(wdof) - endif - enddo - - if (myrank == masterproc) then - close(unit) - endif - - deallocate(sdof1d) - - end subroutine pio_writedof - - subroutine pio_readdof (file, DOF, comm, punit) - !----------------------------------------------------------------------- - ! Purpose: - ! - ! Read a DOF to standard format - ! - ! Author: T Craig - ! - ! Change History - ! - !----------------------------------------------------------------------- - ! $Id$ - !----------------------------------------------------------------------- - !----------------------------------------------------------------------- - implicit none - !----------------------------------------------------------------------- - ! - ! Arguments - ! - character(len=*),intent(in) :: file - integer(kind=pio_offset),pointer:: dof(:) - integer ,intent(in) :: comm - integer,optional,intent(in) :: punit - - character(len=*), parameter :: subName=modName//'::pio_readdof' - integer :: ierr, myrank, npes, m, n, unit, rn - integer(kind=pio_offset) :: sdof - integer :: rversno, rnpes - integer(kind=pio_offset), pointer :: wdof(:) - integer, parameter :: masterproc = 0 - integer :: status(MPI_STATUS_SIZE) - integer :: pio_offset_kind ! 
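The deleted pio_writedof above serializes a decomposition to a plain text file: a header record with the version number (versno = 1001) and the task count, then for each rank a record with the rank and its DOF length followed by one degree-of-freedom index per record, with the data gathered to the master task under the same handshake flow control used elsewhere. A minimal Python sketch of just that text layout (the function name and the in-memory representation are illustrative, and list-directed Fortran output is approximated by space-separated values):

```python
def write_dof(path, dofs_by_rank, versno=1001):
    # Sketch of the layout produced by the deleted pio_writedof:
    # "versno npes", then per rank "rank ndof" followed by one DOF per line.
    with open(path, "w") as f:
        f.write(f"{versno} {len(dofs_by_rank)}\n")
        for rank, dof in enumerate(dofs_by_rank):
            f.write(f"{rank} {len(dof)}\n")
            for d in dof:
                f.write(f"{d}\n")

write_dof("dof.txt", [[1, 2, 3], [4, 5, 6]])
```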
kind of pio_offset - - unit = 81 - if (present(punit)) then - unit = punit - endif - - call MPI_COMM_SIZE(comm,npes,ierr) - call CheckMPIReturn(subName,ierr) - call MPI_COMM_RANK(comm,myrank,ierr) - call CheckMPIReturn(subName,ierr) - - if(kind(sdof) == kind(comm)) then - pio_offset_kind = MPI_INTEGER - else - pio_offset_kind = MPI_INTEGER8 - end if - - - allocate(dof(0)) ! default for pes with no dof - - if (myrank == masterproc) then - write(6,*) subName,': reading file ',trim(file),' unit=',unit - open(unit,file=file,status='old') - read(unit,*) rversno,rnpes - write(6,*) subName,': reading file ',trim(file),' versno=',rversno - if (rnpes /= npes) then - call piodie(__PIO_FILE__,__LINE__,'pio_readdof npes incorrect') - endif - - do n = 0,npes-1 - read(unit,*) rn,sdof - if (rn /= n) then - call piodie(__PIO_FILE__,__LINE__,'pio_readdof rn out of sync') - endif - allocate(wdof(sdof)) - do m = 1,sdof - read(unit,*) wdof(m) - enddo - if (n == masterproc) then - deallocate(dof) - allocate(dof(sdof)) - dof = wdof - else - call MPI_SEND(sdof,1,PIO_OFFSET_KIND,n,n,comm,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_readdof mpi_send1') - if (sdof > 0) then - call MPI_SEND(wdof,int(sdof),PIO_OFFSET_KIND,n,npes+n,comm,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_readdof mpi_send2') - endif - endif - deallocate(wdof) - enddo - close(unit) - else - call MPI_RECV(sdof,1,PIO_OFFSET_KIND,masterproc,myrank,comm,status,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_readdof mpi_recv1') - if (sdof > 0) then - deallocate(dof) - allocate(dof(sdof)) - call MPI_RECV(dof,int(sdof),PIO_OFFSET_KIND,masterproc,npes+myrank,comm,status,ierr) - if (ierr /= MPI_SUCCESS) call piodie(__PIO_FILE__,__LINE__,' pio_readdof mpi_recv2') - endif - endif - - end subroutine pio_readdof - -! -!======================================================================== -! - - subroutine pio_fc_gather_offset ( sendbuf, sendcnt, sendtype, & - recvbuf, recvcnt, recvtype, & - root, comm, flow_cntl ) - -!----------------------------------------------------------------------- -! -!> Purpose: -!! Gather collective with additional flow control, so as to -!! be more robust when used with high process counts. -!! -!! Method: -!! If flow_cntl optional parameter -!! < 0: use MPI_Gather -!! >= 0: use point-to-point with handshaking messages and -!! preposting receive requests up to -!! max(min(1,flow_cntl),max_gather_block_size) -!! ahead if optional flow_cntl parameter is present. -!! Otherwise, fc_gather_flow_cntl is used in its place. -!! Default value is 64. -!! -!! Author of original version: P. Worley -!! Ported from CAM: P. Worley, Jan 2010 -!< -!----------------------------------------------------------------------- - -!----------------------------------------------------------------------- - implicit none - -!---------------------------Parameters --------------------------------- -! - integer, parameter :: max_gather_block_size = 64 - -!---------------------------Input arguments-------------------------- -! - integer(kind=pio_offset), intent(in) :: sendbuf(:) ! outgoing message buffer - integer, intent(in) :: sendcnt ! size of send buffer - integer, intent(in) :: sendtype ! MPI type of send buffer - integer, intent(in) :: recvcnt ! size of receive buffer - integer, intent(in) :: recvtype ! MPI type of receive buffer - integer, intent(in) :: root ! gather destination - integer, intent(in) :: comm ! MPI communicator - integer,optional, intent(in):: flow_cntl ! 
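The purpose notes just above describe pio_fc_gather_offset as a flow-controlled replacement for MPI_Gather: the root preposts at most gather_block_size receive requests (bounded by max_gather_block_size = 64), signals each sender with a one-integer handshake when it may rsend, and, once the window is full, waits on the oldest outstanding request before posting the next. The body that follows implements this with a small ring of request slots. A short MPI-free Python sketch of just that recv-ahead window; the event labels and `gather_schedule` name are illustrative.

```python
def gather_schedule(nprocs, root, block_size=64):
    # Mirrors the recv-ahead window in the deleted pio_fc_gather_offset:
    # at most `block_size` receive requests are outstanding at the root.
    preposts = min(nprocs - 1, block_size)
    pending = []                       # ranks with an outstanding irecv
    events = []
    count = 0
    for p in range(nprocs):
        if p == root:
            continue                   # root copies its own data locally
        count += 1
        if count > preposts:
            events.append(("wait", pending.pop(0)))   # free the oldest slot
        pending.append(p)
        events.append(("irecv+handshake", p))         # post irecv, then send hs
    events.extend(("wait", p) for p in pending)       # drain remaining requests
    return events

print(gather_schedule(nprocs=6, root=0, block_size=2))
```

The handshake keeps senders from flooding the root with unexpected messages, which is the robustness-at-high-process-count concern the deleted comments cite as the reason for preferring this over a plain gather.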
flow control variable - -!---------------------------Output arguments-------------------------- -! - integer(kind=pio_offset), intent(out) :: recvbuf(*) ! incoming message buffer -! -!---------------------------Local workspace--------------------------------- -! - character(len=*), parameter :: subName=modName//'::pio_fc_gather_int' - - logical :: fc_gather ! use explicit flow control? - integer :: hs ! handshake variable - integer :: gather_block_size ! number of preposted receive requests - - integer :: nprocs ! size of communicator - integer :: mytask ! MPI task id with communicator - integer :: mtag ! MPI message tag - integer :: p, i ! loop indices - integer :: displs ! offset into receive buffer - integer :: count, preposts, head, tail ! variables controlling recv-ahead logic - - integer :: rcvid(max_gather_block_size) ! receive request ids - - integer :: ier ! return error status - integer :: status(MPI_STATUS_SIZE) ! MPI status - -! -!------------------------------------------------------------------------------------- -! - if ( present(flow_cntl) ) then - if (flow_cntl >= 0) then - gather_block_size = min(max(1,flow_cntl),max_gather_block_size) - fc_gather = .true. - else - fc_gather = .false. - endif - else -#ifndef _NO_FLOW_CONTROL - gather_block_size = max(1,max_gather_block_size) - fc_gather = .true. -#else - fc_gather = .false. -#endif - endif - - if (fc_gather) then - - ! Determine task id and size of communicator - call mpi_comm_rank (comm, mytask, ier) - call mpi_comm_size (comm, nprocs, ier) - - ! Initialize tag and hs variable -#ifdef _NO_PIO_SWAPM_TAG_OFFSET - mtag = 0 -#else - mtag = 2*nprocs -#endif - hs = 1 - - if (root .eq. mytask) then - -! prepost gather_block_size irecvs, and start receiving data - preposts = min(nprocs-1, gather_block_size) - head = 0 - count = 0 - do p=0, nprocs-1 - if (p .ne. root) then - if (recvcnt > 0) then - count = count + 1 - if (count > preposts) then - tail = mod(head,preposts) + 1 - call mpi_wait (rcvid(tail), status, ier) - end if - head = mod(head,preposts) + 1 - displs = p*recvcnt - call mpi_irecv ( recvbuf(displs+1), recvcnt, & - recvtype, p, mtag, comm, rcvid(head), & - ier ) - call mpi_send ( hs, 1, MPI_INTEGER, p, mtag, comm, ier ) - end if - end if - end do - -! copy local data - displs = mytask*recvcnt - do i=1,sendcnt - recvbuf(displs+i) = sendbuf(i) - enddo - -! wait for final data - do i=1,min(count,preposts) - call mpi_wait (rcvid(i), status, ier) - enddo - - else - - if (sendcnt > 0) then - call mpi_recv ( hs, 1, MPI_INTEGER, root, mtag, comm, & - status, ier ) - call mpi_rsend ( sendbuf, sendcnt, sendtype, root, mtag, & - comm, ier ) - end if - - endif - call CheckMPIReturn(subName,ier) - - else - - call mpi_gather (sendbuf, sendcnt, sendtype, & - recvbuf, recvcnt, recvtype, & - root, comm, ier) - call CheckMPIReturn(subName,ier) - - endif - - return - - end subroutine pio_fc_gather_offset - -end module pio_support diff --git a/src/externals/pio1/pio/pio_types.F90 b/src/externals/pio1/pio/pio_types.F90 deleted file mode 100644 index a0227ca220e..00000000000 --- a/src/externals/pio1/pio/pio_types.F90 +++ /dev/null @@ -1,418 +0,0 @@ -#define __PIO_FILE__ "pio_types.F90" -!> -!! @file -!! @brief Derived datatypes and constants for PIO -!! -!! $Revision: 943 $ -!! $LastChangedDate: 2014-02-14 10:20:17 -0600 (Fri, 14 Feb 2014) $ -!< -module pio_types - use pio_kinds - -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi, only : MPI_COMM_NULL, MPI_INFO_NULL ! 
_EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none - private -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - !------------------------------------------- - ! data structure to describe decomposition - !------------------------------------------- - type, public :: DecompMap_t -#ifdef SEQUENCE - sequence -#endif - integer(i4) :: start - integer(i4) :: length - end type - -!> -!! @defgroup PIO_rearr_method PIO_rearr_method -!! @public -!! @brief The three choices to control rearrangement are: -!! @details -!! - PIO_rearr_none : Do not use any form of rearrangement -!! - PIO_rearr_box : Use a PIO internal box rearrangement -!> - integer(i4), public, parameter :: PIO_rearr_none = 0 - integer(i4), public, parameter :: PIO_rearr_box = 1 - -!> -!! @defgroup PIO_rearr_comm_t PIO_rearr_comm_t -!! @public -!! @brief The two choices for rearranger communication -!! @details -!! - PIO_rearr_comm_p2p : Point to point -!! - PIO_rearr_comm_coll : Collective -!> - enum, bind(c) - enumerator :: PIO_rearr_comm_p2p = 0 - enumerator :: PIO_rearr_comm_coll - end enum - -!> -!! @defgroup PIO_rearr_comm_dir PIO_rearr_comm_dir -!! @public -!! @brief The four choices for rearranger communication direction -!! @details -!! - PIO_rearr_comm_fc_2d_enable : COMM procs to IO procs and vice versa -!! - PIO_rearr_comm_fc_1d_comp2io: COMM procs to IO procs only -!! - PIO_rearr_comm_fc_1d_io2comp: IO procs to COMM procs only -!! - PIO_rearr_comm_fc_2d_disable: Disable flow control -!> - enum, bind(c) - enumerator :: PIO_rearr_comm_fc_2d_enable = 0 - enumerator :: PIO_rearr_comm_fc_1d_comp2io - enumerator :: PIO_rearr_comm_fc_1d_io2comp - enumerator :: PIO_rearr_comm_fc_2d_disable - end enum - -!> -!! @defgroup PIO_rearr_comm_fc_options PIO_rearr_comm_fc_options -!! @brief Type that defines the PIO rearranger options -!! @details -!! - enable_hs : Enable handshake (true/false) -!! - enable_isend : Enable Isends (true/false) -!! - max_pend_req : Maximum pending requests (To indicated unlimited -!! number of requests use PIO_REARR_COMM_UNLIMITED_PEND_REQ) -!> - type, public :: PIO_rearr_comm_fc_opt_t - logical :: enable_hs ! Enable handshake? - logical :: enable_isend ! Enable isends? - integer :: max_pend_req ! Maximum pending requests - end type PIO_rearr_comm_fc_opt_t - - integer, public, parameter :: PIO_REARR_COMM_UNLIMITED_PEND_REQ = -1 -!> -!! @defgroup PIO_rearr_options PIO_rearr_options -!! @brief Type that defines the PIO rearranger options -!! @details -!! - comm_type : @copydoc PIO_rearr_comm_t -!! - fcd : @copydoc PIO_rearr_comm_dir -!! - comm_fc_opts : @copydoc PIO_rearr_comm_fc_options -!> - type, public :: PIO_rearr_opt_t - integer :: comm_type - integer :: fcd ! Flow control direction - type(PIO_rearr_comm_fc_opt_t) :: comm_fc_opts_comp2io - type(PIO_rearr_comm_fc_opt_t) :: comm_fc_opts_io2comp - end type PIO_rearr_opt_t - - public :: PIO_rearr_comm_p2p, PIO_rearr_comm_coll,& - PIO_rearr_comm_fc_2d_enable, PIO_rearr_comm_fc_1d_comp2io,& - PIO_rearr_comm_fc_1d_io2comp, PIO_rearr_comm_fc_2d_disable - - !------------------------------------ - ! a file descriptor data structure - !------------------------------------ -!> -!! @public -!! @defgroup iosystem_desc_t -!! @brief A defined PIO system descriptor created by @ref PIO_init (see pio_types) -!< - type, public :: IOSystem_desc_t -#ifdef SEQUENCE - sequence -#endif - - integer(i4) :: union_comm=MPI_COMM_NULL ! The intracomm union of comp and io communicators (for async only) - integer(i4) :: IO_comm=MPI_COMM_NULL ! 
The IO communicator - integer(i4) :: comp_comm=MPI_COMM_NULL ! The Compute communicator - integer(i4) :: intercomm=MPI_COMM_NULL ! the intercomm (may be MPI_COMM_NULL) - - integer(i4) :: my_comm=MPI_COMM_NULL ! either comp_comm or intercomm - integer(i4) :: num_tasks ! number of tasks - integer(i4) :: num_iotasks ! total number of IO tasks - integer(i4) :: num_aiotasks ! number of actual IO tasks - integer(i4) :: num_comptasks - - integer(i4) :: union_rank - integer(i4) :: comp_rank ! the computational rank - integer(i4) :: io_rank ! the io rank if io_rank = -1 not an IO processor -! - integer(i4) :: Info=MPI_INFO_NULL ! MPI-IO info structure - integer(i4) :: numOST ! The number of Object Storage Target (OST) to use. This is a hardware raid device. - -! rank of the io and comp roots in the intercomm - integer(i4) :: IOMaster ! The intercom of the io_rank 0 - integer(i4) :: compMaster ! The intercom of the comp_rank 0 - -! rank of the io and comp roots in the union_comm - integer(i4) :: IOroot ! The union_rank of the io_rank 0 - integer(i4) :: comproot ! The union_rank of the comp_rank 0 - - logical(log_kind) :: IOproc ! .true. if an IO processor - logical(log_kind) :: UseRearranger ! .true. if data rearrangement is necessary - logical(log_kind) :: async_interface=.false. ! .true. if using the async interface model - integer(i4) :: rearr ! type of rearranger - ! e.g. rearr_{none,box} - !integer(i4), dimension(IOSYS_REARR_OPT_MAX) :: rearr_opts ! Rearranger options - see PIO_rearr_opt_t for details - type(PIO_rearr_opt_t) :: rearr_opts ! Rearranger options - integer(i4) :: error_handling ! how pio handles errors - integer(i4),pointer :: ioranks(:) => null() ! the computational ranks for the IO tasks - - ! This holds the IODESC - end type - - type iosystem_list_t - type(iosystem_desc_t), pointer :: this_iosystem => null() - end type iosystem_list_t - - - integer, parameter :: MAX_IO_SYSTEMS=6 - type(iosystem_list_t), save :: iosystems(MAX_IO_SYSTEMS) - -!> -!! @private -!! @struct io_data_list -!! @brief Linked list of buffers for pnetcdf non-blocking interface -!> - type, public :: io_data_list - integer :: request - real(r4), pointer :: data_real(:) => null() - integer(i4), pointer :: data_int(:) => null() - real(r8), pointer :: data_double(:) => null() - type(io_data_list), pointer :: next => null() - end type io_data_list - - -!> -!! @defgroup file_desc_t -!! File descriptor returned by \ref PIO_openfile or \ref PIO_createfile (see pio_types) -!! -!> - type, public :: File_desc_t - type(iosystem_desc_t), pointer :: iosystem => null() - type(io_data_list), pointer :: data_list_top => null() ! used for non-blocking pnetcdf calls - type(io_data_list), pointer :: data_list_end => null() ! used for non-blocking pnetcdf calls - integer :: buffsize=0 - integer(i4) :: fh - integer(kind=PIO_OFFSET) :: offset ! offset into file - integer(i4) :: iotype ! Type of IO to perform see parameter statement below - logical :: file_is_open = .false. - end type File_desc_t - - - !------------------------------------------------------ - ! data structure to describe a data movement operator - !------------------------------------------------------ - type, public :: IO_desc2_t -#ifdef SEQUENCE - sequence -#endif - integer(i4) :: fileTYPE ! MPI data types for file - integer(i4) :: elemTYPE - integer(i4) :: n_words - integer(kind=pio_offset) :: n_elemTYPE - end type IO_desc2_t - -!> -!! @private -!! @defgroup iodesc_generate io descriptors, generating -!! 
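The rearranger option types defined earlier in this file (PIO_rearr_comm_fc_opt_t and PIO_rearr_opt_t) are plain derived types that the caller fills in before handing them to the library. A hedged sketch of a typical configuration, using only names declared in this file; how the filled structure is then passed to the library (for example via PIO_init or PIO_set_rearr_opts) differs between PIO versions and is not shown:

  type(PIO_rearr_opt_t) :: ropts

  ropts%comm_type = PIO_rearr_comm_p2p                 ! point-to-point rearrangement
  ropts%fcd       = PIO_rearr_comm_fc_2d_enable        ! flow control in both directions

  ropts%comm_fc_opts_comp2io%enable_hs    = .true.     ! handshake before each send
  ropts%comm_fc_opts_comp2io%enable_isend = .false.
  ropts%comm_fc_opts_comp2io%max_pend_req = 64

  ropts%comm_fc_opts_io2comp%enable_hs    = .true.
  ropts%comm_fc_opts_io2comp%enable_isend = .false.
  ropts%comm_fc_opts_io2comp%max_pend_req = PIO_REARR_COMM_UNLIMITED_PEND_REQ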
@brief The io descriptor structure in defined in this subroutine -!! and subsequently used in @ref PIO_read_darray, @ref PIO_write_darray, -!! @ref PIO_put_var, @ref PIO_get_var calls (see pio_types). -!< - -!> -!! @public -!! @struct io_desc_t -!! @brief An io descriptor handle that is generated in @ref PIO_initdecomp -!! (see pio_types) -!< - type, public :: io_desc_t -#ifdef SEQUENCE - sequence -#endif - type(IO_desc2_t) :: Read - type(IO_desc2_t) :: Write - integer(kind=PIO_Offset), pointer :: start(:) => NULL() - integer(kind=PIO_Offset), pointer :: count(:) => NULL() - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - ! fields for box-based rearranger - ! should put this in its own derived type later - - integer :: baseTYPE - - integer, pointer :: dest_ioproc(:)=> NULL() ! for each dof - integer(kind=pio_offset), pointer :: dest_ioindex(:)=> NULL() ! for each dof - - - ! Values needed only on io procs - integer,pointer :: rfrom(:)=> NULL() ! rfrom(nrecvs)= rank of ith sender - integer,pointer :: rtype(:)=> NULL() ! rtype(nrecvs)=mpi types for receives - - - ! needed on all procs - integer,pointer :: scount(:)=> NULL() ! scount(num_iotasks)= # sends to ith ioproc - integer,pointer :: stype(:)=> NULL() ! stype(num_iotasks)=mpi type for sends - - !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - integer(i4) :: async_id - - - type (DecompMap_t) :: IOmap ! IO decomposition map - type (DecompMap_t) :: COMPmap ! Computational decomposition map - integer :: nrecvs ! valid for io procs - integer(kind=PIO_OFFSET) :: glen ! global length of array in words - integer(i4) :: compsize ! size of expected comp buffer - integer(i4) :: maxiobuflen ! size of largest iobuffer - integer(i4) :: ndof - integer(i4) :: padding - end type - -!> -!! @public -!! @defgroup var_desc_t -!! @brief A variable descriptor returned from @ref PIO_def_var (see pio_types) -!< - type, public :: Var_desc_t -#ifdef SEQUENCE - sequence -#endif - integer(i4) :: varID - integer(i4) :: rec ! This is a record number or pointer into the unlim dimension of the - ! netcdf file - integer(i4) :: type - integer(i4) :: ndims ! number of dimensions as defined on the netcdf file. - character(len=50) :: name ! vdc needed variable - end type - -!> -!! @defgroup PIO_iotype PIO_iotype -!! @public -!! @brief An integer parameter which controls the iotype -!! @details -!! - PIO_iotype_pbinary : Use MPI-IO to read/write C like binary file -!! - PIO_iotype_direct_pbinary: Use MPI-IO to read/write direct access binary files -!! - PIO_iotype_binary : serial read/write of binary files using 'base_node' -!! - PIO_iotype_pnetcdf : parallel read/write of pNetCDF files (netcdf3) -!! - PIO_iotype_netcdf : serial read/write of NetCDF files using 'base_node' (netcdf3) -!! - PIO_iotype_netcdf4c : parallel read/serial write of NetCDF4 (HDF5) files with data compression -!! - PIO_iotype_netcdf4p : parallel read/write of NETCDF4 (HDF5) files -!> - integer(i4), public, parameter :: & - PIO_iotype_pbinary = 1, &! use MPI-IO with data types to read/write C like binary files - PIO_iotype_direct_pbinary = 2, & !use MPI-IO with data types to read/write direct access binary files - PIO_iotype_binary = 4, & ! serial read/write of binary files using 'base_node' - PIO_iotype_pnetcdf = 5, & ! parallel read/write of pNetCDF files - PIO_iotype_netcdf = 6, & ! serial read/write of NetCDF file using 'base_node' - PIO_iotype_netcdf4c = 7, & ! 
netcdf4 (hdf5 format) file opened for compression (serial write access only) - PIO_iotype_netcdf4p = 8, & ! netcdf4 (hdf5 format) file opened in parallel (all netcdf4 files for read will be opened this way) - PIO_iotype_vdc2 = 10 ! VDC2 format file opened for compressed parallel write - - -! These are for backward compatability and should not be used or expanded upon - integer(i4), public, parameter :: & - iotype_pbinary = PIO_iotype_pbinary, & - iotype_direct_pbinary = PIO_iotype_direct_pbinary, & - iotype_binary = PIO_iotype_binary, & - iotype_pnetcdf = PIO_iotype_pnetcdf, & - iotype_netcdf = PIO_iotype_netcdf - - - -!> -!! @public -!! @defgroup PIO_error_method error_methods -!! @details -!! The three types of error handling methods are: -!! - PIO_INTERNAL_ERROR : abort on error from any task -!! - PIO_BCAST_ERROR : broadcast an error from io_rank 0 to all tasks in comm -!! - PIO_RETURN_ERROR : do nothing - allow the user to handle it -!< - integer(i4), public, parameter :: PIO_INTERNAL_ERROR = -51 - integer(i4), public, parameter :: PIO_BCAST_ERROR = -52 - integer(i4), public, parameter :: PIO_RETURN_ERROR = -53 - -!> -!! @public -!! @defgroup error_return error return codes -!! @brief : The error return code; ierr != PIO_noerr indicates -!! an error. (see @ref PIO_seterrorhandling ) -!> - -!> -!! @struct use_PIO_kinds -!! @brief The type of variable(s) associated with this iodesc. -!! @copydoc PIO_kinds -!< - -!> -!! @public -!! @defgroup PIO_kinds PIO_kinds -!! @brief The base types supported by PIO are: -!! @details -!! - PIO_double : 8-byte reals or double precision -!! - PIO_real : 4-byte reals -!! - PIO_int : 4-byte integers -!! - PIO_char : character -!< -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -#include /* _EXTERNAL */ -#endif - integer, public, parameter :: PIO_global = nf_global - integer, public, parameter :: PIO_unlimited = nf_unlimited - integer, public, parameter :: PIO_double = nf_double - integer, public, parameter :: PIO_real = nf_real - integer, public, parameter :: PIO_int = nf_int - integer, public, parameter :: PIO_char = nf_char - integer, public, parameter :: PIO_noerr = nf_noerr - integer, public, parameter :: PIO_WRITE = nf_write - integer, public, parameter :: PIO_nowrite = nf_nowrite - integer, public, parameter :: PIO_CLOBBER = nf_clobber - integer, public, parameter :: PIO_NOCLOBBER = nf_NOclobber - integer, public, parameter :: PIO_NOFILL = nf_nofill - integer, public, parameter :: PIO_MAX_NAME = nf_max_name - integer, public, parameter :: PIO_MAX_VAR_DIMS = min(128,nf_max_var_dims) - integer, public, parameter :: PIO_64BIT_OFFSET = nf_64bit_offset - integer, public, parameter :: PIO_64BIT_DATA = nf_64bit_data - -#else -#ifdef _NETCDF - integer, public, parameter :: PIO_global = nf90_global - integer, public, parameter :: PIO_unlimited = nf90_unlimited - integer, public, parameter :: PIO_double = nf90_double - integer, public, parameter :: PIO_real = nf90_real - integer, public, parameter :: PIO_int = nf90_int - integer, public, parameter :: PIO_char = nf90_char - integer, public, parameter :: PIO_noerr = nf90_noerr - integer, public, parameter :: PIO_WRITE = nf90_write - integer, public, parameter :: PIO_nowrite = nf90_nowrite - integer, public, parameter :: PIO_CLOBBER = nf90_clobber - integer, public, parameter :: PIO_NOCLOBBER = nf90_NOclobber - integer, public, parameter :: PIO_NOFILL = nf90_nofill - integer, public, parameter :: PIO_MAX_NAME = nf90_max_name - integer, public, parameter :: PIO_MAX_VAR_DIMS = nf90_max_var_dims - integer, public, 
parameter :: PIO_64BIT_OFFSET = nf90_64bit_offset - integer, public, parameter :: PIO_64BIT_DATA = 0 -#else - integer, public, parameter :: PIO_global = 0 - integer, public, parameter :: PIO_double = 6 - integer, public, parameter :: PIO_real = 5 - integer, public, parameter :: PIO_int = 4 - integer, public, parameter :: PIO_char = 2 - integer, public, parameter :: PIO_noerr = 0 - integer, public, parameter :: PIO_MAX_NAME = 25 - integer, public, parameter :: PIO_MAX_VAR_DIMS = 6 - integer, public, parameter :: PIO_CLOBBER = 10 - integer, public, parameter :: PIO_NOCLOBBER = 11 - integer, public, parameter :: PIO_WRITE = 20 - integer, public, parameter :: PIO_NOWRITE = 21 - integer, public, parameter :: PIO_64BIT_OFFSET = 0 - integer, public, parameter :: PIO_64BIT_DATA = 0 -#endif -#endif - integer, public, parameter :: PIO_num_OST = 16 - -end module pio_types diff --git a/src/externals/pio1/pio/pio_utils.F90 b/src/externals/pio1/pio/pio_utils.F90 deleted file mode 100644 index 690f81dafbc..00000000000 --- a/src/externals/pio1/pio/pio_utils.F90 +++ /dev/null @@ -1,117 +0,0 @@ -module pio_utils - use pio_types, only : file_desc_t, var_desc_t - use pio_types, only : pio_int, pio_real, pio_double, pio_char - use pio_types, only : iotype_netcdf, iotype_pnetcdf, PIO_internal_error - use pio_types, only : PIO_iotype_netcdf4p, pio_iotype_netcdf4c - use pio_types, only : PIO_bcast_error - use pio_kinds, only : i4, r4, r8 - use pio_support, only : checkmpireturn, piodie, Debug - -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none - private -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -#include /* _EXTERNAL */ -#endif -#endif - - public :: check_netcdf - public :: bad_iotype - - - -contains - - subroutine check_netcdf(File, status, filestr, line) - type(file_desc_t), intent(in) :: file - integer, intent(inout) :: status - character(len=*), intent(in) :: filestr - integer, intent(in) :: line - - integer :: mpierr, iotype - -! Three choices for error handling: -! 1: abort on error from any task PIO_INTERNAL_ERROR -! 2: broadcast an error from io_rank 0 PIO_BCAST_ERROR -! 3: do nothing - allow the user to handle it PIO_RETURN_ERROR -! - iotype = file%iotype - - if(Debug) call mpi_barrier(file%iosystem%union_comm, mpierr) - - select case(iotype) - case(iotype_pnetcdf) -#ifdef _PNETCDF - if(file%iosystem%error_handling==PIO_INTERNAL_ERROR) then - if(status /= nf_noerr) then - call piodie(filestr,line,trim(nfmpi_strerror(status))) - end if - else if(file%iosystem%error_handling==PIO_BCAST_ERROR) then - call MPI_BCAST(status,1,MPI_INTEGER,file%iosystem%iomaster,File%iosystem%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - -#endif - case(iotype_netcdf,pio_iotype_netcdf4p,pio_iotype_netcdf4c) -#ifdef _NETCDF - if(status /= nf90_noerr) then - print *,trim(nf90_strerror(status)) - endif - if(File%iosystem%error_handling==PIO_INTERNAL_ERROR) then - if(status /= nf90_noerr) then - call piodie(filestr,line,trim(nf90_strerror(status))) - end if - else if(file%iosystem%error_handling==PIO_BCAST_ERROR) then - call MPI_BCAST(status,1,MPI_INTEGER,file%iosystem%iomaster,File%iosystem%my_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if -#endif - end select - - end subroutine check_netcdf - - - -!> -!! 
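The three error-handling modes are applied in check_netcdf above. From the calling side, picking a mode and an iotype looks roughly like the following sketch; the constants (PIO_BCAST_ERROR, PIO_iotype_pnetcdf, PIO_CLOBBER, PIO_noerr) are the ones defined in pio_types, but the exact argument lists of PIO_seterrorhandling and PIO_createfile shown here are assumptions and may differ between PIO releases:

  type(iosystem_desc_t) :: ios
  type(file_desc_t)     :: file
  integer :: ierr

  ! broadcast errors from the IO root rather than aborting every task
  call PIO_seterrorhandling(ios, PIO_BCAST_ERROR)

  ! create a netcdf-3 file, written in parallel through pnetcdf
  ierr = PIO_createfile(ios, file, PIO_iotype_pnetcdf, 'history.nc', PIO_CLOBBER)
  if (ierr /= PIO_noerr) stop 'PIO_createfile failed'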
@private -!< - subroutine bad_iotype(iotype,file,line) - integer iotype - character(len=*) file - integer line - -#ifndef _PNETCDF - if (iotype==iotype_pnetcdf) then - call piodie(file,line,'PNETCDF not enabled in the build') - endif -#endif -#ifndef _NETCDF - if (iotype==iotype_netcdf) then - call piodie(file,line,'NETCDF not enabled in the build') - endif -#endif -#ifndef _NETCDF4 - if (iotype==PIO_iotype_netcdf4p .or. iotype==pio_iotype_netcdf4c) then - call piodie(file,line,'NETCDF4 not enabled in the build') - endif -#endif - print *,'Invalid iotype, value=',iotype - call piodie(file,line,'Quitting') - - end subroutine bad_iotype - - - -end module pio_utils diff --git a/src/externals/pio1/pio/piodarray.F90.in b/src/externals/pio1/pio/piodarray.F90.in deleted file mode 100644 index 384ee1fb7ef..00000000000 --- a/src/externals/pio1/pio/piodarray.F90.in +++ /dev/null @@ -1,1558 +0,0 @@ -#define __PIO_FILE__ 'piodarray' -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief Read and write Routines for decomposed data. -!< -module piodarray - use pio_types, only : file_desc_t, io_desc_t, var_desc_t, pio_noerr, iosystem_desc_t, & - pio_iotype_pbinary, pio_iotype_binary, pio_iotype_direct_pbinary, & - pio_iotype_netcdf, pio_iotype_pnetcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c, & - PIO_MAX_VAR_DIMS, pio_iotype_vdc2 - use pio_kinds - use pio_support - use pionfwrite_mod, only : write_nf - use pionfread_mod, only : read_nf - use nf_mod, only : pio_inq_varndims - use iompi_mod - use alloc_mod - use rearrange -#ifndef NO_C_SIZEOF - use iso_c_binding, only : c_sizeof ! _EXTERNAL -#else -#define c_sizeof(x) size(transfer (x, xxx_sizeof_data)) -#endif - - - - -#ifdef _COMPRESSION - use piovdc -#endif -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf !_EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif - - private - public :: pio_read_darray, pio_write_darray, darray_write_complete, pio_set_buffer_size_limit - -#if defined(NO_C_SIZEOF) - character, private :: xxx_sizeof_data(32) -#endif - - -!> -!! @defgroup PIO_write_darray PIO_write_darray -!! @brief The overloaded PIO_write_darray writes a distributed array to disk. -!< - interface PIO_write_darray -! TYPE real,int,double -! DIMS 1,2,3,4,5,6,7 - module procedure write_darray_{DIMS}d_{TYPE} - end interface - - -!> -!! @defgroup PIO_read_darray PIO_read_darray -!! @brief The overloaded PIO_read_darray function reads a distributed array from disk. -!< - interface PIO_read_darray -! TYPE real,int,double -! DIMS 1,2,3,4,5,6,7 - module procedure read_darray_{DIMS}d_{TYPE} - end interface - interface pio_set_buffer_size_limit - module procedure pio_set_buffer_size_limit_i4 - module procedure pio_set_buffer_size_limit_i8 - end interface -!> -!! @private -!< - interface add_data_to_buffer -! 
TYPE real,int,double - module procedure add_data_to_buffer_{TYPE} - end interface - -#ifdef _COMPRESSION - interface - subroutine WriteVDC2Var(iobuf, start, kount, iocomm, ts, lod, reflevel, iotasks, name ) bind(C) - use, intrinsic :: iso_c_binding - type(c_ptr), intent(in), value :: iobuf - integer(c_int), intent(in) :: start(3), kount(3) - integer(c_int), intent(in), value :: iocomm, ts, lod, reflevel, iotasks - type(c_ptr), intent(in), value :: name - end subroutine WriteVDC2Var - end interface - interface - subroutine ReadVDC2Var(iobuf, start, kount, iocomm, ts, lod, reflevel, iotasks, name) bind(C) - use, intrinsic :: iso_c_binding - type(c_ptr), intent(in), value :: iobuf - integer(c_int), intent(in) :: start(3), kount(3) - integer(c_int), intent(in), value :: iocomm, ts, lod, reflevel, iotasks - type(c_ptr), intent(in), value :: name - end subroutine ReadVDC2Var - end interface -#endif - - character(len=*), parameter, private :: modName='piodarray' - integer :: total_buffsize=0 - integer :: pio_buffer_size_limit= 100000000 ! 100MB default - -#ifdef MEMCHK -integer :: msize, rss, mshare, mtext, mstack, lastrss=0 -#endif - -contains - - subroutine pio_set_buffer_size_limit_i4(limit) - integer, intent(in) :: limit - - if(limit<0) then - call piodie(__PIO_FILE__,__LINE__,& - ' bad value to pio_set_buffer_size_limit') - end if - pio_buffer_size_limit=limit - - end subroutine pio_set_buffer_size_limit_i4 - - subroutine pio_set_buffer_size_limit_i8(limit) - integer(pio_offset), intent(in) :: limit - - if(limit<0) then - call piodie(__PIO_FILE__,__LINE__,' bad value to pio_set_buffer_size_limit') - end if - pio_buffer_size_limit=int(limit) - - end subroutine pio_set_buffer_size_limit_i8 - - -! TYPE real,int,double -!> -!! @public -!! @ingroup PIO_write_darray -!! @brief Writes a 1D array of type {TYPE}. -!! @details -!! @param File \ref file_desc_t -!! @param varDesc \ref var_desc_t -!! @param ioDesc \ref io_desc_t -!! @param array : The data to be written -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!! @param fillval : An optional fill value to fill holes in the data written -!< - subroutine write_darray_1d_{TYPE} (File,varDesc,ioDesc, array, iostat, fillval) - use pio_msg_mod, only : pio_msg_writedarray - ! !DESCRIPTION: - ! Writes a 2-d slab of TYPE to a netcdf file. - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! variable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! variable descriptor - - {VTYPE}, dimension(:), target, intent(in) :: & - array ! array to be written - - {VTYPE}, optional, intent(in) :: fillval ! rearrange receiver fill value - type(iosystem_desc_t), pointer :: ios - - integer(i4), intent(out) :: iostat - integer :: msg, ierr - integer :: hasfill, itype - - character(len=*), parameter :: subName=modName//'::write_darray_{TYPE}' - - ios => file%iosystem - if(ios%async_interface .and. .not. 
ios%ioproc) then - msg = PIO_MSG_WRITEDARRAY - - if(debugasync) print *,__PIO_FILE__,__LINE__, iodesc%async_id - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(vardesc%varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(vardesc%rec, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(vardesc%ndims, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(iodesc%async_id, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - itype = {MPITYPE} - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - if(debugasync) print *,__PIO_FILE__,__LINE__, {MPITYPE} - - if(present(fillval)) then - hasfill = 1 - call mpi_bcast(hasfill, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(fillval, 1, {MPITYPE}, ios%compmaster, ios%intercomm, ierr) - else - hasfill=0 - call mpi_bcast(hasfill, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - end if - if(debugasync) print *,__PIO_FILE__,__LINE__ - endif - - if(debugasync .and. ios%ioproc) print *,__PIO_FILE__,__LINE__,iodesc%async_id - - select case(File%iotype) - case(pio_iotype_pbinary, pio_iotype_direct_pbinary) - if (present(fillval)) then - call write_darray_bin_{TYPE}(File,varDesc,iodesc, array, iostat, fillval) - else - call write_darray_bin_{TYPE}(File,varDesc,iodesc, array, iostat) - endif - case(pio_iotype_pnetcdf, pio_iotype_netcdf, pio_iotype_netcdf4c, pio_iotype_netcdf4p) - if (present(fillval)) then - call write_darray_nf_{TYPE}(File,varDesc,iodesc, array, iostat, fillval) - else - call write_darray_nf_{TYPE}(File,varDesc,iodesc, array, iostat) - endif - case(pio_iotype_binary) - print *, subName,': IO type not supported' -#ifdef _COMPRESSION - case(pio_iotype_vdc2) -#if ( {ITYPE} == TYPEREAL ) - call write_vdc2_real(File, Vardesc, iodesc, array, iostat) -#endif -#endif - end select - - end subroutine write_darray_1d_{TYPE} - -! TYPE real,int,double -! DIMS 2,3,4,5,6,7 -!> -!! @public -!! @ingroup PIO_write_darray -!! @brief Writes a {DIMS}D array of type {TYPE}. -!! @details -!! @param File @ref file_desc_t -!! @param varDesc @ref var_desc_t -!! @param ioDesc @ref io_desc_t -!! @param array : The data to be written -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!! @param fillval : An optional fill value to fill holes in the data written -!< - subroutine write_darray_{DIMS}d_{TYPE} (File,varDesc,ioDesc, array, iostat, fillval) - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! variable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! variable descriptor - - {VTYPE}, intent(in) :: & - array{DIMSTR} ! array to be written - - {VTYPE}, optional, intent(in) :: fillval ! rearrange receiver fill value - - integer(i4), intent(out) :: iostat - {VTYPE} :: transvar(1), dumbvar(0) - -! 
This code is required due to a bug in gfortran 4.7.2 -#if (__GFORTRAN__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 8) - {VTYPE}, allocatable :: acopy(:) - integer :: isize - - isize= size(array) - allocate(acopy(isize)) - acopy = reshape(array,(/isize/)) - if(present(fillval)) then - call write_darray_1d_{TYPE} (File, varDesc, iodesc, acopy, iostat, fillval) - else - call write_darray_1d_{TYPE} (File, varDesc, iodesc, acopy, iostat) - end if - deallocate(acopy) - return -#else -! cannot call transfer function with a 0 sized array - if(size(array)==0) then - call write_darray_1d_{TYPE} (File, varDesc, iodesc, dumbvar, iostat) - else if(present(fillval)) then - call write_darray_1d_{TYPE} (File, varDesc, iodesc, transfer(array,transvar), iostat, fillval) - else - call write_darray_1d_{TYPE} (File, varDesc, iodesc, transfer(array,transvar), iostat) - end if -#endif - end subroutine write_darray_{DIMS}d_{TYPE} - -! TYPE real,int,double -!> -!! @public -!! @ingroup PIO_read_darray -!! @brief -!! @details -!! @param File @ref file_desc_t -!! @param varDesc @ref var_desc_t -!! @param ioDesc @ref io_desc_t -!! @param array : The read data -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!< - subroutine read_darray_1d_{TYPE} (File,varDesc, ioDesc, array, iostat) - use pio_msg_mod, only : pio_msg_readdarray - ! !DESCRIPTION: - ! Reads a 2-d slab of TYPE to a netcdf file. - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! variable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! iodecomp descriptor - - {VTYPE}, dimension(:), intent(out) :: & - array ! array to be read - - integer(i4), intent(out) :: iostat - - character(len=*), parameter :: subName=modName//'::read_darray_{TYPE}' - - type(iosystem_desc_t), pointer :: ios - integer :: ierr, msg, itype - - - array = 0 - ios => File%iosystem - - if(ios%async_interface .and. .not. ios%ioproc) then - msg = PIO_MSG_READDARRAY - - if(DebugAsync) print *,__PIO_FILE__,__LINE__ - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(vardesc%varid, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(vardesc%rec, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - call mpi_bcast(iodesc%async_id, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - itype = {MPITYPE} - call mpi_bcast(itype, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - if(DebugAsync) print *,__PIO_FILE__,__LINE__, {MPITYPE} - endif - - select case(File%iotype) - case(pio_iotype_pbinary, pio_iotype_direct_pbinary) - call read_darray_bin_{TYPE} (File,varDesc,iodesc,array, iostat) - case(pio_iotype_pnetcdf, pio_iotype_netcdf, pio_iotype_netcdf4c, pio_iotype_netcdf4p) - call read_darray_nf_{TYPE} (File,varDesc,iodesc,array, iostat) -#ifdef _COMPRESSION - case(pio_iotype_vdc2) -#if ( {ITYPE} == TYPEREAL ) - call read_vdc2_real(File, Vardesc, iodesc, array, iostat) -#endif -#endif - case(pio_iotype_binary) - print *, subName,': IO type not supported' - end select - - end subroutine read_darray_1d_{TYPE} - -! TYPE real,int,double -! DIMS 2,3,4,5,6,7 -!> -!! @public -!! @ingroup PIO_read_darray -!! @brief Reads a {DIMS}D array of type {TYPE}. -!! @details -!! @param File @ref file_desc_t -!! @param varDesc @ref var_desc_t -!! 
@param ioDesc @ref io_desc_t -!! @param array : The read data -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!< - subroutine read_darray_{DIMS}d_{TYPE} (File,varDesc,ioDesc, array, iostat) - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! variable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! iodecomp descriptor - - {VTYPE}, intent(out) :: array{DIMSTR} ! array to be read - - integer(i4), intent(out) :: iostat - - {VTYPE}, pointer :: tmpvar(:) - - call alloc_check(tmpvar,size(array)) - call read_darray_1d_{TYPE} (File, varDesc, iodesc, tmpvar, iostat) - array = reshape(tmpvar,shape(array)) - call dealloc_check(tmpvar) - - end subroutine read_darray_{DIMS}d_{TYPE} - -! TYPE real,int,double -!> -!! @private -!! @brief Write a 1D array of type {TYPE} defined by varDesc using the decomposition described in iodesc to the netcdf or pnetcdf file File. -!! @details -!! @param File @ref file_desc_t -!! @param varDesc @ref var_desc_t -!! @param ioDesc @ref io_desc_t -!! @param array : The data to be written -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!! @param fillval : An optional fill value to fill holes in the data written -!< - subroutine write_darray_nf_{TYPE} (File,varDesc,ioDesc,array, iostat, fillval) - - ! !DESCRIPTION: - ! Writes a 2-d slab of TYPE to a netcdf file. - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! variable descriptor - type (io_desc_t), intent(inout) :: & - ioDesc ! io decomp descriptor - - {VTYPE}, target, intent(in) :: & - array(:) ! array to be written - - {VTYPE}, optional, intent(in) :: fillval ! rearrange receiver fill value - - integer(pio_offset), pointer :: start(:), count(:) - - integer(i4), intent(out) :: iostat - integer :: request - integer :: fndims - !EOP - !BOC - !----------------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------------- - - character(len=*), parameter :: subName=modName//'::write_darray_nf_{TYPE}' - {VTYPE}, dimension(:), pointer :: & - IOBUF => null() ! local IO buffer - - logical (log_kind) :: IOproc ! true if IO processor - integer (i4) :: len, &! length of IO decomp segmap - iotype, &! type of IO to perform - ndims ! number of variable dimensions - - logical(log_kind) :: UseRearranger - - logical(log_kind) :: is_iobuf_owned - -#if DEBUG_REARR - {VTYPE}, dimension(:), pointer :: array2 - integer i -#endif - - {VTYPE} :: rsum - integer(i4) :: ierr - -#ifdef TIMING - call t_startf("PIO:pio_write_darray") -#endif - ! ----------------------------------------------------- - ! pull information from file_desc_t data structure - ! ----------------------------------------------------- - IOproc = File%iosystem%IOproc - iotype = File%iotype - UseRearranger = File%iosystem%UseRearranger - - is_iobuf_owned = .false. - - ! --------------------------------------------------------- - ! pull information out of the decomposition data structure - ! --------------------------------------------------------- - len = iodesc%IOmap%length - - ! - ! added for pio2 compatability - ! 
- ierr = pio_inq_varndims(file,vardesc,fndims) - - - if(Debug) print *,__PIO_FILE__,__LINE__,' NAME : IAM: ', & - File%iosystem%comp_rank,' UseRearranger: ',UseRearranger,iodesc%glen, iodesc%iomap%start, len -#ifdef TIMING - call t_startf("PIO:pre_pio_write_nf") - call t_startf("PIO:pio_rearrange_write") -#endif - if(UseRearranger) then - if (IOproc) then - if(Debug) print *, subName,': IAM: ',File%iosystem%comp_rank, & - 'Before call to allocate(IOBUF): ',len, iodesc%write%n_elemtype - - call alloc_check(IOBUF,len,' TYPE :IOBUF') - is_iobuf_owned = .true. - - if (present(fillval)) then - IOBUF=fillval - else - IOBUF= -1.0_r8 - endif - - !------------------------------------------------ - ! set the IO buffer to a particular test pattern - !------------------------------------------------ - !JMD IOBUF(:) = real(File%iosystem%io_rank,kind=r8) - if(Debug) print *, subName,': {comp,io}_rank: ',File%iosystem%comp_rank,File%iosystem%io_rank, & - 'offset: ',iodesc%iomap%start,'len: ',len !,' IOBUF: ',IOBUF - - else - call alloc_check(IOBUF,0) - is_iobuf_owned = .true. - IOBUF= -1.0_r8 - endif - - !------------------------------------ - ! Rearrange data from comp->io decomp - !------------------------------------ - ! "array" is comp data - - call rearrange_comp2io(File%iosystem,iodesc, array, IOBUF) - -#if DEBUG_REARR - call alloc_check(array2,size(array),'array2') - - call rearrange_io2comp(File%iosystem,iodesc,IOBUF,array2) - - do i=1,size(array) - if (array(i) /= array2(i)) then - print *, subName,': error: write ping-pong test failed on index',i - - end if - end do - - print *, subName,': passed write ping-pong test' - - call dealloc_check(array2) - -!!!!!!! end debug -#endif - !-------------------------------------------- - ! End data rearrange - !-------------------------------------------- - else - if(file%iotype==pio_iotype_pnetcdf) then - allocate(IOBUF(size(array))) - IOBUF=array - is_iobuf_owned = .true. - else - IOBUF=>array - end if - endif ! if(UseRearranger) -#ifdef TIMING - call t_stopf("PIO:pio_rearrange_write") -#endif - - if (IOproc) then - !---------------------------------------------- - ! write the global 2-d slice from IO processors - !---------------------------------------------- - - - if (DebugIO.and.userearranger.and.len>1) then - print *,__PIO_FILE__,__LINE__, & - File%iosystem%comp_rank,': write IOBUF r8', & - IOBUF(1:2),' ...',IOBUF(len-1:len), & - iodesc%glen,len - ! write an ascii version - write(10+File%iosystem%comp_rank,*) IOBUF(1:len) - close(10+File%iosystem%comp_rank) - endif - - ! this is a time dependent multidimensional array - if(vardesc%rec>=0 .and. iodesc%start(1)>=0) then - ndims = size(iodesc%start) - if(fndims> ndims) then - ndims = ndims+1 - call alloc_check(start,ndims) - call alloc_check(count,ndims) - - start(1:ndims-1)=iodesc%start - count(1:ndims-1)=iodesc%count - start(ndims:ndims)=vardesc%rec - count(ndims:ndims)=1 - else - ndims = fndims - call alloc_check(start,ndims) - call alloc_check(count,ndims) - start(1:ndims-1)=iodesc%start(1:ndims-1) - start(ndims) = vardesc%rec - count(1:ndims)=iodesc%count - endif - - - - if(Debug) print *, __PIO_FILE__,__LINE__,'start:',start, & - ' count:',count,' ndims:',ndims, minval(IOBUF),maxval(IOBUF) - ! this is a timedependent single value - else if(vardesc%rec>=0) then - call alloc_check(start,1) - call alloc_check(count,1) - start(1) = int(vardesc%rec,kind=PIO_Offset) - count(1) = 1_PIO_Offset - if(Debug) print *, __PIO_FILE__,__LINE__,'start:',start,' count:',count - - ! 
this is a non-timedependent array - else - ndims = size(iodesc%start) - call alloc_check(start,ndims) - call alloc_check(count,ndims) - start=iodesc%start - count=iodesc%count - if(Debug) print *, __PIO_FILE__,__LINE__,'start:',start,' count:',count,' ndims:',ndims - end if - - - - else - ! some compilers have problems passing - ! unassociated pointers when they are intent in - call alloc_check(start, 0) - call alloc_check(count, 0) - endif - -#ifdef TIMING - call t_stopf("PIO:pre_pio_write_nf") - call t_startf("PIO:pio_write_nf") -#endif - ierr = write_nf(File,IOBUF,varDesc,iodesc,start,count, request) -#ifdef TIMING - call t_stopf("PIO:pio_write_nf") -#endif - call dealloc_check(start) - call dealloc_check(count) - - if(IOPROC) then -#ifdef TIMING - call t_startf("PIO:post_pio_write_nf") -#endif - if(file%iotype==pio_iotype_pnetcdf) then - call add_data_to_buffer(File, IOBUF, request) - is_iobuf_owned = .false. - end if -#ifdef TIMING - call t_stopf("PIO:post_pio_write_nf") -#endif - end if - if(associated(IOBUF) .and. is_iobuf_owned) then - deallocate(IOBUF) - endif ! if(IOPROC) - - ! call MPI_Barrier(File%iosystem%comp_comm,ierr) - - !-------------------------- - ! set the error return code - !-------------------------- - iostat=ierr - - !----------------------------------------------------------------------- - !EOC -#ifdef TIMING - call t_stopf("PIO:pio_write_darray") -#endif - end subroutine write_darray_nf_{TYPE} - -! TYPE real,int,double -!> -!! @private -!! @brief Write a 1D array of type {TYPE}. -!! @details -!! @param File @ref file_desc_t -!! @param varDesc @ref var_desc_t -!! @param ioDesc @ref io_desc_t -!! @param array : The data to be written -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!! @param fillval : An optional fill value to fill holes in the data written -!< - subroutine write_darray_bin_{TYPE}(File,varDesc,ioDesc,array, iostat, fillval) - - ! !DESCRIPTION: - ! Writes a 2-d slab of integers to a file - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! varable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! IO decomp descriptor - - - {VTYPE}, dimension(:), intent(in), target :: & - array ! array to be written - - {VTYPE}, optional, intent(in) :: fillval - - integer (i4), intent(out) :: iostat - - !EOP - !BOC - !----------------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------------- - character(len=*), parameter :: subName=modName//'::write_darray_bin_{TYPE}' - - {VTYPE}, dimension(:), pointer :: & - IOBUF => null() ! local IO buffer - - -#if DEBUG_REARR - {VTYPE}, dimension(:), pointer :: array2 - integer i -#endif - - logical (log_kind) :: IOproc ! true if IO processor - integer (i4) :: len, &! length of IO decomp segmap - iotype, &! type of IO to perform - varID ! variable ID - integer (i4) :: ierr - - logical(log_kind) :: UseRearranger - -#ifdef TIMING - call t_startf("PIO:pio_write_darray") -#endif - ! ----------------------------------------------------- - ! pull information from file_desc_t data structure - ! ----------------------------------------------------- - IOproc = File%iosystem%IOproc - iotype = File%iotype - UseRearranger = File%iosystem%UseRearranger - - ! ------------------------------------------------- - ! 
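The start/count bookkeeping in write_darray_nf above (and its mirror in read_darray_nf below) follows one rule: the decomposition supplies the spatial start/count, and for a record variable the record index is appended as a trailing dimension of extent one. A standalone restatement of that rule; build_record_start_count is an invented name, not a PIO routine:

  subroutine build_record_start_count(dstart, dcount, rec, start, count)
    implicit none
    integer, intent(in)  :: dstart(:), dcount(:)     ! spatial decomposition start/count
    integer, intent(in)  :: rec                      ! record (unlimited dimension) index
    integer, allocatable, intent(out) :: start(:), count(:)
    integer :: n

    n = size(dstart)
    allocate(start(n+1), count(n+1))
    start(1:n) = dstart                              ! leading dims come from the decomposition
    count(1:n) = dcount
    start(n+1) = rec                                 ! position along the unlimited dimension
    count(n+1) = 1                                   ! write exactly one record
  end subroutine build_record_start_count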
Pull information about the IO decomposition - ! ------------------------------------------------- - if(Debug) print *, __PIO_FILE__,__LINE__,iodesc%Write%elemTYPE, iodesc%Write%fileTYPE - len = iodesc%IOmap%length - - ierr = pio_noerr - if (IOproc) then - if(userearranger) then - call alloc_check(IOBUF,len,' TYPE :IOBUF') - - if (present(fillval)) then - IOBUF=fillval - else - IOBUF=-1.0_r8 - endif - else - iobuf=>array - end if - !------------------------------------------------ - ! set the IO buffer to a particular test pattern - !------------------------------------------------ - !JMD IOBUF(:) = File%iosystem%io_rank - if(Debug) print *, subName,': {comp,io}_rank: ',File%iosystem%comp_rank,File%iosystem%io_rank, & - 'offset: ',iodesc%iomap%start,'len: ',len ! ,' IOBUF: ',IOBUF - else - if(userearranger) call alloc_check(IOBUF,0,'write_darray_int:IOBUF') - endif - - !----------------------------------------- - !NEED HELP: - ! - ! Need a call to a data rearranger here - ! - ! call ESMF_rearrange() - ! - ! or - ! - ! call MCT_rearrange() - !----------------------------------------- -#ifdef TIMING - call t_startf("PIO:pio_rearrange_write") -#endif - if(UseRearranger) then - !------------------------------------ - ! Rearrange data from comp->io decomp - !------------------------------------ - - ! "array" is comp data - - call rearrange_comp2io(File%iosystem,iodesc,array,IOBUF) - -#if DEBUG_REARR - call alloc_check(array2,size(array),'array2') - - call rearrange_io2comp(File%iosystem,iodesc,IOBUF,array2) - - do i=1,size(array) - if (array(i) /= array2(i)) then - print *, subName,': error: int write ping-pong test failed on index',i - - end if - end do - - print *, subName,': passed int write ping-pong test' - - call dealloc_check(array2) - -!!!!!!! end debug -#endif - !-------------------------------------------- - ! End data rearrange - !-------------------------------------------- - endif -#ifdef TIMING - call t_stopf("PIO:pio_rearrange_write") -#endif - - if(IOProc) then -#ifdef TIMING - call t_startf("PIO:pio_write_bin") -#endif - !---------------------------------------------- - ! write the global 2-d slice from IO processors - !---------------------------------------------- - ierr = write_mpiio(File,IOBUF,varDesc,iodesc) -#ifdef TIMING - call t_stopf("PIO:pio_write_bin") -#endif - endif - - !-------------------------- - ! deallocate the IO buffer - !-------------------------- - if(userearranger) call dealloc_check(IOBUF) - ! call MPI_Barrier(File%iosystem%comp_comm,ierr) - - !-------------------------- - ! set the error return code - !-------------------------- - iostat=ierr - - !----------------------------------------------------------------------- - !EOC -#ifdef TIMING - call t_stopf("PIO:pio_write_darray") -#endif - end subroutine write_darray_bin_{TYPE} - - -! TYPE real,int,double -!> -!! @private -!! @brief Read a 1D array of type {TYPE} defined by varDesc using the decomposition -!! described in ioDesc to the netcdf or pnetcdf file File. -!! @details -!! @param File @ref file_desc_t -!! @param varDesc @ref var_desc_t -!! @param ioDesc @ref io_desc_t -!! @param array : The read data -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!< - subroutine read_darray_nf_{TYPE} (File,varDesc,ioDesc,array, iostat) - - ! - ! !DESCRIPTION: - ! Reads a 2-d horizontal slice of integers from a binary file - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! 
info about data file - - type (var_desc_t), intent(inout) :: & - varDesc ! variable descriptor - type (io_desc_t), intent(inout) :: & - ioDesc ! io decomp descriptor - - ! !INPUT/OUTPUT PARAMETERS: - - {VTYPE}, dimension(:), intent(out), target :: & - array ! array to be read - integer (i4), intent(out) :: iostat - !EOP - !BOC - !----------------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------------- - - character(len=*), parameter :: subName=modName//'::read_darray_nf_{TYPE}' - {VTYPE}, dimension(:), pointer :: & - IOBUF ! local IO buffer - - logical (log_kind) :: IOproc ! true if an IO processor - integer (i4) :: len, &! local length of IO decomp - iotype, &! type of IO to perform - ndims - integer(pio_offset), dimension(PIO_MAX_VAR_DIMS) :: start, count - - - logical(log_kind), parameter :: Debug = .FALSE. - logical(log_kind) :: UseRearranger - integer :: fndims - integer(i4) :: ierr -#if DEBUG_REARR - {VTYPE}, dimension(:), pointer :: iobuf2 - integer i -#endif -#ifdef TIMING - call t_startf("PIO:pio_read_darray") -#endif - - ! ----------------------------------------------------- - ! pull information from file_desc_t data structure - ! ----------------------------------------------------- - IOproc = File%iosystem%IOproc - iotype = File%iotype - UseRearranger = File%iosystem%UseRearranger - ierr = PIO_NOERR - - ! ----------------------------------------------------- - ! Pull out information of DecompMap_t data structure - ! ----------------------------------------------------- - len = iodesc%IOmap%length - start=0 - count=0 - - ierr = pio_inq_varndims(File,vardesc,fndims) - - - if (IOproc) then -! This is for pio2 compatablity - if(fndims > size(iodesc%start) .and. vardesc%rec<0) then - vardesc%rec = 1_PIO_OFFSET - endif - - !----------------------------- - ! allocate temporary IO buffer - !----------------------------- - if(userearranger) then - if((File%iotype == pio_iotype_netcdf .or. File%iotype == pio_iotype_netcdf4c) & - .and. file%iosystem%io_rank==0) then - call alloc_check(IOBUF,iodesc%maxiobuflen,' TYPE :IOBUF') - else - call alloc_check(IOBUF,len,' TYPE :IOBUF') - end if - else - iobuf=>array - end if - !---------------------------------------------- - ! read the global 2-d slice to IO processors - !---------------------------------------------- - ! this is a time dependent multidimensional array - - - if(vardesc%rec>=0 .and. iodesc%start(1)>=0) then - ndims = size(iodesc%start)+1 - start(1:ndims-1)=iodesc%start - count(1:ndims-1)=iodesc%count - start(ndims:ndims)=vardesc%rec - count(ndims:ndims)=1 - ! this is a timedependent single value - else if(vardesc%rec>=0) then - ndims=1 - start(1) = int(vardesc%rec,kind=PIO_Offset) - count(1) = 1_PIO_Offset - - ! this is a non-timedependent array - else - ndims = size(iodesc%start) - start(1:ndims)=iodesc%start - count(1:ndims)=iodesc%count - end if - else - ndims=1 - if(userearranger) then - call alloc_check(IOBUF,0,'IOBUF') - end if - endif -! 
print *,__PIO_FILE__,__LINE__,ndims, fndims, start(1:fndims),count(1:fndims),vardesc%rec - -#ifdef TIMING - call t_startf("PIO:pio_read_nf") -#endif - ierr = read_nf(File,IOBUF,varDesc,iodesc,start(1:ndims),count(1:ndims)) -#ifdef TIMING - call t_stopf("PIO:pio_read_nf") -#endif - - if(DebugIO) print *, subName,': {comp,io}_rank: ',File%iosystem%comp_rank,File%iosystem%io_rank, & - 'offset: ',iodesc%iomap%start,'len: ',len !,' IOBUF: ',IOBUF - - -#ifdef TIMING - call t_startf("PIO:pio_rearrange_read") -#endif - if(UseRearranger) then - !------------------------------------ - ! Rearrange data from io->comp decomp - !------------------------------------ - - ! "array" is comp data - call rearrange_io2comp(File%iosystem,iodesc,IOBUF,array) - -#if DEBUG_REARR - call alloc_check(iobuf2,size(IOBUF),'iobuf2') - - call rearrange_comp2io(File%iosystem,iodesc,array,iobuf2) - - do i=1,size(iobuf) - if (iobuf(i) /= iobuf2(i)) then - print *, subName,': error: int read ping-pong test failed on index',i - - end if - end do - - print *, subName,': passed int read ping-pong test' - - call dealloc_check(iobuf2) - -!!!!!!! end debug -#endif - ! -------------------------- - ! deallocate IO buffer - ! -------------------------- - call dealloc_check(IOBUF) - - endif -#ifdef TIMING - call t_stopf("PIO:pio_rearrange_read") -#endif - - !---------------- - ! set errror code - !---------------- - iostat = ierr - !----------------------------------------------------------------------- - !EOC -#ifdef TIMING - call t_stopf("PIO:pio_read_darray") -#endif - - end subroutine read_darray_nf_{TYPE} - -! TYPE real,int,double -!> -!! @private -!! @brief Read an array of type {TYPE} defined by varDesc using the decomposition -!! described in ioDesc to the netcdf or pnetcdf file File. -!! @details -!! @param File @ref file_desc_t -!! @param varDesc @ref var_desc_t -!! @param ioDesc @ref io_desc_t -!! @param array : The read data -!! @param iostat : The status returned from this routine (see \ref PIO_seterrorhandling for details) -!< - subroutine read_darray_bin_{TYPE} (File,varDesc,ioDesc,array, iostat) - ! - ! !DESCRIPTION: - ! Reads a 2-d horizontal slice of integers from a binary file - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - - type (File_desc_t), intent(inout) :: & - File ! info about data file - - type (var_desc_t), intent(inout) :: & - varDesc ! variable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! io decomp descriptor - - ! !INPUT/OUTPUT PARAMETERS: - - {VTYPE}, dimension(:), intent(out), target :: & - array ! array to be read - integer (i4), intent(out) :: iostat - !EOP - !BOC - !----------------------------------------------------------------------- - ! - ! local variables - ! - !----------------------------------------------------------------------- - - character(len=*), parameter :: subName=modName//'::read_darray_bin_{TYPE}' - {VTYPE}, dimension(:), pointer :: & - IOBUF ! local IO buffer - - logical (log_kind) :: IOproc ! true if an IO processor - integer (i4) :: len, &! local length of IO decomp - iotype ! type of IO to perform - - - - logical(log_kind), parameter :: Debug = .FALSE. - logical(log_kind) :: UseRearranger - - integer(i4) :: ierr - -#if DEBUG_REARR - {VTYPE}, dimension(:), pointer :: iobuf2 - integer i -#endif - -#ifdef TIMING - call t_startf("PIO:pio_read_darray") -#endif - - ! ----------------------------------------------------- - ! pull information from file_desc_t data structure - ! 
----------------------------------------------------- - IOproc = File%iosystem%IOproc - iotype = File%iotype - UseRearranger = File%iosystem%UseRearranger - - - ! ----------------------------------------------------- - ! Pull out information of DecompMap_t data structure - ! ----------------------------------------------------- - - ! len = iodesc%IOmap%length - len = iodesc%Read%n_words - ierr = pio_noerr - - if (IOproc) then - - !----------------------------- - ! allocate temporary IO buffer - !----------------------------- - if(userearranger) then - call alloc_check(IOBUF,len,'read_darray_ :IOBUF') - else - iobuf=>array - end if - !---------------------------------------------- - ! read the global 2-d slice to IO processors - !---------------------------------------------- - if(iotype.eq.pio_iotype_binary) then - print *, subName,': TYPE : IO type not supported' - iostat =-1 - return - end if - -#ifdef TIMING - call t_startf("PIO:pio_read_bin") -#endif - ierr = read_mpiio(File,IOBUF,varDesc,iodesc) -#ifdef TIMING - call t_stopf("PIO:pio_read_bin") -#endif - - if(DebugIO) print *, subName,': TYPE: {comp,io}_rank: ',File%iosystem%comp_rank,File%iosystem%io_rank, & - 'len: ',len,' IOBUF: ',IOBUF(1:4) - else if(userearranger) then - call alloc_check(IOBUF,0,'IOBUF') - endif - -#ifdef TIMING - call t_startf("PIO:pio_rearrange_read") -#endif - if(UseRearranger) then - !------------------------------------ - ! Rearrange data from io->comp decomp - !------------------------------------ - - ! "array" is comp data - call rearrange_io2comp(File%iosystem,iodesc,IOBUF,array) - - -#if DEBUG_REARR - call alloc_check(iobuf2,size(IOBUF),'iobuf2') - - call rearrange_comp2io(File%iosystem,iodesc,array,iobuf2) - - - do i=1,size(iobuf) - if (iobuf(i) /= iobuf2(i)) then - print *, subName,': error: int read ping-pong test failed on index',i - - end if - end do - - print *, subName,': passed int read ping-pong test' - - call dealloc_check(iobuf2) - -!!!!!!! end debug -#endif - ! -------------------------- - ! deallocate IO buffer - ! -------------------------- - call dealloc_check(IOBUF) - endif -#ifdef TIMING - call t_stopf("PIO:pio_rearrange_read") -#endif - - !---------------- - ! set errror code - !---------------- - iostat = ierr - !----------------------------------------------------------------------- - !EOC -#ifdef TIMING - call t_stopf("PIO:pio_read_darray") -#endif - - end subroutine read_darray_bin_{TYPE} - - ! TYPE real,int,double - subroutine add_data_to_buffer_{TYPE} (File, IOBUF, request) - use pio_types, only : io_data_list - type(file_desc_t) :: File - {VTYPE}, pointer :: IOBUF(:) - integer, intent(in) :: request - integer :: cnt, mpierr, maxbuffsize, this_buffsize - type(io_data_list), pointer :: ptr - - if(.not. 
associated(File%data_list_top)) then - allocate(file%data_list_top) - ptr => file%data_list_top - file%data_list_end => file%data_list_top - else - ptr => file%data_list_end - do while(associated(ptr%next)) - ptr => ptr%next - end do - - allocate(ptr%next) - ptr=>ptr%next - nullify(ptr%next) - end if - ptr%request = request - ptr%data_{TYPE} => IOBUF - this_buffsize = size(iobuf)*c_sizeof(iobuf(1)) - file%buffsize=file%buffsize+this_buffsize - total_buffsize = total_buffsize+this_buffsize - file%data_list_end => ptr -#ifdef TIMING - call t_startf("PIO:allred_add_data_to_buf") -#endif - call MPI_ALLREDUCE(total_buffsize,maxbuffsize,1,MPI_INTEGER,MPI_MAX,file%iosystem%io_comm, mpierr) -#ifdef TIMING - call t_stopf("PIO:allred_add_data_to_buf") -#endif - - if(maxbuffsize > pio_buffer_size_limit) then - call darray_write_complete(File) - endif - -! if(debug) - - end subroutine add_data_to_buffer_{TYPE} - - - - subroutine darray_write_complete(File) - use pio_types, only : io_data_list -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -# include -#endif -#endif - - type(file_desc_t) :: File - type(io_data_list), pointer :: ptr, prevptr - integer :: cnt, ierr - integer, pointer :: array_of_requests(:), status(:) - - if(associated(file%data_list_top)) then - - cnt=1 - ptr=>file%data_list_top - do while(associated(ptr)) - cnt=cnt+1 - ptr=>ptr%next - end do - allocate(array_of_requests(cnt), status(cnt)) - ptr=>file%data_list_top - cnt=1 - do while(associated(ptr)) - array_of_requests(cnt)=ptr%request - cnt=cnt+1 - ptr=>ptr%next - end do - -#ifdef _PNETCDF - ierr = nfmpi_wait_all(file%fh, cnt-1, array_of_requests, status) -#endif - if(Debug) print *,__PIO_FILE__,__LINE__,status, ierr, total_buffsize - - ptr=>file%data_list_top - do while(associated(ptr)) - if(associated(ptr%data_double)) then - deallocate(ptr%data_double) - else if(associated(ptr%data_real)) then - deallocate(ptr%data_real) - else if(associated(ptr%data_int)) then - deallocate(ptr%data_int) - end if - prevptr=>ptr - ptr => ptr%next - deallocate(prevptr) - end do - nullify(file%data_list_top) - nullify(file%data_list_end) - - total_buffsize=total_buffsize-file%buffsize - - file%buffsize=0 - deallocate(array_of_requests) - deallocate(status) - end if -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - end subroutine darray_write_complete - - -#ifdef _COMPRESSION - - subroutine write_vdc2_real(File, Vardesc, iodesc, array, iostat) - use pio_support, only : piodie - use, intrinsic :: iso_c_binding - use C_interface_mod, only : F_C_STRING_DUP - ! !DESCRIPTION: - ! Writes a VDC2 vapor data collection - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! varable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! IO decomp descriptor - - real(r4), dimension(:), intent(in), target :: & - array ! array to be written - - integer (i4), intent(out) :: iostat - - !EOP - !BOC - - !!!!!!!!!!! locals !!!!!!!!!!!!!!!!!!!!! - real(r4), dimension(:), pointer :: & - IOBUF ! local IO buffer - - logical(log_kind) :: UseRearranger - logical (log_kind) :: IOproc ! true if IO processor - integer (i4) :: len, &! length of IO decomp segmap - iotype, &! type of IO to perform - ndims ! 
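add_data_to_buffer and darray_write_complete above implement a simple deferred-write scheme: each nonblocking pnetcdf request and its IO buffer are appended to a linked list, the accumulated size is tracked, and everything is flushed once pio_buffer_size_limit is exceeded (the real code first takes the maximum over the IO tasks with MPI_ALLREDUCE and completes the requests with nfmpi_wait_all). The skeleton below restates only the list-and-flush pattern; module and routine names are invented, and the element count stands in for the byte count:

module pending_buffer_sketch
  implicit none
  integer, parameter :: size_limit = 1024       ! stand-in for pio_buffer_size_limit
  type :: pending_t
     real, pointer :: data(:) => null()         ! buffer handed off by the writer
     type(pending_t), pointer :: next => null()
  end type pending_t
  type(pending_t), pointer :: head => null(), tail => null()
  integer :: total_size = 0
contains
  subroutine add_pending(buf)
    real, pointer, intent(in) :: buf(:)         ! assumed allocated by the caller
    type(pending_t), pointer :: node
    allocate(node)
    node%data => buf                            ! take ownership of the buffer
    if (.not. associated(head)) then
       head => node
    else
       tail%next => node
    end if
    tail => node
    total_size = total_size + size(buf)
    if (total_size > size_limit) call flush_pending()
  end subroutine add_pending

  subroutine flush_pending()
    type(pending_t), pointer :: node, nxt
    node => head
    do while (associated(node))
       ! a real implementation would first complete the outstanding I/O request here
       deallocate(node%data)
       nxt => node%next
       deallocate(node)
       node => nxt
    end do
    nullify(head, tail)
    total_size = 0
  end subroutine flush_pending
end module pending_buffer_sketch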
number of variable dimensions - integer(4), pointer :: start(:), count(:) - double precision :: timer; - integer :: vlen - - type(c_ptr) :: cptr - - - ! pull information from file_desc_t data structure - IOproc = File%iosystem%IOproc - iotype = File%iotype - UseRearranger = File%iosystem%UseRearranger - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - ! pull information out of the decomposition data structure - len = iodesc%IOmap%length - ndims = size(iodesc%start) - call alloc_check(start,ndims) - call alloc_check(count,ndims) - start = iodesc%start - count = iodesc%count - - - - if(UseRearranger) then - if (IOproc) then - - if(Debug) print *, 'write_vdc2_real: IAM: ',File%iosystem%comp_rank,'Before call to allocate(IOBUF): ',len - call alloc_check(IOBUF,len,' TYPE :IOBUF') - IOBUF= -1.0_r8 - - else - call alloc_check(IOBUF,0) - IOBUF= -1.0_r8 - endif - - !------------------------------------ - ! Rearrange data from comp->io decomp - !------------------------------------ - ! "array" is comp data - call rearrange_comp2io(File%iosystem, iodesc, array, iobuf) - - !-------------------------------------------- - ! End data rearrange - !-------------------------------------------- - else - iobuf=>array - endif ! if(UseRearranger) - - - if (IOproc) then - vlen = len_trim(vardesc%name) - cptr = c_loc(iobuf(1)) - call WriteVDC2Var(cptr, start, count, File%iosystem%IO_comm, Vardesc%rec, -1, -1, File%iosystem%num_iotasks, & - F_C_STRING_DUP(varDesc%name(1:vlen))) - endif - - if(UseRearranger) call dealloc_check(IOBUF) - - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - end subroutine write_vdc2_real - -subroutine read_vdc2_real(File, Vardesc, iodesc, array, iostat) - use pio_support, only : piodie - use, intrinsic :: iso_c_binding - use C_interface_mod, only : F_C_STRING_DUP - ! !DESCRIPTION: - ! Writes a VDC2 vapor data collection - ! - ! !REVISION HISTORY: - ! same as module - - ! !INPUT PARAMETERS: - type (File_desc_t), intent(inout) :: & - File ! file information - - type (var_desc_t), intent(inout) :: & - varDesc ! varable descriptor - - type (io_desc_t), intent(inout) :: & - ioDesc ! IO decomp descriptor - - real(r4), dimension(:), intent(inout), target :: & - array ! array to be written - - integer (i4), intent(out) :: iostat - - !EOP - !BOC - - !!!!!!!!!!! locals !!!!!!!!!!!!!!!!!!!!! - real(r4), dimension(:), pointer :: & - IOBUF ! local IO buffer - - logical(log_kind) :: UseRearranger - logical (log_kind) :: IOproc ! true if IO processor - integer (i4) :: len, &! length of IO decomp segmap - iotype, &! type of IO to perform - ndims ! number of variable dimensions - integer(4), pointer :: start(:), count(:) - - double precision :: timer; - type(c_ptr) :: cptr - - - ! pull information from file_desc_t data structure - IOproc = File%iosystem%IOproc - iotype = File%iotype - UseRearranger = File%iosystem%UseRearranger - - ! 
pull information out of the decomposition data structure - len = iodesc%IOmap%length - ndims = size(iodesc%start) - call alloc_check(start,ndims) - call alloc_check(count,ndims) - start = iodesc%start - count = iodesc%count - - if(UseRearranger) then - if (IOproc) then - - if(Debug) print *, 'read_vdc2_real: IAM: ',File%iosystem%comp_rank,'Before call to allocate(IOBUF): ',len - call alloc_check(IOBUF,len,' TYPE :IOBUF') - IOBUF= -1.0_r8 - cptr = c_loc(iobuf(1)) - call ReadVDC2Var(cptr, start, count, File%iosystem%IO_comm, vardesc%rec, -1, -1, File%iosystem%num_iotasks, & - F_C_String_dup(varDesc%name)) - else - call alloc_check(IOBUF,0) - IOBUF= -1.0_r8 - endif - - !------------------------------------ - ! Rearrange data from comp->io decomp - !------------------------------------ - ! "array" is comp data - call rearrange_io2comp(File%iosystem, iodesc, iobuf, array) - - !-------------------------------------------- - ! End data rearrange - !-------------------------------------------- - else - iobuf=>array - endif ! if(UseRearranger) - - if(UseRearranger) call dealloc_check(IOBUF) - - - end subroutine read_vdc2_real -#endif - - -end module piodarray diff --git a/src/externals/pio1/pio/piolib_mod.F90 b/src/externals/pio1/pio/piolib_mod.F90 deleted file mode 100644 index b45e552dbd0..00000000000 --- a/src/externals/pio1/pio/piolib_mod.F90 +++ /dev/null @@ -1,3064 +0,0 @@ -#define __PIO_FILE__ "piolib_mod.f90" -#define debug_rearr 0 -#ifdef BGP -#define BGx -#endif -#ifdef BGL -#define BGx -#endif -#ifdef BGQbroken -#define BGx -#endif - -#include "rearr_options.h" -!> -!! @file -!! @brief Initialization Routines for PIO -!! -!! $Revision$ -!! $LastChangedDate$ -!< -module piolib_mod - !-------------- - use pio_kinds - !-------------- - use pio_types, only : file_desc_t, iosystem_desc_t, var_desc_t, io_desc_t, & - pio_iotype_pbinary, pio_iotype_binary, pio_iotype_direct_pbinary, & - pio_iotype_netcdf, pio_iotype_pnetcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c, & - pio_noerr, pio_num_ost, PIO_rearr_opt_t - !-------------- - use alloc_mod - !-------------- - use pio_support, only : piodie, debug, debugio, debugasync, checkmpireturn - ! - use ionf_mod, only : create_nf, open_nf,close_nf, sync_nf - use pionfread_mod, only : read_nf - use pionfwrite_mod, only : write_nf -#ifdef _COMPRESSION - use piovdc - use C_interface_mod, only : F_C_STRING_DUP -#endif - use pio_mpi_utils, only : PIO_type_to_mpi_type - use iompi_mod - use rearrange -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf ! _EXTERNAL -#endif - use pio_msg_mod -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none - private -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - ! !public member functions: - - public :: PIO_init, & - PIO_finalize, & - PIO_initdecomp, & - PIO_set_rearr_opts,& - PIO_openfile, & - PIO_syncfile, & - PIO_createfile, & - PIO_closefile, & - PIO_setiotype, & - PIO_numtoread, & - PIO_numtowrite, & - PIO_setframe, & - PIO_advanceframe, & - PIO_setdebuglevel, & - PIO_seterrorhandling, & - PIO_get_local_array_size, & - PIO_freedecomp, & - PIO_dupiodesc, & - PIO_getnumiotasks, & - PIO_set_hint, & - PIO_getnum_OST, & - PIO_setnum_OST, & - PIO_FILE_IS_OPEN, & - pio_iotask_rank - -#ifdef MEMCHK -!> this is an internal variable for memory leak debugging -!! it is used when macro memchk is defined and it causes each task to print the -!! memory resident set size anytime it changes within pio. 
-!< - integer :: lastrss=0 -#endif - - !eop - !boc - !----------------------------------------------------------------------- - ! - ! module variables - ! - !----------------------------------------------------------------------- -!> -!! @defgroup PIO_openfile PIO_openfile -!< - interface PIO_openfile - module procedure PIO_openfile - end interface - -!> -!! @defgroup PIO_syncfile PIO_syncfile -!< - interface PIO_syncfile - module procedure syncfile - end interface - -!> -!! @defgroup PIO_createfile PIO_createfile -!< - interface PIO_createfile - module procedure createfile - end interface - -!> -!! @defgroup PIO_setframe PIO_setframe -!! @brief sets the unlimited dimension for netcdf file for record number for binary files -!< - interface PIO_setframe - module procedure setframe - module procedure setframe_pio2 - end interface - -!> -!! @defgroup PIO_advanceframe PIO_advanceframe -!< - interface PIO_advanceframe - module procedure advanceframe - end interface - -!> -!! @defgroup PIO_closefile PIO_closefile -!< - interface PIO_closefile - module procedure closefile - end interface - - -!> -!! @defgroup PIO_freedecomp PIO_freedecomp -!! free memory associated with a io descriptor -!< - interface PIO_freedecomp - module procedure freedecomp_ios - module procedure freedecomp_file - end interface - -!> -!! @defgroup PIO_init PIO_init -!! initializes the pio subsystem -!< - interface PIO_init - module procedure init_intracom - module procedure init_intercom - - end interface - -!> -!! @defgroup PIO_finalize PIO_finalize -!! Shuts down and cleans up any memory associated with the pio library. -!< - interface PIO_finalize - module procedure finalize - end interface - -!> -!! @defgroup PIO_initdecomp PIO_initdecomp -!! @brief PIO_initdecomp is an overload interface the models decomposition to pio. -!< - - - interface PIO_initdecomp - module procedure PIO_initdecomp_dof_i4 ! previous name: initdecomop_1dof_nf_box - module procedure PIO_initdecomp_dof_i8 ! previous name: initdecomop_1dof_nf_box - module procedure PIO_initdecomp_dof_i8_vdc - module procedure initdecomp_1dof_nf_i4 - module procedure initdecomp_1dof_nf_i8 - module procedure initdecomp_1dof_bin_i4 - module procedure initdecomp_1dof_bin_i8 - module procedure initdecomp_2dof_nf_i4 - module procedure initdecomp_2dof_nf_i8 - module procedure initdecomp_2dof_bin_i4 - module procedure initdecomp_2dof_bin_i8 - module procedure PIO_initdecomp_bc - module procedure PIO_initdecomp_dof_dof - end interface - -!> -!! @defgroup PIO_dupiodesc PIO_dupiodesc -!! duplicates an eisting io descriptor -!< - interface PIO_dupiodesc - module procedure dupiodesc - end interface - -!> -!! @defgroup PIO_setiotype PIO_setiotype -!! sets the io type used by pio -!< - interface PIO_setiotype - module procedure setiotype - end interface - -!> -!! @defgroup PIO_numtoread PIO_numtoread -!! returns the total number of words to read -!< - interface PIO_numtoread - module procedure numtoread - end interface - -!> -!! @defgroup PIO_numtowrite PIO_numtowrite -!! returns the total number of words to write -!< - interface PIO_numtowrite - module procedure numtowrite - end interface - - -!> -!! @defgroup PIO_getnumiotasks PIO_getnumiotasks -!! returns the actual number of IO-tasks used. PIO -!! will reset the total number of IO-tasks if certain -!! conditions are meet -!< - interface PIO_getnumiotasks - module procedure getnumiotasks - end interface - -!> -!! @defgroup PIO_setdebuglevel PIO_setdebuglevel -!! sets the level of debug information that pio will generate. 
-!< - interface PIO_setdebuglevel - module procedure setdebuglevel - end interface - -!> -!! @defgroup PIO_seterrorhandling PIO_seterrorhandling -!! sets the form of error handling for pio. -!! -!! By default pio handles errors internally by printing a string -!! describing the error and calling mpi_abort. Application -!! developers can change this behavior for calls to the underlying netcdf -!! libraries with a call to PIO_seterrorhandling. For example if a -!! developer wanted to see if an input netcdf format file contained the variable -!! 'u' they might write the following -!! @verbinclude errorhandle -!< - interface PIO_seterrorhandling - module procedure seterrorhandlingf - module procedure seterrorhandlingi - end interface - -!> -!! @defgroup PIO_get_local_array_size PIO_get_local_array_size -!< - - !eoc - !*********************************************************************** -#ifdef _COMPRESSION - interface - subroutine createvdf(vdc_dims, vdc_bsize, vdc_ts, restart , fname) bind(C) - use, intrinsic :: iso_c_binding - integer(c_int), intent(in) :: vdc_dims(3), vdc_bsize(3) - integer(c_int), intent(in), value :: vdc_ts, restart - type(c_ptr), intent(in), value :: fname - end subroutine createvdf - end interface -#endif - -contains -!> -!! @public -!! @ingroup PIO_file_is_open -!! @brief This logical function indicates if a file is open. -!! @details -!! @param File @copydoc file_desc_t -!< - logical function PIO_FILE_IS_OPEN(File) - type(file_desc_t), intent(in) :: file - pio_file_is_open = file%file_is_open - end function PIO_FILE_IS_OPEN - - -!> -!! @public -!! @ingroup PIO_get_local_array_size -!! @brief This function returns the expected local size of an array associated with iodesc -!! @details -!! @param iodesc -!! @copydoc io_desc_t -!< - integer function PIO_get_local_array_size(iodesc) - type(io_desc_t), intent(in) :: iodesc - PIO_get_local_array_size = iodesc%compsize - end function PIO_get_local_array_size - -!> -!! @public -!! @ingroup PIO_advanceframe -!! @brief advances the record dimension of a variable in a netcdf format file -!! or the block address in a binary file -!! @details -!! @param[in,out] vardesc @copybrief var_desc_t -!< - subroutine advanceframe(vardesc) - type(var_desc_t), intent(inout) :: vardesc - vardesc%rec=vardesc%rec+1 - end subroutine advanceframe - -!> -!! @public -!! @ingroup PIO_setframe -!! @brief sets the record dimension of a variable in a netcdf format file -!! or the block address in a binary file -!! @details -!! @param vardesc @copydoc var_desc_t -!! @param frame : frame number to set -!< - subroutine setframe(vardesc,frame) - type(var_desc_t), intent(inout) :: vardesc - integer(kind=PIO_offset), intent(in) :: frame - vardesc%rec=frame - end subroutine setframe - - subroutine setframe_pio2(pioid,vardesc,frame) - type(file_desc_t), intent(in) :: pioid - type(var_desc_t), intent(inout) :: vardesc - integer(kind=PIO_offset), intent(in) :: frame - call setframe(vardesc,frame) - end subroutine setframe_pio2 - -!> -!! @public -!! @ingroup PIO_setdebuglevel -!! @brief sets the level of debug information output to stdout by pio -!! @details -!! @param level : default value is 0, allowed values 0-3 -!< - subroutine setdebuglevel(level) - integer(i4), intent(in) :: level - if(level.eq.0) then - debug=.false. - debugio=.false. - debugasync=.false. - else if(level.eq.1) then - debug=.true. - debugio=.false. - debugasync=.false. - else if(level.eq.2) then - debug=.false. - debugio=.true. - debugasync=.false. 
- else if(level.eq.3) then - debug=.true. - debugio=.true. - debugasync=.false. - else if(level.eq.4) then - debug=.false. - debugio=.false. - debugasync=.true. - else if(level.eq.5) then - debug=.true. - debugio=.false. - debugasync=.true. - else if(level.ge.6) then - debug=.true. - debugio=.true. - debugasync=.true. - - end if - end subroutine setdebuglevel - -!> -!! @ingroup PIO_seterrorhandling -!! @public -!! @brief set the pio error handling method for a file -!! -!! @param file @copydoc file_desc_t -!! @param method : -!! @copydoc PIO_error_method -!< - subroutine seterrorhandlingf(file, method,oldmethod) - type(file_desc_t), intent(inout) :: file - integer, intent(in) :: method - integer, optional, intent(out) :: oldmethod - - call seterrorhandlingi(file%iosystem, method, oldmethod) - end subroutine seterrorhandlingf - -!> -!! @ingroup PIO_seterrorhandling -!! @public -!! @brief set the pio error handling method for the iosystem -!! @param iosystem : a defined pio system descriptor, see PIO_types -!! @param method : -!! @copydoc PIO_error_method -!< - subroutine seterrorhandlingi(ios, method,oldmethod) - use pio_types, only : pio_internal_error, pio_return_error - use pio_msg_mod, only : pio_msg_seterrorhandling - type(iosystem_desc_t), intent(inout) :: ios - integer, intent(in) :: method - integer, optional, intent(out) :: oldmethod - integer :: msg, ierr - - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_SETERRORHANDLING - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(method,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , ierr) - end if - if(Debugasync) print *,__PIO_FILE__,__LINE__,method - if(present(oldmethod)) then - oldmethod = ios%error_handling - endif - ios%error_handling=method - - if(method > PIO_internal_error .or. method < PIO_return_error) then - call piodie(__PIO_FILE__,__LINE__,'invalid error handling method requested') - end if - end subroutine seterrorhandlingi - -!> -!! @public -!! @ingroup PIO_initdecomp -!! @brief Implements the @ref decomp_bc for PIO_initdecomp -!! @details This provides the ability to describe a computational -!! decomposition in PIO that has a block-cyclic form. That is -!! something that can be described using start and count arrays. -!! Optional parameters for this subroutine allows for the specification -!! of io decomposition using iostart and iocount arrays. If iostart -!! and iocount arrays are not specified by the user, and rearrangement -!! is turned on then PIO will calculate a suitable IO decomposition -!! @param iosystem @copydoc iosystem_desc_t -!! @param basepiotype @copydoc use_PIO_kinds -!! @param dims An array of the global length of each dimesion of the variable(s) -!! @param compstart The start index into the block-cyclic computational decomposition -!! @param compcount The count for the block-cyclic computational decomposition -!! @param iodesc @copydoc iodesc_generate -!! @param iostart The start index for the block-cyclic io decomposition -!! 
@param iocount The count for the block-cyclic io decomposition -!< - subroutine PIO_initdecomp_bc(iosystem,basepiotype,dims,compstart,compcount,iodesc,iostart,iocount) - type (iosystem_desc_t), intent(inout) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer (kind=PIO_OFFSET) :: compstart(:) - integer (kind=PIO_OFFSET) :: compcount(:) - type (IO_desc_t), intent(out) :: iodesc - integer (kind=PIO_OFFSET),optional :: iostart(:) - integer (kind=PIO_OFFSET),optional :: iocount(:) - -! character(len=*), parameter :: '::PIO_initdecomp_bc' - - call piodie(__PIO_FILE__,__LINE__,'subroutine not yet implemented') - - end subroutine PIO_initdecomp_bc - -!> -!! @public -!! @ingroup PIO_initdecomp -!! @brief Implements the @ref decomp_dof for PIO_initdecomp -!! @details This provides the ability to describe a computational -!! decomposition in PIO using degrees of freedom method. This is -!! a decomposition that can not be easily described using a start -!! and count metehod (see @ref decomp_dof). This subroutine also -!! requires the user to specify the IO decomposition using the -!! degree of freedom method. This version of the subroutine -!! is most suitable for those who want complete control over -!! the actions of PIO. -!! @param iosystem @copydoc iosystem_desc_t -!! @param basepiotype @copydoc use_PIO_kinds -!! @param dims An array of the global length of each dimesion of the variable(s) -!! @param compdof Mapping of the storage order for the computatinal decomposition to its memory order -!! @param iodesc @copydoc iodesc_generate -!! @param iodof Mapping of the storage order for the IO decomposition its memory order -!< - subroutine PIO_initdecomp_dof_dof(iosystem,basepiotype,dims,compdof,iodesc,iodof) - type (iosystem_desc_t), intent(inout) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer(i4), intent(in) :: compdof(:) - type (IO_desc_t), intent(out) :: iodesc - integer(i4), intent(in) :: iodof(:) - -! character(len=*), parameter :: subName=modName//'::PIO_initdecomp_dof_dof' - -! call piodie(subname,__LINE__,'subroutine not yet implemented') - - end subroutine PIO_initdecomp_dof_dof - -!> -!! @public -!! @ingroup PIO_initdecomp -!! @brief A deprecated interface to the PIO_initdecomp method. -!! @details -!! @deprecated -!! @param iosystem : a defined pio system descriptor, see PIO_types -!! @param basepiotype : the type of variable(s) associated with this iodesc. -!! @copydoc PIO_kinds -!! @param dims : an array of the global length of each dimesion of the variable(s) -!! @param lenblocks : -!! @param compdof : mapping of the storage order of the variable to its memory order -!! @param iodofr : -!! @param iodofw : -!! 
@param iodesc @copydoc iodesc_generate -!< - subroutine initdecomp_2dof_bin_i4(iosystem,basepiotype,dims,lenblocks,compdof,iodofr,iodofw,iodesc) - use calcdisplace_mod, only : calcdisplace - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4) :: basetype - integer(i4), intent(in) :: dims(:) - integer (i4), intent(in) :: lenblocks - integer (i4), intent(in) :: compdof(:) !> global degrees of freedom for computational decomposition - integer (i4), intent(in) :: iodofr(:) !> global degrees of freedom for io decomposition - integer (i4), intent(in) :: iodofw(:) !> global degrees of freedom for io decomposition - type (io_desc_t), intent(inout) :: iodesc - - - call initdecomp_2dof_bin_i8(iosystem,basepiotype,dims,lenblocks,int(compdof,kind=pio_offset),int(iodofr,kind=pio_offset), & - int(iodofw,kind=pio_offset),iodesc) - - - end subroutine initdecomp_2dof_bin_i4 - subroutine initdecomp_2dof_bin_i8(iosystem,basepiotype,dims,lenblocks,compdof,iodofr,iodofw,iodesc) - use calcdisplace_mod, only : calcdisplace - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4) :: basetype - integer(i4), intent(in) :: dims(:) - integer (i4), intent(in) :: lenblocks - integer (kind=pio_offset), intent(in) :: compdof(:) !> global degrees of freedom for computational decomposition - integer (kind=pio_offset), intent(in) :: iodofr(:) !> global degrees of freedom for io decomposition - integer (kind=pio_offset), intent(in) :: iodofw(:) !> global degrees of freedom for io decomposition - type (io_desc_t), intent(inout) :: iodesc - - integer(kind=PIO_offset) :: start(1), count(1) - - integer (i4) :: i,ndims,n_iotasks - integer(kind=PIO_OFFSET) glength - logical :: userearranger - integer (kind=pio_offset) :: ndispr,ndispw - integer (kind=pio_offset) :: lengthr, lengthw - integer (kind=pio_offset), pointer :: displacer(:),displacew(:) - - - nullify(iodesc%start) - nullify(iodesc%count) - - basetype=PIO_type_to_mpi_type(basepiotype) - - !------------------------------------------- - ! for testing purposes set the iomap - ! (decompmap_t) to something basic for - ! testing. - !------------------------------------------- - userearranger = iosystem%userearranger - - !--------------------- - ! number of dimensions - !--------------------- - ndims = size(dims) - !--------------------- - ! total global size - !--------------------- - glength= product(int(dims,kind=PIO_OFFSET)) - if(glength > int(huge(i),kind=pio_offset)) then - call piodie( __PIO_FILE__,__LINE__, & - 'requested array size too large for this interface ') - endif - - - - lengthr = size(iodofr); - lengthw = size(iodofw) - if(lenblocks>0) then - ndispw=size(iodofw)/lenblocks - ndispr=size(iodofr)/lenblocks - else - ndispw=0 - ndispr=0 - end if - call alloc_check(displacer,int(ndispr)) - call alloc_check(displacew,int(ndispw)) - - !-------------------------------------------- - ! calculate mpi data structure displacements - !-------------------------------------------- - !dbg print *,'PIO_initdecomp: before call to calcdisplace' - if(lenblocks>0) then - call calcdisplace(lenblocks,iodofr,displacer) - call calcdisplace(lenblocks,iodofw,displacew) - end if - n_iotasks = iosystem%num_iotasks - - iodesc%glen = glength - - if(debug) print *,'iam: ',iosystem%io_rank,'initdecomp: userearranger: ',userearranger - - !--------------------------------------------- - ! 
the setup for the mpi-io type information - !--------------------------------------------- - if(iosystem%ioproc) then - !----------------------------------------------- - ! setup the data structure for the read operation - !----------------------------------------------- - iodesc%read%n_elemtype = ndispr - iodesc%read%n_words = iodesc%read%n_elemtype*lenblocks - call genindexedblock(lenblocks,basetype,iodesc%read%elemtype,iodesc%read%filetype,int(displacer)) - - !------------------------------------------------- - ! setup the data structure for the write operation - !------------------------------------------------- - iodesc%write%n_elemtype = ndispw - iodesc%write%n_words = iodesc%write%n_elemtype*lenblocks - - call genindexedblock(lenblocks,basetype,iodesc%write%elemtype,iodesc%write%filetype,int(displacew)) - - if(debug) print *,'initdecomp: at the end of subroutine' - ! if(iodesc%read%n_elemtype == 0 .and. iodesc%write%n_elemtype == 0) iosystem%ioproc = .false. - endif - - deallocate(displacer,displacew) - - - end subroutine initdecomp_2dof_bin_i8 - - -!> -!! @public -!! @ingroup PIO_initdecomp -!! @brief A deprecated interface to the PIO_initdecomp method. -!! @details -!! @deprecated -!! @param iosystem : a defined pio system descriptor, see PIO_types -!! @param basepiotype : the type of variable(s) associated with this iodesc. -!! @copydoc PIO_kinds -!! @param dims : an array of the global length of each dimesion of the variable(s) -!! @param lenblocks : -!! @param compdof : mapping of the storage order of the variable to its memory order -!! @param iodofr : -!! @param iodesc @copydoc iodesc_generate -!< - subroutine initdecomp_1dof_bin_i8(iosystem,basepiotype,dims,lenblocks,compdof,iodofr,iodesc) - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer(i4), intent(in) :: lenblocks - integer(kind=pio_offset), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer(kind=pio_offset), intent(in) :: iodofr(:) ! global degrees of freedom for io decomposition - type (io_desc_t), intent(inout) :: iodesc - - integer(kind=PIO_offset) :: start(1), count(1) - ! these are not used in the binary interface - - start(1)=-1 - count(1)=-1 - call initdecomp_1dof_nf_i8(iosystem,basepiotype,dims,lenblocks,compdof,iodofr,start, count, iodesc) - end subroutine initdecomp_1dof_bin_i8 - - subroutine initdecomp_1dof_bin_i4(iosystem,basepiotype,dims,lenblocks,compdof,iodofr,iodesc) - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer (i4), intent(in) :: lenblocks - integer (i4), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer (i4), intent(in) :: iodofr(:) ! global degrees of freedom for io decomposition - type (io_desc_t), intent(inout) :: iodesc - - integer(kind=PIO_offset) :: start(1), count(1) - ! these are not used in the binary interface - - start(1)=-1 - count(1)=-1 - call initdecomp_1dof_nf_i8(iosystem,basepiotype,dims,lenblocks, & - int(compdof,kind=PIO_OFFSET),int(iodofr,kind=PIO_OFFSET),start, count, iodesc) - end subroutine initdecomp_1dof_bin_i4 - -!> -!! @public -!! @ingroup PIO_initdecomp -!! @brief A deprecated interface to the PIO_initdecomp method. -!! @details -!! @deprecated -!! @param iosystem : a defined pio system descriptor, see PIO_types -!! @param basepiotype : the type of variable(s) associated with this iodesc. -!! 
@copydoc PIO_kinds -!! @param dims : an array of the global length of each dimesion of the variable(s) -!! @param lenblocks : -!! @param compdof : mapping of the storage order of the variable to its memory order -!! @param iodofr : -!! @param iodofw : -!! @param start : used with count to give a block description of the shape of the data -!! @param count : -!! @param iodesc @copydoc iodesc_generate -!< - subroutine initdecomp_2dof_nf_i4(iosystem,basepiotype,dims,lenblocks,compdof,iodofr,iodofw,start, count, iodesc) - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer (i4), intent(in) :: lenblocks - integer (i4), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer (i4), intent(in) :: iodofr(:) ! global degrees of freedom for io decomposition - integer (i4), intent(in) :: iodofw(:) ! global degrees of freedom for io decomposition - - type (io_desc_t), intent(inout) :: iodesc - - integer(kind=PIO_offset), intent(in) :: start(:), count(:) - type (io_desc_t) :: tmp - - - call pio_initdecomp(iosystem, basepiotype,dims,lenblocks,int(compdof,kind=PIO_OFFSET),int(iodofr,kind=PIO_OFFSET), & - int(iodofw,kind=PIO_OFFSET),start,count,iodesc) - - end subroutine initdecomp_2dof_nf_i4 - - subroutine initdecomp_2dof_nf_i8(iosystem,basepiotype,dims,lenblocks,compdof,iodofr,iodofw,start, count, iodesc) - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer (i4), intent(in) :: lenblocks - integer (kind=pio_offset), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer (kind=pio_offset), intent(in) :: iodofr(:) ! global degrees of freedom for io decomposition - integer (kind=pio_offset), intent(in) :: iodofw(:) ! global degrees of freedom for io decomposition - - type (io_desc_t), intent(inout) :: iodesc - - integer(kind=PIO_offset), intent(in) :: start(:), count(:) - type (io_desc_t) :: tmp - - - call initdecomp_1dof_nf_i8(iosystem, basepiotype, dims, lenblocks, compdof, iodofr, start, count, iodesc) - - call initdecomp_1dof_nf_i8(iosystem, basepiotype, dims, lenblocks, compdof, iodofw, start, count, tmp) - - call dupiodesc2(iodesc%write,tmp%write) - - if(debug) then - print *, __PIO_FILE__,__LINE__,iodesc%read%filetype,iodesc%read%elemtype,& - iodesc%read%n_elemtype,iodesc%read%n_words - print *, __PIO_FILE__,__LINE__,iodesc%write%filetype,iodesc%write%elemtype,& - iodesc%write%n_elemtype,iodesc%write%n_words - end if - - end subroutine initdecomp_2dof_nf_i8 - -!> -!! @public -!! @ingroup PIO_initdecomp -!! @brief A deprecated interface to the PIO_initdecomp method. -!! @details -!! @deprecated -!! @param iosystem : a defined PIO system descriptor, see pio_types -!! @param basepiotype : The type of variable(s) associated with this iodesc. -!! @copydoc PIO_kinds -!! @param dims : an array of the global length of each dimesion of the variable(s) -!! @param lenblocks : -!! @param compdof : mapping of the storage order of the variable to its memory order -!! @param iodof : -!! @param start : -!! @param count : -!! 
@param iodesc @copydoc iodesc_generate -!< - subroutine initdecomp_1dof_nf_i4(iosystem,basepiotype,dims,lenblocks,compdof,iodof,start, count, iodesc) - use calcdisplace_mod, only : calcdisplace - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer (i4), intent(in) :: lenblocks - integer (i4), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer (i4), intent(in) :: iodof(:) ! global degrees of freedom for io decomposition - type (io_desc_t), intent(inout) :: iodesc - integer :: piotype - integer(kind=PIO_offset), intent(in) :: start(:), count(:) - - call initdecomp_1dof_nf_i8(iosystem, basepiotype,dims,lenblocks,int(compdof,kind=pio_offset),int(iodof,kind=pio_offset),& - start,count,iodesc) - - end subroutine initdecomp_1dof_nf_i4 - subroutine initdecomp_1dof_nf_i8(iosystem,basepiotype,dims,lenblocks,compdof,iodof,start, count, iodesc) - use calcdisplace_mod, only : calcdisplace - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer (i4), intent(in) :: lenblocks - integer (kind=pio_offset), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer (kind=pio_offset), intent(in) :: iodof(:) ! global degrees of freedom for io decomposition - type (io_desc_t), intent(inout) :: iodesc - integer :: piotype - integer(kind=PIO_offset), intent(in) :: start(:), count(:) - - integer(i4) :: length,n_iotasks - integer(i4) :: ndims - - integer (kind=pio_offset), pointer :: displace(:) ! the displacements for the mpi data structure (read) - - integer(i4) :: prev - integer(kind=PIO_OFFSET) :: glength ! global length in words - integer(i4) :: ii,i,dis,ierr - integer(i4),pointer, dimension(:) :: blocklen,disp - logical(log_kind) :: userearranger - logical, parameter :: check = .true. - integer(kind=pio_offset) :: ndisp -#ifdef MEMCHK - integer :: msize, rss, mshare, mtext, mstack -#endif - nullify(iodesc%start) - nullify(iodesc%count) - - piotype=PIO_type_to_mpi_type(basepiotype) - - !------------------------------------------- - ! for testing purposes set the iomap - ! (decompmap_t) to something basic for - ! testing. - !------------------------------------------- -#ifdef TIMING - call t_startf("PIO:PIO_initdecomp") -#endif -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - userearranger = iosystem%userearranger - !--------------------- - ! number of dimensions - !--------------------- - ndims = size(dims) - !--------------------- - ! total global size - !--------------------- - glength= product(int(dims,kind=PIO_OFFSET)) - if(glength > huge(ndisp)) then - print *,__PIO_FILE__,__LINE__,dims,glength - call piodie( __PIO_FILE__,__LINE__, & - 'requested array size too large for this interface ') - endif - - if(lenblocks>0) then - ndisp=size(iodof)/lenblocks - else - ndisp=0 - end if - call alloc_check(displace,int(ndisp)) - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - call alloc_check(iodesc%start,ndims) - call alloc_check(iodesc%count,ndims) - iodesc%start(1:size(start)) = start(:) - iodesc%count(1:size(count)) = count(:) - !-------------------------------------------- - ! 
calculate mpi data structure displacements - !-------------------------------------------- - if(lenblocks>0) then - if(debug) print *,'PIO_initdecomp: calcdisplace',ndisp,size(iodof),lenblocks - call calcdisplace(lenblocks,iodof,displace) - end if -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - n_iotasks = iosystem%num_iotasks - length = size(iodof) - ! - ! this facilitates the use of seperate read and write descripters. - ! - iodesc%iomap%start = iosystem%io_rank*length - iodesc%iomap%length = length - iodesc%glen = glength - - if(debug) print *,'iam: ',iosystem%io_rank,'initdecomp: userearranger: ',userearranger, glength - if(userearranger) then - call piodie( __PIO_FILE__,__LINE__, & - 'this interface does not use rearranger') - - endif -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - - !--------------------------------------------- - ! the setup for the mpi-io type information - !--------------------------------------------- - if(iosystem%ioproc) then - !----------------------------------------------- - ! setup the data structure for the io operation - !----------------------------------------------- - iodesc%write%n_elemtype = ndisp - iodesc%write%n_words = iodesc%write%n_elemtype*lenblocks - - call genindexedblock(lenblocks,piotype,iodesc%write%elemtype,iodesc%write%filetype,int(displace)) - - -! call gensubarray(dims,piotype,iodesc,iodesc%write) - - - - if(debug) print *,'initdecomp: at the end of subroutine',iodesc%write%n_elemtype,iodesc%write%n_words - endif -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - call dupiodesc2(iodesc%write,iodesc%read) - if(debug) then - print *, __PIO_FILE__,__LINE__,iodesc%read%filetype,iodesc%read%elemtype,& - iodesc%read%n_elemtype,iodesc%read%n_words - print *, __PIO_FILE__,__LINE__,iodesc%write%filetype,iodesc%write%elemtype,& - iodesc%write%n_elemtype,iodesc%write%n_words - end if - call dealloc_check(displace) - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif -#ifdef TIMING - call t_stopf("PIO:PIO_initdecomp") -#endif - end subroutine initdecomp_1dof_nf_i8 - -!> -!! @public -!! @ingroup PIO_initdecomp -!! @brief Implements the @ref decomp_dof for PIO_initdecomp (previous name: \b initdecomp_1dof_nf_box) -!! @details This provides the ability to describe a computational -!! decomposition in PIO using degrees of freedom method. This is -!! a decomposition that can not be easily described using a start -!! and count method (see @ref decomp_dof). -!! Optional parameters for this subroutine allows for the specififcation of -!! io decomposition using iostart and iocount arrays. If iostart -!! and iocount arrays are not specified by the user, and rearrangement -!! is turned on then PIO will calculate an suitable IO decomposition. -!! Note that this subroutine was previously called \em initdecomp_1dof_nf_box -!! @param iosystem @copydoc iosystem_desc_t -!! @param basepiotype @copydoc use_PIO_kinds -!! @param dims An array of the global length of each dimesion of the variable(s) -!! 
@param compdof Mapping of the storage order for the computational decomposition to its memory order -!! @param iodesc @copydoc iodesc_generate -!! @param iostart The start index for the block-cyclic io decomposition -!! @param iocount The count for the block-cyclic io decomposition -!< - subroutine PIO_initdecomp_dof_i4(iosystem,basepiotype,dims,compdof, iodesc, iostart, iocount, num_ts, bsize, rearr) - type (iosystem_desc_t), intent(inout) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer (kind=PIO_offset), optional :: iostart(:), iocount(:) - type (io_desc_t), intent(inout) :: iodesc - integer(kind=PIO_OFFSET), pointer :: internal_compdof(:) - integer(i4), intent(in) :: dims(:) - integer, intent(in), optional :: rearr - !vdf optionals - integer(i4), intent(in), optional:: num_ts, bsize(3) - allocate(internal_compdof(size(compdof))) - internal_compdof = int(compdof,kind=pio_offset) - - call pio_initdecomp_dof_i8(iosystem, basepiotype, dims, internal_compdof, & - iodesc, iostart, iocount,rearr) - - deallocate(internal_compdof) - - end subroutine PIO_initdecomp_dof_i4 - - - subroutine PIO_initdecomp_dof_i8(iosystem,basepiotype,dims,compdof, iodesc, iostart, iocount, rearr) - use calcdisplace_mod, only : calcdisplace_box - use calcdecomp, only : calcstartandcount - type (iosystem_desc_t), intent(inout) :: iosystem - integer(i4), intent(in) :: basepiotype - integer(i4), intent(in) :: dims(:) - integer (kind=pio_offset), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - integer (kind=PIO_offset), optional :: iostart(:), iocount(:) - type (io_desc_t), intent(inout) :: iodesc - integer, intent(in), optional :: rearr - - integer(i4) :: length,n_iotasks - integer(i4) :: ndims - integer (i4) :: lenblocks - integer(i4) :: piotype - - integer(i4), pointer :: displace(:) ! the displacements for the mpi data structure (read) - - integer(i4) :: prev - integer(kind=PIO_OFFSET) :: glength ! global length in words - integer(i4) :: ii,i,dis,ierr - integer(i4),pointer, dimension(:) :: blocklen,disp - logical(log_kind) :: userearranger - logical, parameter :: check = .true. - integer(kind=pio_offset) :: ndisp - integer(i4) :: iosize ! rml - integer(i4) :: msg - integer(i4), allocatable :: lstart(:),lcount(:) - logical :: is_async=.false. -#ifdef MEMCHK - integer :: msize, rss, mshare, mtext, mstack -#endif - integer ierror, dsize - - nullify(displace) - -#ifdef TIMING - call t_startf("PIO:PIO_initdecomp_dof") -#endif - if(iosystem%async_interface .and. .not. iosystem%ioproc) then - msg = PIO_MSG_INITDECOMP_DOF - is_async=.true. 
- if(DebugAsync) print*,__PIO_FILE__,__LINE__, iosystem%ioranks - if(iosystem%comp_rank==0) then - call mpi_send(msg, 1, mpi_integer, iosystem%ioroot, 1, iosystem%union_comm, ierr) - end if - if(DebugAsync) print*,__PIO_FILE__,__LINE__, ierr, iosystem%ioroot, iosystem%comp_rank - - call mpi_bcast(basepiotype, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - if(DebugAsync) print*,__PIO_FILE__,__LINE__ - dsize = size(dims) - call mpi_bcast(dsize, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(dims, size(dims), mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - if(DebugAsync) print*,__PIO_FILE__,__LINE__ - call mpi_bcast(iodesc%async_id, 1, mpi_integer, iosystem%iomaster, iosystem%intercomm, ierr) - if(DebugAsync) print*,__PIO_FILE__,__LINE__, iodesc%async_id - endif - - if(minval(dims)<=0) then - print *,__PIO_FILE__,__LINE__,dims - call piodie(__PIO_FILE__,__LINE__,'bad value in dims argument') - end if - - if (iosystem%comp_rank == 0 .and. debug) & - print *,iosystem%comp_rank,': invoking PIO_initdecomp_dof' - - if (iosystem%comp_rank == 0 .and. present(rearr)) then - print *,'WARNING: Rearr optional argument is a pio2 feature, ignored in pio1' - endif - - - - if(DebugAsync) print*,__PIO_FILE__,__LINE__ - piotype=PIO_type_to_mpi_type(basepiotype) - - !------------------------------------------- - ! for testing purposes set the iomap - ! (decompmap_t) to something basic for - ! testing. - !------------------------------------------- -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - userearranger = iosystem%userearranger - !--------------------- - ! number of dimensions - !--------------------- - ndims = size(dims) - !--------------------- - ! total global size - !--------------------- - glength= product(int(dims,kind=PIO_OFFSET)) - if(glength > huge(int(i,kind=pio_offset))) then !not sure if this works, glength is pio_offset, if its > pio_offset range then - call piodie( __PIO_FILE__,__LINE__, & !it will simply wrap around rather than be > max_int(pio_offset) - 'requested array size too large for this interface ') !might be better to use a temp 8 byte int to store results - !of dims product and compare to the maxint(pio_offset) - endif - - - - ! remember iocount() is only defined on io procs - call alloc_check(iodesc%start,ndims) - call alloc_check(iodesc%count,ndims) - iodesc%basetype=piotype - - iodesc%compsize=size(compdof) - - iodesc%start=0 - iodesc%count=0 - - if(debug) print*,__PIO_FILE__,__LINE__, 'before calcstartandcount: ', iosystem%num_tasks, iosystem%num_iotasks, & - iosystem%io_rank, iosystem%io_comm, iosystem%ioranks - - if (iosystem%ioproc) then - if(present(iostart) .and. present(iocount)) then - iodesc%start = iostart - iodesc%count = iocount - else if(present(iostart) .or. 
present(iocount)) then - call piodie( __PIO_FILE__,__LINE__, & - 'both optional parameters start and count must be provided') - else - call calcstartandcount(basepiotype, ndims, dims, iosystem%num_iotasks, iosystem%io_rank,& - iodesc%start, iodesc%count,iosystem%num_aiotasks) - endif - iosize=1 - do i=1,ndims - iosize=iosize*iodesc%count(i) - end do - call mpi_allreduce(iosize, iodesc%maxiobuflen, 1, mpi_integer, mpi_max, iosystem%io_comm, ierr) - call checkmpireturn('mpi_allreduce in initdecomp',ierr) - - lenblocks=1 - do i=1,ndims - if(iodesc%count(i) == dims(i)) then - lenblocks=lenblocks*iodesc%count(i) - else - exit - endif - enddo - if(lenblocks==1) lenblocks=iodesc%count(1) - - if(lenblocks>0) then - ndisp=iosize/lenblocks - else - ndisp=0 - end if - call alloc_check(displace,int(ndisp)) - - if(debug) print *,'IAM: ',iosystem%comp_rank,' after getiostartandcount: count is: ',iodesc%count,& - ' lenblocks =',lenblocks,' ndisp=',ndisp - - if(debug) print *,'IAM: ',iosystem%comp_rank,' after getiostartandcount, num_aiotasks is: ', iosystem%num_aiotasks - !-------------------------------------------- - ! calculate mpi data structure displacements - !-------------------------------------------- - - if(debug) print *,'PIO_initdecomp: calcdisplace', & - ndisp,iosize,lenblocks, iodesc%start, iodesc%count - call calcdisplace_box(dims,lenblocks,iodesc%start,iodesc%count,ndims,displace) - - n_iotasks = iosystem%num_iotasks - length = iosize ! rml - - ! - ! this facilitates the use of seperate read and write descripters. - ! - - iodesc%iomap%start = iosystem%io_rank*length - iodesc%iomap%length = length - iodesc%glen = glength - endif - if(DebugAsync) print*,__PIO_FILE__,__LINE__ - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - if(debug) print *,__PIO_FILE__,__LINE__,'iam: ',iosystem%io_rank, & - 'initdecomp: userearranger: ',userearranger, glength - - if(userearranger) then - call MPI_BCAST(iosystem%num_aiotasks,1,mpi_integer,iosystem%iomaster,& - iosystem%my_comm,ierr) - call rearrange_create( iosystem,compdof,dims,ndims,iodesc) - endif - - if(DebugAsync) print*,__PIO_FILE__,__LINE__ -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - !--------------------------------------------- - ! the setup for the mpi-io type information - !--------------------------------------------- - if(iosystem%ioproc) then - !----------------------------------------------- - ! 
setup the data structure for the io operation - !----------------------------------------------- - call gensubarray(dims,piotype,iodesc,iodesc%write) - - if(debug) print *,__PIO_FILE__,__LINE__,iodesc%write%n_elemtype, & - iodesc%write%n_words,iodesc%write%elemtype,iodesc%write%filetype, lenblocks - - else - iodesc%write%n_elemtype=0 - iodesc%write%n_words=0 - iodesc%write%elemtype = mpi_datatype_null - iodesc%write%filetype = mpi_datatype_null - endif - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - call dupiodesc2(iodesc%write,iodesc%read) - - - if (associated(displace)) then - call dealloc_check(displace) - endif - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif -#ifdef TIMING - call t_stopf("PIO:PIO_initdecomp_dof") -#endif - - end subroutine PIO_initdecomp_dof_i8 - - subroutine PIO_initdecomp_dof_i8_vdc(iosystem,dims,compdof, iodesc, num_ts, bsize) - use calcdisplace_mod, only : calcdisplace_box - use calcdecomp, only : calcstartandcount - use pio_types, only : pio_real - type (iosystem_desc_t), intent(inout) :: iosystem - integer(i4), intent(in) :: dims(:) - integer (kind=pio_offset), intent(in) :: compdof(:) ! global degrees of freedom for computational decomposition - - type (io_desc_t), intent(inout) :: iodesc - !vdc args - integer(i4), intent(in) :: num_ts - integer(i4), intent(in), optional:: bsize(3) - - - integer(i4) :: length,n_iotasks - integer(i4) :: ndims - integer (i4) :: lenblocks - integer(i4) :: piotype - integer(i4), pointer :: displace(:) ! the displacements for the mpi data structure (read) - - integer(i4) :: prev - integer(kind=PIO_OFFSET) :: glength ! global length in words - integer(i4) :: ii,i,dis,ierr - integer(i4),pointer, dimension(:) :: blocklen,disp - logical(log_kind) :: userearranger - logical, parameter :: check = .true. - integer(kind=pio_offset) :: ndisp - integer(i4) :: iosize ! rml - integer(i4) :: msg, dsize - logical :: is_async=.false. -#ifdef MEMCHK - integer :: msize, rss, mshare, mtext, mstack -#endif - - integer ierror - - nullify(iodesc%start) - nullify(iodesc%count) - - -#ifdef TIMING - call t_startf("PIO:PIO_initdecomp_dof") -#endif - if(iosystem%async_interface .and. .not. iosystem%ioproc) then - msg = PIO_MSG_INITDECOMP_DOF - is_async=.true. - if(DebugAsync) print*,__PIO_FILE__,__LINE__, iosystem%ioranks - if(iosystem%comp_rank==0) then - call mpi_send(msg, 1, mpi_integer, iosystem%ioroot, 1, iosystem%union_comm, ierr) - end if - if(DebugAsync) print*,__PIO_FILE__,__LINE__, ierr, iosystem%ioroot, iosystem%comp_rank - -! call mpi_bcast(basepiotype, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) -! if(DebugAsync) print*,__PIO_FILE__,__LINE__ - dsize = size(dims) - call mpi_bcast(dsize, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(dims, dsize, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - if(DebugAsync) print*,__PIO_FILE__,__LINE__ - call mpi_bcast(iodesc%async_id, 1, mpi_integer, iosystem%iomaster, iosystem%intercomm, ierr) - if(DebugAsync) print*,__PIO_FILE__,__LINE__, iodesc%async_id - endif - - if(minval(dims)<=0) then - print *,__PIO_FILE__,__LINE__,dims - call piodie(__PIO_FILE__,__LINE__,'bad value in dims argument') - end if - - if (iosystem%comp_rank == 0 .and. 
debug) & - print *,iosystem%comp_rank,': invoking PIO_initdecomp_dof' - - if(DebugAsync) print*,__PIO_FILE__,__LINE__ - piotype=MPI_REAL4 - - !------------------------------------------- - ! for testing purposes set the iomap - ! (decompmap_t) to something basic for - ! testing. - !------------------------------------------- -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif - - userearranger = iosystem%userearranger - !--------------------- - ! number of dimensions - !--------------------- - ndims = size(dims) - !--------------------- - ! total global size - !--------------------- - glength= product(int(dims,kind=PIO_OFFSET)) - if(glength > huge(int(i,kind=pio_offset))) then !not sure if this works, glength is pio_offset, if its > pio_offset range then - call piodie( __PIO_FILE__,__LINE__, & !it will simply wrap around rather than be > max_int(pio_offset) - 'requested array size too large for this interface ') !might be better to use a temp 8 byte int to store results - !of dims product and compare to the maxint(pio_offset) - endif - - - - ! remember iocount() is only defined on io procs - call alloc_check(iodesc%start,ndims) - call alloc_check(iodesc%count,ndims) - iodesc%basetype=piotype - - iodesc%compsize=size(compdof) - - iodesc%start=0 - iodesc%count=0 - - if (iosystem%ioproc) then -#ifdef _COMPRESSION - if(.not. present(bsize)) then - vdc_bsize = (/64, 64, 64/) !default bsize of 64^3 if none is given - else - vdc_bsize = bsize - endif - vdc_ts = num_ts - - iosystem%num_aiotasks = iosystem%num_iotasks - - call init_vdc2(iosystem%io_rank, dims, vdc_bsize, vdc_iostart, vdc_iocount, iosystem%num_aiotasks) - - if(debug) then - print *, 'rank: ', iosystem%comp_rank, ' pio_init iostart: ' , vdc_iostart, ' iocount: ', vdc_iocount - endif - - vdc_dims = dims - iodesc%start = vdc_iostart - iodesc%count = vdc_iocount -#endif - - iosize=1 - do i=1,ndims - iosize=iosize*iodesc%count(i) - end do - call mpi_allreduce(iosize, iodesc%maxiobuflen, 1, mpi_integer, mpi_max, iosystem%io_comm, ierr) - call checkmpireturn('mpi_allreduce in initdecomp',ierr) - - - iodesc%iomap%start = iosystem%io_rank*iosize - iodesc%iomap%length = iosize - iodesc%glen = glength - endif - if(DebugAsync) print*,__PIO_FILE__,__LINE__ - - if(userearranger) then - call MPI_BCAST(iosystem%num_aiotasks,1,mpi_integer,iosystem%iomaster,& - iosystem%my_comm,ierr) - call rearrange_create( iosystem,compdof,dims,ndims,iodesc) - endif - - iodesc%write%n_elemtype=0 - iodesc%write%n_words=0 - iodesc%write%elemtype = mpi_datatype_null - iodesc%write%filetype = mpi_datatype_null - - call dupiodesc2(iodesc%write,iodesc%read) - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__PIO_FILE__,__LINE__,'mem=',rss - end if -#endif -#ifdef TIMING - call t_stopf("PIO:PIO_initdecomp_dof") -#endif - - end subroutine PIO_initdecomp_dof_i8_vdc - - - - !************************************ - ! dupiodesc2 - ! - - subroutine dupiodesc2(src, dest) - use pio_types, only : io_desc2_t - type(io_desc2_t), intent(in) :: src - type(io_desc2_t), intent(out) :: dest - - dest%filetype = src%filetype - dest%elemtype = src%elemtype - dest%n_elemtype = src%n_elemtype - dest%n_words = src%n_words - end subroutine dupiodesc2 - - - - !************************************ - ! genindexedblock - ! - ! given input lenblocks, basetype, and displacement - ! create two mpi types: - ! 
elemtype - a single block of basetype repeated lenblocks times - ! filetype - elemtype repeated at each entry in displacement() - ! (i.e. size(displacement) entries) - ! - - - subroutine genindexedblock(lenblocks,basetype,elemtype,filetype,displace) - use pio_types, only : pio_double, pio_int, pio_real, pio_char - integer(i4), intent(in) :: lenblocks ! length of blocks - integer(i4), intent(in) :: basetype ! base mpi type - integer(i4), intent(inout) :: elemtype ! elementary mpi type - integer(i4), intent(inout) :: filetype ! file mpi type - integer(i4), intent(in) :: displace(:) ! mpi displacement in the array - - integer(i4) :: numblocks,i,ierr, prev - - logical, parameter :: check = .true. - - integer:: nints, nadds, ndtypes, comb, lbasetype - - numblocks = size(displace) - - !tcx - allow empty displace array - if (numblocks > 0) then - prev = displace(1) - do i=2,numblocks - if(prev > displace(i)) then - print *,'genindexedblock: error detected: non-monotonic increasing displace detected!' - endif - prev = displace(i) - enddo - - endif - select case(basetype) - case (PIO_double) - lbasetype=mpi_real8 - case (PIO_real ) - lbasetype=mpi_real4 - case (PIO_int) - lbasetype=mpi_integer - case (PIO_char) - lbasetype=mpi_character - case default - lbasetype=basetype - end select - - -#ifdef _MPISERIAL - ! when compiling w/mpiserial for snetcdf output, these fields are not used - elemtype=0 - filetype=0 - ! _MPISERIAL -#else - if(lenblocks<1) then - elemtype = lbasetype - filetype = lbasetype - else - elemtype = mpi_datatype_null - filetype = mpi_datatype_null - call mpi_type_contiguous(lenblocks,lbasetype,elemtype,ierr) - if(check) call checkmpireturn('genindexedblock: after call to type_contiguous: ',ierr) - call mpi_type_commit(elemtype,ierr) - if(check) call checkmpireturn('genindexedblock: after call to type_commit: ',ierr) - if(numblocks>0) then - call mpi_type_create_indexed_block(numblocks,1,displace,elemtype,filetype,ierr) - if(check) call checkmpireturn('genindexedblock: after call to type_create_indexed_block: ',ierr) - call mpi_type_commit(filetype,ierr) - if(check) call checkmpireturn('genindexedblock: after call to type_commit: ',ierr) -! if(debug) then -! call mpi_type_get_envelope(filetype, nints, nadds, ndtypes, comb, ierr) -! print *,__PIO_FILE__,__LINE__,nints,nadds,ndtypes,comb,ierr -! endif - endif - - end if - ! _MPISERIAL -#endif - - end subroutine genindexedblock - - subroutine gensubarray(gdims,mpidatatype, iodesc, iodesc2) - use pio_types, only : io_desc2_t, io_desc_t - implicit none - - integer, intent(in) :: gdims(:) - integer, intent(in) :: mpidatatype - type(IO_desc_t), intent(in) :: iodesc - type(IO_desc2_t), intent(inout) :: iodesc2 - - integer :: ndims, ierr - integer, allocatable :: lstart(:), lcount(:) - - ndims = size(gdims) -#ifdef _MPISERIAL - iodesc2%elemtype=mpidatatype - iodesc2%filetype=mpidatatype - iodesc2%n_elemtype = 0 - iodesc2%n_words = 0 -#else - if(sum(iodesc%count)>0) then - allocate(lstart(ndims),lcount(ndims)) - lstart = 0 - lcount = int(iodesc%count) - iodesc2%n_elemtype = 1 - iodesc2%n_words = product(lcount) - call mpi_type_contiguous(iodesc2%n_words,mpidatatype,iodesc2%elemtype,ierr) - call checkmpireturn('mpi_type_create_subarray in initdecomp',ierr) - call mpi_type_commit(iodesc2%elemtype,ierr) - call checkmpireturn('mpi_type_commit in initdecomp',ierr) - -#ifdef USEMPIIO -! the filetype subarray is only used for binary file io, since we don't know in initdecomp what -! 
kind of file we are writing we need to create it anyway (as long as we are building with mpi-io support) - lstart = int(iodesc%start)-1 - - call mpi_type_create_subarray(ndims, gdims, lcount, lstart, & - mpi_order_fortran,mpidatatype, iodesc2%filetype, ierr) - call checkmpireturn('mpi_type_create_subarray in initdecomp',ierr) - call mpi_type_commit(iodesc2%filetype,ierr) - call checkmpireturn('mpi_type_commit in initdecomp',ierr) - deallocate(lstart,lcount) - -#else - iodesc2%filetype=mpi_datatype_null -#endif - else - iodesc2%elemtype=mpidatatype - iodesc2%filetype=mpidatatype - iodesc2%n_elemtype = 0 - iodesc2%n_words = 0 - endif -#endif - - - - - end subroutine gensubarray - - - ! This function initializes the rearranger communication - ! options in iosystem_desc_t - subroutine init_iosystem_rearr_options(iosystem) - use pio_types - - type (iosystem_desc_t), intent(inout) :: iosystem ! io descriptor to initalize - -#ifdef _USE_ALLTOALLW - iosystem%rearr_opts%comm_type = PIO_rearr_comm_coll -#else - iosystem%rearr_opts%comm_type = PIO_rearr_comm_p2p -#endif - -#ifdef _NO_FLOW_CONTROL - iosystem%rearr_opts%fcd = PIO_rearr_comm_fc_2d_disable -#else - ! We ignore the following flags - ! 1) _MPISERIAL : The flow control code is never used when _MPISERIAL is set - ! 2) _USE_COMP2IO_FC/_USE_IO2COMP_FC : These flags are not currently used - ! (These were experimental flags). The user can explicitly control - ! these options (comp2io and io2comp flow control) via rearranger - ! options passed to pio_init() - iosystem%rearr_opts%fcd = PIO_rearr_comm_fc_2d_enable -#endif - - ! the following will be ignored if not p2p with flow control - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_hs = DEF_P2P_HANDSHAKE - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_isend = DEF_P2P_ISEND - iosystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req = DEF_P2P_MAXREQ - - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_hs = DEF_P2P_HANDSHAKE - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_isend = DEF_P2P_ISEND - iosystem%rearr_opts%comm_fc_opts_io2comp%max_pend_req = DEF_P2P_MAXREQ - - end subroutine init_iosystem_rearr_options - - function PIO_set_rearr_opts(iosystem, comm_type, fcd,& - enable_hs_c2i, enable_isend_c2i,& - max_pend_req_c2i,& - enable_hs_i2c, enable_isend_i2c,& - max_pend_req_i2c) result(ierr) - - use pio_types - - type (iosystem_desc_t), intent(inout) :: iosystem - integer, intent(in) :: comm_type, fcd - logical, intent(in) :: enable_hs_c2i, enable_hs_i2c - logical, intent(in) :: enable_isend_c2i, enable_isend_i2c - integer, intent(in) :: max_pend_req_c2i, max_pend_req_i2c - - integer :: ierr - - ierr = PIO_NOERR - - if(max_pend_req_c2i < 0) then - if(max_pend_req_c2i /= PIO_REARR_COMM_UNLIMITED_PEND_REQ) then - call piodie(__PIO_FILE__,__LINE__,& - "Invalid max pend req (comp to io) specified") - end if - end if - if(max_pend_req_i2c < 0) then - if(max_pend_req_i2c /= PIO_REARR_COMM_UNLIMITED_PEND_REQ) then - call piodie(__PIO_FILE__,__LINE__,& - "Invalid max pend req (io to comp) specified") - end if - end if - - iosystem%rearr_opts%comm_type = comm_type - - ! Reset to defaults - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_hs = .false. - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_isend = .false. - iosystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req = DEF_P2P_MAXREQ - - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_hs = .false. - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_isend = .false. 
- iosystem%rearr_opts%comm_fc_opts_io2comp%max_pend_req = DEF_P2P_MAXREQ - if(iosystem%rearr_opts%comm_type == PIO_REARR_COMM_COLL) then - ! Init/Reset rest of the structure to valid values - iosystem%rearr_opts%fcd = PIO_REARR_COMM_FC_2D_DISABLE - else if(iosystem%rearr_opts%comm_type == PIO_REARR_COMM_P2P) then - iosystem%rearr_opts%fcd = fcd - if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_2D_DISABLE) then - ! Nothing to do here - the opts are already reset to defaults above - else if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_1D_COMP2IO) then - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_hs = enable_hs_c2i - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_isend = enable_isend_c2i - iosystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req = max_pend_req_c2i - else if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_1D_IO2COMP) then - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_hs = enable_hs_i2c - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_isend = enable_isend_i2c - iosystem%rearr_opts%comm_fc_opts_io2comp%max_pend_req = max_pend_req_i2c - else if(iosystem%rearr_opts%fcd == PIO_REARR_COMM_FC_2D_ENABLE) then - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_hs = enable_hs_c2i - iosystem%rearr_opts%comm_fc_opts_comp2io%enable_isend = enable_isend_c2i - iosystem%rearr_opts%comm_fc_opts_comp2io%max_pend_req = max_pend_req_c2i - - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_hs = enable_hs_i2c - iosystem%rearr_opts%comm_fc_opts_io2comp%enable_isend = enable_isend_i2c - iosystem%rearr_opts%comm_fc_opts_io2comp%max_pend_req = max_pend_req_i2c - else - call piodie(__PIO_FILE__,__LINE__, "Invalid flow control dir specified") - end if - else - call piodie(__PIO_FILE__,__LINE__, "Invalid comm type specified") - end if - - end function PIO_set_rearr_opts - -!> -!! @public -!! @ingroup PIO_init -!! @brief initialize the pio subsystem. -!! @details This is a collective call. Input parameters are read on comp_rank=0 -!! values on other tasks are ignored. This variation of PIO_init locates the IO tasks on a subset -!! of the compute tasks. -!! @param comp_rank mpi rank of each participating task, -!! @param comp_comm the mpi communicator which defines the collective. -!! @param num_iotasks the number of iotasks to define. -!! @param num_aggregator the mpi aggregator count -!! @param stride the stride in the mpi rank between io tasks. -!! @param rearr @copydoc PIO_rearr_method -!! @param iosystem a derived type which can be used in subsequent pio operations (defined in PIO_types). -!! @param base @em optional argument can be used to offset the first io task - default base is task 1. -!< - subroutine init_intracom(comp_rank, comp_comm, num_iotasks, num_aggregator, stride, rearr, iosystem,base, rearr_opts) - use pio_types, only : pio_internal_error, pio_rearr_none, pio_rearr_opt_t - integer(i4), intent(in) :: comp_rank - integer(i4), intent(in) :: comp_comm - integer(i4), intent(in) :: num_iotasks - integer(i4), intent(in) :: num_aggregator - integer(i4), intent(in) :: stride - integer(i4), intent(in) :: rearr - type (iosystem_desc_t), intent(out) :: iosystem ! io descriptor to initalize - - integer(i4), intent(in),optional :: base - type (pio_rearr_opt_t), intent(in), optional :: rearr_opts - - integer(i4) :: n_iotasks - integer(i4) :: length - integer(i4) :: ngseg,io_rank,i,lbase, io_comm,ierr - integer(i4) :: lstride, itmp - integer(i4), pointer :: iotmp(:),iotmp2(:) - - integer :: mpi_comm_io, intercomm - - character(len=5) :: cb_nodes - logical(log_kind), parameter :: check = .true. 
- logical :: async_setup = .false. - - integer(i4) :: j - - integer(i4) :: mpi_group_world, mpi_group_io, mpi_group_compute - - integer(i4) :: iotask - integer(i4) :: rearrFlag - -#ifdef TIMING - call t_startf("PIO:PIO_init") -#endif - - iosystem%error_handling = PIO_internal_error - iosystem%union_comm = comp_comm - iosystem%comp_comm = comp_comm - iosystem%comp_rank = comp_rank - iosystem%intercomm = MPI_COMM_NULL - iosystem%my_comm = comp_comm - iosystem%async_interface = .false. -#ifndef _MPISERIAL - iosystem%info = mpi_info_null -#endif - - - if(comp_comm == MPI_COMM_NULL) then - call piodie(__PIO_FILE__,__LINE__,'invalid comp_comm in pio_init') - end if - - call mpi_comm_size(comp_comm,iosystem%num_tasks,ierr) - iosystem%num_comptasks = iosystem%num_tasks - iosystem%union_rank = comp_rank - iosystem%rearr = rearr - if(present(rearr_opts)) then - iosystem%rearr_opts = rearr_opts - else - ! Set the default rearranger options - call init_iosystem_rearr_options(iosystem) - end if - - if(check) call checkmpireturn('init: after call to comm_size: ',ierr) - ! --------------------------------------- - ! need some more error checking code for - ! setting of number of io nodes - ! --------------------------------------- - - n_iotasks=num_iotasks - - if (n_iotasks>iosystem%num_tasks) then - n_iotasks=iosystem%num_tasks - if (iosystem%comp_rank==0) then - print *,'***warning, reducing io tasks to ',n_iotasks, & - ' because there are not enough processors' - endif - endif - - lbase = 0 - ! unless you are using all procs, shift off the masterproc - if(n_iotasks=0 .and. base= iosystem%num_tasks .and. lstride > 0 .and. n_iotasks > 0) then - print *,__PIO_FILE__,__LINE__,lbase,n_iotasks,lstride,iosystem%num_tasks - call piodie(__PIO_FILE__,__LINE__,'not enough procs for the stride') - endif - - iosystem%ioproc = .false. - -#ifdef BGx - - call alloc_check(iotmp,iosystem%num_tasks,'init:num_tasks') - call alloc_check(iotmp2,iosystem%num_tasks,'init:num_tasks') - !--------------------------------------------------- - ! Note: n_iotasks get overwritten (set correctly) in - ! determineiotasks - ! - ! Entry: it is the number of IO-clients per IO-node - ! Exit: is is the total number of IO-tasks - !--------------------------------------------------- - if (iosystem%rearr == PIO_rearr_none) then - rearrFlag = 0 - else - rearrFlag = 1 - endif - - ! more diagnostics (AB) - if (debug) print *,__PIO_FILE__,__LINE__,iosystem%comp_rank, 'START determineiotasks (num_tasks, n_iotasks, lstride, lbase)', & - iosystem%num_tasks, n_iotasks, lstride, lbase - - !now determine n_iotasks and iotasks - call determineiotasks(iosystem%comp_comm, n_iotasks, lbase, lstride, rearrFlag, iotask) - - !more diagnostics (AB) - if (debug) print *,__PIO_FILE__,__LINE__, 'CHECK 1 (myid, n_iotasks, lstride, lbase, iotask, num_iotasks):', & - iosystem%comp_rank, n_iotasks, lstride, lbase, iotask, iosystem%num_tasks - - iosystem%num_iotasks =n_iotasks - - ! now determine the iomaster and ioranks to populate iosystem - iotmp(:)=0 - if(iotask == 1) then - iosystem%ioproc = .true. 
- iotmp(comp_rank + 1) = 1 - endif - - iotmp2(:)=0 - call MPI_allreduce(iotmp, iotmp2, iosystem%num_tasks, MPI_INTEGER, MPI_SUM, comp_comm, ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - call alloc_check(iosystem%ioranks,n_iotasks,'init:n_ioranks') - j=1 - iosystem%iomaster = -1 - do i=1, iosystem%num_tasks - if(iotmp2(i) == 1) then - iosystem%ioranks(j) = i-1 - j=j+1 - if(iosystem%iomaster<0) iosystem%iomaster = i-1 - endif - enddo - call dealloc_check(iotmp) - call dealloc_check(iotmp2) - - call identity(comp_comm,iotask) - - if (debug) print *,__PIO_FILE__,__LINE__, 'CHECK 2 (myid, n_iotasks, lstride, lbase, iotask, num_iotasks) :', & - iosystem%comp_rank, n_iotasks, lstride, lbase, iotask, iosystem%num_tasks - if (debug) print *,__PIO_FILE__,__LINE__, 'IORANK CHECK for proc=:', iosystem%comp_rank, 'n_iotasks = ', & - n_iotasks, iosystem%iomaster, iosystem%ioranks(:) - - - iosystem%ioroot = iosystem%iomaster - -#else - - iosystem%num_iotasks = n_iotasks - call alloc_check(iosystem%ioranks,n_iotasks,'init:n_ioranks') - - do i=1,n_iotasks - iosystem%ioranks(i)=(lbase + (i-1)*lstride) - - if (iosystem%ioranks(i)>=iosystem%num_tasks) then - call piodie( __PIO_FILE__,__LINE__, & - 'tried to assign io processor beyond max rank ',& - iosystem%ioranks(i), & - ' num_tasks=',iosystem%num_tasks ) - endif - - if(comp_rank == iosystem%ioranks(i)) iosystem%ioproc = .true. - enddo - - - iosystem%iomaster = iosystem%ioranks(1) - iosystem%ioroot = iosystem%ioranks(1) - -#endif - - - - if(debug) print *,'init: iam: ',comp_rank,' before allocate(status): n_iotasks: ',n_iotasks - - if (iosystem%rearr == PIO_rearr_none) then - iosystem%userearranger= .false. - else - iosystem%userearranger= .true. - endif - -#ifndef _MPISERIAL - call mpi_info_create(iosystem%info,ierr) -#endif - - !--------------------------------- - ! initialize the rearranger system - !--------------------------------- - - if (iosystem%userearranger) then - call rearrange_init(iosystem) - endif - - iosystem%io_rank=-1 - call mpi_comm_group(comp_comm,mpi_group_world,ierr) - if(check) call checkmpireturn('init: after call to comm_group: ',ierr) - - call mpi_group_incl(mpi_group_world,n_iotasks,iosystem%ioranks,mpi_group_io,ierr) - if(check) call checkmpireturn('init: after call to group_range_incl: ',ierr) - - call mpi_group_free(mpi_group_world,ierr) - if(check) call checkmpireturn('init: after call to group_world: ',ierr) - - if(DebugAsync) print *,__PIO_FILE__,__LINE__,'n: ',n_iotasks, ' r: ', & - iosystem%ioranks, ' g: ',mpi_group_io - - !----------------------- - ! setup io_comm and io_rank - !----------------------- - - call mpi_comm_create(comp_comm,mpi_group_io,iosystem%io_comm,ierr) - if(check) call checkmpireturn('init: after call to comm_create: ',ierr) - - call mpi_group_free(mpi_group_io, ierr) - if(check) call checkmpireturn('init: after call to group_free: ',ierr) - - - if(iosystem%ioproc) call mpi_comm_rank(iosystem%io_comm,iosystem%io_rank,ierr) - if(check) call checkmpireturn('init: after call to comm_rank: ',ierr) - ! turn on mpi-io aggregation - !DBG print *,'PIO_init: before call to setnumagg' - itmp = num_aggregator - call mpi_bcast(itmp, 1, mpi_integer, 0, iosystem%comp_comm, ierr) - - - if(debug) print *,__LINE__,'init: iam: ',comp_rank,'io processor: ',iosystem%ioproc, 'io rank ',& - iosystem%io_rank, iosystem%iomaster, iosystem%comp_comm, iosystem%io_comm - - - if(itmp .gt. 
0) then - write(cb_nodes,('(i5)')) itmp -#ifdef BGx - call PIO_set_hint(iosystem,"bgl_nodes_pset",trim(adjustl(cb_nodes))) -#else - call PIO_set_hint(iosystem,"cb_nodes",trim(adjustl(cb_nodes))) -#endif - endif - -#ifdef PIO_GPFS_HINTS - call PIO_set_hint(iosystem,"ibm_largeblock_io","true") -#endif -#ifdef PIO_LUSTRE_HINTS - call PIO_set_hint(iosystem, 'romio_ds_read','disable') - call PIO_set_hint(iosystem,'romio_ds_write','disable') -#endif - iosystem%num_aiotasks = iosystem%num_iotasks - iosystem%numost = PIO_NUM_OST - if(debug) print *,__LINE__,'init: iam: ',comp_rank,'io processor: ',iosystem%ioproc, 'io rank ',& - iosystem%io_rank, iosystem%iomaster, iosystem%comp_comm, iosystem%io_comm - -#ifdef TIMING - call t_stopf("PIO:PIO_init") -#endif - end subroutine init_intracom - - -!> -!! @public -!! @ingroup PIO_init -!! @brief Initialize the pio subsystem. -!! @details This is a collective call. Input parameters are read on comp_rank=0 -!! values on other tasks are ignored. This variation of PIO_init sets up a distinct set of tasks -!! to handle IO, these tasks do not return from this call. Instead they go to an internal loop -!! and wait to receive further instructions from the computational tasks -!! @param component_count The number of computational components to associate with this IO component -!! @param peer_comm The communicator from which all other communicator arguments are derived -!! @param comp_comms The computational communicator for each of the computational components -!! @param io_comm The io communicator -!! @param iosystem a derived type which can be used in subsequent pio operations (defined in PIO_types). -!< - subroutine init_intercom(component_count, peer_comm, comp_comms, io_comm, iosystem, rearr_opts) - use pio_types, only : pio_internal_error, pio_rearr_box - integer, intent(in) :: component_count - integer, intent(in) :: peer_comm - integer, intent(in) :: comp_comms(component_count) ! The compute communicator - integer, intent(in) :: io_comm ! The io communicator - - type (iosystem_desc_t), intent(out) :: iosystem(component_count) ! io descriptor to initalize - type (pio_rearr_opt_t), intent(in), optional :: rearr_opts - - integer :: ierr - logical :: is_inter - logical, parameter :: check=.true. - - integer :: i, j, iam, io_leader, comp_leader - integer(i4), pointer :: iotmp(:) - character(len=5) :: cb_nodes - integer :: itmp - -#ifdef TIMING - call t_startf("PIO:PIO_init") -#endif -#if defined(NO_MPI2) || defined(_MPISERIAL) - call piodie( __PIO_FILE__,__LINE__, & - 'The PIO async interface requires an MPI2 complient MPI library') -#else - do i=1,component_count - iosystem(i)%error_handling = PIO_internal_error - iosystem(i)%comp_comm = comp_comms(i) - iosystem(i)%io_comm = io_comm - iosystem(i)%info = mpi_info_null - iosystem(i)%comp_rank= -1 - iosystem(i)%io_rank = -1 - iosystem(i)%async_interface = .true. - iosystem(i)%comproot = MPI_PROC_NULL - iosystem(i)%ioroot = MPI_PROC_NULL - iosystem(i)%compmaster= MPI_PROC_NULL - iosystem(i)%iomaster = MPI_PROC_NULL - iosystem(i)%numOST = PIO_num_OST - if(present(rearr_opts)) then - iosystem(i)%rearr_opts = rearr_opts - else - ! Set the default rearranger options - call init_iosystem_rearr_options(iosystem(i)) - end if - - - if(io_comm/=MPI_COMM_NULL) then - ! 
Find the rank of the io leader in peer_comm - call mpi_comm_rank(io_comm,iosystem(i)%io_rank, ierr) - if(iosystem(i)%io_rank==0) then - call mpi_comm_rank(peer_comm, iam, ierr) - else - iam = -1 - end if - call mpi_allreduce(iam, io_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - ! Find the rank of the comp leader in peer_comm - iam = -1 - call mpi_allreduce(iam, comp_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - ! create the intercomm - call mpi_intercomm_create(io_comm, 0, peer_comm, comp_leader, i, iosystem(i)%intercomm, ierr) - ! create the union_comm - call mpi_intercomm_merge(iosystem(i)%intercomm, .true., iosystem(i)%union_comm, ierr) - else - ! Find the rank of the io leader in peer_comm - iam = -1 - call mpi_allreduce(iam, io_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - - ! Find the rank of the comp leader in peer_comm - iosystem(i)%comp_rank = -1 - if(comp_comms(i)/=MPI_COMM_NULL) then - call mpi_comm_rank(comp_comms(i),iosystem(i)%comp_rank, ierr) - if(iosystem(i)%comp_rank==0) then - call mpi_comm_rank(peer_comm, iam, ierr) - else - iam=-1 - end if - end if - call mpi_allreduce(iam, comp_leader, 1, mpi_integer, MPI_MAX, peer_comm, ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - - ! create the intercomm - call mpi_intercomm_create(comp_comms(i), 0, peer_comm, io_leader, i, iosystem(i)%intercomm, ierr) - ! create the union comm - call mpi_intercomm_merge(iosystem(i)%intercomm, .false., iosystem(i)%union_comm, ierr) - end if - if(Debugasync) print *,__PIO_FILE__,__LINE__,i, iosystem(i)%intercomm, iosystem(i)%union_comm - - if(iosystem(i)%union_comm /= MPI_COMM_NULL) then - call mpi_comm_rank(iosystem(i)%union_comm, iosystem(i)%union_rank, ierr) - if(check) call checkmpireturn('init: after call to comm_rank: ',ierr) - call mpi_comm_size(iosystem(i)%union_comm, iosystem(i)%num_tasks, ierr) - if(check) call checkmpireturn('init: after call to comm_size: ',ierr) - - - if(io_comm /= MPI_COMM_NULL) then - call mpi_comm_size(io_comm, iosystem(i)%num_iotasks, ierr) - if(check) call checkmpireturn('init: after call to comm_size: ',ierr) - - if(iosystem(i)%io_rank==0) then - iosystem(i)%iomaster = MPI_ROOT - iosystem(i)%ioroot = iosystem(i)%union_rank - end if - iosystem(i)%ioproc = .true. - iosystem(i)%compmaster = 0 - - call pio_msg_handler_init(io_comm, iosystem(i)%io_rank) - end if - - - if(comp_comms(i) /= MPI_COMM_NULL) then - call mpi_comm_size(comp_comms(i), iosystem(i)%num_comptasks, ierr) - if(check) call checkmpireturn('init: after call to comm_size: ',ierr) - - iosystem(i)%iomaster = 0 - iosystem(i)%ioproc = .false. - if(iosystem(i)%comp_rank==0) then - iosystem(i)%compmaster = MPI_ROOT - iosystem(i)%comproot = iosystem(i)%union_rank - end if - - end if - - iosystem(i)%userearranger = .true. 
- iosystem(i)%rearr = PIO_rearr_box - - if(Debugasync) print *,__PIO_FILE__,__LINE__ - - call MPI_allreduce(iosystem(i)%comproot, j, 1, MPI_INTEGER, MPI_MAX,iosystem(i)%union_comm,ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - - iosystem%comproot=j - call MPI_allreduce(iosystem(i)%ioroot, j, 1, MPI_INTEGER, MPI_MAX,iosystem(i)%union_comm,ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - - iosystem%ioroot=j - - if(Debugasync) print *,__PIO_FILE__,__LINE__, i, iosystem(i)%comproot, iosystem(i)%ioroot - - if(io_comm/=MPI_COMM_NULL) then - call mpi_bcast(iosystem(i)%num_comptasks, 1, mpi_integer, iosystem(i)%compmaster,iosystem(i)%intercomm, ierr) - - call mpi_bcast(iosystem(i)%num_iotasks, 1, mpi_integer, iosystem(i)%iomaster, iosystem(i)%intercomm, ierr) - - call alloc_check(iotmp,iosystem(i)%num_iotasks,'init:iotmp') - iotmp(:) = 0 - iotmp( iosystem(i)%io_rank+1)=iosystem(i)%union_rank - - end if - if(comp_comms(i)/=MPI_COMM_NULL) then - call mpi_bcast(iosystem(i)%num_comptasks, 1, mpi_integer, iosystem(i)%compmaster, iosystem(i)%intercomm, ierr) - - call mpi_bcast(iosystem(i)%num_iotasks, 1, mpi_integer, iosystem(i)%iomaster, iosystem(i)%intercomm, ierr) - - call alloc_check(iotmp,iosystem(i)%num_iotasks,'init:iotmp') - iotmp(:)=0 - - end if - - iosystem(i)%my_comm = iosystem(i)%intercomm - - call alloc_check(iosystem(i)%ioranks, iosystem(i)%num_iotasks,'init:n_ioranks') - if(Debugasync) print *,__PIO_FILE__,__LINE__,iotmp - call MPI_allreduce(iotmp,iosystem(i)%ioranks,iosystem(i)%num_iotasks,MPI_INTEGER,MPI_MAX,iosystem(i)%union_comm,ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - - if(Debugasync) print *,__PIO_FILE__,__LINE__,iosystem(i)%ioranks - call dealloc_check(iotmp) - - !--------------------------------- - ! initialize the rearranger system - !--------------------------------- - if (iosystem(i)%userearranger) then - call rearrange_init(iosystem(i)) - endif - end if - -#if defined(USEMPIIO) || defined(_PNETCDF) || defined(_NETCDF4) -#ifndef _MPISERIAL - call mpi_info_create(iosystem(i)%info,ierr) - ! turn on mpi-io aggregation - !DBG print *,'PIO_init: before call to setnumagg' -! itmp = num_aggregator -! call mpi_bcast(itmp, 1, mpi_integer, 0, iosystem%union_comm, ierr) -! if(itmp .gt. 0) then -! write(cb_nodes,('(i5)')) itmp -!#ifdef BGx -! call PIO_set_hint(iosystem(i),"bgl_nodes_pset",trim(adjustl(cb_nodes))) -!#else -! call PIO_set_hint(iosystem(i),"cb_nodes",trim(adjustl(cb_nodes))) -!#endif -! endif - -#ifdef PIO_GPFS_HINTS - call PIO_set_hint(iosystem(i),"ibm_largeblock_io","true") -#endif -#ifdef PIO_LUSTRE_HINTS - call PIO_set_hint(iosystem(i), 'romio_ds_read','disable') - call PIO_set_hint(iosystem(i),'romio_ds_write','disable') -#endif -#endif -#endif - end do - - if(DebugAsync) print*,__PIO_FILE__,__LINE__, iosystem(1)%ioranks - - - iosystem%num_aiotasks = iosystem%num_iotasks - iosystem%numost = PIO_NUM_OST - - ! This routine does not return - if(io_comm /= MPI_COMM_NULL) call pio_msg_handler(component_count,iosystem) - - if(DebugAsync) print*,__PIO_FILE__,__LINE__, iosystem(1)%ioranks -#ifdef TIMING - call t_stopf("PIO:PIO_init") -#endif -#endif - end subroutine init_intercom - -!> -!! @public -!! @defgroup PIO_recommend_iotasks PIO_recommend_iotasks -!! @brief Recommend a subset of tasks in comm to use as IO tasks -!! @details This subroutine will give PIO's best recommendation for the number and -!! 
location of iotasks for a given system there is no requirement to follow this recommendation. -!! Using the recommendation requires that PIO_BOX_RERRANGE be used -!! @param A communicator of mpi tasks to choose from -!! @param miniotasks \em optional The minimum number of IO tasks the caller desires -!! @param maxiotasks \em optional The maximum number of IO tasks the caller desires -!! @param iotask if true pio recommends that this task be used as an iotask -!< - - subroutine pio_recommend_iotasks(comm, ioproc, numiotasks, miniotasks, maxiotasks ) - integer, intent(in) :: comm - logical, intent(out) :: ioproc - integer, intent(out) :: numiotasks - integer, optional, intent(in) :: miniotasks, maxiotasks - - integer :: num_tasks, ierr, iotask, iotasks, iam - - integer(i4), pointer :: iotmp(:),iotmp2(:) - - call mpi_comm_size(comm,num_tasks,ierr) - call mpi_comm_rank(comm,iam,ierr) - -#ifdef BGx - call alloc_check(iotmp,num_tasks,'init:num_tasks') - call alloc_check(iotmp2,num_tasks,'init:num_tasks') - !--------------------------------------------------- - ! Note for Blue Gene n_iotasks get overwritten in - ! determineiotasks - ! - ! Entry: it is the number of IO-clients per IO-node - ! Exit: is is the total number of IO-tasks - !--------------------------------------------------- - - numiotasks=-(miniotasks+maxiotasks)/2 - call determineiotasks(comm,numiotasks,1,0,1,iotask) - - iotmp(:)=0 - if(iotask==1) then - ioproc = .true. - iotmp(iam+1) = 1 - endif - iotmp2(:)=0 - call MPI_allreduce(iotmp,iotmp2,num_tasks,MPI_INTEGER,MPI_SUM,comm,ierr) - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierr,__PIO_FILE__,__LINE__) - - numiotasks=SUM(iotmp2) - - call dealloc_check(iotmp) - call dealloc_check(iotmp2) - - call identity(comm,iotask) -#endif - - - end subroutine pio_recommend_iotasks - - -!> -!! @public -!! @defgroup PIO_set_hint PIO_set_hint -!! @brief set file system hints using mpi_info_set -!! @details This is a collective call which expects the following parameters: -!! @param iosystem @copydoc io_desc_t -!! @param hint the string name of the hint to define -!! @param hintval the string value to set the hint to -!! @retval ierr @copydoc error_return -!< - subroutine PIO_set_hint(iosystem, hint, hintval) - type (iosystem_desc_t), intent(inout) :: iosystem ! io descriptor to initalize - character(len=*), intent(in) :: hint, hintval - - integer :: ierr -#if defined(USEMPIIO) || defined(_PNETCDF) || defined(_NETCDF4) -#ifndef _MPISERIAL - if(iosystem%ioproc .and. (iosystem%info /= MPI_INFO_NULL)) then - if(iosystem%io_rank==0 .or. Debug) print *,'Setting mpi info: ',hint,'=',hintval - call mpi_info_set(iosystem%info,hint,hintval,ierr) - call checkmpireturn('PIO_set_hint',ierr) - else - iosystem%info=mpi_info_null - end if -#endif -#endif - end subroutine PIO_set_hint - - -!> -!! @public -!! @ingroup PIO_finalize -!! @brief finalizes the pio subsystem. -!! @details This is a collective call which expects the following parameters -!! @param iosystem : @copydoc io_desc_t -!! @retval ierr @copydoc error_return -!< - subroutine finalize(iosystem,ierr) - type (iosystem_desc_t), intent(inout) :: iosystem - integer(i4), intent(out) :: ierr - - integer :: msg - - if(iosystem%async_interface .and. 
iosystem%comp_rank==0) then - !print *,'IAM: ',iosystem%comp_rank, ' ASYNC in finalize' - msg = PIO_MSG_EXIT - call mpi_send(msg, 1, mpi_integer, iosystem%ioroot, 1, iosystem%union_comm, ierr) - end if - If (associated (iosystem%ioranks)) deallocate (iosystem%ioranks) -#ifndef _MPISERIAL - if(iosystem%info .ne. mpi_info_null) then - call mpi_info_free(iosystem%info,ierr) - iosystem%info=mpi_info_null - !print *,'IAM: ',iosystem%comp_rank, ' finalize (1) error = ', ierr - endif - if(iosystem%io_comm .ne. mpi_comm_null .and. .not. iosystem%async_interface) then - call mpi_comm_free(iosystem%io_comm,ierr) - iosystem%io_comm=mpi_comm_null - !print *,'IAM: ',iosystem%comp_rank, ' finalize (2) error = ', ierr - endif -#endif - ierr = 0 - - end subroutine finalize - - -!> -!! @public -!! @ingroup PIO_getnumiotasks -!! @brief This returns the number of IO-tasks that PIO is using -!! @param iosystem : a defined pio system descriptor, see PIO_types -!! @param numiotasks : the number of IO-tasks -!< - subroutine getnumiotasks(iosystem,numiotasks) - type (iosystem_desc_t), intent(in) :: iosystem - integer(i4), intent(out) :: numiotasks - numiotasks = iosystem%num_iotasks - end subroutine getnumiotasks - - - - - !============================================= - ! dupiodesc: - ! - ! duplicate the io descriptor - ! - !============================================= - - - ! rml: possible problem here wrt dubbing the box rearranger - ! data, as well as maybe the mct rearranger??? - -!> -!! @public -!! @ingroup PIO_dupiodesc -!! @brief duplicates an existing io descriptor -!! @details -!! @param src : an io description handle returned from @ref PIO_initdecomp (see PIO_types) -!! @param dest : the newly created io descriptor with the same characteristcs as src. -!< - subroutine dupiodesc(src,dest) - - integer :: n - type (io_desc_t), intent(in) :: src - type (io_desc_t), intent(inout) :: dest - - - dest%glen = src%glen - if(associated(src%start)) then - n = size(src%start) - allocate(dest%start(n)) - dest%start(:) = src%start(:) - endif - - if(associated(src%count)) then - n = size(src%count) - allocate(dest%count(n)) - dest%count(:) = src%count(:) - endif - - !dbg print *,'before dupiodesc2' - call dupiodesc2(src%read, dest%read) - call dupiodesc2(src%write, dest%write) - !dbg print *,'after dupiodesc2' - - dest%basetype = src%basetype - - if(associated(src%dest_ioproc)) then - n = size(src%dest_ioproc) - allocate(dest%dest_ioproc(n)) - dest%dest_ioproc(:) = src%dest_ioproc(:) - endif - - if(associated(src%dest_ioindex)) then - n = size(src%dest_ioindex) - allocate(dest%dest_ioindex(n)) - dest%dest_ioindex(:) = src%dest_ioindex(:) - endif - - if(associated(src%rfrom)) then - n = size(src%rfrom) - allocate(dest%rfrom(n)) - dest%rfrom(:) = src%rfrom(:) - endif - - if(associated(src%rtype)) then - n = size(src%rtype) - allocate(dest%rtype(n)) - dest%rtype(:) = src%rtype(:) - endif - - if(associated(src%scount)) then - n = size(src%scount) - allocate(dest%scount(n)) - dest%scount(:) = src%scount(:) - endif - - if(associated(src%stype)) then - n = size(src%stype) - allocate(dest%stype(n)) - dest%stype(:) = src%stype(:) - endif - - call copy_decompmap(src%iomap,dest%iomap) - call copy_decompmap(src%compmap,dest%compmap) - - dest%compsize = src%compsize - - - end subroutine dupiodesc - - !============================================= - ! copy_decompmap: - ! - ! copy decompmap_t data structures - ! 
- !============================================= - - subroutine copy_decompmap(src,dest) - use pio_types, only : decompmap_t - type (decompmap_t), intent(in) :: src - type (decompmap_t), intent(inout) :: dest - - - dest%start = src%start - dest%length = src%length - - end subroutine copy_decompmap - -!> -!! @public -!! @ingroup PIO_setiotype -!! @brief sets the desired type of io to perform -!! @details -!! @param file @copydoc file_desc_t -!! @param iotype : @copydoc PIO_iotype -!! @param rearr : @copydoc PIO_rearr_method -!! @param rearr_opts : @copydoc PIO_rearr_options -!< - subroutine setiotype(file,iotype,rearr,rearr_opts) - - use pio_types - - type (file_desc_t), intent(inout) :: file - integer(i4), intent(in) :: iotype - integer(i4), intent(in) :: rearr - type (PIO_rearr_opt_t), intent(in), optional :: rearr_opts - - file%iotype = iotype - ! FIXME: Ideally the file_desc_t should contain a pointer to - ! iodesc_t and the rearranger and its options should be set - ! there - so that we can control the rearranger to be used - ! on a per file basis. The current design only contains - ! a pointer to the iosystem_desc_t in the file_desc_t, - ! so each call results in setting the global rearr not - ! per file rearranger - file%iosystem%rearr = rearr - - if(present(rearr_opts)) then - file%iosystem%rearr_opts = rearr_opts - end if - - end subroutine setiotype - -!> -!! @public -!! @ingroup PIO_numtoread -!! @brief returns the global number of words to read for this io descriptor -!! @details -!! @param iodesc : @copydoc io_desc_t -!! @retval num : the number of words to read -!< - integer function numtoread(iodesc) result(num) - - type (io_desc_t) :: iodesc - - num = iodesc%read%n_words - - end function numtoread - -!> -!! @public -!! @ingroup PIO_numtowrite -!! @brief returns the global number of words to write for this io descriptor -!! @details -!! @param iodesc : @copydoc io_desc_t -!< - integer function numtowrite(iodesc) result(num) - - type (io_desc_t) :: iodesc - - num = iodesc%write%n_words - - end function numtowrite - -!> -!! @public -!! @ingroup PIO_createfile -!! @brief create a file using pio -!! @details Input parameters are read on comp task 0 and ignored elsewhere -!! @param iosystem : a defined pio system descriptor created by a call to @ref PIO_init (see PIO_types) -!! @param file : the returned file descriptor -!! @param iotype : @copydoc PIO_iotype -!! @param fname : the name of the file to open -!! @param amode_in : the creation mode flag. the following flags are available: PIO_clobber, PIO_noclobber. -!! @retval ierr @copydoc error_return -!< - integer function createfile(iosystem, file,iotype, fname, amode_in) result(ierr) -#ifdef _COMPRESSION - use pio_types, only : pio_clobber, pio_noclobber, pio_iotype_vdc2 -#endif - type (iosystem_desc_t), intent(inout), target :: iosystem - type (file_desc_t), intent(out) :: file - integer, intent(in) :: iotype - character(len=*), intent(in) :: fname - integer, optional, intent(in) :: amode_in - - ! =================== - ! local variables - ! =================== - logical :: iscallback - integer :: amode - integer :: msg - logical, parameter :: check = .true. 
- character(len=9) :: rd_buffer - character(len=4) :: stripestr - character(len=9) :: stripestr2 - character(len=:), allocatable :: myfname - integer :: namelen -#ifdef _COMPRESSION - integer :: restart - - - -#endif -#ifdef TIMING - call t_startf("PIO:PIO_createfile") -#endif - - if(debug.or.debugasync) print *,'createfile: {comp,io}_rank:',iosystem%comp_rank,iosystem%io_rank, & - 'io proc: ',iosystem%ioproc,iosystem%async_interface, iotype - ierr=PIO_noerr - - - if(present(amode_in)) then - amode = amode_in - else - amode = 0 - end if - - file%iotype = iotype - - - - if(.not. (iosystem%async_interface .and. iosystem%ioproc)) then - namelen = len_trim(fname) - allocate(character(len=namelen) :: myfname) - - myfname = trim(fname) - - call mpi_bcast(amode, 1, MPI_INTEGER, 0, iosystem%comp_comm, ierr) - call mpi_bcast(file%iotype, 1, MPI_INTEGER, 0, iosystem%comp_comm, ierr) - - call mpi_bcast(myfname, namelen, mpi_character, 0, iosystem%comp_comm, ierr) - end if - - file%iosystem => iosystem - - !-------------------------------- - ! set some iotype specific stuff - !-------------------------------- - -#if defined(USEMPIIO) - if ( (file%iotype==pio_iotype_pbinary .or. file%iotype==pio_iotype_direct_pbinary) & - .and. (.not. iosystem%userearranger) ) then - write(rd_buffer,('(i9)')) 16*1024*1024 - call PIO_set_hint(iosystem, "cb_buffer_size",trim(adjustl(rd_buffer))) - endif -#endif -#ifdef PIO_LUSTRE_HINTS - write(stripestr,('(i3)')) min(iosystem%num_iotasks,iosystem%numOST) - call PIO_set_hint(iosystem,"striping_factor",trim(adjustl(stripestr))) - write(stripestr2,('(i9)')) 1024*1024 - call PIO_set_hint(iosystem,"striping_unit",trim(adjustl(stripestr2))) -#endif - -#ifndef _NETCDF4 - if(file%iotype==pio_iotype_netcdf4p .or. file%iotype==pio_iotype_netcdf4c) then - print *, 'WARNING: PIO was not built with NETCDF 4 support changing iotype to netcdf' - file%iotype = pio_iotype_netcdf - end if -#endif - if(iosystem%async_interface .and. .not. iosystem%ioproc) then - msg = PIO_MSG_CREATE_FILE - if(iosystem%comp_rank==0) then - call mpi_send(msg, 1, mpi_integer, iosystem%ioroot, 1, iosystem%union_comm, ierr) - end if - call mpi_bcast(namelen, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(myfname, namelen, mpi_character, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(iotype, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(amode, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - - end if - select case(iotype) - case(pio_iotype_pbinary, pio_iotype_direct_pbinary) - if(present(amode_in) .and. iosystem%io_rank==0) then - print *, 'warning, the mode argument is currently ignored for binary file operations' - end if - ierr = create_mpiio(file,myfname) - case( pio_iotype_pnetcdf, pio_iotype_netcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c) - if(debug) print *,__PIO_FILE__,__LINE__,' open: ', trim(myfname), amode - ierr = create_nf(file,trim(myfname), amode) - if(debug .and. iosystem%io_rank==0)print *,__PIO_FILE__,__LINE__,' open: ', myfname, file%fh, ierr - case(pio_iotype_binary) - print *,'createfile: io type not supported' -#ifdef _COMPRESSION - case(pio_iotype_vdc2) - restart=0 - if(iosystem%io_rank==0) then - restart = 1 - call createvdf(vdc_dims, vdc_bsize, vdc_ts, restart , F_C_String_dup(trim(fname)) ) - else if(iosystem%io_rank>0) then - call createvdf(vdc_dims, vdc_bsize, vdc_ts, restart , F_C_String_dup(trim(fname))) - endif -#endif - end select - if(ierr==0) file%file_is_open=.true. 
- - if(debug .and. file%iosystem%io_rank==0) print *,__PIO_FILE__,__LINE__,'open: ',file%fh, myfname - deallocate(myfname) -#ifdef TIMING - call t_stopf("PIO:PIO_createfile") -#endif - end function createfile -!> -!! @public -!! @defgroup PIO_setnum_OST PIO_setnum_OST -!! @brief Sets the default number of Lustre Object Storage Targets (OST) -!! @details When PIO is used on a Lustre filesystem, this subroutine sets the -!! default number Object Storage targets (OST) to use. PIO -!! will use min(num_aiotasks,numOST) where num_aiotasks the the -!! actual number of active iotasks -!! @param iosystem : a defined pio system descriptor created by a call to @ref PIO_init (see PIO_types) -!! @param numOST : The number of OST to use by default -!< - subroutine PIO_setnum_OST(iosystem,numOST) - type (iosystem_desc_t), intent(inout), target :: iosystem - integer(i4) :: numOST - iosystem%numOST = numOST - end subroutine PIO_setnum_OST -!> -!! @public -!! @defgroup PIO_getnum_OST PIO_getnum_OST -!! @brief Sets the default number of Lustre Object Storage Targets (OST) -!! @details When PIO is used on a Lustre filesystem, this subroutine gets the -!! default number Object Storage targets (OST) to use. -!! @param iosystem : a defined pio system descriptor created by a call to @ref PIO_init (see PIO_types) -!! @retval numOST : The number of OST to use. -!< - integer function PIO_getnum_OST(iosystem) result(numOST) - type (iosystem_desc_t), intent(inout), target :: iosystem - numOST = iosystem%numOST - end function PIO_getnum_OST -!> -!! @public -!! @ingroup PIO_openfile -!! @brief open an existing file using pio -!! @details Input parameters are read on comp task 0 and ignored elsewhere. -!! @param iosystem : a defined pio system descriptor created by a call to @ref PIO_init (see PIO_types) -!! @param file : the returned file descriptor -!! @param iotype : @copybrief PIO_iotype -!! @param fname : the name of the file to open -!! @param mode : a zero value (or PIO_nowrite) specifies the default -!! behavior: open the dataset with read-only access, buffering and -!! caching accesses for efficiency otherwise, the creation mode is -!! PIO_write. setting the PIO_write flag opens the dataset with -!! read-write access. ("writing" means any kind of change to the dataset, -!! including appending or changing data, adding or renaming dimensions, -!! variables, and attributes, or deleting attributes.) -!! @retval ierr @copydoc error_return -!< - integer function PIO_openfile(iosystem, file, iotype, fname,mode, CheckMPI) result(ierr) -#ifdef _COMPRESSION - use pio_types, only : pio_iotype_vdc2 -#endif - type (iosystem_desc_t), intent(inout), target :: iosystem - type (file_desc_t), intent(out) :: file - integer, intent(in) :: iotype - character(len=*), intent(in) :: fname - integer, optional, intent(in) :: mode - logical, optional, intent(in) :: CheckMPI - ! =================== - ! local variables - ! ================ - integer :: amode, msg - logical, parameter :: check = .true. - character(len=9) :: rd_buffer - character(len=:), allocatable :: myfname - integer :: namelen -#ifdef TIMING - call t_startf("PIO:PIO_openfile") -#endif - - - - if(Debug .or. Debugasync) print *,'PIO_openfile: {comp,io}_rank:',iosystem%comp_rank,iosystem%io_rank,& - 'io proc: ',iosystem%ioproc - ierr=PIO_noerr - - file%iosystem => iosystem - - if(present(mode)) then - amode = mode - else - amode = 0 - end if - !-------------------------------- - ! 
set some iotype specific stuff - !-------------------------------- - - if(iosystem%num_iotasks.eq.1.and.iotype.eq.pio_iotype_pnetcdf) then -#if defined(_NETCDF) - file%iotype=pio_iotype_netcdf -#else - file%iotype = iotype -#endif - else - file%iotype = iotype - end if - -#if defined(USEMPIIO) - if ( (file%iotype==pio_iotype_pbinary .or. file%iotype==pio_iotype_direct_pbinary) & - .and. (.not. iosystem%userearranger) ) then - write(rd_buffer,('(i9)')) 16*1024*1024 - call PIO_set_hint(iosystem, "cb_buffer_size",trim(adjustl(rd_buffer))) - endif -#endif -#ifndef _NETCDF4 - if(file%iotype==pio_iotype_netcdf4p .or. file%iotype==pio_iotype_netcdf4c) then - print *, 'WARNING: PIO was not built with NETCDF 4 support changing iotype to netcdf' - file%iotype = pio_iotype_netcdf - end if -#endif - if(.not. (iosystem%ioproc .and. iosystem%async_interface)) then - namelen = len_trim(fname) - allocate(character(len=namelen) :: myfname) - - myfname = trim(fname) - - call mpi_bcast(amode, 1, MPI_INTEGER, 0, iosystem%comp_comm, ierr) - call mpi_bcast(file%iotype, 1, MPI_INTEGER, 0, iosystem%comp_comm, ierr) - - - call mpi_bcast(myfname, namelen, mpi_character, 0, iosystem%comp_comm, ierr) - end if - - if(iosystem%async_interface .and. .not. iosystem%ioproc) then - msg = PIO_MSG_OPEN_FILE - if(iosystem%comp_rank==0) then - call mpi_send(msg, 1, mpi_integer, iosystem%ioroot, 1, iosystem%union_comm, ierr) - end if - - call mpi_bcast(namelen, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(myfname, namelen, mpi_character, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(iotype, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - call mpi_bcast(amode, 1, mpi_integer, iosystem%compmaster, iosystem%intercomm, ierr) - end if - - select case(iotype) - case(pio_iotype_pbinary, pio_iotype_direct_pbinary) - if(amode /=0) then - print *, 'warning, the mode argument is currently ignored for binary file operations' - end if - if (present(CheckMPI)) then - ierr = open_mpiio(file,myfname, CheckMPI) - else - ierr = open_mpiio(file,myfname) - end if - case( pio_iotype_pnetcdf, pio_iotype_netcdf, pio_iotype_netcdf4c, pio_iotype_netcdf4p) - ierr = open_nf(file,myfname,amode) - if(debug .and. iosystem%io_rank==0)print *,__PIO_FILE__,__LINE__,' open: ', myfname, file%fh - case(pio_iotype_binary) ! appears to be a no-op -#ifdef _COMPRESSION - case(pio_iotype_vdc2) !equivalent to calling create def without clobbering the file, arguments dont matter - if(iosystem%io_rank>=0) then - call createvdf(vdc_dims, vdc_bsize, vdc_ts, 0 , F_C_STRING_DUP(trim(myfname))) - end if -#endif - end select - if(Debug .and. file%iosystem%io_rank==0) print *,__PIO_FILE__,__LINE__,'open: ',file%fh, myfname - if(ierr==0) file%file_is_open=.true. - deallocate(myfname) -#ifdef TIMING - call t_stopf("PIO:PIO_openfile") -#endif - end function PIO_openfile - -!> -!! @public -!! @ingroup PIO_syncfile -!! @brief synchronizing a file forces all writes to complete before the subroutine returns. -!! -!! @param file @copydoc file_desc_t -!< - subroutine syncfile(file) - use piodarray, only : darray_write_complete - implicit none - type (file_desc_t), target :: file - integer :: ierr, msg - type(iosystem_desc_t), pointer :: ios - - - ios => file%iosystem - if(ios%async_interface .and. .not. 
ios%ioproc) then - msg = PIO_MSG_SYNC_FILE - if(ios%comp_rank==0) then - call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - end if - - call mpi_bcast(file%fh, 1, mpi_integer, ios%compmaster, ios%intercomm, ierr) - end if - - select case(file%iotype) - case( pio_iotype_pnetcdf, pio_iotype_netcdf, pio_iotype_netcdf4c,pio_iotype_netcdf4p) - call darray_write_complete(file) - ierr = sync_nf(file) - case(pio_iotype_pbinary, pio_iotype_direct_pbinary) - case(pio_iotype_binary) - end select - end subroutine syncfile -!> -!! @public -!! @ingroup PIO_freedecomp -!! @brief free all allocated storage associated with this decomposition -!! @details -!! @param ios : a defined pio system descriptor created by call to @ref PIO_init (see PIO_types) -!! @param iodesc @copydoc io_desc_t -!< - subroutine freedecomp_ios(ios,iodesc) - implicit none - type (iosystem_desc_t) :: ios - type (io_desc_t) :: iodesc - integer :: ierr, msg - - if(ios%async_interface .and. .not. ios%ioproc) then - msg = PIO_MSG_FREEDECOMP - if(ios%comp_rank==0) then - call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - end if - call MPI_Barrier(ios%comp_comm,ierr) - call mpi_bcast(iodesc%async_id,1, mpi_integer, ios%compmaster,ios%intercomm, ierr) - end if - call MPI_Barrier(ios%union_comm,ierr) - - iodesc%async_id=-1 - call rearrange_free(ios,iodesc) - -#ifndef _MPISERIAL - if(ios%ioproc) then -! if(debug) print *,__PIO_FILE__,__LINE__,iodesc%write%n_elemtype,iodesc%write%n_words, & -! iodesc%write%elemtype,iodesc%write%filetype - - if((iodesc%read%filetype .ne. mpi_datatype_null) & - .and. (iodesc%read%filetype .ne. iodesc%write%filetype) .and. & - iodesc%read%n_words>0) then - call mpi_type_free(iodesc%read%filetype,ierr) - call checkmpireturn('freedecomp mpi_type_free: ',ierr) - call mpi_type_free(iodesc%read%elemtype,ierr) - call checkmpireturn('freedecomp mpi_type_free: ',ierr) - iodesc%read%filetype=mpi_datatype_null - endif - if(iodesc%write%filetype .ne. mpi_datatype_null .and. & - iodesc%write%n_words>0) then - call mpi_type_free(iodesc%write%filetype,ierr) - call checkmpireturn('freedecomp mpi_type_free: ',ierr) - call mpi_type_free(iodesc%write%elemtype,ierr) - call checkmpireturn('freedecomp mpi_type_free: ',ierr) - iodesc%write%filetype=mpi_datatype_null - endif - - end if -#endif - - if(associated(iodesc%start)) then - call dealloc_check(iodesc%start,'iodesc%start') - nullify(iodesc%start) - end if - - if(associated(iodesc%count)) then - call dealloc_check(iodesc%count,'iodesc%count') - nullify(iodesc%count) - end if - end subroutine freedecomp_ios -!> -!! @public -!! @ingroup PIO_freedecomp -!! @brief free all allocated storage associated with this decomposition -!! @details -!! @param file @copydoc file_desc_t -!! @param iodesc : @copydoc io_desc_t -!! @retval ierr @copydoc error_return -!< - subroutine freedecomp_file(file,iodesc) - implicit none - type (file_desc_t) :: file - type (io_desc_t) :: iodesc - - call freedecomp_ios(file%iosystem, iodesc) - - end subroutine freedecomp_file - -!> -!! @public -!! @ingroup PIO_closefile -!! @brief close a disk file -!! @details -!! @param file @copydoc file_desc_t -!< - subroutine closefile(file) - use piodarray, only : darray_write_complete - type (file_desc_t),intent(inout) :: file - - integer :: ierr, msg - integer :: iotype - logical, parameter :: check = .true. - -#ifdef TIMING - call t_startf("PIO:PIO_closefile") -#endif - if(file%iosystem%async_interface .and. .not. 
file%iosystem%ioproc) then - msg = PIO_MSG_CLOSE_FILE - if(file%iosystem%comp_rank==0) then - call mpi_send(msg, 1, mpi_integer, file%iosystem%ioroot, 1, file%iosystem%union_comm, ierr) - end if - call mpi_bcast(file%fh, 1, mpi_integer, file%iosystem%compmaster, file%iosystem%intercomm, ierr) - end if - - if(debug .and. file%iosystem%io_rank==0) & - print *,__PIO_FILE__,__LINE__,'close: ',file%fh - iotype = file%iotype - select case(iotype) - case(pio_iotype_pbinary, pio_iotype_direct_pbinary) - ierr = close_mpiio(file) - case( pio_iotype_pnetcdf, pio_iotype_netcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c) - call darray_write_complete(file) - ierr = close_nf(file) - case(pio_iotype_binary) - print *,'closefile: io type not supported' - end select - if(ierr==0) file%file_is_open=.false. - -#ifdef TIMING - call t_stopf("PIO:PIO_closefile") -#endif - - - end subroutine closefile - - - !****************************** - ! read_ascii - ! - - subroutine read_ascii(rank,iobuf,size) - - integer, intent(in) :: rank - real (r8), dimension(:) :: iobuf - integer, intent(in) :: size - - character(len=80) filename - integer lun - integer ios - integer i - - lun=10+rank - write(filename,"('fort.',i2)" ) lun - write(6,*) 'filename is:', filename - - open(lun,file=filename,status='old',iostat=ios) - if (ios /= 0) then - write(6,*) rank,': could not open ascii file: ',filename - endif - - do i=1,size - read(unit=lun,fmt=*,iostat=ios) iobuf(i) - if (ios /= 0) then - write (6,*) rank,': error reading item ',i,' of ',size -#ifndef CPRNAG - call abort -#else - stop -#endif - endif - - end do - - close(lun) - - end subroutine read_ascii - - integer function pio_iotask_rank(pio_subsystem) - type(iosystem_desc_t), intent(in) :: pio_subsystem - - pio_iotask_rank = pio_subsystem%io_rank - - - end function pio_iotask_rank - - - -end module piolib_mod - - !||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/src/externals/pio1/pio/pionfatt_mod.F90.in b/src/externals/pio1/pio/pionfatt_mod.F90.in deleted file mode 100644 index b6432e0a0bc..00000000000 --- a/src/externals/pio1/pio/pionfatt_mod.F90.in +++ /dev/null @@ -1,524 +0,0 @@ -#define __PIO_FILE__ "pionfatt_mod.F90" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief NetCDF attribute interface to PIO -!< -module pionfatt_mod - use pio_kinds, only : r4, r8, i4 - use pio_types, only : iotype_netcdf, iotype_pnetcdf, pio_noerr - use pio_types, only : pio_iotype_netcdf4p, pio_iotype_netcdf4c - use pio_types, only : file_desc_t, var_desc_t, iosystem_desc_t - use pio_kinds, only : pio_offset - use pio_support, only : piodie, checkmpireturn, debug, debugasync - use pio_utils, only : check_netcdf, bad_iotype - -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -# include <pnetcdf.inc> /* _EXTERNAL */ -#endif -#endif - include 'mpif.h' ! _EXTERNAL - - -!> -!! @private -!< - public :: put_att - interface put_att - module procedure put_att_{TYPE}, put_att_desc_{TYPE} - ! TYPE real,int,double - module procedure put_att_1d_{TYPE}, put_att_desc_1d_{TYPE} - end interface - - -!> -!! @private -!< - public :: get_att - interface get_att - module procedure get_att_{TYPE}, get_att_desc_{TYPE} - ! TYPE real,int,double - module procedure get_att_1d_{TYPE}, get_att_desc_1d_{TYPE} - end interface - -!> -!! @public -!! @defgroup PIO_put_att PIO_put_att -!! @brief Writes an netcdf attribute to a file -!< -!> -!! @public -!! @defgroup PIO_get_att PIO_get_att -!! 
@brief Reads an netcdf attribute from a file -!< - - private :: modName - character(len=*), parameter :: modName='pionfatt_mod' - -contains - -!> -!! @public -!! @ingroup PIO_put_att -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param name : name of the attribute to add -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function put_att_{TYPE} (File, varid, name, value) result(ierr) - use pio_msg_mod, only : pio_msg_putatt - type (File_desc_t), intent(inout) , target :: File - integer, intent(in) :: varid - character(len=*), intent(in) :: name - {VTYPE}, intent(in) :: value - - type(iosystem_desc_t), pointer :: ios -#if ({ITYPE} != TYPETEXT) -#ifdef DEBUG - {VTYPE} :: chkval -#endif -#endif - !------------------ - ! Local variables - !------------------ - character(len=*), parameter :: subName=modName//'::put_att_{TYPE}' - integer :: iotype, mpierr, msg, itype - integer :: clen=1, nlen - - iotype = File%iotype - ierr=PIO_noerr - -#if ({ITYPE} == TYPETEXT) - clen = len_trim(value) -#else - clen = 1 -#endif - ios => file%iosystem - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_PUTATT - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - nlen=len_trim(name) - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) -#if ({ITYPE} == TYPETEXT) - call MPI_BCAST(clen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif - end if - - if(ios%async_interface) then - call MPI_BCAST(value, clen, {MPITYPE}, ios%compmaster, ios%my_comm, mpierr) - end if - - if(Ios%IOproc) then - select case(iotype) -#ifdef _PNETCDF - case(iotype_pnetcdf) -#if ({ITYPE} == TYPETEXT) - ierr= nfmpi_put_att_text (File%fh,varid,name,int(clen,kind=PIO_OFFSET),value) -#else - -#ifdef DEBUG - print *, __PIO_FILE__,__LINE__,value - call MPI_ALLREDUCE(value, chkval, 1, {MPITYPE}, MPI_MAX ,Ios%io_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - if(chkval /= value) then - print *,__PIO_FILE__,__LINE__,chkval,value, mpierr - call piodie(__PIO_FILE__,__LINE__,'attributes do not match') - end if -#endif -#undef DEBUG -#ifdef USE_PNETCDF_MOD - ierr =nf90mpi_put_att (File%fh,varid,name,value) -#else - ierr= nfmpi_put_att_{TYPE} (File%fh,varid,name, nf_{TYPE} , int(clen,kind=PIO_OFFSET),value) -#endif -#endif -#endif - -#ifdef _NETCDF -! case(iotype_netcdf,PIO_iotype_netcdf4c,PIO_iotype_netcdf4p) - case(iotype_netcdf,PIO_iotype_netcdf4c) - if (Ios%io_rank==0) then - if(debug) print *,__PIO_FILE__,__LINE__,name,value - ierr=nf90_put_att(File%fh,varid,name,value) - endif - case(PIO_iotype_netcdf4p) - ierr=nf90_put_att(File%fh,varid,name,value) -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - end function put_att_{TYPE} - -!pl The next line is needed by genf90.pl, do not remove it. -! TYPE real,double,int -!> -!! @public -!! @ingroup PIO_put_att -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! 
@param varid : The netcdf variable identifier -!! @param name : name of the attribute to add -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function put_att_1d_{TYPE} (File, varid, name, value) result(ierr) - use pio_msg_mod, only : pio_msg_putatt_1D - type (File_desc_t), intent(inout) , target :: File - integer, intent(in) :: varid - character(len=*), intent(in) :: name - {VTYPE}, intent(in) :: value(:) - type(iosystem_desc_t), pointer :: ios - -#ifdef DEBUG - {VTYPE} :: chkval -#endif - !------------------ - ! Local variables - !------------------ - - character(len=*), parameter :: subName=modName//'::put_att_1d_{TYPE}' - integer :: iotype, mpierr, msg - integer :: clen, itype, nlen - - iotype = File%iotype - ierr=PIO_noerr - clen = size(value) - - ios => file%iosystem - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_PUTATT_1D - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - nlen = len(name) - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(clen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - call mpi_barrier(ios%union_comm, mpierr) - - if(ios%async_interface) then - call MPI_BCAST(value, clen, {MPITYPE}, ios%compmaster, ios%my_comm, mpierr) - end if - - if(Debug.or.DebugAsync) print *,__PIO_FILE__,__LINE__,clen,value - - - if(Ios%IOproc) then - select case(iotype) -#ifdef _PNETCDF - case(iotype_pnetcdf) -#ifdef DEBUG - print *, __PIO_FILE__,__LINE__,value - call MPI_ALLREDUCE(value, chkval, 1, {MPITYPE}, MPI_MAX ,Ios%io_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - if(chkval /= value) then - print *,__PIO_FILE__,__LINE__,chkval,value, mpierr - call piodie(__PIO_FILE__,__LINE__,'attributes do not match') - end if -#endif -#undef DEBUG - ierr= nfmpi_put_att_{TYPE} (File%fh,varid,name, nf_{TYPE} , int(clen,kind=PIO_OFFSET),value) -#endif -#ifdef _NETCDF - case(iotype_netcdf, PIO_iotype_netcdf4c) - if (Ios%io_rank==0) then - ierr=nf90_put_att(File%fh,varid,name,value) - endif - case(pio_iotype_netcdf4p) - ierr=nf90_put_att(File%fh,varid,name,value) -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - end function put_att_1d_{TYPE} - -!> -!! @public -!! @ingroup PIO_put_att -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varDesc @copydoc var_desc_t -!! @param name : name of the attribute to add -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function put_att_desc_{TYPE} (File,varDesc,name,value) result(ierr) - - type (File_desc_t), intent(inout) , target :: File - type (VAR_desc_t), intent(in) :: varDesc - character(len=*), intent(in) :: name - {VTYPE}, intent(in) :: value - - ierr = put_att_{TYPE} (File,varDesc%varid,name,value) - - end function put_att_desc_{TYPE} - -! TYPE real,int,double -!> -!! @public -!! @ingroup PIO_put_att -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! 
@param varDesc @copydoc var_desc_t -!! @param name : name of the attribute to add -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function put_att_desc_1d_{TYPE} (File,varDesc,name,value) result(ierr) - - type (File_desc_t), intent(inout) , target :: File - type (VAR_desc_t), intent(in) :: varDesc - character(len=*), intent(in) :: name - {VTYPE}, intent(in) :: value(:) - - character(len=*), parameter :: subName=modName//'::put_att_desc_1d_{TYPE}' - - ierr = put_att_1d_{TYPE} (File,varDesc%varid,name,value) - - end function put_att_desc_1d_{TYPE} - - -!> -!! @public -!! @ingroup PIO_get_att -!! @brief Reads an netcdf attribute from a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varDesc @copydoc var_desc_t -!! @param name : name of the attribute to get -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function get_att_desc_{TYPE} (File,varDesc,name,value) result(ierr) - - type (File_desc_t), intent(inout) , target :: File - type (VAR_desc_t), intent(in) :: varDesc - character(len=*), intent(in) :: name - {VTYPE}, intent(out) :: value - - character(len=*), parameter :: subName=modName//'::get_att_desc_{TYPE}' - - ierr = get_att_{TYPE} (File,varDesc%varid,name,value) - - end function get_att_desc_{TYPE} - -! TYPE real,int,double -!> -!! @public -!! @ingroup PIO_get_att -!! @brief Reads an netcdf attribute from a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varDesc @copydoc var_desc_t -!! @param name : name of the attribute to get -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function get_att_desc_1d_{TYPE} (File,varDesc,name,value) result(ierr) - - type (File_desc_t), intent(inout) , target :: File - type (VAR_desc_t), intent(in) :: varDesc - character(len=*), intent(in) :: name - {VTYPE}, intent(out) :: value(:) - - character(len=*), parameter :: subName=modName//'::get_att_desc_1d_{TYPE}' - - ierr = get_att_1d_{TYPE} (File,varDesc%varid,name,value) - - end function get_att_desc_1d_{TYPE} - -!> -!! @public -!! @ingroup PIO_get_att -!! @brief Reads an netcdf attribute from a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param name : name of the attribute to get -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function get_att_{TYPE} (File,varid,name,value) result(ierr) - use pio_msg_mod, only : pio_msg_getatt - type (File_desc_t), intent(in) , target :: File - integer(i4), intent(in) :: varid - character(len=*), intent(in) :: name - {VTYPE}, intent(out) :: value - type(iosystem_desc_t), pointer :: ios - !------------------ - ! Local variables - !------------------ - character(len=*), parameter :: subName=modName//'::get_att_{TYPE}' - integer :: iotype, mpierr, msg - integer :: clen=1, itype, nlen - - iotype = File%iotype - ierr=PIO_noerr -#if ({ITYPE} == TYPETEXT) - clen = len(value) - value = ' ' -#endif - ios => file%iosystem - if(ios%async_interface .and. .not. 
ios%ioproc ) then - msg=PIO_MSG_GETATT - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - nlen = len(name) - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) -#if ({ITYPE} == TYPETEXT) - call MPI_BCAST(clen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif - end if - - - if(Ios%IOproc) then - select case(iotype) -#ifdef _PNETCDF - case(iotype_pnetcdf) - ierr= nfmpi_get_att_{TYPE} (File%fh,varid,name,value) -#endif - -#ifdef _NETCDF - case(iotype_netcdf,PIO_iotype_netcdf4c) - if (Ios%io_rank==0) then - ierr=nf90_get_att(File%fh,varid,name,value) - endif - if(Ios%num_tasks==Ios%num_iotasks) then - call MPI_BCAST(value,clen ,{MPITYPE} ,0,Ios%IO_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - end if - case(PIO_iotype_netcdf4p) - ierr=nf90_get_att(File%fh,varid,name,value) -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(value, clen, {MPITYPE},Ios%iomaster,Ios%my_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - end if - end function get_att_{TYPE} - -! TYPE real,int,double -!> -!! @public -!! @ingroup PIO_get_att -!! @brief Reads an netcdf attribute from a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param name : name of the attribute to get -!! @param value : The value for the netcdf attribute -!! @retval ierr @copydoc error_return -!< - integer function get_att_1d_{TYPE} (File,varid,name,value) result(ierr) - use pio_msg_mod, only : pio_msg_getatt_1d - - type (File_desc_t), intent(in) , target :: File - integer(i4), intent(in) :: varid - character(len=*), intent(in) :: name - {VTYPE}, intent(out) :: value(:) - type(iosystem_desc_t), pointer :: ios - !------------------ - ! Local variables - !------------------ - character(len=*), parameter :: subName=modName//'::get_att_1d_{TYPE}' - integer :: iotype, mpierr, msg - integer :: clen, itype, nlen - - iotype = File%iotype - ierr=PIO_noerr - clen = size(value) - - ios => file%iosystem - if(ios%async_interface .and. .not. 
ios%ioproc ) then - msg=PIO_MSG_GETATT_1D - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - nlen = len(name) - call MPI_BCAST(nlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(name,nlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(clen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - - - - - if(Ios%IOproc) then - select case(iotype) -#ifdef _PNETCDF - case(iotype_pnetcdf) - ierr= nfmpi_get_att_{TYPE} (File%fh,varid,name,value) -#endif -#ifdef _NETCDF - case(iotype_netcdf,PIO_iotype_netcdf4c) - if (Ios%io_rank==0) then - ierr=nf90_get_att(File%fh,varid,name,value) - endif - if(Ios%num_tasks==Ios%num_iotasks) then - call MPI_BCAST(value,clen ,{MPITYPE} ,0,Ios%IO_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - end if - case(PIO_iotype_netcdf4p) - ierr=nf90_get_att(File%fh,varid,name,value) -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_BCAST(value, clen, {MPITYPE},Ios%iomaster,Ios%my_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - end if - - end function get_att_1d_{TYPE} - - -end module pionfatt_mod - diff --git a/src/externals/pio1/pio/pionfget_mod.F90.in b/src/externals/pio1/pio/pionfget_mod.F90.in deleted file mode 100644 index ffa305582c6..00000000000 --- a/src/externals/pio1/pio/pionfget_mod.F90.in +++ /dev/null @@ -1,456 +0,0 @@ -#define __PIO_FILE__ "pionfget_mod.F90" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief Read Routines for non-decomposed NetCDF data. -!< -module pionfget_mod -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf ! _EXTERNAL -#endif - use pio_msg_mod - use pio_kinds, only: i4,r4,r8,pio_offset - use pio_types, only : file_desc_t, iosystem_desc_t, var_desc_t, & - pio_iotype_pbinary, pio_iotype_binary, pio_iotype_direct_pbinary, & - pio_iotype_netcdf, pio_iotype_pnetcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c, & - pio_noerr - use pio_utils, only : check_netcdf - use pio_support, only : Debug, DebugIO, piodie, CheckMPIReturn -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none - private -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -#include <pnetcdf.inc> /* _EXTERNAL */ -#endif -#endif -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif -!> -!! @defgroup PIO_get_var PIO_get_var -!! @brief Reads non-decomposed data from a NetCDF file -!! @details The get_var interface is provided as a simplified interface to -!! read variables from a NetCDF format file. The variable is read on the -!! root IO task and broadcast in its entirety to all tasks. -!< - public :: get_var - interface get_var - module procedure get_var_{DIMS}d_{TYPE}, get_var_vdesc_{DIMS}d_{TYPE} - ! DIMS 1,2,3,4,5 - module procedure get_vara_{DIMS}d_{TYPE}, get_vara_vdesc_{DIMS}d_{TYPE} - module procedure get_var1_{TYPE}, get_var1_vdesc_{TYPE} - end interface - - character(len=*), parameter :: modName='pionfget_mod' - -CONTAINS - -!> -!! @public -!! @ingroup PIO_get_var -!! @brief Reads non-decomposed fields from a NetCDF file -!! @details -!! 
@param File @ref file_desc_t -!! @param varid : The netcdf variable identifier -!! @param index : a multidimensional index that specifies which value to get -!! @param ival : The value for the netcdf metadata -!! @retval ierr @ref error_return -!< - integer function get_var1_{TYPE} (File,varid, index, ival) result(ierr) - use pio_msg_mod, only : pio_msg_getvar1 - use pio_types, only : pio_max_var_dims - type (File_desc_t), intent(in) :: File - integer, intent(in) :: varid, index(:) - {VTYPE}, intent(out) :: ival - type(iosystem_desc_t), pointer :: ios - character(len=*), parameter :: subName=modName//'::get_var1_{TYPE}' - integer :: iotype, mpierr, ilen, msg, sofindex, itype - integer(kind=pio_offset) :: kount(PIO_MAX_VAR_DIMS) -#ifdef TIMING - call t_startf("PIO:pio_get_var1_{TYPE}") -#endif - ierr=0 - iotype = File%iotype - ios => File%iosystem - sofindex = size(index) - -#if ({ITYPE} == TYPETEXT) - ilen = len(ival) - ival(1:ilen) = ' ' -#else - ilen=1 -#endif - if(Debug) print *,__PIO_FILE__,__LINE__,index, ilen - - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_GETVAR1 - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(sofindex,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(index,sofindex,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - if({ITYPE} == TYPETEXT) then - call MPI_BCAST(ilen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - end if - endif - - - if(File%iosystem%IOProc) then - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr = nfmpi_begin_indep_data(File%fh) - ! Only io proc 0 will do reading - if(ierr==PIO_NOERR .and. File%iosystem%io_rank==0) then -#if ({ITYPE} == TYPETEXT) - kount = 1 - kount(1) = ilen - ierr = nfmpi_get_vara_{TYPE} (File%fh, varid, int(index,kind=PIO_OFFSET), kount(1:sofindex), ival) -#else - ierr = nfmpi_get_var1_{TYPE} (File%fh, varid, int(index,kind=PIO_OFFSET), ival) -#endif - if(ierr/=PIO_NOERR) print *, __PIO_FILE__,__LINE__,index, ival - end if - if(ierr==PIO_NOERR) then - ierr = nfmpi_end_indep_data(File%fh) - end if -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr = nf90_get_var(File%fh, varid, ival, start=index) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - ! Only io proc 0 will do reading - if (File%iosystem%io_rank == 0) then - ierr = nf90_get_var(File%fh, varid, ival, start=index) - if(ierr/=PIO_NOERR) print *,__PIO_FILE__,__LINE__,index, ival - end if -#endif - end select - end if - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) -#if ({ITYPE} == TYPETEXT) - ilen = len(ival) -#else - ilen=1 -#endif - - call MPI_Bcast(ival, ilen, {MPITYPE} , File%iosystem%IOMaster, File%iosystem%MY_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - -#ifdef TIMING - call t_stopf("PIO:pio_get_var1_{TYPE}") -#endif - end function get_var1_{TYPE} - -!> -!! @public -!! @ingroup PIO_get_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @ref file_desc_t -!! @param vardesc @ref var_desc_t -!! @param index : a multidimensional index that specifies which value to get -!! @param ival : The value for the netcdf metadata -!! 
@retval ierr @ref error_return -!< - integer function get_var1_vdesc_{TYPE} (File,vardesc, index, ival) result(ierr) - type (File_desc_t), intent(in) :: File - type(var_desc_t), intent(in) :: vardesc - integer, intent(in) :: index(:) - {VTYPE}, intent(out) :: ival - - character(len=*), parameter :: subName=modName//'::get_var1_vdesc_{TYPE}' - - ierr = get_var1_{TYPE} (File, vardesc%varid, index, ival) - - end function get_var1_vdesc_{TYPE} - - -! DIMS 1,2,3,4,5 -!> -!! @public -!! @ingroup PIO_get_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @ref file_desc_t -!! @param varid : The netcdf variable identifier -!! @param start : A vector of size_t integers specifying the index in -!! the variable where the first of the data values will be read. The -!! indices are relative to 0, so for example, the first data value of -!! a variable would have index (0, 0, ... , 0). The length of start -!! must be the same as the number of dimensions of the specified -!! variable. The elements of start correspond, in order, to the -!! variable's dimensions. Hence, if the variable is a record variable, -!! the first index would correspond to the starting record number for -!! reading the data values. -!! @param count : A vector of size_t integers specifying the edge -!! lengths along each dimension of the block of data values to be -!! read. To read a single value, for example, specify count as (1, 1, -!! ... , 1). The length of count is the number of dimensions of the -!! specified variable. The elements of count correspond, in order, to -!! the variable's dimensions. Hence, if the variable is a record -!! variable, the first element of count corresponds to a count of the -!! number of records to read. -!! Note: setting any element of the count array to zero causes the function to exit without error, and without doing anything. -!! @param ival : The value for the netcdf metadata -!! @retval ierr @ref error_return -!< - integer function get_vara_{DIMS}d_{TYPE} (File,varid, start, count, ival) result(ierr) - type (File_desc_t), intent(in) :: File - integer, intent(in) :: varid, start(:), count(:) - {VTYPE}, intent(out) :: ival{DIMSTR} - - character(len=*), parameter :: subName=modName//'::get_vara_{DIMS}d_{TYPE}' - - integer :: dims({DIMS}) - integer :: iotype, mpierr, i, msg, ilen, itype, slen - integer(kind=PIO_OFFSET) :: isize - type(iosystem_desc_t), pointer :: ios -#ifdef TIMING - call t_startf("PIO:pio_get_vara_{DIMS}d_{TYPE}") -#endif - ierr=0 - iotype = File%iotype - isize=1 - do i=1,size(count) - isize=isize*count(i) - end do - - - - ios=>File%iosystem - if(ios%async_interface .and. .not. 
ios%ioproc ) then - msg=PIO_MSG_GETVARA_{DIMS}d - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - slen = size(start) - call MPI_BCAST(slen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(start,slen,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(count,slen,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - -#if ({DIMS} > 0) - do i=1,{DIMS} - dims(i)=size(ival,i) - end do - call MPI_BCAST(dims,{DIMS},MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif -#if({ITYPE} == TYPETEXT) - call MPI_BCAST(ilen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif - - - endif - - - - - - - if(File%iosystem%IOProc) then - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr = nfmpi_get_vara_all (File%fh, varid, int(start,kind=PIO_OFFSET), & - int(count,kind=PIO_OFFSET), ival, isize, {MPITYPE}) -#endif -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr = nf90_get_var(File%fh, varid, ival, start=start, count=count) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - ! Only io proc 0 will do reading - if (File%iosystem%io_rank == 0) then - ierr = nf90_get_var(File%fh, varid, ival, start=start, count=count) - end if - if(.not. ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(ival,int(isize), {MPITYPE} ,0,ios%IO_comm, mpierr) - call CheckMPIReturn(subName,mpierr) - end if - - -#endif - end select - end if - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_Bcast(ival,int(isize), {MPITYPE} , ios%IOMaster, ios%My_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - end if - - - -#ifdef TIMING - call t_stopf("PIO:pio_get_vara_{DIMS}d_{TYPE}") -#endif - end function get_vara_{DIMS}d_{TYPE} - -! DIMS 1,2,3,4,5 -!> -!! @public -!! @ingroup PIO_get_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @ref file_desc_t -!! @param vardesc @ref var_desc_t -!! @param start : A vector of size_t integers specifying the index in -!! the variable where the first of the data values will be read. The -!! indices are relative to 0, so for example, the first data value of -!! a variable would have index (0, 0, ... , 0). The length of start -!! must be the same as the number of dimensions of the specified -!! variable. The elements of start correspond, in order, to the -!! variable's dimensions. Hence, if the variable is a record variable, -!! the first index would correspond to the starting record number for -!! reading the data values. -!! @param count : A vector of size_t integers specifying the edge -!! lengths along each dimension of the block of data values to be -!! read. To read a single value, for example, specify count as (1, 1, -!! ... , 1). The length of count is the number of dimensions of the -!! specified variable. The elements of count correspond, in order, to -!! the variable's dimensions. Hence, if the variable is a record -!! variable, the first element of count corresponds to a count of the -!! number of records to read. -!! Note: setting any element of the count array to zero causes the function to exit without error, and without doing anything. -!! @param ival : The value for the netcdf metadata -!! 
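!! (Editor's sketch, not part of the original file: a hypothetical start/count
!! read through the generic interface, assuming a 2-d real(r4) variable described
!! by vardesc; every task receives the same block after the broadcast.)
!!
!!   real(r4) :: block(10,20)
!!   ierr = pio_get_var(file, vardesc, (/ 1, 5 /), (/ 10, 20 /), block)
!!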
@retval ierr @ref error_return -!< - integer function get_vara_vdesc_{DIMS}d_{TYPE} (File,vardesc, start, count, ival) result(ierr) - type (File_desc_t), intent(in) :: File - type(var_desc_t), intent(in) :: vardesc - integer, intent(in) :: start(:), count(:) - {VTYPE}, intent(out) :: ival{DIMSTR} - - character(len=*), parameter :: subName=modName//'::get_vara_vdesc_{DIMS}d_{TYPE}' - - ierr = get_vara_{DIMS}d_{TYPE} (File, vardesc%varid, start, count, ival) - - end function get_vara_vdesc_{DIMS}d_{TYPE} - -!> -!! @public -!! @ingroup PIO_get_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @ref file_desc_t -!! @param varid : The netcdf variable identifier -!! @param ival : The value for the netcdf metadata -!! @retval ierr @ref error_return -!< - integer function get_var_{DIMS}d_{TYPE} (File,varid, ival) result(ierr) - use pio_msg_mod, only : pio_msg_getvar_{DIMS}d - type (File_desc_t), intent(in) :: File - integer, intent(in) :: varid - {VTYPE}, intent(out) :: ival{DIMSTR} - type(iosystem_desc_t), pointer :: ios - character(len=*), parameter :: subName=modName//'::get_var_{DIMS}d_{TYPE}' - integer :: iotype, mpierr, msg, ilen, itype -#if ({DIMS} > 0) - integer :: dims({DIMS}) - integer :: i -#endif - integer(kind=PIO_OFFSET) :: isize - -#ifdef TIMING - call t_startf("PIO:pio_get_var_{DIMS}d_{TYPE}") -#endif - ierr=0 - iotype = File%iotype - isize=1 -#if ({DIMS} > 0) - isize= size(ival) -#endif -#if ({ITYPE} == TYPETEXT) - ilen = len(ival) - isize = isize*ilen - ival{DIMSTR} = ' ' -#endif - ios=>File%iosystem - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_GETVAR_{DIMS}d - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#if ({DIMS} > 0) - do i=1,{DIMS} - dims(i)=size(ival,i) - end do - call MPI_BCAST(dims,{DIMS},MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif -#if({ITYPE} == TYPETEXT) - call MPI_BCAST(ilen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif - - - endif - - - - if(File%iosystem%IOProc) then - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - ierr = nfmpi_get_var_all(File%fh, varid, ival, isize, {MPITYPE}) -#endif -#ifdef _NETCDF - case(pio_iotype_netcdf4p) - ierr = nf90_get_var(File%fh, varid, ival) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - ! Only io proc 0 will do reading - if (File%iosystem%io_rank == 0) then - ierr = nf90_get_var(File%fh, varid, ival) - end if - if(.not. ios%async_interface .and. ios%num_tasks==ios%num_iotasks) then - call MPI_BCAST(ival,int(isize), {MPITYPE} ,0,ios%IO_comm, mpierr) - call CheckMPIReturn('nf_mod',mpierr) - end if - -#endif - end select - end if - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) - if(ios%async_interface .or. ios%num_tasks>ios%num_iotasks) then - call MPI_Bcast(ival,int(isize), {MPITYPE} , ios%IOMaster, ios%My_comm, mpierr) - call CheckMPIReturn(subName, mpierr) - end if -#ifdef TIMING - call t_stopf("PIO:pio_get_var_{DIMS}d_{TYPE}") -#endif - end function get_var_{DIMS}d_{TYPE} - -!> -!! @public -!! @ingroup PIO_get_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @ref file_desc_t -!! @param vardesc @ref var_desc_t -!! @param ival : The value for the netcdf metadata -!! 
@retval ierr @ref error_return -!< - integer function get_var_vdesc_{DIMS}d_{TYPE} (File,vardesc, ival) result(ierr) - type (File_desc_t), intent(in) :: File - type(var_desc_t), intent(in) :: vardesc - {VTYPE}, intent(out) :: ival{DIMSTR} - - character(len=*), parameter :: subName=modName//'::get_var_vdesc_{DIMS}d_{TYPE}' - - ierr = get_var_{DIMS}d_{TYPE} (File, vardesc%varid, ival) - - end function get_var_vdesc_{DIMS}d_{TYPE} -end module pionfget_mod diff --git a/src/externals/pio1/pio/pionfput_mod.F90.in b/src/externals/pio1/pio/pionfput_mod.F90.in deleted file mode 100644 index 8bd2fe076f1..00000000000 --- a/src/externals/pio1/pio/pionfput_mod.F90.in +++ /dev/null @@ -1,866 +0,0 @@ -#define __PIO_FILE__ "pionfput_mod.F90" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief Write Routines for non-decomposed NetCDF data. -!< -module pionfput_mod -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf ! _EXTERNAL -#endif - use pio_kinds, only: i4,r4,r8,pio_offset - use pio_types, only : file_desc_t, iosystem_desc_t, var_desc_t, & - pio_iotype_pbinary, pio_iotype_binary, pio_iotype_direct_pbinary, & - pio_iotype_netcdf, pio_iotype_pnetcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c, & - pio_noerr - - use pio_utils, only : check_netcdf - use pio_msg_mod - use pio_support, only : Debug, DebugIO, piodie -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none - private -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -#include -#endif -#endif -#ifdef _NETCDF -! Required for netcdf bug workaround - integer, external :: nf_put_vars_text -#endif -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif -!> -!! @defgroup PIO_put_var PIO_put_var -!! @brief Writes netcdf metadata to a file -!! @details The put_var interface is provided as a simplified interface to -!! write variables to a netcdf format file. -!! @warning Although this is a collective call the variable is written from the -!! root IO task, no consistancy check is made with data passed on other tasks. -!! -!< - public :: put_var - interface put_var - ! DIMS 0,1,2,3,4,5 - module procedure put_var_{DIMS}d_{TYPE}, put_var_vdesc_{DIMS}d_{TYPE} - ! DIMS 1,2,3,4,5 - module procedure put_vara_{DIMS}d_{TYPE}, put_vara_vdesc_{DIMS}d_{TYPE} - module procedure put_var1_{TYPE}, put_var1_vdesc_{TYPE} - end interface -contains - -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param index : -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_var1_text (File,varid, index, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid, index(:) - character(len=*), intent(in) :: ival - integer, allocatable :: count(:) - integer :: iotype - type(iosystem_desc_t), pointer :: ios - integer :: xlen, msg, mpierr, isize, itype - -#ifdef TIMING - call t_startf("PIO:pio_put_var1_text") -#endif - ierr=PIO_NOERR - iotype = File%iotype - if(debug) print *,__PIO_FILE__,__LINE__,ival,iotype, index - - ios=>File%iosystem - - xlen = len_trim(ival) - if(ios%async_interface .and. .not. 
ios%ioproc ) then - msg=PIO_MSG_PUTVAR1 - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - isize = size(index) - call MPI_BCAST(isize,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(index,isize,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = TYPETEXT - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(xlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - endif - - if(ios%async_interface) then - call MPI_BCAST(ival,xlen,MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - end if - - - - if(Ios%IOProc) then - allocate(count(size(index))) -! if(Ios%io_rank == 0) then - count(:) = 1 - count(1) = len(ival) -! else -! count(:) = 0 -! end if - - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) -!#ifdef USE_INDEP_WRITE - ierr = nfmpi_begin_indep_data(File%fh) - - if(Ios%io_rank==0 .and. (ierr==NF_EINDEP .or. ierr==PIO_NOERR)) then - ierr = nfmpi_put_vara (File%fh, varid, int(index,kind=PIO_OFFSET), & - int(count,kind=PIO_OFFSET), ival, int(count,kind=PIO_OFFSET), & - MPI_CHARACTER) - end if - if(ierr==PIO_NOERR) then - ierr = nfmpi_end_indep_data(File%fh) - end if -!#else -! print *,__PIO_FILE__,__LINE__,index,count,trim(ival) -! ierr = nfmpi_put_vara_all (File%fh, varid, int(index,kind=PIO_OFFSET), & -! int(count,kind=PIO_OFFSET), ival) -!#endif - -#endif -#ifdef _NETCDF -#ifdef _NETCDF4 - case (pio_iotype_netcdf4p) - ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) - ierr = nf90_put_var(File%fh, varid, ival, start=index) -#endif - case( pio_iotype_netcdf,pio_iotype_netcdf4c) - ! Only io proc 0 will do writing - if (Ios%io_rank == 0) then - ierr = nf90_put_var(File%fh, varid, ival, start=index) - end if -#endif - case default - print *,__PIO_FILE__,__LINE__,iotype - call piodie(__PIO_FILE__,__LINE__,"bad iotype specified") - end select - deallocate(count) - end if - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) - -#ifdef TIMING - call t_stopf("PIO:pio_put_var1_text") -#endif - end function put_var1_text -! TYPE int,real,double -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param index : -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_var1_{TYPE} (File,varid, index, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid, index(:) - {VTYPE}, intent(in) :: ival - integer, allocatable :: count(:) - integer :: iotype, isize - type(iosystem_desc_t), pointer :: ios - integer :: xlen, msg, mpierr, itype - -#ifdef TIMING - call t_startf("PIO:pio_put_var1_{TYPE}") -#endif - ierr=PIO_NOERR - iotype = File%iotype - if(debug) print *,__PIO_FILE__,__LINE__,ival,iotype, index - - ios=>File%iosystem - if(ios%async_interface .and. .not. 
ios%ioproc ) then - msg=PIO_MSG_PUTVAR1 - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - isize = size(index) - call MPI_BCAST(isize,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(index,isize,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - endif - - if(ios%async_interface) then - call MPI_BCAST(ival,1,{MPITYPE},ios%CompMaster, ios%my_comm , mpierr) - end if - - - - if(Ios%IOProc) then - - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - allocate(count(size(index))) - if(Ios%io_rank == 0) then - count(:) = 1 - else - count(:) = 0 - end if -!#ifdef USE_INDEP_WRITE - ierr = nfmpi_begin_indep_data(File%fh) - if(Ios%io_rank==0 .and. (ierr==NF_EINDEP .or. ierr==PIO_NOERR)) then - ierr = nfmpi_put_vara (File%fh, varid, int(index,kind=PIO_OFFSET), int(count,kind=PIO_OFFSET), & - ival, int(count,kind=PIO_OFFSET), {MPITYPE}) - end if - if(ierr==PIO_NOERR) then - ierr = nfmpi_end_indep_data(File%fh) - end if -!#else -! ierr = nfmpi_put_vara_all (File%fh, varid, int(index,kind=PIO_OFFSET), int(count,kind=PIO_OFFSET), & -! ival) -!#endif - deallocate(count) -#endif -#ifdef _NETCDF -#ifdef _NETCDF4 - case (pio_iotype_netcdf4p) - ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) - ierr = nf90_put_var(File%fh, varid, ival, start=index) -#endif - case( pio_iotype_netcdf,pio_iotype_netcdf4c) - ! Only io proc 0 will do writing - if (Ios%io_rank == 0) then - ierr = nf90_put_var(File%fh, varid, ival, start=index) - end if -#endif - case default - print *,__PIO_FILE__,__LINE__,iotype - call piodie(__PIO_FILE__,__LINE__,"bad iotype specified") - end select - end if - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) - -#ifdef TIMING - call t_stopf("PIO:pio_put_var1_{TYPE}") -#endif - end function put_var1_{TYPE} - -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param start : -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_var1_vdesc_{TYPE} (File,vardesc, start, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - type(var_desc_t), intent(in) :: vardesc - integer, intent(in) :: start(:) - {VTYPE}, intent(in) :: ival - - ierr = put_var1_{TYPE} (File, vardesc%varid, start, ival) - end function put_var1_vdesc_{TYPE} - -! DIMS 0,1,2,3,4,5 -! TYPE text -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param File : A file handle returne from \ref PIO_openfile or \ref PIO_createfile. -!! @param varid : The netcdf variable identifier -!! @param ival : The value for the netcdf metadata -!! 
@retval ierr @copydoc error_return -!< - integer function put_var_{DIMS}d_text (File,varid, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid - character(len=*), intent(in) :: ival{DIMSTR} - integer :: iotype - integer :: i, is, msg, mpierr, xlen, itype - type(iosystem_desc_t), pointer :: ios - integer :: dims({DIMS}) - integer :: start({DIMS}+1), count({DIMS}+1) -#ifdef TIMING - call t_startf("PIO:pio_put_var_{DIMS}d_text") -#endif - ierr=PIO_NOERR - - iotype = File%iotype - start = 1 - count = 0 - is=0 - - - ios=>File%iosystem - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_PUTVAR_{DIMS}d - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = TYPETEXT - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - -#if ({DIMS} > 0) - do i=1,{DIMS} - dims(i)=size(ival,i) - end do - call MPI_BCAST(dims,{DIMS},MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif - xlen = len(ival) - call MPI_BCAST(xlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - endif - - if(ios%async_interface ) then -#if({DIMS}==0) - call MPI_BCAST(ival,len_trim(ival),MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) -#else - call MPI_BCAST(ival,size(ival),MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) -#endif - end if - - if(Ios%IOProc) then - if(Ios%io_rank==0) then - count(1)=len(ival) - is=1 -#if ({DIMS} > 0) - do i=1,{DIMS} - count(i+is) = size(ival,i) - end do -#endif - end if - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - if(ios%io_rank>0) count = 0 - ierr = nfmpi_put_vara_{TYPE}_all (File%fh, varid,int(start,kind=pio_offset),& - int(count,kind=pio_offset),ival) -#endif -#ifdef _NETCDF -! case(pio_iotype_netcdf4p) -! ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) -#if ({DIMS}==0) -! This is a workaround for a bug in the netcdf f90 interface -! The netcdf bug is that when you use nf90_put_var -! to write a scalar string the trailing blanks are stripped by the specific -! function nf90_put_var_text before it calls nf_put_vars_text. -! if (Ios%io_rank == 0) then -! ierr = nf_put_vars_text(File%fh, varid, (/1/), (/len(ival)/), (/1/), ival) -! else -! ierr = nf_put_vars_text(File%fh, varid, (/1/), (/0/), (/1/), ival) -! end if -#else -! ierr = nf90_put_var(File%fh, varid, ival, start=start, count=count) -#endif - case( pio_iotype_netcdf,pio_iotype_netcdf4c, pio_iotype_netcdf4p) - ! Only io proc 0 will do writing - if (Ios%io_rank == 0) then - -#if ({DIMS}==0) -! This is a workaround for a bug in the netcdf f90 interface -! The netcdf bug is that when you use nf90_put_var -! to write a scalar string the trailing blanks are stripped by the specific -! function nf90_put_var_text before it calls nf_put_vars_text. - ierr = nf_put_vars_text(File%fh, varid, (/1/), (/len(ival)/), (/1/), ival) -#else - ierr = nf90_put_var(File%fh, varid, ival) -#endif - end if -#endif - case default - print *,__PIO_FILE__,__LINE__,iotype - call piodie(__PIO_FILE__,__LINE__,"bad iotype specified" ) - - end select - end if - - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) -#ifdef TIMING - call t_stopf("PIO:pio_put_var_{DIMS}d_text") -#endif - end function put_var_{DIMS}d_text - -! DIMS 1,2,3,4,5 -! TYPE int,real,double -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! 
@details -!! @param File @copydoc file_desc_t -!! @param File : A file handle returne from \ref PIO_openfile or \ref PIO_createfile. -!! @param varid : The netcdf variable identifier -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_var_{DIMS}d_{TYPE} (File,varid, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid - {VTYPE}, intent(in) :: ival{DIMSTR} - integer :: iotype, itype - integer :: i, is, msg, mpierr, xlen - type(iosystem_desc_t), pointer :: ios - integer :: dims({DIMS}) - integer :: start({DIMS}), count({DIMS}) - - - - ierr=PIO_NOERR - - iotype = File%iotype - start = 1 - count = 0 - is=0 - -#ifdef _PNETCDF - if(iotype == pio_iotype_pnetcdf) then - do i=1,{DIMS} - count(i) = size(ival,i) - end do - ierr = put_vara_{DIMS}d_{TYPE} (File, varid, start, count, ival) - return - end if -#endif -#ifdef TIMING - call t_startf("PIO:pio_put_var_{DIMS}d_{TYPE}") -#endif - - ios=>File%iosystem - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_PUTVAR_{DIMS}d - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - do i=1,{DIMS} - dims(i)=size(ival,i) - end do - call MPI_BCAST(dims,{DIMS},MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - endif - - if(ios%async_interface ) then - call MPI_BCAST(ival,size(ival),{MPITYPE},ios%CompMaster, ios%my_comm , mpierr) - end if - - if(Ios%IOProc) then - if(Ios%io_rank==0) then - do i=1,{DIMS} - count(i+is) = size(ival,i) - end do - end if - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - if(Ios%io_rank>0) count=0 - ierr = nfmpi_put_vara_{TYPE}_all(File%fh, varid, int(start,kind=pio_offset),& - int(count,kind=pio_offset),ival) -#endif -#ifdef _NETCDF -! case(pio_iotype_netcdf4p) -! ierr = nf90_put_var(File%fh, varid, ival, start=start, count=count) - case( pio_iotype_netcdf,pio_iotype_netcdf4c,pio_iotype_netcdf4p) - ! Only io proc 0 will do writing - if (Ios%io_rank == 0) then - ierr = nf90_put_var(File%fh, varid, ival) - end if -#endif - case default - print *,__PIO_FILE__,__LINE__,iotype - call piodie(__PIO_FILE__,__LINE__,"bad iotype specified" ) - - end select - end if - - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) -#ifdef TIMING - call t_stopf("PIO:pio_put_var_{DIMS}d_{TYPE}") -#endif - end function put_var_{DIMS}d_{TYPE} - -! TYPE int,real,double -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param File : A file handle returne from \ref PIO_openfile or \ref PIO_createfile. -!! @param varid : The netcdf variable identifier -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_var_0d_{TYPE} (File,varid, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid - {VTYPE}, intent(in) :: ival - integer :: iotype - integer :: i, is, msg, mpierr, xlen - type(iosystem_desc_t), pointer :: ios - integer :: start(1),count(1), itype - - ierr=PIO_NOERR - - iotype = File%iotype - start = 1 - count = 1 - is=0 - -#ifdef TIMING - call t_startf("PIO:pio_put_var_0d_{TYPE}") -#endif - - ios=>File%iosystem - if(ios%async_interface .and. .not. 
ios%ioproc ) then - msg=PIO_MSG_PUTVAR_0d - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - endif - - if(ios%async_interface ) then - call MPI_BCAST(ival,1,{MPITYPE},ios%CompMaster, ios%my_comm , mpierr) - end if - - if(Ios%IOProc) then - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - if(Ios%io_rank>0) count=0 - ierr = nfmpi_put_vara_{TYPE}_all(File%fh, varid, int(start,kind=pio_offset),& - int(count,kind=pio_offset),ival) -#endif -#ifdef _NETCDF -! case(pio_iotype_netcdf4p) -! ierr = nf90_put_var(File%fh, varid, ival) - case( pio_iotype_netcdf,pio_iotype_netcdf4c,pio_iotype_netcdf4p) - ! Only io proc 0 will do writing - if (Ios%io_rank == 0) then - ierr = nf90_put_var(File%fh, varid, ival) - end if -#endif - case default - print *,__PIO_FILE__,__LINE__,iotype - call piodie(__PIO_FILE__,__LINE__,"bad iotype specified" ) - - end select - end if - - call check_netcdf(File,ierr,__PIO_FILE__,__LINE__) -#ifdef TIMING - call t_stopf("PIO:pio_put_var_0d_{TYPE}") -#endif - end function put_var_0d_{TYPE} - -! DIMS 0,1,2,3,4,5 -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_var_vdesc_{DIMS}d_{TYPE} (File, vardesc, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - type(var_desc_t) , intent(in) :: vardesc - {VTYPE}, intent(in) :: ival{DIMSTR} - integer :: iotype - - ierr = put_var_{DIMS}d_{TYPE} (File, vardesc%varid, ival) - end function put_var_vdesc_{DIMS}d_{TYPE} - -! DIMS 1,2,3,4,5 -! TYPE text -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param start : -!! @param count : -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_vara_{DIMS}d_text (File,varid, start, count, ival) result(ierr) - use nf_mod, only : pio_inq_varndims - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid, start(:), count(:) - - integer(kind=PIO_OFFSET), allocatable :: pstart(:), pcount(:) - - character(len=*), intent(in) :: ival{DIMSTR} - integer :: iotype, i, ndims, msg, mpierr - integer(kind=pio_offset) :: clen - type(iosystem_desc_t), pointer :: ios - integer :: dims({DIMS}), xlen, itype, slen -#ifdef TIMING - call t_startf("PIO:pio_put_vara_{DIMS}d_text") -#endif - ndims=0 - ierr=0 - iotype = File%iotype - ios=>File%iosystem - xlen = len(ival) - if(.not. ios%async_interface .or. .not. ios%ioproc ) then - ierr = pio_inq_varndims(File, varid, ndims) - end if - - if(debug) print *,__PIO_FILE__,__LINE__,varid, iotype, start, count - - if(ios%async_interface .and. .not. 
ios%ioproc ) then - msg=PIO_MSG_PUTVARA_{DIMS}d - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = TYPETEXT - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - slen = size(start) - call MPI_BCAST(slen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(start,slen,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(count,slen,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - -#if ({DIMS} > 0) - do i=1,{DIMS} - dims(i)=size(ival,i) - end do - call MPI_BCAST(dims,{DIMS},MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif - call MPI_BCAST(xlen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - endif - - if(ios%async_interface ) then - call MPI_BCAST(ndims,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(ival,xlen*size(ival),MPI_CHARACTER,ios%CompMaster, ios%my_comm , mpierr) - end if - - - - - if(Ios%IOProc) then - allocate(pstart(ndims),pcount(ndims)) - if(Ios%io_rank==0) then - pstart = start(1:ndims) - pcount = count(1:ndims) - else - pstart=1 ! avoids an unessasary pnetcdf error - pcount=0 - endif - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - clen=count(1) - do i=2,size(count) - clen=clen*count(i) - end do -#ifdef USE_INDEP_WRITE - ierr = nfmpi_begin_indep_data(File%fh) - if(Ios%io_rank==0 .and. (ierr==NF_EINDEP .or. ierr==PIO_NOERR)) then - ierr = nfmpi_put_vara_{TYPE} (File%fh, varid, pstart, & - pcount, ival) - end if - if(ierr==PIO_NOERR) then - ierr = nfmpi_end_indep_data(File%fh) - end if -#else - ierr = nfmpi_put_vara_{TYPE}_all (File%fh, varid, pstart, & - pcount, ival) - -#endif -#endif -#ifdef _NETCDF -#ifdef _NETCDF4 - case(pio_iotype_netcdf4p) - ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) - ierr = nf90_put_var(File%fh, varid, ival, start=int(pstart), count=int(pcount)) -#endif - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - ! Only io proc 0 will do writing - if (Ios%io_rank == 0) then - ierr = nf90_put_var(File%fh, varid, ival, start=int(pstart), count=int(pcount)) - end if -#endif - case default - print *,__PIO_FILE__,__LINE__,iotype - call piodie(__PIO_FILE__,__LINE__,"bad iotype specified") - end select - deallocate(pstart, pcount) - end if - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - -#ifdef TIMING - call t_stopf("PIO:pio_put_vara_{DIMS}d_{TYPE}") -#endif - end function put_vara_{DIMS}d_text -! TYPE int,real,double -! DIMS 1,2,3,4,5 -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf attribute to a file -!! @details -!! @param File @copydoc file_desc_t -!! @param varid : The netcdf variable identifier -!! @param start : -!! @param count : -!! @param ival : The value for the netcdf metadata -!! 
@retval ierr @copydoc error_return -!< - integer function put_vara_{DIMS}d_{TYPE} (File,varid, start, count, ival) result(ierr) - use nf_mod, only : pio_inq_varndims - type (File_desc_t), intent(inout) :: File - integer, intent(in) :: varid, start(:), count(:) - - integer(kind=PIO_OFFSET), allocatable :: pstart(:), pcount(:) - - {VTYPE}, intent(in) :: ival{DIMSTR} - integer :: iotype, i, ndims, msg, mpierr - integer(kind=pio_offset) :: clen - type(iosystem_desc_t), pointer :: ios - integer :: dims({DIMS}), xlen, itype, slen -#ifdef TIMING - call t_startf("PIO:pio_put_vara_{DIMS}d_{TYPE}") -#endif - ierr=0 - iotype = File%iotype - ios=>File%iosystem - xlen=1 - if(debug) print *,__PIO_FILE__,__LINE__,varid, iotype, start, count - if(.not. ios%async_interface .or. .not. ios%ioproc ) then - ierr = pio_inq_varndims(File, varid, ndims) - end if - if(ios%async_interface .and. .not. ios%ioproc ) then - msg=PIO_MSG_PUTVARA_{DIMS}d - if(ios%comp_rank==0) call mpi_send(msg, 1, mpi_integer, ios%ioroot, 1, ios%union_comm, ierr) - call MPI_BCAST(file%fh,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(varid,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - itype = {ITYPE} - call MPI_BCAST(itype,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - slen = size(start) - call MPI_BCAST(slen,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(start,slen,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(count,slen,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - -#if ({DIMS} > 0) - do i=1,{DIMS} - dims(i)=size(ival,i) - end do - call MPI_BCAST(dims,{DIMS},MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) -#endif - endif - - if(ios%async_interface ) then - call MPI_BCAST(ndims,1,MPI_INTEGER,ios%CompMaster, ios%my_comm , mpierr) - call MPI_BCAST(ival,xlen*size(ival),{MPITYPE},ios%CompMaster, ios%my_comm , mpierr) - end if - - - - if(Ios%IOProc) then - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - allocate(pstart(ndims),pcount(ndims)) - if(Ios%io_rank==0) then - pstart = start(1:ndims) - pcount = count(1:ndims) - else - pstart=1 ! avoids an unessasary pnetcdf error - pcount=0 - endif - - clen=count(1) - do i=2,size(count) - clen=clen*count(i) - end do -#ifdef USE_INDEP_WRITE - ierr = nfmpi_begin_indep_data(File%fh) - if(Ios%io_rank==0 .and. (ierr==NF_EINDEP .or. ierr==PIO_NOERR)) then - ierr = nfmpi_put_vara_{TYPE} (File%fh, varid, pstart, & - pcount, ival, clen, {MPITYPE}) - end if - if(ierr==PIO_NOERR) then - ierr = nfmpi_end_indep_data(File%fh) - end if -#else - ierr = nfmpi_put_vara_{TYPE}_all (File%fh, varid, pstart, & - pcount, ival) - -#endif - deallocate(pstart, pcount) -#endif -#ifdef _NETCDF -#ifdef _NETCDF4 - case(pio_iotype_netcdf4p) - ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) - ierr = nf90_put_var(File%fh, varid, ival, start=start, count=count) -#endif - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - ! Only io proc 0 will do writing - if (Ios%io_rank == 0) then - ierr = nf90_put_var(File%fh, varid, ival, start=start(1:ndims), count=count(1:ndims)) - end if -#endif - case default - print *,__PIO_FILE__,__LINE__,iotype - call piodie(__PIO_FILE__,__LINE__,"bad iotype specified") - end select - end if - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__) - -#ifdef TIMING - call t_stopf("PIO:pio_put_vara_{DIMS}d_{TYPE}") -#endif - end function put_vara_{DIMS}d_{TYPE} - -! DIMS 1,2,3,4,5 -!> -!! @public -!! @ingroup PIO_put_var -!! @brief Writes an netcdf variable to a file -!! @details -!! 
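!! (Editor's sketch, not part of the original file: a hypothetical slab write
!! through the public generic, assuming pio_put_var => put_var and a 2-d real(r8)
!! variable described by vardesc; only the root IO task actually writes, as noted above.)
!!
!!   real(r8) :: slab(100,50)
!!   ierr = pio_put_var(file, vardesc, (/ 1, 1 /), (/ 100, 50 /), slab)
!!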
@param File @copydoc file_desc_t -!! @param vardesc @copydoc var_desc_t -!! @param start : -!! @param count : -!! @param ival : The value for the netcdf metadata -!! @retval ierr @copydoc error_return -!< - integer function put_vara_vdesc_{DIMS}d_{TYPE} (File,vardesc, start, count, ival) result(ierr) - type (File_desc_t), intent(inout) :: File - type(var_desc_t), intent(in) :: vardesc - integer, intent(in) :: start(:), count(:) - {VTYPE}, intent(in) :: ival{DIMSTR} - - ierr = put_vara_{DIMS}d_{TYPE} (File, vardesc%varid, start, count, ival) - - - end function put_vara_vdesc_{DIMS}d_{TYPE} -end module pionfput_mod diff --git a/src/externals/pio1/pio/pionfread_mod.F90.in b/src/externals/pio1/pio/pionfread_mod.F90.in deleted file mode 100644 index c4aba92540a..00000000000 --- a/src/externals/pio1/pio/pionfread_mod.F90.in +++ /dev/null @@ -1,210 +0,0 @@ -#define __PIO_FILE__ "pionfread_mod.F90.in" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief Decomposed Read interface to NetCDF -!< -module pionfread_mod - - implicit none - - private -!> -!! @private -!< - public :: read_nf - interface read_nf - ! TYPE real,double,int - module procedure read_nfdarray_{TYPE} - end interface - - character(len=*), parameter :: modName='pionfread_mod' - -contains - - ! TYPE real,double,int -!> -!! @private -!< - integer function read_nfdarray_{TYPE} (File,IOBUF,varDesc,IODesc, start,count) result(ierr) - use pio_types, only : file_desc_t, var_desc_t, io_desc_t, pio_real, pio_double, pio_int, & - pio_noerr, pio_iotype_netcdf4p, pio_iotype_netcdf4c, pio_iotype_pnetcdf, pio_iotype_netcdf, & - pio_max_var_dims - use pio_kinds, only : pio_offset, i4, r4, r8 - use pio_utils, only : check_netcdf, bad_iotype - use pio_support, only : Debug, DebugIO, piodie, checkmpireturn - use alloc_mod, only: alloc_check -#ifdef _NETCDF - use netcdf, only : nf90_get_var !_EXTERNAL -#endif -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf !_EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -#include /* _EXTERNAL */ -#endif -#endif - - type (File_desc_t), intent(inout) :: File - {VTYPE}, intent(inout) :: IOBUF(:) - type (Var_desc_t), intent(in) :: varDesc - type (IO_desc_t), intent(in) :: IODesc - integer(kind=pio_offset), intent(in) :: start(:), count(:) - - - character(len=*), parameter :: subName=modName//'::read_nfdarray_{TYPE}' - integer(kind=i4) :: iotype - - integer :: iobuf_size, max_iobuf_size - integer :: status(MPI_STATUS_SIZE) - integer, dimension(PIO_MAX_VAR_DIMS) :: temp_start, temp_count - integer :: i, mpierr, ndims - -#ifdef TIMING - call t_startf("PIO:pio_read_nfdarray_{TYPE}") -#endif - iotype = File%iotype - ierr=PIO_noerr - - ndims = size(start) - if (File%iosystem%IOproc) then - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) - if(DebugIO) print *,__PIO_FILE__,__LINE__, & - ' IAM: ',File%iosystem%io_rank - if(Debug) & - print *,__PIO_FILE__,__LINE__, & - ' IAM: ',File%iosystem%io_rank,' start: ', & - start,' count: ', & - count, ' iobuf size: ',size(iobuf), vardesc%varid - - ierr=nfmpi_get_vara_all( File%fh,varDesc%varid, & - start, & - count, & - IOBUF,iodesc%Read%n_ElemTYPE, & - iodesc%Read%ElemTYPE) -#endif - -#ifdef _NETCDF - case(pio_iotype_netcdf4p) -! 
all reads can be parallel in netcdf4 format - ierr= nf90_get_var(File%fh, vardesc%varid, iobuf, start=int(start),count=int(count)) - case(pio_iotype_netcdf, pio_iotype_netcdf4c) - iobuf_size=size(IOBUF) - call MPI_REDUCE( iobuf_size,max_iobuf_size, & - 1,MPI_INTEGER,MPI_MAX,0,File%iosystem%IO_comm,mpierr ) - call checkmpireturn(subName, mpierr) - - if (File%iosystem%io_rank==0) then - if (max_iobuf_size > iobuf_size) then - print *, 'IOBUF on root is not big enough', max_iobuf_size, iobuf_size -#ifndef CPRNAG - call abort -#else - stop -#endif - endif - endif - - ! create temporaries of size int (netcdf limitation) - - temp_start=1 - temp_count=1 - if (File%iosystem%io_rank>0) then - temp_start(1:ndims)=start(1:ndims) - temp_count(1:ndims)=count(1:ndims) - - if (Debug) print *, File%iosystem%comp_rank,': waiting to receive IOBUF', start, count - - call MPI_SEND( temp_start,ndims,MPI_INTEGER, & - 0,File%iosystem%io_rank,File%iosystem%IO_comm,mpierr ) - call checkmpireturn(subName, mpierr) - - call MPI_SEND( temp_count,ndims,MPI_INTEGER, & - 0,File%iosystem%io_rank,File%iosystem%IO_comm,mpierr ) - call checkmpireturn(subName, mpierr) - - call MPI_SEND( iobuf_size,1,MPI_INTEGER, & - 0,File%iosystem%io_rank,File%iosystem%IO_comm,mpierr ) - call checkmpireturn(subName, mpierr) - - call MPI_RECV( IOBUF,size(IOBUF), & - {MPITYPE}, & - 0,File%iosystem%io_rank,File%iosystem%IO_comm,status,mpierr ) - call checkmpireturn(subName, mpierr) - - if (Debug) print *, subName,':: comp_rank: ',File%iosystem%comp_rank, & - ': received IOBUF size=',size(IOBUF) - endif - - ! Read rank>0 first then go back and read 0 - ! so that we can re-use the rank 0 IOBUF - - if (File%iosystem%io_rank==0) then - do i=1,File%iosystem%num_iotasks-1 - if (Debug) print *, subName,': 0: reading netcdf for ',i - - call MPI_RECV( temp_start, ndims, MPI_INTEGER, & - i,i,File%iosystem%IO_comm,status,mpierr) - call CheckMPIReturn('read_nfdarray_{TYPE}',mpierr) - - call MPI_RECV( temp_count, ndims, MPI_INTEGER, & - i,i,File%iosystem%IO_comm,status,mpierr) - call CheckMPIReturn('read_nfdarray_{TYPE}',mpierr) - - call MPI_RECV( iobuf_size, 1, MPI_INTEGER, & - i,i,File%iosystem%IO_comm,status,mpierr) - call CheckMPIReturn('read_nfdarray_{TYPE}',mpierr) - - ierr=nf90_get_var( File%fh, varDesc%varid, & - IOBUF, temp_start(1:ndims), temp_count(1:ndims) ) - - call MPI_SEND( IOBUF,iobuf_size, & - {MPITYPE}, & - i,i,File%iosystem%IO_comm,mpierr) - call CheckMPIReturn('read_nfdarray_{TYPE}',mpierr) - - if (Debug) print *, subName,': 0: done reading netcdf for ',i - end do ! i=1,File%iosystem%num_iotasks-1 - - ! Read root data last - - if (Debug) print *, subName,': 0: reading netcdf for self', vardesc%varid, ndims, start, count - - temp_start(1:ndims)=start(1:ndims) - temp_count(1:ndims)=count(1:ndims) - - ierr=nf90_get_var( File%fh, varDesc%varid, & - IOBUF, temp_start(1:ndims), temp_count(1:ndims) ) - - if (Debug) print *, subName,': 0: done reading netcdf for self' - - endif ! File%iosystem%io_rank==0 - -#endif - - case default - call bad_iotype(iotype,__PIO_FILE__,__LINE__) - - end select - endif ! 
File%iosystem%IOproc - call check_netcdf(File, ierr,__PIO_FILE__,__LINE__); -#ifdef TIMING - call t_stopf("PIO:pio_read_nfdarray_{TYPE}") -#endif - - end function read_nfdarray_{TYPE} -end module pionfread_mod diff --git a/src/externals/pio1/pio/pionfwrite_mod.F90.in b/src/externals/pio1/pio/pionfwrite_mod.F90.in deleted file mode 100644 index 6628abb59eb..00000000000 --- a/src/externals/pio1/pio/pionfwrite_mod.F90.in +++ /dev/null @@ -1,261 +0,0 @@ -#define __PIO_FILE__ "pionfwrite_mod.F90" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief Decomposed Write interface to NetCDF -!< -module pionfwrite_mod - use pio_kinds, only : r4, r8, i4, pio_offset - implicit none - private -!> -!! @private -!< - public :: write_nf - interface write_nf - ! TYPE real,int,double - module procedure write_nfdarray_{TYPE} - end interface - - character(len=*), parameter :: modName='pionfwrite_mod' - - -contains - ! note: IOBUF may actually point to the original data - ! array, and cannot be modified (which is why it is intent(in)) - - ! TYPE real,int,double -!> -!! @private -!< - integer function write_nfdarray_{TYPE} (File,IOBUF,varDesc,iodesc,start,count, request) result(ierr) - use nf_mod - use pio_types, only : io_desc_t, var_desc_t, file_desc_t, iosystem_desc_t, pio_noerr, & - pio_iotype_netcdf, pio_iotype_pnetcdf, pio_iotype_netcdf4p, pio_iotype_netcdf4c, pio_max_var_dims - use pio_utils, only : check_netcdf, bad_iotype - use alloc_mod, only: alloc_check - use pio_support, only : Debug, DebugIO, piodie, checkmpireturn - -#ifdef _NETCDF - use netcdf, only : nf90_put_var, nf90_inquire_variable !_EXTERNAL -#endif -#ifdef _NETCDF4 - use netcdf, only : nf90_var_par_access, nf90_collective -#endif -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf !_EXTERNAL -#endif -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif -#ifdef USE_PNETCDF_MOD - use pnetcdf -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif -#ifdef _PNETCDF -#ifndef USE_PNETCDF_MOD -# include /* _EXTERNAL */ -#endif -#endif - - type (File_desc_t), intent(inout) :: File - {VTYPE} , intent(in), target :: IOBUF(:) - type (var_desc_t), intent(in) :: varDesc - type (IO_desc_t), intent(in) :: IODesc - - integer(pio_offset), intent(in) :: start(:), count(:) - integer, intent(out) :: request - - - character(len=*), parameter :: subName=modName//'::write_nfdarray_{TYPE}' - - integer(i4) :: iotype, mpierr - integer :: status(MPI_STATUS_SIZE) - integer iobuf_size, max_iobuf_size - {VTYPE} , pointer :: temp_iobuf(:) - integer, dimension(PIO_MAX_VAR_DIMS) :: temp_start, temp_count - integer i, ndims - integer :: fh, vid, oldval - - request = MPI_REQUEST_NULL - -#ifdef TIMING - call t_startf("PIO:pio_write_nfdarray_{TYPE}") -#endif - ierr = PIO_NOERR - if(file%iosystem%ioproc) then - iotype = File%iotype - - select case (iotype) -#ifdef _PNETCDF - case(pio_iotype_pnetcdf) -#ifdef DEBUG - if(size(iobuf)<=0) then - call piodie(subname,__LINE__,'empty iobuf') - end if -#endif - - ierr=nfmpi_iput_vara( File%fh,varDesc%varid,start, & - count, IOBUF , & - iodesc%Write%n_ElemTYPE, & - iodesc%Write%ElemTYPE, request) - if(Debug.or.ierr/=PIO_noerr) & - print *,subname,__LINE__, & - ' IAM: ',File%iosystem%io_rank,' start: ',start,' count: ',count,& - ' size :',iodesc%Write%n_ElemTYPE, ' error: ',ierr, & - iodesc%Write%ElemTYPE, request -! if(Debug.or.ierr/=PIO_noerr) print *,subname,__LINE__, & -! 
' IAM: ',File%iosystem%io_rank,'minval: ',minval(IOBUF),'maxval: ',maxval(IOBUF) -#endif - -#ifdef _NETCDF -#ifdef _NETCDF4 - case(PIO_iotype_netcdf4p) - ierr=nf90_var_par_access(File%fh, vardesc%varid, NF90_COLLECTIVE) - ierr=nf90_put_var(File%fh, vardesc%varid, iobuf,start=int(start),count=int(count)) -#endif - case(pio_iotype_netcdf,pio_iotype_netcdf4c) - ! allocate space on root for copy of iobuf etc. - iobuf_size=size(IOBUF) - if(File%iosystem%num_iotasks>1) then - if(Debug) print *,__PIO_FILE__,__LINE__ - call MPI_ALLREDUCE(iobuf_size,max_iobuf_size, & - 1,MPI_INTEGER,MPI_MAX,File%iosystem%IO_comm,mpierr) - call CheckMPIReturn(subName, mpierr) - if(Debug) print *,__PIO_FILE__,__LINE__,iobuf_size - if (File%iosystem%io_rank==0) then - call alloc_check(temp_iobuf,max_iobuf_size) - else - if(max_iobuf_size>iobuf_size) then - call alloc_check(temp_iobuf,max_iobuf_size) - temp_iobuf(1:iobuf_size) = iobuf - else - temp_iobuf => iobuf - end if - end if - endif - - if(File%iosystem%io_rank==0) then - ierr=nf90_inquire_variable(File%fh,vardesc%varid,ndims=ndims) - end if - call MPI_BCAST(ndims,1,MPI_INTEGER,0,file%iosystem%io_comm,ierr) - - temp_start(1:ndims)=int(start(1:ndims)) - - temp_count(1:ndims)=int(count(1:ndims)) - - if(Debug) print *,__PIO_FILE__,__LINE__,ndims,temp_start(1:ndims),temp_count(1:ndims) - - ! Every i/o proc send data to root - - if (File%iosystem%io_rank>0) then - ! Wait for io_rank 0 to indicate that its ready before sending - ! this handshaking is nessasary for jaguar - call MPI_RECV( ierr, 1, MPI_INTEGER, 0, file%iosystem%io_rank, & - file%iosystem%io_comm, status, mpierr) - call CheckMPIReturn(subName, mpierr) - if(ierr==pio_NOERR) then - if (Debug) print *, subName,': File%iosystem%comp_rank:',File%iosystem%comp_rank, & - ': relaying IOBUF for write size=',size(IOBUF), temp_start(1:ndims),temp_count(1:ndims), i - - - call MPI_SEND( temp_IOBUF,max_iobuf_size, & - {MPITYPE}, & - 0,File%iosystem%io_rank,File%iosystem%IO_comm,mpierr ) - call CheckMPIReturn(subName, mpierr) - - call MPI_SEND( temp_start,ndims,MPI_INTEGER, & - 0,File%iosystem%num_iotasks+File%iosystem%io_rank,File%iosystem%IO_comm,mpierr ) - call CheckMPIReturn(subName, mpierr) - - call MPI_SEND( temp_count,ndims,MPI_INTEGER, & - 0,2*File%iosystem%num_iotasks+File%iosystem%io_rank,File%iosystem%IO_comm,mpierr ) - - call CheckMPIReturn(subName, mpierr) - endif - endif - - if (File%iosystem%io_rank==0) then - fh = file%fh - vid = vardesc%varid - ierr=nf90_put_var( fh, vid,IOBUF,temp_start(1:ndims),temp_count(1:ndims)) - if (Debug) print *, subName,': 0: done writing for self',ndims - - do i=1,File%iosystem%num_iotasks-1 - - ! Send a signal indicating ready to recv - call MPI_SEND( ierr, 1, MPI_INTEGER, i, i, & - file%iosystem%io_comm, mpierr) - call CheckMPIReturn(subName,mpierr) - if(ierr==pio_noerr) then - ! 
receive IOBUF, temp_start, temp_count from io_rank i - if(Debug) print *,subName, ' 1 receiving from ',i, max_iobuf_size - - call MPI_RECV( temp_iobuf, max_iobuf_size, & - {MPITYPE}, & - i,i,File%iosystem%IO_comm,status,mpierr) - call CheckMPIReturn(subName,mpierr) - if(Debug) print *,subName, ' 2 receiving from ',i, ndims - - call MPI_RECV( temp_start, & - ndims, MPI_INTEGER, & - i,File%iosystem%num_iotasks+i,File%iosystem%IO_comm,status,mpierr) - call CheckMPIReturn(subName,mpierr) - if(Debug) print *,subName, ' 3 receiving from ',i,ndims - - call MPI_RECV( temp_count, & - ndims, MPI_INTEGER, & - i,2*File%iosystem%num_iotasks+i,File%iosystem%IO_comm,status,mpierr) - call CheckMPIReturn(subName,mpierr) - - if(sum(temp_count(1:ndims))>0) then - -#ifdef TIMING - call t_startf("PIO:nc_put_var2") -#endif - ierr=nf90_put_var( fh,vid, & - temp_iobuf,temp_start(1:ndims),temp_count(1:ndims)) - if(Debug) print *, subname,__LINE__,i,fh,vid, ierr -#ifdef TIMING - call t_stopf("PIO:nc_put_var2") -#endif - if (Debug) print *, subName,': 0: done writing for ',i - else - ierr = PIO_NOERR - end if - - - end if ! ierr==pio_noerr - end do ! i=1,File%iosystem%num_iotasks-1 - endif ! File%iosystem%io_rank==0 - - if (File%iosystem%num_iotasks>1) then - if(File%iosystem%io_rank==0 .or. iobuf_size @file piovdc.F90 -!> @author Yannick Polius -!> @version $Revision$ -!> @date $LastChangedDate$ -!> @brief The piovdc library for writing Vapor Data Collection (VDC) 2 data files -!> https://wiki.ucar.edu/display/dasg/PIOVDC -!>
-!> @details The piovdc library is used to write VDC2 data files in a -!> parallel manner using PIO. After the prerequisite library functions are -!> used, a call to pio_writedarray is made, writing the passed -!> data to an on disk VDC2 collection.
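!> (Editor's sketch, not part of the original file: a rough calling sequence
!> implied by the description above; only init_vdc2's argument list is taken
!> from this module, the remaining steps are indicative.)
!>   call init_vdc2(rank, data_dims, vdc_bsize, iostart, iocount, ioprocs)
!>   ! ...build an IO decomposition from iostart/iocount, then call
!>   ! pio_writedarray (as named above) to write the field into the VDC2 collection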
-!> PRE-REQUISITES:
-!> VDF meta-file must be generated, using either rawtovdf or vdfcreate -!> if advanced features (wavelet type, compression ratios, or boundary type) are needed -!> VDF file requires VDC version to be 2, and requires the Waveletname, -!> WaveletBoundaryMode, CompressionRations, and NumTransforms to be set.
-!> POST-EFFECTS:
-!> After a successful write, VDC2 data will be in a directory located in -!> the same directory as the vdf file, using the vdf name, appended with _data -!> (ex. ghost.vdf generates VDC2 data in the dir ghost_data in the vdf dir) -!> If no compression is enabled, a single, uncompressed file will be -!> generated using PIO instead of a VDC -module piovdc - use pio_kinds, only : i4, r4, pio_offset - implicit none - integer (i4) :: vdc_dims(3), vdc_bsize(3), vdc_ts - integer (kind=PIO_OFFSET) :: vdc_iostart(3), vdc_iocount(3) -contains - - !> @brief subroutine checks start/count for out of bounds, adjusts if the start/count is too high, - !> zeroes start if it is invalid - !> POST-EFFECTS: - !>
all start/counts are now legal, non-IO tasks have zeroed start counts - !> @param[in] global_dims int(3) global grid dimensions - !> @param[in] rank int rank of current MPI task - !> @param[inout] start int(3) current MPI task global start - !> @param[inout] count int(3) current MPI task global count - subroutine adjust_bounds(global_dims, start, count, rank) - integer (i4), dimension(:), intent(in) :: global_dims - integer(i4), intent(in) :: rank - integer (kind=PIO_OFFSET), dimension(:), intent(inout) :: start, count - - !first check to ensure the start is legal - - if (start(1) .GT. global_dims(1) .OR. start(2) .GT. global_dims(2) & - .OR. start(3) .GT. global_dims(3)) then !outside of global bounds! - - !negate everything, they're useless -#ifdef DEBUG - print *, ' rank: ' , rank, ' start: ' , start, ' count: ' , count , ' negated' -#endif - start = (/ 0, 0, 0/) - count = (/ 0, 0, 0/) - else - !start is legit but count might not be, check & adjust to the boundaries - if(count(1) + start(1) - 1 .GT. global_dims(1)) then - count(1) = global_dims(1) - start(1) + 1 - - endif - if(count(2) + start(2) - 1 .GT. global_dims(2)) then - count(2) = global_dims(2) - start(2) + 1 - - endif - if(count(3) + start(3) - 1 .GT. global_dims(3)) then - count(3) = global_dims(3) - start(3) + 1 - - endif - end if - end subroutine adjust_bounds - - !> @brief subroutine that, given a global grid, VDC blocksize, and max # of nioprocs, will - !> automatically create an VDC optimized IO decomposition that uses the most possible IO tasks - ! - !> POST-EFFECTS: - !>
Each MPI Task is now either and IO task or a computational task. IO tasks have nonzero start/counts - !> @param[in] rank int rank of the current MPI task - !> @param[inout] nioprocs int represents the max possible # of IO procs, - !> @algorithm will try to get as close as possible to this # and return it in nioprocs - !> @param[in] blockdims int(3) global grid dimensions represented as VDC blocks - !> @param[out] start int(3) iostart for the current MPI task - !> @param[out] count int(3) iocount for the current MPI task - !> @param[in] bsize int(3) VDC block size - subroutine auto_get_start_count(rank, nioprocs, block_dims, start, count, bsize, data_dims) - use pio_kinds - integer (kind=PIO_OFFSET), intent(out):: start(3), count(3) - integer(i4), dimension(:), intent(in) :: bsize - integer (i4), intent(in) :: rank - real (r4), dimension(:), intent(in) :: block_dims - - integer (i4), intent(inout) :: nioprocs - !locals - real (r4) :: proc_count - integer (i4) :: lpp, spp0, spp1, counter, slab_counter, calc_procs, nslabs, nlinesPslab - integer(i4), dimension(:), intent(in) :: data_dims - logical :: found - - found = .FALSE. - nlinesPslab = CEILING(block_dims(2)) !max # of possible lines per slab PER TASK - nslabs = CEILING(block_dims(3)) - calc_procs = -1 - if (nioprocs .EQ. 1) then - nioprocs = 1 - start = (/0, 0, 0/) - count = INT(block_dims * bsize) - else - do slab_counter=1, nslabs - do counter=1, nlinesPslab - proc_count = CEILING(nlinesPslab / REAL(counter)) * CEILING(nslabs / REAL(slab_counter)) - !test to see if counter # of lines per processor per slab is possible - if (nioprocs >= proc_count) then - if (proc_count .gt. calc_procs) then - calc_procs = proc_count ! return the actual # of io procs used - count = (/ data_dims(1), counter * bsize(2), slab_counter *bsize(3) /) - start = (/ 0, mod(rank, INT(CEILING(nlinesPslab / REAL(counter)))) * counter * bsize(2), & - INT(rank / CEILING(nlinesPslab / REAL(counter))) * slab_counter * bsize(3)/) + 1 - call adjust_bounds(data_dims, start, count, rank) - if(proc_count .eq. nioprocs) then !using max #of procs, suitable solution found (for now) - found = .TRUE. - exit - end if - if (found) then - exit - end if - end if - end if -#ifdef DEBUG - -#endif - end do - if (found) then - exit - end if - end do - end if - nioprocs = calc_procs - end subroutine auto_get_start_count - - !> @brief subroutine that prepares the global grid to be split by the auto_start_count routine - ! - !> POST-EFFECTS: - !>
A valid IO decomposition is created that can be used with PIO - !> @param[in] rank int rank of the current - !> @param[in] data_dims int(3) size of the global grid - !> @param[in] vdc_bsize int(3) VDC block size - !> @param[out] iostart int(3) IO start for the current MPI task - !> @param[out] iocount int(3) IO count for the current MPI task - !> @param[inout] ioprocs int max # of IO procs, gets returned as the actual # used - subroutine init_vdc2(rank, data_dims, vdc_bsize, iostart, iocount, ioprocs) - integer (kind=PIO_OFFSET), intent(out) :: iostart(3), iocount(3) - integer (i4), intent(in) :: rank - integer(i4), dimension(:), intent(in) :: data_dims, vdc_bsize - integer (i4), intent(inout):: ioprocs - !locals - real(r4) :: vdc_blocks(3) - integer (i4) :: ierr - - print *, 'Calling get start count...block_dims: ', data_dims/real(vdc_bsize), ' bsize: ' , & - vdc_bsize, ' ioprocs: ', ioprocs, ' dims: ', data_dims, ' rank: ',rank - - vdc_blocks = data_dims/real(vdc_bsize) - - call auto_get_start_count (rank, ioprocs, vdc_blocks, iostart, iocount, vdc_bsize, data_dims) - -#ifdef DEBUG - print *, 'Retrieved VDF start count', iostart, '-', iocount, 'rank: ' , rank -#endif - - endsubroutine init_vdc2 - -end module piovdc diff --git a/src/externals/pio1/pio/rearr_options.h b/src/externals/pio1/pio/rearr_options.h deleted file mode 100644 index 39111853bef..00000000000 --- a/src/externals/pio1/pio/rearr_options.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef REARR_OPTIONS_H -#define REARR_OPTIONS_H - -! communication algorithm options -#define COLLECTIVE 0 -#define POINT_TO_POINT 1 -#define FLOW_CONTROL 2 - -! Default values for POINT_TO_POINT and FLOW_CONTROL -#define DEF_P2P_HANDSHAKE .true. -#define DEF_P2P_ISEND .false. -#define DEF_P2P_MAXREQ 64 - -#endif diff --git a/src/externals/pio1/pio/rearrange.F90.in b/src/externals/pio1/pio/rearrange.F90.in deleted file mode 100644 index 44d74daade1..00000000000 --- a/src/externals/pio1/pio/rearrange.F90.in +++ /dev/null @@ -1,209 +0,0 @@ -#define __PIO_FILE__ "rearrange.F90" -!> -!! @file -!! $Revision$ -!! $LastChangedDate$ -!! @brief Generic interface to the rearrange layer -!< - -module rearrange - - use pio_kinds - use pio_types - use pio_support - use box_rearrange - - -#ifdef TIMING - use perf_mod, only : t_startf, t_stopf, t_barrierf ! _EXTERNAL -#endif - - implicit none - private - save - -!> -!! @private -!< - public :: rearrange_init, & - rearrange_create, & - rearrange_comp2io, & - rearrange_io2comp, & - rearrange_free - - interface rearrange_init - module procedure rearrange_init_ - end interface - - - interface rearrange_create - module procedure rearrange_create_box_ - end interface - - interface rearrange_comp2io - ! TYPE real,double,int - module procedure rearrange_comp2io_{TYPE} - end interface - - interface rearrange_io2comp - ! TYPE real,double,int - module procedure rearrange_io2comp_{TYPE} - end interface - - interface rearrange_free - module procedure rearrange_free_ - end interface - - -contains - -! TYPE real,double,int -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! rearrange_comp2io_{TYPE} -! 
- - subroutine rearrange_comp2io_{TYPE}(Iosystem,iodesc,compbuf,iobuf) - implicit none - - type (Iosystem_desc_t) :: Iosystem - type (io_desc_t) :: iodesc - {VTYPE}, intent(in) :: compbuf(:) - {VTYPE}, intent(out) :: iobuf(:) - -#ifdef TIMING - call t_barrierf("pio_rearrange_comp2io_{TYPE}",IoSystem%comp_comm) - call t_startf("PIO:pio_rearrange_comp2io_{TYPE}") -#endif - - - call box_rearrange_comp2io(Iosystem,iodesc,size(compbuf), compbuf,size(iobuf), iobuf) - -#ifdef TIMING - call t_stopf("PIO:pio_rearrange_comp2io_{TYPE}") -#endif - - end subroutine rearrange_comp2io_{TYPE} - - - - -! TYPE real,double,int -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! rearrange_io2comp_{TYPE} -! - subroutine rearrange_io2comp_{TYPE} (Iosystem,iodesc,iobuf,compbuf) - implicit none - - type (Iosystem_desc_t) :: Iosystem - type(io_desc_t) :: iodesc - {VTYPE} :: iobuf(:) - {VTYPE} :: compbuf(:) - -#ifdef TIMING - call t_startf("PIO:pio_rearrange_io2comp_{TYPE}") -#endif - - call box_rearrange_io2comp(Iosystem,iodesc,size(iobuf),iobuf,size(compbuf),compbuf) - -#ifdef TIMING - call t_stopf("PIO:pio_rearrange_io2comp_{TYPE}") -#endif - - end subroutine rearrange_io2comp_{TYPE} - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! rearrange_init -! -! called from initFile_ -! - - - subroutine rearrange_init_(Iosystem) - implicit none - - type (Iosystem_desc_t), intent(inout) :: Iosystem - - - - ! no general init required for box rearranger - - end subroutine rearrange_init_ - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! rearrange_create_box_ -! -! called from initDecomp -! - - - subroutine rearrange_create_box_(Iosystem,compDOF, & - dims,ndims,ioDesc) - implicit none - - type (Iosystem_desc_t), intent(in) :: Iosystem - integer (kind=pio_offset), intent(in) :: compDOF(:) - integer, intent(in) :: dims(:) - integer, intent(in) :: ndims - type (IO_desc_t) :: ioDesc - -#ifdef TIMING - call t_startf("PIO:pio_rearrange_create_box") -#endif - - if (Iosystem%rearr /= PIO_rearr_box) then - call piodie( __PIO_FILE__,__LINE__, & - 'rearrange_create called with args for box but rearranger type is not box, Iosystem%rearr=',& - Iosystem%rearr) - endif - - - call box_rearrange_create( Iosystem,compDOF,dims,ndims,Iosystem%num_iotasks,ioDesc) - - -#ifdef TIMING - call t_stopf("PIO:pio_rearrange_create_box") -#endif - - end subroutine rearrange_create_box_ - - - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! -! rearrange_free_ -! -! called from freeDecomp - - - subroutine rearrange_free_(Iosystem,ioDesc) - implicit none - - type (Iosystem_desc_t), intent(in) :: Iosystem - type (IO_desc_t) :: ioDesc - - - select case (Iosystem%rearr) - case (PIO_rearr_box) - call box_rearrange_free(Iosystem,ioDesc) - case (PIO_rearr_none) - ! 
do nothing - - case default - call piodie(__PIO_FILE__,__LINE__,'Unrecognized rearranger:',Iosystem%rearr) - - end select - - - end subroutine rearrange_free_ - - -end module rearrange - - - diff --git a/src/externals/pio1/pio/topology.c b/src/externals/pio1/pio/topology.c deleted file mode 100644 index fd4c9ca847e..00000000000 --- a/src/externals/pio1/pio/topology.c +++ /dev/null @@ -1,479 +0,0 @@ -#include -#include -#include - - -#if defined(BGL) || defined(BGP) || defined(BGQ) - -#include -#include - -#ifdef BGL - -#include -#include - -#define get_personality rts_get_personality -#define get_processor_id rts_get_processor_id -#define Personality BGLPersonality -#define Personality_getLocationString BGLPersonality_getLocationString -#define Personality_numIONodes BGLPersonality_numIONodes -#define Personality_numPsets BGLPersonality_numPsets -#define Personality_numNodesInPset BGLPersonality_numNodesInPset -#define Personality_rankInPset BGLPersonality_rankInPset -#define Personality_psetNum BGLPersonality_psetNum - -#endif -#ifdef BGP - -#include -#include -#include - -#define get_personality Kernel_GetPersonality -#define get_processor_id Kernel_PhysicalProcessorID -#define Personality _BGP_Personality_t -#define Personality_getLocationString BGP_Personality_getLocationString -#define Personality_numIONodes BGP_Personality_numIONodes -#define Personality_numNodesInPset BGP_Personality_psetSize -#define Personality_rankInPset BGP_Personality_rankInPset -#define Personality_psetNum BGP_Personality_psetNum - -#endif - -#ifdef BGQ - -#include -#include -#include -#include - -#define get_personality Kernel_GetPersonality -#define get_processor_id Kernel_PhysicalProcessorID -#define Personality Personality_t - -#endif - -#define max(a,b) \ - ({ __typeof__ (a) _a = (a); \ - __typeof__ (b) _b = (b); \ - _a > _b ? _a : _b; }) - -#define min(a,b) \ - ({ __typeof__ (a) _a = (a); \ - __typeof__ (b) _b = (b); \ - _a < _b ? 
_a : _b; }) - - -int rank; -int np; -int my_name_len; -char my_name[255]; - -void identity(MPI_Fint *comm, int *iotask) -{ - - - MPI_Comm comm2; - comm2 = MPI_Comm_f2c(*comm); - MPI_Comm_rank(comm2,&rank); - MPI_Comm_size(comm2,&np); - MPI_Get_processor_name(my_name, &my_name_len); - -#ifdef BGQ - MPIX_Hardware_t hw; - MPIX_Hardware(&hw); -#endif - - /* Get the personality */ - Personality pers; - char message[100]; - - /* Number of MPI tasks per Pset */ - int coreId; - int *TasksPerPset; - int *tmp; - int i,ierr; - -#ifdef BGQ - Personality personality; - Kernel_GetPersonality(&pers, sizeof(pers)); -#else - get_personality (&pers, sizeof(pers)); -#endif - int numIONodes,numPsets,numNodesInPset,rankInPset; -#if defined(BGL) || defined(BGP) - Personality_getLocationString (&pers, message); - numIONodes = Personality_numIONodes (&pers); - numNodesInPset = Personality_numNodesInPset (&pers); - rankInPset = Personality_rankInPset (&pers); -#endif -#ifdef BGQ - int numpsets, psetID, psetsize, psetrank; - - bgq_pset_info (comm2, &numpsets, &psetID, &psetsize, &psetrank); - - numIONodes = numpsets; - numNodesInPset = psetsize; - rankInPset = rank; -#endif - -#ifdef BGL - numPsets = Personality_numPsets (&pers); -#endif -#ifdef BGP - rankInPset --; - numPsets = BGP_Personality_numComputeNodes(&pers)/numNodesInPset; -#endif -#ifdef BGQ - numPsets = numpsets; -#endif - - if(rank == 0) { printf("number of IO nodes in block: %i \n",numIONodes);} - if(rank == 0) { printf("number of Psets in block : %i \n",numPsets);} - if(rank == 0) { printf("number of compute nodes in Pset: %i \n",numNodesInPset);} - - int psetNum; -#ifdef BGQ - psetNum = psetID; -#else - psetNum = Personality_psetNum (&pers); -#endif - -#ifdef DEBUG - if((*iotask)>0) { - printf( "%04i (%-50s %s) %i yes\n", rank, my_name, message, psetNum ); - } else { - printf( "%04i (%-50s %s) %i --\n", rank, my_name, message, psetNum); - } - printf("MPI task %6i is rank %3i in Pset: %3i \n",rank, rankInPset,psetNum); -#endif - /* Determine which core on node.... 
I don't want to put more than one io-task per node */ - coreId = get_processor_id (); - - TasksPerPset = malloc(numPsets*sizeof(int)); - tmp = malloc(numPsets*sizeof(int)); - for(i=0;i 0) { - -#ifdef BGQ - Personality personality; - Kernel_GetPersonality(&pers, sizeof(pers)); -#else - get_personality (&pers, sizeof(pers)); -#endif - - int numIONodes,numPsets,numNodesInPset,rankInPset; - int numiotasks_per_node,remainder,numIONodes_per_pset; - int lstride; - - /* Number of computational nodes in processor set */ - #ifdef BGQ - int numpsets, psetID, psetsize, psetrank; - bgq_pset_info (comm,&numpsets, &psetID, &psetsize, &psetrank); - numIONodes = numpsets; - numNodesInPset = psetsize; - #else - /* total number of IO-nodes */ - numIONodes = Personality_numIONodes (&pers); - numNodesInPset = Personality_numNodesInPset (&pers); - #endif - - /* printf("Determine io tasks: me %i : nodes in pset= %i ionodes = %i\n", rank, numNodesInPset, numIONodes); */ - - - if((*numiotasks) < 0 ) { - /* negative numiotasks value indicates that this is the number per IO-node */ - (*numiotasks) = - (*numiotasks); - if((*numiotasks) > numNodesInPset) { - numiotasks_per_node = numNodesInPset; - } else { - numiotasks_per_node = (*numiotasks); - } - remainder = 0; - } else if ((*numiotasks) > 0 ) { - /* balance the number of iotasks to number of IO nodes */ - numiotasks_per_node = floor((float)(*numiotasks)/ (float) numIONodes); - /* put a minumum here so that we have a chance - though this may be too low */ - if (numiotasks_per_node < 1) { - numiotasks_per_node = 1; - *numiotasks = numIONodes; - } - remainder = (*numiotasks) - numiotasks_per_node * numIONodes; - } else if ((*numiotasks) == 0 ) { - if(stride > 0) { - numiotasks_per_node = numNodesInPset/stride; - if (numiotasks_per_node < 1) { - numiotasks_per_node = 1; - *numiotasks = numIONodes; - } - } else { - numiotasks_per_node = 8; /* default number of IO-client per IO-node is not otherwise specificied */ - } - remainder = 0; - } - - /* number of IO nodes with a larger number of io-client per io-node */ - if(remainder > 0) { - if(rank ==0) {printf("Unbalanced IO-configuration: %i IO-nodes have %i IO-clients : %i IO-nodes have %i IO-clients \n", - remainder, numiotasks_per_node+1, numIONodes-remainder,numiotasks_per_node);} - lstride = min(np,floor((float)numNodesInPset/(float)(numiotasks_per_node+1))); - } else { - if(rank == 0) { - printf("Balanced IO-configuration: %i IO-nodes have %i IO-clients\n",numIONodes-remainder, numiotasks_per_node); - } - lstride = min(np,floor((float)numNodesInPset/(float)numiotasks_per_node)); - } - - /* Number of processor sets */ -#ifdef BGL - numPsets = Personality_numPsets (&pers); -#endif -#ifdef BGP - numPsets = BGP_Personality_numComputeNodes(&pers)/numNodesInPset; -#endif -#ifdef BGQ - numPsets = numpsets; -#endif - - /* number of IO nodes in processor set (I need to add - code to deal with the case where numIONodes_per_pset != 1 works - correctly) */ - numIONodes_per_pset = numIONodes/numPsets; - - /* Determine which core on node.... 
I don't want to put more than one io-task per node */ - coreId = get_processor_id (); - - /* What is the rank of this node in the processor set */ -#ifdef BGQ - psetNum = psetID; - rankInPset = psetrank; -#else - /* determine the processor set that this node belongs to */ - psetNum = Personality_psetNum (&pers); - rankInPset = Personality_rankInPset (&pers); -#endif -#ifdef BGP - rankInPset--; -#endif - - /* printf("Pset #: %i has %i nodes in Pset; base = %i\n",psetNum,numNodesInPset, base); */ - - (*iamIOtask) = 0; /* initialize to zero */ - - if (numiotasks_per_node == numNodesInPset) base = 0; /* Reset the base to 0 if we are using all tasks */ - - - /* start stridding MPI tasks from base task */ - iam = max(0,rankInPset-(*base)); - if (iam >= 0) { - /* mark tasks that will be IO-tasks or IO-clients */ - /* printf("iam = %d lstride = %d coreID = %d\n",iam,lstride,coreId); */ - if((iam % lstride == 0) && (coreId == 0) ) { /* only io tasks indicated by stride and coreId = 0 */ - if((iam/lstride) < numiotasks_per_node) { - /* only set the first (numiotasks_per_node - 1) tasks */ - (*iamIOtask) = 1; - } else if ((iam/lstride) == numiotasks_per_node) { - /* If there is an uneven number of io-clients to io-nodes - allocate the first remainder - 1 processor sets to - have a total of numiotasks_per_node */ - if(psetNum < remainder) {(*iamIOtask) = 1; - }; - } - } - /* printf("comm = %d iam = %d lstride = %d coreID = %d iamIOtask = %i \n",comm2, iam,lstride,coreId,(*iamIOtask)); */ - } - }else{ - /* We are not doing rearrangement.... so all tasks are io-tasks */ - (*iamIOtask) = 1; - } - - /*printf("comm = %d myrank = %i iotask = %i \n", comm2, rank, (*iamIOtask));*/ - - /* now we need to correctly determine the numiotasks */ - MPI_Allreduce(iamIOtask, &task_count, 1, MPI_INT, MPI_SUM, comm); - - (*numiotasks) = task_count; - - -} - -int bgq_ion_id (void) -{ - int iA, iB, iC, iD, iE; /* The local node's coordinates */ - int nA, nB, nC, nD, nE; /* Size of each torus dimension */ - int brA, brB, brC, brD, brE; /* The bridge node's coordinates */ - int io_node_route_id; - - Personality_t personality; - - Kernel_GetPersonality(&personality, sizeof(personality)); - - iA = personality.Network_Config.Acoord; - iB = personality.Network_Config.Bcoord; - iC = personality.Network_Config.Ccoord; - iD = personality.Network_Config.Dcoord; - iE = personality.Network_Config.Ecoord; - - nA = personality.Network_Config.Anodes; - nB = personality.Network_Config.Bnodes; - nC = personality.Network_Config.Cnodes; - nD = personality.Network_Config.Dnodes; - nE = personality.Network_Config.Enodes; - - brA = personality.Network_Config.cnBridge_A; - brB = personality.Network_Config.cnBridge_B; - brC = personality.Network_Config.cnBridge_C; - brD = personality.Network_Config.cnBridge_D; - brE = personality.Network_Config.cnBridge_E; - -/* -* This is the bridge node, numbered in ABCDE order, E increments first. -* It is considered the unique "io node route identifer" because each -* bridge node only has one torus link to one io node. 
-*/ - - io_node_route_id = brE + brD*nE + brC*nD*nE + brB*nC*nD*nE + brA*nB*nC*nD*nE; - - return io_node_route_id; - -} - - - -int bgq_pset_info (MPI_Fint comm2, int* tot_pset, int* psetID, int* pset_size, int* rank_in_pset) -{ - MPI_Comm comp_comm2, pset_comm, bridge_comm; - int comp_rank, status, key, bridge_root, tot_bridges, cur_pset, itr, t_buf; - int temp_id, rem_psets; - MPI_Status mpi_status; - - MPI_Comm_rank (comm2, &comp_rank); - - status = MPI_Comm_dup ( comm2, &comp_comm2); - if ( MPI_SUCCESS != status) - { - printf(" Error duplicating communicator \n"); - MPI_Abort(comm2, status); - } - - // Compute the ION BridgeNode ID - key = bgq_ion_id (); - - // Create the pset_comm per bridge node - status = MPI_Comm_split ( comp_comm2, key, comp_rank, &pset_comm); - if ( MPI_SUCCESS != status) - { - printf(" Error splitting communicator \n"); - MPI_Abort(comm2, status); - } - - // Calculate the rank in pset and pset size - MPI_Comm_rank (pset_comm, rank_in_pset); - MPI_Comm_size (pset_comm, pset_size); - - // Create the Bridge root nodes communicator - bridge_root = 0; - if (0 == *rank_in_pset) - bridge_root = 1; - - // Calculate the total number of bridge nodes / psets - tot_bridges = 0; - MPI_Allreduce (&bridge_root, &tot_bridges, 1, MPI_INT, MPI_SUM, comm2); - - *tot_pset = tot_bridges; - - // Calculate the Pset ID - cur_pset = 0; - rem_psets = tot_bridges; - if ((0 == comp_rank) && (bridge_root ==1)) - { - *psetID = 0; - rem_psets = tot_bridges-1; - cur_pset++; - } - - t_buf = 0; // Dummy value - if (0 == comp_rank) - { - for (itr = 0; itr < rem_psets; itr++) - { - MPI_Recv (&t_buf,1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG,comm2, &mpi_status); - MPI_Send (&cur_pset, 1, MPI_INT, mpi_status.MPI_SOURCE, 0, comm2); - cur_pset++; - } - } - - if ((1 == bridge_root) && ( 0 != comp_rank)) - { - MPI_Send (&t_buf, 1, MPI_INT, 0, 0, comm2); - MPI_Recv (&temp_id,1, MPI_INT, 0, 0, comm2, &mpi_status); - - *psetID = temp_id; - /*printf (" Pset ID is %d \n", *psetID);*/ - } - // Broadcast the PSET ID to all ranks in the psetcomm - MPI_Bcast ( psetID, 1, MPI_INT, 0, pset_comm); - - // Free the split comm - MPI_Comm_free (&pset_comm); - - MPI_Barrier (comm2); - - return 0; -} - -#endif diff --git a/src/externals/pio1/scripts/Utils.pm b/src/externals/pio1/scripts/Utils.pm deleted file mode 100644 index ef1eadc044d..00000000000 --- a/src/externals/pio1/scripts/Utils.pm +++ /dev/null @@ -1,370 +0,0 @@ -package Utils; - -use strict; -BEGIN { - use vars qw( $VERSION @ISA ); - $VERSION = '0.10'; - @ISA = qw(); -} # end BEGIN -# non-exported package globals go here -use vars qw(); -use POSIX qw(ceil); - -sub host{ - my $host = `hostname -f`; - $host = `hostname` if($?); -#HOST SPECIFIC START - if($host =~ /intrepid/){ - $host = "intrepid"; - }elsif($host =~ /^fr\d+en/){ - $host = "frost"; - }elsif($host =~ /^eos/){ - $host = "eos"; - }elsif($host =~ /^titan/){ - $host = "titan"; - }elsif($host =~ /^ath/ ){ - $host = "athena"; - }elsif($host =~ /^kra/){ - $host = "kraken"; - }elsif($host =~ /^lynx/){ - $host = "lynx"; - }elsif($host =~ /^hopp/){ - $host = "hopper"; - }elsif($host =~ /^cvrs/) { - $host = "carver"; - }elsif($host =~/erlogin/) { - $host="erebus"; - }elsif($host =~/yslogin/) { - $host="yellowstone"; - }elsif( $host =~ /^login/){ - if(-d "/lustre/janus_scratch"){ - $host="janus"; - }else{ - $host = "athena"; - } - }elsif($host =~ /(\w+)\./){ - $host = $1; - } -#HOST SPECIFIC END -} - -sub projectInfo{ - my ($mod,$host,$user) = @_; - my $projectInfo; - my $project; -#HOST SPECIFIC START - if($host 
=~ "erebus" or $host =~ "yellowstone"){ - if(defined $ENV{ACCOUNT}){ - $project=$ENV{ACCOUNT}; - }else{ - $project="P93300606"; - } - $projectInfo = "#BSUB -R \"span[ptile=16]\"\n#BSUB -P $project\n"; - }elsif($host =~ "titan"){ - $project = `showproj -s $host | tail -1`; - $projectInfo ="#PBS -A $project\n"; - }elsif($host =~ "athena" or $host =~ "kraken"){ -# $project = `showproj -s athena | tail -1`; - $projectInfo ="##PBS -A $project\n"; - }elsif($host =~ "columbia" or $host =~ "pleiades" or $host=~ "carver"){ - $project = ""; - $projectInfo ="##PBS -W group_list=$project\n"; - } -#HOST SPECIFIC END -} - -sub preambleResource{ - my ($mod,$host,$pecount,$corespernode) = @_; - my $nodes; - my $preambleResource; - if($host =~ "bluefire" or $host =~ "erebus" or $host =~ "yellowstone") { - $preambleResource = "#BSUB -n $pecount\n"; - }elsif($host =~ "frost"){ - $preambleResource = ""; - }elsif($host =~ "edinburgh" or $host =~ "carver"){ - $nodes = ceil($pecount/$corespernode); - $preambleResource = "#PBS -l nodes=$nodes:ppn=$corespernode\n"; - }elsif($host =~ "aum"){ - $nodes = ceil($pecount/$corespernode); - $preambleResource = "#PBS -l nodes=$nodes:ppn=$corespernode\n"; - }elsif($host =~ "cyberstar" ){ - $nodes = ceil($pecount/$corespernode); - $preambleResource = "#PBS -l nodes=$nodes:nehalem:ppn=$corespernode\n"; - }elsif($host =~ "lynx" or $host =~ "hopper"){ - my $pecnt = $corespernode*ceil($pecount/$corespernode); - $preambleResource = "#PBS -l mppwidth=$pecnt\n"; - }elsif($host =~ "athena" or $host =~ /janus/){ - my $pecnt = $corespernode*ceil($pecount/$corespernode); - $preambleResource = "#PBS -l size=$pecnt\n"; - }elsif($host =~ "kraken"){ - my $pecnt = $corespernode*ceil($pecount/$corespernode); - $preambleResource = "#PBS -l size=$pecnt\n"; - }elsif($host =~ "titan"){ - my $nodecnt = ceil($pecount/$corespernode); - $preambleResource = "#PBS -l nodes=$nodecnt\n"; - }elsif($host =~ "columbia" or $host =~ "pleiades"){ - $preambleResource = "#PBS -l ncpus=$pecount\n"; - } -} - -sub runString{ - my ($mod,$host,$pecount,$run,$exename,$log)=@_; - my $runString; - if($host =~ "bluefire" || $host =~ "erebus" || $host =~ "yellowstone") { - $runString = "$run $exename 1> $log 2>&1"; - }elsif($host eq "frost" ) { - $runString = "$run $log -np $pecount $exename"; - #$runString = "$run -np $pecount $exename"; - }elsif($host eq "intrepid") { - $runString = "$run $log -np $pecount $exename "; - }elsif($host =~ "columbia" or $host =~ "pleiades"){ - $runString = "$run -np $pecount $exename 1> $log 2>&1"; -# } elsif($host =~ "kraken" or $host =~ "jaguar" or $host =~ "athena"){ -# make this default - }else{ - $runString = "$run -n $pecount $exename 1> $log 2>&1"; - } - -} - -sub submitString{ - my ($mod,$host,$pecount,$corespernode,$submit,$script)=@_; - my $submitString; - my $nodecnt; - $submit =~ s/</ "/usr/share/Modules/", - yellowstone => "/glade/apps/opt/modulefiles", - yellowstone_pgi => "/glade/apps/opt/modulefiles", - yellowstone_gnu => "/glade/apps/opt/modulefiles", - titan => "/opt/modules/default/", - eos => "/opt/modules/default/", - athena => "/opt/modules/default/", - kraken => "/opt/modules/default/", - hopper => "/opt/modules/default/", - lynx => "/opt/modules/default/", - lynx_intel => "/opt/modules/default/", - pleiades => "/usr", - carver => "/usr/common/nsg/opt/Modules/default/", - columbia => "/usr/share/modules/"}; -#HOST SPECIFIC END - - return unless(defined $modpath->{$host}); - - -#HOST SPECIFIC START - if($host =~ "titan"){ - require "/opt/modules/default/init/perl"; - 
module_check($modpath,$host); - module("switch cray-mpich2 cray-mpich2/5.6.3"); - module(" switch xt-libsci xt-libsci/12.0.00"); - module(" swap xt-asyncpe xt-asyncpe/5.16"); - module("load szip/2.1"); - module(" switch pgi pgi/12.10.0"); - module(" load netcdf-hdf5parallel/4.2.0"); - module(" load parallel-netcdf/1.3.1"); - module("list"); - }elsif($host eq "eos"){ - require "/opt/modules/default/init/perl"; - module_check($modpath,$host); - module("rm netcdf"); - module("rm cray-netcdf"); - module("rm cray-netcdf-hdf5parallel"); - module("rm pnetcdf"); - module("switch cray-mpich cray-mpich/6.0.2"); - module("switch intel intel/13.1.3.192"); - module("load cray-netcdf-hdf5parallel/4.3.0"); - module("load cray-parallel-netcdf/1.3.1.1"); - module("load cmake/2.8.11.2"); - }elsif($host =~ "athena"){ -# require "/opt/modules/default/init/perl"; - module_check($modpath,$host); - module(" purge"); - module(" load PrgEnv-pgi Base-opts"); - module(" load xtpe-quadcore"); - module(" load torque moab"); - module(" load xt-mpt"); - module(" switch pgi pgi/7.1.6"); - module(" load netcdf/3.6.2"); - module(" load p-netcdf/1.1.1"); - module(" swap xt-asyncpe xt-asyncpe/1.0c"); - module(" swap xt-binutils-quadcore xt-binutils-quadcore/2.0.1"); - }elsif($host =~ "kraken"){ - require "/opt/modules/default/init/perl"; - module_check($modpath,$host); - module(" load netcdf/3.6.3"); - module(" load p-netcdf/1.2.0"); - }elsif($host =~ "hopper"){ - require "/opt/modules/default/init/perl"; - module_check($modpath,$host); - module(" load cray-netcdf-hdf5parallel/4.3.0"); - module(" load cray-parallel-netcdf/1.3.1.1"); - module("list"); - }elsif($host =~ "pleiades"){ - module(" load netcdf/4.0-i10.1"); - }elsif($host =~ "columbia"){ - module(" load pd-netcdf.3.6.2"); -! 
module(" load pd-pnetcdf.1.1.1"); - }elsif($host =~ "lynx_intel"){ - require "/opt/modules/default/init/perl"; - module_check($modpath,$host); - module(" rm PrgEnv-pgi "); - module(" load PrgEnv-intel"); - module(" switch intel intel/12.1.0"); - module(" load INTEL/netcdf4/4.1.3_seq"); - module(" load pnetcdf/1.2.0"); - module(" list"); - }elsif($host =~ "hopper_gnu"){ - require "/opt/modules/default/init/perl"; - module_check($modpath,$host); - module(" rm PrgEnv-pgi "); - module(" load PrgEnv-gnu"); - module(" switch gcc gcc/4.7.1"); - module(" load netcdf-hdf5parallel/4.2.0"); - module(" load parallel-netcdf/1.2.0"); - module(" list"); - }elsif($host eq "carver"){ - require "/usr/common/nsg/opt/Modules/default/init/perl"; - module_check($modpath,$host); - module("load intel "); - module("load openmpi-intel/1.6"); - module("load netcdf-intel/4.1.1"); -# module("load pnetcdf/1.3.0"); - module("list"); - }elsif($host eq "erebus"){ - require "/glade/apps/opt/lmod/lmod/init/perl"; - module_check($modpath,$host); - module("load intel/13.1.2"); - module("load ncarcompilers/1.0"); - module("rm netcdf"); - module("load netcdf-mpi/4.2"); -# module("load netcdf/4.2"); - module("load pnetcdf/1.3.0"); - module("load ncarenv/1.0"); - module("load ncarbinlibs/1.1"); - module("list"); - }elsif($host eq "yellowstone_pgi"){ - print "Loading modules for $host\n"; - require "/glade/apps/opt/lmod/lmod/init/perl"; - module_check($modpath,"yellowstone"); - module("rm netcdf"); - module("rm intel"); - module("load pgi/13.9"); - module("load ncarcompilers/1.0"); - module("unload netcdf"); - module("load netcdf/4.2"); - module("load pnetcdf/1.3.0"); - module("load ncarenv/1.0"); - module("load ncarbinlibs/1.1"); - module("list"); - }elsif($host eq "yellowstone_gnu"){ - print "Loading modules for $host\n"; - require "/glade/apps/opt/lmod/lmod/init/perl"; - module_check($modpath,"yellowstone"); - module("rm netcdf"); - module("rm intel"); - module("load gnu/4.8.0"); - module("load ncarcompilers/1.0"); - module("unload netcdf"); - module("load netcdf/4.3.0-rc4"); - module("load pnetcdf/1.3.0"); - module("load ncarenv/1.0"); - module("load ncarbinlibs/0.0"); - module("list"); - }elsif($host eq "yellowstone"){ - require "/glade/apps/opt/lmod/lmod/init/perl"; - module_check($modpath,$host); -# module("purge"); - module("load intel/13.1.2"); - module("load ncarcompilers/1.0"); - module("rm netcdf"); - module("load netcdf-mpi/4.3.0"); - module("load pnetcdf/1.3.0"); - module("load ncarenv/1.0"); - module("load ncarbinlibs/1.0"); - module("load cmake"); - module("list"); - } - -#HOST SPECIFIC END -} - - -sub module { - my $exec_prefix = "$ENV{MODULESHOME}"; - - if(-e "$exec_prefix/bin/modulecmd"){ - eval `$exec_prefix/bin/modulecmd perl @_`; - }else{ - die "Could not find $exec_prefix/bin/modulecmd"; - } -} - -sub module_check{ - my($modpath,$host) = @_; - - $ENV{MODULESHOME} = $modpath->{$host}; - - if($modpath->{$host} =~ /([^\/]*)\/?$/){ - $ENV{MODULE_VERSION}=$1; - } - if (! defined $ENV{MODULEPATH} ) { - open(F,"$modpath->{$host}/init/.modulespath") || die "could not open $modpath->{$host}/init/.modulespath"; - my @file = ; - close(F); - my $modulepath; - foreach(@file){ - if(/^([\/\w+]+)\s*/){ - if(defined $modulepath){ - $modulepath = "$modulepath:$1"; - }else{ - $modulepath = $1; - } - } - } - $ENV{MODULEPATH} = $modulepath; - } - if (! 
defined $ENV{"LOADEDMODULES"} ) { - $ENV{"LOADEDMODULES"} = ""; - } - print "module path = $ENV{MODULEPATH}\n"; -} - -sub hostmods{ - my($self,$host,$mpi) = @_; - my ($scratch,$netcdf,$pnetcdf,$cc,$fc,$filesystem); - print "host = $host\n"; - if($host =~ /yellowstone/){ - $scratch = "/glade/scratch/$ENV{USER}/piotest"; - $netcdf = $ENV{NETCDF}; - $pnetcdf = $ENV{PNETCDF}; - $filesystem = "gpfs"; - if($mpi ne "mpi-serial") { - $cc = "mpicc"; - $fc = "mpif90"; - } - } - if($host =~ /^eos/){ - $scratch = "$ENV{WORKDIR}/$ENV{PROJECT}/testpio"; - $netcdf = $ENV{NETCDF_DIR}; - $pnetcdf = $ENV{PARALLEL_NETCDF_DIR}; - $filesystem = "lustre"; - $cc = "cc"; - $fc = "ftn"; - } - return ($scratch,$netcdf,$pnetcdf,$cc,$fc,$filesystem); -} - -1; diff --git a/src/externals/pio1/scripts/config.pl b/src/externals/pio1/scripts/config.pl deleted file mode 100644 index 0f3d96600a6..00000000000 --- a/src/externals/pio1/scripts/config.pl +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/perl -use strict; -use Getopt::Long; -require Utils; - -my $host; -my $compiler; -my $build; -my $result = GetOptions("host=s"=>\$host, - "compiler=s"=>\$compiler, - "build=s"=>\$build); - - -$host = Utils->host() unless(defined $host); -print "host = $host\n"; -Utils->loadmodules("$host"); - -$build="all" unless(defined($build)); - -my ($scratch,$netcdf,$pnetcdf,$mpi,$cc,$fc,$filesystem); - -($scratch,$netcdf,$pnetcdf,$cc,$fc,$filesystem) = Utils->hostmods($host,$mpi); - -print "$scratch\n"; -print "$cc\n"; -print "$filesystem\n"; -print "$fc\n"; - -# override pnetcdf with my own -#$pnetcdf = "/glade/u/home/jedwards/pnetcdf/svn1544/intel/"; - - -my $piosrc = `pwd`; -chomp $piosrc; -$piosrc.="/../"; - -$ENV{CC}=$cc; -$ENV{FC}=$fc; - -my $cmake_opts; - -if($build eq "netcdf" or $build eq "all"){ - $cmake_opts .= " -DNETCDF_DIR=$netcdf "; -} -if($build eq "pnetcdf" or $build eq "all"){ - $cmake_opts .= " -DPNETCDF_DIR=$pnetcdf "; -} -if(defined($filesystem)){ - $cmake_opts .= " -DPIO_FILESYSTEM_HINTS=$filesystem "; -} - -$cmake_opts .= " -DPIO_BUILD_TIMING=ON --debug-trycompile "; - - -mkdir "$scratch"; - -chdir "$scratch" or die "Could not make directory $scratch"; - -system("cmake $cmake_opts -DCMAKE_VERBOSE_MAKEFILE=1 $piosrc"); -system("gmake -j 4"); - - diff --git a/src/externals/pio1/scripts/testpio_yellowstone.pl b/src/externals/pio1/scripts/testpio_yellowstone.pl deleted file mode 100755 index 20b2227305d..00000000000 --- a/src/externals/pio1/scripts/testpio_yellowstone.pl +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env perl -use strict; -use File::Copy ; -#BSUB -P P93300606 # project code -#BSUB -W 0:40 # wall-clock time (hrs:mins) -#BSUB -n 64 # number of tasks in job -#BSUB -R "span[ptile=16]" # run 16 MPI tasks per node -#BSUB -J testpio # job name -#BSUB -o testpio.%J.out # output file name in which %J is replaced by the job ID -#BSUB -e testpio.%J.err # error file name in which %J is replaced by the job ID -#BSUB -q small # queue -#BSUB -a poe -##BSUB -XF -##BSUB -a tv -#BSUB -N -#BSUB -x - -my $piosrc="$ENV{HOME}/pio_trunk"; -my $testdir="/glade/scratch/$ENV{USER}/piotest/pio.all/pio"; - -opendir(TNL,"$piosrc/testpio/namelists"); -my @namelists = grep(/testpio_in.*\d$/,readdir(TNL)); -closedir(TNL); -$ENV{LD_LIBRARY_PATH}="$ENV{LD_LIBRARY_PATH}:/glade/apps/opt/hdf5-mpi/1.8.11/intel/13.1.2/lib"; -my $passcnt=0; -my $failcnt=0; - -open(T,">$testdir/testpio/TestStatus"); -chdir "$testdir/unittests"; -copy("$piosrc/unittests/input.nl","input.nl"); -print T "Running unittests ... 
"; -system("mpirun.lsf ./piotest > unittest.out"); -open(F,"unittest.out"); -my $cnt = grep /PASSED unit testing/ , ; -close(F); -if($cnt>0){ - $passcnt++; - print "PASS \n"; - print T "PASS \n"; -}else{ - $failcnt++; - print "FAIL \n"; - print T "FAIL \n"; -} - - - -foreach my $nl (sort @namelists){ - chdir "$testdir/testpio"; - $nl =~ /testpio_in\.(.*)/; - my $test = "test.$1"; - print T "Running test $1 ... "; - mkdir $test; - chdir $test; - copy("$piosrc/testpio/namelists/$nl","testpio_in"); - mkdir "none"; - system("mpirun.lsf ../testpio > $test.out"); - open(F,"$test.out"); - my $cnt = grep /testpio completed successfully/ , ; - close(F); - - if($cnt>0){ - $passcnt++; - print "PASS \n"; - print T "PASS \n"; - }else{ - $failcnt++; - print "FAIL \n"; - print T "FAIL \n"; - } - -} -close(T); diff --git a/src/externals/pio1/tests/testpio/CAM05.csh b/src/externals/pio1/tests/testpio/CAM05.csh deleted file mode 100644 index 54a8c1c7d53..00000000000 --- a/src/externals/pio1/tests/testpio/CAM05.csh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'bluefire' -# asfa -#set host = 'hopper' -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench CAM05 --numIO 6 --log ${host}.0064.pnc.iotask_6.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 128 --bench CAM05 --numIO 10 --log ${host}.0128.pnc.iotask_10.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 256 --bench CAM05 --numIO 20 --log ${host}.0256.pnc.iotask_20.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 416 --bench CAM05 --numIO 36 --log ${host}.0416.pnc.iotask_36.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 832 --bench CAM05 --numIO 70 --log ${host}.0832.pnc.iotask_70.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1664 --bench CAM05 --numIO 140 --log ${host}.1664.pnc.iotask_140.log.${id} diff --git a/src/externals/pio1/tests/testpio/CMakeLists.txt b/src/externals/pio1/tests/testpio/CMakeLists.txt deleted file mode 100644 index 2678d672249..00000000000 --- a/src/externals/pio1/tests/testpio/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ - -SET(SRC check_mod.F90 gdecomp_mod.F90 kinds_mod.F90 namelist_mod.F90 - testpio.F90 utils_mod.F90) - -INCLUDE_DIRECTORIES(${PIO_INCLUDE_DIRS}) -LINK_DIRECTORIES(${PIO_LIB_DIR}) -ADD_EXECUTABLE(testpio ${SRC}) -if(${PIO_BUILD_TIMING}) - TARGET_LINK_LIBRARIES(testpio pio timing) -else() - TARGET_LINK_LIBRARIES(testpio pio) -endif() - - diff --git a/src/externals/pio1/tests/testpio/MPASA30km.csh b/src/externals/pio1/tests/testpio/MPASA30km.csh deleted file mode 100755 index e030403c359..00000000000 --- a/src/externals/pio1/tests/testpio/MPASA30km.csh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#./testpio_bench.pl --maxiter 10 --iofmt pnc --numvars 10 --pecount 120 --bench MPASA30km -numIO 20 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -#./testpio_bench.pl --maxiter 10 --iofmt pnc --numvars 10 --pecount 240 --bench MPASA30km -numIO 40 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -#./testpio_bench.pl --maxiter 10 --iofmt pnc --numvars 10 --pecount 480 --bench MPASA30km -numIO 80 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -#./testpio_bench.pl --maxiter 10 --iofmt pnc --numvars 10 --pecount 960 --bench MPASA30km -numIO 160 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -#./testpio_bench.pl --maxiter 10 --iofmt pnc --numvars 10 
--pecount 1920 --bench MPASA30km -numIO 320 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --numvars 10 --pecount 3840 --bench MPASA30km -numIO 320 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close - diff --git a/src/externals/pio1/tests/testpio/MPASA60km.csh b/src/externals/pio1/tests/testpio/MPASA60km.csh deleted file mode 100755 index 0c3cd5a2c16..00000000000 --- a/src/externals/pio1/tests/testpio/MPASA60km.csh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench MPASA60km -numIO 12 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 120 --bench MPASA60km -numIO 20 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 240 --bench MPASA60km -numIO 40 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 480 --bench MPASA60km -numIO 80 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 960 --bench MPASA60km -numIO 160 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1020 --bench MPASA60km -numIO 170 --partdir /lustre/scratch/jdennis/MPAS --logfile-suffix trunk_close - diff --git a/src/externals/pio1/tests/testpio/POPB.csh b/src/externals/pio1/tests/testpio/POPB.csh deleted file mode 100755 index 0f9cf55ec86..00000000000 --- a/src/externals/pio1/tests/testpio/POPB.csh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 24 --bench POPB --numIO 4 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 80 --bench POPB --numIO 12 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 160 --bench POPB --numIO 24 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 320 --bench POPB --numIO 48 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 640 --bench POPB --numIO 96 --logfile-suffix trunk_close - diff --git a/src/externals/pio1/tests/testpio/POPC.csh b/src/externals/pio1/tests/testpio/POPC.csh deleted file mode 100644 index 6212c292b72..00000000000 --- a/src/externals/pio1/tests/testpio/POPC.csh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench POPC --numIO 10 --log ${host}.0064.pnc.iotask_10.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 128 --bench POPC --numIO 20 --log ${host}.0128.pnc.iotask_20.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 256 --bench POPC --numIO 40 --log ${host}.0256.pnc.iotask_40.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 512 --bench POPC --numIO 80 --log ${host}.0512.pnc.iotask_80.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1000 --bench POPC --numIO 160 --log ${host}.1000.pnc.iotask_160.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1600 --bench POPC --numIO 160 --log ${host}.1600.pnc.iotask_160.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc 
--pecount 2000 --bench POPC --numIO 320 --log ${host}.2000.pnc.iotask_320.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 4000 --bench POPC --numIO 640 --log ${host}.4000.pnc.iotask_640.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 8000 --bench POPC --numIO 640 --log ${host}.8000.pnc.iotask_640.log.${id} diff --git a/src/externals/pio1/tests/testpio/POPD.csh b/src/externals/pio1/tests/testpio/POPD.csh deleted file mode 100644 index 62d0f1da3e1..00000000000 --- a/src/externals/pio1/tests/testpio/POPD.csh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 64 --bench POPD --numIO 6 --log ${host}.0064.pnc.iotask_6.log.${id} -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 128 --bench POPD --numIO 10 --log ${host}.0128.pnc.iotask_10.log.${id} -./testpio_bench.pl --maxiter 2 --iofmt snc --pecount 256 --bench POPD --numIO 4 --log ${host}.0256.snc.iotask_4.log.${id} -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 512 --bench POPD --numIO 40 --log ${host}.0512.pnc.iotask_40.log.${id} -#./testpio_bench.pl --maxiter 5 --iofmt pnc --pecount 1000 --bench POPD --numIO 80 --log ${host}.1000.pnc.iotask_80.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1600 --bench POPD --numIO 160 --log ${host}.1600.pnc.iotask_160.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 2000 --bench POPD --numIO 320 --log ${host}.2000.pnc.iotask_320.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 4000 --bench POPD --numIO 640 --log ${host}.4000.pnc.iotask_640.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 8000 --bench POPD --numIO 640 --log ${host}.8000.pnc.iotask_640.log.${id} - diff --git a/src/externals/pio1/tests/testpio/POPDv0.csh b/src/externals/pio1/tests/testpio/POPDv0.csh deleted file mode 100755 index d51d7bb31a9..00000000000 --- a/src/externals/pio1/tests/testpio/POPDv0.csh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' - -#------------------------------ -# should increase queue runtime -#------------------------------ -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 64 --bench POPD --numIO 1 --log ${host}.0064.snc.box.iotask_1.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 128 --bench POPD --numIO 1 --log ${host}.0128.snc.box.iotask_1.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 256 --bench POPD --numIO 1 --log ${host}.0256.snc.box.iotask_1.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 512 --bench POPD --numIO 1 --log ${host}.0512.snc.box.iotask_1.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 1000 --bench POPD --numIO 1 --log ${host}.1000.snc.box.iotask_1.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 1600 --bench POPD --numIO 1 --log ${host}.1600.snc.box.iotask_1.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 2000 --bench POPD --numIO 1 --log ${host}.2000.snc.box.iotask_1.log.${id} -# Don't execute this one yet because 4000 core job from previous set is still in queue -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 4000 --bench POPD --numIO 1 --log ${host}.4000.snc.box.iotask_1.log.${id} - -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 8000 --bench POPD --numIO 640 --log ${host}.8000.pnc.iotask_640.log.${id} - diff --git a/src/externals/pio1/tests/testpio/POPDv1.csh 
b/src/externals/pio1/tests/testpio/POPDv1.csh deleted file mode 100755 index f10ab6f3028..00000000000 --- a/src/externals/pio1/tests/testpio/POPDv1.csh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' -#------------------------------ -# Should increase queue runtime -#------------------------------ -#./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 64 --bench POPD --numIO 10 --log ${host}.0064.snc.box.iotask_10.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 128 --bench POPD --numIO 20 --log ${host}.0128.snc.box.iotask_20.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 256 --bench POPD --numIO 40 --log ${host}.0256.snc.box.iotask_40.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 512 --bench POPD --numIO 80 --log ${host}.0512.snc.box.iotask_80.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 1000 --bench POPD --numIO 160 --log ${host}.1000.snc.box.iotask_160.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 1600 --bench POPD --numIO 320 --log ${host}.1600.snc.box.iotask_320.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 2000 --bench POPD --numIO 640 --log ${host}.2000.snc.box.iotask_640.log.${id} -# Do not submit this job. A previous 4K core job is still in the queue -set id = 012211-1449 -./testpio_bench.pl --maxiter 10 --iofmt snc --pecount 4000 --bench POPD --numIO 640 --log ${host}.4000.snc.box.iotask_640.log.${id} - -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 8000 --bench POPD --numIO 640 --log ${host}.8000.pnc.iotask_640.log.${id} - diff --git a/src/externals/pio1/tests/testpio/POPDv2.csh b/src/externals/pio1/tests/testpio/POPDv2.csh deleted file mode 100755 index 57d3d58e47f..00000000000 --- a/src/externals/pio1/tests/testpio/POPDv2.csh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -#set host = 'kraken' -set host = 'hopper' - -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench POPD --numIO 10 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 128 --bench POPD --numIO 20 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 256 --bench POPD --numIO 40 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 512 --bench POPD --numIO 80 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1000 --bench POPD --numIO 160 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1600 --bench POPD --numIO 320 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 2000 --bench POPD --numIO 320 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 4000 --bench POPD --numIO 320 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 8000 --bench POPD --numIO 320 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 12000 --bench POPD --numIO 320 --logfile-suffix trunk_close -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 15000 --bench POPD --numIO 320 --logfile-suffix trunk_close - -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench POPD --numIO 10 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR1 -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 128 --bench POPD --numIO 20 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR1 -#./testpio_bench.pl --maxiter 10 --iofmt pnc 
--pecount 256 --bench POPD --numIO 40 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR1 -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 512 --bench POPD --numIO 80 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR1 - -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 4 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 8 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 10 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 16 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 20 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 24 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 32 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 40 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 64 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 80 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 -#./testpio_bench.pl --maxiter 10 --iofmt pnc -lfs-ost-count 160 --pecount 1000 --bench POPD --numIO 160 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR3 - -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1600 --bench POPD --numIO 320 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR1 -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 2000 --bench POPD --numIO 640 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR1 -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 4000 --bench POPD --numIO 640 --mpi-cb-buffer-size=8388608 --logfile-suffix DnR1 - - diff --git a/src/externals/pio1/tests/testpio/POPDv3.csh b/src/externals/pio1/tests/testpio/POPDv3.csh deleted file mode 100755 index 4483057f122..00000000000 --- a/src/externals/pio1/tests/testpio/POPDv3.csh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' - -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 64 --bench POPD --numIO 64 --log ${host}.0064.pnc.none.iotask_64.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 128 --bench POPD --numIO 128 --log ${host}.0128.pnc.none.iotask_128.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 256 --bench POPD --numIO 256 --log ${host}.0256.pnc.none.iotask_256.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 512 --bench POPD --numIO 512 --log ${host}.0512.pnc.none.iotask_512.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 1000 --bench POPD --numIO 1000 --log ${host}.1000.pnc.none.iotask_1000.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 1600 --bench POPD --numIO 1600 --log 
${host}.1600.pnc.none.iotask_1600.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 2000 --bench POPD --numIO 2000 --log ${host}.2000.pnc.none.iotask_2000.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt pnc --pecount 4000 --bench POPD --numIO 4000 --log ${host}.4000.pnc.none.iotask_4000.log.${id} - -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 8000 --bench POPD --numIO 640 --log ${host}.8000.pnc.iotask_640.log.${id} - diff --git a/src/externals/pio1/tests/testpio/POPDv4.csh b/src/externals/pio1/tests/testpio/POPDv4.csh deleted file mode 100755 index 918ea7e87bf..00000000000 --- a/src/externals/pio1/tests/testpio/POPDv4.csh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' - -#./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 64 --bench POPD --numIO 10 --log ${host}.0064.bin.box.iotask_10.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 128 --bench POPD --numIO 20 --log ${host}.0128.bin.box.iotask_20.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 256 --bench POPD --numIO 40 --log ${host}.0256.bin.box.iotask_40.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 512 --bench POPD --numIO 80 --log ${host}.0512.bin.box.iotask_80.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 1000 --bench POPD --numIO 160 --log ${host}.1000.bin.box.iotask_160.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 1600 --bench POPD --numIO 320 --log ${host}.1600.bin.box.iotask_320.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 2000 --bench POPD --numIO 640 --log ${host}.2000.bin.box.iotask_640.log.${id} - -./testpio_bench.pl --maxiter 10 --iofmt bin --pecount 4000 --bench POPD --numIO 640 --log ${host}.4000.bin.box.iotask_640.log.012211-2233 - -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 8000 --bench POPD --numIO 640 --log ${host}.8000.pnc.iotask_640.log.${id} - diff --git a/src/externals/pio1/tests/testpio/POPDv5.csh b/src/externals/pio1/tests/testpio/POPDv5.csh deleted file mode 100755 index c1267dd790b..00000000000 --- a/src/externals/pio1/tests/testpio/POPDv5.csh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' - -./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 64 --bench POPD --numIO 64 --log ${host}.0064.bin.none.iotask_64.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 128 --bench POPD --numIO 128 --log ${host}.0128.bin.none.iotask_128.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 256 --bench POPD --numIO 256 --log ${host}.0256.bin.none.iotask_256.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 512 --bench POPD --numIO 512 --log ${host}.0512.bin.none.iotask_512.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 1000 --bench POPD --numIO 1000 --log ${host}.1000.bin.none.iotask_1000.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 1600 --bench POPD --numIO 1600 --log ${host}.1600.bin.none.iotask_1600.log.${id} -./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 2000 --bench POPD --numIO 2000 --log ${host}.2000.bin.none.iotask_2000.log.${id} -# waiting for another 4000 core job in queue -#./testpio_bench.pl --maxiter 10 --rearr none --iofmt bin --pecount 4000 --bench POPD --numIO 4000 --log 
${host}.4000.bin.none.iotask_4000.log.${id} - -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 8000 --bench POPD --numIO 640 --log ${host}.8000.pnc.iotask_640.log.${id} - diff --git a/src/externals/pio1/tests/testpio/README.testpio b/src/externals/pio1/tests/testpio/README.testpio deleted file mode 100644 index 7156a9e1e38..00000000000 --- a/src/externals/pio1/tests/testpio/README.testpio +++ /dev/null @@ -1,288 +0,0 @@ - -TESTPIO README - -Testpio tests both the accuracy and performance of reading and writing data -using the pio library. The tests are controlled via namelist. There are a -set of general namelist and then namelist to setup a computational -decomposition and an IO decomposition. The computational decomposition -should be setup to duplicate a realistic model data decomposition. The IO -is generally not used , but in some cases, can be used and impacts IO performance. -The IO decomposition is an intermediate decomposition that provides -compatability between a relative arbitrary computational decomposition -and the MPI-IO, netcdf, pnetcdf, or other IO layers. Depending on the -IO methods used, only certain IO decompositions are valid. In general, -the IO decomposition is not used and is set internally. - -The namelist input file is called "testpio_in". The first namelist -block, io_nml, contains some general settings: - - namelist /io_nml/ - casename - string, user defined test case name - nx_global - integer, global size of "x" dimension - ny_global - integer, global size of "y" dimension - nz_global - integer, glboal size of "z" dimension - ioFMT - string, type and i/o method of data file - ("bin","pnc","snc"), binary, pnetcdf, or serial netcdf - rearr - string, type of rearranging to be done - ("none","mct","box","boxauto") - nprocsIO - integer, number of IO processors used only when rearr is - not "none", if rearr is "none", then the IO decomposition - will be the computational decomposition - base - integer, base pe associated with nprocIO striding - stride - integer, the stride of io pes across the global pe set - num_aggregator - integer, mpi-io number of aggregators, only used if no - pio rearranging is done - dir - string, directory to write output data, this must exist - before the model starts up - num_iodofs - tests either 1dof or 2dof init decomp interfaces (1,2) - maxiter - integer, the number of trials for the test - DebugLevel - integer, sets the debug level (0,1,2,3) - compdof_input - string, setting of the compDOF ('namelist' or a filename) - compdof_output - string, whether the compDOF is saved to disk - ('none' or a filename) - -Notes: - - the "mct" rearr option is not currently available - - if rearr is set to "none", then the computational decomposition is also - going to be used as the IO decomposition. The computation decomposition - must therefore be suited to the underlying I/O methods. - - if rearr is set to "box", then pio is going to generate an internal - IO decomposition automatically and pio will rearrange to that decomp. - - num_aggregator is used with mpi-io and no pio rearranging. mpi-io is only - used with binary data. 
- - nprocsIO, base, and stride implementation has some special options - if nprocsIO > 0 and stride > 0, then use input values - if nprocsIO > 0 and stride <= 0, then stride=(npes-base)/nprocsIO - if nprocsIO <= 0 and stride > 0, then nprocsIO=(npes-base)/stride - if nprocsIO <= 0 and stride <= 0, then nprocsIO=npes, base=0, stride=1 - -Two other namelist blocks exist to described the computational -and IO decompositions, compdof_nml and iodof_nml. These namelist -blocks are identical in use. - - namelist / compdof_nml || iodof_nml / - nblksppe - integer, sets the number of blocks desired per pe, - the default is one per pe for automatic decomposition. - increasing this increases the flexibility of decompositions. - grdorder - string, sets the gridcell ordering within the block - ("xyz","xzy","yxz","yzx","zxy","zyx") - grddecomp - string, sets up the block size with gdx, gdy, and gdz, see - below, ("x","y","z","xy","xye","xz","xze","yz","yze", - "xyz","xyze","setblk") - gdx - integer, "x" size of block - gdy - integer, "y" size of block - gdz - integer, "z" size of block - blkorder - string, sets the block ordering within the domain - ("xyz","xzy","yxz","yzx","zxy","zyx") - blkdecomp1 - string, sets up the block / processor layout within the domain - with bdx, bdy, and bdz, see below. - ("x","y","z","xy","xye","xz","xze","yz","yze","xyz","xyze", - "setblk","cont1d","cont1dm") - blkdecomp2 - string, provides an additional option to the block decomp - after blkdecomp1 is computes ("","ysym2","ysym4") - bdx - integer, "x" numbers of contiguous blocks - bdy - integer, "y" numbers of contiguous blocks - bdz - integer, "z" numbers of contiguous blocks - -A description of the decomposition implementation and some examples -are provided below. - -Testpio writes out several files including summary information to -stdout, data files to the namelist dir directory, and a netcdf -file summarizing the decompositions. The key output information -is stdout, which contains the timing information. In addition, -a netcdf file called gdecomp.nc is written that provides both the -block and task ids for each gridcell as computed by the decompositions. -Finally, foo.* files are written by testpio using the methods -specified. - -Currently, the timing information is limited to the high level -pio read/write calls which generally will also include copy and rearrange -overhead as well as actual I/O time. Addition timers will be added -in the future. - -The test script is called testpio_run.pl, it uses the hostname -function to determine the platform. New platforms can be added by -editing the files build_defaults.xml and Utils.pm. If more than one -configuration should be tested on a single platform you can provide -two hostnames in this file and specify the name to test in a --host -option to testpio_run.pl - -There are several testpio_in files for the pio test suite. The ones that -come with pio test specific things. 
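
[Editor's illustration] As a point of reference, a minimal testpio_in might look like the sketch below. It only combines settings already documented above; every value (case name, grid sizes, IO format, processor counts, directory) is hypothetical and is not copied from any namelist shipped with PIO.

 &io_nml
   casename   = 'example'   ! user defined test case name
   nx_global  = 360         ! global "x" dimension
   ny_global  = 180         ! global "y" dimension
   nz_global  = 1           ! global "z" dimension
   ioFMT      = 'pnc'       ! parallel netcdf
   rearr      = 'box'       ! pio generates the IO decomposition internally
   nprocsIO   = 4           ! number of IO processors
   base       = 0
   stride     = -1          ! stride <= 0: derived as (npes-base)/nprocsIO
   dir        = './'        ! must exist before the model starts up
   maxiter    = 10          ! number of trials for the test
   DebugLevel = 0
 /
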
-A description of the decomposition implementation and some examples -are provided below. -
-Testpio writes out several files including summary information to -stdout, data files to the namelist dir directory, and a netcdf -file summarizing the decompositions. The key output information -is stdout, which contains the timing information. In addition, -a netcdf file called gdecomp.nc is written that provides both the -block and task ids for each gridcell as computed by the decompositions. -Finally, foo.* files are written by testpio using the methods -specified. -
-Currently, the timing information is limited to the high level -pio read/write calls, which generally will also include copy and rearrange -overhead as well as actual I/O time. Additional timers will be added -in the future. -
-The test script is called testpio_run.pl; it uses the hostname -function to determine the platform. New platforms can be added by -editing the files build_defaults.xml and Utils.pm. If more than one -configuration should be tested on a single platform, you can provide -two hostnames in this file and specify the name to test via the --host -option to testpio_run.pl. -
-There are several testpio_in files for the pio test suite. The ones that -come with pio test specific features. In general, there are tests for - sn = serial netcdf and no rearrangement - sb = serial netcdf and box rearrangement - pn = parallel netcdf and no rearrangement - pb = parallel netcdf and box rearrangement - bn = binary I/O and no rearrangement - bb = binary I/O and box rearrangement -and the test number (01, etc) is consistent across I/O methods with - 01 = all data on root pe, only root pe active in I/O - 02 = simple 2d xy decomp across all pes with all pes active in I/O - 03 = all data on root pe, all pes active in I/O - 04 = simple 2d xy decomp with yxz ordering and stride=4 pes active in I/O - 05 = 2d xy decomp with 4 blocks/pe, yxz ordering, xy block decomp, and - stride=4 pes active in I/O - 06 = 3d xy decomp with 4 blocks/pe, yxz ordering, xy block decomp, and - stride=4 pes active in I/O - 07 = 3d xyz decomp with 16 blocks/pe, yxz ordering, xyz block decomp - with block yzx ordering and stride=4 pes active in I/O - 08 = 2d xy decomp with 4 blocks/pe and yxz grid ordering, yxz block - ordering and cont1d block decomp -the rd01 and wr01 tests are distinct and test writing, reading, and use -of DOF data via pio methods. -
-The test suite builds the model with different options and runs -sets of tests for each build. In general, mct is disabled and -the timing utility is enabled during testing, and - snet = serial netcdf only - pnet = parallel netcdf only - mpiio = mpiio only - all = everything on - ant = everything on but timing disabled -
-======================================================================== -DECOMPOSITION: -
-The decomposition implementation supports the decomposition of -a general 3 dimensional "nx * ny * nz" grid into multiple blocks -of gridcells which are then ordered and assigned to processors. -In general, blocks in the decomposition are rectangular, -"gdx * gdy * gdz", and the same size, although some blocks around -the edges of the domain may be smaller if the decomposition is uneven. -Both gridcells within the block and blocks within the domain can be -ordered in any of the possible dimension hierarchies, such as "xyz" -where the first dimension is the fastest. -
-The gdx, gdy, and gdz inputs allow the user to specify the size in -any dimension, and the grddecomp input specifies which dimensions are -to be further optimized. In general, automatic decomposition generation -of 3 dimensional grids can be done in any possible combination of -dimensions (x, y, z, xy, xz, yz, or xyz), with the other dimensions having a -fixed block size. The automatic generation of the decomposition is -based upon an internal algorithm that tries to determine the most -"square" blocks with an additional constraint on minimizing the maximum -number of gridcells across processors. If evenly divided grids are -desired, use of the "e" addition to grddecomp specifies that the grid -decomposition must be evenly divided. The setblk option uses the -prescribed gdx, gdy, and gdz inputs without further automation. -
-The blkdecomp1 input works fundamentally the same way as the grddecomp -in mapping blocks to processors, but has a few additional options. -"cont1d" (contiguous 1d) basically unwraps the blocks in the order specified -by the blkorder input and then decomposes that "1d" list of blocks -onto processors by contiguously grouping blocks together and allocating -them to a processor. The number of contiguous blocks that are -allocated to a processor is the maximum of the values of bdx, bdy, and -bdz inputs.
Contiguous blocks are allocated to each processor in turn -in a round robin fashion until all blocks are allocated. The -"cont1dm" does basically the same thing except the number of -contiguous blocks are set automatically such that each processor -recieves only 1 set of contiguous blocks. The ysym2 and ysym4 -blkdecomp2 options modify the original block layout such that -the tasks assigned to the blocks are 2-way or 4-way symetric -in the y axis. - -The decomposition tool is extremely flexible, but arbitrary -inputs will not always yield valid decompositions. If a valid -decomposition cannot be computed based on the global grid size, -number of pes, number of blocks desired, and decomposition options, -the model will stop. - -As indicated above, the IO decomposition must be suited to the -IO methods, so decompositions are even further limited by those -constraints. the testpio tool provides limited checking about -whether the IO decomposition is valid for the IO method used. -Since the IO output is written in "xyz" order, it's likely the -best IO performance will be achieved with both grdorder and blkorder -set to "xyz" for the IO decomposition. - -Also note that in all cases, regardless of the decomposition, -the global gridcell numbering and ordering in the output file -is assumed to be "xyz" and defined as a single block. The number -scheme in the examples below demonstrates how the namelist input -relates back to the grid numbering on the local computational -decomposition. - -Some decomposition examples: - "B" is the block number - "P" is the processor (1:npes) the block is associated with - numbers are the local gridcell numbering within the block if the - local dimensions are unrolled. - -Standard xyz ordering, 2d decomp: - note: blkdecomp plays no role since there is 1 block per pe - nx_global 6 - ny_global 4 - nz_global 1 ______________________________ - npes 4 |B3 P3 |B4 P4 | - nblksppe 1 | | | - grdorder "xyz" | | | - grddecomp "xy" | | | - gdx 0 | | | - gdy 0 |--------------+---------------| - gdz 0 |B1 P1 |B2 P2 | - blkorder "xyz" | 4 5 6 | 4 5 6 | - blkdecomp1 "xy" | | | - blkdecomp2 "" | | | - bdx 0 | 1 2 3 | 1 2 3 | - bdy 0 |______________|_______________| - bdz 0 - -Same as above but yxz ordering, 2d decomp - note: blkdecomp plays no role since there is 1 block per pe - nx_global 6 - ny_global 4 - nz_global 1 _____________________________ - npes 4 |B2 P2 |B4 P4 | - nblksppe 1 | | | - grdorder "yxz" | | | - grddecomp "xy" | | | - gdx 0 | | | - gdy 0 |--------------+--------------| - gdz 0 |B1 P1 |B3 P3 | - blkorder "yxz" | 2 4 6 | 2 4 6 | - blkdecomp1 "xy" | | | - blkdecomp2 "" | | | - bdx 0 | 1 3 5 | 1 3 5 | - bdy 0 |______________|______________| - bdz 0 - -xyz grid ordering, 1d x decomp - note: blkdecomp plays no role since there is 1 block per pe - note: blkorder plays no role since it's a 1d decomp - nx_global 8 - ny_global 4 - nz_global 1 _____________________________________ - npes 4 |B1 P1 |B2 P2 |B3 P3 |B4 P4 | - nblksppe 1 | 7 8 | 7 8 | | | - grdorder "xyz" | | | | | - grddecomp "x" | | | | | - gdx 0 | 5 6 | 5 6 | | | - gdy 0 | | | | | - gdz 0 | | | | | - blkorder "yxz" | 3 4 | 3 4 | | | - blkdecomp1 "xy" | | | | | - blkdecomp2 "" | | | | | - bdx 0 | 1 2 | 1 2 | | | - bdy 0 |________|_________|________|_________| - bdz 0 - -yxz block ordering, 2d grid decomp, 2d block decomp, 4 block per pe - nx_global 8 - ny_global 4 - nz_global 1 _____________________________________ - npes 4 |B4 P2 |B8 P2 |B12 P4 |B16 P4 | - nblksppe 4 | | | | | - grdorder "xyz" 
|--------+---------+--------+---------| - grddecomp "xy" |B3 P2 |B7 P2 |B11 P4 |B15 P4 | - gdx 0 | | | | | - gdy 0 |--------+---------+--------+---------| - gdz 0 |B2 P1 |B6 P1 |B10 P3 |B14 P3 | - blkorder "yxz" | | | | | - blkdecomp1 "xy" |--------+---------+--------+---------| - blkdecomp2 "" |B1 P1 |B5 P1 |B9 P3 |B13 P3 | - bdx 0 | 1 2 | 1 2 | | | - bdy 0 |________|_________|________|_________| - bdz 0 - diff --git a/src/externals/pio1/tests/testpio/WRFB.csh b/src/externals/pio1/tests/testpio/WRFB.csh deleted file mode 100644 index 2f237453981..00000000000 --- a/src/externals/pio1/tests/testpio/WRFB.csh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -set host = 'kraken' -#set host = 'hopper' -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 125 --bench WRFB --numIO 32 --log ${host}.0125.pnc.iotask_32.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 250 --bench WRFB --numIO 40 --log ${host}.0250.pnc.iotask_40.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 500 --bench WRFB --numIO 80 --log ${host}.0500.pnc.iotask_80.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1000 --bench WRFB --numIO 160 --log ${host}.1000.pnc.iotask_160.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 2025 --bench WRFB --numIO 320 --log ${host}.2025.pnc.iotask_320.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 4050 --bench WRFB --numIO 640 --log ${host}.4050.pnc.iotask_640.log.${id} - -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 8100 --bench WRFB --numIO 640 --log ${host}.8100.pnc.iotask_640.log.${id} -#./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 16200 --bench WRFB --numIO 640 --log ${host}.16200.pnc.iotask_640.log.${id} diff --git a/src/externals/pio1/tests/testpio/build_defaults.xml b/src/externals/pio1/tests/testpio/build_defaults.xml deleted file mode 100644 index 55691bfc20d..00000000000 --- a/src/externals/pio1/tests/testpio/build_defaults.xml +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/externals/pio1/tests/testpio/check_mod.F90 b/src/externals/pio1/tests/testpio/check_mod.F90 deleted file mode 100644 index 25666900538..00000000000 --- a/src/externals/pio1/tests/testpio/check_mod.F90 +++ /dev/null @@ -1,250 +0,0 @@ -module check_mod - - use kinds_mod - use pio_types, only : PIO_NOERR ! _EXTERNAL - use alloc_mod ! _EXTERNAL - use pio_support, only : CheckMPIReturn ! _EXTERNAL -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - implicit none - private -#ifdef NO_MPIMOD - include 'mpif.h' ! 
_EXTERNAL -#endif - public :: checkpattern - - interface checkpattern - module procedure check_1D_r8, & - check_1D_r4, & - check_1D_i4 - module procedure check_3D_r8, & - check_3D_r4, & - check_3D_i4 - end interface - -contains - -subroutine check_1D_r8(my_comm, fname,wr_array,rd_array,len,iostat) - integer, intent(in) :: my_comm - character(len=*) :: fname - real(r8) :: wr_array(:) - real(r8) :: rd_array(:) - integer(i4), intent(in) :: len - integer(i4),optional :: iostat - - real(r8),pointer :: diff(:) - real(r8) :: lsum,gsum, maxbad - integer(i4) :: ierr,cbad,rank, maxbadloc(1) - - if(present(iostat)) iostat = PIO_noerr - - call alloc_check(diff,len,' check_1D_r8:diff ') - - if(len>0) then - diff = abs(wr_array - rd_array) - cbad = COUNT(diff > 1.0d-299) - maxbad = maxval(diff) - maxbadloc = maxloc(diff) - lsum = SUM(diff) - else - lsum = 0 - end if - call MPI_Allreduce(lsum,gsum,1,MPI_REAL8,MPI_SUM,MY_COMM,ierr) - call CheckMPIReturn('Call to MPI_Allreduce()',ierr,__FILE__,__LINE__) - - if(lsum > 1.0d-80) then ! There is a discrepency between read + write data - call MPI_COMM_rank(MY_COMM,rank,ierr) - call CheckMPIReturn('Call to MPI_COMM_RANK()',ierr,__FILE__,__LINE__) - print *,'IAM: ', rank, 'File: ',TRIM(fname),& - ' Error detected for correctness test(1D,R8): ',lsum,' # bad: ',cbad, & - ' gsum:', gsum, 'max ',maxbad,' loc ',maxbadloc, & - wr_array(maxbadloc), rd_array(maxbadloc) - if(present(iostat)) iostat = -20 - endif - call dealloc_check(diff) -end subroutine check_1D_r8 - -subroutine check_3D_r8(my_comm, fname,wr_array,rd_array) - integer, intent(in) :: my_comm - - character(len=*) :: fname - real(r8) :: wr_array(:,:,:) - real(r8) :: rd_array(:,:,:) - - real(r8), pointer :: diff(:,:,:) - real(r8) :: lsum,gsum - integer(i4) :: ierr,cbad,rank - integer(i4) :: len1,len2,len3 - - len1 = SIZE(wr_array,dim=1) - len2 = SIZE(wr_array,dim=2) - len3 = SIZE(wr_array,dim=3) - - allocate(diff(len1,len2,len3)) - - diff = wr_array - rd_array - cbad = COUNT(diff .ne. 0.0) - lsum = SUM(diff) - - call MPI_Allreduce(lsum,gsum,1,MPI_REAL8,MPI_SUM,MY_COMM,ierr) - call CheckMPIReturn('Call to MPI_Allreduce()',ierr,__FILE__,__LINE__) - - if(abs(gsum) > 1.0d-80) then - call MPI_COMM_rank(MY_COMM,rank,ierr) - call CheckMPIReturn('Call to MPI_COMM_RANK()',ierr,__FILE__,__LINE__) - if(lsum .ne. 0.0) print *,'IAM: ', rank, 'File: ',TRIM(fname),& - ' Error detected for correctness test(3D,R8): ',lsum,' # bad: ',cbad - endif - deallocate(diff) - -end subroutine check_3D_r8 - -subroutine check_3D_r4(my_comm, fname,wr_array,rd_array) - integer, intent(in) :: my_comm - - character(len=*) :: fname - real(r4) :: wr_array(:,:,:) - real(r4) :: rd_array(:,:,:) - - real(r4), pointer :: diff(:,:,:) - real(r4) :: lsum,gsum - integer(i4) :: ierr,cbad,rank - integer(i4) :: len1,len2,len3 - - len1 = SIZE(wr_array,dim=1) - len2 = SIZE(wr_array,dim=2) - len3 = SIZE(wr_array,dim=3) - - allocate(diff(len1,len2,len3)) - - diff = wr_array - rd_array - cbad = COUNT(diff .ne. 0.0) - lsum = SUM(diff) - - call MPI_Allreduce(lsum,gsum,1,MPI_REAL,MPI_SUM,MY_COMM,ierr) - call CheckMPIReturn('Call to MPI_Allreduce()',ierr,__FILE__,__LINE__) - - if(abs(gsum) .gt. tiny(gsum)) then - call MPI_COMM_rank(MY_COMM,rank,ierr) - call CheckMPIReturn('Call to MPI_COMM_RANK()',ierr,__FILE__,__LINE__) - if(lsum .ne. 
0) print *,'IAM: ', rank, 'File: ',TRIM(fname),& - ' Error detected for correctness test(3D,R4): ',lsum,' # bad: ',cbad - endif - deallocate(diff) - -end subroutine check_3D_r4 - -subroutine check_3D_i4(my_comm, fname,wr_array,rd_array) - integer, intent(in) :: my_comm - - character(len=*) :: fname - integer(i4) :: wr_array(:,:,:) - integer(i4) :: rd_array(:,:,:) - - integer(i4), pointer :: diff(:,:,:) - integer(i4) :: lsum,gsum - integer(i4) :: ierr,cbad,rank - integer(i4) :: len1,len2,len3 - - len1 = SIZE(wr_array,dim=1) - len2 = SIZE(wr_array,dim=2) - len3 = SIZE(wr_array,dim=3) - - allocate(diff(len1,len2,len3)) - - diff = wr_array - rd_array - cbad = COUNT(diff .ne. 0.0) - lsum = SUM(diff) - - call MPI_Allreduce(lsum,gsum,1,MPI_INTEGER,MPI_SUM,MY_COMM,ierr) - call CheckMPIReturn('Call to MPI_Allreduce()',ierr,__FILE__,__LINE__) - if(gsum .ne. 0.0) then - call MPI_COMM_rank(MY_COMM,rank,ierr) - call CheckMPIReturn('Call to MPI_COMM_RANK()',ierr,__FILE__,__LINE__) - if(lsum .ne. 0) print *,'IAM: ', rank, 'File: ',TRIM(fname),& - ' Error detected for correctness test(3D,I4): ',lsum,' # bad: ',cbad - endif - deallocate(diff) - -end subroutine check_3D_i4 - -subroutine check_1D_r4(my_comm,fname,wr_array,rd_array,len,iostat) - integer, intent(in) :: my_comm - - character(len=*) :: fname - real(r4) :: wr_array(:) - real(r4) :: rd_array(:) - integer(i4),optional :: iostat - - real(r4),pointer :: diff(:) - real(r4) :: lsum,gsum - integer(i4) :: ierr,len,cbad,rank - - - -! Set default (no error) value for iostat if present) - if(present(iostat)) iostat = PIO_noerr - - call alloc_check(diff,len,' check_1D_r4:diff ') - - if(len>0) then - diff = wr_array - rd_array - cbad = COUNT(diff .ne. 0.0) - lsum = SUM(diff) - else - lsum = 0 - end if - - call MPI_Allreduce(lsum,gsum,1,MPI_REAL,MPI_SUM,MY_COMM,ierr) - call CheckMPIReturn('Call to MPI_Allreduce()',ierr,__FILE__,__LINE__) - if(abs(gsum) > tiny(gsum)) then - call MPI_COMM_rank(MY_COMM,rank,ierr) - call CheckMPIReturn('Call to MPI_COMM_RANK()',ierr,__FILE__,__LINE__) - if(lsum .ne. 0.0) print *,'IAM: ', rank, 'File: ',TRIM(fname),& - ' Error detected for correctness test(1D,R4): ',lsum,' # bad: ',cbad - if(present(iostat)) iostat = -20 - endif - deallocate(diff) - -end subroutine check_1D_r4 - -subroutine check_1D_i4(my_comm, fname,wr_array,rd_array,len,iostat) - integer, intent(in) :: my_comm - character(len=*) :: fname - integer(i4) :: wr_array(:) - integer(i4) :: rd_array(:) - integer(i4), intent(in) :: len - integer(i4),optional :: iostat - - integer(i4),pointer :: diff(:) - integer(i4) :: lsum,gsum - integer(i4) :: ierr,cbad,rank - - - -! Set default (no error) value for iostat if present) - if(present(iostat)) iostat = PIO_noerr - - call alloc_check(diff,len,' check_1D_r4:diff ') - if(len>0) then - diff = wr_array - rd_array - cbad = COUNT(diff .ne. 0.0) - lsum = SUM(diff) - else - lsum = 0 - end if - call MPI_Allreduce(lsum,gsum,1,MPI_INTEGER,MPI_SUM,MY_COMM,ierr) - call CheckMPIReturn('Call to MPI_Allreduce()',ierr,__FILE__,__LINE__) - if(gsum .ne. 0.0) then - call MPI_COMM_rank(MY_COMM,rank,ierr) - call CheckMPIReturn('Call to MPI_COMM_RANK()',ierr,__FILE__,__LINE__) - if(lsum .ne. 
0) print *,'IAM: ', rank, 'File: ',TRIM(fname),& - ' Error detected for correctness test(1D,I4): ',lsum,' # bad: ',cbad - if(present(iostat)) iostat = -20 - endif - deallocate(diff) - -end subroutine check_1D_i4 - -end module check_mod diff --git a/src/externals/pio1/tests/testpio/config_bench.xml b/src/externals/pio1/tests/testpio/config_bench.xml deleted file mode 100644 index 4c2f6840e7a..00000000000 --- a/src/externals/pio1/tests/testpio/config_bench.xml +++ /dev/null @@ -1,315 +0,0 @@ - - - - - - - - - - - - - - -163842/x1.163842.graph.info.part.64 - -163842/x1.163842.graph.info.part.120 - -163842/x1.163842.graph.info.part.240 - -163842/x1.163842.graph.info.part.480 - -163842/x1.163842.graph.info.part.960 - -163842/x1.163842.graph.info.part.1020 - - - -655362/x1.655362.graph.info.part.120 - -655362/x1.655362.graph.info.part.240 - -655362/x1.655362.graph.info.part.480 - -655362/x1.655362.graph.info.part.960 - -655362/x1.655362.graph.info.part.1920 - -655362/x1.655362.graph.info.part.3840 - - - -3118083/x32.3118083.graph.info.part.480 - -3118083/x32.3118083.graph.info.part.960 - -3118083/x32.3118083.graph.info.part.1920 - -3118083/x32.3118083.graph.info.part.3840 - -3118083/x32.3118083.graph.info.part.8000 - -3118083/x32.3118083.graph.info.part.16000 - - - - - -256 256 256 - - - -80 48 60 - - -32 48 60 - - -32 24 60 - - -16 24 60 - - -16 12 60 - - - - - -90 72 100 - - -90 36 100 - - -60 30 100 - - -45 36 100 - - -30 30 100 - - -45 18 100 - - -20 40 100 - - -20 20 100 - - -10 20 100 - - -10 10 100 - - - -576 6 26 - - -576 6 13 - - -576 3 13 - - -576 12 2 - - -576 6 2 - - -576 3 2 - - - -450 300 20 - - -225 300 20 - - -225 150 20 - - -225 75 20 - - -90 120 20 - - -90 96 20 - - -90 60 20 - - -90 48 20 - - -45 48 20 - - -45 24 20 - - - -900 1200 1 - - -900 600 1 - - -450 600 1 - - -450 300 1 - - -225 300 1 - - -225 150 1 - - -225 75 1 - - -90 120 1 - - -90 96 1 - - -90 60 1 - - -90 48 1 - - -45 48 1 - - -45 24 1 - - - -450 300 40 - - -225 300 40 - - -225 150 40 - - -225 75 40 - - -90 120 40 - - -90 96 40 - - -90 60 40 - - -90 48 40 - - -45 48 40 - - -45 24 10 - - -30 24 40 - - -24 24 40 - - - - - diff --git a/src/externals/pio1/tests/testpio/fdepends.awk b/src/externals/pio1/tests/testpio/fdepends.awk deleted file mode 100644 index 2980920cf23..00000000000 --- a/src/externals/pio1/tests/testpio/fdepends.awk +++ /dev/null @@ -1,56 +0,0 @@ -# -# File fdepends.awk -# -# Take an .F90 file and generate Makefile dependencies for -# each module "use", "#include", and "include" -# -# Example: -# POP.o: io.o -# -# Predefined variables Typical values -# NAME POP -# SUF .F90 -# - -BEGIN { IGNORECASE=1 - PRLINE = NAME".o: " -# print NAME".o : " NAME SUF - } - - -# -# awk reads each line of the filename argument $2 until it finds -# a "use" or "#include" -# - - -/^[ \t]*use[ \t]+/ { - - # Ignore any "use" line that contains comment "_EXTERNAL" - if ( $0 ~ /_EXTERNAL/ ) next - - # Assume the second field is the F90 module name, - # remove any comma at the end of the second field (due to - # ONLY or rename), and print it in a dependency line. 
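# For example (hypothetical input), with NAME=POP a source line such as
#     use io_mod, only: read_field
# has $2 = "io_mod,"; the sub() below strips the trailing comma and the
# print then emits the dependency line
#     POP.o: io_mod.o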
- - sub(/,$/,"",$2) - print PRLINE $2".o" - } - - -# This will match include lines (either cpp or fortran style) -# #include "myinclude.inc" -# #include -# INCLUDE 'MYINCLUDE.INC' - -/^[ \t]*#?include[ \t]/ { - - # Ignore any "#include" line that contains comment "_EXTERNAL" - if ( $0 ~ /_EXTERNAL/ ) next - - # Remove starting or ending quote or angle bracket - sub(/^["<']/,"",$2) - sub(/[">']$/,"",$2) - print PRLINE $2 - - } diff --git a/src/externals/pio1/tests/testpio/gdecomp_mod.F90 b/src/externals/pio1/tests/testpio/gdecomp_mod.F90 deleted file mode 100644 index 79cdc0ba704..00000000000 --- a/src/externals/pio1/tests/testpio/gdecomp_mod.F90 +++ /dev/null @@ -1,1350 +0,0 @@ -module gdecomp_mod - - use kinds_mod -#if !defined(STANDALONE_TEST) - use pio_support, only : piodie ! _EXTERNAL - use pio, only : pio_offset ! _EXTERNAL -#endif - - implicit none - private - - public :: gdecomp_type - - public :: gdecomp_read_nml - public :: gdecomp_set - public :: gdecomp_print - public :: gdecomp_DOF - public :: camlike_decomp_generator - public :: mpas_decomp_generator - - type :: gdecomp_type - private - integer(i4) :: nxg,nyg,nzg ! global grid size - integer(i4) :: gdx,gdy,gdz ! block size - integer(i4) :: bdx,bdy,bdz ! block ordering - integer(i4) :: npes,nblksppe ! total pes, avg blocks/pe - character(len=16) :: grdorder ! grid order - character(len=16) :: grddecomp ! grid decomp strategy - character(len=16) :: blkorder ! block order - character(len=16) :: blkdecomp1 ! block decomp strategy - character(len=16) :: blkdecomp2 ! block decomp strategy - character(len=128):: nml_file ! namelist filename if used - character(len=16) :: nml_var ! namelist variable if used - end type - - character(len=*),parameter :: modname = 'gdecomp_mod' - integer(i4),parameter :: master_task = 0 - -!================================================================== -contains - -!================================================================== -!================================================================== -!================================================================== - subroutine gdecomp_set(gdecomp,nxg,nyg,nzg,gdx,gdy,gdz,bdx,bdy,bdz, & - npes,nblksppe,grdorder,grddecomp,blkorder,blkdecomp1,blkdecomp2, & - name,my_task) - - implicit none - - type(gdecomp_type), intent(inout) :: gdecomp - -! NOTE: not all of these are optional, but optional allows -! them to be called in arbitrary order - - integer(i4),optional :: nxg,nyg,nzg ! global grid size - integer(i4),optional :: gdx,gdy,gdz ! block size - integer(i4),optional :: bdx,bdy,bdz ! block ordering - integer(i4),optional :: npes,nblksppe ! total pes, avg blocks/pe - character(len=*),optional :: grdorder ! grid order - character(len=*),optional :: grddecomp ! grid decomp strategy - character(len=*),optional :: blkorder ! block order - character(len=*),optional :: blkdecomp1 ! block decomp strategy - character(len=*),optional :: blkdecomp2 ! block decomp strategy - character(len=*),optional :: name ! optional input name - integer(i4),optional :: my_task ! 
task number - - character(len=*),parameter :: subname = 'gdecomp_set' - - gdecomp%nml_file='set_manually' - if (present(name)) then - gdecomp%nml_var=trim(name) - else - gdecomp%nml_var='none' - endif - - if (present(nxg)) then - gdecomp%nxg = nxg - else - call piodie(__FILE__,__LINE__,trim(subname)//' nxg must be set') - endif - - if (present(nyg)) then - gdecomp%nyg = nyg - else - call piodie(__FILE__,__LINE__,trim(subname)//' nyg must be set') - endif - - if (present(nzg)) then - gdecomp%nzg = nzg - else - call piodie(__FILE__,__LINE__,trim(subname)//' nzg must be set') - endif - - if (present(npes)) then - gdecomp%npes = npes - else - call piodie(__FILE__,__LINE__,trim(subname)//' npes must be set') - endif - - if (present(nblksppe)) then - gdecomp%nblksppe = nblksppe - else - gdecomp%nblksppe = 1 - endif - - if (present(grdorder)) then - gdecomp%grdorder = grdorder - else - gdecomp%grdorder = 'xyz' - endif - - if (present(grddecomp)) then - gdecomp%grddecomp = grddecomp - else - gdecomp%grddecomp = 'xyz' - endif - - if (present(gdx)) then - gdecomp%gdx = gdx - else - gdecomp%gdx = 0 - endif - - if (present(gdy)) then - gdecomp%gdy = gdy - else - gdecomp%gdy = 0 - endif - - if (present(gdz)) then - gdecomp%gdz = gdz - else - gdecomp%gdz = 0 - endif - - if (present(blkorder)) then - gdecomp%blkorder = blkorder - else - gdecomp%blkorder = 'xyz' - endif - - if (present(blkdecomp1)) then - gdecomp%blkdecomp1 = blkdecomp1 - else - gdecomp%blkdecomp1 = 'xyz' - endif - - if (present(blkdecomp2)) then - gdecomp%blkdecomp2 = blkdecomp2 - else - gdecomp%blkdecomp2 = '' - endif - - if (present(bdx)) then - gdecomp%bdx = bdx - else - gdecomp%bdx = 0 - endif - - if (present(bdy)) then - gdecomp%bdy = bdy - else - gdecomp%bdy = 0 - endif - - if (present(bdz)) then - gdecomp%bdz = bdz - else - gdecomp%bdz = 0 - endif - - if (present(my_task)) then - if (my_task == master_task) call gdecomp_print(gdecomp) - endif - - end subroutine gdecomp_set - -!================================================================== - subroutine gdecomp_read_nml(gdecomp,nml_file,nml_var,my_task,ntasks,gdims) - - implicit none - - type(gdecomp_type), intent(inout) :: gdecomp - character(len=*),intent(in) :: nml_file ! input namelist file - character(len=*),intent(in) :: nml_var ! input namelist variable - integer(i4), optional, intent(in) :: my_task ! task number - integer(i4), optional, intent(in) :: ntasks ! total number of tasks - integer(i4), optional, intent(in) :: gdims(3) ! global grid size - - ! 
--- namelist --- - integer(i4) :: nxg,nyg,nzg - integer(i4) :: gdx,gdy,gdz - integer(i4) :: bdx,bdy,bdz - integer(i4) :: npes,nblksppe - character(len=16) :: grdorder - character(len=16) :: grddecomp - character(len=16) :: blkorder - character(len=16) :: blkdecomp1 - character(len=16) :: blkdecomp2 - namelist / compdof_nml / & - nxg,nyg,nzg,npes,nblksppe, & - grdorder,grddecomp,gdx,gdy,gdz, & - blkorder,blkdecomp1,blkdecomp2,bdx,bdy,bdz - namelist / iodof_nml / & - nxg,nyg,nzg,npes,nblksppe, & - grdorder,grddecomp,gdx,gdy,gdz, & - blkorder,blkdecomp1,blkdecomp2,bdx,bdy,bdz - character(len=*),parameter :: subname = 'gdecomp_read_nml' - - nxg=1;nyg=1;nzg=1 - gdx=0;gdy=0;gdz=0 - bdx=0;bdy=0;bdz=0 - npes=1 - nblksppe=1 - grdorder='xyz' - grddecomp='xy' - blkorder='xyz' - blkdecomp1='xy' - blkdecomp2='' - - if (trim(nml_var) == 'comp') then - open(10,file=nml_file,status='old') - read(10,nml=compdof_nml) - close(10) - elseif (trim(nml_var) == 'io') then - open(10,file=nml_file,status='old') - read(10,nml=iodof_nml) - close(10) - endif - - if (present(ntasks)) then - npes=ntasks - endif - if (present(gdims)) then - nxg = gdims(1) - nyg = gdims(2) - nzg = gdims(3) - endif - - gdecomp%nml_file = trim(nml_file) - gdecomp%nml_var = trim(nml_var) - gdecomp%nxg = nxg - gdecomp%nyg = nyg - gdecomp%nzg = nzg - gdecomp%npes = npes - gdecomp%nblksppe = nblksppe - - gdecomp%grdorder = grdorder - gdecomp%grddecomp = grddecomp - gdecomp%gdx = gdx - gdecomp%gdy = gdy - gdecomp%gdz = gdz - - gdecomp%blkorder = blkorder - gdecomp%blkdecomp1 = blkdecomp1 - gdecomp%blkdecomp2 = blkdecomp2 - gdecomp%bdx = bdx - gdecomp%bdy = bdy - gdecomp%bdz = bdz - - if (present(my_task)) then - if (my_task == master_task) call gdecomp_print(gdecomp) - endif - - end subroutine gdecomp_read_nml - -!================================================================== - - subroutine gdecomp_print(gdecomp) - - implicit none - - type(gdecomp_type),intent(in) :: gdecomp - character(len=*),parameter :: subname = 'gdecomp_print' - - write(6,*) ' ' - write(6,*) trim(subname),' nml_file = ',trim(gdecomp%nml_file) - write(6,*) trim(subname),' nml_var = ',trim(gdecomp%nml_var) - write(6,*) trim(subname),' nxg = ',gdecomp%nxg - write(6,*) trim(subname),' nyg = ',gdecomp%nyg - write(6,*) trim(subname),' nzg = ',gdecomp%nzg - write(6,*) trim(subname),' npes = ',gdecomp%npes - write(6,*) trim(subname),' nblksppe = ',gdecomp%nblksppe - write(6,*) trim(subname),' grdorder = ',gdecomp%grdorder - write(6,*) trim(subname),' grddecomp = ',gdecomp%grddecomp - write(6,*) trim(subname),' gdx = ',gdecomp%gdx - write(6,*) trim(subname),' gdy = ',gdecomp%gdy - write(6,*) trim(subname),' gdz = ',gdecomp%gdz - write(6,*) trim(subname),' blkdecomp1 = ',gdecomp%blkdecomp1 - write(6,*) trim(subname),' blkdecomp2 = ',gdecomp%blkdecomp2 - write(6,*) trim(subname),' bdx = ',gdecomp%bdx - write(6,*) trim(subname),' bdy = ',gdecomp%bdy - write(6,*) trim(subname),' bdz = ',gdecomp%bdz - write(6,*) trim(subname),' blkorder = ',gdecomp%blkorder - write(6,*) ' ' - - end subroutine gdecomp_print - -!================================================================== - subroutine gdecomp_DOF(gdecomp,my_task,DOF,start,count,write_decomp,test) - -#ifdef _NETCDF - use netcdf ! _EXTERNAL -#endif - - implicit none - - type(gdecomp_type), intent(in) :: gdecomp - integer(i4), intent(in) :: my_task ! task number - integer(kind=pio_offset),pointer :: DOF(:) ! allocated in this routine - integer(i4), intent(out) :: start(3) ! netcdf start index - integer(i4), intent(out) :: count(3) ! 
netcdf count index - logical, optional, intent(in) :: write_decomp ! write gdecomp.nc output file - logical, optional, intent(in) :: test ! single pe test mode - - integer(i4),parameter :: ndims=3 - integer(i4) :: gsiz(ndims) ! global size - integer(i4) :: bsiz(ndims) ! block size - integer(i4) :: nblk(ndims) ! number of blocks - integer(i4) :: dblk(ndims) ! block decomp - integer(i4) :: nbor(ndims) ! block ordering - integer(i4) :: nn(ndims) ! index ordering - integer(i4),pointer :: blkid(:,:,:) - integer(i4),pointer :: tskid(:,:,:) - integer(i4),pointer :: bxyzbord(:) - integer(i4),pointer :: bxyzpord(:) - integer(i4),pointer :: bordpord(:) - integer(i4),pointer :: testdof(:,:) - integer(i4),pointer :: bordstart(:,:) - integer(i4),pointer :: bordend(:,:) - integer(i4),pointer :: pstart(:,:) - integer(i4),pointer :: pend(:,:) - integer(i4),pointer :: cnta(:) - integer(i4),pointer :: cntb(:) - integer(i4) :: cntmax,cnt1,cnt2 - integer(i4) :: tsk - integer(i4) :: minv,maxv - integer(i4) :: ierr,rcode - integer(i4) :: npesx,nblks,nbors - integer(i4) :: gnpes - integer(i4) :: n1,n2,n3,n2b,nbord,nb1,nb2,nb3,nb,nbtmp,n - integer(kind=pio_offset) :: ii, nbxyz - integer(i4) :: contval - logical :: testonly,startok,wdecomp - logical,save :: first_call = .true. - - integer(i4) :: ncid,dimid(ndims),varid(2) - character(len=16) :: dname,vname - character(len=*),parameter :: ncname = 'gdecomp.nc' - - character(len=*),parameter :: subname = 'gdecomp_DOF' - - ! --- start instructions --- - -!DBG print *,'IAM: ',my_task,'gdecomp_DOF: point #1' - testonly = .false. - if (present(test)) then - testonly = test - endif - wdecomp = .false. - if (present(write_decomp)) then - wdecomp = write_decomp - endif - start = 1 - count = 0 - -!DBG print *,'IAM: ',my_task,'gdecomp_DOF: point #2' - if (.not.testonly) then - if (my_task < 0) return - if (my_task < 0 .or. my_task > gdecomp%npes-1) then - write(6,*) trim(subname),' ERROR: my_task out of range ',my_task,0,gdecomp%npes-1 - endif - endif - - gsiz(1) = gdecomp%nxg - gsiz(2) = gdecomp%nyg - gsiz(3) = gdecomp%nzg - bsiz(1) = gdecomp%gdx - bsiz(2) = gdecomp%gdy - bsiz(3) = gdecomp%gdz - nbor(1) = gdecomp%bdx - nbor(2) = gdecomp%bdy - nbor(3) = gdecomp%bdz - gnpes = gdecomp%npes -!DBG print *,'IAM: ',my_task,'gdecomp_DOF: point #3 gsiz:',gsiz -!DBG print *,'IAM: ',my_task,'gdecomp_DOF: point #3 bsiz:',bsiz - - if(wdecomp) then - allocate(blkid(gsiz(1),gsiz(2),gsiz(3))) - allocate(tskid(gsiz(1),gsiz(2),gsiz(3))) - blkid = -1 - tskid = -1 - endif -!DBG print *,'IAM: ',my_task,'gdecomp_DOF: point #4' - - - - ! --- calc blocks --- - - npesx = gnpes * gdecomp%nblksppe - - selectcase (trim(gdecomp%grddecomp)) - case default - call calcdecomp(gdecomp%grddecomp,npesx,gsiz,bsiz,ierr) -!DBG print *,'gdecomp_DOF: grdecomp is:',gdecomp%grddecomp - end select -!DBG print *,'IAM: ',my_task,'gdecomp_DOF: point #5 bsiz:',bsiz -!DBG stop "after call to gdecomp_print" - - ! --- sort and arrange blocks --- - - call pad_div(nblk(1),gsiz(1),bsiz(1)) - call pad_div(nblk(2),gsiz(2),bsiz(2)) - call pad_div(nblk(3),gsiz(3),bsiz(3)) - nblks = nblk(1)*nblk(2)*nblk(3) - contval = 0 - - selectcase (trim(gdecomp%blkdecomp1)) -!! 
case ('sfcxy') - case ('cont1d') - contval = maxval(nbor) - if (contval <= 0) then - write(6,*) trim(subname),' ERROR: contval must be > 0 ',nbor - call piodie(__FILE__,__LINE__) - endif - if (my_task == master_task) & - write(6,*) trim(subname),' blkdecomp1 = ',trim(gdecomp%blkdecomp1),' contval = ',contval - case ('cont1dm') - call pad_div(contval,nblks,gnpes) - if (contval <= 0) then - write(6,*) trim(subname),' ERROR: contval must be > 0 ',nbor - call piodie(__FILE__,__LINE__) - endif - if (my_task == master_task) & - write(6,*) trim(subname),' blkdecomp1 = ',trim(gdecomp%blkdecomp1),' contval = ',contval - case default - call calcdecomp(gdecomp%blkdecomp1,gnpes,nblk,nbor,ierr) - end select - - nbors = nbor(1)*nbor(2)*nbor(3) - dblk = 0 - if (nbors > 0) then - call pad_div(dblk(1),nblk(1),nbor(1)) - call pad_div(dblk(2),nblk(2),nbor(2)) - call pad_div(dblk(3),nblk(3),nbor(3)) - endif - - call calcorder(gdecomp%blkorder,nb1,nb2,nb3,ierr) - allocate(bxyzbord(nblks),bxyzpord(nblks),bordpord(nblks)) - allocate(bordstart(3,nblks),bordend(3,nblks)) - bxyzbord = -1 - bxyzpord = -1 - bordpord = -1 - bordstart = -1 - bordend = -1 - do n3 = 1,nblk(3) - do n2 = 1,nblk(2) - do n1 = 1,nblk(1) - nn(1)=n1; nn(2)=n2; nn(3)=n3 - nbxyz = (nn( 3)-1)*nblk( 1)*nblk( 2) + (nn( 2)-1)*nblk( 1) + nn( 1) - nbord = (nn(nb3)-1)*nblk(nb1)*nblk(nb2) + (nn(nb2)-1)*nblk(nb1) + nn(nb1) - if (nbxyz < 1 .or. nbxyz > nblks .or. nbord < 1 .or. nbord > nblks) then - write(6,*) trim(subname),' ERROR: bxyzbord ',nbxyz,nbord - call piodie(__FILE__,__LINE__) - endif - bxyzbord(nbxyz) = nbord - bordstart(1,nbord) = (n1-1)*bsiz(1) + 1 - bordstart(2,nbord) = (n2-1)*bsiz(2) + 1 - bordstart(3,nbord) = (n3-1)*bsiz(3) + 1 - bordend (1,nbord) = min((n1)*bsiz(1),gsiz(1)) - bordend (2,nbord) = min((n2)*bsiz(2),gsiz(2)) - bordend (3,nbord) = min((n3)*bsiz(3),gsiz(3)) - if (contval > 0) then - tsk = mod(((nbord-1)/contval),gnpes) - if (tsk < 0 .or. tsk >= gnpes) then - write(6,*) trim(subname),' ERROR: tsk1 ',tsk,nbord,contval,gnpes - call piodie(__FILE__,__LINE__) - endif - bxyzpord(nbxyz) = tsk - else - tsk = ((n3-1)/nbor(3))*dblk(2)*dblk(1) + ((n2-1)/nbor(2))*dblk(1) + (n1-1)/nbor(1) - if (nbors <= 0 .or. tsk < 0 .or. tsk >= gnpes) then - write(6,*) trim(subname),' ERROR: tsk2 ',tsk,gnpes,n1,n2,n3,nbor,dblk - call piodie(__FILE__,__LINE__) - endif - bxyzpord(nbxyz) = tsk - endif - enddo - enddo - enddo - - ! --- "refine" blkdecomp1 decomp --- - - selectcase (trim(gdecomp%blkdecomp2)) - case ('') - ! ok, but does nothing - case ('ysym2') - if (mod(nblk(2),2) /= 0) then - write(6,*) trim(subname),' ERROR: ysym2 option must have factor of 2 in y nblocks ' - call piodie(__FILE__,__LINE__) - endif - case ('ysym4') - if (mod(nblk(2),4) /= 0) then - write(6,*) trim(subname),' ERROR: ysym4 option must have factor of 4 in y nblocks ' - call piodie(__FILE__,__LINE__) - endif - case default - write(6,*) trim(subname),' ERROR: blkdecomp2 not supported ',trim(gdecomp%blkdecomp2) - call piodie(__FILE__,__LINE__) - end select - - if (trim(gdecomp%blkdecomp2) == 'ysym4') then - do n3 = 1,nblk(3) - do n2 = nblk(2)/4+1,nblk(2)/2 - do n1 = 1,nblk(1) - n2b = nblk(2)/2-n2+1 - nbxyz = (n3-1)*nblk(1)*nblk(2) + (n2 -1)*nblk(1) + n1 - nbtmp = (n3-1)*nblk(1)*nblk(2) + (n2b-1)*nblk(1) + n1 - bxyzpord(nbxyz) = bxyzpord(nbtmp) - enddo - enddo - enddo - endif - - if (trim(gdecomp%blkdecomp2) == 'ysym2' .or. 
trim(gdecomp%blkdecomp2) == 'ysym4') then - do n3 = 1,nblk(3) - do n2 = nblk(2)/2+1,nblk(2) - do n1 = 1,nblk(1) - n2b = nblk(2)-n2+1 - nbxyz = (n3-1)*nblk(1)*nblk(2) + (n2 -1)*nblk(1) + n1 - nbtmp = (n3-1)*nblk(1)*nblk(2) + (n2b-1)*nblk(1) + n1 - bxyzpord(nbxyz) = bxyzpord(nbtmp) - enddo - enddo - enddo - endif - - ! derive one more block mapping - do nb = 1,nblks - bordpord(bxyzbord(nb)) = bxyzpord(nb) - enddo - -! if (testonly) then -! write(6,*) ' ' -! do nb = 1,nblks -! write(6,*) trim(subname),' nb,bxyzbord,bxyzpord,bordpord ',nb,bxyzbord(nb),bxyzpord(nb),bordpord(nb) -! enddo -! write(6,*) ' ' -! do nb = 1,nblks -! write(6,*) trim(subname),' nb,bordstart,bordend ',n,bordstart(:,nb),bordend(:,nb) -! enddo -! write(6,*) ' ' -! endif - - ! --- map blocks onto gridcells --- - - allocate(cnta(0:gnpes-1),cntb(0:gnpes-1)) - cnta = 0 - cntb = 0 - do n3 = 1,gsiz(3) - do n2 = 1,gsiz(2) - do n1 = 1,gsiz(1) -! ii = (n3-1)*gsiz(2)*gsiz(1) + (n2-1)*gsiz(1) + n1 - nbxyz = ((n3-1)/bsiz(3))*nblk(2)*nblk(1) + ((n2-1)/bsiz(2))*nblk(1) + & - ((n1-1)/bsiz(1)) + 1 - if(wdecomp) then - blkid(n1,n2,n3) = bxyzbord(nbxyz) - tskid(n1,n2,n3) = bxyzpord(nbxyz) - endif -! checked above -! if (tskid(n1,n2,n3) < 0 .or. tskid(n1,n2,n3) >= gnpes) then -! write(6,*) trim(subname),' ERROR: tskid ',n1,n2,n3,tskid(n1,n2,n3) -! call piodie(__FILE__,__LINE__) -! endif -! cnta(tskid(n1,n2,n3)) = cnta(tskid(n1,n2,n3)) + 1 - cnta(bxyzpord(nbxyz)) = cnta(bxyzpord(nbxyz)) + 1 - enddo - enddo - enddo - cntmax = maxval(cnta) - - ! --- map gridcells to dof --- - - if (testonly) then - allocate(testdof(cntmax,0:gnpes-1)) - testdof = 0 - else - allocate(testdof(1,1)) - testdof = 0 - endif - - allocate(dof(cnta(my_task))) - dof = 0 - cntb = 0 - allocate(pstart(3,0:gnpes-1),pend(3,0:gnpes-1)) - pstart = maxval(gsiz) - pend = 0 - - call calcorder(gdecomp%grdorder,nb1,nb2,nb3,ierr) - do nb = 1,nblks - tsk = bordpord(nb) - pstart(1,tsk) = min(pstart(1,tsk),bordstart(1,nb)) - pstart(2,tsk) = min(pstart(2,tsk),bordstart(2,nb)) - pstart(3,tsk) = min(pstart(3,tsk),bordstart(3,nb)) - pend(1,tsk) = max(pend(1,tsk),bordend(1,nb)) - pend(2,tsk) = max(pend(2,tsk),bordend(2,nb)) - pend(3,tsk) = max(pend(3,tsk),bordend(3,nb)) - if (testonly .or. (.not.testonly .and. my_task == tsk)) then - do n3 = bordstart(nb3,nb),bordend(nb3,nb) - do n2 = bordstart(nb2,nb),bordend(nb2,nb) - do n1 = bordstart(nb1,nb),bordend(nb1,nb) - nn(nb1)=n1 - nn(nb2)=n2 - nn(nb3)=n3 - ii = (nn(3)-1)*gsiz(2)*gsiz(1) + (nn(2)-1)*gsiz(1) + nn(1) -!! tsk = tskid(nn(1),nn(2),nn(3)) - cntb(tsk) = cntb(tsk) + 1 - if (cntb(tsk) > cntmax) then - write(6,*) trim(subname),' ERROR: cntb > cntmax ',tsk,cntb(tsk),cntmax - call piodie(__FILE__,__LINE__) - endif - if (testonly) then - testdof(cntb(tsk),tsk) = ii - endif - if (my_task == tsk) dof(cntb(tsk)) = ii - enddo - enddo - enddo - endif - enddo -!DBG print *,__FILE__,__LINE__,cnta -!DBG print *,__FILE__,__LINE__,cntb - - if (cntb(my_task) /= cnta(my_task)) then - write(6,*) trim(subname),' ERROR: cntb ne cnta ',tsk,cnta(tsk),cntb(tsk) - call piodie(__FILE__,__LINE__) - endif - - startok = .true. - do n1 = 0,gnpes-1 - cnt1 = cnta(n1) - cnt2 = (max(pend(1,n1)-pstart(1,n1)+1,0))* & - (max(pend(2,n1)-pstart(2,n1)+1,0))* & - (max(pend(3,n1)-pstart(3,n1)+1,0)) - if (cnt1 /= cnt2) then - startok = .false. 
- endif - enddo - - if (startok) then - n1 = my_task - cnt2 = (max(pend(1,n1)-pstart(1,n1)+1,0))* & - (max(pend(2,n1)-pstart(2,n1)+1,0))* & - (max(pend(3,n1)-pstart(3,n1)+1,0)) - if (cnt2 == 0) then - start = 1 - count = 0 - else - start(1:3) = pstart(1:3,my_task) - count(1:3) = pend(1:3,my_task) - pstart(1:3,my_task) + 1 - endif - if (my_task == master_task) & - write(6,*) trim(subname),' start and count were computed ',my_task,start,count - else - start = 1 - count = 0 - if (my_task == master_task) & - write(6,*) trim(subname),' start and count could NOT be computed ' - endif - -!------- MASTER TASK WRITE ------------------------------------- - - if (my_task == master_task) then - - ! --- write testdof --- - - if (testonly) then - write(6,*) ' ' - do n1 = 0,gnpes-1 - if (cnta(n1) > 0) then - minv = testdof(1,n1) - maxv = testdof(1,n1) - else - minv = 0 - maxv = 0 - endif - do n2 = 1,cnta(n1) - minv = min(minv,testdof(n2,n1)) - maxv = max(maxv,testdof(n2,n1)) - enddo - write(6,*) trim(subname),' TESTDOF ntask=',n1,' size=',cnta(n1),& - ' min=',minv,' max=',maxv,' values=',testdof(1:min(10,cnta(n1)),n1) - enddo - endif ! testonly - - ! --- write summary --- - - write(6,*) ' ' - write(6,*) trim(subname),' MY_TASK = ',my_task - write(6,*) trim(subname),' GRID SIZE = ',gsiz - write(6,*) trim(subname),' BLOCK SIZE = ',bsiz - write(6,*) trim(subname),' NUM of BLOCKS = ',nblks - if (nbors > 0) then - write(6,*) trim(subname),' BLOCK GROUP = ',nbor - endif - if (startok) then - write(6,*) trim(subname),' START = ',start - write(6,*) trim(subname),' COUNT = ',count - endif - write(6,*) ' ' - - ! --- write out arrays --- - -#ifdef _NETCDF - if (wdecomp) then - write(6,*) ' ' - write(6,*) trim(subname),' writing decomp info to file ',trim(ncname) - write(6,*) ' ' - if (first_call) then - rcode = nf90_create(ncname,nf90_clobber,ncid) - else - rcode = nf90_open(ncname,nf90_write,ncid) - endif - rcode = nf90_redef(ncid) - dname = trim(gdecomp%nml_var)//'_nx' - rcode = nf90_def_dim(ncid,dname,gsiz(1),dimid(1)) - dname = trim(gdecomp%nml_var)//'_ny' - rcode = nf90_def_dim(ncid,dname,gsiz(2),dimid(2)) - dname = trim(gdecomp%nml_var)//'_nz' - rcode = nf90_def_dim(ncid,dname,gsiz(3),dimid(3)) - vname = trim(gdecomp%nml_var)//'_blkid' - rcode = nf90_def_var(ncid,vname,NF90_INT,dimid,varid(1)) - vname = trim(gdecomp%nml_var)//'_tskid' - rcode = nf90_def_var(ncid,vname,NF90_INT,dimid,varid(2)) - rcode = nf90_enddef(ncid) - rcode = nf90_put_var(ncid,varid(1),blkid) - rcode = nf90_put_var(ncid,varid(2),tskid) - rcode = nf90_close(ncid) - endif -#endif - - endif ! testonly - -!------- END MASTER TASK WRITE --------------------------------- - - if(wdecomp) then - deallocate(blkid,tskid) - endif - deallocate(cnta,cntb,bxyzbord,bxyzpord,bordpord) - deallocate(bordstart,bordend,pstart,pend) - first_call = .false. 
- - end subroutine gdecomp_DOF - -!================================================================== - subroutine calcorder(type,nb1,nb2,nb3,ierr) - - implicit none - - character(len=*),intent(in) :: type - integer(i4), intent(out) :: nb1,nb2,nb3 - integer(i4), intent(out) :: ierr - - character(len=*),parameter :: subname = 'calcorder' - - selectcase (trim(type)) - case ('xyz') - nb1=1; nb2=2; nb3=3 - case ('xzy') - nb1=1; nb2=3; nb3=2 - case ('yxz') - nb1=2; nb2=1; nb3=3 - case ('yzx') - nb1=2; nb2=3; nb3=1 - case ('zxy') - nb1=3; nb2=1; nb3=2 - case ('zyx') - nb1=3; nb2=2; nb3=1 - case default - write(6,*) trim(subname),' ERROR: ',trim(type),' not supported' - call piodie(__FILE__,__LINE__) - end select - - end subroutine calcorder - -!================================================================== - - subroutine calcdecomp(type,npes,gsiz,bsiz,ierr) - - implicit none - - character(len=*),intent(in) :: type - integer(i4), intent(in) :: npes - integer(i4), intent(in) :: gsiz(:) - integer(i4), intent(inout) :: bsiz(:) - integer(i4), intent(out) :: ierr - - character(len=16) :: option - character(len=*),parameter :: subname = 'calcdecomp' - - option = '' - -!DBG print *,'calcdecomp: type: ',trim(type),' npes: ',npes - selectcase (trim(type)) - - case ('x') - bsiz(1) = 0 - if (bsiz(2) == 0) bsiz(2) = gsiz(2) - if (bsiz(3) == 0) bsiz(3) = gsiz(3) - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('y') - bsiz(2) = 0 - if (bsiz(1) == 0) bsiz(1) = gsiz(1) - if (bsiz(3) == 0) bsiz(3) = gsiz(3) - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('z') - bsiz(3) = 0 - if (bsiz(1) == 0) bsiz(1) = gsiz(1) - if (bsiz(2) == 0) bsiz(2) = gsiz(2) - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('xy') - bsiz(1) = 0 - bsiz(2) = 0 - if (bsiz(3) == 0) bsiz(3) = gsiz(3) - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('xye') - bsiz(1) = 0 - bsiz(2) = 0 - if (bsiz(3) == 0) bsiz(3) = gsiz(3) - option = 'ediv' - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('yz') - bsiz(2) = 0 - bsiz(3) = 0 - if (bsiz(1) == 0) bsiz(1) = gsiz(1) - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('yze') - bsiz(2) = 0 - bsiz(3) = 0 - if (bsiz(1) == 0) bsiz(1) = gsiz(1) - option = 'ediv' - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('xz') - bsiz(1) = 0 - bsiz(3) = 0 - if (bsiz(2) == 0) bsiz(2) = gsiz(2) - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('xze') - bsiz(1) = 0 - bsiz(3) = 0 - if (bsiz(2) == 0) bsiz(2) = gsiz(2) - option = 'ediv' - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('xyz') - bsiz(1) = 0 - bsiz(2) = 0 - bsiz(3) = 0 - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('xyze') - bsiz(1) = 0 - bsiz(2) = 0 - bsiz(3) = 0 - option = 'ediv' - call calcbsiz(npes,gsiz,bsiz,option,ierr) - - case ('setblk') - if (bsiz(1) == 0 .or. bsiz(2) == 0 .or. 
bsiz(3) == 0) then - write(6,*) trim(subname),' ERROR: must specify bx, by, and bz with type setblk' - call piodie(__FILE__,__LINE__) - endif - - case default - write(6,*) trim(subname),' ERROR: type ',trim(type),' not supported' - call piodie(__FILE__,__LINE__) - - end select - - end subroutine calcdecomp -!================================================================== - - subroutine calcbsiz(npes,gsiz,bsiz,option,ierr) - - implicit none - - integer(i4), intent(in) :: npes - integer(i4), intent(in) :: gsiz(:) - integer(i4), intent(inout) :: bsiz(:) - character(len=*),intent(in) ,optional :: option - integer(i4), intent(out),optional :: ierr - - integer(i4) :: gs,bs - integer(i4),allocatable :: nsiz(:),isiz(:) - integer(i4) :: npes2 - integer(i4) :: bs1,bs2,bs3 - real(r8) :: rbs1,rbs2,rbs3 - integer(i4) :: n,m,nx,ny,nz,n1,n2,n3 - real(r8) :: ratio,ratio2 - integer(i4) :: nbsiz - real(r8),parameter :: dbsizm = 0.7 ! 0.0 turns this off - real(r8),parameter :: dbsizp = 1.3 ! big number turns this off (npes) - logical :: found - logical :: ediv - character(len=*),parameter :: subname = 'calcbsiz' - - ediv = .false. - - if (present(option)) then - if (trim(option) == 'ediv') then - ediv = .true. - elseif (trim(option) == '') then - ! no op - else - write(6,*) trim(subname),' ERROR: option not valid ',trim(option) - call piodie(__FILE__,__LINE__) - endif - endif - - found = .false. - gs = size(gsiz) - bs = size(bsiz) - if (gs /= 3) then - write(6,*) trim(subname),' ERROR: gs size must be 3 ',gs - call piodie(__FILE__,__LINE__) - endif - if (gs /= bs) then - write(6,*) trim(subname),' ERROR: bs ne gs ',bs,gs - call piodie(__FILE__,__LINE__) - endif - allocate(nsiz(gs),isiz(gs)) - - npes2 = npes - do n = 1,3 - if (bsiz(n) /= 0) then - call pad_div(m,gsiz(n),bsiz(n)) - if (mod(npes2,m) == 0) then - nsiz(n) = m - npes2 = npes2/m - bs = bs - 1 - else - write(6,*) trim(subname),' ERROR: bsiz not allowed ',n,gsiz(n),bsiz(n),m,npes,npes2 - call piodie(__FILE__,__LINE__) - endif - endif - enddo - - bs = 0 - isiz = 0 - do n = 1,3 - if (bsiz(n) == 0) then - bs = bs + 1 - isiz(bs) = n - endif - enddo - n1 = isiz(1) - n2 = isiz(2) - n3 = isiz(3) - - if (bs == 1) then - nsiz(n1) = npes2 - if (check_ediv(ediv,gsiz(n1),nsiz(n1))) then - call pad_div(bsiz(n1),gsiz(n1),nsiz(n1)) - found = .true. - endif - endif - - if (bs == 2) then - ratio = 10.*gsiz(1)*gsiz(2) - nbsiz = 10.*gsiz(1)*gsiz(2) - do nx = 1,npes2 - if (mod(npes2,nx) == 0) then - ny = npes2/nx - if (check_ediv(ediv,gsiz(n1),nx) .and. check_ediv(ediv,gsiz(n2),ny)) then - call pad_div(bs1,gsiz(n1),nx) - call pad_div(bs2,gsiz(n2),ny) - rbs1 = bs1 - rbs2 = bs2 -! if (max(rbs1/rbs2,rbs2/rbs1) < ratio) then - if ((bs1*bs2 < (dbsizm)*nbsiz) .or. & - (bs1*bs2 < (dbsizp)*nbsiz .and. max(rbs1/rbs2,rbs2/rbs1) < ratio)) then - ratio = max(rbs1/rbs2,rbs2/rbs1) - nbsiz = bs1*bs2 - bsiz(n1) = bs1 - bsiz(n2) = bs2 - nsiz(n1) = nx - nsiz(n2) = ny - found = .true. - endif - endif - endif - enddo - endif - - if (bs == 3) then - ratio = 10.*gsiz(1)*gsiz(2)*gsiz(3) - nbsiz = 10.*gsiz(1)*gsiz(2)*gsiz(3) - do nx = 1,npes2 - if (mod(npes2,nx) == 0) then - do ny = 1,npes2/nx - if (mod(npes2/nx,ny) == 0) then - nz = npes2/(nx*ny) - if (check_ediv(ediv,gsiz(n1),nx) .and. check_ediv(ediv,gsiz(n2),ny) .and. 
check_ediv(ediv,gsiz(n3),nz)) then - call pad_div(bs1,gsiz(n1),nx) - call pad_div(bs2,gsiz(n2),ny) - call pad_div(bs3,gsiz(n3),nz) - rbs1 = bs1 - rbs2 = bs2 - rbs3 = bs3 - ratio2 = max(rbs1/rbs2,rbs2/rbs1) - ratio2 = max(ratio2,rbs1/rbs3) - ratio2 = max(ratio2,rbs3/rbs1) - ratio2 = max(ratio2,rbs2/rbs3) - ratio2 = max(ratio2,rbs3/rbs2) -! if (ratio2 < ratio) then - if ((bs1*bs2*bs3 < (dbsizm)*nbsiz) .or. & - (bs1*bs2*bs3 < (dbsizp)*nbsiz .and. ratio2 < ratio)) then - ratio = ratio2 - nbsiz = bs1*bs2*bs3 - bsiz(n1) = bs1 - bsiz(n2) = bs2 - bsiz(n3) = bs3 - nsiz(n1) = nx - nsiz(n2) = ny - nsiz(n3) = nz - found = .true. - endif - endif - endif - enddo - endif - enddo - endif - - if (found) then -! write(6,*) trim(subname),' found; gsiz=',gsiz,'npes=',npes,'nsiz=',nsiz,'bsiz=',bsiz - else - write(6,*) trim(subname),' ERROR: no decomp found gsiz=',gsiz,' npes=',npes - call piodie(__FILE__,__LINE__) - endif - - deallocate(nsiz,isiz) - - end subroutine calcbsiz - -!================================================================== - - subroutine pad_div(mout,num,den) - - implicit none - integer(i4), intent(out) :: mout - integer(i4), intent(in) :: num - integer(i4), intent(in) :: den - character(len=*),parameter :: subname = 'pad_div' - - if (den == 0) then - write(6,*) trim(subname),' ERROR: den = 0' - call piodie(__FILE__,__LINE__) - endif - mout = num/den - if (mod(num,den) > 0) then - mout = mout + 1 - endif - - end subroutine pad_div - -!================================================================== - - logical function check_ediv(ediv,num,den) - - implicit none - - logical, intent(in) :: ediv - integer(i4), intent(in) :: num - integer(i4), intent(in) :: den - character(len=*),parameter :: subname = 'check_ediv' - - if (ediv .and. (den == 0 .or. mod(num,den) > 0)) then - check_ediv = .false. - else - check_ediv = .true. - endif - - end function check_ediv - -!================================================================== -#if defined(STANDALONE_TEST) - subroutine piodie(file,line,msg) - implicit none - character(len=*), intent(in) :: file - integer,intent(in) :: line - character(len=*),optional,intent(in) :: msg - character(len=*),parameter :: subname = 'abort' - - if (present(msg)) then - write(6,*) 'piodie in file=',trim(file),' line=',line, & - ' msg=',trim(msg) - else - write(6,*) 'piodie in file=',trim(file),' line=',line - endif - stop - end subroutine piodie -#endif -!================================================================== -!================================================================== - - subroutine mpas_decomp_generator(dim1,dim2,dim3,my_task,fname,dof) - integer :: dim1, dim2, dim3 - integer, intent(in) :: my_task ! my MPI rank - character(len=*),intent(in) :: fname ! name of MPAS partition file - integer(kind=pio_offset), pointer :: dof(:) - -! Local variables - - integer :: idx - integer :: gnz ! number of vertical levels - integer :: nCellsGlobal ! total number of cells in the horizontal - integer :: nCellsSolve - - integer, pointer :: globalIDList(:) - integer :: i1,i2 - -! print *,'IAM: ',my_task,' Inside mpas_decomp_generator' - ! ---------------------------- - ! MPAS convension - ! ---------------------------- - ! 1st dimension: vertical - ! 
2nd dimension: horizontal - - gnz = dim1 - nCellsGlobal = dim2*dim3 - call get_global_id_list(my_task,fname,nCellsSolve,nCellsGlobal,globalIDList) - - allocate(dof(gnz*nCellsSolve)) - idx = 1 - do i2=1,nCellsSolve - do i1=1,gnz - dof(idx) = i1 + (globalIDList(i2)-1)*gnz - idx = idx + 1 - enddo - enddo - - end subroutine mpas_decomp_generator - - subroutine get_global_id_list(mype, fname, nCellsSolve, nCellsGlobal, globalIDList) - - implicit none - - include 'mpif.h' ! _EXTERNAL - - integer, intent(in) :: mype, nCellsGlobal - character(len=*), intent(in) :: fname - integer, intent(out) :: nCellsSolve - integer, dimension(:), pointer :: globalIDList - - integer :: i, nlist, ierr - integer, dimension(nCellsGlobal) :: owner_list - - ! - ! Each line in the part.128 file corresponds to a global column, and the value in the line - ! identifies the partition that owns the corresponding column - ! - if (mype == 0) then - open(21,file=TRIM(fname),form='formatted',status='old') - do i=1,nCellsGlobal - read(21,*) owner_list(i) - end do - close(21) - end if - call MPI_Bcast(owner_list, nCellsGlobal, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr) - - nlist = 0 - do i=1,nCellsGlobal - if (owner_list(i) == mype) nlist = nlist + 1 - end do - allocate(globalIDList(nlist)) - - nCellsSolve = nlist - - nlist = 1 - do i=1,nCellsGlobal - if (owner_list(i) == mype) then - globalIDList(nlist) = i - nlist = nlist + 1 - end if - end do - - end subroutine get_global_id_list - - - - - subroutine camlike_decomp_generator(gnx, gny, gnz, myid, ntasks, npr_yz, dof) - integer, intent(in) :: gnx, gny, gnz, myid, ntasks, npr_yz(4) - integer(kind=pio_offset), pointer :: dof(:), tdof(:), tchk(:) - real, pointer :: rdof(:) - integer(kind=pio_offset) :: dofsize,tdofsize - - integer :: twodsize, i, j, spnt - - - - if(gny > 1 .and. npr_yz(1)*npr_yz(2)/=ntasks) then - call piodie(__FILE__,__LINE__,& - 'npr_yz(1)*npr_yz(2) must equal ntasks') - end if - - - - - - twodsize=gnx*gny - dofsize = twodsize/npr_yz(1) - ! print *,myid, dofsize - - - - spnt=dofsize*myid+1 - - if( mod(twodsize,npr_yz(1))>=(npr_yz(1)-myid)) then - dofsize=dofsize+1 - spnt=spnt+mod(twodsize,npr_yz(1))-(npr_yz(1)-myid) - end if - - ! call mpi_allreduce(dofsize, tdofsize, 1, mpi_integer, mpi_sum, mpi_comm_world, ierr) - ! print *, myid, dofsize, spnt, tdofsize, twodsize, mod(twodsize,npr_yz(1)), npr_yz(1)-myid - - - - - - - - allocate(dof(dofsize*gnz)) - - allocate(rdof(twodsize), tdof(twodsize)) - - call random_number(rdof) - - tdof = int(twodsize*rdof)+1 - - deallocate(rdof) - allocate(tchk(twodsize)) - tchk=0 - - do i=1,twodsize - if((tchk(tdof(i)))==0) then - tchk(tdof(i))=1 - else - do j=tdof(i),twodsize - if(tchk(j)==0) then - tdof(i)=j - tchk(j)=1 - exit - end if - end do - if(tdof(i)/=j) then - do j=1,tdof(i)-1 - if(tchk(j)==0) then - tdof(i)=j - tchk(j)=1 - exit - end if - end do - end if - end if - end do - deallocate(tchk) - ! 
print *, tdof, sum(tchk) - - - do j=1,gnz - do i=1,dofsize - dof(i+(j-1)*dofsize) = tdof(spnt+i-1)+(j-1)*twodsize - end do - end do - - CALL qsRecursive(1_PIO_OFFSET, dofsize, dof) !kicks off the recursive - - deallocate(tdof) - - - end subroutine camlike_decomp_generator - - - - - - RECURSIVE SUBROUTINE qsRecursive (lo, hi, list) - !This is the actualy recursive portion of the quicksort - INTEGER(KIND=PIO_OFFSET) :: pivotPoint - INTEGER(KIND=PIO_OFFSET), INTENT(IN) :: lo - INTEGER(KIND=PIO_OFFSET), INTENT(IN) :: hi - integer(kind=pio_offset), INTENT(INOUT), DIMENSION(*) :: list - pivotPoint = qsPartition(lo, hi, list); !basically all we do is find the pivot point, adjust elements, then call it again - IF (lo < pivotPoint) CALL qsRecursive(lo, pivotPoint -1, list) - IF (pivotPoint < hi) CALL qsRecursive(pivotPoint + 1, hi, list) - - END SUBROUTINE qsRecursive - - - - integer(kind=pio_offset) FUNCTION qsPartition (loin, hiin, list) - !The partition portios of the Quick Sort is the must involved part - integer(kind=pio_offset), INTENT(INOUT), DIMENSION(*) :: list - INTEGER(KIND=PIO_OFFSET), INTENT(IN) :: loin - INTEGER(KIND=PIO_OFFSET):: lo !variable so we can manipulate the hi and lo values without changing things elsewhere in the program by reference - INTEGER(KIND=PIO_OFFSET), INTENT(IN) :: hiin - INTEGER(KIND=PIO_OFFSET):: hi !variable so we can manipulate the hi and lo values without changing things elsewhere in the program by reference - integer(kind=pio_offset)::pivot !the temp location for the pivitoal element to which everything will be compaired - hi = hiin - lo = loin - pivot = list(lo) - DO - IF (lo >= hi) EXIT !exit the loop when done - DO !move in from the right - IF ((pivot > list(hi)) .OR. (lo >= hi)) EXIT - hi = hi - 1 - END DO - IF (hi /= lo) then !move the entry indexed by hi to left side of partition - list(lo) = list(hi) - lo = lo + 1 - END IF - DO !move in from the left - IF ((list(lo) > pivot) .OR. (lo >= hi)) EXIT - lo = lo + 1 - END DO - IF (hi /= lo) then !move the entry indexed by hi to left side of partition - list(hi) = list(lo) - hi = hi - 1 - END IF - END DO - list(hi) = pivot !put the pivot element back when we're done - qsPartition = hi !return the correct position of the pivot element - END FUNCTION qsPartition - - - -!================================================================== - -end module gdecomp_mod diff --git a/src/externals/pio1/tests/testpio/kinds_mod.F90 b/src/externals/pio1/tests/testpio/kinds_mod.F90 deleted file mode 100644 index ec2db1a3aec..00000000000 --- a/src/externals/pio1/tests/testpio/kinds_mod.F90 +++ /dev/null @@ -1,41 +0,0 @@ -!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| - - module kinds_mod - -!BOP -! !MODULE: kinds_mod -! -! !DESCRIPTION: -! This module defines default numerical data types for all common data -! types like integer, character, logical, real4 and real8. -! -! !REVISION HISTORY: -! CVS:$Id: kinds_mod.F90,v 1.1.1.1 2006/07/31 16:15:30 dennis Exp $ -! CVS:$Name: $ - -! !USES: -! uses no other modules - - implicit none - private - save - -! !DEFINED PARAMETERS: - - integer, parameter, public :: & - char_len = 100 ,& - log_kind = kind(.true.) 
,& - int_kind = kind(1) ,& - i4 = selected_int_kind(6) ,& - i8 = selected_int_kind(13) ,& - r4 = selected_real_kind(6) ,& - r8 = selected_real_kind(13) - -!EOP -!BOC -!EOC -!*********************************************************************** - - end module kinds_mod - -!||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| diff --git a/src/externals/pio1/tests/testpio/kraken.128.csh b/src/externals/pio1/tests/testpio/kraken.128.csh deleted file mode 100755 index fe93a20808c..00000000000 --- a/src/externals/pio1/tests/testpio/kraken.128.csh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/csh -#./testpio_bench.pl --iofmt pnc --pecount 128 --bench POPC --numIO 8 -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 64 --bench POPD --numIO 12 - -set id = `date "+%m%d%y-%H%M"` -# POPB -# WRFB -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 128 --bench POPC --numIO 20 --log testpio.0128.pnc.iotask_20.${id} -./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 128 --bench POPD --numIO 20 --log testpio.0128.pnc.iotask_20.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 128 --bench CAM05 --numIO 20 --log testpio.0128.pnc.iotask_20.${id} diff --git a/src/externals/pio1/tests/testpio/kraken.1K.csh b/src/externals/pio1/tests/testpio/kraken.1K.csh deleted file mode 100755 index 7b597c5daed..00000000000 --- a/src/externals/pio1/tests/testpio/kraken.1K.csh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/csh -#./testpio_bench.pl --iofmt pnc --pecount 128 --bench POPC --numIO 8 -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 64 --bench POPD --numIO 12 -set id = `date "+%m%d%y-%H%M"` -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1000 --bench POPC --numIO 160 --log kraken.1000.pnc.iotask_160.log.${id} -./testpio_bench.pl --maxiter 5 --iofmt pnc --pecount 1000 --bench POPD --numIO 160 --log kraken.1000.pnc.iotask_160.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 832 --bench CAM05 --numIO 140 --log kraken.0832.pnc.iotask_140.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 1000 --bench WRFB --numIO 160 --log kraken.1000.pnc.iotask_160.log.${id} diff --git a/src/externals/pio1/tests/testpio/kraken.256.csh b/src/externals/pio1/tests/testpio/kraken.256.csh deleted file mode 100755 index 103aff85de3..00000000000 --- a/src/externals/pio1/tests/testpio/kraken.256.csh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/csh -#./testpio_bench.pl --iofmt pnc --pecount 128 --bench POPC --numIO 8 -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 64 --bench POPD --numIO 12 -set id = `date "+%m%d%y-%H%M"` -# POPB -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 256 --bench POPC --numIO 40 --log kraken.0256.pnc.iotask_40.log.${id} -./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 256 --bench POPD --numIO 40 --log kraken.0256.pnc.iotask_40.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 256 --bench CAM05 --numIO 40 --log kraken.0256.pnc.iotask_40.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 250 --bench WRFB --numIO 40 --log kraken.0256.pnc.iotask_40.log.${id} diff --git a/src/externals/pio1/tests/testpio/kraken.512.csh b/src/externals/pio1/tests/testpio/kraken.512.csh deleted file mode 100755 index 63b6ef65059..00000000000 --- a/src/externals/pio1/tests/testpio/kraken.512.csh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/csh -#./testpio_bench.pl --iofmt pnc --pecount 128 --bench POPC --numIO 8 -#./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 64 --bench POPD --numIO 12 - -set id = `date "+%m%d%y-%H%M"` 
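# Note: the testpio_bench.pl options used throughout these scripts appear to be:
#   --maxiter  number of trials per test (see maxiter in README.testpio)
#   --iofmt    I/O format ("pnc" = parallel netcdf)
#   --pecount  total number of pes for the run
#   --bench    benchmark configuration name (POPB/POPC/POPD/CAM05/WRFB)
#   --numIO    number of I/O tasks
#   --log      log file name
# This reading is inferred from the surrounding scripts and README and is not authoritative.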
-#POPB -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 512 --bench POPC --numIO 80 --log kraken.0512.pnc.iotask_80.log.${id} -./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 512 --bench POPD --numIO 80 --log kraken.0512.pnc.iotask_80.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 416 --bench CAM05 --numIO 70 --log kraken.0416.pnc.iotask_70.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 500 --bench WRFB --numIO 80 --log kraken.0500.pnc.iotask_80.log.${id} diff --git a/src/externals/pio1/tests/testpio/kraken.64.csh b/src/externals/pio1/tests/testpio/kraken.64.csh deleted file mode 100755 index 64e59bad7a7..00000000000 --- a/src/externals/pio1/tests/testpio/kraken.64.csh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/csh -set id = `date "+%m%d%y-%H%M"` -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench WRFB --numIO 12 --log kraken.0064.pnc.iotask_12.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 80 --bench POPB --numIO 30 --log kraken.0080.pnc.iotask_30.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench POPC --numIO 12 --log kraken.0064.pnc.iotask_12.log.${id} -./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 64 --bench POPD --numIO 12 --log kraken.0064.pnc.iotask_12.log.${id} -./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench CAM05 --numIO 12 --log kraken.0064.pnc.iotask_12.log.${id} diff --git a/src/externals/pio1/tests/testpio/namelist_mod.F90 b/src/externals/pio1/tests/testpio/namelist_mod.F90 deleted file mode 100644 index e551af5fd30..00000000000 --- a/src/externals/pio1/tests/testpio/namelist_mod.F90 +++ /dev/null @@ -1,548 +0,0 @@ -#ifdef BGP -#define BGx -#endif -#ifdef BGL -#define BGx -#endif -module namelist_mod - - use kinds_mod - -! Modules from PIO package that are used by this application - - use pio_support, only : piodie, CheckMPIReturn ! _EXTERNAL - - implicit none - private - - public :: broadcast_namelist - public :: readtestpio_namelist - - integer(kind=i4), public, parameter :: buffer_size_str_len = 20 - integer(kind=i4), public, parameter :: true_false_str_len = 6 - integer(kind=i4), public, parameter :: romio_str_len = 10 - - logical, public, save :: async - integer(i4), public, save :: nx_global,ny_global,nz_global - integer(i4), public, save :: rearr_type - integer(i4), public, save :: num_iotasks - integer(i4), public, save :: stride - integer(i4), public, save :: base - integer(i4), public, save :: DebugLevel - integer(i4), public, save :: maxiter - integer(i4), public, save :: num_aggregator - integer(i4), public, save :: iotype - integer(i4), public, save :: num_iodofs - integer(i4), public, save :: nvars - integer(i4), public, save :: npr_yz(4) ! To simulate cam fv decompositions - - integer(kind=i4), public, save :: set_mpi_values = 0 !! Set to one for true - character(len=buffer_size_str_len), public, save :: mpi_cb_buffer_size = '' - integer(kind=i4), public, save :: set_romio_values = 0 !! Set to one for true - character(len=romio_str_len), public, save :: romio_cb_write = '' - character(len=romio_str_len), public, save :: romio_cb_read = '' - character(len=romio_str_len), public, save :: romio_direct_io = '' - integer(kind=i4), public, save :: set_ibm_io_values = 0 !! 
Set to one for true - character(len=buffer_size_str_len), public, save :: ibm_io_buffer_size = '' - character(len=true_false_str_len), public, save :: ibm_io_largeblock_io = '' - character(len=true_false_str_len), public, save :: ibm_io_sparse_access = '' - - integer(kind=i4), public, save :: set_lustre_values = 0 !! Set to one for true - integer(kind=i4), public, save :: lfs_ost_count = 1 - - character(len=80), save, public :: compdof_input - character(len=80), save, public :: iodof_input - character(len=80), save, public :: compdof_output - character(len=256), save, public :: part_input - character(len=256), save, public :: casename - character(len=80), save, public :: dir - character(len=4) , save, public :: ioFMTd - character(len=8) , save, public :: rearr - - integer(i4), save :: nprocsIO - integer(i4), save :: PrintRec - character(len=4), save :: ioFMT - character(len=80), save :: fname1, fname2 - character(len=*), parameter :: myname='namelist_mod' - integer(i4) :: max_buffer_size - integer(i4) :: block_size - -! Variables whose values are derived form items in namelist io_nml: - - namelist /io_nml/ & - async, & - stride, & - base, & - num_aggregator, & - nx_global, & - ny_global, & - nz_global, & - nvars, & - dir, & - max_buffer_size, & - block_size, & - casename, & - maxiter, & - ioFMT, & - rearr, & - nprocsIO, & - num_iodofs, & - compdof_input, & - compdof_output, & - iodof_input, & - part_input, & - DebugLevel, & - npr_yz, & - set_mpi_values, & - mpi_cb_buffer_size, & - set_romio_values, & - romio_cb_write, & - romio_cb_read, & - romio_direct_io, & - set_ibm_io_values, & - ibm_io_buffer_size, & - ibm_io_largeblock_io, & - ibm_io_sparse_access, & - set_lustre_values, & - lfs_ost_count - -contains - - -subroutine ReadTestPIO_Namelist(device, nprocs, filename, caller, ierror) - - use pio ! _EXTERNAL - - implicit none - - integer(i4), intent(IN) :: device - integer(i4), intent(IN) :: nprocs - character(len=*), intent(IN) :: filename - character(len=*), intent(IN) :: caller - integer(i4), intent(OUT) :: ierror - - character(len=16) :: string - character(len=*), parameter :: myname_=myname//'ReadPIO_Namelist' - - !------------------------------------------------- - ! set default values for namelist io_nml variables - !------------------------------------------------- - - async = .false. - DebugLevel=2 - stride = 0 - base = 0 - nx_global = 3600 - ny_global = 2400 - nz_global = 1 - num_iotasks = -1 - num_aggregator = 4 - nprocsIO = 0 - num_iodofs = 1 - compdof_input = 'namelist' - part_input = 'null' - iodof_input = 'internal' - compdof_output = 'none' - nvars = 10 - - max_buffer_size = -1 !! use default value - block_size = -1 !! use default value - - - - npr_yz = (/nprocs,1,1,nprocs/) - set_mpi_values = 0 !! Set to one for true - mpi_cb_buffer_size = '' - - set_romio_values = 0 !! Set to one for true - romio_cb_write = '' !! Default is "automatic" - romio_cb_read = '' !! Default is "automatic" - romio_direct_io = '' !! Default is "automatic" - - set_ibm_io_values = 0 !! Set to one for true - ibm_io_buffer_size = '' - ibm_io_largeblock_io = '' !! Default is "false" - ibm_io_sparse_access = '' !! 
Default is false - - set_lustre_values = 0 - lfs_ost_count = 1 - - ioFMT = 'bin' - dir = './' - casename = '' - rearr = 'box' - maxiter = 10 - - open (device, file=filename,status='old',iostat=ierror) - - if(ierror /= 0) then - write(*,*) caller,'->',myname_,':: Error opening file ',filename, & - ' on device ',device,' with iostat=',ierror - ierror = -1 - else - ierror = 1 - endif - - do while (ierror > 0) - read(device, nml=io_nml, iostat=ierror) - enddo - - if (ierror == 0) close(device) - - if(nvars > 99999) then - write(*,*) 'nvars exceeds limit of 99999, resetting' - nvars = 99999 - else if(nvars < 1) then - write(*,*) 'nvars < 1, resetting' - nvars = 1 - end if - - - string = 'namelist_input' - write(*,*) ' ' - write(*,*) trim(string),' async = ',async - write(*,*) trim(string),' casename = ',trim(casename) - write(*,*) trim(string),' nx_global = ',nx_global - write(*,*) trim(string),' ny_global = ',ny_global - write(*,*) trim(string),' nz_global = ',nz_global - write(*,*) trim(string),' nvars = ',nvars - write(*,*) trim(string),' ioFMT = ',ioFMT - write(*,*) trim(string),' rearr = ',rearr - write(*,*) trim(string),' nprocsIO = ',nprocsIO - write(*,*) trim(string),' base = ',base - write(*,*) trim(string),' stride = ',stride - write(*,*) trim(string),' num_aggregator = ',num_aggregator - write(*,*) trim(string),' num_iodofs = ',num_iodofs - write(*,*) trim(string),' maxiter = ',maxiter - write(*,*) trim(string),' dir = ',trim(dir) - write(*,*) trim(string),' npr_yz = ',npr_yz - write(*,*) trim(string),' DebugLevel = ',DebugLevel - write(*,*) trim(string),' DebugLevel = ',DebugLevel - write(*,*) trim(string),' compdof_input = ',trim(compdof_input) - write(*,*) trim(string),' compdof_output = ',trim(compdof_output) - write(*,*) trim(string),' iodof_input = ',trim(iodof_input) - write(*,*) trim(string),' part_input =', trim(part_input) - if (set_mpi_values /= 0) then - if (mpi_cb_buffer_size /= '') then - write(*,*) trim(string),' mpi_cb_buffer_size = ', & - trim(mpi_cb_buffer_size) - end if - end if - - if (set_romio_values /= 0) then - if (romio_cb_write /= '') then - write(*,*) trim(string),' romio_cb_write = ', romio_cb_write - end if - - if (romio_cb_read /= '') then - write(*,*) trim(string),' romio_cb_read = ', romio_cb_read - end if - - if (romio_direct_io /= '') then - write(*,*) trim(string),' romio_direct_io = ', romio_direct_io - end if - end if - - if (set_ibm_io_values /= 0) then - if (ibm_io_buffer_size /= '') then - write(*,*) trim(string),'ibm_io_buffer_size = ', & - trim(ibm_io_buffer_size) - end if - - if (ibm_io_largeblock_io /= '') then - write(*,*) trim(string),'ibm_io_largeblock_io = ', & - trim(ibm_io_largeblock_io) - end if - - if (ibm_io_sparse_access /= '') then - write(*,*) trim(string),'ibm_io_sparse_access = ', & - trim(ibm_io_sparse_access) - end if - end if - - if (set_lustre_values /= 0) then - write(*,*) trim(string),'lfs_ost_count = ', lfs_ost_count - endif - - write(*,*) ' ' - - string = 'derived_input' - select case(trim(rearr)) - case('none') - rearr_type=PIO_rearr_none - write(*,*) trim(string),' rearr_type = ','PIO_rearr_none' - case('box') - rearr_type=PIO_rearr_box - write(*,*) trim(string),' rearr_type = ','PIO_rearr_box' - case default - write(*,'(6a)') caller,'->',myname,':: Value of Rearranger type rearr = ',rearr, & - 'not supported.' - call piodie(__FILE__,__LINE__) - end select - write(*,*) trim(string),' rearr_type = ',rearr_type - - iofmtd = iofmt - select case(ioFMT) - case('bin') ! 
binary format - iotype = iotype_pbinary - write(*,*) trim(string),' iotype = ','iotype_pbinary' - case('pnc') !Parallel netCDF - iotype = iotype_pnetcdf - ioFmtd = 'nc' - write(*,*) trim(string),' iotype = ','iotype_pnetcdf' - case('snc') ! serial netCDF - iotype = iotype_netcdf - ioFmtd = 'nc' - write(*,*) trim(string),' iotype = ','iotype_netcdf' - case('nc4p') ! netCDF4 parallel - iotype = PIO_iotype_netcdf4p - ioFmtd = 'nc' - write(*,*) trim(string),' iotype = ','PIO_iotype_netcdf4p' - case('nc4c') ! netCDF4 compressed - iotype = PIO_iotype_netcdf4c - ioFmtd = 'nc' - write(*,*) trim(string),' iotype = ','PIO_iotype_netcdf4c' - case('vdc2') ! netCDF4 compressed - iotype = PIO_iotype_vdc2 - ioFmtd = 'vdf' - write(*,*) trim(string),' iotype = ','PIO_iotype_vdc2' - case default - write(*,'(4a,i8)') caller,'->',myname,':: Unrecognized value of ioFMT =',ioFMT - call piodie(__FILE__,__LINE__) - end select - write(*,*) trim(string),' iofmtd = ',trim(iofmtd) - - num_iotasks = -1 - if (nprocsIO > 0) then - num_iotasks=nprocsIO - if (stride <= 0 .or. stride>nprocs) then - stride = (nprocs-base)/num_iotasks - endif - elseif (nprocsIO <= 0) then -#ifdef BGx - ! A negative value for num_iotasks has a special meaning on Blue Gene - num_iotasks = nprocsIO -#else - if (stride <= 0 .or. stride>nprocs) then - num_iotasks = nprocs - stride = 1 - base=0 - else - num_iotasks = max(1,(nprocs-base)/stride) - endif -#endif - endif - - !------------------------------------------------ - ! reset stride if there are not enough processors - !------------------------------------------------ - if (base + num_iotasks * (stride-1) > nprocs-1) then - stride = FLOOR(real((nprocs - 1 - base),kind=r8)/real(num_iotasks,kind=r8)) - endif - - !------------------------------------------------------- - ! If rearrangement is 'none' reset to the proper values - !------------------------------------------------------- - if(trim(rearr) == 'none') then - stride = 1 - num_iotasks = nprocs - endif - - write(*,*) trim(string),' n_iotasks = ',num_iotasks,' (updated)' - write(*,*) trim(string),' base = ',base,' (updated)' - write(*,*) trim(string),' stride = ',stride,' (updated)' - write(*,*) ' ' - - !--- error check - - string = 'namelist_ERROR:' - print *,'ReadTestPIO_Namelist: at the end' - -end subroutine ReadTestPIO_Namelist - - -subroutine Broadcast_Namelist(caller, myID, root, comm, ierror) - - use pio ! _EXTERNAL -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - character(len=*), intent(IN) :: caller - integer(i4), intent(IN) :: myID - integer(i4), intent(IN) :: root - integer(i4), intent(IN) :: comm - - integer(i4), intent(OUT) :: ierror - - character(len=*), parameter :: myname_=myname//'Broadcast_Namelist' - integer(i4) :: itmp - - !------------------------------------------ - ! broadcast namelist info to all processors - !------------------------------------------ - - if(async) then - itmp=1 - else - itmp=0 - end if - - call MPI_Bcast(itmp, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(async)',ierror,__FILE__,__LINE__) - - if(itmp==1) then - async=.true. - else - async=.false. 
- end if - - - call MPI_Bcast(num_iotasks, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(num_iotasks)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(num_iodofs, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(num_iodofs)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(num_aggregator, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(num_aggregator)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(stride, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(stride)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(base, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(base)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(nx_global, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(nx_global)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(ny_global, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(ny_global)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(nz_global, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(nz_global)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(nvars, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(nvars)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(set_mpi_values, 1, MPI_INTEGER, root, comm,ierror) - call CheckMPIReturn('Call to MPI_Bcast(set_mpi_values)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(mpi_cb_buffer_size, buffer_size_str_len, MPI_CHARACTER, & - root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(mpi_cb_buffer_size)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(set_romio_values, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(set_romio_values)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(romio_cb_write, romio_str_len, MPI_CHARACTER, root, & - comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(romio_cb_write)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(romio_cb_read, romio_str_len, MPI_CHARACTER, root, & - comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(romio_cb_read)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(romio_direct_io, romio_str_len, MPI_CHARACTER, root, & - comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(romio_direct_io)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(set_ibm_io_values, 1, MPI_INTEGER, root, comm, & - ierror) - call CheckMPIReturn('Call to MPI_Bcast(set_ibm_io_values)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(ibm_io_buffer_size, buffer_size_str_len, MPI_CHARACTER, & - root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(ibm_io_buffer_size)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(ibm_io_largeblock_io, true_false_str_len, MPI_CHARACTER, & - root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(ibm_io_largeblock_io)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(ibm_io_sparse_access, true_false_str_len, MPI_CHARACTER, & - root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(ibm_io_sparse_access)', ierror, & - __FILE__, __LINE__) - - call MPI_Bcast(set_lustre_values,1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcase(set_lustre_values)', ierror, __FILE__, __LINE__) - - call MPI_Bcast(lfs_ost_count,1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcase(lfs_ost_count)', ierror, __FILE__, __LINE__) - - call MPI_Bcast(iotype, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to 
MPI_Bcast(iotype)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(ioFMTd, 4, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(ioFMTd)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(rearr, 8, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(rearr)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(rearr_type, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(rearr_type)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(dir, 80, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(dir)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(compdof_input, 80, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(compdof_input)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(compdof_output, 80, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(compdof_output)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(iodof_input, 80, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(iodof_input)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(part_input, 256, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(part_input)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(casename, 256, MPI_CHARACTER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(casename)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(DebugLevel, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(DebugLevel)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(maxiter, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(maxiter)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(nprocsIO, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(nprocsIO)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(npr_yz, 4, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(npr_yz)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(max_buffer_size, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(npr_yz)',ierror,__FILE__,__LINE__) - - call MPI_Bcast(block_size, 1, MPI_INTEGER, root, comm, ierror) - call CheckMPIReturn('Call to MPI_Bcast(npr_yz)',ierror,__FILE__,__LINE__) - - if(max_buffer_size>0) then - if(myid==0) print *,'Setting buffer_size_limit to : ',max_buffer_size - call pio_set_buffer_size_limit(max_buffer_size) - end if - if(block_size>0) then - if(myid==0) print *,'Setting blocksize to : ',block_size - call pio_set_blocksize(block_size) - end if - - - - - -end subroutine Broadcast_Namelist - -end module namelist_mod diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.apb05 b/src/externals/pio1/tests/testpio/namelists/testpio_in.apb05 deleted file mode 100644 index ebc7f854980..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.apb05 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'apb05:pnc:box:stride=4:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 2778 - ny_global = 3014 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = 8 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 5 - compdof_input = 'namelist' - compdof_output = 'none' - async = .true. 
-/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.asb01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.asb01 deleted file mode 100644 index 370ae7cf6e2..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.asb01 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'asb01:snc:box:nprocsIO=1:g_root' - nx_global = 4563 - ny_global = 4464 - iofmt = 'snc' - rearr = 'box' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' - async = .true. -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.asb04 b/src/externals/pio1/tests/testpio/namelists/testpio_in.asb04 deleted file mode 100644 index 4da42f74198..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.asb04 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'asb04:snc:box:stride=4:g_xy:go_yxz' - nx_global = 4190 - ny_global = 2645 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 1 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' - async = .true. -/ -&compdof_nml - nblksppe = 1 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b01 deleted file mode 100644 index c963eea2ec4..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'b01:snc:box:nprocsIO=1' - nx_global = 3730 - ny_global = 4918 - iofmt = 'snc' - rearr = 'box' - nprocsIO = 1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b02 deleted file mode 100644 index 3ca9cbe96f6..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'b02:bin:box:nproc=1' - nx_global = 2616 - ny_global = 683 - iofmt = 'bin' - rearr = 'box' - nprocsIO = 1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b03 deleted file mode 100644 index 0b170e9e7c5..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b03 +++ /dev/null @@ -1,30 
+0,0 @@ -&io_nml - casename = 'b03:pnc:box:nprocsIO=1' - nx_global = 4222 - ny_global = 4385 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = 1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b04 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b04 deleted file mode 100644 index 976d29ef498..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b04 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'b04:snc:box:stride=4' - nx_global = 5249 - ny_global = 2454 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b05 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b05 deleted file mode 100644 index 9cdb4463d8f..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b05 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'b05:bin:box:stride=4:agg=1' - nx_global = 2881 - ny_global = 1467 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b06 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b06 deleted file mode 100644 index 143603635bc..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b06 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'b06:pnc:box:stride=4:agg=1' - nx_global = 4285 - ny_global = 732 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b07 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b07 deleted file mode 100644 index 71509462116..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b07 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'b07:bin:box:stride=4:agg=-1' - nx_global = 4209 - ny_global = 5073 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 1 - dir = './none/' - num_aggregator = -1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git 
a/src/externals/pio1/tests/testpio/namelists/testpio_in.b08 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b08 deleted file mode 100644 index 9dea68d13a5..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b08 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'b08:pnc:box:stride=4:agg=4' - nx_global = 4432 - ny_global = 3480 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 4 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xyz' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b09 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b09 deleted file mode 100644 index fd1869df8c8..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b09 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'b09:pnc:box:stride=2:agg=4' - nx_global = 5017 - ny_global = 1151 - nz_global = 8 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 2 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 4 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b10 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b10 deleted file mode 100644 index f5bcaa0407c..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b10 +++ /dev/null @@ -1,46 +0,0 @@ -&io_nml - casename = 'b10:pnc:box:stride=2:agg=4' - nx_global = 2448 - ny_global = 1545 - nz_global = 30 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = 30 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 4 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' - iodof_input = 'namelist' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ -&iodof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'z' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'z' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b11 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b11 deleted file mode 100644 index 9abd7a5753c..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b11 +++ /dev/null @@ -1,46 +0,0 @@ -&io_nml - casename = 'b11:pnc:none:stride=2:agg=4' - nx_global = 5014 - ny_global = 1497 - nz_global = 30 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 4 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' - iodof_input = 'namelist' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ -&iodof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git 
a/src/externals/pio1/tests/testpio/namelists/testpio_in.b12 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b12 deleted file mode 100755 index 8f82ce862fd..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b12 +++ /dev/null @@ -1,54 +0,0 @@ -&io_nml - casename = 'b12:bin:box:stride=10' - nx_global = 4726 - ny_global = 4629 - nz_global = 20 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 10 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 8 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' - iodof_input = 'namelist' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 90 - gdy = 60 - gdz = 20 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ -&iodof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 3600 - gdy = 60 - gdz = 5 - blkorder = 'xyz' - blkdecomp1 = 'yz' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ -&prof_inparm - profile_disable = .false. - profile_barrier = .true. - profile_single_file = .false. - profile_depth_limit = 10 - profile_detail_limit = 0 -/ -EOF diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.b13 b/src/externals/pio1/tests/testpio/namelists/testpio_in.b13 deleted file mode 100755 index 53b9747e2e0..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.b13 +++ /dev/null @@ -1,53 +0,0 @@ -&io_nml - casename = 'b13:bin:box:stride=32' - nx_global = 4274 - ny_global = 3689 - nz_global = 30 - iofmt = 'bin' - rearr = 'box' - nprocsIO = 30 - stride = 32 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 8 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' - iodof_input = 'namelist' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 18 - gdy = 12 - gdz = 30 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ -&iodof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 576 - gdy = 384 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'yz' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ -&prof_inparm - profile_disable = .false. - profile_barrier = .true. - profile_single_file = .false. 
- profile_depth_limit = 10 - profile_detail_limit = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb01 deleted file mode 100644 index 16f66418ac4..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bb01:bin:box:nprocsIO=1:g_root' - nx_global = 4552 - ny_global = 2850 - iofmt = 'bin' - rearr = 'box' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb02 deleted file mode 100644 index d9dabb9f0f4..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bb02:bin:box:stride=1:g_xy' - nx_global = 1055 - ny_global = 2625 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb03 deleted file mode 100644 index e0b1add1a00..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bb03:bin:box:stride=1:g_root' - nx_global = 2047 - ny_global = 2642 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb04 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb04 deleted file mode 100644 index 61717253ddf..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb04 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bb04:bin:box:stride=4:g_xy:go_yxz' - nx_global = 4794 - ny_global = 2536 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb05 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb05 deleted file mode 100644 index 702df727486..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb05 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bb05:bin:box:stride=4:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 4500 - ny_global = 2671 - iofmt = 'bin' - rearr 
= 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb06 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb06 deleted file mode 100644 index ff9e774a9cf..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb06 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'bb06:bin:box:stride=4:3d:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 1399 - ny_global = 3189 - nz_global = 10 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 10 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb07 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb07 deleted file mode 100644 index 3f9979621e7..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb07 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'bb07:bin:box:stride=4:3d:nblksppe=16:g_xy:go_yxz:b_xyz:bo_yzx' - nx_global = 3676 - ny_global = 1409 - nz_global = 10 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 16 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 5 - blkorder = 'yzx' - blkdecomp1 = 'xyz' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb08 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bb08 deleted file mode 100644 index 2565581fb04..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bb08 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'bb08:bin:box:stride=4:nblksppe=4:g_xy:go_yxz:b_cont1d:bo_yzx' - nx_global = 1889 - ny_global = 2668 - nz_global = 1 - iofmt = 'bin' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'yzx' - blkdecomp1 = 'cont1d' - blkdecomp2 = '' - bdx = 1 - bdy = 1 - bdz = 1 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bn01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bn01 deleted file mode 100644 index 6122b6940e1..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bn01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bn01:bin:none:nprocsIO=1:g_root' - nx_global = 339 - ny_global = 231 - iofmt = 'bin' - rearr = 'none' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 339 - gdy = 231 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - 
bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bn02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bn02 deleted file mode 100644 index 2ec45236aac..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bn02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bn02:bin:none:stride=1:g_xy' - nx_global = 360 - ny_global = 240 - iofmt = 'bin' - rearr = 'none' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.bn03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.bn03 deleted file mode 100644 index 0d24153b7bd..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.bn03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'bn03:bin:none:stride=1:g_root' - nx_global = 498 - ny_global = 272 - iofmt = 'bin' - rearr = 'none' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 498 - gdy = 272 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b01 deleted file mode 100644 index 36b070af612..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pb01:pnc:box:nprocIO=1:g_root' - nx_global = 1276 - ny_global = 4542 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b02 deleted file mode 100644 index 5a274c2fa32..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pb02:pnc:box:stride=1:g_xy' - nx_global = 4386 - ny_global = 4232 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b03 deleted file mode 100644 index a1989e9bf78..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'n4b03:n4nc:box:stride=1:g_root' - nx_global = 5096 - ny_global = 4116 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - 
maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b04 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b04 deleted file mode 100644 index 57681d8f008..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b04 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'nc4b04:nc4:box:stride=4:g_xy:go_yxz' - nx_global = 659 - ny_global = 2158 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b05 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b05 deleted file mode 100644 index faa040ef482..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b05 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'nc4b05:nc4:box:stride=4:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 3688 - ny_global = 4438 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b06 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b06 deleted file mode 100644 index 89572d5496f..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b06 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'nc4b06:nc4p:box:stride=4:3d:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 2332 - ny_global = 1233 - nz_global = 60 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 60 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b07 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b07 deleted file mode 100644 index ce81af12077..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b07 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'nc4b07:nc4p:box:stride=4:3d:nblksppe=16:g_xy:go_yxz:b_xyz:bo_yzx' - nx_global = 2669 - ny_global = 2335 - nz_global = 10 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 16 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 5 - blkorder = 'yzx' - blkdecomp1 = 'xyz' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git 
a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b08 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b08 deleted file mode 100644 index ecffa90fb1e..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4b08 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'nc4b08:nc4p:box:stride=4:nblksppe=4:g_xy:go_yxz:b_cont1d:bo_yzx' - nx_global = 1981 - ny_global = 5039 - nz_global = 1 - iofmt = 'nc4p' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'yzx' - blkdecomp1 = 'cont1d' - blkdecomp2 = '' - bdx = 1 - bdy = 1 - bdz = 1 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n01 deleted file mode 100644 index 7d2100a495e..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'nc4n01:nc4p:none:nprocIO=1:g_root' - nx_global = 1454 - ny_global = 695 - iofmt = 'nc4p' - rearr = 'none' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n02 deleted file mode 100644 index 88ef9b19771..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'n4n02:n4nc:none:stride=1:g_xy' - nx_global = 3974 - ny_global = 698 - iofmt = 'nc4p' - rearr = 'none' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n03 deleted file mode 100644 index 5de679b079c..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.n4n03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'n4n03:n4nc:none:stride=1:g_root' - nx_global = 1465 - ny_global = 558 - iofmt = 'nc4p' - rearr = 'none' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb01 deleted file mode 100644 index f99450907fa..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pb01:pnc:box:nprocIO=1:g_root' - nx_global = 1995 - ny_global = 2535 - iofmt = 'pnc' - rearr = 'box' - 
nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb02 deleted file mode 100644 index f5dea3df967..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pb02:pnc:box:stride=1:g_xy' - nx_global = 3694 - ny_global = 4747 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb03 deleted file mode 100644 index ee374b44956..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pb03:pnc:box:stride=1:g_root' - nx_global = 2427 - ny_global = 3820 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb04 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb04 deleted file mode 100644 index 4f32ad5cdd3..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb04 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pb04:pnc:box:stride=4:g_xy:go_yxz' - nx_global = 4327 - ny_global = 4323 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb05 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb05 deleted file mode 100644 index 3b7f6f21ad8..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb05 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pb05:pnc:box:stride=4:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 4656 - ny_global = 2117 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb06 
b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb06 deleted file mode 100644 index 8ce21eaa718..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb06 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'pb06:pnc:box:stride=4:3d:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 2642 - ny_global = 445 - nz_global = 60 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = -1 - base = 0 - maxiter = 1 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 60 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb07 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb07 deleted file mode 100644 index fdcb7064372..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb07 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'pb07:pnc:box:stride=4:3d:nblksppe=16:g_xy:go_yxz:b_xyz:bo_yzx' - nx_global = 3825 - ny_global = 2879 - nz_global = 10 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 16 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 5 - blkorder = 'yzx' - blkdecomp1 = 'xyz' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb08 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pb08 deleted file mode 100644 index 1f1d3b2a0d8..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pb08 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'pb08:pnc:box:stride=4:nblksppe=4:g_xy:go_yxz:b_cont1d:bo_yzx' - nx_global = 4759 - ny_global = 1268 - nz_global = 1 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'yzx' - blkdecomp1 = 'cont1d' - blkdecomp2 = '' - bdx = 1 - bdy = 1 - bdz = 1 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pn01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pn01 deleted file mode 100644 index 1f3b9ed76ed..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pn01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pn01:pnc:none:nprocIO=1:g_root' - nx_global = 3368 - ny_global = 4476 - iofmt = 'pnc' - rearr = 'none' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pn02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pn02 deleted file mode 100644 index cb7a5b33dc8..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pn02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pn02:pnc:none:stride=1:g_xy' - nx_global = 5201 - ny_global = 4896 - iofmt = 'pnc' - rearr = 'none' - 
nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.pn03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.pn03 deleted file mode 100644 index a83c15e3aac..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.pn03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'pn03:pnc:none:stride=1:g_root' - nx_global = 773 - ny_global = 3829 - iofmt = 'pnc' - rearr = 'none' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb01 deleted file mode 100644 index 2d015b63a45..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sb01:snc:box:nprocsIO=1:g_root' - nx_global = 2827 - ny_global = 931 - iofmt = 'snc' - rearr = 'box' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb02 deleted file mode 100644 index a5fac263f43..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sb02:snc:box:stride=1:g_xy' - nx_global = 1281 - ny_global = 3915 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb03 deleted file mode 100644 index 83328756dcc..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sb03:snc:box:stride=1:g_root' - nx_global = 5066 - ny_global = 2172 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb04 
b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb04 deleted file mode 100644 index 3f60f48d71e..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb04 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sb04:snc:box:stride=4:g_xy:go_yxz' - nx_global = 1952 - ny_global = 871 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb05 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb05 deleted file mode 100644 index 75ec8ea1fcd..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb05 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sb05:snc:box:stride=4:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 1265 - ny_global = 1137 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb06 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb06 deleted file mode 100644 index 2b6a8b425fb..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb06 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'sb06:snc:box:stride=4:3d:nblksppe=4:g_xy:go_yxz:b_xy' - nx_global = 362 - ny_global = 524 - nz_global = 60 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 60 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb07 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb07 deleted file mode 100644 index d42829af473..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb07 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'sb07:snc:box:stride=4:3d:nblksppe=16:g_xy:go_yxz:b_xyz:bo_yzx' - nx_global = 3469 - ny_global = 2232 - nz_global = 10 - iofmt = 'snc' - rearr = 'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 16 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 5 - blkorder = 'yzx' - blkdecomp1 = 'xyz' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb08 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sb08 deleted file mode 100644 index 1012eeb6d7e..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sb08 +++ /dev/null @@ -1,31 +0,0 @@ -&io_nml - casename = 'sb08:snc:box:stride=4:nblksppe=4:g_xy:go_yxz:b_cont1d:bo_yzx' - nx_global = 349 - ny_global = 1801 - nz_global = 1 - iofmt = 'snc' - rearr = 
'box' - nprocsIO = -1 - stride = 4 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 4 - grdorder = 'yxz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'yzx' - blkdecomp1 = 'cont1d' - blkdecomp2 = '' - bdx = 1 - bdy = 1 - bdz = 1 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sn01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sn01 deleted file mode 100644 index c0cb634fc45..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sn01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sn01:snc:none:nprocsIO=1:g_root' - nx_global = 2866 - ny_global = 587 - iofmt = 'snc' - rearr = 'none' - nprocsIO = 1 - stride = -1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sn02 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sn02 deleted file mode 100644 index af2f894fae5..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sn02 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sn02:snc:none:stride=1:g_xy' - nx_global = 2842 - ny_global = 2926 - iofmt = 'snc' - rearr = 'none' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.sn03 b/src/externals/pio1/tests/testpio/namelists/testpio_in.sn03 deleted file mode 100644 index 50c1a49bb77..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.sn03 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'sn03:snc:none:stride=1:g_root' - nx_global = 4691 - ny_global = 3445 - iofmt = 'snc' - rearr = 'none' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = 'none' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'setblk' - gdx = 360 - gdy = 240 - gdz = 1 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.wr01 b/src/externals/pio1/tests/testpio/namelists/testpio_in.wr01 deleted file mode 100644 index bc7e1ec0238..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.wr01 +++ /dev/null @@ -1,30 +0,0 @@ -&io_nml - casename = 'wr01:pnc:box:stride=1:wrdof' - nx_global = 2335 - ny_global = 1011 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = 'namelist' - compdof_output = '../wr01.dof.txt' -/ -&compdof_nml - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'xy' - gdx = 0 - gdy = 0 - gdz = 0 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/namelists/testpio_in.wr02 
b/src/externals/pio1/tests/testpio/namelists/testpio_in.wr02 deleted file mode 100644 index c1c9821f1af..00000000000 --- a/src/externals/pio1/tests/testpio/namelists/testpio_in.wr02 +++ /dev/null @@ -1,16 +0,0 @@ -&io_nml - casename = 'rd01:pnc:box:stride=1:rddof' - nx_global = 1013 - ny_global = 2966 - iofmt = 'pnc' - rearr = 'box' - nprocsIO = -1 - stride = 1 - base = 0 - maxiter = 10 - dir = './none/' - num_aggregator = 1 - DebugLevel = 0 - compdof_input = '../wr01.dof.txt' - compdof_output = 'none' -/ diff --git a/src/externals/pio1/tests/testpio/perl5lib/ChangeLog b/src/externals/pio1/tests/testpio/perl5lib/ChangeLog deleted file mode 100644 index d5bfab83683..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/ChangeLog +++ /dev/null @@ -1,276 +0,0 @@ -============================================================== - Description of changes -============================================================== -Tag name: perl5lib_090613 -Originator(s): erik -Date: Sat Jun 13, 2009 -One-line Summary: Add %ymd indicator for streams so can do year-month-days - -M Streams/Template.pm ---- Add ability to write out %ymd year-month-day - for filenames in streams. It assumes a noleap - calendar -- could easily be extended to make - Gregorian optional. -M t/01.t ---- Change formatting of successful test -M t/02.t ---- Add more tests for %ymd, and offset -M t/03.t ---- Change formatting of successful test -M t/04.t ---- Change formatting of successful test -M t/datm.streams.txt ---------- Add another year and the last-month - to start for testing -A t/datm.ymd.streams.txt ------ Add streams test file with %ymd -M t/datm.template.streams.xml - Add CPLHIST test section with %ymd -M README --- Add notes about Streams and Decomp directories. - -============================================================== -Tag name: perl5lib_090609 -Originator(s): tcraig -Date: Tue Jun 9, 2009 -One-line Summary: add offset support for streams template - -M Streams/Template.pm - -============================================================== -Tag name: perl5lib_090424 -Originator(s): erik -Date: Fri Apr 24 14:10:42 MDT 2009 -One-line Summary: Allow opt hash to be sent to is_valid_value method - -M Build/NamelistDefinition.pm --- Allow an opt hash be sent to the - is_valid_value method. This way - noquotes or other options can be - given to the get_valid_values method - used within is_valid_value. - -============================================================== -Tag name: perl5lib_081008 -Originator(s): erik -Date: Wed Oct 8 21:39:24 MDT 2008 -One-line Summary: Get unit tests working - -M Build/Namelist.pm --- Be more careful with spacing between array elements - Get complex to work, make sure long lists with new-lines - can be parsed correctly. -M t/01.t -------- Comment out last test as it fails (Config object with no files) -M t/04.t -------- Add in test for integer array with new lines, and add some tests - that are commented that currently fail (complex and character multi-lines). - Also set variable names with mixed case variable names. -M t/namelist_definition_cam.xml --- Add a complex variable so can be tested -M t/output_test_namelist.nl ------- New longer namelist testing more aspects - -============================================================== -Tag name: perl5lib_081007 -Originator(s): eaton -Date: Tue Oct 7 18:32:46 MDT 2008 -One-line Summary: Modify Build::Namelist* user interfaces to be case insensitive. - -Modify the user interfaces of the Build::Namelist* modules. 
The user -interfaces include the object methods and the xml definition and defaults -files. By storing the variable names internally as lower case strings in -the Namelist* objects, the xml files can contain variable names in mixed -case without affecting the objects' internal representations. Similarly, -by converting variable names that are passed as method arguments to lower -case, the user can supply mixed case variable names as arguments, and the -correct variables from the definition and defaults files will be found -regardless of the case that appeared in those files. - -Build/Config.pm -. die if no definition file is supplied to the new() method. - -Build/Namelist.pm -. Change validate_variable_value() from an object method to a class method, - and remove the unused argument. -. add fix to _split_namelist_value method to replace embedded newlines by - spaces. - -Build/NamelistDefaults.pm -. make the method interfaces case insensitive by converting all variable - names passed as arguments to lower case, and by maintaining the variable - names in lower case in the internal data structures. -. Changed behavior of adding additional defaults from multiple files. - Previously an exception was triggered if the same variable contained - defaults in multiple files. The new behavior just accumulates all - default specifications for a variable, whether they come from a single - file or from multiple files. It's the responsibility of the get_value - method to find the best match among all possible default values. - -Build/NamelistDefinition.pm -. added methods for extracting data from the namelist definition file for - the purpose of producing documentation -. moved the validate_variable_value call to the _validate_pair method -. make the method interfaces case insensitive by converting all variable - names passed as arguments to lower case, and by maintaining the variable - names in lower case in the internal data structures. - -============================================================== -Tag name: perl5lib_081001 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Wed Oct 1 19:18:52 MDT 2008 -One-line Summary: Fix validate_variable_value so will work from a generic namelist - -Add unit test to set a variable in a generic namelist and use validate_variable_value -to verify it's value is correct for the right one. This required sending the variable -value into valicate_variable_value. - -M Build/NamelistDefinition.pm -M Build/Namelist.pm -M t/04.t -M t/output_test_namelist.nl - -============================================================== -Tag name: perl5lib_080924 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Wed Sep 24 17:22:07 MDT 2008 -One-line Summary: Add ability to read in additional files for defaults and definition, add empty config, and add lastmonth option to streams - -M Build/NamelistDefaults.pm ------- Add "add" method to read in an additional file - after object already constructed. -M Build/Config.pm ----------------- Add ability to create an empty config object. -M Build/NamelistDefinition.pm ----- Add "add" method to read in an additional file - after object already constructed. 
-M Streams/Template.pm ------------- Add option to output previous year December in - list of filenames for %ym (lastmonth option) ->>>>>>>>>>>>>> Get testing working with above changes and features added -M t/datm.streams.txt -M t/01.t ----------------------- Test that empty config works -M t/namelist_defaults_cam.xml -- Separate defaults into cam and clm parts -M t/02.t ----------------------- Test that lastmonth option works for streams -D t/namelist_definition.xml ---- Separate definition into cam and clm parts -M t/04.t ----------------------- Test that add method works for both defaults - and namelistdefinition. -A t/namelist_defaults_clm.xml -- Just the clm defaults -A + t/namelist_definition_clm.xml Just the clm part of the definition -A t/namelist_definition_cam.xml Everything but the clm part of the definition - -============================================================== -Tag name: perl5lib_080715 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Mon Aug 11 10:44:52 MDT 2008 -One-line Summary: Turn off printing of file existance if NOT -verbose - -M Streams/Template.pm ----------- Turn off printing of file - checking if NOT $printing; - -============================================================== -Tag name: perl5lib_080715 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Tue Jul 15 15:51:06 MDT 2008 -One-line Summary: Add ability to optionally include tInterpAlgo - -M Streams/Template.pm ----------- Add optional ability to deal with tInterpAlgo -M t/02.t ------------------------ Add tInterpAlgo unit test -M t/datm.template.streams.xml --- Add new stream with tInterpAlgo - -============================================================== -Tag name: perl5lib_080701 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Tue Jul 1 14:52:20 MDT 2008 -One-line Summary: Include previous year December for streams %ym form - -M Streams/Template.pm ---- Include the year before beg_year December when listing - year-month files with the %ym form. ->>>>>>>> Change so unit-test for streams will work -M t/datm.streams.txt -M t/02.t - -============================================================== -Tag name: perl5lib_080611 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Wed Jun 11 13:10:37 MDT 2008 -One-line Summary: Increase line length to 90 characters before split up values... - -M Build/Namelist.pm --- Split up line after 90 characters long instead of 70. - -============================================================== -Tag name: perl5lib_080604 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Wed Jun 4 13:11:56 MDT 2008 -One-line Summary: Finalize validation changes - -Improve documentation, make sure all public methods are documented. Remove notes -about needing to do validation as is done now. Change the validate methods a bit -and make them more robust. - -M Build/Config.pm --------------- Add get_valid_values method and use it internally. -M Build/NamelistDefinition.pm --- Add namelist validate_variable_value to validate - method. Add option to return without quotes to - get_valid_values method. -M Build/Namelist.pm ------------- Make validate_variable_value more robust. - ->>>>> Get unit tests to work with above changes and add tests for new methods. 
- -M t/01.t -M t/namelist_definition.xml -M t/04.t -M t/config_cache.xml -M t/config_definition.xml - -============================================================== -Tag name: perl5lib_080522 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Thu May 22 23:41:48 MDT 2008 -One-line Summary: Add methods to check valid values - -M Build/Namelist.pm ------------- Add validate_variable_value method -M Build/NamelistDefinition.pm --- Add get_valid_values method -M t/04.t ------------------------ Add tests for new methods -M t/output_test_namelist.nl ----- Add new variables that are being checked - -============================================================== -Tag name: perl5lib_080517 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Wed Apr 30 16:45:02 MDT 2008 -One-line Summary: Add optional method to add comments to end of namelist file - -M Build/Namelist.pm ------------- new method and option to write method -M t/datm.streams.txt ------------ add comments -M t/output_test_namelist.nl ----- add comments -M t/04.t ------------------------ add new option -M t/config_definition.xml ------- change so sets needed data - -============================================================== -Tag name: perl5lib_080430 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Wed Apr 30 16:45:02 MDT 2008 -One-line Summary: Add file check for DIN_LOC_ROOT and add Namelist stuff - -Add file check for DIN_LOC_ROOT in Streams/Template.pm -Add new files and simple tests for Namelist objects from cam3_5_43. - ------------- New namelist objects from cam3_5_43 -A Build/NamelistDefaults.pm -A Build/NamelistDefinition.pm --- change hgrid attribute to res. -A Build/Namelist.pm ------------- Tests for namelist objects. -A t/namelist_defaults_cam.xml -A t/namelist_definition.xml -A t/output_test_namelist.nl -A t/04.t ------------- Changes -M t/config_cache.xml ----- Set resolution and dyn to something. -M Streams/Template.pm ---- Change so can test for files with DIN_LOC_ROOT in them. - -============================================================== -Tag name: perl5lib_071204 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Tue Dec 4 13:13:42 MST 2007 -One-line Summary: Add in decomposition stuff - -Add Decomp::Config object and add tests for it in the test directory. - -============================================================== -Tag name: perl5lib_070928 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Fri Sep 28 12:56:01 MDT 2007 -One-line Summary: Add in Streams Template stuff - -Also add a get_names method to Build/Config.pm -And add in tests to Streams::Template. Modify Config -tests to use relative path rather than a specific absolute -path to a directory for Brian Eaton. - -============================================================== -============================================================== -Tag name: perl5lib_070817 -Originator(s): erik (KLUZEK ERIK 1326 CGD) -Date: Fri Aug 17 15:24:50 MDT 2007 -One-line Summary: Initial checkin from cam3_5_08 -============================================================== diff --git a/src/externals/pio1/tests/testpio/perl5lib/README b/src/externals/pio1/tests/testpio/perl5lib/README deleted file mode 100644 index 321448a73c0..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/README +++ /dev/null @@ -1,10 +0,0 @@ -perl5lib/README - -Generic perl5 utilities and subroutines. 
- - -Build --- Generic utilities for reading build-time configuration files -Streams - Utility for reading/writing streams files for ccsm data models -Decomp -- Utility to figure out decompositions for cam, cice and pop -XML ----- Generic XML reader -t ------- test directory (includes test scripts and sample input files) diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/Changes b/src/externals/pio1/tests/testpio/perl5lib/XML/Changes deleted file mode 100644 index d0be5104f77..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/Changes +++ /dev/null @@ -1,27 +0,0 @@ -Revision history for Perl extension XML::Lite. - -0.14 31 January 2003 - - Fixed a major bug in parsing empty elements - - Fixed some typos in documenation - - Fixed error in documentation of XML::Element::get_attributes interface -0.13 13 November 2001 - - Minor bug fixes? -0.12 15 November 2001 - - Fixed bugs in test that failed on CPAN Testers - - Fixed warnings in XML::Lite::Element->_find_self - - Fixed bug where mutiple child lists failed (problem in opt code) - - Added tests for above - - Removed from CPAN because Matt Sergeant got upset -0.11 6 November 2001 - - XML::Lite::Element->get_text() now removes CDATA tags (but leaves content) -0.10 6 November 2001 - - Fixed children() and text() methods by re-vamping the - tree. - - Built tests for all exposed methods of all objects - - Built tests for all contructor calls -0.05 4 November 2001 - - Added get_text method -0.01 Sat Aug 25 13:31:48 2001 - - original version; created by h2xs 1.20 with options - -XA -n XML::Lite - diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm b/src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm deleted file mode 100644 index d6aa32e978c..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm +++ /dev/null @@ -1,550 +0,0 @@ -############################################################ -# -# Module: XML::Lite -# -# Created: 25 August 2001 by Jeremy Wadsack for Wadsack-Allen Digital Group -# Copyright (C) 2001 Wadsack-Allen. All rights reserved. -# -# TODO -# * Need to support for doctypes, and doctype delarations -# * Could add a method 'element' that accepts path-like syntax -# * Could add write_to_file, to_string, etc. methods (requires that the orig doc be preserved!) -# * Could improve support for comments, CDATA, PI's etc as objects? -# * Expose handler interface -# * Expose a method to provide better error handling -# -############################################################ -# Date Modification Author -# ---------------------------------------------------------- -# 04.Sep.2001 Fixed lots of bugs and built tests JW -# 08.Sep.2001 Added linked list & handlers to parser JW -# 04.Nov.2001 Fixed bug in parameter handling JW -############################################################ -package XML::Lite; -use strict; -#$^W=1; # 'use warnings;' in perl 5.005_62 and later - -=head1 NAME - -XML::Lite - A lightweight XML parser for simple files - -=head1 SYNOPSIS - -use XML::Lite; -my $xml = new XML::Lite( xml => 'a_file.xml' ); - -=head1 DESCRIPTION - -XML::Lite is a lightweight XML parser, with basic element traversing -methods. It is entirely self-contained, pure Perl (i.e. I based on -expat). It provides useful methods for reading most XML files, including -traversing and finding elements, reading attributes and such. It is -designed to take advantage of Perl-isms (Attribute lists are returned as -hashes, rather than, say, lists of objects). 
It provides only methods -for reading a file, currently. - -=head1 METHODS - -The following methods are available: - -=over 4 - -=cut - -use XML::Lite::Element; -BEGIN { - use vars qw( $VERSION @ISA ); - $VERSION = '0.14'; - @ISA = qw(); -} # end BEGIN - -# non-exported package globals go here -use vars qw( %ERRORS ); - -# Predefined error messages in English -%ERRORS = ( - NO_START => "A closing tag (\%1) was found with no corresponding start tag at position \%0 in your XML file.\n", - NO_ROOT => "Your XML document must begin with a root element.\n", - ROOT_NOT_CLOSED => "The root element of your XML document (starting at position \%0) is incomplete.\n", - ELM_NOT_CLOSED => "The XML-like element starting at position \%0 is incomplete. (Did you forget to escape a '<'?)\n", -); -############################ -## The object constructor ## -############################ - -=item my $xml = new XML::Lite( xml => $source[, ...] ); - -Creates a new XML::Lite object. The XML::Lite object acts as the document -object for the $source that is sent to it to parse. This means that you -create a new object for each document (or document sub-section). As the -objects are lightweight this should not be a performance consideration. - -The object constructor can take several named parameters. Parameter names -may begin with a '-' (as in the example above) but are not required to. The -following parameters are recognized. - - xml The source XML to parse. This can be a filename, a scalar that - contains the document (or document fragment), or an IO handle. - - -As a convenince, if only on parameter is given, it is assumed to be the source. -So you can use this, if you wish: - - my $xml = new XML::Lite( 'file.xml' ); - -=cut - -sub new { - my $self = {}; - my $proto = shift; - my %parms; - my $class = ref($proto) || $proto; - - # Parse parameters - $self->{settings} = {}; - if( @_ > 1 ) { - my($k, $v); - local $_; - %parms = @_; - while( ($k, $v) = each %parms ) { - $k =~ s/^-//; # Removed leading '-' if it exists. (Why do Perl programmers use this?) - $self->{settings}{$k} = $v; - } # end while - } else { - $self->{settings}{xml} = $_[0]; - } # end if; - - bless ($self, $class); - - # Some defaults - $self->{doc_offset} = 0; - $self->{doc} = ''; - $self->{_CDATA} = []; - $self->{handlers} = {}; - - # Refer to global error messages - $self->{ERRORS} = $self->{settings}{error_messages} || \%ERRORS; - - # Now parse the XML document and build look-up tables - return undef unless $self->_parse_it(); - - return $self; -} # end new - -########################## -## ## -## Public Methods ## -## ## -########################## - -=item my $elm = $xml->root_element() - -Returns a reference to an XML::Lite::Element object that represents -the root element of the document. - -Returns C on errors. 
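(A minimal usage sketch of the constructor and lookup methods documented above; the file and element names are illustrative only, not part of the module:)

    use XML::Lite;

    # Parse a document from a file; a scalar holding the XML text or an IO
    # handle would work the same way, per the constructor notes above.
    my $xml = new XML::Lite( xml => 'a_file.xml' );
    die "could not parse document\n" unless defined $xml;

    my $root = $xml->root_element();
    print "root element is ", $root->get_name(), "\n";

    # elements_by_name returns XML::Lite::Element objects for every match.
    my @hits = $xml->elements_by_name( 'some_element' );
    print "found ", scalar(@hits), " matching element(s)\n";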
- -=cut - -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 04Sep2001 Added root alias JW -# 08Sep2001 Modified to use tree instead of element list JW -# 05Nov2001 Added additional aliases JW -# ---------------------------------------------------------- -sub root; -*root = \&root_element; -sub get_root; -*get_root = \&root_element; -sub get_root_element; -*get_root_element = \&root_element; -sub root_element { - my $self = shift; - return undef unless defined $self->{doc}; - - # Find the first thing in the root of tree that's an element - my $root; - foreach( @{$self->{tree}} ) { - if( @$_ == 4 ) { - $root = $_; - last; - } # end if - } # end foreach - return undef unless defined $root; - return XML::Lite::Element->new( $self, $root ); -} # end root_element - - -=item @list = $xml->elements_by_name( $name ) - -Returns a list of all elements that match C<$name>. -C<@list> is a list of L objects -If called in a scalar context, this will return the -first element found that matches (it's more efficient -to call in a scalar context than assign the results -to a list of one scalar). - -If no matching elements are found then returns C -in scalar context or an empty list in array context. - -=cut - -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 27Aug2001 Added method. JW -# 04Sep2001 Added element_by_name alias JW -# ---------------------------------------------------------- -sub element_by_name; -*element_by_name = \&elements_by_name; -sub elements_by_name { - my $self = shift; - my( $name ) = @_; - - if( wantarray ) { - my @list = (); - foreach( @{$self->{elements}{$name}} ) { - my $elm = new XML::Lite::Element( $self, $_, ); - push @list, $elm if defined $elm; - } # end foreach - return @list; - } else { - return new XML::Lite::Element( $self, $self->{elements}{$name}[0] ); - } # end if -} # end elements_by_name - - -########################## -## ## -## Private Methods ## -## ## -########################## -# ---------------------------------------------------------- -# Sub: _parse_it -# -# Args: (None) -# -# Returns: True value on success, false on failure -# -# Description: Parses the XML file in $self->{settings}{xml} -# If this is an IO reference or filename, then reads from that, -# else if it starts with '<' assumes it's an XML document. -# During parsing, stores an internal database of named elements -# for lookups ($self->{elements}) and an internal linked list -# of elements and text nodes ($self->{tree}) for traversal. 
-# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 08Sep2001 Added linked list tree to internal objects JW -# 30Jan2003 Fixed bug in child tree with EMTPY elements JW -# ---------------------------------------------------------- -sub _parse_it { - my $self = shift; - - # Get the xml content - if( $self->{settings}{xml} =~ /^\s*{doc} = $self->{settings}{xml}; - } else { - $self->{doc} = $self->_get_a_file( $self->{settings}{xml} ); - } # end if - return 0 unless defined $self->{doc}; - delete $self->{settings}{xml}; # Just save some memory - - # -- Normalize the document to make things easier to find - # Remove comments (but replace with spaces to maintain positioning for messages - $self->{doc} =~ s/()/' ' x length($1)/sge; - - # Move CDATA to hash and insert a reference to it (so it doesn't mess up regexp parsing) - $self->{doc} =~ s//'_store_cdata($1).']]\/>'/sge; - - # Remove processing instructions (but replace with spaces to maintain positioning for messages - # (Perhaps we could do something with these -- they are instructions for processors...) - $self->{doc} =~ s/(<\?.+?\?>)/' ' x length($1)/sge; - - # NOTE: This makes it not possible to save the same formatting - # -- will also remove the space from the processing instruction! - if( $self->{doc} =~ s/^(\s+)// ) { - $self->{doc_offset} = length $1; # Store the number of removed chars for messages - } # end if - $self->{doc} =~ s/\s+$//; - - - # Build lookup tables - $self->{elements} = {}; - $self->{tree} = []; - # - These are used in the building process - my $element_list = []; - my $current_element = $self->{tree}; - - # Call init handler if defined - &{$self->{handlers}{init}}($self) if defined $self->{handlers}{init}; - - # Make a table of offsets to each element start and end point - # Table is a hash of element names to lists of offsets: - # [start_tag_start, start_tag_end, end_tag_start, end_tag_end] - # where tags include the '<' and '>' - - # Also make a tree of linked lists. List contains root element - # and other nodes. Each node consits of a list ref (the position list) - # and a following list containing the child element. Text nodes are - # a list ref (with just two positions). 
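    # (Illustrative note, not from the original source: for a fragment like
    #  <a><b attr="1"/></a> the tree built below is roughly
    #      [ [a_open_start, a_open_end, a_close_start, a_close_end],
    #        [ [b_start, b_end, undef, undef] ] ]
    #  i.e. every element contributes a position list; an element with a
    #  closing tag is followed by an array ref holding its children, while a
    #  single-tag element keeps undef end-tag offsets and has no child list.)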
- - # Find the opening and closing of the XML, giving errors if not well-formed - my $start_pos = index( $self->{doc}, '<' ); - $self->_error( 'NO_ROOT' ) if $start_pos == -1; - my $end_pos = index( $self->{doc}, '>', $start_pos + 1 ); - $self->_error( 'ROOT_NOT_CLOSED', $start_pos + $self->{doc_offset} ) if $end_pos == -1; - my $doc_end = rindex( $self->{doc}, '>' ); - $self->_error( 'ROOT_NOT_CLOSED' ) if $doc_end == -1; - - # Now walk through the document, one tag at a time, building up our - # lookup tables - while( $end_pos <= $doc_end ) { - - # Get a tag - my $tag = substr( $self->{doc}, $start_pos, $end_pos - $start_pos + 1 ); - - # Get the tag name and see if it's an end tag (starts with \s]+)}; - - if( $end ) { - # If there is no start tag for this end tag then throw an error - $self->_error( 'NO_START', $start_pos + $self->{doc_offset}, $tag ) unless defined $self->{elements}{$name}; - - # Otherwise, add the end point to the array for the last element in - # the by-name lookup hash - my( $x, $found ) = (@{$self->{elements}{$name}} - 1, 0); - while( $x >= 0 ) { - - # Close the last open element (ignore elements already closed) - if( @{$self->{elements}{$name}[$x]} < 4 ) { - $self->{elements}{$name}[$x][2] = $start_pos; - $self->{elements}{$name}[$x][3] = $end_pos; - $found = 1; - last; - } # end if - $x--; - } # end while - - # If we didn't find an open element then throw an error - $self->_error( 'NO_START', $start_pos + $self->{doc_offset}, $tag ) unless $found; - - # Call an end-tag handler if defined (not yet exposed) - &{$self->{handlers}{end}}($self, $name) if defined $self->{handlers}{end}; - - # Close element in linked list (tree) - $current_element = pop @$element_list; - - } else { - # Make a new list in the by-name lookup hash if none found by this name yet - $self->{elements}{$name} = [] unless defined $self->{elements}{$name}; - - # Add start points to the array of positions and push it on the hash - my $pos_list = [$start_pos, $end_pos]; - push @{$self->{elements}{$name}}, $pos_list; - - # Call start-tag handler if defined (not yet exposed) - &{$self->{handlers}{start}}($self, $name) if defined $self->{handlers}{start}; - - # If this is a single-tag element (e.g. 
<.../>) then close it immediately - if( $tag =~ m{/\s*>$} ) { - push @$current_element, $pos_list; - $pos_list->[2] = undef; - $pos_list->[3] = undef; - # Call an end-tag handler now too - &{$self->{handlers}{end}}($self, $name) if defined $self->{handlers}{end}; - } else { - # Now add the element to the linked list (tree) - push @$element_list, $current_element; - # Otherwise, put this on the list and start a sublist for children - my $new_element = []; - push @$current_element, $pos_list, $new_element; - $current_element = $new_element; - } # end if - - } # end if - - # Move the start pointer to beginning of next element - $start_pos = index( $self->{doc}, '<', $start_pos + 1 ); - last if $start_pos == -1 || $end_pos == $doc_end; - - # Now $end_pos is end of old tag and $start_pos is start of new - # So do things on the data between the tags as needed - if( $start_pos - $end_pos > 1 ) { - # Call any character data handler - &{$self->{handlers}{char}}($self, substr($self->{doc}, $end_pos + 1, $start_pos - $end_pos - 1)) if defined $self->{handlers}{char}; - - # Inserting the text into the linked list as well -# push @$current_element, [$end_pos + 1, $start_pos - 1]; - } # end if - - # Now finish by incrementing the parser to the next element - $end_pos = index( $self->{doc}, '>', $start_pos + 1 ); - - # If there is no next element, and we're not at the end of the document, - # then throw an error - $self->_error( 'ELM_NOT_CLOSED', $start_pos + $self->{doc_offset} ) if $end_pos == -1; - } # end while - - # Call finalization handler if defined and return it's value - return &{$self->{handlers}{final}}($self) if defined $self->{handlers}{final}; - - # Else return the tree pointer - return $self->{tree}; -} # end _parse_it - -# ---------------------------------------------------------- -# Sub: _get_a_file -# -# Args: $file -# -# Returns: Scalar content of $file, undef on error -# -# Description: Reads from $file and returns the content. -# $file may be either a filename or an IO handle -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 28Aug2001 Added scalar and IO handling JW -# ---------------------------------------------------------- -sub _get_a_file { - my $self = shift; - my $file = shift; - my $content = undef; - - # If it's a ref and a handle, then read that - if( ref($file) ) { - $content = join '', <$file>; - } - # If it's a scalar and the file exits then open it - elsif( -e $file ) { - open( XML, $file ) || return undef; - $content = join '', ; - close XML || return undef; - } - # Don't know how to handle this type of parameter - else { - return undef; - } # end if - - return $content; -} # end _get_a_file - -# ---------------------------------------------------------- -# Sub: _error -# -# Args: $code [, @args] -# $code A code representing the message to send -# -# Returns: Does not. Dies. -# -# Description: Outputs an error message and dies -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# ---------------------------------------------------------- -sub _error { - my $self = shift; - my( $code, @args ) = @_; - my $msg = $self->{ERRORS}{$code}; - - # Handle replacement codes - $msg =~ s/\%(\d+)/$args[$1]/g; - - # Throw exception - die ref($self) . 
":$msg\n"; -} # end _error - - -# ---------------------------------------------------------- -# Sub: _store_cdata -# -# Args: $content -# -# Returns: A reference to the CDATA element, padded to -# original size. -# -# Description: Stores the CDATA element in the internal -# hash, and returns a reference plus padding to replace it -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 28Aug2001 Added to support CDATA JW -# ---------------------------------------------------------- -sub _store_cdata { - my $self = shift; - my( $content ) = @_; - my $ref = @{$self->{_CDATA}}; - $self->{_CDATA}[$ref] = $content; - return $ref . ' ' x (length($content) - length($ref)); -} # end _store_cdata - - -# ---------------------------------------------------------- -# Sub: _dump_tree -# -# Args: $node -# $node A starting node, or the root, if not given -# -# Returns: The string to print -# -# Description: Builds a printable tree in a debugging format -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 06Nov2001 Added for debugging tree JW -# ---------------------------------------------------------- -sub _dump_tree { - my $self = shift; - my $node = shift || $self->{tree}; - - my $tree = ''; - for( my $i = 0; $i < scalar(@$node) && defined $node->[$i]; $i++ ) { - if( (scalar(@{$node->[$i]}) == 4) && (defined $node->[$i][2]) ) { - $tree .= '[' . join( ',', @{$node->[$i]} ) . "] " - . substr($self->{doc}, $node->[$i][0], $node->[$i][1] - $node->[$i][0] + 1) - . "..." - . substr($self->{doc}, $node->[$i][2], $node->[$i][3] - $node->[$i][2] + 1) . " (child $i)\n"; - # Do child list - $i++; - $tree .= join( '', map( " $_\n", split( "\n", $self->_dump_tree( $node->[$i] ) ) ) ); - } elsif( (scalar(@{$node->[$i]}) == 4) ) { - $tree .= '[' . join( ',', $node->[$i][0], $node->[$i][1] ) . "] " - . substr($self->{doc}, $node->[$i][0], $node->[$i][1] - $node->[$i][0] + 1) . "\n"; - } else { - $tree .= "ERROR! Invalid node: [" . join( ',', @{$node->[$i]} ) . "]\n"; - } # end for - } # end for - - return $tree; -} # end _dump_tree - -# module clean-up code here (global destructor) -END { } - -1; # so the require or use succeeds - -=back - -=head1 BUGS - -Lots. This 'parser' (Matt Sergeant takes umbrance to my us of that word) will handle some XML -documents, but not all. - -=head1 VERSION - -0.14 - -=head1 AUTHOR - -Jeremy Wadsack for Wadsack-Allen Digital Group (dgsupport@wadsack-allen.com) - -=head1 COPYRIGHT - -Copyright 2001-2003 Wadsack-Allen. All rights reserved. -This library is free software; you can redistribute it and/or -modify it under the same terms as Perl itself. - -=cut - diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm b/src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm deleted file mode 100644 index 388511d89a0..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm +++ /dev/null @@ -1,491 +0,0 @@ -############################################################ -# -# Module: XML::Lite::Element -# -# Created: 27 August 2001 by Jeremy Wadsack for Wadsack-Allen Digital Group -# Copyright (C) 2001 Wadsack-Allen. All rights reserved. -# -# TODO -# * firstChild, lastChild, previousSibling, nextSibling? -# * Equivalent 'parent' method to return enclosing element. -# * Could add to_string methods to reproduce original XML content (incl. 
tags) (requires that original doc be preserved!) -# * Could add open_tag, close_tag methods to get those parts of content -# -############################################################ -# Date Modification Author -# ---------------------------------------------------------- -# 08Sep2001 Changed ->{parent} to ->{doc} JW -# Changed ->{_positions} to ->{node} JW -############################################################ -package XML::Lite::Element; - -=head1 NAME - -XML::Lite::Element - A class representing an XML element in an XML::Lite -document - -=head1 SYNOPSIS - -use XML::Lite; -my $xml = new XML::Lite( -xml => 'a_file.xml' ); -my $elm = $xml->elements_by_name( 'element_name' ); -print $elm->get_attribute( 'attribute_name' ); - -=head1 DESCRIPTION - -C objects contain rudimentary methods for querying XML -elements in an XML document as parsed by XML::Lite. Usually these objects -are returned by method calls in XML::Lite. - -=head1 METHODS - -The following methods are available. All methods like 'get_name' can be -abbeviated as 'name.' - -=over 4 - -=cut - -use strict; -BEGIN { - use vars qw( $VERSION @ISA ); - $VERSION = '0.14'; - @ISA = qw(); -} # end BEGIN -# non-exported package globals go here -use vars qw(); - -############################ -## The object constructor ## -############################ - -=item my $element = new XML::Lite::Element( $owner_document, \@pointers ); - -Creates a new XML::Lite::Element object from the XML::Lite object, C<$owner_document>. - -Currently, you must not call this manually. You can create an object with one of -the 'factory' methods in XML::Lite, such as C or C -or with one of the XML::Lite::Element 'factory' methods below, like C. - -=cut - -sub new { - my $self = {}; - my $proto = shift; - my $class = ref($proto) || $proto; - - # The arguments are as follows: - # $owner_document is an XML::Lite object within which this element lives - # \@pointers is a two or four element array ref containing the offsets - # into the original document of the start and end points of - # the opening and closing (when it exists) tags for the element - - # Validate arguments - return undef unless @_ >= 2; - return undef unless ref($_[0]) && (ref($_[1]) eq 'ARRAY'); - - # Load 'em up - - # The data structure for the ::Element object has these properties - # doc A reference to the containing XML::Lite object - # node A reference to an array of pointers to our element in the document - # self A pointer to our own entry in the owner doc's tree - # parent A pointer to our parent elemenet's entry in the owner doc's tree - # name The name on our tag - # _attrs A string of the attibutes in our tag (unparsed) - # attrs A hash ref of attributes in our tag - - $self->{doc} = $_[0]; - $self->{node} = $_[1]; - - # Using the pointers, find out tag name, and attribute list from the - # opening tag (if there are any attributes). - my $tag = substr( $self->{doc}{doc}, $self->{node}[0], $self->{node}[1] - $self->{node}[0] + 1 ); - if( $tag =~ m{^<\s*([^/>\s]+)\s+([^>]+)\s*/?\s*>$} ) { - $self->{name} = $1; - $self->{_attrs} = $2; # Store the attributes as a scalar. Parse when asked - } elsif( $tag =~ m{^<\s*([^/>\s]+)\s*/?\s*>$} ) { - $self->{name} = $1; - $self->{_attrs} = ''; - } else { - # Should have been caught in the parsing! maybe an assert? - $self->{doc}->_error( 'ELM_NOT_CLOSED', $self->{node}[0] + $self->{doc}->{doc_offset} ); - } # end if - - # Good. Now returns it. 
- bless ($self, $class); - return $self; -} # end new - - -########################## -## ## -## Public Methods ## -## ## -########################## - -=item my $content = $element->get_content() - -Returns the content of the XML element. This may include other XML tags. The -entire content is returned as a scalar. - -=cut - -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 28Aug2001 Added CDATA retoration JW -# 06Nov2001 Added <.../> optimization JW -# ---------------------------------------------------------- -sub content; -*content = \&get_content; -sub get_content { - my $self = shift; - - # If we don't have any content, then we should return - # '' right away. - return '' unless defined $self->{node}[2]; - - # Using our pointers, find everything between our tags - my $content = substr( $self->{doc}{doc}, $self->{node}[1] + 1, $self->{node}[2] - $self->{node}[1] - 1 ); - - # Now, restore any CDATA chunks that may have been pulled out - $content =~ s//{doc}{_CDATA}[$1]]]>/g; - - # And return the content - return $content; -} # end get_content - - -=item my %attributes = $element->get_attributes() - -Returns a hash of name - value pairs for the attributes in this element. - -=cut - -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 13Mar2002 Return empty hash if no attributes EBK -# 31Jan2003 Fixed docs - return hash, not ref JW -# ---------------------------------------------------------- -sub attributes; -*attributes = \&get_attributes; -sub get_attributes { - my $self = shift; - - # Parse the attribute string into a hash of name-value pairs - # unless we've already done that. - $self->_parse_attrs() unless defined $self->{attrs}; - - # Just return a *copy* of the hash (this is read-only after all!) - if ( defined($self->{attrs}) ) { - return %{$self->{attrs}}; - } else { - my %empty; - return %empty; - } -} # end get_attributes - -=item my $value = $element->get_attribute( $name ) - -Returns the value of the named attribute for this element. - -=cut - -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# ---------------------------------------------------------- -sub attribute; -*attribute = \&get_attribute; -sub get_attribute { - my $self = shift; - my( $name ) = @_; - - # If we haven't parsed the attribute string into a hash, then do that. - $self->_parse_attrs() unless defined $self->{attrs}; - - # Now return the requested attribute. If it's not there - # then 'undef' is returned - return $self->{attrs}{$name}; -} # end get_attribute - - -=item my $name = $element->get_name() - -Returns the name of the element tag - -=cut - -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# ---------------------------------------------------------- -sub name; -*name = \&get_name; -sub get_name { - my $self = shift; - # Just look it up. We got this in the contructor - return $self->{name}; -} # end get_name - - -=item my @children = $element->get_children() - -Returns a list of XML::Lite::Element objects for each element contained -within the current element. This does not return any text or CDATA in -the content of this element. You can parse that through the L -method. 
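(A sketch of the kind of traversal this method enables; it assumes an XML::Lite object $xml built as in the SYNOPSIS, and the subroutine name is illustrative:)

    # Print every element name, indented by nesting depth.
    sub walk {
        my( $element, $depth ) = @_;
        print '  ' x $depth, $element->get_name(), "\n";
        walk( $_, $depth + 1 ) for $element->get_children();
    }
    walk( $xml->root_element(), 0 );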
- -If no child elements exist then an empty list is returned. - -=cut - -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 06Sep2001 Added to support tree-like iteration JW -# 04Nov2001 Changed to get_children (with alias) JW -# 05Nov2001 Fixed so that it actually works JW -# 06Nov2001 Added comments, optimizations and bug fixes JW -# ---------------------------------------------------------- -sub children; -*children = \&get_children; -sub get_children { - my $self = shift; - my @children = (); - - # If we don't have any content, then we should return an emtpty - # list right away -- we have no children. - return @children unless defined $self->{node}[2]; - - # We need to traverse the document tree and find our own node - # This will also load {children} and {parent} as well - $self->_find_self() unless defined $self->{self}; - - # Now that we know who we are (if this didn't fail) we can - # iterate through the sub nodes (our child list) and make - # XML::Lite::Elements objects for each child - if( defined $self->{children} ) { - my $i = 0; - my $node = $self->{children}[$i]; - while( defined $node ) { - push @children, XML::Lite::Element->new( $self->{doc}, $node ); - $i++ if (@$node == 4) && (defined $node->[2]); # Skip element's child list if it exists - $node = $self->{children}[++$i]; - } # end while - } # end if - - return @children; -} # end get_children - - -=item my $text = $element->get_text() - -Returns a scalar of the text within an element sans children elements. -This effectively takes the content of the element and strips all XML -elements. All text is concatenated into a single string. White space -is preserved. CDATA elements are included without the optimization JW -# 06Nov2001 Included CDATA text recovery JW -# ---------------------------------------------------------- -sub text; -*text = \&get_text; -sub get_text { - my $self = shift; - my $content = ''; - - # If we don't have any content, then we should return - # $content right away -- we have no text - return $content unless defined $self->{node}[2]; - - # Otherwise get out content and children - my @children = $self->get_children; - my $orig_content = $self->get_content; - - # Then remove the child elements from our content - my $start = 0; - foreach( @children ) { - my $end = $_->{node}[0] - $self->{node}[1] - 1; - $content .= substr( $orig_content, $start, $end - $start); - $start = ($_->{node}[3] || $_->{node}[1]) - $self->{node}[1]; - } # end foreach - $content .= substr( $orig_content, $start ) if $start < length($orig_content); - - # Remove the CDATA wrapper, preserving the content - $content =~ s//$1/g; - - # Return the left-over text - return $content; -} # end get_text - -########################## -## ## -## Private Methods ## -## ## -########################## -# ---------------------------------------------------------- -# Sub: _parse_attrs -# -# Args: (None) -# -# Returns: True value on success, false on failure -# -# Description: Pares the attributes in the element into a hash -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 18Nov2006 Allow whitespace between = and attribute BEE -# value. Allow values to use either single -# or double quotes. 
-# 08Apr2002 Allow null strings as valid values BEE -# 13Mar2002 Don't do anything if not defined EBK -# ---------------------------------------------------------- -sub _parse_attrs { - my $self = shift; - - my $attrs = $self->{_attrs}; - if ( defined($attrs) ) { - $attrs =~ s/^\s+//; - $attrs =~ s/\s+$//; - $self->{attrs} = {}; - while( $attrs =~ s/^(\S+)\s*=\s*["']([^"]*)["']// ) #" For syntax highlighter - { - $self->{attrs}{$1} = $2; - $attrs =~ s/^\s+//; - } # end while - } - - return 1; -} # end _parse_atttrs - -# ---------------------------------------------------------- -# Sub: _find_self -# -# Args: (None) -# -# Returns: A reference to our node or undef on error -# -# Description: Traverses the owner document's tree to find -# the node that references the current element. Sets -# $self-{self} as a side-effect. Even if this is already set, -# _find_self will traverse again, so don't call unless needed. -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 06Nov2001 Added to support children() method JW -# 13Mar2002 Check that nodes are defined EBK -# ---------------------------------------------------------- -sub _find_self { - my $self = shift; - - # We actually just call this recusively, so the first - # argument can be a starting point to descend from - # but we don't doc that above - my $node = shift || $self->{doc}{tree}; - return undef unless defined $node; - - # Our owner XML::Lite document has a tree (list of lists) that - # tracks all elements in the document. Starting at the root - # of the tree, walk through each node until we find one with - # the same offsets as our $self->{node} has. - - # Walk through the nodes in this node and compare to our selves - for( my $i = 0; $i < scalar(@$node) && defined $node->[$i]; $i++ ) { - - # If this is our self, then we're done! - # NOTE: Since the list references are the same in the by-name hash - # and tree objects, we can just do a reference compare here. - # If objects are ever created with non-factory methods then we need to - # use a _compare_lists call. 
-# if( _compare_lists( $node->[$i], $self->{node} ) ) { - if( $node->[$i] eq $self->{node} ) { - $self->{parent} = $node; - $self->{self} = $node->[$i]; - # If this list has children, then add a pointer to that list - $self->{children} = $node->[$i + 1] if (scalar(@{$node->[$i]}) == 4) && (defined $node->[$i][2]); - last; - } # end if - - # For efficiency, we only need look at nodes that start before - # our node does - if ( defined($node->[$i][0]) && defined($self->{node}->[3]) ) { - last if $node->[$i][0] > ($self->{node}->[3] || $self->{node}->[1]); - } - - # If this is a node with content (start and end tag) then check children - if( (scalar(@{$node->[$i]}) == 4) && (defined $node->[$i][2]) ) { - # This is a node with content (start and end tag) - # So look at the child node list that follows and see what it's got - $i++; - last if defined $self->_find_self( $node->[$i] ); - } # end for - - } # end for - - # And return it - return $self->{self}; -} # end _find_self - -# ---------------------------------------------------------- -# Sub: _compare_lists -# -# Args: $list_ref_1, $list_ref_2 -# -# Returns: True if the same elements, false otherwise -# -# Description: Compare the contents of two lists and returns -# whether they are the same -# NOTE: This is a CLASS METHOD (or sub) -# ---------------------------------------------------------- -# Date Modification Author -# ---------------------------------------------------------- -# 06Nov2001 Added to support node lookups JW -# ---------------------------------------------------------- -sub _compare_lists { - my( $rA, $rB ) = @_; - - # Lists are not equal unless same size - return 0 unless scalar(@$rA) == scalar(@$rB); - - # Now compare item by item. - my $i; - for( $i = 0; $i < scalar(@$rA); $i++ ) { - return 0 unless $rA->[$i] eq $rB->[$i]; - } # end for - - return 1; -} # end _compare_lists - -# module clean-up code here (global destructor) -END { } - -1; # so the require or use succeeds - -=back - -=head1 VERSION - -0.14 - -=head1 AUTHOR - -Jeremy Wadsack for Wadsack-Allen Digital Group (dgsupport@wadsack-allen.com) - -=head1 COPYRIGHT - -Copyright 2001 Wadsack-Allen. All rights reserved. -This library is free software; you can redistribute it and/or -modify it under the same terms as Perl itself. - -=cut - diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/README b/src/externals/pio1/tests/testpio/perl5lib/XML/README deleted file mode 100644 index 6234a760cec..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/README +++ /dev/null @@ -1,20 +0,0 @@ -XML::Lite - -A light-weight, read-only XML parser for small files - -This XML parser is written in pure perl and provides basic methods -for most things you need to do with XML files. - -It is not dependent on any other modules or external programs for installation. - -NOTE that this parser will do many things that you want with XML but -not everything. It is not a validating parser! It will not handle -international characters (unless run on those systems). Use -at your own risk. - -Copyright 2001-2003 Wadsack-Allen. All rights reserved. - -This library is free software; you can redistribute it and/or -modify it under the same terms as Perl itself. 
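[Editor's note] For context on the parser removed above, a minimal usage sketch follows; it is not part of the patch. The XML::Lite->new( xml => ... ) constructor and the root_element() accessor are assumed from the module's customary interface, and the input file name is hypothetical; get_children() and get_text() are the element methods defined in the deleted code above.

    use XML::Lite;

    # Assumed constructor: accepts a file name or an XML string via the 'xml' key.
    my $doc  = XML::Lite->new( xml => 'config.xml' );

    # Assumed accessor: returns the root element as an XML::Lite::Element object.
    my $root = $doc->root_element();

    # get_children() and get_text() are shown in the removed module code above.
    foreach my $child ( $root->get_children() ) {
        print $child->get_text(), "\n";
    }
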
- - diff --git a/src/externals/pio1/tests/testpio/test.csh b/src/externals/pio1/tests/testpio/test.csh deleted file mode 100755 index 4a04065f31a..00000000000 --- a/src/externals/pio1/tests/testpio/test.csh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/csh -# ./testpio_bench.pl --iofmt pnc --pecount 128 --bench POPC --numIO 8 -# ./testpio_bench.pl --maxiter 2 --iofmt pnc --pecount 64 --bench POPD --numIO 12 -set id = `date "+%m%d%y-%H%M"` -# ./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench POPC --numIO 6 -# ./testpio_bench.pl --maxiter 1 --iofmt pnc --pecount 128 --bench POPD --numIO -8 --log frost.128.pnc.iotask_8.log.${id} -# ./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 64 --bench CAM05 --numIO 6 -# ./testpio_bench.pl --maxiter 10 --iofmt pnc --pecount 256 --bench WRFB --numIO 40 - -./testpio_bench.pl --bench CAM05 --iofmt pnc --pecount 256 --numIO -7 diff --git a/src/externals/pio1/tests/testpio/test_lib.F90 b/src/externals/pio1/tests/testpio/test_lib.F90 deleted file mode 100644 index da8ebf4952c..00000000000 --- a/src/externals/pio1/tests/testpio/test_lib.F90 +++ /dev/null @@ -1,244 +0,0 @@ -program Test_Lib - use pio !everything is forwarded from the PIO module, should be the only use necessary - use pio_kinds - implicit none - - include 'mpif.h' - type (Var_desc_t) :: var_handle_no_comp !type handle for normal, uncompressed PIO variables - type (Var_desc_t) :: var_handle !type handle for compressed, VDC variables - character :: vdf_path*100 !location to save the vdf file and it's related data - character :: binary_path*100 !location to save the binary data - type (File_desc_t) :: file_handle !each open file requires a separate file handle, this one is for VDC data - type (File_desc_t) :: file_handle_no_comp !file handle for normal PIO variables - type (IOsystem_desc_t) :: iosystem !PIO type handle to hold PIO-specific information about a file's IO settings - type (io_desc_t) :: iodesc !PIO type handle to hold PIO-specific information about a file's IO decomposition - integer(i4) :: rank !MPI Rank of the current process - integer(i4) :: ierr !general error code variable - integer(i4) :: iostat !PIO-specific error code variable - integer(i4) :: dim_ids(3) !used in the uncompressed PIO for defining dimensions used in a *cdf file - integer(i4) :: nioprocs !used to tell PIO how many IO procs you want to use, functions as the max # of IO procs wanted when using compression, less may be used - integer(i4) :: nprocs !the # of processes involve in the MPI run - integer(i4) :: dims(3) !the 3D grid size used to write VDC data - integer(i4) :: n !counter - integer(kind=PIO_Offset) :: dpp !data per process, the amount of data each MPI task contributes to the overall file - integer(kind=PIO_Offset),allocatable :: compdof(:) !computational degrees-of-freedom, this array holds the mapping from the local - !slice of computational data to the global grid - real (r4), allocatable :: array(:), read_array(:) !arrays holding the local computational data -#ifdef DEBUG - double precision :: start, end, temp !timing variables -#endif - - !first set locals for vdc compression and the uncompressed data path - dims = (/1024, 1024, 1024/) - vdf_path = '/glade/scratch/ypolius/piovdc/libbench.vdf' - binary_path = '/glade/scratch/ypolius/piovdc/benchdata.nc' - nioprocs = 64 - - !init MPI and retrieve MPI-specific info - call MPI_init(ierr) - call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr) - call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr) - -#ifdef DEBUG - if(rank .eq. 
0 ) then - print *, 'Initiating PIO...' - endif -#endif - - !call PIO_init to initiate iosystem - call PIO_init(rank, MPI_COMM_WORLD, nioprocs, nioprocs, 1, PIO_rearr_box, iosystem, 0) - -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'PIO Initiated procs: ', nioprocs - endif -#endif - - !set data-per-process to be the # of grid elements / # of computational procs - !conversions to PIO_Offset int to allow for extremely large dims, ex 2048 x 2048 x 2048 - dpp = int(dims(1), kind=PIO_Offset) * int(dims(2), kind=PIO_Offset) * int(dims(3), kind=PIO_Offset) / int(nprocs, kind=PIO_Offset) - - !allocate local memory arrays - if(allocated(array)) then - deallocate(array) - endif - - allocate(array(dpp)) - allocate(read_array(dpp)) - -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'Allocated write and read arrays' - print *, 'File type', file_handle%iotype - endif -#endif - - !allocate compdof - if(allocated(compdof)) then - deallocate(compdof) - endif - - allocate(compdof(dpp)) - -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'Allocated compdof' - print *, 'Filling compdof array...dpp-comp: ', dpp, ' dpp-io: ', product(dims)/nioprocs, ' dims: ' , dims, ' int limit: ' , huge(compdof(1)), ' sample calc: ' , product(dims) - endif -#endif - - !setup mapping from local data to global grid, fill local data with sample data - !sample data (array) is a constant for this example, but can be any floating point value so user data may be used for this - !this example uses a simple linear mapping, ex rank 0 points to global data [0, dpp], rank 1 points to global data[dpp, dpp *2] - !any mapping is possible, care must be taken to make sure that the mapping is strictly 1-1 or PIO will report errors - do n = 1, dpp - compdof(n) = int(n + rank * dpp,kind=pio_offset) !INT(REAL(n) + REAL(rank) * REAL(dpp)) - array(n) = 53.0 - if(compdof(n) .lt. 0) then - print *, ' n: ' , n , ' compdof(n): ' , compdof(n), ' array(n): ', array(n) - end if - end do - -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'Filled compdof array: ', compdof(1) , '-', compdof(2) - print *, 'Filled data array: '!, array(1), '-' , array(2) - endif -#endif - -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'Initializing decomposition...' - endif -#endif - -#ifdef DEBUG - start = MPI_WTIME() -#endif - - !call init_decomp in order to setup the IO decomposition with PIO - ! The optional parameter num_ts is required to indicate the vdc output method - call PIO_initdecomp(iosystem, PIO_real, dims, compdof, iodesc, num_ts=10) - - !example using optional bsize and # timesteps specifiers - !call PIO_initdecomp(iosystem, PIO_real, dims, compdof, iodesc, bsize=(/128, 128, 128/), num_ts=30) -#ifdef DEBUG - if(rank .eq. 0) then - print *, 'Decomposition initialized' - print *, 'Creating vdf file' - endif - print *, 'Rank: ', rank, 'Decomposition rearrangment runtime: ' , MPI_WTIME() - start -#endif - - !use create file with VDC2 io type to begin the creation of a VDF metadata file, not valid until enddef is called - ierr = PIO_CreateFile(iosystem, file_handle, PIO_iotype_vdc2, vdf_path, PIO_clobber) - -#ifdef DEBUG - if(rank .eq. 0) then - print *, 'VDF file created' - print *, 'Opening vdf var for writing...(vx0)' - endif -#endif - - !define the variables that will be written into the VDC - !VDC WRITING DOES NOT REQUIRE CREATING DIMS, THERE ARE ALWAYS 3, dims = (/x, y, z/) - iostat = PIO_def_var(file_handle, 'vx' , PIO_real,var_handle) - -#ifdef DEBUG - if(rank .eq. 
0 ) then - print *, 'Opened var for writing' - endif -#endif - - !finally call enddef to have the VDF metadata file written out - ierr = PIO_enddef(file_handle) - call PIO_setframe(var_handle, int(0, PIO_OFFSET)) -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'Ended VDF definition' - endif - start = MPI_WTIME() -#endif - - !to write data call PIO_write_darray, the only difference with compressed vs uncompressed - !writing is that compressed writing requires that the the user inputs the current time step - !corresponding to the variable about to be written - call PIO_write_darray(file_handle, var_handle, iodesc, array, iostat) - -#ifdef DEBUG - print *, 'Rank: ', rank, ' vdc write time: ', MPI_WTIME() - start -#endif - -#ifdef DEBUG - if(rank .eq. 0) then - print *, 'Attempting to read back data' - endif - start = MPI_WTIME() -#endif - - - !to read data call PIO_read_darray, the only difference with compressed vs uncompressed - !reading is that compressed reading requires that the the user inputs the current time step - !corresponding to the variable about to be read - call PIO_read_darray(file_handle, var_handle, iodesc, read_array, iostat) - -#ifdef DEBUG - print *, 'Rank: ', rank, ' vdc read time: ' , MPI_WTIME() - start -#endif - - !Setup for UNCOMPRESSED files - - !Same call as the VDF file, but we switch the IO type to netcdf - ierr = PIO_CreateFile(iosystem, file_handle_no_comp, PIO_iotype_pnetcdf, binary_path, PIO_clobber) - - !define the dimensions to be used with the file PIO writes - iostat = PIO_def_dim(file_handle_no_comp, 'z', dims(3), dim_ids(3)) - iostat = PIO_def_dim(file_handle_no_comp, 'y', dims(2), dim_ids(2)) - iostat = PIO_def_dim(file_handle_no_comp, 'x', dims(1), dim_ids(1)) - - !define variables to be written - iostat = PIO_def_var(file_handle_no_comp, 'vx', PIO_real, dim_ids, var_handle_no_comp) - - !end definition to make file valid - ierr = PIO_enddef(file_handle_no_comp) - -#ifdef DEBUG - start = MPI_WTIME() -#endif - - !write data, like compressed files but no timestep required at the end - call pio_write_darray(file_handle_no_comp, var_handle_no_comp, iodesc, array, iostat) - -#ifdef DEBUG - - print *, 'Rank: ', rank , ' pure NC write_darray runtime: ' , MPI_WTIME() - start - -#endif - - !close non-compressed files, compressed files are automatically closed - call PIO_CloseFile(file_handle_no_comp) - -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'Closed PIO file' - endif -#endif - - !clean up PIO - call PIO_Finalize(iosystem, ierr) - -#ifdef DEBUG - if(rank .eq. 0 ) then - print *, 'Finalized PIO' - endif -#endif - - !clean up data arrays - deallocate(compdof) - deallocate(array) - - !clean up MPI - - call MPI_Finalize(ierr) - stop -endprogram diff --git a/src/externals/pio1/tests/testpio/testdecomp.F90 b/src/externals/pio1/tests/testpio/testdecomp.F90 deleted file mode 100644 index 6ea015fd56e..00000000000 --- a/src/externals/pio1/tests/testpio/testdecomp.F90 +++ /dev/null @@ -1,45 +0,0 @@ - program testdecomp - -! xlf90 -g -I/usr/local/include -c kinds_mod.F90 -! xlf90 -g -DSTANDALONE_TEST -I/usr/local/include -c gdecomp_mod.F90 -! 
xlf90 -g -DSTANDALONE_TEST -I/usr/local/include -L/usr/local/lib -lnetcdf gdecomp_mod.F90 testdecomp.F90 - - use gdecomp_mod - - implicit none - - integer, pointer :: compDOF(:), ioDOF(:) - integer :: startcomp(3),cntcomp(3) - integer :: startio(3),cntio(3),gdims(3) - integer :: my_task,num_tasks - logical :: test - character(len=*),parameter :: fin = 'testdecomp_in' - character(len=*),parameter :: progname = 'testdecomp' - type(gdecomp_type) :: gdecomp - - test = .true. - - my_task = 0 - num_tasks = 192 - gdims(1) = 3600 - gdims(2) = 2400 - gdims(3) = 40 - -! call gdecomp_read_nml(gdecomp,fin,'comp',my_task) -! print *,'after gdecomp_read_nml' -! call gdecomp_DOF(gdecomp,my_task,compDOF,startcomp,cntcomp,test=test,write_decomp=.false.) - - call gdecomp_read_nml(gdecomp,fin,'io',my_task) - call gdecomp_DOF(gdecomp,my_task,ioDOF,startio,cntio,test=test,write_decomp=.true.) - - call gdecomp_read_nml(gdecomp,fin,'comp',my_task,num_tasks,gdims) - call gdecomp_DOF(gdecomp,my_task,compDOF,startcomp,cntcomp,test=test) - - call gdecomp_set(gdecomp,name='xx1',my_task=my_task, & - nxg=360,nyg=240,nzg=10,gdz=5,npes=32,nblksppe=4, & - grdorder='zxy',grddecomp='xy',blkorder='xyz',blkdecomp1='xyz') - call gdecomp_DOF(gdecomp,my_task,compDOF,startcomp,cntcomp,test=test,write_decomp=.true.) - - write(6,*) ' testdecomp completed successfully ' - - end program testdecomp diff --git a/src/externals/pio1/tests/testpio/testdecomp.bluefire.run b/src/externals/pio1/tests/testpio/testdecomp.bluefire.run deleted file mode 100644 index d3f1ddbf79c..00000000000 --- a/src/externals/pio1/tests/testpio/testdecomp.bluefire.run +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/csh -f - -#BSUB -n 1 -###BSUB -R "span[ptile=32]" -#BSUB -q share -#BSUB -N -###BSUB -x -#BSUB -a poe -#BSUB -o poe.stdout.%J -#BSUB -e poe.stderr.%J -#BSUB -J testdecomp -#BSUB -W 0:10 -#BSUB -P 93300006 - -##set srcdir = "/fis/cgd/ccr/tcraig/pio-sandbox/trunk/testpio" -set srcdir = `pwd` -set wrkdir = "/ptmp/tcraig/testdecomp" -set LID = "`date +%y%m%d-%H%M%S`" - -if (! -d $wrkdir) mkdir $wrkdir -cd $wrkdir -rm -f ./testdecomp -cp -f $srcdir/testdecomp ./testdecomp -rm -f ./testdecomp_in -cp -f $srcdir/testdecomp_in ./testdecomp_in - -mpirun.lsf ./testdecomp >& testdecomp.out.$LID - -cp testdecomp.out.$LID $srcdir/ - - - diff --git a/src/externals/pio1/tests/testpio/testdecomp_in b/src/externals/pio1/tests/testpio/testdecomp_in deleted file mode 100644 index d8dc90bb822..00000000000 --- a/src/externals/pio1/tests/testpio/testdecomp_in +++ /dev/null @@ -1,36 +0,0 @@ -&compdof_nml - nxg = 3600 - nyg = 2400 - nzg = 40 - npes = 2000 - nblksppe = 1 - grdorder = 'xyz' - grddecomp = 'yze' - gdx = 90 - gdy = 96 - gdz = 40 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ -&iodof_nml - nxg = 3600 - nyg = 2400 - nzg = 40 - npes = 192 - nblksppe = 1 - grdorder = 'xy' - grddecomp = 'yze' - gdx = 3600 - gdy = 25 - gdz = 20 - blkorder = 'xyz' - blkdecomp1 = 'xy' - blkdecomp2 = '' - bdx = 0 - bdy = 0 - bdz = 0 -/ diff --git a/src/externals/pio1/tests/testpio/testpio.F90 b/src/externals/pio1/tests/testpio/testpio.F90 deleted file mode 100644 index 010faca5b6c..00000000000 --- a/src/externals/pio1/tests/testpio/testpio.F90 +++ /dev/null @@ -1,1690 +0,0 @@ -#ifdef BGP -#define BGx -#endif -#ifdef BGL -#define BGx -#endif -#ifdef TIMING -#define MEMCHK -#endif -!> -!! @file testpio.F90 -!! An example of how PIO can be used -!< -program testpio - ! Modules from PIO package that are used by this application - - use kinds_mod - !> - !! 
Use PIO methods and data structures - !< - use pio ! _EXTERNAL - - use utils_mod -#ifdef TIMING - use perf_mod ! _EXTERNAL -#endif - use pio_support, only : piodie , checkmpireturn, pio_writedof, pio_readdof !_EXTERNAL - ! Modules from testpio suite that are used by this application - - use gdecomp_mod, only: gdecomp_type, gdecomp_DOF, gdecomp_read_nml, camlike_decomp_generator, mpas_decomp_generator - use alloc_mod ! _EXTERNAL - use check_mod - use namelist_mod -#ifndef NO_MPIMOD - use mpi ! _EXTERNAL -#endif - implicit none -#ifdef NO_MPIMOD - include 'mpif.h' ! _EXTERNAL -#endif - ! Code name, used in debug prints and passed to called routines for flow tracing - character(len=*), parameter :: myname='testpio' - - integer(i4) :: my_task, ierr - integer(i4) :: iostat - integer(i4) :: indx - integer(i4) :: mode - - integer(i4) :: ip,numPhases - character(len=*), parameter :: TestR8CaseName = 'r8_test' - character(len=*), parameter :: TestR4CaseName = 'r4_test' - character(len=*), parameter :: TestI4CaseName = 'i4_test' - character(len=*), parameter :: TestComboCaseName = 'combo_test' - - type (iosystem_desc_t), pointer :: PIOSYS - type (iosystem_desc_t), target :: piosystems(1) - type (File_desc_t) :: File, File_r8,File_r4,File_i4 - type (Var_desc_t) :: vard_i4, & - vard_r8c,vard_r4c,vard_i4c, & - vard_i4i,vard_i4j,vard_i4k,vard_i4m,vard_i4dof - type(var_desc_t), pointer :: vard_r8(:), vard_r4(:) - - type (IO_desc_t) :: IOdesc_r8,IOdesc_r4,IOdesc_i4 - type (gdecomp_type) :: gdecomp - - real(r8), parameter :: MBYTES = 1.0e-6 - - integer(i4) :: cbad, ivar - integer(i4) :: i,j,is,ie,itmp,it,n,i1,j1,k1 - integer(i4) :: gDims3D(3) - - character(6) :: ew_type,ns_type - character(len=10) :: varname - - integer(i4) :: varid,dimid_x,dimid_y,dimid_z - - integer(kind=PIO_OFFSET),parameter :: one = 1 - - integer, parameter :: ntest = 5 - integer(i4), dimension(ntest),parameter :: num_agg =(/ 8,12,16,24,32/) - - integer(i4),pointer :: test_i4wr(:),test_i4rd(:),diff_i4(:) - integer(i4),pointer :: test_i4i(:),test_i4j(:),test_i4k(:),test_i4m(:),test_i4dof(:) - real(r4), pointer :: test_r4wr(:),test_r4rd(:),diff_r4(:) - real(r8), pointer :: test_r8wr(:),test_r8rd(:),diff_r8(:) - - - logical :: TestR8 = .false. - logical :: TestR4 = .false. - logical :: TestInt = .false. - logical :: TestCombo = .true. - logical :: CheckArrays = .true. ! turn off the array check for maximum memory usage testing - - - logical :: writePhase, readPhase - logical, parameter :: splitPhase = .true. - integer :: numPhase - - real(r8) :: lsum,lsum2,gsum - real(r8) :: st,et ! start/end times for timing - real(r8) :: dt_write_r8, dt_write_r4, dt_write_i4 ! individual write times - real(r8) :: dt_read_r8, dt_read_r4, dt_read_i4 ! individual read times - ! Arrays to hold globally reduced read/write times--one element per time trial - real(r8), dimension(:), pointer :: gdt_write_r8, gdt_write_r4, gdt_write_i4 - real(r8), dimension(:), pointer :: gdt_read_r8, gdt_read_r4, gdt_read_i4 - - integer(i4) :: nprocs - integer(i4) :: lLength ! local number of words in the computational decomposition - - integer(i4), parameter :: nml_in = 10 - character(len=*), parameter :: nml_filename = 'testpio_in' - - integer(i4) :: master_task - logical :: log_master_task - integer(i4) :: nml_error - integer(kind=pio_offset) :: sdof,sdof_sum,sdof_min,sdof_max - - ! 
memory tracking stuff - integer(i4) :: msize,mrss,mrss0,mrss1,mrss2 - real(r8) :: mb_blk - real(r8),allocatable :: mem_tmp(:) - integer(i4),allocatable :: lmem(:),gmem(:,:) - - integer(kind=pio_offset), pointer :: compDOF(:), ioDOF(:) - integer(kind=pio_offset), pointer :: ioDOFR(:),ioDOFW(:) - - integer(i4) :: startIO(3),countIO(3), & - startCOMP(3), countCOMP(3), & - start(3), count(3) - integer(i4) :: lenblocks, glenr8, glenr4, gleni4 - integer(kind=PIO_OFFSET) :: startpio(3), countpio(3) - integer, parameter :: strlen=80 - character(len=strlen) :: fname, fname_r8,fname_r4,fname_i4, fnamechk - logical, parameter :: Debug = .false. - integer :: mpi_comm_compute, mpi_comm_io, mpi_icomm_cio - integer :: charlen - character(len=3) :: citer - - type(var_desc_t) :: varfn_r8, varfn_r4, varfn - -#ifdef MEMCHK - integer :: rss, mshare, mtext, mstack, lastrss=0 -#endif - - - ! Initialize MPI - - call MPI_INIT(ierr) - call CheckMPIReturn('Call to MPI_INIT()',ierr,__FILE__,__LINE__) - - - ! call enable_abort_on_exit - - call MPI_COMM_RANK(MPI_COMM_WORLD,my_task,ierr) - call CheckMPIReturn('Call to MPI_COMM_RANK()',ierr,__FILE__,__LINE__) - call MPI_COMM_SIZE(MPI_COMM_WORLD,nprocs,ierr) - call CheckMPIReturn('Call to MPI_COMM_SIZE()',ierr,__FILE__,__LINE__) - master_task = 0 - if (my_task == master_task) then - log_master_task = .true. - else - log_master_task = .false. - endif - - if(Debug) print *,'testpio: before call to t_initf' -#ifdef TIMING - !--------------------------------------------------------------- - ! timing library - !--------------------------------------------------------------- - if(Debug) print *,'testpio: point #1' - call t_initf(nml_filename, logprint=.false., logunit=6, & - mpicom=MPI_COMM_WORLD, MasterTask=log_master_task) - if(Debug) print *,'testpio: point #2' - call t_startf('testpio_total') - - if(Debug) print *,'testpio: after call to t_startf' - !----------------------------------------------------------------------------- - ! Memory test - !----------------------------------------------------------------------------- - call get_memusage(msize,mrss0) - if(Debug) print *,'testpio: after get_memusage #3' - allocate(mem_tmp(1024*1024)) ! 1 MWord, 8 MB - mem_tmp = -1.0 - call get_memusage(msize,mrss1) - if(Debug) print *,'testpio: after get_memusage #4' - deallocate(mem_tmp) - call get_memusage(msize,mrss2) - if(Debug) print *,'testpio: after get_memusage #5' - mb_blk = 0.0_r8 - if(Debug) print *,'testpio: after get_memusage #6' - if (mrss1 - mrss0 > 0) then - mb_blk = (8.0_r8)/((mrss1-mrss0)*1.0_r8) - endif - if (my_task == master_task) then - write(*,*) myname,' 8 MB memory alloc in MB is ',(mrss1-mrss0)*mb_blk - write(*,*) myname,' 8 MB memory dealloc in MB is ',(mrss1-mrss2)*mb_blk - write(*,*) myname,' Memory block size conversion in bytes is ',mb_blk*1024_r8*1024.0_r8 - endif -#endif - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - !---------------------------------------------------------------- - ! Read in namelist and set File IO Type and Format accordingly... 
- !---------------------------------------------------------------- - - if(Debug) print *,'testpio: before call to readTestPIO_Namelist' - if(my_task == master_task) then - call ReadTestPIO_Namelist(nml_in, nprocs, nml_filename, myname, nml_error) - endif - if(Debug) print *,'testpio: before call to broadcast_namelist' - call MPI_barrier(MPI_COMM_WORLD,ierr) - call Broadcast_Namelist(myname, my_task, master_task, MPI_COMM_WORLD, ierr) - if(Debug) print *,'testpio: after call to broadcast_namelist' - - !------------------------------------- - ! Checks (num_iotasks can be negative on BGx) - !------------------------------------- - -#if !defined(BGx) - if (num_iotasks <= 0) then - write(*,*) trim(myname),' ERROR: ioprocs invalid num_iotasks=',num_iotasks - call piodie(__FILE__,__LINE__) - endif -#endif - - ! ---------------------------------------------------------------- - ! if stride is and num_iotasks is incompatible than reset stride (ignore stride on BGx) - ! ---------------------------------------------------------------- -#if !defined(BGx) - if (base + num_iotasks * (stride-1) > nprocs-1) then - write(*,*) trim(myname),' ERROR: num_iotasks, base and stride too large', & - ' base=',base,' num_iotasks=',num_iotasks,' stride=',stride,' nprocs=',nprocs - call piodie(__FILE__,__LINE__) - endif -#endif - - !-------------------------------------- - ! Initalizes the parallel IO subsystem - !-------------------------------------- - call PIO_setDebugLevel(DebugLevel) - - if(Debug) print *,'testpio: before call to PIO_init' - - if(async) then -#ifdef BGx - call piodie(__FILE__,__LINE__,'async option not currently supported') -! allocate(PIOSYS) -! call PIO_init(my_task, MPI_COMM_WORLD, num_iotasks, num_aggregator, stride, & -! rearr_type, PIOSYS, base, async=.true.,mpi_comm_compute=mpi_comm_compute) -#else - call split_comm(mpi_comm_world,nprocs, num_iotasks, stride, base, & - mpi_comm_compute, mpi_comm_io, mpi_icomm_cio) - call PIO_init(1, mpi_comm_world, (/mpi_comm_compute/), mpi_comm_io, PIOSYSTEMS) - PIOSYS => PIOSYSTEMS(1) - -#endif - call MPI_COMM_RANK(MPI_COMM_COMPUTE,my_task,ierr) - call MPI_COMM_SIZE(MPI_COMM_COMPUTE,nprocs,ierr) - - else -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - mpi_comm_compute = mpi_comm_world - allocate(PIOSYS) - - call PIO_init(my_task, MPI_COMM_COMPUTE, num_iotasks, num_aggregator, stride, & - rearr_type, PIOSYS, base) - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - - end if - if(Debug) print *,'testpio: after call to PIO_init', piosys%num_tasks,piosys%io_comm - - gDims3D(1) = nx_global - gDims3D(2) = ny_global - gDims3D(3) = nz_global - - !! ** Set PIO/MPI filesystem hints ** - - if (set_mpi_values /= 0) then - if (trim(mpi_cb_buffer_size) /= '') then - call PIO_set_hint(PIOSYS, 'cb_buffer_size', trim(mpi_cb_buffer_size)) - end if - end if - - if (set_romio_values /= 0) then - if (trim(romio_cb_write) /= '') then - call PIO_set_hint(PIOSYS, 'romio_cb_write', trim(romio_cb_write)) - end if - - if (trim(romio_cb_read) /= '') then - call PIO_set_hint(PIOSYS, 'romio_cb_read', trim(romio_cb_read)) - end if - - !! NCH: Not sure if the following applies to non-XFS file systems... 
- - if (trim(romio_direct_io) /= '') then - call PIO_set_hint(PIOSYS, 'direct_read', trim(romio_direct_io)) - call PIO_set_hint(PIOSYS, 'direct_write', trim(romio_direct_io)) - end if - end if - - if (set_ibm_io_values /= 0) then - if (trim(ibm_io_buffer_size) /= '') then - call PIO_set_hint(PIOSYS, 'IBM_io_buffer_size', & - trim(ibm_io_buffer_size)) - end if - - if (trim(ibm_io_largeblock_io) /= '') then - call PIO_set_hint(PIOSYS, 'IBM_largeblock_io', & - trim(ibm_io_largeblock_io)) - end if - - if (trim(ibm_io_sparse_access) /= '') then - call PIO_set_hint(PIOSYS, 'IBM_sparse_access', & - trim(ibm_io_sparse_access)) - end if - end if - if(set_lustre_values /= 0) then - call PIO_setnum_OST(PIOSYS,lfs_ost_count) - endif - - !----------------------------------------- - ! Compute compDOF based on namelist input - !----------------------------------------- -! write(rd_buffer,('(i9)')) 64*1024*1024 -! call PIO_set_hint(PIOSYS,'cb_buffer_size',trim(adjustl(rd_buffer))) -! call PIO_set_hint(PIOSYS,'romio_cb_write','enable') -! call PIO_set_hint(PIOSYS,'romio_cb_read','disable') - - startCOMP = 0 - - if(index(casename,'CAM')==1) then - call camlike_decomp_generator(gdims3d(1),gdims3d(2),gdims3d(3),my_task,nprocs,npr_yz,compDOF) - elseif(index(casename,'MPAS')==1) then -! print *,'testpio: before call to mpas_decomp_generator: (',TRIM(part_input),') gdims3d: ',gdims3d - call mpas_decomp_generator(gdims3d(1),gdims3d(2),gdims3d(3),my_task,part_input,compDOF) - else if (trim(compdof_input) == 'namelist') then -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #1' - call gdecomp_read_nml(gdecomp,nml_filename,'comp',PIOSYS%comp_rank,PIOSYS%num_tasks,gDims3D(1:3)) - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #2' - call gdecomp_DOF(gdecomp,PIOSYS%comp_rank,compDOF,start,count) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #3', minval(compdof),maxval(compdof) -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - else - call pio_readdof(trim(compdof_input),compDOF,MPI_COMM_COMPUTE,75) - sdof = size(compDOF) - start = gDims3D(1:3) ! min tmp - count = 0 ! max tmp - do n = 1,sdof - call c1dto3d(compdof(n),gDims3D(1),gDims3D(2),gDims3D(3),i1,j1,k1) - start(1) = min(start(1),i1) - start(2) = min(start(2),j1) - start(3) = min(start(3),k1) - count(1) = max(count(1),i1) - count(2) = max(count(2),j1) - count(3) = max(count(3),k1) - enddo - do n = 1,3 - count(n) = max(count(n)-start(n)+1,0) - enddo - if (count(1)*count(2)*count(3) == sdof) then - ! start and count seem consistent with compDOF - else - ! 
start and count NOT consistent with compDOF, zero them out - start = 0 - count = 0 - endif - endif - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - - startCOMP(1:3) = start(1:3) - countCOMP(1:3) = count(1:3) - if (trim(compdof_output) /= 'none') then - call pio_writedof(trim(compdof_output),compDOF,MPI_COMM_COMPUTE,75) - endif - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - - sdof = sum(compDOF) - call MPI_REDUCE(sdof,sdof_sum,1,MPI_INTEGER8,MPI_SUM,master_task,MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_REDUCE SUM',ierr,__FILE__,__LINE__) - - sdof = minval(compDOF) - call MPI_REDUCE(sdof,sdof_min,1,MPI_INTEGER8,MPI_MIN,master_task,MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_REDUCE MIN',ierr,__FILE__,__LINE__) - - sdof = maxval(compDOF) - call MPI_REDUCE(sdof,sdof_max,1,MPI_INTEGER8,MPI_MAX,master_task,MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_REDUCE MAX',ierr,__FILE__,__LINE__) - if (my_task == master_task) then - write(6,*) trim(myname),' total nprocs = ',nprocs - write(6,*) trim(myname),' compDOF sum/min/max = ',sdof_sum,sdof_min,sdof_max - endif - - if (mod(my_task,((nprocs/32)+1)) == 0) then - if(Debug) write(6,*) trim(myname),' my_task,sdof,start,count = ',my_task,sdof,start,count - endif - - !-------------------------------- - ! calculate ioDOF - !-------------------------------- - - startIO = 0 - countIO = 0 - if (trim(rearr) == 'none') then - ioDOF => compDOF - startIO(1:3) = startCOMP(1:3) - countIO(1:3) = countCOMP(1:3) - elseif (trim(rearr) == 'box') then - ! do nothing - if (trim(iodof_input) == 'namelist') then - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #4' - call gdecomp_read_nml(gdecomp,nml_filename,'io',PIOSYS%io_rank,PIOSYS%num_iotasks,gDims3D(1:3)) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #5' - call gdecomp_DOF(gdecomp,PIOSYS%io_rank,ioDOF,start,count) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #6' - startIO(1:3) = start(1:3) - countIO(1:3) = count(1:3) - endif - elseif (trim(rearr) == 'mct') then - call gdecomp_read_nml(gdecomp,nml_filename,'io',PIOSYS%io_rank,PIOSYS%num_iotasks,gDims3D(1:3)) - call gdecomp_DOF(gdecomp,PIOSYS%io_rank,ioDOF,start,count) - startIO(1:3) = start(1:3) - countIO(1:3) = count(1:3) - else - call piodie(__FILE__,__LINE__,' rearr '//trim(rearr)//' not supported') - endif -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - ioDOFR => ioDOF - ioDOFW => ioDOF - startpio = startIO - countpio = countIO - lenblocks = countIO(1) - -! if(Debug) print *,'comp_rank,io_rank: ',piosys%comp_rank,piosys%io_rank,' ioDOF ',ioDOF - if(Debug) print *,'comp_rank: ',PIOSYS%comp_rank,SIZE(compDOF),SIZE(ioDOF) - - !----------------------------------------------------- - ! number of words on each computational processor owns - !----------------------------------------------------- - - lLength = size(compDOF) - - !---------------------- - ! allocate and set test arrays - !---------------------- - if(iotype == PIO_IOTYPE_vdc2) then - CheckArrays=.false. - TestR8 = .false. - TestR4 = .true. - TestInt = .false. - TestCombo = .false. - end if - - if(TestR8 .or. 
TestCombo) then - call alloc_check(test_r8wr,lLength,'testpio:test_r8wr') - endif - if(TestR4 .or. TestCombo) then - call alloc_check(test_r4wr,lLength,'testpio:test_r4wr' ) - endif - if(TestInt .or. TestCombo) then - call alloc_check(test_i4wr,lLength,'testpio:test_i4wr') - endif - if(TestInt) then - call alloc_check(test_i4i ,lLength,'testpio:test_i4i ') - call alloc_check(test_i4j ,lLength,'testpio:test_i4j ') - call alloc_check(test_i4k ,lLength,'testpio:test_i4k ') - call alloc_check(test_i4m ,lLength,'testpio:test_i4m ') - call alloc_check(test_i4dof,lLength,'testpio:test_i4dof') - endif - - do n = 1,lLength - call c1dto3d(compdof(n),gDims3D(1),gDims3D(2),gDims3D(3),i1,j1,k1) - if(TestInt) then - test_i4dof(n) = compdof(n) - test_i4i(n) = i1 - test_i4j(n) = j1 - test_i4k(n) = k1 - test_i4m(n) = my_task - endif - if(TestR8 .or. TestCombo) then - test_r8wr(n) = 10.0_r8*cos(20.*real(i1,kind=r8)/real(gDims3D(1),kind=r8))* & - cos(10.*real(j1,kind=r8)/real(gDims3D(2),kind=r8))* & - (1.0+1.0*real(j1,kind=r8)/real(gDims3D(2),kind=r8))* & - cos(25.*real(k1,kind=r8)/real(gDims3D(3),kind=r8)) - endif - if(TestR4 .or. TestCombo) then - test_r4wr(n) = 10.0_r4*cos(20.*real(i1,kind=r4)/real(gDims3D(1),kind=r4))* & - cos(10.*real(j1,kind=r4)/real(gDims3D(2),kind=r4))* & - (1.0+1.0*real(j1,kind=r4)/real(gDims3D(2),kind=r4))* & - cos(25.*real(k1,kind=r4)/real(gDims3D(3),kind=r4)) - endif - if(TestInt .or. TestCombo) then - test_i4wr(n) = nint(10.0_r8*cos(20.*real(i1,kind=r8)/real(gDims3D(1),kind=r8))* & - cos(10.*real(j1,kind=r8)/real(gDims3D(2),kind=r8))* & - (1.0+1.0*real(j1,kind=r8)/real(gDims3D(2),kind=r8))* & - cos(25.*real(k1,kind=r8)/real(gDims3D(3),kind=r8))*1000.0_r8) - endif - enddo - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #10' - -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - if(TestR8 .or. TestCombo) call alloc_check(test_r8rd,lLength,'testpio:test_r8rd') - if(TestInt .or. TestCombo) call alloc_check(test_i4rd,lLength,'testpio:test_i4rd') - if(TestR4 .or. TestCombo) call alloc_check(test_r4rd,lLength,'testpio:test_r4rd') - - if(TestR8 .or. TestCombo) test_r8rd(:) = 1000.00 - if(TestR4 .or. TestCombo) test_r4rd(:) = 1000.00 - if(TestInt .or. TestCombo) test_i4rd(:) = 1000 - - if(Debug) then - write(*,'(a,2(a,i8))') myname,':: Before call to OpenFile(). comp_rank=',piosys%comp_rank, & - ' io_rank=',piosys%io_rank - endif - - !-------------------------------- - ! 
allocate arrays for holding globally-reduced timing information - !-------------------------------- -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - call alloc_check(gdt_write_r8, maxiter, ' testpio:gdt_write_r8 ') - call alloc_check(gdt_read_r8, maxiter, ' testpio:gdt_read_r8 ') - call alloc_check(gdt_write_r4, maxiter, ' testpio:gdt_write_r4 ') - call alloc_check(gdt_read_r4, maxiter, ' testpio:gdt_read_r4 ') - call alloc_check(gdt_write_i4, maxiter, ' testpio:gdt_write_i4 ') - call alloc_check(gdt_read_i4, maxiter, ' testpio:gdt_read_i4 ') - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #11' -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - if(splitPhase) then - numPhases = 2 - else - numPhases = 1 - endif - - do ip=1,numPhases - if(numPhases == 1) then - readPhase = .true. - writePhase = .true. - else - if(ip == 1) then - writePhase = .true. - readPhase = .false. - else - writePhase = .false. - readPhase = .true. - endif - endif - if(log_master_task) print *,'{write,read}Phase: ',writePhase,readPhase - - - do it=1,maxiter -#ifdef MEMCHK - call GPTLget_memusage(msize, rss, mshare, mtext, mstack) - if(rss>lastrss) then - lastrss=rss - print *,__FILE__,__LINE__,'mem=',rss,' it=',it - end if -#endif - !------------------------------------------------------- - ! Explain the distributed array decomposition to PIO lib - !------------------------------------------------------- - - if (trim(rearr) == 'box') then - !JMD print *,__FILE__,__LINE__,gdims3d,minval(compdof),maxval(compdof) - - if (trim(iodof_input) == 'namelist') then - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #7' - if(TestR8 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_double, gDims3D,compDOF,& - IOdesc_r8,startpio,countpio) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #7.1' - - if(TestR4 .or. TestCombo) then - if(iotype == PIO_IOTYPE_vdc2) then - call PIO_initDecomp(PIOSYS, gDims3D,compDOF,IOdesc_r4,10) - else - call PIO_initDecomp(PIOSYS,PIO_real, gDims3D,compDOF,& - IOdesc_r4,startpio,countpio) - end if - end if - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #7.2' - if(TestInt .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_int, gDims3D,compDOF,& - IOdesc_i4,startpio,countpio) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8' - else - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.1' - if(TestR8 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_double, gDims3D,compDOF,& - IOdesc_r8) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.2' - if(TestR4 .or. TestCombo) then - if(iotype == PIO_IOTYPE_vdc2) then - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.2a' - call PIO_initDecomp(PIOSYS, gDims3D,compDOF,IOdesc_r4,10) - else - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.2b' - call PIO_initDecomp(PIOSYS,PIO_real, gDims3D,compDOF,& - IOdesc_r4) - end if - end if - - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.3' - if(TestInt .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_int, gDims3D,compDOF,& - IOdesc_i4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.4' - endif - else - if(iofmtd.eq.'nc' ) then ! 
netCDF - if (num_iodofs == 1) then - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.5' - if(TestR8 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_double, gDims3D,lenblocks,& - compDOF,ioDOF,startpio,countpio,IOdesc_r8) - if(Debug)print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.6' - if(TestR4 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_real, gDims3D,lenblocks,& - compDOF,ioDOF,startpio,countpio,IOdesc_r4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.7' - if(TestInt .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_int, gDims3D,lenblocks,& - compDOF,ioDOF,startpio,countpio,IOdesc_i4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.8' - elseif (num_iodofs == 2) then - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.9' - if(TestR8 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_double, gDims3D,lenblocks,& - compDOF,ioDOFR,ioDOFW,startpio,countpio,IOdesc_r8) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.10' - if(TestR4 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_real, gDims3D,lenblocks,& - compDOF,ioDOFR,ioDOFW,startpio,countpio,IOdesc_r4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.11' - if(TestInt .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_int, gDims3D,lenblocks,& - compDOF,ioDOFR,ioDOFW,startpio,countpio,IOdesc_i4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.12' - else - call piodie(__FILE__,__LINE__,' num_iodofs not 1 or 2') - endif - else - ! tcraig: there are cases where lenblocks is not valid here like different size IO blocks - if (num_iodofs == 1) then - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.13' - if(TestR8 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_double, gDims3D,lenblocks,& - compDOF,ioDOF,IOdesc_r8) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.14' - if(TestR4 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_real, gDims3D,lenblocks,& - compDOF,ioDOF,IOdesc_r4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.15' - if(TestInt .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_int, gDims3D,lenblocks,& - compDOF,ioDOF,IOdesc_i4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.16' - elseif (num_iodofs == 2) then - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.17' - if(TestR8 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_double, gDims3D,lenblocks,& - compDOF,ioDOFR,ioDOFW,IOdesc_r8) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.18' - if(TestR4 .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_real, gDims3D,lenblocks,& - compDOF,ioDOFR,ioDOFW,IOdesc_r4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.19' - if(TestInt .or. TestCombo) & - call PIO_initDecomp(PIOSYS,PIO_int, gDims3D,lenblocks,& - compDOF,ioDOFR,ioDOFW,IOdesc_i4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #8.20' - else - call piodie(__FILE__,__LINE__,' num_iodofs not 1 or 2') - endif - endif - endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #9' - - if(Debug) then - write(*,'(a,2(a,i8))') myname,':: After call to initDecomp. comp_rank=',piosys%comp_rank, & - ' io_rank=',piosys%io_rank - endif - - call PIO_getnumiotasks(PIOSYS,num_iotasks) - !------------ - ! 
Open file{s} - !------------ - write(citer,'(i3.3)') it - - fname = TRIM(dir)//'foo.'//citer//'.'//TRIM(Iofmtd) - fname_r8 = TRIM(dir)//'foo.r8.'//citer//'.'//TRIM(Iofmtd) - fname_r4 = TRIM(dir)//'foo.r4.'//citer//'.'//TRIM(Iofmtd) - fname_i4 = TRIM(dir)//'foo.i4.'//citer//'.'//TRIM(Iofmtd) - ! print *, __FILE__,__LINE__,'>',fname,'<' - ! print *, __FILE__,__LINE__,'>',fname_r8,'<' - ! print *, __FILE__,__LINE__,'>',fname_i4,'<' - ! print *, __FILE__,__LINE__,'>',fname_r4,'<' -#if defined(_NETCDF) || defined(_PNETCDF) - mode = pio_64bit_offset -#else - mode = 0 -#endif - - if(writePhase) then - if(TestCombo) then - if(Debug) write(*,'(2a,i8)') myname,':: Combination Test: Creating File...it=',it - ierr = PIO_CreateFile(PIOSYS,File,iotype,trim(fname), mode) - call check_pioerr(ierr,__FILE__,__LINE__,' combo createfile') - endif - - if(TestR8) then - if (Debug) write(*,'(2a,i8)') myname,':: REAL*8 Test: Creating File...it=',it - ierr = PIO_CreateFile(PIOSYS,File_r8,iotype,trim(fname_r8), mode) - call check_pioerr(ierr,__FILE__,__LINE__,' r8 createfile') - endif - - if(TestR4) then - if(Debug) write(*,'(2a,i8)') myname,':: REAL*4 Test: Creating File...,it=',it - ierr = PIO_CreateFile(PIOSYS,File_r4,iotype,trim(fname_r4), mode) - call check_pioerr(ierr,__FILE__,__LINE__,' r4 createfile') - endif - - if(TestInt) then - if(Debug) write(*,'(2a,i8)') myname,':: INTEGER*4 Test: Creating File...,it=',it - ierr = PIO_CreateFile(PIOSYS,File_i4,iotype,trim(fname_i4), mode) - call check_pioerr(ierr,__FILE__,__LINE__,' i4 createfile') - endif - - - allocate(vard_r8(nvars), vard_r4(nvars)) - - - !--------------------------- - ! Code specifically for netCDF files - !--------------------------- - if(iotype == iotype_pnetcdf .or. & - iotype == iotype_netcdf .or. & - iotype == PIO_iotype_netcdf4p .or. & - iotype == PIO_iotype_netcdf4c .or. & - iotype == PIO_IOTYPE_vdc2) then - - if(TestR8) then - !----------------------------------- - ! for the single record real*8 file - !----------------------------------- - call WriteHeader(File_r8,nx_global,ny_global,nz_global,dimid_x,dimid_y,dimid_z) - - iostat = PIO_def_dim(File_r8,'charlen',strlen,charlen) - iostat = PIO_def_var(File_r8,'filename',pio_char,(/charlen/),varfn_r8) - - - do ivar = 1, nvars - write(varname,'(a,i5.5,a)') 'field',ivar,char(0) - iostat = PIO_def_var(File_r8,varname,PIO_double,(/dimid_x,dimid_y,dimid_z/),vard_r8(ivar)) - call check_pioerr(iostat,__FILE__,__LINE__,' r8 defvar') - end do - iostat = PIO_enddef(File_r8) - call check_pioerr(iostat,__FILE__,__LINE__,' r8 enddef') - endif - - if(TestR4) then - !----------------------------------- - ! for the single record real*4 file - !----------------------------------- - if(iotype /= PIO_IOTYPE_vdc2) then - call WriteHeader(File_r4,nx_global,ny_global,nz_global,dimid_x,dimid_y,dimid_z) - iostat = PIO_def_dim(File_r4,'charlen',strlen,charlen) - iostat = PIO_def_var(File_r4,'filename',pio_char,(/charlen/),varfn_r4) - end if - - do ivar = 1, nvars - write(varname,'(a,i5.5,a)') 'field',ivar - iostat = PIO_def_var(File_r4,varname,PIO_real,(/dimid_x,dimid_y,dimid_z/),vard_r4(ivar)) - call check_pioerr(iostat,__FILE__,__LINE__,' r4 defvar') - end do - iostat = PIO_enddef(File_r4) - call check_pioerr(iostat,__FILE__,__LINE__,' i4 enddef') - endif - - if(TestInt) then - !----------------------------------- - ! 
for the single record integer file - !----------------------------------- - call WriteHeader(File_i4,nx_global,ny_global,nz_global,dimid_x,dimid_y,dimid_z) - iostat = PIO_def_var(File_i4,'fdof',PIO_int,(/dimid_x,dimid_y,dimid_z/),vard_i4dof) - call check_pioerr(iostat,__FILE__,__LINE__,' i4dof defvar') - - iostat = PIO_def_var(File_i4,'field',PIO_int,(/dimid_x,dimid_y,dimid_z/),vard_i4) - call check_pioerr(iostat,__FILE__,__LINE__,' i4 defvar') - iostat = PIO_def_var(File_i4,'fi',PIO_int,(/dimid_x,dimid_y,dimid_z/),vard_i4i) - call check_pioerr(iostat,__FILE__,__LINE__,' i4i defvar') - iostat = PIO_def_var(File_i4,'fj',PIO_int,(/dimid_x,dimid_y,dimid_z/),vard_i4j) - call check_pioerr(iostat,__FILE__,__LINE__,' i4j defvar') - iostat = PIO_def_var(File_i4,'fk',PIO_int,(/dimid_x,dimid_y,dimid_z/),vard_i4k) - call check_pioerr(iostat,__FILE__,__LINE__,' i4k defvar') - iostat = PIO_def_var(File_i4,'my_task',PIO_int,(/dimid_x,dimid_y,dimid_z/),vard_i4m) - call check_pioerr(iostat,__FILE__,__LINE__,' i4m defvar') - - - iostat = PIO_enddef(File_i4) - call check_pioerr(iostat,__FILE__,__LINE__,' i4 enddef') - endif - - if(TestCombo) then - !----------------------------------- - ! for the multi record file - !----------------------------------- - call WriteHeader(File,nx_global,ny_global,nz_global,dimid_x,dimid_y,dimid_z) - iostat = PIO_def_var(File,'field_r8',PIO_double,(/dimid_x,dimid_y,dimid_z/),vard_r8c) - call check_pioerr(iostat,__FILE__,__LINE__,' combo r8 defvar') - iostat = PIO_def_var(File,'field_r4',PIO_real,(/dimid_x,dimid_y,dimid_z/),vard_r4c) - call check_pioerr(iostat,__FILE__,__LINE__,' combo r4 defvar') - iostat = PIO_def_var(File,'field_i4',PIO_int,(/dimid_x,dimid_y,dimid_z/),vard_i4c) - call check_pioerr(iostat,__FILE__,__LINE__,' combo i4 defvar') - - iostat = PIO_def_dim(File,'charlen',strlen,charlen) - iostat = PIO_def_var(File,'filename',pio_char,(/charlen/),varfn) - - - - iostat = PIO_enddef(File) - call check_pioerr(iostat,__FILE__,__LINE__,' combo enddef') - endif - - endif ! if(iotype == iotype_pnetcdf .or. iotype == iotype_netcdf ) then - - ! Set Frame to '1' in the PIO descriptor file - do ivar=1,nvars - call PIO_SetFrame(vard_r8(ivar),one) - call PIO_SetFrame(vard_r4(ivar),one) - end do - - call PIO_SetFrame(vard_i4,one) - call PIO_SetFrame(vard_r8c,one) - call PIO_SetFrame(vard_r4c,one) - call PIO_SetFrame(vard_i4c,one) - call PIO_SetFrame(vard_i4i,one) - call PIO_SetFrame(vard_i4j,one) - call PIO_SetFrame(vard_i4k,one) - call PIO_SetFrame(vard_i4m,one) - call PIO_SetFrame(vard_i4dof,one) - - if(Debug) then - write(*,'(a,2(a,i8))') myname,':: After call to OpenFile. comp_rank=',piosys%comp_rank, & - ' io_rank=',piosys%io_rank - endif - - !------------------------- - ! Time the parallel write - !------------------------- - - - - if(TestR8) then - if(iofmtd .ne. 'bin') then - iostat = pio_put_var(file_r8,varfn_r8,fname_r8) - end if - - - dt_write_r8 = 0. 
- if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #9.0.1' - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #9.0.2' - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #9.0.3' - st = MPI_Wtime() -#ifdef TIMING - call t_startf('testpio_write') -#endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #9.0.4' - do ivar=1,nvars - call PIO_write_darray(File_r8,vard_r8(ivar), iodesc_r8, test_r8wr, iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' r8 write_darray') - end do -#ifdef TIMING - call t_stopf('testpio_write') -#endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #9.1' - call PIO_CloseFile(File_r8) - et = MPI_Wtime() - dt_write_r8 = dt_write_r8 + (et - st)/nvars - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #9.2' - endif - - if(TestR4) then - if(iofmtd(2:3) .eq. 'nc') then - iostat = pio_put_var(file_r4,varfn_r4,fname_r4) - end if - dt_write_r4 = 0. - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - st = MPI_Wtime() -#ifdef TIMING - call t_startf('testpio_write') -#endif - do ivar=1,nvars - call PIO_write_darray(File_r4,vard_r4(ivar),iodesc_r4, test_r4wr,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' r4 write_darray') - end do -#ifdef TIMING - call t_stopf('testpio_write') -#endif - call PIO_CloseFile(File_r4) - et = MPI_Wtime() - dt_write_r4 = dt_write_r4 + (et - st)/nvars - endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #13' - - if(TestInt) then - dt_write_i4 = 0. - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - - call PIO_write_darray(File_i4,vard_i4dof,iodesc_i4,test_i4dof,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' i4dof write_darray') - - st = MPI_Wtime() -#ifdef TIMING - call t_startf('testpio_write') -#endif - call PIO_write_darray(File_i4,vard_i4,iodesc_i4,test_i4wr,iostat) -#ifdef TIMING - call t_stopf('testpio_write') -#endif - et = MPI_Wtime() - dt_write_i4 = dt_write_i4 + et - st - - - call PIO_write_darray(File_i4,vard_i4i,iodesc_i4,test_i4i,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' i4i write_darray') - - call PIO_write_darray(File_i4,vard_i4j,iodesc_i4,test_i4j,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' i4j write_darray') - - call PIO_write_darray(File_i4,vard_i4k,iodesc_i4,test_i4k,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' i4k write_darray') - - call PIO_write_darray(File_i4,vard_i4m,iodesc_i4,test_i4m,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' i4m write_darray') - call PIO_CloseFile(File_i4) - endif - - if(TestCombo) then - if(iofmtd .ne. 'bin') then - iostat = pio_put_var(file,varfn,fname) - end if - - call PIO_write_darray(File,vard_r8c,iodesc_r8,test_r8wr,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' combo r8 write_darray') - call PIO_write_darray(File,vard_r4c,iodesc_r4, test_r4wr,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' combo r4 write_darray') - call PIO_write_darray(File,vard_i4c,iodesc_i4, test_i4wr,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' combo i4 write_darray') - call PIO_CloseFile(File) - endif - - if(Debug) then - write(*,'(a,2(a,i8),i8)') myname,':: After calls to PIO_write_darray. 
comp_rank=',piosys%comp_rank, & - ' io_rank=',piosys%io_rank,piosys%io_comm - - endif - - endif - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #14' - - - if (readPhase) then - !------------------------------------- - ! Open the file back up and check data - !------------------------------------- - - if(TestR8) then - ierr = PIO_OpenFile(PIOSYS, File_r8, iotype, fname_r8) - call check_pioerr(ierr,__FILE__,__LINE__,' r8 openfile') - endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #15' - - if(TestR4) then - ierr = PIO_OpenFile(PIOSYS,File_r4,iotype, fname_r4) - call check_pioerr(ierr,__FILE__,__LINE__,' r4 openfile') - endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #16' - - if(TestInt) then - ierr = PIO_OpenFile(PIOSYS,File_i4,iotype, fname_i4) - call check_pioerr(ierr,__FILE__,__LINE__,' int openfile') - endif - - ! if(TestCombo) ierr = PIO_OpenFile(PIOSYS,File,iotype,fname) - if(Debug) then - write(*,'(2a,i8)') myname,':: After calls to PIO_OpenFile. my_task=',my_task - endif - - if(Debug) print *,__FILE__,__LINE__ - - if(iotype == iotype_pnetcdf .or. & - iotype == iotype_netcdf .or. & - iotype == pio_iotype_vdc2) then - do ivar=1,nvars - if(TestR8) then - iostat = PIO_inq_varid(file_r8,'filename',varfn_r8) - - - iostat = PIO_inq_varid(File_r8,'field00001',vard_r8(ivar)) - call check_pioerr(iostat,__FILE__,__LINE__,' r8 inq_varid') - endif - - if(TestR4) then - if(iofmtd(2:3) .eq. 'nc') then - iostat = PIO_inq_varid(file_r4,'filename',varfn_r4) - end if - if(iotype == pio_iotype_vdc2) then - - !there currently exists no vdc concept of inquiring a variable, the only thing in the var_desc_t - !directly used by the writing/reading is var name - iostat = PIO_def_var(File_r4,'field00001', PIO_real, vard_r4(ivar)) - - else - iostat = PIO_inq_varid(File_r4,'field00001',vard_r4(ivar)) - endif - call check_pioerr(iostat,__FILE__,__LINE__,' r4 inq_varid') - endif - end do - if(TestInt) then - iostat = PIO_inq_varid(File_i4,'field',vard_i4) - call check_pioerr(iostat,__FILE__,__LINE__,' i4 inq_varid') - endif - - endif ! if((iotype == iotype_pnetcdf) .or (iotype == iotype_netcdf))... - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #17' - if(Debug) print *,__FILE__,__LINE__ - do ivar=1,nvars - call PIO_SetFrame(vard_r8(ivar),one) - call PIO_SetFrame(vard_r4(ivar),one) - end do - call PIO_SetFrame(vard_i4,one) - call PIO_SetFrame(vard_r8c,one) - call PIO_SetFrame(vard_r4c,one) - call PIO_SetFrame(vard_i4c,one) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #18' - - !------------------------- - ! Time the parallel read - !------------------------- - if(TestR8) then - if(iofmtd(2:3) .eq. 'nc') then - iostat = pio_get_var(file_r8,varfn_r8, fnamechk) - if(fnamechk /= fname_r8) then - print *,__FILE__,__LINE__,'fname chk failed: ',fname_r8,fnamechk - end if - end if - - dt_read_r8 = 0. 
- call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - st = MPI_Wtime() -#ifdef TIMING - call t_startf('testpio_read') -#endif - if(Debug) print *,__FILE__,__LINE__ - do ivar=1,nvars - call PIO_read_darray(File_r8,vard_r8(ivar),iodesc_r8,test_r8rd,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' r8 read_darray') - enddo - if(Debug) print *,__FILE__,__LINE__ -#ifdef TIMING - call t_stopf('testpio_read') -#endif - et = MPI_Wtime() - dt_read_r8 = dt_read_r8 + (et - st)/nvars - call check_pioerr(iostat,__FILE__,__LINE__,' r8 read_darray') - endif - - if(TestR4) then - if(iofmtd(2:3) .eq. 'nc') then - iostat = pio_get_var(file_r8,varfn_r8, fnamechk) - - if(fnamechk /= fname_r4) then - print *,__FILE__,__LINE__,'fname chk failed: ',fname_r4,fnamechk - end if - end if - dt_read_r4 = 0. - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - st = MPI_Wtime() -#ifdef TIMING - call t_startf('testpio_read') -#endif - do ivar=1,nvars - call PIO_read_darray(File_r4,vard_r4(ivar),iodesc_r4,test_r4rd,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' r4 read_darray') - enddo - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #19' -#ifdef TIMING - call t_stopf('testpio_read') -#endif - et = MPI_Wtime() - dt_read_r4 = dt_read_r4 + (et - st)/nvars - call check_pioerr(iostat,__FILE__,__LINE__,' r4 read_darray') - endif - - if(TestInt) then - dt_read_i4 = 0. - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - st = MPI_Wtime() -#ifdef TIMING - call t_startf('testpio_read') -#endif - call PIO_read_darray(File_i4,vard_i4,iodesc_i4, test_i4rd,iostat) -#ifdef TIMING - call t_stopf('testpio_read') -#endif - et = MPI_Wtime() - dt_read_i4 = dt_read_i4 + et - st - call check_pioerr(iostat,__FILE__,__LINE__,' i4 read_darray') - endif - - !------------------------------- - ! Print the maximum memory usage - !------------------------------- -! call alloc_print_usage(0,'testpio: after calls to PIO_read_darray') - -#ifdef TESTMEM -! stop -#endif - - if(Debug) then - write(*,'(a,2(a,i8))') myname,':: After PIO_read_darray tests, my_task=', & - my_task,', it=',it - endif - - !------------------- - ! close the file up - !------------------- - if(TestR8) call PIO_CloseFile(File_r8) - if(TestR4) call PIO_CloseFile(File_r4) - if(TestInt) call PIO_CloseFile(File_i4) - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #20' - - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to MPI_BARRIER()',ierr,__FILE__,__LINE__) - -! if(Debug) then -! write(*,*) myname,':: my_task=',my_task,'test_r8wr= ',test_r8wr -! if(TestR8 .or. TestCombo) write(*,*) myname,':: my_task=',my_task,'test_r8rd= ',test_r8rd -! endif - - !----------------------------- - ! Perform correctness testing - !----------------------------- - if(TestR8 .and. CheckArrays) then - call checkpattern(mpi_comm_compute, fname_r8,test_r8wr,test_r8rd,lLength,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' checkpattern r8 test') - endif - - if( TestR4 .and. CheckArrays) then - call checkpattern(mpi_comm_compute, fname_r4,test_r4wr,test_r4rd,lLength,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' checkpattern r4 test') - endif - - if(TestInt .and. 
CheckArrays) then - call checkpattern(mpi_comm_compute, fname_i4, test_i4wr,test_i4rd,lLength,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' checkpattern i4 test') - endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #21' - - if(TestCombo .and. CheckArrays) then - - !------------------------------------- - ! Open up and read the combined file - !------------------------------------- - - ierr = PIO_OpenFile(PIOSYS,File,iotype,fname) - call check_pioerr(ierr,__FILE__,__LINE__,' combo test read openfile') - - if(iofmtd(1:2).eq.'nc') then - iostat = PIO_inq_varid(File,'field_r8',vard_r8c) - call check_pioerr(iostat,__FILE__,__LINE__,' combo test r8 inq_varid') - iostat = PIO_inq_varid(File,'field_r4',vard_r4c) - call check_pioerr(iostat,__FILE__,__LINE__,' combo test r4 inq_varid') - iostat = PIO_inq_varid(File,'field_i4',vard_i4c) - call check_pioerr(iostat,__FILE__,__LINE__,' combo test i4 inq_varid') - - endif - - if(iofmtd.ne.'bin') then - iostat = PIO_inq_varid(file,'filename',varfn) - - iostat = pio_get_var(file,varfn, fnamechk) - - if(piosys%io_rank==0 .and. fnamechk /= fname) then - print *,__FILE__,__LINE__,'fname chk failed: ',trim(fname),'<>',trim(fnamechk),'<' - end if - end if - call PIO_read_darray(File,vard_r8c,iodesc_r8,test_r8rd,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' combo test r4 pio_read_darray') - call PIO_read_darray(File,vard_r4c,iodesc_r4,test_r4rd,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' combo test r4 pio_read_darray') - call PIO_read_darray(File,vard_i4c,iodesc_i4,test_i4rd,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' combo test i4 pio_read_darray') - - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #22' - call PIO_CloseFile(File) - - !----------------------------- - ! Check the combined file - !----------------------------- - call checkpattern(mpi_comm_compute, fname,test_r8wr,test_r8rd,lLength,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' checkpattern test_r8 ') - - call checkpattern(mpi_comm_compute, fname,test_r4wr,test_r4rd,lLength,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' checkpattern test_r4 ') - - call checkpattern(mpi_comm_compute, fname,test_i4wr,test_i4rd,lLength,iostat) - call check_pioerr(iostat,__FILE__,__LINE__,' checkpattern test_i4 ') - - endif - !--------------------------------------- - ! Print out the performance measurements - !--------------------------------------- - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - endif - - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #23' - if(TestR8 .or. TestCombo) then - ! Maximum read/write times - if(readPhase) call GetMaxTime(dt_read_r8, gdt_read_r8(it), MPI_COMM_COMPUTE, ierr) - if(writePhase) call GetMaxTime(dt_write_r8, gdt_write_r8(it), MPI_COMM_COMPUTE, ierr) - endif - - if(TestR4) then - ! Maximum read/write times - if(readPhase) call GetMaxTime(dt_read_r4, gdt_read_r4(it), MPI_COMM_COMPUTE, ierr) - if(writePhase) call GetMaxTime(dt_write_r4, gdt_write_r4(it), MPI_COMM_COMPUTE, ierr) - endif - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #24' - - if(TestInt) then - ! Maximum read/write times - if(readPhase) call GetMaxTime(dt_read_i4, gdt_read_i4(it), MPI_COMM_COMPUTE, ierr) - if(writePhase) call GetMaxTime(dt_write_i4, gdt_write_i4(it), MPI_COMM_COMPUTE, ierr) - endif - - - if(TestR8 .or. TestCombo) glenr8=iodesc_r8%glen - if(TestR4 .or. TestCombo) glenr4=iodesc_r4%glen - if(TestInt .or. TestCombo) gleni4=iodesc_i4%glen - if(TestR8 .or. 
TestCombo) call pio_freedecomp(PIOSYS, iodesc_r8) - if(TestR4 .or. TestCombo) call pio_freedecomp(PIOSYS, iodesc_r4) - if(TestInt .or. TestCombo) call pio_freedecomp(PIOSYS, iodesc_i4) - enddo ! do it=1,maxiter - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #25' - - enddo ! do ip=1,numphase - - - !-------------------------------- - ! Clean up initialization memory - ! note: make sure DOFs are not used later - !-------------------------------- - if (PIOSYS%comp_rank >= 0) call dealloc_check(compDOF) - if (trim(rearr) == 'mct') then - if (PIOSYS%io_rank >= 0) call dealloc_check(ioDOF) - endif - - !---------------------------------- - ! Print summary bandwidth statistics - !---------------------------------- - - if(Debug) print *,'iam: ',PIOSYS%comp_rank,'testpio: point #26' - if((TestR8 .or. TestCombo) .and. (piosys%io_rank == 0) ) then - call WriteTimeTrialsStats(casename,TestR8CaseName, fname_r8, glenr8, gdt_read_r8, gdt_write_r8, maxiter) - endif - - if(TestR4 .and. (piosys%io_rank == 0) ) then - call WriteTimeTrialsStats(casename,TestR4CaseName, fname_r4, glenr4, gdt_read_r4, gdt_write_r4, maxiter) - endif - - if(TestInt .and. (piosys%io_rank == 0) ) then - call WriteTimeTrialsStats(casename,TestI4CaseName, fname_i4, gleni4, gdt_read_i4, gdt_write_i4, maxiter) - endif - - !------------------------------- - ! Print timers and memory usage - !------------------------------- - -#ifdef TIMING - call t_stopf('testpio_total') - call t_prf('timing.testpio',MPI_COMM_COMPUTE) - call t_finalizef() - call get_memusage(msize,mrss) - allocate(lmem(2),gmem(2,0:nprocs-1)) - lmem(1) = msize - lmem(2) = mrss - call mpi_gather(lmem,2,MPI_INTEGER,gmem,2,MPI_INTEGER,0,MPI_COMM_COMPUTE,ierr) - call CheckMPIReturn('Call to mpi_gather',ierr,__FILE__,__LINE__) - if (my_task == master_task) then - do n = 0,nprocs-1 - write(*,'(2a,i8,a,2f10.2)') myname,' my_task=',n,' : (hw, usage) memory (MB) = ',gmem(1,n)*mb_blk,gmem(2,n)*mb_blk - enddo -! indx = MAXLOC(gmem(1,:),dim=1) - 1 ! 
offset the location of the maximum memory usage by one - indx = MAXLOC(gmem(2,:),dim=1) - 1 - write(*,'(2a,i8,a,2f10.2)') myname,' my_task=',indx,' : (hw, usage) MAX memory (MB) = ',gmem(1,indx)*mb_blk,gmem(2,indx)*mb_blk - endif - deallocate(lmem,gmem) -#endif - - call MPI_Barrier(MPI_COMM_COMPUTE,ierr) - - if (my_task == master_task) then - print *,' ' - print *,'testpio completed successfully' - print *,' ' - endif - - !print *,'IAM: ',my_task,'before PIO_finalize' - call PIO_finalize(PIOSYS,ierr) - !print *,'IAM: ',my_task,'before MPI_finalize' - - !this is sometimes causing a problem on BGP....do this until fixed - !#ifndef BGx - call MPI_Finalize(ierr) - !#endif - - !print *,'IAM: ',my_task,'afterMPI_finalize' - !call CheckMPIReturn('Call to MPI_FINALIZE()',ierr,__FILE__,__LINE__) - !print *,'IAM: ',my_task,'after CheckMPIReturn' - - !============================================================================= -contains - !============================================================================= - - subroutine GetMaxTime(dtLocal, gdtMax, comm, ierror) - - implicit none - - real(r8), intent(IN) :: dtLocal - real(r8), intent(OUT) :: gdtMax - integer(i4), intent(IN) :: comm - integer(i4), intent(OUT) :: ierror - real(r8) :: local_temp -#ifdef _MPISERIAL - local_temp = dtlocal -#else - local_temp = max(dtlocal, MPI_Wtick()) -#endif - call MPI_Allreduce(Local_temp, gdtMax, 1,MPI_DOUBLE_PRECISION, MPI_MAX, comm, ierror) - - call CheckMPIReturn('Call to MPI_ALLREDUCE()',ierror,__FILE__,__LINE__) - - end subroutine GetMaxTime - - !============================================================================= - - subroutine WriteStats(CaseName, FileName, glen, trialNo, dtRead, dtWrite) - - implicit none - - character(len=*), intent(IN) :: CaseName - character(len=strlen), intent(IN) :: FileName - integer(i4), intent(in) :: glen - integer(i4), intent(IN) :: trialNo - real(r8), intent(IN) :: dtRead - real(r8), intent(IN) :: dtWrite - - character(len=*), parameter :: myname_=myname//'::WriteStats' - integer :: datumSize - - select case(CaseName) - case(TestR8CaseName) - datumSize = r8 - case(TestR4CaseName) - datumSize = r4 - case(TestI4CaseName) - datumSize = i4 - case(TestComboCaseName) - write(*,'(4a)') myname_,':: Case ',CaseName,' not supported. Returning without writing output.' - case default - write(*,'(4a)') myname_,':: Case ',CaseName,' not supported. Returning without writing output.' - end select - - print *,'-----------------------------------------' - print *,myname,':: Timings for ',CaseName,' Trial Number=',trialNo - print *,'Total Procs: ',nprocs,' IO Procs: ',num_iotasks, & - ' Aggregators: ',num_aggregator,' Stride: ',stride - print *,'Record bytes: ',INT(glen*datumSize,kind=i8), & - INT(glen*datumSize,kind=i8) - print *,'-----------------------------------------' - print *,'Type of I/O performed: ',Iofmtd - print *,' File name: ',FileName - print *,'-----------------------------------------' - print *, CaseName,' Trial No. ',trialNo,' Read time: ',dtRead, & - 'Read Mbytes/sec: ',MBYTES*glen*datumSize/dtRead - print *, CaseName,' Trial No. 
',trialNo,' Write time: ',dtWrite, & - 'Write Mbytes/sec: ',MBYTES*glen*datumSize/dtWrite - print *,'-----------------------------------------' - - end subroutine WriteStats - - !============================================================================= - - subroutine WriteTimeTrialsStats(casename,TestName, FileName, glen, ReadTimes, WriteTimes, nTrials) - - implicit none - - character(len=*), intent(IN) :: casename - character(len=*), intent(IN) :: TestName - character(len=strlen), intent(IN) :: FileName - integer(i4), intent(IN) :: glen - real(r8), dimension(:), pointer :: ReadTimes - real(r8), dimension(:), pointer :: WriteTimes - integer(i4), intent(IN) :: nTrials - - character(len=*), parameter :: myname_=myname//'::WriteTimeTrialsStats' - - real(r8), parameter :: tiny = 1.e-10 - real(r8) :: ReadBWAvg, ReadBWStdDev, ReadBWStdErrMean, ReadBWMin, ReadBWMax - real(r8) :: WriteBWAvg, WriteBWStdDev, WriteBWStdErrMean, WriteBWMin, WriteBWMax - real(r8) :: WriteTimeAvg,ReadTimeAvg - real(r8) :: TotalMBytes - integer :: datumSize, i, nDOF - real(r8), dimension(:), pointer :: ReadBW, WriteBW - - if(nTrials .ne. size(ReadTimes)) then - write(*,'(a,2(a,i8))') myname_,':: ERROR--nTrials = ',nTrials,' size(ReadTimes)=',size(ReadTimes) - call piodie(__FILE__,__LINE__) - endif - - if(nTrials .ne. size(WriteTimes)) then - write(*,'(a,2(a,i8))') myname_,':: ERROR--nTrials = ',nTrials,' size(WriteTimes)=',size(WriteTimes) - call piodie(__FILE__,__LINE__) - endif - - select case(TestName) - case(TestR8CaseName) - datumSize = r8 - case(TestR4CaseName) - datumSize = r4 - case(TestI4CaseName) - datumSize = i4 - case(TestComboCaseName) - datumSize = 0 - case default - write(*,'(4a)') myname_,':: TestName ',TestName,' not supported. Returning without writing output.' - return - end select - - TotalMBytes = MBYTES * glen * datumSize - - write(*,*)'-----------------------------------------' - write(*,*) 'Timing Output :: case = ',trim(casename),' test = ',trim(TestName) - write(*,'(5a,g14.6)') & - ' I/O format = ',trim(Iofmtd),' File name = ',trim(FileName), & - ' Record Size Mbytes = ',TotalMBytes - write(*,*) ' Trials = ',nTrials,' Total Procs = ',nprocs - write(*,*) ' IO Procs=',num_iotasks,' Aggregators=',num_aggregator,& - ' Base=',base,' Stride=',stride - do i = 1,nTrials - if(writetimes(i)>0) then - write(*,101) 'n=',i,' write (Mb/sec)=',TotalMBytes/WriteTimes(i), & - ' write_time(sec)=',WriteTimes(i), & - trim(casename),trim(TestName) - else - write(*,101) 'n=',i,' write (Mb/sec)=',TotalMBytes, & - '? write_time(sec)=',WriteTimes(i), & - trim(casename),trim(TestName) - end if - enddo - if (nTrials > 1) write(*,*) ' -------------------' - do i = 1,nTrials - if(Readtimes(i)>0) then - write(*,101) 'n=',i,' read (Mb/sec)=',TotalMBytes/ReadTimes(i), & - ' read_time(sec)=',ReadTimes(i), & - trim(casename),trim(TestName) - else - write(*,101) 'n=',i,' read (Mb/sec)=',TotalMBytes, & - '? read_time(sec)=',ReadTimes(i), & - trim(casename),trim(TestName) - end if - enddo - -101 format(3x,a,i5,a,f9.1,a,e12.4,2x,a,2x,a) - - if (nTrials > 1) then - - call alloc_check(ReadBW, nTrials, myname_//':: ReadBW') - call alloc_check(WriteBW, nTrials, myname_//':: WriteBW') - - ! Compute mean read/write bandwidths - ReadBWAvg = 0. - ReadTimeAvg = 0. - WriteBWAvg = 0. - WriteTimeAvg = 0. 
- do i=1,nTrials - ReadBW(i) = TotalMBytes / (ReadTimes(i) + tiny) - WriteBW(i) = TotalMBytes / (WriteTimes(i) + tiny) - ReadBWAvg = ReadBWAvg + ReadBW(i) - ReadTimeAvg = ReadTimeAvg + 1.0e3*ReadTimes(i) - WriteBWAvg = WriteBWAvg + WriteBW(i) - WriteTimeAvg = WriteTimeAvg + 1.0e3*WriteTimes(i) - enddo - - ReadBWAvg = ReadBWAvg / float(nTrials) - ReadTimeAvg = ReadTimeAvg / float(nTrials) - WriteBWAvg = WriteBWAvg / float(nTrials) - WriteTimeAvg = WriteTimeAvg / float(nTrials) - - ! Compute Standard Deviation and Std Error of the Mean - ReadBWStdDev = 0. - WriteBWStdDev = 0. - do i=1,nTrials - ReadBWStdDev = ReadBWStdDev + (ReadBWAvg - ReadBW(i))**2 - WriteBWStdDev = WriteBWStdDev + (WriteBWAvg - WriteBW(i))**2 - enddo - - ! Compute std. deviation and std. error of the mean - nDOF = max(1,nTrials-1) ! sample number of degrees-of-freedom - ReadBWStdDev = sqrt( ReadBWStdDev / float(nDOF) ) - WriteBWStdDev = sqrt( WriteBWStdDev / float(nDOF) ) - ReadBWStdErrMean = ReadBWStdDev / sqrt( float(nTrials) ) - WriteBWStdErrMean = WriteBWStdDev / sqrt( float(nTrials) ) - - ! Determine minimum and maximum BW values - ReadBWMin = minval(ReadBW) - ReadBWMax = maxval(ReadBW) - WriteBWMin = minval(WriteBW) - WriteBWMax = maxval(WriteBW) - - write(*,*) ' -------------------' - write(*,*) ' Summary BW Stats (MB/sec) for ',nTrials,' trials of ',trim(casename),' ',trim(TestName) - write(*,102) 'write avg=',WriteBWAvg,' +/-',WriteBWStderrMean, & - ' min=',WriteBWMin,' max=',WriteBWMax,' stddev=',WriteBWStdDev, & - trim(casename),trim(TestName) - write(*,102) 'read avg=',ReadBWAvg ,' +/-',ReadBWStderrMean, & - ' min=',ReadBWMin ,' max=',ReadBWMax ,' stddev=',ReadBWStdDev, & - trim(casename),trim(TestName) - write(*,103) 'Write Time Avg (usec) =',WriteTimeAvg - write(*,103) 'Read Time Avg (usec) =',ReadTimeAvg - -102 format(3x,5(a,f9.1),1x,a,1x,a) -103 format(3x,a,f9.1) - - call dealloc_check(ReadBW, myname_//':: ReadBW') - call dealloc_check(WriteBW, myname_//':: WriteBW') - - endif ! 
(nTrials > 1) - write(*,*) '-----------------------------------------' - - end subroutine WriteTimeTrialsStats - - !======================================================================= -#ifdef TIMING - subroutine get_memusage(msize,mrss) - - integer :: msize,mrss - - integer :: mshare,mtext,mdatastack - integer :: ierr - integer :: GPTLget_memusage - - ierr = GPTLget_memusage (msize, mrss, mshare, mtext, mdatastack) - - end subroutine get_memusage -#endif - !============================================================================= - - subroutine c1dto3d(gindex,nx,ny,nz,i,j,k) - implicit none - integer(kind=pio_offset),intent(in) :: gindex - integer, intent(in) :: nx,ny,nz - integer,intent(out) :: i,j,k - - k = (gindex - 1) / (nx*ny) + 1 - j = (gindex - (k-1)*nx*ny - 1) / (nx) + 1 - i = (gindex - (k-1)*nx*ny - (j-1)*nx - 1) + 1 - - end subroutine c1dto3d - - !============================================================================= - subroutine check_pioerr(ierr, file, line, str1, str2) - - implicit none - integer(i4),intent(in) :: ierr - character(len=*),intent(in) :: file - integer(i4),intent(in) :: line - character(len=*),optional,intent(in) :: str1 - character(len=*),optional,intent(in) :: str2 - - character(len=256) lstr1 - character(len=256) lstr2 - character(len=*),parameter :: myname_='check_pioerr' - - lstr1 = '' - if (present(str1)) then - lstr1 = trim(str1) - endif - lstr2 = trim(lstr1) - if (present(str2)) then - lstr2 = trim(str2) - endif - - if(ierr /= PIO_noerr) then - write(*,*) trim(myname_),':: ERROR on my_task=',my_task,' ierr=',ierr,' ',trim(lstr1) - call piodie(file,line,trim(lstr2)) - endif - - end subroutine check_pioerr - !============================================================================= - -end program testpio - diff --git a/src/externals/pio1/tests/testpio/testpio_bench.pl b/src/externals/pio1/tests/testpio/testpio_bench.pl deleted file mode 100755 index a47eada67f0..00000000000 --- a/src/externals/pio1/tests/testpio/testpio_bench.pl +++ /dev/null @@ -1,823 +0,0 @@ -#!/usr/bin/perl -use strict; -use warnings; -use Cwd; -use Getopt::Long; -use File::Copy; - -my $preambleResource; -my $projectInfo; -my $project; -my $nodecount; -my $suites; -my $retry=0; -my $help=0; -my $host; -my $pecount = 0; -my $bname; -my $partdir; -my $iofmt; -my $rearr; -my $numIO; -my $stride; -my $maxiter; -my $dir; -my $debug=0; -my $numagg; -my $numvars; -my $iodecomp; -#my $logfile = 'testpio.out'; - -my $logfile = ''; -my $logfile_date_suffix = ''; # Optional suffix to logfile date (e.g., a,b,c,etc.) 
-my $logfile_name_comment = ''; -my $logfile_suffix; -my $logfile_name_user = ''; # Overrides automated logfile construction -my $enablenetcdf4; - -my $root = ''; -my $found = 0; -my $outfile = ''; - -my $date = ''; -my $cal_date = ''; # 2-digit year, month, day - -my $use_mpich_env = 1; # Set to one for MPICH diagnostic environment settings -my $use_cray_env = 1; # Set to one for use of Cray MPICH extensions -my $use_ibm_env = 1; # Set to one for use of IBM PE extensions - -my $set_mpi_values = 0; # Set to one if standard MPI settings are present -my $mpi_cb_buffer_size = ''; - -my $set_romio_values = 0; # Set to one if MPICH ROMIO settings are present -my $romio_cb_write = ''; # Use automatic, enable, or disable -my $romio_cb_read = ''; # Use automatic, enable, or disable -my $romio_direct_io = ''; # Use automatic, enable, or disable - -my $set_ibm_io_values = 0; # Set to one if IBM PE IO settings are present -my $ibm_io_buffer_size = ''; -my $ibm_io_largeblock_io = ''; # Set to "true" or "false" -my $ibm_io_sparse_access = ''; # Set to "true" or "false" - -my $set_lustre_values = 0; # Set to one if Lustre settings are present -my $lfs_stripe_cmd = 'lfs setstripe'; -my $lfs_ost_count = -1; -my $lfs_stripe_size = ''; - -my $argc = $#ARGV + 2; - -my $result = GetOptions("suites=s@"=>\$suites, - "retry"=>\$retry, - "host=s"=>\$host, - "pecount=i"=>\$pecount, - "bench=s"=>\$bname, - "iofmt=s"=>\$iofmt, - "rearr=s"=>\$rearr, - "numIO|numIOtasks=i"=>\$numIO, - "stride=i"=>\$stride, - "maxiter=i"=>\$maxiter, - "dir=s"=>\$dir, - "partdir=s"=>\$partdir, - "numagg=i"=>\$numagg, - "numvars=i"=>\$numvars, - "decomp=s"=>\$iodecomp, - "debug"=>\$debug, - "log=s"=>\$logfile, - "logfile-date-suffix=s"=>\$logfile_date_suffix, - "logfile-name-comment=s"=>\$logfile_name_comment, - "mpi-env-mpich"=>\$use_mpich_env, - "mpi-env-cray"=>\$use_cray_env, - "mpi-env-ibm"=>\$use_ibm_env, - "mpi-cb-buffer-size=s"=>\$mpi_cb_buffer_size, - "romio-cb-write=s"=>\$romio_cb_write, - "romio-cb-read=s"=>\$romio_cb_read, - "romio-direct-io"=>\$romio_direct_io, - "ibm-io-buffer-size=s"=>\$ibm_io_buffer_size, - "ibm-io-largeblock-io=s"=>\$ibm_io_largeblock_io, - "ibm-io-sparse-access=s"=>\$ibm_io_sparse_access, - "lfs-ost-count=i"=>\$lfs_ost_count, - "lfs-stripe-size=s"=>\$lfs_stripe_size, - "logfile-suffix=s"=>\$logfile_suffix, - "help"=>\$help); -usage() if($help || ($argc < 2)); - -sub usage{ - print "--suites : Test only the listed suites (all, snet, pnet, mpiio, ant, bench)\n"; - print "--retry : Do not repeat tests that have already passed\n"; - print "--host : Force a hostname for testing\n"; - print "--pecount : Select the processor count on which to run benchmark (defined in config_bench.xml) \n"; - print "--bench : Select the name of the benchmark to run (defined in config_bench.xml)\n"; - print "--iofmt : Selects the type of file to write (pnc,snc,bin)\n"; - print "--rearr : Selects the type of rearrangement (box,mct,none)\n"; - print "--numIOtasks (--numIO) : Sets the number of IO tasks used by PIO\n"; - print "--stride : Sets the stride between IO tasks, Note this is ignored on Blue Gene\n"; - print "--mpi-env-mpich : Adds MPICH environment settings\n"; - print "--mpi-env-cray : Adds Cray MPI environment settings\n"; - print "--mpi-env-ibm : Adds IBM (PE) MPI environment settings\n"; - print "--mpi-cb-buffer-size=N : Set PIO hint for cb_buffer_size (in bytes)\n"; - print "--romio-cb-write=str : romio_cb_write hint setting (\"automatic\",\n"; - print " \"enable\", or \"disable\") -- default is\n"; - print " 
automatic\n"; - print "--romio-cb-read=str : romio_cb_write hint setting (\"automatic\",\n"; - print " \"enable\", or \"disable\") -- default is\n"; - print " automatic\n"; - print "--romio-direct-io=str : romio_direct_io hint setting (\"automatic\",\n"; - print " \"enable\", or \"disable\") -- default is\n"; - print " automatic\n"; - print "--ibm-io-buffer-size=N : Sets the IBM (PE) IO buffer size --\n"; - print " give number of bytes or append \"k\" or \"m\"\n"; - print " to denote kilobytes or megabytes, respectively\n"; - print "--ibm-io-largeblock-io=str\n"; - print " : Set to \"true\" or \"false\"\n"; - print "--ibm-io-sparse-access=str\n"; - print " : Set to \"true\" or \"false\"\n"; - print "--lfs-ost-count=N : Sets the number of OSTs used for striping\n"; - print " on a Lustre filesystem\n"; - print "--lfs-stripe-size=N : Sets the size of the stripe used in the\n"; - print " Lustre file system -- stripe size must include\n"; - print " units (e.g., \"128k\", \"2m\", \"4g\")\n"; - print "--maxiter : Sets the number of files to write\n"; - print "--dir : Sets the subdirectory for which to write files \n"; - print "--numagg : Sets the number of MPI-IO aggregators to use \n"; - print "--numvars : Sets the number of variables to write to each file \n"; - print "--decomp : Sets the form of the IO-decomposition (x,y,z,xy,xye,xz,xze,yz,yze,xyz,xyze,setblk,cont1d,cont1dm)\n"; - print "--help : Print this message\n"; - print "--debug : Generate the runscript but do not submit it\n"; - print "--log : Manually sets the log output file for the benchmark\n"; - print "--logfile-date-suffix=str\n"; - print " : Suffix (e.g., a,b,c,etc.) added to date\n"; - print " in logfile\n"; - print "--logfile-name-comment=str\n"; - print " : Comment string included in logfile name\n"; - print "--logfile-suffix=str : Sets the final log file suffix\n"; - print "--partdir : Sets the input data directory for the MPAS partitioning files\n"; - exit; -} - -## Get a string to describe the date - -$date = `date +%y%m%d-%H%M%S`; -$cal_date = `date +%y%m%d`; - -chomp $date; -chomp $cal_date; - -## See if standard MPI options are requested - -if ($mpi_cb_buffer_size ne '') { - $set_mpi_values = 1; # True -} - - -## See if MPICH ROMIO options are requested - -if (($romio_cb_write ne '') || ($romio_cb_read ne '') - || ($romio_direct_io ne '')) { - - if (($romio_cb_write ne '') && ($romio_cb_write ne 'automatic') - && ($romio_cb_write ne 'enable') && ($romio_cb_write ne 'disable')) { - print "\nError: Invalid romio-cb-write entry\n\n"; - exit(-1); - } - - if (($romio_cb_read ne '') && ($romio_cb_read ne 'automatic') - && ($romio_cb_read ne 'enable') && ($romio_cb_read ne 'disable')) { - print "\nError: Invalid romio-cb-read entry\n\n"; - exit(-1); - } - - if (($romio_direct_io ne '') && ($romio_direct_io ne 'automatic') - && ($romio_direct_io ne 'enable') && ($romio_direct_io ne 'disable')) { - print "\nError: Invalid romio-direct-io entry\n\n"; - exit(-1); - } - - $set_romio_values = 1; # True -} - -## See if IBM PE IO options are requested - -if (($ibm_io_buffer_size ne '') || ($ibm_io_largeblock_io ne '') - || ($ibm_io_sparse_access ne '')) { - - if (($ibm_io_largeblock_io ne '') && ($ibm_io_largeblock_io ne 'true') - && ($ibm_io_largeblock_io ne 'false')) { - print "\nError: Invalid ibm-io-largeblock-io entry\n\n"; - exit(-1); - } - - if (($ibm_io_sparse_access ne '') && ($ibm_io_sparse_access ne 'true') - && ($ibm_io_sparse_access ne 'false')) { - print "\nError: Invalid ibm-io-sparse-access entry\n\n"; - exit(-1); - } 
- - $set_ibm_io_values = 1; # True -} - -## See if Lustre settings are requested - -if ($lfs_ost_count > 0) { - $set_lustre_values = 1; # True - - $lfs_stripe_cmd .= " -c " . $lfs_ost_count; -} - -if ($lfs_stripe_size ne "") { - $set_lustre_values = 1; # True - - $lfs_stripe_cmd .= " -s " . $lfs_stripe_size; -} - - -## Append an underscore to an existing logfile name comment - -if ($logfile_name_comment ne '') { - $logfile_name_comment .= '_'; -} - -my $cfgdir = `pwd`; -chomp $cfgdir; -my $clean = 'yes'; -my @valid_env = qw(NETCDF_PATH PNETCDF_PATH MPI_LIB MPI_INC F90 FC CC ALLCFLAGS FFLAGS - MPICC MPIF90 LDLIBS); - -my @testsuites = qw(bench); - -# The XML::Lite module is required to parse the XML configuration files. -(-f "$cfgdir/../testpio/perl5lib/XML/Lite.pm") or die <<"EOF"; -** Cannot find perl module \"XML/Lite.pm\" in directory \"$cfgdir/../testpio/perl5lib\" ** -EOF - -unshift @INC, "$cfgdir/../testpio","$cfgdir/../testpio/perl5lib"; -require XML::Lite; -require Utils; - -$host = Utils->host() unless(defined $host); -Utils->loadmodules("$host"); - -my $xml = XML::Lite->new( "$cfgdir/../testpio/build_defaults.xml" ); - -$root = $xml->root_element(); -my $settings = $xml->elements_by_name($host); -my %attributes = $settings->get_attributes; - - -foreach(keys %attributes){ - if(/ADDENV_(.*)/){ -# print F "\$ENV{$1}=\"$attributes{$_}:\$ENV{$1}\"\;\n"; - print "\$ENV{$1}=\"$attributes{$_}:\$ENV{$1}\"\;\n"; - }elsif(/ENV_(.*)/){ - print "set $1 $attributes{$_}\n"; -# print F "\$ENV{$1}=\"$attributes{$_}\"\;\n"; - print "\$ENV{$1}=\"$attributes{$_}\"\;\n"; - }elsif(/NETCDF_PATH/){ - if($attributes{NETCDF_PATH} =~ /netcdf-4/){ - $enablenetcdf4="--enable-netcdf4"; - } - } -} - -if(defined $suites){ - @testsuites = @$suites; -}elsif(defined $attributes{testsuites}){ - @testsuites = split(' ',$attributes{testsuites}); -} - - - -my $workdir = $attributes{workdir}; - -$workdir =~ s/\${(.*)}/$ENV{$1}/; - -if(-d $workdir){ - print "Using existing directory $workdir\n"; -}else{ - print "Creating directory $workdir\n"; - mkdir $workdir or die "Could not create directory" -} - -my $config = XML::Lite->new("$cfgdir/../testpio/config_bench.xml"); -my $elm = $config->root_element(); -print "pecount is $pecount\n"; - -my $ldx=0; -my $ldy=0; -my $ldz=0; -my $partfile; -my $nx_global=0; -my $ny_global=0; -my $nz_global=0; - -my %configuration = ( ldx => 0, - ldy => 0, - ldz => 0, - partfile => 'null', - partdir => 'foo', - iofmt => 'pnc', - rearr => 'box', - numprocsIO => 10, - stride => -1, - maxiter => 10, - dir => './none/', - iodecomp => 'yze', - numagg => -1, - numvars => 10, - set_mpi_values => 0, - mpi_cb_buffer_size => '', - set_romio_values => 0, - romio_cb_write => '', - romio_cb_read => '', - romio_direct_io => '', - set_ibm_io_values => 0, - ibm_io_buffer_size => '', - ibm_io_largeblock_io => '', - logfile_suffix => 'testpio.out', - ibm_io_sparse_access => ''); - -#------------------------------------------------- -# Modify the configuration based on arguments -#------------------------------------------------- -if (defined $iofmt) {$configuration{'iofmt'} = $iofmt;} -if (defined $rearr) {$configuration{'rearr'} = $rearr;} -if (defined $numIO) {$configuration{'numprocsIO'} = $numIO;} -if (defined $stride) {$configuration{'stride'} = $stride;} -if (defined $maxiter) {$configuration{'maxiter'} = $maxiter;} -if (defined $numvars) {$configuration{'numvars'} = $numvars;} -if (defined $dir) {$configuration{'dir'} = $dir;} -if (defined $partdir) {$configuration{'partdir'} = $partdir;} -if (defined 
$numagg) {$configuration{'numagg'} = $numagg;} -if (defined $logfile_suffix) {$configuration{'logfile_suffix'}=$logfile_suffix;} - - -if (defined $set_mpi_values) { - $configuration{'set_mpi_values'} = $set_mpi_values; -} - -if (defined $mpi_cb_buffer_size) { - $configuration{'mpi_cb_buffer_size'} = $mpi_cb_buffer_size; -} - -if (defined $set_romio_values) { - $configuration{'set_romio_values'} = $set_romio_values; -} - -if (defined $romio_cb_write) { - $configuration{'romio_cb_write'} = $romio_cb_write; -} - -if (defined $romio_cb_read) { - $configuration{'romio_cb_read'} = $romio_cb_read; -} - -if (defined $romio_direct_io) { - $configuration{'romio_direct_io'} = $romio_direct_io; -} - -if (defined $set_ibm_io_values) { - $configuration{'set_ibm_io_values'} = $set_ibm_io_values; -} - -if (defined $ibm_io_buffer_size) { - $configuration{'ibm_io_buffer_size'} = $ibm_io_buffer_size; -} - -if (defined $ibm_io_largeblock_io) { - $configuration{'ibm_io_largeblock_io'} = $ibm_io_largeblock_io; -} - -if (defined $ibm_io_sparse_access) { - $configuration{'ibm_io_sparse_access'} = $ibm_io_sparse_access; -} - -# See if you can find the general benchmark description first -my @blist = $config->elements_by_name("BenchConfig"); -my $bchildren = $elm->get_children(); - -$found=0; - -foreach my $child (@blist) { - my %atts = $child->get_attributes; - my $bn = $atts{"bench_name"}; - if ($bn =~ $bname) { - my $nx_global = $atts{"nx_global"}; $configuration{'nx_global'} = $nx_global; - my $ny_global = $atts{"ny_global"}; $configuration{'ny_global'} = $ny_global; - my $nz_global = $atts{"nz_global"}; $configuration{'nz_global'} = $nz_global; - $found = 1; - } -} -if(!$found) { - printf "Could not find configuration for benchmark: %s\n" ,$bname; - exit(-1); -} else { - print "nx_global: $configuration{'nx_global'} ny_global: $configuration{'ny_global'} nz_global: $configuration{'nz_global'}\n"; -} - -$root = "CompConfig"; -my @list = $config->elements_by_name($root); -my $children = $elm->get_children(); - -$found=0; - -foreach my $child (@list ) { - my %atts = $child->get_attributes; - my $name = $child->get_name(); - my @keys = keys(%atts); - my $np = $atts{"nprocs"}; - my $bn = $atts{"bench_name"}; -# printf "bench_name is $bn\n"; -# printf "bname is $bname\n"; - if(($np eq $pecount) & ($bn =~ $bname)) { - my @gchildren = $child->get_children(); - foreach my $grandchild (@gchildren) { - my $name = $grandchild->get_name(); - my $value = $grandchild->get_text(); - $configuration{$name}=$value; - } - $found = 1; - } -} -#my $suffix = $bname . "-" . $pecount; -my $suffix = $bname . "_PE-" . $pecount . "_IO-" . $iofmt . "-" . $numIO; - -## Add standard MPI values to the suffix - -if ($set_mpi_values != 0) { - $suffix .= "_MPI"; - - if ($mpi_cb_buffer_size ne '') { $suffix .= '-b' . 
$mpi_cb_buffer_size; } -} - -## Add MPICH/ROMIO values to the suffix - -if ($set_romio_values != 0) { - $suffix .= "_ROMIO"; - - if ($romio_cb_write ne "") { - $suffix .= "-w"; - - if ($romio_cb_write eq "automatic") { $suffix .= "A"; } - elsif ($romio_cb_write eq "enable") { $suffix .= "E"; } - elsif ($romio_cb_write eq "disable") { $suffix .= "D"; } - } - - if ($romio_cb_read ne "") { - $suffix .= "-r"; - - if ($romio_cb_read eq "automatic") { $suffix .= "A"; } - elsif ($romio_cb_read eq "enable") { $suffix .= "E"; } - elsif ($romio_cb_read eq "disable") { $suffix .= "D"; } - } - - if ($romio_direct_io ne "") { - $suffix .= "-d"; - - if ($romio_direct_io eq "automatic") { $suffix .= "A"; } - elsif ($romio_direct_io eq "enable") { $suffix .= "E"; } - elsif ($romio_direct_io eq "disable") { $suffix .= "D"; } - } -} - - -## Add IBM PE IO values to the suffix - -if ($set_ibm_io_values != 0) { - $suffix .= "_IBM"; - - if ($ibm_io_buffer_size ne '') { $suffix .= "-b" . $ibm_io_buffer_size; } - - if ($ibm_io_largeblock_io ne '') { - $suffix .= "-l"; - - if ($ibm_io_largeblock_io eq "true") { $suffix .= "T"; } - elsif ($ibm_io_largeblock_io eq "false") { $suffix .= "F"; } - } - - if ($ibm_io_sparse_access ne '') { - $suffix .= "-s"; - - if ($ibm_io_sparse_access eq "true") { $suffix .= "T"; } - elsif ($ibm_io_sparse_access eq "false") { $suffix .= "F"; } - } - -} - -## Add Lustre values to the suffix - -if ($set_lustre_values != 0) { - $suffix .= "_OST"; - - if ($lfs_ost_count != 0) { - $suffix .= "-c" . $lfs_ost_count; - } - - if ($lfs_stripe_size ne "") { - $suffix .= "-s". $lfs_stripe_size; - } -} - -## Build the logfile name - -if ($logfile_name_user ne '') { - $logfile = $logfile_name_user; -} else { -# $logfile = $cal_date . $logfile_date_suffix . "_" . $host . "_" - $logfile = $host . "_" . $logfile_name_comment . $suffix . "_" . $logfile_suffix; -} - -my $testname = "bench." . $date . "." . $suffix; - -printf "testname: %s\n",$testname; - -if (defined $iodecomp) {$configuration{'iodecomp'} = $iodecomp;} - -#print "ldx: $configuration{'ldx'} ldy: $ldy ldz: $ldz\n"; - -if ($found) { - print "ldx: $configuration{'ldx'} ldy: $configuration{'ldy'} ldz: $configuration{'ldz'}\n"; - $outfile = "$cfgdir/testpio_in." . 
$suffix; - unlink("$outfile") if(-e "$outfile"); - - open(F,"+> $outfile"); - gen_io_nml(); # Generate the io_nml namelist - gen_compdof_nml(); # Generate the compdof_nml namelist - if(defined $iodecomp) { - gen_iodof_nml(); # Generate the iodof_nml namelist - } - gen_prof_inparm(); # Generate the prof_inparm namelist - - close(F); -} else { - printf "Could not find configuration for benchmark: %s on %d MPI tasks \n" ,$bname, $pecount; - exit(-1); -} - -my $corespernode = $attributes{corespernode}; - -my $srcdir = "$workdir/src"; -my $tstdir = "$srcdir/testpio"; -copy("$outfile","$tstdir"); -my $testpiodir = cwd(); -my $piodir = "$testpiodir/.."; -# my $date = `date +%y%m%d-%H%M%S`; -my $user = $ENV{USER}; -chomp $date; - -$outfile = "$testpiodir/testpio.out.$date"; -my $script = "$testpiodir/testpio.sub.$date"; - -open(F,">$script"); -print F "#!/usr/bin/perl\n"; -$preambleResource = Utils->preambleResource("$host","$pecount","$corespernode"); -print F $preambleResource; -print F "$attributes{preamble}\n"; - -# Create a valid project string for this user -$projectInfo = Utils->projectInfo("$host","$user"); -print F $projectInfo; - -my @env; -foreach(keys %attributes){ -# if($attributes{$_} =~ /\$\{?(\w+)\}?/){ -# my $envvar = $ENV{$1}; -# $attributes{$_}=~ s/\$\{?$1\}?/$envvar/ -# } - if(/ADDENV_(.*)/){ - print F "\$ENV{$1}=\"$attributes{$_}:\$ENV{$1}\"\;\n"; - }elsif(/ENV_(.*)/){ - print "set $1 $attributes{$_}\n"; - print F "\$ENV{$1}=\"$attributes{$_}\"\;\n"; - } - -} - - -my $run = $attributes{run}; -my $exename = "./testpio"; - -#my $foo= Utils->runString($host,$pecount,$run,$exename,$log); -#print "EXEC command: ($foo)\n"; - -print F << "EOF"; -use strict; -use lib "$cfgdir"; -use File::Copy; -use File::Path; -use POSIX qw(ceil); -#unshift \@INC, "$cfgdir/../testpio"; - -#chmod 0755,"$cfgdir/../testpio/Utils.pm"; -use Utils; - -chdir ("$cfgdir"); - - - -mkdir "$srcdir" if(! 
-d "$srcdir"); - -my \$rc = 0xffff & system("rsync -rp $piodir $srcdir"); -if(\$rc != 0) { - print "rsync failed with \$rc, copying files\n"; - system("cp -fr $piodir/pio $srcdir"); - system("cp -fr $piodir/mct $srcdir"); - system("cp -fr $piodir/timing $srcdir"); - system("cp -fr $piodir/testpio $srcdir"); -} - -my \$confopts = {bench=>"--enable-pnetcdf --enable-mpiio --enable-netcdf --enable-timing $enablenetcdf4"}; -#my \$confopts = {bench=>""}; - -my \$testlist = {bench=>["generated"]}; - -unlink("$workdir/wr01.dof.txt") if(-e "$workdir/wr01.dof.txt"); -my \$suite; -my \$passcnt=0; -my \$failcnt=0; -my \$host = "$host"; -my \$pecount = $pecount; -my \$run = "$attributes{run}"; - -if ($use_mpich_env != 0) { - \$ENV{'MPICH_ENV_DISPLAY'} = '1'; # this displays all the MPICH environment variables -} - -if ($use_cray_env != 0) { - \$ENV{'MPICH_MPIIO_XSTATS'} = '1'; # this outputs MPI-IO statistics - \$ENV{'MPICH_MPIIO_HINTS_DISPLAY'} = '1'; # Displays hints for each file - \$ENV{'MPICH_MPIIO_CB_ALIGN'} = '2'; # Do not align to Lustre stripes -} - -if ($use_ibm_env != 0) { - if ('$ibm_io_buffer_size' ne '') { - \$ENV{'MP_IO_BUFFER_SIZE'} = "$ibm_io_buffer_size"; - } -} - -foreach \$suite (qw(@testsuites)){ - my \$confopts = \$confopts->{\$suite}; -# my \@testlist = \@{\$testlist->{\$suite}}; - my \@testlist = \"$suffix"; -# unlink("../pio/Makefile.conf"); -# copy("testpio_in","$tstdir"); # copy the namelist file into test directory - - chdir ("$tstdir"); - my \$test; - my \$run = "$attributes{run}"; - unless(-e "$tstdir/testpio"){ - system("perl ./testpio_build.pl --conopts=\\"\$confopts\\" --host=$host"); - } - if(-e "../pio/Makefile.conf" && -e "testpio"){ - foreach \$test (\@testlist){ - my \$casedir = "$workdir/\$suite.$date.\$test"; - mkdir \$casedir unless(-d \$casedir); - chdir(\$casedir) or die "Could not cd to \$casedir"; - print "\$suite \$test "; - if($retry && -e "TestStatus"){ - open(T,"TestStatus"); - my \$result = <T>; - close(T); - if(\$result =~ /PASS/){ - \$passcnt++; - print "Test already PASSED\\n"; - next; - } - } - - unlink("testpio") if(-e "testpio"); - - copy("$tstdir/testpio","testpio"); - - - chmod 0755,"testpio"; -# symlink("$tstdir/namelists/testpio_in.\$test","testpio_in"); -# symlink("$tstdir/testpio_in.\$test","testpio_in"); - symlink("$tstdir/testpio_in.\$test","testpio_in"); - rmtree "none" unless(! -d "none"); - mkdir "none" unless(-d "none"); - - if ($set_lustre_values != 0) { - system("$lfs_stripe_cmd" . " " . 
"none"); - } - - my \$exename = "./testpio"; - my \$log = "\$casedir/$logfile"; - unlink("\$log") if(-e "\$log"); - - my \$sysstr = Utils->runString(\$host,\$pecount,\$run,\$exename,\$log); - print "Running \$sysstr\\n"; - system(\$sysstr); - open(LOG,\$log); - my \@logout = ; - close(LOG); - - my \$cnt = grep /testpio completed successfully/ , \@logout; - open(T,">TestStatus"); - if(\$cnt>0){ - \$passcnt++; - print "PASS \\n"; - print T "PASS \\n"; - }else{ - \$failcnt++; - print "FAIL \\n"; - print T "FAIL \\n"; - } - close(T); - } - }else{ - print "suite \$suite FAILED to configure or build\\n"; - } -} -print "test complete on $host \$passcnt tests PASS, \$failcnt tests FAIL\\n"; -EOF -close(F); -chmod 0755, $script; -my $subsys = Utils->submitString($host,$pecount,$corespernode,$attributes{submit},$script); - -if ($debug) { - print "Submission command: ($subsys)\n"; -} else { - print "submit: ($subsys)\n"; -} - -if($debug) { - print "Created script ($script)\n"; -} else { -# exec("$subsys"); - my @foo2 = `$subsys`; - my $jobid; - foreach my $i (@foo2) { - ($jobid) = ($i =~/([0-9]+)/); -# print "jobid: ($jobid)\n"; - } -# exec("cqwait $jobid"); -} - - -sub gen_compdof_nml{ - print F "&compdof_nml\n"; - print F "grddecomp = 'setblk'\n"; - print F "gdx = $configuration{'ldx'}\n"; - print F "gdy = $configuration{'ldy'}\n"; - print F "gdz = $configuration{'ldz'}\n"; - print F "/\n"; -} -sub gen_iodof_nml { - print F "&iodof_nml\n"; - print F "grddecomp = '$configuration{'iodecomp'}'\n"; - print F "/\n"; -} -sub gen_prof_inparm { - print F "&prof_inparm\n"; - print F "profile_disable = .false.\n"; - print F "profile_barrier = .true.\n"; - print F "profile_single_file = .false.\n"; - print F "profile_depth_limit = 10\n"; - print F "profile_detail_limit = 0\n"; - print F "/\n"; -} -sub gen_io_nml { - print F "&io_nml\n"; - print F "casename = '$suffix'\n"; - print F "nx_global = $configuration{'nx_global'}\n"; - print F "ny_global = $configuration{'ny_global'}\n"; - print F "nz_global = $configuration{'nz_global'}\n"; - print F "nvars = $configuration{'numvars'}\n"; - print F "iofmt = '$configuration{'iofmt'}'\n"; - print F "rearr = '$configuration{'rearr'}'\n"; - print F "nprocsIO = $configuration{'numprocsIO'}\n"; - print F "stride = $configuration{'stride'}\n"; - print F "maxiter = $configuration{'maxiter'}\n"; - print F "dir = '$configuration{'dir'}'\n"; - print F "part_input = '$configuration{'partdir'}/$configuration{'partfile'}'\n"; - print F "num_aggregator = $configuration{'numagg'}\n"; - - print F "set_mpi_values = $configuration{'set_mpi_values'}\n"; - - if ($configuration{'set_mpi_values'} != 0) { - print F "mpi_cb_buffer_size = '$configuration{'mpi_cb_buffer_size'}'\n"; - } - - print F "set_romio_values = $configuration{'set_romio_values'}\n"; - - if ($configuration{'set_romio_values'} != 0) { - print F "romio_cb_write = '$configuration{'romio_cb_write'}'\n"; - print F "romio_cb_read = '$configuration{'romio_cb_read'}'\n"; - print F "romio_direct_io = '$configuration{'romio_direct_io'}'\n"; - } - - print F "set_ibm_io_values = $configuration{'set_ibm_io_values'}\n"; - - if ($configuration{'set_ibm_io_values'} != 0) { - print F "ibm_io_buffer_size = '$configuration{'ibm_io_buffer_size'}'\n"; - print F "ibm_io_largeblock_io = '$configuration{'ibm_io_largeblock_io'}'\n"; - print F "ibm_io_sparse_access = '$configuration{'ibm_io_sparse_access'}'\n"; - } - - print F "DebugLevel = 0\n"; - print F "compdof_input = 'namelist'\n"; - if(defined $iodecomp) { - print F "iodof_input = 
'namelist'\n"; - } - print F "compdof_output = 'none'\n"; - print F "/\n"; -} diff --git a/src/externals/pio1/tests/testpio/testpio_build.pl b/src/externals/pio1/tests/testpio/testpio_build.pl deleted file mode 100644 index 720e92df12f..00000000000 --- a/src/externals/pio1/tests/testpio/testpio_build.pl +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/perl -use strict; -use Getopt::Long; - -my $host; -my @conopts; -my $result = GetOptions("host=s"=>\$host,"conopts=s@"=>\@conopts); - - -my $cfgdir = `pwd`; -chomp $cfgdir; -my $clean = 'yes'; -my @valid_env = qw(NETCDF_PATH PNETCDF_PATH MPI_LIB MPI_INC FC CC CFLAGS FFLAGS - MACHDEFS MPICC MPIFC LDLIBS CPPDEFS MEMMON_PATH CXX MPICXX); - - -# The XML::Lite module is required to parse the XML configuration files. -(-f "$cfgdir/perl5lib/XML/Lite.pm") or die <<"EOF"; -** Cannot find perl module \"XML/Lite.pm\" in directory \"$cfgdir/perl5lib\" ** -EOF - -unshift @INC, "$cfgdir/perl5lib"; -require XML::Lite; -require Utils; - -my $xml = XML::Lite->new( "build_defaults.xml" ); - -$host = Utils->host() unless(defined $host); - -print "host=$host\n"; -my $root = $xml->root_element(); -my $settings = $xml->elements_by_name($host); -my %attributes = $settings->get_attributes; -my @env; - -foreach(@valid_env){ - push(@env,"$_=\"$attributes{$_}\"") if(defined($attributes{$_})); -} - -foreach(keys %attributes){ - if($attributes{$_} =~ /\$\{?(\w+)\}?/){ - my $envvar = $ENV{$1}; - $attributes{$_}=~ s/\$\{?$1\}?/$envvar/ - } - if(/ADDENV_(.*)/){ - $ENV{$1}="$attributes{$_}:$ENV{$1}"; - }elsif(/ENV_(.*)/){ - print "set $1 $attributes{$_}\n"; - $ENV{$1}="$attributes{$_}"; - } - -} - - -my $conopts = "@conopts $attributes{conopts}" if(defined($attributes{conopts})); - -chdir('../pio'); - -my $syscmd = "./configure $conopts @env "; - -print "Building for $host using $syscmd\n"; - -system($syscmd); - -chdir('../timing'); -my $dir; -foreach $dir (qw(timing pio testpio)){ - chdir("$cfgdir/../$dir") or die "Cannot cd to $cfgdir/../$dir: $!\n";; - print "Building in $dir\n"; - system('gmake clean') if($clean eq 'yes'); - system('gmake'); -} - diff --git a/src/externals/pio1/tests/testpio/testpio_run.pl b/src/externals/pio1/tests/testpio/testpio_run.pl deleted file mode 100755 index bafd6b2d30c..00000000000 --- a/src/externals/pio1/tests/testpio/testpio_run.pl +++ /dev/null @@ -1,365 +0,0 @@ -#!/usr/bin/perl -use strict; -use Cwd; -use Getopt::Long; - -my $twopass=0; -my $preambleResource; -my $projectInfo; -my $suites; -my $retry=0; -my $help=0; -my $host; -my $debug=0; -my $pecount=64; -my $enablenetcdf4; -my $result = GetOptions("suites=s@"=>\$suites,"retry"=>\$retry,"host=s"=>\$host,"pecount=i"=>\$pecount,"help"=>\$help, - "twopass"=>\$twopass,"debug"=>\$debug); - -usage() if($help); -sub usage{ - print "--debug : Generate the runscript but do not submit it\n"; - print "--help : Print this message\n"; - print "--host : Force a hostname for testing\n"; - print "--pecount : Select the processor count on which to run tests\n"; - print "--retry : Do not repeat tests that have already passed\n"; - print "--suites : Test only the listed suites (all, snet, pnet, mpiio, ant, vdc)\n"; - print "--twopass : Run in two passes - first builds on login node, second submits (required on some systems)\n"; - exit; -} - - - - -my $cfgdir = `pwd`; -chomp $cfgdir; -my $clean = 'yes'; -my @valid_env = qw(NETCDF_PATH PNETCDF_PATH MPI_LIB MPI_INC F90 FC CC FFLAGS - MPICC MPIF90 LDLIBS MACHDEFS); - - -my @testsuites = qw(all snet pnet mpiio ant ); - - - -# The XML::Lite module is required to 
parse the XML configuration files. -(-f "$cfgdir/../testpio/perl5lib/XML/Lite.pm") or die <<"EOF"; -** Cannot find perl module \"XML/Lite.pm\" in directory \"$cfgdir/../testpio/perl5lib\" ** -EOF - -unshift @INC, "$cfgdir/../testpio/perl5lib"; -require XML::Lite; -require Utils; - -$host = Utils->host() unless(defined $host); -print "host = $host\n"; -Utils->loadmodules("$host"); -print "host = $host\n"; - -#if($host eq "jaguar"){ -# print "Using twopass run method\n"; -# $twopass = 1; -#} - - -my $xml = XML::Lite->new( "build_defaults.xml" ); - -my $root = $xml->root_element(); -my $settings = $xml->elements_by_name($host); -my %attributes = $settings->get_attributes; - - -foreach(keys %attributes){ - if($attributes{$_} =~ /\$\{?(\w+)\}?/){ - my $envvar = $ENV{$1}; - $attributes{$_}=~ s/\$\{?$1\}?/$envvar/ - } -# if(/ADDENV_(.*)/){ -# print F "\$ENV{$1}=\"$attributes{$_}:\$ENV{$1}\n\""; -# }elsif(/ENV_(.*)/){ -# print "set $1 $attributes{$_}\n"; -# print F "\$ENV{$1}=\"$attributes{$_}\n\""; -# } - -} - -if(defined $suites){ - @testsuites = @$suites; -}elsif(defined $attributes{testsuites}){ - @testsuites = split(' ',$attributes{testsuites}); -} - - -my $workdir = $attributes{workdir}; - -print "preamble: $attributes{preamble}\n"; -my $corespernode = $attributes{corespernode}; -$pecount = $attributes{pecount} if(defined $attributes{pecount}); - - -if(-d $workdir){ - print "Using existing directory $workdir\n"; -}else{ - print "Creating directory: ($workdir)\n"; - mkdir $workdir or die "Could not create directory" -} - -my $srcdir = "$workdir/src"; -my $tstdir = "$srcdir/testpio"; -my $testpiodir = cwd(); -my $piodir = "$testpiodir/.."; -my $date = `date +%y%m%d-%H%M%S`; -my $user = $ENV{USER}; -chomp $date; - -my $outfile = "$testpiodir/testpio.out.$date"; -my $script = "$testpiodir/testpio.sub.$date"; - -open(F,">$script"); -print F "#!/usr/bin/perl\n"; -$preambleResource = Utils->preambleResource("$host","$pecount","$corespernode"); -print F $preambleResource; -print F "$attributes{preamble}\n"; - - -# Create a valid project string for this user -$projectInfo = Utils->projectInfo("$host","$user"); -print F $projectInfo; - -my @env; -foreach(keys %attributes){ -# if($attributes{$_} =~ /\$\{?(\w+)\}?/){ -# my $envvar = $ENV{$1}; -# $attributes{$_}=~ s/\$\{?$1\}?/$envvar/ -# } - if(/ADDENV_(.*)/){ - print F "\$ENV{$1}=\"$attributes{$_}:\$ENV{$1}\"\;\n"; - }elsif(/ENV_(.*)/){ - print "set $1 $attributes{$_}\n"; - print F "\$ENV{$1}=\"$attributes{$_}\"\;\n"; - }elsif(/(.?NETCDF_PATH)/){ - print F "\$ENV{$1}=\"$attributes{$_}\"\;\n"; - if($attributes{netcdf4} =~ /true/){ - $enablenetcdf4="--enable-netcdf4"; - } - } - -} - -my $run = $attributes{run}; -my $exename = "./testpio"; -my $log = "testpio.log.lid"; -my $foo; - -Utils->runString($host,$pecount,$run,$exename,$log) - if($run ne ""); - - -print "EXEC command: ($foo)\n"; - -print F << "EOF"; -use strict; -use lib "$cfgdir"; -use File::Copy; -use POSIX qw(ceil); - -use Utils; - -chdir ("$cfgdir"); -my \$thispass = shift; -\$thispass = 2 unless(defined \$thispass); - -mkdir "$srcdir" if(! 
-d "$srcdir"); - -my \$rc = 0xffff & system("rsync -rp $piodir $srcdir"); -if(\$rc != 0) { - system("cp -fr $piodir/pio $srcdir"); - system("cp -fr $piodir/mct $srcdir"); - system("cp -fr $piodir/timing $srcdir"); - system("cp -fr $piodir/testpio $srcdir"); -} - -my \$confopts = {all=>" --enable-pnetcdf --enable-mpiio --enable-netcdf --enable-timing $enablenetcdf4", - snet=>"--disable-pnetcdf --disable-mpiio --enable-netcdf --enable-timing $enablenetcdf4", - pnet=>"--enable-pnetcdf --disable-mpiio --disable-netcdf --enable-timing", - ant=>"--enable-pnetcdf --enable-mpiio --enable-netcdf --disable-timing $enablenetcdf4", - mpiio=>"--disable-pnetcdf --enable-mpiio --disable-netcdf --enable-timing", - vdc=>"--enable-compression --enable-pnetcdf --disable-netcdf --enable-timing"}; - -my \$testlist = {all=>["sn01","sn02","sn03","sb01","sb02","sb03","sb04","sb05","sb06","sb07","sb08", - "pn01","pn02","pn03","pb01","pb02","pb03","pb04","pb05","pb06","pb07","pb08", - "bn01","bn02","bn03","bb01","bb02","bb03","bb04","bb05","bb06","bb07","bb08", - "wr01","rd01","apb05","asb01","asb04"], - snet=>["sn01","sn02","sn03","sb01","sb02","sb03","sb04","sb05","sb06","sb07","sb08","asb01","asb04" ], - pnet=>["pn01","pn02","pn03","pb01","pb02","pb03","pb04","pb05","pb06","pb07","pb08","apb05"], - ant=>["sn02","sb02","pn02","pb02","bn02","bb02"], - mpiio=>["bn01","bn02","bn03","bb01","bb02","bb03","bb04","bb05","bb06","bb07","bb08"]}; - -my \@vdctests = ("vdc01"); - -if(\"$attributes{conopts}\" =~ /with-piovdc/){ - \$confopts->{all} .= " --enable-compression"; - push(\@{\$testlist->{all}},\@vdctests); - push(\@{\$testlist->{vdc}},\@vdctests); -} - -my \@netcdf4tests = ("n4n01","n4n02","n4n03","n4b01","n4b02","n4b03","n4b04","n4b05","n4b06","n4b07","n4b08"); - -if(\"x$attributes{netcdf4}\" eq "xtrue" ){ - \$confopts->{all} .= " --enable-netcdf4"; - \$confopts->{snet} .= " --enable-netcdf4"; - push(\@{\$testlist->{all}},\@netcdf4tests); - push(\@{\$testlist->{snet}},\@netcdf4tests); -} - - - - -#my \$pecnt = $corespernode*ceil($pecount/$corespernode); - -unlink("$workdir/wr01.dof.txt") if(-e "$workdir/wr01.dof.txt"); -my \$suite; -my \$passcnt=0; -my \$failcnt=0; -my \$host = "$host"; -my \$pecount = $pecount; -my \$run = "$attributes{run}"; - -foreach \$suite (qw(@testsuites)){ - my \$confopts = \$confopts->{\$suite}; - my \@testlist = \@{\$testlist->{\$suite}}; - - - chdir ("$tstdir"); - unless($twopass && \$thispass==2){ - unlink("../pio/Makefile.conf"); - my \$saveprocs; - # allows for mpi build in configure - # if("$host" eq "erebus" or "$host" =~ /^yellowstone/){ - # \$saveprocs=\$ENV{MP_PROCS}; - # \$ENV{MP_PROCS} = 1; - #system("hostname > $tstdir/hostfile"); - #\$ENV{MP_HOSTFILE}="$tstdir/hostfile"; - - # } - if("$host" eq "yellowstone_pgi") { - \$ENV{LD_PRELOAD}="/opt/ibmhpc/pe1304/ppe.pami/gnu/lib64/pami64/libpami.so"; - } - system("perl ./testpio_build.pl --conopts=\\"\$confopts\\" --host=$host"); - if("$host" eq "erebus" or "$host" =~ /^yellowstone/){ - # \$ENV{MP_PROCS}=\$saveprocs; - # delete \$ENV{MP_HOSTFILE}; - } - } - my \$test; - - if($twopass && \$thispass==1 ) { - if(-e "testpio"){ - rename("testpio","testpio.\$suite"); - }else{ - die "Build of testpio.\$suite failed"; - } - }elsif(($twopass && \$thispass==2 && -e "testpio.\$suite") or (-e "../pio/Makefile.conf" && -e "testpio")){ - foreach \$test (\@testlist){ - if(\$host eq "intrepid" && (\$test =~ /^a/)) { - print "Skipping async test \$test on \$host\n"; - next; - } - - my \$casedir = "$workdir/\$suite.\$test"; - mkdir \$casedir 
unless(-d \$casedir); - chdir(\$casedir) or die "Could not cd to \$casedir"; - print "\$suite \$test "; - if($retry && -e "TestStatus"){ - open(T,"TestStatus"); - my \$result = <T>; - close(T); - if(\$result =~ /PASS/){ - \$passcnt++; - print "Test already PASSED\\n"; - next; - } - } - - unlink("testpio") if(-e "testpio"); - if($twopass){ - copy("$tstdir/testpio.\$suite","testpio"); - }else{ - copy("$tstdir/testpio","testpio"); - } - chmod 0755,"testpio"; - - copy("$tstdir/namelists/testpio_in.\$test","testpio_in"); - if("$host" eq "intrepid"){ - open(F,"$tstdir/namelists/testpio_in.\$test"); - my \@nl = <F>; - close(F); - open(F,">testpio_in"); - foreach(\@nl){ - if(/nprocsIO/){ - print F " nprocsIO = 0\n"; - next; - } - print F \$_; - } - close(F); - } - - - mkdir "none" unless(-d "none"); - my \$exename = "./testpio"; - my \$log = "\$casedir/testpio.out"; -# my \$log = "\$casedir/testpio.out.$date"; -# my \$sysstr; -# if (\$run ne ""){ -# \$sysstr = Utils->runString(\$host,\$pecount,\$run,\$exename,\$log); -# }else{ -# \$sysstr = "\$exename > \$log"; -# } - my \$sysstr = Utils->runString(\$host,\$pecount,\$run,\$exename,\$log); - # Utils->runString($host,$pecount,$run,$exename,$log); - # print "value for foo is (\$foo)\\n"; - system(\$sysstr); - open(LOG,\$log); - my \@logout = <LOG>; - close(LOG); - - my \$cnt = grep /testpio completed successfully/ , \@logout; - open(T,">TestStatus"); - if(\$cnt>0){ - \$passcnt++; - print "PASS \\n"; - print T "PASS \\n"; - }else{ - \$failcnt++; - print "FAIL \\n"; - print T "FAIL \\n"; - } - close(T); - } - }else{ - print "suite \$suite FAILED to configure or build\\n"; - } -} -if($twopass && \$thispass==1){ - chdir("$cfgdir"); - my \$subsys = Utils->submitString("$host",$pecount,$corespernode,"$attributes{submit}","$script"); - if($debug) { - print "Run ($script) second pass with \$subsys\n"; - }else{ - exec(\$subsys); - } -} - -print "test complete on $host \$passcnt tests PASS, \$failcnt tests FAIL\\n"; -EOF -close(F); -chmod 0755, $script; -my $subsys = Utils->submitString($host,$pecount,$corespernode,$attributes{submit},$script); -if($debug) { - print "Created script ($script)\n"; -}elsif($twopass){ - exec("$script 1"); -}else{ - exec($subsys); -} diff --git a/src/externals/pio1/tests/testpio/utils_mod.F90 b/src/externals/pio1/tests/testpio/utils_mod.F90 deleted file mode 100644 index b49f142e2df..00000000000 --- a/src/externals/pio1/tests/testpio/utils_mod.F90 +++ /dev/null @@ -1,115 +0,0 @@ -module utils_mod - - use pio ! _EXTERNAL - use kinds_mod - implicit none - private - - public :: WriteHeader, split_comm - -contains - -!> -!! @private -!! @brief Writes netcdf header information for testpio. -!! @param File @copydoc file_desc_t -!! @param nx -!! @param ny -!! @param nz -!! @param dimid_x -!! @param dimid_y -!! 
@param dimid_z -!< -subroutine WriteHeader(File,nx,ny,nz,dimid_x,dimid_y,dimid_z) - - type (File_desc_t), intent(inout) :: File - integer(i4), intent(in) :: nx,ny,nz - integer(i4), intent(out) :: dimid_x,dimid_y,dimid_z - - integer(i4) :: itmp,iostat - - iostat = PIO_put_att(File,pio_global,'title','Test NetCDF file') - if(iostat /= pio_noerr) then - write(*,*) 'testPIO: Error writing TITLE to netCDF file' - endif - - iostat = PIO_put_att(File,pio_global,'ivalue', 4) - if(iostat /= pio_noerr) then - write(*,*) 'testPIO: Error writing iVALUE to netCDF file' - endif - - iostat = PIO_def_dim(File,'X',nx,dimid_x) - if(iostat /= pio_noerr) then - write(*,*) 'testPIO: Error defining dimension X for netCDF file' - endif - - iostat = PIO_def_dim(File,'Y',ny,dimid_y) - if(iostat /= pio_noerr) then - write(*,*) 'testPIO: Error defining dimension Y for netCDF file' - endif - - iostat = PIO_def_dim(File,'Z',nz,dimid_z) - if(iostat /= pio_noerr) then - write(*,*) 'testPIO: Error defining dimension Z for netCDF file' - endif - -end subroutine WriteHeader - - - -subroutine split_comm(initial_comm, nprocs, num_iotasks, stride, base, mpi_comm_compute, mpi_comm_io, intercomm) - use pio_support !_EXTERNAL -#ifndef NO_MPIMOD - use mpi !_EXTERNAL -#endif - - implicit none - - integer, intent(in) :: initial_comm, nprocs, num_iotasks, stride, base - integer, intent(out) :: mpi_comm_compute, mpi_comm_io, intercomm - - integer :: ierr - integer :: pelist(3,1), mpigrp_init, mpigrp_io, mpigrp_compute -#ifdef NO_MPIMOD - include 'mpif.h' !_EXTERNAL -#endif -#ifndef _MPISERIAL - mpi_comm_compute = MPI_COMM_NULL - mpi_comm_io = MPI_COMM_NULL - - pelist(1,1) = base - pelist(2,1) = min(nprocs-1,num_iotasks*stride-1) - pelist(3,1) = stride - - call mpi_comm_group(initial_comm, mpigrp_init, ierr) - - call mpi_group_range_incl(mpigrp_init, 1, pelist, mpigrp_io, ierr) - - call mpi_group_range_excl(mpigrp_init, 1, pelist, mpigrp_compute, ierr) - - call mpi_comm_create(initial_comm, mpigrp_compute, mpi_comm_compute, ierr) - - call mpi_comm_create(initial_comm, mpigrp_io, mpi_comm_io, ierr) - - if(mpi_comm_compute/=MPI_COMM_NULL) then - call mpi_intercomm_create(mpi_comm_compute, 0, initial_comm, base, 1, intercomm, ierr) - else if(mpi_comm_io/=MPI_COMM_NULL) then - if(base==0) then - if(stride>1) then - call mpi_intercomm_create(mpi_comm_io, 0, initial_comm, 1, 1, intercomm, ierr) - else - call mpi_intercomm_create(mpi_comm_io, 0, initial_comm, num_iotasks, 1, intercomm, ierr) - end if - else - call mpi_intercomm_create(mpi_comm_io, 0, initial_comm, 0, 1, intercomm, ierr) - end if - else - call piodie(__FILE__,__LINE__) - end if -#endif -end subroutine split_comm - - - - -end module utils_mod diff --git a/src/externals/pio1/tests/testpio/ystest.sh b/src/externals/pio1/tests/testpio/ystest.sh deleted file mode 100644 index 05368f21d37..00000000000 --- a/src/externals/pio1/tests/testpio/ystest.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -#BSUB -XF # enable X forwarding -#BSUB -Is # interactive job -#BSUB -q caldera # queue -#BSUB -W 00:30 # wall-clock time (hrs:mins) -#BSUB -n 1 # number of tasks in job -#BSUB -J myjob # job name -#BSUB -o myjob.%J.out # output file name in which %J is replaced by the job ID -#BSUB -e myjob.%J.err # error file name in which %J is replaced by the job ID -#BSUB -P P93300606 -#start the application -./testpio_run.pl --host=yellowstone --twopass diff --git a/src/externals/pio1/tests/unittests/CMakeLists.txt b/src/externals/pio1/tests/unittests/CMakeLists.txt deleted file mode 100644 index 
dee3e57c8f2..00000000000 --- a/src/externals/pio1/tests/unittests/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -SET(SRC basic_tests.F90 driver.F90 global_vars.F90 ncdf_tests.F90 nc_set_log_level2.c) -ADD_DEFINITIONS(${PIO_DEFINITIONS}) - -# Use to trace hdf5 errors with a special netcdf build -OPTION(HDF5_LOGGING "Turn on hdf5 logging (requires instrumented netcdf4)" OFF) -if(HDF5_LOGGING) - ADD_DEFINITIONS(-DLOGGING) -endif() - -INCLUDE_DIRECTORIES(${PIO_INCLUDE_DIRS} ) -LINK_DIRECTORIES(${PIO_LIB_DIR} ) - -ADD_EXECUTABLE(piotest ${SRC}) - -if(${PIO_BUILD_TIMING}) - TARGET_LINK_LIBRARIES(piotest pio timing) -ELSE() - TARGET_LINK_LIBRARIES(piotest pio) -ENDIF() -ADD_TEST(piotest piotest) diff --git a/src/externals/pio1/tests/unittests/Levy_Notes b/src/externals/pio1/tests/unittests/Levy_Notes deleted file mode 100644 index 4fc4a627cf7..00000000000 --- a/src/externals/pio1/tests/unittests/Levy_Notes +++ /dev/null @@ -1,64 +0,0 @@ -This file should be deleted before copying the unittestpio directory onto the trunk. - -Issues to deal with: - -1) First example of pio_initdecomp from webpage is not supported. - pio_initdecomp(iosystem,PIO_int,dims,start,count,iodesc) - -"pio_support::pio_die:: myrank= -1 : ERROR: piolib_mod.f90: 441 : subroutine not yet implemented" - -2) Can not open a binary file with PIO_write and put data in it. - -"write_mpiio_int: after call to file_write_all:MPI_ERR_READ_ONLY: file is read only -pio_support::pio_die:: myrank= -1 : ERROR: iompi_mod.F90: 230 : (no message)" - -3) When "creating" a binary file that already exists with PIO_CLOBBER - (which is ignored), file is not emptied. - -"warning, the mode argument is currently ignored for binary file operations" - -4) When creating a binary file to write data using PIO_iotype_direct_pbinary, - file is 32767 times larger than same file written with PIO_iotype_pbinary - - Note: this is on my laptop... on Frankfurt and Yellowstone, using - PIO_iotype_direct_pbinary leads to MPI crashing: - -Frankfurt ---------- -"write_mpiio_int after call to file_set_view:MPI_ERR_ARG: invalid argument of so - me other kind - pio_support::pio_die:: myrank= -1 : ERROR: iompi_mod.F90: 223 - : (no message)" - -Yellowstone ------------ -"write_mpiio_int after call to file_set_view:Invalid argument, error stack: -MPI_ - FILE_SET_VIEW(57): Invalid displacement argument - pio_support::pio_die:: myrank= -1 : ERROR: iompi_mod.F90: 223 - : (no message)" - -5) For tests where we expect failure (e.g. writing a file that was opened nowrite) - I am just checking for ret_val.ne.0 - should I compare to an exact error val? - -6) PIO_openfile hangs in netcdf input / output tests when stride < ntasks: - stride = 1 with 2 tasks on my laptop - stride = 2 with 4 tasks on frankfurt - stride = 4 with 8 tasks on caldera - - (all 3 cases run fine with stride = ntasks) - - -FIXED ------ - -** Can not suppress MPI-IO warning message if file does not exist. 
- -"open_mpiio: after call to MPI_file_open:MPI_ERR_NO_SUCH_FILE: no such file or directory" - -Solution: added CheckMPI optional flag to iompio_mod.F90.in::PIO_openfile and piolib_mod.F90::PIO_openfile - -** Test netcdf file (3*ntasks integers) is always 4096 bytes, while pnetcdf - file is (80 + 12*ntasks) bytes - -Solution: not actually a problem diff --git a/src/externals/pio1/tests/unittests/README b/src/externals/pio1/tests/unittests/README deleted file mode 100644 index db7c5626cf6..00000000000 --- a/src/externals/pio1/tests/unittests/README +++ /dev/null @@ -1,5 +0,0 @@ -1) Make sure pio is built in ../pio - -3) Determine what tests to run by setting namelist values in input.nl - -4) Use your machine's mpirun to launch "piotest" with the desired number of tasks diff --git a/src/externals/pio1/tests/unittests/basic_tests.F90 b/src/externals/pio1/tests/unittests/basic_tests.F90 deleted file mode 100644 index c3b5d9d60b3..00000000000 --- a/src/externals/pio1/tests/unittests/basic_tests.F90 +++ /dev/null @@ -1,263 +0,0 @@ -!> -!! @file -!! @brief Module containing basic unit tests that are run for both -!! binary and netcdf file types. -!< - -module basic_tests - - use pio - use global_vars - - Implicit None - private - save - - public :: test_create - public :: test_open - - Contains - - Subroutine test_create(test_id, err_msg) - ! test_create(): - ! * Create an empty file, close it, test that it can be opened - ! * For netcdf / pnetcdf: - ! - Create same file with PIO_CLOBBER mode - ! - Try to create same file with PIO_NOCLOBBER mode, check for error - ! Routines used in test: PIO_createfile, PIO_openfile, PIO_closefile - ! Also uses PIO_enddef for [p]netcdf tests - - ! Input / Output Vars - integer, intent(in) :: test_id - character(len=str_len), intent(out) :: err_msg - - ! Local Vars - character(len=str_len) :: filename - integer :: iotype, ret_val, pio_dim - - err_msg = "no_error" - - filename = fnames(test_id) - iotype = iotypes(test_id) - - ! Delete file before initial create - if (master_task) call system("rm -f " // trim(filename)) - - ! Original file creation - ret_val = PIO_createfile(pio_iosystem, pio_file, iotype, filename) - if (ret_val.ne.0) then - ! Error in PIO_createfile - err_msg = "Could not create " // trim(filename) - return - end if - - - ! netcdf files need to end define mode before closing - if (is_netcdf(iotype)) then - ret_val = PIO_enddef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_enddef - err_msg = "Could not end define mode" - call PIO_closefile(pio_file) - return - end if - end if - call PIO_closefile(pio_file) - - ! Test opening of file - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, filename, PIO_nowrite) - if (ret_val.ne.0) then - ! Error in PIO_openfile - err_msg = "Could not open " // trim(filename) - return - end if - - ! Close file - call PIO_closefile(pio_file) - - ! Recreate file with CLOBBER (netcdf / pnetcdf only) - if (is_netcdf(iotype)) then - ret_val = PIO_createfile(pio_iosystem, pio_file, iotype, filename, PIO_CLOBBER) - if (ret_val.ne.0) then - ! Error in PIO_createfile - err_msg = "Could not clobber " // trim(filename) - return - end if - - ! Leave define mode - ret_val = PIO_enddef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_enddef - err_msg = "Could not end define mode in clobbered file" - call PIO_closefile(pio_file) - return - end if - - ! Close file - call PIO_closefile(pio_file) - end if - - ! 
Recreate file with NOCLOBBER - if (is_netcdf(iotype)) then - ret_val = PIO_createfile(pio_iosystem, pio_file, iotype, filename, PIO_NOCLOBBER) - if (ret_val.eq.0) then - ! Error in PIO_createfile - err_msg = "Was able to clobber file despite PIO_NOCLOBBER" - ret_val = PIO_enddef(pio_file) - call PIO_closefile(pio_file) - return - end if - end if - - End Subroutine test_create - - Subroutine test_open(test_id, err_msg) - ! test_open(): - ! * Try to open file that doesn't exist, check for error - ! * Open a file with PIO_write, write something, close - ! * Open a file with PIO_nowrite, try to write, check for error - ! * For netcdf / pnetcdf: - ! - Try to open non-netcdf file, check for error - ! Routines used in test: PIO_initdecomp, PIO_openfile, PIO_write_darray, - ! PIO_closefile, PIO_freedecomp - ! Also uses PIO_createfile for binary tests - ! PIO_redef, PIO_def_dim, PIO_def_var, PIO_enddef for [p]netcdf tests - - ! Input / Output Vars - integer, intent(in) :: test_id - character(len=str_len), intent(out) :: err_msg - - ! Local Vars - character(len=str_len) :: filename - integer :: iotype, ret_val - - ! Data used to test writing - integer, dimension(3) :: data_to_write, compdof - integer, dimension(1) :: dims - type(io_desc_t) :: iodesc_nCells - integer :: pio_dim - type(var_desc_t) :: pio_var - - err_msg = "no_error" - dims(1) = 3*ntasks - compdof = 3*my_rank+(/1,2,3/) ! Where in the global array each task writes - data_to_write = my_rank - - call PIO_initdecomp(pio_iosystem, PIO_int, dims, compdof, iodesc_nCells) - - filename = fnames(test_id) - iotype = iotypes(test_id) - - ! Open file that doesn't exist - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, "FAKE.FILE", & - PIO_nowrite, CheckMPI=.false.) - if (ret_val.eq.0) then - ! Error in PIO_openfile - err_msg = "Successfully opened file that doesn't exist" - call PIO_closefile(pio_file) - return - end if - - ! Open existing file, write data to it (for binary file, need to create new file) - if (is_netcdf(iotype)) then - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, filename, PIO_write) - else - ret_val = PIO_createfile(pio_iosystem, pio_file, iotype, filename) - end if - if (ret_val.ne.0) then - ! Error in PIO_openfile (or PIO_createfile) - err_msg = "Could not open " // trim(filename) // " in write mode" - return - end if - - ! Enter define mode for netcdf files - if (is_netcdf(iotype)) then - ret_val = PIO_redef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_redef - err_msg = "Could not enter redef mode" - call PIO_closefile(pio_file) - return - end if - - ! Define a new dimension N - ret_val = PIO_def_dim(pio_file, 'N', 3*ntasks, pio_dim) - if (ret_val.ne.0) then - ! Error in PIO_def_dim - err_msg = "Could not define dimension N" - call PIO_closefile(pio_file) - return - end if - - ! Define a new variable foo - ret_val = PIO_def_var(pio_file, 'foo', PIO_int, & - (/pio_dim/), pio_var) - if (ret_val.ne.0) then - ! Error in PIO_def_var - err_msg = "Could not define variable foo" - call PIO_closefile(pio_file) - return - end if - - ! Leave define mode - ret_val = PIO_enddef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_enddef - print *,__FILE__,__LINE__,ret_val - err_msg = "Could not end define mode" - call PIO_closefile(pio_file) - return - end if - end if - - ! Write foo - call PIO_write_darray(pio_file, pio_var, iodesc_nCells, data_to_write, ret_val) - if (ret_val.ne.0) then - ! Error in PIO_write_darray - err_msg = "Could not write data" - call PIO_closefile(pio_file) - return - end if - - ! 
Close file - call PIO_closefile(pio_file) - - ! Open existing file with PIO_nowrite, try to write (netcdf only) - if (is_netcdf(iotype)) then - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, filename, PIO_nowrite) - if (ret_val.ne.0) then - ! Error opening file - err_msg = "Could not open file in NoWrite mode" - return - end if - - ! Try to write (should fail) - call PIO_write_darray(pio_file, pio_var, iodesc_nCells, data_to_write, ret_val) - if (ret_val.eq.0) then - ! Error in PIO_write_darray - err_msg = "Wrote to file opened in NoWrite mode" - call PIO_closefile(pio_file) - return - end if - ! Close file - call PIO_closefile(pio_file) - end if - - ! Try to open standard binary file as netcdf (if iotype = netcdf) - if (is_netcdf(iotype)) then - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, & - "not_netcdf.ieee", PIO_nowrite) - if (ret_val.eq.0) then - ! Error in PIO_openfile - err_msg = "Opened a non-netcdf file as netcdf" - call PIO_closefile(pio_file) - return - end if - end if - - ! Free decomp - call PIO_freedecomp(pio_iosystem, iodesc_nCells) - - End Subroutine test_open - -end module basic_tests diff --git a/src/externals/pio1/tests/unittests/driver.F90 b/src/externals/pio1/tests/unittests/driver.F90 deleted file mode 100644 index 68ac1203dd5..00000000000 --- a/src/externals/pio1/tests/unittests/driver.F90 +++ /dev/null @@ -1,240 +0,0 @@ -!> -!! @file -!! @brief The driver for PIO unit tests -!< - -Program pio_unit_test_driver - use pio - use global_vars - use basic_tests - use ncdf_tests -#ifdef TIMING - use perf_mod ! _EXTERNAL -#endif - - Implicit None - - ! local variables - character(len=str_len) :: err_msg - integer :: fail_cnt, test_cnt, ios, test_id, ierr, test_val - logical :: ltest_bin, ltest_bin_direct, ltest_netcdf, ltest_pnetcdf - logical :: ltest_netcdf4p, ltest_netcdf4c - namelist/piotest_nml/ltest_bin, & - ltest_bin_direct, & - ltest_netcdf, & - ltest_netcdf4p, & - ltest_netcdf4c, & - ltest_pnetcdf, & - stride -#if defined( _NETCDF4) && defined(LOGGING) - integer, external :: nc_set_log_level2 -#endif - character(len=*), parameter :: nml_filename='input.nl' - ! Set up MPI - call MPI_Init(ierr) - call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr) - call MPI_Comm_size(MPI_COMM_WORLD, ntasks , ierr) -#ifdef TIMING - call t_initf(nml_filename, logprint=.false., logunit=6, & - mpicom=MPI_COMM_WORLD) -#endif - - - - master_task = my_rank.eq.0 - - if (master_task) then - ltest_bin = .false. - ltest_bin_direct = .false. - ltest_netcdf = .false. - ltest_netcdf4p = .false. - ltest_netcdf4c = .false. - ltest_pnetcdf = .false. - stride = 1 - - open(615, file=nml_filename) - read(615, nml=piotest_nml, iostat=ios) - if (ios.ne.0) then - print*, "ERROR reading ",nml_filename, " exiting!" - end if - close(615) - - write(*,"(A,x,I0,x,A,x,I0)") "Running unit tests with", ntasks, & - "MPI tasks and stride of", stride - - if (stride.gt.ntasks) then - stride = ntasks - write(*,"(A,x,A,I0)") "WARNING: stride value in namelist is larger than", & - "number of MPI tasks, reducing stride to ", stride - end if - - ! Ignore namelist values if PIO not built with correct options - ! (i.e. don't test pnetcdf if not built with pnetcdf) -#ifndef USEMPIIO - if (ltest_bin.or.ltest_bin_direct) then - write(*,"(A,x,A)") "WARNING: can not test binary files because PIO", & - "was not compiled with -DUSEMPIIO" - ltest_bin = .false. - ltest_bin_direct = .false. 
- end if -#endif -#ifndef _NETCDF - if (ltest_netcdf) then - write(*,"(A,x,A)") "WARNING: can not test netcdf files because PIO", & - "was not compiled with -D_NETCDF" - ltest_netcdf = .false. - end if -#endif -#ifndef _NETCDF4 - if (ltest_netcdf4p) then - write(*,"(A,x,A)") "WARNING: can not test netcdf4p files because PIO", & - "was not compiled with -D_NETCDF4" - ltest_netcdf4p = .false. - end if - if (ltest_netcdf4c) then - write(*,"(A,x,A)") "WARNING: can not test netcdf4c files because PIO", & - "was not compiled with -D_NETCDF4" - ltest_netcdf4c = .false. - end if -#endif -#ifndef _PNETCDF - if (ltest_pnetcdf) then - write(*,"(A,x,A)") "WARNING: can not test pnetcdf files because PIO", & - "was not compiled with -D_PNETCDF" - ltest_pnetcdf = .false. - end if -#endif - write(*,"(A)") "------" - - end if - - call MPI_Bcast(ios,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - if (ios.ne.0) then - call MPI_Abort(MPI_COMM_WORLD) - end if - - ltest(BINARY) = ltest_bin - ltest(BINDIR) = ltest_bin_direct - ltest(NETCDF) = ltest_netcdf - ltest(NETCDF4P) = ltest_netcdf4p - ltest(NETCDF4C) = ltest_netcdf4c - ltest(PNETCDF) = ltest_pnetcdf - - call MPI_Bcast(ltest,ntest,MPI_LOGICAL,0,MPI_COMM_WORLD,ierr) - call MPI_Bcast(stride,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - niotasks = ntasks/stride - - ! Set up PIO - call PIO_init(my_rank, & ! MPI rank - MPI_COMM_WORLD, & ! MPI communicator - niotasks, & ! Number of iotasks (ntasks/stride) - 0, & ! num_aggregator (?) - stride, & ! Stride - PIO_rearr_box, & ! rearr - pio_iosystem) ! iosystem - - call PIO_seterrorhandling(pio_iosystem, PIO_BCAST_ERROR) - - fail_cnt = 0 - test_cnt = 0 - - do test_id=1,ntest - if (ltest(test_id)) then - ! Make sure i is a valid test number - select case (test_id) - case (BINARY) - if (master_task) & - write(*,"(A)") "Testing PIO's binary input / output:" - case (BINDIR) - if (master_task) & - write(*,"(A)") "Testing PIO's direct binary input / output:" - case (NETCDF4P) - if (master_task) & - write(*,"(A)") "Testing PIO's netcdf4 parallel input / output:" - case (NETCDF4C) - if (master_task) & - write(*,"(A)") "Testing PIO's netcdf4 compressed input / output:" - case (NETCDF) - if (master_task) & - write(*,"(A)") "Testing PIO's netcdf input / output:" - case (PNETCDF) - if (master_task) & - write(*,"(A)") "Testing PIO's pnetcdf input / output:" - case DEFAULT - if (master_task) & - write(*,"(A,I0)") "Error, not configured for test #", test_id - call MPI_Abort(MPI_COMM_WORLD) - end select -#if defined( _NETCDF4) && defined(LOGGING) - if(master_task) ierr = nc_set_log_level2(3) -#endif - ! test_create() - if (master_task) write(*,"(3x,A,x)",advance="no") "testing PIO_createfile..." - call test_create(test_id, err_msg) - call parse(err_msg, fail_cnt) - - ! test_open() - if (master_task) write(*,"(3x,A,x)", advance="no") "testing PIO_openfile...",test_id - call test_open(test_id, err_msg) - call parse(err_msg, fail_cnt) - - ! netcdf-specific tests - if (is_netcdf(iotypes(test_id))) then - if (master_task) write(*,"(3x,A,x)", advance="no") "testing PIO_redef..." - call test_redef(test_id, err_msg) - call parse(err_msg, fail_cnt) - - if (master_task) write(*,"(3x,A,x)", advance="no") "testing PIO_enddef..." - call test_enddef(test_id, err_msg) - call parse(err_msg, fail_cnt) - end if - - if (master_task) write(*,*) "" - - end if ! ltest(test_id) - - end do - - if (master_task) then - write(*,"(A,I0)") "Total failure count: ", fail_cnt - if (fail_cnt.eq.0) then - write(*,"(A)") "PASSED unit testing." 
- else - write(*,"(A)") "FAILED unit testing." - end if - end if - - call PIO_finalize(pio_iosystem, ierr) - -#ifdef TIMING - call t_prf('timing.unittests',MPI_COMM_WORLD) - call t_finalizef() -#endif - - call MPI_Finalize(ierr) - if(fail_cnt>0) then - stop 1 - else - stop 0 - endif -Contains - - Subroutine parse(err_msg, fail_counter) - - character(len=*), intent(in) :: err_msg - integer, intent(inout) :: fail_counter - logical :: test_passed - - if (master_task) then - test_passed = (trim(err_msg).eq."no_error") - if (test_passed) then - write(*,"(A)") "success!" - else - write(*,"(A)") "FAILURE: " // trim(err_msg) - fail_counter = fail_counter+1 - end if - end if - - End Subroutine parse - -End Program pio_unit_test_driver diff --git a/src/externals/pio1/tests/unittests/global_vars.F90 b/src/externals/pio1/tests/unittests/global_vars.F90 deleted file mode 100644 index c5ae0ae7bbf..00000000000 --- a/src/externals/pio1/tests/unittests/global_vars.F90 +++ /dev/null @@ -1,63 +0,0 @@ -!> -!! @file -!! @brief Module containing variables used across all unit test files -!< - -module global_vars - - use pio - - Implicit None - public - - include 'mpif.h' ! _EXTERNAL - - integer, parameter :: str_len = 255, ntest=6 - integer, parameter :: BINARY =1, & - BINDIR =2, & - NETCDF =3, & - NETCDF4P=4, & - NETCDF4C=5, & - PNETCDF=6 - - ! MPI Variables - integer :: my_rank, ntasks - logical :: master_task - - ! PIO Variables - integer :: stride, niotasks - type(iosystem_desc_t), save :: pio_iosystem - type(file_desc_t), save :: pio_file - - ! Arguments for the different tests - character(len=str_len), dimension(ntest) :: fnames = (/& - "piotest_bin.ieee ", & - "piotest_bin2.ieee ", & - "piotest_netcdf.nc ", & - "piotest_netcdf4p.nc ", & - "piotest_netcdf4c.nc ", & - "piotest_pnetcdf.nc "/) - integer, dimension(ntest) :: iotypes = (/PIO_iotype_pbinary, & - PIO_iotype_direct_pbinary, & - PIO_iotype_netcdf, & - PIO_iotype_netcdf4p, & - PIO_iotype_netcdf4c, & - PIO_iotype_pnetcdf/) - logical, dimension(ntest) :: ltest - - Contains - - Function is_netcdf(iotype) - - integer, intent(in) :: iotype - logical :: is_netcdf - - is_netcdf = & - (iotype.eq.PIO_iotype_netcdf) .or. & - (iotype.eq.PIO_iotype_netcdf4p) .or. & - (iotype.eq.PIO_iotype_netcdf4c) .or. & - (iotype.eq.PIO_iotype_pnetcdf) - - End Function is_netcdf - -end module global_vars diff --git a/src/externals/pio1/tests/unittests/input.nl b/src/externals/pio1/tests/unittests/input.nl deleted file mode 100644 index 9c25cae9ba1..00000000000 --- a/src/externals/pio1/tests/unittests/input.nl +++ /dev/null @@ -1,9 +0,0 @@ -&piotest_nml -ltest_bin = .true. -!ltest_bin_direct = .true. ! Ignored if PIO is not built with -DUSEMPIIO -ltest_netcdf = .true. ! Ignored if PIO is not built with -D_NETCDF -ltest_netcdf4p = .true. ! Ignored if PIO is not built with -D_NETCDF4 -ltest_netcdf4c = .true. ! Ignored if PIO is not built with -D_NETCDF4 -ltest_pnetcdf = .true. ! 
Ignored if PIO is not built with -D_PNETCDF -stride = 3 -/ diff --git a/src/externals/pio1/tests/unittests/nc_set_log_level2.c b/src/externals/pio1/tests/unittests/nc_set_log_level2.c deleted file mode 100644 index deaf9087561..00000000000 --- a/src/externals/pio1/tests/unittests/nc_set_log_level2.c +++ /dev/null @@ -1,10 +0,0 @@ -#if defined(_NETCDF4) && defined(LOGGING) -#include - -int nc_set_log_level2_(int *il) -{ - int i; - i = nc_set_log_level( *il ); - return(i); -} -#endif diff --git a/src/externals/pio1/tests/unittests/ncdf_tests.F90 b/src/externals/pio1/tests/unittests/ncdf_tests.F90 deleted file mode 100644 index 0417d024434..00000000000 --- a/src/externals/pio1/tests/unittests/ncdf_tests.F90 +++ /dev/null @@ -1,237 +0,0 @@ -!> -!! @file -!! @brief Module containing netcdf-specific PIO unit tests -!< - -module ncdf_tests - - use pio - use global_vars - - Implicit None - private - save - - public :: test_redef - public :: test_enddef - - Contains - - Subroutine test_redef(test_id, err_msg) - ! test_redef(): - ! * Open file, enter define mode, add dimension / variable / attribute - ! * Try to run PIO_redef from define mode, check for error - ! * Leave define mode, close file - ! * Try to run PIO_redef with closed file - ! Routines used in test: PIO_initdecomp, PIO_openfile, PIO_redef, PIO_def_dim, - ! PIO_def_var, PIO_put_att, PIO_enddef, - ! PIO_write_darray, PIO_closefile, PIO_freedecomp - - ! Input / Output Vars - integer, intent(in) :: test_id - character(len=str_len), intent(out) :: err_msg - - ! Local Vars - character(len=str_len) :: filename - integer :: iotype, ret_val - - ! Data used to test writing - integer, dimension(2) :: data_to_write, compdof - integer, dimension(1) :: dims - type(io_desc_t) :: iodesc_nCells - integer :: pio_dim - type(var_desc_t) :: pio_var - - err_msg = "no_error" - - dims(1) = 2*ntasks - compdof = 2*my_rank+(/1,2/) ! Where in the global array each task writes - data_to_write = 1+my_rank - - call PIO_initdecomp(pio_iosystem, PIO_int, dims, compdof, iodesc_nCells) - - filename = fnames(test_id) - iotype = iotypes(test_id) - - ! Open existing file, write data to it - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, filename, PIO_write) - if (ret_val.ne.0) then - ! Error in PIO_openfile - err_msg = "Could not open " // trim(filename) // " in write mode" - return - end if - - ! Enter define mode - ret_val = PIO_redef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_redef - err_msg = "Could not enter define mode" - call PIO_closefile(pio_file) - return - end if - - ! Define a new dimension M (already has 'N' from previous tests) - ret_val = PIO_def_dim(pio_file, 'M', 2*ntasks, pio_dim) - if (ret_val.ne.0) then - err_msg = "Could not define dimension M" - call PIO_closefile(pio_file) - return - end if - - ! Define a new variable foo2 (already has 'foo' from previous tests) - ret_val = PIO_def_var(pio_file, 'foo2', PIO_int, & - (/pio_dim/), pio_var) - if (ret_val.ne.0) then - ! Error in PIO_def_var - err_msg = "Could not define variable foo2" - call PIO_closefile(pio_file) - return - end if - - ret_val = PIO_put_att(pio_file, pio_var, "max_val", ntasks) - if (ret_val.ne.0) then - ! Error in PIO_put_att - err_msg = "Could not define max_val attribute for foo2" - call PIO_closefile(pio_file) - return - end if - - ret_val = PIO_put_att(pio_file, PIO_global, "created_by", "PIO unit tests") - if (ret_val.ne.0) then - ! Error in PIO_put_att - err_msg = "Could not define global attribute" - call PIO_closefile(pio_file) - return - end if - - ! 
Try to enter define mode again - ret_val = PIO_redef(pio_file) - if (ret_val.eq.0) then - ! Error in PIO_redef - err_msg = "Entered define mode from define mode" - call PIO_closefile(pio_file) - return - end if - - ! Leave define mode - ret_val = PIO_enddef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_enddef - err_msg = "Could not end define mode" - return - end if - - ! Write foo2 - call PIO_write_darray(pio_file, pio_var, iodesc_nCells, data_to_write, ret_val) - if (ret_val.ne.0) then - ! Error in PIO_write_darray - err_msg = "Could not write data" - return - end if - - ! Close file - call PIO_closefile(pio_file) - - ! Try to enter define mode again - ret_val = PIO_redef(pio_file) - if (ret_val.eq.0) then - ! Error in PIO_redef - err_msg = "Entered define mode from a closed file" - return - end if - - ! Free decomp - call PIO_freedecomp(pio_iosystem, iodesc_nCells) - - End Subroutine test_redef - - Subroutine test_enddef(test_id, err_msg) - ! test_enddef(): - ! * Open file with PIO_nowrite, try to enter define mode, check for error - ! * Open file with PIO_write, enter define mode, leave define mode - ! * Try calling PIO_enddef from data mode, check for error - ! * Close file - ! * Try to run PIO_enddef with closed file - ! Routines used in test: PIO_openfile, PIO_redef, PIO_enddef, PIO_closefile - - ! Input / Output Vars - integer, intent(in) :: test_id - character(len=str_len), intent(out) :: err_msg - - ! Local Vars - character(len=str_len) :: filename - integer :: iotype, ret_val - - err_msg = "no_error" - filename = fnames(test_id) - iotype = iotypes(test_id) - - ! Open existing file (read-only) - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, filename, PIO_nowrite) - if (ret_val.ne.0) then - ! Error in PIO_openfile - err_msg = "Could not open " // trim(filename) // " in write mode" - return - end if - - ! Enter define mode - ret_val = PIO_redef(pio_file) - if (ret_val.eq.0) then - ! Error in PIO_redef - err_msg = "Entered define mode in read-only file" - call PIO_closefile(pio_file) - return - end if - - ! Close file - call PIO_closefile(pio_file) - - ! Open existing file - ret_val = PIO_openfile(pio_iosystem, pio_file, iotype, filename, PIO_write) - if (ret_val.ne.0) then - ! Error in PIO_openfile - err_msg = "Could not open " // trim(filename) // " in write mode" - return - end if - - ! Enter define mode - ret_val = PIO_redef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_redef - err_msg = "Could not enter define mode" - call PIO_closefile(pio_file) - return - end if - - ! End define mode - ret_val = PIO_enddef(pio_file) - if (ret_val.ne.0) then - ! Error in PIO_enddef - err_msg = "Could not end define mode" - call PIO_closefile(pio_file) - return - end if - - ! Try to end define mode from data mode - ret_val = PIO_enddef(pio_file) - if (ret_val.eq.0) then - ! Error in PIO_enddef - err_msg = "Ended define mode while in data mode" - call PIO_closefile(pio_file) - return - end if - - ! Close file - call PIO_closefile(pio_file) - - ! Try to end define mode in un-opened file - ret_val = PIO_enddef(pio_file) - if (ret_val.eq.0) then - ! 
Error in PIO_enddef - err_msg = "Ended define mode in a file that was already closed" - return - end if - - End Subroutine test_enddef - -end module ncdf_tests diff --git a/src/externals/pio1/tests/unittests/not_netcdf.ieee b/src/externals/pio1/tests/unittests/not_netcdf.ieee deleted file mode 100644 index 911064ba935..00000000000 Binary files a/src/externals/pio1/tests/unittests/not_netcdf.ieee and /dev/null differ diff --git a/src/externals/pio1/timing/CMakeLists.txt b/src/externals/pio1/timing/CMakeLists.txt deleted file mode 100644 index 8741699e1c2..00000000000 --- a/src/externals/pio1/timing/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -INCLUDE(FortranCInterface) -FortranCInterface_HEADER(cmake_fortran_c_interface.h - MACRO_NAMESPACE "FCI_") - -ADD_DEFINITIONS(${PIO_DEFINITIONS}) - -SET(TIMING_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} - CACHE STRING "") -INCLUDE_DIRECTORIES(${TIMING_INCLUDE_DIRS}) - -ADD_DEFINITIONS(-DINCLUDE_CMAKE_FCI -DHAVE_MPI) - -SET(SRCS_C GPTLget_memusage.c - GPTLprint_memusage.c - GPTLutil.c - f_wrappers.c - gptl.c - gptl_papi.c - threadutil.c) - -SET(SRCS_F90 perf_mod.F90 - perf_utils.F90) - -ADD_LIBRARY(timing ${SRCS_F90} ${SRCS_C}) diff --git a/src/externals/pio1/timing/COPYING b/src/externals/pio1/timing/COPYING deleted file mode 100644 index 94a9ed024d3..00000000000 --- a/src/externals/pio1/timing/COPYING +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. 
- - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. 
- - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/src/externals/pio1/timing/ChangeLog b/src/externals/pio1/timing/ChangeLog deleted file mode 100644 index 8bbbbcfe4fc..00000000000 --- a/src/externals/pio1/timing/ChangeLog +++ /dev/null @@ -1,141 +0,0 @@ -timing_120921: Add code for cmake build, should not have any affect otherwise -timing_120803: Bug fix in setting timing_detail_limit default. - [Patrick Worley] -timing_120731: Correction in Makefile for serial build [Jim Edwards] -timing_120728: Replace process subset optional parameter in t_prf with - outpe_thispe optional parameter. Change def_perf_outpe_num to 0. 
- [Patrick Worley] -timing_120717: Retain timestamp on cp in Makefile [Jim Edwards] -timing_120710: Correct issue in Makefile [Jim Edwards] -timing_120709: Change for BGP to measure on compute nodes rather than IO nodes only, - minor Change in Makefile so that gptl can build seperate from csm_share - in cesm [Jim Edwards] -timing_120512: Bug fix in global statistics logic for when a thread has no events - to contribute to the merge (mods to gptl.c) - [Patrick Worley] -timing_120419: Minor changes for mpi-serial compile (jedwards) -timing_120408: Make HAVE_COMM_F2C default to true. (jedwards) -timing_120110: Update to GPTL 4.1 source (mods to gptl.c and GPTLprint_memusage) - [Jim Rosinski (GPTL 4.1), Patrick Worley] -timing_120109: Bug fix (adding shr_kind_i8 to shr_kind_mod list) -timing_111205: Update to gptl 4.0 (introducing CESM customizations); - support for handles in t_startf/t_stopf; - support for restricting output to explicitly named process subsets - [Jim Rosinski (gptl 4.0), Patrick Worley] -timing_111101: Workaround for mpi_rsend issue on cray/gemini -timing_110928: Add a Makefile and build as a library usable by mct and pio -timing_101215: No changes from previous tag other than updating Changelog -timing_101210: Fix interface to cesm build system, add workaround for xlf bug -timing_101202: updated get_memusage and print_memusage from GPTL version 3.7; adds - improved support for MacOS and SLASHPROC - [Jim Rosinski, Chuck Bardeen (integrated by P. Worley)] -timing_091021: update to GPTL version 3.5; rewrite of GPTLpr_summary: much faster, merging - events from all processes and all threads (not just process 0/thread 0); - miscellaneous fixes - [Jim Rosinski (gptl 3.5), Joseph Singh, Patrick Worley] -timing_090929: added explicit support for the GPTL-native token HAVE_MPI (indicating - presence of MPI library) - [Patrick Worley] -timing_081221: restore default assumption that gettimeofday available -timing_081028: bug fix in include order in gptl_papi.c -timing_081026: change in output format to make postprocessing simpler -timing_081024: support for up to one million processes and writing timing files to - subdirectories -timing_081017: updated to gptl version 3_4_2. Changed some defaults. - [Jim Rosinski, Patrick Worley] -timing_080629: added optional parameters perf_outpe_num and perf_outpe_stride to t_prf. - These are used to override the user specified values for timing data - written out before the end of a simulation. - [Patrick Worley] -timing_071213: changed default to disable inline keyword; changed global statistics - logic to avoid problems at scale; moved shr and CAM routine equivalencies - to a new module (in perf_utils.F90); added t_getLogUnit/t_setLogUnit - routines to control Log output in same way as shr_file_get/setLogUnit; - modified GPTLpr logic to support output of timing data during a run - [Patrick Worley] -timing_071023: updated to gptl version 2.16, added support for output of global - statistics; removed dependencies on shr and CAM routines; renamed - gptlutil.c to GPTLutil.c - [Patrick Worley, Jim Rosinski] -timing_071019: modified namelist logic to abort if try to set unknown namelist parameters; - changed default number of reporting processes to 1; - reversed meaning and changed names of CPP tokens to NO_C99_INLINE and NO_VPRINTF - [Patrick Worley] -timing_071010: modified gptl.c to remove the 'inline' specification unless the - CPP token C99 is defined. 
- [Patrick Worley] -timing_070810: added ChangeLog - updated to latest version of GPTL (from Jim Rosinski) - modified perf_mod.F90: - - added perf_outpe_num and perf_outpe_stride to perf_inparm - namelist to control which processes output timing data - - added perf_papi_enable to perf_inparm namelist to enable - PAPI counters - - added papi_inparm namelist and papi_ctr1,2,3,4 namelist - parameters to specify PAPI counters - [Patrick Worley, Jim Rosinski] -timing_070525: bug fix in gptl.c - - unitialized pointer, testing for null pter - before traversing - [Patrick Worley] -timing_070328: modified perf_mod.F90 - - deleted HIDE_MPI cpp token - [Erik Kluzek] -timing_070327: bug fixes in gptl.c - - testing for null pters before traversing - links; added missing type declaration to GPTLallocate for sum - bug fixes in perf_mod.F90 - - fixed OMP-related logic, modified settings reporting, - modified to work when namelist input is - missing; moved timer depth logic back into gptl.c - [Patrick Worley] -timing_070308: added perf_mod.F90 - - defines all t_xxx entry points - calling gptlxxx directly - and removing all external gptlxxx dependencies, - added detail option as an alternative way to disable - event timing, added runtime selection of timing_disable, - perf_timer, timer_depth_limit, timing_detail_limit, - timing_barrier, perf_single_file via namelist parameters - modified f_wrappers.c - - replaced all t_xxx entry points with gptlxxx entry points, - added new gptlxxx entry points, deleted _fcd support - modified gptl.c - - deleted DISABLE_TIMERS cpp token, modified GPTLpr call - and logic to move some of support for concatenating timing - output into a single file to perf_mod.F90 - modified gptl.h - - exposed gptlxxx entry points and to add support for choice - of GPTL timer - modified gptl.inc - - removed t_xxx entry points and expose gptlxxx entry points - [Patrick Worley] -timing_061207: modified gptl.c - - improved event output ordering - [Jim Edwards] -timing_061124: modified gptl.c - - modified GPTLpr to add option to concatenate - all timing data in a single output file, added GPTL_enable - and GPTL_disable as runtime control of event timing, - process 0-only reporting of timing options - unless DEBUG - cpp token defined - modified gptl.h - - redefined GPTLpr parameters - modified f_wrappers.c - - added t_enablef and t_disablef to call GPTL_enable and - GPTL_disable, added t_pr_onef, added string.h include - bug fix in f_wrappers.c - - changed character string size declaration from int to size_t - bug fix in gptl_papi.c - - modified error message - from Jim Edwards - modified private.h - - increased maximum event name length - [Patrick Worley] -timing_061028: modified f_wrappers.c - - deleted dependency on cfort.h - [Patrick Worley] -timing_060524: modified f_wrappers.c - - added support for CRAY cpp token and fixed routine - type declarations - [Patrick Worley] -timing_051212: original subversion version - - see CAM ChangeLog for earlier history diff --git a/src/externals/pio1/timing/GPTLget_memusage.c b/src/externals/pio1/timing/GPTLget_memusage.c deleted file mode 100644 index 4b0d138b2b6..00000000000 --- a/src/externals/pio1/timing/GPTLget_memusage.c +++ /dev/null @@ -1,160 +0,0 @@ -/* -** $Id: get_memusage.c,v 1.10 2010-11-09 19:08:53 rosinski Exp $ -** -** Author: Jim Rosinski -** Credit to Chuck Bardeen for MACOS section (__APPLE__ ifdef) -** -** get_memusage: -** -** Designed to be called from Fortran, returns information about memory -** usage in each of 5 input int* args. 
On Linux read from the /proc -** filesystem because getrusage() returns placebos (zeros). Return -1 for -** values which are unavailable or ambiguous on a particular architecture. -** -** Return value: 0 = success -** -1 = failure -*/ - -#include -#include "gptl.h" /* additional cpp defs and function prototypes */ - -/* _AIX is automatically defined when using the AIX C compilers */ -#ifdef _AIX -#include -#endif - -#ifdef IRIX64 -#include -#endif - -#ifdef HAVE_SLASHPROC - -#include -#include -#include -#include - -#elif (defined __APPLE__) - -#include -#include -#include - -#endif - -#ifdef BGP - -#include -#include -#include -#include -#define Personality _BGP_Personality_t - -#endif - - -int GPTLget_memusage (int *size, int *rss, int *share, int *text, int *datastack) -{ -#ifdef BGP - - long long alloc; - struct mallinfo m; - Personality pers; - - long long total; - int node_config; - - /* memory available */ - Kernel_GetPersonality(&pers, sizeof(pers)); - total = BGP_Personality_DDRSizeMB(&pers); - - node_config = BGP_Personality_processConfig(&pers); - if (node_config == _BGP_PERS_PROCESSCONFIG_VNM) total /= 4; - else if (node_config == _BGP_PERS_PROCESSCONFIG_2x2) total /= 2; - total *= 1024*1024; - - *size = total; - - /* total memory used - heap only (not static memory)*/ - - m = mallinfo(); - alloc = m.hblkhd + m.uordblks; - - *rss = alloc; - *share = -1; - *text = -1; - *datastack = -1; - - -#elif (defined HAVE_SLASHPROC) - FILE *fd; /* file descriptor for fopen */ - int pid; /* process id */ - static char *head = "/proc/"; /* part of path */ - static char *tail = "/statm"; /* part of path */ - char file[19]; /* full path to file in /proc */ - int dum; /* placeholder for unused return arguments */ - int ret; /* function return value */ - - /* - ** The file we want to open is /proc//statm - */ - - pid = (int) getpid (); - if (pid > 999999) { - fprintf (stderr, "get_memusage: pid %d is too large\n", pid); - return -1; - } - - sprintf (file, "%s%d%s", head, pid, tail); - if ((fd = fopen (file, "r")) < 0) { - fprintf (stderr, "get_memusage: bad attempt to open %s\n", file); - return -1; - } - - /* - ** Read the desired data from the /proc filesystem directly into the output - ** arguments, close the file and return. - */ - - ret = fscanf (fd, "%d %d %d %d %d %d %d", - size, rss, share, text, datastack, &dum, &dum); - ret = fclose (fd); - return 0; - -#elif (defined __APPLE__) - - FILE *fd; - char cmd[60]; - int pid = (int) getpid (); - - sprintf (cmd, "ps -o vsz -o rss -o tsiz -p %d | grep -v RSS", pid); - fd = popen (cmd, "r"); - - if (fd) { - fscanf (fd, "%d %d %d", size, rss, text); - *share = -1; - *datastack = -1; - (void) pclose (fd); - } - - return 0; - -#else - - struct rusage usage; /* structure filled in by getrusage */ - - if (getrusage (RUSAGE_SELF, &usage) < 0) - return -1; - - *size = -1; - *rss = usage.ru_maxrss; - *share = -1; - *text = -1; - *datastack = -1; -#ifdef IRIX64 - *datastack = usage.ru_idrss + usage.ru_isrss; -#endif - return 0; - -#endif -} diff --git a/src/externals/pio1/timing/GPTLprint_memusage.c b/src/externals/pio1/timing/GPTLprint_memusage.c deleted file mode 100644 index a185d61100f..00000000000 --- a/src/externals/pio1/timing/GPTLprint_memusage.c +++ /dev/null @@ -1,120 +0,0 @@ -/* -** $Id: print_memusage.c,v 1.13 2010-11-09 19:08:54 rosinski Exp $ -** -** Author: Jim Rosinski -** -** print_memusage: -** -** Prints info about memory usage of this process by calling get_memusage. 
-** -** Return value: 0 = success -** -1 = failure -*/ - -#include "gptl.h" -#include -#include -#include - -static int nearest_powerof2 (const int); -static int convert_to_mb = 1; /* true */ - -int GPTLprint_memusage (const char *str) -{ - int size, size2; /* process size (returned from OS) */ - int rss, rss2; /* resident set size (returned from OS) */ - int share, share2; /* shared data segment size (returned from OS) */ - int text, text2; /* text segment size (returned from OS) */ - int datastack, datastack2; /* data/stack size (returned from OS) */ - static int bytesperblock = -1; /* convert to bytes (init to invalid) */ - static const int nbytes = 1024*1024*10; /* allocate 10 MB */ - static double blockstomb; /* convert blocks to MB */ - void *space; /* allocated space */ - - if (GPTLget_memusage (&size, &rss, &share, &text, &datastack) < 0) - return -1; - -#if (defined HAVE_SLASHPROC || defined __APPLE__) - /* - ** Determine size in bytes of memory usage info presented by the OS. Method: allocate a - ** known amount of memory and see how much bigger the process becomes. - */ - - if (convert_to_mb && bytesperblock == -1 && (space = malloc (nbytes))) { - memset (space, 0, nbytes); /* ensure the space is really allocated */ - if (GPTLget_memusage (&size2, &rss2, &share2, &text2, &datastack2) == 0) { - if (size2 > size) { - /* - ** Estimate bytes per block, then refine to nearest power of 2. - ** The assumption is that the OS presents memory usage info in - ** units that are a power of 2. - */ - bytesperblock = (int) ((nbytes / (double) (size2 - size)) + 0.5); - bytesperblock = nearest_powerof2 (bytesperblock); - blockstomb = bytesperblock / (1024.*1024.); - printf ("GPTLprint_memusage: Using bytesperblock=%d\n", bytesperblock); - } - } - free (space); - } - - if (bytesperblock > 0) - printf ("%s size=%.1f MB rss=%.1f MB share=%.1f MB text=%.1f MB datastack=%.1f MB\n", - str, size*blockstomb, rss*blockstomb, share*blockstomb, - text*blockstomb, datastack*blockstomb); - else - printf ("%s size=%d rss=%d share=%d text=%d datastack=%d\n", - str, size, rss, share, text, datastack); - -#else - - /* - ** Use max rss as returned by getrusage. If someone knows how to - ** get the process size under AIX please tell me. - */ - - bytesperblock = 1024; - blockstomb = bytesperblock / (1024.*1024.); - if (convert_to_mb) - printf ("%s max rss=%.1f MB\n", str, rss*blockstomb); - else - printf ("%s max rss=%d\n", str, rss); -#endif - - return 0; -} - -/* -** nearest_powerof2: -** Determine nearest integer which is a power of 2. -** Note: algorithm can't use anything that requires -lm because this is a library, -** and we don't want to burden the user with having to add extra libraries to the -** link line. 
-** -** Input arguments: -** val: input integer -** -** Return value: nearest integer to val which is a power of 2 -*/ - -static int nearest_powerof2 (const int val) -{ - int lower; /* power of 2 which is just less than val */ - int higher; /* power of 2 which is just more than val */ - int delta1; /* difference between val and lower */ - int delta2; /* difference between val and higher */ - - if (val < 2) - return 0; - - for (higher = 1; higher < val; higher *= 2) - lower = higher; - - delta1 = val - lower; - delta2 = higher - val; - - if (delta1 < delta2) - return lower; - else - return higher; -} diff --git a/src/externals/pio1/timing/GPTLutil.c b/src/externals/pio1/timing/GPTLutil.c deleted file mode 100644 index f882834d2a1..00000000000 --- a/src/externals/pio1/timing/GPTLutil.c +++ /dev/null @@ -1,82 +0,0 @@ -/* -** $Id: util.c,v 1.13 2010-01-01 01:34:07 rosinski Exp $ -*/ - -#include -#include -#include - -#include "private.h" - -static bool abort_on_error = false; /* flag says to abort on any error */ -static int max_error = 500; /* max number of error print msgs */ - -/* -** GPTLerror: error return routine to print a message and return a failure -** value. -** -** Input arguments: -** fmt: format string -** variable list of additional arguments for vfprintf -** -** Return value: -1 (failure) -*/ - -int GPTLerror (const char *fmt, ...) -{ - va_list args; - - va_start (args, fmt); - static int num_error = 0; - - if (fmt != NULL && num_error < max_error) { -#ifndef NO_VPRINTF - (void) vfprintf (stderr, fmt, args); -#else - (void) fprintf (stderr, "GPTLerror: no vfprintf: fmt is %s\n", fmt); -#endif - if (num_error == max_error) - (void) fprintf (stderr, "Truncating further error print now after %d msgs", - num_error); - ++num_error; - } - - va_end (args); - - if (abort_on_error) - exit (-1); - - return (-1); -} - -/* -** GPTLset_abort_on_error: User-visible routine to set abort_on_error flag -** -** Input arguments: -** val: true (abort on error) or false (don't) -*/ - -void GPTLset_abort_on_error (bool val) -{ - abort_on_error = val; -} - -/* -** GPTLallocate: wrapper utility for malloc -** -** Input arguments: -** nbytes: size to allocate -** -** Return value: pointer to the new space (or NULL) -*/ - -void *GPTLallocate (const int nbytes) -{ - void *ptr; - - if ( nbytes <= 0 || ! 
(ptr = malloc (nbytes))) - (void) GPTLerror ("GPTLallocate: malloc failed for %d bytes\n", nbytes); - - return ptr; -} - diff --git a/src/externals/pio1/timing/Makefile b/src/externals/pio1/timing/Makefile deleted file mode 100644 index 37f1dc1198c..00000000000 --- a/src/externals/pio1/timing/Makefile +++ /dev/null @@ -1,88 +0,0 @@ -# -# Defined externally in Makefile.arch for each platform -# -# INCLUDES -# LIBS -# MPICC -# MPIF90 -# COPTS -# FOPTS -# CFLAGS -# FFLAGS -# AWK -# AR - -RM=/bin/rm -f -MODSUF = .mod -CPPSUF = .f90 - - -ifeq (,$(PIOARCH)) - PIOARCH=conf -endif -include ../pio/Makefile.$(PIOARCH) -export PIOARCH - -SRCS_C = GPTLget_memusage.c \ - GPTLprint_memusage.c \ - GPTLutil.c \ - f_wrappers.c \ - gptl.c \ - gptl_papi.c \ - threadutil.c \ - -SRCS_F90 = perf_mod.F90 \ - perf_utils.F90 - -OBJS= $(SRCS_C:.c=.o) \ - $(SRCS_F90:.F90=.o) - - -MODFILES := $(SRCS_F90:.F90=$(MODSUF)) - -PERL = /usr/bin/perl - -LIB= libtiming.a - -all: $(LIB) - - -# -# Suffix rules -# - -.SUFFIXES: -.SUFFIXES: .o .c .F90 $(DEPSUF) - - - -ifeq ($(EXPLICIT_CPP),yes) -SRCS_CPP= $(SRCS_F90:.F90=$(CPPSUF)) -.F90.o: - @if [ -w $*.f90 ] ; then echo "ERROR: file $*.f90 is writable - the .f90 suffix is reserved for temporary cpp output" ; exit 1; fi - $(RM) $*.f90 - $(CPP) $(CPPDEFS) $(CFLAGS) $(COPTS) $(INCLUDES) -o $*.f90 $*.F90 - chmod a-w $*.f90 - $(MPIFC) -c $(FFLAGS) $(FOPTS) $(INCLUDES) $*.f90 -else -SRCS_CPP= -.F90.o: - $(MPIFC) -c $(FFLAGS) $(FOPTS) $(INCLUDES) $*.F90 -endif - -.c.o: - $(MPICC) -c $(CFLAGS) $(CPPDEFS) $(COPTS) $(INCLUDES) $*.c - - -$(LIB): $(OBJS) - $(RM) $@ - $(AR) $(ARFLAGS) $@ $(OBJS) - - -clean: - $(RM) $(LIB) $(OBJS) $(MODFILES) $(SRCS_CPP) - - -perf_mod.o: perf_utils.o -perf_mod.o: gptl.inc -perf_utils.o: gptl.inc diff --git a/src/externals/pio1/timing/XXXdotF/perf_mod.F b/src/externals/pio1/timing/XXXdotF/perf_mod.F deleted file mode 100644 index 711705b1413..00000000000 --- a/src/externals/pio1/timing/XXXdotF/perf_mod.F +++ /dev/null @@ -1,1421 +0,0 @@ - module perf_mod -C----------------------------------------------------------------------- -C -C Purpose: This module is responsible for controlling the performance -C timer logic. -C -C Author: P. 
Worley, January 2007 -C -C $Id$ -C -C----------------------------------------------------------------------- - -C----------------------------------------------------------------------- -C- Uses ---------------------------------------------------------------- -C----------------------------------------------------------------------- - -#ifndef USE_CSM_SHARE - use perf_utils -#else - use shr_sys_mod, only: shr_sys_abort - use shr_kind_mod, only: shr_kind_cl, shr_kind_r8 - use shr_mpi_mod, only: shr_mpi_barrier, shr_mpi_bcast - use shr_file_mod, only: shr_file_getUnit, shr_file_freeUnit - use namelist_utils, only: find_group_name -#endif - -C----------------------------------------------------------------------- -C- module boilerplate -------------------------------------------------- -C----------------------------------------------------------------------- - implicit none -C Make the default access private - private - save - -C----------------------------------------------------------------------- -C Public interfaces ---------------------------------------------------- -C----------------------------------------------------------------------- - public t_initf - public t_setLogUnit - public t_getLogUnit - public t_profile_onf - public t_barrier_onf - public t_single_filef - public t_stampf - public t_startf - public t_stopf - public t_enablef - public t_disablef - public t_adj_detailf - public t_barrierf - public t_prf - public t_finalizef - -C----------------------------------------------------------------------- -C Private interfaces (local) ------------------------------------------- -C----------------------------------------------------------------------- - private perf_defaultopts - private perf_setopts - private papi_defaultopts - private papi_setopts - -C----------------------------------------------------------------------- -C- include statements -------------------------------------------------- -C----------------------------------------------------------------------- -#include -#include "gptl.inc" - -C----------------------------------------------------------------------- -C Private data --------------------------------------------------------- -C----------------------------------------------------------------------- - -C !---------------------------------------------------------------------------- -C ! perf_mod options -C !---------------------------------------------------------------------------- -C unit number for log output - integer, parameter :: def_p_logunit = 6 - integer, private :: p_logunit = def_p_logunit - -C flag indicating whether timing library has been initialized - logical, parameter :: def_timing_initialized = .false. - logical, private :: timing_initialized = def_timing_initialized - -C flag indicating whether timers are disabled - logical, parameter :: def_timing_disable = .false. - logical, private :: timing_disable = def_timing_disable - -C flag indicating whether the mpi_barrier in t_barrierf should be called - logical, parameter :: def_timing_barrier = .false. 
- logical, private :: timing_barrier = def_timing_barrier - -C integer indicating maximum number of levels of timer nesting - integer, parameter :: def_timer_depth_limit = 99999 - integer, private :: timer_depth_limit = def_timer_depth_limit - -C integer indicating maximum detail level to profile - integer, parameter :: def_timing_detail_limit = 1 - integer, private :: timing_detail_limit = def_timer_depth_limit - -C integer indicating depth of t_disablef calls - integer, parameter :: init_timing_disable_depth = 0 - integer, private :: timing_disable_depth = - & init_timing_disable_depth - -C current timing detail level - integer, parameter :: init_timing_detail = 0 - integer, private :: cur_timing_detail = init_timing_detail - -C flag indicating whether the performance timer output should be written -C to a single file (per component communicator) or to a separate file -C for each process - logical, parameter :: def_perf_single_file = .false. - logical, private :: perf_single_file = def_perf_single_file - -C maximum number of processes writing out -C timing data (for this component communicator) - integer, parameter :: def_perf_outpe_num = -1 - integer, private :: perf_outpe_num = def_perf_outpe_num - -C separation between process ids for processes that are writing out -C timing data (for this component communicator) - integer, parameter :: def_perf_outpe_stride = 1 - integer, private :: perf_outpe_stride = def_perf_outpe_stride - -C collect and print out global performance statistics -C (for this component communicator) - logical, parameter :: def_perf_global_stats = .false. - logical, private :: perf_global_stats = def_perf_global_stats - -C integer indicating which timer to use (as defined in gptl.inc) -#ifdef UNICOSMP - integer, parameter :: def_perf_timer = GPTLrtc -#else - integer, parameter :: def_perf_timer = GPTLmpiwtime -#endif - integer, private :: perf_timer = def_perf_timer - -C flag indicating whether the PAPI namelist -C should be read and HW performance counters -C used in profiling -#ifdef HAVE_PAPI - logical, parameter :: def_perf_papi_enable = .false. -#else - logical, parameter :: def_perf_papi_enable = .false. -#endif - logical, private :: perf_papi_enable = def_perf_papi_enable - -C PAPI counter ids - integer, parameter :: PAPI_NULL = -1 - - integer, parameter :: def_papi_ctr1 = PAPI_NULL - integer, private :: papi_ctr1 = def_papi_ctr1 - - integer, parameter :: def_papi_ctr2 = PAPI_NULL - integer, private :: papi_ctr2 = def_papi_ctr2 - - integer, parameter :: def_papi_ctr3 = PAPI_NULL - integer, private :: papi_ctr3 = def_papi_ctr3 - - integer, parameter :: def_papi_ctr4 = PAPI_NULL - integer, private :: papi_ctr4 = def_papi_ctr4 - -C======================================================================= - contains -C======================================================================= - -C -C======================================================================== -C - subroutine t_getLogUnit(LogUnit) -C----------------------------------------------------------------------- -C Purpose: Get log unit number. -C Author: P. 
Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C Unit number for log output - integer(SHR_KIND_IN), intent(OUT) :: LogUnit -C----------------------------------------------------------------------- - - LogUnit = p_logunit - - return - end subroutine t_getLogUnit -C -C======================================================================== -C - subroutine t_setLogUnit(LogUnit) -C----------------------------------------------------------------------- -C Purpose: Set log unit number. -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C Unit number for log output - integer(SHR_KIND_IN), intent(IN) :: LogUnit -C----------------------------------------------------------------------- - - p_logunit = LogUnit -#ifndef USE_CSM_SHARE - call perfutils_setunit(p_logunit) -#endif - - return - end subroutine t_setLogUnit -C -C======================================================================== -C - subroutine perf_defaultopts(timing_disable_out, - & perf_timer_out, - & timer_depth_limit_out, - & timing_detail_limit_out, - & timing_barrier_out, - & perf_outpe_num_out, - & perf_outpe_stride_out, - & perf_single_file_out, - & perf_global_stats_out, - & perf_papi_enable_out ) -C----------------------------------------------------------------------- -C Purpose: Return default runtime options -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C timers disable/enable option - logical, intent(out), optional :: timing_disable_out -C performance timer option - integer, intent(out), optional :: perf_timer_out -C timer depth limit option - integer, intent(out), optional :: timer_depth_limit_out -C timer detail limit option - integer, intent(out), optional :: timing_detail_limit_out -C timing barrier enable/disable option - logical, intent(out), optional :: timing_barrier_out -C number of processes writing out timing data - integer, intent(out), optional :: perf_outpe_num_out -C separation between process ids for processes that are writing out timing data - integer, intent(out), optional :: perf_outpe_stride_out -C timing single / multple output file option - logical, intent(out), optional :: perf_single_file_out -C collect and output global performance statistics option - logical, intent(out), optional :: perf_global_stats_out -C calling PAPI to read HW performance counters option - logical, intent(out), optional :: perf_papi_enable_out -C----------------------------------------------------------------------- - if ( present(timing_disable_out) ) then - timing_disable_out = def_timing_disable - endif - if ( present(perf_timer_out) ) then - perf_timer_out = def_perf_timer - endif - if ( present(timer_depth_limit_out) ) then - timer_depth_limit_out = def_timer_depth_limit - endif - if ( present(timing_detail_limit_out) ) then - timing_detail_limit_out = def_timing_detail_limit - endif - if ( present(timing_barrier_out) ) then - timing_barrier_out = def_timing_barrier - endif - if ( present(perf_outpe_num_out) ) then - perf_outpe_num_out = def_perf_outpe_num - endif - if ( present(perf_outpe_stride_out) ) then - perf_outpe_stride_out = def_perf_outpe_stride - endif - if ( present(perf_single_file_out) ) then - perf_single_file_out = def_perf_single_file - endif - if ( 
present(perf_global_stats_out) ) then - perf_global_stats_out = def_perf_global_stats - endif - if ( present(perf_papi_enable_out) ) then - perf_papi_enable_out = def_perf_papi_enable - endif -C - return - end subroutine perf_defaultopts -C -C======================================================================== -C - subroutine perf_setopts(mastertask, - & LogPrint, - & timing_disable_in, - & perf_timer_in, - & timer_depth_limit_in, - & timing_detail_limit_in, - & timing_barrier_in, - & perf_outpe_num_in, - & perf_outpe_stride_in, - & perf_single_file_in, - & perf_global_stats_in, - & perf_papi_enable_in ) -C----------------------------------------------------------------------- -C Purpose: Set runtime options -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments---------------------------- -C -C master process? - logical, intent(in) :: mastertask -C Print out to log file? - logical, intent(IN) :: LogPrint -C timers disable/enable option - logical, intent(in), optional :: timing_disable_in -C performance timer option - integer, intent(in), optional :: perf_timer_in -C timer depth limit option - integer, intent(in), optional :: timer_depth_limit_in -C timer detail limit option - integer, intent(in), optional :: timing_detail_limit_in -C timing barrier enable/disable option - logical, intent(in), optional :: timing_barrier_in -C number of processes writing out timing data - integer, intent(in), optional :: perf_outpe_num_in -C separation between process ids for processes that are writing out timing data - integer, intent(in), optional :: perf_outpe_stride_in -C timing single / multple output file option - logical, intent(in), optional :: perf_single_file_in -C collect and output global performance statistics option - logical, intent(in), optional :: perf_global_stats_in -C calling PAPI to read HW performance counters option - logical, intent(in), optional :: perf_papi_enable_in -C -C---------------------------Local workspace----------------------------- -C -C error return - integer ierr -C----------------------------------------------------------------------- - if ( .not. timing_initialized ) then - - if ( present(timing_disable_in) ) then - timing_disable = timing_disable_in - if (timing_disable) then - ierr = GPTLdisable() - else - ierr = GPTLenable() - endif - endif - if ( present(perf_timer_in) ) then - if ((perf_timer_in .eq. GPTLgettimeofday) .or. - & (perf_timer_in .eq. GPTLnanotime) .or. - & (perf_timer_in .eq. GPTLrtc) .or. - & (perf_timer_in .eq. GPTLmpiwtime) .or. - & (perf_timer_in .eq. GPTLclockgettime) .or. - & (perf_timer_in .eq. GPTLpapitime)) then - perf_timer = perf_timer_in - else - if (mastertask) then - write(p_logunit,*) - & 'PERF_SETOPTS: illegal timer requested=', - & perf_timer_in, '. Request ignored.' 
- endif - endif - endif - if ( present(timer_depth_limit_in) ) then - timer_depth_limit = timer_depth_limit_in - endif - if ( present(timing_detail_limit_in) ) then - timing_detail_limit = timing_detail_limit_in - endif - if ( present(timing_barrier_in) ) then - timing_barrier = timing_barrier_in - endif - if ( present(perf_outpe_num_in) ) then - perf_outpe_num = perf_outpe_num_in - endif - if ( present(perf_outpe_stride_in) ) then - perf_outpe_stride = perf_outpe_stride_in - endif - if ( present(perf_single_file_in) ) then - perf_single_file = perf_single_file_in - endif - if ( present(perf_global_stats_in) ) then - perf_global_stats = perf_global_stats_in - endif - if ( present(perf_papi_enable_in) ) then -#ifdef HAVE_PAPI - perf_papi_enable = perf_papi_enable_in -#else - if (perf_papi_enable_in) then - if (mastertask) then - write(p_logunit,*) - & 'PERF_SETOPTS: PAPI library not linked in. ', - & 'Request to enable PAPI ignored.' - endif - endif - perf_papi_enable = .false. -#endif - endif -C - if (mastertask .and. LogPrint) then - write(p_logunit,*) '(t_initf) Using profile_disable=', - & timing_disable, ' profile_timer=', perf_timer - write(p_logunit,*) '(t_initf) profile_depth_limit=', - & timer_depth_limit, ' profile_detail_limit=', - & timing_detail_limit - write(p_logunit,*) '(t_initf) profile_barrier=', - & timing_barrier, ' profile_outpe_num=', - & perf_outpe_num - write(p_logunit,*) '(t_initf) profile_outpe_stride=', - & perf_outpe_stride , ' profile_single_file=', - & perf_single_file - write(p_logunit,*) '(t_initf) profile_global_stats=', - & perf_global_stats , ' profile_papi_enable=', - & perf_papi_enable - endif -C -#ifdef DEBUG - else - write(p_logunit,*) - & 'PERF_SETOPTS: timing library already initialized.', - & ' Request ignored.' -#endif - endif -C - return - end subroutine perf_setopts - -C -C======================================================================== -C - subroutine papi_defaultopts(papi_ctr1_out, - & papi_ctr2_out, - & papi_ctr3_out, - & papi_ctr4_out ) -C----------------------------------------------------------------------- -C Purpose: Return default runtime PAPI counter options -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C PAPI counter option #1 - integer, intent(out), optional :: papi_ctr1_out -C PAPI counter option #2 - integer, intent(out), optional :: papi_ctr2_out -C PAPI counter option #3 - integer, intent(out), optional :: papi_ctr3_out -C PAPI counter option #4 - integer, intent(out), optional :: papi_ctr4_out -C----------------------------------------------------------------------- - if ( present(papi_ctr1_out) ) then - papi_ctr1_out = def_papi_ctr1 - endif - if ( present(papi_ctr2_out) ) then - papi_ctr2_out = def_papi_ctr2 - endif - if ( present(papi_ctr3_out) ) then - papi_ctr3_out = def_papi_ctr3 - endif - if ( present(papi_ctr4_out) ) then - papi_ctr4_out = def_papi_ctr4 - endif -C - return - end subroutine papi_defaultopts -C -C======================================================================== -C - subroutine papi_setopts(papi_ctr1_in, - & papi_ctr2_in, - & papi_ctr3_in, - & papi_ctr4_in ) -C----------------------------------------------------------------------- -C Purpose: Set runtime PAPI counter options -C Author: P. 
Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments---------------------------- -C -C performance counter option - integer, intent(in), optional :: papi_ctr1_in -C performance counter option - integer, intent(in), optional :: papi_ctr2_in -C performance counter option - integer, intent(in), optional :: papi_ctr3_in -C performance counter option - integer, intent(in), optional :: papi_ctr4_in -C -C---------------------------Local workspace----------------------------- -C -C error return - integer ierr -C----------------------------------------------------------------------- - if ( .not. timing_initialized ) then - - if ( present(papi_ctr1_in) ) then - if (papi_ctr1_in < 0) then - papi_ctr1 = papi_ctr1_in - else - papi_ctr1 = PAPI_NULL - endif - endif - if ( present(papi_ctr2_in) ) then - if (papi_ctr2_in < 0) then - papi_ctr2 = papi_ctr2_in - else - papi_ctr2 = PAPI_NULL - endif - endif - if ( present(papi_ctr3_in) ) then - if (papi_ctr3_in < 0) then - papi_ctr3 = papi_ctr3_in - else - papi_ctr3 = PAPI_NULL - endif - endif - if ( present(papi_ctr4_in) ) then - if (papi_ctr4_in < 0) then - papi_ctr4 = papi_ctr4_in - else - papi_ctr4 = PAPI_NULL - endif - endif -C -#ifdef DEBUG - else - write(p_logunit,*) - & 'PAPI_SETOPTS: timing library already initialized.', - & 'Request ignored.' -#endif - endif -C - return - end subroutine papi_setopts -C -C======================================================================== -C - logical function t_profile_onf() -C----------------------------------------------------------------------- -C Purpose: Return flag indicating whether profiling is currently active. -C Part of workaround to implement FVbarrierclock before -C communicators exposed in Pilgrim. Does not check level of -C event nesting. -C Author: P. Worley -C----------------------------------------------------------------------- - - if ((.not. timing_initialized) .or. - & (timing_disable_depth > 0) .or. - & (cur_timing_detail > timing_detail_limit)) then - t_profile_onf = .false. - else - t_profile_onf = .true. - endif - - end function t_profile_onf -C -C======================================================================== -C - logical function t_barrier_onf() -C----------------------------------------------------------------------- -C Purpose: Return timing_barrier. Part of workaround to implement -C FVbarrierclock before communicators exposed in Pilgrim. -C Author: P. Worley -C----------------------------------------------------------------------- - - t_barrier_onf = timing_barrier - - end function t_barrier_onf -C -C======================================================================== -C - logical function t_single_filef() -C----------------------------------------------------------------------- -C Purpose: Return perf_single_file. Used to control output of other -C performance data, only spmdstats currently. -C Author: P. Worley -C----------------------------------------------------------------------- - - t_single_filef = perf_single_file - - end function t_single_filef -C -C======================================================================== -C - subroutine t_stampf(wall, usr, sys) -C----------------------------------------------------------------------- -C Purpose: Record wallclock, user, and system times (seconds). -C Author: P. 
Worley -C----------------------------------------------------------------------- -C---------------------------Output arguments----------------------------- -C -C wallclock time - real(shr_kind_r8), intent(out) :: wall -C user time - real(shr_kind_r8), intent(out) :: usr -C system time - real(shr_kind_r8), intent(out) :: sys -C -C---------------------------Local workspace----------------------------- -C -C GPTL error return - integer ierr -C -C----------------------------------------------------------------------- -C - if ((.not. timing_initialized) .or. - & (timing_disable_depth > 0)) then - wall = 0.0 - usr = 0.0 - sys = 0.0 - else - ierr = GPTLstamp(wall, usr, sys) - endif - - return - end subroutine t_stampf -C -C======================================================================== -C - subroutine t_startf(event) -C----------------------------------------------------------------------- -C Purpose: Start an event timer -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C performance timer event name - character(len=*), intent(in) :: event -C -C---------------------------Local workspace----------------------------- -C -C GPTL error return - integer ierr -C -C----------------------------------------------------------------------- -C - if ((timing_initialized) .and. - & (timing_disable_depth .eq. 0) .and. - & (cur_timing_detail .le. timing_detail_limit)) then - - ierr = GPTLstart(event) - - endif - - return - end subroutine t_startf -C -C======================================================================== -C - subroutine t_stopf(event) -C----------------------------------------------------------------------- -C Purpose: Stop an event timer -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C performance timer event name - character(len=*), intent(in) :: event -C -C---------------------------Local workspace----------------------------- -C -C GPTL error return - integer ierr -C -C----------------------------------------------------------------------- -C - if ((timing_initialized) .and. - & (timing_disable_depth .eq. 0) .and. - & (cur_timing_detail .le. timing_detail_limit)) then - - ierr = GPTLstop(event) - - endif - - return - end subroutine t_stopf -C -C======================================================================== -C - subroutine t_enablef() -C----------------------------------------------------------------------- -C Purpose: Enable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored -C in threaded regions. -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Local workspace----------------------------- -C -C GPTL error return - integer ierr -C -C---------------------------Externals----------------------------------- -C -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -C -C----------------------------------------------------------------------- -C - if (.not. timing_initialized) return - -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - - if (timing_disable_depth > 0) then - if (timing_disable_depth .eq. 
1) then - ierr = GPTLenable() - endif - timing_disable_depth = timing_disable_depth - 1 - endif - - return - end subroutine t_enablef -C -C======================================================================== -C - subroutine t_disablef() -C----------------------------------------------------------------------- -C Purpose: Disable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored -C in threaded regions. -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Local workspace----------------------------- -C -C GPTL error return - integer ierr -C -C---------------------------Externals----------------------------------- -C -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -C -C----------------------------------------------------------------------- -C - if (.not. timing_initialized) return - -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - - if (timing_disable_depth .eq. 0) then - ierr = GPTLdisable() - endif - timing_disable_depth = timing_disable_depth + 1 - - return - end subroutine t_disablef -C -C======================================================================== -C - subroutine t_adj_detailf(detail_adjustment) -C----------------------------------------------------------------------- -C Purpose: Modify current detail level. Ignored in threaded regions. -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C user defined increase or decrease in detail level - integer, intent(in) :: detail_adjustment -C -C---------------------------Externals----------------------------------- -C -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -C -C----------------------------------------------------------------------- -C - if (.not. timing_initialized) return - -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - - cur_timing_detail = cur_timing_detail + detail_adjustment - - return - end subroutine t_adj_detailf -C -C======================================================================== -C - subroutine t_barrierf(event, mpicom) -C----------------------------------------------------------------------- -C Purpose: Call (and time) mpi_barrier. Ignored inside OpenMP -C threaded regions. Note that barrier executed even if -C event not recorded because of level of timer event nesting. -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C mpi communicator id - integer, intent(in), optional :: mpicom -C performance timer event name - character(len=*), intent(in), optional :: event -C -C---------------------------Local workspace----------------------------- -C -C GPTL error return - integer ierr -C -C---------------------------Externals----------------------------------- -C -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -C -C----------------------------------------------------------------------- -C -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - if ((timing_initialized) .and. - & (timing_disable_depth .eq. 0) .and. - & (cur_timing_detail .le. 
timing_detail_limit)) then - - if (timing_barrier) then - - if ( present (event) ) then - ierr = GPTLstart(event) - endif - - if ( present (mpicom) ) then - call shr_mpi_barrier(mpicom, - & 'T_BARRIERF: bad mpi communicator') - else - call shr_mpi_barrier(MPI_COMM_WORLD, - & 'T_BARRIERF: bad mpi communicator') - endif - - if ( present (event) ) then - ierr = GPTLstop(event) - endif - - endif - - endif - - return - end subroutine t_barrierf -C -C======================================================================== -C - subroutine t_prf(filename, mpicom, num_outpe, stride_outpe) -C----------------------------------------------------------------------- -C Purpose: Write out performance timer data -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C performance timer output file name - character(len=*), intent(in), optional :: filename -C mpi communicator id - integer, intent(in), optional :: mpicom -C maximum number of processes writing out timing data - integer, intent(in), optional :: num_outpe -C separation between process ids for processes writing out data - integer, intent(in), optional :: stride_outpe -C -C---------------------------Local workspace----------------------------- -C -C loop index - integer i -C local copy of MPI communicator - integer mpicom2 -C communicator local process id - integer me -C local communicator group size - integer npes -C global process id - integer gme -C MPI error return - integer ierr -C max number of processes writing out timing data - integer outpe_num -C separation between process ids for processes writing out timing data - integer outpe_stride -C max process id for processes writing out timing data - integer max_outpe -C send/recv variable for single output file logic - integer signal -C string length - integer str_length -C file unit number - integer unitn -C length of filename suffix - integer cme_adj -C Status of message - integer status (MPI_STATUS_SIZE) -C string representation of process id - character(len=7) cme -C timing output filename - character(len=SHR_KIND_CX+14) fname -C----------------------------------------------------------------------- -C - if (.not. timing_initialized) return - - call t_startf("t_prf") -C$OMP MASTER - call mpi_comm_rank(MPI_COMM_WORLD, gme, ierr) - if ( present(mpicom) ) then - mpicom2 = mpicom - call mpi_comm_size(mpicom2, npes, ierr) - if (ierr .eq. MPI_ERR_COMM) then - call shr_sys_abort('T_PRF: bad mpi communicator') - endif - call mpi_comm_rank(mpicom2, me, ierr) - else - call mpi_comm_size(MPI_COMM_WORLD, npes, ierr) - mpicom2 = MPI_COMM_WORLD - me = gme - endif - - do i=1,SHR_KIND_CX+14 - fname(i:i) = " " - enddo - - unitn = shr_file_getUnit() - -C Determine which processes are writing out timing data - if (present(num_outpe)) then - if (num_outpe < 0) then - outpe_num = npes - else - outpe_num = num_outpe - endif - else - if (perf_outpe_num < 0) then - outpe_num = npes - else - outpe_num = perf_outpe_num - endif - endif -C - if (present(stride_outpe)) then - if (stride_outpe < 1) then - outpe_stride = 1 - else - outpe_stride = stride_outpe - endif - else - if (perf_outpe_stride < 1) then - outpe_stride = 1 - else - outpe_stride = perf_outpe_stride - endif - endif -C - max_outpe = min(outpe_num*outpe_stride, npes) - 1 - -C If a single timing output file, take turns writing to it. 
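C     File naming in the logic below: with perf_single_file enabled,
C     every writing process appends, one at a time (a completion token
C     is passed from rank to rank with mpi_recv/mpi_send), to a single
C     file named "timing_all" unless an explicit filename is supplied.
C     Otherwise each selected process writes its own "timing.<rank>"
C     file, and global statistics, when enabled, go to a separate
C     file with a "_stats" suffix.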
- if (perf_single_file) then - - if ( present(filename) ) then - str_length = min(SHR_KIND_CX,len_trim(filename)) - fname(1:str_length) = filename(1:str_length) - else - fname(1:10) = "timing_all" - endif - - signal = 0 - if (me .eq. 0) then - - if (perf_global_stats) then - open( unitn, file=trim(fname), status='UNKNOWN' ) - write( unitn, 100) npes -100 format(/,"***** GLOBAL STATISTICS (",I6, - & " MPI TASKS) *****",/) - close( unitn ) - - ierr = GPTLpr_summary(mpicom2, 0, trim(fname)) - endif - - if (me .le. max_outpe) then - if (perf_global_stats) then - open( unitn, file=trim(fname), status='OLD', - & position='APPEND' ) - else - open( unitn, file=trim(fname), status='UNKNOWN' ) - endif - - write( unitn, 101) me, gme - 101 format(/,"************ PROCESS ",I6," (",I6, - & ") ************",/) - close( unitn ) - - ierr = GPTLpr_file(0, trim(fname)) - endif - - else - - if (perf_global_stats) then - ierr = GPTLpr_summary(mpicom2, 0, trim(fname)) - endif - - call mpi_recv (signal, 1, mpi_integer, me-1, me-1, mpicom2, - & status, ierr) - if (ierr /= mpi_success) then - write(p_logunit,*) 'T_PRF: mpi_recv failed ierr=',ierr - call shr_sys_abort() - end if - - if ((mod(me, outpe_stride).eq.0).and.(me.le.max_outpe)) then - open( unitn, file=trim(fname), status='OLD', - & position='APPEND' ) - write( unitn, 101) me, gme - close( unitn ) - - ierr = GPTLpr_file(0, trim(fname)) - endif - - endif - - if (me+1 < npes) - & call mpi_send (signal, 1, mpi_integer, me+1, me, mpicom2, ierr) - - else - - if (perf_global_stats) then - if ( present(filename) ) then - str_length = min(SHR_KIND_CX-6,len_trim(filename)) - fname(1:str_length) = filename(1:str_length) - else - str_length = 6 - fname(1:10) = "timing" - endif - fname(str_length+1:str_length+6) = '_stats' - - if (me .eq. 0) then - open( unitn, file=trim(fname), status='UNKNOWN' ) - write( unitn, 100) npes - close( unitn ) - endif - - ierr = GPTLpr_summary(mpicom2, 0, trim(fname)) - fname(str_length+1:str_length+6) = ' ' - endif - - if ((mod(me, outpe_stride).eq.0).and.(me.le.max_outpe)) then - if (npes .le. 10) then - write(cme,'(i1.1)') me - cme_adj = 2 - elseif (npes .le. 100) then - write(cme,'(i2.2)') me - cme_adj = 3 - elseif (npes .le. 1000) then - write(cme,'(i3.3)') me - cme_adj = 4 - elseif (npes .le. 10000) then - write(cme,'(i4.4)') me - cme_adj = 5 - elseif (npes .le. 100000) then - write(cme,'(i5.5)') me - cme_adj = 6 - else - write(cme,'(i6.6)') me - cme_adj = 7 - endif - - if ( present(filename) ) then - str_length = min(SHR_KIND_CX-cme_adj,len_trim(filename)) - fname(1:str_length) = filename(1:str_length) - else - str_length = 6 - fname(1:10) = "timing" - endif - fname(str_length+1:str_length+1) = '.' - fname(str_length+2:str_length+cme_adj) = cme - - open( unitn, file=trim(fname), status='UNKNOWN' ) - write( unitn, 101) me, gme - close( unitn ) - - ierr = GPTLpr_file(0, trim(fname)) - endif - - endif - - call shr_file_freeUnit( unitn ) -C$OMP END MASTER - call t_stopf("t_prf") - - return - end subroutine t_prf -C -C======================================================================== -C - subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, - & MasterTask) -C----------------------------------------------------------------------- -C Purpose: Set default values of runtime timing options -C before namelists prof_inparm and papi_inparm are read, -C read namelists (and broadcast, if SPMD), -C then initialize timing library. -C Author: P. 
Worley (based on shr_inputinfo_mod and runtime_opts) -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C Name-list filename - character(len=*), intent(IN) :: NLFilename -C If print out to log file - logical, optional, intent(IN) :: LogPrint -C Unit number for log output - integer, optional, intent(IN) :: LogUnit -C MPI communicator - integer, optional, intent(IN) :: mpicom -C If MPI master task - logical, optional, intent(IN) :: MasterTask -C -C---------------------------Local workspace----------------------------- -C - character(len=*), parameter :: subname = '(T_INITF) ' -C If MPI master task - logical :: MasterTask2 -C If print to log - logical :: LogPrint2 - -C communicator local process id - integer me -C error return - integer ierr -C file unit number - integer unitn -C PAPI counter id - integer papi_ctr1_id -C PAPI counter id - integer papi_ctr2_id -C PAPI counter id - integer papi_ctr3_id -C PAPI counter id - integer papi_ctr4_id -C -C---------------------------Namelists ---------------------------------- -C - logical profile_disable - logical profile_barrier - logical profile_single_file - logical profile_global_stats - integer profile_depth_limit - integer profile_detail_limit - integer profile_outpe_num - integer profile_outpe_stride - integer profile_timer - logical profile_papi_enable - namelist /prof_inparm/ profile_disable, profile_barrier, - & profile_single_file, profile_global_stats, - & profile_depth_limit, - & profile_detail_limit, profile_outpe_num, - & profile_outpe_stride, profile_timer, - & profile_papi_enable - - character(len=16) papi_ctr1_str - character(len=16) papi_ctr2_str - character(len=16) papi_ctr3_str - character(len=16) papi_ctr4_str - namelist /papi_inparm/ papi_ctr1_str, papi_ctr2_str, - & papi_ctr3_str, papi_ctr4_str -C----------------------------------------------------------------------- - if ( timing_initialized ) then -#ifdef DEBUG - write(p_logunit,*) 'T_INITF: timing library already ', - & 'initialized. Request ignored.' -#endif - return - endif - -C$OMP MASTER - if ( present(LogUnit) ) then - call t_setLogUnit(LogUnit) - else - call t_setLogUnit(def_p_logunit) - endif - - if ( present(MasterTask) .and. present(mpicom) )then - call mpi_comm_rank(mpicom, me, ierr) - if (ierr .eq. MPI_ERR_COMM) then - call shr_sys_abort('T_INITF: bad mpi communicator') - endif - if (me .eq. 0) then - MasterTask2 = .true. - else - MasterTask2 = .false. - endif - else - MasterTask2 = .true. - end if - - if ( present(LogPrint) ) then - LogPrint2 = LogPrint - else - LogPrint2 = .true. - endif - -C Set PERF defaults, then override with user-specified input - call perf_defaultopts(timing_disable_out=profile_disable, - & perf_timer_out=profile_timer, - & timer_depth_limit_out=profile_depth_limit, - & timing_detail_limit_out=profile_detail_limit, - & timing_barrier_out=profile_barrier, - & perf_outpe_num_out = profile_outpe_num, - & perf_outpe_stride_out = profile_outpe_stride, - & perf_single_file_out=profile_single_file, - & perf_global_stats_out=profile_global_stats, - & perf_papi_enable_out=profile_papi_enable ) - if ( MasterTask2 ) then - -C Read in the prof_inparm namelist from NLFilename if it exists - - write(p_logunit,*) '(t_initf) Read in prof_inparm namelist', - & ' from: '//trim(NLFilename) - unitn = shr_file_getUnit() - - ierr = 1 - open( unitn, file=trim(NLFilename), status='old', iostat=ierr) - if (ierr .eq. 
0) then - -C Look for prof_inparm group name in the input file. -C If found, leave the file positioned at that namelist group. - call find_group_name(unitn, 'prof_inparm', status=ierr) - -C found prof_inparm - if (ierr == 0) then - read(unitn, nml=prof_inparm, iostat=ierr) - if (ierr /= 0) then - call shr_sys_abort( - & subname//':: namelist read returns an'// - & ' error condition for prof_inparm' ) - end if - end if - - close(unitn) - - endif - call shr_file_freeUnit( unitn ) - - endif - -C This logic assumes that there will be only one MasterTask -C per communicator, and that this MasterTask is process 0. - if ( present(MasterTask) .and. present(mpicom) )then - call shr_mpi_bcast( profile_disable, MPICom ) - call shr_mpi_bcast( profile_barrier, MPICom ) - call shr_mpi_bcast( profile_single_file, MPICom ) - call shr_mpi_bcast( profile_global_stats, MPICom ) - call shr_mpi_bcast( profile_papi_enable, MPICom ) - call shr_mpi_bcast( profile_depth_limit, MPICom ) - call shr_mpi_bcast( profile_detail_limit, MPICom ) - call shr_mpi_bcast( profile_outpe_num, MPICom ) - call shr_mpi_bcast( profile_outpe_stride, MPICom ) - call shr_mpi_bcast( profile_timer, MPICom ) - end if - call perf_setopts (MasterTask2, LogPrint2, - & timing_disable_in=profile_disable, - & perf_timer_in=profile_timer, - & timer_depth_limit_in=profile_depth_limit, - & timing_detail_limit_in=profile_detail_limit, - & timing_barrier_in=profile_barrier, - & perf_outpe_num_in=profile_outpe_num, - & perf_outpe_stride_in=profile_outpe_stride, - & perf_single_file_in=profile_single_file, - & perf_global_stats_in=profile_global_stats, - & perf_papi_enable_in=profile_papi_enable ) - -C Set PAPI defaults, then override with user-specified input - if (perf_papi_enable) then - call papi_defaultopts(papi_ctr1_out=papi_ctr1_id, - & papi_ctr2_out=papi_ctr2_id, - & papi_ctr3_out=papi_ctr3_id, - & papi_ctr4_out=papi_ctr4_id ) - - if ( MasterTask2 ) then - papi_ctr1_str = "PAPI_NO_CTR" - papi_ctr2_str = "PAPI_NO_CTR" - papi_ctr3_str = "PAPI_NO_CTR" - papi_ctr4_str = "PAPI_NO_CTR" - - -C Read in the papi_inparm namelist from NLFilename if it exists - - write(p_logunit,*) - & '(t_initf) Read in papi_inparm namelist from: '//trim(NLFilename) - unitn = shr_file_getUnit() - - ierr = 1 - open(unitn,file=trim(NLFilename),status='old',iostat=ierr ) - if (ierr .eq. 0) then -C Look for papi_inparm group name in the input file. -C If found, leave the file positioned at that namelist group. - call find_group_name(unitn, 'papi_inparm', status=ierr) - - if (ierr == 0) then -C found papi_inparm - read(unitn, nml=papi_inparm, iostat=ierr) - if (ierr /= 0) then - call shr_sys_abort( - & subname//':: namelist read returns an'// - & ' error condition for papi_inparm' ) - end if - end if - - close(unitn) - - endif - call shr_file_freeUnit( unitn ) - -C if enabled and nothing set, use "defaults" - if ((papi_ctr1_str(1:11) .eq. "PAPI_NO_CTR") .and. - & (papi_ctr2_str(1:11) .eq. "PAPI_NO_CTR") .and. - & (papi_ctr3_str(1:11) .eq. "PAPI_NO_CTR") .and. - & (papi_ctr4_str(1:11) .eq. 
"PAPI_NO_CTR")) then - papi_ctr1_str = "PAPI_TOT_CYC" - papi_ctr2_str = "PAPI_FP_OPS" - papi_ctr3_str = "PAPI_FP_INS" - endif - - if (papi_ctr1_str(1:11) /= "PAPI_NO_CTR") then - papi_ctr1_id = gptl_papiname2id(trim(papi_ctr1_str)) - endif - if (papi_ctr2_str(1:11) /= "PAPI_NO_CTR") then - papi_ctr2_id = gptl_papiname2id(trim(papi_ctr2_str)) - endif - if (papi_ctr3_str(1:11) /= "PAPI_NO_CTR") then - papi_ctr3_id = gptl_papiname2id(trim(papi_ctr3_str)) - endif - if (papi_ctr4_str(1:11) /= "PAPI_NO_CTR") then - papi_ctr4_id = gptl_papiname2id(trim(papi_ctr4_str)) - endif - - endif -C This logic assumes that there will be only one MasterTask -C per communicator, and that this MasterTask is process 0. - if ( present(MasterTask) .and. present(mpicom) )then - call shr_mpi_bcast( papi_ctr1_id, MPICom ) - call shr_mpi_bcast( papi_ctr2_id, MPICom ) - call shr_mpi_bcast( papi_ctr3_id, MPICom ) - call shr_mpi_bcast( papi_ctr4_id, MPICom ) - end if - - call papi_setopts (papi_ctr1_in=papi_ctr1_id, - & papi_ctr2_in=papi_ctr2_id, - & papi_ctr3_in=papi_ctr3_id, - & papi_ctr4_in=papi_ctr4_id ) - endif -C$OMP END MASTER -C$OMP BARRIER - - if (timing_disable) return - -C$OMP MASTER -C -C Set options and initialize timing library. -C -C Set timer - if (gptlsetutr (perf_timer) < 0) - & call shr_sys_abort (subname//':: gptlsetutr') -C -C For logical settings, 2nd arg 0 -C to gptlsetoption means disable, non-zero means enable -C -C Turn off CPU timing (expensive) -C - if (gptlsetoption (gptlcpu, 0) < 0) - & call shr_sys_abort (subname//':: gptlsetoption') -C -C Set max timer depth -C - if (gptlsetoption (gptldepthlimit, timer_depth_limit) < 0) - & call shr_sys_abort (subname//':: gptlsetoption') -C -C Next 2 calls only work if PAPI is enabled. These examples enable counting -C of total cycles and floating point ops, respectively -C - if (perf_papi_enable) then - if (papi_ctr1 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr1, 1) < 0) - & call shr_sys_abort (subname//':: gptlsetoption') - endif - if (papi_ctr2 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr2, 1) < 0) - & call shr_sys_abort (subname//':: gptlsetoption') - endif - if (papi_ctr3 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr3, 1) < 0) - & call shr_sys_abort (subname//':: gptlsetoption') - endif - if (papi_ctr4 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr4, 1) < 0) - & call shr_sys_abort (subname//':: gptlsetoption') - endif - endif -C -C Initialize the timing lib. This call must occur after all gptlsetoption -C calls and before all other timing lib calls. -C - if (gptlinitialize () < 0) - & call shr_sys_abort (subname//':: gptlinitialize') - timing_initialized = .true. -C$OMP END MASTER -C$OMP BARRIER - - return - end subroutine t_initf -C -C======================================================================== -C - subroutine t_finalizef() -C----------------------------------------------------------------------- -C Purpose: shut down timing library -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Local workspace----------------------------- -C -C GPTL error return - integer ierr -C -C----------------------------------------------------------------------- -C - if (.not. timing_initialized) return - -C$OMP MASTER - ierr = GPTLfinalize() - timing_initialized = .false. 
-C$OMP END MASTER -C$OMP BARRIER - - return - end subroutine t_finalizef - -C=============================================================================== - - end module perf_mod diff --git a/src/externals/pio1/timing/XXXdotF/perf_utils.F b/src/externals/pio1/timing/XXXdotF/perf_utils.F deleted file mode 100644 index 0848852f38c..00000000000 --- a/src/externals/pio1/timing/XXXdotF/perf_utils.F +++ /dev/null @@ -1,570 +0,0 @@ - module perf_utils - -C----------------------------------------------------------------------- -C -C Purpose: This module supplies the csm_share and CAM utilities -C needed by perf_mod.F90 (when the csm_share and CAM utilities -C are not available). -C -C Author: P. Worley, October 2007 -C -C $Id$ -C -C----------------------------------------------------------------------- - -C----------------------------------------------------------------------- -C- module boilerplate -------------------------------------------------- -C----------------------------------------------------------------------- - implicit none -C Make the default access private - private - save - -C----------------------------------------------------------------------- -C Public interfaces ---------------------------------------------------- -C----------------------------------------------------------------------- - public perfutils_setunit - public shr_sys_abort - public shr_mpi_barrier - public shr_file_getUnit - public shr_file_freeUnit - public find_group_name - public to_lower - public shr_mpi_bcast - - interface shr_mpi_bcast ; module procedure - & shr_mpi_bcastl0, - & shr_mpi_bcasti0 - end interface - -C----------------------------------------------------------------------- -C Private interfaces --------------------------------------------------- -C----------------------------------------------------------------------- - private shr_sys_flush - private shr_mpi_chkerr - private shr_mpi_abort - -C----------------------------------------------------------------------- -C- include statements -------------------------------------------------- -C----------------------------------------------------------------------- -#include -#include "gptl.inc" - -C----------------------------------------------------------------------- -C Public data --------------------------------------------------------- -C----------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! 
precision/kind constants (from csm_share/shr/shr_kind_mod.F90) - !---------------------------------------------------------------------------- -C 8 byte real - integer,parameter,public :: SHR_KIND_R8 = selected_real_kind(12) -C native integer - integer,parameter,public :: SHR_KIND_IN = kind(1) -C long char - integer,parameter,public :: SHR_KIND_CL = 256 -C extra-long char - integer,parameter,public :: SHR_KIND_CX = 512 - -C----------------------------------------------------------------------- -C Private data --------------------------------------------------------- -C----------------------------------------------------------------------- - -C default - integer, parameter :: def_pu_logunit = 6 -C unit number for log output - integer, private :: pu_logunit = def_pu_logunit - -C======================================================================= - contains -C======================================================================= - -C -C======================================================================== -C - subroutine perfutils_setunit(LogUnit) -C----------------------------------------------------------------------- -C Purpose: Set log unit number. -C Author: P. Worley -C----------------------------------------------------------------------- -C---------------------------Input arguments----------------------------- -C -C Unit number for log output - integer(SHR_KIND_IN), intent(IN) :: LogUnit -C----------------------------------------------------------------------- - pu_logunit = LogUnit -C - return -C - end subroutine perfutils_setunit - -C============== Routines from csm_share/shr/shr_sys_mod.F90 ============ -C======================================================================= - - SUBROUTINE shr_sys_abort(string) - - IMPLICIT none - -C error message string - character(*) ,optional :: string - -C !----- local ----- - integer(SHR_KIND_IN) :: ierr - logical :: flag - -C !----- formats ----- - character(*),parameter :: subName = '(shr_sys_abort) ' - character(*),parameter :: F00 = "('(shr_sys_abort) ',4a)" - -C------------------------------------------------------------------------------- -C PURPOSE: consistent stopping mechanism -C (dumbed down from original shr_sys_mod.F90 version for use in perf_mod) -C------------------------------------------------------------------------------- - - call shr_sys_flush(pu_logunit) - - if ( present(string) ) then - if (len_trim(string) > 0) then - write(pu_logunit,*) trim(subName),' ERROR: ',trim(string) - else - write(pu_logunit,*) trim(subName),' ERROR ' - endif - else - write(pu_logunit,*) trim(subName),' ERROR ' - endif - - write(pu_logunit,F00) 'WARNING: calling mpi_abort() and stopping' - call shr_sys_flush(pu_logunit) - call mpi_abort(MPI_COMM_WORLD,0,ierr) - call shr_sys_flush(pu_logunit) - call abort() - - stop - - END SUBROUTINE shr_sys_abort - -C=============================================================================== -C=============================================================================== - - SUBROUTINE shr_sys_flush(unit) - - IMPLICIT none - -C !----- arguments ----- -C flush output buffer for this unit - integer(SHR_KIND_IN) :: unit - -C !----- formats ----- - character(*),parameter :: subName = '(shr_sys_flush) ' - character(*),parameter :: F00 = "('(shr_sys_flush) ',4a)" - -C------------------------------------------------------------------------------- -C PURPOSE: an architecture independant system call -C------------------------------------------------------------------------------- - -#if (defined IRIX64 || 
defined CRAY || defined OSF1 || defined SUNOS || defined LINUX || defined NEC_SX || defined UNICOSMP) - call flush(unit) -#endif -#if (defined AIX) - call flush_(unit) -#endif - -#if (!defined CRAY && !defined IRIX64 && !defined AIX && !defined OSF1 && !defined SUNOS && !defined LINUX && !defined NEC_SX && !defined UNICOSMP) -Cpw if (s_loglev > 0) write(pu_logunit,F00) 'WARNING: no implementation of flush for this architecture' -#endif - - END SUBROUTINE shr_sys_flush - -C=============================================================================== - -C================== Routines from csm_share/shr/shr_mpi_mod.F90 =============== -C=============================================================================== - - SUBROUTINE shr_mpi_chkerr(rcode,string) - - IMPLICIT none - -C !----- arguments --- -C input MPI error code - integer(SHR_KIND_IN), intent(in) :: rcode -C message - character(*), intent(in) :: string - -C !----- local --- - character(*),parameter :: subName = '(shr_mpi_chkerr) ' - character(MPI_MAX_ERROR_STRING) :: lstring - integer(SHR_KIND_IN) :: len - integer(SHR_KIND_IN) :: ierr - -C------------------------------------------------------------------------------- -C PURPOSE: layer on MPI error checking -C------------------------------------------------------------------------------- - - if (rcode /= MPI_SUCCESS) then - call MPI_ERROR_STRING(rcode,lstring,len,ierr) - write(pu_logunit,*) trim(subName),":",lstring(1:len) - call shr_mpi_abort(string,rcode) - endif - - END SUBROUTINE shr_mpi_chkerr - -C=============================================================================== -C=============================================================================== - - SUBROUTINE shr_mpi_abort(string,rcode) - - IMPLICIT none - -C !----- arguments --- -C message - character(*),optional,intent(in) :: string -C optional code - integer,optional,intent(in) :: rcode - -C !----- local --- - character(*),parameter :: subName = '(shr_mpi_abort) ' - integer(SHR_KIND_IN) :: ierr - -C------------------------------------------------------------------------------- -C PURPOSE: MPI abort -C------------------------------------------------------------------------------- - - if ( present(string) .and. 
present(rcode) ) then - write(pu_logunit,*) trim(subName),":",trim(string),rcode - endif - call MPI_ABORT(MPI_COMM_WORLD,rcode,ierr) - - END SUBROUTINE shr_mpi_abort - -C=============================================================================== -C=============================================================================== - - SUBROUTINE shr_mpi_barrier(comm,string) - - IMPLICIT none - -C !----- arguments --- - integer,intent(in) :: comm -C message - character(*),optional,intent(in) :: string - -C !----- local --- - character(*),parameter :: subName = '(shr_mpi_barrier) ' - integer(SHR_KIND_IN) :: ierr - -C------------------------------------------------------------------------------- -C PURPOSE: MPI barrier -C------------------------------------------------------------------------------- - - call MPI_BARRIER(comm,ierr) - if (present(string)) then - call shr_mpi_chkerr(ierr,subName//trim(string)) - else - call shr_mpi_chkerr(ierr,subName) - endif - - END SUBROUTINE shr_mpi_barrier - -C=============================================================================== -C=============================================================================== - - SUBROUTINE shr_mpi_bcasti0(vec,comm,string) - - IMPLICIT none - -C !----- arguments --- -C vector of 1 - integer(SHR_KIND_IN), intent(inout):: vec -C mpi communicator - integer(SHR_KIND_IN), intent(in) :: comm -C message - character(*),optional,intent(in) :: string - -C !----- local --- - character(*),parameter :: subName = '(shr_mpi_bcasti0) ' - integer(SHR_KIND_IN) :: ierr - integer(SHR_KIND_IN) :: lsize - -C------------------------------------------------------------------------------- -C PURPOSE: Broadcast an integer -C------------------------------------------------------------------------------- - - lsize = 1 - - call MPI_BCAST(vec,lsize,MPI_INTEGER,0,comm,ierr) - if (present(string)) then - call shr_mpi_chkerr(ierr,subName//trim(string)) - else - call shr_mpi_chkerr(ierr,subName) - endif - - END SUBROUTINE shr_mpi_bcasti0 - -C=============================================================================== -C=============================================================================== - - SUBROUTINE shr_mpi_bcastl0(vec,comm,string) - - IMPLICIT none - -C !----- arguments --- -C vector of 1 - logical, intent(inout):: vec -C mpi communicator - integer(SHR_KIND_IN), intent(in) :: comm -C message - character(*),optional,intent(in) :: string - -C !----- local --- - character(*),parameter :: subName = '(shr_mpi_bcastl0) ' - integer(SHR_KIND_IN) :: ierr - integer(SHR_KIND_IN) :: lsize - -C------------------------------------------------------------------------------- -C PURPOSE: Broadcast a logical -C------------------------------------------------------------------------------- - - lsize = 1 - - call MPI_BCAST(vec,lsize,MPI_LOGICAL,0,comm,ierr) - if (present(string)) then - call shr_mpi_chkerr(ierr,subName//trim(string)) - else - call shr_mpi_chkerr(ierr,subName) - endif - - END SUBROUTINE shr_mpi_bcastl0 - -C=============================================================================== - -C================== Routines from csm_share/shr/shr_file_mod.F90 =============== -C=============================================================================== -CBOP =========================================================================== -C -C !IROUTINE: shr_file_getUnit -- Get a free FORTRAN unit number -C -C !DESCRIPTION: Get the next free FORTRAN unit number. -C -C !REVISION HISTORY: -C 2005-Dec-14 - E. Kluzek - creation -C 2007-Oct-21 - P. 
Worley - dumbed down for use in perf_mod -C -C !INTERFACE: ------------------------------------------------------------------ - - INTEGER FUNCTION shr_file_getUnit () - - implicit none - -CEOP - -C !----- local parameters ----- -C Min unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_minUnit = 10 -C Max unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_maxUnit = 99 - -C !----- local variables ----- -C loop index - integer(SHR_KIND_IN) :: n -C If unit opened or not - logical :: opened - - !----- formats ----- - character(*),parameter :: subName = '(shr_file_getUnit) ' - character(*),parameter :: F00 = "('(shr_file_getUnit) ',A,I4,A)" - -C------------------------------------------------------------------------------- -C Notes: -C------------------------------------------------------------------------------- - -C ! --- Choose first available unit other than 0, 5, or 6 ------ - do n=shr_file_minUnit, shr_file_maxUnit - inquire( n, opened=opened ) - if (n == 5 .or. n == 6 .or. opened) then - cycle - end if - shr_file_getUnit = n - return - end do - - call shr_sys_abort( subName//': Error: no available units found' ) - - END FUNCTION shr_file_getUnit -C=============================================================================== - -C=============================================================================== -CBOP =========================================================================== -C -C !IROUTINE: shr_file_freeUnit -- Free up a FORTRAN unit number -C -C !DESCRIPTION: Free up the given unit number -C -C !REVISION HISTORY: -C 2005-Dec-14 - E. Kluzek - creation -C 2007-Oct-21 - P. Worley - dumbed down for use in perf_mod -C -C !INTERFACE: ------------------------------------------------------------------ - - SUBROUTINE shr_file_freeUnit ( unit) - - implicit none - -C !INPUT/OUTPUT PARAMETERS: - -C unit number to be freed - integer(SHR_KIND_IN),intent(in) :: unit - -CEOP - -C !----- local parameters ----- -C Min unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_minUnit = 10 -C Max unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_maxUnit = 99 - -C !----- formats ----- - character(*), parameter :: subName = '(shr_file_freeUnit) ' - character(*), parameter :: F00 = "('(shr_file_freeUnit) ',A,I4,A)" - -C------------------------------------------------------------------------------- -C Notes: -C------------------------------------------------------------------------------- - - if (unit < 0 .or. unit > shr_file_maxUnit) then -!pw if (s_loglev > 0) write(pu_logunit,F00) 'invalid unit number request:', unit - else if (unit == 0 .or. unit == 5 .or. unit == 6) then - call shr_sys_abort( - & subName//': Error: units 0, 5, and 6 must not be freed' ) - end if - - return - - END SUBROUTINE shr_file_freeUnit -C=============================================================================== - -C============= Routines from atm/cam/src/utils/namelist_utils.F90 ============== -C=============================================================================== - - subroutine find_group_name(unit, group, status) - -C--------------------------------------------------------------------------------------- -C Purpose: -C Search a file that contains namelist input for the specified namelist group name. -C Leave the file positioned so that the current record is the first record of the -C input for the specified group. -C -C Method: -C Read the file line by line. 
Each line is searched for an '&' which may only -C be preceded by blanks, immediately followed by the group name which is case -C insensitive. If found then backspace the file so the current record is the -C one containing the group name and return success. Otherwise return -1. -C -C Author: B. Eaton, August 2007 -C--------------------------------------------------------------------------------------- - -C fortran unit attached to file - integer, intent(in) :: unit -C namelist group name - character(len=*), intent(in) :: group -C 0 for success, -1 if group name not found - integer, intent(out) :: status - -C ! Local variables - - integer :: len_grp -C io status - integer :: ios -C first 80 characters of input record - character(len=80) :: inrec -C left adjusted input record - character(len=80) :: inrec2 - character(len=len(group)) :: lc_group - -C !--------------------------------------------------------------------------- - - len_grp = len_trim(group) - lc_group = to_lower(group) - - ios = 0 - do while (ios <= 0) - - read(unit, '(a)', iostat=ios, end=102) inrec - - if (ios <= 0) then -C ios < 0 indicates an end of record condition - -C ! look for group name in this record - -C ! remove leading blanks - inrec2 = adjustl(inrec) - -C ! check for leading '&' - if (inrec2(1:1) == '&') then - -C ! check for case insensitive group name - if (trim(lc_group) == to_lower(inrec2(2:len_grp+1))) then - -C ! found group name. backspace to leave file position at this record - backspace(unit) - status = 0 - return - - end if - end if - end if - - end do - -102 continue ! end of file processing - status = -1 - - end subroutine find_group_name -C=============================================================================== - -C================ Routines from atm/cam/src/utils/string_utils.F90 ============= -C=============================================================================== - - function to_lower(str) - -C----------------------------------------------------------------------- -C Purpose: -C Convert character string to lower case. -C -C Method: -C Use achar and iachar intrinsics to ensure use of ascii collating sequence. -C -C Author: B. Eaton, July 2001 -C -C $Id$ -C----------------------------------------------------------------------- - implicit none - -C String to convert to lower case - character(len=*), intent(in) :: str - character(len=len(str)) :: to_lower - -C Local variables - -C Index - integer :: i -C ascii collating sequence - integer :: aseq -C integer to convert case - integer :: upper_to_lower -C Character temporary - character(len=1) :: ctmp -C----------------------------------------------------------------------- - upper_to_lower = iachar("a") - iachar("A") - - do i = 1, len(str) - ctmp = str(i:i) - aseq = iachar(ctmp) - if ( aseq >= iachar("A") .and. 
aseq <= iachar("Z") ) - & ctmp = achar(aseq + upper_to_lower) - to_lower(i:i) = ctmp - end do - - end function to_lower -C=============================================================================== - - end module perf_utils diff --git a/src/externals/pio1/timing/f_wrappers.c b/src/externals/pio1/timing/f_wrappers.c deleted file mode 100644 index b1da29ec4eb..00000000000 --- a/src/externals/pio1/timing/f_wrappers.c +++ /dev/null @@ -1,538 +0,0 @@ -/* -** $Id: f_wrappers.c,v 1.56 2010-12-29 18:46:42 rosinski Exp $ -** -** Author: Jim Rosinski -** -** Fortran wrappers for timing library routines -*/ - -#include -#include -#include "private.h" /* MAX_CHARS, bool */ -#include "gptl.h" /* function prototypes and HAVE_MPI logic*/ - -#if ( defined FORTRANCAPS ) - -#define gptlinitialize GPTLINITIALIZE -#define gptlfinalize GPTLFINALIZE -#define gptlpr_set_append GPTLPR_SET_APPEND -#define gptlpr_query_append GPTLPR_QUERY_APPEND -#define gptlpr_set_write GPTLPR_SET_WRITE -#define gptlpr_query_write GPTLPR_QUERY_WRITE -#define gptlpr GPTLPR -#define gptlpr_file GPTLPR_FILE -#define gptlpr_summary GPTLPR_SUMMARY -#define gptlpr_summary_FILE GPTLPR_SUMMARY_FILE -#define gptlbarrier GPTLBARRIER -#define gptlreset GPTLRESET -#define gptlstamp GPTLSTAMP -#define gptlstart GPTLSTART -#define gptlstart_handle GPTLSTART_HANDLE -#define gptlstop GPTLSTOP -#define gptlstop_handle GPTLSTOP_HANDLE -#define gptlsetoption GPTLSETOPTION -#define gptlenable GPTLENABLE -#define gptldisable GPTLDISABLE -#define gptlsetutr GPTLSETUTR -#define gptlquery GPTLQUERY -#define gptlquerycounters GPTLQUERYCOUNTERS -#define gptlget_wallclock GPTLGET_WALLCLOCK -#define gptlget_eventvalue GPTLGET_EVENTVALUE -#define gptlget_nregions GPTLGET_NREGIONS -#define gptlget_regionname GPTLGET_REGIONNAME -#define gptlget_memusage GPTLGET_MEMUSAGE -#define gptlprint_memusage GPTLPRINT_MEMUSAGE -#define gptl_papilibraryinit GPTL_PAPILIBRARYINIT -#define gptlevent_name_to_code GPTLEVENT_NAME_TO_CODE -#define gptlevent_code_to_name GPTLEVENT_CODE_TO_NAME - -#elif ( defined INCLUDE_CMAKE_FCI ) - -#define gptlinitialize FCI_GLOBAL(gptlinitialize,GPTLINITIALIZE) -#define gptlfinalize FCI_GLOBAL(gptlfinalize,GPTLFINALIZE) -#define gptlpr FCI_GLOBAL(gptlpr,GPTLPR) -#define gptlpr_file FCI_GLOBAL(gptlpr_file,GPTLPR_FILE) -#define gptlpr_summary FCI_GLOBAL(gptlpr_summary,GPTLPR_SUMMARY) -#define gptlreset FCI_GLOBAL(gptlreset,GPTLRESET) -#define gptlstamp FCI_GLOBAL(gptlstamp,GPTLSTAMP) -#define gptlstart FCI_GLOBAL(gptlstart,GPTLSTART) -#define gptlstop FCI_GLOBAL(gptlstop,GPTLSTOP) -#define gptlsetoption FCI_GLOBAL(gptlsetoption,GPTLSETOPTION) -#define gptlenable FCI_GLOBAL(gptlenable,GPTLENABLE) -#define gptldisable FCI_GLOBAL(gptldisable,GPTLDISABLE) -#define gptlsetutr FCI_GLOBAL(gptlsetutr,GPTLSETUTR) -#define gptlquery FCI_GLOBAL(gptlquery,GPTLQUERY) -#define gptlquerycounters FCI_GLOBAL(gptlquerycounters,GPTLQUERYCOUNTERS) -#define gptlget_wallclock FCI_GLOBAL(gptlget_wallclock,GPTLGET_WALLCLOCK) -#define gptlget_eventvalue FCI_GLOBAL(gptlget_eventvalue,GPTLGET_EVENTVALUE) -#define gptlget_nregions FCI_GLOBAL(gptlget_nregions,GPTLGET_NREGIONS) -#define gptlget_regionname FCI_GLOBAL(gptlget_regionname,GPTLGET_REGIONNAME) -#define gptlget_memusage FCI_GLOBAL(gptlget_memusage,GPTLGET_MEMUSAGE) -#define gptlprint_memusage FCI_GLOBAL(gptlprint_memusage,GPTLPRINT_MEMUSAGE) -#define gptl_papilibraryinit FCI_GLOBAL(gptl_papilibraryinit,GPTL_PAPILIBRARYINIT) -#define gptlevent_name_to_code 
FCI_GLOBAL(gptlevent_name_to_code,GPTLEVENT_NAME_TO_CODE) -#define gptlevent_code_to_name FCI_GLOBAL(gptlevent_code_to_name,GPTLEVENT_CODE_TO_NAME) -#define gptlpr_query_write FCI_GLOBAL(gptlpr_query_write,GPTLPR_QUERY_WRITE) -#define gptlpr_summary_file FCI_GLOBAL(gptlpr_summary_file,GPTLPR_SUMMARY_FILE) -#define gptlpr_set_write FCI_GLOBAL(gptlpr_set_write,GPTLPR_SET_WRITE) -#define gptlpr_set_append FCI_GLOBAL(gptlpr_set_append,GPTLPR_SET_APPEND) -#define gptlstop_handle FCI_GLOBAL(gptlstop_handle,GPTLSTOP_HANDLE) -#define gptlstart_handle FCI_GLOBAL(gptlstart_handle,GPTLSTART_HANDLE) -#elif ( defined FORTRANUNDERSCORE ) - -#define gptlinitialize gptlinitialize_ -#define gptlfinalize gptlfinalize_ -#define gptlpr_set_append gptlpr_set_append_ -#define gptlpr_query_append gptlpr_query_append_ -#define gptlpr_set_write gptlpr_set_write_ -#define gptlpr_query_write gptlpr_query_write_ -#define gptlpr gptlpr_ -#define gptlpr_file gptlpr_file_ -#define gptlpr_summary gptlpr_summary_ -#define gptlpr_summary_file gptlpr_summary_file_ -#define gptlbarrier gptlbarrier_ -#define gptlreset gptlreset_ -#define gptlstamp gptlstamp_ -#define gptlstart gptlstart_ -#define gptlstart_handle gptlstart_handle_ -#define gptlstop gptlstop_ -#define gptlstop_handle gptlstop_handle_ -#define gptlsetoption gptlsetoption_ -#define gptlenable gptlenable_ -#define gptldisable gptldisable_ -#define gptlsetutr gptlsetutr_ -#define gptlquery gptlquery_ -#define gptlquerycounters gptlquerycounters_ -#define gptlget_wallclock gptlget_wallclock_ -#define gptlget_eventvalue gptlget_eventvalue_ -#define gptlget_nregions gptlget_nregions_ -#define gptlget_regionname gptlget_regionname_ -#define gptlget_memusage gptlget_memusage_ -#define gptlprint_memusage gptlprint_memusage_ -#define gptl_papilibraryinit gptl_papilibraryinit_ -#define gptlevent_name_to_code gptlevent_name_to_code_ -#define gptlevent_code_to_name gptlevent_code_to_name_ - -#elif ( defined FORTRANDOUBLEUNDERSCORE ) - -#define gptlinitialize gptlinitialize__ -#define gptlfinalize gptlfinalize__ -#define gptlpr_set_append gptlpr_set_append__ -#define gptlpr_query_append gptlpr_query_append__ -#define gptlpr_set_write gptlpr_set_write__ -#define gptlpr_query_write gptlpr_query_write__ -#define gptlpr gptlpr__ -#define gptlpr_file gptlpr_file__ -#define gptlpr_summary gptlpr_summary__ -#define gptlpr_summary_file gptlpr_summary_file__ -#define gptlbarrier gptlbarrier__ -#define gptlreset gptlreset__ -#define gptlstamp gptlstamp__ -#define gptlstart gptlstart__ -#define gptlstart_handle gptlstart_handle__ -#define gptlstop gptlstop__ -#define gptlstop_handle gptlstop_handle__ -#define gptlsetoption gptlsetoption__ -#define gptlenable gptlenable__ -#define gptldisable gptldisable__ -#define gptlsetutr gptlsetutr__ -#define gptlquery gptlquery__ -#define gptlquerycounters gptlquerycounters__ -#define gptlget_wallclock gptlget_wallclock__ -#define gptlget_eventvalue gptlget_eventvalue__ -#define gptlget_nregions gptlget_nregions__ -#define gptlget_regionname gptlget_regionname__ -#define gptlget_memusage gptlget_memusage__ -#define gptlprint_memusage gptlprint_memusage__ -#define gptl_papilibraryinit gptl_papilibraryinit__ -#define gptlevent_name_to_code gptlevent_name_to_code__ -#define gptlevent_code_to_name gptlevent_code_to_name__ - -#endif - -/* -** Local function prototypes -*/ - -int gptlinitialize (void); -int gptlfinalize (void); -int gptlpr_set_append (void); -int gptlpr_query_append (void); -int gptlpr_set_write (void); -int gptlpr_query_write (void); 
-int gptlpr (int *procid); -int gptlpr_file (char *file, int nc1); -int gptlpr_summary (int *fcomm); -int gptlpr_summary_file (int *fcomm, char *name, int nc1); -int gptlbarrier (int *fcomm, char *name, int nc1); -int gptlreset (void); -int gptlstamp (double *wall, double *usr, double *sys); -int gptlstart (char *name, int nc1); -int gptlstart_handle (char *name, void **, int nc1); -int gptlstop (char *name, int nc1); -int gptlstop_handle (char *name, void **, int nc1); -int gptlsetoption (int *option, int *val); -int gptlenable (void); -int gptldisable (void); -int gptlsetutr (int *option); -int gptlquery (const char *name, int *t, int *count, int *onflg, double *wallclock, - double *usr, double *sys, long long *papicounters_out, int *maxcounters, - int nc); -int gptlquerycounters (const char *name, int *t, long long *papicounters_out, int nc); -int gptlget_wallclock (const char *name, int *t, double *value, int nc); -int gptlget_eventvalue (const char *timername, const char *eventname, int *t, double *value, - int nc1, int nc2); -int gptlget_nregions (int *t, int *nregions); -int gptlget_regionname (int *t, int *region, char *name, int nc); -int gptlget_memusage (int *size, int *rss, int *share, int *text, int *datastack); -int gptlprint_memusage (const char *str, int nc); -#ifdef HAVE_PAPI -int gptl_papilibraryinit (void); -int gptlevent_name_to_code (const char *str, int *code, int nc); -int gptlevent_code_to_name (int *code, char *str, int nc); -#endif - -/* -** Fortran wrapper functions start here -*/ - -int gptlinitialize (void) -{ - return GPTLinitialize (); -} - -int gptlfinalize (void) -{ - return GPTLfinalize (); -} - -int gptlpr_set_append (void) -{ - return GPTLpr_set_append (); -} - -int gptlpr_query_append (void) -{ - return GPTLpr_query_append (); -} - -int gptlpr_set_write (void) -{ - return GPTLpr_set_write (); -} - -int gptlpr_query_write (void) -{ - return GPTLpr_query_write (); -} - -int gptlpr (int *procid) -{ - return GPTLpr (*procid); -} - -int gptlpr_file (char *file, int nc1) -{ - char *locfile; - int ret; - - if ( ! (locfile = (char *) malloc (nc1+1))) - return GPTLerror ("gptlpr_file: malloc error\n"); - - snprintf (locfile, nc1+1, "%s", file); - - ret = GPTLpr_file (locfile); - free (locfile); - return ret; -} - -int gptlpr_summary (int *fcomm) -{ -#ifdef HAVE_MPI - MPI_Comm ccomm; -#ifdef HAVE_COMM_F2C - ccomm = MPI_Comm_f2c (*fcomm); -#else - /* Punt and try just casting the Fortran communicator */ - ccomm = (MPI_Comm) *fcomm; -#endif -#else - int ccomm = 0; -#endif - - return GPTLpr_summary (ccomm); -} - -int gptlpr_summary_file (int *fcomm, char *file, int nc1) -{ - char *locfile; - int ret; - -#ifdef HAVE_MPI - MPI_Comm ccomm; -#ifdef HAVE_COMM_F2C - ccomm = MPI_Comm_f2c (*fcomm); -#else - /* Punt and try just casting the Fortran communicator */ - ccomm = (MPI_Comm) *fcomm; -#endif -#else - int ccomm = 0; -#endif - - if ( !
(locfile = (char *) malloc (nc1+1))) - return GPTLerror ("gptlpr_summary_file: malloc error\n"); - - snprintf (locfile, nc1+1, "%s", file); - - ret = GPTLpr_summary_file (ccomm, locfile); - free (locfile); - return ret; -} - -int gptlbarrier (int *fcomm, char *name, int nc1) -{ - char cname[MAX_CHARS+1]; - int numchars; -#ifdef HAVE_MPI - MPI_Comm ccomm; -#ifdef HAVE_COMM_F2C - ccomm = MPI_Comm_f2c (*fcomm); -#else - /* Punt and try just casting the Fortran communicator */ - ccomm = (MPI_Comm) *fcomm; -#endif -#else - int ccomm = 0; -#endif - - numchars = MIN (nc1, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - return GPTLbarrier (ccomm, cname); -} - -int gptlreset (void) -{ - return GPTLreset(); -} - -int gptlstamp (double *wall, double *usr, double *sys) -{ - return GPTLstamp (wall, usr, sys); -} - -int gptlstart (char *name, int nc1) -{ - char cname[MAX_CHARS+1]; - int numchars; - - numchars = MIN (nc1, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - return GPTLstart (cname); -} - -int gptlstart_handle (char *name, void **handle, int nc1) -{ - char cname[MAX_CHARS+1]; - int numchars; - - if (*handle) { - cname[0] = '\0'; - } else { - numchars = MIN (nc1, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - } - return GPTLstart_handle (cname, handle); -} - -int gptlstop (char *name, int nc1) -{ - char cname[MAX_CHARS+1]; - int numchars; - - numchars = MIN (nc1, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - return GPTLstop (cname); -} - -int gptlstop_handle (char *name, void **handle, int nc1) -{ - char cname[MAX_CHARS+1]; - int numchars; - - if (*handle) { - cname[0] = '\0'; - } else { - numchars = MIN (nc1, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - } - return GPTLstop_handle (cname, handle); -} - -int gptlsetoption (int *option, int *val) -{ - return GPTLsetoption (*option, *val); -} - -int gptlenable (void) -{ - return GPTLenable (); -} - -int gptldisable (void) -{ - return GPTLdisable (); -} - -int gptlsetutr (int *option) -{ - return GPTLsetutr (*option); -} - -int gptlquery (const char *name, int *t, int *count, int *onflg, double *wallclock, - double *usr, double *sys, long long *papicounters_out, int *maxcounters, - int nc) -{ - char cname[MAX_CHARS+1]; - int numchars; - - numchars = MIN (nc, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - return GPTLquery (cname, *t, count, onflg, wallclock, usr, sys, papicounters_out, *maxcounters); -} - -int gptlquerycounters (const char *name, int *t, long long *papicounters_out, int nc) -{ - char cname[MAX_CHARS+1]; - int numchars; - - numchars = MIN (nc, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - return GPTLquerycounters (cname, *t, papicounters_out); -} - -int gptlget_wallclock (const char *name, int *t, double *value, int nc) -{ - char cname[MAX_CHARS+1]; - int numchars; - - numchars = MIN (nc, MAX_CHARS); - strncpy (cname, name, numchars); - cname[numchars] = '\0'; - - return GPTLget_wallclock (cname, *t, value); -} - -int gptlget_eventvalue (const char *timername, const char *eventname, int *t, double *value, - int nc1, int nc2) -{ - char ctimername[MAX_CHARS+1]; - char ceventname[MAX_CHARS+1]; - int numchars; - - numchars = MIN (nc1, MAX_CHARS); - strncpy (ctimername, timername, numchars); - ctimername[numchars] = '\0'; - - numchars = MIN (nc2, MAX_CHARS); - strncpy (ceventname, eventname, numchars); - ceventname[numchars] = '\0'; - - return 
GPTLget_eventvalue (ctimername, ceventname, *t, value); -} - -int gptlget_nregions (int *t, int *nregions) -{ - return GPTLget_nregions (*t, nregions); -} - -int gptlget_regionname (int *t, int *region, char *name, int nc) -{ - int n; - int ret; - - ret = GPTLget_regionname (*t, *region, name, nc); - /* Turn nulls into spaces for fortran */ - for (n = 0; n < nc; ++n) - if (name[n] == '\0') - name[n] = ' '; - return ret; -} - -int gptlget_memusage (int *size, int *rss, int *share, int *text, int *datastack) -{ - return GPTLget_memusage (size, rss, share, text, datastack); -} - -int gptlprint_memusage (const char *str, int nc) -{ - char cname[128+1]; - int numchars = MIN (nc, 128); - - strncpy (cname, str, numchars); - cname[numchars] = '\0'; - return GPTLprint_memusage (cname); -} - -#ifdef HAVE_PAPI -#include - -int gptl_papilibraryinit (void) -{ - return GPTL_PAPIlibraryinit (); -} - -int gptlevent_name_to_code (const char *str, int *code, int nc) -{ - char cname[PAPI_MAX_STR_LEN+1]; - int numchars = MIN (nc, PAPI_MAX_STR_LEN); - - strncpy (cname, str, numchars); - cname[numchars] = '\0'; - - /* "code" is an int* and is an output variable */ - - return GPTLevent_name_to_code (cname, code); -} - -int gptlevent_code_to_name (int *code, char *str, int nc) -{ - int i; - - if (nc < PAPI_MAX_STR_LEN) - return GPTLerror ("gptl_event_code_to_name: output name must hold at least %d characters\n", - PAPI_MAX_STR_LEN); - - if (GPTLevent_code_to_name (*code, str) == 0) { - for (i = strlen(str); i < nc; ++i) - str[i] = ' '; - } else { - return GPTLerror (""); - } - return 0; -} -#else - -int gptl_papilibraryinit (void) -{ - return GPTL_PAPIlibraryinit (); -} - -int gptlevent_name_to_code (const char *str, int *code, int nc) -{ - return GPTLevent_name_to_code (str, code); -} - -int gptlevent_code_to_name (const int *code, char *str, int nc) -{ - return GPTLevent_code_to_name (*code, str); -} - -#endif diff --git a/src/externals/pio1/timing/gptl.c b/src/externals/pio1/timing/gptl.c deleted file mode 100644 index 19c0ff7fa6a..00000000000 --- a/src/externals/pio1/timing/gptl.c +++ /dev/null @@ -1,4369 +0,0 @@ -/* -** $Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $ -** -** Author: Jim Rosinski -** -** Main file contains most user-accessible GPTL functions -*/ - -#include /* malloc */ -#include /* gettimeofday */ -#include /* times */ -#include /* gettimeofday, syscall */ -#include -#include /* memset, strcmp (via STRMATCH) */ -#include /* isdigit */ -#include /* u_int8_t, u_int16_t */ -#include - -#ifndef HAVE_C99_INLINE -#define inline -#endif - -#ifdef HAVE_PAPI -#include /* PAPI_get_real_usec */ -#endif - -#ifdef HAVE_LIBRT -#include -#endif - -#ifdef _AIX -#include -#endif - -#include "private.h" -#include "gptl.h" - -static Timer **timers = 0; /* linked list of timers */ -static Timer **last = 0; /* last element in list */ -static int *max_depth; /* maximum indentation level encountered */ -static int *max_name_len; /* max length of timer name */ -static volatile int nthreads = -1; /* num threads. Init to bad value */ -static volatile int maxthreads = -1; /* max threads (=nthreads for OMP). Init to bad value */ -static int depthlimit = 99999; /* max depth for timers (99999 is effectively infinite) */ -static volatile bool disabled = false; /* Timers disabled? 
*/ -static volatile bool initialized = false; /* GPTLinitialize has been called */ -static volatile bool pr_has_been_called = false; /* GPTLpr_file has been called */ -static Entry eventlist[MAX_AUX]; /* list of PAPI-based events to be counted */ -static int nevents = 0; /* number of PAPI events (init to 0) */ -static bool dousepapi = false; /* saves a function call if stays false */ -static bool verbose = false; /* output verbosity */ -static bool percent = false; /* print wallclock also as percent of 1st timers[0] */ -static bool dopr_preamble = true; /* whether to print preamble info */ -static bool dopr_threadsort = true; /* whether to print sorted thread stats */ -static bool dopr_multparent = true; /* whether to print multiple parent info */ -static bool dopr_collision = true; /* whether to print hash collision info */ -static bool pr_append = false; /* whether to append to output file */ - -static time_t ref_gettimeofday = -1; /* ref start point for gettimeofday */ -static time_t ref_clock_gettime = -1;/* ref start point for clock_gettime */ -#ifdef _AIX -static time_t ref_read_real_time = -1; /* ref start point for read_real_time */ -#endif -static long long ref_papitime = -1; /* ref start point for PAPI_get_real_usec */ - -#if ( defined THREADED_OMP ) - -#include -static volatile int *threadid_omp = 0; /* array of thread ids */ - -#elif ( defined THREADED_PTHREADS ) - -#include - -#define MUTEX_API -#ifdef MUTEX_API -static volatile pthread_mutex_t t_mutex; -#else -static volatile pthread_mutex_t t_mutex = PTHREAD_MUTEX_INITIALIZER; -#endif -static volatile pthread_t *threadid = 0; /* array of thread ids */ -static int lock_mutex (void); /* lock a mutex for entry into a critical region */ -static int unlock_mutex (void); /* unlock a mutex for exit from a critical region */ - -#else - -/* Unthreaded case */ -static int threadid = -1; - -#endif - -typedef struct { - const Option option; /* wall, cpu, etc. 
*/ - const char *str; /* descriptive string for printing */ - bool enabled; /* flag */ -} Settings; - -/* For Summary stats */ - -typedef struct { - double wallmax; - double wallmin; - double walltotal; - int processes; - int threads; -#ifdef HAVE_PAPI - double papimax[MAX_AUX]; - double papimin[MAX_AUX]; - double papitotal[MAX_AUX]; -#endif - unsigned long count; - int wallmax_p; /* over processes */ - int wallmax_t; /* over threads */ - int wallmin_p; - int wallmin_t; -#ifdef HAVE_PAPI - int papimax_p[MAX_AUX]; /* over processes */ - int papimax_t[MAX_AUX]; /* over threads */ - int papimin_p[MAX_AUX]; - int papimin_t[MAX_AUX]; -#endif -} Summarystats; - -/* Options, print strings, and default enable flags */ - -static Settings cpustats = {GPTLcpu, "Usr sys usr+sys ", false}; -static Settings wallstats = {GPTLwall, " Wallclock max min", true }; -static Settings overheadstats = {GPTLoverhead, " UTR Overhead " , true }; - -static Hashentry **hashtable; /* table of entries */ -static long ticks_per_sec; /* clock ticks per second */ -static char **timerlist; /* list of all timers */ - -typedef struct { - int val; /* depth in calling tree */ - int padding[31]; /* padding is to mitigate false cache sharing */ -} Nofalse; -static Timer ***callstack; /* call stack */ -static Nofalse *stackidx; /* index into callstack: */ - -static Method method = GPTLmost_frequent; /* default parent/child printing mechanism */ - -/* Local function prototypes */ - -static void printstats (const Timer *, FILE *, const int, const int, const bool, double); -static void add (Timer *, const Timer *); - -static void get_threadstats (const int, const char *, Summarystats *); -static void get_summarystats (Summarystats *, const Summarystats *); -#ifdef HAVE_MPI -static int collect_data( const int, MPI_Comm, int *, Summarystats ** ); -#else -static int collect_data( const int, const int, int *, Summarystats ** ); -#endif -static int merge_thread_data(); - -static void print_multparentinfo (FILE *, Timer *); -static inline int get_cpustamp (long *, long *); -static int newchild (Timer *, Timer *); -static int get_max_depth (const Timer *, const int); -static int num_descendants (Timer *); -static int is_descendant (const Timer *, const Timer *); -static char *methodstr (Method); - -/* Prototypes from previously separate file threadutil.c */ - -static int threadinit (void); /* initialize threading environment */ -static void threadfinalize (void); /* finalize threading environment */ -static void print_threadmapping (FILE *); /* print mapping of thread ids */ -static inline int get_thread_num (void); /* get 0-based thread number */ - -/* These are the (possibly) supported underlying wallclock timers */ - -static inline double utr_nanotime (void); -static inline double utr_mpiwtime (void); -static inline double utr_clock_gettime (void); -static inline double utr_papitime (void); -static inline double utr_read_real_time (void); -static inline double utr_gettimeofday (void); - -static int init_nanotime (void); -static int init_mpiwtime (void); -static int init_clock_gettime (void); -static int init_papitime (void); -static int init_read_real_time (void); -static int init_gettimeofday (void); - -static double utr_getoverhead (void); -static inline Timer *getentry_instr (const Hashentry *, void *, unsigned int *); -static inline Timer *getentry (const Hashentry *, const char *, unsigned int *); -static void printself_andchildren (const Timer *, FILE *, const int, const int, const double); -static inline int update_parent_info 
(Timer *, Timer **, int); -static inline int update_stats (Timer *, const double, const long, const long, const int); -static int update_ll_hash (Timer *, const int, const unsigned int); -static inline int update_ptr (Timer *, const int); -static int construct_tree (Timer *, Method); - -static int cmp (const char **, const char **); -static int ncmp (const char **, const char **); -static int get_index ( const char *, const char *); - -typedef struct { - const Funcoption option; - double (*func)(void); - int (*funcinit)(void); - const char *name; -} Funcentry; - -static Funcentry funclist[] = { - {GPTLgettimeofday, utr_gettimeofday, init_gettimeofday, "gettimeofday"}, - {GPTLnanotime, utr_nanotime, init_nanotime, "nanotime"}, - {GPTLmpiwtime, utr_mpiwtime, init_mpiwtime, "MPI_Wtime"}, - {GPTLclockgettime, utr_clock_gettime, init_clock_gettime, "clock_gettime"}, - {GPTLpapitime, utr_papitime, init_papitime, "PAPI_get_real_usec"}, - {GPTLread_real_time, utr_read_real_time, init_read_real_time,"read_real_time"} /* AIX only */ -}; -static const int nfuncentries = sizeof (funclist) / sizeof (Funcentry); - -static double (*ptr2wtimefunc)() = 0; /* init to invalid */ -static int funcidx = 0; /* default timer is gettimeofday */ - -#ifdef HAVE_NANOTIME -static float cpumhz = -1.; /* init to bad value */ -static double cyc2sec = -1; /* init to bad value */ -static unsigned inline long long nanotime (void); /* read counter (assembler) */ -static float get_clockfreq (void); /* cycles/sec */ -#endif - -static int tablesize = 1024; /* per-thread size of hash table (settable parameter) */ -static char *outdir = 0; /* dir to write output files to (currently unused) */ - -/* VERBOSE is a debugging ifdef local to the rest of this file */ -#undef VERBOSE - -/* -** GPTLsetoption: set option value to true or false. 
-** -** Input arguments: -** option: option to be set -** val: value to which option should be set (nonzero=true, zero=false) -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLsetoption (const int option, /* option */ - const int val) /* value */ -{ - static const char *thisfunc = "GPTLsetoption"; - - if (initialized) - return GPTLerror ("%s: must be called BEFORE GPTLinitialize\n", thisfunc); - - if (option == GPTLabort_on_error) { - GPTLset_abort_on_error ((bool) val); - if (verbose) - printf ("%s: boolean abort_on_error = %d\n", thisfunc, val); - return 0; - } - - switch (option) { - case GPTLcpu: -#ifdef HAVE_TIMES - cpustats.enabled = (bool) val; - if (verbose) - printf ("%s: cpustats = %d\n", thisfunc, val); -#else - if (val) - return GPTLerror ("%s: times() not available\n", thisfunc); -#endif - return 0; - case GPTLwall: - wallstats.enabled = (bool) val; - if (verbose) - printf ("%s: boolean wallstats = %d\n", thisfunc, val); - return 0; - case GPTLoverhead: - overheadstats.enabled = (bool) val; - if (verbose) - printf ("%s: boolean overheadstats = %d\n", thisfunc, val); - return 0; - case GPTLdepthlimit: - depthlimit = val; - if (verbose) - printf ("%s: depthlimit = %d\n", thisfunc, val); - return 0; - case GPTLverbose: - verbose = (bool) val; -#ifdef HAVE_PAPI - (void) GPTL_PAPIsetoption (GPTLverbose, val); -#endif - if (verbose) - printf ("%s: boolean verbose = %d\n", thisfunc, val); - return 0; - case GPTLpercent: - percent = (bool) val; - if (verbose) - printf ("%s: boolean percent = %d\n", thisfunc, val); - return 0; - case GPTLdopr_preamble: - dopr_preamble = (bool) val; - if (verbose) - printf ("%s: boolean dopr_preamble = %d\n", thisfunc, val); - return 0; - case GPTLdopr_threadsort: - dopr_threadsort = (bool) val; - if (verbose) - printf ("%s: boolean dopr_threadsort = %d\n", thisfunc, val); - return 0; - case GPTLdopr_multparent: - dopr_multparent = (bool) val; - if (verbose) - printf ("%s: boolean dopr_multparent = %d\n", thisfunc, val); - return 0; - case GPTLdopr_collision: - dopr_collision = (bool) val; - if (verbose) - printf ("%s: boolean dopr_collision = %d\n", thisfunc, val); - return 0; - case GPTLprint_method: - method = (Method) val; - if (verbose) - printf ("%s: print_method = %s\n", thisfunc, methodstr (method)); - return 0; - case GPTLtablesize: - if (val < 1) - return GPTLerror ("%s: tablesize must be positive. %d is invalid\n", thisfunc, val); - - tablesize = val; - if (verbose) - printf ("%s: tablesize = %d\n", thisfunc, tablesize); - return 0; - case GPTLsync_mpi: -#ifdef ENABLE_PMPI - if (GPTLpmpi_setoption (option, val) != 0) - fprintf (stderr, "%s: GPTLpmpi_setoption failure\n", thisfunc); -#endif - if (verbose) - printf ("%s: boolean sync_mpi = %d\n", thisfunc, val); - return 0; - - /* - ** Allow GPTLmultiplex to fall through because it will be handled by - ** GPTL_PAPIsetoption() - */ - - case GPTLmultiplex: - default: - break; - } - -#ifdef HAVE_PAPI - if (GPTL_PAPIsetoption (option, val) == 0) { - if (val) - dousepapi = true; - return 0; - } -#else - /* Make GPTLnarrowprint a placebo if PAPI not enabled */ - - if (option == GPTLnarrowprint) - return 0; -#endif - - return GPTLerror ("%s: faiure to enable option %d\n", thisfunc, option); -} - -/* -** GPTLsetutr: set underlying timing routine. 
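GPTLsetoption above rejects calls made after GPTLinitialize, treats most options as 0/1 booleans, takes plain integer values for GPTLdepthlimit and GPTLtablesize, and hands anything it does not recognize to GPTL_PAPIsetoption. A short, hypothetical configuration sketch using option names taken from the switch above (each call returns 0 on success or an error status via GPTLerror):

/* all of these must run before GPTLinitialize () */
GPTLsetoption (GPTLverbose, 0);         /* keep option processing quiet */
GPTLsetoption (GPTLoverhead, 1);        /* include timer-overhead estimates in reports */
GPTLsetoption (GPTLdepthlimit, 4);      /* ignore timers nested more than 4 deep */
GPTLsetoption (GPTLtablesize, 2048);    /* enlarge the per-thread hash table */
GPTLsetoption (GPTLdopr_collision, 0);  /* omit hash-collision diagnostics */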
-** -** Input arguments: -** option: index which sets function -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLsetutr (const int option) -{ - int i; /* index over number of underlying timer */ - static const char *thisfunc = "GPTLsetutr"; - - if (initialized) - return GPTLerror ("%s: must be called BEFORE GPTLinitialize\n", thisfunc); - - for (i = 0; i < nfuncentries; i++) { - if (option == (int) funclist[i].option) { - if (verbose) - printf ("%s: underlying wallclock timer = %s\n", thisfunc, funclist[i].name); - funcidx = i; - - /* - ** Return an error condition if the function is not available. - ** OK for the user code to ignore: GPTLinitialize() will reset to gettimeofday - */ - - if ((*funclist[i].funcinit)() < 0) - return GPTLerror ("%s: utr=%s not available\n", thisfunc, funclist[i].name); - else - return 0; - } - } - return GPTLerror ("%s: unknown option %d\n", thisfunc, option); -} - -/* -** GPTLinitialize (): Initialization routine must be called from single-threaded -** region before any other timing routines may be called. The need for this -** routine could be eliminated if not targetting timing library for threaded -** capability. -** -** return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLinitialize (void) -{ - int i; /* loop index */ - int t; /* thread index */ - double t1, t2; /* returned from underlying timer */ - static const char *thisfunc = "GPTLinitialize"; - - if (initialized) - return GPTLerror ("%s: has already been called\n", thisfunc); - - if (threadinit () < 0) - return GPTLerror ("%s: bad return from threadinit\n", thisfunc); - - if ((ticks_per_sec = sysconf (_SC_CLK_TCK)) == -1) - return GPTLerror ("%s: failure from sysconf (_SC_CLK_TCK)\n", thisfunc); - - /* Allocate space for global arrays */ - - callstack = (Timer ***) GPTLallocate (maxthreads * sizeof (Timer **)); - stackidx = (Nofalse *) GPTLallocate (maxthreads * sizeof (Nofalse)); - timers = (Timer **) GPTLallocate (maxthreads * sizeof (Timer *)); - last = (Timer **) GPTLallocate (maxthreads * sizeof (Timer *)); - max_depth = (int *) GPTLallocate (maxthreads * sizeof (int)); - max_name_len = (int *) GPTLallocate (maxthreads * sizeof (int)); - hashtable = (Hashentry **) GPTLallocate (maxthreads * sizeof (Hashentry *)); - - /* Initialize array values */ - - for (t = 0; t < maxthreads; t++) { - max_depth[t] = -1; - max_name_len[t] = 0; - callstack[t] = (Timer **) GPTLallocate (MAX_STACK * sizeof (Timer *)); - hashtable[t] = (Hashentry *) GPTLallocate (tablesize * sizeof (Hashentry)); - for (i = 0; i < tablesize; i++) { - hashtable[t][i].nument = 0; - hashtable[t][i].entries = 0; - } - - /* - ** Make a timer "GPTL_ROOT" to ensure no orphans, and to simplify printing. - */ - - timers[t] = (Timer *) GPTLallocate (sizeof (Timer)); - memset (timers[t], 0, sizeof (Timer)); - strcpy (timers[t]->name, "GPTL_ROOT"); - timers[t]->onflg = true; - last[t] = timers[t]; - - stackidx[t].val = 0; - callstack[t][0] = timers[t]; - for (i = 1; i < MAX_STACK; i++) - callstack[t][i] = 0; - } - -#ifdef HAVE_PAPI - if (GPTL_PAPIinitialize (maxthreads, verbose, &nevents, eventlist) < 0) - return GPTLerror ("%s: Failure from GPTL_PAPIinitialize\n", thisfunc); -#endif - - /* - ** Call init routine for underlying timing routine. - */ - - if ((*funclist[funcidx].funcinit)() < 0) { - fprintf (stderr, "%s: Failure initializing %s. 
Reverting underlying timer to %s\n", - thisfunc, funclist[funcidx].name, funclist[0].name); - funcidx = 0; - } - - ptr2wtimefunc = funclist[funcidx].func; - - if (verbose) { - t1 = (*ptr2wtimefunc) (); - t2 = (*ptr2wtimefunc) (); - if (t1 > t2) - fprintf (stderr, "%s: negative delta-t=%g\n", thisfunc, t2-t1); - - printf ("Per call overhead est. t2-t1=%g should be near zero\n", t2-t1); - printf ("Underlying wallclock timing routine is %s\n", funclist[funcidx].name); - } - - initialized = true; - return 0; -} - -/* -** GPTLfinalize (): Finalization routine must be called from single-threaded -** region. Free all malloc'd space -** -** return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLfinalize (void) -{ - int t; /* thread index */ - int n; /* array index */ - Timer *ptr, *ptrnext; /* ll indices */ - static const char *thisfunc = "GPTLfinalize"; - - if ( ! initialized) - return GPTLerror ("%s: initialization was not completed\n", thisfunc); - - for (t = 0; t < maxthreads; ++t) { - for (n = 0; n < tablesize; ++n) { - if (hashtable[t][n].nument > 0) - free (hashtable[t][n].entries); - } - free (hashtable[t]); - hashtable[t] = NULL; - free (callstack[t]); - for (ptr = timers[t]; ptr; ptr = ptrnext) { - ptrnext = ptr->next; - if (ptr->nparent > 0) { - free (ptr->parent); - free (ptr->parent_count); - } - if (ptr->nchildren > 0) - free (ptr->children); - free (ptr); - } - } - - free (callstack); - free (stackidx); - free (timers); - free (last); - free (max_depth); - free (max_name_len); - free (hashtable); - - threadfinalize (); - -#ifdef HAVE_PAPI - GPTL_PAPIfinalize (maxthreads); -#endif - - /* Reset initial values */ - - timers = 0; - last = 0; - max_depth = 0; - max_name_len = 0; - nthreads = -1; - maxthreads = -1; - depthlimit = 99999; - disabled = false; - initialized = false; - pr_has_been_called = false; - dousepapi = false; - verbose = false; - percent = false; - dopr_preamble = true; - dopr_threadsort = true; - dopr_multparent = true; - dopr_collision = true; - pr_append = false; - ref_gettimeofday = -1; - ref_clock_gettime = -1; -#ifdef _AIX - ref_read_real_time = -1; -#endif - ref_papitime = -1; - funcidx = 0; -#ifdef HAVE_NANOTIME - cpumhz= 0; - cyc2sec = -1; -#endif - outdir = 0; - tablesize = 1024; - - return 0; -} - -/* -** GPTLstart_instr: start a timer (auto-instrumented) -** -** Input arguments: -** self: function address -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLstart_instr (void *self) -{ - Timer *ptr; /* linked list pointer */ - int t; /* thread index (of this thread) */ - unsigned int indx; /* hash table index */ - static const char *thisfunc = "GPTLstart_instr"; - - if (disabled) - return 0; - - if ( ! initialized) - return GPTLerror ("%s self=%p: GPTLinitialize has not been called\n", thisfunc, self); - - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - - /* - ** If current depth exceeds a user-specified limit for print, just - ** increment and return - */ - - if (stackidx[t].val >= depthlimit) { - ++stackidx[t].val; - return 0; - } - - ptr = getentry_instr (hashtable[t], self, &indx); - - /* - ** Recursion => increment depth in recursion and return. We need to return - ** because we don't want to restart the timer. We want the reported time for - ** the timer to reflect the outermost layer of recursion. - */ - - if (ptr && ptr->onflg) { - ++ptr->recurselvl; - return 0; - } - - /* - ** Increment stackidx[t] unconditionally. 
This is necessary to ensure the correct - ** behavior when GPTLstop_instr decrements stackidx[t] unconditionally. - */ - - if (++stackidx[t].val > MAX_STACK-1) - return GPTLerror ("%s: stack too big\n", thisfunc); - - if ( ! ptr) { /* Add a new entry and initialize */ - ptr = (Timer *) GPTLallocate (sizeof (Timer)); - memset (ptr, 0, sizeof (Timer)); - - /* - ** Need to save the address string for later conversion back to a real - ** name by an offline tool. - */ - - snprintf (ptr->name, MAX_CHARS+1, "%lx", (unsigned long) self); - ptr->address = self; - - if (update_ll_hash (ptr, t, indx) != 0) - return GPTLerror ("%s: update_ll_hash error\n", thisfunc); - } - - if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) - return GPTLerror ("%s: update_parent_info error\n", thisfunc); - - if (update_ptr (ptr, t) != 0) - return GPTLerror ("%s: update_ptr error\n", thisfunc); - - return (0); -} - -/* -** GPTLstart: start a timer -** -** Input arguments: -** name: timer name -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLstart (const char *name) /* timer name */ -{ - Timer *ptr; /* linked list pointer */ - int t; /* thread index (of this thread) */ - int numchars; /* number of characters to copy */ - unsigned int indx; /* hash table index */ - static const char *thisfunc = "GPTLstart"; - - if (disabled) - return 0; - - if ( ! initialized) - return GPTLerror ("%s name=%s: GPTLinitialize has not been called\n", thisfunc, name); - - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - - /* - ** If current depth exceeds a user-specified limit for print, just - ** increment and return - */ - - if (stackidx[t].val >= depthlimit) { - ++stackidx[t].val; - return 0; - } - - /* - ** ptr will point to the requested timer in the current list, - ** or NULL if this is a new entry - */ - - ptr = getentry (hashtable[t], name, &indx); - - /* - ** Recursion => increment depth in recursion and return. We need to return - ** because we don't want to restart the timer. We want the reported time for - ** the timer to reflect the outermost layer of recursion. - */ - - if (ptr && ptr->onflg) { - ++ptr->recurselvl; - return 0; - } - - /* - ** Increment stackidx[t] unconditionally. This is necessary to ensure the correct - ** behavior when GPTLstop decrements stackidx[t] unconditionally. - */ - - if (++stackidx[t].val > MAX_STACK-1) - return GPTLerror ("%s: stack too big\n", thisfunc); - - if ( ! 
ptr) { /* Add a new entry and initialize */ - ptr = (Timer *) GPTLallocate (sizeof (Timer)); - memset (ptr, 0, sizeof (Timer)); - - numchars = MIN (strlen (name), MAX_CHARS); - strncpy (ptr->name, name, numchars); - ptr->name[numchars] = '\0'; - - if (update_ll_hash (ptr, t, indx) != 0) - return GPTLerror ("%s: update_ll_hash error\n", thisfunc); - } - - if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) - return GPTLerror ("%s: update_parent_info error\n", thisfunc); - - if (update_ptr (ptr, t) != 0) - return GPTLerror ("%s: update_ptr error\n", thisfunc); - - return (0); -} - -/* -** GPTLstart_handle: start a timer based on a handle -** -** Input arguments: -** name: timer name (required when on input, handle=0) -** handle: pointer to timer matching "name" -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLstart_handle (const char *name, /* timer name */ - void **handle) /* handle (output if input value is 0) */ -{ - Timer *ptr; /* linked list pointer */ - int t; /* thread index (of this thread) */ - int numchars; /* number of characters to copy */ - unsigned int indx = (unsigned int) -1; /* hash table index: init to bad value */ - static const char *thisfunc = "GPTLstart_handle"; - - if (disabled) - return 0; - - if ( ! initialized) - return GPTLerror ("%s name=%s: GPTLinitialize has not been called\n", thisfunc, name); - - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - - /* - ** If current depth exceeds a user-specified limit for print, just - ** increment and return - */ - - if (stackidx[t].val >= depthlimit) { - ++stackidx[t].val; - return 0; - } - - /* - ** If on input, handle references a non-zero value, assume it's a previously returned Timer* - ** passed in by the user. If zero, generate the hash entry and return it to the user. - */ - - if (*handle) { - ptr = (Timer *) *handle; - } else { - ptr = getentry (hashtable[t], name, &indx); - } - - /* - ** Recursion => increment depth in recursion and return. We need to return - ** because we don't want to restart the timer. We want the reported time for - ** the timer to reflect the outermost layer of recursion. - */ - - if (ptr && ptr->onflg) { - ++ptr->recurselvl; - return 0; - } - - /* - ** Increment stackidx[t] unconditionally. This is necessary to ensure the correct - ** behavior when GPTLstop decrements stackidx[t] unconditionally. - */ - - if (++stackidx[t].val > MAX_STACK-1) - return GPTLerror ("%s: stack too big\n", thisfunc); - - if ( ! ptr) { /* Add a new entry and initialize */ - ptr = (Timer *) GPTLallocate (sizeof (Timer)); - memset (ptr, 0, sizeof (Timer)); - - numchars = MIN (strlen (name), MAX_CHARS); - strncpy (ptr->name, name, numchars); - ptr->name[numchars] = '\0'; - - if (update_ll_hash (ptr, t, indx) != 0) - return GPTLerror ("%s: update_ll_hash error\n", thisfunc); - } - - if (update_parent_info (ptr, callstack[t], stackidx[t].val) != 0) - return GPTLerror ("%s: update_parent_info error\n", thisfunc); - - if (update_ptr (ptr, t) != 0) - return GPTLerror ("%s: update_ptr error\n", thisfunc); - - /* - ** If on input, *handle was 0, return the pointer to the timer for future input - */ - - if ( ! *handle) - *handle = (void *) ptr; - - return (0); -} - -/* -** update_ll_hash: Update linked list and hash table. 
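
/*
** A minimal usage sketch of the start/stop API implemented above (illustrative
** only; it assumes the public prototypes come from "gptl.h" and that the
** GPTLnanotime option is available -- if it is not, GPTLinitialize falls back
** to gettimeofday as noted earlier). Re-entering a running timer does not
** restart it: the reported time covers the outermost call only.
*/

#include "gptl.h"

int time_some_work (void)
{
  void *handle = 0;                      /* cached by GPTLstart_handle on first use */
  int i;

  (void) GPTLsetutr (GPTLnanotime);      /* optional; must precede GPTLinitialize */
  if (GPTLinitialize () != 0)            /* call once, from a single-threaded region */
    return 1;

  GPTLstart ("total");
  for (i = 0; i < 1000; ++i) {
    GPTLstart_handle ("inner", &handle); /* handle skips the hash lookup after call 1 */
    /* ... work to be timed ... */
    GPTLstop_handle ("inner", &handle);
  }
  GPTLstop ("total");

  GPTLpr (0);                            /* writes "timing.0" */
  return GPTLfinalize ();
}
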
-** Called by GPTLstart, GPTLstart_instr and GPTLstart_handle -** -** Input arguments: -** ptr: pointer to timer -** t: thread index -** indx: hash index -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -static int update_ll_hash (Timer *ptr, const int t, const unsigned int indx) -{ - int nchars; /* number of chars */ - int nument; /* number of entries */ - Timer **eptr; /* for realloc */ - - nchars = strlen (ptr->name); - if (nchars > max_name_len[t]) - max_name_len[t] = nchars; - - last[t]->next = ptr; - last[t] = ptr; - ++hashtable[t][indx].nument; - nument = hashtable[t][indx].nument; - - eptr = (Timer **) realloc (hashtable[t][indx].entries, nument * sizeof (Timer *)); - if ( ! eptr) - return GPTLerror ("update_ll_hash: realloc error\n"); - - hashtable[t][indx].entries = eptr; - hashtable[t][indx].entries[nument-1] = ptr; - - return 0; -} - -/* -** update_ptr: Update timer contents. Called by GPTLstart and GPTLstart_instr and GPTLstart_handle -** -** Input arguments: -** ptr: pointer to timer -** t: thread index -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -static inline int update_ptr (Timer *ptr, const int t) -{ - double tp2; /* time stamp */ - - ptr->onflg = true; - - if (cpustats.enabled && get_cpustamp (&ptr->cpu.last_utime, &ptr->cpu.last_stime) < 0) - return GPTLerror ("update_ptr: get_cpustamp error"); - - if (wallstats.enabled) { - tp2 = (*ptr2wtimefunc) (); - ptr->wall.last = tp2; - } - -#ifdef HAVE_PAPI - if (dousepapi && GPTL_PAPIstart (t, &ptr->aux) < 0) - return GPTLerror ("update_ptr: error from GPTL_PAPIstart\n"); -#endif - return 0; -} - -/* -** update_parent_info: update info about parent, and in the parent about this child -** -** Arguments: -** ptr: pointer to timer -** callstackt: callstack for this thread -** stackidxt: stack index for this thread -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -static inline int update_parent_info (Timer *ptr, - Timer **callstackt, - int stackidxt) -{ - int n; /* loop index through known parents */ - Timer *pptr; /* pointer to parent in callstack */ - Timer **pptrtmp; /* for realloc parent pointer array */ - int nparent; /* number of parents */ - int *parent_count; /* number of times parent invoked this child */ - static const char *thisfunc = "update_parent_info"; - - if ( ! ptr ) - return -1; - - if (stackidxt < 0) - return GPTLerror ("%s: called with negative stackidx\n", thisfunc); - - callstackt[stackidxt] = ptr; - - /* - ** If the region has no parent, bump its orphan count - ** (should never happen since "GPTL_ROOT" added). - */ - - if (stackidxt == 0) { - ++ptr->norphan; - return 0; - } - - pptr = callstackt[stackidxt-1]; - - /* If this parent occurred before, bump its count */ - - for (n = 0; n < ptr->nparent; ++n) { - if (ptr->parent[n] == pptr) { - ++ptr->parent_count[n]; - break; - } - } - - /* If this is a new parent, update info */ - - if (n == ptr->nparent) { - ++ptr->nparent; - nparent = ptr->nparent; - pptrtmp = (Timer **) realloc (ptr->parent, nparent * sizeof (Timer *)); - if ( ! pptrtmp) - return GPTLerror ("%s: realloc error pptrtmp nparent=%d\n", thisfunc, nparent); - - ptr->parent = pptrtmp; - ptr->parent[nparent-1] = pptr; - parent_count = (int *) realloc (ptr->parent_count, nparent * sizeof (int)); - if ( ! 
parent_count) - return GPTLerror ("%s: realloc error parent_count nparent=%d\n", thisfunc, nparent); - - ptr->parent_count = parent_count; - ptr->parent_count[nparent-1] = 1; - } - - return 0; -} - -/* -** GPTLstop_instr: stop a timer (auto-instrumented) -** -** Input arguments: -** self: function address -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLstop_instr (void *self) -{ - double tp1 = 0.0; /* time stamp */ - Timer *ptr; /* linked list pointer */ - int t; /* thread number for this process */ - unsigned int indx; /* index into hash table */ - long usr = 0; /* user time (returned from get_cpustamp) */ - long sys = 0; /* system time (returned from get_cpustamp) */ - static const char *thisfunc = "GPTLstop_instr"; - - if (disabled) - return 0; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* Get the timestamp */ - - if (wallstats.enabled) { - tp1 = (*ptr2wtimefunc) (); - } - - if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) - return GPTLerror ("%s: bad return from get_cpustamp\n", thisfunc); - - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - - /* - ** If current depth exceeds a user-specified limit for print, just - ** decrement and return - */ - - if (stackidx[t].val > depthlimit) { - --stackidx[t].val; - return 0; - } - - ptr = getentry_instr (hashtable[t], self, &indx); - - if ( ! ptr) - return GPTLerror ("%s: timer for %p had not been started.\n", thisfunc, self); - - if ( ! ptr->onflg ) - return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); - - ++ptr->count; - - /* - ** Recursion => decrement depth in recursion and return. We need to return - ** because we don't want to stop the timer. We want the reported time for - ** the timer to reflect the outermost layer of recursion. - */ - - if (ptr->recurselvl > 0) { - ++ptr->nrecurse; - --ptr->recurselvl; - return 0; - } - - if (update_stats (ptr, tp1, usr, sys, t) != 0) - return GPTLerror ("%s: error from update_stats\n", thisfunc); - - return 0; -} - -/* -** GPTLstop: stop a timer -** -** Input arguments: -** name: timer name -** -** Return value: 0 (success) or -1 (failure) -*/ - -int GPTLstop (const char *name) /* timer name */ -{ - double tp1 = 0.0; /* time stamp */ - Timer *ptr; /* linked list pointer */ - int t; /* thread number for this process */ - unsigned int indx; /* index into hash table */ - long usr = 0; /* user time (returned from get_cpustamp) */ - long sys = 0; /* system time (returned from get_cpustamp) */ - static const char *thisfunc = "GPTLstop"; - - if (disabled) - return 0; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* Get the timestamp */ - - if (wallstats.enabled) { - tp1 = (*ptr2wtimefunc) (); - } - - if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) - return GPTLerror ("%s: get_cpustamp error", thisfunc); - - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - - /* - ** If current depth exceeds a user-specified limit for print, just - ** decrement and return - */ - - if (stackidx[t].val > depthlimit) { - --stackidx[t].val; - return 0; - } - - if ( ! (ptr = getentry (hashtable[t], name, &indx))) - return GPTLerror ("%s thread %d: timer for %s had not been started.\n", thisfunc, t, name); - - if ( ! 
ptr->onflg ) - return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); - - ++ptr->count; - - /* - ** Recursion => decrement depth in recursion and return. We need to return - ** because we don't want to stop the timer. We want the reported time for - ** the timer to reflect the outermost layer of recursion. - */ - - if (ptr->recurselvl > 0) { - ++ptr->nrecurse; - --ptr->recurselvl; - return 0; - } - - if (update_stats (ptr, tp1, usr, sys, t) != 0) - return GPTLerror ("%s: error from update_stats\n", thisfunc); - - return 0; -} - -/* -** GPTLstop_handle: stop a timer based on a handle -** -** Input arguments: -** name: timer name (used only for diagnostics) -** handle: pointer to timer -** -** Return value: 0 (success) or -1 (failure) -*/ - -int GPTLstop_handle (const char *name, /* timer name */ - void **handle) /* handle (output if input value is 0) */ -{ - double tp1 = 0.0; /* time stamp */ - Timer *ptr; /* linked list pointer */ - int t; /* thread number for this process */ - unsigned int indx; /* index into hash table */ - long usr = 0; /* user time (returned from get_cpustamp) */ - long sys = 0; /* system time (returned from get_cpustamp) */ - static const char *thisfunc = "GPTLstop_handle"; - - if (disabled) - return 0; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* Get the timestamp */ - - if (wallstats.enabled) { - tp1 = (*ptr2wtimefunc) (); - } - - if (cpustats.enabled && get_cpustamp (&usr, &sys) < 0) - return GPTLerror (0); - - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - - /* - ** If current depth exceeds a user-specified limit for print, just - ** decrement and return - */ - - if (stackidx[t].val > depthlimit) { - --stackidx[t].val; - return 0; - } - - /* - ** If on input, handle references a non-zero value, assume it's a previously returned Timer* - ** passed in by the user. If zero, generate the hash entry and return it to the user. - */ - - if (*handle) { - ptr = (Timer *) *handle; - } else { - if ( ! (ptr = getentry (hashtable[t], name, &indx))) - return GPTLerror ("%s thread %d: timer for %s had not been started.\n", thisfunc, t, name); - } - - if ( ! ptr->onflg ) - return GPTLerror ("%s: timer %s was already off.\n", thisfunc, ptr->name); - - ++ptr->count; - - /* - ** Recursion => decrement depth in recursion and return. We need to return - ** because we don't want to stop the timer. We want the reported time for - ** the timer to reflect the outermost layer of recursion. - */ - - if (ptr->recurselvl > 0) { - ++ptr->nrecurse; - --ptr->recurselvl; - return 0; - } - - if (update_stats (ptr, tp1, usr, sys, t) != 0) - return GPTLerror ("%s: error from update_stats\n", thisfunc); - - /* - ** If on input, *handle was 0, return the pointer to the timer for future input - */ - - if ( ! *handle) - *handle = (void *) ptr; - - return 0; -} - -/* -** update_stats: update stats inside ptr. 
Called by GPTLstop, GPTLstop_instr, -** GPTLstop_handle -** -** Input arguments: -** ptr: pointer to timer -** tp1: input time stapm -** usr: user time -** sys: system time -** t: thread index -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -static inline int update_stats (Timer *ptr, - const double tp1, - const long usr, - const long sys, - const int t) -{ - double delta; /* difference */ - static const char *thisfunc = "update_stats"; - - ptr->onflg = false; - --stackidx[t].val; - if (stackidx[t].val < -1) { - stackidx[t].val = -1; - return GPTLerror ("%s: tree depth has become negative.\n", thisfunc); - } - -#ifdef HAVE_PAPI - if (dousepapi && GPTL_PAPIstop (t, &ptr->aux) < 0) - return GPTLerror ("%s: error from GPTL_PAPIstop\n", thisfunc); -#endif - - if (wallstats.enabled) { - delta = tp1 - ptr->wall.last; - ptr->wall.accum += delta; - - if (delta < 0.) { - fprintf (stderr, "%s: negative delta=%g\n", thisfunc, delta); - } - - if (ptr->count == 1) { - ptr->wall.max = delta; - ptr->wall.min = delta; - } else { - if (delta > ptr->wall.max) - ptr->wall.max = delta; - if (delta < ptr->wall.min) - ptr->wall.min = delta; - } - } - - if (cpustats.enabled) { - ptr->cpu.accum_utime += usr - ptr->cpu.last_utime; - ptr->cpu.accum_stime += sys - ptr->cpu.last_stime; - ptr->cpu.last_utime = usr; - ptr->cpu.last_stime = sys; - } - return 0; -} - -/* -** GPTLenable: enable timers -** -** Return value: 0 (success) -*/ - -int GPTLenable (void) -{ - disabled = false; - return (0); -} - -/* -** GPTLdisable: disable timers -** -** Return value: 0 (success) -*/ - -int GPTLdisable (void) -{ - disabled = true; - return (0); -} - -/* -** GPTLstamp: Compute timestamp of usr, sys, and wallclock time (seconds) -** -** Output arguments: -** wall: wallclock -** usr: user time -** sys: system time -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLstamp (double *wall, double *usr, double *sys) -{ - struct tms buf; /* argument to times */ - - if ( ! initialized) - return GPTLerror ("GPTLstamp: GPTLinitialize has not been called\n"); - -#ifdef HAVE_TIMES - *usr = 0; - *sys = 0; - - if (times (&buf) == -1) - return GPTLerror ("GPTLstamp: times() failed. Results bogus\n"); - - *usr = buf.tms_utime / (double) ticks_per_sec; - *sys = buf.tms_stime / (double) ticks_per_sec; -#endif - *wall = (*ptr2wtimefunc) (); - return 0; -} - -/* -** GPTLreset: reset all timers to 0 -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLreset (void) -{ - int t; /* index over threads */ - Timer *ptr; /* linked list index */ - static const char *thisfunc = "GPTLreset"; - - if ( ! 
initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - for (t = 0; t < nthreads; t++) { - for (ptr = timers[t]; ptr; ptr = ptr->next) { - ptr->onflg = false; - ptr->count = 0; - memset (&ptr->wall, 0, sizeof (ptr->wall)); - memset (&ptr->cpu, 0, sizeof (ptr->cpu)); -#ifdef HAVE_PAPI - memset (&ptr->aux, 0, sizeof (ptr->aux)); -#endif - } - } - - if (verbose) - printf ("%s: accumulators for all timers set to zero\n", thisfunc); - - return 0; -} - -/* -** GPTLpr_set_append: set GPTLpr_file and GPTLpr_summary_file -** to use append mode -*/ - -int GPTLpr_set_append (void) -{ - pr_append = true; - return 0; -} - -/* -** GPTLpr_query_append: query whether GPTLpr_file and GPTLpr_summary_file -** use append mode -*/ - -int GPTLpr_query_append (void) -{ - if (pr_append) - return 1; - else - return 0; -} - -/* -** GPTLpr_set_write: set GPTLpr_file and GPTLpr_summary_file -** to use write mode -*/ - -int GPTLpr_set_write (void) -{ - pr_append = false; - return 0; -} - -/* -** GPTLpr_query_write: query whether GPTLpr_file and GPTLpr_summary_file -** use write mode -*/ - -int GPTLpr_query_write (void) -{ - if (pr_append) - return 0; - else - return 1; -} - -/* -** GPTLpr: Print values of all timers -** -** Input arguments: -** id: integer to append to string "timing." -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLpr (const int id) /* output file will be named "timing." */ -{ - char outfile[14]; /* name of output file: timing.xxxxxx */ - static const char *thisfunc = "GPTLpr"; - - if (id < 0 || id > 999999) - return GPTLerror ("%s: bad id=%d for output file. Must be >= 0 and < 1000000\n", thisfunc, id); - - sprintf (outfile, "timing.%d", id); - - if (GPTLpr_file (outfile) != 0) - return GPTLerror ("%s: Error in GPTLpr_file\n", thisfunc); - - return 0; -} - -/* -** GPTLpr_file: Print values of all timers -** -** Input arguments: -** outfile: Name of output file to write -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLpr_file (const char *outfile) /* output file to write */ -{ - FILE *fp; /* file handle to write to */ - Timer *ptr; /* walk through master thread linked list */ - Timer *tptr; /* walk through slave threads linked lists */ - Timer sumstats; /* sum of same timer stats over threads */ - int i, ii, n, t; /* indices */ - int totent; /* per-thread collision count (diagnostic) */ - int nument; /* per-index collision count (diagnostic) */ - int totlen; /* length for malloc */ - unsigned long totcount; /* total timer invocations */ - char *outpath; /* path to output file: outdir/timing.xxxxxx */ - float *sum; /* sum of overhead values (per thread) */ - float osum; /* sum of overhead over threads */ - double utr_overhead; /* overhead of calling underlying timing routine */ - double tot_overhead; /* utr_overhead + papi overhead */ - double papi_overhead = 0; /* overhead of reading papi counters */ - bool found; /* jump out of loop when name found */ - bool foundany; /* whether summation print necessary */ - bool first; /* flag 1st time entry found */ - /* - ** Diagnostics for collisions and GPTL memory usage - */ - int num_zero; /* number of buckets with 0 collisions */ - int num_one; /* number of buckets with 1 collision */ - int num_two; /* number of buckets with 2 collisions */ - int num_more; /* number of buckets with more than 2 collisions */ - int most; /* biggest collision count */ - int numtimers = 0; /* number of timers */ - float hashmem; /* hash table memory usage */ - float regionmem; /* timer memory 
usage */ - float papimem; /* PAPI stats memory usage */ - float pchmem; /* parent/child array memory usage */ - float gptlmem; /* total per-thread GPTL memory usage estimate */ - float totmem; /* sum of gptlmem across threads */ - - static const char *thisfunc = "GPTLpr_file"; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize() has not been called\n", thisfunc); - - /* 2 is for "/" plus null */ - if (outdir) - totlen = strlen (outdir) + strlen (outfile) + 2; - else - totlen = strlen (outfile) + 2; - - outpath = (char *) GPTLallocate (totlen); - - if (outdir) { - strcpy (outpath, outdir); - strcat (outpath, "/"); - strcat (outpath, outfile); - } else { - strcpy (outpath, outfile); - } - - if (pr_append){ - if ( ! (fp = fopen (outpath, "a"))) - fp = stderr; - } - else{ - if ( ! (fp = fopen (outpath, "w"))) - fp = stderr; - } - - free (outpath); - - fprintf (fp, "$Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $\n"); - - /* - ** A set of nasty ifdefs to tell important aspects of how GPTL was built - */ - -#ifdef HAVE_NANOTIME - if (funclist[funcidx].option == GPTLnanotime) { - fprintf (fp, "Clock rate = %f MHz\n", cpumhz); -#ifdef BIT64 - fprintf (fp, " BIT64 was true\n"); -#else - fprintf (fp, " BIT64 was false\n"); -#endif - } -#endif - -#if ( defined THREADED_OMP ) - fprintf (fp, "GPTL was built with THREADED_OMP\n"); -#elif ( defined THREADED_PTHREADS ) - fprintf (fp, "GPTL was built with THREADED_PTHREADS\n"); -#else - fprintf (fp, "GPTL was built without threading\n"); -#endif - -#ifdef HAVE_MPI - fprintf (fp, "HAVE_MPI was true\n"); - -#ifdef HAVE_COMM_F2C - fprintf (fp, " HAVE_COMM_F2C was true\n"); -#else - fprintf (fp, " HAVE_COMM_F2C was false\n"); -#endif - -#ifdef ENABLE_PMPI - fprintf (fp, " ENABLE_PMPI was true\n"); -#else - fprintf (fp, " ENABLE_PMPI was false\n"); -#endif - -#else - fprintf (fp, "HAVE_MPI was false\n"); -#endif - -#ifdef HAVE_PAPI - fprintf (fp, "HAVE_PAPI was true\n"); - if (dousepapi) { - if (GPTL_PAPIis_multiplexed ()) - fprintf (fp, " PAPI event multiplexing was ON\n"); - else - fprintf (fp, " PAPI event multiplexing was OFF\n"); - GPTL_PAPIprintenabled (fp); - } -#else - fprintf (fp, "HAVE_PAPI was false\n"); -#endif - - /* - ** Estimate underlying timing routine overhead - */ - - utr_overhead = utr_getoverhead (); - fprintf (fp, "Underlying timing routine was %s.\n", funclist[funcidx].name); - fprintf (fp, "Per-call utr overhead est: %g sec.\n", utr_overhead); -#ifdef HAVE_PAPI - if (dousepapi) { - double t1, t2; - t1 = (*ptr2wtimefunc) (); - read_counters100 (); - t2 = (*ptr2wtimefunc) (); - papi_overhead = 0.01 * (t2 - t1); - fprintf (fp, "Per-call PAPI overhead est: %g sec.\n", papi_overhead); - } -#endif - tot_overhead = utr_overhead + papi_overhead; - if (dopr_preamble) { - fprintf (fp, "If overhead stats are printed, roughly half the estimated number is\n" - "embedded in the wallclock stats for each timer.\n" - "Print method was %s.\n", methodstr (method)); -#ifdef ENABLE_PMPI - fprintf (fp, "If a AVG_MPI_BYTES field is present, it is an estimate of the per-call " - "average number of bytes handled by that process.\n" - "If timers beginning with sync_ are present, it means MPI synchronization " - "was turned on.\n"); -#endif - fprintf (fp, "If a \'%%_of\' field is present, it is w.r.t. 
the first timer for thread 0.\n" - "If a \'e6_per_sec\' field is present, it is in millions of PAPI counts per sec.\n\n" - "A '*' in column 1 below means the timer had multiple parents, though the\n" - "values printed are for all calls.\n" - "Further down the listing may be more detailed information about multiple\n" - "parents. Look for 'Multiple parent info'\n\n"); - } - - sum = (float *) GPTLallocate (nthreads * sizeof (float)); - - for (t = 0; t < nthreads; ++t) { - - /* - ** Construct tree for printing timers in parent/child form. get_max_depth() must be called - ** AFTER construct_tree() because it relies on the per-parent children arrays being complete. - */ - - if (construct_tree (timers[t], method) != 0) - printf ("GPTLpr_file: failure from construct_tree: output will be incomplete\n"); - max_depth[t] = get_max_depth (timers[t], 0); - - if (t > 0) - fprintf (fp, "\n"); - fprintf (fp, "Stats for thread %d:\n", t); - - for (n = 0; n < max_depth[t]+1; ++n) /* +1 to always indent timer name */ - fprintf (fp, " "); - for (n = 0; n < max_name_len[t]; ++n) /* longest timer name */ - fprintf (fp, " "); - - fprintf (fp, " On Called Recurse"); - - /* Print strings for enabled timer types */ - - if (cpustats.enabled) - fprintf (fp, "%s", cpustats.str); - if (wallstats.enabled) { - fprintf (fp, "%s", wallstats.str); - if (percent && timers[0]->next) - fprintf (fp, "%%_of_%5.5s ", timers[0]->next->name); - if (overheadstats.enabled) - fprintf (fp, "%s", overheadstats.str); - } - -#ifdef ENABLE_PMPI - fprintf (fp, "AVG_MPI_BYTES "); -#endif - -#ifdef HAVE_PAPI - GPTL_PAPIprstr (fp); -#endif - - fprintf (fp, "\n"); /* Done with titles, now print stats */ - - /* - ** Print call tree and stats via recursive routine. "-1" is flag to - ** avoid printing dummy outermost timer, and initialize the depth. - */ - - printself_andchildren (timers[t], fp, t, -1, tot_overhead); - - /* - ** Sum of overhead across timers is meaningful. - ** Factor of 2 is because there are 2 utr calls per start/stop pair. - */ - - sum[t] = 0; - totcount = 0; - for (ptr = timers[t]->next; ptr; ptr = ptr->next) { - sum[t] += ptr->count * 2 * tot_overhead; - totcount += ptr->count; - } - if (wallstats.enabled && overheadstats.enabled) - fprintf (fp, "\n"); - fprintf (fp, "Overhead sum = %9.3g wallclock seconds\n", sum[t]); - if (totcount < PRTHRESH) - fprintf (fp, "Total calls = %lu\n", totcount); - else - fprintf (fp, "Total calls = %9.3e\n", (float) totcount); - } - - /* Print per-name stats for all threads */ - - if (dopr_threadsort && nthreads > 1) { - fprintf (fp, "\nSame stats sorted by timer for threaded regions (for timers active on thread 0):\n"); - fprintf (fp, "Thd "); - - for (n = 0; n < max_name_len[0]; ++n) /* longest timer name */ - fprintf (fp, " "); - - fprintf (fp, " On Called Recurse"); - - if (cpustats.enabled) - fprintf (fp, "%s", cpustats.str); - if (wallstats.enabled) { - fprintf (fp, "%s", wallstats.str); - if (percent && timers[0]->next) - fprintf (fp, "%%_of_%5.5s ", timers[0]->next->name); - if (overheadstats.enabled) - fprintf (fp, "%s", overheadstats.str); - } - -#ifdef HAVE_PAPI - GPTL_PAPIprstr (fp); -#endif - - fprintf (fp, "\n"); - - /* Start at next to skip dummy */ - - for (ptr = timers[0]->next; ptr; ptr = ptr->next) { - - /* - ** To print sum stats, first create a new timer then copy thread 0 - ** stats into it. then sum using "add", and finally print. 
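** (Worked illustration, with made-up numbers: add() further below sums count and
** wall.accum and widens wall.max/wall.min, so if thread 0 ran a timer 10 times
** for 1.0 s total and thread 1 ran it 5 times for 0.5 s, the SUM row shows
** count 15 and 1.5 s, while max/min remain per-call extremes from any thread.)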
- */ - - foundany = false; - first = true; - sumstats = *ptr; - for (t = 1; t < nthreads; ++t) { - found = false; - for (tptr = timers[t]->next; tptr && ! found; tptr = tptr->next) { - if (STRMATCH (ptr->name, tptr->name)) { - - /* Only print thread 0 when this timer found for other threads */ - - if (first) { - first = false; - fprintf (fp, "%3.3d ", 0); - printstats (ptr, fp, 0, 0, false, tot_overhead); - } - - found = true; - foundany = true; - fprintf (fp, "%3.3d ", t); - printstats (tptr, fp, 0, 0, false, tot_overhead); - add (&sumstats, tptr); - } - } - } - - if (foundany) { - fprintf (fp, "SUM "); - printstats (&sumstats, fp, 0, 0, false, tot_overhead); - fprintf (fp, "\n"); - } - } - - /* Repeat overhead print in loop over threads */ - - if (wallstats.enabled && overheadstats.enabled) { - osum = 0.; - for (t = 0; t < nthreads; ++t) { - fprintf (fp, "OVERHEAD.%3.3d (wallclock seconds) = %9.3g\n", t, sum[t]); - osum += sum[t]; - } - fprintf (fp, "OVERHEAD.SUM (wallclock seconds) = %9.3g\n", osum); - } - } - - /* Print info about timers with multiple parents */ - - if (dopr_multparent) { - for (t = 0; t < nthreads; ++t) { - bool some_multparents = false; /* thread has entries with multiple parents? */ - for (ptr = timers[t]->next; ptr; ptr = ptr->next) { - if (ptr->nparent > 1) { - some_multparents = true; - break; - } - } - - if (some_multparents) { - fprintf (fp, "\nMultiple parent info for thread %d:\n", t); - if (dopr_preamble && t == 0) { - fprintf (fp, "Columns are count and name for the listed child\n" - "Rows are each parent, with their common child being the last entry, " - "which is indented.\n" - "Count next to each parent is the number of times it called the child.\n" - "Count next to child is total number of times it was called by the " - "listed parents.\n\n"); - } - - for (ptr = timers[t]->next; ptr; ptr = ptr->next) - if (ptr->nparent > 1) - print_multparentinfo (fp, ptr); - } - } - } - - /* Print hash table stats */ - - if (dopr_collision) { - for (t = 0; t < nthreads; t++) { - first = true; - totent = 0; - num_zero = 0; - num_one = 0; - num_two = 0; - num_more = 0; - most = 0; - numtimers= 0; - - for (i = 0; i < tablesize; i++) { - nument = hashtable[t][i].nument; - if (nument > 1) { - totent += nument-1; - if (first) { - first = false; - fprintf (fp, "\nthread %d had some hash collisions:\n", t); - } - fprintf (fp, "hashtable[%d][%d] had %d entries:", t, i, nument); - for (ii = 0; ii < nument; ii++) - fprintf (fp, " %s", hashtable[t][i].entries[ii]->name); - fprintf (fp, "\n"); - } - switch (nument) { - case 0: - ++num_zero; - break; - case 1: - ++num_one; - break; - case 2: - ++num_two; - break; - default: - ++num_more; - break; - } - most = MAX (most, nument); - numtimers += nument; - } - - if (totent > 0) { - fprintf (fp, "Total collisions thread %d = %d\n", t, totent); - fprintf (fp, "Entry information:\n"); - fprintf (fp, "num_zero = %d num_one = %d num_two = %d num_more = %d\n", - num_zero, num_one, num_two, num_more); - fprintf (fp, "Most = %d\n", most); - } - } - } - - /* Stats on GPTL memory usage */ - - totmem = 0.; - for (t = 0; t < nthreads; t++) { - hashmem = (float) sizeof (Hashentry) * tablesize; - regionmem = (float) numtimers * sizeof (Timer); -#ifdef HAVE_PAPI - papimem = (float) numtimers * sizeof (Papistats); -#else - papimem = 0.; -#endif - pchmem = 0.; - for (ptr = timers[t]->next; ptr; ptr = ptr->next) - pchmem += (float) (sizeof (Timer *)) * (ptr->nchildren + ptr->nparent); - - gptlmem = hashmem + regionmem + pchmem; - totmem += gptlmem; - 
fprintf (fp, "\n"); - fprintf (fp, "Thread %d total memory usage = %g KB\n", t, gptlmem*.001); - fprintf (fp, " Hashmem = %g KB\n" - " Regionmem = %g KB (papimem portion = %g KB)\n" - " Parent/child arrays = %g KB\n", - hashmem*.001, regionmem*.001, papimem*.001, pchmem*.001); - } - fprintf (fp, "\n"); - fprintf (fp, "Total memory usage all threads = %g KB\n", totmem*0.001); - - print_threadmapping (fp); - free (sum); - - if (fclose (fp) != 0) - fprintf (stderr, "Attempt to close %s failed\n", outfile); - - pr_has_been_called = true; - return 0; -} - -/* -** construct_tree: Build the parent->children tree starting with knowledge of -** parent list for each child. -** -** Input arguments: -** timerst: Linked list of timers -** method: method to be used to define the links -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int construct_tree (Timer *timerst, Method method) -{ - Timer *ptr; /* loop through linked list */ - Timer *pptr = 0; /* parent (init to NULL to avoid compiler warning) */ - int nparent; /* number of parents */ - int maxcount; /* max calls by a single parent */ - int n; /* loop over nparent */ - - /* - ** Walk the linked list to build the parent-child tree, using whichever - ** mechanism is in place. newchild() will prevent loops. - */ - - for (ptr = timerst; ptr; ptr = ptr->next) { - switch (method) { - case GPTLfirst_parent: - if (ptr->nparent > 0) { - pptr = ptr->parent[0]; - if (newchild (pptr, ptr) != 0); - } - break; - case GPTLlast_parent: - if (ptr->nparent > 0) { - nparent = ptr->nparent; - pptr = ptr->parent[nparent-1]; - if (newchild (pptr, ptr) != 0); - } - break; - case GPTLmost_frequent: - maxcount = 0; - for (n = 0; n < ptr->nparent; ++n) { - if (ptr->parent_count[n] > maxcount) { - pptr = ptr->parent[n]; - maxcount = ptr->parent_count[n]; - } - } - if (maxcount > 0) { /* not an orphan */ - if (newchild (pptr, ptr) != 0); - } - break; - case GPTLfull_tree: - /* - ** Careful: this one can create *lots* of output! - */ - for (n = 0; n < ptr->nparent; ++n) { - pptr = ptr->parent[n]; - if (newchild (pptr, ptr) != 0); - } - break; - default: - return GPTLerror ("construct_tree: method %d is not known\n", method); - } - } - return 0; -} - -/* -** methodstr: Return a pointer to a string which represents the method -** -** Input arguments: -** method: method type -*/ - -static char *methodstr (Method method) -{ - if (method == GPTLfirst_parent) - return "first_parent"; - else if (method == GPTLlast_parent) - return "last_parent"; - else if (method == GPTLmost_frequent) - return "most_frequent"; - else if (method == GPTLfull_tree) - return "full_tree"; - else - return "Unknown"; -} - -/* -** newchild: Add an entry to the children list of parent. Use function -** is_descendant() to prevent infinite loops. 
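
/*
** Illustrative example of how the four methods used by construct_tree differ
** (a sketch; region names are hypothetical and the GPTLprint_method option is
** assumed to be exposed by gptl.h): if region "chol" is first entered from
** "init" and later entered 3 more times from "solve", first_parent and
** last_parent place it under "init" and "solve" respectively, most_frequent
** places it under "solve" (3 calls vs 1), and full_tree prints it under both.
** The method would typically be chosen before GPTLinitialize, e.g.:
**
**   GPTLsetoption (GPTLprint_method, GPTLfull_tree);
*/
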
-** -** Input arguments: -** parent: parent node -** child: child to be added -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -static int newchild (Timer *parent, Timer *child) -{ - int nchildren; /* number of children (temporary) */ - Timer **chptr; /* array of pointers to children */ - int n; /* loop over nchildren */ - - static const char *thisfunc = "newchild"; - - if (parent == child) - return GPTLerror ("%s: child %s can't be a parent of itself\n", thisfunc, child->name); - - /* - ** To allow construct_tree to be called multiple times, check that proposed child - ** is not a known child - */ - - for (n = 0; n < parent->nchildren; ++n) { - if (parent->children[n] == child){ - n = parent->nchildren + 1; - } - } - if (n > parent->nchildren){ - return 0; - } - - /* - ** To guarantee no loops, ensure that proposed parent isn't already a descendant of - ** proposed child - */ - - if (is_descendant (child, parent)) { - return GPTLerror ("%s: loop detected: NOT adding %s to descendant list of %s. " - "Proposed parent is in child's descendant path.\n", - thisfunc, child->name, parent->name); - } - - /* Safe to add the child to the parent's list of children */ - - ++parent->nchildren; - nchildren = parent->nchildren; - chptr = (Timer **) realloc (parent->children, nchildren * sizeof (Timer *)); - if ( ! chptr) - return GPTLerror ("%s: realloc error\n", thisfunc); - parent->children = chptr; - parent->children[nchildren-1] = child; - - return 0; -} - -/* -** get_max_depth: Determine the maximum call tree depth by traversing the -** tree recursively -** -** Input arguments: -** ptr: Starting timer -** startdepth: current depth when function invoked -** -** Return value: maximum depth -*/ - -static int get_max_depth (const Timer *ptr, const int startdepth) -{ - int maxdepth = startdepth; - int depth; - int n; - - for (n = 0; n < ptr->nchildren; ++n) - if ((depth = get_max_depth (ptr->children[n], startdepth+1)) > maxdepth) - maxdepth = depth; - - return maxdepth; -} - -/* -** num_descendants: Determine the number of descendants of a timer by traversing -** the tree recursively. This function is not currently used. 
It could be -** useful in a pruning algorithm -** -** Input arguments: -** ptr: Starting timer -** -** Return value: number of descendants -*/ - -static int num_descendants (Timer *ptr) -{ - int n; - - ptr->num_desc = ptr->nchildren; - for (n = 0; n < ptr->nchildren; ++n) { - ptr->num_desc += num_descendants (ptr->children[n]); - } - return ptr->num_desc; -} - -/* -** is_descendant: Determine whether node2 is in the descendant list for -** node1 -** -** Input arguments: -** node1: starting node for recursive search -** node2: node to be searched for -** -** Return value: true or false -*/ - -static int is_descendant (const Timer *node1, const Timer *node2) -{ - int n; - - /* Breadth before depth for efficiency */ - - for (n = 0; n < node1->nchildren; ++n) - if (node1->children[n] == node2) - return 1; - - for (n = 0; n < node1->nchildren; ++n) - if (is_descendant (node1->children[n], node2)) - return 1; - - return 0; -} - -/* -** printstats: print a single timer -** -** Input arguments: -** timer: timer for which to print stats -** fp: file descriptor to write to -** t: thread number -** depth: depth to indent timer -** doindent: whether indenting will be done -** tot_overhead: underlying timing routine overhead -*/ - -static void printstats (const Timer *timer, - FILE *fp, - const int t, - const int depth, - const bool doindent, - const double tot_overhead) -{ - int i; /* index */ - int indent; /* index for indenting */ - int extraspace; /* for padding to length of longest name */ - float fusr; /* user time as float */ - float fsys; /* system time as float */ - float usrsys; /* usr + sys */ - float elapse; /* elapsed time */ - float wallmax; /* max wall time */ - float wallmin; /* min wall time */ - float ratio; /* percentage calc */ - - /* Flag regions having multiple parents with a "*" in column 1 */ - - if (doindent) { - if (timer->nparent > 1) - fprintf (fp, "* "); - else - fprintf (fp, " "); - - /* Indent to depth of this timer */ - - for (indent = 0; indent < depth; ++indent) - fprintf (fp, " "); - } - - fprintf (fp, "%s", timer->name); - - /* Pad to length of longest name */ - - extraspace = max_name_len[t] - strlen (timer->name); - for (i = 0; i < extraspace; ++i) - fprintf (fp, " "); - - /* Pad to max indent level */ - - if (doindent) - for (indent = depth; indent < max_depth[t]; ++indent) - fprintf (fp, " "); - - if (timer->onflg) - fprintf (fp, " y "); - else - fprintf (fp, " - "); - - if (timer->count < PRTHRESH) { - if (timer->nrecurse > 0) - fprintf (fp, "%8lu %6lu ", timer->count, timer->nrecurse); - else - fprintf (fp, "%8lu - ", timer->count); - } else { - if (timer->nrecurse > 0) - fprintf (fp, "%8.1e %6.0e ", (float) timer->count, (float) timer->nrecurse); - else - fprintf (fp, "%8.1e - ", (float) timer->count); - } - - if (cpustats.enabled) { - fusr = timer->cpu.accum_utime / (float) ticks_per_sec; - fsys = timer->cpu.accum_stime / (float) ticks_per_sec; - usrsys = fusr + fsys; - fprintf (fp, "%9.3f %9.3f %9.3f ", fusr, fsys, usrsys); - } - - if (wallstats.enabled) { - elapse = timer->wall.accum; - wallmax = timer->wall.max; - wallmin = timer->wall.min; - fprintf (fp, "%12.6f %12.6f %12.6f ", elapse, wallmax, wallmin); - - if (percent && timers[0]->next) { - ratio = 0.; - if (timers[0]->next->wall.accum > 0.) - ratio = (timer->wall.accum * 100.) / timers[0]->next->wall.accum; - fprintf (fp, " %9.2f ", ratio); - } - - /* - ** Factor of 2 is because there are 2 utr calls per start/stop pair. 
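** Illustrative arithmetic: with count = 1,000,000 and a per-call utr overhead
** of 5.0e-8 s, the print statement below charges roughly
** 1.0e6 * 2 * 5.0e-8 = 0.1 s to this timer's overhead column.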
- */ - - if (overheadstats.enabled) { - fprintf (fp, "%16.6f ", timer->count * 2 * tot_overhead); - } - } - -#ifdef ENABLE_PMPI - if (timer->nbytes == 0.) - fprintf (fp, " - "); - else - fprintf (fp, "%13.3e ", timer->nbytes / timer->count); -#endif - -#ifdef HAVE_PAPI - GPTL_PAPIpr (fp, &timer->aux, t, timer->count, timer->wall.accum); -#endif - - fprintf (fp, "\n"); -} - -/* -** print_multparentinfo: -** -** Input arguments: -** Input/output arguments: -*/ -void print_multparentinfo (FILE *fp, - Timer *ptr) -{ - int n; - - if (ptr->norphan > 0) { - if (ptr->norphan < PRTHRESH) - fprintf (fp, "%8u %-32s\n", ptr->norphan, "ORPHAN"); - else - fprintf (fp, "%8.1e %-32s\n", (float) ptr->norphan, "ORPHAN"); - } - - for (n = 0; n < ptr->nparent; ++n) { - if (ptr->parent_count[n] < PRTHRESH) - fprintf (fp, "%8d %-32s\n", ptr->parent_count[n], ptr->parent[n]->name); - else - fprintf (fp, "%8.1e %-32s\n", (float) ptr->parent_count[n], ptr->parent[n]->name); - } - - if (ptr->count < PRTHRESH) - fprintf (fp, "%8lu %-32s\n\n", ptr->count, ptr->name); - else - fprintf (fp, "%8.1e %-32s\n\n", (float) ptr->count, ptr->name); -} - -/* -** add: add the contents of tin to tout -** -** Input arguments: -** tin: input timer -** Input/output arguments: -** tout: output timer summed into -*/ - -static void add (Timer *tout, - const Timer *tin) -{ - tout->count += tin->count; - - if (wallstats.enabled) { - tout->wall.accum += tin->wall.accum; - - tout->wall.max = MAX (tout->wall.max, tin->wall.max); - tout->wall.min = MIN (tout->wall.min, tin->wall.min); - } - - if (cpustats.enabled) { - tout->cpu.accum_utime += tin->cpu.accum_utime; - tout->cpu.accum_stime += tin->cpu.accum_stime; - } -#ifdef HAVE_PAPI - GPTL_PAPIadd (&tout->aux, &tin->aux); -#endif -} - -/* -** GPTLpr_summary: Gather and print summary stats across -** threads and MPI tasks -** -** Input arguments: -** comm: commuicator (e.g. MPI_COMM_WORLD). If zero, use MPI_COMM_WORLD -*/ - -#ifdef HAVE_MPI -int GPTLpr_summary (MPI_Comm comm) -#else -int GPTLpr_summary (int comm) -#endif -{ - const char *outfile = "timing.summary"; - int ret; - - ret = GPTLpr_summary_file(comm, outfile); - return 0; -} - -#ifdef HAVE_MPI -int GPTLpr_summary_file (MPI_Comm comm, - const char *outfile) -#else -int GPTLpr_summary_file (int comm, - const char *outfile) -#endif -{ - int iam = 0; /* MPI rank: default master */ - int n; /* index */ - int extraspace; /* for padding to length of longest name */ - int totlen; /* length for malloc */ - char *outpath; /* path to output file: outdir/outfile */ - FILE *fp = 0; /* output file */ - - int count; /* number of timers */ - Summarystats *storage; /* storage for data from all timers */ - - int x; /* pointer increment */ - int k; /* counter */ - char *tempname; /* event name workspace */ - int max_name_length; - int len; - float temp; - int ret; /* return code */ - - static const char *thisfunc = "GPTLpr_summary_file"; - -#ifdef HAVE_MPI - int nproc; /* number of procs in MPI communicator */ - - char name[MAX_CHARS+1]; /* timer name requested by master */ - - if (((int) comm) == 0) - comm = MPI_COMM_WORLD; - - if ((ret = MPI_Comm_rank (comm, &iam)) != MPI_SUCCESS) - return GPTLerror ("%s: Bad return from MPI_Comm_rank=%d\n", thisfunc, ret); - - if ((ret = MPI_Comm_size (comm, &nproc)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Comm_size=%d\n", thisfunc, iam, ret); - -#endif - - if ( ! 
initialized) - return GPTLerror ("%s: GPTLinitialize() has not been called\n", thisfunc); - - /* - ** Each process gathers stats for its threads. - ** Binary tree used combine results. - ** Master prints results. - */ - - if (iam == 0) { - - /* 2 is for "/" plus null */ - if (outdir) - totlen = strlen (outdir) + strlen (outfile) + 2; - else - totlen = strlen (outfile) + 2; - - outpath = (char *) GPTLallocate (totlen); - - if (outdir) { - strcpy (outpath, outdir); - strcat (outpath, "/"); - strcat (outpath, outfile); - } else { - strcpy (outpath, outfile); - } - - if (pr_append){ - if ( ! (fp = fopen (outpath, "a"))) - fp = stderr; - } - else{ - if ( ! (fp = fopen (outpath, "w"))) - fp = stderr; - } - - free (outpath); - - fprintf (fp, "$Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $\n"); - fprintf (fp, "'count' is cumulative. All other stats are max/min\n"); -#ifndef HAVE_MPI - fprintf (fp, "NOTE: GPTL was built WITHOUT MPI: Only task 0 stats will be printed.\n"); - fprintf (fp, "This is even for MPI codes.\n"); -#endif - - count = merge_thread_data(); /*merges events from all threads*/ - - if( !( tempname = (char*)malloc((MAX_CHARS + 1) * sizeof(char) ) ) ) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - /* allocate storage for data for all timers */ - if( !( storage = malloc( sizeof(Summarystats) * count ) ) && count ) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - if ( (ret = collect_data( iam, comm, &count, &storage) ) != 0 ) - return GPTLerror ("%s: master collect_data failed\n", thisfunc); - - x = 0; /*finds max timer name length*/ - max_name_length = 0; - for( k = 0; k < count; k++ ) { - len = strlen( timerlist[0] + x ); - if( len > max_name_length ) - max_name_length = len; - x += MAX_CHARS + 1; - } - - /* Print heading */ - - fprintf (fp, "name"); - extraspace = max_name_length - strlen ("name"); - for (n = 0; n < extraspace; ++n) - fprintf (fp, " "); - fprintf (fp, " processes threads count"); - fprintf (fp, " walltotal wallmax (proc thrd ) wallmin (proc thrd )"); - - for (n = 0; n < nevents; ++n) { - fprintf (fp, " %8.8stotal", eventlist[n].str8); - fprintf (fp, " %8.8smax (proc thrd )", eventlist[n].str8); - fprintf (fp, " %8.8smin (proc thrd )", eventlist[n].str8); - } - - fprintf (fp, "\n"); - - x = 0; - for( k = 0; k < count; k++ ) { - - /* Print the results for this timer */ - memset( tempname, 0, (MAX_CHARS + 1) * sizeof(char) ); - memcpy( tempname, timerlist[0] + x, (MAX_CHARS + 1) * sizeof(char) ); - - x += (MAX_CHARS + 1); - fprintf (fp, "%s", tempname); - extraspace = max_name_length - strlen (tempname); - for (n = 0; n < extraspace; ++n) - fprintf (fp, " "); - temp = storage[k].count; - fprintf(fp, " %8d %8d %12.6e ", - storage[k].processes, storage[k].threads, temp); - fprintf (fp, " %12.6e %9.3f (%6d %6d) %9.3f (%6d %6d)", - storage[k].walltotal, - storage[k].wallmax, storage[k].wallmax_p, storage[k].wallmax_t, - storage[k].wallmin, storage[k].wallmin_p, storage[k].wallmin_t); -#ifdef HAVE_PAPI - for (n = 0; n < nevents; ++n) { - fprintf (fp, " %12.6e", storage[k].papitotal[n]); - - fprintf (fp, " %9.3e (%6d %6d)", - storage[k].papimax[n], storage[k].papimax_p[n], - storage[k].papimax_t[n]); - - fprintf (fp, " %9.3e (%6d %6d)", - storage[k].papimin[n], storage[k].papimin_p[n], - storage[k].papimin_t[n]); - } -#endif - fprintf (fp, "\n"); - } - - fprintf (fp, "\n"); - free(tempname); - - } - else { /* iam != 0 (slave) */ -#ifdef HAVE_MPI - /* count number of timers from linked list */ - count = merge_thread_data(); - 
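
/*
** A minimal MPI usage sketch for the summary output produced by this routine
** (illustrative only; assumes an MPI-enabled build and the prototypes from
** "gptl.h" and <mpi.h>). Every rank must make the call: non-zero ranks send
** their merged per-thread stats up the reduction tree and rank 0 writes
** "timing.summary".
*/

#include <mpi.h>
#include "gptl.h"

int main (int argc, char **argv)
{
  MPI_Init (&argc, &argv);
  GPTLinitialize ();

  GPTLstart ("main_loop");
  /* ... work to be timed ... */
  GPTLstop ("main_loop");

  GPTLpr_summary (MPI_COMM_WORLD);   /* rank 0 writes timing.summary */
  GPTLfinalize ();
  MPI_Finalize ();
  return 0;
}
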
- /*allocate storage for data for all timers */ - if( !( storage = malloc( sizeof(Summarystats) * count ) ) && count ) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - if ( (ret = collect_data( iam, comm, &count, &storage ) ) != 0 ) - return GPTLerror ("%s: slave collect_data failed\n", thisfunc); -#endif - } - - free(timerlist[0]); - free(timerlist); - free(storage); - if (iam == 0 && fclose (fp) != 0) - fprintf (stderr, "%s: Attempt to close %s failed\n", thisfunc, outfile); - return 0; -} - -/* -** merge_thread_data: returns number of events in merged list -*/ - -static int merge_thread_data() -{ - int n, k, x; /*counters*/ - int t; /*current thread*/ - int num_newtimers; - int compare; - int *count; - int max_count; /* largest number of timers among non-thread-0 threads */ - char **newtimers; - int length = MAX_CHARS + 1; - char ***sort; - int count_r; /* count to be returned, allows *count to be free()ed */ - Timer *ptr; - - static const char *thisfunc = "merge_thread_data"; - - if( nthreads == 1 ) { /* merging is not needed since only 1 thread */ - - /* count timers for thread 0 */ - count_r = 0; - for (ptr = timers[0]->next; ptr; ptr = ptr->next) count_r++; - - timerlist = (char **) GPTLallocate( sizeof (char *)); - if( !( timerlist[0] = (char *)malloc( count_r * length * sizeof (char)) ) && count_r) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - x = 0; - for (ptr = timers[0]->next; ptr; ptr = ptr->next) { - strcpy((timerlist[0] + x), ptr->name); - x += length; - } - - return count_r; - - } - - timerlist = (char **) GPTLallocate( nthreads * sizeof (char *)); - count = (int *) GPTLallocate( nthreads * sizeof (int)); - sort = (char ***) GPTLallocate( nthreads * sizeof (void *)); - - max_count = 0; - for (t = 0; t < nthreads; t++) { - - /* count timers for thread */ - count[t] = 0; - for (ptr = timers[t]->next; ptr; ptr = ptr->next) count[t]++; - - if( count[t] > max_count || max_count == 0 ) max_count = count[t]; - - if( !( sort[t] = (char **)malloc( count[t] * sizeof (char *)) ) && count[t]) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - /* allocate memory to hold list of timer names */ - if( !( timerlist[t] = (char *)malloc( length * count[t] * sizeof (char)) ) && count[t]) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - memset( timerlist[t], length * count[t] * sizeof (char), 0 ); - - x = 0; - for (ptr = timers[t]->next; ptr; ptr = ptr->next) { - strcpy((timerlist[t] + x), ptr->name); - x += length; - } - - x = 0; - for (k = 0; k < count[t]; k++) { - sort[t][k] = timerlist[t] + x; - x += length; - } - - qsort( sort[t], count[t], sizeof (char *), cmp ); - - } - - if( !( newtimers = (char **)malloc( max_count * sizeof (char *)) ) && max_count) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - for (t = 1; t < nthreads; t++) { - memset( newtimers, max_count * sizeof (char *), 0 ); - k = 0; - n = 0; - num_newtimers = 0; - while( k < count[0] && n < count[t] ) { - /* linear comparison of timers */ - compare = strcmp( sort[0][k], sort[t][n] ); - - if( compare == 0 ) { - /* both have, nothing needs to be done */ - k++; - n++; - continue; - } - - if( compare < 0 ) { - /* event that only master has, nothing needs to be done */ - k++; - continue; - } - - if( compare > 0 ) { - /* event that only slave thread has, need to add */ - newtimers[num_newtimers] = sort[t][n]; - n++; - num_newtimers++; - } - } - - while( n < count[t] ) { - /* adds any remaining timers, since we know that all the rest - 
are new since have checked all master thread timers */ - newtimers[num_newtimers] = sort[t][n]; - num_newtimers++; - n++; - } - - if( num_newtimers ) { - /* sorts by memory address to restore original order */ - qsort( newtimers, num_newtimers, sizeof(char*), ncmp ); - - /* reallocate memory to hold additional timers */ - if( !( sort[0] = realloc( sort[0], (count[0] + num_newtimers) * sizeof (char *)) ) ) - return GPTLerror ("%s: memory reallocation failed\n", thisfunc); - if( !(timerlist[0] = realloc(timerlist[0], length * (count[0] + num_newtimers) * sizeof (char)) ) ) - return GPTLerror ("%s: memory reallocation failed\n", thisfunc); - - k = count[0]; - for (n = 0; n < num_newtimers; n++) { - /* add new found timers */ - memcpy( timerlist[0] + (count[0] + n) * length, newtimers[n], length * sizeof (char) ); - } - - count[0] += num_newtimers; - - /* reassign pointers in sort since realloc will have broken them if it moved the memory. */ - x = 0; - for (k = 0; k < count[0]; k++) { - sort[0][k] = timerlist[0] + x; - x += length; - } - - qsort( sort[0], count[0], sizeof (char *), cmp ); - } - } - - free(sort[0]); - /* don't free timerlist[0], since needed for subsequent steps in gathering global statistics */ - for (t = 1; t < nthreads; t++) { - free(sort[t]); - free(timerlist[t]); - } - - free(sort); - count_r = count[0]; - free(count); - - return count_r; -} - -/* -** collect data: compute global stats using tree reduction algorithm -** returns pointer to new summarystats list -** -** Input arguments: -** iam: process id -** comm: MPI communicator -** Input/Output arguments: -** summarystats: max/min/etc stats over all processes and threads -** count: number of events -** timerlist: list of all timer names (global variable) -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -#ifdef HAVE_MPI -static int collect_data(const int iam, - MPI_Comm comm, - int *count, - Summarystats **summarystats_cumul ) -#else -static int collect_data(const int iam, - int comm, - int *count, - Summarystats **summarystats_cumul ) -#endif -{ - int step; /* spacing beween active processes */ - int mstep; /* spacing between active masters */ - int procid; /* process to communicate with */ - int ret; - int nproc; - int signal = 1; - int x, k, n; /* counters */ - char *tempname; - int s = (MAX_CHARS + 1 ); /* spacing between timer names */ - int length = MAX_CHARS + 1; - int compare; - int num_newtimers; - int count_slave; - char *timers_slave; /* slave timerlist */ - char **newtimers; - char **sort_slave; /* slave sorted list */ - char **sort_master; /* master sorted list */ - int m_index, s_index; - Summarystats *summarystats; /* stats collected on master */ - - static const char *thisfunc = "collect_data"; - -#ifdef HAVE_MPI - Summarystats *summarystats_slave; /* stats sent to master */ - const int taga = 99; - const int tagb = 100; - const int tagc = 101; - MPI_Status status; - MPI_Request rcvreq1; - MPI_Request rcvreq2; - MPI_Request rcvreq3; - - if ((ret = MPI_Comm_size (comm, &nproc)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Comm_size=%d\n", thisfunc, iam, ret); - -#endif - - summarystats = *summarystats_cumul; - - if (!( tempname = (char*)malloc((MAX_CHARS +1) * sizeof(char) ) )) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - x = 0; - for (k = 0; k < *count; k++) { - memcpy( tempname, timerlist[0] + x, (MAX_CHARS + 1) * sizeof (char) ); - /* calculate individual stats */ - get_threadstats( iam, tempname, &summarystats[k]); - x += (MAX_CHARS + 1); - 
} - -#ifdef HAVE_MPI - step = 1; - mstep = 2; - while( step < nproc ) { - - if ((iam % mstep) == 0) { - /* find new masters at the current level, which are at every n*step starting with 0 */ - - procid = iam + step; - if (procid < nproc) { - /* prevent lone master wanting data from nonexistent process problem */ - - /* prepare for receive */ - if ((ret = MPI_Irecv (&count_slave, 1, MPI_INTEGER, procid, taga, comm, &rcvreq2)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Irecv=%d\n", thisfunc, iam, ret); - - /* handshake with slave */ - if ((ret = MPI_Send (&signal, 1, MPI_INTEGER, procid, taga, comm)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); - - /* wait for message from slave */ - if ((ret = MPI_Wait (&rcvreq2, MPI_STATUS_IGNORE)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Wait=%d\n", thisfunc, iam, ret); - - if (count_slave != 0) { /* if slave had no events, then nothing needs to be done*/ - - if (!(sort_master = (char **) malloc( (*count) * sizeof (char *) ) ) && (*count)) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - if (!(newtimers = (char **) malloc( count_slave * sizeof (char *) ) )) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - if (!(sort_slave = (char **) malloc( count_slave * sizeof (char *) ) )) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - if (!(summarystats_slave = (Summarystats *) malloc( count_slave * sizeof (Summarystats) ) )) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - if (!(timers_slave = (char *) malloc( count_slave * (MAX_CHARS + 1) * sizeof (char) ) )) - return GPTLerror ("%s: memory allocation failed\n", thisfunc); - - if ((ret = MPI_Irecv (timers_slave, count_slave * (MAX_CHARS + 1), MPI_CHAR, procid, tagb, comm, &rcvreq3)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Irecv=%d\n", thisfunc, iam, ret); - if ((ret = MPI_Irecv (summarystats_slave, count_slave * sizeof(Summarystats), MPI_BYTE, procid, tagc, comm, &rcvreq1)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Irecv=%d\n", thisfunc, iam, ret); - if ((ret = MPI_Send (&signal, 1, MPI_INT, procid, tagb, comm)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); - if ((ret = MPI_Wait (&rcvreq1, MPI_STATUS_IGNORE)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Wait=%d\n", thisfunc, iam, ret); - if ((ret = MPI_Wait (&rcvreq3, MPI_STATUS_IGNORE)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Wait=%d\n", thisfunc, iam, ret); - - x = 0; - for (k = 0; k < count_slave; k++) { - sort_slave[k] = timers_slave + x; - x += MAX_CHARS + 1; - } - x = 0; - for (k = 0; k < *count; k++) { - sort_master[k] = timerlist[0] + x; - x += MAX_CHARS + 1; - } - - qsort(sort_master, *count, sizeof(char*), cmp); - qsort(sort_slave, count_slave, sizeof(char*), cmp); - - num_newtimers = 0; - n = 0; - k = 0; - while (k < *count && n < count_slave) - { - compare = strcmp(sort_master[k], sort_slave[n]); - - if (compare == 0) { - /* matching timers found */ - - /* find element number of the name in original timerlist so that it can be matched with its summarystats */ - m_index = get_index( timerlist[0], sort_master[k] ); - - s_index = get_index( timers_slave, sort_slave[n] ); - get_summarystats (&summarystats[m_index], &summarystats_slave[s_index]); - k++; - n++; - continue; - } - - if (compare > 0) { - /* s1 >s2 . 
slave has event; master does not */ - newtimers[num_newtimers] = sort_slave[n]; - num_newtimers++; - n++; - continue; - } - - if (compare < 0) /* only master has event; nothing needs to be done */ - k++; - } - - while (n < count_slave) { - /* add all remaining timers which only the slave has */ - newtimers[num_newtimers] = sort_slave[n]; - num_newtimers++; - n++; - } - - /* sort by memory address to get original order */ - qsort (newtimers, num_newtimers, sizeof(char*), ncmp); - - /* reallocate to hold new timer names and summary stats from slave */ - if (!(timerlist[0] = realloc( timerlist[0], length * (*count + num_newtimers) * sizeof (char) ) )) - return GPTLerror ("%s: memory reallocation failed\n", thisfunc); - if (!(summarystats = realloc( summarystats, (*count + count_slave ) * sizeof (Summarystats) ) )) - return GPTLerror ("%s: memory reallocation failed\n", thisfunc); - - k = *count; - x = *count * (MAX_CHARS + 1); - for (n = 0; n < num_newtimers; n++) { - /* copy new timers names and new timer data */ - memcpy(timerlist[0] + x, newtimers[n], length * sizeof (char)); - s_index = get_index( timers_slave, newtimers[n] ); - memcpy(&summarystats[k], &summarystats_slave[s_index], sizeof (Summarystats)); - k++; - x += MAX_CHARS + 1; - } - *count += num_newtimers; - - free(timers_slave); - free(summarystats_slave); - free(newtimers); - free(sort_slave); - free(sort_master); - } - - } - - } - else if ( (iam % step) == 0 ) { - /* non masters send data */ - - procid = iam - step; - - /* wait for ready signal from master */ - if ((ret = MPI_Recv (&signal, 1, MPI_INTEGER, procid, taga, comm, MPI_STATUS_IGNORE)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Recv=%d\n", thisfunc, iam, ret); - - if ((ret = MPI_Send (count, 1, MPI_INTEGER, procid, taga, comm)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); - - if ( count != 0) { - if ((ret = MPI_Recv (&signal, 1, MPI_INTEGER, procid, tagb, comm, MPI_STATUS_IGNORE)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Recv=%d\n", thisfunc, iam, ret); - if ((ret = MPI_Send (timerlist[0], (*count) * (MAX_CHARS + 1), MPI_CHAR, procid, tagb, comm)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); - if ((ret = MPI_Send (summarystats, (*count) * sizeof(Summarystats), MPI_BYTE, procid, tagc, comm)) != MPI_SUCCESS) - return GPTLerror ("%s rank %d: Bad return from MPI_Send=%d\n", thisfunc, iam, ret); - } - free(tempname); - *summarystats_cumul = summarystats; - return 0; - - } - - step = mstep; - mstep = 2 * mstep; - - } - -#endif - - free(tempname); - *summarystats_cumul = summarystats; - return 0; -} - -/* -** get_index: calculates the index number of an element in a list -** based on the start memory address and memory address of the element -** where each element is MAX_CHARS+1 long -** -** Input arguments: -** list: start address of list -** element: start address of element -** -** Return value: index of element in list -*/ - -int get_index( const char * list, - const char * element ) -{ - return (( element - list ) / ( MAX_CHARS + 1 )); -} - - -/* -** cmp: returns value from strcmp. for use with qsort -*/ - -static int cmp(const char **x, const char **y) -{ - return strcmp(*x, *y); -} - - -/* -** ncmp: compares values of memory adresses pointed to by a pointer. 
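The step/mstep loop above implements a binary-tree gather: at each level, ranks divisible by mstep receive from rank+step, while ranks divisible by step (but not mstep) send their timer lists to rank-step and then drop out of the reduction. The standalone sketch below is illustrative only (not part of GPTL); it just prints the pairing schedule implied by that loop for a hypothetical communicator size.

/* Illustrative only: print the gather schedule implied by the step/mstep
** loop above for a hypothetical communicator of nproc ranks.
*/
#include <stdio.h>

static void print_reduction_schedule (int nproc)
{
  int step, mstep, iam;

  for (step = 1, mstep = 2; step < nproc; step = mstep, mstep *= 2) {
    printf ("level step=%d:\n", step);
    for (iam = 0; iam < nproc; ++iam)
      if (iam % mstep == 0 && iam + step < nproc)
        printf ("  rank %d gathers from rank %d\n", iam, iam + step);
  }
}

int main (void)
{
  print_reduction_schedule (8);  /* 8 ranks chosen only as an example */
  return 0;
}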
for use with qsort -*/ - -static int ncmp( const char **x, const char **y ) -{ - static const char *thisfunc = "GPTLsetoption"; - - if( *x > *y ) - return 1; - if( *x < *y ) - return -1; - if( *x == *y ) - GPTLerror("%s: shared memory address between timers\n", thisfunc); -} - -/* -** get_threadstats: gather stats for timer "name" over all threads -** -** Input arguments: -** iam: MPI process id -** name: timer name -** Output arguments: -** summarystats: max/min stats over all threads -*/ - -void get_threadstats (const int iam, - const char *name, - Summarystats *summarystats) -{ -#ifdef HAVE_PAPI - int n; /* event index */ -#endif - int t; /* thread index */ - unsigned int indx; /* returned from getentry() */ - Timer *ptr; /* timer */ - - /* - ** This memset fortuitiously initializes the process values (_p) to master (0) - */ - - memset (summarystats, 0, sizeof (Summarystats)); - - summarystats->wallmax_p = iam; - summarystats->wallmin_p = iam; - - for (t = 0; t < nthreads; ++t) { - if ((ptr = getentry (hashtable[t], name, &indx))) { - - if (ptr->count > 0) { - summarystats->threads++; - summarystats->walltotal += ptr->wall.accum; - } - summarystats->count += ptr->count; - - if (ptr->wall.accum > summarystats->wallmax) { - summarystats->wallmax = ptr->wall.accum; - summarystats->wallmax_t = t; - } - - if (ptr->wall.accum < summarystats->wallmin || summarystats->wallmin == 0.) { - summarystats->wallmin = ptr->wall.accum; - summarystats->wallmin_t = t; - } -#ifdef HAVE_PAPI - for (n = 0; n < nevents; ++n) { - double value; - if (GPTL_PAPIget_eventvalue (eventlist[n].namestr, &ptr->aux, &value) != 0) { - fprintf (stderr, "Bad return from GPTL_PAPIget_eventvalue\n"); - return; - } - summarystats->papimax_p[n] = iam; - summarystats->papimin_p[n] = iam; - - if (value > summarystats->papimax[n]) { - summarystats->papimax[n] = value; - summarystats->papimax_t[n] = t; - } - - if (value < summarystats->papimin[n] || summarystats->papimin[n] == 0.) 
{ - summarystats->papimin[n] = value; - summarystats->papimin_t[n] = t; - } - summarystats->papitotal[n] += value; - } -#endif - } - } - if ( summarystats->count ) summarystats->processes = 1; -} - -/* -** get_summarystats: write max/min stats into mpistats based on comparison -** with summarystats_slave -** -** Input arguments: -** summarystats_slave: stats from a slave process -** Input/Output arguments: -** summarystats: stats (starts out as master stats) -*/ - -void get_summarystats (Summarystats *summarystats, - const Summarystats *summarystats_slave) -{ - if (summarystats_slave->count == 0) return; - - if (summarystats_slave->wallmax > summarystats->wallmax) { - summarystats->wallmax = summarystats_slave->wallmax; - summarystats->wallmax_p = summarystats_slave->wallmax_p; - summarystats->wallmax_t = summarystats_slave->wallmax_t; - } - - if ((summarystats_slave->wallmin < summarystats->wallmin) || - (summarystats->count == 0)){ - summarystats->wallmin = summarystats_slave->wallmin; - summarystats->wallmin_p = summarystats_slave->wallmin_p; - summarystats->wallmin_t = summarystats_slave->wallmin_t; - } - -#ifdef HAVE_PAPI - { - int n; - for (n = 0; n < nevents; ++n) { - if (summarystats_slave->papimax[n] > summarystats->papimax[n]) { - summarystats->papimax[n] = summarystats_slave->papimax[n]; - summarystats->papimax_p[n] = summarystats_slave->papimax_p[n]; - summarystats->papimax_t[n] = summarystats_slave->papimax_t[n]; - } - - if ((summarystats_slave->papimin[n] < summarystats->papimin[n]) || - (summarystats->count == 0)){ - summarystats->papimin[n] = summarystats_slave->papimin[n]; - summarystats->papimin_p[n] = summarystats_slave->papimin_p[n]; - summarystats->papimin_t[n] = summarystats_slave->papimin_t[n]; - } - summarystats->papitotal[n] += summarystats_slave->papitotal[n]; - } - } -#endif - - summarystats->count += summarystats_slave->count; - summarystats->walltotal += summarystats_slave->walltotal; - summarystats->processes += summarystats_slave->processes; - summarystats->threads += summarystats_slave->threads; -} - -/* -** GPTLbarrier: When MPI enabled, set and time an MPI barrier -** -** Input arguments: -** comm: commuicator (e.g. MPI_COMM_WORLD). If zero, use MPI_COMM_WORLD -** name: region name -** -** Return value: 0 (success) -*/ - -#ifdef HAVE_MPI -int GPTLbarrier (MPI_Comm comm, const char *name) -#else -int GPTLbarrier (int comm, const char *name) -#endif -{ - int ret; - static const char *thisfunc = "GPTLbarrier"; - - ret = GPTLstart (name); -#ifdef HAVE_MPI - if ((ret = MPI_Barrier (comm)) != MPI_SUCCESS) - return GPTLerror ("%s: Bad return from MPI_Barrier=%d", thisfunc, ret); -#endif - ret = GPTLstop (name); - return 0; -} - -/* -** get_cpustamp: Invoke the proper system timer and return stats. -** -** Output arguments: -** usr: user time -** sys: system time -** -** Return value: 0 (success) -*/ - -static inline int get_cpustamp (long *usr, long *sys) -{ -#ifdef HAVE_TIMES - struct tms buf; - - (void) times (&buf); - *usr = buf.tms_utime; - *sys = buf.tms_stime; - return 0; -#else - return GPTLerror ("get_cpustamp: times() not available\n"); -#endif -} - -/* -** GPTLquery: return current status info about a timer. If certain stats are not -** enabled, they should just have zeros in them. If PAPI is not enabled, input -** counter info is ignored. 
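get_cpustamp() above returns raw clock ticks from times(); the query interface that follows converts such tick counts to seconds by dividing by ticks_per_sec. A minimal sketch of that conversion, assuming a POSIX system where the tick rate comes from sysconf(_SC_CLK_TCK) (how the library itself obtains ticks_per_sec is not shown in this hunk):

/* Minimal sketch, assuming POSIX times()/sysconf(): convert the tick counts
** returned by times() into seconds, mirroring the division by ticks_per_sec
** used by the query routines.
*/
#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main (void)
{
  struct tms buf;
  long ticks_per_sec = sysconf (_SC_CLK_TCK);

  (void) times (&buf);
  printf ("user=%g s sys=%g s\n",
          buf.tms_utime / (double) ticks_per_sec,
          buf.tms_stime / (double) ticks_per_sec);
  return 0;
}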
-** -** Input args: -** name: timer name -** maxcounters: max number of PAPI counters to get info for -** t: thread number (if < 0, the request is for the current thread) -** -** Output args: -** count: number of times this timer was called -** onflg: whether timer is currently on -** wallclock: accumulated wallclock time -** usr: accumulated user CPU time -** sys: accumulated system CPU time -** papicounters_out: accumulated PAPI counters -*/ - -int GPTLquery (const char *name, - int t, - int *count, - int *onflg, - double *wallclock, - double *dusr, - double *dsys, - long long *papicounters_out, - const int maxcounters) -{ - Timer *ptr; /* linked list pointer */ - unsigned int indx; /* linked list index returned from getentry (unused) */ - static const char *thisfunc = "GPTLquery"; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* - ** If t is < 0, assume the request is for the current thread - */ - - if (t < 0) { - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: get_thread_num failure\n", thisfunc); - } else { - if (t >= maxthreads) - return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); - } - - ptr = getentry (hashtable[t], name, &indx); - if ( !ptr) - return GPTLerror ("%s: requested timer %s does not have a name hash\n", thisfunc, name); - - *onflg = ptr->onflg; - *count = ptr->count; - *wallclock = ptr->wall.accum; - *dusr = ptr->cpu.accum_utime / (double) ticks_per_sec; - *dsys = ptr->cpu.accum_stime / (double) ticks_per_sec; -#ifdef HAVE_PAPI - GPTL_PAPIquery (&ptr->aux, papicounters_out, maxcounters); -#endif - return 0; -} - -/* -** GPTLquerycounters: return current PAPI counters for a timer. -** THIS ROUTINE ID DEPRECATED. USE GPTLget_eventvalue() instead -** -** Input args: -** name: timer name -** t: thread number (if < 0, the request is for the current thread) -** -** Output args: -** papicounters_out: accumulated PAPI counters -*/ - -int GPTLquerycounters (const char *name, - int t, - long long *papicounters_out) -{ - Timer *ptr; /* linked list pointer */ - unsigned int indx; /* hash index returned from getentry */ - static const char *thisfunc = "GPTLquery_counters"; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* - ** If t is < 0, assume the request is for the current thread - */ - - if (t < 0) { - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: get_thread_num failure\n", thisfunc); - } else { - if (t >= maxthreads) - return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); - } - - ptr = getentry (hashtable[t], name, &indx); - if ( !ptr) - return GPTLerror ("%s: requested timer %s does not have a name hash\n", thisfunc, name); - -#ifdef HAVE_PAPI - /* The 999 is a hack to say "give me all the counters" */ - GPTL_PAPIquery (&ptr->aux, papicounters_out, 999); -#endif - return 0; -} - -/* -** GPTLget_wallclock: return wallclock accumulation for a timer. -** -** Input args: -** timername: timer name -** t: thread number (if < 0, the request is for the current thread) -** -** Output args: -** value: current wallclock accumulation for the timer -*/ - -int GPTLget_wallclock (const char *timername, - int t, - double *value) -{ - void *self; /* timer address when hash entry generated with *_instr */ - Timer *ptr; /* linked list pointer */ - unsigned int indx; /* hash index returned from getentry (unused) */ - static const char *thisfunc = "GPTLget_wallclock"; - - if ( ! 
initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - if ( ! wallstats.enabled) - return GPTLerror ("%s: wallstats not enabled\n", thisfunc); - - /* - ** If t is < 0, assume the request is for the current thread - */ - - if (t < 0) { - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - } else { - if (t >= maxthreads) - return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); - } - - /* - ** Don't know whether hashtable entry for timername was generated with - ** *_instr() or not, so try both possibilities - */ - - ptr = getentry (hashtable[t], timername, &indx); - if ( !ptr) { - if (sscanf (timername, "%lx", (unsigned long *) &self) < 1) - return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); - ptr = getentry_instr (hashtable[t], self, &indx); - if ( !ptr) - return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); - } - - *value = ptr->wall.accum; - return 0; -} - -/* -** GPTLget_eventvalue: return PAPI-based event value for a timer. All values will be -** returned as doubles, even if the event is not derived. -** -** Input args: -** timername: timer name -** eventname: event name (must be currently enabled) -** t: thread number (if < 0, the request is for the current thread) -** -** Output args: -** value: current value of the event for this timer -*/ - -int GPTLget_eventvalue (const char *timername, - const char *eventname, - int t, - double *value) -{ - void *self; /* timer address when hash entry generated with *_instr */ - Timer *ptr; /* linked list pointer */ - unsigned int indx; /* hash index returned from getentry (unused) */ - static const char *thisfunc = "GPTLget_eventvalue"; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* - ** If t is < 0, assume the request is for the current thread - */ - - if (t < 0) { - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: get_thread_num failure\n", thisfunc); - } else { - if (t >= maxthreads) - return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); - } - - /* - ** Don't know whether hashtable entry for timername was generated with - ** *_instr() or not, so try both possibilities - */ - - ptr = getentry (hashtable[t], timername, &indx); - if ( !ptr) { - if (sscanf (timername, "%lx", (unsigned long *) &self) < 1) - return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); - ptr = getentry_instr (hashtable[t], self, &indx); - if ( !ptr) - return GPTLerror ("%s: requested timer %s does not exist\n", thisfunc, timername); - } - -#ifdef HAVE_PAPI - return GPTL_PAPIget_eventvalue (eventname, &ptr->aux, value); -#else - return GPTLerror ("%s: PAPI not enabled\n", thisfunc); -#endif -} - -/* -** GPTLget_nregions: return number of regions (i.e. timer names) for this thread -** -** Input args: -** t: thread number (if < 0, the request is for the current thread) -** -** Output args: -** nregions: number of regions -*/ - -int GPTLget_nregions (int t, - int *nregions) -{ - Timer *ptr; /* walk through linked list */ - static const char *thisfunc = "GPTLget_nregions"; - - if ( ! 
initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* - ** If t is < 0, assume the request is for the current thread - */ - - if (t < 0) { - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: get_thread_num failure\n", thisfunc); - } else { - if (t >= maxthreads) - return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); - } - - *nregions = 0; - for (ptr = timers[t]->next; ptr; ptr = ptr->next) - ++*nregions; - - return 0; -} - -/* -** GPTLget_regionname: return region name for this thread -** -** Input args: -** t: thread number (if < 0, the request is for the current thread) -** region: region number -** nc: max number of chars to put in name -** -** Output args: -** name region name -*/ - -int GPTLget_regionname (int t, /* thread number */ - int region, /* region number (0-based) */ - char *name, /* output region name */ - int nc) /* number of chars in name (free form Fortran) */ -{ - int ncpy; /* number of characters to copy */ - int i; /* index */ - Timer *ptr; /* walk through linked list */ - static const char *thisfunc = "GPTLget_regionname"; - - if ( ! initialized) - return GPTLerror ("%s: GPTLinitialize has not been called\n", thisfunc); - - /* - ** If t is < 0, assume the request is for the current thread - */ - - if (t < 0) { - if ((t = get_thread_num ()) < 0) - return GPTLerror ("%s: get_thread_num failure\n", thisfunc); - } else { - if (t >= maxthreads) - return GPTLerror ("%s: requested thread %d is too big\n", thisfunc, t); - } - - ptr = timers[t]->next; - for (i = 0; i < region; i++) { - if ( ! ptr) - return GPTLerror ("%s: timer number %d does not exist in thread %d\n", thisfunc, region, t); - ptr = ptr->next; - } - - if (ptr) { - ncpy = MIN (nc, strlen (ptr->name)); - strncpy (name, ptr->name, ncpy); - - /* - ** Adding the \0 is only important when called from C - */ - - if (ncpy < nc) - name[ncpy] = '\0'; - } else { - return GPTLerror ("%s: timer number %d does not exist in thread %d\n", thisfunc, region, t); - } - return 0; -} - -/* -** GPTLis_initialized: Return whether GPTL has been initialized -*/ - -int GPTLis_initialized (void) -{ - return (int) initialized; -} - -/* -** getentry_instr: find hash table entry and return a pointer to it -** -** Input args: -** hashtable: the hashtable (array) -** self: input address (from -finstrument-functions) -** Output args: -** indx: hashtable index -** -** Return value: pointer to the entry, or NULL if not found -*/ - -static inline Timer *getentry_instr (const Hashentry *hashtable, /* hash table */ - void *self, /* address */ - unsigned int *indx) /* hash index */ -{ - int i; - Timer *ptr = 0; /* return value when entry not found */ - - /* - ** Hash index is timer address modulo the table size - ** On most machines, right-shifting the address helps because linkers often - ** align functions on even boundaries - */ - - *indx = (((unsigned long) self) >> 4) % tablesize; - for (i = 0; i < hashtable[*indx].nument; ++i) { - if (hashtable[*indx].entries[i]->address == self) { - ptr = hashtable[*indx].entries[i]; - break; - } - } - return ptr; -} - -/* -** getentry: find the entry in the hash table and return a pointer to it. 
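A short usage sketch of the query routines defined above (GPTLget_nregions, GPTLget_regionname, GPTLget_wallclock), assuming GPTL has been initialized and some regions have been timed; the buffer size and output format are illustrative. Passing t = -1 requests the calling thread, and the buffer is zeroed because GPTLget_regionname only NUL-terminates names shorter than the supplied length.

/* Illustrative usage sketch (not part of the library): list the regions
** recorded for the calling thread and print each accumulated wallclock.
*/
#include <stdio.h>
#include <string.h>
#include "gptl.h"

static void print_my_regions (void)
{
  int nregions;
  int i;
  char name[128];            /* illustrative buffer size */
  double wall;

  if (GPTLget_nregions (-1, &nregions) != 0)
    return;

  for (i = 0; i < nregions; ++i) {
    memset (name, 0, sizeof (name));  /* ensure termination for long names */
    if (GPTLget_regionname (-1, i, name, (int) sizeof (name) - 1) != 0)
      continue;
    if (GPTLget_wallclock (name, -1, &wall) == 0)
      printf ("%s: %g s\n", name, wall);
  }
}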
-** -** Input args: -** hashtable: the hashtable (array) -** name: string to be hashed on (specifically, summed) -** Output args: -** indx: hashtable index -** -** Return value: pointer to the entry, or NULL if not found -*/ - -static inline Timer *getentry (const Hashentry *hashtable, /* hash table */ - const char *name, /* name to hash */ - unsigned int *indx) /* hash index */ -{ - int i; /* multiplier for hashing; loop index */ - const unsigned char *c; /* pointer to elements of "name" */ - Timer *ptr = 0; /* return value when entry not found */ - - /* - ** Hash value is sum of: chars times their 1-based position index, modulo tablesize - */ - - *indx = 0; - c = (unsigned char *) name; - for (i = 1; *c && i < MAX_CHARS+1; ++c, ++i) { - *indx += (*c) * i; - } - - *indx %= tablesize; - - /* - ** If nument exceeds 1 there was a hash collision and we must search - ** linearly through an array for a match - */ - - for (i = 0; i < hashtable[*indx].nument; i++) { - if (STRMATCH (name, hashtable[*indx].entries[i]->name)) { - ptr = hashtable[*indx].entries[i]; - break; - } - } - return ptr; -} - -/* -** Add entry points for auto-instrumented codes -** Auto instrumentation flags for various compilers: -** -** gcc, pathcc, icc: -finstrument-functions -** pgcc: -Minstrument:functions -** xlc: -qdebug=function_trace -*/ - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef _AIX -void __func_trace_enter (const char *function_name, - const char *file_name, - int line_number, - void **const user_data) -{ - (void) GPTLstart (function_name); -} - -void __func_trace_exit (const char *function_name, - const char *file_name, - int line_number, - void **const user_data) -{ - (void) GPTLstop (function_name); -} - -#else - -void __cyg_profile_func_enter (void *this_fn, - void *call_site) -{ - (void) GPTLstart_instr (this_fn); -} - -void __cyg_profile_func_exit (void *this_fn, - void *call_site) -{ - (void) GPTLstop_instr (this_fn); -} -#endif - -#ifdef __cplusplus -}; -#endif - -#ifdef HAVE_NANOTIME -#ifdef BIT64 -/* 64-bit code copied from PAPI library */ -static inline unsigned long long nanotime (void) -{ - unsigned long long val; - do { - unsigned int a,d; - asm volatile("rdtsc" : "=a" (a), "=d" (d)); - (val) = ((unsigned long)a) | (((unsigned long)d)<<32); - } while(0); - - return (val); -} -#else -static inline unsigned long long nanotime (void) -{ - unsigned long long val; - __asm__ __volatile__("rdtsc" : "=A" (val) : ); - return (val); -} -#endif - -#define LEN 4096 - -static float get_clockfreq () -{ - FILE *fd = 0; - char buf[LEN]; - int is; - - if ( ! (fd = fopen ("/proc/cpuinfo", "r"))) { - fprintf (stderr, "get_clockfreq: can't open /proc/cpuinfo\n"); - return -1.; - } - - while (fgets (buf, LEN, fd)) { - if (strncmp (buf, "cpu MHz", 7) == 0) { - for (is = 7; buf[is] != '\0' && !isdigit (buf[is]); is++); - if (isdigit (buf[is])) - return (float) atof (&buf[is]); - } - } - - return -1.; -} -#endif - -/* -** The following are the set of underlying timing routines which may or may -** not be available. And their accompanying init routines. -** NANOTIME is currently only available on x86. 
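getentry() above hashes a timer name as the sum of each character times its 1-based position, modulo the table size. A standalone sketch of that hash follows; the MAX_CHARS cap used here is only a stand-in for the library's internal limit, and 1024 is the default table size documented in gptl.h.

/* Standalone sketch of the name hash used by getentry() above.  The
** MAX_CHARS value here is a stand-in for the library's internal limit;
** 1024 is the documented default hash table size.
*/
#include <stdio.h>

#define MAX_CHARS 63   /* assumed cap on significant name characters */

static unsigned int name_hash (const char *name, unsigned int tablesize)
{
  const unsigned char *c = (const unsigned char *) name;
  unsigned int indx = 0;
  int i;

  for (i = 1; *c && i < MAX_CHARS+1; ++c, ++i)
    indx += (*c) * i;

  return indx % tablesize;
}

int main (void)
{
  printf ("%u\n", name_hash ("main_loop", 1024));
  return 0;
}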
-*/ - -static int init_nanotime () -{ - static const char *thisfunc = "init_nanotime"; -#ifdef HAVE_NANOTIME - if ((cpumhz = get_clockfreq ()) < 0) - return GPTLerror ("%s: Can't get clock freq\n", thisfunc); - - if (verbose) - printf ("%s: Clock rate = %f MHz\n", thisfunc, cpumhz); - - cyc2sec = 1./(cpumhz * 1.e6); - return 0; -#else - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -static inline double utr_nanotime () -{ -#ifdef HAVE_NANOTIME - double timestamp; - timestamp = nanotime () * cyc2sec; - return timestamp; -#else - static const char *thisfunc = "utr_nanotime"; - (void) GPTLerror ("%s: not enabled\n", thisfunc); - return -1.; -#endif -} - -/* -** MPI_Wtime requires the MPI lib. -*/ - -static int init_mpiwtime () -{ -#ifdef HAVE_MPI - return 0; -#else - static const char *thisfunc = "init_mpiwtime"; - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -static inline double utr_mpiwtime () -{ -#ifdef HAVE_MPI - return MPI_Wtime (); -#else - static const char *thisfunc = "utr_mpiwtime"; - (void) GPTLerror ("%s: not enabled\n", thisfunc); - return -1.; -#endif -} - -/* -** PAPI_get_real_usec requires the PAPI lib. -*/ - -static int init_papitime () -{ - static const char *thisfunc = "init_papitime"; -#ifdef HAVE_PAPI - ref_papitime = PAPI_get_real_usec (); - if (verbose) - printf ("%s: ref_papitime=%ld\n", thisfunc, (long) ref_papitime); - return 0; -#else - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -static inline double utr_papitime () -{ -#ifdef HAVE_PAPI - return (PAPI_get_real_usec () - ref_papitime) * 1.e-6; -#else - static const char *thisfunc = "utr_papitime"; - (void) GPTLerror ("%s: not enabled\n", thisfunc); - return -1.; -#endif -} - -/* -** Probably need to link with -lrt for this one to work -*/ - -static int init_clock_gettime () -{ - static const char *thisfunc = "init_clock_gettime"; -#ifdef HAVE_LIBRT - struct timespec tp; - (void) clock_gettime (CLOCK_REALTIME, &tp); - ref_clock_gettime = tp.tv_sec; - if (verbose) - printf ("%s: ref_clock_gettime=%ld\n", thisfunc, (long) ref_clock_gettime); - return 0; -#else - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -static inline double utr_clock_gettime () -{ -#ifdef HAVE_LIBRT - struct timespec tp; - (void) clock_gettime (CLOCK_REALTIME, &tp); - return (tp.tv_sec - ref_clock_gettime) + 1.e-9*tp.tv_nsec; -#else - static const char *thisfunc = "utr_clock_gettime"; - (void) GPTLerror ("%s: not enabled\n", thisfunc); - return -1.; -#endif -} - -/* -** High-res timer on AIX: read_real_time -*/ - -static int init_read_real_time () -{ - static const char *thisfunc = "init_read_real_time"; -#ifdef _AIX - timebasestruct_t ibmtime; - (void) read_real_time (&ibmtime, TIMEBASE_SZ); - (void) time_base_to_time (&ibmtime, TIMEBASE_SZ); - ref_read_real_time = ibmtime.tb_high; - if (verbose) - printf ("%s: ref_read_real_time=%ld\n", thisfunc, (long) ref_read_real_time); - return 0; -#else - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -static inline double utr_read_real_time () -{ -#ifdef _AIX - timebasestruct_t ibmtime; - (void) read_real_time (&ibmtime, TIMEBASE_SZ); - (void) time_base_to_time (&ibmtime, TIMEBASE_SZ); - return (ibmtime.tb_high - ref_read_real_time) + 1.e-9*ibmtime.tb_low; -#else - static const char *thisfunc = "utr_read_real_time"; - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -/* -** Default available most places: gettimeofday -*/ - -static int init_gettimeofday () -{ - static const char *thisfunc = 
"init_gettimeofday"; -#ifdef HAVE_GETTIMEOFDAY - struct timeval tp; - (void) gettimeofday (&tp, 0); - ref_gettimeofday = tp.tv_sec; - if (verbose) - printf ("%s: ref_gettimeofday=%ld\n", thisfunc, (long) ref_gettimeofday); - return 0; -#else - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -static inline double utr_gettimeofday () -{ -#ifdef HAVE_GETTIMEOFDAY - struct timeval tp; - (void) gettimeofday (&tp, 0); - return (tp.tv_sec - ref_gettimeofday) + 1.e-6*tp.tv_usec; -#else - static const char *thisfunc = "utr_gettimeofday"; - return GPTLerror ("%s: not enabled\n", thisfunc); -#endif -} - -/* -** Determine underlying timing routine overhead: call it 1000 times. -*/ - -static double utr_getoverhead () -{ - double val2[1001]; - int i; - - val2[0] = (*ptr2wtimefunc)(); - for (i = 1; i < 1001; ++i) { - val2[i] = (*ptr2wtimefunc)(); - } - return 0.001 * (val2[1000] - val2[0]); -} - -/* -** printself_andchildren: Recurse through call tree, printing stats for self, then children -*/ - -static void printself_andchildren (const Timer *ptr, - FILE *fp, - const int t, - const int depth, - const double tot_overhead) -{ - int n; - - if (depth > -1) /* -1 flag is to avoid printing stats for dummy outer timer */ - printstats (ptr, fp, t, depth, true, tot_overhead); - - for (n = 0; n < ptr->nchildren; n++) - printself_andchildren (ptr->children[n], fp, t, depth+1, tot_overhead); -} - -#ifdef ENABLE_PMPI -/* -** GPTLgetentry: called ONLY from pmpi.c (i.e. not a public entry point). Returns a pointer to the -** requested timer name by calling internal function getentry() -** -** Return value: 0 (NULL) or the return value of getentry() -*/ - -Timer *GPTLgetentry (const char *name) -{ - int t; /* thread number */ - unsigned int indx; /* returned from getentry (unused) */ - static const char *thisfunc = "GPTLgetentry"; - - if ( ! initialized) { - (void) GPTLerror ("%s: initialization was not completed\n", thisfunc); - return 0; - } - - if ((t = get_thread_num ()) < 0) { - (void) GPTLerror ("%s: bad return from get_thread_num\n", thisfunc); - return 0; - } - - return (getentry (hashtable[t], name, &indx)); -} - -/* -** GPTLpr_file_has_been_called: Called ONLY from pmpi.c (i.e. not a public entry point). Return -** whether GPTLpr_file has been called. MPI_Finalize wrapper needs -** to know whether it needs to call GPTLpr. -*/ - -int GPTLpr_has_been_called (void) -{ - return (int) pr_has_been_called; -} - -#endif - -/*************************************************************************************/ - -/* -** Contents of inserted threadutil.c starts here. -** Moved to gptl.c to enable inlining -*/ - -/* -** $Id: gptl.c,v 1.157 2011-03-28 20:55:18 rosinski Exp $ -** -** Author: Jim Rosinski -** -** Utility functions handle thread-based GPTL needs. 
-*/ - -/* Max allowable number of threads (used only when THREADED_PTHREADS is true) */ -#define MAX_THREADS 128 - -/**********************************************************************************/ -/* -** 3 sets of routines: OMP threading, PTHREADS, unthreaded -*/ - -#if ( defined THREADED_OMP ) - -/* -** threadinit: Allocate and initialize threadid_omp; set max number of threads -** -** Output results: -** maxthreads: max number of threads -** -** threadid_omp[] is allocated and initialized to -1 -** -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -static int threadinit (void) -{ - int t; /* loop index */ - static const char *thisfunc = "threadinit"; - - if (omp_get_thread_num () != 0) - return GPTLerror ("OMP %s: MUST only be called by the master thread\n", thisfunc); - - /* - ** Allocate the threadid array which maps physical thread IDs to logical IDs - ** For OpenMP this will be just threadid_omp[iam] = iam; - */ - - if (threadid_omp) - return GPTLerror ("OMP %s: has already been called.\nMaybe mistakenly called by multiple threads?", - thisfunc); - - maxthreads = MAX ((1), (omp_get_max_threads ())); - if ( ! (threadid_omp = (int *) GPTLallocate (maxthreads * sizeof (int)))) - return GPTLerror ("OMP %s: malloc failure for %d elements of threadid_omp\n", thisfunc, maxthreads); - - /* - ** Initialize threadid array to flag values for use by get_thread_num(). - ** get_thread_num() will fill in the values on first use. - */ - - for (t = 0; t < maxthreads; ++t) - threadid_omp[t] = -1; - -#ifdef VERBOSE - printf ("OMP %s: Set maxthreads=%d\n", thisfunc, maxthreads); -#endif - - return 0; -} - -/* -** Threadfinalize: clean up -** -** Output results: -** threadid_omp array is freed and array pointer nullified -*/ - -static void threadfinalize () -{ - free ((void *) threadid_omp); - threadid_omp = 0; -} - -/* -** get_thread_num: Determine thread number of the calling thread -** Start PAPI counters if enabled and first call for this thread. -** -** Output results: -** nthreads: Number of threads (=maxthreads) -** threadid_omp: Our thread id added to list on 1st call -** -** Return value: thread number (success) or GPTLerror (failure) -*/ - -static inline int get_thread_num (void) -{ - int t; /* thread number */ - static const char *thisfunc = "get_thread_num"; - - if ((t = omp_get_thread_num ()) >= maxthreads) - return GPTLerror ("OMP %s: returned id=%d exceeds maxthreads=%d\n", thisfunc, t, maxthreads); - - /* - ** If our thread number has already been set in the list, we are done - */ - - if (t == threadid_omp[t]) - return t; - - /* - ** Thread id not found. Modify threadid_omp with our ID, then start PAPI events if required. - ** Due to the setting of threadid_omp, everything below here will only execute once per thread. - */ - - threadid_omp[t] = t; - -#ifdef VERBOSE - printf ("OMP %s: 1st call t=%d\n", thisfunc, t); -#endif - -#ifdef HAVE_PAPI - - /* - ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, - ** create and start an event set for the new thread. 
- */ - - if (GPTLget_npapievents () > 0) { -#ifdef VERBOSE - printf ("OMP %s: Starting EventSet t=%d\n", thisfunc, t); -#endif - - if (GPTLcreate_and_start_events (t) < 0) - return GPTLerror ("OMP %s: error from GPTLcreate_and_start_events for thread %d\n", thisfunc, t); - } -#endif - - /* - ** nthreads = maxthreads based on setting in threadinit - */ - - nthreads = maxthreads; -#ifdef VERBOSE - printf ("OMP %s: nthreads=%d\n", thisfunc, nthreads); -#endif - - return t; -} - -static void print_threadmapping (FILE *fp) -{ - int n; - - fprintf (fp, "\n"); - fprintf (fp, "Thread mapping:\n"); - for (n = 0; n < nthreads; ++n) - fprintf (fp, "threadid_omp[%d] = %d\n", n, threadid_omp[n]); -} - -/**********************************************************************************/ -/* -** PTHREADS -*/ - -#elif ( defined THREADED_PTHREADS ) - -/* -** threadinit: Allocate threadid and initialize to -1; set max number of threads; -** Initialize the mutex for later use; Initialize nthreads to 0 -** -** Output results: -** nthreads: number of threads (init to zero here, increment later in get_thread_num) -** maxthreads: max number of threads (MAX_THREADS) -** -** threadid[] is allocated and initialized to -1 -** mutex is initialized for future use -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -static int threadinit (void) -{ - int t; /* thread number */ - int ret; /* return code */ - static const char *thisfunc = "threadinit"; - - /* - ** The following test is not rock-solid, but it's pretty close in terms of guaranteeing that - ** threadinit gets called by only 1 thread. Problem is, mutex hasn't yet been initialized - ** so we can't use it. - */ - - if (nthreads == -1) - nthreads = 0; - else - return GPTLerror ("PTHREADS %s: has already been called.\n" - "Maybe mistakenly called by multiple threads?\n", thisfunc); - - /* - ** Initialize the mutex required for critical regions. - ** Previously, t_mutex = PTHREAD_MUTEX_INITIALIZER on the static declaration line was - ** adequate to initialize the mutex. But this failed in programs that invoked - ** GPTLfinalize() followed by GPTLinitialize(). - ** "man pthread_mutex_init" indicates that passing NULL as the second argument to - ** pthread_mutex_init() should appropriately initialize the mutex, assuming it was - ** properly destroyed by a previous call to pthread_mutex_destroy(); - */ - -#ifdef MUTEX_API - if ((ret = pthread_mutex_init ((pthread_mutex_t *) &t_mutex, NULL)) != 0) - return GPTLerror ("PTHREADS %s: mutex init failure: ret=%d\n", thisfunc, ret); -#endif - - /* - ** Allocate the threadid array which maps physical thread IDs to logical IDs - */ - - if (threadid) - return GPTLerror ("PTHREADS %s: threadid not null\n", thisfunc); - else if ( ! (threadid = (pthread_t *) GPTLallocate (MAX_THREADS * sizeof (pthread_t)))) - return GPTLerror ("PTHREADS %s: malloc failure for %d elements of threadid\n", thisfunc, MAX_THREADS); - - maxthreads = MAX_THREADS; - - /* - ** Initialize threadid array to flag values for use by get_thread_num(). - ** get_thread_num() will fill in the values on first use. 
- */ - - for (t = 0; t < maxthreads; ++t) - threadid[t] = (pthread_t) -1; - -#ifdef VERBOSE - printf ("PTHREADS %s: Set maxthreads=%d nthreads=%d\n", thisfunc, maxthreads, nthreads); -#endif - - return 0; -} - -/* -** threadfinalize: Clean up -** -** Output results: -** threadid array is freed and array pointer nullified -** mutex is destroyed -*/ - -static void threadfinalize () -{ - int ret; - -#ifdef MUTEX_API - if ((ret = pthread_mutex_destroy ((pthread_mutex_t *) &t_mutex)) != 0) - printf ("threadfinalize: failed attempt to destroy t_mutex: ret=%d\n", ret); -#endif - free ((void *) threadid); - threadid = 0; -} - -/* -** get_thread_num: Determine zero-based thread number of the calling thread. -** Update nthreads and maxthreads if necessary. -** Start PAPI counters if enabled and first call for this thread. -** -** Output results: -** nthreads: Updated number of threads -** threadid: Our thread id added to list on 1st call -** -** Return value: thread number (success) or GPTLerror (failure) -*/ - -static inline int get_thread_num (void) -{ - int t; /* logical thread number, defined by array index of found threadid */ - pthread_t mythreadid; /* thread id from pthreads library */ - int retval; /* value to return to caller */ - bool foundit = false; /* thread id found in list */ - static const char *thisfunc = "get_thread_num"; - - mythreadid = pthread_self (); - - /* - ** If our thread number has already been set in the list, we are done - ** VECTOR code should run a bit faster on vector machines. - */ -#define VECTOR -#ifdef VECTOR - for (t = 0; t < nthreads; ++t) - if (pthread_equal (mythreadid, threadid[t])) { - foundit = true; - retval = t; - } - - if (foundit) - return retval; -#else - for (t = 0; t < nthreads; ++t) - if (pthread_equal (mythreadid, threadid[t])) - return t; -#endif - - /* - ** Thread id not found. Define a critical region, then start PAPI counters if - ** necessary and modify threadid[] with our id. - */ - - if (lock_mutex () < 0) - return GPTLerror ("PTHREADS %s: mutex lock failure\n", thisfunc); - - /* - ** If our thread id is not in the known list, add to it after checking that - ** we do not have too many threads. - */ - - if (nthreads >= MAX_THREADS) { - if (unlock_mutex () < 0) - fprintf (stderr, "PTHREADS %s: mutex unlock failure\n", thisfunc); - - return GPTLerror ("PTHREADS %s: nthreads=%d is too big. Recompile " - "with larger value of MAX_THREADS\n", thisfunc, nthreads); - } - - threadid[nthreads] = mythreadid; - -#ifdef VERBOSE - printf ("PTHREADS %s: 1st call threadid=%lu maps to location %d\n", - thisfunc, (unsigned long) mythreadid, nthreads); -#endif - -#ifdef HAVE_PAPI - - /* - ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, - ** create and start an event set for the new thread. - */ - - if (GPTLget_npapievents () > 0) { -#ifdef VERBOSE - printf ("PTHREADS get_thread_num: Starting EventSet threadid=%lu location=%d\n", - (unsigned long) mythreadid, nthreads); -#endif - if (GPTLcreate_and_start_events (nthreads) < 0) { - if (unlock_mutex () < 0) - fprintf (stderr, "PTHREADS %s: mutex unlock failure\n", thisfunc); - - return GPTLerror ("PTHREADS %s: error from GPTLcreate_and_start_events for thread %d\n", - thisfunc, nthreads); - } - } -#endif - - /* - ** IMPORTANT to set return value before unlocking the mutex!!!! - ** "return nthreads-1" fails occasionally when another thread modifies - ** nthreads after it gets the mutex! 
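The warning above is why the deleted code captures its return value while still holding the mutex. An isolated sketch of that lock/copy/unlock/return pattern, with the counter name purely illustrative:

/* Isolated sketch of the pattern the comment above describes: capture the
** shared counter while the mutex is held, unlock, then return the local
** copy.  Returning "counter - 1" after unlocking could race with another
** thread that increments the counter in between.
*/
#include <pthread.h>

static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
static int counter = 0;

static int next_id (void)
{
  int id;

  pthread_mutex_lock (&counter_mutex);
  id = counter++;
  pthread_mutex_unlock (&counter_mutex);
  return id;
}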
- */ - - retval = nthreads++; - -#ifdef VERBOSE - printf ("PTHREADS get_thread_num: nthreads bumped to %d\n", nthreads); -#endif - - if (unlock_mutex () < 0) - return GPTLerror ("PTHREADS %s: mutex unlock failure\n", thisfunc); - - return retval; -} - -/* -** lock_mutex: lock a mutex for private access -*/ - -static int lock_mutex () -{ - static const char *thisfunc = "lock_mutex"; - - if (pthread_mutex_lock ((pthread_mutex_t *) &t_mutex) != 0) - return GPTLerror ("%s: failure from pthread_lock_mutex\n", thisfunc); - - return 0; -} - -/* -** unlock_mutex: unlock a mutex from private access -*/ - -static int unlock_mutex () -{ - static const char *thisfunc = "unlock_mutex"; - - if (pthread_mutex_unlock ((pthread_mutex_t *) &t_mutex) != 0) - return GPTLerror ("%s: failure from pthread_unlock_mutex\n", thisfunc); - return 0; -} - -static void print_threadmapping (FILE *fp) -{ - int t; - - fprintf (fp, "\n"); - fprintf (fp, "Thread mapping:\n"); - for (t = 0; t < nthreads; ++t) - fprintf (fp, "threadid[%d] = %lu\n", t, (unsigned long) threadid[t]); -} - -/**********************************************************************************/ -/* -** Unthreaded case -*/ - -#else - -static int threadinit (void) -{ - static const char *thisfunc = "threadinit"; - - if (nthreads != -1) - return GPTLerror ("Unthreaded %s: MUST only be called once", thisfunc); - - nthreads = 0; - maxthreads = 1; - return 0; -} - -void threadfinalize () -{ - threadid = -1; -} - -static inline int get_thread_num () -{ - static const char *thisfunc = "get_thread_num"; -#ifdef HAVE_PAPI - /* - ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, - ** create and start an event set for the new thread. - */ - - if (threadid == -1 && GPTLget_npapievents () > 0) { - if (GPTLcreate_and_start_events (0) < 0) - return GPTLerror ("Unthreaded %s: error from GPTLcreate_and_start_events for thread %0\n", thisfunc); - - threadid = 0; - } -#endif - - nthreads = 1; - return 0; -} - -static void print_threadmapping (FILE *fp) -{ - fprintf (fp, "\n"); - fprintf (fp, "threadid[0] = 0\n"); -} - -#endif diff --git a/src/externals/pio1/timing/gptl.h b/src/externals/pio1/timing/gptl.h deleted file mode 100644 index 087fc88b214..00000000000 --- a/src/externals/pio1/timing/gptl.h +++ /dev/null @@ -1,158 +0,0 @@ -/* -** $Id: gptl.h,v 1.59 2011-03-28 20:55:19 rosinski Exp $ -** -** Author: Jim Rosinski -** -** GPTL header file to be included in user code -*/ - -#ifndef GPTL_H -#define GPTL_H - -#ifdef INCLUDE_CMAKE_FCI -#include "cmake_fortran_c_interface.h" -#endif - -/* following block for camtimers only */ -#ifndef NO_GETTIMEOFDAY -#define HAVE_GETTIMEOFDAY -#endif - -#ifdef SPMD -#define HAVE_MPI -#endif - -#ifdef _OPENMP -#ifndef THREADED_PTHREADS -#define THREADED_OMP -#endif -#endif -/* above block for camtimers only */ - -#ifdef HAVE_MPI -#include -#endif - -/* -** Options settable by a call to GPTLsetoption() (default in parens) -** These numbers need to be small integers because GPTLsetoption can -** be passed PAPI counters, and we need to avoid collisions in that -** integer space. PAPI presets are big negative integers, and PAPI -** native events are big positive integers. 
-*/ - -typedef enum { - GPTLsync_mpi = 0, /* Synchronize before certain MPI calls (PMPI-mode only) */ - GPTLwall = 1, /* Collect wallclock stats (true) */ - GPTLcpu = 2, /* Collect CPU stats (false)*/ - GPTLabort_on_error = 3, /* Abort on failure (false) */ - GPTLoverhead = 4, /* Estimate overhead of underlying timing routine (true) */ - GPTLdepthlimit = 5, /* Only print timers this depth or less in the tree (inf) */ - GPTLverbose = 6, /* Verbose output (false) */ - GPTLnarrowprint = 7, /* Print PAPI and derived stats in 8 columns not 16 (true) */ - GPTLpercent = 9, /* Add a column for percent of first timer (false) */ - GPTLpersec = 10, /* Add a PAPI column that prints "per second" stats (true) */ - GPTLmultiplex = 11, /* Allow PAPI multiplexing (false) */ - GPTLdopr_preamble = 12, /* Print preamble info (true) */ - GPTLdopr_threadsort = 13, /* Print sorted thread stats (true) */ - GPTLdopr_multparent = 14, /* Print multiple parent info (true) */ - GPTLdopr_collision = 15, /* Print hastable collision info (true) */ - GPTLprint_method = 16, /* Tree print method: first parent, last parent - most frequent, or full tree (most frequent) */ - GPTLtablesize = 50, /* per-thread size of hash table (1024) */ - /* - ** These are derived counters based on PAPI counters. All default to false - */ - GPTL_IPC = 17, /* Instructions per cycle */ - GPTL_CI = 18, /* Computational intensity */ - GPTL_FPC = 19, /* FP ops per cycle */ - GPTL_FPI = 20, /* FP ops per instruction */ - GPTL_LSTPI = 21, /* Load-store instruction fraction */ - GPTL_DCMRT = 22, /* L1 miss rate (fraction) */ - GPTL_LSTPDCM = 23, /* Load-stores per L1 miss */ - GPTL_L2MRT = 24, /* L2 miss rate (fraction) */ - GPTL_LSTPL2M = 25, /* Load-stores per L2 miss */ - GPTL_L3MRT = 26 /* L3 read miss rate (fraction) */ -} Option; - -/* -** Underlying wallclock timer: optimize for best granularity with least overhead. 
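The Option values above deliberately stay in a small integer range so that GPTLsetoption() can also accept PAPI event codes without collisions. A usage sketch, assuming the GPTL header (and optionally PAPI) is available; enabling options before GPTLinitialize() reflects typical usage rather than a requirement shown in this hunk.

/* Usage sketch: the same GPTLsetoption() entry point takes both the small
** GPTL option codes defined above and, when PAPI support is compiled in,
** the much larger PAPI event codes.
*/
#include "gptl.h"
#ifdef HAVE_PAPI
#include <papi.h>
#endif

int setup_gptl (void)
{
  (void) GPTLsetoption (GPTLwall, 1);          /* collect wallclock stats */
  (void) GPTLsetoption (GPTLverbose, 0);       /* quiet output */
  (void) GPTLsetoption (GPTLtablesize, 2048);  /* grow per-thread hash table */
#ifdef HAVE_PAPI
  (void) GPTLsetoption (PAPI_TOT_CYC, 1);      /* PAPI preset: large code, no collision */
#endif
  return GPTLinitialize ();
}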
-** These numbers need not be distinct from the above because these are passed -** to GPTLsetutr() and the above are passed to GPTLsetoption() -*/ - -typedef enum { - GPTLgettimeofday = 1, /* the default */ - GPTLnanotime = 2, /* only available on x86 */ - GPTLmpiwtime = 4, /* MPI_Wtime */ - GPTLclockgettime = 5, /* clock_gettime */ - GPTLpapitime = 6, /* only if PAPI is available */ - GPTLread_real_time = 3 /* AIX only */ -} Funcoption; - -/* -** How to report parent/child relationships at print time (for children with multiple parents) -*/ - -typedef enum { - GPTLfirst_parent = 1, /* first parent found */ - GPTLlast_parent = 2, /* last parent found */ - GPTLmost_frequent = 3, /* most frequent parent (default) */ - GPTLfull_tree = 4 /* complete call tree */ -} Method; - -/* -** Function prototypes -*/ - -#ifdef __cplusplus -extern "C" { -#endif - -extern int GPTLsetoption (const int, const int); -extern int GPTLinitialize (void); -extern int GPTLstart (const char *); -extern int GPTLstart_handle (const char *, void **); -extern int GPTLstop (const char *); -extern int GPTLstop_handle (const char *, void **); -extern int GPTLstamp (double *, double *, double *); -extern int GPTLpr_set_append (void); -extern int GPTLpr_query_append (void); -extern int GPTLpr_set_write (void); -extern int GPTLpr_query_write (void); -extern int GPTLpr (const int); -extern int GPTLpr_file (const char *); - -#ifdef HAVE_MPI -extern int GPTLpr_summary (MPI_Comm comm); -extern int GPTLpr_summary_file (MPI_Comm, const char *); -extern int GPTLbarrier (MPI_Comm comm, const char *); -#else -extern int GPTLpr_summary (int); -extern int GPTLpr_summary_file (int, const char *); -extern int GPTLbarrier (int, const char *); -#endif - -extern int GPTLreset (void); -extern int GPTLfinalize (void); -extern int GPTLget_memusage (int *, int *, int *, int *, int *); -extern int GPTLprint_memusage (const char *); -extern int GPTLenable (void); -extern int GPTLdisable (void); -extern int GPTLsetutr (const int); -extern int GPTLquery (const char *, int, int *, int *, double *, double *, double *, - long long *, const int); -extern int GPTLquerycounters (const char *, int, long long *); -extern int GPTLget_wallclock (const char *, int, double *); -extern int GPTLget_eventvalue (const char *, const char *, int, double *); -extern int GPTLget_nregions (int, int *); -extern int GPTLget_regionname (int, int, char *, int); -extern int GPTL_PAPIlibraryinit (void); -extern int GPTLevent_name_to_code (const char *, int *); -extern int GPTLevent_code_to_name (const int, char *); - -#ifdef __cplusplus -}; -#endif - -#endif diff --git a/src/externals/pio1/timing/gptl.inc b/src/externals/pio1/timing/gptl.inc deleted file mode 100644 index 2ed2ca5c070..00000000000 --- a/src/externals/pio1/timing/gptl.inc +++ /dev/null @@ -1,158 +0,0 @@ -! -! $Id: gptl.inc,v 1.44 2011-03-28 20:55:19 rosinski Exp $ -! -! Author: Jim Rosinski -! -! GPTL header file to be included in user code. Values match -! their counterparts in gptl.h. See that file or man pages -! or web-based documenation for descriptions of each value -! 
- integer GPTLsync_mpi - integer GPTLwall - integer GPTLcpu - integer GPTLabort_on_error - integer GPTLoverhead - integer GPTLdepthlimit - integer GPTLverbose - integer GPTLnarrowprint - integer GPTLpercent - integer GPTLpersec - integer GPTLmultiplex - integer GPTLdopr_preamble - integer GPTLdopr_threadsort - integer GPTLdopr_multparent - integer GPTLdopr_collision - integer GPTLprint_method - integer GPTLtablesize - - integer GPTL_IPC - integer GPTL_CI - integer GPTL_FPC - integer GPTL_FPI - integer GPTL_LSTPI - integer GPTL_DCMRT - integer GPTL_LSTPDCM - integer GPTL_L2MRT - integer GPTL_LSTPL2M - integer GPTL_L3MRT - - integer GPTLnanotime - integer GPTLmpiwtime - integer GPTLclockgettime - integer GPTLgettimeofday - integer GPTLpapitime - integer GPTLread_real_time - - integer GPTLfirst_parent - integer GPTLlast_parent - integer GPTLmost_frequent - integer GPTLfull_tree - - parameter (GPTLsync_mpi = 0) - parameter (GPTLwall = 1) - parameter (GPTLcpu = 2) - parameter (GPTLabort_on_error = 3) - parameter (GPTLoverhead = 4) - parameter (GPTLdepthlimit = 5) - parameter (GPTLverbose = 6) - parameter (GPTLnarrowprint = 7) - parameter (GPTLpercent = 9) - parameter (GPTLpersec = 10) - parameter (GPTLmultiplex = 11) - parameter (GPTLdopr_preamble = 12) - parameter (GPTLdopr_threadsort= 13) - parameter (GPTLdopr_multparent= 14) - parameter (GPTLdopr_collision = 15) - parameter (GPTLprint_method = 16) - parameter (GPTLtablesize = 50) - - parameter (GPTL_IPC = 17) - parameter (GPTL_CI = 18) - parameter (GPTL_FPC = 19) - parameter (GPTL_FPI = 20) - parameter (GPTL_LSTPI = 21) - parameter (GPTL_DCMRT = 22) - parameter (GPTL_LSTPDCM = 23) - parameter (GPTL_L2MRT = 24) - parameter (GPTL_LSTPL2M = 25) - parameter (GPTL_L3MRT = 26) - - parameter (GPTLgettimeofday = 1) - parameter (GPTLnanotime = 2) - parameter (GPTLmpiwtime = 4) - parameter (GPTLclockgettime = 5) - parameter (GPTLpapitime = 6) - parameter (GPTLread_real_time = 3) - - parameter (GPTLfirst_parent = 1) - parameter (GPTLlast_parent = 2) - parameter (GPTLmost_frequent = 3) - parameter (GPTLfull_tree = 4) - -! 
Externals - - integer gptlsetoption - integer gptlinitialize - integer gptlstart - integer gptlstart_handle - integer gptlstop - integer gptlstop_handle - integer gptlstamp - integer gptlpr_set_append - integer gptlpr_query_append - integer gptlpr_set_write - integer gptlpr_query_write - integer gptlpr - integer gptlpr_file - integer gptlpr_summary - integer gptlpr_summary_file - integer gptlbarrier - integer gptlreset - integer gptlfinalize - integer gptlget_memusage - integer gptlprint_memusage - integer gptlenable - integer gptldisable - integer gptlsetutr - integer gptlquery - integer gptlquerycounters - integer gptlget_wallclock - integer gptlget_eventvalue - integer gptlget_nregions - integer gptlget_regionname - integer gptl_papilibraryinit - integer gptlevent_name_to_code - integer gptlevent_code_to_name - - external gptlsetoption - external gptlinitialize - external gptlstart - external gptlstart_handle - external gptlstop - external gptlstop_handle - external gptlstamp - external gptlpr_set_append - external gptlpr_query_append - external gptlpr_set_write - external gptlpr_query_write - external gptlpr - external gptlpr_file - external gptlpr_summary - external gptlpr_summary_file - external gptlbarrier - external gptlreset - external gptlfinalize - external gptlget_memusage - external gptlprint_memusage - external gptlenable - external gptldisable - external gptlsetutr - external gptlquery - external gptlquerycounters - external gptlget_wallclock - external gptlget_eventvalue - external gptlget_nregions - external gptlget_regionname - external gptl_papilibraryinit - external gptlevent_name_to_code - external gptlevent_code_to_name diff --git a/src/externals/pio1/timing/gptl_papi.c b/src/externals/pio1/timing/gptl_papi.c deleted file mode 100644 index 941316918be..00000000000 --- a/src/externals/pio1/timing/gptl_papi.c +++ /dev/null @@ -1,1326 +0,0 @@ -/* -** $Id: gptl_papi.c,v 1.79 2011-03-28 20:55:19 rosinski Exp $ -** -** Author: Jim Rosinski -** -** Contains routines which interface to PAPI library -*/ - -#include "private.h" -#include "gptl.h" - -#ifdef HAVE_PAPI - -#include -#include -#include -#include - -#if ( defined THREADED_OMP ) -#include -#elif ( defined THREADED_PTHREADS ) -#include -#endif - -/* Mapping of PAPI counters to short and long printed strings */ - -static const Entry papitable [] = { - {PAPI_L1_DCM, "PAPI_L1_DCM", "L1_DCM ", "L1_Dcache_miss ", "Level 1 data cache misses"}, - {PAPI_L1_ICM, "PAPI_L1_ICM", "L1_ICM ", "L1_Icache_miss ", "Level 1 instruction cache misses"}, - {PAPI_L2_DCM, "PAPI_L2_DCM", "L2_DCM ", "L2_Dcache_miss ", "Level 2 data cache misses"}, - {PAPI_L2_ICM, "PAPI_L2_ICM", "L2_ICM ", "L2_Icache_miss ", "Level 2 instruction cache misses"}, - {PAPI_L3_DCM, "PAPI_L3_DCM", "L3_DCM ", "L3_Dcache_miss ", "Level 3 data cache misses"}, - {PAPI_L3_ICM, "PAPI_L3_ICM", "L3_ICM ", "L3_Icache_miss ", "Level 3 instruction cache misses"}, - {PAPI_L1_TCM, "PAPI_L1_TCM", "L1_TCM ", "L1_cache_miss ", "Level 1 total cache misses"}, - {PAPI_L2_TCM, "PAPI_L2_TCM", "L2_TCM ", "L2_cache_miss ", "Level 2 total cache misses"}, - {PAPI_L3_TCM, "PAPI_L3_TCM", "L3_TCM ", "L3_cache_miss ", "Level 3 total cache misses"}, - {PAPI_CA_SNP, "PAPI_CA_SNP", "CA_SNP ", "Snoops ", "Snoops "}, - {PAPI_CA_SHR, "PAPI_CA_SHR", "CA_SHR ", "PAPI_CA_SHR ", "Request for shared cache line (SMP)"}, - {PAPI_CA_CLN, "PAPI_CA_CLN", "CA_CLN ", "PAPI_CA_CLN ", "Request for clean cache line (SMP)"}, - {PAPI_CA_INV, "PAPI_CA_INV", "CA_INV ", "PAPI_CA_INV ", "Request for cache line 
Invalidation (SMP)"}, - {PAPI_CA_ITV, "PAPI_CA_ITV", "CA_ITV ", "PAPI_CA_ITV ", "Request for cache line Intervention (SMP)"}, - {PAPI_L3_LDM, "PAPI_L3_LDM", "L3_LDM ", "L3_load_misses ", "Level 3 load misses"}, - {PAPI_L3_STM, "PAPI_L3_STM", "L3_STM ", "L3_store_misses ", "Level 3 store misses"}, - {PAPI_BRU_IDL,"PAPI_BRU_IDL","BRU_IDL ", "PAPI_BRU_IDL ", "Cycles branch units are idle"}, - {PAPI_FXU_IDL,"PAPI_FXU_IDL","FXU_IDL ", "PAPI_FXU_IDL ", "Cycles integer units are idle"}, - {PAPI_FPU_IDL,"PAPI_FPU_IDL","FPU_IDL ", "PAPI_FPU_IDL ", "Cycles floating point units are idle"}, - {PAPI_LSU_IDL,"PAPI_LSU_IDL","LSU_IDL ", "PAPI_LSU_IDL ", "Cycles load/store units are idle"}, - {PAPI_TLB_DM, "PAPI_TLB_DM" "TLB_DM ", "Data_TLB_misses ", "Data translation lookaside buffer misses"}, - {PAPI_TLB_IM, "PAPI_TLB_IM", "TLB_IM ", "Inst_TLB_misses ", "Instr translation lookaside buffer misses"}, - {PAPI_TLB_TL, "PAPI_TLB_TL", "TLB_TL ", "Tot_TLB_misses ", "Total translation lookaside buffer misses"}, - {PAPI_L1_LDM, "PAPI_L1_LDM", "L1_LDM ", "L1_load_misses ", "Level 1 load misses"}, - {PAPI_L1_STM, "PAPI_L1_STM", "L1_STM ", "L1_store_misses ", "Level 1 store misses"}, - {PAPI_L2_LDM, "PAPI_L2_LDM", "L2_LDM ", "L2_load_misses ", "Level 2 load misses"}, - {PAPI_L2_STM, "PAPI_L2_STM", "L2_STM ", "L2_store_misses ", "Level 2 store misses"}, - {PAPI_BTAC_M, "PAPI_BTAC_M", "BTAC_M ", "BTAC_miss ", "BTAC miss"}, - {PAPI_PRF_DM, "PAPI_PRF_DM", "PRF_DM ", "PAPI_PRF_DM ", "Prefetch data instruction caused a miss"}, - {PAPI_L3_DCH, "PAPI_L3_DCH", "L3_DCH ", "L3_DCache_Hit ", "Level 3 Data Cache Hit"}, - {PAPI_TLB_SD, "PAPI_TLB_SD", "TLB_SD ", "PAPI_TLB_SD ", "Xlation lookaside buffer shootdowns (SMP)"}, - {PAPI_CSR_FAL,"PAPI_CSR_FAL","CSR_FAL ", "PAPI_CSR_FAL ", "Failed store conditional instructions"}, - {PAPI_CSR_SUC,"PAPI_CSR_SUC","CSR_SUC ", "PAPI_CSR_SUC ", "Successful store conditional instructions"}, - {PAPI_CSR_TOT,"PAPI_CSR_TOT","CSR_TOT ", "PAPI_CSR_TOT ", "Total store conditional instructions"}, - {PAPI_MEM_SCY,"PAPI_MEM_SCY","MEM_SCY ", "Cyc_Stalled_Mem ", "Cycles Stalled Waiting for Memory Access"}, - {PAPI_MEM_RCY,"PAPI_MEM_RCY","MEM_RCY ", "Cyc_Stalled_MemR", "Cycles Stalled Waiting for Memory Read"}, - {PAPI_MEM_WCY,"PAPI_MEM_WCY","MEM_WCY ", "Cyc_Stalled_MemW", "Cycles Stalled Waiting for Memory Write"}, - {PAPI_STL_ICY,"PAPI_STL_ICY","STL_ICY ", "Cyc_no_InstrIss ", "Cycles with No Instruction Issue"}, - {PAPI_FUL_ICY,"PAPI_FUL_ICY","FUL_ICY ", "Cyc_Max_InstrIss", "Cycles with Maximum Instruction Issue"}, - {PAPI_STL_CCY,"PAPI_STL_CCY","STL_CCY ", "Cyc_No_InstrComp", "Cycles with No Instruction Completion"}, - {PAPI_FUL_CCY,"PAPI_FUL_CCY","FUL_CCY ", "Cyc_Max_InstComp", "Cycles with Maximum Instruction Completion"}, - {PAPI_HW_INT, "PAPI_HW_INT", "HW_INT ", "HW_interrupts ", "Hardware interrupts"}, - {PAPI_BR_UCN, "PAPI_BR_UCN", "BR_UCN ", "Uncond_br_instr ", "Unconditional branch instructions executed"}, - {PAPI_BR_CN, "PAPI_BR_CN", "BR_CN ", "Cond_br_instr_ex", "Conditional branch instructions executed"}, - {PAPI_BR_TKN, "PAPI_BR_TKN", "BR_TKN ", "Cond_br_instr_tk", "Conditional branch instructions taken"}, - {PAPI_BR_NTK, "PAPI_BR_NTK", "BR_NTK ", "Cond_br_instrNtk", "Conditional branch instructions not taken"}, - {PAPI_BR_MSP, "PAPI_BR_MSP", "BR_MSP ", "Cond_br_instrMPR", "Conditional branch instructions mispred"}, - {PAPI_BR_PRC, "PAPI_BR_PRC", "BR_PRC ", "Cond_br_instrCPR", "Conditional branch instructions corr. 
pred"}, - {PAPI_FMA_INS,"PAPI_FMA_INS","FMA_INS ", "FMA_instr_comp ", "FMA instructions completed"}, - {PAPI_TOT_IIS,"PAPI_TOT_IIS","TOT_IIS ", "Total_instr_iss ", "Total instructions issued"}, - {PAPI_TOT_INS,"PAPI_TOT_INS","TOT_INS ", "Total_instr_ex ", "Total instructions executed"}, - {PAPI_INT_INS,"PAPI_INT_INS","INT_INS ", "Int_instr_ex ", "Integer instructions executed"}, - {PAPI_FP_INS, "PAPI_FP_INS", "FP_INS ", "FP_instr_ex ", "Floating point instructions executed"}, - {PAPI_LD_INS, "PAPI_LD_INS", "LD_INS ", "Load_instr_ex ", "Load instructions executed"}, - {PAPI_SR_INS, "PAPI_SR_INS", "SR_INS ", "Store_instr_ex ", "Store instructions executed"}, - {PAPI_BR_INS, "PAPI_BR_INS", "BR_INS ", "br_instr_ex ", "Total branch instructions executed"}, - {PAPI_VEC_INS,"PAPI_VEC_INS","VEC_INS ", "Vec/SIMD_instrEx", "Vector/SIMD instructions executed"}, - {PAPI_RES_STL,"PAPI_RES_STL","RES_STL ", "Cyc_proc_stalled", "Cycles processor is stalled on resource"}, - {PAPI_FP_STAL,"PAPI_FP_STAL","FP_STAL ", "Cyc_any_FP_stall", "Cycles any FP units are stalled"}, - {PAPI_TOT_CYC,"PAPI_TOT_CYC","TOT_CYC ", "Total_cycles ", "Total cycles"}, - {PAPI_LST_INS,"PAPI_LST_INS","LST_INS ", "Tot_L/S_inst_ex ", "Total load/store inst. executed"}, - {PAPI_SYC_INS,"PAPI_SYC_INS","SYC_INS ", "Sync._inst._ex ", "Sync. inst. executed"}, - {PAPI_L1_DCH, "PAPI_L1_DCH", "L1_DCH ", "L1_D_Cache_Hit ", "L1 D Cache Hit"}, - {PAPI_L2_DCH, "PAPI_L2_DCH", "L2_DCH ", "L2_D_Cache_Hit ", "L2 D Cache Hit"}, - {PAPI_L1_DCA, "PAPI_L1_DCA", "L1_DCA ", "L1_D_Cache_Acc ", "L1 D Cache Access"}, - {PAPI_L2_DCA, "PAPI_L2_DCA", "L2_DCA ", "L2_D_Cache_Acc ", "L2 D Cache Access"}, - {PAPI_L3_DCA, "PAPI_L3_DCA", "L3_DCA ", "L3_D_Cache_Acc ", "L3 D Cache Access"}, - {PAPI_L1_DCR, "PAPI_L1_DCR", "L1_DCR ", "L1_D_Cache_Read ", "L1 D Cache Read"}, - {PAPI_L2_DCR, "PAPI_L2_DCR", "L2_DCR ", "L2_D_Cache_Read ", "L2 D Cache Read"}, - {PAPI_L3_DCR, "PAPI_L3_DCR", "L3_DCR ", "L3_D_Cache_Read ", "L3 D Cache Read"}, - {PAPI_L1_DCW, "PAPI_L1_DCW", "L1_DCW ", "L1_D_Cache_Write", "L1 D Cache Write"}, - {PAPI_L2_DCW, "PAPI_L2_DCW", "L2_DCW ", "L2_D_Cache_Write", "L2 D Cache Write"}, - {PAPI_L3_DCW, "PAPI_L3_DCW", "L3_DCW ", "L3_D_Cache_Write", "L3 D Cache Write"}, - {PAPI_L1_ICH, "PAPI_L1_ICH", "L1_ICH ", "L1_I_cache_hits ", "L1 instruction cache hits"}, - {PAPI_L2_ICH, "PAPI_L2_ICH", "L2_ICH ", "L2_I_cache_hits ", "L2 instruction cache hits"}, - {PAPI_L3_ICH, "PAPI_L3_ICH", "L3_ICH ", "L3_I_cache_hits ", "L3 instruction cache hits"}, - {PAPI_L1_ICA, "PAPI_L1_ICA", "L1_ICA ", "L1_I_cache_acc ", "L1 instruction cache accesses"}, - {PAPI_L2_ICA, "PAPI_L2_ICA", "L2_ICA ", "L2_I_cache_acc ", "L2 instruction cache accesses"}, - {PAPI_L3_ICA, "PAPI_L3_ICA", "L3_ICA ", "L3_I_cache_acc ", "L3 instruction cache accesses"}, - {PAPI_L1_ICR, "PAPI_L1_ICR", "L1_ICR ", "L1_I_cache_reads", "L1 instruction cache reads"}, - {PAPI_L2_ICR, "PAPI_L2_ICR", "L2_ICR ", "L2_I_cache_reads", "L2 instruction cache reads"}, - {PAPI_L3_ICR, "PAPI_L3_ICR", "L3_ICR ", "L3_I_cache_reads", "L3 instruction cache reads"}, - {PAPI_L1_ICW, "PAPI_L1_ICW", "L1_ICW ", "L1_I_cache_write", "L1 instruction cache writes"}, - {PAPI_L2_ICW, "PAPI_L2_ICW", "L2_ICW ", "L2_I_cache_write", "L2 instruction cache writes"}, - {PAPI_L3_ICW, "PAPI_L3_ICW", "L3_ICW ", "L3_I_cache_write", "L3 instruction cache writes"}, - {PAPI_L1_TCH, "PAPI_L1_TCH", "L1_TCH ", "L1_cache_hits ", "L1 total cache hits"}, - {PAPI_L2_TCH, "PAPI_L2_TCH", "L2_TCH ", "L2_cache_hits ", "L2 total cache hits"}, - {PAPI_L3_TCH, 
"PAPI_L3_TCH", "L3_TCH ", "L3_cache_hits ", "L3 total cache hits"}, - {PAPI_L1_TCA, "PAPI_L1_TCA", "L1_TCA ", "L1_cache_access ", "L1 total cache accesses"}, - {PAPI_L2_TCA, "PAPI_L2_TCA", "L2_TCA ", "L2_cache_access ", "L2 total cache accesses"}, - {PAPI_L3_TCA, "PAPI_L3_TCA", "L3_TCA ", "L3_cache_access ", "L3 total cache accesses"}, - {PAPI_L1_TCR, "PAPI_L1_TCR", "L1_TCR ", "L1_cache_reads ", "L1 total cache reads"}, - {PAPI_L2_TCR, "PAPI_L2_TCR", "L2_TCR ", "L2_cache_reads ", "L2 total cache reads"}, - {PAPI_L3_TCR, "PAPI_L3_TCR", "L3_TCR ", "L3_cache_reads ", "L3 total cache reads"}, - {PAPI_L1_TCW, "PAPI_L1_TCW", "L1_TCW ", "L1_cache_writes ", "L1 total cache writes"}, - {PAPI_L2_TCW, "PAPI_L2_TCW", "L2_TCW ", "L2_cache_writes ", "L2 total cache writes"}, - {PAPI_L3_TCW, "PAPI_L3_TCW", "L3_TCW ", "L3_cache_writes ", "L3 total cache writes"}, - {PAPI_FML_INS,"PAPI_FML_INS","FML_INS ", "FM_ins ", "FM ins"}, - {PAPI_FAD_INS,"PAPI_FAD_INS","FAD_INS ", "FA_ins ", "FA ins"}, - {PAPI_FDV_INS,"PAPI_FDV_INS","FDV_INS ", "FD_ins ", "FD ins"}, - {PAPI_FSQ_INS,"PAPI_FSQ_INS","FSQ_INS ", "FSq_ins ", "FSq ins"}, - {PAPI_FNV_INS,"PAPI_FNV_INS","FNV_INS ", "Finv_ins ", "Finv ins"}, - {PAPI_FP_OPS, "PAPI_FP_OPS", "FP_OPS ", "FP_ops_executed ", "Floating point operations executed"} -}; - -static const int npapientries = sizeof (papitable) / sizeof (Entry); -static int papieventlist[MAX_AUX]; /* list of PAPI events to be counted */ -static Pr_event pr_event[MAX_AUX]; /* list of events (PAPI or derived) */ - -/* Derived events */ -static const Entry derivedtable [] = { - {GPTL_IPC, "GPTL_IPC", "IPC ", "Instr_per_cycle ", "Instructions per cycle"}, - {GPTL_CI, "GPTL_CI", "CI ", "Comp_Intensity ", "Computational intensity"}, - {GPTL_FPC, "GPTL_FPC", "Flop/Cyc", "FP_Ops_per_cycle", "Floating point ops per cycle"}, - {GPTL_FPI, "GPTL_FPI", "Flop/Ins", "FP_Ops_per_instr", "Floating point ops per instruction"}, - {GPTL_LSTPI, "GPTL_LSTPI", "LST_frac", "LST_fraction ", "Load-store instruction fraction"}, - {GPTL_DCMRT, "GPTL_DCMRT", "DCMISRAT", "L1_Miss_Rate ", "L1 miss rate (fraction)"}, - {GPTL_LSTPDCM,"GPTL_LSTPDCM", "LSTPDCM ", "LST_per_L1_miss ", "Load-store instructions per L1 miss"}, - {GPTL_L2MRT, "GPTL_L2MRT", "L2MISRAT", "L2_Miss_Rate ", "L2 miss rate (fraction)"}, - {GPTL_LSTPL2M,"GPTL_LSTPL2M", "LSTPL2M ", "LST_per_L2_miss ", "Load-store instructions per L2 miss"}, - {GPTL_L3MRT, "GPTL_L3MRT", "L3MISRAT", "L3_Miss_Rate ", "L3 read miss rate (fraction)"} -}; -static const int nderivedentries = sizeof (derivedtable) / sizeof (Entry); - -static int npapievents = 0; /* number of PAPI events: initialize to 0 */ -static int nevents = 0; /* number of events: initialize to 0 */ -static int *EventSet; /* list of events to be counted by PAPI */ -static long_long **papicounters; /* counters returned from PAPI */ - -static const int BADCOUNT = -999999; /* Set counters to this when they are bad */ -static bool is_multiplexed = false; /* whether multiplexed (always start false)*/ -static bool narrowprint = true; /* only use 8 digits not 16 for counter prints */ -static bool persec = true; /* print PAPI stats per second */ -static bool enable_multiplexing = false; /* whether to try multiplexing */ -static bool verbose = false; /* output verbosity */ - -/* Function prototypes */ - -static int canenable (int); -static int canenable2 (int, int); -static int papievent_is_enabled (int); -static int already_enabled (int); -static int enable (int); -static int getderivedidx (int); - -/* -** GPTL_PAPIsetoption: enable or 
disable PAPI event defined by "counter". Called -** from GPTLsetoption. Since all events are off by default, val=false degenerates -** to a no-op. Coded this way to be consistent with the rest of GPTL -** -** Input args: -** counter: PAPI counter -** val: true or false for enable or disable -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTL_PAPIsetoption (const int counter, /* PAPI counter (or option) */ - const int val) /* true or false for enable or disable */ -{ - int n; /* loop index */ - int ret; /* return code */ - int numidx; /* numerator index */ - int idx; /* derived counter index */ - char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ - - /* - ** First, check for option which is not an actual counter - */ - - switch (counter) { - case GPTLverbose: - /* don't printf here--that'd duplicate what's in gptl.c */ - verbose = (bool) val; - return 0; - case GPTLmultiplex: - enable_multiplexing = (bool) val; - if (verbose) - printf ("GPTL_PAPIsetoption: boolean enable_multiplexing = %d\n", val); - return 0; - case GPTLnarrowprint: - narrowprint = (bool) val; - if (verbose) - printf ("GPTL_PAPIsetoption: boolean narrowprint = %d\n", val); - return 0; - case GPTLpersec: - persec = (bool) val; - if (verbose) - printf ("GPTL_PAPIsetoption: boolean persec = %d\n", val); - return 0; - default: - break; - } - - /* - ** If val is false, return an error if the event has already been enabled. - ** Otherwise just warn that attempting to disable a PAPI-based event - ** that has already been enabled doesn't work--for now it's just a no-op - */ - - if (! val) { - if (already_enabled (counter)) - return GPTLerror ("GPTL_PAPIsetoption: already enabled counter %d cannot be disabled\n", - counter); - else - if (verbose) - printf ("GPTL_PAPIsetoption: 'disable' %d currently is just a no-op\n", counter); - return 0; - } - - /* If the event has already been enabled for printing, exit */ - - if (already_enabled (counter)) - return GPTLerror ("GPTL_PAPIsetoption: counter %d has already been enabled\n", - counter); - - /* - ** Initialize PAPI if it hasn't already been done. - ** From here on down we can assume the intent is to enable (not disable) an option - */ - - if (GPTL_PAPIlibraryinit () < 0) - return GPTLerror ("GPTL_PAPIsetoption: PAPI library init error\n"); - - /* Ensure max nevents won't be exceeded */ - - if (nevents+1 > MAX_AUX) - return GPTLerror ("GPTL_PAPIsetoption: %d is too many events. Can be increased in private.h\n", - nevents+1); - - /* Check derived events */ - - switch (counter) { - case GPTL_IPC: - if ( ! 
canenable2 (PAPI_TOT_INS, PAPI_TOT_CYC)) - return GPTLerror ("GPTL_PAPIsetoption: GPTL_IPC unavailable\n"); - - idx = getderivedidx (GPTL_IPC); - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_TOT_INS); - pr_event[nevents].denomidx = enable (PAPI_TOT_CYC); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_TOT_INS / PAPI_TOT_CYC\n", - pr_event[nevents].event.namestr); - ++nevents; - return 0; - case GPTL_CI: - idx = getderivedidx (GPTL_CI); - if (canenable2 (PAPI_FP_OPS, PAPI_LST_INS)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_FP_OPS); - pr_event[nevents].denomidx = enable (PAPI_LST_INS); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_LST_INS\n", - pr_event[nevents].event.namestr); - } else if (canenable2 (PAPI_FP_OPS, PAPI_L1_DCA)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_FP_OPS); - pr_event[nevents].denomidx = enable (PAPI_L1_DCA); -#ifdef DEBUG - printf ("GPTL_PAPIsetoption: pr_event %d is derived and will be PAPI event %d / %d\n", - nevents, pr_event[nevents].numidx, pr_event[nevents].denomidx); -#endif - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_L1_DCA\n", - pr_event[nevents].event.namestr); - } else { - return GPTLerror ("GPTL_PAPIsetoption: GPTL_CI unavailable\n"); - } - ++nevents; - return 0; - case GPTL_FPC: - if ( ! canenable2 (PAPI_FP_OPS, PAPI_TOT_CYC)) - return GPTLerror ("GPTL_PAPIsetoption: GPTL_FPC unavailable\n"); - - idx = getderivedidx (GPTL_FPC); - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_FP_OPS); - pr_event[nevents].denomidx = enable (PAPI_TOT_CYC); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_TOT_CYC\n", - pr_event[nevents].event.namestr); - ++nevents; - return 0; - case GPTL_FPI: - if ( ! canenable2 (PAPI_FP_OPS, PAPI_TOT_INS)) - return GPTLerror ("GPTL_PAPIsetoption: GPTL_FPI unavailable\n"); - - idx = getderivedidx (GPTL_FPI); - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_FP_OPS); - pr_event[nevents].denomidx = enable (PAPI_TOT_INS); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_FP_OPS / PAPI_TOT_INS\n", - pr_event[nevents].event.namestr); - ++nevents; - return 0; - case GPTL_LSTPI: - idx = getderivedidx (GPTL_LSTPI); - if (canenable2 (PAPI_LST_INS, PAPI_TOT_INS)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_LST_INS); - pr_event[nevents].denomidx = enable (PAPI_TOT_INS); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_LST_INS / PAPI_TOT_INS\n", - pr_event[nevents].event.namestr); - } else if (canenable2 (PAPI_L1_DCA, PAPI_TOT_INS)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_L1_DCA); - pr_event[nevents].denomidx = enable (PAPI_TOT_INS); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCA / PAPI_TOT_INS\n", - pr_event[nevents].event.namestr); - } else { - return GPTLerror ("GPTL_PAPIsetoption: GPTL_LSTPI unavailable\n"); - } - ++nevents; - return 0; - case GPTL_DCMRT: - if ( ! 
canenable2 (PAPI_L1_DCM, PAPI_L1_DCA)) - return GPTLerror ("GPTL_PAPIsetoption: GPTL_DCMRT unavailable\n"); - - idx = getderivedidx (GPTL_DCMRT); - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_L1_DCM); - pr_event[nevents].denomidx = enable (PAPI_L1_DCA); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCM / PAPI_L1_DCA\n", - pr_event[nevents].event.namestr); - ++nevents; - return 0; - case GPTL_LSTPDCM: - idx = getderivedidx (GPTL_LSTPDCM); - if (canenable2 (PAPI_LST_INS, PAPI_L1_DCM)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_LST_INS); - pr_event[nevents].denomidx = enable (PAPI_L1_DCM); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_LST_INS / PAPI_L1_DCM\n", - pr_event[nevents].event.namestr); - } else if (canenable2 (PAPI_L1_DCA, PAPI_L1_DCM)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_L1_DCA); - pr_event[nevents].denomidx = enable (PAPI_L1_DCM); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCA / PAPI_L1_DCM\n", - pr_event[nevents].event.namestr); - } else { - return GPTLerror ("GPTL_PAPIsetoption: GPTL_LSTPDCM unavailable\n"); - } - ++nevents; - return 0; - /* - ** For L2 counts, use TC* instead of DC* to avoid PAPI derived events - */ - case GPTL_L2MRT: - if ( ! canenable2 (PAPI_L2_TCM, PAPI_L2_TCA)) - return GPTLerror ("GPTL_PAPIsetoption: GPTL_L2MRT unavailable\n"); - - idx = getderivedidx (GPTL_L2MRT); - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_L2_TCM); - pr_event[nevents].denomidx = enable (PAPI_L2_TCA); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L2_TCM / PAPI_L2_TCA\n", - pr_event[nevents].event.namestr); - ++nevents; - return 0; - case GPTL_LSTPL2M: - idx = getderivedidx (GPTL_LSTPL2M); - if (canenable2 (PAPI_LST_INS, PAPI_L2_TCM)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_LST_INS); - pr_event[nevents].denomidx = enable (PAPI_L2_TCM); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_LST_INS / PAPI_L2_TCM\n", - pr_event[nevents].event.namestr); - } else if (canenable2 (PAPI_L1_DCA, PAPI_L2_TCM)) { - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_L1_DCA); - pr_event[nevents].denomidx = enable (PAPI_L2_TCM); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L1_DCA / PAPI_L2_TCM\n", - pr_event[nevents].event.namestr); - } else { - return GPTLerror ("GPTL_PAPIsetoption: GPTL_LSTPL2M unavailable\n"); - } - ++nevents; - return 0; - case GPTL_L3MRT: - if ( ! 
canenable2 (PAPI_L3_TCM, PAPI_L3_TCR)) - return GPTLerror ("GPTL_PAPIsetoption: GPTL_L3MRT unavailable\n"); - - idx = getderivedidx (GPTL_L3MRT); - pr_event[nevents].event = derivedtable[idx]; - pr_event[nevents].numidx = enable (PAPI_L3_TCM); - pr_event[nevents].denomidx = enable (PAPI_L3_TCR); - if (verbose) - printf ("GPTL_PAPIsetoption: enabling derived event %s = PAPI_L3_TCM / PAPI_L3_TCR\n", - pr_event[nevents].event.namestr); - ++nevents; - return 0; - default: - break; - } - - /* Check PAPI presets */ - - for (n = 0; n < npapientries; n++) { - if (counter == papitable[n].counter) { - if ((numidx = papievent_is_enabled (counter)) >= 0) { - pr_event[nevents].event = papitable[n]; - pr_event[nevents].numidx = numidx; - pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ - } else if (canenable (counter)) { - pr_event[nevents].event = papitable[n]; - pr_event[nevents].numidx = enable (counter); - pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ - } else { - return GPTLerror ("GPTL_PAPIsetoption: Can't enable event \n", - papitable[n].longstr); - } - if (verbose) - printf ("GPTL_PAPIsetoption: enabling PAPI preset event %s\n", - pr_event[nevents].event.namestr); - ++nevents; - return 0; - } - } - - /* - ** Check native events last: If PAPI_event_code_to_name fails, give up - */ - - if ((ret = PAPI_event_code_to_name (counter, eventname)) != PAPI_OK) - return GPTLerror ("GPTL_PAPIsetoption: name not found for counter %d: PAPI_strerror: %s\n", - counter, PAPI_strerror (ret)); - - /* - ** A table with predefined names of various lengths does not exist for - ** native events. Just truncate eventname. - */ - - if ((numidx = papievent_is_enabled (counter)) >= 0) { - pr_event[nevents].event.counter = counter; - - pr_event[nevents].event.namestr = (char *) GPTLallocate (12+1); - strncpy (pr_event[nevents].event.namestr, eventname, 12); - pr_event[nevents].event.namestr[12] = '\0'; - - pr_event[nevents].event.str16 = (char *) GPTLallocate (16+1); - strncpy (pr_event[nevents].event.str16, eventname, 16); - pr_event[nevents].event.str16[16] = '\0'; - - pr_event[nevents].event.longstr = (char *) GPTLallocate (PAPI_MAX_STR_LEN); - strncpy (pr_event[nevents].event.longstr, eventname, PAPI_MAX_STR_LEN); - - pr_event[nevents].numidx = numidx; - pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ - } else if (canenable (counter)) { - pr_event[nevents].event.counter = counter; - - pr_event[nevents].event.namestr = (char *) GPTLallocate (12+1); - strncpy (pr_event[nevents].event.namestr, eventname, 12); - pr_event[nevents].event.namestr[12] = '\0'; - - pr_event[nevents].event.str16 = (char *) GPTLallocate (16+1); - strncpy (pr_event[nevents].event.str16, eventname, 16); - pr_event[nevents].event.str16[16] = '\0'; - - pr_event[nevents].event.longstr = (char *) GPTLallocate (PAPI_MAX_STR_LEN); - strncpy (pr_event[nevents].event.longstr, eventname, PAPI_MAX_STR_LEN); - - pr_event[nevents].numidx = enable (counter); - pr_event[nevents].denomidx = -1; /* flag says not derived (no denominator) */ - } else { - return GPTLerror ("GPTL_PAPIsetoption: Can't enable event %s\n", eventname); - } - - if (verbose) - printf ("GPTL_PAPIsetoption: enabling native event %s\n", pr_event[nevents].event.longstr); - - ++nevents; - return 0; -} - -/* -** canenable: determine whether a PAPI counter can be enabled -** -** Input args: -** counter: PAPI counter -** -** Return value: 0 (success) or non-zero (failure) -*/ - -int canenable (int counter) -{ 
- char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ - - if (npapievents+1 > MAX_AUX) - return false; - - if (PAPI_query_event (counter) != PAPI_OK) { - (void) PAPI_event_code_to_name (counter, eventname); - fprintf (stderr, "canenable: event %s not available on this arch\n", eventname); - return false; - } - - return true; -} - -/* -** canenable2: determine whether 2 PAPI counters can be enabled -** -** Input args: -** counter1: PAPI counter -** counter2: PAPI counter -** -** Return value: 0 (success) or non-zero (failure) -*/ - -int canenable2 (int counter1, int counter2) -{ - char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ - - if (npapievents+2 > MAX_AUX) - return false; - - if (PAPI_query_event (counter1) != PAPI_OK) { - (void) PAPI_event_code_to_name (counter1, eventname); - return false; - } - - if (PAPI_query_event (counter2) != PAPI_OK) { - (void) PAPI_event_code_to_name (counter2, eventname); - return false; - } - - return true; -} - -/* -** papievent_is_enabled: determine whether a PAPI counter has already been -** enabled. Used internally to keep track of PAPI counters enabled. A given -** PAPI counter may occur in the computation of multiple derived events, as -** well as output directly. E.g. PAPI_FP_OPS is used to compute -** computational intensity, and floating point ops per instruction. -** -** Input args: -** counter: PAPI counter -** -** Return value: index into papieventlist (success) or negative (not found) -*/ - -int papievent_is_enabled (int counter) -{ - int n; - - for (n = 0; n < npapievents; ++n) - if (papieventlist[n] == counter) - return n; - return -1; -} - -/* -** already_enabled: determine whether a PAPI-based event has already been -** enabled for printing. -** -** Input args: -** counter: PAPI or derived counter -** -** Return value: 1 (true) or 0 (false) -*/ - -int already_enabled (int counter) -{ - int n; - - for (n = 0; n < nevents; ++n) - if (pr_event[n].event.counter == counter) - return 1; - return 0; -} - -/* -** enable: enable a PAPI event. ASSUMES that canenable() has already determined -** that the event can be enabled. 
-** -** Input args: -** counter: PAPI counter -** -** Return value: index into papieventlist -*/ - -int enable (int counter) -{ - int n; - - /* If the event is already enabled, return its index */ - - for (n = 0; n < npapievents; ++n) { - if (papieventlist[n] == counter) { -#ifdef DEBUG - printf ("enable: PAPI event %d is %d\n", n, counter); -#endif - return n; - } - } - - /* New event */ - - papieventlist[npapievents++] = counter; - return npapievents-1; -} - -/* -** getderivedidx: find the table index of a derived counter -** -** Input args: -** counter: derived counter -** -** Return value: index into derivedtable (success) or GPTLerror (failure) -*/ - -int getderivedidx (int dcounter) -{ - int n; - - for (n = 0; n < nderivedentries; ++n) { - if (derivedtable[n].counter == dcounter) - return n; - } - return GPTLerror ("getderivedidx: failed to find derived counter %d\n", dcounter); -} - -/* -** GPTL_PAPIlibraryinit: Call PAPI_library_init if necessary -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTL_PAPIlibraryinit () -{ - int ret; - - if ((ret = PAPI_is_initialized ()) == PAPI_NOT_INITED) { - if ((ret = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) { - fprintf (stderr, "GPTL_PAPIlibraryinit: ret=%d PAPI_VER_CURRENT=%d\n", - ret, (int) PAPI_VER_CURRENT); - return GPTLerror ("GPTL_PAPIlibraryinit: PAPI_library_init failure:%s\n", - PAPI_strerror (ret)); - } - } - return 0; -} - -/* -** GPTL_PAPIinitialize(): Initialize the PAPI interface. Called from GPTLinitialize. -** PAPI_library_init must be called before any other PAPI routines. -** PAPI_thread_init is called subsequently if threading is enabled. -** Finally, allocate space for PAPI counters and start them. -** -** Input args: -** maxthreads: number of threads -** -** Return value: 0 (success) or GPTLerror or -1 (failure) -*/ - -int GPTL_PAPIinitialize (const int maxthreads, /* number of threads */ - const bool verbose_flag, /* output verbosity */ - int *nevents_out, /* nevents needed by gptl.c */ - Entry *pr_event_out) /* events needed by gptl.c */ -{ - int ret; /* return code */ - int n; /* loop index */ - int t; /* thread index */ - - verbose = verbose_flag; - - if (maxthreads < 1) - return GPTLerror ("GPTL_PAPIinitialize: maxthreads = %d\n", maxthreads); - - /* Ensure that PAPI_library_init has already been called */ - - if ((ret = GPTL_PAPIlibraryinit ()) < 0) - return GPTLerror ("GPTL_PAPIinitialize: GPTL_PAPIlibraryinit failure\n"); - - /* PAPI_thread_init needs to be called if threading enabled */ - -#if ( defined THREADED_OMP ) - if (PAPI_thread_init ((unsigned long (*)(void)) (omp_get_thread_num)) != PAPI_OK) - return GPTLerror ("GPTL_PAPIinitialize: PAPI_thread_init failure\n"); -#elif ( defined THREADED_PTHREADS ) - if (PAPI_thread_init ((unsigned long (*)(void)) (pthread_self)) != PAPI_OK) - return GPTLerror ("GPTL_PAPIinitialize: PAPI_thread_init failure\n"); -#endif - - /* allocate and initialize static local space */ - - EventSet = (int *) GPTLallocate (maxthreads * sizeof (int)); - papicounters = (long_long **) GPTLallocate (maxthreads * sizeof (long_long *)); - - for (t = 0; t < maxthreads; t++) { - EventSet[t] = PAPI_NULL; - papicounters[t] = (long_long *) GPTLallocate (MAX_AUX * sizeof (long_long)); - } - - *nevents_out = nevents; - for (n = 0; n < nevents; ++n) { - pr_event_out[n].counter = pr_event[n].event.counter; - pr_event_out[n].namestr = pr_event[n].event.namestr; - pr_event_out[n].str8 = pr_event[n].event.str8; - pr_event_out[n].str16 = pr_event[n].event.str16; - 
pr_event_out[n].longstr = pr_event[n].event.longstr; - } - return 0; -} - -/* -** GPTLcreate_and_start_events: Create and start the PAPI eventset. -** Threaded routine to create the "event set" (PAPI terminology) and start -** the counters. This is only done once, and is called from get_thread_num -** for the first time for the thread. -** -** Input args: -** t: thread number -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLcreate_and_start_events (const int t) /* thread number */ -{ - int ret; /* return code */ - int n; /* loop index over events */ - char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ - - /* Create the event set */ - - if ((ret = PAPI_create_eventset (&EventSet[t])) != PAPI_OK) - return GPTLerror ("GPTLcreate_and_start_events: thread %d failure creating eventset: %s\n", - t, PAPI_strerror (ret)); - - if (verbose) - printf ("GPTLcreate_and_start_events: successfully created eventset for thread %d\n", t); - - /* Add requested events to the event set */ - - for (n = 0; n < npapievents; n++) { - if ((ret = PAPI_add_event (EventSet[t], papieventlist[n])) != PAPI_OK) { - if (verbose) { - fprintf (stderr, "%s\n", PAPI_strerror (ret)); - ret = PAPI_event_code_to_name (papieventlist[n], eventname); - fprintf (stderr, "GPTLcreate_and_start_events: failure adding event:%s\n", - eventname); - } - - if (enable_multiplexing) { - if (verbose) - printf ("Trying multiplexing...\n"); - is_multiplexed = true; - break; - } else - return GPTLerror ("enable_multiplexing is false: giving up\n"); - } - } - - if (is_multiplexed) { - - /* Cleanup the eventset for multiplexing */ - - if ((ret = PAPI_cleanup_eventset (EventSet[t])) != PAPI_OK) - return GPTLerror ("GPTLcreate_and_start_events: %s\n", PAPI_strerror (ret)); - - if ((ret = PAPI_destroy_eventset (&EventSet[t])) != PAPI_OK) - return GPTLerror ("GPTLcreate_and_start_events: %s\n", PAPI_strerror (ret)); - - if ((ret = PAPI_create_eventset (&EventSet[t])) != PAPI_OK) - return GPTLerror ("GPTLcreate_and_start_events: failure creating eventset: %s\n", - PAPI_strerror (ret)); - - if ((ret = PAPI_multiplex_init ()) != PAPI_OK) - return GPTLerror ("GPTLcreate_and_start_events: failure from PAPI_multiplex_init%s\n", - PAPI_strerror (ret)); - - if ((ret = PAPI_set_multiplex (EventSet[t])) != PAPI_OK) - return GPTLerror ("GPTLcreate_and_start_events: failure from PAPI_set_multiplex: %s\n", - PAPI_strerror (ret)); - - for (n = 0; n < npapievents; n++) { - if ((ret = PAPI_add_event (EventSet[t], papieventlist[n])) != PAPI_OK) { - ret = PAPI_event_code_to_name (papieventlist[n], eventname); - return GPTLerror ("GPTLcreate_and_start_events: failure adding event:%s\n" - " Error was: %s\n", eventname, PAPI_strerror (ret)); - } - } - } - - /* Start the event set. It will only be read from now on--never stopped */ - - if ((ret = PAPI_start (EventSet[t])) != PAPI_OK) - return GPTLerror ("GPTLcreate_and_start_events: failed to start event set: %s\n", - PAPI_strerror (ret)); - - return 0; -} - -/* -** GPTL_PAPIstart: Start the PAPI counters (actually they are just read). -** Called from GPTLstart. 
-** -** Input args: -** t: thread number -** -** Output args: -** aux: struct containing the counters -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTL_PAPIstart (const int t, /* thread number */ - Papistats *aux) /* struct containing PAPI stats */ -{ - int ret; /* return code from PAPI lib calls */ - int n; /* loop index */ - - /* If no events are to be counted just return */ - - if (npapievents == 0) - return 0; - - /* Read the counters */ - - if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK) - return GPTLerror ("GPTL_PAPIstart: %s\n", PAPI_strerror (ret)); - - /* - ** Store the counter values. When GPTL_PAPIstop is called, the counters - ** will again be read, and differenced with the values saved here. - */ - - for (n = 0; n < npapievents; n++) - aux->last[n] = papicounters[t][n]; - - return 0; -} - -/* -** GPTL_PAPIstop: Stop the PAPI counters (actually they are just read). -** Called from GPTLstop. -** -** Input args: -** t: thread number -** -** Input/output args: -** aux: struct containing the counters -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTL_PAPIstop (const int t, /* thread number */ - Papistats *aux) /* struct containing PAPI stats */ -{ - int ret; /* return code from PAPI lib calls */ - int n; /* loop index */ - long_long delta; /* change in counters from previous read */ - - /* If no events are to be counted just return */ - - if (npapievents == 0) - return 0; - - /* Read the counters */ - - if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK) - return GPTLerror ("GPTL_PAPIstop: %s\n", PAPI_strerror (ret)); - - /* - ** Accumulate the difference since timer start in aux. - ** Negative accumulation can happen when multiplexing is enabled, so don't - ** set count to BADCOUNT in that case. - */ - - for (n = 0; n < npapievents; n++) { -#ifdef DEBUG - printf ("GPTL_PAPIstop: event %d counter value is %ld\n", n, (long) papicounters[t][n]); -#endif - delta = papicounters[t][n] - aux->last[n]; - if ( ! is_multiplexed && delta < 0) - aux->accum[n] = BADCOUNT; - else - aux->accum[n] += delta; - } - return 0; -} - -/* -** GPTL_PAPIprstr: Print the descriptive string for all enabled PAPI events. -** Called from GPTLpr. -** -** Input args: -** fp: file descriptor -*/ - -void GPTL_PAPIprstr (FILE *fp) -{ - int n; - - if (narrowprint) { - for (n = 0; n < nevents; n++) { - fprintf (fp, "%8.8s ", pr_event[n].event.str8); - - /* Test on < 0 says it's a PAPI preset */ - - if (persec && pr_event[n].event.counter < 0) - fprintf (fp, "e6_/_sec "); - } - } else { - for (n = 0; n < nevents; n++) { - fprintf (fp, "%16.16s ", pr_event[n].event.str16); - - /* Test on < 0 says it's a PAPI preset */ - - if (persec && pr_event[n].event.counter < 0) - fprintf (fp, "e6_/_sec "); - } - } -} - -/* -** GPTL_PAPIpr: Print PAPI counter values for all enabled events, including -** derived events. Called from GPTLpr. 
-** -** Input args: -** fp: file descriptor -** aux: struct containing the counters -*/ - -void GPTL_PAPIpr (FILE *fp, /* file descriptor to write to */ - const Papistats *aux, /* stats to write */ - const int t, /* thread number */ - const int count, /* number of invocations */ - const double wcsec) /* wallclock time (sec) */ -{ - const char *shortintfmt = "%8ld "; - const char *longintfmt = "%16ld "; - const char *shortfloatfmt = "%8.2e "; - const char *longfloatfmt = "%16.10e "; - const char *intfmt; /* integer format */ - const char *floatfmt; /* floating point format */ - - int n; /* loop index */ - int numidx; /* index pointer to appropriated (derived) numerator */ - int denomidx; /* index pointer to appropriated (derived) denominator */ - double val; /* value to be printed */ - - intfmt = narrowprint ? shortintfmt : longintfmt; - floatfmt = narrowprint ? shortfloatfmt : longfloatfmt; - - for (n = 0; n < nevents; n++) { - numidx = pr_event[n].numidx; - if (pr_event[n].denomidx > -1) { /* derived event */ - denomidx = pr_event[n].denomidx; - -#ifdef DEBUG - printf ("GPTL_PAPIpr: derived event: numidx=%d denomidx=%d values = %ld %ld\n", - numidx, denomidx, (long) aux->accum[numidx], (long) aux->accum[denomidx]); -#endif - /* Protect against divide by zero */ - - if (aux->accum[denomidx] > 0) - val = (double) aux->accum[numidx] / (double) aux->accum[denomidx]; - else - val = 0.; - fprintf (fp, floatfmt, val); - - } else { /* Raw PAPI event */ - -#ifdef DEBUG - printf ("GPTL_PAPIpr: raw event: numidx=%d value = %ld\n", - numidx, (long) aux->accum[numidx]); -#endif - if (aux->accum[numidx] < PRTHRESH) - fprintf (fp, intfmt, (long) aux->accum[numidx]); - else - fprintf (fp, floatfmt, (double) aux->accum[numidx]); - - if (persec) { - if (wcsec > 0.) - fprintf (fp, "%8.2f ", aux->accum[numidx] * 1.e-6 / wcsec); - else - fprintf (fp, "%8.2f ", 0.); - } - } - } -} - -/* -** GPTL_PAPIprintenabled: Print list of enabled timers -** -** Input args: -** fp: file descriptor -*/ - -void GPTL_PAPIprintenabled (FILE *fp) -{ - int n, nn; - PAPI_event_info_t info; /* returned from PAPI_get_event_info */ - char eventname[PAPI_MAX_STR_LEN]; /* returned from PAPI_event_code_to_name */ - - if (nevents > 0) { - fprintf (fp, "Description of printed events (PAPI and derived):\n"); - for (n = 0; n < nevents; n++) { - if (strncmp (pr_event[n].event.namestr, "GPTL", 4) == 0) { - fprintf (fp, " %s: %s\n", pr_event[n].event.namestr, pr_event[n].event.longstr); - } else { - nn = pr_event[n].event.counter; - if (PAPI_get_event_info (nn, &info) == PAPI_OK) { - fprintf (fp, " %s\n", info.short_descr); - fprintf (fp, " %s\n", info.note); - } - } - } - fprintf (fp, "\n"); - - fprintf (fp, "PAPI events enabled (including those required for derived events):\n"); - for (n = 0; n < npapievents; n++) - if (PAPI_event_code_to_name (papieventlist[n], eventname) == PAPI_OK) - fprintf (fp, " %s\n", eventname); - fprintf (fp, "\n"); - } -} - -/* -** GPTL_PAPIadd: Accumulate PAPI counters. Called from add. -** -** Input/Output args: -** auxout: auxout = auxout + auxin -** -** Input args: -** auxin: counters to be summed into auxout -*/ - -void GPTL_PAPIadd (Papistats *auxout, /* output struct */ - const Papistats *auxin) /* input struct */ -{ - int n; - - for (n = 0; n < npapievents; n++) - if (auxin->accum[n] == BADCOUNT || auxout->accum[n] == BADCOUNT) - auxout->accum[n] = BADCOUNT; - else - auxout->accum[n] += auxin->accum[n]; -} - -/* -** GPTL_PAPIfinalize: finalization routine must be called from single-threaded -** region. 
Free all malloc'd space -*/ - -void GPTL_PAPIfinalize (int maxthreads) -{ - int t; /* thread index */ - int ret; /* return code */ - - for (t = 0; t < maxthreads; t++) { - ret = PAPI_stop (EventSet[t], papicounters[t]); - free (papicounters[t]); - ret = PAPI_cleanup_eventset (EventSet[t]); - ret = PAPI_destroy_eventset (&EventSet[t]); - } - - free (EventSet); - free (papicounters); - - /* Reset initial values */ - - npapievents = 0; - nevents = 0; - is_multiplexed = false; - narrowprint = true; - persec = true; - enable_multiplexing = false; - verbose = false; -} - -/* -** GPTL_PAPIquery: return current PAPI counter info. Return into a long for best -** compatibility possibilities with Fortran. -** -** Input args: -** aux: struct containing the counters -** ncounters: max number of counters to return -** -** Output args: -** papicounters_out: current value of PAPI counters -*/ - -void GPTL_PAPIquery (const Papistats *aux, - long long *papicounters_out, - int ncounters) -{ - int n; - - if (ncounters > 0) { - for (n = 0; n < ncounters && n < npapievents; n++) { - papicounters_out[n] = (long long) aux->accum[n]; - } - } -} - -/* -** GPTL_PAPIget_eventvalue: return current value for an enabled event. -** -** Input args: -** eventname: event name to check (whether derived or raw PAPI counter) -** aux: struct containing the counter(s) for the event -** -** Output args: -** value: current value of the event -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTL_PAPIget_eventvalue (const char *eventname, - const Papistats *aux, - double *value) -{ - int n; /* loop index through enabled events */ - int numidx; /* numerator index into papicounters */ - int denomidx; /* denominator index into papicounters */ - - for (n = 0; n < nevents; ++n) { - if (STRMATCH (eventname, pr_event[n].event.namestr)) { - numidx = pr_event[n].numidx; - if (pr_event[n].denomidx > -1) { /* derived event */ - denomidx = pr_event[n].denomidx; - if (aux->accum[denomidx] > 0) /* protect against divide by zero */ - *value = (double) aux->accum[numidx] / (double) aux->accum[denomidx]; - else - *value = 0.; - } else { /* Raw PAPI event */ - *value = (double) aux->accum[numidx]; - } - break; - } - } - if (n == nevents) - return GPTLerror ("GPTL_PAPIget_eventvalue: event %s not enabled\n", eventname); - return 0; -} - -/* -** GPTL_PAPIis_multiplexed: return status of whether events are being multiplexed -*/ - -bool GPTL_PAPIis_multiplexed () -{ - return is_multiplexed; -} - -/* -** The following functions are publicly available -*/ - -void read_counters100 () -{ - int i; - int ret; - long_long counters[MAX_AUX]; - - for (i = 0; i < 10; ++i) { - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - ret = PAPI_read (EventSet[0], counters); - } - return; -} - -/* -** GPTLevent_name_to_code: convert a string to a PAPI code -** or derived event code. 
-** -** Input arguments: -** arg: string to convert -** -** Output arguments: -** code: PAPI or GPTL derived code -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLevent_name_to_code (const char *name, int *code) -{ - int ret; /* return code */ - int n; /* loop over derived entries */ - - /* - ** First check derived events - */ - - for (n = 0; n < nderivedentries; ++n) { - if (STRMATCH (name, derivedtable[n].namestr)) { - *code = derivedtable[n].counter; - return 0; - } - } - - /* - ** Next check PAPI events--note that PAPI must be initialized before the - ** name_to_code function can be invoked. - */ - - if ((ret = GPTL_PAPIlibraryinit ()) < 0) - return GPTLerror ("GPTL_event_name_to_code: GPTL_PAPIlibraryinit failure\n"); - - if ((PAPI_event_name_to_code ((char *) name, code)) != PAPI_OK) - return GPTLerror ("GPTL_event_name_to_code: PAPI_event_name_to_code failure\n"); - - return 0; -} - -/* -** GPTLevent_code_to_name: convert a string to a PAPI code -** or derived event code. -** -** Input arguments: -** code: event code (PAPI or derived) -** -** Output arguments: -** name: string corresponding to code -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int GPTLevent_code_to_name (const int code, char *name) -{ - int ret; /* return code */ - int n; /* loop over derived entries */ - - /* - ** First check derived events - */ - - for (n = 0; n < nderivedentries; ++n) { - if (code == derivedtable[n].counter) { - strcpy (name, derivedtable[n].namestr); - return 0; - } - } - - /* - ** Next check PAPI events--note that PAPI must be initialized before the - ** code_to_name function can be invoked. - */ - - if ((ret = GPTL_PAPIlibraryinit ()) < 0) - return GPTLerror ("GPTL_event_code_to_name: GPTL_PAPIlibraryinit failure\n"); - - if (PAPI_event_code_to_name (code, name) != PAPI_OK) - return GPTLerror ("GPTL_event_code_to_name: PAPI_event_code_to_name failure\n"); - - return 0; -} - -int GPTLget_npapievents (void) -{ - return npapievents; -} - -#else - -/* -** HAVE_PAPI not defined branch: "Should not be called" entry points for public routines -*/ - -int GPTL_PAPIlibraryinit () -{ - return GPTLerror ("GPTL_PAPIlibraryinit: PAPI not enabled\n"); -} - -int GPTLevent_name_to_code (const char *name, int *code) -{ - return GPTLerror ("GPTLevent_name_to_code: PAPI not enabled\n"); -} - -int GPTLevent_code_to_name (int code, char *name) -{ - return GPTLerror ("GPTLevent_code_to_name: PAPI not enabled\n"); -} - -#endif /* HAVE_PAPI */ - diff --git a/src/externals/pio1/timing/perf_mod.F90 b/src/externals/pio1/timing/perf_mod.F90 deleted file mode 100644 index a4e25cc1f4d..00000000000 --- a/src/externals/pio1/timing/perf_mod.F90 +++ /dev/null @@ -1,1430 +0,0 @@ -module perf_mod - -!----------------------------------------------------------------------- -! -! Purpose: This module is responsible for controlling the performance -! timer logic. -! -! Author: P. Worley, January 2007 -! -! $Id$ -! 
-!----------------------------------------------------------------------- - -!----------------------------------------------------------------------- -!- Uses ---------------------------------------------------------------- -!----------------------------------------------------------------------- - -#ifndef USE_CSM_SHARE - use perf_utils -#else - use shr_sys_mod, only: shr_sys_abort - use shr_kind_mod, only: shr_kind_cl, shr_kind_r8, shr_kind_i8 - use shr_mpi_mod, only: shr_mpi_barrier, shr_mpi_bcast - use shr_file_mod, only: shr_file_getUnit, shr_file_freeUnit - use namelist_utils, only: find_group_name -#endif - -!----------------------------------------------------------------------- -!- module boilerplate -------------------------------------------------- -!----------------------------------------------------------------------- - implicit none - private ! Make the default access private - save - -!----------------------------------------------------------------------- -! Public interfaces ---------------------------------------------------- -!----------------------------------------------------------------------- - public t_initf - public t_setLogUnit - public t_getLogUnit - public t_profile_onf - public t_barrier_onf - public t_single_filef - public t_stampf - public t_startf - public t_stopf - public t_enablef - public t_disablef - public t_adj_detailf - public t_barrierf - public t_prf - public t_finalizef - -!----------------------------------------------------------------------- -! Private interfaces (local) ------------------------------------------- -!----------------------------------------------------------------------- - private perf_defaultopts - private perf_setopts - private papi_defaultopts - private papi_setopts - -!----------------------------------------------------------------------- -!- include statements -------------------------------------------------- -!----------------------------------------------------------------------- -#include -#include "gptl.inc" - -!----------------------------------------------------------------------- -! Private data --------------------------------------------------------- -!----------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! perf_mod options - !---------------------------------------------------------------------------- - integer, parameter :: def_p_logunit = 6 ! default - integer, private :: p_logunit = def_p_logunit - ! unit number for log output - - logical, parameter :: def_timing_initialized = .false. ! default - logical, private :: timing_initialized = def_timing_initialized - ! flag indicating whether timing library has - ! been initialized - - logical, parameter :: def_timing_disable = .false. ! default - logical, private :: timing_disable = def_timing_disable - ! flag indicating whether timers are disabled - - logical, parameter :: def_timing_barrier = .false. ! default - logical, private :: timing_barrier = def_timing_barrier - ! flag indicating whether the mpi_barrier in - ! t_barrierf should be called - - integer, parameter :: def_timer_depth_limit = 99999 ! default - integer, private :: timer_depth_limit = def_timer_depth_limit - ! integer indicating maximum number of levels of - ! timer nesting - - integer, parameter :: def_timing_detail_limit = 1 ! default - integer, private :: timing_detail_limit = def_timing_detail_limit - ! integer indicating maximum detail level to - ! 
profile - - integer, parameter :: init_timing_disable_depth = 0 ! init - integer, private :: timing_disable_depth = init_timing_disable_depth - ! integer indicating depth of t_disablef calls - - integer, parameter :: init_timing_detail = 0 ! init - integer, private :: cur_timing_detail = init_timing_detail - ! current timing detail level - - logical, parameter :: def_perf_single_file = .false. ! default - logical, private :: perf_single_file = def_perf_single_file - ! flag indicating whether the performance timer - ! output should be written to a single file - ! (per component communicator) or to a - ! separate file for each process - - integer, parameter :: def_perf_outpe_num = 0 ! default - integer, private :: perf_outpe_num = def_perf_outpe_num - ! maximum number of processes writing out - ! timing data (for this component communicator) - - integer, parameter :: def_perf_outpe_stride = 1 ! default - integer, private :: perf_outpe_stride = def_perf_outpe_stride - ! separation between process ids for processes - ! that are writing out timing data - ! (for this component communicator) - - logical, parameter :: def_perf_global_stats = .true. ! default - logical, private :: perf_global_stats = def_perf_global_stats - ! collect and print out global performance statistics - ! (for this component communicator) -#ifdef HAVE_MPI - integer, parameter :: def_perf_timer = GPTLmpiwtime ! default -#else -#ifdef CPRIBM - integer,parameter :: def_perf_timer = GPTLread_real_time -#else - integer,parameter :: def_perf_timer = GPTLgettimeofday -#endif -#endif - integer, private :: perf_timer = def_perf_timer ! default - ! integer indicating which timer to use - ! (as defined in gptl.inc) - -#ifdef HAVE_PAPI - logical, parameter :: def_perf_papi_enable = .false. ! default -#else - logical, parameter :: def_perf_papi_enable = .false. ! default -#endif - logical, private :: perf_papi_enable = def_perf_papi_enable - ! flag indicating whether the PAPI namelist - ! should be read and HW performance counters - ! used in profiling - - ! PAPI counter ids - integer, parameter :: PAPI_NULL = -1 - - integer, parameter :: def_papi_ctr1 = PAPI_NULL ! default - integer, private :: papi_ctr1 = def_papi_ctr1 - - integer, parameter :: def_papi_ctr2 = PAPI_NULL ! default - integer, private :: papi_ctr2 = def_papi_ctr2 - - integer, parameter :: def_papi_ctr3 = PAPI_NULL ! default - integer, private :: papi_ctr3 = def_papi_ctr3 - - integer, parameter :: def_papi_ctr4 = PAPI_NULL ! default - integer, private :: papi_ctr4 = def_papi_ctr4 - -!======================================================================= -contains -!======================================================================= - -! -!======================================================================== -! - subroutine t_getLogUnit(LogUnit) -!----------------------------------------------------------------------- -! Purpose: Get log unit number. -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - integer(SHR_KIND_IN), intent(OUT) :: LogUnit ! Unit number for log output -!----------------------------------------------------------------------- - - LogUnit = p_logunit - - return - end subroutine t_getLogUnit -! -!======================================================================== -! - subroutine t_setLogUnit(LogUnit) -!----------------------------------------------------------------------- -! Purpose: Set log unit number. -! Author: P. 
Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - integer(SHR_KIND_IN), intent(IN) :: LogUnit ! Unit number for log output -!----------------------------------------------------------------------- - - p_logunit = LogUnit -#ifndef USE_CSM_SHARE - call perfutils_setunit(p_logunit) -#endif - - return - end subroutine t_setLogUnit -! -!======================================================================== -! - subroutine perf_defaultopts(timing_disable_out, & - perf_timer_out, & - timer_depth_limit_out, & - timing_detail_limit_out, & - timing_barrier_out, & - perf_outpe_num_out, & - perf_outpe_stride_out, & - perf_single_file_out, & - perf_global_stats_out, & - perf_papi_enable_out ) -!----------------------------------------------------------------------- -! Purpose: Return default runtime options -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- - ! timers disable/enable option - logical, intent(out), optional :: timing_disable_out - ! performance timer option - integer, intent(out), optional :: perf_timer_out - ! timer depth limit option - integer, intent(out), optional :: timer_depth_limit_out - ! timer detail limit option - integer, intent(out), optional :: timing_detail_limit_out - ! timing barrier enable/disable option - logical, intent(out), optional :: timing_barrier_out - ! number of processes writing out timing data - integer, intent(out), optional :: perf_outpe_num_out - ! separation between process ids for processes that are writing out timing data - integer, intent(out), optional :: perf_outpe_stride_out - ! timing single / multple output file option - logical, intent(out), optional :: perf_single_file_out - ! collect and output global performance statistics option - logical, intent(out), optional :: perf_global_stats_out - ! calling PAPI to read HW performance counters option - logical, intent(out), optional :: perf_papi_enable_out -!----------------------------------------------------------------------- - if ( present(timing_disable_out) ) then - timing_disable_out = def_timing_disable - endif - if ( present(perf_timer_out) ) then - perf_timer_out = def_perf_timer - endif - if ( present(timer_depth_limit_out) ) then - timer_depth_limit_out = def_timer_depth_limit - endif - if ( present(timing_detail_limit_out) ) then - timing_detail_limit_out = def_timing_detail_limit - endif - if ( present(timing_barrier_out) ) then - timing_barrier_out = def_timing_barrier - endif - if ( present(perf_outpe_num_out) ) then - perf_outpe_num_out = def_perf_outpe_num - endif - if ( present(perf_outpe_stride_out) ) then - perf_outpe_stride_out = def_perf_outpe_stride - endif - if ( present(perf_single_file_out) ) then - perf_single_file_out = def_perf_single_file - endif - if ( present(perf_global_stats_out) ) then - perf_global_stats_out = def_perf_global_stats - endif - if ( present(perf_papi_enable_out) ) then - perf_papi_enable_out = def_perf_papi_enable - endif -! - return - end subroutine perf_defaultopts -! -!======================================================================== -! 
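[Editor's illustration] perf_defaultopts and perf_setopts form a get-defaults / apply-overrides pair: the compiled-in defaults live in the def_* parameters, perf_defaultopts hands them out through optional intent(out) arguments, and perf_setopts (below) applies whichever optional intent(in) overrides are present, but only before the timing library has been initialized. Both routines are private to the module and are driven from its public initialization path rather than called by components directly. The following self-contained sketch mirrors that pattern with a single illustrative option; the module, routine, and variable names here are not part of perf_mod.

    module opts_pattern_example
       implicit none
       private
       public :: opts_defaultopts, opts_setopts, depth_limit

       integer, parameter :: def_depth_limit = 99999          ! compiled-in default
       integer            :: depth_limit     = def_depth_limit

    contains

       subroutine opts_defaultopts(depth_limit_out)
          ! hand out the default through an optional intent(out) argument
          integer, intent(out), optional :: depth_limit_out
          if (present(depth_limit_out)) depth_limit_out = def_depth_limit
       end subroutine opts_defaultopts

       subroutine opts_setopts(depth_limit_in)
          ! apply an override only if the caller supplied one
          integer, intent(in), optional :: depth_limit_in
          if (present(depth_limit_in)) depth_limit = depth_limit_in
       end subroutine opts_setopts

    end module opts_pattern_example

    program show_opts_pattern
       use opts_pattern_example
       implicit none
       integer :: depth

       call opts_defaultopts(depth_limit_out=depth)   ! 1. start from the default
       depth = 4                                      ! 2. caller-side override (e.g. from a namelist)
       call opts_setopts(depth_limit_in=depth)        ! 3. push the final value back
       print *, 'depth limit is now ', depth_limit
    end program show_opts_pattern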
- subroutine perf_setopts(mastertask, & - LogPrint, & - timing_disable_in, & - perf_timer_in, & - timer_depth_limit_in, & - timing_detail_limit_in, & - timing_barrier_in, & - perf_outpe_num_in, & - perf_outpe_stride_in, & - perf_single_file_in, & - perf_global_stats_in, & - perf_papi_enable_in ) -!----------------------------------------------------------------------- -! Purpose: Set runtime options -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments---------------------------- -! - ! master process? - logical, intent(in) :: mastertask - ! Print out to log file? - logical, intent(IN) :: LogPrint - ! timers disable/enable option - logical, intent(in), optional :: timing_disable_in - ! performance timer option - integer, intent(in), optional :: perf_timer_in - ! timer depth limit option - integer, intent(in), optional :: timer_depth_limit_in - ! timer detail limit option - integer, intent(in), optional :: timing_detail_limit_in - ! timing barrier enable/disable option - logical, intent(in), optional :: timing_barrier_in - ! number of processes writing out timing data - integer, intent(in), optional :: perf_outpe_num_in - ! separation between process ids for processes that are writing out timing data - integer, intent(in), optional :: perf_outpe_stride_in - ! timing single / multple output file option - logical, intent(in), optional :: perf_single_file_in - ! collect and output global performance statistics option - logical, intent(in), optional :: perf_global_stats_in - ! calling PAPI to read HW performance counters option - logical, intent(in), optional :: perf_papi_enable_in -! -!---------------------------Local workspace----------------------------- -! - integer ierr ! error return -!----------------------------------------------------------------------- - if ( .not. timing_initialized ) then - - if ( present(timing_disable_in) ) then - timing_disable = timing_disable_in - if (timing_disable) then - ierr = GPTLdisable() - else - ierr = GPTLenable() - endif - endif - if ( present(perf_timer_in) ) then - if ((perf_timer_in .eq. GPTLgettimeofday) .or. & - (perf_timer_in .eq. GPTLnanotime) .or. & - (perf_timer_in .eq. GPTLread_real_time) .or. & - (perf_timer_in .eq. GPTLmpiwtime) .or. & - (perf_timer_in .eq. GPTLclockgettime) .or. & - (perf_timer_in .eq. GPTLpapitime)) then - perf_timer = perf_timer_in - else - if (mastertask) then - write(p_logunit,*) 'PERF_SETOPTS: illegal timer requested=',& - perf_timer_in, '. Request ignored.' - endif - endif - endif - if ( present(timer_depth_limit_in) ) then - timer_depth_limit = timer_depth_limit_in - endif - if ( present(timing_detail_limit_in) ) then - timing_detail_limit = timing_detail_limit_in - endif - if ( present(timing_barrier_in) ) then - timing_barrier = timing_barrier_in - endif - if ( present(perf_outpe_num_in) ) then - perf_outpe_num = perf_outpe_num_in - endif - if ( present(perf_outpe_stride_in) ) then - perf_outpe_stride = perf_outpe_stride_in - endif - if ( present(perf_single_file_in) ) then - perf_single_file = perf_single_file_in - endif - if ( present(perf_global_stats_in) ) then - perf_global_stats = perf_global_stats_in - endif - if ( present(perf_papi_enable_in) ) then -#ifdef HAVE_PAPI - perf_papi_enable = perf_papi_enable_in -#else - if (perf_papi_enable_in) then - if (mastertask) then - write(p_logunit,*) 'PERF_SETOPTS: PAPI library not linked in. ',& - 'Request to enable PAPI ignored.' - endif - endif - perf_papi_enable = .false. 
-#endif - endif -! - if (mastertask .and. LogPrint) then - write(p_logunit,*) '(t_initf) Using profile_disable=', timing_disable, & - ' profile_timer=', perf_timer - write(p_logunit,*) '(t_initf) profile_depth_limit=', timer_depth_limit, & - ' profile_detail_limit=', timing_detail_limit - write(p_logunit,*) '(t_initf) profile_barrier=', timing_barrier, & - ' profile_outpe_num=', perf_outpe_num - write(p_logunit,*) '(t_initf) profile_outpe_stride=', perf_outpe_stride , & - ' profile_single_file=', perf_single_file - write(p_logunit,*) '(t_initf) profile_global_stats=', perf_global_stats , & - ' profile_papi_enable=', perf_papi_enable - endif -! -#ifdef DEBUG - else - write(p_logunit,*) 'PERF_SETOPTS: timing library already initialized. Request ignored.' -#endif - endif -! - return - end subroutine perf_setopts - -! -!======================================================================== -! - subroutine papi_defaultopts(papi_ctr1_out, & - papi_ctr2_out, & - papi_ctr3_out, & - papi_ctr4_out ) -!----------------------------------------------------------------------- -! Purpose: Return default runtime PAPI counter options -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- - ! PAPI counter option #1 - integer, intent(out), optional :: papi_ctr1_out - ! PAPI counter option #2 - integer, intent(out), optional :: papi_ctr2_out - ! PAPI counter option #3 - integer, intent(out), optional :: papi_ctr3_out - ! PAPI counter option #4 - integer, intent(out), optional :: papi_ctr4_out -!----------------------------------------------------------------------- - if ( present(papi_ctr1_out) ) then - papi_ctr1_out = def_papi_ctr1 - endif - if ( present(papi_ctr2_out) ) then - papi_ctr2_out = def_papi_ctr2 - endif - if ( present(papi_ctr3_out) ) then - papi_ctr3_out = def_papi_ctr3 - endif - if ( present(papi_ctr4_out) ) then - papi_ctr4_out = def_papi_ctr4 - endif -! - return - end subroutine papi_defaultopts -! -!======================================================================== -! - subroutine papi_setopts(papi_ctr1_in, & - papi_ctr2_in, & - papi_ctr3_in, & - papi_ctr4_in ) -!----------------------------------------------------------------------- -! Purpose: Set runtime PAPI counter options -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments---------------------------- -! - ! performance counter option - integer, intent(in), optional :: papi_ctr1_in - ! performance counter option - integer, intent(in), optional :: papi_ctr2_in - ! performance counter option - integer, intent(in), optional :: papi_ctr3_in - ! performance counter option - integer, intent(in), optional :: papi_ctr4_in -! -!---------------------------Local workspace----------------------------- -! - integer ierr ! error return -!----------------------------------------------------------------------- - if ( .not. 
timing_initialized ) then - - if ( present(papi_ctr1_in) ) then - if (papi_ctr1_in < 0) then - papi_ctr1 = papi_ctr1_in - else - papi_ctr1 = PAPI_NULL - endif - endif - if ( present(papi_ctr2_in) ) then - if (papi_ctr2_in < 0) then - papi_ctr2 = papi_ctr2_in - else - papi_ctr2 = PAPI_NULL - endif - endif - if ( present(papi_ctr3_in) ) then - if (papi_ctr3_in < 0) then - papi_ctr3 = papi_ctr3_in - else - papi_ctr3 = PAPI_NULL - endif - endif - if ( present(papi_ctr4_in) ) then - if (papi_ctr4_in < 0) then - papi_ctr4 = papi_ctr4_in - else - papi_ctr4 = PAPI_NULL - endif - endif -! -#ifdef DEBUG - else - write(p_logunit,*) 'PAPI_SETOPTS: timing library already initialized. Request ignored.' -#endif - endif -! - return - end subroutine papi_setopts -! -!======================================================================== -! - logical function t_profile_onf() -!----------------------------------------------------------------------- -! Purpose: Return flag indicating whether profiling is currently active. -! Part of workaround to implement FVbarrierclock before -! communicators exposed in Pilgrim. Does not check level of -! event nesting. -! Author: P. Worley -!----------------------------------------------------------------------- - - if ((.not. timing_initialized) .or. & - (timing_disable_depth > 0) .or. & - (cur_timing_detail > timing_detail_limit)) then - t_profile_onf = .false. - else - t_profile_onf = .true. - endif - - end function t_profile_onf -! -!======================================================================== -! - logical function t_barrier_onf() -!----------------------------------------------------------------------- -! Purpose: Return timing_barrier. Part of workaround to implement -! FVbarrierclock before communicators exposed in Pilgrim. -! Author: P. Worley -!----------------------------------------------------------------------- - - t_barrier_onf = timing_barrier - - end function t_barrier_onf -! -!======================================================================== -! - logical function t_single_filef() -!----------------------------------------------------------------------- -! Purpose: Return perf_single_file. Used to control output of other -! performance data, only spmdstats currently. -! Author: P. Worley -!----------------------------------------------------------------------- - - t_single_filef = perf_single_file - - end function t_single_filef -! -!======================================================================== -! - subroutine t_stampf(wall, usr, sys) -!----------------------------------------------------------------------- -! Purpose: Record wallclock, user, and system times (seconds). -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Output arguments----------------------------- -! - real(shr_kind_r8), intent(out) :: wall ! wallclock time - real(shr_kind_r8), intent(out) :: usr ! user time - real(shr_kind_r8), intent(out) :: sys ! system time -! -!---------------------------Local workspace----------------------------- -! - integer ierr ! GPTL error return -! -!----------------------------------------------------------------------- -! - if ((.not. timing_initialized) .or. & - (timing_disable_depth > 0)) then - wall = 0.0 - usr = 0.0 - sys = 0.0 - else - ierr = GPTLstamp(wall, usr, sys) - endif - - return - end subroutine t_stampf -! -!======================================================================== -! 
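[Editor's illustration] t_stampf is the one query routine in this set: it returns zeros when timing is uninitialized or disabled and otherwise forwards to GPTLstamp, so it can be used for quick ad-hoc measurements without declaring a named event. A hedged usage sketch follows; it assumes the timing library has already been initialized, and do_expensive_work and the variable names are illustrative, not part of this module.

    subroutine report_region_cost()
       ! Bracket a region with t_stampf and difference the stamps.  When timing
       ! is disabled the stamps (and therefore the deltas) are all zero.
       ! do_expensive_work is a hypothetical work routine.
       use perf_mod,     only: t_stampf
       use shr_kind_mod, only: r8 => shr_kind_r8
       implicit none
       real(r8) :: wall0, usr0, sys0, wall1, usr1, sys1

       call t_stampf(wall0, usr0, sys0)
       call do_expensive_work()
       call t_stampf(wall1, usr1, sys1)

       write(*,'(a,3f12.4)') ' region wall/usr/sys (s): ', &
            wall1 - wall0, usr1 - usr0, sys1 - sys0
    end subroutine report_region_cost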
- subroutine t_startf(event, handle) -!----------------------------------------------------------------------- -! Purpose: Start an event timer -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - ! performance timer event name - character(len=*), intent(in) :: event -! -!---------------------------Input/Output arguments---------------------- -! - ! GPTL event handle - integer(shr_kind_i8), optional :: handle -! -!---------------------------Local workspace----------------------------- -! - integer ierr ! GPTL error return -! -!----------------------------------------------------------------------- -! - if ((timing_initialized) .and. & - (timing_disable_depth .eq. 0) .and. & - (cur_timing_detail .le. timing_detail_limit)) then - - if ( present (handle) ) then - ierr = GPTLstart_handle(event, handle) - else - ierr = GPTLstart(event) - endif - - endif - - return - end subroutine t_startf -! -!======================================================================== -! - subroutine t_stopf(event, handle) -!----------------------------------------------------------------------- -! Purpose: Stop an event timer -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - ! performance timer event name - character(len=*), intent(in) :: event -! -!---------------------------Input/Output arguments---------------------- -! - ! GPTL event handle - integer(shr_kind_i8), optional :: handle -! -!---------------------------Local workspace----------------------------- -! - integer ierr ! GPTL error return -! -!----------------------------------------------------------------------- -! - if ((timing_initialized) .and. & - (timing_disable_depth .eq. 0) .and. & - (cur_timing_detail .le. timing_detail_limit)) then - - if ( present (handle) ) then - ierr = GPTLstop_handle(event, handle) - else - ierr = GPTLstop(event) - endif - - endif - - return - end subroutine t_stopf -! -!======================================================================== -! - subroutine t_enablef() -!----------------------------------------------------------------------- -! Purpose: Enable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored -! in threaded regions. -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Local workspace----------------------------- -! - integer ierr ! GPTL error return -! -!---------------------------Externals----------------------------------- -! -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -! -!----------------------------------------------------------------------- -! - if (.not. timing_initialized) return - -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - - if (timing_disable_depth > 0) then - if (timing_disable_depth .eq. 1) then - ierr = GPTLenable() - endif - timing_disable_depth = timing_disable_depth - 1 - endif - - return - end subroutine t_enablef -! -!======================================================================== -! - subroutine t_disablef() -!----------------------------------------------------------------------- -! Purpose: Disable t_startf, t_stopf, t_stampf, and t_barrierf. Ignored -! in threaded regions. -! Author: P. 
Worley -!----------------------------------------------------------------------- -!---------------------------Local workspace----------------------------- -! - integer ierr ! GPTL error return -! -!---------------------------Externals----------------------------------- -! -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -! -!----------------------------------------------------------------------- -! - if (.not. timing_initialized) return - -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - - if (timing_disable_depth .eq. 0) then - ierr = GPTLdisable() - endif - timing_disable_depth = timing_disable_depth + 1 - - return - end subroutine t_disablef -! -!======================================================================== -! - subroutine t_adj_detailf(detail_adjustment) -!----------------------------------------------------------------------- -! Purpose: Modify current detail level. Ignored in threaded regions. -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - integer, intent(in) :: detail_adjustment ! user defined increase or - ! decrease in detail level -! -!---------------------------Externals----------------------------------- -! -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -! -!----------------------------------------------------------------------- -! - if (.not. timing_initialized) return - -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - - cur_timing_detail = cur_timing_detail + detail_adjustment - - return - end subroutine t_adj_detailf -! -!======================================================================== -! - subroutine t_barrierf(event, mpicom) -!----------------------------------------------------------------------- -! Purpose: Call (and time) mpi_barrier. Ignored inside OpenMP -! threaded regions. Note that barrier executed even if -! event not recorded because of level of timer event nesting. -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- - ! mpi communicator id - integer, intent(in), optional :: mpicom - ! performance timer event name - character(len=*), intent(in), optional :: event -! -!---------------------------Local workspace----------------------------- -! - integer ierr ! GPTL error return -! -!---------------------------Externals----------------------------------- -! -#if ( defined _OPENMP ) - logical omp_in_parallel - external omp_in_parallel -#endif -! -!----------------------------------------------------------------------- -! -#if ( defined _OPENMP ) - if (omp_in_parallel()) return -#endif - if ((timing_initialized) .and. & - (timing_disable_depth .eq. 0) .and. & - (cur_timing_detail .le. timing_detail_limit)) then - - if (timing_barrier) then - - if ( present (event) ) then - ierr = GPTLstart(event) - endif - - if ( present (mpicom) ) then - call shr_mpi_barrier(mpicom, 'T_BARRIERF: bad mpi communicator') - else - call shr_mpi_barrier(MPI_COMM_WORLD, 'T_BARRIERF: bad mpi communicator') - endif - - if ( present (event) ) then - ierr = GPTLstop(event) - endif - - endif - - endif - - return - end subroutine t_barrierf -! -!======================================================================== -! 
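[Editor's note: a minimal usage sketch, not part of the removed file. The start/stop/barrier wrappers above are meant to be called in matched pairs around named regions. This assumes the caller supplies an MPI communicator; the event names and routine name are illustrative only.]

      subroutine example_run_phase(mpicom)
         ! Illustrative sketch only: bracket one phase with an optional
         ! synchronizing barrier followed by a named timer.
         use perf_mod, only: t_barrierf, t_startf, t_stopf
         integer, intent(in) :: mpicom

         call t_barrierf('example_phase_BARRIER', mpicom)
         call t_startf('example_phase')
         ! ... work for this phase ...
         call t_stopf('example_phase')
      end subroutine example_run_phase

[All three calls become no-ops when profiling is disabled or the current detail level exceeds the detail limit, matching the guards in t_startf, t_stopf, and t_barrierf; t_barrierf additionally requires profile_barrier to be enabled.]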
- subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & - single_file, global_stats, output_thispe) -!----------------------------------------------------------------------- -! Purpose: Write out performance timer data -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - ! performance timer output file name - character(len=*), intent(in), optional :: filename - ! mpi communicator id - integer, intent(in), optional :: mpicom - ! maximum number of processes writing out timing data - integer, intent(in), optional :: num_outpe - ! separation between process ids for processes writing out data - integer, intent(in), optional :: stride_outpe - ! enable/disable the writing of data to a single file - logical, intent(in), optional :: single_file - ! enable/disable the collection of global statistics - logical, intent(in), optional :: global_stats - ! output timing data for this process - logical, intent(in), optional :: output_thispe -! -!---------------------------Local workspace----------------------------- -! - logical one_file ! flag indicting whether to write - ! all data to a single file - logical glb_stats ! flag indicting whether to compute - ! global statistics - logical pr_write ! flag indicating whether the current - ! GPTL output mode is write - logical write_data ! flag indicating whether this process - ! should output its timing data - integer i ! loop index - integer mpicom2 ! local copy of MPI communicator - integer me ! communicator local process id - integer npes ! local communicator group size - integer gme ! global process id - integer ierr ! MPI error return - integer outpe_num ! max number of processes writing out - ! timing data (excluding output_thispe) - integer outpe_stride ! separation between process ids for - ! processes writing out timing data - integer max_outpe ! max process id for processes - ! writing out timing data - integer signal ! send/recv variable for single - ! output file logic - integer str_length ! string length - integer unitn ! file unit number - integer cme_adj ! length of filename suffix - integer status (MPI_STATUS_SIZE) ! Status of message - character(len=7) cme ! string representation of process id - character(len=SHR_KIND_CX+14) fname ! timing output filename -!----------------------------------------------------------------------- -! - if (.not. timing_initialized) return - - call t_startf("t_prf") -!$OMP MASTER - call mpi_comm_rank(MPI_COMM_WORLD, gme, ierr) - if ( present(mpicom) ) then - mpicom2 = mpicom - call mpi_comm_size(mpicom2, npes, ierr) - if (ierr .eq. MPI_ERR_COMM) then - call shr_sys_abort('T_PRF: bad mpi communicator') - endif - call mpi_comm_rank(mpicom2, me, ierr) - else - call mpi_comm_size(MPI_COMM_WORLD, npes, ierr) - mpicom2 = MPI_COMM_WORLD - me = gme - endif - - do i=1,SHR_KIND_CX+14 - fname(i:i) = " " - enddo - - unitn = shr_file_getUnit() - - ! determine what the current output mode is (append or write) - if (GPTLpr_query_write() == 1) then - pr_write = .true. - ierr = GPTLpr_set_append() - else - pr_write=.false. - endif - - ! Determine whether to write all data to a single fie - if (present(single_file)) then - one_file = single_file - else - one_file = perf_single_file - endif - - ! Determine whether to compute global statistics - if (present(global_stats)) then - glb_stats = global_stats - else - glb_stats = perf_global_stats - endif - - ! 
Determine which processes are writing out timing data - write_data = .false. - - if (present(num_outpe)) then - if (num_outpe < 0) then - outpe_num = npes - else - outpe_num = num_outpe - endif - else - if (perf_outpe_num < 0) then - outpe_num = npes - else - outpe_num = perf_outpe_num - endif - endif - - if (present(stride_outpe)) then - if (stride_outpe < 1) then - outpe_stride = 1 - else - outpe_stride = stride_outpe - endif - else - if (perf_outpe_stride < 1) then - outpe_stride = 1 - else - outpe_stride = perf_outpe_stride - endif - endif - - max_outpe = min(outpe_num*outpe_stride, npes) - 1 - - if ((mod(me, outpe_stride) .eq. 0) .and. (me .le. max_outpe)) & - write_data = .true. - - if (present(output_thispe)) then - write_data = output_thispe - endif - - ! If a single timing output file, take turns writing to it. - if (one_file) then - - if ( present(filename) ) then - str_length = min(SHR_KIND_CX,len_trim(filename)) - fname(1:str_length) = filename(1:str_length) - else - fname(1:10) = "timing_all" - endif - - signal = 0 - if (me .eq. 0) then - - if (glb_stats) then - open( unitn, file=trim(fname), status='UNKNOWN' ) - write( unitn, 100) npes - 100 format(/,"***** GLOBAL STATISTICS (",I6," MPI TASKS) *****",/) - close( unitn ) - - ierr = GPTLpr_summary_file(mpicom2, trim(fname)) - endif - - if (write_data) then - if (glb_stats) then - open( unitn, file=trim(fname), status='OLD', position='APPEND' ) - else - open( unitn, file=trim(fname), status='UNKNOWN' ) - endif - - write( unitn, 101) me, gme - 101 format(/,"************ PROCESS ",I6," (",I6,") ************",/) - close( unitn ) - - ierr = GPTLpr_file(trim(fname)) - endif - - else - - if (glb_stats) then - ierr = GPTLpr_summary_file(mpicom2, trim(fname)) - endif - - call mpi_recv (signal, 1, mpi_integer, me-1, me-1, mpicom2, status, ierr) - if (ierr /= mpi_success) then - write(p_logunit,*) 'T_PRF: mpi_recv failed ierr=',ierr - call shr_sys_abort() - end if - - if (write_data) then - open( unitn, file=trim(fname), status='OLD', position='APPEND' ) - write( unitn, 101) me, gme - close( unitn ) - - ierr = GPTLpr_file(trim(fname)) - endif - - endif - - if (me+1 < npes) & - call mpi_send (signal, 1, mpi_integer, me+1, me, mpicom2, ierr) - - else - - if (glb_stats) then - if ( present(filename) ) then - str_length = min(SHR_KIND_CX-6,len_trim(filename)) - fname(1:str_length) = filename(1:str_length) - else - str_length = 6 - fname(1:10) = "timing" - endif - fname(str_length+1:str_length+6) = '_stats' - - if (me .eq. 0) then - open( unitn, file=trim(fname), status='UNKNOWN' ) - write( unitn, 100) npes - close( unitn ) - endif - - ierr = GPTLpr_summary_file(mpicom2, trim(fname)) - fname(str_length+1:str_length+6) = ' ' - endif - - if (write_data) then - if (npes .le. 10) then - write(cme,'(i1.1)') me - cme_adj = 2 - elseif (npes .le. 100) then - write(cme,'(i2.2)') me - cme_adj = 3 - elseif (npes .le. 1000) then - write(cme,'(i3.3)') me - cme_adj = 4 - elseif (npes .le. 10000) then - write(cme,'(i4.4)') me - cme_adj = 5 - elseif (npes .le. 100000) then - write(cme,'(i5.5)') me - cme_adj = 6 - else - write(cme,'(i6.6)') me - cme_adj = 7 - endif - - if ( present(filename) ) then - str_length = min(SHR_KIND_CX-cme_adj,len_trim(filename)) - fname(1:str_length) = filename(1:str_length) - else - str_length = 6 - fname(1:10) = "timing" - endif - fname(str_length+1:str_length+1) = '.' 
- fname(str_length+2:str_length+cme_adj) = cme - - open( unitn, file=trim(fname), status='UNKNOWN' ) - write( unitn, 101) me, gme - close( unitn ) - - ierr = GPTLpr_file(trim(fname)) - endif - - endif - - call shr_file_freeUnit( unitn ) - - ! reset GPTL output mode - if (pr_write) then - ierr = GPTLpr_set_write() - endif - -!$OMP END MASTER - call t_stopf("t_prf") - - return - end subroutine t_prf -! -!======================================================================== -! - subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask) -!----------------------------------------------------------------------- -! Purpose: Set default values of runtime timing options -! before namelists prof_inparm and papi_inparm are read, -! read namelists (and broadcast, if SPMD), -! then initialize timing library. -! Author: P. Worley (based on shr_inputinfo_mod and runtime_opts) -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - character(len=*), intent(IN) :: NLFilename ! Name-list filename - logical, optional, intent(IN) :: LogPrint ! If print out to log file - integer, optional, intent(IN) :: LogUnit ! Unit number for log output - integer, optional, intent(IN) :: mpicom ! MPI communicator - logical, optional, intent(IN) :: MasterTask ! If MPI master task -! -!---------------------------Local workspace----------------------------- -! - character(len=*), parameter :: subname = '(T_INITF) ' - logical :: MasterTask2 ! If MPI master task - logical :: LogPrint2 ! If print to log - - integer me ! communicator local process id - integer ierr ! error return - integer unitn ! file unit number - integer papi_ctr1_id ! PAPI counter id - integer papi_ctr2_id ! PAPI counter id - integer papi_ctr3_id ! PAPI counter id - integer papi_ctr4_id ! PAPI counter id -! -!---------------------------Namelists ---------------------------------- -! - logical profile_disable - logical profile_barrier - logical profile_single_file - logical profile_global_stats - integer profile_depth_limit - integer profile_detail_limit - integer profile_outpe_num - integer profile_outpe_stride - integer profile_timer - logical profile_papi_enable - namelist /prof_inparm/ profile_disable, profile_barrier, & - profile_single_file, profile_global_stats, & - profile_depth_limit, & - profile_detail_limit, profile_outpe_num, & - profile_outpe_stride, profile_timer, & - profile_papi_enable - - character(len=16) papi_ctr1_str - character(len=16) papi_ctr2_str - character(len=16) papi_ctr3_str - character(len=16) papi_ctr4_str - namelist /papi_inparm/ papi_ctr1_str, papi_ctr2_str, & - papi_ctr3_str, papi_ctr4_str -!----------------------------------------------------------------------- - if ( timing_initialized ) then -#ifdef DEBUG - write(p_logunit,*) 'T_INITF: timing library already initialized. Request ignored.' -#endif - return - endif - -!$OMP MASTER - if ( present(LogUnit) ) then - call t_setLogUnit(LogUnit) - else - call t_setLogUnit(def_p_logunit) - endif - - if ( present(MasterTask) .and. present(mpicom) )then - call mpi_comm_rank(mpicom, me, ierr) - if (ierr .eq. MPI_ERR_COMM) then - call shr_sys_abort('T_INITF: bad mpi communicator') - endif - if (me .eq. 0) then - MasterTask2 = .true. - else - MasterTask2 = .false. - endif - else - MasterTask2 = .true. - end if - - if ( present(LogPrint) ) then - LogPrint2 = LogPrint - else - LogPrint2 = .true. - endif - - ! 
Set PERF defaults, then override with user-specified input - call perf_defaultopts(timing_disable_out=profile_disable, & - perf_timer_out=profile_timer, & - timer_depth_limit_out=profile_depth_limit, & - timing_detail_limit_out=profile_detail_limit, & - timing_barrier_out=profile_barrier, & - perf_outpe_num_out = profile_outpe_num, & - perf_outpe_stride_out = profile_outpe_stride, & - perf_single_file_out=profile_single_file, & - perf_global_stats_out=profile_global_stats, & - perf_papi_enable_out=profile_papi_enable ) - if ( MasterTask2 ) then - - ! Read in the prof_inparm namelist from NLFilename if it exists - - write(p_logunit,*) '(t_initf) Read in prof_inparm namelist from: '//trim(NLFilename) - unitn = shr_file_getUnit() - - ierr = 1 - open( unitn, file=trim(NLFilename), status='old', iostat=ierr ) - if (ierr .eq. 0) then - - ! Look for prof_inparm group name in the input file. - ! If found, leave the file positioned at that namelist group. - call find_group_name(unitn, 'prof_inparm', status=ierr) - - if (ierr == 0) then ! found prof_inparm - read(unitn, nml=prof_inparm, iostat=ierr) - if (ierr /= 0) then - call shr_sys_abort( subname//':: namelist read returns an'// & - ' error condition for prof_inparm' ) - end if - end if - - close(unitn) - - endif - call shr_file_freeUnit( unitn ) - - endif - - ! This logic assumes that there will be only one MasterTask - ! per communicator, and that this MasterTask is process 0. - if ( present(MasterTask) .and. present(mpicom) )then - call shr_mpi_bcast( profile_disable, MPICom ) - call shr_mpi_bcast( profile_barrier, MPICom ) - call shr_mpi_bcast( profile_single_file, MPICom ) - call shr_mpi_bcast( profile_global_stats, MPICom ) - call shr_mpi_bcast( profile_papi_enable, MPICom ) - call shr_mpi_bcast( profile_depth_limit, MPICom ) - call shr_mpi_bcast( profile_detail_limit, MPICom ) - call shr_mpi_bcast( profile_outpe_num, MPICom ) - call shr_mpi_bcast( profile_outpe_stride, MPICom ) - call shr_mpi_bcast( profile_timer, MPICom ) - end if - call perf_setopts (MasterTask2, LogPrint2, & - timing_disable_in=profile_disable, & - perf_timer_in=profile_timer, & - timer_depth_limit_in=profile_depth_limit, & - timing_detail_limit_in=profile_detail_limit, & - timing_barrier_in=profile_barrier, & - perf_outpe_num_in=profile_outpe_num, & - perf_outpe_stride_in=profile_outpe_stride, & - perf_single_file_in=profile_single_file, & - perf_global_stats_in=profile_global_stats, & - perf_papi_enable_in=profile_papi_enable ) - - ! Set PAPI defaults, then override with user-specified input - if (perf_papi_enable) then - call papi_defaultopts(papi_ctr1_out=papi_ctr1_id, & - papi_ctr2_out=papi_ctr2_id, & - papi_ctr3_out=papi_ctr3_id, & - papi_ctr4_out=papi_ctr4_id ) - - if ( MasterTask2 ) then - papi_ctr1_str = "PAPI_NO_CTR" - papi_ctr2_str = "PAPI_NO_CTR" - papi_ctr3_str = "PAPI_NO_CTR" - papi_ctr4_str = "PAPI_NO_CTR" - - - ! Read in the papi_inparm namelist from NLFilename if it exists - - write(p_logunit,*) '(t_initf) Read in papi_inparm namelist from: '//trim(NLFilename) - unitn = shr_file_getUnit() - - ierr = 1 - open( unitn, file=trim(NLFilename), status='old', iostat=ierr ) - if (ierr .eq. 0) then - ! Look for papi_inparm group name in the input file. - ! If found, leave the file positioned at that namelist group. - call find_group_name(unitn, 'papi_inparm', status=ierr) - - if (ierr == 0) then ! 
found papi_inparm - read(unitn, nml=papi_inparm, iostat=ierr) - if (ierr /= 0) then - call shr_sys_abort( subname//':: namelist read returns an'// & - ' error condition for papi_inparm' ) - end if - end if - - close(unitn) - - endif - call shr_file_freeUnit( unitn ) - - ! if enabled and nothing set, use "defaults" - if ((papi_ctr1_str(1:11) .eq. "PAPI_NO_CTR") .and. & - (papi_ctr2_str(1:11) .eq. "PAPI_NO_CTR") .and. & - (papi_ctr3_str(1:11) .eq. "PAPI_NO_CTR") .and. & - (papi_ctr4_str(1:11) .eq. "PAPI_NO_CTR")) then -!pw papi_ctr1_str = "PAPI_TOT_CYC" -!pw papi_ctr2_str = "PAPI_TOT_INS" -!pw papi_ctr3_str = "PAPI_FP_OPS" -!pw papi_ctr4_str = "PAPI_FP_INS" - papi_ctr1_str = "PAPI_FP_OPS" - endif - - if (papi_ctr1_str(1:11) /= "PAPI_NO_CTR") then - ierr = gptlevent_name_to_code(trim(papi_ctr1_str), papi_ctr1_id) - endif - if (papi_ctr2_str(1:11) /= "PAPI_NO_CTR") then - ierr = gptlevent_name_to_code(trim(papi_ctr2_str), papi_ctr2_id) - endif - if (papi_ctr3_str(1:11) /= "PAPI_NO_CTR") then - ierr = gptlevent_name_to_code(trim(papi_ctr3_str), papi_ctr3_id) - endif - if (papi_ctr4_str(1:11) /= "PAPI_NO_CTR") then - ierr = gptlevent_name_to_code(trim(papi_ctr4_str), papi_ctr4_id) - endif - - endif - ! This logic assumes that there will be only one MasterTask - ! per communicator, and that this MasterTask is process 0. - if ( present(MasterTask) .and. present(mpicom) )then - call shr_mpi_bcast( papi_ctr1_id, MPICom ) - call shr_mpi_bcast( papi_ctr2_id, MPICom ) - call shr_mpi_bcast( papi_ctr3_id, MPICom ) - call shr_mpi_bcast( papi_ctr4_id, MPICom ) - end if - - call papi_setopts (papi_ctr1_in=papi_ctr1_id, & - papi_ctr2_in=papi_ctr2_id, & - papi_ctr3_in=papi_ctr3_id, & - papi_ctr4_in=papi_ctr4_id ) - endif -!$OMP END MASTER -!$OMP BARRIER - - if (timing_disable) return - -!$OMP MASTER - ! - ! Set options and initialize timing library. - ! - ! Set timer - if (gptlsetutr (perf_timer) < 0) call shr_sys_abort (subname//':: gptlsetutr') - ! - ! For logical settings, 2nd arg 0 - ! to gptlsetoption means disable, non-zero means enable - ! - ! Turn off CPU timing (expensive) - ! - if (gptlsetoption (gptlcpu, 0) < 0) call shr_sys_abort (subname//':: gptlsetoption') - ! - ! Set max timer depth - ! - if (gptlsetoption (gptldepthlimit, timer_depth_limit) < 0) & - call shr_sys_abort (subname//':: gptlsetoption') - ! - ! Next 2 calls only work if PAPI is enabled. These examples enable counting - ! of total cycles and floating point ops, respectively - ! - if (perf_papi_enable) then - if (papi_ctr1 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr1, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') - endif - if (papi_ctr2 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr2, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') - endif - if (papi_ctr3 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr3, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') - endif - if (papi_ctr4 /= PAPI_NULL) then - if (gptlsetoption (papi_ctr4, 1) < 0) call shr_sys_abort (subname//':: gptlsetoption') - endif - endif - ! - ! Initialize the timing lib. This call must occur after all gptlsetoption - ! calls and before all other timing lib calls. - ! - if (gptlinitialize () < 0) call shr_sys_abort (subname//':: gptlinitialize') - timing_initialized = .true. -!$OMP END MASTER -!$OMP BARRIER - - return - end subroutine t_initf -! -!======================================================================== -! - subroutine t_finalizef() -!----------------------------------------------------------------------- -! 
Purpose: shut down timing library -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Local workspace----------------------------- -! - integer ierr ! GPTL error return -! -!----------------------------------------------------------------------- -! - if (.not. timing_initialized) return - -!$OMP MASTER - ierr = GPTLfinalize() - timing_initialized = .false. -!$OMP END MASTER -!$OMP BARRIER - - return - end subroutine t_finalizef - -!=============================================================================== - -end module perf_mod diff --git a/src/externals/pio1/timing/perf_utils.F90 b/src/externals/pio1/timing/perf_utils.F90 deleted file mode 100644 index 241caefa199..00000000000 --- a/src/externals/pio1/timing/perf_utils.F90 +++ /dev/null @@ -1,531 +0,0 @@ -module perf_utils - -!----------------------------------------------------------------------- -! -! Purpose: This module supplies the csm_share and CAM utilities -! needed by perf_mod.F90 (when the csm_share and CAM utilities -! are not available). -! -! Author: P. Worley, October 2007 -! -! $Id$ -! -!----------------------------------------------------------------------- - -!----------------------------------------------------------------------- -!- module boilerplate -------------------------------------------------- -!----------------------------------------------------------------------- - implicit none - private ! Make the default access private - save - -!----------------------------------------------------------------------- -! Public interfaces ---------------------------------------------------- -!----------------------------------------------------------------------- - public perfutils_setunit - public shr_sys_abort - public shr_mpi_barrier - public shr_file_getUnit - public shr_file_freeUnit - public find_group_name - public to_lower - public shr_mpi_bcast - - interface shr_mpi_bcast ; module procedure & - shr_mpi_bcastl0, & - shr_mpi_bcasti0 - end interface - -!----------------------------------------------------------------------- -! Private interfaces --------------------------------------------------- -!----------------------------------------------------------------------- - private shr_sys_flush - private shr_mpi_chkerr - private shr_mpi_abort - -!----------------------------------------------------------------------- -!- include statements -------------------------------------------------- -!----------------------------------------------------------------------- -#include -#include "gptl.inc" - -!----------------------------------------------------------------------- -! Public data --------------------------------------------------------- -!----------------------------------------------------------------------- - - !---------------------------------------------------------------------------- - ! precision/kind constants (from csm_share/shr/shr_kind_mod.F90) - !---------------------------------------------------------------------------- - integer,parameter,public :: SHR_KIND_R8 = selected_real_kind(12) ! 8 byte real - integer,parameter,public :: SHR_KIND_I8 = selected_int_kind (13) ! 8 byte integer - integer,parameter,public :: SHR_KIND_IN = kind(1) ! native integer - integer,parameter,public :: SHR_KIND_CL = 256 ! long char - integer,parameter,public :: SHR_KIND_CX = 512 ! extra-long char - -!----------------------------------------------------------------------- -! 
Private data --------------------------------------------------------- -!----------------------------------------------------------------------- - - integer, parameter :: def_pu_logunit = 6 ! default - integer, private :: pu_logunit = def_pu_logunit - ! unit number for log output - -!======================================================================= -contains -!======================================================================= - -! -!======================================================================== -! - subroutine perfutils_setunit(LogUnit) -!----------------------------------------------------------------------- -! Purpose: Set log unit number. -! Author: P. Worley -!----------------------------------------------------------------------- -!---------------------------Input arguments----------------------------- -! - integer(SHR_KIND_IN), intent(IN) :: LogUnit ! Unit number for log output -!----------------------------------------------------------------------- - pu_logunit = LogUnit -! - return -! - end subroutine perfutils_setunit - -!============== Routines from csm_share/shr/shr_sys_mod.F90 ============ -!======================================================================= - -SUBROUTINE shr_sys_abort(string) - - IMPLICIT none - - character(*) ,optional :: string ! error message string - - !----- local ----- - integer(SHR_KIND_IN) :: ierr - logical :: flag - - !----- formats ----- - character(*),parameter :: subName = '(shr_sys_abort) ' - character(*),parameter :: F00 = "('(shr_sys_abort) ',4a)" - -!------------------------------------------------------------------------------- -! PURPOSE: consistent stopping mechanism -! (dumbed down from original shr_sys_mod.F90 version for use in perf_mod) -!------------------------------------------------------------------------------- - - call shr_sys_flush(pu_logunit) - - if ( present(string) ) then - if (len_trim(string) > 0) then - write(pu_logunit,*) trim(subName),' ERROR: ',trim(string) - else - write(pu_logunit,*) trim(subName),' ERROR ' - endif - else - write(pu_logunit,*) trim(subName),' ERROR ' - endif - - write(pu_logunit,F00) 'WARNING: calling mpi_abort() and stopping' - call shr_sys_flush(pu_logunit) - call mpi_abort(MPI_COMM_WORLD,0,ierr) - call shr_sys_flush(pu_logunit) - call abort() - - stop - -END SUBROUTINE shr_sys_abort - -!=============================================================================== -!=============================================================================== - -SUBROUTINE shr_sys_flush(unit) - - IMPLICIT none - - !----- arguments ----- - integer(SHR_KIND_IN) :: unit ! flush output buffer for this unit - - !----- formats ----- - character(*),parameter :: subName = '(shr_sys_flush) ' - character(*),parameter :: F00 = "('(shr_sys_flush) ',4a)" - -!------------------------------------------------------------------------------- -! 
PURPOSE: an architecture independant system call -!------------------------------------------------------------------------------- - -#if (defined IRIX64 || defined CRAY || defined OSF1 || defined SUNOS || defined LINUX || defined NEC_SX || defined UNICOSMP) - call flush(unit) -#endif -#if (defined AIX) - call flush_(unit) -#endif - -#if (!defined CRAY && !defined IRIX64 && !defined AIX && !defined OSF1 && !defined SUNOS && !defined LINUX && !defined NEC_SX && !defined UNICOSMP) -!pw if (s_loglev > 0) write(pu_logunit,F00) 'WARNING: no implementation of flush for this architecture' -#endif - -END SUBROUTINE shr_sys_flush - -!=============================================================================== - -!================== Routines from csm_share/shr/shr_mpi_mod.F90 =============== -!=============================================================================== - -SUBROUTINE shr_mpi_chkerr(rcode,string) - - IMPLICIT none - - !----- arguments --- - integer(SHR_KIND_IN), intent(in) :: rcode ! input MPI error code - character(*), intent(in) :: string ! message - - !----- local --- - character(*),parameter :: subName = '(shr_mpi_chkerr) ' - character(MPI_MAX_ERROR_STRING) :: lstring - integer(SHR_KIND_IN) :: len - integer(SHR_KIND_IN) :: ierr - -!------------------------------------------------------------------------------- -! PURPOSE: layer on MPI error checking -!------------------------------------------------------------------------------- - - if (rcode /= MPI_SUCCESS) then - call MPI_ERROR_STRING(rcode,lstring,len,ierr) - write(pu_logunit,*) trim(subName),":",lstring(1:len) - call shr_mpi_abort(string,rcode) - endif - -END SUBROUTINE shr_mpi_chkerr - -!=============================================================================== -!=============================================================================== - -SUBROUTINE shr_mpi_abort(string,rcode) - - IMPLICIT none - - !----- arguments --- - character(*),optional,intent(in) :: string ! message - integer,optional,intent(in) :: rcode ! optional code - - !----- local --- - character(*),parameter :: subName = '(shr_mpi_abort) ' - integer(SHR_KIND_IN) :: ierr - -!------------------------------------------------------------------------------- -! PURPOSE: MPI abort -!------------------------------------------------------------------------------- - - if ( present(string) .and. present(rcode) ) then - write(pu_logunit,*) trim(subName),":",trim(string),rcode - endif - call MPI_ABORT(MPI_COMM_WORLD,rcode,ierr) - -END SUBROUTINE shr_mpi_abort - -!=============================================================================== -!=============================================================================== - -SUBROUTINE shr_mpi_barrier(comm,string) - - IMPLICIT none - - !----- arguments --- - integer,intent(in) :: comm - character(*),optional,intent(in) :: string ! message - - !----- local --- - character(*),parameter :: subName = '(shr_mpi_barrier) ' - integer(SHR_KIND_IN) :: ierr - -!------------------------------------------------------------------------------- -! 
PURPOSE: MPI barrier -!------------------------------------------------------------------------------- - - call MPI_BARRIER(comm,ierr) - if (present(string)) then - call shr_mpi_chkerr(ierr,subName//trim(string)) - else - call shr_mpi_chkerr(ierr,subName) - endif - -END SUBROUTINE shr_mpi_barrier - -!=============================================================================== -!=============================================================================== - -SUBROUTINE shr_mpi_bcasti0(vec,comm,string) - - IMPLICIT none - - !----- arguments --- - integer(SHR_KIND_IN), intent(inout):: vec ! vector of 1 - integer(SHR_KIND_IN), intent(in) :: comm ! mpi communicator - character(*),optional,intent(in) :: string ! message - - !----- local --- - character(*),parameter :: subName = '(shr_mpi_bcasti0) ' - integer(SHR_KIND_IN) :: ierr - integer(SHR_KIND_IN) :: lsize - -!------------------------------------------------------------------------------- -! PURPOSE: Broadcast an integer -!------------------------------------------------------------------------------- - - lsize = 1 - - call MPI_BCAST(vec,lsize,MPI_INTEGER,0,comm,ierr) - if (present(string)) then - call shr_mpi_chkerr(ierr,subName//trim(string)) - else - call shr_mpi_chkerr(ierr,subName) - endif - -END SUBROUTINE shr_mpi_bcasti0 - -!=============================================================================== -!=============================================================================== - -SUBROUTINE shr_mpi_bcastl0(vec,comm,string) - - IMPLICIT none - - !----- arguments --- - logical, intent(inout):: vec ! vector of 1 - integer(SHR_KIND_IN), intent(in) :: comm ! mpi communicator - character(*),optional,intent(in) :: string ! message - - !----- local --- - character(*),parameter :: subName = '(shr_mpi_bcastl0) ' - integer(SHR_KIND_IN) :: ierr - integer(SHR_KIND_IN) :: lsize - -!------------------------------------------------------------------------------- -! PURPOSE: Broadcast a logical -!------------------------------------------------------------------------------- - - lsize = 1 - - call MPI_BCAST(vec,lsize,MPI_LOGICAL,0,comm,ierr) - if (present(string)) then - call shr_mpi_chkerr(ierr,subName//trim(string)) - else - call shr_mpi_chkerr(ierr,subName) - endif - -END SUBROUTINE shr_mpi_bcastl0 - -!=============================================================================== - -!================== Routines from csm_share/shr/shr_file_mod.F90 =============== -!=============================================================================== -!BOP =========================================================================== -! -! !IROUTINE: shr_file_getUnit -- Get a free FORTRAN unit number -! -! !DESCRIPTION: Get the next free FORTRAN unit number. -! -! !REVISION HISTORY: -! 2005-Dec-14 - E. Kluzek - creation -! 2007-Oct-21 - P. Worley - dumbed down for use in perf_mod -! -! !INTERFACE: ------------------------------------------------------------------ - -INTEGER FUNCTION shr_file_getUnit () - - implicit none - -!EOP - - !----- local parameters ----- - integer(SHR_KIND_IN),parameter :: shr_file_minUnit = 10 ! Min unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_maxUnit = 99 ! Max unit number to give - - !----- local variables ----- - integer(SHR_KIND_IN) :: n ! loop index - logical :: opened ! 
If unit opened or not - - !----- formats ----- - character(*),parameter :: subName = '(shr_file_getUnit) ' - character(*),parameter :: F00 = "('(shr_file_getUnit) ',A,I4,A)" - -!------------------------------------------------------------------------------- -! Notes: -!------------------------------------------------------------------------------- - - ! --- Choose first available unit other than 0, 5, or 6 ------ - do n=shr_file_minUnit, shr_file_maxUnit - inquire( n, opened=opened ) - if (n == 5 .or. n == 6 .or. opened) then - cycle - end if - shr_file_getUnit = n - return - end do - - call shr_sys_abort( subName//': Error: no available units found' ) - -END FUNCTION shr_file_getUnit -!=============================================================================== - -!=============================================================================== -!BOP =========================================================================== -! -! !IROUTINE: shr_file_freeUnit -- Free up a FORTRAN unit number -! -! !DESCRIPTION: Free up the given unit number -! -! !REVISION HISTORY: -! 2005-Dec-14 - E. Kluzek - creation -! 2007-Oct-21 - P. Worley - dumbed down for use in perf_mod -! -! !INTERFACE: ------------------------------------------------------------------ - -SUBROUTINE shr_file_freeUnit ( unit) - - implicit none - -! !INPUT/OUTPUT PARAMETERS: - - integer(SHR_KIND_IN),intent(in) :: unit ! unit number to be freed - -!EOP - - !----- local parameters ----- - integer(SHR_KIND_IN),parameter :: shr_file_minUnit = 10 ! Min unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_maxUnit = 99 ! Max unit number to give - - !----- formats ----- - character(*), parameter :: subName = '(shr_file_freeUnit) ' - character(*), parameter :: F00 = "('(shr_file_freeUnit) ',A,I4,A)" - -!------------------------------------------------------------------------------- -! Notes: -!------------------------------------------------------------------------------- - - if (unit < 0 .or. unit > shr_file_maxUnit) then -!pw if (s_loglev > 0) write(pu_logunit,F00) 'invalid unit number request:', unit - else if (unit == 0 .or. unit == 5 .or. unit == 6) then - call shr_sys_abort( subName//': Error: units 0, 5, and 6 must not be freed' ) - end if - - return - -END SUBROUTINE shr_file_freeUnit -!=============================================================================== - -!============= Routines from atm/cam/src/utils/namelist_utils.F90 ============== -!=============================================================================== - -subroutine find_group_name(unit, group, status) - -!--------------------------------------------------------------------------------------- -! Purpose: -! Search a file that contains namelist input for the specified namelist group name. -! Leave the file positioned so that the current record is the first record of the -! input for the specified group. -! -! Method: -! Read the file line by line. Each line is searched for an '&' which may only -! be preceded by blanks, immediately followed by the group name which is case -! insensitive. If found then backspace the file so the current record is the -! one containing the group name and return success. Otherwise return -1. -! -! Author: B. Eaton, August 2007 -!--------------------------------------------------------------------------------------- - - integer, intent(in) :: unit ! fortran unit attached to file - character(len=*), intent(in) :: group ! namelist group name - integer, intent(out) :: status ! 
0 for success, -1 if group name not found - - ! Local variables - - integer :: len_grp - integer :: ios ! io status - character(len=80) :: inrec ! first 80 characters of input record - character(len=80) :: inrec2 ! left adjusted input record - character(len=len(group)) :: lc_group - - !--------------------------------------------------------------------------- - - len_grp = len_trim(group) - lc_group = to_lower(group) - - ios = 0 - do while (ios <= 0) - - read(unit, '(a)', iostat=ios, end=102) inrec - - if (ios <= 0) then ! ios < 0 indicates an end of record condition - - ! look for group name in this record - - ! remove leading blanks - inrec2 = to_lower(adjustl(inrec)) - - ! check for leading '&' - if (inrec2(1:1) == '&') then - - ! check for case insensitive group name - if (trim(lc_group) == inrec2(2:len_grp+1)) then - - ! found group name. backspace to leave file position at this record - backspace(unit) - status = 0 - return - - end if - end if - end if - - end do - - 102 continue ! end of file processing - status = -1 - -end subroutine find_group_name -!=============================================================================== - -!================ Routines from atm/cam/src/utils/string_utils.F90 ============= -!=============================================================================== - -function to_lower(str) - -!----------------------------------------------------------------------- -! Purpose: -! Convert character string to lower case. -! -! Method: -! Use achar and iachar intrinsics to ensure use of ascii collating sequence. -! -! Author: B. Eaton, July 2001 -! -! $Id$ -!----------------------------------------------------------------------- - implicit none - - character(len=*), intent(in) :: str ! String to convert to lower case - character(len=len(str)) :: to_lower - -! Local variables - - integer :: i ! Index - integer :: aseq ! ascii collating sequence - integer :: upper_to_lower ! integer to convert case - character(len=1) :: ctmp ! Character temporary -!----------------------------------------------------------------------- - upper_to_lower = iachar("a") - iachar("A") - - do i = 1, len(str) - ctmp = str(i:i) - aseq = iachar(ctmp) - if ( aseq >= iachar("A") .and. aseq <= iachar("Z") ) & - ctmp = achar(aseq + upper_to_lower) - to_lower(i:i) = ctmp - end do - -end function to_lower -!=============================================================================== - -end module perf_utils diff --git a/src/externals/pio1/timing/private.h b/src/externals/pio1/timing/private.h deleted file mode 100644 index c8a52a9f356..00000000000 --- a/src/externals/pio1/timing/private.h +++ /dev/null @@ -1,156 +0,0 @@ -/* -** $Id: private.h,v 1.74 2011-03-28 20:55:19 rosinski Exp $ -** -** Author: Jim Rosinski -** -** Contains definitions private to GPTL and inaccessible to invoking user environment -*/ - -#include -#include - -#ifndef NO_COMM_F2C -#define HAVE_COMM_F2C -#endif - -#ifndef MIN -#define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) -#endif - -#ifndef MAX -#define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) -#endif - -#define STRMATCH(X,Y) (strcmp((X),(Y)) == 0) - -/* Output counts less than PRTHRESH will be printed as integers */ -#define PRTHRESH 1000000L - -/* Maximum allowed callstack depth */ -#define MAX_STACK 128 - -/* longest timer name allowed (probably safe to just change) */ -#define MAX_CHARS 63 - -/* -** max allowable number of PAPI counters, or derived events. For convenience, -** set to max (# derived events, # papi counters required) so "avail" lists -** all available options. 
-*/ -#define MAX_AUX 9 - -#ifndef __cplusplus -typedef enum {false = 0, true = 1} bool; /* mimic C++ */ -#endif - -typedef struct { - long last_utime; /* saved usr time from "start" */ - long last_stime; /* saved sys time from "start" */ - long accum_utime; /* accumulator for usr time */ - long accum_stime; /* accumulator for sys time */ -} Cpustats; - -typedef struct { - double last; /* timestamp from last call */ - double accum; /* accumulated time */ - float max; /* longest time for start/stop pair */ - float min; /* shortest time for start/stop pair */ -} Wallstats; - -typedef struct { - long long last[MAX_AUX]; /* array of saved counters from "start" */ - long long accum[MAX_AUX]; /* accumulator for counters */ -} Papistats; - -typedef struct { - int counter; /* PAPI or Derived counter */ - char *namestr; /* PAPI or Derived counter as string */ - char *str8; /* print string for output timers (8 chars) */ - char *str16; /* print string for output timers (16 chars) */ - char *longstr; /* long descriptive print string */ -} Entry; - -typedef struct { - Entry event; - int numidx; /* derived event: PAPI counter array index for numerator */ - int denomidx; /* derived event: PAPI counter array index for denominator */ -} Pr_event; - -typedef struct TIMER { - char name[MAX_CHARS+1]; /* timer name (user input) */ - bool onflg; /* timer currently on or off */ -#ifdef ENABLE_PMPI - double nbytes; /* number of bytes for MPI call */ -#endif -#ifdef HAVE_PAPI - Papistats aux; /* PAPI stats */ -#endif - Wallstats wall; /* wallclock stats */ - Cpustats cpu; /* cpu stats */ - unsigned long count; /* number of start/stop calls */ - unsigned long nrecurse; /* number of recursive start/stop calls */ - void *address; /* address of timer: used only by _instr routines */ - struct TIMER *next; /* next timer in linked list */ - struct TIMER **parent; /* array of parents */ - struct TIMER **children; /* array of children */ - int *parent_count; /* array of call counts, one for each parent */ - unsigned int recurselvl; /* recursion level */ - unsigned int nchildren; /* number of children */ - unsigned int nparent; /* number of parents */ - unsigned int norphan; /* number of times this timer was an orphan */ - int num_desc; /* number of descendants */ -} Timer; - -typedef struct { - Timer **entries; /* array of timers hashed to the same value */ - unsigned int nument; /* number of entries hashed to the same value */ -} Hashentry; - -/* Function prototypes */ - -extern int GPTLerror (const char *, ...); /* print error msg and return */ -extern void GPTLset_abort_on_error (bool val); /* set flag to abort on error */ -extern void *GPTLallocate (const int); /* malloc wrapper */ - -extern int GPTLstart_instr (void *); /* auto-instrumented start */ -extern int GPTLstop_instr (void *); /* auto-instrumented stop */ -extern int GPTLis_initialized (void); /* needed by MPI_Init wrapper */ - -#ifdef __cplusplus -extern "C" { -#endif - -extern void __cyg_profile_func_enter (void *, void *); -extern void __cyg_profile_func_exit (void *, void *); - -#ifdef __cplusplus -}; -#endif - -/* -** These are needed for communication between gptl.c and gptl_papi.c -*/ - -#ifdef HAVE_PAPI -extern int GPTL_PAPIsetoption (const int, const int); -extern int GPTL_PAPIinitialize (const int, const bool, int *, Entry *); -extern int GPTL_PAPIstart (const int, Papistats *); -extern int GPTL_PAPIstop (const int, Papistats *); -extern void GPTL_PAPIprstr (FILE *); -extern void GPTL_PAPIpr (FILE *, const Papistats *, const int, const int, const 
double); -extern void GPTL_PAPIadd (Papistats *, const Papistats *); -extern void GPTL_PAPIfinalize (int); -extern void GPTL_PAPIquery (const Papistats *, long long *, int); -extern int GPTL_PAPIget_eventvalue (const char *, const Papistats *, double *); -extern bool GPTL_PAPIis_multiplexed (void); -extern void GPTL_PAPIprintenabled (FILE *); -extern void read_counters100 (void); -extern int GPTLget_npapievents (void); -extern int GPTLcreate_and_start_events (const int); -#endif - -#ifdef ENABLE_PMPI -extern Timer *GPTLgetentry (const char *); -extern int GPTLpmpi_setoption (const int, const int); -extern int GPTLpr_has_been_called (void); /* needed by MPI_Finalize wrapper*/ -#endif diff --git a/src/externals/pio1/timing/threadutil.c b/src/externals/pio1/timing/threadutil.c deleted file mode 100644 index 86e4681c1ca..00000000000 --- a/src/externals/pio1/timing/threadutil.c +++ /dev/null @@ -1,380 +0,0 @@ -/* -** $Id: threadutil.c,v 1.28 2009/12/31 01:51:59 rosinski Exp $ -** -** Author: Jim Rosinski -** -** Utility functions handle thread-based GPTL needs. -*/ - -#include -#include - -#include "private.h" - -/* Max allowable number of threads (used only when THREADED_PTHREADS is true) */ -#define MAX_THREADS 128 - -/* VERBOSE is a debugging ifdef local to this file */ -#undef VERBOSE - -/* Ensure that threadinit() is called only once */ -static bool first = true; - -/**********************************************************************************/ -/* -** 3 sets of routines: OMP threading, PTHREADS, unthreaded -*/ - -#if ( defined THREADED_OMP ) - -#include - -/* array of thread ids used to determine if thread has been started (omp only) */ -static int *threadid_omp; - -/* -** threadinit: Initialize threadid_omp and set number of threads -** -** Output arguments: -** nthreads: number of threads (set to zero, reset to maxthreads by get_thread_num) -** maxthreads: max number of threads -*/ - -int threadinit (int *nthreads, int *maxthreads) -{ - int t; /* loop index */ - - *maxthreads = MAX ((1), (omp_get_max_threads ())); - *nthreads = 0; - - if (omp_get_thread_num () > 0) - return GPTLerror ("GPTL: threadinit: MUST be called only by master thread"); - - if ( ! first) - return GPTLerror ("GPTL: threadinit: MUST only be called once"); - - first = false; - - threadid_omp = GPTLallocate (*maxthreads * sizeof (int)); - for (t = 0; t < *maxthreads; ++t) - threadid_omp[t] = -1; - -#ifdef VERBOSE - printf ("OMP threadinit: Set *maxthreads=%d *nthreads=%d\n", *maxthreads, *nthreads); -#endif - - return 0; -} - -/* -** threadfinalize: clean up -*/ - -void threadfinalize () -{ - free (threadid_omp); - first = true; -} - -/* -** get_thread_num: determine thread number of the calling thread -** -** Input args: -** nthreads: number of threads -** maxthreads: number of threads (unused in OpenMP case) -** -** Return value: thread number (success) or GPTLerror (failure) -*/ - -int get_thread_num (int *nthreads, int *maxthreads) -{ - int t; /* thread number */ - - if ((t = omp_get_thread_num ()) >= *maxthreads) - return GPTLerror ("get_thread_num: returned id=%d exceeds maxthreads=%d\n", - t, *maxthreads); - - /* - ** The following test is true only once for each thread, so no need to worry - ** about false cache sharing - */ - - if (threadid_omp[t] == -1) { - threadid_omp[t] = t; - -#ifdef VERBOSE - printf ("OMP get_thread_num: 1st call t=%d\n", t); -#endif - -#ifdef HAVE_PAPI - /* - ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, - ** create and start an event set for the new thread. 
- */ - - if (GPTLget_npapievents () > 0) { -#ifdef VERBOSE - printf ("OMP get_thread_num: Starting EventSet t=%d\n", t); -#endif - if (GPTLcreate_and_start_events (t) < 0) - return GPTLerror ("get_thread_num: error from GPTLcreate_and_start_events for thread %d\n", - t); - } -#endif - - *nthreads = *maxthreads; - } - return t; -} - -void print_threadmapping (int nthreads, FILE *fp) -{ - int n; - - fprintf (fp, "\n"); - fprintf (fp, "Thread mapping:\n"); - for (n = 0; n < nthreads; ++n) - fprintf (fp, "threadid_omp[%d]=%d\n", n, threadid_omp[n]); -} - -/**********************************************************************************/ -/* -** PTHREADS -*/ - -#elif ( defined THREADED_PTHREADS ) - -#include - -static int lock_mutex (void); /* lock a mutex for entry into a critical region */ -static int unlock_mutex (void); /* unlock a mutex for exit from a critical region */ - -static pthread_mutex_t t_mutex = PTHREAD_MUTEX_INITIALIZER; -static pthread_t *threadid; - -/* -** threadinit: Set number of threads and max number of threads -** -** Output arguments: -** nthreads: number of threads (init to zero here, increment in get_thread_num) -** maxthreads: max number of threads (MAX_THREADS) -** -** Return value: 0 (success) or GPTLerror (failure) -*/ - -int threadinit (int *nthreads, int *maxthreads) -{ - int nbytes; - int t; - - /* Manage the threadid array which maps physical thread IDs to logical IDs */ - - nbytes = MAX_THREADS * sizeof (pthread_t); - if ( ! (threadid = (pthread_t *) GPTLallocate (nbytes))) - return GPTLerror ("threadinit: malloc failure for %d items\n", MAX_THREADS); - - if ( ! first) - return GPTLerror ("GPTL: threadinit: MUST only be called once"); - - first = false; - - /* - ** Initialize nthreads to 0 and define the threadid array now that initialization - ** is done. The actual value will be determined as get_thread_num is called. - */ - - *nthreads = 0; - *maxthreads = MAX_THREADS; - - for (t = 0; t < *maxthreads; ++t) - threadid[t] = (pthread_t) -1; - -#ifdef VERBOSE - printf ("PTHREADS threadinit: Set *maxthreads=%d *nthreads=%d\n", *maxthreads, *nthreads); -#endif - - return 0; -} - -/* -** threadfinalize: clean up -*/ - -void threadfinalize () -{ - free (threadid); - first = true; -} - -/* -** get_thread_num: determine zero-based thread number of the calling thread. -** Also: update nthreads and maxthreads if necessary. -** -** Input/output args: -** nthreads: number of threads -** maxthreads: max number of threads -** -** Return value: thread number (success) or GPTLerror (failure) -*/ - -int get_thread_num (int *nthreads, int *maxthreads) -{ - int n; /* return value: loop index over number of threads */ - pthread_t mythreadid; /* thread id from pthreads library */ - - mythreadid = pthread_self (); - - if (lock_mutex () < 0) - return GPTLerror ("get_thread_num: mutex lock failure\n"); - - /* - ** Loop over known physical thread IDs. When my id is found, map it - ** to logical thread id for indexing. If not found return a negative - ** number. - ** A critical region is necessary because acess to - ** the array threadid must be by only one thread at a time. - */ - - for (n = 0; n < *nthreads; ++n) - if (pthread_equal (mythreadid, threadid[n])) - break; - - /* - ** If our thread id is not in the known list, add to it after checking that - ** we do not have too many threads. 
- */ - - if (n == *nthreads) { - if (*nthreads >= MAX_THREADS) { - if (unlock_mutex () < 0) - fprintf (stderr, "get_thread_num: mutex unlock failure\n"); - - return GPTLerror ("get_thread_num: nthreads=%d is too big Recompile " - "with larger value of MAX_THREADS\n", *nthreads); - } - - threadid[n] = mythreadid; - -#ifdef VERBOSE - printf ("PTHREADS get_thread_num: 1st call threadid=%lu maps to location %d\n", (unsigned long) mythreadid, n); -#endif - -#ifdef HAVE_PAPI - - /* - ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, - ** create and start an event set for the new thread. - */ - - if (GPTLget_npapievents () > 0) { -#ifdef VERBOSE - printf ("PTHREADS get_thread_num: Starting EventSet threadid=%lu location=%d\n", - (unsigned long) mythreadid, n); -#endif - if (GPTLcreate_and_start_events (n) < 0) { - if (unlock_mutex () < 0) - fprintf (stderr, "get_thread_num: mutex unlock failure\n"); - - return GPTLerror ("get_thread_num: error from GPTLcreate_and_start_events for thread %d\n", - n); - } - } -#endif - - ++*nthreads; -#ifdef VERBOSE - printf ("PTHREADS get_thread_num: *nthreads=%d\n", *nthreads); -#endif - } - - if (unlock_mutex () < 0) - return GPTLerror ("get_thread_num: mutex unlock failure\n"); - - return n; -} - -/* -** lock_mutex: lock a mutex for private access -*/ - -static int lock_mutex () -{ - if (pthread_mutex_lock (&t_mutex) != 0) - return GPTLerror ("pthread_lock_mutex failure\n"); - return 0; -} - -/* -** unlock_mutex: unlock a mutex from private access -*/ - -static int unlock_mutex () -{ - if (pthread_mutex_unlock (&t_mutex) != 0) - return GPTLerror ("pthread_unlock_mutex failure\n"); - return 0; -} - -void print_threadmapping (int nthreads, FILE *fp) -{ - int n; - - fprintf (fp, "\n"); - fprintf (fp, "Thread mapping:\n"); - for (n = 0; n < nthreads; ++n) - fprintf (fp, "threadid[%d]=%d\n", n, (int) threadid[n]); -} - -/**********************************************************************************/ -/* -** Unthreaded case -*/ - -#else - -static int threadid = -1; - -int threadinit (int *nthreads, int *maxthreads) -{ - if ( ! first) - return GPTLerror ("GPTL: threadinit: MUST only be called once"); - - first = false; - *nthreads = 0; - *maxthreads = 1; - return 0; -} - -void threadfinalize () -{ - threadid = -1; - first = true; -} - -int get_thread_num (int *nthreads, int *maxthreads) -{ -#ifdef HAVE_PAPI - /* - ** When HAVE_PAPI is true, if 1 or more PAPI events are enabled, - ** create and start an event set for the new thread. 
- */ - - if (threadid == -1 && GPTLget_npapievents () > 0) { - if (GPTLcreate_and_start_events (0) < 0) - return GPTLerror ("get_thread_num: error from GPTLcreate_and_start_events for thread %0\n"); - - threadid = 0; - } -#endif - - *nthreads = 1; - return 0; -} - -void print_threadmapping (int nthreads, FILE *fp) -{ - fprintf (fp, "\n"); - fprintf (fp, "threadid[0]=0\n"); -} - -#endif diff --git a/src/externals/pio2/.gitignore b/src/externals/pio2/.gitignore deleted file mode 100644 index 5eddd0d76f2..00000000000 --- a/src/externals/pio2/.gitignore +++ /dev/null @@ -1,31 +0,0 @@ -.project -html/ -*~ -\#*\# -*.o -*.in -*.lo -*.la -Makefile -acinclude.m4 -aclocal.m4 -atconfig -autom4te.cache -config-h.in -config.* -configure -stamp-h1 -conftest* -missing -libtool -install-sh -ltmain.sh -compile -depcomp -build/ -.deps/ -.libs/ -m4/ -*.nc -*.log -*.gz diff --git a/src/externals/pio2/.travis.yml b/src/externals/pio2/.travis.yml deleted file mode 100644 index d96018a36fb..00000000000 --- a/src/externals/pio2/.travis.yml +++ /dev/null @@ -1,57 +0,0 @@ -language: c -dist: trusty -sudo: false - -branches: - only: - - master - -addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - pkg-config netcdf-bin libnetcdf-dev openmpi-bin libopenmpi-dev gfortran doxygen graphviz - -before_install: - - test -n $CC && unset CC - - test -n $FC && unset FC - - test -n $CPPFLAGS && unset CPPFLAGS - - test -n FCFLAGS && unset FCFLAGS - -before_script: - - export CC=mpicc - - export FC=mpif90 - - export CPPFLAGS='-I/usr/include' - - wget https://parallel-netcdf.github.io/Release/pnetcdf-1.11.0.tar.gz - - tar -xzvf pnetcdf-1.11.0.tar.gz - - ls -l - - pushd pnetcdf-1.11.0 - - ./configure --prefix=/usr --enable-shared - - make - - sudo make install - - popd -env: - global: - - CC=mpicc - - FC=mpif90 - - CPPFLAGS='-I/usr/include' - - CFLAGS='-std=c99' - - LDFLAGS='-L/usr/lib' - -script: - - autoreconf -i - - export CFLAGS='-std=c99 -fsanitize=address -fno-omit-frame-pointer -Werror' - - export FFLAGS='-fsanitize=address -fno-omit-frame-pointer' - - export FCFLAGS='-fsanitize=address -fno-omit-frame-pointer -Werror' - - export DISTCHECK_CONFIGURE_FLAGS='--enable-fortran' - - ./configure --enable-fortran --enable-developer-docs - - make -j distcheck - - make -j distclean - - rm -rf build - - mkdir build - - cd build - - cmake -DPIO_HDF5_LOGGING=On -DPIO_USE_MALLOC=On -DPIO_ENABLE_LOGGING=On -DPIO_ENABLE_TIMING=Off .. - - make VERBOSE=1 - - make tests VERBOSE=1 - - make test VERBOSE=1 \ No newline at end of file diff --git a/src/externals/pio2/CMakeLists.txt b/src/externals/pio2/CMakeLists.txt deleted file mode 100644 index 837cad837ba..00000000000 --- a/src/externals/pio2/CMakeLists.txt +++ /dev/null @@ -1,209 +0,0 @@ -cmake_minimum_required (VERSION 2.8.12) -project (PIO C Fortran) -#cmake_policy(VERSION 3.5.2) - -# The project version number. -set(VERSION_MAJOR 2 CACHE STRING "Project major version number.") -set(VERSION_MINOR 4 CACHE STRING "Project minor version number.") -set(VERSION_PATCH 4 CACHE STRING "Project patch version number.") -mark_as_advanced(VERSION_MAJOR VERSION_MINOR VERSION_PATCH) - -# The size of the data buffer for write/read_darray(). 
-set(PIO_BUFFER_SIZE 134217728) - -#============================================================================== -# USER-DEFINED OPTIONS (set with "-DOPT=VAL" from command line) -#============================================================================== - -#===== Library Options ===== -option (PIO_ENABLE_FORTRAN "Enable the Fortran library builds" ON) -option (PIO_ENABLE_TIMING "Enable the use of the GPTL timing library" ON) -option (PIO_ENABLE_LOGGING "Enable debug logging (large output possible)" OFF) -option (PIO_ENABLE_DOC "Enable building PIO documentation" ON) -option (PIO_ENABLE_COVERAGE "Enable code coverage" OFF) -option (PIO_ENABLE_EXAMPLES "Enable PIO examples" ON) -option (PIO_INTERNAL_DOC "Enable PIO developer documentation" OFF) -option (PIO_TEST_BIG_ENDIAN "Enable test to see if machine is big endian" ON) -option (PIO_USE_MPIIO "Enable support for MPI-IO auto detect" ON) -option (PIO_USE_MPISERIAL "Enable mpi-serial support (instead of MPI)" OFF) -option (PIO_USE_MALLOC "Use native malloc (instead of bget package)" OFF) -option (PIO_USE_PNETCDF_VARD "Use pnetcdf put_vard " OFF) -option (WITH_PNETCDF "Require the use of PnetCDF" ON) - -# Set a variable that appears in the config.h.in file. -if(PIO_USE_PNETCDF_VARD) - set(USE_VARD 1) -else() - set(USE_VARD 0) -endif() - -# Set a variable that appears in the config.h.in file. -if(PIO_USE_MALLOC) - set(USE_MALLOC 1) -else() - set(USE_MALLOC 0) -endif() - -# Set a variable that appears in the config.h.in file. -if(PIO_ENABLE_LOGGING) - set(ENABLE_LOGGING 1) -else() - set(ENABLE_LOGGING 0) -endif() - -if(PIO_USE_MPISERIAL) - set(USE_MPI_SERIAL 1) -else() - set(USE_MPI_SERIAL 0) -endif() - -#============================================================================== -# PREPEND TO CMAKE MODULE PATH -#============================================================================== - -#===== Local modules ===== -list (APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) - -#===== External modules ===== -if (NOT DEFINED USER_CMAKE_MODULE_PATH) - message (STATUS "Importing CMake_Fortran_utils") - execute_process( - COMMAND git clone https://github.com/CESM-Development/CMake_Fortran_utils - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - OUTPUT_QUIET - ERROR_QUIET) - find_path (USER_CMAKE_MODULE_PATH - NAMES mpiexec.cmake - HINTS ${CMAKE_BINARY_DIR}/CMake_Fortran_utils) - if (USER_CMAKE_MODULE_PATH) - message (STATUS "Importing CMake_Fortran_utils - success") - else () - message (FATAL_ERROR "Failed to import CMake_Fortran_utils") - endif () -endif () -set (USER_CMAKE_MODULE_PATH ${USER_CMAKE_MODULE_PATH} - CACHE STRING "Location of the CMake_Fortran_utils") -list (APPEND CMAKE_MODULE_PATH ${USER_CMAKE_MODULE_PATH}) - -INCLUDE (CheckTypeSize) - -#===== MPI ===== -if (PIO_USE_MPISERIAL) - find_package (MPISERIAL COMPONENTS C REQUIRED) - if (MPISERIAL_C_FOUND) - set (CMAKE_REQUIRED_INCLUDES ${MPISERIAL_C_INCLUDE_DIRS}) - endif () -else () - find_package (MPI REQUIRED) - set (CMAKE_REQUIRED_INCLUDES ${MPI_INCLUDE_PATH}) -endif () - -SET(CMAKE_EXTRA_INCLUDE_FILES "mpi.h") -check_type_size("MPI_Offset" SIZEOF_MPI_OFFSET) -SET(CMAKE_EXTRA_INCLUDE_FILES) - -#===== Library Variables ===== -set (PIO_FILESYSTEM_HINTS IGNORE CACHE STRING "Filesystem hints (lustre or gpfs)") - -#===== Testing Options ===== -option (PIO_ENABLE_TESTS "Enable the testing builds" ON) -option (PIO_VALGRIND_CHECK "Enable memory leak check using valgrind" OFF) - -#============================================================================== -# BACKWARDS COMPATIBILITY 
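The check_type_size("MPI_Offset" SIZEOF_MPI_OFFSET) probe above records the width of MPI_Offset for config.h. The same question can be answered directly with a tiny MPI program; this is a sketch that assumes an MPI compiler wrapper (e.g. mpicc) is available:

```c
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    /* MPI_Offset is the integer type MPI-IO and PnetCDF use for file offsets;
       the CMake build stores its size as SIZEOF_MPI_OFFSET in config.h. */
    printf("sizeof(MPI_Offset) = %zu bytes\n", sizeof(MPI_Offset));
    MPI_Finalize();
    return 0;
}
```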
-#============================================================================== - -# Old NETCDF_DIR variable --> NetCDF_PATH -if (DEFINED NETCDF_DIR) - set (NetCDF_PATH ${NETCDF_DIR} - CACHE STRING "Location of the NetCDF library installation") -endif () - -# Old PNETCDF_DIR variable --> PnetCDF_PATH -if (DEFINED PNETCDF_DIR) - set (PnetCDF_PATH ${PNETCDF_DIR} - CACHE STRING "Location of the PnetCDF library installation") -endif () - -#============================================================================== -# HELPFUL GLOBAL VARIABLES -#============================================================================== - -# System Name -string (TOUPPER "${CMAKE_SYSTEM_NAME}" CMAKE_SYSTEM_NAME_CAPS) -set (CMAKE_SYSTEM_DIRECTIVE "${CMAKE_SYSTEM_NAME_CAPS}" - CACHE STRING "System name preprocessor directive") - -# C Compiler Name -string (TOUPPER "${CMAKE_C_COMPILER_ID}" CMAKE_C_COMPILER_NAME) -if (CMAKE_C_COMPILER_NAME STREQUAL "XL") - set (CMAKE_C_COMPILER_NAME "IBM") -endif () -set (CMAKE_C_COMPILER_DIRECTIVE "CPR${CMAKE_C_COMPILER_NAME}" - CACHE STRING "C compiler name preprocessor directive") - -# Fortran Compiler Name -string (TOUPPER "${CMAKE_Fortran_COMPILER_ID}" CMAKE_Fortran_COMPILER_NAME) -if (CMAKE_Fortran_COMPILER_NAME STREQUAL "XL") - set (CMAKE_Fortran_COMPILER_NAME "IBM") -endif () -set (CMAKE_Fortran_COMPILER_DIRECTIVE "CPR${CMAKE_Fortran_COMPILER_NAME}" - CACHE STRING "Fortran compiler name preprocessor directive") - -# configure a header file to pass some of the CMake settings -# to the source code -configure_file ( - "${PROJECT_SOURCE_DIR}/cmake_config.h.in" - "${PROJECT_BINARY_DIR}/config.h" - ) - -#============================================================================== -# SET CODE COVERAGE COMPILER FLAGS -#============================================================================== - -# Only support GNU compilers at this time -if (PIO_ENABLE_COVERAGE) - if (CMAKE_C_COMPILER_NAME STREQUAL "GNU") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage") - else () - message (WARNING "The C compiler is non-GNU: coverage of C code could NOT be enabled") - endif () - if (CMAKE_Fortran_COMPILER_NAME STREQUAL "GNU") - set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fprofile-arcs -ftest-coverage") - else () - message (WARNING "The Fortran compiler is non-GNU: coverage of Fortran code could NOT be enabled") - endif () -endif () - -#============================================================================== -# INCLUDE SOURCE DIRECTORIES -#============================================================================== - -# Libraries -add_subdirectory (src) - -#============================================================================== -# TESTING TARGET -#============================================================================== - -# Custom "piotests" target (builds the test executables) -add_custom_target (tests) - -# Custom "check" target that depends upon "tests" -add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}) -add_dependencies (check tests) - -# Tests -if (PIO_ENABLE_TESTS) - enable_testing() - include (CTest) - add_subdirectory (tests) - if (PIO_ENABLE_EXAMPLES) - add_subdirectory (examples) - endif () -endif () - -# Documentation -if (PIO_ENABLE_DOC) - add_subdirectory (doc) -endif () diff --git a/src/externals/pio2/COPYRIGHT b/src/externals/pio2/COPYRIGHT deleted file mode 100644 index 8652d01dda1..00000000000 --- a/src/externals/pio2/COPYRIGHT +++ /dev/null @@ -1,16 +0,0 @@ 
-/****************************************************************************** - * - * - * - * Copyright (C) 2009-2019 - * - * Permission to use, copy, modify, and distribute this software and its - * documentation under the terms of the GNU General Public License is hereby - * granted. No representations are made about the suitability of this software - * for any purpose. It is provided "as is" without express or implied warranty. - * See the GNU General Public License for more details. - * - * Documents produced by Doxygen are derivative works derived from the - * input used in their production; they are not affected by this license. - * - */ \ No newline at end of file diff --git a/src/externals/pio2/CTestConfig.cmake b/src/externals/pio2/CTestConfig.cmake deleted file mode 100644 index cd7099ae6bb..00000000000 --- a/src/externals/pio2/CTestConfig.cmake +++ /dev/null @@ -1,23 +0,0 @@ -## This file should be placed in the root directory of your project. -## Then modify the CMakeLists.txt file in the root directory of your -## project to incorporate the testing dashboard. -## -## # The following are required to submit to the CDash dashboard: -## ENABLE_TESTING() -## INCLUDE(CTest) - -set (CTEST_PROJECT_NAME "PIO") -set (CTEST_NIGHTLY_START_TIME "00:00:00 EST") - -set (CTEST_DROP_METHOD "http") -if (DEFINED ENV{PIO_DASHBOARD_DROP_SITE}) - set (CTEST_DROP_SITE "$ENV{PIO_DASHBOARD_DROP_SITE}") -else () - set (CTEST_DROP_SITE "my.cdash.org") -endif () -if (DEFINED ENV{PIO_DASHBOARD_PROJECT_NAME}) - set (CTEST_DROP_LOCATION "/submit.php?project=$ENV{PIO_DASHBOARD_PROJECT_NAME}") -else () - set (CTEST_DROP_LOCATION "/submit.php?project=PIO") -endif () -set (CTEST_DROP_SITE_CDASH TRUE) diff --git a/src/externals/pio2/CTestScript.cmake b/src/externals/pio2/CTestScript.cmake deleted file mode 100644 index 23d2a25a418..00000000000 --- a/src/externals/pio2/CTestScript.cmake +++ /dev/null @@ -1,193 +0,0 @@ -#============================================================================== -# -# This is the CTest script for PIO builds and submission to the CTest -# Dashboard site: my.cdash.org. -# -# Example originally stolen from: -# http://www.vtk.org/Wiki/CTest:Using_CTEST_and_CDASH_without_CMAKE -#============================================================================== - -#--------------------------------------- -#-- User-defined setup from environment -#--------------------------------------- - -## -- CTest Dashboard Root Directory -if (DEFINED ENV{PIO_DASHBOARD_ROOT}) - set (CTEST_DASHBOARD_ROOT "$ENV{PIO_DASHBOARD_ROOT}") -else () - set (CTEST_DASHBOARD_ROOT "$ENV{HOME}/pio-dashboard") -endif () - -## -- Compiler ID -if (DEFINED ENV{PIO_COMPILER_ID}) - set (compid "$ENV{PIO_COMPILER_ID}") -else () - set (compid "?") -endif () - -## -- CTest Dashboard Build Group -set (CTEST_BUILD_GROUP "${CTEST_SCRIPT_ARG}") - -#--------------------------------------- -#-- Get the machine environment -#--------------------------------------- - -## -- Set hostname - -find_program (HOSTNAME_CMD NAMES hostname) -execute_process (COMMAND ${HOSTNAME_CMD} - OUTPUT_VARIABLE HOSTNAME - OUTPUT_STRIP_TRAILING_WHITESPACE) - -## -- Set hostname ID (e.g., alcf, nwsc, nersc, ...) 
-message ("hostname is ${HOSTNAME}") - -# UCAR/NWSC Machines -if (HOSTNAME MATCHES "^yslogin" OR - HOSTNAME MATCHES "^geyser" OR - HOSTNAME MATCHES "^caldera" OR - HOSTNAME MATCHES "^pronghorn") - set (HOSTNAME_ID "nwsc") -# New UCAR/NWSC SGI Machines -elseif (HOSTNAME MATCHES "^laramie" OR - HOSTNAME MATCHES "^chadmin" OR - HOSTNAME MATCHES "^cheyenne") - set (HOSTNAME_ID "nwscla") -# ALCF/Argonne Machines -elseif (HOSTNAME MATCHES "^mira" OR - HOSTNAME MATCHES "^cetus" OR - HOSTNAME MATCHES "^vesta" OR - HOSTNAME MATCHES "^cooley") - set (HOSTNAME_ID "alcf") -# NERSC Machines -elseif (HOSTNAME MATCHES "^edison" OR - HOSTNAME MATCHES "^cori" OR - HOSTNAME MATCHES "^nid") - set (HOSTNAME_ID "nersc") -# Blue Waters at NCSA -elseif (HOSTNAME MATCHES "^h2ologin" ) - set (HOSTNAME_ID "ncsa") -# CGD local linux cluster -elseif (HOSTNAME MATCHES "^hobart") - set (HOSTNAME_ID "cgd") -# Argonne Linux workstations -elseif (HOSTNAME MATCHES "^compute001" OR - HOSTNAME MATCHES "^thwomp" OR - HOSTNAME MATCHES "^stomp" OR - HOSTNAME MATCHES "^crush" OR - HOSTNAME MATCHES "^crank" OR - HOSTNAME MATCHES "^steamroller" OR - HOSTNAME MATCHES "^grind" OR - HOSTNAME MATCHES "^churn" OR - HOSTNAME MATCHES "^trounce" OR - HOSTNAME MATCHES "^thrash" OR - HOSTNAME MATCHES "^vanquish") - set (HOSTNAME_ID "anlworkstation") -else () - if (CMAKE_SYSTEM_NAME MATCHES "Catamount") - set (HOSTNAME_ID "ncsa") - else () - set (HOSTNAME_ID "unknown") - endif () -endif () - -## -- Get system info - -find_program (UNAME NAMES uname) -function (getuname name flag) - execute_process (COMMAND ${UNAME} ${flag} - OUTPUT_VARIABLE res - OUTPUT_STRIP_TRAILING_WHITESPACE) - set (${name} ${res} PARENT_SCOPE) -endfunction () - -getuname (osname -s) -getuname (osrel -r) -getuname (cpu -m) - -## -- Git command -find_program (CTEST_GIT_COMMAND NAMES git) - -## -- make command -find_program (MAKE NAMES make) - -#----------------------------------------------------------- -#-- Generate build-specific information -#----------------------------------------------------------- - -## -- CTest Site Name - -set (CTEST_SITE "${HOSTNAME_ID}-${HOSTNAME}") - -## -- CTest Build Name - -set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}-${compid}") - -## -- SRC Dir (where this script exists) -set (CTEST_SOURCE_DIRECTORY "${CTEST_SCRIPT_DIRECTORY}") - -## -- BIN Dir -set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/build-${CTEST_BUILD_NAME}-${CTEST_BUILD_GROUP}") - -## -- Add the CTest script directory to the module path -set (CTEST_EXTRA_SCRIPT_PATH "${CTEST_SOURCE_DIRECTORY}/ctest") -list (APPEND CMAKE_MODULE_PATH ${CTEST_EXTRA_SCRIPT_PATH}) - -# ----------------------------------------------------------- -# -- Store Build-Specific Info (environment variables) -# ----------------------------------------------------------- - -set (ENV{PIO_DASHBOARD_SITE} ${CTEST_SITE}) -set (ENV{PIO_DASHBOARD_BUILD_NAME} ${CTEST_BUILD_NAME}) -set (ENV{PIO_DASHBOARD_SOURCE_DIR} ${CTEST_SOURCE_DIRECTORY}) -set (ENV{PIO_DASHBOARD_BINARY_DIR} ${CTEST_BINARY_DIRECTORY}) - -# ----------------------------------------------------------- -# -- Run CTest -# ----------------------------------------------------------- - -## -- Empty the binary directory -ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY}) - -## -- Start -message (" -- Hostname_id = ${HOSTNAME_ID}") -message (" -- Start dashboard - ${CTEST_BUILD_NAME} --") -ctest_start("${CTEST_SCRIPT_ARG}") - -## -- Update -message (" -- Update source - ${CTEST_BUILD_NAME} --") -set (CTEST_UPDATE_COMMAND 
"${CTEST_GIT_COMMAND}") -ctest_update () - -## -- Configure -message (" -- Configure build - ${CTEST_BUILD_NAME} -- with options ${CTEST_CONFIGURE_OPTIONS}") -include (CTestEnvironment-${HOSTNAME_ID}) -set (CTEST_CONFIGURE_COMMAND "${CMAKE_COMMAND} ${CTEST_CONFIGURE_OPTIONS} ${CTEST_SOURCE_DIRECTORY}") -ctest_configure () - -## -- BUILD -message (" -- Build - ${CTEST_BUILD_NAME} --") -set (CTEST_BUILD_COMMAND "${MAKE} tests") -ctest_build () - -## -- TEST -message (" -- Test - ${CTEST_BUILD_NAME} --") -execute_process (COMMAND ${CTEST_EXTRA_SCRIPT_PATH}/runctest-${HOSTNAME_ID}.sh - ${CTEST_EXTRA_SCRIPT_PATH} ${CTEST_SCRIPT_ARG} - WORKING_DIRECTORY ${CTEST_BINARY_DIRECTORY}) - -## -- SUBMIT -message (" -- Submit to dashboard - ${CTEST_BUILD_NAME} --") -message ("** -- PIO_DASHBOARD_SITE=$ENV{PIO_DASHBOARD_SITE}") -ctest_submit () - -# ----------------------------------------------------------- -# -- Clear environment -# ----------------------------------------------------------- - -unset (ENV{PIO_DASHBOARD_SITE}) -unset (ENV{PIO_DASHBOARD_BUILD_NAME}) -unset (ENV{PIO_DASHBOARD_SOURCE_DIR}) -unset (ENV{PIO_DASHBOARD_BINARY_DIR}) - -message (" -- Finished - ${CTEST_BUILD_NAME} --") diff --git a/src/externals/pio2/Makefile.am b/src/externals/pio2/Makefile.am deleted file mode 100644 index b114d46e041..00000000000 --- a/src/externals/pio2/Makefile.am +++ /dev/null @@ -1,16 +0,0 @@ -# This is part of PIO. It creates the main Makefile. - -# Ed Hartnett - -# Look in the m4 directory for autotools stuff. -ACLOCAL_AMFLAGS= -I m4 - -# Does the user want to build fortran? -if BUILD_DOCS -DOC = doc -endif - -SUBDIRS = src tests examples ${DOC} scripts - -EXTRA_DIST = CMakeLists.txt set_flags.am COPYRIGHT - diff --git a/src/externals/pio2/README.md b/src/externals/pio2/README.md deleted file mode 100644 index 6e8b263e018..00000000000 --- a/src/externals/pio2/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# ParallelIO - -A high-level Parallel I/O Library for structured grid applications - -## Website - -For complete documentation, see our website at -[http://ncar.github.io/ParallelIO/](http://ncar.github.io/ParallelIO/). - -## Mailing List - -The (low-traffic) PIO mailing list is at -https://groups.google.com/forum/#!forum/parallelio, send email to the -list at parallelio@googlegroups.com. - -## Nightly Tests - -The results of our nightly tests on multiple platforms can be found on our -cdash site at [http://my.cdash.org/index.php?project=PIO](http://my.cdash.org/index.php?project=PIO). - -## Dependencies - -PIO can use NetCDF (version 4.6.1+) and/or PnetCDF (version 1.9.0+) -for I/O. NetCDF may be built with or without netCDF-4 features. NetCDF -is required for PIO, PnetCDF is optional. - -Ideally, the NetCDF version should be built with MPI, which requires that it -be linked with an MPI-enabled version of HDF5. Optionally, NetCDF can be -built with DAP support, which introduces a dependency on CURL. Additionally, -HDF5, itself, introduces dependencies on LIBZ and (optionally) SZIP. - -## Building PIO - -To build PIO, unpack the distribution tarball and do: - -``` -CC=mpicc FC=mpif90 ./configure --enable-fortran && make check install -``` - -For a full description of the available options and flags, try: -``` -./configure --help -``` - -Note that environment variables CC and FC may need to be set to the -MPI versions of the C and Fortran compiler. Also CPPFLAGS and LDFLAGS -may need to be set to indicate the locations of one or more of the -dependent libraries. 
(If using MPI compilers, the entire set of -dependent libraries should be built with the same compilers.) For -example: - -``` -export CC=mpicc -export FC=mpifort -export CPPFLAGS='-I/usr/local/netcdf-fortran-4.4.5_c_4.6.3_mpich-3.2/include -I/usr/local/netcdf-c-4.6.3_hdf5-1.10.5/include -I/usr/local/pnetcdf-1.11.0_shared/include' -export LDFLAGS='-L/usr/local/netcdf-c-4.6.3_hdf5-1.10.5/lib -L/usr/local/pnetcdf-1.11.0_shared/lib' -./configure --prefix=/usr/local/pio-2.4.2 --enable-fortran -make check -make install -``` - -## Building with CMake - -The typical configuration with CMake can be done as follows: - -``` -CC=mpicc FC=mpif90 cmake [-DOPTION1=value1 -DOPTION2=value2 ...] /path/to/pio/source -``` - -Full instructions for the cmake build can be found in the installation -documentation. diff --git a/src/externals/pio2/cmake/FindGPTL.cmake b/src/externals/pio2/cmake/FindGPTL.cmake deleted file mode 100644 index c223c1b3461..00000000000 --- a/src/externals/pio2/cmake/FindGPTL.cmake +++ /dev/null @@ -1,72 +0,0 @@ -# - Try to find GPTL -# -# This can be controlled by setting the GPTL_DIR (or, equivalently, the -# GPTL environment variable), or GPTL__DIR CMake variables, where -# is the COMPONENT language one needs. -# -# Once done, this will define: -# -# GPTL__FOUND (BOOL) - system has GPTL -# GPTL__IS_SHARED (BOOL) - whether library is shared/dynamic -# GPTL__INCLUDE_DIR (PATH) - Location of the C header file -# GPTL__INCLUDE_DIRS (LIST) - the GPTL include directories -# GPTL__LIBRARY (FILE) - Path to the C library file -# GPTL__LIBRARIES (LIST) - link these to use GPTL -# -# The available COMPONENTS are: C Fortran Perfmod -# If no components are specified, it assumes only C -include (LibFind) - -# Define GPTL C Component -define_package_component (GPTL DEFAULT - COMPONENT C - INCLUDE_NAMES gptl.h - LIBRARY_NAMES gptl) - -# Define GPTL Fortran Component -define_package_component (GPTL - COMPONENT Fortran - INCLUDE_NAMES gptl.mod - LIBRARY_NAMES gptl) - -# Define GPTL Fortran_Perf Component -define_package_component (GPTL - COMPONENT Fortran_Perf - INCLUDE_NAMES perf_mod.mod - LIBRARY_NAMES gptl) - -# Search for list of valid components requested -find_valid_components (GPTL) - -#============================================================================== -# SEARCH FOR VALIDATED COMPONENTS -foreach (GPTL_comp IN LISTS GPTL_FIND_VALID_COMPONENTS) - - # If not found already, search... 
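FindGPTL.cmake above locates the GPTL timing library that the PIO_ENABLE_TIMING / --enable-timing builds link against. For orientation, the C interface it searches for (gptl.h and libgptl) is used roughly as in the sketch below; the timer name and workload are illustrative, and exact link flags depend on how GPTL (and optionally PAPI) was installed:

```c
#include <stdio.h>
#include "gptl.h"            /* header located by FindGPTL.cmake */

int main(void)
{
    GPTLinitialize();        /* set up the timing library */

    GPTLstart("compute");    /* "compute" is an arbitrary timer name */
    double s = 0.0;
    for (int i = 0; i < 1000000; ++i)
        s += (double)i;
    GPTLstop("compute");

    printf("s = %f\n", s);

    GPTLpr(0);               /* write collected timers (e.g. to timing.0) */
    GPTLfinalize();
    return 0;                /* link with -lgptl */
}
```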
- if (NOT GPTL_${GPTL_comp}_FOUND) - - # Manually add the MPI include and library dirs to search paths - if (GPTL_comp STREQUAL C AND MPI_C_FOUND) - set (mpiincs ${MPI_C_INCLUDE_PATH}) - set (mpilibs ${MPI_C_LIBRARIES}) - set (mpifound ${MPI_C_FOUND}) - elseif (MPI_Fortran_FOUND) - set (mpiincs ${MPI_Fortran_INCLUDE_PATH}) - set (mpilibs ${MPI_Fortran_LIBRARIES}) - set (mpifound ${MPI_Fortran_FOUND}) - endif () - - # Search for the package component - if (mpifound) - initialize_paths (GPTL_${GPTL_comp}_PATHS - INCLUDE_DIRECTORIES ${mpiincs} - LIBRARIES ${mpilibs}) - find_package_component(GPTL COMPONENT ${GPTL_comp} - PATHS ${GPTL_${GPTL_comp}_PATHS}) - else () - find_package_component(GPTL COMPONENT ${GPTL_comp}) - endif () - - endif () - -endforeach () diff --git a/src/externals/pio2/cmake/FindHDF5.cmake b/src/externals/pio2/cmake/FindHDF5.cmake deleted file mode 100644 index e918277b1ae..00000000000 --- a/src/externals/pio2/cmake/FindHDF5.cmake +++ /dev/null @@ -1,118 +0,0 @@ -# - Try to find HDF5 -# -# This can be controlled by setting the HDF5_DIR (or, equivalently, the -# HDF5 environment variable), or HDF5__DIR CMake variables, where -# is the COMPONENT language one needs. -# -# Once done, this will define: -# -# HDF5__FOUND (BOOL) - system has HDF5 -# HDF5__IS_SHARED (BOOL) - whether library is shared/dynamic -# HDF5__INCLUDE_DIR (PATH) - Location of the C header file -# HDF5__INCLUDE_DIRS (LIST) - the HDF5 include directories -# HDF5__LIBRARY (FILE) - Path to the C library file -# HDF5__LIBRARIES (LIST) - link these to use HDF5 -# -# The available COMPONENTS are: C HL Fortran Fortran_HL -# If no components are specified, it assumes only C -include (LibFind) - -# Define HDF5 C Component -define_package_component (HDF5 DEFAULT - COMPONENT C - INCLUDE_NAMES hdf5.h - LIBRARY_NAMES hdf5) - -# Define HDF5 HL Component -define_package_component (HDF5 - COMPONENT HL - INCLUDE_NAMES hdf5_hl.h - LIBRARY_NAMES hdf5_hl) - -# Define HDF5 Fortran Component -define_package_component (HDF5 - COMPONENT Fortran - INCLUDE_NAMES hdf5.mod - LIBRARY_NAMES hdf5_fortran) - -# Define HDF5 Fortran_HL Component -define_package_component (HDF5 - COMPONENT Fortran_HL - INCLUDE_NAMES hdf5.mod - LIBRARY_NAMES hdf5hl_fortran) - -# Search for list of valid components requested -find_valid_components (HDF5) - -#============================================================================== -# SEARCH FOR VALIDATED COMPONENTS -foreach (HDF5_comp IN LISTS HDF5_FIND_VALID_COMPONENTS) - - # If not found already, search... 
- if (NOT HDF5_${HDF5_comp}_FOUND) - - # Manually add the MPI include and library dirs to search paths - if ( (HDF5_comp STREQUAL C OR HDF5_comp STREQUAL HL) AND MPI_C_FOUND) - set (mpiincs ${MPI_C_INCLUDE_PATH}) - set (mpilibs ${MPI_C_LIBRARIES}) - set (mpifound ${MPI_C_FOUND}) - elseif (MPI_Fortran_FOUND) - set (mpiincs ${MPI_Fortran_INCLUDE_PATH}) - set (mpilibs ${MPI_Fortran_LIBRARIES}) - set (mpifound ${MPI_Fortran_FOUND}) - endif () - - # Search for the package component - if (mpifound) - initialize_paths (HDF5_${HDF5_comp}_PATHS - INCLUDE_DIRECTORIES ${mpiincs} - LIBRARIES ${mpilibs}) - find_package_component(HDF5 COMPONENT ${HDF5_comp} - PATHS ${HDF5_${HDF5_comp}_PATHS}) - else () - find_package_component(HDF5 COMPONENT ${HDF5_comp}) - endif () - - # Continue only if found - if (HDF5_${HDF5_comp}_FOUND) - - # Dependencies - if (HDF5_comp STREQUAL C AND NOT HDF5_C_IS_SHARED) - - # DEPENDENCY: LIBZ - find_package (LIBZ) - if (LIBZ_FOUND) - list (APPEND HDF5_C_INCLUDE_DIRS ${LIBZ_INCLUDE_DIRS}) - list (APPEND HDF5_C_LIBRARIES ${LIBZ_LIBRARIES}) - endif () - - # DEPENDENCY: SZIP (Optional) - check_macro (HDF5_C_HAS_SZIP - NAME TryHDF5_HAS_SZIP.c - HINTS ${CMAKE_MODULE_PATH} - DEFINITIONS -I${HDF5_C_INCLUDE_DIRS} - COMMENT "whether HDF5 has SZIP support") - if (HDF5_C_HAS_SZIP) - find_package (SZIP) - if (SZIP_FOUND) - list (APPEND HDF5_C_INCLUDE_DIRS ${SZIP_INCLUDE_DIRS}) - list (APPEND HDF5_C_LIBRARIES ${SZIP_LIBRARIES}) - endif () - endif () - - elseif (NOT HDF5_${HDF5_comp}_IS_SHARED) - - # DEPENDENCY: HDF5 - find_package (HDF5 COMPONENTS C) - if (HDF5_C_FOUND) - list (APPEND HDF5_${HDF5_comp}_INCLUDE_DIRS ${HDF5_C_INCLUDE_DIRS}) - list (APPEND HDF5_${HDF5_comp}_LIBRARIES ${HDF5_C_LIBRARIES}) - endif () - - endif () - - endif () - - endif () - -endforeach () diff --git a/src/externals/pio2/cmake/FindLIBRT.cmake b/src/externals/pio2/cmake/FindLIBRT.cmake deleted file mode 100644 index 1f55f9f3f13..00000000000 --- a/src/externals/pio2/cmake/FindLIBRT.cmake +++ /dev/null @@ -1,28 +0,0 @@ -# - Try to find LIBRT -# -# This can be controlled by setting the LIBRT_DIR (or, equivalently, the -# LIBRT environment variable). -# -# Once done, this will define: -# -# LIBRT_FOUND (BOOL) - system has LIBRT -# LIBRT_IS_SHARED (BOOL) - whether library is shared/dynamic -# LIBRT_INCLUDE_DIR (PATH) - Location of the C header file -# LIBRT_INCLUDE_DIRS (LIST) - the LIBRT include directories -# LIBRT_LIBRARY (FILE) - Path to the C library file -# LIBRT_LIBRARIES (LIST) - link these to use LIBRT -# -include (LibFind) - -# Define LIBRT package -define_package_component (LIBRT - INCLUDE_NAMES time.h - LIBRARY_NAMES rt) - -# SEARCH FOR PACKAGE -if (NOT LIBRT_FOUND) - - # Search for the package - find_package_component(LIBRT) - -endif () diff --git a/src/externals/pio2/cmake/FindLIBZ.cmake b/src/externals/pio2/cmake/FindLIBZ.cmake deleted file mode 100644 index 8ebbaefeed8..00000000000 --- a/src/externals/pio2/cmake/FindLIBZ.cmake +++ /dev/null @@ -1,37 +0,0 @@ -# - Try to find LIBZ -# -# This can be controlled by setting the LIBZ_DIR (or, equivalently, the -# LIBZ environment variable). 
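The dependency handling above matters mainly for static HDF5 builds, where LIBZ and (optionally) SZIP must be appended to the include and link lists by the find module itself. A minimal C program using the HDF5 C component located here (the output file name is illustrative):

```c
#include "hdf5.h"   /* header located by FindHDF5.cmake */

int main(void)
{
    /* Create and close an empty HDF5 file. With a static libhdf5, the extra
       LIBZ/SZIP libraries discovered above must also appear on the link line. */
    hid_t file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    if (file < 0)
        return 1;
    H5Fclose(file);
    return 0;
}
```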
-# -# Once done, this will define: -# -# LIBZ_FOUND (BOOL) - system has LIBZ -# LIBZ_IS_SHARED (BOOL) - whether library is shared/dynamic -# LIBZ_INCLUDE_DIR (PATH) - Location of the C header file -# LIBZ_INCLUDE_DIRS (LIST) - the LIBZ include directories -# LIBZ_LIBRARY (FILE) - Path to the C library file -# LIBZ_LIBRARIES (LIST) - link these to use LIBZ -# -include (LibFind) - -# Define LIBZ package -define_package_component (LIBZ - INCLUDE_NAMES zlib.h - LIBRARY_NAMES z) - -# SEARCH FOR PACKAGE -if (NOT LIBZ_FOUND) - - # Manually add the MPI include and library dirs to search paths - # and search for the package component - if (MPI_C_FOUND) - initialize_paths (LIBZ_PATHS - INCLUDE_DIRECTORIES ${MPI_C_INCLUDE_PATH} - LIBRARIES ${MPI_C_LIBRARIES}) - find_package_component(LIBZ - PATHS ${LIBZ_PATHS}) - else () - find_package_component(LIBZ) - endif () - -endif () diff --git a/src/externals/pio2/cmake/FindMPE.cmake b/src/externals/pio2/cmake/FindMPE.cmake deleted file mode 100644 index 5a964172da7..00000000000 --- a/src/externals/pio2/cmake/FindMPE.cmake +++ /dev/null @@ -1,50 +0,0 @@ -# - Try to find MPE -# -# This can be controlled by setting the MPE_PATH (or, equivalently, -# the MPE environment variable), or MPE__PATH CMake variables, -# where is the COMPONENT language one needs. -# -# Once done, this will define: -# -# MPE__FOUND (BOOL) - system has MPE -# MPE__IS_SHARED (BOOL) - whether library is shared/dynamic -# MPE__INCLUDE_DIR (PATH) - Location of the C header file -# MPE__INCLUDE_DIRS (LIST) - the MPE include directories -# MPE__LIBRARY (FILE) - Path to the C library file -# MPE__LIBRARIES (LIST) - link these to use MPE -# -# The available COMPONENTS are: C -include (LibFind) -include (LibCheck) - -# Define MPE C Component -define_package_component (MPE DEFAULT - COMPONENT C - INCLUDE_NAMES mpe.h - LIBRARY_NAMES mpe) - -# Search for list of valid components requested -find_valid_components (MPE) - -#============================================================================== -# SEARCH FOR VALIDATED COMPONENTS -foreach (NCDFcomp IN LISTS MPE_FIND_VALID_COMPONENTS) - - # If not found already, search... - if (NOT MPE_${NCDFcomp}_FOUND) - - # Manually add the MPI include and library dirs to search paths - # and search for the package component - if (MPI_${NCDFcomp}_FOUND) - initialize_paths (MPE_${NCDFcomp}_PATHS - INCLUDE_DIRECTORIES ${MPI_${NCDFcomp}_INCLUDE_PATH} - LIBRARIES ${MPI_${NCDFcomp}_LIBRARIES}) - find_package_component(MPE COMPONENT ${NCDFcomp} - PATHS ${MPE_${NCDFcomp}_PATHS}) - else () - find_package_component(MPE COMPONENT ${NCDFcomp}) - endif () - - endif () - -endforeach () diff --git a/src/externals/pio2/cmake/FindMPISERIAL.cmake b/src/externals/pio2/cmake/FindMPISERIAL.cmake deleted file mode 100644 index 09906eb7a2d..00000000000 --- a/src/externals/pio2/cmake/FindMPISERIAL.cmake +++ /dev/null @@ -1,44 +0,0 @@ -# - Try to find MPISERIAL -# -# This can be controlled by setting the MPISERIAL_PATH (or, equivalently, the -# MPISERIAL environment variable). 
-# -# Once done, this will define: -# -# MPISERIAL_FOUND (BOOL) - system has MPISERIAL -# MPISERIAL_IS_SHARED (BOOL) - whether library is shared/dynamic -# MPISERIAL_INCLUDE_DIR (PATH) - Location of the C header file -# MPISERIAL_INCLUDE_DIRS (LIST) - the MPISERIAL include directories -# MPISERIAL_LIBRARY (FILE) - Path to the C library file -# MPISERIAL_LIBRARIES (LIST) - link these to use MPISERIAL -# -include (LibFind) - -# Define MPISERIAL C component -define_package_component (MPISERIAL DEFAULT - COMPONENT C - INCLUDE_NAMES mpi.h - LIBRARY_NAMES mpi-serial) - -# Define MPISERIAL Fortran component -define_package_component (MPISERIAL - COMPONENT Fortran - INCLUDE_NAMES mpi.mod mpif.h - LIBRARY_NAMES mpi-serial) - -# Search for list of valid components requested -find_valid_components (MPISERIAL) - -#============================================================================== -# SEARCH FOR VALIDATED COMPONENTS -foreach (MPISERIAL_comp IN LISTS MPISERIAL_FIND_VALID_COMPONENTS) - - # If not found already, search... - if (NOT MPISERIAL_${MPISERIAL_comp}_FOUND) - - # Search for the package - find_package_component(MPISERIAL COMPONENT ${MPISERIAL_comp}) - - endif () - -endforeach () diff --git a/src/externals/pio2/cmake/FindNetCDF.cmake b/src/externals/pio2/cmake/FindNetCDF.cmake deleted file mode 100644 index 344714b18ab..00000000000 --- a/src/externals/pio2/cmake/FindNetCDF.cmake +++ /dev/null @@ -1,143 +0,0 @@ -# - Try to find NetCDF -# -# This can be controlled by setting the NetCDF_PATH (or, equivalently, the -# NETCDF environment variable), or NetCDF__PATH CMake variables, where -# is the COMPONENT language one needs. -# -# Once done, this will define: -# -# NetCDF__FOUND (BOOL) - system has NetCDF -# NetCDF__IS_SHARED (BOOL) - whether library is shared/dynamic -# NetCDF__INCLUDE_DIR (PATH) - Location of the C header file -# NetCDF__INCLUDE_DIRS (LIST) - the NetCDF include directories -# NetCDF__LIBRARY (FILE) - Path to the C library file -# NetCDF__LIBRARIES (LIST) - link these to use NetCDF -# -# The available COMPONENTS are: C Fortran -# If no components are specified, it assumes only C -include (LibFind) -include (LibCheck) - -# Define NetCDF C Component -define_package_component (NetCDF DEFAULT - COMPONENT C - INCLUDE_NAMES netcdf.h - LIBRARY_NAMES netcdf) - -# Define NetCDF Fortran Component -define_package_component (NetCDF - COMPONENT Fortran - INCLUDE_NAMES netcdf.mod netcdf.inc - LIBRARY_NAMES netcdff) - -# Search for list of valid components requested -find_valid_components (NetCDF) - -#============================================================================== -# SEARCH FOR VALIDATED COMPONENTS -foreach (NCDFcomp IN LISTS NetCDF_FIND_VALID_COMPONENTS) - - # If not found already, search... 
- if (NOT NetCDF_${NCDFcomp}_FOUND) - - # Manually add the MPI include and library dirs to search paths - # and search for the package component - if (MPI_${NCDFcomp}_FOUND) - initialize_paths (NetCDF_${NCDFcomp}_PATHS - INCLUDE_DIRECTORIES ${MPI_${NCDFcomp}_INCLUDE_PATH} - LIBRARIES ${MPI_${NCDFcomp}_LIBRARIES}) - find_package_component(NetCDF COMPONENT ${NCDFcomp} - PATHS ${NetCDF_${NCDFcomp}_PATHS}) - else () - find_package_component(NetCDF COMPONENT ${NCDFcomp}) - endif () - - # Continue only if component found - if (NetCDF_${NCDFcomp}_FOUND) - - # Checks - if (NCDFcomp STREQUAL C) - - # Check version - check_version (NetCDF - NAME "netcdf_meta.h" - HINTS ${NetCDF_C_INCLUDE_DIRS} - MACRO_REGEX "NC_VERSION_") - - # Check for parallel support - check_macro (NetCDF_C_HAS_PARALLEL - NAME TryNetCDF_PARALLEL.c - HINTS ${CMAKE_MODULE_PATH} - DEFINITIONS -I${NetCDF_C_INCLUDE_DIR} - COMMENT "whether NetCDF has parallel support") - - # Check if logging enabled - set(CMAKE_REQUIRED_INCLUDES ${NetCDF_C_INCLUDE_DIR}) - set(CMAKE_REQUIRED_LIBRARIES ${NetCDF_C_LIBRARIES}) - CHECK_FUNCTION_EXISTS(nc_set_log_level NetCDF_C_LOGGING_ENABLED) - - endif () - - # Dependencies - if (NCDFcomp STREQUAL C AND NOT NetCDF_C_IS_SHARED) - - # DEPENDENCY: PnetCDF (if PnetCDF enabled) - check_macro (NetCDF_C_HAS_PNETCDF - NAME TryNetCDF_PNETCDF.c - HINTS ${CMAKE_MODULE_PATH} - DEFINITIONS -I${NetCDF_C_INCLUDE_DIR} - COMMENT "whether NetCDF has PnetCDF support") - if (NetCDF_C_HAS_PNETCDF) - find_package (PnetCDF COMPONENTS C) - if (CURL_FOUND) - list (APPEND NetCDF_C_INCLUDE_DIRS ${PnetCDF_C_INCLUDE_DIRS}) - list (APPEND NetCDF_C_LIBRARIES ${PnetCDF_C_LIBRARIES}) - endif () - endif () - - # DEPENDENCY: CURL (If DAP enabled) - check_macro (NetCDF_C_HAS_DAP - NAME TryNetCDF_DAP.c - HINTS ${CMAKE_MODULE_PATH} - DEFINITIONS -I${NetCDF_C_INCLUDE_DIR} - COMMENT "whether NetCDF has DAP support") - if (NetCDF_C_HAS_DAP) - find_package (CURL) - if (CURL_FOUND) - list (APPEND NetCDF_C_INCLUDE_DIRS ${CURL_INCLUDE_DIRS}) - list (APPEND NetCDF_C_LIBRARIES ${CURL_LIBRARIES}) - endif () - endif () - - # DEPENDENCY: HDF5 - find_package (HDF5 COMPONENTS HL C) - if (HDF5_C_FOUND) - list (APPEND NetCDF_C_INCLUDE_DIRS ${HDF5_C_INCLUDE_DIRS} - ${HDF5_HL_INCLUDE_DIRS}) - list (APPEND NetCDF_C_LIBRARIES ${HDF5_C_LIBRARIES} - ${HDF5_HL_LIBRARIES}) - endif () - - # DEPENDENCY: LIBDL Math - list (APPEND NetCDF_C_LIBRARIES -ldl -lm) - - elseif (NCDFcomp STREQUAL Fortran AND NOT NetCDF_Fortran_IS_SHARED) - - # DEPENDENCY: NetCDF - set (orig_comp ${NCDFcomp}) - set (orig_comps ${NetCDF_FIND_VALID_COMPONENTS}) - find_package (NetCDF COMPONENTS C) - set (NetCDF_FIND_VALID_COMPONENTS ${orig_comps}) - set (NCDFcomp ${orig_comp}) - if (NetCDF_C_FOUND) - list (APPEND NetCDF_Fortran_INCLUDE_DIRS ${NetCDF_C_INCLUDE_DIRS}) - list (APPEND NetCDF_Fortran_LIBRARIES ${NetCDF_C_LIBRARIES}) - endif () - - endif () - - endif () - - endif () - -endforeach () diff --git a/src/externals/pio2/cmake/FindPAPI.cmake b/src/externals/pio2/cmake/FindPAPI.cmake deleted file mode 100644 index dcf1445bc7d..00000000000 --- a/src/externals/pio2/cmake/FindPAPI.cmake +++ /dev/null @@ -1,28 +0,0 @@ -# - Try to find PAPI -# -# This can be controlled by setting the PAPI_DIR (or, equivalently, the -# PAPI environment variable). 
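FindNetCDF.cmake above derives the NetCDF version and feature set from netcdf_meta.h (check_version with the NC_VERSION_ regex, plus the TryNetCDF_*.c compile probes shown further down). The same macros are visible to ordinary C code; a sketch assuming netcdf_meta.h is on the include path:

```c
#include <stdio.h>
#include <netcdf_meta.h>   /* installed by NetCDF; defines NC_VERSION_* and NC_HAS_* */

int main(void)
{
    printf("NetCDF %d.%d.%d\n",
           NC_VERSION_MAJOR, NC_VERSION_MINOR, NC_VERSION_PATCH);
#if NC_HAS_PARALLEL
    puts("parallel I/O support: yes");
#else
    puts("parallel I/O support: no");
#endif
#if NC_HAS_PNETCDF
    puts("PnetCDF support: yes");
#else
    puts("PnetCDF support: no");
#endif
    return 0;
}
```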
-# -# Once done, this will define: -# -# PAPI_FOUND (BOOL) - system has PAPI -# PAPI_IS_SHARED (BOOL) - whether library is shared/dynamic -# PAPI_INCLUDE_DIR (PATH) - Location of the C header file -# PAPI_INCLUDE_DIRS (LIST) - the PAPI include directories -# PAPI_LIBRARY (FILE) - Path to the C library file -# PAPI_LIBRARIES (LIST) - link these to use PAPI -# -include (LibFind) - -# Define PAPI package -define_package_component (PAPI - INCLUDE_NAMES papi.h - LIBRARY_NAMES papi) - -# SEARCH FOR PACKAGE -if (NOT PAPI_FOUND) - - # Search for the package - find_package_component(PAPI) - -endif () diff --git a/src/externals/pio2/cmake/FindPnetCDF.cmake b/src/externals/pio2/cmake/FindPnetCDF.cmake deleted file mode 100644 index b87d245cd10..00000000000 --- a/src/externals/pio2/cmake/FindPnetCDF.cmake +++ /dev/null @@ -1,68 +0,0 @@ -# - Try to find PnetCDF -# -# This can be controlled by setting the PnetCDF_PATH (or, equivalently, the -# PNETCDF environment variable), or PnetCDF__PATH CMake variables, where -# is the COMPONENT language one needs. -# -# Once done, this will define: -# -# PnetCDF__FOUND (BOOL) - system has PnetCDF -# PnetCDF__IS_SHARED (BOOL) - whether library is shared/dynamic -# PnetCDF__INCLUDE_DIR (PATH) - Location of the C header file -# PnetCDF__INCLUDE_DIRS (LIST) - the PnetCDF include directories -# PnetCDF__LIBRARY (FILE) - Path to the C library file -# PnetCDF__LIBRARIES (LIST) - link these to use PnetCDF -# -# The available COMPONENTS are: C, Fortran -# If no components are specified, it assumes only C -include (LibFind) -include (LibCheck) - -# Define PnetCDF C Component -define_package_component (PnetCDF DEFAULT - COMPONENT C - INCLUDE_NAMES pnetcdf.h - LIBRARY_NAMES pnetcdf) - -# Define PnetCDF Fortran Component -define_package_component (PnetCDF - COMPONENT Fortran - INCLUDE_NAMES pnetcdf.mod pnetcdf.inc - LIBRARY_NAMES pnetcdf) - -# Search for list of valid components requested -find_valid_components (PnetCDF) - -#============================================================================== -# SEARCH FOR VALIDATED COMPONENTS -foreach (PNCDFcomp IN LISTS PnetCDF_FIND_VALID_COMPONENTS) - - # If not found already, search... - if (NOT PnetCDF_${PNCDFcomp}_FOUND) - - # Manually add the MPI include and library dirs to search paths - # and search for the package component - if (MPI_${PNCDFcomp}_FOUND) - initialize_paths (PnetCDF_${PNCDFcomp}_PATHS - INCLUDE_DIRECTORIES ${MPI_${PNCDFcomp}_INCLUDE_PATH} - LIBRARIES ${MPI_${PNCDFcomp}_LIBRARIES}) - find_package_component(PnetCDF COMPONENT ${PNCDFcomp} - PATHS ${PnetCDF_${PNCDFcomp}_PATHS}) - else () - find_package_component(PnetCDF COMPONENT ${PNCDFcomp}) - endif () - - # Continue only if component found - if (PnetCDF_${PNCDFcomp}_FOUND) - - # Check version - check_version (PnetCDF - NAME "pnetcdf.h" - HINTS ${PnetCDF_${PNCDFcomp}_INCLUDE_DIR} - MACRO_REGEX "PNETCDF_VERSION_") - - endif () - - endif () - -endforeach () diff --git a/src/externals/pio2/cmake/FindSZIP.cmake b/src/externals/pio2/cmake/FindSZIP.cmake deleted file mode 100644 index e65cfe5fd68..00000000000 --- a/src/externals/pio2/cmake/FindSZIP.cmake +++ /dev/null @@ -1,37 +0,0 @@ -# - Try to find SZIP -# -# This can be controlled by setting the SZIP_DIR (or, equivalently, the -# SZIP environment variable). 
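FindPnetCDF.cmake above validates the installation by scanning pnetcdf.h for PNETCDF_VERSION_ defines. The library also reports its own version at run time; a sketch assuming PnetCDF headers and library are installed and an MPI compiler wrapper is used:

```c
#include <stdio.h>
#include <pnetcdf.h>   /* header located by FindPnetCDF.cmake; pulls in mpi.h */

int main(void)
{
    /* ncmpi_inq_libvers() returns a human-readable version string; the
       PNETCDF_VERSION_ macros in pnetcdf.h are what check_version scrapes. */
    printf("PnetCDF library: %s\n", ncmpi_inq_libvers());
    return 0;          /* build with: mpicc probe.c -lpnetcdf */
}
```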
-# -# Once done, this will define: -# -# SZIP_FOUND (BOOL) - system has SZIP -# SZIP_IS_SHARED (BOOL) - whether library is shared/dynamic -# SZIP_INCLUDE_DIR (PATH) - Location of the C header file -# SZIP_INCLUDE_DIRS (LIST) - the SZIP include directories -# SZIP_LIBRARY (FILE) - Path to the C library file -# SZIP_LIBRARIES (LIST) - link these to use SZIP -# -include (LibFind) - -# Define SZIP package -define_package_component (SZIP - INCLUDE_NAMES szlib.h - LIBRARY_NAMES sz) - -# SEARCH FOR PACKAGE -if (NOT SZIP_FOUND) - - # Manually add the MPI include and library dirs to search paths - # and search for the package component - if (MPI_C_FOUND) - initialize_paths (SZIP_PATHS - INCLUDE_DIRECTORIES ${MPI_C_INCLUDE_PATH} - LIBRARIES ${MPI_C_LIBRARIES}) - find_package_component(SZIP - PATHS ${SZIP_PATHS}) - else () - find_package_component(SZIP) - endif () - -endif () diff --git a/src/externals/pio2/cmake/LibCheck.cmake b/src/externals/pio2/cmake/LibCheck.cmake deleted file mode 100644 index 3f12bdf7965..00000000000 --- a/src/externals/pio2/cmake/LibCheck.cmake +++ /dev/null @@ -1,104 +0,0 @@ -include (CMakeParseArguments) -include (CheckFunctionExists) -#============================================================================== -# -# FUNCTIONS TO HELP WITH Check* MODULES -# -#============================================================================== - -#______________________________________________________________________________ -# - Basic function to check a property of a package using a try_compile step -# -# SYNTAX: check_macro ( -# NAME -# HINTS ... -# DEFINITIONS ... -# COMMENT ) -# -function (check_macro VARIABLE) - - # Parse the input arguments - set (oneValueArgs COMMENT NAME) - set (multiValueArgs HINTS DEFINITIONS) - cmake_parse_arguments (${VARIABLE} "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - # If the return variable is defined, already, don't continue - if (NOT DEFINED ${VARIABLE}) - - message (STATUS "Checking ${${VARIABLE}_COMMENT}") - find_file (${VARIABLE}_TRY_FILE - NAMES ${${VARIABLE}_NAME} - HINTS ${${VARIABLE}_HINTS}) - if (${VARIABLE}_TRY_FILE) - try_compile (COMPILE_RESULT - ${CMAKE_CURRENT_BINARY_DIR}/try${VARIABLE} - SOURCES ${${VARIABLE}_TRY_FILE} - COMPILE_DEFINITIONS ${${VARIABLE}_DEFINITIONS} - OUTPUT_VARIABLE TryOUT) - if (COMPILE_RESULT) - message (STATUS "Checking ${${VARIABLE}_COMMENT} - yes") - else () - message (STATUS "Checking ${${VARIABLE}_COMMENT} - no") - endif () - - set (${VARIABLE} ${COMPILE_RESULT} - CACHE BOOL "${${VARIABLE}_COMMENT}") - - else () - message (STATUS "Checking ${${VARIABLE}_COMMENT} - failed") - endif () - - unset (${VARIABLE}_TRY_FILE CACHE) - endif () - -endfunction () - -#______________________________________________________________________________ -# - Basic function to check the version of a package using a try_run step -# -# SYNTAX: check_version ( -# NAME -# HINTS ... -# DEFINITIONS ...) 
-# -function (check_version PKG) - - # Parse the input arguments - set (oneValueArgs NAME MACRO_REGEX) - set (multiValueArgs HINTS) - cmake_parse_arguments (${PKG} "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - # If the return variable is defined, already, don't continue - if (NOT DEFINED ${PKG}_VERSION) - - message (STATUS "Checking ${PKG} version") - find_file (${PKG}_VERSION_HEADER - NAMES ${${PKG}_NAME} - HINTS ${${PKG}_HINTS}) - if (${PKG}_VERSION_HEADER) - set (def) - file (STRINGS ${${PKG}_VERSION_HEADER} deflines - REGEX "^#define[ \\t]+${${PKG}_MACRO_REGEX}") - foreach (defline IN LISTS deflines) - string (REPLACE "\"" "" defline "${defline}") - string (REPLACE "." "" defline "${defline}") - string (REGEX REPLACE "[ \\t]+" ";" deflist "${defline}") - list (GET deflist 2 arg) - list (APPEND def ${arg}) - endforeach () - string (REPLACE ";" "." vers "${def}") - message (STATUS "Checking ${PKG} version - ${vers}") - set (${PKG}_VERSION ${vers} - CACHE STRING "${PKG} version string") - if (${PKG}_VERSION VERSION_LESS ${PKG}_FIND_VERSION}) - message (FATAL_ERROR "${PKG} version insufficient") - endif () - else () - message (STATUS "Checking ${PKG} version - failed") - endif () - - unset (${PKG}_VERSION_HEADER CACHE) - - endif () - -endfunction () \ No newline at end of file diff --git a/src/externals/pio2/cmake/LibFind.cmake b/src/externals/pio2/cmake/LibFind.cmake deleted file mode 100644 index 61cd93aa37d..00000000000 --- a/src/externals/pio2/cmake/LibFind.cmake +++ /dev/null @@ -1,333 +0,0 @@ -include (CMakeParseArguments) -include(FindPackageHandleStandardArgs) - -#============================================================================== -# -# FUNCTIONS TO HELP WITH Find* MODULES -# -#============================================================================== - -#______________________________________________________________________________ -# - Wrapper for finding static libraries ONLY -# -macro (find_static_library) - set (_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) - set (CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_STATIC_LIBRARY_SUFFIX}) - find_library(${ARGN}) - set (CMAKE_FIND_LIBRARY_SUFFIXES ${_CMAKE_FIND_LIBRARY_SUFFIXES}) - unset (_CMAKE_FIND_LIBRARY_SUFFIXES) -endmacro () - - -#______________________________________________________________________________ -# - Wrapper for finding shared/dynamic libraries ONLY -# -macro (find_shared_library) - set (_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) - set (CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_SHARED_LIBRARY_SUFFIX}) - find_library(${ARGN}) - set (CMAKE_FIND_LIBRARY_SUFFIXES ${_CMAKE_FIND_LIBRARY_SUFFIXES}) - unset (_CMAKE_FIND_LIBRARY_SUFFIXES) -endmacro () - - -#______________________________________________________________________________ -# - Function to determine type (SHARED or STATIC) of library -# -# Input: -# LIB (FILE) -# -# Returns: -# RETURN_VAR (BOOL) -# -function (is_shared_library RETURN_VAR LIB) - get_filename_component(libext ${LIB} EXT) - if (libext MATCHES ${CMAKE_SHARED_LIBRARY_SUFFIX}) - set (${RETURN_VAR} TRUE PARENT_SCOPE) - else () - set (${RETURN_VAR} FALSE PARENT_SCOPE) - endif () -endfunction () - - -#______________________________________________________________________________ -# - Function to define a valid package component -# -# Input: -# ${PKG}_DEFAULT (BOOL) -# ${PKG}_COMPONENT (STRING) -# ${PKG}_INCLUDE_NAMES (LIST) -# ${PKG}_LIBRARY_NAMES (LIST) -# -# Returns: -# ${PKG}_DEFAULT_COMPONENT (STRING) -# ${PKG}_VALID_COMPONENTS (LIST) -# 
${PKG}_${COMPONENT}_INCLUDE_NAMES (LIST) -# ${PKG}_${COMPONENT}_LIBRARY_NAMES (LIST) -# -function (define_package_component PKG) - - # Parse the input arguments - set (options DEFAULT) - set (oneValueArgs COMPONENT) - set (multiValueArgs INCLUDE_NAMES LIBRARY_NAMES) - cmake_parse_arguments (${PKG} "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - if (${PKG}_COMPONENT) - set (PKGCOMP ${PKG}_${${PKG}_COMPONENT}) - else () - set (PKGCOMP ${PKG}) - endif () - - # Set return values - if (${PKG}_COMPONENT) - if (${PKG}_DEFAULT) - set (${PKG}_DEFAULT_COMPONENT ${${PKG}_COMPONENT} PARENT_SCOPE) - endif () - set (VALID_COMPONENTS ${${PKG}_VALID_COMPONENTS}) - list (APPEND VALID_COMPONENTS ${${PKG}_COMPONENT}) - set (${PKG}_VALID_COMPONENTS ${VALID_COMPONENTS} PARENT_SCOPE) - endif () - set (${PKGCOMP}_INCLUDE_NAMES ${${PKG}_INCLUDE_NAMES} PARENT_SCOPE) - set (${PKGCOMP}_LIBRARY_NAMES ${${PKG}_LIBRARY_NAMES} PARENT_SCOPE) - -endfunction () - - -#______________________________________________________________________________ -# - Function to find valid package components -# -# Assumes pre-defined variables: -# ${PKG}_FIND_COMPONENTS (LIST) -# ${PKG}_DEFAULT_COMPONENT (STRING) -# ${PKG}_VALID_COMPONENTS (LIST) -# -# Returns: -# ${PKG}_FIND_VALID_COMPONENTS (LIST) -# -function (find_valid_components PKG) - - if (NOT ${PKG}_FIND_COMPONENTS) - set (${PKG}_FIND_COMPONENTS ${${PKG}_DEFAULT_COMPONENT}) - endif () - - set (FIND_VALID_COMPONENTS) - foreach (comp IN LISTS ${PKG}_FIND_COMPONENTS) - if (";${${PKG}_VALID_COMPONENTS};" MATCHES ";${comp};") - list (APPEND FIND_VALID_COMPONENTS ${comp}) - endif () - endforeach () - - set (${PKG}_FIND_VALID_COMPONENTS ${FIND_VALID_COMPONENTS} PARENT_SCOPE) - -endfunction () - - -#______________________________________________________________________________ -# - Initialize a list of paths from a list of includes and libraries -# -# Input: -# INCLUDE_DIRECTORIES -# LIBRARIES -# -# Ouput: -# ${PATHLIST} -# -function (initialize_paths PATHLIST) - - # Parse the input arguments - set (multiValueArgs INCLUDE_DIRECTORIES LIBRARIES) - cmake_parse_arguments (INIT "" "" "${multiValueArgs}" ${ARGN}) - - set (paths) - foreach (inc IN LISTS INIT_INCLUDE_DIRECTORIES) - list (APPEND paths ${inc}) - get_filename_component (dname ${inc} NAME) - if (dname MATCHES "include") - get_filename_component (prefx ${inc} PATH) - list (APPEND paths ${prefx}) - endif () - endforeach () - foreach (lib IN LISTS INIT_LIBRARIES) - get_filename_component (libdir ${lib} PATH) - list (APPEND paths ${libdir}) - get_filename_component (dname ${libdir} PATH) - if (dname MATCHES "lib") - get_filename_component (prefx ${libdir} PATH) - list (APPEND paths ${prefx}) - endif () - endforeach () - - set (${PATHLIST} ${paths} PARENT_SCOPE) - -endfunction () - - -#______________________________________________________________________________ -# - Basic find package macro for a specific component -# -# Assumes pre-defined variables: -# ${PKG}_${COMP}_INCLUDE_NAMES or ${PKG}_INCLUDE_NAMES -# ${PKG}_${COMP}_LIBRARY_NAMES or ${PKG}_LIBRARY_NAMES -# -# Input: -# ${PKG}_COMPONENT -# ${PKG}_HINTS -# ${PKG}_PATHS -# -function (find_package_component PKG) - - # Parse the input arguments - set (options) - set (oneValueArgs COMPONENT) - set (multiValueArgs HINTS PATHS) - cmake_parse_arguments (${PKG} "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - set (COMP ${${PKG}_COMPONENT}) - if (COMP) - set (PKGCOMP ${PKG}_${COMP}) - else () - set (PKGCOMP ${PKG}) - endif () - string (TOUPPER ${PKG} PKGUP) - 
string (TOUPPER ${PKGCOMP} PKGCOMPUP) - - # Only continue if package not found already - if (NOT ${PKGCOMP}_FOUND) - - # Handle QUIET and REQUIRED arguments - if (${${PKG}_FIND_QUIETLY}) - set (${PKGCOMP}_FIND_QUIETLY TRUE) - endif () - if (${${PKG}_FIND_REQUIRED}) - set (${PKGCOMP}_FIND_REQUIRED TRUE) - endif () - - # Determine search order - set (SEARCH_DIRS) - if (${PKG}_HINTS) - list (APPEND SEARCH_DIRS ${${PKG}_HINTS}) - endif () - if (${PKGCOMP}_PATH) - list (APPEND SEARCH_DIRS ${${PKGCOMP}_PATH}) - endif () - if (${PKG}_PATH) - list (APPEND SEARCH_DIRS ${${PKG}_PATH}) - endif () - if (DEFINED ENV{${PKGCOMPUP}}) - list (APPEND SEARCH_DIRS $ENV{${PKGCOMPUP}}) - endif () - if (DEFINED ENV{${PKGUP}}) - list (APPEND SEARCH_DIRS $ENV{${PKGUP}}) - endif () - if (CMAKE_SYSTEM_PREFIX_PATH) - list (APPEND SEARCH_DIRS ${CMAKE_SYSTEM_PREFIX_PATH}) - endif () - if (${PKG}_PATHS) - list (APPEND SEARCH_DIRS ${${PKG}_PATHS}) - endif () - - # Start the search for the include file and library file. Only overload - # if the variable is not defined. - foreach (suffix PREFIX LIBRARY INCLUDE_DIR) - if (NOT DEFINED ${PKGCOMP}_${suffix}) - set (${PKGCOMP}_${suffix} ${PKGCOMP}_${suffix}-NOTFOUND) - endif () - endforeach () - - foreach (dir IN LISTS SEARCH_DIRS) - - # Search for include file names in current dirrectory - foreach (iname IN LISTS ${PKGCOMP}_INCLUDE_NAMES) - if (EXISTS ${dir}/${iname}) - set (${PKGCOMP}_PREFIX ${dir}) - set (${PKGCOMP}_INCLUDE_DIR ${dir}) - break () - endif () - if (EXISTS ${dir}/include/${iname}) - set (${PKGCOMP}_PREFIX ${dir}) - set (${PKGCOMP}_INCLUDE_DIR ${dir}/include) - break () - endif () - endforeach () - - # Search for library file names in the found prefix only! - if (${PKGCOMP}_PREFIX) - find_library (${PKGCOMP}_LIBRARY - NAMES ${${PKGCOMP}_LIBRARY_NAMES} - PATHS ${${PKGCOMP}_PREFIX} - PATH_SUFFIXES lib - NO_DEFAULT_PATH) - - # If found, check if library is static or dynamic - if (${PKGCOMP}_LIBRARY) - is_shared_library (${PKGCOMP}_IS_SHARED ${${PKGCOMP}_LIBRARY}) - - # If we want only shared libraries, and it isn't shared... - if (PREFER_SHARED AND NOT ${PKGCOMP}_IS_SHARED) - find_shared_library (${PKGCOMP}_SHARED_LIBRARY - NAMES ${${PKGCOMP}_LIBRARY_NAMES} - PATHS ${${PKGCOMP}_PREFIX} - PATH_SUFFIXES lib - NO_DEFAULT_PATH) - if (${PKGCOMP}_SHARED_LIBRARY) - set (${PKGCOMP}_LIBRARY ${${PKGCOMP}_SHARED_LIBRARY}) - set (${PKGCOMP}_IS_SHARED TRUE) - endif () - - # If we want only static libraries, and it is shared... 
- elseif (PREFER_STATIC AND ${PKGCOMP}_IS_SHARED) - find_static_library (${PKGCOMP}_STATIC_LIBRARY - NAMES ${${PKGCOMP}_LIBRARY_NAMES} - PATHS ${${PKGCOMP}_PREFIX} - PATH_SUFFIXES lib - NO_DEFAULT_PATH) - if (${PKGCOMP}_STATIC_LIBRARY) - set (${PKGCOMP}_LIBRARY ${${PKGCOMP}_STATIC_LIBRARY}) - set (${PKGCOMP}_IS_SHARED FALSE) - endif () - endif () - endif () - - # If include dir and library both found, then we're done - if (${PKGCOMP}_INCLUDE_DIR AND ${PKGCOMP}_LIBRARY) - break () - - # Otherwise, reset the search variables and continue - else () - set (${PKGCOMP}_PREFIX ${PKGCOMP}_PREFIX-NOTFOUND) - set (${PKGCOMP}_INCLUDE_DIR ${PKGCOMP}_INCLUDE_DIR-NOTFOUND) - set (${PKGCOMP}_LIBRARY ${PKGCOMP}_LIBRARY-NOTFOUND) - endif () - endif () - - endforeach () - - # handle the QUIETLY and REQUIRED arguments and - # set NetCDF_C_FOUND to TRUE if all listed variables are TRUE - find_package_handle_standard_args (${PKGCOMP} DEFAULT_MSG - ${PKGCOMP}_LIBRARY - ${PKGCOMP}_INCLUDE_DIR) - mark_as_advanced (${PKGCOMP}_INCLUDE_DIR ${PKGCOMP}_LIBRARY) - - # HACK For bug in CMake v3.0: - set (${PKGCOMP}_FOUND ${${PKGCOMPUP}_FOUND}) - - # Set return variables - if (${PKGCOMP}_FOUND) - set (${PKGCOMP}_INCLUDE_DIRS ${${PKGCOMP}_INCLUDE_DIR}) - set (${PKGCOMP}_LIBRARIES ${${PKGCOMP}_LIBRARY}) - endif () - - # Set variables in parent scope - set (${PKGCOMP}_FOUND ${${PKGCOMP}_FOUND} PARENT_SCOPE) - set (${PKGCOMP}_INCLUDE_DIR ${${PKGCOMP}_INCLUDE_DIR} PARENT_SCOPE) - set (${PKGCOMP}_INCLUDE_DIRS ${${PKGCOMP}_INCLUDE_DIRS} PARENT_SCOPE) - set (${PKGCOMP}_LIBRARY ${${PKGCOMP}_LIBRARY} PARENT_SCOPE) - set (${PKGCOMP}_LIBRARIES ${${PKGCOMP}_LIBRARIES} PARENT_SCOPE) - set (${PKGCOMP}_IS_SHARED ${${PKGCOMP}_IS_SHARED} PARENT_SCOPE) - - endif () - -endfunction () - - - diff --git a/src/externals/pio2/cmake/LibMPI.cmake b/src/externals/pio2/cmake/LibMPI.cmake deleted file mode 100644 index f1116724831..00000000000 --- a/src/externals/pio2/cmake/LibMPI.cmake +++ /dev/null @@ -1,115 +0,0 @@ -include (CMakeParseArguments) - -# Find Valgrind to perform memory leak check -if (PIO_VALGRIND_CHECK) - find_program (VALGRIND_COMMAND NAMES valgrind) - if (VALGRIND_COMMAND) - set (VALGRIND_COMMAND_OPTIONS --leak-check=full --show-reachable=yes) - else () - message (WARNING "Valgrind not found: memory leak check could not be performed") - set (VALGRIND_COMMAND "") - endif () -endif () - -# -# - Functions for parallel testing with CTest -# - -#============================================================================== -# - Get the machine platform-specific -# -# Syntax: platform_name (RETURN_VARIABLE) -# -function (platform_name RETURN_VARIABLE) - - # Determine platform name from site name... 
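LibMPI.cmake, beginning here, defines add_mpi_test below, which registers a CTest entry that launches an executable under mpiexec (or one of the platform wrapper scripts). A minimal parallel test of the kind it launches; the body is illustrative, and a nonzero exit status from any rank is what makes the registered test fail:

```c
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0)
        printf("running on %d MPI tasks\n", size);

    /* A real PIO test would exercise the I/O paths here and return
       nonzero on failure so the CTest entry added by add_mpi_test fails. */
    MPI_Finalize();
    return 0;
}
```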
- site_name (SITENAME) - - - if (SITENAME MATCHES "^laramie" OR - SITENAME MATCHES "^cheyenne" OR - SITENAME MATCHES "^chadmin") - - set (${RETURN_VARIABLE} "nwscla" PARENT_SCOPE) - - # ALCF/Argonne Machines - elseif (SITENAME MATCHES "^mira" OR - SITENAME MATCHES "^cetus" OR - SITENAME MATCHES "^vesta" OR - SITENAME MATCHES "^cooley") - - set (${RETURN_VARIABLE} "alcf" PARENT_SCOPE) - - # NERSC Machines - elseif (SITENAME MATCHES "^edison" OR - SITENAME MATCHES "^cori") - - set (${RETURN_VARIABLE} "nersc" PARENT_SCOPE) - - # NCSA Machine (Blue Waters) - elseif (SITENAME MATCHES "^h2ologin") - - set (${RETURN_VARIABLE} "ncsa" PARENT_SCOPE) - - # OLCF/Oak Ridge Machines - elseif (SITENAME MATCHES "^eos" OR - SITENAME MATCHES "^titan") - - set (${RETURN_VARIABLE} "olcf" PARENT_SCOPE) - - else () - - set (${RETURN_VARIABLE} "unknown" PARENT_SCOPE) - - endif () - -endfunction () - -#============================================================================== -# - Add a new parallel test -# -# Syntax: add_mpi_test ( -# EXECUTABLE -# ARGUMENTS ... -# NUMPROCS -# TIMEOUT ) -function (add_mpi_test TESTNAME) - - # Parse the input arguments - set (options) - set (oneValueArgs NUMPROCS TIMEOUT EXECUTABLE) - set (multiValueArgs ARGUMENTS) - cmake_parse_arguments (${TESTNAME} "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - # Store parsed arguments for convenience - set (exec_file ${${TESTNAME}_EXECUTABLE}) - set (exec_args ${${TESTNAME}_ARGUMENTS}) - set (num_procs ${${TESTNAME}_NUMPROCS}) - set (timeout ${${TESTNAME}_TIMEOUT}) - - # Get the platform name - platform_name (PLATFORM) - - # Default ("unknown" platform) execution - if (PLATFORM STREQUAL "unknown") - - # Run tests directly from the command line - set(EXE_CMD ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${num_procs} - ${MPIEXEC_PREFLAGS} ${VALGRIND_COMMAND} ${VALGRIND_COMMAND_OPTIONS} ${exec_file} - ${MPIEXEC_POSTFLAGS} ${exec_args}) - - else () - - # Run tests from the platform-specific executable - set (EXE_CMD ${CMAKE_SOURCE_DIR}/cmake/mpiexec.${PLATFORM} - ${num_procs} ${VALGRIND_COMMAND} ${VALGRIND_COMMAND_OPTIONS} ${exec_file} ${exec_args}) - - endif () - - # Add the test to CTest - add_test(NAME ${TESTNAME} COMMAND ${EXE_CMD}) - - # Adjust the test timeout - set_tests_properties(${TESTNAME} PROPERTIES TIMEOUT ${timeout}) - -endfunction() diff --git a/src/externals/pio2/cmake/TryHDF5_HAS_SZIP.c b/src/externals/pio2/cmake/TryHDF5_HAS_SZIP.c deleted file mode 100644 index c4013455c2d..00000000000 --- a/src/externals/pio2/cmake/TryHDF5_HAS_SZIP.c +++ /dev/null @@ -1,13 +0,0 @@ -/* - * HDF5 C Test for szip filter - */ -#include "H5pubconf.h" - -int main() -{ -#if H5_HAVE_FILTER_SZIP==1 - return 0; -#else - XXX; -#endif -} diff --git a/src/externals/pio2/cmake/TryNetCDF_DAP.c b/src/externals/pio2/cmake/TryNetCDF_DAP.c deleted file mode 100644 index 9a895e8acb3..00000000000 --- a/src/externals/pio2/cmake/TryNetCDF_DAP.c +++ /dev/null @@ -1,13 +0,0 @@ -/* - * NetCDF C Test for DAP Support - */ -#include "netcdf_meta.h" - -int main() -{ -#if NC_HAS_DAP==1 - return 0; -#else - XXX; -#endif -} diff --git a/src/externals/pio2/cmake/TryNetCDF_PARALLEL.c b/src/externals/pio2/cmake/TryNetCDF_PARALLEL.c deleted file mode 100644 index 7b041f63716..00000000000 --- a/src/externals/pio2/cmake/TryNetCDF_PARALLEL.c +++ /dev/null @@ -1,13 +0,0 @@ -/* - * NetCDF C Test for parallel Support - */ -#include "netcdf_meta.h" - -int main() -{ -#if NC_HAS_PARALLEL==1 - return 0; -#else - XXX; -#endif -} diff --git 
a/src/externals/pio2/cmake/TryNetCDF_PNETCDF.c b/src/externals/pio2/cmake/TryNetCDF_PNETCDF.c deleted file mode 100644 index 60a0c08864d..00000000000 --- a/src/externals/pio2/cmake/TryNetCDF_PNETCDF.c +++ /dev/null @@ -1,13 +0,0 @@ -/* - * NetCDF C Test for PnetCDF Support - */ -#include "netcdf_meta.h" - -int main() -{ -#if NC_HAS_PNETCDF==1 - return 0; -#else - XXX; -#endif -} diff --git a/src/externals/pio2/cmake/mpiexec.alcf b/src/externals/pio2/cmake/mpiexec.alcf deleted file mode 100755 index 48765fd022a..00000000000 --- a/src/externals/pio2/cmake/mpiexec.alcf +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# -# Arguments: -# -# $1 - Number of MPI Tasks -# $2+ - Executable and its arguments -# - -NP=$1 -shift - -${BGQ_RUNJOB:-runjob} --np $NP --block $COBALT_PARTNAME \ - --envs GPFSMPIO_NAGG_PSET=16 GPFSMPIO_ONESIDED_ALWAYS_RMW=1 \ - GPFSMPIO_BALANCECONTIG=1 GPFSMPIO_WRITE_AGGMETHOD=2 \ - GPFSMPIO_READ_AGGMETHOD=2 PAMID_TYPED_ONESIDED=1 \ - PAMID_RMA_PENDING=1M GPFSMPIO_BRIDGERINGAGG=1 : $@ diff --git a/src/externals/pio2/cmake/mpiexec.ncsa b/src/externals/pio2/cmake/mpiexec.ncsa deleted file mode 100755 index 2bb0d1c8468..00000000000 --- a/src/externals/pio2/cmake/mpiexec.ncsa +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -# -# Arguments: -# -# $1 - Number of MPI Tasks -# $2+ - Executable and its arguments -# - -NP=$1 -shift - -aprun -n $NP $@ diff --git a/src/externals/pio2/cmake/mpiexec.nersc b/src/externals/pio2/cmake/mpiexec.nersc deleted file mode 100755 index e8774b0e989..00000000000 --- a/src/externals/pio2/cmake/mpiexec.nersc +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -# -# Arguments: -# -# $1 - Number of MPI Tasks -# $2+ - Executable and its arguments -# - -NP=$1 -shift - -srun -n $NP $@ diff --git a/src/externals/pio2/cmake/mpiexec.nwscla b/src/externals/pio2/cmake/mpiexec.nwscla deleted file mode 100755 index 9aea7be13e7..00000000000 --- a/src/externals/pio2/cmake/mpiexec.nwscla +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# Arguments: -# -# $1 - Number of MPI Tasks -# $2+ - Executable and its arguments -# - -NP=$1 -shift -mpirun -np $NP $@ diff --git a/src/externals/pio2/cmake/mpiexec.olcf b/src/externals/pio2/cmake/mpiexec.olcf deleted file mode 100755 index 2bb0d1c8468..00000000000 --- a/src/externals/pio2/cmake/mpiexec.olcf +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -# -# Arguments: -# -# $1 - Number of MPI Tasks -# $2+ - Executable and its arguments -# - -NP=$1 -shift - -aprun -n $NP $@ diff --git a/src/externals/pio2/cmake_config.h.in b/src/externals/pio2/cmake_config.h.in deleted file mode 100644 index cfd26d77601..00000000000 --- a/src/externals/pio2/cmake_config.h.in +++ /dev/null @@ -1,35 +0,0 @@ -/** @file - * - * This is the template for the config.h file, which is created at - * build-time by cmake. - */ -#ifndef _PIO_CONFIG_ -#define _PIO_CONFIG_ -/* Set to avoid warning in intel19 compiler wrt strnlen */ -#define _GNU_SOURCE - -/** The major part of the version number. */ -#define PIO_VERSION_MAJOR @VERSION_MAJOR@ - -/** The minor part of the version number. */ -#define PIO_VERSION_MINOR @VERSION_MINOR@ - -/** The patch part of the version number. */ -#define PIO_VERSION_PATCH @VERSION_PATCH@ - -/** Set to non-zero to use native malloc. By defauly the PIO library - * will use the included bget() package for memory management. */ -#define PIO_USE_MALLOC @USE_MALLOC@ - -/** Set to non-zero to turn on logging. Output may be large. */ -#define PIO_ENABLE_LOGGING @ENABLE_LOGGING@ - -/** Size of MPI_Offset type. 
*/ -#define SIZEOF_MPI_OFFSET @SIZEOF_MPI_OFFSET@ - -/* buffer size for darray data. */ -#define PIO_BUFFER_SIZE @PIO_BUFFER_SIZE@ - -#define USE_VARD @USE_VARD@ - -#endif /* _PIO_CONFIG_ */ diff --git a/src/externals/pio2/configure.ac b/src/externals/pio2/configure.ac deleted file mode 100644 index 7e155b7cadb..00000000000 --- a/src/externals/pio2/configure.ac +++ /dev/null @@ -1,317 +0,0 @@ -## This is the autoconf file for the PIO library. -## Ed Hartnett 8/16/17 - - -# Initialize autoconf and automake. -AC_INIT(pio, 2.4.4-development) -AC_CONFIG_SRCDIR(src/clib/pio_darray.c) -AM_INIT_AUTOMAKE([foreign serial-tests]) - -# The PIO version, again. -AC_DEFINE([PIO_VERSION_MAJOR], [2], [PIO major version]) -AC_DEFINE([PIO_VERSION_MINOR], [4], [PIO minor version]) -AC_DEFINE([PIO_VERSION_PATCH], [4], [PIO patch version]) - -# Once more for the documentation. -AC_SUBST([VERSION_MAJOR], [2]) -AC_SUBST([VERSION_MINOR], [4]) -AC_SUBST([VERSION_PATCH], [4]) - -# The m4 directory holds macros for autoconf. -AC_CONFIG_MACRO_DIR([m4]) - -# Libtool initialisation. -LD=ld # Required for MPE to work. -LT_INIT - -# Find and learn about the C compiler. -AC_PROG_CC - -# Find and learn about the Fortran compiler. -AC_PROG_FC - -# Always use malloc in autotools builds. -AC_DEFINE([PIO_USE_MALLOC], [1], [use malloc for memory]) - -AC_MSG_CHECKING([whether a PIO_BUFFER_SIZE was specified]) -AC_ARG_WITH([piobuffersize], - [AS_HELP_STRING([--with-piobuffersize=], - [Specify buffer size for PIO.])], - [PIO_BUFFER_SIZE=$with_piobuffersize], [PIO_BUFFER_SIZE=134217728]) -AC_MSG_RESULT([$PIO_BUFFER_SIZE]) -AC_DEFINE_UNQUOTED([PIO_BUFFER_SIZE], [$PIO_BUFFER_SIZE], [buffer size for darray data.]) - -# Does the user want to enable logging? -AC_MSG_CHECKING([whether debug logging is enabled]) -AC_ARG_ENABLE([logging], - [AS_HELP_STRING([--enable-logging], - [enable debug logging capability (will negatively impact performance). \ - This debugging feature is probably only of interest to PIO developers.])]) -test "x$enable_logging" = xyes || enable_logging=no -AC_MSG_RESULT([$enable_logging]) -if test "x$enable_logging" = xyes; then - AC_DEFINE([PIO_ENABLE_LOGGING], 1, [If true, turn on logging.]) -fi - -# Does the user want to enable timing? -AC_MSG_CHECKING([whether GPTL timing library is used]) -AC_ARG_ENABLE([timing], - [AS_HELP_STRING([--enable-timing], - [enable use of the GPTL timing library.])]) -test "x$enable_timing" = xyes || enable_timing=no -AC_MSG_RESULT([$enable_timing]) -if test "x$enable_timing" = xyes; then - AC_DEFINE([TIMING], 1, [If true, use GPTL timing library.]) -fi -AM_CONDITIONAL(USE_GPTL, [test "x$enable_timing" = xyes]) - -# Does the user want to disable papi? -AC_MSG_CHECKING([whether PAPI should be enabled (if enable-timing is used)]) -AC_ARG_ENABLE([papi], [AS_HELP_STRING([--disable-papi], - [disable PAPI library use])]) -test "x$enable_papi" = xno || enable_papi=yes -AC_MSG_RESULT($enable_papi) - -# Does the user want to disable test runs? -AC_MSG_CHECKING([whether test runs should be enabled for make check]) -AC_ARG_ENABLE([test-runs], [AS_HELP_STRING([--disable-test-runs], - [disable running run_test.sh test scripts for make check. Tests will still be built.])]) -test "x$enable_test_runs" = xno || enable_test_runs=yes -AC_MSG_RESULT($enable_test_runs) -AM_CONDITIONAL(RUN_TESTS, [test "x$enable_test_runs" = xyes]) - -# Does the user want to enable Fortran library? 
-AC_MSG_CHECKING([whether Fortran library should be built]) -AC_ARG_ENABLE([fortran], - [AS_HELP_STRING([--enable-fortran], - [build the PIO Fortran library.])]) -test "x$enable_fortran" = xyes || enable_fortran=no -AC_MSG_RESULT([$enable_fortran]) -AM_CONDITIONAL(BUILD_FORTRAN, [test "x$enable_fortran" = xyes]) - -# Does the user want to use MPE library? -AC_MSG_CHECKING([whether use of MPE library is enabled]) -AC_ARG_ENABLE([mpe], - [AS_HELP_STRING([--enable-mpe], - [enable use of MPE library for timing and diagnostic info (may negatively impact performance).])]) -test "x$enable_mpe" = xyes || enable_mpe=no -AC_MSG_RESULT([$enable_mpe]) -if test "x$enable_mpe" = xyes; then - - AC_SEARCH_LIBS([pthread_setspecific], [pthread], [], [], []) - dnl AC_SEARCH_LIBS([MPE_Log_get_event_number], [mpe], [HAVE_LIBMPE=yes], [HAVE_LIBMPE=no], []) - dnl AC_SEARCH_LIBS([MPE_Init_mpi_core], [lmpe], [HAVE_LIBLMPE=yes], [HAVE_LIBLMPE=no], []) - AC_CHECK_HEADERS([mpe.h], [HAVE_MPE=yes], [HAVE_MPE=no]) - dnl if test "x$HAVE_LIBMPE" != xyes; then - dnl AC_MSG_ERROR([-lmpe not found but --enable-mpe used.]) - dnl fi - dnl if test "x$HAVE_LIBLMPE" != xyes; then - dnl AC_MSG_ERROR([-llmpe not found but --enable-mpe used.]) - dnl fi - if test $enable_fortran = yes; then - AC_MSG_ERROR([MPE not implemented in Fortran tests and examples. Build without --enable-fortran]) - fi - AC_DEFINE([USE_MPE], 1, [If true, use MPE timing library.]) - -fi - -# Does the user want to disable pnetcdf? -AC_MSG_CHECKING([whether pnetcdf is to be used]) -AC_ARG_ENABLE([pnetcdf], - [AS_HELP_STRING([--disable-pnetcdf], - [Disable pnetcdf use.])]) -test "x$enable_pnetcdf" = xno || enable_pnetcdf=yes -AC_MSG_RESULT([$enable_pnetcdf]) -AM_CONDITIONAL(BUILD_PNETCDF, [test "x$enable_pnetcdf" = xyes]) - -# Does the user want to build documentation? -AC_MSG_CHECKING([whether documentation should be build (requires doxygen)]) -AC_ARG_ENABLE([docs], - [AS_HELP_STRING([--enable-docs], - [enable building of documentation with doxygen.])]) -test "x$enable_docs" = xyes || enable_docs=no -AC_MSG_RESULT([$enable_docs]) - -# Does the user want to developer documentation? -AC_MSG_CHECKING([whether PIO developer documentation should be build (only for PIO developers)]) -AC_ARG_ENABLE([developer-docs], - [AS_HELP_STRING([--enable-developer-docs], - [enable building of PIO developer documentation with doxygen.])]) -test "x$enable_developer_docs" = xyes || enable_developer_docs=no -AC_MSG_RESULT([$enable_developer_docs]) - -# Developer docs enables docs. -if test "x$enable_developer_docs" = xyes; then - enable_docs=yes -fi -AM_CONDITIONAL(BUILD_DOCS, [test "x$enable_docs" = xyes]) - -# Is doxygen installed? -AC_CHECK_PROGS([DOXYGEN], [doxygen]) -if test -z "$DOXYGEN" -a "x$enable_docs" = xyes; then - AC_MSG_ERROR([Doxygen not found but --enable-docs used.]) -fi - -# If building docs, process Doxyfile.in into Doxyfile. 
-if test "x$enable_docs" = xyes; then - AC_SUBST([CMAKE_CURRENT_SOURCE_DIR], ["."]) - AC_SUBST([CMAKE_BINARY_DIR], [".."]) - if test "x$enable_fortran" = xno; then - AC_MSG_ERROR([--enable-fortran is required for documentation builds.]) - fi - AC_SUBST([FORTRAN_SRC_FILES], ["../src/flib/piodarray.f90 ../src/flib/pio.F90 ../src/flib/pio_kinds.F90 ../src/flib/piolib_mod.f90 ../src/flib/pionfatt_mod_2.f90 ../src/flib/pio_nf.F90 ../src/flib/pionfget_mod_2.f90 ../src/flib/pionfput_mod.f90 ../src/flib/pio_support.F90 ../src/flib/pio_types.F90"]) - if test "x$enable_developer_docs" = xyes; then - AC_SUBST([C_SRC_FILES], ["../src/clib ../src/ncint"]) - else - AC_SUBST([C_SRC_FILES], ["../src/clib/pio_nc.c ../src/clib/pio_nc4.c ../src/clib/pio_darray.c ../src/clib/pio_get_nc.c ../src/clib/pio_put_nc.c ../src/clib/pioc_support.c ../src/clib/pioc.c ../src/clib/pio_file.c ../src/clib/pio.h ../src/clib/pio_get_vard.c ../src/clib/pio_put_vard.c ../src/ncint/ncint_pio.c ../src/ncint/nc_put_vard.c ../src/ncint/nc_get_vard.c"]) - fi - AC_CONFIG_FILES([doc/Doxyfile]) -fi - -# NetCDF (at least classic) is required for PIO to build. -AC_DEFINE([_NETCDF], [1], [netCDF classic library available]) - -# ???? -AC_DEFINE([CPRGNU], [1], [defined by CMake build]) - -# We must have MPI to build PIO. -AC_DEFINE([HAVE_MPI], [1], [defined by CMake build]) - -# ??? -AC_DEFINE([INCLUDE_CMAKE_FCI], [1], [defined by CMake build]) - -# All builds are on LINUX. -AC_DEFINE([LINUX], [1], [defined by CMake build]) - -# Define to solve intel compiler warning. -AC_DEFINE([_GNU_SOURCE], [1], [solve strnlen declared implicitly warning on intel compiler]) - -# Check for netCDF library. -AC_CHECK_LIB([netcdf], [nc_create], [], [AC_MSG_ERROR([Can't find or link to the netcdf library.])]) - -# Check for pnetcdf library. -AC_CHECK_LIB([pnetcdf], [ncmpi_create], [], []) -if test "x$ac_cv_lib_pnetcdf_ncmpi_create" = xno -a $enable_pnetcdf = yes; then - AC_MSG_ERROR([Pnetcdf not found. Set CPPFLAGS/LDFLAGS or use --disable-pnetcdf.]) -fi - -# If we have parallel-netcdf, then set these as well. -if test x$ac_cv_lib_pnetcdf_ncmpi_create = xyes; then - AC_DEFINE([_PNETCDF], [1], [parallel-netcdf library available]) - AC_DEFINE([USE_PNETCDF_VARN], [1], [defined by CMake build]) - AC_DEFINE([USE_PNETCDF_VARN_ON_READ], [1], [defined by CMake build]) -fi - -# Do we have a parallel build of netCDF-4? -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"], -[[#if !NC_HAS_PARALLEL -# error -#endif] -])], [have_netcdf_par=yes], [have_netcdf_par=no]) - -AC_MSG_CHECKING([whether netCDF provides parallel IO]) -AC_MSG_RESULT([${have_netcdf_par}]) -if test x$have_netcdf_par = xyes; then - AC_DEFINE([_NETCDF4],[1],[Does netCDF library provide netCDF-4 with parallel access]) -fi -AM_CONDITIONAL(BUILD_NETCDF4, [test "x$have_netcdf_par" = xyes]) - -# Not working for some reason, so I will just set it... -AC_CHECK_TYPE([MPI_Offset], [], [], [#include ]) -if test "x${ac_cv_type_MPI_Offset}" = xyes; then - AC_CHECK_SIZEOF([MPI_Offset], [], [#include ]) -else - AC_MSG_ERROR([Unable to find type MPI_Offset in mpi.h]) -fi - -#AC_CHECK_SIZEOF([MPI_Offset], [], [[#include ]]) -#AC_DEFINE([SIZEOF_MPI_OFFSET], [8], [netCDF classic library available]) - -# If we want the timing library, we must find it. 
-if test "x$enable_timing" = xyes; then - AC_CHECK_HEADERS([gptl.h]) - AC_CHECK_LIB([gptl], [GPTLinitialize], [], - [AC_MSG_ERROR([Can't find or link to the GPTL library.])]) - if test "x$enable_fortran" = xyes; then - AC_LANG_PUSH([Fortran]) -# AC_CHECK_HEADERS([gptl.inc]) - AC_CHECK_LIB([gptlf], [gptlstart], [], - [AC_MSG_ERROR([Can't find or link to the GPTL Fortran library.])]) - AC_LANG_POP([Fortran]) - fi - - # Check for papi library. - AC_CHECK_LIB([papi], [PAPI_library_init]) - AC_MSG_CHECKING([whether system can support PAPI]) - have_papi=no - if test $enable_papi = yes; then - if test "x$ac_cv_lib_papi_PAPI_library_init" = xyes; then - # If we have PAPI library, check /proc/sys/kernel/perf_event_paranoid - # to see if we have permissions. - if test -f /proc/sys/kernel/perf_event_paranoid; then - if test `cat /proc/sys/kernel/perf_event_paranoid` != 1; then - AC_MSG_ERROR([PAPI library found, but /proc/sys/kernel/perf_event_paranoid != 1 - try sudo sh -c 'echo 1 >/proc/sys/kernel/perf_event_paranoid']) - fi - fi - AC_DEFINE([HAVE_PAPI], [1], [PAPI library is present and usable]) - have_papi=yes - fi - fi - AC_MSG_RESULT($have_papi) -fi -AM_CONDITIONAL([HAVE_PAPI], [test "x$have_papi" = xyes]) - -# Does the user want to build netcdf-c integration layer? -AC_MSG_CHECKING([whether netcdf-c integration layer should be build]) -AC_ARG_ENABLE([netcdf-integration], - [AS_HELP_STRING([--enable-netcdf-integration], - [enable building of netCDF C API integration.])]) -test "x$enable_netcdf_integration" = xyes || enable_netcdf_integration=no -AC_MSG_RESULT([$enable_netcdf_integration]) -if test "x$enable_netcdf_integration" = xyes -a "x$enable_timing" = xyes; then - AC_MSG_ERROR([Cannot use GPTL timing library with netCDF interation.]) -fi -if test "x$enable_netcdf_integration" = xyes -a "x$have_netcdf_par" = xno; then - AC_MSG_ERROR([Cannot use netCDF integration unless netCDF library was built for parallel I/O.]) -fi -# These are needed by ncdispatch.h. Only build with HDF5 parallel -# versions of netCDF. */ -if test "x$enable_netcdf_integration" = xyes; then - AC_DEFINE([HDF5_PARALLEL],[1],[Does HDF5 library provide parallel access]) - AC_DEFINE([USE_NETCDF4],[1],[Does HDF5 library provide parallel access]) - AC_DEFINE([NETCDF_INTEGRATION],[1],[Are we building with netCDF integration]) -fi - -AM_CONDITIONAL(BUILD_NCINT, [test "x$enable_netcdf_integration" = xyes]) -AM_CONDITIONAL(NETCDF_INTEGRATION, [test "x$enable_netcdf_integration" = xyes]) - -AC_CONFIG_FILES([tests/general/pio_tutil.F90:tests/general/util/pio_tutil.F90]) - -AC_CONFIG_LINKS([tests/unit/input.nl:tests/unit/input.nl]) - -# Create the config.h file. -AC_CONFIG_HEADERS([config.h]) - -# Create the makefiles. 
-AC_OUTPUT(Makefile - src/Makefile - src/clib/Makefile - src/ncint/Makefile - src/flib/Makefile - src/gptl/Makefile - tests/Makefile - tests/cunit/Makefile - tests/ncint/Makefile - tests/fncint/Makefile - tests/unit/Makefile - tests/general/Makefile - tests/general/util/Makefile - tests/performance/Makefile - doc/Makefile - doc/source/Makefile - doc/images/Makefile - examples/Makefile - examples/c/Makefile - examples/f03/Makefile - scripts/Makefile) diff --git a/src/externals/pio2/ctest/CTestEnvironment-alcf.cmake b/src/externals/pio2/ctest/CTestEnvironment-alcf.cmake deleted file mode 100644 index 607076479de..00000000000 --- a/src/externals/pio2/ctest/CTestEnvironment-alcf.cmake +++ /dev/null @@ -1,14 +0,0 @@ -#============================================================================== -# -# This file sets the environment variables needed to configure and build -# on the Argonne Leadership Computing Facility systems -# (mira/cetus/vesta/cooley). -# -#============================================================================== - -# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already -# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. - -# Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPREFER_STATIC=TRUE") diff --git a/src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake b/src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake deleted file mode 100644 index ddf04f063a6..00000000000 --- a/src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake +++ /dev/null @@ -1,30 +0,0 @@ -#============================================================================== -# -# This file sets the environment variables needed to configure and build -# on Argonne Linux workstations -# -#============================================================================== - -# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already -# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. 
- -# Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DNetCDF_PATH=$ENV{NETCDFROOT}") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPnetCDF_PATH=$ENV{PNETCDFROOT}") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DHDF5_PATH=$ENV{HDF5ROOT}") - -# If ENABLE_COVERAGE environment variable is set, then enable code coverage -if (DEFINED ENV{ENABLE_COVERAGE}) - set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_ENABLE_COVERAGE=ON") -endif () - -# If VALGRIND_CHECK environment variable is set, then enable memory leak check using Valgrind -if (DEFINED ENV{VALGRIND_CHECK}) - set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_VALGRIND_CHECK=ON") -endif () - -# If USE_MALLOC environment variable is set, then use native malloc (instead of bget package) -if (DEFINED ENV{USE_MALLOC}) - set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MALLOC=ON") -endif () diff --git a/src/externals/pio2/ctest/CTestEnvironment-cgd.cmake b/src/externals/pio2/ctest/CTestEnvironment-cgd.cmake deleted file mode 100644 index eb8606e46d0..00000000000 --- a/src/externals/pio2/ctest/CTestEnvironment-cgd.cmake +++ /dev/null @@ -1,17 +0,0 @@ -#============================================================================== -# -# This file sets the environment variables needed to configure and build -# on the NCAR CGD cluster Hobart -# -#============================================================================== - -# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already -# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. - -# Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DCMAKE_VERBOSE_MAKEFILE=TRUE -DPNETCDF_DIR=$ENV{PNETCDF_PATH} -DNETCDF_DIR=$ENV{NETCDF_PATH}") - -# If MPISERIAL environment variable is set, then enable MPISERIAL -if (DEFINED ENV{MPISERIAL}) - set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MPISERIAL=ON") -endif () diff --git a/src/externals/pio2/ctest/CTestEnvironment-ncsa.cmake b/src/externals/pio2/ctest/CTestEnvironment-ncsa.cmake deleted file mode 100644 index 706946ec2bc..00000000000 --- a/src/externals/pio2/ctest/CTestEnvironment-ncsa.cmake +++ /dev/null @@ -1,22 +0,0 @@ -#============================================================================== -# -# This file sets the environment variables needed to configure and build -# on the NCSA systems -# (Blue Waters). -# -#============================================================================== - -# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already -# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. 
- -# Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPREFER_STATIC=TRUE") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DNetCDF_PATH=$ENV{NETCDF_DIR}") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPnetCDF_PATH=$ENV{PARALLEL_NETCDF_DIR}") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DHDF5_PATH=$ENV{HDF5_DIR}") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_INCLUDE_PATH=$ENV{MPICH_DIR}/include") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_INCLUDE_PATH=$ENV{MPICH_DIR}/include") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpich.a") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpichf90.a") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DCMAKE_SYSTEM_NAME=Catamount") diff --git a/src/externals/pio2/ctest/CTestEnvironment-nersc.cmake b/src/externals/pio2/ctest/CTestEnvironment-nersc.cmake deleted file mode 100644 index 6b1ac8fa791..00000000000 --- a/src/externals/pio2/ctest/CTestEnvironment-nersc.cmake +++ /dev/null @@ -1,22 +0,0 @@ -#============================================================================== -# -# This file sets the environment variables needed to configure and build -# on the NERSC systems -# (edison/ corip1). -# -#============================================================================== - -# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already -# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. - -# Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPREFER_STATIC=TRUE") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DNETCDF_DIR=$ENV{NETCDF_DIR}") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPNETCDF_DIR=$ENV{PARALLEL_NETCDF_DIR}") -#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DHDF5_PATH=$ENV{HDF5_DIR}") -#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_INCLUDE_PATH=$ENV{MPICH_DIR}/include") -#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_INCLUDE_PATH=$ENV{MPICH_DIR}/include") -#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_C_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpich.a") -#set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DMPI_Fortran_LIBRARIES=$ENV{MPICH_DIR}/lib/libmpichf90.a") -set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DCMAKE_SYSTEM_NAME=Catamount") diff --git a/src/externals/pio2/ctest/CTestEnvironment-nwscla.cmake b/src/externals/pio2/ctest/CTestEnvironment-nwscla.cmake deleted file mode 100644 index efee6bf659d..00000000000 --- a/src/externals/pio2/ctest/CTestEnvironment-nwscla.cmake +++ /dev/null @@ -1,18 +0,0 @@ -#============================================================================== -# -# This file sets the environment variables needed to configure and build -# on the new NCAR Wyoming Supercomputing Center systems -# (laramie/cheyenne). -# -#============================================================================== - -# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already -# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. 
- -# Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE ") - -# If MPISERIAL environment variable is set, then enable MPISERIAL -if (DEFINED ENV{MPISERIAL}) - set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DPIO_USE_MPISERIAL=ON") -endif () diff --git a/src/externals/pio2/ctest/CTestEnvironment-unknown.cmake b/src/externals/pio2/ctest/CTestEnvironment-unknown.cmake deleted file mode 100644 index 8d51f15fffd..00000000000 --- a/src/externals/pio2/ctest/CTestEnvironment-unknown.cmake +++ /dev/null @@ -1,12 +0,0 @@ -#============================================================================== -# -# This file sets the CMake variables needed to configure and build -# on the default ("unknown") system. -# -#============================================================================== - -# Assume all package locations (NetCDF, PnetCDF, HDF5, etc) are already -# set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. - -# Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE") diff --git a/src/externals/pio2/ctest/CTestScript-Test.cmake b/src/externals/pio2/ctest/CTestScript-Test.cmake deleted file mode 100644 index 79aec3bca68..00000000000 --- a/src/externals/pio2/ctest/CTestScript-Test.cmake +++ /dev/null @@ -1,29 +0,0 @@ -#============================================================================== -# -# This is the CTest script for generating test results for submission to the -# CTest Dashboard site: my.cdash.org. -# -# Example originally stolen from: -# http://www.vtk.org/Wiki/CTest:Using_CTEST_and_CDASH_without_CMAKE -#============================================================================== - -#------------------------------------------- -#-- Get the common build information -#------------------------------------------- - -set (CTEST_SITE $ENV{PIO_DASHBOARD_SITE}) -set (CTEST_BUILD_NAME $ENV{PIO_DASHBOARD_BUILD_NAME}) -set (CTEST_SOURCE_DIRECTORY $ENV{PIO_DASHBOARD_SOURCE_DIR}) -set (CTEST_BINARY_DIRECTORY $ENV{PIO_DASHBOARD_BINARY_DIR}) - -# ----------------------------------------------------------- -# -- Run CTest- TESTING ONLY (Appended to existing TAG) -# ----------------------------------------------------------- - -## -- Start -ctest_start("${CTEST_SCRIPT_ARG}" APPEND) - -## -- TEST -ctest_test() - -## Don't submit! Submission handled by main CTestScript diff --git a/src/externals/pio2/ctest/runcdash-alcf-ibm.sh b/src/externals/pio2/ctest/runcdash-alcf-ibm.sh deleted file mode 100755 index 9f36996bdaf..00000000000 --- a/src/externals/pio2/ctest/runcdash-alcf-ibm.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -# Manually set environment variables for CTest run/build -GIT=/soft/versioning/git/2.3.0/bin/git -CTEST=/soft/buildtools/cmake/3.3.0/bin/ctest - -export LIBZ=/soft/libraries/alcf/current/xl/ZLIB -export HDF5=/soft/libraries/hdf5/1.8.14/cnk-xl/V1R2M2-20150213 -export NETCDF=/soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/V1R2M2-20150213 -export PNETCDF=/soft/libraries/pnetcdf/1.6.0/cnk-xl/V1R2M2-20150213 - -export CC=/soft/compilers/wrappers/xl/mpixlc_r -export FC=/soft/compilers/wrappers/xl/mpixlf90_r - -export PIO_DASHBOARD_ROOT=`pwd`/dashboard -export PIO_COMPILER_ID=Cray-`$CC -qversion | head -n 2 | tail -n 1 | cut -d' ' -f2` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! 
-d src ]; then - $GIT clone https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - -$CTEST -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-anlworkstation.sh b/src/externals/pio2/ctest/runcdash-anlworkstation.sh deleted file mode 100755 index 44651b2f3e6..00000000000 --- a/src/externals/pio2/ctest/runcdash-anlworkstation.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -source /software/common/adm/packages/softenv-1.6.2/etc/softenv-load.sh -source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.sh - -soft add +gcc-6.2.0 -soft add +mpich-3.2-gcc-6.2.0 -soft add +cmake-3.5.1 - -export NETCDFROOT=/soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-parallel/gcc-6.2.0 -export PNETCDFROOT=/soft/apps/packages/climate/pnetcdf/1.7.0/gcc-6.2.0 -export HDF5ROOT=/soft/apps/packages/climate/hdf5/1.8.16-parallel/gcc-6.2.0 - -export CC=mpicc -export FC=mpifort - -export PIO_DASHBOARD_SITE=anlworkstation-`hostname` -export PIO_DASHBOARD_ROOT=/sandbox/dashboard -export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src -export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} -export PIO_COMPILER_ID=gcc-`gcc --version | head -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -echo "CTEST_SCRIPT_DIRECTORY="${CTEST_SCRIPT_DIRECTORY} -echo "PIO_DASHBOARD_SOURCE_DIR="${PIO_DASHBOARD_SOURCE_DIR} - -if [ ! -d src ]; then - git clone --branch develop https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh b/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh deleted file mode 100755 index 57cad940539..00000000000 --- a/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module purge -module load compiler/gnu/5.4.0 -module load tool/parallel-netcdf/1.8.1/gnu-5.4.0/openmpi - -export CC=mpicc -export FC=mpif90 -export PIO_DASHBOARD_SITE="cgd" -export PIO_DASHBOARD_ROOT=/scratch/cluster/jedwards/dashboard -export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src -export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} -export PIO_COMPILER_ID=gcc-`gcc --version | head -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -echo "CTEST_SCRIPT_DIRECTORY="${CTEST_SCRIPT_DIRECTORY} -echo "PIO_DASHBOARD_SOURCE_DIR="${PIO_DASHBOARD_SOURCE_DIR} - -if [ ! 
-d src ]; then - git clone --branch develop https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - - -ctest -S CTestScript.cmake,${model} -VV -DCTEST_CONFIGURE_OPTIONS="-DCMAKE_EXE_LINKER_FLAGS=-ldl" diff --git a/src/externals/pio2/ctest/runcdash-cgd-nag.sh b/src/externals/pio2/ctest/runcdash-cgd-nag.sh deleted file mode 100755 index e413186131a..00000000000 --- a/src/externals/pio2/ctest/runcdash-cgd-nag.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module purge -module load compiler/nag/6.1 -module load tool/parallel-netcdf/1.7.0/nag/mvapich2 - -export CC=mpicc -export FC=mpif90 -export PIO_DASHBOARD_SITE="cgd" -export PIO_DASHBOARD_ROOT=/scratch/cluster/jedwards/dashboard -export CTEST_SCRIPT_DIRECTORY=${PIO_DASHBOARD_ROOT}/src -export PIO_DASHBOARD_SOURCE_DIR=${CTEST_SCRIPT_DIRECTORY} -export PIO_COMPILER_ID=Nag-6.1-gcc-`gcc --version | head -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -echo "CTEST_SCRIPT_DIRECTORY="${CTEST_SCRIPT_DIRECTORY} -echo "PIO_DASHBOARD_SOURCE_DIR="${PIO_DASHBOARD_SOURCE_DIR} - -if [ ! -d src ]; then - git clone --branch develop https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-nersc-cray.sh b/src/externals/pio2/ctest/runcdash-nersc-cray.sh deleted file mode 100755 index d3516cea7d5..00000000000 --- a/src/externals/pio2/ctest/runcdash-nersc-cray.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module rm PrgEnv-intel -module rm PrgEnv-cray -module rm PrgEnv-gnu -module rm intel -module rm cce -module rm cray-parallel-netcdf -module rm cray-parallel-hdf5 -module rm pmi -module rm cray-libsci -module rm cray-mpich2 -module rm cray-mpich -module rm cray-netcdf -module rm cray-hdf5 -module rm cray-netcdf-hdf5parallel -module rm craype-sandybridge -module rm craype-ivybridge -module rm craype-haswell -module rm craype -module load PrgEnv-cray - -case "$NERSC_HOST" in - edison) - cd $CSCRATCH/dashboard - module switch cce cce/8.5.1 - module load craype-ivybridge - module load git/2.4.6 - module load cmake/3.3.2 - module load cray-hdf5-parallel/1.8.16 - module load cray-netcdf-hdf5parallel/4.3.3.1 - module load cray-parallel-netcdf/1.7.0 - ;; - cori) - cd $SCRATCH/dashboard - module switch cce cce/8.5.4 - module load craype-mic-knl - module load git/2.9.1 - module load cmake/3.3.2 - module load cray-hdf5-parallel/1.8.16 - module load cray-netcdf-hdf5parallel/4.3.3.1 - module load cray-parallel-netcdf/1.7.0 - ;; - -esac - -export CC=cc -export FC=ftn - -export PIO_DASHBOARD_ROOT=`pwd`/dashboard -export PIO_COMPILER_ID=Cray-`$CC -V 2>&1 | cut -d' ' -f5` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! 
-d src ]; then - git clone --branch develop https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - -export HDF5_DISABLE_VERSION_CHECK=2 - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-nersc-intel.sh b/src/externals/pio2/ctest/runcdash-nersc-intel.sh deleted file mode 100755 index 55c80559b60..00000000000 --- a/src/externals/pio2/ctest/runcdash-nersc-intel.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module rm PrgEnv-intel -module rm PrgEnv-cray -module rm PrgEnv-gnu -module rm intel -module rm cce -module rm cray-parallel-netcdf -module rm cray-parallel-hdf5 -module rm pmi -module rm cray-libsci -module rm cray-mpich2 -module rm cray-mpich -module rm cray-netcdf -module rm cray-hdf5 -module rm cray-netcdf-hdf5parallel -module rm craype-sandybridge -module rm craype-ivybridge -module rm craype-haswell -module rm craype -module load PrgEnv-intel - -case "$NERSC_HOST" in - edison) - cd $CSCRATCH/dashboard - module switch intel intel/16.0.0.109 - module load craype-ivybridge - module load git/2.4.6 - module load cmake/3.3.2 - module load cray-hdf5-parallel/1.8.16 - module load cray-netcdf-hdf5parallel/4.3.3.1 - module load cray-parallel-netcdf/1.7.0 - ;; - cori) - cd $SCRATCH/dashboard - module switch intel intel/17.0.1.132 - module load craype-mic-knl - module load git/2.9.1 - module load cmake/3.3.2 - module load cray-hdf5-parallel/1.8.16 - module load cray-netcdf-hdf5parallel/4.3.3.1 - module load cray-parallel-netcdf/1.7.0 - ;; - -esac - -export CC=cc -export FC=ftn - -export PIO_DASHBOARD_ROOT=`pwd`/dashboard -export PIO_COMPILER_ID=Intel-`$CC --version | head -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! -d src ]; then - git clone --branch develop https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - -export HDF5_DISABLE_VERSION_CHECK=2 -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh b/src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh deleted file mode 100755 index 68ac5826be9..00000000000 --- a/src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module reset -module unload netcdf -module swap intel intel/15.0.3 -module load git/2.3.0 -module load cmake/3.0.2 -module load netcdf/4.3.3.1 - -export MPISERIAL=/glade/u/home/jedwards/mpi-serial/intel15.0.3/ - -export CC=icc -export FC=ifort - -export PIO_DASHBOARD_ROOT=`pwd`/dashboard -export PIO_COMPILER_ID=Serial-Intel-`$CC --version | head -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! 
-d src ]; then - git clone --branch develop https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-nwsc-intel.sh b/src/externals/pio2/ctest/runcdash-nwsc-intel.sh deleted file mode 100755 index 1e72e9a1b31..00000000000 --- a/src/externals/pio2/ctest/runcdash-nwsc-intel.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module reset -module unload netcdf -module swap intel intel/16.0.3 -module load git/2.3.0 -module load cmake/3.0.2 -module load netcdf-mpi/4.4.1 -module load pnetcdf/1.7.0 - -export CC=mpicc -export FC=mpif90 - -export PIO_DASHBOARD_ROOT=`pwd`/dashboard -export PIO_COMPILER_ID=Intel-`$CC --version | head -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! -d src ]; then - git clone --branch develop https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout develop -git pull origin develop - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-nwscla-gnu.sh b/src/externals/pio2/ctest/runcdash-nwscla-gnu.sh deleted file mode 100755 index cdc9eb9916d..00000000000 --- a/src/externals/pio2/ctest/runcdash-nwscla-gnu.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module reset -module unload netcdf -module swap intel gnu/8.1.0 -module swap mpt openmpi/3.1.0 -module load git/2.10.2 -module load cmake/3.12.1 -module load netcdf/4.6.1 -module load pnetcdf/1.10.0 - -export CC=mpicc -export FC=mpif90 - -export PIO_DASHBOARD_ROOT=/glade/u/home/jedwards/sandboxes/dashboard -export PIO_COMPILER_ID=GNU-`$CC --version | head -n 1 | tail -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! -d src ]; then - git clone https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout master -git pull origin master - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-nwscla-intel.sh b/src/externals/pio2/ctest/runcdash-nwscla-intel.sh deleted file mode 100755 index 9393086ed8d..00000000000 --- a/src/externals/pio2/ctest/runcdash-nwscla-intel.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -source /etc/profile.d/modules.sh - -module reset -module unload netcdf -module swap intel intel/19.0.2 -module switch mpt mpt/2.19 -module load cmake/3.7.2 -module load netcdf-mpi/4.6.1 -module load pnetcdf/1.11.0 -echo "MODULE LIST..." -module list - -export CC=mpicc -export FC=mpif90 -export MPI_TYPE_DEPTH=24 -export PIO_DASHBOARD_ROOT=/glade/scratch/jedwards/dashboard -export PIO_COMPILER_ID=Intel-`$CC --version | head -n 1 | cut -d' ' -f3` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! 
-d src ]; then - git clone https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout master -git pull origin master - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runcdash-nwscla-pgi.sh b/src/externals/pio2/ctest/runcdash-nwscla-pgi.sh deleted file mode 100755 index 08538185f44..00000000000 --- a/src/externals/pio2/ctest/runcdash-nwscla-pgi.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh - -# Get/Generate the Dashboard Model -if [ $# -eq 0 ]; then - model=Experimental -else - model=$1 -fi - -module reset -module unload netcdf -module swap intel pgi/17.9 -module swap mpt mpt/2.19 -module load git/2.10.2 -module load cmake/3.12.1 -module load netcdf-mpi/4.6.1 -module load pnetcdf/1.11.0 - -export CC=mpicc -export FC=mpif90 -export MPI_TYPE_DEPTH=24 -export PIO_DASHBOARD_ROOT=/glade/u/home/jedwards/sandboxes/dashboard -export PIO_COMPILER_ID=PGI-`$CC --version | head -n 2 | tail -n 1 | cut -d' ' -f2` - -if [ ! -d "$PIO_DASHBOARD_ROOT" ]; then - mkdir "$PIO_DASHBOARD_ROOT" -fi -cd "$PIO_DASHBOARD_ROOT" - -if [ ! -d src ]; then - git clone https://github.com/PARALLELIO/ParallelIO src -fi -cd src -git checkout master -git pull origin master - -ctest -S CTestScript.cmake,${model} -VV diff --git a/src/externals/pio2/ctest/runctest-alcf.sh b/src/externals/pio2/ctest/runctest-alcf.sh deleted file mode 100755 index 6b5fa20f1c0..00000000000 --- a/src/externals/pio2/ctest/runctest-alcf.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh -#============================================================================== -# -# This script defines how to run CTest on the Argonne Leadership Computing -# Facility systems (mira/cetus/vesta/cooley). -# -# This assumes the CTest model name (e.g., "Nightly") is passed to it when -# run. -# -#============================================================================== - -# Get the CTest script directory -scrdir=$1 - -# Get the CTest model name -model=$2 - -# Write QSUB submission script with the test execution command -echo "#!/bin/sh" > runctest.sh -echo "CTESTCMD=`which ctest`" >> runctest.sh -echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh - -# Make the QSUB script executable -chmod +x runctest.sh - -# Submit the job to the queue -jobid=`qsub -t 20 -n 4 --proccount 4 \ - --env PIO_DASHBOARD_SITE=$PIO_DASHBOARD_SITE \ - --env PIO_DASHBOARD_BUILD_NAME=$PIO_DASHBOARD_BUILD_NAME \ - --env PIO_DASHBOARD_SOURCE_DIR=$PIO_DASHBOARD_SOURCE_DIR \ - --env PIO_DASHBOARD_BINARY_DIR=$PIO_DASHBOARD_BINARY_DIR \ - --mode script runctest.sh` - -# Wait for the job to complete before exiting -while true; do - status=`qstat $jobid` - if [ "$status" == "" ]; then - break - else - sleep 10 - fi -done diff --git a/src/externals/pio2/ctest/runctest-anlworkstation.sh b/src/externals/pio2/ctest/runctest-anlworkstation.sh deleted file mode 100755 index 9718a834804..00000000000 --- a/src/externals/pio2/ctest/runctest-anlworkstation.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -#============================================================================== -# -# This script defines how to run CTest on the Argonne Linux workstations. -# -# This assumes the CTest model name (e.g., "Nightly") is passed to it when -# run. 
-# -#============================================================================== - -# Get the CTest script directory -scrdir=$1 - -# Get the CTest model name -model=$2 - -# Run the "ctest" command in another process -ctest -S ${scrdir}/CTestScript-Test.cmake,${model} -V diff --git a/src/externals/pio2/ctest/runctest-cgd.sh b/src/externals/pio2/ctest/runctest-cgd.sh deleted file mode 100755 index bbd31ccf5d0..00000000000 --- a/src/externals/pio2/ctest/runctest-cgd.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh -#============================================================================== -# -# This script defines how to run CTest on the NCAR CGD local cluster -# Hobart. -# -# This assumes the CTest model name (e.g., "Nightly") is passed to it when -# run. -# -#============================================================================== - -# Get the CTest script directory -scrdir=$1 - -# Get the CTest model name -model=$2 - -# Write QSUB submission script with the test execution command -echo "#!/bin/sh" > runctest.sh -echo "export PIO_DASHBOARD_BUILD_NAME=${PIO_DASHBOARD_BUILD_NAME}" >> runctest.sh -echo "export PIO_DASHBOARD_SOURCE_DIR=${PIO_DASHBOARD_BINARY_DIR}/../src/" >> runctest.sh -echo "export PIO_DASHBOARD_BINARY_DIR=${PIO_DASHBOARD_BINARY_DIR}" >> runctest.sh -echo "export PIO_DASHBOARD_SITE=cgd-${HOSTNAME}" >> runctest.sh - -echo "CTESTCMD=`which ctest`" >> runctest.sh -echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh - -# Make the QSUB script executable -chmod +x runctest.sh - -# Submit the job to the queue -jobid=`/usr/local/bin/qsub -l nodes=1:ppn=8 runctest.sh -q short` - -# Wait for the job to complete before exiting -while true; do - status=`/usr/local/bin/qstat $jobid` - echo $status - if [ "$status" == "" ]; then - break - else - sleep 10 - fi -done - -exit 0 diff --git a/src/externals/pio2/ctest/runctest-ncsa.sh b/src/externals/pio2/ctest/runctest-ncsa.sh deleted file mode 100755 index c3cd75e3001..00000000000 --- a/src/externals/pio2/ctest/runctest-ncsa.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -#============================================================================== -# -# This script defines how to run CTest on the National Center for -# Supercomputing Applications system (blue waters). -# -# This assumes the CTest model name (e.g., "Nightly") is passed to it when -# run. 
-# -#============================================================================== - -# Get the CTest script directory -scrdir=$1 - -# Get the CTest model name -model=$2 - -# Write QSUB submission script with the test execution command -echo "#!/bin/sh" > runctest.pbs -echo "#PBS -q debug" >> runctest.pbs -echo "#PBS -l mppwidth=24" >> runctest.pbs -echo "#PBS -l walltime=00:20:00" >> runctest.pbs -echo "#PBS -v PIO_DASHBOARD_SITE,PIO_DASHBOARD_BUILD_NAME,PIO_DASHBOARD_SOURCE_DIR,PIO_DASHBOARD_BINARY_DIR" >> runctest.pbs -echo "cd \$PBS_O_WORKDIR" >> runctest.pbs -echo "CTEST_CMD=`which ctest`" >> runctest.pbs -echo "\$CTEST_CMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.pbs - -# Submit the job to the queue -jobid=`qsub runctest.pbs` - -# Wait for the job to complete before exiting -while true; do - status=`qstat $jobid` - if [ "$status" == "" ]; then - break - else - sleep 10 - fi -done diff --git a/src/externals/pio2/ctest/runctest-nersc.sh b/src/externals/pio2/ctest/runctest-nersc.sh deleted file mode 100755 index a84d26bbeb8..00000000000 --- a/src/externals/pio2/ctest/runctest-nersc.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/sh -#============================================================================== -# -# This script defines how to run CTest on the National Energy Research -# Scientific Computing Center systems (edison/cori). -# -# This assumes the CTest model name (e.g., "Nightly") is passed to it when -# run. -# -#============================================================================== - -# Get the CTest script directory -scrdir=$1 - -# Get the CTest model name -model=$2 - -# Write QSUB submission script with the test execution command -echo "#!/bin/sh" > runctest.slurm -echo "#SBATCH --partition debug" >> runctest.slurm -echo "#SBATCH --nodes=1" >> runctest.slurm -case "$NERSC_HOST" in - edison) - echo "#SBATCH --ntasks-per-node=32" >> runctest.slurm - ;; - cori) - echo "#SBATCH --ntasks-per-node=68" >> runctest.slurm - echo "#SBATCH -C knl" >> runctest.slurm - ;; -esac - -echo "#SBATCH --time=01:00:00" >> runctest.slurm - -echo "#SBATCH --export PIO_DASHBOARD_SITE,PIO_DASHBOARD_BUILD_NAME,PIO_DASHBOARD_SOURCE_DIR,PIO_DASHBOARD_BINARY_DIR" >> runctest.slurm -#echo "cd \$PBS_O_WORKDIR" >> runctest.pbs -echo "CTEST_CMD=`which ctest`" >> runctest.slurm -echo "\$CTEST_CMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.slurm -chmod +x runctest.slurm -# Submit the job to the queue -#jobid=`sbatch runctest.slurm| egrep -o -e "\b[0-9]+$"` -case "$NERSC_HOST" in - edison) - salloc -N 1 ./runctest.slurm - ;; - cori) - salloc -N 1 -C knl ./runctest.slurm - ;; -esac -# Wait for the job to complete before exiting -#while true; do -# status=`squeue -j $jobid` -# if [ "$status" == "" ]; then -# break -# else -# sleep 10 -# fi -#done diff --git a/src/externals/pio2/ctest/runctest-nwscla.sh b/src/externals/pio2/ctest/runctest-nwscla.sh deleted file mode 100755 index d3e252317d0..00000000000 --- a/src/externals/pio2/ctest/runctest-nwscla.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh -#============================================================================== -# -# This script defines how to run CTest on the NCAR Wyoming Supercomputing -# Center systems (cheyenne/laramie). -# -# This assumes the CTest model name (e.g., "Nightly") is passed to it when -# run. 
-# -#============================================================================== - -# Get the CTest script directory -scrdir=$1 - -# Get the CTest model name -model=$2 - -# Write QSUB submission script with the test execution command -echo "#!/bin/sh" > runctest.sh -echo "#PBS -l walltime=01:00:00" >> runctest.sh -echo "#PBS -l select=1:ncpus=8:mpiprocs=8" >> runctest.sh -echo "#PBS -A P93300606" >> runctest.sh -echo "#PBS -q regular" >> runctest.sh -echo "export PIO_DASHBOARD_SITE=nwscla-${HOSTNAME}" >> runctest.sh -echo "CTESTCMD=`which ctest`" >> runctest.sh -echo "\$CTESTCMD -S ${scrdir}/CTestScript-Test.cmake,${model} -V" >> runctest.sh - -# Make the QSUB script executable -chmod +x runctest.sh - -# Submit the job to the queue -jobid=`qsub -l walltime=01:00:00 runctest.sh` - -# Wait for the job to complete before exiting -while true; do - qstat $jobid - if [ $? -eq 0 ]; then - sleep 30 - else - break; - fi -done - -exit 0 diff --git a/src/externals/pio2/ctest/runctest-unknown.sh b/src/externals/pio2/ctest/runctest-unknown.sh deleted file mode 100755 index 01ba66403c9..00000000000 --- a/src/externals/pio2/ctest/runctest-unknown.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -#============================================================================== -# -# This script defines how to run CTest on the default ("unknown") machine. -# -# This assumes the CTest model name (e.g., "Nightly") is passed to it when -# run. -# -#============================================================================== - -# Get the CTest script directory -scrdir=$1 - -# Get the dashboard model name -model=$2 - -# Run the "ctest" command in another process -ctest -S ${scrdir}/CTestScript-Test.cmake,${model} -V diff --git a/src/externals/pio2/doc/CMakeFiles/3.2.3/CMakeSystem.cmake b/src/externals/pio2/doc/CMakeFiles/3.2.3/CMakeSystem.cmake deleted file mode 100644 index c94e370e1b0..00000000000 --- a/src/externals/pio2/doc/CMakeFiles/3.2.3/CMakeSystem.cmake +++ /dev/null @@ -1,15 +0,0 @@ -set(CMAKE_HOST_SYSTEM "Linux-3.10.0-123.el7.x86_64") -set(CMAKE_HOST_SYSTEM_NAME "Linux") -set(CMAKE_HOST_SYSTEM_VERSION "3.10.0-123.el7.x86_64") -set(CMAKE_HOST_SYSTEM_PROCESSOR "x86_64") - - - -set(CMAKE_SYSTEM "Linux-3.10.0-123.el7.x86_64") -set(CMAKE_SYSTEM_NAME "Linux") -set(CMAKE_SYSTEM_VERSION "3.10.0-123.el7.x86_64") -set(CMAKE_SYSTEM_PROCESSOR "x86_64") - -set(CMAKE_CROSSCOMPILING "FALSE") - -set(CMAKE_SYSTEM_LOADED 1) diff --git a/src/externals/pio2/doc/CMakeFiles/CMakeOutput.log b/src/externals/pio2/doc/CMakeFiles/CMakeOutput.log deleted file mode 100644 index c254632f6ba..00000000000 --- a/src/externals/pio2/doc/CMakeFiles/CMakeOutput.log +++ /dev/null @@ -1,512 +0,0 @@ -The system is: Linux - 3.10.0-123.el7.x86_64 - x86_64 -Compiling the C compiler identification source file "CMakeCCompilerId.c" succeeded. -Compiler: /usr/mpi/intel/mvapich2-1.8.1-qlc/bin/mpicc -Build flags: -Id flags: - -The output was: -0 -icc: command line remark #10148: option '-i-dynamic' not supported - - -Compilation of the C compiler identification source "CMakeCCompilerId.c" produced "a.out" - -The C compiler identification is Intel, found in "/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/3.2.3/CompilerIdC/a.out" - -Compiling the CXX compiler identification source file "CMakeCXXCompilerId.cpp" succeeded. 
-Compiler: /usr/bin/c++ -Build flags: -Id flags: - -The output was: -0 - - -Compilation of the CXX compiler identification source "CMakeCXXCompilerId.cpp" produced "a.out" - -The CXX compiler identification is GNU, found in "/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/3.2.3/CompilerIdCXX/a.out" - -Determining if the C compiler works passed with the following output: -Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp - -Run Build Command:"/usr/bin/gmake" "cmTryCompileExec1905307408/fast" -/usr/bin/gmake -f CMakeFiles/cmTryCompileExec1905307408.dir/build.make CMakeFiles/cmTryCompileExec1905307408.dir/build -gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1 -Building C object CMakeFiles/cmTryCompileExec1905307408.dir/testCCompiler.c.o -/usr/mpi/intel/mvapich2-1.8.1-qlc/bin/mpicc -o CMakeFiles/cmTryCompileExec1905307408.dir/testCCompiler.c.o -c /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/testCCompiler.c -icc: command line remark #10148: option '-i-dynamic' not supported -Linking C executable cmTryCompileExec1905307408 -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec1905307408.dir/link.txt --verbose=1 -/usr/mpi/intel/mvapich2-1.8.1-qlc/bin/mpicc CMakeFiles/cmTryCompileExec1905307408.dir/testCCompiler.c.o -o cmTryCompileExec1905307408 -rdynamic -icc: command line remark #10148: option '-i-dynamic' not supported -gmake[1]: Leaving directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' - - -Detecting C compiler ABI info compiled with the following output: -Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp - -Run Build Command:"/usr/bin/gmake" "cmTryCompileExec3327212404/fast" -/usr/bin/gmake -f CMakeFiles/cmTryCompileExec3327212404.dir/build.make CMakeFiles/cmTryCompileExec3327212404.dir/build -gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1 -Building C object CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -/usr/mpi/intel/mvapich2-1.8.1-qlc/bin/mpicc -o CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -c /home/katec/cmake/cmake-3.2.3/Modules/CMakeCCompilerABI.c -icc: command line remark #10148: option '-i-dynamic' not supported -Linking C executable cmTryCompileExec3327212404 -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec3327212404.dir/link.txt --verbose=1 -/usr/mpi/intel/mvapich2-1.8.1-qlc/bin/mpicc -v CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -o cmTryCompileExec3327212404 -rdynamic -mpicc for MVAPICH2 version 1.8.1 -icc: command line remark #10148: option '-i-dynamic' not supported -icc version 15.0.2 (gcc version 4.8.3 compatibility) -/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/bin/intel64/mcpcom -mP1OPT_version=15.0-intel64 -mGLOB_diag_file=CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.diag -mP1OPT_print_version=FALSE -mCG_use_gas_got_workaround=F -mP2OPT_align_option_used=TRUE -mGLOB_gcc_version=483 "-mGLOB_options_string=-I/usr/mpi/intel/mvapich2-1.8.1-qlc/include -i-dynamic -Wl,-rpath,/usr/local/intel-cluster-15.0.2.164/lib/intel64 -Wl,-rpath,/usr/local/intel-cluster-15.0.2.164/lib/intel64 
-lpsm_infinipath -v -o cmTryCompileExec3327212404 -rdynamic -L/usr/mpi/intel/mvapich2-1.8.1-qlc/lib -Wl,-rpath,/usr/mpi/intel/mvapich2-1.8.1-qlc/lib -lmpich -lopa -lmpl -lrt -lpthread -lpsm_infinipath -lpthread" -mGLOB_cxx_limited_range=FALSE -mCG_extend_parms=FALSE -mGLOB_compiler_bin_directory=/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/bin/intel64 -mGLOB_as_output_backup_file_name=/tmp/iccgh3YrFas_.s -mIPOPT_activate -mGLOB_em64t -mGLOB_product_id_code=0x22006d91 -mP3OPT_use_mspp_call_convention -mP2OPT_subs_out_of_bound=FALSE -mP2OPT_disam_type_based_disam=2 -mP2OPT_disam_assume_ansi_c -mP2OPT_checked_disam_ansi_alias=TRUE -mGLOB_ansi_alias -mPGOPTI_value_profile_use=T -mGLOB_opt_report_use_source_name -mP2OPT_il0_array_sections=TRUE -mGLOB_offload_mode=1 -mP2OPT_offload_unique_var_string=icc0101950814685UOffaN -mP2OPT_hlo -mP2OPT_hpo_rtt_control=0 -mIPOPT_args_in_regs=0 -mP2OPT_disam_assume_nonstd_intent_in=FALSE -mGLOB_imf_mapping_library=/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/bin/intel64/libiml_attr.so -mPGOPTI_gen_threadsafe_level=0 -mIPOPT_link -mIPOPT_ipo_activate -mIPOPT_mo_activate -mIPOPT_source_files_list=/tmp/iccslisv13FbC -mIPOPT_mo_global_data -mIPOPT_link_script_file=/tmp/iccscriptrK8CBi "-mIPOPT_cmdline_link="/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o" "-export-dynamic" "--eh-frame-hdr" "--build-id" "-dynamic-linker" "/lib64/ld-linux-x86-64.so.2" "-m" "elf_x86_64" "-L/usr/mpi/intel/mvapich2-1.8.1-qlc/lib" "-o" "cmTryCompileExec3327212404" "-L/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/" "-L/lib/../lib64" "-L/lib/../lib64/" "-L/usr/lib/../lib64" "-L/usr/lib/../lib64/" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../" "-L/lib64" "-L/lib/" "-L/usr/lib64" "-L/usr/lib" "-rpath" "/usr/local/intel-cluster-15.0.2.164/lib/intel64" "-rpath" "/usr/local/intel-cluster-15.0.2.164/lib/intel64" "-lpsm_infinipath" "CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o" "-rpath" "/usr/mpi/intel/mvapich2-1.8.1-qlc/lib" "-lmpich" "-lopa" "-lmpl" "-lrt" "-lpthread" "-lpsm_infinipath" "-lpthread" "-Bdynamic" "-Bstatic" "-limf" "-lsvml" "-lirng" "-Bdynamic" "-lm" "-Bstatic" "-lipgo" "-ldecimal" "--as-needed" "-Bdynamic" "-lcilkrts" "-lstdc++" "--no-as-needed" "-lgcc" "-lgcc_s" "-Bstatic" "-lirc" "-lsvml" "-Bdynamic" "-lc" "-lgcc" "-lgcc_s" "-Bstatic" "-lirc_s" "-Bdynamic" "-ldl" "-lc" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o"" -mIPOPT_il_in_obj -mIPOPT_ipo_activate_warn=FALSE -mIPOPT_obj_output_file_name=/tmp/ipo_iccFcA1Fv.o -mIPOPT_whole_archive_fixup_file_name=/tmp/iccwarchcJ1SWv -mGLOB_linker_version=2.23.52.0.1 -mGLOB_long_size_64 -mGLOB_routine_pointer_size_64 -mGLOB_driver_tempfile_name=/tmp/icctempfileFZTXpc -mP3OPT_asm_target=P3OPT_ASM_TARGET_GAS -mGLOB_async_unwind_tables=TRUE -mGLOB_obj_output_file=/tmp/ipo_iccFcA1Fv.o -mGLOB_source_dialect=GLOB_SOURCE_DIALECT_NONE -mP1OPT_source_file_name=ipo_out.c CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -mIPOPT_object_files=T -mIPOPT_assembly_files=/tmp/iccalis4BFTvS -mIPOPT_generated_tempfiles=/tmp/iccelisvoZPVy -mIPOPT_embedded_object_base_name=/tmp/icceobjgHtMlf 
-mIPOPT_cmdline_link_new_name=/tmp/iccllisaJbJLV -ld /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o -export-dynamic --eh-frame-hdr --build-id -dynamic-linker /lib64/ld-linux-x86-64.so.2 -m elf_x86_64 -L/usr/mpi/intel/mvapich2-1.8.1-qlc/lib -o cmTryCompileExec3327212404 -L/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/ -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/ -L/lib/../lib64 -L/lib/../lib64/ -L/usr/lib/../lib64 -L/usr/lib/../lib64/ -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../ -L/lib64 -L/lib/ -L/usr/lib64 -L/usr/lib -rpath /usr/local/intel-cluster-15.0.2.164/lib/intel64 -rpath /usr/local/intel-cluster-15.0.2.164/lib/intel64 -lpsm_infinipath CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -rpath /usr/mpi/intel/mvapich2-1.8.1-qlc/lib -lmpich -lopa -lmpl -lrt -lpthread -lpsm_infinipath -lpthread -Bdynamic -Bstatic -limf -lsvml -lirng -Bdynamic -lm -Bstatic -lipgo -ldecimal --as-needed -Bdynamic -lcilkrts -lstdc++ --no-as-needed -lgcc -lgcc_s -Bstatic -lirc -lsvml -Bdynamic -lc -lgcc -lgcc_s -Bstatic -lirc_s -Bdynamic -ldl -lc /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o -gmake[1]: Leaving directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' - - -Parsed C implicit link information from above output: - link line regex: [^( *|.*[/\])(ld|([^/\]+-)?ld|collect2)[^/\]*( |$)] - ignore line: [Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp] - ignore line: [] - ignore line: [Run Build Command:"/usr/bin/gmake" "cmTryCompileExec3327212404/fast"] - ignore line: [/usr/bin/gmake -f CMakeFiles/cmTryCompileExec3327212404.dir/build.make CMakeFiles/cmTryCompileExec3327212404.dir/build] - ignore line: [gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp'] - ignore line: [/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1] - ignore line: [Building C object CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o] - ignore line: [/usr/mpi/intel/mvapich2-1.8.1-qlc/bin/mpicc -o CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -c /home/katec/cmake/cmake-3.2.3/Modules/CMakeCCompilerABI.c] - ignore line: [icc: command line remark #10148: option '-i-dynamic' not supported] - ignore line: [Linking C executable cmTryCompileExec3327212404] - ignore line: [/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec3327212404.dir/link.txt --verbose=1] - ignore line: [/usr/mpi/intel/mvapich2-1.8.1-qlc/bin/mpicc -v CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -o cmTryCompileExec3327212404 -rdynamic ] - ignore line: [mpicc for MVAPICH2 version 1.8.1] - ignore line: [icc: command line remark #10148: option '-i-dynamic' not supported] - ignore line: [icc version 15.0.2 (gcc version 4.8.3 compatibility)] - ignore line: [/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/bin/intel64/mcpcom -mP1OPT_version=15.0-intel64 -mGLOB_diag_file=CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.diag -mP1OPT_print_version=FALSE -mCG_use_gas_got_workaround=F -mP2OPT_align_option_used=TRUE -mGLOB_gcc_version=483 
"-mGLOB_options_string=-I/usr/mpi/intel/mvapich2-1.8.1-qlc/include -i-dynamic -Wl,-rpath,/usr/local/intel-cluster-15.0.2.164/lib/intel64 -Wl,-rpath,/usr/local/intel-cluster-15.0.2.164/lib/intel64 -lpsm_infinipath -v -o cmTryCompileExec3327212404 -rdynamic -L/usr/mpi/intel/mvapich2-1.8.1-qlc/lib -Wl,-rpath,/usr/mpi/intel/mvapich2-1.8.1-qlc/lib -lmpich -lopa -lmpl -lrt -lpthread -lpsm_infinipath -lpthread" -mGLOB_cxx_limited_range=FALSE -mCG_extend_parms=FALSE -mGLOB_compiler_bin_directory=/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/bin/intel64 -mGLOB_as_output_backup_file_name=/tmp/iccgh3YrFas_.s -mIPOPT_activate -mGLOB_em64t -mGLOB_product_id_code=0x22006d91 -mP3OPT_use_mspp_call_convention -mP2OPT_subs_out_of_bound=FALSE -mP2OPT_disam_type_based_disam=2 -mP2OPT_disam_assume_ansi_c -mP2OPT_checked_disam_ansi_alias=TRUE -mGLOB_ansi_alias -mPGOPTI_value_profile_use=T -mGLOB_opt_report_use_source_name -mP2OPT_il0_array_sections=TRUE -mGLOB_offload_mode=1 -mP2OPT_offload_unique_var_string=icc0101950814685UOffaN -mP2OPT_hlo -mP2OPT_hpo_rtt_control=0 -mIPOPT_args_in_regs=0 -mP2OPT_disam_assume_nonstd_intent_in=FALSE -mGLOB_imf_mapping_library=/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/bin/intel64/libiml_attr.so -mPGOPTI_gen_threadsafe_level=0 -mIPOPT_link -mIPOPT_ipo_activate -mIPOPT_mo_activate -mIPOPT_source_files_list=/tmp/iccslisv13FbC -mIPOPT_mo_global_data -mIPOPT_link_script_file=/tmp/iccscriptrK8CBi "-mIPOPT_cmdline_link="/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o" "-export-dynamic" "--eh-frame-hdr" "--build-id" "-dynamic-linker" "/lib64/ld-linux-x86-64.so.2" "-m" "elf_x86_64" "-L/usr/mpi/intel/mvapich2-1.8.1-qlc/lib" "-o" "cmTryCompileExec3327212404" "-L/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/" "-L/lib/../lib64" "-L/lib/../lib64/" "-L/usr/lib/../lib64" "-L/usr/lib/../lib64/" "-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../" "-L/lib64" "-L/lib/" "-L/usr/lib64" "-L/usr/lib" "-rpath" "/usr/local/intel-cluster-15.0.2.164/lib/intel64" "-rpath" "/usr/local/intel-cluster-15.0.2.164/lib/intel64" "-lpsm_infinipath" "CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o" "-rpath" "/usr/mpi/intel/mvapich2-1.8.1-qlc/lib" "-lmpich" "-lopa" "-lmpl" "-lrt" "-lpthread" "-lpsm_infinipath" "-lpthread" "-Bdynamic" "-Bstatic" "-limf" "-lsvml" "-lirng" "-Bdynamic" "-lm" "-Bstatic" "-lipgo" "-ldecimal" "--as-needed" "-Bdynamic" "-lcilkrts" "-lstdc++" "--no-as-needed" "-lgcc" "-lgcc_s" "-Bstatic" "-lirc" "-lsvml" "-Bdynamic" "-lc" "-lgcc" "-lgcc_s" "-Bstatic" "-lirc_s" "-Bdynamic" "-ldl" "-lc" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o" "/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o"" -mIPOPT_il_in_obj -mIPOPT_ipo_activate_warn=FALSE -mIPOPT_obj_output_file_name=/tmp/ipo_iccFcA1Fv.o -mIPOPT_whole_archive_fixup_file_name=/tmp/iccwarchcJ1SWv -mGLOB_linker_version=2.23.52.0.1 -mGLOB_long_size_64 -mGLOB_routine_pointer_size_64 -mGLOB_driver_tempfile_name=/tmp/icctempfileFZTXpc -mP3OPT_asm_target=P3OPT_ASM_TARGET_GAS -mGLOB_async_unwind_tables=TRUE -mGLOB_obj_output_file=/tmp/ipo_iccFcA1Fv.o -mGLOB_source_dialect=GLOB_SOURCE_DIALECT_NONE -mP1OPT_source_file_name=ipo_out.c 
CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -mIPOPT_object_files=T -mIPOPT_assembly_files=/tmp/iccalis4BFTvS -mIPOPT_generated_tempfiles=/tmp/iccelisvoZPVy -mIPOPT_embedded_object_base_name=/tmp/icceobjgHtMlf -mIPOPT_cmdline_link_new_name=/tmp/iccllisaJbJLV] - link line: [ld /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o -export-dynamic --eh-frame-hdr --build-id -dynamic-linker /lib64/ld-linux-x86-64.so.2 -m elf_x86_64 -L/usr/mpi/intel/mvapich2-1.8.1-qlc/lib -o cmTryCompileExec3327212404 -L/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/ -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/ -L/lib/../lib64 -L/lib/../lib64/ -L/usr/lib/../lib64 -L/usr/lib/../lib64/ -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../ -L/lib64 -L/lib/ -L/usr/lib64 -L/usr/lib -rpath /usr/local/intel-cluster-15.0.2.164/lib/intel64 -rpath /usr/local/intel-cluster-15.0.2.164/lib/intel64 -lpsm_infinipath CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o -rpath /usr/mpi/intel/mvapich2-1.8.1-qlc/lib -lmpich -lopa -lmpl -lrt -lpthread -lpsm_infinipath -lpthread -Bdynamic -Bstatic -limf -lsvml -lirng -Bdynamic -lm -Bstatic -lipgo -ldecimal --as-needed -Bdynamic -lcilkrts -lstdc++ --no-as-needed -lgcc -lgcc_s -Bstatic -lirc -lsvml -Bdynamic -lc -lgcc -lgcc_s -Bstatic -lirc_s -Bdynamic -ldl -lc /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o] - arg [ld] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o] ==> ignore - arg [-export-dynamic] ==> ignore - arg [--eh-frame-hdr] ==> ignore - arg [--build-id] ==> ignore - arg [-dynamic-linker] ==> ignore - arg [/lib64/ld-linux-x86-64.so.2] ==> ignore - arg [-m] ==> ignore - arg [elf_x86_64] ==> ignore - arg [-L/usr/mpi/intel/mvapich2-1.8.1-qlc/lib] ==> dir [/usr/mpi/intel/mvapich2-1.8.1-qlc/lib] - arg [-o] ==> ignore - arg [cmTryCompileExec3327212404] ==> ignore - arg [-L/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64] ==> dir [/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64] - arg [-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/] ==> dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/] - arg [-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64] ==> dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64] - arg [-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/] ==> dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/] - arg [-L/lib/../lib64] ==> dir [/lib/../lib64] - arg [-L/lib/../lib64/] ==> dir [/lib/../lib64/] - arg [-L/usr/lib/../lib64] ==> dir [/usr/lib/../lib64] - arg [-L/usr/lib/../lib64/] ==> dir [/usr/lib/../lib64/] - arg [-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../] ==> dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../] - arg [-L/lib64] ==> dir [/lib64] - arg [-L/lib/] ==> dir [/lib/] - arg [-L/usr/lib64] ==> dir [/usr/lib64] - arg [-L/usr/lib] ==> dir [/usr/lib] - arg [-rpath] ==> ignore - arg [/usr/local/intel-cluster-15.0.2.164/lib/intel64] ==> ignore - arg [-rpath] ==> ignore - arg [/usr/local/intel-cluster-15.0.2.164/lib/intel64] 
==> ignore - arg [-lpsm_infinipath] ==> lib [psm_infinipath] - arg [CMakeFiles/cmTryCompileExec3327212404.dir/CMakeCCompilerABI.c.o] ==> ignore - arg [-rpath] ==> ignore - arg [/usr/mpi/intel/mvapich2-1.8.1-qlc/lib] ==> ignore - arg [-lmpich] ==> lib [mpich] - arg [-lopa] ==> lib [opa] - arg [-lmpl] ==> lib [mpl] - arg [-lrt] ==> lib [rt] - arg [-lpthread] ==> lib [pthread] - arg [-lpsm_infinipath] ==> lib [psm_infinipath] - arg [-lpthread] ==> lib [pthread] - arg [-Bdynamic] ==> ignore - arg [-Bstatic] ==> ignore - arg [-limf] ==> lib [imf] - arg [-lsvml] ==> lib [svml] - arg [-lirng] ==> lib [irng] - arg [-Bdynamic] ==> ignore - arg [-lm] ==> lib [m] - arg [-Bstatic] ==> ignore - arg [-lipgo] ==> lib [ipgo] - arg [-ldecimal] ==> lib [decimal] - arg [--as-needed] ==> ignore - arg [-Bdynamic] ==> ignore - arg [-lcilkrts] ==> lib [cilkrts] - arg [-lstdc++] ==> lib [stdc++] - arg [--no-as-needed] ==> ignore - arg [-lgcc] ==> lib [gcc] - arg [-lgcc_s] ==> lib [gcc_s] - arg [-Bstatic] ==> ignore - arg [-lirc] ==> lib [irc] - arg [-lsvml] ==> lib [svml] - arg [-Bdynamic] ==> ignore - arg [-lc] ==> lib [c] - arg [-lgcc] ==> lib [gcc] - arg [-lgcc_s] ==> lib [gcc_s] - arg [-Bstatic] ==> ignore - arg [-lirc_s] ==> lib [irc_s] - arg [-Bdynamic] ==> ignore - arg [-ldl] ==> lib [dl] - arg [-lc] ==> lib [c] - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o] ==> ignore - remove lib [gcc] - remove lib [gcc_s] - remove lib [gcc] - remove lib [gcc_s] - collapse library dir [/usr/mpi/intel/mvapich2-1.8.1-qlc/lib] ==> [/usr/mpi/intel/mvapich2-1.8.1-qlc/lib] - collapse library dir [/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64] ==> [/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64] - collapse library dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/] ==> [/usr/lib/gcc/x86_64-redhat-linux/4.8.3] - collapse library dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64] ==> [/usr/lib64] - collapse library dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/] ==> [/usr/lib64] - collapse library dir [/lib/../lib64] ==> [/lib64] - collapse library dir [/lib/../lib64/] ==> [/lib64] - collapse library dir [/usr/lib/../lib64] ==> [/usr/lib64] - collapse library dir [/usr/lib/../lib64/] ==> [/usr/lib64] - collapse library dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../] ==> [/usr/lib] - collapse library dir [/lib64] ==> [/lib64] - collapse library dir [/lib/] ==> [/lib] - collapse library dir [/usr/lib64] ==> [/usr/lib64] - collapse library dir [/usr/lib] ==> [/usr/lib] - implicit libs: [psm_infinipath;mpich;opa;mpl;rt;pthread;psm_infinipath;pthread;imf;svml;irng;m;ipgo;decimal;cilkrts;stdc++;irc;svml;c;irc_s;dl;c] - implicit dirs: [/usr/mpi/intel/mvapich2-1.8.1-qlc/lib;/usr/local/intel-cluster-15.0.2.164/composer_xe_2015.2.164/compiler/lib/intel64;/usr/lib/gcc/x86_64-redhat-linux/4.8.3;/usr/lib64;/lib64;/usr/lib;/lib] - implicit fwks: [] - - -Determining if the CXX compiler works passed with the following output: -Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp - -Run Build Command:"/usr/bin/gmake" "cmTryCompileExec2556829595/fast" -/usr/bin/gmake -f CMakeFiles/cmTryCompileExec2556829595.dir/build.make CMakeFiles/cmTryCompileExec2556829595.dir/build -gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report 
/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1 -Building CXX object CMakeFiles/cmTryCompileExec2556829595.dir/testCXXCompiler.cxx.o -/usr/bin/c++ -o CMakeFiles/cmTryCompileExec2556829595.dir/testCXXCompiler.cxx.o -c /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/testCXXCompiler.cxx -Linking CXX executable cmTryCompileExec2556829595 -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec2556829595.dir/link.txt --verbose=1 -/usr/bin/c++ CMakeFiles/cmTryCompileExec2556829595.dir/testCXXCompiler.cxx.o -o cmTryCompileExec2556829595 -rdynamic -gmake[1]: Leaving directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' - - -Detecting CXX compiler ABI info compiled with the following output: -Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp - -Run Build Command:"/usr/bin/gmake" "cmTryCompileExec1080422183/fast" -/usr/bin/gmake -f CMakeFiles/cmTryCompileExec1080422183.dir/build.make CMakeFiles/cmTryCompileExec1080422183.dir/build -gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1 -Building CXX object CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o -/usr/bin/c++ -o CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o -c /home/katec/cmake/cmake-3.2.3/Modules/CMakeCXXCompilerABI.cpp -Linking CXX executable cmTryCompileExec1080422183 -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec1080422183.dir/link.txt --verbose=1 -/usr/bin/c++ -v CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o -o cmTryCompileExec1080422183 -rdynamic -Using built-in specs. 
-COLLECT_GCC=/usr/bin/c++ -COLLECT_LTO_WRAPPER=/usr/libexec/gcc/x86_64-redhat-linux/4.8.3/lto-wrapper -Target: x86_64-redhat-linux -Configured with: ../configure --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-bugurl=http://bugzilla.redhat.com/bugzilla --enable-bootstrap --enable-shared --enable-threads=posix --enable-checking=release --with-system-zlib --enable-__cxa_atexit --disable-libunwind-exceptions --enable-gnu-unique-object --enable-linker-build-id --with-linker-hash-style=gnu --enable-languages=c,c++,objc,obj-c++,java,fortran,ada,go,lto --enable-plugin --enable-initfini-array --disable-libgcj --with-isl=/builddir/build/BUILD/gcc-4.8.3-20140911/obj-x86_64-redhat-linux/isl-install --with-cloog=/builddir/build/BUILD/gcc-4.8.3-20140911/obj-x86_64-redhat-linux/cloog-install --enable-gnu-indirect-function --with-tune=generic --with-arch_32=x86-64 --build=x86_64-redhat-linux -Thread model: posix -gcc version 4.8.3 20140911 (Red Hat 4.8.3-9) (GCC) -COMPILER_PATH=/usr/local/intel-cluster-15.0.2.164/:/usr/libexec/gcc/x86_64-redhat-linux/4.8.3/:/usr/libexec/gcc/x86_64-redhat-linux/4.8.3/:/usr/libexec/gcc/x86_64-redhat-linux/:/usr/lib/gcc/x86_64-redhat-linux/4.8.3/:/usr/lib/gcc/x86_64-redhat-linux/ -LIBRARY_PATH=/usr/lib/gcc/x86_64-redhat-linux/4.8.3/:/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/:/lib/../lib64/:/usr/lib/../lib64/:/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../:/lib/:/usr/lib/ -COLLECT_GCC_OPTIONS='-v' '-o' 'cmTryCompileExec1080422183' '-rdynamic' '-shared-libgcc' '-mtune=generic' '-march=x86-64' - /usr/libexec/gcc/x86_64-redhat-linux/4.8.3/collect2 --build-id --no-add-needed --eh-frame-hdr --hash-style=gnu -m elf_x86_64 -export-dynamic -dynamic-linker /lib64/ld-linux-x86-64.so.2 -o cmTryCompileExec1080422183 /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64 -L/lib/../lib64 -L/usr/lib/../lib64 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../.. 
CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o -lstdc++ -lm -lgcc_s -lgcc -lc -lgcc_s -lgcc /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o -gmake[1]: Leaving directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' - - -Parsed CXX implicit link information from above output: - link line regex: [^( *|.*[/\])(ld|([^/\]+-)?ld|collect2)[^/\]*( |$)] - ignore line: [Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp] - ignore line: [] - ignore line: [Run Build Command:"/usr/bin/gmake" "cmTryCompileExec1080422183/fast"] - ignore line: [/usr/bin/gmake -f CMakeFiles/cmTryCompileExec1080422183.dir/build.make CMakeFiles/cmTryCompileExec1080422183.dir/build] - ignore line: [gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp'] - ignore line: [/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1] - ignore line: [Building CXX object CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o] - ignore line: [/usr/bin/c++ -o CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o -c /home/katec/cmake/cmake-3.2.3/Modules/CMakeCXXCompilerABI.cpp] - ignore line: [Linking CXX executable cmTryCompileExec1080422183] - ignore line: [/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec1080422183.dir/link.txt --verbose=1] - ignore line: [/usr/bin/c++ -v CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o -o cmTryCompileExec1080422183 -rdynamic ] - ignore line: [Using built-in specs.] - ignore line: [COLLECT_GCC=/usr/bin/c++] - ignore line: [COLLECT_LTO_WRAPPER=/usr/libexec/gcc/x86_64-redhat-linux/4.8.3/lto-wrapper] - ignore line: [Target: x86_64-redhat-linux] - ignore line: [Configured with: ../configure --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-bugurl=http://bugzilla.redhat.com/bugzilla --enable-bootstrap --enable-shared --enable-threads=posix --enable-checking=release --with-system-zlib --enable-__cxa_atexit --disable-libunwind-exceptions --enable-gnu-unique-object --enable-linker-build-id --with-linker-hash-style=gnu --enable-languages=c,c++,objc,obj-c++,java,fortran,ada,go,lto --enable-plugin --enable-initfini-array --disable-libgcj --with-isl=/builddir/build/BUILD/gcc-4.8.3-20140911/obj-x86_64-redhat-linux/isl-install --with-cloog=/builddir/build/BUILD/gcc-4.8.3-20140911/obj-x86_64-redhat-linux/cloog-install --enable-gnu-indirect-function --with-tune=generic --with-arch_32=x86-64 --build=x86_64-redhat-linux] - ignore line: [Thread model: posix] - ignore line: [gcc version 4.8.3 20140911 (Red Hat 4.8.3-9) (GCC) ] - ignore line: [COMPILER_PATH=/usr/local/intel-cluster-15.0.2.164/:/usr/libexec/gcc/x86_64-redhat-linux/4.8.3/:/usr/libexec/gcc/x86_64-redhat-linux/4.8.3/:/usr/libexec/gcc/x86_64-redhat-linux/:/usr/lib/gcc/x86_64-redhat-linux/4.8.3/:/usr/lib/gcc/x86_64-redhat-linux/] - ignore line: [LIBRARY_PATH=/usr/lib/gcc/x86_64-redhat-linux/4.8.3/:/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/:/lib/../lib64/:/usr/lib/../lib64/:/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../:/lib/:/usr/lib/] - ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'cmTryCompileExec1080422183' '-rdynamic' '-shared-libgcc' '-mtune=generic' '-march=x86-64'] - link line: [ /usr/libexec/gcc/x86_64-redhat-linux/4.8.3/collect2 --build-id --no-add-needed --eh-frame-hdr --hash-style=gnu -m elf_x86_64 
-export-dynamic -dynamic-linker /lib64/ld-linux-x86-64.so.2 -o cmTryCompileExec1080422183 /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64 -L/lib/../lib64 -L/usr/lib/../lib64 -L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../.. CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o -lstdc++ -lm -lgcc_s -lgcc -lc -lgcc_s -lgcc /usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o /usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o] - arg [/usr/libexec/gcc/x86_64-redhat-linux/4.8.3/collect2] ==> ignore - arg [--build-id] ==> ignore - arg [--no-add-needed] ==> ignore - arg [--eh-frame-hdr] ==> ignore - arg [--hash-style=gnu] ==> ignore - arg [-m] ==> ignore - arg [elf_x86_64] ==> ignore - arg [-export-dynamic] ==> ignore - arg [-dynamic-linker] ==> ignore - arg [/lib64/ld-linux-x86-64.so.2] ==> ignore - arg [-o] ==> ignore - arg [cmTryCompileExec1080422183] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crt1.o] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crti.o] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtbegin.o] ==> ignore - arg [-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3] ==> dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3] - arg [-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64] ==> dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64] - arg [-L/lib/../lib64] ==> dir [/lib/../lib64] - arg [-L/usr/lib/../lib64] ==> dir [/usr/lib/../lib64] - arg [-L/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../..] ==> dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../..] - arg [CMakeFiles/cmTryCompileExec1080422183.dir/CMakeCXXCompilerABI.cpp.o] ==> ignore - arg [-lstdc++] ==> lib [stdc++] - arg [-lm] ==> lib [m] - arg [-lgcc_s] ==> lib [gcc_s] - arg [-lgcc] ==> lib [gcc] - arg [-lc] ==> lib [c] - arg [-lgcc_s] ==> lib [gcc_s] - arg [-lgcc] ==> lib [gcc] - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/crtend.o] ==> ignore - arg [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64/crtn.o] ==> ignore - remove lib [gcc_s] - remove lib [gcc] - remove lib [gcc_s] - remove lib [gcc] - collapse library dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3] ==> [/usr/lib/gcc/x86_64-redhat-linux/4.8.3] - collapse library dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../../../lib64] ==> [/usr/lib64] - collapse library dir [/lib/../lib64] ==> [/lib64] - collapse library dir [/usr/lib/../lib64] ==> [/usr/lib64] - collapse library dir [/usr/lib/gcc/x86_64-redhat-linux/4.8.3/../../..] 
==> [/usr/lib] - implicit libs: [stdc++;m;c] - implicit dirs: [/usr/lib/gcc/x86_64-redhat-linux/4.8.3;/usr/lib64;/lib64;/usr/lib] - implicit fwks: [] - - - - -Detecting CXX [-std=c++1y] compiler features compiled with the following output: -Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp - -Run Build Command:"/usr/bin/gmake" "cmTryCompileExec2444100226/fast" -/usr/bin/gmake -f CMakeFiles/cmTryCompileExec2444100226.dir/build.make CMakeFiles/cmTryCompileExec2444100226.dir/build -gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1 -Building CXX object CMakeFiles/cmTryCompileExec2444100226.dir/feature_tests.cxx.o -/usr/bin/c++ -std=c++1y -o CMakeFiles/cmTryCompileExec2444100226.dir/feature_tests.cxx.o -c /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/feature_tests.cxx -Linking CXX executable cmTryCompileExec2444100226 -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec2444100226.dir/link.txt --verbose=1 -/usr/bin/c++ CMakeFiles/cmTryCompileExec2444100226.dir/feature_tests.cxx.o -o cmTryCompileExec2444100226 -rdynamic -gmake[1]: Leaving directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' - - - Feature record: CXX_FEATURE:0cxx_aggregate_default_initializers - Feature record: CXX_FEATURE:1cxx_alias_templates - Feature record: CXX_FEATURE:1cxx_alignas - Feature record: CXX_FEATURE:1cxx_alignof - Feature record: CXX_FEATURE:1cxx_attributes - Feature record: CXX_FEATURE:0cxx_attribute_deprecated - Feature record: CXX_FEATURE:1cxx_auto_type - Feature record: CXX_FEATURE:0cxx_binary_literals - Feature record: CXX_FEATURE:1cxx_constexpr - Feature record: CXX_FEATURE:0cxx_contextual_conversions - Feature record: CXX_FEATURE:1cxx_decltype - Feature record: CXX_FEATURE:0cxx_decltype_auto - Feature record: CXX_FEATURE:1cxx_decltype_incomplete_return_types - Feature record: CXX_FEATURE:1cxx_default_function_template_args - Feature record: CXX_FEATURE:1cxx_defaulted_functions - Feature record: CXX_FEATURE:1cxx_defaulted_move_initializers - Feature record: CXX_FEATURE:1cxx_delegating_constructors - Feature record: CXX_FEATURE:1cxx_deleted_functions - Feature record: CXX_FEATURE:0cxx_digit_separators - Feature record: CXX_FEATURE:1cxx_enum_forward_declarations - Feature record: CXX_FEATURE:1cxx_explicit_conversions - Feature record: CXX_FEATURE:1cxx_extended_friend_declarations - Feature record: CXX_FEATURE:1cxx_extern_templates - Feature record: CXX_FEATURE:1cxx_final - Feature record: CXX_FEATURE:1cxx_func_identifier - Feature record: CXX_FEATURE:1cxx_generalized_initializers - Feature record: CXX_FEATURE:0cxx_generic_lambdas - Feature record: CXX_FEATURE:1cxx_inheriting_constructors - Feature record: CXX_FEATURE:1cxx_inline_namespaces - Feature record: CXX_FEATURE:1cxx_lambdas - Feature record: CXX_FEATURE:0cxx_lambda_init_captures - Feature record: CXX_FEATURE:1cxx_local_type_template_args - Feature record: CXX_FEATURE:1cxx_long_long_type - Feature record: CXX_FEATURE:1cxx_noexcept - Feature record: CXX_FEATURE:1cxx_nonstatic_member_init - Feature record: CXX_FEATURE:1cxx_nullptr - Feature record: CXX_FEATURE:1cxx_override - Feature record: CXX_FEATURE:1cxx_range_for - Feature record: CXX_FEATURE:1cxx_raw_string_literals - Feature record: CXX_FEATURE:1cxx_reference_qualified_functions - Feature record: CXX_FEATURE:0cxx_relaxed_constexpr - Feature record: 
CXX_FEATURE:0cxx_return_type_deduction - Feature record: CXX_FEATURE:1cxx_right_angle_brackets - Feature record: CXX_FEATURE:1cxx_rvalue_references - Feature record: CXX_FEATURE:1cxx_sizeof_member - Feature record: CXX_FEATURE:1cxx_static_assert - Feature record: CXX_FEATURE:1cxx_strong_enums - Feature record: CXX_FEATURE:1cxx_template_template_parameters - Feature record: CXX_FEATURE:1cxx_thread_local - Feature record: CXX_FEATURE:1cxx_trailing_return_types - Feature record: CXX_FEATURE:1cxx_unicode_literals - Feature record: CXX_FEATURE:1cxx_uniform_initialization - Feature record: CXX_FEATURE:1cxx_unrestricted_unions - Feature record: CXX_FEATURE:1cxx_user_literals - Feature record: CXX_FEATURE:0cxx_variable_templates - Feature record: CXX_FEATURE:1cxx_variadic_macros - Feature record: CXX_FEATURE:1cxx_variadic_templates - - -Detecting CXX [-std=c++11] compiler features compiled with the following output: -Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp - -Run Build Command:"/usr/bin/gmake" "cmTryCompileExec295155124/fast" -/usr/bin/gmake -f CMakeFiles/cmTryCompileExec295155124.dir/build.make CMakeFiles/cmTryCompileExec295155124.dir/build -gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1 -Building CXX object CMakeFiles/cmTryCompileExec295155124.dir/feature_tests.cxx.o -/usr/bin/c++ -std=c++11 -o CMakeFiles/cmTryCompileExec295155124.dir/feature_tests.cxx.o -c /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/feature_tests.cxx -Linking CXX executable cmTryCompileExec295155124 -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec295155124.dir/link.txt --verbose=1 -/usr/bin/c++ CMakeFiles/cmTryCompileExec295155124.dir/feature_tests.cxx.o -o cmTryCompileExec295155124 -rdynamic -gmake[1]: Leaving directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' - - - Feature record: CXX_FEATURE:0cxx_aggregate_default_initializers - Feature record: CXX_FEATURE:1cxx_alias_templates - Feature record: CXX_FEATURE:1cxx_alignas - Feature record: CXX_FEATURE:1cxx_alignof - Feature record: CXX_FEATURE:1cxx_attributes - Feature record: CXX_FEATURE:0cxx_attribute_deprecated - Feature record: CXX_FEATURE:1cxx_auto_type - Feature record: CXX_FEATURE:0cxx_binary_literals - Feature record: CXX_FEATURE:1cxx_constexpr - Feature record: CXX_FEATURE:0cxx_contextual_conversions - Feature record: CXX_FEATURE:1cxx_decltype - Feature record: CXX_FEATURE:0cxx_decltype_auto - Feature record: CXX_FEATURE:1cxx_decltype_incomplete_return_types - Feature record: CXX_FEATURE:1cxx_default_function_template_args - Feature record: CXX_FEATURE:1cxx_defaulted_functions - Feature record: CXX_FEATURE:1cxx_defaulted_move_initializers - Feature record: CXX_FEATURE:1cxx_delegating_constructors - Feature record: CXX_FEATURE:1cxx_deleted_functions - Feature record: CXX_FEATURE:0cxx_digit_separators - Feature record: CXX_FEATURE:1cxx_enum_forward_declarations - Feature record: CXX_FEATURE:1cxx_explicit_conversions - Feature record: CXX_FEATURE:1cxx_extended_friend_declarations - Feature record: CXX_FEATURE:1cxx_extern_templates - Feature record: CXX_FEATURE:1cxx_final - Feature record: CXX_FEATURE:1cxx_func_identifier - Feature record: CXX_FEATURE:1cxx_generalized_initializers - Feature record: CXX_FEATURE:0cxx_generic_lambdas - Feature record: CXX_FEATURE:1cxx_inheriting_constructors - Feature 
record: CXX_FEATURE:1cxx_inline_namespaces - Feature record: CXX_FEATURE:1cxx_lambdas - Feature record: CXX_FEATURE:0cxx_lambda_init_captures - Feature record: CXX_FEATURE:1cxx_local_type_template_args - Feature record: CXX_FEATURE:1cxx_long_long_type - Feature record: CXX_FEATURE:1cxx_noexcept - Feature record: CXX_FEATURE:1cxx_nonstatic_member_init - Feature record: CXX_FEATURE:1cxx_nullptr - Feature record: CXX_FEATURE:1cxx_override - Feature record: CXX_FEATURE:1cxx_range_for - Feature record: CXX_FEATURE:1cxx_raw_string_literals - Feature record: CXX_FEATURE:1cxx_reference_qualified_functions - Feature record: CXX_FEATURE:0cxx_relaxed_constexpr - Feature record: CXX_FEATURE:0cxx_return_type_deduction - Feature record: CXX_FEATURE:1cxx_right_angle_brackets - Feature record: CXX_FEATURE:1cxx_rvalue_references - Feature record: CXX_FEATURE:1cxx_sizeof_member - Feature record: CXX_FEATURE:1cxx_static_assert - Feature record: CXX_FEATURE:1cxx_strong_enums - Feature record: CXX_FEATURE:1cxx_template_template_parameters - Feature record: CXX_FEATURE:1cxx_thread_local - Feature record: CXX_FEATURE:1cxx_trailing_return_types - Feature record: CXX_FEATURE:1cxx_unicode_literals - Feature record: CXX_FEATURE:1cxx_uniform_initialization - Feature record: CXX_FEATURE:1cxx_unrestricted_unions - Feature record: CXX_FEATURE:1cxx_user_literals - Feature record: CXX_FEATURE:0cxx_variable_templates - Feature record: CXX_FEATURE:1cxx_variadic_macros - Feature record: CXX_FEATURE:1cxx_variadic_templates - - -Detecting CXX [-std=c++98] compiler features compiled with the following output: -Change Dir: /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp - -Run Build Command:"/usr/bin/gmake" "cmTryCompileExec3307289814/fast" -/usr/bin/gmake -f CMakeFiles/cmTryCompileExec3307289814.dir/build.make CMakeFiles/cmTryCompileExec3307289814.dir/build -gmake[1]: Entering directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_progress_report /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp/CMakeFiles 1 -Building CXX object CMakeFiles/cmTryCompileExec3307289814.dir/feature_tests.cxx.o -/usr/bin/c++ -std=c++98 -o CMakeFiles/cmTryCompileExec3307289814.dir/feature_tests.cxx.o -c /scratch/cluster/katec/ParallelIO/doc/CMakeFiles/feature_tests.cxx -Linking CXX executable cmTryCompileExec3307289814 -/home/katec/cmake/cmake-3.2.3/bin/cmake -E cmake_link_script CMakeFiles/cmTryCompileExec3307289814.dir/link.txt --verbose=1 -/usr/bin/c++ CMakeFiles/cmTryCompileExec3307289814.dir/feature_tests.cxx.o -o cmTryCompileExec3307289814 -rdynamic -gmake[1]: Leaving directory `/scratch/cluster/katec/ParallelIO/doc/CMakeFiles/CMakeTmp' - - - Feature record: CXX_FEATURE:0cxx_aggregate_default_initializers - Feature record: CXX_FEATURE:0cxx_alias_templates - Feature record: CXX_FEATURE:0cxx_alignas - Feature record: CXX_FEATURE:0cxx_alignof - Feature record: CXX_FEATURE:0cxx_attributes - Feature record: CXX_FEATURE:0cxx_attribute_deprecated - Feature record: CXX_FEATURE:0cxx_auto_type - Feature record: CXX_FEATURE:0cxx_binary_literals - Feature record: CXX_FEATURE:0cxx_constexpr - Feature record: CXX_FEATURE:0cxx_contextual_conversions - Feature record: CXX_FEATURE:0cxx_decltype - Feature record: CXX_FEATURE:0cxx_decltype_auto - Feature record: CXX_FEATURE:0cxx_decltype_incomplete_return_types - Feature record: CXX_FEATURE:0cxx_default_function_template_args - Feature record: CXX_FEATURE:0cxx_defaulted_functions - Feature record: 
CXX_FEATURE:0cxx_defaulted_move_initializers - Feature record: CXX_FEATURE:0cxx_delegating_constructors - Feature record: CXX_FEATURE:0cxx_deleted_functions - Feature record: CXX_FEATURE:0cxx_digit_separators - Feature record: CXX_FEATURE:0cxx_enum_forward_declarations - Feature record: CXX_FEATURE:0cxx_explicit_conversions - Feature record: CXX_FEATURE:0cxx_extended_friend_declarations - Feature record: CXX_FEATURE:0cxx_extern_templates - Feature record: CXX_FEATURE:0cxx_final - Feature record: CXX_FEATURE:0cxx_func_identifier - Feature record: CXX_FEATURE:0cxx_generalized_initializers - Feature record: CXX_FEATURE:0cxx_generic_lambdas - Feature record: CXX_FEATURE:0cxx_inheriting_constructors - Feature record: CXX_FEATURE:0cxx_inline_namespaces - Feature record: CXX_FEATURE:0cxx_lambdas - Feature record: CXX_FEATURE:0cxx_lambda_init_captures - Feature record: CXX_FEATURE:0cxx_local_type_template_args - Feature record: CXX_FEATURE:0cxx_long_long_type - Feature record: CXX_FEATURE:0cxx_noexcept - Feature record: CXX_FEATURE:0cxx_nonstatic_member_init - Feature record: CXX_FEATURE:0cxx_nullptr - Feature record: CXX_FEATURE:0cxx_override - Feature record: CXX_FEATURE:0cxx_range_for - Feature record: CXX_FEATURE:0cxx_raw_string_literals - Feature record: CXX_FEATURE:0cxx_reference_qualified_functions - Feature record: CXX_FEATURE:0cxx_relaxed_constexpr - Feature record: CXX_FEATURE:0cxx_return_type_deduction - Feature record: CXX_FEATURE:0cxx_right_angle_brackets - Feature record: CXX_FEATURE:0cxx_rvalue_references - Feature record: CXX_FEATURE:0cxx_sizeof_member - Feature record: CXX_FEATURE:0cxx_static_assert - Feature record: CXX_FEATURE:0cxx_strong_enums - Feature record: CXX_FEATURE:1cxx_template_template_parameters - Feature record: CXX_FEATURE:0cxx_thread_local - Feature record: CXX_FEATURE:0cxx_trailing_return_types - Feature record: CXX_FEATURE:0cxx_unicode_literals - Feature record: CXX_FEATURE:0cxx_uniform_initialization - Feature record: CXX_FEATURE:0cxx_unrestricted_unions - Feature record: CXX_FEATURE:0cxx_user_literals - Feature record: CXX_FEATURE:0cxx_variable_templates - Feature record: CXX_FEATURE:0cxx_variadic_macros - Feature record: CXX_FEATURE:0cxx_variadic_templates diff --git a/src/externals/pio2/doc/CMakeFiles/cmake.check_cache b/src/externals/pio2/doc/CMakeFiles/cmake.check_cache deleted file mode 100644 index 3dccd731726..00000000000 --- a/src/externals/pio2/doc/CMakeFiles/cmake.check_cache +++ /dev/null @@ -1 +0,0 @@ -# This file is generated by cmake for dependency checking of the CMakeCache.txt file diff --git a/src/externals/pio2/doc/CMakeLists.txt b/src/externals/pio2/doc/CMakeLists.txt deleted file mode 100644 index 982b445b66a..00000000000 --- a/src/externals/pio2/doc/CMakeLists.txt +++ /dev/null @@ -1,43 +0,0 @@ -#============================================================================== -# -# API documentation with Doxygen -# -#============================================================================== - -find_package(Doxygen) - -if(DOXYGEN_FOUND) - # This supports the build with/witout internal documentation. 
- if (PIO_INTERNAL_DOC) - SET(C_SRC_FILES "${CMAKE_CURRENT_SOURCE_DIR}/../src/clib") - else () - SET(C_SRC_FILES - "${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pioc.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_nc4.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_darray.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_get_nc.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_put_nc.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_varm.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_file.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio.h \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pio_nc.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/topology.c \\ -${CMAKE_CURRENT_SOURCE_DIR}/../src/clib/pioc_sc.c" ) - endif () - - # Process the Doxyfile using options set during configure. - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in - ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) - - # Copy necessary files. - add_custom_target(doc - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/customdoxygen.css - ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml - ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/doxygen.sty - ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMENT "Generating API documentation with Doxygen" VERBATIM) -endif(DOXYGEN_FOUND) diff --git a/src/externals/pio2/doc/Doxyfile.in b/src/externals/pio2/doc/Doxyfile.in deleted file mode 100644 index 0f43531ce46..00000000000 --- a/src/externals/pio2/doc/Doxyfile.in +++ /dev/null @@ -1,2389 +0,0 @@ -# Doxyfile 1.8.9.1 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = PIO - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. 
- -PROJECT_NUMBER = @VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@ - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = .. - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. 
Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = NO - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = YES - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. 
The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = YES - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. 
For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = f90=Fortran - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. 
- -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. 
- -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = NO - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. 
- -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. 
-# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. 
- -LAYOUT_FILE = @CMAKE_CURRENT_SOURCE_DIR@/DoxygenLayout.xml - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = doxywarn.log - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. -# Note: If this tag is empty the current directory is searched. 
- -INPUT = @CMAKE_CURRENT_SOURCE_DIR@/../doc/source \ - @CMAKE_CURRENT_SOURCE_DIR@/../examples/c \ - @CMAKE_CURRENT_SOURCE_DIR@/../examples/f03 \ - @FORTRAN_SRC_FILES@ \ - @C_SRC_FILES@ - - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. - -FILE_PATTERNS = *.c \ - *.h \ - *.inc \ - *.dox \ - *.f90 \ - *.F90 \ - *.txt - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = gptl \ - @CMAKE_BINARY_DIR@/src/flib/*.dir \ - @CMAKE_BINARY_DIR@/src/flib/genf90 \ - ../src/clib/uthash.h \ - _UNUSED_ - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = ./source/example \ - ../examples/basic - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. 
- -EXAMPLE_PATTERNS = * - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = ./images - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# <filter> <input-file> -# -# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments.
Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. 
The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: NO. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = docs - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. 
-# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -#HTML_EXTRA_STYLESHEET = ../../docs/customdoxygen.css -HTML_EXTRA_STYLESHEET = customdoxygen.css - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. 
Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. 
- -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. 
However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use <access key> + S -# (what the <access key> is depends on the OS and browser, but it is typically -# <CTRL>, <ALT>/